Diffstat
-rw-r--r-- .clang-format | 39
-rw-r--r-- .gitignore | 3
-rw-r--r-- .mailmap | 8
-rw-r--r-- Documentation/ABI/stable/sysfs-bus-nvmem | 2
-rw-r--r-- Documentation/ABI/testing/configfs-usb-gadget-acm | 7
-rw-r--r-- Documentation/ABI/testing/configfs-usb-gadget-uac1 | 8
-rw-r--r-- Documentation/ABI/testing/configfs-usb-gadget-uac2 | 11
-rw-r--r-- Documentation/ABI/testing/debugfs-iio-ad9467 | 39
-rw-r--r-- Documentation/ABI/testing/debugfs-iio-backend | 20
-rw-r--r-- Documentation/ABI/testing/sysfs-block-zram | 7
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio | 76
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio-adc-max9611 | 17
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40 | 14
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio-dac | 61
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688 | 31
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 | 2
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc | 9
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-pci | 72
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-memory | 6
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-system-cpu | 6
-rw-r--r-- Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon | 8
-rw-r--r-- Documentation/ABI/testing/sysfs-driver-ufs | 27
-rw-r--r-- Documentation/ABI/testing/sysfs-fs-f2fs | 56
-rw-r--r-- Documentation/accel/qaic/qaic.rst | 6
-rw-r--r-- Documentation/admin-guide/blockdev/zram.rst | 66
-rw-r--r-- Documentation/admin-guide/cgroup-v1/memory.rst | 32
-rw-r--r-- Documentation/admin-guide/cgroup-v2.rst | 43
-rw-r--r-- Documentation/admin-guide/device-mapper/delay.rst | 41
-rw-r--r-- Documentation/admin-guide/device-mapper/dm-crypt.rst | 4
-rw-r--r-- Documentation/admin-guide/device-mapper/vdo.rst | 7
-rw-r--r-- Documentation/admin-guide/ext4.rst | 10
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 86
-rw-r--r-- Documentation/admin-guide/media/cec.rst | 87
-rw-r--r-- Documentation/admin-guide/media/mgb4.rst | 23
-rw-r--r-- Documentation/admin-guide/media/rkisp1.rst | 11
-rw-r--r-- Documentation/admin-guide/media/vivid.rst | 4
-rw-r--r-- Documentation/admin-guide/mm/damon/start.rst | 4
-rw-r--r-- Documentation/admin-guide/mm/damon/usage.rst | 8
-rw-r--r-- Documentation/admin-guide/mm/memory-hotplug.rst | 5
-rw-r--r-- Documentation/admin-guide/mm/transhuge.rst | 64
-rw-r--r-- Documentation/arch/loongarch/irq-chip-model.rst | 32
-rw-r--r-- Documentation/arch/s390/vfio-ap.rst | 30
-rw-r--r-- Documentation/arch/x86/x86_64/boot-options.rst | 12
-rw-r--r-- Documentation/bpf/btf.rst | 39
-rw-r--r-- Documentation/bpf/libbpf/program_types.rst | 30
-rw-r--r-- Documentation/bpf/verifier.rst | 2
-rw-r--r-- Documentation/core-api/cleanup.rst | 8
-rw-r--r-- Documentation/core-api/cpu_hotplug.rst | 10
-rw-r--r-- Documentation/core-api/folio_queue.rst | 212
-rw-r--r-- Documentation/core-api/index.rst | 1
-rw-r--r-- Documentation/core-api/printk-formats.rst | 4
-rw-r--r-- Documentation/dev-tools/kfence.rst | 7
-rw-r--r-- Documentation/dev-tools/kunit/api/clk.rst | 10
-rw-r--r-- Documentation/dev-tools/kunit/api/index.rst | 21
-rw-r--r-- Documentation/dev-tools/kunit/api/of.rst | 13
-rw-r--r-- Documentation/dev-tools/kunit/api/platformdevice.rst | 10
-rw-r--r-- Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml | 38
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt | 43
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt | 28
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt | 29
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/ata/ahci-platform.yaml | 33
-rw-r--r-- Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml | 42
-rw-r--r-- Documentation/devicetree/bindings/ata/imx-sata.yaml | 47
-rw-r--r-- Documentation/devicetree/bindings/ata/qcom-sata.txt | 48
-rw-r--r-- Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,infracfg.yaml (renamed from Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.yaml) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,mt8186-clock.yaml (renamed from Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,mt8186-sys-clock.yaml (renamed from Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,mt8192-clock.yaml (renamed from Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,mt8192-sys-clock.yaml (renamed from Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,mt8195-clock.yaml (renamed from Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-clock.yaml) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,mt8195-sys-clock.yaml (renamed from Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,pericfg.yaml (renamed from Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/mediatek,syscon.yaml | 93
-rw-r--r-- Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml | 51
-rw-r--r-- Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml | 35
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,a53pll.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml | 47
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml | 13
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml | 19
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml | 11
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,turingcc.txt | 19
-rw-r--r-- Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml | 56
-rw-r--r-- Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml | 10
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml | 21
-rw-r--r-- Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml | 17
-rw-r--r-- Documentation/devicetree/bindings/display/msm/hdmi.yaml | 28
-rw-r--r-- Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml | 21
-rw-r--r-- Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml | 63
-rw-r--r-- Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml | 17
-rw-r--r-- Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml | 32
-rw-r--r-- Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml | 84
-rw-r--r-- Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml | 144
-rw-r--r-- Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml | 14
-rw-r--r-- Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml | 15
-rw-r--r-- Documentation/devicetree/bindings/dma/fsl-qdma.yaml | 13
-rw-r--r-- Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml | 65
-rw-r--r-- Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml | 61
-rw-r--r-- Documentation/devicetree/bindings/dma/mv-xor-v2.txt | 28
-rw-r--r-- Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/eeprom/at24.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml | 11
-rw-r--r-- Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt | 21
-rw-r--r-- Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml | 37
-rw-r--r-- Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml | 9
-rw-r--r-- Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml | 50
-rw-r--r-- Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-sprd.txt | 31
-rw-r--r-- Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml | 27
-rw-r--r-- Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/i2c/renesas,riic.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml | 65
-rw-r--r-- Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml | 92
-rw-r--r-- Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml | 254
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml | 33
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml | 13
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml | 123
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml | 71
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml | 25
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml | 83
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml | 122
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml | 12
-rw-r--r-- Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml | 181
-rw-r--r-- Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml | 160
-rw-r--r-- Documentation/devicetree/bindings/iio/dac/dac.yaml | 50
-rw-r--r-- Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml | 10
-rw-r--r-- Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml | 55
-rw-r--r-- Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml | 53
-rw-r--r-- Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml (renamed from Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml) | 11
-rw-r--r-- Documentation/devicetree/bindings/iio/light/stk33xx.yaml | 13
-rw-r--r-- Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml | 46
-rw-r--r-- Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml | 61
-rw-r--r-- Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml | 93
-rw-r--r-- Documentation/devicetree/bindings/input/adi,adp5588.yaml | 38
-rw-r--r-- Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml | 87
-rw-r--r-- Documentation/devicetree/bindings/input/rotary-encoder.txt | 50
-rw-r--r-- Documentation/devicetree/bindings/input/rotary-encoder.yaml | 90
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/ad7879.txt | 71
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml | 150
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/ads7846.txt | 107
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt | 34
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/goodix.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml | 183
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml | 77
-rw-r--r-- Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml | 10
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml | 25
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/leds/common.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/leds/leds-lm3692x.txt | 65
-rw-r--r-- Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt | 43
-rw-r--r-- Documentation/devicetree/bindings/leds/nxp,pca995x.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml | 84
-rw-r--r-- Documentation/devicetree/bindings/leds/ti.lm36922.yaml | 110
-rw-r--r-- Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml | 12
-rw-r--r-- Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml | 107
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,fcp.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,vin.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,vsp1.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/media/rockchip-vpu.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/mfd/adi,adp5585.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/mfd/syscon.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/mtd/technologic,nand.yaml | 45
-rw-r--r-- Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml | 59
-rw-r--r-- Documentation/devicetree/bindings/net/ti,cc1352p7.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml (renamed from Documentation/devicetree/bindings/nvmem/u-boot,env.yaml) | 39
-rw-r--r-- Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/pci/altera-pcie-msi.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/pci/altera-pcie.txt | 50
-rw-r--r-- Documentation/devicetree/bindings/pci/altr,msi-controller.yaml | 65
-rw-r--r-- Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml | 114
-rw-r--r-- Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml | 40
-rw-r--r-- Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml | 13
-rw-r--r-- Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml | 16
-rw-r--r-- Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml | 41
-rw-r--r-- Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/pci/host-generic-pci.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml | 68
-rw-r--r-- Documentation/devicetree/bindings/pci/pci-ep.yaml | 14
-rw-r--r-- Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml | 27
-rw-r--r-- Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml | 10
-rw-r--r-- Documentation/devicetree/bindings/pci/qcom,pcie.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml | 10
-rw-r--r-- Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml | 36
-rw-r--r-- Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml | 17
-rw-r--r-- Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml | 56
-rw-r--r-- Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml | 45
-rw-r--r-- Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt | 59
-rw-r--r-- Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml | 55
-rw-r--r-- Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt | 76
-rw-r--r-- Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml | 242
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml | 70
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt | 95
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml | 110
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt | 188
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml | 129
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt | 85
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml | 103
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt | 101
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml | 108
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml | 122
-rw-r--r-- Documentation/devicetree/bindings/power/wakeup-source.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml | 53
-rw-r--r-- Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml | 125
-rw-r--r-- Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/riscv/extensions.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml | 49
-rw-r--r-- Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml | 28
-rw-r--r-- Documentation/devicetree/bindings/rtc/trivial-rtc.yaml | 9
-rw-r--r-- Documentation/devicetree/bindings/serial/8250_omap.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml | 9
-rw-r--r-- Documentation/devicetree/bindings/serial/mediatek,uart.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/serial/renesas,scif.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/serial/samsung_uart.yaml | 70
-rw-r--r-- Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml | 94
-rw-r--r-- Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml | 16
-rw-r--r-- Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml | 70
-rw-r--r-- Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml | 52
-rw-r--r-- Documentation/devicetree/bindings/usb/msm-hsusb.txt | 110
-rw-r--r-- Documentation/devicetree/bindings/usb/qcom,dwc3.yaml | 20
-rw-r--r-- Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml | 42
-rw-r--r-- Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml | 17
-rw-r--r-- Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml | 6
-rw-r--r-- Documentation/dontdiff | 1
-rw-r--r-- Documentation/driver-api/cxl/access-coordinates.rst | 91
-rw-r--r-- Documentation/driver-api/cxl/index.rst | 1
-rw-r--r-- Documentation/driver-api/firewire.rst | 2
-rw-r--r-- Documentation/driver-api/media/mc-core.rst | 67
-rw-r--r-- Documentation/features/vm/PG_uncached/arch-support.txt | 30
-rw-r--r-- Documentation/filesystems/9p.rst | 58
-rw-r--r-- Documentation/filesystems/bcachefs/CodingStyle.rst | 2
-rw-r--r-- Documentation/filesystems/iomap/design.rst | 4
-rw-r--r-- Documentation/filesystems/nfs/index.rst | 1
-rw-r--r-- Documentation/filesystems/nfs/localio.rst | 357
-rw-r--r-- Documentation/filesystems/vfs.rst | 3
-rw-r--r-- Documentation/gpu/amdgpu/driver-core.rst | 2
-rw-r--r-- Documentation/gpu/introduction.rst | 10
-rw-r--r-- Documentation/gpu/todo.rst | 69
-rw-r--r-- Documentation/gpu/xe/xe_mm.rst | 15
-rw-r--r-- Documentation/i2c/slave-testunit-backend.rst | 120
-rw-r--r-- Documentation/iio/ad4000.rst | 131
-rw-r--r-- Documentation/iio/ad4695.rst | 167
-rw-r--r-- Documentation/iio/ad7380.rst | 130
-rw-r--r-- Documentation/iio/adxl380.rst | 233
-rw-r--r-- Documentation/iio/index.rst | 4
-rw-r--r-- Documentation/kbuild/kbuild.rst | 10
-rw-r--r-- Documentation/kbuild/kconfig-language.rst | 6
-rw-r--r-- Documentation/kbuild/makefiles.rst | 1
-rw-r--r-- Documentation/kbuild/modules.rst | 224
-rw-r--r-- Documentation/leds/leds-blinkm.rst | 29
-rw-r--r-- Documentation/leds/well-known-leds.txt | 8
-rw-r--r-- Documentation/mm/damon/design.rst | 2
-rw-r--r-- Documentation/mm/damon/maintainer-profile.rst | 86
-rw-r--r-- Documentation/mm/page_migration.rst | 22
-rw-r--r-- Documentation/mm/transhuge.rst | 6
-rw-r--r-- Documentation/mm/unevictable-lru.rst | 8
-rw-r--r-- Documentation/networking/tproxy.rst | 2
-rw-r--r-- Documentation/process/changes.rst | 7
-rw-r--r-- Documentation/rust/general-information.rst | 27
-rw-r--r-- Documentation/rust/index.rst | 18
-rw-r--r-- Documentation/rust/quick-start.rst | 4
-rw-r--r-- Documentation/scheduler/index.rst | 1
-rw-r--r-- Documentation/scheduler/sched-deadline.rst | 14
-rw-r--r-- Documentation/scheduler/sched-ext.rst | 326
-rw-r--r-- Documentation/staging/xz.rst | 157
-rw-r--r-- Documentation/trace/debugging.rst | 159
-rw-r--r-- Documentation/trace/ftrace.rst | 12
-rw-r--r-- Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst | 4
-rw-r--r-- Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst | 8
-rw-r--r-- Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst | 32
-rw-r--r-- Documentation/translations/zh_CN/mm/page_migration.rst | 6
-rw-r--r-- Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst | 4
-rw-r--r-- Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst | 8
-rw-r--r-- Documentation/usb/functionfs-desc.rst | 39
-rw-r--r-- Documentation/usb/functionfs.rst | 2
-rw-r--r-- Documentation/usb/gadget-testing.rst | 19
-rw-r--r-- Documentation/usb/index.rst | 1
-rw-r--r-- Documentation/userspace-api/landlock.rst | 58
-rw-r--r-- Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst | 6
-rw-r--r-- Documentation/userspace-api/media/cec/cec-ioc-receive.rst | 15
-rw-r--r-- Documentation/userspace-api/media/v4l/biblio.rst | 11
-rw-r--r-- Documentation/userspace-api/media/v4l/buffer.rst | 35
-rw-r--r-- Documentation/userspace-api/media/v4l/capture.c.rst | 6
-rw-r--r-- Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst | 20
-rw-r--r-- Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst | 2
-rw-r--r-- Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst | 57
-rw-r--r-- Documentation/userspace-api/media/v4l/mt2110t.svg | 315
-rw-r--r-- Documentation/userspace-api/media/v4l/pixfmt-reserved.rst | 13
-rw-r--r-- Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst | 181
-rw-r--r-- Documentation/userspace-api/media/v4l/vidioc-querycap.rst | 11
-rw-r--r-- Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst | 40
-rw-r--r-- Documentation/userspace-api/media/videodev2.h.rst.exceptions | 1
-rw-r--r-- Documentation/virt/kvm/api.rst | 31
-rw-r--r-- Documentation/virt/kvm/locking.rst | 32
-rw-r--r-- Documentation/virt/uml/user_mode_linux_howto_v2.rst | 37
-rw-r--r-- Documentation/watchdog/convert_drivers_to_kernel_api.rst | 1
-rw-r--r-- LICENSES/deprecated/0BSD | 23
-rw-r--r-- MAINTAINERS | 198
-rw-r--r-- Makefile | 30
-rw-r--r-- arch/Kconfig | 25
-rw-r--r-- arch/alpha/Kconfig | 2
-rw-r--r-- arch/alpha/include/asm/cmpxchg.h | 239
-rw-r--r-- arch/alpha/include/asm/xchg.h | 246
-rw-r--r-- arch/alpha/kernel/osf_sys.c | 6
-rw-r--r-- arch/arc/Kconfig | 2
-rw-r--r-- arch/arc/configs/axs101_defconfig | 1
-rw-r--r-- arch/arc/configs/axs103_defconfig | 1
-rw-r--r-- arch/arc/configs/axs103_smp_defconfig | 1
-rw-r--r-- arch/arc/configs/tb10x_defconfig | 1
-rw-r--r-- arch/arc/mm/mmap.c | 3
-rw-r--r-- arch/arm/Kconfig | 2
-rw-r--r-- arch/arm/Makefile | 1
-rw-r--r-- arch/arm/boot/dts/cirrus/Makefile | 4
-rw-r--r-- arch/arm/boot/dts/cirrus/ep93xx-bk3.dts | 125
-rw-r--r-- arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts | 181
-rw-r--r-- arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts | 145
-rw-r--r-- arch/arm/boot/dts/cirrus/ep93xx.dtsi | 444
-rw-r--r-- arch/arm/configs/hisi_defconfig | 1
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r-- arch/arm/configs/pxa_defconfig | 1
-rw-r--r-- arch/arm/configs/socfpga_defconfig | 1
-rw-r--r-- arch/arm/configs/spear13xx_defconfig | 1
-rw-r--r-- arch/arm/configs/spear3xx_defconfig | 1
-rw-r--r-- arch/arm/configs/spear6xx_defconfig | 1
-rw-r--r-- arch/arm/kernel/sys_oabi-compat.c | 10
-rw-r--r-- arch/arm/lib/xor-neon.c | 1
-rw-r--r-- arch/arm/mach-ep93xx/Kconfig | 20
-rw-r--r-- arch/arm/mach-ep93xx/Makefile | 11
-rw-r--r-- arch/arm/mach-ep93xx/clock.c | 733
-rw-r--r-- arch/arm/mach-ep93xx/core.c | 1018
-rw-r--r-- arch/arm/mach-ep93xx/dma.c | 114
-rw-r--r-- arch/arm/mach-ep93xx/edb93xx.c | 368
-rw-r--r-- arch/arm/mach-ep93xx/ep93xx-regs.h | 38
-rw-r--r-- arch/arm/mach-ep93xx/gpio-ep93xx.h | 111
-rw-r--r-- arch/arm/mach-ep93xx/hardware.h | 25
-rw-r--r-- arch/arm/mach-ep93xx/irqs.h | 76
-rw-r--r-- arch/arm/mach-ep93xx/platform.h | 42
-rw-r--r-- arch/arm/mach-ep93xx/soc.h | 212
-rw-r--r-- arch/arm/mach-ep93xx/timer-ep93xx.c | 143
-rw-r--r-- arch/arm/mach-ep93xx/ts72xx.c | 422
-rw-r--r-- arch/arm/mach-ep93xx/ts72xx.h | 94
-rw-r--r-- arch/arm/mach-ep93xx/vision_ep9307.c | 319
-rw-r--r-- arch/arm/mach-lpc32xx/Kconfig | 1
-rw-r--r-- arch/arm/mach-omap2/omap-mpuss-lowpower.c | 2
-rw-r--r-- arch/arm/mach-pxa/spitz.c | 163
-rw-r--r-- arch/arm/mm/fault-armv.c | 6
-rw-r--r-- arch/arm/mm/mmap.c | 7
-rw-r--r-- arch/arm64/Kconfig | 20
-rw-r--r-- arch/arm64/Makefile | 3
-rw-r--r-- arch/arm64/boot/Makefile | 5
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt7981b.dtsi | 33
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts | 3
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts | 1
-rw-r--r-- arch/arm64/configs/defconfig | 1
-rw-r--r-- arch/arm64/include/asm/Kbuild | 1
-rw-r--r-- arch/arm64/include/asm/mmzone.h | 13
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 30
-rw-r--r-- arch/arm64/include/asm/topology.h | 1
-rw-r--r-- arch/arm64/kernel/vmlinux.lds.S | 3
-rw-r--r-- arch/arm64/kvm/arm.c | 6
-rw-r--r-- arch/arm64/kvm/nested.c | 1
-rw-r--r-- arch/arm64/mm/init.c | 39
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 508
-rw-r--r-- arch/csky/abiv1/mmap.c | 3
-rw-r--r-- arch/csky/kernel/vdso.c | 28
-rw-r--r-- arch/hexagon/kernel/vdso.c | 14
-rw-r--r-- arch/loongarch/Kconfig | 7
-rw-r--r-- arch/loongarch/configs/loongson3_defconfig | 1
-rw-r--r-- arch/loongarch/include/asm/Kbuild | 1
-rw-r--r-- arch/loongarch/include/asm/atomic.h | 2
-rw-r--r-- arch/loongarch/include/asm/cpu-features.h | 2
-rw-r--r-- arch/loongarch/include/asm/cpu.h | 30
-rw-r--r-- arch/loongarch/include/asm/loongarch.h | 1
-rw-r--r-- arch/loongarch/include/asm/mmu_context.h | 35
-rw-r--r-- arch/loongarch/include/asm/mmzone.h | 16
-rw-r--r-- arch/loongarch/include/asm/percpu.h | 124
-rw-r--r-- arch/loongarch/include/asm/pgtable.h | 32
-rw-r--r-- arch/loongarch/include/asm/set_memory.h | 21
-rw-r--r-- arch/loongarch/include/asm/topology.h | 1
-rw-r--r-- arch/loongarch/include/uapi/asm/hwcap.h | 1
-rw-r--r-- arch/loongarch/include/uapi/asm/sigcontext.h | 1
-rw-r--r-- arch/loongarch/kernel/acpi.c | 4
-rw-r--r-- arch/loongarch/kernel/cpu-probe.c | 120
-rw-r--r-- arch/loongarch/kernel/numa.c | 21
-rw-r--r-- arch/loongarch/kernel/proc.c | 10
-rw-r--r-- arch/loongarch/kernel/syscall.c | 4
-rw-r--r-- arch/loongarch/kvm/main.c | 4
-rw-r--r-- arch/loongarch/mm/Makefile | 3
-rw-r--r-- arch/loongarch/mm/fault.c | 41
-rw-r--r-- arch/loongarch/mm/mmap.c | 5
-rw-r--r-- arch/loongarch/mm/pageattr.c | 218
-rw-r--r-- arch/loongarch/pci/acpi.c | 1
-rw-r--r-- arch/loongarch/vdso/vgetrandom-chacha.S | 92
-rw-r--r-- arch/m68k/kernel/setup_no.c | 2
-rw-r--r-- arch/mips/Kconfig | 7
-rw-r--r-- arch/mips/configs/generic/board-ocelot.config | 1
-rw-r--r-- arch/mips/include/asm/kvm_host.h | 4
-rw-r--r-- arch/mips/include/asm/mach-ip27/mmzone.h | 1
-rw-r--r-- arch/mips/include/asm/mach-loongson64/mmzone.h | 4
-rw-r--r-- arch/mips/kvm/mips.c | 8
-rw-r--r-- arch/mips/kvm/vz.c | 8
-rw-r--r-- arch/mips/loongson64/numa.c | 28
-rw-r--r-- arch/mips/mm/mmap.c | 5
-rw-r--r-- arch/mips/sgi-ip27/ip27-memory.c | 12
-rw-r--r-- arch/mips/sgi-ip27/ip27-smp.c | 2
-rw-r--r-- arch/nios2/mm/init.c | 12
-rw-r--r-- arch/parisc/Kconfig | 2
-rw-r--r-- arch/parisc/kernel/perf.c | 1
-rw-r--r-- arch/parisc/kernel/sys_parisc.c | 5
-rw-r--r-- arch/parisc/mm/hugetlbpage.c | 2
-rw-r--r-- arch/powerpc/Kconfig | 2
-rw-r--r-- arch/powerpc/boot/xz_config.h | 3
-rw-r--r-- arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r-- arch/powerpc/crypto/Kconfig | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 3
-rw-r--r-- arch/powerpc/include/asm/mmu_context.h | 9
-rw-r--r-- arch/powerpc/include/asm/mmzone.h | 6
-rw-r--r-- arch/powerpc/include/asm/pgtable.h | 1
-rw-r--r-- arch/powerpc/kernel/eeh.c | 198
-rw-r--r-- arch/powerpc/kernel/nvram_64.c | 8
-rw-r--r-- arch/powerpc/kernel/vdso.c | 35
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio.c | 4
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 12
-rw-r--r-- arch/powerpc/lib/crtsavres.S | 2
-rw-r--r-- arch/powerpc/mm/book3s64/pgtable.c | 20
-rw-r--r-- arch/powerpc/mm/book3s64/slice.c | 10
-rw-r--r-- arch/powerpc/mm/mem.c | 5
-rw-r--r-- arch/powerpc/mm/numa.c | 26
-rw-r--r-- arch/powerpc/mm/pgtable-frag.c | 6
-rw-r--r-- arch/powerpc/mm/pgtable.c | 6
-rw-r--r-- arch/powerpc/platforms/cell/spu_syscalls.c | 8
-rw-r--r-- arch/powerpc/platforms/powernv/opal-kmsg.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/papr-vpd.c | 5
-rw-r--r-- arch/riscv/Kconfig | 19
-rw-r--r-- arch/riscv/Makefile | 6
-rw-r--r-- arch/riscv/boot/Makefile | 3
-rw-r--r-- arch/riscv/configs/defconfig | 13
-rw-r--r-- arch/riscv/configs/nommu_k210_defconfig | 1
-rw-r--r-- arch/riscv/configs/nommu_k210_sdcard_defconfig | 1
-rw-r--r-- arch/riscv/errata/sifive/errata_cip_453.S | 8
-rw-r--r-- arch/riscv/include/asm/Kbuild | 1
-rw-r--r-- arch/riscv/include/asm/acpi.h | 2
-rw-r--r-- arch/riscv/include/asm/bitops.h | 43
-rw-r--r-- arch/riscv/include/asm/cacheflush.h | 18
-rw-r--r-- arch/riscv/include/asm/exec.h | 8
-rw-r--r-- arch/riscv/include/asm/fence.h | 1
-rw-r--r-- arch/riscv/include/asm/hwcap.h | 1
-rw-r--r-- arch/riscv/include/asm/irq.h | 5
-rw-r--r-- arch/riscv/include/asm/mmzone.h | 13
-rw-r--r-- arch/riscv/include/asm/page.h | 29
-rw-r--r-- arch/riscv/include/asm/pgtable.h | 28
-rw-r--r-- arch/riscv/include/asm/sbi.h | 1
-rw-r--r-- arch/riscv/include/asm/set_memory.h | 2
-rw-r--r-- arch/riscv/include/asm/sparsemem.h | 2
-rw-r--r-- arch/riscv/include/asm/string.h | 2
-rw-r--r-- arch/riscv/include/asm/thread_info.h | 11
-rw-r--r-- arch/riscv/include/asm/topology.h | 4
-rw-r--r-- arch/riscv/include/asm/vmalloc.h | 1
-rw-r--r-- arch/riscv/include/asm/xip_fixup.h | 30
-rw-r--r-- arch/riscv/kernel/acpi_numa.c | 2
-rw-r--r-- arch/riscv/kernel/asm-offsets.c | 7
-rw-r--r-- arch/riscv/kernel/cacheinfo.c | 5
-rw-r--r-- arch/riscv/kernel/cpufeature.c | 1
-rw-r--r-- arch/riscv/kernel/elf_kexec.c | 6
-rw-r--r-- arch/riscv/kernel/entry.S | 91
-rw-r--r-- arch/riscv/kernel/module.c | 4
-rw-r--r-- arch/riscv/kernel/perf_callchain.c | 46
-rw-r--r-- arch/riscv/kernel/pi/Makefile | 4
-rw-r--r-- arch/riscv/kernel/pi/archrandom_early.c | 30
-rw-r--r-- arch/riscv/kernel/pi/cmdline_early.c | 10
-rw-r--r-- arch/riscv/kernel/pi/fdt_early.c | 167
-rw-r--r-- arch/riscv/kernel/pi/pi.h | 20
-rw-r--r-- arch/riscv/kernel/process.c | 9
-rw-r--r-- arch/riscv/kernel/riscv_ksyms.c | 3
-rw-r--r-- arch/riscv/kernel/smp.c | 43
-rw-r--r-- arch/riscv/kernel/stacktrace.c | 43
-rw-r--r-- arch/riscv/kernel/vdso/Makefile | 2
-rw-r--r-- arch/riscv/kernel/vendor_extensions/andes.c | 2
-rw-r--r-- arch/riscv/kernel/vmcore_info.c | 7
-rw-r--r-- arch/riscv/kernel/vmlinux-xip.lds.S | 5
-rw-r--r-- arch/riscv/kvm/main.c | 4
-rw-r--r-- arch/riscv/lib/Makefile | 2
-rw-r--r-- arch/riscv/lib/memset.S | 2
-rw-r--r-- arch/riscv/lib/strcmp.S | 2
-rw-r--r-- arch/riscv/lib/strlen.S | 1
-rw-r--r-- arch/riscv/lib/strncmp.S | 2
-rw-r--r-- arch/riscv/mm/init.c | 28
-rw-r--r-- arch/riscv/mm/pgtable.c | 13
-rw-r--r-- arch/riscv/purgatory/Makefile | 2
-rw-r--r-- arch/s390/Kconfig | 38
-rw-r--r-- arch/s390/Makefile.postlink | 38
-rw-r--r-- arch/s390/boot/Makefile | 36
-rw-r--r-- arch/s390/boot/als.c | 49
-rw-r--r-- arch/s390/boot/boot.h | 2
-rw-r--r-- arch/s390/boot/head.S | 4
-rw-r--r-- arch/s390/boot/ipl_parm.c | 2
-rw-r--r-- arch/s390/boot/kaslr.c | 2
-rw-r--r-- arch/s390/boot/pgm_check_info.c | 160
-rw-r--r-- arch/s390/boot/physmem_info.c | 26
-rw-r--r-- arch/s390/boot/printk.c | 124
-rw-r--r-- arch/s390/boot/startup.c | 7
-rw-r--r-- arch/s390/configs/debug_defconfig | 5
-rw-r--r-- arch/s390/configs/defconfig | 4
-rw-r--r-- arch/s390/crypto/Kconfig | 10
-rw-r--r-- arch/s390/crypto/Makefile | 1
-rw-r--r-- arch/s390/crypto/aes_s390.c | 120
-rw-r--r-- arch/s390/crypto/hmac_s390.c | 359
-rw-r--r-- arch/s390/crypto/paes_s390.c | 9
-rw-r--r-- arch/s390/crypto/sha.h | 1
-rw-r--r-- arch/s390/crypto/sha3_256_s390.c | 11
-rw-r--r-- arch/s390/crypto/sha3_512_s390.c | 11
-rw-r--r-- arch/s390/crypto/sha_common.c | 20
-rw-r--r-- arch/s390/hypfs/hypfs.h | 1
-rw-r--r-- arch/s390/hypfs/hypfs_dbfs.c | 1
-rw-r--r-- arch/s390/hypfs/hypfs_diag.c | 7
-rw-r--r-- arch/s390/hypfs/inode.c | 1
-rw-r--r-- arch/s390/include/asm/Kbuild | 1
-rw-r--r-- arch/s390/include/asm/arch_hweight.h | 15
-rw-r--r-- arch/s390/include/asm/atomic_ops.h | 7
-rw-r--r-- arch/s390/include/asm/barrier.h | 4
-rw-r--r-- arch/s390/include/asm/cpacf.h | 207
-rw-r--r-- arch/s390/include/asm/ctlreg.h | 5
-rw-r--r-- arch/s390/include/asm/diag.h | 9
-rw-r--r-- arch/s390/include/asm/ftrace.h | 17
-rw-r--r-- arch/s390/include/asm/hiperdispatch.h | 14
-rw-r--r-- arch/s390/include/asm/irq.h | 2
-rw-r--r-- arch/s390/include/asm/lowcore.h | 4
-rw-r--r-- arch/s390/include/asm/march.h | 38
-rw-r--r-- arch/s390/include/asm/mmzone.h | 17
-rw-r--r-- arch/s390/include/asm/page.h | 2
-rw-r--r-- arch/s390/include/asm/pci.h | 9
-rw-r--r-- arch/s390/include/asm/percpu.h | 7
-rw-r--r-- arch/s390/include/asm/perf_event.h | 24
-rw-r--r-- arch/s390/include/asm/pgtable.h | 1
-rw-r--r-- arch/s390/include/asm/pkey.h | 4
-rw-r--r-- arch/s390/include/asm/preempt.h | 7
-rw-r--r-- arch/s390/include/asm/processor.h | 1
-rw-r--r-- arch/s390/include/asm/sclp.h | 1
-rw-r--r-- arch/s390/include/asm/setup.h | 4
-rw-r--r-- arch/s390/include/asm/smp.h | 4
-rw-r--r-- arch/s390/include/asm/topology.h | 3
-rw-r--r-- arch/s390/include/asm/trace/hiperdispatch.h | 58
-rw-r--r-- arch/s390/include/uapi/asm/pkey.h | 5
-rw-r--r-- arch/s390/kernel/Makefile | 7
-rw-r--r-- arch/s390/kernel/asm-offsets.c | 3
-rw-r--r-- arch/s390/kernel/cpacf.c | 119
-rw-r--r-- arch/s390/kernel/debug.c | 1
-rw-r--r-- arch/s390/kernel/diag.c | 17
-rw-r--r-- arch/s390/kernel/dis.c | 20
-rw-r--r-- arch/s390/kernel/early.c | 38
-rw-r--r-- arch/s390/kernel/early_printk.c | 16
-rw-r--r-- arch/s390/kernel/earlypgm.S | 23
-rw-r--r-- arch/s390/kernel/entry.S | 36
-rw-r--r-- arch/s390/kernel/ftrace.c | 106
-rw-r--r-- arch/s390/kernel/ftrace.h | 2
-rw-r--r-- arch/s390/kernel/hiperdispatch.c | 430
-rw-r--r-- arch/s390/kernel/irq.c | 1
-rw-r--r-- arch/s390/kernel/kprobes.c | 15
-rw-r--r-- arch/s390/kernel/mcount.S | 5
-rw-r--r-- arch/s390/kernel/numa.c | 3
-rw-r--r-- arch/s390/kernel/perf_cpum_cf.c | 5
-rw-r--r-- arch/s390/kernel/perf_cpum_sf.c | 309
-rw-r--r-- arch/s390/kernel/perf_pai_crypto.c | 16
-rw-r--r-- arch/s390/kernel/perf_pai_ext.c | 9
-rw-r--r-- arch/s390/kernel/smp.c | 21
-rw-r--r-- arch/s390/kernel/stacktrace.c | 19
-rw-r--r-- arch/s390/kernel/sysinfo.c | 1
-rw-r--r-- arch/s390/kernel/topology.c | 76
-rw-r--r-- arch/s390/kernel/uv.c | 23
-rw-r--r-- arch/s390/kernel/vdso64/vdso_user_wrapper.S | 14
-rw-r--r-- arch/s390/kernel/vdso64/vgetrandom-chacha.S | 76
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 3
-rw-r--r-- arch/s390/kernel/wti.c | 215
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 27
-rw-r--r-- arch/s390/mm/cmm.c | 18
-rw-r--r-- arch/s390/mm/dump_pagetables.c | 191
-rw-r--r-- arch/s390/mm/fault.c | 16
-rw-r--r-- arch/s390/mm/init.c | 4
-rw-r--r-- arch/s390/mm/mmap.c | 4
-rw-r--r-- arch/s390/pci/Makefile | 3
-rw-r--r-- arch/s390/pci/pci.c | 1
-rw-r--r-- arch/s390/pci/pci_clp.c | 1
-rw-r--r-- arch/s390/pci/pci_mmio.c | 22
-rw-r--r-- arch/s390/pci/pci_sysfs.c | 14
-rw-r--r-- arch/s390/tools/opcodes.txt | 52
-rw-r--r-- arch/sh/include/asm/irq.h | 6
-rw-r--r-- arch/sh/include/asm/mmzone.h | 3
-rw-r--r-- arch/sh/kernel/vsyscall/vsyscall.c | 14
-rw-r--r-- arch/sh/mm/init.c | 7
-rw-r--r-- arch/sh/mm/mmap.c | 5
-rw-r--r-- arch/sh/mm/numa.c | 3
-rw-r--r-- arch/sparc/Kconfig | 2
-rw-r--r-- arch/sparc/include/asm/mmzone.h | 4
-rw-r--r-- arch/sparc/include/asm/pgtable_64.h | 1
-rw-r--r-- arch/sparc/kernel/sys_sparc_32.c | 2
-rw-r--r-- arch/sparc/kernel/sys_sparc_64.c | 4
-rw-r--r-- arch/sparc/mm/init_64.c | 11
-rw-r--r-- arch/sparc/mm/leon_mm.c | 8
-rw-r--r-- arch/um/Kconfig | 1
-rw-r--r-- arch/um/drivers/harddog_kern.c | 1
-rw-r--r-- arch/um/drivers/hostaudio_kern.c | 2
-rw-r--r-- arch/um/drivers/vector_kern.c | 212
-rw-r--r-- arch/um/drivers/vector_kern.h | 4
-rw-r--r-- arch/um/drivers/vector_user.c | 83
-rw-r--r-- arch/um/include/asm/pgtable.h | 7
-rw-r--r-- arch/um/include/asm/processor-generic.h | 20
-rw-r--r-- arch/um/include/asm/sysrq.h | 8
-rw-r--r-- arch/um/include/shared/skas/mm_id.h | 5
-rw-r--r-- arch/um/include/shared/skas/skas.h | 2
-rw-r--r-- arch/um/kernel/exec.c | 3
-rw-r--r-- arch/um/kernel/kmsg_dump.c | 2
-rw-r--r-- arch/um/kernel/process.c | 8
-rw-r--r-- arch/um/kernel/reboot.c | 2
-rw-r--r-- arch/um/kernel/skas/mmu.c | 12
-rw-r--r-- arch/um/kernel/skas/process.c | 4
-rw-r--r-- arch/um/kernel/skas/syscall.c | 34
-rw-r--r-- arch/um/kernel/sysrq.c | 1
-rw-r--r-- arch/um/kernel/time.c | 2
-rw-r--r-- arch/um/kernel/tlb.c | 16
-rw-r--r-- arch/um/os-Linux/file.c | 8
-rw-r--r-- arch/um/os-Linux/skas/mem.c | 2
-rw-r--r-- arch/um/os-Linux/skas/process.c | 2
-rw-r--r-- arch/x86/Kconfig | 18
-rw-r--r-- arch/x86/Makefile | 11
-rw-r--r-- arch/x86/boot/compressed/misc.c | 2
-rw-r--r-- arch/x86/boot/compressed/misc.h | 2
-rw-r--r-- arch/x86/coco/tdx/tdx.c | 6
-rw-r--r-- arch/x86/configs/tiny.config | 4
-rw-r--r-- arch/x86/include/asm/Kbuild | 1
-rw-r--r-- arch/x86/include/asm/atomic64_32.h | 6
-rw-r--r-- arch/x86/include/asm/cpuid.h | 1
-rw-r--r-- arch/x86/include/asm/intel-family.h | 5
-rw-r--r-- arch/x86/include/asm/kvm-x86-ops.h | 6
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 32
-rw-r--r-- arch/x86/include/asm/mmu_context.h | 5
-rw-r--r-- arch/x86/include/asm/mmzone.h | 6
-rw-r--r-- arch/x86/include/asm/mmzone_32.h | 17
-rw-r--r-- arch/x86/include/asm/mmzone_64.h | 18
-rw-r--r-- arch/x86/include/asm/msr-index.h | 34
-rw-r--r-- arch/x86/include/asm/numa.h | 26
-rw-r--r-- arch/x86/include/asm/pgtable.h | 150
-rw-r--r-- arch/x86/include/asm/pgtable_64.h | 24
-rw-r--r-- arch/x86/include/asm/reboot.h | 4
-rw-r--r-- arch/x86/include/asm/sparsemem.h | 9
-rw-r--r-- arch/x86/include/asm/svm.h | 20
-rw-r--r-- arch/x86/include/asm/uaccess_64.h | 11
-rw-r--r-- arch/x86/include/asm/vmx.h | 40
-rw-r--r-- arch/x86/include/uapi/asm/kvm.h | 1
-rw-r--r-- arch/x86/kernel/cpu/mce/dev-mcelog.c | 1
-rw-r--r-- arch/x86/kernel/cpu/mtrr/mtrr.c | 6
-rw-r--r-- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 1
-rw-r--r-- arch/x86/kernel/cpu/sgx/main.c | 4
-rw-r--r-- arch/x86/kernel/head_64.S | 20
-rw-r--r-- arch/x86/kernel/sys_x86_64.c | 21
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 3
-rw-r--r-- arch/x86/kvm/cpuid.c | 30
-rw-r--r-- arch/x86/kvm/irq.c | 10
-rw-r--r-- arch/x86/kvm/lapic.c | 84
-rw-r--r-- arch/x86/kvm/lapic.h | 3
-rw-r--r-- arch/x86/kvm/mmu.h | 2
-rw-r--r-- arch/x86/kvm/mmu/mmu.c | 556
-rw-r--r-- arch/x86/kvm/mmu/mmu_internal.h | 5
-rw-r--r-- arch/x86/kvm/mmu/mmutrace.h | 1
-rw-r--r-- arch/x86/kvm/mmu/paging_tmpl.h | 63
-rw-r--r-- arch/x86/kvm/mmu/tdp_mmu.c | 6
-rw-r--r-- arch/x86/kvm/reverse_cpuid.h | 8
-rw-r--r-- arch/x86/kvm/smm.c | 24
-rw-r--r-- arch/x86/kvm/svm/nested.c | 4
-rw-r--r-- arch/x86/kvm/svm/sev.c | 16
-rw-r--r-- arch/x86/kvm/svm/svm.c | 87
-rw-r--r-- arch/x86/kvm/svm/svm.h | 18
-rw-r--r-- arch/x86/kvm/svm/vmenter.S | 8
-rw-r--r-- arch/x86/kvm/vmx/capabilities.h | 10
-rw-r--r-- arch/x86/kvm/vmx/main.c | 10
-rw-r--r-- arch/x86/kvm/vmx/nested.c | 134
-rw-r--r-- arch/x86/kvm/vmx/nested.h | 8
-rw-r--r-- arch/x86/kvm/vmx/sgx.c | 2
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 67
-rw-r--r-- arch/x86/kvm/vmx/vmx.h | 9
-rw-r--r-- arch/x86/kvm/vmx/vmx_onhyperv.h | 8
-rw-r--r-- arch/x86/kvm/vmx/vmx_ops.h | 2
-rw-r--r-- arch/x86/kvm/vmx/x86_ops.h | 7
-rw-r--r-- arch/x86/kvm/x86.c | 1002
-rw-r--r-- arch/x86/kvm/x86.h | 31
-rw-r--r-- arch/x86/lib/atomic64_cx8_32.S | 9
-rw-r--r-- arch/x86/mm/Makefile | 1
-rw-r--r-- arch/x86/mm/amdtopology.c | 1
-rw-r--r-- arch/x86/mm/numa.c | 622
-rw-r--r-- arch/x86/mm/numa_internal.h | 24
-rw-r--r-- arch/x86/mm/pat/memtype.c | 61
-rw-r--r-- arch/x86/mm/pgtable.c | 18
-rw-r--r-- arch/x86/mm/testmmiotrace.c | 1
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 161
-rw-r--r-- arch/x86/pci/fixup.c | 4
-rw-r--r-- arch/x86/platform/pvh/head.S | 161
-rw-r--r-- arch/x86/um/sysrq_32.c | 1
-rw-r--r-- arch/x86/um/sysrq_64.c | 1
-rw-r--r-- arch/x86/um/vdso/vma.c | 12
-rw-r--r-- arch/x86/xen/enlighten_pvh.c | 23
-rw-r--r-- arch/x86/xen/mmu_pv.c | 7
-rw-r--r-- arch/xtensa/kernel/syscall.c | 3
-rw-r--r-- block/bdev.c | 4
-rw-r--r-- block/bio-integrity.c | 1
-rw-r--r-- block/blk-core.c | 1
-rw-r--r-- block/blk-integrity.c | 36
-rw-r--r-- block/blk-merge.c | 4
-rw-r--r-- block/blk-mq.c | 5
-rw-r--r-- block/blk-settings.c | 42
-rw-r--r-- block/elevator.c | 4
-rw-r--r-- block/fops.c | 2
-rw-r--r-- certs/Makefile | 2
-rw-r--r-- certs/extract-cert.c | 138
-rw-r--r-- crypto/asymmetric_keys/asymmetric_type.c | 7
-rw-r--r-- drivers/Makefile | 4
-rw-r--r-- drivers/accel/drm_accel.c | 110
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.c | 4
-rw-r--r-- drivers/accel/qaic/qaic_drv.c | 4
-rw-r--r-- drivers/acpi/Kconfig | 2
-rw-r--r-- drivers/acpi/apei/einj-cxl.c | 2
-rw-r--r-- drivers/acpi/apei/erst-dbg.c | 1
-rw-r--r-- drivers/acpi/apei/ghes.c | 2
-rw-r--r-- drivers/acpi/numa/srat.c | 1
-rw-r--r-- drivers/acpi/pci_irq.c | 2
-rw-r--r-- drivers/acpi/pci_mcfg.c | 12
-rw-r--r-- drivers/android/binder.c | 288
-rw-r--r-- drivers/android/binder_internal.h | 21
-rw-r--r-- drivers/android/binderfs.c | 8
-rw-r--r-- drivers/ata/ahci.c | 2
-rw-r--r-- drivers/ata/ahci_brcm.c | 4
-rw-r--r-- drivers/ata/ahci_imx.c | 404
-rw-r--r-- drivers/ata/ata_piix.c | 1
-rw-r--r-- drivers/ata/libahci_platform.c | 19
-rw-r--r-- drivers/ata/libata-core.c | 886
-rw-r--r-- drivers/ata/libata-eh.c | 68
-rw-r--r-- drivers/ata/libata-pmp.c | 3
-rw-r--r-- drivers/ata/libata-sata.c | 137
-rw-r--r-- drivers/ata/libata-scsi.c | 30
-rw-r--r-- drivers/ata/libata-sff.c | 11
-rw-r--r-- drivers/ata/libata-transport.c | 305
-rw-r--r-- drivers/ata/libata-zpodd.c | 2
-rw-r--r-- drivers/ata/libata.h | 26
-rw-r--r-- drivers/ata/pata_cs5520.c | 6
-rw-r--r-- drivers/ata/pata_ep93xx.c | 109
-rw-r--r-- drivers/ata/pata_ftide010.c | 1
-rw-r--r-- drivers/ata/pata_hpt366.c | 10
-rw-r--r-- drivers/ata/pata_hpt37x.c | 10
-rw-r--r-- drivers/ata/pata_icside.c | 2
-rw-r--r-- drivers/ata/pata_it821x.c | 4
-rw-r--r-- drivers/ata/pata_ixp4xx_cf.c | 1
-rw-r--r-- drivers/ata/pata_mpc52xx.c | 1
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 1
-rw-r--r-- drivers/ata/pata_serverworks.c | 16
-rw-r--r-- drivers/ata/sata_gemini.c | 1
-rw-r--r-- drivers/ata/sata_sil.c | 14
-rw-r--r-- drivers/auxdisplay/charlcd.c | 1
-rw-r--r-- drivers/base/Kconfig | 1
-rw-r--r-- drivers/base/arch_numa.c | 224
-rw-r--r-- drivers/base/attribute_container.c | 48
-rw-r--r-- drivers/base/auxiliary.c | 2
-rw-r--r-- drivers/base/base.h | 2
-rw-r--r-- drivers/base/bus.c | 19
-rw-r--r-- drivers/base/cacheinfo.c | 41
-rw-r--r-- drivers/base/class.c | 14
-rw-r--r-- drivers/base/core.c | 168
-rw-r--r-- drivers/base/dd.c | 3
-rw-r--r-- drivers/base/devres.c | 2
-rw-r--r-- drivers/base/driver.c | 2
-rw-r--r-- drivers/base/firmware_loader/main.c | 30
-rw-r--r-- drivers/base/module.c | 14
-rw-r--r-- drivers/base/platform.c | 2
-rw-r--r-- drivers/bcma/driver_pci_host.c | 10
-rw-r--r-- drivers/block/drbd/drbd_main.c | 6
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 2
-rw-r--r-- drivers/block/pktcdvd.c | 1
-rw-r--r-- drivers/block/ublk_drv.c | 1
-rw-r--r-- drivers/block/zram/Kconfig | 77
-rw-r--r-- drivers/block/zram/Makefile | 8
-rw-r--r-- drivers/block/zram/backend_842.c | 61
-rw-r--r-- drivers/block/zram/backend_842.h | 10
-rw-r--r-- drivers/block/zram/backend_deflate.c | 146
-rw-r--r-- drivers/block/zram/backend_deflate.h | 10
-rw-r--r-- drivers/block/zram/backend_lz4.c | 127
-rw-r--r-- drivers/block/zram/backend_lz4.h | 10
-rw-r--r-- drivers/block/zram/backend_lz4hc.c | 128
-rw-r--r-- drivers/block/zram/backend_lz4hc.h | 10
-rw-r--r-- drivers/block/zram/backend_lzo.c | 59
-rw-r--r-- drivers/block/zram/backend_lzo.h | 10
-rw-r--r-- drivers/block/zram/backend_lzorle.c | 59
-rw-r--r-- drivers/block/zram/backend_lzorle.h | 10
-rw-r--r-- drivers/block/zram/backend_zstd.c | 226
-rw-r--r-- drivers/block/zram/backend_zstd.h | 10
-rw-r--r-- drivers/block/zram/zcomp.c | 194
-rw-r--r-- drivers/block/zram/zcomp.h | 71
-rw-r--r-- drivers/block/zram/zram_drv.c | 143
-rw-r--r-- drivers/block/zram/zram_drv.h | 1
-rw-r--r-- drivers/bluetooth/hci_vhci.c | 1
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 2
-rw-r--r-- drivers/bus/mhi/host/init.c | 2
-rw-r--r-- drivers/bus/mhi/host/internal.h | 2
-rw-r--r-- drivers/bus/mhi/host/pci_generic.c | 64
-rw-r--r-- drivers/bus/moxtet.c | 2
-rw-r--r-- drivers/cdx/controller/mcdi.c | 4
-rw-r--r-- drivers/char/applicom.c | 1
-rw-r--r-- drivers/char/ds1620.c | 1
-rw-r--r-- drivers/char/dtlk.c | 1
-rw-r--r-- drivers/char/hpet.c | 7
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 1
-rw-r--r-- drivers/char/pc8736x_gpio.c | 1
-rw-r--r-- drivers/char/ppdev.c | 1
-rw-r--r-- drivers/char/scx200_gpio.c | 1
-rw-r--r-- drivers/char/sonypi.c | 1
-rw-r--r-- drivers/char/tpm/tpm-dev.c | 1
-rw-r--r-- drivers/char/tpm/tpm_vtpm_proxy.c | 1
-rw-r--r-- drivers/char/tpm/tpmrm-dev.c | 1
-rw-r--r-- drivers/char/virtio_console.c | 1
-rw-r--r-- drivers/clk/.kunitconfig | 2
-rw-r--r-- drivers/clk/Kconfig | 19
-rw-r--r-- drivers/clk/Makefile | 12
-rw-r--r-- drivers/clk/at91/Makefile | 1
-rw-r--r-- drivers/clk/at91/clk-sam9x60-pll.c | 42
-rw-r--r-- drivers/clk/at91/dt-compat.c | 5
-rw-r--r-- drivers/clk/at91/pmc.h | 18
-rw-r--r-- drivers/clk/at91/sam9x60.c | 7
-rw-r--r-- drivers/clk/at91/sam9x7.c | 946
-rw-r--r-- drivers/clk/at91/sama7g5.c | 47
-rw-r--r-- drivers/clk/axs10x/i2s_pll_clock.c | 2
-rw-r--r-- drivers/clk/bcm/clk-bcm2711-dvp.c | 2
-rw-r--r-- drivers/clk/bcm/clk-bcm53573-ilp.c | 2
-rw-r--r-- drivers/clk/bcm/clk-bcm63xx-gate.c | 2
-rw-r--r-- drivers/clk/bcm/clk-raspberrypi.c | 2
-rw-r--r-- drivers/clk/clk-conf.c | 43
-rw-r--r-- drivers/clk/clk-devres.c | 28
-rw-r--r-- drivers/clk/clk-ep93xx.c | 850
-rw-r--r-- drivers/clk/clk-fixed-factor.c | 2
-rw-r--r-- drivers/clk/clk-fixed-mmio.c | 2
-rw-r--r-- drivers/clk/clk-fixed-rate.c | 2
-rw-r--r-- drivers/clk/clk-fixed-rate_test.c | 380
-rw-r--r-- drivers/clk/clk-fixed-rate_test.h | 8
-rw-r--r-- drivers/clk/clk-lmk04832.c | 43
-rw-r--r-- drivers/clk/clk-palmas.c | 2
-rw-r--r-- drivers/clk/clk-pwm.c | 2
-rw-r--r-- drivers/clk/clk-s2mps11.c | 2
-rw-r--r-- drivers/clk/clk-scmi.c | 16
-rw-r--r-- drivers/clk/clk-scpi.c | 2
-rw-r--r-- drivers/clk/clk.c | 4
-rw-r--r-- drivers/clk/clk_kunit_helpers.c | 207
-rw-r--r-- drivers/clk/clk_parent_data_test.h | 10
-rw-r--r-- drivers/clk/clk_test.c | 453
-rw-r--r-- drivers/clk/davinci/da8xx-cfgchip.c | 7
-rw-r--r-- drivers/clk/hisilicon/clk-hi3519.c | 2
-rw-r--r-- drivers/clk/hisilicon/clk-hi3559a.c | 9
-rw-r--r-- drivers/clk/hisilicon/crg-hi3516cv300.c | 2
-rw-r--r-- drivers/clk/hisilicon/crg-hi3798cv200.c | 2
-rw-r--r-- drivers/clk/imx/Kconfig | 1
-rw-r--r-- drivers/clk/imx/clk-composite-7ulp.c | 7
-rw-r--r-- drivers/clk/imx/clk-composite-8m.c | 53
-rw-r--r-- drivers/clk/imx/clk-composite-93.c | 15
-rw-r--r-- drivers/clk/imx/clk-fracn-gppll.c | 6
-rw-r--r-- drivers/clk/imx/clk-imx6ul.c | 4
-rw-r--r-- drivers/clk/imx/clk-imx7d.c | 6
-rw-r--r-- drivers/clk/imx/clk-imx8-acm.c | 40
-rw-r--r-- drivers/clk/imx/clk-imx8mm.c | 2
-rw-r--r-- drivers/clk/imx/clk-imx8mn.c | 1
-rw-r--r-- drivers/clk/imx/clk-imx8mp-audiomix.c | 88
-rw-r--r-- drivers/clk/imx/clk-imx8mp.c | 8
-rw-r--r-- drivers/clk/imx/clk-imx8qxp.c | 51
-rw-r--r-- drivers/clk/imx/clk-imx95-blk-ctl.c | 30
-rw-r--r-- drivers/clk/imx/clk-imxrt1050.c | 1
-rw-r--r-- drivers/clk/imx/clk.c | 1
-rw-r--r-- drivers/clk/imx/clk.h | 4
-rw-r--r-- drivers/clk/keystone/sci-clk.c | 2
-rw-r--r-- drivers/clk/kunit_clk_fixed_rate_test.dtso | 19
-rw-r--r-- drivers/clk/kunit_clk_parent_data_test.dtso | 28
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-aud.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-bdp.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-eth.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-g3d.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-hif.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2701-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-bdp.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-jpgdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-mfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt2712.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6765-audio.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6765-cam.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6765-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6765-mipi0a.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6765-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6765-vcodec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-aud.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-cam.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-ipe.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-mfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6779.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6795-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6795-infracfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6795-mfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6795-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6795-pericfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6795-topckgen.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6795-vdecsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6795-vencsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6797-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6797-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6797-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt6797-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7622-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7622-aud.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7622-eth.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7622-hif.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7622-infracfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7622.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7629-hif.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7981-eth.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7981-infracfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7981-topckgen.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7986-eth.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7986-infracfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7986-topckgen.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7988-eth.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7988-infracfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7988-topckgen.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt7988-xfipll.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8135-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8135.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8167-aud.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8167-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8167-mfgcfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8167-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8167-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8167.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-infracfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-pericfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-topckgen.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-vdecsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8173-vencsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-audio.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-cam.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-ipu0.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-ipu1.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-ipu_adl.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-ipu_conn.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-mfgcfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8183.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-cam.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-infra_ao.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-ipe.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-mcu.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-mdp.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-mfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-topckgen.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8186-wpe.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-cam.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-ccu.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-infra_ao.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-ipe.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-mfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-peri_ao.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-topckgen.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-vdo0.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-vdo1.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-vpp0.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-vpp1.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8188-wpe.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-aud.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-cam.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-ipe.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-mdp.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-mfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-msdc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-scp_adsp.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8192.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-apmixedsys.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-apusys_pll.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-cam.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-ccu.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-img.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-infra_ao.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-ipe.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-mfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-peri_ao.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-scp_adsp.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-topckgen.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-vdo0.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-vdo1.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-vpp0.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-vpp1.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-wpe.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8365-apu.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8365-cam.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8365-mfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8365-mm.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8365-vdec.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8365-venc.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8365.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8516-aud.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8516.c | 2
-rw-r--r-- drivers/clk/mediatek/reset.c | 61
-rw-r--r-- drivers/clk/mediatek/reset.h | 10
-rw-r--r-- drivers/clk/meson/a1-peripherals.c | 3
-rw-r--r-- drivers/clk/meson/a1-pll.c | 3
-rw-r--r-- drivers/clk/meson/axg-aoclk.c | 1
-rw-r--r-- drivers/clk/meson/axg-audio.c | 39
-rw-r--r-- drivers/clk/meson/axg-audio.h | 2
-rw-r--r-- drivers/clk/meson/axg.c | 1
-rw-r--r-- drivers/clk/meson/c3-peripherals.c | 3
-rw-r--r-- drivers/clk/meson/c3-pll.c | 3
-rw-r--r-- drivers/clk/meson/clk-cpu-dyndiv.c | 3
-rw-r--r-- drivers/clk/meson/clk-dualdiv.c | 5
-rw-r--r-- drivers/clk/meson/clk-mpll.c | 5
-rw-r--r-- drivers/clk/meson/clk-phase.c | 8
-rw-r--r-- drivers/clk/meson/clk-pll.c | 7
-rw-r--r-- drivers/clk/meson/clk-regmap.c | 13
-rw-r--r-- drivers/clk/meson/g12a-aoclk.c | 1
-rw-r--r-- drivers/clk/meson/g12a.c | 1
-rw-r--r-- drivers/clk/meson/gxbb-aoclk.c | 1
-rw-r--r-- drivers/clk/meson/gxbb.c | 1
-rw-r--r-- drivers/clk/meson/meson-aoclk.c | 3
-rw-r--r-- drivers/clk/meson/meson-clkc-utils.c | 3
-rw-r--r-- drivers/clk/meson/meson-eeclk.c | 3
-rw-r--r-- drivers/clk/meson/s4-peripherals.c | 3
-rw-r--r-- drivers/clk/meson/s4-pll.c | 3
-rw-r--r-- drivers/clk/meson/sclk-div.c | 3
-rw-r--r-- drivers/clk/meson/vclk.c | 5
-rw-r--r-- drivers/clk/meson/vid-pll-div.c | 3
-rw-r--r-- drivers/clk/mmp/clk-audio.c | 2
-rw-r--r-- drivers/clk/mmp/clk-mix.c | 10
-rw-r--r-- drivers/clk/mvebu/armada-37xx-periph.c | 2
-rw-r--r-- drivers/clk/mvebu/armada-37xx-tbg.c | 2
-rw-r--r-- drivers/clk/mvebu/armada-37xx-xtal.c | 2
-rw-r--r-- drivers/clk/qcom/Kconfig | 51
-rw-r--r-- drivers/clk/qcom/Makefile | 5
-rw-r--r-- drivers/clk/qcom/a53-pll.c | 1
-rw-r--r-- drivers/clk/qcom/apcs-msm8916.c | 2
-rw-r--r-- drivers/clk/qcom/apcs-sdx55.c | 2
-rw-r--r-- drivers/clk/qcom/camcc-sm4450.c | 1688
-rw-r--r-- drivers/clk/qcom/camcc-sm8150.c | 2159
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.c | 86
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.h | 7
-rw-r--r-- drivers/clk/qcom/clk-cbf-8996.c | 2
-rw-r--r-- drivers/clk/qcom/clk-rpmh.c | 2
-rw-r--r-- drivers/clk/qcom/dispcc-sm4450.c | 770
-rw-r--r-- drivers/clk/qcom/dispcc-sm8250.c | 12
-rw-r--r-- drivers/clk/qcom/dispcc-sm8550.c | 198
-rw-r--r-- drivers/clk/qcom/dispcc-sm8650.c | 1796
-rw-r--r-- drivers/clk/qcom/gcc-ipq5332.c | 36
-rw-r--r-- drivers/clk/qcom/gcc-ipq6018.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-ipq806x.c | 4
-rw-r--r-- drivers/clk/qcom/gcc-ipq8074.c | 4
-rw-r--r-- drivers/clk/qcom/gcc-mdm9615.c | 4
-rw-r--r-- drivers/clk/qcom/gcc-msm8660.c | 4
-rw-r--r-- drivers/clk/qcom/gcc-msm8960.c | 8
-rw-r--r-- drivers/clk/qcom/gcc-msm8994.c | 54
-rw-r--r-- drivers/clk/qcom/gcc-msm8996.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-msm8998.c | 64
-rw-r--r-- drivers/clk/qcom/gcc-sc8180x.c | 442
-rw-r--r-- drivers/clk/qcom/gcc-sm8250.c | 6
-rw-r--r-- drivers/clk/qcom/gcc-sm8450.c | 4
-rw-r--r-- drivers/clk/qcom/gpucc-sm4450.c | 805
-rw-r--r-- drivers/clk/qcom/lcc-ipq806x.c | 8
-rw-r--r-- drivers/clk/qcom/lcc-msm8960.c | 8
-rw-r--r-- drivers/clk/qcom/mmcc-apq8084.c | 50
-rw-r--r-- drivers/clk/qcom/mmcc-msm8960.c | 30
-rw-r--r-- drivers/clk/qcom/mmcc-msm8974.c | 52
-rw-r--r-- drivers/clk/qcom/mmcc-msm8994.c | 8
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c8
-rw-r--r--drivers/clk/qcom/videocc-sm8550.c4
-rw-r--r--drivers/clk/renesas/Kconfig9
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/clk-mstp.c2
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c37
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c30
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c38
-rw-r--r--drivers/clk/renesas/r8a779h0-cpg-mssr.c41
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c12
-rw-r--r--drivers/clk/renesas/r9a08g045-cpg.c20
-rw-r--r--drivers/clk/renesas/r9a09g057-cpg.c164
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c210
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.h36
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c2
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c71
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.c853
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.h190
-rw-r--r--drivers/clk/rockchip/Kconfig7
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-pll.c6
-rw-r--r--drivers/clk/rockchip/clk-px30.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c7
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3308.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3576.c1818
-rw-r--r--drivers/clk/rockchip/clk-rk3588.c42
-rw-r--r--drivers/clk/rockchip/clk.c3
-rw-r--r--drivers/clk/rockchip/clk.h54
-rw-r--r--drivers/clk/rockchip/rst-rk3576.c651
-rw-r--r--drivers/clk/samsung/Makefile1
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c2
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c2
-rw-r--r--drivers/clk/samsung/clk-exynos7885.c93
-rw-r--r--drivers/clk/samsung/clk-exynos850.c7
-rw-r--r--drivers/clk/samsung/clk-exynosautov9.c83
-rw-r--r--drivers/clk/samsung/clk-exynosautov920.c1173
-rw-r--r--drivers/clk/samsung/clk-pll.c62
-rw-r--r--drivers/clk/samsung/clk-pll.h2
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-isp.c2
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-vout.c4
-rw-r--r--drivers/clk/stm32/clk-stm32mp1.c2
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c2
-rw-r--r--drivers/clk/ti/adpll.c2
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c1
-rw-r--r--drivers/clk/versatile/clk-sp810.c2
-rw-r--r--drivers/clk/visconti/pll.c6
-rw-r--r--drivers/clk/x86/clk-fch.c2
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c2
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c2
-rw-r--r--drivers/clk/xilinx/xlnx_vcu.c2
-rw-r--r--drivers/comedi/drivers/ni_atmio.c9
-rw-r--r--drivers/comedi/drivers/ni_mio_common.c9
-rw-r--r--drivers/comedi/drivers/ni_pcimio.c9
-rw-r--r--drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c5
-rw-r--r--drivers/comedi/drivers/ni_stc.h2
-rw-r--r--drivers/counter/counter-chrdev.c1
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c6
-rw-r--r--drivers/crypto/Kconfig75
-rw-r--r--drivers/crypto/caam/caamhash.c1
-rw-r--r--drivers/cxl/Kconfig2
-rw-r--r--drivers/cxl/core/cdat.c508
-rw-r--r--drivers/cxl/core/core.h4
-rw-r--r--drivers/cxl/core/mbox.c96
-rw-r--r--drivers/cxl/core/memdev.c41
-rw-r--r--drivers/cxl/core/pci.c164
-rw-r--r--drivers/cxl/core/port.c206
-rw-r--r--drivers/cxl/core/region.c81
-rw-r--r--drivers/cxl/cxl.h9
-rw-r--r--drivers/cxl/cxlmem.h27
-rw-r--r--drivers/cxl/mem.c29
-rw-r--r--drivers/cxl/pci.c91
-rw-r--r--drivers/cxl/pmem.c26
-rw-r--r--drivers/cxl/port.c2
-rw-r--r--drivers/cxl/security.c23
-rw-r--r--drivers/dax/Kconfig2
-rw-r--r--drivers/dax/device.c6
-rw-r--r--drivers/dma-buf/dma-fence-array.c78
-rw-r--r--drivers/dma-buf/dma-heap.c27
-rw-r--r--drivers/dma/Kconfig20
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/acpi-dma.c4
-rw-r--r--drivers/dma/altera-msgdma.c4
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/amd/Kconfig14
-rw-r--r--drivers/dma/amd/Makefile3
-rw-r--r--drivers/dma/amd/qdma/Makefile5
-rw-r--r--drivers/dma/amd/qdma/qdma-comm-regs.c64
-rw-r--r--drivers/dma/amd/qdma/qdma.c1143
-rw-r--r--drivers/dma/amd/qdma/qdma.h266
-rw-r--r--drivers/dma/at_hdmac.c6
-rw-r--r--drivers/dma/bcm-sba-raid.c4
-rw-r--r--drivers/dma/bcm2835-dma.c2
-rw-r--r--drivers/dma/dmaengine.c2
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/ep93xx_dma.c291
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h6
-rw-r--r--drivers/dma/fsl-edma-main.c27
-rw-r--r--drivers/dma/hisi_dma.c2
-rw-r--r--drivers/dma/idma64.c8
-rw-r--r--drivers/dma/idxd/init.c6
-rw-r--r--drivers/dma/idxd/perfmon.c4
-rw-r--r--drivers/dma/idxd/submit.c2
-rw-r--r--drivers/dma/imx-dma.c3
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/lgm/lgm-dma.c2
-rw-r--r--drivers/dma/loongson1-apb-dma.c660
-rw-r--r--drivers/dma/lpc32xx-dmamux.c195
-rw-r--r--drivers/dma/ls2x-apb-dma.c4
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c4
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c2
-rw-r--r--drivers/dma/mv_xor.c4
-rw-r--r--drivers/dma/mv_xor.h2
-rw-r--r--drivers/dma/mv_xor_v2.c2
-rw-r--r--drivers/dma/nbpfaxi.c2
-rw-r--r--drivers/dma/of-dma.c4
-rw-r--r--drivers/dma/owl-dma.c2
-rw-r--r--drivers/dma/pl330.c5
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/ppc4xx/dma.h2
-rw-r--r--drivers/dma/ptdma/ptdma.h2
-rw-r--r--drivers/dma/qcom/bam_dma.c10
-rw-r--r--drivers/dma/qcom/gpi.c2
-rw-r--r--drivers/dma/qcom/qcom_adm.c2
-rw-r--r--drivers/dma/sh/rcar-dmac.c4
-rw-r--r--drivers/dma/sh/shdmac.c2
-rw-r--r--drivers/dma/ste_dma40.c6
-rw-r--r--drivers/dma/ste_dma40.h2
-rw-r--r--drivers/dma/ste_dma40_ll.h2
-rw-r--r--drivers/dma/tegra20-apb-dma.c2
-rw-r--r--drivers/dma/ti/k3-udma.h1
-rw-r--r--drivers/dma/xgene-dma.c2
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c101
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c27
-rw-r--r--drivers/extcon/Kconfig11
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-lc824206xa.c495
-rw-r--r--drivers/firewire/core-card.c91
-rw-r--r--drivers/firewire/core-cdev.c401
-rw-r--r--drivers/firewire/core-device.c202
-rw-r--r--drivers/firewire/core-iso.c49
-rw-r--r--drivers/firewire/core-topology.c7
-rw-r--r--drivers/firewire/core-transaction.c151
-rw-r--r--drivers/firewire/core.h28
-rw-r--r--drivers/firewire/ohci-serdes-test.c66
-rw-r--r--drivers/firewire/ohci.c574
-rw-r--r--drivers/firewire/ohci.h200
-rw-r--r--drivers/firmware/arm_scmi/driver.c1
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c5
-rw-r--r--drivers/firmware/efi/capsule-loader.c1
-rw-r--r--drivers/firmware/efi/cper.c11
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/libstub/efistub.h2
-rw-r--r--drivers/firmware/efi/libstub/tpm.c2
-rw-r--r--drivers/firmware/efi/libstub/unaccepted_memory.c3
-rw-r--r--drivers/firmware/efi/test/efi_test.c1
-rw-r--r--drivers/firmware/efi/unaccepted_memory.c18
-rw-r--r--drivers/firmware/qemu_fw_cfg.c2
-rw-r--r--drivers/firmware/turris-mox-rwtm.c1
-rw-r--r--drivers/fpga/socfpga.c7
-rw-r--r--drivers/fpga/tests/fpga-bridge-test.c25
-rw-r--r--drivers/fpga/tests/fpga-mgr-test.c28
-rw-r--r--drivers/fpga/tests/fpga-region-test.c41
-rw-r--r--drivers/fpga/zynq-fpga.c8
-rw-r--r--drivers/gnss/core.c1
-rw-r--r--drivers/gpio/gpio-ep93xx.c345
-rw-r--r--drivers/gpio/gpio-mockup.c1
-rw-r--r--drivers/gpio/gpio-sloppy-logic-analyzer.c1
-rw-r--r--drivers/gpio/gpiolib-cdev.c1
-rw-r--r--drivers/gpu/drm/Kconfig33
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c213
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c231
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c458
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c130
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c399
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c365
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c353
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c375
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c714
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c168
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c173
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c113
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c113
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c114
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c278
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c110
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c142
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c48
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c455
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c86
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c58
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h47
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c34
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c87
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c373
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c132
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c55
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/soc15_int.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c253
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c60
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c30
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c139
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c303
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c252
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c225
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c1469
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/Makefile13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn401/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/Makefile18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c167
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c987
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c224
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c631
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c169
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c1250
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c598
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c590
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/Makefile15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c235
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/Makefile27
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/Makefile16
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.c1383
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c425
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c1726
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h123
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_debug.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c497
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h525
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h55
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h77
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c4
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c18
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h77
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h16
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c892
-rw-r--r--drivers/gpu/drm/ast/Makefile4
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c406
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c184
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c12
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h43
-rw-r--r--drivers/gpu/drm/ast/ast_main.c6
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c576
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h22
-rw-r--r--drivers/gpu/drm/ast/ast_sil164.c127
-rw-r--r--drivers/gpu/drm/ast/ast_vga.c127
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c22
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c17
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c35
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c103
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c8
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.h4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c66
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c7
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c45
-rw-r--r--drivers/gpu/drm/ci/arm64.config1
-rw-r--r--drivers/gpu/drm/ci/container.yml8
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml29
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh11
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml8
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh1
-rw-r--r--drivers/gpu/drm/ci/test.yml132
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt12
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt41
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt24
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt19
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt146
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt146
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt105
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/requirements.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt57
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt90
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt50
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt65
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt106
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c66
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c128
-rw-r--r--drivers/gpu/drm/drm_atomic.c6
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_bridge.c9
-rw-r--r--drivers/gpu/drm/drm_connector.c87
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h15
-rw-r--r--drivers/gpu/drm/drm_displayid.c3
-rw-r--r--drivers/gpu/drm/drm_drv.c100
-rw-r--r--drivers/gpu/drm/drm_edid.c24
-rw-r--r--drivers/gpu/drm/drm_exec.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c39
-rw-r--r--drivers/gpu/drm/drm_file.c42
-rw-r--r--drivers/gpu/drm/drm_gem.c7
-rw-r--r--drivers/gpu/drm/drm_internal.h5
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c225
-rw-r--r--drivers/gpu/drm/drm_mode_config.c2
-rw-r--r--drivers/gpu/drm/drm_modes.c1
-rw-r--r--drivers/gpu/drm/drm_panel.c18
-rw-r--r--drivers/gpu/drm/drm_panic.c406
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs1003
-rw-r--r--drivers/gpu/drm/drm_prime.c84
-rw-r--r--drivers/gpu/drm/drm_print.c13
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c12
-rw-r--r--drivers/gpu/drm/drm_rect.c1
-rw-r--r--drivers/gpu/drm/drm_syncobj.c6
-rw-r--r--drivers/gpu/drm/drm_vblank.c83
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c22
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h4
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c26
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h1
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c14
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c18
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c16
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c18
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c18
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c18
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c155
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c3
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c883
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c26
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c240
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c36
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c66
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h62
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c312
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c114
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c70
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c237
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c100
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c77
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c325
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c36
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c553
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c67
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c500
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c115
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c322
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h62
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c567
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c884
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c209
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c204
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c125
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h41
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c130
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c16
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c24
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c30
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c25
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c18
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c44
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h21
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c3
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c88
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c12
-rw-r--r--drivers/gpu/drm/i915/i915_mm.h3
-rw-r--r--drivers/gpu/drm/i915/i915_module.c9
-rw-r--r--drivers/gpu/drm/i915/i915_params.c3
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h6
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c51
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h16
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h1
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c7
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h2
-rw-r--r--drivers/gpu/drm/i915/intel_step.c84
-rw-r--r--drivers/gpu/drm/i915/intel_step.h3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c7
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c14
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c2
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c82
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c36
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c106
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c29
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h3
-rw-r--r--drivers/gpu/drm/mgag200/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_bmc.c111
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c40
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h58
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c183
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga_bmc.c156
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c16
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c30
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c141
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c89
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c46
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h51
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c12
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c5
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c779
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c3
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h8
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c1
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c1
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml1118
-rw-r--r--drivers/gpu/drm/msm/registers/display/hdmi.xml89
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c5
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c57
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc907d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc37d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac907d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c79
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly907e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h37
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0002.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0003.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c330
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo0039.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo5039.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo74c1.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo85b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo9039.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo90b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_boa0b5.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c388
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h61
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c194
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/oproxy.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/uevent.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c479
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c867
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c184
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c157
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c175
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h105
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c5
-rw-r--r--drivers/gpu/drm/panel/Kconfig12
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c325
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c241
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c190
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c40
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c153
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c14
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c165
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c370
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c149
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c79
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c367
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c211
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c69
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c10
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c97
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c1122
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c2
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c29
-rw-r--r--drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c115
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c212
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/cik.c14
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c62
-rw-r--r--drivers/gpu/drm/radeon/ni.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c94
-rw-r--r--drivers/gpu/drm/radeon/r300.c6
-rw-r--r--drivers/gpu/drm/radeon/r420.c6
-rw-r--r--drivers/gpu/drm/radeon/r520.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c12
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c76
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/rs400.c6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c14
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Kconfig8
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Makefile2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Makefile2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c11
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c3
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c (renamed from drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c)0
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h (renamed from drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi_regs.h)0
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c32
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c107
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c25
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c25
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c1
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c1
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c1
-rw-r--r--drivers/gpu/drm/stm/drv.c7
-rw-r--r--drivers/gpu/drm/stm/ltdc.c107
-rw-r--r--drivers/gpu/drm/stm/lvds.c1
-rw-r--r--drivers/gpu/drm/tegra/drm.c6
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c46
-rw-r--r--drivers/gpu/drm/tegra/hub.c7
-rw-r--r--drivers/gpu/drm/tegra/output.c29
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c27
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c13
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c6
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c460
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c151
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c251
-rw-r--r--drivers/gpu/drm/udl/udl_edid.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c12
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h12
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c40
-rw-r--r--drivers/gpu/drm/v3d/v3d_performance_counters.h16
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c79
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c255
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c32
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c44
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c10
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c6
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h1
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c14
-rw-r--r--drivers/gpu/drm/xe/Makefile44
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h1
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h19
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h7
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_step.h10
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c9
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c7
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c108
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c16
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c1
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c51
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c9
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h17
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h12
-rw-r--r--drivers/gpu/drm/xe/regs/xe_sriov_regs.h23
-rw-r--r--drivers/gpu/drm/xe/tests/Makefile6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c53
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.c21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c30
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.c20
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.c39
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c11
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c436
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.c20
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c44
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.c21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c82
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h3
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c219
-rw-r--r--drivers/gpu/drm/xe/tests/xe_test.h10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c1
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h2
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c3
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c65
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h21
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c10
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.h4
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c121
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_device.c27
-rw-r--r--drivers/gpu/drm/xe/xe_device.h26
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h50
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c9
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c22
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c240
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h10
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c26
-rw-r--r--drivers/gpu/drm/xe/xe_execlist_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c16
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c490
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h28
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h54
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c23
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h12
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c61
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_debugfs.c71
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c45
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c12
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h10
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c40
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c61
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c214
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c1260
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h107
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c70
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c49
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h29
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h43
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h12
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c13
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.c97
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.h3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_id_mgr.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c96
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c28
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h10
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c19
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c164
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.h10
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c372
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.h29
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c95
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c2
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c37
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c559
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h43
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c213
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h1
-rw-r--r--drivers/gpu/drm/xe/xe_module.c54
-rw-r--r--drivers/gpu/drm/xe/xe_module.h2
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c8
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c24
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c130
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c12
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c1355
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h48
-rw-r--r--drivers/gpu/drm/xe/xe_query.c6
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h1
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c44
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h4
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_helpers.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c20
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c5
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c2
-rw-r--r--drivers/gpu/drm/xe/xe_step.c57
-rw-r--r--drivers/gpu/drm/xe/xe_step_types.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c29
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h1
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h52
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h10
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c1
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c21
-rw-r--r--drivers/gpu/drm/xe/xe_uc_debugfs.c2
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c42
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c732
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h55
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c1
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c5
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h7
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules7
-rw-r--r--drivers/gpu/drm/xe/xe_wait_user_fence.c2
-rw-r--r--drivers/gpu/host1x/dev.c7
-rw-r--r--drivers/gpu/host1x/dev.h2
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c37
-rw-r--r--drivers/gpu/host1x/intr.c21
-rw-r--r--drivers/gpu/host1x/intr.h5
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/greybus/Kconfig2
-rw-r--r--drivers/greybus/gb-beagleplay.c658
-rw-r--r--drivers/hid/uhid.c1
-rw-r--r--drivers/hv/hv_common.c4
-rw-r--r--drivers/hwmon/asus_atk0110.c1
-rw-r--r--drivers/hwmon/fschmd.c1
-rw-r--r--drivers/hwmon/w83793.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h18
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.c138
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.h70
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c1
-rw-r--r--drivers/hwtracing/intel_th/msu.c1
-rw-r--r--drivers/hwtracing/stm/core.c1
-rw-r--r--drivers/i2c/Kconfig8
-rw-r--r--drivers/i2c/busses/Kconfig41
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c3
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c18
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c194
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h40
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c74
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c88
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c184
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c6
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-emev2.c25
-rw-r--r--drivers/i2c/busses/i2c-i801.c9
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c11
-rw-r--r--drivers/i2c/busses/i2c-isch.c3
-rw-r--r--drivers/i2c/busses/i2c-ismt.c10
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c22
-rw-r--r--drivers/i2c/busses/i2c-keba.c598
-rw-r--r--drivers/i2c/busses/i2c-ljca.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c23
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c5
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c27
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c12
-rw-r--r--drivers/i2c/busses/i2c-riic.c228
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c5
-rw-r--r--drivers/i2c/busses/i2c-virtio.c4
-rw-r--r--drivers/i2c/busses/i2c-xiic.c62
-rw-r--r--drivers/i2c/i2c-core-base.c79
-rw-r--r--drivers/i2c/i2c-core-slave.c7
-rw-r--r--drivers/i2c/i2c-dev.c1
-rw-r--r--drivers/i2c/i2c-slave-testunit.c166
-rw-r--r--drivers/i2c/muxes/Kconfig16
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-mux-mule.c148
-rw-r--r--drivers/i3c/master.c12
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c2
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/Makefile3
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c12
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c36
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h10
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci_quirks.c44
-rw-r--r--drivers/i3c/master/svc-i3c-master.c84
-rw-r--r--drivers/idle/intel_idle.c37
-rw-r--r--drivers/iio/accel/Kconfig27
-rw-r--r--drivers/iio/accel/Makefile3
-rw-r--r--drivers/iio/accel/adxl367.c2
-rw-r--r--drivers/iio/accel/adxl367_spi.c2
-rw-r--r--drivers/iio/accel/adxl372.c2
-rw-r--r--drivers/iio/accel/adxl380.c1905
-rw-r--r--drivers/iio/accel/adxl380.h26
-rw-r--r--drivers/iio/accel/adxl380_i2c.c64
-rw-r--r--drivers/iio/accel/adxl380_spi.c66
-rw-r--r--drivers/iio/accel/bma180.c3
-rw-r--r--drivers/iio/accel/bma400_core.c11
-rw-r--r--drivers/iio/accel/bma400_spi.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c13
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c2
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c3
-rw-r--r--drivers/iio/accel/kxcjk-1013.c8
-rw-r--r--drivers/iio/accel/msa311.c3
-rw-r--r--drivers/iio/accel/sca3300.c3
-rw-r--r--drivers/iio/accel/stk8312.c3
-rw-r--r--drivers/iio/accel/stk8ba50.c3
-rw-r--r--drivers/iio/adc/Kconfig48
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad4000.c722
-rw-r--r--drivers/iio/adc/ad4695.c1185
-rw-r--r--drivers/iio/adc/ad7091r5.c6
-rw-r--r--drivers/iio/adc/ad7091r8.c2
-rw-r--r--drivers/iio/adc/ad7124.c38
-rw-r--r--drivers/iio/adc/ad7192.c189
-rw-r--r--drivers/iio/adc/ad7266.c7
-rw-r--r--drivers/iio/adc/ad7280a.c14
-rw-r--r--drivers/iio/adc/ad7291.c4
-rw-r--r--drivers/iio/adc/ad7292.c4
-rw-r--r--drivers/iio/adc/ad7298.c7
-rw-r--r--drivers/iio/adc/ad7380.c525
-rw-r--r--drivers/iio/adc/ad7476.c58
-rw-r--r--drivers/iio/adc/ad7606.c47
-rw-r--r--drivers/iio/adc/ad7606_par.c2
-rw-r--r--drivers/iio/adc/ad7606_spi.c9
-rw-r--r--drivers/iio/adc/ad7766.c14
-rw-r--r--drivers/iio/adc/ad7768-1.c7
-rw-r--r--drivers/iio/adc/ad7780.c10
-rw-r--r--drivers/iio/adc/ad7793.c20
-rw-r--r--drivers/iio/adc/ad7887.c4
-rw-r--r--drivers/iio/adc/ad7923.c18
-rw-r--r--drivers/iio/adc/ad799x.c3
-rw-r--r--drivers/iio/adc/ad9467.c491
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c6
-rw-r--r--drivers/iio/adc/adi-axi-adc.c71
-rw-r--r--drivers/iio/adc/aspeed_adc.c5
-rw-r--r--drivers/iio/adc/at91_adc.c19
-rw-r--r--drivers/iio/adc/axp20x_adc.c182
-rw-r--r--drivers/iio/adc/axp288_adc.c2
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c2
-rw-r--r--drivers/iio/adc/berlin2-adc.c2
-rw-r--r--drivers/iio/adc/cc10001_adc.c4
-rw-r--r--drivers/iio/adc/dln2-adc.c8
-rw-r--r--drivers/iio/adc/ep93xx_adc.c2
-rw-r--r--drivers/iio/adc/exynos_adc.c2
-rw-r--r--drivers/iio/adc/hi8435.c2
-rw-r--r--drivers/iio/adc/hx711.c7
-rw-r--r--drivers/iio/adc/ina2xx-adc.c17
-rw-r--r--drivers/iio/adc/ingenic-adc.c2
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c2
-rw-r--r--drivers/iio/adc/ltc2496.c2
-rw-r--r--drivers/iio/adc/ltc2497.c2
-rw-r--r--drivers/iio/adc/max1027.c16
-rw-r--r--drivers/iio/adc/max11100.c4
-rw-r--r--drivers/iio/adc/max1118.c7
-rw-r--r--drivers/iio/adc/max1241.c4
-rw-r--r--drivers/iio/adc/max1363.c34
-rw-r--r--drivers/iio/adc/max34408.c4
-rw-r--r--drivers/iio/adc/max9611.c6
-rw-r--r--drivers/iio/adc/mcp320x.c10
-rw-r--r--drivers/iio/adc/mcp3564.c54
-rw-r--r--drivers/iio/adc/mcp3911.c61
-rw-r--r--drivers/iio/adc/mp2629_adc.c4
-rw-r--r--drivers/iio/adc/mt6360-adc.c4
-rw-r--r--drivers/iio/adc/nau7802.c2
-rw-r--r--drivers/iio/adc/pac1921.c1261
-rw-r--r--drivers/iio/adc/pac1934.c6
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/rtq6056.c4
-rw-r--r--drivers/iio/adc/sd_adc_modulator.c97
-rw-r--r--drivers/iio/adc/sophgo-cv1800b-adc.c227
-rw-r--r--drivers/iio/adc/stm32-adc.c6
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c297
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c2
-rw-r--r--drivers/iio/adc/stmpe-adc.c2
-rw-r--r--drivers/iio/adc/ti-adc0832.c7
-rw-r--r--drivers/iio/adc/ti-adc084s021.c7
-rw-r--r--drivers/iio/adc/ti-adc12138.c7
-rw-r--r--drivers/iio/adc/ti-adc161s626.c8
-rw-r--r--drivers/iio/adc/ti-ads1015.c6
-rw-r--r--drivers/iio/adc/ti-ads1119.c4
-rw-r--r--drivers/iio/adc/ti-ads124s08.c5
-rw-r--r--drivers/iio/adc/ti-ads1298.c3
-rw-r--r--drivers/iio/adc/ti-ads131e08.c6
-rw-r--r--drivers/iio/adc/ti-ads7924.c4
-rw-r--r--drivers/iio/adc/ti-ads7950.c2
-rw-r--r--drivers/iio/adc/ti-ads8344.c2
-rw-r--r--drivers/iio/adc/ti-ads8688.c10
-rw-r--r--drivers/iio/adc/ti-lmp92064.c2
-rw-r--r--drivers/iio/adc/ti-tlc4541.c8
-rw-r--r--drivers/iio/adc/ti-tsc2046.c83
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/adc/xilinx-ams.c15
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c5
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c36
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c1
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c4
-rw-r--r--drivers/iio/chemical/bme680.h41
-rw-r--r--drivers/iio/chemical/bme680_core.c633
-rw-r--r--drivers/iio/chemical/bme680_spi.c2
-rw-r--r--drivers/iio/chemical/sgp40.c11
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c8
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c2
-rw-r--r--drivers/iio/dac/Kconfig11
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5449.c15
-rw-r--r--drivers/iio/dac/ad9739a.c13
-rw-r--r--drivers/iio/dac/adi-axi-dac.c21
-rw-r--r--drivers/iio/dac/ltc2664.c735
-rw-r--r--drivers/iio/dac/ltc2688.c2
-rw-r--r--drivers/iio/dac/mcp4728.c45
-rw-r--r--drivers/iio/dac/mcp4922.c47
-rw-r--r--drivers/iio/dac/ti-dac7311.c4
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c2
-rw-r--r--drivers/iio/frequency/adf4377.c35
-rw-r--r--drivers/iio/health/afe4403.c3
-rw-r--r--drivers/iio/health/afe4404.c3
-rw-r--r--drivers/iio/health/max30102.c2
-rw-r--r--drivers/iio/humidity/Kconfig11
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/am2315.c3
-rw-r--r--drivers/iio/humidity/ens210.c339
-rw-r--r--drivers/iio/imu/adis16400.c18
-rw-r--r--drivers/iio/imu/adis16460.c18
-rw-r--r--drivers/iio/imu/adis16475.c12
-rw-r--r--drivers/iio/imu/adis16480.c20
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c3
-rw-r--r--drivers/iio/imu/bmi323/bmi323.h1
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c182
-rw-r--r--drivers/iio/imu/bmi323/bmi323_i2c.c3
-rw-r--r--drivers/iio/imu/bmi323/bmi323_spi.c3
-rw-r--r--drivers/iio/imu/bno055/bno055.c2
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c2
-rw-r--r--drivers/iio/imu/kmx61.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c93
-rw-r--r--drivers/iio/industrialio-backend.c264
-rw-r--r--drivers/iio/industrialio-buffer.c52
-rw-r--r--drivers/iio/industrialio-core.c46
-rw-r--r--drivers/iio/industrialio-trigger.c27
-rw-r--r--drivers/iio/light/Kconfig13
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/adjd_s311.c3
-rw-r--r--drivers/iio/light/apds9960.c55
-rw-r--r--drivers/iio/light/bh1745.c906
-rw-r--r--drivers/iio/light/gp2ap002.c2
-rw-r--r--drivers/iio/light/gp2ap020a00f.c9
-rw-r--r--drivers/iio/light/isl29125.c3
-rw-r--r--drivers/iio/light/ltr390.c241
-rw-r--r--drivers/iio/light/ltrf216a.c53
-rw-r--r--drivers/iio/light/noa1305.c169
-rw-r--r--drivers/iio/light/rohm-bu27034.c337
-rw-r--r--drivers/iio/light/si1145.c7
-rw-r--r--drivers/iio/light/stk3310.c7
-rw-r--r--drivers/iio/light/tcs3414.c3
-rw-r--r--drivers/iio/light/tcs3472.c3
-rw-r--r--drivers/iio/magnetometer/Kconfig2
-rw-r--r--drivers/iio/magnetometer/ak8975.c80
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c2
-rw-r--r--drivers/iio/pressure/Kconfig11
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/bmp280-core.c654
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c2
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c45
-rw-r--r--drivers/iio/pressure/bmp280-spi.c18
-rw-r--r--drivers/iio/pressure/bmp280.h37
-rw-r--r--drivers/iio/pressure/dlhl60d.c3
-rw-r--r--drivers/iio/pressure/sdp500.c156
-rw-r--r--drivers/iio/proximity/Kconfig25
-rw-r--r--drivers/iio/proximity/Makefile2
-rw-r--r--drivers/iio/proximity/aw96103.c846
-rw-r--r--drivers/iio/proximity/cros_ec_mkbp_proximity.c2
-rw-r--r--drivers/iio/proximity/hx9023s.c1144
-rw-r--r--drivers/iio/proximity/sx9500.c3
-rw-r--r--drivers/iio/proximity/sx_common.c6
-rw-r--r--drivers/infiniband/core/cache.c4
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/device.c48
-rw-r--r--drivers/infiniband/core/iwcm.c2
-rw-r--r--drivers/infiniband/core/mad.c19
-rw-r--r--drivers/infiniband/core/netlink.c1
-rw-r--r--drivers/infiniband/core/nldev.c187
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/ucma.c7
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c66
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c10
-rw-r--r--drivers/infiniband/core/uverbs_main.c4
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h23
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c254
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h20
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c213
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c72
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h25
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h11
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c19
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h11
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h36
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h40
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c67
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c32
-rw-r--r--drivers/infiniband/hw/efa/efa.h2
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h3
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c1
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h1
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c1
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c4
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h3
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c26
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c87
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c5
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c29
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h1
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c22
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c16
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c4
-rw-r--r--drivers/infiniband/hw/mana/main.c8
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c10
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c21
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h2
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.c227
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.h23
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c22
-rw-r--r--drivers/infiniband/hw/mlx5/main.c324
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h60
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c418
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c405
-rw-r--r--drivers/infiniband/hw/mlx5/std_types.c76
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c96
-rw-r--r--drivers/infiniband/hw/mlx5/umr.h1
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c4
-rw-r--r--drivers/infiniband/sw/siw/siw.h2
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c37
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c92
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c51
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h2
-rw-r--r--drivers/input/evdev.c8
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/joystick/adc-joystick.c16
-rw-r--r--drivers/input/keyboard/Kconfig23
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/adc-keys.c5
-rw-r--r--drivers/input/keyboard/adp5588-keys.c99
-rw-r--r--drivers/input/keyboard/adp5589-keys.c22
-rw-r--r--drivers/input/keyboard/applespi.c1
-rw-r--r--drivers/input/keyboard/atkbd.c37
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c74
-rw-r--r--drivers/input/keyboard/gpio_keys.c48
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c4
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c7
-rw-r--r--drivers/input/keyboard/matrix_keypad.c334
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c268
-rw-r--r--drivers/input/keyboard/mt6779-keypad.c19
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c378
-rw-r--r--drivers/input/keyboard/qt1050.c15
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c24
-rw-r--r--drivers/input/keyboard/spear-keyboard.c16
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c3
-rw-r--r--drivers/input/keyboard/tegra-kbc.c117
-rw-r--r--drivers/input/matrix-keymap.c25
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/iqs269a.c7
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c38
-rw-r--r--drivers/input/misc/uinput.c1
-rw-r--r--drivers/input/misc/wistron_btns.c6
-rw-r--r--drivers/input/mouse/alps.c48
-rw-r--r--drivers/input/mouse/bcm5974.c35
-rw-r--r--drivers/input/rmi4/rmi_f12.c43
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h37
-rw-r--r--drivers/input/serio/ps2-gpio.c6
-rw-r--r--drivers/input/serio/userio.c1
-rw-r--r--drivers/input/touchscreen/Kconfig42
-rw-r--r--drivers/input/touchscreen/Makefile6
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c10
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c2174
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.h448
-rw-r--r--drivers/input/touchscreen/cyttsp4_i2c.c72
-rw-r--r--drivers/input/touchscreen/cyttsp4_spi.c187
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c39
-rw-r--r--drivers/input/touchscreen/cyttsp_core.h5
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c55
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c_common.c86
-rw-r--r--drivers/input/touchscreen/goodix_berlin.h1
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c43
-rw-r--r--drivers/input/touchscreen/goodix_berlin_i2c.c1
-rw-r--r--drivers/input/touchscreen/goodix_berlin_spi.c1
-rw-r--r--drivers/input/touchscreen/hynitron_cstxxx.c2
-rw-r--r--drivers/input/touchscreen/ilitek_ts_i2c.c19
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c288
-rw-r--r--drivers/input/touchscreen/tsc2004.c6
-rw-r--r--drivers/input/touchscreen/tsc2005.c6
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c249
-rw-r--r--drivers/input/touchscreen/tsc200x-core.h1
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c956
-rw-r--r--drivers/input/touchscreen/zforce_ts.c474
-rw-r--r--drivers/input/touchscreen/zinitix.c134
-rw-r--r--drivers/interconnect/icc-clk.c3
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/msm8937.c1350
-rw-r--r--drivers/interconnect/qcom/msm8953.c2
-rw-r--r--drivers/interconnect/qcom/msm8976.c1440
-rw-r--r--drivers/interconnect/qcom/qcs404.c127
-rw-r--r--drivers/interconnect/qcom/sm8350.c155
-rw-r--r--drivers/interconnect/qcom/sm8350.h10
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/dma-iommu.c137
-rw-r--r--drivers/iommu/intel/Kconfig1
-rw-r--r--drivers/iommu/iommu.c1
-rw-r--r--drivers/iommu/iommufd/device.c56
-rw-r--r--drivers/iommu/iommufd/fault.c6
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c3
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c16
-rw-r--r--drivers/iommu/iommufd/io_pagetable.h2
-rw-r--r--drivers/iommu/iommufd/ioas.c2
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h32
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h2
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c2
-rw-r--r--drivers/iommu/iommufd/main.c8
-rw-r--r--drivers/iommu/iommufd/pages.c10
-rw-r--r--drivers/iommu/iommufd/selftest.c18
-rw-r--r--drivers/isdn/capi/capi.c1
-rw-r--r--drivers/isdn/mISDN/timerdev.c1
-rw-r--r--drivers/leds/Kconfig8
-rw-r--r--drivers/leds/flash/leds-aat1290.c14
-rw-r--r--drivers/leds/flash/leds-as3645a.c8
-rw-r--r--drivers/leds/flash/leds-ktd2692.c15
-rw-r--r--drivers/leds/flash/leds-lm3601x.c19
-rw-r--r--drivers/leds/flash/leds-max77693.c20
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c163
-rw-r--r--drivers/leds/leds-88pm860x.c5
-rw-r--r--drivers/leds/leds-aw2013.c8
-rw-r--r--drivers/leds/leds-bcm6328.c7
-rw-r--r--drivers/leds/leds-bcm6358.c7
-rw-r--r--drivers/leds/leds-bd2606mvv.c23
-rw-r--r--drivers/leds/leds-blinkm.c220
-rw-r--r--drivers/leds/leds-gpio.c9
-rw-r--r--drivers/leds/leds-is31fl319x.c34
-rw-r--r--drivers/leds/leds-is31fl32xx.c14
-rw-r--r--drivers/leds/leds-lp55xx-common.c22
-rw-r--r--drivers/leds/leds-lp55xx-common.h1
-rw-r--r--drivers/leds/leds-mc13783.c24
-rw-r--r--drivers/leds/leds-mt6323.c22
-rw-r--r--drivers/leds/leds-netxbig.c19
-rw-r--r--drivers/leds/leds-pca9532.c12
-rw-r--r--drivers/leds/leds-pca995x.c78
-rw-r--r--drivers/leds/leds-sc27xx-bltc.c12
-rw-r--r--drivers/leds/leds-sun50i-a100.c2
-rw-r--r--drivers/leds/leds-turris-omnia.c9
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c14
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c24
-rw-r--r--drivers/leds/uleds.c1
-rw-r--r--drivers/macintosh/adb.c1
-rw-r--r--drivers/macintosh/macio_asic.c4
-rw-r--r--drivers/macintosh/smu.c1
-rw-r--r--drivers/mailbox/Kconfig3
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c3
-rw-r--r--drivers/mailbox/imx-mailbox.c6
-rw-r--r--drivers/mailbox/mailbox.c22
-rw-r--r--drivers/mailbox/omap-mailbox.c2
-rw-r--r--drivers/mailbox/rockchip-mailbox.c2
-rw-r--r--drivers/mailbox/sprd-mailbox.c25
-rw-r--r--drivers/md/dm-bufio.c3
-rw-r--r--drivers/md/dm-cache-target.c6
-rw-r--r--drivers/md/dm-clone-metadata.c5
-rw-r--r--drivers/md/dm-crypt.c47
-rw-r--r--drivers/md/dm-integrity.c326
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-vdo/data-vio.c15
-rw-r--r--drivers/md/dm-vdo/dedupe.c3
-rw-r--r--drivers/md/dm-vdo/dm-vdo-target.c29
-rw-r--r--drivers/md/dm-vdo/indexer/chapter-index.c2
-rw-r--r--drivers/md/dm-vdo/io-submitter.c1
-rw-r--r--drivers/md/dm-vdo/message-stats.c48
-rw-r--r--drivers/md/dm-vdo/message-stats.h1
-rw-r--r--drivers/md/dm-vdo/repair.c41
-rw-r--r--drivers/md/dm-vdo/status-codes.c2
-rw-r--r--drivers/md/dm-vdo/status-codes.h2
-rw-r--r--drivers/md/dm-verity-target.c83
-rw-r--r--drivers/md/dm-verity-verify-sig.c2
-rw-r--r--drivers/md/dm-verity.h1
-rw-r--r--drivers/md/dm.c11
-rw-r--r--drivers/md/dm.h5
-rw-r--r--drivers/media/cec/core/cec-adap.c53
-rw-r--r--drivers/media/cec/core/cec-api.c5
-rw-r--r--drivers/media/cec/core/cec-core.c31
-rw-r--r--drivers/media/cec/core/cec-priv.h2
-rw-r--r--drivers/media/cec/usb/Kconfig1
-rw-r--r--drivers/media/cec/usb/Makefile1
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig14
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile8
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c657
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h51
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c1836
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h118
-rw-r--r--drivers/media/common/siano/smscoreapi.c15
-rw-r--r--drivers/media/common/siano/smscoreapi.h10
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c166
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c3
-rw-r--r--drivers/media/dvb-frontends/a8293.c2
-rw-r--r--drivers/media/dvb-frontends/af9013.c2
-rw-r--r--drivers/media/dvb-frontends/af9033.c2
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c2
-rw-r--r--drivers/media/dvb-frontends/mn88472.c2
-rw-r--r--drivers/media/dvb-frontends/mn88473.c2
-rw-r--r--drivers/media/dvb-frontends/mxl692.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c4
-rw-r--r--drivers/media/dvb-frontends/si2165.c2
-rw-r--r--drivers/media/dvb-frontends/si2168.c2
-rw-r--r--drivers/media/dvb-frontends/sp2.c2
-rw-r--r--drivers/media/dvb-frontends/stv090x.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c2
-rw-r--r--drivers/media/dvb-frontends/tda10071.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c4
-rw-r--r--drivers/media/i2c/ad5820.c4
-rw-r--r--drivers/media/i2c/adp1653.c2
-rw-r--r--drivers/media/i2c/adv7170.c4
-rw-r--r--drivers/media/i2c/adv7175.c4
-rw-r--r--drivers/media/i2c/adv7183.c4
-rw-r--r--drivers/media/i2c/adv7343.c4
-rw-r--r--drivers/media/i2c/adv7393.c4
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/ak881x.c4
-rw-r--r--drivers/media/i2c/ar0521.c22
-rw-r--r--drivers/media/i2c/bt819.c6
-rw-r--r--drivers/media/i2c/bt856.c2
-rw-r--r--drivers/media/i2c/bt866.c2
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.h3
-rw-r--r--drivers/media/i2c/cs3308.c2
-rw-r--r--drivers/media/i2c/cs5345.c2
-rw-r--r--drivers/media/i2c/cs53l32a.c2
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/ds90ub913.c5
-rw-r--r--drivers/media/i2c/dw9714.c4
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c2
-rw-r--r--drivers/media/i2c/gc05a2.c2
-rw-r--r--drivers/media/i2c/gc08a3.c2
-rw-r--r--drivers/media/i2c/imx274.c2
-rw-r--r--drivers/media/i2c/imx283.c33
-rw-r--r--drivers/media/i2c/imx335.c9
-rw-r--r--drivers/media/i2c/imx355.c12
-rw-r--r--drivers/media/i2c/isl7998x.c4
-rw-r--r--drivers/media/i2c/ks0127.c6
-rw-r--r--drivers/media/i2c/lm3560.c4
-rw-r--r--drivers/media/i2c/lm3646.c2
-rw-r--r--drivers/media/i2c/m52790.c2
-rw-r--r--drivers/media/i2c/max2175.c4
-rw-r--r--drivers/media/i2c/max96714.c18
-rw-r--r--drivers/media/i2c/max96717.c236
-rw-r--r--drivers/media/i2c/ml86v7667.c4
-rw-r--r--drivers/media/i2c/msp3400-driver.c2
-rw-r--r--drivers/media/i2c/mt9m001.c2
-rw-r--r--drivers/media/i2c/mt9m111.c2
-rw-r--r--drivers/media/i2c/mt9p031.c38
-rw-r--r--drivers/media/i2c/mt9t112.c2
-rw-r--r--drivers/media/i2c/mt9v011.c2
-rw-r--r--drivers/media/i2c/mt9v111.c3
-rw-r--r--drivers/media/i2c/og01a1b.c187
-rw-r--r--drivers/media/i2c/ov13858.c4
-rw-r--r--drivers/media/i2c/ov2640.c2
-rw-r--r--drivers/media/i2c/ov2659.c4
-rw-r--r--drivers/media/i2c/ov5640.c4
-rw-r--r--drivers/media/i2c/ov5645.c17
-rw-r--r--drivers/media/i2c/ov5647.c2
-rw-r--r--drivers/media/i2c/ov5675.c12
-rw-r--r--drivers/media/i2c/ov6650.c2
-rw-r--r--drivers/media/i2c/ov7640.c2
-rw-r--r--drivers/media/i2c/ov772x.c2
-rw-r--r--drivers/media/i2c/ov7740.c2
-rw-r--r--drivers/media/i2c/ov9640.c2
-rw-r--r--drivers/media/i2c/ov9650.c4
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c15
-rw-r--r--drivers/media/i2c/s5k5baf.c4
-rw-r--r--drivers/media/i2c/saa6588.c2
-rw-r--r--drivers/media/i2c/saa6752hs.c2
-rw-r--r--drivers/media/i2c/saa7110.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/saa7185.c2
-rw-r--r--drivers/media/i2c/sony-btf-mpx.c2
-rw-r--r--drivers/media/i2c/tc358743.c2
-rw-r--r--drivers/media/i2c/tc358746.c12
-rw-r--r--drivers/media/i2c/tda1997x.c2
-rw-r--r--drivers/media/i2c/tda7432.c2
-rw-r--r--drivers/media/i2c/tda9840.c2
-rw-r--r--drivers/media/i2c/tea6415c.c2
-rw-r--r--drivers/media/i2c/tea6420.c2
-rw-r--r--drivers/media/i2c/thp7312.c2
-rw-r--r--drivers/media/i2c/ths7303.c6
-rw-r--r--drivers/media/i2c/ths8200.c4
-rw-r--r--drivers/media/i2c/tlv320aic23b.c2
-rw-r--r--drivers/media/i2c/tvaudio.c2
-rw-r--r--drivers/media/i2c/tvp5150.c6
-rw-r--r--drivers/media/i2c/tvp7002.c2
-rw-r--r--drivers/media/i2c/tw2804.c2
-rw-r--r--drivers/media/i2c/tw9900.c2
-rw-r--r--drivers/media/i2c/tw9903.c2
-rw-r--r--drivers/media/i2c/tw9906.c2
-rw-r--r--drivers/media/i2c/tw9910.c2
-rw-r--r--drivers/media/i2c/uda1342.c2
-rw-r--r--drivers/media/i2c/upd64031a.c2
-rw-r--r--drivers/media/i2c/upd64083.c2
-rw-r--r--drivers/media/i2c/vp27smpx.c2
-rw-r--r--drivers/media/i2c/vpx3220.c6
-rw-r--r--drivers/media/i2c/wm8739.c2
-rw-r--r--drivers/media/i2c/wm8775.c2
-rw-r--r--drivers/media/mc/mc-devnode.c1
-rw-r--r--drivers/media/mc/mc-request.c6
-rw-r--r--drivers/media/pci/intel/ipu6/Kconfig7
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c24
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c2
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.h2
-rw-r--r--drivers/media/pci/mgb4/mgb4_io.h29
-rw-r--r--drivers/media/pci/mgb4/mgb4_sysfs_out.c9
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c193
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.h3
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c309
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.h5
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-p2m.c8
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c28
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c8
-rw-r--r--drivers/media/platform/chips-media/coda/coda-bit.c2
-rw-r--r--drivers/media/platform/imagination/Kconfig1
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c9
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c9
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c10
-rw-r--r--drivers/media/platform/microchip/microchip-isc-base.c19
-rw-r--r--drivers/media/platform/microchip/microchip-sama5d2-isc.c21
-rw-r--r--drivers/media/platform/microchip/microchip-sama7g5-isc.c21
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/h264.c10
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c19
-rw-r--r--drivers/media/platform/nxp/imx-pxp.h9
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c17
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss.c5
-rw-r--r--drivers/media/platform/qcom/venus/core.c1
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c8
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h16
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h20
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c20
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/Kconfig1
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c21
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c22
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.c14
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h49
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c15
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c9
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c1041
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h23
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c4
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c51
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.c10
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.c10
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c10
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c12
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c12
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c2
-rw-r--r--drivers/media/platform/ti/cal/cal.c8
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c14
-rw-r--r--drivers/media/platform/verisilicon/Kconfig8
-rw-r--r--drivers/media/platform/verisilicon/Makefile14
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c48
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2.c29
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c20
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_regs.h4
-rw-r--r--drivers/media/platform/verisilicon/hantro_hevc.c8
-rw-r--r--drivers/media/platform/verisilicon/hantro_hw.h38
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c6
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c6
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c3
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_regs.h10
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c1
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c9
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/saa7706h.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/radio/si4713/si4713.c4
-rw-r--r--drivers/media/radio/tef6862.c4
-rw-r--r--drivers/media/rc/ene_ir.c3
-rw-r--r--drivers/media/rc/ite-cir.c1
-rw-r--r--drivers/media/rc/lirc_dev.c9
-rw-r--r--drivers/media/rc/meson-ir.c27
-rw-r--r--drivers/media/rc/rc-loopback.c1
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c6
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_demod.c2
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_tuner.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c48
-rw-r--r--drivers/media/tuners/e4000.c2
-rw-r--r--drivers/media/tuners/fc2580.c2
-rw-r--r--drivers/media/tuners/m88rs6000t.c2
-rw-r--r--drivers/media/tuners/mt2060.c2
-rw-r--r--drivers/media/tuners/mxl301rf.c2
-rw-r--r--drivers/media/tuners/qm1d1b0004.c2
-rw-r--r--drivers/media/tuners/qm1d1c0042.c2
-rw-r--r--drivers/media/tuners/tda18212.c2
-rw-r--r--drivers/media/tuners/tda18250.c2
-rw-r--r--drivers/media/tuners/tua9001.c2
-rw-r--r--drivers/media/tuners/tuner-i2c.h4
-rw-r--r--drivers/media/usb/go7007/s2250-board.c2
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c53
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h60
-rw-r--r--drivers/message/fusion/mptbase.c10
-rw-r--r--drivers/message/fusion/mptbase.h3
-rw-r--r--drivers/message/fusion/mptctl.c3
-rw-r--r--drivers/message/fusion/mptfc.c7
-rw-r--r--drivers/mfd/88pm800.c2
-rw-r--r--drivers/mfd/88pm805.c4
-rw-r--r--drivers/mfd/88pm860x-core.c10
-rw-r--r--drivers/mfd/atc260x-core.c4
-rw-r--r--drivers/mfd/bd9571mwv.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c15
-rw-r--r--drivers/mfd/da9062-core.c12
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c2
-rw-r--r--drivers/mfd/gateworks-gsc.c2
-rw-r--r--drivers/mfd/hi655x-pmic.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c39
-rw-r--r--drivers/mfd/intel-m10-bmc-pmci.c2
-rw-r--r--drivers/mfd/intel-m10-bmc-spi.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c14
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c1
-rw-r--r--drivers/mfd/max14577.c1
-rw-r--r--drivers/mfd/max77620.c5
-rw-r--r--drivers/mfd/mc13xxx-spi.c2
-rw-r--r--drivers/mfd/mt6360-core.c23
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c5
-rw-r--r--drivers/mfd/retu-mfd.c12
-rw-r--r--drivers/mfd/rk8xx-core.c6
-rw-r--r--drivers/mfd/rk8xx-i2c.c25
-rw-r--r--drivers/mfd/rohm-bd71828.c8
-rw-r--r--drivers/mfd/rohm-bd718x7.c2
-rw-r--r--drivers/mfd/rohm-bd9576.c6
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c2
-rw-r--r--drivers/mfd/syscon.c20
-rw-r--r--drivers/mfd/tc3589x.c2
-rw-r--r--drivers/mfd/tps6105x.c2
-rw-r--r--drivers/mfd/tps65086.c2
-rw-r--r--drivers/mfd/tps65090.c2
-rw-r--r--drivers/mfd/tps65218.c2
-rw-r--r--drivers/mfd/tps65219.c4
-rw-r--r--drivers/mfd/tps65910.c6
-rw-r--r--drivers/mfd/tps65912-core.c2
-rw-r--r--drivers/mfd/twl6040.c2
-rw-r--r--drivers/mfd/wcd934x.c2
-rw-r--r--drivers/misc/cxl/of.c207
-rw-r--r--drivers/misc/cxl/pci.c32
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/fastrpc.c10
-rw-r--r--drivers/misc/kgdbts.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c5
-rw-r--r--drivers/misc/mei/main.c1
-rw-r--r--drivers/misc/ntsync.c2
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h2
-rw-r--r--drivers/misc/phantom.c1
-rw-r--r--drivers/misc/tsl2550.c8
-rw-r--r--drivers/misc/xilinx_tmr_inject.c1
-rw-r--r--drivers/mmc/core/block.c1
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c3
-rw-r--r--drivers/mtd/mtdoops.c6
-rw-r--r--drivers/mtd/nand/raw/Kconfig6
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/nandsim.c2
-rw-r--r--drivers/mtd/nand/raw/technologic-nand-controller.c222
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/debug.c1
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c54
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c28
-rw-r--r--drivers/net/ethernet/renesas/ravb.h1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c18
-rw-r--r--drivers/net/ethernet/seeq/ether3.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c1
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c37
-rw-r--r--drivers/net/hamradio/6pack.c60
-rw-r--r--drivers/net/mctp/mctp-serial.c23
-rw-r--r--drivers/net/netdevsim/fib.c1
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c42
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c3
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c24
-rw-r--r--drivers/net/tap.c1
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/usb/usbnet.c37
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c1
-rw-r--r--drivers/ntb/core.c4
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c2
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c2
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c1
-rw-r--r--drivers/ntb/ntb_transport.c33
-rw-r--r--drivers/ntb/test/ntb_perf.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c43
-rw-r--r--drivers/nvdimm/nd_virtio.c9
-rw-r--r--drivers/nvdimm/of_pmem.c2
-rw-r--r--drivers/nvme/host/core.c5
-rw-r--r--drivers/nvme/host/fault_inject.c1
-rw-r--r--drivers/nvme/host/ioctl.c6
-rw-r--r--drivers/nvme/host/multipath.c14
-rw-r--r--drivers/nvme/host/rdma.c6
-rw-r--r--drivers/nvme/host/sysfs.c1
-rw-r--r--drivers/nvmem/Kconfig3
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c32
-rw-r--r--drivers/nvmem/layouts.c2
-rw-r--r--drivers/nvmem/layouts/Kconfig11
-rw-r--r--drivers/nvmem/layouts/Makefile1
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c211
-rw-r--r--drivers/nvmem/layouts/u-boot-env.h15
-rw-r--r--drivers/nvmem/sunplus-ocotp.c7
-rw-r--r--drivers/nvmem/u-boot-env.c165
-rw-r--r--drivers/of/.kunitconfig1
-rw-r--r--drivers/of/Kconfig10
-rw-r--r--drivers/of/Makefile3
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/kunit_overlay_test.dtso9
-rw-r--r--drivers/of/of_kunit_helpers.c77
-rw-r--r--drivers/of/of_numa.c5
-rw-r--r--drivers/of/overlay_test.c115
-rw-r--r--drivers/of/platform.c9
-rw-r--r--drivers/of/unittest.c4
-rw-r--r--drivers/pci/Kconfig9
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/ats.c4
-rw-r--r--drivers/pci/controller/Kconfig2
-rw-r--r--drivers/pci/controller/cadence/Kconfig2
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c160
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c44
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h13
-rw-r--r--drivers/pci/controller/dwc/Kconfig5
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c11
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c1000
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c24
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h35
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.c78
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.h14
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c41
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c133
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c13
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c37
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c11
-rw-r--r--drivers/pci/controller/pci-aardvark.c74
-rw-r--r--drivers/pci/controller/pci-tegra.c10
-rw-r--r--drivers/pci/controller/pci-xgene.c6
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c11
-rw-r--r--drivers/pci/controller/pcie-altera.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c572
-rw-r--r--drivers/pci/controller/pcie-iproc.c18
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c193
-rw-r--r--drivers/pci/controller/pcie-mediatek.c12
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c10
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c64
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c150
-rw-r--r--drivers/pci/controller/pcie-xilinx.c9
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c11
-rw-r--r--drivers/pci/controller/vmd.c17
-rw-r--r--drivers/pci/devres.c9
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c14
-rw-r--r--drivers/pci/hotplug/TODO5
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c4
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c2
-rw-r--r--drivers/pci/hotplug/shpchp.h38
-rw-r--r--drivers/pci/hotplug/shpchp_core.c15
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c79
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c63
-rw-r--r--drivers/pci/iomap.c2
-rw-r--r--drivers/pci/npem.c595
-rw-r--r--drivers/pci/pci-bridge-emul.c4
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.c75
-rw-r--r--drivers/pci/pci.h46
-rw-r--r--drivers/pci/pcie/aer_inject.c4
-rw-r--r--drivers/pci/probe.c37
-rw-r--r--drivers/pci/pwrctl/pci-pwrctl-pwrseq.c5
-rw-r--r--drivers/pci/quirks.c39
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/perf/riscv_pmu.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c31
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm-cygnus-pcie.c20
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c21
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c95
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c677
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c12
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c4
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c30
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c27
-rw-r--r--drivers/phy/nuvoton/Kconfig12
-rw-r--r--drivers/phy/nuvoton/Makefile3
-rw-r--r--drivers/phy/nuvoton/phy-ma35d1-usb2.c143
-rw-r--r--drivers/phy/phy-airoha-pcie.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-common.h19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c83
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c12
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c10
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usbc.c13
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c60
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c206
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c12
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c50
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c16
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c16
-rw-r--r--drivers/pinctrl/Kconfig23
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c1
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c9
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx-scmi.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c7
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c324
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h3
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c7
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-c3.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-t7.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c24
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c24
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c16
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c16
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-s4.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c25
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.c6
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c16
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c16
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c42
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c3
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c64
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c51
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-ep93xx.c1434
-rw-r--r--drivers/pinctrl/pinctrl-eyeq5.c575
-rw-r--r--drivers/pinctrl/pinctrl-k210.c35
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c207
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h1
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c5
-rw-r--r--drivers/pinctrl/pinctrl-utils.c4
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c1
-rw-r--r--drivers/pinctrl/pinmux.c7
-rw-r--r--drivers/pinctrl/realtek/pinctrl-rtd.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c117
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c3
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c16
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c108
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h21
-rw-r--r--drivers/pinctrl/sophgo/Kconfig54
-rw-r--r--drivers/pinctrl/sophgo/Makefile7
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1800b.c462
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv1812h.c771
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.c765
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-cv18xx.h155
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2000.c771
-rw-r--r--drivers/pinctrl/sophgo/pinctrl-sg2002.c542
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c14
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c58
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c1
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c1
-rw-r--r--drivers/platform/surface/surface_dtx.c1
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/ep93xx-restart.c84
-rw-r--r--drivers/pps/clients/pps_parport.c8
-rw-r--r--drivers/pps/pps.c1
-rw-r--r--drivers/pwm/pwm-ep93xx.c26
-rw-r--r--drivers/remoteproc/Kconfig19
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c10
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c2
-rw-r--r--drivers/remoteproc/imx_rproc.c93
-rw-r--r--drivers/remoteproc/imx_rproc.h4
-rw-r--r--drivers/remoteproc/ingenic_rproc.c3
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c21
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c93
-rw-r--r--drivers/remoteproc/st_slim_rproc.c6
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c108
-rw-r--r--drivers/remoteproc/ti_k3_m4_remoteproc.c667
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c130
-rw-r--r--drivers/remoteproc/ti_sci_proc.h26
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c141
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/qcom_glink_native.c166
-rw-r--r--drivers/rpmsg/qcom_glink_trace.h406
-rw-r--r--drivers/rtc/Kconfig16
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/dev.c1
-rw-r--r--drivers/rtc/rtc-at91sam9.c1
-rw-r--r--drivers/rtc/rtc-m41t80.c1
-rw-r--r--drivers/rtc/rtc-m48t59.c4
-rw-r--r--drivers/rtc/rtc-rc5t619.c13
-rw-r--r--drivers/rtc/rtc-s35390a.c1
-rw-r--r--drivers/rtc/rtc-sd2405al.c227
-rw-r--r--drivers/rtc/rtc-stm32.c281
-rw-r--r--drivers/rtc/rtc-sun6i.c1
-rw-r--r--drivers/rtc/rtc-twl.c4
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/sclp_ctl.c1
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/char/tape_char.c1
-rw-r--r--drivers/s390/char/uvdevice.c1
-rw-r--r--drivers/s390/char/vmcp.c1
-rw-r--r--drivers/s390/char/vmlogrdr.c1
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/chsc_sch.c1
-rw-r--r--drivers/s390/cio/css.c1
-rw-r--r--drivers/s390/crypto/Makefile16
-rw-r--r--drivers/s390/crypto/ap_bus.c59
-rw-r--r--drivers/s390/crypto/ap_queue.c20
-rw-r--r--drivers/s390/crypto/pkey_api.c2658
-rw-r--r--drivers/s390/crypto/pkey_base.c362
-rw-r--r--drivers/s390/crypto/pkey_base.h195
-rw-r--r--drivers/s390/crypto/pkey_cca.c629
-rw-r--r--drivers/s390/crypto/pkey_ep11.c578
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c557
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c648
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c13
-rw-r--r--drivers/s390/crypto/zcrypt_api.c30
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c8
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h6
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c28
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h14
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c10
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c37
-rw-r--r--drivers/sbus/char/openprom.c1
-rw-r--r--drivers/sbus/char/uctrl.c1
-rw-r--r--drivers/scsi/NCR5380.c233
-rw-r--r--drivers/scsi/NCR5380.h20
-rw-r--r--drivers/scsi/aacraid/aachba.c28
-rw-r--r--drivers/scsi/aacraid/aacraid.h21
-rw-r--r--drivers/scsi/aacraid/commctrl.c4
-rw-r--r--drivers/scsi/aacraid/comminit.c3
-rw-r--r--drivers/scsi/aacraid/commsup.c5
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c6
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c5
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h11
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c3
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c3
-rw-r--r--drivers/scsi/elx/libefc/efc_nport.c2
-rw-r--r--drivers/scsi/esas2r/esas2r.h1
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c5
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c18
-rw-r--r--drivers/scsi/fnic/fnic_main.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c2
-rw-r--r--drivers/scsi/hosts.c9
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c21
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c5
-rw-r--r--drivers/scsi/ipr.h4
-rw-r--r--drivers/scsi/libfc/fc_exch.c3
-rw-r--r--drivers/scsi/libfc/fc_rport.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/libsas/sas_init.c4
-rw-r--r--drivers/scsi/lpfc/lpfc.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c211
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c39
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c43
-rw-r--r--drivers/scsi/mac_scsi.c170
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c4
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h45
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h13
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h18
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h4
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h13
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c115
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/myrb.c5
-rw-r--r--drivers/scsi/myrb.h1
-rw-r--r--drivers/scsi/myrs.c5
-rw-r--r--drivers/scsi/myrs.h1
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c6
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/pmcraid.c4
-rw-r--r--drivers/scsi/qedf/qedf_io.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c20
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/scsi_lib.c23
-rw-r--r--drivers/scsi/scsi_transport_fc.c11
-rw-r--r--drivers/scsi/sd.c34
-rw-r--r--drivers/scsi/sg.c1
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h39
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c496
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c60
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h3
-rw-r--r--drivers/scsi/snic/snic_main.c10
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/stex.c6
-rw-r--r--drivers/scsi/sun3_scsi.c2
-rw-r--r--drivers/scsi/vmw_pvscsi.c3
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/sh/intc/userimask.c5
-rw-r--r--drivers/slimbus/messaging.c9
-rw-r--r--drivers/slimbus/qcom-ctrl.c7
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c29
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/cirrus/Kconfig17
-rw-r--r--drivers/soc/cirrus/Makefile2
-rw-r--r--drivers/soc/cirrus/soc-ep93xx.c252
-rw-r--r--drivers/soundwire/bus_type.c19
-rw-r--r--drivers/soundwire/cadence_master.c39
-rw-r--r--drivers/soundwire/cadence_master.h5
-rw-r--r--drivers/soundwire/intel.h2
-rw-r--r--drivers/soundwire/intel_auxdevice.c1
-rw-r--r--drivers/soundwire/intel_bus_common.c27
-rw-r--r--drivers/spi/atmel-quadspi.c15
-rw-r--r--drivers/spi/spi-airoha-snfi.c43
-rw-r--r--drivers/spi/spi-ep93xx.c68
-rw-r--r--drivers/spi/spi-fsl-lpspi.c1
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c2
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c7
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1325.c9
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c5
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c3
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c9
-rw-r--r--drivers/staging/fbtft/fbtft-core.c13
-rw-r--r--drivers/staging/fbtft/fbtft-sysfs.c4
-rw-r--r--drivers/staging/fbtft/fbtft.h2
-rw-r--r--drivers/staging/greybus/gb-camera.h4
-rw-r--r--drivers/staging/greybus/spilib.c6
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c5
-rw-r--r--drivers/staging/ks7010/Kconfig14
-rw-r--r--drivers/staging/ks7010/Makefile4
-rw-r--r--drivers/staging/ks7010/TODO36
-rw-r--r--drivers/staging/ks7010/eap_packet.h70
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c1143
-rw-r--r--drivers/staging/ks7010/ks_hostif.c2312
-rw-r--r--drivers/staging/ks7010/ks_hostif.h617
-rw-r--r--drivers/staging/ks7010/ks_wlan.h567
-rw-r--r--drivers/staging/ks7010/ks_wlan_ioctl.h61
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c2676
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h4
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h6
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h6
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c5
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c9
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c12
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c259
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c8
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c44
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_dvs_info.h37
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.h1
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c10
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c10
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c40
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c2
-rw-r--r--drivers/staging/media/meson/vdec/vdec_1.c16
-rw-r--r--drivers/staging/media/meson/vdec/vdec_hevc.c43
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.c44
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.h2
-rw-r--r--drivers/staging/media/starfive/camss/stf-camss.c2
-rw-r--r--drivers/staging/media/starfive/camss/stf-capture.c4
-rw-r--r--drivers/staging/most/video/video.c6
-rw-r--r--drivers/staging/nvec/nvec.c17
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c10
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c89
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c67
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c20
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c15
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c40
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c30
-rw-r--r--drivers/staging/rtl8192e/rtllib.h141
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c140
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c4
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c68
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c60
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c34
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_sdio.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c1
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h7
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pwr_seq.h2
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h4
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h72
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h4
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_debug.h14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_event.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_rf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h127
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c20
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c323
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h16
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c15
-rw-r--r--drivers/staging/vme_user/vme.c10
-rw-r--r--drivers/staging/vme_user/vme.h17
-rw-r--r--drivers/staging/vme_user/vme_fake.c10
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c155
-rw-r--r--drivers/staging/vt6655/TODO2
-rw-r--r--drivers/staging/vt6655/card.c12
-rw-r--r--drivers/staging/vt6655/card.h4
-rw-r--r--drivers/staging/vt6655/device.h12
-rw-r--r--drivers/staging/vt6655/device_main.c34
-rw-r--r--drivers/staging/vt6655/mac.h4
-rw-r--r--drivers/staging/vt6655/rxtx.c14
-rw-r--r--drivers/target/iscsi/iscsi_target.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h5
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h5
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c1
-rw-r--r--drivers/thunderbolt/acpi.c40
-rw-r--r--drivers/thunderbolt/debugfs.c382
-rw-r--r--drivers/thunderbolt/sb_regs.h18
-rw-r--r--drivers/thunderbolt/tb.h42
-rw-r--r--drivers/thunderbolt/usb4.c62
-rw-r--r--drivers/tty/hvc/hvsi_lib.c2
-rw-r--r--drivers/tty/mxser.c7
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c1
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c47
-rw-r--r--drivers/tty/serial/8250/8250_dma.c19
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c2
-rw-r--r--drivers/tty/serial/8250/8250_early.c11
-rw-r--r--drivers/tty/serial/8250/8250_exar.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c10
-rw-r--r--drivers/tty/serial/8250/8250_pci.c2
-rw-r--r--drivers/tty/serial/8250/8250_platform.c122
-rw-r--r--drivers/tty/serial/8250/8250_port.c4
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c16
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c137
-rw-r--r--drivers/tty/serial/rp2.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c51
-rw-r--r--drivers/tty/serial/sc16is7xx.c183
-rw-r--r--drivers/tty/serial/serial_core.c142
-rw-r--r--drivers/tty/serial/st-asc.c10
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/sysrq.c1
-rw-r--r--drivers/tty/tty_io.c14
-rw-r--r--drivers/ufs/core/ufs-fault-injection.c1
-rw-r--r--drivers/ufs/core/ufs-sysfs.c91
-rw-r--r--drivers/ufs/core/ufs_trace.h (renamed from include/trace/events/ufs.h)6
-rw-r--r--drivers/ufs/core/ufshcd.c85
-rw-r--r--drivers/ufs/host/ufs-qcom.c2
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c14
-rw-r--r--drivers/uio/uio.c4
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c29
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c6
-rw-r--r--drivers/usb/cdns3/host.c4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_npcm.c4
-rw-r--r--drivers/usb/chipidea/udc.c8
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/common/common.c22
-rw-r--r--drivers/usb/core/usb-acpi.c53
-rw-r--r--drivers/usb/dwc2/debugfs.c1
-rw-r--r--drivers/usb/dwc2/drd.c9
-rw-r--r--drivers/usb/dwc2/params.c2
-rw-r--r--drivers/usb/dwc2/platform.c26
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c113
-rw-r--r--drivers/usb/dwc3/dwc3-octeon.c19
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c16
-rw-r--r--drivers/usb/dwc3/dwc3-rtk.c52
-rw-r--r--drivers/usb/dwc3/dwc3-st.c38
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c7
-rw-r--r--drivers/usb/gadget/configfs.c12
-rw-r--r--drivers/usb/gadget/function/f_acm.c52
-rw-r--r--drivers/usb/gadget/function/f_fs.c16
-rw-r--r--drivers/usb/gadget/function/f_hid.c275
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c3
-rw-r--r--drivers/usb/gadget/function/f_midi.c2
-rw-r--r--drivers/usb/gadget/function/f_midi2.c2
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/function/f_uac1.c63
-rw-r--r--drivers/usb/gadget/function/f_uac2.c80
-rw-r--r--drivers/usb/gadget/function/u_audio.c10
-rw-r--r--drivers/usb/gadget/function/u_serial.c22
-rw-r--r--drivers/usb/gadget/function/u_serial.h4
-rw-r--r--drivers/usb/gadget/function/u_uac1.h12
-rw-r--r--drivers/usb/gadget/function/u_uac2.h15
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c12
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c1
-rw-r--r--drivers/usb/gadget/u_f.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c1
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c1
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c4
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-pci.c7
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c14
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c67
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-brcm.c1
-rw-r--r--drivers/usb/host/ehci-exynos.c9
-rw-r--r--drivers/usb/host/ohci-exynos.c9
-rw-r--r--drivers/usb/host/ohci-nxp.c18
-rw-r--r--drivers/usb/host/ohci-ppc-of.c4
-rw-r--r--drivers/usb/host/r8a66597-hcd.c6
-rw-r--r--drivers/usb/host/xhci-dbgcap.c133
-rw-r--r--drivers/usb/host/xhci-dbgcap.h3
-rw-r--r--drivers/usb/host/xhci-dbgtty.c32
-rw-r--r--drivers/usb/host/xhci-ext-caps.h5
-rw-r--r--drivers/usb/host/xhci-hub.c36
-rw-r--r--drivers/usb/host/xhci-mem.c8
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c48
-rw-r--r--drivers/usb/host/xhci-pci.c96
-rw-r--r--drivers/usb/host/xhci-pci.h19
-rw-r--r--drivers/usb/host/xhci-plat.c6
-rw-r--r--drivers/usb/host/xhci-ring.c72
-rw-r--r--drivers/usb/host/xhci.c18
-rw-r--r--drivers/usb/host/xhci.h20
-rw-r--r--drivers/usb/misc/appledisplay.c15
-rw-r--r--drivers/usb/misc/brcmstb-usb-pinmap.c1
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c4
-rw-r--r--drivers/usb/misc/ldusb.c1
-rw-r--r--drivers/usb/misc/onboard_usb_dev.c78
-rw-r--r--drivers/usb/misc/onboard_usb_dev.h2
-rw-r--r--drivers/usb/misc/qcom_eud.c2
-rw-r--r--drivers/usb/misc/yurex.c10
-rw-r--r--drivers/usb/mon/mon_bin.c1
-rw-r--r--drivers/usb/mon/mon_stat.c1
-rw-r--r--drivers/usb/mon/mon_text.c2
-rw-r--r--drivers/usb/musb/mediatek.c27
-rw-r--r--drivers/usb/musb/mpfs.c160
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c1
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c132
-rw-r--r--drivers/usb/roles/class.c7
-rw-r--r--drivers/usb/serial/aircable.c1
-rw-r--r--drivers/usb/serial/ark3116.c1
-rw-r--r--drivers/usb/serial/belkin_sa.c1
-rw-r--r--drivers/usb/serial/ch341.c1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/cyberjack.c1
-rw-r--r--drivers/usb/serial/cypress_m8.c3
-rw-r--r--drivers/usb/serial/digi_acceleport.c2
-rw-r--r--drivers/usb/serial/empeg.c1
-rw-r--r--drivers/usb/serial/f81232.c2
-rw-r--r--drivers/usb/serial/f81534.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/garmin_gps.c1
-rw-r--r--drivers/usb/serial/generic.c1
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/io_ti.c2
-rw-r--r--drivers/usb/serial/ipaq.c1
-rw-r--r--drivers/usb/serial/ipw.c1
-rw-r--r--drivers/usb/serial/ir-usb.c1
-rw-r--r--drivers/usb/serial/iuu_phoenix.c1
-rw-r--r--drivers/usb/serial/keyspan.c4
-rw-r--r--drivers/usb/serial/keyspan_pda.c2
-rw-r--r--drivers/usb/serial/kl5kusb105.c1
-rw-r--r--drivers/usb/serial/kobil_sct.c4
-rw-r--r--drivers/usb/serial/mct_u232.c1
-rw-r--r--drivers/usb/serial/metro-usb.c1
-rw-r--r--drivers/usb/serial/mos7720.c1
-rw-r--r--drivers/usb/serial/mos7840.c1
-rw-r--r--drivers/usb/serial/mxuport.c1
-rw-r--r--drivers/usb/serial/navman.c1
-rw-r--r--drivers/usb/serial/omninet.c1
-rw-r--r--drivers/usb/serial/opticon.c1
-rw-r--r--drivers/usb/serial/option.c1
-rw-r--r--drivers/usb/serial/oti6858.c1
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/qcaux.c1
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/quatech2.c1
-rw-r--r--drivers/usb/serial/safe_serial.c1
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/spcp8x5.c1
-rw-r--r--drivers/usb/serial/ssu100.c1
-rw-r--r--drivers/usb/serial/symbolserial.c1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/upd78f0730.c1
-rw-r--r--drivers/usb/serial/usb-serial-simple.c1
-rw-r--r--drivers/usb/serial/usb-serial.c12
-rw-r--r--drivers/usb/serial/usb_debug.c2
-rw-r--r--drivers/usb/serial/visor.c3
-rw-r--r--drivers/usb/serial/whiteheat.c2
-rw-r--r--drivers/usb/serial/wishbone-serial.c1
-rw-r--r--drivers/usb/serial/xr_serial.c1
-rw-r--r--drivers/usb/serial/xsens_mt.c1
-rw-r--r--drivers/usb/storage/alauda.c4
-rw-r--r--drivers/usb/storage/cypress_atacb.c4
-rw-r--r--drivers/usb/storage/datafab.c4
-rw-r--r--drivers/usb/storage/ene_ub6250.c6
-rw-r--r--drivers/usb/storage/freecom.c4
-rw-r--r--drivers/usb/storage/isd200.c4
-rw-r--r--drivers/usb/storage/jumpshot.c4
-rw-r--r--drivers/usb/storage/karma.c4
-rw-r--r--drivers/usb/storage/onetouch.c4
-rw-r--r--drivers/usb/storage/sddr09.c4
-rw-r--r--drivers/usb/storage/sddr55.c4
-rw-r--r--drivers/usb/storage/shuttle_usbat.c4
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/typec/anx7411.c6
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c53
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c134
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h34
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c82
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c27
-rw-r--r--drivers/usb/typec/tipd/core.c3
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c158
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h46
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c36
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c3
-rw-r--r--drivers/vdpa/Kconfig10
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h3
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h47
-rw-r--r--drivers/vdpa/mlx5/core/mr.c291
-rw-r--r--drivers/vdpa/mlx5/core/resources.c76
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c465
-rw-r--r--drivers/vdpa/pds/cmds.h1
-rw-r--r--drivers/vdpa/vdpa.c79
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c21
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c19
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h1
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c4
-rw-r--r--drivers/vfio/group.c6
-rw-r--r--drivers/vfio/mdev/mdev_private.h3
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c2
-rw-r--r--drivers/vfio/pci/mlx5/main.c2
-rw-r--r--drivers/vfio/pci/pds/lm.c2
-rw-r--r--drivers/vfio/pci/qat/main.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c67
-rw-r--r--drivers/vfio/vfio_iommu_type1.c16
-rw-r--r--drivers/vfio/virqfd.c6
-rw-r--r--drivers/vhost/vdpa.c16
-rw-r--r--drivers/video/backlight/l4f00242t03.c5
-rw-r--r--drivers/video/fbdev/core/fbcon.c26
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c7
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/virt/acrn/irqfd.c6
-rw-r--r--drivers/virt/acrn/mm.c16
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c1
-rw-r--r--drivers/virtio/virtio_balloon.c18
-rw-r--r--drivers/w1/masters/ds2482.c4
-rw-r--r--drivers/watchdog/Kconfig9
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/acquirewdt.c1
-rw-r--r--drivers/watchdog/advantechwdt.c1
-rw-r--r--drivers/watchdog/alim1535_wdt.c1
-rw-r--r--drivers/watchdog/alim7101_wdt.c1
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c1
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/cpwd.c1
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/geodewdt.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c4
-rw-r--r--drivers/watchdog/ib700wdt.c1
-rw-r--r--drivers/watchdog/ibmasr.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c10
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c21
-rw-r--r--drivers/watchdog/imx_sc_wdt.c46
-rw-r--r--drivers/watchdog/indydog.c1
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/machzwd.c1
-rw-r--r--drivers/watchdog/marvell_gti_wdt.c4
-rw-r--r--drivers/watchdog/mixcomwd.c1
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/nv_tco.c1
-rw-r--r--drivers/watchdog/pc87413_wdt.c1
-rw-r--r--drivers/watchdog/pcwd.c2
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pcwd_usb.c2
-rw-r--r--drivers/watchdog/pika_wdt.c1
-rw-r--r--drivers/watchdog/pm8916_wdt.c2
-rw-r--r--drivers/watchdog/rc32434_wdt.c1
-rw-r--r--drivers/watchdog/rdc321x_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c1
-rw-r--r--drivers/watchdog/rzv2h_wdt.c273
-rw-r--r--drivers/watchdog/sa1100_wdt.c1
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/sbc60xxwdt.c1
-rw-r--r--drivers/watchdog/sbc7240_wdt.c1
-rw-r--r--drivers/watchdog/sbc8360.c1
-rw-r--r--drivers/watchdog/sbc_epx_c3.c1
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c1
-rw-r--r--drivers/watchdog/sc1200wdt.c1
-rw-r--r--drivers/watchdog/sc520_wdt.c1
-rw-r--r--drivers/watchdog/sch311x_wdt.c1
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c1
-rw-r--r--drivers/watchdog/ts72xx_wdt.c8
-rw-r--r--drivers/watchdog/w83877f_wdt.c1
-rw-r--r--drivers/watchdog/w83977f_wdt.c1
-rw-r--r--drivers/watchdog/wafer5823wdt.c1
-rw-r--r--drivers/watchdog/wdrtas.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c1
-rw-r--r--drivers/watchdog/wdt977.c1
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/xen/Kconfig5
-rw-r--r--drivers/xen/acpi.c50
-rw-r--r--drivers/xen/evtchn.c1
-rw-r--r--drivers/xen/mcelog.c1
-rw-r--r--drivers/xen/pci.c13
-rw-r--r--drivers/xen/privcmd.c42
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c78
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c1
-rw-r--r--fs/Kconfig27
-rw-r--r--fs/Makefile1
-rw-r--r--fs/afs/afs_vl.h9
-rw-r--r--fs/afs/file.c1
-rw-r--r--fs/afs/fs_operation.c2
-rw-r--r--fs/afs/fs_probe.c4
-rw-r--r--fs/afs/rotate.c11
-rw-r--r--fs/bcachefs/Kconfig7
-rw-r--r--fs/bcachefs/Makefile1
-rw-r--r--fs/bcachefs/acl.c2
-rw-r--r--fs/bcachefs/alloc_background.c45
-rw-r--r--fs/bcachefs/alloc_background.h3
-rw-r--r--fs/bcachefs/alloc_foreground.c59
-rw-r--r--fs/bcachefs/alloc_foreground.h5
-rw-r--r--fs/bcachefs/backpointers.c108
-rw-r--r--fs/bcachefs/backpointers.h23
-rw-r--r--fs/bcachefs/bcachefs.h17
-rw-r--r--fs/bcachefs/bcachefs_format.h10
-rw-r--r--fs/bcachefs/bkey.h8
-rw-r--r--fs/bcachefs/bkey_methods.c2
-rw-r--r--fs/bcachefs/bkey_methods.h2
-rw-r--r--fs/bcachefs/bset.c182
-rw-r--r--fs/bcachefs/bset.h4
-rw-r--r--fs/bcachefs/btree_cache.c273
-rw-r--r--fs/bcachefs/btree_cache.h3
-rw-r--r--fs/bcachefs/btree_gc.c29
-rw-r--r--fs/bcachefs/btree_io.c14
-rw-r--r--fs/bcachefs/btree_io.h4
-rw-r--r--fs/bcachefs/btree_iter.c63
-rw-r--r--fs/bcachefs/btree_iter.h52
-rw-r--r--fs/bcachefs/btree_key_cache.c405
-rw-r--r--fs/bcachefs/btree_key_cache_types.h18
-rw-r--r--fs/bcachefs/btree_locking.h13
-rw-r--r--fs/bcachefs/btree_node_scan.c2
-rw-r--r--fs/bcachefs/btree_trans_commit.c110
-rw-r--r--fs/bcachefs/btree_types.h60
-rw-r--r--fs/bcachefs/btree_update.c12
-rw-r--r--fs/bcachefs/btree_update.h3
-rw-r--r--fs/bcachefs/btree_update_interior.c37
-rw-r--r--fs/bcachefs/btree_update_interior.h2
-rw-r--r--fs/bcachefs/buckets.c35
-rw-r--r--fs/bcachefs/buckets.h15
-rw-r--r--fs/bcachefs/buckets_types.h8
-rw-r--r--fs/bcachefs/chardev.c1
-rw-r--r--fs/bcachefs/checksum.c101
-rw-r--r--fs/bcachefs/clock.h9
-rw-r--r--fs/bcachefs/darray.c4
-rw-r--r--fs/bcachefs/darray.h26
-rw-r--r--fs/bcachefs/data_update.c4
-rw-r--r--fs/bcachefs/dirent.c66
-rw-r--r--fs/bcachefs/disk_accounting.c82
-rw-r--r--fs/bcachefs/disk_accounting.h29
-rw-r--r--fs/bcachefs/disk_accounting_types.h2
-rw-r--r--fs/bcachefs/ec.c303
-rw-r--r--fs/bcachefs/ec.h11
-rw-r--r--fs/bcachefs/ec_format.h9
-rw-r--r--fs/bcachefs/ec_types.h1
-rw-r--r--fs/bcachefs/errcode.h14
-rw-r--r--fs/bcachefs/error.c14
-rw-r--r--fs/bcachefs/error.h2
-rw-r--r--fs/bcachefs/extents.c33
-rw-r--r--fs/bcachefs/extents.h24
-rw-r--r--fs/bcachefs/fs-common.c5
-rw-r--r--fs/bcachefs/fs-io-buffered.c41
-rw-r--r--fs/bcachefs/fs-io-direct.c2
-rw-r--r--fs/bcachefs/fs-io-pagecache.c90
-rw-r--r--fs/bcachefs/fs-io-pagecache.h4
-rw-r--r--fs/bcachefs/fs-io.c178
-rw-r--r--fs/bcachefs/fs-ioctl.c4
-rw-r--r--fs/bcachefs/fs.c427
-rw-r--r--fs/bcachefs/fs.h18
-rw-r--r--fs/bcachefs/fsck.c295
-rw-r--r--fs/bcachefs/inode.c14
-rw-r--r--fs/bcachefs/inode.h1
-rw-r--r--fs/bcachefs/io_read.c22
-rw-r--r--fs/bcachefs/io_write.c11
-rw-r--r--fs/bcachefs/journal_io.c8
-rw-r--r--fs/bcachefs/journal_reclaim.c7
-rw-r--r--fs/bcachefs/logged_ops.c13
-rw-r--r--fs/bcachefs/opts.c85
-rw-r--r--fs/bcachefs/opts.h61
-rw-r--r--fs/bcachefs/rcu_pending.c650
-rw-r--r--fs/bcachefs/rcu_pending.h27
-rw-r--r--fs/bcachefs/rebalance.c3
-rw-r--r--fs/bcachefs/recovery.c29
-rw-r--r--fs/bcachefs/recovery_passes.c10
-rw-r--r--fs/bcachefs/recovery_passes_types.h2
-rw-r--r--fs/bcachefs/reflink.c2
-rw-r--r--fs/bcachefs/replicas.c28
-rw-r--r--fs/bcachefs/replicas.h2
-rw-r--r--fs/bcachefs/replicas_format.h9
-rw-r--r--fs/bcachefs/sb-clean.c3
-rw-r--r--fs/bcachefs/sb-downgrade.c9
-rw-r--r--fs/bcachefs/sb-errors.c6
-rw-r--r--fs/bcachefs/sb-errors.h2
-rw-r--r--fs/bcachefs/sb-errors_format.h39
-rw-r--r--fs/bcachefs/sb-members.c57
-rw-r--r--fs/bcachefs/sb-members.h22
-rw-r--r--fs/bcachefs/six.c14
-rw-r--r--fs/bcachefs/snapshot.c3
-rw-r--r--fs/bcachefs/str_hash.h2
-rw-r--r--fs/bcachefs/subvolume.c54
-rw-r--r--fs/bcachefs/subvolume.h45
-rw-r--r--fs/bcachefs/subvolume_types.h3
-rw-r--r--fs/bcachefs/super-io.c19
-rw-r--r--fs/bcachefs/super.c85
-rw-r--r--fs/bcachefs/sysfs.c55
-rw-r--r--fs/bcachefs/tests.c2
-rw-r--r--fs/bcachefs/thread_with_file.c4
-rw-r--r--fs/bcachefs/time_stats.c14
-rw-r--r--fs/bcachefs/time_stats.h3
-rw-r--r--fs/bcachefs/trace.h465
-rw-r--r--fs/bcachefs/util.c16
-rw-r--r--fs/bcachefs/util.h2
-rw-r--r--fs/bcachefs/xattr.c81
-rw-r--r--fs/bcachefs/xattr_format.h2
-rw-r--r--fs/binfmt_elf.c48
-rw-r--r--fs/bpf_fs_kfuncs.c185
-rw-r--r--fs/btrfs/btrfs_inode.h1
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/defrag.c2
-rw-r--r--fs/btrfs/file.c34
-rw-r--r--fs/btrfs/ioctl.c4
-rw-r--r--fs/btrfs/tree-checker.c2
-rw-r--r--fs/cachefiles/namei.c7
-rw-r--r--fs/ceph/addr.c1
-rw-r--r--fs/ceph/caps.c29
-rw-r--r--fs/ceph/dir.c2
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c25
-rw-r--r--fs/ceph/mds_client.h7
-rw-r--r--fs/ceph/super.c1
-rw-r--r--fs/ceph/super.h7
-rw-r--r--fs/coredump.c107
-rw-r--r--fs/debugfs/file.c1
-rw-r--r--fs/dlm/debug_fs.c1
-rw-r--r--fs/efivarfs/file.c1
-rw-r--r--fs/eventfd.c4
-rw-r--r--fs/eventpoll.c30
-rw-r--r--fs/exec.c81
-rw-r--r--fs/exfat/balloc.c10
-rw-r--r--fs/exfat/exfat_fs.h24
-rw-r--r--fs/exfat/file.c110
-rw-r--r--fs/exfat/inode.c94
-rw-r--r--fs/exfat/namei.c17
-rw-r--r--fs/exfat/nls.c5
-rw-r--r--fs/exfat/super.c41
-rw-r--r--fs/ext4/bitmap.c8
-rw-r--r--fs/ext4/dir.c14
-rw-r--r--fs/ext4/ext4.h31
-rw-r--r--fs/ext4/extents.c823
-rw-r--r--fs/ext4/extents_status.c240
-rw-r--r--fs/ext4/extents_status.h28
-rw-r--r--fs/ext4/fast_commit.c47
-rw-r--r--fs/ext4/file.c20
-rw-r--r--fs/ext4/ialloc.c35
-rw-r--r--fs/ext4/indirect.c7
-rw-r--r--fs/ext4/inline.c46
-rw-r--r--fs/ext4/inode.c292
-rw-r--r--fs/ext4/ioctl.c6
-rw-r--r--fs/ext4/mballoc.c25
-rw-r--r--fs/ext4/migrate.c7
-rw-r--r--fs/ext4/move_extent.c90
-rw-r--r--fs/ext4/namei.c16
-rw-r--r--fs/ext4/readpage.c16
-rw-r--r--fs/ext4/resize.c3
-rw-r--r--fs/ext4/super.c65
-rw-r--r--fs/ext4/xattr.c31
-rw-r--r--fs/ext4/xattr.h7
-rw-r--r--fs/f2fs/checkpoint.c17
-rw-r--r--fs/f2fs/compress.c63
-rw-r--r--fs/f2fs/data.c164
-rw-r--r--fs/f2fs/debug.c2
-rw-r--r--fs/f2fs/dir.c8
-rw-r--r--fs/f2fs/extent_cache.c4
-rw-r--r--fs/f2fs/f2fs.h148
-rw-r--r--fs/f2fs/file.c205
-rw-r--r--fs/f2fs/gc.c113
-rw-r--r--fs/f2fs/gc.h29
-rw-r--r--fs/f2fs/inline.c31
-rw-r--r--fs/f2fs/inode.c9
-rw-r--r--fs/f2fs/namei.c68
-rw-r--r--fs/f2fs/node.c46
-rw-r--r--fs/f2fs/segment.c72
-rw-r--r--fs/f2fs/segment.h5
-rw-r--r--fs/f2fs/super.c119
-rw-r--r--fs/f2fs/sysfs.c82
-rw-r--r--fs/f2fs/verity.c5
-rw-r--r--fs/f2fs/xattr.c14
-rw-r--r--fs/fcntl.c38
-rw-r--r--fs/fhandle.c4
-rw-r--r--fs/file.c26
-rw-r--r--fs/fsopen.c7
-rw-r--r--fs/fuse/Makefile3
-rw-r--r--fs/fuse/acl.c10
-rw-r--r--fs/fuse/control.c4
-rw-r--r--fs/fuse/dev.c221
-rw-r--r--fs/fuse/dir.c152
-rw-r--r--fs/fuse/file.c184
-rw-r--r--fs/fuse/fuse_i.h42
-rw-r--r--fs/fuse/fuse_trace.h132
-rw-r--r--fs/fuse/inode.c13
-rw-r--r--fs/fuse/passthrough.c7
-rw-r--r--fs/fuse/virtio_fs.c206
-rw-r--r--fs/gfs2/aops.c30
-rw-r--r--fs/gfs2/file.c2
-rw-r--r--fs/gfs2/glock.c9
-rw-r--r--fs/gfs2/log.c12
-rw-r--r--fs/gfs2/meta_io.c24
-rw-r--r--fs/gfs2/ops_fstype.c3
-rw-r--r--fs/inode.c8
-rw-r--r--fs/ioctl.c30
-rw-r--r--fs/iomap/buffered-io.c199
-rw-r--r--fs/iomap/direct-io.c42
-rw-r--r--fs/isofs/rock.h2
-rw-r--r--fs/jbd2/checkpoint.c21
-rw-r--r--fs/jbd2/journal.c97
-rw-r--r--fs/kernel_read_file.c4
-rw-r--r--fs/lockd/host.c2
-rw-r--r--fs/lockd/svc.c9
-rw-r--r--fs/locks.c14
-rw-r--r--fs/mnt_idmapping.c22
-rw-r--r--fs/namei.c10
-rw-r--r--fs/namespace.c22
-rw-r--r--fs/netfs/buffered_write.c1
-rw-r--r--fs/netfs/internal.h1
-rw-r--r--fs/netfs/misc.c74
-rw-r--r--fs/netfs/write_issue.c24
-rw-r--r--fs/nfs/Kconfig1
-rw-r--r--fs/nfs/Makefile1
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/client.c21
-rw-r--r--fs/nfs/dir.c6
-rw-r--r--fs/nfs/filelayout/filelayout.c6
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c56
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c6
-rw-r--r--fs/nfs/fs_context.c8
-rw-r--r--fs/nfs/getroot.c2
-rw-r--r--fs/nfs/inode.c53
-rw-r--r--fs/nfs/internal.h54
-rw-r--r--fs/nfs/localio.c757
-rw-r--r--fs/nfs/nfs2xdr.c70
-rw-r--r--fs/nfs/nfs3xdr.c108
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c16
-rw-r--r--fs/nfs/nfs4state.c22
-rw-r--r--fs/nfs/nfs4xdr.c101
-rw-r--r--fs/nfs/nfstrace.h61
-rw-r--r--fs/nfs/pagelist.c16
-rw-r--r--fs/nfs/pnfs_nfs.c2
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/super.c3
-rw-r--r--fs/nfs/write.c21
-rw-r--r--fs/nfs_common/Makefile5
-rw-r--r--fs/nfs_common/common.c134
-rw-r--r--fs/nfs_common/nfslocalio.c172
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/Makefile1
-rw-r--r--fs/nfsd/auth.c14
-rw-r--r--fs/nfsd/auth.h2
-rw-r--r--fs/nfsd/blocklayout.c6
-rw-r--r--fs/nfsd/blocklayoutxdr.h2
-rw-r--r--fs/nfsd/cache.h2
-rw-r--r--fs/nfsd/export.c67
-rw-r--r--fs/nfsd/export.h7
-rw-r--r--fs/nfsd/filecache.c137
-rw-r--r--fs/nfsd/filecache.h6
-rw-r--r--fs/nfsd/localio.c169
-rw-r--r--fs/nfsd/netns.h19
-rw-r--r--fs/nfsd/nfs3proc.c44
-rw-r--r--fs/nfsd/nfs4callback.c8
-rw-r--r--fs/nfsd/nfs4idmap.c13
-rw-r--r--fs/nfsd/nfs4layouts.c1
-rw-r--r--fs/nfsd/nfs4proc.c69
-rw-r--r--fs/nfsd/nfs4recover.c13
-rw-r--r--fs/nfsd/nfs4state.c219
-rw-r--r--fs/nfsd/nfs4xdr.c29
-rw-r--r--fs/nfsd/nfsctl.c46
-rw-r--r--fs/nfsd/nfsd.h50
-rw-r--r--fs/nfsd/nfsfh.c185
-rw-r--r--fs/nfsd/nfsfh.h4
-rw-r--r--fs/nfsd/nfsproc.c49
-rw-r--r--fs/nfsd/nfssvc.c231
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/nfsd/trace.h145
-rw-r--r--fs/nfsd/vfs.c45
-rw-r--r--fs/nfsd/vfs.h6
-rw-r--r--fs/nfsd/xdr4.h1
-rw-r--r--fs/nilfs2/alloc.h2
-rw-r--r--fs/nilfs2/bmap.c2
-rw-r--r--fs/nilfs2/bmap.h20
-rw-r--r--fs/nilfs2/btnode.c63
-rw-r--r--fs/nilfs2/btree.c12
-rw-r--r--fs/nilfs2/btree.h1
-rw-r--r--fs/nilfs2/cpfile.c54
-rw-r--r--fs/nilfs2/dat.c17
-rw-r--r--fs/nilfs2/dir.c44
-rw-r--r--fs/nilfs2/inode.c79
-rw-r--r--fs/nilfs2/ioctl.c109
-rw-r--r--fs/nilfs2/mdt.c6
-rw-r--r--fs/nilfs2/nilfs.h27
-rw-r--r--fs/nilfs2/page.c21
-rw-r--r--fs/nilfs2/page.h4
-rw-r--r--fs/nilfs2/recovery.c11
-rw-r--r--fs/nilfs2/segment.c234
-rw-r--r--fs/nilfs2/segment.h10
-rw-r--r--fs/nilfs2/sufile.c52
-rw-r--r--fs/nilfs2/super.c9
-rw-r--r--fs/nilfs2/the_nilfs.c5
-rw-r--r--fs/nilfs2/the_nilfs.h6
-rw-r--r--fs/notify/fanotify/fanotify_user.c12
-rw-r--r--fs/notify/inotify/inotify_user.c12
-rw-r--r--fs/nsfs.c1
-rw-r--r--fs/ocfs2/aops.c7
-rw-r--r--fs/ocfs2/buffer_head_io.c4
-rw-r--r--fs/ocfs2/cluster/heartbeat.c6
-rw-r--r--fs/ocfs2/dir.c12
-rw-r--r--fs/ocfs2/dlmglue.c7
-rw-r--r--fs/ocfs2/extent_map.c8
-rw-r--r--fs/ocfs2/journal.c7
-rw-r--r--fs/ocfs2/localalloc.c19
-rw-r--r--fs/ocfs2/quota_global.c15
-rw-r--r--fs/ocfs2/quota_local.c8
-rw-r--r--fs/ocfs2/refcounttree.c39
-rw-r--r--fs/ocfs2/super.c10
-rw-r--r--fs/ocfs2/xattr.c26
-rw-r--r--fs/open.c24
-rw-r--r--fs/orangefs/orangefs-sysfs.c14
-rw-r--r--fs/overlayfs/file.c68
-rw-r--r--fs/pidfs.c5
-rw-r--r--fs/pipe.c1
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/inode.c31
-rw-r--r--fs/proc/internal.h3
-rw-r--r--fs/proc/page.c11
-rw-r--r--fs/proc/proc_sysctl.c11
-rw-r--r--fs/proc/task_mmu.c15
-rw-r--r--fs/pstore/platform.c10
-rw-r--r--fs/quota/dquot.c14
-rw-r--r--fs/quota/quota.c8
-rw-r--r--fs/quota/quota_v1.c3
-rw-r--r--fs/quota/quota_v2.c9
-rw-r--r--fs/read_write.c118
-rw-r--r--fs/readdir.c20
-rw-r--r--fs/remap_range.c2
-rw-r--r--fs/select.c12
-rw-r--r--fs/signalfd.c6
-rw-r--r--fs/smb/client/cifsencrypt.c151
-rw-r--r--fs/smb/client/cifsfs.h4
-rw-r--r--fs/smb/client/cifsglob.h5
-rw-r--r--fs/smb/client/cifsproto.h12
-rw-r--r--fs/smb/client/connect.c66
-rw-r--r--fs/smb/client/dfs.c73
-rw-r--r--fs/smb/client/dfs.h42
-rw-r--r--fs/smb/client/dfs_cache.c218
-rw-r--r--fs/smb/client/fs_context.h1
-rw-r--r--fs/smb/client/inode.c19
-rw-r--r--fs/smb/client/ioctl.c8
-rw-r--r--fs/smb/client/misc.c6
-rw-r--r--fs/smb/client/namespace.c2
-rw-r--r--fs/smb/client/reparse.c10
-rw-r--r--fs/smb/client/reparse.h9
-rw-r--r--fs/smb/client/sess.c2
-rw-r--r--fs/smb/client/smb2misc.c28
-rw-r--r--fs/smb/client/smb2ops.c56
-rw-r--r--fs/smb/client/smb2pdu.c32
-rw-r--r--fs/smb/client/smb2proto.h2
-rw-r--r--fs/smb/client/smb2transport.c32
-rw-r--r--fs/smb/client/trace.h6
-rw-r--r--fs/smb/client/transport.c3
-rw-r--r--fs/smb/common/smb2pdu.h6
-rw-r--r--fs/smb/server/connection.c2
-rw-r--r--fs/smb/server/ksmbd_netlink.h2
-rw-r--r--fs/smb/server/oplock.c4
-rw-r--r--fs/smb/server/server.c2
-rw-r--r--fs/smb/server/smb2pdu.c35
-rw-r--r--fs/smb/server/smb2pdu.h4
-rw-r--r--fs/smb/server/smb_common.c2
-rw-r--r--fs/smb/server/vfs_cache.h4
-rw-r--r--fs/smb/server/xattr.h2
-rw-r--r--fs/splice.c22
-rw-r--r--fs/squashfs/decompressor_multi_percpu.c6
-rw-r--r--fs/stat.c8
-rw-r--r--fs/statfs.c4
-rw-r--r--fs/sync.c14
-rw-r--r--fs/timerfd.c8
-rw-r--r--fs/ubifs/debug.c2
-rw-r--r--fs/userfaultfd.c171
-rw-r--r--fs/utimes.c4
-rw-r--r--fs/xattr.c36
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c15
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c5
-rw-r--r--fs/xfs/libxfs/xfs_shared.h3
-rw-r--r--fs/xfs/scrub/xfile.c6
-rw-r--r--fs/xfs/xfs_buf_mem.c2
-rw-r--r--fs/xfs/xfs_exchrange.c8
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_handle.c6
-rw-r--r--fs/xfs/xfs_icache.c6
-rw-r--r--fs/xfs/xfs_ioctl.c28
-rw-r--r--fs/xfs/xfs_iomap.c19
-rw-r--r--fs/xfs/xfs_iops.c12
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_mount.c8
-rw-r--r--fs/xfs/xfs_super.c28
-rw-r--r--fs/zonefs/file.c2
-rw-r--r--fs/zonefs/sysfs.c1
-rw-r--r--include/asm-generic/mm_hooks.h11
-rw-r--r--include/asm-generic/mmzone.h5
-rw-r--r--include/asm-generic/numa.h8
-rw-r--r--include/asm-generic/vmlinux.lds.h5
-rw-r--r--include/cxl/einj.h (renamed from include/linux/einj-cxl.h)0
-rw-r--r--include/cxl/event.h (renamed from include/linux/cxl-event.h)0
-rw-r--r--include/cxl/mailbox.h28
-rw-r--r--include/drm/display/drm_dp.h4
-rw-r--r--include/drm/display/drm_dp_helper.h3
-rw-r--r--include/drm/display/drm_dp_mst_helper.h14
-rw-r--r--include/drm/drm_accel.h18
-rw-r--r--include/drm/drm_atomic.h2
-rw-r--r--include/drm/drm_connector.h8
-rw-r--r--include/drm/drm_device.h5
-rw-r--r--include/drm/drm_drv.h28
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_fb_helper.h6
-rw-r--r--include/drm/drm_file.h5
-rw-r--r--include/drm/drm_fixed.h3
-rw-r--r--include/drm/drm_mipi_dsi.h12
-rw-r--r--include/drm/drm_mode_config.h16
-rw-r--r--include/drm/drm_panic.h21
-rw-r--r--include/drm/drm_prime.h3
-rw-r--r--include/drm/drm_print.h54
-rw-r--r--include/drm/drm_rect.h15
-rw-r--r--include/drm/drm_vblank.h37
-rw-r--r--include/drm/gpu_scheduler.h2
-rw-r--r--include/drm/ttm/ttm_bo.h48
-rw-r--r--include/drm/ttm/ttm_resource.h97
-rw-r--r--include/dt-bindings/clock/at91.h4
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h7
-rw-r--r--include/dt-bindings/clock/cirrus,ep9301-syscon.h46
-rw-r--r--include/dt-bindings/clock/nxp,imx95-clock.h3
-rw-r--r--include/dt-bindings/clock/px30-cru.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8180x.h1
-rw-r--r--include/dt-bindings/clock/rk3036-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3228-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3308-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3328-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3368-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3399-cru.h4
-rw-r--r--include/dt-bindings/clock/rockchip,rk3576-cru.h592
-rw-r--r--include/dt-bindings/iio/adi,ad4695.h9
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8937.h93
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8976.h97
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8350.h10
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv1800b.h63
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv1812h.h127
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv18xx.h19
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2000.h127
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2002.h79
-rw-r--r--include/dt-bindings/reset/rockchip,rk3576-cru.h564
-rw-r--r--include/keys/dns_resolver-type.h4
-rw-r--r--include/kunit/clk.h28
-rw-r--r--include/kunit/of.h115
-rw-r--r--include/kunit/platform_device.h20
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/alloc_tag.h26
-rw-r--r--include/linux/attribute_container.h6
-rw-r--r--include/linux/auxiliary_bus.h2
-rw-r--r--include/linux/bcma/bcma_driver_pci.h2
-rw-r--r--include/linux/bitmap.h140
-rw-r--r--include/linux/bits.h15
-rw-r--r--include/linux/blk-integrity.h15
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h4
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/bpf.h39
-rw-r--r--include/linux/bpf_lsm.h8
-rw-r--r--include/linux/bpf_verifier.h27
-rw-r--r--include/linux/btf.h5
-rw-r--r--include/linux/buildid.h4
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/cgroup-defs.h11
-rw-r--r--include/linux/cgroup.h7
-rw-r--r--include/linux/cleanup.h138
-rw-r--r--include/linux/clk-provider.h14
-rw-r--r--include/linux/clk.h33
-rw-r--r--include/linux/cma.h16
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/coredump.h8
-rw-r--r--include/linux/coresight-pmu.h17
-rw-r--r--include/linux/coresight.h21
-rw-r--r--include/linux/cpumask.h212
-rw-r--r--include/linux/damon.h3
-rw-r--r--include/linux/debugfs.h1
-rw-r--r--include/linux/decompress/unxz.h5
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/device/bus.h6
-rw-r--r--include/linux/device/class.h2
-rw-r--r--include/linux/device/driver.h2
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/dma-fence-array.h6
-rw-r--r--include/linux/dma-heap.h21
-rw-r--r--include/linux/dma-map-ops.h38
-rw-r--r--include/linux/dma-mapping.h25
-rw-r--r--include/linux/dma/ipu-dma.h174
-rw-r--r--include/linux/dma/k3-udma-glue.h2
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/err.h9
-rw-r--r--include/linux/f2fs_fs.h4
-rw-r--r--include/linux/fault-inject.h36
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/file.h53
-rw-r--r--include/linux/filter.h10
-rw-r--r--include/linux/find.h50
-rw-r--r--include/linux/firewire.h22
-rw-r--r--include/linux/folio_queue.h168
-rw-r--r--include/linux/fs.h12
-rw-r--r--include/linux/fsl/mc.h2
-rw-r--r--include/linux/generic-radix-tree.h105
-rw-r--r--include/linux/gfp.h25
-rw-r--r--include/linux/gfp_types.h8
-rw-r--r--include/linux/huge_mm.h158
-rw-r--r--include/linux/hugetlb.h27
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/i3c/master.h16
-rw-r--r--include/linux/iio/backend.h62
-rw-r--r--include/linux/iio/iio.h39
-rw-r--r--include/linux/input/matrix_keypad.h48
-rw-r--r--include/linux/iomap.h13
-rw-r--r--include/linux/iommu-dma.h69
-rw-r--r--include/linux/iommufd.h12
-rw-r--r--include/linux/ioprio.h2
-rw-r--r--include/linux/jbd2.h4
-rw-r--r--include/linux/kernel-page-flags.h3
-rw-r--r--include/linux/key.h3
-rw-r--r--include/linux/kfence.h2
-rw-r--r--include/linux/khugepaged.h1
-rw-r--r--include/linux/kmsg_dump.h22
-rw-r--r--include/linux/kprobes.h9
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--include/linux/leds.h2
-rw-r--r--include/linux/libata.h157
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lru_cache.h4
-rw-r--r--include/linux/lsm_hook_defs.h2
-rw-r--r--include/linux/maple_tree.h20
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/memcontrol.h67
-rw-r--r--include/linux/memory_hotplug.h48
-rw-r--r--include/linux/mfd/88pm80x.h2
-rw-r--r--include/linux/mfd/ds1wm.h29
-rw-r--r--include/linux/migrate.h3
-rw-r--r--include/linux/mlx5/device.h31
-rw-r--r--include/linux/mlx5/driver.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h113
-rw-r--r--include/linux/mm.h304
-rw-r--r--include/linux/mm_types.h22
-rw-r--r--include/linux/mm_types_task.h3
-rw-r--r--include/linux/mmc/host.h1
-rw-r--r--include/linux/mmzone.h35
-rw-r--r--include/linux/mnt_idmapping.h1
-rw-r--r--include/linux/msi.h2
-rw-r--r--include/linux/mutex.h19
-rw-r--r--include/linux/netfilter.h4
-rw-r--r--include/linux/nfs.h9
-rw-r--r--include/linux/nfs4.h17
-rw-r--r--include/linux/nfs_common.h17
-rw-r--r--include/linux/nfs_fs_sb.h13
-rw-r--r--include/linux/nfs_xdr.h22
-rw-r--r--include/linux/nfslocalio.h74
-rw-r--r--include/linux/nodemask.h86
-rw-r--r--include/linux/numa.h8
-rw-r--r--include/linux/numa_memblks.h58
-rw-r--r--include/linux/page-flags.h202
-rw-r--r--include/linux/page_counter.h27
-rw-r--r--include/linux/pagemap.h124
-rw-r--r--include/linux/pagewalk.h58
-rw-r--r--include/linux/pci-epc.h3
-rw-r--r--include/linux/pci.h11
-rw-r--r--include/linux/pci_ids.h5
-rw-r--r--include/linux/percpu.h1
-rw-r--r--include/linux/pgalloc_tag.h31
-rw-r--r--include/linux/pgtable.h18
-rw-r--r--include/linux/pinctrl/pinconf-generic.h3
-rw-r--r--include/linux/platform_data/ad5449.h39
-rw-r--r--include/linux/platform_data/amd_qdma.h36
-rw-r--r--include/linux/platform_data/cyttsp4.h62
-rw-r--r--include/linux/platform_data/dma-ep93xx.h94
-rw-r--r--include/linux/platform_data/eth-ep93xx.h10
-rw-r--r--include/linux/platform_data/keypad-ep93xx.h32
-rw-r--r--include/linux/platform_data/keypad-nomadik-ske.h50
-rw-r--r--include/linux/platform_data/mcs.h30
-rw-r--r--include/linux/platform_data/spi-ep93xx.h15
-rw-r--r--include/linux/platform_data/zforce_ts.h15
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/ratelimit_types.h2
-rw-r--r--include/linux/ring_buffer.h20
-rw-r--r--include/linux/rmap.h11
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/sched.h33
-rw-r--r--include/linux/sched/deadline.h14
-rw-r--r--include/linux/sched/ext.h215
-rw-r--r--include/linux/sched/mm.h27
-rw-r--r--include/linux/sched/prio.h1
-rw-r--r--include/linux/sched/rt.h33
-rw-r--r--include/linux/sched/task.h8
-rw-r--r--include/linux/sched/task_stack.h18
-rw-r--r--include/linux/security.h4
-rw-r--r--include/linux/seqlock.h25
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_s3c.h24
-rw-r--r--include/linux/set_memory.h8
-rw-r--r--include/linux/shmem_fs.h15
-rw-r--r--include/linux/slab.h14
-rw-r--r--include/linux/soc/cirrus/ep93xx.h47
-rw-r--r--include/linux/soc/qcom/geni-se.h9
-rw-r--r--include/linux/soundwire/sdw.h2
-rw-r--r--include/linux/string.h12
-rw-r--r--include/linux/sunrpc/sched.h16
-rw-r--r--include/linux/sunrpc/svc.h51
-rw-r--r--include/linux/sunrpc/svc_rdma.h2
-rw-r--r--include/linux/sunrpc/svcauth.h6
-rw-r--r--include/linux/sunrpc/svcsock.h2
-rw-r--r--include/linux/sunrpc/xdrgen/_builtins.h243
-rw-r--r--include/linux/sunrpc/xdrgen/_defs.h26
-rw-r--r--include/linux/swap.h44
-rw-r--r--include/linux/tracepoint.h20
-rw-r--r--include/linux/uaccess.h7
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb/composite.h2
-rw-r--r--include/linux/usb/func_utils.h (renamed from drivers/usb/gadget/u_f.h)8
-rw-r--r--include/linux/usb/gadget_configfs.h7
-rw-r--r--include/linux/usb/serial.h7
-rw-r--r--include/linux/usb/tcpci.h31
-rw-r--r--include/linux/usb/usbnet.h15
-rw-r--r--include/linux/userfaultfd_k.h19
-rw-r--r--include/linux/vdpa.h9
-rw-r--r--include/linux/vm_event_item.h26
-rw-r--r--include/linux/vmalloc.h4
-rw-r--r--include/linux/vmstat.h1
-rw-r--r--include/linux/writeback.h3
-rw-r--r--include/linux/xz.h81
-rw-r--r--include/linux/zstd.h167
-rw-r--r--include/linux/zswap.h16
-rw-r--r--include/media/cec.h33
-rw-r--r--include/media/rc-core.h2
-rw-r--r--include/media/v4l2-mc.h3
-rw-r--r--include/media/v4l2-subdev.h6
-rw-r--r--include/media/videobuf2-core.h3
-rw-r--r--include/net/tcp.h21
-rw-r--r--include/rdma/ib_umem.h18
-rw-r--r--include/rdma/ib_verbs.h4
-rw-r--r--include/rdma/rdma_netlink.h12
-rw-r--r--include/scsi/fcoe_sysfs.h2
-rw-r--r--include/scsi/scsi_dbg.h7
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/scsi/scsi_transport_fc.h6
-rw-r--r--include/trace/events/dma.h342
-rw-r--r--include/trace/events/f2fs.h3
-rw-r--r--include/trace/events/filemap.h84
-rw-r--r--include/trace/events/firewire.h4
-rw-r--r--include/trace/events/mmflags.h36
-rw-r--r--include/trace/events/netfs.h3
-rw-r--r--include/trace/events/oom.h4
-rw-r--r--include/trace/events/rpcrdma.h23
-rw-r--r--include/trace/events/sched_ext.h32
-rw-r--r--include/trace/misc/nfs.h1
-rw-r--r--include/uapi/drm/drm_fourcc.h25
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/msm_drm.h2
-rw-r--r--include/uapi/drm/xe_drm.h10
-rw-r--r--include/uapi/linux/android/binder.h36
-rw-r--r--include/uapi/linux/bits.h3
-rw-r--r--include/uapi/linux/bpf.h18
-rw-r--r--include/uapi/linux/cec.h9
-rw-r--r--include/uapi/linux/const.h17
-rw-r--r--include/uapi/linux/exfat.h25
-rw-r--r--include/uapi/linux/fuse.h22
-rw-r--r--include/uapi/linux/io_uring.h6
-rw-r--r--include/uapi/linux/iommufd.h2
-rw-r--r--include/uapi/linux/kernel-page-flags.h2
-rw-r--r--include/uapi/linux/kfd_ioctl.h106
-rw-r--r--include/uapi/linux/landlock.h30
-rw-r--r--include/uapi/linux/pci_regs.h41
-rw-r--r--include/uapi/linux/rkisp1-config.h578
-rw-r--r--include/uapi/linux/sched.h1
-rw-r--r--include/uapi/linux/sched/types.h6
-rw-r--r--include/uapi/linux/serio.h1
-rw-r--r--include/uapi/linux/usb/ch9.h8
-rw-r--r--include/uapi/linux/usb/functionfs.h97
-rw-r--r--include/uapi/linux/usb/g_hid.h40
-rw-r--r--include/uapi/linux/usb/gadgetfs.h2
-rw-r--r--include/uapi/linux/vdpa.h1
-rw-r--r--include/uapi/linux/videodev2.h2
-rw-r--r--include/uapi/linux/virtio_balloon.h16
-rw-r--r--include/uapi/linux/virtio_gpu.h1
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h13
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h9
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h4
-rw-r--r--include/uapi/rdma/rdma_netlink.h16
-rw-r--r--include/uapi/xen/privcmd.h7
-rw-r--r--include/ufs/ufs.h4
-rw-r--r--include/ufs/ufshcd.h1
-rw-r--r--include/ufs/ufshci.h5
-rw-r--r--include/xen/acpi.h27
-rw-r--r--include/xen/interface/elfnote.h93
-rw-r--r--include/xen/interface/physdev.h17
-rw-r--r--include/xen/pci.h6
-rw-r--r--init/Kconfig36
-rw-r--r--init/init_task.c12
-rw-r--r--io_uring/fdinfo.c3
-rw-r--r--io_uring/io_uring.c21
-rw-r--r--io_uring/register.c6
-rw-r--r--io_uring/register.h2
-rw-r--r--io_uring/rsrc.c23
-rw-r--r--io_uring/rsrc.h7
-rw-r--r--io_uring/sqpoll.c32
-rw-r--r--ipc/mqueue.c50
-rw-r--r--ipc/shm.c8
-rw-r--r--kernel/Kconfig.preempt27
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/bpf/Makefile6
-rw-r--r--kernel/bpf/arraymap.c17
-rw-r--r--kernel/bpf/bpf_inode_storage.c28
-rw-r--r--kernel/bpf/bpf_iter.c1
-rw-r--r--kernel/bpf/bpf_lsm.c65
-rw-r--r--kernel/bpf/bpf_struct_ops.c9
-rw-r--r--kernel/bpf/btf.c172
-rw-r--r--kernel/bpf/btf_iter.c2
-rw-r--r--kernel/bpf/btf_relocate.c2
-rw-r--r--kernel/bpf/cgroup.c2
-rw-r--r--kernel/bpf/core.c21
-rw-r--r--kernel/bpf/hashtab.c16
-rw-r--r--kernel/bpf/helpers.c94
-rw-r--r--kernel/bpf/inode.c4
-rw-r--r--kernel/bpf/local_storage.c4
-rw-r--r--kernel/bpf/map_in_map.c38
-rw-r--r--kernel/bpf/memalloc.c12
-rw-r--r--kernel/bpf/relo_core.c2
-rw-r--r--kernel/bpf/reuseport_array.c2
-rw-r--r--kernel/bpf/stackmap.c131
-rw-r--r--kernel/bpf/syscall.c228
-rw-r--r--kernel/bpf/token.c78
-rw-r--r--kernel/bpf/verifier.c1401
-rw-r--r--kernel/cgroup/cgroup-internal.h2
-rw-r--r--kernel/cgroup/cgroup.c27
-rw-r--r--kernel/configs/tiny.config6
-rw-r--r--kernel/crash_core.c33
-rw-r--r--kernel/crash_reserve.c3
-rw-r--r--kernel/dma/Kconfig7
-rw-r--r--kernel/dma/Makefile4
-rw-r--r--kernel/dma/direct.c8
-rw-r--r--kernel/dma/dummy.c21
-rw-r--r--kernel/dma/mapping.c154
-rw-r--r--kernel/dma/ops_helpers.c14
-rw-r--r--kernel/dma/pool.c4
-rw-r--r--kernel/dma/remap.c6
-rw-r--r--kernel/dma/swiotlb.c6
-rw-r--r--kernel/events/core.c27
-rw-r--r--kernel/events/uprobes.c35
-rw-r--r--kernel/exit.c57
-rw-r--r--kernel/fork.c23
-rw-r--r--kernel/freezer.c2
-rw-r--r--kernel/futex/core.c1
-rw-r--r--kernel/irq/msi.c2
-rw-r--r--kernel/jump_label.c34
-rw-r--r--kernel/kexec_internal.h3
-rw-r--r--kernel/kthread.c10
-rw-r--r--kernel/locking/lockdep.c53
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/rtmutex.c4
-rw-r--r--kernel/locking/rwsem.c26
-rw-r--r--kernel/locking/test-ww_mutex.c1
-rw-r--r--kernel/locking/ww_mutex.h2
-rw-r--r--kernel/module/Kconfig78
-rw-r--r--kernel/module/debug_kmemleak.c18
-rw-r--r--kernel/module/main.c2
-rw-r--r--kernel/module/sysfs.c63
-rw-r--r--kernel/nsproxy.c12
-rw-r--r--kernel/numa.c26
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/pid.c10
-rw-r--r--kernel/power/user.c1
-rw-r--r--kernel/printk/printk.c11
-rw-r--r--kernel/relay.c1
-rw-r--r--kernel/resource.c71
-rw-r--r--kernel/resource_kunit.c143
-rw-r--r--kernel/sched/build_policy.c11
-rw-r--r--kernel/sched/core.c534
-rw-r--r--kernel/sched/cpufreq_schedutil.c56
-rw-r--r--kernel/sched/deadline.c503
-rw-r--r--kernel/sched/debug.c201
-rw-r--r--kernel/sched/ext.c7215
-rw-r--r--kernel/sched/ext.h91
-rw-r--r--kernel/sched/fair.c801
-rw-r--r--kernel/sched/features.h30
-rw-r--r--kernel/sched/idle.c25
-rw-r--r--kernel/sched/pelt.c20
-rw-r--r--kernel/sched/pelt.h1
-rw-r--r--kernel/sched/rt.c261
-rw-r--r--kernel/sched/sched.h301
-rw-r--r--kernel/sched/stop_task.c18
-rw-r--r--kernel/sched/syscalls.c141
-rw-r--r--kernel/sched/topology.c8
-rw-r--r--kernel/signal.c27
-rw-r--r--kernel/static_call_inline.c13
-rw-r--r--kernel/sys.c12
-rw-r--r--kernel/taskstats.c4
-rw-r--r--kernel/time/hrtimer.c2
-rw-r--r--kernel/time/posix-clock.c1
-rw-r--r--kernel/trace/bpf_trace.c108
-rw-r--r--kernel/trace/ring_buffer.c949
-rw-r--r--kernel/trace/rv/rv.c3
-rw-r--r--kernel/trace/rv/rv_reactors.c1
-rw-r--r--kernel/trace/trace.c375
-rw-r--r--kernel/trace/trace.h14
-rw-r--r--kernel/trace/trace_fprobe.c179
-rw-r--r--kernel/trace/trace_functions_graph.c23
-rw-r--r--kernel/trace/trace_output.c17
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/trace/trace_syscalls.c12
-rw-r--r--kernel/trace/trace_uprobe.c24
-rw-r--r--kernel/tracepoint.c42
-rw-r--r--kernel/user_namespace.c5
-rw-r--r--kernel/vmcore_info.c8
-rw-r--r--kernel/watch_queue.c4
-rw-r--r--kernel/watchdog.c5
-rw-r--r--lib/Kconfig.debug36
-rw-r--r--lib/Makefile1
-rw-r--r--lib/bcd.c4
-rw-r--r--lib/buildid.c397
-rw-r--r--lib/checksum_kunit.c9
-rw-r--r--lib/closure.c2
-rw-r--r--lib/decompress_unxz.c40
-rw-r--r--lib/dim/Makefile2
-rw-r--r--lib/dump_stack.c1
-rw-r--r--lib/dynamic_debug.c4
-rw-r--r--lib/fault-inject.c1
-rw-r--r--lib/fortify_kunit.c3
-rw-r--r--lib/generic-radix-tree.c80
-rw-r--r--lib/glob.c2
-rw-r--r--lib/kunit/Makefile4
-rw-r--r--lib/kunit/platform-test.c224
-rw-r--r--lib/kunit/platform.c302
-rw-r--r--lib/list-test.c4
-rw-r--r--lib/lru_cache.c10
-rw-r--r--lib/lz4/lz4hc_compress.c1
-rw-r--r--lib/maple_tree.c805
-rw-r--r--lib/math/Makefile1
-rw-r--r--lib/math/div64.c115
-rw-r--r--lib/math/test_mul_u64_u64_div_u64.c99
-rw-r--r--lib/percpu_counter.c2
-rw-r--r--lib/rhashtable.c2
-rw-r--r--lib/sbitmap.c4
-rw-r--r--lib/strncpy_from_user.c9
-rw-r--r--lib/strnlen_user.c9
-rw-r--r--lib/test_bits.c34
-rw-r--r--lib/test_fpu_glue.c2
-rw-r--r--lib/test_hmm.c5
-rw-r--r--lib/test_objpool.c3
-rw-r--r--lib/test_printf.c26
-rw-r--r--lib/vsprintf.c21
-rw-r--r--lib/xz/Kconfig13
-rw-r--r--lib/xz/xz_crc32.c11
-rw-r--r--lib/xz/xz_dec_bcj.c191
-rw-r--r--lib/xz/xz_dec_lzma2.c15
-rw-r--r--lib/xz/xz_dec_stream.c13
-rw-r--r--lib/xz/xz_dec_syms.c14
-rw-r--r--lib/xz/xz_dec_test.c12
-rw-r--r--lib/xz/xz_lzma2.h5
-rw-r--r--lib/xz/xz_private.h40
-rw-r--r--lib/xz/xz_stream.h5
-rw-r--r--lib/zstd/compress/zstd_compress.c2
-rw-r--r--lib/zstd/zstd_compress_module.c49
-rw-r--r--lib/zstd/zstd_decompress_module.c36
-rw-r--r--mm/Kconfig87
-rw-r--r--mm/Makefile8
-rw-r--r--mm/cma.c57
-rw-r--r--mm/compaction.c47
-rw-r--r--mm/damon/Kconfig2
-rw-r--r--mm/damon/core.c24
-rw-r--r--mm/damon/dbgfs.c2
-rw-r--r--mm/damon/sysfs.c2
-rw-r--r--mm/damon/tests/.kunitconfig22
-rw-r--r--mm/damon/tests/core-kunit.h (renamed from mm/damon/core-test.h)35
-rw-r--r--mm/damon/tests/dbgfs-kunit.h (renamed from mm/damon/dbgfs-test.h)10
-rw-r--r--mm/damon/tests/sysfs-kunit.h (renamed from mm/damon/sysfs-test.h)0
-rw-r--r--mm/damon/tests/vaddr-kunit.h (renamed from mm/damon/vaddr-test.h)2
-rw-r--r--mm/damon/vaddr.c4
-rw-r--r--mm/debug.c31
-rw-r--r--mm/debug_vm_pgtable.c50
-rw-r--r--mm/fadvise.c4
-rw-r--r--mm/fail_page_alloc.c1
-rw-r--r--mm/failslab.c1
-rw-r--r--mm/filemap.c113
-rw-r--r--mm/folio-compat.c12
-rw-r--r--mm/gup.c69
-rw-r--r--mm/huge_memory.c657
-rw-r--r--mm/hugetlb.c479
-rw-r--r--mm/hugetlb_cgroup.c4
-rw-r--r--mm/hugetlb_vmemmap.c40
-rw-r--r--mm/internal.h227
-rw-r--r--mm/kasan/Makefile8
-rw-r--r--mm/kasan/kasan.h6
-rw-r--r--mm/kasan/kasan_test_c.c (renamed from mm/kasan/kasan_test.c)11
-rw-r--r--mm/kasan/kasan_test_rust.rs21
-rw-r--r--mm/kfence/core.c53
-rw-r--r--mm/kfence/kfence.h1
-rw-r--r--mm/kfence/report.c15
-rw-r--r--mm/khugepaged.c75
-rw-r--r--mm/kmemleak.c159
-rw-r--r--mm/ksm.c146
-rw-r--r--mm/madvise.c15
-rw-r--r--mm/memblock.c19
-rw-r--r--mm/memcontrol-v1.c138
-rw-r--r--mm/memcontrol-v1.h26
-rw-r--r--mm/memcontrol.c490
-rw-r--r--mm/memfd.c18
-rw-r--r--mm/memory-failure.c92
-rw-r--r--mm/memory-tiers.c31
-rw-r--r--mm/memory.c570
-rw-r--r--mm/memory_hotplug.c85
-rw-r--r--mm/mempolicy.c8
-rw-r--r--mm/migrate.c272
-rw-r--r--mm/migrate_device.c108
-rw-r--r--mm/mm_init.c12
-rw-r--r--mm/mmap.c2160
-rw-r--r--mm/mmu_notifier.c2
-rw-r--r--mm/mmzone.c2
-rw-r--r--mm/mprotect.c86
-rw-r--r--mm/mremap.c32
-rw-r--r--mm/mseal.c55
-rw-r--r--mm/nommu.c11
-rw-r--r--mm/numa.c69
-rw-r--r--mm/numa_emulation.c (renamed from arch/x86/mm/numa_emulation.c)42
-rw-r--r--mm/numa_memblks.c571
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c351
-rw-r--r--mm/page_counter.c48
-rw-r--r--mm/page_io.c113
-rw-r--r--mm/page_isolation.c36
-rw-r--r--mm/pagewalk.c202
-rw-r--r--mm/percpu.c31
-rw-r--r--mm/readahead.c93
-rw-r--r--mm/rmap.c71
-rw-r--r--mm/shmem.c457
-rw-r--r--mm/shmem_quota.c3
-rw-r--r--mm/show_mem.c11
-rw-r--r--mm/shrinker_debug.c2
-rw-r--r--mm/slab_common.c27
-rw-r--r--mm/swap.c298
-rw-r--r--mm/swap.h44
-rw-r--r--mm/swap_cgroup.c2
-rw-r--r--mm/swap_state.c78
-rw-r--r--mm/swapfile.c1482
-rw-r--r--mm/userfaultfd.c170
-rw-r--r--mm/util.c102
-rw-r--r--mm/vma.c2068
-rw-r--r--mm/vma.h558
-rw-r--r--mm/vma_internal.h49
-rw-r--r--mm/vmalloc.c139
-rw-r--r--mm/vmscan.c69
-rw-r--r--mm/vmstat.c28
-rw-r--r--mm/z3fold.c2
-rw-r--r--mm/zsmalloc.c38
-rw-r--r--mm/zswap.c307
-rw-r--r--net/9p/Kconfig6
-rw-r--r--net/9p/Makefile4
-rw-r--r--net/9p/trans_usbg.c956
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c2
-rw-r--r--net/ceph/messenger.c2
-rw-r--r--net/core/filter.c75
-rw-r--r--net/core/net_namespace.c6
-rw-r--r--net/core/sock_map.c23
-rw-r--r--net/ipv4/bpf_tcp_ca.c26
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c10
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c19
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c141
-rw-r--r--net/netfilter/nf_conntrack_netlink.c9
-rw-r--r--net/netfilter/nf_nat_core.c121
-rw-r--r--net/netfilter/nf_tables_api.c6
-rw-r--r--net/netfilter/nft_compat.c6
-rw-r--r--net/netfilter/nft_log.c2
-rw-r--r--net/netfilter/nft_meta.c2
-rw-r--r--net/netfilter/nft_numgen.c2
-rw-r--r--net/netfilter/nft_set_pipapo.c13
-rw-r--r--net/netfilter/nft_tunnel.c5
-rw-r--r--net/qrtr/af_qrtr.c2
-rw-r--r--net/rfkill/core.c1
-rw-r--r--net/socket.c15
-rw-r--r--net/sunrpc/cache.c14
-rw-r--r--net/sunrpc/clnt.c13
-rw-r--r--net/sunrpc/rpc_pipe.c1
-rw-r--r--net/sunrpc/sunrpc.h4
-rw-r--r--net/sunrpc/svc.c198
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/svcauth.c29
-rw-r--r--net/sunrpc/svcauth_unix.c3
-rw-r--r--net/sunrpc/svcsock.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c18
-rw-r--r--net/vmw_vsock/virtio_transport.c144
-rw-r--r--net/xdp/xsk.c23
-rw-r--r--rust/Makefile56
-rw-r--r--rust/bindings/bindings_helper.h2
-rw-r--r--rust/exports.c1
-rw-r--r--rust/helpers.c239
-rw-r--r--rust/helpers/blk.c14
-rw-r--r--rust/helpers/bug.c8
-rw-r--r--rust/helpers/build_assert.c25
-rw-r--r--rust/helpers/build_bug.c9
-rw-r--r--rust/helpers/err.c19
-rw-r--r--rust/helpers/helpers.c26
-rw-r--r--rust/helpers/kunit.c9
-rw-r--r--rust/helpers/mutex.c9
-rw-r--r--rust/helpers/page.c19
-rw-r--r--rust/helpers/rbtree.c9
-rw-r--r--rust/helpers/refcount.c19
-rw-r--r--rust/helpers/signal.c9
-rw-r--r--rust/helpers/slab.c9
-rw-r--r--rust/helpers/spinlock.c24
-rw-r--r--rust/helpers/task.c19
-rw-r--r--rust/helpers/uaccess.c15
-rw-r--r--rust/helpers/wait.c9
-rw-r--r--rust/helpers/workqueue.c15
-rw-r--r--rust/kernel/alloc/box_ext.rs33
-rw-r--r--rust/kernel/error.rs5
-rw-r--r--rust/kernel/init.rs191
-rw-r--r--rust/kernel/init/__internal.rs29
-rw-r--r--rust/kernel/lib.rs2
-rw-r--r--rust/kernel/list.rs686
-rw-r--r--rust/kernel/list/arc.rs521
-rw-r--r--rust/kernel/list/arc_field.rs96
-rw-r--r--rust/kernel/list/impl_list_item_mod.rs274
-rw-r--r--rust/kernel/prelude.rs2
-rw-r--r--rust/kernel/print.rs20
-rw-r--r--rust/kernel/rbtree.rs1278
-rw-r--r--rust/kernel/std_vendor.rs2
-rw-r--r--rust/kernel/sync/arc.rs25
-rw-r--r--rust/kernel/types.rs63
-rw-r--r--rust/macros/lib.rs4
-rw-r--r--rust/macros/module.rs12
-rw-r--r--samples/bpf/Makefile9
-rw-r--r--samples/bpf/tracex2.bpf.c99
-rw-r--r--samples/bpf/tracex2_user.c187
-rw-r--r--samples/bpf/tracex4.bpf.c4
-rw-r--r--samples/kmemleak/kmemleak-test.c2
-rw-r--r--samples/landlock/sandboxer.c73
-rw-r--r--samples/vfio-mdev/mtty.c2
-rw-r--r--scripts/Kconfig.include8
-rw-r--r--scripts/Makefile.build67
-rw-r--r--scripts/Makefile.compiler15
-rw-r--r--scripts/Makefile.dtbs142
-rw-r--r--scripts/Makefile.host5
-rw-r--r--scripts/Makefile.kasan84
-rw-r--r--scripts/Makefile.lib135
-rw-r--r--scripts/Makefile.modfinal9
-rw-r--r--scripts/Makefile.modinst10
-rw-r--r--scripts/Makefile.package3
-rw-r--r--scripts/Makefile.vmlinux18
-rw-r--r--scripts/Makefile.vmlinux_o3
-rw-r--r--scripts/basic/fixdep.c15
-rw-r--r--scripts/coccinelle/api/stream_open.cocci1
-rw-r--r--scripts/coccinelle/api/string_choices.cocci259
-rwxr-xr-xscripts/decode_stacktrace.sh51
-rw-r--r--scripts/gdb/linux/kasan.py44
-rw-r--r--scripts/gdb/linux/proc.py4
-rw-r--r--scripts/gdb/linux/rbtree.py12
-rw-r--r--scripts/gdb/linux/stackdepot.py27
-rw-r--r--scripts/gdb/linux/timerlist.py31
-rw-r--r--scripts/gdb/vmlinux-gdb.py1
-rwxr-xr-xscripts/generate_builtin_ranges.awk508
-rw-r--r--scripts/generate_rust_target.rs98
-rw-r--r--scripts/include/hash.h28
-rw-r--r--scripts/include/hashtable.h50
-rw-r--r--scripts/include/list.h69
-rw-r--r--scripts/include/xalloc.h53
-rw-r--r--scripts/kallsyms.c46
-rw-r--r--scripts/kconfig/confdata.c3
-rw-r--r--scripts/kconfig/expr.c482
-rw-r--r--scripts/kconfig/expr.h27
-rw-r--r--scripts/kconfig/internal.h6
-rw-r--r--scripts/kconfig/lexer.l1
-rw-r--r--scripts/kconfig/lkc.h6
-rw-r--r--scripts/kconfig/mconf.c1
-rw-r--r--scripts/kconfig/menu.c38
-rw-r--r--scripts/kconfig/nconf.c1
-rw-r--r--scripts/kconfig/nconf.gui.c1
-rw-r--r--scripts/kconfig/parser.y13
-rw-r--r--scripts/kconfig/preprocess.c1
-rw-r--r--scripts/kconfig/qconf.cc2
-rw-r--r--scripts/kconfig/symbol.c9
-rw-r--r--scripts/kconfig/util.c63
-rwxr-xr-xscripts/link-vmlinux.sh16
-rwxr-xr-xscripts/macro_checker.py131
-rw-r--r--scripts/mod/devicetable-offsets.c4
-rw-r--r--scripts/mod/file2alias.c11
-rw-r--r--scripts/mod/mk_elfconfig.c25
-rw-r--r--scripts/mod/modpost.c125
-rw-r--r--scripts/mod/modpost.h28
-rw-r--r--scripts/mod/sumversion.c6
-rw-r--r--scripts/mod/symsearch.c6
-rw-r--r--scripts/module-common.c25
-rw-r--r--scripts/package/PKGBUILD52
-rwxr-xr-xscripts/package/install-extmod-build55
-rwxr-xr-xscripts/rustc-version.sh26
-rw-r--r--scripts/sign-file.c132
-rw-r--r--scripts/ssl-common.h32
-rw-r--r--scripts/subarch.include2
-rwxr-xr-xscripts/verify_builtin_ranges.awk370
-rwxr-xr-xscripts/xz_wrap.sh158
-rw-r--r--security/bpf/hooks.c1
-rw-r--r--security/integrity/ima/ima_main.c4
-rw-r--r--security/ipe/policy_tests.c1
-rw-r--r--security/landlock/cred.h2
-rw-r--r--security/landlock/fs.c25
-rw-r--r--security/landlock/fs.h7
-rw-r--r--security/landlock/limits.h3
-rw-r--r--security/landlock/ruleset.c7
-rw-r--r--security/landlock/ruleset.h24
-rw-r--r--security/landlock/syscalls.c39
-rw-r--r--security/landlock/task.c193
-rw-r--r--security/loadpin/loadpin.c4
-rw-r--r--security/security.c2
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--security/smack/smack_lsm.c13
-rw-r--r--security/smack/smack_netfilter.c4
-rw-r--r--security/smack/smackfs.c2
-rw-r--r--security/tomoyo/Kconfig15
-rw-r--r--security/tomoyo/Makefile8
-rw-r--r--security/tomoyo/common.c14
-rw-r--r--security/tomoyo/common.h72
-rw-r--r--security/tomoyo/domain.c9
-rw-r--r--security/tomoyo/gc.c3
-rw-r--r--security/tomoyo/hooks.h (renamed from security/tomoyo/tomoyo.c)110
-rw-r--r--security/tomoyo/init.c366
-rw-r--r--security/tomoyo/load_policy.c12
-rw-r--r--security/tomoyo/proxy.c82
-rw-r--r--security/tomoyo/securityfs_if.c10
-rw-r--r--security/tomoyo/util.c3
-rw-r--r--sound/core/control.c1
-rw-r--r--sound/core/oss/mixer_oss.c1
-rw-r--r--sound/core/oss/pcm_oss.c1
-rw-r--r--sound/core/pcm_native.c8
-rw-r--r--sound/core/rawmidi.c1
-rw-r--r--sound/core/seq/seq_clientmgr.c1
-rw-r--r--sound/core/timer.c1
-rw-r--r--sound/firewire/amdtp-stream.c34
-rw-r--r--sound/firewire/bebob/bebob_pcm.c1
-rw-r--r--sound/firewire/dice/dice-pcm.c1
-rw-r--r--sound/firewire/digi00x/digi00x-pcm.c1
-rw-r--r--sound/firewire/fireface/ff-pcm.c1
-rw-r--r--sound/firewire/fireworks/fireworks_pcm.c1
-rw-r--r--sound/firewire/isight.c1
-rw-r--r--sound/firewire/motu/motu-pcm.c1
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c1
-rw-r--r--sound/firewire/tascam/tascam-pcm.c1
-rw-r--r--sound/oss/dmasound/dmasound_core.c3
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/soc/cirrus/Kconfig9
-rw-r--r--sound/soc/cirrus/Makefile4
-rw-r--r--sound/soc/cirrus/edb93xx.c116
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c19
-rw-r--r--sound/soc/cirrus/ep93xx-pcm.c19
-rw-r--r--sound/soc/intel/avs/debugfs.c3
-rw-r--r--tools/Makefile10
-rw-r--r--tools/arch/riscv/include/asm/barrier.h39
-rw-r--r--tools/arch/riscv/include/asm/fence.h13
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst24
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool2
-rw-r--r--tools/bpf/bpftool/btf.c87
-rw-r--r--tools/bpf/bpftool/feature.c10
-rw-r--r--tools/bpf/bpftool/net.c80
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c4
-rw-r--r--tools/bpf/runqslower/Makefile3
-rw-r--r--tools/build/Build3
-rw-r--r--tools/build/Makefile11
-rw-r--r--tools/build/Makefile.feature2
-rw-r--r--tools/build/Makefile.include12
-rw-r--r--tools/build/feature/Makefile11
-rw-r--r--tools/build/feature/test-all.c4
-rw-r--r--tools/build/feature/test-llvm-perf.cpp14
-rw-r--r--tools/iio/Makefile2
-rw-r--r--tools/iio/iio_generic_buffer.c4
-rw-r--r--tools/include/asm/barrier.h2
-rw-r--r--tools/include/linux/compiler.h4
-rw-r--r--tools/include/linux/coresight-pmu.h17
-rw-r--r--tools/include/linux/init.h (renamed from tools/testing/memblock/linux/init.h)19
-rw-r--r--tools/include/linux/linkage.h6
-rw-r--r--tools/include/linux/mm.h6
-rw-r--r--tools/include/linux/pfn.h1
-rw-r--r--tools/include/linux/ring_buffer.h2
-rw-r--r--tools/include/linux/string.h5
-rw-r--r--tools/include/uapi/linux/bpf.h9
-rw-r--r--tools/lib/api/Makefile4
-rw-r--r--tools/lib/api/fs/tracing_path.c2
-rw-r--r--tools/lib/bpf/.gitignore1
-rw-r--r--tools/lib/bpf/Makefile13
-rw-r--r--tools/lib/bpf/bpf.h4
-rw-r--r--tools/lib/bpf/bpf_helpers.h2
-rw-r--r--tools/lib/bpf/bpf_tracing.h25
-rw-r--r--tools/lib/bpf/btf.c8
-rw-r--r--tools/lib/bpf/btf.h2
-rw-r--r--tools/lib/bpf/btf_dump.c2
-rw-r--r--tools/lib/bpf/btf_relocate.c2
-rw-r--r--tools/lib/bpf/elf.c3
-rw-r--r--tools/lib/bpf/libbpf.c88
-rw-r--r--tools/lib/bpf/libbpf.h18
-rw-r--r--tools/lib/bpf/libbpf.map1
-rw-r--r--tools/lib/bpf/libbpf_legacy.h4
-rw-r--r--tools/lib/bpf/linker.c4
-rw-r--r--tools/lib/bpf/skel_internal.h2
-rw-r--r--tools/lib/bpf/usdt.bpf.h2
-rw-r--r--tools/lib/cmdline.c53
-rw-r--r--tools/lib/perf/.gitignore5
-rw-r--r--tools/lib/string.c13
-rw-r--r--tools/lib/subcmd/Makefile6
-rw-r--r--tools/lib/subcmd/parse-options.c8
-rw-r--r--tools/lib/symbol/Makefile4
-rw-r--r--tools/mm/Makefile2
-rw-r--r--tools/mm/page-types.c13
-rw-r--r--tools/net/sunrpc/xdrgen/.gitignore2
-rw-r--r--tools/net/sunrpc/xdrgen/README244
-rw-r--r--tools/net/sunrpc/xdrgen/__init__.py2
-rw-r--r--tools/net/sunrpc/xdrgen/generators/__init__.py113
-rw-r--r--tools/net/sunrpc/xdrgen/generators/constant.py20
-rw-r--r--tools/net/sunrpc/xdrgen/generators/enum.py44
-rw-r--r--tools/net/sunrpc/xdrgen/generators/header_bottom.py33
-rw-r--r--tools/net/sunrpc/xdrgen/generators/header_top.py45
-rw-r--r--tools/net/sunrpc/xdrgen/generators/pointer.py272
-rw-r--r--tools/net/sunrpc/xdrgen/generators/program.py168
-rw-r--r--tools/net/sunrpc/xdrgen/generators/source_top.py32
-rw-r--r--tools/net/sunrpc/xdrgen/generators/struct.py272
-rw-r--r--tools/net/sunrpc/xdrgen/generators/typedef.py255
-rw-r--r--tools/net/sunrpc/xdrgen/generators/union.py243
-rw-r--r--tools/net/sunrpc/xdrgen/grammars/xdr.lark119
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/__init__.py2
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/declarations.py76
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/definitions.py78
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/lint.py33
-rw-r--r--tools/net/sunrpc/xdrgen/subcmds/source.py118
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/constants/definition.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/declaration/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/decoder/enum.j219
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/definition/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/definition/enumerator.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/definition/open.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/enum/encoder/enum.j214
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/header_bottom/declaration/header.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/header_bottom/definition/header.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/header_top/declaration/header.j214
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/header_top/definition/header.j210
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/declaration/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/basic.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_array.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/open.j222
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_array.j213
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_string.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/basic.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_array.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_opaque.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/open.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/optional_data.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_array.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_opaque.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_string.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/basic.j210
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_array.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/open.j220
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_array.j215
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_opaque.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_string.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/declaration/argument.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/declaration/result.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/decoder/argument.j221
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/decoder/result.j222
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/definition/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/definition/open.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/definition/procedure.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/encoder/argument.j216
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/program/encoder/result.j221
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/source_top/client.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/source_top/server.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/declaration/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/basic.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_array.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/open.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_array.j213
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_string.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/basic.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/close.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_array.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_opaque.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/open.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/optional_data.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_array.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_opaque.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_string.j25
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/basic.j210
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_array.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/open.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_array.j215
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_opaque.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_string.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/basic.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_array.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_opaque.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_array.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_opaque.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_string.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j225
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j226
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_string.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/basic.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_array.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_array.j29
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_string.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j221
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j225
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j230
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_string.j217
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/basic.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/break.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/case_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/default_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/open.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/optional_data.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/switch_spec.j27
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_array.j213
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_opaque.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_string.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/decoder/void.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/case_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/close.j28
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/default_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/open.j26
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/definition/switch_spec.j23
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/basic.j210
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/break.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/case_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j24
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/default_spec.j22
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/open.j212
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/switch_spec.j27
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/union/encoder/void.j23
-rw-r--r--tools/net/sunrpc/xdrgen/tests/test.x36
-rw-r--r--tools/net/sunrpc/xdrgen/xdr_ast.py510
-rw-r--r--tools/net/sunrpc/xdrgen/xdr_parse.py36
-rwxr-xr-xtools/net/sunrpc/xdrgen/xdrgen132
-rw-r--r--tools/objtool/arch/loongarch/decode.c11
-rw-r--r--tools/objtool/check.c75
-rw-r--r--tools/objtool/include/objtool/elf.h1
-rw-r--r--tools/objtool/noreturns.h2
-rw-r--r--tools/pci/Makefile2
-rw-r--r--tools/pci/pcitest.c2
-rw-r--r--tools/perf/Build1
-rw-r--r--tools/perf/Documentation/perf-annotate.txt3
-rw-r--r--tools/perf/Documentation/perf-check.txt82
-rw-r--r--tools/perf/Documentation/perf-ftrace.txt48
-rw-r--r--tools/perf/Documentation/perf-kvm.txt6
-rw-r--r--tools/perf/Documentation/perf-list.txt1
-rw-r--r--tools/perf/Documentation/perf-mem.txt94
-rw-r--r--tools/perf/Documentation/perf-record.txt14
-rw-r--r--tools/perf/Documentation/perf-report.txt1
-rw-r--r--tools/perf/Documentation/perf-sched.txt9
-rw-r--r--tools/perf/Documentation/perf-script.txt5
-rw-r--r--tools/perf/Documentation/perf-stat.txt8
-rw-r--r--tools/perf/Documentation/perf-top.txt4
-rw-r--r--tools/perf/Documentation/perf-trace.txt4
-rw-r--r--tools/perf/Documentation/topdown.txt30
-rw-r--r--tools/perf/Makefile8
-rw-r--r--tools/perf/Makefile.config51
-rw-r--r--tools/perf/Makefile.perf6
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c12
-rw-r--r--tools/perf/arch/arm/util/pmu.c3
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c3
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c108
-rw-r--r--tools/perf/arch/arm64/util/hisi-ptt.c1
-rw-r--r--tools/perf/arch/loongarch/annotate/instructions.c6
-rw-r--r--tools/perf/arch/powerpc/annotate/instructions.c254
-rw-r--r--tools/perf/arch/powerpc/util/dwarf-regs.c53
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c5
-rw-r--r--tools/perf/arch/x86/Makefile6
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c389
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_32.tbl470
-rw-r--r--tools/perf/arch/x86/util/event.c4
-rw-r--r--tools/perf/arch/x86/util/evlist.c6
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c1
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c1
-rw-r--r--tools/perf/bench/synthesize.c2
-rw-r--r--tools/perf/builtin-annotate.c77
-rw-r--r--tools/perf/builtin-buildid-list.c10
-rw-r--r--tools/perf/builtin-c2c.c47
-rw-r--r--tools/perf/builtin-check.c180
-rw-r--r--tools/perf/builtin-daemon.c2
-rw-r--r--tools/perf/builtin-diff.c38
-rw-r--r--tools/perf/builtin-evlist.c18
-rw-r--r--tools/perf/builtin-ftrace.c462
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-inject.c739
-rw-r--r--tools/perf/builtin-kmem.c22
-rw-r--r--tools/perf/builtin-kvm.c22
-rw-r--r--tools/perf/builtin-kwork.c36
-rw-r--r--tools/perf/builtin-list.c2
-rw-r--r--tools/perf/builtin-lock.c44
-rw-r--r--tools/perf/builtin-mem.c161
-rw-r--r--tools/perf/builtin-record.c79
-rw-r--r--tools/perf/builtin-report.c106
-rw-r--r--tools/perf/builtin-sched.c232
-rw-r--r--tools/perf/builtin-script.c184
-rw-r--r--tools/perf/builtin-stat.c42
-rw-r--r--tools/perf/builtin-timechart.c25
-rw-r--r--tools/perf/builtin-top.c10
-rw-r--r--tools/perf/builtin-trace.c497
-rw-r--r--tools/perf/builtin-version.c43
-rw-r--r--tools/perf/builtin.h17
-rwxr-xr-xtools/perf/check-headers.sh1
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/pmu-events/Build12
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/instruction.json3
-rw-r--r--tools/perf/pmu-events/arch/arm64/thead/yitian710/sys/ali_drw.json (renamed from tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/ali_drw.json)0
-rw-r--r--tools/perf/pmu-events/arch/arm64/thead/yitian710/sys/metrics.json (renamed from tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/metrics.json)0
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/cache.json20
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/datasource.json40
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/frontend.json30
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/locks.json10
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/memory.json30
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/others.json106
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pipeline.json45
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pmc.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/metricgroups.json142
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json2535
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json57
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c894
-rwxr-xr-xtools/perf/pmu-events/jevents.py27
-rwxr-xr-xtools/perf/pmu-events/models.py73
-rw-r--r--tools/perf/pmu-events/pmu-events.h9
-rwxr-xr-xtools/perf/scripts/python/arm-cs-trace-disasm.py9
-rw-r--r--tools/perf/tests/bp_account.c4
-rw-r--r--tools/perf/tests/bp_signal.c3
-rw-r--r--tools/perf/tests/bp_signal_overflow.c3
-rw-r--r--tools/perf/tests/builtin-test.c1
-rw-r--r--tools/perf/tests/cpumap.c6
-rw-r--r--tools/perf/tests/dlfilter-test.c2
-rw-r--r--tools/perf/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/tests/event_update.c9
-rw-r--r--tools/perf/tests/make4
-rw-r--r--tools/perf/tests/parse-events.c6
-rw-r--r--tools/perf/tests/pmu-events.c12
-rw-r--r--tools/perf/tests/pmu.c9
-rwxr-xr-xtools/perf/tests/shell/annotate.sh3
-rw-r--r--tools/perf/tests/shell/base_probe/settings.sh48
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_blacklisted.sh67
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_adding_kernel.sh3
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_basic.sh80
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_invalid_options.sh79
-rwxr-xr-xtools/perf/tests/shell/base_probe/test_line_semantics.sh55
-rwxr-xr-xtools/perf/tests/shell/base_report/setup.sh32
-rw-r--r--tools/perf/tests/shell/base_report/stderr-whitelist.txt5
-rwxr-xr-xtools/perf/tests/shell/base_report/test_basic.sh190
-rwxr-xr-xtools/perf/tests/shell/common/check_errors_whitelisted.pl51
-rw-r--r--tools/perf/tests/shell/common/init.sh31
-rw-r--r--tools/perf/tests/shell/common/settings.sh28
-rwxr-xr-xtools/perf/tests/shell/ftrace.sh89
-rw-r--r--tools/perf/tests/shell/lib/perf_metric_validation.py10
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh11
-rwxr-xr-xtools/perf/tests/shell/perftool-testsuite_report.sh23
-rwxr-xr-xtools/perf/tests/shell/pipe_test.sh129
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh5
-rwxr-xr-xtools/perf/tests/shell/record+script_probe_vfs_getname.sh5
-rwxr-xr-xtools/perf/tests/shell/record.sh59
-rwxr-xr-xtools/perf/tests/shell/record_bpf_filter.sh86
-rwxr-xr-xtools/perf/tests/shell/record_lbr.sh161
-rwxr-xr-xtools/perf/tests/shell/script.sh3
-rwxr-xr-xtools/perf/tests/shell/test_stat_intel_tpebs.sh19
-rwxr-xr-xtools/perf/tests/shell/test_task_analyzer.sh7
-rwxr-xr-xtools/perf/tests/shell/test_uprobe_from_different_cu.sh7
-rwxr-xr-xtools/perf/tests/shell/trace_btf_enum.sh62
-rw-r--r--tools/perf/tests/stat.c6
-rw-r--r--tools/perf/tests/tests-scripts.c37
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/tests/thread-map.c2
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c4
-rw-r--r--tools/perf/tests/workloads/Build1
-rw-r--r--tools/perf/tests/workloads/landlock.c66
-rw-r--r--tools/perf/tests/wp.c5
-rw-r--r--tools/perf/trace/beauty/beauty.h11
-rw-r--r--tools/perf/trace/beauty/perf_event_open.c6
-rw-r--r--tools/perf/trace/beauty/sockaddr.c2
-rw-r--r--tools/perf/trace/beauty/timespec.c2
-rw-r--r--tools/perf/ui/browsers/annotate-data.c376
-rw-r--r--tools/perf/ui/browsers/annotate.c20
-rw-r--r--tools/perf/ui/browsers/hists.c18
-rw-r--r--tools/perf/ui/hist.c10
-rw-r--r--tools/perf/ui/stdio/hist.c4
-rw-r--r--tools/perf/util/Build10
-rw-r--r--tools/perf/util/annotate-data.c1164
-rw-r--r--tools/perf/util/annotate-data.h86
-rw-r--r--tools/perf/util/annotate.c360
-rw-r--r--tools/perf/util/annotate.h33
-rw-r--r--tools/perf/util/arm-spe.c55
-rw-r--r--tools/perf/util/auxtrace.c16
-rw-r--r--tools/perf/util/auxtrace.h21
-rw-r--r--tools/perf/util/block-info.c66
-rw-r--r--tools/perf/util/block-info.h8
-rw-r--r--tools/perf/util/bpf-event.c4
-rw-r--r--tools/perf/util/bpf-filter.c631
-rw-r--r--tools/perf/util/bpf-filter.h19
-rw-r--r--tools/perf/util/bpf-filter.l28
-rw-r--r--tools/perf/util/bpf-filter.y28
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c6
-rw-r--r--tools/perf/util/bpf_ftrace.c8
-rw-r--r--tools/perf/util/bpf_kwork.c9
-rw-r--r--tools/perf/util/bpf_kwork_top.c7
-rw-r--r--tools/perf/util/bpf_lock_contention.c45
-rw-r--r--tools/perf/util/bpf_map.c3
-rw-r--r--tools/perf/util/bpf_off_cpu.c16
-rw-r--r--tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c222
-rw-r--r--tools/perf/util/bpf_skel/bperf_cgroup.bpf.c2
-rw-r--r--tools/perf/util/bpf_skel/func_latency.bpf.c7
-rw-r--r--tools/perf/util/bpf_skel/kwork_top.bpf.c2
-rw-r--r--tools/perf/util/bpf_skel/kwork_trace.bpf.c5
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c53
-rw-r--r--tools/perf/util/bpf_skel/lock_data.h4
-rw-r--r--tools/perf/util/bpf_skel/off_cpu.bpf.c9
-rw-r--r--tools/perf/util/bpf_skel/sample-filter.h13
-rw-r--r--tools/perf/util/bpf_skel/sample_filter.bpf.c105
-rw-r--r--tools/perf/util/bpf_skel/vmlinux/vmlinux.h7
-rw-r--r--tools/perf/util/branch.h1
-rw-r--r--tools/perf/util/build-id.c40
-rw-r--r--tools/perf/util/build-id.h8
-rw-r--r--tools/perf/util/callchain.c35
-rw-r--r--tools/perf/util/callchain.h6
-rw-r--r--tools/perf/util/cap.c63
-rw-r--r--tools/perf/util/cap.h23
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c36
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h2
-rw-r--r--tools/perf/util/cs-etm.c675
-rw-r--r--tools/perf/util/cs-etm.h12
-rw-r--r--tools/perf/util/data-convert-bt.c34
-rw-r--r--tools/perf/util/data-convert-json.c47
-rw-r--r--tools/perf/util/data.c7
-rw-r--r--tools/perf/util/debuginfo.h2
-rw-r--r--tools/perf/util/disasm.c852
-rw-r--r--tools/perf/util/disasm.h19
-rw-r--r--tools/perf/util/disasm_bpf.c195
-rw-r--r--tools/perf/util/disasm_bpf.h12
-rw-r--r--tools/perf/util/dso.c4
-rw-r--r--tools/perf/util/dso.h4
-rw-r--r--tools/perf/util/dsos.c12
-rw-r--r--tools/perf/util/dsos.h2
-rw-r--r--tools/perf/util/dump-insn.c2
-rw-r--r--tools/perf/util/dump-insn.h2
-rw-r--r--tools/perf/util/dwarf-aux.c18
-rw-r--r--tools/perf/util/dwarf-aux.h2
-rw-r--r--tools/perf/util/env.c15
-rw-r--r--tools/perf/util/env.h3
-rw-r--r--tools/perf/util/event.c54
-rw-r--r--tools/perf/util/event.h38
-rw-r--r--tools/perf/util/events_stats.h15
-rw-r--r--tools/perf/util/evlist.c89
-rw-r--r--tools/perf/util/evlist.h7
-rw-r--r--tools/perf/util/evsel.c122
-rw-r--r--tools/perf/util/evsel.h27
-rw-r--r--tools/perf/util/evsel_fprintf.c2
-rw-r--r--tools/perf/util/ftrace.h3
-rw-r--r--tools/perf/util/header.c157
-rw-r--r--tools/perf/util/header.h25
-rw-r--r--tools/perf/util/hisi-ptt.c6
-rw-r--r--tools/perf/util/hist.c63
-rw-r--r--tools/perf/util/hist.h4
-rw-r--r--tools/perf/util/include/dwarf-regs.h11
-rw-r--r--tools/perf/util/intel-bts.c37
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c5
-rw-r--r--tools/perf/util/intel-pt.c30
-rw-r--r--tools/perf/util/intel-tpebs.c432
-rw-r--r--tools/perf/util/intel-tpebs.h35
-rw-r--r--tools/perf/util/jit.h3
-rw-r--r--tools/perf/util/jitdump.c10
-rw-r--r--tools/perf/util/llvm-c-helpers.cpp197
-rw-r--r--tools/perf/util/llvm-c-helpers.h60
-rw-r--r--tools/perf/util/machine.c120
-rw-r--r--tools/perf/util/machine.h36
-rw-r--r--tools/perf/util/map.c25
-rw-r--r--tools/perf/util/map.h22
-rw-r--r--tools/perf/util/map_symbol.c18
-rw-r--r--tools/perf/util/map_symbol.h3
-rw-r--r--tools/perf/util/mem-events.c20
-rw-r--r--tools/perf/util/mem-events.h4
-rw-r--r--tools/perf/util/mem-info.c13
-rw-r--r--tools/perf/util/mem-info.h1
-rw-r--r--tools/perf/util/metricgroup.c10
-rw-r--r--tools/perf/util/mmap.c4
-rw-r--r--tools/perf/util/parse-events.c69
-rw-r--r--tools/perf/util/parse-events.h11
-rw-r--r--tools/perf/util/parse-events.l3
-rw-r--r--tools/perf/util/pmu.c75
-rw-r--r--tools/perf/util/pmu.h8
-rw-r--r--tools/perf/util/pmus.c22
-rw-r--r--tools/perf/util/pmus.h1
-rw-r--r--tools/perf/util/print-events.c3
-rw-r--r--tools/perf/util/print_insn.c14
-rw-r--r--tools/perf/util/s390-cpumsf.c11
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c16
-rw-r--r--tools/perf/util/session.c394
-rw-r--r--tools/perf/util/session.h61
-rw-r--r--tools/perf/util/setup.py4
-rw-r--r--tools/perf/util/sort.c66
-rw-r--r--tools/perf/util/sort.h3
-rw-r--r--tools/perf/util/srcline.c59
-rw-r--r--tools/perf/util/stat-display.c3
-rw-r--r--tools/perf/util/stat-shadow.c2
-rw-r--r--tools/perf/util/symbol.c8
-rw-r--r--tools/perf/util/symbol_conf.h2
-rw-r--r--tools/perf/util/synthetic-events.c181
-rw-r--r--tools/perf/util/synthetic-events.h89
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/thread.c4
-rw-r--r--tools/perf/util/thread.h1
-rw-r--r--tools/perf/util/time-utils.c4
-rw-r--r--tools/perf/util/tool.c294
-rw-r--r--tools/perf/util/tool.h19
-rw-r--r--tools/perf/util/trace_augment.h6
-rw-r--r--tools/perf/util/tsc.c2
-rw-r--r--tools/perf/util/util.c12
-rw-r--r--tools/power/cpupower/bindings/python/.gitignore3
-rw-r--r--tools/power/cpupower/bindings/python/Makefile4
-rw-r--r--tools/power/cpupower/bindings/python/raw_pylibcpupower.swg (renamed from tools/power/cpupower/bindings/python/raw_pylibcpupower.i)0
-rw-r--r--tools/sched_ext/.gitignore2
-rw-r--r--tools/sched_ext/Makefile246
-rw-r--r--tools/sched_ext/README.md270
-rw-r--r--tools/sched_ext/include/bpf-compat/gnu/stubs.h11
-rw-r--r--tools/sched_ext/include/scx/common.bpf.h427
-rw-r--r--tools/sched_ext/include/scx/common.h75
-rw-r--r--tools/sched_ext/include/scx/compat.bpf.h47
-rw-r--r--tools/sched_ext/include/scx/compat.h186
-rw-r--r--tools/sched_ext/include/scx/user_exit_info.h115
-rw-r--r--tools/sched_ext/scx_central.bpf.c361
-rw-r--r--tools/sched_ext/scx_central.c135
-rw-r--r--tools/sched_ext/scx_flatcg.bpf.c957
-rw-r--r--tools/sched_ext/scx_flatcg.c233
-rw-r--r--tools/sched_ext/scx_flatcg.h51
-rw-r--r--tools/sched_ext/scx_qmap.bpf.c827
-rw-r--r--tools/sched_ext/scx_qmap.c153
-rw-r--r--tools/sched_ext/scx_show_state.py40
-rw-r--r--tools/sched_ext/scx_simple.bpf.c156
-rw-r--r--tools/sched_ext/scx_simple.c107
-rw-r--r--tools/testing/cxl/Kbuild2
-rw-r--r--tools/testing/cxl/mock_acpi.c2
-rw-r--r--tools/testing/cxl/test/mem.c44
-rw-r--r--tools/testing/cxl/test/mock.c10
-rwxr-xr-x[-rw-r--r--]tools/testing/fault-injection/failcmd.sh12
-rwxr-xr-xtools/testing/ktest/ktest.pl28
-rw-r--r--tools/testing/memblock/Makefile2
-rw-r--r--tools/testing/memblock/internal.h2
-rw-r--r--tools/testing/memblock/linux/kernel.h2
-rw-r--r--tools/testing/memblock/linux/mmzone.h1
-rw-r--r--tools/testing/radix-tree/.gitignore1
-rw-r--r--tools/testing/radix-tree/Makefile72
-rw-r--r--tools/testing/radix-tree/linux/init.h2
-rw-r--r--tools/testing/radix-tree/maple.c111
-rw-r--r--tools/testing/radix-tree/xarray.c10
-rw-r--r--tools/testing/selftests/bpf/.gitignore6
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.riscv643
-rw-r--r--tools/testing/selftests/bpf/Makefile151
-rw-r--r--tools/testing/selftests/bpf/README.rst32
-rw-r--r--tools/testing/selftests/bpf/bench.c13
-rw-r--r--tools/testing/selftests/bpf/bench.h1
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c83
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h26
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h11
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c257
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h12
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h15
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/config.riscv6484
-rw-r--r--tools/testing/selftests/bpf/disasm_helpers.c69
-rw-r--r--tools/testing/selftests/bpf/disasm_helpers.h12
-rw-r--r--tools/testing/selftests/bpf/get_cgroup_id_user.c151
-rw-r--r--tools/testing/selftests/bpf/jit_disasm_helpers.c245
-rw-r--r--tools/testing/selftests/bpf/jit_disasm_helpers.h10
-rw-r--r--tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c2
-rw-r--r--tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c2
-rw-r--r--tools/testing/selftests/bpf/map_tests/map_percpu_stats.c18
-rw-r--r--tools/testing/selftests/bpf/map_tests/sk_storage_map.c2
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c602
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_distill.c68
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/build_id.c118
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c141
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_dev.c125
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_storage.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c125
-rw-r--r--tools/testing/selftests/bpf/prog_tests/crypto_sanity.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/decap_sanity.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/iters.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_buf.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_redirect.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_reroute.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/nested_trust.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pro_epilogue.c60
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/read_vsyscall.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reg_bounds.c32
-rw-r--r--tools/testing/selftests/bpf/prog_tests/resolve_btfids.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/select_reuseport.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c111
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_addr.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c385
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_opts.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_rtt.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c57
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_strncmp.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c213
-rw-r--r--tools/testing/selftests/bpf/prog_tests/token.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c529
-rw-r--r--tools/testing/selftests/bpf/prog_tests/user_ringbuf.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c14
-rw-r--r--tools/testing/selftests/bpf/progs/arena_atomics.c32
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c6
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c8
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h64
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_syscall_macro.c2
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi.h2
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_ancestor.c40
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_storage.c24
-rw-r--r--tools/testing/selftests/bpf/progs/dev_cgroup.c4
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c6
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_exit.c82
-rw-r--r--tools/testing/selftests/bpf/progs/epilogue_tailcall.c58
-rw-r--r--tools/testing/selftests/bpf/progs/err.h10
-rw-r--r--tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c26
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod.c125
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod_seq.c50
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_fail.c7
-rw-r--r--tools/testing/selftests/bpf/progs/local_kptr_stash.c30
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_tailcall.c34
-rw-r--r--tools/testing/selftests/bpf/progs/mmap_inner_array.c57
-rw-r--r--tools/testing/selftests/bpf/progs/nested_acquire.c33
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue.c154
-rw-r--r--tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c149
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c2
-rw-r--r--tools/testing/selftests/bpf/progs/read_vsyscall.c9
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c4
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h4
-rw-r--r--tools/testing/selftests/bpf/progs/syscall.c3
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c70
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c62
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c35
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_freplace.c23
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_success.c56
-rw-r--r--tools/testing/selftests/bpf/progs/tc_bpf2bpf.c22
-rw-r--r--tools/testing/selftests/bpf/progs/tc_dummy.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c64
-rw-r--r--tools/testing/selftests/bpf/progs/test_build_id.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_read_macros.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_xattr.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func15.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_map_resize.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_rdonly_maps.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_sig_in_xattr.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c45
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c8
-rw-r--r--tools/testing/selftests/bpf/progs/token_lsm.c4
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c7
-rw-r--r--tools/testing/selftests/bpf/progs/unsupported_ops.c22
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c39
-rw-r--r--tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c40
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bits_iter.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c900
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_const.c69
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_subprogs.c7
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_int_ptr.c15
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_jit_convergence.c114
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c48
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ldsx.c112
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_lsm.c162
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_scalar_ids.c256
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sdiv.c439
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spill_fill.c24
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subprog_precision.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c105
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_vfs_accept.c85
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_vfs_reject.c161
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_map.c6
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c174
-rw-r--r--tools/testing/selftests/bpf/test_cpp.cpp4
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c85
-rw-r--r--tools/testing/selftests/bpf/test_loader.c496
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c3
-rw-r--r--tools/testing/selftests/bpf/test_maps.c2
-rw-r--r--tools/testing/selftests/bpf/test_progs.c263
-rw-r--r--tools/testing/selftests/bpf/test_progs.h17
-rwxr-xr-xtools/testing/selftests/bpf/test_skb_cgroup_id.sh63
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c183
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_veth.sh121
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c7
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c104
-rw-r--r--tools/testing/selftests/bpf/unpriv_helpers.c1
-rw-r--r--tools/testing/selftests/bpf/uprobe_multi.c41
-rw-r--r--tools/testing/selftests/bpf/uprobe_multi.ld11
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/map_kptr.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c28
-rw-r--r--tools/testing/selftests/bpf/veristat.c16
-rwxr-xr-xtools/testing/selftests/bpf/vmtest.sh107
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c1
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c22
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h2
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c264
-rw-r--r--tools/testing/selftests/cgroup/test_zswap.c75
-rw-r--r--tools/testing/selftests/damon/.gitignore1
-rw-r--r--tools/testing/selftests/damon/Makefile2
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damon_nr_regions.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damos_apply_interval.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damos_quota.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damos_quota_goal.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/damos_tried_regions.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py0
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py0
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c1
-rw-r--r--tools/testing/selftests/ftrace/config1
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc61
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc1
-rw-r--r--tools/testing/selftests/kvm/.gitignore4
-rw-r--r--tools/testing/selftests/kvm/Makefile4
-rw-r--r--tools/testing/selftests/kvm/coalesced_io_test.c236
-rw-r--r--tools/testing/selftests/kvm/guest_print_test.c19
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h28
-rw-r--r--tools/testing/selftests/kvm/include/s390x/debug_print.h69
-rw-r--r--tools/testing/selftests/kvm/include/s390x/processor.h5
-rw-r--r--tools/testing/selftests/kvm/include/s390x/sie.h240
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/apic.h21
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/hyperv.h18
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h7
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c85
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/processor.c10
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/hyperv.c67
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c69
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c19
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c12
-rw-r--r--tools/testing/selftests/kvm/s390x/cmma_test.c7
-rw-r--r--tools/testing/selftests/kvm/s390x/config2
-rw-r--r--tools/testing/selftests/kvm/s390x/debug_test.c4
-rw-r--r--tools/testing/selftests/kvm/s390x/memop.c4
-rw-r--r--tools/testing/selftests/kvm/s390x/tprot.c5
-rw-r--r--tools/testing/selftests/kvm/s390x/ucontrol_test.c332
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c29
-rw-r--r--tools/testing/selftests/kvm/x86_64/debug_regs.c11
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_smoke_test.c32
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_state_test.c54
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c1
-rw-r--r--tools/testing/selftests/landlock/base_test.c2
-rw-r--r--tools/testing/selftests/landlock/common.h39
-rw-r--r--tools/testing/selftests/landlock/fs_test.c1
-rw-r--r--tools/testing/selftests/landlock/net_test.c31
-rw-r--r--tools/testing/selftests/landlock/scoped_abstract_unix_test.c1041
-rw-r--r--tools/testing/selftests/landlock/scoped_base_variants.h156
-rw-r--r--tools/testing/selftests/landlock/scoped_common.h28
-rw-r--r--tools/testing/selftests/landlock/scoped_multiple_domain_variants.h152
-rw-r--r--tools/testing/selftests/landlock/scoped_signal_test.c484
-rw-r--r--tools/testing/selftests/landlock/scoped_test.c33
-rw-r--r--tools/testing/selftests/mm/Makefile2
-rwxr-xr-xtools/testing/selftests/mm/charge_reserved_hugetlb.sh2
-rw-r--r--tools/testing/selftests/mm/hugepage-mmap.c18
-rw-r--r--tools/testing/selftests/mm/hugepage-shm.c18
-rw-r--r--tools/testing/selftests/mm/hugepage-vmemmap.c17
-rw-r--r--tools/testing/selftests/mm/khugepaged.c4
-rw-r--r--tools/testing/selftests/mm/map_hugetlb.c18
-rw-r--r--tools/testing/selftests/mm/migration.c17
-rw-r--r--tools/testing/selftests/mm/mseal_test.c197
-rw-r--r--tools/testing/selftests/mm/pagemap_ioctl.c2
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh2
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c71
-rw-r--r--tools/testing/selftests/mm/thp_settings.c46
-rw-r--r--tools/testing/selftests/mm/thp_settings.h9
-rw-r--r--tools/testing/selftests/mm/vm_util.c22
-rw-r--r--tools/testing/selftests/mm/vm_util.h1
-rw-r--r--tools/testing/selftests/mm/write_to_hugetlbfs.c21
-rw-r--r--tools/testing/selftests/net/netfilter/Makefile4
-rw-r--r--tools/testing/selftests/net/netfilter/config1
-rw-r--r--tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c125
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh51
-rwxr-xr-xtools/testing/selftests/net/netfilter/ipvs.sh2
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh92
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh358
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_tproxy_udp.sh262
-rwxr-xr-xtools/testing/selftests/net/packetdrill/ksft_runner.sh9
-rw-r--r--tools/testing/selftests/ring-buffer/map_test.c24
-rw-r--r--tools/testing/selftests/sched_ext/.gitignore6
-rw-r--r--tools/testing/selftests/sched_ext/Makefile218
-rw-r--r--tools/testing/selftests/sched_ext/config9
-rw-r--r--tools/testing/selftests/sched_ext/create_dsq.bpf.c58
-rw-r--r--tools/testing/selftests/sched_ext/create_dsq.c57
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c42
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.c57
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c39
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.c56
-rw-r--r--tools/testing/selftests/sched_ext/dsp_local_on.bpf.c65
-rw-r--r--tools/testing/selftests/sched_ext/dsp_local_on.c58
-rw-r--r--tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c21
-rw-r--r--tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c60
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c43
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu_fails.c61
-rw-r--r--tools/testing/selftests/sched_ext/exit.bpf.c84
-rw-r--r--tools/testing/selftests/sched_ext/exit.c55
-rw-r--r--tools/testing/selftests/sched_ext/exit_test.h20
-rw-r--r--tools/testing/selftests/sched_ext/hotplug.bpf.c61
-rw-r--r--tools/testing/selftests/sched_ext/hotplug.c168
-rw-r--r--tools/testing/selftests/sched_ext/hotplug_test.h15
-rw-r--r--tools/testing/selftests/sched_ext/init_enable_count.bpf.c53
-rw-r--r--tools/testing/selftests/sched_ext/init_enable_count.c166
-rw-r--r--tools/testing/selftests/sched_ext/maximal.bpf.c164
-rw-r--r--tools/testing/selftests/sched_ext/maximal.c51
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null.bpf.c36
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null.c49
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c25
-rw-r--r--tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c28
-rw-r--r--tools/testing/selftests/sched_ext/minimal.bpf.c21
-rw-r--r--tools/testing/selftests/sched_ext/minimal.c58
-rw-r--r--tools/testing/selftests/sched_ext/prog_run.bpf.c33
-rw-r--r--tools/testing/selftests/sched_ext/prog_run.c78
-rw-r--r--tools/testing/selftests/sched_ext/reload_loop.c75
-rw-r--r--tools/testing/selftests/sched_ext/runner.c201
-rw-r--r--tools/testing/selftests/sched_ext/scx_test.h131
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c40
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl.c72
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c89
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.c72
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c41
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch.c70
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c37
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.c56
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c38
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.c56
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c92
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_vtime.c59
-rw-r--r--tools/testing/selftests/sched_ext/test_example.c49
-rw-r--r--tools/testing/selftests/sched_ext/util.c71
-rw-r--r--tools/testing/selftests/sched_ext/util.h13
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c2
-rw-r--r--tools/testing/shared/autoconf.h (renamed from tools/testing/radix-tree/generated/autoconf.h)0
-rw-r--r--tools/testing/shared/linux.c (renamed from tools/testing/radix-tree/linux.c)26
-rw-r--r--tools/testing/shared/linux/bug.h (renamed from tools/testing/radix-tree/linux/bug.h)0
-rw-r--r--tools/testing/shared/linux/cpu.h (renamed from tools/testing/radix-tree/linux/cpu.h)0
-rw-r--r--tools/testing/shared/linux/idr.h (renamed from tools/testing/radix-tree/linux/idr.h)0
-rw-r--r--tools/testing/shared/linux/kconfig.h (renamed from tools/testing/radix-tree/linux/kconfig.h)0
-rw-r--r--tools/testing/shared/linux/kernel.h (renamed from tools/testing/radix-tree/linux/kernel.h)0
-rw-r--r--tools/testing/shared/linux/kmemleak.h (renamed from tools/testing/radix-tree/linux/kmemleak.h)0
-rw-r--r--tools/testing/shared/linux/local_lock.h (renamed from tools/testing/radix-tree/linux/local_lock.h)0
-rw-r--r--tools/testing/shared/linux/lockdep.h (renamed from tools/testing/radix-tree/linux/lockdep.h)0
-rw-r--r--tools/testing/shared/linux/maple_tree.h (renamed from tools/testing/radix-tree/linux/maple_tree.h)0
-rw-r--r--tools/testing/shared/linux/percpu.h (renamed from tools/testing/radix-tree/linux/percpu.h)0
-rw-r--r--tools/testing/shared/linux/preempt.h (renamed from tools/testing/radix-tree/linux/preempt.h)0
-rw-r--r--tools/testing/shared/linux/radix-tree.h (renamed from tools/testing/radix-tree/linux/radix-tree.h)0
-rw-r--r--tools/testing/shared/linux/rcupdate.h (renamed from tools/testing/radix-tree/linux/rcupdate.h)0
-rw-r--r--tools/testing/shared/linux/xarray.h (renamed from tools/testing/radix-tree/linux/xarray.h)0
-rw-r--r--tools/testing/shared/maple-shared.h13
-rw-r--r--tools/testing/shared/maple-shim.c7
-rw-r--r--tools/testing/shared/shared.h37
-rw-r--r--tools/testing/shared/shared.mk74
-rw-r--r--tools/testing/shared/trace/events/maple_tree.h (renamed from tools/testing/radix-tree/trace/events/maple_tree.h)0
-rw-r--r--tools/testing/shared/xarray-shared.c5
-rw-r--r--tools/testing/shared/xarray-shared.h8
-rw-r--r--tools/testing/vma/.gitignore7
-rw-r--r--tools/testing/vma/Makefile18
-rw-r--r--tools/testing/vma/linux/atomic.h12
-rw-r--r--tools/testing/vma/linux/mmzone.h38
-rw-r--r--tools/testing/vma/vma.c1563
-rw-r--r--tools/testing/vma/vma_internal.h923
-rwxr-xr-xtools/usb/p9_fwd.py243
-rw-r--r--tools/virtio/ringtest/main.c2
-rw-r--r--virt/kvm/coalesced_mmio.c31
-rw-r--r--virt/kvm/eventfd.c6
-rw-r--r--virt/kvm/kvm_main.c301
-rw-r--r--virt/kvm/vfio.c8
6311 files changed, 236198 insertions, 94322 deletions
diff --git a/.clang-format b/.clang-format
index 252820d9c80a..fe1aa1a30d40 100644
--- a/.clang-format
+++ b/.clang-format
@@ -141,11 +141,13 @@ ForEachMacros:
- 'damon_for_each_target_safe'
- 'damos_for_each_filter'
- 'damos_for_each_filter_safe'
+ - 'damos_for_each_quota_goal'
+ - 'damos_for_each_quota_goal_safe'
- 'data__for_each_file'
- 'data__for_each_file_new'
- 'data__for_each_file_start'
- 'device_for_each_child_node'
- - 'displayid_iter_for_each'
+ - 'device_for_each_child_node_scoped'
- 'dma_fence_array_for_each'
- 'dma_fence_chain_for_each'
- 'dma_fence_unwrap_for_each'
@@ -172,11 +174,14 @@ ForEachMacros:
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- - 'drm_gem_for_each_gpuva'
- - 'drm_gem_for_each_gpuva_safe'
+ - 'drm_gem_for_each_gpuvm_bo'
+ - 'drm_gem_for_each_gpuvm_bo_safe'
- 'drm_gpuva_for_each_op'
- 'drm_gpuva_for_each_op_from_reverse'
+ - 'drm_gpuva_for_each_op_reverse'
- 'drm_gpuva_for_each_op_safe'
+ - 'drm_gpuvm_bo_for_each_va'
+ - 'drm_gpuvm_bo_for_each_va_safe'
- 'drm_gpuvm_for_each_va'
- 'drm_gpuvm_for_each_va_range'
- 'drm_gpuvm_for_each_va_range_safe'
@@ -192,11 +197,11 @@ ForEachMacros:
- 'dsa_switch_for_each_port_continue_reverse'
- 'dsa_switch_for_each_port_safe'
- 'dsa_switch_for_each_user_port'
+ - 'dsa_switch_for_each_user_port_continue_reverse'
- 'dsa_tree_for_each_cpu_port'
- 'dsa_tree_for_each_user_port'
- 'dsa_tree_for_each_user_port_continue_reverse'
- 'dso__for_each_symbol'
- - 'dsos__for_each_with_build_id'
- 'elf_hash_for_each_possible'
- 'elf_symtab__for_each_symbol'
- 'evlist__for_each_cpu'
@@ -216,6 +221,7 @@ ForEachMacros:
- 'for_each_and_bit'
- 'for_each_andnot_bit'
- 'for_each_available_child_of_node'
+ - 'for_each_available_child_of_node_scoped'
- 'for_each_bench'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
@@ -234,6 +240,7 @@ ForEachMacros:
- 'for_each_card_widgets_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
+ - 'for_each_child_of_node_scoped'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_clear_bitrange'
@@ -251,6 +258,7 @@ ForEachMacros:
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_andnot'
+ - 'for_each_cpu_from'
- 'for_each_cpu_or'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
@@ -269,13 +277,14 @@ ForEachMacros:
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
+ - 'for_each_enabled_cpu'
- 'for_each_endpoint_of_node'
- 'for_each_event'
- 'for_each_event_tps'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- - 'for_each_free_mem_pfn_range_in_zone'
+ - 'for_each_file_lock'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
@@ -286,15 +295,18 @@ ForEachMacros:
- 'for_each_group_member'
- 'for_each_group_member_head'
- 'for_each_hstate'
+ - 'for_each_hwgpio'
- 'for_each_if'
- 'for_each_inject_fn'
- 'for_each_insn'
+ - 'for_each_insn_op_loc'
- 'for_each_insn_prefix'
- 'for_each_intid'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_lang'
+ - 'for_each_link_ch_maps'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
@@ -332,6 +344,9 @@ ForEachMacros:
- 'for_each_new_plane_in_state_reverse'
- 'for_each_new_private_obj_in_state'
- 'for_each_new_reg'
+ - 'for_each_nhlt_endpoint'
+ - 'for_each_nhlt_endpoint_fmtcfg'
+ - 'for_each_nhlt_fmtcfg'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
@@ -387,12 +402,15 @@ ForEachMacros:
- 'for_each_reloc_from'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
+ - 'for_each_reserved_child_of_node'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
+ - 'for_each_rtd_ch_maps'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_dais'
+ - 'for_each_rtd_dais_reverse'
- 'for_each_sband_iftype_data'
- 'for_each_script'
- 'for_each_sec'
@@ -533,8 +551,6 @@ ForEachMacros:
- 'lwq_for_each_safe'
- 'map__for_each_symbol'
- 'map__for_each_symbol_by_name'
- - 'maps__for_each_entry'
- - 'maps__for_each_entry_safe'
- 'mas_for_each'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
@@ -560,7 +576,9 @@ ForEachMacros:
- 'netdev_hw_addr_list_for_each'
- 'nft_rule_for_each_expr'
- 'nla_for_each_attr'
+ - 'nla_for_each_attr_type'
- 'nla_for_each_nested'
+ - 'nla_for_each_nested_type'
- 'nlmsg_for_each_attr'
- 'nlmsg_for_each_msg'
- 'nr_neigh_for_each'
@@ -579,6 +597,7 @@ ForEachMacros:
- 'perf_config_sections__for_each_entry'
- 'perf_config_set__for_each_entry'
- 'perf_cpu_map__for_each_cpu'
+ - 'perf_cpu_map__for_each_cpu_skip_any'
- 'perf_cpu_map__for_each_idx'
- 'perf_evlist__for_each_entry'
- 'perf_evlist__for_each_entry_reverse'
@@ -639,7 +658,6 @@ ForEachMacros:
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
- - 'sk_for_each_bound_bhash2'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
@@ -653,6 +671,7 @@ ForEachMacros:
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
+ - 'sparsebit_for_each_set_range'
- 'strlist__for_each_entry'
- 'strlist__for_each_entry_safe'
- 'sym_for_each_insn'
@@ -662,7 +681,6 @@ ForEachMacros:
- 'tcf_act_for_each_action'
- 'tcf_exts_for_each_action'
- 'ttm_resource_manager_for_each_res'
- - 'twsk_for_each_bound_bhash2'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@@ -686,6 +704,9 @@ ForEachMacros:
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'xbc_node_for_each_subkey'
+ - 'ynl_attr_for_each'
+ - 'ynl_attr_for_each_nested'
+ - 'ynl_attr_for_each_payload'
- 'zorro_for_each_dev'
IncludeBlocks: Preserve
diff --git a/.gitignore b/.gitignore
index 38b7ab9e7dc4..56972adb5031 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,7 +47,6 @@
*.so.dbg
*.su
*.symtypes
-*.symversions
*.tab.[ch]
*.tar
*.xz
@@ -71,6 +70,7 @@ modules.order
/Module.markers
/modules.builtin
/modules.builtin.modinfo
+/modules.builtin.ranges
/modules.nsdeps
#
@@ -143,7 +143,6 @@ GTAGS
# id-utils files
ID
-*.orig
*~
\#*#
diff --git a/.mailmap b/.mailmap
index 7c7f171d0e55..0374777cc662 100644
--- a/.mailmap
+++ b/.mailmap
@@ -154,6 +154,9 @@ Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christian Marangi <ansuelsmth@gmail.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
+Chuck Lever <chuck.lever@oracle.com> <cel@kernel.org>
+Chuck Lever <chuck.lever@oracle.com> <cel@netapp.com>
+Chuck Lever <chuck.lever@oracle.com> <cel@citi.umich.edu>
Claudiu Beznea <claudiu.beznea@tuxon.dev> <claudiu.beznea@microchip.com>
Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
Corey Minyard <minyard@acm.org>
@@ -313,6 +316,7 @@ Jiri Slaby <jirislaby@kernel.org> <xslaby@fi.muni.cz>
Jisheng Zhang <jszhang@kernel.org> <jszhang@marvell.com>
Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
Jishnu Prakash <quic_jprakash@quicinc.com> <jprakash@codeaurora.org>
+Joel Granados <joel.granados@kernel.org> <j.granados@samsung.com>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
@@ -613,6 +617,10 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
Sibi Sankar <quic_sibis@quicinc.com> <sibis@codeaurora.org>
Sid Manning <quic_sidneym@quicinc.com> <sidneym@codeaurora.org>
Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel.vetter@ffwll.ch>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel.vetter@intel.com>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel@ffwll.ch>
+Simona Vetter <simona.vetter@ffwll.ch> <daniel@biene.ffwll.ch>
Simon Horman <horms@kernel.org> <simon.horman@corigine.com>
Simon Horman <horms@kernel.org> <simon.horman@netronome.com>
Simon Kelley <simon@thekelleys.org.uk>
diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem
index aa89adf18bc5..0ae8cb074acf 100644
--- a/Documentation/ABI/stable/sysfs-bus-nvmem
+++ b/Documentation/ABI/stable/sysfs-bus-nvmem
@@ -11,7 +11,7 @@ Description:
Read returns '0' or '1' for read-write or read-only modes
respectively.
Write parses one of 'YyTt1NnFf0', or [oO][NnFf] for "on"
- and "off", i.e. what kstrbool() supports.
+ and "off", i.e. what kstrtobool() supports.
Note: This file is only present if CONFIG_NVMEM_SYSFS
is enabled.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-acm b/Documentation/ABI/testing/configfs-usb-gadget-acm
index d21092d75a05..25e68be9eb66 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-acm
+++ b/Documentation/ABI/testing/configfs-usb-gadget-acm
@@ -6,3 +6,10 @@ Description:
This item contains just one readonly attribute: port_num.
It contains the port number of the /dev/ttyGS<n> device
associated with acm function's instance "name".
+
+What: /config/usb-gadget/gadget/functions/acm.name/protocol
+Date: Aug 2024
+KernelVersion: 6.13
+Description:
+ Reported bInterfaceProtocol for the ACM device. For legacy
+ reasons, this defaults to 1 (USB_CDC_ACM_PROTO_AT_V25TER).
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac1 b/Documentation/ABI/testing/configfs-usb-gadget-uac1
index c4ba92f004c3..64188a85592b 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac1
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac1
@@ -30,4 +30,12 @@ Description:
req_number the number of pre-allocated requests
for both capture and playback
function_name name of the interface
+ p_it_name playback input terminal name
+ p_it_ch_name playback channels name
+ p_ot_name playback output terminal name
+ p_fu_vol_name playback mute/volume functional unit name
+ c_it_name capture input terminal name
+ c_it_ch_name capture channels name
+ c_ot_name capture output terminal name
+ c_fu_vol_name capture mute/volume functional unit name
===================== =======================================
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac2 b/Documentation/ABI/testing/configfs-usb-gadget-uac2
index a2bf4fd82a5b..133e995c3e92 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac2
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac2
@@ -35,6 +35,17 @@ Description:
req_number the number of pre-allocated requests
for both capture and playback
function_name name of the interface
+ if_ctrl_name topology control name
+ clksrc_in_name input clock name
+ clksrc_out_name output clock name
+ p_it_name playback input terminal name
+ p_it_ch_name playback input first channel name
+ p_ot_name playback output terminal name
+	p_fu_vol_name		playback mute/volume functional unit name
+ c_it_name capture input terminal name
+ c_it_ch_name capture input first channel name
+ c_ot_name capture output terminal name
+ c_fu_vol_name capture mute/volume functional unit name
c_terminal_type code of the capture terminal type
p_terminal_type code of the playback terminal type
===================== =======================================
diff --git a/Documentation/ABI/testing/debugfs-iio-ad9467 b/Documentation/ABI/testing/debugfs-iio-ad9467
new file mode 100644
index 000000000000..0352fca1f7f2
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-iio-ad9467
@@ -0,0 +1,39 @@
+What: /sys/kernel/debug/iio/iio:deviceX/calibration_table_dump
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ This dumps the calibration table that was filled during the
+ digital interface tuning process.
+
+What: /sys/kernel/debug/iio/iio:deviceX/in_voltage_test_mode_available
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ List all the available test tones:
+ - off
+ - midscale_short
+ - pos_fullscale
+ - neg_fullscale
+ - checkerboard
+ - prbs23
+ - prbs9
+ - one_zero_toggle
+ - user
+ - bit_toggle
+ - sync
+ - one_bit_high
+ - mixed_bit_frequency
+ - ramp
+
+ Note that depending on the actual device being used, some of the
+ above might not be available (and they won't be listed when
+ reading the file).
+
+What: /sys/kernel/debug/iio/iio:deviceX/in_voltageY_test_mode
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+	Writing to this file initiates one of the available test tones on
+	channel Y. Reading it shows which test is running. Where an IIO
+	backend is available and supports the test tone, additional
+	information about data correctness is given.
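
A minimal userspace sketch of the interface above, assuming a hypothetical
device index and channel (iio:device0, voltage channel 0); only the
attribute name and the "prbs9" tone come from this ABI entry, everything
else is illustrative:

	/* Select the prbs9 test tone on channel 0, then read back which
	 * test is running. Paths and indices are assumptions. */
	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/kernel/debug/iio/iio:device0/in_voltage0_test_mode";
		char buf[128];
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("prbs9", f);
		fclose(f);

		f = fopen(path, "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("running test: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}
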
diff --git a/Documentation/ABI/testing/debugfs-iio-backend b/Documentation/ABI/testing/debugfs-iio-backend
new file mode 100644
index 000000000000..01ab94469432
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-iio-backend
@@ -0,0 +1,20 @@
+What: /sys/kernel/debug/iio/iio:deviceX/backendY/name
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ Name of Backend Y connected to device X.
+
+What: /sys/kernel/debug/iio/iio:deviceX/backendY/direct_reg_access
+KernelVersion: 6.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ Directly access the registers of backend Y. Typical usage is:
+
+	Reading address 0x50:
+	echo 0x50 > direct_reg_access
+	cat direct_reg_access
+
+	Writing 0x3 to address 0x50:
+	echo 0x50 0x3 > direct_reg_access
+	# read back address 0x50
+	cat direct_reg_access
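
The same flow as a hedged C sketch; the device and backend indices are
assumptions, only the attribute name and write format come from the entry
above:

	/* Write 0x3 to backend register 0x50, then select the address and
	 * read it back, mirroring the echo/cat sequence above. */
	#include <stdio.h>

	static int wr(const char *path, const char *s)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(s, f);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		const char *path =
			"/sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access";
		char buf[64];
		FILE *f;

		wr(path, "0x50 0x3");	/* write value 0x3 to address 0x50 */
		wr(path, "0x50");	/* select address for readback */
		f = fopen(path, "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("reg 0x50 = %s", buf);
		if (f)
			fclose(f);
		return 0;
	}
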
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index 628a00fb20a9..1ef69e0271f9 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -151,3 +151,10 @@ Contact: Sergey Senozhatsky <senozhatsky@chromium.org>
Description:
The recompress file is write-only and triggers re-compression
with secondary compression algorithms.
+
+What: /sys/block/zram<id>/algorithm_params
+Date: August 2024
+Contact: Sergey Senozhatsky <senozhatsky@chromium.org>
+Description:
+ The algorithm_params file is write-only and is used to setup
+ compression algorithm parameters.
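
The entry above does not spell out the parameter syntax; the admin guide
updated in this same series (Documentation/admin-guide/blockdev/zram.rst)
uses key=value pairs, so the sketch below should be read under that
assumption:

	/* Hedged sketch: set a zstd compression level on zram0. The
	 * "algo=... level=..." syntax is assumed from zram.rst. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/block/zram0/algorithm_params", "w");

		if (!f) {
			perror("algorithm_params");
			return 1;
		}
		fprintf(f, "algo=zstd level=8");
		fclose(f);
		return 0;
	}
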
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 7cee78ad4108..89943c2d54e8 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -523,13 +523,27 @@ Description:
What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_i_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltageY_q_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_capacitance_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_illuminance0_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibbias
-What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_intensityY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_z_calibbias
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_resistance_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_calibbias
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibbias
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -541,6 +555,10 @@ Description:
What: /sys/bus/iio/devices/iio:deviceX/in_accel_calibbias_available
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/in_temp_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibbias_available
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibbias_available
KernelVersion: 5.8
Contact: linux-iio@vger.kernel.org
Description:
@@ -549,25 +567,34 @@ Description:
- a small discrete set of values like "0 2 4 6 8"
- a range specified as "[min step max]"
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_i_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_q_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltage_i_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltage_q_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_voltage_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_x_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_y_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_z_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_capacitance_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_illuminance0_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_both_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_intensity_ir_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_x_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_y_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_magn_z_calibscale
What: /sys/bus/iio/devices/iio:deviceX/in_pressure_calibscale
-What: /sys/bus/iio/devices/iio:deviceX/in_illuminance_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_pressureY_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity0_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_i_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_q_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_i_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_q_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_calibscale
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_calibscale
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
Description:
@@ -575,6 +602,20 @@ Description:
production inaccuracies). If shared across all channels,
<type>_calibscale is used.
+What: /sys/bus/iio/devices/iio:deviceX/in_illuminanceY_calibscale_available
+What: /sys/bus/iio/devices/iio:deviceX/in_intensityY_calibscale_available
+What: /sys/bus/iio/devices/iio:deviceX/in_proximityY_calibscale_available
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_calibscale_available
+KernelVersion: 4.8
+Contact: linux-iio@vger.kernel.org
+Description:
+	Available values of calibscale. May be expressed as either of:
+
+ - a small discrete set of values like "1 8 16"
+ - a range specified as "[min step max]"
+
+ If shared across all channels, <type>_calibscale_available is used.
+
What: /sys/bus/iio/devices/iio:deviceX/in_activity_calibgender
What: /sys/bus/iio/devices/iio:deviceX/in_energy_calibgender
What: /sys/bus/iio/devices/iio:deviceX/in_distance_calibgender
@@ -708,6 +749,7 @@ Description:
2.5kohm_to_gnd: connected to ground via a 2.5kOhm resistor,
6kohm_to_gnd: connected to ground via a 6kOhm resistor,
20kohm_to_gnd: connected to ground via a 20kOhm resistor,
+ 42kohm_to_gnd: connected to ground via a 42kOhm resistor,
90kohm_to_gnd: connected to ground via a 90kOhm resistor,
100kohm_to_gnd: connected to ground via an 100kOhm resistor,
125kohm_to_gnd: connected to ground via an 125kOhm resistor,
@@ -2289,3 +2331,11 @@ KernelVersion: 6.7
Contact: linux-iio@vger.kernel.org
Description:
List of available timeout value for tap gesture confirmation.
+
+What: /sys/.../iio:deviceX/in_shunt_resistor
+What: /sys/.../iio:deviceX/in_current_shunt_resistor
+What: /sys/.../iio:deviceX/in_power_shunt_resistor
+KernelVersion: 6.10
+Contact: linux-iio@vger.kernel.org
+Description:
+	The value of the current sense resistor in Ohms.
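
The *_calibbias_available and *_calibscale_available entries earlier in
this file allow two encodings, a discrete list ("0 2 4 6 8") or a range
("[min step max]"). A small sketch of how a client might tell them apart;
the input string is a made-up example:

	/* Distinguish the "[min step max]" range form from a discrete
	 * value list. The string would normally be read from sysfs. */
	#include <stdio.h>

	int main(void)
	{
		const char *avail = "[0 16 496]";	/* hypothetical */
		int min, step, max, v, n;

		if (sscanf(avail, "[%d %d %d]", &min, &step, &max) == 3) {
			printf("range: min=%d step=%d max=%d\n", min, step, max);
		} else {
			while (sscanf(avail, "%d%n", &v, &n) == 1) {
				printf("value: %d\n", v);
				avail += n;
			}
		}
		return 0;
	}
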
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611 b/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611
deleted file mode 100644
index 6d2d2b094941..000000000000
--- a/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611
+++ /dev/null
@@ -1,17 +0,0 @@
-What: /sys/bus/iio/devices/iio:deviceX/in_power_shunt_resistor
-Date: March 2017
-KernelVersion: 4.12
-Contact: linux-iio@vger.kernel.org
-Description: The value of the shunt resistor used to compute power drain on
- common input voltage pin (RS+). In Ohms.
-
-What: /sys/bus/iio/devices/iio:deviceX/in_current_shunt_resistor
-Date: March 2017
-KernelVersion: 4.12
-Contact: linux-iio@vger.kernel.org
-Description: The value of the shunt resistor used to compute current flowing
- between RS+ and RS- voltage sense inputs. In Ohms.
-
-These attributes describe a single physical component, exposed as two distinct
-attributes as it is used to calculate two different values: power load and
-current flowing between RS+ and RS- inputs.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40 b/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40
index 469a7c00fad4..a95547e874f1 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40
+++ b/Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40
@@ -15,17 +15,3 @@ Description:
Set the relative humidity. This value is sent to the sensor for
humidity compensation.
Default value: 50000 (50 % relative humidity)
-
-What: /sys/bus/iio/devices/iio:deviceX/in_resistance_calibbias
-Date: August 2021
-KernelVersion: 5.15
-Contact: Andreas Klinger <ak@it-klinger.de>
-Description:
- Set the bias value for the resistance which is used for
- calculation of in_concentration_input as follows:
-
- x = (in_resistance_raw - in_resistance_calibbias) * 0.65
-
- in_concentration_input = 500 / (1 + e^x)
-
- Default value: 30000
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac b/Documentation/ABI/testing/sysfs-bus-iio-dac
new file mode 100644
index 000000000000..810eaac5533c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac
@@ -0,0 +1,61 @@
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_toggle_en
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Toggle enable. Write 1 to enable toggle or 0 to disable it. This
+	Toggle enable. Write 1 to enable toggling or 0 to disable it. This
+	is useful when one wants to change the DAC output codes. For
+	autonomous toggling, the recommended sequence is:
+ - disable toggle operation;
+ - change out_currentY_rawN, where N is the integer value of the symbol;
+ - enable toggle operation.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_rawN
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute has the same meaning as out_currentY_raw. It is
+ specific to toggle enabled channels and refers to the DAC output
+ code in INPUT_N (_rawN), where N is the integer value of the symbol.
+ The same scale and offset as in out_currentY_raw applies.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_currentY_symbol
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Performs a SW switch to a predefined output symbol. This attribute
+ is specific to toggle enabled channels and allows switching between
+ multiple predefined symbols. Each symbol corresponds to a different
+ output, denoted as out_currentY_rawN, where N is the integer value
+ of the symbol. Writing an integer value N will select out_currentY_rawN.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_toggle_en
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+	Toggle enable. Write 1 to enable toggling or 0 to disable it. This
+	is useful when one wants to change the DAC output codes. For
+	autonomous toggling, the recommended sequence is:
+
+ - disable toggle operation;
+ - change out_voltageY_rawN, where N is the integer value of the symbol;
+ - enable toggle operation.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_rawN
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+	This attribute has the same meaning as out_voltageY_raw. It is
+	specific to toggle enabled channels and refers to the DAC output
+	code in INPUT_N (_rawN), where N is the integer value of the symbol.
+	The same scale and offset as in out_voltageY_raw applies.
+
+What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_symbol
+KernelVersion: 5.18
+Contact: linux-iio@vger.kernel.org
+Description:
+ Performs a SW switch to a predefined output symbol. This attribute
+ is specific to toggle enabled channels and allows switching between
+ multiple predefined symbols. Each symbol corresponds to a different
+ output, denoted as out_voltageY_rawN, where N is the integer value
+ of the symbol. Writing an integer value N will select out_voltageY_rawN.
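
A hedged walk-through of the disable/update/enable procedure above for a
voltage channel; the device index, channel number, and raw codes are
assumptions, only the attribute names come from this file:

	/* Autonomous-toggle setup: stop toggling, load new codes into
	 * raw0/raw1, re-enable, then select symbol 1 by software. */
	#include <stdio.h>

	static int write_attr(const char *attr, const char *val)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/bus/iio/devices/iio:device0/%s", attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		write_attr("out_voltage0_toggle_en", "0");	/* disable */
		write_attr("out_voltage0_raw0", "100");		/* INPUT_0 code */
		write_attr("out_voltage0_raw1", "200");		/* INPUT_1 code */
		write_attr("out_voltage0_toggle_en", "1");	/* enable */
		write_attr("out_voltage0_symbol", "1");		/* select raw1 */
		return 0;
	}
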
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688 b/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688
index 1c35971277ba..ae95a5477382 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-ltc2688
@@ -53,34 +53,3 @@ KernelVersion: 5.18
Contact: linux-iio@vger.kernel.org
Description:
Returns the available values for the dither phase.
-
-What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_toggle_en
-KernelVersion: 5.18
-Contact: linux-iio@vger.kernel.org
-Description:
- Toggle enable. Write 1 to enable toggle or 0 to disable it. This is
- useful when one wants to change the DAC output codes. The way it should
- be done is:
-
- - disable toggle operation;
- - change out_voltageY_raw0 and out_voltageY_raw1;
- - enable toggle operation.
-
-What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw0
-What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw1
-KernelVersion: 5.18
-Contact: linux-iio@vger.kernel.org
-Description:
- It has the same meaning as out_voltageY_raw. This attribute is
- specific to toggle enabled channels and refers to the DAC output
- code in INPUT_A (_raw0) and INPUT_B (_raw1). The same scale and offset
- as in out_voltageY_raw applies.
-
-What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_symbol
-KernelVersion: 5.18
-Contact: linux-iio@vger.kernel.org
-Description:
- Performs a SW toggle. This attribute is specific to toggle
- enabled channels and allows to toggle between out_voltageY_raw0
- and out_voltageY_raw1 through software. Writing 0 will select
- out_voltageY_raw0 while 1 selects out_voltageY_raw1.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
index 31dbb390573f..c431f0a13cf5 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
+++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
@@ -3,7 +3,7 @@ KernelVersion:
Contact: linux-iio@vger.kernel.org
Description:
Reading this returns the valid values that can be written to the
- on_altvoltage0_mode attribute:
+ filter_mode attribute:
- auto -> Adjust bandpass filter to track changes in input clock rate.
- manual -> disable/unregister the clock rate notifier / input clock tracking.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc b/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc
index 8916f7ec6507..8dbca113112d 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc
+++ b/Documentation/ABI/testing/sysfs-bus-iio-ina2xx-adc
@@ -13,12 +13,3 @@ Description:
available for reading data. However, samples can be occasionally skipped
or repeated, depending on the beat between the capture and conversion
rates.
-
-What: /sys/bus/iio/devices/iio:deviceX/in_shunt_resistor
-Date: December 2015
-KernelVersion: 4.4
-Contact: linux-iio@vger.kernel.org
-Description:
- The value of the shunt resistor may be known only at runtime fom an
- eeprom content read by a client application. This attribute allows to
- set its value in ohms.
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index ecf47559f495..7f63c7e97773 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -500,3 +500,75 @@ Description:
console drivers from the device. Raw users of pci-sysfs
resourceN attributes must be terminated prior to resizing.
Success of the resizing operation is not guaranteed.
+
+What: /sys/bus/pci/devices/.../leds/*:enclosure:*/brightness
+What: /sys/class/leds/*:enclosure:*/brightness
+Date: August 2024
+KernelVersion: 6.12
+Description:
+ LED indications on PCIe storage enclosures which are controlled
+ through the NPEM interface (Native PCIe Enclosure Management,
+ PCIe r6.1 sec 6.28) are accessible as led class devices, both
+ below /sys/class/leds and below NPEM-capable PCI devices.
+
+ Although these led class devices could be manipulated manually,
+ in practice they are typically manipulated automatically by an
+ application such as ledmon(8).
+
+ The name of a led class device is as follows:
+ <bdf>:enclosure:<indication>
+ where:
+
+ - <bdf> is the domain, bus, device and function number
+ (e.g. 10000:02:05.0)
+ - <indication> is a short description of the LED indication
+
+ Valid indications per PCIe r6.1 table 6-27 are:
+
+ - ok (drive is functioning normally)
+ - locate (drive is being identified by an admin)
+ - fail (drive is not functioning properly)
+ - rebuild (drive is part of an array that is rebuilding)
+ - pfa (drive is predicted to fail soon)
+ - hotspare (drive is marked to be used as a replacement)
+ - ica (drive is part of an array that is degraded)
+ - ifa (drive is part of an array that is failed)
+ - idt (drive is not the right type for the connector)
+ - disabled (drive is disabled, removal is safe)
+ - specific0 to specific7 (enclosure-specific indications)
+
+ Broadly, the indications fall into one of these categories:
+
+ - to signify drive state (ok, locate, fail, idt, disabled)
+ - to signify drive role or state in a software RAID array
+ (rebuild, pfa, hotspare, ica, ifa)
+ - to signify any other role or state (specific0 to specific7)
+
+ Mandatory indications per PCIe r6.1 sec 7.9.19.2 comprise:
+ ok, locate, fail, rebuild. All others are optional.
+ A led class device is only visible if the corresponding
+ indication is supported by the device.
+
+ To manipulate the indications, write 0 (LED_OFF) or 1 (LED_ON)
+ to the "brightness" file. Note that manipulating an indication
+ may implicitly manipulate other indications at the vendor's
+ discretion. E.g. when the user lights up the "ok" indication,
+ the vendor may choose to automatically turn off the "fail"
+ indication. The current state of an indication can be
+ retrieved by reading its "brightness" file.
+
+ The PCIe Base Specification allows vendors leeway to choose
+ different colors or blinking patterns for the indications,
+ but they typically follow the IBPI standard. E.g. the "locate"
+ indication is usually presented as one or two LEDs blinking at
+ 4 Hz frequency:
+ https://en.wikipedia.org/wiki/International_Blinking_Pattern_Interpretation
+
+ PCI Firmware Specification r3.3 sec 4.7 defines a DSM interface
+ to facilitate shared access by operating system and platform
+ firmware to a device's NPEM registers. The kernel will use
+ this DSM interface where available, instead of accessing NPEM
+ registers directly. The DSM interface does not support the
+ enclosure-specific indications "specific0" to "specific7",
+ hence the corresponding led class devices are unavailable if
+ the DSM interface is used.
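
For illustration, lighting the mandatory "locate" indication through the
led class device; the BDF is the example value from the text above, and
real paths depend on the enclosure:

	/* Turn on the "locate" indication for slot 10000:02:05.0 and read
	 * the state back. 1 = LED_ON, 0 = LED_OFF, as described above. */
	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/class/leds/10000:02:05.0:enclosure:locate/brightness";
		char buf[8];
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("brightness");
			return 1;
		}
		fputs("1", f);
		fclose(f);

		f = fopen(path, "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("locate: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}
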
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
index a95e0f17c35a..cec65827e602 100644
--- a/Documentation/ABI/testing/sysfs-devices-memory
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -115,6 +115,6 @@ What: /sys/devices/system/memory/crash_hotplug
Date: Aug 2023
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description:
- (RO) indicates whether or not the kernel directly supports
- modifying the crash elfcorehdr for memory hot un/plug and/or
- on/offline changes.
+	(RO) indicates whether or not the kernel updates relevant kexec
+	segments on memory hot un/plug and/or on/offline events, avoiding
+	the need to reload the kdump kernel.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index de725ca3be82..206079d3bd5b 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -704,9 +704,9 @@ What: /sys/devices/system/cpu/crash_hotplug
Date: Aug 2023
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description:
- (RO) indicates whether or not the kernel directly supports
- modifying the crash elfcorehdr for CPU hot un/plug and/or
- on/offline changes.
+	(RO) indicates whether or not the kernel updates relevant kexec
+	segments on CPU hot un/plug and/or on/offline events, avoiding
+	the need to reload the kdump kernel.
What: /sys/devices/system/cpu/enabled
Date: Nov 2022
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
index 92fe7c5c5ac1..be4141a7522f 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
@@ -75,3 +75,11 @@ Description: RO. Energy input of device or gt in microjoules.
for the gt.
Only supported for particular Intel i915 graphics platforms.
+
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/fan1_input
+Date: November 2024
+KernelVersion: 6.12
+Contact: intel-gfx@lists.freedesktop.org
+Description: RO. Fan speed of device in RPM.
+
+ Only supported for particular Intel i915 graphics platforms.
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index fe943ce76c60..5fa6655aee84 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1532,3 +1532,30 @@ Contact: Bean Huo <beanhuo@micron.com>
Description:
rtc_update_ms indicates how often the host should synchronize or update the
UFS RTC. If set to 0, this will disable UFS RTC periodic update.
+
+What: /sys/devices/platform/.../ufshci_capabilities/version
+Date: August 2024
+Contact: Avri Altman <avri.altman@wdc.com>
+Description:
+ Host Capabilities register group: UFS version register.
+ Symbol - VER. This file shows the UFSHCD version.
+ Example: Version 3.12 would be represented as 0000_0312h.
+ The file is read only.
+
+What: /sys/devices/platform/.../ufshci_capabilities/product_id
+Date: August 2024
+Contact: Avri Altman <avri.altman@wdc.com>
+Description:
+ Host Capabilities register group: product ID register.
+ Symbol - HCPID. This file shows the UFSHCD product ID.
+ The content of this register is vendor specific.
+ The file is read only.
+
+What: /sys/devices/platform/.../ufshci_capabilities/man_id
+Date: August 2024
+Contact: Avri Altman <avri.altman@wdc.com>
+Description:
+ Host Capabilities register group: manufacturer ID register.
+ Symbol - HCMID. This file shows the UFSHCD manufacturer ID.
+ The Manufacturer ID is defined by JEDEC in JEDEC-JEP106.
+ The file is read only.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index cad6c3dc1f9c..fdedf1ea944b 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -579,6 +579,12 @@ Description: When ATGC is on, it controls age threshold to bypass GCing young
candidates whose age is not beyond the threshold, by default it was
initialized as 604800 seconds (equals to 7 days).
+What: /sys/fs/f2fs/<disk>/atgc_enabled
+Date: Feb 2024
+Contact: "Jinbao Liu" <liujinbao1@xiaomi.com>
+Description: It shows whether ATGC is on or off. The value is 1 when
+ ATGC is on, and 0 when it is off.
+
What: /sys/fs/f2fs/<disk>/gc_reclaimed_segments
Date: July 2021
Contact: "Daeho Jeong" <daehojeong@google.com>
@@ -763,3 +769,53 @@ Date: November 2023
Contact: "Chao Yu" <chao@kernel.org>
Description: It controls to enable/disable IO aware feature for background discard.
By default, the value is 1 which indicates IO aware is on.
+
+What: /sys/fs/f2fs/<disk>/blkzone_alloc_policy
+Date: July 2024
+Contact: "Yuanhong Liao" <liaoyuanhong@vivo.com>
+Description: Zoned UFS devices consist of two parts: conventional zones and
+ sequential zones. This node controls which part to prioritize for
+ writes; the default value is 0.
+
+ ======================== =========================================
+ value description
+ blkzone_alloc_policy = 0 Prioritize writing to sequential zones
+ blkzone_alloc_policy = 1 Only allow writing to sequential zones
+ blkzone_alloc_policy = 2 Prioritize writing to conventional zones
+ ======================== =========================================
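+
+ For example, writing 1 restricts block allocation to
+ sequential zones only:
+
+   echo 1 > /sys/fs/f2fs/<disk>/blkzone_alloc_policy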
+
+What: /sys/fs/f2fs/<disk>/migration_window_granularity
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Controls the migration window granularity of garbage collection
+ on large sections. It controls the scanning window granularity for
+ GC migration in a unit of segment, while migration_granularity
+ controls the number of segments which can be migrated in the same turn.
+
+What: /sys/fs/f2fs/<disk>/reserved_segments
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: To fine-tune GC behavior, this node controls the number of
+ reserved segments.
+
+What: /sys/fs/f2fs/<disk>/gc_no_zoned_gc_percent
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: If the percentage of free sections over total sections is above this
+ number, F2FS does not perform garbage collection for zoned devices
+ through the background GC thread. The default number is "60".
+
+What: /sys/fs/f2fs/<disk>/gc_boost_zoned_gc_percent
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: If the percentage of free sections over total sections is under this
+ number, F2FS boosts garbage collection for zoned devices through the
+ background GC thread. The default number is "25".
+
+What: /sys/fs/f2fs/<disk>/gc_valid_thresh_ratio
+Date: September 2024
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: It controls the valid block ratio threshold not to trigger excessive GC
+ for zoned devices. Its initial value is 95(%). F2FS will stop the
+ background GC thread from initiating GC for sections having valid blocks
+ exceeding the ratio.
diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst
index 628bf2f7a416..018d6cc173d7 100644
--- a/Documentation/accel/qaic/qaic.rst
+++ b/Documentation/accel/qaic/qaic.rst
@@ -147,12 +147,6 @@ DRM_IOCTL_QAIC_PERF_STATS_BO
recent execution of a BO. This allows userspace to construct an end to end
timeline of the BO processing for a performance analysis.
-DRM_IOCTL_QAIC_PART_DEV
- This IOCTL allows userspace to request a duplicate "shadow device". This extra
- accelN device is associated with a specific partition of resources on the
- AIC100 device and can be used for limiting a process to some subset of
- resources.
-
DRM_IOCTL_QAIC_DETACH_SLICE_BO
This IOCTL allows userspace to remove the slicing information from a BO that
was originally provided by a call to DRM_IOCTL_QAIC_ATTACH_SLICE_BO. This
diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst
index 091e8bb38887..678d70d6e1c3 100644
--- a/Documentation/admin-guide/blockdev/zram.rst
+++ b/Documentation/admin-guide/blockdev/zram.rst
@@ -102,17 +102,41 @@ Examples::
#select lzo compression algorithm
echo lzo > /sys/block/zram0/comp_algorithm
-For the time being, the `comp_algorithm` content does not necessarily
-show every compression algorithm supported by the kernel. We keep this
-list primarily to simplify device configuration and one can configure
-a new device with a compression algorithm that is not listed in
-`comp_algorithm`. The thing is that, internally, ZRAM uses Crypto API
-and, if some of the algorithms were built as modules, it's impossible
-to list all of them using, for instance, /proc/crypto or any other
-method. This, however, has an advantage of permitting the usage of
-custom crypto compression modules (implementing S/W or H/W compression).
-
-4) Set Disksize
+For the time being, the `comp_algorithm` content shows only compression
+algorithms that are supported by zram.
+
+4) Set compression algorithm parameters: Optional
+=================================================
+
+Compression algorithms may support specific parameters which can be
+tweaked for a particular dataset. ZRAM has an `algorithm_params` device
+attribute which provides per-algorithm parameter configuration.
+
+For example, several compression algorithms support a `level` parameter.
+In addition, certain compression algorithms support pre-trained dictionaries,
+which significantly change the algorithms' characteristics. To configure a
+compression algorithm to use an external pre-trained dictionary, pass the
+full path via the `dict` parameter along with other parameters::
+
+ #pass path to pre-trained zstd dictionary
+ echo "algo=zstd dict=/etc/dictioary" > /sys/block/zram0/algorithm_params
+
+ #same, but using algorithm priority
+ echo "priority=1 dict=/etc/dictioary" > \
+ /sys/block/zram0/algorithm_params
+
+ #pass path to pre-trained zstd dictionary and compression level
+ echo "algo=zstd level=8 dict=/etc/dictioary" > \
+ /sys/block/zram0/algorithm_params
+
+Parameters are algorithm specific: not all algorithms support pre-trained
+dictionaries, and not all algorithms support `level`. Furthermore, for certain
+algorithms `level` controls the compression level (the higher the value the
+better the compression ratio; it can even take negative values for some
+algorithms), while for other algorithms `level` is an acceleration level (the
+higher the value the lower the compression ratio).
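+
+For instance, assuming both algorithms are registered, their levels could
+be set as follows (the values are merely illustrative)::
+
+	#zstd: higher level means better compression ratio
+	echo "algo=zstd level=19" > /sys/block/zram0/algorithm_params
+
+	#lz4: higher level means higher acceleration (faster, lower ratio)
+	echo "algo=lz4 level=5" > /sys/block/zram0/algorithm_params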
+
+5) Set Disksize
===============
Set disk size by writing the value to sysfs node 'disksize'.
@@ -132,7 +156,7 @@ There is little point creating a zram of greater than twice the size of memory
since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the
size of the disk when not in use so a huge zram is wasteful.
-5) Set memory limit: Optional
+6) Set memory limit: Optional
=============================
Set memory limit by writing the value to sysfs node 'mem_limit'.
@@ -151,7 +175,7 @@ Examples::
# To disable memory limit
echo 0 > /sys/block/zram0/mem_limit
-6) Activate
+7) Activate
===========
::
@@ -162,7 +186,7 @@ Examples::
mkfs.ext4 /dev/zram1
mount /dev/zram1 /tmp
-7) Add/remove zram devices
+8) Add/remove zram devices
==========================
zram provides a control interface, which enables dynamic (on-demand) device
@@ -182,7 +206,7 @@ execute::
echo X > /sys/class/zram-control/hot_remove
-8) Stats
+9) Stats
========
Per-device statistics are exported as various nodes under /sys/block/zram<id>/
@@ -205,6 +229,7 @@ writeback_limit_enable RW show and set writeback_limit feature
max_comp_streams RW the number of possible concurrent compress
operations
comp_algorithm RW show and change the compression algorithm
+algorithm_params WO set up compression algorithm parameters
compact WO trigger memory compaction
debug_stat RO this file is used for zram debugging purposes
backing_dev RW set up backend storage for zram to write out
@@ -283,15 +308,15 @@ a single line of text and contains the following stats separated by whitespace:
Unit: 4K bytes
============== =============================================================
-9) Deactivate
-=============
+10) Deactivate
+==============
::
swapoff /dev/zram0
umount /dev/zram1
-10) Reset
+11) Reset
=========
Write any positive value to 'reset' sysfs node::
@@ -487,11 +512,14 @@ registered compression algorithms, increases our chances of finding the
algorithm that successfully compresses a particular page. Sometimes, however,
it is convenient (and sometimes even necessary) to limit recompression to
only one particular algorithm so that it will not try any other algorithms.
-This can be achieved by providing a algo=NAME parameter:::
+This can be achieved by providing an `algo` or `priority` parameter::
#use zstd algorithm only (if registered)
echo "type=huge algo=zstd" > /sys/block/zramX/recompress
+ #use zstd algorithm only (if zstd was registered under priority 1)
+ echo "type=huge priority=1" > /sys/block/zramX/recompress
+
memory tracking
===============
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 9cde26d33843..270501db9f4e 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -78,18 +78,24 @@ Brief summary of control files.
memory.memsw.max_usage_in_bytes show max memory+Swap usage recorded
memory.soft_limit_in_bytes set/show soft limit of memory usage
This knob is not available on CONFIG_PREEMPT_RT systems.
+ This knob is deprecated and shouldn't be
+ used.
memory.stat show various statistics
memory.use_hierarchy set/show hierarchical account enabled
This knob is deprecated and shouldn't be
used.
memory.force_empty trigger forced page reclaim
memory.pressure_level set memory pressure notifications
+ This knob is deprecated and shouldn't be
+ used.
memory.swappiness set/show swappiness parameter of vmscan
(See sysctl's vm.swappiness)
memory.move_charge_at_immigrate set/show controls of moving charges
This knob is deprecated and shouldn't be
used.
memory.oom_control set/show oom controls.
+ This knob is deprecated and shouldn't be
+ used.
memory.numa_stat show the number of memory usage per numa
node
memory.kmem.limit_in_bytes Deprecated knob to set and read the kernel
@@ -105,10 +111,18 @@ Brief summary of control files.
memory.kmem.max_usage_in_bytes show max kernel memory usage recorded
memory.kmem.tcp.limit_in_bytes set/show hard limit for tcp buf memory
+ This knob is deprecated and shouldn't be
+ used.
memory.kmem.tcp.usage_in_bytes show current tcp buf memory allocation
+ This knob is deprecated and shouldn't be
+ used.
memory.kmem.tcp.failcnt show the number of tcp buf memory usage
hits limits
+ This knob is deprecated and shouldn't be
+ used.
memory.kmem.tcp.max_usage_in_bytes show max tcp buf memory usage recorded
+ This knob is deprecated and shouldn't be
+ used.
==================================== ==========================================
1. History
@@ -693,8 +707,10 @@ For compatibility reasons writing 1 to memory.use_hierarchy will always pass::
# echo 1 > memory.use_hierarchy
-7. Soft limits
-==============
+7. Soft limits (DEPRECATED)
+===========================
+
+THIS IS DEPRECATED!
Soft limits allow for greater sharing of memory. The idea behind soft limits
is to allow control groups to use as much of the memory as needed, provided
@@ -834,8 +850,10 @@ It's applicable for root and non-root cgroup.
.. _cgroup-v1-memory-oom-control:
-10. OOM Control
-===============
+10. OOM Control (DEPRECATED)
+============================
+
+THIS IS DEPRECATED!
memory.oom_control file is for OOM notification and other controls.
@@ -882,8 +900,10 @@ At reading, current status of OOM is shown.
The number of processes belonging to this cgroup killed by any
kind of OOM killer.
-11. Memory Pressure
-===================
+11. Memory Pressure (DEPRECATED)
+================================
+
+THIS IS DEPRECATED!
The pressure level notifications can be used to monitor the memory
allocation cost; based on the pressure, applications can implement
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index c65756afd372..69af2173555f 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1343,11 +1343,14 @@ The following nested keys are defined.
all the existing limitations and potential future extensions.
memory.peak
- A read-only single value file which exists on non-root
- cgroups.
+ A read-write single value file which exists on non-root cgroups.
+
+ The max memory usage recorded for the cgroup and its descendants since
+ either the creation of the cgroup or the most recent reset for that FD.
- The max memory usage recorded for the cgroup and its
- descendants since the creation of the cgroup.
+ A write of any non-empty string to this file resets it to the
+ current memory usage for subsequent reads through the same
+ file descriptor.
memory.oom.group
A read-write single value file which exists on non-root
@@ -1624,6 +1627,25 @@ The following nested keys are defined.
Usually because failed to allocate some continuous swap space
for the huge page.
+ numa_pages_migrated (npn)
+ Number of pages migrated by NUMA balancing.
+
+ numa_pte_updates (npn)
+ Number of pages whose page table entries are modified by
+ NUMA balancing to produce NUMA hinting faults on access.
+
+ numa_hint_faults (npn)
+ Number of NUMA hinting faults.
+
+ pgdemote_kswapd
+ Number of pages demoted by kswapd.
+
+ pgdemote_direct
+ Number of pages demoted directly.
+
+ pgdemote_khugepaged
+ Number of pages demoted by khugepaged.
+
memory.numa_stat
A read-only nested-keyed file which exists on non-root cgroups.
@@ -1673,11 +1695,14 @@ The following nested keys are defined.
Healthy workloads are not expected to reach this limit.
memory.swap.peak
- A read-only single value file which exists on non-root
- cgroups.
+ A read-write single value file which exists on non-root cgroups.
+
+ The max swap usage recorded for the cgroup and its descendants since
+ the creation of the cgroup or the most recent reset for that FD.
- The max swap usage recorded for the cgroup and its
- descendants since the creation of the cgroup.
+ A write of any non-empty string to this file resets it to the
+ current swap usage for subsequent reads through the same
+ file descriptor.
memory.swap.max
A read-write single value file which exists on non-root
@@ -1741,6 +1766,8 @@ The following nested keys are defined.
Note that this is subtly different from setting memory.swap.max to
0, as it still allows for pages to be written to the zswap pool.
+ This setting has no effect if zswap is disabled, and swapping
+ is allowed unless memory.swap.max is set to 0.
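+
+ For example, writing 0 to this control keeps a cgroup's pages
+ eligible for the zswap pool while preventing writeback to the
+ backing swap device (the cgroup path is illustrative)::
+
+   echo 0 > /sys/fs/cgroup/mygroup/memory.zswap.writeback
+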
memory.pressure
A read-only nested-keyed file.
diff --git a/Documentation/admin-guide/device-mapper/delay.rst b/Documentation/admin-guide/device-mapper/delay.rst
index 917ba8c33359..4d667228e744 100644
--- a/Documentation/admin-guide/device-mapper/delay.rst
+++ b/Documentation/admin-guide/device-mapper/delay.rst
@@ -3,29 +3,52 @@ dm-delay
========
Device-Mapper's "delay" target delays reads and/or writes
-and maps them to different devices.
+and/or flushes and optionally maps them to different devices.
-Parameters::
+Arguments::
<device> <offset> <delay> [<write_device> <write_offset> <write_delay>
[<flush_device> <flush_offset> <flush_delay>]]
-With separate write parameters, the first set is only used for reads.
+A table line has to have either 3, 6 or 9 arguments:
+
+3: apply offset and delay to read, write and flush operations on the device
+
+6: apply offset and delay to the device, and also apply write_offset and
+   write_delay to write and flush operations on an optionally different
+   write_device with an optionally different sector offset
+
+9: same as 6 arguments plus define flush_offset and flush_delay explicitly
+   on/with an optionally different flush_device/flush_offset.
+
Offsets are specified in sectors.
+
Delays are specified in milliseconds.
+
Example scripts
===============
::
-
#!/bin/sh
- # Create device delaying rw operation for 500ms
- echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed
+ #
+ # Create mapped device named "delayed" delaying read, write and flush operations for 500ms.
+ #
+ dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 0 500"
::
+ #!/bin/sh
+ #
+ # Create mapped device delaying write and flush operations for 400ms and
+ # splitting reads to device $1 but writes and flushes to a different
+ # device $2, at offsets of 2048 and 4096 sectors respectively.
+ #
+ dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 2048 0 $2 4096 400"
+::
#!/bin/sh
- # Create device delaying only write operation for 500ms and
- # splitting reads and writes to different devices $1 $2
- echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
+ #
+ # Create mapped device delaying reads for 50ms, writes for 100ms and
+ # flushes for 333ms onto the same backing device at offset 0 sectors.
+ #
+ dmsetup create delayed --table "0 `blockdev --getsz $1` delay $1 0 50 $1 0 100 $1 0 333"
diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst
index 48a48bd09372..9f8139ff97d6 100644
--- a/Documentation/admin-guide/device-mapper/dm-crypt.rst
+++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst
@@ -160,6 +160,10 @@ iv_large_sectors
The <iv_offset> must be multiple of <sector_size> (in 512 bytes units)
if this flag is specified.
+integrity_key_size:<bytes>
+ Use an integrity key of <bytes> size instead of the digest size of
+ the HMAC algorithm in use.
+
Module parameters::
max_read_size
diff --git a/Documentation/admin-guide/device-mapper/vdo.rst b/Documentation/admin-guide/device-mapper/vdo.rst
index c69ac186863a..a14e6d3e787c 100644
--- a/Documentation/admin-guide/device-mapper/vdo.rst
+++ b/Documentation/admin-guide/device-mapper/vdo.rst
@@ -251,7 +251,12 @@ The messages are:
by the vdostats userspace program to interpret the output
buffer.
- dump:
+ config:
+ Outputs useful vdo configuration information. Mostly used
+ by users who want to recreate a similar VDO volume and
+ want to know the creation configuration used.
+
+ dump:
Dumps many internal structures to the system log. This is
not always safe to run, so it should only be used to debug
a hung vdo. Optional parameters to specify structures to
diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst
index 5740d85439ff..2418b0c2d3df 100644
--- a/Documentation/admin-guide/ext4.rst
+++ b/Documentation/admin-guide/ext4.rst
@@ -212,16 +212,6 @@ When mounting an ext4 filesystem, the following option are accepted:
that ext4's inode table readahead algorithm will pre-read into the
buffer cache. The default value is 32 blocks.
- nouser_xattr
- Disables Extended User Attributes. See the attr(5) manual page for
- more information about extended attributes.
-
- noacl
- This option disables POSIX Access Control List support. If ACL support
- is enabled in the kernel configuration (CONFIG_EXT4_FS_POSIX_ACL), ACL
- is enabled by default on mount. See the acl(5) manual page for more
- information about acl.
-
bsddf (*)
Make 'df' act like BSD.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8337d0fed311..1518343bbe22 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2677,6 +2677,23 @@
Default is Y (on).
+ kvm.enable_virt_at_load=[KVM,ARM64,LOONGARCH,MIPS,RISCV,X86]
+ If enabled, KVM will enable virtualization in hardware
+ when KVM is loaded, and disable virtualization when KVM
+ is unloaded (if KVM is built as a module).
+
+ If disabled, KVM will dynamically enable and disable
+ virtualization on-demand when creating and destroying
+ VMs, i.e. on the 0=>1 and 1=>0 transitions of the
+ number of VMs.
+
+ Enabling virtualization at module load avoids potential
+ latency for creation of the 0=>1 VM, as KVM serializes
+ virtualization enabling across all online CPUs. The
+ "cost" of enabling virtualization when KVM is loaded is
+ that doing so may interfere with using out-of-tree
+ hypervisors that want to "own" virtualization hardware.
+
kvm.enable_vmware_backdoor=[KVM] Support VMware backdoor PV interface.
Default is false (don't support).
@@ -4152,6 +4169,21 @@
Disable NUMA, Only set up a single NUMA node
spanning all memory.
+ numa=fake=<size>[MG]
+ [KNL, ARM64, RISCV, X86, EARLY]
+ If given as a memory unit, fills all system RAM with nodes
+ of the given size, interleaved over physical nodes.
+
+ numa=fake=<N>
+ [KNL, ARM64, RISCV, X86, EARLY]
+ If given as an integer, fills all system RAM with N
+ fake nodes interleaved over physical nodes.
+
+ numa=fake=<N>U
+ [KNL, ARM64, RISCV, X86, EARLY]
+ If given as an integer followed by 'U', it will
+ divide each physical node into N emulated nodes.
+
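+ For example (values illustrative): "numa=fake=2G" fills RAM
+ with 2G-sized fake nodes, "numa=fake=8" creates 8 fake nodes
+ interleaved over physical nodes, and "numa=fake=4U" divides
+ each physical node into 4 emulated nodes.
+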
numa_balancing= [KNL,ARM64,PPC,RISCV,S390,X86] Enable or disable automatic
NUMA balancing.
Allowed values are enable and disable
@@ -6655,6 +6687,15 @@
<deci-seconds>: poll all this frequency
0: no polling (default)
+ thp_anon= [KNL]
+ Format: <size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>
+ state is one of "always", "madvise", "never" or "inherit".
+ Control the default behavior of the system with respect
+ to anonymous transparent hugepages.
+ Can be used multiple times for multiple anon THP sizes.
+ See Documentation/admin-guide/mm/transhuge.rst for more
+ details.
+
threadirqs [KNL,EARLY]
Force threading of all interrupt handlers except those
marked explicitly IRQF_NO_THREAD.
@@ -6784,6 +6825,51 @@
the same thing would happen if it was left off). The irq_handler_entry
event, and all events under the "initcall" system.
+ Flags can be added to the instance to modify its behavior when it is
+ created. The flags are separated by '^'.
+
+ The available flags are:
+
+ traceoff - Have tracing disabled in the instance after it is created.
+ traceprintk - Have trace_printk() write into this trace instance
+ (note, "printk" and "trace_printk" can also be used)
+
+ trace_instance=foo^traceoff^traceprintk,sched,irq
+
+ The flags must come before the defined events.
+
+ If memory has been reserved (see memmap for x86), the instance
+ can use that memory:
+
+ memmap=12M$0x284500000 trace_instance=boot_map@0x284500000:12M
+
+ The above will create a "boot_map" instance that uses the physical
+ memory at 0x284500000 that is 12 MB in size. The per CPU buffers of
+ that instance will be split up accordingly.
+
+ Alternatively, the memory can be reserved by the reserve_mem option:
+
+ reserve_mem=12M:4096:trace trace_instance=boot_map@trace
+
+ This will reserve 12 megabytes at boot up with a 4096 byte alignment
+ and place the ring buffer in this memory. Note that due to KASLR, the
+ memory may not be at the same location each time, in which case the
+ buffer content will not be preserved.
+
+ Also note that the layout of the ring buffer data may change between
+ kernel versions, in which case the validator will fail and reset the
+ ring buffer if the layout is not the same as that of the previous kernel.
+
+ If the ring buffer is used for persistent bootups and has events enabled,
+ it is recommended to disable tracing so that events from a previous boot
+ do not mix with events of the current boot (unless you are debugging a
+ random crash at boot up).
+
+ reserve_mem=12M:4096:trace trace_instance=boot_map^traceoff^traceprintk@trace,sched,irq
+
+ See also Documentation/trace/debugging.rst
+
trace_options=[option-list]
[FTRACE] Enable or disable tracer options at boot.
The option-list is a comma delimited list of options
diff --git a/Documentation/admin-guide/media/cec.rst b/Documentation/admin-guide/media/cec.rst
index 6b30e355cf23..92690e1f2183 100644
--- a/Documentation/admin-guide/media/cec.rst
+++ b/Documentation/admin-guide/media/cec.rst
@@ -42,10 +42,14 @@ dongles):
``persistent_config``: by default this is off, but when set to 1 the driver
will store the current settings to the device's internal eeprom and restore
it the next time the device is connected to the USB port.
+
- RainShadow Tech. Note: this driver does not support the persistent_config
module option of the Pulse-Eight driver. The hardware supports it, but I
have no plans to add this feature. But I accept patches :-)
+- Extron DA HD 4K PLUS HDMI Distribution Amplifier. See
+ :ref:`extron_da_hd_4k_plus` for more information.
+
Miscellaneous:
- vivid: emulates a CEC receiver and CEC transmitter.
@@ -378,3 +382,86 @@ it later using ``--analyze-pin``.
You can also use this as a full-fledged CEC device by configuring it
using ``cec-ctl --tv -p0.0.0.0`` or ``cec-ctl --playback -p1.0.0.0``.
+
+.. _extron_da_hd_4k_plus:
+
+Extron DA HD 4K PLUS CEC Adapter driver
+=======================================
+
+This driver is for the Extron DA HD 4K PLUS series of HDMI Distribution
+Amplifiers: https://www.extron.com/product/dahd4kplusseries
+
+The 2, 4 and 6 port models are supported.
+
+Firmware version 1.02.0001 or higher is required.
+
+Note that older Extron hardware revisions have a problem with the CEC voltage,
+which may mean that CEC will not work. This is fixed in hardware revisions
+E34814 and up.
+
+The CEC support has two modes: the first is a manual mode where userspace has
+to manually control CEC for the HDMI Input and all HDMI Outputs. While this gives
+full control, it is also complicated.
+
+The second mode is an automatic mode, which is selected if the module option
+``vendor_id`` is set. In that case the driver controls CEC and CEC messages
+received in the input will be distributed to the outputs. It is still possible
+to use the /dev/cecX devices to talk to the connected devices directly, but it is
+the driver that configures everything and deals with things like Hotplug Detect
+changes.
+
+The driver also takes care of the EDIDs: /dev/videoX devices are created to
+read the EDIDs and (for the HDMI Input port) to set the EDID.
+
+By default userspace is responsible for setting the EDID for the HDMI Input
+according to the EDIDs of the connected displays. But if the ``manufacturer_name``
+module option is set, then the driver will take care of setting the EDID
+of the HDMI Input based on the supported resolutions of the connected displays.
+Currently the driver only supports resolutions 1080p60 and 4kp60: if all connected
+displays support 4kp60, then it will advertise 4kp60 on the HDMI input, otherwise
+it will fall back to an EDID that just reports 1080p60.
+
+The status of the Extron is reported in ``/sys/kernel/debug/cec/cecX/status``.
+
+The extron-da-hd-4k-plus driver implements the following module options:
+
+``debug``
+---------
+
+If set to 1, then all serial port traffic is shown.
+
+``vendor_id``
+-------------
+
+The CEC Vendor ID to report to connected displays.
+
+If set, then the driver will take care of distributing CEC messages received
+on the input to the HDMI outputs. This is done for the following CEC messages:
+
+- <Standby>
+- <Image View On> and <Text View On>
+- <Give Device Power Status>
+- <Set System Audio Mode>
+- <Request Current Latency>
+
+If not set, then userspace is responsible for this, and it will have to
+configure the CEC devices for HDMI Input and the HDMI Outputs manually.
+
+``manufacturer_name``
+---------------------
+
+A three character manufacturer name that is used in the EDID for the HDMI
+Input. If not set, then userspace is responsible for configuring an EDID.
+If set, then the driver will update the EDID automatically based on the
+resolutions supported by the connected displays, and it will no longer be
+possible to set the EDID for the HDMI Input manually.
+
+``hpd_never_low``
+-----------------
+
+If set, then the Hotplug Detect pin of the HDMI Input will always be high,
+even if nothing is connected to the HDMI Outputs. If not set (the default)
+then the Hotplug Detect pin of the HDMI input will go low if all the detected
+Hotplug Detect pins of the HDMI Outputs are also low.
+
+This option may be changed dynamically.
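+
+As an illustrative sketch (the vendor ID and manufacturer name below are
+placeholders; choose values appropriate for your setup), the driver can be
+loaded in automatic mode with::
+
+    modprobe extron-da-hd-4k-plus vendor_id=0x000c03 manufacturer_name=EXT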
diff --git a/Documentation/admin-guide/media/mgb4.rst b/Documentation/admin-guide/media/mgb4.rst
index e434d4a9eeb3..b9da127c074d 100644
--- a/Documentation/admin-guide/media/mgb4.rst
+++ b/Documentation/admin-guide/media/mgb4.rst
@@ -227,8 +227,13 @@ Common FPDL3/GMSL output parameters
open.*
**frame_rate** (RW):
- Output video frame rate in frames per second. The default frame rate is
- 60Hz.
+ Output video signal frame rate limit in frames per second. Due to
+ the limited output pixel clock steps, the card cannot always generate
+ a frame rate perfectly matching the value required by the connected
+ display. Using this parameter one can limit the frame rate by "crippling"
+ the signal so that the lines are not equal (the porches of the last line
+ differ) but the signal appears to the connected display as having the
+ exact frame rate. The default frame rate limit is 60Hz.
**hsync_polarity** (RW):
HSYNC signal polarity.
@@ -253,33 +258,33 @@ Common FPDL3/GMSL output parameters
and there is a non-linear stepping between two consecutive allowed
frequencies. The driver finds the nearest allowed frequency to the given
value and sets it. When reading this property, you get the exact
- frequency set by the driver. The default frequency is 70000kHz.
+ frequency set by the driver. The default frequency is 61150kHz.
*Note: This parameter can not be changed while the output v4l2 device is
open.*
**hsync_width** (RW):
- Width of the HSYNC signal in pixels. The default value is 16.
+ Width of the HSYNC signal in pixels. The default value is 40.
**vsync_width** (RW):
- Width of the VSYNC signal in video lines. The default value is 2.
+ Width of the VSYNC signal in video lines. The default value is 20.
**hback_porch** (RW):
Number of PCLK pulses between deassertion of the HSYNC signal and the first
- valid pixel in the video line (marked by DE=1). The default value is 32.
+ valid pixel in the video line (marked by DE=1). The default value is 50.
**hfront_porch** (RW):
Number of PCLK pulses between the end of the last valid pixel in the video
line (marked by DE=1) and assertion of the HSYNC signal. The default value
- is 32.
+ is 50.
**vback_porch** (RW):
Number of video lines between deassertion of the VSYNC signal and the video
- line with the first valid pixel (marked by DE=1). The default value is 2.
+ line with the first valid pixel (marked by DE=1). The default value is 31.
**vfront_porch** (RW):
Number of video lines between the end of the last valid pixel line (marked
- by DE=1) and assertion of the VSYNC signal. The default value is 2.
+ by DE=1) and assertion of the VSYNC signal. The default value is 30.
FPDL3 specific input parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/admin-guide/media/rkisp1.rst b/Documentation/admin-guide/media/rkisp1.rst
index 6f14d9561fa5..6c878c71442f 100644
--- a/Documentation/admin-guide/media/rkisp1.rst
+++ b/Documentation/admin-guide/media/rkisp1.rst
@@ -114,11 +114,18 @@ to be applied to the hardware during a video stream, allowing userspace
to dynamically modify values such as black level, cross talk corrections
and others.
-The buffer format is defined by struct :c:type:`rkisp1_params_cfg`, and
-userspace should set
+The ISP driver supports two different parameters configuration methods, the
+`fixed parameters format` or the `extensible parameters format`.
+
+When using the `fixed parameters` method the buffer format is defined by struct
+:c:type:`rkisp1_params_cfg`, and userspace should set
:ref:`V4L2_META_FMT_RK_ISP1_PARAMS <v4l2-meta-fmt-rk-isp1-params>` as the
dataformat.
+When using the `extensible parameters` method the buffer format is defined by
+struct :c:type:`rkisp1_ext_params_cfg`, and userspace should set
+:ref:`V4L2_META_FMT_RK_ISP1_EXT_PARAMS <v4l2-meta-fmt-rk-isp1-ext-params>` as
+the dataformat.
Capturing Video Frames Example
==============================
diff --git a/Documentation/admin-guide/media/vivid.rst b/Documentation/admin-guide/media/vivid.rst
index c9d301ab46a3..034ca7c77fb9 100644
--- a/Documentation/admin-guide/media/vivid.rst
+++ b/Documentation/admin-guide/media/vivid.rst
@@ -1343,7 +1343,7 @@ Some Future Improvements
Just as a reminder and in no particular order:
- Add a virtual alsa driver to test audio
-- Add virtual sub-devices and media controller support
+- Add virtual sub-devices
- Some support for testing compressed video
- Add support to loop raw VBI output to raw VBI input
- Add support to loop teletext sliced VBI output to VBI input
@@ -1358,4 +1358,4 @@ Just as a reminder and in no particular order:
- Make a thread for the RDS generation, that would help in particular for the
"Controls" RDS Rx I/O Mode as the read-only RDS controls could be updated
in real-time.
-- Changing the EDID should cause hotplug detect emulation to happen.
+- Changing the EDID doesn't wait 100 ms before setting the HPD signal.
diff --git a/Documentation/admin-guide/mm/damon/start.rst b/Documentation/admin-guide/mm/damon/start.rst
index 054010a7f3d8..c4dddf6733cd 100644
--- a/Documentation/admin-guide/mm/damon/start.rst
+++ b/Documentation/admin-guide/mm/damon/start.rst
@@ -7,7 +7,7 @@ Getting Started
This document briefly describes how you can use DAMON by demonstrating its
default user space tool. Please note that this document describes only a part
of its features for brevity. Please refer to the usage `doc
-<https://github.com/awslabs/damo/blob/next/USAGE.md>`_ of the tool for more
+<https://github.com/damonitor/damo/blob/next/USAGE.md>`_ of the tool for more
details.
@@ -26,7 +26,7 @@ User Space Tool
For the demonstration, we will use the default user space tool for DAMON,
called DAMON Operator (DAMO). It is available at
-https://github.com/awslabs/damo. The examples below assume that ``damo`` is on
+https://github.com/damonitor/damo. The examples below assume that ``damo`` is on
your ``$PATH``. It's not mandatory, though.
Because DAMO is using the sysfs interface (refer to :doc:`usage` for the
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index 26df6cfa4441..d9be9f7caa7d 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -7,19 +7,19 @@ Detailed Usages
DAMON provides below interfaces for different users.
- *DAMON user space tool.*
- `This <https://github.com/awslabs/damo>`_ is for privileged people such as
+ `This <https://github.com/damonitor/damo>`_ is for privileged people such as
system administrators who want a just-working human-friendly interface.
Using this, users can use the DAMON’s major features in a human-friendly way.
It may not be highly tuned for special cases, though. For more detail,
please refer to its `usage document
- <https://github.com/awslabs/damo/blob/next/USAGE.md>`_.
+ <https://github.com/damonitor/damo/blob/next/USAGE.md>`_.
- *sysfs interface.*
:ref:`This <sysfs_interface>` is for privileged user space programmers who
want more optimized use of DAMON. Using this, users can use DAMON’s major
features by reading from and writing to special sysfs files. Therefore,
you can write and use your personalized DAMON sysfs wrapper programs that
reads/writes the sysfs files instead of you. The `DAMON user space tool
- <https://github.com/awslabs/damo>`_ is one example of such programs.
+ <https://github.com/damonitor/damo>`_ is one example of such programs.
- *Kernel Space Programming Interface.*
:doc:`This </mm/damon/api>` is for kernel space programmers. Using this,
users can utilize every feature of DAMON most flexibly and efficiently by
@@ -543,7 +543,7 @@ memory rate becomes larger than 60%, or lower than 30%". ::
# echo 300 > watermarks/low
Please note that it's highly recommended to use user space tools like `damo
-<https://github.com/awslabs/damo>`_ rather than manually reading and writing
+<https://github.com/damonitor/damo>`_ rather than manually reading and writing
the files as above. Above is only for an example.
.. _tracepoint:
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index 098f14d83e99..cb2c080f400c 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -294,8 +294,9 @@ The following files are currently defined:
``crash_hotplug`` read-only: when changes to the system memory map
occur due to hot un/plug of memory, this file contains
'1' if the kernel updates the kdump capture kernel memory
- map itself (via elfcorehdr), or '0' if userspace must update
- the kdump capture kernel memory map.
+ map itself (via elfcorehdr and other relevant kexec
+ segments), or '0' if userspace must update the kdump
+ capture kernel memory map.
Availability depends on the CONFIG_MEMORY_HOTPLUG kernel
configuration option.
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 058485daf186..cfdd16a52e39 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -202,6 +202,16 @@ PMD-mappable transparent hugepage::
cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
+All THPs at fault and collapse time will be added to _deferred_list,
+and will therefore be split under memory pressure if they are considered
+"underused". A THP is underused if the number of zero-filled pages in
+the THP is above max_ptes_none (see below). It is possible to disable
+this behaviour by writing 0 to shrink_underused, and to enable it by
+writing 1 to it::
+
+ echo 0 > /sys/kernel/mm/transparent_hugepage/shrink_underused
+ echo 1 > /sys/kernel/mm/transparent_hugepage/shrink_underused
+
khugepaged will be automatically started when PMD-sized THP is enabled
(either of the per-size anon control or the top-level control are set
to "always" or "madvise"), and it'll be automatically shutdown when
@@ -284,13 +294,37 @@ that THP is shared. Exceeding the number would block the collapse::
A higher value may increase memory footprint for some workloads.
-Boot parameter
-==============
+Boot parameters
+===============
+
+You can change the sysfs boot time default for the top-level "enabled"
+control by passing the parameter ``transparent_hugepage=always`` or
+``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` to the
+kernel command line.
+
+Alternatively, each supported anonymous THP size can be controlled by
+passing ``thp_anon=<size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>``,
+where ``<size>`` is the THP size (it must be a power-of-2 multiple of
+PAGE_SIZE and a supported anonymous THP size) and ``<state>`` is one of
+``always``, ``madvise``, ``never`` or ``inherit``.
+
+For example, the following will set 16K, 32K, 64K THP to ``always``,
+set 128K, 512K to ``inherit``, set 256K to ``madvise`` and 1M, 2M
+to ``never``::
-You can change the sysfs boot time defaults of Transparent Hugepage
-Support by passing the parameter ``transparent_hugepage=always`` or
-``transparent_hugepage=madvise`` or ``transparent_hugepage=never``
-to the kernel command line.
+ thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never
+
+``thp_anon=`` may be specified multiple times to configure all THP sizes as
+required. If ``thp_anon=`` is specified at least once, any anon THP sizes
+not explicitly configured on the command line are implicitly set to
+``never``.
+
+``transparent_hugepage`` setting only affects the global toggle. If
+``thp_anon`` is not specified, PMD_ORDER THP will default to ``inherit``.
+However, if a valid ``thp_anon`` setting is provided by the user, the
+PMD_ORDER THP policy will be overridden. If the policy for PMD_ORDER
+is not defined within a valid ``thp_anon``, its policy will default to
+``never``.
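+
+For example, an illustrative combination of both parameters::
+
+	transparent_hugepage=madvise thp_anon=16K-64K:always
+
+sets the top-level control to ``madvise``, sets 16K, 32K and 64K anon THP
+to ``always``, and leaves every other anon THP size, including the
+PMD-sized one, at ``never`` since it is not listed in ``thp_anon``.
+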
Hugepages in tmpfs/shmem
========================
@@ -447,6 +481,12 @@ thp_deferred_split_page
splitting it would free up some memory. Pages on split queue are
going to be split under memory pressure.
+thp_underused_split_page
+ is incremented when a huge page on the split queue was split
+ because it was underused. A THP is underused if the number of
+ zero pages in the THP is above a certain threshold
+ (/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none).
+
thp_split_pmd
is incremented every time a PMD split into table of PTEs.
This can happen, for instance, when application calls mprotect() or
@@ -527,6 +567,18 @@ split_deferred
it would free up some memory. Pages on split queue are going to
be split under memory pressure, if splitting is possible.
+nr_anon
+ the number of anonymous THP we have in the whole system. These THPs
+ might be currently entirely mapped or have partially unmapped/unused
+ subpages.
+
+nr_anon_partially_mapped
+ the number of anonymous THP which are likely partially mapped, possibly
+ wasting memory, and have been queued for deferred memory reclamation.
+ Note that in some corner cases (e.g., failed migration), we might detect
+ an anonymous THP as "partially mapped" and count it here, even though it
+ is not actually partially mapped anymore.
+
As the system ages, allocating huge pages may be expensive as the
system uses memory compaction to copy data around memory to free a
huge page for use. There are some counters in ``/proc/vmstat`` to help
diff --git a/Documentation/arch/loongarch/irq-chip-model.rst b/Documentation/arch/loongarch/irq-chip-model.rst
index 7988f4192363..6dd48256e39f 100644
--- a/Documentation/arch/loongarch/irq-chip-model.rst
+++ b/Documentation/arch/loongarch/irq-chip-model.rst
@@ -85,6 +85,38 @@ to CPUINTC directly::
| Devices |
+---------+
+Advanced Extended IRQ model
+===========================
+
+In this model, IPI (Inter-Processor Interrupt) and CPU Local Timer interrupt go
+to CPUINTC directly, CPU UARTS interrupts go to LIOINTC, PCH-MSI interrupts go
+to AVECINTC and then go to CPUINTC directly, while all other device interrupts
+go to PCH-PIC/PCH-LPC and are gathered by EIOINTC, and then go to CPUINTC directly::
+
+ +-----+ +-----------------------+ +-------+
+ | IPI | --> | CPUINTC | <-- | Timer |
+ +-----+ +-----------------------+ +-------+
+ ^ ^ ^
+ | | |
+ +---------+ +----------+ +---------+ +-------+
+ | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs |
+ +---------+ +----------+ +---------+ +-------+
+ ^ ^
+ | |
+ +---------+ +---------+
+ | PCH-PIC | | PCH-MSI |
+ +---------+ +---------+
+ ^ ^ ^
+ | | |
+ +---------+ +---------+ +---------+
+ | Devices | | PCH-LPC | | Devices |
+ +---------+ +---------+ +---------+
+ ^
+ |
+ +---------+
+ | Devices |
+ +---------+
+
ACPI-related definitions
========================
diff --git a/Documentation/arch/s390/vfio-ap.rst b/Documentation/arch/s390/vfio-ap.rst
index ea744cbc8687..eba1991fbdba 100644
--- a/Documentation/arch/s390/vfio-ap.rst
+++ b/Documentation/arch/s390/vfio-ap.rst
@@ -999,6 +999,36 @@ the vfio_ap mediated device to which it is assigned as long as each new APQN
resulting from plugging it in references a queue device bound to the vfio_ap
device driver.
+Driver Features
+===============
+The vfio_ap driver exposes a sysfs file containing supported features.
+This exists so third party tools (like Libvirt and mdevctl) can query the
+availability of specific features.
+
+The features list can be found here: /sys/bus/matrix/devices/matrix/features
+
+Entries are space delimited. Each entry consists of a combination of
+alphanumeric and underscore characters.
+
+Example::
+
+ cat /sys/bus/matrix/devices/matrix/features
+ guest_matrix dyn ap_config
+
+The following features are advertised:
+
++--------------+---------------------------------------------------------------+
+| Flag         | Description                                                   |
++==============+===============================================================+
+| guest_matrix | guest_matrix attribute exists. It reports the matrix of       |
+|              | adapters and domains that are or will be passed through to a  |
+|              | guest when the mdev is attached to it.                        |
++--------------+---------------------------------------------------------------+
+| dyn          | Indicates hot plug/unplug of AP adapters, domains and control |
+|              | domains for a guest to which the mdev is attached.            |
++--------------+---------------------------------------------------------------+
+| ap_config    | ap_config interface for one-shot modifications to mdev config |
++--------------+---------------------------------------------------------------+
+
Limitations
===========
Live guest migration is not supported for guests using AP devices without
diff --git a/Documentation/arch/x86/x86_64/boot-options.rst b/Documentation/arch/x86/x86_64/boot-options.rst
index 137432d34109..98d4805f0823 100644
--- a/Documentation/arch/x86/x86_64/boot-options.rst
+++ b/Documentation/arch/x86/x86_64/boot-options.rst
@@ -170,18 +170,6 @@ NUMA
Don't parse the HMAT table for NUMA setup, or soft-reserved memory
partitioning.
- numa=fake=<size>[MG]
- If given as a memory unit, fills all system RAM with nodes of
- size interleaved over physical nodes.
-
- numa=fake=<N>
- If given as an integer, fills all system RAM with N fake nodes
- interleaved over physical nodes.
-
- numa=fake=<N>U
- If given as an integer followed by 'U', it will divide each
- physical node into N emulated nodes.
-
ACPI
====
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 257a7e1cdf5d..93060283b6fd 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -368,7 +368,7 @@ No additional type data follow ``btf_type``.
* ``info.kind_flag``: 0
* ``info.kind``: BTF_KIND_FUNC
* ``info.vlen``: linkage information (BTF_FUNC_STATIC, BTF_FUNC_GLOBAL
- or BTF_FUNC_EXTERN)
+ or BTF_FUNC_EXTERN - see :ref:`BTF_Function_Linkage_Constants`)
* ``type``: a BTF_KIND_FUNC_PROTO type
No additional type data follow ``btf_type``.
@@ -424,9 +424,8 @@ following data::
__u32 linkage;
};
-``struct btf_var`` encoding:
- * ``linkage``: currently only static variable 0, or globally allocated
- variable in ELF sections 1
+``btf_var.linkage`` may take the values: BTF_VAR_STATIC, BTF_VAR_GLOBAL_ALLOCATED or BTF_VAR_GLOBAL_EXTERN -
+see :ref:`BTF_Var_Linkage_Constants`.
Not all type of global variables are supported by LLVM at this point.
The following is currently available:
@@ -549,6 +548,38 @@ The ``btf_enum64`` encoding:
If the original enum value is signed and the size is less than 8,
that value will be sign extended into 8 bytes.
+2.3 Constant Values
+-------------------
+
+.. _BTF_Function_Linkage_Constants:
+
+2.3.1 Function Linkage Constant Values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. table:: Function Linkage Values and Meanings
+
+ =================== ===== ===========
+ kind value description
+ =================== ===== ===========
+ ``BTF_FUNC_STATIC`` 0x0 definition of subprogram not visible outside containing compilation unit
+ ``BTF_FUNC_GLOBAL`` 0x1 definition of subprogram visible outside containing compilation unit
+ ``BTF_FUNC_EXTERN`` 0x2 declaration of a subprogram whose definition is outside the containing compilation unit
+ =================== ===== ===========
+
+
+.. _BTF_Var_Linkage_Constants:
+
+2.3.2 Variable Linkage Constant Values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. table:: Variable Linkage Values and Meanings
+
+ ============================ ===== ===========
+ kind value description
+ ============================ ===== ===========
+ ``BTF_VAR_STATIC`` 0x0 definition of global variable not visible outside containing compilation unit
+ ``BTF_VAR_GLOBAL_ALLOCATED`` 0x1 definition of global variable visible outside containing compilation unit
+ ``BTF_VAR_GLOBAL_EXTERN`` 0x2 declaration of global variable whose definition is outside the containing compilation unit
+ ============================ ===== ===========
+
3. BTF Kernel API
=================
diff --git a/Documentation/bpf/libbpf/program_types.rst b/Documentation/bpf/libbpf/program_types.rst
index 63bb88846e50..218b020a2f81 100644
--- a/Documentation/bpf/libbpf/program_types.rst
+++ b/Documentation/bpf/libbpf/program_types.rst
@@ -121,6 +121,8 @@ described in more detail in the footnotes.
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
+| ``BPF_PROG_TYPE_NETFILTER`` | | ``netfilter`` | |
++-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | |
@@ -131,11 +133,23 @@ described in more detail in the footnotes.
+ + +----------------------------------+-----------+
| | | ``raw_tracepoint+`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
-| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` | |
+| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` [#tc_legacy]_ | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
-| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` | |
+| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` [#tc_legacy]_ | |
+ + +----------------------------------+-----------+
-| | | ``tc`` | |
+| | | ``tc`` [#tc_legacy]_ | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_NETKIT_PRIMARY`` | ``netkit/primary`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_NETKIT_PEER`` | ``netkit/peer`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_TCX_INGRESS`` | ``tc/ingress`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_TCX_EGRESS`` | ``tc/egress`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_TCX_INGRESS`` | ``tcx/ingress`` | |
++ +----------------------------------------+----------------------------------+-----------+
+| | ``BPF_TCX_EGRESS`` | ``tcx/egress`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
@@ -155,7 +169,9 @@ described in more detail in the footnotes.
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
-| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` | |
+| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` [#struct_ops]_ | |
++ + +----------------------------------+-----------+
+| | | ``struct_ops.s+`` [#struct_ops]_ | Yes |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
| ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes |
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+
@@ -209,5 +225,11 @@ described in more detail in the footnotes.
``a-zA-Z0-9_.*?``.
.. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/<hook>``.
.. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``.
+.. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated; use
+   ``tcx/*`` instead.
+.. [#struct_ops] The ``struct_ops`` attach format supports the ``struct_ops[.s]/<name>`` convention,
+ but ``name`` is ignored and it is recommended to just use plain
+ ``SEC("struct_ops[.s]")``. The attachments are defined in a struct initializer
+ that is tagged with ``SEC(".struct_ops[.link]")``.
.. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``.
.. [#iter] The ``iter`` attach format is ``iter[.s]/<struct-name>``.
diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst
index 356894399fbf..d23761540002 100644
--- a/Documentation/bpf/verifier.rst
+++ b/Documentation/bpf/verifier.rst
@@ -418,7 +418,7 @@ The rules for correspondence between registers / stack slots are as follows:
linked to the registers and stack slots of the parent state with the same
indices.
-* For the outer stack frames, only caller saved registers (r6-r9) and stack
+* For the outer stack frames, only callee saved registers (r6-r9) and stack
slots are linked to the registers and stack slots of the parent state with the
same indices.
diff --git a/Documentation/core-api/cleanup.rst b/Documentation/core-api/cleanup.rst
new file mode 100644
index 000000000000..527eb2f8ec6e
--- /dev/null
+++ b/Documentation/core-api/cleanup.rst
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Scope-based Cleanup Helpers
+===========================
+
+.. kernel-doc:: include/linux/cleanup.h
+ :doc: scope-based cleanup helpers
diff --git a/Documentation/core-api/cpu_hotplug.rst b/Documentation/core-api/cpu_hotplug.rst
index dcb0e379e5e8..a21dbf261be7 100644
--- a/Documentation/core-api/cpu_hotplug.rst
+++ b/Documentation/core-api/cpu_hotplug.rst
@@ -737,8 +737,9 @@ can process the event further.
When changes to the CPUs in the system occur, the sysfs file
/sys/devices/system/cpu/crash_hotplug contains '1' if the kernel
-updates the kdump capture kernel list of CPUs itself (via elfcorehdr),
-or '0' if userspace must update the kdump capture kernel list of CPUs.
+updates the kdump capture kernel list of CPUs itself (via elfcorehdr and
+other relevant kexec segments), or '0' if userspace must update the kdump
+capture kernel list of CPUs.
The availability depends on the CONFIG_HOTPLUG_CPU kernel configuration
option.
@@ -750,8 +751,9 @@ file can be used in a udev rule as follows:
SUBSYSTEM=="cpu", ATTRS{crash_hotplug}=="1", GOTO="kdump_reload_end"
For a CPU hot un/plug event, if the architecture supports kernel updates
-of the elfcorehdr (which contains the list of CPUs), then the rule skips
-the unload-then-reload of the kdump capture kernel.
+of the elfcorehdr (which contains the list of CPUs) and other relevant
+kexec segments, then the rule skips the unload-then-reload of the kdump
+capture kernel.
Kernel Inline Documentations Reference
======================================
diff --git a/Documentation/core-api/folio_queue.rst b/Documentation/core-api/folio_queue.rst
new file mode 100644
index 000000000000..1fe7a9bc4b8d
--- /dev/null
+++ b/Documentation/core-api/folio_queue.rst
@@ -0,0 +1,212 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===========
+Folio Queue
+===========
+
+:Author: David Howells <dhowells@redhat.com>
+
+.. Contents:
+
+ * Overview
+ * Initialisation
+ * Adding and removing folios
+ * Querying information about a folio
+ * Querying information about a folio_queue
+ * Folio queue iteration
+ * Folio marks
+ * Lockless simultaneous production/consumption issues
+
+
+Overview
+========
+
+The folio_queue struct forms a single segment in a segmented list of folios
+that can be used to form an I/O buffer. As such, the list can be iterated over
+using the ITER_FOLIOQ iov_iter type.
+
+The publicly accessible members of the structure are::
+
+ struct folio_queue {
+ struct folio_queue *next;
+ struct folio_queue *prev;
+ ...
+ };
+
+A pair of pointers are provided, ``next`` and ``prev``, that point to the
+segments on either side of the segment being accessed. Whilst this is a
+doubly-linked list, it is intentionally not a circular list; the outward
+sibling pointers in terminal segments should be NULL.
+
+Each segment in the list also stores:
+
+ * an ordered sequence of folio pointers,
+ * the size of each folio and
+ * three 1-bit marks per folio,
+
+but these should not be accessed directly, as the underlying data structure may
+change; use the access functions outlined below instead.
+
+The facility can be made accessible by::
+
+ #include <linux/folio_queue.h>
+
+and to use the iterator::
+
+ #include <linux/uio.h>
+
+
+Initialisation
+==============
+
+A segment should be initialised by calling::
+
+ void folioq_init(struct folio_queue *folioq);
+
+with a pointer to the segment to be initialised. Note that this will not
+necessarily initialise all the folio pointers, so care must be taken to check
+the number of folios added.
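+
+As a minimal sketch (the allocation strategy and error handling are the
+caller's own), a fresh segment might be set up with::
+
+    struct folio_queue *fq;
+
+    fq = kmalloc(sizeof(*fq), GFP_KERNEL);
+    if (!fq)
+        return -ENOMEM;
+    folioq_init(fq);    /* segment starts empty, with no sibling pointers */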
+
+
+Adding and removing folios
+==========================
+
+Folios can be set in the next unused slot in a segment struct by calling one
+of::
+
+ unsigned int folioq_append(struct folio_queue *folioq,
+ struct folio *folio);
+
+ unsigned int folioq_append_mark(struct folio_queue *folioq,
+ struct folio *folio);
+
+Both functions update the stored folio count, store the folio and note its
+size. The second function also sets the first mark for the folio added. Both
+functions return the number of the slot used. [!] Note that no attempt is made
+to check that the capacity has not been overrun, and the list will not be
+extended automatically.
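+
+A producer loop might therefore guard the capacity itself, as in this sketch
+(``folioq_full()`` is described below)::
+
+    if (folioq_full(fq))
+        return -ENOBUFS;    /* or chain on a freshly initialised segment */
+    slot = folioq_append(fq, folio);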
+
+A folio can be excised by calling::
+
+ void folioq_clear(struct folio_queue *folioq, unsigned int slot);
+
+This clears the slot in the array and also clears all the marks for that folio,
+but doesn't change the folio count - so future accesses of that slot must check
+if the slot is occupied.
+
+
+Querying information about a folio
+==================================
+
+Information about the folio in a particular slot may be queried by the
+following function::
+
+ struct folio *folioq_folio(const struct folio_queue *folioq,
+ unsigned int slot);
+
+If a folio has not yet been set in that slot, this may yield an undefined
+pointer. The size of the folio in a slot may be queried with either of::
+
+ unsigned int folioq_folio_order(const struct folio_queue *folioq,
+ unsigned int slot);
+
+ size_t folioq_folio_size(const struct folio_queue *folioq,
+ unsigned int slot);
+
+The first function returns the size as an order and the second as a number of
+bytes.
+
+
+Querying information about a folio_queue
+========================================
+
+Information may be retrieved about a particular segment with the following
+functions::
+
+ unsigned int folioq_nr_slots(const struct folio_queue *folioq);
+
+ unsigned int folioq_count(struct folio_queue *folioq);
+
+ bool folioq_full(struct folio_queue *folioq);
+
+The first function returns the maximum capacity of a segment. It must not be
+assumed that this won't vary between segments. The second returns the number
+of folios added to a segment and the third is a shorthand to indicate if the
+segment has been filled to capacity.
+
+Note that the count and fullness are not affected by clearing folios from the
+segment. These are more about indicating how many slots in the array have been
+initialised, and it is assumed that slots won't get reused, but rather that the
+segment will get discarded as the queue is consumed.
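+
+For example, a consumer might total the bytes remaining in a segment,
+checking slot occupancy as advised above (a sketch)::
+
+    size_t total = 0;
+    unsigned int slot;
+
+    for (slot = 0; slot < folioq_count(fq); slot++) {
+        if (!folioq_folio(fq, slot))
+            continue;    /* slot was cleared */
+        total += folioq_folio_size(fq, slot);
+    }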
+
+
+Folio marks
+===========
+
+Folios within a queue can also have marks assigned to them. These marks can be
+used to note information such as whether a folio needs folio_put() called upon
+it. There are three marks available to be set for each folio.
+
+The marks can be set by::
+
+ void folioq_mark(struct folio_queue *folioq, unsigned int slot);
+ void folioq_mark2(struct folio_queue *folioq, unsigned int slot);
+ void folioq_mark3(struct folio_queue *folioq, unsigned int slot);
+
+Cleared by::
+
+ void folioq_unmark(struct folio_queue *folioq, unsigned int slot);
+ void folioq_unmark2(struct folio_queue *folioq, unsigned int slot);
+ void folioq_unmark3(struct folio_queue *folioq, unsigned int slot);
+
+And the marks can be queried by::
+
+ bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot);
+ bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot);
+ bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot);
+
+The marks can be used for any purpose and are not interpreted by this API.
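+
+As a sketch of one such convention (the "needs releasing" example above),
+the first mark might be managed like so::
+
+    unsigned int slot;
+
+    slot = folioq_append_mark(fq, folio);    /* mark 1: we hold a reference */
+
+    /* ... later, when retiring the slot: */
+    if (folioq_is_marked(fq, slot)) {
+        folio_put(folioq_folio(fq, slot));
+        folioq_unmark(fq, slot);
+    }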
+
+
+Folio queue iteration
+=====================
+
+A list of segments may be iterated over with the I/O iterator facility, using
+an ``iov_iter`` iterator of ``ITER_FOLIOQ`` type. The iterator may be
+initialised with::
+
+ void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
+ const struct folio_queue *folioq,
+ unsigned int first_slot, unsigned int offset,
+ size_t count);
+
+This may be told to start at a particular segment, slot and offset within a
+queue. The iov iterator functions will follow the next pointers when advancing
+and the prev pointers when reverting, as needed.
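+
+A reader extracting data from a populated queue might look like this sketch
+(assuming ``fq`` heads the queue, which holds at least ``len`` bytes, and
+``buffer`` is a kernel buffer)::
+
+    struct iov_iter iter;
+
+    /* The queue is the data source; start at slot 0, offset 0. */
+    iov_iter_folio_queue(&iter, ITER_SOURCE, fq, 0, 0, len);
+    if (copy_from_iter(buffer, len, &iter) != len)
+        return -EIO;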
+
+
+Lockless simultaneous production/consumption issues
+===================================================
+
+If properly managed, the list can be extended by the producer at the head end
+and shortened by the consumer at the tail end simultaneously without the need
+to take locks. The ITER_FOLIOQ iterator inserts appropriate barriers to aid
+with this.
+
+Care must be taken when simultaneously producing and consuming a list. If the
+last segment is reached and the folios it refers to are entirely consumed by
+the IOV iterators, an iov_iter struct will be left pointing to the last segment
+with a slot number equal to the capacity of that segment. The iterator will
+try to continue on from this if there's another segment available when it is
+used again, but care must be taken lest the segment be removed and freed by
+the consumer before the iterator is advanced.
+
+It is recommended that the queue always contain at least one segment, even if
+that segment has never been filled or is entirely spent. This prevents the
+head and tail pointers from collapsing.
+
+
+API Function Reference
+======================
+
+.. kernel-doc:: include/linux/folio_queue.h
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index e18a2ffe0787..a331d2c814f5 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -35,6 +35,7 @@ Library functionality that is used throughout the kernel.
kobject
kref
+ cleanup
assoc_array
xarray
maple_tree
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 4451ef501936..14e093da3ccd 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -576,13 +576,12 @@ The field width is passed by value, the bitmap is passed by reference.
Helper macros cpumask_pr_args() and nodemask_pr_args() are available to ease
printing cpumask and nodemask.
-Flags bitfields such as page flags, page_type, gfp_flags
+Flags bitfields such as page flags and gfp_flags
--------------------------------------------------------
::
%pGp 0x17ffffc0002036(referenced|uptodate|lru|active|private|node=0|zone=2|lastcpupid=0x1fffff)
- %pGt 0xffffff7f(buddy)
%pGg GFP_USER|GFP_DMA32|GFP_NOWARN
%pGv read|exec|mayread|maywrite|mayexec|denywrite
@@ -591,7 +590,6 @@ would construct the value. The type of flags is given by the third
character. Currently supported are:
- p - [p]age flags, expects value of type (``unsigned long *``)
- - t - page [t]ype, expects value of type (``unsigned int *``)
- v - [v]ma_flags, expects value of type (``unsigned long *``)
- g - [g]fp_flags, expects value of type (``gfp_t *``)
diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst
index 936f6aaa75c8..541899353865 100644
--- a/Documentation/dev-tools/kfence.rst
+++ b/Documentation/dev-tools/kfence.rst
@@ -53,6 +53,13 @@ configurable via the Kconfig option ``CONFIG_KFENCE_DEFERRABLE``.
The KUnit test suite is very likely to fail when using a deferrable timer
since it currently causes very unpredictable sample intervals.
+By default KFENCE will only sample 1 heap allocation within each sample
+interval. *Burst mode* allows sampling of successive heap allocations: the
+kernel boot parameter ``kfence.burst`` can be set to a non-zero value
+denoting the *additional* successive allocations within a sample interval;
+setting ``kfence.burst=N`` means that ``1 + N`` successive allocations are
+attempted through KFENCE for each sample interval.
+
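+For example, booting with the following parameters (values are illustrative)
+samples three successive allocations every 100 milliseconds::
+
+    kfence.sample_interval=100 kfence.burst=2
+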
The KFENCE memory pool is of fixed size, and if the pool is exhausted, no
further KFENCE allocations occur. With ``CONFIG_KFENCE_NUM_OBJECTS`` (default
255), the number of available guarded objects can be controlled. Each object
diff --git a/Documentation/dev-tools/kunit/api/clk.rst b/Documentation/dev-tools/kunit/api/clk.rst
new file mode 100644
index 000000000000..eeaa50089453
--- /dev/null
+++ b/Documentation/dev-tools/kunit/api/clk.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========
+Clk API
+========
+
+The KUnit clk API is used to test clk providers and clk consumers.
+
+.. kernel-doc:: drivers/clk/clk_kunit_helpers.c
+ :export:
diff --git a/Documentation/dev-tools/kunit/api/index.rst b/Documentation/dev-tools/kunit/api/index.rst
index 2d8f756aab56..5cdb552a0808 100644
--- a/Documentation/dev-tools/kunit/api/index.rst
+++ b/Documentation/dev-tools/kunit/api/index.rst
@@ -9,11 +9,17 @@ API Reference
test
resource
functionredirection
+ clk
+ of
+ platformdevice
This page documents the KUnit kernel testing API. It is divided into the
following sections:
+Core KUnit API
+==============
+
Documentation/dev-tools/kunit/api/test.rst
- Documents all of the standard testing API
@@ -25,3 +31,18 @@ Documentation/dev-tools/kunit/api/resource.rst
Documentation/dev-tools/kunit/api/functionredirection.rst
- Documents the KUnit Function Redirection API
+
+Driver KUnit API
+================
+
+Documentation/dev-tools/kunit/api/clk.rst
+
+ - Documents the KUnit clk API
+
+Documentation/dev-tools/kunit/api/of.rst
+
+ - Documents the KUnit device tree (OF) API
+
+Documentation/dev-tools/kunit/api/platformdevice.rst
+
+ - Documents the KUnit platform device API
diff --git a/Documentation/dev-tools/kunit/api/of.rst b/Documentation/dev-tools/kunit/api/of.rst
new file mode 100644
index 000000000000..cb4193dcddbb
--- /dev/null
+++ b/Documentation/dev-tools/kunit/api/of.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+Device Tree (OF) API
+====================
+
+The KUnit device tree API is used to test device tree (of_*) dependent code.
+
+.. kernel-doc:: include/kunit/of.h
+ :internal:
+
+.. kernel-doc:: drivers/of/of_kunit_helpers.c
+ :export:
diff --git a/Documentation/dev-tools/kunit/api/platformdevice.rst b/Documentation/dev-tools/kunit/api/platformdevice.rst
new file mode 100644
index 000000000000..49ddd5729003
--- /dev/null
+++ b/Documentation/dev-tools/kunit/api/platformdevice.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+Platform Device API
+===================
+
+The KUnit platform device API is used to test platform devices.
+
+.. kernel-doc:: lib/kunit/platform.c
+ :export:
diff --git a/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml b/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml
new file mode 100644
index 000000000000..170aad5dd7ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cirrus/cirrus,ep9301.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/cirrus/cirrus,ep9301.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic EP93xx platforms
+
+description:
+  The EP93xx is an ARMv4T-based SoC with a 200 MHz ARM9 CPU.
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: The TS-7250 is a compact, full-featured Single Board
+ Computer (SBC) based upon the Cirrus EP9302 ARM9 CPU
+ items:
+ - const: technologic,ts7250
+ - const: cirrus,ep9301
+
+    - description: The Liebherr BK3 is a derivative of the ts7250 board
+ items:
+ - const: liebherr,bk3
+ - const: cirrus,ep9301
+
+    - description: The EDB9302 is an evaluation board by Cirrus Logic,
+        based on the Cirrus Logic EP9302 CPU
+ items:
+ - const: cirrus,edb9302
+ - const: cirrus,ep9301
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
deleted file mode 100644
index 149567a38215..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Mediatek bdpsys controller
-============================
-
-The Mediatek bdpsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be:
- - "mediatek,mt2701-bdpsys", "syscon"
- - "mediatek,mt2712-bdpsys", "syscon"
- - "mediatek,mt7623-bdpsys", "mediatek,mt2701-bdpsys", "syscon"
-- #clock-cells: Must be 1
-
-The bdpsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-bdpsys: clock-controller@1c000000 {
- compatible = "mediatek,mt2701-bdpsys", "syscon";
- reg = <0 0x1c000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
deleted file mode 100644
index a0ce82085ad0..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-MediaTek CAMSYS controller
-============================
-
-The MediaTek camsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt6765-camsys", "syscon"
- - "mediatek,mt6779-camsys", "syscon"
- - "mediatek,mt8183-camsys", "syscon"
-- #clock-cells: Must be 1
-
-The camsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-camsys: camsys@1a000000 {
- compatible = "mediatek,mt8183-camsys", "syscon";
- reg = <0 0x1a000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
deleted file mode 100644
index dce4c9241932..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Mediatek imgsys controller
-============================
-
-The Mediatek imgsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2701-imgsys", "syscon"
- - "mediatek,mt2712-imgsys", "syscon"
- - "mediatek,mt6765-imgsys", "syscon"
- - "mediatek,mt6779-imgsys", "syscon"
- - "mediatek,mt6797-imgsys", "syscon"
- - "mediatek,mt7623-imgsys", "mediatek,mt2701-imgsys", "syscon"
- - "mediatek,mt8167-imgsys", "syscon"
- - "mediatek,mt8173-imgsys", "syscon"
- - "mediatek,mt8183-imgsys", "syscon"
-- #clock-cells: Must be 1
-
-The imgsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-imgsys: clock-controller@15000000 {
- compatible = "mediatek,mt8173-imgsys", "syscon";
- reg = <0 0x15000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt
deleted file mode 100644
index 2ce889b023d9..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipesys.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Mediatek ipesys controller
-============================
-
-The Mediatek ipesys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt6779-ipesys", "syscon"
-- #clock-cells: Must be 1
-
-The ipesys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-ipesys: clock-controller@1b000000 {
- compatible = "mediatek,mt6779-ipesys", "syscon";
- reg = <0 0x1b000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt
deleted file mode 100644
index aabc8c5c8ed2..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Mediatek IPU controller
-============================
-
-The Mediatek ipu controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt8183-ipu_conn", "syscon"
- - "mediatek,mt8183-ipu_adl", "syscon"
- - "mediatek,mt8183-ipu_core0", "syscon"
- - "mediatek,mt8183-ipu_core1", "syscon"
-- #clock-cells: Must be 1
-
-The ipu controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-ipu_conn: syscon@19000000 {
- compatible = "mediatek,mt8183-ipu_conn", "syscon";
- reg = <0 0x19000000 0 0x1000>;
- #clock-cells = <1>;
-};
-
-ipu_adl: syscon@19010000 {
- compatible = "mediatek,mt8183-ipu_adl", "syscon";
- reg = <0 0x19010000 0 0x1000>;
- #clock-cells = <1>;
-};
-
-ipu_core0: syscon@19180000 {
- compatible = "mediatek,mt8183-ipu_core0", "syscon";
- reg = <0 0x19180000 0 0x1000>;
- #clock-cells = <1>;
-};
-
-ipu_core1: syscon@19280000 {
- compatible = "mediatek,mt8183-ipu_core1", "syscon";
- reg = <0 0x19280000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt
deleted file mode 100644
index 2df799cd06a7..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Mediatek jpgdecsys controller
-============================
-
-The Mediatek jpgdecsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be:
- - "mediatek,mt2712-jpgdecsys", "syscon"
-- #clock-cells: Must be 1
-
-The jpgdecsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-jpgdecsys: syscon@19000000 {
- compatible = "mediatek,mt2712-jpgdecsys", "syscon";
- reg = <0 0x19000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
deleted file mode 100644
index 2b882b7ca72e..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Mediatek mcucfg controller
-============================
-
-The Mediatek mcucfg controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2712-mcucfg", "syscon"
- - "mediatek,mt8183-mcucfg", "syscon"
-- #clock-cells: Must be 1
-
-The mcucfg controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-mcucfg: syscon@10220000 {
- compatible = "mediatek,mt2712-mcucfg", "syscon";
- reg = <0 0x10220000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
deleted file mode 100644
index 054424fb64b4..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Mediatek mfgcfg controller
-============================
-
-The Mediatek mfgcfg controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2712-mfgcfg", "syscon"
- - "mediatek,mt6779-mfgcfg", "syscon"
- - "mediatek,mt8167-mfgcfg", "syscon"
- - "mediatek,mt8183-mfgcfg", "syscon"
-- #clock-cells: Must be 1
-
-The mfgcfg controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-mfgcfg: syscon@13000000 {
- compatible = "mediatek,mt2712-mfgcfg", "syscon";
- reg = <0 0x13000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt
deleted file mode 100644
index 1c671943ce4d..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Mediatek mipi0a (mipi_rx_ana_csi0a) controller
-============================
-
-The Mediatek mipi0a controller provides various clocks
-to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt6765-mipi0a", "syscon"
-- #clock-cells: Must be 1
-
-The mipi0a controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-The mipi0a controller also uses the common power domain from
-Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
-The available power domains are defined in dt-bindings/power/mt*-power.h.
-
-Example:
-
-mipi0a: clock-controller@11c10000 {
- compatible = "mediatek,mt6765-mipi0a", "syscon";
- reg = <0 0x11c10000 0 0x1000>;
- power-domains = <&scpsys MT6765_POWER_DOMAIN_CAM>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt
deleted file mode 100644
index f090147b7f1e..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Mediatek vcodecsys controller
-============================
-
-The Mediatek vcodecsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt6765-vcodecsys", "syscon"
-- #clock-cells: Must be 1
-
-The vcodecsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-The vcodecsys controller also uses the common power domain from
-Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
-The available power domains are defined in dt-bindings/power/mt*-power.h.
-
-Example:
-
-venc_gcon: clock-controller@17000000 {
- compatible = "mediatek,mt6765-vcodecsys", "syscon";
- reg = <0 0x17000000 0 0x10000>;
- power-domains = <&scpsys MT6765_POWER_DOMAIN_VCODEC>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
deleted file mode 100644
index 98195169176a..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Mediatek vdecsys controller
-============================
-
-The Mediatek vdecsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2701-vdecsys", "syscon"
- - "mediatek,mt2712-vdecsys", "syscon"
- - "mediatek,mt6779-vdecsys", "syscon"
- - "mediatek,mt6797-vdecsys", "syscon"
- - "mediatek,mt7623-vdecsys", "mediatek,mt2701-vdecsys", "syscon"
- - "mediatek,mt8167-vdecsys", "syscon"
- - "mediatek,mt8173-vdecsys", "syscon"
- - "mediatek,mt8183-vdecsys", "syscon"
-- #clock-cells: Must be 1
-
-The vdecsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-vdecsys: clock-controller@16000000 {
- compatible = "mediatek,mt8173-vdecsys", "syscon";
- reg = <0 0x16000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt
deleted file mode 100644
index 3cc299fd7857..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencltsys.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Mediatek vencltsys controller
-============================
-
-The Mediatek vencltsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be:
- - "mediatek,mt8173-vencltsys", "syscon"
-- #clock-cells: Must be 1
-
-The vencltsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-vencltsys: clock-controller@19000000 {
- compatible = "mediatek,mt8173-vencltsys", "syscon";
- reg = <0 0x19000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
deleted file mode 100644
index 6a6a14e15cd7..000000000000
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Mediatek vencsys controller
-============================
-
-The Mediatek vencsys controller provides various clocks to the system.
-
-Required Properties:
-
-- compatible: Should be one of:
- - "mediatek,mt2712-vencsys", "syscon"
- - "mediatek,mt6779-vencsys", "syscon"
- - "mediatek,mt6797-vencsys", "syscon"
- - "mediatek,mt8173-vencsys", "syscon"
- - "mediatek,mt8183-vencsys", "syscon"
-- #clock-cells: Must be 1
-
-The vencsys controller uses the common clk binding from
-Documentation/devicetree/bindings/clock/clock-bindings.txt
-The available clocks are defined in dt-bindings/clock/mt*-clk.h.
-
-Example:
-
-vencsys: clock-controller@18000000 {
- compatible = "mediatek,mt8173-vencsys", "syscon";
- reg = <0 0x18000000 0 0x1000>;
- #clock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.yaml b/Documentation/devicetree/bindings/ata/ahci-platform.yaml
index 358617115bb8..ef19468e3022 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.yaml
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.yaml
@@ -30,6 +30,8 @@ select:
- marvell,armada-3700-ahci
- marvell,armada-8k-ahci
- marvell,berlin2q-ahci
+ - qcom,apq8064-ahci
+ - qcom,ipq806x-ahci
- socionext,uniphier-pro4-ahci
- socionext,uniphier-pxs2-ahci
- socionext,uniphier-pxs3-ahci
@@ -45,6 +47,8 @@ properties:
- marvell,armada-8k-ahci
- marvell,berlin2-ahci
- marvell,berlin2q-ahci
+ - qcom,apq8064-ahci
+ - qcom,ipq806x-ahci
- socionext,uniphier-pro4-ahci
- socionext,uniphier-pxs2-ahci
- socionext,uniphier-pxs3-ahci
@@ -64,11 +68,11 @@ properties:
clocks:
minItems: 1
- maxItems: 3
+ maxItems: 5
clock-names:
minItems: 1
- maxItems: 3
+ maxItems: 5
interrupts:
maxItems: 1
@@ -97,6 +101,31 @@ required:
allOf:
- $ref: ahci-common.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,apq8064-ahci
+ - qcom,ipq806x-ahci
+ then:
+ properties:
+ clocks:
+ minItems: 5
+ clock-names:
+ items:
+ - const: slave_iface
+ - const: iface
+ - const: core
+ - const: rxoob
+ - const: pmalive
+ required:
+ - phys
+ - phy-names
+ - clocks
+ - clock-names
+
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml b/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml
new file mode 100644
index 000000000000..8130923fdc72
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/cirrus,ep9312-pata.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/cirrus,ep9312-pata.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic EP9312 PATA controller
+
+maintainers:
+ - Damien Le Moal <dlemoal@kernel.org>
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9312-pata
+ - items:
+ - const: cirrus,ep9315-pata
+ - const: cirrus,ep9312-pata
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ ide@800a0000 {
+ compatible = "cirrus,ep9312-pata";
+ reg = <0x800a0000 0x38>;
+ interrupt-parent = <&vic1>;
+ interrupts = <8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ide_default_pins>;
+ };
diff --git a/Documentation/devicetree/bindings/ata/imx-sata.yaml b/Documentation/devicetree/bindings/ata/imx-sata.yaml
index 68ffb97ddc9b..f4eb3550a096 100644
--- a/Documentation/devicetree/bindings/ata/imx-sata.yaml
+++ b/Documentation/devicetree/bindings/ata/imx-sata.yaml
@@ -19,6 +19,7 @@ properties:
- fsl,imx53-ahci
- fsl,imx6q-ahci
- fsl,imx6qp-ahci
+ - fsl,imx8qm-ahci
reg:
maxItems: 1
@@ -27,12 +28,14 @@ properties:
maxItems: 1
clocks:
+ minItems: 2
items:
- description: sata clock
- description: sata reference clock
- description: ahb clock
clock-names:
+ minItems: 2
items:
- const: sata
- const: sata_ref
@@ -58,6 +61,25 @@ properties:
$ref: /schemas/types.yaml#/definitions/flag
description: if present, disable spread-spectrum clocking on the SATA link.
+ phys:
+ items:
+ - description: phandle to SATA PHY.
+          Since the "REXT" pin is only present on the first lane of the i.MX8QM
+          PHY, its calibration result is stored, passed through the second
+          lane, and shared with all three lane PHYs. The first two lane PHYs
+          are used for calibration, although only the third lane PHY is used
+          by SATA.
+ - description: phandle to the first lane PHY of i.MX8QM.
+ - description: phandle to the second lane PHY of i.MX8QM.
+
+ phy-names:
+ items:
+ - const: sata-phy
+ - const: cali-phy0
+ - const: cali-phy1
+
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -65,6 +87,31 @@ required:
- clocks
- clock-names
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx53-ahci
+ - fsl,imx6q-ahci
+ - fsl,imx6qp-ahci
+ then:
+ properties:
+ clock-names:
+ minItems: 3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx8qm-ahci
+ then:
+ properties:
+ clock-names:
+ minItems: 2
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/ata/qcom-sata.txt b/Documentation/devicetree/bindings/ata/qcom-sata.txt
deleted file mode 100644
index 094de91cd9fd..000000000000
--- a/Documentation/devicetree/bindings/ata/qcom-sata.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-* Qualcomm AHCI SATA Controller
-
-SATA nodes are defined to describe on-chip Serial ATA controllers.
-Each SATA controller should have its own node.
-
-Required properties:
-- compatible : compatible list, must contain "generic-ahci"
-- interrupts : <interrupt mapping for SATA IRQ>
-- reg : <registers mapping>
-- phys : Must contain exactly one entry as specified
- in phy-bindings.txt
-- phy-names : Must be "sata-phy"
-
-Required properties for "qcom,ipq806x-ahci" compatible:
-- clocks : Must contain an entry for each entry in clock-names.
-- clock-names : Shall be:
- "slave_iface" - Fabric port AHB clock for SATA
- "iface" - AHB clock
- "core" - core clock
- "rxoob" - RX out-of-band clock
- "pmalive" - Power Module Alive clock
-- assigned-clocks : Shall be:
- SATA_RXOOB_CLK
- SATA_PMALIVE_CLK
-- assigned-clock-rates : Shall be:
- 100Mhz (100000000) for SATA_RXOOB_CLK
- 100Mhz (100000000) for SATA_PMALIVE_CLK
-
-Example:
- sata@29000000 {
- compatible = "qcom,ipq806x-ahci", "generic-ahci";
- reg = <0x29000000 0x180>;
-
- interrupts = <0 209 0x0>;
-
- clocks = <&gcc SFAB_SATA_S_H_CLK>,
- <&gcc SATA_H_CLK>,
- <&gcc SATA_A_CLK>,
- <&gcc SATA_RXOOB_CLK>,
- <&gcc SATA_PMALIVE_CLK>;
- clock-names = "slave_iface", "iface", "core",
- "rxoob", "pmalive";
- assigned-clocks = <&gcc SATA_RXOOB_CLK>, <&gcc SATA_PMALIVE_CLK>;
- assigned-clock-rates = <100000000>, <100000000>;
-
- phys = <&sata_phy>;
- phy-names = "sata-phy";
- };
diff --git a/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml b/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml
index c1bdcd9058ed..c9eb60776b4d 100644
--- a/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml
+++ b/Documentation/devicetree/bindings/clock/atmel,at91rm9200-pmc.yaml
@@ -42,6 +42,7 @@ properties:
- atmel,sama5d3-pmc
- atmel,sama5d4-pmc
- microchip,sam9x60-pmc
+ - microchip,sam9x7-pmc
- microchip,sama7g5-pmc
- const: syscon
@@ -88,6 +89,7 @@ allOf:
contains:
enum:
- microchip,sam9x60-pmc
+ - microchip,sam9x7-pmc
- microchip,sama7g5-pmc
then:
properties:
diff --git a/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml b/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml
index 7be29877e6d2..c2283cd07f05 100644
--- a/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml
+++ b/Documentation/devicetree/bindings/clock/atmel,at91sam9x5-sckc.yaml
@@ -18,7 +18,9 @@ properties:
- atmel,sama5d4-sckc
- microchip,sam9x60-sckc
- items:
- - const: microchip,sama7g5-sckc
+ - enum:
+ - microchip,sam9x7-sckc
+ - microchip,sama7g5-sckc
- const: microchip,sam9x60-sckc
reg:
diff --git a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml
index bd4cefbb1244..30252c95700c 100644
--- a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml
+++ b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml
@@ -134,9 +134,13 @@ properties:
"#reset-cells":
const: 1
- clocks: true
+ clocks:
+ minItems: 3
+ maxItems: 4
- clock-names: true
+ clock-names:
+ minItems: 3
+ maxItems: 4
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
index 59de125647ec..ccff74eda9fb 100644
--- a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
+++ b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
@@ -67,9 +67,9 @@ properties:
minItems: 1
maxItems: 19
- clocks: true
- assigned-clocks: true
- assigned-clock-parents: true
+ clocks:
+ minItems: 1
+ maxItems: 19
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml b/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml
index 0a6dc1a6e122..6588a17a7d9a 100644
--- a/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml
+++ b/Documentation/devicetree/bindings/clock/imx8mp-audiomix.yaml
@@ -44,6 +44,9 @@ properties:
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mp-clock.h
for the full list of i.MX8MP IMX8MP_CLK_AUDIOMIX_ clock IDs.
+ '#reset-cells':
+ const: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml b/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml
index 685535846cbb..db5f48e4dd15 100644
--- a/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,apmixedsys.yaml
@@ -35,7 +35,7 @@ properties:
- mediatek,mt2701-apmixedsys
- mediatek,mt2712-apmixedsys
- mediatek,mt6765-apmixedsys
- - mediatek,mt6779-apmixedsys
+ - mediatek,mt6779-apmixed
- mediatek,mt6795-apmixedsys
- mediatek,mt7629-apmixedsys
- mediatek,mt8167-apmixedsys
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.yaml b/Documentation/devicetree/bindings/clock/mediatek,infracfg.yaml
index 230b5188a88d..252c46d316ee 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,infracfg.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,infracfg.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,infracfg.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Infrastructure System Configuration Controller
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8186-clock.yaml
index 7cd14b163abe..f4e58bfa504f 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8186-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8186-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8186-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Functional Clock Controller for MT8186
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8186-sys-clock.yaml
index 64c769416690..1c446fbc5108 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8186-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8186-sys-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8186-sys-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8186-sys-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek System Clock Controller for MT8186
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8192-clock.yaml
index dff4c8e8fd4b..b8d690e28bdc 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8192-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8192-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8192-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Functional Clock Controller for MT8192
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8192-sys-clock.yaml
index 8d608fddf3f9..bf8c9aacdf1e 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8192-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8192-sys-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8192-sys-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8192-sys-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek System Clock Controller for MT8192
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8195-clock.yaml
index d17164b0b13e..fcc963aff087 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8195-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8195-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8195-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Functional Clock Controller for MT8195
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8195-sys-clock.yaml
index 066c9b3d6ac9..69f096eb168d 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt8195-sys-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,mt8195-sys-clock.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,mt8195-sys-clock.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,mt8195-sys-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek System Clock Controller for MT8195
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml b/Documentation/devicetree/bindings/clock/mediatek,pericfg.yaml
index 33c94c491828..2f06baecfd23 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml
+++ b/Documentation/devicetree/bindings/clock/mediatek,pericfg.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/mediatek/mediatek,pericfg.yaml#
+$id: http://devicetree.org/schemas/clock/mediatek,pericfg.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Peripheral Configuration Controller
diff --git a/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml b/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml
new file mode 100644
index 000000000000..10483e26878f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mediatek,syscon.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/mediatek,syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek Clock controller syscons
+
+maintainers:
+ - Matthias Brugger <matthias.bgg@gmail.com>
+ - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+
+description:
+  The MediaTek clock controller syscons provide various clocks to the system.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - mediatek,mt2701-bdpsys
+ - mediatek,mt2701-imgsys
+ - mediatek,mt2701-vdecsys
+ - mediatek,mt2712-bdpsys
+ - mediatek,mt2712-imgsys
+ - mediatek,mt2712-jpgdecsys
+ - mediatek,mt2712-mcucfg
+ - mediatek,mt2712-mfgcfg
+ - mediatek,mt2712-vdecsys
+ - mediatek,mt2712-vencsys
+ - mediatek,mt6765-camsys
+ - mediatek,mt6765-imgsys
+ - mediatek,mt6765-mipi0a
+ - mediatek,mt6765-vcodecsys
+ - mediatek,mt6779-camsys
+ - mediatek,mt6779-imgsys
+ - mediatek,mt6779-ipesys
+ - mediatek,mt6779-mfgcfg
+ - mediatek,mt6779-vdecsys
+ - mediatek,mt6779-vencsys
+ - mediatek,mt6797-imgsys
+ - mediatek,mt6797-vdecsys
+ - mediatek,mt6797-vencsys
+ - mediatek,mt8167-imgsys
+ - mediatek,mt8167-mfgcfg
+ - mediatek,mt8167-vdecsys
+ - mediatek,mt8173-imgsys
+ - mediatek,mt8173-vdecsys
+ - mediatek,mt8173-vencltsys
+ - mediatek,mt8173-vencsys
+ - mediatek,mt8183-camsys
+ - mediatek,mt8183-imgsys
+ - mediatek,mt8183-ipu_conn
+ - mediatek,mt8183-ipu_adl
+ - mediatek,mt8183-ipu_core0
+ - mediatek,mt8183-ipu_core1
+ - mediatek,mt8183-mcucfg
+ - mediatek,mt8183-mfgcfg
+ - mediatek,mt8183-vdecsys
+ - mediatek,mt8183-vencsys
+ - const: syscon
+ - items:
+ - const: mediatek,mt7623-bdpsys
+ - const: mediatek,mt2701-bdpsys
+ - const: syscon
+ - items:
+ - const: mediatek,mt7623-imgsys
+ - const: mediatek,mt2701-imgsys
+ - const: syscon
+ - items:
+ - const: mediatek,mt7623-vdecsys
+ - const: mediatek,mt2701-vdecsys
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@11220000 {
+ compatible = "mediatek,mt2701-bdpsys", "syscon";
+ reg = <0x11220000 0x2000>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
index 2dffc02dcd8b..5dc360b2ea4b 100644
--- a/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
+++ b/Documentation/devicetree/bindings/clock/nxp,imx95-blk-ctl.yaml
@@ -16,6 +16,7 @@ properties:
- nxp,imx95-lvds-csr
- nxp,imx95-display-csr
- nxp,imx95-camera-csr
+ - nxp,imx95-netcmix-blk-ctrl
- nxp,imx95-vpu-csr
- const: syscon
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt
deleted file mode 100644
index 20cbca3f41d8..000000000000
--- a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-NXP LPC32xx Clock Controller
-
-Required properties:
-- compatible: should be "nxp,lpc3220-clk"
-- reg: should contain clock controller registers location and length
-- #clock-cells: must be 1, the cell holds id of a clock provided by the
- clock controller
-- clocks: phandles of external oscillators, the list must contain one
- 32768 Hz oscillator and may have one optional high frequency oscillator
-- clock-names: list of external oscillator clock names, must contain
- "xtal_32k" and may have optional "xtal"
-
-Examples:
-
- /* System Control Block */
- scb {
- compatible = "simple-bus";
- ranges = <0x0 0x040004000 0x00001000>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- clk: clock-controller@0 {
- compatible = "nxp,lpc3220-clk";
- reg = <0x00 0x114>;
- #clock-cells = <1>;
-
- clocks = <&xtal_32k>, <&xtal>;
- clock-names = "xtal_32k", "xtal";
- };
- };
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml
new file mode 100644
index 000000000000..16f79616d18a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nxp,lpc3220-clk.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nxp,lpc3220-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC32xx Clock Controller
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ const: nxp,lpc3220-clk
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: External 32768 Hz oscillator.
+ - description: Optional high frequency oscillator.
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: xtal_32k
+ - const: xtal
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@0 {
+ compatible = "nxp,lpc3220-clk";
+ reg = <0x00 0x114>;
+ #clock-cells = <1>;
+ clocks = <&xtal_32k>, <&xtal>;
+ clock-names = "xtal_32k", "xtal";
+ };
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt
deleted file mode 100644
index 0aa249409b51..000000000000
--- a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-NXP LPC32xx USB Clock Controller
-
-Required properties:
-- compatible: should be "nxp,lpc3220-usb-clk"
-- reg: should contain clock controller registers location and length
-- #clock-cells: must be 1, the cell holds id of a clock provided by the
- USB clock controller
-
-Examples:
-
- usb {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- ranges = <0x0 0x31020000 0x00001000>;
-
- usbclk: clock-controller@f00 {
- compatible = "nxp,lpc3220-usb-clk";
- reg = <0xf00 0x100>;
- #clock-cells = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml
new file mode 100644
index 000000000000..10361d2292fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nxp,lpc3220-usb-clk.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/nxp,lpc3220-usb-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP LPC32xx USB Clock Controller
+
+maintainers:
+ - Animesh Agarwal <animeshagarwal28@gmail.com>
+
+properties:
+ compatible:
+ const: nxp,lpc3220-usb-clk
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@f00 {
+ compatible = "nxp,lpc3220-usb-clk";
+ reg = <0xf00 0x100>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
index 5ca927a8b1d5..47ceab641a4c 100644
--- a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
@@ -21,6 +21,7 @@ properties:
- qcom,ipq6018-a53pll
- qcom,ipq8074-a53pll
- qcom,ipq9574-a73pll
+ - qcom,msm8226-a7pll
- qcom,msm8916-a53pll
- qcom,msm8939-a53pll
@@ -40,6 +41,9 @@ properties:
operating-points-v2: true
+ opp-table:
+ type: object
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml b/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml
new file mode 100644
index 000000000000..033e010754a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,qcs404-turingcc.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,qcs404-turingcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Turing Clock & Reset Controller on QCS404
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+
+properties:
+ compatible:
+ const: qcom,qcs404-turingcc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+ - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-qcs404.h>
+ clock-controller@800000 {
+ compatible = "qcom,qcs404-turingcc";
+ reg = <0x00800000 0x30000>;
+ clocks = <&gcc GCC_CDSP_CFG_AHB_CLK>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml
index 3326dcd6766c..273d66e245c5 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc8280xp-lpasscc.yaml
@@ -18,9 +18,16 @@ description: |
properties:
compatible:
- enum:
- - qcom,sc8280xp-lpassaudiocc
- - qcom,sc8280xp-lpasscc
+ oneOf:
+ - enum:
+ - qcom,sc8280xp-lpassaudiocc
+ - qcom,sc8280xp-lpasscc
+ - items:
+ - const: qcom,x1e80100-lpassaudiocc
+ - const: qcom,sc8280xp-lpassaudiocc
+ - items:
+ - const: qcom,x1e80100-lpasscc
+ - const: qcom,sc8280xp-lpasscc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
index f58edfc10f4c..26afbbe65511 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-camcc.yaml
@@ -21,9 +21,6 @@ description: |
include/dt-bindings/clock/qcom,sm8650-camcc.h
include/dt-bindings/clock/qcom,x1e80100-camcc.h
-allOf:
- - $ref: qcom,gcc.yaml#
-
properties:
compatible:
enum:
@@ -57,7 +54,21 @@ required:
- compatible
- clocks
- power-domains
- - required-opps
+
+allOf:
+ - $ref: qcom,gcc.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc8280xp-camcc
+ - qcom,sm8450-camcc
+ - qcom,sm8550-camcc
+ - qcom,x1e80100-camcc
+ then:
+ required:
+ - required-opps
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
index b2792b4bb554..9829ba28fe0e 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sm8450-videocc.yaml
@@ -44,11 +44,20 @@ required:
- compatible
- clocks
- power-domains
- - required-opps
- '#power-domain-cells'
allOf:
- $ref: qcom,gcc.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8450-videocc
+ - qcom,sm8550-videocc
+ then:
+ required:
+ - required-opps
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/clock/qcom,turingcc.txt b/Documentation/devicetree/bindings/clock/qcom,turingcc.txt
deleted file mode 100644
index 126517de5f9a..000000000000
--- a/Documentation/devicetree/bindings/clock/qcom,turingcc.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Qualcomm Turing Clock & Reset Controller Binding
-------------------------------------------------
-
-Required properties :
-- compatible: shall contain "qcom,qcs404-turingcc".
-- reg: shall contain base register location and length.
-- clocks: ahb clock for the TuringCC
-- #clock-cells: from common clock binding, shall contain 1.
-- #reset-cells: from common reset binding, shall contain 1.
-
-Example:
- turingcc: clock-controller@800000 {
- compatible = "qcom,qcs404-turingcc";
- reg = <0x00800000 0x30000>;
- clocks = <&gcc GCC_CDSP_CFG_AHB_CLK>;
-
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml
index 9185d101737e..a0e09b7002f0 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml
@@ -32,12 +32,16 @@ properties:
reg:
maxItems: 1
- clocks: true
+ clocks:
+ minItems: 1
+ maxItems: 3
'#clock-cells':
const: 1
- clock-output-names: true
+ clock-output-names:
+ minItems: 3
+ maxItems: 17
renesas,mode:
description: Board-specific settings of the MD_CK* bits on R-Mobile A1
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
index 084259d30232..77ce3615c65a 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
@@ -31,6 +31,7 @@ properties:
- renesas,r8a7745-cpg-mssr # RZ/G1E
- renesas,r8a77470-cpg-mssr # RZ/G1C
- renesas,r8a774a1-cpg-mssr # RZ/G2M
+ - renesas,r8a774a3-cpg-mssr # RZ/G2M v3.0
- renesas,r8a774b1-cpg-mssr # RZ/G2N
- renesas,r8a774c0-cpg-mssr # RZ/G2E
- renesas,r8a774e1-cpg-mssr # RZ/G2H
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml
new file mode 100644
index 000000000000..9c9b36049c71
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3576-cru.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3576-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip rk3576 Family Clock and Reset Control Module
+
+maintainers:
+ - Elaine Zhang <zhangqing@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+ - Detlev Casanova <detlev.casanova@collabora.com>
+
+description:
+ The RK3576 clock controller generates the clock and also implements a reset
+ controller for SoC peripherals. For example it provides SCLK_UART2 and
+ PCLK_UART2, as well as SRST_P_UART2 and SRST_S_UART2 for the second UART
+ module.
+
+properties:
+ compatible:
+ const: rockchip,rk3576-cru
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: xin24m
+ - const: xin32k
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@27200000 {
+ compatible = "rockchip,rk3576-cru";
+ reg = <0xfd7c0000 0x5c000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
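As a usage note, the description names SCLK_UART2/PCLK_UART2 and
SRST_P_UART2/SRST_S_UART2 for the second UART; a minimal consumer sketch under
that assumption (the node, unit address and clock-names are illustrative, with
IDs taken from the rk3576 dt-binding headers):

    serial@2a400000 {
        clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
        clock-names = "baudclk", "apb_pclk";
        resets = <&cru SRST_S_UART2>, <&cru SRST_P_UART2>;
    };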
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml
index 74cd3f3f229a..4ff175c4992b 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3588-cru.yaml
@@ -42,10 +42,6 @@ properties:
- const: xin24m
- const: xin32k
- assigned-clocks: true
-
- assigned-clock-rates: true
-
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
description: >
diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
index 5194be0b410e..9b3aaae546cb 100644
--- a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
+++ b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
@@ -60,8 +60,14 @@ properties:
- st,stm32mp1-rcc
- st,stm32mp13-rcc
- const: syscon
- clocks: true
- clock-names: true
+
+ clocks:
+ minItems: 1
+ maxItems: 5
+
+ clock-names:
+ minItems: 1
+ maxItems: 5
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
index 2ad0cd6dd49e..b78f64c9c5f4 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
@@ -92,12 +92,31 @@ properties:
reference to a valid DPI output or input endpoint node.
port@2:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
eDP/DP output port. The remote endpoint phandle should be a
reference to a valid eDP panel input endpoint node. This port is
optional, treated as DP panel if not defined
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ toshiba,pre-emphasis:
+ description:
+ Display port output Pre-Emphasis settings for both DP lanes.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 2
+ maxItems: 2
+ items:
+ enum:
+ - 0 # No pre-emphasis
+ - 1 # 3.5dB pre-emphasis
+ - 2 # 6dB pre-emphasis
+
oneOf:
- required:
- port@0
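Because toshiba,pre-emphasis is a uint8-array, DTS consumers must force 8-bit
cells with /bits/ 8; a minimal sketch of the endpoint added above (the
remote-endpoint phandle is an assumption):

    port@2 {
        reg = <2>;
        endpoint {
            remote-endpoint = <&panel_in>;
            /* 3.5 dB pre-emphasis on both DP lanes */
            toshiba,pre-emphasis = /bits/ 8 <1 1>;
        };
    };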
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
index 5ca7679d5427..3a82aec9021c 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
@@ -62,6 +62,9 @@ properties:
- const: default
- const: sleep
+ power-domains:
+ maxItems: 1
+
port:
$ref: /schemas/graph.yaml#/properties/port
description:
@@ -76,6 +79,20 @@ required:
- clock-names
- port
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - mediatek,mt6795-dpi
+ - mediatek,mt8173-dpi
+ - mediatek,mt8186-dpi
+ then:
+ properties:
+ power-domains: false
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.yaml b/Documentation/devicetree/bindings/display/msm/hdmi.yaml
index 47e97669821c..d4a2033afea8 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.yaml
@@ -19,14 +19,15 @@ properties:
- qcom,hdmi-tx-8974
- qcom,hdmi-tx-8994
- qcom,hdmi-tx-8996
+ - qcom,hdmi-tx-8998
clocks:
minItems: 1
- maxItems: 5
+ maxItems: 8
clock-names:
minItems: 1
- maxItems: 5
+ maxItems: 8
reg:
minItems: 1
@@ -142,6 +143,7 @@ allOf:
properties:
clocks:
minItems: 5
+ maxItems: 5
clock-names:
items:
- const: mdp_core
@@ -151,6 +153,28 @@ allOf:
- const: extp
hdmi-mux-supplies: false
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,hdmi-tx-8998
+ then:
+ properties:
+ clocks:
+ minItems: 8
+ maxItems: 8
+ clock-names:
+ items:
+ - const: mdp_core
+ - const: iface
+ - const: core
+ - const: alt_iface
+ - const: extp
+ - const: bus
+ - const: mnoc
+ - const: iface_mmss
+
additionalProperties: false
examples:
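For the new qcom,hdmi-tx-8998 entry, all eight clocks must appear in the exact
order fixed by the clock-names list above. A hedged sketch; the phandles and
clock specifier indices are placeholders, not verified MSM8998 macros:

    hdmi-tx@c9a0000 {
        compatible = "qcom,hdmi-tx-8998";
        /* reg, phys and supplies omitted from this sketch */
        clocks = <&mmcc 1>, <&mmcc 2>, <&mmcc 3>, <&mmcc 4>,
                 <&mmcc 5>, <&gcc 6>, <&gcc 7>, <&mmcc 8>;
        clock-names = "mdp_core", "iface", "core", "alt_iface",
                      "extp", "bus", "mnoc", "iface_mmss";
    };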
diff --git a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
index 5eaccce13c21..6a82bd1ec763 100644
--- a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
@@ -9,20 +9,20 @@ title: BOE TH101MB31IG002-28A WXGA DSI Display Panel
maintainers:
- Manuel Traut <manut@mecka.net>
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
enum:
# BOE TH101MB31IG002-28A 10.1" WXGA TFT LCD panel
- boe,th101mb31ig002-28a
+ # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel
+ - starry,er88577
reg:
maxItems: 1
backlight: true
enable-gpios: true
+ reset-gpios: true
power-supply: true
port: true
rotation: true
@@ -33,6 +33,20 @@ required:
- enable-gpios
- power-supply
+allOf:
+ - $ref: panel-common.yaml#
+ - if:
+ properties:
+ compatible:
+ # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel
+ const: starry,er88577
+ then:
+ properties:
+ reset-gpios: false
+ else:
+ required:
+ - reset-gpios
+
additionalProperties: false
examples:
@@ -47,6 +61,7 @@ examples:
reg = <0>;
backlight = <&backlight_lcd0>;
enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 55 GPIO_ACTIVE_LOW>;
rotation = <90>;
power-supply = <&vcc_3v3>;
port {
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml
new file mode 100644
index 000000000000..dced98e1c69a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-ll2.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,tv101wum-ll2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BOE TV101WUM-LL2 DSI Display Panel
+
+maintainers:
+ - Neil Armstrong <neil.armstrong@linaro.org>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: boe,tv101wum-ll2
+
+ reg:
+ maxItems: 1
+ description: DSI virtual channel
+
+ backlight: true
+ reset-gpios: true
+ vsp-supply: true
+ vsn-supply: true
+ port: true
+ rotation: true
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - vsp-supply
+ - vsn-supply
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "boe,tv101wum-ll2";
+ reg = <0>;
+
+ vsn-supply = <&vsn_lcd>;
+ vsp-supply = <&vsp_lcd>;
+
+ reset-gpios = <&pio 45 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
index 644387e4fb6f..75ccabff308b 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -15,14 +15,12 @@ description:
such as the HannStar HSD060BHW4 720x1440 TFT LCD panel connected with
a MIPI-DSI video interface.
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
items:
- enum:
- hannstar,hsd060bhw4
+ - microchip,ac40t08a-mipi-panel
- powkiddy,x55-panel
- const: himax,hx8394
@@ -46,7 +44,6 @@ properties:
required:
- compatible
- reg
- - reset-gpios
- backlight
- port
- vcc-supply
@@ -54,6 +51,18 @@ required:
additionalProperties: false
+allOf:
+ - $ref: panel-common.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ enum:
+ - microchip,ac40t08a-mipi-panel
+ then:
+ required:
+ - reset-gpios
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
index cfd7cc9c8725..f80307579485 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
items:
- enum:
+ - densitron,dmt028vghmcmi-1d
- ortustech,com35h3p70ulc
- const: ilitek,ili9806e
diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
index 3d5bede98cf1..b8783eba3ddc 100644
--- a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
@@ -18,6 +18,7 @@ properties:
- enum:
- chongzhou,cz101b4001
- kingdisplay,kd101ne3-40ti
+ - melfas,lmfbx101117480
- radxa,display-10hd-ad001
- radxa,display-8hd-ad002
- const: jadard,jd9365da-h3
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 8a87e0100dcb..b89e39790579 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -158,6 +158,8 @@ properties:
- innolux,at070tn92
# Innolux G070ACE-L01 7" WVGA (800x480) TFT LCD panel
- innolux,g070ace-l01
+ # Innolux G070ACE-LH3 7" WVGA (800x480) TFT LCD panel with WLED backlight
+ - innolux,g070ace-lh3
# Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
- innolux,g070y2-l01
# Innolux G070Y2-T02 7" WVGA (800x480) TFT LCD TTL panel
@@ -222,6 +224,8 @@ properties:
- okaya,rs800480t-7x0gp
# Olimex 4.3" TFT LCD panel
- olimex,lcd-olinuxino-43-ts
+ # On Tat Industrial Company 5" DPI TFT panel.
+ - ontat,kd50g21-40nt-a1
# On Tat Industrial Company 7" DPI TFT panel.
- ontat,yx700wv03
# OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
index b348f5bf0a98..b07f3eca669b 100644
--- a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
@@ -20,21 +20,19 @@ description: |
Densitron DMT028VGHMCMI-1A is 480x640, 2-lane MIPI DSI LCD panel
which has built-in ST7701 chip.
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
items:
- enum:
- anbernic,rg-arc-panel
+ - anbernic,rg28xx-panel
- densitron,dmt028vghmcmi-1a
- elida,kd50t048a
- techstar,ts8550b
- const: sitronix,st7701
reg:
- description: DSI virtual channel used by that screen
+ description: DSI / SPI channel used by that screen
maxItems: 1
VCC-supply:
@@ -43,6 +41,13 @@ properties:
IOVCC-supply:
description: I/O system regulator
+ dc-gpios:
+ maxItems: 1
+ description:
+ Controller data/command selection (D/CX) in 4-line SPI mode.
+ If not set, the controller is in 3-line SPI mode.
+ Disallowed for DSI.
+
port: true
reset-gpios: true
rotation: true
@@ -57,7 +62,38 @@ required:
- port
- reset-gpios
-additionalProperties: false
+allOf:
+ - $ref: panel-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ # SPI connected panels
+ enum:
+ - anbernic,rg28xx-panel
+ then:
+ $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ # DSI or SPI without D/CX pin
+ enum:
+ - anbernic,rg-arc-panel
+ - anbernic,rg28xx-panel
+ - densitron,dmt028vghmcmi-1a
+ - elida,kd50t048a
+ - techstar,ts8550b
+ then:
+ required:
+ - dc-gpios
+ else:
+ properties:
+ dc-gpios: false
+
+unevaluatedProperties: false
examples:
- |
@@ -82,3 +118,26 @@ examples:
};
};
};
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "anbernic,rg28xx-panel", "sitronix,st7701";
+ reg = <0>;
+ spi-max-frequency = <3125000>;
+ VCC-supply = <&reg_lcd>;
+ IOVCC-supply = <&reg_lcd>;
+ reset-gpios = <&pio 8 14 GPIO_ACTIVE_HIGH>; /* LCD-RST: PI14 */
+ backlight = <&backlight>;
+
+ port {
+ panel_in_rgb: endpoint {
+ remote-endpoint = <&tcon_lcd0_out_lcd>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
index 08e5b9478051..95e3d5e74b87 100644
--- a/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
+++ b/Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
oneOf:
- enum:
+ - renesas,r9a07g043u-du # RZ/G2UL
- renesas,r9a07g044-du # RZ/G2{L,LC}
- items:
- enum:
@@ -60,9 +61,6 @@ properties:
$ref: /schemas/graph.yaml#/properties/port
unevaluatedProperties: false
- required:
- - port@0
-
unevaluatedProperties: false
renesas,vsps:
@@ -88,6 +86,34 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a07g043u-du
+ then:
+ properties:
+ ports:
+ properties:
+ port@0:
+ description: DPI
+
+ required:
+ - port@0
+ else:
+ properties:
+ ports:
+ properties:
+ port@0:
+ description: DSI
+ port@1:
+ description: DPI
+
+ required:
+ - port@0
+ - port@1
+
examples:
# RZ/G2L DU
- |
diff --git a/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml
new file mode 100644
index 000000000000..871b76ddf90f
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2m.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/cirrus,ep9301-dma-m2m.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic ep93xx SoC DMA controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-dma-m2m
+ - items:
+ - enum:
+ - cirrus,ep9302-dma-m2m
+ - cirrus,ep9307-dma-m2m
+ - cirrus,ep9312-dma-m2m
+ - cirrus,ep9315-dma-m2m
+ - const: cirrus,ep9301-dma-m2m
+
+ reg:
+ items:
+ - description: m2m0 channel registers
+ - description: m2m1 channel registers
+
+ clocks:
+ items:
+ - description: m2m0 channel gate clock
+ - description: m2m1 channel gate clock
+
+ clock-names:
+ items:
+ - const: m2m0
+ - const: m2m1
+
+ interrupts:
+ items:
+ - description: m2m0 channel interrupt
+ - description: m2m1 channel interrupt
+
+ '#dma-cells':
+ const: 2
+ description: |
+ The first cell is the unique device channel number as indicated by this
+ table for ep93xx:
+
+ 10: SPI controller
+ 11: IDE controller
+
+ The second cell is the DMA direction line number:
+
+ 1: Memory to device
+ 2: Device to memory
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ dma-controller@80000100 {
+ compatible = "cirrus,ep9301-dma-m2m";
+ reg = <0x80000100 0x0040>,
+ <0x80000140 0x0040>;
+ clocks = <&syscon EP93XX_CLK_M2M0>,
+ <&syscon EP93XX_CLK_M2M1>;
+ clock-names = "m2m0", "m2m1";
+ interrupt-parent = <&vic0>;
+ interrupts = <17>, <18>;
+ #dma-cells = <2>;
+ };
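With #dma-cells = <2>, a consumer passes the channel number from the table plus
a direction cell; a sketch for the SPI controller (channel 10, direction 1 for
memory-to-device and 2 for device-to-memory), where the phandle and dma-names
are assumptions:

    spi@808a0000 {
        /* illustrative consumer node */
        dmas = <&dma_m2m 10 1>, <&dma_m2m 10 2>;
        dma-names = "tx", "rx";
    };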
diff --git a/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml
new file mode 100644
index 000000000000..d14c31553543
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/cirrus,ep9301-dma-m2p.yaml
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/cirrus,ep9301-dma-m2p.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic ep93xx SoC M2P DMA controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-dma-m2p
+ - items:
+ - enum:
+ - cirrus,ep9302-dma-m2p
+ - cirrus,ep9307-dma-m2p
+ - cirrus,ep9312-dma-m2p
+ - cirrus,ep9315-dma-m2p
+ - const: cirrus,ep9301-dma-m2p
+
+ reg:
+ items:
+ - description: m2p0 channel registers
+ - description: m2p1 channel registers
+ - description: m2p2 channel registers
+ - description: m2p3 channel registers
+ - description: m2p4 channel registers
+ - description: m2p5 channel registers
+ - description: m2p6 channel registers
+ - description: m2p7 channel registers
+ - description: m2p8 channel registers
+ - description: m2p9 channel registers
+
+ clocks:
+ items:
+ - description: m2p0 channel gate clock
+ - description: m2p1 channel gate clock
+ - description: m2p2 channel gate clock
+ - description: m2p3 channel gate clock
+ - description: m2p4 channel gate clock
+ - description: m2p5 channel gate clock
+ - description: m2p6 channel gate clock
+ - description: m2p7 channel gate clock
+ - description: m2p8 channel gate clock
+ - description: m2p9 channel gate clock
+
+ clock-names:
+ items:
+ - const: m2p0
+ - const: m2p1
+ - const: m2p2
+ - const: m2p3
+ - const: m2p4
+ - const: m2p5
+ - const: m2p6
+ - const: m2p7
+ - const: m2p8
+ - const: m2p9
+
+ interrupts:
+ items:
+ - description: m2p0 channel interrupt
+ - description: m2p1 channel interrupt
+ - description: m2p2 channel interrupt
+ - description: m2p3 channel interrupt
+ - description: m2p4 channel interrupt
+ - description: m2p5 channel interrupt
+ - description: m2p6 channel interrupt
+ - description: m2p7 channel interrupt
+ - description: m2p8 channel interrupt
+ - description: m2p9 channel interrupt
+
+ '#dma-cells':
+ const: 2
+ description: |
+ The first cell is the unique device channel number as indicated by this
+ table for ep93xx:
+
+ 0: I2S channel 1
+ 1: I2S channel 2 (unused)
+ 2: AC97 channel 1 (unused)
+ 3: AC97 channel 2 (unused)
+ 4: AC97 channel 3 (unused)
+ 5: I2S channel 3 (unused)
+ 6: UART1 (unused)
+ 7: UART2 (unused)
+ 8: UART3 (unused)
+ 9: IRDA (unused)
+
+ The second cell is the DMA direction line number:
+
+ 1: Memory to device
+ 2: Device to memory
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ dma-controller@80000000 {
+ compatible = "cirrus,ep9301-dma-m2p";
+ reg = <0x80000000 0x0040>,
+ <0x80000040 0x0040>,
+ <0x80000080 0x0040>,
+ <0x800000c0 0x0040>,
+ <0x80000240 0x0040>,
+ <0x80000200 0x0040>,
+ <0x800002c0 0x0040>,
+ <0x80000280 0x0040>,
+ <0x80000340 0x0040>,
+ <0x80000300 0x0040>;
+ clocks = <&syscon EP93XX_CLK_M2P0>,
+ <&syscon EP93XX_CLK_M2P1>,
+ <&syscon EP93XX_CLK_M2P2>,
+ <&syscon EP93XX_CLK_M2P3>,
+ <&syscon EP93XX_CLK_M2P4>,
+ <&syscon EP93XX_CLK_M2P5>,
+ <&syscon EP93XX_CLK_M2P6>,
+ <&syscon EP93XX_CLK_M2P7>,
+ <&syscon EP93XX_CLK_M2P8>,
+ <&syscon EP93XX_CLK_M2P9>;
+ clock-names = "m2p0", "m2p1",
+ "m2p2", "m2p3",
+ "m2p4", "m2p5",
+ "m2p6", "m2p7",
+ "m2p8", "m2p9";
+ interrupt-parent = <&vic0>;
+ interrupts = <7>, <8>, <9>, <10>, <11>, <12>, <13>, <14>, <15>, <16>;
+ #dma-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml
index 902a11f65be2..75957f9fb58b 100644
--- a/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,imx-dma.yaml
@@ -28,6 +28,14 @@ properties:
- description: DMA Error interrupt
minItems: 1
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: ipg
+ - const: ahb
+
"#dma-cells":
const: 1
@@ -42,15 +50,21 @@ required:
- reg
- interrupts
- "#dma-cells"
+ - clocks
+ - clock-names
additionalProperties: false
examples:
- |
+ #include <dt-bindings/clock/imx27-clock.h>
+
dma-controller@10001000 {
compatible = "fsl,imx27-dma";
reg = <0x10001000 0x1000>;
interrupts = <32 33>;
#dma-cells = <1>;
dma-channels = <16>;
+ clocks = <&clks IMX27_CLK_DMA_IPG_GATE>, <&clks IMX27_CLK_DMA_AHB_GATE>;
+ clock-names = "ipg", "ahb";
};
diff --git a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
index add9c77e8b52..a17cf2360dd4 100644
--- a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
@@ -11,6 +11,17 @@ maintainers:
allOf:
- $ref: dma-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx8qxp-dma-apbh
+ then:
+ required:
+ - power-domains
+ else:
+ properties:
+ power-domains: false
properties:
compatible:
@@ -20,6 +31,7 @@ properties:
- fsl,imx6q-dma-apbh
- fsl,imx6sx-dma-apbh
- fsl,imx7d-dma-apbh
+ - fsl,imx8qxp-dma-apbh
- const: fsl,imx28-dma-apbh
- enum:
- fsl,imx23-dma-apbh
@@ -42,6 +54,9 @@ properties:
dma-channels:
enum: [4, 8, 16]
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.yaml b/Documentation/devicetree/bindings/dma/fsl-qdma.yaml
index 1b9ebdbe528a..9401b1f6300d 100644
--- a/Documentation/devicetree/bindings/dma/fsl-qdma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.yaml
@@ -11,11 +11,14 @@ maintainers:
properties:
compatible:
- enum:
- - fsl,ls1021a-qdma
- - fsl,ls1028a-qdma
- - fsl,ls1043a-qdma
- - fsl,ls1046a-qdma
+ oneOf:
+ - const: fsl,ls1021a-qdma
+ - items:
+ - enum:
+ - fsl,ls1028a-qdma
+ - fsl,ls1043a-qdma
+ - fsl,ls1046a-qdma
+ - const: fsl,ls1021a-qdma
reg:
items:
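The reworked compatible makes "fsl,ls1021a-qdma" the mandatory fallback on the
newer SoCs; a minimal sketch (the unit address is illustrative):

    dma-controller@8380000 {
        compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma";
        /* reg, interrupts and the remaining required properties unchanged */
    };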
diff --git a/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml b/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml
new file mode 100644
index 000000000000..4c7d2fb7b292
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/loongson,ls1b-apbdma.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/loongson,ls1b-apbdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson-1 APB DMA Controller
+
+maintainers:
+ - Keguang Zhang <keguang.zhang@gmail.com>
+
+description:
+ Loongson-1 APB DMA controller provides 3 independent channels for
+ peripherals such as NAND, audio playback and capture.
+
+properties:
+ compatible:
+ oneOf:
+ - const: loongson,ls1b-apbdma
+ - items:
+ - enum:
+ - loongson,ls1a-apbdma
+ - loongson,ls1c-apbdma
+ - const: loongson,ls1b-apbdma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: NAND interrupt
+ - description: Audio playback interrupt
+ - description: Audio capture interrupt
+
+ interrupt-names:
+ items:
+ - const: ch0
+ - const: ch1
+ - const: ch2
+
+ '#dma-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - '#dma-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ dma-controller@1fd01160 {
+ compatible = "loongson,ls1b-apbdma";
+ reg = <0x1fd01160 0x4>;
+ interrupt-parent = <&intc0>;
+ interrupts = <13 IRQ_TYPE_EDGE_RISING>,
+ <14 IRQ_TYPE_EDGE_RISING>,
+ <15 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ch0", "ch1", "ch2";
+ #dma-cells = <1>;
+ };
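With #dma-cells = <1>, the single cell presumably selects the channel index
(0 = NAND, 1 = audio playback, 2 = audio capture, matching the interrupt list);
a hedged consumer sketch with an assumed node and phandle:

    nand-controller@1fe78000 {
        dmas = <&apbdma0 0>;
        dma-names = "rxtx";
    };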
diff --git a/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml b/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml
new file mode 100644
index 000000000000..646b4e779d8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/marvell,xor-v2.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/marvell,xor-v2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell XOR v2 engines
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+
+properties:
+ compatible:
+ oneOf:
+ - const: marvell,xor-v2
+ - items:
+ - enum:
+ - marvell,armada-7k-xor
+ - const: marvell,xor-v2
+
+ reg:
+ items:
+ - description: DMA registers
+ - description: global registers
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: core
+ - const: reg
+
+ msi-parent:
+ description:
+ Phandle to the MSI-capable interrupt controller used for
+ interrupts.
+ maxItems: 1
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - msi-parent
+ - dma-coherent
+
+additionalProperties: false
+
+examples:
+ - |
+ xor0@6a0000 {
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
+ reg = <0x6a0000 0x1000>, <0x6b0000 0x1000>;
+ clocks = <&ap_clk 0>, <&ap_clk 1>;
+ clock-names = "core", "reg";
+ msi-parent = <&gic_v2m0>;
+ dma-coherent;
+ };
diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
deleted file mode 100644
index 9c38bbe7e6d7..000000000000
--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Marvell XOR v2 engines
-
-Required properties:
-- compatible: one of the following values:
- "marvell,armada-7k-xor"
- "marvell,xor-v2"
-- reg: Should contain registers location and length (two sets)
- the first set is the DMA registers
- the second set is the global registers
-- msi-parent: Phandle to the MSI-capable interrupt controller used for
- interrupts.
-
-Optional properties:
-- clocks: Optional reference to the clocks used by the XOR engine.
-- clock-names: mandatory if there is a second clock, in this case the
- name must be "core" for the first clock and "reg" for the second
- one
-
-
-Example:
-
- xor0@400000 {
- compatible = "marvell,xor-v2";
- reg = <0x400000 0x1000>,
- <0x410000 0x1000>;
- msi-parent = <&gic_v2m0>;
- dma-coherent;
- };
diff --git a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
index a42b6a26a6d3..ca24cf48769f 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
@@ -19,6 +19,7 @@ properties:
- renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five
- renesas,r9a07g044-dmac # RZ/G2{L,LC}
- renesas,r9a07g054-dmac # RZ/V2L
+ - renesas,r9a08g045-dmac # RZ/G3S
- const: renesas,rz-dmac
reg:
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
index 769ce23aaac2..ac3198953b8e 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
+++ b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
@@ -24,7 +24,9 @@ properties:
const: 1
compatible:
- const: xlnx,zynqmp-dma-1.0
+ enum:
+ - amd,versal2-dma-1.0
+ - xlnx,zynqmp-dma-1.0
reg:
description: memory map for gdma/adma module access
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index e396e47b2f13..b6239ec3512b 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -116,6 +116,7 @@ properties:
- const: atmel,24c02
- items:
- enum:
+ - giantec,gt24c04a
- onnn,cat24c04
- onnn,cat24c05
- rohm,br24g04
diff --git a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
index d5cfa32ea52d..072b3c0c5fd0 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
+++ b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -37,6 +37,11 @@ properties:
GPIO pin (output) used to control VBUS. If skipped, no such control
takes place.
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+      A port node to link to the USB controller for the dual-role switch.
+
required:
- compatible
- interrupts
@@ -58,5 +63,11 @@ examples:
interrupt-parent = <&msmgpio>;
interrupts = <78 IRQ_TYPE_LEVEL_HIGH>;
vbus-gpios = <&msmgpio 148 GPIO_ACTIVE_HIGH>;
+
+ port {
+ endpoint {
+ remote-endpoint = <&usb1_drd_sw>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
deleted file mode 100644
index dfc14f71e81f..000000000000
--- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-USB GPIO Extcon device
-
-This is a virtual device used to generate USB cable states from the USB ID pin
-connected to a GPIO pin.
-
-Required properties:
-- compatible: Should be "linux,extcon-usb-gpio"
-
-Either one of id-gpio or vbus-gpio must be present. Both can be present as well.
-- id-gpio: gpio for USB ID pin. See gpio binding.
-- vbus-gpio: gpio for USB VBUS pin.
-
-Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below:
- extcon_usb1 {
- compatible = "linux,extcon-usb-gpio";
- id-gpio = <&gpio6 1 GPIO_ACTIVE_HIGH>;
- }
-
- &omap_dwc3_1 {
- extcon = <&extcon_usb1>;
- };
diff --git a/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml b/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml
new file mode 100644
index 000000000000..8856107bdd33
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/linux,extcon-usb-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB GPIO Extcon device
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ This is a virtual device used to generate USB cable states from the USB ID pin
+ connected to a GPIO pin.
+
+properties:
+ compatible:
+ const: linux,extcon-usb-gpio
+
+ id-gpios:
+ description: gpio for USB ID pin. See gpio binding.
+ vbus-gpios:
+ description: gpio for USB VBUS pin.
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ extcon_usb1 {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpios = <&gpio6 1 GPIO_ACTIVE_HIGH>;
+ };
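The consumer side shown in the removed .txt still applies unchanged; a sketch
linking a USB controller to the extcon node (the &omap_dwc3_1 label comes from
the old dra7-evm example):

    &omap_dwc3_1 {
        extcon = <&extcon_usb1>;
    };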
diff --git a/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml b/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml
index daadfb4926c3..3a1079d6ee20 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-ep9301.yaml
@@ -73,9 +73,10 @@ examples:
reg-names = "data", "dir", "intr";
gpio-controller;
#gpio-cells = <2>;
- interrupt-controller;
- interrupt-parent = <&vic1>;
- interrupts = <27>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&vic1>;
+ interrupts = <27>;
};
gpio@80840004 {
@@ -87,6 +88,7 @@ examples:
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&vic1>;
interrupts = <27>;
};
@@ -127,6 +129,7 @@ examples:
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts-extended = <&vic0 19>, <&vic0 20>,
<&vic0 21>, <&vic0 22>,
<&vic1 15>, <&vic1 16>,
diff --git a/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml b/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml
new file mode 100644
index 000000000000..abe11df25761
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwlock/sprd,hwspinlock-r3p0.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwlock/sprd,hwspinlock-r3p0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum hardware spinlock
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ const: sprd,hwspinlock-r3p0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: enable
+
+ '#hwlock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#hwlock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sprd,sc9860-clk.h>
+
+ hwlock@40500000 {
+ compatible = "sprd,hwspinlock-r3p0";
+ reg = <0x40500000 0x1000>;
+ clocks = <&aon_gate CLK_SPLK_EB>;
+ clock-names = "enable";
+ #hwlock-cells = <1>;
+ };
+...
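Consumers request a lock by ID in a single cell, per '#hwlock-cells = <1>'; a
minimal sketch (the consumer node and lock number are illustrative):

    mailbox@400a0000 {
        hwlocks = <&hwlock 1>;
        hwlock-names = "comm";
    };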
diff --git a/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt
deleted file mode 100644
index 581db9d941ba..000000000000
--- a/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-SPRD Hardware Spinlock Device Binding
--------------------------------------
-
-Required properties :
-- compatible : should be "sprd,hwspinlock-r3p0".
-- reg : the register address of hwspinlock.
-- #hwlock-cells : hwlock users only use the hwlock id to represent a specific
- hwlock, so the number of cells should be <1> here.
-- clock-names : Must contain "enable".
-- clocks : Must contain a phandle entry for the clock in clock-names, see the
- common clock bindings.
-
-Please look at the generic hwlock binding for usage information for consumers,
-"Documentation/devicetree/bindings/hwlock/hwlock.txt"
-
-Example of hwlock provider:
- hwspinlock@40500000 {
- compatible = "sprd,hwspinlock-r3p0";
- reg = <0 0x40500000 0 0x1000>;
- #hwlock-cells = <1>;
- clock-names = "enable";
- clocks = <&clk_aon_apb_gates0 22>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
index 6df27b47b922..5b9bd2feda3b 100644
--- a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
@@ -44,11 +44,6 @@ properties:
description: frequency of the bus clock in Hz defaults to 100 kHz when not
specified
- multi-master:
- type: boolean
- description:
- states that there is another master active on this bus
-
required:
- reg
- compatible
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
index 82b9d6682297..a9dae5b52f28 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
@@ -38,6 +38,7 @@ properties:
- rockchip,rk3308-i2c
- rockchip,rk3328-i2c
- rockchip,rk3568-i2c
+ - rockchip,rk3576-i2c
- rockchip,rk3588-i2c
- rockchip,rv1126-i2c
- const: rockchip,rk3399-i2c
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sprd.txt b/Documentation/devicetree/bindings/i2c/i2c-sprd.txt
deleted file mode 100644
index 7b6b3b8d0d11..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-sprd.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-I2C for Spreadtrum platforms
-
-Required properties:
-- compatible: Should be "sprd,sc9860-i2c".
-- reg: Specify the physical base address of the controller and length
- of memory mapped region.
-- interrupts: Should contain I2C interrupt.
-- clock-names: Should contain following entries:
- "i2c" for I2C clock,
- "source" for I2C source (parent) clock,
- "enable" for I2C module enable clock.
-- clocks: Should contain a clock specifier for each entry in clock-names.
-- clock-frequency: Contains desired I2C bus clock frequency in Hz.
-- #address-cells: Should be 1 to describe address cells for I2C device address.
-- #size-cells: Should be 0 means no size cell for I2C device address.
-
-Optional properties:
-- Child nodes conforming to I2C bus binding
-
-Examples:
-i2c0: i2c@70500000 {
- compatible = "sprd,sc9860-i2c";
- reg = <0 0x70500000 0 0x1000>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "i2c", "source", "enable";
- clocks = <&clk_i2c3>, <&ext_26m>, <&clk_ap_apb_gates 11>;
- clock-frequency = <400000>;
- #address-cells = <1>;
- #size-cells = <0>;
-};
-
diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
index 92fbc1a2671a..b57ae6963e62 100644
--- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
@@ -103,6 +103,9 @@ properties:
items:
- const: i2c
+ power-domains:
+ maxItems: 1
+
dmas:
items:
- description: DMA channel for the reception FIFO
@@ -124,6 +127,8 @@ allOf:
- nvidia,tegra30-i2c
then:
properties:
+ clocks:
+ minItems: 2
clock-names:
items:
- const: div-clk
@@ -133,20 +138,13 @@ allOf:
properties:
compatible:
contains:
- const: nvidia,tegra114-i2c
- then:
- properties:
- clock-names:
- items:
- - const: div-clk
-
- - if:
- properties:
- compatible:
- contains:
- const: nvidia,tegra210-i2c
+ enum:
+ - nvidia,tegra114-i2c
+ - nvidia,tegra210-i2c
then:
properties:
+ clocks:
+ maxItems: 1
clock-names:
items:
- const: div-clk
@@ -158,6 +156,8 @@ allOf:
const: nvidia,tegra210-i2c-vi
then:
properties:
+ clocks:
+ minItems: 2
clock-names:
items:
- const: div-clk
@@ -165,6 +165,9 @@ allOf:
power-domains:
items:
- description: phandle to the VENC power domain
+ else:
+ properties:
+ power-domains: false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
index c33ae7b63b84..7dab3852c7f8 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
@@ -130,6 +130,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 4
maxItems: 4
clock-names:
items:
diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
index 7993fe463c4c..505a8ec92266 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
+++ b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
@@ -25,6 +25,10 @@ properties:
- renesas,riic-r9a07g054 # RZ/V2L
- const: renesas,riic-rz # RZ/A or RZ/G2L
+ - items:
+ - const: renesas,riic-r9a08g045 # RZ/G3S
+ - const: renesas,riic-r9a09g057 # RZ/V2H(P)
+
- const: renesas,riic-r9a09g057 # RZ/V2H(P)
reg:
diff --git a/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml b/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml
new file mode 100644
index 000000000000..ec0d39e73d26
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/sprd,sc9860-i2c.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/sprd,sc9860-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC9860 I2C controller
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: sprd,sc9860-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: I2C clock
+ - description: I2C source (parent) clock
+ - description: I2C module enable clock
+
+ clock-names:
+ items:
+ - const: i2c
+ - const: source
+ - const: enable
+
+ clock-frequency: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - clock-frequency
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c@70500000 {
+ compatible = "sprd,sc9860-i2c";
+ reg = <0x70500000 0x1000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_i2c3>, <&ext_26m>, <&clk_ap_apb_gates 11>;
+ clock-names = "i2c", "source", "enable";
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml b/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml
new file mode 100644
index 000000000000..28139b676661
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/tsd,mule-i2c-mux.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/tsd,mule-i2c-mux.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Theobroma Systems Mule I2C multiplexer
+
+maintainers:
+ - Farouk Bouabid <farouk.bouabid@cherry.de>
+ - Quentin Schulz <quentin.schulz@cherry.de>
+
+description: |
+ Theobroma Systems Mule is an MCU that emulates a set of I2C devices, among
+ which devices that are reachable through an I2C-mux. The devices on the mux
+ can be selected by writing the appropriate device number to an I2C config
+ register.
+
+
+ +--------------------------------------------------+
+ | Mule |
+ 0x18| +---------------+ |
+ -------->|Config register|----+ |
+ | +---------------+ | |
+ | V_ |
+ | | \ +--------+ |
+ | | \-------->| dev #0 | |
+ | | | +--------+ |
+ 0x6f| | M |-------->| dev #1 | |
+ ---------------------------->| U | +--------+ |
+ | | X |-------->| dev #2 | |
+ | | | +--------+ |
+ | | /-------->| dev #3 | |
+ | |__/ +--------+ |
+ +--------------------------------------------------+
+
+
+allOf:
+ - $ref: /schemas/i2c/i2c-mux.yaml#
+
+properties:
+ compatible:
+ const: tsd,mule-i2c-mux
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c-mux {
+ compatible = "tsd,mule-i2c-mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@6f {
+ compatible = "isil,isl1208";
+ reg = <0x6f>;
+ };
+ };
+ };
+...
+
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml
new file mode 100644
index 000000000000..f1ff5ff4f478
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/accel/adi,adxl380.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADXL380/382 3-Axis Digital Accelerometer
+
+maintainers:
+ - Ramona Gradinariu <ramona.gradinariu@analog.com>
+ - Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+description: |
+ The ADXL380/ADXL382 is a low noise density, low power, 3-axis
+ accelerometer with selectable measurement ranges. The ADXL380
+ supports the ±4 g, ±8 g, and ±16 g ranges, and the ADXL382 supports
+ ±15 g, ±30 g, and ±60 g ranges.
+
+ https://www.analog.com/en/products/adxl380.html
+
+properties:
+ compatible:
+ enum:
+ - adi,adxl380
+ - adi,adxl382
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - enum: [INT0, INT1]
+ - const: INT1
+
+ vddio-supply: true
+
+ vsupply-supply: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - vddio-supply
+ - vsupply-supply
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ accelerometer@54 {
+ compatible = "adi,adxl380";
+ reg = <0x54>;
+ vddio-supply = <&vddio>;
+ vsupply-supply = <&vsupply>;
+ interrupt-parent = <&gpio>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT0";
+ };
+ };
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ accelerometer@0 {
+ compatible = "adi,adxl380";
+ reg = <0>;
+ spi-max-frequency = <8000000>;
+ vddio-supply = <&vddio>;
+ vsupply-supply = <&vsupply>;
+ interrupt-parent = <&gpio>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "INT0";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
index 6ddb03f61bd9..951a3a2ba8fc 100644
--- a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.yaml
@@ -16,6 +16,7 @@ properties:
- kionix,kxcj91008
- kionix,kxtj21009
- kionix,kxtf9
+ - kionix,kx022-1020
- kionix,kx023-1025
reg:
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
new file mode 100644
index 000000000000..310f046e139f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
@@ -0,0 +1,254 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad4695.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices Easy Drive Multiplexed SAR Analog to Digital Converters
+
+maintainers:
+ - Michael Hennerich <Michael.Hennerich@analog.com>
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+ A family of similar multi-channel analog to digital converters with SPI bus.
+
+ * https://www.analog.com/en/products/ad4695.html
+ * https://www.analog.com/en/products/ad4696.html
+ * https://www.analog.com/en/products/ad4697.html
+ * https://www.analog.com/en/products/ad4698.html
+
+$ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ enum:
+ - adi,ad4695
+ - adi,ad4696
+ - adi,ad4697
+ - adi,ad4698
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 80000000
+
+ spi-cpol: true
+ spi-cpha: true
+
+ spi-rx-bus-width:
+ minimum: 1
+ maximum: 4
+
+ avdd-supply:
+ description: Analog power supply.
+
+ vio-supply:
+ description: I/O pin power supply.
+
+ ldo-in-supply:
+ description: Internal LDO Input. Mutually exclusive with vdd-supply.
+
+ vdd-supply:
+ description: Core power supply. Mutually exclusive with ldo-in-supply.
+
+ ref-supply:
+ description:
+ External reference voltage. Mutually exclusive with refin-supply.
+
+ refin-supply:
+ description:
+ Internal reference buffer input. Mutually exclusive with ref-supply.
+
+ com-supply:
+ description: Common voltage supply for pseudo-differential analog inputs.
+
+ adi,no-ref-current-limit:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ When this flag is present, the REF Overvoltage Reduced Current protection
+ is disabled.
+
+ adi,no-ref-high-z:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable this flag if the ref-supply requires Reference Input High-Z Mode
+ to be disabled for proper operation.
+
+ cnv-gpios:
+ description: The Convert Input (CNV). If omitted, CNV is tied to SPI CS.
+ maxItems: 1
+
+ reset-gpios:
+    description: The Reset Input (RESET). Should be configured as GPIO_ACTIVE_LOW.
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: Signal coming from the BSY_ALT_GP0 pin (ALERT or BUSY).
+ - description: Signal coming from the GP2 pin (ALERT).
+ - description: Signal coming from the GP3 pin (BUSY).
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: gp0
+ - const: gp2
+ - const: gp3
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+ description: |
+ The first cell is the GPn number: 0 to 3.
+ The second cell takes standard GPIO flags.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^in(?:[13579]|1[135])-supply$":
+ description:
+ Optional voltage supply for odd numbered channels when they are used as
+ the negative input for a pseudo-differential channel.
+
+ "^channel@[0-9a-f]$":
+ type: object
+ $ref: adc.yaml
+ unevaluatedProperties: false
+ description:
+      Describes each individual channel. In addition to the properties defined
+      below, bipolar from adc.yaml is also supported.
+
+ properties:
+ reg:
+ maximum: 15
+
+ common-mode-channel:
+ description:
+ Describes the common mode channel for single channels. 0xFF is REFGND
+ and OxFE is COM. Macros are available for these values in
+ dt-bindings/iio/adi,ad4695.h. Values 1 to 15 correspond to INx inputs.
+ Only odd numbered INx inputs can be used as common mode channels.
+ enum: [1, 3, 5, 7, 9, 11, 13, 15, 0xFE, 0xFF]
+ default: 0xFF
+
+ adi,no-high-z:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Enable this flag if the input pin requires the Analog Input High-Z
+ Mode to be disabled for proper operation.
+
+ required:
+ - reg
+
+ allOf:
+ # bipolar mode can't be used with REFGND
+ - if:
+ properties:
+ common-mode-channel:
+ const: 0xFF
+ then:
+ properties:
+ bipolar: false
+
+required:
+ - compatible
+ - reg
+ - avdd-supply
+ - vio-supply
+
+allOf:
+ - oneOf:
+ - required:
+ - ldo-in-supply
+ - required:
+ - vdd-supply
+
+ - oneOf:
+ - required:
+ - ref-supply
+ - required:
+ - refin-supply
+
+ # the internal reference buffer always requires high-z mode
+ - if:
+ required:
+ - refin-supply
+ then:
+ properties:
+ adi,no-ref-high-z: false
+
+ # limit channels for 8-channel chips
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad4697
+ - adi,ad4698
+ then:
+ patternProperties:
+ "^in(?:9|1[135])-supply$": false
+ "^channel@[0-7]$":
+ properties:
+ reg:
+ maximum: 7
+ common-mode-channel:
+ enum: [1, 3, 5, 7, 0xFE, 0xFF]
+ "^channel@[8-9a-f]$": false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/iio/adi,ad4695.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad4695";
+ reg = <0>;
+ spi-cpol;
+ spi-cpha;
+ spi-max-frequency = <80000000>;
+ avdd-supply = <&power_supply>;
+ ldo-in-supply = <&power_supply>;
+ vio-supply = <&io_supply>;
+ refin-supply = <&supply_5V>;
+ com-supply = <&supply_2V5>;
+ in3-supply = <&supply_2V5>;
+ reset-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Pseudo-differential channel between IN0 and REFGND. */
+ channel@0 {
+ reg = <0>;
+ };
+
+ /* Pseudo-differential channel between IN1 and COM. */
+ channel@1 {
+ reg = <1>;
+ common-mode-channel = <AD4695_COMMON_MODE_COM>;
+ bipolar;
+ };
+
+ /* Pseudo-differential channel between IN2 and IN3. */
+ channel@2 {
+ reg = <2>;
+ common-mode-channel = <3>;
+ bipolar;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index 190889c7b62a..66dd1c549bd3 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -39,11 +39,21 @@ properties:
clocks:
maxItems: 1
- description: phandle to the master clock (mclk)
+ description:
+      Optionally, either a crystal can be attached externally between the MCLK1
+      and MCLK2 pins, or an external CMOS-compatible clock can drive the MCLK2
+      pin. If absent, the internal 4.92 MHz clock is used, which can be made
+      available on the MCLK2 pin.
clock-names:
- items:
- - const: mclk
+ enum:
+ - xtal
+ - mclk
+
+ "#clock-cells":
+ const: 0
+ description:
+      If present when the internal clock is used, the device is configured as a clock provider.
interrupts:
maxItems: 1
@@ -134,8 +144,6 @@ patternProperties:
required:
- compatible
- reg
- - clocks
- - clock-names
- interrupts
- dvdd-supply
- avdd-supply
@@ -156,6 +164,18 @@ allOf:
then:
patternProperties:
"^channel@[0-9a-f]+$": false
+ - if:
+ anyOf:
+ - required:
+ - clocks
+ - required:
+ - clock-names
+ then:
+ properties:
+ "#clock-cells": false
+ required:
+ - clocks
+ - clock-names
unevaluatedProperties: false
@@ -201,8 +221,7 @@ examples:
spi-max-frequency = <1000000>;
spi-cpol;
spi-cpha;
- clocks = <&ad7192_mclk>;
- clock-names = "mclk";
+ #clock-cells = <0>;
interrupts = <25 0x2>;
interrupt-parent = <&gpio>;
aincom-supply = <&aincom>;
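The example now exercises the internal-clock case via '#clock-cells'; the
external-clock wiring removed above remains valid under the new schema and
would look like this (assuming an external fixed-rate ad7192_mclk provider):

    adc@0 {
        compatible = "adi,ad7192";
        reg = <0>;
        /* external CMOS clock on MCLK2 instead of the internal oscillator */
        clocks = <&ad7192_mclk>;
        clock-names = "mclk";
        /* remaining properties as in the example above */
    };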
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
index 899b777017ce..bd19abb867d9 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
@@ -15,10 +15,17 @@ description: |
* https://www.analog.com/en/products/ad7381.html
* https://www.analog.com/en/products/ad7383.html
* https://www.analog.com/en/products/ad7384.html
+ * https://www.analog.com/en/products/ad7386.html
+ * https://www.analog.com/en/products/ad7387.html
+ * https://www.analog.com/en/products/ad7388.html
* https://www.analog.com/en/products/ad7380-4.html
* https://www.analog.com/en/products/ad7381-4.html
* https://www.analog.com/en/products/ad7383-4.html
* https://www.analog.com/en/products/ad7384-4.html
+ * https://www.analog.com/en/products/ad7386-4.html
+ * https://www.analog.com/en/products/ad7387-4.html
+ * https://www.analog.com/en/products/ad7388-4.html
+
$ref: /schemas/spi/spi-peripheral-props.yaml#
@@ -29,10 +36,16 @@ properties:
- adi,ad7381
- adi,ad7383
- adi,ad7384
+ - adi,ad7386
+ - adi,ad7387
+ - adi,ad7388
- adi,ad7380-4
- adi,ad7381-4
- adi,ad7383-4
- adi,ad7384-4
+ - adi,ad7386-4
+ - adi,ad7387-4
+ - adi,ad7388-4
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index 00fdaed11cbd..69408cae3db9 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -35,65 +35,83 @@ properties:
avcc-supply: true
+ vdrive-supply:
+ description:
+ Determines the voltage level at which the interface logic pins will
+ operate.
+
+ refin-supply:
+ description:
+ The voltage supply for optional external reference voltage.
+
interrupts:
+ description:
+ The BUSY pin falling edge indicates that the conversion is over, and thus
+ new data is available.
maxItems: 1
adi,conversion-start-gpios:
description:
- Must be the device tree identifier of the CONVST pin.
- This logic input is used to initiate conversions on the analog
- input channels. As the line is active high, it should be marked
- GPIO_ACTIVE_HIGH.
- maxItems: 1
+ Must be the device tree identifier of the CONVST pin(s). This logic input
+ is used to initiate conversions on the analog input channels. As the line
+ is active high, it should be marked GPIO_ACTIVE_HIGH.
+ minItems: 1
+ maxItems: 2
reset-gpios:
description:
- Must be the device tree identifier of the RESET pin. If specified,
- it will be asserted during driver probe. As the line is active high,
- it should be marked GPIO_ACTIVE_HIGH.
+ Must be the device tree identifier of the RESET pin. If specified, it will
+ be asserted during driver probe. On the AD7606x, as the line is active
+ high, it should be marked GPIO_ACTIVE_HIGH. On the AD7616, as the line is
+ active low, it should be marked GPIO_ACTIVE_LOW.
maxItems: 1
standby-gpios:
description:
- Must be the device tree identifier of the STBY pin. This pin is used
- to place the AD7606 into one of two power-down modes, Standby mode or
+ Must be the device tree identifier of the STBY pin. This pin is used to
+ place the AD7606 into one of two power-down modes, Standby mode or
Shutdown mode. As the line is active low, it should be marked
GPIO_ACTIVE_LOW.
maxItems: 1
adi,first-data-gpios:
description:
- Must be the device tree identifier of the FRSTDATA pin.
- The FRSTDATA output indicates when the first channel, V1, is
- being read back on either the parallel, byte or serial interface.
- As the line is active high, it should be marked GPIO_ACTIVE_HIGH.
+ Must be the device tree identifier of the FRSTDATA pin. The FRSTDATA
+ output indicates when the first channel, V1, is being read back on either
+ the parallel, byte or serial interface. As the line is active high, it
+ should be marked GPIO_ACTIVE_HIGH.
maxItems: 1
adi,range-gpios:
description:
- Must be the device tree identifier of the RANGE pin. The polarity on
- this pin determines the input range of the analog input channels. If
- this pin is tied to a logic high, the analog input range is ±10V for
- all channels. If this pin is tied to a logic low, the analog input range
+ Must be the device tree identifier of the RANGE pin. The state on this
+ pin determines the input range of the analog input channels. If this pin
+ is tied to a logic high, the analog input range is ±10V for all channels.
+ On the AD760X, if this pin is tied to a logic low, the analog input range
is ±5V for all channels. As the line is active high, it should be marked
- GPIO_ACTIVE_HIGH.
- maxItems: 1
+ GPIO_ACTIVE_HIGH. On the AD7616, there are 2 pins, and if the 2 pins are
+ tied to a logic high, software mode is enabled, otherwise one of the 3
+ possible range values is selected.
+ minItems: 1
+ maxItems: 2
adi,oversampling-ratio-gpios:
description:
- Must be the device tree identifier of the over-sampling
- mode pins. As the line is active high, it should be marked
- GPIO_ACTIVE_HIGH.
+ Must be the device tree identifier of the over-sampling mode pins. As the
+ line is active high, it should be marked GPIO_ACTIVE_HIGH. On the AD7606X
+ parts that support it, if all 3 pins are tied to a logic high, software
+ mode is enabled.
maxItems: 3
adi,sw-mode:
description:
- Software mode of operation, so far available only for ad7616 and ad7606b.
- It is enabled when all three oversampling mode pins are connected to
- high level. The device is configured by the corresponding registers. If the
- adi,oversampling-ratio-gpios property is defined, then the driver will set the
- oversampling gpios to high. Otherwise, it is assumed that the pins are hardwired
- to VDD.
+ Software mode of operation, so far available only for AD7616 and AD7606B.
+ It is enabled when all three oversampling mode pins are connected to high
+      level for the AD7606B, or both range selection pins are connected to high
+ level for the AD7616. The device is configured by the corresponding
+ registers. If the adi,oversampling-ratio-gpios property is defined, then
+ the driver will set the oversampling gpios to high. Otherwise, it is
+ assumed that the pins are hardwired to VDD.
type: boolean
required:
@@ -101,12 +119,57 @@ required:
- reg
- spi-cpha
- avcc-supply
+ - vdrive-supply
- interrupts
- adi,conversion-start-gpios
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: adi,ad7616
+ then:
+ properties:
+ adi,first-data-gpios: false
+ standby-gpios: false
+ adi,range-gpios:
+ maxItems: 2
+ else:
+ properties:
+ adi,range-gpios:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7605-4
+ - adi,ad7616
+ then:
+ properties:
+ adi,oversampling-ratio-gpios: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,ad7605-4
+ - adi,ad7606-4
+ - adi,ad7606-6
+ - adi,ad7606-8
+ then:
+ properties:
+ adi,sw-mode: false
+ else:
+ properties:
+ adi,conversion-start-gpios:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
@@ -125,6 +188,7 @@ examples:
spi-cpha;
avcc-supply = <&adc_vref>;
+ vdrive-supply = <&vdd_supply>;
interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
interrupt-parent = <&gpio>;
@@ -136,7 +200,6 @@ examples:
<&gpio 23 GPIO_ACTIVE_HIGH>,
<&gpio 26 GPIO_ACTIVE_HIGH>;
standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
- adi,sw-mode;
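+        /* adi,sw-mode is omitted here: per the constraints above it is
+         * only valid on the AD7606B and AD7616. */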
};
};
...
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
index eecd5fbab695..2606c0c5dfc6 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
@@ -28,6 +28,9 @@ properties:
- adi,ad9265
- adi,ad9434
- adi,ad9467
+ - adi,ad9643
+ - adi,ad9649
+ - adi,ad9652
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml
new file mode 100644
index 000000000000..12e56b1b3d3f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/microchip,pac1921.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PAC1921 High-Side Power/Current Monitor with Analog Output
+
+maintainers:
+ - Matteo Martelli <matteomartelli3@gmail.com>
+
+description: |
+ The PAC1921 is a power/current monitoring device with an analog output
+ and I2C/SMBus interface.
+
+ Datasheet can be found here:
+ https://ww1.microchip.com/downloads/en/DeviceDoc/PAC1921-Data-Sheet-DS20005293E.pdf
+
+properties:
+ compatible:
+ const: microchip,pac1921
+
+ reg:
+ maxItems: 1
+
+ vdd-supply: true
+
+ "#io-channel-cells":
+ const: 1
+
+ shunt-resistor-micro-ohms:
+ description:
+ Value in micro Ohms of the shunt resistor connected between
+ the SENSE+ and SENSE- inputs, across which the current is measured.
+ Value is needed to compute the scaling of the measured current.
+
+ label:
+ description: Unique name to identify which device this is.
+
+ read-integrate-gpios:
+ description:
+      READ/INT input pin that controls the current state of the device: the
+      INTEGRATE state when driven high, or the READ state when driven low.
+      When left unconnected the pin floats, and its state can be overridden by
+      the INT_EN register bit after asserting the READ/INT_OVR register bit.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+ - shunt-resistor-micro-ohms
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@4c {
+ compatible = "microchip,pac1921";
+ reg = <0x4c>;
+ vdd-supply = <&vdd>;
+ #io-channel-cells = <1>;
+ label = "vbat";
+ shunt-resistor-micro-ohms = <10000>;
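+            /* 10000 uOhm = 10 mOhm shunt; current scaling follows
+             * I = Vsense / Rshunt, e.g. 100 mV across this shunt reads
+             * as 10 A. */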
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
index aa24b841393c..fd93ed3991e0 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
@@ -17,6 +17,9 @@ properties:
- const: rockchip,rk3399-saradc
- const: rockchip,rk3588-saradc
- items:
+ - const: rockchip,rk3576-saradc
+ - const: rockchip,rk3588-saradc
+ - items:
- enum:
- rockchip,px30-saradc
- rockchip,rk3308-saradc
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml
index cab0d425eaa4..c3a116427dc3 100644
--- a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.yaml
@@ -18,18 +18,39 @@ properties:
- sd-modulator
- ads1201
+ '#io-backend-cells':
+ const: 0
+
'#io-channel-cells':
const: 0
+ vref-supply:
+ description: Phandle to the vref input analog reference voltage.
+
+dependencies:
+ vref-supply: [ '#io-backend-cells' ]
+
required:
- compatible
- - '#io-channel-cells'
+
+anyOf:
+ - required: ['#io-backend-cells']
+ - required: ['#io-channel-cells']
additionalProperties: false
examples:
- |
- ads1202: adc {
+ // Backend binding example. SD modulator configured as an IIO backend device
+ ads1201_0: adc {
+ compatible = "sd-modulator";
+ vref-supply = <&vdd_adc>;
+ #io-backend-cells = <0>;
+ };
+
+ - |
+ // Legacy binding example. SD modulator configured as an IIO channel provider
+ ads1201_1: adc {
compatible = "sd-modulator";
#io-channel-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml
new file mode 100644
index 000000000000..f652b98615f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/sophgo,cv1800b-saradc.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/sophgo,cv1800b-saradc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title:
+  Sophgo CV1800B SoC 3-channel Successive Approximation Analog to
+  Digital Converter
+
+maintainers:
+ - Thomas Bonnefille <thomas.bonnefille@bootlin.com>
+
+description:
+ Datasheet at https://github.com/sophgo/sophgo-doc/releases
+
+properties:
+ compatible:
+ const: sophgo,cv1800b-saradc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^channel@[0-2]$":
+ $ref: adc.yaml
+
+ properties:
+ reg:
+ items:
+ - minimum: 0
+ maximum: 2
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sophgo,cv1800.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ adc@30f0000 {
+ compatible = "sophgo,cv1800b-saradc";
+ reg = <0x030f0000 0x1000>;
+ clocks = <&clk CLK_SARADC>;
+ interrupts = <100 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ };
+
+ channel@2 {
+ reg = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
index ec34c48d4878..ef9dcc365eab 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
@@ -54,7 +54,9 @@ properties:
It's not present on stm32f4.
It's required on stm32h7 and stm32mp1.
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 2
st,max-clk-rate-hz:
description:
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index 2722edab1d9a..c24ac98bbb3d 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -102,9 +102,11 @@ patternProperties:
items:
minimum: 0
maximum: 7
+ deprecated: true
st,adc-channel-names:
description: List of single-ended channel names.
+ deprecated: true
st,filter-order:
description: |
@@ -118,6 +120,12 @@ patternProperties:
"#io-channel-cells":
const: 1
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
st,adc-channel-types:
description: |
Single-ended channel input type.
@@ -128,6 +136,7 @@ patternProperties:
items:
enum: [ SPI_R, SPI_F, MANCH_R, MANCH_F ]
$ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ deprecated: true
st,adc-channel-clk-src:
description: |
@@ -139,6 +148,7 @@ patternProperties:
items:
enum: [ CLKIN, CLKOUT, CLKOUT_F, CLKOUT_R ]
$ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ deprecated: true
st,adc-alt-channel:
description:
@@ -147,6 +157,7 @@ patternProperties:
If not set, channel n is connected to SPI input n.
If set, channel n is connected to SPI input n + 1.
type: boolean
+ deprecated: true
st,filter0-sync:
description:
@@ -165,11 +176,60 @@ patternProperties:
- compatible
- reg
- interrupts
- - st,adc-channels
- - st,adc-channel-names
- st,filter-order
- "#io-channel-cells"
+ patternProperties:
+ "^channel@[0-7]$":
+ type: object
+ $ref: adc.yaml
+ unevaluatedProperties: false
+ description: Represents the external channels which are connected to the DFSDM.
+
+ properties:
+ reg:
+ maximum: 7
+
+ label:
+ description:
+ Unique name to identify which channel this is.
+
+ st,adc-channel-type:
+ description: |
+ Single-ended channel input type.
+ - "SPI_R": SPI with data on rising edge (default)
+ - "SPI_F": SPI with data on falling edge
+ - "MANCH_R": manchester codec, rising edge = logic 0, falling edge = logic 1
+ - "MANCH_F": manchester codec, rising edge = logic 1, falling edge = logic 0
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ SPI_R, SPI_F, MANCH_R, MANCH_F ]
+
+ st,adc-channel-clk-src:
+ description: |
+ Conversion clock source.
+ - "CLKIN": external SPI clock (CLKIN x)
+ - "CLKOUT": internal SPI clock (CLKOUT) (default)
+ - "CLKOUT_F": internal SPI clock divided by 2 (falling edge).
+ - "CLKOUT_R": internal SPI clock divided by 2 (rising edge).
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ CLKIN, CLKOUT, CLKOUT_F, CLKOUT_R ]
+
+ st,adc-alt-channel:
+ description:
+ Must be defined if two sigma delta modulators are
+          connected on the same SPI input.
+ If not set, channel n is connected to SPI input n.
+ If set, channel n is connected to SPI input n + 1.
+ type: boolean
+
+ io-backends:
+ description:
+ Used to pipe external sigma delta modulator or internal ADC backend to DFSDM channel.
+ maxItems: 1
+
+ required:
+ - reg
+
allOf:
- if:
properties:
@@ -199,9 +259,19 @@ patternProperties:
description:
From common IIO binding. Used to pipe external sigma delta
modulator or internal ADC output to DFSDM channel.
+ deprecated: true
- required:
- - io-channels
+ if:
+ required:
+ - st,adc-channels
+ then:
+ required:
+ - io-channels
+
+ patternProperties:
+ "^channel@[0-7]$":
+ required:
+ - io-backends
- if:
properties:
@@ -298,6 +368,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
+ // Example 1: Audio use case with generic binding
dfsdm0: filter@0 {
compatible = "st,stm32-dfsdm-dmic";
reg = <0>;
@@ -305,12 +376,18 @@ examples:
dmas = <&dmamux1 101 0x400 0x01>;
dma-names = "rx";
#io-channel-cells = <1>;
- st,adc-channels = <1>;
- st,adc-channel-names = "dmic0";
- st,adc-channel-types = "SPI_R";
- st,adc-channel-clk-src = "CLKOUT";
+ #address-cells = <1>;
+ #size-cells = <0>;
st,filter-order = <5>;
+ channel@1 {
+ reg = <1>;
+ label = "dmic0";
+ st,adc-channel-type = "SPI_R";
+ st,adc-channel-clk-src = "CLKOUT";
+ st,adc-alt-channel;
+ };
+
asoc_pdm0: dfsdm-dai {
compatible = "st,stm32h7-dfsdm-dai";
#sound-dai-cells = <0>;
@@ -318,19 +395,34 @@ examples:
};
};
- dfsdm_pdm1: filter@1 {
+ // Example 2: Analog use case with generic binding
+ dfsdm1: filter@1 {
compatible = "st,stm32-dfsdm-adc";
reg = <1>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&dmamux1 102 0x400 0x01>;
dma-names = "rx";
- #io-channel-cells = <1>;
- st,adc-channels = <2 3>;
- st,adc-channel-names = "in2", "in3";
- st,adc-channel-types = "SPI_R", "SPI_R";
- st,adc-channel-clk-src = "CLKOUT_F", "CLKOUT_F";
- io-channels = <&sd_adc2 &sd_adc3>;
st,filter-order = <1>;
+ #io-channel-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
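+      // Channel properties now live in child nodes; io-backends replaces
+      // the deprecated io-channels phandles of the legacy binding.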
+ channel@2 {
+ reg = <2>;
+ label = "in2";
+ st,adc-channel-type = "SPI_F";
+ st,adc-channel-clk-src = "CLKOUT";
+ st,adc-alt-channel;
+ io-backends = <&sd_adc2>;
+ };
+
+ channel@3 {
+ reg = <3>;
+ label = "in3";
+ st,adc-channel-type = "SPI_R";
+ st,adc-channel-clk-src = "CLKOUT";
+ io-backends = <&sd_adc3>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
index d40689f233f2..1caa896fce82 100644
--- a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
@@ -37,6 +37,17 @@ description: |
3 | batt_dischrg_i
4 | ts_v
+ AXP717
+ ------
+ 0 | batt_v
+ 1 | ts_v
+ 2 | vbus_v
+ 3 | vsys_v
+ 4 | pmic_temp
+ 5 | batt_chrg_i
+ 6 | vmid_v
+ 7 | bkup_batt_v
+
AXP813
------
0 | pmic_temp
@@ -52,6 +63,7 @@ properties:
oneOf:
- const: x-powers,axp209-adc
- const: x-powers,axp221-adc
+ - const: x-powers,axp717-adc
- const: x-powers,axp813-adc
- items:
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml
new file mode 100644
index 000000000000..33490853497b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml
@@ -0,0 +1,181 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/adi,ltc2664.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices LTC2664 DAC
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+ - Kim Seer Paller <kimseer.paller@analog.com>
+
+description: |
+ Analog Devices LTC2664 4 channel, 12-/16-Bit, +-10V DAC
+ https://www.analog.com/media/en/technical-documentation/data-sheets/2664fa.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ltc2664
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 50000000
+
+ vcc-supply:
+ description: Analog Supply Voltage Input.
+
+ v-pos-supply:
+ description: Positive Supply Voltage Input.
+
+ v-neg-supply:
+ description: Negative Supply Voltage Input.
+
+ iovcc-supply:
+ description: Digital Input/Output Supply Voltage.
+
+ ref-supply:
+ description:
+ Reference Input/Output. The voltage at the REF pin sets the full-scale
+ range of all channels. If not provided the internal reference is used and
+ also provided on the VREF pin.
+
+ reset-gpios:
+ description:
+ Active-low Asynchronous Clear Input. A logic low at this level-triggered
+ input clears the part to the reset code and range determined by the
+ hardwired option chosen using the MSPAN pins. The control registers are
+ cleared to zero.
+ maxItems: 1
+
+ adi,manual-span-operation-config:
+ description:
+ This property must mimic the MSPAN pin configurations. By tying the MSPAN
+ pins (MSP2, MSP1 and MSP0) to GND and/or VCC, any output range can be
+ hardware-configured with different mid-scale or zero-scale reset options.
+ The hardware configuration is latched during power on reset for proper
+ operation.
+      0 - MSP2=GND, MSP1=GND, MSP0=GND (+-10V, reset to 0V)
+      1 - MSP2=GND, MSP1=GND, MSP0=VCC (+-5V, reset to 0V)
+      2 - MSP2=GND, MSP1=VCC, MSP0=GND (+-2.5V, reset to 0V)
+      3 - MSP2=GND, MSP1=VCC, MSP0=VCC (0V to 10V, reset to 0V)
+      4 - MSP2=VCC, MSP1=GND, MSP0=GND (0V to 10V, reset to 5V)
+      5 - MSP2=VCC, MSP1=GND, MSP0=VCC (0V to 5V, reset to 0V)
+      6 - MSP2=VCC, MSP1=VCC, MSP0=GND (0V to 5V, reset to 2.5V)
+      7 - MSP2=VCC, MSP1=VCC, MSP0=VCC (0V to 5V, reset to 0V, enables SoftSpan)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3, 4, 5, 6, 7]
+ default: 7
+
+ io-channels:
+ description:
+ ADC channel to monitor voltages and temperature at the MUXOUT pin.
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^channel@[0-3]$":
+ $ref: dac.yaml
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ description: The channel number representing the DAC output channel.
+ maximum: 3
+
+ adi,toggle-mode:
+ description:
+          Set the channel as a toggle-enabled channel. Toggle operation enables
+ fast switching of a DAC output between two different DAC codes without
+ any SPI transaction.
+ type: boolean
+
+ output-range-microvolt:
+ description:
+ This property is only allowed when SoftSpan is enabled. If not present,
+ [0, 5000000] is the default output range.
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [5000000, 10000000]
+ - items:
+ - const: -5000000
+ - const: 5000000
+ - items:
+ - const: -10000000
+ - const: 10000000
+ - items:
+ - const: -2500000
+ - const: 2500000
+
+ required:
+ - reg
+
+ allOf:
+ - if:
+ not:
+ properties:
+ adi,manual-span-operation-config:
+ const: 7
+ then:
+ patternProperties:
+ "^channel@[0-3]$":
+ properties:
+ output-range-microvolt: false
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+ - vcc-supply
+ - iovcc-supply
+ - v-pos-supply
+ - v-neg-supply
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dac@0 {
+ compatible = "adi,ltc2664";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+
+ vcc-supply = <&vcc>;
+ iovcc-supply = <&vcc>;
+ ref-supply = <&vref>;
+ v-pos-supply = <&vpos>;
+ v-neg-supply = <&vneg>;
+
+ io-channels = <&adc 0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
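+            /* adi,manual-span-operation-config is omitted, so the default
+             * of 7 (SoftSpan) applies and per-channel
+             * output-range-microvolt is allowed. */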
+ channel@0 {
+ reg = <0>;
+ adi,toggle-mode;
+ output-range-microvolt = <(-10000000) 10000000>;
+ };
+
+ channel@1 {
+ reg = <1>;
+            output-range-microvolt = <0 10000000>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml
new file mode 100644
index 000000000000..c8c434c10643
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/adi,ltc2672.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices LTC2672 DAC
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+ - Kim Seer Paller <kimseer.paller@analog.com>
+
+description: |
+ Analog Devices LTC2672 5 channel, 12-/16-Bit, 300mA DAC
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ltc2672.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ltc2672
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 50000000
+
+ vcc-supply:
+ description: Analog Supply Voltage Input.
+
+ v-neg-supply:
+ description: Negative Supply Voltage Input.
+
+ vdd0-supply:
+ description: Positive Supply Voltage Input for DAC OUT0.
+
+ vdd1-supply:
+ description: Positive Supply Voltage Input for DAC OUT1.
+
+ vdd2-supply:
+ description: Positive Supply Voltage Input for DAC OUT2.
+
+ vdd3-supply:
+ description: Positive Supply Voltage Input for DAC OUT3.
+
+ vdd4-supply:
+ description: Positive Supply Voltage Input for DAC OUT4.
+
+ iovcc-supply:
+ description: Digital Input/Output Supply Voltage.
+
+ ref-supply:
+ description:
+ Reference Input/Output. The voltage at the REF pin sets the full-scale
+ range of all channels. If not provided the internal reference is used and
+ also provided on the VREF pin.
+
+ reset-gpios:
+ description:
+      Active-low Asynchronous Clear Input. A logic low at this level-triggered
+ input clears the device to the default reset code and output range, which
+ is zero-scale with the outputs off. The control registers are cleared to
+ zero.
+ maxItems: 1
+
+ adi,rfsadj-ohms:
+ description:
+ If FSADJ is tied to VCC, an internal RFSADJ (20 kΩ) is selected, which
+      results in nominal output ranges. An external resistor of 19 kΩ to 41 kΩ
+      can be used instead by connecting it between FSADJ and GND; it then
+      controls the scaling of the ranges, and the internal resistor is
+      automatically disconnected.
+ minimum: 19000
+ maximum: 41000
+ default: 20000
+
+ io-channels:
+ description:
+ ADC channel to monitor voltages and currents at the MUX pin.
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^channel@[0-4]$":
+ $ref: dac.yaml
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ description: The channel number representing the DAC output channel.
+ maximum: 4
+
+ adi,toggle-mode:
+ description:
+          Set the channel as a toggle-enabled channel. Toggle operation enables
+ fast switching of a DAC output between two different DAC codes without
+ any SPI transaction.
+ type: boolean
+
+ output-range-microamp:
+ items:
+ - const: 0
+ - enum: [3125000, 6250000, 12500000, 25000000, 50000000, 100000000,
+ 200000000, 300000000]
+
+ required:
+ - reg
+ - output-range-microamp
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+ - vcc-supply
+ - iovcc-supply
+ - v-neg-supply
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dac@0 {
+ compatible = "adi,ltc2672";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+
+ vcc-supply = <&vcc>;
+ iovcc-supply = <&vcc>;
+ ref-supply = <&vref>;
+ v-neg-supply = <&vneg>;
+
+ io-channels = <&adc 0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
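+            /* adi,rfsadj-ohms is omitted, so FSADJ is assumed tied to VCC:
+             * the internal 20 kOhm RFSADJ and nominal output ranges apply. */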
+ channel@0 {
+ reg = <0>;
+ adi,toggle-mode;
+ output-range-microamp = <0 3125000>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ output-range-microamp = <0 6250000>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/dac/dac.yaml b/Documentation/devicetree/bindings/iio/dac/dac.yaml
new file mode 100644
index 000000000000..daa40724e1cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/dac.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/dac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IIO Common Properties for DAC Channels
+
+maintainers:
+ - Jonathan Cameron <jic23@kernel.org>
+
+description:
+ A few properties are defined in a common way for DAC channels.
+
+properties:
+ $nodename:
+ pattern: "^channel(@[0-9a-f]+)?$"
+ description:
+ A channel index should match reg.
+
+ reg:
+ maxItems: 1
+
+ label:
+ description: Unique name to identify which channel this is.
+
+ output-range-microamp:
+ maxItems: 2
+ minItems: 2
+ description:
+ Specify the channel output full scale range in microamperes.
+
+ output-range-microvolt:
+ maxItems: 2
+ minItems: 2
+ description:
+ Specify the channel output full scale range in microvolts.
+
+anyOf:
+ - oneOf:
+ - required:
+ - reg
+ - output-range-microamp
+ - required:
+ - reg
+ - output-range-microvolt
+ - required:
+ - reg
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml
index aa6a3193b4e0..5f950ee9aec7 100644
--- a/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml
+++ b/Documentation/devicetree/bindings/iio/frequency/adi,adf4377.yaml
@@ -17,6 +17,7 @@ description: |
applications.
https://www.analog.com/en/products/adf4377.html
+ https://www.analog.com/en/products/adf4378.html
properties:
compatible:
@@ -73,6 +74,15 @@ required:
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,adf4378
+ then:
+ properties:
+ clk2-enable-gpios: false
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml b/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml
new file mode 100644
index 000000000000..ed0ea938f7f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/humidity/sciosense,ens210.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ScioSense ENS210 temperature and humidity sensor
+
+maintainers:
+ - Joshua Felmeden <jfelmeden@thegoodpenguin.co.uk>
+
+description: |
+ Temperature and Humidity sensor.
+
+ Datasheet:
+ https://www.sciosense.com/wp-content/uploads/2024/04/ENS21x-Datasheet.pdf
+ https://www.sciosense.com/wp-content/uploads/2023/12/ENS210-Datasheet.pdf
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - sciosense,ens210a
+ - sciosense,ens211
+ - sciosense,ens212
+ - sciosense,ens213a
+ - sciosense,ens215
+ - const: sciosense,ens210
+ - const: sciosense,ens210
+
+ reg:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ temperature-sensor@43 {
+ compatible = "sciosense,ens210";
+ reg = <0x43>;
+ };
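+
+        /* Fallback form, assuming a hypothetical ENS213A at address 0x44:
+         * variants use the ENS210 compatible as fallback. */
+        temperature-sensor@44 {
+            compatible = "sciosense,ens213a", "sciosense,ens210";
+            reg = <0x44>;
+        };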
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml b/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml
index 7de1b0e721ca..877e955d4ebd 100644
--- a/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml
+++ b/Documentation/devicetree/bindings/iio/light/liteon,ltrf216a.yaml
@@ -14,7 +14,9 @@ description:
properties:
compatible:
- const: liteon,ltrf216a
+ enum:
+ - liteon,ltr308
+ - liteon,ltrf216a
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml
new file mode 100644
index 000000000000..44896795c67e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/rohm,bh1745.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/rohm,bh1745.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BH1745 colour sensor
+
+maintainers:
+ - Mudit Sharma <muditsharma.info@gmail.com>
+
+description:
+ BH1745 is an I2C colour sensor with red, green, blue and clear
+ channels. It has a programmable active low interrupt pin.
+ Interrupt occurs when the signal from the selected interrupt
+ source channel crosses set interrupt threshold high/low level.
+
+properties:
+ compatible:
+ const: rohm,bh1745
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ colour-sensor@38 {
+ compatible = "rohm,bh1745";
+ reg = <0x38>;
+ interrupt-parent = <&gpio>;
+ interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ vdd-supply = <&vdd>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml
index 30a109a1bf3b..29c90ca5b258 100644
--- a/Documentation/devicetree/bindings/iio/light/rohm,bu27034.yaml
+++ b/Documentation/devicetree/bindings/iio/light/rohm,bu27034anuc.yaml
@@ -1,23 +1,22 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/iio/light/rohm,bu27034.yaml#
+$id: http://devicetree.org/schemas/iio/light/rohm,bu27034anuc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: ROHM BU27034 ambient light sensor
+title: ROHM BU27034ANUC ambient light sensor
maintainers:
- Matti Vaittinen <mazziesaccount@gmail.com>
description: |
- ROHM BU27034 is an ambient light sesnor with 3 channels and 3 photo diodes
+ ROHM BU27034ANUC is an ambient light sensor with 2 channels and 2 photo diodes
capable of detecting a very wide range of illuminance. Typical application
is adjusting LCD and backlight power of TVs and mobile phones.
- https://fscdn.rohm.com/en/products/databook/datasheet/ic/sensor/light/bu27034nuc-e.pdf
properties:
compatible:
- const: rohm,bu27034
+ const: rohm,bu27034anuc
reg:
maxItems: 1
@@ -37,7 +36,7 @@ examples:
#size-cells = <0>;
light-sensor@38 {
- compatible = "rohm,bu27034";
+ compatible = "rohm,bu27034anuc";
reg = <0x38>;
vdd-supply = <&vdd>;
};
diff --git a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
index f6e22dc9814a..e4341fdced98 100644
--- a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
+++ b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
@@ -18,10 +18,15 @@ allOf:
properties:
compatible:
- enum:
- - sensortek,stk3310
- - sensortek,stk3311
- - sensortek,stk3335
+ oneOf:
+ - enum:
+ - sensortek,stk3310
+ - sensortek,stk3311
+ - sensortek,stk3335
+ - items:
+ - enum:
+ - sensortek,stk3013
+ - const: sensortek,stk3310
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
index 9790f75fc669..e8ca9a234027 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
+++ b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
@@ -18,12 +18,15 @@ properties:
- asahi-kasei,ak09911
- asahi-kasei,ak09912
- asahi-kasei,ak09916
+ - items:
+ # ak09918 is register compatible with ak09912.
+ - const: asahi-kasei,ak09918
+ - const: asahi-kasei,ak09912
- enum:
- ak8975
- ak8963
- ak09911
- ak09912
- - ak09916
deprecated: true
reg:
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml b/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
index 2867ab6bf9b0..a3838ab0c524 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
+++ b/Documentation/devicetree/bindings/iio/magnetometer/bosch,bmc150_magn.yaml
@@ -36,6 +36,9 @@ properties:
interrupts:
maxItems: 1
+ mount-matrix:
+    description: An optional 3x3 mounting rotation matrix.
+
additionalProperties: false
required:
diff --git a/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml b/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml
new file mode 100644
index 000000000000..813239f6879a
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/pressure/sensirion,sdp500.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sensirion SDP500/SDP510 pressure sensor with I2C bus interface
+
+maintainers:
+ - Petar Stoykov <petar.stoykov@prodrive-technologies.com>
+
+description: |
+ Pressure sensor from Sensirion with I2C bus interface.
+  There is no software difference between the SDP500 and the SDP510.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: sensirion,sdp510
+ - const: sensirion,sdp500
+ - const: sensirion,sdp500
+
+ reg:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pressure@40 {
+ compatible = "sensirion,sdp500";
+ reg = <0x40>;
+ vdd-supply = <&foo>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml b/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml
new file mode 100644
index 000000000000..7a83ceced11c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/awinic,aw96103.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/awinic,aw96103.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Awinic's AW96103 capacitive proximity sensor and similar
+
+maintainers:
+ - Wang Shuaijie <wangshuaijie@awinic.com>
+
+description: |
+ Awinic's AW96103/AW96105 proximity sensor.
+ The specific absorption rate (SAR) is a metric that measures
+ the degree of absorption of electromagnetic radiation emitted by
+ wireless devices, such as mobile phones and tablets, by human tissue.
+ In mobile phone applications, the proximity sensor is primarily
+ used to detect the proximity of the human body to the phone. When the
+ phone approaches the human body, it will actively reduce the transmit
+ power of the antenna to keep the SAR within a safe range. Therefore,
+ we also refer to the proximity sensor as a SAR sensor.
+
+properties:
+ compatible:
+ enum:
+ - awinic,aw96103
+ - awinic,aw96105
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Generated by the device to announce that a close/far
+ proximity event has happened.
+ maxItems: 1
+
+ vcc-supply: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - vcc-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ proximity@12 {
+ compatible = "awinic,aw96103";
+ reg = <0x12>;
+ interrupt-parent = <&gpio>;
+ interrupts = <23 IRQ_TYPE_EDGE_FALLING>;
+ vcc-supply = <&pp1800_prox>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml b/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml
new file mode 100644
index 000000000000..64ce8bc8bd36
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/tyhx,hx9023s.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/tyhx,hx9023s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TYHX HX9023S capacitive proximity sensor
+
+maintainers:
+ - Yasin Lee <yasin.lee.x@gmail.com>
+
+description: |
+ TYHX HX9023S proximity sensor. Datasheet can be found here:
+ http://www.tianyihexin.com/ueditor/php/upload/file/20240614/1718336303992081.pdf
+
+properties:
+ compatible:
+ const: tyhx,hx9023s
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Generated by device to announce preceding read request has finished
+ and data is available or that a close/far proximity event has happened.
+ maxItems: 1
+
+ vdd-supply: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^channel@[0-4]$":
+ $ref: /schemas/iio/adc/adc.yaml
+ type: object
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 4
+ description: The channel number.
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ proximity@2a {
+ compatible = "tyhx,hx9023s";
+ reg = <0x2a>;
+ interrupt-parent = <&pio>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&pp1800_prox>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ single-channel = <0>;
+ };
+ channel@1 {
+ reg = <1>;
+ single-channel = <1>;
+ };
+ channel@2 {
+ reg = <2>;
+ single-channel = <2>;
+ };
+ channel@3 {
+ reg = <3>;
+ diff-channels = <1 0>;
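+                /* differential: input 1 (positive) vs input 0 (negative) */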
+ };
+ channel@4 {
+ reg = <4>;
+ diff-channels = <2 0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/adi,adp5588.yaml b/Documentation/devicetree/bindings/input/adi,adp5588.yaml
index 26ea66834ae2..336bc352579a 100644
--- a/Documentation/devicetree/bindings/input/adi,adp5588.yaml
+++ b/Documentation/devicetree/bindings/input/adi,adp5588.yaml
@@ -49,7 +49,10 @@ properties:
interrupt-controller:
description:
This property applies if either keypad,num-rows lower than 8 or
- keypad,num-columns lower than 10.
+      keypad,num-columns lower than 10. This property is optional if
+      keypad,num-rows or keypad,num-columns are not specified, as the
+      device is then configured purely for GPIO use, in which case
+      interrupts may or may not be utilized.
'#interrupt-cells':
const: 2
@@ -65,13 +68,23 @@ properties:
minItems: 1
maxItems: 2
+dependencies:
+ keypad,num-rows:
+ - linux,keymap
+ - keypad,num-columns
+ keypad,num-columns:
+ - linux,keymap
+ - keypad,num-rows
+ linux,keymap:
+ - keypad,num-rows
+ - keypad,num-columns
+ - interrupts
+ interrupt-controller:
+ - interrupts
+
required:
- compatible
- reg
- - interrupts
- - keypad,num-rows
- - keypad,num-columns
- - linux,keymap
unevaluatedProperties: false
@@ -108,4 +121,19 @@ examples:
>;
};
};
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@34 {
+ compatible = "adi,adp5588";
+ reg = <0x34>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+
...
diff --git a/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml b/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml
new file mode 100644
index 000000000000..a0d2460c55ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/cirrus,ep9307-keypad.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/cirrus,ep9307-keypad.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus ep93xx keypad
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+allOf:
+ - $ref: /schemas/input/matrix-keymap.yaml#
+
+description:
+  The KPP is designed to interface with a keypad matrix with 2-point or
+  3-point contact keys, and to simplify the software task of scanning the
+  matrix. It is capable of detecting, debouncing, and decoding one or
+  multiple keys pressed simultaneously on the keypad.
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9307-keypad
+ - items:
+ - enum:
+ - cirrus,ep9312-keypad
+ - cirrus,ep9315-keypad
+ - const: cirrus,ep9307-keypad
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ debounce-delay-ms:
+ description: |
+      Time in milliseconds that a key must be pressed or
+      released for the state change interrupt to trigger.
+
+ cirrus,prescale:
+ description: row/column counter pre-scaler load value
+ $ref: /schemas/types.yaml#/definitions/uint16
+ maximum: 1023
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - linux,keymap
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ keypad@800f0000 {
+ compatible = "cirrus,ep9307-keypad";
+ reg = <0x800f0000 0x0c>;
+ interrupt-parent = <&vic0>;
+ interrupts = <29>;
+ clocks = <&eclk EP93XX_CLK_KEYPAD>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&keypad_default_pins>;
+ linux,keymap = <KEY_UP>,
+ <KEY_DOWN>,
+ <KEY_VOLUMEDOWN>,
+ <KEY_HOME>,
+ <KEY_RIGHT>,
+ <KEY_LEFT>,
+ <KEY_ENTER>,
+ <KEY_VOLUMEUP>,
+ <KEY_F6>,
+ <KEY_F8>,
+ <KEY_F9>,
+ <KEY_F10>,
+ <KEY_F1>,
+ <KEY_F2>,
+ <KEY_F3>,
+ <KEY_POWER>;
+ };
diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.txt b/Documentation/devicetree/bindings/input/rotary-encoder.txt
deleted file mode 100644
index a644408b33b8..000000000000
--- a/Documentation/devicetree/bindings/input/rotary-encoder.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Rotary encoder DT bindings
-
-Required properties:
-- gpios: a spec for at least two GPIOs to be used, most significant first
-
-Optional properties:
-- linux,axis: the input subsystem axis to map to this rotary encoder.
- Defaults to 0 (ABS_X / REL_X)
-- rotary-encoder,steps: Number of steps in a full turnaround of the
- encoder. Only relevant for absolute axis. Defaults to 24 which is a
- typical value for such devices.
-- rotary-encoder,relative-axis: register a relative axis rather than an
- absolute one. Relative axis will only generate +1/-1 events on the input
- device, hence no steps need to be passed.
-- rotary-encoder,rollover: Automatic rollover when the rotary value becomes
- greater than the specified steps or smaller than 0. For absolute axis only.
-- rotary-encoder,steps-per-period: Number of steps (stable states) per period.
- The values have the following meaning:
- 1: Full-period mode (default)
- 2: Half-period mode
- 4: Quarter-period mode
-- wakeup-source: Boolean, rotary encoder can wake up the system.
-- rotary-encoder,encoding: String, the method used to encode steps.
- Supported are "gray" (the default and more common) and "binary".
-
-Deprecated properties:
-- rotary-encoder,half-period: Makes the driver work on half-period mode.
- This property is deprecated. Instead, a 'steps-per-period ' value should
- be used, such as "rotary-encoder,steps-per-period = <2>".
-
-See Documentation/input/devices/rotary-encoder.rst for more information.
-
-Example:
-
- rotary@0 {
- compatible = "rotary-encoder";
- gpios = <&gpio 19 1>, <&gpio 20 0>; /* GPIO19 is inverted */
- linux,axis = <0>; /* REL_X */
- rotary-encoder,encoding = "gray";
- rotary-encoder,relative-axis;
- };
-
- rotary@1 {
- compatible = "rotary-encoder";
- gpios = <&gpio 21 0>, <&gpio 22 0>;
- linux,axis = <1>; /* ABS_Y */
- rotary-encoder,steps = <24>;
- rotary-encoder,encoding = "binary";
- rotary-encoder,rollover;
- };
diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.yaml b/Documentation/devicetree/bindings/input/rotary-encoder.yaml
new file mode 100644
index 000000000000..e315aab7f584
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/rotary-encoder.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/rotary-encoder.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rotary encoder
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description:
+ See Documentation/input/devices/rotary-encoder.rst for more information.
+
+properties:
+ compatible:
+ const: rotary-encoder
+
+ gpios:
+ minItems: 2
+
+ linux,axis:
+ default: 0
+ description:
+ the input subsystem axis to map to this rotary encoder.
+ Defaults to 0 (ABS_X / REL_X)
+
+ rotary-encoder,steps:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 24
+ description:
+ Number of steps in a full turnaround of the
+ encoder. Only relevant for absolute axis. Defaults to 24 which is a
+ typical value for such devices.
+
+ rotary-encoder,relative-axis:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ register a relative axis rather than an
+ absolute one. Relative axis will only generate +1/-1 events on the input
+ device, hence no steps need to be passed.
+
+ rotary-encoder,rollover:
+ $ref: /schemas/types.yaml#/definitions/int32
+ description:
+ Automatic rollover when the rotary value becomes
+ greater than the specified steps or smaller than 0. For absolute axis only.
+
+ rotary-encoder,steps-per-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 1
+ enum: [1, 2, 4]
+ description: |
+ Number of steps (stable states) per period.
+ The values have the following meaning:
+ 1: Full-period mode (default)
+ 2: Half-period mode
+ 4: Quarter-period mode
+
+ wakeup-source: true
+
+ rotary-encoder,encoding:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: the method used to encode steps.
+ enum: [gray, binary]
+
+ rotary-encoder,half-period:
+ $ref: /schemas/types.yaml#/definitions/flag
+ deprecated: true
+ description:
+ Makes the driver work on half-period mode.
+    This property is deprecated. Instead, a 'steps-per-period' value should
+ be used, such as "rotary-encoder,steps-per-period = <2>".
+
+required:
+ - compatible
+ - gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ rotary {
+ compatible = "rotary-encoder";
+ gpios = <&gpio 19 1>, <&gpio 20 0>; /* GPIO19 is inverted */
+ linux,axis = <0>; /* REL_X */
+ rotary-encoder,encoding = "gray";
+ rotary-encoder,relative-axis;
+ };
+
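+  - |
+    // Absolute-axis variant, adapted from the second example of the
+    // removed rotary-encoder.txt binding.
+    rotary {
+        compatible = "rotary-encoder";
+        gpios = <&gpio 21 0>, <&gpio 22 0>;
+        linux,axis = <1>; /* ABS_Y */
+        rotary-encoder,steps = <24>;
+        rotary-encoder,encoding = "binary";
+    };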
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
deleted file mode 100644
index afa38dc069f0..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-* Analog Devices AD7879(-1)/AD7889(-1) touchscreen interface (SPI/I2C)
-
-Required properties:
-- compatible : for SPI slave, use "adi,ad7879"
- for I2C slave, use "adi,ad7879-1"
-- reg : SPI chipselect/I2C slave address
- See spi-bus.txt for more SPI slave properties
-- interrupts : touch controller interrupt
-- touchscreen-max-pressure : maximum reported pressure
-- adi,resistance-plate-x : total resistance of X-plate (for pressure
- calculation)
-Optional properties:
-- touchscreen-swapped-x-y : X and Y axis are swapped (boolean)
-- adi,first-conversion-delay : 0-12: In 128us steps (starting with 128us)
- 13 : 2.560ms
- 14 : 3.584ms
- 15 : 4.096ms
- This property has to be a '/bits/ 8' value
-- adi,acquisition-time : 0: 2us
- 1: 4us
- 2: 8us
- 3: 16us
- This property has to be a '/bits/ 8' value
-- adi,median-filter-size : 0: disabled
- 1: 4 measurements
- 2: 8 measurements
- 3: 16 measurements
- This property has to be a '/bits/ 8' value
-- adi,averaging : 0: 2 middle values (1 if median disabled)
- 1: 4 middle values
- 2: 8 middle values
- 3: 16 values
- This property has to be a '/bits/ 8' value
-- adi,conversion-interval: : 0 : convert one time only
- 1-255: 515us + val * 35us (up to 9.440ms)
- This property has to be a '/bits/ 8' value
-- gpio-controller : Switch AUX/VBAT/GPIO pin to GPIO mode
-
-Example:
-
- touchscreen0@2c {
- compatible = "adi,ad7879-1";
- reg = <0x2c>;
- interrupt-parent = <&gpio1>;
- interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
- touchscreen-max-pressure = <4096>;
- adi,resistance-plate-x = <120>;
- adi,first-conversion-delay = /bits/ 8 <3>;
- adi,acquisition-time = /bits/ 8 <1>;
- adi,median-filter-size = /bits/ 8 <2>;
- adi,averaging = /bits/ 8 <1>;
- adi,conversion-interval = /bits/ 8 <255>;
- };
-
- touchscreen1@1 {
- compatible = "adi,ad7879";
- spi-max-frequency = <5000000>;
- reg = <1>;
- spi-cpol;
- spi-cpha;
- gpio-controller;
- interrupt-parent = <&gpio1>;
- interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
- touchscreen-max-pressure = <4096>;
- adi,resistance-plate-x = <120>;
- adi,first-conversion-delay = /bits/ 8 <3>;
- adi,acquisition-time = /bits/ 8 <1>;
- adi,median-filter-size = /bits/ 8 <2>;
- adi,averaging = /bits/ 8 <1>;
- adi,conversion-interval = /bits/ 8 <255>;
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml b/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml
new file mode 100644
index 000000000000..caa5fa3cc3f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/adi,ad7879.yaml
@@ -0,0 +1,150 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/adi,ad7879.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD7879(-1)/AD7889(-1) touchscreen interface (SPI/I2C)
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+properties:
+ compatible:
+ description: |
+ for SPI slave, use "adi,ad7879"
+ for I2C slave, use "adi,ad7879-1"
+ enum:
+ - adi,ad7879
+ - adi,ad7879-1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ touchscreen-max-pressure:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: maximum reported pressure
+
+ adi,resistance-plate-x:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: total resistance of X-plate (for pressure calculation)
+
+ touchscreen-swapped-x-y:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: X and Y axis are swapped (boolean)
+
+ adi,first-conversion-delay:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ minimum: 0
+ maximum: 15
+ description: |
+ 0-12: In 128us steps (starting with 128us)
+ 13 : 2.560ms
+ 14 : 3.584ms
+ 15 : 4.096ms
+ This property has to be a '/bits/ 8' value
+
+ adi,acquisition-time:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ enum: [0, 1, 2, 3]
+ description: |
+ 0: 2us
+ 1: 4us
+ 2: 8us
+ 3: 16us
+ This property has to be a '/bits/ 8' value
+
+ adi,median-filter-size:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ enum: [0, 1, 2, 3]
+ description: |
+ 0: disabled
+ 1: 4 measurements
+ 2: 8 measurements
+ 3: 16 measurements
+ This property has to be a '/bits/ 8' value
+
+ adi,averaging:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ enum: [0, 1, 2, 3]
+ description: |
+ 0: 2 middle values (1 if median disabled)
+ 1: 4 middle values
+ 2: 8 middle values
+ 3: 16 values
+ This property has to be a '/bits/ 8' value
+
+ adi,conversion-interval:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ default: 0
+ description: |
+ 0 : convert one time only
+ 1-255: 515us + val * 35us (up to 9.440ms)
+ This property has to be a '/bits/ 8' value
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen0@2c {
+ compatible = "adi,ad7879-1";
+ reg = <0x2c>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ touchscreen-max-pressure = <4096>;
+ adi,resistance-plate-x = <120>;
+ adi,first-conversion-delay = /bits/ 8 <3>;
+ adi,acquisition-time = /bits/ 8 <1>;
+ adi,median-filter-size = /bits/ 8 <2>;
+ adi,averaging = /bits/ 8 <1>;
+ adi,conversion-interval = /bits/ 8 <255>;
+ };
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen1@1 {
+ compatible = "adi,ad7879";
+ reg = <1>;
+ spi-max-frequency = <5000000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ touchscreen-max-pressure = <4096>;
+ adi,resistance-plate-x = <120>;
+ adi,first-conversion-delay = /bits/ 8 <3>;
+ adi,acquisition-time = /bits/ 8 <1>;
+ adi,median-filter-size = /bits/ 8 <2>;
+ adi,averaging = /bits/ 8 <1>;
+ adi,conversion-interval = /bits/ 8 <255>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt b/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
deleted file mode 100644
index 399c87782935..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
+++ /dev/null
@@ -1,107 +0,0 @@
-Device tree bindings for TI's ADS7843, ADS7845, ADS7846, ADS7873, TSC2046
-SPI driven touch screen controllers.
-
-The node for this driver must be a child node of a SPI controller, hence
-all mandatory properties described in
-
- Documentation/devicetree/bindings/spi/spi-bus.txt
-
-must be specified.
-
-Additional required properties:
-
- compatible Must be one of the following, depending on the
- model:
- "ti,tsc2046"
- "ti,ads7843"
- "ti,ads7845"
- "ti,ads7846"
- "ti,ads7873"
-
- interrupts An interrupt node describing the IRQ line the chip's
- !PENIRQ pin is connected to.
- vcc-supply A regulator node for the supply voltage.
-
-
-Optional properties:
-
- ti,vref-delay-usecs vref supply delay in usecs, 0 for
- external vref (u16).
- ti,vref-mv The VREF voltage, in millivolts (u16).
- Set to 0 to use internal references
- (ADS7846).
- ti,keep-vref-on set to keep vref on for differential
- measurements as well
- ti,settle-delay-usec Settling time of the analog signals;
- a function of Vcc and the capacitance
- on the X/Y drivers. If set to non-zero,
- two samples are taken with settle_delay
- us apart, and the second one is used.
- ~150 uSec with 0.01uF caps (u16).
- ti,penirq-recheck-delay-usecs If set to non-zero, after samples are
- taken this delay is applied and penirq
- is rechecked, to help avoid false
- events. This value is affected by the
- material used to build the touch layer
- (u16).
- ti,x-plate-ohms Resistance of the X-plate,
- in Ohms (u16).
- ti,y-plate-ohms Resistance of the Y-plate,
- in Ohms (u16).
- ti,x-min Minimum value on the X axis (u16).
- ti,y-min Minimum value on the Y axis (u16).
- ti,debounce-tol Tolerance used for filtering (u16).
- ti,debounce-rep Additional consecutive good readings
- required after the first two (u16).
- ti,pendown-gpio-debounce Platform specific debounce time for the
- pendown-gpio (u32).
- pendown-gpio GPIO handle describing the pin the !PENIRQ
- line is connected to.
- ti,hsync-gpios GPIO line to poll for hsync
- wakeup-source use any event on touchscreen as wakeup event.
- (Legacy property support: "linux,wakeup")
- touchscreen-size-x General touchscreen binding, see [1].
- touchscreen-size-y General touchscreen binding, see [1].
- touchscreen-max-pressure General touchscreen binding, see [1].
- touchscreen-min-pressure General touchscreen binding, see [1].
- touchscreen-average-samples General touchscreen binding, see [1].
- touchscreen-inverted-x General touchscreen binding, see [1].
- touchscreen-inverted-y General touchscreen binding, see [1].
- touchscreen-swapped-x-y General touchscreen binding, see [1].
-
-[1] All general touchscreen properties are described in
- Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt.
-
-Deprecated properties:
-
- ti,swap-xy swap x and y axis
- ti,x-max Maximum value on the X axis (u16).
- ti,y-max Maximum value on the Y axis (u16).
- ti,pressure-min Minimum reported pressure value
- (threshold) - u16.
- ti,pressure-max Maximum reported pressure value (u16).
- ti,debounce-max Max number of additional readings per
- sample (u16).
-
-Example for a TSC2046 chip connected to an McSPI controller of an OMAP SoC::
-
- spi_controller {
- tsc2046@0 {
- reg = <0>; /* CS0 */
- compatible = "ti,tsc2046";
- interrupt-parent = <&gpio1>;
- interrupts = <8 0>; /* BOOT6 / GPIO 8 */
- spi-max-frequency = <1000000>;
- pendown-gpio = <&gpio1 8 0>;
- vcc-supply = <&reg_vcc3>;
-
- ti,x-min = /bits/ 16 <0>;
- ti,x-max = /bits/ 16 <8000>;
- ti,y-min = /bits/ 16 <0>;
- ti,y-max = /bits/ 16 <4800>;
- ti,x-plate-ohms = /bits/ 16 <40>;
- ti,pressure-max = /bits/ 16 <255>;
-
- wakeup-source;
- };
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml
index 8cf371b99f19..e4dbbafb3779 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml
@@ -666,7 +666,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- touch@56 {
+ touchscreen@56 {
compatible = "azoteq,iqs7210a";
reg = <0x56>;
irq-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
@@ -704,7 +704,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- touch@56 {
+ touchscreen@56 {
compatible = "azoteq,iqs7211e";
reg = <0x56>;
irq-gpios = <&gpio 4 (GPIO_ACTIVE_LOW |
diff --git a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
deleted file mode 100644
index ca304357c374..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/colibri-vf50-ts.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-* Toradex Colibri VF50 Touchscreen driver
-
-Required Properties:
-- compatible must be toradex,vf50-touchscreen
-- io-channels: adc channels being used by the Colibri VF50 module
- IIO ADC for Y-, X-, Y+, X+ connections
-- xp-gpios: FET gate driver for input of X+
-- xm-gpios: FET gate driver for input of X-
-- yp-gpios: FET gate driver for input of Y+
-- ym-gpios: FET gate driver for input of Y-
-- interrupts: pen irq interrupt for touch detection, signal from X plate
-- pinctrl-names: "idle", "default"
-- pinctrl-0: pinctrl node for pen/touch detection, pinctrl must provide
- pull-up resistor on X+, X-.
-- pinctrl-1: pinctrl node for X/Y and pressure measurement (ADC) state pinmux
-- vf50-ts-min-pressure: pressure level at which to stop measuring X/Y values
-
-Example:
-
- touchctrl: vf50_touchctrl {
- compatible = "toradex,vf50-touchscreen";
- io-channels = <&adc1 0>,<&adc0 0>,
- <&adc0 1>,<&adc1 2>;
- xp-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
- xm-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
- yp-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
- ym-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
- interrupt-parent = <&gpio0>;
- interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-names = "idle","default";
- pinctrl-0 = <&pinctrl_touchctrl_idle>, <&pinctrl_touchctrl_gpios>;
- pinctrl-1 = <&pinctrl_touchctrl_default>, <&pinctrl_touchctrl_gpios>;
- vf50-ts-min-pressure = <200>;
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
index 51d48d4130d3..70a922e213f2 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
@@ -126,7 +126,7 @@ examples:
i2c {
#address-cells = <1>;
#size-cells = <0>;
- edt-ft5x06@38 {
+ touchscreen@38 {
compatible = "edt,edt-ft5406";
reg = <0x38>;
interrupt-parent = <&gpio2>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
index 2a2d86cfd104..eb4992f708b7 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
@@ -69,7 +69,7 @@ examples:
i2c {
#address-cells = <1>;
#size-cells = <0>;
- gt928@5d {
+ touchscreen@5d {
compatible = "goodix,gt928";
reg = <0x5d>;
interrupt-parent = <&gpio>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml b/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml
new file mode 100644
index 000000000000..604921733d2c
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml
@@ -0,0 +1,183 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/ti,ads7843.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI SPI-driven touchscreen controllers
+
+maintainers:
+ - Alexander Stein <alexander.stein@ew.tq-group.com>
+ - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+ - Marek Vasut <marex@denx.de>
+
+description:
+ TI ADS7843, ADS7845, ADS7846, ADS7873 and TSC2046 SPI-driven touchscreen
+ controllers.
+
+properties:
+ compatible:
+ enum:
+ - ti,ads7843
+ - ti,ads7845
+ - ti,ads7846
+ - ti,ads7873
+ - ti,tsc2046
+
+ interrupts:
+ maxItems: 1
+
+ pendown-gpio:
+ maxItems: 1
+ description:
+ GPIO handle describing the pin the !PENIRQ line is connected to.
+
+ vcc-supply:
+ description:
+ A regulator node for the supply voltage.
+
+ wakeup-source: true
+
+ ti,debounce-max:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Max number of additional readings per sample.
+
+ ti,debounce-rep:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Additional consecutive good readings required after the first two.
+
+ ti,debounce-tol:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Tolerance used for filtering.
+
+ ti,hsync-gpios:
+ maxItems: 1
+ description:
+ GPIO line to poll for hsync.
+
+ ti,keep-vref-on:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set to keep Vref on for differential measurements as well.
+
+ ti,pendown-gpio-debounce:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Platform specific debounce time for the pendown-gpio.
+
+ ti,penirq-recheck-delay-usecs:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ If set to non-zero, after samples are taken this delay is applied and
+ penirq is rechecked, to help avoid false events. This value is
+ affected by the material used to build the touch layer.
+
+ ti,pressure-max:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Maximum reported pressure value.
+
+ ti,pressure-min:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Minimum reported pressure value (threshold).
+
+ ti,settle-delay-usec:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Settling time of the analog signals; a function of Vcc and the
+ capacitance on the X/Y drivers. If set to non-zero, two samples are
+ taken, settle_delay microseconds apart, and the second one is used.
+ Roughly 150 us with 0.01 uF capacitors.
+
+ ti,swap-xy:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Swap x and y axis.
+
+ ti,vref-delay-usecs:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Vref supply delay in usecs, 0 for external Vref.
+
+ ti,vref-mv:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ The VREF voltage, in millivolts.
+ Set to 0 to use internal references (ADS7846).
+
+ ti,x-plate-ohms:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Resistance of the X-plate, in Ohms.
+
+ ti,x-max:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Maximum value on the X axis.
+
+ ti,x-min:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Minimum value on the X axis.
+
+ ti,y-plate-ohms:
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Resistance of the Y-plate, in Ohms.
+
+ ti,y-max:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Maximum value on the Y axis.
+
+ ti,y-min:
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/uint16
+ description:
+ Minimum value on the Y axis.
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: touchscreen.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@0 {
+ compatible = "ti,tsc2046";
+ reg = <0>; /* CS0 */
+ interrupt-parent = <&gpio1>;
+ interrupts = <8 0>; /* BOOT6 / GPIO 8 */
+ pendown-gpio = <&gpio1 8 0>;
+ spi-max-frequency = <1000000>;
+ vcc-supply = <&reg_vcc3>;
+ wakeup-source;
+
+ ti,pressure-max = /bits/ 16 <255>;
+ ti,x-max = /bits/ 16 <8000>;
+ ti,x-min = /bits/ 16 <0>;
+ ti,x-plate-ohms = /bits/ 16 <40>;
+ ti,y-max = /bits/ 16 <4800>;
+ ti,y-min = /bits/ 16 <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml b/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml
new file mode 100644
index 000000000000..5094c5183c74
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/toradex,vf50-touchscreen.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/toradex,vf50-touchscreen.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toradex Colibri VF50 Touchscreen
+
+maintainers:
+ - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+ - Sanchayan Maity <maitysanchayan@gmail.com>
+
+properties:
+ compatible:
+ const: toradex,vf50-touchscreen
+
+ interrupts:
+ maxItems: 1
+
+ io-channels:
+ maxItems: 4
+ description:
+ ADC channels used by the Colibri VF50 module
+ (IIO ADC channels for the Y-, X-, Y+ and X+ connections)
+
+ xp-gpios:
+ description: FET gate driver for input of X+
+
+ xm-gpios:
+ description: FET gate driver for input of X-
+
+ yp-gpios:
+ description: FET gate driver for input of Y+
+
+ ym-gpios:
+ description: FET gate driver for input of Y-
+
+ vf50-ts-min-pressure:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 50
+ maximum: 2000
+ description: pressure level at which to stop measuring X/Y values
+
+required:
+ - compatible
+ - io-channels
+ - xp-gpios
+ - xm-gpios
+ - yp-gpios
+ - ym-gpios
+ - interrupts
+ - vf50-ts-min-pressure
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ touchscreen {
+ compatible = "toradex,vf50-touchscreen";
+ interrupt-parent = <&gpio0>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ io-channels = <&adc1 0>, <&adc0 0>, <&adc0 1>, <&adc1 2>;
+ xp-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
+ xm-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+ yp-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
+ ym-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "idle", "default";
+ pinctrl-0 = <&pinctrl_touchctrl_idle>, <&pinctrl_touchctrl_gpios>;
+ pinctrl-1 = <&pinctrl_touchctrl_default>, <&pinctrl_touchctrl_gpios>;
+ vf50-ts-min-pressure = <200>;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml
index b1507463a03e..3f663ce3e44e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml
@@ -16,6 +16,7 @@ maintainers:
allOf:
- $ref: touchscreen.yaml#
+ - $ref: ../input.yaml#
properties:
$nodename:
@@ -79,6 +80,15 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 2]
+ linux,keycodes:
+ description:
+ This property specifies an array of keycodes assigned to the
+ touch-keys that can be present in some touchscreen configurations.
+ If the touch-keys are enabled, the controller firmware assigns some
+ touch sense lines to those keys.
+ minItems: 1
+ maxItems: 8
+
touchscreen-size-x: true
touchscreen-size-y: true
touchscreen-fuzz-x: true
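
As a concrete illustration of the new linux,keycodes property, here is a minimal sketch of a touchscreen node with two firmware-managed touch-keys; the I2C address, supplies, interrupt line and keycodes are illustrative assumptions (the KEY_* and IRQ_TYPE_* constants come from the usual dt-bindings headers):

    touchscreen@20 {
        compatible = "zinitix,bt541";
        reg = <0x20>;
        interrupt-parent = <&gpio>;
        interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
        vcca-supply = <&reg_vcca_tsp>;
        vdd-supply = <&reg_vdd_tsp>;
        touchscreen-size-x = <540>;
        touchscreen-size-y = <960>;
        /* Two touch-keys mapped by the controller firmware */
        linux,keycodes = <KEY_APPSELECT KEY_BACK>;
    };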
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml
index fd15ab5014fb..4b08be72bbd7 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8939.yaml
@@ -4,14 +4,14 @@
$id: http://devicetree.org/schemas/interconnect/qcom,msm8939.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Qualcomm MSM8939 Network-On-Chip interconnect
+title: Qualcomm MSM8937/MSM8939/MSM8976 Network-On-Chip interconnect
maintainers:
- Konrad Dybcio <konradybcio@kernel.org>
-description: |
- The Qualcomm MSM8939 interconnect providers support adjusting the
- bandwidth requirements between the various NoC fabrics.
+description:
+ The Qualcomm MSM8937/MSM8939/MSM8976 interconnect providers support
+ adjusting the bandwidth requirements between the various NoC fabrics.
allOf:
- $ref: qcom,rpm-common.yaml#
@@ -19,9 +19,15 @@ allOf:
properties:
compatible:
enum:
+ - qcom,msm8937-bimc
+ - qcom,msm8937-pcnoc
+ - qcom,msm8937-snoc
- qcom,msm8939-bimc
- qcom,msm8939-pcnoc
- qcom,msm8939-snoc
+ - qcom,msm8976-bimc
+ - qcom,msm8976-pcnoc
+ - qcom,msm8976-snoc
reg:
maxItems: 1
@@ -39,7 +45,10 @@ patternProperties:
properties:
compatible:
- const: qcom,msm8939-snoc-mm
+ enum:
+ - qcom,msm8937-snoc-mm
+ - qcom,msm8939-snoc-mm
+ - qcom,msm8976-snoc-mm
required:
- compatible
@@ -60,12 +69,6 @@ examples:
compatible = "qcom,msm8939-snoc";
reg = <0x00580000 0x14000>;
#interconnect-cells = <1>;
- };
-
- bimc: interconnect@400000 {
- compatible = "qcom,msm8939-bimc";
- reg = <0x00400000 0x62000>;
- #interconnect-cells = <1>;
snoc_mm: interconnect-snoc {
compatible = "qcom,msm8939-snoc-mm";
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml
index 732e9fa001a4..343ff62d7b65 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8953.yaml
@@ -13,8 +13,7 @@ description: |
The Qualcomm MSM8953 interconnect providers support adjusting the
bandwidth requirements between the various NoC fabrics.
- See also:
- - dt-bindings/interconnect/qcom,msm8953.h
+ See also: include/dt-bindings/interconnect/qcom,msm8953.h
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
index 2cd1f5590fd9..189f5900ee50 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@ -26,6 +26,7 @@ properties:
- items:
- enum:
- qcom,qcm2290-cpu-bwmon
+ - qcom,sa8775p-cpu-bwmon
- qcom,sc7180-cpu-bwmon
- qcom,sc7280-cpu-bwmon
- qcom,sc8280xp-cpu-bwmon
@@ -39,6 +40,7 @@ properties:
- const: qcom,sdm845-bwmon # BWMON v4, unified register space
- items:
- enum:
+ - qcom,sa8775p-llcc-bwmon
- qcom,sc7180-llcc-bwmon
- qcom,sc8280xp-llcc-bwmon
- qcom,sm6350-cpu-bwmon
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index 9318b845ec35..1b9164dc162f 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -71,7 +71,7 @@ properties:
- qcom,sdx65-system-noc
- qcom,sm8150-aggre1-noc
- qcom,sm8150-aggre2-noc
- - qcom,sm8150-camnoc-noc
+ - qcom,sm8150-camnoc-virt
- qcom,sm8150-compute-noc
- qcom,sm8150-config-noc
- qcom,sm8150-dc-noc
@@ -113,6 +113,9 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sc8180x-camnoc-virt
+ - qcom,sc8180x-mc-virt
+ - qcom,sc8180x-qup-virt
- qcom,sdx65-mc-virt
- qcom,sm8250-qup-virt
then:
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index 8a3c2398b10c..bf9a101e4d42 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -113,6 +113,8 @@ properties:
# LED indicates NAND memory activity (deprecated),
# in new implementations use "mtd"
- nand-disk
+ # LED indicates network activity
+ - netdev
# No trigger assigned to the LED. This is the default mode
# if trigger is absent
- none
diff --git a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
deleted file mode 100644
index b1103d961d6c..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-* Texas Instruments - LM3692x Highly Efficient White LED Driver
-
-The LM3692x is an ultra-compact, highly efficient,
-white-LED driver designed for LCD display backlighting.
-
-The main difference between the LM36922 and LM36923 is the number of
-LED strings it supports. The LM36922 supports two strings while the LM36923
-supports three strings.
-
-Required properties:
- - compatible:
- "ti,lm36922"
- "ti,lm36923"
- - reg : I2C slave address
- - #address-cells : 1
- - #size-cells : 0
-
-Optional properties:
- - enable-gpios : gpio pin to enable/disable the device.
- - vled-supply : LED supply
- - ti,ovp-microvolt: Overvoltage protection in
- micro-volt, can be 17000000, 21000000, 25000000 or
- 29000000. If ti,ovp-microvolt is not specified it
- defaults to 29000000.
-
-Required child properties:
- - reg : 0 - Will enable all LED sync paths
- 1 - Will enable the LED1 sync
- 2 - Will enable the LED2 sync
- 3 - Will enable the LED3 sync (LM36923 only)
-
-Optional child properties:
- - function : see Documentation/devicetree/bindings/leds/common.txt
- - color : see Documentation/devicetree/bindings/leds/common.txt
- - label : see Documentation/devicetree/bindings/leds/common.txt (deprecated)
- - linux,default-trigger :
- see Documentation/devicetree/bindings/leds/common.txt
- - led-max-microamp :
- see Documentation/devicetree/bindings/leds/common.txt
-
-Example:
-
-#include <dt-bindings/leds/common.h>
-
-led-controller@36 {
- compatible = "ti,lm3692x";
- reg = <0x36>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
- vled-supply = <&vbatt>;
- ti,ovp-microvolt = <29000000>;
-
- led@0 {
- reg = <0>;
- function = LED_FUNCTION_BACKLIGHT;
- color = <LED_COLOR_ID_WHITE>;
- linux,default-trigger = "backlight";
- led-max-microamp = <20000>;
- };
-}
-
-For more product information please see the link below:
-https://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
diff --git a/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt b/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt
deleted file mode 100644
index df2b4e1c492b..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-sc27xx-bltc.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-LEDs connected to Spreadtrum SC27XX PMIC breathing light controller
-
-The SC27xx breathing light controller supports to 3 outputs:
-red LED, green LED and blue LED. Each LED can work at normal
-PWM mode or breath light mode.
-
-Required properties:
-- compatible: Should be "sprd,sc2731-bltc".
-- #address-cells: Must be 1.
-- #size-cells: Must be 0.
-- reg: Specify the controller address.
-
-Required child properties:
-- reg: Port this LED is connected to.
-
-Optional child properties:
-- function: See Documentation/devicetree/bindings/leds/common.txt.
-- color: See Documentation/devicetree/bindings/leds/common.txt.
-- label: See Documentation/devicetree/bindings/leds/common.txt (deprecated).
-
-Examples:
-
-led-controller@200 {
- compatible = "sprd,sc2731-bltc";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x200>;
-
- led@0 {
- color = <LED_COLOR_ID_RED>;
- reg = <0x0>;
- };
-
- led@1 {
- color = <LED_COLOR_ID_GREEN>;
- reg = <0x1>;
- };
-
- led@2 {
- color = <LED_COLOR_ID_BLUE>;
- reg = <0x2>;
- };
-};
diff --git a/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml b/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml
index 654915c1f687..ab8c90cbadb5 100644
--- a/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml
+++ b/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml
@@ -11,19 +11,21 @@ maintainers:
- Marek Vasut <marex@denx.de>
description:
- The NXP PCA9952/PCA9955B are programmable LED controllers connected via I2C
- that can drive 16 separate lines. Each of them can be individually switched
+ The NXP PCA995x family are programmable LED controllers connected via I2C
+ that can drive multiple separate lines. Each of them can be individually switched
on and off, and brightness can be controlled via individual PWM.
Datasheets are available at
https://www.nxp.com/docs/en/data-sheet/PCA9952_PCA9955.pdf
https://www.nxp.com/docs/en/data-sheet/PCA9955B.pdf
+ https://www.nxp.com/docs/en/data-sheet/PCA9956B.pdf
properties:
compatible:
enum:
- nxp,pca9952
- nxp,pca9955b
+ - nxp,pca9956b
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml b/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml
new file mode 100644
index 000000000000..5853410c7a45
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/sprd,sc2731-bltc.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/sprd,sc2731-bltc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC2731 PMIC breathing light controller
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+description: |
+ The SC2731 breathing light controller supports up to 3 outputs:
+ red LED, green LED and blue LED. Each LED can work in normal PWM mode
+ or breathing light mode.
+
+properties:
+ compatible:
+ const: sprd,sc2731-bltc
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^led@[0-2]$":
+ type: object
+ $ref: common.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 2
+
+ required:
+ - reg
+
+required:
+ - compatible
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ pmic {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@200 {
+ compatible = "sprd,sc2731-bltc";
+ reg = <0x200>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@1 {
+ reg = <0x1>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@2 {
+ reg = <0x2>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/leds/ti.lm36922.yaml b/Documentation/devicetree/bindings/leds/ti.lm36922.yaml
new file mode 100644
index 000000000000..8ffbc6b785a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/ti.lm36922.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/ti.lm36922.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments - LM3692x Highly Efficient White LED Driver
+
+maintainers:
+ - Dan Murphy <dmurphy@ti.com>
+
+description: |
+ The LM3692x is an ultra-compact, highly efficient,
+ white-LED driver designed for LCD display backlighting.
+
+ The main difference between the LM36922 and the LM36923 is the number of
+ LED strings supported: the LM36922 supports two strings, while the
+ LM36923 supports three.
+
+ For more product information please see the link below:
+ https://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
+
+properties:
+ compatible:
+ enum:
+ - ti,lm36922
+ - ti,lm36923
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ enable-gpios:
+ description: GPIO pin used to enable/disable the device.
+
+ vled-supply:
+ description: LED supply
+
+ ti,ovp-microvolt:
+ description: Overvoltage protection.
+ default: 29000000
+ enum: [17000000, 21000000, 25000000, 29000000]
+
+patternProperties:
+ '^led@[0-3]$':
+ type: object
+ $ref: common.yaml
+ properties:
+ reg:
+ enum: [0, 1, 2, 3]
+ description: |
+ 0 - Will enable all LED sync paths
+ 1 - Will enable the LED1 sync
+ 2 - Will enable the LED2 sync
+ 3 - Will enable the LED3 sync (LM36923 only)
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,lm36922
+ then:
+ properties:
+ led@3: false
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@36 {
+ compatible = "ti,lm36922";
+ reg = <0x36>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ vled-supply = <&vbatt>;
+ ti,ovp-microvolt = <29000000>;
+
+ led@0 {
+ reg = <0>;
+ function = LED_FUNCTION_BACKLIGHT;
+ color = <LED_COLOR_ID_WHITE>;
+ linux,default-trigger = "backlight";
+ led-max-microamp = <20000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml b/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml
index 72c1d9e82c89..8a1369df4ecb 100644
--- a/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml
+++ b/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml
@@ -17,9 +17,15 @@ description: |
properties:
compatible:
- enum:
- - mediatek,mt8195-adsp-mbox
- - mediatek,mt8186-adsp-mbox
+ oneOf:
+ - enum:
+ - mediatek,mt8186-adsp-mbox
+ - mediatek,mt8195-adsp-mbox
+ - items:
+ - enum:
+ - mediatek,mt8188-adsp-mbox
+ - const: mediatek,mt8186-adsp-mbox
+
"#mbox-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
index 05e4e1d51713..2d66770ed361 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
+++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
@@ -24,7 +24,9 @@ properties:
compatible:
items:
- enum:
+ - qcom,qcs8300-ipcc
- qcom,qdu1000-ipcc
+ - qcom,sa8255p-ipcc
- qcom,sa8775p-ipcc
- qcom,sc7280-ipcc
- qcom,sc8280xp-ipcc
diff --git a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
index 55930f6107c9..47dce75aeae6 100644
--- a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
@@ -31,7 +31,8 @@ properties:
- items:
- enum:
- amlogic,gxbb-vdec # GXBB (S905)
- - amlogic,gxl-vdec # GXL (S905X, S905D)
+ - amlogic,gxl-vdec # GXL (S905D, S905W, S905X, S905Y)
+ - amlogic,gxlx-vdec # GXLX (S905L)
- amlogic,gxm-vdec # GXM (S912)
- const: amlogic,gx-vdec
- enum:
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml
new file mode 100644
index 000000000000..ca57c01739d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2023-2024 Linaro Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ovti,og01a1b.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OmniVision OG01A1B Image Sensor
+
+maintainers:
+ - Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>
+
+description:
+ The OmniVision OG01A1B is a black and white CMOS 1.3 megapixel (1280x1024)
+ image sensor controlled over an I2C-compatible SCCB bus.
+ The sensor transmits images on a MIPI CSI-2 output interface with one or
+ two data lanes.
+
+allOf:
+ - $ref: /schemas/media/video-interface-devices.yaml#
+
+properties:
+ compatible:
+ const: ovti,og01a1b
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ reset-gpios:
+ description: Active low GPIO connected to XSHUTDOWN pad of the sensor.
+ maxItems: 1
+
+ strobe-gpios:
+ description: Input GPIO connected to strobe pad of the sensor.
+ maxItems: 1
+
+ avdd-supply:
+ description: Analogue circuit voltage supply.
+
+ dovdd-supply:
+ description: I/O circuit voltage supply.
+
+ dvdd-supply:
+ description: Digital circuit voltage supply.
+
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+ description:
+ Output port node, single endpoint describing the CSI-2 transmitter.
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 2
+ items:
+ enum: [1, 2]
+
+ link-frequencies: true
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@60 {
+ compatible = "ovti,og01a1b";
+ reg = <0x60>;
+ clocks = <&clk 0>;
+ reset-gpios = <&gpio 117 GPIO_ACTIVE_LOW>;
+ avdd-supply = <&vreg_3v3>;
+ dovdd-supply = <&vreg_1p8>;
+ dvdd-supply = <&vreg_1p2>;
+
+ port {
+ og01a1b_ep: endpoint {
+ remote-endpoint = <&csiphy_ep>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <500000000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
index 106c36ee966d..77bf3a4ee89d 100644
--- a/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
@@ -75,6 +75,8 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/gpio/gpio.h>
+
i2c {
#address-cells = <1>;
#size-cells = <0>;
@@ -92,6 +94,8 @@ examples:
ovdd-supply = <&camera_vddo_1v8>;
dvdd-supply = <&camera_vddd_1v2>;
+ reset-gpios = <&gpio 50 GPIO_ACTIVE_LOW>;
+
port {
imx335: endpoint {
remote-endpoint = <&cam>;
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
index 8f9b6433aeb8..10c334e6b3dc 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc7280-venus.yaml
@@ -43,6 +43,7 @@ properties:
- const: vcodec_bus
iommus:
+ minItems: 1
maxItems: 2
interconnects:
diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.yaml b/Documentation/devicetree/bindings/media/renesas,fcp.yaml
index c6abe719881b..f94dacd96278 100644
--- a/Documentation/devicetree/bindings/media/renesas,fcp.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,fcp.yaml
@@ -27,6 +27,7 @@ properties:
- renesas,fcpf # FCP for FDP
- items:
- enum:
+ - renesas,r9a07g043u-fcpvd # RZ/G2UL
- renesas,r9a07g044-fcpvd # RZ/G2{L,LC}
- renesas,r9a07g054-fcpvd # RZ/V2L
- const: renesas,fcpv # Generic FCP for VSP fallback
@@ -62,6 +63,7 @@ allOf:
compatible:
contains:
enum:
+ - renesas,r9a07g043u-fcpvd
- renesas,r9a07g044-fcpvd
- renesas,r9a07g054-fcpvd
then:
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
index 5539d0f8e74d..cf54176f4fbd 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
@@ -52,8 +52,12 @@ properties:
- renesas,vin-r8a77980 # R-Car V3H
- renesas,vin-r8a77990 # R-Car E3
- renesas,vin-r8a77995 # R-Car D3
+ - items:
+ - enum:
- renesas,vin-r8a779a0 # R-Car V3U
- renesas,vin-r8a779g0 # R-Car V4H
+ - renesas,vin-r8a779h0 # R-Car V4M
+ - const: renesas,rcar-gen4-vin # Generic R-Car Gen4
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
index 3265e922647c..1a03e67462a4 100644
--- a/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vsp1.yaml
@@ -23,6 +23,7 @@ properties:
- renesas,vsp2 # R-Car Gen3 and RZ/G2
- items:
- enum:
+ - renesas,r9a07g043u-vsp2 # RZ/G2UL
- renesas,r9a07g054-vsp2 # RZ/V2L
- const: renesas,r9a07g044-vsp2 # RZ/G2L fallback
diff --git a/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml b/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
index 9d90d8d0565a..947ad699cc5e 100644
--- a/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
enum:
- rockchip,rk3568-vepu
+ - rockchip,rk3588-vepu121
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
index c57e1f488895..719aeb2dc593 100644
--- a/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-vpu.yaml
@@ -26,11 +26,16 @@ properties:
- rockchip,rk3568-vpu
- rockchip,rk3588-av1-vpu
- items:
- - const: rockchip,rk3188-vpu
+ - enum:
+ - rockchip,rk3128-vpu
+ - rockchip,rk3188-vpu
- const: rockchip,rk3066-vpu
- items:
- const: rockchip,rk3228-vpu
- const: rockchip,rk3399-vpu
+ - items:
+ - const: rockchip,rk3588-vpu121
+ - const: rockchip,rk3568-vpu
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml b/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml
index f9c069f8534b..ee2272f754a3 100644
--- a/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml
+++ b/Documentation/devicetree/bindings/mfd/adi,adp5585.yaml
@@ -42,6 +42,13 @@ properties:
"#pwm-cells":
const: 3
+patternProperties:
+ "-hog(-[0-9]+)?$":
+ type: object
+
+ required:
+ - gpio-hog
+
required:
- compatible
- reg
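
A hedged sketch of a hog child node matching the new "-hog" pattern; the I2C address, pin index and line name are illustrative assumptions:

    io-expander@34 {
        compatible = "adi,adp5585";
        reg = <0x34>;
        gpio-controller;
        #gpio-cells = <2>;

        /* Claim expander pin 4 at probe time and drive it high */
        regulator-enable-hog {
            gpio-hog;
            gpios = <4 GPIO_ACTIVE_HIGH>;
            output-high;
            line-name = "reg-enable";
        };
    };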
diff --git a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
index c6bd14ec5aa0..7d0b0b403150 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
@@ -21,6 +21,7 @@ properties:
- qcom,msm8998-tcsr
- qcom,qcm2290-tcsr
- qcom,qcs404-tcsr
+ - qcom,sa8775p-tcsr
- qcom,sc7180-tcsr
- qcom,sc7280-tcsr
- qcom,sc8280xp-tcsr
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index 9dc594ea3654..cc9b17ad69f2 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -103,6 +103,7 @@ select:
- rockchip,rk3368-qos
- rockchip,rk3399-qos
- rockchip,rk3568-qos
+ - rockchip,rk3576-qos
- rockchip,rk3588-qos
- rockchip,rv1126-qos
- st,spear1340-misc
@@ -113,6 +114,7 @@ select:
- ti,am625-dss-oldi-io-ctrl
- ti,am62p-cpsw-mac-efuse
- ti,am654-dss-oldi-io-ctrl
+ - ti,j784s4-acspcie-proxy-ctrl
- ti,j784s4-pcie-ctrl
- ti,keystone-pllctrl
required:
@@ -198,6 +200,7 @@ properties:
- rockchip,rk3368-qos
- rockchip,rk3399-qos
- rockchip,rk3568-qos
+ - rockchip,rk3576-qos
- rockchip,rk3588-qos
- rockchip,rv1126-qos
- st,spear1340-misc
diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
index c27a8f33d8d7..0840a3d92513 100644
--- a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
+++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
@@ -26,6 +26,7 @@ properties:
- mdsp
- sdsp
- cdsp
+ - cdsp1
memory-region:
maxItems: 1
@@ -81,7 +82,7 @@ patternProperties:
iommus:
minItems: 1
- maxItems: 3
+ maxItems: 10
qcom,nsessions:
$ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/mtd/technologic,nand.yaml b/Documentation/devicetree/bindings/mtd/technologic,nand.yaml
new file mode 100644
index 000000000000..f9d87c46094b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/technologic,nand.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/technologic,nand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Technologic Systems NAND controller
+
+maintainers:
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: nand-controller.yaml
+
+properties:
+ compatible:
+ oneOf:
+ - const: technologic,ts7200-nand
+ - items:
+ - enum:
+ - technologic,ts7300-nand
+ - technologic,ts7260-nand
+ - technologic,ts7250-nand
+ - const: technologic,ts7200-nand
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ nand-controller@60000000 {
+ compatible = "technologic,ts7200-nand";
+ reg = <0x60000000 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ nand@0 {
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml b/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml
new file mode 100644
index 000000000000..ad0915307095
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cirrus,ep9301-eth.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/cirrus,ep9301-eth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EP93xx SoC Ethernet Controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-eth
+ - items:
+ - enum:
+ - cirrus,ep9302-eth
+ - cirrus,ep9307-eth
+ - cirrus,ep9312-eth
+ - cirrus,ep9315-eth
+ - const: cirrus,ep9301-eth
+
+ reg:
+ items:
+ - description: The physical base address and size of IO range
+
+ interrupts:
+ items:
+ - description: Combined signal for various interrupt events
+
+ phy-handle: true
+
+ mdio:
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
+ description: Optional node for the embedded MDIO controller
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - phy-handle
+
+additionalProperties: false
+
+examples:
+ - |
+ ethernet@80010000 {
+ compatible = "cirrus,ep9301-eth";
+ reg = <0x80010000 0x10000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <7>;
+ phy-handle = <&phy0>;
+ };
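
The example above leaves &phy0 unresolved; a hedged variant of the same node with the optional embedded MDIO bus filled in, where the PHY address is an illustrative assumption:

    ethernet@80010000 {
        compatible = "cirrus,ep9301-eth";
        reg = <0x80010000 0x10000>;
        interrupt-parent = <&vic1>;
        interrupts = <7>;
        phy-handle = <&phy0>;

        mdio {
            #address-cells = <1>;
            #size-cells = <0>;

            phy0: ethernet-phy@1 {
                reg = <1>; /* board-specific PHY address */
            };
        };
    };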
diff --git a/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml b/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
index 3dde10de4630..4f4253441547 100644
--- a/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cc1352p7.yaml
@@ -29,6 +29,12 @@ properties:
reset-gpios:
maxItems: 1
+ bootloader-backdoor-gpios:
+ maxItems: 1
+ description: |
+ GPIO used to enable the bootloader backdoor in the CC1352P7 bootloader,
+ allowing new firmware to be flashed.
+
vdds-supply: true
required:
@@ -46,6 +52,7 @@ examples:
clocks = <&sclk_hf 0>, <&sclk_lf 25>;
clock-names = "sclk_hf", "sclk_lf";
reset-gpios = <&pio 35 GPIO_ACTIVE_LOW>;
+ bootloader-backdoor-gpios = <&pio 36 GPIO_ACTIVE_LOW>;
vdds-supply = <&vdds>;
};
};
diff --git a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
index 70fb2ad25103..1b20b49eee79 100644
--- a/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/fsl,layerscape-sfp.yaml
@@ -15,6 +15,7 @@ description: |
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
index e21c06e9a741..b2cb76cf9053 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
@@ -14,7 +14,7 @@ maintainers:
description: |
This binding represents the on-chip eFuse OTP controller found on
i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
- i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN i.MX8MP and i.MX93 SoCs.
+ i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN, i.MX8MP and i.MX93/5 SoCs.
allOf:
- $ref: nvmem.yaml#
@@ -36,6 +36,7 @@ properties:
- fsl,imx8mq-ocotp
- fsl,imx8mm-ocotp
- fsl,imx93-ocotp
+ - fsl,imx95-ocotp
- const: syscon
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
index 3b40f7880774..382507060651 100644
--- a/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/nvmem-layout.yaml
@@ -21,6 +21,7 @@ oneOf:
- $ref: fixed-layout.yaml
- $ref: kontron,sl28-vpd.yaml
- $ref: onie,tlv-layout.yaml
+ - $ref: u-boot,env.yaml
properties:
compatible: true
diff --git a/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml b/Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml
index 9c36afc7084b..56a8f55d4a09 100644
--- a/Documentation/devicetree/bindings/nvmem/u-boot,env.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
-$id: http://devicetree.org/schemas/nvmem/u-boot,env.yaml#
+$id: http://devicetree.org/schemas/nvmem/layouts/u-boot,env.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: U-Boot environment variables
+title: U-Boot environment variables layout
description: |
U-Boot uses environment variables to store device parameters and
@@ -21,9 +21,6 @@ description: |
This binding allows marking storage device (as containing env data) and
specifying used format.
- Right now only flash partition case is covered but it may be extended to e.g.
- UBI volumes in the future.
-
Variables can be defined as NVMEM device subnodes.
maintainers:
@@ -42,6 +39,7 @@ properties:
const: brcm,env
reg:
+ description: Partition offset and size for env on top of MTD
maxItems: 1
bootcmd:
@@ -58,6 +56,17 @@ properties:
description: The first argument is a MAC address offset.
const: 1
+allOf:
+ - if:
+ properties:
+ $nodename:
+ not:
+ contains:
+ pattern: "^partition@[0-9a-f]+$"
+ then:
+ properties:
+ reg: false
+
additionalProperties: false
examples:
@@ -101,3 +110,23 @@ examples:
};
};
};
+ - |
+ partition@0 {
+ reg = <0x0 0x100000>;
+ label = "ubi";
+ compatible = "linux,ubi";
+
+ volumes {
+ ubi-volume-u-boot-env {
+ volname = "env";
+
+ nvmem-layout {
+ compatible = "u-boot,env";
+
+ ethaddr {
+ #nvmem-cell-cells = <1>;
+ };
+ };
+ };
+ };
+ };
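
On the consumer side, the one-cell ethaddr mechanism lets a second interface derive an offset MAC address; a hedged fragment, assuming env_ethaddr labels the ethaddr cell above and omitting the rest of the ethernet node:

    ethernet {
        nvmem-cells = <&env_ethaddr 1>; /* base MAC address + 1 */
        nvmem-cell-names = "mac-address";
    };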
diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
index 92bfe25f0571..3b2aa605a551 100644
--- a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
@@ -17,6 +17,7 @@ maintainers:
allOf:
- $ref: nvmem.yaml#
+ - $ref: nvmem-deprecated-cells.yaml#
properties:
compatible:
@@ -32,6 +33,8 @@ properties:
patternProperties:
"^.*@[0-9a-f]+$":
type: object
+ $ref: layouts/fixed-cell.yaml
+ unevaluatedProperties: false
properties:
st,non-secure-otp:
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt b/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
deleted file mode 100644
index 9514c327d31b..000000000000
--- a/Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Altera PCIe MSI controller
-
-Required properties:
-- compatible: should contain "altr,msi-1.0"
-- reg: specifies the physical base address of the controller and
- the length of the memory mapped region.
-- reg-names: must include the following entries:
- "csr": CSR registers
- "vector_slave": vectors slave port region
-- interrupts: specifies the interrupt source of the parent interrupt
- controller. The format of the interrupt specifier depends on the
- parent interrupt controller.
-- num-vectors: number of vectors, range 1 to 32.
-- msi-controller: indicates that this is MSI controller node
-
-
-Example
-msi0: msi@0xFF200000 {
- compatible = "altr,msi-1.0";
- reg = <0xFF200000 0x00000010
- 0xFF200010 0x00000080>;
- reg-names = "csr", "vector_slave";
- interrupt-parent = <&hps_0_arm_gic_0>;
- interrupts = <0 42 4>;
- msi-controller;
- num-vectors = <32>;
-};
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt
deleted file mode 100644
index 816b244a221e..000000000000
--- a/Documentation/devicetree/bindings/pci/altera-pcie.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* Altera PCIe controller
-
-Required properties:
-- compatible : should contain "altr,pcie-root-port-1.0" or "altr,pcie-root-port-2.0"
-- reg: a list of physical base address and length for TXS and CRA.
- For "altr,pcie-root-port-2.0", additional HIP base address and length.
-- reg-names: must include the following entries:
- "Txs": TX slave port region
- "Cra": Control register access region
- "Hip": Hard IP region (if "altr,pcie-root-port-2.0")
-- interrupts: specifies the interrupt source of the parent interrupt
- controller. The format of the interrupt specifier depends
- on the parent interrupt controller.
-- device_type: must be "pci"
-- #address-cells: set to <3>
-- #size-cells: set to <2>
-- #interrupt-cells: set to <1>
-- ranges: describes the translation of addresses for root ports and
- standard PCI regions.
-- interrupt-map-mask and interrupt-map: standard PCI properties to define the
- mapping of the PCIe interface to interrupt numbers.
-
-Optional properties:
-- msi-parent: Link to the hardware entity that serves as the MSI controller
- for this PCIe controller.
-- bus-range: PCI bus numbers covered
-
-Example
- pcie_0: pcie@c00000000 {
- compatible = "altr,pcie-root-port-1.0";
- reg = <0xc0000000 0x20000000>,
- <0xff220000 0x00004000>;
- reg-names = "Txs", "Cra";
- interrupt-parent = <&hps_0_arm_gic_0>;
- interrupts = <0 40 4>;
- interrupt-controller;
- #interrupt-cells = <1>;
- bus-range = <0x0 0xFF>;
- device_type = "pci";
- msi-parent = <&msi_to_gic_gen_0>;
- #address-cells = <3>;
- #size-cells = <2>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_0 1>,
- <0 0 0 2 &pcie_0 2>,
- <0 0 0 3 &pcie_0 3>,
- <0 0 0 4 &pcie_0 4>;
- ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000
- 0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>;
- };
diff --git a/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml b/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
new file mode 100644
index 000000000000..98814862d006
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2015, 2024, Intel Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/altr,msi-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera PCIe MSI controller
+
+maintainers:
+ - Matthew Gerlach <matthew.gerlach@linux.intel.com>
+
+properties:
+ compatible:
+ enum:
+ - altr,msi-1.0
+
+ reg:
+ items:
+ - description: CSR registers
+ - description: Vectors slave port region
+
+ reg-names:
+ items:
+ - const: csr
+ - const: vector_slave
+
+ interrupts:
+ maxItems: 1
+
+ msi-controller: true
+
+ num-vectors:
+ description: number of vectors
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - msi-controller
+ - num-vectors
+
+allOf:
+ - $ref: /schemas/interrupt-controller/msi-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ msi@ff200000 {
+ compatible = "altr,msi-1.0";
+ reg = <0xff200000 0x00000010>,
+ <0xff200010 0x00000080>;
+ reg-names = "csr", "vector_slave";
+ interrupt-parent = <&hps_0_arm_gic_0>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ msi-controller;
+ num-vectors = <32>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml b/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml
new file mode 100644
index 000000000000..52533fccc134
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (C) 2015, 2019, 2024, Intel Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/altr,pcie-root-port.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera PCIe Root Port
+
+maintainers:
+ - Matthew Gerlach <matthew.gerlach@linux.intel.com>
+
+properties:
+ compatible:
+ enum:
+ - altr,pcie-root-port-1.0
+ - altr,pcie-root-port-2.0
+
+ reg:
+ items:
+ - description: TX slave port region
+ - description: Control register access region
+ - description: Hard IP region
+ minItems: 2
+
+ reg-names:
+ items:
+ - const: Txs
+ - const: Cra
+ - const: Hip
+ minItems: 2
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ interrupt-map-mask:
+ items:
+ - const: 0
+ - const: 0
+ - const: 0
+ - const: 7
+
+ interrupt-map:
+ maxItems: 4
+
+ "#interrupt-cells":
+ const: 1
+
+ msi-parent: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - "#interrupt-cells"
+ - interrupt-controller
+ - interrupt-map
+ - interrupt-map-mask
+
+allOf:
+ - $ref: /schemas/pci/pci-host-bridge.yaml#
+ - if:
+ properties:
+ compatible:
+ enum:
+ - altr,pcie-root-port-1.0
+ then:
+ properties:
+ reg:
+ maxItems: 2
+
+ reg-names:
+ maxItems: 2
+
+ else:
+ properties:
+ reg:
+ minItems: 3
+
+ reg-names:
+ minItems: 3
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ pcie_0: pcie@c00000000 {
+ compatible = "altr,pcie-root-port-1.0";
+ reg = <0xc0000000 0x20000000>,
+ <0xff220000 0x00004000>;
+ reg-names = "Txs", "Cra";
+ interrupt-parent = <&hps_0_arm_gic_0>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ bus-range = <0x0 0xff>;
+ device_type = "pci";
+ msi-parent = <&msi_to_gic_gen_0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_0 0 0 0 1>,
+ <0 0 0 2 &pcie_0 0 0 0 2>,
+ <0 0 0 3 &pcie_0 0 0 0 3>,
+ <0 0 0 4 &pcie_0 0 0 0 4>;
+ ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000>,
+ <0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index 11f8ea33240c..0925c520195a 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Brcmstb PCIe Host Controller
maintainers:
- - Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ - Jim Quinlan <james.quinlan@broadcom.com>
properties:
compatible:
@@ -16,11 +16,12 @@ properties:
- brcm,bcm2711-pcie # The Raspberry Pi 4
- brcm,bcm4908-pcie
- brcm,bcm7211-pcie # Broadcom STB version of RPi4
- - brcm,bcm7278-pcie # Broadcom 7278 Arm
- brcm,bcm7216-pcie # Broadcom 7216 Arm
- - brcm,bcm7445-pcie # Broadcom 7445 Arm
+ - brcm,bcm7278-pcie # Broadcom 7278 Arm
- brcm,bcm7425-pcie # Broadcom 7425 MIPs
- brcm,bcm7435-pcie # Broadcom 7435 MIPs
+ - brcm,bcm7445-pcie # Broadcom 7445 Arm
+ - brcm,bcm7712-pcie # Broadcom STB sibling of Rpi 5
reg:
maxItems: 1
@@ -95,6 +96,14 @@ properties:
minItems: 1
maxItems: 3
+ resets:
+ minItems: 1
+ maxItems: 3
+
+ reset-names:
+ minItems: 1
+ maxItems: 3
+
required:
- compatible
- reg
@@ -118,8 +127,7 @@ allOf:
then:
properties:
resets:
- items:
- - description: reset controller handling the PERST# signal
+ maxItems: 1
reset-names:
items:
@@ -136,12 +144,32 @@ allOf:
then:
properties:
resets:
+ maxItems: 1
+
+ reset-names:
items:
- - description: phandle pointing to the RESCAL reset controller
+ - const: rescal
+
+ required:
+ - resets
+ - reset-names
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm7712-pcie
+ then:
+ properties:
+ resets:
+ minItems: 3
+ maxItems: 3
reset-names:
items:
- const: rescal
+ - const: bridge
+ - const: swinit
required:
- resets
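
To make the new bcm7712 three-reset arrangement concrete, a hedged excerpt; the reset-controller phandles are illustrative assumptions and the remaining mandatory host-bridge properties are omitted:

    pcie@8b20000 {
        compatible = "brcm,bcm7712-pcie";
        /* reg, ranges, interrupts etc. omitted for brevity */
        resets = <&rescal>, <&bridge_rst>, <&swinit_rst>;
        reset-names = "rescal", "bridge", "swinit";
    };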
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
index a06f75df8458..84ca12e8b25b 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
@@ -65,12 +65,14 @@ allOf:
then:
properties:
reg:
- minItems: 2
- maxItems: 2
+ minItems: 4
+ maxItems: 4
reg-names:
items:
- const: dbi
- const: addr_space
+ - const: dbi2
+ - const: atu
- if:
properties:
@@ -129,8 +131,11 @@ examples:
pcie_ep: pcie-ep@33800000 {
compatible = "fsl,imx8mp-pcie-ep";
- reg = <0x33800000 0x000400000>, <0x18000000 0x08000000>;
- reg-names = "dbi", "addr_space";
+ reg = <0x33800000 0x100000>,
+ <0x18000000 0x8000000>,
+ <0x33900000 0x100000>,
+ <0x33b00000 0x100000>;
+ reg-names = "dbi", "addr_space", "dbi2", "atu";
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
<&clk IMX8MP_CLK_HSIO_AXI>,
<&clk IMX8MP_CLK_PCIE_ROOT>;
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
index 8b8d77b1154b..1e05c560d797 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
@@ -30,6 +30,7 @@ properties:
- fsl,imx8mm-pcie
- fsl,imx8mp-pcie
- fsl,imx95-pcie
+ - fsl,imx8q-pcie
clocks:
minItems: 3
@@ -184,6 +185,21 @@ allOf:
- const: pcie_bus
- const: pcie_aux
+ - if:
+ properties:
+ compatible:
+ enum:
+ - fsl,imx8q-pcie
+ then:
+ properties:
+ clocks:
+ maxItems: 3
+ clock-names:
+ items:
+ - const: dbi
+ - const: mstr
+ - const: slv
+
unevaluatedProperties: false
examples:
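
For the new fsl,imx8q-pcie branch, a hedged excerpt of the clock wiring implied by the constraint above; the clock provider phandle and specifiers are illustrative assumptions:

    pcie@5f000000 {
        compatible = "fsl,imx8q-pcie";
        /* reg, ranges, interrupts etc. omitted for brevity */
        clocks = <&pcie_lpcg 0>, <&pcie_lpcg 1>, <&pcie_lpcg 2>;
        clock-names = "dbi", "mstr", "slv";
    };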
diff --git a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml
index 793986c5af7f..be79712836c4 100644
--- a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml
@@ -22,18 +22,20 @@ description:
properties:
compatible:
- enum:
- - fsl,ls1021a-pcie
- - fsl,ls2080a-pcie
- - fsl,ls2085a-pcie
- - fsl,ls2088a-pcie
- - fsl,ls1088a-pcie
- - fsl,ls1046a-pcie
- - fsl,ls1043a-pcie
- - fsl,ls1012a-pcie
- - fsl,ls1028a-pcie
- - fsl,lx2160a-pcie
-
+ oneOf:
+ - enum:
+ - fsl,ls1012a-pcie
+ - fsl,ls1021a-pcie
+ - fsl,ls1028a-pcie
+ - fsl,ls1043a-pcie
+ - fsl,ls1046a-pcie
+ - fsl,ls1088a-pcie
+ - fsl,ls2080a-pcie
+ - fsl,ls2085a-pcie
+ - fsl,ls2088a-pcie
+ - items:
+ - const: fsl,lx2160ar2-pcie
+ - const: fsl,ls2088a-pcie
reg:
maxItems: 2
@@ -43,10 +45,15 @@ properties:
- const: config
fsl,pcie-scfg:
- $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: A phandle to the SCFG device node. The second entry is the
physical PCIe controller index starting from '0'. This is used to get
SCFG PEXN registers.
+ items:
+ items:
+ - description: A phandle to the SCFG device node
+ - description: PCIe controller index starting from '0'
+ maxItems: 1
big-endian:
$ref: /schemas/types.yaml#/definitions/flag
@@ -67,6 +74,14 @@ properties:
minItems: 1
maxItems: 2
+ num-viewport:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ deprecated: true
+ description:
+ Number of outbound view ports configured in hardware. It's the same as
+ Number of outbound viewports configured in hardware. This is the same
+ as the number of outbound AT windows.
+
required:
- compatible
- reg
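
A hedged excerpt of the two-cell fsl,pcie-scfg form documented above, for the second controller of a hypothetical board; the scfg label is an assumption:

    pcie@3500000 {
        compatible = "fsl,ls1043a-pcie";
        /* reg, ranges, interrupts etc. omitted for brevity */
        fsl,pcie-scfg = <&scfg 1>; /* SCFG node, controller index 1 */
    };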
diff --git a/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml b/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
index c9f04999c9cf..e863519f3161 100644
--- a/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
@@ -37,7 +37,8 @@ properties:
minItems: 3
maxItems: 4
- clocks: true
+ clocks:
+ maxItems: 5
clock-names:
items:
diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
index bcfbaf5582cc..420d551e9af9 100644
--- a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
+++ b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml
@@ -102,8 +102,6 @@ properties:
As described in IEEE Std 1275-1994, but must provide at least a
definition of non-prefetchable memory. One or both of prefetchable Memory
and IO Space may also be provided.
- minItems: 1
- maxItems: 3
dma-coherent: true
iommu-map: true
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
index 76d742051f73..898c1be2d6a4 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
@@ -53,6 +53,7 @@ properties:
- mediatek,mt8195-pcie
- const: mediatek,mt8192-pcie
- const: mediatek,mt8192-pcie
+ - const: airoha,en7581-pcie
reg:
maxItems: 1
@@ -76,20 +77,20 @@ properties:
resets:
minItems: 1
- maxItems: 2
+ maxItems: 3
reset-names:
minItems: 1
- maxItems: 2
+ maxItems: 3
items:
- enum: [ phy, mac ]
+ enum: [ phy, mac, phy-lane0, phy-lane1, phy-lane2 ]
clocks:
- minItems: 4
+ minItems: 1
maxItems: 6
clock-names:
- minItems: 4
+ minItems: 1
maxItems: 6
assigned-clocks:
@@ -147,6 +148,9 @@ allOf:
const: mediatek,mt8192-pcie
then:
properties:
+ clocks:
+ minItems: 4
+
clock-names:
items:
- const: pl_250m
@@ -155,6 +159,15 @@ allOf:
- const: tl_32k
- const: peri_26m
- const: top_133m
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ maxItems: 2
+
- if:
properties:
compatible:
@@ -164,6 +177,9 @@ allOf:
- mediatek,mt8195-pcie
then:
properties:
+ clocks:
+ minItems: 4
+
clock-names:
items:
- const: pl_250m
@@ -172,6 +188,15 @@ allOf:
- const: tl_32k
- const: peri_26m
- const: peri_mem
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ maxItems: 2
+
- if:
properties:
compatible:
@@ -180,6 +205,9 @@ allOf:
- mediatek,mt7986-pcie
then:
properties:
+ clocks:
+ minItems: 4
+
clock-names:
items:
- const: pl_250m
@@ -187,6 +215,36 @@ allOf:
- const: peri_26m
- const: top_133m
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ maxItems: 2
+
+ - if:
+ properties:
+ compatible:
+ const: airoha,en7581-pcie
+ then:
+ properties:
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: sys-ck
+
+ resets:
+ minItems: 3
+
+ reset-names:
+ items:
+ - const: phy-lane0
+ - const: phy-lane1
+ - const: phy-lane2
+
unevaluatedProperties: false
examples:
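
For the new airoha,en7581-pcie branch, a hedged excerpt combining the single sys-ck clock with the three per-lane PHY resets; all phandles and specifiers are illustrative assumptions:

    pcie@1fa91000 {
        compatible = "airoha,en7581-pcie";
        /* reg, ranges, interrupts etc. omitted for brevity */
        clocks = <&scuclk 0>;
        clock-names = "sys-ck";
        resets = <&rst 1>, <&rst 2>, <&rst 3>;
        reset-names = "phy-lane0", "phy-lane1", "phy-lane2";
    };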
diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml
index d1eef4825207..f75000e3093d 100644
--- a/Documentation/devicetree/bindings/pci/pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml
@@ -10,7 +10,8 @@ description: |
Common properties for PCI Endpoint Controller Nodes.
maintainers:
- - Kishon Vijay Abraham I <kishon@ti.com>
+ - Kishon Vijay Abraham I <kishon@kernel.org>
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
properties:
$nodename:
@@ -41,6 +42,17 @@ properties:
default: 1
maximum: 16
+ linux,pci-domain:
+ description:
+ If present, this property assigns a fixed PCI domain number to a PCI
+ Endpoint Controller; otherwise an unstable (across boots) unique number
+ will be assigned. Set this property either for all PCI endpoint
+ controllers in the system or for none; otherwise potentially conflicting
+ domain numbers may be assigned to endpoint controllers. The domain
+ number for each endpoint controller in the system must be unique.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
required:
- compatible
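The all-or-nothing rule is easiest to see with two controllers. A sketch with hypothetical node names, unit addresses and compatible (the qcom,pcie-ep example updated later in this patch shows the property in a real node):

    pcie-ep@40000000 {
        compatible = "vendor,soc-pcie-ep";     /* hypothetical */
        linux,pci-domain = <0>;
    };

    pcie-ep@48000000 {
        compatible = "vendor,soc-pcie-ep";     /* hypothetical */
        linux,pci-domain = <1>;                /* must be unique per controller */
    };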
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
index 0a39bbfcb28b..e18900c41576 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-common.yaml
@@ -21,11 +21,11 @@ properties:
interrupts:
minItems: 1
- maxItems: 8
+ maxItems: 9
interrupt-names:
minItems: 1
- maxItems: 8
+ maxItems: 9
iommu-map:
minItems: 1
@@ -78,6 +78,9 @@ properties:
description: GPIO controlled connection to WAKE# signal
maxItems: 1
+ vddpe-3v3-supply:
+ description: PCIe endpoint power supply
+
required:
- reg
- reg-names
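vddpe-3v3-supply follows the usual regulator-consumer pattern; a board DTS would point it at whatever regulator feeds the endpoint, e.g. (node label and regulator name assumed):

    &pcie0 {
        vddpe-3v3-supply = <&vreg_pcie_3p3>;   /* assumed board regulator */
    };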
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
index 46802f7d9482..1226ee5d08d1 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
@@ -280,4 +280,5 @@ examples:
phy-names = "pciephy";
max-link-speed = <3>;
num-lanes = <2>;
+ linux,pci-domain = <0>;
};
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
index 634da24ec3ed..76cb9fbfd476 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml
@@ -53,11 +53,19 @@ properties:
- const: aggre1 # Aggre NoC PCIe1 AXI clock
interrupts:
- maxItems: 1
+ minItems: 8
+ maxItems: 8
interrupt-names:
items:
- - const: msi
+ - const: msi0
+ - const: msi1
+ - const: msi2
+ - const: msi3
+ - const: msi4
+ - const: msi5
+ - const: msi6
+ - const: msi7
resets:
maxItems: 1
@@ -66,9 +74,6 @@ properties:
items:
- const: pci
- vddpe-3v3-supply:
- description: PCIe endpoint power supply
-
allOf:
- $ref: qcom,pcie-common.yaml#
@@ -137,8 +142,16 @@ examples:
dma-coherent;
- interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi";
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0", "msi1", "msi2", "msi3",
+ "msi4", "msi5", "msi6", "msi7";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml
index 25c9f13ae977..15ba2385eb73 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml
@@ -58,9 +58,6 @@ properties:
items:
- const: pci
- vddpe-3v3-supply:
- description: A phandle to the PCIe endpoint power supply
-
required:
- interconnects
- interconnect-names
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml
index d8c0afaa4b19..46bd59eefadb 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml
@@ -55,8 +55,8 @@ properties:
- const: aggre1 # Aggre NoC PCIe1 AXI clock
interrupts:
- minItems: 8
- maxItems: 8
+ minItems: 9
+ maxItems: 9
interrupt-names:
items:
@@ -68,6 +68,7 @@ properties:
- const: msi5
- const: msi6
- const: msi7
+ - const: global
operating-points-v2: true
opp-table:
@@ -149,9 +150,10 @@ examples:
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi0", "msi1", "msi2", "msi3",
- "msi4", "msi5", "msi6", "msi7";
+ "msi4", "msi5", "msi6", "msi7", "global";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
index f867746b1ae5..ffabbac57fc1 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
@@ -91,6 +91,9 @@ properties:
vdda_refclk-supply:
description: A phandle to the core analog power supply for IC which generates reference clock
+ vddpe-3v3-supply:
+ description: A phandle to the PCIe endpoint power supply
+
phys:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
index 91b81ac75592..b23293314a6d 100644
--- a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-ep.yaml
@@ -19,6 +19,7 @@ properties:
- enum:
- renesas,r8a779f0-pcie-ep # R-Car S4-8
- renesas,r8a779g0-pcie-ep # R-Car V4H
+ - renesas,r8a779h0-pcie-ep # R-Car V4M
- const: renesas,rcar-gen4-pcie-ep # R-Car Gen4
reg:
diff --git a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
index 955c664f1fbb..bb3f843c59d9 100644
--- a/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-gen4-pci-host.yaml
@@ -19,6 +19,7 @@ properties:
- enum:
- renesas,r8a779f0-pcie # R-Car S4-8
- renesas,r8a779g0-pcie # R-Car V4H
+ - renesas,r8a779h0-pcie # R-Car V4M
- const: renesas,rcar-gen4-pcie # R-Car Gen4
reg:
diff --git a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
index b288cdb1ec70..065b7508d288 100644
--- a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
+++ b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml
@@ -42,9 +42,13 @@ properties:
interrupts:
maxItems: 1
- clocks: true
+ clocks:
+ minItems: 1
+ maxItems: 3
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 3
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
index f0d8e486a07d..93f3d0f4bb94 100644
--- a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
@@ -38,13 +38,17 @@ properties:
minItems: 1
maxItems: 2
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 2
resets:
minItems: 1
maxItems: 2
- reset-names: true
+ reset-names:
+ minItems: 1
+ maxItems: 2
num-ib-windows:
const: 16
diff --git a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
index 15a2658ceeef..69b499c96c71 100644
--- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
@@ -38,6 +38,16 @@ properties:
- const: reg
- const: cfg
+ ti,syscon-acspcie-proxy-ctrl:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to the ACSPCIE Proxy Control Register
+ - description: Bitmask corresponding to the PAD IO Buffer
+ output enable fields (Active Low).
+ description: Specifier for enabling the ACSPCIE PAD outputs to drive
+ the reference clock to the Endpoint device.
+
ti,syscon-pcie-ctrl:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
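Each ti,syscon-acspcie-proxy-ctrl entry pairs a syscon phandle with the active-low output-enable bitmask, so a consumer might look like this (label, address and mask value assumed):

    pcie0_rc: pcie@2900000 {                   /* assumed label/address */
        compatible = "ti,j721e-pcie-host";
        ti,syscon-acspcie-proxy-ctrl = <&acspcie0_proxy_ctrl 0x1>;
        /* &acspcie0_proxy_ctrl: syscon exposing the ACSPCIE proxy control
           register; 0x1: active-low PAD IO output-enable mask (assumed) */
    };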
diff --git a/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml b/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
index 9cad860c51a3..9de3c09efb6e 100644
--- a/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
@@ -61,6 +61,11 @@ properties:
interrupt-map:
maxItems: 4
+ phys:
+ minItems: 1
+ maxItems: 4
+ description: One PHY per logical lane, in order.
+
power-domains:
maxItems: 1
@@ -110,6 +115,7 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/phy/phy.h>
#include <dt-bindings/power/xlnx-zynqmp-power.h>
soc {
#address-cells = <2>;
@@ -138,6 +144,7 @@ examples:
<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
msi-parent = <&nwl_pcie>;
+ phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>;
power-domains = <&zynqmp_firmware PD_PCIE>;
iommus = <&smmu 0x4d0>;
pcie_intc: legacy-interrupt-controller {
diff --git a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
index 2f59b3a73dd2..f1efd919c351 100644
--- a/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
+++ b/Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
@@ -14,10 +14,21 @@ allOf:
properties:
compatible:
- const: xlnx,xdma-host-3.00
+ enum:
+ - xlnx,xdma-host-3.00
+ - xlnx,qdma-host-3.00
reg:
- maxItems: 1
+ items:
+ - description: Configuration region and XDMA bridge registers.
+ - description: QDMA bridge registers.
+ minItems: 1
+
+ reg-names:
+ items:
+ - const: cfg
+ - const: breg
+ minItems: 1
ranges:
maxItems: 2
@@ -76,6 +87,27 @@ required:
- "#interrupt-cells"
- interrupt-controller
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - xlnx,qdma-host-3.00
+then:
+ properties:
+ reg:
+ minItems: 2
+ reg-names:
+ minItems: 2
+ required:
+ - reg-names
+else:
+ properties:
+ reg:
+ maxItems: 1
+ reg-names:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
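Under the new conditional, a QDMA host must carry both register regions and name them, while an XDMA host keeps the single region. A QDMA sketch with assumed addresses:

    pcie@a0000000 {
        compatible = "xlnx,qdma-host-3.00";
        reg = <0xa0000000 0x1000000>,          /* config region (assumed) */
              <0xb0000000 0x10000>;            /* QDMA bridge registers (assumed) */
        reg-names = "cfg", "breg";
        /* ranges, the interrupt-controller child and the other required
           properties are omitted for brevity */
    };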
diff --git a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml
index f4b1ca2fb562..ce665a2779b7 100644
--- a/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml
+++ b/Documentation/devicetree/bindings/phy/fsl,mxs-usbphy.yaml
@@ -87,6 +87,12 @@ properties:
maximum: 119
default: 100
+ nxp,sim:
+ description:
+ The system integration module (SIM) provides system control and chip
+ configuration registers.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
required:
- compatible
- reg
@@ -110,6 +116,17 @@ allOf:
required:
- fsl,anatop
+ - if:
+ properties:
+ compatible:
+ const: fsl,imx7ulp-usbphy
+ then:
+ required:
+ - nxp,sim
+ else:
+ properties:
+ nxp,sim: false
+
additionalProperties: false
examples:
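On i.MX7ULP the new conditional makes nxp,sim mandatory. A sketch of the pairing, with the SIM compatible, labels and addresses assumed:

    sim: sim@410a3000 {
        compatible = "fsl,imx7ulp-sim", "syscon";  /* assumed */
        reg = <0x410a3000 0x1000>;
    };

    usbphy1: usb-phy@40350000 {
        compatible = "fsl,imx7ulp-usbphy";
        reg = <0x40350000 0x1000>;
        nxp,sim = <&sim>;
        /* interrupts and clocks required by the binding omitted */
    };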
diff --git a/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml b/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml
new file mode 100644
index 000000000000..81001966f657
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/hisilicon,hi3798cv200-combphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon STB PCIE/SATA/USB3 PHY
+
+maintainers:
+ - Shawn Guo <shawn.guo@linaro.org>
+
+properties:
+ compatible:
+ const: hisilicon,hi3798cv200-combphy
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ description: The cell contains the PHY mode
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ hisilicon,fixed-mode:
+ description: If the phy device doesn't support mode selection but only a
+ fixed mode setting, this property should be present to specify the
+ particular mode.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 2, 4 ] # SATA, PCIE, USB3
+
+ hisilicon,mode-select-bits:
+ description: If the phy device supports mode selection, this property
+ should be present to specify the register bits in the peripheral
+ controller.
+ items:
+ - description: register_offset
+ - description: bit shift
+ - description: bit mask
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+ - clocks
+ - resets
+
+oneOf:
+ - required: ['hisilicon,fixed-mode']
+ - required: ['hisilicon,mode-select-bits']
+
+additionalProperties: false
+
+...
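The oneOf above makes the two mode properties mutually exclusive. The examples from the legacy text binding removed later in this patch show one node per mode:

    combphy0: phy@850 {
        compatible = "hisilicon,hi3798cv200-combphy";
        reg = <0x850 0x8>;
        #phy-cells = <1>;
        clocks = <&crg HISTB_COMBPHY0_CLK>;
        resets = <&crg 0x188 4>;
        hisilicon,fixed-mode = <PHY_TYPE_USB3>;            /* fixed mode */
    };

    combphy1: phy@858 {
        compatible = "hisilicon,hi3798cv200-combphy";
        reg = <0x858 0x8>;
        #phy-cells = <1>;
        clocks = <&crg HISTB_COMBPHY1_CLK>;
        resets = <&crg 0x188 12>;
        hisilicon,mode-select-bits = <0x0008 11 (0x3 << 11)>; /* offset, shift, mask */
    };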
diff --git a/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml
new file mode 100644
index 000000000000..fff858c909a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/nuvoton,ma35d1-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton MA35D1 USB2 PHY
+
+maintainers:
+ - Hui-Ping Chen <hpchen0nvt@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - nuvoton,ma35d1-usb2-phy
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ nuvoton,sys:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to syscon for checking the PHY clock status.
+
+required:
+ - compatible
+ - "#phy-cells"
+ - clocks
+ - nuvoton,sys
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/nuvoton,ma35d1-clk.h>
+
+ usb_phy: usb-phy {
+ compatible = "nuvoton,ma35d1-usb2-phy";
+ clocks = <&clk USBD_GATE>;
+ nuvoton,sys = <&sys>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt b/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt
deleted file mode 100644
index 17b0c761370a..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-HiSilicon STB PCIE/SATA/USB3 PHY
-
-Required properties:
-- compatible: Should be "hisilicon,hi3798cv200-combphy"
-- reg: Should be the address space for COMBPHY configuration and state
- registers in peripheral controller, e.g. PERI_COMBPHY0_CFG and
- PERI_COMBPHY0_STATE for COMBPHY0 Hi3798CV200 SoC.
-- #phy-cells: Should be 1. The cell number is used to select the phy mode
- as defined in <dt-bindings/phy/phy.h>.
-- clocks: The phandle to clock provider and clock specifier pair.
-- resets: The phandle to reset controller and reset specifier pair.
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties.
-
-Optional properties:
-- hisilicon,fixed-mode: If the phy device doesn't support mode select
- but a fixed mode setting, the property should be present to specify
- the particular mode.
-- hisilicon,mode-select-bits: If the phy device support mode select,
- this property should be present to specify the register bits in
- peripheral controller, as a 3 integers tuple:
- <register_offset bit_shift bit_mask>.
-
-Notes:
-- Between hisilicon,fixed-mode and hisilicon,mode-select-bits, one and only
- one of them should be present.
-- The device node should be a child of peripheral controller that contains
- COMBPHY configuration/state and PERI_CTRL register used to select PHY mode.
- Refer to arm/hisilicon/hisilicon.txt for the parent peripheral controller
- bindings.
-
-Examples:
-
-perictrl: peripheral-controller@8a20000 {
- compatible = "hisilicon,hi3798cv200-perictrl", "syscon",
- "simple-mfd";
- reg = <0x8a20000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x8a20000 0x1000>;
-
- combphy0: phy@850 {
- compatible = "hisilicon,hi3798cv200-combphy";
- reg = <0x850 0x8>;
- #phy-cells = <1>;
- clocks = <&crg HISTB_COMBPHY0_CLK>;
- resets = <&crg 0x188 4>;
- hisilicon,fixed-mode = <PHY_TYPE_USB3>;
- };
-
- combphy1: phy@858 {
- compatible = "hisilicon,hi3798cv200-combphy";
- reg = <0x858 0x8>;
- #phy-cells = <1>;
- clocks = <&crg HISTB_COMBPHY1_CLK>;
- resets = <&crg 0x188 12>;
- hisilicon,mode-select-bits = <0x0008 11 (0x3 << 11)>;
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml b/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml
index 83fe4b39b56f..78607ee3e2e8 100644
--- a/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,hdmi-phy-qmp.yaml
@@ -14,6 +14,7 @@ properties:
compatible:
enum:
- qcom,hdmi-phy-8996
+ - qcom,hdmi-phy-8998
reg:
maxItems: 6
diff --git a/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml
new file mode 100644
index 000000000000..0bf18d32c133
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/qcom,sata-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SATA PHY Controller
+
+maintainers:
+ - Bjorn Andersson <andersson@kernel.org>
+ - Konrad Dybcio <konrad.dybcio@linaro.org>
+
+description:
+ The Qualcomm SATA PHY binding describes on-chip SATA physical layer
+ controllers.
+
+properties:
+ compatible:
+ enum:
+ - qcom,ipq806x-sata-phy
+ - qcom,apq8064-sata-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: cfg
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-ipq806x.h>
+ sata_phy: sata-phy@1b400000 {
+ compatible = "qcom,ipq806x-sata-phy";
+ reg = <0x1b400000 0x200>;
+
+ clocks = <&gcc SATA_PHY_CFG_CLK>;
+ clock-names = "cfg";
+
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index 03dbd02cf9e7..dcf4fa55fbba 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -40,6 +40,7 @@ properties:
- qcom,sm8650-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen3x2-pcie-phy
- qcom,x1e80100-qmp-gen4x2-pcie-phy
+ - qcom,x1e80100-qmp-gen4x4-pcie-phy
reg:
minItems: 1
@@ -118,6 +119,7 @@ allOf:
contains:
enum:
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
+ - qcom,x1e80100-qmp-gen4x4-pcie-phy
then:
properties:
reg:
@@ -169,6 +171,7 @@ allOf:
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x2-pcie-phy
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
+ - qcom,x1e80100-qmp-gen4x4-pcie-phy
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt b/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt
deleted file mode 100644
index 2cb2168cef41..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-Qualcomm's APQ8016/MSM8916 USB transceiver controller
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: Should contain "qcom,usb-8x16-phy".
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: USB PHY base address and length of the register map
-
-- clocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: See clock-bindings.txt section "consumers". List of
- two clock specifiers for interface and core controller
- clocks.
-
-- clock-names:
- Usage: required
- Value type: <string>
- Definition: Must contain "iface" and "core" strings.
-
-- vddcx-supply:
- Usage: required
- Value type: <phandle>
- Definition: phandle to the regulator VDCCX supply node.
-
-- v1p8-supply:
- Usage: required
- Value type: <phandle>
- Definition: phandle to the regulator 1.8V supply node.
-
-- v3p3-supply:
- Usage: required
- Value type: <phandle>
- Definition: phandle to the regulator 3.3V supply node.
-
-- resets:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: See reset.txt section "consumers". PHY reset specifier.
-
-- reset-names:
- Usage: required
- Value type: <string>
- Definition: Must contain "phy" string.
-
-- switch-gpio:
- Usage: optional
- Value type: <prop-encoded-array>
- Definition: Some boards are using Dual SPDT USB Switch, witch is
- controlled by GPIO to de/multiplex D+/D- USB lines
- between connectors.
-
-Example:
- usb_phy: phy@78d9000 {
- compatible = "qcom,usb-8x16-phy";
- reg = <0x78d9000 0x400>;
-
- vddcx-supply = <&pm8916_s1_corner>;
- v1p8-supply = <&pm8916_l7>;
- v3p3-supply = <&pm8916_l13>;
-
- clocks = <&gcc GCC_USB_HS_AHB_CLK>,
- <&gcc GCC_USB_HS_SYSTEM_CLK>;
- clock-names = "iface", "core";
-
- resets = <&gcc GCC_USB2A_PHY_BCR>;
- reset-names = "phy";
-
- // D+/D- lines: 1 - Routed to HUB, 0 - Device connector
- switch-gpio = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>;
- };
-
diff --git a/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt b/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt
deleted file mode 100644
index 952f6c96bab9..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Qualcomm APQ8064 SATA PHY Controller
-------------------------------------
-
-SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers.
-Each SATA PHY controller should have its own node.
-
-Required properties:
-- compatible: compatible list, contains "qcom,apq8064-sata-phy".
-- reg: offset and length of the SATA PHY register set;
-- #phy-cells: must be zero
-- clocks: a list of phandles and clock-specifier pairs, one for each entry in
- clock-names.
-- clock-names: must be "cfg" for phy config clock.
-
-Example:
- sata_phy: sata-phy@1b400000 {
- compatible = "qcom,apq8064-sata-phy";
- reg = <0x1b400000 0x200>;
-
- clocks = <&gcc SATA_PHY_CFG_CLK>;
- clock-names = "cfg";
-
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt b/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt
deleted file mode 100644
index 76bfbd056202..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Qualcomm IPQ806x SATA PHY Controller
-------------------------------------
-
-SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers.
-Each SATA PHY controller should have its own node.
-
-Required properties:
-- compatible: compatible list, contains "qcom,ipq806x-sata-phy"
-- reg: offset and length of the SATA PHY register set;
-- #phy-cells: must be zero
-- clocks: must be exactly one entry
-- clock-names: must be "cfg"
-
-Example:
- sata_phy: sata-phy@1b400000 {
- compatible = "qcom,ipq806x-sata-phy";
- reg = <0x1b400000 0x200>;
-
- clocks = <&gcc SATA_PHY_CFG_CLK>;
- clock-names = "cfg";
-
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
index f82649a55e91..af275cea3456 100644
--- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
@@ -13,7 +13,9 @@ properties:
compatible:
oneOf:
- items:
- - const: renesas,usb2-phy-r8a77470 # RZ/G1C
+ - enum:
+ - renesas,usb2-phy-r8a77470 # RZ/G1C
+ - renesas,usb2-phy-r9a08g045 # RZ/G3S
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
index 54e822c715f3..84fe59dbcf48 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
@@ -27,6 +27,9 @@ properties:
- const: ref
- const: apb
+ "#clock-cells":
+ const: 0
+
"#phy-cells":
const: 0
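With #clock-cells = <0> the PHY doubles as a clock provider. A sketch, with the register layout and clock specifiers assumed (the "ref"/"apb" names come from the schema context above):

    hdptxphy0: phy@fed60000 {
        compatible = "rockchip,rk3588-hdptx-phy";
        reg = <0xfed60000 0x2000>;             /* assumed single-cell layout */
        clocks = <&cru 0>, <&cru 1>;           /* assumed specifiers */
        clock-names = "ref", "apb";
        #clock-cells = <0>;
        #phy-cells = <0>;
    };

    /* a consumer can then take the PHY's output clock directly,
       inside its own node: */
    clocks = <&hdptxphy0>;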
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
index de3cffc850bc..e34b875a1bb8 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
@@ -30,13 +30,17 @@ properties:
minItems: 1
maxItems: 2
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 6
resets:
minItems: 2
maxItems: 6
- reset-names: true
+ reset-names:
+ minItems: 2
+ maxItems: 6
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
index b3ed2f74a414..9fc0e87c508e 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
@@ -31,13 +31,17 @@ properties:
minItems: 1
maxItems: 2
- clock-names: true
+ clock-names:
+ minItems: 1
+ maxItems: 2
resets:
minItems: 1
maxItems: 2
- reset-names: true
+ reset-names:
+ minItems: 1
+ maxItems: 2
socionext,syscon:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
index 2107d98ace15..25c4159f86e4 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
@@ -34,12 +34,15 @@ properties:
minItems: 2
maxItems: 3
- clock-names: true
+ clock-names:
+ minItems: 2
+ maxItems: 3
resets:
maxItems: 2
- reset-names: true
+ reset-names:
+ maxItems: 2
vbus-supply:
description: A phandle to the regulator for USB VBUS
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
index 8f5aa6238bf3..1f663e9901da 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
@@ -35,12 +35,15 @@ properties:
minItems: 2
maxItems: 3
- clock-names: true
+ clock-names:
+ minItems: 2
+ maxItems: 3
resets:
maxItems: 2
- reset-names: true
+ reset-names:
+ maxItems: 2
vbus-supply:
description: A phandle to the regulator for USB VBUS, only for USB host
diff --git a/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml
deleted file mode 100644
index 5f00604bf48c..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/mobileye,eyeq5-pinctrl.yaml
+++ /dev/null
@@ -1,242 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/pinctrl/mobileye,eyeq5-pinctrl.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Mobileye EyeQ5 pin controller
-
-description: >
- The EyeQ5 pin controller handles the two pin banks of the system. It belongs
- to a system-controller block called OLB.
-
- Pin control is about bias (pull-down, pull-up), drive strength and muxing. Pin
- muxing supports two functions for each pin: first is GPIO, second is
- pin-dependent.
-
- Pins and groups are bijective.
-
-maintainers:
- - Grégory Clement <gregory.clement@bootlin.com>
- - Théo Lebrun <theo.lebrun@bootlin.com>
- - Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
-
-$ref: pinctrl.yaml#
-
-properties:
- compatible:
- enum:
- - mobileye,eyeq5-pinctrl
-
- reg:
- maxItems: 1
-
-patternProperties:
- "-pins?$":
- type: object
- description: Pin muxing configuration.
- $ref: pinmux-node.yaml#
- additionalProperties: false
- properties:
- pins: true
- function:
- enum: [gpio,
- # Bank A
- timer0, timer1, timer2, timer5, uart0, uart1, can0, can1, spi0,
- spi1, refclk0,
- # Bank B
- timer3, timer4, timer6, uart2, can2, spi2, spi3, mclk0]
- bias-disable: true
- bias-pull-down: true
- bias-pull-up: true
- drive-strength: true
- required:
- - pins
- - function
- allOf:
- - if:
- properties:
- function:
- const: gpio
- then:
- properties:
- pins:
- items: # PA0 - PA28, PB0 - PB22
- pattern: '^(P(A|B)1?[0-9]|PA2[0-8]|PB2[0-2])$'
- - if:
- properties:
- function:
- const: timer0
- then:
- properties:
- pins:
- items:
- enum: [PA0, PA1]
- - if:
- properties:
- function:
- const: timer1
- then:
- properties:
- pins:
- items:
- enum: [PA2, PA3]
- - if:
- properties:
- function:
- const: timer2
- then:
- properties:
- pins:
- items:
- enum: [PA4, PA5]
- - if:
- properties:
- function:
- const: timer5
- then:
- properties:
- pins:
- items:
- enum: [PA6, PA7, PA8, PA9]
- - if:
- properties:
- function:
- const: uart0
- then:
- properties:
- pins:
- items:
- enum: [PA10, PA11]
- - if:
- properties:
- function:
- const: uart1
- then:
- properties:
- pins:
- items:
- enum: [PA12, PA13]
- - if:
- properties:
- function:
- const: can0
- then:
- properties:
- pins:
- items:
- enum: [PA14, PA15]
- - if:
- properties:
- function:
- const: can1
- then:
- properties:
- pins:
- items:
- enum: [PA16, PA17]
- - if:
- properties:
- function:
- const: spi0
- then:
- properties:
- pins:
- items:
- enum: [PA18, PA19, PA20, PA21, PA22]
- - if:
- properties:
- function:
- const: spi1
- then:
- properties:
- pins:
- items:
- enum: [PA23, PA24, PA25, PA26, PA27]
- - if:
- properties:
- function:
- const: refclk0
- then:
- properties:
- pins:
- items:
- enum: [PA28]
- - if:
- properties:
- function:
- const: timer3
- then:
- properties:
- pins:
- items:
- enum: [PB0, PB1]
- - if:
- properties:
- function:
- const: timer4
- then:
- properties:
- pins:
- items:
- enum: [PB2, PB3]
- - if:
- properties:
- function:
- const: timer6
- then:
- properties:
- pins:
- items:
- enum: [PB4, PB5, PB6, PB7]
- - if:
- properties:
- function:
- const: uart2
- then:
- properties:
- pins:
- items:
- enum: [PB8, PB9]
- - if:
- properties:
- function:
- const: can2
- then:
- properties:
- pins:
- items:
- enum: [PB10, PB11]
- - if:
- properties:
- function:
- const: spi2
- then:
- properties:
- pins:
- items:
- enum: [PB12, PB13, PB14, PB15, PB16]
- - if:
- properties:
- function:
- const: spi3
- then:
- properties:
- pins:
- items:
- enum: [PB17, PB18, PB19, PB20, PB21]
- - if:
- properties:
- function:
- const: mclk0
- then:
- properties:
- pins:
- items:
- enum: [PB22]
-
-required:
- - compatible
- - reg
-
-additionalProperties: false
diff --git a/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml
index 814b9598edd1..8cd1f442240e 100644
--- a/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm845-pinctrl.yaml
@@ -71,51 +71,49 @@ patternProperties:
One or more groups of pins to mux to a certain function
items:
enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi,
- smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4den, smb4b,
- smb4c, smb15, smb16, smb17, smb18, smb19, smb20, smb21,
- smb22, smb23, smb23b, smb4d, smb14, smb5, smb4, smb3,
- spi0cs1, spi0cs2, spi0cs3, spi1cs0, spi1cs1, spi1cs2,
- spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a, uart1, jtag2,
- bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, r1oen,
- r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, fanin3,
- fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10,
- fanin11, fanin12, fanin13, fanin14, fanin15, pwm0, pwm1, pwm2,
- pwm3, r2, r2err, r2md, r3rxer, ga20kbc, smb5d, lpc, espi, rg2,
- ddr, i3c0, i3c1, i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2,
- smb2c, smb2b, smb1c, smb1b, smb8, smb9, smb10, smb11, sd1,
- sd1pwr, pwm4, pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11,
- mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, lpcclk,
- scipme, smi, smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c,
- smb7d, spi1, faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2,
- spi3cs3, nprd_smi, smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio,
- wdog1, wdog2, smb12, smb13, spix, spixcs1, clkreq, hgpio0,
- hgpio1, hgpio2, hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4,
- bu4b, bu5, bu5b, bu6, gpo187 ]
+ smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4b, smb4c, smb15,
+ smb16, smb17, smb18, smb19, smb20, smb21, smb22, smb23,
+ smb23b, smb4d, smb14, smb5, smb4, smb3, spi0cs1, spi0cs2,
+ spi0cs3, spi1cs0, spi1cs1, spi1cs2, spi1cs3, spi1cs23, smb3c,
+ smb3b, bmcuart0a, uart1, jtag2, bmcuart1, uart2, sg1mdio,
+ bmcuart0b, r1err, r1md, r1oen, r2oen, rmii3, r3oen, smb3d,
+ fanin0, fanin1, fanin2, fanin3, fanin4, fanin5, fanin6,
+ fanin7, fanin8, fanin9, fanin10, fanin11, fanin12, fanin13,
+ fanin14, fanin15, pwm0, pwm1, pwm2, pwm3, r2, r2err, r2md,
+ r3rxer, ga20kbc, smb5d, lpc, espi, rg2, ddr, i3c0, i3c1,
+ i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2, smb2c, smb2b, smb1c,
+ smb1b, smb8, smb9, smb10, smb11, sd1, sd1pwr, pwm4, pwm5,
+ pwm6, pwm7, pwm8, pwm9, pwm10, pwm11, mmc8, mmc, mmcwp, mmccd,
+ mmcrst, clkout, serirq, scipme, smi, smb6, smb6b, smb6c,
+ smb6d, smb7, smb7b, smb7c, smb7d, spi1, faninx, r1, spi3,
+ spi3cs1, spi3quad, spi3cs2, spi3cs3, nprd_smi, smb0b, smb0c,
+ smb0den, smb0d, ddc, rg2mdio, wdog1, wdog2, smb12, smb13,
+ spix, spixcs1, clkreq, hgpio0, hgpio1, hgpio2, hgpio3, hgpio4,
+ hgpio5, hgpio6, hgpio7, bu4, bu4b, bu5, bu5b, bu6, gpo187 ]
function:
description:
The function that a group of pins is muxed to
- enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi,
- smb5b, smb5c, lkgpo0, pspi, jm1, jm2, smb4den, smb4b,
- smb4c, smb15, smb16, smb17, smb18, smb19, smb20, smb21,
- smb22, smb23, smb23b, smb4d, smb14, smb5, smb4, smb3,
- spi0cs1, spi0cs2, spi0cs3, spi1cs0, spi1cs1, spi1cs2,
- spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a, uart1, jtag2,
- bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md, r1oen,
- r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2, fanin3,
- fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10,
+ enum: [ iox1, iox2, smb1d, smb2d, lkgpo1, lkgpo2, ioxh, gspi, smb5b,
+ smb5c, lkgpo0, pspi, jm1, jm2, smb4b, smb4c, smb15, smb16,
+ smb17, smb18, smb19, smb20, smb21, smb22, smb23, smb23b, smb4d,
+ smb14, smb5, smb4, smb3, spi0cs1, spi0cs2, spi0cs3, spi1cs0,
+ spi1cs1, spi1cs2, spi1cs3, spi1cs23, smb3c, smb3b, bmcuart0a,
+ uart1, jtag2, bmcuart1, uart2, sg1mdio, bmcuart0b, r1err, r1md,
+ r1oen, r2oen, rmii3, r3oen, smb3d, fanin0, fanin1, fanin2,
+ fanin3, fanin4, fanin5, fanin6, fanin7, fanin8, fanin9, fanin10,
fanin11, fanin12, fanin13, fanin14, fanin15, pwm0, pwm1, pwm2,
pwm3, r2, r2err, r2md, r3rxer, ga20kbc, smb5d, lpc, espi, rg2,
ddr, i3c0, i3c1, i3c2, i3c3, i3c4, i3c5, smb0, smb1, smb2,
smb2c, smb2b, smb1c, smb1b, smb8, smb9, smb10, smb11, sd1,
sd1pwr, pwm4, pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11,
- mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, lpcclk,
- scipme, smi, smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c,
- smb7d, spi1, faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2,
- spi3cs3, nprd_smi, smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio,
- wdog1, wdog2, smb12, smb13, spix, spixcs1, clkreq, hgpio0,
- hgpio1, hgpio2, hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4,
- bu4b, bu5, bu5b, bu6, gpo187 ]
+ mmc8, mmc, mmcwp, mmccd, mmcrst, clkout, serirq, scipme, smi,
+ smb6, smb6b, smb6c, smb6d, smb7, smb7b, smb7c, smb7d, spi1,
+ faninx, r1, spi3, spi3cs1, spi3quad, spi3cs2, spi3cs3, nprd_smi,
+ smb0b, smb0c, smb0den, smb0d, ddc, rg2mdio, wdog1, wdog2,
+ smb12, smb13, spix, spixcs1, clkreq, hgpio0, hgpio1, hgpio2,
+ hgpio3, hgpio4, hgpio5, hgpio6, hgpio7, bu4, bu4b, bu5, bu5b,
+ bu6, gpo187 ]
dependencies:
groups: [ function ]
diff --git a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
index d0af21a564b4..cbfcf215e571 100644
--- a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
@@ -96,6 +96,9 @@ properties:
type: boolean
description: disable schmitt-trigger mode
+ input-schmitt-microvolt:
+ description: schmitt-trigger threshold voltage, in microvolts
+
input-debounce:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: Takes the debounce time in usec as argument or 0 to disable
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
deleted file mode 100644
index 4e90ddd77784..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-Qualcomm APQ8064 TLMM block
-
-Required properties:
-- compatible: "qcom,apq8064-pinctrl"
-- reg: Should be the base address and length of the TLMM block.
-- interrupts: Should be the parent IRQ of the TLMM block.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Should be two.
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells : Should be two.
- The first cell is the gpio pin number and the
- second cell is used for optional parameters.
-- gpio-ranges: see ../gpio/gpio.txt
-
-Optional properties:
-
-- gpio-reserved-ranges: see ../gpio/gpio.txt
-
-Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
-a general description of GPIO and interrupt bindings.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-Qualcomm's pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as pull-up, drive strength, etc.
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-
-The following generic properties as defined in pinctrl-bindings.txt are valid
-to specify in a pin configuration subnode:
-
- pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength,
- output-low, output-high.
-
-Non-empty subnodes must specify the 'pins' property.
-
-Valid values for pins are:
- gpio0-gpio89
-
-Valid values for function are:
- cam_mclk, codec_mic_i2s, codec_spkr_i2s, gp_clk_0a, gp_clk_0b, gp_clk_1a,
- gp_clk_1b, gp_clk_2a, gp_clk_2b, gpio, gsbi1, gsbi2, gsbi3, gsbi4,
- gsbi4_cam_i2c, gsbi5, gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6,
- gsbi6_spi_cs1, gsbi6_spi_cs2, gsbi6_spi_cs3, gsbi7, gsbi7_spi_cs1,
- gsbi7_spi_cs2, gsbi7_spi_cs3, gsbi_cam_i2c, hdmi, mi2s, riva_bt, riva_fm,
- riva_wlan, sdc2, sdc4, slimbus, spkr_i2s, tsif1, tsif2, usb2_hsic, ps_hold
-
-Example:
-
- msmgpio: pinctrl@800000 {
- compatible = "qcom,apq8064-pinctrl";
- reg = <0x800000 0x4000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <0 16 0x4>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&gsbi5_uart_default>;
- gpio-ranges = <&msmgpio 0 0 90>;
-
- gsbi5_uart_default: gsbi5_uart_default {
- mux {
- pins = "gpio51", "gpio52";
- function = "gsbi5";
- };
-
- tx {
- pins = "gpio51";
- drive-strength = <4>;
- bias-disable;
- };
-
- rx {
- pins = "gpio52";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml
new file mode 100644
index 000000000000..f251dcd4bb7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,apq8064-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. APQ8064 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Top Level Mode Multiplexer pin controller in Qualcomm APQ8064 SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,apq8064-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges: true
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-apq8064-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-apq8064-tlmm-state"
+ additionalProperties: false
+
+$defs:
+ qcom-apq8064-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-8][0-9])$"
+ - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc3_clk, sdc3_cmd, sdc3_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ cam_mclk, codec_mic_i2s, codec_spkr_i2s, gp_clk_0a,
+ gp_clk_0b, gp_clk_1a, gp_clk_1b, gp_clk_2a, gp_clk_2b,
+ gpio, gsbi1, gsbi2, gsbi3, gsbi4, gsbi4_cam_i2c,
+ gsbi5, gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3,
+ gsbi6, gsbi6_spi_cs1, gsbi6_spi_cs2, gsbi6_spi_cs3,
+ gsbi7, gsbi7_spi_cs1, gsbi7_spi_cs2, gsbi7_spi_cs3,
+ gsbi_cam_i2c, hdmi, mi2s, riva_bt, riva_fm, riva_wlan,
+ sdc2, sdc4, slimbus, spkr_i2s, tsif1, tsif2, usb2_hsic,
+ ps_hold ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@800000 {
+ compatible = "qcom,apq8064-pinctrl";
+ reg = <0x800000 0x4000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 90>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+
+ uart-state {
+ rx-pins {
+ pins = "gpio52";
+ function = "gsbi5";
+ bias-pull-up;
+ };
+
+ tx-pins {
+ pins = "gpio51";
+ function = "gsbi5";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
deleted file mode 100644
index c9782397ff14..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
+++ /dev/null
@@ -1,188 +0,0 @@
-Qualcomm APQ8084 TLMM block
-
-This binding describes the Top Level Mode Multiplexer block found in the
-MSM8960 platform.
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be "qcom,apq8084-pinctrl"
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: the base address and size of the TLMM register space.
-
-- interrupts:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: should specify the TLMM summary IRQ.
-
-- interrupt-controller:
- Usage: required
- Value type: <none>
- Definition: identifies this node as an interrupt controller
-
-- #interrupt-cells:
- Usage: required
- Value type: <u32>
- Definition: must be 2. Specifying the pin number and flags, as defined
- in <dt-bindings/interrupt-controller/irq.h>
-
-- gpio-controller:
- Usage: required
- Value type: <none>
- Definition: identifies this node as a gpio controller
-
-- #gpio-cells:
- Usage: required
- Value type: <u32>
- Definition: must be 2. Specifying the pin number and flags, as defined
- in <dt-bindings/gpio/gpio.h>
-
-- gpio-ranges:
- Usage: required
- Definition: see ../gpio/gpio.txt
-
-- gpio-reserved-ranges:
- Usage: optional
- Definition: see ../gpio/gpio.txt
-
-Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
-a general description of GPIO and interrupt bindings.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-The pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as pull-up, drive strength, etc.
-
-
-PIN CONFIGURATION NODES:
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-
-The following generic properties as defined in pinctrl-bindings.txt are valid
-to specify in a pin configuration subnode:
-
-- pins:
- Usage: required
- Value type: <string-array>
- Definition: List of gpio pins affected by the properties specified in
- this subnode. Valid pins are:
- gpio0-gpio146,
- sdc1_clk,
- sdc1_cmd,
- sdc1_data
- sdc2_clk,
- sdc2_cmd,
- sdc2_data
-
-- function:
- Usage: required
- Value type: <string>
- Definition: Specify the alternative function to be configured for the
- specified pins. Functions are only valid for gpio pins.
- Valid values are:
- adsp_ext, audio_ref, blsp_i2c1, blsp_i2c2, blsp_i2c3,
- blsp_i2c4, blsp_i2c5, blsp_i2c6, blsp_i2c7, blsp_i2c8,
- blsp_i2c9, blsp_i2c10, blsp_i2c11, blsp_i2c12,
- blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, blsp_spi5,
- blsp_spi6, blsp_spi7, blsp_spi8, blsp_spi9, blsp_spi10,
- blsp_spi11, blsp_spi12, blsp_uart1, blsp_uart2, blsp_uart3,
- blsp_uart4, blsp_uart5, blsp_uart6, blsp_uart7, blsp_uart8,
- blsp_uart9, blsp_uart10, blsp_uart11, blsp_uart12,
- blsp_uim1, blsp_uim2, blsp_uim3, blsp_uim4, blsp_uim5,
- blsp_uim6, blsp_uim7, blsp_uim8, blsp_uim9, blsp_uim10,
- blsp_uim11, blsp_uim12, cam_mclk0, cam_mclk1, cam_mclk2,
- cam_mclk3, cci_async, cci_async_in0, cci_i2c0, cci_i2c1,
- cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4,
- edp_hpd, gcc_gp1, gcc_gp2, gcc_gp3, gcc_obt, gcc_vtt,i
- gp_mn, gp_pdm0, gp_pdm1, gp_pdm2, gp0_clk, gp1_clk, gpio,
- hdmi_cec, hdmi_ddc, hdmi_dtest, hdmi_hpd, hdmi_rcv, hsic,
- ldo_en, ldo_update, mdp_vsync, pci_e0, pci_e0_n, pci_e0_rst,
- pci_e1, pci_e1_rst, pci_e1_rst_n, pci_e1_clkreq_n, pri_mi2s,
- qua_mi2s, sata_act, sata_devsleep, sata_devsleep_n,
- sd_write, sdc_emmc_mode, sdc3, sdc4, sec_mi2s, slimbus,
- spdif_tx, spkr_i2s, spkr_i2s_ws, spss_geni, ter_mi2s, tsif1,
- tsif2, uim, uim_batt_alarm
-
-- bias-disable:
- Usage: optional
- Value type: <none>
- Definition: The specified pins should be configured as no pull.
-
-- bias-pull-down:
- Usage: optional
- Value type: <none>
- Definition: The specified pins should be configured as pull down.
-
-- bias-pull-up:
- Usage: optional
- Value type: <none>
- Definition: The specified pins should be configured as pull up.
-
-- output-high:
- Usage: optional
- Value type: <none>
- Definition: The specified pins are configured in output mode, driven
- high.
- Not valid for sdc pins.
-
-- output-low:
- Usage: optional
- Value type: <none>
- Definition: The specified pins are configured in output mode, driven
- low.
- Not valid for sdc pins.
-
-- drive-strength:
- Usage: optional
- Value type: <u32>
- Definition: Selects the drive strength for the specified pins, in mA.
- Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
-
-Example:
-
- tlmm: pinctrl@fd510000 {
- compatible = "qcom,apq8084-pinctrl";
- reg = <0xfd510000 0x4000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&tlmm 0 0 147>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <0 208 0>;
-
- uart2: uart2-default {
- mux {
- pins = "gpio4", "gpio5";
- function = "blsp_uart2";
- };
-
- tx {
- pins = "gpio4";
- drive-strength = <4>;
- bias-disable;
- };
-
- rx {
- pins = "gpio5";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml
new file mode 100644
index 000000000000..38877d8b97ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.yaml
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,apq8084-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. APQ8084 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Top Level Mode Multiplexer pin controller in Qualcomm APQ8084 SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,apq8084-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges: true
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-apq8084-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-apq8084-tlmm-state"
+ additionalProperties: false
+
+$defs:
+ qcom-apq8084-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-3][0-9]|14[0-6])$"
+ - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
+ sdc2_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ adsp_ext, audio_ref, blsp_i2c1, blsp_i2c2, blsp_i2c3,
+ blsp_i2c4, blsp_i2c5, blsp_i2c6, blsp_i2c7, blsp_i2c8,
+ blsp_i2c9, blsp_i2c10, blsp_i2c11, blsp_i2c12,
+ blsp_spi1, blsp_spi1_cs1, blsp_spi1_cs2, blsp_spi1_cs3,
+ blsp_spi2, blsp_spi3, blsp_spi3_cs1, blsp_spi3_cs2,
+ blsp_spi3_cs3, blsp_spi4, blsp_spi5, blsp_spi6,
+ blsp_spi7, blsp_spi8, blsp_spi9, blsp_spi10,
+ blsp_spi10_cs1, blsp_spi10_cs2, blsp_spi10_cs3,
+ blsp_spi11, blsp_spi12, blsp_uart1, blsp_uart2,
+ blsp_uart3, blsp_uart4, blsp_uart5, blsp_uart6,
+ blsp_uart7, blsp_uart8, blsp_uart9, blsp_uart10,
+ blsp_uart11, blsp_uart12, blsp_uim1, blsp_uim2,
+ blsp_uim3, blsp_uim4, blsp_uim5, blsp_uim6, blsp_uim7,
+ blsp_uim8, blsp_uim9, blsp_uim10, blsp_uim11,
+ blsp_uim12, cam_mclk0, cam_mclk1, cam_mclk2, cam_mclk3,
+ cci_async, cci_async_in0, cci_i2c0, cci_i2c1,
+ cci_timer0, cci_timer1, cci_timer2, cci_timer3,
+ cci_timer4, edp_hpd, gcc_gp1, gcc_gp2, gcc_gp3,
+ gcc_obt, gcc_vtt, gp_mn, gp_pdm0, gp_pdm1, gp_pdm2,
+ gp0_clk, gp1_clk, gpio, hdmi_cec, hdmi_ddc, hdmi_dtest,
+ hdmi_hpd, hdmi_rcv, hsic, ldo_en, ldo_update,
+ mdp_vsync, pci_e0, pci_e0_n, pci_e0_rst, pci_e1,
+ pci_e1_rst, pci_e1_rst_n, pci_e1_clkreq_n, pri_mi2s,
+ qua_mi2s, sata_act, sata_devsleep, sata_devsleep_n,
+ sd_write, sdc_emmc_mode, sdc3, sdc4, sec_mi2s, slimbus,
+ spdif_tx, spkr_i2s, spkr_i2s_ws, spss_geni, ter_mi2s,
+ tsif1, tsif2, uim, uim_batt_alarm ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@fd510000 {
+ compatible = "qcom,apq8084-pinctrl";
+ reg = <0xfd510000 0x4000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 147>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+ uart-state {
+ rx-pins {
+ pins = "gpio5";
+ function = "blsp_uart2";
+ bias-pull-up;
+ };
+
+ tx-pins {
+ pins = "gpio4";
+ function = "blsp_uart2";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
deleted file mode 100644
index 97858a7c07a2..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-Qualcomm Atheros IPQ4019 TLMM block
-
-This is the Top Level Mode Multiplexor block found on the Qualcomm IPQ8019
-platform, it provides pinctrl, pinmux, pinconf, and gpiolib facilities.
-
-Required properties:
-- compatible: "qcom,ipq4019-pinctrl"
-- reg: Should be the base address and length of the TLMM block.
-- interrupts: Should be the parent IRQ of the TLMM block.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Should be two.
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells : Should be two.
- The first cell is the gpio pin number and the
- second cell is used for optional parameters.
-- gpio-ranges: see ../gpio/gpio.txt
-
-Optional properties:
-
-- gpio-reserved-ranges: see ../gpio/gpio.txt
-
-Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
-a general description of GPIO and interrupt bindings.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-The pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as pull-up, drive strength, etc.
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-
-The following generic properties as defined in pinctrl-bindings.txt are valid
-to specify in a pin configuration subnode:
- pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-open-drain,
- drive-strength.
-
-Non-empty subnodes must specify the 'pins' property.
-Note that not all properties are valid for all pins.
-
-
-Valid values for qcom,pins are:
- gpio0-gpio99
- Supports mux, bias and drive-strength
-
-Valid values for qcom,function are:
-aud_pin, audio_pwm, blsp_i2c0, blsp_i2c1, blsp_spi0, blsp_spi1, blsp_uart0,
-blsp_uart1, chip_rst, gpio, i2s_rx, i2s_spdif_in, i2s_spdif_out, i2s_td, i2s_tx,
-jtag, led0, led1, led2, led3, led4, led5, led6, led7, led8, led9, led10, led11,
-mdc, mdio, pcie, pmu, prng_rosc, qpic, rgmii, rmii, sdio, smart0, smart1,
-smart2, smart3, tm, wifi0, wifi1
-
-Example:
-
- tlmm: pinctrl@1000000 {
- compatible = "qcom,ipq4019-pinctrl";
- reg = <0x1000000 0x300000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&tlmm 0 0 100>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <0 208 0>;
-
- serial_pins: serial_pinmux {
- mux {
- pins = "gpio60", "gpio61";
- function = "blsp_uart0";
- bias-disable;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml
new file mode 100644
index 000000000000..cc5de9f77680
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,ipq4019-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. IPQ4019 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Top Level Mode Multiplexer pin controller in Qualcomm IPQ4019 SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,ipq4019-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges: true
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-ipq4019-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-ipq4019-tlmm-state"
+ additionalProperties: false
+
+ "-hog(-[0-9]+)?$":
+ type: object
+ required:
+ - gpio-hog
+
+$defs:
+ qcom-ipq4019-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ pattern: "^gpio([0-9]|[1-9][0-9])$"
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ aud_pin, audio_pwm, blsp_i2c0, blsp_i2c1, blsp_spi0,
+ blsp_spi1, blsp_uart0, blsp_uart1, chip_rst, gpio,
+ i2s_rx, i2s_spdif_in, i2s_spdif_out, i2s_td, i2s_tx,
+ jtag, led0, led1, led2, led3, led4, led5, led6, led7,
+ led8, led9, led10, led11, mdc, mdio, pcie, pmu,
+ prng_rosc, qpic, rgmii, rmii, sdio, smart0, smart1,
+ smart2, smart3, tm, wifi0, wifi1 ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,ipq4019-pinctrl";
+ reg = <0x01000000 0x300000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 100>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+ uart-state {
+ pins = "gpio16", "gpio17";
+ function = "blsp_uart0";
+ bias-disable;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
deleted file mode 100644
index a7aaaa7db83b..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-Qualcomm IPQ8064 TLMM block
-
-Required properties:
-- compatible: "qcom,ipq8064-pinctrl"
-- reg: Should be the base address and length of the TLMM block.
-- interrupts: Should be the parent IRQ of the TLMM block.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Should be two.
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells : Should be two.
- The first cell is the gpio pin number and the
- second cell is used for optional parameters.
-- gpio-ranges: see ../gpio/gpio.txt
-
-Optional properties:
-
-- gpio-reserved-ranges: see ../gpio/gpio.txt
-
-Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
-a general description of GPIO and interrupt bindings.
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-Qualcomm's pin configuration nodes act as a container for an arbitrary number of
-subnodes. Each of these subnodes represents some desired configuration for a
-pin, a group, or a list of pins or groups. This configuration can include the
-mux function to select on those pin(s)/group(s), and various pin configuration
-parameters, such as pull-up, drive strength, etc.
-
-The name of each subnode is not important; all subnodes should be enumerated
-and processed purely based on their content.
-
-Each subnode only affects those parameters that are explicitly listed. In
-other words, a subnode that lists a mux function but no pin configuration
-parameters implies no information about any pin configuration parameters.
-Similarly, a pin subnode that describes a pullup parameter implies no
-information about e.g. the mux function.
-
-
-The following generic properties as defined in pinctrl-bindings.txt are valid
-to specify in a pin configuration subnode:
-
- pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength,
- output-low, output-high.
-
-Non-empty subnodes must specify the 'pins' property.
-
-Valid values for qcom,pins are:
- gpio0-gpio68
- Supports mux, bias, and drive-strength
-
- sdc3_clk, sdc3_cmd, sdc3_data
- Supports bias and drive-strength
-
-
-Valid values for function are:
- mdio, mi2s, pdm, ssbi, spmi, audio_pcm, gpio, gsbi1, gsbi2, gsbi4, gsbi5,
- gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6, gsbi7, nss_spi, sdc1,
- spdif, nand, tsif1, tsif2, usb_fs_n, usb_fs, usb2_hsic, rgmii2, sata,
- pcie1_rst, pcie1_prsnt, pcie1_pwren_n, pcie1_pwren, pcie1_pwrflt,
- pcie1_clk_req, pcie2_rst, pcie2_prsnt, pcie2_pwren_n, pcie2_pwren,
- pcie2_pwrflt, pcie2_clk_req, pcie3_rst, pcie3_prsnt, pcie3_pwren_n,
- pcie3_pwren, pcie3_pwrflt, pcie3_clk_req, ps_hold
-
-Example:
-
- pinmux: pinctrl@800000 {
- compatible = "qcom,ipq8064-pinctrl";
- reg = <0x800000 0x4000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinmux 0 0 69>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <0 32 0x4>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&gsbi5_uart_default>;
-
- gsbi5_uart_default: gsbi5_uart_default {
- mux {
- pins = "gpio18", "gpio19";
- function = "gsbi5";
- };
-
- tx {
- pins = "gpio18";
- drive-strength = <4>;
- bias-disable;
- };
-
- rx {
- pins = "gpio19";
- drive-strength = <2>;
- bias-pull-up;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml
new file mode 100644
index 000000000000..58f11e1bdd4f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.yaml
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,ipq8064-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. IPQ8064 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ Top Level Mode Multiplexer pin controller in Qualcomm IPQ8064 SoC.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,ipq8064-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ gpio-reserved-ranges: true
+
+patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/qcom-ipq8064-tlmm-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/qcom-ipq8064-tlmm-state"
+ additionalProperties: false
+
+$defs:
+ qcom-ipq8064-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+ $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+ unevaluatedProperties: false
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-5][0-9]|6[0-8])$"
+ - enum: [ sdc3_clk, sdc3_cmd, sdc3_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ mdio, mi2s, pdm, ssbi, spmi, audio_pcm, gpio, gsbi1, gsbi2, gsbi4, gsbi5,
+ gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6, gsbi7, nss_spi, sdc1,
+ spdif, nand, tsif1, tsif2, usb_fs_n, usb_fs, usb2_hsic, rgmii2, sata,
+ pcie1_rst, pcie1_prsnt, pcie1_pwren_n, pcie1_pwren, pcie1_pwrflt,
+ pcie1_clk_req, pcie2_rst, pcie2_prsnt, pcie2_pwren_n, pcie2_pwren,
+ pcie2_pwrflt, pcie2_clk_req, pcie3_rst, pcie3_prsnt, pcie3_pwren_n,
+ pcie3_pwren, pcie3_pwrflt, pcie3_clk_req, ps_hold ]
+
+ required:
+ - pins
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@800000 {
+ compatible = "qcom,ipq8064-pinctrl";
+ reg = <0x00800000 0x4000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 69>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+
+ uart-state {
+ rx-pins {
+ pins = "gpio19";
+ function = "gsbi5";
+ bias-pull-up;
+ };
+
+ tx-pins {
+ pins = "gpio18";
+ function = "gsbi5";
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index 2784d32fdde2..c1b799167d81 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -425,6 +425,7 @@ patternProperties:
additionalProperties: false
"-hog(-[0-9]+)?$":
+ type: object
required:
- gpio-hog
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml
index dfe5616b9b85..0f331844608c 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml
@@ -43,6 +43,7 @@ patternProperties:
additionalProperties: false
"-hog(-[0-9]+)?$":
+ type: object
required:
- gpio-hog
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
index 5d84364d1358..cfe004573366 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
@@ -25,6 +25,7 @@ properties:
- renesas,pfc-r8a7745 # RZ/G1E
- renesas,pfc-r8a77470 # RZ/G1C
- renesas,pfc-r8a774a1 # RZ/G2M
+ - renesas,pfc-r8a774a3 # RZ/G2M v3.0
- renesas,pfc-r8a774b1 # RZ/G2N
- renesas,pfc-r8a774c0 # RZ/G2E
- renesas,pfc-r8a774e1 # RZ/G2H
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
index 20e806dce1ec..6a23d845f1f2 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
@@ -45,6 +45,7 @@ properties:
- rockchip,rk3368-pinctrl
- rockchip,rk3399-pinctrl
- rockchip,rk3568-pinctrl
+ - rockchip,rk3576-pinctrl
- rockchip,rk3588-pinctrl
- rockchip,rv1108-pinctrl
- rockchip,rv1126-pinctrl
diff --git a/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml
new file mode 100644
index 000000000000..1e6a55afe26a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/sophgo,cv1800-pinctrl.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/sophgo,cv1800-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo CV1800 Pin Controller
+
+maintainers:
+ - Inochi Amaoto <inochiama@outlook.com>
+
+properties:
+ compatible:
+ enum:
+ - sophgo,cv1800b-pinctrl
+ - sophgo,cv1812h-pinctrl
+ - sophgo,sg2000-pinctrl
+ - sophgo,sg2002-pinctrl
+
+ reg:
+ items:
+ - description: pinctrl for system domain
+ - description: pinctrl for rtc domain
+
+ reg-names:
+ items:
+ - const: sys
+ - const: rtc
+
+ resets:
+ maxItems: 1
+
+patternProperties:
+ '-cfg$':
+ type: object
+ description:
+ A pinctrl node should contain at least one subnode representing the
+ pinctrl groups available on the machine.
+
+ additionalProperties: false
+
+ patternProperties:
+ '-pins$':
+ type: object
+ description: |
+ Each subnode will list the pins it needs, and how they should
+ be configured, with regard to muxer configuration, bias, input
+ enable/disable, input schmitt trigger, slew-rate, drive strength
+ and bus hold state. In addition, all pins in the same subnode
+ should have the same power domain. For configuration detail,
+ refer to https://github.com/sophgo/sophgo-doc/.
+
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ pinmux:
+ description: |
+ The list of GPIOs and their mux settings that properties in the
+ node apply to. This should be set using the GPIOMUX or GPIOMUX2
+ macro.
+
+ bias-pull-up:
+ type: boolean
+
+ bias-pull-down:
+ type: boolean
+
+ drive-strength-microamp:
+ description: typical output current when driving a high level.
+
+ input-schmitt-microvolt:
+ description: typical input threshold for the Schmitt trigger.
+
+ power-source:
+ description: I/O power supply voltage, in millivolts.
+ enum: [ 1800, 3300 ]
+
+ slew-rate:
+ description: slew rate for output buffer (0 is fast, 1 is slow)
+ enum: [ 0, 1 ]
+
+ bias-bus-hold: true
+
+ required:
+ - pinmux
+ - power-source
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/pinctrl-cv1800b.h>
+
+ pinctrl@3001000 {
+ compatible = "sophgo,cv1800b-pinctrl";
+ reg = <0x03001000 0x1000>,
+ <0x05027000 0x1000>;
+ reg-names = "sys", "rtc";
+
+ uart0_cfg: uart0-cfg {
+ uart0-pins {
+ pinmux = <PINMUX(PIN_UART0_TX, 0)>,
+ <PINMUX(PIN_UART0_RX, 0)>;
+ bias-pull-up;
+ drive-strength-microamp = <10800>;
+ input-schmitt-microvolt = <0>;
+ power-source = <3300>;
+ slew-rate = <0>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt
index 128b55be67b7..27f1797be963 100644
--- a/Documentation/devicetree/bindings/power/wakeup-source.txt
+++ b/Documentation/devicetree/bindings/power/wakeup-source.txt
@@ -25,7 +25,7 @@ List of legacy properties and respective binding document
2. "has-tpo" Documentation/devicetree/bindings/rtc/rtc-opal.txt
3. "linux,wakeup" Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
Documentation/devicetree/bindings/mfd/tc3589x.txt
- Documentation/devicetree/bindings/input/touchscreen/ads7846.txt
+ Documentation/devicetree/bindings/input/touchscreen/ti,ads7843.yaml
4. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8921-keypad.yaml
5. "linux,input-wakeup" Documentation/devicetree/bindings/input/samsung,s3c6410-keypad.yaml
6. "nvidia,wakeup-source" Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
diff --git a/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml b/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml
new file mode 100644
index 000000000000..903210ef9c31
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/cirrus,ep9301-pwm.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/cirrus,ep9301-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic ep93xx PWM controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-pwm
+ - items:
+ - enum:
+ - cirrus,ep9302-pwm
+ - cirrus,ep9307-pwm
+ - cirrus,ep9312-pwm
+ - cirrus,ep9315-pwm
+ - const: cirrus,ep9301-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SoC PWM clock
+
+ "#pwm-cells":
+ const: 3
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ pwm@80910000 {
+ compatible = "cirrus,ep9301-pwm";
+ reg = <0x80910000 0x10>;
+ clocks = <&syscon EP93XX_CLK_PWM>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
index 73fda7565cd1..d7fad7b3c2c6 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
@@ -16,6 +16,7 @@ description:
properties:
compatible:
enum:
+ - qcom,sdx75-mpss-pas
- qcom,sm8550-adsp-pas
- qcom,sm8550-cdsp-pas
- qcom,sm8550-mpss-pas
@@ -113,6 +114,7 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sdx75-mpss-pas
- qcom,sm8650-mpss-pas
then:
properties:
@@ -146,6 +148,7 @@ allOf:
properties:
compatible:
enum:
+ - qcom,sdx75-mpss-pas
- qcom,sm8550-mpss-pas
- qcom,sm8650-mpss-pas
then:
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml
new file mode 100644
index 000000000000..2bd0752b6ba9
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/ti,k3-m4f-rproc.yaml
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/ti,k3-m4f-rproc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI K3 M4F processor subsystems
+
+maintainers:
+ - Hari Nagalla <hnagalla@ti.com>
+ - Mathieu Poirier <mathieu.poirier@linaro.org>
+
+description: |
+ Some K3 family SoCs have Arm Cortex-M4F cores. AM64x is a SoC in the K3
+ family with an M4F core. Typically, safety-oriented applications may use
+ the M4F core in isolation, without IPC, whereas some industrial and
+ home automation applications may use the M4F core as a remote processor
+ with IPC communication.
+
+$ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,am64-m4fss
+
+ power-domains:
+ maxItems: 1
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 2
+
+ reg:
+ items:
+ - description: IRAM internal memory region
+ - description: DRAM internal memory region
+
+ reg-names:
+ items:
+ - const: iram
+ - const: dram
+
+ resets:
+ maxItems: 1
+
+ firmware-name:
+ maxItems: 1
+ description: Name of firmware to load for the M4F core
+
+ mboxes:
+ description:
+ OMAP Mailbox specifier denoting the sub-mailbox, to be used for
+ communication with the remote processor. This property should match
+ the sub-mailbox node used in the firmware image.
+ maxItems: 1
+
+ memory-region:
+ description:
+ phandle to the reserved memory nodes to be associated with the
+ remoteproc device. Optional memory regions available for
+ firmware-specific purposes.
+ (see reserved-memory/reserved-memory.yaml in dtschema project)
+ maxItems: 8
+ items:
+ - description: regions used for DMA allocations like vrings, vring buffers
+ and memory dedicated to firmware's specific purposes.
+ additionalItems: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - ti,sci
+ - ti,sci-dev-id
+ - ti,sci-proc-ids
+ - resets
+ - firmware-name
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ mcu_m4fss_dma_memory_region: m4f-dma-memory@9cb00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9cb00000 0x00 0x100000>;
+ no-map;
+ };
+
+ mcu_m4fss_memory_region: m4f-memory@9cc00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9cc00000 0x00 0xe00000>;
+ no-map;
+ };
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ mailbox0_cluster0: mailbox-0 {
+ #mbox-cells = <1>;
+ };
+
+ remoteproc@5000000 {
+ compatible = "ti,am64-m4fss";
+ reg = <0x00 0x5000000 0x00 0x30000>,
+ <0x00 0x5040000 0x00 0x10000>;
+ reg-names = "iram", "dram";
+ resets = <&k3_reset 9 1>;
+ firmware-name = "am62-mcu-m4f0_0-fw";
+ mboxes = <&mailbox0_cluster0>, <&mbox_m4_0>;
+ memory-region = <&mcu_m4fss_dma_memory_region>,
+ <&mcu_m4fss_memory_region>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <9>;
+ ti,sci-proc-ids = <0x18 0xff>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
index 6f13da11f593..ee63c03949c9 100644
--- a/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/xlnx,zynqmp-r5fss.yaml
@@ -62,6 +62,7 @@ properties:
patternProperties:
"^r(.*)@[0-9a-f]+$":
type: object
+ additionalProperties: false
description: |
The RPU is located in the Low Power Domain of the Processor Subsystem.
Each processor includes separate L1 instruction and data caches and
diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml
index a06dbc6b4928..2cf2026cff57 100644
--- a/Documentation/devicetree/bindings/riscv/extensions.yaml
+++ b/Documentation/devicetree/bindings/riscv/extensions.yaml
@@ -171,6 +171,13 @@ properties:
memory types as ratified in the 20191213 version of the privileged
ISA specification.
+ - const: svvptc
+ description:
+ The standard Svvptc supervisor-level extension for
+ address-translation cache behaviour with respect to invalid entries
+ as ratified at commit 4a69197e5617 ("Update to ratified state") of
+ riscv-svvptc.
+
- const: zacas
description: |
The Zacas extension for Atomic Compare-and-Swap (CAS) instructions
diff --git a/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml b/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
index 5ade5dfad048..cda8ad7c1203 100644
--- a/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
+++ b/Documentation/devicetree/bindings/rtc/microcrystal,rv3028.yaml
@@ -22,6 +22,9 @@ properties:
interrupts:
maxItems: 1
+ "#clock-cells":
+ const: 0
+
trickle-resistor-ohms:
enum:
- 3000
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml b/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml
new file mode 100644
index 000000000000..f3d20e976965
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/sprd,sc2731-rtc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/sprd,sc2731-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Spreadtrum SC2731 Real Time Clock
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ const: sprd,sc2731-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: rtc.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ pmic {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@280 {
+ compatible = "sprd,sc2731-rtc";
+ reg = <0x280>;
+ interrupt-parent = <&sc2731_pmic>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
deleted file mode 100644
index 1f5754299d31..000000000000
--- a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Spreadtrum SC27xx Real Time Clock
-
-Required properties:
-- compatible: should be "sprd,sc2731-rtc".
-- reg: address offset of rtc register.
-- interrupts: rtc alarm interrupt.
-
-Example:
-
- sc2731_pmic: pmic@0 {
- compatible = "sprd,sc2731";
- reg = <0>;
- spi-max-frequency = <26000000>;
- interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #interrupt-cells = <2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- rtc@280 {
- compatible = "sprd,sc2731-rtc";
- reg = <0x280>;
- interrupt-parent = <&sc2731_pmic>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
- };
- };
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
index 7a0fab721cf1..aae06e570c22 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
@@ -53,6 +53,28 @@ properties:
override default rtc_ck parent clock phandle of the new parent clock of rtc_ck
maxItems: 1
+patternProperties:
+ "^rtc-[a-z]+-[0-9]+$":
+ type: object
+ $ref: /schemas/pinctrl/pinmux-node.yaml
+ description: |
+ Configuration of the STM32 RTC pins. The STM32 RTC is able to output
+ some signals on specific pins:
+ - LSCO (Low Speed Clock Output), which allows outputting the LSE clock
+ on a pin.
+ - Alarm out, which allows sending a pulse on a pin when alarm A of the
+ RTC expires.
+ additionalProperties: false
+ properties:
+ function:
+ enum:
+ - lsco
+ - alarm-a
+ pins:
+ enum:
+ - out1
+ - out2
+ - out2_rmp
+
allOf:
- if:
properties:
@@ -68,6 +90,9 @@ allOf:
clock-names: false
+ patternProperties:
+ "^rtc-[a-z]+-[0-9]+$": false
+
required:
- st,syscfg
@@ -83,6 +108,9 @@ allOf:
minItems: 2
maxItems: 2
+ patternProperties:
+ "^rtc-[a-z]+-[0-9]+$": false
+
required:
- clock-names
- st,syscfg
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index fffd759c603f..7330a7200831 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -38,12 +38,13 @@ properties:
- dallas,ds1672
# Extremely Accurate I²C RTC with Integrated Crystal and SRAM
- dallas,ds3232
+ # SD2405AL Real-Time Clock
+ - dfrobot,sd2405al
# EM Microelectronic EM3027 RTC
- emmicro,em3027
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
- epson,rx8010
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
- - epson,rx8025
- epson,rx8035
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE with Battery Backed RAM
- epson,rx8111
@@ -52,10 +53,6 @@ properties:
- epson,rx8581
# Android Goldfish Real-time Clock
- google,goldfish-rtc
- # Intersil ISL1208 Low Power RTC with Battery Backed SRAM
- - isil,isl1208
- # Intersil ISL1218 Low Power RTC with Battery Backed SRAM
- - isil,isl1218
# Mvebu Real-time Clock
- marvell,orion-rtc
# Maxim DS1742/DS1743 Real-time Clock
@@ -68,8 +65,6 @@ properties:
- microcrystal,rv8523
# NXP LPC32xx SoC Real-time Clock
- nxp,lpc3220-rtc
- # Real-time Clock Module
- - pericom,pt7c4338
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
- ricoh,r2025sd
# I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
diff --git a/Documentation/devicetree/bindings/serial/8250_omap.yaml b/Documentation/devicetree/bindings/serial/8250_omap.yaml
index 6a7be42da523..4b78de6b46a2 100644
--- a/Documentation/devicetree/bindings/serial/8250_omap.yaml
+++ b/Documentation/devicetree/bindings/serial/8250_omap.yaml
@@ -76,6 +76,7 @@ properties:
clock-frequency: true
current-speed: true
overrun-throttle-ms: true
+ wakeup-source: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
index eb2992a447d7..f466c38518c4 100644
--- a/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
+++ b/Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
@@ -23,13 +23,20 @@ properties:
- const: atmel,at91sam9260-dbgu
- const: atmel,at91sam9260-usart
- items:
- - const: microchip,sam9x60-usart
+ - enum:
+ - microchip,sam9x60-usart
+ - microchip,sam9x7-usart
- const: atmel,at91sam9260-usart
- items:
- const: microchip,sam9x60-dbgu
- const: microchip,sam9x60-usart
- const: atmel,at91sam9260-dbgu
- const: atmel,at91sam9260-usart
+ - items:
+ - const: microchip,sam9x7-dbgu
+ - const: atmel,at91sam9260-dbgu
+ - const: microchip,sam9x7-usart
+ - const: atmel,at91sam9260-usart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/mediatek,uart.yaml b/Documentation/devicetree/bindings/serial/mediatek,uart.yaml
index ff61ffdcad1d..1b02f0b197ff 100644
--- a/Documentation/devicetree/bindings/serial/mediatek,uart.yaml
+++ b/Documentation/devicetree/bindings/serial/mediatek,uart.yaml
@@ -36,6 +36,7 @@ properties:
- mediatek,mt7622-uart
- mediatek,mt7623-uart
- mediatek,mt7629-uart
+ - mediatek,mt7981-uart
- mediatek,mt7986-uart
- mediatek,mt7988-uart
- mediatek,mt8127-uart
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index afc7c05898a1..51d9fb0f4763 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -46,6 +46,7 @@ properties:
- items:
- enum:
- renesas,scif-r8a774a1 # RZ/G2M
+ - renesas,scif-r8a774a3 # RZ/G2M v3.0
- renesas,scif-r8a774b1 # RZ/G2N
- renesas,scif-r8a774c0 # RZ/G2E
- renesas,scif-r8a774e1 # RZ/G2H
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
index 0f0131026911..788c80e47831 100644
--- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -56,14 +56,8 @@ properties:
maxItems: 5
clock-names:
- description: N = 0 is allowed for SoCs without internal baud clock mux.
minItems: 2
- items:
- - const: uart
- - pattern: '^clk_uart_baud[0-3]$'
- - pattern: '^clk_uart_baud[0-3]$'
- - pattern: '^clk_uart_baud[0-3]$'
- - pattern: '^clk_uart_baud[0-3]$'
+ maxItems: 5
dmas:
items:
@@ -103,18 +97,45 @@ allOf:
compatible:
contains:
enum:
- - samsung,s5pv210-uart
+ - samsung,s3c6400-uart
then:
properties:
clocks:
- minItems: 2
+ minItems: 3
maxItems: 3
+
+ clock-names:
+ items:
+ - const: uart
+ - const: clk_uart_baud2
+ - const: clk_uart_baud3
+
+ else:
+ properties:
clock-names:
minItems: 2
items:
- const: uart
- - pattern: '^clk_uart_baud[0-1]$'
- - pattern: '^clk_uart_baud[0-1]$'
+ - const: clk_uart_baud0
+ - const: clk_uart_baud1
+ - const: clk_uart_baud2
+ - const: clk_uart_baud3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,s5pv210-uart
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+
+ clock-names:
+ minItems: 3
+ maxItems: 3
- if:
properties:
@@ -129,10 +150,9 @@ allOf:
properties:
clocks:
maxItems: 2
+
clock-names:
- items:
- - const: uart
- - const: clk_uart_baud0
+ maxItems: 2
- if:
properties:
@@ -146,6 +166,12 @@ allOf:
properties:
reg-io-width: false
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ maxItems: 2
+
unevaluatedProperties: false
examples:
@@ -163,3 +189,19 @@ examples:
<&clocks SCLK_UART>;
samsung,uart-fifosize = <16>;
};
+ - |
+ #include <dt-bindings/clock/google,gs101.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ serial_0: serial@10a00000 {
+ compatible = "google,gs101-uart";
+ reg = <0x10a00000 0xc0>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 634 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&uart0_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <256>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml b/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml
new file mode 100644
index 000000000000..7cb1b4114985
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/cirrus/cirrus,ep9301-syscon.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/cirrus/cirrus,ep9301-syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic EP93xx Platforms System Controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+description: |
+ Central resources are controlled by a set of software-locked registers,
+ which can be used to prevent accidental accesses. Syscon generates
+ the various bus and peripheral clocks and controls the system startup
+ configuration.
+
+ The System Controller (Syscon) provides:
+ - Clock control
+ - Power management
+ - System configuration management
+
+ Syscon registers are common to all EP93xx SoCs, though some peripherals
+ may be missing depending on the actual SoC model.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - cirrus,ep9302-syscon
+ - cirrus,ep9307-syscon
+ - cirrus,ep9312-syscon
+ - cirrus,ep9315-syscon
+ - const: cirrus,ep9301-syscon
+ - const: syscon
+ - items:
+ - const: cirrus,ep9301-syscon
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ items:
+ - description: reference clock
+
+patternProperties:
+ '^pins-':
+ type: object
+ description: pin node
+ $ref: /schemas/pinctrl/pinmux-node.yaml
+
+ properties:
+ function:
+ enum: [ spi, ac97, i2s, pwm, keypad, pata, lcd, gpio ]
+
+ groups:
+ enum: [ ssp, ac97, i2s_on_ssp, i2s_on_ac97, pwm1, gpio1agrp,
+ gpio2agrp, gpio3agrp, gpio4agrp, gpio6agrp, gpio7agrp,
+ rasteronsdram0grp, rasteronsdram3grp, keypadgrp, idegrp ]
+
+ required:
+ - function
+ - groups
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ syscon@80930000 {
+ compatible = "cirrus,ep9301-syscon", "syscon";
+ reg = <0x80930000 0x1000>;
+
+ #clock-cells = <1>;
+ clocks = <&xtali>;
+
+ spi_default_pins: pins-spi {
+ function = "spi";
+ groups = "ssp";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml b/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml
index 453d493c941f..4693e85aed37 100644
--- a/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/cirrus,ep9301-i2s.yaml
@@ -40,6 +40,20 @@ properties:
- const: sclk
- const: lrclk
+ dmas:
+ items:
+ - description: out DMA channel
+ - description: in DMA channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- '#sound-dai-cells'
@@ -61,6 +75,8 @@ examples:
<&syscon 30>,
<&syscon 31>;
clock-names = "mclk", "sclk", "lrclk";
+ dmas = <&dma0 0 1>, <&dma0 0 2>;
+ dma-names = "tx", "rx";
};
...
diff --git a/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml b/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml
new file mode 100644
index 000000000000..73980a27dc00
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/cirrus,ep9301-spi.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/cirrus,ep9301-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EP93xx SoC SPI controller
+
+maintainers:
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-spi
+ - items:
+ - enum:
+ - cirrus,ep9302-spi
+ - cirrus,ep9307-spi
+ - cirrus,ep9312-spi
+ - cirrus,ep9315-spi
+ - const: cirrus,ep9301-spi
+
+ reg:
+ items:
+ - description: SPI registers region
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SPI Controller reference clock source
+
+ dmas:
+ items:
+ - description: rx DMA channel
+ - description: tx DMA channel
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+ spi@808a0000 {
+ compatible = "cirrus,ep9301-spi";
+ reg = <0x808a0000 0x18>;
+ interrupt-parent = <&vic1>;
+ interrupts = <21>;
+ clocks = <&syscon EP93XX_CLK_SPI>;
+ dmas = <&dma1 10 2>, <&dma1 10 1>;
+ dma-names = "rx", "tx";
+ cs-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml b/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml
new file mode 100644
index 000000000000..a44bdf391887
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/fsl,ls1028a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale layerscape SuperSpeed DWC3 USB SoC controller
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,ls1028a-dwc3
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,ls1028a-dwc3
+ - const: snps,dwc3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+allOf:
+ - $ref: snps,dwc3.yaml#
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ usb@fe800000 {
+ compatible = "fsl,ls1028a-dwc3", "snps,dwc3";
+ reg = <0xfe800000 0x100000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
deleted file mode 100644
index afc30e98b123..000000000000
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ /dev/null
@@ -1,110 +0,0 @@
-MSM SoC HSUSB controllers
-
-EHCI
-
-Required properties:
-- compatible: Should contain "qcom,ehci-host"
-- regs: offset and length of the register set in the memory map
-- usb-phy: phandle for the PHY device
-
-Example EHCI controller device node:
-
- ehci: ehci@f9a55000 {
- compatible = "qcom,ehci-host";
- reg = <0xf9a55000 0x400>;
- usb-phy = <&usb_otg>;
- };
-
-USB PHY with optional OTG:
-
-Required properties:
-- compatible: Should contain:
- "qcom,usb-otg-ci" for chipsets with ChipIdea 45nm PHY
- "qcom,usb-otg-snps" for chipsets with Synopsys 28nm PHY
-
-- regs: Offset and length of the register set in the memory map
-- interrupts: interrupt-specifier for the OTG interrupt.
-
-- clocks: A list of phandle + clock-specifier pairs for the
- clocks listed in clock-names
-- clock-names: Should contain the following:
- "phy" USB PHY reference clock
- "core" Protocol engine clock
- "iface" Interface bus clock
- "alt_core" Protocol engine clock for targets with asynchronous
- reset methodology. (optional)
-
-- vdccx-supply: phandle to the regulator for the vdd supply for
- digital circuit operation.
-- v1p8-supply: phandle to the regulator for the 1.8V supply
-- v3p3-supply: phandle to the regulator for the 3.3V supply
-
-- resets: A list of phandle + reset-specifier pairs for the
- resets listed in reset-names
-- reset-names: Should contain the following:
- "phy" USB PHY controller reset
- "link" USB LINK controller reset
-
-- qcom,otg-control: OTG control (VBUS and ID notifications) can be one of
- 1 - PHY control
- 2 - PMIC control
-
-Optional properties:
-- dr_mode: One of "host", "peripheral" or "otg". Defaults to "otg"
-
-- switch-gpio: A phandle + gpio-specifier pair. Some boards are using Dual
- SPDT USB Switch, witch is controlled by GPIO to de/multiplex
- D+/D- USB lines between connectors.
-
-- qcom,phy-init-sequence: PHY configuration sequence values. This is related to Device
- Mode Eye Diagram test. Start address at which these values will be
- written is ULPI_EXT_VENDOR_SPECIFIC. Value of -1 is reserved as
- "do not overwrite default value at this address".
- For example: qcom,phy-init-sequence = < -1 0x63 >;
- Will update only value at address ULPI_EXT_VENDOR_SPECIFIC + 1.
-
-- qcom,phy-num: Select number of pyco-phy to use, can be one of
- 0 - PHY one, default
- 1 - Second PHY
- Some platforms may have configuration to allow USB
- controller work with any of the two HSPHYs present.
-
-- qcom,vdd-levels: This property must be a list of three integer values
- (no, min, max) where each value represents either a voltage
- in microvolts or a value corresponding to voltage corner.
-
-- qcom,manual-pullup: If present, vbus is not routed to USB controller/phy
- and controller driver therefore enables pull-up explicitly
- before starting controller using usbcmd run/stop bit.
-
-- extcon: phandles to external connector devices. First phandle
- should point to external connector, which provide "USB"
- cable events, the second should point to external connector
- device, which provide "USB-HOST" cable events. If one of
- the external connector devices is not required empty <0>
- phandle should be specified.
-
-Example HSUSB OTG controller device node:
-
- usb@f9a55000 {
- compatible = "qcom,usb-otg-snps";
- reg = <0xf9a55000 0x400>;
- interrupts = <0 134 0>;
- dr_mode = "peripheral";
-
- clocks = <&gcc GCC_XO_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>,
- <&gcc GCC_USB_HS_AHB_CLK>;
-
- clock-names = "phy", "core", "iface";
-
- vddcx-supply = <&pm8841_s2_corner>;
- v1p8-supply = <&pm8941_l6>;
- v3p3-supply = <&pm8941_l24>;
-
- resets = <&gcc GCC_USB2A_PHY_BCR>, <&gcc GCC_USB_HS_BCR>;
- reset-names = "phy", "link";
-
- qcom,otg-control = <1>;
- qcom,phy-init-sequence = < -1 0x63 >;
- qcom,vdd-levels = <1 5 7>;
- };
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index efde47a5b145..18758efb8d29 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -52,6 +52,7 @@ properties:
- qcom,sm8550-dwc3
- qcom,sm8650-dwc3
- qcom,x1e80100-dwc3
+ - qcom,x1e80100-dwc3-mp
- const: qcom,dwc3
reg:
@@ -164,6 +165,7 @@ allOf:
contains:
enum:
- qcom,ipq4019-dwc3
+ - qcom,ipq5332-dwc3
then:
properties:
clocks:
@@ -267,7 +269,6 @@ allOf:
contains:
enum:
- qcom,ipq5018-dwc3
- - qcom,ipq5332-dwc3
- qcom,msm8994-dwc3
- qcom,qcs404-dwc3
then:
@@ -289,6 +290,7 @@ allOf:
- qcom,sc8280xp-dwc3
- qcom,sc8280xp-dwc3-mp
- qcom,x1e80100-dwc3
+ - qcom,x1e80100-dwc3-mp
then:
properties:
clocks:
@@ -428,6 +430,21 @@ allOf:
contains:
enum:
- qcom,ipq5332-dwc3
+ then:
+ properties:
+ interrupts:
+ maxItems: 3
+ interrupt-names:
+ items:
+ - const: pwr_event
+ - const: dp_hs_phy_irq
+ - const: dm_hs_phy_irq
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,x1e80100-dwc3
then:
properties:
@@ -486,6 +503,7 @@ allOf:
contains:
enum:
- qcom,sc8180x-dwc3-mp
+ - qcom,x1e80100-dwc3-mp
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index 95ff9791baea..653a89586f4e 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -13,10 +13,9 @@ properties:
compatible:
oneOf:
- const: ti,j721e-usb
- - const: ti,am64-usb
- items:
- - const: ti,j721e-usb
- const: ti,am64-usb
+ - const: ti,j721e-usb
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index e5d64fc4fe31..b320a39de7fe 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -368,6 +368,8 @@ patternProperties:
description: Devantech, Ltd.
"^dfi,.*":
description: DFI Inc.
+ "^dfrobot,.*":
+ description: DFRobot Corporation
"^dh,.*":
description: DH electronics GmbH
"^difrnce,.*":
@@ -1478,6 +1480,8 @@ patternProperties:
description: Terasic Inc.
"^tesla,.*":
description: Tesla, Inc.
+ "^test,.*":
+ description: Reserved for use by tests. For example, KUnit.
"^tfc,.*":
description: Three Five Corp
"^thead,.*":
@@ -1537,6 +1541,8 @@ patternProperties:
description: Turing Machines, Inc.
"^tyan,.*":
description: Tyan Computer Corporation
+ "^tyhx,.*":
+ description: NanjingTianyihexin Electronics Ltd.
"^u-blox,.*":
description: u-blox
"^u-boot,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml b/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml
new file mode 100644
index 000000000000..5dbe891c70c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/cirrus,ep9301-wdt.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/cirrus,ep9301-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic EP93xx Watchdog Timer
+
+maintainers:
+ - Nikita Shubin <nikita.shubin@maquefel.me>
+ - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: cirrus,ep9301-wdt
+ - items:
+ - enum:
+ - cirrus,ep9302-wdt
+ - cirrus,ep9307-wdt
+ - cirrus,ep9312-wdt
+ - cirrus,ep9315-wdt
+ - const: cirrus,ep9301-wdt
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ watchdog@80940000 {
+ compatible = "cirrus,ep9301-wdt";
+ reg = <0x80940000 0x08>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index eba454d1680f..29ada89fdcdc 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -75,6 +75,8 @@ properties:
- renesas,r8a779h0-wdt # R-Car V4M
- const: renesas,rcar-gen4-wdt # R-Car Gen4
+ - const: renesas,r9a09g057-wdt # RZ/V2H(P)
+
reg:
maxItems: 1
@@ -113,7 +115,6 @@ properties:
required:
- compatible
- reg
- - interrupts
- clocks
allOf:
@@ -137,6 +138,7 @@ allOf:
compatible:
contains:
enum:
+ - renesas,r9a09g057-wdt
- renesas,rzg2l-wdt
- renesas,rzv2m-wdt
then:
@@ -171,6 +173,19 @@ allOf:
interrupts:
maxItems: 1
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g057-wdt
+ then:
+ properties:
+ interrupts: false
+ interrupt-names: false
+ else:
+ required:
+ - interrupts
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
index 6b13bfc11e11..86bd39d50850 100644
--- a/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
+++ b/Documentation/devicetree/bindings/watchdog/st,stm32-iwdg.yaml
@@ -36,6 +36,12 @@ properties:
minItems: 1
maxItems: 2
+ interrupts:
+ maxItems: 1
+ description: Pre-timeout interrupt from the watchdog.
+
+ wakeup-source: true
+
required:
- compatible
- reg
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 94b3492dc301..de2cb8de6112 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -180,6 +180,7 @@ modpost
modules-only.symvers
modules.builtin
modules.builtin.modinfo
+modules.builtin.ranges
modules.nsdeps
modules.order
modversions.h*
diff --git a/Documentation/driver-api/cxl/access-coordinates.rst b/Documentation/driver-api/cxl/access-coordinates.rst
new file mode 100644
index 000000000000..b07950ea30c9
--- /dev/null
+++ b/Documentation/driver-api/cxl/access-coordinates.rst
@@ -0,0 +1,91 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+==================================
+CXL Access Coordinates Computation
+==================================
+
+Shared Upstream Link Calculation
+================================
+For certain CXL region constructions with endpoints behind CXL switches (SW)
+or Root Ports (RP), the total bandwidth of all the endpoints behind a switch
+can exceed that of the switch upstream link. A similar situation can occur
+within the host, upstream of the root ports. The CXL driver performs an
+additional pass after all the targets for a region have arrived, in order to
+recalculate the bandwidths with a possibly limiting upstream link in mind.
+
+The algorithm assumes the configuration is a symmetric topology, as that
+maximizes performance. When an asymmetric topology is detected, the
+calculation is aborted. An asymmetric topology is detected during the
+topology walk when the number of RPs detected as grandparents is not equal
+to the number of devices iterated in the same loop. The assumption is made
+that subtle asymmetry in properties does not happen and that all paths to
+EPs are equal.
+
+There can be multiple switches under an RP. There can be multiple RPs under
+a CXL Host Bridge (HB). There can be multiple HBs under a CXL Fixed Memory
+Window Structure (CFMWS).
+
+An example hierarchy::
+
+                             CFMWS 0
+                               |
+                      _________|_________
+                     |                   |
+               ACPI0017-0            ACPI0017-1
+        GP0/HB0/ACPI0016-0   GP1/HB1/ACPI0016-1
+           |          |        |           |
+          RP0        RP1      RP2         RP3
+           |          |        |           |
+         SW 0       SW 1     SW 2        SW 3
+         |  |       |  |     |  |        |  |
+        EP0 EP1    EP2 EP3  EP4 EP5     EP6 EP7
+
+Computation for the example hierarchy::
+
+  Min(GP0 to CPU BW,
+      Min(SW 0 Upstream Link to RP0 BW,
+          Min(SW0SSLBIS for SW0DSP0 (EP0), EP0 DSLBIS, EP0 Upstream Link) +
+          Min(SW0SSLBIS for SW0DSP1 (EP1), EP1 DSLBIS, EP1 Upstream Link)) +
+      Min(SW 1 Upstream Link to RP1 BW,
+          Min(SW1SSLBIS for SW1DSP0 (EP2), EP2 DSLBIS, EP2 Upstream Link) +
+          Min(SW1SSLBIS for SW1DSP1 (EP3), EP3 DSLBIS, EP3 Upstream Link))) +
+  Min(GP1 to CPU BW,
+      Min(SW 2 Upstream Link to RP2 BW,
+          Min(SW2SSLBIS for SW2DSP0 (EP4), EP4 DSLBIS, EP4 Upstream Link) +
+          Min(SW2SSLBIS for SW2DSP1 (EP5), EP5 DSLBIS, EP5 Upstream Link)) +
+      Min(SW 3 Upstream Link to RP3 BW,
+          Min(SW3SSLBIS for SW3DSP0 (EP6), EP6 DSLBIS, EP6 Upstream Link) +
+          Min(SW3SSLBIS for SW3DSP1 (EP7), EP7 DSLBIS, EP7 Upstream Link))))
+
+The calculation starts at cxl_region_shared_upstream_perf_update(). An xarray
+is created to collect all the endpoint bandwidths via the
+cxl_endpoint_gather_bandwidth() function. The min() of the bandwidth from the
+endpoint CDAT and the upstream link bandwidth is calculated. If the endpoint
+has a CXL switch as a parent, then the min() of the calculated bandwidth and
+the bandwidth from the SSLBIS for the switch downstream port associated with
+the endpoint is calculated. The final bandwidth is stored in a
+'struct cxl_perf_ctx' in the xarray, indexed by a device pointer. If the
+endpoint is directly attached to a root port (RP), the device pointer is an
+RP device. If the endpoint is behind a switch, the device pointer is the
+upstream device of the parent switch.
+
+At the next stage, the code walks through one or more switches if they exist
+in the topology. For endpoints directly attached to RPs, this step is skipped.
+The code takes the min() of the currently gathered bandwidth, the upstream
+link bandwidth, and, if there is yet another switch upstream, the SSLBIS of
+that upstream switch.
+
+Once the topology walk reaches the RP, whether via directly attached
+endpoints or by walking through the switch(es), cxl_rp_gather_bandwidth()
+is called. At this point all the bandwidths are aggregated per host bridge,
+which is also the index for the resulting xarray.
+
+The next step is to take the min() of the per-host-bridge bandwidth and the
+bandwidth from the Generic Port (GP). The bandwidth for the GP is retrieved
+via the ACPI SRAT/HMAT tables. The min bandwidths are aggregated under the
+same ACPI0017 device to form a new xarray.
+
+Finally, cxl_region_update_bandwidth() is called and the aggregated
+bandwidth from all the members of the last xarray is updated for the
+access coordinates residing in the cxl region (cxlr) context.
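+
+The staged min()/sum logic above can be illustrated with a small,
+self-contained C sketch. This is illustrative only: the helper names, the
+plain structs, and the hardcoded two-switch topology are made up for the
+example and do not mirror the driver's actual xarray-based implementation.
+
+.. code-block:: c
+
+   #include <stdio.h>
+
+   #define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+   /*
+    * One endpoint: its own CDAT bandwidth, its upstream link bandwidth,
+    * and the SSLBIS bandwidth of the switch downstream port it sits on.
+    */
+   struct ep {
+           unsigned int cdat_bw;
+           unsigned int up_link_bw;
+           unsigned int sslbis_bw;
+   };
+
+   /*
+    * Aggregate two endpoints behind one switch, then clamp the sum by
+    * the switch upstream link -- one Min(SW n Upstream Link ...) term
+    * of the computation above.
+    */
+   static unsigned int sw_bw(unsigned int sw_up_link_bw,
+                             const struct ep *e0, const struct ep *e1)
+   {
+           unsigned int b0 = MIN(e0->sslbis_bw,
+                                 MIN(e0->cdat_bw, e0->up_link_bw));
+           unsigned int b1 = MIN(e1->sslbis_bw,
+                                 MIN(e1->cdat_bw, e1->up_link_bw));
+
+           return MIN(sw_up_link_bw, b0 + b1);
+   }
+
+   int main(void)
+   {
+           struct ep ep0 = { 1000, 1000, 800 }, ep1 = { 1000, 1000, 800 };
+           struct ep ep2 = { 1000, 1000, 800 }, ep3 = { 1000, 1000, 800 };
+           unsigned int gp0_bw = 2500;     /* GP0 to CPU, from SRAT/HMAT */
+
+           /* Two switches under HB0, each upstream link capped at 1500. */
+           unsigned int hb0 = sw_bw(1500, &ep0, &ep1) +
+                              sw_bw(1500, &ep2, &ep3);
+
+           /* Clamp the host bridge aggregate by the GP bandwidth. */
+           printf("GP0 aggregate: %u\n", MIN(gp0_bw, hb0));  /* 2500 */
+           return 0;
+   }
+
+The real driver performs the same per-stage clamping, but gathers the
+intermediate results in xarrays keyed by device pointers, as described
+above.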
diff --git a/Documentation/driver-api/cxl/index.rst b/Documentation/driver-api/cxl/index.rst
index 12b82725d322..965ba90e8fb7 100644
--- a/Documentation/driver-api/cxl/index.rst
+++ b/Documentation/driver-api/cxl/index.rst
@@ -8,6 +8,7 @@ Compute Express Link
:maxdepth: 1
memory-devices
+ access-coordinates
maturity-map
diff --git a/Documentation/driver-api/firewire.rst b/Documentation/driver-api/firewire.rst
index d3cfa73cbb2b..28a32410f7d2 100644
--- a/Documentation/driver-api/firewire.rst
+++ b/Documentation/driver-api/firewire.rst
@@ -43,6 +43,8 @@ Firewire core transaction interfaces
Firewire Isochronous I/O interfaces
===================================
+.. kernel-doc:: include/linux/firewire.h
+ :functions: fw_iso_context_schedule_flush_completions
.. kernel-doc:: drivers/firewire/core-iso.c
:export:
diff --git a/Documentation/driver-api/media/mc-core.rst b/Documentation/driver-api/media/mc-core.rst
index 2456950ce8ff..1d010bd7ec49 100644
--- a/Documentation/driver-api/media/mc-core.rst
+++ b/Documentation/driver-api/media/mc-core.rst
@@ -144,7 +144,8 @@ valid values are described at :c:func:`media_create_pad_link()` and
Graph traversal
^^^^^^^^^^^^^^^
-The media framework provides APIs to iterate over entities in a graph.
+The media framework provides APIs to traverse media graphs, locating connected
+entities and links.
To iterate over all entities belonging to a media device, drivers can use
the media_device_for_each_entity macro, defined in
@@ -159,31 +160,6 @@ the media_device_for_each_entity macro, defined in
...
}
-Drivers might also need to iterate over all entities in a graph that can be
-reached only through enabled links starting at a given entity. The media
-framework provides a depth-first graph traversal API for that purpose.
-
-.. note::
-
- Graphs with cycles (whether directed or undirected) are **NOT**
- supported by the graph traversal API. To prevent infinite loops, the graph
- traversal code limits the maximum depth to ``MEDIA_ENTITY_ENUM_MAX_DEPTH``,
- currently defined as 16.
-
-Drivers initiate a graph traversal by calling
-:c:func:`media_graph_walk_start()`
-
-The graph structure, provided by the caller, is initialized to start graph
-traversal at the given entity.
-
-Drivers can then retrieve the next entity by calling
-:c:func:`media_graph_walk_next()`
-
-When the graph traversal is complete the function will return ``NULL``.
-
-Graph traversal can be interrupted at any moment. No cleanup function call
-is required and the graph structure can be freed normally.
-
Helper functions can be used to find a link between two given pads, or a pad
connected to another pad through an enabled link
(:c:func:`media_entity_find_link()`, :c:func:`media_pad_remote_pad_first()`,
@@ -276,6 +252,45 @@ Subsystems should facilitate link validation by providing subsystem specific
helper functions to provide easy access for commonly needed information, and
in the end provide a way to use driver-specific callbacks.
+Pipeline traversal
+^^^^^^^^^^^^^^^^^^
+
+Once a pipeline has been constructed with :c:func:`media_pipeline_start()`,
+drivers can iterate over entities or pads in the pipeline with the
+:c:macro:`media_pipeline_for_each_entity` and
+:c:macro:`media_pipeline_for_each_pad` macros. Iterating over pads is
+straightforward:
+
+.. code-block:: c
+
+ media_pipeline_pad_iter iter;
+ struct media_pad *pad;
+
+ media_pipeline_for_each_pad(pipe, &iter, pad) {
+ /* 'pad' will point to each pad in turn */
+ ...
+ }
+
+To iterate over entities, the iterator needs to be initialized and cleaned up
+as additional steps:
+
+.. code-block:: c
+
+ media_pipeline_entity_iter iter;
+ struct media_entity *entity;
+ int ret;
+
+ ret = media_pipeline_entity_iter_init(pipe, &iter);
+ if (ret)
+ ...;
+
+ media_pipeline_for_each_entity(pipe, &iter, entity) {
+ /* 'entity' will point to each entity in turn */
+ ...
+ }
+
+ media_pipeline_entity_iter_cleanup(&iter);
+
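+Putting the two iterators together, a driver could, for instance, run a
+check on every entity in a started pipeline as sketched below. This is an
+illustrative fragment, not mainline code; my_validate_entity() stands in
+for whatever per-entity check the driver needs:
+
+.. code-block:: c
+
+   static int my_validate_pipeline(struct media_pipeline *pipe)
+   {
+           media_pipeline_entity_iter iter;
+           struct media_entity *entity;
+           int ret;
+
+           ret = media_pipeline_entity_iter_init(pipe, &iter);
+           if (ret)
+                   return ret;
+
+           media_pipeline_for_each_entity(pipe, &iter, entity) {
+                   ret = my_validate_entity(entity);
+                   if (ret)
+                           break;
+           }
+
+           /* The cleanup must run whether or not the loop completed. */
+           media_pipeline_entity_iter_cleanup(&iter);
+           return ret;
+   }
+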
Media Controller Device Allocator API
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt
deleted file mode 100644
index 5a7508b8c967..000000000000
--- a/Documentation/features/vm/PG_uncached/arch-support.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Feature name: PG_uncached
-# Kconfig: ARCH_USES_PG_UNCACHED
-# description: arch supports the PG_uncached page flag
-#
- -----------------------
- | arch |status|
- -----------------------
- | alpha: | TODO |
- | arc: | TODO |
- | arm: | TODO |
- | arm64: | TODO |
- | csky: | TODO |
- | hexagon: | TODO |
- | loongarch: | TODO |
- | m68k: | TODO |
- | microblaze: | TODO |
- | mips: | TODO |
- | nios2: | TODO |
- | openrisc: | TODO |
- | parisc: | TODO |
- | powerpc: | TODO |
- | riscv: | TODO |
- | s390: | TODO |
- | sh: | TODO |
- | sparc: | TODO |
- | um: | TODO |
- | x86: | ok |
- | xtensa: | TODO |
- -----------------------
diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst
index d270a0aa8d55..2bbf68b56b0d 100644
--- a/Documentation/filesystems/9p.rst
+++ b/Documentation/filesystems/9p.rst
@@ -48,11 +48,66 @@ For server running on QEMU host with virtio transport::
mount -t 9p -o trans=virtio <mount_tag> /mnt/9
-where mount_tag is the tag associated by the server to each of the exported
+where mount_tag is the tag generated by the server for each of the exported
mount points. Each 9P export is seen by the client as a virtio device with an
associated "mount_tag" property. Available mount tags can be
seen by reading /sys/bus/virtio/drivers/9pnet_virtio/virtio<n>/mount_tag files.
+USBG Usage
+==========
+
+To mount a 9p FS on a USB Host accessible via the gadget at runtime::
+
+ mount -t 9p -o trans=usbg,aname=/path/to/fs <device> /mnt/9
+
+To mount a 9p FS on a USB Host accessible via the gadget as root filesystem::
+
+ root=<device> rootfstype=9p rootflags=trans=usbg,cache=loose,uname=root,access=0,dfltuid=0,dfltgid=0,aname=/path/to/rootfs
+
+where <device> is the tag assigned by the usb gadget transport.
+It is defined by the configfs instance name.
+
+USBG Example
+============
+
+The USB host exports a filesystem, while the gadget on the USB device
+side makes it mountable.
+
+Diod (9pfs server) and the forwarder are on the development host, where
+the root filesystem is actually stored. The gadget is initialized during
+boot (or later) on the embedded board. Then the forwarder will find it
+on the USB bus and start forwarding requests.
+
+In this case the 9p requests come from the device and are handled by the
+host. The reason is that USB device ports are normally not available on
+PCs, so a connection in the other direction would not work.
+
+When using the usbg transport, for now there is no native usb host
+service capable of handling the requests from the gadget driver. For
+this we have to use the extra python tool p9_fwd.py from tools/usb.
+
+Start a 9pfs-capable network server like diod or nfs-ganesha, e.g.::
+
+ $ diod -f -n -d 0 -S -l 0.0.0.0:9999 -e $PWD
+
+Optionally, if there is more than one usbg gadget, scan your bus to find their paths::
+
+ $ python $kernel_dir/tools/usb/p9_fwd.py list
+
+ Bus | Addr | Manufacturer | Product | ID | Path
+ --- | ---- | ---------------- | ---------------- | --------- | ----
+ 2 | 67 | unknown | unknown | 1d6b:0109 | 2-1.1.2
+ 2 | 68 | unknown | unknown | 1d6b:0109 | 2-1.1.3
+
+Then start the python transport::
+
+ $ python $kernel_dir/tools/usb/p9_fwd.py --path 2-1.1.2 connect -p 9999
+
+After that the gadget driver can be used as described above.
+
+One use case is as an alternative to NFS root booting during the
+development of embedded Linux devices.
+
Options
=======
@@ -68,6 +123,7 @@ Options
virtio connect to the next virtio channel available
(from QEMU with trans_virtio module)
rdma connect to a specified RDMA channel
+ usbg connect to a specified usb gadget channel
======== ============================================
uname=name user name to attempt mount as on the remote server. The
diff --git a/Documentation/filesystems/bcachefs/CodingStyle.rst b/Documentation/filesystems/bcachefs/CodingStyle.rst
index 0c45829a4899..01de555e21d8 100644
--- a/Documentation/filesystems/bcachefs/CodingStyle.rst
+++ b/Documentation/filesystems/bcachefs/CodingStyle.rst
@@ -175,7 +175,7 @@ errors in our thinking by running our code and seeing what happens. If your
time is being wasted because your tools are bad or too slow - don't accept it,
fix it.
-Put effort into your documentation, commmit messages, and code comments - but
+Put effort into your documentation, commit messages, and code comments - but
don't go overboard. A good commit message is wonderful - but if the information
was important enough to go in a commit message, ask yourself if it would be
even better as a code comment.
diff --git a/Documentation/filesystems/iomap/design.rst b/Documentation/filesystems/iomap/design.rst
index 37594e1c5914..b0d0188a095e 100644
--- a/Documentation/filesystems/iomap/design.rst
+++ b/Documentation/filesystems/iomap/design.rst
@@ -165,7 +165,7 @@ structure below:
u16 flags;
struct block_device *bdev;
struct dax_device *dax_dev;
- voidw *inline_data;
+ void *inline_data;
void *private;
const struct iomap_folio_ops *folio_ops;
u64 validity_cookie;
@@ -426,7 +426,7 @@ iomap is concerned:
The exact locking requirements are specific to the filesystem; for
certain operations, some of these locks can be elided.
-All further mention of locking are *recommendations*, not mandates.
+All further mentions of locking are *recommendations*, not mandates.
Each filesystem author must figure out the locking for themself.
Bugs and Limitations
diff --git a/Documentation/filesystems/nfs/index.rst b/Documentation/filesystems/nfs/index.rst
index 8536134f31fd..95c2c009874c 100644
--- a/Documentation/filesystems/nfs/index.rst
+++ b/Documentation/filesystems/nfs/index.rst
@@ -8,6 +8,7 @@ NFS
client-identifier
exporting
+ localio
pnfs
rpc-cache
rpc-server-gss
diff --git a/Documentation/filesystems/nfs/localio.rst b/Documentation/filesystems/nfs/localio.rst
new file mode 100644
index 000000000000..bd1967e2eab3
--- /dev/null
+++ b/Documentation/filesystems/nfs/localio.rst
@@ -0,0 +1,357 @@
+===========
+NFS LOCALIO
+===========
+
+Overview
+========
+
+The LOCALIO auxiliary RPC protocol allows the Linux NFS client and
+server to reliably handshake to determine if they are on the same
+host. Select "NFS client and server support for LOCALIO auxiliary
+protocol" in menuconfig to enable CONFIG_NFS_LOCALIO in the kernel
+config (both CONFIG_NFS_FS and CONFIG_NFSD must also be enabled).
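+
+As a quick sanity check, one can verify that a running kernel was built with
+the option, e.g. (the config file location varies by distribution; this is
+only a sketch)::
+
+    $ grep NFS_LOCALIO /boot/config-$(uname -r)
+    CONFIG_NFS_LOCALIO=y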
+
+Once an NFS client and server handshake as "local", the client will
+bypass the network RPC protocol for read, write and commit operations.
+Due to this XDR and RPC bypass, these operations will complete faster.
+
+The LOCALIO auxiliary protocol's implementation, which uses the same
+connection as NFS traffic, follows the pattern established by the NFS
+ACL protocol extension.
+
+The LOCALIO auxiliary protocol is needed to allow robust discovery of
+clients local to their servers. In a private implementation that
+preceded use of this LOCALIO protocol, a fragile match of sockaddr network
+addresses against all local network interfaces was attempted.
+But unlike the LOCALIO protocol, the sockaddr-based matching didn't
+handle use of iptables or containers.
+
+The robust handshake between local client and server is just the
+beginning; the ultimate use case this locality makes possible is that
+the client is able to open files and issue reads, writes and commits
+directly to the server without having to go over the network. The
+requirement is to perform these loopback NFS operations as efficiently
+as possible; this is particularly useful for container use cases
+(e.g. kubernetes) where it is possible to run an IO job local to the
+server.
+
+The performance advantage realized from LOCALIO's ability to bypass
+using XDR and RPC for reads, writes and commits can be extreme, e.g.:
+
+fio for 20 secs with directio, qd of 8, 16 libaio threads:
+ - With LOCALIO:
+ 4K read: IOPS=979k, BW=3825MiB/s (4011MB/s)(74.7GiB/20002msec)
+ 4K write: IOPS=165k, BW=646MiB/s (678MB/s)(12.6GiB/20002msec)
+ 128K read: IOPS=402k, BW=49.1GiB/s (52.7GB/s)(982GiB/20002msec)
+ 128K write: IOPS=11.5k, BW=1433MiB/s (1503MB/s)(28.0GiB/20004msec)
+
+ - Without LOCALIO:
+ 4K read: IOPS=79.2k, BW=309MiB/s (324MB/s)(6188MiB/20003msec)
+ 4K write: IOPS=59.8k, BW=234MiB/s (245MB/s)(4671MiB/20002msec)
+ 128K read: IOPS=33.9k, BW=4234MiB/s (4440MB/s)(82.7GiB/20004msec)
+ 128K write: IOPS=11.5k, BW=1434MiB/s (1504MB/s)(28.0GiB/20011msec)
+
+fio for 20 secs with directio, qd of 8, 1 libaio thread:
+ - With LOCALIO:
+ 4K read: IOPS=230k, BW=898MiB/s (941MB/s)(17.5GiB/20001msec)
+ 4K write: IOPS=22.6k, BW=88.3MiB/s (92.6MB/s)(1766MiB/20001msec)
+ 128K read: IOPS=38.8k, BW=4855MiB/s (5091MB/s)(94.8GiB/20001msec)
+ 128K write: IOPS=11.4k, BW=1428MiB/s (1497MB/s)(27.9GiB/20001msec)
+
+ - Without LOCALIO:
+ 4K read: IOPS=77.1k, BW=301MiB/s (316MB/s)(6022MiB/20001msec)
+ 4K write: IOPS=32.8k, BW=128MiB/s (135MB/s)(2566MiB/20001msec)
+ 128K read: IOPS=24.4k, BW=3050MiB/s (3198MB/s)(59.6GiB/20001msec)
+ 128K write: IOPS=11.4k, BW=1430MiB/s (1500MB/s)(27.9GiB/20001msec)
+
+FAQ
+===
+
+1. What are the use cases for LOCALIO?
+
+ a. Workloads where the NFS client and server are on the same host
+ realize improved IO performance. In particular, it is common when
+ running containerised workloads for jobs to find themselves
+ running on the same host as the knfsd server being used for
+ storage.
+
+2. What are the requirements for LOCALIO?
+
+ a. Bypass use of the network RPC protocol as much as possible. This
+ includes bypassing XDR and RPC for open, read, write and commit
+ operations.
+ b. Allow client and server to autonomously discover if they are
+ running local to each other without making any assumptions about
+ the local network topology.
+ c. Support the use of containers by being compatible with relevant
+ namespaces (e.g. network, user, mount).
+ d. Support all versions of NFS. NFSv3 is of particular importance
+ because it has wide enterprise usage and pNFS flexfiles makes use
+ of it for the data path.
+
+3. Why doesn’t LOCALIO just compare IP addresses or hostnames when
+ deciding if the NFS client and server are co-located on the same
+ host?
+
+ Since one of the main use cases is containerised workloads, we cannot
+ assume that IP addresses will be shared between the client and
+ server. This sets up a requirement for a handshake protocol that
+ needs to go over the same connection as the NFS traffic in order to
+ identify that the client and the server really are running on the
+ same host. The handshake uses a secret that is sent over the wire,
+ and can be verified by both parties by comparing with a value stored
+ in shared kernel memory if they are truly co-located.
+
+4. Does LOCALIO improve pNFS flexfiles?
+
+ Yes, LOCALIO complements pNFS flexfiles by allowing it to take
+ advantage of NFS client and server locality. Policy that initiates
+ client IO as close as possible to the server where the data is stored
+ naturally benefits from the data path optimization LOCALIO provides.
+
+5. Why not develop a new pNFS layout to enable LOCALIO?
+
+ A new pNFS layout could be developed, but doing so would put the
+ onus on the server to somehow discover that the client is co-located
+ when deciding to hand out the layout.
+ There is value in a simpler approach (as provided by LOCALIO) that
+ allows the NFS client to negotiate and leverage locality without
+ requiring more elaborate modeling and discovery of such locality in a
+ more centralized manner.
+
+6. Why is having the client perform a server-side file OPEN, without
+ using RPC, beneficial? Is the benefit pNFS specific?
+
+ Avoiding the use of XDR and RPC for file opens is beneficial to
+ performance regardless of whether pNFS is used. Especially when
+ dealing with small files, it's best to avoid going over the wire
+ whenever possible; otherwise it could reduce or even negate the
+ benefits of avoiding the wire for doing the small file I/O itself.
+ Given LOCALIO's requirements the current approach of having the
+ client perform a server-side file open, without using RPC, is ideal.
+ If in the future requirements change then we can adapt accordingly.
+
+7. Why is LOCALIO only supported with UNIX Authentication (AUTH_UNIX)?
+
+ Strong authentication is usually tied to the connection itself. It
+ works by establishing a context that is cached by the server, and
+ that acts as the key for discovering the authorisation token, which
+ can then be passed to rpc.mountd to complete the authentication
+ process. On the other hand, in the case of AUTH_UNIX, the credential
+ that was passed over the wire is used directly as the key in the
+ upcall to rpc.mountd. This simplifies the authentication process, and
+ so makes AUTH_UNIX easier to support.
+
+8. How do export options that translate RPC user IDs behave for LOCALIO
+ operations (e.g. root_squash, all_squash)?
+
+ Export options that translate user IDs are managed by nfsd_setuser()
+ which is called by nfsd_setuser_and_check_port() which is called by
+ __fh_verify(). So they get handled exactly the same way for LOCALIO
+ as they do for non-LOCALIO.
+
+9. How does LOCALIO make certain that object lifetimes are managed
+ properly given NFSD and NFS operate in different contexts?
+
+ See the detailed "NFS Client and Server Interlock" section below.
+
+RPC
+===
+
+The LOCALIO auxiliary RPC protocol consists of a single "UUID_IS_LOCAL"
+RPC method that allows the Linux NFS client to verify the local Linux
+NFS server can see the nonce (single-use UUID) the client generated and
+made available in nfs_common. This protocol isn't part of an IETF
+standard, nor does it need to be, considering it is a Linux-to-Linux
+auxiliary RPC protocol that amounts to an implementation detail.
+
+The UUID_IS_LOCAL method encodes the client generated uuid_t in terms of
+the fixed UUID_SIZE (16 bytes). The fixed size opaque encode and decode
+XDR methods are used instead of the less efficient variable sized
+methods.
+
+The RPC program number for the NFS_LOCALIO_PROGRAM is 400122 (as assigned
+by IANA, see https://www.iana.org/assignments/rpc-program-numbers/ ):
+Linux Kernel Organization 400122 nfslocalio
+
+The LOCALIO protocol spec in rpcgen syntax is::
+
+ /* raw RFC 9562 UUID */
+ #define UUID_SIZE 16
+ typedef u8 uuid_t<UUID_SIZE>;
+
+ program NFS_LOCALIO_PROGRAM {
+ version LOCALIO_V1 {
+ void
+ NULL(void) = 0;
+
+ void
+ UUID_IS_LOCAL(uuid_t) = 1;
+ } = 1;
+ } = 400122;
+
+LOCALIO uses the same transport connection as NFS traffic. As such,
+LOCALIO is not registered with rpcbind.
+
+NFS Common and Client/Server Handshake
+======================================
+
+fs/nfs_common/nfslocalio.c provides interfaces that enable an NFS client
+to generate a nonce (single-use UUID) and an associated short-lived
+nfs_uuid_t struct, and to register it with nfs_common for subsequent
+lookup and verification by the NFS server; if matched, the NFS server
+populates members in the nfs_uuid_t struct. The NFS client then uses
+nfs_common to transfer the nfs_uuid_t from nfs_common's uuids_list to
+the nn->nfsd_serv clients_list. See:
+fs/nfs/localio.c:nfs_local_probe()
+
+nfs_common's nfs_uuids list is the basis for LOCALIO enablement; as
+such, it has members that point to nfsd memory for direct use by the
+client (e.g. 'net' is the server's network namespace, through which the
+client can access nn->nfsd_serv with proper rcu read access). It is this
+client and server synchronization that enables advanced usage and
+allows the lifetime of objects to span from the host kernel's nfsd to
+per-container knfsd instances that are connected to nfs clients running
+on the same local host.
+
+NFS Client and Server Interlock
+===============================
+
+LOCALIO provides the nfs_uuid_t object and associated interfaces to
+allow proper network namespace (net-ns) and NFSD object refcounting:
+
+ We don't want to keep a long-term counted reference on each NFSD's
+ net-ns in the client because that prevents a server container from
+ completely shutting down.
+
+ So we avoid taking a reference at all and rely on the per-cpu
+ reference to the server (detailed below) being sufficient to keep
+ the net-ns active. This involves allowing the NFSD's net-ns exit
+ code to iterate all active clients and clear their ->net pointers
+ (which are needed to find the per-cpu-refcount for the nfsd_serv).
+
+ Details:
+
+ - Embed nfs_uuid_t in nfs_client. nfs_uuid_t provides a list_head
+ that can be used to find the client. It does add the 16-byte
+ uuid_t to nfs_client so it is bigger than needed (given that
+ uuid_t is only used during the initial NFS client and server
+ LOCALIO handshake to determine if they are local to each other).
+ If that is really a problem we can find a fix.
+
+ - When the nfs server confirms that the uuid_t is local, it moves
+ the nfs_uuid_t onto a per-net-ns list in NFSD's nfsd_net.
+
+ - When each server's net-ns is shutting down - in a "pre_exit"
+ handler, all these nfs_uuid_t have their ->net cleared. There is
+ an rcu_synchronize() call between pre_exit() handlers and exit()
+ handlers so any caller that sees nfs_uuid_t ->net as not NULL can
+ safely manage the per-cpu-refcount for nfsd_serv.
+
+ - The client's nfs_uuid_t is passed to nfsd_open_local_fh() so it
+ can safely dereference ->net in a private rcu_read_lock() section
+ to allow safe access to the associated nfsd_net and nfsd_serv.
+
+So LOCALIO required the introduction and use of NFSD's percpu_ref to
+interlock nfsd_destroy_serv() and nfsd_open_local_fh(), to ensure each
+nn->nfsd_serv is not destroyed while in use by nfsd_open_local_fh().
+This warrants a more detailed explanation:
+
+ nfsd_open_local_fh() uses nfsd_serv_try_get() before opening its
+ nfsd_file handle and then the caller (NFS client) must drop the
+ reference for the nfsd_file and associated nn->nfsd_serv using
+ nfs_file_put_local() once it has completed its IO.
+
+ This interlock working relies heavily on nfsd_open_local_fh() being
+ afforded the ability to safely deal with the possibility that the
+ NFSD's net-ns (and nfsd_net by association) may have been destroyed
+ by nfsd_destroy_serv() via nfsd_shutdown_net() -- which is only
+ possible given the nfs_uuid_t ->net pointer management detailed
+ above.
+
+All told, this elaborate interlock of the NFS client and server has been
+verified to fix an easy to hit crash that would occur if an NFSD
+instance running in a container, with a LOCALIO client mounted, is
+shutdown. Upon restart of the container and associated NFSD, the client
+would go on to crash due to a NULL pointer dereference caused by the
+LOCALIO client attempting to call nfsd_open_local_fh(), using
+nn->nfsd_serv, without having a proper reference on nn->nfsd_serv.
+
+NFS Client issues IO instead of Server
+======================================
+
+Because LOCALIO is focused on protocol bypass to achieve improved IO
+performance, alternatives to the traditional NFS wire protocol (SUNRPC
+with XDR) must be provided to access the backing filesystem.
+
+See fs/nfs/localio.c:nfs_local_open_fh() and
+fs/nfsd/localio.c:nfsd_open_local_fh() for the interface that makes
+focused use of select nfs server objects to allow a client local to a
+server to open a file pointer without needing to go over the network.
+
+The client's fs/nfs/localio.c:nfs_local_open_fh() will call into the
+server's fs/nfsd/localio.c:nfsd_open_local_fh() and carefully access
+both the associated nfsd network namespace and nn->nfsd_serv in terms of
+RCU. If nfsd_open_local_fh() finds that the client no longer sees valid
+nfsd objects (be it struct net or nn->nfsd_serv) it returns -ENXIO
+to nfs_local_open_fh() and the client will try to reestablish the
+LOCALIO resources needed by calling nfs_local_probe() again. This
+recovery is needed if/when an nfsd instance running in a container were
+to reboot while a LOCALIO client is connected to it.
+
+Once the client has an open nfsd_file pointer it will issue reads,
+writes and commits directly to the underlying local filesystem (normally
+done by the nfs server). As such, for these operations, the NFS client
+is issuing IO to the underlying local filesystem that it is sharing with
+the NFS server. See: fs/nfs/localio.c:nfs_local_doio() and
+fs/nfs/localio.c:nfs_local_commit().
+
+Security
+========
+
+LOCALIO is only supported when UNIX-style authentication (AUTH_UNIX, aka
+AUTH_SYS) is used.
+
+Care is taken to ensure the same NFS security mechanisms are used
+(authentication, etc) regardless of whether LOCALIO or regular NFS
+access is used. The auth_domain established as part of the traditional
+NFS client access to the NFS server is also used for LOCALIO.
+
+Relative to containers, LOCALIO gives the client access to the network
+namespace the server has. This is required to allow the client to access
+the server's per-namespace nfsd_net struct. With traditional NFS, the
+client is afforded this same level of access (albeit in terms of the NFS
+protocol via SUNRPC). No other namespaces (user, mount, etc) have been
+altered or purposely extended from the server to the client.
+
+Testing
+=======
+
+The LOCALIO auxiliary protocol and associated NFS LOCALIO read, write
+and commit access have proven stable against various test scenarios:
+
+- Client and server both on the same host.
+
+- All permutations of client and server support enablement for both
+ local and remote client and server.
+
+- Testing against NFS storage products that don't support the LOCALIO
+ protocol was also performed.
+
+- Client on host, server within a container (for both v3 and v4.2).
+ The container testing was in terms of podman managed containers and
+ includes successful container stop/restart scenario.
+
+- Formalizing these test scenarios in terms of existing test
+ infrastructure is ongoing. Initial regular coverage is provided in
+ terms of ktest running xfstests against a LOCALIO-enabled NFS loopback
+ mount configuration, and includes lockdep and KASAN coverage, see:
+ https://evilpiepirate.org/~testdashboard/ci?user=snitzer&branch=snitm-nfs-next
+ https://github.com/koverstreet/ktest
+
+- Various kdevops testing (in terms of "Chuck's BuildBot") has been
+ performed to regularly verify the LOCALIO changes haven't caused any
+ regressions to non-LOCALIO NFS use cases.
+
+- All of Hammerspace's various sanity tests pass with LOCALIO enabled
+ (this includes numerous pNFS and flexfiles tests).
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 4f67b5ea0568..0b18af3f954e 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -913,8 +913,7 @@ cache in your filesystem. The following members are defined:
stop attempting I/O, it can simply return. The caller will
remove the remaining pages from the address space, unlock them
and decrement the page refcount. Set PageUptodate if the I/O
- completes successfully. Setting PageError on any page will be
- ignored; simply unlock the page if an I/O error occurs.
+ completes successfully.
``write_begin``
Called by the generic buffered write code to ask the filesystem
diff --git a/Documentation/gpu/amdgpu/driver-core.rst b/Documentation/gpu/amdgpu/driver-core.rst
index 467e6843aef6..32723a925377 100644
--- a/Documentation/gpu/amdgpu/driver-core.rst
+++ b/Documentation/gpu/amdgpu/driver-core.rst
@@ -179,4 +179,4 @@ IP Blocks
:doc: IP Blocks
.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
- :identifiers: amd_ip_block_type amd_ip_funcs
+ :identifiers: amd_ip_block_type amd_ip_funcs DC_DEBUG_MASK
diff --git a/Documentation/gpu/introduction.rst b/Documentation/gpu/introduction.rst
index b7c0baf97dbe..3cd0c8860b94 100644
--- a/Documentation/gpu/introduction.rst
+++ b/Documentation/gpu/introduction.rst
@@ -154,11 +154,11 @@ Conference talks
* `An Overview of the Linux and Userspace Graphics Stack <https://www.youtube.com/watch?v=wjAJmqwg47k>`_ - Paul Kocialkowski (2020)
* `Getting pixels on screen on Linux: introduction to Kernel Mode Setting <https://www.youtube.com/watch?v=haes4_Xnc5Q>`_ - Simon Ser (2020)
-* `Everything Great about Upstream Graphics <https://www.youtube.com/watch?v=kVzHOgt6WGE>`_ - Daniel Vetter (2019)
+* `Everything Great about Upstream Graphics <https://www.youtube.com/watch?v=kVzHOgt6WGE>`_ - Simona Vetter (2019)
* `An introduction to the Linux DRM subsystem <https://www.youtube.com/watch?v=LbDOCJcDRoo>`_ - Maxime Ripard (2017)
-* `Embrace the Atomic (Display) Age <https://www.youtube.com/watch?v=LjiB_JeDn2M>`_ - Daniel Vetter (2016)
+* `Embrace the Atomic (Display) Age <https://www.youtube.com/watch?v=LjiB_JeDn2M>`_ - Simona Vetter (2016)
* `Anatomy of an Atomic KMS Driver <https://www.youtube.com/watch?v=lihqR9sENpc>`_ - Laurent Pinchart (2015)
-* `Atomic Modesetting for Drivers <https://www.youtube.com/watch?v=kl9suFgbTc8>`_ - Daniel Vetter (2015)
+* `Atomic Modesetting for Drivers <https://www.youtube.com/watch?v=kl9suFgbTc8>`_ - Simona Vetter (2015)
* `Anatomy of an Embedded KMS Driver <https://www.youtube.com/watch?v=Ja8fM7rTae4>`_ - Laurent Pinchart (2013)
Slides and articles
@@ -169,8 +169,8 @@ Slides and articles
* `Understanding the Linux Graphics Stack <https://bootlin.com/doc/training/graphics/graphics-slides.pdf>`_ - Bootlin (2022)
* `DRM KMS overview <https://wiki.st.com/stm32mpu/wiki/DRM_KMS_overview>`_ - STMicroelectronics (2021)
* `Linux graphic stack <https://studiopixl.com/2017-05-13/linux-graphic-stack-an-overview>`_ - Nathan Gauër (2017)
-* `Atomic mode setting design overview, part 1 <https://lwn.net/Articles/653071/>`_ - Daniel Vetter (2015)
-* `Atomic mode setting design overview, part 2 <https://lwn.net/Articles/653466/>`_ - Daniel Vetter (2015)
+* `Atomic mode setting design overview, part 1 <https://lwn.net/Articles/653071/>`_ - Simona Vetter (2015)
+* `Atomic mode setting design overview, part 2 <https://lwn.net/Articles/653466/>`_ - Simona Vetter (2015)
* `The DRM/KMS subsystem from a newbie’s point of view <https://bootlin.com/pub/conferences/2014/elce/brezillon-drm-kms/brezillon-drm-kms.pdf>`_ - Boris Brezillon (2014)
* `A brief introduction to the Linux graphics stack <https://blogs.igalia.com/itoral/2014/07/29/a-brief-introduction-to-the-linux-graphics-stack/>`_ - Iago Toral (2014)
* `The Linux Graphics Stack <https://blog.mecheye.net/2012/06/the-linux-graphics-stack/>`_ - Jasper St. Pierre (2012)
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 2ea6ffc9b22b..2b281e3c75a4 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -37,7 +37,7 @@ Audit each individual driver, make sure it'll work with the generic
implementation (there's lots of outdated locking leftovers in various
implementations), and then remove it.
-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers
Level: Intermediate
@@ -61,7 +61,7 @@ do by directly using the new atomic helper driver callbacks.
.. [2] https://lwn.net/Articles/653071/
.. [3] https://lwn.net/Articles/653466/
-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers
Level: Advanced
@@ -75,7 +75,7 @@ helper should also be moved from drm_plane_helper.c to the atomic helpers, to
avoid confusion - the other helpers in that file are all deprecated legacy
helpers.
-Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
+Contact: Ville Syrjälä, Simona Vetter, driver maintainers
Level: Advanced
@@ -97,7 +97,7 @@ with the current helpers:
- Then we could go through all the drivers and remove the more-or-less confused
checks for plane_state->fb and plane_state->crtc.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Advanced
@@ -116,7 +116,7 @@ Somewhat related is the legacy_cursor_update hack, which should be replaced with
the new atomic_async_check/commit functionality in the helpers in drivers that
still look at that flag.
-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers
Level: Advanced
@@ -169,7 +169,7 @@ interfaces to fix these issues:
``_helper_funcs`` since they are not part of the core ABI. There's a
``FIXME`` comment in the kerneldoc for each such case in ``drm_crtc.h``.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -194,7 +194,7 @@ performance-critical drivers it might also be better to go with a more
fine-grained per-buffer object and per-context lockings scheme. Currently only
the ``msm`` and `i915` drivers use ``struct_mutex``.
-Contact: Daniel Vetter, respective driver maintainers
+Contact: Simona Vetter, respective driver maintainers
Level: Advanced
@@ -251,7 +251,7 @@ being rewritten without dependencies on the fbdev module. Some of the
helpers could further benefit from using struct iosys_map instead of
raw pointers.
-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Simona Vetter
Level: Advanced
@@ -297,7 +297,7 @@ Various hold-ups:
version of the varios drm_gem_fb_create functions. Maybe called
drm_gem_fb_create/_with_dirty/_with_funcs as needed.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -329,7 +329,7 @@ everything after it has done the write-protect/mkwrite trickery:
Might be good to also have some igt testcases for this.
-Contact: Daniel Vetter, Noralf Tronnes
+Contact: Simona Vetter, Noralf Tronnes
Level: Advanced
@@ -359,7 +359,7 @@ between setting up the &drm_driver structure and calling drm_dev_register().
- Once all drivers are converted, remove the load/unload callbacks.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -422,7 +422,7 @@ The task is to use struct iosys_map where it makes sense.
* TTM might benefit from using struct iosys_map internally.
* Framebuffer copying and blitting helpers should operate on struct iosys_map.
-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Simona Vetter
Level: Intermediate
@@ -475,25 +475,22 @@ Remove disable/unprepare in remove/shutdown in panel-simple and panel-edp
As of commit d2aacaf07395 ("drm/panel: Check for already prepared/enabled in
drm_panel"), we have a check in the drm_panel core to make sure nobody
double-calls prepare/enable/disable/unprepare. Eventually that should probably
-be turned into a WARN_ON() or somehow made louder, but right now we actually
-expect it to trigger and so we don't want it to be too loud.
-
-Specifically, that warning will trigger for panel-edp and panel-simple at
-shutdown time because those panels hardcode a call to drm_panel_disable()
-and drm_panel_unprepare() at shutdown and remove time that they call regardless
-of panel state. On systems with a properly coded DRM modeset driver that
-calls drm_atomic_helper_shutdown() this is pretty much guaranteed to cause
-the warning to fire.
-
-Unfortunately we can't safely remove the calls in panel-edp and panel-simple
-until we're sure that all DRM modeset drivers that are used with those panels
-properly call drm_atomic_helper_shutdown(). This TODO item is to validate
-that all DRM modeset drivers used with panel-edp and panel-simple properly
-call drm_atomic_helper_shutdown() and then remove the calls to
-disable/unprepare from those panels. Alternatively, this TODO item could be
-removed by convincing stakeholders that those calls are fine and downgrading
-the error message in drm_panel_disable() / drm_panel_unprepare() to a
-debug-level message.
+be turned into a WARN_ON() or somehow made louder.
+
+At the moment, we expect that we may still encounter the warnings in the
+drm_panel core when using panel-simple and panel-edp. Since those panel
+drivers are used with a lot of different DRM modeset drivers they still
+make an extra effort to disable/unprepare the panel themselves at shutdown
+time. Specifically we could still encounter those warnings if the panel
+driver gets shutdown() _before_ the DRM modeset driver and the DRM modeset
+driver properly calls drm_atomic_helper_shutdown() in its own shutdown()
+callback. Warnings could be avoided in such a case by using something like
+device links to ensure that the panel gets shutdown() after the DRM modeset
+driver.
+
+Once all DRM modeset drivers are known to shutdown properly, the extra
+calls to disable/unprepare in remove/shutdown in panel-simple and panel-edp
+should be removed and this TODO item marked complete.
Contact: Douglas Anderson <dianders@chromium.org>
@@ -561,7 +558,7 @@ This is a really varied tasks with lots of little bits and pieces:
<https://lore.kernel.org/lkml/1446217392-11981-1-git-send-email-alexandru.murtaza@intel.com/>`_
for some example code that could be reused.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Advanced
@@ -590,7 +587,7 @@ There's a bunch of issues with it:
this (together with the drm_minor->drm_device move) would allow us to remove
debugfs_init.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -611,7 +608,7 @@ Both these problems can be solved by switching over to drmm_kzalloc(), and the
various convenience wrappers provided, e.g. drmm_crtc_alloc_with_planes(),
drmm_universal_plane_alloc(), ... and so on.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
@@ -631,7 +628,7 @@ cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
this problem for USB devices by fishing out the USB host controller device, as
long as that supports DMA. Otherwise importing can still needlessly fail.
-Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Simona Vetter
Level: Advanced
@@ -712,7 +709,7 @@ Plan to fix this:
2. In all, only look at one of the three status bits set by the above helpers.
3. Remove the other two status bits.
-Contact: Daniel Vetter
+Contact: Simona Vetter
Level: Intermediate
diff --git a/Documentation/gpu/xe/xe_mm.rst b/Documentation/gpu/xe/xe_mm.rst
index 6c8fd8b4a466..95864a4502dd 100644
--- a/Documentation/gpu/xe/xe_mm.rst
+++ b/Documentation/gpu/xe/xe_mm.rst
@@ -7,6 +7,21 @@ Memory Management
.. kernel-doc:: drivers/gpu/drm/xe/xe_bo_doc.h
:doc: Buffer Objects (BO)
+GGTT
+====
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt.c
+ :doc: Global Graphics Translation Table (GGTT)
+
+GGTT Internal API
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt_types.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/xe/xe_ggtt.c
+ :internal:
+
Pagetable building
==================
diff --git a/Documentation/i2c/slave-testunit-backend.rst b/Documentation/i2c/slave-testunit-backend.rst
index 37142a48ab35..d752f433be07 100644
--- a/Documentation/i2c/slave-testunit-backend.rst
+++ b/Documentation/i2c/slave-testunit-backend.rst
@@ -20,11 +20,25 @@ Instantiating the device is regular. Example for bus 0, address 0x30::
# echo "slave-testunit 0x1030" > /sys/bus/i2c/devices/i2c-0/new_device
-After that, you will have a write-only device listening. Reads will just return
-an 8-bit version number of the testunit. When writing, the device consists of 4
-8-bit registers and, except for some "partial" commands, all registers must be
-written to start a testcase, i.e. you usually write 4 bytes to the device. The
-registers are:
+Or using firmware nodes. Here is a devicetree example (note this is only a
+debug device, so there are no official DT bindings)::
+
+ &i2c0 {
+ ...
+
+ testunit@30 {
+ compatible = "slave-testunit";
+ reg = <(0x30 | I2C_OWN_SLAVE_ADDRESS)>;
+ };
+ };
+
+After that, you will have the device listening. Reading will return a single
+byte. Its value is 0 if the testunit is idle; otherwise, it is the command
+number of the currently running command.
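+
+For example, reading while no command is running should return 0::
+
+    # i2cget -y 0 0x30
+    0x00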
+
+When writing, the device consists of 4 8-bit registers and, except for some
+"partial" commands, all registers must be written to start a testcase, i.e. you
+usually write 4 bytes to the device. The registers are:
.. csv-table::
:header: "Offset", "Name", "Description"
@@ -75,7 +89,7 @@ from another device on the bus. If the bus master under test also wants to
access the bus at the same time, the bus will be busy. Example to read 128
bytes from device 0x50 after 50ms of delay::
- # i2cset -y 0 0x30 0x01 0x50 0x80 0x05 i
+ # i2cset -y 0 0x30 1 0x50 0x80 5 i
0x02 SMBUS_HOST_NOTIFY
~~~~~~~~~~~~~~~~~~~~~~
@@ -95,9 +109,9 @@ bytes from device 0x50 after 50ms of delay::
Also needs master mode. This test will send an SMBUS_HOST_NOTIFY message to the
host. Note that the status word is currently ignored in the Linux Kernel.
-Example to send a notification after 10ms::
+Example to send a notification with status word 0x6442 after 10ms::
- # i2cset -y 0 0x30 0x02 0x42 0x64 0x01 i
+ # i2cset -y 0 0x30 2 0x42 0x64 1 i
If the host controller supports HostNotify, this message with debug level
should appear (Linux 6.11 and later)::
@@ -116,7 +130,7 @@ should appear (Linux 6.11 and later)::
- DELAY
* - 0x03
- - must be '1', i.e. one further byte will be written
+ - 0x01 (i.e. one further byte will be written)
- number of bytes to be sent back
- leave out, partial command!
@@ -131,5 +145,91 @@ from length-1 to 0. Here is an example which emulates
i2c_smbus_block_process_call() using i2ctransfer (you need i2c-tools v4.2 or
later)::
- # i2ctransfer -y 0 w3@0x30 0x03 0x01 0x10 r?
+ # i2ctransfer -y 0 w3@0x30 3 1 0x10 r?
0x10 0x0f 0x0e 0x0d 0x0c 0x0b 0x0a 0x09 0x08 0x07 0x06 0x05 0x04 0x03 0x02 0x01 0x00
+
+0x04 GET_VERSION_WITH_REP_START
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. list-table::
+ :header-rows: 1
+
+ * - CMD
+ - DATAL
+ - DATAH
+ - DELAY
+
+ * - 0x04
+ - currently unused
+ - currently unused
+ - leave out, partial command!
+
+Partial command. After sending this command, the testunit will reply to a read
+message with a NUL terminated version string based on UTS_RELEASE. The first
+character is always a 'v' and the length of the version string is at most
+128 bytes. However, it will only respond if the read message is connected to
+the write message via repeated start. If your controller driver handles
+repeated start correctly, this will work::
+
+ # i2ctransfer -y 0 w3@0x30 4 0 0 r128
+ 0x76 0x36 0x2e 0x31 0x31 0x2e 0x30 0x2d 0x72 0x63 0x31 0x2d 0x30 0x30 0x30 0x30 ...
+
+If you have i2c-tools 4.4 or later, you can print out the data right away::
+
+ # i2ctransfer -y -b 0 w3@0x30 4 0 0 r128
+ v6.11.0-rc1-00009-gd37a1b4d3fd0
+
+STOP/START combinations between the two messages will *not* work because they
+are not equivalent to a REPEATED START. As an example, this returns just the
+default response::
+
+ # i2cset -y 0 0x30 4 0 0 i; i2cget -y 0 0x30
+ 0x00
+
+0x05 SMBUS_ALERT_REQUEST
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. list-table::
+ :header-rows: 1
+
+ * - CMD
+ - DATAL
+ - DATAH
+ - DELAY
+
+ * - 0x05
+ - response value (7 MSBs interpreted as I2C address)
+ - currently unused
+ - n * 10ms
+
+This test raises an interrupt via the SMBAlert pin which the host controller
+must handle. The pin must be connected to the testunit as a GPIO. GPIO access
+is not allowed to sleep. Currently, this can only be described using firmware
+nodes. So, for devicetree, you would add something like this to the testunit
+node::
+
+ gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
+
+The following command will trigger the alert with a response of 0xc9 after 1
+second of delay::
+
+ # i2cset -y 0 0x30 5 0xc9 0x00 100 i
+
+If the host controller supports SMBusAlert, this message with debug level
+should appear::
+
+ smbus_alert 0-000c: SMBALERT# from dev 0x64, flag 1
+
+This message may appear more than once because the testunit is implemented in
+software, not hardware, and thus may not be able to react to the host's
+response fast enough. The interrupt count should increase only by one, though::
+
+ # cat /proc/interrupts | grep smbus_alert
+ 93: 1 gpio-rcar 26 Edge smbus_alert
+
+If the host does not respond to the alert within 1 second, the test will be
+aborted and the testunit will report an error.
+
+For this test, the testunit will briefly drop its assigned address and listen
+on the SMBus Alert Response Address (0x0c). It will reassign its original
+address afterwards.
diff --git a/Documentation/iio/ad4000.rst b/Documentation/iio/ad4000.rst
new file mode 100644
index 000000000000..de8fd3ae6e62
--- /dev/null
+++ b/Documentation/iio/ad4000.rst
@@ -0,0 +1,131 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=============
+AD4000 driver
+=============
+
+Device driver for Analog Devices Inc. AD4000 series of ADCs.
+
+Supported devices
+=================
+
+* `AD4000 <https://www.analog.com/AD4000>`_
+* `AD4001 <https://www.analog.com/AD4001>`_
+* `AD4002 <https://www.analog.com/AD4002>`_
+* `AD4003 <https://www.analog.com/AD4003>`_
+* `AD4004 <https://www.analog.com/AD4004>`_
+* `AD4005 <https://www.analog.com/AD4005>`_
+* `AD4006 <https://www.analog.com/AD4006>`_
+* `AD4007 <https://www.analog.com/AD4007>`_
+* `AD4008 <https://www.analog.com/AD4008>`_
+* `AD4010 <https://www.analog.com/AD4010>`_
+* `AD4011 <https://www.analog.com/AD4011>`_
+* `AD4020 <https://www.analog.com/AD4020>`_
+* `AD4021 <https://www.analog.com/AD4021>`_
+* `AD4022 <https://www.analog.com/AD4022>`_
+* `ADAQ4001 <https://www.analog.com/ADAQ4001>`_
+* `ADAQ4003 <https://www.analog.com/ADAQ4003>`_
+
+Wiring connections
+------------------
+
+Devices of the AD4000 series can be connected to the SPI host controller in a
+few different modes.
+
+CS mode, 3-wire turbo mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Datasheet "3-wire" mode is what most resembles standard SPI connection which,
+for these devices, comprises of connecting the controller CS line to device CNV
+pin and other SPI lines as usual. This configuration is (misleadingly) called
+"CS Mode, 3-Wire Turbo Mode" connection in datasheets.
+NOTE: The datasheet definition of 3-wire mode for the AD4000 series is NOT the
+same of standard spi-3wire mode.
+This is the only connection mode that allows configuration register access but
+it requires the SPI controller to support the ``SPI_MOSI_IDLE_HIGH`` feature.
+
+Omit the ``adi,sdi-pin`` property in device tree to select this mode.
+
+::
+
+ +-------------+
+ + ----------------------------------| SDO |
+ | | |
+ | +-------------------| CS |
+ | v | |
+ | +--------------------+ | HOST |
+ | | CNV | | |
+ +--->| SDI AD4000 SDO |-------->| SDI |
+ | SCK | | |
+ +--------------------+ | |
+ ^ | |
+ +--------------------| SCLK |
+ +-------------+
+
+CS mode, 3-wire, without busy indicator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Another wiring configuration supported as "3-wire" mode has the SDI pin
+hard-wired to digital input/output interface supply (VIO). In this setup, the
+controller is not required to support ``SPI_MOSI_IDLE_HIGH`` but register access
+is not possible. This connection mode saves one wire and works with any SPI
+controller.
+
+Set the ``adi,sdi-pin`` device tree property to ``"high"`` to select this mode.
+
+::
+
+ +-------------+
+ +--------------------| CS |
+ v | |
+ VIO +--------------------+ | HOST |
+ | | CNV | | |
+ +--->| SDI AD4000 SDO |-------->| SDI |
+ | SCK | | |
+ +--------------------+ | |
+ ^ | |
+ +--------------------| SCLK |
+ +-------------+
+
+Alternatively, a GPIO may be connected to the device CNV pin. This is similar to
+the previous wiring configuration but saves the use of a CS line.
+
+::
+
+ +-------------+
+ +--------------------| GPIO |
+ v | |
+ VIO +--------------------+ | HOST |
+ | | CNV | | |
+ +--->| SDI AD4000 SDO |-------->| SDI |
+ | SCK | | |
+ +--------------------+ | |
+ ^ | |
+ +--------------------| SCLK |
+ +-------------+
+
+CS mode, 4-wire without busy indicator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In datasheet "4-wire" mode, the controller CS line is connected to the ADC SDI
+pin and a GPIO is connected to the ADC CNV pin. This connection mode may better
+suit scenarios where multiple ADCs can share one CNV trigger.
+
+Set ``adi,sdi-pin`` to ``"cs"`` to select this mode.
+
+
+::
+
+ +-------------+
+ + ----------------------------------| CS |
+ | | |
+ | +-------------------| GPIO |
+ | v | |
+ | +--------------------+ | HOST |
+ | | CNV | | |
+ +--->| SDI AD4000 SDO |-------->| SDI |
+ | SCK | | |
+ +--------------------+ | |
+ ^ | |
+ +--------------------| SCLK |
+ +-------------+
diff --git a/Documentation/iio/ad4695.rst b/Documentation/iio/ad4695.rst
new file mode 100644
index 000000000000..33ed29b7c98a
--- /dev/null
+++ b/Documentation/iio/ad4695.rst
@@ -0,0 +1,167 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=============
+AD4695 driver
+=============
+
+ADC driver for Analog Devices Inc. AD4695 and similar devices. The module name
+is ``ad4695``.
+
+
+Supported devices
+=================
+
+The following chips are supported by this driver:
+
+* `AD4695 <https://www.analog.com/AD4695>`_
+* `AD4696 <https://www.analog.com/AD4696>`_
+* `AD4697 <https://www.analog.com/AD4697>`_
+* `AD4698 <https://www.analog.com/AD4698>`_
+
+
+Supported features
+==================
+
+SPI wiring modes
+----------------
+
+The driver currently supports the following SPI wiring configuration:
+
+4-wire mode
+^^^^^^^^^^^
+
+In this mode, CNV and CS are tied together and there is a single SDO line.
+
+.. code-block::
+
+ +-------------+ +-------------+
+ | CS |<-+------| CS |
+ | CNV |<-+ | |
+ | ADC | | HOST |
+ | | | |
+ | SDI |<--------| SDO |
+ | SDO |-------->| SDI |
+ | SCLK |<--------| SCLK |
+ +-------------+ +-------------+
+
+To use this mode, in the device tree, omit the ``cnv-gpios`` and
+``spi-rx-bus-width`` properties.
+
+Channel configuration
+---------------------
+
+Since the chip supports multiple ways to configure each channel, this must be
+described in the device tree based on what is actually wired up to the inputs.
+
+There are three typical configurations:
+
+An ``INx`` pin is used as the positive input with the ``REFGND``, ``COM`` or
+the next ``INx`` pin as the negative input.
+
+Pairing with REFGND
+^^^^^^^^^^^^^^^^^^^
+
+Each ``INx`` pin can be used as a pseudo-differential input in conjunction with
+the ``REFGND`` pin. The device tree will look like this:
+
+.. code-block::
+
+ channel@0 {
+ reg = <0>; /* IN0 */
+ };
+
+If no other channel properties are needed (e.g. ``adi,no-high-z``), the channel
+node can be omitted entirely.
+
+This will appear on the IIO bus as the ``voltage0`` channel. The processed value
+(*raw × scale*) will be the voltage present on the ``IN0`` pin relative to
+``REFGND``. (Offset is always 0 when pairing with ``REFGND``.)
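+
+As a usage sketch (the device index is an assumption for this example), the
+processed value can be computed by hand from sysfs:
+
+.. code-block::
+
+    # cd /sys/bus/iio/devices/iio:device0
+    # cat in_voltage0_raw in_voltage0_scale
+
+Multiplying the two values printed above gives the ``IN0`` voltage in
+millivolts.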
+
+Pairing with COM
+^^^^^^^^^^^^^^^^
+
+Each ``INx`` pin can be used as a pseudo-differential input in conjunction with
+the ``COM`` pin. The device tree will look like this:
+
+.. code-block::
+
+ com-supply = <&vref_div_2>;
+
+ channel@1 {
+ reg = <1>; /* IN1 */
+ common-mode-channel = <AD4695_COMMON_MODE_COM>;
+ bipolar;
+ };
+
+This will appear on the IIO bus as the ``voltage1`` channel. The processed value
+(*(raw + offset) × scale*) will be the voltage measured on the ``IN1`` pin
+relative to ``REFGND``. (The offset is determined by the ``com-supply`` voltage.)
+
+The macro comes from:
+
+.. code-block::
+
+ #include <dt-bindings/iio/adi,ad4695.h>
+
+Pairing two INx pins
+^^^^^^^^^^^^^^^^^^^^
+
+An even-numbered ``INx`` pin and the following odd-numbered ``INx`` pin can be
+used as a pseudo-differential input. The device tree for using ``IN2`` as the
+positive input and ``IN3`` as the negative input will look like this:
+
+.. code-block::
+
+ in3-supply = <&vref_div_2>;
+
+ channel@2 {
+ reg = <2>; /* IN2 */
+ common-mode-channel = <3>; /* IN3 */
+ bipolar;
+ };
+
+This will appear on the IIO bus as the ``voltage2`` channel. The processed value
+(*(raw + offset) × scale*) will be the voltage measured on the ``IN2`` pin
+relative to ``REFGND``. (Offset is determined by the ``in3-supply`` voltage.)
+
+VCC supply
+----------
+
+The chip supports being powered by an external LDO via the ``VCC`` input or an
+internal LDO via the ``LDO_IN`` input. The driver looks at the device tree to
+determine which is being used. If ``ldo-supply`` is present, then the internal
+LDO is used. If ``vcc-supply`` is present, then the external LDO is used and
+the internal LDO is disabled.
+
+Reference voltage
+-----------------
+
+The chip supports an external reference voltage via the ``REF`` input or an
+internal buffered reference voltage via the ``REFIN`` input. The driver looks
+at the device tree to determine which is being used. If ``ref-supply`` is
+present, then the external reference voltage is used and the internal buffer is
+disabled. If ``refin-supply`` is present, then the internal buffered reference
+voltage is used.
+
+Gain/offset calibration
+-----------------------
+
+System calibration is supported using the channel gain and offset registers via
+the ``calibscale`` and ``calibbias`` attributes respectively.
+
+Unimplemented features
+----------------------
+
+- Additional wiring modes
+- Threshold events
+- Oversampling
+- GPIO support
+- CRC support
+
+Device buffers
+==============
+
+This driver supports hardware triggered buffers. This uses the "advanced
+sequencer" feature of the chip to trigger a burst of conversions.
+
+Also see :doc:`iio_devbuf` for more general information.
diff --git a/Documentation/iio/ad7380.rst b/Documentation/iio/ad7380.rst
new file mode 100644
index 000000000000..9c784c1e652e
--- /dev/null
+++ b/Documentation/iio/ad7380.rst
@@ -0,0 +1,130 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=============
+AD7380 driver
+=============
+
+ADC driver for Analog Devices Inc. AD7380 and similar devices. The module name
+is ``ad7380``.
+
+
+Supported devices
+=================
+
+The following chips are supported by this driver:
+
+* `AD7380 <https://www.analog.com/en/products/ad7380.html>`_
+* `AD7381 <https://www.analog.com/en/products/ad7381.html>`_
+* `AD7383 <https://www.analog.com/en/products/ad7383.html>`_
+* `AD7384 <https://www.analog.com/en/products/ad7384.html>`_
+* `AD7386 <https://www.analog.com/en/products/ad7386.html>`_
+* `AD7387 <https://www.analog.com/en/products/ad7387.html>`_
+* `AD7388 <https://www.analog.com/en/products/ad7388.html>`_
+* `AD7380-4 <https://www.analog.com/en/products/ad7380-4.html>`_
+* `AD7381-4 <https://www.analog.com/en/products/ad7381-4.html>`_
+* `AD7383-4 <https://www.analog.com/en/products/ad7383-4.html>`_
+* `AD7384-4 <https://www.analog.com/en/products/ad7384-4.html>`_
+* `AD7386-4 <https://www.analog.com/en/products/ad7386-4.html>`_
+* `AD7387-4 <https://www.analog.com/en/products/ad7387-4.html>`_
+* `AD7388-4 <https://www.analog.com/en/products/ad7388-4.html>`_
+
+
+Supported features
+==================
+
+SPI wiring modes
+----------------
+
+ad738x ADCs can output data on several SDO lines (1/2/4). The driver currently
+supports only 1 SDO line.
+
+Reference voltage
+-----------------
+
+2 possible reference voltage sources are supported:
+
+- Internal reference (2.5V)
+- External reference (2.5V to 3.3V)
+
+The source is determined by the device tree. If ``refio-supply`` is present,
+then the external reference is used, else the internal reference is used.
+
+Oversampling and resolution boost
+---------------------------------
+
+This family supports 2 types of oversampling: normal average and rolling
+average. Only normal average is supported by the driver, as rolling average can
+be achieved by processing a captured data buffer. The following ratios are
+available: 1 (oversampling disabled)/2/4/8/16/32.
+
+When the on-chip oversampling function is enabled, the performance of the ADC can
+exceed the default resolution. To accommodate the performance boost achievable,
+it is possible to enable an additional two bits of resolution. Because the
+resolution boost feature can only be enabled when oversampling is enabled and
+oversampling is not as useful without the resolution boost, the driver
+automatically enables the resolution boost if and only if oversampling is
+enabled.
+
+Since the resolution boost feature gives 16-bit chips 18-bit data, which means
+the storagebits have to change from 16 to 32 bits, the driver uses the new
+ext_scan_type feature to allow changing the scan_type at runtime. Unfortunately,
+libiio does not support this, so when enabling or disabling oversampling, the
+user must restart iiod using the following command:
+
+.. code-block:: bash
+
+ root:~# systemctl restart iiod
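+
+As a sketch of the full userspace flow (assuming the standard IIO
+``oversampling_ratio`` attributes are exposed for this device):
+
+.. code-block:: bash
+
+    root:/sys/bus/iio/devices/iio:device0> cat oversampling_ratio_available
+    1 2 4 8 16 32
+    root:/sys/bus/iio/devices/iio:device0> echo 16 > oversampling_ratio
+    root:/sys/bus/iio/devices/iio:device0> systemctl restart iiod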
+
+Channel selection and sequencer (single-end chips only)
+-------------------------------------------------------
+
+Single-ended chips of this family (ad7386/7/8(-4)) have a 2:1 multiplexer in
+front of each ADC. They also include additional configuration registers that
+allow for either manual selection or automatic switching (sequencer mode), of
+the multiplexer inputs.
+
+From an IIO point of view, all inputs are exported, i.e. ad7386/7/8
+export 4 channels and ad7386-4/7-4/8-4 export 8 channels.
+
+Inputs ``AinX0`` of the multiplexers correspond to the first half of the IIO
+channels (i.e. 0-1 or 0-3) and inputs ``AinX1`` correspond to the second half
+(i.e. 2-3 or 4-7). Example for AD7386/7/8 (2-channel parts):
+
+.. code-block::
+
+ IIO | AD7386/7/8
+ | +----------------------------
+ | | _____ ______
+ | | | | | |
+ voltage0 | AinA0 --|--->| | | |
+ | | | mux |----->| ADCA |---
+ voltage2 | AinA1 --|--->| | | |
+ | | |_____| |_____ |
+ | | _____ ______
+ | | | | | |
+ voltage1 | AinB0 --|--->| | | |
+ | | | mux |----->| ADCB |---
+ voltage3 | AinB1 --|--->| | | |
+ | | |_____| |______|
+ | |
+ | +----------------------------
+
+
+When enabling sequencer mode, the effective sampling rate is divided by two.
+
+Unimplemented features
+----------------------
+
+- 2/4 SDO lines
+- Rolling average oversampling
+- Power down mode
+- CRC indication
+- Alert
+
+
+Device buffers
+==============
+
+This driver supports IIO triggered buffers.
+
+See :doc:`iio_devbuf` for more information.
diff --git a/Documentation/iio/adxl380.rst b/Documentation/iio/adxl380.rst
new file mode 100644
index 000000000000..376dee5fe1dd
--- /dev/null
+++ b/Documentation/iio/adxl380.rst
@@ -0,0 +1,233 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+ADXL380 driver
+===============
+
+This driver supports Analog Device's ADXL380/382 on SPI/I2C bus.
+
+1. Supported devices
+====================
+
+* `ADXL380 <https://www.analog.com/ADXL380>`_
+* `ADXL382 <https://www.analog.com/ADXL382>`_
+
+The ADXL380/ADXL382 is a low noise density, low power, 3-axis accelerometer with
+selectable measurement ranges. The ADXL380 supports the ±4 g, ±8 g, and ±16 g
+ranges, and the ADXL382 supports ±15 g, ±30 g, and ±60 g ranges.
+
+2. Device attributes
+====================
+
+Accelerometer measurements are always provided.
+
+Temperature data are also provided. This data can be used to monitor the
+internal system temperature or to improve the temperature stability of the
+device via calibration.
+
+Each IIO device has a device folder under ``/sys/bus/iio/devices/iio:deviceX``,
+where X is the IIO index of the device. Under these folders reside a set of
+device files, depending on the characteristics and features of the hardware
+device in question. These files are consistently generalized and documented in
+the IIO ABI documentation.
+
+The following tables show the adxl380 related device files, found in the
+specific device folder path ``/sys/bus/iio/devices/iio:deviceX``.
+
++---------------------------------------------------+----------------------------------------------------------+
+| 3-Axis Accelerometer related device files | Description |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_scale | Scale for the accelerometer channels. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_low_pass_3db_frequency            | Low pass filter bandwidth.                               |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_low_pass_3db_frequency_available  | Available low pass filter bandwidth configurations.      |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_high_pass_3db_frequency           | High pass filter bandwidth.                              |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_high_pass_3db_frequency_available | Available high pass filter bandwidth configurations.     |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_x_calibbias | Calibration offset for the X-axis accelerometer channel. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_x_raw | Raw X-axis accelerometer channel value. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_y_calibbias                              | Calibration offset for the Y-axis accelerometer channel. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_y_raw | Raw Y-axis accelerometer channel value. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_z_calibbias | Calibration offset for the Z-axis accelerometer channel. |
++---------------------------------------------------+----------------------------------------------------------+
+| in_accel_z_raw | Raw Z-axis accelerometer channel value. |
++---------------------------------------------------+----------------------------------------------------------+
+
++----------------------------------+--------------------------------------------+
+| Temperature sensor related files | Description |
++----------------------------------+--------------------------------------------+
+| in_temp_raw | Raw temperature channel value. |
++----------------------------------+--------------------------------------------+
+| in_temp_offset | Offset for the temperature sensor channel. |
++----------------------------------+--------------------------------------------+
+| in_temp_scale | Scale for the temperature sensor channel. |
++----------------------------------+--------------------------------------------+
+
++------------------------------+----------------------------------------------+
+| Miscellaneous device files | Description |
++------------------------------+----------------------------------------------+
+| name | Name of the IIO device. |
++------------------------------+----------------------------------------------+
+| sampling_frequency | Currently selected sample rate. |
++------------------------------+----------------------------------------------+
+| sampling_frequency_available | Available sampling frequency configurations. |
++------------------------------+----------------------------------------------+
+
+Channels processed values
+-------------------------
+
+A channel value can be read from its _raw attribute. The value returned is the
+raw value as reported by the device. To get the processed value of the channel,
+apply the following formula:
+
+.. code-block:: none
+
+ processed value = (_raw + _offset) * _scale
+
+where _offset and _scale are device attributes. If no _offset attribute is
+present, simply assume its value is 0.
+
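+As an illustration, below is a minimal userspace sketch of this computation in
+C. The device path and the use of the X-axis channel are assumptions for the
+example, and error handling is kept to a minimum:
+
+.. code-block:: c
+
+   #include <stdio.h>
+
+   /* Read one numeric sysfs attribute; a missing attribute reads as 0.0. */
+   static double read_attr(const char *dev, const char *attr)
+   {
+           char path[256];
+           double val = 0.0;
+           FILE *f;
+
+           snprintf(path, sizeof(path), "%s/%s", dev, attr);
+           f = fopen(path, "r");
+           if (f) {
+                   if (fscanf(f, "%lf", &val) != 1)
+                           val = 0.0;
+                   fclose(f);
+           }
+           return val;
+   }
+
+   int main(void)
+   {
+           const char *dev = "/sys/bus/iio/devices/iio:device0";
+           /* No _offset attribute exists for these channels, so 0.0 is used. */
+           double raw = read_attr(dev, "in_accel_x_raw");
+           double offset = read_attr(dev, "in_accel_offset");
+           double scale = read_attr(dev, "in_accel_scale");
+
+           /* processed value = (_raw + _offset) * _scale */
+           printf("%f m/s^2\n", (raw + offset) * scale);
+           return 0;
+   }
+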
+The adxl380 driver offers data for two types of channels. The table below shows
+the measurement units for the processed values, which are defined by the IIO
+framework:
+
++-------------------------------------+---------------------------+
+| Channel type | Measurement unit |
++-------------------------------------+---------------------------+
+| Acceleration on X, Y, and Z axes    | Meters per second squared |
++-------------------------------------+---------------------------+
+| Temperature | Millidegrees Celsius |
++-------------------------------------+---------------------------+
+
+Usage examples
+--------------
+
+Show device name:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat name
+ adxl382
+
+Show accelerometer channel values:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_raw
+ -1771
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_y_raw
+ 282
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_z_raw
+ -1523
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_scale
+ 0.004903325
+
+- X-axis acceleration = in_accel_x_raw * in_accel_scale = -8.683788575 m/s^2
+- Y-axis acceleration = in_accel_y_raw * in_accel_scale = 1.38273765 m/s^2
+- Z-axis acceleration = in_accel_z_raw * in_accel_scale = -7.467763975 m/s^2
+
+Set calibration offset for accelerometer channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias
+ 0
+
+ root:/sys/bus/iio/devices/iio:device0> echo 50 > in_accel_x_calibbias
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias
+ 50
+
+Set sampling frequency:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency
+ 16000
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency_available
+ 16000 32000 64000
+
+ root:/sys/bus/iio/devices/iio:device0> echo 32000 > sampling_frequency
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency
+ 32000
+
+Set low pass filter bandwidth for accelerometer channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency
+ 32000
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency_available
+ 32000 8000 4000 2000
+
+ root:/sys/bus/iio/devices/iio:device0> echo 2000 > in_accel_filter_low_pass_3db_frequency
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency
+ 2000
+
+3. Device buffers
+=================
+
+This driver supports IIO buffers.
+
+All devices support retrieving the raw acceleration and temperature measurements
+using buffers.
+
+Usage examples
+--------------
+
+Select channels for buffer read:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_x_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_y_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_accel_z_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_temp_en
+
+Set the number of samples to be stored in the buffer:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 10 > buffer/length
+
+Enable buffer readings:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > buffer/enable
+
+Obtain buffered data:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> hexdump -C /dev/iio\:device0
+ ...
+ 002bc300 f7 e7 00 a8 fb c5 24 80 f7 e7 01 04 fb d6 24 80 |......$.......$.|
+ 002bc310 f7 f9 00 ab fb dc 24 80 f7 c3 00 b8 fb e2 24 80 |......$.......$.|
+ 002bc320 f7 fb 00 bb fb d1 24 80 f7 b1 00 5f fb d1 24 80 |......$...._..$.|
+ 002bc330 f7 c4 00 c6 fb a6 24 80 f7 a6 00 68 fb f1 24 80 |......$....h..$.|
+ 002bc340 f7 b8 00 a3 fb e7 24 80 f7 9a 00 b1 fb af 24 80 |......$.......$.|
+ 002bc350 f7 b1 00 67 fb ee 24 80 f7 96 00 be fb 92 24 80 |...g..$.......$.|
+ 002bc360 f7 ab 00 7a fc 1b 24 80 f7 b6 00 ae fb 76 24 80 |...z..$......v$.|
+ 002bc370 f7 ce 00 a3 fc 02 24 80 f7 c0 00 be fb 8b 24 80 |......$.......$.|
+ 002bc380 f7 c3 00 93 fb d0 24 80 f7 ce 00 d8 fb c8 24 80 |......$.......$.|
+ 002bc390 f7 bd 00 c0 fb 82 24 80 f8 00 00 e8 fb db 24 80 |......$.......$.|
+ 002bc3a0 f7 d8 00 d3 fb b4 24 80 f8 0b 00 e5 fb c3 24 80 |......$.......$.|
+ 002bc3b0 f7 eb 00 c8 fb 92 24 80 f7 e7 00 ea fb cb 24 80 |......$.......$.|
+ 002bc3c0 f7 fd 00 cb fb 94 24 80 f7 e3 00 f2 fb b8 24 80 |......$.......$.|
+ ...
+
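+A hedged C sketch for unpacking such records is shown below. It assumes each
+scan holds the four enabled channels as 16-bit big-endian values (X, Y, Z
+acceleration followed by temperature), which is what the hexdump above
+suggests; the authoritative layout is described by the
+``scan_elements/*_type`` attributes:
+
+.. code-block:: c
+
+   #include <stdint.h>
+   #include <stdio.h>
+
+   /* Interpret two bytes as a signed 16-bit big-endian sample. */
+   static int16_t be16(const uint8_t *p)
+   {
+           return (int16_t)((p[0] << 8) | p[1]);
+   }
+
+   int main(void)
+   {
+           uint8_t scan[8]; /* 4 channels x 2 bytes, per the setup above */
+           FILE *f = fopen("/dev/iio:device0", "rb");
+
+           if (!f)
+                   return 1;
+           while (fread(scan, sizeof(scan), 1, f) == 1)
+                   printf("x=%d y=%d z=%d temp=%d\n",
+                          be16(&scan[0]), be16(&scan[2]),
+                          be16(&scan[4]), be16(&scan[6]));
+           fclose(f);
+           return 0;
+   }
+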
+See ``Documentation/iio/iio_devbuf.rst`` for more information about how buffered
+data is structured.
+
+4. IIO Interfacing Tools
+========================
+
+See ``Documentation/iio/iio_tools.rst`` for the description of the available IIO
+interfacing tools.
diff --git a/Documentation/iio/index.rst b/Documentation/iio/index.rst
index 9cb4c50cb20d..dfcf9618568a 100644
--- a/Documentation/iio/index.rst
+++ b/Documentation/iio/index.rst
@@ -18,8 +18,12 @@ Industrial I/O Kernel Drivers
.. toctree::
:maxdepth: 1
+ ad4000
+ ad4695
+ ad7380
ad7944
adis16475
adis16480
+ adxl380
bno055
ep93xx_adc
diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
index 9c8d1d046ea5..1796b3eba37b 100644
--- a/Documentation/kbuild/kbuild.rst
+++ b/Documentation/kbuild/kbuild.rst
@@ -22,6 +22,11 @@ modules.builtin.modinfo
This file contains modinfo from all modules that are built into the kernel.
Unlike modinfo of a separate module, all fields are prefixed with module name.
+modules.builtin.ranges
+----------------------
+This file contains address offset ranges (per ELF section) for all modules
+that are built into the kernel. Together with System.map, it can be used
+to associate module names with symbols.
Environment variables
=====================
@@ -129,6 +134,11 @@ KBUILD_OUTPUT
-------------
Specify the output directory when building the kernel.
+This variable can also be used to point to the kernel output directory when
+building external modules against a pre-built kernel in a separate build
+directory. Please note that this does NOT specify the output directory for the
+external modules themselves.
+
The output directory can also be specified using "O=...".
Setting "O=..." takes precedence over KBUILD_OUTPUT.
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 71b38a7670f3..43037be96a16 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -70,7 +70,11 @@ applicable everywhere (see syntax).
Every menu entry can have at most one prompt, which is used to display
to the user. Optionally dependencies only for this prompt can be added
- with "if".
+ with "if". If a prompt is not present, the config option is a non-visible
+ symbol, meaning its value cannot be directly changed by the user (such as
+ altering the value in ``.config``) and the option will not appear in any
+ config menus. Its value can only be set via "default" and "select" (see
+ below).
- default value: "default" <expr> ["if" <expr>]
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index be43990f1e7f..7964e0c245ae 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -1665,6 +1665,5 @@ Credits
TODO
====
-- Describe how kbuild supports shipped files with _shipped.
- Generating offset header files.
- Add more variables to chapters 7 or 9?
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index 131863142cbb..cd5a54d91e6d 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -4,41 +4,12 @@ Building External Modules
This document describes how to build an out-of-tree kernel module.
-.. Table of Contents
-
- === 1 Introduction
- === 2 How to Build External Modules
- --- 2.1 Command Syntax
- --- 2.2 Options
- --- 2.3 Targets
- --- 2.4 Building Separate Files
- === 3. Creating a Kbuild File for an External Module
- --- 3.1 Shared Makefile
- --- 3.2 Separate Kbuild file and Makefile
- --- 3.3 Binary Blobs
- --- 3.4 Building Multiple Modules
- === 4. Include Files
- --- 4.1 Kernel Includes
- --- 4.2 Single Subdirectory
- --- 4.3 Several Subdirectories
- === 5. Module Installation
- --- 5.1 INSTALL_MOD_PATH
- --- 5.2 INSTALL_MOD_DIR
- === 6. Module Versioning
- --- 6.1 Symbols From the Kernel (vmlinux + modules)
- --- 6.2 Symbols and External Modules
- --- 6.3 Symbols From Another External Module
- === 7. Tips & Tricks
- --- 7.1 Testing for CONFIG_FOO_BAR
-
-
-
-1. Introduction
-===============
+Introduction
+============
"kbuild" is the build system used by the Linux kernel. Modules must use
kbuild to stay compatible with changes in the build infrastructure and
-to pick up the right flags to "gcc." Functionality for building modules
+to pick up the right flags to the compiler. Functionality for building modules
both in-tree and out-of-tree is provided. The method for building
either is similar, and all modules are initially developed and built
out-of-tree.
@@ -48,11 +19,11 @@ in building out-of-tree (or "external") modules. The author of an
external module should supply a makefile that hides most of the
complexity, so one only has to type "make" to build the module. This is
easily accomplished, and a complete example will be presented in
-section 3.
+section `Creating a Kbuild File for an External Module`_.
-2. How to Build External Modules
-================================
+How to Build External Modules
+=============================
To build external modules, you must have a prebuilt kernel available
that contains the configuration and header files used in the build.
@@ -69,12 +40,12 @@ NOTE: "modules_prepare" will not build Module.symvers even if
CONFIG_MODVERSIONS is set; therefore, a full kernel build needs to be
executed to make module versioning work.
-2.1 Command Syntax
-==================
+Command Syntax
+--------------
The command to build an external module is::
- $ make -C <path_to_kernel_src> M=$PWD
+ $ make -C <path_to_kernel_dir> M=$PWD
The kbuild system knows that an external module is being built
due to the "M=<dir>" option given in the command.
@@ -88,15 +59,18 @@ executed to make module versioning work.
$ make -C /lib/modules/`uname -r`/build M=$PWD modules_install
-2.2 Options
-===========
+Options
+-------
- ($KDIR refers to the path of the kernel source directory.)
+ ($KDIR refers to the path of the kernel source directory, or the path
+ of the kernel output directory if the kernel was built in a separate
+ build directory.)
make -C $KDIR M=$PWD
-C $KDIR
- The directory where the kernel source is located.
+ The directory that contains the kernel and relevant build
+ artifacts used for building an external module.
"make" will actually change to the specified directory
when executing and will change back when finished.
@@ -106,8 +80,8 @@ executed to make module versioning work.
directory where the external module (kbuild file) is
located.
-2.3 Targets
-===========
+Targets
+-------
When building an external module, only a subset of the "make"
targets are available.
@@ -129,7 +103,8 @@ executed to make module versioning work.
modules_install
Install the external module(s). The default location is
/lib/modules/<kernel_release>/updates/, but a prefix may
- be added with INSTALL_MOD_PATH (discussed in section 5).
+ be added with INSTALL_MOD_PATH (discussed in section
+ `Module Installation`_).
clean
Remove all generated files in the module directory only.
@@ -137,8 +112,8 @@ executed to make module versioning work.
help
List the available targets for external modules.
-2.4 Building Separate Files
-===========================
+Building Separate Files
+-----------------------
It is possible to build single files that are part of a module.
This works equally well for the kernel, a module, and even for
@@ -152,8 +127,8 @@ executed to make module versioning work.
make -C $KDIR M=$PWD ./
-3. Creating a Kbuild File for an External Module
-================================================
+Creating a Kbuild File for an External Module
+=============================================
In the last section we saw the command to build a module for the
running kernel. The module is not actually built, however, because a
@@ -180,10 +155,9 @@ module 8123.ko, which is built from the following files::
8123_if.c
8123_if.h
8123_pci.c
- 8123_bin.o_shipped <= Binary blob
-3.1 Shared Makefile
--------------------
+Shared Makefile
+---------------
An external module always includes a wrapper makefile that
supports building the module using "make" with no arguments.
@@ -198,7 +172,7 @@ module 8123.ko, which is built from the following files::
ifneq ($(KERNELRELEASE),)
# kbuild part of makefile
obj-m := 8123.o
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
+ 8123-y := 8123_if.o 8123_pci.o
else
# normal makefile
@@ -207,10 +181,6 @@ module 8123.ko, which is built from the following files::
default:
$(MAKE) -C $(KDIR) M=$$PWD
- # Module specific targets
- genbin:
- echo "X" > 8123_bin.o_shipped
-
endif
The check for KERNELRELEASE is used to separate the two parts
@@ -221,19 +191,18 @@ module 8123.ko, which is built from the following files::
line; the second pass is by the kbuild system, which is
initiated by the parameterized "make" in the default target.
-3.2 Separate Kbuild File and Makefile
--------------------------------------
+Separate Kbuild File and Makefile
+---------------------------------
- In newer versions of the kernel, kbuild will first look for a
- file named "Kbuild," and only if that is not found, will it
- then look for a makefile. Utilizing a "Kbuild" file allows us
- to split up the makefile from example 1 into two files:
+ Kbuild will first look for a file named "Kbuild", and if it is not
+ found, it will then look for "Makefile". Utilizing a "Kbuild" file
+ allows us to split up the "Makefile" from example 1 into two files:
Example 2::
--> filename: Kbuild
obj-m := 8123.o
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
+ 8123-y := 8123_if.o 8123_pci.o
--> filename: Makefile
KDIR ?= /lib/modules/`uname -r`/build
@@ -241,68 +210,13 @@ module 8123.ko, which is built from the following files::
default:
$(MAKE) -C $(KDIR) M=$$PWD
- # Module specific targets
- genbin:
- echo "X" > 8123_bin.o_shipped
-
The split in example 2 is questionable due to the simplicity of
each file; however, some external modules use makefiles
consisting of several hundred lines, and here it really pays
off to separate the kbuild part from the rest.
- The next example shows a backward compatible version.
-
- Example 3::
-
- --> filename: Kbuild
- obj-m := 8123.o
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
-
- --> filename: Makefile
- ifneq ($(KERNELRELEASE),)
- # kbuild part of makefile
- include Kbuild
-
- else
- # normal makefile
- KDIR ?= /lib/modules/`uname -r`/build
-
- default:
- $(MAKE) -C $(KDIR) M=$$PWD
-
- # Module specific targets
- genbin:
- echo "X" > 8123_bin.o_shipped
-
- endif
-
- Here the "Kbuild" file is included from the makefile. This
- allows an older version of kbuild, which only knows of
- makefiles, to be used when the "make" and kbuild parts are
- split into separate files.
-
-3.3 Binary Blobs
-----------------
-
- Some external modules need to include an object file as a blob.
- kbuild has support for this, but requires the blob file to be
- named <filename>_shipped. When the kbuild rules kick in, a copy
- of <filename>_shipped is created with _shipped stripped off,
- giving us <filename>. This shortened filename can be used in
- the assignment to the module.
-
- Throughout this section, 8123_bin.o_shipped has been used to
- build the kernel module 8123.ko; it has been included as
- 8123_bin.o::
-
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
-
- Although there is no distinction between the ordinary source
- files and the binary file, kbuild will pick up different rules
- when creating the object file for the module.
-
-3.4 Building Multiple Modules
-=============================
+Building Multiple Modules
+-------------------------
kbuild supports building multiple modules with a single build
file. For example, if you wanted to build two modules, foo.ko
@@ -315,8 +229,8 @@ module 8123.ko, which is built from the following files::
It is that simple!
-4. Include Files
-================
+Include Files
+=============
Within the kernel, header files are kept in standard locations
according to the following rule:
@@ -334,19 +248,19 @@ according to the following rule:
include/scsi; and architecture specific headers are located
under arch/$(SRCARCH)/include/.
-4.1 Kernel Includes
--------------------
+Kernel Includes
+---------------
To include a header file located under include/linux/, simply
use::
#include <linux/module.h>
- kbuild will add options to "gcc" so the relevant directories
+ kbuild will add options to the compiler so the relevant directories
are searched.
-4.2 Single Subdirectory
------------------------
+Single Subdirectory
+-------------------
External modules tend to place header files in a separate
include/ directory where their source is located, although this
@@ -360,15 +274,11 @@ according to the following rule:
--> filename: Kbuild
obj-m := 8123.o
- ccflags-y := -Iinclude
- 8123-y := 8123_if.o 8123_pci.o 8123_bin.o
-
- Note that in the assignment there is no space between -I and
- the path. This is a limitation of kbuild: there must be no
- space present.
+ ccflags-y := -I $(src)/include
+ 8123-y := 8123_if.o 8123_pci.o
-4.3 Several Subdirectories
---------------------------
+Several Subdirectories
+----------------------
kbuild can handle files that are spread over several directories.
Consider the following example::
@@ -407,8 +317,8 @@ according to the following rule:
file is located.
-5. Module Installation
-======================
+Module Installation
+===================
Modules which are included in the kernel are installed in the
directory:
@@ -419,8 +329,8 @@ And external modules are installed in:
/lib/modules/$(KERNELRELEASE)/updates/
-5.1 INSTALL_MOD_PATH
---------------------
+INSTALL_MOD_PATH
+----------------
Above are the default directories but as always some level of
customization is possible. A prefix can be added to the
@@ -434,8 +344,8 @@ And external modules are installed in:
calling "make." This has effect when installing both in-tree
and out-of-tree modules.
-5.2 INSTALL_MOD_DIR
--------------------
+INSTALL_MOD_DIR
+---------------
External modules are by default installed to a directory under
/lib/modules/$(KERNELRELEASE)/updates/, but you may wish to
@@ -448,8 +358,8 @@ And external modules are installed in:
=> Install dir: /lib/modules/$(KERNELRELEASE)/gandalf/
-6. Module Versioning
-====================
+Module Versioning
+=================
Module versioning is enabled by the CONFIG_MODVERSIONS tag, and is used
as a simple ABI consistency check. A CRC value of the full prototype
@@ -461,8 +371,8 @@ module.
Module.symvers contains a list of all exported symbols from a kernel
build.
-6.1 Symbols From the Kernel (vmlinux + modules)
------------------------------------------------
+Symbols From the Kernel (vmlinux + modules)
+-------------------------------------------
During a kernel build, a file named Module.symvers will be
generated. Module.symvers contains all exported symbols from
@@ -486,8 +396,8 @@ build.
1) It lists all exported symbols from vmlinux and all modules.
2) It lists the CRC if CONFIG_MODVERSIONS is enabled.
-6.2 Symbols and External Modules
---------------------------------
+Symbols and External Modules
+----------------------------
When building an external module, the build system needs access
to the symbols from the kernel to check if all external symbols
@@ -496,8 +406,8 @@ build.
tree. During the MODPOST step, a new Module.symvers file will be
written containing all exported symbols from that external module.
-6.3 Symbols From Another External Module
-----------------------------------------
+Symbols From Another External Module
+------------------------------------
Sometimes, an external module uses exported symbols from
another external module. Kbuild needs to have full knowledge of
@@ -537,11 +447,11 @@ build.
initialization of its symbol tables.
-7. Tips & Tricks
-================
+Tips & Tricks
+=============
-7.1 Testing for CONFIG_FOO_BAR
-------------------------------
+Testing for CONFIG_FOO_BAR
+--------------------------
Modules often need to check for certain `CONFIG_` options to
decide if a specific feature is included in the module. In
@@ -553,9 +463,3 @@ build.
ext2-y := balloc.o bitmap.o dir.o
ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o
-
- External modules have traditionally used "grep" to check for
- specific `CONFIG_` settings directly in .config. This usage is
- broken. As introduced before, external modules should use
- kbuild for building and can therefore use the same methods as
- in-tree modules when testing for `CONFIG_` definitions.
diff --git a/Documentation/leds/leds-blinkm.rst b/Documentation/leds/leds-blinkm.rst
index 2d3c226a371a..647be1c6c552 100644
--- a/Documentation/leds/leds-blinkm.rst
+++ b/Documentation/leds/leds-blinkm.rst
@@ -13,9 +13,31 @@ The device accepts RGB and HSB color values through separate commands.
Also you can store blinking sequences as "scripts" in
the controller and run them. Also fading is an option.
-The interface this driver provides is 2-fold:
+The interface this driver provides is 3-fold:
-a) LED class interface for use with triggers
+a) LED multicolor class interface for use with triggers
+#######################################################
+
+The registration follows the scheme::
+
+ blinkm-<i2c-bus-nr>-<i2c-device-nr>:rgb:indicator
+
+ $ ls -h /sys/class/leds/blinkm-1-9:rgb:indicator
+ brightness device max_brightness multi_index multi_intensity power subsystem trigger uevent
+
+Hue is controlled by the multi_intensity file and lightness is controlled by
+the brightness file.
+
+The order in which to write the intensity values can be found in multi_index.
+Exactly three values between 0 and 255 must be written to multi_intensity to
+change the color::
+
+ $ echo 255 100 50 > multi_intensity
+
+The overall lightness can be changed by writing a value between 0 and 255 to
+the brightness file.
+
+b) LED class interface for use with triggers
############################################
The registration follows the scheme::
@@ -79,6 +101,7 @@ E.g.::
-as of 6/2012
+as of 07/2024
dl9pf <at> gmx <dot> de
+jstrauss <at> mailbox <dot> org
diff --git a/Documentation/leds/well-known-leds.txt b/Documentation/leds/well-known-leds.txt
index 67b44704801f..17ef78faf1f3 100644
--- a/Documentation/leds/well-known-leds.txt
+++ b/Documentation/leds/well-known-leds.txt
@@ -72,6 +72,14 @@ Good: "platform:*:charging" (allwinner sun50i, leds-cht-wcove)
Good: ":backlight" (Motorola Droid 4)
+* Indicators
+
+Good: ":indicator" (Blinkm)
+
+* RGB
+
+Good: ":rgb" (Blinkm)
+
* Ethernet LEDs
Currently two types of Network LEDs are support, those controlled by
diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index 8730c246ceaa..f9c50525bdbf 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -586,7 +586,7 @@ API, and return the results to the user-space.
The ABIs are designed to be used for user space applications development,
rather than human beings' fingers. Human users are recommended to use such
user space tools. One such Python-written user space tool is available at
-Github (https://github.com/awslabs/damo), Pypi
+Github (https://github.com/damonitor/damo), Pypi
(https://pypistats.org/packages/damo), and Fedora
(https://packages.fedoraproject.org/pkgs/python-damo/damo/).
diff --git a/Documentation/mm/damon/maintainer-profile.rst b/Documentation/mm/damon/maintainer-profile.rst
index feccf6a0f6c3..2365c9a3c1f0 100644
--- a/Documentation/mm/damon/maintainer-profile.rst
+++ b/Documentation/mm/damon/maintainer-profile.rst
@@ -7,23 +7,27 @@ The DAMON subsystem covers the files that are listed in 'DATA ACCESS MONITOR'
section of 'MAINTAINERS' file.
The mailing lists for the subsystem are damon@lists.linux.dev and
-linux-mm@kvack.org. Patches should be made against the mm-unstable tree [1]_
-whenever possible and posted to the mailing lists.
+linux-mm@kvack.org. Patches should be made against the mm-unstable `tree
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ whenever possible and posted to
+the mailing lists.
SCM Trees
---------
There are multiple Linux trees for DAMON development. Patches under
-development or testing are queued in damon/next [2]_ by the DAMON maintainer.
-Sufficiently reviewed patches will be queued in mm-unstable [1]_ by the memory
-management subsystem maintainer. After more sufficient tests, the patches will
-be queued in mm-stable [3]_ , and finally pull-requested to the mainline by the
-memory management subsystem maintainer.
-
-Note again the patches for mm-unstable tree [1]_ are queued by the memory
+development or testing are queued in `damon/next
+<https://git.kernel.org/sj/h/damon/next>`_ by the DAMON maintainer.
+Sufficiently reviewed patches will be queued in `mm-unstable
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ by the memory management
+subsystem maintainer. After further testing, the patches will be queued
+in `mm-stable <https://git.kernel.org/akpm/mm/h/mm-stable>`_, and finally
+pull-requested to the mainline by the memory management subsystem maintainer.
+
+Note again that the patches for the mm-unstable `tree
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ are queued by the memory
management subsystem maintainer. If the patches requires some patches in
-damon/next tree [2]_ which not yet merged in mm-unstable, please make sure the
-requirement is clearly specified.
+the damon/next `tree <https://git.kernel.org/sj/h/damon/next>`_ which are not
+yet merged in mm-unstable, please make sure the requirement is clearly specified.
Submit checklist addendum
-------------------------
@@ -32,18 +36,27 @@ When making DAMON changes, you should do below.
- Build changes related outputs including kernel and documents.
- Ensure the builds introduce no new errors or warnings.
-- Run and ensure no new failures for DAMON selftests [4]_ and kunittests [5]_ .
+- Run and ensure no new failures for DAMON `selftests
+  <https://github.com/awslabs/damon-tests/blob/master/corr/run.sh#L49>`_ and
+  `kunittests
+  <https://github.com/awslabs/damon-tests/blob/master/corr/tests/kunit.sh>`_.
Further doing below and putting the results will be helpful.
-- Run damon-tests/corr [6]_ for normal changes.
-- Run damon-tests/perf [7]_ for performance changes.
+- Run `damon-tests/corr
+  <https://github.com/awslabs/damon-tests/tree/master/corr>`_ for normal
+  changes.
+- Run `damon-tests/perf
+  <https://github.com/awslabs/damon-tests/tree/master/perf>`_ for performance
+  changes.
Key cycle dates
---------------
-Patches can be sent anytime. Key cycle dates of the mm-unstable [1]_ and
-mm-stable [3]_ trees depend on the memory management subsystem maintainer.
+Patches can be sent anytime. Key cycle dates of the `mm-unstable
+<https://git.kernel.org/akpm/mm/h/mm-unstable>`_ and `mm-stable
+<https://git.kernel.org/akpm/mm/h/mm-stable>`_ trees depend on the memory
+management subsystem maintainer.
Review cadence
--------------
@@ -58,16 +71,17 @@ Mailing tool
Like many other Linux kernel subsystems, DAMON uses the mailing lists
(damon@lists.linux.dev and linux-mm@kvack.org) as the major communication
-channel. There is a simple tool called HacKerMaiL (``hkml``) [8]_ , which is
-for people who are not very familiar with the mailing lists based
-communication. The tool could be particularly helpful for DAMON community
-members since it is developed and maintained by DAMON maintainer. The tool is
-also officially announced to support DAMON and general Linux kernel development
-workflow.
-
-In other words, ``hkml`` [8]_ is a mailing tool for DAMON community, which
-DAMON maintainer is committed to support. Please feel free to try and report
-issues or feature requests for the tool to the maintainer.
+channel. There is a simple tool called `HacKerMaiL
+<https://github.com/damonitor/hackermail>`_ (``hkml``), which is for people who
+are not very familiar with mailing-list-based communication. The tool could be
+particularly helpful for DAMON community members since it is developed and
+maintained by the DAMON maintainer. The tool is also officially announced to
+support DAMON and the general Linux kernel development workflow.
+
+In other words, `hkml <https://github.com/damonitor/hackermail>`_ is a mailing
+tool for the DAMON community, which the DAMON maintainer is committed to
+supporting. Please feel free to try it and report issues or feature requests
+for the tool to the maintainer.
Community meetup
----------------
@@ -83,17 +97,9 @@ members including the maintainer. The maintainer shares the available time
slots, and attendees should reserve one of those at least 24 hours before the
time slot, by reaching out to the maintainer.
-Schedules and available reservation time slots are available at the Google doc
-[9]_ . DAMON maintainer will also provide periodic reminder to the mailing
-list (damon@lists.linux.dev).
-
-
-.. [1] https://git.kernel.org/akpm/mm/h/mm-unstable
-.. [2] https://git.kernel.org/sj/h/damon/next
-.. [3] https://git.kernel.org/akpm/mm/h/mm-stable
-.. [4] https://github.com/awslabs/damon-tests/blob/master/corr/run.sh#L49
-.. [5] https://github.com/awslabs/damon-tests/blob/master/corr/tests/kunit.sh
-.. [6] https://github.com/awslabs/damon-tests/tree/master/corr
-.. [7] https://github.com/awslabs/damon-tests/tree/master/perf
-.. [8] https://github.com/damonitor/hackermail
-.. [9] https://docs.google.com/document/d/1v43Kcj3ly4CYqmAkMaZzLiM2GEnWfgdGbZAH3mi2vpM/edit?usp=sharing
+Schedules and available reservation time slots can be found at the Google `doc
+<https://docs.google.com/document/d/1v43Kcj3ly4CYqmAkMaZzLiM2GEnWfgdGbZAH3mi2vpM/edit?usp=sharing>`_.
+There is also a public Google `calendar
+<https://calendar.google.com/calendar/u/0?cid=ZDIwOTA4YTMxNjc2MDQ3NTIyMmUzYTM5ZmQyM2U4NDA0ZGIwZjBiYmJlZGQxNDM0MmY4ZTRjOTE0NjdhZDRiY0Bncm91cC5jYWxlbmRhci5nb29nbGUuY29t>`_
+that has the events. Anyone can subscribe to it. The DAMON maintainer will
+also provide periodic reminders to the mailing list (damon@lists.linux.dev).
diff --git a/Documentation/mm/page_migration.rst b/Documentation/mm/page_migration.rst
index f1ce67a26615..519b35a4caf5 100644
--- a/Documentation/mm/page_migration.rst
+++ b/Documentation/mm/page_migration.rst
@@ -63,15 +63,15 @@ and then a low level description of how the low level details work.
In kernel use of migrate_pages()
================================
-1. Remove pages from the LRU.
+1. Remove folios from the LRU.
- Lists of pages to be migrated are generated by scanning over
- pages and moving them into lists. This is done by
- calling isolate_lru_page().
- Calling isolate_lru_page() increases the references to the page
- so that it cannot vanish while the page migration occurs.
+ Lists of folios to be migrated are generated by scanning over
+ folios and moving them into lists. This is done by
+ calling folio_isolate_lru().
+ Calling folio_isolate_lru() increases the references to the folio
+ so that it cannot vanish while the folio migration occurs.
It also prevents the swapper or other scans from encountering
- the page.
+ the folio.
2. We need to have a function of type new_folio_t that can be
passed to migrate_pages(). This function should figure out
@@ -84,10 +84,10 @@ In kernel use of migrate_pages()
How migrate_pages() works
=========================
-migrate_pages() does several passes over its list of pages. A page is moved
-if all references to a page are removable at the time. The page has
-already been removed from the LRU via isolate_lru_page() and the refcount
-is increased so that the page cannot be freed while page migration occurs.
+migrate_pages() does several passes over its list of folios. A folio is moved
+if all references to a folio are removable at the time. The folio has
+already been removed from the LRU via folio_isolate_lru() and the refcount
+is increased so that the folio cannot be freed while folio migration occurs.
Steps:
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index 1ba0ad63246c..a2cd8800d527 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -31,10 +31,10 @@ Design principles
feature that applies to all dynamic high order allocations in the
kernel)
-get_user_pages and follow_page
-==============================
+get_user_pages and pin_user_pages
+=================================
-get_user_pages and follow_page if run on a hugepage, will return the
+get_user_pages and pin_user_pages if run on a hugepage, will return the
head or tail pages as usual (exactly as they would do on
hugetlbfs). Most GUP users will only care about the actual physical
address of the page and its temporary pinning to release after the I/O
diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst
index 2feb2ed51ae2..8d11fe6a0854 100644
--- a/Documentation/mm/unevictable-lru.rst
+++ b/Documentation/mm/unevictable-lru.rst
@@ -80,7 +80,7 @@ on an additional LRU list for a few reasons:
(2) We want to be able to migrate unevictable folios between nodes for memory
defragmentation, workload management and memory hotplug. The Linux kernel
can only migrate folios that it can successfully isolate from the LRU
- lists (or "Movable" pages: outside of consideration here). If we were to
+ lists (or "Movable" folios: outside of consideration here). If we were to
maintain folios elsewhere than on an LRU-like list, where they can be
detected by folio_isolate_lru(), we would prevent their migration.
@@ -230,7 +230,7 @@ In Nick's patch, he used one of the struct page LRU list link fields as a count
of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years
earlier). But this use of the link field for a count prevented the management
of the pages on an LRU list, and thus mlocked pages were not migratable as
-isolate_lru_page() could not detect them, and the LRU list link field was not
+folio_isolate_lru() could not detect them, and the LRU list link field was not
available to the migration subsystem.
Nick resolved this by putting mlocked pages back on the LRU list before
@@ -253,8 +253,8 @@ Basic Management
mlocked pages - pages mapped into a VM_LOCKED VMA - are a class of unevictable
pages. When such a page has been "noticed" by the memory management subsystem,
-the page is marked with the PG_mlocked flag. This can be manipulated using the
-PageMlocked() functions.
+the folio is marked with the PG_mlocked flag. This can be manipulated using
+folio_set_mlocked() and folio_clear_mlocked() functions.
A PG_mlocked page will be placed on the unevictable list when it is added to
the LRU. Such pages can be "noticed" by memory management in several places:
diff --git a/Documentation/networking/tproxy.rst b/Documentation/networking/tproxy.rst
index 00dc3a1a66b4..7f7c1ff6f159 100644
--- a/Documentation/networking/tproxy.rst
+++ b/Documentation/networking/tproxy.rst
@@ -17,7 +17,7 @@ The idea is that you identify packets with destination address matching a local
socket on your box, set the packet mark to a certain value::
# iptables -t mangle -N DIVERT
- # iptables -t mangle -A PREROUTING -p tcp -m socket -j DIVERT
+ # iptables -t mangle -A PREROUTING -p tcp -m socket --transparent -j DIVERT
# iptables -t mangle -A DIVERT -j MARK --set-mark 1
# iptables -t mangle -A DIVERT -j ACCEPT
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 3fc63f27c226..00f1ed7c59c3 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -64,6 +64,7 @@ GNU tar 1.28 tar --version
gtags (optional) 6.6.5 gtags --version
mkimage (optional) 2017.01 mkimage --version
Python (optional) 3.5.x python3 --version
+GNU AWK (optional) 5.1.0 gawk --version
====================== =============== ========================================
.. [#f1] Sphinx is needed only to build the Kernel documentation
@@ -192,6 +193,12 @@ platforms. The tool is available via the ``u-boot-tools`` package or can be
built from the U-Boot source code. See the instructions at
https://docs.u-boot.org/en/latest/build/tools.html#building-tools-for-linux
+GNU AWK
+-------
+
+GNU AWK is needed if you want kernel builds to generate address range data for
+builtin modules (CONFIG_BUILTIN_MODULE_RANGES).
+
System utilities
****************
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
index e3f388ef4ee4..6146b49b6a98 100644
--- a/Documentation/rust/general-information.rst
+++ b/Documentation/rust/general-information.rst
@@ -15,6 +15,8 @@ but not `std <https://doc.rust-lang.org/std/>`_. Crates for use in the
kernel must opt into this behavior using the ``#![no_std]`` attribute.
+.. _rust_code_documentation:
+
Code documentation
------------------
@@ -22,10 +24,17 @@ Rust kernel code is documented using ``rustdoc``, its built-in documentation
generator.
The generated HTML docs include integrated search, linked items (e.g. types,
-functions, constants), source code, etc. They may be read at (TODO: link when
-in mainline and generated alongside the rest of the documentation):
+functions, constants), source code, etc. They may be read at:
+
+ https://rust.docs.kernel.org
+
+For linux-next, please see:
+
+ https://rust.docs.kernel.org/next/
- http://kernel.org/
+There are also tags for each main release, e.g.:
+
+ https://rust.docs.kernel.org/6.10/
The docs can also be easily generated and read locally. This is quite fast
(same order as compiling the code itself) and no special tools or environment
@@ -75,7 +84,7 @@ should provide as-safe-as-possible abstractions as needed.
.. code-block::
rust/bindings/
- (rust/helpers.c)
+ (rust/helpers/)
include/ -----+ <-+
| |
@@ -112,7 +121,7 @@ output files in the ``rust/bindings/`` directory.
For parts of the C header that ``bindgen`` does not auto generate, e.g. C
``inline`` functions or non-trivial macros, it is acceptable to add a small
-wrapper function to ``rust/helpers.c`` to make it available for the Rust side as
+wrapper function to ``rust/helpers/`` to make it available for the Rust side as
well.
Abstractions
@@ -142,3 +151,11 @@ configuration:
#[cfg(CONFIG_X="y")] // Enabled as a built-in (`y`)
#[cfg(CONFIG_X="m")] // Enabled as a module (`m`)
#[cfg(not(CONFIG_X))] // Disabled
+
+For other predicates that Rust's ``cfg`` does not support, e.g. expressions with
+numerical comparisons, one may define a new Kconfig symbol:
+
+.. code-block:: kconfig
+
+   config RUSTC_VERSION_MIN_107900
+           def_bool y if RUSTC_VERSION >= 107900
diff --git a/Documentation/rust/index.rst b/Documentation/rust/index.rst
index 46d35bd395cf..55dcde9e9e7e 100644
--- a/Documentation/rust/index.rst
+++ b/Documentation/rust/index.rst
@@ -25,13 +25,27 @@ support is still in development/experimental, especially for certain kernel
configurations.
+Code documentation
+------------------
+
+Given a kernel configuration, the kernel build may generate Rust code
+documentation, i.e. HTML rendered by the ``rustdoc`` tool.
+
.. only:: rustdoc and html
- You can also browse `rustdoc documentation <rustdoc/kernel/index.html>`_.
+ This kernel documentation was built with `Rust code documentation
+ <rustdoc/kernel/index.html>`_.
.. only:: not rustdoc and html
- This documentation does not include rustdoc generated information.
+ This kernel documentation was not built with Rust code documentation.
+
+A pregenerated version is provided at:
+
+ https://rust.docs.kernel.org
+
+Please see the :ref:`Code documentation <rust_code_documentation>` section for
+more details.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
index 8e3ad9678719..2d107982c87b 100644
--- a/Documentation/rust/quick-start.rst
+++ b/Documentation/rust/quick-start.rst
@@ -39,8 +39,8 @@ of the box, e.g.::
Debian
******
-Debian Unstable (Sid), outside of the freeze period, provides recent Rust
-releases and thus it should generally work out of the box, e.g.::
+Debian Testing and Debian Unstable (Sid), outside of the freeze period, provide
+recent Rust releases and thus they should generally work out of the box, e.g.::
apt install rustc rust-src bindgen rustfmt rust-clippy
diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst
index 1f2942c4d14b..5dd53e47bc0c 100644
--- a/Documentation/scheduler/index.rst
+++ b/Documentation/scheduler/index.rst
@@ -21,6 +21,7 @@ Scheduler
sched-nice-design
sched-rt-group
sched-stats
+ sched-ext
sched-debug
text_files
diff --git a/Documentation/scheduler/sched-deadline.rst b/Documentation/scheduler/sched-deadline.rst
index 9fe4846079bb..22838ed8e13a 100644
--- a/Documentation/scheduler/sched-deadline.rst
+++ b/Documentation/scheduler/sched-deadline.rst
@@ -749,21 +749,19 @@ Appendix A. Test suite
of the command line options. Please refer to rt-app documentation for more
details (`<rt-app-sources>/doc/*.json`).
- The second testing application is a modification of schedtool, called
- schedtool-dl, which can be used to setup SCHED_DEADLINE parameters for a
- certain pid/application. schedtool-dl is available at:
- https://github.com/scheduler-tools/schedtool-dl.git.
+ The second testing application is chrt, which has support for
+ SCHED_DEADLINE.
The usage is straightforward::
- # schedtool -E -t 10000000:100000000 -e ./my_cpuhog_app
+ # chrt -d -T 10000000 -D 100000000 0 ./my_cpuhog_app
With this, my_cpuhog_app is put to run inside a SCHED_DEADLINE reservation
- of 10ms every 100ms (note that parameters are expressed in microseconds).
- You can also use schedtool to create a reservation for an already running
+ of 10ms every 100ms (note that parameters are expressed in nanoseconds).
+ You can also use chrt to create a reservation for an already running
application, given that you know its pid::
- # schedtool -E -t 10000000:100000000 my_app_pid
+ # chrt -d -T 10000000 -D 100000000 -p 0 my_app_pid
Appendix B. Minimal main()
==========================
diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst
new file mode 100644
index 000000000000..6c0d70e2e27d
--- /dev/null
+++ b/Documentation/scheduler/sched-ext.rst
@@ -0,0 +1,326 @@
+==========================
+Extensible Scheduler Class
+==========================
+
+sched_ext is a scheduler class whose behavior can be defined by a set of BPF
+programs - the BPF scheduler.
+
+* sched_ext exports a full scheduling interface so that any scheduling
+ algorithm can be implemented on top.
+
+* The BPF scheduler can group CPUs however it sees fit and schedule them
+ together, as tasks aren't tied to specific CPUs at the time of wakeup.
+
+* The BPF scheduler can be turned on and off dynamically anytime.
+
+* The system integrity is maintained no matter what the BPF scheduler does.
+ The default scheduling behavior is restored anytime an error is detected,
+ a runnable task stalls, or on invoking the SysRq key sequence
+ :kbd:`SysRq-S`.
+
+* When the BPF scheduler triggers an error, debug information is dumped to
+ aid debugging. The debug dump is passed to and printed out by the
+ scheduler binary. The debug dump can also be accessed through the
+ `sched_ext_dump` tracepoint. The SysRq key sequence :kbd:`SysRq-D`
+ triggers a debug dump. This doesn't terminate the BPF scheduler and can
+ only be read through the tracepoint.
+
+Switching to and from sched_ext
+===============================
+
+``CONFIG_SCHED_CLASS_EXT`` is the config option to enable sched_ext and
+``tools/sched_ext`` contains the example schedulers. The following config
+options should be enabled to use sched_ext:
+
+.. code-block:: none
+
+ CONFIG_BPF=y
+ CONFIG_SCHED_CLASS_EXT=y
+ CONFIG_BPF_SYSCALL=y
+ CONFIG_BPF_JIT=y
+ CONFIG_DEBUG_INFO_BTF=y
+ CONFIG_BPF_JIT_ALWAYS_ON=y
+ CONFIG_BPF_JIT_DEFAULT_ON=y
+ CONFIG_PAHOLE_HAS_SPLIT_BTF=y
+ CONFIG_PAHOLE_HAS_BTF_TAG=y
+
+sched_ext is used only when the BPF scheduler is loaded and running.
+
+If a task explicitly sets its scheduling policy to ``SCHED_EXT``, it will be
+treated as ``SCHED_NORMAL`` and scheduled by CFS until the BPF scheduler is
+loaded.
+
+When the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is not set
+in ``ops->flags``, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE``, and
+``SCHED_EXT`` tasks are scheduled by sched_ext.
+
+However, when the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is
+set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled
+by sched_ext, while tasks with ``SCHED_NORMAL``, ``SCHED_BATCH`` and
+``SCHED_IDLE`` policies are scheduled by CFS.
+
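+For illustration, a task can opt itself into ``SCHED_EXT`` from userspace as
+sketched below. ``SCHED_EXT``'s numeric value is taken from the kernel UAPI
+headers in case the libc does not define it yet:
+
+.. code-block:: c
+
+   #include <sched.h>
+   #include <stdio.h>
+
+   #ifndef SCHED_EXT
+   #define SCHED_EXT 7 /* from include/uapi/linux/sched.h */
+   #endif
+
+   int main(void)
+   {
+           const struct sched_param param = { .sched_priority = 0 };
+
+           /* Switch the calling task (pid 0) to the SCHED_EXT policy. */
+           if (sched_setscheduler(0, SCHED_EXT, &param))
+                   perror("sched_setscheduler");
+           return 0;
+   }
+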
+Terminating the sched_ext scheduler program, triggering :kbd:`SysRq-S`, or
+detection of any internal error including stalled runnable tasks aborts the
+BPF scheduler and reverts all tasks back to CFS.
+
+.. code-block:: none
+
+ # make -j16 -C tools/sched_ext
+ # tools/sched_ext/scx_simple
+ local=0 global=3
+ local=5 global=24
+ local=9 global=44
+ local=13 global=56
+ local=17 global=72
+ ^CEXIT: BPF scheduler unregistered
+
+The current status of the BPF scheduler can be determined as follows:
+
+.. code-block:: none
+
+ # cat /sys/kernel/sched_ext/state
+ enabled
+ # cat /sys/kernel/sched_ext/root/ops
+ simple
+
+You can check if any BPF scheduler has ever been loaded since boot by examining
+this monotonically incrementing counter (a value of zero indicates that no BPF
+scheduler has been loaded):
+
+.. code-block:: none
+
+ # cat /sys/kernel/sched_ext/enable_seq
+ 1
+
+``tools/sched_ext/scx_show_state.py`` is a drgn script which shows more
+detailed information:
+
+.. code-block:: none
+
+ # tools/sched_ext/scx_show_state.py
+ ops : simple
+ enabled : 1
+ switching_all : 1
+ switched_all : 1
+ enable_state : enabled (2)
+ bypass_depth : 0
+ nr_rejected : 0
+ enable_seq : 1
+
+If ``CONFIG_SCHED_DEBUG`` is set, whether a given task is on sched_ext can
+be determined as follows:
+
+.. code-block:: none
+
+ # grep ext /proc/self/sched
+ ext.enabled : 1
+
+The Basics
+==========
+
+Userspace can implement an arbitrary BPF scheduler by loading a set of BPF
+programs that implement ``struct sched_ext_ops``. The only mandatory field
+is ``ops.name`` which must be a valid BPF object name. All operations are
+optional. The following modified excerpt is from
+``tools/sched_ext/scx_simple.bpf.c`` showing a minimal global FIFO scheduler.
+
+.. code-block:: c
+
+   /*
+    * Decide which CPU a task should be migrated to before being
+    * enqueued (either at wakeup, fork time, or exec time). If an
+    * idle core is found by the default ops.select_cpu() implementation,
+    * then dispatch the task directly to SCX_DSQ_LOCAL and skip the
+    * ops.enqueue() callback.
+    *
+    * Note that this implementation has exactly the same behavior as the
+    * default ops.select_cpu implementation. The behavior of the scheduler
+    * would be exactly same if the implementation just didn't define the
+    * simple_select_cpu() struct_ops prog.
+    */
+   s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p,
+                      s32 prev_cpu, u64 wake_flags)
+   {
+           s32 cpu;
+           /* Need to initialize or the BPF verifier will reject the program */
+           bool direct = false;
+
+           cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &direct);
+
+           if (direct)
+                   scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+
+           return cpu;
+   }
+
+   /*
+    * Do a direct dispatch of a task to the global DSQ. This ops.enqueue()
+    * callback will only be invoked if we failed to find a core to dispatch
+    * to in ops.select_cpu() above.
+    *
+    * Note that this implementation has exactly the same behavior as the
+    * default ops.enqueue implementation, which just dispatches the task
+    * to SCX_DSQ_GLOBAL. The behavior of the scheduler would be exactly same
+    * if the implementation just didn't define the simple_enqueue struct_ops
+    * prog.
+    */
+   void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
+   {
+           scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+   }
+
+   s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
+   {
+           /*
+            * By default, all SCHED_EXT, SCHED_OTHER, SCHED_IDLE, and
+            * SCHED_BATCH tasks should use sched_ext.
+            */
+           return 0;
+   }
+
+   void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei)
+   {
+           exit_type = ei->type;
+   }
+
+   SEC(".struct_ops")
+   struct sched_ext_ops simple_ops = {
+           .select_cpu = (void *)simple_select_cpu,
+           .enqueue = (void *)simple_enqueue,
+           .init = (void *)simple_init,
+           .exit = (void *)simple_exit,
+           .name = "simple",
+   };
+
+Dispatch Queues
+---------------
+
+To match the impedance between the scheduler core and the BPF scheduler,
+sched_ext uses DSQs (dispatch queues) which can operate as both a FIFO and a
+priority queue. By default, there is one global FIFO (``SCX_DSQ_GLOBAL``),
+and one local dsq per CPU (``SCX_DSQ_LOCAL``). The BPF scheduler can manage
+an arbitrary number of dsq's using ``scx_bpf_create_dsq()`` and
+``scx_bpf_destroy_dsq()``.
+
+A CPU always executes a task from its local DSQ. A task is "dispatched" to a
+DSQ. A non-local DSQ is "consumed" to transfer a task to the consuming CPU's
+local DSQ.
+
+When a CPU is looking for the next task to run, if the local DSQ is not
+empty, the first task is picked. Otherwise, the CPU tries to consume the
+global DSQ. If that doesn't yield a runnable task either, ``ops.dispatch()``
+is invoked.
+
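+The following sketch shows these helpers together. It is not taken from the
+example schedulers; the callback names and the DSQ ID are illustrative:
+
+.. code-block:: c
+
+   #define MY_DSQ_ID 0 /* custom DSQ IDs must be smaller than 2^63 */
+
+   s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
+   {
+           /* Create one custom FIFO DSQ; -1 means any NUMA node. */
+           return scx_bpf_create_dsq(MY_DSQ_ID, -1);
+   }
+
+   void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
+   {
+           /* Queue every runnable task on the custom DSQ. */
+           scx_bpf_dispatch(p, MY_DSQ_ID, SCX_SLICE_DFL, enq_flags);
+   }
+
+   void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
+   {
+           /* Refill this CPU's local DSQ from the custom DSQ. */
+           scx_bpf_consume(MY_DSQ_ID);
+   }
+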
+Scheduling Cycle
+----------------
+
+The following briefly shows how a waking task is scheduled and executed.
+
+1. When a task is waking up, ``ops.select_cpu()`` is the first operation
+ invoked. This serves two purposes: providing a CPU selection optimization
+ hint, and waking up the selected CPU if it is idle.
+
+ The CPU selected by ``ops.select_cpu()`` is an optimization hint and not
+ binding. The actual decision is made at the last step of scheduling.
+ However, there is a small performance gain if the CPU returned by
+ ``ops.select_cpu()`` matches the CPU the task eventually runs on.
+
+ A side-effect of selecting a CPU is waking it up from idle. While a BPF
+ scheduler can wake up any CPU using the ``scx_bpf_kick_cpu()`` helper,
+ using ``ops.select_cpu()`` judiciously can be simpler and more efficient.
+
+ A task can be immediately dispatched to a DSQ from ``ops.select_cpu()`` by
+ calling ``scx_bpf_dispatch()``. If the task is dispatched to
+ ``SCX_DSQ_LOCAL`` from ``ops.select_cpu()``, it will be dispatched to the
+ local DSQ of whichever CPU is returned from ``ops.select_cpu()``.
+ Additionally, dispatching directly from ``ops.select_cpu()`` will cause the
+ ``ops.enqueue()`` callback to be skipped.
+
+ Note that the scheduler core will ignore an invalid CPU selection, for
+ example, if it's outside the allowed cpumask of the task.
+
+2. Once the target CPU is selected, ``ops.enqueue()`` is invoked (unless the
+ task was dispatched directly from ``ops.select_cpu()``). ``ops.enqueue()``
+ can make one of the following decisions:
+
+ * Immediately dispatch the task to either the global or local DSQ by
+ calling ``scx_bpf_dispatch()`` with ``SCX_DSQ_GLOBAL`` or
+ ``SCX_DSQ_LOCAL``, respectively.
+
+ * Immediately dispatch the task to a custom DSQ by calling
+ ``scx_bpf_dispatch()`` with a DSQ ID which is smaller than 2^63.
+
+ * Queue the task on the BPF side.
+
+3. When a CPU is ready to schedule, it first looks at its local DSQ. If
+ empty, it then looks at the global DSQ. If there still isn't a task to
+ run, ``ops.dispatch()`` is invoked which can use the following two
+ functions to populate the local DSQ.
+
+ * ``scx_bpf_dispatch()`` dispatches a task to a DSQ. Any target DSQ can
+ be used - ``SCX_DSQ_LOCAL``, ``SCX_DSQ_LOCAL_ON | cpu``,
+ ``SCX_DSQ_GLOBAL`` or a custom DSQ. While ``scx_bpf_dispatch()``
+ currently can't be called with BPF locks held, this is being worked on
+ and will be supported. ``scx_bpf_dispatch()`` schedules dispatches
+ rather than performing them immediately. There can be up to
+ ``ops.dispatch_max_batch`` pending tasks.
+
+ * ``scx_bpf_consume()`` transfers a task from the specified non-local DSQ
+ to the dispatching DSQ. This function cannot be called with any BPF
+ locks held. ``scx_bpf_consume()`` flushes the pending dispatched tasks
+ before trying to consume the specified DSQ.
+
+4. After ``ops.dispatch()`` returns, if there are tasks in the local DSQ,
+ the CPU runs the first one. If empty, the following steps are taken:
+
+ * Try to consume the global DSQ. If successful, run the task.
+
+ * If ``ops.dispatch()`` has dispatched any tasks, retry #3.
+
+ * If the previous task is an SCX task and still runnable, keep executing
+ it (see ``SCX_OPS_ENQ_LAST``).
+
+ * Go idle.
+
+Note that the BPF scheduler can always choose to dispatch tasks immediately
+in ``ops.enqueue()`` as illustrated in the above simple example. If only the
+built-in DSQs are used, there is no need to implement ``ops.dispatch()`` as
+a task is never queued on the BPF scheduler and both the local and global
+DSQs are consumed automatically.
+
+``scx_bpf_dispatch()`` queues the task on the FIFO of the target DSQ. Use
+``scx_bpf_dispatch_vtime()`` for the priority queue. Internal DSQs such as
+``SCX_DSQ_LOCAL`` and ``SCX_DSQ_GLOBAL`` do not support priority-queue
+dispatching, and must be dispatched to with ``scx_bpf_dispatch()``. See the
+function documentation and usage in ``tools/sched_ext/scx_simple.bpf.c`` for
+more information.
+
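+As a brief sketch (reusing the illustrative custom DSQ from the earlier
+example), a vtime-ordered enqueue could look as follows:
+
+.. code-block:: c
+
+   void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
+   {
+           /*
+            * Order the custom DSQ by the task's virtual time; consuming
+            * the DSQ then picks the task with the smallest vtime first.
+            */
+           scx_bpf_dispatch_vtime(p, MY_DSQ_ID, SCX_SLICE_DFL,
+                                  p->scx.dsq_vtime, enq_flags);
+   }
+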
+Where to Look
+=============
+
+* ``include/linux/sched/ext.h`` defines the core data structures, ops table
+ and constants.
+
+* ``kernel/sched/ext.c`` contains sched_ext core implementation and helpers.
+ The functions prefixed with ``scx_bpf_`` can be called from the BPF
+ scheduler.
+
+* ``tools/sched_ext/`` hosts example BPF scheduler implementations.
+
+ * ``scx_simple[.bpf].c``: Minimal global FIFO scheduler example using a
+ custom DSQ.
+
+ * ``scx_qmap[.bpf].c``: A multi-level FIFO scheduler supporting five
+ levels of priority implemented with ``BPF_MAP_TYPE_QUEUE``.
+
+ABI Instability
+===============
+
+The APIs provided by sched_ext to BPF scheduler programs have no stability
+guarantees. This includes the ops table callbacks and constants defined in
+``include/linux/sched/ext.h``, as well as the ``scx_bpf_`` kfuncs defined in
+``kernel/sched/ext.c``.
+
+While we will attempt to provide a relatively stable API surface when
+possible, these APIs are subject to change without warning between kernel
+versions.
diff --git a/Documentation/staging/xz.rst b/Documentation/staging/xz.rst
index b2f5ff12a161..6953a189e5f2 100644
--- a/Documentation/staging/xz.rst
+++ b/Documentation/staging/xz.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: 0BSD
+
============================
XZ data compression in Linux
============================
@@ -6,62 +8,55 @@ Introduction
============
XZ is a general purpose data compression format with high compression
-ratio and relatively fast decompression. The primary compression
-algorithm (filter) is LZMA2. Additional filters can be used to improve
-compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters
-improve compression ratio of executable data.
-
-The XZ decompressor in Linux is called XZ Embedded. It supports
-the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
-for integrity checking. The home page of XZ Embedded is at
-<https://tukaani.org/xz/embedded.html>, where you can find the
-latest version and also information about using the code outside
-the Linux kernel.
-
-For userspace, XZ Utils provide a zlib-like compression library
-and a gzip-like command line tool. XZ Utils can be downloaded from
-<https://tukaani.org/xz/>.
+ratio. The XZ decompressor in Linux is called XZ Embedded. It supports
+the LZMA2 filter and optionally also Branch/Call/Jump (BCJ) filters
+for executable code. CRC32 is supported for integrity checking.
+
+See the `XZ Embedded`_ home page for the latest version, which includes
+a few optional extra features that aren't required in the Linux kernel,
+and for information about using the code outside the Linux kernel.
+
+For userspace, `XZ Utils`_ provide a zlib-like compression library
+and a gzip-like command line tool.
+
+.. _XZ Embedded: https://tukaani.org/xz/embedded.html
+.. _XZ Utils: https://tukaani.org/xz/
XZ related components in the kernel
===================================
The xz_dec module provides XZ decompressor with single-call (buffer
-to buffer) and multi-call (stateful) APIs. The usage of the xz_dec
-module is documented in include/linux/xz.h.
-
-The xz_dec_test module is for testing xz_dec. xz_dec_test is not
-useful unless you are hacking the XZ decompressor. xz_dec_test
-allocates a char device major dynamically to which one can write
-.xz files from userspace. The decompressed output is thrown away.
-Keep an eye on dmesg to see diagnostics printed by xz_dec_test.
-See the xz_dec_test source code for the details.
+to buffer) and multi-call (stateful) APIs in include/linux/xz.h.
For decompressing the kernel image, initramfs, and initrd, there
is a wrapper function in lib/decompress_unxz.c. Its API is the
same as in other decompress_*.c files, which is defined in
include/linux/decompress/generic.h.
-scripts/xz_wrap.sh is a wrapper for the xz command line tool found
-from XZ Utils. The wrapper sets compression options to values suitable
-for compressing the kernel image.
+For kernel makefiles, three commands are provided for use with
+``$(call if_changed)``. They require the xz tool from XZ Utils.
+
+- ``$(call if_changed,xzkern)`` is for compressing the kernel image.
+ It runs the script scripts/xz_wrap.sh which uses arch-optimized
+ options and a big LZMA2 dictionary.
+
+- ``$(call if_changed,xzkern_with_size)`` is like ``xzkern`` above but
+ this also appends a four-byte trailer containing the uncompressed size
+ of the file. The trailer is needed by the boot code on some archs.
-For kernel makefiles, two commands are provided for use with
-$(call if_needed). The kernel image should be compressed with
-$(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2
-dictionary. It will also append a four-byte trailer containing the
-uncompressed size of the file, which is needed by the boot code.
-Other things should be compressed with $(call if_needed,xzmisc)
-which will use no BCJ filter and 1 MiB LZMA2 dictionary.
+- Other things can be compressed with ``$(call if_changed,xzmisc)``
+  which will use no BCJ filter and a 1 MiB LZMA2 dictionary.
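+
+For illustration, a makefile rule using one of these commands could look
+like the following sketch (the target and prerequisite names are made up)::
+
+	$(obj)/image.xz: $(obj)/image FORCE
+		$(call if_changed,xzkern)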
Notes on compression options
============================
-Since the XZ Embedded supports only streams with no integrity check or
-CRC32, make sure that you don't use some other integrity check type
-when encoding files that are supposed to be decoded by the kernel. With
-liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32
-when encoding. With the xz command line tool, use --check=none or
---check=crc32.
+Since the XZ Embedded supports only streams with CRC32 or no integrity
+check, make sure that you don't use some other integrity check type
+when encoding files that are supposed to be decoded by the kernel.
+With liblzma from XZ Utils, you need to use either ``LZMA_CHECK_CRC32``
+or ``LZMA_CHECK_NONE`` when encoding. With the ``xz`` command line tool,
+use ``--check=crc32`` or ``--check=none`` to override the default
+``--check=crc64``.
Using CRC32 is strongly recommended unless there is some other layer
which will verify the integrity of the uncompressed data anyway.
@@ -71,57 +66,33 @@ by the decoder; you can only change the integrity check type (or
disable it) for the actual uncompressed data.
In userspace, LZMA2 is typically used with dictionary sizes of several
-megabytes. The decoder needs to have the dictionary in RAM, thus big
-dictionaries cannot be used for files that are intended to be decoded
-by the kernel. 1 MiB is probably the maximum reasonable dictionary
-size for in-kernel use (maybe more is OK for initramfs). The presets
-in XZ Utils may not be optimal when creating files for the kernel,
-so don't hesitate to use custom settings. Example::
-
- xz --check=crc32 --lzma2=dict=512KiB inputfile
-
-An exception to above dictionary size limitation is when the decoder
-is used in single-call mode. Decompressing the kernel itself is an
-example of this situation. In single-call mode, the memory usage
-doesn't depend on the dictionary size, and it is perfectly fine to
-use a big dictionary: for maximum compression, the dictionary should
-be at least as big as the uncompressed data itself.
-
-Future plans
-============
+megabytes. The decoder needs to have the dictionary in RAM:
+
+- In multi-call mode the dictionary is allocated as part of the
+ decoder state. The reasonable maximum dictionary size for in-kernel
+ use will depend on the target hardware: a few megabytes is fine for
+ desktop systems while 64 KiB to 1 MiB might be more appropriate on
+ some embedded systems.
+
+- In single-call mode the output buffer is used as the dictionary
+ buffer. That is, the size of the dictionary doesn't affect the
+ decompressor memory usage at all. Only the base data structures
+ are allocated which take a little less than 30 KiB of memory.
+ For the best compression, the dictionary should be at least
+ as big as the uncompressed data. A notable example of single-call
+ mode is decompressing the kernel itself (except on PowerPC).
+
+The compression presets in XZ Utils may not be optimal when creating
+files for the kernel, so don't hesitate to use custom settings to,
+for example, set the dictionary size. Also, xz may produce a smaller
+file in single-threaded mode, so setting that explicitly is recommended.
+Example::
+
+ xz --threads=1 --check=crc32 --lzma2=dict=512KiB inputfile
+
+xz_dec API
+==========
+
+This is available with ``#include <linux/xz.h>``.
-Creating a limited XZ encoder may be considered if people think it is
-useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at
-the fastest settings, so it isn't clear if LZMA2 encoder is wanted
-into the kernel.
-
-Support for limited random-access reading is planned for the
-decompression code. I don't know if it could have any use in the
-kernel, but I know that it would be useful in some embedded projects
-outside the Linux kernel.
-
-Conformance to the .xz file format specification
-================================================
-
-There are a couple of corner cases where things have been simplified
-at expense of detecting errors as early as possible. These should not
-matter in practice all, since they don't cause security issues. But
-it is good to know this if testing the code e.g. with the test files
-from XZ Utils.
-
-Reporting bugs
-==============
-
-Before reporting a bug, please check that it's not fixed already
-at upstream. See <https://tukaani.org/xz/embedded.html> to get the
-latest code.
-
-Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
-Freenode and talk to Larhzu. I don't actively read LKML or other
-kernel-related mailing lists, so if there's something I should know,
-you should email to me personally or use IRC.
-
-Don't bother Igor Pavlov with questions about the XZ implementation
-in the kernel or about XZ Utils. While these two implementations
-include essential code that is directly based on Igor Pavlov's code,
-these implementations aren't maintained nor supported by him.
+.. kernel-doc:: include/linux/xz.h
diff --git a/Documentation/trace/debugging.rst b/Documentation/trace/debugging.rst
new file mode 100644
index 000000000000..54fb16239d70
--- /dev/null
+++ b/Documentation/trace/debugging.rst
@@ -0,0 +1,159 @@
+==============================
+Using the tracer for debugging
+==============================
+
+Copyright 2024 Google LLC.
+
+:Author: Steven Rostedt <rostedt@goodmis.org>
+:License: The GNU Free Documentation License, Version 1.2
+ (dual licensed under the GPL v2)
+
+- Written for: 6.12
+
+Introduction
+------------
+The tracing infrastructure can be very useful for debugging the Linux
+kernel. This document is a place to add various methods of using the tracer
+for debugging.
+
+First, make sure that the tracefs file system is mounted::
+
+ $ sudo mount -t tracefs tracefs /sys/kernel/tracing
+
+
+Using trace_printk()
+--------------------
+
+trace_printk() is a very lightweight utility that can be used in any context
+inside the kernel, with the exception of "noinstr" sections. It can be used
+in normal, softirq, interrupt and even NMI context. The trace data is
+written to the tracing ring buffer in a lockless way. To make it even
+lighter weight, when possible, it will only record the pointer to the format
+string, and save the raw arguments into the buffer. The format and the
+arguments will be post-processed when the ring buffer is read. This way the
+trace_printk() format conversions are not done during the hot path, where
+the trace is being recorded.
+
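+For example, a temporary debugging statement (purely illustrative; the
+``flags`` variable shown is a placeholder) could look like::
+
+	trace_printk("entered %s: flags=%lx\n", __func__, flags);
+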
+trace_printk() is meant only for debugging, and should never be added into
+a subsystem of the kernel. If you need debugging traces, add trace events
+instead. If a trace_printk() is found in the kernel, the following will
+appear in the dmesg output::
+
+ **********************************************************
+ ** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **
+ ** **
+ ** trace_printk() being used. Allocating extra memory. **
+ ** **
+ ** This means that this is a DEBUG kernel and it is **
+ ** unsafe for production use. **
+ ** **
+ ** If you see this message and you are not debugging **
+ ** the kernel, report this immediately to your vendor! **
+ ** **
+ ** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **
+ **********************************************************
+
+Debugging kernel crashes
+------------------------
+There are various methods of acquiring the state of the system when a kernel
+crash occurs. This could be from the oops message in printk, or one could
+use kexec/kdump. But these just show what happened at the time of the crash.
+It can also be very useful to know what happened leading up to the crash.
+The tracing ring buffer, by default, is a circular buffer that will
+overwrite older events with newer ones. When a crash happens, the content of
+the ring buffer will be all the events that led up to the crash.
+
+There are several kernel command line parameters that can be used to help in
+this. The first is "ftrace_dump_on_oops". This will dump the tracing ring
+buffer to the console when an oops occurs. This can be useful if the console
+is being logged somewhere. If a serial console is used, it may be prudent to
+make sure the ring buffer is relatively small, otherwise the dumping of the
+ring buffer may take several minutes or even hours to finish. Here's an
+example of the kernel command line::
+
+ ftrace_dump_on_oops trace_buf_size=50K
+
+Note, the tracing buffer is made up of per CPU buffers where each of these
+buffers is broken up into sub-buffers that are by default PAGE_SIZE. The
+trace_buf_size option above sets each of the per CPU buffers to 50K, so,
+on a machine with 8 CPUs, that's actually 400K total.
+
+Persistent buffers across boots
+-------------------------------
+If the system memory allows it, the tracing ring buffer can be specified at
+a specific location in memory. If the location is the same across boots and
+the memory is not modified, the tracing buffer can be retrieved from the
+following boot. There are two ways to reserve memory for the use of the
+ring buffer.
+
+The more reliable way (on x86) is to reserve memory with the "memmap" kernel
+command line option and then use that memory for the trace_instance. This
+requires a bit of knowledge of the physical memory layout of the system. The
+advantage of using this method is that the memory for the ring buffer will
+always be the same::
+
+  memmap=12M$0x284500000 trace_instance=boot_map@0x284500000:12M
+
+The memmap above reserves 12 megabytes of memory at the physical memory
+location 0x284500000. Then the trace_instance option will create a trace
+instance "boot_map" at that same location with the same amount of memory
+reserved. As the ring buffer is broken up into per CPU buffers, the 12
+megabytes will be divided evenly between those CPUs. If you have 8 CPUs,
+each per CPU ring buffer will be 1.5 megabytes in size. Note, this also
+includes metadata, so the amount of memory actually used by the ring buffer
+will be slightly smaller.
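+
+After a crash and reboot, the preserved events can be read from the
+instance like any other trace buffer (the path assumes the tracefs mount
+shown at the start of this document)::
+
+  # cat /sys/kernel/tracing/instances/boot_map/trace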
+
+Another more generic but less robust way to allocate a ring buffer mapping
+at boot is with the "reserve_mem" option::
+
+ reserve_mem=12M:4096:trace trace_instance=boot_map@trace
+
+The reserve_mem option above will find 12 megabytes of memory that are
+available at boot up, aligned to 4096 bytes. It will label this memory as
+"trace" so that it can be referenced by later command line options.
+
+The trace_instance option creates a "boot_map" instance and will use the
+memory reserved by reserve_mem that was labeled as "trace". This method is
+more generic but may not be as reliable. Due to KASLR, the memory reserved
+by reserve_mem may not be located at the same location across boots. If this
+happens, then the ring buffer will not be from the previous boot and will be
+reset.
+
+Sometimes a larger alignment can keep KASLR from moving things around in
+such a way that it changes the location of the reserve_mem region. With a
+larger alignment, you may find that the buffer is placed more consistently
+across boots::
+
+ reserve_mem=12M:0x2000000:trace trace_instance=boot_map@trace
+
+On boot up, the memory reserved for the ring buffer is validated. It will go
+through a series of tests to make sure that the ring buffer contains valid
+data. If the data is valid, the buffer is made available to read from the
+instance. If any of the tests fail, the entire ring buffer is cleared and
+initialized as new.
+
+The layout of this mapped memory may not be consistent from kernel to
+kernel, so only the same kernel is guaranteed to work if the mapping is
+preserved. Switching to a different kernel version may find a different
+layout and mark the buffer as invalid.
+
+Using trace_printk() in the boot instance
+-----------------------------------------
+By default, the content of trace_printk() goes into the top level tracing
+instance. But this instance is never preserved across boots. To have the
+trace_printk() content, and some other internal tracing (like stack dumps),
+go to the preserved buffer, either set the instance to be the trace_printk()
+destination from the kernel command line, or set it after boot up via the
+trace_printk_dest option.
+
+After boot up::
+
+ echo 1 > /sys/kernel/tracing/instances/boot_map/options/trace_printk_dest
+
+From the kernel command line::
+
+ reserve_mem=12M:4096:trace trace_instance=boot_map^traceprintk^traceoff@trace
+
+If setting it from the kernel command line, it is recommended to also
+disable tracing with the "traceoff" flag, and enable tracing after boot up.
+Otherwise the trace from the most recent boot will be mixed with the trace
+from the previous boot, which may make it confusing to read.
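+
+When tracing was disabled with the "traceoff" flag, it can be enabled again
+after the previous boot's data has been read, using the instance's
+"tracing_on" file::
+
+  # echo 1 > /sys/kernel/tracing/instances/boot_map/tracing_on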
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 5aba74872ba7..4073ca48af4a 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -1186,6 +1186,18 @@ Here are the available options:
trace_printk
Can disable trace_printk() from writing into the buffer.
+ trace_printk_dest
+ Set to have trace_printk() and similar internal tracing functions
+ write into this instance. Note, only one trace instance can have
+ this set. By setting this flag, it clears the trace_printk_dest flag
+ of the instance that had it set previously. By default, the top
+	  level trace has this set, and will get it set again if another
+	  instance sets this flag and then clears it.
+
+	  This flag cannot be cleared by the top level instance, as it is the
+	  default instance. The only way to clear this flag on the top level
+	  instance is to set it in another instance.
+
annotate
It is sometimes confusing when the CPU buffers are full
and one CPU buffer had a lot of events recently, thus
diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst
index bf21ff84f396..cff7b6f98c59 100644
--- a/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst
+++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/start.rst
@@ -15,7 +15,7 @@
本文通过演示DAMON的默认用户空间工具,简要地介绍了如何使用DAMON。请注意,为了简洁
起见,本文档只描述了它的部分功能。更多细节请参考该工具的使用文档。
-`doc <https://github.com/awslabs/damo/blob/next/USAGE.md>`_ .
+`doc <https://github.com/damonitor/damo/blob/next/USAGE.md>`_ .
前提条件
@@ -31,7 +31,7 @@
------------
在演示中,我们将使用DAMON的默认用户空间工具,称为DAMON Operator(DAMO)。它可以在
-https://github.com/awslabs/damo找到。下面的例子假设DAMO在你的$PATH上。当然,但
+https://github.com/damonitor/damo找到。下面的例子假设DAMO在你的$PATH上。当然,但
这并不是强制性的。
因为DAMO使用了DAMON的sysfs接口(详情请参考:doc:`usage`),你应该确保
diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
index da2745464ece..50f6f0b6bf11 100644
--- a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
+++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst
@@ -16,16 +16,16 @@
DAMON 为不同的用户提供了下面这些接口。
- *DAMON用户空间工具。*
- `这 <https://github.com/awslabs/damo>`_ 为有这特权的人, 如系统管理员,希望有一个刚好
+ `这 <https://github.com/damonitor/damo>`_ 为有这特权的人, 如系统管理员,希望有一个刚好
可以工作的人性化界面。
使用它,用户可以以人性化的方式使用DAMON的主要功能。不过,它可能不会为特殊情况进行高度调整。
它同时支持虚拟和物理地址空间的监测。更多细节,请参考它的 `使用文档
- <https://github.com/awslabs/damo/blob/next/USAGE.md>`_。
+ <https://github.com/damonitor/damo/blob/next/USAGE.md>`_。
- *sysfs接口。*
:ref:`这 <sysfs_interface>` 是为那些希望更高级的使用DAMON的特权用户空间程序员准备的。
使用它,用户可以通过读取和写入特殊的sysfs文件来使用DAMON的主要功能。因此,你可以编写和使
用你个性化的DAMON sysfs包装程序,代替你读/写sysfs文件。 `DAMON用户空间工具
- <https://github.com/awslabs/damo>`_ 就是这种程序的一个例子 它同时支持虚拟和物理地址
+ <https://github.com/damonitor/damo>`_ 就是这种程序的一个例子 它同时支持虚拟和物理地址
空间的监测。注意,这个界面只提供简单的监测结果 :ref:`统计 <damos_stats>`。对于详细的监测
结果,DAMON提供了一个:ref:`跟踪点 <tracepoint>`。
- *debugfs interface.*
@@ -332,7 +332,7 @@ tried_regions/<N>/
# echo 500 > watermarks/mid
# echo 300 > watermarks/low
-请注意,我们强烈建议使用用户空间的工具,如 `damo <https://github.com/awslabs/damo>`_ ,
+请注意,我们强烈建议使用用户空间的工具,如 `damo <https://github.com/damonitor/damo>`_ ,
而不是像上面那样手动读写文件。以上只是一个例子。
debugfs接口
diff --git a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst
index f1e9ab18206c..472761938682 100644
--- a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst
+++ b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst
@@ -87,6 +87,38 @@ PCH-LPC/PCH-MSI,然后被EIOINTC统一收集,再直接到达CPUINTC::
| Devices |
+---------+
+高级扩展IRQ模型
+===============
+
+在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC,
+CPU串口(UARTs)中断发送到LIOINTC,PCH-MSI中断发送到AVECINTC,而后通过AVECINTC直接
+送达CPUINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/PCH-LPC,然后由EIOINTC
+统一收集,再直接到达CPUINTC::
+
+ +-----+ +-----------------------+ +-------+
+ | IPI | --> | CPUINTC | <-- | Timer |
+ +-----+ +-----------------------+ +-------+
+ ^ ^ ^
+ | | |
+ +---------+ +----------+ +---------+ +-------+
+ | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs |
+ +---------+ +----------+ +---------+ +-------+
+ ^ ^
+ | |
+ +---------+ +---------+
+ | PCH-PIC | | PCH-MSI |
+ +---------+ +---------+
+ ^ ^ ^
+ | | |
+ +---------+ +---------+ +---------+
+ | Devices | | PCH-LPC | | Devices |
+ +---------+ +---------+ +---------+
+ ^
+ |
+ +---------+
+ | Devices |
+ +---------+
+
ACPI相关的定义
==============
diff --git a/Documentation/translations/zh_CN/mm/page_migration.rst b/Documentation/translations/zh_CN/mm/page_migration.rst
index f95063826a15..8c8461c6cb9f 100644
--- a/Documentation/translations/zh_CN/mm/page_migration.rst
+++ b/Documentation/translations/zh_CN/mm/page_migration.rst
@@ -50,8 +50,8 @@ mbind()设置一个新的内存策略。一个进程的页面也可以通过sys_
1. 从LRU中移除页面。
- 要迁移的页面列表是通过扫描页面并把它们移到列表中来生成的。这是通过调用 isolate_lru_page()
- 来完成的。调用isolate_lru_page()增加了对该页的引用,这样在页面迁移发生时它就不会
+ 要迁移的页面列表是通过扫描页面并把它们移到列表中来生成的。这是通过调用 folio_isolate_lru()
+ 来完成的。调用folio_isolate_lru()增加了对该页的引用,这样在页面迁移发生时它就不会
消失。它还可以防止交换器或其他扫描器遇到该页。
@@ -65,7 +65,7 @@ migrate_pages()如何工作
=======================
migrate_pages()对它的页面列表进行了多次处理。如果当时对一个页面的所有引用都可以被移除,
-那么这个页面就会被移动。该页已经通过isolate_lru_page()从LRU中移除,并且refcount被
+那么这个页面就会被移动。该页已经通过folio_isolate_lru()从LRU中移除,并且refcount被
增加,以便在页面迁移发生时不释放该页。
步骤:
diff --git a/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst b/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst
index 1822956be0e0..57d36bfbb1b3 100644
--- a/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst
+++ b/Documentation/translations/zh_TW/admin-guide/mm/damon/start.rst
@@ -15,7 +15,7 @@
本文通過演示DAMON的默認用戶空間工具,簡要地介紹瞭如何使用DAMON。請注意,爲了簡潔
起見,本文檔只描述了它的部分功能。更多細節請參考該工具的使用文檔。
-`doc <https://github.com/awslabs/damo/blob/next/USAGE.md>`_ .
+`doc <https://github.com/damonitor/damo/blob/next/USAGE.md>`_ .
前提條件
@@ -31,7 +31,7 @@
------------
在演示中,我們將使用DAMON的默認用戶空間工具,稱爲DAMON Operator(DAMO)。它可以在
-https://github.com/awslabs/damo找到。下面的例子假設DAMO在你的$PATH上。當然,但
+https://github.com/damonitor/damo找到。下面的例子假設DAMO在你的$PATH上。當然,但
這並不是強制性的。
因爲DAMO使用了DAMON的sysfs接口(詳情請參考:doc:`usage`),你應該確保
diff --git a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
index 7464279f9b7d..fbbbbad59ee4 100644
--- a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
+++ b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst
@@ -16,16 +16,16 @@
DAMON 爲不同的用戶提供了下面這些接口。
- *DAMON用戶空間工具。*
- `這 <https://github.com/awslabs/damo>`_ 爲有這特權的人, 如系統管理員,希望有一個剛好
+ `這 <https://github.com/damonitor/damo>`_ 爲有這特權的人, 如系統管理員,希望有一個剛好
可以工作的人性化界面。
使用它,用戶可以以人性化的方式使用DAMON的主要功能。不過,它可能不會爲特殊情況進行高度調整。
它同時支持虛擬和物理地址空間的監測。更多細節,請參考它的 `使用文檔
- <https://github.com/awslabs/damo/blob/next/USAGE.md>`_。
+ <https://github.com/damonitor/damo/blob/next/USAGE.md>`_。
- *sysfs接口。*
:ref:`這 <sysfs_interface>` 是爲那些希望更高級的使用DAMON的特權用戶空間程序員準備的。
使用它,用戶可以通過讀取和寫入特殊的sysfs文件來使用DAMON的主要功能。因此,你可以編寫和使
用你個性化的DAMON sysfs包裝程序,代替你讀/寫sysfs文件。 `DAMON用戶空間工具
- <https://github.com/awslabs/damo>`_ 就是這種程序的一個例子 它同時支持虛擬和物理地址
+ <https://github.com/damonitor/damo>`_ 就是這種程序的一個例子 它同時支持虛擬和物理地址
空間的監測。注意,這個界面只提供簡單的監測結果 :ref:`統計 <damos_stats>`。對於詳細的監測
結果,DAMON提供了一個:ref:`跟蹤點 <tracepoint>`。
- *debugfs interface.*
@@ -332,7 +332,7 @@ tried_regions/<N>/
# echo 500 > watermarks/mid
# echo 300 > watermarks/low
-請注意,我們強烈建議使用用戶空間的工具,如 `damo <https://github.com/awslabs/damo>`_ ,
+請注意,我們強烈建議使用用戶空間的工具,如 `damo <https://github.com/damonitor/damo>`_ ,
而不是像上面那樣手動讀寫文件。以上只是一個例子。
debugfs接口
diff --git a/Documentation/usb/functionfs-desc.rst b/Documentation/usb/functionfs-desc.rst
new file mode 100644
index 000000000000..39649774da54
--- /dev/null
+++ b/Documentation/usb/functionfs-desc.rst
@@ -0,0 +1,39 @@
+======================
+FunctionFS Descriptors
+======================
+
+Some of the descriptors that can be written to the FFS gadget are
+described below. Device and configuration descriptors are handled
+by the composite gadget and are not written by the user to the
+FFS gadget.
+
+Descriptors are written to the "ep0" file in the FFS gadget
+following the descriptor header.
+
+.. kernel-doc:: include/uapi/linux/usb/functionfs.h
+ :doc: descriptors
+
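+As a rough illustration (error handling omitted; the header and descriptor
+layouts are defined in ``include/uapi/linux/usb/functionfs.h``, and the
+mount path, buffers and lengths are placeholders), userspace opens the
+function's "ep0" file and writes the descriptors, then the strings:
+
+.. code-block:: c
+
+	/* descs: struct usb_functionfs_descs_head_v2 followed by the
+	 * fs/hs/ss descriptors; strs: the strings header and string data.
+	 */
+	int fd = open("/dev/usb-ffs/myfunc/ep0", O_RDWR);
+
+	write(fd, descs, descs_len);
+	write(fd, strs, strs_len);
+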
+Interface Descriptors
+---------------------
+
+Standard USB interface descriptors may be written. The class/subclass of the
+most recent interface descriptor determines what type of class-specific
+descriptors are accepted.
+
+Class-Specific Descriptors
+--------------------------
+
+Class-specific descriptors are accepted only for the class/subclass of the
+most recent interface descriptor. The following are some of the
+class-specific descriptors that are supported.
+
+DFU Functional Descriptor
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the interface class is USB_CLASS_APP_SPEC and the interface subclass
+is USB_SUBCLASS_DFU, a DFU functional descriptor can be provided.
+The DFU functional descriptor is described in the USB specification for
+Device Firmware Upgrade (DFU), version 1.1 as of this writing.
+
+.. kernel-doc:: include/uapi/linux/usb/functionfs.h
+ :doc: usb_dfu_functional_descriptor
diff --git a/Documentation/usb/functionfs.rst b/Documentation/usb/functionfs.rst
index d05a775bc45b..f7487b0d8057 100644
--- a/Documentation/usb/functionfs.rst
+++ b/Documentation/usb/functionfs.rst
@@ -25,6 +25,8 @@ interface numbers starting from zero). The FunctionFS changes
them as needed also handling situation when numbers differ in
different configurations.
+For more information about FunctionFS descriptors see :doc:`functionfs-desc`.
+
When descriptors and strings are written "ep#" files appear
(one for each declared endpoint) which handle communication on
a single endpoint. Again, FunctionFS takes care of the real
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index b086c7ab72f0..bf555c2270f5 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -765,6 +765,17 @@ The uac2 function provides these attributes in its function directory:
req_number the number of pre-allocated request for both capture
and playback
function_name name of the interface
+ if_ctrl_name topology control name
+ clksrc_in_name input clock name
+ clksrc_out_name output clock name
+ p_it_name playback input terminal name
+ p_it_ch_name playback input first channel name
+ p_ot_name playback output terminal name
+	p_fu_vol_name		playback mute/volume functional unit name
+ c_it_name capture input terminal name
+ c_it_ch_name capture input first channel name
+ c_ot_name capture output terminal name
+	c_fu_vol_name		capture mute/volume functional unit name
c_terminal_type code of the capture terminal type
p_terminal_type code of the playback terminal type
================ ====================================================
@@ -957,6 +968,14 @@ The uac1 function provides these attributes in its function directory:
req_number the number of pre-allocated requests for both capture
and playback
function_name name of the interface
+ p_it_name playback input terminal name
+ p_it_ch_name playback channels name
+ p_ot_name playback output terminal name
+ p_fu_vol_name playback mute/volume functional unit name
+ c_it_name capture input terminal name
+ c_it_ch_name capture channels name
+ c_ot_name capture output terminal name
+ c_fu_vol_name capture mute/volume functional unit name
================ ====================================================
The attributes have sane default values.
diff --git a/Documentation/usb/index.rst b/Documentation/usb/index.rst
index 27955dad95e1..826492c813ac 100644
--- a/Documentation/usb/index.rst
+++ b/Documentation/usb/index.rst
@@ -11,6 +11,7 @@ USB support
dwc3
ehci
functionfs
+ functionfs-desc
gadget_configfs
gadget_hid
gadget_multi
diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst
index 37dafce8038b..c8d3e46badc5 100644
--- a/Documentation/userspace-api/landlock.rst
+++ b/Documentation/userspace-api/landlock.rst
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
-:Date: July 2024
+:Date: September 2024
The goal of Landlock is to enable to restrict ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@@ -81,6 +81,9 @@ to be explicit about the denied-by-default access rights.
.handled_access_net =
LANDLOCK_ACCESS_NET_BIND_TCP |
LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .scoped =
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
+ LANDLOCK_SCOPE_SIGNAL,
};
Because we may not know on which kernel version an application will be
@@ -119,6 +122,11 @@ version, and only use the available subset of access rights:
case 4:
/* Removes LANDLOCK_ACCESS_FS_IOCTL_DEV for ABI < 5 */
ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_IOCTL_DEV;
+ __attribute__((fallthrough));
+ case 5:
+ /* Removes LANDLOCK_SCOPE_* for ABI < 6 */
+ ruleset_attr.scoped &= ~(LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
+ LANDLOCK_SCOPE_SIGNAL);
}
This enables to create an inclusive ruleset that will contain our rules.
@@ -306,6 +314,38 @@ To be allowed to use :manpage:`ptrace(2)` and related syscalls on a target
process, a sandboxed process should have a subset of the target process rules,
which means the tracee must be in a sub-domain of the tracer.
+IPC scoping
+-----------
+
+Similar to the implicit `Ptrace restrictions`_, we may want to further restrict
+interactions between sandboxes. Each Landlock domain can be explicitly scoped
+for a set of actions by specifying it on a ruleset. For example, if a
+sandboxed process should not be able to :manpage:`connect(2)` to a
+non-sandboxed process through abstract :manpage:`unix(7)` sockets, we can
+specify such restriction with ``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET``.
+Moreover, if a sandboxed process should not be able to send a signal to a
+non-sandboxed process, we can specify this restriction with
+``LANDLOCK_SCOPE_SIGNAL``.
+
+A sandboxed process can connect to a non-sandboxed process when its domain is
+not scoped. If a process's domain is scoped, it can only connect to sockets
+created by processes in the same scope.
+Moreover, if a process's domain is scoped for signals, it cannot send signals
+to a non-scoped process; it can only send signals to processes in the same
+scope.
+
+A connected datagram socket behaves like a stream socket when its domain is
+scoped, meaning that if the domain is scoped after the socket is connected, it
+can still :manpage:`send(2)` data just like a stream socket. However, in the
+same scenario, a non-connected datagram socket cannot send data (with
+:manpage:`sendto(2)`) outside its scope.
+
+A process with a scoped domain can inherit a socket created by a non-scoped
+process. The process cannot connect to this socket since it has a scoped
+domain.
+
+IPC scoping does not support exceptions, so if a domain is scoped, no rules can
+be added to allow access to resources or processes outside of the scope.
+
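+For example, a ruleset that only restricts IPC (a minimal sketch, assuming
+the ``landlock_create_ruleset()`` wrapper used elsewhere in this document)
+could be created with:
+
+.. code-block:: c
+
+    struct landlock_ruleset_attr ruleset_attr = {
+        .scoped =
+            LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
+            LANDLOCK_SCOPE_SIGNAL,
+    };
+    int ruleset_fd;
+
+    ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+                                         sizeof(ruleset_attr), 0);
+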
Truncating files
----------------
@@ -404,7 +444,7 @@ Access rights
-------------
.. kernel-doc:: include/uapi/linux/landlock.h
- :identifiers: fs_access net_access
+ :identifiers: fs_access net_access scope
Creating a new ruleset
----------------------
@@ -541,6 +581,20 @@ earlier ABI.
Starting with the Landlock ABI version 5, it is possible to restrict the use of
:manpage:`ioctl(2)` using the new ``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right.
+Abstract UNIX socket scoping (ABI < 6)
+--------------------------------------
+
+Starting with the Landlock ABI version 6, it is possible to restrict
+connections to an abstract :manpage:`unix(7)` socket by setting
+``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET`` to the ``scoped`` ruleset attribute.
+
+Signal scoping (ABI < 6)
+------------------------
+
+Starting with the Landlock ABI version 6, it is possible to restrict
+:manpage:`signal(7)` sending by setting ``LANDLOCK_SCOPE_SIGNAL`` to the
+``scoped`` ruleset attribute.
+
.. _kernel_support:
Kernel support
diff --git a/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst b/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst
index d5e014ce19b5..1d5248979a6d 100644
--- a/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/userspace-api/media/cec/cec-ioc-adap-g-caps.rst
@@ -137,6 +137,12 @@ returns the information to the application. The ioctl never fails.
- 0x00000100
- If this capability is set, then :ref:`CEC_ADAP_G_CONNECTOR_INFO` can
be used.
+ * .. _`CEC-CAP-REPLY-VENDOR-ID`:
+
+ - ``CEC_CAP_REPLY_VENDOR_ID``
+ - 0x00000200
+ - If this capability is set, then
+ :ref:`CEC_MSG_FL_REPLY_VENDOR_ID <cec-msg-flags>` can be used.
Return Value
============
diff --git a/Documentation/userspace-api/media/cec/cec-ioc-receive.rst b/Documentation/userspace-api/media/cec/cec-ioc-receive.rst
index 364938ad34df..3e6c511e054f 100644
--- a/Documentation/userspace-api/media/cec/cec-ioc-receive.rst
+++ b/Documentation/userspace-api/media/cec/cec-ioc-receive.rst
@@ -232,6 +232,21 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
capability. If that is not set, then the ``EPERM`` error code is
returned.
+ * .. _`CEC-MSG-FL-REPLY-VENDOR-ID`:
+
+ - ``CEC_MSG_FL_REPLY_VENDOR_ID``
+ - 4
+ - This flag is only available if the ``CEC_CAP_REPLY_VENDOR_ID`` capability
+ is set. If this flag is set, then the reply is expected to consist of
+ the ``CEC_MSG_VENDOR_COMMAND_WITH_ID`` opcode followed by the Vendor ID
+ (in bytes 1-4 of the message), followed by the ``struct cec_msg``
+ ``reply`` field.
+
+ Note that this assumes that the byte after the Vendor ID is a
+ vendor-specific opcode.
+
+ This flag makes it easier to wait for replies to vendor commands.
+
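+For example (a rough sketch assuming the message helpers from the userspace
+``cec-funcs.h`` header; ``fd``, ``vendor_id``, ``cmd``, ``cmd_size`` and
+``expected_opcode`` are placeholders), a transmit using this flag could
+look like::
+
+	struct cec_msg msg;
+
+	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
+	cec_msg_vendor_command_with_id(&msg, vendor_id, cmd_size, cmd);
+	/* Wait for a reply carrying this vendor-specific opcode. */
+	msg.flags = CEC_MSG_FL_REPLY_VENDOR_ID;
+	msg.reply = expected_opcode;
+	ioctl(fd, CEC_TRANSMIT, &msg);
+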
.. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{10.8cm}|
.. _cec-tx-status:
diff --git a/Documentation/userspace-api/media/v4l/biblio.rst b/Documentation/userspace-api/media/v4l/biblio.rst
index 72aef1759b60..35674eeae20d 100644
--- a/Documentation/userspace-api/media/v4l/biblio.rst
+++ b/Documentation/userspace-api/media/v4l/biblio.rst
@@ -334,6 +334,17 @@ VESA DMT
:author: Video Electronics Standards Association (http://www.vesa.org)
+.. _vesaeddc:
+
+E-DDC
+=====
+
+:title: VESA Enhanced Display Data Channel (E-DDC) Standard
+:subtitle: Version 1.3
+
+:author: Video Electronics Standards Association (http://www.vesa.org)
+
.. _vesaedid:
EDID
diff --git a/Documentation/userspace-api/media/v4l/buffer.rst b/Documentation/userspace-api/media/v4l/buffer.rst
index 52bbee81c080..856874341882 100644
--- a/Documentation/userspace-api/media/v4l/buffer.rst
+++ b/Documentation/userspace-api/media/v4l/buffer.rst
@@ -694,41 +694,6 @@ enum v4l2_memory
- 4
- The buffer is used for :ref:`DMA shared buffer <dmabuf>` I/O.
-.. _memory-flags:
-
-Memory Consistency Flags
-------------------------
-
-.. raw:: latex
-
- \small
-
-.. tabularcolumns:: |p{7.0cm}|p{2.1cm}|p{8.4cm}|
-
-.. cssclass:: longtable
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
- :widths: 3 1 4
-
- * .. _`V4L2-MEMORY-FLAG-NON-COHERENT`:
-
- - ``V4L2_MEMORY_FLAG_NON_COHERENT``
- - 0x00000001
- - A buffer is allocated either in coherent (it will be automatically
- coherent between the CPU and the bus) or non-coherent memory. The
- latter can provide performance gains, for instance the CPU cache
- sync/flush operations can be avoided if the buffer is accessed by the
- corresponding device only and the CPU does not read/write to/from that
- buffer. However, this requires extra care from the driver -- it must
- guarantee memory consistency by issuing a cache flush/sync when
- consistency is needed. If this flag is set V4L2 will attempt to
- allocate the buffer in non-coherent memory. The flag takes effect
- only if the buffer is used for :ref:`memory mapping <mmap>` I/O and the
- queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS
- <V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS>` capability.
-
.. raw:: latex
\normalsize
diff --git a/Documentation/userspace-api/media/v4l/capture.c.rst b/Documentation/userspace-api/media/v4l/capture.c.rst
index eef6772967a1..349541b1dac0 100644
--- a/Documentation/userspace-api/media/v4l/capture.c.rst
+++ b/Documentation/userspace-api/media/v4l/capture.c.rst
@@ -333,7 +333,7 @@ file: media/v4l/capture.c
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
fprintf(stderr, "%s does not support "
- "memory mappingn", dev_name);
+ "memory mapping\n", dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_REQBUFS");
@@ -391,7 +391,7 @@ file: media/v4l/capture.c
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
if (EINVAL == errno) {
fprintf(stderr, "%s does not support "
- "user pointer i/on", dev_name);
+ "user pointer i/o\n", dev_name);
exit(EXIT_FAILURE);
} else {
errno_exit("VIDIOC_REQBUFS");
@@ -547,7 +547,7 @@ file: media/v4l/capture.c
}
if (!S_ISCHR(st.st_mode)) {
- fprintf(stderr, "%s is no devicen", dev_name);
+ fprintf(stderr, "%s is no device\n", dev_name);
exit(EXIT_FAILURE);
}
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
index 22bde00d42df..0da635691fdc 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
@@ -2993,7 +2993,11 @@ This structure contains all loop filter related parameters. See sections
- Applications and drivers must set this to zero.
* - __u16
- ``max_frame_width_minus_1``
- - specifies the maximum frame width minus 1 for the frames represented by
+ - Specifies the maximum frame width minus 1 for the frames represented by
+ this sequence header.
+ * - __u16
+ - ``max_frame_height_minus_1``
+ - Specifies the maximum frame height minus 1 for the frames represented by
this sequence header.
.. _av1_sequence_flags:
@@ -3374,7 +3378,7 @@ semantics" of :ref:`av1`.
- ``uv_pri_strength[V4L2_AV1_CDEF_MAX]``
- Specifies the strength of the primary filter.
* - __u8
- - ``uv_secondary_strength[V4L2_AV1_CDEF_MAX]``
+ - ``uv_sec_strength[V4L2_AV1_CDEF_MAX]``
- Specifies the strength of the secondary filter.
.. c:type:: v4l2_av1_segment_feature
@@ -3439,7 +3443,7 @@ semantics" of :ref:`av1`.
- Bitmask defining which features are enabled in each segment. Use
V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask.
* - __u16
- - `feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX]``
+ - ``feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX]``
- Data attached to each feature. Data entry is only valid if the feature
is enabled.
@@ -3490,7 +3494,7 @@ AV1 Loop filter params as defined in section 6.8.10 "Loop filter semantics" of
.. tabularcolumns:: |p{1.5cm}|p{5.8cm}|p{10.0cm}|
-.. flat-table:: struct v4l2_av1_global_motion
+.. flat-table:: struct v4l2_av1_loop_filter
:header-rows: 0
:stub-columns: 0
:widths: 1 1 2
@@ -3806,12 +3810,12 @@ AV1 Tx mode as described in section 6.8.21 "TX mode semantics" of :ref:`av1`.
* - struct :c:type:`v4l2_av1_quantization`
- ``quantization``
- Quantization parameters.
- * - struct :c:type:`v4l2_av1_segmentation`
- - ``segmentation``
- - Segmentation parameters.
* - __u8
- ``superres_denom``
- The denominator for the upscaling ratio.
+ * - struct :c:type:`v4l2_av1_segmentation`
+ - ``segmentation``
+ - Segmentation parameters.
* - struct :c:type:`v4l2_av1_loop_filter`
- ``loop_filter``
- Loop filter params
@@ -3829,7 +3833,7 @@ AV1 Tx mode as described in section 6.8.21 "TX mode semantics" of :ref:`av1`.
* - struct :c:type:`v4l2_av1_loop_restoration`
- ``loop_restoration``
- Loop restoration parameters.
- * - struct :c:type:`v4l2_av1_loop_global_motion`
+ * - struct :c:type:`v4l2_av1_global_motion`
- ``global_motion``
- Global motion parameters.
* - __u32
diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst
index b1c2ab2854af..27803dca8d3e 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-image-process.rst
@@ -31,7 +31,7 @@ Image Process Control IDs
Pixel sampling rate in the device's pixel array. This control is
read-only and its unit is pixels / second.
- Some devices use horizontal and vertical balanking to configure the frame
+ Some devices use horizontal and vertical blanking to configure the frame
rate. The frame rate can be calculated from the pixel rate, analogue crop
rectangle as well as horizontal and vertical blanking. The pixel rate
control may be present in a different sub-device than the blanking controls
diff --git a/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst b/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
index fa04f00bcd2e..959f6bde8695 100644
--- a/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
+++ b/Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
@@ -1,28 +1,67 @@
.. SPDX-License-Identifier: GPL-2.0
-.. _v4l2-meta-fmt-rk-isp1-params:
-
.. _v4l2-meta-fmt-rk-isp1-stat-3a:
-*****************************************************************************
-V4L2_META_FMT_RK_ISP1_PARAMS ('rk1p'), V4L2_META_FMT_RK_ISP1_STAT_3A ('rk1s')
-*****************************************************************************
+************************************************************************************************************************
+V4L2_META_FMT_RK_ISP1_PARAMS ('rk1p'), V4L2_META_FMT_RK_ISP1_STAT_3A ('rk1s'), V4L2_META_FMT_RK_ISP1_EXT_PARAMS ('rk1e')
+************************************************************************************************************************
+========================
Configuration parameters
========================
-The configuration parameters are passed to the
+The configuration of the RkISP1 ISP is performed by userspace by providing
+parameters for the ISP to the driver using the :c:type:`v4l2_meta_format`
+interface.
+
+There are two methods of configuring the ISP: the `fixed parameters`
+configuration format and the `extensible parameters` configuration
+format.
+
+.. _v4l2-meta-fmt-rk-isp1-params:
+
+Fixed parameters configuration format
+=====================================
+
+When using the fixed configuration format, parameters are passed to the
:ref:`rkisp1_params <rkisp1_params>` metadata output video node, using
-the :c:type:`v4l2_meta_format` interface. The buffer contains
-a single instance of the C structure :c:type:`rkisp1_params_cfg` defined in
-``rkisp1-config.h``. So the structure can be obtained from the buffer by:
+the `V4L2_META_FMT_RK_ISP1_PARAMS` meta format.
+
+The buffer contains a single instance of the C structure
+:c:type:`rkisp1_params_cfg` defined in ``rkisp1-config.h``. So the structure can
+be obtained from the buffer by:
.. code-block:: c
struct rkisp1_params_cfg *params = (struct rkisp1_params_cfg*) buffer;
+This method supports only a subset of the ISP features. New applications
+should use the extensible parameters method instead.
+
+.. _v4l2-meta-fmt-rk-isp1-ext-params:
+
+Extensible parameters configuration format
+==========================================
+
+When using the extensible configuration format, parameters are passed to the
+:ref:`rkisp1_params <rkisp1_params>` metadata output video node, using
+the `V4L2_META_FMT_RK_ISP1_EXT_PARAMS` meta format.
+
+The buffer contains a single instance of the C structure
+:c:type:`rkisp1_ext_params_cfg` defined in ``rkisp1-config.h``. The
+:c:type:`rkisp1_ext_params_cfg` structure is designed to allow userspace to
+populate the data buffer with only the configuration data for the ISP blocks it
+intends to configure. The extensible parameters format allows developers to
+define new block types to support new configuration parameters, and defines a
+versioning scheme so that the format can be extended without breaking
+compatibility with existing applications.
+
+For these reasons, this configuration method is preferred over the `fixed
+parameters` format alternative.
+
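+As with the fixed format (a sketch mirroring the example above), the
+structure can be obtained from the buffer by:
+
+.. code-block:: c
+
+    struct rkisp1_ext_params_cfg *cfg = (struct rkisp1_ext_params_cfg *) buffer;
+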
.. rkisp1_stat_buffer
+===========================
3A and histogram statistics
===========================
diff --git a/Documentation/userspace-api/media/v4l/mt2110t.svg b/Documentation/userspace-api/media/v4l/mt2110t.svg
new file mode 100644
index 000000000000..a6e82f2c73df
--- /dev/null
+++ b/Documentation/userspace-api/media/v4l/mt2110t.svg
@@ -0,0 +1,315 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-no-invariants-or-later -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg version="1.2" width="140mm" height="220mm" viewBox="0 0 14000 22000" preserveAspectRatio="xMidYMid" fill-rule="evenodd" stroke-width="28.222" stroke-linejoin="round" xmlns="http://www.w3.org/2000/svg" xmlns:ooo="http://xml.openoffice.org/svg/export" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:presentation="http://sun.com/xmlns/staroffice/presentation" xmlns:smil="http://www.w3.org/2001/SMIL20/" xmlns:anim="urn:oasis:names:tc:opendocument:xmlns:animation:1.0" xmlns:svg="urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0" xml:space="preserve">
+ <defs class="ClipPathGroup">
+ <clipPath id="presentation_clip_path" clipPathUnits="userSpaceOnUse">
+ <rect x="0" y="0" width="14000" height="22000"/>
+ </clipPath>
+ <clipPath id="presentation_clip_path_shrink" clipPathUnits="userSpaceOnUse">
+ <rect x="14" y="22" width="13972" height="21956"/>
+ </clipPath>
+ </defs>
+ <defs>
+ <font id="EmbeddedFont_1" horiz-adv-x="2048">
+ <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="normal" font-style="normal" ascent="1852" descent="423"/>
+ <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/>
+ <glyph unicode="y" horiz-adv-x="1033" d="M 191,-425 C 142,-425 100,-421 67,-414 L 67,-279 C 92,-283 120,-285 151,-285 263,-285 352,-203 417,-38 L 434,5 5,1082 197,1082 425,484 C 428,475 432,464 437,451 442,438 457,394 482,320 507,246 521,205 523,196 L 593,393 830,1082 1020,1082 604,0 C 559,-115 518,-201 479,-258 440,-314 398,-356 351,-384 304,-411 250,-425 191,-425 Z"/>
+ <glyph unicode="x" horiz-adv-x="1006" d="M 801,0 L 510,444 217,0 23,0 408,556 41,1082 240,1082 510,661 778,1082 979,1082 612,558 1002,0 801,0 Z"/>
+ <glyph unicode="w" horiz-adv-x="1509" d="M 1174,0 L 965,0 776,765 740,934 C 734,904 725,861 712,805 699,748 631,480 508,0 L 300,0 -3,1082 175,1082 358,347 C 363,331 377,265 401,149 L 418,223 644,1082 837,1082 1026,339 1072,149 1103,288 1308,1082 1484,1082 1174,0 Z"/>
+ <glyph unicode="u" horiz-adv-x="874" d="M 314,1082 L 314,396 C 314,325 321,269 335,230 349,191 371,162 402,145 433,128 478,119 537,119 624,119 692,149 742,208 792,267 817,350 817,455 L 817,1082 997,1082 997,231 C 997,105 999,28 1003,0 L 833,0 C 832,3 832,12 831,27 830,42 830,59 829,78 828,97 826,132 825,185 L 822,185 C 781,110 733,58 679,27 624,-4 557,-20 476,-20 357,-20 271,10 216,69 161,128 133,225 133,361 L 133,1082 314,1082 Z"/>
+ <glyph unicode="t" horiz-adv-x="531" d="M 554,8 C 495,-8 434,-16 372,-16 228,-16 156,66 156,229 L 156,951 31,951 31,1082 163,1082 216,1324 336,1324 336,1082 536,1082 536,951 336,951 336,268 C 336,216 345,180 362,159 379,138 408,127 450,127 474,127 509,132 554,141 L 554,8 Z"/>
+ <glyph unicode="s" horiz-adv-x="901" d="M 950,299 C 950,197 912,118 835,63 758,8 650,-20 511,-20 376,-20 273,2 200,47 127,91 79,160 57,254 L 216,285 C 231,227 263,185 311,158 359,131 426,117 511,117 602,117 669,131 712,159 754,187 775,229 775,285 775,328 760,362 731,389 702,416 654,438 589,455 L 460,489 C 357,516 283,542 240,568 196,593 162,624 137,661 112,698 100,743 100,796 100,895 135,970 206,1022 276,1073 378,1099 513,1099 632,1099 727,1078 798,1036 868,994 912,927 931,834 L 769,814 C 759,862 732,899 689,925 645,950 586,963 513,963 432,963 372,951 333,926 294,901 275,864 275,814 275,783 283,758 299,738 315,718 339,701 370,687 401,673 467,654 568,629 663,605 732,583 774,563 816,542 849,520 874,495 898,470 917,442 930,410 943,377 950,340 950,299 Z"/>
+ <glyph unicode="r" horiz-adv-x="530" d="M 142,0 L 142,830 C 142,906 140,990 136,1082 L 306,1082 C 311,959 314,886 314,861 L 318,861 C 347,954 380,1017 417,1051 454,1085 507,1102 575,1102 599,1102 623,1099 648,1092 L 648,927 C 624,934 592,937 552,937 477,937 420,905 381,841 342,776 322,684 322,564 L 322,0 142,0 Z"/>
+ <glyph unicode="p" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 488,-20 376,43 319,168 L 314,168 C 317,163 318,106 318,-2 L 318,-425 138,-425 138,861 C 138,972 136,1046 132,1082 L 306,1082 C 307,1079 308,1070 309,1054 310,1037 312,1012 314,978 315,944 316,921 316,908 L 320,908 C 352,975 394,1024 447,1055 500,1086 569,1101 655,1101 788,1101 888,1056 954,967 1020,878 1053,737 1053,546 Z M 864,542 C 864,693 844,800 803,865 762,930 698,962 609,962 538,962 482,947 442,917 401,887 371,840 350,777 329,713 318,630 318,528 318,386 341,281 386,214 431,147 505,113 607,113 696,113 762,146 803,212 844,277 864,387 864,542 Z"/>
+ <glyph unicode="o" horiz-adv-x="980" d="M 1053,542 C 1053,353 1011,212 928,119 845,26 724,-20 565,-20 407,-20 288,28 207,125 126,221 86,360 86,542 86,915 248,1102 571,1102 736,1102 858,1057 936,966 1014,875 1053,733 1053,542 Z M 864,542 C 864,691 842,800 798,868 753,935 679,969 574,969 469,969 393,935 346,866 299,797 275,689 275,542 275,399 298,292 345,221 391,149 464,113 563,113 671,113 748,148 795,217 841,286 864,395 864,542 Z"/>
+ <glyph unicode="l" horiz-adv-x="187" d="M 138,0 L 138,1484 318,1484 318,0 138,0 Z"/>
+ <glyph unicode="i" horiz-adv-x="187" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 137,0 L 137,1082 317,1082 317,0 137,0 Z"/>
+ <glyph unicode="f" horiz-adv-x="557" d="M 361,951 L 361,0 181,0 181,951 29,951 29,1082 181,1082 181,1204 C 181,1303 203,1374 246,1417 289,1460 356,1482 445,1482 495,1482 537,1478 572,1470 L 572,1333 C 542,1338 515,1341 492,1341 446,1341 413,1329 392,1306 371,1283 361,1240 361,1179 L 361,1082 572,1082 572,951 361,951 Z"/>
+ <glyph unicode="e" horiz-adv-x="980" d="M 276,503 C 276,379 302,283 353,216 404,149 479,115 578,115 656,115 719,131 766,162 813,193 844,233 861,281 L 1019,236 C 954,65 807,-20 578,-20 418,-20 296,28 213,123 129,218 87,360 87,548 87,727 129,864 213,959 296,1054 416,1102 571,1102 889,1102 1048,910 1048,527 L 1048,503 276,503 Z M 862,641 C 852,755 823,838 775,891 727,943 658,969 568,969 481,969 412,940 361,882 310,823 282,743 278,641 L 862,641 Z"/>
+ <glyph unicode="b" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 573,-20 505,-5 451,25 396,54 352,102 318,168 L 316,168 C 316,147 315,116 312,74 309,31 307,7 306,0 L 132,0 C 136,36 138,110 138,223 L 138,1484 318,1484 318,1061 C 318,1018 317,967 314,908 L 318,908 C 351,977 396,1027 451,1057 506,1087 574,1102 655,1102 792,1102 892,1056 957,964 1021,872 1053,733 1053,546 Z M 864,540 C 864,691 844,800 804,865 764,930 699,963 609,963 508,963 434,928 388,859 341,790 318,680 318,529 318,387 341,282 386,215 431,147 505,113 607,113 698,113 763,147 804,214 844,281 864,389 864,540 Z"/>
+ <glyph unicode="8" horiz-adv-x="980" d="M 1050,393 C 1050,263 1009,162 926,89 843,16 725,-20 570,-20 419,-20 302,16 217,87 132,158 89,260 89,391 89,483 115,560 168,623 221,686 288,724 370,737 L 370,741 C 293,759 233,798 189,858 144,918 122,988 122,1069 122,1176 162,1263 243,1330 323,1397 431,1430 566,1430 705,1430 814,1397 895,1332 975,1267 1015,1178 1015,1067 1015,986 993,916 948,856 903,796 842,758 765,743 L 765,739 C 855,724 925,686 975,625 1025,563 1050,486 1050,393 Z M 828,1057 C 828,1216 741,1296 566,1296 481,1296 417,1276 373,1236 328,1196 306,1136 306,1057 306,976 329,915 375,873 420,830 485,809 568,809 653,809 717,829 762,868 806,907 828,970 828,1057 Z M 863,410 C 863,497 837,563 785,608 733,652 660,674 566,674 475,674 403,650 352,603 301,555 275,489 275,406 275,212 374,115 572,115 670,115 743,139 791,186 839,233 863,307 863,410 Z"/>
+ <glyph unicode="6" horiz-adv-x="980" d="M 1049,461 C 1049,312 1009,195 928,109 847,23 736,-20 594,-20 435,-20 314,39 230,157 146,275 104,447 104,672 104,916 148,1103 235,1234 322,1365 447,1430 608,1430 821,1430 955,1334 1010,1143 L 838,1112 C 803,1227 725,1284 606,1284 503,1284 424,1236 368,1141 311,1045 283,906 283,725 316,786 362,832 421,864 480,895 548,911 625,911 755,911 858,870 935,789 1011,708 1049,598 1049,461 Z M 866,453 C 866,555 841,634 791,689 741,744 671,772 582,772 498,772 430,748 379,699 327,650 301,582 301,496 301,387 328,298 382,229 435,160 504,125 588,125 675,125 743,154 792,213 841,271 866,351 866,453 Z"/>
+ <glyph unicode="4" horiz-adv-x="1060" d="M 881,319 L 881,0 711,0 711,319 47,319 47,459 692,1409 881,1409 881,461 1079,461 1079,319 881,319 Z M 711,1206 C 710,1202 700,1184 683,1153 666,1122 653,1100 644,1087 L 283,555 229,481 213,461 711,461 711,1206 Z"/>
+ <glyph unicode="3" horiz-adv-x="1006" d="M 1049,389 C 1049,259 1008,158 925,87 842,16 724,-20 571,-20 428,-20 315,12 230,77 145,141 94,236 78,362 L 264,379 C 288,212 390,129 571,129 662,129 733,151 785,196 836,241 862,307 862,395 862,472 833,532 774,575 715,618 629,639 518,639 L 416,639 416,795 514,795 C 613,795 689,817 744,860 798,903 825,962 825,1038 825,1113 803,1173 759,1217 714,1260 648,1282 561,1282 482,1282 418,1262 369,1221 320,1180 291,1123 283,1049 L 102,1063 C 115,1178 163,1268 246,1333 328,1398 434,1430 563,1430 704,1430 814,1397 893,1332 971,1266 1010,1174 1010,1057 1010,967 985,894 935,838 884,781 811,743 715,723 L 715,719 C 820,708 902,672 961,613 1020,554 1049,479 1049,389 Z"/>
+ <glyph unicode="2" horiz-adv-x="954" d="M 103,0 L 103,127 C 137,205 179,274 228,334 277,393 328,447 382,496 436,544 490,589 543,630 596,671 643,713 686,754 729,795 763,839 790,884 816,929 829,981 829,1038 829,1115 806,1175 761,1218 716,1261 653,1282 572,1282 495,1282 432,1261 383,1220 333,1178 304,1119 295,1044 L 111,1061 C 124,1174 172,1263 255,1330 337,1397 443,1430 572,1430 714,1430 823,1397 900,1330 976,1263 1014,1167 1014,1044 1014,989 1002,935 977,881 952,827 914,773 865,719 816,665 721,581 582,468 505,405 444,349 399,299 354,248 321,200 301,153 L 1036,153 1036,0 103,0 Z"/>
+ <glyph unicode="1" horiz-adv-x="927" d="M 156,0 L 156,153 515,153 515,1237 197,1010 197,1180 530,1409 696,1409 696,153 1039,153 1039,0 156,0 Z"/>
+ <glyph unicode=" " horiz-adv-x="556"/>
+ </font>
+ </defs>
+ <defs class="TextShapeIndex">
+ <g ooo:slide="id1" ooo:id-list="id3 id4 id5 id6 id7 id8 id9 id10 id11 id12 id13 id14 id15 id16 id17 id18 id19 id20 id21 id22 id23 id24 id25 id26 id27 id28 id29 id30"/>
+ </defs>
+ <defs class="EmbeddedBulletChars">
+ <g id="bullet-char-template-57356" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 580,1141 L 1163,571 580,0 -4,571 580,1141 Z"/>
+ </g>
+ <g id="bullet-char-template-57354" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 8,1128 L 1137,1128 1137,0 8,0 8,1128 Z"/>
+ </g>
+ <g id="bullet-char-template-10146" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 174,0 L 602,739 174,1481 1456,739 174,0 Z M 1358,739 L 309,1346 659,739 1358,739 Z"/>
+ </g>
+ <g id="bullet-char-template-10132" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 2015,739 L 1276,0 717,0 1260,543 174,543 174,936 1260,936 717,1481 1274,1481 2015,739 Z"/>
+ </g>
+ <g id="bullet-char-template-10007" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 0,-2 C -7,14 -16,27 -25,37 L 356,567 C 262,823 215,952 215,954 215,979 228,992 255,992 264,992 276,990 289,987 310,991 331,999 354,1012 L 381,999 492,748 772,1049 836,1024 860,1049 C 881,1039 901,1025 922,1006 886,937 835,863 770,784 769,783 710,716 594,584 L 774,223 C 774,196 753,168 711,139 L 727,119 C 717,90 699,76 672,76 641,76 570,178 457,381 L 164,-76 C 142,-110 111,-127 72,-127 30,-127 9,-110 8,-76 1,-67 -2,-52 -2,-32 -2,-23 -1,-13 0,-2 Z"/>
+ </g>
+ <g id="bullet-char-template-10004" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 285,-33 C 182,-33 111,30 74,156 52,228 41,333 41,471 41,549 55,616 82,672 116,743 169,778 240,778 293,778 328,747 346,684 L 369,508 C 377,444 397,411 428,410 L 1163,1116 C 1174,1127 1196,1133 1229,1133 1271,1133 1292,1118 1292,1087 L 1292,965 C 1292,929 1282,901 1262,881 L 442,47 C 390,-6 338,-33 285,-33 Z"/>
+ </g>
+ <g id="bullet-char-template-9679" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 813,0 C 632,0 489,54 383,161 276,268 223,411 223,592 223,773 276,916 383,1023 489,1130 632,1184 813,1184 992,1184 1136,1130 1245,1023 1353,916 1407,772 1407,592 1407,412 1353,268 1245,161 1136,54 992,0 813,0 Z"/>
+ </g>
+ <g id="bullet-char-template-8226" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 346,457 C 273,457 209,483 155,535 101,586 74,649 74,723 74,796 101,859 155,911 209,963 273,989 346,989 419,989 480,963 531,910 582,859 608,796 608,723 608,648 583,586 532,535 482,483 420,457 346,457 Z"/>
+ </g>
+ <g id="bullet-char-template-8211" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M -4,459 L 1135,459 1135,606 -4,606 -4,459 Z"/>
+ </g>
+ <g id="bullet-char-template-61548" transform="scale(0.00048828125,-0.00048828125)">
+ <path d="M 173,740 C 173,903 231,1043 346,1159 462,1274 601,1332 765,1332 928,1332 1067,1274 1183,1159 1299,1043 1357,903 1357,740 1357,577 1299,437 1183,322 1067,206 928,148 765,148 601,148 462,206 346,322 231,437 173,577 173,740 Z"/>
+ </g>
+ </defs>
+ <g>
+ <g id="id2" class="Master_Slide">
+ <g id="bg-id2" class="Background"/>
+ <g id="bo-id2" class="BackgroundObjects"/>
+ </g>
+ </g>
+ <g class="SlideGroup">
+ <g>
+ <g id="container-id1">
+ <g id="id1" class="Slide" clip-path="url(#presentation_clip_path)">
+ <g class="Page">
+ <g class="com.sun.star.drawing.MeasureShape">
+ <g id="id3">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="325" width="10003" height="15477"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 2037,950 L 11463,950"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 1750,950 L 2050,1050 2050,850 1750,950 Z"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 11750,950 L 11450,850 11450,1050 11750,950 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1750,15800 L 1750,750"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11750,1650 L 11750,750"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="5953" y="768"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">16 px</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.MeasureShape">
+ <g id="id4">
+ <rect class="BoundingBox" stroke="none" fill="none" x="11795" y="1649" width="1853" height="19913"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 13446,1937 L 13446,21273"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 13446,1650 L 13346,1950 13546,1950 13446,1650 Z"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 13446,21560 L 13546,21260 13346,21260 13446,21560 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11796,1650 L 13646,1650"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11796,21560 L 13646,21560"/>
+ <text class="SVGTextShape" transform="rotate(-90 13395 12369)"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="13395" y="12369"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">32 px</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id5">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="2449" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,4200 L 1750,4200 1750,2450 11750,2450 11750,4200 6750,4200 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="3545"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id6">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="1699" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,2450 L 1750,2450 1750,1700 11750,1700 11750,2450 6750,2450 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="2295"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id7">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="1609" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,4150 L 1750,4150 1750,1650 11750,1650 11750,4150 6750,4150 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.MeasureShape">
+ <g id="id8">
+ <rect class="BoundingBox" stroke="none" fill="none" x="385" y="1199" width="1294" height="3971"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1027,5079 L 1027,2737"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 1027,2450 L 927,2750 1127,2750 1027,2450 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1027,1413 L 1027,1200"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 1027,1700 L 1127,1400 927,1400 1027,1700 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1027,2450 L 1027,1700"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1677,2450 L 827,2450"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 1677,1700 L 827,1700"/>
+ <text class="SVGTextShape" transform="rotate(-90 845 5217)"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="845" y="5217"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">16 bytes</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id9">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="4929" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,6680 L 1750,6680 1750,4930 11750,4930 11750,6680 6750,6680 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="6025"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id10">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="4179" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,4930 L 1750,4930 1750,4180 11750,4180 11750,4930 6750,4930 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="4775"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id11">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="4089" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,6630 L 1750,6630 1750,4130 11750,4130 11750,6630 6750,6630 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id12">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="7409" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,9160 L 1750,9160 1750,7410 11750,7410 11750,9160 6750,9160 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="8505"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id13">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="6659" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,7410 L 1750,7410 1750,6660 11750,6660 11750,7410 6750,7410 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="7255"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id14">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="6569" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,9110 L 1750,9110 1750,6610 11750,6610 11750,9110 6750,9110 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id15">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="9889" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,11640 L 1750,11640 1750,9890 11750,9890 11750,11640 6750,11640 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="10985"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id16">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="9139" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,9890 L 1750,9890 1750,9140 11750,9140 11750,9890 6750,9890 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="9735"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id17">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="9049" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,11590 L 1750,11590 1750,9090 11750,9090 11750,11590 6750,11590 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id18">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="12369" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,14120 L 1750,14120 1750,12370 11750,12370 11750,14120 6750,14120 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="13465"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id19">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="11619" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,12370 L 1750,12370 1750,11620 11750,11620 11750,12370 6750,12370 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="12215"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id20">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="11529" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,14070 L 1750,14070 1750,11570 11750,11570 11750,14070 6750,14070 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id21">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="14849" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,16600 L 1750,16600 1750,14850 11750,14850 11750,16600 6750,16600 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="15945"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id22">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="14099" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,14850 L 1750,14850 1750,14100 11750,14100 11750,14850 6750,14850 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="14695"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id23">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="14009" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,16550 L 1750,16550 1750,14050 11750,14050 11750,16550 6750,16550 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id24">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="17329" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,19080 L 1750,19080 1750,17330 11750,17330 11750,19080 6750,19080 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="18425"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id25">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="16579" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,17330 L 1750,17330 1750,16580 11750,16580 11750,17330 6750,17330 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="17175"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id26">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="16489" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,19030 L 1750,19030 1750,16530 11750,16530 11750,19030 6750,19030 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="Group">
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id27">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="19809" width="10003" height="1753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,21560 L 1750,21560 1750,19810 11750,19810 11750,21560 6750,21560 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3718" y="20905"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of upper 8 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id28">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1749" y="19059" width="10003" height="753"/>
+ <path fill="none" stroke="rgb(52,101,164)" d="M 6750,19810 L 1750,19810 1750,19060 11750,19060 11750,19810 6750,19810 Z"/>
+ <text class="SVGTextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="3771" y="19655"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">4 rows of lower 2 bits</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.CustomShape">
+ <g id="id29">
+ <rect class="BoundingBox" stroke="none" fill="none" x="1709" y="18969" width="10083" height="2583"/>
+ <path fill="none" stroke="rgb(52,101,164)" stroke-width="81" stroke-linejoin="round" d="M 6750,21510 L 1750,21510 1750,19010 11750,19010 11750,21510 6750,21510 Z"/>
+ </g>
+ </g>
+ </g>
+ <g class="com.sun.star.drawing.MeasureShape">
+ <g id="id30">
+ <rect class="BoundingBox" stroke="none" fill="none" x="11849" y="1949" width="1237" height="4987"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 12443,6845 L 12443,4487"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 12443,4200 L 12343,4500 12543,4500 12443,4200 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 12443,2163 L 12443,1950"/>
+ <path fill="rgb(0,0,0)" stroke="none" d="M 12443,2450 L 12543,2150 12343,2150 12443,2450 Z"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 12443,4200 L 12443,2450"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11850,4200 L 12643,4200"/>
+ <path fill="none" stroke="rgb(0,0,0)" d="M 11850,2450 L 12643,2450"/>
+ <text class="SVGTextShape" transform="rotate(-90 12953 6967)"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="635px" font-weight="400"><tspan class="TextPosition" x="12953" y="6967"><tspan fill="rgb(0,0,0)" stroke="none" style="white-space: pre">64 bytes</tspan></tspan></tspan></text>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
index 886ba7b08d6b..ac52485252d9 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
@@ -275,19 +275,6 @@ please make a proposal on the linux-media mailing list.
Decoder's implementation can be found here,
`aspeed_codec <https://github.com/AspeedTech-BMC/aspeed_codec/>`__
- * .. _V4L2-PIX-FMT-MT2110T:
-
- - ``V4L2_PIX_FMT_MT2110T``
- - 'MT2110T'
- - This format is two-planar 10-Bit tile mode and having similitude with
- ``V4L2_PIX_FMT_MM21`` in term of alignment and tiling. Used for VP9, AV1
- and HEVC.
- * .. _V4L2-PIX-FMT-MT2110R:
-
- - ``V4L2_PIX_FMT_MT2110R``
- - 'MT2110R'
- - This format is two-planar 10-Bit raster mode and having similitude with
- ``V4L2_PIX_FMT_MM21`` in term of alignment and tiling. Used for AVC.
* .. _V4L2-PIX-FMT-HEXTILE:
- ``V4L2_PIX_FMT_HEXTILE``
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
index 1840224faa41..b788f6933855 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
@@ -144,6 +144,20 @@ All components are stored with the same number of bits per component.
- Cb, Cr
- Yes
- 4x4 tiles
+ * - V4L2_PIX_FMT_MT2110T
+ - 'MT2T'
+ - 15
+ - 4:2:0
+ - Cb, Cr
+ - No
+ - 16x32 / 16x16 tiles, tiled low bits
+ * - V4L2_PIX_FMT_MT2110R
+ - 'MT2R'
+ - 15
+ - 4:2:0
+ - Cb, Cr
+ - No
+ - 16x32 / 16x16 tiles, raster low bits
* - V4L2_PIX_FMT_NV16
- 'NV16'
- 8
@@ -295,8 +309,6 @@ of the luma plane.
.. _V4L2-PIX-FMT-NV12-32L32:
.. _V4L2-PIX-FMT-NV12M-8L128:
.. _V4L2-PIX-FMT-NV12-8L128:
-.. _V4L2-PIX-FMT-NV12M-10BE-8L128:
-.. _V4L2-PIX-FMT-NV12-10BE-8L128:
.. _V4L2-PIX-FMT-MM21:
Tiled NV12
@@ -322,6 +334,22 @@ If the vertical resolution is an odd number of tiles, the last row of
tiles is stored in linear order. The layouts of the luma and chroma
planes are identical.
+.. _nv12mt:
+
+.. kernel-figure:: nv12mt.svg
+ :alt: nv12mt.svg
+ :align: center
+
+ V4L2_PIX_FMT_NV12MT macroblock Z shape memory layout
+
+.. _nv12mt_ex:
+
+.. kernel-figure:: nv12mt_example.svg
+ :alt: nv12mt_example.svg
+ :align: center
+
+ Example V4L2_PIX_FMT_NV12MT memory layout of tiles
+
``V4L2_PIX_FMT_NV12_4L4`` stores pixels in 4x4 tiles, and stores
tiles linearly in memory. The line stride and image height must be
aligned to a multiple of 4. The layouts of the luma and chroma planes are
@@ -345,6 +373,27 @@ The layouts of the luma and chroma planes are identical.
``V4L2_PIX_FMT_NV12_8L128`` is similar to ``V4L2_PIX_FMT_NV12M_8L128`` but stores
two planes in one memory.
+``V4L2_PIX_FMT_MM21`` stores luma pixels in 16x32 tiles, and chroma pixels
+in 16x16 tiles. The line stride must be aligned to a multiple of 16 and the
+image height must be aligned to a multiple of 32. The number of luma and chroma
+tiles is identical, even though the tile sizes differ. The image is formed of
+two non-contiguous planes.
+
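+As a rough illustration of the geometry above, a hypothetical helper deriving
+the per-plane sizes (a sketch only; ``stride`` and ``height`` are assumed to
+be aligned as required)::
+
+    #include <stddef.h>
+    #include <stdbool.h>
+
+    /* MM21: 16x32 luma tiles, 16x16 chroma tiles, 4:2:0 subsampling */
+    static size_t mm21_plane_size(unsigned int stride, unsigned int height,
+                                  bool chroma)
+    {
+        /* the chroma plane covers half as many lines as the luma plane */
+        return (size_t)stride * (chroma ? height / 2 : height);
+    }
+
+Both planes then hold the same number of tiles: (stride / 16) * (height / 32)
+16x32 luma tiles versus (stride / 16) * ((height / 2) / 16) 16x16 chroma tiles.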
+
+.. _V4L2-PIX-FMT-NV15-4L4:
+.. _V4L2-PIX-FMT-NV12M-10BE-8L128:
+.. _V4L2-PIX-FMT-NV12-10BE-8L128:
+.. _V4L2-PIX-FMT-MT2110T:
+.. _V4L2-PIX-FMT-MT2110R:
+
+Tiled NV15
+----------
+
+``V4L2_PIX_FMT_NV15_4L4`` is a semi-planar 10-bit YUV 4:2:0 format, using 4x4
+tiling. All components are packed without any padding between each other.
+As a side effect, each group of 4 components is stored over 5 bytes
+(YYYY or UVUV = 4 * 10 bits = 40 bits = 5 bytes).
+
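+A sketch of unpacking one 5-byte group into its four 10-bit samples. The bit
+order used here (least significant sample first within the group) is an
+assumption for illustration and is not specified above::
+
+    #include <stdint.h>
+
+    /* unpack one group of four 10-bit samples from 5 packed bytes,
+     * assuming the first sample occupies the least significant bits */
+    static void nv15_unpack(const uint8_t b[5], uint16_t s[4])
+    {
+        s[0] = b[0] | ((b[1] & 0x03) << 8);
+        s[1] = (b[1] >> 2) | ((b[2] & 0x0f) << 6);
+        s[2] = (b[2] >> 4) | ((b[3] & 0x3f) << 4);
+        s[3] = (b[3] >> 6) | ((uint16_t)b[4] << 2);
+    }
+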
``V4L2_PIX_FMT_NV12M_10BE_8L128`` is similar to ``V4L2_PIX_FMT_NV12M`` but stores
10 bits pixels in 2D 8x128 tiles, and stores tiles linearly in memory.
the data is arranged in big endian order.
@@ -363,37 +412,119 @@ byte 4: Y3(bits 7-0)
``V4L2_PIX_FMT_NV12_10BE_8L128`` is similar to ``V4L2_PIX_FMT_NV12M_10BE_8L128`` but stores
two planes in one memory.
-``V4L2_PIX_FMT_MM21`` store luma pixel in 16x32 tiles, and chroma pixels
-in 16x16 tiles. The line stride must be aligned to a multiple of 16 and the
-image height must be aligned to a multiple of 32. The number of luma and chroma
-tiles are identical, even though the tile size differ. The image is formed of
-two non-contiguous planes.
-
-.. _nv12mt:
+``V4L2_PIX_FMT_MT2110T`` is one of MediaTek's packed 10-bit YUV 4:2:0 formats.
+It is a fully packed 10-bit 4:2:0 format like NV15 (15 bits per pixel), except
+that the lower two bits of data are stored in separate partitions. The format
+is composed of 16x32 luma tiles and 16x16 chroma tiles. Each luma tile is 640
+bytes long, divided into 8 partitions of 80 bytes. The first 16 bytes of each
+partition hold the 2 least significant bits of the pixel data. The remaining
+64 bytes hold the 8 most significant bits of the pixel data.
-.. kernel-figure:: nv12mt.svg
- :alt: nv12mt.svg
+.. kernel-figure:: mt2110t.svg
+ :alt: mt2110t.svg
:align: center
- V4L2_PIX_FMT_NV12MT macroblock Z shape memory layout
-
-.. _nv12mt_ex:
+ Layout of an MT2110T luma tile
-.. kernel-figure:: nv12mt_example.svg
- :alt: nv12mt_example.svg
- :align: center
+Filtering out the upper part of each partition results in a valid
+``V4L2_PIX_FMT_MM21`` frame. A partition is a sub-tile of size 16 x 4. The
+lower two bits are said to be tiled since each byte contains the lower two
+bits of the column of four pixels sharing the same index. The chroma tiles
+only have 4 partitions.
- Example V4L2_PIX_FMT_NV12MT memory layout of tiles
+.. flat-table:: MT2110T LSB bits layout
+ :header-rows: 1
+ :stub-columns: 1
-.. _V4L2-PIX-FMT-NV15-4L4:
+ * -
+ - start + 0:
+ - start + 1:
+ - . . .
+ - start + 15:
+ * - Bits 1:0
+ - Y'\ :sub:`0:0`
+ - Y'\ :sub:`0:1`
+ - . . .
+ - Y'\ :sub:`0:15`
+ * - Bits 3:2
+ - Y'\ :sub:`1:0`
+ - Y'\ :sub:`1:1`
+ - . . .
+ - Y'\ :sub:`1:15`
+ * - Bits 5:4
+ - Y'\ :sub:`2:0`
+ - Y'\ :sub:`2:1`
+ - . . .
+ - Y'\ :sub:`2:15`
+ * - Bits 7:6
+ - Y'\ :sub:`3:0`
+ - Y'\ :sub:`3:1`
+ - . . .
+ - Y'\ :sub:`3:15`
+
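+As an illustration, a hypothetical helper reconstructing one 10-bit luma
+sample from a 640-byte luma tile. The raster ordering of the 64 MSB bytes
+inside a partition is an assumption here, as is the helper itself::
+
+    #include <stdint.h>
+
+    /* each 80-byte partition covers a 16x4 sub-tile: bytes 0-15 hold the
+     * 2 LSBs of the four pixels of each column (see the table above),
+     * bytes 16-79 hold the 8 MSBs */
+    static uint16_t mt2110t_sample(const uint8_t *tile,
+                                   unsigned int row, unsigned int col)
+    {
+        const uint8_t *part = tile + (row / 4) * 80;
+        unsigned int r = row % 4;
+        uint8_t lsb = (part[col] >> (2 * r)) & 0x3;
+        uint8_t msb = part[16 + r * 16 + col];
+
+        return ((uint16_t)msb << 2) | lsb;
+    }
+
+Chroma tiles follow the same walk, with only 4 partitions per 16x16 tile.
+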
+``V4L2_PIX_FMT_MT2110R`` is identical to ``V4L2_PIX_FMT_MT2110T`` except that
+the two least significant bits are laid out in raster order. This means the
+first byte contains 4 pixels of the first row, with 4 bytes per line.
+
+.. flat-table:: MT2110R LSB bits layout
+ :header-rows: 2
+ :stub-columns: 1
-Tiled NV15
-----------
+ * -
+ - :cspan:`3` Byte 0
+ - ...
+ - :cspan:`3` Byte 3
+ * -
+ - 7:6
+ - 5:4
+ - 3:2
+ - 1:0
+ - ...
+ - 7:6
+ - 5:4
+ - 3:2
+ - 1:0
+ * - start + 0:
+ - Y'\ :sub:`0:3`
+ - Y'\ :sub:`0:2`
+ - Y'\ :sub:`0:1`
+ - Y'\ :sub:`0:0`
+ - ...
+ - Y'\ :sub:`0:15`
+ - Y'\ :sub:`0:14`
+ - Y'\ :sub:`0:13`
+ - Y'\ :sub:`0:12`
+ * - start + 4:
+ - Y'\ :sub:`1:3`
+ - Y'\ :sub:`1:2`
+ - Y'\ :sub:`1:1`
+ - Y'\ :sub:`1:0`
+ - ...
+ - Y'\ :sub:`1:15`
+ - Y'\ :sub:`1:14`
+ - Y'\ :sub:`1:13`
+ - Y'\ :sub:`1:12`
+ * - start + 8:
+ - Y'\ :sub:`2:3`
+ - Y'\ :sub:`2:2`
+ - Y'\ :sub:`2:1`
+ - Y'\ :sub:`2:0`
+ - ...
+ - Y'\ :sub:`2:15`
+ - Y'\ :sub:`2:14`
+ - Y'\ :sub:`2:13`
+ - Y'\ :sub:`2:12`
+ * - start + 12:
+ - Y'\ :sub:`3:3`
+ - Y'\ :sub:`3:2`
+ - Y'\ :sub:`3:1`
+ - Y'\ :sub:`3:0`
+ - ...
+ - Y'\ :sub:`3:15`
+ - Y'\ :sub:`3:14`
+ - Y'\ :sub:`3:13`
+ - Y'\ :sub:`3:12`
-Semi-planar 10-bit YUV 4:2:0 formats, using 4x4 tiling.
-All components are packed without any padding between each other.
-As a side-effect, each group of 4 components are stored over 5 bytes
-(YYYY or UVUV = 4 * 10 bits = 40 bits = 5 bytes).
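+
+For the raster variant only the LSB addressing changes relative to the
+sketch above (again an assumption-laden sketch, not a reference
+implementation)::
+
+    /* MT2110R: each LSB line is 4 bytes; one byte packs 4 consecutive
+     * pixels of a row, least significant bit pair first */
+    uint8_t lsb_byte = part[r * 4 + col / 4];
+    uint8_t lsb = (lsb_byte >> (2 * (col % 4))) & 0x3;
+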
.. _V4L2-PIX-FMT-NV16:
.. _V4L2-PIX-FMT-NV61:
diff --git a/Documentation/userspace-api/media/v4l/vidioc-querycap.rst b/Documentation/userspace-api/media/v4l/vidioc-querycap.rst
index 6c57b8428356..3d11d86d9cbf 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-querycap.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-querycap.rst
@@ -244,6 +244,17 @@ specification the ioctl returns an ``EINVAL`` error code.
- 0x01000000
- The device supports the :c:func:`read()` and/or
:c:func:`write()` I/O methods.
+ * - ``V4L2_CAP_EDID``
+ - 0x02000000
+ - The device stores the EDID for a video input, or retrieves the EDID for a video
+ output. It is a standalone EDID device, so no video streaming etc. will take place.
+
+ For a video input this is typically an EEPROM that supports the
+ :ref:`VESA Enhanced Display Data Channel Standard <vesaeddc>`. It can be something
+ else as well, for example a microcontroller.
+
+ For a video output this is typically read from an external device such as an
+ HDMI splitter accessed by a serial port.
* - ``V4L2_CAP_STREAMING``
- 0x04000000
- The device supports the :ref:`streaming <mmap>` I/O method.
diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
index bbc22dd76032..daf9a6621b50 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst
@@ -73,6 +73,8 @@ aborting or finishing any DMA in progress, an implicit
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.5cm}|
+.. cssclass:: longtable
+
.. flat-table:: struct v4l2_requestbuffers
:header-rows: 0
:stub-columns: 0
@@ -123,14 +125,6 @@ aborting or finishing any DMA in progress, an implicit
.. _V4L2-BUF-CAP-SUPPORTS-MAX-NUM-BUFFERS:
.. _V4L2-BUF-CAP-SUPPORTS-REMOVE-BUFS:
-.. raw:: latex
-
- \footnotesize
-
-.. tabularcolumns:: |p{8.1cm}|p{2.2cm}|p{7.0cm}|
-
-.. cssclass:: longtable
-
.. flat-table:: V4L2 Buffer Capabilities Flags
:header-rows: 0
:stub-columns: 0
@@ -166,6 +160,36 @@ aborting or finishing any DMA in progress, an implicit
:ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE <V4L2-BUF-FLAG-NO-CACHE-INVALIDATE>`,
:ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN <V4L2-BUF-FLAG-NO-CACHE-CLEAN>` and
:ref:`V4L2_MEMORY_FLAG_NON_COHERENT <V4L2-MEMORY-FLAG-NON-COHERENT>`.
+ * - ``V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS``
+ - 0x00000080
+ - If set, then the ``max_num_buffers`` field in ``struct v4l2_create_buffers``
+ is valid. If not set, then the maximum is ``VIDEO_MAX_FRAME`` buffers.
+ * - ``V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS``
+ - 0x00000100
+ - If set, then ``VIDIOC_REMOVE_BUFS`` is supported.
+
+.. _memory-flags:
+.. _V4L2-MEMORY-FLAG-NON-COHERENT:
+
+.. flat-table:: Memory Consistency Flags
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 4
+
+ * - ``V4L2_MEMORY_FLAG_NON_COHERENT``
+ - 0x00000001
+ - A buffer is allocated either in coherent (it will be automatically
+ coherent between the CPU and the bus) or non-coherent memory. The
+ latter can provide performance gains, for instance the CPU cache
+ sync/flush operations can be avoided if the buffer is accessed by the
+ corresponding device only and the CPU does not read/write to/from that
+ buffer. However, this requires extra care from the driver -- it must
+ guarantee memory consistency by issuing a cache flush/sync when
+ consistency is needed. If this flag is set V4L2 will attempt to
+ allocate the buffer in non-coherent memory. The flag takes effect
+ only if the buffer is used for :ref:`memory mapping <mmap>` I/O and the
+ queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS
+ <V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS>` capability.
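+
+A minimal sketch of requesting buffers with this hint (error handling
+omitted; the symbols used are the UAPI names documented on this page)::
+
+    struct v4l2_requestbuffers req = {
+        .count = 4,
+        .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+        .memory = V4L2_MEMORY_MMAP,
+        .flags = V4L2_MEMORY_FLAG_NON_COHERENT,
+    };
+
+    /* the flag is a hint: it is ignored unless the queue reports
+     * V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS in req.capabilities */
+    ioctl(fd, VIDIOC_REQBUFS, &req);
+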
.. raw:: latex
diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
index bdc628e8c1d6..d67fd4038d22 100644
--- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions
+++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
@@ -197,6 +197,7 @@ replace define V4L2_CAP_META_OUTPUT device-capabilities
replace define V4L2_CAP_DEVICE_CAPS device-capabilities
replace define V4L2_CAP_TOUCH device-capabilities
replace define V4L2_CAP_IO_MC device-capabilities
+replace define V4L2_CAP_EDID device-capabilities
# V4L2 pix flags
replace define V4L2_PIX_FMT_PRIV_MAGIC :c:type:`v4l2_pix_format`
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index b3be87489108..e32471977d0a 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -4214,7 +4214,9 @@ whether or not KVM_CAP_X86_USER_SPACE_MSR's KVM_MSR_EXIT_REASON_FILTER is
enabled. If KVM_MSR_EXIT_REASON_FILTER is enabled, KVM will exit to userspace
on denied accesses, i.e. userspace effectively intercepts the MSR access. If
KVM_MSR_EXIT_REASON_FILTER is not enabled, KVM will inject a #GP into the guest
-on denied accesses.
+on denied accesses. Note, if an MSR access is denied while emulating MSR
+loads/stores during VMX transitions, KVM ignores KVM_MSR_EXIT_REASON_FILTER.
+See the below warning for full details.
If an MSR access is allowed by userspace, KVM will emulate and/or virtualize
the access in accordance with the vCPU model. Note, KVM may still ultimately
@@ -4229,9 +4231,22 @@ filtering. In that mode, ``KVM_MSR_FILTER_DEFAULT_DENY`` is invalid and causes
an error.
.. warning::
- MSR accesses as part of nested VM-Enter/VM-Exit are not filtered.
- This includes both writes to individual VMCS fields and reads/writes
- through the MSR lists pointed to by the VMCS.
+ MSR accesses that are side effects of instruction execution (emulated or
+ native) are not filtered as hardware does not honor MSR bitmaps outside of
+ RDMSR and WRMSR, and KVM mimics that behavior when emulating instructions
+ to avoid pointless divergence from hardware. E.g. RDPID reads MSR_TSC_AUX,
+ SYSENTER reads the SYSENTER MSRs, etc.
+
+ MSRs that are loaded/stored via dedicated VMCS fields are not filtered as
+ part of VM-Enter/VM-Exit emulation.
+
+ MSRs that are loaded/stored via VMX's load/store lists _are_ filtered as part
+ of VM-Enter/VM-Exit emulation. If an MSR access is denied on VM-Enter, KVM
+ synthesizes a consistency check VM-Exit (EXIT_REASON_MSR_LOAD_FAIL). If an
+ MSR access is denied on VM-Exit, KVM synthesizes a VM-Abort. In short, KVM
+ extends Intel's architectural list of MSRs that cannot be loaded/saved via
+ the VM-Enter/VM-Exit MSR lists. It is the platform owner's responsibility
+ to communicate any such restrictions to their end users.
x2APIC MSR accesses cannot be filtered (KVM silently ignores filters that
cover any x2APIC MSRs).
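+
+A minimal sketch of denying writes to a single MSR while reporting denied
+accesses to userspace (illustrative only; ``vm_fd`` and ``MSR_INDEX`` are
+placeholders and error handling is omitted)::
+
+    __u8 bitmap = 0;    /* bit 0 clear: deny the filtered operation */
+    struct kvm_msr_filter filter = {
+        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+        .ranges[0] = {
+            .flags = KVM_MSR_FILTER_WRITE,
+            .nmsrs = 1,
+            .base = MSR_INDEX,
+            .bitmap = &bitmap,
+        },
+    };
+    struct kvm_enable_cap cap = {
+        .cap = KVM_CAP_X86_USER_SPACE_MSR,
+        .args = { KVM_MSR_EXIT_REASON_FILTER },
+    };
+
+    ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+    ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);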
@@ -8082,6 +8097,14 @@ KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if
guest CPUID on writes to MISC_ENABLE if
KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is
disabled.
+
+KVM_X86_QUIRK_SLOT_ZAP_ALL By default, KVM invalidates all SPTEs in
+ a fast way for memslot deletion when VM type
+ is KVM_X86_DEFAULT_VM.
+ When this quirk is disabled or when VM type
+ is other than KVM_X86_DEFAULT_VM, KVM zaps
+ only leaf SPTEs that are within the range of
+ the memslot being deleted.
=================================== ============================================
7.32 KVM_CAP_MAX_VCPU_ID
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 02880d5552d5..20a9a37d1cdd 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -11,6 +11,8 @@ The acquisition orders for mutexes are as follows:
- cpus_read_lock() is taken outside kvm_lock
+- kvm_usage_lock is taken outside cpus_read_lock()
+
- kvm->lock is taken outside vcpu->mutex
- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
@@ -24,6 +26,13 @@ The acquisition orders for mutexes are as follows:
are taken on the waiting side when modifying memslots, so MMU notifiers
must not take either kvm->slots_lock or kvm->slots_arch_lock.
+cpus_read_lock() vs kvm_lock:
+
+- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that
+ being the official ordering, as it is quite easy to unknowingly trigger
+ cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list,
+ e.g. avoid complex operations when possible.
+
For SRCU:
- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
@@ -227,10 +236,16 @@ time it will be set using the Dirty tracking mechanism described above.
:Type: mutex
:Arch: any
:Protects: - vm_list
- - kvm_usage_count
+
+``kvm_usage_lock``
+^^^^^^^^^^^^^^^^^^
+
+:Type: mutex
+:Arch: any
+:Protects: - kvm_usage_count
- hardware virtualization enable/disable
-:Comment: KVM also disables CPU hotplug via cpus_read_lock() during
- enable/disable.
+:Comment: Exists to allow taking cpus_read_lock() while kvm_usage_count is
+ protected, which simplifies the virtualization enabling logic.
``kvm->mn_invalidate_lock``
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -290,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above.
wakeup.
``vendor_module_lock``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
:Type: mutex
:Arch: x86
:Protects: loading a vendor module (kvm_amd or kvm_intel)
-:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is
- taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and
- many operations need to take cpu_hotplug_lock when loading a vendor module,
- e.g. updating static calls.
+:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken
+ in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while
+ cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many
+ operations need to take cpu_hotplug_lock when loading a vendor module, e.g.
+ updating static calls.
diff --git a/Documentation/virt/uml/user_mode_linux_howto_v2.rst b/Documentation/virt/uml/user_mode_linux_howto_v2.rst
index 27942446f406..584000b743f3 100644
--- a/Documentation/virt/uml/user_mode_linux_howto_v2.rst
+++ b/Documentation/virt/uml/user_mode_linux_howto_v2.rst
@@ -217,6 +217,8 @@ remote UML and other VM instances.
+-----------+--------+------------------------------------+------------+
| fd | vector | dependent on fd type | varies |
+-----------+--------+------------------------------------+------------+
+| vde | vector | dep. on VDE VPN: Virt.Net Locator | varies |
++-----------+--------+------------------------------------+------------+
| tuntap | legacy | none | ~ 500Mbit |
+-----------+--------+------------------------------------+------------+
| daemon | legacy | none | ~ 450Mbit |
@@ -573,6 +575,41 @@ https://github.com/NetSys/bess/wiki/Built-In-Modules-and-Ports
BESS transport does not require any special privileges.
+VDE vector transport
+--------------------
+
+Virtual Distributed Ethernet (VDE) is a project whose main goal is to provide
+highly flexible support for virtual networking.
+
+http://wiki.virtualsquare.org/#/tutorials/vdebasics
+
+Common usages of VDE include fast prototyping and teaching.
+
+Examples:
+
+ ``vecX:transport=vde,vnl=tap://tap0``
+
+use tap0
+
+ ``vecX:transport=vde,vnl=slirp://``
+
+use slirp
+
+ ``vec0:transport=vde,vnl=vde:///tmp/switch``
+
+connect to a vde switch
+
+ ``vecX:transport=\"vde,vnl=cmd://ssh remote.host //tmp/sshlirp\"``
+
+connect to a remote slirp (instant VPN: converts an ssh connection into a VPN
+using sshlirp)
+https://github.com/virtualsquare/sshlirp
+
+ ``vec0:transport=vde,vnl=vxvde://234.0.0.1``
+
+connect to a local area cloud (all the UML nodes using the same
+multicast address running on hosts in the same multicast domain (LAN)
+will be automagically connected together to a virtual LAN.
+
Configuring Legacy transports
=============================
diff --git a/Documentation/watchdog/convert_drivers_to_kernel_api.rst b/Documentation/watchdog/convert_drivers_to_kernel_api.rst
index a1c3f038ce0e..e83609a5d007 100644
--- a/Documentation/watchdog/convert_drivers_to_kernel_api.rst
+++ b/Documentation/watchdog/convert_drivers_to_kernel_api.rst
@@ -75,7 +75,6 @@ Example conversion::
-static const struct file_operations s3c2410wdt_fops = {
- .owner = THIS_MODULE,
- - .llseek = no_llseek,
- .write = s3c2410wdt_write,
- .unlocked_ioctl = s3c2410wdt_ioctl,
- .open = s3c2410wdt_open,
diff --git a/LICENSES/deprecated/0BSD b/LICENSES/deprecated/0BSD
new file mode 100644
index 000000000000..e4b95b749966
--- /dev/null
+++ b/LICENSES/deprecated/0BSD
@@ -0,0 +1,23 @@
+Valid-License-Identifier: 0BSD
+SPDX-URL: https://spdx.org/licenses/0BSD.html
+Usage-Guide:
+ To use the BSD Zero Clause License put the following SPDX tag/value
+ pair into a comment according to the placement guidelines in the
+ licensing rules documentation:
+ SPDX-License-Identifier: 0BSD
+License-Text:
+
+BSD Zero Clause License
+
+Copyright (c) <year> <copyright holders>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/MAINTAINERS b/MAINTAINERS
index 25056ed19363..c27f3190737f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -449,6 +449,7 @@ S: Supported
W: https://wiki.analog.com/resources/tools-software/linux-drivers/iio-adc/ad738x
W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
+F: Documentation/iio/ad7380.rst
F: drivers/iio/adc/ad7380.c
AD7877 TOUCHSCREEN DRIVER
@@ -631,6 +632,17 @@ F: drivers/iio/accel/adxl372.c
F: drivers/iio/accel/adxl372_i2c.c
F: drivers/iio/accel/adxl372_spi.c
+ADXL380 THREE-AXIS DIGITAL ACCELEROMETER DRIVER
+M: Ramona Gradinariu <ramona.gradinariu@analog.com>
+M: Antoniu Miclaus <antoniu.miclaus@analog.com>
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/accel/adi,adxl380.yaml
+F: drivers/iio/accel/adxl380.c
+F: drivers/iio/accel/adxl380.h
+F: drivers/iio/accel/adxl380_i2c.c
+F: drivers/iio/accel/adxl380_spi.c
+
AF8133J THREE-AXIS MAGNETOMETER DRIVER
M: Ondřej Jirman <megi@xff.cz>
S: Maintained
@@ -1025,6 +1037,13 @@ S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
+AMD DISPLAY CORE - DML
+M: Chaitanya Dhere <chaitanya.dhere@amd.com>
+M: Jun Lei <jun.lei@amd.com>
+S: Supported
+F: drivers/gpu/drm/amd/display/dc/dml/
+F: drivers/gpu/drm/amd/display/dc/dml2/
+
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <ray.huang@amd.com>
L: linux-hwmon@vger.kernel.org
@@ -1140,6 +1159,14 @@ L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/ptdma/
+AMD QDMA DRIVER
+M: Nishad Saraf <nishads@amd.com>
+M: Lizhi Hou <lizhi.hou@amd.com>
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/amd/qdma/
+F: include/linux/platform_data/amd_qdma.h
+
AMD SEATTLE DEVICE TREE SUPPORT
M: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
M: Tom Lendacky <thomas.lendacky@amd.com>
@@ -1227,6 +1254,8 @@ L: linux-iio@vger.kernel.org
S: Supported
W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
+F: Documentation/iio/ad4000.rst
+F: drivers/iio/adc/ad4000.c
ANALOG DEVICES INC AD4130 DRIVER
M: Cosmin Tanislav <cosmin.tanislav@analog.com>
@@ -1237,6 +1266,18 @@ F: Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130
F: Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml
F: drivers/iio/adc/ad4130.c
+ANALOG DEVICES INC AD4695 DRIVER
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Nuno Sá <nuno.sa@analog.com>
+R: David Lechner <dlechner@baylibre.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
+F: Documentation/iio/ad4695.rst
+F: drivers/iio/adc/ad4695.c
+F: include/dt-bindings/iio/adi,ad4695.h
+
ANALOG DEVICES INC AD7091R DRIVER
M: Marcelo Schmitt <marcelo.schmitt@analog.com>
L: linux-iio@vger.kernel.org
@@ -1303,6 +1344,16 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
F: drivers/iio/adc/ad7780.c
+ANALOG DEVICES INC AD9467 DRIVER
+M: Michael Hennerich <Michael.Hennerich@analog.com>
+M: Nuno Sa <nuno.sa@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/ABI/testing/debugfs-iio-ad9467
+F: Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
+F: drivers/iio/adc/ad9467.c
+
ANALOG DEVICES INC AD9739a DRIVER
M: Nuno Sa <nuno.sa@analog.com>
M: Dragos Bogdan <dragos.bogdan@analog.com>
@@ -1799,6 +1850,7 @@ L: dri-devel@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/panfrost.rst
+F: drivers/gpu/drm/ci/xfails/panfrost*
F: drivers/gpu/drm/panfrost/
F: include/uapi/drm/panfrost_drm.h
@@ -2220,6 +2272,7 @@ N: clps711x
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M: Hartley Sweeten <hsweeten@visionengravers.com>
M: Alexander Sverdlin <alexander.sverdlin@gmail.com>
+M: Nikita Shubin <nikita.shubin@maquefel.me>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/iio/adc/cirrus,ep9301-adc.yaml
@@ -2484,6 +2537,7 @@ T: git git://github.com/vzapolskiy/linux-lpc32xx.git
F: Documentation/devicetree/bindings/i2c/nxp,pnx-i2c.yaml
F: arch/arm/boot/dts/nxp/lpc/lpc32*
F: arch/arm/mach-lpc32xx/
+F: drivers/dma/lpc32xx-dmamux.c
F: drivers/i2c/busses/i2c-pnx.c
F: drivers/net/ethernet/nxp/lpc_eth.c
F: drivers/usb/host/ohci-nxp.c
@@ -2785,7 +2839,7 @@ F: drivers/iommu/msm*
F: drivers/mfd/ssbi.c
F: drivers/mmc/host/mmci_qcom*
F: drivers/mmc/host/sdhci-msm.c
-F: drivers/pci/controller/dwc/pcie-qcom.c
+F: drivers/pci/controller/dwc/pcie-qcom*
F: drivers/phy/qualcomm/
F: drivers/power/*/msm*
F: drivers/reset/reset-qcom-*
@@ -3989,7 +4043,7 @@ F: Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml
F: drivers/iio/imu/bmi323/
BPF JIT for ARC
-M: Shahab Vahedi <shahab@synopsys.com>
+M: Shahab Vahedi <list+bpf@vahedi.org>
L: bpf@vger.kernel.org
S: Maintained
F: arch/arc/net/
@@ -4156,6 +4210,7 @@ F: include/uapi/linux/btf*
F: include/uapi/linux/filter.h
F: kernel/bpf/
F: kernel/trace/bpf_trace.c
+F: lib/buildid.c
F: lib/test_bpf.c
F: net/bpf/
F: net/core/filter.c
@@ -4276,6 +4331,7 @@ L: bpf@vger.kernel.org
S: Maintained
F: kernel/bpf/stackmap.c
F: kernel/trace/bpf_trace.c
+F: lib/buildid.c
BROADCOM ASP 2.0 ETHERNET DRIVER
M: Justin Chen <justin.chen@broadcom.com>
@@ -5196,6 +5252,7 @@ S: Maintained
F: Documentation/admin-guide/module-signing.rst
F: certs/
F: scripts/sign-file.c
+F: scripts/ssl-common.h
F: tools/certs/
CFAG12864B LCD DRIVER
@@ -5671,8 +5728,7 @@ L: linux-cxl@vger.kernel.org
S: Maintained
F: Documentation/driver-api/cxl
F: drivers/cxl/
-F: include/linux/einj-cxl.h
-F: include/linux/cxl-event.h
+F: include/cxl/
F: include/uapi/linux/cxl_mem.h
F: tools/testing/cxl/
@@ -6537,6 +6593,12 @@ F: include/net/devlink.h
F: include/uapi/linux/devlink.h
F: net/devlink/
+DFROBOT SD2405AL RTC DRIVER
+M: Tóth János <gomba007@gmail.com>
+L: linux-rtc@vger.kernel.org
+S: Maintained
+F: drivers/rtc/rtc-sd2405al.c
+
DH ELECTRONICS IMX6 DHCOM/DHCOR BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
L: kernel@dh-electronics.com
@@ -6724,6 +6786,7 @@ F: drivers/dma-buf/dma-heap.c
F: drivers/dma-buf/heaps/*
F: include/linux/dma-heap.h
F: include/uapi/linux/dma-heap.h
+F: tools/testing/selftests/dmabuf-heaps/
DMC FREQUENCY DRIVER FOR SAMSUNG EXYNOS5422
M: Lukasz Luba <lukasz.luba@arm.com>
@@ -7398,10 +7461,10 @@ F: drivers/gpu/drm/udl/
DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
M: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
-M: Melissa Wen <melissa.srw@gmail.com>
M: Maíra Canal <mairacanal@riseup.net>
R: Haneen Mohammed <hamohammed.sa@gmail.com>
-R: Daniel Vetter <daniel@ffwll.ch>
+R: Simona Vetter <simona@ffwll.ch>
+R: Melissa Wen <melissa.srw@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -7434,7 +7497,7 @@ F: drivers/gpu/drm/panel/panel-widechips-ws2401.c
DRM DRIVERS
M: David Airlie <airlied@gmail.com>
-M: Daniel Vetter <daniel@ffwll.ch>
+M: Simona Vetter <simona@ffwll.ch>
L: dri-devel@lists.freedesktop.org
S: Maintained
B: https://gitlab.freedesktop.org/drm
@@ -7530,7 +7593,6 @@ M: Kyungmin Park <kyungmin.park@samsung.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
-F: Documentation/devicetree/bindings/display/exynos/
F: Documentation/devicetree/bindings/display/samsung/
F: drivers/gpu/drm/exynos/
F: include/uapi/drm/exynos_drm.h
@@ -8446,6 +8508,7 @@ N: binfmt
EXFAT FILE SYSTEM
M: Namjae Jeon <linkinjeon@kernel.org>
M: Sungjong Seo <sj1557.seo@samsung.com>
+R: Yuezhang Mo <yuezhang.mo@sony.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/exfat.git
@@ -8529,6 +8592,13 @@ F: lib/bootconfig.c
F: tools/bootconfig/*
F: tools/bootconfig/scripts/*
+EXTRON DA HD 4K PLUS CEC DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: drivers/media/cec/usb/extron-da-hd-4k-plus/
+
EXYNOS DP DRIVER
M: Jingoo Han <jingoohan1@gmail.com>
L: dri-devel@lists.freedesktop.org
@@ -8604,6 +8674,7 @@ M: Akinobu Mita <akinobu.mita@gmail.com>
S: Supported
F: Documentation/fault-injection/
F: lib/fault-inject.c
+F: tools/testing/fault-injection/
FBTFT Framebuffer drivers
L: dri-devel@lists.freedesktop.org
@@ -8877,7 +8948,7 @@ W: https://floatingpoint.billm.au/
F: arch/x86/math-emu/
FRAMEBUFFER CORE
-M: Daniel Vetter <daniel@ffwll.ch>
+M: Simona Vetter <simona@ffwll.ch>
S: Odd Fixes
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/video/fbdev/core/
@@ -10942,6 +11013,7 @@ M: Nuno Sa <nuno.sa@analog.com>
R: Olivier Moysan <olivier.moysan@foss.st.com>
L: linux-iio@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/debugfs-iio-backend
F: drivers/iio/industrialio-backend.c
F: include/linux/iio/backend.h
@@ -11044,6 +11116,7 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
F: Documentation/devicetree/bindings/gpu/img,powervr-sgx.yaml
F: Documentation/gpu/imagination/
+F: drivers/gpu/drm/ci/xfails/powervr*
F: drivers/gpu/drm/imagination/
F: include/uapi/drm/pvr_drm.h
@@ -11169,10 +11242,17 @@ F: Documentation/devicetree/bindings/serio/
F: Documentation/input/
F: drivers/input/
F: include/dt-bindings/input/
+F: include/linux/gameport.h
+F: include/linux/i8042.h
F: include/linux/input.h
F: include/linux/input/
+F: include/linux/libps2.h
+F: include/linux/serio.h
+F: include/uapi/linux/gameport.h
F: include/uapi/linux/input-event-codes.h
F: include/uapi/linux/input.h
+F: include/uapi/linux/serio.h
+F: include/uapi/linux/uinput.h
INPUT MULTITOUCH (MT) PROTOCOL
M: Henrik Rydberg <rydberg@bitmath.org>
@@ -11832,6 +11912,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/dma-iommu.c
F: drivers/iommu/dma-iommu.h
F: drivers/iommu/iova.c
+F: include/linux/iommu-dma.h
F: include/linux/iova.h
IOMMU SUBSYSTEM
@@ -13374,6 +13455,16 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
F: drivers/iio/dac/ltc1660.c
+LTC2664 IIO DAC DRIVER
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Kim Seer Paller <kimseer.paller@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/dac/adi,ltc2664.yaml
+F: Documentation/devicetree/bindings/iio/dac/adi,ltc2672.yaml
+F: drivers/iio/dac/ltc2664.c
+
LTC2688 IIO DAC DRIVER
M: Nuno Sá <nuno.sa@analog.com>
L: linux-iio@vger.kernel.org
@@ -13614,7 +13705,7 @@ S: Maintained
F: Documentation/devicetree/bindings/mfd/marvell,88pm886-a1.yaml
F: drivers/input/misc/88pm886-onkey.c
F: drivers/mfd/88pm886.c
-F: drivers/regulators/88pm886-regulator.c
+F: drivers/regulator/88pm886-regulator.c
F: include/linux/mfd/88pm886.h
MARVELL ARMADA 3700 PHY DRIVERS
@@ -15142,6 +15233,13 @@ F: Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml
F: drivers/nvmem/microchip-otpc.c
F: include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
+MICROCHIP PAC1921 POWER/CURRENT MONITOR DRIVER
+M: Matteo Martelli <matteomartelli3@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/iio/adc/microchip,pac1921.yaml
+F: drivers/iio/adc/pac1921.c
+
MICROCHIP PAC1934 POWER/ENERGY MONITOR DRIVER
M: Marius Cristea <marius.cristea@microchip.com>
L: linux-iio@vger.kernel.org
@@ -15580,6 +15678,9 @@ F: include/dt-bindings/clock/mobileye,eyeq5-clk.h
MODULE SUPPORT
M: Luis Chamberlain <mcgrof@kernel.org>
+R: Petr Pavlu <petr.pavlu@suse.com>
+R: Sami Tolvanen <samitolvanen@google.com>
+R: Daniel Gomez <da.gomez@samsung.com>
L: linux-modules@vger.kernel.org
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -16942,6 +17043,7 @@ OMNIVISION OG01A1B SENSOR DRIVER
M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/ovti,og01a1b.yaml
F: drivers/media/i2c/og01a1b.c
OMNIVISION OV01A10 SENSOR DRIVER
@@ -17217,8 +17319,8 @@ M: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/oa-tc6-framework.rst
-F: drivers/include/linux/oa_tc6.h
F: drivers/net/ethernet/oa_tc6.c
+F: include/linux/oa_tc6.h
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh@kernel.org>
@@ -17531,7 +17633,7 @@ PCI DRIVER FOR ALTERA PCIE IP
M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/pci/altera-pcie.txt
+F: Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml
F: drivers/pci/controller/pcie-altera.c
PCI DRIVER FOR APPLIEDMICRO XGENE
@@ -17763,7 +17865,7 @@ PCI MSI DRIVER FOR ALTERA MSI IP
M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
+F: Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
F: drivers/pci/controller/pcie-altera-msi.c
PCI MSI DRIVER FOR APPLIEDMICRO XGENE
@@ -17916,6 +18018,7 @@ M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
+F: drivers/pci/controller/dwc/pcie-qcom-common.c
F: drivers/pci/controller/dwc/pcie-qcom.c
PCIE DRIVER FOR ROCKCHIP
@@ -17952,6 +18055,7 @@ L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+F: drivers/pci/controller/dwc/pcie-qcom-common.c
F: drivers/pci/controller/dwc/pcie-qcom-ep.c
PCMCIA SUBSYSTEM
@@ -18474,7 +18578,7 @@ F: tools/testing/selftests/proc/
PROC SYSCTL
M: Luis Chamberlain <mcgrof@kernel.org>
M: Kees Cook <kees@kernel.org>
-M: Joel Granados <j.granados@samsung.com>
+M: Joel Granados <joel.granados@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Maintained
@@ -18908,7 +19012,7 @@ M: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/admin-guide/media/qcom_camss.rst
-F: Documentation/devicetree/bindings/media/*camss*
+F: Documentation/devicetree/bindings/media/qcom,*camss*
F: drivers/media/platform/qcom/camss/
QUALCOMM CLOCK DRIVERS
@@ -18923,7 +19027,6 @@ F: include/dt-bindings/clock/qcom,*
QUALCOMM CLOUD AI (QAIC) DRIVER
M: Jeffrey Hugo <quic_jhugo@quicinc.com>
R: Carl Vanderlip <quic_carlv@quicinc.com>
-R: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Supported
@@ -19244,10 +19347,7 @@ F: drivers/char/random.c
F: include/linux/random.h
F: include/uapi/linux/random.h
F: drivers/virt/vmgenid.c
-F: include/vdso/getrandom.h
-F: lib/vdso/getrandom.c
-F: arch/x86/entry/vdso/vgetrandom*
-F: arch/x86/include/asm/vdso/getrandom*
+N: ^.*/vdso/[^/]*getrandom[^/]+$
RAPIDIO SUBSYSTEM
M: Matt Porter <mporter@kernel.crashing.org>
@@ -19944,6 +20044,12 @@ S: Supported
F: drivers/power/supply/bd99954-charger.c
F: drivers/power/supply/bd99954-charger.h
+ROHM BH1745 COLOUR SENSOR
+M: Mudit Sharma <muditsharma.info@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/iio/light/bh1745.c
+
ROHM BH1750 AMBIENT LIGHT SENSOR DRIVER
M: Tomasz Duszynski <tduszyns@gmail.com>
S: Maintained
@@ -20104,6 +20210,7 @@ R: Björn Roy Baron <bjorn3_gh@protonmail.com>
R: Benno Lossin <benno.lossin@proton.me>
R: Andreas Hindborg <a.hindborg@kernel.org>
R: Alice Ryhl <aliceryhl@google.com>
+R: Trevor Gross <tmgross@umich.edu>
L: rust-for-linux@vger.kernel.org
S: Supported
W: https://rust-for-linux.com
@@ -20499,6 +20606,19 @@ F: include/linux/wait.h
F: include/uapi/linux/sched.h
F: kernel/sched/
+SCHEDULER - SCHED_EXT
+R: Tejun Heo <tj@kernel.org>
+R: David Vernet <void@manifault.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+W: https://github.com/sched-ext/scx
+T: git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git
+F: include/linux/sched/ext.h
+F: kernel/sched/ext.h
+F: kernel/sched/ext.c
+F: tools/sched_ext/
+F: tools/testing/selftests/sched_ext
+
SCIOSENSE ENS160 MULTI-GAS SENSOR DRIVER
M: Gustavo Silva <gustavograzs@gmail.com>
S: Maintained
@@ -20783,6 +20903,12 @@ S: Maintained
F: Documentation/devicetree/bindings/iio/chemical/sensirion,scd4x.yaml
F: drivers/iio/chemical/scd4x.c
+SENSIRION SDP500 DIFFERENTIAL PRESSURE SENSOR DRIVER
+M: Petar Stoykov <petar.stoykov@prodrive-technologies.com>
+S: Maintained
+F: Documentation/devicetree/bindings/iio/pressure/sensirion,sdp500.yaml
+F: drivers/iio/pressure/sdp500.c
+
SENSIRION SGP40 GAS SENSOR DRIVER
M: Andreas Klinger <ak@it-klinger.de>
S: Maintained
@@ -23545,7 +23671,8 @@ F: drivers/media/pci/tw686x/
U-BOOT ENVIRONMENT VARIABLES
M: Rafał Miłecki <rafal@milecki.pl>
S: Maintained
-F: Documentation/devicetree/bindings/nvmem/u-boot,env.yaml
+F: Documentation/devicetree/bindings/nvmem/layouts/u-boot,env.yaml
+F: drivers/nvmem/layouts/u-boot-env.c
F: drivers/nvmem/u-boot-env.c
UACCE ACCELERATOR FRAMEWORK
@@ -24404,6 +24531,7 @@ F: include/linux/vdpa.h
F: include/linux/virtio*.h
F: include/linux/vringh.h
F: include/uapi/linux/virtio_*.h
+F: net/vmw_vsock/virtio*
F: tools/virtio/
F: tools/testing/selftests/drivers/net/virtio_net/
@@ -24594,6 +24722,20 @@ F: include/uapi/linux/vsockmon.h
F: net/vmw_vsock/
F: tools/testing/vsock/
+VMA
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: https://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: mm/vma.c
+F: mm/vma.h
+F: mm/vma_internal.h
+F: tools/testing/vma/
+
VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
R: Uladzislau Rezki <urezki@gmail.com>
@@ -25436,6 +25578,19 @@ S: Maintained
F: drivers/spi/spi-xtensa-xtfpga.c
F: sound/soc/xtensa/xtfpga-i2s.c
+XZ EMBEDDED
+M: Lasse Collin <lasse.collin@tukaani.org>
+S: Maintained
+W: https://tukaani.org/xz/embedded.html
+B: https://github.com/tukaani-project/xz-embedded/issues
+C: irc://irc.libera.chat/tukaani
+F: Documentation/staging/xz.rst
+F: include/linux/decompress/unxz.h
+F: include/linux/xz.h
+F: lib/decompress_unxz.c
+F: lib/xz/
+F: scripts/xz_wrap.sh
+
YAM DRIVER FOR AX.25
M: Jean-Paul Roubelat <jpr@f6fbb.org>
L: linux-hams@vger.kernel.org
@@ -25460,7 +25615,6 @@ F: tools/net/ynl/
YEALINK PHONE DRIVER
M: Henk Vergonet <Henk.Vergonet@gmail.com>
-L: usbb2k-api-dev@nongnu.org
S: Maintained
F: Documentation/input/devices/yealink.rst
F: drivers/input/misc/yealink.*
diff --git a/Makefile b/Makefile
index 34bd1d5f9672..187a4ce2728e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 11
+PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -579,10 +579,6 @@ else
RUSTC_OR_CLIPPY = $(RUSTC)
endif
-ifdef RUST_LIB_SRC
- export RUST_LIB_SRC
-endif
-
# Allows the usage of unstable features in stable compilers.
export RUSTC_BOOTSTRAP := 1
@@ -649,9 +645,11 @@ endif
# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
-# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
-# and from include/config/auto.conf.cmd to detect the compiler upgrade.
+# CC_VERSION_TEXT and RUSTC_VERSION_TEXT are referenced from Kconfig (so they
+# need export), and from include/config/auto.conf.cmd to detect the compiler
+# upgrade.
CC_VERSION_TEXT = $(subst $(pound),,$(shell LC_ALL=C $(CC) --version 2>/dev/null | head -n 1))
+RUSTC_VERSION_TEXT = $(subst $(pound),,$(shell $(RUSTC) --version 2>/dev/null))
ifneq ($(findstring clang,$(CC_VERSION_TEXT)),)
include $(srctree)/scripts/Makefile.clang
@@ -672,7 +670,7 @@ ifdef config-build
# KBUILD_DEFCONFIG may point out an alternative default configuration
# used for 'make defconfig'
include $(srctree)/arch/$(SRCARCH)/Makefile
-export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT
+export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT RUSTC_VERSION_TEXT
config: outputmakefile scripts_basic FORCE
$(Q)$(MAKE) $(build)=scripts/kconfig $@
@@ -928,6 +926,7 @@ ifdef CONFIG_SHADOW_CALL_STACK
ifndef CONFIG_DYNAMIC_SCS
CC_FLAGS_SCS := -fsanitize=shadow-call-stack
KBUILD_CFLAGS += $(CC_FLAGS_SCS)
+KBUILD_RUSTFLAGS += -Zsanitizer=shadow-call-stack
endif
export CC_FLAGS_SCS
endif
@@ -952,6 +951,16 @@ endif
ifdef CONFIG_CFI_CLANG
CC_FLAGS_CFI := -fsanitize=kcfi
+ifdef CONFIG_CFI_ICALL_NORMALIZE_INTEGERS
+ CC_FLAGS_CFI += -fsanitize-cfi-icall-experimental-normalize-integers
+endif
+ifdef CONFIG_RUST
+ # Always pass -Zsanitizer-cfi-normalize-integers as CONFIG_RUST selects
+ # CONFIG_CFI_ICALL_NORMALIZE_INTEGERS.
+ RUSTC_FLAGS_CFI := -Zsanitizer=kcfi -Zsanitizer-cfi-normalize-integers
+ KBUILD_RUSTFLAGS += $(RUSTC_FLAGS_CFI)
+ export RUSTC_FLAGS_CFI
+endif
KBUILD_CFLAGS += $(CC_FLAGS_CFI)
export CC_FLAGS_CFI
endif
@@ -1483,6 +1492,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
+ modules.builtin.ranges vmlinux.o.map \
compile_commands.json rust/test \
rust-project.json .vmlinux.objs .vmlinux.export.c
@@ -1947,7 +1957,7 @@ clean: $(clean-dirs)
-o -name '*.c.[012]*.*' \
-o -name '*.ll' \
-o -name '*.gcno' \
- -o -name '*.*.symversions' \) -type f -print \
+ \) -type f -print \
-o -name '.tmp_*' -print \
| xargs rm -rf
diff --git a/arch/Kconfig b/arch/Kconfig
index 4e2eaba9e305..98157b38f5cf 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -17,6 +17,15 @@ config CPU_MITIGATIONS
def_bool y
endif
+#
+# Selected by architectures that need custom DMA operations for e.g. legacy
+# IOMMUs not handled by dma-iommu. Drivers must never select this symbol.
+#
+config ARCH_HAS_DMA_OPS
+ depends on HAS_DMA
+ select DMA_OPS_HELPERS
+ bool
+
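
The alpha and arm hunks further down show the intended usage: the
architecture's top-level Kconfig entry selects ARCH_HAS_DMA_OPS
(replacing the old direct "select DMA_OPS"), while drivers are expected
to leave the symbol alone.
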
menu "General architecture-dependent options"
config ARCH_HAS_SUBPAGE_FAULTS
@@ -826,6 +835,22 @@ config CFI_CLANG
https://clang.llvm.org/docs/ControlFlowIntegrity.html
+config CFI_ICALL_NORMALIZE_INTEGERS
+ bool "Normalize CFI tags for integers"
+ depends on CFI_CLANG
+ depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
+ help
+ This option normalizes the CFI tags for integer types so that all
+ integer types of the same size and signedness receive the same CFI
+ tag.
+
+ The option is separate from CONFIG_RUST because it affects the ABI.
+ When working with build systems that care about the ABI, it is
+ convenient to be able to turn on this flag first, before Rust is
+ turned on.
+
+ This option is necessary for using CFI with Rust. If unsure, say N.
+
config CFI_PERMISSIVE
bool "Use CFI in permissive mode"
depends on CFI_CLANG
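
To make the help text concrete, a minimal C sketch of what the
normalization changes, assuming an LP64 target (illustrative only, not
from the tree):

    /* Two distinct C types that are both 64-bit unsigned on LP64: */
    typedef int (*cb_ul_t)(unsigned long arg);
    typedef int (*cb_ull_t)(unsigned long long arg);

    /*
     * Plain KCFI derives the per-signature tag from the mangled type
     * name, so indirect calls through cb_ul_t and cb_ull_t check
     * against different tags even though the ABI is identical.  With
     * -fsanitize-cfi-icall-experimental-normalize-integers the tag is
     * derived from (size, signedness) only, so both signatures share
     * one tag -- which is what Rust needs, since its u64 is a single
     * type that must interoperate with both C spellings.
     */
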
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 50ff06d5b799..109a4cddcd13 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -4,12 +4,12 @@ config ALPHA
default y
select ARCH_32BIT_USTAT_F_TINODE
select ARCH_HAS_CURRENT_STACK_POINTER
+ select ARCH_HAS_DMA_OPS if PCI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_NO_PREEMPT
select ARCH_NO_SG_CHAIN
select ARCH_USE_CMPXCHG_LOCKREF
- select DMA_OPS if PCI
select FORCE_PCI
select PCI_DOMAINS if PCI
select PCI_SYSCALL if PCI
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 91d4a4d9258c..ae1b96479d0c 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -3,17 +3,232 @@
#define _ALPHA_CMPXCHG_H
/*
- * Atomic exchange routines.
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
*/
-#define ____xchg(type, args...) __arch_xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
+static inline unsigned long
+____xchg_u8(volatile char *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " insbl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extbl %2,%4,%0\n"
+ " mskbl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg_u16(volatile short *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " inswl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extwl %2,%4,%0\n"
+ " mskwl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg_u32(volatile int *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+static inline unsigned long
+____xchg_u64(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid xchg(). */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____xchg(volatile void *ptr, unsigned long x, int size)
+{
+ return
+ size == 1 ? ____xchg_u8(ptr, x) :
+ size == 2 ? ____xchg_u16(ptr, x) :
+ size == 4 ? ____xchg_u32(ptr, x) :
+ size == 8 ? ____xchg_u64(ptr, x) :
+ (__xchg_called_with_bad_pointer(), x);
+}
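
Note the dispatch idiom here: sizeof(*(ptr)) is a compile-time
constant, so the ternary chain in ____xchg() folds down to a single
call, and an unsupported size leaves behind a reference to the declared
but never defined __xchg_called_with_bad_pointer(), turning the mistake
into a link-time error instead of silent miscompilation.
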
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+static inline unsigned long
+____cmpxchg_u8(volatile char *m, unsigned char old, unsigned char new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " insbl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extbl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskbl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u16(volatile short *m, unsigned short old, unsigned short new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " inswl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extwl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskwl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u32(volatile int *m, int old, int new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ return
+ size == 1 ? ____cmpxchg_u8(ptr, old, new) :
+ size == 2 ? ____cmpxchg_u16(ptr, old, new) :
+ size == 4 ? ____cmpxchg_u32(ptr, old, new) :
+ size == 8 ? ____cmpxchg_u64(ptr, old, new) :
+ (__cmpxchg_called_with_bad_pointer(), old);
+}
#define xchg_local(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_,\
+ (__typeof__(*(ptr))) ____xchg((ptr), (unsigned long)_x_, \
sizeof(*(ptr))); \
})
@@ -21,7 +236,7 @@
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (__typeof__(*(ptr))) ____cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, \
sizeof(*(ptr))); \
})
@@ -32,12 +247,6 @@
cmpxchg_local((ptr), (o), (n)); \
})
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...) __arch_xchg ##type(args)
-#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
-#include <asm/xchg.h>
-
/*
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
@@ -48,7 +257,7 @@
__typeof__(*(ptr)) _x_ = (x); \
smp_mb(); \
__ret = (__typeof__(*(ptr))) \
- __arch_xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ ____xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
smp_mb(); \
__ret; \
})
@@ -59,7 +268,7 @@
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
smp_mb(); \
- __ret = (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ __ret = (__typeof__(*(ptr))) ____cmpxchg((ptr), \
(unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr)));\
smp_mb(); \
__ret; \
@@ -71,6 +280,4 @@
arch_cmpxchg((ptr), (o), (n)); \
})
-#undef ____cmpxchg
-
#endif /* _ALPHA_CMPXCHG_H */
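
A hedged usage sketch of the contract spelled out in the comment above
("success is indicated by comparing RETURN with OLD"), written against
the generic cmpxchg() wrapper that arch_cmpxchg() ultimately backs;
illustrative only, not part of the patch:

    /* Add 'delta' atomically via the classic compare-and-swap retry loop. */
    static inline void atomic_add_cas(volatile long *ctr, long delta)
    {
        long old, seen;

        do {
            old = *ctr;                    /* snapshot current value */
            seen = cmpxchg(ctr, old, old + delta);
        } while (seen != old);             /* another CPU raced us: retry */
    }
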
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
deleted file mode 100644
index 7adb80c6746a..000000000000
--- a/arch/alpha/include/asm/xchg.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_CMPXCHG_H
-#error Do not include xchg.h directly!
-#else
-/*
- * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
- * except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/cmpxchg.h.
- */
-
-/*
- * Atomic exchange.
- * Since it can be used to implement critical sections
- * it must clobber "memory" (also for interrupts in UP).
- */
-
-static inline unsigned long
-____xchg(_u8, volatile char *m, unsigned long val)
-{
- unsigned long ret, tmp, addr64;
-
- __asm__ __volatile__(
- " andnot %4,7,%3\n"
- " insbl %1,%4,%1\n"
- "1: ldq_l %2,0(%3)\n"
- " extbl %2,%4,%0\n"
- " mskbl %2,%4,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%3)\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
- : "r" ((long)m), "1" (val) : "memory");
-
- return ret;
-}
-
-static inline unsigned long
-____xchg(_u16, volatile short *m, unsigned long val)
-{
- unsigned long ret, tmp, addr64;
-
- __asm__ __volatile__(
- " andnot %4,7,%3\n"
- " inswl %1,%4,%1\n"
- "1: ldq_l %2,0(%3)\n"
- " extwl %2,%4,%0\n"
- " mskwl %2,%4,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%3)\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
- : "r" ((long)m), "1" (val) : "memory");
-
- return ret;
-}
-
-static inline unsigned long
-____xchg(_u32, volatile int *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%4\n"
- " bis $31,%3,%1\n"
- " stl_c %1,%2\n"
- " beq %1,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m) : "memory");
-
- return val;
-}
-
-static inline unsigned long
-____xchg(_u64, volatile long *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%4\n"
- " bis $31,%3,%1\n"
- " stq_c %1,%2\n"
- " beq %1,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m) : "memory");
-
- return val;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid xchg(). */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____xchg(, volatile void *ptr, unsigned long x, int size)
-{
- switch (size) {
- case 1:
- return ____xchg(_u8, ptr, x);
- case 2:
- return ____xchg(_u16, ptr, x);
- case 4:
- return ____xchg(_u32, ptr, x);
- case 8:
- return ____xchg(_u64, ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-static inline unsigned long
-____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
-{
- unsigned long prev, tmp, cmp, addr64;
-
- __asm__ __volatile__(
- " andnot %5,7,%4\n"
- " insbl %1,%5,%1\n"
- "1: ldq_l %2,0(%4)\n"
- " extbl %2,%5,%0\n"
- " cmpeq %0,%6,%3\n"
- " beq %3,2f\n"
- " mskbl %2,%5,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%4)\n"
- " beq %2,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
- : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
-{
- unsigned long prev, tmp, cmp, addr64;
-
- __asm__ __volatile__(
- " andnot %5,7,%4\n"
- " inswl %1,%5,%1\n"
- "1: ldq_l %2,0(%4)\n"
- " extwl %2,%5,%0\n"
- " cmpeq %0,%6,%3\n"
- " beq %3,2f\n"
- " mskwl %2,%5,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%4)\n"
- " beq %2,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
- : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u32, volatile int *m, int old, int new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stl_c %1,%2\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stq_c %1,%2\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m) : "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- switch (size) {
- case 1:
- return ____cmpxchg(_u8, ptr, old, new);
- case 2:
- return ____cmpxchg(_u16, ptr, old, new);
- case 4:
- return ____cmpxchg(_u32, ptr, old, new);
- case 8:
- return ____cmpxchg(_u64, ptr, old, new);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#endif
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index e5f881bc8288..c0424de9e7cd 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -160,10 +160,10 @@ SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd,
.count = count
};
- if (!arg.file)
+ if (!fd_file(arg))
return -EBADF;
- error = iterate_dir(arg.file, &buf.ctx);
+ error = iterate_dir(fd_file(arg), &buf.ctx);
if (error >= 0)
error = buf.error;
if (count != buf.count)
@@ -1229,7 +1229,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
unsigned long limit;
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 163608fd49d1..5b2488142041 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -554,7 +554,7 @@ config ARC_BUILTIN_DTB_NAME
string "Built in DTB"
help
Set the name of the DTB to embed in the vmlinux binary
- Leaving it blank selects the minimal "skeleton" dtb
+ Leaving it blank selects the "nsim_700" dtb.
endmenu # "ARC Architecture Configuration"
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 89720d6d7e0d..319bbe270322 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_DRM=m
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 73ec01ed0492..8c1f1a111a17 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 4da0f626fa9d..75cab9f25b5b 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_DRM=m
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 1a68e4beebca..5aba3d850fa2 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -60,6 +60,7 @@ CONFIG_SERIAL_8250_DW=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
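
The same one-line CONFIG_I2C_DESIGNWARE_CORE addition recurs in the arm
defconfigs below; presumably the DesignWare platform driver now depends
on, rather than selects, the core symbol, so defconfigs that previously
got the core code implicitly must now enable it explicitly.
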
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 69a915297155..2185afe8d59f 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -23,7 +23,8 @@
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0ec034933cae..749179a1d162 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -10,6 +10,7 @@ config ARM
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DMA_ALLOC if MMU
+ select ARCH_HAS_DMA_OPS
select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
@@ -54,7 +55,6 @@ config ARM
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
select DMA_DECLARE_COHERENT
select DMA_GLOBAL_POOL if !MMU
- select DMA_OPS
select DMA_NONCOHERENT_MMAP if MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 71afdd98ddf2..aafebf145738 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -183,7 +183,6 @@ machine-$(CONFIG_ARCH_CLPS711X) += clps711x
machine-$(CONFIG_ARCH_DAVINCI) += davinci
machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor
machine-$(CONFIG_ARCH_DOVE) += dove
-machine-$(CONFIG_ARCH_EP93XX) += ep93xx
machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_FOOTBRIDGE) += footbridge
machine-$(CONFIG_ARCH_GEMINI) += gemini
diff --git a/arch/arm/boot/dts/cirrus/Makefile b/arch/arm/boot/dts/cirrus/Makefile
index e944d3e2129d..e6015983e464 100644
--- a/arch/arm/boot/dts/cirrus/Makefile
+++ b/arch/arm/boot/dts/cirrus/Makefile
@@ -3,3 +3,7 @@ dtb-$(CONFIG_ARCH_CLPS711X) += \
ep7211-edb7211.dtb
dtb-$(CONFIG_ARCH_CLPS711X) += \
ep7211-edb7211.dtb
+dtb-$(CONFIG_ARCH_EP93XX) += \
+ ep93xx-edb9302.dtb \
+ ep93xx-bk3.dtb \
+ ep93xx-ts7250.dtb
diff --git a/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts b/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts
new file mode 100644
index 000000000000..40bc9b2a6ba8
--- /dev/null
+++ b/arch/arm/boot/dts/cirrus/ep93xx-bk3.dts
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for Liebherr controller BK3.1 based on Cirrus EP9302 SoC
+ */
+/dts-v1/;
+#include "ep93xx.dtsi"
+
+/ {
+ model = "Liebherr controller BK3.1";
+ compatible = "liebherr,bk3", "cirrus,ep9301";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen {
+ };
+
+ memory@0 {
+ device_type = "memory";
+ /* should be set from ATAGS */
+ reg = <0x00000000 0x02000000>,
+ <0x000530c0 0x01fdd000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-0 {
+ label = "grled";
+ gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ };
+
+ led-1 {
+ label = "rdled";
+ gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_FAULT;
+ };
+ };
+};
+
+&ebi {
+ nand-controller@60000000 {
+ compatible = "technologic,ts7200-nand";
+ reg = <0x60000000 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "System";
+ reg = <0x00000000 0x01e00000>;
+ read-only;
+ };
+
+ partition@1e00000 {
+ label = "Data";
+ reg = <0x01e00000 0x05f20000>;
+ };
+
+ partition@7d20000 {
+ label = "RedBoot";
+ reg = <0x07d20000 0x002e0000>;
+ read-only;
+ };
+ };
+ };
+ };
+};
+
+&eth0 {
+ phy-handle = <&phy0>;
+};
+
+&i2s {
+ dmas = <&dma0 0 1>, <&dma0 0 2>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_on_ac97_pins>;
+ status = "okay";
+};
+
+&gpio1 {
+ /* PWM */
+ gpio-ranges = <&syscon 6 163 1>;
+};
+
+&gpio4 {
+ gpio-ranges = <&syscon 0 97 2>;
+ status = "okay";
+};
+
+&gpio6 {
+ gpio-ranges = <&syscon 0 87 2>;
+ status = "okay";
+};
+
+&gpio7 {
+ gpio-ranges = <&syscon 2 199 4>;
+ status = "okay";
+};
+
+&mdio0 {
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
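
For readers new to the gpio-ranges syntax used throughout these board
files: each <&syscon A B C> entry is the standard
<phandle gpio-offset pin-base count> tuple, so
gpio-ranges = <&syscon 0 97 2> in &gpio4 maps the bank's GPIOs 0 and 1
to pins 97 and 98 of the syscon-backed pin controller.
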
diff --git a/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts b/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts
new file mode 100644
index 000000000000..312b2be1c638
--- /dev/null
+++ b/arch/arm/boot/dts/cirrus/ep93xx-edb9302.dts
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+/*
+ * Device Tree file for Cirrus Logic EDB9302 board based on EP9302 SoC
+ */
+/dts-v1/;
+#include "ep93xx.dtsi"
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cirrus,edb9302", "cirrus,ep9301";
+ model = "cirrus,edb9302";
+
+ chosen {
+ };
+
+ memory@0 {
+ device_type = "memory";
+ /* should be set from ATAGS */
+ reg = <0x0000000 0x800000>,
+ <0x1000000 0x800000>,
+ <0x4000000 0x800000>,
+ <0x5000000 0x800000>;
+ };
+
+ sound {
+ compatible = "audio-graph-card2";
+ label = "EDB93XX";
+ links = <&i2s_port>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-0 {
+ label = "grled";
+ gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ };
+
+ led-1 {
+ label = "rdled";
+ gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_FAULT;
+ };
+ };
+};
+
+&adc {
+ status = "okay";
+};
+
+&ebi {
+ flash@60000000 {
+ compatible = "cfi-flash";
+ reg = <0x60000000 0x1000000>;
+ bank-width = <2>;
+ };
+};
+
+&eth0 {
+ phy-handle = <&phy0>;
+};
+
+&gpio0 {
+ gpio-ranges = <&syscon 0 153 1>,
+ <&syscon 1 152 1>,
+ <&syscon 2 151 1>,
+ <&syscon 3 148 1>,
+ <&syscon 4 147 1>,
+ <&syscon 5 146 1>,
+ <&syscon 6 145 1>,
+ <&syscon 7 144 1>;
+};
+
+&gpio1 {
+ gpio-ranges = <&syscon 0 143 1>,
+ <&syscon 1 142 1>,
+ <&syscon 2 141 1>,
+ <&syscon 3 140 1>,
+ <&syscon 4 165 1>,
+ <&syscon 5 164 1>,
+ <&syscon 6 163 1>,
+ <&syscon 7 160 1>;
+};
+
+&gpio2 {
+ gpio-ranges = <&syscon 0 115 1>;
+};
+
+/* edb9302 doesn't have GPIO Port D present */
+&gpio3 {
+ status = "disabled";
+};
+
+&gpio4 {
+ gpio-ranges = <&syscon 0 97 2>;
+};
+
+&gpio5 {
+ gpio-ranges = <&syscon 1 170 1>,
+ <&syscon 2 169 1>,
+ <&syscon 3 168 1>;
+};
+
+&gpio6 {
+ gpio-ranges = <&syscon 0 87 2>;
+};
+
+&gpio7 {
+ gpio-ranges = <&syscon 2 199 4>;
+};
+
+&i2s {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_on_ac97_pins>;
+ status = "okay";
+ i2s_port: port {
+ i2s_ep: endpoint {
+ system-clock-direction-out;
+ frame-master;
+ bitclock-master;
+ mclk-fs = <256>;
+ dai-format = "i2s";
+ convert-channels = <2>;
+ convert-sample-format = "s32_le";
+ remote-endpoint = <&codec_ep>;
+ };
+ };
+};
+
+&mdio0 {
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+};
+
+&spi0 {
+ cs-gpios = <&gpio0 6 GPIO_ACTIVE_LOW
+ &gpio0 7 GPIO_ACTIVE_LOW>;
+ dmas = <&dma1 10 2>, <&dma1 10 1>;
+ dma-names = "rx", "tx";
+ status = "okay";
+
+ cs4271: codec@0 {
+ compatible = "cirrus,cs4271";
+ reg = <0>;
+ #sound-dai-cells = <0>;
+ spi-max-frequency = <6000000>;
+ spi-cpol;
+ spi-cpha;
+ reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+ port {
+ codec_ep: endpoint {
+ remote-endpoint = <&i2s_ep>;
+ };
+ };
+ };
+
+ at25f1024: eeprom@1 {
+ compatible = "atmel,at25";
+ reg = <1>;
+ address-width = <8>;
+ size = <0x20000>;
+ pagesize = <256>;
+ spi-max-frequency = <20000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts b/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts
new file mode 100644
index 000000000000..9e03f93d9fc8
--- /dev/null
+++ b/arch/arm/boot/dts/cirrus/ep93xx-ts7250.dts
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for Technologic Systems ts7250 board based on Cirrus EP9302 SoC
+ */
+/dts-v1/;
+#include "ep93xx.dtsi"
+
+/ {
+ compatible = "technologic,ts7250", "cirrus,ep9301";
+ model = "TS-7250 SBC";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen {
+ };
+
+ memory@0 {
+ device_type = "memory";
+ /* should be set from ATAGS */
+ reg = <0x00000000 0x02000000>,
+ <0x000530c0 0x01fdd000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-0 {
+ label = "grled";
+ gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ };
+
+ led-1 {
+ label = "rdled";
+ gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ function = LED_FUNCTION_FAULT;
+ };
+ };
+};
+
+&ebi {
+ nand-controller@60000000 {
+ compatible = "technologic,ts7200-nand";
+ reg = <0x60000000 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "TS-BOOTROM";
+ reg = <0x00000000 0x00020000>;
+ read-only;
+ };
+
+ partition@20000 {
+ label = "Linux";
+ reg = <0x00020000 0x07d00000>;
+ };
+
+ partition@7d20000 {
+ label = "RedBoot";
+ reg = <0x07d20000 0x002e0000>;
+ read-only;
+ };
+ };
+ };
+ };
+
+ rtc@10800000 {
+ compatible = "st,m48t86";
+ reg = <0x10800000 0x1>,
+ <0x11700000 0x1>;
+ };
+
+ watchdog@23800000 {
+ compatible = "technologic,ts7200-wdt";
+ reg = <0x23800000 0x01>,
+ <0x23c00000 0x01>;
+ timeout-sec = <30>;
+ };
+};
+
+&eth0 {
+ phy-handle = <&phy0>;
+};
+
+&gpio1 {
+ /* PWM */
+ gpio-ranges = <&syscon 6 163 1>;
+};
+
+/* ts7250 doesn't have GPIO Port D present */
+&gpio3 {
+ status = "disabled";
+};
+
+&gpio4 {
+ gpio-ranges = <&syscon 0 97 2>;
+};
+
+&gpio6 {
+ gpio-ranges = <&syscon 0 87 2>;
+};
+
+&gpio7 {
+ gpio-ranges = <&syscon 2 199 4>;
+};
+
+&spi0 {
+ cs-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ dmas = <&dma1 10 2>, <&dma1 10 1>;
+ dma-names = "rx", "tx";
+ status = "okay";
+
+ tmp122: temperature-sensor@0 {
+ compatible = "ti,tmp122";
+ reg = <0>;
+ spi-max-frequency = <2000000>;
+ };
+};
+
+&mdio0 {
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
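
As a quick consistency check on the ts7250 partition map: TS-BOOTROM
ends at 0x00020000 where Linux begins, 0x00020000 + 0x07d00000 =
0x07d20000 is where RedBoot begins, and 0x07d20000 + 0x002e0000 =
0x08000000 -- exactly the 0x8000000 (128 MiB) window the
nand-controller node claims at 0x60000000.
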
diff --git a/arch/arm/boot/dts/cirrus/ep93xx.dtsi b/arch/arm/boot/dts/cirrus/ep93xx.dtsi
new file mode 100644
index 000000000000..0dd1eee346ca
--- /dev/null
+++ b/arch/arm/boot/dts/cirrus/ep93xx.dtsi
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree file for Cirrus Logic systems EP93XX SoC
+ */
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+/ {
+ soc: soc {
+ compatible = "simple-bus";
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ syscon: syscon@80930000 {
+ compatible = "cirrus,ep9301-syscon", "syscon";
+ reg = <0x80930000 0x1000>;
+
+ #clock-cells = <1>;
+ clocks = <&xtali>;
+
+ spi_default_pins: pins-spi {
+ function = "spi";
+ groups = "ssp";
+ };
+
+ ac97_default_pins: pins-ac97 {
+ function = "ac97";
+ groups = "ac97";
+ };
+
+ i2s_on_ssp_pins: pins-i2sonssp {
+ function = "i2s";
+ groups = "i2s_on_ssp";
+ };
+
+ i2s_on_ac97_pins: pins-i2sonac97 {
+ function = "i2s";
+ groups = "i2s_on_ac97";
+ };
+
+ gpio1_default_pins: pins-gpio1 {
+ function = "gpio";
+ groups = "gpio1agrp";
+ };
+
+ pwm1_default_pins: pins-pwm1 {
+ function = "pwm";
+ groups = "pwm1";
+ };
+
+ gpio2_default_pins: pins-gpio2 {
+ function = "gpio";
+ groups = "gpio2agrp";
+ };
+
+ gpio3_default_pins: pins-gpio3 {
+ function = "gpio";
+ groups = "gpio3agrp";
+ };
+
+ keypad_default_pins: pins-keypad {
+ function = "keypad";
+ groups = "keypadgrp";
+ };
+
+ gpio4_default_pins: pins-gpio4 {
+ function = "gpio";
+ groups = "gpio4agrp";
+ };
+
+ gpio6_default_pins: pins-gpio6 {
+ function = "gpio";
+ groups = "gpio6agrp";
+ };
+
+ gpio7_default_pins: pins-gpio7 {
+ function = "gpio";
+ groups = "gpio7agrp";
+ };
+
+ ide_default_pins: pins-ide {
+ function = "pata";
+ groups = "idegrp";
+ };
+
+ lcd_on_dram0_pins: pins-rasteronsdram0 {
+ function = "lcd";
+ groups = "rasteronsdram0grp";
+ };
+
+ lcd_on_dram3_pins: pins-rasteronsdram3 {
+ function = "lcd";
+ groups = "rasteronsdram3grp";
+ };
+ };
+
+ adc: adc@80900000 {
+ compatible = "cirrus,ep9301-adc";
+ reg = <0x80900000 0x28>;
+ clocks = <&syscon EP93XX_CLK_ADC>;
+ interrupt-parent = <&vic0>;
+ interrupts = <30>;
+ status = "disabled";
+ };
+
+ /*
+ * The EP93XX expansion bus is a set of up to 7 windows, each up
+ * to 16MB, in the 256MB space from 0x50000000 to 0x5fffffff.
+ * But since we don't need to set it up in any way, we can
+ * represent it as a simple-bus.
+ */
+ ebi: bus@80080000 {
+ compatible = "simple-bus";
+ reg = <0x80080000 0x20>;
+ native-endian;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ };
+
+ dma0: dma-controller@80000000 {
+ compatible = "cirrus,ep9301-dma-m2p";
+ reg = <0x80000000 0x0040>,
+ <0x80000040 0x0040>,
+ <0x80000080 0x0040>,
+ <0x800000c0 0x0040>,
+ <0x80000240 0x0040>,
+ <0x80000200 0x0040>,
+ <0x800002c0 0x0040>,
+ <0x80000280 0x0040>,
+ <0x80000340 0x0040>,
+ <0x80000300 0x0040>;
+ clocks = <&syscon EP93XX_CLK_M2P0>,
+ <&syscon EP93XX_CLK_M2P1>,
+ <&syscon EP93XX_CLK_M2P2>,
+ <&syscon EP93XX_CLK_M2P3>,
+ <&syscon EP93XX_CLK_M2P4>,
+ <&syscon EP93XX_CLK_M2P5>,
+ <&syscon EP93XX_CLK_M2P6>,
+ <&syscon EP93XX_CLK_M2P7>,
+ <&syscon EP93XX_CLK_M2P8>,
+ <&syscon EP93XX_CLK_M2P9>;
+ clock-names = "m2p0", "m2p1",
+ "m2p2", "m2p3",
+ "m2p4", "m2p5",
+ "m2p6", "m2p7",
+ "m2p8", "m2p9";
+ interrupt-parent = <&vic0>;
+ interrupts = <7>, <8>, <9>, <10>, <11>,
+ <12>, <13>, <14>, <15>, <16>;
+ #dma-cells = <2>;
+ };
+
+ dma1: dma-controller@80000100 {
+ compatible = "cirrus,ep9301-dma-m2m";
+ reg = <0x80000100 0x0040>,
+ <0x80000140 0x0040>;
+ clocks = <&syscon EP93XX_CLK_M2M0>,
+ <&syscon EP93XX_CLK_M2M1>;
+ clock-names = "m2m0", "m2m1";
+ interrupt-parent = <&vic0>;
+ interrupts = <17>, <18>;
+ #dma-cells = <2>;
+ };
+
+ eth0: ethernet@80010000 {
+ compatible = "cirrus,ep9301-eth";
+ reg = <0x80010000 0x10000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <7>;
+ mdio0: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ gpio0: gpio@80840000 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840000 0x04>,
+ <0x80840010 0x04>,
+ <0x80840090 0x1c>;
+ reg-names = "data", "dir", "intr";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&vic1>;
+ interrupts = <27>;
+ };
+
+ gpio1: gpio@80840004 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840004 0x04>,
+ <0x80840014 0x04>,
+ <0x808400ac 0x1c>;
+ reg-names = "data", "dir", "intr";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&vic1>;
+ interrupts = <27>;
+ };
+
+ gpio2: gpio@80840008 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840008 0x04>,
+ <0x80840018 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio2_default_pins>;
+ };
+
+ gpio3: gpio@8084000c {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x8084000c 0x04>,
+ <0x8084001c 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio3_default_pins>;
+ };
+
+ gpio4: gpio@80840020 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840020 0x04>,
+ <0x80840024 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio4_default_pins>;
+ };
+
+ gpio5: gpio@80840030 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840030 0x04>,
+ <0x80840034 0x04>,
+ <0x8084004c 0x1c>;
+ reg-names = "data", "dir", "intr";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts-extended = <&vic0 19>, <&vic0 20>,
+ <&vic0 21>, <&vic0 22>,
+ <&vic1 15>, <&vic1 16>,
+ <&vic1 17>, <&vic1 18>;
+ };
+
+ gpio6: gpio@80840038 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840038 0x04>,
+ <0x8084003c 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio6_default_pins>;
+ };
+
+ gpio7: gpio@80840040 {
+ compatible = "cirrus,ep9301-gpio";
+ reg = <0x80840040 0x04>,
+ <0x80840044 0x04>;
+ reg-names = "data", "dir";
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio7_default_pins>;
+ };
+
+ i2s: i2s@80820000 {
+ compatible = "cirrus,ep9301-i2s";
+ reg = <0x80820000 0x100>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&vic1>;
+ interrupts = <28>;
+ clocks = <&syscon EP93XX_CLK_I2S_MCLK>,
+ <&syscon EP93XX_CLK_I2S_SCLK>,
+ <&syscon EP93XX_CLK_I2S_LRCLK>;
+ clock-names = "mclk", "sclk", "lrclk";
+ dmas = <&dma0 0 1>, <&dma0 0 2>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ ide: ide@800a0000 {
+ compatible = "cirrus,ep9312-pata";
+ reg = <0x800a0000 0x38>;
+ interrupt-parent = <&vic1>;
+ interrupts = <8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ide_default_pins>;
+ status = "disabled";
+ };
+
+ vic0: interrupt-controller@800b0000 {
+ compatible = "arm,pl192-vic";
+ reg = <0x800b0000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ valid-mask = <0x7ffffffc>;
+ valid-wakeup-mask = <0x0>;
+ };
+
+ vic1: interrupt-controller@800c0000 {
+ compatible = "arm,pl192-vic";
+ reg = <0x800c0000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ valid-mask = <0x1fffffff>;
+ valid-wakeup-mask = <0x0>;
+ };
+
+ keypad: keypad@800f0000 {
+ compatible = "cirrus,ep9307-keypad";
+ reg = <0x800f0000 0x0c>;
+ interrupt-parent = <&vic0>;
+ interrupts = <29>;
+ clocks = <&syscon EP93XX_CLK_KEYPAD>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&keypad_default_pins>;
+ linux,keymap = <KEY_UP>,
+ <KEY_DOWN>,
+ <KEY_VOLUMEDOWN>,
+ <KEY_HOME>,
+ <KEY_RIGHT>,
+ <KEY_LEFT>,
+ <KEY_ENTER>,
+ <KEY_VOLUMEUP>,
+ <KEY_F6>,
+ <KEY_F8>,
+ <KEY_F9>,
+ <KEY_F10>,
+ <KEY_F1>,
+ <KEY_F2>,
+ <KEY_F3>,
+ <KEY_POWER>;
+ };
+
+ pwm0: pwm@80910000 {
+ compatible = "cirrus,ep9301-pwm";
+ reg = <0x80910000 0x10>;
+ clocks = <&syscon EP93XX_CLK_PWM>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@80910020 {
+ compatible = "cirrus,ep9301-pwm";
+ reg = <0x80910020 0x10>;
+ clocks = <&syscon EP93XX_CLK_PWM>;
+ #pwm-cells = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm1_default_pins>;
+ status = "disabled";
+ };
+
+ rtc0: rtc@80920000 {
+ compatible = "cirrus,ep9301-rtc";
+ reg = <0x80920000 0x100>;
+ };
+
+ spi0: spi@808a0000 {
+ compatible = "cirrus,ep9301-spi";
+ reg = <0x808a0000 0x18>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&vic1>;
+ interrupts = <21>;
+ clocks = <&syscon EP93XX_CLK_SPI>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_default_pins>;
+ status = "disabled";
+ };
+
+ timer: timer@80810000 {
+ compatible = "cirrus,ep9301-timer";
+ reg = <0x80810000 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <19>;
+ };
+
+ uart0: serial@808c0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x808c0000 0x1000>;
+ arm,primecell-periphid = <0x00041010>;
+ clocks = <&syscon EP93XX_CLK_UART1>, <&syscon EP93XX_CLK_UART>;
+ clock-names = "uartclk", "apb_pclk";
+ interrupt-parent = <&vic1>;
+ interrupts = <20>;
+ status = "disabled";
+ };
+
+ uart1: uart@808d0000 {
+ compatible = "arm,primecell";
+ reg = <0x808d0000 0x1000>;
+ arm,primecell-periphid = <0x00041010>;
+ clocks = <&syscon EP93XX_CLK_UART2>, <&syscon EP93XX_CLK_UART>;
+ clock-names = "apb:uart2", "apb_pclk";
+ interrupt-parent = <&vic1>;
+ interrupts = <22>;
+ status = "disabled";
+ };
+
+ uart2: uart@808b0000 {
+ compatible = "arm,primecell";
+ reg = <0x808b0000 0x1000>;
+ arm,primecell-periphid = <0x00041010>;
+ clocks = <&syscon EP93XX_CLK_UART3>, <&syscon EP93XX_CLK_UART>;
+ clock-names = "apb:uart3", "apb_pclk";
+ interrupt-parent = <&vic1>;
+ interrupts = <23>;
+ status = "disabled";
+ };
+
+ usb0: usb@80020000 {
+ compatible = "generic-ohci";
+ reg = <0x80020000 0x10000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <24>;
+ clocks = <&syscon EP93XX_CLK_USB>;
+ status = "disabled";
+ };
+
+ watchdog0: watchdog@80940000 {
+ compatible = "cirrus,ep9301-wdt";
+ reg = <0x80940000 0x08>;
+ };
+ };
+
+ xtali: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <14745600>;
+ clock-output-names = "xtali";
+ };
+};
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index 0376a65e8bc1..e19c1039fb93 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -43,6 +43,7 @@ CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 62734530a3d6..9a5f5c439b87 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -413,6 +413,7 @@ CONFIG_I2C_AT91=m
CONFIG_I2C_BCM2835=y
CONFIG_I2C_CADENCE=y
CONFIG_I2C_DAVINCI=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_DIGICOLOR=m
CONFIG_I2C_EMEV2=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index f2ca5c9131b5..e1cb170c2bf0 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -277,6 +277,7 @@ CONFIG_HW_RANDOM=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_MUX_PCA954x=m
CONFIG_I2C_MUX_PINCTRL=m
+CONFIG_I2C_DESIGNWARE_CORE=m
CONFIG_I2C_DESIGNWARE_PLATFORM=m
CONFIG_I2C_GPIO=y
CONFIG_I2C_PXA_SLAVE=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index e82c3866b810..294906c8f16e 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -83,6 +83,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_8250_DW=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_CADENCE_QUADSPI=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
index c8128a6180e7..a8f992fdb30d 100644
--- a/arch/arm/configs/spear13xx_defconfig
+++ b/arch/arm/configs/spear13xx_defconfig
@@ -62,6 +62,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index 97ea2e9a6f07..8dc5a388759c 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -42,6 +42,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig
index a7a3413ac968..4e9e1a6ff381 100644
--- a/arch/arm/configs/spear6xx_defconfig
+++ b/arch/arm/configs/spear6xx_defconfig
@@ -33,6 +33,7 @@ CONFIG_STMMAC_ETH=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index d00f4040a9f5..f5781ff54a5c 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -239,19 +239,19 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
struct flock64 flock;
long err = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
switch (cmd) {
case F_GETLK64:
case F_OFD_GETLK:
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
break;
err = get_oabi_flock(&flock, argp);
if (err)
break;
- err = fcntl_getlk64(f.file, cmd, &flock);
+ err = fcntl_getlk64(fd_file(f), cmd, &flock);
if (!err)
err = put_oabi_flock(&flock, argp);
break;
@@ -259,13 +259,13 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
case F_SETLKW64:
case F_OFD_SETLK:
case F_OFD_SETLKW:
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
break;
err = get_oabi_flock(&flock, argp);
if (err)
break;
- err = fcntl_setlk64(fd, f.file, cmd, &flock);
+ err = fcntl_setlk64(fd, fd_file(f), cmd, &flock);
break;
default:
err = sys_fcntl64(fd, cmd, arg);
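
The f.file -> fd_file(f) conversions here and in osf_sys.c above are
mechanical: struct fd stops exposing a .file member, and all readers go
through the accessor. A hedged sketch of the shape this enables (the
authoritative definition lives in include/linux/file.h and may differ
in detail):

    /*
     * Pointer and fdput flags packed into one word; the flags hide in
     * the low alignment bits of the struct file pointer, so direct
     * member access is no longer possible -- use fd_file() instead.
     */
    struct fd {
        unsigned long word;    /* struct file * | FDPUT_* flag bits */
    };

    #define fd_file(f)  ((struct file *)((f).word & ~3UL))
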
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index 522510baed49..cf57fca97908 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -8,6 +8,7 @@
#include <linux/raid/xor.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("NEON accelerated XOR implementation");
MODULE_LICENSE("GPL");
#ifndef __ARM_NEON__
diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig
index 703f3d232a60..812b71dcf60e 100644
--- a/arch/arm/mach-ep93xx/Kconfig
+++ b/arch/arm/mach-ep93xx/Kconfig
@@ -3,27 +3,27 @@ menuconfig ARCH_EP93XX
bool "EP93xx-based"
depends on ATAGS
depends on ARCH_MULTI_V4T
+ # CONFIG_ARCH_MULTI_V7 is not set
depends on CPU_LITTLE_ENDIAN
+ select ARCH_HAS_RESET_CONTROLLER
select ARCH_SPARSEMEM_ENABLE
select ARM_AMBA
select ARM_VIC
+ select ARM_APPENDED_DTB # Old Redboot bootloaders deployed
+ select ARM_ATAG_DTB_COMPAT # we need this to update dt memory node
+ select COMMON_CLK_EP93XX
+ select EP93XX_TIMER
select CLKSRC_MMIO
select CPU_ARM920T
select GPIOLIB
+ select PINCTRL
+ select PINCTRL_EP93XX
help
This enables support for the Cirrus EP93xx series of CPUs.
if ARCH_EP93XX
-menu "Cirrus EP93xx Implementation Options"
-
-config EP93XX_SOC_COMMON
- bool
- default y
- select SOC_BUS
- select LEDS_GPIO_REGISTER
-
-comment "EP93xx Platforms"
+# menu "EP93xx Platforms"
config MACH_BK3
bool "Support Liebherr BK3.1"
@@ -103,6 +103,6 @@ config MACH_VISION_EP9307
Say 'Y' here if you want your kernel to support the
Vision Engraving Systems EP9307 SoM.
-endmenu
+# endmenu
endif
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
deleted file mode 100644
index 62e37403df14..000000000000
--- a/arch/arm/mach-ep93xx/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the linux kernel.
-#
-obj-y := core.o clock.o timer-ep93xx.o
-
-obj-$(CONFIG_EP93XX_DMA) += dma.o
-
-obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o
-obj-$(CONFIG_MACH_TS72XX) += ts72xx.o
-obj-$(CONFIG_MACH_VISION_EP9307)+= vision_ep9307.o
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
deleted file mode 100644
index 85a496ddc619..000000000000
--- a/arch/arm/mach-ep93xx/clock.c
+++ /dev/null
@@ -1,733 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/clock.c
- * Clock control for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/soc/cirrus/ep93xx.h>
-
-#include "hardware.h"
-
-#include <asm/div64.h>
-
-#include "soc.h"
-
-static DEFINE_SPINLOCK(clk_lock);
-
-static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
-static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
-static char pclk_divisors[] = { 1, 2, 4, 8 };
-
-static char adc_divisors[] = { 16, 4 };
-static char sclk_divisors[] = { 2, 4 };
-static char lrclk_divisors[] = { 32, 64, 128 };
-
-static const char * const mux_parents[] = {
- "xtali",
- "pll1",
- "pll2"
-};
-
-/*
- * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS
- */
-static unsigned long calc_pll_rate(unsigned long long rate, u32 config_word)
-{
- int i;
-
- rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */
- rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */
- do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */
- for (i = 0; i < ((config_word >> 16) & 3); i++) /* PS */
- rate >>= 1;
-
- return (unsigned long)rate;
-}
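
A worked instance of the formula above, with illustrative register
fields (not taken from any board): X1FBD = 1, X2FBD = 49, X2IPD = 2 and
PS = 0 give 14.7456 MHz * 2 * 50 / 3 = 491.52 MHz, which is what
calc_pll_rate() computes step by step from the packed config word.
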
-
-struct clk_psc {
- struct clk_hw hw;
- void __iomem *reg;
- u8 bit_idx;
- u32 mask;
- u8 shift;
- u8 width;
- char *div;
- u8 num_div;
- spinlock_t *lock;
-};
-
-#define to_clk_psc(_hw) container_of(_hw, struct clk_psc, hw)
-
-static int ep93xx_clk_is_enabled(struct clk_hw *hw)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- u32 val = readl(psc->reg);
-
- return (val & BIT(psc->bit_idx)) ? 1 : 0;
-}
-
-static int ep93xx_clk_enable(struct clk_hw *hw)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long flags = 0;
- u32 val;
-
- if (psc->lock)
- spin_lock_irqsave(psc->lock, flags);
-
- val = __raw_readl(psc->reg);
- val |= BIT(psc->bit_idx);
-
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- if (psc->lock)
- spin_unlock_irqrestore(psc->lock, flags);
-
- return 0;
-}
-
-static void ep93xx_clk_disable(struct clk_hw *hw)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long flags = 0;
- u32 val;
-
- if (psc->lock)
- spin_lock_irqsave(psc->lock, flags);
-
- val = __raw_readl(psc->reg);
- val &= ~BIT(psc->bit_idx);
-
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- if (psc->lock)
- spin_unlock_irqrestore(psc->lock, flags);
-}
-
-static const struct clk_ops clk_ep93xx_gate_ops = {
- .enable = ep93xx_clk_enable,
- .disable = ep93xx_clk_disable,
- .is_enabled = ep93xx_clk_is_enabled,
-};
-
-static struct clk_hw *ep93xx_clk_register_gate(const char *name,
- const char *parent_name,
- void __iomem *reg,
- u8 bit_idx)
-{
- struct clk_init_data init;
- struct clk_psc *psc;
- struct clk *clk;
-
- psc = kzalloc(sizeof(*psc), GFP_KERNEL);
- if (!psc)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &clk_ep93xx_gate_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
-
- psc->reg = reg;
- psc->bit_idx = bit_idx;
- psc->hw.init = &init;
- psc->lock = &clk_lock;
-
- clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk)) {
- kfree(psc);
- return ERR_CAST(clk);
- }
-
- return &psc->hw;
-}
-
-static u8 ep93xx_mux_get_parent(struct clk_hw *hw)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- u32 val = __raw_readl(psc->reg);
-
- if (!(val & EP93XX_SYSCON_CLKDIV_ESEL))
- return 0;
-
- if (!(val & EP93XX_SYSCON_CLKDIV_PSEL))
- return 1;
-
- return 2;
-}
-
-static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long flags = 0;
- u32 val;
-
- if (index >= ARRAY_SIZE(mux_parents))
- return -EINVAL;
-
- if (psc->lock)
- spin_lock_irqsave(psc->lock, flags);
-
- val = __raw_readl(psc->reg);
- val &= ~(EP93XX_SYSCON_CLKDIV_ESEL | EP93XX_SYSCON_CLKDIV_PSEL);
-
-
- if (index != 0) {
- val |= EP93XX_SYSCON_CLKDIV_ESEL;
- val |= (index - 1) ? EP93XX_SYSCON_CLKDIV_PSEL : 0;
- }
-
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- if (psc->lock)
- spin_unlock_irqrestore(psc->lock, flags);
-
- return 0;
-}
-
-static bool is_best(unsigned long rate, unsigned long now,
- unsigned long best)
-{
- return abs(rate - now) < abs(rate - best);
-}
-
-static int ep93xx_mux_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- unsigned long rate = req->rate;
- struct clk *best_parent = NULL;
- unsigned long __parent_rate;
- unsigned long best_rate = 0, actual_rate, mclk_rate;
- unsigned long best_parent_rate;
- int __div = 0, __pdiv = 0;
- int i;
-
- /*
- * Try the two pll's and the external clock
- * Because the valid predividers are 2, 2.5 and 3, we multiply
- * all the clocks by 2 to avoid floating point math.
- *
- * This is based on the algorithm in the ep93xx raster guide:
- * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf
- *
- */
- for (i = 0; i < ARRAY_SIZE(mux_parents); i++) {
- struct clk *parent = clk_get_sys(mux_parents[i], NULL);
-
- __parent_rate = clk_get_rate(parent);
- mclk_rate = __parent_rate * 2;
-
- /* Try each predivider value */
- for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
- __div = mclk_rate / (rate * __pdiv);
- if (__div < 2 || __div > 127)
- continue;
-
- actual_rate = mclk_rate / (__pdiv * __div);
- if (is_best(rate, actual_rate, best_rate)) {
- best_rate = actual_rate;
- best_parent_rate = __parent_rate;
- best_parent = parent;
- }
- }
- }
-
- if (!best_parent)
- return -EINVAL;
-
- req->best_parent_rate = best_parent_rate;
- req->best_parent_hw = __clk_get_hw(best_parent);
- req->rate = best_rate;
-
- return 0;
-}
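
The doubling trick in the comment above is worth spelling out: the
hardware predividers are 2, 2.5 and 3, so working with
mclk_rate = parent_rate * 2 turns them into the integers 4, 5 and 6 --
hence the __pdiv loop bounds and the final
mclk_rate / (__pdiv * __div) division, all in integer math.
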
-
-static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long rate = 0;
- u32 val = __raw_readl(psc->reg);
- int __pdiv = ((val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & 0x03);
- int __div = val & 0x7f;
-
- if (__div > 0)
- rate = (parent_rate * 2) / ((__pdiv + 3) * __div);
-
- return rate;
-}
-
-static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- int pdiv = 0, div = 0;
- unsigned long best_rate = 0, actual_rate, mclk_rate;
- int __div = 0, __pdiv = 0;
- u32 val;
-
- mclk_rate = parent_rate * 2;
-
- for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
- __div = mclk_rate / (rate * __pdiv);
- if (__div < 2 || __div > 127)
- continue;
-
- actual_rate = mclk_rate / (__pdiv * __div);
- if (is_best(rate, actual_rate, best_rate)) {
- pdiv = __pdiv - 3;
- div = __div;
- best_rate = actual_rate;
- }
- }
-
- if (!best_rate)
- return -EINVAL;
-
- val = __raw_readl(psc->reg);
-
- /* Clear old dividers */
- val &= ~0x37f;
-
- /* Set the new pdiv and div bits for the new clock rate */
- val |= (pdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | div;
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- return 0;
-}
-
-static const struct clk_ops clk_ddiv_ops = {
- .enable = ep93xx_clk_enable,
- .disable = ep93xx_clk_disable,
- .is_enabled = ep93xx_clk_is_enabled,
- .get_parent = ep93xx_mux_get_parent,
- .set_parent = ep93xx_mux_set_parent_lock,
- .determine_rate = ep93xx_mux_determine_rate,
- .recalc_rate = ep93xx_ddiv_recalc_rate,
- .set_rate = ep93xx_ddiv_set_rate,
-};
-
-static struct clk_hw *clk_hw_register_ddiv(const char *name,
- void __iomem *reg,
- u8 bit_idx)
-{
- struct clk_init_data init;
- struct clk_psc *psc;
- struct clk *clk;
-
- psc = kzalloc(sizeof(*psc), GFP_KERNEL);
- if (!psc)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &clk_ddiv_ops;
- init.flags = 0;
- init.parent_names = mux_parents;
- init.num_parents = ARRAY_SIZE(mux_parents);
-
- psc->reg = reg;
- psc->bit_idx = bit_idx;
- psc->lock = &clk_lock;
- psc->hw.init = &init;
-
- clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk)) {
- kfree(psc);
- return ERR_CAST(clk);
- }
- return &psc->hw;
-}
-
-static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- u32 val = __raw_readl(psc->reg);
- u8 index = (val & psc->mask) >> psc->shift;
-
- if (index > psc->num_div)
- return 0;
-
- return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]);
-}
-
-static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- unsigned long best = 0, now, maxdiv;
- int i;
-
- maxdiv = psc->div[psc->num_div - 1];
-
- for (i = 0; i < psc->num_div; i++) {
- if ((rate * psc->div[i]) == *parent_rate)
- return DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]);
-
- now = DIV_ROUND_UP_ULL((u64)*parent_rate, psc->div[i]);
-
- if (is_best(rate, now, best))
- best = now;
- }
-
- if (!best)
- best = DIV_ROUND_UP_ULL(*parent_rate, maxdiv);
-
- return best;
-}
-
-static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_psc *psc = to_clk_psc(hw);
- u32 val = __raw_readl(psc->reg) & ~psc->mask;
- int i;
-
- for (i = 0; i < psc->num_div; i++)
- if (rate == parent_rate / psc->div[i]) {
- val |= i << psc->shift;
- break;
- }
-
- if (i == psc->num_div)
- return -EINVAL;
-
- ep93xx_syscon_swlocked_write(val, psc->reg);
-
- return 0;
-}
-
-static const struct clk_ops ep93xx_div_ops = {
- .enable = ep93xx_clk_enable,
- .disable = ep93xx_clk_disable,
- .is_enabled = ep93xx_clk_is_enabled,
- .recalc_rate = ep93xx_div_recalc_rate,
- .round_rate = ep93xx_div_round_rate,
- .set_rate = ep93xx_div_set_rate,
-};
-
-static struct clk_hw *clk_hw_register_div(const char *name,
- const char *parent_name,
- void __iomem *reg,
- u8 enable_bit,
- u8 shift,
- u8 width,
- char *clk_divisors,
- u8 num_div)
-{
- struct clk_init_data init;
- struct clk_psc *psc;
- struct clk *clk;
-
- psc = kzalloc(sizeof(*psc), GFP_KERNEL);
- if (!psc)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &ep93xx_div_ops;
- init.flags = 0;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = 1;
-
- psc->reg = reg;
- psc->bit_idx = enable_bit;
- psc->mask = GENMASK(shift + width - 1, shift);
- psc->shift = shift;
- psc->div = clk_divisors;
- psc->num_div = num_div;
- psc->lock = &clk_lock;
- psc->hw.init = &init;
-
- clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk)) {
- kfree(psc);
- return ERR_CAST(clk);
- }
- return &psc->hw;
-}
-
-struct ep93xx_gate {
- unsigned int bit;
- const char *dev_id;
- const char *con_id;
-};
-
-static struct ep93xx_gate ep93xx_uarts[] = {
- {EP93XX_SYSCON_DEVCFG_U1EN, "apb:uart1", NULL},
- {EP93XX_SYSCON_DEVCFG_U2EN, "apb:uart2", NULL},
- {EP93XX_SYSCON_DEVCFG_U3EN, "apb:uart3", NULL},
-};
-
-static void __init ep93xx_uart_clock_init(void)
-{
- unsigned int i;
- struct clk_hw *hw;
- u32 value;
- unsigned int clk_uart_div;
-
- value = __raw_readl(EP93XX_SYSCON_PWRCNT);
- if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD)
- clk_uart_div = 1;
- else
- clk_uart_div = 2;
-
- hw = clk_hw_register_fixed_factor(NULL, "uart", "xtali", 0, 1, clk_uart_div);
-
-	/* Parent the UART gate clocks to the common "uart" clock */
- for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) {
- hw = ep93xx_clk_register_gate(ep93xx_uarts[i].dev_id,
- "uart",
- EP93XX_SYSCON_DEVCFG,
- ep93xx_uarts[i].bit);
-
- clk_hw_register_clkdev(hw, NULL, ep93xx_uarts[i].dev_id);
- }
-}
-
-static struct ep93xx_gate ep93xx_dmas[] = {
- {EP93XX_SYSCON_PWRCNT_DMA_M2P0, NULL, "m2p0"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P1, NULL, "m2p1"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P2, NULL, "m2p2"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P3, NULL, "m2p3"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P4, NULL, "m2p4"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P5, NULL, "m2p5"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P6, NULL, "m2p6"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P7, NULL, "m2p7"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P8, NULL, "m2p8"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2P9, NULL, "m2p9"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2M0, NULL, "m2m0"},
- {EP93XX_SYSCON_PWRCNT_DMA_M2M1, NULL, "m2m1"},
-};
-
-static void __init ep93xx_dma_clock_init(void)
-{
- unsigned int i;
- struct clk_hw *hw;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) {
- hw = clk_hw_register_gate(NULL, ep93xx_dmas[i].con_id,
- "hclk", 0,
- EP93XX_SYSCON_PWRCNT,
- ep93xx_dmas[i].bit,
- 0,
- &clk_lock);
-
- ret = clk_hw_register_clkdev(hw, ep93xx_dmas[i].con_id, NULL);
- if (ret)
- pr_err("%s: failed to register lookup %s\n",
- __func__, ep93xx_dmas[i].con_id);
- }
-}
-
-static int __init ep93xx_clock_init(void)
-{
- u32 value;
- struct clk_hw *hw;
- unsigned long clk_pll1_rate;
- unsigned long clk_f_rate;
- unsigned long clk_h_rate;
- unsigned long clk_p_rate;
- unsigned long clk_pll2_rate;
- unsigned int clk_f_div;
- unsigned int clk_h_div;
- unsigned int clk_p_div;
- unsigned int clk_usb_div;
- unsigned long clk_spi_div;
-
- hw = clk_hw_register_fixed_rate(NULL, "xtali", NULL, 0, EP93XX_EXT_CLK_RATE);
- clk_hw_register_clkdev(hw, NULL, "xtali");
-
- /* Determine the bootloader configured pll1 rate */
- value = __raw_readl(EP93XX_SYSCON_CLKSET1);
- if (!(value & EP93XX_SYSCON_CLKSET1_NBYP1))
- clk_pll1_rate = EP93XX_EXT_CLK_RATE;
- else
- clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
-
- hw = clk_hw_register_fixed_rate(NULL, "pll1", "xtali", 0, clk_pll1_rate);
- clk_hw_register_clkdev(hw, NULL, "pll1");
-
- /* Initialize the pll1 derived clocks */
- clk_f_div = fclk_divisors[(value >> 25) & 0x7];
- clk_h_div = hclk_divisors[(value >> 20) & 0x7];
- clk_p_div = pclk_divisors[(value >> 18) & 0x3];
-
- hw = clk_hw_register_fixed_factor(NULL, "fclk", "pll1", 0, 1, clk_f_div);
- clk_f_rate = clk_get_rate(hw->clk);
- hw = clk_hw_register_fixed_factor(NULL, "hclk", "pll1", 0, 1, clk_h_div);
- clk_h_rate = clk_get_rate(hw->clk);
- hw = clk_hw_register_fixed_factor(NULL, "pclk", "hclk", 0, 1, clk_p_div);
- clk_p_rate = clk_get_rate(hw->clk);
-
- clk_hw_register_clkdev(hw, "apb_pclk", NULL);
-
- ep93xx_dma_clock_init();
-
- /* Determine the bootloader configured pll2 rate */
- value = __raw_readl(EP93XX_SYSCON_CLKSET2);
- if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2))
- clk_pll2_rate = EP93XX_EXT_CLK_RATE;
- else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN)
- clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
- else
- clk_pll2_rate = 0;
-
- hw = clk_hw_register_fixed_rate(NULL, "pll2", "xtali", 0, clk_pll2_rate);
- clk_hw_register_clkdev(hw, NULL, "pll2");
-
- /* Initialize the pll2 derived clocks */
- /*
- * These four bits set the divide ratio between the PLL2
- * output and the USB clock.
- * 0000 - Divide by 1
- * 0001 - Divide by 2
- * 0010 - Divide by 3
- * 0011 - Divide by 4
- * 0100 - Divide by 5
- * 0101 - Divide by 6
- * 0110 - Divide by 7
- * 0111 - Divide by 8
- * 1000 - Divide by 9
- * 1001 - Divide by 10
- * 1010 - Divide by 11
- * 1011 - Divide by 12
- * 1100 - Divide by 13
- * 1101 - Divide by 14
- * 1110 - Divide by 15
- * 1111 - Divide by 1
- * On power-on-reset these bits are reset to 0000b.
- */
- clk_usb_div = (((value >> 28) & 0xf) + 1);
- hw = clk_hw_register_fixed_factor(NULL, "usb_clk", "pll2", 0, 1, clk_usb_div);
- hw = clk_hw_register_gate(NULL, "ohci-platform",
- "usb_clk", 0,
- EP93XX_SYSCON_PWRCNT,
- EP93XX_SYSCON_PWRCNT_USH_EN,
- 0,
- &clk_lock);
- clk_hw_register_clkdev(hw, NULL, "ohci-platform");
-
- /*
- * EP93xx SSP clock rate was doubled in version E2. For more information
- * see:
- * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
- */
- clk_spi_div = 1;
- if (ep93xx_chip_revision() < EP93XX_CHIP_REV_E2)
- clk_spi_div = 2;
- hw = clk_hw_register_fixed_factor(NULL, "ep93xx-spi.0", "xtali", 0, 1, clk_spi_div);
- clk_hw_register_clkdev(hw, NULL, "ep93xx-spi.0");
-
- /* pwm clock */
- hw = clk_hw_register_fixed_factor(NULL, "pwm_clk", "xtali", 0, 1, 1);
- clk_hw_register_clkdev(hw, "pwm_clk", NULL);
-
- pr_info("PLL1 running at %ld MHz, PLL2 at %ld MHz\n",
- clk_pll1_rate / 1000000, clk_pll2_rate / 1000000);
- pr_info("FCLK %ld MHz, HCLK %ld MHz, PCLK %ld MHz\n",
- clk_f_rate / 1000000, clk_h_rate / 1000000,
- clk_p_rate / 1000000);
-
- ep93xx_uart_clock_init();
-
- /* touchscreen/adc clock */
- hw = clk_hw_register_div("ep93xx-adc",
- "xtali",
- EP93XX_SYSCON_KEYTCHCLKDIV,
- EP93XX_SYSCON_KEYTCHCLKDIV_TSEN,
- EP93XX_SYSCON_KEYTCHCLKDIV_ADIV,
- 1,
- adc_divisors,
- ARRAY_SIZE(adc_divisors));
-
- clk_hw_register_clkdev(hw, NULL, "ep93xx-adc");
-
- /* keypad clock */
- hw = clk_hw_register_div("ep93xx-keypad",
- "xtali",
- EP93XX_SYSCON_KEYTCHCLKDIV,
- EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
- EP93XX_SYSCON_KEYTCHCLKDIV_KDIV,
- 1,
- adc_divisors,
- ARRAY_SIZE(adc_divisors));
-
- clk_hw_register_clkdev(hw, NULL, "ep93xx-keypad");
-
-	/*
-	 * On reset, PDIV and VDIV are set to zero.  A PDIV of zero
-	 * means the clock is disabled, and VDIV must not be zero,
-	 * so set both dividers to their minimum values.
-	 */
- /* ENA - Enable CLK divider. */
- /* PDIV - 00 - Disable clock */
- /* VDIV - at least 2 */
- /* Check and enable video clk registers */
- value = __raw_readl(EP93XX_SYSCON_VIDCLKDIV);
- value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
- ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_VIDCLKDIV);
-
- /* check and enable i2s clk registers */
- value = __raw_readl(EP93XX_SYSCON_I2SCLKDIV);
- value |= (1 << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
- ep93xx_syscon_swlocked_write(value, EP93XX_SYSCON_I2SCLKDIV);
-
- /* video clk */
- hw = clk_hw_register_ddiv("ep93xx-fb",
- EP93XX_SYSCON_VIDCLKDIV,
- EP93XX_SYSCON_CLKDIV_ENABLE);
-
- clk_hw_register_clkdev(hw, NULL, "ep93xx-fb");
-
- /* i2s clk */
- hw = clk_hw_register_ddiv("mclk",
- EP93XX_SYSCON_I2SCLKDIV,
- EP93XX_SYSCON_CLKDIV_ENABLE);
-
- clk_hw_register_clkdev(hw, "mclk", "ep93xx-i2s");
-
- /* i2s sclk */
-#define EP93XX_I2SCLKDIV_SDIV_SHIFT 16
-#define EP93XX_I2SCLKDIV_SDIV_WIDTH 1
- hw = clk_hw_register_div("sclk",
- "mclk",
- EP93XX_SYSCON_I2SCLKDIV,
- EP93XX_SYSCON_I2SCLKDIV_SENA,
- EP93XX_I2SCLKDIV_SDIV_SHIFT,
- EP93XX_I2SCLKDIV_SDIV_WIDTH,
- sclk_divisors,
- ARRAY_SIZE(sclk_divisors));
-
- clk_hw_register_clkdev(hw, "sclk", "ep93xx-i2s");
-
- /* i2s lrclk */
-#define EP93XX_I2SCLKDIV_LRDIV32_SHIFT 17
-#define EP93XX_I2SCLKDIV_LRDIV32_WIDTH 3
- hw = clk_hw_register_div("lrclk",
- "sclk",
- EP93XX_SYSCON_I2SCLKDIV,
- EP93XX_SYSCON_I2SCLKDIV_SENA,
- EP93XX_I2SCLKDIV_LRDIV32_SHIFT,
- EP93XX_I2SCLKDIV_LRDIV32_WIDTH,
- lrclk_divisors,
- ARRAY_SIZE(lrclk_divisors));
-
- clk_hw_register_clkdev(hw, "lrclk", "ep93xx-i2s");
-
- return 0;
-}
-postcore_initcall(ep93xx_clock_init);
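
calc_pll_rate(), called from ep93xx_clock_init() above for both CLKSET words, is defined earlier in the removed file and does not appear in this hunk. Below is a hedged, standalone sketch of the arithmetic it performs; the CLKSET field layout (X1FBD bits 15:11, X2FBD bits 10:5, X2IPD bits 4:0, PS bits 17:16) is an assumption taken from the EP93xx datasheet, not from this diff, and the CLKSET word in main() is made up:

#include <stdint.h>
#include <stdio.h>

static unsigned long pll_rate(uint64_t ext_rate, uint32_t clkset)
{
	ext_rate *= ((clkset >> 11) & 0x1f) + 1;	/* X1FBD */
	ext_rate *= ((clkset >> 5) & 0x3f) + 1;		/* X2FBD */
	ext_rate /= (clkset & 0x1f) + 1;		/* X2IPD */
	ext_rate >>= (clkset >> 16) & 0x3;		/* PS */
	return (unsigned long)ext_rate;
}

int main(void)
{
	/* 14.7456 MHz crystal and an invented CLKSET word */
	printf("%lu Hz\n", pll_rate(14745600, 0x0002a4f9));
	return 0;
}
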
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
deleted file mode 100644
index 8b1ec60a9a46..000000000000
--- a/arch/arm/mach-ep93xx/core.c
+++ /dev/null
@@ -1,1018 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/core.c
- * Core routines for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
- *
- * Thanks go to Michael Burian and Ray Lehtiniemi for their key
- * role in the ep93xx linux community.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/sys_soc.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/leds.h>
-#include <linux/uaccess.h>
-#include <linux/termios.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/serial.h>
-#include <linux/mtd/physmap.h>
-#include <linux/i2c.h>
-#include <linux/gpio/machine.h>
-#include <linux/spi/spi.h>
-#include <linux/export.h>
-#include <linux/irqchip/arm-vic.h>
-#include <linux/reboot.h>
-#include <linux/usb/ohci_pdriver.h>
-#include <linux/random.h>
-
-#include "hardware.h"
-#include <linux/platform_data/video-ep93xx.h>
-#include <linux/platform_data/keypad-ep93xx.h>
-#include <linux/platform_data/spi-ep93xx.h>
-#include <linux/soc/cirrus/ep93xx.h>
-
-#include "gpio-ep93xx.h"
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "soc.h"
-#include "irqs.h"
-
-/*************************************************************************
- * Static I/O mappings that are needed for all EP93xx platforms
- *************************************************************************/
-static struct map_desc ep93xx_io_desc[] __initdata = {
- {
- .virtual = EP93XX_AHB_VIRT_BASE,
- .pfn = __phys_to_pfn(EP93XX_AHB_PHYS_BASE),
- .length = EP93XX_AHB_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = EP93XX_APB_VIRT_BASE,
- .pfn = __phys_to_pfn(EP93XX_APB_PHYS_BASE),
- .length = EP93XX_APB_SIZE,
- .type = MT_DEVICE,
- },
-};
-
-void __init ep93xx_map_io(void)
-{
- iotable_init(ep93xx_io_desc, ARRAY_SIZE(ep93xx_io_desc));
-}
-
-/*************************************************************************
- * EP93xx IRQ handling
- *************************************************************************/
-void __init ep93xx_init_irq(void)
-{
- vic_init(EP93XX_VIC1_BASE, IRQ_EP93XX_VIC0, EP93XX_VIC1_VALID_IRQ_MASK, 0);
- vic_init(EP93XX_VIC2_BASE, IRQ_EP93XX_VIC1, EP93XX_VIC2_VALID_IRQ_MASK, 0);
-}
-
-
-/*************************************************************************
- * EP93xx System Controller Software Locked register handling
- *************************************************************************/
-
-/*
- * syscon_swlock prevents anything else from writing to the syscon
- * block while a software locked register is being written.
- */
-static DEFINE_SPINLOCK(syscon_swlock);
-
-void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&syscon_swlock, flags);
-
- __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
- __raw_writel(val, reg);
-
- spin_unlock_irqrestore(&syscon_swlock, flags);
-}
-
-void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits)
-{
- unsigned long flags;
- unsigned int val;
-
- spin_lock_irqsave(&syscon_swlock, flags);
-
- val = __raw_readl(EP93XX_SYSCON_DEVCFG);
- val &= ~clear_bits;
- val |= set_bits;
- __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
- __raw_writel(val, EP93XX_SYSCON_DEVCFG);
-
- spin_unlock_irqrestore(&syscon_swlock, flags);
-}
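
Both helpers depend on the same handshake: a write of 0xaa to EP93XX_SYSCON_SWLOCK arms exactly one following write to a software-locked register, which is why the unlock and the payload write must sit under a single spinlock with interrupts disabled. A toy user-space model of that single-shot behaviour (not hardware-accurate; write_swlock/write_devcfg are invented names):

#include <stdint.h>
#include <stdio.h>

static uint32_t swlock, devcfg;

static void write_swlock(uint32_t v)
{
	swlock = v;
}

static void write_devcfg(uint32_t v)
{
	if (swlock == 0xaa) {
		devcfg = v;	/* the unlock is consumed by this write */
		swlock = 0;
	}
}

int main(void)
{
	write_devcfg(0x1);		/* dropped: register is locked */
	write_swlock(0xaa);
	write_devcfg(0x1);		/* lands */
	printf("devcfg = 0x%x\n", devcfg);
	return 0;
}
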
-
-/**
- * ep93xx_chip_revision() - returns the EP93xx chip revision
- *
- * See "platform.h" for more information.
- */
-unsigned int ep93xx_chip_revision(void)
-{
- unsigned int v;
-
- v = __raw_readl(EP93XX_SYSCON_SYSCFG);
- v &= EP93XX_SYSCON_SYSCFG_REV_MASK;
- v >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT;
- return v;
-}
-EXPORT_SYMBOL_GPL(ep93xx_chip_revision);
-
-/*************************************************************************
- * EP93xx GPIO
- *************************************************************************/
-static struct resource ep93xx_gpio_resource[] = {
- DEFINE_RES_MEM(EP93XX_GPIO_PHYS_BASE, 0xcc),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO_AB),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO0MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO1MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO2MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO3MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO4MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO5MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO6MUX),
- DEFINE_RES_IRQ(IRQ_EP93XX_GPIO7MUX),
-};
-
-static struct platform_device ep93xx_gpio_device = {
- .name = "gpio-ep93xx",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_gpio_resource),
- .resource = ep93xx_gpio_resource,
-};
-
-/*************************************************************************
- * EP93xx peripheral handling
- *************************************************************************/
-#define EP93XX_UART_MCR_OFFSET (0x0100)
-
-static void ep93xx_uart_set_mctrl(struct amba_device *dev,
- void __iomem *base, unsigned int mctrl)
-{
- unsigned int mcr;
-
- mcr = 0;
- if (mctrl & TIOCM_RTS)
- mcr |= 2;
- if (mctrl & TIOCM_DTR)
- mcr |= 1;
-
- __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET);
-}
-
-static struct amba_pl010_data ep93xx_uart_data = {
- .set_mctrl = ep93xx_uart_set_mctrl,
-};
-
-static AMBA_APB_DEVICE(uart1, "apb:uart1", 0x00041010, EP93XX_UART1_PHYS_BASE,
- { IRQ_EP93XX_UART1 }, &ep93xx_uart_data);
-
-static AMBA_APB_DEVICE(uart2, "apb:uart2", 0x00041010, EP93XX_UART2_PHYS_BASE,
- { IRQ_EP93XX_UART2 }, NULL);
-
-static AMBA_APB_DEVICE(uart3, "apb:uart3", 0x00041010, EP93XX_UART3_PHYS_BASE,
- { IRQ_EP93XX_UART3 }, &ep93xx_uart_data);
-
-static struct resource ep93xx_rtc_resource[] = {
- DEFINE_RES_MEM(EP93XX_RTC_PHYS_BASE, 0x10c),
-};
-
-static struct platform_device ep93xx_rtc_device = {
- .name = "ep93xx-rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_rtc_resource),
- .resource = ep93xx_rtc_resource,
-};
-
-/*************************************************************************
- * EP93xx OHCI USB Host
- *************************************************************************/
-
-static struct clk *ep93xx_ohci_host_clock;
-
-static int ep93xx_ohci_power_on(struct platform_device *pdev)
-{
- if (!ep93xx_ohci_host_clock) {
- ep93xx_ohci_host_clock = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ep93xx_ohci_host_clock))
- return PTR_ERR(ep93xx_ohci_host_clock);
- }
-
- return clk_prepare_enable(ep93xx_ohci_host_clock);
-}
-
-static void ep93xx_ohci_power_off(struct platform_device *pdev)
-{
- clk_disable(ep93xx_ohci_host_clock);
-}
-
-static struct usb_ohci_pdata ep93xx_ohci_pdata = {
- .power_on = ep93xx_ohci_power_on,
- .power_off = ep93xx_ohci_power_off,
- .power_suspend = ep93xx_ohci_power_off,
-};
-
-static struct resource ep93xx_ohci_resources[] = {
- DEFINE_RES_MEM(EP93XX_USB_PHYS_BASE, 0x1000),
- DEFINE_RES_IRQ(IRQ_EP93XX_USB),
-};
-
-static u64 ep93xx_ohci_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_ohci_device = {
- .name = "ohci-platform",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_ohci_resources),
- .resource = ep93xx_ohci_resources,
- .dev = {
- .dma_mask = &ep93xx_ohci_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &ep93xx_ohci_pdata,
- },
-};
-
-/*************************************************************************
- * EP93xx physmap'ed flash
- *************************************************************************/
-static struct physmap_flash_data ep93xx_flash_data;
-
-static struct resource ep93xx_flash_resource = {
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device ep93xx_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &ep93xx_flash_data,
- },
- .num_resources = 1,
- .resource = &ep93xx_flash_resource,
-};
-
-/**
- * ep93xx_register_flash() - Register the external flash device.
- * @width: bank width in octets
- * @start: resource start address
- * @size: resource size
- */
-void __init ep93xx_register_flash(unsigned int width,
- resource_size_t start, resource_size_t size)
-{
- ep93xx_flash_data.width = width;
-
- ep93xx_flash_resource.start = start;
- ep93xx_flash_resource.end = start + size - 1;
-
- platform_device_register(&ep93xx_flash);
-}
-
-
-/*************************************************************************
- * EP93xx ethernet peripheral handling
- *************************************************************************/
-static struct ep93xx_eth_data ep93xx_eth_data;
-
-static struct resource ep93xx_eth_resource[] = {
- DEFINE_RES_MEM(EP93XX_ETHERNET_PHYS_BASE, 0x10000),
- DEFINE_RES_IRQ(IRQ_EP93XX_ETHERNET),
-};
-
-static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_eth_device = {
- .name = "ep93xx-eth",
- .id = -1,
- .dev = {
- .platform_data = &ep93xx_eth_data,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &ep93xx_eth_dma_mask,
- },
- .num_resources = ARRAY_SIZE(ep93xx_eth_resource),
- .resource = ep93xx_eth_resource,
-};
-
-/**
- * ep93xx_register_eth - Register the built-in ethernet platform device.
- * @data: platform specific ethernet configuration (__initdata)
- * @copy_addr: flag indicating that the MAC address should be copied
- * from the IndAd registers (as programmed by the bootloader)
- */
-void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
-{
- if (copy_addr)
- memcpy_fromio(data->dev_addr, EP93XX_ETHERNET_BASE + 0x50, 6);
-
- ep93xx_eth_data = *data;
- platform_device_register(&ep93xx_eth_device);
-}
-
-
-/*************************************************************************
- * EP93xx i2c peripheral handling
- *************************************************************************/
-
-/* All EP93xx devices use the same two GPIO pins for I2C bit-banging */
-static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
- .dev_id = "i2c-gpio.0",
- .table = {
- /* Use local offsets on gpiochip/port "G" */
- GPIO_LOOKUP_IDX("G", 1, NULL, 0,
- GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- GPIO_LOOKUP_IDX("G", 0, NULL, 1,
- GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
- { }
- },
-};
-
-static struct platform_device ep93xx_i2c_device = {
- .name = "i2c-gpio",
- .id = 0,
- .dev = {
- .platform_data = NULL,
- },
-};
-
-/**
- * ep93xx_register_i2c - Register the i2c platform device.
- * @devices: platform specific i2c bus device information (__initdata)
- * @num: the number of devices on the i2c bus
- */
-void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num)
-{
- /*
-	 * FIXME: this just sets the two pins as non-opendrain, as no
-	 * platform tries to do that anyway. Flag the applicable lines
-	 * as open drain in the GPIO_LOOKUP above and the driver or
-	 * gpiolib will handle open-drain emulation as needed. Right
-	 * now i2c-gpio emulates open drain, which is not optimal.
- */
- __raw_writel((0 << 1) | (0 << 0),
- EP93XX_GPIO_EEDRIVE);
-
- i2c_register_board_info(0, devices, num);
- gpiod_add_lookup_table(&ep93xx_i2c_gpiod_table);
- platform_device_register(&ep93xx_i2c_device);
-}
-
-/*************************************************************************
- * EP93xx SPI peripheral handling
- *************************************************************************/
-static struct ep93xx_spi_info ep93xx_spi_master_data;
-
-static struct resource ep93xx_spi_resources[] = {
- DEFINE_RES_MEM(EP93XX_SPI_PHYS_BASE, 0x18),
- DEFINE_RES_IRQ(IRQ_EP93XX_SSP),
-};
-
-static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_spi_device = {
- .name = "ep93xx-spi",
- .id = 0,
- .dev = {
- .platform_data = &ep93xx_spi_master_data,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &ep93xx_spi_dma_mask,
- },
- .num_resources = ARRAY_SIZE(ep93xx_spi_resources),
- .resource = ep93xx_spi_resources,
-};
-
-/**
- * ep93xx_register_spi() - registers spi platform device
- * @info: ep93xx board specific spi master info (__initdata)
- * @devices: SPI devices to register (__initdata)
- * @num: number of SPI devices to register
- *
- * This function registers the platform device for the EP93xx SPI
- * controller and also makes sure that the SPI pins are muxed so that
- * I2S is not using those pins.
- */
-void __init ep93xx_register_spi(struct ep93xx_spi_info *info,
- struct spi_board_info *devices, int num)
-{
- /*
- * When SPI is used, we need to make sure that I2S is muxed off from
- * SPI pins.
- */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONSSP);
-
- ep93xx_spi_master_data = *info;
- spi_register_board_info(devices, num);
- platform_device_register(&ep93xx_spi_device);
-}
-
-/*************************************************************************
- * EP93xx LEDs
- *************************************************************************/
-static const struct gpio_led ep93xx_led_pins[] __initconst = {
- {
- .name = "platform:grled",
- }, {
- .name = "platform:rdled",
- },
-};
-
-static const struct gpio_led_platform_data ep93xx_led_data __initconst = {
- .num_leds = ARRAY_SIZE(ep93xx_led_pins),
- .leds = ep93xx_led_pins,
-};
-
-static struct gpiod_lookup_table ep93xx_leds_gpio_table = {
- .dev_id = "leds-gpio",
- .table = {
- /* Use local offsets on gpiochip/port "E" */
- GPIO_LOOKUP_IDX("E", 0, NULL, 0, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX("E", 1, NULL, 1, GPIO_ACTIVE_HIGH),
- { }
- },
-};
-
-/*************************************************************************
- * EP93xx pwm peripheral handling
- *************************************************************************/
-static struct resource ep93xx_pwm0_resource[] = {
- DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE, 0x10),
-};
-
-static struct platform_device ep93xx_pwm0_device = {
- .name = "ep93xx-pwm",
- .id = 0,
- .num_resources = ARRAY_SIZE(ep93xx_pwm0_resource),
- .resource = ep93xx_pwm0_resource,
-};
-
-static struct resource ep93xx_pwm1_resource[] = {
- DEFINE_RES_MEM(EP93XX_PWM_PHYS_BASE + 0x20, 0x10),
-};
-
-static struct platform_device ep93xx_pwm1_device = {
- .name = "ep93xx-pwm",
- .id = 1,
- .num_resources = ARRAY_SIZE(ep93xx_pwm1_resource),
- .resource = ep93xx_pwm1_resource,
-};
-
-void __init ep93xx_register_pwm(int pwm0, int pwm1)
-{
- if (pwm0)
- platform_device_register(&ep93xx_pwm0_device);
-
- /* NOTE: EP9307 does not have PWMOUT1 (pin EGPIO14) */
- if (pwm1)
- platform_device_register(&ep93xx_pwm1_device);
-}
-
-int ep93xx_pwm_acquire_gpio(struct platform_device *pdev)
-{
- int err;
-
- if (pdev->id == 0) {
- err = 0;
- } else if (pdev->id == 1) {
- err = gpio_request(EP93XX_GPIO_LINE_EGPIO14,
- dev_name(&pdev->dev));
- if (err)
- return err;
- err = gpio_direction_output(EP93XX_GPIO_LINE_EGPIO14, 0);
- if (err)
- goto fail;
-
- /* PWM 1 output on EGPIO[14] */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_PONG);
- } else {
- err = -ENODEV;
- }
-
- return err;
-
-fail:
- gpio_free(EP93XX_GPIO_LINE_EGPIO14);
- return err;
-}
-EXPORT_SYMBOL(ep93xx_pwm_acquire_gpio);
-
-void ep93xx_pwm_release_gpio(struct platform_device *pdev)
-{
- if (pdev->id == 1) {
- gpio_direction_input(EP93XX_GPIO_LINE_EGPIO14);
- gpio_free(EP93XX_GPIO_LINE_EGPIO14);
-
- /* EGPIO[14] used for GPIO */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_PONG);
- }
-}
-EXPORT_SYMBOL(ep93xx_pwm_release_gpio);
-
-
-/*************************************************************************
- * EP93xx video peripheral handling
- *************************************************************************/
-static struct ep93xxfb_mach_info ep93xxfb_data;
-
-static struct resource ep93xx_fb_resource[] = {
- DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE, 0x800),
-};
-
-static struct platform_device ep93xx_fb_device = {
- .name = "ep93xx-fb",
- .id = -1,
- .dev = {
- .platform_data = &ep93xxfb_data,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask,
- },
- .num_resources = ARRAY_SIZE(ep93xx_fb_resource),
- .resource = ep93xx_fb_resource,
-};
-
-/* The backlight uses a single register in the framebuffer's register space */
-#define EP93XX_RASTER_REG_BRIGHTNESS 0x20
-
-static struct resource ep93xx_bl_resources[] = {
- DEFINE_RES_MEM(EP93XX_RASTER_PHYS_BASE +
- EP93XX_RASTER_REG_BRIGHTNESS, 0x04),
-};
-
-static struct platform_device ep93xx_bl_device = {
- .name = "ep93xx-bl",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_bl_resources),
- .resource = ep93xx_bl_resources,
-};
-
-/**
- * ep93xx_register_fb - Register the framebuffer platform device.
- * @data: platform specific framebuffer configuration (__initdata)
- */
-void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data)
-{
- ep93xxfb_data = *data;
- platform_device_register(&ep93xx_fb_device);
- platform_device_register(&ep93xx_bl_device);
-}
-
-
-/*************************************************************************
- * EP93xx matrix keypad peripheral handling
- *************************************************************************/
-static struct ep93xx_keypad_platform_data ep93xx_keypad_data;
-
-static struct resource ep93xx_keypad_resource[] = {
- DEFINE_RES_MEM(EP93XX_KEY_MATRIX_PHYS_BASE, 0x0c),
- DEFINE_RES_IRQ(IRQ_EP93XX_KEY),
-};
-
-static struct platform_device ep93xx_keypad_device = {
- .name = "ep93xx-keypad",
- .id = -1,
- .dev = {
- .platform_data = &ep93xx_keypad_data,
- },
- .num_resources = ARRAY_SIZE(ep93xx_keypad_resource),
- .resource = ep93xx_keypad_resource,
-};
-
-/**
- * ep93xx_register_keypad - Register the keypad platform device.
- * @data: platform specific keypad configuration (__initdata)
- */
-void __init ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data)
-{
- ep93xx_keypad_data = *data;
- platform_device_register(&ep93xx_keypad_device);
-}
-
-int ep93xx_keypad_acquire_gpio(struct platform_device *pdev)
-{
- int err;
- int i;
-
- for (i = 0; i < 8; i++) {
- err = gpio_request(EP93XX_GPIO_LINE_C(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_c;
- err = gpio_request(EP93XX_GPIO_LINE_D(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_d;
- }
-
- /* Enable the keypad controller; GPIO ports C and D used for keypad */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK);
-
- return 0;
-
-fail_gpio_d:
- gpio_free(EP93XX_GPIO_LINE_C(i));
-fail_gpio_c:
- for (--i; i >= 0; --i) {
- gpio_free(EP93XX_GPIO_LINE_C(i));
- gpio_free(EP93XX_GPIO_LINE_D(i));
- }
- return err;
-}
-EXPORT_SYMBOL(ep93xx_keypad_acquire_gpio);
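
The error path above is the usual kernel acquire/unwind idiom: when the port D request for index i fails, the already-acquired port C line of the same index is freed first, then the loop walks i back down releasing both ports. A self-contained toy that injects a failure to show the release order; the line numbers 100+i/200+i stand in for the C(i)/D(i) gpios, and everything here is invented for illustration:

#include <stdio.h>

static int fail_at = 11;	/* make the 12th request fail */
static int nreq;

static int gpio_request(int line)
{
	if (nreq++ == fail_at)
		return -1;
	printf("request %d\n", line);
	return 0;
}

static void gpio_free(int line)
{
	printf("free %d\n", line);
}

static int acquire(void)
{
	int err;
	int i;

	for (i = 0; i < 8; i++) {
		err = gpio_request(100 + i);	/* stands in for C(i) */
		if (err)
			goto fail_gpio_c;
		err = gpio_request(200 + i);	/* stands in for D(i) */
		if (err)
			goto fail_gpio_d;
	}
	return 0;

fail_gpio_d:
	gpio_free(100 + i);	/* C(i) succeeded before D(i) failed */
fail_gpio_c:
	for (--i; i >= 0; --i) {
		gpio_free(100 + i);
		gpio_free(200 + i);
	}
	return err;
}

int main(void)
{
	return acquire() ? 1 : 0;
}
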
-
-void ep93xx_keypad_release_gpio(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < 8; i++) {
- gpio_free(EP93XX_GPIO_LINE_C(i));
- gpio_free(EP93XX_GPIO_LINE_D(i));
- }
-
- /* Disable the keypad controller; GPIO ports C and D used for GPIO */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK);
-}
-EXPORT_SYMBOL(ep93xx_keypad_release_gpio);
-
-/*************************************************************************
- * EP93xx I2S audio peripheral handling
- *************************************************************************/
-static struct resource ep93xx_i2s_resource[] = {
- DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100),
- DEFINE_RES_IRQ(IRQ_EP93XX_SAI),
-};
-
-static struct platform_device ep93xx_i2s_device = {
- .name = "ep93xx-i2s",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_i2s_resource),
- .resource = ep93xx_i2s_resource,
-};
-
-static struct platform_device ep93xx_pcm_device = {
- .name = "ep93xx-pcm-audio",
- .id = -1,
-};
-
-void __init ep93xx_register_i2s(void)
-{
- platform_device_register(&ep93xx_i2s_device);
- platform_device_register(&ep93xx_pcm_device);
-}
-
-#define EP93XX_SYSCON_DEVCFG_I2S_MASK (EP93XX_SYSCON_DEVCFG_I2SONSSP | \
- EP93XX_SYSCON_DEVCFG_I2SONAC97)
-
-#define EP93XX_I2SCLKDIV_MASK (EP93XX_SYSCON_I2SCLKDIV_ORIDE | \
- EP93XX_SYSCON_I2SCLKDIV_SPOL)
-
-int ep93xx_i2s_acquire(void)
-{
- unsigned val;
-
- ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_I2SONAC97,
- EP93XX_SYSCON_DEVCFG_I2S_MASK);
-
- /*
-	 * This is potentially racy with the clock API for i2s_mclk, sclk
-	 * and lrclk. Since the i2s driver is the only user of those
-	 * clocks, we rely on it to prevent parallel use of this function
-	 * and the clock API for the i2s clocks.
- */
- val = __raw_readl(EP93XX_SYSCON_I2SCLKDIV);
- val &= ~EP93XX_I2SCLKDIV_MASK;
- val |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL;
- ep93xx_syscon_swlocked_write(val, EP93XX_SYSCON_I2SCLKDIV);
-
- return 0;
-}
-EXPORT_SYMBOL(ep93xx_i2s_acquire);
-
-void ep93xx_i2s_release(void)
-{
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2S_MASK);
-}
-EXPORT_SYMBOL(ep93xx_i2s_release);
-
-/*************************************************************************
- * EP93xx AC97 audio peripheral handling
- *************************************************************************/
-static struct resource ep93xx_ac97_resources[] = {
- DEFINE_RES_MEM(EP93XX_AAC_PHYS_BASE, 0xac),
- DEFINE_RES_IRQ(IRQ_EP93XX_AACINTR),
-};
-
-static struct platform_device ep93xx_ac97_device = {
- .name = "ep93xx-ac97",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_ac97_resources),
- .resource = ep93xx_ac97_resources,
-};
-
-void __init ep93xx_register_ac97(void)
-{
- /*
- * Make sure that the AC97 pins are not used by I2S.
- */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97);
-
- platform_device_register(&ep93xx_ac97_device);
- platform_device_register(&ep93xx_pcm_device);
-}
-
-/*************************************************************************
- * EP93xx Watchdog
- *************************************************************************/
-static struct resource ep93xx_wdt_resources[] = {
- DEFINE_RES_MEM(EP93XX_WATCHDOG_PHYS_BASE, 0x08),
-};
-
-static struct platform_device ep93xx_wdt_device = {
- .name = "ep93xx-wdt",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_wdt_resources),
- .resource = ep93xx_wdt_resources,
-};
-
-/*************************************************************************
- * EP93xx IDE
- *************************************************************************/
-static struct resource ep93xx_ide_resources[] = {
- DEFINE_RES_MEM(EP93XX_IDE_PHYS_BASE, 0x38),
- DEFINE_RES_IRQ(IRQ_EP93XX_EXT3),
-};
-
-static struct platform_device ep93xx_ide_device = {
- .name = "ep93xx-ide",
- .id = -1,
- .dev = {
- .dma_mask = &ep93xx_ide_device.dev.coherent_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .num_resources = ARRAY_SIZE(ep93xx_ide_resources),
- .resource = ep93xx_ide_resources,
-};
-
-void __init ep93xx_register_ide(void)
-{
- platform_device_register(&ep93xx_ide_device);
-}
-
-int ep93xx_ide_acquire_gpio(struct platform_device *pdev)
-{
- int err;
- int i;
-
- err = gpio_request(EP93XX_GPIO_LINE_EGPIO2, dev_name(&pdev->dev));
- if (err)
- return err;
- err = gpio_request(EP93XX_GPIO_LINE_EGPIO15, dev_name(&pdev->dev));
- if (err)
- goto fail_egpio15;
- for (i = 2; i < 8; i++) {
- err = gpio_request(EP93XX_GPIO_LINE_E(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_e;
- }
- for (i = 4; i < 8; i++) {
- err = gpio_request(EP93XX_GPIO_LINE_G(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_g;
- }
- for (i = 0; i < 8; i++) {
- err = gpio_request(EP93XX_GPIO_LINE_H(i), dev_name(&pdev->dev));
- if (err)
- goto fail_gpio_h;
- }
-
- /* GPIO ports E[7:2], G[7:4] and H used by IDE */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
- return 0;
-
-fail_gpio_h:
- for (--i; i >= 0; --i)
- gpio_free(EP93XX_GPIO_LINE_H(i));
- i = 8;
-fail_gpio_g:
- for (--i; i >= 4; --i)
- gpio_free(EP93XX_GPIO_LINE_G(i));
- i = 8;
-fail_gpio_e:
- for (--i; i >= 2; --i)
- gpio_free(EP93XX_GPIO_LINE_E(i));
- gpio_free(EP93XX_GPIO_LINE_EGPIO15);
-fail_egpio15:
- gpio_free(EP93XX_GPIO_LINE_EGPIO2);
- return err;
-}
-EXPORT_SYMBOL(ep93xx_ide_acquire_gpio);
-
-void ep93xx_ide_release_gpio(struct platform_device *pdev)
-{
- int i;
-
- for (i = 2; i < 8; i++)
- gpio_free(EP93XX_GPIO_LINE_E(i));
- for (i = 4; i < 8; i++)
- gpio_free(EP93XX_GPIO_LINE_G(i));
- for (i = 0; i < 8; i++)
- gpio_free(EP93XX_GPIO_LINE_H(i));
- gpio_free(EP93XX_GPIO_LINE_EGPIO15);
- gpio_free(EP93XX_GPIO_LINE_EGPIO2);
-
- /* GPIO ports E[7:2], G[7:4] and H used by GPIO */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
-}
-EXPORT_SYMBOL(ep93xx_ide_release_gpio);
-
-/*************************************************************************
- * EP93xx ADC
- *************************************************************************/
-static struct resource ep93xx_adc_resources[] = {
- DEFINE_RES_MEM(EP93XX_ADC_PHYS_BASE, 0x28),
- DEFINE_RES_IRQ(IRQ_EP93XX_TOUCH),
-};
-
-static struct platform_device ep93xx_adc_device = {
- .name = "ep93xx-adc",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_adc_resources),
- .resource = ep93xx_adc_resources,
-};
-
-void __init ep93xx_register_adc(void)
-{
- /* Power up ADC, deactivate Touch Screen Controller */
- ep93xx_devcfg_set_clear(EP93XX_SYSCON_DEVCFG_TIN,
- EP93XX_SYSCON_DEVCFG_ADCPD);
-
- platform_device_register(&ep93xx_adc_device);
-}
-
-/*************************************************************************
- * EP93xx Security peripheral
- *************************************************************************/
-
-/*
- * The Maverick Key is 256 bits of micro fuses blown at the factory during
- * manufacturing to uniquely identify a part.
- *
- * See: http://arm.cirrus.com/forum/viewtopic.php?t=486&highlight=maverick+key
- */
-#define EP93XX_SECURITY_REG(x) (EP93XX_SECURITY_BASE + (x))
-#define EP93XX_SECURITY_SECFLG EP93XX_SECURITY_REG(0x2400)
-#define EP93XX_SECURITY_FUSEFLG EP93XX_SECURITY_REG(0x2410)
-#define EP93XX_SECURITY_UNIQID EP93XX_SECURITY_REG(0x2440)
-#define EP93XX_SECURITY_UNIQCHK EP93XX_SECURITY_REG(0x2450)
-#define EP93XX_SECURITY_UNIQVAL EP93XX_SECURITY_REG(0x2460)
-#define EP93XX_SECURITY_SECID1 EP93XX_SECURITY_REG(0x2500)
-#define EP93XX_SECURITY_SECID2 EP93XX_SECURITY_REG(0x2504)
-#define EP93XX_SECURITY_SECCHK1 EP93XX_SECURITY_REG(0x2520)
-#define EP93XX_SECURITY_SECCHK2 EP93XX_SECURITY_REG(0x2524)
-#define EP93XX_SECURITY_UNIQID2 EP93XX_SECURITY_REG(0x2700)
-#define EP93XX_SECURITY_UNIQID3 EP93XX_SECURITY_REG(0x2704)
-#define EP93XX_SECURITY_UNIQID4 EP93XX_SECURITY_REG(0x2708)
-#define EP93XX_SECURITY_UNIQID5 EP93XX_SECURITY_REG(0x270c)
-
-static char ep93xx_soc_id[33];
-
-static const char __init *ep93xx_get_soc_id(void)
-{
- unsigned int id, id2, id3, id4, id5;
-
- if (__raw_readl(EP93XX_SECURITY_UNIQVAL) != 1)
- return "bad Hamming code";
-
- id = __raw_readl(EP93XX_SECURITY_UNIQID);
- id2 = __raw_readl(EP93XX_SECURITY_UNIQID2);
- id3 = __raw_readl(EP93XX_SECURITY_UNIQID3);
- id4 = __raw_readl(EP93XX_SECURITY_UNIQID4);
- id5 = __raw_readl(EP93XX_SECURITY_UNIQID5);
-
- if (id != id2)
- return "invalid";
-
- /* Toss the unique ID into the entropy pool */
- add_device_randomness(&id2, 4);
- add_device_randomness(&id3, 4);
- add_device_randomness(&id4, 4);
- add_device_randomness(&id5, 4);
-
- snprintf(ep93xx_soc_id, sizeof(ep93xx_soc_id),
- "%08x%08x%08x%08x", id2, id3, id4, id5);
-
- return ep93xx_soc_id;
-}
-
-static const char __init *ep93xx_get_soc_rev(void)
-{
- int rev = ep93xx_chip_revision();
-
- switch (rev) {
- case EP93XX_CHIP_REV_D0:
- return "D0";
- case EP93XX_CHIP_REV_D1:
- return "D1";
- case EP93XX_CHIP_REV_E0:
- return "E0";
- case EP93XX_CHIP_REV_E1:
- return "E1";
- case EP93XX_CHIP_REV_E2:
- return "E2";
- default:
- return "unknown";
- }
-}
-
-static const char __init *ep93xx_get_machine_name(void)
-{
-	return kasprintf(GFP_KERNEL, "%s", machine_desc->name);
-}
-
-static struct device __init *ep93xx_init_soc(void)
-{
- struct soc_device_attribute *soc_dev_attr;
- struct soc_device *soc_dev;
-
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
- return NULL;
-
- soc_dev_attr->machine = ep93xx_get_machine_name();
- soc_dev_attr->family = "Cirrus Logic EP93xx";
- soc_dev_attr->revision = ep93xx_get_soc_rev();
- soc_dev_attr->soc_id = ep93xx_get_soc_id();
-
- soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- kfree(soc_dev_attr->machine);
- kfree(soc_dev_attr);
- return NULL;
- }
-
- return soc_device_to_device(soc_dev);
-}
-
-struct device __init *ep93xx_init_devices(void)
-{
- struct device *parent;
-
- /* Disallow access to MaverickCrunch initially */
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA);
-
- /* Default all ports to GPIO */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK |
- EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
-
- parent = ep93xx_init_soc();
-
- /* Get the GPIO working early, other devices need it */
- platform_device_register(&ep93xx_gpio_device);
-
- amba_device_register(&uart1_device, &iomem_resource);
- amba_device_register(&uart2_device, &iomem_resource);
- amba_device_register(&uart3_device, &iomem_resource);
-
- platform_device_register(&ep93xx_rtc_device);
- platform_device_register(&ep93xx_ohci_device);
- platform_device_register(&ep93xx_wdt_device);
-
- gpiod_add_lookup_table(&ep93xx_leds_gpio_table);
- gpio_led_register_device(-1, &ep93xx_led_data);
-
- return parent;
-}
-
-void ep93xx_restart(enum reboot_mode mode, const char *cmd)
-{
- /*
- * Set then clear the SWRST bit to initiate a software reset
- */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST);
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST);
-
- while (1)
- ;
-}
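
One sizing detail from the file above: ep93xx_get_soc_id() packs four 32-bit fuse words into ep93xx_soc_id[33], which is exactly four times eight hex digits plus the terminating NUL. A standalone sketch of just the formatting, with invented values standing in for the UNIQID register reads:

#include <stdio.h>

int main(void)
{
	/* 32-bit fuse words; real ones come from the UNIQID registers */
	unsigned int id2 = 0x01234567, id3 = 0x89abcdef,
		     id4 = 0x02468ace, id5 = 0x13579bdf;
	char soc_id[33];	/* 4 words x 8 hex digits + NUL */

	snprintf(soc_id, sizeof(soc_id), "%08x%08x%08x%08x",
		 id2, id3, id4, id5);
	printf("soc id: %s\n", soc_id);
	return 0;
}
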
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
deleted file mode 100644
index 74515acab8ef..000000000000
--- a/arch/arm/mach-ep93xx/dma.c
+++ /dev/null
@@ -1,114 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/dma.c
- *
- * Platform support code for the EP93xx dmaengine driver.
- *
- * Copyright (C) 2011 Mika Westerberg
- *
- * This work is based on the original dma-m2p implementation with
- * following copyrights:
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2006 Applied Data Systems
- * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
- */
-
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include <linux/platform_data/dma-ep93xx.h>
-#include "hardware.h"
-
-#include "soc.h"
-
-#define DMA_CHANNEL(_name, _base, _irq) \
- { .name = (_name), .base = (_base), .irq = (_irq) }
-
-/*
- * DMA M2P channels.
- *
- * On the EP93xx chip, the following peripherals may be allocated to the 10
- * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
- *
- * I2S contains 3 Tx and 3 Rx DMA Channels
- * AAC contains 3 Tx and 3 Rx DMA Channels
- * UART1 contains 1 Tx and 1 Rx DMA Channels
- * UART2 contains 1 Tx and 1 Rx DMA Channels
- * UART3 contains 1 Tx and 1 Rx DMA Channels
- * IrDA contains 1 Tx and 1 Rx DMA Channels
- *
- * Registers are mapped statically in ep93xx_map_io().
- */
-static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
- DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
- DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
- DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
- DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
- DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
- DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
- DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
- DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
- DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
- DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
-};
-
-static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
- .channels = ep93xx_dma_m2p_channels,
- .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels),
-};
-
-static u64 ep93xx_dma_m2p_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_dma_m2p_device = {
- .name = "ep93xx-dma-m2p",
- .id = -1,
- .dev = {
- .platform_data = &ep93xx_dma_m2p_data,
- .dma_mask = &ep93xx_dma_m2p_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-/*
- * DMA M2M channels.
- *
- * There are 2 M2M channels which support memcpy/memset and, in addition,
- * simple hardware requests from/to SSP and IDE. We do not implement
- * external hardware requests.
- *
- * Registers are mapped statically in ep93xx_map_io().
- */
-static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
- DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
- DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
-};
-
-static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
- .channels = ep93xx_dma_m2m_channels,
- .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels),
-};
-
-static u64 ep93xx_dma_m2m_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ep93xx_dma_m2m_device = {
- .name = "ep93xx-dma-m2m",
- .id = -1,
- .dev = {
- .platform_data = &ep93xx_dma_m2m_data,
- .dma_mask = &ep93xx_dma_m2m_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static int __init ep93xx_dma_init(void)
-{
- platform_device_register(&ep93xx_dma_m2p_device);
- platform_device_register(&ep93xx_dma_m2m_device);
- return 0;
-}
-arch_initcall(ep93xx_dma_init);
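
The channel tables above are the common table-plus-macro platform-data idiom. Note that the m2p4/m2p5 (likewise 6/7 and 8/9) base offsets are swapped relative to their names; that ordering follows the hardware register map. A reduced standalone model of the idiom, where the two rows mirror the m2p4/m2p5 entries and the irq numbers follow the IRQ_EP93XX_DMAM2P4/5 definitions in irqs.h further down (VIC0 base 1); struct chan_data is an invented stand-in for the real platform-data types:

#include <stddef.h>
#include <stdio.h>

struct chan_data {
	const char *name;
	unsigned int base;
	int irq;
};

#define DMA_CHANNEL(_name, _base, _irq) \
	{ .name = (_name), .base = (_base), .irq = (_irq) }

static const struct chan_data m2p[] = {
	DMA_CHANNEL("m2p4", 0x0240, 12),
	DMA_CHANNEL("m2p5", 0x0200, 13),
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(m2p) / sizeof(m2p[0]); i++)
		printf("%s @ base 0x%04x, irq %d\n",
		       m2p[i].name, m2p[i].base, m2p[i].irq);
	return 0;
}
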
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
deleted file mode 100644
index dbdb822a0100..000000000000
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ /dev/null
@@ -1,368 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/edb93xx.c
- * Cirrus Logic EDB93xx Development Board support.
- *
- * EDB93XX, EDB9301, EDB9307A
- * Copyright (C) 2008-2009 H Hartley Sweeten <hsweeten@visionengravers.com>
- *
- * EDB9302
- * Copyright (C) 2006 George Kashperko <george@chas.com.ua>
- *
- * EDB9302A, EDB9315, EDB9315A
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * EDB9307
- * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
- *
- * EDB9312
- * Copyright (C) 2006 Infosys Technologies Limited
- * Toufeeq Hussain <toufeeq_hussain@infosys.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/spi/spi.h>
-#include <linux/gpio/machine.h>
-
-#include <sound/cs4271.h>
-
-#include "hardware.h"
-#include <linux/platform_data/video-ep93xx.h>
-#include <linux/platform_data/spi-ep93xx.h>
-#include "gpio-ep93xx.h"
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "soc.h"
-
-static void __init edb93xx_register_flash(void)
-{
- if (machine_is_edb9307() || machine_is_edb9312() ||
- machine_is_edb9315()) {
- ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_32M);
- } else {
- ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M);
- }
-}
-
-static struct ep93xx_eth_data __initdata edb93xx_eth_data = {
- .phy_id = 1,
-};
-
-
-/*************************************************************************
- * EDB93xx i2c peripheral handling
- *************************************************************************/
-
-static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = {
- {
- I2C_BOARD_INFO("isl1208", 0x6f),
- },
-};
-
-static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = {
- {
- I2C_BOARD_INFO("ds1337", 0x68),
- },
-};
-
-static void __init edb93xx_register_i2c(void)
-{
- if (machine_is_edb9302a() || machine_is_edb9307a() ||
- machine_is_edb9315a()) {
- ep93xx_register_i2c(edb93xxa_i2c_board_info,
- ARRAY_SIZE(edb93xxa_i2c_board_info));
- } else if (machine_is_edb9302() || machine_is_edb9307()
- || machine_is_edb9312() || machine_is_edb9315()) {
- ep93xx_register_i2c(edb93xx_i2c_board_info,
- ARRAY_SIZE(edb93xx_i2c_board_info));
- }
-}
-
-
-/*************************************************************************
- * EDB93xx SPI peripheral handling
- *************************************************************************/
-static struct cs4271_platform_data edb93xx_cs4271_data = {
- /* Intentionally left blank */
-};
-
-static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
- {
- .modalias = "cs4271",
- .platform_data = &edb93xx_cs4271_data,
- .max_speed_hz = 6000000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_3,
- },
-};
-
-static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
- /* Intentionally left blank */
-};
-
-static struct gpiod_lookup_table edb93xx_cs4272_edb9301_gpio_table = {
- .dev_id = "spi0.0", /* CS0 on SPI0 */
- .table = {
- GPIO_LOOKUP("A", 1, "reset", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct gpiod_lookup_table edb93xx_cs4272_edb9302_gpio_table = {
- .dev_id = "spi0.0", /* CS0 on SPI0 */
- .table = {
- GPIO_LOOKUP("H", 2, "reset", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct gpiod_lookup_table edb93xx_cs4272_edb9315_gpio_table = {
- .dev_id = "spi0.0", /* CS0 on SPI0 */
- .table = {
- GPIO_LOOKUP("B", 6, "reset", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static void __init edb93xx_register_spi(void)
-{
- if (machine_is_edb9301() || machine_is_edb9302())
- gpiod_add_lookup_table(&edb93xx_cs4272_edb9301_gpio_table);
- else if (machine_is_edb9302a() || machine_is_edb9307a())
- gpiod_add_lookup_table(&edb93xx_cs4272_edb9302_gpio_table);
- else if (machine_is_edb9315a())
- gpiod_add_lookup_table(&edb93xx_cs4272_edb9315_gpio_table);
-
- gpiod_add_lookup_table(&edb93xx_spi_cs_gpio_table);
- ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
- ARRAY_SIZE(edb93xx_spi_board_info));
-}
-
-
-/*************************************************************************
- * EDB93xx I2S
- *************************************************************************/
-static struct platform_device edb93xx_audio_device = {
- .name = "edb93xx-audio",
- .id = -1,
-};
-
-static int __init edb93xx_has_audio(void)
-{
- return (machine_is_edb9301() || machine_is_edb9302() ||
- machine_is_edb9302a() || machine_is_edb9307a() ||
- machine_is_edb9315a());
-}
-
-static void __init edb93xx_register_i2s(void)
-{
- if (edb93xx_has_audio()) {
- ep93xx_register_i2s();
- platform_device_register(&edb93xx_audio_device);
- }
-}
-
-
-/*************************************************************************
- * EDB93xx pwm
- *************************************************************************/
-static void __init edb93xx_register_pwm(void)
-{
- if (machine_is_edb9301() ||
- machine_is_edb9302() || machine_is_edb9302a()) {
- /* EP9301 and EP9302 only have pwm.1 (EGPIO14) */
- ep93xx_register_pwm(0, 1);
- } else if (machine_is_edb9307() || machine_is_edb9307a()) {
- /* EP9307 only has pwm.0 (PWMOUT) */
- ep93xx_register_pwm(1, 0);
- } else {
- /* EP9312 and EP9315 have both */
- ep93xx_register_pwm(1, 1);
- }
-}
-
-
-/*************************************************************************
- * EDB93xx framebuffer
- *************************************************************************/
-static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
- .flags = 0,
-};
-
-static int __init edb93xx_has_fb(void)
-{
- /* These platforms have an ep93xx with video capability */
- return machine_is_edb9307() || machine_is_edb9307a() ||
- machine_is_edb9312() || machine_is_edb9315() ||
- machine_is_edb9315a();
-}
-
-static void __init edb93xx_register_fb(void)
-{
- if (!edb93xx_has_fb())
- return;
-
- if (machine_is_edb9307a() || machine_is_edb9315a())
- edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0;
- else
- edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3;
-
- ep93xx_register_fb(&edb93xxfb_info);
-}
-
-
-/*************************************************************************
- * EDB93xx IDE
- *************************************************************************/
-static int __init edb93xx_has_ide(void)
-{
- /*
-	 * Although EDB9312 and EDB9315 do have IDE capability, they have
-	 * the INTRQ line wired as a pull-up, which makes using the IDE
-	 * interface problematic.
- */
- return machine_is_edb9312() || machine_is_edb9315() ||
- machine_is_edb9315a();
-}
-
-static void __init edb93xx_register_ide(void)
-{
- if (!edb93xx_has_ide())
- return;
-
- ep93xx_register_ide();
-}
-
-
-static void __init edb93xx_init_machine(void)
-{
- ep93xx_init_devices();
- edb93xx_register_flash();
- ep93xx_register_eth(&edb93xx_eth_data, 1);
- edb93xx_register_i2c();
- edb93xx_register_spi();
- edb93xx_register_i2s();
- edb93xx_register_pwm();
- edb93xx_register_fb();
- edb93xx_register_ide();
- ep93xx_register_adc();
-}
-
-
-#ifdef CONFIG_MACH_EDB9301
-MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
- /* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9302
-MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
- /* Maintainer: George Kashperko <george@chas.com.ua> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9302A
-MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
- /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9307
-MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
- /* Maintainer: Herbert Valerio Riedel <hvr@gnu.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9307A
-MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
- /* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9312
-MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
- /* Maintainer: Toufeeq Hussain <toufeeq_hussain@infosys.com> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9315
-MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
- /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
-
-#ifdef CONFIG_MACH_EDB9315A
-MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
- /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ep93xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = edb93xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-#endif
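
edb93xx.c is the classic "one kernel image, many boards" pattern: a single init path queries machine_is_*() predicates, derived from the boot-time machine number, to decide per-board features. A toy model of that dispatch with an invented board enum; the feature rule loosely mirrors edb93xx_has_fb() above:

#include <stdbool.h>
#include <stdio.h>

enum board { EDB9301, EDB9307, EDB9312, EDB9315 };

static bool board_has_fb(enum board b)
{
	/* cf. edb93xx_has_fb(): only the video-capable parts */
	return b == EDB9307 || b == EDB9312 || b == EDB9315;
}

int main(void)
{
	enum board b = EDB9307;	/* normally derived from the machine nr */

	printf("framebuffer: %s\n", board_has_fb(b) ? "yes" : "no");
	return 0;
}
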
diff --git a/arch/arm/mach-ep93xx/ep93xx-regs.h b/arch/arm/mach-ep93xx/ep93xx-regs.h
deleted file mode 100644
index 8fa3646de0a4..000000000000
--- a/arch/arm/mach-ep93xx/ep93xx-regs.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_EP93XX_REGS_H
-#define __ASM_ARCH_EP93XX_REGS_H
-
-/*
- * EP93xx linux memory map:
- *
- * virt phys size
- * fe800000 5M per-platform mappings
- * fed00000 80800000 2M APB
- * fef00000 80000000 1M AHB
- */
-
-#define EP93XX_AHB_PHYS_BASE 0x80000000
-#define EP93XX_AHB_VIRT_BASE 0xfef00000
-#define EP93XX_AHB_SIZE 0x00100000
-
-#define EP93XX_AHB_PHYS(x) (EP93XX_AHB_PHYS_BASE + (x))
-#define EP93XX_AHB_IOMEM(x) IOMEM(EP93XX_AHB_VIRT_BASE + (x))
-
-#define EP93XX_APB_PHYS_BASE 0x80800000
-#define EP93XX_APB_VIRT_BASE 0xfed00000
-#define EP93XX_APB_SIZE 0x00200000
-
-#define EP93XX_APB_PHYS(x) (EP93XX_APB_PHYS_BASE + (x))
-#define EP93XX_APB_IOMEM(x) IOMEM(EP93XX_APB_VIRT_BASE + (x))
-
-/* APB UARTs */
-#define EP93XX_UART1_PHYS_BASE EP93XX_APB_PHYS(0x000c0000)
-#define EP93XX_UART1_BASE EP93XX_APB_IOMEM(0x000c0000)
-
-#define EP93XX_UART2_PHYS_BASE EP93XX_APB_PHYS(0x000d0000)
-#define EP93XX_UART2_BASE EP93XX_APB_IOMEM(0x000d0000)
-
-#define EP93XX_UART3_PHYS_BASE EP93XX_APB_PHYS(0x000e0000)
-#define EP93XX_UART3_BASE EP93XX_APB_IOMEM(0x000e0000)
-
-#endif
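
The macro pairs above encode a fixed, offset-preserving window: APB physical 0x80800000 maps to virtual 0xfed00000 (and AHB 0x80000000 to 0xfef00000), so translation is just a constant bias. A standalone sketch using the UART1 address from this header; apb_phys_to_virt is an invented helper name:

#include <stdint.h>
#include <stdio.h>

#define APB_PHYS_BASE	0x80800000u
#define APB_VIRT_BASE	0xfed00000u

static uint32_t apb_phys_to_virt(uint32_t phys)
{
	return APB_VIRT_BASE + (phys - APB_PHYS_BASE);
}

int main(void)
{
	/* UART1 sits at APB offset 0x000c0000 */
	printf("UART1 virt: 0x%08x\n",
	       (unsigned int)apb_phys_to_virt(0x808c0000u));
	return 0;
}
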
diff --git a/arch/arm/mach-ep93xx/gpio-ep93xx.h b/arch/arm/mach-ep93xx/gpio-ep93xx.h
deleted file mode 100644
index 7b46eb7e5507..000000000000
--- a/arch/arm/mach-ep93xx/gpio-ep93xx.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Include file for the EP93XX GPIO controller machine specifics */
-
-#ifndef __GPIO_EP93XX_H
-#define __GPIO_EP93XX_H
-
-#include "ep93xx-regs.h"
-
-#define EP93XX_GPIO_PHYS_BASE EP93XX_APB_PHYS(0x00040000)
-#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000)
-#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x))
-#define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c)
-#define EP93XX_GPIO_A_INT_STATUS EP93XX_GPIO_REG(0xa0)
-#define EP93XX_GPIO_B_INT_STATUS EP93XX_GPIO_REG(0xbc)
-#define EP93XX_GPIO_EEDRIVE EP93XX_GPIO_REG(0xc8)
-
-/* GPIO port A. */
-#define EP93XX_GPIO_LINE_A(x) ((x) + 0)
-#define EP93XX_GPIO_LINE_EGPIO0 EP93XX_GPIO_LINE_A(0)
-#define EP93XX_GPIO_LINE_EGPIO1 EP93XX_GPIO_LINE_A(1)
-#define EP93XX_GPIO_LINE_EGPIO2 EP93XX_GPIO_LINE_A(2)
-#define EP93XX_GPIO_LINE_EGPIO3 EP93XX_GPIO_LINE_A(3)
-#define EP93XX_GPIO_LINE_EGPIO4 EP93XX_GPIO_LINE_A(4)
-#define EP93XX_GPIO_LINE_EGPIO5 EP93XX_GPIO_LINE_A(5)
-#define EP93XX_GPIO_LINE_EGPIO6 EP93XX_GPIO_LINE_A(6)
-#define EP93XX_GPIO_LINE_EGPIO7 EP93XX_GPIO_LINE_A(7)
-
-/* GPIO port B. */
-#define EP93XX_GPIO_LINE_B(x) ((x) + 8)
-#define EP93XX_GPIO_LINE_EGPIO8 EP93XX_GPIO_LINE_B(0)
-#define EP93XX_GPIO_LINE_EGPIO9 EP93XX_GPIO_LINE_B(1)
-#define EP93XX_GPIO_LINE_EGPIO10 EP93XX_GPIO_LINE_B(2)
-#define EP93XX_GPIO_LINE_EGPIO11 EP93XX_GPIO_LINE_B(3)
-#define EP93XX_GPIO_LINE_EGPIO12 EP93XX_GPIO_LINE_B(4)
-#define EP93XX_GPIO_LINE_EGPIO13 EP93XX_GPIO_LINE_B(5)
-#define EP93XX_GPIO_LINE_EGPIO14 EP93XX_GPIO_LINE_B(6)
-#define EP93XX_GPIO_LINE_EGPIO15 EP93XX_GPIO_LINE_B(7)
-
-/* GPIO port C. */
-#define EP93XX_GPIO_LINE_C(x) ((x) + 40)
-#define EP93XX_GPIO_LINE_ROW0 EP93XX_GPIO_LINE_C(0)
-#define EP93XX_GPIO_LINE_ROW1 EP93XX_GPIO_LINE_C(1)
-#define EP93XX_GPIO_LINE_ROW2 EP93XX_GPIO_LINE_C(2)
-#define EP93XX_GPIO_LINE_ROW3 EP93XX_GPIO_LINE_C(3)
-#define EP93XX_GPIO_LINE_ROW4 EP93XX_GPIO_LINE_C(4)
-#define EP93XX_GPIO_LINE_ROW5 EP93XX_GPIO_LINE_C(5)
-#define EP93XX_GPIO_LINE_ROW6 EP93XX_GPIO_LINE_C(6)
-#define EP93XX_GPIO_LINE_ROW7 EP93XX_GPIO_LINE_C(7)
-
-/* GPIO port D. */
-#define EP93XX_GPIO_LINE_D(x) ((x) + 24)
-#define EP93XX_GPIO_LINE_COL0 EP93XX_GPIO_LINE_D(0)
-#define EP93XX_GPIO_LINE_COL1 EP93XX_GPIO_LINE_D(1)
-#define EP93XX_GPIO_LINE_COL2 EP93XX_GPIO_LINE_D(2)
-#define EP93XX_GPIO_LINE_COL3 EP93XX_GPIO_LINE_D(3)
-#define EP93XX_GPIO_LINE_COL4 EP93XX_GPIO_LINE_D(4)
-#define EP93XX_GPIO_LINE_COL5 EP93XX_GPIO_LINE_D(5)
-#define EP93XX_GPIO_LINE_COL6 EP93XX_GPIO_LINE_D(6)
-#define EP93XX_GPIO_LINE_COL7 EP93XX_GPIO_LINE_D(7)
-
-/* GPIO port E. */
-#define EP93XX_GPIO_LINE_E(x) ((x) + 32)
-#define EP93XX_GPIO_LINE_GRLED EP93XX_GPIO_LINE_E(0)
-#define EP93XX_GPIO_LINE_RDLED EP93XX_GPIO_LINE_E(1)
-#define EP93XX_GPIO_LINE_DIORn EP93XX_GPIO_LINE_E(2)
-#define EP93XX_GPIO_LINE_IDECS1n EP93XX_GPIO_LINE_E(3)
-#define EP93XX_GPIO_LINE_IDECS2n EP93XX_GPIO_LINE_E(4)
-#define EP93XX_GPIO_LINE_IDEDA0 EP93XX_GPIO_LINE_E(5)
-#define EP93XX_GPIO_LINE_IDEDA1 EP93XX_GPIO_LINE_E(6)
-#define EP93XX_GPIO_LINE_IDEDA2 EP93XX_GPIO_LINE_E(7)
-
-/* GPIO port F. */
-#define EP93XX_GPIO_LINE_F(x) ((x) + 16)
-#define EP93XX_GPIO_LINE_WP EP93XX_GPIO_LINE_F(0)
-#define EP93XX_GPIO_LINE_MCCD1 EP93XX_GPIO_LINE_F(1)
-#define EP93XX_GPIO_LINE_MCCD2 EP93XX_GPIO_LINE_F(2)
-#define EP93XX_GPIO_LINE_MCBVD1 EP93XX_GPIO_LINE_F(3)
-#define EP93XX_GPIO_LINE_MCBVD2 EP93XX_GPIO_LINE_F(4)
-#define EP93XX_GPIO_LINE_VS1 EP93XX_GPIO_LINE_F(5)
-#define EP93XX_GPIO_LINE_READY EP93XX_GPIO_LINE_F(6)
-#define EP93XX_GPIO_LINE_VS2 EP93XX_GPIO_LINE_F(7)
-
-/* GPIO port G. */
-#define EP93XX_GPIO_LINE_G(x) ((x) + 48)
-#define EP93XX_GPIO_LINE_EECLK EP93XX_GPIO_LINE_G(0)
-#define EP93XX_GPIO_LINE_EEDAT EP93XX_GPIO_LINE_G(1)
-#define EP93XX_GPIO_LINE_SLA0 EP93XX_GPIO_LINE_G(2)
-#define EP93XX_GPIO_LINE_SLA1 EP93XX_GPIO_LINE_G(3)
-#define EP93XX_GPIO_LINE_DD12 EP93XX_GPIO_LINE_G(4)
-#define EP93XX_GPIO_LINE_DD13 EP93XX_GPIO_LINE_G(5)
-#define EP93XX_GPIO_LINE_DD14 EP93XX_GPIO_LINE_G(6)
-#define EP93XX_GPIO_LINE_DD15 EP93XX_GPIO_LINE_G(7)
-
-/* GPIO port H. */
-#define EP93XX_GPIO_LINE_H(x) ((x) + 56)
-#define EP93XX_GPIO_LINE_DD0 EP93XX_GPIO_LINE_H(0)
-#define EP93XX_GPIO_LINE_DD1 EP93XX_GPIO_LINE_H(1)
-#define EP93XX_GPIO_LINE_DD2 EP93XX_GPIO_LINE_H(2)
-#define EP93XX_GPIO_LINE_DD3 EP93XX_GPIO_LINE_H(3)
-#define EP93XX_GPIO_LINE_DD4 EP93XX_GPIO_LINE_H(4)
-#define EP93XX_GPIO_LINE_DD5 EP93XX_GPIO_LINE_H(5)
-#define EP93XX_GPIO_LINE_DD6 EP93XX_GPIO_LINE_H(6)
-#define EP93XX_GPIO_LINE_DD7 EP93XX_GPIO_LINE_H(7)
-
-/* maximum value for gpio line identifiers */
-#define EP93XX_GPIO_LINE_MAX EP93XX_GPIO_LINE_H(7)
-
-/* maximum value for irq capable line identifiers */
-#define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7)
-
-#endif /* __GPIO_EP93XX_H */
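The port bases deleted above are deliberately eight lines apart but not alphabetical: A=0, B=8, F=16, D=24, E=32, C=40, G=48, H=56. A minimal sketch of the resulting (port, pin) to line-number mapping; the helper below is hypothetical and was never part of this header:

	/* Hypothetical helper illustrating the deleted numbering scheme. */
	static const int ep93xx_port_base['H' - 'A' + 1] = {
		['A' - 'A'] = 0,  ['B' - 'A'] = 8,  ['C' - 'A'] = 40,
		['D' - 'A'] = 24, ['E' - 'A'] = 32, ['F' - 'A'] = 16,
		['G' - 'A'] = 48, ['H' - 'A'] = 56,
	};

	static inline int ep93xx_gpio_line(char port, int pin)
	{
		/* e.g. ('C', 3) -> 43, matching EP93XX_GPIO_LINE_C(3) */
		return ep93xx_port_base[port - 'A'] + pin;
	}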
diff --git a/arch/arm/mach-ep93xx/hardware.h b/arch/arm/mach-ep93xx/hardware.h
deleted file mode 100644
index e7d850e04782..000000000000
--- a/arch/arm/mach-ep93xx/hardware.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/mach-ep93xx/include/mach/hardware.h
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include "platform.h"
-
-/*
- * The EP93xx has two external crystal oscillators. To generate the
- * required high-frequency clocks, the processor uses two phase-locked
- * loops (PLLs) to multiply the incoming external clock signal to much
- * higher frequencies that are then divided down by programmable dividers
- * to produce the needed clocks. The PLLs operate independently of one
- * another.
- */
-#define EP93XX_EXT_CLK_RATE 14745600
-#define EP93XX_EXT_RTC_RATE 32768
-
-#define EP93XX_KEYTCHCLK_DIV4 (EP93XX_EXT_CLK_RATE / 4)
-#define EP93XX_KEYTCHCLK_DIV16 (EP93XX_EXT_CLK_RATE / 16)
-
-#endif
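The 14.7456 MHz figure is the classic UART-friendly crystal: it divides evenly into the standard baud rates (14745600 / 115200 = 128) as well as into the key/touch controller clocks derived above. A quick sanity check of that arithmetic, as stand-alone C rather than kernel code:

	#include <assert.h>

	int main(void)
	{
		const unsigned long ext = 14745600;	/* EP93XX_EXT_CLK_RATE */

		assert(ext / 115200 == 128);	/* baud-rate friendly */
		assert(ext / 4 == 3686400);	/* EP93XX_KEYTCHCLK_DIV4 */
		assert(ext / 16 == 921600);	/* EP93XX_KEYTCHCLK_DIV16 */
		return 0;
	}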
diff --git a/arch/arm/mach-ep93xx/irqs.h b/arch/arm/mach-ep93xx/irqs.h
deleted file mode 100644
index 353201b90c66..000000000000
--- a/arch/arm/mach-ep93xx/irqs.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_IRQS_H
-#define __ASM_ARCH_IRQS_H
-
-#define IRQ_EP93XX_VIC0 1
-
-#define IRQ_EP93XX_COMMRX (IRQ_EP93XX_VIC0 + 2)
-#define IRQ_EP93XX_COMMTX (IRQ_EP93XX_VIC0 + 3)
-#define IRQ_EP93XX_TIMER1 (IRQ_EP93XX_VIC0 + 4)
-#define IRQ_EP93XX_TIMER2 (IRQ_EP93XX_VIC0 + 5)
-#define IRQ_EP93XX_AACINTR (IRQ_EP93XX_VIC0 + 6)
-#define IRQ_EP93XX_DMAM2P0 (IRQ_EP93XX_VIC0 + 7)
-#define IRQ_EP93XX_DMAM2P1 (IRQ_EP93XX_VIC0 + 8)
-#define IRQ_EP93XX_DMAM2P2 (IRQ_EP93XX_VIC0 + 9)
-#define IRQ_EP93XX_DMAM2P3 (IRQ_EP93XX_VIC0 + 10)
-#define IRQ_EP93XX_DMAM2P4 (IRQ_EP93XX_VIC0 + 11)
-#define IRQ_EP93XX_DMAM2P5 (IRQ_EP93XX_VIC0 + 12)
-#define IRQ_EP93XX_DMAM2P6 (IRQ_EP93XX_VIC0 + 13)
-#define IRQ_EP93XX_DMAM2P7 (IRQ_EP93XX_VIC0 + 14)
-#define IRQ_EP93XX_DMAM2P8 (IRQ_EP93XX_VIC0 + 15)
-#define IRQ_EP93XX_DMAM2P9 (IRQ_EP93XX_VIC0 + 16)
-#define IRQ_EP93XX_DMAM2M0 (IRQ_EP93XX_VIC0 + 17)
-#define IRQ_EP93XX_DMAM2M1 (IRQ_EP93XX_VIC0 + 18)
-#define IRQ_EP93XX_GPIO0MUX (IRQ_EP93XX_VIC0 + 19)
-#define IRQ_EP93XX_GPIO1MUX (IRQ_EP93XX_VIC0 + 20)
-#define IRQ_EP93XX_GPIO2MUX (IRQ_EP93XX_VIC0 + 21)
-#define IRQ_EP93XX_GPIO3MUX (IRQ_EP93XX_VIC0 + 22)
-#define IRQ_EP93XX_UART1RX (IRQ_EP93XX_VIC0 + 23)
-#define IRQ_EP93XX_UART1TX (IRQ_EP93XX_VIC0 + 24)
-#define IRQ_EP93XX_UART2RX (IRQ_EP93XX_VIC0 + 25)
-#define IRQ_EP93XX_UART2TX (IRQ_EP93XX_VIC0 + 26)
-#define IRQ_EP93XX_UART3RX (IRQ_EP93XX_VIC0 + 27)
-#define IRQ_EP93XX_UART3TX (IRQ_EP93XX_VIC0 + 28)
-#define IRQ_EP93XX_KEY (IRQ_EP93XX_VIC0 + 29)
-#define IRQ_EP93XX_TOUCH (IRQ_EP93XX_VIC0 + 30)
-#define EP93XX_VIC1_VALID_IRQ_MASK 0x7ffffffc
-
-#define IRQ_EP93XX_VIC1 (IRQ_EP93XX_VIC0 + 32)
-
-#define IRQ_EP93XX_EXT0 (IRQ_EP93XX_VIC1 + 0)
-#define IRQ_EP93XX_EXT1 (IRQ_EP93XX_VIC1 + 1)
-#define IRQ_EP93XX_EXT2 (IRQ_EP93XX_VIC1 + 2)
-#define IRQ_EP93XX_64HZ (IRQ_EP93XX_VIC1 + 3)
-#define IRQ_EP93XX_WATCHDOG (IRQ_EP93XX_VIC1 + 4)
-#define IRQ_EP93XX_RTC (IRQ_EP93XX_VIC1 + 5)
-#define IRQ_EP93XX_IRDA (IRQ_EP93XX_VIC1 + 6)
-#define IRQ_EP93XX_ETHERNET (IRQ_EP93XX_VIC1 + 7)
-#define IRQ_EP93XX_EXT3 (IRQ_EP93XX_VIC1 + 8)
-#define IRQ_EP93XX_PROG (IRQ_EP93XX_VIC1 + 9)
-#define IRQ_EP93XX_1HZ (IRQ_EP93XX_VIC1 + 10)
-#define IRQ_EP93XX_VSYNC (IRQ_EP93XX_VIC1 + 11)
-#define IRQ_EP93XX_VIDEO_FIFO (IRQ_EP93XX_VIC1 + 12)
-#define IRQ_EP93XX_SSP1RX (IRQ_EP93XX_VIC1 + 13)
-#define IRQ_EP93XX_SSP1TX (IRQ_EP93XX_VIC1 + 14)
-#define IRQ_EP93XX_GPIO4MUX (IRQ_EP93XX_VIC1 + 15)
-#define IRQ_EP93XX_GPIO5MUX (IRQ_EP93XX_VIC1 + 16)
-#define IRQ_EP93XX_GPIO6MUX (IRQ_EP93XX_VIC1 + 17)
-#define IRQ_EP93XX_GPIO7MUX (IRQ_EP93XX_VIC1 + 18)
-#define IRQ_EP93XX_TIMER3 (IRQ_EP93XX_VIC1 + 19)
-#define IRQ_EP93XX_UART1 (IRQ_EP93XX_VIC1 + 20)
-#define IRQ_EP93XX_SSP (IRQ_EP93XX_VIC1 + 21)
-#define IRQ_EP93XX_UART2 (IRQ_EP93XX_VIC1 + 22)
-#define IRQ_EP93XX_UART3 (IRQ_EP93XX_VIC1 + 23)
-#define IRQ_EP93XX_USB (IRQ_EP93XX_VIC1 + 24)
-#define IRQ_EP93XX_ETHERNET_PME (IRQ_EP93XX_VIC1 + 25)
-#define IRQ_EP93XX_DSP (IRQ_EP93XX_VIC1 + 26)
-#define IRQ_EP93XX_GPIO_AB (IRQ_EP93XX_VIC1 + 27)
-#define IRQ_EP93XX_SAI (IRQ_EP93XX_VIC1 + 28)
-#define EP93XX_VIC2_VALID_IRQ_MASK 0x1fffffff
-
-#define NR_EP93XX_IRQS (IRQ_EP93XX_VIC1 + 32 + 24)
-
-#define EP93XX_BOARD_IRQ(x) (NR_EP93XX_IRQS + (x))
-#define EP93XX_BOARD_IRQS 32
-
-#endif
diff --git a/arch/arm/mach-ep93xx/platform.h b/arch/arm/mach-ep93xx/platform.h
deleted file mode 100644
index 5fb1b919133f..000000000000
--- a/arch/arm/mach-ep93xx/platform.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/mach-ep93xx/include/mach/platform.h
- */
-
-#ifndef __ASSEMBLY__
-
-#include <linux/platform_data/eth-ep93xx.h>
-#include <linux/reboot.h>
-
-struct device;
-struct i2c_board_info;
-struct spi_board_info;
-struct platform_device;
-struct ep93xxfb_mach_info;
-struct ep93xx_keypad_platform_data;
-struct ep93xx_spi_info;
-
-void ep93xx_map_io(void);
-void ep93xx_init_irq(void);
-
-void ep93xx_register_flash(unsigned int width,
- resource_size_t start, resource_size_t size);
-
-void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
-void ep93xx_register_i2c(struct i2c_board_info *devices, int num);
-void ep93xx_register_spi(struct ep93xx_spi_info *info,
- struct spi_board_info *devices, int num);
-void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
-void ep93xx_register_pwm(int pwm0, int pwm1);
-void ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data);
-void ep93xx_register_i2s(void);
-void ep93xx_register_ac97(void);
-void ep93xx_register_ide(void);
-void ep93xx_register_adc(void);
-
-struct device *ep93xx_init_devices(void);
-extern void ep93xx_timer_init(void);
-
-void ep93xx_restart(enum reboot_mode, const char *);
-
-#endif
diff --git a/arch/arm/mach-ep93xx/soc.h b/arch/arm/mach-ep93xx/soc.h
deleted file mode 100644
index 3245ebbd5069..000000000000
--- a/arch/arm/mach-ep93xx/soc.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * arch/arm/mach-ep93xx/soc.h
- *
- * Copyright (C) 2012 Open Kernel Labs <www.ok-labs.com>
- * Copyright (C) 2012 Ryan Mallon <rmallon@gmail.com>
- */
-
-#ifndef _EP93XX_SOC_H
-#define _EP93XX_SOC_H
-
-#include "ep93xx-regs.h"
-#include "irqs.h"
-
-/*
- * EP93xx Physical Memory Map:
- *
- * The ASDO pin is sampled at system reset to select a synchronous or
- * asynchronous boot configuration. When ASDO is "1" (i.e. pulled-up)
- * the synchronous boot mode is selected. When ASDO is "0" (i.e.
- * pulled-down) the asynchronous boot mode is selected.
- *
- * In synchronous boot mode nSDCE3 is decoded starting at physical address
- * 0x00000000 and nCS0 is decoded starting at 0xf0000000. For asynchronous
- * boot mode they are swapped with nCS0 decoded at 0x00000000 and nSDCE3
- * decoded at 0xf0000000.
- *
- * There are known errata for the EP93xx dealing with External Memory
- * Configurations. Please refer to "AN273: EP93xx Silicon Rev E Design
- * Guidelines" for more information. This document can be found at:
- *
- * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
- */
-
-#define EP93XX_CS0_PHYS_BASE_ASYNC 0x00000000 /* ASDO Pin = 0 */
-#define EP93XX_SDCE3_PHYS_BASE_SYNC 0x00000000 /* ASDO Pin = 1 */
-#define EP93XX_CS1_PHYS_BASE 0x10000000
-#define EP93XX_CS2_PHYS_BASE 0x20000000
-#define EP93XX_CS3_PHYS_BASE 0x30000000
-#define EP93XX_PCMCIA_PHYS_BASE 0x40000000
-#define EP93XX_CS6_PHYS_BASE 0x60000000
-#define EP93XX_CS7_PHYS_BASE 0x70000000
-#define EP93XX_SDCE0_PHYS_BASE 0xc0000000
-#define EP93XX_SDCE1_PHYS_BASE 0xd0000000
-#define EP93XX_SDCE2_PHYS_BASE 0xe0000000
-#define EP93XX_SDCE3_PHYS_BASE_ASYNC 0xf0000000 /* ASDO Pin = 0 */
-#define EP93XX_CS0_PHYS_BASE_SYNC 0xf0000000 /* ASDO Pin = 1 */
-
-/* AHB peripherals */
-#define EP93XX_DMA_BASE EP93XX_AHB_IOMEM(0x00000000)
-
-#define EP93XX_ETHERNET_PHYS_BASE EP93XX_AHB_PHYS(0x00010000)
-#define EP93XX_ETHERNET_BASE EP93XX_AHB_IOMEM(0x00010000)
-
-#define EP93XX_USB_PHYS_BASE EP93XX_AHB_PHYS(0x00020000)
-#define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000)
-
-#define EP93XX_RASTER_PHYS_BASE EP93XX_AHB_PHYS(0x00030000)
-#define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000)
-
-#define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000)
-
-#define EP93XX_SDRAM_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00060000)
-
-#define EP93XX_PCMCIA_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00080000)
-
-#define EP93XX_BOOT_ROM_BASE EP93XX_AHB_IOMEM(0x00090000)
-
-#define EP93XX_IDE_PHYS_BASE EP93XX_AHB_PHYS(0x000a0000)
-#define EP93XX_IDE_BASE EP93XX_AHB_IOMEM(0x000a0000)
-
-#define EP93XX_VIC1_BASE EP93XX_AHB_IOMEM(0x000b0000)
-
-#define EP93XX_VIC2_BASE EP93XX_AHB_IOMEM(0x000c0000)
-
-/* APB peripherals */
-#define EP93XX_TIMER_BASE EP93XX_APB_IOMEM(0x00010000)
-
-#define EP93XX_I2S_PHYS_BASE EP93XX_APB_PHYS(0x00020000)
-#define EP93XX_I2S_BASE EP93XX_APB_IOMEM(0x00020000)
-
-#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000)
-
-#define EP93XX_AAC_PHYS_BASE EP93XX_APB_PHYS(0x00080000)
-#define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000)
-
-#define EP93XX_SPI_PHYS_BASE EP93XX_APB_PHYS(0x000a0000)
-#define EP93XX_SPI_BASE EP93XX_APB_IOMEM(0x000a0000)
-
-#define EP93XX_IRDA_BASE EP93XX_APB_IOMEM(0x000b0000)
-
-#define EP93XX_KEY_MATRIX_PHYS_BASE EP93XX_APB_PHYS(0x000f0000)
-#define EP93XX_KEY_MATRIX_BASE EP93XX_APB_IOMEM(0x000f0000)
-
-#define EP93XX_ADC_PHYS_BASE EP93XX_APB_PHYS(0x00100000)
-#define EP93XX_ADC_BASE EP93XX_APB_IOMEM(0x00100000)
-#define EP93XX_TOUCHSCREEN_BASE EP93XX_APB_IOMEM(0x00100000)
-
-#define EP93XX_PWM_PHYS_BASE EP93XX_APB_PHYS(0x00110000)
-#define EP93XX_PWM_BASE EP93XX_APB_IOMEM(0x00110000)
-
-#define EP93XX_RTC_PHYS_BASE EP93XX_APB_PHYS(0x00120000)
-#define EP93XX_RTC_BASE EP93XX_APB_IOMEM(0x00120000)
-
-#define EP93XX_WATCHDOG_PHYS_BASE EP93XX_APB_PHYS(0x00140000)
-#define EP93XX_WATCHDOG_BASE EP93XX_APB_IOMEM(0x00140000)
-
-/* System controller */
-#define EP93XX_SYSCON_BASE EP93XX_APB_IOMEM(0x00130000)
-#define EP93XX_SYSCON_REG(x) (EP93XX_SYSCON_BASE + (x))
-#define EP93XX_SYSCON_POWER_STATE EP93XX_SYSCON_REG(0x00)
-#define EP93XX_SYSCON_PWRCNT EP93XX_SYSCON_REG(0x04)
-#define EP93XX_SYSCON_PWRCNT_FIR_EN (1<<31)
-#define EP93XX_SYSCON_PWRCNT_UARTBAUD (1<<29)
-#define EP93XX_SYSCON_PWRCNT_USH_EN 28
-#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 27
-#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 26
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 25
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 24
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 23
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 22
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 21
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 20
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 19
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 18
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 17
-#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 16
-#define EP93XX_SYSCON_HALT EP93XX_SYSCON_REG(0x08)
-#define EP93XX_SYSCON_STANDBY EP93XX_SYSCON_REG(0x0c)
-#define EP93XX_SYSCON_CLKSET1 EP93XX_SYSCON_REG(0x20)
-#define EP93XX_SYSCON_CLKSET1_NBYP1 (1<<23)
-#define EP93XX_SYSCON_CLKSET2 EP93XX_SYSCON_REG(0x24)
-#define EP93XX_SYSCON_CLKSET2_NBYP2 (1<<19)
-#define EP93XX_SYSCON_CLKSET2_PLL2_EN (1<<18)
-#define EP93XX_SYSCON_DEVCFG EP93XX_SYSCON_REG(0x80)
-#define EP93XX_SYSCON_DEVCFG_SWRST (1<<31)
-#define EP93XX_SYSCON_DEVCFG_D1ONG (1<<30)
-#define EP93XX_SYSCON_DEVCFG_D0ONG (1<<29)
-#define EP93XX_SYSCON_DEVCFG_IONU2 (1<<28)
-#define EP93XX_SYSCON_DEVCFG_GONK (1<<27)
-#define EP93XX_SYSCON_DEVCFG_TONG (1<<26)
-#define EP93XX_SYSCON_DEVCFG_MONG (1<<25)
-#define EP93XX_SYSCON_DEVCFG_U3EN 24
-#define EP93XX_SYSCON_DEVCFG_CPENA (1<<23)
-#define EP93XX_SYSCON_DEVCFG_A2ONG (1<<22)
-#define EP93XX_SYSCON_DEVCFG_A1ONG (1<<21)
-#define EP93XX_SYSCON_DEVCFG_U2EN 20
-#define EP93XX_SYSCON_DEVCFG_EXVC (1<<19)
-#define EP93XX_SYSCON_DEVCFG_U1EN 18
-#define EP93XX_SYSCON_DEVCFG_TIN (1<<17)
-#define EP93XX_SYSCON_DEVCFG_HC3IN (1<<15)
-#define EP93XX_SYSCON_DEVCFG_HC3EN (1<<14)
-#define EP93XX_SYSCON_DEVCFG_HC1IN (1<<13)
-#define EP93XX_SYSCON_DEVCFG_HC1EN (1<<12)
-#define EP93XX_SYSCON_DEVCFG_HONIDE (1<<11)
-#define EP93XX_SYSCON_DEVCFG_GONIDE (1<<10)
-#define EP93XX_SYSCON_DEVCFG_PONG (1<<9)
-#define EP93XX_SYSCON_DEVCFG_EONIDE (1<<8)
-#define EP93XX_SYSCON_DEVCFG_I2SONSSP (1<<7)
-#define EP93XX_SYSCON_DEVCFG_I2SONAC97 (1<<6)
-#define EP93XX_SYSCON_DEVCFG_RASONP3 (1<<4)
-#define EP93XX_SYSCON_DEVCFG_RAS (1<<3)
-#define EP93XX_SYSCON_DEVCFG_ADCPD (1<<2)
-#define EP93XX_SYSCON_DEVCFG_KEYS (1<<1)
-#define EP93XX_SYSCON_DEVCFG_SHENA (1<<0)
-#define EP93XX_SYSCON_VIDCLKDIV EP93XX_SYSCON_REG(0x84)
-#define EP93XX_SYSCON_CLKDIV_ENABLE 15
-#define EP93XX_SYSCON_CLKDIV_ESEL (1<<14)
-#define EP93XX_SYSCON_CLKDIV_PSEL (1<<13)
-#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8
-#define EP93XX_SYSCON_I2SCLKDIV EP93XX_SYSCON_REG(0x8c)
-#define EP93XX_SYSCON_I2SCLKDIV_SENA 31
-#define EP93XX_SYSCON_I2SCLKDIV_ORIDE (1<<29)
-#define EP93XX_SYSCON_I2SCLKDIV_SPOL (1<<19)
-#define EP93XX_I2SCLKDIV_SDIV (1 << 16)
-#define EP93XX_I2SCLKDIV_LRDIV32 (0 << 17)
-#define EP93XX_I2SCLKDIV_LRDIV64 (1 << 17)
-#define EP93XX_I2SCLKDIV_LRDIV128 (2 << 17)
-#define EP93XX_I2SCLKDIV_LRDIV_MASK (3 << 17)
-#define EP93XX_SYSCON_KEYTCHCLKDIV EP93XX_SYSCON_REG(0x90)
-#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN 31
-#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV 16
-#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN 15
-#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV (1<<0)
-#define EP93XX_SYSCON_SYSCFG EP93XX_SYSCON_REG(0x9c)
-#define EP93XX_SYSCON_SYSCFG_REV_MASK (0xf0000000)
-#define EP93XX_SYSCON_SYSCFG_REV_SHIFT (28)
-#define EP93XX_SYSCON_SYSCFG_SBOOT (1<<8)
-#define EP93XX_SYSCON_SYSCFG_LCSN7 (1<<7)
-#define EP93XX_SYSCON_SYSCFG_LCSN6 (1<<6)
-#define EP93XX_SYSCON_SYSCFG_LASDO (1<<5)
-#define EP93XX_SYSCON_SYSCFG_LEEDA (1<<4)
-#define EP93XX_SYSCON_SYSCFG_LEECLK (1<<3)
-#define EP93XX_SYSCON_SYSCFG_LCSN2 (1<<1)
-#define EP93XX_SYSCON_SYSCFG_LCSN1 (1<<0)
-#define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0)
-
-/* EP93xx System Controller software locked register write */
-void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg);
-void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits);
-
-static inline void ep93xx_devcfg_set_bits(unsigned int bits)
-{
- ep93xx_devcfg_set_clear(bits, 0x00);
-}
-
-static inline void ep93xx_devcfg_clear_bits(unsigned int bits)
-{
- ep93xx_devcfg_set_clear(0x00, bits);
-}
-
-#endif /* _EP93XX_SOC_H */
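The swlocked-write helper declared just above was implemented in the equally removed core.c. As a hedged sketch of the mechanism rather than a verbatim copy: the EP93xx convention is that every write to a locked SYSCON register must be immediately preceded by writing 0xaa to SWLOCK, and the unlock/write pair has to stay atomic with respect to other writers:

	static DEFINE_SPINLOCK(ep93xx_syscon_lock);

	void ep93xx_syscon_swlocked_write(unsigned int val, void __iomem *reg)
	{
		unsigned long flags;

		spin_lock_irqsave(&ep93xx_syscon_lock, flags);

		__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);	/* unlock */
		__raw_writel(val, reg);				/* one locked write */

		spin_unlock_irqrestore(&ep93xx_syscon_lock, flags);
	}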
diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
deleted file mode 100644
index a9efa7bc2fa1..000000000000
--- a/arch/arm/mach-ep93xx/timer-ep93xx.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <asm/mach/time.h>
-#include "soc.h"
-#include "platform.h"
-
-/*************************************************************************
- * Timer handling for EP93xx
- *************************************************************************
- * The ep93xx has four internal timers. Timers 1, 2 (both 16 bit) and
- * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate
- * an interrupt on underflow. Timer 4 (40 bit) counts down at 983.04 kHz,
- * is free-running, and can't generate interrupts.
- *
- * The 508 kHz timers are ideal for the timer interrupt, as the
- * most common values of HZ divide 508 kHz nicely. We pick the 32 bit
- * timer (timer 3) to get sleep intervals as long as possible when using
- * CONFIG_NO_HZ.
- *
- * The higher clock rate of timer 4 makes it a better choice than the
- * other timers for use as clock source and for sched_clock(), providing
- * a stable 40 bit time base.
- *************************************************************************
- */
-#define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x))
-#define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00)
-#define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04)
-#define EP93XX_TIMER1_CONTROL EP93XX_TIMER_REG(0x08)
-#define EP93XX_TIMER123_CONTROL_ENABLE (1 << 7)
-#define EP93XX_TIMER123_CONTROL_MODE (1 << 6)
-#define EP93XX_TIMER123_CONTROL_CLKSEL (1 << 3)
-#define EP93XX_TIMER1_CLEAR EP93XX_TIMER_REG(0x0c)
-#define EP93XX_TIMER2_LOAD EP93XX_TIMER_REG(0x20)
-#define EP93XX_TIMER2_VALUE EP93XX_TIMER_REG(0x24)
-#define EP93XX_TIMER2_CONTROL EP93XX_TIMER_REG(0x28)
-#define EP93XX_TIMER2_CLEAR EP93XX_TIMER_REG(0x2c)
-#define EP93XX_TIMER4_VALUE_LOW EP93XX_TIMER_REG(0x60)
-#define EP93XX_TIMER4_VALUE_HIGH EP93XX_TIMER_REG(0x64)
-#define EP93XX_TIMER4_VALUE_HIGH_ENABLE (1 << 8)
-#define EP93XX_TIMER3_LOAD EP93XX_TIMER_REG(0x80)
-#define EP93XX_TIMER3_VALUE EP93XX_TIMER_REG(0x84)
-#define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88)
-#define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c)
-
-#define EP93XX_TIMER123_RATE 508469
-#define EP93XX_TIMER4_RATE 983040
-
-static u64 notrace ep93xx_read_sched_clock(void)
-{
- u64 ret;
-
- ret = readl(EP93XX_TIMER4_VALUE_LOW);
- ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
- return ret;
-}
-
-static u64 ep93xx_clocksource_read(struct clocksource *c)
-{
- u64 ret;
-
- ret = readl(EP93XX_TIMER4_VALUE_LOW);
- ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
- return (u64) ret;
-}
-
-static int ep93xx_clkevt_set_next_event(unsigned long next,
- struct clock_event_device *evt)
-{
- /* Default mode: periodic, off, 508 kHz */
- u32 tmode = EP93XX_TIMER123_CONTROL_MODE |
- EP93XX_TIMER123_CONTROL_CLKSEL;
-
- /* Clear timer */
- writel(tmode, EP93XX_TIMER3_CONTROL);
-
- /* Set next event */
- writel(next, EP93XX_TIMER3_LOAD);
- writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE,
- EP93XX_TIMER3_CONTROL);
- return 0;
-}
-
-
-static int ep93xx_clkevt_shutdown(struct clock_event_device *evt)
-{
- /* Disable timer */
- writel(0, EP93XX_TIMER3_CONTROL);
-
- return 0;
-}
-
-static struct clock_event_device ep93xx_clockevent = {
- .name = "timer1",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = ep93xx_clkevt_shutdown,
- .set_state_oneshot = ep93xx_clkevt_shutdown,
- .tick_resume = ep93xx_clkevt_shutdown,
- .set_next_event = ep93xx_clkevt_set_next_event,
- .rating = 300,
-};
-
-static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- /* Writing any value clears the timer interrupt */
- writel(1, EP93XX_TIMER3_CLEAR);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-void __init ep93xx_timer_init(void)
-{
- int irq = IRQ_EP93XX_TIMER3;
- unsigned long flags = IRQF_TIMER | IRQF_IRQPOLL;
-
- /* Enable and register clocksource and sched_clock on timer 4 */
- writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE,
- EP93XX_TIMER4_VALUE_HIGH);
- clocksource_mmio_init(NULL, "timer4",
- EP93XX_TIMER4_RATE, 200, 40,
- ep93xx_clocksource_read);
- sched_clock_register(ep93xx_read_sched_clock, 40,
- EP93XX_TIMER4_RATE);
-
- /* Set up clockevent on timer 3 */
- if (request_irq(irq, ep93xx_timer_interrupt, flags, "ep93xx timer",
- &ep93xx_clockevent))
- pr_err("Failed to request irq %d (ep93xx timer)\n", irq);
- clockevents_config_and_register(&ep93xx_clockevent,
- EP93XX_TIMER123_RATE,
- 1,
- 0xffffffffU);
-}
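One subtlety in the clocksource removed above: the 40-bit value takes two MMIO reads (low word, then high byte), so the high byte can step while the low word wraps between them. The usual guard is to re-read the high byte until it is stable; this is only a sketch of that technique, not what the deleted code did:

	static u64 ep93xx_read_timer4(void)
	{
		u32 lo, hi, hi2;

		do {
			hi  = readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff;
			lo  = readl(EP93XX_TIMER4_VALUE_LOW);
			hi2 = readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff;
		} while (hi != hi2);	/* retry if the high byte moved */

		return ((u64)hi << 32) | lo;
	}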
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
deleted file mode 100644
index d3de7283ecb3..000000000000
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ /dev/null
@@ -1,422 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/ts72xx.c
- * Technologic Systems TS72xx SBC support.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/mtd/platnand.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/mmc_spi.h>
-#include <linux/mmc/host.h>
-#include <linux/platform_data/spi-ep93xx.h>
-#include <linux/gpio/machine.h>
-
-#include "gpio-ep93xx.h"
-#include "hardware.h"
-
-#include <asm/mach-types.h>
-#include <asm/mach/map.h>
-#include <asm/mach/arch.h>
-
-#include "soc.h"
-#include "ts72xx.h"
-
-/*************************************************************************
- * IO map
- *************************************************************************/
-static struct map_desc ts72xx_io_desc[] __initdata = {
- {
- .virtual = (unsigned long)TS72XX_MODEL_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_MODEL_PHYS_BASE),
- .length = TS72XX_MODEL_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)TS72XX_OPTIONS_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_OPTIONS_PHYS_BASE),
- .length = TS72XX_OPTIONS_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)TS72XX_OPTIONS2_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE),
- .length = TS72XX_OPTIONS2_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)TS72XX_CPLDVER_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_CPLDVER_PHYS_BASE),
- .length = TS72XX_CPLDVER_SIZE,
- .type = MT_DEVICE,
- }
-};
-
-static void __init ts72xx_map_io(void)
-{
- ep93xx_map_io();
- iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
-}
-
-
-/*************************************************************************
- * NAND flash
- *************************************************************************/
-#define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */
-#define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */
-
-static void ts72xx_nand_hwcontrol(struct nand_chip *chip,
- int cmd, unsigned int ctrl)
-{
- if (ctrl & NAND_CTRL_CHANGE) {
- void __iomem *addr = chip->legacy.IO_ADDR_R;
- unsigned char bits;
-
- addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE);
-
- bits = __raw_readb(addr) & ~0x07;
- bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */
- bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */
- bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */
-
- __raw_writeb(bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- __raw_writeb(cmd, chip->legacy.IO_ADDR_W);
-}
-
-static int ts72xx_nand_device_ready(struct nand_chip *chip)
-{
- void __iomem *addr = chip->legacy.IO_ADDR_R;
-
- addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE);
-
- return !!(__raw_readb(addr) & 0x20);
-}
-
-#define TS72XX_BOOTROM_PART_SIZE (SZ_16K)
-#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M)
-
-static struct mtd_partition ts72xx_nand_parts[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0,
- .size = TS72XX_BOOTROM_PART_SIZE,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- }, {
- .name = "Linux",
- .offset = MTDPART_OFS_RETAIN,
- .size = TS72XX_REDBOOT_PART_SIZE,
- /* leave so much for last partition */
- }, {
- .name = "RedBoot",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
-};
-
-static struct platform_nand_data ts72xx_nand_data = {
- .chip = {
- .nr_chips = 1,
- .chip_offset = 0,
- .chip_delay = 15,
- },
- .ctrl = {
- .cmd_ctrl = ts72xx_nand_hwcontrol,
- .dev_ready = ts72xx_nand_device_ready,
- },
-};
-
-static struct resource ts72xx_nand_resource[] = {
- {
- .start = 0, /* filled in later */
- .end = 0, /* filled in later */
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device ts72xx_nand_flash = {
- .name = "gen_nand",
- .id = -1,
- .dev.platform_data = &ts72xx_nand_data,
- .resource = ts72xx_nand_resource,
- .num_resources = ARRAY_SIZE(ts72xx_nand_resource),
-};
-
-static void __init ts72xx_register_flash(struct mtd_partition *parts, int n,
- resource_size_t start)
-{
- /*
-	 * The TS7200 has NOR flash; all other TS72xx boards have NAND flash.
- */
- if (board_is_ts7200()) {
- ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M);
- } else {
- ts72xx_nand_resource[0].start = start;
- ts72xx_nand_resource[0].end = start + SZ_16M - 1;
-
- ts72xx_nand_data.chip.partitions = parts;
- ts72xx_nand_data.chip.nr_partitions = n;
-
- platform_device_register(&ts72xx_nand_flash);
- }
-}
-
-/*************************************************************************
- * RTC M48T86
- *************************************************************************/
-#define TS72XX_RTC_INDEX_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x00800000)
-#define TS72XX_RTC_DATA_PHYS_BASE (EP93XX_CS1_PHYS_BASE + 0x01700000)
-
-static struct resource ts72xx_rtc_resources[] = {
- DEFINE_RES_MEM(TS72XX_RTC_INDEX_PHYS_BASE, 0x01),
- DEFINE_RES_MEM(TS72XX_RTC_DATA_PHYS_BASE, 0x01),
-};
-
-static struct platform_device ts72xx_rtc_device = {
- .name = "rtc-m48t86",
- .id = -1,
- .resource = ts72xx_rtc_resources,
- .num_resources = ARRAY_SIZE(ts72xx_rtc_resources),
-};
-
-/*************************************************************************
- * Watchdog (in CPLD)
- *************************************************************************/
-#define TS72XX_WDT_CONTROL_PHYS_BASE (EP93XX_CS2_PHYS_BASE + 0x03800000)
-#define TS72XX_WDT_FEED_PHYS_BASE (EP93XX_CS2_PHYS_BASE + 0x03c00000)
-
-static struct resource ts72xx_wdt_resources[] = {
- DEFINE_RES_MEM(TS72XX_WDT_CONTROL_PHYS_BASE, 0x01),
- DEFINE_RES_MEM(TS72XX_WDT_FEED_PHYS_BASE, 0x01),
-};
-
-static struct platform_device ts72xx_wdt_device = {
- .name = "ts72xx-wdt",
- .id = -1,
- .resource = ts72xx_wdt_resources,
- .num_resources = ARRAY_SIZE(ts72xx_wdt_resources),
-};
-
-/*************************************************************************
- * ETH
- *************************************************************************/
-static struct ep93xx_eth_data __initdata ts72xx_eth_data = {
- .phy_id = 1,
-};
-
-/*************************************************************************
- * SPI SD/MMC host
- *************************************************************************/
-#define BK3_EN_SDCARD_PHYS_BASE 0x12400000
-#define BK3_EN_SDCARD_PWR 0x0
-#define BK3_DIS_SDCARD_PWR 0x0C
-static void bk3_mmc_spi_setpower(struct device *dev, unsigned int vdd)
-{
- void __iomem *pwr_sd = ioremap(BK3_EN_SDCARD_PHYS_BASE, SZ_4K);
-
- if (!pwr_sd) {
- pr_err("Failed to enable SD card power!");
- return;
- }
-
- pr_debug("%s: SD card pwr %s VDD:0x%x\n", __func__,
- !!vdd ? "ON" : "OFF", vdd);
-
- if (!!vdd)
- __raw_writeb(BK3_EN_SDCARD_PWR, pwr_sd);
- else
- __raw_writeb(BK3_DIS_SDCARD_PWR, pwr_sd);
-
- iounmap(pwr_sd);
-}
-
-static struct mmc_spi_platform_data bk3_spi_mmc_data = {
- .detect_delay = 500,
- .powerup_msecs = 100,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .caps = MMC_CAP_NONREMOVABLE,
- .setpower = bk3_mmc_spi_setpower,
-};
-
-/*************************************************************************
- * SPI Bus - SD card access
- *************************************************************************/
-static struct spi_board_info bk3_spi_board_info[] __initdata = {
- {
- .modalias = "mmc_spi",
- .platform_data = &bk3_spi_mmc_data,
- .max_speed_hz = 7.4E6,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_0,
- },
-};
-
-/*
- * This is a stub -> the FGPIO[3] pin is not connected on the schematic.
- * All the work is performed automatically by !SPI_FRAME (SFRM1) and
- * goes through the CPLD.
- */
-static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct ep93xx_spi_info bk3_spi_master __initdata = {
- .use_dma = 1,
-};
-
-/*************************************************************************
- * TS72XX support code
- *************************************************************************/
-#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX)
-
-/* Relative to EP93XX_CS1_PHYS_BASE */
-#define TS73XX_FPGA_LOADER_BASE 0x03c00000
-
-static struct resource ts73xx_fpga_resources[] = {
- {
- .start = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE,
- .end = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE + 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device ts73xx_fpga_device = {
- .name = "ts73xx-fpga-mgr",
- .id = -1,
- .resource = ts73xx_fpga_resources,
- .num_resources = ARRAY_SIZE(ts73xx_fpga_resources),
-};
-
-#endif
-
-/*************************************************************************
- * SPI Bus
- *************************************************************************/
-static struct spi_board_info ts72xx_spi_devices[] __initdata = {
- {
- .modalias = "tmp122",
- .max_speed_hz = 2 * 1000 * 1000,
- .bus_num = 0,
- .chip_select = 0,
- },
-};
-
-static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- /* DIO_17 */
- GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct ep93xx_spi_info ts72xx_spi_info __initdata = {
- /* Intentionally left blank */
-};
-
-static void __init ts72xx_init_machine(void)
-{
- ep93xx_init_devices();
- ts72xx_register_flash(ts72xx_nand_parts, ARRAY_SIZE(ts72xx_nand_parts),
- is_ts9420_installed() ?
- EP93XX_CS7_PHYS_BASE : EP93XX_CS6_PHYS_BASE);
- platform_device_register(&ts72xx_rtc_device);
- platform_device_register(&ts72xx_wdt_device);
-
- ep93xx_register_eth(&ts72xx_eth_data, 1);
-#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX)
- if (board_is_ts7300())
- platform_device_register(&ts73xx_fpga_device);
-#endif
- gpiod_add_lookup_table(&ts72xx_spi_cs_gpio_table);
- ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
- ARRAY_SIZE(ts72xx_spi_devices));
-}
-
-MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
- /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ts72xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = ts72xx_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
-
-/*************************************************************************
- * EP93xx I2S audio peripheral handling
- *************************************************************************/
-static struct resource ep93xx_i2s_resource[] = {
- DEFINE_RES_MEM(EP93XX_I2S_PHYS_BASE, 0x100),
- DEFINE_RES_IRQ_NAMED(IRQ_EP93XX_SAI, "spilink i2s slave"),
-};
-
-static struct platform_device ep93xx_i2s_device = {
- .name = "ep93xx-spilink-i2s",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_i2s_resource),
- .resource = ep93xx_i2s_resource,
-};
-
-/*************************************************************************
- * BK3 support code
- *************************************************************************/
-static struct mtd_partition bk3_nand_parts[] = {
- {
- .name = "System",
- .offset = 0x00000000,
- .size = 0x01e00000,
- }, {
- .name = "Data",
- .offset = 0x01e00000,
- .size = 0x05f20000
- }, {
- .name = "RedBoot",
- .offset = 0x07d20000,
- .size = 0x002e0000,
- .mask_flags = MTD_WRITEABLE, /* force RO */
- },
-};
-
-static void __init bk3_init_machine(void)
-{
- ep93xx_init_devices();
-
- ts72xx_register_flash(bk3_nand_parts, ARRAY_SIZE(bk3_nand_parts),
- EP93XX_CS6_PHYS_BASE);
-
- ep93xx_register_eth(&ts72xx_eth_data, 1);
-
- gpiod_add_lookup_table(&bk3_spi_cs_gpio_table);
- ep93xx_register_spi(&bk3_spi_master, bk3_spi_board_info,
- ARRAY_SIZE(bk3_spi_board_info));
-
- /* Configure ep93xx's I2S to use AC97 pins */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_I2SONAC97);
- platform_device_register(&ep93xx_i2s_device);
-}
-
-MACHINE_START(BK3, "Liebherr controller BK3.1")
- /* Maintainer: Lukasz Majewski <lukma@denx.de> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS,
- .map_io = ts72xx_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = bk3_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
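The bit shuffling in ts72xx_nand_hwcontrol() above remaps the MTD control bits (NAND_NCE = 0x01, NAND_CLE = 0x02, NAND_ALE = 0x04) onto the CPLD latch, which expects NCE in bit 2, CLE in bit 1 and ALE in bit 0. Spelled out as a stand-alone sketch of the same transform:

	static unsigned char ts72xx_ctrl_to_cpld(unsigned int ctrl)
	{
		unsigned char bits = 0;

		bits |= (ctrl & 0x01) << 2;	/* NAND_NCE: bit 0 -> bit 2 */
		bits |= (ctrl & 0x02);		/* NAND_CLE: bit 1 -> bit 1 */
		bits |= (ctrl & 0x04) >> 2;	/* NAND_ALE: bit 2 -> bit 0 */

		return bits;	/* e.g. NCE | CLE (0x03) becomes 0x06 */
	}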
diff --git a/arch/arm/mach-ep93xx/ts72xx.h b/arch/arm/mach-ep93xx/ts72xx.h
deleted file mode 100644
index 00b4941d29c9..000000000000
--- a/arch/arm/mach-ep93xx/ts72xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/mach-ep93xx/include/mach/ts72xx.h
- */
-
-/*
- * TS72xx memory map:
- *
- * virt phys size
- * febff000 22000000 4K model number register (bits 0-2)
- * febfe000 22400000 4K options register
- * febfd000 22800000 4K options register #2
- * febfc000 23400000 4K CPLD version register
- */
-
-#ifndef __TS72XX_H_
-#define __TS72XX_H_
-
-#define TS72XX_MODEL_PHYS_BASE 0x22000000
-#define TS72XX_MODEL_VIRT_BASE IOMEM(0xfebff000)
-#define TS72XX_MODEL_SIZE 0x00001000
-
-#define TS72XX_MODEL_TS7200 0x00
-#define TS72XX_MODEL_TS7250 0x01
-#define TS72XX_MODEL_TS7260 0x02
-#define TS72XX_MODEL_TS7300 0x03
-#define TS72XX_MODEL_TS7400 0x04
-#define TS72XX_MODEL_MASK 0x07
-
-
-#define TS72XX_OPTIONS_PHYS_BASE 0x22400000
-#define TS72XX_OPTIONS_VIRT_BASE IOMEM(0xfebfe000)
-#define TS72XX_OPTIONS_SIZE 0x00001000
-
-#define TS72XX_OPTIONS_COM2_RS485 0x02
-#define TS72XX_OPTIONS_MAX197 0x01
-
-
-#define TS72XX_OPTIONS2_PHYS_BASE 0x22800000
-#define TS72XX_OPTIONS2_VIRT_BASE IOMEM(0xfebfd000)
-#define TS72XX_OPTIONS2_SIZE 0x00001000
-
-#define TS72XX_OPTIONS2_TS9420 0x04
-#define TS72XX_OPTIONS2_TS9420_BOOT 0x02
-
-#define TS72XX_CPLDVER_PHYS_BASE 0x23400000
-#define TS72XX_CPLDVER_VIRT_BASE IOMEM(0xfebfc000)
-#define TS72XX_CPLDVER_SIZE 0x00001000
-
-#ifndef __ASSEMBLY__
-
-static inline int ts72xx_model(void)
-{
- return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK;
-}
-
-static inline int board_is_ts7200(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7200;
-}
-
-static inline int board_is_ts7250(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7250;
-}
-
-static inline int board_is_ts7260(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7260;
-}
-
-static inline int board_is_ts7300(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7300;
-}
-
-static inline int board_is_ts7400(void)
-{
- return ts72xx_model() == TS72XX_MODEL_TS7400;
-}
-
-static inline int is_max197_installed(void)
-{
- return !!(__raw_readb(TS72XX_OPTIONS_VIRT_BASE) &
- TS72XX_OPTIONS_MAX197);
-}
-
-static inline int is_ts9420_installed(void)
-{
- return !!(__raw_readb(TS72XX_OPTIONS2_VIRT_BASE) &
- TS72XX_OPTIONS2_TS9420);
-}
-#endif
-#endif /* __TS72XX_H_ */
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
deleted file mode 100644
index 85f0dd7255a9..000000000000
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ /dev/null
@@ -1,319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-ep93xx/vision_ep9307.c
- * Vision Engraving Systems EP9307 SoM support.
- *
- * Copyright (C) 2008-2011 Vision Engraving Systems
- * H Hartley Sweeten <hsweeten@visionengravers.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
-#include <linux/fb.h>
-#include <linux/io.h>
-#include <linux/mtd/partitions.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/pca953x.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/flash.h>
-#include <linux/spi/mmc_spi.h>
-#include <linux/mmc/host.h>
-
-#include <sound/cs4271.h>
-
-#include "hardware.h"
-#include <linux/platform_data/video-ep93xx.h>
-#include <linux/platform_data/spi-ep93xx.h>
-#include "gpio-ep93xx.h"
-
-#include <asm/mach-types.h>
-#include <asm/mach/map.h>
-#include <asm/mach/arch.h>
-
-#include "soc.h"
-
-/*************************************************************************
- * Static I/O mappings for the FPGA
- *************************************************************************/
-#define VISION_PHYS_BASE EP93XX_CS7_PHYS_BASE
-#define VISION_VIRT_BASE 0xfebff000
-
-static struct map_desc vision_io_desc[] __initdata = {
- {
- .virtual = VISION_VIRT_BASE,
- .pfn = __phys_to_pfn(VISION_PHYS_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-};
-
-static void __init vision_map_io(void)
-{
- ep93xx_map_io();
-
- iotable_init(vision_io_desc, ARRAY_SIZE(vision_io_desc));
-}
-
-/*************************************************************************
- * Ethernet
- *************************************************************************/
-static struct ep93xx_eth_data vision_eth_data __initdata = {
- .phy_id = 1,
-};
-
-/*************************************************************************
- * Framebuffer
- *************************************************************************/
-#define VISION_LCD_ENABLE EP93XX_GPIO_LINE_EGPIO1
-
-static int vision_lcd_setup(struct platform_device *pdev)
-{
- int err;
-
- err = gpio_request_one(VISION_LCD_ENABLE, GPIOF_OUT_INIT_HIGH, dev_name(&pdev->dev));
- if (err)
- return err;
-
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_RAS |
- EP93XX_SYSCON_DEVCFG_RASONP3 |
- EP93XX_SYSCON_DEVCFG_EXVC);
-
- return 0;
-}
-
-static void vision_lcd_teardown(struct platform_device *pdev)
-{
- gpio_free(VISION_LCD_ENABLE);
-}
-
-static void vision_lcd_blank(int blank_mode, struct fb_info *info)
-{
- if (blank_mode)
- gpio_set_value(VISION_LCD_ENABLE, 0);
- else
- gpio_set_value(VISION_LCD_ENABLE, 1);
-}
-
-static struct ep93xxfb_mach_info ep93xxfb_info __initdata = {
- .flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING,
- .setup = vision_lcd_setup,
- .teardown = vision_lcd_teardown,
- .blank = vision_lcd_blank,
-};
-
-
-/*************************************************************************
- * GPIO Expanders
- *************************************************************************/
-#define PCA9539_74_GPIO_BASE (EP93XX_GPIO_LINE_MAX + 1)
-#define PCA9539_75_GPIO_BASE (PCA9539_74_GPIO_BASE + 16)
-#define PCA9539_76_GPIO_BASE (PCA9539_75_GPIO_BASE + 16)
-#define PCA9539_77_GPIO_BASE (PCA9539_76_GPIO_BASE + 16)
-
-static struct pca953x_platform_data pca953x_74_gpio_data = {
- .gpio_base = PCA9539_74_GPIO_BASE,
- .irq_base = EP93XX_BOARD_IRQ(0),
-};
-
-static struct pca953x_platform_data pca953x_75_gpio_data = {
- .gpio_base = PCA9539_75_GPIO_BASE,
- .irq_base = -1,
-};
-
-static struct pca953x_platform_data pca953x_76_gpio_data = {
- .gpio_base = PCA9539_76_GPIO_BASE,
- .irq_base = -1,
-};
-
-static struct pca953x_platform_data pca953x_77_gpio_data = {
- .gpio_base = PCA9539_77_GPIO_BASE,
- .irq_base = -1,
-};
-
-/*************************************************************************
- * I2C Bus
- *************************************************************************/
-
-static struct i2c_board_info vision_i2c_info[] __initdata = {
- {
- I2C_BOARD_INFO("isl1208", 0x6f),
- .irq = IRQ_EP93XX_EXT1,
- }, {
- I2C_BOARD_INFO("pca9539", 0x74),
- .platform_data = &pca953x_74_gpio_data,
- }, {
- I2C_BOARD_INFO("pca9539", 0x75),
- .platform_data = &pca953x_75_gpio_data,
- }, {
- I2C_BOARD_INFO("pca9539", 0x76),
- .platform_data = &pca953x_76_gpio_data,
- }, {
- I2C_BOARD_INFO("pca9539", 0x77),
- .platform_data = &pca953x_77_gpio_data,
- },
-};
-
-/*************************************************************************
- * SPI CS4271 Audio Codec
- *************************************************************************/
-static struct cs4271_platform_data vision_cs4271_data = {
- /* Intentionally left blank */
-};
-
-/*************************************************************************
- * SPI Flash
- *************************************************************************/
-static struct mtd_partition vision_spi_flash_partitions[] = {
- {
- .name = "SPI bootstrap",
- .offset = 0,
- .size = SZ_4K,
- }, {
- .name = "Bootstrap config",
- .offset = MTDPART_OFS_APPEND,
- .size = SZ_4K,
- }, {
- .name = "System config",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-static struct flash_platform_data vision_spi_flash_data = {
- .name = "SPI Flash",
- .parts = vision_spi_flash_partitions,
- .nr_parts = ARRAY_SIZE(vision_spi_flash_partitions),
-};
-
-/*************************************************************************
- * SPI SD/MMC host
- *************************************************************************/
-static struct mmc_spi_platform_data vision_spi_mmc_data = {
- .detect_delay = 100,
- .powerup_msecs = 100,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .caps2 = MMC_CAP2_RO_ACTIVE_HIGH,
-};
-
-static struct gpiod_lookup_table vision_spi_mmc_gpio_table = {
-	.dev_id = "mmc_spi.2", /* mmc_spi @ CS2 */
- .table = {
- /* Card detect */
- GPIO_LOOKUP_IDX("B", 7, NULL, 0, GPIO_ACTIVE_LOW),
- /* Write protect */
- GPIO_LOOKUP_IDX("F", 0, NULL, 1, GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-/*************************************************************************
- * SPI Bus
- *************************************************************************/
-static struct spi_board_info vision_spi_board_info[] __initdata = {
- {
- .modalias = "cs4271",
- .platform_data = &vision_cs4271_data,
- .max_speed_hz = 6000000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_3,
- }, {
- .modalias = "sst25l",
- .platform_data = &vision_spi_flash_data,
- .max_speed_hz = 20000000,
- .bus_num = 0,
- .chip_select = 1,
- .mode = SPI_MODE_3,
- }, {
- .modalias = "mmc_spi",
- .platform_data = &vision_spi_mmc_data,
- .max_speed_hz = 20000000,
- .bus_num = 0,
- .chip_select = 2,
- .mode = SPI_MODE_3,
- },
-};
-
-static struct gpiod_lookup_table vision_spi_cs4271_gpio_table = {
- .dev_id = "spi0.0", /* cs4271 @ CS0 */
- .table = {
- /* RESET */
- GPIO_LOOKUP_IDX("H", 2, NULL, 0, GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
- .dev_id = "spi0",
- .table = {
- GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX("G", 2, "cs", 2, GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static struct ep93xx_spi_info vision_spi_master __initdata = {
- .use_dma = 1,
-};
-
-/*************************************************************************
- * I2S Audio
- *************************************************************************/
-static struct platform_device vision_audio_device = {
- .name = "edb93xx-audio",
- .id = -1,
-};
-
-static void __init vision_register_i2s(void)
-{
- ep93xx_register_i2s();
- platform_device_register(&vision_audio_device);
-}
-
-/*************************************************************************
- * Machine Initialization
- *************************************************************************/
-static void __init vision_init_machine(void)
-{
- ep93xx_init_devices();
- ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_64M);
- ep93xx_register_eth(&vision_eth_data, 1);
- ep93xx_register_fb(&ep93xxfb_info);
- ep93xx_register_pwm(1, 0);
-
- /*
- * Request the gpio expander's interrupt gpio line now to prevent
- * the kernel from doing a WARN in gpiolib:gpio_ensure_requested().
- */
- if (gpio_request_one(EP93XX_GPIO_LINE_F(7), GPIOF_IN, "pca9539:74"))
- pr_warn("cannot request interrupt gpio for pca9539:74\n");
-
- vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7));
-
- ep93xx_register_i2c(vision_i2c_info,
- ARRAY_SIZE(vision_i2c_info));
- gpiod_add_lookup_table(&vision_spi_cs4271_gpio_table);
- gpiod_add_lookup_table(&vision_spi_mmc_gpio_table);
- gpiod_add_lookup_table(&vision_spi_cs_gpio_table);
- ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
- ARRAY_SIZE(vision_spi_board_info));
- vision_register_i2s();
-}
-
-MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
- /* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
- .atag_offset = 0x100,
- .nr_irqs = NR_EP93XX_IRQS + EP93XX_BOARD_IRQS,
- .map_io = vision_map_io,
- .init_irq = ep93xx_init_irq,
- .init_time = ep93xx_timer_init,
- .init_machine = vision_init_machine,
- .restart = ep93xx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-lpc32xx/Kconfig b/arch/arm/mach-lpc32xx/Kconfig
index 35730d3696d0..138599545c24 100644
--- a/arch/arm/mach-lpc32xx/Kconfig
+++ b/arch/arm/mach-lpc32xx/Kconfig
@@ -8,5 +8,6 @@ config ARCH_LPC32XX
select CLKSRC_LPC32XX
select CPU_ARM926T
select GPIOLIB
+ select LPC32XX_DMAMUX if AMBA_PL08X
help
Support for the NXP LPC32XX family of processors
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 7ad74db951f6..f18ef45e2fe1 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -333,7 +333,7 @@ int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
omap_pm_ops.scu_prepare(cpu, power_state);
/*
- * CPU never retuns back if targeted power state is OFF mode.
+ * CPU never returns back if targeted power state is OFF mode.
 	 * CPU ONLINE follows normal CPU ONLINE path via
* omap4_secondary_startup().
*/
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 452bf7aac1fa..33533e35720f 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -378,38 +378,56 @@ static const uint32_t spitz_keymap[] = {
KEY(6, 8, KEY_RIGHT),
};
-static const struct matrix_keymap_data spitz_keymap_data = {
- .keymap = spitz_keymap,
- .keymap_size = ARRAY_SIZE(spitz_keymap),
+static const struct software_node_ref_args spitz_mkp_row_gpios[] = {
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 12, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 17, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 91, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 34, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 36, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 38, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 39, GPIO_ACTIVE_HIGH),
};
-static const uint32_t spitz_row_gpios[] =
- { 12, 17, 91, 34, 36, 38, 39 };
-static const uint32_t spitz_col_gpios[] =
- { 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 };
-
-static struct matrix_keypad_platform_data spitz_mkp_pdata = {
- .keymap_data = &spitz_keymap_data,
- .row_gpios = spitz_row_gpios,
- .col_gpios = spitz_col_gpios,
- .num_row_gpios = ARRAY_SIZE(spitz_row_gpios),
- .num_col_gpios = ARRAY_SIZE(spitz_col_gpios),
- .col_scan_delay_us = 10,
- .debounce_ms = 10,
- .wakeup = 1,
+static const struct software_node_ref_args spitz_mkp_col_gpios[] = {
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 88, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 23, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 24, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 25, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 26, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 27, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 52, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 103, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 107, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 108, GPIO_ACTIVE_HIGH),
+ SOFTWARE_NODE_REFERENCE(&pxa2xx_gpiochip_node, 114, GPIO_ACTIVE_HIGH),
};
-static struct platform_device spitz_mkp_device = {
+static const struct property_entry spitz_mkp_properties[] = {
+ PROPERTY_ENTRY_U32_ARRAY("linux,keymap", spitz_keymap),
+ PROPERTY_ENTRY_REF_ARRAY("row-gpios", spitz_mkp_row_gpios),
+ PROPERTY_ENTRY_REF_ARRAY("col-gpios", spitz_mkp_col_gpios),
+ PROPERTY_ENTRY_U32("col-scan-delay-us", 10),
+ PROPERTY_ENTRY_U32("debounce-delay-ms", 10),
+ PROPERTY_ENTRY_BOOL("wakeup-source"),
+ { }
+};
+
+static const struct platform_device_info spitz_mkp_info __initconst = {
.name = "matrix-keypad",
- .id = -1,
- .dev = {
- .platform_data = &spitz_mkp_pdata,
- },
+ .id = PLATFORM_DEVID_NONE,
+ .properties = spitz_mkp_properties,
};
+
static void __init spitz_mkp_init(void)
{
- platform_device_register(&spitz_mkp_device);
+ struct platform_device *pd;
+ int err;
+
+ pd = platform_device_register_full(&spitz_mkp_info);
+ err = PTR_ERR_OR_ZERO(pd);
+ if (err)
+ pr_err("failed to create keypad device: %d\n", err);
}
#else
static inline void spitz_mkp_init(void) {}
@@ -419,45 +437,82 @@ static inline void spitz_mkp_init(void) {}
* GPIO keys
******************************************************************************/
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
-static struct gpio_keys_button spitz_gpio_keys[] = {
- {
- .type = EV_PWR,
- .code = KEY_SUSPEND,
- .gpio = SPITZ_GPIO_ON_KEY,
- .desc = "On Off",
- .wakeup = 1,
- },
- /* Two buttons detecting the lid state */
- {
- .type = EV_SW,
- .code = 0,
- .gpio = SPITZ_GPIO_SWA,
- .desc = "Display Down",
- },
- {
- .type = EV_SW,
- .code = 1,
- .gpio = SPITZ_GPIO_SWB,
- .desc = "Lid Closed",
- },
+static const struct software_node spitz_gpio_keys_node = {
+ .name = "spitz-gpio-keys",
};
-static struct gpio_keys_platform_data spitz_gpio_keys_platform_data = {
- .buttons = spitz_gpio_keys,
- .nbuttons = ARRAY_SIZE(spitz_gpio_keys),
+static const struct property_entry spitz_suspend_key_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_PWR),
+ PROPERTY_ENTRY_U32("linux,code", KEY_SUSPEND),
+ PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_ON_KEY, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_STRING("label", "On Off"),
+ PROPERTY_ENTRY_BOOL("wakeup-source"),
+ { }
};
-static struct platform_device spitz_gpio_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &spitz_gpio_keys_platform_data,
- },
+static const struct software_node spitz_suspend_key_node = {
+ .parent = &spitz_gpio_keys_node,
+ .properties = spitz_suspend_key_props,
+};
+
+static const struct property_entry spitz_sw1_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_SW),
+ PROPERTY_ENTRY_U32("linux,code", 0),
+ PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_SWA, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_STRING("label", "Display Down"),
+ { }
+};
+
+static const struct software_node spitz_sw1_node = {
+ .parent = &spitz_gpio_keys_node,
+ .properties = spitz_sw1_props,
+};
+
+static const struct property_entry spitz_sw2_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_SW),
+ PROPERTY_ENTRY_U32("linux,code", 1),
+ PROPERTY_ENTRY_GPIO("gpios", &pxa2xx_gpiochip_node,
+ SPITZ_GPIO_SWB, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_STRING("label", "Lid Closed"),
+ { }
+};
+
+static const struct software_node spitz_sw2_node = {
+ .parent = &spitz_gpio_keys_node,
+ .properties = spitz_sw2_props,
+};
+
+static const struct software_node *spitz_gpio_keys_swnodes[] = {
+ &spitz_gpio_keys_node,
+ &spitz_suspend_key_node,
+ &spitz_sw1_node,
+ &spitz_sw2_node,
+ NULL
};
static void __init spitz_keys_init(void)
{
- platform_device_register(&spitz_gpio_keys_device);
+ struct platform_device_info keys_info = {
+ .name = "gpio-keys",
+ .id = PLATFORM_DEVID_NONE,
+ };
+ struct platform_device *pd;
+ int err;
+
+ err = software_node_register_node_group(spitz_gpio_keys_swnodes);
+ if (err) {
+ pr_err("failed to register gpio-keys software nodes: %d\n", err);
+ return;
+ }
+
+ keys_info.fwnode = software_node_fwnode(&spitz_gpio_keys_node);
+
+ pd = platform_device_register_full(&keys_info);
+ err = PTR_ERR_OR_ZERO(pd);
+ if (err)
+ pr_err("failed to create gpio-keys device: %d\n", err);
}
#else
static inline void spitz_keys_init(void) {}
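After the conversion above the keypad and gpio-keys drivers no longer see board-specific platform_data; they read the same information through the unified device property API. A hedged sketch of the consumer side (example_probe and the variable names are hypothetical; the property and GPIO names match the software nodes registered above):

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct gpio_desc *row0;
		u32 debounce = 0;

		/* Matches PROPERTY_ENTRY_U32("debounce-delay-ms", 10) above. */
		device_property_read_u32(dev, "debounce-delay-ms", &debounce);

		/* Resolved via the "row-gpios" reference array above. */
		row0 = devm_gpiod_get_index(dev, "row", 0, GPIOD_IN);
		if (IS_ERR(row0))
			return PTR_ERR(row0);

		return 0;
	}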
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 2286c2ea60ec..831793cd6ff9 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -61,7 +61,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
return ret;
}
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
/*
* If we are using split PTE locks, then we need to take the page
* lock here. Otherwise we are using shared mm->page_table_lock
@@ -80,10 +80,10 @@ static inline void do_pte_unlock(spinlock_t *ptl)
{
spin_unlock(ptl);
}
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index d65d0e6ed10a..3dbb383c26d5 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -28,7 +28,8 @@
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -78,8 +79,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8f2df6ed745d..3e29b44d2d7b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -24,6 +24,7 @@ config ARM64
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
+ select ARCH_HAS_DMA_OPS if XEN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_FAST_MULTIPLIER
@@ -100,6 +101,8 @@ config ARM64
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
+ select ARCH_SUPPORTS_RT
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
@@ -232,7 +235,7 @@ config ARM64
select HAVE_FUNCTION_ARG_ACCESS_API
select MMU_GATHER_RCU_TABLE_FREE
select HAVE_RSEQ
- select HAVE_RUST if CPU_LITTLE_ENDIAN
+ select HAVE_RUST if RUSTC_SUPPORTS_ARM64
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
@@ -267,6 +270,18 @@ config ARM64
help
ARM 64-bit (AArch64) Linux support.
+config RUSTC_SUPPORTS_ARM64
+ def_bool y
+ depends on CPU_LITTLE_ENDIAN
+ # Shadow call stack is only supported on certain rustc versions.
+ #
+ # When using the UNWIND_PATCH_PAC_INTO_SCS option, rustc version 1.80+ is
+ # required due to use of the -Zfixed-x18 flag.
+ #
+ # Otherwise, rustc version 1.82+ is required due to use of the
+ # -Zsanitizer=shadow-call-stack flag.
+ depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200 || RUSTC_VERSION >= 108000 && UNWIND_PATCH_PAC_INTO_SCS
+
config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1507
@@ -2102,7 +2117,8 @@ config ARM64_MTE
depends on ARM64_PAN
select ARCH_HAS_SUBPAGE_FAULTS
select ARCH_USES_HIGH_VMA_FLAGS
- select ARCH_USES_PG_ARCH_X
+ select ARCH_USES_PG_ARCH_2
+ select ARCH_USES_PG_ARCH_3
help
Memory Tagging (part of the ARMv8.5 Extensions) provides
architectural support for run-time, always-on detection of
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index f6bc3da1ef11..b058c4803efb 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -57,9 +57,11 @@ KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
ifneq ($(CONFIG_UNWIND_TABLES),y)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+KBUILD_RUSTFLAGS += -Cforce-unwind-tables=n
else
KBUILD_CFLAGS += -fasynchronous-unwind-tables
KBUILD_AFLAGS += -fasynchronous-unwind-tables
+KBUILD_RUSTFLAGS += -Cforce-unwind-tables=y -Zuse-sync-unwind=n
endif
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
@@ -114,6 +116,7 @@ endif
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
KBUILD_CFLAGS += -ffixed-x18
+KBUILD_RUSTFLAGS += -Zfixed-x18
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 607a67a649c4..b5a08333bc57 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -17,7 +17,7 @@
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo \
- Image.zst image.fit
+ Image.zst Image.xz image.fit
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
@@ -40,6 +40,9 @@ $(obj)/Image.lzo: $(obj)/Image FORCE
$(obj)/Image.zst: $(obj)/Image FORCE
$(call if_changed,zstd)
+$(obj)/Image.xz: $(obj)/Image FORCE
+ $(call if_changed,xzkern)
+
$(obj)/image.fit: $(obj)/Image $(obj)/dts/dtbs-list FORCE
$(call if_changed,fit)
diff --git a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
index b096009ef99c..5cbea9cd411f 100644
--- a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi
@@ -94,6 +94,39 @@
#pwm-cells = <2>;
};
+ serial@11002000 {
+ compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart";
+ reg = <0 0x11002000 0 0x100>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uart", "wakeup";
+ clocks = <&infracfg CLK_INFRA_UART0_SEL>,
+ <&infracfg CLK_INFRA_UART0_CK>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
+ serial@11003000 {
+ compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart";
+ reg = <0 0x11003000 0 0x100>;
+ interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uart", "wakeup";
+ clocks = <&infracfg CLK_INFRA_UART1_SEL>,
+ <&infracfg CLK_INFRA_UART1_CK>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
+ serial@11004000 {
+ compatible = "mediatek,mt7981-uart", "mediatek,mt6577-uart";
+ reg = <0 0x11004000 0 0x100>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uart", "wakeup";
+ clocks = <&infracfg CLK_INFRA_UART2_SEL>,
+ <&infracfg CLK_INFRA_UART2_CK>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
i2c@11007000 {
compatible = "mediatek,mt7981-i2c";
reg = <0 0x11007000 0 0x1000>,
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index 70de288d728e..a1cd47d7f5e3 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -888,7 +888,8 @@
mcu {
compatible = "ti,cc1352p7";
- reset-gpios = <&main_gpio0 72 GPIO_ACTIVE_LOW>;
+ bootloader-backdoor-gpios = <&main_gpio0 13 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&main_gpio0 14 GPIO_ACTIVE_HIGH>;
vdds-supply = <&vdd_3v3>;
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index ad8f23a0ec67..d2175f3dd099 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -941,6 +941,7 @@
&pcie {
status = "okay";
+ phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>;
};
&psgtr {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 187e7cc477e0..5fdbfea7a5b2 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -518,6 +518,7 @@ CONFIG_I2C_MUX=y
CONFIG_I2C_MUX_PCA954x=y
CONFIG_I2C_BCM2835=m
CONFIG_I2C_CADENCE=m
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_GPIO=m
CONFIG_I2C_IMX=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 7d7d97ad3cd5..4e350df9a02d 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -9,6 +9,7 @@ syscall-y += unistd_compat_32.h
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
+generic-y += mmzone.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += parport.h
diff --git a/arch/arm64/include/asm/mmzone.h b/arch/arm64/include/asm/mmzone.h
deleted file mode 100644
index fa17e01d9ab2..000000000000
--- a/arch/arm64/include/asm/mmzone.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MMZONE_H
-#define __ASM_MMZONE_H
-
-#ifdef CONFIG_NUMA
-
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* CONFIG_NUMA */
-#endif /* __ASM_MMZONE_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 96c2b0b07c4c..c329ea061dc9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -407,6 +407,7 @@ static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
/*
* Select all bits except the pfn
*/
+#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
@@ -600,6 +601,14 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+#define pmd_special(pmd)	(!!((pmd_val(pmd) & PTE_SPECIAL)))
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
+}
+#endif
+
#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
@@ -617,6 +626,27 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+#define pud_special(pud)	pte_special(pud_pte(pud))
+#define pud_mkspecial(pud)	pte_pud(pte_mkspecial(pud_pte(pud)))
+#endif
+
+#define pmd_pgprot pmd_pgprot
+static inline pgprot_t pmd_pgprot(pmd_t pmd)
+{
+ unsigned long pfn = pmd_pfn(pmd);
+
+ return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
+}
+
+#define pud_pgprot pud_pgprot
+static inline pgprot_t pud_pgprot(pud_t pud)
+{
+ unsigned long pfn = pud_pfn(pud);
+
+ return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
+}
+
static inline void __set_pte_at(struct mm_struct *mm,
unsigned long __always_unused addr,
pte_t *ptep, pte_t pte, unsigned int nr)
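The pmd_pgprot()/pud_pgprot() helpers added above recover the protection bits by rebuilding a descriptor from the pfn alone and XOR-ing it against the original value, so everything except the pfn field survives. A minimal standalone sketch of that trick follows; the field layout and helper names are invented for illustration, not the kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_MASK   0x0000fffffffff000ULL	/* assumed pfn field */

/* pfn_pmd() analogue: rebuild a descriptor from pfn + prot */
static uint64_t mk_desc(uint64_t pfn, uint64_t prot)
{
	return (pfn << PAGE_SHIFT) | prot;
}

/* pmd_pgprot() analogue: XOR cancels the pfn bits, leaving prot */
static uint64_t desc_pgprot(uint64_t desc)
{
	uint64_t pfn = (desc & PFN_MASK) >> PAGE_SHIFT;

	return mk_desc(pfn, 0) ^ desc;
}

int main(void)
{
	uint64_t d = mk_desc(0x1234, 0x3);	/* pfn 0x1234, prot 0x3 */

	assert(desc_pgprot(d) == 0x3);
	printf("prot = %#llx\n", (unsigned long long)desc_pgprot(d));
	return 0;
}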
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 0f6ef432fb84..5fc3af9f8f29 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -5,6 +5,7 @@
#include <linux/cpumask.h>
#ifdef CONFIG_NUMA
+#include <asm/numa.h>
struct pci_bus;
int pcibus_to_node(struct pci_bus *bus);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 55a8e310ea12..58d89d997d05 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -264,8 +264,7 @@ SECTIONS
EXIT_DATA
}
- RUNTIME_CONST(shift, d_hash_shift)
- RUNTIME_CONST(ptr, dentry_hashtable)
+ RUNTIME_CONST_VARIABLES
PERCPU_SECTION(L1_CACHE_BYTES)
HYPERVISOR_PERCPU_SECTION
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index fe0764173cd0..a0d01c46e408 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2164,7 +2164,7 @@ static void cpu_hyp_uninit(void *discard)
}
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
/*
* Most calls to this function are made with migration
@@ -2184,7 +2184,7 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
kvm_timer_cpu_down();
kvm_vgic_cpu_down();
@@ -2380,7 +2380,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
/*
* The stub hypercalls are now disabled, so set our local flag to
- * prevent a later re-init attempt in kvm_arch_hardware_enable().
+ * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu().
*/
__this_cpu_write(kvm_hyp_initialized, 1);
preempt_enable();
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 638b6205526f..f9e30dd34c7a 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -62,7 +62,6 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
*/
num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU;
tmp = kvrealloc(kvm->arch.nested_mmus,
- size_mul(sizeof(*kvm->arch.nested_mmus), kvm->arch.nested_mmus_size),
size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus),
GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!tmp)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index a0400b9aa814..27a32ff15412 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -114,36 +114,33 @@ static void __init arch_reserve_crashkernel(void)
low_size, high);
}
-/*
- * Return the maximum physical address for a zone accessible by the given bits
- * limit. If DRAM starts above 32-bit, expand the zone to the maximum
- * available memory, otherwise cap it at 32-bit.
- */
-static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
+static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
{
- phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
- phys_addr_t phys_start = memblock_start_of_DRAM();
-
- if (phys_start > U32_MAX)
- zone_mask = PHYS_ADDR_MAX;
- else if (phys_start > zone_mask)
- zone_mask = U32_MAX;
+	/*
+	 * Information we get from firmware (e.g. DT dma-ranges) describes
+	 * DMA bus constraints. Devices using DMA might have their own
+	 * limitations; some rely on a DMA zone in low 32-bit memory.
+	 * Keep a low-RAM DMA zone on platforms that have RAM there.
+	 */
+ if (memblock_start_of_DRAM() < U32_MAX)
+ zone_limit = min(zone_limit, U32_MAX);
- return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
+ return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
}
static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
- unsigned int __maybe_unused acpi_zone_dma_bits;
- unsigned int __maybe_unused dt_zone_dma_bits;
- phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
+ phys_addr_t __maybe_unused acpi_zone_dma_limit;
+ phys_addr_t __maybe_unused dt_zone_dma_limit;
+ phys_addr_t __maybe_unused dma32_phys_limit =
+ max_zone_phys(DMA_BIT_MASK(32));
#ifdef CONFIG_ZONE_DMA
- acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
- dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
- zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
- arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
+ acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
+ dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
+ zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
+ arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
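The max_zone_phys() rework above switches from bit counts to explicit physical-address limits: the firmware-reported DMA limit is clamped to 32 bits only when DRAM actually starts below 4 GiB, then capped at the end of DRAM. A rough standalone sketch under assumed DRAM bounds (the memblock values below are placeholders):

#include <stdint.h>
#include <stdio.h>

#define U32_MAX 0xffffffffULL

/* assumed memblock_start/end_of_DRAM() values for the example */
static const uint64_t dram_start = 0x80000000ULL;
static const uint64_t dram_end   = 0x1000000000ULL;

static uint64_t max_zone_phys(uint64_t zone_limit)
{
	/* keep a low 32-bit DMA zone when RAM actually starts below 4 GiB */
	if (dram_start < U32_MAX && zone_limit > U32_MAX)
		zone_limit = U32_MAX;

	/* never extend the zone past the end of DRAM */
	if (zone_limit > dram_end - 1)
		zone_limit = dram_end - 1;

	return zone_limit + 1;
}

int main(void)
{
	printf("ZONE_DMA32 limit: %#llx\n",
	       (unsigned long long)max_zone_phys(U32_MAX));
	return 0;
}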
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index dd0bb069df4b..8bbd0b20136a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -26,9 +26,8 @@
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
-#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TCCNT_PTR (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
-#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
#define check_imm(bits, imm) do { \
@@ -63,11 +62,10 @@ static const int bpf2a64[] = {
[TMP_REG_1] = A64_R(10),
[TMP_REG_2] = A64_R(11),
[TMP_REG_3] = A64_R(12),
- /* tail_call_cnt */
- [TCALL_CNT] = A64_R(26),
+ /* tail_call_cnt_ptr */
+ [TCCNT_PTR] = A64_R(26),
/* temporary register for blinding constants */
[BPF_REG_AX] = A64_R(9),
- [FP_BOTTOM] = A64_R(27),
/* callee saved register for kern_vm_start address */
[ARENA_VM_START] = A64_R(28),
};
@@ -78,11 +76,15 @@ struct jit_ctx {
int epilogue_offset;
int *offset;
int exentry_idx;
+ int nr_used_callee_reg;
+ u8 used_callee_reg[8]; /* r6~r9, fp, arena_vm_start */
__le32 *image;
__le32 *ro_image;
u32 stack_size;
- int fpb_offset;
u64 user_vm_start;
+ u64 arena_vm_start;
+ bool fp_used;
+ bool write;
};
struct bpf_plt {
@@ -96,7 +98,7 @@ struct bpf_plt {
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
- if (ctx->image != NULL)
+ if (ctx->image != NULL && ctx->write)
ctx->image[ctx->idx] = cpu_to_le32(insn);
ctx->idx++;
@@ -181,14 +183,47 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
}
}
-static inline void emit_call(u64 target, struct jit_ctx *ctx)
+static bool should_emit_indirect_call(long target, const struct jit_ctx *ctx)
+{
+ long offset;
+
+	/* When ctx->ro_image is not allocated or the target is unknown,
+	 * emit an indirect call
+ */
+ if (!ctx->ro_image || !target)
+ return true;
+
+ offset = target - (long)&ctx->ro_image[ctx->idx];
+ return offset < -SZ_128M || offset >= SZ_128M;
+}
+
+static void emit_direct_call(u64 target, struct jit_ctx *ctx)
{
- u8 tmp = bpf2a64[TMP_REG_1];
+ u32 insn;
+ unsigned long pc;
+
+ pc = (unsigned long)&ctx->ro_image[ctx->idx];
+ insn = aarch64_insn_gen_branch_imm(pc, target, AARCH64_INSN_BRANCH_LINK);
+ emit(insn, ctx);
+}
+
+static void emit_indirect_call(u64 target, struct jit_ctx *ctx)
+{
+	u8 tmp = bpf2a64[TMP_REG_1];
+
emit_addr_mov_i64(tmp, target, ctx);
emit(A64_BLR(tmp), ctx);
}
+static void emit_call(u64 target, struct jit_ctx *ctx)
+{
+ if (should_emit_indirect_call((long)target, ctx))
+ emit_indirect_call(target, ctx);
+ else
+ emit_direct_call(target, ctx);
+}
+
static inline int bpf2a64_offset(int bpf_insn, int off,
const struct jit_ctx *ctx)
{
@@ -273,21 +308,143 @@ static bool is_lsi_offset(int offset, int scale)
return true;
}
-/* generated prologue:
+/* generated main prog prologue:
* bti c // if CONFIG_ARM64_BTI_KERNEL
* mov x9, lr
* nop // POKE_OFFSET
* paciasp // if CONFIG_ARM64_PTR_AUTH_KERNEL
* stp x29, lr, [sp, #-16]!
* mov x29, sp
- * stp x19, x20, [sp, #-16]!
- * stp x21, x22, [sp, #-16]!
- * stp x25, x26, [sp, #-16]!
- * stp x27, x28, [sp, #-16]!
- * mov x25, sp
- * mov tcc, #0
+ * stp xzr, x26, [sp, #-16]!
+ * mov x26, sp
* // PROLOGUE_OFFSET
+ * // save callee-saved registers
*/
+static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx)
+{
+ const bool is_main_prog = !bpf_is_subprog(ctx->prog);
+ const u8 ptr = bpf2a64[TCCNT_PTR];
+
+ if (is_main_prog) {
+ /* Initialize tail_call_cnt. */
+ emit(A64_PUSH(A64_ZR, ptr, A64_SP), ctx);
+ emit(A64_MOV(1, ptr, A64_SP), ctx);
+	} else {
+		emit(A64_PUSH(ptr, ptr, A64_SP), ctx);
+	}
+}
+
+static void find_used_callee_regs(struct jit_ctx *ctx)
+{
+ int i;
+ const struct bpf_prog *prog = ctx->prog;
+ const struct bpf_insn *insn = &prog->insnsi[0];
+ int reg_used = 0;
+
+ for (i = 0; i < prog->len; i++, insn++) {
+ if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
+ reg_used |= 1;
+
+ if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
+ reg_used |= 2;
+
+ if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
+ reg_used |= 4;
+
+ if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
+ reg_used |= 8;
+
+ if (insn->dst_reg == BPF_REG_FP || insn->src_reg == BPF_REG_FP) {
+ ctx->fp_used = true;
+ reg_used |= 16;
+ }
+ }
+
+ i = 0;
+ if (reg_used & 1)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_6];
+
+ if (reg_used & 2)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_7];
+
+ if (reg_used & 4)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_8];
+
+ if (reg_used & 8)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_9];
+
+ if (reg_used & 16)
+ ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_FP];
+
+ if (ctx->arena_vm_start)
+ ctx->used_callee_reg[i++] = bpf2a64[ARENA_VM_START];
+
+ ctx->nr_used_callee_reg = i;
+}
+
+/* Save callee-saved registers */
+static void push_callee_regs(struct jit_ctx *ctx)
+{
+ int reg1, reg2, i;
+
+ /*
+	 * A program acting as an exception boundary should save all ARM64
+	 * callee-saved registers, as the exception callback needs to
+	 * recover all of them in its epilogue.
+ */
+ if (ctx->prog->aux->exception_boundary) {
+ emit(A64_PUSH(A64_R(19), A64_R(20), A64_SP), ctx);
+ emit(A64_PUSH(A64_R(21), A64_R(22), A64_SP), ctx);
+ emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
+ emit(A64_PUSH(A64_R(25), A64_R(26), A64_SP), ctx);
+ emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx);
+ } else {
+ find_used_callee_regs(ctx);
+ for (i = 0; i + 1 < ctx->nr_used_callee_reg; i += 2) {
+ reg1 = ctx->used_callee_reg[i];
+ reg2 = ctx->used_callee_reg[i + 1];
+ emit(A64_PUSH(reg1, reg2, A64_SP), ctx);
+ }
+ if (i < ctx->nr_used_callee_reg) {
+ reg1 = ctx->used_callee_reg[i];
+ /* keep SP 16-byte aligned */
+ emit(A64_PUSH(reg1, A64_ZR, A64_SP), ctx);
+ }
+ }
+}
+
+/* Restore callee-saved registers */
+static void pop_callee_regs(struct jit_ctx *ctx)
+{
+ struct bpf_prog_aux *aux = ctx->prog->aux;
+ int reg1, reg2, i;
+
+ /*
+	 * A program acting as an exception boundary pushes R23 and R24
+	 * in addition to the BPF callee-saved registers. The exception
+	 * callback reuses the boundary program's stack frame, so restore
+	 * all of these registers in both cases.
+ */
+ if (aux->exception_boundary || aux->exception_cb) {
+ emit(A64_POP(A64_R(27), A64_R(28), A64_SP), ctx);
+ emit(A64_POP(A64_R(25), A64_R(26), A64_SP), ctx);
+ emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx);
+ emit(A64_POP(A64_R(21), A64_R(22), A64_SP), ctx);
+ emit(A64_POP(A64_R(19), A64_R(20), A64_SP), ctx);
+ } else {
+ i = ctx->nr_used_callee_reg - 1;
+ if (ctx->nr_used_callee_reg % 2 != 0) {
+ reg1 = ctx->used_callee_reg[i];
+ emit(A64_POP(reg1, A64_ZR, A64_SP), ctx);
+ i--;
+ }
+ while (i > 0) {
+ reg1 = ctx->used_callee_reg[i - 1];
+ reg2 = ctx->used_callee_reg[i];
+ emit(A64_POP(reg1, reg2, A64_SP), ctx);
+ i -= 2;
+ }
+ }
+}
#define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0)
#define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
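find_used_callee_regs() above makes a single pass over the instruction stream, records each referenced BPF callee-saved register in a bitmask, and then builds the compact list that push_callee_regs()/pop_callee_regs() spill and restore in pairs. A toy scan in the same spirit; the three-insn program and register numbering are invented:

#include <stdio.h>

struct insn { int dst_reg, src_reg; };

int main(void)
{
	/* invented three-insn program touching r6 and r8 */
	struct insn prog[] = { { 6, 0 }, { 1, 8 }, { 6, 6 } };
	unsigned int used = 0;
	unsigned int i;

	for (i = 0; i < sizeof(prog) / sizeof(prog[0]); i++) {
		if (prog[i].dst_reg == 6 || prog[i].src_reg == 6)
			used |= 1u << 0;	/* r6 */
		if (prog[i].dst_reg == 7 || prog[i].src_reg == 7)
			used |= 1u << 1;	/* r7 */
		if (prog[i].dst_reg == 8 || prog[i].src_reg == 8)
			used |= 1u << 2;	/* r8 */
	}

	printf("used mask = %#x\n", used);	/* 0x5: r6 and r8 */
	return 0;
}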
@@ -296,20 +453,13 @@ static bool is_lsi_offset(int offset, int scale)
#define POKE_OFFSET (BTI_INSNS + 1)
/* Tail call offset to jump into */
-#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
+#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 4)
-static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
- bool is_exception_cb, u64 arena_vm_start)
+static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
- const u8 r6 = bpf2a64[BPF_REG_6];
- const u8 r7 = bpf2a64[BPF_REG_7];
- const u8 r8 = bpf2a64[BPF_REG_8];
- const u8 r9 = bpf2a64[BPF_REG_9];
const u8 fp = bpf2a64[BPF_REG_FP];
- const u8 tcc = bpf2a64[TCALL_CNT];
- const u8 fpb = bpf2a64[FP_BOTTOM];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const int idx0 = ctx->idx;
int cur_offset;
@@ -348,19 +498,28 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
emit(A64_NOP, ctx);
- if (!is_exception_cb) {
+ if (!prog->aux->exception_cb) {
/* Sign lr */
if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
emit(A64_PACIASP, ctx);
+
/* Save FP and LR registers to stay align with ARM64 AAPCS */
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
emit(A64_MOV(1, A64_FP, A64_SP), ctx);
- /* Save callee-saved registers */
- emit(A64_PUSH(r6, r7, A64_SP), ctx);
- emit(A64_PUSH(r8, r9, A64_SP), ctx);
- emit(A64_PUSH(fp, tcc, A64_SP), ctx);
- emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+ prepare_bpf_tail_call_cnt(ctx);
+
+ if (!ebpf_from_cbpf && is_main_prog) {
+ cur_offset = ctx->idx - idx0;
+ if (cur_offset != PROLOGUE_OFFSET) {
+ pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
+ cur_offset, PROLOGUE_OFFSET);
+ return -1;
+ }
+ /* BTI landing pad for the tail call, done with a BR */
+ emit_bti(A64_BTI_J, ctx);
+ }
+ push_callee_regs(ctx);
} else {
/*
* Exception callback receives FP of Main Program as third
@@ -372,58 +531,28 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
* callee-saved registers. The exception callback will not push
* anything and re-use the main program's stack.
*
- * 10 registers are on the stack
+ * 12 registers are on the stack
*/
- emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx);
+ emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx);
}
- /* Set up BPF prog stack base register */
- emit(A64_MOV(1, fp, A64_SP), ctx);
-
- if (!ebpf_from_cbpf && is_main_prog) {
- /* Initialize tail_call_cnt */
- emit(A64_MOVZ(1, tcc, 0, 0), ctx);
-
- cur_offset = ctx->idx - idx0;
- if (cur_offset != PROLOGUE_OFFSET) {
- pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
- cur_offset, PROLOGUE_OFFSET);
- return -1;
- }
-
- /* BTI landing pad for the tail call, done with a BR */
- emit_bti(A64_BTI_J, ctx);
- }
-
- /*
- * Program acting as exception boundary should save all ARM64
- * Callee-saved registers as the exception callback needs to recover
- * all ARM64 Callee-saved registers in its epilogue.
- */
- if (prog->aux->exception_boundary) {
- /*
- * As we are pushing two more registers, BPF_FP should be moved
- * 16 bytes
- */
- emit(A64_SUB_I(1, fp, fp, 16), ctx);
- emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
- }
-
- emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
+ if (ctx->fp_used)
+ /* Set up BPF prog stack base register */
+ emit(A64_MOV(1, fp, A64_SP), ctx);
/* Stack must be multiples of 16B */
ctx->stack_size = round_up(prog->aux->stack_depth, 16);
/* Set up function call stack */
- emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ if (ctx->stack_size)
+ emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
- if (arena_vm_start)
- emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx);
+ if (ctx->arena_vm_start)
+ emit_a64_mov_i64(arena_vm_base, ctx->arena_vm_start, ctx);
return 0;
}
-static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
@@ -432,11 +561,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 prg = bpf2a64[TMP_REG_2];
- const u8 tcc = bpf2a64[TCALL_CNT];
- const int idx0 = ctx->idx;
-#define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+ const u8 tcc = bpf2a64[TMP_REG_3];
+ const u8 ptr = bpf2a64[TCCNT_PTR];
size_t off;
+ __le32 *branch1 = NULL;
+ __le32 *branch2 = NULL;
+ __le32 *branch3 = NULL;
/* if (index >= array->map.max_entries)
* goto out;
@@ -446,16 +576,20 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit(A64_LDR32(tmp, r2, tmp), ctx);
emit(A64_MOV(0, r3, r3), ctx);
emit(A64_CMP(0, r3, tmp), ctx);
- emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
+ branch1 = ctx->image + ctx->idx;
+ emit(A64_NOP, ctx);
/*
- * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
+ * if ((*tail_call_cnt_ptr) >= MAX_TAIL_CALL_CNT)
* goto out;
- * tail_call_cnt++;
*/
emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
+ emit(A64_LDR64I(tcc, ptr, 0), ctx);
emit(A64_CMP(1, tcc, tmp), ctx);
- emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
+ branch2 = ctx->image + ctx->idx;
+ emit(A64_NOP, ctx);
+
+ /* (*tail_call_cnt_ptr)++; */
emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
/* prog = array->ptrs[index];
@@ -467,27 +601,37 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit(A64_ADD(1, tmp, r2, tmp), ctx);
emit(A64_LSL(1, prg, r3, 3), ctx);
emit(A64_LDR64(prg, tmp, prg), ctx);
- emit(A64_CBZ(1, prg, jmp_offset), ctx);
+ branch3 = ctx->image + ctx->idx;
+ emit(A64_NOP, ctx);
+
+ /* Update tail_call_cnt if the slot is populated. */
+ emit(A64_STR64I(tcc, ptr, 0), ctx);
+
+ /* restore SP */
+ if (ctx->stack_size)
+ emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+
+ pop_callee_regs(ctx);
/* goto *(prog->bpf_func + prologue_offset); */
off = offsetof(struct bpf_prog, bpf_func);
emit_a64_mov_i64(tmp, off, ctx);
emit(A64_LDR64(tmp, prg, tmp), ctx);
emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
- emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
emit(A64_BR(tmp), ctx);
- /* out: */
- if (out_offset == -1)
- out_offset = cur_offset;
- if (cur_offset != out_offset) {
- pr_err_once("tail_call out_offset = %d, expected %d!\n",
- cur_offset, out_offset);
- return -1;
+ if (ctx->image) {
+ off = &ctx->image[ctx->idx] - branch1;
+ *branch1 = cpu_to_le32(A64_B_(A64_COND_CS, off));
+
+ off = &ctx->image[ctx->idx] - branch2;
+ *branch2 = cpu_to_le32(A64_B_(A64_COND_CS, off));
+
+ off = &ctx->image[ctx->idx] - branch3;
+ *branch3 = cpu_to_le32(A64_CBZ(1, prg, off));
}
+
return 0;
-#undef cur_offset
-#undef jmp_offset
}
#ifdef CONFIG_ARM64_LSE_ATOMICS
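emit_bpf_tail_call() now emits NOP placeholders for its three forward branches and patches in the real conditional-branch/CBZ encodings once the out label's index is known, replacing the old fixed out_offset bookkeeping. A self-contained sketch of that placeholder-then-backpatch pattern; the branch encoder below is a simplified stand-in, not the kernel's aarch64_insn_gen_* helper:

#include <stdint.h>
#include <stdio.h>

#define NOP 0xd503201fu		/* AArch64 NOP encoding */

static uint32_t image[16];
static int idx;

static void emit(uint32_t insn)
{
	image[idx++] = insn;
}

/* hypothetical encoder for a conditional branch with a word offset */
static uint32_t gen_bcond(int off)
{
	return 0x54000000u | (((uint32_t)off & 0x7ffff) << 5);
}

int main(void)
{
	uint32_t *branch;

	branch = &image[idx];	/* remember the placeholder slot */
	emit(NOP);
	emit(NOP);		/* ...body between branch and target... */
	emit(NOP);

	/* "out:" reached -- the distance is now known, patch the slot */
	*branch = gen_bcond((int)(&image[idx] - branch));

	printf("patched offset: %d words\n", (int)(&image[idx] - branch));
	return 0;
}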
@@ -713,36 +857,18 @@ static void build_plt(struct jit_ctx *ctx)
plt->target = (u64)&dummy_tramp;
}
-static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
+static void build_epilogue(struct jit_ctx *ctx)
{
const u8 r0 = bpf2a64[BPF_REG_0];
- const u8 r6 = bpf2a64[BPF_REG_6];
- const u8 r7 = bpf2a64[BPF_REG_7];
- const u8 r8 = bpf2a64[BPF_REG_8];
- const u8 r9 = bpf2a64[BPF_REG_9];
- const u8 fp = bpf2a64[BPF_REG_FP];
- const u8 fpb = bpf2a64[FP_BOTTOM];
+ const u8 ptr = bpf2a64[TCCNT_PTR];
/* We're done with BPF stack */
- emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ if (ctx->stack_size)
+ emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
- /*
- * Program acting as exception boundary pushes R23 and R24 in addition
- * to BPF callee-saved registers. Exception callback uses the boundary
- * program's stack frame, so recover these extra registers in the above
- * two cases.
- */
- if (ctx->prog->aux->exception_boundary || is_exception_cb)
- emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx);
+ pop_callee_regs(ctx);
- /* Restore x27 and x28 */
- emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
- /* Restore fs (x25) and x26 */
- emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
-
- /* Restore callee-saved register */
- emit(A64_POP(r8, r9, A64_SP), ctx);
- emit(A64_POP(r6, r7, A64_SP), ctx);
+ emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);
/* Restore FP/LR registers */
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
@@ -862,7 +988,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 fp = bpf2a64[BPF_REG_FP];
- const u8 fpb = bpf2a64[FP_BOTTOM];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const s16 off = insn->off;
const s32 imm = insn->imm;
@@ -1314,9 +1439,9 @@ emit_cond_jmp:
emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
src = tmp2;
}
- if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
- src_adj = fpb;
- off_adj = off + ctx->fpb_offset;
+ if (src == fp) {
+ src_adj = A64_SP;
+ off_adj = off + ctx->stack_size;
} else {
src_adj = src;
off_adj = off;
@@ -1407,9 +1532,9 @@ emit_cond_jmp:
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
dst = tmp2;
}
- if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
- dst_adj = fpb;
- off_adj = off + ctx->fpb_offset;
+ if (dst == fp) {
+ dst_adj = A64_SP;
+ off_adj = off + ctx->stack_size;
} else {
dst_adj = dst;
off_adj = off;
@@ -1469,9 +1594,9 @@ emit_cond_jmp:
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
dst = tmp2;
}
- if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
- dst_adj = fpb;
- off_adj = off + ctx->fpb_offset;
+ if (dst == fp) {
+ dst_adj = A64_SP;
+ off_adj = off + ctx->stack_size;
} else {
dst_adj = dst;
off_adj = off;
@@ -1540,79 +1665,6 @@ emit_cond_jmp:
return 0;
}
-/*
- * Return 0 if FP may change at runtime, otherwise find the minimum negative
- * offset to FP, converts it to positive number, and align down to 8 bytes.
- */
-static int find_fpb_offset(struct bpf_prog *prog)
-{
- int i;
- int offset = 0;
-
- for (i = 0; i < prog->len; i++) {
- const struct bpf_insn *insn = &prog->insnsi[i];
- const u8 class = BPF_CLASS(insn->code);
- const u8 mode = BPF_MODE(insn->code);
- const u8 src = insn->src_reg;
- const u8 dst = insn->dst_reg;
- const s32 imm = insn->imm;
- const s16 off = insn->off;
-
- switch (class) {
- case BPF_STX:
- case BPF_ST:
- /* fp holds atomic operation result */
- if (class == BPF_STX && mode == BPF_ATOMIC &&
- ((imm == BPF_XCHG ||
- imm == (BPF_FETCH | BPF_ADD) ||
- imm == (BPF_FETCH | BPF_AND) ||
- imm == (BPF_FETCH | BPF_XOR) ||
- imm == (BPF_FETCH | BPF_OR)) &&
- src == BPF_REG_FP))
- return 0;
-
- if (mode == BPF_MEM && dst == BPF_REG_FP &&
- off < offset)
- offset = insn->off;
- break;
-
- case BPF_JMP32:
- case BPF_JMP:
- break;
-
- case BPF_LDX:
- case BPF_LD:
- /* fp holds load result */
- if (dst == BPF_REG_FP)
- return 0;
-
- if (class == BPF_LDX && mode == BPF_MEM &&
- src == BPF_REG_FP && off < offset)
- offset = off;
- break;
-
- case BPF_ALU:
- case BPF_ALU64:
- default:
- /* fp holds ALU result */
- if (dst == BPF_REG_FP)
- return 0;
- }
- }
-
- if (offset < 0) {
- /*
- * safely be converted to a positive 'int', since insn->off
- * is 's16'
- */
- offset = -offset;
- /* align down to 8 bytes */
- offset = ALIGN_DOWN(offset, 8);
- }
-
- return offset;
-}
-
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
const struct bpf_prog *prog = ctx->prog;
@@ -1631,13 +1683,11 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx;
ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx;
continue;
}
if (ret)
@@ -1648,8 +1698,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
* the last element with the offset after the last
* instruction (end of program)
*/
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx;
return 0;
}
@@ -1701,9 +1750,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
- u64 arena_vm_start;
u8 *image_ptr;
u8 *ro_image_ptr;
+ int body_idx;
+ int exentry_idx;
if (!prog->jit_requested)
return orig_prog;
@@ -1719,7 +1769,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = tmp;
}
- arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
@@ -1749,17 +1798,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- ctx.fpb_offset = find_fpb_offset(prog);
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
+ ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
- /*
- * 1. Initial fake pass to compute ctx->idx and ctx->offset.
+ /* Pass 1: Estimate the maximum image size.
*
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
- arena_vm_start)) {
+ if (build_prologue(&ctx, was_classic)) {
prog = orig_prog;
goto out_off;
}
@@ -1770,14 +1817,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.epilogue_offset = ctx.idx;
- build_epilogue(&ctx, prog->aux->exception_cb);
+ build_epilogue(&ctx);
build_plt(&ctx);
extable_align = __alignof__(struct exception_table_entry);
extable_size = prog->aux->num_exentries *
sizeof(struct exception_table_entry);
- /* Now we know the actual image size. */
+ /* Now we know the maximum image size. */
prog_size = sizeof(u32) * ctx.idx;
/* also allocate space for plt target */
extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
@@ -1790,7 +1837,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- /* 2. Now, the actual pass. */
+ /* Pass 2: Determine jited position and result for each instruction */
/*
* Use the image(RW) for writing the JITed instructions. But also save
@@ -1806,30 +1853,56 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
skip_init_ctx:
ctx.idx = 0;
ctx.exentry_idx = 0;
+ ctx.write = true;
+
+ build_prologue(&ctx, was_classic);
- build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);
+ /* Record exentry_idx and body_idx before first build_body */
+ exentry_idx = ctx.exentry_idx;
+ body_idx = ctx.idx;
+	/* Don't write body instructions to memory for now */
+ ctx.write = false;
if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
goto out_free_hdr;
}
- build_epilogue(&ctx, prog->aux->exception_cb);
+ ctx.epilogue_offset = ctx.idx;
+ ctx.exentry_idx = exentry_idx;
+ ctx.idx = body_idx;
+ ctx.write = true;
+
+ /* Pass 3: Adjust jump offset and write final image */
+ if (build_body(&ctx, extra_pass) ||
+ WARN_ON_ONCE(ctx.idx != ctx.epilogue_offset)) {
+ prog = orig_prog;
+ goto out_free_hdr;
+ }
+
+ build_epilogue(&ctx);
build_plt(&ctx);
- /* 3. Extra pass to validate JITed code. */
+ /* Extra pass to validate JITed code. */
if (validate_ctx(&ctx)) {
prog = orig_prog;
goto out_free_hdr;
}
+ /* update the real prog size */
+ prog_size = sizeof(u32) * ctx.idx;
+
/* And we're done. */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
if (!prog->is_func || extra_pass) {
- if (extra_pass && ctx.idx != jit_data->ctx.idx) {
- pr_err_once("multi-func JIT bug %d != %d\n",
+		/* The jited image may shrink, since the jited result for
+		 * a BPF_CALL to a subprog may change from an indirect
+		 * call to a direct call.
+ */
+ if (extra_pass && ctx.idx > jit_data->ctx.idx) {
+ pr_err_once("multi-func JIT bug %d > %d\n",
ctx.idx, jit_data->ctx.idx);
prog->bpf_func = NULL;
prog->jited = 0;
@@ -2300,6 +2373,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
.image = image,
.ro_image = ro_image,
.idx = 0,
+ .write = true,
};
nregs = btf_func_model_nregs(m);
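emit_call() above now prefers a direct BL whenever the target lies within the +/-128 MiB reach of an AArch64 immediate branch from the instruction's final read-only image address, falling back to an indirect BLR through a temporary register otherwise. A standalone sketch of that reachability test (addresses are placeholders):

#include <stdbool.h>
#include <stdio.h>

#define SZ_128M (128L * 1024 * 1024)

/* an AArch64 BL immediate reaches +/-128 MiB from the call site */
static bool needs_indirect_call(long target, long pc)
{
	long offset = target - pc;

	return offset < -SZ_128M || offset >= SZ_128M;
}

int main(void)
{
	long pc = 0x10000000L;	/* placeholder image address */

	printf("near: %d\n", needs_indirect_call(pc + 0x100, pc));	/* 0 */
	printf("far:  %d\n", needs_indirect_call(pc + SZ_128M, pc));	/* 1 */
	return 0;
}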
diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c
index 7f826331d409..1047865e82a9 100644
--- a/arch/csky/abiv1/mmap.c
+++ b/arch/csky/abiv1/mmap.c
@@ -23,7 +23,8 @@
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c
index 2ca886e4a458..5c9ef63c29f1 100644
--- a/arch/csky/kernel/vdso.c
+++ b/arch/csky/kernel/vdso.c
@@ -45,9 +45,16 @@ arch_initcall(vdso_init);
int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
{
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long vdso_base, vdso_len;
int ret;
+ static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ };
+ static struct vm_special_mapping vvar_mapping = {
+ .name = "[vvar]",
+ };
vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
@@ -65,22 +72,29 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
*/
mm->context.vdso = (void *)vdso_base;
- ret =
- install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ vdso_mapping.pages = vdso_pagelist;
+ vma =
+ _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
- vdso_pagelist);
+ &vdso_mapping);
- if (unlikely(ret)) {
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
mm->context.vdso = NULL;
goto end;
}
vdso_base += (vdso_pages << PAGE_SHIFT);
- ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
- (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+ vvar_mapping.pages = &vdso_pagelist[vdso_pages];
+ vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vvar_mapping);
- if (unlikely(ret))
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
mm->context.vdso = NULL;
+ goto end;
+ }
+ ret = 0;
end:
mmap_write_unlock(mm);
return ret;
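The csky and hexagon vdso conversions both move from install_special_mapping(), which returns an int, to _install_special_mapping(), which returns either a VMA pointer or an ERR_PTR-encoded errno, so the callers now test with IS_ERR() and extract the code with PTR_ERR(). A minimal userspace model of that encoding convention; MAX_ERRNO and the helpers are simplified stand-ins:

#include <stdio.h>

#define MAX_ERRNO 4095UL

static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err(const void *p)
{
	return (long)p;
}

/* stand-in for _install_special_mapping(): pointer or ERR_PTR */
static void *install(int fail)
{
	return fail ? (void *)-12L /* -ENOMEM */ : (void *)0x1000L;
}

int main(void)
{
	void *vma = install(1);
	int ret = 0;

	if (is_err(vma))
		ret = (int)ptr_err(vma);

	printf("ret = %d\n", ret);	/* -12 */
	return 0;
}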
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
index 2e4872d62124..8119084dc519 100644
--- a/arch/hexagon/kernel/vdso.c
+++ b/arch/hexagon/kernel/vdso.c
@@ -51,7 +51,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
int ret;
unsigned long vdso_base;
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
+ static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ };
if (mmap_write_lock_killable(mm))
return -EINTR;
@@ -66,16 +70,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
}
/* MAYWRITE to allow gdb to COW and set breakpoints. */
- ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ vdso_mapping.pages = &vdso_page;
+ vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &vdso_page);
+ &vdso_mapping);
- if (ret)
+ ret = PTR_ERR(vma);
+ if (IS_ERR(vma))
goto up_fail;
mm->context.vdso = (void *)vdso_base;
-
+ ret = 0;
up_fail:
mmap_write_unlock(mm);
return ret;
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 0eb0436ad4ce..bb35c34f86d2 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -25,6 +25,8 @@ config LOONGARCH
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
@@ -82,6 +84,7 @@ config LOONGARCH
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
select GENERIC_IOREMAP if !ARCH_IOREMAP
@@ -147,7 +150,7 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -267,7 +270,7 @@ config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
config AS_HAS_THIN_ADD_SUB
- def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
+ def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index b4252c357c8e..75b366407a60 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -96,7 +96,6 @@ CONFIG_ZPOOL=y
CONFIG_ZSWAP=y
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=y
CONFIG_ZSMALLOC=m
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 4635b755b2b4..5b5a6c90e6e2 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -8,5 +8,6 @@ generic-y += early_ioremap.h
generic-y += qrwlock.h
generic-y += user.h
generic-y += ioctl.h
+generic-y += mmzone.h
generic-y += statfs.h
generic-y += param.h
diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index 99af8b3160a8..c86f0ab922ec 100644
--- a/arch/loongarch/include/asm/atomic.h
+++ b/arch/loongarch/include/asm/atomic.h
@@ -15,6 +15,7 @@
#define __LL "ll.w "
#define __SC "sc.w "
#define __AMADD "amadd.w "
+#define __AMOR "amor.w "
#define __AMAND_DB "amand_db.w "
#define __AMOR_DB "amor_db.w "
#define __AMXOR_DB "amxor_db.w "
@@ -22,6 +23,7 @@
#define __LL "ll.d "
#define __SC "sc.d "
#define __AMADD "amadd.d "
+#define __AMOR "amor.d "
#define __AMAND_DB "amand_db.d "
#define __AMOR_DB "amor_db.d "
#define __AMXOR_DB "amxor_db.d "
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
index 16a716f88a5c..fc83bb32f9f0 100644
--- a/arch/loongarch/include/asm/cpu-features.h
+++ b/arch/loongarch/include/asm/cpu-features.h
@@ -51,6 +51,7 @@
#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS)
#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR)
+#define cpu_has_iocsr cpu_opt(LOONGARCH_CPU_IOCSR)
#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB)
#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH)
#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT)
@@ -65,6 +66,7 @@
#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW)
+#define cpu_has_lspw cpu_opt(LOONGARCH_CPU_LSPW)
#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT)
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
index 843f9c4ec980..98cf4d7b4b0a 100644
--- a/arch/loongarch/include/asm/cpu.h
+++ b/arch/loongarch/include/asm/cpu.h
@@ -87,19 +87,21 @@ enum cpu_type_enum {
#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */
#define CPU_FEATURE_TLB 13 /* CPU has TLB */
#define CPU_FEATURE_CSR 14 /* CPU has CSR */
-#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */
-#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */
-#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */
-#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */
-#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */
-#define CPU_FEATURE_PMP 20 /* CPU has perfermance counter */
-#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */
-#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */
-#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */
-#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */
-#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */
-#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */
-#define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */
+#define CPU_FEATURE_IOCSR 15 /* CPU has IOCSR */
+#define CPU_FEATURE_WATCH 16 /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT 17 /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI 18 /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI 19 /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH 20 /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP		21	/* CPU has performance counter */
+#define CPU_FEATURE_SCALEFREQ 22 /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE 23 /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE 24 /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID 25 /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */
+#define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */
+#define CPU_FEATURE_LSPW 28 /* CPU has LSPW (lddir/ldpte instructions) */
+#define CPU_FEATURE_AVECINT 29 /* CPU has AVEC interrupt */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
@@ -115,6 +117,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM)
#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS)
#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB)
+#define LOONGARCH_CPU_IOCSR BIT_ULL(CPU_FEATURE_IOCSR)
#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR)
#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH)
#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT)
@@ -128,6 +131,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW)
+#define LOONGARCH_CPU_LSPW BIT_ULL(CPU_FEATURE_LSPW)
#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT)
#endif /* _ASM_CPU_H */
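The renumbered feature list above follows the usual pattern: one small integer per feature, a BIT_ULL() mask derived from it, and cpu_has_* testers over the per-CPU options word. A compact standalone sketch; the two bit positions are taken from the list above, everything else is invented:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))
#define CPU_FEATURE_IOCSR	15	/* positions from the list above */
#define CPU_FEATURE_LSPW	28

#define CPU_IOCSR		BIT_ULL(CPU_FEATURE_IOCSR)
#define CPU_LSPW		BIT_ULL(CPU_FEATURE_LSPW)

struct cpuinfo { uint64_t options; };

#define cpu_has(c, mask)	(!!((c)->options & (mask)))

int main(void)
{
	struct cpuinfo c = { .options = CPU_IOCSR };

	printf("iocsr=%d lspw=%d\n",
	       cpu_has(&c, CPU_IOCSR), cpu_has(&c, CPU_LSPW));
	return 0;
}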
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 04bf1a7f903a..26542413a5b0 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -62,6 +62,7 @@
#define LOONGARCH_CPUCFG1 0x1
#define CPUCFG1_ISGR32 BIT(0)
#define CPUCFG1_ISGR64 BIT(1)
+#define CPUCFG1_ISA GENMASK(1, 0)
#define CPUCFG1_PAGING BIT(2)
#define CPUCFG1_IOCSR BIT(3)
#define CPUCFG1_PABITS GENMASK(11, 4)
diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h
index 9f97c3453b9c..304363bd3935 100644
--- a/arch/loongarch/include/asm/mmu_context.h
+++ b/arch/loongarch/include/asm/mmu_context.h
@@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
/* Normal, classic get_new_mmu_context */
static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush)
{
u64 asid = asid_cache(cpu);
if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
- local_flush_tlb_user(); /* start new asid cycle */
+ *need_flush = true; /* start new asid cycle */
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
@@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
+static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl)
+{
+ __asm__ __volatile__(
+ "csrwr %[pgdl_val], %[pgdl_reg] \n\t"
+ "csrwr %[asid_val], %[asid_reg] \n\t"
+ : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl)
+ : [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL)
+ : "memory"
+ );
+}
+
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+ bool need_flush = false;
unsigned int cpu = smp_processor_id();
/* Check if our ASID is of an older version and thus invalid */
if (!asid_valid(next, cpu))
- get_new_mmu_context(next, cpu);
-
- write_csr_asid(cpu_asid(cpu, next));
+ get_new_mmu_context(next, cpu, &need_flush);
if (next != &init_mm)
- csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
+ atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);
else
- csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+ atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir);
+
+ if (need_flush)
+		local_flush_tlb_user(); /* Flush the TLB after updating the ASID */
/*
* Mark current->active_mm as not "active" anymore.
@@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
if (asid == cpu_asid(cpu, mm)) {
+ bool need_flush = false;
+
if (!current->mm || (current->mm == mm)) {
- get_new_mmu_context(mm, cpu);
+ get_new_mmu_context(mm, cpu, &need_flush);
+
write_csr_asid(cpu_asid(cpu, mm));
+ if (need_flush)
+ local_flush_tlb_user(); /* Flush tlb after update ASID */
+
goto out;
}
}
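get_new_mmu_context() now reports an ASID wrap through *need_flush instead of flushing inline, which lets switch_mm_irqs_off() write the new ASID and PGD first and flush the TLB only afterwards. A toy model of that generation-counter roll-over, assuming an 8-bit ASID space rather than the real CSR layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_MASK 0xffu		/* assumed 8-bit ASID space */

static uint64_t asid_cache = 0x1ff;	/* generation lives above bit 7 */

static uint64_t get_new_asid(bool *need_flush)
{
	if (!(++asid_cache & ASID_MASK))
		*need_flush = true;	/* wrapped: a new cycle starts */
	return asid_cache;
}

int main(void)
{
	bool need_flush = false;
	uint64_t asid = get_new_asid(&need_flush);

	/* caller writes ASID/PGD first, then flushes only on a wrap */
	printf("asid=%#llx flush=%d\n",
	       (unsigned long long)asid, need_flush);
	return 0;
}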
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
deleted file mode 100644
index 2b9a90727e19..000000000000
--- a/arch/loongarch/include/asm/mmzone.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Author: Huacai Chen (chenhuacai@loongson.cn)
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#include <asm/page.h>
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 8f290e5546cf..87be9b14e9da 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -68,75 +68,6 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
-static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
-{
- unsigned long ret;
-
- switch (size) {
- case 1:
- __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 2:
- __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 4:
- __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- case 8:
- __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
- : [ret] "=&r"(ret)
- : [ptr] "r"(ptr)
- : "memory");
- break;
- default:
- ret = 0;
- BUILD_BUG();
- }
-
- return ret;
-}
-
-static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 2:
- __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 4:
- __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- case 8:
- __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
- :
- : [val] "r" (val), [ptr] "r" (ptr)
- : "memory");
- break;
- default:
- BUILD_BUG();
- }
-}
-
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
switch (size) {
@@ -157,6 +88,33 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return 0;
}
+#define __pcpu_op_1(op) op ".b "
+#define __pcpu_op_2(op) op ".h "
+#define __pcpu_op_4(op) op ".w "
+#define __pcpu_op_8(op) op ".d "
+
+#define _percpu_read(size, _pcp) \
+({ \
+ typeof(_pcp) __pcp_ret; \
+ \
+ __asm__ __volatile__( \
+ __pcpu_op_##size("ldx") "%[ret], $r21, %[ptr] \n" \
+ : [ret] "=&r"(__pcp_ret) \
+ : [ptr] "r"(&(_pcp)) \
+ : "memory"); \
+ \
+ __pcp_ret; \
+})
+
+#define _percpu_write(size, _pcp, _val) \
+do { \
+ __asm__ __volatile__( \
+ __pcpu_op_##size("stx") "%[val], $r21, %[ptr] \n" \
+ : \
+ : [val] "r"(_val), [ptr] "r"(&(_pcp)) \
+ : "memory"); \
+} while (0)
+
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
@@ -167,18 +125,6 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
__ret; \
})
-#define _percpu_read(pcp) \
-({ \
- typeof(pcp) __retval; \
- __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
- __retval; \
-})
-
-#define _percpu_write(pcp, val) \
-do { \
- __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
-} while (0) \
-
#define _pcp_protect(operation, pcp, val) \
({ \
typeof(pcp) __retval; \
@@ -215,15 +161,15 @@ do { \
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
-#define this_cpu_read_1(pcp) _percpu_read(pcp)
-#define this_cpu_read_2(pcp) _percpu_read(pcp)
-#define this_cpu_read_4(pcp) _percpu_read(pcp)
-#define this_cpu_read_8(pcp) _percpu_read(pcp)
+#define this_cpu_read_1(pcp) _percpu_read(1, pcp)
+#define this_cpu_read_2(pcp) _percpu_read(2, pcp)
+#define this_cpu_read_4(pcp) _percpu_read(4, pcp)
+#define this_cpu_read_8(pcp) _percpu_read(8, pcp)
-#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_1(pcp, val) _percpu_write(1, pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(2, pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val)
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
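The new _percpu_read()/_percpu_write() macros pick the load/store width by token-pasting the access size into __pcpu_op_##size, replacing the old switch-on-sizeof helper functions. A plain-C illustration of the same dispatch, with the inline asm replaced by printable strings:

#include <stdio.h>

#define __op_1(op)	op ".b "
#define __op_2(op)	op ".h "
#define __op_4(op)	op ".w "
#define __op_8(op)	op ".d "

/* paste the size into the macro name, as __pcpu_op_##size does */
#define op_for(size, op)	__op_##size(op)

int main(void)
{
	printf("%s\n", op_for(4, "ldx"));	/* "ldx.w " */
	printf("%s\n", op_for(8, "stx"));	/* "stx.d " */
	return 0;
}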
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 85431f20a14d..9965f52ef65b 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -331,29 +331,23 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+ if (pte_none(ptep_get(buddy))) {
#ifdef CONFIG_SMP
- /*
- * For SMP, multiple CPUs can race, so we need to do
- * this atomically.
- */
- unsigned long page_global = _PAGE_GLOBAL;
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "1:" __LL "%[tmp], %[buddy] \n"
- " bnez %[tmp], 2f \n"
- " or %[tmp], %[tmp], %[global] \n"
- __SC "%[tmp], %[buddy] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- "2: \n"
- __WEAK_LLSC_MB
- : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
- : [global] "r" (page_global));
+ /*
+ * For SMP, multiple CPUs can race, so we need
+ * to do this atomically.
+ */
+ __asm__ __volatile__(
+ __AMOR "$zero, %[global], %[buddy] \n"
+ : [buddy] "+ZB" (buddy->pte)
+ : [global] "r" (_PAGE_GLOBAL)
+ : "memory");
+
+ DBAR(0b11000); /* o_wrw = 0b11000 */
#else /* !CONFIG_SMP */
- if (pte_none(ptep_get(buddy)))
WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
#endif /* CONFIG_SMP */
+ }
}
}
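The set_pte() buddy update above replaces a hand-rolled LL/SC retry loop with a single amor (atomic OR) instruction followed by a DBAR barrier. A C11 stand-in showing the same one-shot read-modify-write shape; the _PAGE_GLOBAL bit position is assumed:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_GLOBAL (1u << 6)	/* assumed bit position */

static _Atomic uint64_t buddy_pte;

int main(void)
{
	/* one RMW instruction's worth of work -- no LL/SC retry loop */
	atomic_fetch_or(&buddy_pte, PAGE_GLOBAL);

	printf("pte = %#llx\n",
	       (unsigned long long)atomic_load(&buddy_pte));
	return 0;
}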
diff --git a/arch/loongarch/include/asm/set_memory.h b/arch/loongarch/include/asm/set_memory.h
new file mode 100644
index 000000000000..d70505b6676c
--- /dev/null
+++ b/arch/loongarch/include/asm/set_memory.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_SET_MEMORY_H
+#define _ASM_LOONGARCH_SET_MEMORY_H
+
+/*
+ * Functions to change memory attributes.
+ */
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+
+bool kernel_page_present(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page);
+
+#endif /* _ASM_LOONGARCH_SET_MEMORY_H */
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
index 66128dec0bf6..50273c9187d0 100644
--- a/arch/loongarch/include/asm/topology.h
+++ b/arch/loongarch/include/asm/topology.h
@@ -8,6 +8,7 @@
#include <linux/smp.h>
#ifdef CONFIG_NUMA
+#include <asm/numa.h>
extern cpumask_t cpus_on_node[];
diff --git a/arch/loongarch/include/uapi/asm/hwcap.h b/arch/loongarch/include/uapi/asm/hwcap.h
index 6955a7cb2c65..2b34e56cfa9e 100644
--- a/arch/loongarch/include/uapi/asm/hwcap.h
+++ b/arch/loongarch/include/uapi/asm/hwcap.h
@@ -17,5 +17,6 @@
#define HWCAP_LOONGARCH_LBT_ARM (1 << 11)
#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
#define HWCAP_LOONGARCH_PTW (1 << 13)
+#define HWCAP_LOONGARCH_LSPW (1 << 14)
#endif /* _UAPI_ASM_HWCAP_H */
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
index 6c22f616b8f1..5cd121275bac 100644
--- a/arch/loongarch/include/uapi/asm/sigcontext.h
+++ b/arch/loongarch/include/uapi/asm/sigcontext.h
@@ -9,7 +9,6 @@
#define _UAPI_ASM_SIGCONTEXT_H
#include <linux/types.h>
-#include <linux/posix_types.h>
/* FP context was used */
#define SC_USED_FP (1 << 0)
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 929a497c987e..f1a74b80f22c 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/efi-bgrt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
@@ -212,6 +213,9 @@ void __init acpi_boot_table_init(void)
/* Do not enable ACPI SPCR console by default */
acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+ if (IS_ENABLED(CONFIG_ACPI_BGRT))
+ acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
+
return;
fdt_earlycon:
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index 14f0449f5452..cbce099037b2 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -91,12 +91,30 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
unsigned int config;
unsigned long asid_mask;
- c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
- LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+ c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_VINT;
elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
config = read_cpucfg(LOONGARCH_CPUCFG1);
+
+ switch (config & CPUCFG1_ISA) {
+ case 0:
+ set_isa(c, LOONGARCH_CPU_ISA_LA32R);
+ break;
+ case 1:
+ set_isa(c, LOONGARCH_CPU_ISA_LA32S);
+ break;
+ case 2:
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ break;
+ default:
+ pr_warn("Warning: unknown ISA level\n");
+ }
+
+ if (config & CPUCFG1_PAGING)
+ c->options |= LOONGARCH_CPU_TLB;
+ if (config & CPUCFG1_IOCSR)
+ c->options |= LOONGARCH_CPU_IOCSR;
if (config & CPUCFG1_UAL) {
c->options |= LOONGARCH_CPU_UAL;
elf_hwcap |= HWCAP_LOONGARCH_UAL;
@@ -139,6 +157,10 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_PTW;
elf_hwcap |= HWCAP_LOONGARCH_PTW;
}
+ if (config & CPUCFG2_LSPW) {
+ c->options |= LOONGARCH_CPU_LSPW;
+ elf_hwcap |= HWCAP_LOONGARCH_LSPW;
+ }
if (config & CPUCFG2_LVZP) {
c->options |= LOONGARCH_CPU_LVZ;
elf_hwcap |= HWCAP_LOONGARCH_LVZ;
@@ -162,22 +184,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
if (config & CPUCFG6_PMP)
c->options |= LOONGARCH_CPU_PMP;
- config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
- if (config & IOCSRF_CSRIPI)
- c->options |= LOONGARCH_CPU_CSRIPI;
- if (config & IOCSRF_EXTIOI)
- c->options |= LOONGARCH_CPU_EXTIOI;
- if (config & IOCSRF_FREQSCALE)
- c->options |= LOONGARCH_CPU_SCALEFREQ;
- if (config & IOCSRF_FLATMODE)
- c->options |= LOONGARCH_CPU_FLATMODE;
- if (config & IOCSRF_EIODECODE)
- c->options |= LOONGARCH_CPU_EIODECODE;
- if (config & IOCSRF_AVEC)
- c->options |= LOONGARCH_CPU_AVECINT;
- if (config & IOCSRF_VM)
- c->options |= LOONGARCH_CPU_HYPERVISOR;
-
config = csr_read32(LOONGARCH_CSR_ASID);
config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
asid_mask = GENMASK(config - 1, 0);
@@ -210,6 +216,9 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
default:
pr_warn("Warning: unknown TLB type\n");
}
+
+ if (get_num_brps() + get_num_wrps())
+ c->options |= LOONGARCH_CPU_WATCH;
}
#define MAX_NAME_LEN 32
@@ -220,52 +229,67 @@ static char cpu_full_name[MAX_NAME_LEN] = " - ";
static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
{
+ uint32_t config;
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
+ const char *core_name = "Unknown";
- if (!__cpu_full_name[cpu])
- __cpu_full_name[cpu] = cpu_full_name;
-
- *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
- *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
-
- switch (c->processor_id & PRID_SERIES_MASK) {
- case PRID_SERIES_LA132:
+ switch (BIT(fls(c->isa_level) - 1)) {
+ case LOONGARCH_CPU_ISA_LA32R:
+ case LOONGARCH_CPU_ISA_LA32S:
c->cputype = CPU_LOONGSON32;
- set_isa(c, LOONGARCH_CPU_ISA_LA32S);
__cpu_family[cpu] = "Loongson-32bit";
- pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
break;
- case PRID_SERIES_LA264:
+ case LOONGARCH_CPU_ISA_LA64:
c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
+ break;
+ }
+
+ switch (c->processor_id & PRID_SERIES_MASK) {
+ case PRID_SERIES_LA132:
+ core_name = "LA132";
+ break;
+ case PRID_SERIES_LA264:
+ core_name = "LA264";
break;
case PRID_SERIES_LA364:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
+ core_name = "LA364";
break;
case PRID_SERIES_LA464:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
+ core_name = "LA464";
break;
case PRID_SERIES_LA664:
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
+ core_name = "LA664";
break;
- default: /* Default to 64 bit */
- c->cputype = CPU_LOONGSON64;
- set_isa(c, LOONGARCH_CPU_ISA_LA64);
- __cpu_family[cpu] = "Loongson-64bit";
- pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
}
+
+ pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
+
+ if (!cpu_has_iocsr)
+ return;
+
+ if (!__cpu_full_name[cpu])
+ __cpu_full_name[cpu] = cpu_full_name;
+
+ *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
+ *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+
+ config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
+ if (config & IOCSRF_CSRIPI)
+ c->options |= LOONGARCH_CPU_CSRIPI;
+ if (config & IOCSRF_EXTIOI)
+ c->options |= LOONGARCH_CPU_EXTIOI;
+ if (config & IOCSRF_FREQSCALE)
+ c->options |= LOONGARCH_CPU_SCALEFREQ;
+ if (config & IOCSRF_FLATMODE)
+ c->options |= LOONGARCH_CPU_FLATMODE;
+ if (config & IOCSRF_EIODECODE)
+ c->options |= LOONGARCH_CPU_EIODECODE;
+ if (config & IOCSRF_AVEC)
+ c->options |= LOONGARCH_CPU_AVECINT;
+ if (config & IOCSRF_VM)
+ c->options |= LOONGARCH_CPU_HYPERVISOR;
}
#ifdef CONFIG_64BIT
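
The ISA dispatch above relies on isa_level being cumulative: set_isa() records the probed level together with every level below it, so the highest set bit identifies the CPU. A worked sketch (bit values illustrative, assuming the usual LA32R/LA32S/LA64 ordering):

    #include <linux/bitops.h>

    static unsigned int top_isa_level(unsigned int isa_level)
    {
        /* e.g. an LA64 core accumulates 0x1 | 0x2 | 0x4 == 7
         * (LA32R | LA32S | LA64); fls(7) == 3, so BIT(2) == 0x4
         * isolates the highest level for the switch above */
        return BIT(fls(isa_level) - 1);
    }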
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 8fe21f868f72..84fe7f854820 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -27,10 +27,7 @@
#include <asm/time.h>
int numa_off;
-struct pglist_data *node_data[MAX_NUMNODES];
unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
-
-EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(node_distances);
static struct numa_meminfo numa_meminfo;
@@ -190,24 +187,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
-static void __init alloc_node_data(int nid)
-{
- void *nd;
- unsigned long nd_pa;
- size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
-
- nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
- if (!nd_pa) {
- pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid);
- return;
- }
-
- nd = __va(nd_pa);
-
- node_data[nid] = nd;
- memset(nd, 0, sizeof(pg_data_t));
-}
-
static void __init node_mem_init(unsigned int node)
{
unsigned long start_pfn, end_pfn;
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
index 0d33cbc47e51..6ce46d92f1f1 100644
--- a/arch/loongarch/kernel/proc.c
+++ b/arch/loongarch/kernel/proc.c
@@ -31,6 +31,7 @@ int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
+ unsigned int isa = cpu_data[n].isa_level;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
@@ -64,9 +65,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_pabits + 1, cpu_vabits + 1);
seq_printf(m, "ISA\t\t\t:");
- if (cpu_has_loongarch32)
- seq_printf(m, " loongarch32");
- if (cpu_has_loongarch64)
+ if (isa & LOONGARCH_CPU_ISA_LA32R)
+ seq_printf(m, " loongarch32r");
+ if (isa & LOONGARCH_CPU_ISA_LA32S)
+ seq_printf(m, " loongarch32s");
+ if (isa & LOONGARCH_CPU_ISA_LA64)
seq_printf(m, " loongarch64");
seq_printf(m, "\n");
@@ -81,6 +84,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_complex) seq_printf(m, " complex");
if (cpu_has_crypto) seq_printf(m, " crypto");
if (cpu_has_ptw) seq_printf(m, " ptw");
+ if (cpu_has_lspw) seq_printf(m, " lspw");
if (cpu_has_lvz) seq_printf(m, " lvz");
if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
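
With the flag exported, the quickest userspace check is the cpuinfo feature line. A throwaway sketch (assuming the LoongArch field is named "Features"):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[512];
        FILE *f = fopen("/proc/cpuinfo", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (!strncmp(line, "Features", 8) && strstr(line, " lspw"))
                puts("lspw supported");
        fclose(f);
        return 0;
    }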
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index ba5d0930a74f..168bd97540f8 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -79,7 +79,3 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
syscall_exit_to_user_mode(regs);
}
-
-#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
-STACK_FRAME_NON_STANDARD(do_syscall);
-#endif
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 844736b99d38..27e9b94c0a0b 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -261,7 +261,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -ENOIOCTLCMD;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
unsigned long env, gcfg = 0;
@@ -300,7 +300,7 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
write_csr_gcfg(0);
write_csr_gstat(0);
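
The rename is mechanical; the hooks keep their contract of arming and disarming virtualization state on the current CPU. Skeleton of the new names (sketch only, bodies elided):

    int kvm_arch_enable_virtualization_cpu(void)
    {
        /* set up per-CPU guest state, e.g. GCFG/GSTAT on LoongArch */
        return 0;
    }

    void kvm_arch_disable_virtualization_cpu(void)
    {
        /* tear the per-CPU state back down */
    }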
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
index e4d1e581dbae..278be2c8fc36 100644
--- a/arch/loongarch/mm/Makefile
+++ b/arch/loongarch/mm/Makefile
@@ -4,7 +4,8 @@
#
obj-y += init.o cache.o tlb.o tlbex.o extable.o \
- fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
+ fault.o ioremap.o maccess.o mmap.o pgtable.o \
+ page.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 97b40defde06..deefd9617d00 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -31,11 +31,52 @@
int show_unhandled_signals = 1;
+static int __kprobes spurious_fault(unsigned long write, unsigned long address)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (!(address & __UA_LIMIT))
+ return 0;
+
+ pgd = pgd_offset_k(address);
+ if (!pgd_present(pgdp_get(pgd)))
+ return 0;
+
+ p4d = p4d_offset(pgd, address);
+ if (!p4d_present(p4dp_get(p4d)))
+ return 0;
+
+ pud = pud_offset(p4d, address);
+ if (!pud_present(pudp_get(pud)))
+ return 0;
+
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(pmdp_get(pmd)))
+ return 0;
+
+ if (pmd_leaf(*pmd)) {
+ return write ? pmd_write(pmdp_get(pmd)) : 1;
+ } else {
+ pte = pte_offset_kernel(pmd, address);
+ if (!pte_present(ptep_get(pte)))
+ return 0;
+
+ return write ? pte_write(ptep_get(pte)) : 1;
+ }
+}
+
static void __kprobes no_context(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
const int field = sizeof(unsigned long) * 2;
+ if (spurious_fault(write, address))
+ return;
+
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
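
The helper exists because the pageattr support added later in this series lets the kernel relax its own page protections at runtime; a remote CPU can then fault on a TLB entry that is stale but stricter than the live page tables, and such a fault must be retried rather than treated as fatal. A sketch of the triggering side (assuming the set_memory helpers from this series):

    #include <asm/set_memory.h>

    static void make_writable(unsigned long addr)
    {
        /* Relaxes the kernel mapping; a CPU still holding the old
         * read-only TLB entry may fault once on a write, and that
         * fault is now recognised as spurious and simply retried. */
        set_memory_rw(addr, 1);
    }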
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 889030985135..914e82ff3f65 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -89,7 +89,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
@@ -101,7 +102,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
new file mode 100644
index 000000000000..ffd8d76021d4
--- /dev/null
+++ b/arch/loongarch/mm/pageattr.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pagewalk.h>
+#include <linux/pgtable.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+struct pageattr_masks {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
+{
+ unsigned long new_val = val;
+ struct pageattr_masks *masks = walk->private;
+
+ new_val &= ~(pgprot_val(masks->clear_mask));
+ new_val |= (pgprot_val(masks->set_mask));
+
+ return new_val;
+}
+
+static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pgd_t val = pgdp_get(pgd);
+
+ if (pgd_leaf(val)) {
+ val = __pgd(set_pageattr_masks(pgd_val(val), walk));
+ set_pgd(pgd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ p4d_t val = p4dp_get(p4d);
+
+ if (p4d_leaf(val)) {
+ val = __p4d(set_pageattr_masks(p4d_val(val), walk));
+ set_p4d(p4d, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pud_t val = pudp_get(pud);
+
+ if (pud_leaf(val)) {
+ val = __pud(set_pageattr_masks(pud_val(val), walk));
+ set_pud(pud, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pmd_t val = pmdp_get(pmd);
+
+ if (pmd_leaf(val)) {
+ val = __pmd(set_pageattr_masks(pmd_val(val), walk));
+ set_pmd(pmd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_t val = ptep_get(pte);
+
+ val = __pte(set_pageattr_masks(pte_val(val), walk));
+ set_pte(pte, val);
+
+ return 0;
+}
+
+static int pageattr_pte_hole(unsigned long addr, unsigned long next,
+ int depth, struct mm_walk *walk)
+{
+ return 0;
+}
+
+static const struct mm_walk_ops pageattr_ops = {
+ .pgd_entry = pageattr_pgd_entry,
+ .p4d_entry = pageattr_p4d_entry,
+ .pud_entry = pageattr_pud_entry,
+ .pmd_entry = pageattr_pmd_entry,
+ .pte_entry = pageattr_pte_entry,
+ .pte_hole = pageattr_pte_hole,
+ .walk_lock = PGWALK_RDLOCK,
+};
+
+static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask)
+{
+ int ret;
+ unsigned long start = addr;
+ unsigned long end = start + PAGE_SIZE * numpages;
+ struct pageattr_masks masks = {
+ .set_mask = set_mask,
+ .clear_mask = clear_mask
+ };
+
+ if (!numpages)
+ return 0;
+
+ mmap_write_lock(&init_mm);
+ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks);
+ mmap_write_unlock(&init_mm);
+
+ flush_tlb_kernel_range(start, end);
+
+ return ret;
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_NO_EXEC));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(_PAGE_NO_EXEC), __pgprot(0));
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
+}
+
+bool kernel_page_present(struct page *page)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return true;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(pgdp_get(pgd)))
+ return false;
+ if (pgd_leaf(pgdp_get(pgd)))
+ return true;
+
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(p4dp_get(p4d)))
+ return false;
+ if (p4d_leaf(p4dp_get(p4d)))
+ return true;
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(pudp_get(pud)))
+ return false;
+ if (pud_leaf(pudp_get(pud)))
+ return true;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(pmdp_get(pmd)))
+ return false;
+ if (pmd_leaf(pmdp_get(pmd)))
+ return true;
+
+ pte = pte_offset_kernel(pmd, addr);
+ return pte_present(ptep_get(pte));
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
+}
+
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (addr < vm_map_base)
+ return 0;
+
+ return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
+}
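
Typical use of the new helpers, sketched as a caller that seals a vmalloc'd buffer (addresses below vm_map_base are silently left alone, as the guards above show):

    #include <linux/vmalloc.h>
    #include <asm/set_memory.h>

    static int seal_buffer(void *buf, int numpages)
    {
        int ret;

        ret = set_memory_ro((unsigned long)buf, numpages);
        if (ret)
            return ret;

        return set_memory_nx((unsigned long)buf, numpages);
    }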
diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c
index 365f7de771cb..1da4dc46df43 100644
--- a/arch/loongarch/pci/acpi.c
+++ b/arch/loongarch/pci/acpi.c
@@ -225,6 +225,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
if (bus) {
memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
kfree(info);
+ kfree(root_ops);
} else {
struct pci_bus *child;
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
index 7e86a50f6e85..c2733e6c3a8d 100644
--- a/arch/loongarch/vdso/vgetrandom-chacha.S
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -9,23 +9,11 @@
.text
-/* Salsa20 quarter-round */
-.macro QR a b c d
- add.w \a, \a, \b
- xor \d, \d, \a
- rotri.w \d, \d, 16
-
- add.w \c, \c, \d
- xor \b, \b, \c
- rotri.w \b, \b, 20
-
- add.w \a, \a, \b
- xor \d, \d, \a
- rotri.w \d, \d, 24
-
- add.w \c, \c, \d
- xor \b, \b, \c
- rotri.w \b, \b, 25
+.macro OP_4REG op d0 d1 d2 d3 s0 s1 s2 s3
+ \op \d0, \d0, \s0
+ \op \d1, \d1, \s1
+ \op \d2, \d2, \s2
+ \op \d3, \d3, \s3
.endm
/*
@@ -74,6 +62,23 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
/* Reuse i as copy3 */
#define copy3 i
+/* Packs to be used with OP_4REG */
+#define line0 state0, state1, state2, state3
+#define line1 state4, state5, state6, state7
+#define line2 state8, state9, state10, state11
+#define line3 state12, state13, state14, state15
+
+#define line1_perm state5, state6, state7, state4
+#define line2_perm state10, state11, state8, state9
+#define line3_perm state15, state12, state13, state14
+
+#define copy copy0, copy1, copy2, copy3
+
+#define _16 16, 16, 16, 16
+#define _20 20, 20, 20, 20
+#define _24 24, 24, 24, 24
+#define _25 25, 25, 25, 25
+
/*
* The ABI requires s0-s9 saved, and sp aligned to 16-byte.
* This does not violate the stack-less requirement: no sensitive data
@@ -126,16 +131,38 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
li.w i, 10
.Lpermute:
/* odd round */
- QR state0, state4, state8, state12
- QR state1, state5, state9, state13
- QR state2, state6, state10, state14
- QR state3, state7, state11, state15
+ OP_4REG add.w line0, line1
+ OP_4REG xor line3, line0
+ OP_4REG rotri.w line3, _16
+
+ OP_4REG add.w line2, line3
+ OP_4REG xor line1, line2
+ OP_4REG rotri.w line1, _20
+
+ OP_4REG add.w line0, line1
+ OP_4REG xor line3, line0
+ OP_4REG rotri.w line3, _24
+
+ OP_4REG add.w line2, line3
+ OP_4REG xor line1, line2
+ OP_4REG rotri.w line1, _25
/* even round */
- QR state0, state5, state10, state15
- QR state1, state6, state11, state12
- QR state2, state7, state8, state13
- QR state3, state4, state9, state14
+ OP_4REG add.w line0, line1_perm
+ OP_4REG xor line3_perm, line0
+ OP_4REG rotri.w line3_perm, _16
+
+ OP_4REG add.w line2_perm, line3_perm
+ OP_4REG xor line1_perm, line2_perm
+ OP_4REG rotri.w line1_perm, _20
+
+ OP_4REG add.w line0, line1_perm
+ OP_4REG xor line3_perm, line0
+ OP_4REG rotri.w line3_perm, _24
+
+ OP_4REG add.w line2_perm, line3_perm
+ OP_4REG xor line1_perm, line2_perm
+ OP_4REG rotri.w line1_perm, _25
addi.w i, i, -1
bnez i, .Lpermute
@@ -147,10 +174,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
li.w copy3, 0x6b206574
/* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
- add.w state0, state0, copy0
- add.w state1, state1, copy1
- add.w state2, state2, copy2
- add.w state3, state3, copy3
+ OP_4REG add.w line0, copy
st.w state0, output, 0
st.w state1, output, 4
st.w state2, output, 8
@@ -165,10 +189,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
ld.w state3, key, 12
/* output[4,5,6,7] = state[0,1,2,3] + state[4,5,6,7] */
- add.w state4, state4, state0
- add.w state5, state5, state1
- add.w state6, state6, state2
- add.w state7, state7, state3
+ OP_4REG add.w line1, line0
st.w state4, output, 16
st.w state5, output, 20
st.w state6, output, 24
@@ -181,10 +202,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
ld.w state3, key, 28
/* output[8,9,10,11] = state[0,1,2,3] + state[8,9,10,11] */
- add.w state8, state8, state0
- add.w state9, state9, state1
- add.w state10, state10, state2
- add.w state11, state11, state3
+ OP_4REG add.w line2, line0
st.w state8, output, 32
st.w state9, output, 36
st.w state10, output, 40
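
For reference, the quarter-round the rewritten assembly implements, in C (RFC 8439 formulation; rotate-right by 16/20/24/25 equals rotate-left by 16/12/8/7, and OP_4REG applies each step to four columns at once):

    #include <stdint.h>

    #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

    static void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
        *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }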
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 37fb663559b4..c926da9d5ec2 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -138,7 +138,7 @@ void __init setup_arch(char **cmdline_p)
pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
_stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
- pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
+ pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n",
__bss_stop, memory_start, memory_start, memory_end);
memblock_add(_rambase, memory_end - _rambase);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 60077e576935..397edf05dd72 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -8,6 +8,7 @@ config MIPS
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CURRENT_STACK_POINTER if !CC_IS_CLANG || CLANG_VERSION >= 140000
select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
+ select ARCH_HAS_DMA_OPS if MACH_JAZZ
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
@@ -393,7 +394,6 @@ config MACH_JAZZ
select ARC_PROMLIB
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
- select DMA_OPS
select FW_ARC
select FW_ARC32
select ARCH_MAY_HAVE_PC_FDC
@@ -502,7 +502,6 @@ config MACH_LOONGSON64
select USE_OF
select BUILTIN_DTB
select PCI_HOST_GENERIC
- select HAVE_ARCH_NODEDATA_EXTENSION if NUMA
help
This enables the support of Loongson-2/3 family of machines.
@@ -735,7 +734,6 @@ config SGI_IP27
select WAR_R10000_LLSC
select MIPS_L1_CACHE_SHIFT_7
select NUMA
- select HAVE_ARCH_NODEDATA_EXTENSION
help
These are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
workstations. To compile a Linux kernel that runs on these, say Y
@@ -2613,9 +2611,6 @@ config NUMA
config SYS_SUPPORTS_NUMA
bool
-config HAVE_ARCH_NODEDATA_EXTENSION
- bool
-
config RELOCATABLE
bool "Relocatable kernel"
depends on SYS_SUPPORTS_RELOCATABLE
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config
index 8cfbafa532e0..a5b5b5102472 100644
--- a/arch/mips/configs/generic/board-ocelot.config
+++ b/arch/mips/configs/generic/board-ocelot.config
@@ -31,6 +31,7 @@ CONFIG_MICROSEMI_PHY=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6743a57c1ab4..f7222eb594ea 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -728,8 +728,8 @@ struct kvm_mips_callbacks {
int (*handle_fpe)(struct kvm_vcpu *vcpu);
int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
- int (*hardware_enable)(void);
- void (*hardware_disable)(void);
+ int (*enable_virtualization_cpu)(void);
+ void (*disable_virtualization_cpu)(void);
int (*check_extension)(struct kvm *kvm, long ext);
int (*vcpu_init)(struct kvm_vcpu *vcpu);
void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index 08c36e50a860..56959eb9cb26 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -22,7 +22,6 @@ struct node_data {
extern struct node_data *__node_data[];
-#define NODE_DATA(n) (&__node_data[(n)]->pglist)
#define hub_data(n) (&__node_data[(n)]->hub)
#endif /* _ASM_MACH_MMZONE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index a3d65d37b8b5..8fb70fd3c9c4 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -14,10 +14,6 @@
#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
#define nid_to_addrbase(nid) ((unsigned long)(nid) << NODE_ADDRSPACE_SHIFT)
-extern struct pglist_data *__node_data[];
-
-#define NODE_DATA(n) (__node_data[n])
-
extern void __init prom_init_numa_memory(void);
#endif /* _ASM_MACH_MMZONE_H */
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index b5de770b092e..60b43ea85c12 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -125,14 +125,14 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
- return kvm_mips_callbacks->hardware_enable();
+ return kvm_mips_callbacks->enable_virtualization_cpu();
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
- kvm_mips_callbacks->hardware_disable();
+ kvm_mips_callbacks->disable_virtualization_cpu();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 99d5a71e4300..ccab4d76b126 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -2869,7 +2869,7 @@ static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
return ret + 1;
}
-static int kvm_vz_hardware_enable(void)
+static int kvm_vz_enable_virtualization_cpu(void)
{
unsigned int mmu_size, guest_mmu_size, ftlb_size;
u64 guest_cvmctl, cvmvmconfig;
@@ -2983,7 +2983,7 @@ static int kvm_vz_hardware_enable(void)
return 0;
}
-static void kvm_vz_hardware_disable(void)
+static void kvm_vz_disable_virtualization_cpu(void)
{
u64 cvmvmconfig;
unsigned int mmu_size;
@@ -3280,8 +3280,8 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
.handle_guest_exit = kvm_trap_vz_handle_guest_exit,
- .hardware_enable = kvm_vz_hardware_enable,
- .hardware_disable = kvm_vz_hardware_disable,
+ .enable_virtualization_cpu = kvm_vz_enable_virtualization_cpu,
+ .disable_virtualization_cpu = kvm_vz_disable_virtualization_cpu,
.check_extension = kvm_vz_check_extension,
.vcpu_init = kvm_vz_vcpu_init,
.vcpu_uninit = kvm_vz_vcpu_uninit,
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index 68dafd6d3e25..8388400d052f 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -29,8 +29,6 @@
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);
-struct pglist_data *__node_data[MAX_NUMNODES];
-EXPORT_SYMBOL(__node_data);
cpumask_t __node_cpumask[MAX_NUMNODES];
EXPORT_SYMBOL(__node_cpumask);
@@ -83,12 +81,8 @@ static void __init init_topology_matrix(void)
static void __init node_mem_init(unsigned int node)
{
- struct pglist_data *nd;
unsigned long node_addrspace_offset;
unsigned long start_pfn, end_pfn;
- unsigned long nd_pa;
- int tnid;
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
node_addrspace_offset = nid_to_addrbase(node);
pr_info("Node%d's addrspace_offset is 0x%lx\n",
@@ -98,16 +92,8 @@ static void __init node_mem_init(unsigned int node)
pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
node, start_pfn, end_pfn);
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, node);
- nd = __va(nd_pa);
- memset(nd, 0, sizeof(struct pglist_data));
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != node)
- pr_info("NODE_DATA(%d) on node %d\n", node, tnid);
- __node_data[node] = nd;
+ alloc_node_data(node);
+
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
@@ -198,13 +184,3 @@ void __init prom_init_numa_memory(void)
pr_info("CP0_PageGrain: CP0 5.1 (0x%x)\n", read_c0_pagegrain());
prom_meminit();
}
-
-pg_data_t * __init arch_alloc_nodedata(int nid)
-{
- return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
-}
-
-void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
- __node_data[nid] = pgdat;
-}
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 7e11d7b58761..5d2a1225785b 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -98,7 +98,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
@@ -110,7 +111,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
*/
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index b8ca94cfb4fe..1963313f55d8 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -35,7 +35,6 @@
#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
struct node_data *__node_data[MAX_NUMNODES];
-
EXPORT_SYMBOL(__node_data);
static u64 gen_region_mask(void)
@@ -361,6 +360,7 @@ static void __init node_mem_init(nasid_t node)
*/
__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
memset(__node_data[node], 0, PAGE_SIZE);
+ node_data[node] = &__node_data[node]->pglist;
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
@@ -423,13 +423,3 @@ void __init mem_init(void)
memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
}
-
-pg_data_t * __init arch_alloc_nodedata(int nid)
-{
- return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
-}
-
-void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
- __node_data[nid] = (struct node_data *)pgdat;
-}
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 5d2652a1d35a..62733e049570 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -70,11 +70,13 @@ void cpu_node_probe(void)
gda_t *gdap = GDA;
nodes_clear(node_online_map);
+ nodes_clear(node_possible_map);
for (i = 0; i < MAX_NUMNODES; i++) {
nasid_t nasid = gdap->g_nasidtable[i];
if (nasid == INVALID_NASID)
break;
node_set_online(nasid);
+ node_set(nasid, node_possible_map);
highest = node_scan_cpus(nasid, highest);
}
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 3459df28afee..a2278485de19 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -82,6 +82,10 @@ void __init mmu_init(void)
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
static struct page *kuser_page[1];
+static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ .pages = kuser_page,
+};
static int alloc_kuser_page(void)
{
@@ -106,18 +110,18 @@ arch_initcall(alloc_kuser_page);
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
- int ret;
+ struct vm_area_struct *vma;
mmap_write_lock(mm);
/* Map kuser helpers to user space address */
- ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
+ vma = _install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
- VM_MAYEXEC, kuser_page);
+ VM_MAYEXEC, &vdso_mapping);
mmap_write_unlock(mm);
- return ret;
+ return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
const char *arch_vma_name(struct vm_area_struct *vma)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index db3edd54d6f2..aa6a3cad275d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -10,6 +10,7 @@ config PARISC
select ARCH_WANT_FRAME_POINTERS
select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_DMA_ALLOC if PA11
+ select ARCH_HAS_DMA_OPS
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
@@ -23,7 +24,6 @@ config PARISC
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select HAVE_RELIABLE_STACKTRACE
- select DMA_OPS
select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index b0f0816879df..5e8e37a722ef 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -466,7 +466,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
static const struct file_operations perf_fops = {
- .llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.unlocked_ioctl = perf_ioctl,
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index f7722451276e..f852fe274abe 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -167,7 +167,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, UP);
@@ -175,7 +176,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, DOWN);
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index 0356199bd9e7..aa664f7ddb63 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -40,7 +40,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
/* we need to make sure the colouring is OK */
- return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+ return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 0b8b2e3a6381..8094a01974cc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -133,6 +133,7 @@ config PPC
select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES
+ select ARCH_HAS_DMA_OPS if PPC64
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
@@ -185,7 +186,6 @@ config PPC
select CPUMASK_OFFSTACK if NR_CPUS >= 8192
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select DMA_OPS_BYPASS if PPC64
- select DMA_OPS if PPC64
select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h
index ebfadd39e192..9506a96ebbcc 100644
--- a/arch/powerpc/boot/xz_config.h
+++ b/arch/powerpc/boot/xz_config.h
@@ -50,11 +50,8 @@ static inline void put_unaligned_be32(u32 val, void *p)
/* prevent the inclusion of the xz-preboot MM headers */
#define DECOMPR_MM_H
#define memmove memmove
-#define XZ_EXTERN static
/* xz.h needs to be included directly since we need enum xz_mode */
#include "../../../include/linux/xz.h"
-#undef XZ_EXTERN
-
#endif
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 6001d580c0dd..a5e3e7f97f4d 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -81,7 +81,6 @@ CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
-CONFIG_Z3FOLD=y
CONFIG_ZSMALLOC=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_SLAB_FREELIST_RANDOM=y
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 09ebcbdfb34f..46a4c85e85e2 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -107,6 +107,7 @@ config CRYPTO_AES_PPC_SPE
config CRYPTO_AES_GCM_P10
tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
+ depends on BROKEN
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
select CRYPTO_LIB_AES
select CRYPTO_ALGAPI
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index f8ba72573cae..6d98e6f08d4d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1098,6 +1098,7 @@ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+extern pud_t pud_modify(pud_t pud, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
extern void set_pud_at(struct mm_struct *mm, unsigned long addr,
@@ -1358,6 +1359,8 @@ static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
+extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp);
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 99707456c2cd..a157ab513347 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -257,15 +257,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
extern void arch_exit_mmap(struct mm_struct *mm);
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- unsigned long vdso_base = (unsigned long)mm->context.vdso;
-
- if (start <= vdso_base && vdso_base < end)
- mm->context.vdso = NULL;
-}
-
#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
bool execute, bool foreign);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index da827d2d0866..d99863cd6cde 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -20,12 +20,6 @@
#ifdef CONFIG_NUMA
-extern struct pglist_data *node_data[];
-/*
- * Return a pointer to the node data for node n.
- */
-#define NODE_DATA(nid) (node_data[nid])
-
/*
* Following are specific to this numa platform.
*/
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 264a6c09517a..2f72ad885332 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -65,6 +65,7 @@ static inline unsigned long pte_pfn(pte_t pte)
/*
* Select all bits except the pfn
*/
+#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pte_flags;
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 0e59b8fd9bc6..83fe99861eb1 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1574,6 +1574,104 @@ static int proc_eeh_show(struct seq_file *m, void *v)
}
#endif /* CONFIG_PROC_FS */
+static int eeh_break_device(struct pci_dev *pdev)
+{
+ struct resource *bar = NULL;
+ void __iomem *mapped;
+ u16 old, bit;
+ int i, pos;
+
+ /* Do we have an MMIO BAR to disable? */
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ struct resource *r = &pdev->resource[i];
+
+ if (!r->flags || !r->start)
+ continue;
+ if (r->flags & IORESOURCE_IO)
+ continue;
+ if (r->flags & IORESOURCE_UNSET)
+ continue;
+
+ bar = r;
+ break;
+ }
+
+ if (!bar) {
+ pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
+ return -ENXIO;
+ }
+
+ pci_err(pdev, "Going to break: %pR\n", bar);
+
+ if (pdev->is_virtfn) {
+#ifndef CONFIG_PCI_IOV
+ return -ENXIO;
+#else
+ /*
+ * VFs don't have a per-function COMMAND register, so the best
+ * we can do is clear the Memory Space Enable bit in the PF's
+ * SRIOV control reg.
+ *
+ * Unfortunately, this requires that we have a PF (i.e. it doesn't
+ * work for a passed-through VF) and it has the potential side
+ * effect of also causing an EEH on every other VF under the
+ * PF. Oh well.
+ */
+ pdev = pdev->physfn;
+ if (!pdev)
+ return -ENXIO; /* passed through VFs have no PF */
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pos += PCI_SRIOV_CTRL;
+ bit = PCI_SRIOV_CTRL_MSE;
+#endif /* !CONFIG_PCI_IOV */
+ } else {
+ bit = PCI_COMMAND_MEMORY;
+ pos = PCI_COMMAND;
+ }
+
+ /*
+ * Process here is:
+ *
+ * 1. Disable Memory space.
+ *
+ * 2. Perform an MMIO to the device. This should result in an error
+ * (CA / UR) being raised by the device which results in an EEH
+ * PE freeze. Using the in_8() accessor skips the EEH detection hook,
+ * so the EEH detection machinery won't be triggered here. This is
+ * to match the usual behaviour of EEH
+ * where the HW will asynchronously freeze a PE and it's up to
+ * the kernel to notice and deal with it.
+ *
+ * 3. Turn Memory space back on. This is more important for VFs
+ * since recovery will probably fail if we don't. For normal devices
+ * the COMMAND register is reset as part of re-initialising
+ * the device.
+ *
+ * Breaking stuff is the point so who cares if it's racy ;)
+ */
+ pci_read_config_word(pdev, pos, &old);
+
+ mapped = ioremap(bar->start, PAGE_SIZE);
+ if (!mapped) {
+ pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
+ return -ENXIO;
+ }
+
+ pci_write_config_word(pdev, pos, old & ~bit);
+ in_8(mapped);
+ pci_write_config_word(pdev, pos, old);
+
+ iounmap(mapped);
+
+ return 0;
+}
+
+int eeh_pe_inject_mmio_error(struct pci_dev *pdev)
+{
+ return eeh_break_device(pdev);
+}
+
#ifdef CONFIG_DEBUG_FS
@@ -1725,99 +1823,6 @@ static const struct file_operations eeh_dev_check_fops = {
.read = eeh_debugfs_dev_usage,
};
-static int eeh_debugfs_break_device(struct pci_dev *pdev)
-{
- struct resource *bar = NULL;
- void __iomem *mapped;
- u16 old, bit;
- int i, pos;
-
- /* Do we have an MMIO BAR to disable? */
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
- struct resource *r = &pdev->resource[i];
-
- if (!r->flags || !r->start)
- continue;
- if (r->flags & IORESOURCE_IO)
- continue;
- if (r->flags & IORESOURCE_UNSET)
- continue;
-
- bar = r;
- break;
- }
-
- if (!bar) {
- pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
- return -ENXIO;
- }
-
- pci_err(pdev, "Going to break: %pR\n", bar);
-
- if (pdev->is_virtfn) {
-#ifndef CONFIG_PCI_IOV
- return -ENXIO;
-#else
- /*
- * VFs don't have a per-function COMMAND register, so the best
- * we can do is clear the Memory Space Enable bit in the PF's
- * SRIOV control reg.
- *
- * Unfortunately, this requires that we have a PF (i.e doesn't
- * work for a passed-through VF) and it has the potential side
- * effect of also causing an EEH on every other VF under the
- * PF. Oh well.
- */
- pdev = pdev->physfn;
- if (!pdev)
- return -ENXIO; /* passed through VFs have no PF */
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- pos += PCI_SRIOV_CTRL;
- bit = PCI_SRIOV_CTRL_MSE;
-#endif /* !CONFIG_PCI_IOV */
- } else {
- bit = PCI_COMMAND_MEMORY;
- pos = PCI_COMMAND;
- }
-
- /*
- * Process here is:
- *
- * 1. Disable Memory space.
- *
- * 2. Perform an MMIO to the device. This should result in an error
- * (CA / UR) being raised by the device which results in an EEH
- * PE freeze. Using the in_8() accessor skips the eeh detection hook
- * so the freeze hook so the EEH Detection machinery won't be
- * triggered here. This is to match the usual behaviour of EEH
- * where the HW will asynchronously freeze a PE and it's up to
- * the kernel to notice and deal with it.
- *
- * 3. Turn Memory space back on. This is more important for VFs
- * since recovery will probably fail if we don't. For normal
- * the COMMAND register is reset as a part of re-initialising
- * the device.
- *
- * Breaking stuff is the point so who cares if it's racy ;)
- */
- pci_read_config_word(pdev, pos, &old);
-
- mapped = ioremap(bar->start, PAGE_SIZE);
- if (!mapped) {
- pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
- return -ENXIO;
- }
-
- pci_write_config_word(pdev, pos, old & ~bit);
- in_8(mapped);
- pci_write_config_word(pdev, pos, old);
-
- iounmap(mapped);
-
- return 0;
-}
-
static ssize_t eeh_dev_break_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1829,7 +1834,7 @@ static ssize_t eeh_dev_break_write(struct file *filp,
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- ret = eeh_debugfs_break_device(pdev);
+ ret = eeh_break_device(pdev);
pci_dev_put(pdev);
if (ret < 0)
@@ -1844,11 +1849,6 @@ static const struct file_operations eeh_dev_break_fops = {
.read = eeh_debugfs_dev_usage,
};
-int eeh_pe_inject_mmio_error(struct pci_dev *pdev)
-{
- return eeh_debugfs_break_device(pdev);
-}
-
static ssize_t eeh_dev_can_recover(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
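
Moving the break logic out of the CONFIG_DEBUG_FS block makes eeh_pe_inject_mmio_error() available even on kernels without debugfs. Hypothetical caller sketch:

    #include <linux/pci.h>

    extern int eeh_pe_inject_mmio_error(struct pci_dev *pdev);

    static int demo_freeze(struct pci_dev *pdev)
    {
        /* returns 0 on success, -ENXIO if no usable memory BAR */
        return eeh_pe_inject_mmio_error(pdev);
    }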
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index e385d3164648..f9c6568a9137 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -73,7 +73,7 @@ static const char *nvram_os_partitions[] = {
};
static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason);
+ struct kmsg_dump_detail *detail);
static struct kmsg_dumper nvram_kmsg_dumper = {
.dump = oops_to_nvram
@@ -643,7 +643,7 @@ void __init nvram_init_oops_partition(int rtas_partition_exists)
* partition. If that's too much, go back and capture uncompressed text.
*/
static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
static unsigned int oops_count = 0;
@@ -655,7 +655,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
int rc = -1;
- switch (reason) {
+ switch (detail->reason) {
case KMSG_DUMP_SHUTDOWN:
/* These are almost always orderly shutdowns. */
return;
@@ -671,7 +671,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
break;
default:
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
- __func__, (int) reason);
+ __func__, (int) detail->reason);
return;
}
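
A dumper under the new callback signature, reduced to its shape (the reason moves into struct kmsg_dump_detail; registration via kmsg_dump_register() is unchanged):

    #include <linux/kmsg_dump.h>

    static void demo_dump(struct kmsg_dumper *dumper,
                          struct kmsg_dump_detail *detail)
    {
        if (detail->reason != KMSG_DUMP_PANIC)
            return;
        /* capture the kernel log here */
    }

    static struct kmsg_dumper demo_dumper = {
        .dump = demo_dump,
    };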
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 7a2ff9010f17..ee4b9d676cff 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -81,6 +81,21 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}
+static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struct *vma)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ /*
+ * close() is called for munmap() but also for mremap(). In the mremap()
+ * case the vdso pointer has already been updated by the mremap() hook
+ * above, so it must not be set to NULL here.
+ */
+ if (vma->vm_start != (unsigned long)mm->context.vdso)
+ return;
+
+ mm->context.vdso = NULL;
+}
+
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -92,11 +107,13 @@ static struct vm_special_mapping vvar_spec __ro_after_init = {
static struct vm_special_mapping vdso32_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso32_mremap,
+ .close = vdso_close,
};
static struct vm_special_mapping vdso64_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso64_mremap,
+ .close = vdso_close,
};
#ifdef CONFIG_TIME_NS
@@ -197,13 +214,6 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
/* Add required alignment. */
vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
- /*
- * Put vDSO base into mm struct. We need to do this before calling
- * install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO.
- */
- mm->context.vdso = (void __user *)vdso_base + vvar_size;
-
vma = _install_special_mapping(mm, vdso_base, vvar_size,
VM_READ | VM_MAYREAD | VM_IO |
VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
@@ -223,10 +233,15 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
VM_READ | VM_EXEC | VM_MAYREAD |
VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
do_munmap(mm, vdso_base, vvar_size, NULL);
+ return PTR_ERR(vma);
+ }
+
+ // Now that the mappings are in place, set the mm VDSO pointer
+ mm->context.vdso = (void __user *)vdso_base + vvar_size;
- return PTR_ERR_OR_ZERO(vma);
+ return 0;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
@@ -240,8 +255,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return -EINTR;
rc = __arch_setup_additional_pages(bprm, uses_interp);
- if (rc)
- mm->context.vdso = NULL;
mmap_write_unlock(mm);
return rc;
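
The net effect, in miniature: the vdso pointer is published only after both mappings are installed, and a ->close hook (rather than the removed arch_unmap()) clears it when userspace unmaps the vdso. Pattern sketch:

    static void demo_close(const struct vm_special_mapping *sm,
                           struct vm_area_struct *vma)
    {
        /* munmap() only; mremap() already updated the pointer */
        if (vma->vm_start == (unsigned long)vma->vm_mm->context.vdso)
            vma->vm_mm->context.vdso = NULL;
    }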
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 3ff3de9a52ac..34c0adb9fdbf 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -118,12 +118,12 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
struct fd f;
f = fdget(tablefd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt == f.file->private_data) {
+ if (stt == fd_file(f)->private_data) {
found = true;
break;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 5e6c7b527677..f14329989e9a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1938,11 +1938,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EBADF;
f = fdget(cap->args[0]);
- if (!f.file)
+ if (!fd_file(f))
break;
r = -EPERM;
- dev = kvm_device_from_filp(f.file);
+ dev = kvm_device_from_filp(fd_file(f));
if (dev)
r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
@@ -1957,11 +1957,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EBADF;
f = fdget(cap->args[0]);
- if (!f.file)
+ if (!fd_file(f))
break;
r = -EPERM;
- dev = kvm_device_from_filp(f.file);
+ dev = kvm_device_from_filp(fd_file(f));
if (dev) {
if (xics_on_xive())
r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
@@ -1980,7 +1980,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EBADF;
f = fdget(cap->args[0]);
- if (!f.file)
+ if (!fd_file(f))
break;
r = -ENXIO;
@@ -1990,7 +1990,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
}
r = -EPERM;
- dev = kvm_device_from_filp(f.file);
+ dev = kvm_device_from_filp(fd_file(f));
if (dev)
r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
cap->args[1]);
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 7e5e1c28e56a..8967903c15e9 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -46,7 +46,7 @@
.section ".text"
-#ifndef CONFIG_PPC64
+#ifndef __powerpc64__
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index f4d8d3c40e5c..5a4a75369043 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -176,6 +176,17 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
return __pmd(old_pmd);
}
+pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp)
+{
+ unsigned long old_pud;
+
+ VM_WARN_ON_ONCE(!pud_present(*pudp));
+ old_pud = pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID);
+ flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
+ return __pud(old_pud);
+}
+
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp, int full)
{
@@ -259,6 +270,15 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmdv &= _HPAGE_CHG_MASK;
return pmd_set_protbits(__pmd(pmdv), newprot);
}
+
+pud_t pud_modify(pud_t pud, pgprot_t newprot)
+{
+ unsigned long pudv;
+
+ pudv = pud_val(pud);
+ pudv &= _HPAGE_CHG_MASK;
+ return pud_set_protbits(__pud(pudv), newprot);
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/* For use by kexec, called with MMU off */
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index ef3ce37f1bb3..87307d0fc3b8 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -637,10 +637,11 @@ unsigned long arch_get_unmapped_area(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags,
+ vm_flags_t vm_flags)
{
if (radix_enabled())
- return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
return slice_get_unmapped_area(addr, len, flags,
mm_ctx_user_psize(&current->mm->context), 0);
@@ -650,10 +651,11 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long addr0,
const unsigned long len,
const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long flags,
+ vm_flags_t vm_flags)
{
if (radix_enabled())
- return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
+ return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags);
return slice_get_unmapped_area(addr0, len, flags,
mm_ctx_user_psize(&current->mm->context), 1);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index da606ef18eae..1221c561b43a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -216,7 +216,7 @@ static int __init mark_nonram_nosave(void)
* everything else. GFP_DMA32 page allocations automatically fall back to
* ZONE_DMA.
*
- * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
+ * By using 31-bit unconditionally, we can exploit zone_dma_limit to inform the
* generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
* anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
* ZONE_DMA.
@@ -230,6 +230,7 @@ void __init paging_init(void)
{
unsigned long long total_ram = memblock_phys_mem_size();
phys_addr_t top_of_ram = memblock_end_of_DRAM();
+ int zone_dma_bits;
#ifdef CONFIG_HIGHMEM
unsigned long v = __fix_to_virt(FIX_KMAP_END);
@@ -256,6 +257,8 @@ void __init paging_init(void)
else
zone_dma_bits = 31;
+ zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);
+
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
1UL << (zone_dma_bits - PAGE_SHIFT));
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aa89899f0c1a..3c1da08304d0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -43,11 +43,9 @@ static char *cmdline __initdata;
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
-struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
-EXPORT_SYMBOL(node_data);
static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;
@@ -1095,27 +1093,9 @@ void __init dump_numa_cpu_topology(void)
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
u64 spanned_pages = end_pfn - start_pfn;
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
- u64 nd_pa;
- void *nd;
- int tnid;
-
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, nid);
-
- nd = __va(nd_pa);
-
- /* report and initialize */
- pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+
+ alloc_node_data(nid);
+
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = spanned_pages;
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index 8c31802f97e8..e89f64a0f24a 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -136,10 +136,10 @@ void pte_fragment_free(unsigned long *table, int kernel)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
- struct page *page;
+ struct folio *folio;
- page = virt_to_page(pgtable);
- SetPageActive(page);
+ folio = virt_to_folio(pgtable);
+ folio_set_active(folio);
pte_fragment_free((unsigned long *)pgtable, 0);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ab0656115424..7316396e452d 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -297,6 +297,12 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
}
#if defined(CONFIG_PPC_8xx)
+
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS) || defined(CONFIG_SPLIT_PMD_PTLOCKS)
+/* We need the same lock to protect the PMD table and the two PTE tables. */
+#error "8M hugetlb folios are incompatible with split page table locks"
+#endif
+
static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val)
{
pte_basic_t *entry = (pte_basic_t *)ptep;
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 87ad7d563cfa..cd7d42fc12a6 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -66,8 +66,8 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags,
if (flags & SPU_CREATE_AFFINITY_SPU) {
struct fd neighbor = fdget(neighbor_fd);
ret = -EBADF;
- if (neighbor.file) {
- ret = calls->create_thread(name, flags, mode, neighbor.file);
+ if (fd_file(neighbor)) {
+ ret = calls->create_thread(name, flags, mode, fd_file(neighbor));
fdput(neighbor);
}
} else
@@ -89,8 +89,8 @@ SYSCALL_DEFINE3(spu_run,int, fd, __u32 __user *, unpc, __u32 __user *, ustatus)
ret = -EBADF;
arg = fdget(fd);
- if (arg.file) {
- ret = calls->spu_run(arg.file, unpc, ustatus);
+ if (fd_file(arg)) {
+ ret = calls->spu_run(fd_file(arg), unpc, ustatus);
fdput(arg);
}
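
These hunks are part of the tree-wide switch from poking f.file directly to the fd_file() accessor, which hides the struct fd representation. The converted idiom, in isolation:

    #include <linux/file.h>

    static int demo_with_fd(unsigned int fd)
    {
        struct fd f = fdget(fd);

        if (!fd_file(f))
            return -EBADF;
        /* ... operate on fd_file(f) ... */
        fdput(f);
        return 0;
    }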
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
index 6c3bc4b4da98..bb4218fa796e 100644
--- a/arch/powerpc/platforms/powernv/opal-kmsg.c
+++ b/arch/powerpc/platforms/powernv/opal-kmsg.c
@@ -20,13 +20,13 @@
* message, it just ensures that OPAL completely flushes the console buffer.
*/
static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
/*
* Outside of a panic context the pollers will continue to run,
* so we don't need to do any special flushing.
*/
- if (reason != KMSG_DUMP_PANIC)
+ if (detail->reason != KMSG_DUMP_PANIC)
return;
opal_flush_console(0);
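
For context: dumpers now receive a struct kmsg_dump_detail and read the reason from it. A minimal callback sketch under the new signature (my_dumper_cb is a hypothetical name):

	/* Sketch: a kmsg dumper callback using the detail-based signature. */
	static void my_dumper_cb(struct kmsg_dumper *dumper,
				 struct kmsg_dump_detail *detail)
	{
		/* Only act on panics, as the OPAL flush above does. */
		if (detail->reason != KMSG_DUMP_PANIC)
			return;
		/* ... flush the hardware console here ... */
	}
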
diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c
index c29e85db5f35..1574176e3ffc 100644
--- a/arch/powerpc/platforms/pseries/papr-vpd.c
+++ b/arch/powerpc/platforms/pseries/papr-vpd.c
@@ -156,10 +156,7 @@ static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len)
const char *old_ptr = blob->data;
char *new_ptr;
- new_ptr = old_ptr ?
- kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL_ACCOUNT) :
- kvmalloc(len, GFP_KERNEL_ACCOUNT);
-
+ new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT);
if (!new_ptr)
return -ENOMEM;
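
The two-branch allocation collapses because kvrealloc() now takes no old size and accepts a NULL pointer, in which case it behaves like kvmalloc(). A usage sketch under that assumption (buf/len/chunk are illustrative):

	/* Sketch: growing a buffer with the simplified kvrealloc(). */
	char *buf = NULL;
	size_t len = 0;

	/* The first call sees buf == NULL and allocates, like kvmalloc(). */
	buf = kvrealloc(buf, len + chunk, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;
	len += chunk;
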
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 86d1f1cea571..22dc5ea4196c 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -65,10 +65,12 @@ config RISCV
select ARCH_SUPPORTS_LTO_CLANG_THIN if LLD_VERSION >= 140000
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+ select ARCH_SUPPORTS_RT
select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_SYM_ANNOTATIONS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -93,6 +95,7 @@ config RISCV
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CPU_DEVICES
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
@@ -157,6 +160,7 @@ config RISCV
select HAVE_KERNEL_LZO if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_UNCOMPRESSED if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_ZSTD if !XIP_KERNEL && !EFI_ZBOOT
+ select HAVE_KERNEL_XZ if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KRETPROBES if !XIP_KERNEL
# https://github.com/ClangBuiltLinux/linux/issues/1881
@@ -173,7 +177,7 @@ config RISCV
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
- select HAVE_RUST if 64BIT
+ select HAVE_RUST if RUSTC_SUPPORTS_RISCV
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR
@@ -202,8 +206,16 @@ config RISCV
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
+ select USER_STACKTRACE_SUPPORT
select ZONE_DMA32 if 64BIT
+config RUSTC_SUPPORTS_RISCV
+ def_bool y
+ depends on 64BIT
+ # Shadow call stack requires rustc version 1.82+ due to use of the
+ # -Zsanitizer=shadow-call-stack flag.
+ depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200
+
config CLANG_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_CLANG
# https://github.com/ClangBuiltLinux/linux/issues/1817
@@ -321,6 +333,11 @@ config GENERIC_HWEIGHT
config FIX_EARLYCON_MEM
def_bool MMU
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0 if 32BIT
+ default 0xdead000000000000 if 64BIT
+
config PGTABLE_LEVELS
int
default 5 if 64BIT
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 6fe682139d2e..d469db9f46f4 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -159,6 +159,7 @@ boot-image-$(CONFIG_KERNEL_LZ4) := Image.lz4
boot-image-$(CONFIG_KERNEL_LZMA) := Image.lzma
boot-image-$(CONFIG_KERNEL_LZO) := Image.lzo
boot-image-$(CONFIG_KERNEL_ZSTD) := Image.zst
+boot-image-$(CONFIG_KERNEL_XZ) := Image.xz
ifdef CONFIG_RISCV_M_MODE
boot-image-$(CONFIG_ARCH_CANAAN) := loader.bin
endif
@@ -183,12 +184,12 @@ endif
vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg
-BOOT_TARGETS := Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst loader loader.bin xipImage vmlinuz.efi
+BOOT_TARGETS := Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst Image.xz loader loader.bin xipImage vmlinuz.efi
all: $(notdir $(KBUILD_IMAGE))
loader.bin: loader
-Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst loader xipImage vmlinuz.efi: Image
+Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst Image.xz loader xipImage vmlinuz.efi: Image
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@@ -225,6 +226,7 @@ define archhelp
echo ' Image.lzma - Compressed kernel image (arch/riscv/boot/Image.lzma)'
echo ' Image.lzo - Compressed kernel image (arch/riscv/boot/Image.lzo)'
echo ' Image.zst - Compressed kernel image (arch/riscv/boot/Image.zst)'
+ echo ' Image.xz - Compressed kernel image (arch/riscv/boot/Image.xz)'
echo ' vmlinuz.efi - Compressed EFI kernel image (arch/riscv/boot/vmlinuz.efi)'
echo ' Default when CONFIG_EFI_ZBOOT=y'
echo ' xipImage - Execute-in-place kernel image (arch/riscv/boot/xipImage)'
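
With the target wired up above, an XZ-compressed image builds like the other compressed images; assuming a typical cross toolchain prefix (illustrative):

	make ARCH=riscv CROSS_COMPILE=riscv64-linux-gnu- Image.xz
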
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 4e9e7a28bf9b..b25d524ce5eb 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -64,6 +64,9 @@ $(obj)/Image.lzo: $(obj)/Image FORCE
$(obj)/Image.zst: $(obj)/Image FORCE
$(call if_changed,zstd)
+$(obj)/Image.xz: $(obj)/Image FORCE
+ $(call if_changed,xzkern)
+
$(obj)/loader.bin: $(obj)/loader FORCE
$(call if_changed,objcopy)
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index ee978cc74673..2341393cfac1 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -137,12 +137,10 @@ CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
-CONFIG_RAVB=y
CONFIG_STMMAC_ETH=m
CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
CONFIG_MOTORCOMM_PHY=y
-CONFIG_CAN_RCAR_CANFD=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_KEYBOARD_SUN4I_LRADC=m
CONFIG_SERIAL_8250=y
@@ -150,20 +148,18 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
-CONFIG_SERIAL_SH_SCI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_HW_RANDOM_JH7110=m
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_MV64XXX=m
-CONFIG_I2C_RIIC=y
CONFIG_SPI=y
CONFIG_SPI_CADENCE_QUADSPI=m
CONFIG_SPI_PL022=m
-CONFIG_SPI_RSPI=m
CONFIG_SPI_SIFIVE=y
CONFIG_SPI_SUN6I=y
# CONFIG_PTP_1588_CLOCK is not set
@@ -176,7 +172,6 @@ CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_SENSORS_SFCTEMP=m
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
-CONFIG_RZG2L_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_MFD_AXP20X_I2C=y
@@ -205,11 +200,11 @@ CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_RCAR is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_USB_CDNS_SUPPORT=m
@@ -221,7 +216,6 @@ CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SUNXI=m
CONFIG_NOP_USB_XCEIV=m
CONFIG_USB_GADGET=y
-CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_CONFIGFS=m
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_ACM=y
@@ -239,7 +233,6 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_DWCMSHC=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SPI=y
-CONFIG_MMC_SDHI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_STARFIVE=y
CONFIG_MMC_SUNXI=y
@@ -257,7 +250,6 @@ CONFIG_CLK_SOPHGO_SG2042_PLL=y
CONFIG_CLK_SOPHGO_SG2042_CLKGEN=y
CONFIG_CLK_SOPHGO_SG2042_RPGATE=y
CONFIG_SUN8I_DE2_CCU=m
-CONFIG_RENESAS_OSTM=y
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
@@ -265,7 +257,6 @@ CONFIG_RPMSG_VIRTIO=y
CONFIG_PM_DEVFREQ=y
CONFIG_IIO=y
CONFIG_PHY_SUN4I_USB=m
-CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m
CONFIG_PHY_STARFIVE_JH7110_PCIE=m
CONFIG_PHY_STARFIVE_JH7110_USB=m
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index af9601da4643..87ff5a1233af 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -58,6 +58,7 @@ CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
CONFIG_I2C_CHARDEV=y
# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
# CONFIG_SPI_MEM is not set
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index dd460c649152..95cbd574f291 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -50,6 +50,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
# CONFIG_SPI_MEM is not set
diff --git a/arch/riscv/errata/sifive/errata_cip_453.S b/arch/riscv/errata/sifive/errata_cip_453.S
index f1b9623fe1de..b1f7b636fe9a 100644
--- a/arch/riscv/errata/sifive/errata_cip_453.S
+++ b/arch/riscv/errata/sifive/errata_cip_453.S
@@ -21,7 +21,7 @@
1:
.endm
-ENTRY(sifive_cip_453_page_fault_trp)
+SYM_FUNC_START(sifive_cip_453_page_fault_trp)
ADD_SIGN_EXT a0, t0, t1
#ifdef CONFIG_MMU
la t0, do_page_fault
@@ -29,10 +29,10 @@ ENTRY(sifive_cip_453_page_fault_trp)
la t0, do_trap_unknown
#endif
jr t0
-END(sifive_cip_453_page_fault_trp)
+SYM_FUNC_END(sifive_cip_453_page_fault_trp)
-ENTRY(sifive_cip_453_insn_fault_trp)
+SYM_FUNC_START(sifive_cip_453_insn_fault_trp)
ADD_SIGN_EXT a0, t0, t1
la t0, do_trap_insn_fault
jr t0
-END(sifive_cip_453_insn_fault_trp)
+SYM_FUNC_END(sifive_cip_453_insn_fault_trp)
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 5c589770f2a8..1461af12da6e 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -5,6 +5,7 @@ syscall-y += syscall_table_64.h
generic-y += early_ioremap.h
generic-y += flat.h
generic-y += kvm_para.h
+generic-y += mmzone.h
generic-y += parport.h
generic-y += spinlock.h
generic-y += spinlock_types.h
diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h
index e0a1f84404f3..6e13695120bc 100644
--- a/arch/riscv/include/asm/acpi.h
+++ b/arch/riscv/include/asm/acpi.h
@@ -91,10 +91,8 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_NUMA
-int acpi_numa_get_nid(unsigned int cpu);
void acpi_map_cpus_to_nodes(void);
#else
-static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 71af9ecfcfcb..fae152ea0508 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -222,44 +222,44 @@ legacy:
#define __NOT(x) (~(x))
/**
- * test_and_set_bit - Set a bit and return its old value
+ * arch_test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation may be reordered on architectures other than x86.
*/
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
/**
- * test_and_clear_bit - Clear a bit and return its old value
+ * arch_test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation can be reordered on architectures other than x86.
*/
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(and, __NOT, nr, addr);
}
/**
- * test_and_change_bit - Change a bit and return its old value
+ * arch_test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(xor, __NOP, nr, addr);
}
/**
- * set_bit - Atomically set a bit in memory
+ * arch_set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -270,13 +270,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
__op_bit(or, __NOP, nr, addr);
}
/**
- * clear_bit - Clears a bit in memory
+ * arch_clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
@@ -284,13 +284,13 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
* on non-x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*/
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
__op_bit(and, __NOT, nr, addr);
}
/**
- * change_bit - Toggle a bit in memory
+ * arch_change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
@@ -298,40 +298,40 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
/**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
-static inline int test_and_set_bit_lock(
+static inline int arch_test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
/**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void clear_bit_unlock(
+static inline void arch_clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
}
/**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to clear
* @addr: the address to start counting from
*
@@ -345,13 +345,13 @@ static inline void clear_bit_unlock(
* non-atomic property here: it's a lot more instructions and we still have to
* provide release semantics anyway.
*/
-static inline void __clear_bit_unlock(
+static inline void arch___clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
- clear_bit_unlock(nr, addr);
+ arch_clear_bit_unlock(nr, addr);
}
-static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
unsigned long res;
@@ -369,6 +369,9 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
#undef __NOT
#undef __AMO
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
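
The arch_ renames above matter because the newly included asm-generic instrumented headers wrap each arch_*() primitive with sanitizer annotations before dispatching to it; roughly (a sketch of the wrapper shape in instrumented-atomic.h, abbreviated):

	/* Sketch: how the instrumented header derives set_bit() from arch_set_bit(). */
	static __always_inline void set_bit(long nr, volatile unsigned long *addr)
	{
		/* Report the atomic write to KASAN/KCSAN before performing it. */
		instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
		arch_set_bit(nr, addr);
	}
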
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index ce79c558a4c8..8de73f91bfa3 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -46,7 +46,23 @@ do { \
} while (0)
#ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
+extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+extern char _end[];
+#define flush_cache_vmap flush_cache_vmap
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (is_vmalloc_or_module_addr((void *)start)) {
+ int i;
+
+ /*
+ * We don't care if a cpu concurrently resets this value, since the
+ * only place that can happen is in handle_exception(), where an
+ * sfence.vma is emitted.
+ */
+ for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
+ new_vmalloc[i] = -1ULL;
+ }
+}
#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
#endif
diff --git a/arch/riscv/include/asm/exec.h b/arch/riscv/include/asm/exec.h
new file mode 100644
index 000000000000..07d9942682e0
--- /dev/null
+++ b/arch/riscv/include/asm/exec.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_EXEC_H
+#define __ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* __ASM_EXEC_H */
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 6bcd80325dfc..182db7930edc 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 5a0bd27fd11a..46d9de54179e 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -92,6 +92,7 @@
#define RISCV_ISA_EXT_ZCF 83
#define RISCV_ISA_EXT_ZCMOP 84
#define RISCV_ISA_EXT_ZAWRS 85
+#define RISCV_ISA_EXT_SVVPTC 86
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 7e9a84a005ed..7b038f3b7cb0 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -14,6 +14,11 @@
#define INVALID_CONTEXT UINT_MAX
+#ifdef CONFIG_SMP
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
struct fwnode_handle *riscv_get_intc_hwnode(void);
diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h
deleted file mode 100644
index fa17e01d9ab2..000000000000
--- a/arch/riscv/include/asm/mmzone.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MMZONE_H
-#define __ASM_MMZONE_H
-
-#ifdef CONFIG_NUMA
-
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* CONFIG_NUMA */
-#endif /* __ASM_MMZONE_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 7ede2111c591..32d308a3355f 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -112,11 +112,13 @@ struct kernel_mapping {
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
/* Offset between kernel mapping virtual address and kernel load address */
- unsigned long va_kernel_pa_offset;
- unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
+ unsigned long va_kernel_xip_text_pa_offset;
+ unsigned long va_kernel_xip_data_pa_offset;
uintptr_t xiprom;
uintptr_t xiprom_sz;
+#else
+ unsigned long va_kernel_pa_offset;
#endif
};
@@ -134,12 +136,18 @@ extern phys_addr_t phys_ram_base;
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif
+
+#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = (unsigned long)(y); \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
- (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
- (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+ (_y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \
})
+#else
+#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
+#endif
+
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
#ifndef CONFIG_DEBUG_VIRTUAL
@@ -147,12 +155,17 @@ void *linear_mapping_pa_to_va(unsigned long x);
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif
+
+#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = (unsigned long)(y); \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
- (_y - kernel_map.va_kernel_xip_pa_offset) : \
- (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \
+ (_y - kernel_map.va_kernel_xip_text_pa_offset) : \
+ (_y - kernel_map.va_kernel_xip_data_pa_offset); \
})
+#else
+#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
+#endif
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
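
With split text/data offsets, the XIP translation above reduces to the following (an illustrative C rendering of the macro; not new API):

	/* Sketch: kernel_mapping_pa_to_va() for XIP, as plain C. */
	static inline void *xip_kernel_pa_to_va(unsigned long pa)
	{
		if (pa < phys_ram_base)		/* text/rodata still in flash */
			return (void *)(pa + kernel_map.va_kernel_xip_text_pa_offset);
		/* writable data, copied to RAM */
		return (void *)(pa + kernel_map.va_kernel_xip_data_pa_offset);
	}
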
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 089f3c9f56a3..e79f15293492 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -107,13 +107,6 @@
#endif
-#ifdef CONFIG_XIP_KERNEL
-#define XIP_OFFSET SZ_32M
-#define XIP_OFFSET_MASK (SZ_32M - 1)
-#else
-#define XIP_OFFSET 0
-#endif
-
#ifndef __ASSEMBLY__
#include <asm/page.h>
@@ -142,11 +135,14 @@
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
+ extern char _sdata[], _start[], _end[]; \
+ uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_sdata - (uintptr_t)&_start; \
+ uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_end - (uintptr_t)&_start; \
uintptr_t __a = (uintptr_t)(addr); \
- (__a >= CONFIG_XIP_PHYS_ADDR && \
- __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
- __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
- __a; \
+ (__a >= __rom_start_data && __a < __rom_end_data) ? \
+ __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \
})
#else
#define XIP_FIXUP(addr) (addr)
@@ -501,6 +497,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
+ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
+ : : : : svvptc);
+
/*
* The kernel assumes that TLBs don't cache invalid entries, but
* in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
@@ -510,6 +509,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
*/
while (nr--)
local_flush_tlb_page(address + nr * PAGE_SIZE);
+
+svvptc:;
+ /*
+ * Svvptc guarantees that the new valid pte will be visible within
+ * a bounded timeframe, so when the uarch does not cache invalid
+ * entries, we don't have to do anything.
+ */
}
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
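
The asm goto above is the boot-time patching idiom used throughout this series: ALTERNATIVE() leaves a nop when Svvptc is absent and patches in a jump to the svvptc label when it is present. A minimal skeleton of the idiom (slow_path()/fast_path() are hypothetical placeholders):

	/* Sketch: two-path function selected by a boot-time patched branch. */
	static void example(void)
	{
		asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0,
				     RISCV_ISA_EXT_SVVPTC, 1)
			 : : : : svvptc);

		slow_path();	/* extension absent: the nop falls through */
		return;

	svvptc:
		fast_path();	/* extension present: the nop becomes a jump */
	}
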
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 7bd3746028c9..98f631b051db 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -159,6 +159,7 @@ struct riscv_pmu_snapshot_data {
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
+#define RISCV_PLAT_FW_EVENT 0xFFFF
/** General pmu event codes specified in SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index ec11001c3fe0..ab92fc84e1fc 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -46,7 +46,7 @@ bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_STRICT_KERNEL_RWX
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL)
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
index 63acaecc3374..2f901a410586 100644
--- a/arch/riscv/include/asm/sparsemem.h
+++ b/arch/riscv/include/asm/sparsemem.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_64BIT
#define MAX_PHYSMEM_BITS 56
#else
-#define MAX_PHYSMEM_BITS 34
+#define MAX_PHYSMEM_BITS 32
#endif /* CONFIG_64BIT */
#define SECTION_SIZE_BITS 27
#endif /* CONFIG_SPARSEMEM */
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index a96b1fea24fe..5ba77f60bf0b 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -19,6 +19,7 @@ extern asmlinkage void *__memcpy(void *, const void *, size_t);
extern asmlinkage void *memmove(void *, const void *, size_t);
extern asmlinkage void *__memmove(void *, const void *, size_t);
+#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
#define __HAVE_ARCH_STRCMP
extern asmlinkage int strcmp(const char *cs, const char *ct);
@@ -27,6 +28,7 @@ extern asmlinkage __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNCMP
extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count);
+#endif
/* For those files which don't want to check by kasan. */
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index fca5c6be2b81..ebe52f96da34 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -61,6 +61,13 @@ struct thread_info {
void *scs_base;
void *scs_sp;
#endif
+#ifdef CONFIG_64BIT
+ /*
+ * Used in handle_exception() to save a0, a1 and a2 before knowing if we
+ * can access the kernel stack.
+ */
+ unsigned long a0, a1, a2;
+#endif
};
#ifdef CONFIG_SHADOW_CALL_STACK
@@ -112,8 +119,4 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
-#define _TIF_WORK_MASK \
- (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
-
#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h
index 61183688bdd5..fe1a8bf6902d 100644
--- a/arch/riscv/include/asm/topology.h
+++ b/arch/riscv/include/asm/topology.h
@@ -4,6 +4,10 @@
#include <linux/arch_topology.h>
+#ifdef CONFIG_NUMA
+#include <asm/numa.h>
+#endif
+
/* Replace task scheduler's default frequency-invariant accounting */
#define arch_scale_freq_tick topology_scale_freq_tick
#define arch_set_freq_scale topology_set_freq_scale
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 51f6dfe19745..fefe94dc98e2 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
index b65bf6306f69..f3d56299bc22 100644
--- a/arch/riscv/include/asm/xip_fixup.h
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -9,18 +9,36 @@
#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
- REG_L t0, _xip_fixup
+ /* Fix up an address in flash into an address in RAM, early during
+ * boot before the MMU is up. Generated code "thinks" data is in
+ * flash, but it is actually in RAM (the data also exists in flash,
+ * but flash is read-only, so we must use the copy residing in RAM).
+ *
+ * The start of data in flash is _sdata and the start of data in RAM
+ * is CONFIG_PHYS_RAM_BASE, so this fix-up essentially does:
+ * reg += CONFIG_PHYS_RAM_BASE - _sdata
+ */
+ li t0, CONFIG_PHYS_RAM_BASE
add \reg, \reg, t0
+ la t0, _sdata
+ sub \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
+ /* In the linker script, at the transition from the read-only section
+ * to the writable section, the VMA is increased while the LMA remains
+ * the same. (See in the linker script how _sdata, __data_loc and
+ * LOAD_OFFSET are set up.)
+ *
+ * Consequently, early during boot before the MMU is up, the generated
+ * code reads the "writable" section at the wrong addresses: the
+ * compiler generates code against the VMA, while the data actually
+ * resides in flash at the LMA.
+ */
+ la t0, _sdata
+ sub \reg, \reg, t0
la t0, __data_loc
- REG_L t1, _xip_phys_offset
- sub \reg, \reg, t1
add \reg, \reg, t0
.endm
-
-_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
-_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
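
In C terms, the two fixups are plain offset arithmetic against _sdata (illustrative only; the real versions must run before the MMU is up, hence assembly):

	extern char _sdata[], __data_loc[];

	/* Sketch: XIP_FIXUP_OFFSET - redirect a flash data address to its RAM copy. */
	static unsigned long xip_fixup_offset(unsigned long reg)
	{
		return reg + CONFIG_PHYS_RAM_BASE - (unsigned long)_sdata;
	}

	/* Sketch: XIP_FIXUP_FLASH_OFFSET - map a writable-section VMA back to its LMA. */
	static unsigned long xip_fixup_flash_offset(unsigned long reg)
	{
		return reg - (unsigned long)_sdata + (unsigned long)__data_loc;
	}
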
diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c
index ff95aeebee3e..130769e3a99c 100644
--- a/arch/riscv/kernel/acpi_numa.c
+++ b/arch/riscv/kernel/acpi_numa.c
@@ -30,7 +30,7 @@
static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
-int __init acpi_numa_get_nid(unsigned int cpu)
+static int __init acpi_numa_get_nid(unsigned int cpu)
{
return acpi_early_node_map[cpu];
}
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index b09ca5f944f7..e94180ba432f 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -36,6 +36,8 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
+
+ OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
@@ -43,6 +45,11 @@ void asm_offsets(void)
#ifdef CONFIG_SHADOW_CALL_STACK
OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
#endif
+#ifdef CONFIG_64BIT
+ OFFSET(TASK_TI_A0, task_struct, thread_info.a0);
+ OFFSET(TASK_TI_A1, task_struct, thread_info.a1);
+ OFFSET(TASK_TI_A2, task_struct, thread_info.a2);
+#endif
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index d6c108c50cba..b320b1d9aa01 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -71,6 +71,11 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
this_leaf->type = type;
}
+int init_cache_level(unsigned int cpu)
+{
+ return init_of_cache_level(cpu);
+}
+
int populate_cache_leaves(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index b427188b28fc..3a8eeaa9310c 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -381,6 +381,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
+ __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC),
};
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 11c0d2e0becf..3c37661801f9 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -451,6 +451,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
*(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
ENCODE_CJTYPE_IMM(val - addr);
break;
+ case R_RISCV_ADD16:
+ *(u16 *)loc += val;
+ break;
+ case R_RISCV_SUB16:
+ *(u16 *)loc -= val;
+ break;
case R_RISCV_ADD32:
*(u32 *)loc += val;
break;
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index ac2e908d4418..c200d329d4bd 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -19,6 +19,79 @@
.section .irqentry.text, "ax"
+.macro new_vmalloc_check
+ REG_S a0, TASK_TI_A0(tp)
+ csrr a0, CSR_CAUSE
+ /* Exclude IRQs */
+ blt a0, zero, _new_vmalloc_restore_context_a0
+
+ REG_S a1, TASK_TI_A1(tp)
+ /* Only check new_vmalloc if we are handling a page/protection fault */
+ li a1, EXC_LOAD_PAGE_FAULT
+ beq a0, a1, _new_vmalloc_kernel_address
+ li a1, EXC_STORE_PAGE_FAULT
+ beq a0, a1, _new_vmalloc_kernel_address
+ li a1, EXC_INST_PAGE_FAULT
+ bne a0, a1, _new_vmalloc_restore_context_a1
+
+_new_vmalloc_kernel_address:
+ /* Is it a kernel address? */
+ csrr a0, CSR_TVAL
+ bge a0, zero, _new_vmalloc_restore_context_a1
+
+ /* Check if a new vmalloc mapping appeared that could explain the trap */
+ REG_S a2, TASK_TI_A2(tp)
+ /*
+ * Computes:
+ * a0 = &new_vmalloc[BIT_WORD(cpu)]
+ * a1 = BIT_MASK(cpu)
+ */
+ REG_L a2, TASK_TI_CPU(tp)
+ /*
+ * Compute the new_vmalloc element position:
+ * (cpu / 64) * 8 = (cpu >> 6) << 3
+ */
+ srli a1, a2, 6
+ slli a1, a1, 3
+ la a0, new_vmalloc
+ add a0, a0, a1
+ /*
+ * Compute the bit position in the new_vmalloc element:
+ * bit_pos = cpu % 64 = cpu - ((cpu >> 6) << 6)
+ *         = cpu - (((cpu >> 6) << 3) << 3)
+ */
+ slli a1, a1, 3
+ sub a1, a2, a1
+ /* Compute the "get mask": 1 << bit_pos */
+ li a2, 1
+ sll a1, a2, a1
+
+ /* Check the value of new_vmalloc for this cpu */
+ REG_L a2, 0(a0)
+ and a2, a2, a1
+ beq a2, zero, _new_vmalloc_restore_context
+
+ /* Atomically reset the current cpu bit in new_vmalloc */
+ amoxor.d a0, a1, (a0)
+
+ /* Only emit a sfence.vma if the uarch caches invalid entries */
+ ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)
+
+ REG_L a0, TASK_TI_A0(tp)
+ REG_L a1, TASK_TI_A1(tp)
+ REG_L a2, TASK_TI_A2(tp)
+ csrw CSR_SCRATCH, x0
+ sret
+
+_new_vmalloc_restore_context:
+ REG_L a2, TASK_TI_A2(tp)
+_new_vmalloc_restore_context_a1:
+ REG_L a1, TASK_TI_A1(tp)
+_new_vmalloc_restore_context_a0:
+ REG_L a0, TASK_TI_A0(tp)
+.endm
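
In C terms, the macro above amounts to the following check (a sketch; the real code must run with only a0-a2 as scratch registers and before the kernel stack is known to be mapped, hence assembly):

	/* Sketch: C equivalent of new_vmalloc_check's bitmap test. */
	static bool new_vmalloc_fault_pending(unsigned int cpu)
	{
		u64 *word = &new_vmalloc[cpu / 64];	/* (cpu >> 6) * 8 bytes */
		u64 mask = 1ULL << (cpu % 64);		/* 1 << (cpu % 64) */

		if (!(READ_ONCE(*word) & mask))
			return false;

		/* Atomically clear our bit, like the amoxor.d above. */
		__atomic_fetch_xor(word, mask, __ATOMIC_RELAXED);
		return true;
	}
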
+
+
SYM_CODE_START(handle_exception)
/*
* If coming from userspace, preserve the user thread pointer and load
@@ -30,6 +103,20 @@ SYM_CODE_START(handle_exception)
.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
+
+#ifdef CONFIG_64BIT
+ /*
+ * The RISC-V kernel does not eagerly emit a sfence.vma after each
+ * new vmalloc mapping, which may result in exceptions:
+ * - if the uarch caches invalid entries, the new mapping would not be
+ * observed by the page table walker and an invalidation is needed.
+ * - if the uarch does not cache invalid entries, a reordered access
+ * could "miss" the new mapping and trap: in that case, we only need
+ * to retry the access, no sfence.vma is required.
+ */
+ new_vmalloc_check
+#endif
+
REG_S sp, TASK_TI_KERNEL_SP(tp)
#ifdef CONFIG_VMAP_STACK
@@ -239,8 +326,8 @@ SYM_CODE_START(ret_from_fork)
jalr s0
1:
move a0, sp /* pt_regs */
- la ra, ret_from_exception
- tail syscall_exit_to_user_mode
+ call syscall_exit_to_user_mode
+ j ret_from_exception
SYM_CODE_END(ret_from_fork)
#ifdef CONFIG_IRQ_STACKS
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 906f9a3a5d65..1cd461f3d872 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -787,8 +787,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
int res;
unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
struct hlist_head *relocation_hashtable;
- struct list_head used_buckets_list;
unsigned int hashtable_bits;
+ LIST_HEAD(used_buckets_list);
hashtable_bits = initialize_relocation_hashtable(num_relocations,
&relocation_hashtable);
@@ -796,8 +796,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
if (!relocation_hashtable)
return -ENOMEM;
- INIT_LIST_HEAD(&used_buckets_list);
-
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index 3348a61de7d9..c7468af77c66 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -6,37 +6,9 @@
#include <asm/stacktrace.h>
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
- unsigned long fp, unsigned long reg_ra)
+static bool fill_callchain(void *entry, unsigned long pc)
{
- struct stackframe buftail;
- unsigned long ra = 0;
- unsigned long __user *user_frame_tail =
- (unsigned long __user *)(fp - sizeof(struct stackframe));
-
- /* Check accessibility of one struct frame_tail beyond */
- if (!access_ok(user_frame_tail, sizeof(buftail)))
- return 0;
- if (__copy_from_user_inatomic(&buftail, user_frame_tail,
- sizeof(buftail)))
- return 0;
-
- if (reg_ra != 0)
- ra = reg_ra;
- else
- ra = buftail.ra;
-
- fp = buftail.fp;
- if (ra != 0)
- perf_callchain_store(entry, ra);
- else
- return 0;
-
- return fp;
+ return perf_callchain_store(entry, pc) == 0;
}
/*
@@ -56,19 +28,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- unsigned long fp = 0;
-
- fp = regs->s0;
- perf_callchain_store(entry, regs->epc);
-
- fp = user_backtrace(entry, fp, regs->ra);
- while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
- fp = user_backtrace(entry, fp, 0);
-}
-
-static bool fill_callchain(void *entry, unsigned long pc)
-{
- return perf_callchain_store(entry, pc) == 0;
+ arch_stack_walk_user(fill_callchain, entry, regs);
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 50bc5ef7dd2f..d5bf1bc7de62 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -5,6 +5,7 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
-Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
$(call cc-option,-mbranch-protection=none) \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
+ -include $(srctree)/include/linux/hidden.h \
-D__DISABLE_EXPORTS -ffreestanding \
-fno-asynchronous-unwind-tables -fno-unwind-tables \
$(call cc-option,-fno-addrsig)
@@ -16,6 +17,7 @@ KBUILD_CFLAGS += -mcmodel=medany
CFLAGS_cmdline_early.o += -D__NO_FORTIFY
CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY
+CFLAGS_fdt_early.o += -D__NO_FORTIFY
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \
@@ -32,5 +34,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE
$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
$(call if_changed_rule,cc_o_c)
-obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
+obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o archrandom_early.pi.o
extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/riscv/kernel/pi/archrandom_early.c b/arch/riscv/kernel/pi/archrandom_early.c
new file mode 100644
index 000000000000..3f05d3cf3b7b
--- /dev/null
+++ b/arch/riscv/kernel/pi/archrandom_early.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/csr.h>
+#include <linux/processor.h>
+
+#include "pi.h"
+
+/*
+ * To avoid rewriting code, include asm/archrandom.h and provide macro
+ * stubs for the helpers that are not available in this early environment.
+ */
+#undef riscv_has_extension_unlikely
+#define riscv_has_extension_likely(...) false
+#undef pr_err_once
+#define pr_err_once(...)
+
+#include <asm/archrandom.h>
+
+u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa)
+{
+ unsigned long seed = 0;
+
+ if (!fdt_early_match_extension_isa((const void *)dtb_pa, "zkr"))
+ return 0;
+
+ if (!csr_seed_long(&seed))
+ return 0;
+
+ return seed;
+}
diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
index f6d4dedffb84..fbcdc9e4e143 100644
--- a/arch/riscv/kernel/pi/cmdline_early.c
+++ b/arch/riscv/kernel/pi/cmdline_early.c
@@ -6,15 +6,9 @@
#include <asm/pgtable.h>
#include <asm/setup.h>
-static char early_cmdline[COMMAND_LINE_SIZE];
+#include "pi.h"
-/*
- * Declare the functions that are exported (but prefixed) here so that LLVM
- * does not complain it lacks the 'static' keyword (which, if added, makes
- * LLVM complain because the function is actually unused in this file).
- */
-u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
-bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+static char early_cmdline[COMMAND_LINE_SIZE];
static char *get_early_cmdline(uintptr_t dtb_pa)
{
diff --git a/arch/riscv/kernel/pi/fdt_early.c b/arch/riscv/kernel/pi/fdt_early.c
index 899610e042ab..9bdee2fafe47 100644
--- a/arch/riscv/kernel/pi/fdt_early.c
+++ b/arch/riscv/kernel/pi/fdt_early.c
@@ -2,13 +2,9 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/libfdt.h>
+#include <linux/ctype.h>
-/*
- * Declare the functions that are exported (but prefixed) here so that LLVM
- * does not complain it lacks the 'static' keyword (which, if added, makes
- * LLVM complain because the function is actually unused in this file).
- */
-u64 get_kaslr_seed(uintptr_t dtb_pa);
+#include "pi.h"
u64 get_kaslr_seed(uintptr_t dtb_pa)
{
@@ -28,3 +24,162 @@ u64 get_kaslr_seed(uintptr_t dtb_pa)
*prop = 0;
return ret;
}
+
+/**
+ * fdt_device_is_available - check if a device is available for use
+ *
+ * @fdt: pointer to the device tree blob
+ * @node: offset of the node whose property to find
+ *
+ * Returns true if the status property is absent or set to "okay" or "ok",
+ * false otherwise
+ */
+static bool fdt_device_is_available(const void *fdt, int node)
+{
+ const char *status;
+ int statlen;
+
+ status = fdt_getprop(fdt, node, "status", &statlen);
+ if (!status)
+ return true;
+
+ if (statlen > 0) {
+ if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+ return true;
+ }
+
+ return false;
+}
+
+/* Copy of fdt_nodename_eq_ */
+static int fdt_node_name_eq(const void *fdt, int offset,
+ const char *s)
+{
+ int olen;
+ int len = strlen(s);
+ const char *p = fdt_get_name(fdt, offset, &olen);
+
+ if (!p || olen < len)
+ /* short match */
+ return 0;
+
+ if (memcmp(p, s, len) != 0)
+ return 0;
+
+ if (p[len] == '\0')
+ return 1;
+ else if (!memchr(s, '@', len) && (p[len] == '@'))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * isa_string_contains - check if isa string contains an extension
+ *
+ * @isa_str: isa string to search
+ * @ext_name: the extension to search for
+ *
+ * Returns true if the extension is in the given isa string,
+ * false otherwise
+ */
+static bool isa_string_contains(const char *isa_str, const char *ext_name)
+{
+ size_t i, single_end, len = strlen(ext_name);
+ char ext_end;
+
+ /* A valid ISA string must at least begin with rv32 or rv64 */
+ if (strlen(isa_str) < 4)
+ return false;
+
+ if (len == 1) {
+ single_end = strcspn(isa_str, "sSxXzZ");
+ /* Search for single chars between rv32/64 and multi-letter extensions */
+ for (i = 4; i < single_end; i++) {
+ if (tolower(isa_str[i]) == ext_name[0])
+ return true;
+ }
+ return false;
+ }
+
+ /* Skip to start of multi-letter extensions */
+ isa_str = strpbrk(isa_str, "sSxXzZ");
+ while (isa_str) {
+ if (strncasecmp(isa_str, ext_name, len) == 0) {
+ ext_end = isa_str[len];
+ /* Check if matches the whole extension. */
+ if (ext_end == '\0' || ext_end == '_')
+ return true;
+ }
+ /* Multi-letter extensions must be separated from other multi-letter
+ * extensions with an "_", so the end of a multi-letter extension is
+ * either the null character or the "_" at the start of the next
+ * multi-letter extension.
+ */
+ isa_str = strchr(isa_str, '_');
+ if (isa_str)
+ isa_str++;
+ }
+
+ return false;
+}
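
A few worked examples of the matching rules above (expected results, illustrative):

	/* Sketch: expected behaviour of isa_string_contains(). */
	isa_string_contains("rv64imafdc_zicsr_zkr", "zkr"); /* true: whole multi-letter match */
	isa_string_contains("rv64imafdc_zicsr_zkr", "c");   /* true: single letter before z* block */
	isa_string_contains("rv64imafdc_zicsr_zkr", "zk");  /* false: only a prefix of zkr */
	isa_string_contains("rv32i", "m");                  /* false: extension not present */
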
+
+/**
+ * early_cpu_isa_ext_available - check if cpu node has an extension
+ *
+ * @fdt: pointer to the device tree blob
+ * @node: offset of the cpu node
+ * @ext_name: the extension to search for
+ *
+ * Returns true if the cpu node has the extension,
+ * false otherwise
+ */
+static bool early_cpu_isa_ext_available(const void *fdt, int node, const char *ext_name)
+{
+ const void *prop;
+ int len;
+
+ prop = fdt_getprop(fdt, node, "riscv,isa-extensions", &len);
+ if (prop && fdt_stringlist_contains(prop, len, ext_name))
+ return true;
+
+ prop = fdt_getprop(fdt, node, "riscv,isa", &len);
+ if (prop && isa_string_contains(prop, ext_name))
+ return true;
+
+ return false;
+}
+
+/**
+ * fdt_early_match_extension_isa - check if all cpu nodes have an extension
+ *
+ * @fdt: pointer to the device tree blob
+ * @ext_name: the extension to search for
+ *
+ * Returns true if all available cpu nodes have the extension,
+ * false otherwise
+ */
+bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name)
+{
+ int node, parent;
+ bool ret = false;
+
+ parent = fdt_path_offset(fdt, "/cpus");
+ if (parent < 0)
+ return false;
+
+ fdt_for_each_subnode(node, fdt, parent) {
+ if (!fdt_node_name_eq(fdt, node, "cpu"))
+ continue;
+
+ if (!fdt_device_is_available(fdt, node))
+ continue;
+
+ if (!early_cpu_isa_ext_available(fdt, node, ext_name))
+ return false;
+
+ ret = true;
+ }
+
+ return ret;
+}
diff --git a/arch/riscv/kernel/pi/pi.h b/arch/riscv/kernel/pi/pi.h
new file mode 100644
index 000000000000..21141d84fea6
--- /dev/null
+++ b/arch/riscv/kernel/pi/pi.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _RISCV_PI_H_
+#define _RISCV_PI_H_
+
+#include <linux/types.h>
+
+/*
+ * The following functions are exported (but prefixed). Declare them here so
+ * that LLVM does not complain it lacks the 'static' keyword (which, if
+ * added, makes LLVM complain because the function is unused).
+ */
+
+u64 get_kaslr_seed(uintptr_t dtb_pa);
+u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa);
+bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
+
+bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name);
+
+#endif /* _RISCV_PI_H_ */
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index e4bc61c4e58a..e3142d8a6e28 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/personality.h>
#include <asm/unistd.h>
#include <asm/processor.h>
@@ -26,6 +27,7 @@
#include <asm/cpuidle.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
+#include <asm/exec.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
@@ -99,6 +101,13 @@ void show_regs(struct pt_regs *regs)
dump_backtrace(regs, NULL, KERN_DEFAULT);
}
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_u32_below(PAGE_SIZE);
+ return sp & ~0xf;
+}
+
#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
index a72879b4249a..5ab1c7e1a6ed 100644
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -12,9 +12,6 @@
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 8e6eb64459af..c180a647a30e 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kexec.h>
+#include <linux/kgdb.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
@@ -21,6 +22,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
+#include <linux/nmi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -33,6 +35,8 @@ enum ipi_message_type {
IPI_CPU_CRASH_STOP,
IPI_IRQ_WORK,
IPI_TIMER,
+ IPI_CPU_BACKTRACE,
+ IPI_KGDB_ROUNDUP,
IPI_MAX
};
@@ -113,6 +117,7 @@ void arch_irq_work_raise(void)
static irqreturn_t handle_IPI(int irq, void *data)
{
+ unsigned int cpu = smp_processor_id();
int ipi = irq - ipi_virq_base;
switch (ipi) {
@@ -126,7 +131,7 @@ static irqreturn_t handle_IPI(int irq, void *data)
ipi_stop();
break;
case IPI_CPU_CRASH_STOP:
- ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs());
+ ipi_cpu_crash_stop(cpu, get_irq_regs());
break;
case IPI_IRQ_WORK:
irq_work_run();
@@ -136,8 +141,14 @@ static irqreturn_t handle_IPI(int irq, void *data)
tick_receive_broadcast();
break;
#endif
+ case IPI_CPU_BACKTRACE:
+ nmi_cpu_backtrace(get_irq_regs());
+ break;
+ case IPI_KGDB_ROUNDUP:
+ kgdb_nmicallback(cpu, get_irq_regs());
+ break;
default:
- pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
+ pr_warn("CPU%d: unhandled IPI%d\n", cpu, ipi);
break;
}
@@ -203,6 +214,8 @@ static const char * const ipi_names[] = {
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
+ [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
@@ -323,3 +336,29 @@ void arch_smp_send_reschedule(int cpu)
send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
+
+static void riscv_backtrace_ipi(cpumask_t *mask)
+{
+ send_ipi_mask(mask, IPI_CPU_BACKTRACE);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, riscv_backtrace_ipi);
+}
+
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(void)
+{
+ int this_cpu = raw_smp_processor_id();
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ /* No need to round up ourselves */
+ if (cpu == this_cpu)
+ continue;
+
+ send_ipi_single(cpu, IPI_KGDB_ROUNDUP);
+ }
+}
+#endif
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index c6d5de22463f..153a2db4c5fa 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -162,3 +162,46 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void
{
walk_stackframe(task, regs, consume_entry, cookie);
}
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+ void *cookie, unsigned long fp,
+ unsigned long reg_ra)
+{
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ ra = reg_ra ? : buftail.ra;
+
+ fp = buftail.fp;
+ if (!ra || !consume_entry(cookie, ra))
+ return 0;
+
+ return fp;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = 0;
+
+ fp = regs->s0;
+ if (!consume_entry(cookie, regs->epc))
+ return;
+
+ fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+ while (fp && !(fp & 0x7))
+ fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}
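
Any stack_trace_consume_fn can now drive the user unwinder, not just perf; a minimal caller sketch (the utrace_* names are hypothetical):

	/* Sketch: collect user-space return addresses into a fixed buffer. */
	struct utrace_buf {
		unsigned long pcs[64];
		unsigned int nr;
	};

	static bool utrace_consume(void *cookie, unsigned long pc)
	{
		struct utrace_buf *b = cookie;

		if (b->nr >= ARRAY_SIZE(b->pcs))
			return false;	/* stop the walk */
		b->pcs[b->nr++] = pc;
		return true;
	}

	/* e.g.: arch_stack_walk_user(utrace_consume, &buf, task_pt_regs(current)); */
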
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index f7ef8ad9b550..960feb1526ca 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -45,7 +45,7 @@ $(obj)/vdso.o: $(obj)/vdso.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
-LDFLAGS_vdso.so.dbg = -shared -S -soname=linux-vdso.so.1 \
+LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \
--build-id=sha1 --hash-style=both --eh-frame-hdr
# strip rule for the .so file
diff --git a/arch/riscv/kernel/vendor_extensions/andes.c b/arch/riscv/kernel/vendor_extensions/andes.c
index ec688c88456a..51f302b6d503 100644
--- a/arch/riscv/kernel/vendor_extensions/andes.c
+++ b/arch/riscv/kernel/vendor_extensions/andes.c
@@ -8,7 +8,7 @@
#include <linux/types.h>
/* All Andes vendor extensions supported in Linux */
-const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
+static const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
__RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU),
};
diff --git a/arch/riscv/kernel/vmcore_info.c b/arch/riscv/kernel/vmcore_info.c
index 6d7a22522d63..d5e448aa90e7 100644
--- a/arch/riscv/kernel/vmcore_info.c
+++ b/arch/riscv/kernel/vmcore_info.c
@@ -19,6 +19,13 @@ void arch_crash_save_vmcoreinfo(void)
#endif
#endif
vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+#ifdef CONFIG_XIP_KERNEL
+ /* TODO: Communicate with crash-utility developers on the information to
+ * export. The XIP case is more complicated, because the virtual-physical
+ * address offset depends on whether the address is in ROM or in RAM.
+ */
+#else
vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
kernel_map.va_kernel_pa_offset);
+#endif
}
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 8c3daa1b0531..a7611789bad5 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -14,6 +14,7 @@
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
+#include <asm/set_memory.h>
OUTPUT_ARCH(riscv)
ENTRY(_start)
@@ -65,10 +66,10 @@ SECTIONS
* From this point, stuff is considered writable and will be copied to RAM
*/
__data_loc = ALIGN(PAGE_SIZE); /* location in file */
- . = KERNEL_LINK_ADDR + XIP_OFFSET; /* location in memory */
+ . = ALIGN(SECTION_ALIGN); /* location in memory */
#undef LOAD_OFFSET
-#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK))
+#define LOAD_OFFSET (KERNEL_LINK_ADDR + _sdata - __data_loc)
_sdata = .; /* Start of data section */
_data = .;
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index bab2ec34cd87..f3427f6de608 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -20,7 +20,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);
@@ -35,7 +35,7 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
kvm_riscv_aia_disable();
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 2b369f51b0a5..8eec6b69a875 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -3,9 +3,11 @@ lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
lib-y += memmove.o
+ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),)
lib-y += strcmp.o
lib-y += strlen.o
lib-y += strncmp.o
+endif
lib-y += csum.o
ifeq ($(CONFIG_MMU), y)
lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
index 35f358e70bdb..da23b8347e2d 100644
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -111,3 +111,5 @@ SYM_FUNC_START(__memset)
ret
SYM_FUNC_END(__memset)
SYM_FUNC_ALIAS_WEAK(memset, __memset)
+SYM_FUNC_ALIAS(__pi_memset, __memset)
+SYM_FUNC_ALIAS(__pi___memset, __memset)
diff --git a/arch/riscv/lib/strcmp.S b/arch/riscv/lib/strcmp.S
index 687b2bea5c43..57a5c0066231 100644
--- a/arch/riscv/lib/strcmp.S
+++ b/arch/riscv/lib/strcmp.S
@@ -120,3 +120,5 @@ strcmp_zbb:
.option pop
#endif
SYM_FUNC_END(strcmp)
+SYM_FUNC_ALIAS(__pi_strcmp, strcmp)
+EXPORT_SYMBOL(strcmp)
diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S
index 8ae3064e45ff..962983b73251 100644
--- a/arch/riscv/lib/strlen.S
+++ b/arch/riscv/lib/strlen.S
@@ -131,3 +131,4 @@ strlen_zbb:
#endif
SYM_FUNC_END(strlen)
SYM_FUNC_ALIAS(__pi_strlen, strlen)
+EXPORT_SYMBOL(strlen)
diff --git a/arch/riscv/lib/strncmp.S b/arch/riscv/lib/strncmp.S
index aba5b3148621..7b2d0ff9ed6c 100644
--- a/arch/riscv/lib/strncmp.S
+++ b/arch/riscv/lib/strncmp.S
@@ -136,3 +136,5 @@ strncmp_zbb:
.option pop
#endif
SYM_FUNC_END(strncmp)
+SYM_FUNC_ALIAS(__pi_strncmp, strncmp)
+EXPORT_SYMBOL(strncmp)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 1785782c2e55..0e8c20adcd98 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -37,6 +37,8 @@
#include "../kernel/head.h"
+u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+
struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
@@ -917,7 +919,7 @@ static void __init relocate_kernel(void)
static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
{
- uintptr_t va, end_va;
+ uintptr_t va, start_va, end_va;
/* Map the flash resident part */
end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
@@ -927,10 +929,11 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
+ start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start;
end_va = kernel_map.virt_addr + kernel_map.size;
- for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
+ for (va = start_va; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
- kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
+ kernel_map.phys_addr + (va - start_va),
PMD_SIZE, PAGE_KERNEL);
}
#else
@@ -1048,6 +1051,7 @@ static void __init pt_ops_set_late(void)
#ifdef CONFIG_RANDOMIZE_BASE
extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
+extern u64 __init __pi_get_kaslr_seed_zkr(const uintptr_t dtb_pa);
static int __init print_nokaslr(char *p)
{
@@ -1068,10 +1072,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#ifdef CONFIG_RANDOMIZE_BASE
if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
- u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
+ u64 kaslr_seed = __pi_get_kaslr_seed_zkr(dtb_pa);
u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
u32 nr_pos;
+ if (kaslr_seed == 0)
+ kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
/*
* Compute the number of positions available: we are limited
* by the early page table that only has one PUD and we must
@@ -1098,11 +1104,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
- kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
+ kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
+ kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr
+ + (uintptr_t)&_sdata - (uintptr_t)&_start;
#else
kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
+ kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
#endif
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
@@ -1124,15 +1133,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
*/
kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
0UL : PAGE_OFFSET - kernel_map.phys_addr;
- kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
- /*
- * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
- * kernel, whereas for 64-bit kernel, the end of the virtual address
- * space is occupied by the modules/BPF/kernel mappings which reduces
- * the available size of the linear mapping.
- */
- memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);
+ memory_limit = KERN_VIRT_SIZE;
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 533ec9055fa0..4ae67324f992 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -9,6 +9,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
+ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
+ : : : : svvptc);
+
if (!pte_same(ptep_get(ptep), entry))
__set_pte_at(vma->vm_mm, ptep, entry);
/*
@@ -16,6 +19,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* the case that the PTE changed and the spurious fault case.
*/
return true;
+
+svvptc:
+ if (!pte_same(ptep_get(ptep), entry)) {
+ __set_pte_at(vma->vm_mm, ptep, entry);
+ /* Only the non-Svadu case is impacted here */
+ flush_tlb_page(vma, address);
+ return true;
+ }
+
+ return false;
}
int ptep_test_and_clear_young(struct vm_area_struct *vma,
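[Editor's note on the hunk above: ALTERNATIVE() patches the "nop" into a jump at boot when the Svvptc extension is detected, so neither path pays a runtime check. A minimal user-space analogue of the resulting control flow, with a plain flag standing in for the patched branch -- a sketch only, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    static bool cpu_has_svvptc;	/* stands in for the boot-time patching */

    static bool set_access_flags(bool pte_changed)
    {
        if (cpu_has_svvptc)
            goto svvptc;

        /* legacy behaviour: update the PTE and always report success,
         * leaving any spurious fault to be resolved elsewhere */
        if (pte_changed) {
            /* __set_pte_at(...) would update the PTE here */
        }
        return true;

    svvptc:
        if (pte_changed) {
            /* __set_pte_at(...); flush_tlb_page(...) */
            return true;
        }
        return false;
    }

    int main(void)
    {
        cpu_has_svvptc = true;
        printf("%d %d\n", set_access_flags(true), set_access_flags(false));
        return 0;
    }
]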
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
index f11945ee2490..fb9c917c9b45 100644
--- a/arch/riscv/purgatory/Makefile
+++ b/arch/riscv/purgatory/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o
+ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),)
purgatory-y += strcmp.o strlen.o strncmp.o
+endif
targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b0d0b3a8d196..d339fe4fdedf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -70,6 +70,7 @@ config S390
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_OPS if PCI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
select ARCH_HAS_FORTIFY_SOURCE
@@ -137,7 +138,6 @@ config S390
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
select DCACHE_WORD_ACCESS if !KMSAN
- select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
select FUNCTION_ALIGNMENT_16B if !CC_IS_GCC
@@ -514,6 +514,26 @@ config SCHED_TOPOLOGY
making when dealing with machines that have multi-threading,
multiple cores or multiple books.
+config SCHED_TOPOLOGY_VERTICAL
+ def_bool y
+ bool "Use vertical CPU polarization by default"
+ depends on SCHED_TOPOLOGY
+ help
+ Use vertical CPU polarization by default if available.
+ Without this option, the CPU polarization defaults to horizontal.
+
+config HIPERDISPATCH_ON
+ def_bool y
+ bool "Use hiperdispatch on vertical polarization by default"
+ depends on SCHED_TOPOLOGY
+ depends on PROC_SYSCTL
+ help
+ Hiperdispatch aims to improve the CPU scheduler's decision
+ making when using vertical polarization by adjusting CPU
+ capacities dynamically. Set this option to use hiperdispatch
+ on vertical polarization by default. This can be overridden
+ at runtime via the s390.hiperdispatch sysctl attribute.
+
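[Editor's note: the help text above names the s390.hiperdispatch sysctl as the runtime override. Assuming the usual sysctl-to-procfs mapping (the path below is inferred, not taken from this patch), the current setting could be read like this:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical user-space check of the sysctl named above */
        FILE *f = fopen("/proc/sys/s390/hiperdispatch", "r");
        int val;

        if (f && fscanf(f, "%d", &val) == 1)
            printf("hiperdispatch: %d\n", val);
        if (f)
            fclose(f);
        return 0;
    }
]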
source "kernel/Kconfig.hz"
config CERT_STORE
@@ -558,17 +578,13 @@ config EXPOLINE
If unsure, say N.
config EXPOLINE_EXTERN
- def_bool y if EXPOLINE
- depends on EXPOLINE
- depends on CC_IS_GCC && GCC_VERSION >= 110200
- depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
- prompt "Generate expolines as extern functions."
+ def_bool EXPOLINE && CC_IS_GCC && GCC_VERSION >= 110200 && \
+ $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
help
- This option is required for some tooling like kpatch. The kernel is
- compiled with -mindirect-branch=thunk-extern and requires a newer
- compiler.
-
- If unsure, say N.
+ Generate expolines as external functions if the compiler supports it.
+ This option is required for some tooling like kpatch, if expolines
+ are enabled. The kernel is compiled with
+ -mindirect-branch=thunk-extern, which requires a newer compiler.
choice
prompt "Expoline default"
diff --git a/arch/s390/Makefile.postlink b/arch/s390/Makefile.postlink
new file mode 100644
index 000000000000..df82f5410769
--- /dev/null
+++ b/arch/s390/Makefile.postlink
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+# ===========================================================================
+# Post-link s390 pass
+# ===========================================================================
+#
+# 1. Separate relocations from vmlinux into relocs.S.
+# 2. Strip relocations from vmlinux.
+
+PHONY := __archpost
+__archpost:
+
+-include include/config/auto.conf
+include $(srctree)/scripts/Kbuild.include
+
+CMD_RELOCS=arch/s390/tools/relocs
+OUT_RELOCS = arch/s390/boot
+quiet_cmd_relocs = RELOCS $(OUT_RELOCS)/relocs.S
+ cmd_relocs = \
+ mkdir -p $(OUT_RELOCS); \
+ $(CMD_RELOCS) $@ > $(OUT_RELOCS)/relocs.S
+
+quiet_cmd_strip_relocs = RSTRIP $@
+ cmd_strip_relocs = \
+ $(OBJCOPY) --remove-section='.rel.*' --remove-section='.rel__*' \
+ --remove-section='.rela.*' --remove-section='.rela__*' $@
+
+vmlinux: FORCE
+ $(call cmd,relocs)
+ $(call cmd,strip_relocs)
+
+clean:
+ @rm -f $(OUT_RELOCS)/relocs.S
+
+PHONY += FORCE clean
+
+FORCE:
+
+.PHONY: $(PHONY)
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 4f476884d340..8bc1308ac892 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -11,35 +11,23 @@ KASAN_SANITIZE := n
KCSAN_SANITIZE := n
KMSAN_SANITIZE := n
-KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
-KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
-
#
-# Use minimum architecture for als.c to be able to print an error
+# Use minimum architecture level so it is possible to print an error
# message if the kernel is started on a machine which is too old
#
-ifndef CONFIG_CC_IS_CLANG
-CC_FLAGS_MARCH_MINIMUM := -march=z900
-else
CC_FLAGS_MARCH_MINIMUM := -march=z10
-endif
-
-ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM))
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += $(CC_FLAGS_MARCH_MINIMUM)
-AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
-AFLAGS_mem.o += $(CC_FLAGS_MARCH_MINIMUM)
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += $(CC_FLAGS_MARCH_MINIMUM)
-CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o += $(CC_FLAGS_MARCH_MINIMUM)
-endif
+
+KBUILD_AFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_AFLAGS_DECOMPRESSOR))
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_CFLAGS_DECOMPRESSOR))
+KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM)
+KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o uv.o
+obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o
+obj-y += uv.o printk.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
@@ -109,11 +97,9 @@ OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-CMD_RELOCS=arch/s390/tools/relocs
-quiet_cmd_relocs = RELOCS $@
- cmd_relocs = $(CMD_RELOCS) $< > $@
-$(obj)/relocs.S: vmlinux FORCE
- $(call if_changed,relocs)
+# relocs.S is created by the vmlinux postlink step.
+$(obj)/relocs.S: vmlinux
+ @true
suffix-$(CONFIG_KERNEL_GZIP) := .gz
suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index 47c48fbfb563..11e0c3d5dbc8 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -9,42 +9,8 @@
#include <asm/sclp.h>
#include "boot.h"
-/*
- * The code within this file will be called very early. It may _not_
- * access anything within the bss section, since that is not cleared
- * yet and may contain data (e.g. initrd) that must be saved by other
- * code.
- * For temporary objects the stack (16k) should be used.
- */
-
static unsigned long als[] = { FACILITIES_ALS };
-static void u16_to_hex(char *str, u16 val)
-{
- int i, num;
-
- for (i = 1; i <= 4; i++) {
- num = (val >> (16 - 4 * i)) & 0xf;
- if (num >= 10)
- num += 7;
- *str++ = '0' + num;
- }
- *str = '\0';
-}
-
-static void print_machine_type(void)
-{
- static char mach_str[80] = "Detected machine-type number: ";
- char type_str[5];
- struct cpuid id;
-
- get_cpu_id(&id);
- u16_to_hex(type_str, id.machine);
- strcat(mach_str, type_str);
- strcat(mach_str, "\n");
- sclp_early_printk(mach_str);
-}
-
static void u16_to_decimal(char *str, u16 val)
{
int div = 1;
@@ -80,8 +46,7 @@ void print_missing_facilities(void)
* z/VM adds a four character prefix.
*/
if (strlen(als_str) > 70) {
- strcat(als_str, "\n");
- sclp_early_printk(als_str);
+ boot_printk("%s\n", als_str);
*als_str = '\0';
}
u16_to_decimal(val_str, i * BITS_PER_LONG + j);
@@ -89,16 +54,18 @@ void print_missing_facilities(void)
first = 0;
}
}
- strcat(als_str, "\n");
- sclp_early_printk(als_str);
+ boot_printk("%s\n", als_str);
}
static void facility_mismatch(void)
{
- sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
- print_machine_type();
+ struct cpuid id;
+
+ get_cpu_id(&id);
+ boot_printk("The Linux kernel requires more recent processor hardware\n");
+ boot_printk("Detected machine-type number: %4x\n", id.machine);
print_missing_facilities();
- sclp_early_printk("See Principles of Operations for facility bits\n");
+ boot_printk("See Principles of Operations for facility bits\n");
disabled_wait();
}
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 83e2ce050b6c..7521a9d75fa2 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -70,7 +70,7 @@ void print_pgm_check_info(void);
unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max);
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
-void __printf(1, 2) decompressor_printk(const char *fmt, ...);
+void __printf(1, 2) boot_printk(const char *fmt, ...);
void print_stacktrace(unsigned long sp);
void error(char *m);
int get_random(unsigned long limit, unsigned long *value);
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index 637c29c3f6e3..0a47b16f6412 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -299,11 +299,11 @@ SYM_CODE_END(startup_normal)
# the save area and does disabled wait with a faulty address.
#
SYM_CODE_START_LOCAL(startup_pgm_check_handler)
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ stmg %r8,%r15,__LC_SAVE_AREA
la %r8,4095
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8)
stmg %r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8)
- mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC
+ mvc __LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA
mvc __LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW
mvc __LC_RETURN_PSW(16),__LC_PGM_OLD_PSW
ni __LC_RETURN_PSW,0xfc # remove IO and EX bits
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 1773b72a6a7b..557462e62cd7 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -215,7 +215,7 @@ static void check_cleared_facilities(void)
for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((stfle_fac_list[i] & als[i]) != als[i]) {
- sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
+ boot_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities();
break;
}
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index bd3bf5ef472d..f864d2bff775 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -32,7 +32,7 @@ struct prng_parm {
static int check_prng(void)
{
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
- sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
+ boot_printk("KASLR disabled: CPU has no PRNG\n");
return 0;
}
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
index 5352b3d356da..5abe59fb3bc0 100644
--- a/arch/s390/boot/pgm_check_info.c
+++ b/arch/s390/boot/pgm_check_info.c
@@ -11,131 +11,19 @@
#include <asm/uv.h>
#include "boot.h"
-const char hex_asc[] = "0123456789abcdef";
-
-static char *as_hex(char *dst, unsigned long val, int pad)
-{
- char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
-
- for (*p-- = 0; p >= dst; val >>= 4)
- *p-- = hex_asc[val & 0x0f];
- return end;
-}
-
-static char *symstart(char *p)
-{
- while (*p)
- p--;
- return p + 1;
-}
-
-static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
-{
- /* symbol entries are in a form "10000 c4 startup\0" */
- char *a = _decompressor_syms_start;
- char *b = _decompressor_syms_end;
- unsigned long start;
- unsigned long size;
- char *pivot;
- char *endp;
-
- while (a < b) {
- pivot = symstart(a + (b - a) / 2);
- start = simple_strtoull(pivot, &endp, 16);
- size = simple_strtoull(endp + 1, &endp, 16);
- if (ip < start) {
- b = pivot;
- continue;
- }
- if (ip > start + size) {
- a = pivot + strlen(pivot) + 1;
- continue;
- }
- *off = ip - start;
- *len = size;
- return endp + 1;
- }
- return NULL;
-}
-
-static noinline char *strsym(void *ip)
-{
- static char buf[64];
- unsigned short off;
- unsigned short len;
- char *p;
-
- p = findsym((unsigned long)ip, &off, &len);
- if (p) {
- strncpy(buf, p, sizeof(buf));
- /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
- p = buf + strnlen(buf, sizeof(buf) - 15);
- strcpy(p, "+0x");
- p = as_hex(p + 3, off, 0);
- strcpy(p, "/0x");
- as_hex(p + 3, len, 0);
- } else {
- as_hex(buf, (unsigned long)ip, 16);
- }
- return buf;
-}
-
-void decompressor_printk(const char *fmt, ...)
-{
- char buf[1024] = { 0 };
- char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
- unsigned long pad;
- char *p = buf;
- va_list args;
-
- va_start(args, fmt);
- for (; p < end && *fmt; fmt++) {
- if (*fmt != '%') {
- *p++ = *fmt;
- continue;
- }
- pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
- switch (*fmt) {
- case 's':
- p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
- break;
- case 'p':
- if (*++fmt != 'S')
- goto out;
- p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
- break;
- case 'l':
- if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
- goto out;
- p = as_hex(p, va_arg(args, unsigned long), pad);
- break;
- case 'x':
- if (end - p <= max(sizeof(int) * 2, pad))
- goto out;
- p = as_hex(p, va_arg(args, unsigned int), pad);
- break;
- default:
- goto out;
- }
- }
-out:
- va_end(args);
- sclp_early_printk(buf);
-}
-
void print_stacktrace(unsigned long sp)
{
struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
(unsigned long)_stack_end };
bool first = true;
- decompressor_printk("Call Trace:\n");
+ boot_printk("Call Trace:\n");
while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) {
struct stack_frame *sf = (struct stack_frame *)sp;
- decompressor_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
- " sp:%016lx [<%016lx>] %pS\n",
- sp, sf->gprs[8], (void *)sf->gprs[8]);
+ boot_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
+ " sp:%016lx [<%016lx>] %pS\n",
+ sp, sf->gprs[8], (void *)sf->gprs[8]);
if (sf->back_chain <= sp)
break;
sp = sf->back_chain;
@@ -148,34 +36,30 @@ void print_pgm_check_info(void)
unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area;
struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area);
- decompressor_printk("Linux version %s\n", kernel_version);
+ boot_printk("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
- decompressor_printk("Kernel command line: %s\n", early_command_line);
- decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
- get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
+ boot_printk("Kernel command line: %s\n", early_command_line);
+ boot_printk("Kernel fault: interruption code %04x ilc:%x\n",
+ get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
if (kaslr_enabled()) {
- decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
- decompressor_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
+ boot_printk("Kernel random base: %lx\n", __kaslr_offset);
+ boot_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys);
}
- decompressor_printk("PSW : %016lx %016lx (%pS)\n",
- get_lowcore()->psw_save_area.mask,
- get_lowcore()->psw_save_area.addr,
- (void *)get_lowcore()->psw_save_area.addr);
- decompressor_printk(
+ boot_printk("PSW : %016lx %016lx (%pS)\n",
+ get_lowcore()->psw_save_area.mask,
+ get_lowcore()->psw_save_area.addr,
+ (void *)get_lowcore()->psw_save_area.addr);
+ boot_printk(
" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri,
psw->eaba);
- decompressor_printk("GPRS: %016lx %016lx %016lx %016lx\n",
- gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
- decompressor_printk(" %016lx %016lx %016lx %016lx\n",
- gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
- decompressor_printk(" %016lx %016lx %016lx %016lx\n",
- gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
- decompressor_printk(" %016lx %016lx %016lx %016lx\n",
- gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
+ boot_printk("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
+ boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
+ boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
+ boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
print_stacktrace(get_lowcore()->gpregs_save_area[15]);
- decompressor_printk("Last Breaking-Event-Address:\n");
- decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
- (void *)get_lowcore()->pgm_last_break);
+ boot_printk("Last Breaking-Event-Address:\n");
+ boot_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
+ (void *)get_lowcore()->pgm_last_break);
}
diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c
index 4c9ad8258f7e..1d131a81cb8b 100644
--- a/arch/s390/boot/physmem_info.c
+++ b/arch/s390/boot/physmem_info.c
@@ -190,27 +190,27 @@ static void die_oom(unsigned long size, unsigned long align, unsigned long min,
enum reserved_range_type t;
int i;
- decompressor_printk("Linux version %s\n", kernel_version);
+ boot_printk("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
- decompressor_printk("Kernel command line: %s\n", early_command_line);
- decompressor_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
- size, align, min, max);
- decompressor_printk("Reserved memory ranges:\n");
+ boot_printk("Kernel command line: %s\n", early_command_line);
+ boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n",
+ size, align, min, max);
+ boot_printk("Reserved memory ranges:\n");
for_each_physmem_reserved_range(t, range, &start, &end) {
- decompressor_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
+ boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
total_reserved_mem += end - start;
}
- decompressor_printk("Usable online memory ranges (info source: %s [%x]):\n",
- get_physmem_info_source(), physmem_info.info_source);
+ boot_printk("Usable online memory ranges (info source: %s [%x]):\n",
+ get_physmem_info_source(), physmem_info.info_source);
for_each_physmem_usable_range(i, &start, &end) {
- decompressor_printk("%016lx %016lx\n", start, end);
+ boot_printk("%016lx %016lx\n", start, end);
total_mem += end - start;
}
- decompressor_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
- total_mem, total_reserved_mem,
- total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
+ boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n",
+ total_mem, total_reserved_mem,
+ total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
print_stacktrace(current_frame_address());
- sclp_early_printk("\n\n -- System halted\n");
+ boot_printk("\n\n -- System halted\n");
disabled_wait();
}
diff --git a/arch/s390/boot/printk.c b/arch/s390/boot/printk.c
new file mode 100644
index 000000000000..35f18f2b936e
--- /dev/null
+++ b/arch/s390/boot/printk.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/stdarg.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <asm/stacktrace.h>
+#include <asm/boot_data.h>
+#include <asm/lowcore.h>
+#include <asm/setup.h>
+#include <asm/sclp.h>
+#include <asm/uv.h>
+#include "boot.h"
+
+const char hex_asc[] = "0123456789abcdef";
+
+static char *as_hex(char *dst, unsigned long val, int pad)
+{
+ char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
+
+ for (*p-- = 0; p >= dst; val >>= 4)
+ *p-- = hex_asc[val & 0x0f];
+ return end;
+}
+
+static char *symstart(char *p)
+{
+ while (*p)
+ p--;
+ return p + 1;
+}
+
+static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
+{
+ /* symbol entries are in a form "10000 c4 startup\0" */
+ char *a = _decompressor_syms_start;
+ char *b = _decompressor_syms_end;
+ unsigned long start;
+ unsigned long size;
+ char *pivot;
+ char *endp;
+
+ while (a < b) {
+ pivot = symstart(a + (b - a) / 2);
+ start = simple_strtoull(pivot, &endp, 16);
+ size = simple_strtoull(endp + 1, &endp, 16);
+ if (ip < start) {
+ b = pivot;
+ continue;
+ }
+ if (ip > start + size) {
+ a = pivot + strlen(pivot) + 1;
+ continue;
+ }
+ *off = ip - start;
+ *len = size;
+ return endp + 1;
+ }
+ return NULL;
+}
+
+static noinline char *strsym(void *ip)
+{
+ static char buf[64];
+ unsigned short off;
+ unsigned short len;
+ char *p;
+
+ p = findsym((unsigned long)ip, &off, &len);
+ if (p) {
+ strncpy(buf, p, sizeof(buf));
+ /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
+ p = buf + strnlen(buf, sizeof(buf) - 15);
+ strcpy(p, "+0x");
+ p = as_hex(p + 3, off, 0);
+ strcpy(p, "/0x");
+ as_hex(p + 3, len, 0);
+ } else {
+ as_hex(buf, (unsigned long)ip, 16);
+ }
+ return buf;
+}
+
+void boot_printk(const char *fmt, ...)
+{
+ char buf[1024] = { 0 };
+ char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
+ unsigned long pad;
+ char *p = buf;
+ va_list args;
+
+ va_start(args, fmt);
+ for (; p < end && *fmt; fmt++) {
+ if (*fmt != '%') {
+ *p++ = *fmt;
+ continue;
+ }
+ pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
+ switch (*fmt) {
+ case 's':
+ p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
+ break;
+ case 'p':
+ if (*++fmt != 'S')
+ goto out;
+ p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
+ break;
+ case 'l':
+ if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
+ goto out;
+ p = as_hex(p, va_arg(args, unsigned long), pad);
+ break;
+ case 'x':
+ if (end - p <= max(sizeof(int) * 2, pad))
+ goto out;
+ p = as_hex(p, va_arg(args, unsigned int), pad);
+ break;
+ default:
+ goto out;
+ }
+ }
+out:
+ va_end(args);
+ sclp_early_printk(buf);
+}
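[Editor's note: boot_printk() above understands only %s, %pS and optionally zero-padded %x/%lx. A stand-alone re-implementation of the as_hex() helper (the kernel's __fls()-based digit count is replaced by an equivalent loop) shows how the pad argument interacts with the value width:

    #include <stdio.h>

    static const char hex_asc[] = "0123456789abcdef";

    /* writes val in hex into dst, left-padded with zeroes up to pad
     * digits, NUL-terminates, and returns a pointer to the terminator */
    static char *as_hex(char *dst, unsigned long val, int pad)
    {
        unsigned long v = val;
        int digits = 1;
        char *p, *end;

        while (v >>= 4)
            digits++;
        if (digits < pad)
            digits = pad;
        p = end = dst + digits;
        for (*p-- = 0; p >= dst; val >>= 4)
            *p-- = hex_asc[val & 0x0f];
        return end;
    }

    int main(void)
    {
        char buf[32];

        as_hex(buf, 0x1234, 16);
        puts(buf);	/* 0000000000001234, as used for %016lx */
        as_hex(buf, 0x1234, 0);
        puts(buf);	/* 1234 */
        return 0;
    }
]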
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index c73b5118ad42..c8f149ad77e5 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -39,10 +39,7 @@ struct machine_info machine;
void error(char *x)
{
- sclp_early_printk("\n\n");
- sclp_early_printk(x);
- sclp_early_printk("\n\n -- System halted");
-
+ boot_printk("\n\n%s\n\n -- System halted", x);
disabled_wait();
}
@@ -296,7 +293,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
- decompressor_printk("The kernel base address is forced to %lx\n", kernel_start);
+ boot_printk("The kernel base address is forced to %lx\n", kernel_start);
} else {
kernel_start = __NO_KASLR_START_KERNEL;
}
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index ea63a7342f5f..9b57add02cd5 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -59,6 +59,7 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y
CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
@@ -794,8 +795,12 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
+CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_PKEY_CCA=m
+CONFIG_PKEY_EP11=m
+CONFIG_PKEY_PCKMO=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index d8b28ff8ff45..df4addd1834a 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -781,8 +781,12 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_CHACHA_S390=m
+CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_PKEY_CCA=m
+CONFIG_PKEY_EP11=m
+CONFIG_PKEY_PCKMO=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index 06ee706b0d78..d3eb3a233693 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -132,4 +132,14 @@ config CRYPTO_CHACHA_S390
It is available as of z13.
+config CRYPTO_HMAC_S390
+ tristate "Keyed-hash message authentication code: HMAC"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ s390 specific HMAC hardware support for SHA224, SHA256, SHA384 and
+ SHA512.
+
+ Architecture: s390
+
endmenu
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 1b1cc478fa94..a0cb96937c3d 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
+obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o
obj-y += arch_random.o
crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c6fe5405de4a..8cc02d6e0d0f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -51,8 +51,13 @@ struct s390_aes_ctx {
};
struct s390_xts_ctx {
- u8 key[32];
- u8 pcc_key[32];
+ union {
+ u8 keys[64];
+ struct {
+ u8 key[32];
+ u8 pcc_key[32];
+ };
+ };
int key_len;
unsigned long fc;
struct crypto_skcipher *fallback;
@@ -526,6 +531,108 @@ static struct skcipher_alg xts_aes_alg = {
.decrypt = xts_aes_decrypt,
};
+static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+ unsigned long fc;
+ int err;
+
+ err = xts_fallback_setkey(tfm, in_key, key_len);
+ if (err)
+ return err;
+
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
+ (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;
+
+ /* Check if the function code is available */
+ xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ if (!xts_ctx->fc)
+ return 0;
+
+ /* Store double-key */
+ memcpy(xts_ctx->keys, in_key, key_len);
+ xts_ctx->key_len = key_len;
+ return 0;
+}
+
+static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+ unsigned int offset, nbytes, n;
+ struct skcipher_walk walk;
+ int ret;
+ struct {
+ __u8 key[64];
+ __u8 tweak[16];
+ __u8 nap[16];
+ } fxts_param = {
+ .nap = {0},
+ };
+
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, xts_ctx->fallback);
+ return (modifier & CPACF_DECRYPT) ?
+ crypto_skcipher_decrypt(subreq) :
+ crypto_skcipher_encrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+
+ offset = xts_ctx->key_len & 0x20;
+ memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
+ memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
+ fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
+
+ while ((nbytes = walk.nbytes) != 0) {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ ret = skcipher_walk_done(&walk, nbytes - n);
+ }
+ memzero_explicit(&fxts_param, sizeof(fxts_param));
+ return ret;
+}
+
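[Editor's note: in fullxts_aes_crypt() above the param block handed to cpacf_km() starts at fxts_param.key + offset, so the key always ends flush against the tweak: a 32-byte XTS-128 double key occupies the second half of the 64-byte key field, while a 64-byte XTS-256 double key fills it entirely. The key_len & 0x20 expression yields exactly that offset:

    #include <stdio.h>

    int main(void)
    {
        /* key_len is 32 (2 x AES-128) or 64 (2 x AES-256) */
        for (unsigned int key_len = 32; key_len <= 64; key_len += 32)
            printf("key_len=%u -> offset=%u\n", key_len, key_len & 0x20);
        /* prints: key_len=32 -> offset=32
         *         key_len=64 -> offset=0  */
        return 0;
    }
]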
+static int fullxts_aes_encrypt(struct skcipher_request *req)
+{
+ return fullxts_aes_crypt(req, 0);
+}
+
+static int fullxts_aes_decrypt(struct skcipher_request *req)
+{
+ return fullxts_aes_crypt(req, CPACF_DECRYPT);
+}
+
+static struct skcipher_alg fullxts_aes_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "full-xts-aes-s390",
+ .base.cra_priority = 403, /* aes-xts-s390 + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = xts_fallback_init,
+ .exit = xts_fallback_exit,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = fullxts_aes_set_key,
+ .encrypt = fullxts_aes_encrypt,
+ .decrypt = fullxts_aes_decrypt,
+};
+
static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -955,7 +1062,7 @@ static struct aead_alg gcm_aes_aead = {
};
static struct crypto_alg *aes_s390_alg;
-static struct skcipher_alg *aes_s390_skcipher_algs[4];
+static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;
@@ -1012,6 +1119,13 @@ static int __init aes_s390_init(void)
goto out_err;
}
+ if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
+ cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
+ ret = aes_s390_register_skcipher(&fullxts_aes_alg);
+ if (ret)
+ goto out_err;
+ }
+
if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
ret = aes_s390_register_skcipher(&xts_aes_alg);
diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c
new file mode 100644
index 000000000000..bba9a818dfdc
--- /dev/null
+++ b/arch/s390/crypto/hmac_s390.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2024
+ *
+ * s390 specific HMAC support.
+ */
+
+#define KMSG_COMPONENT "hmac_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <asm/cpacf.h>
+#include <crypto/sha2.h>
+#include <crypto/internal/hash.h>
+#include <linux/cpufeature.h>
+#include <linux/module.h>
+
+/*
+ * KMAC param block layout for sha2 function codes:
+ * The layout of the param block for the KMAC instruction depends on the
+ * block size of the sha2 algorithm selected by the function code. The param block
+ * contains the hash chaining value (cv), the input message bit-length (imbl)
+ * and the hmac-secret (key). To prevent code duplication, the sizes of all
+ * these are calculated based on the blocksize.
+ *
+ * param-block:
+ * +-------+
+ * | cv |
+ * +-------+
+ * | imbl |
+ * +-------+
+ * | key |
+ * +-------+
+ *
+ * sizes:
+ * part | sha2-alg | calculation | size | type
+ * -----+----------+-------------+------+--------
+ * cv | 224/256 | blocksize/2 | 32 | u32[8]
+ * | 384/512 | | 64 | u64[8]
+ * imbl | 224/256 | blocksize/8 | 8 | u64
+ * | 384/512 | | 16 | u128
+ * key | 224/256 | blocksize | 64 | u8[64]
+ * | 384/512 | | 128 | u8[128]
+ */
+
+#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define MAX_IMBL_SIZE sizeof(u128)
+#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+#define SHA2_CV_SIZE(bs) ((bs) >> 1)
+#define SHA2_IMBL_SIZE(bs) ((bs) >> 3)
+
+#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs))
+#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))
+
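[Editor's note: as a quick cross-check of the layout comment and the macros above, the offsets work out as follows (stand-alone sketch):

    #include <assert.h>

    #define SHA2_CV_SIZE(bs)	((bs) >> 1)
    #define SHA2_IMBL_SIZE(bs)	((bs) >> 3)
    #define SHA2_IMBL_OFFSET(bs)	(SHA2_CV_SIZE(bs))
    #define SHA2_KEY_OFFSET(bs)	(SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))

    int main(void)
    {
        /* sha224/256: 64-byte blocks -> cv 0..31, imbl at 32, key at 40 */
        assert(SHA2_IMBL_OFFSET(64) == 32 && SHA2_KEY_OFFSET(64) == 40);
        /* sha384/512: 128-byte blocks -> cv 0..63, imbl at 64, key at 80 */
        assert(SHA2_IMBL_OFFSET(128) == 64 && SHA2_KEY_OFFSET(128) == 80);
        return 0;
    }
]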
+struct s390_hmac_ctx {
+ u8 key[MAX_BLOCK_SIZE];
+};
+
+union s390_kmac_gr0 {
+ unsigned long reg;
+ struct {
+ unsigned long : 48;
+ unsigned long ikp : 1;
+ unsigned long iimp : 1;
+ unsigned long ccup : 1;
+ unsigned long : 6;
+ unsigned long fc : 7;
+ };
+};
+
+struct s390_kmac_sha2_ctx {
+ u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE];
+ union s390_kmac_gr0 gr0;
+ u8 buf[MAX_BLOCK_SIZE];
+ unsigned int buflen;
+};
+
+/*
+ * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
+ */
+static inline void kmac_sha2_set_imbl(u8 *param, unsigned int buflen,
+ unsigned int blocksize)
+{
+ u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
+
+ switch (blocksize) {
+ case SHA256_BLOCK_SIZE:
+ *(u64 *)imbl = (u64)buflen * BITS_PER_BYTE;
+ break;
+ case SHA512_BLOCK_SIZE:
+ *(u128 *)imbl = (u128)buflen * BITS_PER_BYTE;
+ break;
+ default:
+ break;
+ }
+}
+
+static int hash_key(const u8 *in, unsigned int inlen,
+ u8 *digest, unsigned int digestsize)
+{
+ unsigned long func;
+ union {
+ struct sha256_paramblock {
+ u32 h[8];
+ u64 mbl;
+ } sha256;
+ struct sha512_paramblock {
+ u64 h[8];
+ u128 mbl;
+ } sha512;
+ } __packed param;
+
+#define PARAM_INIT(x, y, z) \
+ param.sha##x.h[0] = SHA##y ## _H0; \
+ param.sha##x.h[1] = SHA##y ## _H1; \
+ param.sha##x.h[2] = SHA##y ## _H2; \
+ param.sha##x.h[3] = SHA##y ## _H3; \
+ param.sha##x.h[4] = SHA##y ## _H4; \
+ param.sha##x.h[5] = SHA##y ## _H5; \
+ param.sha##x.h[6] = SHA##y ## _H6; \
+ param.sha##x.h[7] = SHA##y ## _H7; \
+ param.sha##x.mbl = (z)
+
+ switch (digestsize) {
+ case SHA224_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_256;
+ PARAM_INIT(256, 224, inlen * 8);
+ break;
+ case SHA256_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_256;
+ PARAM_INIT(256, 256, inlen * 8);
+ break;
+ case SHA384_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_512;
+ PARAM_INIT(512, 384, inlen * 8);
+ break;
+ case SHA512_DIGEST_SIZE:
+ func = CPACF_KLMD_SHA_512;
+ PARAM_INIT(512, 512, inlen * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+#undef PARAM_INIT
+
+ cpacf_klmd(func, &param, in, inlen);
+
+ memcpy(digest, &param, digestsize);
+
+ return 0;
+}
+
+static int s390_hmac_sha2_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(tfm);
+ unsigned int ds = crypto_shash_digestsize(tfm);
+ unsigned int bs = crypto_shash_blocksize(tfm);
+
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+ if (keylen > bs)
+ return hash_key(key, keylen, tfm_ctx->key, ds);
+
+ memcpy(tfm_ctx->key, key, keylen);
+ return 0;
+}
+
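[Editor's note: s390_hmac_sha2_setkey() above applies the standard HMAC key-preparation rule (RFC 2104): keys longer than the block size are first hashed down to the digest size, anything shorter is kept as-is and implicitly zero-padded by the memset(). A trivial model of the resulting stored key length:

    #include <assert.h>

    /* length of the key material kept in tfm_ctx->key after setkey() */
    static unsigned int prepared_key_len(unsigned int keylen,
                                         unsigned int bs, unsigned int ds)
    {
        return keylen > bs ? ds : keylen;	/* remainder stays zero */
    }

    int main(void)
    {
        /* hmac(sha256): block size 64, digest size 32 */
        assert(prepared_key_len(100, 64, 32) == 32);	/* hashed down */
        assert(prepared_key_len(20, 64, 32) == 20);	/* kept, padded */
        return 0;
    }
]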
+static int s390_hmac_sha2_init(struct shash_desc *desc)
+{
+ struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(desc->tfm);
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+
+ memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
+ tfm_ctx->key, bs);
+
+ ctx->buflen = 0;
+ ctx->gr0.reg = 0;
+ switch (crypto_shash_digestsize(desc->tfm)) {
+ case SHA224_DIGEST_SIZE:
+ ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_224;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_256;
+ break;
+ case SHA384_DIGEST_SIZE:
+ ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_384;
+ break;
+ case SHA512_DIGEST_SIZE:
+ ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int s390_hmac_sha2_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ unsigned int offset, n;
+
+ /* check current buffer */
+ offset = ctx->buflen % bs;
+ ctx->buflen += len;
+ if (offset + len < bs)
+ goto store;
+
+ /* process one stored block */
+ if (offset) {
+ n = bs - offset;
+ memcpy(ctx->buf + offset, data, n);
+ ctx->gr0.iimp = 1;
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
+ data += n;
+ len -= n;
+ offset = 0;
+ }
+ /* process as many blocks as possible */
+ if (len >= bs) {
+ n = (len / bs) * bs;
+ ctx->gr0.iimp = 1;
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n);
+ data += n;
+ len -= n;
+ }
+store:
+ /* store incomplete block in buffer */
+ if (len)
+ memcpy(ctx->buf + offset, data, len);
+
+ return 0;
+}
+
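[Editor's note: the update path above keeps the invariant that exactly ctx->buflen % bs bytes remain buffered after every call; everything else is fed to KMAC in whole blocks. A tiny model of that bookkeeping:

    #include <assert.h>

    /* returns how many bytes stay in ctx->buf after feeding len more */
    static unsigned int feed(unsigned int *buflen, unsigned int len,
                             unsigned int bs)
    {
        *buflen += len;
        return *buflen % bs;
    }

    int main(void)
    {
        unsigned int buflen = 0, bs = 64;	/* hmac(sha256) block size */

        assert(feed(&buflen, 100, bs) == 36);	/* one block hashed, 36 kept */
        assert(feed(&buflen, 40, bs) == 12);	/* 36+28 complete a block */
        return 0;
    }
]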
+static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out)
+{
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+
+ ctx->gr0.iimp = 0;
+ kmac_sha2_set_imbl(ctx->param, ctx->buflen, bs);
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen % bs);
+ memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm));
+
+ return 0;
+}
+
+static int s390_hmac_sha2_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int ds = crypto_shash_digestsize(desc->tfm);
+ int rc;
+
+ rc = s390_hmac_sha2_init(desc);
+ if (rc)
+ return rc;
+
+ ctx->gr0.iimp = 0;
+ kmac_sha2_set_imbl(ctx->param, len,
+ crypto_shash_blocksize(desc->tfm));
+ _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, len);
+ memcpy(out, ctx->param, ds);
+
+ return 0;
+}
+
+#define S390_HMAC_SHA2_ALG(x) { \
+ .fc = CPACF_KMAC_HMAC_SHA_##x, \
+ .alg = { \
+ .init = s390_hmac_sha2_init, \
+ .update = s390_hmac_sha2_update, \
+ .final = s390_hmac_sha2_final, \
+ .digest = s390_hmac_sha2_digest, \
+ .setkey = s390_hmac_sha2_setkey, \
+ .descsize = sizeof(struct s390_kmac_sha2_ctx), \
+ .halg = { \
+ .digestsize = SHA##x##_DIGEST_SIZE, \
+ .base = { \
+ .cra_name = "hmac(sha" #x ")", \
+ .cra_driver_name = "hmac_s390_sha" #x, \
+ .cra_blocksize = SHA##x##_BLOCK_SIZE, \
+ .cra_priority = 400, \
+ .cra_ctxsize = sizeof(struct s390_hmac_ctx), \
+ .cra_module = THIS_MODULE, \
+ }, \
+ }, \
+ }, \
+}
+
+static struct s390_hmac_alg {
+ bool registered;
+ unsigned int fc;
+ struct shash_alg alg;
+} s390_hmac_algs[] = {
+ S390_HMAC_SHA2_ALG(224),
+ S390_HMAC_SHA2_ALG(256),
+ S390_HMAC_SHA2_ALG(384),
+ S390_HMAC_SHA2_ALG(512),
+};
+
+static __always_inline void _s390_hmac_algs_unregister(void)
+{
+ struct s390_hmac_alg *hmac;
+ int i;
+
+ for (i = ARRAY_SIZE(s390_hmac_algs) - 1; i >= 0; i--) {
+ hmac = &s390_hmac_algs[i];
+ if (!hmac->registered)
+ continue;
+ crypto_unregister_shash(&hmac->alg);
+ }
+}
+
+static int __init hmac_s390_init(void)
+{
+ struct s390_hmac_alg *hmac;
+ int i, rc = -ENODEV;
+
+ if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
+ return -ENODEV;
+ if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(s390_hmac_algs); i++) {
+ hmac = &s390_hmac_algs[i];
+ if (!cpacf_query_func(CPACF_KMAC, hmac->fc))
+ continue;
+
+ rc = crypto_register_shash(&hmac->alg);
+ if (rc) {
+ pr_err("unable to register %s\n",
+ hmac->alg.halg.base.cra_name);
+ goto out;
+ }
+ hmac->registered = true;
+ pr_debug("registered %s\n", hmac->alg.halg.base.cra_name);
+ }
+ return rc;
+out:
+ _s390_hmac_algs_unregister();
+ return rc;
+}
+
+static void __exit hmac_s390_exit(void)
+{
+ _s390_hmac_algs_unregister();
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, hmac_s390_init);
+module_exit(hmac_s390_exit);
+
+MODULE_DESCRIPTION("S390 HMAC driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 99ea3f12c5d2..ef4491ccbbf8 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -133,8 +133,8 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb,
if (msleep_interruptible(1000))
return -EINTR;
}
- ret = pkey_keyblob2pkey(kb->key, kb->keylen,
- pk->protkey, &pk->len, &pk->type);
+ ret = pkey_key2protkey(kb->key, kb->keylen,
+ pk->protkey, &pk->len, &pk->type);
}
return ret;
@@ -802,7 +802,10 @@ out_err:
module_init(paes_s390_init);
module_exit(paes_s390_fini);
-MODULE_ALIAS_CRYPTO("paes");
+MODULE_ALIAS_CRYPTO("ecb(paes)");
+MODULE_ALIAS_CRYPTO("cbc(paes)");
+MODULE_ALIAS_CRYPTO("ctr(paes)");
+MODULE_ALIAS_CRYPTO("xts(paes)");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index 65ea12fc87a1..2bb22db54c31 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -25,6 +25,7 @@ struct s390_sha_ctx {
u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)];
u8 buf[SHA_MAX_BLOCK_SIZE];
int func; /* KIMD function to use */
+ int first_message_part;
};
struct shash_desc;
diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c
index e1350e033a32..a84ef692f572 100644
--- a/arch/s390/crypto/sha3_256_s390.c
+++ b/arch/s390/crypto/sha3_256_s390.c
@@ -21,9 +21,11 @@ static int sha3_256_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- memset(sctx->state, 0, sizeof(sctx->state));
+ if (!test_facility(86)) /* msa 12 */
+ memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_256;
+ sctx->first_message_part = 1;
return 0;
}
@@ -36,6 +38,7 @@ static int sha3_256_export(struct shash_desc *desc, void *out)
octx->rsiz = sctx->count;
memcpy(octx->st, sctx->state, sizeof(octx->st));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+ octx->partial = sctx->first_message_part;
return 0;
}
@@ -48,6 +51,7 @@ static int sha3_256_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_256;
return 0;
@@ -61,6 +65,7 @@ static int sha3_224_import(struct shash_desc *desc, const void *in)
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_224;
return 0;
@@ -88,9 +93,11 @@ static int sha3_224_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- memset(sctx->state, 0, sizeof(sctx->state));
+ if (!test_facility(86)) /* msa 12 */
+ memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_224;
+ sctx->first_message_part = 1;
return 0;
}
diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c
index 06c142ed9bb1..07528fc98ff7 100644
--- a/arch/s390/crypto/sha3_512_s390.c
+++ b/arch/s390/crypto/sha3_512_s390.c
@@ -20,9 +20,11 @@ static int sha3_512_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- memset(sctx->state, 0, sizeof(sctx->state));
+ if (!test_facility(86)) /* msa 12 */
+ memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_512;
+ sctx->first_message_part = 1;
return 0;
}
@@ -37,6 +39,7 @@ static int sha3_512_export(struct shash_desc *desc, void *out)
memcpy(octx->st, sctx->state, sizeof(octx->st));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+ octx->partial = sctx->first_message_part;
return 0;
}
@@ -52,6 +55,7 @@ static int sha3_512_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_512;
return 0;
@@ -68,6 +72,7 @@ static int sha3_384_import(struct shash_desc *desc, const void *in)
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->first_message_part = ictx->partial;
sctx->func = CPACF_KIMD_SHA3_384;
return 0;
@@ -97,9 +102,11 @@ static int sha3_384_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
- memset(sctx->state, 0, sizeof(sctx->state));
+ if (!test_facility(86)) /* msa 12 */
+ memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_384;
+ sctx->first_message_part = 1;
return 0;
}
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 686fe7aa192f..961d7d522af1 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -18,6 +18,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
unsigned int index, n;
+ int fc;
/* how much is already in the buffer? */
index = ctx->count % bsize;
@@ -26,10 +27,16 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if ((index + len) < bsize)
goto store;
+ fc = ctx->func;
+ if (ctx->first_message_part)
+ fc |= test_facility(86) ? CPACF_KIMD_NIP : 0;
+
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
- cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
+ cpacf_kimd(fc, ctx->state, ctx->buf, bsize);
+ ctx->first_message_part = 0;
+ fc &= ~CPACF_KIMD_NIP;
data += bsize - index;
len -= bsize - index;
index = 0;
@@ -38,7 +45,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process as many blocks as possible */
if (len >= bsize) {
n = (len / bsize) * bsize;
- cpacf_kimd(ctx->func, ctx->state, data, n);
+ cpacf_kimd(fc, ctx->state, data, n);
+ ctx->first_message_part = 0;
data += n;
len -= n;
}
@@ -75,7 +83,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
unsigned int n;
- int mbl_offset;
+ int mbl_offset, fc;
n = ctx->count % bsize;
bits = ctx->count * 8;
@@ -109,7 +117,11 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
return -EINVAL;
}
- cpacf_klmd(ctx->func, ctx->state, ctx->buf, n);
+ fc = ctx->func;
+ fc |= test_facility(86) ? CPACF_KLMD_DUFOP : 0;
+ if (ctx->first_message_part)
+ fc |= CPACF_KLMD_NIP;
+ cpacf_klmd(fc, ctx->state, ctx->buf, n);
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
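[Editor's note: with facility 86 (MSA 12) the new KIMD/KLMD flags from the cpacf.h hunk are simply OR-ed into the function code; NIP on the first message part presumably lets the hardware initialize the parameter block itself, which is why the sha3 init functions above skip their memset(). A sketch of the flag composition, with a made-up base function-code value for illustration:

    #include <stdio.h>

    #define CPACF_KLMD_DUFOP	0x4000	/* from the cpacf.h hunk above */
    #define CPACF_KLMD_NIP	0x8000

    int main(void)
    {
        int fc = 0x21;	/* hypothetical sha3 KLMD function code */

        fc |= CPACF_KLMD_DUFOP;	/* facility 86 available */
        fc |= CPACF_KLMD_NIP;	/* first message part */
        printf("fc=0x%x\n", fc);	/* fc=0xc021 */
        return 0;
    }
]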
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index 65f4036fd541..83ebf54cca6b 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -78,7 +78,6 @@ struct hypfs_dbfs_file {
struct dentry *dentry;
};
-extern void hypfs_dbfs_exit(void);
extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 0e855c5e91c5..5d9effb0867c 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -76,7 +76,6 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations dbfs_ops = {
.read = dbfs_read,
- .llseek = no_llseek,
.unlocked_ioctl = dbfs_ioctl,
};
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 26a009f9c49e..c8af67d20994 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -29,8 +29,6 @@ static enum diag204_format diag204_info_type; /* used diag 204 data format */
static void *diag204_buf; /* 4K aligned buffer for diag204 data */
static int diag204_buf_pages; /* number of pages for diag204 data */
-static struct dentry *dbfs_d204_file;
-
enum diag204_format diag204_get_info_type(void)
{
return diag204_info_type;
@@ -214,16 +212,13 @@ __init int hypfs_diag_init(void)
hypfs_dbfs_create_file(&dbfs_file_d204);
rc = hypfs_diag_fs_init();
- if (rc) {
+ if (rc)
pr_err("The hardware system does not provide all functions required by hypfs\n");
- debugfs_remove(dbfs_d204_file);
- }
return rc;
}
void hypfs_diag_exit(void)
{
- debugfs_remove(dbfs_d204_file);
hypfs_diag_fs_exit();
diag204_free_buffer();
hypfs_dbfs_remove_file(&dbfs_file_d204);
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 858beaf4a8cb..d428635abf08 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -443,7 +443,6 @@ static const struct file_operations hypfs_file_ops = {
.release = hypfs_release,
.read_iter = hypfs_read_iter,
.write_iter = hypfs_write_iter,
- .llseek = no_llseek,
};
static struct file_system_type hypfs_type = {
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 4b904110d27c..297bf7157968 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -7,3 +7,4 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
+generic-y += mmzone.h
diff --git a/arch/s390/include/asm/arch_hweight.h b/arch/s390/include/asm/arch_hweight.h
index 50e23ce854e5..aca08b0acbc1 100644
--- a/arch/s390/include/asm/arch_hweight.h
+++ b/arch/s390/include/asm/arch_hweight.h
@@ -4,6 +4,7 @@
#define _ASM_S390_ARCH_HWEIGHT_H
#include <linux/types.h>
+#include <asm/march.h>
static __always_inline unsigned long popcnt_z196(unsigned long w)
{
@@ -29,9 +30,9 @@ static __always_inline unsigned long popcnt_z15(unsigned long w)
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15(w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 32;
w += w >> 16;
@@ -43,9 +44,9 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15(w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 16;
w += w >> 8;
@@ -56,9 +57,9 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
static __always_inline unsigned int __arch_hweight16(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15((unsigned short)w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 8;
return w & 0xff;
@@ -68,7 +69,7 @@ static __always_inline unsigned int __arch_hweight16(unsigned int w)
static __always_inline unsigned int __arch_hweight8(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES))
+ if (__is_defined(MARCH_HAS_Z196_FEATURES))
return popcnt_z196((unsigned char)w);
return __sw_hweight8(w);
}
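[Editor's note (context, unchanged by this patch): popcnt_z196() produces a population count per byte, so the shift-add ladder in __arch_hweight64() folds the eight byte counts into the low byte. A portable sketch of the same fold:

    #include <stdio.h>

    /* portable stand-in for the z196 POPCNT instruction: population
     * count computed separately within each byte of the word */
    static unsigned long popcnt_bytes(unsigned long w)
    {
        unsigned long r = 0;

        for (int i = 0; i < 64; i += 8) {
            unsigned long byte = (w >> i) & 0xff;
            r |= (unsigned long)__builtin_popcountl(byte) << i;
        }
        return r;
    }

    int main(void)
    {
        unsigned long w = popcnt_bytes(0xff00ff00ff00ff00UL);

        w += w >> 32;	/* fold the two word halves */
        w += w >> 16;
        w += w >> 8;	/* low byte now holds the full count */
        printf("%lu\n", w & 0xff);	/* 32 */
        return 0;
    }
]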
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 742c7919cbcd..65380da9e75f 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -9,6 +9,7 @@
#define __ARCH_S390_ATOMIC_OPS__
#include <linux/limits.h>
+#include <asm/march.h>
static __always_inline int __atomic_read(const atomic_t *v)
{
@@ -56,7 +57,7 @@ static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
}
}
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static __always_inline op_type op_name(op_type val, op_type *ptr) \
@@ -107,7 +108,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define __ATOMIC_OP(op_name, op_string) \
static __always_inline int op_name(int val, int *ptr) \
@@ -166,7 +167,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 82de2a7c4160..d82130d7f2b6 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -8,13 +8,15 @@
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
+#include <asm/march.h>
+
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
#else
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index dae8843b164f..1d3a4b0c650f 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -54,6 +54,8 @@
#define CPACF_KM_XTS_256 0x34
#define CPACF_KM_PXTS_128 0x3a
#define CPACF_KM_PXTS_256 0x3c
+#define CPACF_KM_XTS_128_FULL 0x52
+#define CPACF_KM_XTS_256_FULL 0x54
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -121,23 +123,31 @@
#define CPACF_KMAC_DEA 0x01
#define CPACF_KMAC_TDEA_128 0x02
#define CPACF_KMAC_TDEA_192 0x03
+#define CPACF_KMAC_HMAC_SHA_224 0x70
+#define CPACF_KMAC_HMAC_SHA_256 0x71
+#define CPACF_KMAC_HMAC_SHA_384 0x72
+#define CPACF_KMAC_HMAC_SHA_512 0x73
/*
* Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
* instruction
*/
-#define CPACF_PCKMO_QUERY 0x00
-#define CPACF_PCKMO_ENC_DES_KEY 0x01
-#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
-#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
-#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
-#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
-#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
-#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
-#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
-#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
-#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
-#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
+#define CPACF_PCKMO_QUERY 0x00
+#define CPACF_PCKMO_ENC_DES_KEY 0x01
+#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
+#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
+#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
+#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
+#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
+#define CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY 0x15
+#define CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY 0x16
+#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
+#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
+#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
+#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
+#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
+#define CPACF_PCKMO_ENC_HMAC_512_KEY 0x76
+#define CPACF_PCKMO_ENC_HMAC_1024_KEY 0x7a
/*
* Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
@@ -165,7 +175,40 @@
#define CPACF_KMA_LAAD 0x200 /* Last-AAD */
#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */
+/*
+ * Flags for the KIMD/KLMD (COMPUTE INTERMEDIATE/LAST MESSAGE DIGEST)
+ * instructions
+ */
+#define CPACF_KIMD_NIP 0x8000
+#define CPACF_KLMD_DUFOP 0x4000
+#define CPACF_KLMD_NIP 0x8000
+
+/*
+ * Function codes for KDSA (COMPUTE DIGITAL SIGNATURE AUTHENTICATION)
+ * instruction
+ */
+#define CPACF_KDSA_QUERY 0x00
+#define CPACF_KDSA_ECDSA_VERIFY_P256 0x01
+#define CPACF_KDSA_ECDSA_VERIFY_P384 0x02
+#define CPACF_KDSA_ECDSA_VERIFY_P521 0x03
+#define CPACF_KDSA_ECDSA_SIGN_P256 0x09
+#define CPACF_KDSA_ECDSA_SIGN_P384 0x0a
+#define CPACF_KDSA_ECDSA_SIGN_P521 0x0b
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P256 0x11
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P384 0x12
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P521 0x13
+#define CPACF_KDSA_EDDSA_VERIFY_ED25519 0x20
+#define CPACF_KDSA_EDDSA_VERIFY_ED448 0x24
+#define CPACF_KDSA_EDDSA_SIGN_ED25519 0x28
+#define CPACF_KDSA_EDDSA_SIGN_ED448 0x2c
+#define CPACF_KDSA_ENC_EDDSA_SIGN_ED25519 0x30
+#define CPACF_KDSA_ENC_EDDSA_SIGN_ED448 0x34
+
+#define CPACF_FC_QUERY 0x00
+#define CPACF_FC_QUERY_AUTH_INFO 0x7F
+
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+typedef struct { unsigned char bytes[256]; } cpacf_qai_t;
/*
* Prototype for a nonexistent function to produce a link
@@ -175,80 +218,85 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
void __cpacf_bad_opcode(void);
static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
- cpacf_mask_t *mask)
+ u8 *pb, u8 fc)
{
asm volatile(
- " la %%r1,%[mask]\n"
- " xgr %%r0,%%r0\n"
+ " la %%r1,%[pb]\n"
+ " lghi %%r0,%[fc]\n"
" .insn rre,%[opc] << 16,%[r1],%[r2]\n"
- : [mask] "=R" (*mask)
- : [opc] "i" (opc),
+ : [pb] "=R" (*pb)
+ : [opc] "i" (opc), [fc] "i" (fc),
[r1] "i" (r1), [r2] "i" (r2)
- : "cc", "r0", "r1");
+ : "cc", "memory", "r0", "r1");
}
-static __always_inline void __cpacf_query_rrf(u32 opc,
- u8 r1, u8 r2, u8 r3, u8 m4,
- cpacf_mask_t *mask)
+static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3,
+ u8 m4, u8 *pb, u8 fc)
{
asm volatile(
- " la %%r1,%[mask]\n"
- " xgr %%r0,%%r0\n"
+ " la %%r1,%[pb]\n"
+ " lghi %%r0,%[fc]\n"
" .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
- : [mask] "=R" (*mask)
- : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
- [r3] "i" (r3), [m4] "i" (m4)
- : "cc", "r0", "r1");
+ : [pb] "=R" (*pb)
+ : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1),
+ [r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4)
+ : "cc", "memory", "r0", "r1");
}
-static __always_inline void __cpacf_query(unsigned int opcode,
- cpacf_mask_t *mask)
+static __always_inline void __cpacf_query_insn(unsigned int opcode, void *pb,
+ u8 fc)
{
switch (opcode) {
case CPACF_KDSA:
- __cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KDSA, 0, 2, pb, fc);
break;
case CPACF_KIMD:
- __cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KIMD, 0, 2, pb, fc);
break;
case CPACF_KLMD:
- __cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KLMD, 0, 2, pb, fc);
break;
case CPACF_KM:
- __cpacf_query_rre(CPACF_KM, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KM, 2, 4, pb, fc);
break;
case CPACF_KMA:
- __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
+ __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, pb, fc);
break;
case CPACF_KMAC:
- __cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KMAC, 0, 2, pb, fc);
break;
case CPACF_KMC:
- __cpacf_query_rre(CPACF_KMC, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMC, 2, 4, pb, fc);
break;
case CPACF_KMCTR:
- __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
+ __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, pb, fc);
break;
case CPACF_KMF:
- __cpacf_query_rre(CPACF_KMF, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMF, 2, 4, pb, fc);
break;
case CPACF_KMO:
- __cpacf_query_rre(CPACF_KMO, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMO, 2, 4, pb, fc);
break;
case CPACF_PCC:
- __cpacf_query_rre(CPACF_PCC, 0, 0, mask);
+ __cpacf_query_rre(CPACF_PCC, 0, 0, pb, fc);
break;
case CPACF_PCKMO:
- __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
+ __cpacf_query_rre(CPACF_PCKMO, 0, 0, pb, fc);
break;
case CPACF_PRNO:
- __cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
+ __cpacf_query_rre(CPACF_PRNO, 2, 4, pb, fc);
break;
default:
__cpacf_bad_opcode();
}
}
+static __always_inline void __cpacf_query(unsigned int opcode,
+ cpacf_mask_t *mask)
+{
+ __cpacf_query_insn(opcode, mask, CPACF_FC_QUERY);
+}
+
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
{
switch (opcode) {
@@ -269,6 +317,8 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
return test_facility(57); /* check for MSA5 */
case CPACF_KMA:
return test_facility(146); /* check for MSA8 */
+ case CPACF_KDSA:
+ return test_facility(155); /* check for MSA9 */
default:
__cpacf_bad_opcode();
return 0;
@@ -276,14 +326,15 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
}
/**
- * cpacf_query() - check if a specific CPACF function is available
+ * cpacf_query() - Query the function code mask for this CPACF opcode
* @opcode: the opcode of the crypto instruction
- * @func: the function code to test for
+ * @mask: ptr to struct cpacf_mask_t
*
* Executes the query function for the given crypto instruction @opcode
* and stores the resulting function code mask in @mask
*
- * Returns 1 if @func is available for @opcode, 0 otherwise
+ * On success 1 is returned and the mask is filled with the function
+ * code mask for this CPACF opcode, otherwise 0 is returned.
*/
static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
@@ -300,7 +351,8 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
}
-static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+static __always_inline int cpacf_query_func(unsigned int opcode,
+ unsigned int func)
{
cpacf_mask_t mask;
@@ -309,6 +361,32 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
return 0;
}
+static __always_inline void __cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
+{
+ __cpacf_query_insn(opcode, qai, CPACF_FC_QUERY_AUTH_INFO);
+}
+
+/**
+ * cpacf_qai() - Get the query authentication information for a CPACF opcode
+ * @opcode: the opcode of the crypto instruction
+ * @qai: ptr to struct cpacf_qai_t
+ *
+ * Executes the query authentication information function for the given crypto
+ * instruction @opcode, provided that function code is available
+ *
+ * On success 1 is returned and @qai is filled with the query authentication
+ * information for this CPACF opcode, otherwise 0 is returned.
+ */
+static __always_inline int cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
+{
+ if (cpacf_query_func(opcode, CPACF_FC_QUERY_AUTH_INFO)) {
+ __cpacf_qai(opcode, qai);
+ return 1;
+ }
+ memset(qai, 0, sizeof(*qai));
+ return 0;
+}
+
/**
* cpacf_km() - executes the KM (CIPHER MESSAGE) instruction
* @func: the function code passed to KM; see CPACF_KM_xxx defines
@@ -391,7 +469,7 @@ static inline void cpacf_kimd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- "0: .insn rre,%[opc] << 16,0,%[src]\n"
+ "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
@@ -416,7 +494,7 @@ static inline void cpacf_klmd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- "0: .insn rre,%[opc] << 16,0,%[src]\n"
+ "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
@@ -425,29 +503,30 @@ static inline void cpacf_klmd(unsigned long func, void *param,
}
/**
- * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
- * instruction
- * @func: the function code passed to KM; see CPACF_KMAC_xxx defines
+ * _cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction and updates flags in gr0
+ * @gr0: pointer to gr0 (fc and flags) passed to KMAC; see CPACF_KMAC_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_kmac(unsigned long func, void *param,
- const u8 *src, long src_len)
+static inline int _cpacf_kmac(unsigned long *gr0, void *param,
+ const u8 *src, long src_len)
{
union register_pair s;
s.even = (unsigned long)src;
s.odd = (unsigned long)src_len;
asm volatile(
- " lgr 0,%[fc]\n"
+ " lgr 0,%[r0]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
- : [src] "+&d" (s.pair)
- : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+ " lgr %[r0],0\n"
+ : [r0] "+d" (*gr0), [src] "+&d" (s.pair)
+ : [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMAC)
: "cc", "memory", "0", "1");
@@ -455,6 +534,22 @@ static inline int cpacf_kmac(unsigned long func, void *param,
}
/**
+ * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction
+ * @func: function code passed to KMAC; see CPACF_KMAC_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for digest funcs
+ */
+static inline int cpacf_kmac(unsigned long func, void *param,
+ const u8 *src, long src_len)
+{
+ return _cpacf_kmac(&func, param, src, src_len);
+}
+
+/**
* cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction
* @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines
* @param: address of parameter block; see POP for details on each func
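
A sketch of how these helpers combine, assuming kernel context and
<asm/cpacf.h>; the probed function code and the helper name are only
illustrative:

	static int kmac_hmac_sha256_available(void)
	{
		cpacf_mask_t mask;
		cpacf_qai_t qai;

		/* Fill mask with the KMAC function code mask (fc 0x00). */
		if (!cpacf_query(CPACF_KMAC, &mask))
			return 0;
		/* One bit per function code, MSB of byte 0 first. */
		if (!cpacf_test_func(&mask, CPACF_KMAC_HMAC_SHA_256))
			return 0;
		/* Optionally fetch the 256 byte auth info block (fc 0x7F). */
		if (cpacf_qai(CPACF_KMAC, &qai))
			pr_debug("KMAC QAI byte 0: 0x%02x\n", qai.bytes[0]);
		return 1;
	}
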
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
index 72a9556d04f3..e6527f51ad0b 100644
--- a/arch/s390/include/asm/ctlreg.h
+++ b/arch/s390/include/asm/ctlreg.h
@@ -202,8 +202,9 @@ union ctlreg0 {
unsigned long : 3;
unsigned long ccc : 1; /* Cryptography counter control */
unsigned long pec : 1; /* PAI extension control */
- unsigned long : 17;
- unsigned long : 3;
+ unsigned long : 15;
+ unsigned long wti : 1; /* Warning-track */
+ unsigned long : 4;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
unsigned long edat : 1; /* Enhanced-DAT-enablement control */
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index c0d43512f4fc..e1316e181230 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -38,6 +38,7 @@ enum diag_stat_enum {
DIAG_STAT_X308,
DIAG_STAT_X318,
DIAG_STAT_X320,
+ DIAG_STAT_X49C,
DIAG_STAT_X500,
NR_DIAG_STAT
};
@@ -363,4 +364,12 @@ void _diag0c_amode31(unsigned long rx);
void _diag308_reset_amode31(void);
int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
+/* diag 49c subcodes */
+enum diag49c_sc {
+ DIAG49C_SUBC_ACK = 0,
+ DIAG49C_SUBC_REG = 1
+};
+
+int diag49c(unsigned long subcode);
+
#endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index fbadca645af7..406746666eb7 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -6,8 +6,23 @@
#define MCOUNT_INSN_SIZE 6
#ifndef __ASSEMBLY__
+#include <asm/stacktrace.h>
-unsigned long return_address(unsigned int n);
+static __always_inline unsigned long return_address(unsigned int n)
+{
+ struct stack_frame *sf;
+
+ if (!n)
+ return (unsigned long)__builtin_return_address(0);
+
+ sf = (struct stack_frame *)current_frame_address();
+ do {
+ sf = (struct stack_frame *)sf->back_chain;
+ if (!sf)
+ return 0;
+ } while (--n);
+ return sf->gprs[8];
+}
#define ftrace_return_address(n) return_address(n)
void ftrace_caller(void);
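
return_address() now follows the kernel stack back chain inline; gprs[8]
in struct stack_frame is the saved %r14, i.e. that frame's return
address. A hypothetical caller, purely for illustration:

	static void show_callers(void)
	{
		/* n == 0 short-circuits to __builtin_return_address(0) */
		unsigned long ra0 = ftrace_return_address(0);
		/* n == 1 walks one back-chain hop */
		unsigned long ra1 = ftrace_return_address(1);

		pr_info("called from %pS <- %pS\n", (void *)ra0, (void *)ra1);
	}
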
diff --git a/arch/s390/include/asm/hiperdispatch.h b/arch/s390/include/asm/hiperdispatch.h
new file mode 100644
index 000000000000..27e23aa27a24
--- /dev/null
+++ b/arch/s390/include/asm/hiperdispatch.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2024
+ */
+
+#ifndef _ASM_HIPERDISPATCH_H
+#define _ASM_HIPERDISPATCH_H
+
+void hd_reset_state(void);
+void hd_add_core(int cpu);
+void hd_disable_hiperdispatch(void);
+int hd_enable_hiperdispatch(void);
+
+#endif /* _ASM_HIPERDISPATCH_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 54b42817f70a..d9e705f4a697 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -47,6 +47,7 @@ enum interruption_class {
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_FTP,
+ IRQEXT_WTI,
IRQIO_CIO,
IRQIO_DAS,
IRQIO_C15,
@@ -99,6 +100,7 @@ int unregister_external_irq(u16 code, ext_int_handler_t handler);
enum irq_subclass {
IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
+ IRQ_SUBCLASS_WARNING_TRACK = 33,
};
#define CR0_IRQ_SUBCLASS_MASK \
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 183ac29afaf8..48c64716d1f2 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -98,8 +98,8 @@ struct lowcore {
psw_t io_new_psw; /* 0x01f0 */
/* Save areas. */
- __u64 save_area_sync[8]; /* 0x0200 */
- __u64 save_area_async[8]; /* 0x0240 */
+ __u64 save_area[8]; /* 0x0200 */
+ __u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */
__u64 pcpu; /* 0x0288 */
diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h
new file mode 100644
index 000000000000..fd9eef3be44c
--- /dev/null
+++ b/arch/s390/include/asm/march.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_S390_MARCH_H
+#define __ASM_S390_MARCH_H
+
+#include <linux/kconfig.h>
+
+#define MARCH_HAS_Z10_FEATURES 1
+
+#ifndef __DECOMPRESSOR
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#define MARCH_HAS_Z196_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+#define MARCH_HAS_ZEC12_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+#define MARCH_HAS_Z13_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES
+#define MARCH_HAS_Z14_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
+#define MARCH_HAS_Z15_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z16_FEATURES
+#define MARCH_HAS_Z16_FEATURES 1
+#endif
+
+#endif /* __DECOMPRESSOR */
+
+#endif /* __ASM_S390_MARCH_H */
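
The new header mirrors the CONFIG_HAVE_MARCH_* options as MARCH_HAS_*
defines, except in the decompressor, which must keep running on older
machines. Consumers guard march-dependent code as in barrier.h above; a
minimal sketch of the pattern (macro name assumed):

	#include <asm/march.h>

	#ifdef MARCH_HAS_Z196_FEATURES
	/* z196+ guaranteed at build time, never seen by the decompressor */
	#define SERIALIZE_ASM	"bcr 14,0\n"	/* fast-BCR */
	#else
	#define SERIALIZE_ASM	"bcr 15,0\n"
	#endif
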
diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h
deleted file mode 100644
index 73e3e7c6976c..000000000000
--- a/arch/s390/include/asm/mmzone.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * NUMA support for s390
- *
- * Copyright IBM Corp. 2015
- */
-
-#ifndef _ASM_S390_MMZONE_H
-#define _ASM_S390_MMZONE_H
-
-#ifdef CONFIG_NUMA
-
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[nid])
-
-#endif /* CONFIG_NUMA */
-#endif /* _ASM_S390_MMZONE_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 16e4caa931f1..73e1e03317b4 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -176,8 +176,6 @@ static inline int devmem_is_allowed(unsigned long pfn)
int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
-int arch_make_page_accessible(struct page *page);
-#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
struct vm_layout {
unsigned long kaslr_offset;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 30820a649e6e..9d920ced6047 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -191,7 +191,14 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
return (zdev->fh & (1UL << 31)) ? true : false;
}
-extern const struct attribute_group *zpci_attr_groups[];
+extern const struct attribute_group zpci_attr_group;
+extern const struct attribute_group pfip_attr_group;
+extern const struct attribute_group zpci_ident_attr_group;
+
+#define ARCH_PCI_DEV_GROUPS &zpci_attr_group, \
+ &pfip_attr_group, \
+ &zpci_ident_attr_group,
+
extern unsigned int s390_pci_force_floating __initdata;
extern unsigned int s390_pci_no_rid;
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 89a28740b6ab..84f6b8357b45 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -4,6 +4,7 @@
#include <linux/preempt.h>
#include <asm/cmpxchg.h>
+#include <asm/march.h>
/*
* s390 uses its own implementation for per cpu data, the offset of
@@ -50,7 +51,7 @@
#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifndef MARCH_HAS_Z196_FEATURES
#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
@@ -61,7 +62,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
{ \
@@ -129,7 +130,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 9917e2717b2b..66200d4a2134 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -48,30 +48,6 @@ struct perf_sf_sde_regs {
unsigned long reserved:63; /* reserved */
};
-/* Perf PMU definitions for the counter facility */
-#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */
-
-/* Perf PMU definitions for the sampling facility */
-#define PERF_CPUM_SF_MAX_CTR 2
-#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
-#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
-#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
-#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
-#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
-#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
- PERF_CPUM_SF_DIAG_MODE)
-#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
-
-#define REG_NONE 0
-#define REG_OVERFLOW 1
-#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
-#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
-#define TEAR_REG(hwc) ((hwc)->last_tag)
-#define SAMPL_RATE(hwc) ((hwc)->event_base)
-#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
-#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
-#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
-
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
(regs)->psw.addr = (__ip); \
(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3fa280d0672a..0ffbaf741955 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -955,6 +955,7 @@ static inline int pte_unused(pte_t pte)
* young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
* must not be set.
*/
+#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
index 47d80a7451a6..5dca1a46a9f6 100644
--- a/arch/s390/include/asm/pkey.h
+++ b/arch/s390/include/asm/pkey.h
@@ -22,7 +22,7 @@
* @param protkey pointer to buffer receiving the protected key
* @return 0 on success, negative errno value on failure
*/
-int pkey_keyblob2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+int pkey_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
#endif /* _KAPI_PKEY_H */
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 3ae5f31c665d..deca3f221836 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -5,8 +5,9 @@
#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
+#include <asm/march.h>
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000
@@ -75,7 +76,7 @@ static __always_inline bool should_resched(int preempt_offset)
preempt_offset);
}
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define PREEMPT_ENABLED (0)
@@ -123,7 +124,7 @@ static __always_inline bool should_resched(int preempt_offset)
tif_need_resched());
}
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
#define init_task_preempt_count(p) do { } while (0)
/* Deferred to CPU bringup time */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 5ecd442535b9..9a5236acc0a8 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -44,6 +44,7 @@ struct pcpu {
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
unsigned long flags; /* per CPU flags */
+ unsigned long capacity; /* cpu capacity for scheduler */
signed char state; /* physical cpu state */
signed char polarization; /* physical polarization */
u16 address; /* physical cpu address */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index da3dad18fe50..eb00fa1771da 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -72,6 +72,7 @@ struct sclp_info {
unsigned char has_core_type : 1;
unsigned char has_sprp : 1;
unsigned char has_hvs : 1;
+ unsigned char has_wti : 1;
unsigned char has_esca : 1;
unsigned char has_sief2 : 1;
unsigned char has_64bscao : 1;
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 8505737712ee..70b920b32827 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -34,6 +34,7 @@
#define MACHINE_FLAG_SCC BIT(17)
#define MACHINE_FLAG_PCI_MIO BIT(18)
#define MACHINE_FLAG_RDP BIT(19)
+#define MACHINE_FLAG_SEQ_INSN BIT(20)
#define LPP_MAGIC BIT(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
@@ -95,6 +96,7 @@ extern unsigned long mio_wb_bit_mask;
#define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC)
#define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO)
#define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP)
+#define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN)
/*
* Console mode. Override with conmode=
@@ -115,6 +117,8 @@ extern unsigned int console_irq;
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
+void register_early_console(void);
+
#ifdef CONFIG_VMCP
void vmcp_cma_reserve(void);
#else
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index cd835f4fb11a..7feca96c48c6 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -12,6 +12,7 @@
#include <asm/processor.h>
#define raw_smp_processor_id() (get_lowcore()->cpu_nr)
+#define arch_scale_cpu_capacity smp_cpu_get_capacity
extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
@@ -34,6 +35,9 @@ extern void smp_save_dump_secondary_cpus(void);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
+extern void smp_cpu_set_capacity(int cpu, unsigned long val);
+extern void smp_set_core_capacity(int cpu, unsigned long val);
+extern unsigned long smp_cpu_get_capacity(int cpu);
extern int smp_cpu_get_cpu_address(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 3a0ac0c7a9a3..cef06bffad80 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -67,6 +67,9 @@ static inline void topology_expect_change(void) { }
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
+#define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE
+#define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3)
+
#define SD_BOOK_INIT SD_CPU_INIT
#ifdef CONFIG_NUMA
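
CPU_CAPACITY_LOW is SCHED_CAPACITY_SCALE >> 3, i.e. 128 against 1024, so
a vertical low CORE advertises one eighth of a vertical high CORE's
capacity. Combined with the arch_scale_cpu_capacity hook wired to
smp_cpu_get_capacity() in smp.h above, the scheduler effectively sees
this (sketch, helper name assumed):

	static bool cpu_capacity_lowered(int cpu)
	{
		/* arch_scale_cpu_capacity(cpu) == smp_cpu_get_capacity(cpu) */
		return arch_scale_cpu_capacity(cpu) == CPU_CAPACITY_LOW;
	}
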
diff --git a/arch/s390/include/asm/trace/hiperdispatch.h b/arch/s390/include/asm/trace/hiperdispatch.h
new file mode 100644
index 000000000000..46462ee645b0
--- /dev/null
+++ b/arch/s390/include/asm/trace/hiperdispatch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoint header for hiperdispatch
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_HIPERDISPATCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_HIPERDISPATCH_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE hiperdispatch
+
+TRACE_EVENT(s390_hd_work_fn,
+ TP_PROTO(int steal_time_percentage,
+ int entitled_core_count,
+ int highcap_core_count),
+ TP_ARGS(steal_time_percentage,
+ entitled_core_count,
+ highcap_core_count),
+ TP_STRUCT__entry(__field(int, steal_time_percentage)
+ __field(int, entitled_core_count)
+ __field(int, highcap_core_count)),
+ TP_fast_assign(__entry->steal_time_percentage = steal_time_percentage;
+ __entry->entitled_core_count = entitled_core_count;
+ __entry->highcap_core_count = highcap_core_count;),
+ TP_printk("steal: %d entitled_core_count: %d highcap_core_count: %d",
+ __entry->steal_time_percentage,
+ __entry->entitled_core_count,
+ __entry->highcap_core_count)
+);
+
+TRACE_EVENT(s390_hd_rebuild_domains,
+ TP_PROTO(int current_highcap_core_count,
+ int new_highcap_core_count),
+ TP_ARGS(current_highcap_core_count,
+ new_highcap_core_count),
+ TP_STRUCT__entry(__field(int, current_highcap_core_count)
+ __field(int, new_highcap_core_count)),
+ TP_fast_assign(__entry->current_highcap_core_count = current_highcap_core_count;
+ __entry->new_highcap_core_count = new_highcap_core_count),
+ TP_printk("change highcap_core_count: %u -> %u",
+ __entry->current_highcap_core_count,
+ __entry->new_highcap_core_count)
+);
+
+#endif /* _TRACE_S390_HIPERDISPATCH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 5ad76471e73f..60431d00e6bd 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -41,6 +41,10 @@
#define PKEY_KEYTYPE_ECC_P521 7
#define PKEY_KEYTYPE_ECC_ED25519 8
#define PKEY_KEYTYPE_ECC_ED448 9
+#define PKEY_KEYTYPE_AES_XTS_128 10
+#define PKEY_KEYTYPE_AES_XTS_256 11
+#define PKEY_KEYTYPE_HMAC_512 12
+#define PKEY_KEYTYPE_HMAC_1024 13
/* the newer ioctls use a pkey_key_type enum for type information */
enum pkey_key_type {
@@ -50,6 +54,7 @@ enum pkey_key_type {
PKEY_TYPE_CCA_ECC = (__u32) 0x1f,
PKEY_TYPE_EP11_AES = (__u32) 6,
PKEY_TYPE_EP11_ECC = (__u32) 7,
+ PKEY_TYPE_PROTKEY = (__u32) 8,
};
/* the newer ioctls use a pkey_key_size enum for key size information */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index e47a4be54ff8..48caae8c7e10 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -36,22 +36,23 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
-obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
+obj-y := head64.o traps.o time.o process.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o
+obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o wti.o
extra-y += vmlinux.lds
obj-$(CONFIG_SYSFS) += nospec-sysfs.o
CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
+obj-$(CONFIG_SYSFS) += cpacf.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
+obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o hiperdispatch.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ffa0dd2dbaac..5529248d84fb 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -112,8 +112,7 @@ int main(void)
OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw);
OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw);
/* software defined lowcore locations 0x200 - 0xdff*/
- OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
- OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
+ OFFSET(__LC_SAVE_AREA, lowcore, save_area);
OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
OFFSET(__LC_PCPU, lowcore, pcpu);
OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
diff --git a/arch/s390/kernel/cpacf.c b/arch/s390/kernel/cpacf.c
new file mode 100644
index 000000000000..c8575dbc890d
--- /dev/null
+++ b/arch/s390/kernel/cpacf.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "cpacf"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <asm/cpacf.h>
+
+#define CPACF_QUERY(name, instruction) \
+static ssize_t name##_query_raw_read(struct file *fp, \
+ struct kobject *kobj, \
+ struct bin_attribute *attr, \
+ char *buf, loff_t offs, \
+ size_t count) \
+{ \
+ cpacf_mask_t mask; \
+ \
+ if (!cpacf_query(CPACF_##instruction, &mask)) \
+ return -EOPNOTSUPP; \
+ return memory_read_from_buffer(buf, count, &offs, &mask, sizeof(mask)); \
+} \
+static BIN_ATTR_RO(name##_query_raw, sizeof(cpacf_mask_t))
+
+CPACF_QUERY(km, KM);
+CPACF_QUERY(kmc, KMC);
+CPACF_QUERY(kimd, KIMD);
+CPACF_QUERY(klmd, KLMD);
+CPACF_QUERY(kmac, KMAC);
+CPACF_QUERY(pckmo, PCKMO);
+CPACF_QUERY(kmf, KMF);
+CPACF_QUERY(kmctr, KMCTR);
+CPACF_QUERY(kmo, KMO);
+CPACF_QUERY(pcc, PCC);
+CPACF_QUERY(prno, PRNO);
+CPACF_QUERY(kma, KMA);
+CPACF_QUERY(kdsa, KDSA);
+
+#define CPACF_QAI(name, instruction) \
+static ssize_t name##_query_auth_info_raw_read( \
+ struct file *fp, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, loff_t offs, \
+ size_t count) \
+{ \
+ cpacf_qai_t qai; \
+ \
+ if (!cpacf_qai(CPACF_##instruction, &qai)) \
+ return -EOPNOTSUPP; \
+ return memory_read_from_buffer(buf, count, &offs, &qai, \
+ sizeof(qai)); \
+} \
+static BIN_ATTR_RO(name##_query_auth_info_raw, sizeof(cpacf_qai_t))
+
+CPACF_QAI(km, KM);
+CPACF_QAI(kmc, KMC);
+CPACF_QAI(kimd, KIMD);
+CPACF_QAI(klmd, KLMD);
+CPACF_QAI(kmac, KMAC);
+CPACF_QAI(pckmo, PCKMO);
+CPACF_QAI(kmf, KMF);
+CPACF_QAI(kmctr, KMCTR);
+CPACF_QAI(kmo, KMO);
+CPACF_QAI(pcc, PCC);
+CPACF_QAI(prno, PRNO);
+CPACF_QAI(kma, KMA);
+CPACF_QAI(kdsa, KDSA);
+
+static struct bin_attribute *cpacf_attrs[] = {
+ &bin_attr_km_query_raw,
+ &bin_attr_kmc_query_raw,
+ &bin_attr_kimd_query_raw,
+ &bin_attr_klmd_query_raw,
+ &bin_attr_kmac_query_raw,
+ &bin_attr_pckmo_query_raw,
+ &bin_attr_kmf_query_raw,
+ &bin_attr_kmctr_query_raw,
+ &bin_attr_kmo_query_raw,
+ &bin_attr_pcc_query_raw,
+ &bin_attr_prno_query_raw,
+ &bin_attr_kma_query_raw,
+ &bin_attr_kdsa_query_raw,
+ &bin_attr_km_query_auth_info_raw,
+ &bin_attr_kmc_query_auth_info_raw,
+ &bin_attr_kimd_query_auth_info_raw,
+ &bin_attr_klmd_query_auth_info_raw,
+ &bin_attr_kmac_query_auth_info_raw,
+ &bin_attr_pckmo_query_auth_info_raw,
+ &bin_attr_kmf_query_auth_info_raw,
+ &bin_attr_kmctr_query_auth_info_raw,
+ &bin_attr_kmo_query_auth_info_raw,
+ &bin_attr_pcc_query_auth_info_raw,
+ &bin_attr_prno_query_auth_info_raw,
+ &bin_attr_kma_query_auth_info_raw,
+ &bin_attr_kdsa_query_auth_info_raw,
+ NULL,
+};
+
+static const struct attribute_group cpacf_attr_grp = {
+ .name = "cpacf",
+ .bin_attrs = cpacf_attrs,
+};
+
+static int __init cpacf_init(void)
+{
+ struct device *cpu_root;
+ int rc = 0;
+
+ cpu_root = bus_get_dev_root(&cpu_subsys);
+ if (cpu_root) {
+ rc = sysfs_create_group(&cpu_root->kobj, &cpacf_attr_grp);
+ put_device(cpu_root);
+ }
+ return rc;
+}
+device_initcall(cpacf_init);
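
The group lands under the cpu subsystem root, so the attributes should
show up as /sys/devices/system/cpu/cpacf/<insn>_query_raw and
<insn>_query_auth_info_raw. A small userspace sketch (error handling
trimmed) that hex-dumps the 16 byte KM mask:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char mask[16];	/* sizeof(cpacf_mask_t) */
		int fd, i;

		fd = open("/sys/devices/system/cpu/cpacf/km_query_raw", O_RDONLY);
		if (fd < 0 || read(fd, mask, sizeof(mask)) != sizeof(mask))
			return 1;	/* errno is EOPNOTSUPP if the query fails */
		for (i = 0; i < (int)sizeof(mask); i++)
			printf("%02x", mask[i]);
		printf("\n");
		close(fd);
		return 0;
	}
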
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bce50ca75ea7..e62bea9ab21e 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -163,7 +163,6 @@ static const struct file_operations debug_file_ops = {
.write = debug_input,
.open = debug_open,
.release = debug_close,
- .llseek = no_llseek,
};
static struct dentry *debug_debugfs_root_entry;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index ac7b8c8e3133..007e1795670e 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -52,6 +52,7 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
[DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" },
[DIAG_STAT_X320] = { .code = 0x320, .name = "Certificate Store" },
+ [DIAG_STAT_X49C] = { .code = 0x49c, .name = "Warning-Track Interruption" },
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
@@ -303,3 +304,19 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode)
return diag_amode31_ops.diag26c(virt_to_phys(req), virt_to_phys(resp), subcode);
}
EXPORT_SYMBOL(diag26c);
+
+int diag49c(unsigned long subcode)
+{
+ int rc;
+
+ diag_stat_inc(DIAG_STAT_X49C);
+ asm volatile(
+ " diag %[subcode],0,0x49c\n"
+ " ipm %[rc]\n"
+ " srl %[rc],28\n"
+ : [rc] "=d" (rc)
+ : [subcode] "d" (subcode)
+ : "cc");
+ return rc;
+}
+EXPORT_SYMBOL(diag49c);
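
diag49c() returns the condition code of the diagnose, 0 on success. The
warning-track code added elsewhere in this series would use it roughly
like this (sketch, not the exact call sites):

	/* Register for warning-track interrupts. */
	if (diag49c(DIAG49C_SUBC_REG))
		pr_warn("diag 0x49c registration failed\n");

	/* ... later, acknowledge a delivered warning-track interrupt. */
	diag49c(DIAG49C_SUBC_ACK);
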
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 89dc826a8d2e..94eb8168ea44 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -122,6 +122,7 @@ enum {
U8_32, /* 8 bit unsigned value starting at 32 */
U12_16, /* 12 bit unsigned value starting at 16 */
U16_16, /* 16 bit unsigned value starting at 16 */
+ U16_20, /* 16 bit unsigned value starting at 20 */
U16_32, /* 16 bit unsigned value starting at 32 */
U32_16, /* 32 bit unsigned value starting at 16 */
VX_12, /* Vector index register starting at position 12 */
@@ -184,6 +185,7 @@ static const struct s390_operand operands[] = {
[U8_32] = { 8, 32, 0 },
[U12_16] = { 12, 16, 0 },
[U16_16] = { 16, 16, 0 },
+ [U16_20] = { 16, 20, 0 },
[U16_32] = { 16, 32, 0 },
[U32_16] = { 32, 16, 0 },
[VX_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
@@ -257,7 +259,6 @@ static const unsigned char formats[][6] = {
[INSTR_RSL_R0RD] = { D_20, L4_8, B_16, 0, 0, 0 },
[INSTR_RSY_AARD] = { A_8, A_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_CCRD] = { C_8, C_12, D20_20, B_16, 0, 0 },
- [INSTR_RSY_RDRU] = { R_8, D20_20, B_16, U4_12, 0, 0 },
[INSTR_RSY_RRRD] = { R_8, R_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RURD] = { R_8, U4_12, D20_20, B_16, 0, 0 },
[INSTR_RSY_RURD2] = { R_8, D20_20, B_16, U4_12, 0, 0 },
@@ -300,14 +301,17 @@ static const unsigned char formats[][6] = {
[INSTR_VRI_V0UU2] = { V_8, U16_16, U4_32, 0, 0, 0 },
[INSTR_VRI_V0UUU] = { V_8, U8_16, U8_24, U4_32, 0, 0 },
[INSTR_VRI_VR0UU] = { V_8, R_12, U8_28, U4_24, 0, 0 },
+ [INSTR_VRI_VV0UU] = { V_8, V_12, U8_28, U4_24, 0, 0 },
[INSTR_VRI_VVUU] = { V_8, V_12, U16_16, U4_32, 0, 0 },
[INSTR_VRI_VVUUU] = { V_8, V_12, U12_16, U4_32, U4_28, 0 },
[INSTR_VRI_VVUUU2] = { V_8, V_12, U8_28, U8_16, U4_24, 0 },
[INSTR_VRI_VVV0U] = { V_8, V_12, V_16, U8_24, 0, 0 },
[INSTR_VRI_VVV0UU] = { V_8, V_12, V_16, U8_24, U4_32, 0 },
[INSTR_VRI_VVV0UU2] = { V_8, V_12, V_16, U8_28, U4_24, 0 },
- [INSTR_VRR_0V] = { V_12, 0, 0, 0, 0, 0 },
+ [INSTR_VRI_VVV0UV] = { V_8, V_12, V_16, V_32, U8_24, 0 },
+ [INSTR_VRR_0V0U] = { V_12, U16_20, 0, 0, 0, 0 },
[INSTR_VRR_0VV0U] = { V_12, V_16, U4_24, 0, 0, 0 },
+ [INSTR_VRR_0VVU] = { V_12, V_16, U16_20, 0, 0, 0 },
[INSTR_VRR_RV0UU] = { R_8, V_12, U4_24, U4_28, 0, 0 },
[INSTR_VRR_VRR] = { V_8, R_12, R_16, 0, 0, 0 },
[INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 },
@@ -455,21 +459,21 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
if (separator)
ptr += sprintf(ptr, "%c", separator);
if (operand->flags & OPERAND_GPR)
- ptr += sprintf(ptr, "%%r%i", value);
+ ptr += sprintf(ptr, "%%r%u", value);
else if (operand->flags & OPERAND_FPR)
- ptr += sprintf(ptr, "%%f%i", value);
+ ptr += sprintf(ptr, "%%f%u", value);
else if (operand->flags & OPERAND_AR)
- ptr += sprintf(ptr, "%%a%i", value);
+ ptr += sprintf(ptr, "%%a%u", value);
else if (operand->flags & OPERAND_CR)
- ptr += sprintf(ptr, "%%c%i", value);
+ ptr += sprintf(ptr, "%%c%u", value);
else if (operand->flags & OPERAND_VR)
- ptr += sprintf(ptr, "%%v%i", value);
+ ptr += sprintf(ptr, "%%v%u", value);
else if (operand->flags & OPERAND_PCREL) {
void *pcrel = (void *)((int)value + addr);
ptr += sprintf(ptr, "%px", pcrel);
} else if (operand->flags & OPERAND_SIGNED)
- ptr += sprintf(ptr, "%i", value);
+ ptr += sprintf(ptr, "%i", (int)value);
else
ptr += sprintf(ptr, "%u", value);
if (operand->flags & OPERAND_DISP)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 14d324865e33..62f8f5a750a3 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -7,6 +7,7 @@
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/sched/debug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -175,20 +176,45 @@ static __init void setup_topology(void)
topology_max_mnest = max_mnest;
}
-void __do_early_pgm_check(struct pt_regs *regs)
+void __init __do_early_pgm_check(struct pt_regs *regs)
{
- if (!fixup_exception(regs))
- disabled_wait();
+ struct lowcore *lc = get_lowcore();
+ unsigned long ip;
+
+ regs->int_code = lc->pgm_int_code;
+ regs->int_parm_long = lc->trans_exc_code;
+ ip = __rewind_psw(regs->psw, regs->int_code >> 16);
+
+ /* Monitor Event? Might be a warning */
+ if ((regs->int_code & PGM_INT_CODE_MASK) == 0x40) {
+ if (report_bug(ip, regs) == BUG_TRAP_TYPE_WARN)
+ return;
+ }
+ if (fixup_exception(regs))
+ return;
+ /*
+ * Unhandled exception: the system cannot continue, but try to get some
+ * helpful messages to the console. Use early_printk() to print
+ * some basic information in case it is too early for printk().
+ */
+ register_early_console();
+ early_printk("PANIC: early exception %04x PSW: %016lx %016lx\n",
+ regs->int_code & 0xffff, regs->psw.mask, regs->psw.addr);
+ show_regs(regs);
+ disabled_wait();
}
static noinline __init void setup_lowcore_early(void)
{
+ struct lowcore *lc = get_lowcore();
psw_t psw;
psw.addr = (unsigned long)early_pgm_check_handler;
psw.mask = PSW_KERNEL_BITS;
- get_lowcore()->program_new_psw = psw;
- get_lowcore()->preempt_count = INIT_PREEMPT_COUNT;
+ lc->program_new_psw = psw;
+ lc->preempt_count = INIT_PREEMPT_COUNT;
+ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
}
static __init void detect_diag9c(void)
@@ -242,6 +268,8 @@ static __init void detect_machine_facilities(void)
}
if (test_facility(194))
get_lowcore()->machine_flags |= MACHINE_FLAG_RDP;
+ if (test_facility(85))
+ get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN;
}
static inline void save_vector_registers(void)
diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c
index d9d53f44008a..cefe020a3be3 100644
--- a/arch/s390/kernel/early_printk.c
+++ b/arch/s390/kernel/early_printk.c
@@ -6,6 +6,7 @@
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <asm/setup.h>
#include <asm/sclp.h>
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
@@ -20,6 +21,16 @@ static struct console sclp_early_console = {
.index = -1,
};
+void __init register_early_console(void)
+{
+ if (early_console)
+ return;
+ if (!sclp.has_linemode && !sclp.has_vt220)
+ return;
+ early_console = &sclp_early_console;
+ register_console(early_console);
+}
+
static int __init setup_early_printk(char *buf)
{
if (early_console)
@@ -27,10 +38,7 @@ static int __init setup_early_printk(char *buf)
/* Accept only "earlyprintk" and "earlyprintk=sclp" */
if (buf && !str_has_prefix(buf, "sclp"))
return 0;
- if (!sclp.has_linemode && !sclp.has_vt220)
- return 0;
- early_console = &sclp_early_console;
- register_console(early_console);
+ register_early_console();
return 0;
}
early_param("earlyprintk", setup_early_printk);
diff --git a/arch/s390/kernel/earlypgm.S b/arch/s390/kernel/earlypgm.S
deleted file mode 100644
index c634871f0d90..000000000000
--- a/arch/s390/kernel/earlypgm.S
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 2006, 2007
- * Author(s): Michael Holzheu <holzheu@de.ibm.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-
-SYM_CODE_START(early_pgm_check_handler)
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- lgr %r2,%r11
- brasl %r14,__do_early_pgm_check
- mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
- lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
- lpswe __LC_RETURN_PSW
-SYM_CODE_END(early_pgm_check_handler)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 269436665d02..d6d5317f768e 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -264,7 +264,7 @@ EXPORT_SYMBOL(sie_exit)
*/
SYM_CODE_START(system_call)
- STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC
+ STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stpt __LC_SYS_ENTER_TIMER(%r13)
BPOFF
@@ -287,7 +287,7 @@ SYM_CODE_START(system_call)
xgr %r10,%r10
xgr %r11,%r11
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
- mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC(%r13)
+ mvc __PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
MBEAR %r2,%r13
lgr %r3,%r14
brasl %r14,__do_syscall
@@ -323,7 +323,7 @@ SYM_CODE_END(ret_from_fork)
*/
SYM_CODE_START(pgm_check_handler)
- STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC
+ STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stpt __LC_SYS_ENTER_TIMER(%r13)
BPOFF
@@ -338,16 +338,16 @@ SYM_CODE_START(pgm_check_handler)
jnz 2f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3(%r13),0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
-2: CHECK_STACK __LC_SAVE_AREA_SYNC,%r13
+2: CHECK_STACK __LC_SAVE_AREA,%r13
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
# CHECK_VMAP_STACK branches to stack_overflow or 4f
- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,%r13,4f
+ CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3: lg %r15,__LC_KERNEL_STACK(%r13)
4: la %r11,STACK_FRAME_OVERHEAD(%r15)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC(%r13)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
stctg %c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
@@ -398,7 +398,7 @@ SYM_CODE_END(pgm_check_handler)
*/
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
- STMG_LC %r8,%r15,__LC_SAVE_AREA_ASYNC
+ STMG_LC %r8,%r15,__LC_SAVE_AREA
GET_LC %r13
stckf __LC_INT_CLOCK(%r13)
stpt __LC_SYS_ENTER_TIMER(%r13)
@@ -414,7 +414,7 @@ SYM_CODE_START(\name)
BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
-0: CHECK_STACK __LC_SAVE_AREA_ASYNC,%r13
+0: CHECK_STACK __LC_SAVE_AREA,%r13
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13)
@@ -432,7 +432,7 @@ SYM_CODE_START(\name)
xgr %r7,%r7
xgr %r10,%r10
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC(%r13)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
MBEAR %r11,%r13
stmg %r8,%r9,__PT_PSW(%r11)
lgr %r2,%r11 # pass pointer to pt_regs
@@ -599,6 +599,24 @@ SYM_CODE_START(restart_int_handler)
3: j 3b
SYM_CODE_END(restart_int_handler)
+ __INIT
+SYM_CODE_START(early_pgm_check_handler)
+ STMG_LC %r8,%r15,__LC_SAVE_AREA
+ GET_LC %r13
+ aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
+ lgr %r2,%r11
+ brasl %r14,__do_early_pgm_check
+ mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
+ lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
+ LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
+SYM_CODE_END(early_pgm_check_handler)
+ __FINIT
+
.section .kprobes.text, "ax"
#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 0bd6adc40a34..0b6e62d1d8b8 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -50,10 +50,6 @@ struct ftrace_insn {
s32 disp;
} __packed;
-#ifdef CONFIG_MODULES
-static char *ftrace_plt;
-#endif /* CONFIG_MODULES */
-
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
const char *tstart, *tend;
@@ -73,19 +69,20 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
bool ftrace_need_init_nop(void)
{
- return true;
+ return !MACHINE_HAS_SEQ_INSN;
}
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
__ftrace_hotpatch_trampolines_start;
- static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
+ static const struct ftrace_insn orig = { .opc = 0xc004, .disp = 0 };
static struct ftrace_hotpatch_trampoline *trampoline;
struct ftrace_hotpatch_trampoline **next_trampoline;
struct ftrace_hotpatch_trampoline *trampolines_end;
struct ftrace_hotpatch_trampoline tmp;
struct ftrace_insn *insn;
+ struct ftrace_insn old;
const char *shared;
s32 disp;
@@ -99,7 +96,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
if (mod) {
next_trampoline = &mod->arch.next_trampoline;
trampolines_end = mod->arch.trampolines_end;
- shared = ftrace_plt;
}
#endif
@@ -107,8 +103,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
return -ENOMEM;
trampoline = (*next_trampoline)++;
+ if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old)))
+ return -EFAULT;
/* Check for the compiler-generated fentry nop (brcl 0, .). */
- if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
+ if (WARN_ON_ONCE(memcmp(&orig, &old, sizeof(old))))
return -EINVAL;
/* Generate the trampoline. */
@@ -144,8 +142,35 @@ static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrac
return trampoline;
}
-int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
- unsigned long addr)
+static inline struct ftrace_insn
+ftrace_generate_branch_insn(unsigned long ip, unsigned long target)
+{
+ /* brasl r0,target or brcl 0,0 */
+ return (struct ftrace_insn){ .opc = target ? 0xc005 : 0xc004,
+ .disp = target ? (target - ip) / 2 : 0 };
+}
+
+static int ftrace_patch_branch_insn(unsigned long ip, unsigned long old_target,
+ unsigned long target)
+{
+ struct ftrace_insn orig = ftrace_generate_branch_insn(ip, old_target);
+ struct ftrace_insn new = ftrace_generate_branch_insn(ip, target);
+ struct ftrace_insn old;
+
+ if (!IS_ALIGNED(ip, 8))
+ return -EINVAL;
+ if (copy_from_kernel_nofault(&old, (void *)ip, sizeof(old)))
+ return -EFAULT;
+ /* Verify that the code to be replaced matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ s390_kernel_write((void *)ip, &new, sizeof(new));
+ return 0;
+}
+
+static int ftrace_modify_trampoline_call(struct dyn_ftrace *rec,
+ unsigned long old_addr,
+ unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
u64 old;
@@ -161,6 +186,15 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return 0;
}
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ if (MACHINE_HAS_SEQ_INSN)
+ return ftrace_patch_branch_insn(rec->ip, old_addr, addr);
+ else
+ return ftrace_modify_trampoline_call(rec, old_addr, addr);
+}
+
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
u16 old;
@@ -179,11 +213,14 @@ static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- /* Expect brcl 0xf,... */
- return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
+ /* Expect brcl 0xf,... for the !MACHINE_HAS_SEQ_INSN case */
+ if (MACHINE_HAS_SEQ_INSN)
+ return ftrace_patch_branch_insn(rec->ip, addr, 0);
+ else
+ return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}
-int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+static int ftrace_make_trampoline_call(struct dyn_ftrace *rec, unsigned long addr)
{
struct ftrace_hotpatch_trampoline *trampoline;
@@ -195,6 +232,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ if (MACHINE_HAS_SEQ_INSN)
+ return ftrace_patch_branch_insn(rec->ip, 0, addr);
+ else
+ return ftrace_make_trampoline_call(rec, addr);
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_func = func;
@@ -215,25 +260,6 @@ void ftrace_arch_code_modify_post_process(void)
text_poke_sync_lock();
}
-#ifdef CONFIG_MODULES
-
-static int __init ftrace_plt_init(void)
-{
- const char *start, *end;
-
- ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE);
- if (!ftrace_plt)
- panic("cannot allocate ftrace plt\n");
-
- start = ftrace_shared_hotpatch_trampoline(&end);
- memcpy(ftrace_plt, start, end - start);
- set_memory_rox((unsigned long)ftrace_plt, 1);
- return 0;
-}
-device_initcall(ftrace_plt_init);
-
-#endif /* CONFIG_MODULES */
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addresses
@@ -264,26 +290,14 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
*/
int ftrace_enable_ftrace_graph_caller(void)
{
- int rc;
-
/* Expect brc 0xf,... */
- rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
- if (rc)
- return rc;
- text_poke_sync_lock();
- return 0;
+ return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
}
int ftrace_disable_ftrace_graph_caller(void)
{
- int rc;
-
/* Expect brc 0x0,... */
- rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
- if (rc)
- return rc;
- text_poke_sync_lock();
- return 0;
+ return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
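
brasl/brcl take a signed 32-bit displacement counted in halfwords
relative to the instruction address, hence the division by 2 in
ftrace_generate_branch_insn(). A worked example of the helper's output
(addresses illustrative):

	/* ip = 0x1000, target = 0x3000:
	 *	opc  = 0xc005			brasl %r0,<target>
	 *	disp = (0x3000 - 0x1000) / 2 = 0x1000 halfwords
	 * target == 0 requests the nop form instead:
	 *	opc  = 0xc004, disp = 0		brcl 0,.
	 */
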
diff --git a/arch/s390/kernel/ftrace.h b/arch/s390/kernel/ftrace.h
index 7f75a9616406..23337065f402 100644
--- a/arch/s390/kernel/ftrace.h
+++ b/arch/s390/kernel/ftrace.h
@@ -18,7 +18,5 @@ extern const char ftrace_shared_hotpatch_trampoline_br[];
extern const char ftrace_shared_hotpatch_trampoline_br_end[];
extern const char ftrace_shared_hotpatch_trampoline_exrl[];
extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
-extern const char ftrace_plt_template[];
-extern const char ftrace_plt_template_end[];
#endif /* _FTRACE_H */
diff --git a/arch/s390/kernel/hiperdispatch.c b/arch/s390/kernel/hiperdispatch.c
new file mode 100644
index 000000000000..2a99a216ab62
--- /dev/null
+++ b/arch/s390/kernel/hiperdispatch.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "hd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+/*
+ * Hiperdispatch:
+ * Dynamically calculates the optimum number of high capacity COREs
+ * by considering the state the system is in. When hiperdispatch decides
+ * that a capacity update is necessary, it schedules a topology update.
+ * During topology updates the CPU capacities are always re-adjusted.
+ *
+ * There are two places where CPU capacities are accessed within
+ * hiperdispatch:
+ * -> hiperdispatch's recurring work function reads CPU capacities to
+ * determine the high capacity CPU count.
+ * -> during a topology update hiperdispatch's adjustment function
+ * updates CPU capacities.
+ * These two can run on different CPUs in parallel which can cause
+ * hiperdispatch to make wrong decisions. This can potentially cause
+ * some overhead by leading to extra rebuild_sched_domains() calls
+ * for correction. Access to capacities within hiperdispatch has to be
+ * serialized to prevent the overhead.
+ *
+ * Hiperdispatch decision making revolves around steal time.
+ * HD_STEAL_THRESHOLD value is taken as reference. Whenever steal time
+ * crosses the threshold value hiperdispatch falls back to giving high
+ * capacities to entitled CPUs. When steal time drops below the
+ * threshold boundary, hiperdispatch utilizes all CPUs by giving all
+ * of them high capacity.
+ *
+ * The theory behind HD_STEAL_THRESHOLD is related to SMP thread
+ * performance. Comparing the throughput of:
+ * - a single CORE, with N threads, running N tasks
+ * - N separate COREs running N tasks,
+ * using individual COREs for individual tasks yields better
+ * performance. This performance difference is roughly ~30% (it can vary
+ * between machine generations).
+ *
+ * Hiperdispatch tries to hint the scheduler to use individual COREs for
+ * each task, as long as steal time on those COREs is less than 30%,
+ * thereby delaying the throughput loss caused by using SMP threads.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/kernel_stat.h>
+#include <linux/kstrtox.h>
+#include <linux/ktime.h>
+#include <linux/sysctl.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <asm/hiperdispatch.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/topology.h>
+
+#define CREATE_TRACE_POINTS
+#include <asm/trace/hiperdispatch.h>
+
+#define HD_DELAY_FACTOR (4)
+#define HD_DELAY_INTERVAL (HZ / 4)
+#define HD_STEAL_THRESHOLD 30
+#define HD_STEAL_AVG_WEIGHT 16
+
+static cpumask_t hd_vl_coremask; /* Mask containing all vertical low COREs */
+static cpumask_t hd_vmvl_cpumask; /* Mask containing vertical medium and low CPUs */
+static int hd_high_capacity_cores; /* Current CORE count with high capacity */
+static int hd_entitled_cores; /* Total vertical high and medium CORE count */
+static int hd_online_cores; /* Current online CORE count */
+
+static unsigned long hd_previous_steal; /* Previous iteration's CPU steal timer total */
+static unsigned long hd_high_time; /* Total time spent while all cpus have high capacity */
+static unsigned long hd_low_time; /* Total time spent while vl cpus have low capacity */
+static atomic64_t hd_adjustments; /* Total occurrence count of hiperdispatch adjustments */
+
+static unsigned int hd_steal_threshold = HD_STEAL_THRESHOLD;
+static unsigned int hd_delay_factor = HD_DELAY_FACTOR;
+static int hd_enabled;
+
+static void hd_capacity_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(hd_capacity_work, hd_capacity_work_fn);
+
+static int hd_set_hiperdispatch_mode(int enable)
+{
+ if (!MACHINE_HAS_TOPOLOGY)
+ enable = 0;
+ if (hd_enabled == enable)
+ return 0;
+ hd_enabled = enable;
+ return 1;
+}
+
+void hd_reset_state(void)
+{
+ cpumask_clear(&hd_vl_coremask);
+ cpumask_clear(&hd_vmvl_cpumask);
+ hd_entitled_cores = 0;
+ hd_online_cores = 0;
+}
+
+void hd_add_core(int cpu)
+{
+ const struct cpumask *siblings;
+ int polarization;
+
+ hd_online_cores++;
+ polarization = smp_cpu_get_polarization(cpu);
+ siblings = topology_sibling_cpumask(cpu);
+ switch (polarization) {
+ case POLARIZATION_VH:
+ hd_entitled_cores++;
+ break;
+ case POLARIZATION_VM:
+ hd_entitled_cores++;
+ cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings);
+ break;
+ case POLARIZATION_VL:
+ cpumask_set_cpu(cpu, &hd_vl_coremask);
+ cpumask_or(&hd_vmvl_cpumask, &hd_vmvl_cpumask, siblings);
+ break;
+ }
+}
+
+/* Serialize update and read operations of debug counters. */
+static DEFINE_MUTEX(hd_counter_mutex);
+
+static void hd_update_times(void)
+{
+ static ktime_t prev;
+ ktime_t now;
+
+ /*
+ * Check if hiperdispatch is active; if not, set prev to 0.
+ * This makes it possible to distinguish the first update iteration
+ * after enabling hiperdispatch.
+ */
+ if (hd_entitled_cores == 0 || hd_enabled == 0) {
+ prev = ktime_set(0, 0);
+ return;
+ }
+ now = ktime_get();
+ if (ktime_after(prev, 0)) {
+ if (hd_high_capacity_cores == hd_online_cores)
+ hd_high_time += ktime_ms_delta(now, prev);
+ else
+ hd_low_time += ktime_ms_delta(now, prev);
+ }
+ prev = now;
+}
+
+static void hd_update_capacities(void)
+{
+ int cpu, upscaling_cores;
+ unsigned long capacity;
+
+ upscaling_cores = hd_high_capacity_cores - hd_entitled_cores;
+ capacity = upscaling_cores > 0 ? CPU_CAPACITY_HIGH : CPU_CAPACITY_LOW;
+ hd_high_capacity_cores = hd_entitled_cores;
+ for_each_cpu(cpu, &hd_vl_coremask) {
+ smp_set_core_capacity(cpu, capacity);
+ if (capacity != CPU_CAPACITY_HIGH)
+ continue;
+ hd_high_capacity_cores++;
+ upscaling_cores--;
+ if (upscaling_cores == 0)
+ capacity = CPU_CAPACITY_LOW;
+ }
+}
+
+void hd_disable_hiperdispatch(void)
+{
+ cancel_delayed_work_sync(&hd_capacity_work);
+ hd_high_capacity_cores = hd_online_cores;
+ hd_previous_steal = 0;
+}
+
+int hd_enable_hiperdispatch(void)
+{
+ mutex_lock(&hd_counter_mutex);
+ hd_update_times();
+ mutex_unlock(&hd_counter_mutex);
+ if (hd_enabled == 0)
+ return 0;
+ if (hd_entitled_cores == 0)
+ return 0;
+ if (hd_online_cores <= hd_entitled_cores)
+ return 0;
+ mod_delayed_work(system_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
+ hd_update_capacities();
+ return 1;
+}
+
+static unsigned long hd_steal_avg(unsigned long new)
+{
+ static unsigned long steal;
+
+ steal = (steal * (HD_STEAL_AVG_WEIGHT - 1) + new) / HD_STEAL_AVG_WEIGHT;
+ return steal;
+}
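The weight-16 running average above is a plain exponential moving average over per-interval steal samples. As a minimal user-space sketch (not kernel code; steal_avg() here mirrors hd_steal_avg() under that assumption), it shows how quickly a constant 32% steal input is absorbed:

#include <stdio.h>

/* User-space mirror of hd_steal_avg(): weight-16 exponential average. */
static unsigned long steal_avg(unsigned long avg, unsigned long new)
{
	return (avg * 15 + new) / 16;	/* HD_STEAL_AVG_WEIGHT == 16 */
}

int main(void)
{
	unsigned long avg = 0;
	int i;

	for (i = 0; i < 8; i++) {
		avg = steal_avg(avg, 32);	/* constant 32% steal input */
		printf("%lu ", avg);		/* prints: 2 3 4 5 6 7 8 9 */
	}
	printf("\n");
	return 0;
}

The averaging smooths transient spikes, so a single busy interval does not immediately flip the capacity decision.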
+
+static unsigned long hd_calculate_steal_percentage(void)
+{
+ unsigned long time_delta, steal_delta, steal, percentage;
+ static ktime_t prev;
+ int cpus, cpu;
+ ktime_t now;
+
+ cpus = 0;
+ steal = 0;
+ percentage = 0;
+ for_each_cpu(cpu, &hd_vmvl_cpumask) {
+ steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ cpus++;
+ }
+ /*
+ * If there are no vertical medium or low CPUs, steal time
+ * is 0, as vertical high CPUs shouldn't experience steal time.
+ */
+ if (cpus == 0)
+ return percentage;
+ now = ktime_get();
+ time_delta = ktime_to_ns(ktime_sub(now, prev));
+ if (steal > hd_previous_steal && hd_previous_steal != 0) {
+ steal_delta = (steal - hd_previous_steal) * 100 / time_delta;
+ percentage = steal_delta / cpus;
+ }
+ hd_previous_steal = steal;
+ prev = now;
+ return percentage;
+}
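A worked example for the percentage computed above, with assumed numbers: two vertical medium/low CPUs observed over a 250 ms interval accumulate a combined 100 ms (1e8 ns) of additional steal time; then steal_delta = 1e8 * 100 / 2.5e8 = 40 and percentage = 40 / 2 = 20, i.e. an average of 20% steal per CPU, below the default 30% threshold.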
+
+static void hd_capacity_work_fn(struct work_struct *work)
+{
+ unsigned long steal_percentage, new_cores;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ /*
+ * If the online core count is less than or equal to the entitled
+ * core count, hiperdispatch does not need to make any adjustments;
+ * schedule a topology update to disable hiperdispatch.
+ * Normally this check is handled on topology update, but during CPU
+ * hot-unplug, topology and CPU mask updates are done in reverse
+ * order, causing hd_enable_hiperdispatch() to see stale data.
+ */
+ if (hd_online_cores <= hd_entitled_cores) {
+ topology_schedule_update();
+ mutex_unlock(&smp_cpu_state_mutex);
+ return;
+ }
+ steal_percentage = hd_steal_avg(hd_calculate_steal_percentage());
+ if (steal_percentage < hd_steal_threshold)
+ new_cores = hd_online_cores;
+ else
+ new_cores = hd_entitled_cores;
+ if (hd_high_capacity_cores != new_cores) {
+ trace_s390_hd_rebuild_domains(hd_high_capacity_cores, new_cores);
+ hd_high_capacity_cores = new_cores;
+ atomic64_inc(&hd_adjustments);
+ topology_schedule_update();
+ }
+ trace_s390_hd_work_fn(steal_percentage, hd_entitled_cores, hd_high_capacity_cores);
+ mutex_unlock(&smp_cpu_state_mutex);
+ schedule_delayed_work(&hd_capacity_work, HD_DELAY_INTERVAL);
+}
+
+static int hiperdispatch_ctl_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int hiperdispatch;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &hiperdispatch,
+ .maxlen = sizeof(int),
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ };
+
+ hiperdispatch = hd_enabled;
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+ mutex_lock(&smp_cpu_state_mutex);
+ if (hd_set_hiperdispatch_mode(hiperdispatch))
+ topology_schedule_update();
+ mutex_unlock(&smp_cpu_state_mutex);
+ return 0;
+}
+
+static struct ctl_table hiperdispatch_ctl_table[] = {
+ {
+ .procname = "hiperdispatch",
+ .mode = 0644,
+ .proc_handler = hiperdispatch_ctl_handler,
+ },
+};
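Registering this table via register_sysctl("s390", ...) in hd_init() below makes the knob visible as /proc/sys/s390/hiperdispatch. A minimal user-space sketch for toggling it (requires root; error handling trimmed):

#include <stdio.h>

int main(void)
{
	/* Enable hiperdispatch through the sysctl registered above. */
	FILE *f = fopen("/proc/sys/s390/hiperdispatch", "w");

	if (!f)
		return 1;
	fprintf(f, "1\n");
	return fclose(f) ? 1 : 0;
}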
+
+static ssize_t hd_steal_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%u\n", hd_steal_threshold);
+}
+
+static ssize_t hd_steal_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned int val;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+ if (val > 100)
+ return -ERANGE;
+ hd_steal_threshold = val;
+ return count;
+}
+
+static DEVICE_ATTR_RW(hd_steal_threshold);
+
+static ssize_t hd_delay_factor_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%u\n", hd_delay_factor);
+}
+
+static ssize_t hd_delay_factor_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned int val;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc)
+ return rc;
+ if (!val)
+ return -ERANGE;
+ hd_delay_factor = val;
+ return count;
+}
+
+static DEVICE_ATTR_RW(hd_delay_factor);
+
+static struct attribute *hd_attrs[] = {
+ &dev_attr_hd_steal_threshold.attr,
+ &dev_attr_hd_delay_factor.attr,
+ NULL,
+};
+
+static const struct attribute_group hd_attr_group = {
+ .name = "hiperdispatch",
+ .attrs = hd_attrs,
+};
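Since hd_create_attributes() below attaches this group to the root device of cpu_subsys, the two tunables are expected to appear under /sys/devices/system/cpu/hiperdispatch/. A small read sketch, assuming that path:

#include <stdio.h>

int main(void)
{
	unsigned int threshold;
	FILE *f = fopen("/sys/devices/system/cpu/hiperdispatch/hd_steal_threshold", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &threshold) != 1) {
		fclose(f);
		return 1;
	}
	printf("steal threshold: %u%%\n", threshold);
	return fclose(f) ? 1 : 0;
}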
+
+static int hd_greedy_time_get(void *unused, u64 *val)
+{
+ mutex_lock(&hd_counter_mutex);
+ hd_update_times();
+ *val = hd_high_time;
+ mutex_unlock(&hd_counter_mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hd_greedy_time_fops, hd_greedy_time_get, NULL, "%llu\n");
+
+static int hd_conservative_time_get(void *unused, u64 *val)
+{
+ mutex_lock(&hd_counter_mutex);
+ hd_update_times();
+ *val = hd_low_time;
+ mutex_unlock(&hd_counter_mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hd_conservative_time_fops, hd_conservative_time_get, NULL, "%llu\n");
+
+static int hd_adjustment_count_get(void *unused, u64 *val)
+{
+ *val = atomic64_read(&hd_adjustments);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hd_adjustments_fops, hd_adjustment_count_get, NULL, "%llu\n");
+
+static void __init hd_create_debugfs_counters(void)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("hiperdispatch", arch_debugfs_dir);
+ debugfs_create_file("conservative_time_ms", 0400, dir, NULL, &hd_conservative_time_fops);
+ debugfs_create_file("greedy_time_ms", 0400, dir, NULL, &hd_greedy_time_fops);
+ debugfs_create_file("adjustment_count", 0400, dir, NULL, &hd_adjustments_fops);
+}
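With arch_debugfs_dir backing the parent directory, the three counters should surface as read-only files, typically /sys/kernel/debug/s390/hiperdispatch/{greedy_time_ms,conservative_time_ms,adjustment_count}; the greedy and conservative times partition wall-clock time between the all-CPUs-high and entitled-only capacity states tracked by hd_update_times().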
+
+static void __init hd_create_attributes(void)
+{
+ struct device *dev;
+
+ dev = bus_get_dev_root(&cpu_subsys);
+ if (!dev)
+ return;
+ if (sysfs_create_group(&dev->kobj, &hd_attr_group))
+ pr_warn("Unable to create hiperdispatch attribute group\n");
+ put_device(dev);
+}
+
+static int __init hd_init(void)
+{
+ if (IS_ENABLED(CONFIG_HIPERDISPATCH_ON)) {
+ hd_set_hiperdispatch_mode(1);
+ topology_schedule_update();
+ }
+ if (!register_sysctl("s390", hiperdispatch_ctl_table))
+ pr_warn("Failed to register s390.hiperdispatch sysctl attribute\n");
+ hd_create_debugfs_counters();
+ hd_create_attributes();
+ return 0;
+}
+late_initcall(hd_init);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1af5a08d72ab..2639a3d12736 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -76,6 +76,7 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
+ {.irq = IRQEXT_WTI, .name = "WTI", .desc = "[EXT] Warning Track"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 05c83505e979..6295faf0987d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -21,6 +21,7 @@
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/execmem.h>
+#include <asm/text-patching.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
@@ -152,7 +153,12 @@ void arch_arm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
- stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ if (MACHINE_HAS_SEQ_INSN) {
+ swap_instruction(&args);
+ text_poke_sync();
+ } else {
+ stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ }
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
@@ -160,7 +166,12 @@ void arch_disarm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
- stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ if (MACHINE_HAS_SEQ_INSN) {
+ swap_instruction(&args);
+ text_poke_sync();
+ } else {
+ stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ }
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index ae4d4fd9afcd..7e267ef63a7f 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -9,6 +9,7 @@
#include <asm/ftrace.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
+#include <asm/march.h>
#define STACK_FRAME_SIZE_PTREGS (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@@ -88,7 +89,7 @@ SYM_CODE_START(ftrace_caller)
SYM_CODE_END(ftrace_caller)
SYM_CODE_START(ftrace_common)
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
lgrl %r1,ftrace_func
@@ -115,7 +116,7 @@ SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
.Lftrace_graph_caller_end:
#endif
lg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
ltg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
locgrz %r1,%r0
#else
diff --git a/arch/s390/kernel/numa.c b/arch/s390/kernel/numa.c
index 23ab9f02f278..ddc1448ea2e1 100644
--- a/arch/s390/kernel/numa.c
+++ b/arch/s390/kernel/numa.c
@@ -14,9 +14,6 @@
#include <linux/node.h>
#include <asm/numa.h>
-struct pglist_data *node_data[MAX_NUMNODES];
-EXPORT_SYMBOL(node_data);
-
void __init numa_setup(void)
{
int nid;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 6968be98af11..e2e0aa463fbd 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -22,6 +22,10 @@
#include <asm/hwctrset.h>
#include <asm/debug.h>
+/* Perf PMU definitions for the counter facility */
+#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */
+#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
+
enum cpumf_ctr_set {
CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
@@ -1694,7 +1698,6 @@ static const struct file_operations cfset_fops = {
.release = cfset_release,
.unlocked_ioctl = cfset_ioctl,
.compat_ioctl = cfset_ioctl,
- .llseek = no_llseek
};
static struct miscdevice cfset_dev = {
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 736c1d9632dd..5b765e3ccf0c 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -24,6 +24,22 @@
#include <asm/timex.h>
#include <linux/io.h>
+/* Perf PMU definitions for the sampling facility */
+#define PERF_CPUM_SF_MAX_CTR 2
+#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
+#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
+#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
+#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
+#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
+
+#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
+#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
+#define TEAR_REG(hwc) ((hwc)->last_tag)
+#define SAMPL_RATE(hwc) ((hwc)->event_base)
+#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
+#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
+#define SAMPL_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
+
/* Minimum number of sample-data-block-tables:
* At least one table is required for the sampling buffer structure.
* A single table contains up to 511 pointers to sample-data-blocks.
@@ -113,17 +129,6 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
return USEC_PER_SEC * qsi->cpu_speed / rate;
}
-/* Return TOD timestamp contained in an trailer entry */
-static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
-{
- /* TOD in STCKE format */
- if (te->header.t)
- return *((unsigned long long *)&te->timestamp[1]);
-
- /* TOD in STCK format */
- return *((unsigned long long *)&te->timestamp[0]);
-}
-
/* Return pointer to trailer entry of an sample data block */
static inline struct hws_trailer_entry *trailer_entry_ptr(unsigned long v)
{
@@ -154,12 +159,12 @@ static inline unsigned long *get_next_sdbt(unsigned long *s)
/*
* sf_disable() - Switch off sampling facility
*/
-static int sf_disable(void)
+static void sf_disable(void)
{
struct hws_lsctl_request_block sreq;
memset(&sreq, 0, sizeof(sreq));
- return lsctl(&sreq);
+ lsctl(&sreq);
}
/*
@@ -208,8 +213,6 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
}
}
- debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__,
- (unsigned long)sfb->sdbt);
memset(sfb, 0, sizeof(*sfb));
}
@@ -265,10 +268,8 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* the sampling buffer origin.
*/
if (sfb->sdbt != get_next_sdbt(tail)) {
- debug_sprintf_event(sfdbg, 3, "%s: "
- "sampling buffer is not linked: origin %#lx"
- " tail %#lx\n", __func__,
- (unsigned long)sfb->sdbt,
+ debug_sprintf_event(sfdbg, 3, "%s buffer not linked origin %#lx tail %#lx\n",
+ __func__, (unsigned long)sfb->sdbt,
(unsigned long)tail);
return -EINVAL;
}
@@ -318,9 +319,6 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
*tail = virt_to_phys(sfb->sdbt) + 1;
sfb->tail = tail;
- debug_sprintf_event(sfdbg, 4, "%s: new buffer"
- " settings: sdbt %lu sdb %lu\n", __func__,
- sfb->num_sdbt, sfb->num_sdb);
return rc;
}
@@ -357,15 +355,8 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
/* Allocate requested number of sample-data-blocks */
rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
- if (rc) {
+ if (rc)
free_sampling_buffer(sfb);
- debug_sprintf_event(sfdbg, 4, "%s: "
- "realloc_sampling_buffer failed with rc %i\n",
- __func__, rc);
- } else
- debug_sprintf_event(sfdbg, 4,
- "%s: tear %#lx dear %#lx\n", __func__,
- (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt);
return rc;
}
@@ -377,8 +368,8 @@ static void sfb_set_limits(unsigned long min, unsigned long max)
CPUM_SF_MAX_SDB = max;
memset(&si, 0, sizeof(si));
- if (!qsi(&si))
- CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
+ qsi(&si);
+ CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
}
static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
@@ -397,12 +388,6 @@ static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
return 0;
}
-static int sfb_has_pending_allocs(struct sf_buffer *sfb,
- struct hw_perf_event *hwc)
-{
- return sfb_pending_allocs(sfb, hwc) > 0;
-}
-
static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
{
/* Limit the number of SDBs to not exceed the maximum */
@@ -426,7 +411,6 @@ static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
unsigned long n_sdb, freq;
- size_t sample_size;
/* Calculate sampling buffers using 4K pages
*
@@ -457,7 +441,6 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
* ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
* to 511 SDBs).
*/
- sample_size = sizeof(struct hws_basic_entry);
freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000);
@@ -473,12 +456,6 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
if (sf_buffer_available(cpuhw))
return 0;
- debug_sprintf_event(sfdbg, 3,
- "%s: rate %lu f %lu sdb %lu/%lu"
- " sample_size %lu cpuhw %p\n", __func__,
- SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
- sample_size, cpuhw);
-
return alloc_sampling_buffer(&cpuhw->sfb,
sfb_pending_allocs(&cpuhw->sfb, hwc));
}
@@ -535,8 +512,6 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
if (num)
sfb_account_allocs(num, hwc);
- debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n",
- __func__, OVERFLOW_REG(hwc), ratio, num);
OVERFLOW_REG(hwc) = 0;
}
@@ -554,13 +529,11 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
static void extend_sampling_buffer(struct sf_buffer *sfb,
struct hw_perf_event *hwc)
{
- unsigned long num, num_old;
- int rc;
+ unsigned long num;
num = sfb_pending_allocs(sfb, hwc);
if (!num)
return;
- num_old = sfb->num_sdb;
/* Disable the sampling facility to reset any states and also
* clear pending measurement alerts.
@@ -572,51 +545,33 @@ static void extend_sampling_buffer(struct sf_buffer *sfb,
* called by perf. Because this is a reallocation, it is fine if the
* new SDB-request cannot be satisfied immediately.
*/
- rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
- if (rc)
- debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n",
- __func__, rc);
-
- if (sfb_has_pending_allocs(sfb, hwc))
- debug_sprintf_event(sfdbg, 5, "%s: "
- "req %lu alloc %lu remaining %lu\n",
- __func__, num, sfb->num_sdb - num_old,
- sfb_pending_allocs(sfb, hwc));
+ realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
}
/* Number of perf events counting hardware events */
-static atomic_t num_events;
+static refcount_t num_events;
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
#define PMC_INIT 0
#define PMC_RELEASE 1
-#define PMC_FAILURE 2
static void setup_pmc_cpu(void *flags)
{
- struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
- int err = 0;
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
switch (*((int *)flags)) {
case PMC_INIT:
- memset(cpusf, 0, sizeof(*cpusf));
- err = qsi(&cpusf->qsi);
- if (err)
- break;
- cpusf->flags |= PMU_F_RESERVED;
- err = sf_disable();
+ memset(cpuhw, 0, sizeof(*cpuhw));
+ qsi(&cpuhw->qsi);
+ cpuhw->flags |= PMU_F_RESERVED;
+ sf_disable();
break;
case PMC_RELEASE:
- cpusf->flags &= ~PMU_F_RESERVED;
- err = sf_disable();
- if (!err)
- deallocate_buffers(cpusf);
+ cpuhw->flags &= ~PMU_F_RESERVED;
+ sf_disable();
+ deallocate_buffers(cpuhw);
break;
}
- if (err) {
- *((int *)flags) |= PMC_FAILURE;
- pr_err("Switching off the sampling facility failed with rc %i\n", err);
- }
}
static void release_pmc_hardware(void)
@@ -627,27 +582,19 @@ static void release_pmc_hardware(void)
on_each_cpu(setup_pmc_cpu, &flags, 1);
}
-static int reserve_pmc_hardware(void)
+static void reserve_pmc_hardware(void)
{
int flags = PMC_INIT;
on_each_cpu(setup_pmc_cpu, &flags, 1);
- if (flags & PMC_FAILURE) {
- release_pmc_hardware();
- return -ENODEV;
- }
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-
- return 0;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
/* Release PMC if this is the last perf event */
- if (!atomic_add_unless(&num_events, -1, 1)) {
- mutex_lock(&pmc_reserve_mutex);
- if (atomic_dec_return(&num_events) == 0)
- release_pmc_hardware();
+ if (refcount_dec_and_mutex_lock(&num_events, &pmc_reserve_mutex)) {
+ release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
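The atomic_t to refcount_t conversion above leans on the dec-and-lock idiom: only the holder of the final reference takes the mutex and tears down shared hardware state. A hedged user-space rendition of that idiom (pthreads and C11 atomics stand in for the kernel primitives; this is not the kernel implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Drop a reference; return true with the lock held iff it was the last. */
static bool dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: drop a reference that is clearly not the last one. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return false;
	}
	/* Possibly the last reference: serialize against concurrent users. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return true;	/* caller releases hardware, then unlocks */
	pthread_mutex_unlock(lock);
	return false;
}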
@@ -751,9 +698,6 @@ static unsigned long getrate(bool freq, unsigned long sample,
*/
if (sample_rate_to_freq(si, rate) >
sysctl_perf_event_sample_rate) {
- debug_sprintf_event(sfdbg, 1, "%s: "
- "Sampling rate exceeds maximum "
- "perf sample rate\n", __func__);
rate = 0;
}
}
@@ -798,9 +742,6 @@ static int __hw_perf_event_init_rate(struct perf_event *event,
attr->sample_period = rate;
SAMPL_RATE(hwc) = rate;
hw_init_period(hwc, SAMPL_RATE(hwc));
- debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n",
- __func__, event->cpu, event->attr.sample_period,
- event->attr.freq, SAMPLE_FREQ_MODE(hwc));
return 0;
}
@@ -810,23 +751,17 @@ static int __hw_perf_event_init(struct perf_event *event)
struct hws_qsi_info_block si;
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
- int cpu, err;
+ int cpu, err = 0;
/* Reserve CPU-measurement sampling facility */
- err = 0;
- if (!atomic_inc_not_zero(&num_events)) {
- mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
- err = -EBUSY;
- else
- atomic_inc(&num_events);
- mutex_unlock(&pmc_reserve_mutex);
+ mutex_lock(&pmc_reserve_mutex);
+ if (!refcount_inc_not_zero(&num_events)) {
+ reserve_pmc_hardware();
+ refcount_set(&num_events, 1);
}
+ mutex_unlock(&pmc_reserve_mutex);
event->destroy = hw_perf_event_destroy;
- if (err)
- goto out;
-
/* Access per-CPU sampling information (query sampling info) */
/*
* The event->cpu value can be -1 to count on every CPU, for example,
@@ -838,9 +773,9 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
memset(&si, 0, sizeof(si));
cpuhw = NULL;
- if (event->cpu == -1)
+ if (event->cpu == -1) {
qsi(&si);
- else {
+ } else {
/* Event is pinned to a particular CPU, retrieve the per-CPU
* sampling structure for accessing the CPU-specific QSI.
*/
@@ -881,10 +816,6 @@ static int __hw_perf_event_init(struct perf_event *event)
if (err)
goto out;
- /* Initialize sample data overflow accounting */
- hwc->extra_reg.reg = REG_OVERFLOW;
- OVERFLOW_REG(hwc) = 0;
-
/* Use AUX buffer. No need to allocate it by ourself */
if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
return 0;
@@ -1007,7 +938,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
extend_sampling_buffer(&cpuhw->sfb, hwc);
}
/* Rate may be adjusted with ioctl() */
- cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
+ cpuhw->lsctl.interval = SAMPL_RATE(hwc);
}
/* (Re)enable the PMU and sampling facility */
@@ -1023,12 +954,6 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
/* Load current program parameter */
lpp(&get_lowcore()->lpp);
-
- debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
- "interval %#lx tear %#lx dear %#lx\n", __func__,
- cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
- cpuhw->lsctl.cd, cpuhw->lsctl.interval,
- cpuhw->lsctl.tear, cpuhw->lsctl.dear);
}
static void cpumsf_pmu_disable(struct pmu *pmu)
@@ -1055,21 +980,18 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
return;
}
- /* Save state of TEAR and DEAR register contents */
- err = qsi(&si);
- if (!err) {
- /* TEAR/DEAR values are valid only if the sampling facility is
- * enabled. Note that cpumsf_pmu_disable() might be called even
- * for a disabled sampling facility because cpumsf_pmu_enable()
- * controls the enable/disable state.
- */
- if (si.es) {
- cpuhw->lsctl.tear = si.tear;
- cpuhw->lsctl.dear = si.dear;
- }
- } else
- debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n",
- __func__, err);
+ /*
+ * Save state of TEAR and DEAR register contents.
+ * TEAR/DEAR values are valid only if the sampling facility is
+ * enabled. Note that cpumsf_pmu_disable() might be called even
+ * for a disabled sampling facility because cpumsf_pmu_enable()
+ * controls the enable/disable state.
+ */
+ qsi(&si);
+ if (si.es) {
+ cpuhw->lsctl.tear = si.tear;
+ cpuhw->lsctl.dear = si.dear;
+ }
cpuhw->flags &= ~PMU_F_ENABLED;
}
@@ -1235,11 +1157,6 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
/* Count discarded samples */
*overflow += 1;
} else {
- debug_sprintf_event(sfdbg, 4,
- "%s: Found unknown"
- " sampling data entry: te->f %i"
- " basic.def %#4x (%p)\n", __func__,
- te->header.f, sample->def, sample);
/* Sample slot is not yet written or other record.
*
* This condition can occur if the buffer was reused
@@ -1284,7 +1201,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
* AUX buffer is used when in diagnostic sampling mode.
* No perf events/samples are created.
*/
- if (SAMPL_DIAG_MODE(&event->hw))
+ if (SAMPL_DIAG_MODE(hwc))
return;
sdbt = (unsigned long *)TEAR_REG(hwc);
@@ -1309,13 +1226,6 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
*/
sampl_overflow += te->header.overflow;
- /* Timestamps are valid for full sample-data-blocks only */
- debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx/%#lx "
- "overflow %llu timestamp %#llx\n",
- __func__, sdb, (unsigned long)sdbt,
- te->header.overflow,
- (te->header.f) ? trailer_timestamp(te) : 0ULL);
-
/* Collect all samples from a single sample-data-block and
* flag if an (perf) event overflow happened. If so, the PMU
* is stopped and remaining samples will be discarded.
@@ -1340,7 +1250,7 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sdbt = get_next_sdbt(sdbt);
/* Update event hardware registers */
- TEAR_REG(hwc) = (unsigned long) sdbt;
+ TEAR_REG(hwc) = (unsigned long)sdbt;
/* Stop processing sample-data if all samples of the current
* sample-data-block were flushed even if it was not full.
@@ -1362,19 +1272,8 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
* are dropped.
* Slightly increase the interval to avoid hitting this limit.
*/
- if (event_overflow) {
+ if (event_overflow)
SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
- debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
- __func__,
- DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
- }
-
- if (sampl_overflow || event_overflow)
- debug_sprintf_event(sfdbg, 4, "%s: "
- "overflows: sample %llu event %llu"
- " total %llu num_sdb %llu\n",
- __func__, sampl_overflow, event_overflow,
- OVERFLOW_REG(hwc), num_sdb);
}
static inline unsigned long aux_sdb_index(struct aux_buffer *aux,
@@ -1442,9 +1341,6 @@ static void aux_output_end(struct perf_output_handle *handle)
/* Remove alert indicators in the buffer */
te = aux_sdb_trailer(aux, aux->alert_mark);
te->header.a = 0;
-
- debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
- __func__, i, range_scan, aux->head);
}
/*
@@ -1463,7 +1359,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
unsigned long range, i, range_scan, idx, head, base, offset;
struct hws_trailer_entry *te;
- if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
+ if (handle->head & ~PAGE_MASK)
return -EINVAL;
aux->head = handle->head >> PAGE_SHIFT;
@@ -1475,10 +1371,6 @@ static int aux_output_begin(struct perf_output_handle *handle,
* SDBs between aux->head and aux->empty_mark are already ready
* for new data. range_scan is num of SDBs not within them.
*/
- debug_sprintf_event(sfdbg, 6,
- "%s: range %ld head %ld alert %ld empty %ld\n",
- __func__, range, aux->head, aux->alert_mark,
- aux->empty_mark);
if (range > aux_sdb_num_empty(aux)) {
range_scan = range - aux_sdb_num_empty(aux);
idx = aux->empty_mark + 1;
@@ -1504,12 +1396,6 @@ static int aux_output_begin(struct perf_output_handle *handle,
cpuhw->lsctl.tear = virt_to_phys((void *)base) + offset * sizeof(unsigned long);
cpuhw->lsctl.dear = virt_to_phys((void *)aux->sdb_index[head]);
- debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld "
- "index %ld tear %#lx dear %#lx\n", __func__,
- aux->head, aux->alert_mark, aux->empty_mark,
- head / CPUM_SF_SDB_PER_TABLE,
- cpuhw->lsctl.tear, cpuhw->lsctl.dear);
-
return 0;
}
@@ -1571,14 +1457,11 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
unsigned long long *overflow)
{
- unsigned long i, range_scan, idx, idx_old;
union hws_trailer_header old, prev, new;
+ unsigned long i, range_scan, idx;
unsigned long long orig_overflow;
struct hws_trailer_entry *te;
- debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
- "empty %ld\n", __func__, range, aux->head,
- aux->alert_mark, aux->empty_mark);
if (range <= aux_sdb_num_empty(aux))
/*
* No need to scan. All SDBs in range are marked as empty.
@@ -1601,7 +1484,7 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
* indicator fall into this range, set it.
*/
range_scan = range - aux_sdb_num_empty(aux);
- idx_old = idx = aux->empty_mark + 1;
+ idx = aux->empty_mark + 1;
for (i = 0; i < range_scan; i++, idx++) {
te = aux_sdb_trailer(aux, idx);
prev.val = READ_ONCE_ALIGNED_128(te->header.val);
@@ -1623,9 +1506,6 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
/* Update empty_mark to new position */
aux->empty_mark = aux->head + range - 1;
- debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld "
- "empty %ld\n", __func__, range_scan, idx_old,
- idx - 1, aux->empty_mark);
return true;
}
@@ -1642,12 +1522,12 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
unsigned long num_sdb;
aux = perf_get_aux(handle);
- if (WARN_ON_ONCE(!aux))
+ if (!aux)
return;
/* Inform user space new data arrived */
size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
- debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__,
+ debug_sprintf_event(sfdbg, 6, "%s #alert %ld\n", __func__,
size >> PAGE_SHIFT);
perf_aux_output_end(handle, size);
@@ -1661,7 +1541,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
num_sdb);
break;
}
- if (WARN_ON_ONCE(!aux))
+ if (!aux)
return;
/* Update head and alert_mark to new position */
@@ -1681,23 +1561,11 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
perf_aux_output_end(&cpuhw->handle, size);
pr_err("Sample data caused the AUX buffer with %lu "
"pages to overflow\n", aux->sfb.num_sdb);
- debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld "
- "overflow %lld\n", __func__,
- aux->head, range, overflow);
} else {
size = aux_sdb_num_alert(aux) << PAGE_SHIFT;
perf_aux_output_end(&cpuhw->handle, size);
- debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
- "already full, try another\n",
- __func__,
- aux->head, aux->alert_mark);
}
}
-
- if (done)
- debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
- "empty %ld\n", __func__, aux->head,
- aux->alert_mark, aux->empty_mark);
}
/*
@@ -1719,8 +1587,6 @@ static void aux_buffer_free(void *data)
kfree(aux->sdbt_index);
kfree(aux->sdb_index);
kfree(aux);
-
- debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt);
}
static void aux_sdb_init(unsigned long sdb)
@@ -1828,9 +1694,6 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
*/
aux->empty_mark = sfb->num_sdb - 1;
- debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__,
- sfb->num_sdbt, sfb->num_sdb);
-
return aux;
no_sdbt:
@@ -1863,8 +1726,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
memset(&si, 0, sizeof(si));
if (event->cpu == -1) {
- if (qsi(&si))
- return -ENODEV;
+ qsi(&si);
} else {
/* Event is pinned to a particular CPU, retrieve the per-CPU
* sampling structure for accessing the CPU-specific QSI.
@@ -1874,7 +1736,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
si = cpuhw->qsi;
}
- do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
+ do_freq = !!SAMPL_FREQ_MODE(&event->hw);
rate = getrate(do_freq, value, &si);
if (!rate)
return -EINVAL;
@@ -1882,10 +1744,6 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
event->attr.sample_period = rate;
SAMPL_RATE(&event->hw) = rate;
hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
- debug_sprintf_event(sfdbg, 4, "%s:"
- " cpu %d value %#llx period %#llx freq %d\n",
- __func__, event->cpu, value,
- event->attr.sample_period, do_freq);
return 0;
}
@@ -1896,12 +1754,8 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
- if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ if (!(event->hw.state & PERF_HES_STOPPED))
return;
-
- if (flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
-
perf_pmu_disable(event->pmu);
event->hw.state = 0;
cpuhw->lsctl.cs = 1;
@@ -1936,7 +1790,7 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
struct aux_buffer *aux;
- int err;
+ int err = 0;
if (cpuhw->flags & PMU_F_IN_USE)
return -EAGAIN;
@@ -1944,7 +1798,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
return -EINVAL;
- err = 0;
perf_pmu_disable(event->pmu);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@@ -2115,7 +1968,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
/* Report measurement alerts only for non-PRA codes */
if (alert != CPU_MF_INT_SF_PRA)
- debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__,
+ debug_sprintf_event(sfdbg, 6, "%s alert %#x\n", __func__,
alert);
/* Sampling authorization change request */
@@ -2143,7 +1996,7 @@ static int cpusf_pmu_setup(unsigned int cpu, int flags)
/* Ignore the notification if no events are scheduled on the PMU.
* This might be racy...
*/
- if (!atomic_read(&num_events))
+ if (!refcount_read(&num_events))
return 0;
local_irq_disable();
@@ -2205,10 +2058,12 @@ static const struct kernel_param_ops param_ops_sfb_size = {
.get = param_get_sfb_size,
};
-#define RS_INIT_FAILURE_QSI 0x0001
-#define RS_INIT_FAILURE_BSDES 0x0002
-#define RS_INIT_FAILURE_ALRT 0x0003
-#define RS_INIT_FAILURE_PERF 0x0004
+enum {
+ RS_INIT_FAILURE_BSDES = 2, /* Bad basic sampling size */
+ RS_INIT_FAILURE_ALRT = 3, /* IRQ registration failure */
+ RS_INIT_FAILURE_PERF = 4 /* PMU registration failure */
+};
+
static void __init pr_cpumsf_err(unsigned int reason)
{
pr_err("Sampling facility support for perf is not available: "
@@ -2224,11 +2079,7 @@ static int __init init_cpum_sampling_pmu(void)
return -ENODEV;
memset(&si, 0, sizeof(si));
- if (qsi(&si)) {
- pr_cpumsf_err(RS_INIT_FAILURE_QSI);
- return -ENODEV;
- }
-
+ qsi(&si);
if (!si.as && !si.ad)
return -ENODEV;
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 2f5a20e300f6..fa7325454266 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -738,6 +738,22 @@ static const char * const paicrypt_ctrnames[] = {
[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
[155] = "IBM_RESERVED_155",
[156] = "IBM_RESERVED_156",
+ [157] = "KM_FULL_XTS_AES_128",
+ [158] = "KM_FULL_XTS_AES_256",
+ [159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
+ [160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
+ [161] = "KMAC_HMAC_SHA_224",
+ [162] = "KMAC_HMAC_SHA_256",
+ [163] = "KMAC_HMAC_SHA_384",
+ [164] = "KMAC_HMAC_SHA_512",
+ [165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
+ [166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
+ [167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
+ [168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
+ [169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
+ [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
+ [171] = "PCKMO_ENCRYPT_AES_XTS_128",
+ [172] = "PCKMO_ENCRYPT_AES_XTS_256",
};
static void __init attr_event_free(struct attribute **attrs, int num)
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index 6295531b39a2..7f462bef1fc0 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -635,6 +635,15 @@ static const char * const paiext_ctrnames[] = {
[25] = "NNPA_1MFRAME",
[26] = "NNPA_2GFRAME",
[27] = "NNPA_ACCESSEXCEPT",
+ [28] = "NNPA_TRANSFORM",
+ [29] = "NNPA_GELU",
+ [30] = "NNPA_MOMENTS",
+ [31] = "NNPA_LAYERNORM",
+ [32] = "NNPA_MATMUL_OP_BCAST1",
+ [33] = "NNPA_SQRT",
+ [34] = "NNPA_INVSQRT",
+ [35] = "NNPA_NORM",
+ [36] = "NNPA_REDUCE",
};
static void __init attr_event_free(struct attribute **attrs, int num)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index fbba37ec53cf..4df56fdb2488 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -671,6 +671,25 @@ int smp_cpu_get_polarization(int cpu)
return per_cpu(pcpu_devices, cpu).polarization;
}
+void smp_cpu_set_capacity(int cpu, unsigned long val)
+{
+ per_cpu(pcpu_devices, cpu).capacity = val;
+}
+
+unsigned long smp_cpu_get_capacity(int cpu)
+{
+ return per_cpu(pcpu_devices, cpu).capacity;
+}
+
+void smp_set_core_capacity(int cpu, unsigned long val)
+{
+ int i;
+
+ cpu = smp_get_base_cpu(cpu);
+ for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++)
+ smp_cpu_set_capacity(i, val);
+}
+
int smp_cpu_get_cpu_address(int cpu)
{
return per_cpu(pcpu_devices, cpu).address;
@@ -719,6 +738,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
else
pcpu->state = CPU_STATE_STANDBY;
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
set_cpu_present(cpu, true);
if (!early && arch_register_cpu(cpu))
set_cpu_present(cpu, false);
@@ -961,6 +981,7 @@ void __init smp_prepare_boot_cpu(void)
ipl_pcpu->state = CPU_STATE_CONFIGURED;
lc->pcpu = (unsigned long)ipl_pcpu;
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
+ smp_cpu_set_capacity(0, CPU_CAPACITY_HIGH);
}
void __init smp_setup_processor_id(void)
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 640363b2a105..9f59837d159e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -162,22 +162,3 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
{
arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}
-
-unsigned long return_address(unsigned int n)
-{
- struct unwind_state state;
- unsigned long addr;
-
- /* Increment to skip current stack entry */
- n++;
-
- unwind_for_each_frame(&state, NULL, NULL, 0) {
- addr = unwind_get_return_address(&state);
- if (!addr)
- break;
- if (!n--)
- return addr;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2be30a96696a..88055f58fbda 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -498,7 +498,6 @@ static const struct file_operations stsi_##fc##_##s1##_##s2##_fs_ops = { \
.open = stsi_open_##fc##_##s1##_##s2, \
.release = stsi_release, \
.read = stsi_read, \
- .llseek = no_llseek, \
};
static int stsi_release(struct inode *inode, struct file *file)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 22029ecae1c5..813e5da9a973 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
+#include <asm/hiperdispatch.h>
#include <asm/sysinfo.h>
#define PTF_HORIZONTAL (0UL)
@@ -47,6 +48,7 @@ static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
+static int cpu_management;
static DECLARE_WORK(topology_work, topology_work_fn);
@@ -144,6 +146,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(cpu, &book->mask);
cpumask_set_cpu(cpu, &socket->mask);
smp_cpu_set_polarization(cpu, tl_core->pp);
+ smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
}
}
}
@@ -270,6 +273,7 @@ void update_cpu_masks(void)
topo->drawer_id = id;
}
}
+ hd_reset_state();
for_each_online_cpu(cpu) {
topo = &cpu_topology[cpu];
pkg_first = cpumask_first(&topo->core_mask);
@@ -278,8 +282,10 @@ void update_cpu_masks(void)
for_each_cpu(sibling, &topo->core_mask) {
topo_sibling = &cpu_topology[sibling];
smt_first = cpumask_first(&topo_sibling->thread_mask);
- if (sibling == smt_first)
+ if (sibling == smt_first) {
topo_package->booted_cores++;
+ hd_add_core(sibling);
+ }
}
} else {
topo->booted_cores = topo_package->booted_cores;
@@ -303,8 +309,10 @@ static void __arch_update_dedicated_flag(void *arg)
static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
- int rc = 0;
+ int rc, hd_status;
+ hd_status = 0;
+ rc = 0;
mutex_lock(&smp_cpu_state_mutex);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
@@ -314,7 +322,11 @@ static int __arch_update_cpu_topology(void)
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
+ if (cpu_management == 1)
+ hd_status = hd_enable_hiperdispatch();
mutex_unlock(&smp_cpu_state_mutex);
+ if (hd_status == 0)
+ hd_disable_hiperdispatch();
return rc;
}
@@ -374,7 +386,24 @@ void topology_expect_change(void)
set_topology_timer();
}
-static int cpu_management;
+static int set_polarization(int polarization)
+{
+ int rc = 0;
+
+ cpus_read_lock();
+ mutex_lock(&smp_cpu_state_mutex);
+ if (cpu_management == polarization)
+ goto out;
+ rc = topology_set_cpu_management(polarization);
+ if (rc)
+ goto out;
+ cpu_management = polarization;
+ topology_expect_change();
+out:
+ mutex_unlock(&smp_cpu_state_mutex);
+ cpus_read_unlock();
+ return rc;
+}
static ssize_t dispatching_show(struct device *dev,
struct device_attribute *attr,
@@ -400,19 +429,7 @@ static ssize_t dispatching_store(struct device *dev,
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
- rc = 0;
- cpus_read_lock();
- mutex_lock(&smp_cpu_state_mutex);
- if (cpu_management == val)
- goto out;
- rc = topology_set_cpu_management(val);
- if (rc)
- goto out;
- cpu_management = val;
- topology_expect_change();
-out:
- mutex_unlock(&smp_cpu_state_mutex);
- cpus_read_unlock();
+ rc = set_polarization(val);
return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);
@@ -624,12 +641,37 @@ static int topology_ctl_handler(const struct ctl_table *ctl, int write,
return rc;
}
+static int polarization_ctl_handler(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int polarization;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &polarization,
+ .maxlen = sizeof(int),
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ };
+
+ polarization = cpu_management;
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+ return set_polarization(polarization);
+}
+
static struct ctl_table topology_ctl_table[] = {
{
.procname = "topology",
.mode = 0644,
.proc_handler = topology_ctl_handler,
},
+ {
+ .procname = "polarization",
+ .mode = 0644,
+ .proc_handler = polarization_ctl_handler,
+ },
};
static int __init topology_init(void)
@@ -642,6 +684,8 @@ static int __init topology_init(void)
set_topology_timer();
else
topology_update_polarization_simple();
+ if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY_VERTICAL))
+ set_polarization(1);
register_sysctl("s390", topology_ctl_table);
dev_root = bus_get_dev_root(&cpu_subsys);
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 36db065c7cf7..9646f773208a 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -14,6 +14,7 @@
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
+#include <linux/pagewalk.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
@@ -462,9 +463,9 @@ EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
struct vm_area_struct *vma;
+ struct folio_walk fw;
unsigned long uaddr;
struct folio *folio;
- struct page *page;
int rc;
rc = -EFAULT;
@@ -483,11 +484,15 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
goto out;
rc = 0;
- /* we take an extra reference here */
- page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
- if (IS_ERR_OR_NULL(page))
+ folio = folio_walk_start(&fw, vma, uaddr, 0);
+ if (!folio)
goto out;
- folio = page_folio(page);
+ /*
+ * See gmap_make_secure(): large folios cannot be secure. Small
+ * folio implies FW_LEVEL_PTE.
+ */
+ if (folio_test_large(folio) || !pte_write(fw.pte))
+ goto out_walk_end;
rc = uv_destroy_folio(folio);
/*
* Fault handlers can race; it is possible that two CPUs will fault
@@ -500,7 +505,8 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
*/
if (rc)
rc = uv_convert_from_secure_folio(folio);
- folio_put(folio);
+out_walk_end:
+ folio_walk_end(&fw, vma);
out:
mmap_read_unlock(gmap->mm);
return rc;
@@ -548,11 +554,6 @@ int arch_make_folio_accessible(struct folio *folio)
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
-int arch_make_page_accessible(struct page *page)
-{
- return arch_make_folio_accessible(page_folio(page));
-}
-EXPORT_SYMBOL_GPL(arch_make_page_accessible);
static ssize_t uv_query_facilities(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index e26e68675c08..aa06c85bcbd3 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -13,10 +13,7 @@
* for details.
*/
.macro vdso_func func
- .globl __kernel_\func
- .type __kernel_\func,@function
- __ALIGN
-__kernel_\func:
+SYM_FUNC_START(__kernel_\func)
CFI_STARTPROC
aghi %r15,-STACK_FRAME_VDSO_OVERHEAD
CFI_DEF_CFA_OFFSET (STACK_FRAME_USER_OVERHEAD + STACK_FRAME_VDSO_OVERHEAD)
@@ -32,7 +29,7 @@ __kernel_\func:
CFI_RESTORE 15
br %r14
CFI_ENDPROC
- .size __kernel_\func,.-__kernel_\func
+SYM_FUNC_END(__kernel_\func)
.endm
vdso_func gettimeofday
@@ -41,16 +38,13 @@ vdso_func clock_gettime
vdso_func getcpu
.macro vdso_syscall func,syscall
- .globl __kernel_\func
- .type __kernel_\func,@function
- __ALIGN
-__kernel_\func:
+SYM_FUNC_START(__kernel_\func)
CFI_STARTPROC
svc \syscall
/* Make sure we notice when a syscall returns, which shouldn't happen */
.word 0
CFI_ENDPROC
- .size __kernel_\func,.-__kernel_\func
+SYM_FUNC_END(__kernel_\func)
.endm
vdso_syscall restart_syscall,__NR_restart_syscall
diff --git a/arch/s390/kernel/vdso64/vgetrandom-chacha.S b/arch/s390/kernel/vdso64/vgetrandom-chacha.S
index d802b0a96f41..09c034c2f853 100644
--- a/arch/s390/kernel/vdso64/vgetrandom-chacha.S
+++ b/arch/s390/kernel/vdso64/vgetrandom-chacha.S
@@ -1,7 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/alternative.h>
+#include <asm/dwarf.h>
#include <asm/fpu-insn.h>
#define STATE0 %v0
@@ -12,9 +14,6 @@
#define COPY1 %v5
#define COPY2 %v6
#define COPY3 %v7
-#define PERM4 %v16
-#define PERM8 %v17
-#define PERM12 %v18
#define BEPERM %v19
#define TMP0 %v20
#define TMP1 %v21
@@ -23,13 +22,11 @@
.section .rodata
- .balign 128
-.Lconstants:
+ .balign 32
+SYM_DATA_START_LOCAL(chacha20_constants)
.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 # endian-neutral
- .long 0x04050607,0x08090a0b,0x0c0d0e0f,0x00010203 # rotl 4 bytes
- .long 0x08090a0b,0x0c0d0e0f,0x00010203,0x04050607 # rotl 8 bytes
- .long 0x0c0d0e0f,0x00010203,0x04050607,0x08090a0b # rotl 12 bytes
.long 0x03020100,0x07060504,0x0b0a0908,0x0f0e0d0c # byte swap
+SYM_DATA_END(chacha20_constants)
.text
/*
@@ -43,13 +40,14 @@
* size_t nblocks)
*/
SYM_FUNC_START(__arch_chacha20_blocks_nostack)
- larl %r1,.Lconstants
+ CFI_STARTPROC
+ larl %r1,chacha20_constants
/* COPY0 = "expand 32-byte k" */
VL COPY0,0,,%r1
- /* PERM4-PERM12,BEPERM = byte selectors for VPERM */
- VLM PERM4,BEPERM,16,%r1
+ /* BEPERM = byte selectors for VPERM */
+ ALTERNATIVE __stringify(VL BEPERM,16,,%r1), "brcl 0,0", ALT_FACILITY(148)
/* COPY1,COPY2 = key */
VLM COPY1,COPY2,0,%r3
@@ -89,11 +87,11 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
VERLLF STATE1,STATE1,7
/* STATE1[0,1,2,3] = STATE1[1,2,3,0] */
- VPERM STATE1,STATE1,STATE1,PERM4
+ VSLDB STATE1,STATE1,STATE1,4
/* STATE2[0,1,2,3] = STATE2[2,3,0,1] */
- VPERM STATE2,STATE2,STATE2,PERM8
+ VSLDB STATE2,STATE2,STATE2,8
/* STATE3[0,1,2,3] = STATE3[3,0,1,2] */
- VPERM STATE3,STATE3,STATE3,PERM12
+ VSLDB STATE3,STATE3,STATE3,12
/* STATE0 += STATE1, STATE3 = rotl32(STATE3 ^ STATE0, 16) */
VAF STATE0,STATE0,STATE1
@@ -116,32 +114,38 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
VERLLF STATE1,STATE1,7
/* STATE1[0,1,2,3] = STATE1[3,0,1,2] */
- VPERM STATE1,STATE1,STATE1,PERM12
+ VSLDB STATE1,STATE1,STATE1,12
/* STATE2[0,1,2,3] = STATE2[2,3,0,1] */
- VPERM STATE2,STATE2,STATE2,PERM8
+ VSLDB STATE2,STATE2,STATE2,8
/* STATE3[0,1,2,3] = STATE3[1,2,3,0] */
- VPERM STATE3,STATE3,STATE3,PERM4
+ VSLDB STATE3,STATE3,STATE3,4
brctg %r0,.Ldoubleround
- /* OUTPUT0 = STATE0 + STATE0 */
+ /* OUTPUT0 = STATE0 + COPY0 */
VAF STATE0,STATE0,COPY0
- /* OUTPUT1 = STATE1 + STATE1 */
+ /* OUTPUT1 = STATE1 + COPY1 */
VAF STATE1,STATE1,COPY1
- /* OUTPUT2 = STATE2 + STATE2 */
+ /* OUTPUT2 = STATE2 + COPY2 */
VAF STATE2,STATE2,COPY2
- /* OUTPUT2 = STATE3 + STATE3 */
+ /* OUTPUT3 = STATE3 + COPY3 */
VAF STATE3,STATE3,COPY3
- /*
- * 32 bit wise little endian store to OUTPUT. If the vector
- * enhancement facility 2 is not installed use the slow path.
- */
- ALTERNATIVE "brc 0xf,.Lstoreslow", "nop", ALT_FACILITY(148)
- VSTBRF STATE0,0,,%r2
- VSTBRF STATE1,16,,%r2
- VSTBRF STATE2,32,,%r2
- VSTBRF STATE3,48,,%r2
-.Lstoredone:
+ ALTERNATIVE \
+ __stringify( \
+ /* Convert STATE to little endian and store to OUTPUT */\
+ VPERM TMP0,STATE0,STATE0,BEPERM; \
+ VPERM TMP1,STATE1,STATE1,BEPERM; \
+ VPERM TMP2,STATE2,STATE2,BEPERM; \
+ VPERM TMP3,STATE3,STATE3,BEPERM; \
+ VSTM TMP0,TMP3,0,%r2), \
+ __stringify( \
+ /* 32 bit wise little endian store to OUTPUT */ \
+ VSTBRF STATE0,0,,%r2; \
+ VSTBRF STATE1,16,,%r2; \
+ VSTBRF STATE2,32,,%r2; \
+ VSTBRF STATE3,48,,%r2; \
+ brcl 0,0), \
+ ALT_FACILITY(148)
/* ++COPY3.COUNTER */
/* alsih %r3,1 */
@@ -173,13 +177,5 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
VZERO TMP3
br %r14
-
-.Lstoreslow:
- /* Convert STATE to little endian format and store to OUTPUT */
- VPERM TMP0,STATE0,STATE0,BEPERM
- VPERM TMP1,STATE1,STATE1,BEPERM
- VPERM TMP2,STATE2,STATE2,BEPERM
- VPERM TMP3,STATE3,STATE3,BEPERM
- VSTM TMP0,TMP3,0,%r2
- j .Lstoredone
+ CFI_ENDPROC
SYM_FUNC_END(__arch_chacha20_blocks_nostack)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index ae5d0a9d6911..377b9aaf8c92 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -191,8 +191,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
- RUNTIME_CONST(shift, d_hash_shift)
- RUNTIME_CONST(ptr, dentry_hashtable)
+ RUNTIME_CONST_VARIABLES
PERCPU_SECTION(0x100)
diff --git a/arch/s390/kernel/wti.c b/arch/s390/kernel/wti.c
new file mode 100644
index 000000000000..949fdbf0e8b6
--- /dev/null
+++ b/arch/s390/kernel/wti.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for warning track interruption
+ *
+ * Copyright IBM Corp. 2023
+ */
+
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/smpboot.h>
+#include <linux/irq.h>
+#include <uapi/linux/sched/types.h>
+#include <asm/debug.h>
+#include <asm/diag.h>
+#include <asm/sclp.h>
+
+#define WTI_DBF_LEN 64
+
+struct wti_debug {
+ unsigned long missed;
+ unsigned long addr;
+ pid_t pid;
+};
+
+struct wti_state {
+ /* debug data for s390dbf */
+ struct wti_debug dbg;
+ /*
+ * Represents the real-time thread responsible for
+ * acknowledging the warning-track interrupt and triggering
+ * preliminary and postliminary precautions.
+ */
+ struct task_struct *thread;
+ /*
+ * If pending is true, the real-time thread must be scheduled.
+ * If not, a wake-up of that thread remains a no-op.
+ */
+ bool pending;
+};
+
+static DEFINE_PER_CPU(struct wti_state, wti_state);
+
+static debug_info_t *wti_dbg;
+
+/*
+ * During a warning-track grace period, interrupts are disabled
+ * so that the warning-track acknowledgment is not delayed.
+ *
+ * Once the CPU is physically dispatched again, interrupts are
+ * re-enabled.
+ */
+
+static void wti_irq_disable(void)
+{
+ unsigned long flags;
+ struct ctlreg cr6;
+
+ local_irq_save(flags);
+ local_ctl_store(6, &cr6);
+ /* disable all I/O interrupts */
+ cr6.val &= ~0xff000000UL;
+ local_ctl_load(6, &cr6);
+ local_irq_restore(flags);
+}
+
+static void wti_irq_enable(void)
+{
+ unsigned long flags;
+ struct ctlreg cr6;
+
+ local_irq_save(flags);
+ local_ctl_store(6, &cr6);
+ /* enable all I/O interrupts */
+ cr6.val |= 0xff000000UL;
+ local_ctl_load(6, &cr6);
+ local_irq_restore(flags);
+}
+
+static void store_debug_data(struct wti_state *st)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ st->dbg.pid = current->pid;
+ st->dbg.addr = 0;
+ if (!user_mode(regs))
+ st->dbg.addr = regs->psw.addr;
+}
+
+static void wti_interrupt(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
+{
+ struct wti_state *st = this_cpu_ptr(&wti_state);
+
+ inc_irq_stat(IRQEXT_WTI);
+ wti_irq_disable();
+ store_debug_data(st);
+ st->pending = true;
+ wake_up_process(st->thread);
+}
+
+static int wti_pending(unsigned int cpu)
+{
+ struct wti_state *st = per_cpu_ptr(&wti_state, cpu);
+
+ return st->pending;
+}
+
+static void wti_dbf_grace_period(struct wti_state *st)
+{
+ struct wti_debug *wdi = &st->dbg;
+ char buf[WTI_DBF_LEN];
+
+ if (wdi->addr)
+ snprintf(buf, sizeof(buf), "%d %pS", wdi->pid, (void *)wdi->addr);
+ else
+ snprintf(buf, sizeof(buf), "%d <user>", wdi->pid);
+ debug_text_event(wti_dbg, 2, buf);
+ wdi->missed++;
+}
+
+static int wti_show(struct seq_file *seq, void *v)
+{
+ struct wti_state *st;
+ int cpu;
+
+ cpus_read_lock();
+ seq_puts(seq, " ");
+ for_each_online_cpu(cpu)
+ seq_printf(seq, "CPU%-8d", cpu);
+ seq_putc(seq, '\n');
+ for_each_online_cpu(cpu) {
+ st = per_cpu_ptr(&wti_state, cpu);
+ seq_printf(seq, " %10lu", st->dbg.missed);
+ }
+ seq_putc(seq, '\n');
+ cpus_read_unlock();
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wti);
+
+static void wti_thread_fn(unsigned int cpu)
+{
+ struct wti_state *st = per_cpu_ptr(&wti_state, cpu);
+
+ st->pending = false;
+ /*
+ * Yield the CPU voluntarily to the hypervisor. Control
+ * resumes when the hypervisor decides to dispatch the CPU
+ * to this LPAR again.
+ */
+ if (diag49c(DIAG49C_SUBC_ACK))
+ wti_dbf_grace_period(st);
+ wti_irq_enable();
+}
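Taken together, the flow visible in this file is: the external interrupt handler disables I/O interrupts and wakes the per-CPU real-time thread; that thread acknowledges the grace period via diag49c(DIAG49C_SUBC_ACK), yielding the CPU back to the hypervisor; once the CPU is dispatched again, the thread re-enables I/O interrupts, and a failed acknowledgment is logged to the s390dbf area and counted in the per-CPU "missed" statistic exposed through debugfs.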
+
+static struct smp_hotplug_thread wti_threads = {
+ .store = &wti_state.thread,
+ .thread_should_run = wti_pending,
+ .thread_fn = wti_thread_fn,
+ .thread_comm = "cpuwti/%u",
+ .selfparking = false,
+};
+
+static int __init wti_init(void)
+{
+ struct sched_param wti_sched_param = { .sched_priority = MAX_RT_PRIO - 1 };
+ struct dentry *wti_dir;
+ struct wti_state *st;
+ int cpu, rc;
+
+ rc = -EOPNOTSUPP;
+ if (!sclp.has_wti)
+ goto out;
+ rc = smpboot_register_percpu_thread(&wti_threads);
+ if (WARN_ON(rc))
+ goto out;
+ for_each_online_cpu(cpu) {
+ st = per_cpu_ptr(&wti_state, cpu);
+ sched_setscheduler(st->thread, SCHED_FIFO, &wti_sched_param);
+ }
+ rc = register_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt);
+ if (rc) {
+ pr_warn("Couldn't request external interrupt 0x1007\n");
+ goto out_thread;
+ }
+ irq_subclass_register(IRQ_SUBCLASS_WARNING_TRACK);
+ rc = diag49c(DIAG49C_SUBC_REG);
+ if (rc) {
+ pr_warn("Failed to register warning track interrupt through DIAG 49C\n");
+ rc = -EOPNOTSUPP;
+ goto out_subclass;
+ }
+ wti_dir = debugfs_create_dir("wti", arch_debugfs_dir);
+ debugfs_create_file("stat", 0400, wti_dir, NULL, &wti_fops);
+ wti_dbg = debug_register("wti", 1, 1, WTI_DBF_LEN);
+ if (!wti_dbg) {
+ rc = -ENOMEM;
+ goto out_debug_register;
+ }
+ rc = debug_register_view(wti_dbg, &debug_hex_ascii_view);
+ if (rc)
+ goto out_debug_register;
+ goto out;
+out_debug_register:
+ debug_unregister(wti_dbg);
+out_subclass:
+ irq_subclass_unregister(IRQ_SUBCLASS_WARNING_TRACK);
+ unregister_external_irq(EXT_IRQ_WARNING_TRACK, wti_interrupt);
+out_thread:
+ smpboot_unregister_percpu_thread(&wti_threads);
+out:
+ return rc;
+}
+late_initcall(wti_init);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0fd96860fc45..bb7134faaebf 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -348,20 +348,29 @@ static inline int plo_test_bit(unsigned char nr)
return cc == 0;
}
-static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
+static __always_inline void __sortl_query(u8 (*query)[32])
{
asm volatile(
" lghi 0,0\n"
- " lgr 1,%[query]\n"
+ " la 1,%[query]\n"
/* Parameter registers are ignored */
- " .insn rrf,%[opc] << 16,2,4,6,0\n"
+ " .insn rre,0xb9380000,2,4\n"
+ : [query] "=R" (*query)
:
- : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
- : "cc", "memory", "0", "1");
+ : "cc", "0", "1");
}
-#define INSN_SORTL 0xb938
-#define INSN_DFLTCC 0xb939
+static __always_inline void __dfltcc_query(u8 (*query)[32])
+{
+ asm volatile(
+ " lghi 0,0\n"
+ " la 1,%[query]\n"
+ /* Parameter registers are ignored */
+ " .insn rrf,0xb9390000,2,4,6,0\n"
+ : [query] "=R" (*query)
+ :
+ : "cc", "0", "1");
+}
static void __init kvm_s390_cpu_feat_init(void)
{
@@ -415,10 +424,10 @@ static void __init kvm_s390_cpu_feat_init(void)
kvm_s390_available_subfunc.kdsa);
if (test_facility(150)) /* SORTL */
- __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
+ __sortl_query(&kvm_s390_available_subfunc.sortl);
if (test_facility(151)) /* DFLTCC */
- __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
+ __dfltcc_query(&kvm_s390_available_subfunc.dfltcc);
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 75d15bf41d97..d01724a715d0 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter,
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
+ cond_resched();
}
return nr;
}
-static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
@@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
return nr;
}
+static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+{
+ long inc = 0;
+
+ while (nr) {
+ inc = min(256L, nr);
+ nr -= inc;
+ inc = __cmm_free_pages(inc, counter, list);
+ if (inc)
+ break;
+ cond_resched();
+ }
+ return nr + inc;
+}
+
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 0a67fcee4414..fa54f3bc0c8d 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,89 +18,12 @@ static unsigned long max_addr;
struct addr_marker {
int is_start;
unsigned long start_address;
+ unsigned long size;
const char *name;
};
-enum address_markers_idx {
- KVA_NR = 0,
- LOWCORE_START_NR,
- LOWCORE_END_NR,
- AMODE31_START_NR,
- AMODE31_END_NR,
- KERNEL_START_NR,
- KERNEL_END_NR,
-#ifdef CONFIG_KFENCE
- KFENCE_START_NR,
- KFENCE_END_NR,
-#endif
- IDENTITY_START_NR,
- IDENTITY_END_NR,
- VMEMMAP_NR,
- VMEMMAP_END_NR,
- VMALLOC_NR,
- VMALLOC_END_NR,
-#ifdef CONFIG_KMSAN
- KMSAN_VMALLOC_SHADOW_START_NR,
- KMSAN_VMALLOC_SHADOW_END_NR,
- KMSAN_VMALLOC_ORIGIN_START_NR,
- KMSAN_VMALLOC_ORIGIN_END_NR,
- KMSAN_MODULES_SHADOW_START_NR,
- KMSAN_MODULES_SHADOW_END_NR,
- KMSAN_MODULES_ORIGIN_START_NR,
- KMSAN_MODULES_ORIGIN_END_NR,
-#endif
- MODULES_NR,
- MODULES_END_NR,
- ABS_LOWCORE_NR,
- ABS_LOWCORE_END_NR,
- MEMCPY_REAL_NR,
- MEMCPY_REAL_END_NR,
-#ifdef CONFIG_KASAN
- KASAN_SHADOW_START_NR,
- KASAN_SHADOW_END_NR,
-#endif
-};
-
-static struct addr_marker address_markers[] = {
- [KVA_NR] = {0, 0, "Kernel Virtual Address Space"},
- [LOWCORE_START_NR] = {1, 0, "Lowcore Start"},
- [LOWCORE_END_NR] = {0, 0, "Lowcore End"},
- [IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"},
- [IDENTITY_END_NR] = {0, 0, "Identity Mapping End"},
- [AMODE31_START_NR] = {1, 0, "Amode31 Area Start"},
- [AMODE31_END_NR] = {0, 0, "Amode31 Area End"},
- [KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"},
- [KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"},
-#ifdef CONFIG_KFENCE
- [KFENCE_START_NR] = {1, 0, "KFence Pool Start"},
- [KFENCE_END_NR] = {0, 0, "KFence Pool End"},
-#endif
- [VMEMMAP_NR] = {1, 0, "vmemmap Area Start"},
- [VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"},
- [VMALLOC_NR] = {1, 0, "vmalloc Area Start"},
- [VMALLOC_END_NR] = {0, 0, "vmalloc Area End"},
-#ifdef CONFIG_KMSAN
- [KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"},
- [KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"},
- [KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"},
- [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"},
- [KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"},
- [KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"},
- [KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"},
- [KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"},
-#endif
- [MODULES_NR] = {1, 0, "Modules Area Start"},
- [MODULES_END_NR] = {0, 0, "Modules Area End"},
- [ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"},
- [ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"},
- [MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"},
- [MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"},
-#ifdef CONFIG_KASAN
- [KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"},
- [KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"},
-#endif
- {1, -1UL, NULL}
-};
+static struct addr_marker *markers;
+static unsigned int markers_cnt;
struct pg_state {
struct ptdump_state ptdump;
@@ -173,7 +96,8 @@ static void note_page_update_state(struct pg_state *st, unsigned long addr, unsi
while (addr >= st->marker[1].start_address) {
st->marker++;
- pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(m, "---[ %s %s ]---\n", st->marker->name,
+ st->marker->is_start ? "Start" : "End");
}
st->start_address = addr;
st->current_prot = prot;
@@ -202,7 +126,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
if (level == -1)
addr = max_addr;
if (st->level == -1) {
- pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_puts(m, "---[ Kernel Virtual Address Space ]---\n");
note_page_update_state(st, addr, prot, level);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
@@ -276,7 +200,7 @@ static int ptdump_show(struct seq_file *m, void *v)
.check_wx = false,
.wx_pages = 0,
.start_address = 0,
- .marker = address_markers,
+ .marker = markers,
};
get_online_mems();
@@ -299,10 +223,23 @@ static int ptdump_cmp(const void *a, const void *b)
if (ama->start_address < amb->start_address)
return -1;
/*
- * If the start addresses of two markers are identical consider the
- * marker which defines the start of an area higher than the one which
- * defines the end of an area. This keeps pairs of markers sorted.
+ * If the start addresses of two markers are identical, sort the markers
+ * so that areas nested within other areas are ordered correctly.
*/
+ if (ama->is_start && amb->is_start) {
+ if (ama->size > amb->size)
+ return -1;
+ if (ama->size < amb->size)
+ return 1;
+ return 0;
+ }
+ if (!ama->is_start && !amb->is_start) {
+ if (ama->size > amb->size)
+ return 1;
+ if (ama->size < amb->size)
+ return -1;
+ return 0;
+ }
if (ama->is_start)
return 1;
if (amb->is_start)
@@ -310,12 +247,41 @@ static int ptdump_cmp(const void *a, const void *b)
return 0;
}
+static int add_marker(unsigned long start, unsigned long end, const char *name)
+{
+ size_t oldsize, newsize;
+
+ oldsize = markers_cnt * sizeof(*markers);
+ newsize = oldsize + 2 * sizeof(*markers);
+ if (!oldsize)
+ markers = kvmalloc(newsize, GFP_KERNEL);
+ else
+ markers = kvrealloc(markers, newsize, GFP_KERNEL);
+ if (!markers)
+ goto error;
+ markers[markers_cnt].is_start = 1;
+ markers[markers_cnt].start_address = start;
+ markers[markers_cnt].size = end - start;
+ markers[markers_cnt].name = name;
+ markers_cnt++;
+ markers[markers_cnt].is_start = 0;
+ markers[markers_cnt].start_address = end;
+ markers[markers_cnt].size = end - start;
+ markers[markers_cnt].name = name;
+ markers_cnt++;
+ return 0;
+error:
+ markers_cnt = 0;
+ return -ENOMEM;
+}
+
static int pt_dump_init(void)
{
#ifdef CONFIG_KFENCE
unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
unsigned long lowcore = (unsigned long)get_lowcore();
+ int rc;
/*
* Figure out the maximum virtual address being accessible with the
@@ -324,41 +290,38 @@ static int pt_dump_init(void)
*/
max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
- address_markers[LOWCORE_START_NR].start_address = lowcore;
- address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore);
- address_markers[IDENTITY_START_NR].start_address = __identity_base;
- address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
- address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
- address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
- address_markers[MODULES_NR].start_address = MODULES_VADDR;
- address_markers[MODULES_END_NR].start_address = MODULES_END;
- address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
- address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE;
- address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area;
- address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE;
- address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
- address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
- address_markers[VMALLOC_NR].start_address = VMALLOC_START;
- address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+ /* overall start + end markers - must be added first */
+ rc = add_marker(0, -1UL, NULL);
+ rc |= add_marker((unsigned long)_stext, (unsigned long)_end, "Kernel Image");
+ rc |= add_marker(lowcore, lowcore + sizeof(struct lowcore), "Lowcore");
+ rc |= add_marker(__identity_base, __identity_base + ident_map_size, "Identity Mapping");
+ rc |= add_marker((unsigned long)__samode31, (unsigned long)__eamode31, "Amode31 Area");
+ rc |= add_marker(MODULES_VADDR, MODULES_END, "Modules Area");
+ rc |= add_marker(__abs_lowcore, __abs_lowcore + ABS_LOWCORE_MAP_SIZE, "Lowcore Area");
+ rc |= add_marker(__memcpy_real_area, __memcpy_real_area + MEMCPY_REAL_SIZE, "Real Memory Copy Area");
+ rc |= add_marker((unsigned long)vmemmap, (unsigned long)vmemmap + vmemmap_size, "vmemmap Area");
+ rc |= add_marker(VMALLOC_START, VMALLOC_END, "vmalloc Area");
#ifdef CONFIG_KFENCE
- address_markers[KFENCE_START_NR].start_address = kfence_start;
- address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
+ rc |= add_marker(kfence_start, kfence_start + KFENCE_POOL_SIZE, "KFence Pool");
#endif
#ifdef CONFIG_KMSAN
- address_markers[KMSAN_VMALLOC_SHADOW_START_NR].start_address = KMSAN_VMALLOC_SHADOW_START;
- address_markers[KMSAN_VMALLOC_SHADOW_END_NR].start_address = KMSAN_VMALLOC_SHADOW_END;
- address_markers[KMSAN_VMALLOC_ORIGIN_START_NR].start_address = KMSAN_VMALLOC_ORIGIN_START;
- address_markers[KMSAN_VMALLOC_ORIGIN_END_NR].start_address = KMSAN_VMALLOC_ORIGIN_END;
- address_markers[KMSAN_MODULES_SHADOW_START_NR].start_address = KMSAN_MODULES_SHADOW_START;
- address_markers[KMSAN_MODULES_SHADOW_END_NR].start_address = KMSAN_MODULES_SHADOW_END;
- address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
- address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
+ rc |= add_marker(KMSAN_VMALLOC_SHADOW_START, KMSAN_VMALLOC_SHADOW_END, "Kmsan vmalloc Shadow");
+ rc |= add_marker(KMSAN_VMALLOC_ORIGIN_START, KMSAN_VMALLOC_ORIGIN_END, "Kmsan vmalloc Origins");
+ rc |= add_marker(KMSAN_MODULES_SHADOW_START, KMSAN_MODULES_SHADOW_END, "Kmsan Modules Shadow");
+ rc |= add_marker(KMSAN_MODULES_ORIGIN_START, KMSAN_MODULES_ORIGIN_END, "Kmsan Modules Origins");
+#endif
+#ifdef CONFIG_KASAN
+ rc |= add_marker(KASAN_SHADOW_START, KASAN_SHADOW_END, "Kasan Shadow");
#endif
- sort(address_markers, ARRAY_SIZE(address_markers) - 1,
- sizeof(address_markers[0]), ptdump_cmp, NULL);
+ if (rc)
+ goto error;
+ sort(&markers[1], markers_cnt - 1, sizeof(*markers), ptdump_cmp, NULL);
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */
return 0;
+error:
+ kvfree(markers);
+ return -ENOMEM;
}
device_initcall(pt_dump_init);
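
The comparator's new tie-breaking rules are what let the dynamically built marker list nest: at equal addresses, the start of the larger (enclosing) area sorts before the start of a contained one, the end of a contained area sorts before the end of the enclosing one, and an end always sorts before a start. A standalone illustration of the ordering (plain userspace C, same logic as ptdump_cmp()):

    #include <stdio.h>
    #include <stdlib.h>

    struct marker { int is_start; unsigned long start, size; const char *name; };

    static int cmp(const void *a, const void *b)
    {
            const struct marker *ma = a, *mb = b;

            if (ma->start != mb->start)
                    return ma->start < mb->start ? -1 : 1;
            if (ma->is_start && mb->is_start)       /* enclosing area starts first */
                    return ma->size > mb->size ? -1 : ma->size < mb->size;
            if (!ma->is_start && !mb->is_start)     /* enclosing area ends last */
                    return ma->size < mb->size ? -1 : ma->size > mb->size;
            return ma->is_start ? 1 : -1;           /* End sorts before Start */
    }

    int main(void)
    {
            struct marker m[] = {
                    { 1, 0x1000, 0x3000, "Outer" }, { 0, 0x4000, 0x3000, "Outer" },
                    { 1, 0x1000, 0x1000, "Inner" }, { 0, 0x2000, 0x1000, "Inner" },
            };

            qsort(m, 4, sizeof(m[0]), cmp);
            /* prints: Outer Start, Inner Start, Inner End, Outer End */
            for (int i = 0; i < 4; i++)
                    printf("%s %s\n", m[i].name, m[i].is_start ? "Start" : "End");
            return 0;
    }
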
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 8e149ef5e89b..ad8b0d6b77ea 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,6 +34,7 @@
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
+#include <linux/pagewalk.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
@@ -492,9 +493,9 @@ void do_secure_storage_access(struct pt_regs *regs)
union teid teid = { .val = regs->int_parm_long };
unsigned long addr = get_fault_address(regs);
struct vm_area_struct *vma;
+ struct folio_walk fw;
struct mm_struct *mm;
struct folio *folio;
- struct page *page;
struct gmap *gmap;
int rc;
@@ -536,15 +537,18 @@ void do_secure_storage_access(struct pt_regs *regs)
vma = find_vma(mm, addr);
if (!vma)
return handle_fault_error(regs, SEGV_MAPERR);
- page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
- if (IS_ERR_OR_NULL(page)) {
+ folio = folio_walk_start(&fw, vma, addr, 0);
+ if (!folio) {
mmap_read_unlock(mm);
break;
}
- folio = page_folio(page);
- if (arch_make_folio_accessible(folio))
- send_sig(SIGSEGV, current, 0);
+ /* arch_make_folio_accessible() needs a raised refcount. */
+ folio_get(folio);
+ rc = arch_make_folio_accessible(folio);
folio_put(folio);
+ folio_walk_end(&fw, vma);
+ if (rc)
+ send_sig(SIGSEGV, current, 0);
mmap_read_unlock(mm);
break;
case KERNEL_FAULT:
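
follow_page() is replaced by the folio_walk API here: folio_walk_start() resolves the address to a folio and returns with the page table lock held, folio_walk_end() drops it, and because arch_make_folio_accessible() needs a raised refcount the folio is pinned explicitly in between. A condensed sketch of the pattern, assuming mmap_read_lock() is already held and with work() as a hypothetical callback:

    #include <linux/pagewalk.h>
    #include <linux/mm.h>

    static int with_walked_folio(struct vm_area_struct *vma, unsigned long addr,
                                 int (*work)(struct folio *))
    {
            struct folio_walk fw;
            struct folio *folio;
            int rc;

            folio = folio_walk_start(&fw, vma, addr, 0);
            if (!folio)
                    return -EFAULT;
            folio_get(folio);       /* callee may need a raised refcount */
            rc = work(folio);
            folio_put(folio);
            folio_walk_end(&fw, vma);
            return rc;
    }
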
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index e3d258f9e726..7a96623a9d2e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(zero_page_mask);
static void __init setup_zero_pages(void)
{
- unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+ unsigned long total_pages = memblock_estimated_nr_free_pages();
unsigned int order;
struct page *page;
int i;
@@ -97,7 +97,7 @@ void __init paging_init(void)
vmem_map_init();
sparse_init();
- zone_dma_bits = 31;
+ zone_dma_limit = DMA_BIT_MASK(31);
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 206756946589..96efa061ce01 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -82,7 +82,7 @@ static int get_align_mask(struct file *filp, unsigned long flags)
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -117,7 +117,7 @@ check_asce_limit:
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 0547a10406e7..2c21f0394c9a 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -3,7 +3,8 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o pci_sysfs.o \
+obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
pci_bus.o pci_kvm_hook.o
obj-$(CONFIG_PCI_IOV) += pci_iov.o
+obj-$(CONFIG_SYSFS) += pci_sysfs.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index cff4838fad21..bd9624c20b80 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -587,7 +587,6 @@ int pcibios_device_add(struct pci_dev *pdev)
if (pdev->is_physfn)
pdev->no_vf_scan = 1;
- pdev->dev.groups = zpci_attr_groups;
zpci_map_resources(pdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index ee90a91ed888..6f55a59a0871 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -657,7 +657,6 @@ static const struct file_operations clp_misc_fops = {
.release = clp_misc_release,
.unlocked_ioctl = clp_misc_ioctl,
.compat_ioctl = clp_misc_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice clp_misc_device = {
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 5398729bfe1b..de5c0b389a3e 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -118,12 +118,11 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
const void __user *, user_buffer, size_t, length)
{
+ struct follow_pfnmap_args args = { };
u8 local_buf[64];
void __iomem *io_addr;
void *buf;
struct vm_area_struct *vma;
- pte_t *ptep;
- spinlock_t *ptl;
long ret;
if (!zpci_is_enabled())
@@ -169,11 +168,13 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
- ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
+ args.address = mmio_addr;
+ args.vma = vma;
+ ret = follow_pfnmap_start(&args);
if (ret)
goto out_unlock_mmap;
- io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
+ io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
(mmio_addr & ~PAGE_MASK));
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
@@ -181,7 +182,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
out_unlock_mmap:
mmap_read_unlock(current->mm);
out_free:
@@ -260,12 +261,11 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
void __user *, user_buffer, size_t, length)
{
+ struct follow_pfnmap_args args = { };
u8 local_buf[64];
void __iomem *io_addr;
void *buf;
struct vm_area_struct *vma;
- pte_t *ptep;
- spinlock_t *ptl;
long ret;
if (!zpci_is_enabled())
@@ -308,11 +308,13 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock_mmap;
- ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
+ args.vma = vma;
+ args.address = mmio_addr;
+ ret = follow_pfnmap_start(&args);
if (ret)
goto out_unlock_mmap;
- io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
+ io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
(mmio_addr & ~PAGE_MASK));
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
@@ -322,7 +324,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
ret = zpci_memcpy_fromio(buf, io_addr, length);
out_unlock_pt:
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
out_unlock_mmap:
mmap_read_unlock(current->mm);
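
Both mmio syscalls move from follow_pte() to the follow_pfnmap API, which hides the PTE and page table lock details: follow_pfnmap_start() fills args.pfn for the given args.vma/args.address and keeps the mapping stable until follow_pfnmap_end(). A sketch of the resulting access pattern, with mmap_read_lock() held and error paths trimmed:

    #include <linux/mm.h>
    #include <linux/io.h>

    static long read_pfnmap(struct vm_area_struct *vma, unsigned long addr,
                            void *buf, size_t len)
    {
            struct follow_pfnmap_args args = { .vma = vma, .address = addr };
            void __iomem *io_addr;
            long ret;

            ret = follow_pfnmap_start(&args);
            if (ret)
                    return ret;
            io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
                                       (addr & ~PAGE_MASK));
            /* access only between start and end, while the pfn is stable */
            memcpy_fromio(buf, io_addr, len);
            follow_pfnmap_end(&args);
            return 0;
    }
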
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 0f4f1e8fc480..1f81f6ff7b95 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -197,7 +197,7 @@ static struct attribute *zpci_ident_attrs[] = {
NULL,
};
-static struct attribute_group zpci_ident_attr_group = {
+const struct attribute_group zpci_ident_attr_group = {
.attrs = zpci_ident_attrs,
.is_visible = zpci_index_is_visible,
};
@@ -223,7 +223,7 @@ static struct attribute *zpci_dev_attrs[] = {
NULL,
};
-static struct attribute_group zpci_attr_group = {
+const struct attribute_group zpci_attr_group = {
.attrs = zpci_dev_attrs,
.bin_attrs = zpci_bin_attrs,
};
@@ -235,14 +235,8 @@ static struct attribute *pfip_attrs[] = {
&dev_attr_segment3.attr,
NULL,
};
-static struct attribute_group pfip_attr_group = {
+
+const struct attribute_group pfip_attr_group = {
.name = "pfip",
.attrs = pfip_attrs,
};
-
-const struct attribute_group *zpci_attr_groups[] = {
- &zpci_attr_group,
- &pfip_attr_group,
- &zpci_ident_attr_group,
- NULL,
-};
diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt
index 5f008e794898..def2659f6602 100644
--- a/arch/s390/tools/opcodes.txt
+++ b/arch/s390/tools/opcodes.txt
@@ -527,9 +527,9 @@ b938 sortl RRE_RR
b939 dfltcc RRF_R0RR2
b93a kdsa RRE_RR
b93b nnpa RRE_00
-b93c ppno RRE_RR
-b93e kimd RRE_RR
-b93f klmd RRE_RR
+b93c prno RRE_RR
+b93e kimd RRF_U0RR
+b93f klmd RRF_U0RR
b941 cfdtr RRF_UURF
b942 clgdtr RRF_UURF
b943 clfdtr RRF_UURF
@@ -549,6 +549,10 @@ b964 nngrk RRF_R0RR2
b965 ocgrk RRF_R0RR2
b966 nogrk RRF_R0RR2
b967 nxgrk RRF_R0RR2
+b968 clzg RRE_RR
+b969 ctzg RRE_RR
+b96c bextg RRF_R0RR2
+b96d bdepg RRF_R0RR2
b972 crt RRF_U0RR
b973 clrt RRF_U0RR
b974 nnrk RRF_R0RR2
@@ -796,6 +800,16 @@ e35b sy RXY_RRRD
e35c mfy RXY_RRRD
e35e aly RXY_RRRD
e35f sly RXY_RRRD
+e360 lxab RXY_RRRD
+e361 llxab RXY_RRRD
+e362 lxah RXY_RRRD
+e363 llxah RXY_RRRD
+e364 lxaf RXY_RRRD
+e365 llxaf RXY_RRRD
+e366 lxag RXY_RRRD
+e367 llxag RXY_RRRD
+e368 lxaq RXY_RRRD
+e369 llxaq RXY_RRRD
e370 sthy RXY_RRRD
e371 lay RXY_RRRD
e372 stcy RXY_RRRD
@@ -880,6 +894,8 @@ e63c vupkz VSI_URDV
e63d vstrl VSI_URDV
e63f vstrlr VRS_RRDV
e649 vlip VRI_V0UU2
+e64a vcvdq VRI_VV0UU
+e64e vcvbq VRR_VV0U2
e650 vcvb VRR_RV0UU
e651 vclzdp VRR_VV0U2
e652 vcvbg VRR_RV0UU
@@ -893,7 +909,7 @@ e65b vpsop VRI_VVUUU2
e65c vupkzl VRR_VV0U2
e65d vcfn VRR_VV0UU2
e65e vclfnl VRR_VV0UU2
-e65f vtp VRR_0V
+e65f vtp VRR_0V0U
e670 vpkzr VRI_VVV0UU2
e671 vap VRI_VVV0UU2
e672 vsrpr VRI_VVV0UU2
@@ -908,6 +924,7 @@ e67b vrp VRI_VVV0UU2
e67c vscshp VRR_VVV
e67d vcsph VRR_VVV0U0
e67e vsdp VRI_VVV0UU2
+e67f vtz VRR_0VVU
e700 vleb VRX_VRRDU
e701 vleh VRX_VRRDU
e702 vleg VRX_VRRDU
@@ -948,6 +965,7 @@ e74d vrep VRI_VVUU
e750 vpopct VRR_VV0U
e752 vctz VRR_VV0U
e753 vclz VRR_VV0U
+e754 vgem VRR_VV0U
e756 vlr VRX_VV
e75c vistr VRR_VV0U0U
e75f vseg VRR_VV0U
@@ -985,6 +1003,8 @@ e784 vpdi VRR_VVV0U
e785 vbperm VRR_VVV
e786 vsld VRI_VVV0U
e787 vsrd VRI_VVV0U
+e788 veval VRI_VVV0UV
+e789 vblend VRR_VVVU0V
e78a vstrc VRR_VVVUU0V
e78b vstrs VRR_VVVUU0V
e78c vperm VRR_VVV0V
@@ -1010,6 +1030,10 @@ e7ac vmale VRR_VVVU0V
e7ad vmalo VRR_VVVU0V
e7ae vmae VRR_VVVU0V
e7af vmao VRR_VVVU0V
+e7b0 vdl VRR_VVV0UU
+e7b1 vrl VRR_VVV0UU
+e7b2 vd VRR_VVV0UU
+e7b3 vr VRR_VVV0UU
e7b4 vgfm VRR_VVV0U
e7b8 vmsl VRR_VVVUU0V
e7b9 vaccc VRR_VVVU0V
@@ -1017,12 +1041,12 @@ e7bb vac VRR_VVVU0V
e7bc vgfma VRR_VVVU0V
e7bd vsbcbi VRR_VVVU0V
e7bf vsbi VRR_VVVU0V
-e7c0 vclgd VRR_VV0UUU
-e7c1 vcdlg VRR_VV0UUU
-e7c2 vcgd VRR_VV0UUU
-e7c3 vcdg VRR_VV0UUU
-e7c4 vlde VRR_VV0UU2
-e7c5 vled VRR_VV0UUU
+e7c0 vclfp VRR_VV0UUU
+e7c1 vcfpl VRR_VV0UUU
+e7c2 vcsfp VRR_VV0UUU
+e7c3 vcfps VRR_VV0UUU
+e7c4 vfll VRR_VV0UU2
+e7c5 vflr VRR_VV0UUU
e7c7 vfi VRR_VV0UUU
e7ca wfk VRR_VV0UU2
e7cb wfc VRR_VV0UU2
@@ -1094,9 +1118,9 @@ eb54 niy SIY_URD
eb55 cliy SIY_URD
eb56 oiy SIY_URD
eb57 xiy SIY_URD
-eb60 lric RSY_RDRU
-eb61 stric RSY_RDRU
-eb62 mric RSY_RDRU
+eb60 lric RSY_RURD2
+eb61 stric RSY_RURD2
+eb62 mric RSY_RURD2
eb6a asi SIY_IRD
eb6e alsi SIY_IRD
eb71 lpswey SIY_RD
@@ -1104,7 +1128,7 @@ eb7a agsi SIY_IRD
eb7e algsi SIY_IRD
eb80 icmh RSY_RURD
eb81 icmy RSY_RURD
-eb8a sqbs RSY_RDRU
+eb8a sqbs RSY_RURD2
eb8e mvclu RSY_RRRD
eb8f clclu RSY_RRRD
eb90 stmy RSY_RRRD
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 0f384b1f45ca..53fc18a3d4c2 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -14,12 +14,6 @@
#define NO_IRQ_IGNORE ((unsigned int)-1)
/*
- * Simple Mask Register Support
- */
-extern void make_maskreg_irq(unsigned int irq);
-extern unsigned short *irq_mask_register;
-
-/*
* PINT IRQs
*/
void make_imask_irq(unsigned int irq);
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 7b8dead2723d..63f88b465e39 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -5,9 +5,6 @@
#ifdef CONFIG_NUMA
#include <linux/numa.h>
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[nid])
-
static inline int pfn_to_nid(unsigned long pfn)
{
int nid;
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 1bd85a6949c4..add35c51e017 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -36,6 +36,10 @@ __setup("vdso=", vdso_setup);
*/
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
static struct page *syscall_pages[1];
+static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ .pages = syscall_pages,
+};
int __init vsyscall_init(void)
{
@@ -58,6 +62,7 @@ int __init vsyscall_init(void)
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
unsigned long addr;
int ret;
@@ -70,14 +75,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;
}
- ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ vdso_mapping.pages = syscall_pages;
+ vma = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
- syscall_pages);
- if (unlikely(ret))
+ &vdso_mapping);
+ ret = PTR_ERR(vma);
+ if (IS_ERR(vma))
goto up_fail;
current->mm->context.vdso = (void *)addr;
+ ret = 0;
up_fail:
mmap_write_unlock(mm);
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d1fe90b2f5ff..2a88b0c9e70f 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -212,12 +212,7 @@ void __init allocate_pgdat(unsigned int nid)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
#ifdef CONFIG_NUMA
- NODE_DATA(nid) = memblock_alloc_try_nid(
- sizeof(struct pglist_data),
- SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
- MEMBLOCK_ALLOC_ACCESSIBLE, nid);
- if (!NODE_DATA(nid))
- panic("Can't allocate pgdat for node %d\n", nid);
+ alloc_node_data(nid);
#endif
NODE_DATA(nid)->node_start_pfn = start_pfn;
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index bee329d4149a..c442734d9b0c 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -52,7 +52,8 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -99,7 +100,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 50f0dc1744d0..9bc212b5e762 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -14,9 +14,6 @@
#include <linux/pfn.h>
#include <asm/sections.h>
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL_GPL(node_data);
-
/*
* On SH machines the conventional approach is to stash system RAM
* in node 0, and other memory blocks in to node 1 and up, ordered by
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 11bf9d312318..dcfdb7f1dae9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -14,9 +14,9 @@ config SPARC
bool
default y
select ARCH_HAS_CPU_CACHE_ALIASING
+ select ARCH_HAS_DMA_OPS
select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
select ARCH_MIGHT_HAVE_PC_SERIO
- select DMA_OPS
select OF
select OF_PROMTREE
select HAVE_ASM_MODVERSIONS
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h
index a236d8aa893a..74eb2c71d077 100644
--- a/arch/sparc/include/asm/mmzone.h
+++ b/arch/sparc/include/asm/mmzone.h
@@ -6,10 +6,6 @@
#include <linux/cpumask.h>
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[nid])
-
extern int numa_cpu_lookup_table[];
extern cpumask_t numa_cpumask_lookup_table[];
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 3fe429d73a65..2b7f358762c1 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -783,6 +783,7 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
return __pmd(pte_val(pte));
}
+#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t entry)
{
unsigned long val = pmd_val(entry);
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 08a19727795c..80822f922e76 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -39,7 +39,7 @@ SYSCALL_DEFINE0(getpagesize)
return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
struct vm_unmapped_area_info info = {};
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index d9c3b34ca744..acade309dc2f 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -87,7 +87,7 @@ static inline unsigned long COLOR_ALIGN(unsigned long addr,
return base + off;
}
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
@@ -146,7 +146,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+ const unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 53d7cb5bbffe..21f8cbbd0581 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1075,14 +1075,9 @@ static void __init allocate_node_data(int nid)
{
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NUMA
- NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
- SMP_CACHE_BYTES, nid);
- if (!NODE_DATA(nid)) {
- prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
- prom_halt();
- }
+#ifdef CONFIG_NUMA
+ alloc_node_data(nid);
NODE_DATA(nid)->node_id = nid;
#endif
@@ -1115,11 +1110,9 @@ static void init_node_masks_nonnuma(void)
}
#ifdef CONFIG_NUMA
-struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
-EXPORT_SYMBOL(node_data);
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
u32 cfg_handle)
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index ec61ff1f96b7..1dc9b3d70eda 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -39,12 +39,10 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
unsigned int ctxtbl;
unsigned int pgd, pmd, ped;
unsigned int ptr;
- unsigned int lvl, pte, paddrbase;
+ unsigned int lvl, pte;
unsigned int ctx;
unsigned int paddr_calc;
- paddrbase = 0;
-
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: trace on\n");
@@ -73,7 +71,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pgd is entry level 3\n");
lvl = 3;
pte = pgd;
- paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -96,7 +93,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pmd is entry level 2\n");
lvl = 2;
pte = pmd;
- paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -124,7 +120,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ped is entry level 1\n");
lvl = 1;
pte = ped;
- paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -147,7 +142,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ptr is entry level 0\n");
lvl = 0;
pte = ptr;
- paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (srmmu_swprobe_trace)
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index dca84fd6d00a..c89575d05021 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -11,7 +11,6 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
- select ARCH_NO_PREEMPT_DYNAMIC
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 99a7144b229f..819aabb4ecdc 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -164,7 +164,6 @@ static const struct file_operations harddog_fops = {
.compat_ioctl = compat_ptr_ioctl,
.open = harddog_open,
.release = harddog_release,
- .llseek = no_llseek,
};
static struct miscdevice harddog_miscdev = {
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index c42b793bce65..9d228878cea2 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -291,7 +291,6 @@ static int hostmixer_release(struct inode *inode, struct file *file)
static const struct file_operations hostaudio_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = hostaudio_read,
.write = hostaudio_write,
.poll = hostaudio_poll,
@@ -304,7 +303,6 @@ static const struct file_operations hostaudio_fops = {
static const struct file_operations hostmixer_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = hostmixer_ioctl_mixdev,
.open = hostmixer_open_mixdev,
.release = hostmixer_release,
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 2d473282ab51..c992da83268d 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/fs.h>
+#include <asm/atomic.h>
#include <uapi/linux/filter.h>
#include <init.h>
#include <irq_kern.h>
@@ -102,18 +103,33 @@ static const struct {
static void vector_reset_stats(struct vector_private *vp)
{
+ /* We reuse the existing queue locks for stats */
+
+ /* RX stats are modified with RX head_lock held
+ * in vector_poll.
+ */
+
+ spin_lock(&vp->rx_queue->head_lock);
vp->estats.rx_queue_max = 0;
vp->estats.rx_queue_running_average = 0;
- vp->estats.tx_queue_max = 0;
- vp->estats.tx_queue_running_average = 0;
vp->estats.rx_encaps_errors = 0;
+ vp->estats.sg_ok = 0;
+ vp->estats.sg_linearized = 0;
+ spin_unlock(&vp->rx_queue->head_lock);
+
+ /* TX stats are modified with TX head_lock held
+ * in vector_send.
+ */
+
+ spin_lock(&vp->tx_queue->head_lock);
vp->estats.tx_timeout_count = 0;
vp->estats.tx_restart_queue = 0;
vp->estats.tx_kicks = 0;
vp->estats.tx_flow_control_xon = 0;
vp->estats.tx_flow_control_xoff = 0;
- vp->estats.sg_ok = 0;
- vp->estats.sg_linearized = 0;
+ vp->estats.tx_queue_max = 0;
+ vp->estats.tx_queue_running_average = 0;
+ spin_unlock(&vp->tx_queue->head_lock);
}
static int get_mtu(struct arglist *def)
@@ -232,12 +248,6 @@ static int get_transport_options(struct arglist *def)
static char *drop_buffer;
-/* Array backed queues optimized for bulk enqueue/dequeue and
- * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
- * For more details and full design rationale see
- * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
- */
-
/*
* Advance the mmsg queue head by n = advance. Resets the queue to
@@ -247,27 +257,13 @@ static char *drop_buffer;
static int vector_advancehead(struct vector_queue *qi, int advance)
{
- int queue_depth;
-
qi->head =
(qi->head + advance)
% qi->max_depth;
- spin_lock(&qi->tail_lock);
- qi->queue_depth -= advance;
-
- /* we are at 0, use this to
- * reset head and tail so we can use max size vectors
- */
-
- if (qi->queue_depth == 0) {
- qi->head = 0;
- qi->tail = 0;
- }
- queue_depth = qi->queue_depth;
- spin_unlock(&qi->tail_lock);
- return queue_depth;
+ atomic_sub(advance, &qi->queue_depth);
+ return atomic_read(&qi->queue_depth);
}
/* Advance the queue tail by n = advance.
@@ -277,16 +273,11 @@ static int vector_advancehead(struct vector_queue *qi, int advance)
static int vector_advancetail(struct vector_queue *qi, int advance)
{
- int queue_depth;
-
qi->tail =
(qi->tail + advance)
% qi->max_depth;
- spin_lock(&qi->head_lock);
- qi->queue_depth += advance;
- queue_depth = qi->queue_depth;
- spin_unlock(&qi->head_lock);
- return queue_depth;
+ atomic_add(advance, &qi->queue_depth);
+ return atomic_read(&qi->queue_depth);
}
static int prep_msg(struct vector_private *vp,
@@ -339,9 +330,7 @@ static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
int iov_count;
spin_lock(&qi->tail_lock);
- spin_lock(&qi->head_lock);
- queue_depth = qi->queue_depth;
- spin_unlock(&qi->head_lock);
+ queue_depth = atomic_read(&qi->queue_depth);
if (skb)
packet_len = skb->len;
@@ -360,6 +349,7 @@ static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
mmsg_vector->msg_hdr.msg_iovlen = iov_count;
mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
+ wmb(); /* Make the packet visible to the NAPI poll thread */
queue_depth = vector_advancetail(qi, 1);
} else
goto drop;
@@ -398,7 +388,7 @@ static int consume_vector_skbs(struct vector_queue *qi, int count)
}
/*
- * Generic vector deque via sendmmsg with support for forming headers
+ * Generic vector dequeue via sendmmsg with support for forming headers
* using transport specific callback. Allows GRE, L2TPv3, RAW and
* other transports to use a common dequeue procedure in vector mode
*/
@@ -408,69 +398,64 @@ static int vector_send(struct vector_queue *qi)
{
struct vector_private *vp = netdev_priv(qi->dev);
struct mmsghdr *send_from;
- int result = 0, send_len, queue_depth = qi->max_depth;
+ int result = 0, send_len;
if (spin_trylock(&qi->head_lock)) {
- if (spin_trylock(&qi->tail_lock)) {
- /* update queue_depth to current value */
- queue_depth = qi->queue_depth;
- spin_unlock(&qi->tail_lock);
- while (queue_depth > 0) {
- /* Calculate the start of the vector */
- send_len = queue_depth;
- send_from = qi->mmsg_vector;
- send_from += qi->head;
- /* Adjust vector size if wraparound */
- if (send_len + qi->head > qi->max_depth)
- send_len = qi->max_depth - qi->head;
- /* Try to TX as many packets as possible */
- if (send_len > 0) {
- result = uml_vector_sendmmsg(
- vp->fds->tx_fd,
- send_from,
- send_len,
- 0
- );
- vp->in_write_poll =
- (result != send_len);
- }
- /* For some of the sendmmsg error scenarios
- * we may end being unsure in the TX success
- * for all packets. It is safer to declare
- * them all TX-ed and blame the network.
- */
- if (result < 0) {
- if (net_ratelimit())
- netdev_err(vp->dev, "sendmmsg err=%i\n",
- result);
- vp->in_error = true;
- result = send_len;
- }
- if (result > 0) {
- queue_depth =
- consume_vector_skbs(qi, result);
- /* This is equivalent to an TX IRQ.
- * Restart the upper layers to feed us
- * more packets.
- */
- if (result > vp->estats.tx_queue_max)
- vp->estats.tx_queue_max = result;
- vp->estats.tx_queue_running_average =
- (vp->estats.tx_queue_running_average + result) >> 1;
- }
- netif_wake_queue(qi->dev);
- /* if TX is busy, break out of the send loop,
- * poll write IRQ will reschedule xmit for us
+ /* re-read queue_depth on every loop iteration */
+ while (atomic_read(&qi->queue_depth) > 0) {
+ /* Calculate the start of the vector */
+ send_len = atomic_read(&qi->queue_depth);
+ send_from = qi->mmsg_vector;
+ send_from += qi->head;
+ /* Adjust vector size if wraparound */
+ if (send_len + qi->head > qi->max_depth)
+ send_len = qi->max_depth - qi->head;
+ /* Try to TX as many packets as possible */
+ if (send_len > 0) {
+ result = uml_vector_sendmmsg(
+ vp->fds->tx_fd,
+ send_from,
+ send_len,
+ 0
+ );
+ vp->in_write_poll =
+ (result != send_len);
+ }
+ /* For some of the sendmmsg error scenarios
+ * we may end up being unsure of the TX success
+ * for all packets. It is safer to declare
+ * them all TX-ed and blame the network.
+ */
+ if (result < 0) {
+ if (net_ratelimit())
+ netdev_err(vp->dev, "sendmmsg err=%i\n",
+ result);
+ vp->in_error = true;
+ result = send_len;
+ }
+ if (result > 0) {
+ consume_vector_skbs(qi, result);
+ /* This is equivalent to a TX IRQ.
+ * Restart the upper layers to feed us
+ * more packets.
*/
- if (result != send_len) {
- vp->estats.tx_restart_queue++;
- break;
- }
+ if (result > vp->estats.tx_queue_max)
+ vp->estats.tx_queue_max = result;
+ vp->estats.tx_queue_running_average =
+ (vp->estats.tx_queue_running_average + result) >> 1;
+ }
+ netif_wake_queue(qi->dev);
+ /* if TX is busy, break out of the send loop,
+ * poll write IRQ will reschedule xmit for us.
+ */
+ if (result != send_len) {
+ vp->estats.tx_restart_queue++;
+ break;
}
}
spin_unlock(&qi->head_lock);
}
- return queue_depth;
+ return atomic_read(&qi->queue_depth);
}
/* Queue destructor. Deliberately stateless so we can use
@@ -589,7 +574,7 @@ static struct vector_queue *create_queue(
}
spin_lock_init(&result->head_lock);
spin_lock_init(&result->tail_lock);
- result->queue_depth = 0;
+ atomic_set(&result->queue_depth, 0);
result->head = 0;
result->tail = 0;
return result;
@@ -668,18 +653,27 @@ done:
}
-/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs*/
+/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */
static void prep_queue_for_rx(struct vector_queue *qi)
{
struct vector_private *vp = netdev_priv(qi->dev);
struct mmsghdr *mmsg_vector = qi->mmsg_vector;
void **skbuff_vector = qi->skbuff_vector;
- int i;
+ int i, queue_depth;
+
+ queue_depth = atomic_read(&qi->queue_depth);
- if (qi->queue_depth == 0)
+ if (queue_depth == 0)
return;
- for (i = 0; i < qi->queue_depth; i++) {
+
+ /* RX is always emptied 100% during each cycle, so we do not
+ * have to do the tail wraparound math for it.
+ */
+
+ qi->head = qi->tail = 0;
+
+ for (i = 0; i < queue_depth; i++) {
/* it is OK if allocation fails - recvmmsg with NULL data in
* iov argument still performs an RX, just drops the packet
 * This allows us to stop faffing around with a "drop buffer"
@@ -689,7 +683,7 @@ static void prep_queue_for_rx(struct vector_queue *qi)
skbuff_vector++;
mmsg_vector++;
}
- qi->queue_depth = 0;
+ atomic_set(&qi->queue_depth, 0);
}
static struct vector_device *find_device(int n)
@@ -972,7 +966,7 @@ static int vector_mmsg_rx(struct vector_private *vp, int budget)
budget = qi->max_depth;
packet_count = uml_vector_recvmmsg(
- vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
+ vp->fds->rx_fd, qi->mmsg_vector, budget, 0);
if (packet_count < 0)
vp->in_error = true;
@@ -985,7 +979,7 @@ static int vector_mmsg_rx(struct vector_private *vp, int budget)
* many do we need to prep the next time prep_queue_for_rx() is called.
*/
- qi->queue_depth = packet_count;
+ atomic_add(packet_count, &qi->queue_depth);
for (i = 0; i < packet_count; i++) {
skb = (*skbuff_vector);
@@ -1172,6 +1166,7 @@ static int vector_poll(struct napi_struct *napi, int budget)
if ((vp->options & VECTOR_TX) != 0)
tx_enqueued = (vector_send(vp->tx_queue) > 0);
+ spin_lock(&vp->rx_queue->head_lock);
if ((vp->options & VECTOR_RX) > 0)
err = vector_mmsg_rx(vp, budget);
else {
@@ -1179,12 +1174,13 @@ static int vector_poll(struct napi_struct *napi, int budget)
if (err > 0)
err = 1;
}
+ spin_unlock(&vp->rx_queue->head_lock);
if (err > 0)
work_done += err;
if (tx_enqueued || err > 0)
napi_schedule(napi);
- if (work_done < budget)
+ if (work_done <= budget)
napi_complete_done(napi, work_done);
return work_done;
}
@@ -1225,7 +1221,7 @@ static int vector_net_open(struct net_device *dev)
vp->rx_header_size,
MAX_IOV_SIZE
);
- vp->rx_queue->queue_depth = get_depth(vp->parsed);
+ atomic_set(&vp->rx_queue->queue_depth, get_depth(vp->parsed));
} else {
vp->header_rxbuffer = kmalloc(
vp->rx_header_size,
@@ -1467,7 +1463,17 @@ static void vector_get_ethtool_stats(struct net_device *dev,
{
struct vector_private *vp = netdev_priv(dev);
+ /* Stats are modified in the dequeue portions of
+ * rx/tx, which are protected by the head locks.
+ * Grabbing these locks here ensures they are up
+ * to date.
+ */
+
+ spin_lock(&vp->tx_queue->head_lock);
+ spin_lock(&vp->rx_queue->head_lock);
memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
+ spin_unlock(&vp->rx_queue->head_lock);
+ spin_unlock(&vp->tx_queue->head_lock);
}
static int vector_get_coalesce(struct net_device *netdev,
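
The queue rework keeps tail_lock for the enqueue side and head_lock for the dequeue side but turns queue_depth into an atomic_t, so either side can read the fill level without taking the other's lock; the wmb() added to vector_enqueue() publishes the message vector before the new depth becomes visible to the NAPI poller. The resulting single-producer/single-consumer shape, sketched with a hypothetical 64-slot ring:

    #include <linux/atomic.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct ring {
            spinlock_t tail_lock;           /* serializes the producer */
            spinlock_t head_lock;           /* serializes the consumer */
            atomic_t depth;                 /* readable lock-free by both */
            int head, tail;
            void *slot[64];
    };

    static bool ring_push(struct ring *r, void *item)
    {
            bool ok = false;

            spin_lock(&r->tail_lock);
            if (atomic_read(&r->depth) < 64) {
                    r->slot[r->tail] = item;
                    r->tail = (r->tail + 1) % 64;
                    smp_wmb();              /* publish slot before depth */
                    atomic_inc(&r->depth);
                    ok = true;
            }
            spin_unlock(&r->tail_lock);
            return ok;
    }

    static void *ring_pop(struct ring *r)
    {
            void *item = NULL;

            spin_lock(&r->head_lock);
            if (atomic_read(&r->depth) > 0) {
                    item = r->slot[r->head];
                    r->head = (r->head + 1) % 64;
                    atomic_dec(&r->depth);
            }
            spin_unlock(&r->head_lock);
            return item;
    }

A complete treatment would pair the write barrier with a matching read barrier on the consumer side; the driver gets equivalent ordering from its existing locks and the NAPI scheduling points.
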
diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h
index 806df551be0b..417834793658 100644
--- a/arch/um/drivers/vector_kern.h
+++ b/arch/um/drivers/vector_kern.h
@@ -14,6 +14,7 @@
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <asm/atomic.h>
#include "vector_user.h"
@@ -44,7 +45,8 @@ struct vector_queue {
struct net_device *dev;
spinlock_t head_lock;
spinlock_t tail_lock;
- int queue_depth, head, tail, max_depth, max_iov_frags;
+ atomic_t queue_depth;
+ int head, tail, max_depth, max_iov_frags;
short options;
};
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index b16a5e5619d3..2ea67e6fd067 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -46,6 +46,9 @@
#define TRANS_FD "fd"
#define TRANS_FD_LEN strlen(TRANS_FD)
+#define TRANS_VDE "vde"
+#define TRANS_VDE_LEN strlen(TRANS_VDE)
+
#define VNET_HDR_FAIL "could not enable vnet headers on fd %d"
#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
@@ -434,6 +437,84 @@ fd_cleanup:
return NULL;
}
+/* enough chars to store any value of an integer type in decimal */
+#define ENOUGH(type) ((CHAR_BIT * sizeof(type) - 1) / 3 + 2)
+#define ENOUGH_OCTAL(type) ((CHAR_BIT * sizeof(type) + 2) / 3)
+/* vde_plug --descr xx --port2 xx --mod2 xx --group2 xx seqpacket://NN vnl (NULL) */
+#define VDE_MAX_ARGC 12
+#define VDE_SEQPACKET_HEAD "seqpacket://"
+#define VDE_SEQPACKET_HEAD_LEN (sizeof(VDE_SEQPACKET_HEAD) - 1)
+#define VDE_DEFAULT_DESCRIPTION "UML"
+
+static struct vector_fds *user_init_vde_fds(struct arglist *ifspec)
+{
+ char seqpacketvnl[VDE_SEQPACKET_HEAD_LEN + ENOUGH(int) + 1];
+ char *argv[VDE_MAX_ARGC] = {"vde_plug"};
+ int argc = 1;
+ int rv;
+ int sv[2];
+ struct vector_fds *result = NULL;
+
+ char *vnl = uml_vector_fetch_arg(ifspec, "vnl");
+ char *descr = uml_vector_fetch_arg(ifspec, "descr");
+ char *port = uml_vector_fetch_arg(ifspec, "port");
+ char *mode = uml_vector_fetch_arg(ifspec, "mode");
+ char *group = uml_vector_fetch_arg(ifspec, "group");
+
+ if (descr == NULL)
+ descr = VDE_DEFAULT_DESCRIPTION;
+
+ argv[argc++] = "--descr";
+ argv[argc++] = descr;
+ if (port != NULL) {
+ argv[argc++] = "--port2";
+ argv[argc++] = port;
+ }
+ if (mode != NULL) {
+ argv[argc++] = "--mod2";
+ argv[argc++] = mode;
+ }
+ if (group != NULL) {
+ argv[argc++] = "--group2";
+ argv[argc++] = group;
+ }
+ argv[argc++] = seqpacketvnl;
+ argv[argc++] = vnl;
+ argv[argc++] = NULL;
+
+ rv = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv);
+ if (rv < 0) {
+ printk(UM_KERN_ERR "vde: seqpacket socketpair err %d", -errno);
+ return NULL;
+ }
+ rv = os_set_exec_close(sv[0]);
+ if (rv < 0) {
+ printk(UM_KERN_ERR "vde: seqpacket socketpair cloexec err %d", -errno);
+ goto vde_cleanup_sv;
+ }
+ snprintf(seqpacketvnl, sizeof(seqpacketvnl), VDE_SEQPACKET_HEAD "%d", sv[1]);
+
+ run_helper(NULL, NULL, argv);
+
+ close(sv[1]);
+
+ result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+ if (result == NULL) {
+ printk(UM_KERN_ERR "fd open: allocation failed");
+ goto vde_cleanup;
+ }
+
+ result->rx_fd = sv[0];
+ result->tx_fd = sv[0];
+ result->remote_addr_size = 0;
+ result->remote_addr = NULL;
+ return result;
+
+vde_cleanup_sv:
+ close(sv[1]);
+vde_cleanup:
+ close(sv[0]);
+ return NULL;
+}
+
static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
{
int rxfd = -1, txfd = -1;
@@ -673,6 +754,8 @@ struct vector_fds *uml_vector_user_open(
return user_init_unix_fds(parsed, ID_BESS);
if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0)
return user_init_fd_fds(parsed);
+ if (strncmp(transport, TRANS_VDE, TRANS_VDE_LEN) == 0)
+ return user_init_vde_fds(parsed);
return NULL;
}
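
The new vde transport hands one end of a SOCK_SEQPACKET socketpair to a spawned vde_plug process and keeps the other end as both rx_fd and tx_fd. The seqpacketvnl buffer is sized with ENOUGH(int): (CHAR_BIT * sizeof(int) - 1) / 3 + 2 evaluates to 12 for a 32-bit int, an upper bound covering a sign and the ten digits of INT_MIN, and the array declaration adds one more byte for the terminating NUL. Going by the options parsed above, attaching an instance to a switch would look roughly like this (hypothetical paths and values):

    # hypothetical UML command line, option names as parsed by user_init_vde_fds()
    linux ... vec0:transport=vde,vnl=/tmp/switch1.ctl,descr=uml-node1,mode=0660
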
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 5bb397b65efb..83373c9963e7 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -359,11 +359,4 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
-/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, (vaddr), (ptep)); \
- __flush_tlb_one((vaddr)); \
-} while (0)
-
#endif
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 5a7c05275aa7..bce4595798da 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -28,20 +28,10 @@ struct thread_struct {
struct arch_thread arch;
jmp_buf switch_buf;
struct {
- int op;
- union {
- struct {
- int pid;
- } fork, exec;
- struct {
- int (*proc)(void *);
- void *arg;
- } thread;
- struct {
- void (*proc)(void *);
- void *arg;
- } cb;
- } u;
+ struct {
+ int (*proc)(void *);
+ void *arg;
+ } thread;
} request;
};
@@ -51,7 +41,7 @@ struct thread_struct {
.fault_addr = NULL, \
.prev_sched = NULL, \
.arch = INIT_ARCH_THREAD, \
- .request = { 0 } \
+ .request = { } \
}
/*
diff --git a/arch/um/include/asm/sysrq.h b/arch/um/include/asm/sysrq.h
deleted file mode 100644
index 8fc8c65cd357..000000000000
--- a/arch/um/include/asm/sysrq.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UM_SYSRQ_H
-#define __UM_SYSRQ_H
-
-struct task_struct;
-extern void show_trace(struct task_struct* task, unsigned long *stack);
-
-#endif
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 1e76ba40feba..140388c282f6 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -7,10 +7,7 @@
#define __MM_ID_H
struct mm_id {
- union {
- int mm_fd;
- int pid;
- } u;
+ int pid;
unsigned long stack;
int syscall_data_len;
};
diff --git a/arch/um/include/shared/skas/skas.h b/arch/um/include/shared/skas/skas.h
index ebaa116de30b..85c50122ab98 100644
--- a/arch/um/include/shared/skas/skas.h
+++ b/arch/um/include/shared/skas/skas.h
@@ -10,10 +10,8 @@
extern int userspace_pid[];
-extern int user_thread(unsigned long stack, int flags);
extern void new_thread_handler(void);
extern void handle_syscall(struct uml_pt_regs *regs);
-extern long execute_syscall_skas(void *r);
extern unsigned long current_stub_stack(void);
extern struct mm_id *current_mm_id(void);
extern void current_mm_sync(void);
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 2c15bb2c104c..cb8b5cd9285c 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -35,8 +35,5 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
clear_thread_flag(TIF_SINGLESTEP);
-#ifdef SUBARCH_EXECVE1
- SUBARCH_EXECVE1(regs->regs);
-#endif
}
EXPORT_SYMBOL(start_thread);
diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
index 4382cf02a6d1..419021175272 100644
--- a/arch/um/kernel/kmsg_dump.c
+++ b/arch/um/kernel/kmsg_dump.c
@@ -8,7 +8,7 @@
#include <os.h>
static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
static struct kmsg_dump_iter iter;
static DEFINE_SPINLOCK(lock);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f36b63f53bab..be2856af6d4c 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -109,8 +109,8 @@ void new_thread_handler(void)
schedule_tail(current->thread.prev_sched);
current->thread.prev_sched = NULL;
- fn = current->thread.request.u.thread.proc;
- arg = current->thread.request.u.thread.arg;
+ fn = current->thread.request.thread.proc;
+ arg = current->thread.request.thread.arg;
/*
* callback returns only if the kernel thread execs a process
@@ -158,8 +158,8 @@ int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
arch_copy_thread(&current->thread.arch, &p->thread.arch);
} else {
get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
- p->thread.request.u.thread.proc = args->fn;
- p->thread.request.u.thread.arg = args->fn_arg;
+ p->thread.request.thread.proc = args->fn;
+ p->thread.request.thread.arg = args->fn_arg;
handler = new_thread_handler;
}
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 3736bca626ba..680bce4bd8fa 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -29,7 +29,7 @@ static void kill_off_processes(void)
t = find_lock_task_mm(p);
if (!t)
continue;
- pid = t->mm->context.id.u.pid;
+ pid = t->mm->context.id.pid;
task_unlock(t);
os_kill_ptraced_process(pid, 1);
}
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 47f98d87ea3c..886ed5e65674 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -32,11 +32,11 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
new_id->stack = stack;
block_signals_trace();
- new_id->u.pid = start_userspace(stack);
+ new_id->pid = start_userspace(stack);
unblock_signals_trace();
- if (new_id->u.pid < 0) {
- ret = new_id->u.pid;
+ if (new_id->pid < 0) {
+ ret = new_id->pid;
goto out_free;
}
@@ -83,12 +83,12 @@ void destroy_context(struct mm_struct *mm)
* whole UML suddenly dying. Also, cover negative and
* 1 cases, since they shouldn't happen either.
*/
- if (mmu->id.u.pid < 2) {
+ if (mmu->id.pid < 2) {
printk(KERN_ERR "corrupt mm_context - pid = %d\n",
- mmu->id.u.pid);
+ mmu->id.pid);
return;
}
- os_kill_ptraced_process(mmu->id.u.pid, 1);
+ os_kill_ptraced_process(mmu->id.pid, 1);
free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES));
}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 5f9c1c5f36e2..68657988c8d1 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -39,8 +39,8 @@ int __init start_uml(void)
init_new_thread_signals();
- init_task.thread.request.u.thread.proc = start_kernel_proc;
- init_task.thread.request.u.thread.arg = NULL;
+ init_task.thread.request.thread.proc = start_kernel_proc;
+ init_task.thread.request.thread.arg = NULL;
return start_idle_thread(task_stack_page(&init_task),
&init_task.thread.switch_buf);
}
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 9ee19e566da3..b09e85279d2b 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -12,23 +12,13 @@
#include <sysdep/syscalls.h>
#include <linux/time-internal.h>
#include <asm/unistd.h>
+#include <asm/delay.h>
void handle_syscall(struct uml_pt_regs *r)
{
struct pt_regs *regs = container_of(r, struct pt_regs, regs);
int syscall;
- /*
- * If we have infinite CPU resources, then make every syscall also a
- * preemption point, since we don't have any other preemption in this
- * case, and kernel threads would basically never run until userspace
- * went to sleep, even if said userspace interacts with the kernel in
- * various ways.
- */
- if (time_travel_mode == TT_MODE_INFCPU ||
- time_travel_mode == TT_MODE_EXTERNAL)
- schedule();
-
/* Initialize the syscall number and default return value. */
UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
@@ -41,9 +31,25 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
- if (syscall >= 0 && syscall < __NR_syscalls)
- PT_REGS_SET_SYSCALL_RETURN(regs,
- EXECUTE_SYSCALL(syscall, regs));
+ if (syscall >= 0 && syscall < __NR_syscalls) {
+ unsigned long ret = EXECUTE_SYSCALL(syscall, regs);
+
+ PT_REGS_SET_SYSCALL_RETURN(regs, ret);
+
+ /*
+ * An error value here can be some form of -ERESTARTSYS
+ * and then we'd just loop. Make any failing syscall take
+ * some time, so that the caller won't just spin if something
+ * is not ready, and hopefully other things will make some
+ * progress.
+ */
+ if (IS_ERR_VALUE(ret) &&
+ (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL)) {
+ um_udelay(1);
+ schedule();
+ }
+ }
out:
syscall_trace_leave(regs);
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 746715379f12..4bb8622dc512 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -11,7 +11,6 @@
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
-#include <asm/sysrq.h>
#include <asm/stacktrace.h>
#include <os.h>
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 47b9f5e63566..29b27b90581f 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -839,7 +839,7 @@ static irqreturn_t um_timer(int irq, void *dev)
if (get_current()->mm != NULL)
{
/* userspace - relay signal, results in correct userspace timers */
- os_alarm_process(get_current()->mm->context.id.u.pid);
+ os_alarm_process(get_current()->mm->context.id.pid);
}
(*timer_clockevent.event_handler)(&timer_clockevent);
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 44c6fc697f3a..548af31d4111 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -82,16 +82,12 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
(x ? UM_PROT_EXEC : 0));
if (pte_newpage(*pte)) {
if (pte_present(*pte)) {
- if (pte_newpage(*pte)) {
- __u64 offset;
- unsigned long phys =
- pte_val(*pte) & PAGE_MASK;
- int fd = phys_mapping(phys, &offset);
-
- ret = ops->mmap(ops->mm_idp, addr,
- PAGE_SIZE, prot, fd,
- offset);
- }
+ __u64 offset;
+ unsigned long phys = pte_val(*pte) & PAGE_MASK;
+ int fd = phys_mapping(phys, &offset);
+
+ ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
+ prot, fd, offset);
} else
ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
} else if (pte_newprot(*pte))
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 5adf8f630049..f1d03cf3957f 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -528,7 +528,8 @@ int os_shutdown_socket(int fd, int r, int w)
ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
void *data, size_t data_len)
{
- char buf[CMSG_SPACE(sizeof(*fds) * n_fds)];
+#define MAX_RCV_FDS 2
+ char buf[CMSG_SPACE(sizeof(*fds) * MAX_RCV_FDS)];
struct cmsghdr *cmsg;
struct iovec iov = {
.iov_base = data,
@@ -538,10 +539,13 @@ ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_control = buf,
- .msg_controllen = sizeof(buf),
+ .msg_controllen = CMSG_SPACE(sizeof(*fds) * n_fds),
};
int n;
+ if (n_fds > MAX_RCV_FDS)
+ return -EINVAL;
+
n = recvmsg(fd, &msg, 0);
if (n < 0)
return -errno;
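
Note: the fixed MAX_RCV_FDS buffer replaces a variable-length array sized by the caller's n_fds; the worst case is now bounded at compile time and callers asking for more get -EINVAL. The control buffer is sized with CMSG_SPACE() for the maximum, while msg_controllen advertises only the space actually requested. A host-side sketch of the same SCM_RIGHTS receive pattern (standalone; names hypothetical):

	#include <string.h>
	#include <sys/socket.h>

	#define MAX_FDS 2

	static ssize_t recv_fds(int sock, int *fds, unsigned int n_fds)
	{
		char buf[CMSG_SPACE(sizeof(int) * MAX_FDS)];	/* fixed size, no VLA */
		char byte;
		struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = buf,
			/* advertise only the space the caller asked for */
			.msg_controllen = CMSG_SPACE(sizeof(int) * n_fds),
		};
		struct cmsghdr *cmsg;
		ssize_t n;

		if (n_fds > MAX_FDS)
			return -1;

		n = recvmsg(sock, &msg, 0);
		if (n <= 0)
			return n;

		cmsg = CMSG_FIRSTHDR(&msg);
		if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_RIGHTS)
			memcpy(fds, CMSG_DATA(cmsg), sizeof(int) * n_fds);
		return n;
	}
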
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index c55430775efd..9a13ac23c606 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -78,7 +78,7 @@ static inline long do_syscall_stub(struct mm_id *mm_idp)
{
struct stub_data *proc_data = (void *)mm_idp->stack;
int n, i;
- int err, pid = mm_idp->u.pid;
+ int err, pid = mm_idp->pid;
n = ptrace_setregs(pid, syscall_regs);
if (n < 0) {
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index f7088345b3fc..b6f656bcffb1 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -588,5 +588,5 @@ void reboot_skas(void)
void __switch_mm(struct mm_id *mm_idp)
{
- userspace_pid[0] = mm_idp->u.pid;
+ userspace_pid[0] = mm_idp->pid;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8f07de9e2406..2852fcd82cbd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -28,6 +28,7 @@ config X86_64
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
@@ -79,6 +80,7 @@ config X86
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_OPS if GART_IOMMU || XEN
select ARCH_HAS_EARLY_DEBUG if KGDB
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
@@ -123,6 +125,7 @@ config X86
select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
+ select ARCH_SUPPORTS_RT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if X86_CMPXCHG64
select ARCH_USE_MEMTEST
@@ -297,6 +300,7 @@ config X86
select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK
select NEED_SG_DMA_LENGTH
+ select NUMA_MEMBLKS if NUMA
select PCI_DOMAINS if PCI
select PCI_LOCKLESS_CONFIG if PCI
select PERF_EVENTS
@@ -944,7 +948,6 @@ config DMI
config GART_IOMMU
bool "Old AMD GART IOMMU support"
- select DMA_OPS
select IOMMU_HELPER
select SWIOTLB
depends on X86_64 && PCI && AMD_NB
@@ -1600,14 +1603,6 @@ config X86_64_ACPI_NUMA
help
Enable ACPI SRAT based node topology detection.
-config NUMA_EMU
- bool "NUMA emulation"
- depends on NUMA
- help
- Enable NUMA emulation. A flat machine will be split
- into virtual nodes when booted with "numa=fake=N", where N is the
- number of nodes. This is only useful for debugging.
-
config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
range 1 10
@@ -1807,6 +1802,7 @@ config X86_PAT
def_bool y
prompt "x86 PAT support" if EXPERT
depends on MTRR
+ select ARCH_USES_PG_ARCH_2
help
Use PAT attributes to setup page level cache control.
@@ -1818,10 +1814,6 @@ config X86_PAT
If unsure, say Y.
-config ARCH_USES_PG_UNCACHED
- def_bool y
- depends on X86_PAT
-
config X86_UMIP
def_bool y
prompt "User Mode Instruction Prevention" if EXPERT
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 801fd85c3ef6..cd75e78a06c1 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -24,11 +24,15 @@ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
ifdef CONFIG_MITIGATION_RETHUNK
RETHUNK_CFLAGS := -mfunction-return=thunk-extern
+RETHUNK_RUSTFLAGS := -Zfunction-return=thunk-extern
RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
+RETPOLINE_RUSTFLAGS += $(RETHUNK_RUSTFLAGS)
endif
export RETHUNK_CFLAGS
+export RETHUNK_RUSTFLAGS
export RETPOLINE_CFLAGS
+export RETPOLINE_RUSTFLAGS
export RETPOLINE_VDSO_CFLAGS
# For gcc stack alignment is specified with -mpreferred-stack-boundary,
@@ -218,9 +222,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_MITIGATION_RETPOLINE
KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+ KBUILD_RUSTFLAGS += $(RETPOLINE_RUSTFLAGS)
# Additionally, avoid generating expensive indirect jumps which
# are subject to retpolines for small number of switch cases.
- # clang turns off jump table generation by default when under
+ # LLVM turns off jump table generation by default when under
# retpoline builds, however, gcc does not for x86. This has
# only been fixed starting from gcc stable version 8.4.0 and
# onwards, but not for older ones. See gcc bug #86952.
@@ -237,6 +242,10 @@ ifdef CONFIG_CALL_PADDING
PADDING_CFLAGS := -fpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES)
KBUILD_CFLAGS += $(PADDING_CFLAGS)
export PADDING_CFLAGS
+
+PADDING_RUSTFLAGS := -Zpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES)
+KBUILD_RUSTFLAGS += $(PADDING_RUSTFLAGS)
+export PADDING_RUSTFLAGS
endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 944454306ef4..04a35b2c26e9 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -511,7 +511,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
if (init_unaccepted_memory()) {
debug_putstr("Accepting memory... ");
- accept_memory(__pa(output), __pa(output) + needed_size);
+ accept_memory(__pa(output), needed_size);
}
entry_offset = decompress_kernel(output, virt_addr, error);
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index b353a7be380c..dd8d1a85f671 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -256,6 +256,6 @@ static inline bool init_unaccepted_memory(void) { return false; }
/* Defined in EFI stub */
extern struct efi_unaccepted_memory *unaccepted_table;
-void accept_memory(phys_addr_t start, phys_addr_t end);
+void accept_memory(phys_addr_t start, unsigned long size);
#endif /* BOOT_COMPRESSED_MISC_H */
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index da8b66dce0da..327c45c5013f 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -16,6 +16,7 @@
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
+#include <asm/traps.h>
/* MMIO direction */
#define EPT_READ 0
@@ -433,6 +434,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
return -EINVAL;
}
+ if (!fault_in_kernel_space(ve->gla)) {
+ WARN_ONCE(1, "Access to userspace address is not supported");
+ return -EINVAL;
+ }
+
/*
* Reject EPT violation #VEs that split pages.
*
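
Note: handle_mmio() emulates the faulting access in kernel context, so a #VE whose guest linear address falls in the user half must be rejected; otherwise userspace could get an access emulated with kernel privileges. fault_in_kernel_space() is the existing range check, roughly (paraphrased; the real helper also special-cases the vsyscall page):

	/* true iff address is above the user address range */
	static inline bool fault_in_kernel_space(unsigned long address)
	{
		return address >= TASK_SIZE_MAX;
	}
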
diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config
index be3ee4294903..aabafa3faa6d 100644
--- a/arch/x86/configs/tiny.config
+++ b/arch/x86/configs/tiny.config
@@ -1,6 +1,2 @@
CONFIG_NOHIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-# CONFIG_HIGHMEM64G is not set
-# CONFIG_UNWINDER_ORC is not set
CONFIG_UNWINDER_GUESS=y
-# CONFIG_UNWINDER_FRAME_POINTER is not set
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index a192bdea69e2..6c23d1661b17 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,3 +11,4 @@ generated-y += xen-hypercalls.h
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
+generic-y += mmzone.h
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 8db2ec4d6cda..1f650b4dde50 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -163,20 +163,18 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
}
#define arch_atomic64_dec_return arch_atomic64_dec_return
-static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
__alternative_atomic64(add, add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
- return i;
}
-static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
__alternative_atomic64(sub, sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
- return i;
}
static __always_inline void arch_atomic64_inc(atomic64_t *v)
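
Note: the generic <linux/atomic.h> wrappers already type atomic64_add()/atomic64_sub() as void, so the s64 return value here was dead weight; dropping it matches the cross-arch prototype. Callers that need the updated value use the _return variants. A usage sketch:

	atomic64_t cnt = ATOMIC64_INIT(0);
	s64 now;

	atomic64_add(2, &cnt);			/* no return value */
	now = atomic64_add_return(3, &cnt);	/* now == 5 */
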
diff --git a/arch/x86/include/asm/cpuid.h b/arch/x86/include/asm/cpuid.h
index 80cc6386d7b1..ca4243318aad 100644
--- a/arch/x86/include/asm/cpuid.h
+++ b/arch/x86/include/asm/cpuid.h
@@ -179,6 +179,7 @@ static __always_inline bool cpuid_function_is_indexed(u32 function)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x24:
case 0x8000001d:
return true;
}
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 44949f972826..1a42f829667a 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -135,6 +135,8 @@
#define INTEL_LUNARLAKE_M IFM(6, 0xBD)
+#define INTEL_PANTHERLAKE_L IFM(6, 0xCC)
+
/* "Small Core" Processors (Atom/E-Core) */
#define INTEL_ATOM_BONNELL IFM(6, 0x1C) /* Diamondville, Pineview */
@@ -178,4 +180,7 @@
#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
#define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */
+/* Family 19 */
+#define INTEL_PANTHERCOVE_X IFM(19, 0x01) /* Diamond Rapids */
+
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 68ad4f923664..861d080ed4c6 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -14,8 +14,8 @@ BUILD_BUG_ON(1)
* be __static_call_return0.
*/
KVM_X86_OP(check_processor_compatibility)
-KVM_X86_OP(hardware_enable)
-KVM_X86_OP(hardware_disable)
+KVM_X86_OP(enable_virtualization_cpu)
+KVM_X86_OP(disable_virtualization_cpu)
KVM_X86_OP(hardware_unsetup)
KVM_X86_OP(has_emulated_msr)
KVM_X86_OP(vcpu_after_set_cpuid)
@@ -125,7 +125,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
-KVM_X86_OP(get_msr_feature)
+KVM_X86_OP(get_feature_msr)
KVM_X86_OP(check_emulate_instruction)
KVM_X86_OP(apic_init_signal_blocked)
KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4a68cb3eba78..6d9f763a7bb9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -36,6 +36,7 @@
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>
+#include <asm/reboot.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
@@ -211,6 +212,7 @@ enum exit_fastpath_completion {
EXIT_FASTPATH_NONE,
EXIT_FASTPATH_REENTER_GUEST,
EXIT_FASTPATH_EXIT_HANDLED,
+ EXIT_FASTPATH_EXIT_USERSPACE,
};
typedef enum exit_fastpath_completion fastpath_t;
@@ -280,10 +282,6 @@ enum x86_intercept_stage;
#define PFERR_PRIVATE_ACCESS BIT_ULL(49)
#define PFERR_SYNTHETIC_MASK (PFERR_IMPLICIT_ACCESS | PFERR_PRIVATE_ACCESS)
-#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
- PFERR_WRITE_MASK | \
- PFERR_PRESENT_MASK)
-
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
@@ -1629,8 +1627,10 @@ struct kvm_x86_ops {
int (*check_processor_compatibility)(void);
- int (*hardware_enable)(void);
- void (*hardware_disable)(void);
+ int (*enable_virtualization_cpu)(void);
+ void (*disable_virtualization_cpu)(void);
+ cpu_emergency_virt_cb *emergency_disable_virtualization_cpu;
+
void (*hardware_unsetup)(void);
bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
@@ -1727,6 +1727,8 @@ struct kvm_x86_ops {
void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+
+ const bool x2apic_icr_is_split;
const unsigned long required_apicv_inhibits;
bool allow_apicv_in_x2apic_without_x2apic_virtualization;
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
@@ -1806,7 +1808,7 @@ struct kvm_x86_ops {
int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
void (*guest_memory_reclaimed)(struct kvm *kvm);
- int (*get_msr_feature)(struct kvm_msr_entry *entry);
+ int (*get_feature_msr)(u32 msr, u64 *data);
int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len);
@@ -2060,6 +2062,8 @@ void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
+int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
@@ -2136,7 +2140,15 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
void kvm_update_dr7(struct kvm_vcpu *vcpu);
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
+bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ bool always_retry);
+
+static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
+ gpa_t cr2_or_gpa)
+{
+ return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
+}
+
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
ulong roots_to_free);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
@@ -2254,6 +2266,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+int kvm_cpu_get_extint(struct kvm_vcpu *v);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
@@ -2345,7 +2358,8 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
KVM_X86_QUIRK_OUT_7E_INC_RIP | \
KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \
KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \
- KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)
+ KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \
+ KVM_X86_QUIRK_SLOT_ZAP_ALL)
/*
* KVM previously used a u32 field in kvm_run to indicate the hypercall was
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 19091ebb8633..2886cb668d7f 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -238,11 +238,6 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
}
#endif
-static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
- unsigned long end)
-{
-}
-
/*
* We only want to enforce protection keys on the current process
* because we effectively have no access to PKRU for other
diff --git a/arch/x86/include/asm/mmzone.h b/arch/x86/include/asm/mmzone.h
deleted file mode 100644
index c41b41edd691..000000000000
--- a/arch/x86/include/asm/mmzone.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_X86_32
-# include <asm/mmzone_32.h>
-#else
-# include <asm/mmzone_64.h>
-#endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
deleted file mode 100644
index 2d4515e8b7df..000000000000
--- a/arch/x86/include/asm/mmzone_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
- *
- */
-
-#ifndef _ASM_X86_MMZONE_32_H
-#define _ASM_X86_MMZONE_32_H
-
-#include <asm/smp.h>
-
-#ifdef CONFIG_NUMA
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[nid])
-#endif /* CONFIG_NUMA */
-
-#endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
deleted file mode 100644
index 0c585046f744..000000000000
--- a/arch/x86/include/asm/mmzone_64.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* K8 NUMA support */
-/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
-/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
-#ifndef _ASM_X86_MMZONE_64_H
-#define _ASM_X86_MMZONE_64_H
-
-#ifdef CONFIG_NUMA
-
-#include <linux/mmdebug.h>
-#include <asm/smp.h>
-
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[nid])
-
-#endif
-#endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index a7c06a46fb76..3ae84c3b8e6d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,20 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)
#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
+/*
+ * Architectural memory types that are common to MTRRs, PAT, VMX MSRs, etc.
+ * Most MSRs support/allow only a subset of memory types, but the values
+ * themselves are common across all relevant MSRs.
+ */
+#define X86_MEMTYPE_UC 0ull /* Uncacheable, a.k.a. Strong Uncacheable */
+#define X86_MEMTYPE_WC 1ull /* Write Combining */
+/* RESERVED 2 */
+/* RESERVED 3 */
+#define X86_MEMTYPE_WT 4ull /* Write Through */
+#define X86_MEMTYPE_WP 5ull /* Write Protected */
+#define X86_MEMTYPE_WB 6ull /* Write Back */
+#define X86_MEMTYPE_UC_MINUS 7ull /* Weak Uncacheable (PAT only) */
+
/* FRED MSRs */
#define MSR_IA32_FRED_RSP0 0x1cc /* Level 0 stack pointer */
#define MSR_IA32_FRED_RSP1 0x1cd /* Level 1 stack pointer */
@@ -365,6 +379,12 @@
#define MSR_IA32_CR_PAT 0x00000277
+#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7) \
+ ((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) | \
+ (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) | \
+ (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) | \
+ (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))
+
#define MSR_IA32_DEBUGCTLMSR 0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
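
Note: each PAT entry occupies one byte of the MSR, with PAT0 in the low byte. A worked example using the memory-type values defined above (UC=0, WC=1, WT=4, WP=5, WB=6, UC-=7; the chosen layout is illustrative, not mandated):

	/* PAT0..PAT7 = WB, WC, UC-, UC, WB, WP, UC-, WT */
	PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT)
		/* == 0x0407050600070106ull */
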
@@ -1159,15 +1179,6 @@
#define MSR_IA32_VMX_VMFUNC 0x00000491
#define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492
-/* VMX_BASIC bits and bitmasks */
-#define VMX_BASIC_VMCS_SIZE_SHIFT 32
-#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
-#define VMX_BASIC_64 0x0001000000000000LLU
-#define VMX_BASIC_MEM_TYPE_SHIFT 50
-#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
-#define VMX_BASIC_MEM_TYPE_WB 6LLU
-#define VMX_BASIC_INOUT 0x0040000000000000LLU
-
/* Resctrl MSRs: */
/* - Intel: */
#define MSR_IA32_L3_QOS_CFG 0xc81
@@ -1185,11 +1196,6 @@
#define MSR_IA32_SMBA_BW_BASE 0xc0000280
#define MSR_IA32_EVT_CFG_BASE 0xc0000400
-/* MSR_IA32_VMX_MISC bits */
-#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
-#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
-#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
-
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index ef2844d69173..5469d7a7c40f 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -10,8 +10,6 @@
#ifdef CONFIG_NUMA
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
extern int numa_off;
/*
@@ -25,9 +23,6 @@ extern int numa_off;
extern s16 __apicid_to_node[MAX_LOCAL_APIC];
extern nodemask_t numa_nodes_parsed __initdata;
-extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-extern void __init numa_set_distance(int from, int to, int distance);
-
static inline void set_apicid_to_node(int apicid, s16 node)
{
__apicid_to_node[apicid] = node;
@@ -54,31 +49,20 @@ static inline int numa_cpu_node(int cpu)
extern void numa_set_node(int cpu, int node);
extern void numa_clear_node(int cpu);
extern void __init init_cpu_to_node(void);
-extern void numa_add_cpu(int cpu);
-extern void numa_remove_cpu(int cpu);
+extern void numa_add_cpu(unsigned int cpu);
+extern void numa_remove_cpu(unsigned int cpu);
extern void init_gi_nodes(void);
#else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { }
static inline void init_cpu_to_node(void) { }
-static inline void numa_add_cpu(int cpu) { }
-static inline void numa_remove_cpu(int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void init_gi_nodes(void) { }
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-void debug_cpumask_set_cpu(int cpu, int node, bool enable);
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
#endif
-#ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
-#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
-int numa_emu_cmdline(char *str);
-#else /* CONFIG_NUMA_EMU */
-static inline int numa_emu_cmdline(char *str)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_NUMA_EMU */
-
#endif /* _ASM_X86_NUMA_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e39311a89bf4..4c2d080d26b4 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -120,6 +120,34 @@ extern pmdval_t early_pmd_flags;
#define arch_end_context_switch(prev) do {} while(0)
#endif /* CONFIG_PARAVIRT_XXL */
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return native_make_pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return native_make_pmd(v & ~clear);
+}
+
+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+{
+ pudval_t v = native_pud_val(pud);
+
+ return native_make_pud(v | set);
+}
+
+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+{
+ pudval_t v = native_pud_val(pud);
+
+ return native_make_pud(v & ~clear);
+}
+
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -174,6 +202,13 @@ static inline int pud_young(pud_t pud)
return pud_flags(pud) & _PAGE_ACCESSED;
}
+static inline bool pud_shstk(pud_t pud)
+{
+ return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
+ (pud_flags(pud) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
+ (_PAGE_DIRTY | _PAGE_PSE);
+}
+
static inline int pte_write(pte_t pte)
{
/*
@@ -310,6 +345,30 @@ static inline int pud_devmap(pud_t pud)
}
#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_SPECIAL;
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_SPECIAL);
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
+
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_SPECIAL;
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_SPECIAL);
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
+
static inline int pgd_devmap(pgd_t pgd)
{
return 0;
@@ -480,20 +539,6 @@ static inline pte_t pte_mkdevmap(pte_t pte)
return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
-static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
-{
- pmdval_t v = native_pmd_val(pmd);
-
- return native_make_pmd(v | set);
-}
-
-static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
-{
- pmdval_t v = native_pmd_val(pmd);
-
- return native_make_pmd(v & ~clear);
-}
-
/* See comments above mksaveddirty_shift() */
static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
{
@@ -588,20 +633,6 @@ static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
#define pmd_mkwrite pmd_mkwrite
-static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
-{
- pudval_t v = native_pud_val(pud);
-
- return native_make_pud(v | set);
-}
-
-static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
-{
- pudval_t v = native_pud_val(pud);
-
- return native_make_pud(v & ~clear);
-}
-
/* See comments above mksaveddirty_shift() */
static inline pud_t pud_mksaveddirty(pud_t pud)
{
@@ -780,6 +811,12 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
__pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
+static inline pud_t pud_mkinvalid(pud_t pud)
+{
+ return pfn_pud(pud_pfn(pud),
+ __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
+}
+
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -827,14 +864,8 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd_result = __pmd(val);
/*
- * To avoid creating Write=0,Dirty=1 PMDs, pte_modify() needs to avoid:
- * 1. Marking Write=0 PMDs Dirty=1
- * 2. Marking Dirty=1 PMDs Write=0
- *
- * The first case cannot happen because the _PAGE_CHG_MASK will filter
- * out any Dirty bit passed in newprot. Handle the second case by
- * going through the mksaveddirty exercise. Only do this if the old
- * value was Write=1 to avoid doing this on Shadow Stack PTEs.
+ * Avoid creating shadow stack PMD by accident. See comment in
+ * pte_modify().
*/
if (oldval & _PAGE_RW)
pmd_result = pmd_mksaveddirty(pmd_result);
@@ -844,6 +875,29 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return pmd_result;
}
+static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
+{
+ pudval_t val = pud_val(pud), oldval = val;
+ pud_t pud_result;
+
+ val &= _HPAGE_CHG_MASK;
+ val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+ val = flip_protnone_guard(oldval, val, PHYSICAL_PUD_PAGE_MASK);
+
+ pud_result = __pud(val);
+
+ /*
+ * Avoid creating shadow stack PUD by accident. See comment in
+ * pte_modify().
+ */
+ if (oldval & _PAGE_RW)
+ pud_result = pud_mksaveddirty(pud_result);
+ else
+ pud_result = pud_clear_saveddirty(pud_result);
+
+ return pud_result;
+}
+
/*
* mprotect needs to preserve PAT and encryption bits when updating
* vm_page_prot
@@ -1078,8 +1132,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
- return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
- (_PAGE_PSE | _PAGE_PRESENT);
+ return pud_val(pud) & _PAGE_PSE;
}
static inline int pud_bad(pud_t pud)
@@ -1383,10 +1436,28 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
}
#endif
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline pud_t pudp_establish(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp, pud_t pud)
+{
+ page_table_check_pud_set(vma->vm_mm, pudp, pud);
+ if (IS_ENABLED(CONFIG_SMP)) {
+ return xchg(pudp, pud);
+ } else {
+ pud_t old = *pudp;
+ WRITE_ONCE(*pudp, pud);
+ return old;
+ }
+}
+#endif
+
#define __HAVE_ARCH_PMDP_INVALIDATE_AD
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
+pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp);
+
/*
* Page table pages are page-aligned. The lower half of the top
* level is used for userspace and the top half for the kernel.
@@ -1668,6 +1739,9 @@ void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte);
#define arch_check_zapped_pmd arch_check_zapped_pmd
void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd);
+#define arch_check_zapped_pud arch_check_zapped_pud
+void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud);
+
#ifdef CONFIG_XEN_PV
#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
static inline bool arch_has_hw_nonleaf_pmd_young(void)
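
Note: pudp_establish() mirrors the PMD helper: atomically install a new entry and hand back the old one, so A/D bits set by hardware in the meantime are not lost. A sketch of how the generic pudp_invalidate() declared above is expected to use it (modeled on the PMD version in mm/pgtable-generic.c; the exact body may differ):

	pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
			      pud_t *pudp)
	{
		pud_t old = pudp_establish(vma, address, pudp,
					   pud_mkinvalid(*pudp));

		flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
		return old;
	}
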
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c4407271d08..d1426b64c1b9 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -245,7 +245,6 @@ extern void cleanup_highmap(void);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#define HAVE_ARCH_UNMAPPED_AREA_VMFLAGS
#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1
@@ -271,5 +270,26 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
#include <asm/pgtable-invert.h>
-#endif /* !__ASSEMBLY__ */
+#else /* __ASSEMBLY__ */
+
+#define l4_index(x) (((x) >> 39) & 511)
+#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
+L4_START_KERNEL = l4_index(__START_KERNEL_map)
+
+L3_START_KERNEL = pud_index(__START_KERNEL_map)
+
+#define SYM_DATA_START_PAGE_ALIGNED(name) \
+ SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
+
+/* Automate the creation of 1 to 1 mapping pmd entries */
+#define PMDS(START, PERM, COUNT) \
+ i = 0 ; \
+ .rept (COUNT) ; \
+ .quad (START) + (i << PMD_SHIFT) + (PERM) ; \
+ i = i + 1 ; \
+ .endr
+
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 6536873f8fc0..c02183d3cdd7 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,12 +25,14 @@ void __noreturn machine_real_restart(unsigned int type);
#define MRR_BIOS 0
#define MRR_APM 1
-#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
typedef void (cpu_emergency_virt_cb)(void);
+#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);
void cpu_emergency_disable_virtualization(void);
#else
+static inline void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback) {}
+static inline void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) {}
static inline void cpu_emergency_disable_virtualization(void) {}
#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */
diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
index 64df897c0ee3..3918c7a434f5 100644
--- a/arch/x86/include/asm/sparsemem.h
+++ b/arch/x86/include/asm/sparsemem.h
@@ -31,13 +31,4 @@
#endif /* CONFIG_SPARSEMEM */
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_NUMA_KEEP_MEMINFO
-extern int phys_to_target_node(phys_addr_t start);
-#define phys_to_target_node phys_to_target_node
-extern int memory_add_physaddr_to_nid(u64 start);
-#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
-#endif
-#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_X86_SPARSEMEM_H */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index f0dea3750ca9..2b59b9951c90 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -516,6 +516,20 @@ struct ghcb {
u32 ghcb_usage;
} __packed;
+struct vmcb {
+ struct vmcb_control_area control;
+ union {
+ struct vmcb_save_area save;
+
+ /*
+ * For SEV-ES VMs, the save area in the VMCB is used only to
+ * save/load host state. Guest state resides in a separate
+ * page, the aptly named VM Save Area (VMSA), that is encrypted
+ * with the guest's private key.
+ */
+ struct sev_es_save_area host_sev_es_save;
+ };
+} __packed;
#define EXPECTED_VMCB_SAVE_AREA_SIZE 744
#define EXPECTED_GHCB_SAVE_AREA_SIZE 1032
@@ -532,6 +546,7 @@ static inline void __unused_size_checks(void)
BUILD_BUG_ON(sizeof(struct ghcb_save_area) != EXPECTED_GHCB_SAVE_AREA_SIZE);
BUILD_BUG_ON(sizeof(struct sev_es_save_area) != EXPECTED_SEV_ES_SAVE_AREA_SIZE);
BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
+ BUILD_BUG_ON(offsetof(struct vmcb, save) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);
/* Check offsets of reserved fields */
@@ -568,11 +583,6 @@ static inline void __unused_size_checks(void)
BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0);
}
-struct vmcb {
- struct vmcb_control_area control;
- struct vmcb_save_area save;
-} __packed;
-
#define SVM_CPUID_FUNC 0x8000000a
#define SVM_SELECTOR_S_SHIFT 4
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 04789f45ab2b..afce8ee5d7b7 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -54,6 +54,17 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
#define valid_user_address(x) ((__force long)(x) >= 0)
/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
+#define masked_user_access_begin(x) ({ \
+ __auto_type __masked_ptr = (x); \
+ __masked_ptr = mask_user_address(__masked_ptr); \
+ __uaccess_begin(); __masked_ptr; })
+
+/*
* User pointers can have tag bits on x86-64. This scheme tolerates
* arbitrary values in those bits rather than masking them off.
*
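
Note: the masking trick works because a valid user pointer on x86-64 has the sign bit clear, so (long)(x)>>63 is 0 and the OR is a no-op; a kernel address makes the shift produce all-ones and turns the pointer into ~0, which faults on dereference. That property is what lets callers skip the speculation barrier a conditional user_access_begin() needs. A caller sketch (pattern modeled on strncpy_from_user(); can_do_masked_user_access() assumed from <linux/uaccess.h>):

	long res;

	if (can_do_masked_user_access())
		from = masked_user_access_begin(from);
	else if (!user_access_begin(from, sizeof(long)))
		return -EFAULT;
	unsafe_get_user(res, (const long __user *)from, Efault);
	user_access_end();
	return res;
Efault:
	user_access_end();
	return -EFAULT;
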
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index d77a31039f24..f7fd4369b821 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -122,19 +122,17 @@
#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
-#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
-#define VMX_MISC_SAVE_EFER_LMA 0x00000020
-#define VMX_MISC_ACTIVITY_HLT 0x00000040
-#define VMX_MISC_ACTIVITY_WAIT_SIPI 0x00000100
-#define VMX_MISC_ZERO_LEN_INS 0x40000000
-#define VMX_MISC_MSR_LIST_MULTIPLIER 512
-
/* VMFUNC functions */
#define VMFUNC_CONTROL_BIT(x) BIT((VMX_FEATURE_##x & 0x1f) - 28)
#define VMX_VMFUNC_EPTP_SWITCHING VMFUNC_CONTROL_BIT(EPTP_SWITCHING)
#define VMFUNC_EPTP_ENTRIES 512
+#define VMX_BASIC_32BIT_PHYS_ADDR_ONLY BIT_ULL(48)
+#define VMX_BASIC_DUAL_MONITOR_TREATMENT BIT_ULL(49)
+#define VMX_BASIC_INOUT BIT_ULL(54)
+#define VMX_BASIC_TRUE_CTLS BIT_ULL(55)
+
static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
{
return vmx_basic & GENMASK_ULL(30, 0);
@@ -145,9 +143,30 @@ static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
}
+static inline u32 vmx_basic_vmcs_mem_type(u64 vmx_basic)
+{
+ return (vmx_basic & GENMASK_ULL(53, 50)) >> 50;
+}
+
+static inline u64 vmx_basic_encode_vmcs_info(u32 revision, u16 size, u8 memtype)
+{
+ return revision | ((u64)size << 32) | ((u64)memtype << 50);
+}
+
+#define VMX_MISC_SAVE_EFER_LMA BIT_ULL(5)
+#define VMX_MISC_ACTIVITY_HLT BIT_ULL(6)
+#define VMX_MISC_ACTIVITY_SHUTDOWN BIT_ULL(7)
+#define VMX_MISC_ACTIVITY_WAIT_SIPI BIT_ULL(8)
+#define VMX_MISC_INTEL_PT BIT_ULL(14)
+#define VMX_MISC_RDMSR_IN_SMM BIT_ULL(15)
+#define VMX_MISC_VMXOFF_BLOCK_SMI BIT_ULL(28)
+#define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS BIT_ULL(29)
+#define VMX_MISC_ZERO_LEN_INS BIT_ULL(30)
+#define VMX_MISC_MSR_LIST_MULTIPLIER 512
+
static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
{
- return vmx_misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+ return vmx_misc & GENMASK_ULL(4, 0);
}
static inline int vmx_misc_cr3_count(u64 vmx_misc)
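
Note: with the open-coded VMX_BASIC masks replaced by helpers, the fields have a single encode/decode path. An illustrative round-trip (values arbitrary):

	u64 basic = vmx_basic_encode_vmcs_info(1, 0x1000, X86_MEMTYPE_WB);

	/* vmx_basic_vmcs_revision_id(basic) == 1      (bits 30:0)  */
	/* vmx_basic_vmcs_size(basic)        == 0x1000 (bits 44:32) */
	/* vmx_basic_vmcs_mem_type(basic)    == 6 (WB) (bits 53:50) */
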
@@ -508,9 +527,10 @@ enum vmcs_field {
#define VMX_EPTP_PWL_4 0x18ull
#define VMX_EPTP_PWL_5 0x20ull
#define VMX_EPTP_AD_ENABLE_BIT (1ull << 6)
+/* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. */
#define VMX_EPTP_MT_MASK 0x7ull
-#define VMX_EPTP_MT_WB 0x6ull
-#define VMX_EPTP_MT_UC 0x0ull
+#define VMX_EPTP_MT_WB X86_MEMTYPE_WB
+#define VMX_EPTP_MT_UC X86_MEMTYPE_UC
#define VMX_EPT_READABLE_MASK 0x1ull
#define VMX_EPT_WRITABLE_MASK 0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index bf57a824f722..a8debbf2f702 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -439,6 +439,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
+#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index a3aa0199222e..af44fd5dbd7c 100644
--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -331,7 +331,6 @@ static const struct file_operations mce_chrdev_ops = {
.poll = mce_chrdev_poll,
.unlocked_ioctl = mce_chrdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice mce_chrdev_device = {
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 2a2fc14955cd..989d368be04f 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -55,6 +55,12 @@
#include "mtrr.h"
+static_assert(X86_MEMTYPE_UC == MTRR_TYPE_UNCACHABLE);
+static_assert(X86_MEMTYPE_WC == MTRR_TYPE_WRCOMB);
+static_assert(X86_MEMTYPE_WT == MTRR_TYPE_WRTHROUGH);
+static_assert(X86_MEMTYPE_WP == MTRR_TYPE_WRPROT);
+static_assert(X86_MEMTYPE_WB == MTRR_TYPE_WRBACK);
+
/* arch_phys_wc_add returns an MTRR register index plus this offset. */
#define MTRR_TO_PHYS_WC_OFFSET 1000
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index e69489d48625..972e6b6b0481 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1567,7 +1567,6 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations pseudo_lock_dev_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = NULL,
.write = NULL,
.open = pseudo_lock_dev_open,
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 3a79105455f1..9ace84486499 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -903,10 +903,10 @@ int sgx_set_attribute(unsigned long *allowed_attributes,
{
struct fd f = fdget(attribute_fd);
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- if (f.file->f_op != &sgx_provision_fops) {
+ if (fd_file(f)->f_op != &sgx_provision_fops) {
fdput(f);
return -EINVAL;
}
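
Note: fd_file() is the new accessor for struct fd; going through it instead of f.file lets the VFS later change the struct fd representation without touching call sites. The usage pattern itself is unchanged:

	struct fd f = fdget(attribute_fd);

	if (!fd_file(f))		/* no file behind the descriptor */
		return -EINVAL;
	/* ... use fd_file(f)->f_op, etc. ... */
	fdput(f);
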
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 330922b328bf..16752b8dfa89 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -32,13 +32,6 @@
* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
* because we need identity-mapped pages.
*/
-#define l4_index(x) (((x) >> 39) & 511)
-#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-
-L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
-L4_START_KERNEL = l4_index(__START_KERNEL_map)
-
-L3_START_KERNEL = pud_index(__START_KERNEL_map)
__HEAD
.code64
@@ -577,9 +570,6 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
SYM_CODE_END(vc_no_ghcb)
#endif
-#define SYM_DATA_START_PAGE_ALIGNED(name) \
- SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
-
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
* Each PGD needs to be 8k long and 8k aligned. We do not
@@ -601,14 +591,6 @@ SYM_CODE_END(vc_no_ghcb)
#define PTI_USER_PGD_FILL 0
#endif
-/* Automate the creation of 1 to 1 mapping pmd entries */
-#define PMDS(START, PERM, COUNT) \
- i = 0 ; \
- .rept (COUNT) ; \
- .quad (START) + (i << PMD_SHIFT) + (PERM) ; \
- i = i + 1 ; \
- .endr
-
__INITDATA
.balign 4
@@ -708,8 +690,6 @@ SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
.endr
SYM_DATA_END(level1_fixmap_pgt)
-#undef PMDS
-
.data
.align 16
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 01d7cd85ef97..87f8c9a71c49 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -121,7 +121,7 @@ static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
}
unsigned long
-arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
+arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
@@ -158,7 +158,7 @@ arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned l
}
unsigned long
-arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr0,
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
@@ -228,20 +228,5 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
- return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-}
-
-unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
-}
-
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
-{
- return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, flags, 0);
+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags, 0);
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 6e73403e874f..6726be89b7a6 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -357,8 +357,7 @@ SECTIONS
PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif
- RUNTIME_CONST(shift, d_hash_shift)
- RUNTIME_CONST(ptr, dentry_hashtable)
+ RUNTIME_CONST_VARIABLES
. = ALIGN(PAGE_SIZE);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2617be544480..41786b834b16 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -705,7 +705,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
- F(AMX_COMPLEX)
+ F(AMX_COMPLEX) | F(AVX10)
);
kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
@@ -721,6 +721,10 @@ void kvm_set_cpu_caps(void)
SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
);
+ kvm_cpu_cap_init_kvm_defined(CPUID_24_0_EBX,
+ F(AVX10_128) | F(AVX10_256) | F(AVX10_512)
+ );
+
kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
@@ -949,7 +953,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
switch (function) {
case 0:
/* Limited to the highest leaf implemented in KVM. */
- entry->eax = min(entry->eax, 0x1fU);
+ entry->eax = min(entry->eax, 0x24U);
break;
case 1:
cpuid_entry_override(entry, CPUID_1_EDX);
@@ -1174,6 +1178,28 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
}
break;
+ case 0x24: {
+ u8 avx10_version;
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_AVX10)) {
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ break;
+ }
+
+ /*
+ * The AVX10 version is encoded in EBX[7:0]. Note, the version
+ * is guaranteed to be >=1 if AVX10 is supported. Note #2, the
+ * version needs to be captured before overriding EBX features!
+ */
+ avx10_version = min_t(u8, entry->ebx & 0xff, 1);
+ cpuid_entry_override(entry, CPUID_24_0_EBX);
+ entry->ebx |= avx10_version;
+
+ entry->eax = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
+ break;
+ }
case KVM_CPUID_SIGNATURE: {
const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
entry->eax = KVM_CPUID_FEATURES;
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 3d7eb11d0e45..63f66c51975a 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -108,7 +108,7 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
* Read pending interrupt(from non-APIC source)
* vector and intack.
*/
-static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
if (!kvm_cpu_has_extint(v)) {
WARN_ON(!lapic_in_kernel(v));
@@ -131,6 +131,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
} else
return kvm_pic_read_irq(v->kvm); /* PIC */
}
+EXPORT_SYMBOL_GPL(kvm_cpu_get_extint);
/*
* Read pending interrupt vector and intack.
@@ -141,9 +142,12 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
if (vector != -1)
return vector; /* PIC */
- return kvm_get_apic_interrupt(v); /* APIC */
+ vector = kvm_apic_has_interrupt(v); /* APIC */
+ if (vector != -1)
+ kvm_apic_ack_interrupt(v, vector);
+
+ return vector;
}
-EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
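
Note: splitting the old kvm_get_apic_interrupt() into a query plus an explicit ack lets a caller observe the pending vector without the side effect of moving it from IRR to ISR; kvm_cpu_get_interrupt() recreates the combined behavior:

	vector = kvm_apic_has_interrupt(vcpu);		/* query only */
	if (vector != -1)
		kvm_apic_ack_interrupt(vcpu, vector);	/* IRR -> ISR, PPR update */
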
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5bb481aefcbc..2098dc689088 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1944,7 +1944,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
u64 ns = 0;
ktime_t expire;
struct kvm_vcpu *vcpu = apic->vcpu;
- unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
+ u32 this_tsc_khz = vcpu->arch.virtual_tsc_khz;
unsigned long flags;
ktime_t now;
@@ -2453,6 +2453,43 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
+
+int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
+{
+ if (data & X2APIC_ICR_RESERVED_BITS)
+ return 1;
+
+ /*
+ * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but
+ * only AMD requires it to be zero; Intel essentially just ignores the
+ * bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled,
+ * the CPU performs the reserved bits checks, i.e. the underlying CPU
+ * behavior will "win". Arbitrarily clear the BUSY bit, as there is no
+ * sane way to provide consistent behavior with respect to hardware.
+ */
+ data &= ~APIC_ICR_BUSY;
+
+ kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
+ if (kvm_x86_ops.x2apic_icr_is_split) {
+ kvm_lapic_set_reg(apic, APIC_ICR, data);
+ kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32);
+ } else {
+ kvm_lapic_set_reg64(apic, APIC_ICR, data);
+ }
+ trace_kvm_apic_write(APIC_ICR, data);
+ return 0;
+}
+
+static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
+{
+ if (kvm_x86_ops.x2apic_icr_is_split)
+ return (u64)kvm_lapic_get_reg(apic, APIC_ICR) |
+ (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32;
+
+ return kvm_lapic_get_reg64(apic, APIC_ICR);
+}
+
/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
@@ -2470,7 +2507,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
* maybe-unnecessary write, and both are in the noise anyway.
*/
if (apic_x2apic_mode(apic) && offset == APIC_ICR)
- kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
+ WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic)));
else
kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
}
@@ -2922,14 +2959,13 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
}
}
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
{
- int vector = kvm_apic_has_interrupt(vcpu);
struct kvm_lapic *apic = vcpu->arch.apic;
u32 ppr;
- if (vector == -1)
- return -1;
+ if (WARN_ON_ONCE(vector < 0 || !apic))
+ return;
/*
* We get here even with APIC virtualization enabled, if doing
@@ -2957,8 +2993,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
__apic_update_ppr(apic, &ppr);
}
- return vector;
}
+EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s, bool set)
@@ -2990,18 +3026,22 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
/*
* In x2APIC mode, the LDR is fixed and based on the id. And
- * ICR is internally a single 64-bit register, but needs to be
- * split to ICR+ICR2 in userspace for backwards compatibility.
+ * if the ICR is _not_ split, ICR is internally a single 64-bit
+ * register, but needs to be split to ICR+ICR2 in userspace for
+ * backwards compatibility.
*/
- if (set) {
+ if (set)
*ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);
- icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
- (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
- __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
- } else {
- icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
- __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
+ if (!kvm_x86_ops.x2apic_icr_is_split) {
+ if (set) {
+ icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
+ (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
+ __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
+ } else {
+ icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
+ __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
+ }
}
}
@@ -3194,22 +3234,12 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
return 0;
}
-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
-{
- data &= ~APIC_ICR_BUSY;
-
- kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
- kvm_lapic_set_reg64(apic, APIC_ICR, data);
- trace_kvm_apic_write(APIC_ICR, data);
- return 0;
-}
-
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{
u32 low;
if (reg == APIC_ICR) {
- *data = kvm_lapic_get_reg64(apic, APIC_ICR);
+ *data = kvm_x2apic_icr_read(apic);
return 0;
}
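
Note: x2apic_icr_is_split captures a real hardware difference, as wired up by this series: AMD's x2AVIC keeps the legacy ICR/ICR2 pair while Intel tracks the x2APIC ICR as a single 64-bit register, and KVM now mirrors whichever layout backs the vCPU. Schematically:

	/* split (e.g. AMD x2AVIC):  APIC_ICR  = low 32 bits,
	 *                           APIC_ICR2 = high 32 bits
	 * not split (e.g. Intel):   APIC_ICR  = one 64-bit value */
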
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 7ef8ae73e82d..1b8ef9856422 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -88,15 +88,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
-u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
void kvm_recalculate_apic_map(struct kvm *kvm);
void kvm_apic_set_version(struct kvm_vcpu *vcpu);
void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 4341e0e28571..9dc5dd43ae7f 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -223,8 +223,6 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
bool kvm_mmu_may_ignore_guest_pat(void);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
-
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7813d28b082f..e52f990548df 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -614,32 +614,6 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
return __get_spte_lockless(sptep);
}
-/* Returns the Accessed status of the PTE and resets it at the same time. */
-static bool mmu_spte_age(u64 *sptep)
-{
- u64 spte = mmu_spte_get_lockless(sptep);
-
- if (!is_accessed_spte(spte))
- return false;
-
- if (spte_ad_enabled(spte)) {
- clear_bit((ffs(shadow_accessed_mask) - 1),
- (unsigned long *)sptep);
- } else {
- /*
- * Capture the dirty status of the page, so that it doesn't get
- * lost when the SPTE is marked for access tracking.
- */
- if (is_writable_pte(spte))
- kvm_set_pfn_dirty(spte_to_pfn(spte));
-
- spte = mark_spte_for_access_track(spte);
- mmu_spte_update_no_track(sptep, spte);
- }
-
- return true;
-}
-
static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
{
return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
@@ -938,6 +912,7 @@ static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu
* in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
* pte_list_desc containing more mappings.
*/
+#define KVM_RMAP_MANY BIT(0)
/*
* Returns the number of pointers in the rmap chain, not counting the new one.
@@ -950,16 +925,16 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
if (!rmap_head->val) {
rmap_head->val = (unsigned long)spte;
- } else if (!(rmap_head->val & 1)) {
+ } else if (!(rmap_head->val & KVM_RMAP_MANY)) {
desc = kvm_mmu_memory_cache_alloc(cache);
desc->sptes[0] = (u64 *)rmap_head->val;
desc->sptes[1] = spte;
desc->spte_count = 2;
desc->tail_count = 0;
- rmap_head->val = (unsigned long)desc | 1;
+ rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY;
++count;
} else {
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
count = desc->tail_count + desc->spte_count;
/*
@@ -968,10 +943,10 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
*/
if (desc->spte_count == PTE_LIST_EXT) {
desc = kvm_mmu_memory_cache_alloc(cache);
- desc->more = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc->more = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
desc->spte_count = 0;
desc->tail_count = count;
- rmap_head->val = (unsigned long)desc | 1;
+ rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY;
}
desc->sptes[desc->spte_count++] = spte;
}
@@ -982,7 +957,7 @@ static void pte_list_desc_remove_entry(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
struct pte_list_desc *desc, int i)
{
- struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
int j = head_desc->spte_count - 1;
/*
@@ -1011,7 +986,7 @@ static void pte_list_desc_remove_entry(struct kvm *kvm,
if (!head_desc->more)
rmap_head->val = 0;
else
- rmap_head->val = (unsigned long)head_desc->more | 1;
+ rmap_head->val = (unsigned long)head_desc->more | KVM_RMAP_MANY;
mmu_free_pte_list_desc(head_desc);
}
@@ -1024,13 +999,13 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte,
if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
return;
- if (!(rmap_head->val & 1)) {
+ if (!(rmap_head->val & KVM_RMAP_MANY)) {
if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
return;
rmap_head->val = 0;
} else {
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
while (desc) {
for (i = 0; i < desc->spte_count; ++i) {
if (desc->sptes[i] == spte) {
@@ -1063,12 +1038,12 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
if (!rmap_head->val)
return false;
- if (!(rmap_head->val & 1)) {
+ if (!(rmap_head->val & KVM_RMAP_MANY)) {
mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
goto out;
}
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
for (; desc; desc = next) {
for (i = 0; i < desc->spte_count; i++)
@@ -1088,10 +1063,10 @@ unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
if (!rmap_head->val)
return 0;
- else if (!(rmap_head->val & 1))
+ else if (!(rmap_head->val & KVM_RMAP_MANY))
return 1;
- desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
return desc->tail_count + desc->spte_count;
}
@@ -1153,13 +1128,13 @@ static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
if (!rmap_head->val)
return NULL;
- if (!(rmap_head->val & 1)) {
+ if (!(rmap_head->val & KVM_RMAP_MANY)) {
iter->desc = NULL;
sptep = (u64 *)rmap_head->val;
goto out;
}
- iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+ iter->desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
iter->pos = 0;
sptep = iter->desc->sptes[iter->pos];
out:
@@ -1307,15 +1282,6 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
return flush;
}
-/**
- * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
- * @kvm: kvm instance
- * @slot: slot to protect
- * @gfn_offset: start of the BITS_PER_LONG pages we care about
- * @mask: indicates which pages we should protect
- *
- * Used when we do not need to care about huge page mappings.
- */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
@@ -1339,16 +1305,6 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
}
}
-/**
- * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
- * protect the page if the D-bit isn't supported.
- * @kvm: kvm instance
- * @slot: slot to clear D-bit
- * @gfn_offset: start of the BITS_PER_LONG pages we care about
- * @mask: indicates which pages we should clear D-bit
- *
- * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
- */
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
@@ -1372,24 +1328,16 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
}
}
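
Both masked helpers decode the same convention: gfn_offset plus the index of each set bit in mask selects one 4KiB page relative to the slot's base gfn. A hedged sketch of that decoding loop (the handler and names here are illustrative, not the kernel's):

	#include <stdint.h>

	typedef uint64_t gfn_t;

	/* Illustrative stand-in for write-protecting / D-bit-clearing one gfn. */
	static void handle_one_gfn(gfn_t gfn) { (void)gfn; }

	static void for_each_masked_gfn(gfn_t base_gfn, gfn_t gfn_offset,
					unsigned long mask)
	{
		while (mask) {
			/* Lowest set bit -> page index within the 64-page window. */
			int bit = __builtin_ctzl(mask);

			handle_one_gfn(base_gfn + gfn_offset + bit);
			mask &= mask - 1;	/* clear that bit and continue */
		}
	}

	int main(void)
	{
		/* Pages 0, 3 and 5 of the window starting at gfn 0x1000. */
		for_each_masked_gfn(0x1000, 0, (1UL << 0) | (1UL << 3) | (1UL << 5));
		return 0;
	}
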
-/**
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * PT level pages.
- *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
- *
- * We need to care about huge page mappings: e.g. during dirty logging we may
- * have such mappings.
- */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
/*
- * Huge pages are NOT write protected when we start dirty logging in
- * initially-all-set mode; must write protect them here so that they
- * are split to 4K on the first write.
+ * If the slot was assumed to be "initially all dirty", write-protect
+ * huge pages to ensure they are split to 4KiB on the first write (KVM
+ * dirty logs at 4KiB granularity). If eager page splitting is enabled,
+ * immediately try to split huge pages, e.g. so that vCPUs don't get
+ * saddled with the cost of splitting.
*
* The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
* of memslot has no such restriction, so the range can cross two large
@@ -1411,7 +1359,16 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
PG_LEVEL_2M);
}
- /* Now handle 4K PTEs. */
+ /*
+ * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
+ * mask. If PML is enabled and the GFN doesn't need to be write-
+ * protected for other reasons, e.g. shadow paging, clear the Dirty bit.
+ * Otherwise clear the Writable bit.
+ *
+	 * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is
+	 * enabled; it chooses between clearing the Dirty bit and the Writable
+	 * bit based on the context.
+ */
if (kvm_x86_ops.cpu_dirty_log_size)
kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
else
@@ -1453,16 +1410,10 @@ static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}
-static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- const struct kvm_memory_slot *slot)
-{
- return kvm_zap_all_rmap_sptes(kvm, rmap_head);
-}
-
static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level)
+ const struct kvm_memory_slot *slot)
{
- return __kvm_zap_rmap(kvm, rmap_head, slot);
+ return kvm_zap_all_rmap_sptes(kvm, rmap_head);
}
struct slot_rmap_walk_iterator {
@@ -1513,7 +1464,7 @@ static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
while (++iterator->rmap <= iterator->end_rmap) {
- iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
+ iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level);
if (iterator->rmap->val)
return;
@@ -1534,23 +1485,71 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
slot_rmap_walk_okay(_iter_); \
slot_rmap_walk_next(_iter_))
-typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn,
- int level);
+/* The return value indicates whether a TLB flush on all vCPUs is needed. */
+typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head,
+ const struct kvm_memory_slot *slot);
-static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
- struct kvm_gfn_range *range,
- rmap_handler_t handler)
+static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ slot_rmaps_handler fn,
+ int start_level, int end_level,
+ gfn_t start_gfn, gfn_t end_gfn,
+ bool can_yield, bool flush_on_yield,
+ bool flush)
{
struct slot_rmap_walk_iterator iterator;
- bool ret = false;
- for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
- range->start, range->end - 1, &iterator)
- ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
- iterator.level);
+ lockdep_assert_held_write(&kvm->mmu_lock);
- return ret;
+ for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
+ end_gfn, &iterator) {
+ if (iterator.rmap)
+ flush |= fn(kvm, iterator.rmap, slot);
+
+ if (!can_yield)
+ continue;
+
+ if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+ if (flush && flush_on_yield) {
+ kvm_flush_remote_tlbs_range(kvm, start_gfn,
+ iterator.gfn - start_gfn + 1);
+ flush = false;
+ }
+ cond_resched_rwlock_write(&kvm->mmu_lock);
+ }
+ }
+
+ return flush;
+}
+
+static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ slot_rmaps_handler fn,
+ int start_level, int end_level,
+ bool flush_on_yield)
+{
+ return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
+ slot->base_gfn, slot->base_gfn + slot->npages - 1,
+ true, flush_on_yield, false);
+}
+
+static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ slot_rmaps_handler fn,
+ bool flush_on_yield)
+{
+ return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
+}
+
+static bool __kvm_rmap_zap_gfn_range(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ gfn_t start, gfn_t end, bool can_yield,
+ bool flush)
+{
+ return __walk_slot_rmaps(kvm, slot, kvm_zap_rmap,
+ PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+ start, end - 1, can_yield, true, flush);
}
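
The ordering inside __walk_slot_rmaps() is the load-bearing detail: any pending flush owed by the walk must be performed before yielding mmu_lock, otherwise another task could observe stale TLB entries for ranges already zapped. A simplified sketch of the pattern, with every kernel primitive stubbed out:

	#include <stdbool.h>

	/* Stubs standing in for the kernel primitives used above. */
	static bool handler_needs_flush(int i)      { return i % 3 == 0; }
	static bool should_yield(void)              { return false; }
	static void flush_range(int start, int end) { (void)start; (void)end; }
	static void yield_lock(void)                { }

	static bool walk(int start, int end, bool flush)
	{
		for (int i = start; i < end; i++) {
			flush |= handler_needs_flush(i);

			/* Flush what we owe *before* dropping the lock, then
			 * restart accumulation from a clean slate. */
			if (should_yield()) {
				if (flush) {
					flush_range(start, i);
					flush = false;
				}
				yield_lock();
			}
		}
		return flush;	/* caller still owes this flush */
	}

	int main(void)
	{
		return walk(0, 8, false) ? 0 : 0;
	}
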
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1558,7 +1557,9 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool flush = false;
if (kvm_memslots_have_rmaps(kvm))
- flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
+ flush = __kvm_rmap_zap_gfn_range(kvm, range->slot,
+ range->start, range->end,
+ range->may_block, flush);
if (tdp_mmu_enabled)
flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
@@ -1570,31 +1571,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return flush;
}
-static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level)
-{
- u64 *sptep;
- struct rmap_iterator iter;
- int young = 0;
-
- for_each_rmap_spte(rmap_head, &iter, sptep)
- young |= mmu_spte_age(sptep);
-
- return young;
-}
-
-static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level)
-{
- u64 *sptep;
- struct rmap_iterator iter;
-
- for_each_rmap_spte(rmap_head, &iter, sptep)
- if (is_accessed_spte(*sptep))
- return true;
- return false;
-}
-
#define RMAP_RECYCLE_THRESHOLD 1000
static void __rmap_add(struct kvm *kvm,
@@ -1629,12 +1605,52 @@ static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
__rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
}
+static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
+ struct kvm_gfn_range *range, bool test_only)
+{
+ struct slot_rmap_walk_iterator iterator;
+ struct rmap_iterator iter;
+ bool young = false;
+ u64 *sptep;
+
+ for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+ range->start, range->end - 1, &iterator) {
+ for_each_rmap_spte(iterator.rmap, &iter, sptep) {
+ u64 spte = *sptep;
+
+ if (!is_accessed_spte(spte))
+ continue;
+
+ if (test_only)
+ return true;
+
+ if (spte_ad_enabled(spte)) {
+ clear_bit((ffs(shadow_accessed_mask) - 1),
+ (unsigned long *)sptep);
+ } else {
+ /*
+ * Capture the dirty status of the page, so that
+ * it doesn't get lost when the SPTE is marked
+ * for access tracking.
+ */
+ if (is_writable_pte(spte))
+ kvm_set_pfn_dirty(spte_to_pfn(spte));
+
+ spte = mark_spte_for_access_track(spte);
+ mmu_spte_update_no_track(sptep, spte);
+ }
+ young = true;
+ }
+ }
+ return young;
+}
+
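
The aging path above leans on clear_bit((ffs(shadow_accessed_mask) - 1), ...) to atomically clear the Accessed bit when A/D bits are in use. The ffs() idiom converts a one-bit mask into the bit index the bitop expects; a sketch with an illustrative bit position (the real shadow_accessed_mask depends on the paging mode):

	#include <assert.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned long accessed_mask = 1UL << 8;	/* illustrative A-bit */
		unsigned long spte = (1UL << 8) | 0x7;

		/* ffs() is 1-based, bit indices are 0-based, hence the -1. */
		int idx = ffs((int)accessed_mask) - 1;

		spte &= ~(1UL << idx);
		assert(!(spte & accessed_mask));
		return 0;
	}
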
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
bool young = false;
if (kvm_memslots_have_rmaps(kvm))
- young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
+ young = kvm_rmap_age_gfn_range(kvm, range, false);
if (tdp_mmu_enabled)
young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
@@ -1647,7 +1663,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool young = false;
if (kvm_memslots_have_rmaps(kvm))
- young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
+ young = kvm_rmap_age_gfn_range(kvm, range, true);
if (tdp_mmu_enabled)
young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
@@ -2713,36 +2729,49 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
write_unlock(&kvm->mmu_lock);
}
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ bool always_retry)
{
- struct kvm_mmu_page *sp;
+ struct kvm *kvm = vcpu->kvm;
LIST_HEAD(invalid_list);
- int r;
+ struct kvm_mmu_page *sp;
+ gpa_t gpa = cr2_or_gpa;
+ bool r = false;
+
+ /*
+ * Bail early if there aren't any write-protected shadow pages to avoid
+	 * unnecessarily taking mmu_lock, e.g. if the gfn is write-tracked
+ * by a third party. Reading indirect_shadow_pages without holding
+ * mmu_lock is safe, as this is purely an optimization, i.e. a false
+ * positive is benign, and a false negative will simply result in KVM
+ * skipping the unprotect+retry path, which is also an optimization.
+ */
+ if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
+ goto out;
+
+ if (!vcpu->arch.mmu->root_role.direct) {
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
+ if (gpa == INVALID_GPA)
+ goto out;
+ }
- r = 0;
write_lock(&kvm->mmu_lock);
- for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
- r = 1;
+ for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa))
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
- }
+
+ /*
+ * Snapshot the result before zapping, as zapping will remove all list
+ * entries, i.e. checking the list later would yield a false negative.
+ */
+ r = !list_empty(&invalid_list);
kvm_mmu_commit_zap_page(kvm, &invalid_list);
write_unlock(&kvm->mmu_lock);
- return r;
-}
-
-static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
-{
- gpa_t gpa;
- int r;
-
- if (vcpu->arch.mmu->root_role.direct)
- return 0;
-
- gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
-
- r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-
+out:
+ if (r || always_retry) {
+ vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
+ vcpu->arch.last_retry_addr = cr2_or_gpa;
+ }
return r;
}
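
The last_retry_eip/last_retry_addr pair recorded on the way out of this function is what kvm_mmu_write_protect_fault() later consults to break would-be infinite unprotect+retry cycles. A minimal sketch of that guard (the struct and helper names are illustrative):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct retry_guard { uint64_t last_rip, last_addr; };

	/* Returns true if retrying is allowed, and records the attempt. */
	static bool allow_unprotect_retry(struct retry_guard *g,
					  uint64_t rip, uint64_t addr)
	{
		/* Same instruction faulting on the same address again: the
		 * previous unprotect didn't help, so force emulation instead. */
		if (g->last_rip == rip && g->last_addr == addr)
			return false;

		g->last_rip = rip;
		g->last_addr = addr;
		return true;
	}

	int main(void)
	{
		struct retry_guard g = {0};

		assert(allow_unprotect_retry(&g, 0xf00, 0x1000));	/* first try */
		assert(!allow_unprotect_retry(&g, 0xf00, 0x1000));	/* same pair */
		return 0;
	}
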
@@ -2914,10 +2943,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
trace_kvm_mmu_set_spte(level, gfn, sptep);
}
- if (wrprot) {
- if (write_fault)
- ret = RET_PF_EMULATE;
- }
+ if (wrprot && write_fault)
+ ret = RET_PF_WRITE_PROTECTED;
if (flush)
kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
@@ -4549,7 +4576,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
return RET_PF_RETRY;
if (page_fault_handle_page_track(vcpu, fault))
- return RET_PF_EMULATE;
+ return RET_PF_WRITE_PROTECTED;
r = fast_page_fault(vcpu, fault);
if (r != RET_PF_INVALID)
@@ -4618,8 +4645,6 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
if (!flags) {
trace_kvm_page_fault(vcpu, fault_address, error_code);
- if (kvm_event_needs_reinjection(vcpu))
- kvm_mmu_unprotect_page_virt(vcpu, fault_address);
r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
insn_len);
} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
@@ -4642,7 +4667,7 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
int r;
if (page_fault_handle_page_track(vcpu, fault))
- return RET_PF_EMULATE;
+ return RET_PF_WRITE_PROTECTED;
r = fast_page_fault(vcpu, fault);
if (r != RET_PF_INVALID)
@@ -4719,6 +4744,7 @@ static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
switch (r) {
case RET_PF_FIXED:
case RET_PF_SPURIOUS:
+ case RET_PF_WRITE_PROTECTED:
return 0;
case RET_PF_EMULATE:
@@ -5963,6 +5989,106 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
write_unlock(&vcpu->kvm->mmu_lock);
}
+static bool is_write_to_guest_page_table(u64 error_code)
+{
+ const u64 mask = PFERR_GUEST_PAGE_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK;
+
+ return (error_code & mask) == mask;
+}
+
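
is_write_to_guest_page_table() requires all three bits at once: the fault must be present, must be a write, and must be flagged by hardware as occurring during a guest page-table walk. A sketch with illustrative bit positions; only the compare-against-the-full-mask idiom is the point:

	#include <assert.h>
	#include <stdint.h>

	#define PFERR_PRESENT	 (1ULL << 0)	/* illustrative positions */
	#define PFERR_WRITE	 (1ULL << 1)
	#define PFERR_GUEST_PAGE (1ULL << 33)

	static int is_write_to_gpt(uint64_t ec)
	{
		const uint64_t mask = PFERR_GUEST_PAGE | PFERR_WRITE | PFERR_PRESENT;

		/* '== mask' demands every bit; '& mask' alone would match any. */
		return (ec & mask) == mask;
	}

	int main(void)
	{
		assert(is_write_to_gpt(PFERR_GUEST_PAGE | PFERR_WRITE | PFERR_PRESENT));
		assert(!is_write_to_gpt(PFERR_WRITE | PFERR_PRESENT));
		return 0;
	}
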
+static int kvm_mmu_write_protect_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ u64 error_code, int *emulation_type)
+{
+ bool direct = vcpu->arch.mmu->root_role.direct;
+
+ /*
+ * Do not try to unprotect and retry if the vCPU re-faulted on the same
+ * RIP with the same address that was previously unprotected, as doing
+	 * so will likely put the vCPU into an infinite loop. E.g. if the vCPU uses
+ * a non-page-table modifying instruction on the PDE that points to the
+ * instruction, then unprotecting the gfn will unmap the instruction's
+ * code, i.e. make it impossible for the instruction to ever complete.
+ */
+ if (vcpu->arch.last_retry_eip == kvm_rip_read(vcpu) &&
+ vcpu->arch.last_retry_addr == cr2_or_gpa)
+ return RET_PF_EMULATE;
+
+ /*
+ * Reset the unprotect+retry values that guard against infinite loops.
+ * The values will be refreshed if KVM explicitly unprotects a gfn and
+	 * retries; in all other cases it's safe to retry in the future even if
+ * the next page fault happens on the same RIP+address.
+ */
+ vcpu->arch.last_retry_eip = 0;
+ vcpu->arch.last_retry_addr = 0;
+
+ /*
+ * It should be impossible to reach this point with an MMIO cache hit,
+ * as RET_PF_WRITE_PROTECTED is returned if and only if there's a valid,
+ * writable memslot, and creating a memslot should invalidate the MMIO
+ * cache by way of changing the memslot generation. WARN and disallow
+ * retry if MMIO is detected, as retrying MMIO emulation is pointless
+ * and could put the vCPU into an infinite loop because the processor
+ * will keep faulting on the non-existent MMIO address.
+ */
+ if (WARN_ON_ONCE(mmio_info_in_cache(vcpu, cr2_or_gpa, direct)))
+ return RET_PF_EMULATE;
+
+ /*
+ * Before emulating the instruction, check to see if the access was due
+ * to a read-only violation while the CPU was walking non-nested NPT
+ * page tables, i.e. for a direct MMU, for _guest_ page tables in L1.
+ * If L1 is sharing (a subset of) its page tables with L2, e.g. by
+ * having nCR3 share lower level page tables with hCR3, then when KVM
+ * (L0) write-protects the nested NPTs, i.e. npt12 entries, KVM is also
+ * unknowingly write-protecting L1's guest page tables, which KVM isn't
+ * shadowing.
+ *
+ * Because the CPU (by default) walks NPT page tables using a write
+ * access (to ensure the CPU can do A/D updates), page walks in L1 can
+ * trigger write faults for the above case even when L1 isn't modifying
+ * PTEs. As a result, KVM will unnecessarily emulate (or at least, try
+ * to emulate) an excessive number of L1 instructions; because L1's MMU
+ * isn't shadowed by KVM, there is no need to write-protect L1's gPTEs
+ * and thus no need to emulate in order to guarantee forward progress.
+ *
+ * Try to unprotect the gfn, i.e. zap any shadow pages, so that L1 can
+ * proceed without triggering emulation. If one or more shadow pages
+ * was zapped, skip emulation and resume L1 to let it natively execute
+ * the instruction. If no shadow pages were zapped, then the write-
+ * fault is due to something else entirely, i.e. KVM needs to emulate,
+ * as resuming the guest will put it into an infinite loop.
+ *
+ * Note, this code also applies to Intel CPUs, even though it is *very*
+ * unlikely that an L1 will share its page tables (IA32/PAE/paging64
+ * format) with L2's page tables (EPT format).
+ *
+ * For indirect MMUs, i.e. if KVM is shadowing the current MMU, try to
+ * unprotect the gfn and retry if an event is awaiting reinjection. If
+ * KVM emulates multiple instructions before completing event injection,
+ * the event could be delayed beyond what is architecturally allowed,
+ * e.g. KVM could inject an IRQ after the TPR has been raised.
+ */
+ if (((direct && is_write_to_guest_page_table(error_code)) ||
+ (!direct && kvm_event_needs_reinjection(vcpu))) &&
+ kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
+ return RET_PF_RETRY;
+
+ /*
+	 * The gfn is write-protected, but if KVM detects it's emulating an
+ * instruction that is unlikely to be used to modify page tables, or if
+ * emulation fails, KVM can try to unprotect the gfn and let the CPU
+ * re-execute the instruction that caused the page fault. Do not allow
+ * retrying an instruction from a nested guest as KVM is only explicitly
+ * shadowing L1's page tables, i.e. unprotecting something for L1 isn't
+ * going to magically fix whatever issue caused L2 to fail.
+ */
+ if (!is_guest_mode(vcpu))
+ *emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
+
+ return RET_PF_EMULATE;
+}
+
int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len)
{
@@ -6008,6 +6134,10 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
if (r < 0)
return r;
+ if (r == RET_PF_WRITE_PROTECTED)
+ r = kvm_mmu_write_protect_fault(vcpu, cr2_or_gpa, error_code,
+ &emulation_type);
+
if (r == RET_PF_FIXED)
vcpu->stat.pf_fixed++;
else if (r == RET_PF_EMULATE)
@@ -6018,32 +6148,6 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
if (r != RET_PF_EMULATE)
return 1;
- /*
- * Before emulating the instruction, check if the error code
- * was due to a RO violation while translating the guest page.
- * This can occur when using nested virtualization with nested
- * paging in both guests. If true, we simply unprotect the page
- * and resume the guest.
- */
- if (vcpu->arch.mmu->root_role.direct &&
- (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
- kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
- return 1;
- }
-
- /*
- * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
- * optimistically try to just unprotect the page and let the processor
- * re-execute the instruction that caused the page fault. Do not allow
- * retrying MMIO emulation, as it's not only pointless but could also
- * cause us to enter an infinite loop because the processor will keep
- * faulting on the non-existent MMIO address. Retrying an instruction
- * from a nested guest is also pointless and dangerous as we are only
- * explicitly shadowing L1's page tables, i.e. unprotecting something
- * for L1 isn't going to magically fix whatever issue cause L2 to fail.
- */
- if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
- emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
emulate:
return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
insn_len);
@@ -6202,59 +6306,6 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);
-/* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
- struct kvm_rmap_head *rmap_head,
- const struct kvm_memory_slot *slot);
-
-static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- slot_rmaps_handler fn,
- int start_level, int end_level,
- gfn_t start_gfn, gfn_t end_gfn,
- bool flush_on_yield, bool flush)
-{
- struct slot_rmap_walk_iterator iterator;
-
- lockdep_assert_held_write(&kvm->mmu_lock);
-
- for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
- end_gfn, &iterator) {
- if (iterator.rmap)
- flush |= fn(kvm, iterator.rmap, slot);
-
- if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
- if (flush && flush_on_yield) {
- kvm_flush_remote_tlbs_range(kvm, start_gfn,
- iterator.gfn - start_gfn + 1);
- flush = false;
- }
- cond_resched_rwlock_write(&kvm->mmu_lock);
- }
- }
-
- return flush;
-}
-
-static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- slot_rmaps_handler fn,
- int start_level, int end_level,
- bool flush_on_yield)
-{
- return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
- slot->base_gfn, slot->base_gfn + slot->npages - 1,
- flush_on_yield, false);
-}
-
-static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- slot_rmaps_handler fn,
- bool flush_on_yield)
-{
- return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
-}
-
static void free_mmu_pages(struct kvm_mmu *mmu)
{
if (!tdp_enabled && mmu->pae_root)
@@ -6528,9 +6579,8 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
if (WARN_ON_ONCE(start >= end))
continue;
- flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
- PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true, flush);
+ flush = __kvm_rmap_zap_gfn_range(kvm, memslot, start,
+ end, true, flush);
}
}
@@ -6818,7 +6868,7 @@ static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
*/
for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
__walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
- level, level, start, end - 1, true, false);
+ level, level, start, end - 1, true, true, false);
}
/* Must be called with the mmu_lock held in write-mode. */
@@ -6997,10 +7047,42 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
kvm_mmu_zap_all(kvm);
}
+/*
+ * Zap leaf SPTEs in the memslot's range when a memslot is moved/deleted.
+ *
+ * Zapping non-leaf SPTEs, a.k.a. not-last SPTEs, isn't required; in the
+ * worst case we'll have unused shadow pages lying around until they are
+ * recycled due to age or when the VM is destroyed.
+ */
+static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ struct kvm_gfn_range range = {
+ .slot = slot,
+ .start = slot->base_gfn,
+ .end = slot->base_gfn + slot->npages,
+ .may_block = true,
+ };
+
+ write_lock(&kvm->mmu_lock);
+ if (kvm_unmap_gfn_range(kvm, &range))
+ kvm_flush_remote_tlbs_memslot(kvm, slot);
+
+ write_unlock(&kvm->mmu_lock);
+}
+
+static inline bool kvm_memslot_flush_zap_all(struct kvm *kvm)
+{
+ return kvm->arch.vm_type == KVM_X86_DEFAULT_VM &&
+ kvm_check_has_quirk(kvm, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+}
+
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
- kvm_mmu_zap_all_fast(kvm);
+ if (kvm_memslot_flush_zap_all(kvm))
+ kvm_mmu_zap_all_fast(kvm);
+ else
+ kvm_mmu_zap_memslot_leafs(kvm, slot);
}
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 1721d97743e9..c98827840e07 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -258,6 +258,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
* RET_PF_CONTINUE: So far, so good, keep handling the page fault.
* RET_PF_RETRY: let CPU fault again on the address.
* RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
+ * RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotect the
+ * gfn and retry, or emulate the instruction directly.
* RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
* RET_PF_FIXED: The faulting entry has been fixed.
* RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
@@ -274,6 +276,7 @@ enum {
RET_PF_CONTINUE = 0,
RET_PF_RETRY,
RET_PF_EMULATE,
+ RET_PF_WRITE_PROTECTED,
RET_PF_INVALID,
RET_PF_FIXED,
RET_PF_SPURIOUS,
@@ -349,8 +352,6 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
-void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
-
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index 195d98bc8de8..f35a830ce469 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -57,6 +57,7 @@
TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
+TRACE_DEFINE_ENUM(RET_PF_WRITE_PROTECTED);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 69941cebb3a8..ae7d39ff2d07 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -646,10 +646,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
* really care if it changes underneath us after this point).
*/
if (FNAME(gpte_changed)(vcpu, gw, top_level))
- goto out_gpte_changed;
+ return RET_PF_RETRY;
if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
- goto out_gpte_changed;
+ return RET_PF_RETRY;
/*
* Load a new root and retry the faulting instruction in the extremely
@@ -659,7 +659,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
*/
if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
- goto out_gpte_changed;
+ return RET_PF_RETRY;
}
for_each_shadow_entry(vcpu, fault->addr, it) {
@@ -674,34 +674,38 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
false, access);
- if (sp != ERR_PTR(-EEXIST)) {
- /*
- * We must synchronize the pagetable before linking it
- * because the guest doesn't need to flush tlb when
- * the gpte is changed from non-present to present.
- * Otherwise, the guest may use the wrong mapping.
- *
- * For PG_LEVEL_4K, kvm_mmu_get_page() has already
- * synchronized it transiently via kvm_sync_page().
- *
- * For higher level pagetable, we synchronize it via
- * the slower mmu_sync_children(). If it needs to
- * break, some progress has been made; return
- * RET_PF_RETRY and retry on the next #PF.
- * KVM_REQ_MMU_SYNC is not necessary but it
- * expedites the process.
- */
- if (sp->unsync_children &&
- mmu_sync_children(vcpu, sp, false))
- return RET_PF_RETRY;
- }
+ /*
+ * Synchronize the new page before linking it, as the CPU (KVM)
+ * is architecturally disallowed from inserting non-present
+ * entries into the TLB, i.e. the guest isn't required to flush
+ * the TLB when changing the gPTE from non-present to present.
+ *
+ * For PG_LEVEL_4K, kvm_mmu_find_shadow_page() has already
+ * synchronized the page via kvm_sync_page().
+ *
+	 * For higher-level pages, which cannot themselves be unsync
+ * but can have unsync children, synchronize via the slower
+ * mmu_sync_children(). If KVM needs to drop mmu_lock due to
+ * contention or to reschedule, instruct the caller to retry
+ * the #PF (mmu_sync_children() ensures forward progress will
+ * be made).
+ */
+ if (sp != ERR_PTR(-EEXIST) && sp->unsync_children &&
+ mmu_sync_children(vcpu, sp, false))
+ return RET_PF_RETRY;
/*
- * Verify that the gpte in the page we've just write
- * protected is still there.
+ * Verify that the gpte in the page, which is now either
+ * write-protected or unsync, wasn't modified between the fault
+ * and acquiring mmu_lock. This needs to be done even when
+ * reusing an existing shadow page to ensure the information
+ * gathered by the walker matches the information stored in the
+ * shadow page (which could have been modified by a different
+ * vCPU even if the page was already linked). Holding mmu_lock
+ * prevents the shadow page from changing after this point.
*/
if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
- goto out_gpte_changed;
+ return RET_PF_RETRY;
if (sp != ERR_PTR(-EEXIST))
link_shadow_page(vcpu, it.sptep, sp);
@@ -755,9 +759,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
return ret;
-
-out_gpte_changed:
- return RET_PF_RETRY;
}
/*
@@ -805,7 +806,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (page_fault_handle_page_track(vcpu, fault)) {
shadow_page_table_clear_flood(vcpu, fault->addr);
- return RET_PF_EMULATE;
+ return RET_PF_WRITE_PROTECTED;
}
r = mmu_topup_memory_caches(vcpu, true);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 3c55955bcaf8..3b996c1fdaab 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1046,10 +1046,8 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
* protected, emulation is needed. If the emulation was skipped,
* the vCPU would have the same fault again.
*/
- if (wrprot) {
- if (fault->write)
- ret = RET_PF_EMULATE;
- }
+ if (wrprot && fault->write)
+ ret = RET_PF_WRITE_PROTECTED;
/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index 2f4e155080ba..0d17d6b70639 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -17,6 +17,7 @@ enum kvm_only_cpuid_leafs {
CPUID_8000_0007_EDX,
CPUID_8000_0022_EAX,
CPUID_7_2_EDX,
+ CPUID_24_0_EBX,
NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -46,6 +47,7 @@ enum kvm_only_cpuid_leafs {
#define X86_FEATURE_AVX_NE_CONVERT KVM_X86_FEATURE(CPUID_7_1_EDX, 5)
#define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
#define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
+#define X86_FEATURE_AVX10 KVM_X86_FEATURE(CPUID_7_1_EDX, 19)
/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
#define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
@@ -55,6 +57,11 @@ enum kvm_only_cpuid_leafs {
#define KVM_X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
+/* Intel-defined sub-features, CPUID level 0x00000024:0 (EBX) */
+#define X86_FEATURE_AVX10_128 KVM_X86_FEATURE(CPUID_24_0_EBX, 16)
+#define X86_FEATURE_AVX10_256 KVM_X86_FEATURE(CPUID_24_0_EBX, 17)
+#define X86_FEATURE_AVX10_512 KVM_X86_FEATURE(CPUID_24_0_EBX, 18)
+
/* CPUID level 0x80000007 (EDX). */
#define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
@@ -90,6 +97,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
[CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
+ [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX},
};
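
With the new [CPUID_24_0_EBX] entry, the reverse table can translate a KVM-only feature word back into the (leaf, sub-leaf, register) triple to query, e.g. X86_FEATURE_AVX10_512 resolves to leaf 0x24, sub-leaf 0, EBX, bit 18. A hedged sketch of that reverse lookup (a one-entry stand-in for the table above, not the kernel's actual definitions):

	#include <assert.h>
	#include <stdint.h>

	enum cpuid_reg_name { CPUID_EAX, CPUID_EBX, CPUID_ECX, CPUID_EDX };

	struct cpuid_reg { uint32_t function, index; enum cpuid_reg_name reg; };

	enum { CPUID_24_0_EBX };	/* illustrative single-entry word list */

	static const struct cpuid_reg reverse_cpuid[] = {
		[CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX },
	};

	int main(void)
	{
		/* X86_FEATURE_AVX10_512 lives in word CPUID_24_0_EBX, bit 18. */
		const struct cpuid_reg *r = &reverse_cpuid[CPUID_24_0_EBX];

		assert(r->function == 0x24 && r->index == 0 && r->reg == CPUID_EBX);
		return 0;
	}
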
/*
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 00e3c27d2a87..85241c0c7f56 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -624,17 +624,31 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
#endif
/*
- * Give leave_smm() a chance to make ISA-specific changes to the vCPU
- * state (e.g. enter guest mode) before loading state from the SMM
- * state-save area.
+ * FIXME: When resuming L2 (a.k.a. guest mode), the transition to guest
+ * mode should happen _after_ loading state from SMRAM. However, KVM
+ * piggybacks the nested VM-Enter flows (which is wrong for many other
+ * reasons), and so nSVM/nVMX would clobber state that is loaded from
+ * SMRAM and from the VMCS/VMCB.
*/
if (kvm_x86_call(leave_smm)(vcpu, &smram))
return X86EMUL_UNHANDLEABLE;
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
- return rsm_load_state_64(ctxt, &smram.smram64);
+ ret = rsm_load_state_64(ctxt, &smram.smram64);
else
#endif
- return rsm_load_state_32(ctxt, &smram.smram32);
+ ret = rsm_load_state_32(ctxt, &smram.smram32);
+
+ /*
+ * If RSM fails and triggers shutdown, architecturally the shutdown
+ * occurs *before* the transition to guest mode. But due to KVM's
+ * flawed handling of RSM to L2 (see above), the vCPU may already be
+ * in_guest_mode(). Force the vCPU out of guest mode before delivering
+ * the shutdown, so that L1 enters shutdown instead of seeing a VM-Exit
+ * that architecturally shouldn't be possible.
+ */
+ if (ret != X86EMUL_CONTINUE && is_guest_mode(vcpu))
+ kvm_leave_nested(vcpu);
+ return ret;
}
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 6f704c1037e5..d5314cb7dff4 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1693,8 +1693,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
ret = -ENOMEM;
- ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
- save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ save = kzalloc(sizeof(*save), GFP_KERNEL);
if (!ctl || !save)
goto out_free;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 714c517dd4b7..0b851ef937f2 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -534,10 +534,10 @@ static int __sev_issue_cmd(int fd, int id, void *data, int *error)
int ret;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- ret = sev_issue_cmd_external_user(f.file, id, data, error);
+ ret = sev_issue_cmd_external_user(fd_file(f), id, data, error);
fdput(f);
return ret;
@@ -2078,15 +2078,15 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
bool charged = false;
int ret;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (!file_is_kvm(f.file)) {
+ if (!file_is_kvm(fd_file(f))) {
ret = -EBADF;
goto out_fput;
}
- source_kvm = f.file->private_data;
+ source_kvm = fd_file(f)->private_data;
ret = sev_lock_two_vms(kvm, source_kvm);
if (ret)
goto out_fput;
@@ -2803,15 +2803,15 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
struct kvm_sev_info *source_sev, *mirror_sev;
int ret;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (!file_is_kvm(f.file)) {
+ if (!file_is_kvm(fd_file(f))) {
ret = -EBADF;
goto e_source_fput;
}
- source_kvm = f.file->private_data;
+ source_kvm = fd_file(f)->private_data;
ret = sev_lock_two_vms(kvm, source_kvm);
if (ret)
goto e_source_fput;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5ab2c92c7331..9df3e1e5ae81 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -573,7 +573,7 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
{
- return page_address(sd->save_area) + 0x400;
+ return &sd->save_area->host_sev_es_save;
}
static inline void kvm_cpu_svm_disable(void)
@@ -592,14 +592,14 @@ static inline void kvm_cpu_svm_disable(void)
}
}
-static void svm_emergency_disable(void)
+static void svm_emergency_disable_virtualization_cpu(void)
{
kvm_rebooting = true;
kvm_cpu_svm_disable();
}
-static void svm_hardware_disable(void)
+static void svm_disable_virtualization_cpu(void)
{
/* Make sure we clean up behind us */
if (tsc_scaling)
@@ -610,7 +610,7 @@ static void svm_hardware_disable(void)
amd_pmu_disable_virt();
}
-static int svm_hardware_enable(void)
+static int svm_enable_virtualization_cpu(void)
{
struct svm_cpu_data *sd;
@@ -696,7 +696,7 @@ static void svm_cpu_uninit(int cpu)
return;
kfree(sd->sev_vmcbs);
- __free_page(sd->save_area);
+ __free_page(__sme_pa_to_page(sd->save_area_pa));
sd->save_area_pa = 0;
sd->save_area = NULL;
}
@@ -704,23 +704,24 @@ static void svm_cpu_uninit(int cpu)
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
+ struct page *save_area_page;
int ret = -ENOMEM;
memset(sd, 0, sizeof(struct svm_cpu_data));
- sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
- if (!sd->save_area)
+ save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
+ if (!save_area_page)
return ret;
ret = sev_cpu_init(sd);
if (ret)
goto free_save_area;
- sd->save_area_pa = __sme_page_pa(sd->save_area);
+ sd->save_area = page_address(save_area_page);
+ sd->save_area_pa = __sme_page_pa(save_area_page);
return 0;
free_save_area:
- __free_page(sd->save_area);
- sd->save_area = NULL;
+ __free_page(save_area_page);
return ret;
}
@@ -1124,8 +1125,7 @@ static void svm_hardware_unsetup(void)
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
- __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
- get_order(IOPM_SIZE));
+ __free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
iopm_base = 0;
}
@@ -1301,7 +1301,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
if (!kvm_hlt_in_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_HLT);
- control->iopm_base_pa = __sme_set(iopm_base);
+ control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
control->int_ctl = V_INTR_MASKING_MASK;
@@ -1503,7 +1503,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
sev_free_vcpu(vcpu);
- __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
+ __free_page(__sme_pa_to_page(svm->vmcb01.pa));
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}
@@ -1533,7 +1533,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
* TSC_AUX is always virtualized for SEV-ES guests when the feature is
* available. The user return MSR support is not required in this case
* because TSC_AUX is restored on #VMEXIT from the host save area
- * (which has been initialized in svm_hardware_enable()).
+ * (which has been initialized in svm_enable_virtualization_cpu()).
*/
if (likely(tsc_aux_uret_slot >= 0) &&
(!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
@@ -2825,17 +2825,17 @@ static int efer_trap(struct kvm_vcpu *vcpu)
return kvm_complete_insn_gp(vcpu, ret);
}
-static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+static int svm_get_feature_msr(u32 msr, u64 *data)
{
- msr->data = 0;
+ *data = 0;
- switch (msr->index) {
+ switch (msr) {
case MSR_AMD64_DE_CFG:
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
- msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
+ *data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break;
default:
- return KVM_MSR_RET_INVALID;
+ return KVM_MSR_RET_UNSUPPORTED;
}
return 0;
@@ -3144,7 +3144,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
* feature is available. The user return MSR support is not
* required in this case because TSC_AUX is restored on #VMEXIT
* from the host save area (which has been initialized in
- * svm_hardware_enable()).
+ * svm_enable_virtualization_cpu()).
*/
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
break;
@@ -3191,18 +3191,21 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
break;
case MSR_AMD64_DE_CFG: {
- struct kvm_msr_entry msr_entry;
+ u64 supported_de_cfg;
- msr_entry.index = msr->index;
- if (svm_get_msr_feature(&msr_entry))
+ if (svm_get_feature_msr(ecx, &supported_de_cfg))
return 1;
- /* Check the supported bits */
- if (data & ~msr_entry.data)
+ if (data & ~supported_de_cfg)
return 1;
- /* Don't allow the guest to change a bit, #GP */
- if (!msr->host_initiated && (data ^ msr_entry.data))
+ /*
+ * Don't let the guest change the host-programmed value. The
+ * MSR is very model specific, i.e. contains multiple bits that
+ * are completely unknown to KVM, and the one bit known to KVM
+ * is simply a reflection of hardware capabilities.
+ */
+ if (!msr->host_initiated && data != svm->msr_decfg)
return 1;
svm->msr_decfg = data;
@@ -4156,12 +4159,21 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
if (is_guest_mode(vcpu))
return EXIT_FASTPATH_NONE;
- if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
- to_svm(vcpu)->vmcb->control.exit_info_1)
+ switch (svm->vmcb->control.exit_code) {
+ case SVM_EXIT_MSR:
+ if (!svm->vmcb->control.exit_info_1)
+ break;
return handle_fastpath_set_msr_irqoff(vcpu);
+ case SVM_EXIT_HLT:
+ return handle_fastpath_hlt(vcpu);
+ default:
+ break;
+ }
return EXIT_FASTPATH_NONE;
}
@@ -4992,8 +5004,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.check_processor_compatibility = svm_check_processor_compat,
.hardware_unsetup = svm_hardware_unsetup,
- .hardware_enable = svm_hardware_enable,
- .hardware_disable = svm_hardware_disable,
+ .enable_virtualization_cpu = svm_enable_virtualization_cpu,
+ .disable_virtualization_cpu = svm_disable_virtualization_cpu,
+ .emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu,
.has_emulated_msr = svm_has_emulated_msr,
.vcpu_create = svm_vcpu_create,
@@ -5011,7 +5024,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.vcpu_unblocking = avic_vcpu_unblocking,
.update_exception_bitmap = svm_update_exception_bitmap,
- .get_msr_feature = svm_get_msr_feature,
+ .get_feature_msr = svm_get_feature_msr,
.get_msr = svm_get_msr,
.set_msr = svm_set_msr,
.get_segment_base = svm_get_segment_base,
@@ -5062,6 +5075,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.enable_nmi_window = svm_enable_nmi_window,
.enable_irq_window = svm_enable_irq_window,
.update_cr8_intercept = svm_update_cr8_intercept,
+
+ .x2apic_icr_is_split = true,
.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
.apicv_post_state_restore = avic_apicv_post_state_restore,
@@ -5266,7 +5281,7 @@ static __init int svm_hardware_setup(void)
iopm_va = page_address(iopm_pages);
memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
- iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+ iopm_base = __sme_page_pa(iopm_pages);
init_msrpm_offsets();
@@ -5425,8 +5440,6 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
static void __svm_exit(void)
{
kvm_x86_vendor_exit();
-
- cpu_emergency_unregister_virt_callback(svm_emergency_disable);
}
static int __init svm_init(void)
@@ -5442,8 +5455,6 @@ static int __init svm_init(void)
if (r)
return r;
- cpu_emergency_register_virt_callback(svm_emergency_disable);
-
/*
* Common KVM initialization _must_ come last, after this, /dev/kvm is
* exposed to userspace!
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 76107c7d0595..43fa6a16eb19 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -25,7 +25,21 @@
#include "cpuid.h"
#include "kvm_cache_regs.h"
-#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+/*
+ * Helpers to convert to/from physical addresses for pages whose address is
+ * consumed directly by hardware. Even though it's a physical address, SVM
+ * often restricts the address to the natural width, hence 'unsigned long'
+ * instead of 'hpa_t'.
+ */
+static inline unsigned long __sme_page_pa(struct page *page)
+{
+ return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
+}
+
+static inline struct page *__sme_pa_to_page(unsigned long pa)
+{
+ return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
+}
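
These helpers must stay symmetric: set the C-bit when producing the hardware-consumed address, clear it before converting back to a pfn. A round-trip sketch with an illustrative C-bit position (on real hardware the bit comes from CPUID 0x8000001F):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12
	static const uint64_t sme_me_mask = 1ULL << 47;	/* illustrative C-bit */

	static uint64_t __sme_set(uint64_t pa) { return pa | sme_me_mask; }
	static uint64_t __sme_clr(uint64_t pa) { return pa & ~sme_me_mask; }

	int main(void)
	{
		uint64_t pfn = 0x1234;

		/* __sme_page_pa(): pfn -> encrypted physical address ... */
		uint64_t pa = __sme_set(pfn << PAGE_SHIFT);

		/* ... __sme_pa_to_page(): strip the C-bit, then back to the pfn. */
		assert((__sme_clr(pa) >> PAGE_SHIFT) == pfn);
		return 0;
	}
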
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
@@ -321,7 +335,7 @@ struct svm_cpu_data {
u32 next_asid;
u32 min_asid;
- struct page *save_area;
+ struct vmcb *save_area;
unsigned long save_area_pa;
struct vmcb *current_vmcb;
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index a0c8eb37d3e1..2ed80aea3bb1 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -209,10 +209,8 @@ SYM_FUNC_START(__svm_vcpu_run)
7: vmload %_ASM_AX
8:
-#ifdef CONFIG_MITIGATION_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-#endif
+ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
/* Clobbers RAX, RCX, RDX. */
RESTORE_HOST_SPEC_CTRL
@@ -348,10 +346,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
2: cli
-#ifdef CONFIG_MITIGATION_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-#endif
+ FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
RESTORE_HOST_SPEC_CTRL
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 41a4533f9989..cb6588238f46 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -54,9 +54,7 @@ struct nested_vmx_msrs {
};
struct vmcs_config {
- int size;
- u32 basic_cap;
- u32 revision_id;
+ u64 basic;
u32 pin_based_exec_ctrl;
u32 cpu_based_exec_ctrl;
u32 cpu_based_2nd_exec_ctrl;
@@ -76,7 +74,7 @@ extern struct vmx_capability vmx_capability __ro_after_init;
static inline bool cpu_has_vmx_basic_inout(void)
{
- return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
+ return vmcs_config.basic & VMX_BASIC_INOUT;
}
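
Storing the whole IA32_VMX_BASIC MSR as one u64 removes the reconstruction dance: VMX_BASIC_INOUT is bit 54 of the full MSR, which the old split-u32 layout could only test after shifting basic_cap back into the high half. A before/after sketch of the two accesses:

	#include <assert.h>
	#include <stdint.h>

	#define VMX_BASIC_INOUT (1ULL << 54)

	int main(void)
	{
		uint64_t basic = VMX_BASIC_INOUT;		/* full 64-bit MSR */
		uint32_t basic_cap = (uint32_t)(basic >> 32);	/* old split storage */

		/* Old: shift the high half back up before testing bit 54. */
		assert(((uint64_t)basic_cap << 32) & VMX_BASIC_INOUT);

		/* New: test the bit directly in the u64. */
		assert(basic & VMX_BASIC_INOUT);
		return 0;
	}
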
static inline bool cpu_has_virtual_nmis(void)
@@ -225,7 +223,7 @@ static inline bool cpu_has_vmx_vmfunc(void)
static inline bool cpu_has_vmx_shadow_vmcs(void)
{
/* check if the cpu supports writing r/o exit information fields */
- if (!(vmcs_config.misc & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
+ if (!(vmcs_config.misc & VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
return false;
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -367,7 +365,7 @@ static inline bool cpu_has_vmx_invvpid_global(void)
static inline bool cpu_has_vmx_intel_pt(void)
{
- return (vmcs_config.misc & MSR_IA32_VMX_MISC_INTEL_PT) &&
+ return (vmcs_config.misc & VMX_MISC_INTEL_PT) &&
(vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PT_USE_GPA) &&
(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL);
}
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 0bf35ebe8a1b..7668e2fb8043 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -23,8 +23,10 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.hardware_unsetup = vmx_hardware_unsetup,
- .hardware_enable = vmx_hardware_enable,
- .hardware_disable = vmx_hardware_disable,
+ .enable_virtualization_cpu = vmx_enable_virtualization_cpu,
+ .disable_virtualization_cpu = vmx_disable_virtualization_cpu,
+ .emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu,
+
.has_emulated_msr = vmx_has_emulated_msr,
.vm_size = sizeof(struct kvm_vmx),
@@ -41,7 +43,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.vcpu_put = vmx_vcpu_put,
.update_exception_bitmap = vmx_update_exception_bitmap,
- .get_msr_feature = vmx_get_msr_feature,
+ .get_feature_msr = vmx_get_feature_msr,
.get_msr = vmx_get_msr,
.set_msr = vmx_set_msr,
.get_segment_base = vmx_get_segment_base,
@@ -89,6 +91,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.enable_nmi_window = vmx_enable_nmi_window,
.enable_irq_window = vmx_enable_irq_window,
.update_cr8_intercept = vmx_update_cr8_intercept,
+
+ .x2apic_icr_is_split = false,
.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 2392a7ef254d..a8e7bc04d9bf 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -981,7 +981,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
__func__, i, e.index, e.reserved);
goto fail;
}
- if (kvm_set_msr(vcpu, e.index, e.value)) {
+ if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) {
pr_debug_ratelimited(
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
__func__, i, e.index, e.value);
@@ -1017,7 +1017,7 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
}
}
- if (kvm_get_msr(vcpu, msr_index, data)) {
+ if (kvm_get_msr_with_filter(vcpu, msr_index, data)) {
pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
msr_index);
return false;
@@ -1112,9 +1112,9 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
/*
* Emulated VMEntry does not fail here. Instead a less
* accurate value will be returned by
- * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
- * instead of reading the value from the vmcs02 VMExit
- * MSR-store area.
+ * nested_vmx_get_vmexit_msr_value() by reading KVM's
+ * internal MSR state instead of reading the value from
+ * the vmcs02 VMExit MSR-store area.
*/
pr_warn_ratelimited(
"Not enough msr entries in msr_autostore. Can't add msr %x\n",
@@ -1251,21 +1251,32 @@ static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
- const u64 feature_and_reserved =
- /* feature (except bit 48; see below) */
- BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
- /* reserved */
- BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
+ const u64 feature_bits = VMX_BASIC_DUAL_MONITOR_TREATMENT |
+ VMX_BASIC_INOUT |
+ VMX_BASIC_TRUE_CTLS;
+
+ const u64 reserved_bits = GENMASK_ULL(63, 56) |
+ GENMASK_ULL(47, 45) |
+ BIT_ULL(31);
+
u64 vmx_basic = vmcs_config.nested.basic;
- if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
+ BUILD_BUG_ON(feature_bits & reserved_bits);
+
+ /*
+ * Except for 32BIT_PHYS_ADDR_ONLY, which is an anti-feature bit (has
+ * inverted polarity), the incoming value must not set feature bits or
+ * reserved bits that aren't allowed/supported by KVM. Fields, i.e.
+ * multi-bit values, are explicitly checked below.
+ */
+ if (!is_bitwise_subset(vmx_basic, data, feature_bits | reserved_bits))
return -EINVAL;
/*
* KVM does not emulate a version of VMX that constrains physical
* addresses of VMX structures (e.g. VMCS) to 32-bits.
*/
- if (data & BIT_ULL(48))
+ if (data & VMX_BASIC_32BIT_PHYS_ADDR_ONLY)
return -EINVAL;
if (vmx_basic_vmcs_revision_id(vmx_basic) !=
@@ -1334,16 +1345,29 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
- const u64 feature_and_reserved_bits =
- /* feature */
- BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
- BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
- /* reserved */
- GENMASK_ULL(13, 9) | BIT_ULL(31);
+ const u64 feature_bits = VMX_MISC_SAVE_EFER_LMA |
+ VMX_MISC_ACTIVITY_HLT |
+ VMX_MISC_ACTIVITY_SHUTDOWN |
+ VMX_MISC_ACTIVITY_WAIT_SIPI |
+ VMX_MISC_INTEL_PT |
+ VMX_MISC_RDMSR_IN_SMM |
+ VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
+ VMX_MISC_VMXOFF_BLOCK_SMI |
+ VMX_MISC_ZERO_LEN_INS;
+
+ const u64 reserved_bits = BIT_ULL(31) | GENMASK_ULL(13, 9);
+
u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
vmcs_config.nested.misc_high);
- if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
+ BUILD_BUG_ON(feature_bits & reserved_bits);
+
+ /*
+ * The incoming value must not set feature bits or reserved bits that
+ * aren't allowed/supported by KVM. Fields, i.e. multi-bit values, are
+ * explicitly checked below.
+ */
+ if (!is_bitwise_subset(vmx_misc, data, feature_bits | reserved_bits))
return -EINVAL;
if ((vmx->nested.msrs.pinbased_ctls_high &
@@ -2317,10 +2341,12 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
/* Posted interrupts setting is only taken from vmcs12. */
vmx->nested.pi_pending = false;
- if (nested_cpu_has_posted_intr(vmcs12))
+ if (nested_cpu_has_posted_intr(vmcs12)) {
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
- else
+ } else {
+ vmx->nested.posted_intr_nv = -1;
exec_control &= ~PIN_BASED_POSTED_INTR;
+ }
pin_controls_set(vmx, exec_control);
/*
@@ -2470,6 +2496,7 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
@@ -2507,7 +2534,7 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
- vmx->segment_cache.bitmask = 0;
+ vmx_segment_cache_clear(vmx);
}
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
@@ -4284,11 +4311,52 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
}
if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
+ int irq;
+
if (block_nested_events)
return -EBUSY;
if (!nested_exit_on_intr(vcpu))
goto no_vmexit;
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+
+ if (!nested_exit_intr_ack_set(vcpu)) {
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+ return 0;
+ }
+
+ irq = kvm_cpu_get_extint(vcpu);
+ if (irq != -1) {
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
+ INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
+ return 0;
+ }
+
+ irq = kvm_apic_has_interrupt(vcpu);
+ if (WARN_ON_ONCE(irq < 0))
+ goto no_vmexit;
+
+ /*
+ * If the IRQ is L2's PI notification vector, process posted
+ * interrupts for L2 instead of injecting VM-Exit, as the
+ * detection/morphing architecturally occurs when the IRQ is
+ * delivered to the CPU. Note, only interrupts that are routed
+ * through the local APIC trigger posted interrupt processing,
+ * and enabling posted interrupts requires ACK-on-exit.
+ */
+ if (irq == vmx->nested.posted_intr_nv) {
+ vmx->nested.pi_pending = true;
+ kvm_apic_clear_irr(vcpu, irq);
+ goto no_vmexit;
+ }
+
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
+ INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
+
+ /*
+ * ACK the interrupt _after_ emulating VM-Exit, as the IRQ must
+ * be marked as in-service in vmcs01.GUEST_INTERRUPT_STATUS.SVI
+ * if APICv is active.
+ */
+ kvm_apic_ack_interrupt(vcpu, irq);
return 0;
}
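
The VM-Exit interruption information built above follows the architectural format: vector in bits 7:0, type in bits 10:8 (type 0 is an external interrupt), valid in bit 31. A sketch of the encoding with the constants spelled out (values per the SDM; KVM's INTR_INFO_VALID_MASK and INTR_TYPE_EXT_INTR macros carry the same values):

	#include <assert.h>
	#include <stdint.h>

	#define INTR_INFO_VALID_MASK	(1u << 31)
	#define INTR_TYPE_EXT_INTR	(0u << 8)	/* type 0: external interrupt */

	static uint32_t make_exit_intr_info(uint8_t vector)
	{
		return INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | vector;
	}

	int main(void)
	{
		uint32_t info = make_exit_intr_info(0x20);

		assert((info & 0xff) == 0x20);		/* vector */
		assert(((info >> 8) & 0x7) == 0);	/* type: external interrupt */
		assert(info & INTR_INFO_VALID_MASK);	/* valid */
		return 0;
	}
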
@@ -4806,7 +4874,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
goto vmabort;
}
- if (kvm_set_msr(vcpu, h.index, h.value)) {
+ if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) {
pr_debug_ratelimited(
"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
__func__, j, h.index, h.value);
@@ -4969,14 +5037,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (likely(!vmx->fail)) {
- if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
- nested_exit_intr_ack_set(vcpu)) {
- int irq = kvm_cpu_get_interrupt(vcpu);
- WARN_ON(irq < 0);
- vmcs12->vm_exit_intr_info = irq |
- INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
- }
-
if (vm_exit_reason != -1)
trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
vmcs12->exit_qualification,
@@ -7051,7 +7111,7 @@ static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf,
{
msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA;
msrs->misc_low |=
- MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
+ VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
VMX_MISC_ACTIVITY_HLT |
VMX_MISC_ACTIVITY_WAIT_SIPI;
@@ -7066,12 +7126,10 @@ static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
* guest, and the VMCS structure we give it - not about the
* VMX support of the underlying hardware.
*/
- msrs->basic =
- VMCS12_REVISION |
- VMX_BASIC_TRUE_CTLS |
- ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
- (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+ msrs->basic = vmx_basic_encode_vmcs_info(VMCS12_REVISION, VMCS12_SIZE,
+ X86_MEMTYPE_WB);
+ msrs->basic |= VMX_BASIC_TRUE_CTLS;
if (cpu_has_vmx_basic_inout())
msrs->basic |= VMX_BASIC_INOUT;
}
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index cce4e2aa30fb..2c296b6abb8c 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -39,11 +39,17 @@ bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
+ lockdep_assert_once(lockdep_is_held(&vcpu->mutex) ||
+ !refcount_read(&vcpu->kvm->users_count));
+
return to_vmx(vcpu)->nested.cached_vmcs12;
}
static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
+ lockdep_assert_once(lockdep_is_held(&vcpu->mutex) ||
+ !refcount_read(&vcpu->kvm->users_count));
+
return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}
@@ -109,7 +115,7 @@ static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
return to_vmx(vcpu)->nested.msrs.misc_low &
- MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
+ VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}
static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 6fef01e0536e..a3c3d2a51f47 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -274,7 +274,7 @@ static int handle_encls_ecreate(struct kvm_vcpu *vcpu)
* simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to
* enforce restriction of access to the PROVISIONKEY.
*/
- contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT);
+ contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL);
if (!contents)
return -ENOMEM;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 733a0c45d1a6..1a4438358c5e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -525,10 +525,6 @@ static const struct kvm_vmx_segment_field {
VMX_SEGMENT_FIELD(LDTR),
};
-static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
-{
- vmx->segment_cache.bitmask = 0;
-}
static unsigned long host_idt_base;
@@ -755,7 +751,7 @@ fault:
return -EIO;
}
-static void vmx_emergency_disable(void)
+void vmx_emergency_disable_virtualization_cpu(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
@@ -1998,15 +1994,15 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
return !(msr->data & ~valid_bits);
}
-int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+int vmx_get_feature_msr(u32 msr, u64 *data)
{
- switch (msr->index) {
+ switch (msr) {
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
if (!nested)
return 1;
- return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+ return vmx_get_vmx_msr(&vmcs_config.nested, msr, data);
default:
- return KVM_MSR_RET_INVALID;
+ return KVM_MSR_RET_UNSUPPORTED;
}
}
@@ -2605,13 +2601,13 @@ static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
struct vmx_capability *vmx_cap)
{
- u32 vmx_msr_low, vmx_msr_high;
u32 _pin_based_exec_control = 0;
u32 _cpu_based_exec_control = 0;
u32 _cpu_based_2nd_exec_control = 0;
u64 _cpu_based_3rd_exec_control = 0;
u32 _vmexit_control = 0;
u32 _vmentry_control = 0;
+ u64 basic_msr;
u64 misc_msr;
int i;
@@ -2734,29 +2730,29 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
_vmexit_control &= ~x_ctrl;
}
- rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
+ rdmsrl(MSR_IA32_VMX_BASIC, basic_msr);
/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
- if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
+ if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE)
return -EIO;
#ifdef CONFIG_X86_64
- /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
- if (vmx_msr_high & (1u<<16))
+ /*
+ * KVM expects to be able to shove all legal physical addresses into
+ * VMCS fields for 64-bit kernels, and per the SDM, "This bit is always
+ * 0 for processors that support Intel 64 architecture".
+ */
+ if (basic_msr & VMX_BASIC_32BIT_PHYS_ADDR_ONLY)
return -EIO;
#endif
/* Require Write-Back (WB) memory type for VMCS accesses. */
- if (((vmx_msr_high >> 18) & 15) != 6)
+ if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB)
return -EIO;
rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
- vmcs_conf->size = vmx_msr_high & 0x1fff;
- vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
-
- vmcs_conf->revision_id = vmx_msr_low;
-
+ vmcs_conf->basic = basic_msr;
vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
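
The vmx_basic_vmcs_size()/vmx_basic_vmcs_mem_type() accessors used above
are likewise new in this series; a sketch of the presumed decode, using
the same MSR_IA32_VMX_BASIC layout:

    static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
    {
            return vmx_basic & GENMASK_ULL(30, 0);
    }

    static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
    {
            return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
    }

    static inline u32 vmx_basic_vmcs_mem_type(u64 vmx_basic)
    {
            return (vmx_basic & GENMASK_ULL(53, 50)) >> 50;
    }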
@@ -2844,7 +2840,7 @@ fault:
return -EFAULT;
}
-int vmx_hardware_enable(void)
+int vmx_enable_virtualization_cpu(void)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
@@ -2881,7 +2877,7 @@ static void vmclear_local_loaded_vmcss(void)
__loaded_vmcs_clear(v);
}
-void vmx_hardware_disable(void)
+void vmx_disable_virtualization_cpu(void)
{
vmclear_local_loaded_vmcss();
@@ -2903,13 +2899,13 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
if (!pages)
return NULL;
vmcs = page_address(pages);
- memset(vmcs, 0, vmcs_config.size);
+ memset(vmcs, 0, vmx_basic_vmcs_size(vmcs_config.basic));
/* KVM supports Enlightened VMCS v1 only */
if (kvm_is_using_evmcs())
vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
else
- vmcs->hdr.revision_id = vmcs_config.revision_id;
+ vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic);
if (shadow)
vmcs->hdr.shadow_vmcs = 1;
@@ -3002,7 +2998,7 @@ static __init int alloc_kvm_area(void)
* physical CPU.
*/
if (kvm_is_using_evmcs())
- vmcs->hdr.revision_id = vmcs_config.revision_id;
+ vmcs->hdr.revision_id = vmx_basic_vmcs_revision_id(vmcs_config.basic);
per_cpu(vmxarea, cpu) = vmcs;
}
@@ -4219,6 +4215,13 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ /*
+ * DO NOT query the vCPU's vmcs12, as vmcs12 is dynamically allocated
+ * and freed, and must not be accessed outside of vcpu->mutex. The
+ * vCPU's cached PI NV is valid if and only if posted interrupts are
+ * enabled in its vmcs12, i.e. checking the vector also checks that
+ * L1 has enabled posted interrupts for L2.
+ */
if (is_guest_mode(vcpu) &&
vector == vmx->nested.posted_intr_nv) {
/*
@@ -5804,8 +5807,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
? PFERR_PRESENT_MASK : 0;
- error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
- PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+ if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
+ error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
+ PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
/*
* Check that the GPA doesn't exceed physical memory limits, as that is
@@ -7265,6 +7269,8 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
return handle_fastpath_set_msr_irqoff(vcpu);
case EXIT_REASON_PREEMPTION_TIMER:
return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
+ case EXIT_REASON_HLT:
+ return handle_fastpath_hlt(vcpu);
default:
return EXIT_FASTPATH_NONE;
}
@@ -7965,6 +7971,7 @@ static __init void vmx_set_cpu_caps(void)
kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
kvm_cpu_cap_clear(X86_FEATURE_SGX1);
kvm_cpu_cap_clear(X86_FEATURE_SGX2);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX_EDECCSSA);
}
if (vmx_umip_emulated())
@@ -8515,7 +8522,7 @@ __init int vmx_hardware_setup(void)
u64 use_timer_freq = 5000ULL * 1000 * 1000;
cpu_preemption_timer_multi =
- vmcs_config.misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+ vmx_misc_preemption_timer_rate(vmcs_config.misc);
if (tsc_khz)
use_timer_freq = (u64)tsc_khz * 1000;
@@ -8582,8 +8589,6 @@ static void __vmx_exit(void)
{
allow_smaller_maxphyaddr = false;
- cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
-
vmx_cleanup_l1d_flush();
}
@@ -8630,8 +8635,6 @@ static int __init vmx_init(void)
pi_init_cpu(cpu);
}
- cpu_emergency_register_virt_callback(vmx_emergency_disable);
-
vmx_check_vmcs12_offsets();
/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 42498fa63abb..2325f773a20b 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -17,10 +17,6 @@
#include "run_flags.h"
#include "../mmu.h"
-#define MSR_TYPE_R 1
-#define MSR_TYPE_W 2
-#define MSR_TYPE_RW 3
-
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
#ifdef CONFIG_X86_64
@@ -756,4 +752,9 @@ static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
return lapic_in_kernel(vcpu) && enable_ipiv;
}
+static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
+{
+ vmx->segment_cache.bitmask = 0;
+}
+
#endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.h b/arch/x86/kvm/vmx/vmx_onhyperv.h
index eb48153bfd73..bba24ed99ee6 100644
--- a/arch/x86/kvm/vmx/vmx_onhyperv.h
+++ b/arch/x86/kvm/vmx/vmx_onhyperv.h
@@ -104,6 +104,14 @@ static inline void evmcs_load(u64 phys_addr)
struct hv_vp_assist_page *vp_ap =
hv_get_vp_assist_page(smp_processor_id());
+ /*
+ * When enabling eVMCS, KVM verifies that every CPU has a valid
+ * hv_vp_assist_page() and refuses to enable the feature otherwise;
+ * the CPU onlining path is likewise checked in vmx_hardware_enable().
+ */
+ if (KVM_BUG_ON(!vp_ap, kvm_get_running_vcpu()->kvm))
+ return;
+
if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
vp_ap->nested_control.features.directhypercall = 1;
vp_ap->current_nested_vmcs = phys_addr;
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 8060e5fc6dbd..93e020dc88f6 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -47,7 +47,7 @@ static __always_inline void vmcs_check16(unsigned long field)
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
"16-bit accessor invalid for 64-bit high field");
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
- "16-bit accessor invalid for 32-bit high field");
+ "16-bit accessor invalid for 32-bit field");
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
"16-bit accessor invalid for natural width field");
}
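
The corrected message reflects the VMCS field encoding: bits 14:13 of an
encoding give the field width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 =
natural width) and bit 0 selects the high half of a 64-bit field, so
only 64-bit fields have "high" variants; "32-bit high field" was a
misnomer. A sketch of the decode these BUILD_BUG_ONs rely on:

    static inline int vmcs_field_width(unsigned long field)
    {
            return (field >> 13) & 0x3;  /* the 0x6000 mask above */
    }

    static inline bool vmcs_field_is_64bit_high(unsigned long field)
    {
            return vmcs_field_width(field) == 1 && (field & 1);
    }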
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index ce3221cd1d01..a55981c5216e 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -13,8 +13,9 @@ extern struct kvm_x86_init_ops vt_init_ops __initdata;
void vmx_hardware_unsetup(void);
int vmx_check_processor_compat(void);
-int vmx_hardware_enable(void);
-void vmx_hardware_disable(void);
+int vmx_enable_virtualization_cpu(void);
+void vmx_disable_virtualization_cpu(void);
+void vmx_emergency_disable_virtualization_cpu(void);
int vmx_vm_init(struct kvm *kvm);
void vmx_vm_destroy(struct kvm *kvm);
int vmx_vcpu_precreate(struct kvm *kvm);
@@ -56,7 +57,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
-int vmx_get_msr_feature(struct kvm_msr_entry *msr);
+int vmx_get_feature_msr(u32 msr, u64 *data);
int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c983c8e434b8..83fe0a78146f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -305,24 +305,237 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
static struct kmem_cache *x86_emulator_cache;
/*
- * When called, it means the previous get/set msr reached an invalid msr.
- * Return true if we want to ignore/silent this failed msr access.
+ * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
+ * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
+ * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
+ * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
+ * MSRs that KVM emulates without strictly requiring host support.
+ * msr_based_features holds MSRs that enumerate features, i.e. are effectively
+ * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
+ * msrs_to_save and emulated_msrs.
*/
-static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
+
+static const u32 msrs_to_save_base[] = {
+ MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
+ MSR_STAR,
+#ifdef CONFIG_X86_64
+ MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
+#endif
+ MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+ MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+ MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
+ MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
+ MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
+ MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
+ MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
+ MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
+ MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
+ MSR_IA32_UMWAIT_CONTROL,
+
+ MSR_IA32_XFD, MSR_IA32_XFD_ERR,
+};
+
+static const u32 msrs_to_save_pmu[] = {
+ MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
+ MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
+ MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
+ MSR_CORE_PERF_GLOBAL_CTRL,
+ MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
+
+ /* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
+ MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
+ MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
+ MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
+ MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
+ MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
+ MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+ MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
+ MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
+
+ MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+ MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+
+ /* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
+ MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+ MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+ MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+ MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
+
+ MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+ MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+ MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+};
+
+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
+ ARRAY_SIZE(msrs_to_save_pmu)];
+static unsigned num_msrs_to_save;
+
+static const u32 emulated_msrs_all[] = {
+ MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+ MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
+
+#ifdef CONFIG_KVM_HYPERV
+ HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+ HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+ HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
+ HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
+ HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
+ HV_X64_MSR_RESET,
+ HV_X64_MSR_VP_INDEX,
+ HV_X64_MSR_VP_RUNTIME,
+ HV_X64_MSR_SCONTROL,
+ HV_X64_MSR_STIMER0_CONFIG,
+ HV_X64_MSR_VP_ASSIST_PAGE,
+ HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
+ HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
+ HV_X64_MSR_SYNDBG_OPTIONS,
+ HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
+ HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
+ HV_X64_MSR_SYNDBG_PENDING_BUFFER,
+#endif
+
+ MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
+ MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
+
+ MSR_IA32_TSC_ADJUST,
+ MSR_IA32_TSC_DEADLINE,
+ MSR_IA32_ARCH_CAPABILITIES,
+ MSR_IA32_PERF_CAPABILITIES,
+ MSR_IA32_MISC_ENABLE,
+ MSR_IA32_MCG_STATUS,
+ MSR_IA32_MCG_CTL,
+ MSR_IA32_MCG_EXT_CTL,
+ MSR_IA32_SMBASE,
+ MSR_SMI_COUNT,
+ MSR_PLATFORM_INFO,
+ MSR_MISC_FEATURES_ENABLES,
+ MSR_AMD64_VIRT_SPEC_CTRL,
+ MSR_AMD64_TSC_RATIO,
+ MSR_IA32_POWER_CTL,
+ MSR_IA32_UCODE_REV,
+
+ /*
+ * KVM always supports the "true" VMX control MSRs, even if the host
+ * does not. The VMX MSRs as a whole are considered "emulated" as KVM
+ * doesn't strictly require them to exist in the host (ignoring that
+ * KVM would refuse to load in the first place if the core set of MSRs
+ * aren't supported).
+ */
+ MSR_IA32_VMX_BASIC,
+ MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+ MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+ MSR_IA32_VMX_TRUE_EXIT_CTLS,
+ MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+ MSR_IA32_VMX_MISC,
+ MSR_IA32_VMX_CR0_FIXED0,
+ MSR_IA32_VMX_CR4_FIXED0,
+ MSR_IA32_VMX_VMCS_ENUM,
+ MSR_IA32_VMX_PROCBASED_CTLS2,
+ MSR_IA32_VMX_EPT_VPID_CAP,
+ MSR_IA32_VMX_VMFUNC,
+
+ MSR_K7_HWCR,
+ MSR_KVM_POLL_CONTROL,
+};
+
+static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
+static unsigned num_emulated_msrs;
+
+/*
+ * List of MSRs that control the existence of MSR-based features, i.e. MSRs
+ * that are effectively CPUID leafs. VMX MSRs are also included in the set of
+ * feature MSRs, but are handled separately to allow expedited lookups.
+ */
+static const u32 msr_based_features_all_except_vmx[] = {
+ MSR_AMD64_DE_CFG,
+ MSR_IA32_UCODE_REV,
+ MSR_IA32_ARCH_CAPABILITIES,
+ MSR_IA32_PERF_CAPABILITIES,
+};
+
+static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
+ (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
+static unsigned int num_msr_based_features;
+
+/*
+ * All feature MSRs except uCode revID, which tracks the currently loaded uCode
+ * patch, are immutable once the vCPU model is defined.
+ */
+static bool kvm_is_immutable_feature_msr(u32 msr)
{
- const char *op = write ? "wrmsr" : "rdmsr";
+ int i;
- if (ignore_msrs) {
- if (report_ignored_msrs)
- kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
- op, msr, data);
- /* Mask the error */
+ if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
return true;
- } else {
+
+ for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
+ if (msr == msr_based_features_all_except_vmx[i])
+ return msr != MSR_IA32_UCODE_REV;
+ }
+
+ return false;
+}
+
+static bool kvm_is_advertised_msr(u32 msr_index)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_msrs_to_save; i++) {
+ if (msrs_to_save[i] == msr_index)
+ return true;
+ }
+
+ for (i = 0; i < num_emulated_msrs; i++) {
+ if (emulated_msrs[i] == msr_index)
+ return true;
+ }
+
+ return false;
+}
+
+typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+ bool host_initiated);
+
+static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
+ u64 *data, bool host_initiated,
+ enum kvm_msr_access rw,
+ msr_access_t msr_access_fn)
+{
+ const char *op = rw == MSR_TYPE_W ? "wrmsr" : "rdmsr";
+ int ret;
+
+ BUILD_BUG_ON(rw != MSR_TYPE_R && rw != MSR_TYPE_W);
+
+ /*
+ * Zero the data on read failures to avoid leaking stack data to the
+ * guest and/or userspace, e.g. if the failure is ignored below.
+ */
+ ret = msr_access_fn(vcpu, msr, data, host_initiated);
+ if (ret && rw == MSR_TYPE_R)
+ *data = 0;
+
+ if (ret != KVM_MSR_RET_UNSUPPORTED)
+ return ret;
+
+ /*
+ * Userspace is allowed to read MSRs, and write '0' to MSRs, that KVM
+ * advertises to userspace, even if an MSR isn't fully supported.
+ * Simply check that @data is '0', which covers both the write '0' case
+ * and all reads (in which case @data is zeroed on failure; see above).
+ */
+ if (host_initiated && !*data && kvm_is_advertised_msr(msr))
+ return 0;
+
+ if (!ignore_msrs) {
kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
- op, msr, data);
- return false;
+ op, msr, *data);
+ return ret;
}
+
+ if (report_ignored_msrs)
+ kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data);
+
+ return 0;
}
static struct kmem_cache *kvm_alloc_emulator_cache(void)
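
kvm_do_msr_access() now centralizes the zero-on-read-failure handling,
the advertised-MSR carve-out, and the ignore_msrs quirks for every
accessor threaded through it. A hypothetical illustration of wiring a
getter through the helper; kvm_example_get_msr() and do_example_get_msr()
are made-up names, not part of the patch:

    static int kvm_example_get_msr(struct kvm_vcpu *vcpu, u32 index,
                                   u64 *data, bool host_initiated)
    {
            if (index != MSR_IA32_UCODE_REV)
                    return KVM_MSR_RET_UNSUPPORTED;

            return rdmsrl_safe(index, data) ? KVM_MSR_RET_UNSUPPORTED : 0;
    }

    static int do_example_get_msr(struct kvm_vcpu *vcpu, unsigned index,
                                  u64 *data)
    {
            /* MSR_TYPE_R zeroes *data on failure before the quirks run. */
            return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R,
                                     kvm_example_get_msr);
    }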
@@ -355,7 +568,7 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
/*
* Disabling irqs at this point since the following code could be
- * interrupted and executed through kvm_arch_hardware_disable()
+ * interrupted and executed through kvm_arch_disable_virtualization_cpu()
*/
local_irq_save(flags);
if (msrs->registered) {
@@ -413,8 +626,7 @@ EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
static void kvm_user_return_msr_cpu_online(void)
{
- unsigned int cpu = smp_processor_id();
- struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
+ struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
u64 value;
int i;
@@ -621,12 +833,6 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
ex->payload = payload;
}
-/* Forcibly leave the nested mode in cases like a vCPU reset */
-static void kvm_leave_nested(struct kvm_vcpu *vcpu)
-{
- kvm_x86_ops.nested_ops->leave_nested(vcpu);
-}
-
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
bool has_payload, unsigned long payload, bool reinject)
@@ -1412,178 +1618,6 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
/*
- * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track
- * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
- * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
- * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
- * MSRs that KVM emulates without strictly requiring host support.
- * msr_based_features holds MSRs that enumerate features, i.e. are effectively
- * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
- * msrs_to_save and emulated_msrs.
- */
-
-static const u32 msrs_to_save_base[] = {
- MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
- MSR_STAR,
-#ifdef CONFIG_X86_64
- MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
-#endif
- MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
- MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
- MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
- MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
- MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
- MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
- MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
- MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
- MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
- MSR_IA32_UMWAIT_CONTROL,
-
- MSR_IA32_XFD, MSR_IA32_XFD_ERR,
-};
-
-static const u32 msrs_to_save_pmu[] = {
- MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
- MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
- MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
- MSR_CORE_PERF_GLOBAL_CTRL,
- MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
-
- /* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
- MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
- MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
- MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
- MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
- MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
- MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
- MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
- MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
-
- MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
- MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
-
- /* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
- MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
- MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
- MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
- MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
-
- MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
- MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
- MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
-};
-
-static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
- ARRAY_SIZE(msrs_to_save_pmu)];
-static unsigned num_msrs_to_save;
-
-static const u32 emulated_msrs_all[] = {
- MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
- MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
-
-#ifdef CONFIG_KVM_HYPERV
- HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
- HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
- HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
- HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
- HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
- HV_X64_MSR_RESET,
- HV_X64_MSR_VP_INDEX,
- HV_X64_MSR_VP_RUNTIME,
- HV_X64_MSR_SCONTROL,
- HV_X64_MSR_STIMER0_CONFIG,
- HV_X64_MSR_VP_ASSIST_PAGE,
- HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
- HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
- HV_X64_MSR_SYNDBG_OPTIONS,
- HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
- HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
- HV_X64_MSR_SYNDBG_PENDING_BUFFER,
-#endif
-
- MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
- MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
-
- MSR_IA32_TSC_ADJUST,
- MSR_IA32_TSC_DEADLINE,
- MSR_IA32_ARCH_CAPABILITIES,
- MSR_IA32_PERF_CAPABILITIES,
- MSR_IA32_MISC_ENABLE,
- MSR_IA32_MCG_STATUS,
- MSR_IA32_MCG_CTL,
- MSR_IA32_MCG_EXT_CTL,
- MSR_IA32_SMBASE,
- MSR_SMI_COUNT,
- MSR_PLATFORM_INFO,
- MSR_MISC_FEATURES_ENABLES,
- MSR_AMD64_VIRT_SPEC_CTRL,
- MSR_AMD64_TSC_RATIO,
- MSR_IA32_POWER_CTL,
- MSR_IA32_UCODE_REV,
-
- /*
- * KVM always supports the "true" VMX control MSRs, even if the host
- * does not. The VMX MSRs as a whole are considered "emulated" as KVM
- * doesn't strictly require them to exist in the host (ignoring that
- * KVM would refuse to load in the first place if the core set of MSRs
- * aren't supported).
- */
- MSR_IA32_VMX_BASIC,
- MSR_IA32_VMX_TRUE_PINBASED_CTLS,
- MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
- MSR_IA32_VMX_TRUE_EXIT_CTLS,
- MSR_IA32_VMX_TRUE_ENTRY_CTLS,
- MSR_IA32_VMX_MISC,
- MSR_IA32_VMX_CR0_FIXED0,
- MSR_IA32_VMX_CR4_FIXED0,
- MSR_IA32_VMX_VMCS_ENUM,
- MSR_IA32_VMX_PROCBASED_CTLS2,
- MSR_IA32_VMX_EPT_VPID_CAP,
- MSR_IA32_VMX_VMFUNC,
-
- MSR_K7_HWCR,
- MSR_KVM_POLL_CONTROL,
-};
-
-static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
-static unsigned num_emulated_msrs;
-
-/*
- * List of MSRs that control the existence of MSR-based features, i.e. MSRs
- * that are effectively CPUID leafs. VMX MSRs are also included in the set of
- * feature MSRs, but are handled separately to allow expedited lookups.
- */
-static const u32 msr_based_features_all_except_vmx[] = {
- MSR_AMD64_DE_CFG,
- MSR_IA32_UCODE_REV,
- MSR_IA32_ARCH_CAPABILITIES,
- MSR_IA32_PERF_CAPABILITIES,
-};
-
-static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
- (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
-static unsigned int num_msr_based_features;
-
-/*
- * All feature MSRs except uCode revID, which tracks the currently loaded uCode
- * patch, are immutable once the vCPU model is defined.
- */
-static bool kvm_is_immutable_feature_msr(u32 msr)
-{
- int i;
-
- if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
- return true;
-
- for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
- if (msr == msr_based_features_all_except_vmx[i])
- return msr != MSR_IA32_UCODE_REV;
- }
-
- return false;
-}
-
-/*
* Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
* does not yet virtualize. These include:
* 10 - MISC_PACKAGE_CTRLS
@@ -1660,40 +1694,31 @@ static u64 kvm_get_arch_capabilities(void)
return data;
}
-static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
+static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+ bool host_initiated)
{
- switch (msr->index) {
+ WARN_ON_ONCE(!host_initiated);
+
+ switch (index) {
case MSR_IA32_ARCH_CAPABILITIES:
- msr->data = kvm_get_arch_capabilities();
+ *data = kvm_get_arch_capabilities();
break;
case MSR_IA32_PERF_CAPABILITIES:
- msr->data = kvm_caps.supported_perf_cap;
+ *data = kvm_caps.supported_perf_cap;
break;
case MSR_IA32_UCODE_REV:
- rdmsrl_safe(msr->index, &msr->data);
+ rdmsrl_safe(index, data);
break;
default:
- return kvm_x86_call(get_msr_feature)(msr);
+ return kvm_x86_call(get_feature_msr)(index, data);
}
return 0;
}
-static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
- struct kvm_msr_entry msr;
- int r;
-
- /* Unconditionally clear the output for simplicity */
- msr.data = 0;
- msr.index = index;
- r = kvm_get_msr_feature(&msr);
-
- if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
- r = 0;
-
- *data = msr.data;
-
- return r;
+ return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R,
+ kvm_get_feature_msr);
}
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -1880,16 +1905,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
return kvm_x86_call(set_msr)(vcpu, &msr);
}
+static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+ bool host_initiated)
+{
+ return __kvm_set_msr(vcpu, index, *data, host_initiated);
+}
+
static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 data, bool host_initiated)
{
- int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
-
- if (ret == KVM_MSR_RET_INVALID)
- if (kvm_msr_ignored_check(index, data, true))
- ret = 0;
-
- return ret;
+ return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W,
+ _kvm_set_msr);
}
/*
@@ -1928,31 +1954,25 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 *data, bool host_initiated)
{
- int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
-
- if (ret == KVM_MSR_RET_INVALID) {
- /* Unconditionally clear *data for simplicity */
- *data = 0;
- if (kvm_msr_ignored_check(index, 0, false))
- ret = 0;
- }
-
- return ret;
+ return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R,
+ __kvm_get_msr);
}
-static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
return KVM_MSR_RET_FILTERED;
return kvm_get_msr_ignored_check(vcpu, index, data, false);
}
+EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
-static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
return KVM_MSR_RET_FILTERED;
return kvm_set_msr_ignored_check(vcpu, index, data, false);
}
+EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
@@ -1999,7 +2019,7 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
static u64 kvm_msr_reason(int r)
{
switch (r) {
- case KVM_MSR_RET_INVALID:
+ case KVM_MSR_RET_UNSUPPORTED:
return KVM_MSR_EXIT_REASON_UNKNOWN;
case KVM_MSR_RET_FILTERED:
return KVM_MSR_EXIT_REASON_FILTER;
@@ -2162,31 +2182,34 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{
u32 msr = kvm_rcx_read(vcpu);
u64 data;
- fastpath_t ret = EXIT_FASTPATH_NONE;
+ fastpath_t ret;
+ bool handled;
kvm_vcpu_srcu_read_lock(vcpu);
switch (msr) {
case APIC_BASE_MSR + (APIC_ICR >> 4):
data = kvm_read_edx_eax(vcpu);
- if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
- kvm_skip_emulated_instruction(vcpu);
- ret = EXIT_FASTPATH_EXIT_HANDLED;
- }
+ handled = !handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
break;
case MSR_IA32_TSC_DEADLINE:
data = kvm_read_edx_eax(vcpu);
- if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
- kvm_skip_emulated_instruction(vcpu);
- ret = EXIT_FASTPATH_REENTER_GUEST;
- }
+ handled = !handle_fastpath_set_tscdeadline(vcpu, data);
break;
default:
+ handled = false;
break;
}
- if (ret != EXIT_FASTPATH_NONE)
+ if (handled) {
+ if (!kvm_skip_emulated_instruction(vcpu))
+ ret = EXIT_FASTPATH_EXIT_USERSPACE;
+ else
+ ret = EXIT_FASTPATH_REENTER_GUEST;
trace_kvm_msr_write(msr, data);
+ } else {
+ ret = EXIT_FASTPATH_NONE;
+ }
kvm_vcpu_srcu_read_unlock(vcpu);
@@ -3746,18 +3769,6 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
-static bool kvm_is_msr_to_save(u32 msr_index)
-{
- unsigned int i;
-
- for (i = 0; i < num_msrs_to_save; i++) {
- if (msrs_to_save[i] == msr_index)
- return true;
- }
-
- return false;
-}
-
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u32 msr = msr_info->index;
@@ -4139,15 +4150,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
- /*
- * Userspace is allowed to write '0' to MSRs that KVM reports
- * as to-be-saved, even if an MSRs isn't fully supported.
- */
- if (msr_info->host_initiated && !data &&
- kvm_is_msr_to_save(msr))
- break;
-
- return KVM_MSR_RET_INVALID;
+ return KVM_MSR_RET_UNSUPPORTED;
}
return 0;
}
@@ -4498,17 +4501,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info);
- /*
- * Userspace is allowed to read MSRs that KVM reports as
- * to-be-saved, even if an MSR isn't fully supported.
- */
- if (msr_info->host_initiated &&
- kvm_is_msr_to_save(msr_info->index)) {
- msr_info->data = 0;
- break;
- }
-
- return KVM_MSR_RET_INVALID;
+ return KVM_MSR_RET_UNSUPPORTED;
}
return 0;
}
@@ -4946,7 +4939,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
break;
}
case KVM_GET_MSRS:
- r = msr_io(NULL, argp, do_get_msr_feature, 1);
+ r = msr_io(NULL, argp, do_get_feature_msr, 1);
break;
#ifdef CONFIG_KVM_HYPERV
case KVM_GET_SUPPORTED_HV_CPUID:
@@ -7383,11 +7376,9 @@ out:
static void kvm_probe_feature_msr(u32 msr_index)
{
- struct kvm_msr_entry msr = {
- .index = msr_index,
- };
+ u64 data;
- if (kvm_get_msr_feature(&msr))
+ if (kvm_get_feature_msr(NULL, msr_index, &data, true))
return;
msr_based_features[num_msr_based_features++] = msr_index;
@@ -8865,60 +8856,13 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
return 1;
}
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- int emulation_type)
+static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
+ gpa_t cr2_or_gpa,
+ int emulation_type)
{
- gpa_t gpa = cr2_or_gpa;
- kvm_pfn_t pfn;
-
if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
return false;
- if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
- WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
- return false;
-
- if (!vcpu->arch.mmu->root_role.direct) {
- /*
- * Write permission should be allowed since only
- * write access need to be emulated.
- */
- gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
-
- /*
- * If the mapping is invalid in guest, let cpu retry
- * it to generate fault.
- */
- if (gpa == INVALID_GPA)
- return true;
- }
-
- /*
- * Do not retry the unhandleable instruction if it faults on the
- * readonly host memory, otherwise it will goto a infinite loop:
- * retry instruction -> write #PF -> emulation fail -> retry
- * instruction -> ...
- */
- pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
-
- /*
- * If the instruction failed on the error pfn, it can not be fixed,
- * report the error to userspace.
- */
- if (is_error_noslot_pfn(pfn))
- return false;
-
- kvm_release_pfn_clean(pfn);
-
- /*
- * If emulation may have been triggered by a write to a shadowed page
- * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
- * guest to let the CPU re-execute the instruction in the hope that the
- * CPU can cleanly execute the instruction that KVM failed to emulate.
- */
- if (vcpu->kvm->arch.indirect_shadow_pages)
- kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
-
/*
* If the failed instruction faulted on an access to page tables that
* are used to translate any part of the instruction, KVM can't resolve
@@ -8929,54 +8873,24 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* then zap the SPTE to unprotect the gfn, and then do it all over
* again. Report the error to userspace.
*/
- return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP);
-}
-
-static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
- gpa_t cr2_or_gpa, int emulation_type)
-{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
-
- last_retry_eip = vcpu->arch.last_retry_eip;
- last_retry_addr = vcpu->arch.last_retry_addr;
+ if (emulation_type & EMULTYPE_WRITE_PF_TO_SP)
+ return false;
/*
- * If the emulation is caused by #PF and it is non-page_table
- * writing instruction, it means the VM-EXIT is caused by shadow
- * page protected, we can zap the shadow page and retry this
- * instruction directly.
- *
- * Note: if the guest uses a non-page-table modifying instruction
- * on the PDE that points to the instruction, then we will unmap
- * the instruction and go to an infinite loop. So, we cache the
- * last retried eip and the last fault address, if we meet the eip
- * and the address again, we can break out of the potential infinite
- * loop.
+ * If emulation may have been triggered by a write to a shadowed page
+ * table, unprotect the gfn (zap any relevant SPTEs) and re-enter the
+ * guest to let the CPU re-execute the instruction in the hope that the
+ * CPU can cleanly execute the instruction that KVM failed to emulate.
*/
- vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
-
- if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
- return false;
-
- if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
- WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
- return false;
-
- if (x86_page_table_writing_insn(ctxt))
- return false;
-
- if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
- return false;
-
- vcpu->arch.last_retry_eip = ctxt->eip;
- vcpu->arch.last_retry_addr = cr2_or_gpa;
-
- if (!vcpu->arch.mmu->root_role.direct)
- gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
-
- kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+ __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
+ /*
+ * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible
+ * all SPTEs were already zapped by a different task. The alternative
+ * is to report the error to userspace and likely terminate the guest,
+ * and the last_retry_{eip,addr} checks will prevent retrying the page
+ * fault indefinitely, i.e. there's nothing to lose by retrying.
+ */
return true;
}
@@ -9176,6 +9090,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
bool writeback = true;
+ if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
+ (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
+ WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))))
+ emulation_type &= ~EMULTYPE_ALLOW_RETRY_PF;
+
r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
if (r != X86EMUL_CONTINUE) {
if (r == X86EMUL_RETRY_INSTR || r == X86EMUL_PROPAGATE_FAULT)
@@ -9206,8 +9125,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
- if (reexecute_instruction(vcpu, cr2_or_gpa,
- emulation_type))
+ if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
+ emulation_type))
return 1;
if (ctxt->have_exception &&
@@ -9254,7 +9173,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
return 1;
}
- if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
+ /*
+ * If emulation was caused by a write-protection #PF on a non-page_table
+ * writing instruction, try to unprotect the gfn, i.e. zap shadow pages,
+ * and retry the instruction, as the vCPU is likely no longer using the
+ * gfn as a page table.
+ */
+ if ((emulation_type & EMULTYPE_ALLOW_RETRY_PF) &&
+ !x86_page_table_writing_insn(ctxt) &&
+ kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa))
return 1;
/* this is needed for vmware backdoor interface to work since it
@@ -9285,7 +9212,8 @@ restart:
return 1;
if (r == EMULATION_FAILED) {
- if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type))
+ if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa,
+ emulation_type))
return 1;
return handle_emulation_failure(vcpu, emulation_type);
@@ -9753,7 +9681,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
guard(mutex)(&vendor_module_lock);
- if (kvm_x86_ops.hardware_enable) {
+ if (kvm_x86_ops.enable_virtualization_cpu) {
pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
return -EEXIST;
}
@@ -9880,7 +9808,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
return 0;
out_unwind_ops:
- kvm_x86_ops.hardware_enable = NULL;
+ kvm_x86_ops.enable_virtualization_cpu = NULL;
kvm_x86_call(hardware_unsetup)();
out_mmu_exit:
kvm_mmu_vendor_module_exit();
@@ -9921,56 +9849,11 @@ void kvm_x86_vendor_exit(void)
WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
#endif
mutex_lock(&vendor_module_lock);
- kvm_x86_ops.hardware_enable = NULL;
+ kvm_x86_ops.enable_virtualization_cpu = NULL;
mutex_unlock(&vendor_module_lock);
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
-static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
-{
- /*
- * The vCPU has halted, e.g. executed HLT. Update the run state if the
- * local APIC is in-kernel, the run loop will detect the non-runnable
- * state and halt the vCPU. Exit to userspace if the local APIC is
- * managed by userspace, in which case userspace is responsible for
- * handling wake events.
- */
- ++vcpu->stat.halt_exits;
- if (lapic_in_kernel(vcpu)) {
- vcpu->arch.mp_state = state;
- return 1;
- } else {
- vcpu->run->exit_reason = reason;
- return 0;
- }
-}
-
-int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
-{
- return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
-
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
-{
- int ret = kvm_skip_emulated_instruction(vcpu);
- /*
- * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
- * KVM_EXIT_DEBUG here.
- */
- return kvm_emulate_halt_noskip(vcpu) && ret;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
-
-int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
-{
- int ret = kvm_skip_emulated_instruction(vcpu);
-
- return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
- KVM_EXIT_AP_RESET_HOLD) && ret;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
-
#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
unsigned long clock_type)
@@ -11207,6 +11090,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (vcpu->arch.apic_attention)
kvm_lapic_sync_from_vapic(vcpu);
+ if (unlikely(exit_fastpath == EXIT_FASTPATH_EXIT_USERSPACE))
+ return 0;
+
r = kvm_x86_call(handle_exit)(vcpu, exit_fastpath);
return r;
@@ -11220,6 +11106,67 @@ out:
return r;
}
+static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+ !vcpu->arch.apf.halted);
+}
+
+static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+ if (!list_empty_careful(&vcpu->async_pf.done))
+ return true;
+
+ if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
+ kvm_apic_init_sipi_allowed(vcpu))
+ return true;
+
+ if (vcpu->arch.pv.pv_unhalted)
+ return true;
+
+ if (kvm_is_exception_pending(vcpu))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+ (vcpu->arch.nmi_pending &&
+ kvm_x86_call(nmi_allowed)(vcpu, false)))
+ return true;
+
+#ifdef CONFIG_KVM_SMM
+ if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+ (vcpu->arch.smi_pending &&
+ kvm_x86_call(smi_allowed)(vcpu, false)))
+ return true;
+#endif
+
+ if (kvm_test_request(KVM_REQ_PMI, vcpu))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
+ return true;
+
+ if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
+ return true;
+
+ if (kvm_hv_has_stimer_pending(vcpu))
+ return true;
+
+ if (is_guest_mode(vcpu) &&
+ kvm_x86_ops.nested_ops->has_events &&
+ kvm_x86_ops.nested_ops->has_events(vcpu, false))
+ return true;
+
+ if (kvm_xen_has_pending_events(vcpu))
+ return true;
+
+ return false;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+}
+
/* Called within kvm->srcu read side. */
static inline int vcpu_block(struct kvm_vcpu *vcpu)
{
@@ -11291,12 +11238,6 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
return 1;
}
-static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
- !vcpu->arch.apf.halted);
-}
-
/* Called within kvm->srcu read side. */
static int vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -11348,6 +11289,98 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
return r;
}
+static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
+{
+ /*
+ * The vCPU has halted, e.g. executed HLT. Update the run state if the
+ * local APIC is in-kernel; the run loop will detect the non-runnable
+ * state and halt the vCPU. Exit to userspace if the local APIC is
+ * managed by userspace, in which case userspace is responsible for
+ * handling wake events.
+ */
+ ++vcpu->stat.halt_exits;
+ if (lapic_in_kernel(vcpu)) {
+ if (kvm_vcpu_has_events(vcpu))
+ vcpu->arch.pv.pv_unhalted = false;
+ else
+ vcpu->arch.mp_state = state;
+ return 1;
+ } else {
+ vcpu->run->exit_reason = reason;
+ return 0;
+ }
+}
+
+int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
+{
+ return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+ int ret = kvm_skip_emulated_instruction(vcpu);
+ /*
+ * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
+ return kvm_emulate_halt_noskip(vcpu) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+
+fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ kvm_vcpu_srcu_read_lock(vcpu);
+ ret = kvm_emulate_halt(vcpu);
+ kvm_vcpu_srcu_read_unlock(vcpu);
+
+ if (!ret)
+ return EXIT_FASTPATH_EXIT_USERSPACE;
+
+ if (kvm_vcpu_running(vcpu))
+ return EXIT_FASTPATH_REENTER_GUEST;
+
+ return EXIT_FASTPATH_EXIT_HANDLED;
+}
+EXPORT_SYMBOL_GPL(handle_fastpath_hlt);
+
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+ int ret = kvm_skip_emulated_instruction(vcpu);
+
+ return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
+ KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_apicv_active(vcpu) &&
+ kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
+}
+
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.preempted_in_kernel;
+}
+
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+#ifdef CONFIG_KVM_SMM
+ kvm_test_request(KVM_REQ_SMI, vcpu) ||
+#endif
+ kvm_test_request(KVM_REQ_EVENT, vcpu))
+ return true;
+
+ return kvm_arch_dy_has_pending_interrupt(vcpu);
+}
+
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
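
Together with the EXIT_REASON_HLT case added to
vmx_exit_handlers_fastpath() and the EXIT_FASTPATH_EXIT_USERSPACE check
in vcpu_enter_guest(), the HLT fastpath resolves roughly as:

    vmx_exit_handlers_fastpath()
      -> handle_fastpath_hlt()
           kvm_emulate_halt() == 0 (userspace APIC) -> EXIT_FASTPATH_EXIT_USERSPACE
           vCPU still runnable (wake event pending) -> EXIT_FASTPATH_REENTER_GUEST
           otherwise (vCPU genuinely halted)        -> EXIT_FASTPATH_EXIT_HANDLED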
@@ -12264,8 +12297,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
- vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
-
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
@@ -12431,6 +12462,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
if (!init_event) {
vcpu->arch.smbase = 0x30000;
+ vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
+
vcpu->arch.msr_misc_features_enables = 0;
vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
@@ -12516,7 +12549,17 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
-int kvm_arch_hardware_enable(void)
+void kvm_arch_enable_virtualization(void)
+{
+ cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
+}
+
+void kvm_arch_disable_virtualization(void)
+{
+ cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable_virtualization_cpu);
+}
+
+int kvm_arch_enable_virtualization_cpu(void)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
@@ -12532,7 +12575,7 @@ int kvm_arch_hardware_enable(void)
if (ret)
return ret;
- ret = kvm_x86_call(hardware_enable)();
+ ret = kvm_x86_call(enable_virtualization_cpu)();
if (ret != 0)
return ret;
@@ -12612,9 +12655,9 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
- kvm_x86_call(hardware_disable)();
+ kvm_x86_call(disable_virtualization_cpu)();
drop_user_return_notifiers();
}
@@ -13162,87 +13205,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm_arch_free_memslot(kvm, old);
}
-static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
-{
- if (!list_empty_careful(&vcpu->async_pf.done))
- return true;
-
- if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
- kvm_apic_init_sipi_allowed(vcpu))
- return true;
-
- if (vcpu->arch.pv.pv_unhalted)
- return true;
-
- if (kvm_is_exception_pending(vcpu))
- return true;
-
- if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
- (vcpu->arch.nmi_pending &&
- kvm_x86_call(nmi_allowed)(vcpu, false)))
- return true;
-
-#ifdef CONFIG_KVM_SMM
- if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
- (vcpu->arch.smi_pending &&
- kvm_x86_call(smi_allowed)(vcpu, false)))
- return true;
-#endif
-
- if (kvm_test_request(KVM_REQ_PMI, vcpu))
- return true;
-
- if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
- return true;
-
- if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
- return true;
-
- if (kvm_hv_has_stimer_pending(vcpu))
- return true;
-
- if (is_guest_mode(vcpu) &&
- kvm_x86_ops.nested_ops->has_events &&
- kvm_x86_ops.nested_ops->has_events(vcpu, false))
- return true;
-
- if (kvm_xen_has_pending_events(vcpu))
- return true;
-
- return false;
-}
-
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
-}
-
-bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_apicv_active(vcpu) &&
- kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
-}
-
-bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.preempted_in_kernel;
-}
-
-bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
-{
- if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
- return true;
-
- if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
-#ifdef CONFIG_KVM_SMM
- kvm_test_request(KVM_REQ_SMI, vcpu) ||
-#endif
- kvm_test_request(KVM_REQ_EVENT, vcpu))
- return true;
-
- return kvm_arch_dy_has_pending_interrupt(vcpu);
-}
-
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.guest_state_protected)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 50596f6f8320..a84c48ef5278 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -103,11 +103,18 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
return max(val, min);
}
-#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
+#define MSR_IA32_CR_PAT_DEFAULT \
+ PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC)
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
+/* Forcibly leave the nested mode in cases like a vCPU reset */
+static inline void kvm_leave_nested(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops.nested_ops->leave_nested(vcpu);
+}
+
static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
return vcpu->arch.last_vmentry_cpu != -1;
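
The PAT_VALUE() form encodes the same constant as before. Assuming the
macro packs one byte per entry starting with PA0 in the low byte, and
given the standard encodings UC = 0x00, WT = 0x04, WB = 0x06 and
UC_MINUS = 0x07:

    PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC)
        == 0x06 | (0x04 << 8) | (0x07 << 16) | (0x00 << 24) | ...
        == 0x0007040600070406ULL

i.e. bit-for-bit the old open-coded default, now self-describing.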
@@ -334,6 +341,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
+fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu);
extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;
@@ -504,13 +512,26 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
+enum kvm_msr_access {
+ MSR_TYPE_R = BIT(0),
+ MSR_TYPE_W = BIT(1),
+ MSR_TYPE_RW = MSR_TYPE_R | MSR_TYPE_W,
+};
+
/*
* Internal error codes that are used to indicate that MSR emulation encountered
- * an error that should result in #GP in the guest, unless userspace
- * handles it.
+ * an error that should result in #GP in the guest, unless userspace handles it.
+ * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
+ * as part of KVM's lightly documented internal KVM_RUN return codes.
+ *
+ * UNSUPPORTED - The MSR isn't supported, either because it is completely
+ * unknown to KVM, or because the MSR should not exist according
+ * to the vCPU model.
+ *
+ * FILTERED - Access to the MSR is denied by a userspace MSR filter.
*/
-#define KVM_MSR_RET_INVALID 2 /* in-kernel MSR emulation #GP condition */
-#define KVM_MSR_RET_FILTERED 3 /* #GP due to userspace MSR filter */
+#define KVM_MSR_RET_UNSUPPORTED 2
+#define KVM_MSR_RET_FILTERED 3
#define __cr4_reserved_bits(__cpu_has, __c) \
({ \
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 90afb488b396..b2eff07d65e4 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -16,6 +16,11 @@
cmpxchg8b (\reg)
.endm
+.macro read64_nonatomic reg
+ movl (\reg), %eax
+ movl 4(\reg), %edx
+.endm
+
SYM_FUNC_START(atomic64_read_cx8)
read64 %ecx
RET
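
The non-atomic read is safe in the callers converted below because each
immediately enters a cmpxchg8b retry loop: a torn initial read can cost
at most one spurious iteration, as cmpxchg8b fails on mismatch and
returns the actual value. A C sketch of the same pattern:

    #include <stdatomic.h>

    long long add_return_sketch(long long i, _Atomic long long *v)
    {
            /*
             * The initial read only seeds the loop; in the assembly it
             * is two 32-bit moves and may observe a torn value, which
             * the compare-exchange detects, refreshing 'old'.
             */
            long long old = atomic_load_explicit(v, memory_order_relaxed);
            long long new;

            do {
                    new = old + i;  /* recomputed from the fresh 'old' */
            } while (!atomic_compare_exchange_weak(v, &old, new));

            return new;
    }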
@@ -51,7 +56,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
movl %edx, %edi
movl %ecx, %ebp
- read64 %ecx
+ read64_nonatomic %ecx
1:
movl %eax, %ebx
movl %edx, %ecx
@@ -79,7 +84,7 @@ addsub_return sub sub sbb
SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebx
- read64 %esi
+ read64_nonatomic %esi
1:
movl %eax, %ebx
movl %edx, %ecx
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 8d3a00e5c528..690fbf48e853 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -57,7 +57,6 @@ obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o
obj-$(CONFIG_AMD_NUMA) += amdtopology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
-obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 9332b36a1091..628833afee37 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
+#include <linux/numa_memblks.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 6ce10e3c6228..64e5cdb2460a 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/sort.h>
+#include <linux/numa_memblks.h>
#include <asm/e820/api.h>
#include <asm/proto.h>
@@ -22,16 +23,6 @@
#include "numa_internal.h"
int numa_off;
-nodemask_t numa_nodes_parsed __initdata;
-
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-
-static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
-static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;
-
-static int numa_distance_cnt;
-static u8 *numa_distance;
static __init int numa_setup(char *opt)
{
@@ -124,456 +115,27 @@ void __init setup_node_to_cpumask_map(void)
pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
-static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
- struct numa_meminfo *mi)
-{
- /* ignore zero length blks */
- if (start == end)
- return 0;
-
- /* whine about and ignore invalid blks */
- if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
- pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
- nid, start, end - 1);
- return 0;
- }
-
- if (mi->nr_blks >= NR_NODE_MEMBLKS) {
- pr_err("too many memblk ranges\n");
- return -EINVAL;
- }
-
- mi->blk[mi->nr_blks].start = start;
- mi->blk[mi->nr_blks].end = end;
- mi->blk[mi->nr_blks].nid = nid;
- mi->nr_blks++;
- return 0;
-}
-
-/**
- * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
- * @idx: Index of memblk to remove
- * @mi: numa_meminfo to remove memblk from
- *
- * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
- * decrementing @mi->nr_blks.
- */
-void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
-{
- mi->nr_blks--;
- memmove(&mi->blk[idx], &mi->blk[idx + 1],
- (mi->nr_blks - idx) * sizeof(mi->blk[0]));
-}
-
-/**
- * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
- * @dst: numa_meminfo to append block to
- * @idx: Index of memblk to remove
- * @src: numa_meminfo to remove memblk from
- */
-static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
- struct numa_meminfo *src)
-{
- dst->blk[dst->nr_blks++] = src->blk[idx];
- numa_remove_memblk_from(idx, src);
-}
-
-/**
- * numa_add_memblk - Add one numa_memblk to numa_meminfo
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * Add a new memblk to the default numa_meminfo.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- return numa_add_memblk_to(nid, start, end, &numa_meminfo);
-}
-
-/* Allocate NODE_DATA for a node on the local memory */
-static void __init alloc_node_data(int nid)
+static int __init numa_register_nodes(void)
{
- const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
- u64 nd_pa;
- void *nd;
- int tnid;
-
- /*
- * Allocate node data. Try node-local memory and then any node.
- * Never allocate in DMA zone.
- */
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa) {
- pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
- nd_size, nid);
- return;
- }
- nd = __va(nd_pa);
-
- /* report and initialize */
- printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
-
- node_set_online(nid);
-}
-
-/**
- * numa_cleanup_meminfo - Cleanup a numa_meminfo
- * @mi: numa_meminfo to clean up
- *
- * Sanitize @mi by merging and removing unnecessary memblks. Also check for
- * conflicts and clear unused memblks.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
-{
- const u64 low = 0;
- const u64 high = PFN_PHYS(max_pfn);
- int i, j, k;
-
- /* first, trim all entries */
- for (i = 0; i < mi->nr_blks; i++) {
- struct numa_memblk *bi = &mi->blk[i];
-
- /* move / save reserved memory ranges */
- if (!memblock_overlaps_region(&memblock.memory,
- bi->start, bi->end - bi->start)) {
- numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
- continue;
- }
-
- /* make sure all non-reserved blocks are inside the limits */
- bi->start = max(bi->start, low);
-
- /* preserve info for non-RAM areas above 'max_pfn': */
- if (bi->end > high) {
- numa_add_memblk_to(bi->nid, high, bi->end,
- &numa_reserved_meminfo);
- bi->end = high;
- }
-
- /* and there's no empty block */
- if (bi->start >= bi->end)
- numa_remove_memblk_from(i--, mi);
- }
-
- /* merge neighboring / overlapping entries */
- for (i = 0; i < mi->nr_blks; i++) {
- struct numa_memblk *bi = &mi->blk[i];
-
- for (j = i + 1; j < mi->nr_blks; j++) {
- struct numa_memblk *bj = &mi->blk[j];
- u64 start, end;
-
- /*
- * See whether there are overlapping blocks. Whine
- * about but allow overlaps of the same nid. They
- * will be merged below.
- */
- if (bi->end > bj->start && bi->start < bj->end) {
- if (bi->nid != bj->nid) {
- pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
- bi->nid, bi->start, bi->end - 1,
- bj->nid, bj->start, bj->end - 1);
- return -EINVAL;
- }
- pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
- bi->nid, bi->start, bi->end - 1,
- bj->start, bj->end - 1);
- }
-
- /*
- * Join together blocks on the same node, holes
- * between which don't overlap with memory on other
- * nodes.
- */
- if (bi->nid != bj->nid)
- continue;
- start = min(bi->start, bj->start);
- end = max(bi->end, bj->end);
- for (k = 0; k < mi->nr_blks; k++) {
- struct numa_memblk *bk = &mi->blk[k];
-
- if (bi->nid == bk->nid)
- continue;
- if (start < bk->end && end > bk->start)
- break;
- }
- if (k < mi->nr_blks)
- continue;
- printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
- bi->nid, bi->start, bi->end - 1, bj->start,
- bj->end - 1, start, end - 1);
- bi->start = start;
- bi->end = end;
- numa_remove_memblk_from(j--, mi);
- }
- }
-
- /* clear unused ones */
- for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
- mi->blk[i].start = mi->blk[i].end = 0;
- mi->blk[i].nid = NUMA_NO_NODE;
- }
-
- return 0;
-}
-
-/*
- * Set nodes, which have memory in @mi, in *@nodemask.
- */
-static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
- const struct numa_meminfo *mi)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
- if (mi->blk[i].start != mi->blk[i].end &&
- mi->blk[i].nid != NUMA_NO_NODE)
- node_set(mi->blk[i].nid, *nodemask);
-}
-
-/**
- * numa_reset_distance - Reset NUMA distance table
- *
- * The current table is freed. The next numa_set_distance() call will
- * create a new one.
- */
-void __init numa_reset_distance(void)
-{
- size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
-
- /* numa_distance could be 1LU marking allocation failure, test cnt */
- if (numa_distance_cnt)
- memblock_free(numa_distance, size);
- numa_distance_cnt = 0;
- numa_distance = NULL; /* enable table creation */
-}
-
-static int __init numa_alloc_distance(void)
-{
- nodemask_t nodes_parsed;
- size_t size;
- int i, j, cnt = 0;
- u64 phys;
-
- /* size the new table and allocate it */
- nodes_parsed = numa_nodes_parsed;
- numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
-
- for_each_node_mask(i, nodes_parsed)
- cnt = i;
- cnt++;
- size = cnt * cnt * sizeof(numa_distance[0]);
-
- phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
- PFN_PHYS(max_pfn_mapped));
- if (!phys) {
- pr_warn("Warning: can't allocate distance table!\n");
- /* don't retry until explicitly reset */
- numa_distance = (void *)1LU;
- return -ENOMEM;
- }
-
- numa_distance = __va(phys);
- numa_distance_cnt = cnt;
-
- /* fill with the default distances */
- for (i = 0; i < cnt; i++)
- for (j = 0; j < cnt; j++)
- numa_distance[i * cnt + j] = i == j ?
- LOCAL_DISTANCE : REMOTE_DISTANCE;
- printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
-
- return 0;
-}
-
-/**
- * numa_set_distance - Set NUMA distance from one NUMA to another
- * @from: the 'from' node to set distance
- * @to: the 'to' node to set distance
- * @distance: NUMA distance
- *
- * Set the distance from node @from to @to to @distance. If distance table
- * doesn't exist, one which is large enough to accommodate all the currently
- * known nodes will be created.
- *
- * If such table cannot be allocated, a warning is printed and further
- * calls are ignored until the distance table is reset with
- * numa_reset_distance().
- *
- * If @from or @to is higher than the highest known node or lower than zero
- * at the time of table creation or @distance doesn't make sense, the call
- * is ignored.
- * This is to allow simplification of specific NUMA config implementations.
- */
-void __init numa_set_distance(int from, int to, int distance)
-{
- if (!numa_distance && numa_alloc_distance() < 0)
- return;
-
- if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
- from < 0 || to < 0) {
- pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- if ((u8)distance != distance ||
- (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- numa_distance[from * numa_distance_cnt + to] = distance;
-}
-
-int __node_distance(int from, int to)
-{
- if (from >= numa_distance_cnt || to >= numa_distance_cnt)
- return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
- return numa_distance[from * numa_distance_cnt + to];
-}
-EXPORT_SYMBOL(__node_distance);
-
-/*
- * Mark all currently memblock-reserved physical memory (which covers the
- * kernel's own memory ranges) as hot-unswappable.
- */
-static void __init numa_clear_kernel_node_hotplug(void)
-{
- nodemask_t reserved_nodemask = NODE_MASK_NONE;
- struct memblock_region *mb_region;
- int i;
-
- /*
- * We have to do some preprocessing of memblock regions, to
- * make them suitable for reservation.
- *
- * At this time, all memory regions reserved by memblock are
- * used by the kernel, but those regions are not split up
- * along node boundaries yet, and don't necessarily have their
- * node ID set yet either.
- *
- * So iterate over all memory known to the x86 architecture,
- * and use those ranges to set the nid in memblock.reserved.
- * This will split up the memblock regions along node
- * boundaries and will set the node IDs as well.
- */
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- struct numa_memblk *mb = numa_meminfo.blk + i;
- int ret;
-
- ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
- WARN_ON_ONCE(ret);
- }
-
- /*
- * Now go over all reserved memblock regions, to construct a
- * node mask of all kernel reserved memory areas.
- *
- * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
- * numa_meminfo might not include all memblock.reserved
- * memory ranges, because quirks such as trim_snb_memory()
- * reserve specific pages for Sandy Bridge graphics. ]
- */
- for_each_reserved_mem_region(mb_region) {
- int nid = memblock_get_region_node(mb_region);
-
- if (nid != NUMA_NO_NODE)
- node_set(nid, reserved_nodemask);
- }
-
- /*
- * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
- * belonging to the reserved node mask.
- *
- * Note that this will include memory regions that reside
- * on nodes that contain kernel memory - entire nodes
- * become hot-unpluggable:
- */
- for (i = 0; i < numa_meminfo.nr_blks; i++) {
- struct numa_memblk *mb = numa_meminfo.blk + i;
-
- if (!node_isset(mb->nid, reserved_nodemask))
- continue;
-
- memblock_clear_hotplug(mb->start, mb->end - mb->start);
- }
-}
-
-static int __init numa_register_memblks(struct numa_meminfo *mi)
-{
- int i, nid;
-
- /* Account for nodes with cpus and no memory */
- node_possible_map = numa_nodes_parsed;
- numa_nodemask_from_meminfo(&node_possible_map, mi);
- if (WARN_ON(nodes_empty(node_possible_map)))
- return -EINVAL;
-
- for (i = 0; i < mi->nr_blks; i++) {
- struct numa_memblk *mb = &mi->blk[i];
- memblock_set_node(mb->start, mb->end - mb->start,
- &memblock.memory, mb->nid);
- }
-
- /*
- * At very early time, the kernel have to use some memory such as
- * loading the kernel image. We cannot prevent this anyway. So any
- * node the kernel resides in should be un-hotpluggable.
- *
- * And when we come here, alloc node data won't fail.
- */
- numa_clear_kernel_node_hotplug();
-
- /*
- * If sections array is gonna be used for pfn -> nid mapping, check
- * whether its granularity is fine enough.
- */
- if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
- unsigned long pfn_align = node_map_pfn_alignment();
-
- if (pfn_align && pfn_align < PAGES_PER_SECTION) {
- pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
- PFN_PHYS(pfn_align) >> 20,
- PFN_PHYS(PAGES_PER_SECTION) >> 20);
- return -EINVAL;
- }
- }
+ int nid;
if (!memblock_validate_numa_coverage(SZ_1M))
return -EINVAL;
/* Finally register nodes. */
for_each_node_mask(nid, node_possible_map) {
- u64 start = PFN_PHYS(max_pfn);
- u64 end = 0;
-
- for (i = 0; i < mi->nr_blks; i++) {
- if (nid != mi->blk[i].nid)
- continue;
- start = min(mi->blk[i].start, start);
- end = max(mi->blk[i].end, end);
- }
+ unsigned long start_pfn, end_pfn;
- if (start >= end)
+ /*
+ * Note: get_pfn_range_for_nid() depends on memblock_set_node()
+ * having already happened.
+ */
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+ if (start_pfn >= end_pfn)
continue;
alloc_node_data(nid);
+ node_set_online(nid);
}
/* Dump memblock with node info and return. */
@@ -609,39 +171,11 @@ static int __init numa_init(int (*init_func)(void))
for (i = 0; i < MAX_LOCAL_APIC; i++)
set_apicid_to_node(i, NUMA_NO_NODE);
- nodes_clear(numa_nodes_parsed);
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
- memset(&numa_meminfo, 0, sizeof(numa_meminfo));
- WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
- NUMA_NO_NODE));
- WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
- NUMA_NO_NODE));
- /* In case that parsing SRAT failed. */
- WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
- numa_reset_distance();
-
- ret = init_func();
- if (ret < 0)
- return ret;
-
- /*
- * We reset memblock back to the top-down direction
- * here because if we configured ACPI_NUMA, we have
- * parsed SRAT in init_func(). It is ok to have the
- * reset here even if we did't configure ACPI_NUMA
- * or acpi numa init fails and fallbacks to dummy
- * numa init.
- */
- memblock_set_bottom_up(false);
-
- ret = numa_cleanup_meminfo(&numa_meminfo);
+ ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true);
if (ret < 0)
return ret;
- numa_emulation(&numa_meminfo, numa_distance_cnt);
-
- ret = numa_register_memblks(&numa_meminfo);
+ ret = numa_register_nodes();
if (ret < 0)
return ret;
@@ -782,12 +316,12 @@ void __init init_cpu_to_node(void)
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
# ifndef CONFIG_NUMA_EMU
-void numa_add_cpu(int cpu)
+void numa_add_cpu(unsigned int cpu)
{
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
-void numa_remove_cpu(int cpu)
+void numa_remove_cpu(unsigned int cpu)
{
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
@@ -825,7 +359,7 @@ int early_cpu_to_node(int cpu)
return per_cpu(x86_cpu_to_node_map, cpu);
}
-void debug_cpumask_set_cpu(int cpu, int node, bool enable)
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
{
struct cpumask *mask;
@@ -857,12 +391,12 @@ static void numa_set_cpumask(int cpu, bool enable)
debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}
-void numa_add_cpu(int cpu)
+void numa_add_cpu(unsigned int cpu)
{
numa_set_cpumask(cpu, true);
}
-void numa_remove_cpu(int cpu)
+void numa_remove_cpu(unsigned int cpu)
{
numa_set_cpumask(cpu, false);
}
@@ -893,113 +427,29 @@ EXPORT_SYMBOL(cpumask_of_node);
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
-#ifdef CONFIG_NUMA_KEEP_MEMINFO
-static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids)
{
- int i;
-
- for (i = 0; i < mi->nr_blks; i++)
- if (mi->blk[i].start <= start && mi->blk[i].end > start)
- return mi->blk[i].nid;
- return NUMA_NO_NODE;
-}
-
-int phys_to_target_node(phys_addr_t start)
-{
- int nid = meminfo_to_nid(&numa_meminfo, start);
+ int i, j;
/*
- * Prefer online nodes, but if reserved memory might be
- * hot-added continue the search with reserved ranges.
+ * Transform __apicid_to_node table to use emulated nids by
+ * reverse-mapping phys_nid. The maps should always exist but fall
+ * back to zero just in case.
*/
- if (nid != NUMA_NO_NODE)
- return nid;
-
- return meminfo_to_nid(&numa_reserved_meminfo, start);
-}
-EXPORT_SYMBOL_GPL(phys_to_target_node);
-
-int memory_add_physaddr_to_nid(u64 start)
-{
- int nid = meminfo_to_nid(&numa_meminfo, start);
-
- if (nid == NUMA_NO_NODE)
- nid = numa_meminfo.blk[0].nid;
- return nid;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-
-#endif
-
-static int __init cmp_memblk(const void *a, const void *b)
-{
- const struct numa_memblk *ma = *(const struct numa_memblk **)a;
- const struct numa_memblk *mb = *(const struct numa_memblk **)b;
-
- return (ma->start > mb->start) - (ma->start < mb->start);
+ for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
+ if (__apicid_to_node[i] == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < nr_emu_nids; j++)
+ if (__apicid_to_node[i] == emu_nid_to_phys[j])
+ break;
+ __apicid_to_node[i] = j < nr_emu_nids ? j : 0;
+ }
}
-static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
-
-/**
- * numa_fill_memblks - Fill gaps in numa_meminfo memblks
- * @start: address to begin fill
- * @end: address to end fill
- *
- * Find and extend numa_meminfo memblks to cover the physical
- * address range @start-@end
- *
- * RETURNS:
- * 0 : Success
- * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
- */
-
-int __init numa_fill_memblks(u64 start, u64 end)
+u64 __init numa_emu_dma_end(void)
{
- struct numa_memblk **blk = &numa_memblk_list[0];
- struct numa_meminfo *mi = &numa_meminfo;
- int count = 0;
- u64 prev_end;
-
- /*
- * Create a list of pointers to numa_meminfo memblks that
- * overlap start, end. The list is used to make in-place
- * changes that fill out the numa_meminfo memblks.
- */
- for (int i = 0; i < mi->nr_blks; i++) {
- struct numa_memblk *bi = &mi->blk[i];
-
- if (memblock_addrs_overlap(start, end - start, bi->start,
- bi->end - bi->start)) {
- blk[count] = &mi->blk[i];
- count++;
- }
- }
- if (!count)
- return NUMA_NO_MEMBLK;
-
- /* Sort the list of pointers in memblk->start order */
- sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
-
- /* Make sure the first/last memblks include start/end */
- blk[0]->start = min(blk[0]->start, start);
- blk[count - 1]->end = max(blk[count - 1]->end, end);
-
- /*
- * Fill any gaps by tracking the previous memblks
- * end address and backfilling to it if needed.
- */
- prev_end = blk[0]->end;
- for (int i = 1; i < count; i++) {
- struct numa_memblk *curr = blk[i];
-
- if (prev_end >= curr->start) {
- if (prev_end < curr->end)
- prev_end = curr->end;
- } else {
- curr->start = prev_end;
- prev_end = curr->end;
- }
- }
- return 0;
+ return PFN_PHYS(MAX_DMA32_PFN);
}
+#endif /* CONFIG_NUMA_EMU */
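Note: the new numa_emu_update_cpu_to_node() above rewrites __apicid_to_node
from physical to emulated node IDs by reverse-mapping through
emu_nid_to_phys[]. A minimal userspace model of that reverse mapping
follows; the sample emu_nid_to_phys[] values are hypothetical.

#include <stdio.h>

/* Mirrors the inner loop above: return the first emulated nid backed
 * by the given physical nid, falling back to 0 as the patch does. */
static int phys_to_emu_nid(int phys_nid, const int *emu_nid_to_phys, int nr)
{
	int j;

	for (j = 0; j < nr; j++)
		if (emu_nid_to_phys[j] == phys_nid)
			return j;
	return 0;
}

int main(void)
{
	/* hypothetical: four emulated nodes carved out of two physical */
	const int emu_nid_to_phys[] = { 0, 0, 1, 1 };

	/* an APIC id that mapped to physical node 1 now maps to emu nid 2 */
	printf("%d\n", phys_to_emu_nid(1, emu_nid_to_phys, 4));
	return 0;
}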
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
index 86860f279662..11e1ff370c10 100644
--- a/arch/x86/mm/numa_internal.h
+++ b/arch/x86/mm/numa_internal.h
@@ -5,30 +5,6 @@
#include <linux/types.h>
#include <asm/numa.h>
-struct numa_memblk {
- u64 start;
- u64 end;
- int nid;
-};
-
-struct numa_meminfo {
- int nr_blks;
- struct numa_memblk blk[NR_NODE_MEMBLKS];
-};
-
-void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
-int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
-void __init numa_reset_distance(void);
-
void __init x86_numa_init(void);
-#ifdef CONFIG_NUMA_EMU
-void __init numa_emulation(struct numa_meminfo *numa_meminfo,
- int numa_dist_cnt);
-#else
-static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
- int numa_dist_cnt)
-{ }
-#endif
-
#endif /* __X86_MM_NUMA_INTERNAL_H */
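Note: with struct numa_memblk and the meminfo helpers removed here, both
now come from the common <linux/numa_memblks.h>. A sketch of the slimmed
arch flow implied by the numa.c hunks above (arch_numa_init is a
hypothetical name; error paths trimmed):

/* The arch hands its firmware parser to common code and keeps only
 * node registration for itself. */
static int __init arch_numa_init(int (*init_func)(void))
{
	int ret;

	/* common code clears state, runs init_func() (e.g. SRAT parsing),
	 * cleans up numa_meminfo and applies NUMA emulation */
	ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true);
	if (ret < 0)
		return ret;

	return numa_register_nodes();	/* arch-side NODE_DATA setup */
}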
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index bdc2a240c2aa..feb8cc6a12bf 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -104,7 +104,7 @@ __setup("debugpat", pat_debug_setup);
#ifdef CONFIG_X86_PAT
/*
- * X86 PAT uses page flags arch_1 and uncached together to keep track of
+ * X86 PAT uses page flags arch_1 and arch_2 together to keep track of
* memory type of pages that have backing page struct.
*
* X86 PAT supports 4 different memory types:
@@ -118,9 +118,9 @@ __setup("debugpat", pat_debug_setup);
#define _PGMT_WB 0
#define _PGMT_WC (1UL << PG_arch_1)
-#define _PGMT_UC_MINUS (1UL << PG_uncached)
-#define _PGMT_WT (1UL << PG_uncached | 1UL << PG_arch_1)
-#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_UC_MINUS (1UL << PG_arch_2)
+#define _PGMT_WT (1UL << PG_arch_2 | 1UL << PG_arch_1)
+#define _PGMT_MASK (1UL << PG_arch_2 | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK (~_PGMT_MASK)
static inline enum page_cache_mode get_page_memtype(struct page *pg)
@@ -176,15 +176,6 @@ static inline void set_page_memtype(struct page *pg,
}
#endif
-enum {
- PAT_UC = 0, /* uncached */
- PAT_WC = 1, /* Write combining */
- PAT_WT = 4, /* Write Through */
- PAT_WP = 5, /* Write Protected */
- PAT_WB = 6, /* Write Back (default) */
- PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */
-};
-
#define CM(c) (_PAGE_CACHE_MODE_ ## c)
static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
@@ -194,13 +185,13 @@ static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
char *cache_mode;
switch (pat_val) {
- case PAT_UC: cache = CM(UC); cache_mode = "UC "; break;
- case PAT_WC: cache = CM(WC); cache_mode = "WC "; break;
- case PAT_WT: cache = CM(WT); cache_mode = "WT "; break;
- case PAT_WP: cache = CM(WP); cache_mode = "WP "; break;
- case PAT_WB: cache = CM(WB); cache_mode = "WB "; break;
- case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
- default: cache = CM(WB); cache_mode = "WB "; break;
+ case X86_MEMTYPE_UC: cache = CM(UC); cache_mode = "UC "; break;
+ case X86_MEMTYPE_WC: cache = CM(WC); cache_mode = "WC "; break;
+ case X86_MEMTYPE_WT: cache = CM(WT); cache_mode = "WT "; break;
+ case X86_MEMTYPE_WP: cache = CM(WP); cache_mode = "WP "; break;
+ case X86_MEMTYPE_WB: cache = CM(WB); cache_mode = "WB "; break;
+ case X86_MEMTYPE_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
+ default: cache = CM(WB); cache_mode = "WB "; break;
}
memcpy(msg, cache_mode, 4);
@@ -257,12 +248,6 @@ void pat_cpu_init(void)
void __init pat_bp_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
-#define PAT(p0, p1, p2, p3, p4, p5, p6, p7) \
- (((u64)PAT_ ## p0) | ((u64)PAT_ ## p1 << 8) | \
- ((u64)PAT_ ## p2 << 16) | ((u64)PAT_ ## p3 << 24) | \
- ((u64)PAT_ ## p4 << 32) | ((u64)PAT_ ## p5 << 40) | \
- ((u64)PAT_ ## p6 << 48) | ((u64)PAT_ ## p7 << 56))
-
if (!IS_ENABLED(CONFIG_X86_PAT))
pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n");
@@ -293,7 +278,7 @@ void __init pat_bp_init(void)
* NOTE: When WC or WP is used, it is redirected to UC- per
* the default setup in __cachemode2pte_tbl[].
*/
- pat_msr_val = PAT(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC);
+ pat_msr_val = PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC);
}
/*
@@ -328,7 +313,7 @@ void __init pat_bp_init(void)
* NOTE: When WT or WP is used, it is redirected to UC- per
* the default setup in __cachemode2pte_tbl[].
*/
- pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC);
+ pat_msr_val = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC);
} else {
/*
* Full PAT support. We put WT in slot 7 to improve
@@ -356,13 +341,12 @@ void __init pat_bp_init(void)
* The reserved slots are unused, but mapped to their
* corresponding types in the presence of PAT errata.
*/
- pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT);
+ pat_msr_val = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT);
}
memory_caching_control |= CACHE_PAT;
init_cache_modes(pat_msr_val);
-#undef PAT
}
static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
@@ -951,23 +935,20 @@ static void free_pfn_range(u64 paddr, unsigned long size)
static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
resource_size_t *phys)
{
- pte_t *ptep, pte;
- spinlock_t *ptl;
+ struct follow_pfnmap_args args = { .vma = vma, .address = vma->vm_start };
- if (follow_pte(vma, vma->vm_start, &ptep, &ptl))
+ if (follow_pfnmap_start(&args))
return -EINVAL;
- pte = ptep_get(ptep);
-
/* Never return PFNs of anon folios in COW mappings. */
- if (vm_normal_folio(vma, vma->vm_start, pte)) {
- pte_unmap_unlock(ptep, ptl);
+ if (!args.special) {
+ follow_pfnmap_end(&args);
return -EINVAL;
}
- *prot = pgprot_val(pte_pgprot(pte));
- *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
- pte_unmap_unlock(ptep, ptl);
+ *prot = pgprot_val(args.pgprot);
+ *phys = (resource_size_t)args.pfn << PAGE_SHIFT;
+ follow_pfnmap_end(&args);
return 0;
}
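Note: follow_phys() now brackets the lookup with the follow_pfnmap API
instead of walking PTEs by hand. A minimal sketch of the pattern as used
in the hunk above; lookup_pfn_prot() is a hypothetical caller.

static int lookup_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long *pfn, unsigned long *prot)
{
	struct follow_pfnmap_args args = { .vma = vma, .address = addr };

	if (follow_pfnmap_start(&args))
		return -EINVAL;		/* nothing mapped at addr */

	if (!args.special) {		/* never trust anon folios in COW mappings */
		follow_pfnmap_end(&args);
		return -EINVAL;
	}

	*prot = pgprot_val(args.pgprot);
	*pfn = args.pfn;
	follow_pfnmap_end(&args);	/* drops the lock taken by _start() */
	return 0;
}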
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index f5931499c2d6..5745a354a241 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -641,6 +641,18 @@ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
}
#endif
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pud_t *pudp)
+{
+ VM_WARN_ON_ONCE(!pud_present(*pudp));
+ pud_t old = pudp_establish(vma, address, pudp, pud_mkinvalid(*pudp));
+ flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
+ return old;
+}
+#endif
+
/**
* reserve_top_address - reserves a hole in the top of kernel address space
* @reserve - size of hole to reserve
@@ -926,3 +938,9 @@ void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd)
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
pmd_shstk(pmd));
}
+
+void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
+{
+ /* See note in arch_check_zapped_pte() */
+ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && pud_shstk(pud));
+}
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index bda73cb7a044..ae295659ca14 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -144,3 +144,4 @@ static void __exit cleanup(void)
module_init(init);
module_exit(cleanup);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test module for mmiotrace");
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d25d81c8ecc0..06b080b61aa5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -64,6 +64,56 @@ static bool is_imm8(int value)
return value <= 127 && value >= -128;
}
+/*
+ * Let us limit the positive offset to be <= 123.
+ * This is to ensure eventual JIT convergence for the following patterns:
+ * ...
+ * pass4, final_proglen=4391:
+ * ...
+ * 20e: 48 85 ff test rdi,rdi
+ * 211: 74 7d je 0x290
+ * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
+ * ...
+ * 289: 48 85 ff test rdi,rdi
+ * 28c: 74 17 je 0x2a5
+ * 28e: e9 7f ff ff ff jmp 0x212
+ * 293: bf 03 00 00 00 mov edi,0x3
+ * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (-125)
+ * and insn at 0x28e is 5-byte jmp insn with offset -129.
+ *
+ * pass5, final_proglen=4392:
+ * ...
+ * 20e: 48 85 ff test rdi,rdi
+ * 211: 0f 84 80 00 00 00 je 0x297
+ * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
+ * ...
+ * 28d: 48 85 ff test rdi,rdi
+ * 290: 74 1a je 0x2ac
+ * 292: eb 84 jmp 0x218
+ * 294: bf 03 00 00 00 mov edi,0x3
+ * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
+ * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
+ * At the same time, insn at 0x292 is a 2-byte insn since its offset is
+ * -124.
+ *
+ * pass6 will repeat the same code as in pass4 and this will prevent
+ * eventual convergence.
+ *
+ * To fix this issue, we need to break the je (2->6 bytes) <-> jmp
+ * (5->2 bytes) cycle above. In the above example, a je offset <= 0x7c
+ * should work.
+ *
+ * For other cases, je <-> je needs offset <= 0x7b to avoid the
+ * non-convergence issue. For jmp <-> je and jmp <-> jmp cases, a jmp
+ * offset <= 0x7c avoids the non-convergence issue.
+ *
+ * Overall, let us limit the positive offset for an 8-bit cond/uncond jmp
+ * insn to a maximum of 123 (0x7b). This way, the JIT passes can eventually
+ * converge.
+ */
+static bool is_imm8_jmp_offset(int value)
+{
+ return value <= 123 && value >= -128;
+}
+
static bool is_simm32(s64 value)
{
return value == (s64)(s32)value;
@@ -273,7 +323,7 @@ struct jit_context {
/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE 5
/* Number of bytes that will be skipped on tailcall */
-#define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE)
+#define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE)
static void push_r12(u8 **pprog)
{
@@ -403,6 +453,37 @@ static void emit_cfi(u8 **pprog, u32 hash)
*pprog = prog;
}
+static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
+{
+ u8 *prog = *pprog;
+
+ if (!is_subprog) {
+ /* cmp rax, MAX_TAIL_CALL_CNT */
+ EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
+ EMIT2(X86_JA, 6); /* ja 6 */
+ /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
+ * case1: entry of main prog.
+ * case2: tail callee of main prog.
+ */
+ EMIT1(0x50); /* push rax */
+ /* Make rax as tail_call_cnt_ptr. */
+ EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
+ EMIT2(0xEB, 1); /* jmp 1 */
+ /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
+ * case: tail callee of subprog.
+ */
+ EMIT1(0x50); /* push rax */
+ /* push tail_call_cnt_ptr */
+ EMIT1(0x50); /* push rax */
+ } else { /* is_subprog */
+ /* rax is tail_call_cnt_ptr. */
+ EMIT1(0x50); /* push rax */
+ EMIT1(0x50); /* push rax */
+ }
+
+ *pprog = prog;
+}
+
/*
* Emit x86-64 prologue code for BPF program.
* bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
@@ -424,10 +505,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
/* When it's the entry of the whole tailcall context,
* zeroing rax means initialising tail_call_cnt.
*/
- EMIT2(0x31, 0xC0); /* xor eax, eax */
+ EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
else
/* Keep the same instruction layout. */
- EMIT2(0x66, 0x90); /* nop2 */
+ emit_nops(&prog, 3); /* nop3 */
}
/* Exception callback receives FP as third parameter */
if (is_exception_cb) {
@@ -453,7 +534,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
if (tail_call_reachable)
- EMIT1(0x50); /* push rax */
+ emit_prologue_tail_call(&prog, is_subprog);
*pprog = prog;
}
@@ -589,13 +670,15 @@ static void emit_return(u8 **pprog, u8 *ip)
*pprog = prog;
}
+#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8))
+
/*
* Generate the following code:
*
* ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
* if (index >= array->map.max_entries)
* goto out;
- * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
+ * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
* goto out;
* prog = array->ptrs[index];
* if (prog == NULL)
@@ -608,7 +691,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
u32 stack_depth, u8 *ip,
struct jit_context *ctx)
{
- int tcc_off = -4 - round_up(stack_depth, 8);
+ int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
u8 *prog = *pprog, *start = *pprog;
int offset;
@@ -630,16 +713,14 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
EMIT2(X86_JBE, offset); /* jbe out */
/*
- * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
+ * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
- EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
+ EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
+ EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
offset = ctx->tail_call_indirect_label - (prog + 2 - start);
EMIT2(X86_JAE, offset); /* jae out */
- EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
/* prog = array->ptrs[index]; */
EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
@@ -654,6 +735,9 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
offset = ctx->tail_call_indirect_label - (prog + 2 - start);
EMIT2(X86_JE, offset); /* je out */
+ /* Inc tail_call_cnt if the slot is populated. */
+ EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
+
if (bpf_prog->aux->exception_boundary) {
pop_callee_regs(&prog, all_callee_regs_used);
pop_r12(&prog);
@@ -663,6 +747,11 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
pop_r12(&prog);
}
+ /* Pop tail_call_cnt_ptr. */
+ EMIT1(0x58); /* pop rax */
+ /* Pop tail_call_cnt, if it's main prog.
+ * Pop tail_call_cnt_ptr, if it's subprog.
+ */
EMIT1(0x58); /* pop rax */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
@@ -691,21 +780,19 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
bool *callee_regs_used, u32 stack_depth,
struct jit_context *ctx)
{
- int tcc_off = -4 - round_up(stack_depth, 8);
+ int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
u8 *prog = *pprog, *start = *pprog;
int offset;
/*
- * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
+ * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
- EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
+ EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
+ EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
offset = ctx->tail_call_direct_label - (prog + 2 - start);
EMIT2(X86_JAE, offset); /* jae out */
- EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
poke->tailcall_bypass = ip + (prog - start);
poke->adj_off = X86_TAIL_CALL_OFFSET;
@@ -715,6 +802,9 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
poke->tailcall_bypass);
+ /* Inc tail_call_cnt if the slot is populated. */
+ EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
+
if (bpf_prog->aux->exception_boundary) {
pop_callee_regs(&prog, all_callee_regs_used);
pop_r12(&prog);
@@ -724,6 +814,11 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
pop_r12(&prog);
}
+ /* Pop tail_call_cnt_ptr. */
+ EMIT1(0x58); /* pop rax */
+ /* Pop tail_call_cnt, if it's main prog.
+ * Pop tail_call_cnt_ptr, if it's subprog.
+ */
EMIT1(0x58); /* pop rax */
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
@@ -1311,9 +1406,11 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
-/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
-#define RESTORE_TAIL_CALL_CNT(stack) \
- EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
+#define __LOAD_TCC_PTR(off) \
+ EMIT3_off32(0x48, 0x8B, 0x85, off)
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
+#define LOAD_TAIL_CALL_CNT_PTR(stack) \
+ __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
@@ -2031,7 +2128,7 @@ populate_extable:
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
- RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
+ LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth);
ip += 7;
}
if (!imm32)
@@ -2184,7 +2281,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
return -EFAULT;
}
jmp_offset = addrs[i + insn->off] - addrs[i];
- if (is_imm8(jmp_offset)) {
+ if (is_imm8_jmp_offset(jmp_offset)) {
if (jmp_padding) {
/* To keep the jmp_offset valid, the extra bytes are
* padded before the jump insn, so we subtract the
@@ -2266,7 +2363,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
break;
}
emit_jmp:
- if (is_imm8(jmp_offset)) {
+ if (is_imm8_jmp_offset(jmp_offset)) {
if (jmp_padding) {
/* To avoid breaking jmp_offset, the extra bytes
* are padded before the actual jmp insn, so
@@ -2706,6 +2803,10 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
return 0;
}
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+#define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \
+ __LOAD_TCC_PTR(-round_up(stack, 8) - 8)
+
/* Example:
* __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
* its 'struct btf_func_model' will be nr_args=2
@@ -2826,7 +2927,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
* [ ... ]
* [ stack_arg2 ]
* RBP - arg_stack_off [ stack_arg1 ]
- * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
+ * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
*/
/* room for return value of orig_call or fentry prog */
@@ -2955,10 +3056,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
save_args(m, &prog, arg_stack_off, true);
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
- /* Before calling the original function, restore the
- * tail_call_cnt from stack to rax.
+ /* Before calling the original function, load the
+ * tail_call_cnt_ptr from stack to rax.
*/
- RESTORE_TAIL_CALL_CNT(stack_size);
+ LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
}
if (flags & BPF_TRAMP_F_ORIG_STACK) {
@@ -3017,10 +3118,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
goto cleanup;
}
} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
- /* Before running the original function, restore the
- * tail_call_cnt from stack to rax.
+ /* Before running the original function, load the
+ * tail_call_cnt_ptr from stack to rax.
*/
- RESTORE_TAIL_CALL_CNT(stack_size);
+ LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
}
/* restore return value of orig_call or fentry prog back into RAX */
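Note: the net effect of the tail-call changes is that every frame keeps a
pointer to one shared tail_call_cnt in the entry frame, instead of each
frame carrying its own 32-bit copy. A runnable C model of the counting
semantics (MAX_TAIL_CALL_CNT is 33 in the kernel):

#include <stdio.h>

#define MAX_TAIL_CALL_CNT 33

static long tail_call_cnt;		/* lives once, in the entry frame */
static long *tcc_ptr = &tail_call_cnt;	/* what every (sub)prog frame holds */

/* Models the emitted sequence: cmp [rax], MAX; jae out; add [rax], 1 */
static int do_tail_call(void)
{
	if (*tcc_ptr >= MAX_TAIL_CALL_CNT)
		return -1;		/* jae out */
	(*tcc_ptr)++;			/* counted once, however deep the chain */
	return 0;
}

int main(void)
{
	int done = 0;

	while (do_tail_call() == 0)
		done++;
	printf("%d tail calls permitted\n", done);	/* 33 */
	return 0;
}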
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index b33afb240601..98a9bb92d75c 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev)
return;
rp = pcie_find_root_port(dev);
- if (!rp->pm_cap)
+ if (!rp || !rp->pm_cap)
return;
rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
@@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev)
u16 pmc;
rp = pcie_find_root_port(dev);
- if (!rp->pm_cap)
+ if (!rp || !rp->pm_cap)
return;
pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index f7235ef87bc3..64fca49cd88f 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -7,6 +7,7 @@
.code32
.text
#define _pa(x) ((x) - __START_KERNEL_map)
+#define rva(x) ((x) - pvh_start_xen)
#include <linux/elfnote.h>
#include <linux/init.h>
@@ -15,6 +16,7 @@
#include <asm/segment.h>
#include <asm/asm.h>
#include <asm/boot.h>
+#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>
@@ -54,7 +56,25 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
UNWIND_HINT_END_OF_STACK
cld
- lgdt (_pa(gdt))
+ /*
+ * See the comment for startup_32 for more details. We need to
+ * execute a call to get the execution address to be position
+ * independent, but we don't have a stack. Save and restore the
+ * magic field of start_info in ebx, and use that as the stack.
+ */
+ mov (%ebx), %eax
+ leal 4(%ebx), %esp
+ ANNOTATE_INTRA_FUNCTION_CALL
+ call 1f
+1: popl %ebp
+ mov %eax, (%ebx)
+ subl $rva(1b), %ebp
+ movl $0, %esp
+
+ leal rva(gdt)(%ebp), %eax
+ leal rva(gdt_start)(%ebp), %ecx
+ movl %ecx, 2(%eax)
+ lgdt (%eax)
mov $PVH_DS_SEL,%eax
mov %eax,%ds
@@ -62,14 +82,14 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
mov %eax,%ss
/* Stash hvm_start_info. */
- mov $_pa(pvh_start_info), %edi
+ leal rva(pvh_start_info)(%ebp), %edi
mov %ebx, %esi
- mov _pa(pvh_start_info_sz), %ecx
+ movl rva(pvh_start_info_sz)(%ebp), %ecx
shr $2,%ecx
rep
movsl
- mov $_pa(early_stack_end), %esp
+ leal rva(early_stack_end)(%ebp), %esp
/* Enable PAE mode. */
mov %cr4, %eax
@@ -83,31 +103,86 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
btsl $_EFER_LME, %eax
wrmsr
+ mov %ebp, %ebx
+ subl $_pa(pvh_start_xen), %ebx /* offset */
+ jz .Lpagetable_done
+
+ /* Fixup page-tables for relocation. */
+ leal rva(pvh_init_top_pgt)(%ebp), %edi
+ movl $PTRS_PER_PGD, %ecx
+2:
+ testl $_PAGE_PRESENT, 0x00(%edi)
+ jz 1f
+ addl %ebx, 0x00(%edi)
+1:
+ addl $8, %edi
+ decl %ecx
+ jnz 2b
+
+ /* L3 ident has a single entry. */
+ leal rva(pvh_level3_ident_pgt)(%ebp), %edi
+ addl %ebx, 0x00(%edi)
+
+ leal rva(pvh_level3_kernel_pgt)(%ebp), %edi
+ addl %ebx, (PAGE_SIZE - 16)(%edi)
+ addl %ebx, (PAGE_SIZE - 8)(%edi)
+
+ /* pvh_level2_ident_pgt is fine - large pages */
+
+ /* pvh_level2_kernel_pgt needs adjustment - large pages */
+ leal rva(pvh_level2_kernel_pgt)(%ebp), %edi
+ movl $PTRS_PER_PMD, %ecx
+2:
+ testl $_PAGE_PRESENT, 0x00(%edi)
+ jz 1f
+ addl %ebx, 0x00(%edi)
+1:
+ addl $8, %edi
+ decl %ecx
+ jnz 2b
+
+.Lpagetable_done:
/* Enable pre-constructed page tables. */
- mov $_pa(init_top_pgt), %eax
+ leal rva(pvh_init_top_pgt)(%ebp), %eax
mov %eax, %cr3
mov $(X86_CR0_PG | X86_CR0_PE), %eax
mov %eax, %cr0
/* Jump to 64-bit mode. */
- ljmp $PVH_CS_SEL, $_pa(1f)
+ pushl $PVH_CS_SEL
+ leal rva(1f)(%ebp), %eax
+ pushl %eax
+ lretl
/* 64-bit entry point. */
.code64
1:
+ UNWIND_HINT_END_OF_STACK
+
/* Set base address in stack canary descriptor. */
mov $MSR_GS_BASE,%ecx
- mov $_pa(canary), %eax
+ leal canary(%rip), %eax
xor %edx, %edx
wrmsr
+ /*
+ * Calculate load offset and store in phys_base. __pa() needs
+ * phys_base set to calculate the hypercall page in xen_pvh_init().
+ */
+ movq %rbp, %rbx
+ subq $_pa(pvh_start_xen), %rbx
+ movq %rbx, phys_base(%rip)
call xen_prepare_pvh
+ /*
+ * Clear phys_base. __startup_64 will *add* to its value,
+ * so reset to 0.
+ */
+ xor %rbx, %rbx
+ movq %rbx, phys_base(%rip)
/* startup_64 expects boot_params in %rsi. */
- mov $_pa(pvh_bootparams), %rsi
- mov $_pa(startup_64), %rax
- ANNOTATE_RETPOLINE_SAFE
- jmp *%rax
+ lea pvh_bootparams(%rip), %rsi
+ jmp startup_64
#else /* CONFIG_X86_64 */
@@ -143,7 +218,7 @@ SYM_CODE_END(pvh_start_xen)
.balign 8
SYM_DATA_START_LOCAL(gdt)
.word gdt_end - gdt_start
- .long _pa(gdt_start)
+ .long _pa(gdt_start) /* x86-64 will overwrite if relocated. */
.word 0
SYM_DATA_END(gdt)
SYM_DATA_START_LOCAL(gdt_start)
@@ -163,5 +238,67 @@ SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
+#ifdef CONFIG_X86_64
+/*
+ * Xen PVH needs a set of identity mapped and kernel high mapping
+ * page tables. pvh_start_xen starts running on the identity mapped
+ * page tables, but xen_prepare_pvh calls into the high mapping.
+ * These page tables need to be relocatable and are only used until
+ * startup_64 transitions to init_top_pgt.
+ */
+SYM_DATA_START_PAGE_ALIGNED(pvh_init_top_pgt)
+ .quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .org pvh_init_top_pgt + L4_PAGE_OFFSET * 8, 0
+ .quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .org pvh_init_top_pgt + L4_START_KERNEL * 8, 0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad pvh_level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+SYM_DATA_END(pvh_init_top_pgt)
+
+SYM_DATA_START_PAGE_ALIGNED(pvh_level3_ident_pgt)
+ .quad pvh_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .fill 511, 8, 0
+SYM_DATA_END(pvh_level3_ident_pgt)
+SYM_DATA_START_PAGE_ALIGNED(pvh_level2_ident_pgt)
+ /*
+ * Since I easily can, map the first 1G.
+ * Don't set NX because code runs from these pages.
+ *
+ * Note: This sets _PAGE_GLOBAL despite whether
+ * the CPU supports it or it is enabled. But,
+ * the CPU should ignore the bit.
+ */
+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+SYM_DATA_END(pvh_level2_ident_pgt)
+SYM_DATA_START_PAGE_ALIGNED(pvh_level3_kernel_pgt)
+ .fill L3_START_KERNEL, 8, 0
+ /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
+ .quad pvh_level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .quad 0 /* no fixmap */
+SYM_DATA_END(pvh_level3_kernel_pgt)
+
+SYM_DATA_START_PAGE_ALIGNED(pvh_level2_kernel_pgt)
+ /*
+ * Kernel high mapping.
+ *
+ * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
+ * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
+ * 512 MiB otherwise.
+ *
+ * (NOTE: after that starts the module area, see MODULES_VADDR.)
+ *
+ * This table is eventually used by the kernel during normal runtime.
+ * Care must be taken to clear out undesired bits later, like _PAGE_RW
+ * or _PAGE_GLOBAL in some cases.
+ */
+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE / PMD_SIZE)
+SYM_DATA_END(pvh_level2_kernel_pgt)
+
+ ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_RELOC,
+ .long CONFIG_PHYSICAL_ALIGN;
+ .long LOAD_PHYSICAL_ADDR;
+ .long KERNEL_IMAGE_SIZE - 1)
+#endif
+
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
_ASM_PTR (pvh_start_xen - __START_KERNEL_map))
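Note: the head.S changes make the PVH entry position independent. %ebp
ends up holding the runtime address of pvh_start_xen, %ebx the offset from
the linked address, and every present entry of the pre-built page tables
is rebased by that offset. A C model of the fixup loops; the function and
constants here are illustrative, not kernel code.

#include <stdint.h>

#define _PAGE_PRESENT	0x001ULL

/* Models the "2:" loops above: table entries hold physical addresses,
 * so each present entry gets the load offset added. */
static void fixup_table(uint64_t *tbl, int nents, uint64_t offset)
{
	for (int i = 0; i < nents; i++)
		if (tbl[i] & _PAGE_PRESENT)
			tbl[i] += offset;
}

/* offset is what the assembly computes into %ebx: the runtime address
 * of pvh_start_xen minus its linked physical address */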
diff --git a/arch/x86/um/sysrq_32.c b/arch/x86/um/sysrq_32.c
index f2383484840d..a1ee415c008d 100644
--- a/arch/x86/um/sysrq_32.c
+++ b/arch/x86/um/sysrq_32.c
@@ -9,7 +9,6 @@
#include <linux/sched/debug.h>
#include <linux/kallsyms.h>
#include <asm/ptrace.h>
-#include <asm/sysrq.h>
/* This is declared by <linux/sched.h> */
void show_regs(struct pt_regs *regs)
diff --git a/arch/x86/um/sysrq_64.c b/arch/x86/um/sysrq_64.c
index 0bf6de40abff..340d8a243c8a 100644
--- a/arch/x86/um/sysrq_64.c
+++ b/arch/x86/um/sysrq_64.c
@@ -12,7 +12,6 @@
#include <linux/utsname.h>
#include <asm/current.h>
#include <asm/ptrace.h>
-#include <asm/sysrq.h>
void show_regs(struct pt_regs *regs)
{
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index 76d9f6ce7a3d..f238f7b33cdd 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -52,8 +52,11 @@ subsys_initcall(init_vdso);
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
- int err;
+ struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
+ static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ };
if (!vdso_enabled)
return 0;
@@ -61,12 +64,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (mmap_write_lock_killable(mm))
return -EINTR;
- err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
+ vdso_mapping.pages = vdsop;
+ vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- vdsop);
+ &vdso_mapping);
mmap_write_unlock(mm);
- return err;
+ return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 728a4366ca85..bf68c329fc01 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -4,6 +4,7 @@
#include <linux/mm.h>
#include <xen/hvc-console.h>
+#include <xen/acpi.h>
#include <asm/bootparam.h>
#include <asm/io_apic.h>
@@ -28,6 +29,28 @@
bool __ro_after_init xen_pvh;
EXPORT_SYMBOL_GPL(xen_pvh);
+#ifdef CONFIG_XEN_DOM0
+int xen_pvh_setup_gsi(int gsi, int trigger, int polarity)
+{
+ int ret;
+ struct physdev_setup_gsi setup_gsi;
+
+ setup_gsi.gsi = gsi;
+ setup_gsi.triggering = (trigger == ACPI_EDGE_SENSITIVE ? 0 : 1);
+ setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+ if (ret == -EEXIST) {
+ xen_raw_printk("Already setup the GSI :%d\n", gsi);
+ ret = 0;
+ } else if (ret)
+ xen_raw_printk("Fail to setup GSI (%d)!\n", gsi);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_pvh_setup_gsi);
+#endif
+
/*
* Reserve e820 UNUSABLE regions to inflate the memory balloon.
*
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 839e6613753d..55a4996d0c04 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -665,7 +665,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
spinlock_t *ptl = NULL;
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
ptl = ptlock_ptr(page_ptdesc(page));
spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif
@@ -1553,7 +1553,8 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
+ if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS) &&
+ !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(XEN_LAZY_MMU);
@@ -1581,7 +1582,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
if (pinned) {
xen_mc_batch();
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS))
__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
__set_pfn_prot(pfn, PAGE_KERNEL);
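Note: the split-PTE-ptlock decision moved from a C macro to a Kconfig
symbol, so runtime code tests it with IS_ENABLED() while preprocessor
blocks use defined(). A sketch of the idiom, assuming the surrounding
xen_alloc_ptpage()/xen_pte_lock() bodies as context:

/* IS_ENABLED() keeps the branch visible to the compiler; it folds to
 * 0 or 1 at compile time, so the disabled path is still type-checked
 * and then eliminated. */
if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS) && !pinned)
	__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

/* Where the code only makes sense in the enabled case, the
 * preprocessor form is still used: */
#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
	ptl = ptlock_ptr(page_ptdesc(page));
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif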
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index b3c2450d6f23..dc54f854c2f5 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -55,7 +55,8 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
#ifdef CONFIG_MMU
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
struct vm_area_struct *vmm;
struct vma_iterator vmi;
diff --git a/block/bdev.c b/block/bdev.c
index 33f9c4605e3a..738e3c8457e7 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -555,7 +555,7 @@ retry:
/* if claiming is already in progress, wait for it to finish */
if (whole->bd_claiming) {
- wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
+ wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
DEFINE_WAIT(wait);
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
@@ -578,7 +578,7 @@ static void bd_clear_claiming(struct block_device *whole, void *holder)
/* tell others that we're done */
BUG_ON(whole->bd_claiming != holder);
whole->bd_claiming = NULL;
- wake_up_bit(&whole->bd_claiming, 0);
+ wake_up_var(&whole->bd_claiming);
}
/**
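Note: bd_prepare_to_claim() sleeps on the value of the bd_claiming
pointer rather than on a bit in a flags word, so the var-waitqueue family
is the matching primitive; sleeper and waker must use the same family so
the same address hashes to the same queue. The pairing, reduced to its
two sides (excerpt-style sketch):

/* sleeper, as in the hunk above: */
wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
DEFINE_WAIT(wait);

prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
/* ... unlock, schedule(), relock, recheck bd_claiming ... */
finish_wait(wq, &wait);

/* waker, as in bd_clear_claiming(): */
whole->bd_claiming = NULL;
wake_up_var(&whole->bd_claiming);	/* same address, same hash bucket */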
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 96a2653905ae..88e3ad73c385 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -367,7 +367,6 @@ free_bvec:
kfree(bvec);
return ret;
}
-EXPORT_SYMBOL_GPL(bio_integrity_map_user);
/**
* bio_integrity_prep - Prepare bio for integrity I/O
diff --git a/block/blk-core.c b/block/blk-core.c
index 1217c2cd66dd..bc5e8c5eaac9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -799,6 +799,7 @@ void submit_bio_noacct(struct bio *bio)
switch (bio_op(bio)) {
case REQ_OP_READ:
+ break;
case REQ_OP_WRITE:
if (bio->bi_opf & REQ_ATOMIC) {
status = blk_validate_atomic_write_op_size(q, bio);
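Note: the added break matters because C switch arms fall through: without
it, a REQ_OP_READ bio continued into the REQ_OP_WRITE arm and, when
REQ_ATOMIC was set, was run through the atomic-write size check. Reduced
to its essence (surrounding cases elided):

switch (bio_op(bio)) {
case REQ_OP_READ:
	break;		/* previously missing: reads fell into the write checks */
case REQ_OP_WRITE:
	if (bio->bi_opf & REQ_ATOMIC)
		status = blk_validate_atomic_write_op_size(q, bio);
	break;
/* ... other ops elided ... */
}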
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 010decc892ea..0a2b1c5d0ebf 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -53,7 +53,6 @@ new_segment:
return segments;
}
-EXPORT_SYMBOL(blk_rq_count_integrity_sg);
/**
* blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
@@ -63,19 +62,20 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
*
* Description: Map the integrity vectors in request into a
* scatterlist. The scatterlist must be big enough to hold all
- * elements. I.e. sized using blk_rq_count_integrity_sg().
+ * elements. I.e. sized using blk_rq_count_integrity_sg() or
+ * rq->nr_integrity_segments.
*/
-int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
- struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
struct bio_vec iv, ivprv = { NULL };
+ struct request_queue *q = rq->q;
struct scatterlist *sg = NULL;
+ struct bio *bio = rq->bio;
unsigned int segments = 0;
struct bvec_iter iter;
int prev = 0;
bio_for_each_integrity_vec(iv, bio, iter) {
-
if (prev) {
if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
@@ -103,10 +103,30 @@ new_segment:
if (sg)
sg_mark_end(sg);
+ /*
+ * Something must have gone wrong if the computed number of segments
+ * is bigger than the number of the request's integrity segments
+ */
+ BUG_ON(segments > rq->nr_integrity_segments);
+ BUG_ON(segments > queue_max_integrity_segments(q));
return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes, u32 seed)
+{
+ int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed);
+
+ if (ret)
+ return ret;
+
+ rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
+ rq->cmd_flags |= REQ_INTEGRITY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
+
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
struct request *next)
{
@@ -134,7 +154,6 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
struct bio *bio)
{
int nr_integrity_segs;
- struct bio *next = bio->bi_next;
if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
return true;
@@ -145,16 +164,11 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
return false;
- bio->bi_next = NULL;
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
- bio->bi_next = next;
-
if (req->nr_integrity_segments + nr_integrity_segs >
q->limits.max_integrity_segments)
return false;
- req->nr_integrity_segments += nr_integrity_segs;
-
return true;
}
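Note: with bio_integrity_map_user() unexported, drivers go through the
request-level wrapper added above, which also maintains the segment count
and REQ_INTEGRITY flag that blk_rq_map_integrity_sg() now checks against.
A sketch; setup_user_integrity() is a hypothetical caller.

static int setup_user_integrity(struct request *rq, void __user *meta,
				ssize_t meta_len, u32 seed,
				struct scatterlist *sgl)
{
	int ret;

	/* maps the user buffer and fills rq->nr_integrity_segments */
	ret = blk_rq_integrity_map_user(rq, meta, meta_len, seed);
	if (ret)
		return ret;

	/* sgl must have room for rq->nr_integrity_segments entries */
	return blk_rq_map_integrity_sg(rq, sgl) ? 0 : -EIO;
}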
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 56769c4bcd79..ad763ec313b6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -639,6 +639,9 @@ static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
* counters.
*/
req->nr_phys_segments += nr_phys_segs;
+ if (bio_integrity(bio))
+ req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
+ bio);
return 1;
no_merge:
@@ -731,6 +734,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/* Merge is OK... */
req->nr_phys_segments = total_phys_segments;
+ req->nr_integrity_segments += next->nr_integrity_segments;
return 1;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 831c5cf5d874..4b2c8e940f59 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -376,9 +376,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->io_start_time_ns = 0;
rq->stats_sectors = 0;
rq->nr_phys_segments = 0;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
rq->nr_integrity_segments = 0;
-#endif
rq->end_io = NULL;
rq->end_io_data = NULL;
@@ -2546,6 +2544,9 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
rq->__sector = bio->bi_iter.bi_sector;
rq->write_hint = bio->bi_write_hint;
blk_rq_bio_prep(rq, bio, nr_segs);
+ if (bio_integrity(bio))
+ rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
+ bio);
/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index cd8a8eabc9a5..a446654ddee5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -437,48 +437,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
}
EXPORT_SYMBOL_GPL(queue_limits_set);
-/**
- * blk_limits_io_min - set minimum request size for a device
- * @limits: the queue limits
- * @min: smallest I/O size in bytes
- *
- * Description:
- * Some devices have an internal block size bigger than the reported
- * hardware sector size. This function can be used to signal the
- * smallest I/O the device can perform without incurring a performance
- * penalty.
- */
-void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
-{
- limits->io_min = min;
-
- if (limits->io_min < limits->logical_block_size)
- limits->io_min = limits->logical_block_size;
-
- if (limits->io_min < limits->physical_block_size)
- limits->io_min = limits->physical_block_size;
-}
-EXPORT_SYMBOL(blk_limits_io_min);
-
-/**
- * blk_limits_io_opt - set optimal request size for a device
- * @limits: the queue limits
- * @opt: smallest I/O size in bytes
- *
- * Description:
- * Storage devices may report an optimal I/O size, which is the
- * device's preferred unit for sustained I/O. This is rarely reported
- * for disk drives. For RAID arrays it is usually the stripe width or
- * the internal track size. A properly aligned multiple of
- * optimal_io_size is the preferred request size for workloads where
- * sustained throughput is desired.
- */
-void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
-{
- limits->io_opt = opt;
-}
-EXPORT_SYMBOL(blk_limits_io_opt);
-
static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
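Note: blk_limits_io_min()/blk_limits_io_opt() went away because drivers
now fill in a queue_limits and commit it as a whole. Their logic against
the replacement API, as a sketch (set_io_hints is a hypothetical helper;
queue_limits_set() is the commit function named in the hunk header above):

static int set_io_hints(struct request_queue *q, struct queue_limits *lim,
			unsigned int min, unsigned int opt)
{
	/* same clamping the removed io_min helper applied */
	lim->io_min = max3(min, lim->logical_block_size,
			   lim->physical_block_size);
	lim->io_opt = opt;

	return queue_limits_set(q, lim);
}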
diff --git a/block/elevator.c b/block/elevator.c
index c355b55d0107..4122026b11f1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -715,7 +715,9 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf,
strscpy(elevator_name, buf, sizeof(elevator_name));
- return request_module("%s-iosched", strstrip(elevator_name));
+ request_module("%s-iosched", strstrip(elevator_name));
+
+ return 0;
}
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
diff --git a/block/fops.c b/block/fops.c
index e69deb6d8635..e696ae53bf1e 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -666,7 +666,7 @@ blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
- return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
+ return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
}
/*
diff --git a/certs/Makefile b/certs/Makefile
index 1094e3860c2a..f6fa4d8d75e0 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -84,5 +84,5 @@ targets += x509_revocation_list
hostprogs := extract-cert
-HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null)
+HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) -I$(srctree)/scripts
HOSTLDLIBS_extract-cert = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
diff --git a/certs/extract-cert.c b/certs/extract-cert.c
index 70e9ec89d87d..7d6d468ed612 100644
--- a/certs/extract-cert.c
+++ b/certs/extract-cert.c
@@ -21,14 +21,17 @@
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/err.h>
-#include <openssl/engine.h>
-
-/*
- * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API.
- *
- * Remove this if/when that API is no longer used
- */
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#if OPENSSL_VERSION_MAJOR >= 3
+# define USE_PKCS11_PROVIDER
+# include <openssl/provider.h>
+# include <openssl/store.h>
+#else
+# if !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0)
+# define USE_PKCS11_ENGINE
+# include <openssl/engine.h>
+# endif
+#endif
+#include "ssl-common.h"
#define PKEY_ID_PKCS7 2
@@ -40,41 +43,6 @@ void format(void)
exit(2);
}
-static void display_openssl_errors(int l)
-{
- const char *file;
- char buf[120];
- int e, line;
-
- if (ERR_peek_error() == 0)
- return;
- fprintf(stderr, "At main.c:%d:\n", l);
-
- while ((e = ERR_get_error_line(&file, &line))) {
- ERR_error_string(e, buf);
- fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line);
- }
-}
-
-static void drain_openssl_errors(void)
-{
- const char *file;
- int line;
-
- if (ERR_peek_error() == 0)
- return;
- while (ERR_get_error_line(&file, &line)) {}
-}
-
-#define ERR(cond, fmt, ...) \
- do { \
- bool __cond = (cond); \
- display_openssl_errors(__LINE__); \
- if (__cond) { \
- err(1, fmt, ## __VA_ARGS__); \
- } \
- } while(0)
-
static const char *key_pass;
static BIO *wb;
static char *cert_dst;
@@ -94,6 +62,66 @@ static void write_cert(X509 *x509)
fprintf(stderr, "Extracted cert: %s\n", buf);
}
+static X509 *load_cert_pkcs11(const char *cert_src)
+{
+ X509 *cert = NULL;
+#ifdef USE_PKCS11_PROVIDER
+ OSSL_STORE_CTX *store;
+
+ if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", true))
+ ERR(1, "OSSL_PROVIDER_try_load(pkcs11)");
+ if (!OSSL_PROVIDER_try_load(NULL, "default", true))
+ ERR(1, "OSSL_PROVIDER_try_load(default)");
+
+ store = OSSL_STORE_open(cert_src, NULL, NULL, NULL, NULL);
+ ERR(!store, "OSSL_STORE_open");
+
+ while (!OSSL_STORE_eof(store)) {
+ OSSL_STORE_INFO *info = OSSL_STORE_load(store);
+
+ if (!info) {
+ drain_openssl_errors(__LINE__, 0);
+ continue;
+ }
+ if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_CERT) {
+ cert = OSSL_STORE_INFO_get1_CERT(info);
+ ERR(!cert, "OSSL_STORE_INFO_get1_CERT");
+ }
+ OSSL_STORE_INFO_free(info);
+ if (cert)
+ break;
+ }
+ OSSL_STORE_close(store);
+#elif defined(USE_PKCS11_ENGINE)
+ ENGINE *e;
+ struct {
+ const char *cert_id;
+ X509 *cert;
+ } parms;
+
+ parms.cert_id = cert_src;
+ parms.cert = NULL;
+
+ ENGINE_load_builtin_engines();
+ drain_openssl_errors(__LINE__, 1);
+ e = ENGINE_by_id("pkcs11");
+ ERR(!e, "Load PKCS#11 ENGINE");
+ if (ENGINE_init(e))
+ drain_openssl_errors(__LINE__, 1);
+ else
+ ERR(1, "ENGINE_init");
+ if (key_pass)
+ ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
+ ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
+ ERR(!parms.cert, "Get X.509 from PKCS#11");
+ cert = parms.cert;
+#else
+ fprintf(stderr, "no pkcs11 engine/provider available\n");
+ exit(1);
+#endif
+ return cert;
+}
+
int main(int argc, char **argv)
{
char *cert_src;
@@ -122,28 +150,10 @@ int main(int argc, char **argv)
fclose(f);
exit(0);
} else if (!strncmp(cert_src, "pkcs11:", 7)) {
- ENGINE *e;
- struct {
- const char *cert_id;
- X509 *cert;
- } parms;
-
- parms.cert_id = cert_src;
- parms.cert = NULL;
+ X509 *cert = load_cert_pkcs11(cert_src);
- ENGINE_load_builtin_engines();
- drain_openssl_errors();
- e = ENGINE_by_id("pkcs11");
- ERR(!e, "Load PKCS#11 ENGINE");
- if (ENGINE_init(e))
- drain_openssl_errors();
- else
- ERR(1, "ENGINE_init");
- if (key_pass)
- ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
- ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
- ERR(!parms.cert, "Get X.509 from PKCS#11");
- write_cert(parms.cert);
+ ERR(!cert, "load_cert_pkcs11 failed");
+ write_cert(cert);
} else {
BIO *b;
X509 *x509;
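The error helpers deleted above now come from scripts/ssl-common.h, which the Makefile hunk adds to the host include path. Its contents are not part of this diff; a plausible reconstruction consistent with the new call sites (drain_openssl_errors(__LINE__, 0) and ERR(cond, fmt, ...)) would be:

	/* Hypothetical sketch of scripts/ssl-common.h; not shown in this diff. */
	#include <err.h>
	#include <stdio.h>
	#include <stdbool.h>
	#include <openssl/err.h>

	static inline void drain_openssl_errors(int line, int silent)
	{
		unsigned long e;

		while ((e = ERR_get_error()) != 0) {
			if (!silent)
				fprintf(stderr, "At %d: - SSL %s\n",
					line, ERR_error_string(e, NULL));
		}
	}

	#define ERR(cond, fmt, ...)				\
		do {						\
			bool __cond = (cond);			\
			drain_openssl_errors(__LINE__, 0);	\
			if (__cond)				\
				err(1, fmt, ## __VA_ARGS__);	\
		} while (0)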
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index a5da8ccd353e..43af5fa510c0 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring,
char *req, *p;
int len;
- WARN_ON(!id_0 && !id_1 && !id_2);
-
if (id_0) {
lookup = id_0->data;
len = id_0->len;
} else if (id_1) {
lookup = id_1->data;
len = id_1->len;
- } else {
+ } else if (id_2) {
lookup = id_2->data;
len = id_2->len;
+ } else {
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
}
/* Construct an identifier "id:<keyid>". */
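Because find_asymmetric_key() already returned ERR_PTR values for lookup failures, callers need no changes for the new -EINVAL path; the usual call-site pattern still applies:

	/* Illustrative caller pattern for the ERR_PTR contract. */
	struct key *key;

	key = find_asymmetric_key(keyring, id_0, id_1, id_2, partial);
	if (IS_ERR(key))
		return PTR_ERR(key);	/* now also covers the all-NULL-IDs case */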
diff --git a/drivers/Makefile b/drivers/Makefile
index fe9ceb0d2288..45d1c3e630f7 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_PINCTRL) += pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
obj-y += pwm/
+# LEDs must come before PCI; the NPEM driver needs them
+obj-y += leds/
+
obj-y += pci/
obj-$(CONFIG_PARISC) += parisc/
@@ -130,7 +133,6 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-y += ufs/
obj-$(CONFIG_MEMSTICK) += memstick/
-obj-y += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 16c3edb8c46e..aa826033b0ce 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -8,7 +8,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <drm/drm_accel.h>
#include <drm/drm_auth.h>
@@ -18,8 +18,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>
-static DEFINE_SPINLOCK(accel_minor_lock);
-static struct idr accel_minors_idr;
+DEFINE_XARRAY_ALLOC(accel_minors_xa);
static struct dentry *accel_debugfs_root;
@@ -118,99 +117,6 @@ void accel_set_device_instance_params(struct device *kdev, int index)
}
/**
- * accel_minor_alloc() - Allocates a new accel minor
- *
- * This function access the accel minors idr and allocates from it
- * a new id to represent a new accel minor
- *
- * Return: A new id on success or error code in case idr_alloc failed
- */
-int accel_minor_alloc(void)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-
- return r;
-}
-
-/**
- * accel_minor_remove() - Remove an accel minor
- * @index: The minor id to remove.
- *
- * This function access the accel minors idr and removes from
- * it the member with the id that is passed to this function.
- */
-void accel_minor_remove(int index)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- idr_remove(&accel_minors_idr, index);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/**
- * accel_minor_replace() - Replace minor pointer in accel minors idr.
- * @minor: Pointer to the new minor.
- * @index: The minor id to replace.
- *
- * This function access the accel minors idr structure and replaces the pointer
- * that is associated with an existing id. Because the minor pointer can be
- * NULL, we need to explicitly pass the index.
- *
- * Return: 0 for success, negative value for error
- */
-void accel_minor_replace(struct drm_minor *minor, int index)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- idr_replace(&accel_minors_idr, minor, index);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-}
-
-/*
- * Looks up the given minor-ID and returns the respective DRM-minor object. The
- * refence-count of the underlying device is increased so you must release this
- * object with accel_minor_release().
- *
- * The object can be only a drm_minor that represents an accel device.
- *
- * As long as you hold this minor, it is guaranteed that the object and the
- * minor->dev pointer will stay valid! However, the device may get unplugged and
- * unregistered while you hold the minor.
- */
-static struct drm_minor *accel_minor_acquire(unsigned int minor_id)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- spin_lock_irqsave(&accel_minor_lock, flags);
- minor = idr_find(&accel_minors_idr, minor_id);
- if (minor)
- drm_dev_get(minor->dev);
- spin_unlock_irqrestore(&accel_minor_lock, flags);
-
- if (!minor) {
- return ERR_PTR(-ENODEV);
- } else if (drm_dev_is_unplugged(minor->dev)) {
- drm_dev_put(minor->dev);
- return ERR_PTR(-ENODEV);
- }
-
- return minor;
-}
-
-static void accel_minor_release(struct drm_minor *minor)
-{
- drm_dev_put(minor->dev);
-}
-
-/**
* accel_open - open method for ACCEL file
* @inode: device inode
* @filp: file pointer.
@@ -227,7 +133,7 @@ int accel_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int retcode;
- minor = accel_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -246,7 +152,7 @@ int accel_open(struct inode *inode, struct file *filp)
err_undo:
atomic_dec(&dev->open_count);
- accel_minor_release(minor);
+ drm_minor_release(minor);
return retcode;
}
EXPORT_SYMBOL_GPL(accel_open);
@@ -257,7 +163,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int err;
- minor = accel_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -274,7 +180,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
err = 0;
out:
- accel_minor_release(minor);
+ drm_minor_release(minor);
return err;
}
@@ -290,15 +196,13 @@ void accel_core_exit(void)
unregister_chrdev(ACCEL_MAJOR, "accel");
debugfs_remove(accel_debugfs_root);
accel_sysfs_destroy();
- idr_destroy(&accel_minors_idr);
+ WARN_ON(!xa_empty(&accel_minors_xa));
}
int __init accel_core_init(void)
{
int ret;
- idr_init(&accel_minors_idr);
-
ret = accel_sysfs_init();
if (ret < 0) {
DRM_ERROR("Cannot create ACCEL class: %d\n", ret);
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index de3d66116375..ede6165e09d9 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -60,6 +60,10 @@ static struct {
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
+/* Production fw_names from the table above */
+MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+
static int ivpu_fw_request(struct ivpu_device *vdev)
{
int ret = -ENOENT;
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 580b29ed1902..bf10156c334e 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -447,9 +447,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
return ret;
- ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- if (ret)
- return ret;
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
if (IS_ERR(qdev->bar_0))
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e3a7c2aedd5f..d67f63d93b2a 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -451,7 +451,7 @@ config ACPI_HED
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI && (X86 || ARM64)
+ depends on EFI && (X86 || ARM64 || LOONGARCH)
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c
index 8b8be0c90709..4f81a119ec08 100644
--- a/drivers/acpi/apei/einj-cxl.c
+++ b/drivers/acpi/apei/einj-cxl.c
@@ -7,9 +7,9 @@
*
* Author: Ben Cheatham <benjamin.cheatham@amd.com>
*/
-#include <linux/einj-cxl.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
+#include <cxl/einj.h>
#include "apei-internal.h"
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 8bc71cdc2270..246076341e8c 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -199,7 +199,6 @@ static const struct file_operations erst_dbg_ops = {
.read = erst_dbg_read,
.write = erst_dbg_write,
.unlocked_ioctl = erst_dbg_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice erst_dbg_dev = {
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 623cc0cb4a65..ada93cfde9ba 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -27,7 +27,6 @@
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/cleanup.h>
-#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -50,6 +49,7 @@
#include <acpi/apei.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
+#include <cxl/event.h>
#include <ras/ras_event.h>
#include "apei-internal.h"
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 44f91f2c6c5d..bec0dcd1f9c3 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -17,6 +17,7 @@
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
+#include <linux/numa_memblks.h>
static nodemask_t nodes_found_map = NODE_MASK_NONE;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index ff30ceca2203..630fe0a34bc6 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -288,7 +288,7 @@ static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
}
#endif /* CONFIG_X86_IO_APIC */
-static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
struct acpi_prt_entry *entry = NULL;
struct pci_dev *bridge;
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 860014b89b8e..58e10a980114 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -181,6 +181,18 @@ static struct mcfg_fixup mcfg_quirks[] = {
LOONGSON_ECAM_MCFG("LOONGSON", 0),
LOONGSON_ECAM_MCFG("\0", 1),
LOONGSON_ECAM_MCFG("LOONGSON", 1),
+ LOONGSON_ECAM_MCFG("\0", 2),
+ LOONGSON_ECAM_MCFG("LOONGSON", 2),
+ LOONGSON_ECAM_MCFG("\0", 3),
+ LOONGSON_ECAM_MCFG("LOONGSON", 3),
+ LOONGSON_ECAM_MCFG("\0", 4),
+ LOONGSON_ECAM_MCFG("LOONGSON", 4),
+ LOONGSON_ECAM_MCFG("\0", 5),
+ LOONGSON_ECAM_MCFG("LOONGSON", 5),
+ LOONGSON_ECAM_MCFG("\0", 6),
+ LOONGSON_ECAM_MCFG("LOONGSON", 6),
+ LOONGSON_ECAM_MCFG("\0", 7),
+ LOONGSON_ECAM_MCFG("LOONGSON", 7),
#endif /* LOONGARCH */
};
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e8643c69d426..978740537a1a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -277,7 +277,7 @@ _binder_proc_lock(struct binder_proc *proc, int line)
}
/**
- * binder_proc_unlock() - Release spinlock for given binder_proc
+ * binder_proc_unlock() - Release outer lock for given binder_proc
* @proc: struct binder_proc to acquire
*
* Release lock acquired via binder_proc_lock()
@@ -1352,6 +1352,7 @@ static void binder_free_ref(struct binder_ref *ref)
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death);
+ kfree(ref->freeze);
kfree(ref);
}
@@ -1546,7 +1547,7 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
* by threads that are being released. When done with the binder_proc,
* this function is called to decrement the counter and free the
* proc if appropriate (proc has been released, all threads have
- * been released and not currenly in-use to process a transaction).
+ * been released and not currently in-use to process a transaction).
*/
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
@@ -3842,6 +3843,155 @@ err_invalid_target_handle:
}
}
+static int
+binder_request_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+ bool is_frozen;
+
+ freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
+ if (!freeze)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+
+ if (ref->freeze || !ref->node->proc) {
+ binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n",
+ proc->pid, thread->pid,
+ ref->freeze ? "already set" : "dead node");
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(freeze);
+ return -EINVAL;
+ }
+ binder_inner_proc_lock(ref->node->proc);
+ is_frozen = ref->node->proc->is_frozen;
+ binder_inner_proc_unlock(ref->node->proc);
+
+ binder_stats_created(BINDER_STAT_FREEZE);
+ INIT_LIST_HEAD(&freeze->work.entry);
+ freeze->cookie = handle_cookie->cookie;
+ freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ freeze->is_frozen = is_frozen;
+
+ ref->freeze = freeze;
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
+
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_clear_freeze_notification(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_handle_cookie *handle_cookie)
+{
+ struct binder_ref_freeze *freeze;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
+ if (!ref) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
+ proc->pid, thread->pid, handle_cookie->handle);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+
+ binder_node_lock(ref->node);
+
+ if (!ref->freeze) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
+ proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ freeze = ref->freeze;
+ binder_inner_proc_lock(proc);
+ if (freeze->cookie != handle_cookie->cookie) {
+ binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)freeze->cookie,
+ (u64)handle_cookie->cookie);
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return -EINVAL;
+ }
+ ref->freeze = NULL;
+ /*
+ * Take the existing freeze object and overwrite its work type. There are three cases here:
+ * 1. No pending notification. In this case just add the work to the queue.
+ * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
+ * should resend with the new work type.
+ * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
+ * needs to be done here.
+ */
+ freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
+ if (list_empty(&freeze->work.entry)) {
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ } else if (freeze->sent) {
+ freeze->resend = true;
+ }
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ return 0;
+}
+
+static int
+binder_freeze_notification_done(struct binder_proc *proc,
+ struct binder_thread *thread,
+ binder_uintptr_t cookie)
+{
+ struct binder_ref_freeze *freeze = NULL;
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ struct binder_ref_freeze *tmp_freeze =
+ container_of(w, struct binder_ref_freeze, work);
+
+ if (tmp_freeze->cookie == cookie) {
+ freeze = tmp_freeze;
+ break;
+ }
+ }
+ if (!freeze) {
+ binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
+ proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
+ return -EINVAL;
+ }
+ binder_dequeue_work_ilocked(&freeze->work);
+ freeze->sent = false;
+ if (freeze->resend) {
+ freeze->resend = false;
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ return 0;
+}
+
/**
* binder_free_buf() - free the specified buffer
* @proc: binder proc that owns buffer
@@ -4325,6 +4475,44 @@ static int binder_thread_write(struct binder_proc *proc,
binder_inner_proc_unlock(proc);
} break;
+ case BC_REQUEST_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_request_freeze_notification(proc, thread,
+ &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_handle_cookie handle_cookie;
+ int error;
+
+ if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
+ return -EFAULT;
+ ptr += sizeof(handle_cookie);
+ error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
+ if (error)
+ return error;
+ } break;
+
+ case BC_FREEZE_NOTIFICATION_DONE: {
+ binder_uintptr_t cookie;
+ int error;
+
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(cookie);
+ error = binder_freeze_notification_done(proc, thread, cookie);
+ if (error)
+ return error;
+ } break;
+
default:
pr_err("%d:%d unknown command %u\n",
proc->pid, thread->pid, cmd);
@@ -4714,6 +4902,46 @@ retry:
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
+
+ case BINDER_WORK_FROZEN_BINDER: {
+ struct binder_ref_freeze *freeze;
+ struct binder_frozen_state_info info;
+
+ memset(&info, 0, sizeof(info));
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ info.is_frozen = freeze->is_frozen;
+ info.cookie = freeze->cookie;
+ freeze->sent = true;
+ binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
+ binder_inner_proc_unlock(proc);
+
+ if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &info, sizeof(info)))
+ return -EFAULT;
+ ptr += sizeof(info);
+ binder_stat_br(proc, thread, BR_FROZEN_BINDER);
+ goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
+ } break;
+
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze =
+ container_of(w, struct binder_ref_freeze, work);
+ binder_uintptr_t cookie = freeze->cookie;
+
+ binder_inner_proc_unlock(proc);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
+ } break;
+
default:
binder_inner_proc_unlock(proc);
pr_err("%d:%d: bad work type %d\n",
@@ -5322,6 +5550,48 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
return false;
}
+static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
+{
+ struct rb_node *n;
+ struct binder_ref *ref;
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
+ struct binder_node *node;
+
+ node = rb_entry(n, struct binder_node, rb_node);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ hlist_for_each_entry(ref, &node->refs, node_entry) {
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * freeze notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->freeze) {
+ binder_inner_proc_unlock(ref->proc);
+ continue;
+ }
+ ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
+ if (list_empty(&ref->freeze->work.entry)) {
+ ref->freeze->is_frozen = is_frozen;
+ binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ } else {
+ if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
+ ref->freeze->resend = true;
+ ref->freeze->is_frozen = is_frozen;
+ }
+ binder_inner_proc_unlock(ref->proc);
+ }
+ binder_node_unlock(node);
+ binder_inner_proc_lock(proc);
+ }
+ binder_inner_proc_unlock(proc);
+}
+
static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{
@@ -5333,6 +5603,7 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
target_proc->async_recv = false;
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ binder_add_freeze_work(target_proc, false);
return 0;
}
@@ -5365,6 +5636,8 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
binder_inner_proc_lock(target_proc);
target_proc->is_frozen = false;
binder_inner_proc_unlock(target_proc);
+ } else {
+ binder_add_freeze_work(target_proc, true);
}
return ret;
@@ -5740,6 +6013,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_stats_created(BINDER_STAT_PROC);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->delivered_freeze);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
@@ -6291,7 +6565,9 @@ static const char * const binder_return_strings[] = {
"BR_FAILED_REPLY",
"BR_FROZEN_REPLY",
"BR_ONEWAY_SPAM_SUSPECT",
- "BR_TRANSACTION_PENDING_FROZEN"
+ "BR_TRANSACTION_PENDING_FROZEN",
+ "BR_FROZEN_BINDER",
+ "BR_CLEAR_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_command_strings[] = {
@@ -6314,6 +6590,9 @@ static const char * const binder_command_strings[] = {
"BC_DEAD_BINDER_DONE",
"BC_TRANSACTION_SG",
"BC_REPLY_SG",
+ "BC_REQUEST_FREEZE_NOTIFICATION",
+ "BC_CLEAR_FREEZE_NOTIFICATION",
+ "BC_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_objstat_strings[] = {
@@ -6323,7 +6602,8 @@ static const char * const binder_objstat_strings[] = {
"ref",
"death",
"transaction",
- "transaction_complete"
+ "transaction_complete",
+ "freeze",
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
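Taken together, the new BC_*/BR_* pairs form a request/notify/ack loop with userspace. A rough sketch of one round trip over BINDER_WRITE_READ (the binder_frozen_state_info layout is assumed from the handler code above, not quoted from the UAPI header):

	/* Hedged userspace sketch of requesting a freeze notification. */
	struct {
		uint32_t cmd;
		struct binder_handle_cookie hc;	/* { handle, cookie } */
	} __attribute__((packed)) req = {
		.cmd = BC_REQUEST_FREEZE_NOTIFICATION,
		.hc = { .handle = target_handle, .cookie = my_cookie },
	};
	struct binder_write_read bwr = {
		.write_size = sizeof(req),
		.write_buffer = (binder_uintptr_t)&req,
	};

	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
	/*
	 * Subsequent reads deliver BR_FROZEN_BINDER plus a
	 * binder_frozen_state_info carrying is_frozen and the cookie. Each
	 * notification must be acknowledged with BC_FREEZE_NOTIFICATION_DONE
	 * (same cookie); if the frozen state flipped in the meantime, the
	 * kernel re-queues the work (freeze->resend) with the new state.
	 */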
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7d4fc53f7a73..f8d6be682f23 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -130,12 +130,13 @@ enum binder_stat_types {
BINDER_STAT_DEATH,
BINDER_STAT_TRANSACTION,
BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_FREEZE,
BINDER_STAT_COUNT
};
struct binder_stats {
- atomic_t br[_IOC_NR(BR_TRANSACTION_PENDING_FROZEN) + 1];
- atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
+ atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
};
@@ -160,6 +161,8 @@ struct binder_work {
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ BINDER_WORK_FROZEN_BINDER,
+ BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
} type;
};
@@ -276,6 +279,14 @@ struct binder_ref_death {
binder_uintptr_t cookie;
};
+struct binder_ref_freeze {
+ struct binder_work work;
+ binder_uintptr_t cookie;
+ bool is_frozen:1;
+ bool sent:1;
+ bool resend:1;
+};
+
/**
* struct binder_ref_data - binder_ref counts and id
* @debug_id: unique ID for the ref
@@ -308,6 +319,8 @@ struct binder_ref_data {
* @node indicates the node must be freed
* @death: pointer to death notification (ref_death) if requested
* (protected by @node->lock)
+ * @freeze: pointer to freeze notification (ref_freeze) if requested
+ * (protected by @node->lock)
*
* Structure to track references from procA to target node (on procB). This
* structure is unsafe to access without holding @proc->outer_lock.
@@ -324,6 +337,7 @@ struct binder_ref {
struct binder_proc *proc;
struct binder_node *node;
struct binder_ref_death *death;
+ struct binder_ref_freeze *freeze;
};
/**
@@ -377,6 +391,8 @@ struct binder_ref {
* (atomics, no lock needed)
* @delivered_death: list of delivered death notification
* (protected by @inner_lock)
+ * @delivered_freeze: list of delivered freeze notification
+ * (protected by @inner_lock)
* @max_threads: cap on number of binder threads
* (protected by @inner_lock)
* @requested_threads: number of binder threads requested but not
@@ -424,6 +440,7 @@ struct binder_proc {
struct list_head todo;
struct binder_stats stats;
struct list_head delivered_death;
+ struct list_head delivered_freeze;
u32 max_threads;
int requested_threads;
int requested_threads_started;
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 3001d754ac36..ad1fa7abc323 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -58,6 +58,7 @@ enum binderfs_stats_mode {
struct binder_features {
bool oneway_spam_detection;
bool extended_error;
+ bool freeze_notification;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -74,6 +75,7 @@ static const struct fs_parameter_spec binderfs_fs_parameters[] = {
static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
+ .freeze_notification = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -608,6 +610,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
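Userspace can probe for the new commands before issuing them by reading the feature file this hunk creates. A minimal sketch, assuming a binderfs mount at /dev/binderfs:

	/* Minimal feature probe; the mount point is an assumption. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool binder_has_freeze_notification(void)
	{
		char val = '0';
		FILE *f = fopen("/dev/binderfs/features/freeze_notification", "r");

		if (!f)
			return false;
		val = fgetc(f);
		fclose(f);
		return val == '1';
	}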
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a05c17249448..45f63b09828a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1370,7 +1370,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
* V1.03 is known to be broken. V3.04 is known to
* work. Between, there are V1.06, V2.06 and V3.03
* that we don't have much idea about. For now,
- * blacklist anything older than V3.04.
+ * assume that anything older than V3.04 is broken.
*
* http://bugzilla.kernel.org/show_bug.cgi?id=15104
*/
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 70c3a33eee6f..2f16524c2526 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -437,7 +437,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -451,8 +450,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
priv->version = (unsigned long)of_id->data;
priv->dev = dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
- priv->top_ctrl = devm_ioremap_resource(dev, res);
+ priv->top_ctrl = devm_platform_ioremap_resource_byname(pdev, "top-ctrl");
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index cb768f66f0a7..6f955e9105e8 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -19,6 +19,7 @@
#include <linux/libata.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/phy/phy.h>
#include <linux/thermal.h>
#include "ahci.h"
@@ -44,42 +45,10 @@ enum {
/* Clock Reset Register */
IMX_CLOCK_RESET = 0x7f3f,
IMX_CLOCK_RESET_RESET = 1 << 0,
- /* IMX8QM HSIO AHCI definitions */
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET = 0x03,
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET = 0x09,
- IMX8QM_SATA_PHY_IMPED_RATIO_85OHM = 0x6c,
- IMX8QM_LPCG_PHYX2_OFFSET = 0x00000,
- IMX8QM_CSR_PHYX2_OFFSET = 0x90000,
- IMX8QM_CSR_PHYX1_OFFSET = 0xa0000,
- IMX8QM_CSR_PHYX_STTS0_OFFSET = 0x4,
- IMX8QM_CSR_PCIEA_OFFSET = 0xb0000,
- IMX8QM_CSR_PCIEB_OFFSET = 0xc0000,
- IMX8QM_CSR_SATA_OFFSET = 0xd0000,
- IMX8QM_CSR_PCIE_CTRL2_OFFSET = 0x8,
- IMX8QM_CSR_MISC_OFFSET = 0xe0000,
-
- IMX8QM_LPCG_PHYX2_PCLK0_MASK = (0x3 << 16),
- IMX8QM_LPCG_PHYX2_PCLK1_MASK = (0x3 << 20),
- IMX8QM_PHY_APB_RSTN_0 = BIT(0),
- IMX8QM_PHY_MODE_SATA = BIT(19),
- IMX8QM_PHY_MODE_MASK = (0xf << 17),
- IMX8QM_PHY_PIPE_RSTN_0 = BIT(24),
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0 = BIT(25),
- IMX8QM_PHY_PIPE_RSTN_1 = BIT(26),
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1 = BIT(27),
- IMX8QM_STTS0_LANE0_TX_PLL_LOCK = BIT(4),
- IMX8QM_MISC_IOB_RXENA = BIT(0),
- IMX8QM_MISC_IOB_TXENA = BIT(1),
- IMX8QM_MISC_PHYX1_EPCS_SEL = BIT(12),
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 = BIT(24),
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 = BIT(25),
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 = BIT(28),
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0 = BIT(29),
- IMX8QM_SATA_CTRL_RESET_N = BIT(12),
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N = BIT(7),
- IMX8QM_CTRL_BUTTON_RST_N = BIT(21),
- IMX8QM_CTRL_POWER_UP_RST_N = BIT(23),
- IMX8QM_CTRL_LTSSM_ENABLE = BIT(4),
+ /* IMX8QM SATA specific control registers */
+ IMX8QM_SATA_AHCI_PTC = 0xc8,
+ IMX8QM_SATA_AHCI_PTC_RXWM_MASK = GENMASK(6, 0),
+ IMX8QM_SATA_AHCI_PTC_RXWM = 0x29,
};
enum ahci_imx_type {
@@ -95,14 +64,10 @@ struct imx_ahci_priv {
struct clk *sata_clk;
struct clk *sata_ref_clk;
struct clk *ahb_clk;
- struct clk *epcs_tx_clk;
- struct clk *epcs_rx_clk;
- struct clk *phy_apbclk;
- struct clk *phy_pclk0;
- struct clk *phy_pclk1;
- void __iomem *phy_base;
- struct gpio_desc *clkreq_gpiod;
struct regmap *gpr;
+ struct phy *sata_phy;
+ struct phy *cali_phy0;
+ struct phy *cali_phy1;
bool no_device;
bool first_time;
u32 phy_params;
@@ -450,201 +415,79 @@ ATTRIBUTE_GROUPS(fsl_sata_ahci);
static int imx8_sata_enable(struct ahci_host_priv *hpriv)
{
- u32 val, reg;
- int i, ret;
+ u32 val;
+ int ret;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
struct device *dev = &imxpriv->ahci_pdev->dev;
- /* configure the hsio for sata */
- ret = clk_prepare_enable(imxpriv->phy_pclk0);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_pclk0.\n");
+ /*
+ * Since "REXT" pin is only present for first lane of i.MX8QM
+ * PHY, its calibration results will be stored, passed through
+ * to the second lane PHY, and shared with all three lane PHYs.
+ *
+ * Initialize the first two lane PHYs here, although only the
+ * third lane PHY is used by SATA.
+ */
+ ret = phy_init(imxpriv->cali_phy0);
+ if (ret) {
+ dev_err(dev, "cali PHY init failed\n");
return ret;
}
- ret = clk_prepare_enable(imxpriv->phy_pclk1);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_pclk1.\n");
- goto disable_phy_pclk0;
+ ret = phy_power_on(imxpriv->cali_phy0);
+ if (ret) {
+ dev_err(dev, "cali PHY power on failed\n");
+ goto err_cali_phy0_exit;
}
- ret = clk_prepare_enable(imxpriv->epcs_tx_clk);
- if (ret < 0) {
- dev_err(dev, "can't enable epcs_tx_clk.\n");
- goto disable_phy_pclk1;
+ ret = phy_init(imxpriv->cali_phy1);
+ if (ret) {
+ dev_err(dev, "cali PHY1 init failed\n");
+ goto err_cali_phy0_off;
}
- ret = clk_prepare_enable(imxpriv->epcs_rx_clk);
- if (ret < 0) {
- dev_err(dev, "can't enable epcs_rx_clk.\n");
- goto disable_epcs_tx_clk;
+ ret = phy_power_on(imxpriv->cali_phy1);
+ if (ret) {
+ dev_err(dev, "cali PHY1 power on failed\n");
+ goto err_cali_phy1_exit;
}
- ret = clk_prepare_enable(imxpriv->phy_apbclk);
- if (ret < 0) {
- dev_err(dev, "can't enable phy_apbclk.\n");
- goto disable_epcs_rx_clk;
+ ret = phy_init(imxpriv->sata_phy);
+ if (ret) {
+ dev_err(dev, "sata PHY init failed\n");
+ goto err_cali_phy1_off;
}
- /* Configure PHYx2 PIPE_RSTN */
- regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEA_OFFSET +
- IMX8QM_CSR_PCIE_CTRL2_OFFSET, &val);
- if ((val & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The link of the PCIEA of HSIO is down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX2_OFFSET,
- IMX8QM_PHY_PIPE_RSTN_0 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0,
- IMX8QM_PHY_PIPE_RSTN_0 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_0);
+ ret = phy_set_mode(imxpriv->sata_phy, PHY_MODE_SATA);
+ if (ret) {
+ dev_err(dev, "unable to set SATA PHY mode\n");
+ goto err_sata_phy_exit;
}
- regmap_read(imxpriv->gpr, IMX8QM_CSR_PCIEB_OFFSET +
- IMX8QM_CSR_PCIE_CTRL2_OFFSET, &reg);
- if ((reg & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The link of the PCIEB of HSIO is down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX2_OFFSET,
- IMX8QM_PHY_PIPE_RSTN_1 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1,
- IMX8QM_PHY_PIPE_RSTN_1 |
- IMX8QM_PHY_PIPE_RSTN_OVERRIDE_1);
+ ret = phy_power_on(imxpriv->sata_phy);
+ if (ret) {
+ dev_err(dev, "sata PHY power up failed\n");
+ goto err_sata_phy_exit;
}
- if (((reg | val) & IMX8QM_CTRL_LTSSM_ENABLE) == 0) {
- /* The links of both PCIA and PCIEB of HSIO are down */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_LPCG_PHYX2_OFFSET,
- IMX8QM_LPCG_PHYX2_PCLK0_MASK |
- IMX8QM_LPCG_PHYX2_PCLK1_MASK,
- 0);
- }
-
- /* set PWR_RST and BT_RST of csr_pciea */
- val = IMX8QM_CSR_PCIEA_OFFSET + IMX8QM_CSR_PCIE_CTRL2_OFFSET;
- regmap_update_bits(imxpriv->gpr,
- val,
- IMX8QM_CTRL_BUTTON_RST_N,
- IMX8QM_CTRL_BUTTON_RST_N);
- regmap_update_bits(imxpriv->gpr,
- val,
- IMX8QM_CTRL_POWER_UP_RST_N,
- IMX8QM_CTRL_POWER_UP_RST_N);
-
- /* PHYX1_MODE to SATA */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX1_OFFSET,
- IMX8QM_PHY_MODE_MASK,
- IMX8QM_PHY_MODE_SATA);
- /*
- * BIT0 RXENA 1, BIT1 TXENA 0
- * BIT12 PHY_X1_EPCS_SEL 1.
- */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_IOB_RXENA,
- IMX8QM_MISC_IOB_RXENA);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_IOB_TXENA,
- 0);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_PHYX1_EPCS_SEL,
- IMX8QM_MISC_PHYX1_EPCS_SEL);
- /*
- * It is possible, for PCIe and SATA are sharing
- * the same clock source, HPLL or external oscillator.
- * When PCIe is in low power modes (L1.X or L2 etc),
- * the clock source can be turned off. In this case,
- * if this clock source is required to be toggling by
- * SATA, then SATA functions will be abnormal.
- * Set the override here to avoid it.
- */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_MISC_OFFSET,
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0,
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_OUT_OVERRIDE_0 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_1 |
- IMX8QM_MISC_CLKREQN_IN_OVERRIDE_0);
-
- /* clear PHY RST, then set it */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
- 0);
-
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N,
- IMX8QM_SATA_CTRL_EPCS_PHYRESET_N);
-
- /* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- IMX8QM_SATA_CTRL_RESET_N);
- udelay(1);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- 0);
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_SATA_OFFSET,
- IMX8QM_SATA_CTRL_RESET_N,
- IMX8QM_SATA_CTRL_RESET_N);
-
- /* APB reset */
- regmap_update_bits(imxpriv->gpr,
- IMX8QM_CSR_PHYX1_OFFSET,
- IMX8QM_PHY_APB_RSTN_0,
- IMX8QM_PHY_APB_RSTN_0);
-
- for (i = 0; i < 100; i++) {
- reg = IMX8QM_CSR_PHYX1_OFFSET +
- IMX8QM_CSR_PHYX_STTS0_OFFSET;
- regmap_read(imxpriv->gpr, reg, &val);
- val &= IMX8QM_STTS0_LANE0_TX_PLL_LOCK;
- if (val == IMX8QM_STTS0_LANE0_TX_PLL_LOCK)
- break;
- udelay(1);
- }
+ /* The cali_phy# can be turned off after SATA PHY is initialized. */
+ phy_power_off(imxpriv->cali_phy1);
+ phy_exit(imxpriv->cali_phy1);
+ phy_power_off(imxpriv->cali_phy0);
+ phy_exit(imxpriv->cali_phy0);
- if (val != IMX8QM_STTS0_LANE0_TX_PLL_LOCK) {
- dev_err(dev, "TX PLL of the PHY is not locked\n");
- ret = -ENODEV;
- } else {
- writeb(imxpriv->imped_ratio, imxpriv->phy_base +
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
- writeb(imxpriv->imped_ratio, imxpriv->phy_base +
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
- reg = readb(imxpriv->phy_base +
- IMX8QM_SATA_PHY_RX_IMPED_RATIO_OFFSET);
- if (unlikely(reg != imxpriv->imped_ratio))
- dev_info(dev, "Can't set PHY RX impedance ratio.\n");
- reg = readb(imxpriv->phy_base +
- IMX8QM_SATA_PHY_TX_IMPED_RATIO_OFFSET);
- if (unlikely(reg != imxpriv->imped_ratio))
- dev_info(dev, "Can't set PHY TX impedance ratio.\n");
- usleep_range(50, 100);
+ /* RxWaterMark setting */
+ val = readl(hpriv->mmio + IMX8QM_SATA_AHCI_PTC);
+ val &= ~IMX8QM_SATA_AHCI_PTC_RXWM_MASK;
+ val |= IMX8QM_SATA_AHCI_PTC_RXWM;
+ writel(val, hpriv->mmio + IMX8QM_SATA_AHCI_PTC);
- /*
- * To reduce the power consumption, gate off
- * the PHY clks
- */
- clk_disable_unprepare(imxpriv->phy_apbclk);
- clk_disable_unprepare(imxpriv->phy_pclk1);
- clk_disable_unprepare(imxpriv->phy_pclk0);
- return ret;
- }
+ return 0;
- clk_disable_unprepare(imxpriv->phy_apbclk);
-disable_epcs_rx_clk:
- clk_disable_unprepare(imxpriv->epcs_rx_clk);
-disable_epcs_tx_clk:
- clk_disable_unprepare(imxpriv->epcs_tx_clk);
-disable_phy_pclk1:
- clk_disable_unprepare(imxpriv->phy_pclk1);
-disable_phy_pclk0:
- clk_disable_unprepare(imxpriv->phy_pclk0);
+err_sata_phy_exit:
+ phy_exit(imxpriv->sata_phy);
+err_cali_phy1_off:
+ phy_power_off(imxpriv->cali_phy1);
+err_cali_phy1_exit:
+ phy_exit(imxpriv->cali_phy1);
+err_cali_phy0_off:
+ phy_power_off(imxpriv->cali_phy0);
+err_cali_phy0_exit:
+ phy_exit(imxpriv->cali_phy0);
return ret;
}
@@ -698,6 +541,9 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
}
} else if (imxpriv->type == AHCI_IMX8QM) {
ret = imx8_sata_enable(hpriv);
+ if (ret)
+ goto disable_clk;
+
}
usleep_range(1000, 2000);
@@ -736,8 +582,10 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
break;
case AHCI_IMX8QM:
- clk_disable_unprepare(imxpriv->epcs_rx_clk);
- clk_disable_unprepare(imxpriv->epcs_tx_clk);
+ if (imxpriv->sata_phy) {
+ phy_power_off(imxpriv->sata_phy);
+ phy_exit(imxpriv->sata_phy);
+ }
break;
default:
@@ -760,6 +608,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
ahci_error_handler(ap);
+ if (imxpriv->type == AHCI_IMX8QM)
+ return;
+
if (!(imxpriv->first_time) || ahci_imx_hotplug)
return;
@@ -986,65 +837,19 @@ static const struct scsi_host_template ahci_platform_sht = {
static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
{
- struct resource *phy_res;
- struct platform_device *pdev = imxpriv->ahci_pdev;
- struct device_node *np = dev->of_node;
-
- if (of_property_read_u32(np, "fsl,phy-imp", &imxpriv->imped_ratio))
- imxpriv->imped_ratio = IMX8QM_SATA_PHY_IMPED_RATIO_85OHM;
- phy_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- if (phy_res) {
- imxpriv->phy_base = devm_ioremap(dev, phy_res->start,
- resource_size(phy_res));
- if (!imxpriv->phy_base) {
- dev_err(dev, "error with ioremap\n");
- return -ENOMEM;
- }
- } else {
- dev_err(dev, "missing *phy* reg region.\n");
- return -ENOMEM;
- }
- imxpriv->gpr =
- syscon_regmap_lookup_by_phandle(np, "hsio");
- if (IS_ERR(imxpriv->gpr)) {
- dev_err(dev, "unable to find gpr registers\n");
- return PTR_ERR(imxpriv->gpr);
- }
-
- imxpriv->epcs_tx_clk = devm_clk_get(dev, "epcs_tx");
- if (IS_ERR(imxpriv->epcs_tx_clk)) {
- dev_err(dev, "can't get epcs_tx_clk clock.\n");
- return PTR_ERR(imxpriv->epcs_tx_clk);
- }
- imxpriv->epcs_rx_clk = devm_clk_get(dev, "epcs_rx");
- if (IS_ERR(imxpriv->epcs_rx_clk)) {
- dev_err(dev, "can't get epcs_rx_clk clock.\n");
- return PTR_ERR(imxpriv->epcs_rx_clk);
- }
- imxpriv->phy_pclk0 = devm_clk_get(dev, "phy_pclk0");
- if (IS_ERR(imxpriv->phy_pclk0)) {
- dev_err(dev, "can't get phy_pclk0 clock.\n");
- return PTR_ERR(imxpriv->phy_pclk0);
- }
- imxpriv->phy_pclk1 = devm_clk_get(dev, "phy_pclk1");
- if (IS_ERR(imxpriv->phy_pclk1)) {
- dev_err(dev, "can't get phy_pclk1 clock.\n");
- return PTR_ERR(imxpriv->phy_pclk1);
- }
- imxpriv->phy_apbclk = devm_clk_get(dev, "phy_apbclk");
- if (IS_ERR(imxpriv->phy_apbclk)) {
- dev_err(dev, "can't get phy_apbclk clock.\n");
- return PTR_ERR(imxpriv->phy_apbclk);
- }
-
- /* Fetch GPIO, then enable the external OSC */
- imxpriv->clkreq_gpiod = devm_gpiod_get_optional(dev, "clkreq",
- GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
- if (IS_ERR(imxpriv->clkreq_gpiod))
- return PTR_ERR(imxpriv->clkreq_gpiod);
- if (imxpriv->clkreq_gpiod)
- gpiod_set_consumer_name(imxpriv->clkreq_gpiod, "SATA CLKREQ");
-
+ imxpriv->sata_phy = devm_phy_get(dev, "sata-phy");
+ if (IS_ERR(imxpriv->sata_phy))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->sata_phy),
+ "Failed to get sata_phy\n");
+
+ imxpriv->cali_phy0 = devm_phy_get(dev, "cali-phy0");
+ if (IS_ERR(imxpriv->cali_phy0))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->cali_phy0),
+ "Failed to get cali_phy0\n");
+ imxpriv->cali_phy1 = devm_phy_get(dev, "cali-phy1");
+ if (IS_ERR(imxpriv->cali_phy1))
+ return dev_err_probe(dev, PTR_ERR(imxpriv->cali_phy1),
+ "Failed to get cali_phy1\n");
return 0;
}
@@ -1077,12 +882,6 @@ static int imx_ahci_probe(struct platform_device *pdev)
return PTR_ERR(imxpriv->sata_ref_clk);
}
- imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
- if (IS_ERR(imxpriv->ahb_clk)) {
- dev_err(dev, "can't get ahb clock.\n");
- return PTR_ERR(imxpriv->ahb_clk);
- }
-
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
u32 reg_value;
@@ -1142,11 +941,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
goto disable_clk;
/*
- * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
- * and IP vendor specific register IMX_TIMER1MS.
- * Configure CAP_SSS (support stagered spin up).
- * Implement the port0.
- * Get the ahb clock rate, and configure the TIMER1MS register.
+ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL.
+ * Set CAP_SSS (support staggered spin-up) and implement port0.
*/
reg_val = readl(hpriv->mmio + HOST_CAP);
if (!(reg_val & HOST_CAP_SSS)) {
@@ -1159,8 +955,20 @@ static int imx_ahci_probe(struct platform_device *pdev)
writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
}
- reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
- writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
+ if (imxpriv->type != AHCI_IMX8QM) {
+ /*
+ * Get the AHB clock rate and configure the vendor-specific
+ * TIMER1MS register on i.MX53, i.MX6Q and i.MX6QP only.
+ */
+ imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(imxpriv->ahb_clk)) {
+ dev_err(dev, "Failed to get ahb clock\n");
+ ret = PTR_ERR(imxpriv->ahb_clk);
+ goto disable_sata;
+ }
+ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+ writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
+ }
ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
&ahci_platform_sht);
@@ -1229,6 +1037,6 @@ static struct platform_driver imx_ahci_driver = {
module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
-MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
+MODULE_AUTHOR("Richard Zhu <hongxing.zhu@nxp.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ec3c5bd1f813..093b940bc953 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1446,7 +1446,6 @@ static int piix_init_sidpr(struct ata_host *host)
if (hpriv->map[i] == IDE)
return 0;
- /* is it blacklisted? */
if (piix_no_sidpr(host))
return 0;
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 581704e61f28..7a8064520a35 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -410,7 +410,6 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
struct device *dev)
{
- struct device_node *child;
u32 port;
if (!of_property_read_u32(dev->of_node, "hba-cap", &hpriv->saved_cap))
@@ -419,14 +418,12 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
of_property_read_u32(dev->of_node,
"ports-implemented", &hpriv->saved_port_map);
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
if (!of_device_is_available(child))
continue;
- if (of_property_read_u32(child, "reg", &port)) {
- of_node_put(child);
+ if (of_property_read_u32(child, "reg", &port))
return -EINVAL;
- }
if (!of_property_read_u32(child, "hba-port-cap", &hpriv->saved_port_cap[port]))
hpriv->saved_port_cap[port] &= PORT_CMD_CAP;
@@ -460,7 +457,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
int child_nodes, rc = -ENOMEM, enabled_ports = 0;
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
- struct device_node *child;
u32 mask_port_map = 0;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
@@ -579,7 +575,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
}
if (child_nodes) {
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
u32 port;
struct platform_device *port_dev __maybe_unused;
@@ -588,7 +584,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (of_property_read_u32(child, "reg", &port)) {
rc = -EINVAL;
- of_node_put(child);
goto err_out;
}
@@ -606,18 +601,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (port_dev) {
rc = ahci_platform_get_regulator(hpriv, port,
&port_dev->dev);
- if (rc == -EPROBE_DEFER) {
- of_node_put(child);
+ if (rc == -EPROBE_DEFER)
goto err_out;
- }
}
#endif
rc = ahci_platform_get_phy(hpriv, port, dev, child);
- if (rc) {
- of_node_put(child);
+ if (rc)
goto err_out;
- }
enabled_ports++;
}
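for_each_child_of_node_scoped() attaches an automatic of_node_put() to the loop variable, so every exit path, including the early returns above, drops the child reference without explicit cleanup:

	/* Sketch: the scoped iterator releases the child on any exit path. */
	for_each_child_of_node_scoped(dev->of_node, child) {
		u32 port;

		if (of_property_read_u32(child, "reg", &port))
			return -EINVAL;	/* no of_node_put() needed */
	}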
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 30932552437a..cdb20a700b55 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -72,19 +72,11 @@ const struct ata_port_operations ata_base_port_ops = {
.end_eh = ata_std_end_eh,
};
-const struct ata_port_operations sata_port_ops = {
- .inherits = &ata_base_port_ops,
-
- .qc_defer = ata_std_qc_defer,
- .hardreset = sata_std_hardreset,
-};
-EXPORT_SYMBOL_GPL(sata_port_ops);
-
static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
-static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
+static unsigned int ata_dev_quirks(const struct ata_device *dev);
static DEFINE_IDA(ata_ida);
@@ -94,8 +86,8 @@ struct ata_force_param {
u8 cbl;
u8 spd_limit;
unsigned int xfer_mask;
- unsigned int horkage_on;
- unsigned int horkage_off;
+ unsigned int quirk_on;
+ unsigned int quirk_off;
u16 lflags_on;
u16 lflags_off;
};
@@ -160,18 +152,13 @@ MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-static inline bool ata_dev_print_info(struct ata_device *dev)
+static inline bool ata_dev_print_info(const struct ata_device *dev)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
return ehc->i.flags & ATA_EHI_PRINTINFO;
}
-static bool ata_sstatus_online(u32 sstatus)
-{
- return (sstatus & 0xf) == 0x3;
-}
-
/**
* ata_link_next - link iteration helper
* @link: the previous link, NULL to start
@@ -457,17 +444,17 @@ static void ata_force_xfermask(struct ata_device *dev)
}
/**
- * ata_force_horkage - force horkage according to libata.force
+ * ata_force_quirks - force quirks according to libata.force
* @dev: ATA device of interest
*
- * Force horkage according to libata.force and whine about it.
+ * Force quirks according to libata.force and whine about it.
* For consistency with link selection, device number 15 selects
* the first device connected to the host link.
*
* LOCKING:
* EH context.
*/
-static void ata_force_horkage(struct ata_device *dev)
+static void ata_force_quirks(struct ata_device *dev)
{
int devno = dev->link->pmp + dev->devno;
int alt_devno = devno;
@@ -487,21 +474,21 @@ static void ata_force_horkage(struct ata_device *dev)
fe->device != alt_devno)
continue;
- if (!(~dev->horkage & fe->param.horkage_on) &&
- !(dev->horkage & fe->param.horkage_off))
+ if (!(~dev->quirks & fe->param.quirk_on) &&
+ !(dev->quirks & fe->param.quirk_off))
continue;
- dev->horkage |= fe->param.horkage_on;
- dev->horkage &= ~fe->param.horkage_off;
+ dev->quirks |= fe->param.quirk_on;
+ dev->quirks &= ~fe->param.quirk_off;
- ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
+ ata_dev_notice(dev, "FORCE: modified (%s)\n",
fe->param.name);
}
}
#else
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
-static inline void ata_force_horkage(struct ata_device *dev) { }
+static inline void ata_force_quirks(struct ata_device *dev) { }
#endif
/**
@@ -1221,7 +1208,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
*max_sectors = ata_tf_to_lba48(&tf) + 1;
else
*max_sectors = ata_tf_to_lba(&tf) + 1;
- if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
+ if (dev->quirks & ATA_QUIRK_HPA_SIZE)
(*max_sectors)--;
return 0;
}
@@ -1306,7 +1293,7 @@ static int ata_hpa_resize(struct ata_device *dev)
/* do we need to do it? */
if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
!ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
- (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
+ (dev->quirks & ATA_QUIRK_BROKEN_HPA))
return 0;
/* read native max address */
@@ -1318,7 +1305,7 @@ static int ata_hpa_resize(struct ata_device *dev)
if (rc == -EACCES || !unlock_hpa) {
ata_dev_warn(dev,
"HPA support seems broken, skipping HPA handling\n");
- dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ dev->quirks |= ATA_QUIRK_BROKEN_HPA;
/* we can continue if device aborted the command */
if (rc == -EACCES)
@@ -1355,7 +1342,7 @@ static int ata_hpa_resize(struct ata_device *dev)
"device aborted resize (%llu -> %llu), skipping HPA handling\n",
(unsigned long long)sectors,
(unsigned long long)native_sectors);
- dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ dev->quirks |= ATA_QUIRK_BROKEN_HPA;
return 0;
} else if (rc)
return rc;
@@ -1835,7 +1822,7 @@ retry:
goto err_out;
}
- if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
+ if (dev->quirks & ATA_QUIRK_DUMP_ID) {
ata_dev_info(dev, "dumping IDENTIFY data, "
"class=%d may_fallback=%d tried_spinup=%d\n",
class, may_fallback, tried_spinup);
@@ -2104,7 +2091,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
retry:
ata_tf_init(dev, &tf);
if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
- !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
+ !(dev->quirks & ATA_QUIRK_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
dma = true;
@@ -2124,7 +2111,7 @@ retry:
if (err_mask) {
if (dma) {
- dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+ dev->quirks |= ATA_QUIRK_NO_DMA_LOG;
if (!ata_port_is_frozen(dev->link->ap))
goto retry;
}
@@ -2138,22 +2125,19 @@ retry:
static int ata_log_supported(struct ata_device *dev, u8 log)
{
- struct ata_port *ap = dev->link->ap;
-
- if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+ if (dev->quirks & ATA_QUIRK_NO_LOG_DIR)
return 0;
- if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1))
return 0;
- return get_unaligned_le16(&ap->sector_buf[log * 2]);
+ return get_unaligned_le16(&dev->sector_buf[log * 2]);
}
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err, i;
- if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
+ if (dev->quirks & ATA_QUIRK_NO_ID_DEV_LOG)
return false;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
@@ -2165,7 +2149,7 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
if (ata_id_major_version(dev->id) >= 10)
ata_dev_warn(dev,
"ATA Identify Device Log not supported\n");
- dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
+ dev->quirks |= ATA_QUIRK_NO_ID_DEV_LOG;
return false;
}
@@ -2173,20 +2157,20 @@ static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
* Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
* supported.
*/
- err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
- 1);
+ err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0,
+ dev->sector_buf, 1);
if (err)
return false;
- for (i = 0; i < ap->sector_buf[8]; i++) {
- if (ap->sector_buf[9 + i] == page)
+ for (i = 0; i < dev->sector_buf[8]; i++) {
+ if (dev->sector_buf[9 + i] == page)
return true;
}
return false;
}
-static int ata_do_link_spd_horkage(struct ata_device *dev)
+static int ata_do_link_spd_quirk(struct ata_device *dev)
{
struct ata_link *plink = ata_dev_phys_link(dev);
u32 target, target_limit;
@@ -2194,7 +2178,7 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)
if (!sata_scr_valid(plink))
return 0;
- if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
+ if (dev->quirks & ATA_QUIRK_1_5_GBPS)
target = 1;
else
return 0;
@@ -2212,26 +2196,25 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)
* guaranteed by setting sata_spd_limit to target_limit above.
*/
if (plink->sata_spd > target) {
- ata_dev_info(dev, "applying link speed limit horkage to %s\n",
+ ata_dev_info(dev, "applying link speed limit quirk to %s\n",
sata_spd_string(target));
return -EAGAIN;
}
return 0;
}
-static inline u8 ata_dev_knobble(struct ata_device *dev)
+static inline bool ata_dev_knobble(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
- if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
- return 0;
+ if (ata_dev_quirks(dev) & ATA_QUIRK_BRIDGE_OK)
+ return false;
return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
@@ -2239,14 +2222,14 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
- 0, ap->sector_buf, 1);
+ 0, dev->sector_buf, 1);
if (!err_mask) {
u8 *cmds = dev->ncq_send_recv_cmds;
dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
- memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+ memcpy(cmds, dev->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
- if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+ if (dev->quirks & ATA_QUIRK_NO_NCQ_TRIM) {
ata_dev_dbg(dev, "disabling queued TRIM support\n");
cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
@@ -2256,7 +2239,6 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
static void ata_dev_config_ncq_non_data(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
@@ -2265,17 +2247,14 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
- 0, ap->sector_buf, 1);
- if (!err_mask) {
- u8 *cmds = dev->ncq_non_data_cmds;
-
- memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
- }
+ 0, dev->sector_buf, 1);
+ if (!err_mask)
+ memcpy(dev->ncq_non_data_cmds, dev->sector_buf,
+ ATA_LOG_NCQ_NON_DATA_SIZE);
}
static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
@@ -2284,12 +2263,11 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
err_mask = ata_read_log_page(dev,
ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
- ap->sector_buf,
- 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
- if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
+ if (!(dev->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
goto not_supported;
dev->flags |= ATA_DFLAG_NCQ_PRIO;
@@ -2334,12 +2312,12 @@ static int ata_dev_config_ncq(struct ata_device *dev,
}
if (!IS_ENABLED(CONFIG_SATA_HOST))
return 0;
- if (dev->horkage & ATA_HORKAGE_NONCQ) {
+ if (dev->quirks & ATA_QUIRK_NONCQ) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
}
- if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
+ if (dev->quirks & ATA_QUIRK_NO_NCQ_ON_ATI &&
ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
@@ -2350,7 +2328,7 @@ static int ata_dev_config_ncq(struct ata_device *dev,
dev->flags |= ATA_DFLAG_NCQ;
}
- if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
+ if (!(dev->quirks & ATA_QUIRK_BROKEN_FPDMA_AA) &&
(ap->flags & ATA_FLAG_FPDMA_AA) &&
ata_id_has_fpdma_aa(dev->id)) {
err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
@@ -2360,7 +2338,7 @@ static int ata_dev_config_ncq(struct ata_device *dev,
"failed to enable AA (error_mask=0x%x)\n",
err_mask);
if (err_mask != AC_ERR_DEV) {
- dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
+ dev->quirks |= ATA_QUIRK_BROKEN_FPDMA_AA;
return -EIO;
}
} else
@@ -2405,9 +2383,8 @@ static void ata_dev_config_sense_reporting(struct ata_device *dev)
static void ata_dev_config_zac(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
- u8 *identify_buf = ap->sector_buf;
+ u8 *identify_buf = dev->sector_buf;
dev->zac_zones_optimal_open = U32_MAX;
dev->zac_zones_optimal_nonseq = U32_MAX;
@@ -2459,7 +2436,6 @@ static void ata_dev_config_zac(struct ata_device *dev)
static void ata_dev_config_trusted(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
u64 trusted_cap;
unsigned int err;
@@ -2473,11 +2449,11 @@ static void ata_dev_config_trusted(struct ata_device *dev)
}
err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err)
return;
- trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
+ trusted_cap = get_unaligned_le64(&dev->sector_buf[40]);
if (!(trusted_cap & (1ULL << 63))) {
ata_dev_dbg(dev,
"Trusted Computing capability qword not valid!\n");
@@ -2488,12 +2464,41 @@ static void ata_dev_config_trusted(struct ata_device *dev)
dev->flags |= ATA_DFLAG_TRUSTED;
}
+void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
+{
+ kfree(dev->cdl);
+ dev->cdl = NULL;
+}
+
+static int ata_dev_init_cdl_resources(struct ata_device *dev)
+{
+ struct ata_cdl *cdl = dev->cdl;
+ unsigned int err_mask;
+
+ if (!cdl) {
+ cdl = kzalloc(sizeof(*cdl), GFP_KERNEL);
+ if (!cdl)
+ return -ENOMEM;
+ dev->cdl = cdl;
+ }
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, cdl->desc_log_buf,
+ ATA_LOG_CDL_SIZE / ATA_SECT_SIZE);
+ if (err_mask) {
+ ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
+ ata_dev_cleanup_cdl_resources(dev);
+ return -EIO;
+ }
+
+ return 0;
+}
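The helper pair is safe to call repeatedly: the allocation is reused when dev->cdl already exists and only the log read is redone, while the cleanup helper both frees and clears the pointer. A caller sketch mirroring the flow in ata_dev_config_cdl() below (illustration only, using the patch's own symbols):

	static void cdl_setup_example(struct ata_device *dev)
	{
		if (ata_dev_init_cdl_resources(dev)) {
			/* dev->cdl was freed and cleared on failure. */
			dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
			return;
		}
		dev->flags |= ATA_DFLAG_CDL;
	}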
+
static void ata_dev_config_cdl(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
bool cdl_enabled;
u64 val;
+ int ret;
if (ata_id_major_version(dev->id) < 11)
goto not_supported;
@@ -2505,12 +2510,12 @@ static void ata_dev_config_cdl(struct ata_device *dev)
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SUPPORTED_CAPABILITIES,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
/* Check Command Duration Limit Supported bits */
- val = get_unaligned_le64(&ap->sector_buf[168]);
+ val = get_unaligned_le64(&dev->sector_buf[168]);
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
goto not_supported;
@@ -2523,7 +2528,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
* We must have support for the sense data for successful NCQ commands
* log indicated by the successful NCQ command sense data supported bit.
*/
- val = get_unaligned_le64(&ap->sector_buf[8]);
+ val = get_unaligned_le64(&dev->sector_buf[8]);
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
ata_dev_warn(dev,
"CDL supported but Successful NCQ Command Sense Data is not supported\n");
@@ -2543,11 +2548,11 @@ static void ata_dev_config_cdl(struct ata_device *dev)
*/
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_CURRENT_SETTINGS,
- ap->sector_buf, 1);
+ dev->sector_buf, 1);
if (err_mask)
goto not_supported;
- val = get_unaligned_le64(&ap->sector_buf[8]);
+ val = get_unaligned_le64(&dev->sector_buf[8]);
cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
if (!cdl_enabled) {
@@ -2588,37 +2593,20 @@ static void ata_dev_config_cdl(struct ata_device *dev)
}
}
- /*
- * Allocate a buffer to handle reading the sense data for successful
- * NCQ Commands log page for commands using a CDL with one of the limit
- * policy set to 0xD (successful completion with sense data available
- * bit set).
- */
- if (!ap->ncq_sense_buf) {
- ap->ncq_sense_buf = kmalloc(ATA_LOG_SENSE_NCQ_SIZE, GFP_KERNEL);
- if (!ap->ncq_sense_buf)
- goto not_supported;
- }
-
- /*
- * Command duration limits is supported: cache the CDL log page 18h
- * (command duration descriptors).
- */
- err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1);
- if (err_mask) {
- ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
+ /* CDL is supported: allocate and initialize needed resources. */
+ ret = ata_dev_init_cdl_resources(dev);
+ if (ret) {
+ ata_dev_warn(dev, "Initialize CDL resources failed\n");
goto not_supported;
}
- memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE);
dev->flags |= ATA_DFLAG_CDL;
return;
not_supported:
dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
- kfree(ap->ncq_sense_buf);
- ap->ncq_sense_buf = NULL;
+ ata_dev_cleanup_cdl_resources(dev);
}
static int ata_dev_config_lba(struct ata_device *dev)
@@ -2689,7 +2677,7 @@ static void ata_dev_config_fua(struct ata_device *dev)
goto nofua;
/* Ignore known bad devices and devices that lack NCQ support */
- if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
+ if (!ata_ncq_supported(dev) || (dev->quirks & ATA_QUIRK_NO_FUA))
goto nofua;
dev->flags |= ATA_DFLAG_FUA;
@@ -2702,7 +2690,7 @@ nofua:
static void ata_dev_config_devslp(struct ata_device *dev)
{
- u8 *sata_setting = dev->link->ap->sector_buf;
+ u8 *sata_setting = dev->sector_buf;
unsigned int err_mask;
int i, j;
@@ -2829,11 +2817,11 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
- /* set horkage */
- dev->horkage |= ata_dev_blacklisted(dev);
- ata_force_horkage(dev);
+ /* Set quirks */
+ dev->quirks |= ata_dev_quirks(dev);
+ ata_force_quirks(dev);
- if (dev->horkage & ATA_HORKAGE_DISABLE) {
+ if (dev->quirks & ATA_QUIRK_DISABLE) {
ata_dev_info(dev, "unsupported device, disabling\n");
ata_dev_disable(dev);
return 0;
@@ -2848,19 +2836,19 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
- rc = ata_do_link_spd_horkage(dev);
+ rc = ata_do_link_spd_quirk(dev);
if (rc)
return rc;
/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
- if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
+ if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
- dev->horkage |= ATA_HORKAGE_NOLPM;
+ dev->quirks |= ATA_QUIRK_NOLPM;
if (ap->flags & ATA_FLAG_NO_LPM)
- dev->horkage |= ATA_HORKAGE_NOLPM;
+ dev->quirks |= ATA_QUIRK_NOLPM;
- if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ if (dev->quirks & ATA_QUIRK_NOLPM) {
ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
}
@@ -3006,7 +2994,8 @@ int ata_dev_configure(struct ata_device *dev)
cdb_intr_string = ", CDB intr";
}
- if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
+ if (atapi_dmadir || (dev->quirks & ATA_QUIRK_ATAPI_DMADIR) ||
+ atapi_id_dmadir(dev->id)) {
dev->flags |= ATA_DFLAG_DMADIR;
dma_dir_string = ", DMADIR";
}
@@ -3043,24 +3032,24 @@ int ata_dev_configure(struct ata_device *dev)
if ((dev->class == ATA_DEV_ATAPI) &&
(atapi_command_packet_set(id) == TYPE_TAPE)) {
dev->max_sectors = ATA_MAX_SECTORS_TAPE;
- dev->horkage |= ATA_HORKAGE_STUCK_ERR;
+ dev->quirks |= ATA_QUIRK_STUCK_ERR;
}
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_128)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_1024)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
dev->max_sectors);
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
- if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+ if (dev->quirks & ATA_QUIRK_DIAGNOSTIC) {
/* Let the user know. We don't want to disallow opens for
rescue purposes, or in case the vendor is just a blithering
idiot. Do this after the dev_config call as some controllers
@@ -3075,7 +3064,7 @@ int ata_dev_configure(struct ata_device *dev)
}
}
- if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
+ if ((dev->quirks & ATA_QUIRK_FIRMWARE_WARN) && print_info) {
ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
}
@@ -3199,86 +3188,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
}
EXPORT_SYMBOL_GPL(ata_dev_pair);
-/**
- * sata_down_spd_limit - adjust SATA spd limit downward
- * @link: Link to adjust SATA spd limit for
- * @spd_limit: Additional limit
- *
- * Adjust SATA spd limit of @link downward. Note that this
- * function only adjusts the limit. The change must be applied
- * using sata_set_spd().
- *
- * If @spd_limit is non-zero, the speed is limited to equal to or
- * lower than @spd_limit if such speed is supported. If
- * @spd_limit is slower than any supported speed, only the lowest
- * supported speed is allowed.
- *
- * LOCKING:
- * Inherited from caller.
- *
- * RETURNS:
- * 0 on success, negative errno on failure
- */
-int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
-{
- u32 sstatus, spd, mask;
- int rc, bit;
-
- if (!sata_scr_valid(link))
- return -EOPNOTSUPP;
-
- /* If SCR can be read, use it to determine the current SPD.
- * If not, use cached value in link->sata_spd.
- */
- rc = sata_scr_read(link, SCR_STATUS, &sstatus);
- if (rc == 0 && ata_sstatus_online(sstatus))
- spd = (sstatus >> 4) & 0xf;
- else
- spd = link->sata_spd;
-
- mask = link->sata_spd_limit;
- if (mask <= 1)
- return -EINVAL;
-
- /* unconditionally mask off the highest bit */
- bit = fls(mask) - 1;
- mask &= ~(1 << bit);
-
- /*
- * Mask off all speeds higher than or equal to the current one. At
- * this point, if current SPD is not available and we previously
- * recorded the link speed from SStatus, the driver has already
- * masked off the highest bit so mask should already be 1 or 0.
- * Otherwise, we should not force 1.5Gbps on a link where we have
- * not previously recorded speed from SStatus. Just return in this
- * case.
- */
- if (spd > 1)
- mask &= (1 << (spd - 1)) - 1;
- else if (link->sata_spd)
- return -EINVAL;
-
- /* were we already at the bottom? */
- if (!mask)
- return -EINVAL;
-
- if (spd_limit) {
- if (mask & ((1 << spd_limit) - 1))
- mask &= (1 << spd_limit) - 1;
- else {
- bit = ffs(mask) - 1;
- mask = 1 << bit;
- }
- }
-
- link->sata_spd_limit = mask;
-
- ata_link_warn(link, "limiting SATA link speed to %s\n",
- sata_spd_string(fls(mask)));
-
- return 0;
-}
-
#ifdef CONFIG_ATA_ACPI
/**
* ata_timing_cycle2mode - find xfer mode for the specified cycle duration
@@ -3425,7 +3334,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
struct ata_eh_context *ehc = &dev->link->eh_context;
- const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
+ const bool nosetxfer = dev->quirks & ATA_QUIRK_NOSETXFER;
const char *dev_err_whine = "";
int ign_dev_err = 0;
unsigned int err_mask = 0;
@@ -3761,33 +3670,6 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
EXPORT_SYMBOL_GPL(ata_std_prereset);
/**
- * sata_std_hardreset - COMRESET w/o waiting or classification
- * @link: link to reset
- * @class: resulting class of attached device
- * @deadline: deadline jiffies for the operation
- *
- * Standard SATA COMRESET w/o waiting or classification.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- *
- * RETURNS:
- * 0 if link offline, -EAGAIN if link online, -errno on errors.
- */
-int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
- bool online;
- int rc;
-
- /* do hardreset */
- rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
- return online ? -EAGAIN : rc;
-}
-EXPORT_SYMBOL_GPL(sata_std_hardreset);
-
-/**
* ata_std_postreset - standard postreset callback
* @link: the target ata_link
* @classes: classes of attached devices
@@ -3878,7 +3760,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
unsigned int class = dev->class;
- u16 *id = (void *)dev->link->ap->sector_buf;
+ u16 *id = (void *)dev->sector_buf;
int rc;
/* read ID data */
@@ -3969,7 +3851,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
*/
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
- !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
+ !(dev->quirks & ATA_QUIRK_BROKEN_HPA)) {
ata_dev_warn(dev,
"old n_sectors matches native, probably "
"late HPA lock, will try to unlock HPA\n");
@@ -3987,223 +3869,292 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
return rc;
}
-struct ata_blacklist_entry {
+static const char * const ata_quirk_names[] = {
+ [__ATA_QUIRK_DIAGNOSTIC] = "diagnostic",
+ [__ATA_QUIRK_NODMA] = "nodma",
+ [__ATA_QUIRK_NONCQ] = "noncq",
+ [__ATA_QUIRK_MAX_SEC_128] = "maxsec128",
+ [__ATA_QUIRK_BROKEN_HPA] = "brokenhpa",
+ [__ATA_QUIRK_DISABLE] = "disable",
+ [__ATA_QUIRK_HPA_SIZE] = "hpasize",
+ [__ATA_QUIRK_IVB] = "ivb",
+ [__ATA_QUIRK_STUCK_ERR] = "stuckerr",
+ [__ATA_QUIRK_BRIDGE_OK] = "bridgeok",
+ [__ATA_QUIRK_ATAPI_MOD16_DMA] = "atapimod16dma",
+ [__ATA_QUIRK_FIRMWARE_WARN] = "firmwarewarn",
+ [__ATA_QUIRK_1_5_GBPS] = "1.5gbps",
+ [__ATA_QUIRK_NOSETXFER] = "nosetxfer",
+ [__ATA_QUIRK_BROKEN_FPDMA_AA] = "brokenfpdmaaa",
+ [__ATA_QUIRK_DUMP_ID] = "dumpid",
+ [__ATA_QUIRK_MAX_SEC_LBA48] = "maxseclba48",
+ [__ATA_QUIRK_ATAPI_DMADIR] = "atapidmadir",
+ [__ATA_QUIRK_NO_NCQ_TRIM] = "noncqtrim",
+ [__ATA_QUIRK_NOLPM] = "nolpm",
+ [__ATA_QUIRK_WD_BROKEN_LPM] = "wdbrokenlpm",
+ [__ATA_QUIRK_ZERO_AFTER_TRIM] = "zeroaftertrim",
+ [__ATA_QUIRK_NO_DMA_LOG] = "nodmalog",
+ [__ATA_QUIRK_NOTRIM] = "notrim",
+ [__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
+ [__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
+ [__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
+ [__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog",
+ [__ATA_QUIRK_NO_LOG_DIR] = "nologdir",
+ [__ATA_QUIRK_NO_FUA] = "nofua",
+};
+
+static void ata_dev_print_quirks(const struct ata_device *dev,
+ const char *model, const char *rev,
+ unsigned int quirks)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ int n = 0, i;
+ size_t sz;
+ char *str;
+
+ if (!ata_dev_print_info(dev) || ehc->i.flags & ATA_EHI_DID_PRINT_QUIRKS)
+ return;
+
+ ehc->i.flags |= ATA_EHI_DID_PRINT_QUIRKS;
+
+ if (!quirks)
+ return;
+
+ sz = 64 + ARRAY_SIZE(ata_quirk_names) * 16;
+ str = kmalloc(sz, GFP_KERNEL);
+ if (!str)
+ return;
+
+ n = snprintf(str, sz, "Model '%s', rev '%s', applying quirks:",
+ model, rev);
+
+ for (i = 0; i < ARRAY_SIZE(ata_quirk_names); i++) {
+ if (quirks & (1U << i))
+ n += snprintf(str + n, sz - n,
+ " %s", ata_quirk_names[i]);
+ }
+
+ ata_dev_warn(dev, "%s\n", str);
+
+ kfree(str);
+}
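For example, a device matching an entry with ATA_QUIRK_NONCQ | ATA_QUIRK_NOLPM would, per the snprintf() format above, log one warning of the form (model and revision strings hypothetical):

	Model 'FOO 123', rev '1.00', applying quirks: noncq nolpm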
+
+struct ata_dev_quirks_entry {
const char *model_num;
const char *model_rev;
- unsigned long horkage;
+ unsigned int quirks;
};
-static const struct ata_blacklist_entry ata_device_blacklist [] = {
+static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
/* Devices with DMA related problems under Linux */
- { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
- { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
- { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
- { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
- { "CRD-84", NULL, ATA_HORKAGE_NODMA },
- { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
- { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
- { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
- { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
- { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
- { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
- { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
- { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
- { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
- { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
- { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
- { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
- { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
- { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
- { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
- { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
- { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
+ { "WDC AC11000H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC22100H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC32500H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC33100H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC31600H", NULL, ATA_QUIRK_NODMA },
+ { "WDC AC32100H", "24.09P07", ATA_QUIRK_NODMA },
+ { "WDC AC23200L", "21.10N21", ATA_QUIRK_NODMA },
+ { "Compaq CRD-8241B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-8400B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-848[02]B", NULL, ATA_QUIRK_NODMA },
+ { "CRD-84", NULL, ATA_QUIRK_NODMA },
+ { "SanDisk SDP3B", NULL, ATA_QUIRK_NODMA },
+ { "SanDisk SDP3B-64", NULL, ATA_QUIRK_NODMA },
+ { "SANYO CD-ROM CRD", NULL, ATA_QUIRK_NODMA },
+ { "HITACHI CDR-8", NULL, ATA_QUIRK_NODMA },
+ { "HITACHI CDR-8[34]35", NULL, ATA_QUIRK_NODMA },
+ { "Toshiba CD-ROM XM-6202B", NULL, ATA_QUIRK_NODMA },
+ { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_QUIRK_NODMA },
+ { "CD-532E-A", NULL, ATA_QUIRK_NODMA },
+ { "E-IDE CD-ROM CR-840", NULL, ATA_QUIRK_NODMA },
+ { "CD-ROM Drive/F5A", NULL, ATA_QUIRK_NODMA },
+ { "WPI CDD-820", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SC-148C", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SC", NULL, ATA_QUIRK_NODMA },
+ { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_QUIRK_NODMA },
+ { "_NEC DV5800A", NULL, ATA_QUIRK_NODMA },
+ { "SAMSUNG CD-ROM SN-124", "N001", ATA_QUIRK_NODMA },
+ { "Seagate STT20000A", NULL, ATA_QUIRK_NODMA },
+ { " 2GB ATA Flash Disk", "ADMA428M", ATA_QUIRK_NODMA },
+ { "VRFDFC22048UCHC-TE*", NULL, ATA_QUIRK_NODMA },
/* Odd clown on sil3726/4726 PMPs */
- { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
+ { "Config Disk", NULL, ATA_QUIRK_DISABLE },
/* Similar story with ASMedia 1092 */
- { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
+ { "ASMT109x- Config", NULL, ATA_QUIRK_DISABLE },
/* Weird ATAPI devices */
- { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
- { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
- { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
- { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_QUIRK_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_QUIRK_ATAPI_MOD16_DMA },
+ { "Slimtype DVD A DS8A8SH", NULL, ATA_QUIRK_MAX_SEC_LBA48 },
+ { "Slimtype DVD A DS8A9SH", NULL, ATA_QUIRK_MAX_SEC_LBA48 },
/*
* Causes silent data corruption with higher max sects.
* http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
*/
- { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ { "ST380013AS", "3.20", ATA_QUIRK_MAX_SEC_1024 },
/*
* These devices time out with higher max sects.
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
- { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
- { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+ { "LITEON CX1-JB*-HP", NULL, ATA_QUIRK_MAX_SEC_1024 },
+ { "LITEON EP1-*", NULL, ATA_QUIRK_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
/* NCQ is slow */
- { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
- { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ },
+ { "WDC WD740ADFD-00", NULL, ATA_QUIRK_NONCQ },
+ { "WDC WD740ADFD-00NLR1", NULL, ATA_QUIRK_NONCQ },
/* http://thread.gmane.org/gmane.linux.ide/14907 */
- { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
+ { "FUJITSU MHT2060BH", NULL, ATA_QUIRK_NONCQ },
/* NCQ is broken */
- { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
- { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
- { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
- { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
- { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
+ { "Maxtor *", "BANC*", ATA_QUIRK_NONCQ },
+ { "Maxtor 7V300F0", "VA111630", ATA_QUIRK_NONCQ },
+ { "ST380817AS", "3.42", ATA_QUIRK_NONCQ },
+ { "ST3160023AS", "3.42", ATA_QUIRK_NONCQ },
+ { "OCZ CORE_SSD", "02.10104", ATA_QUIRK_NONCQ },
/* Seagate NCQ + FLUSH CACHE firmware bug */
- { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31500341AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640[36]23AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
- { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
- ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320[68]13AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
+ ATA_QUIRK_FIRMWARE_WARN },
/* drives which fail FPDMA_AA activation (some may freeze afterwards)
the ST disks also have LPM issues */
- { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
- ATA_HORKAGE_NOLPM },
- { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", NULL, ATA_QUIRK_BROKEN_FPDMA_AA |
+ ATA_QUIRK_NOLPM },
+ { "VB0250EAVER", "HPG7", ATA_QUIRK_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
- { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ },
- { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ },
- { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ },
+ { "HTS541060G9SA00", "MB3OC60D", ATA_QUIRK_NONCQ },
+ { "HTS541080G9SA00", "MB4OC60D", ATA_QUIRK_NONCQ },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_QUIRK_NONCQ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
- { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ },
+ { "C300-CTFDDAC128MAG", "0001", ATA_QUIRK_NONCQ },
/* Sandisk SD7/8/9s lock up hard on large trims */
- { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M },
+ { "SanDisk SD[789]*", NULL, ATA_QUIRK_MAX_TRIM_128M },
/* devices which puke on READ_NATIVE_MAX */
- { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
- { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
- { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
- { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
+ { "HDS724040KLSA80", "KFAOA20N", ATA_QUIRK_BROKEN_HPA },
+ { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_QUIRK_BROKEN_HPA },
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_QUIRK_BROKEN_HPA },
+ { "MAXTOR 6L080L4", "A93.0500", ATA_QUIRK_BROKEN_HPA },
/* this one allows HPA unlocking but fails IOs on the area */
- { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
+ { "OCZ-VERTEX", "1.30", ATA_QUIRK_BROKEN_HPA },
/* Devices which report 1 sector over size HPA */
- { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE },
- { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE },
- { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE },
+ { "ST340823A", NULL, ATA_QUIRK_HPA_SIZE },
+ { "ST320413A", NULL, ATA_QUIRK_HPA_SIZE },
+ { "ST310211A", NULL, ATA_QUIRK_HPA_SIZE },
/* Devices which get the IVB wrong */
- { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
- /* Maybe we should just blacklist TSSTcorp... */
- { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB },
+ { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_QUIRK_IVB },
+ /* Maybe we should just add all TSSTcorp devices... */
+ { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_QUIRK_IVB },
/* Devices that do not need bridging limits applied */
- { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK },
- { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK },
+ { "MTRON MSP-SATA*", NULL, ATA_QUIRK_BRIDGE_OK },
+ { "BUFFALO HD-QSU2/R5", NULL, ATA_QUIRK_BRIDGE_OK },
/* Devices which aren't very happy with higher link speeds */
- { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS },
- { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS },
+ { "WD My Book", NULL, ATA_QUIRK_1_5_GBPS },
+ { "Seagate FreeAgent GoFlex", NULL, ATA_QUIRK_1_5_GBPS },
/*
* Devices which choke on SETXFER. Applies only if both the
* device and controller are SATA.
*/
- { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08A", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-215", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-212D", NULL, ATA_QUIRK_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_QUIRK_NOSETXFER },
/* These specific Pioneer models have LPM issues */
- { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
- { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_QUIRK_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_QUIRK_NOLPM },
/* Crucial devices with broken LPM support */
- { "CT*0BX*00SSD1", NULL, ATA_HORKAGE_NOLPM },
+ { "CT*0BX*00SSD1", NULL, ATA_QUIRK_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
- { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT512MX100*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
/* 512GB MX100 with newer firmware has only LPM issues */
- { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT512MX100*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
- { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
- { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NOLPM },
+ { "Crucial_CT480M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
+ { "Crucial_CT960M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NOLPM },
/* AMD Radeon devices with broken LPM support */
- { "R3SL240G", NULL, ATA_HORKAGE_NOLPM },
+ { "R3SL240G", NULL, ATA_QUIRK_NOLPM },
/* Apacer models with LPM issues */
- { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
+ { "Apacer AS340*", NULL, ATA_QUIRK_NOLPM },
/* These specific Samsung models/firmware-revs do not handle LPM well */
- { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
- { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
- { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM },
- { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
+ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_QUIRK_NOLPM },
+ { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_QUIRK_NOLPM },
+ { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_QUIRK_NOLPM },
+ { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_QUIRK_NOLPM },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Micron_1100_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_NO_DMA_LOG |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI },
- { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI },
- { "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM |
- ATA_HORKAGE_NO_NCQ_ON_ATI, },
- { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Micron_M500IT_*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_M500_*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_M5[15]0_*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Micron_1100_*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial_CT*M550*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial_CT*MX100*", "MU01", ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 840 EVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_NO_DMA_LOG |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 840*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 850*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI },
+ { "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI },
+ { "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI, },
+ { "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM },
/* devices that don't properly handle TRIM commands */
- { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM },
- { "M88V29*", NULL, ATA_HORKAGE_NOTRIM },
+ { "SuperSSpeed S238*", NULL, ATA_QUIRK_NOTRIM },
+ { "M88V29*", NULL, ATA_QUIRK_NOTRIM },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4223,14 +4174,14 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "INTEL*SSDSC2MH*", NULL, 0 },
- { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Micron*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Crucial*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "INTEL*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SSD*INTEL*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "Samsung*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SAMSUNG*SSD*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "SAMSUNG*MZ7KM*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
+ { "ST[1248][0248]0[FH]*", NULL, ATA_QUIRK_ZERO_AFTER_TRIM },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4241,62 +4192,66 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*
* https://bugzilla.kernel.org/show_bug.cgi?id=57211
*/
- { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
- { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD800JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD1200JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD1600JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD2000JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD2500JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD3000JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
+ { "WDC WD3200JD-*", NULL, ATA_QUIRK_WD_BROKEN_LPM },
/*
* This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
* log page is accessed. Ensure we never ask for this log page with
* these devices.
*/
- { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+ { "SATADOM-ML 3ME", NULL, ATA_QUIRK_NO_LOG_DIR },
/* Buggy FUA */
- { "Maxtor", "BANC1G10", ATA_HORKAGE_NO_FUA },
- { "WDC*WD2500J*", NULL, ATA_HORKAGE_NO_FUA },
- { "OCZ-VERTEX*", NULL, ATA_HORKAGE_NO_FUA },
- { "INTEL*SSDSC2CT*", NULL, ATA_HORKAGE_NO_FUA },
+ { "Maxtor", "BANC1G10", ATA_QUIRK_NO_FUA },
+ { "WDC*WD2500J*", NULL, ATA_QUIRK_NO_FUA },
+ { "OCZ-VERTEX*", NULL, ATA_QUIRK_NO_FUA },
+ { "INTEL*SSDSC2CT*", NULL, ATA_QUIRK_NO_FUA },
/* End Marker */
{ }
};
-static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
+static unsigned int ata_dev_quirks(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
- const struct ata_blacklist_entry *ad = ata_device_blacklist;
+ const struct ata_dev_quirks_entry *ad = __ata_dev_quirks;
+
+ /* dev->quirks is an unsigned int. */
+ BUILD_BUG_ON(__ATA_QUIRK_MAX > 32);
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
while (ad->model_num) {
- if (glob_match(ad->model_num, model_num)) {
- if (ad->model_rev == NULL)
- return ad->horkage;
- if (glob_match(ad->model_rev, model_rev))
- return ad->horkage;
+ if (glob_match(ad->model_num, model_num) &&
+ (!ad->model_rev || glob_match(ad->model_rev, model_rev))) {
+ ata_dev_print_quirks(dev, model_num, model_rev,
+ ad->quirks);
+ return ad->quirks;
}
ad++;
}
return 0;
}
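The collapsed condition preserves the old two-step semantics: an entry matches when its model glob matches and it either carries no revision pattern or that pattern matches too. As a truth-value sketch (hypothetical helper):

	#include <stdbool.h>

	static bool entry_matches(bool model_ok, bool has_rev, bool rev_ok)
	{
		return model_ok && (!has_rev || rev_ok);
	}

The BUILD_BUG_ON() covers the type change from unsigned long to unsigned int: every __ATA_QUIRK_* bit index must fit a 32-bit mask, so adding a 33rd quirk becomes a compile-time error rather than a silently truncated (1U << i).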
-static int ata_dma_blacklisted(const struct ata_device *dev)
+static bool ata_dev_nodma(const struct ata_device *dev)
{
- /* We don't support polling DMA.
- * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
- * if the LLDD handles only interrupts in the HSM_ST_LAST state.
+ /*
+ * We do not support polling DMA. Deny DMA for those ATAPI devices
+ * with CDB-intr (and use PIO) if the LLDD handles only interrupts in
+ * the HSM_ST_LAST state.
*/
if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
(dev->flags & ATA_DFLAG_CDB_INTR))
- return 1;
- return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
+ return true;
+ return dev->quirks & ATA_QUIRK_NODMA;
}
/**
@@ -4309,7 +4264,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
static int ata_is_40wire(struct ata_device *dev)
{
- if (dev->horkage & ATA_HORKAGE_IVB)
+ if (dev->quirks & ATA_QUIRK_IVB)
return ata_drive_40wire_relaxed(dev->id);
return ata_drive_40wire(dev->id);
}
@@ -4371,8 +4326,7 @@ static int cable_is_40wire(struct ata_port *ap)
*
* Compute supported xfermask of @dev and store it in
* dev->*_mask. This function is responsible for applying all
- * known limits including host controller limits, device
- * blacklist, etc...
+ * known limits including host controller limits, device quirks, etc...
*
* LOCKING:
* None.
@@ -4404,10 +4358,10 @@ static void ata_dev_xfermask(struct ata_device *dev)
xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
}
- if (ata_dma_blacklisted(dev)) {
+ if (ata_dev_nodma(dev)) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_dev_warn(dev,
- "device is on DMA blacklist, disabling DMA\n");
+ "device does not support DMA, disabling DMA\n");
}
if ((host->flags & ATA_HOST_SIMPLEX) &&
@@ -4588,7 +4542,7 @@ int atapi_check_dma(struct ata_queued_cmd *qc)
/* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
* few ATAPI devices choke on such DMA requests.
*/
- if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
+ if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) &&
unlikely(qc->nbytes & 15))
return 1;
@@ -4629,12 +4583,6 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
-enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
-{
- return AC_ERR_OK;
-}
-EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-
/**
* ata_sg_init - Associate command with scatter-gather table.
* @qc: Command to be associated
@@ -4762,8 +4710,9 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ if (WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)))
+ return;
+
ap = qc->ap;
link = qc->dev->link;
@@ -4785,9 +4734,10 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
ap->excl_link == link))
ap->excl_link = NULL;
- /* atapi: mark qc as inactive to prevent the interrupt handler
- * from completing the command twice later, before the error handler
- * is called. (when rc != 0 and atapi request sense is needed)
+ /*
+ * Mark qc as inactive to prevent the port interrupt handler from
+ * completing the command twice later, before the error handler is
+ * called.
*/
qc->flags &= ~ATA_QCFLAG_ACTIVE;
ap->qc_active &= ~(1ULL << qc->tag);
@@ -5021,10 +4971,13 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
- trace_ata_qc_prep(qc);
- qc->err_mask |= ap->ops->qc_prep(qc);
- if (unlikely(qc->err_mask))
- goto err;
+ if (ap->ops->qc_prep) {
+ trace_ata_qc_prep(qc);
+ qc->err_mask |= ap->ops->qc_prep(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
+ }
+
trace_ata_qc_issue(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
@@ -5368,7 +5321,7 @@ void ata_dev_init(struct ata_device *dev)
*/
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_INIT_MASK;
- dev->horkage = 0;
+ dev->quirks = 0;
spin_unlock_irqrestore(ap->lock, flags);
memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
@@ -5510,7 +5463,6 @@ void ata_port_free(struct ata_port *ap)
kfree(ap->pmp_link);
kfree(ap->slave_link);
- kfree(ap->ncq_sense_buf);
ida_free(&ata_ida, ap->print_id);
kfree(ap);
}
@@ -6038,6 +5990,23 @@ int ata_host_activate(struct ata_host *host, int irq,
EXPORT_SYMBOL_GPL(ata_host_activate);
/**
+ * ata_dev_free_resources - Free a device's resources
+ * @dev: Target ATA device
+ *
+ * Free resources allocated to support a device's features.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_free_resources(struct ata_device *dev)
+{
+ if (zpodd_dev_enabled(dev))
+ zpodd_exit(dev);
+
+ ata_dev_cleanup_cdl_resources(dev);
+}
+
+/**
* ata_port_detach - Detach ATA port in preparation of device removal
* @ap: ATA port to be detached
*
@@ -6091,19 +6060,15 @@ static void ata_port_detach(struct ata_port *ap)
cancel_delayed_work_sync(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->scsi_rescan_task);
- /* clean up zpodd on port removal */
- ata_for_each_link(link, ap, HOST_FIRST) {
- ata_for_each_dev(dev, link, ALL) {
- if (zpodd_dev_enabled(dev))
- zpodd_exit(dev);
- }
- }
+ /* Delete port multiplier link transport devices */
if (ap->pmp_link) {
int i;
+
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
ata_tlink_delete(&ap->pmp_link[i]);
}
- /* remove the associated SCSI host */
+
+ /* Remove the associated SCSI host */
scsi_remove_host(ap->scsi_host);
ata_tport_delete(ap);
}
@@ -6299,12 +6264,12 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one);
{ "no" #name, .lflags_on = (flags) }, \
{ #name, .lflags_off = (flags) }
-#define force_horkage_on(name, flag) \
- { #name, .horkage_on = (flag) }
+#define force_quirk_on(name, flag) \
+ { #name, .quirk_on = (flag) }
-#define force_horkage_onoff(name, flag) \
- { "no" #name, .horkage_on = (flag) }, \
- { #name, .horkage_off = (flag) }
+#define force_quirk_onoff(name, flag) \
+ { "no" #name, .quirk_on = (flag) }, \
+ { #name, .quirk_off = (flag) }
static const struct ata_force_param force_tbl[] __initconst = {
force_cbl(40c, ATA_CBL_PATA40),
@@ -6358,32 +6323,32 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
- force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ),
- force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM),
- force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),
+ force_quirk_onoff(ncq, ATA_QUIRK_NONCQ),
+ force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM),
+ force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI),
- force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM),
- force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
- force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
+ force_quirk_onoff(trim, ATA_QUIRK_NOTRIM),
+ force_quirk_on(trim_zero, ATA_QUIRK_ZERO_AFTER_TRIM),
+ force_quirk_on(max_trim_128m, ATA_QUIRK_MAX_TRIM_128M),
- force_horkage_onoff(dma, ATA_HORKAGE_NODMA),
- force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR),
- force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
+ force_quirk_onoff(dma, ATA_QUIRK_NODMA),
+ force_quirk_on(atapi_dmadir, ATA_QUIRK_ATAPI_DMADIR),
+ force_quirk_on(atapi_mod16_dma, ATA_QUIRK_ATAPI_MOD16_DMA),
- force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
- force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG),
- force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),
+ force_quirk_onoff(dmalog, ATA_QUIRK_NO_DMA_LOG),
+ force_quirk_onoff(iddevlog, ATA_QUIRK_NO_ID_DEV_LOG),
+ force_quirk_onoff(logdir, ATA_QUIRK_NO_LOG_DIR),
- force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128),
- force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024),
- force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),
+ force_quirk_on(max_sec_128, ATA_QUIRK_MAX_SEC_128),
+ force_quirk_on(max_sec_1024, ATA_QUIRK_MAX_SEC_1024),
+ force_quirk_on(max_sec_lba48, ATA_QUIRK_MAX_SEC_LBA48),
- force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
- force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
- force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
- force_horkage_onoff(fua, ATA_HORKAGE_NO_FUA),
+ force_quirk_onoff(lpm, ATA_QUIRK_NOLPM),
+ force_quirk_onoff(setxfer, ATA_QUIRK_NOSETXFER),
+ force_quirk_on(dump_id, ATA_QUIRK_DUMP_ID),
+ force_quirk_onoff(fua, ATA_QUIRK_NO_FUA),
- force_horkage_on(disable, ATA_HORKAGE_DISABLE),
+ force_quirk_on(disable, ATA_QUIRK_DISABLE),
};
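These table entries back the libata.force= module parameter, so the user-visible names are unchanged by the rename. A usage sketch (per the force_quirk_onoff() expansion above, the "no"-prefixed name sets the quirk and the bare name clears it):

	libata.force=noncq	# force ATA_QUIRK_NONCQ on
	libata.force=ncq	# force it off again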
static int __init ata_parse_force_one(char **cur,
@@ -6659,7 +6624,6 @@ static void ata_dummy_error_handler(struct ata_port *ap)
}
struct ata_port_operations ata_dummy_port_ops = {
- .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_dummy_qc_issue,
.error_handler = ata_dummy_error_handler,
.sched_eh = ata_std_sched_eh,
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 214b935c2ced..3f0144e7dc80 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -500,10 +500,13 @@ static void ata_eh_dev_disable(struct ata_device *dev)
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
dev->class++;
- /* From now till the next successful probe, ering is used to
+ /*
+ * From now till the next successful probe, ering is used to
* track probe failures. Clear accumulated device error info.
*/
ata_ering_clear(&dev->ering);
+
+ ata_dev_free_resources(dev);
}
static void ata_eh_unload(struct ata_port *ap)
@@ -630,6 +633,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
+ /*
+ * If the scmd was added to EH, via ata_qc_schedule_eh() ->
+ * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
+ * have set DID_TIME_OUT (since libata does not have an abort
+ * handler). Thus, to clear DID_TIME_OUT, clear the host byte.
+ */
+ set_host_byte(scmd, DID_OK);
+
ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE &&
qc->scsicmd == scmd)
@@ -1402,6 +1413,43 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
}
/**
+ * ata_eh_decide_disposition - Decide the disposition of a qc based on sense data
+ * @qc: qc to examine
+ *
+ * For a regular SCSI command, the SCSI completion callback (scsi_done())
+ * will call scsi_complete(), which will call scsi_decide_disposition(),
+ * which will call scsi_check_sense(). scsi_complete() finally calls
+ * scsi_finish_command(). This is fine for SCSI, since any eventual sense
+ * data is usually returned in the completion itself (without invoking SCSI
+ * EH). However, for a QC, we always need to fetch the sense data
+ * explicitly using SCSI EH.
+ *
+ * A command that is completed via SCSI EH will instead be completed using
+ * scsi_eh_flush_done_q(), which will call scsi_finish_command() directly
+ * (without ever calling scsi_check_sense()).
+ *
+ * For a command that went through SCSI EH, it is the responsibility of the
+ * SCSI EH strategy handler to call scsi_decide_disposition(), see e.g. how
+ * scsi_eh_get_sense() calls scsi_decide_disposition() for SCSI LLDDs that
+ * do not get the sense data as part of the completion.
+ *
+ * Thus, for QC commands that went via SCSI EH, we need to call
+ * scsi_check_sense() ourselves, similar to how scsi_eh_get_sense() calls
+ * scsi_decide_disposition(), which calls scsi_check_sense(), in order to
+ * set the correct SCSI ML byte (if any).
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
+ */
+enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc)
+{
+ return scsi_check_sense(qc->scsicmd);
+}
+
+/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
* @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
*
@@ -1627,7 +1675,8 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
}
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
+ enum scsi_disposition ret = ata_eh_decide_disposition(qc);
+
/*
* SUCCESS here means that the sense code could be
* evaluated and should be passed to the upper layers
@@ -1924,7 +1973,7 @@ static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
return qc->flags & ATA_QCFLAG_QUIET;
}
-static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
+static int ata_eh_get_non_ncq_success_sense(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_queued_cmd *qc;
@@ -1942,11 +1991,10 @@ static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
return -EIO;
/*
- * If we have sense data, call scsi_check_sense() in order to set the
- * correct SCSI ML byte (if any). No point in checking the return value,
- * since the command has already completed successfully.
+ * No point in checking the return value, since the command has already
+ * completed successfully.
*/
- scsi_check_sense(qc->scsicmd);
+ ata_eh_decide_disposition(qc);
return 0;
}
@@ -1976,9 +2024,9 @@ static void ata_eh_get_success_sense(struct ata_link *link)
* request sense ext command to retrieve the sense data.
*/
if (link->sactive)
- ret = ata_eh_read_sense_success_ncq_log(link);
+ ret = ata_eh_get_ncq_success_sense(link);
else
- ret = ata_eh_read_sense_success_non_ncq(link);
+ ret = ata_eh_get_non_ncq_success_sense(link);
if (ret)
goto out;
@@ -3247,7 +3295,7 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
int i;
for (i = 0; i < ATA_EH_UA_TRIES; i++) {
- u8 *sense_buffer = dev->link->ap->sector_buf;
+ u8 *sense_buffer = dev->sector_buf;
u8 sense_key = 0;
unsigned int err_mask;
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index e2e9cbd405fa..d5d189328ae6 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -648,8 +648,7 @@ static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
{
struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- u32 *gscr = (void *)ap->sector_buf;
+ u32 *gscr = (void *)dev->sector_buf;
int rc;
ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 48660d445602..c8b119a06bb2 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -518,6 +518,86 @@ int sata_set_spd(struct ata_link *link)
EXPORT_SYMBOL_GPL(sata_set_spd);
/**
+ * sata_down_spd_limit - adjust SATA spd limit downward
+ * @link: Link to adjust SATA spd limit for
+ * @spd_limit: Additional limit
+ *
+ * Adjust SATA spd limit of @link downward. Note that this
+ * function only adjusts the limit. The change must be applied
+ * using sata_set_spd().
+ *
+ * If @spd_limit is non-zero, the speed is limited to equal to or
+ * lower than @spd_limit if such speed is supported. If
+ * @spd_limit is slower than any supported speed, only the lowest
+ * supported speed is allowed.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
+{
+ u32 sstatus, spd, mask;
+ int rc, bit;
+
+ if (!sata_scr_valid(link))
+ return -EOPNOTSUPP;
+
+ /* If SCR can be read, use it to determine the current SPD.
+ * If not, use cached value in link->sata_spd.
+ */
+ rc = sata_scr_read(link, SCR_STATUS, &sstatus);
+ if (rc == 0 && ata_sstatus_online(sstatus))
+ spd = (sstatus >> 4) & 0xf;
+ else
+ spd = link->sata_spd;
+
+ mask = link->sata_spd_limit;
+ if (mask <= 1)
+ return -EINVAL;
+
+ /* unconditionally mask off the highest bit */
+ bit = fls(mask) - 1;
+ mask &= ~(1 << bit);
+
+ /*
+ * Mask off all speeds higher than or equal to the current one. At
+ * this point, if current SPD is not available and we previously
+ * recorded the link speed from SStatus, the driver has already
+ * masked off the highest bit so mask should already be 1 or 0.
+ * Otherwise, we should not force 1.5Gbps on a link where we have
+ * not previously recorded speed from SStatus. Just return in this
+ * case.
+ */
+ if (spd > 1)
+ mask &= (1 << (spd - 1)) - 1;
+ else if (link->sata_spd)
+ return -EINVAL;
+
+ /* were we already at the bottom? */
+ if (!mask)
+ return -EINVAL;
+
+ if (spd_limit) {
+ if (mask & ((1 << spd_limit) - 1))
+ mask &= (1 << spd_limit) - 1;
+ else {
+ bit = ffs(mask) - 1;
+ mask = 1 << bit;
+ }
+ }
+
+ link->sata_spd_limit = mask;
+
+ ata_link_warn(link, "limiting SATA link speed to %s\n",
+ sata_spd_string(fls(mask)));
+
+ return 0;
+}
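A worked example of the mask arithmetic (sketch; bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps, bit 2 = 6.0 Gbps): with sata_spd_limit = 0x7 and a link currently at 6.0 Gbps (spd = 3),

	mask = 0x7 & ~(1 << (fls(0x7) - 1));	/* 0x3: drop the top speed */
	mask &= (1 << (3 - 1)) - 1;		/* 0x3: keep speeds below 6.0 */

so the new limit is 0x3 and the warning reports 3.0 Gbps; a subsequent call at spd = 2 would shrink the mask to 0x1, i.e. 1.5 Gbps only.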
+
+/**
* sata_link_hardreset - reset link via SATA phy reset
* @link: link to reset
* @timing: timing parameters { interval, duration, timeout } in msec
@@ -627,6 +707,34 @@ int sata_link_hardreset(struct ata_link *link, const unsigned int *timing,
EXPORT_SYMBOL_GPL(sata_link_hardreset);
/**
+ * sata_std_hardreset - COMRESET w/o waiting or classification
+ * @link: link to reset
+ * @class: resulting class of attached device
+ * @deadline: deadline jiffies for the operation
+ *
+ * Standard SATA COMRESET w/o waiting or classification.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 if link offline, -EAGAIN if link online, -errno on errors.
+ */
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
+ bool online;
+ int rc;
+
+ rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+ if (online)
+ return -EAGAIN;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sata_std_hardreset);
+
+/**
* ata_qc_complete_multiple - Complete multiple qcs successfully
* @ap: port in question
* @qc_active: new qc_active mask
@@ -818,7 +926,7 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, &ap->link, ENABLED) {
- if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ if (dev->quirks & ATA_QUIRK_NOLPM) {
count = -EOPNOTSUPP;
goto out_unlock;
}
@@ -1340,7 +1448,7 @@ EXPORT_SYMBOL_GPL(sata_async_notification);
static int ata_eh_read_log_10h(struct ata_device *dev,
int *tag, struct ata_taskfile *tf)
{
- u8 *buf = dev->link->ap->sector_buf;
+ u8 *buf = dev->sector_buf;
unsigned int err_mask;
u8 csum;
int i;
@@ -1379,8 +1487,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
}
/**
- * ata_eh_read_sense_success_ncq_log - Read the sense data for successful
- * NCQ commands log
+ * ata_eh_get_ncq_success_sense - Read and process the sense data for
+ * successful NCQ commands log page
* @link: ATA link to get sense data for
*
* Read the sense data for successful NCQ commands log page to obtain
@@ -1393,11 +1501,11 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
+int ata_eh_get_ncq_success_sense(struct ata_link *link)
{
struct ata_device *dev = link->device;
struct ata_port *ap = dev->link->ap;
- u8 *buf = ap->ncq_sense_buf;
+ u8 *buf = dev->cdl->ncq_sense_log_buf;
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
@@ -1455,17 +1563,14 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
qc->flags |= ATA_QCFLAG_SENSE_VALID;
/*
- * If we have sense data, call scsi_check_sense() in order to
- * set the correct SCSI ML byte (if any). No point in checking
- * the return value, since the command has already completed
- * successfully.
+ * No point in checking the return value, since the command has
+ * already completed successfully.
*/
- scsi_check_sense(qc->scsicmd);
+ ata_eh_decide_disposition(qc);
}
return ret;
}
-EXPORT_SYMBOL_GPL(ata_eh_read_sense_success_ncq_log);
/**
* ata_eh_analyze_ncq_error - analyze NCQ error
@@ -1576,3 +1681,11 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
ehc->i.err_mask &= ~AC_ERR_DEV;
}
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
+
+const struct ata_port_operations sata_port_ops = {
+ .inherits = &ata_base_port_ops,
+
+ .qc_defer = ata_std_qc_defer,
+ .hardreset = sata_std_hardreset,
+};
+EXPORT_SYMBOL_GPL(sata_port_ops);
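A hypothetical SATA LLDD can now inherit these defaults instead of open-coding them (sketch; my_qc_issue is an invented placeholder):

	static struct ata_port_operations my_sata_ops = {
		.inherits	= &sata_port_ops,	/* std qc_defer + hardreset */
		.qc_issue	= my_qc_issue,
	};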
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 473e00a58a8b..a4aedf7e1775 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1691,9 +1691,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
} else if (is_error && !have_sense) {
ata_gen_ata_sense(qc);
- } else {
- /* Keep the SCSI ML and status byte, clear host byte. */
- cmd->result &= 0x0000ffff;
}
ata_qc_done(qc);
@@ -2094,7 +2091,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
if (ata_id_has_trim(args->id)) {
u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
- if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
+ if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M)
max_blocks = 128 << (20 - SECTOR_SHIFT);
put_unaligned_be64(max_blocks, &rbuf[36]);
@@ -2259,10 +2256,15 @@ static inline u16 ata_xlat_cdl_limit(u8 *buf)
static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf,
u8 spg)
{
- u8 *b, *cdl = dev->cdl, *desc;
+ u8 *b, *cdl, *desc;
u32 policy;
int i;
+ if (!(dev->flags & ATA_DFLAG_CDL) || !dev->cdl)
+ return 0;
+
+ cdl = dev->cdl->desc_log_buf;
+
/*
* Fill the subpage. The first four bytes of the T2A/T2B mode pages
* are a header. The PAGE LENGTH field is the size of the page
@@ -2359,7 +2361,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
case ALL_SUB_MPAGES:
n = ata_msense_control_spg0(dev, buf, changeable);
n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
- n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
+ n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE);
n += ata_msense_control_ata_feature(dev, buf + n);
return n;
default:
@@ -2572,11 +2574,11 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id) &&
- !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
+ !(dev->quirks & ATA_QUIRK_NOTRIM)) {
rbuf[14] |= 0x80; /* LBPME */
if (ata_id_has_zero_after_trim(args->id) &&
- dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
+ dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
ata_dev_info(dev, "Enabling discard_zeroes_data\n");
rbuf[14] |= 0x40; /* LBPRZ */
}
@@ -3240,8 +3242,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
}
scsi_16_lba_len(cdb, &block, &n_block);
- if (!unmap ||
- (dev->horkage & ATA_HORKAGE_NOTRIM) ||
+ if (!unmap || (dev->quirks & ATA_QUIRK_NOTRIM) ||
!ata_id_has_trim(dev->id)) {
fp = 1;
bp = 3;
@@ -4617,16 +4618,15 @@ static void ata_scsi_handle_link_detach(struct ata_link *link)
ata_for_each_dev(dev, link, ALL) {
unsigned long flags;
- if (!(dev->flags & ATA_DFLAG_DETACHED))
+ spin_lock_irqsave(ap->lock, flags);
+ if (!(dev->flags & ATA_DFLAG_DETACHED)) {
+ spin_unlock_irqrestore(ap->lock, flags);
continue;
+ }
- spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_DETACHED;
spin_unlock_irqrestore(ap->lock, flags);
- if (zpodd_dev_enabled(dev))
- zpodd_exit(dev);
-
ata_scsi_remove_dev(dev);
}
}
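The reordering in ata_scsi_handle_link_detach() above closes a window where ATA_DFLAG_DETACHED was tested without holding ap->lock while another context could be setting it under the lock. A generic sketch of the resulting test-and-clear-under-lock shape (structure and flag names hypothetical):

	spin_lock_irqsave(&obj->lock, flags);
	if (!(obj->flags & FLAG_PENDING)) {
		spin_unlock_irqrestore(&obj->lock, flags);
		return;
	}
	obj->flags &= ~FLAG_PENDING;	/* observed and consumed atomically */
	spin_unlock_irqrestore(&obj->lock, flags);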
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 250f7dae05fd..67f277e1c3bf 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -26,7 +26,6 @@ static struct workqueue_struct *ata_sff_wq;
const struct ata_port_operations ata_sff_port_ops = {
.inherits = &ata_base_port_ops,
- .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_sff_qc_issue,
.qc_fill_rtf = ata_sff_qc_fill_rtf,
@@ -970,7 +969,7 @@ fsm_start:
* We ignore ERR here to workaround and proceed sending
* the CDB.
*/
- if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
+ if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) {
ata_ehi_push_desc(ehi, "ST_FIRST: "
"DRQ=1 with device error, "
"dev_stat 0x%X", status);
@@ -1045,8 +1044,8 @@ fsm_start:
* IDENTIFY, it's likely a phantom
* device. Mark hint.
*/
- if (qc->dev->horkage &
- ATA_HORKAGE_DIAGNOSTIC)
+ if (qc->dev->quirks &
+ ATA_QUIRK_DIAGNOSTIC)
qc->err_mask |=
AC_ERR_NODEV_HINT;
} else {
@@ -1762,7 +1761,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
/* see if device passed diags: continue and warn later */
if (err == 0)
/* diagnostic fail : do nothing _YET_ */
- dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
+ dev->quirks |= ATA_QUIRK_DIAGNOSTIC;
else if (err == 1)
/* do nothing */ ;
else if ((dev->devno == 0) && (err == 0x81))
@@ -1781,7 +1780,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
* device signature is invalid with diagnostic
* failure.
*/
- if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
+ if (present && (dev->quirks & ATA_QUIRK_DIAGNOSTIC))
class = ATA_DEV_ATA;
else
class = ATA_DEV_NONE;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 9e24c33388f9..e898be49df6b 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -80,12 +80,6 @@ struct ata_internal {
#define transport_class_to_port(dev) \
tdev_to_port((dev)->parent)
-
-/* Device objects are always created whit link objects */
-static int ata_tdev_add(struct ata_device *dev);
-static void ata_tdev_delete(struct ata_device *dev);
-
-
/*
* Hack to allow attributes of the same name in different objects.
*/
@@ -365,135 +359,6 @@ unsigned int ata_port_classify(struct ata_port *ap,
EXPORT_SYMBOL_GPL(ata_port_classify);
/*
- * ATA link attributes
- */
-static int noop(int x) { return x; }
-
-#define ata_link_show_linkspeed(field, format) \
-static ssize_t \
-show_ata_link_##field(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ata_link *link = transport_class_to_link(dev); \
- \
- return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
-}
-
-#define ata_link_linkspeed_attr(field, format) \
- ata_link_show_linkspeed(field, format) \
-static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
-
-ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
-ata_link_linkspeed_attr(sata_spd_limit, fls);
-ata_link_linkspeed_attr(sata_spd, noop);
-
-
-static DECLARE_TRANSPORT_CLASS(ata_link_class,
- "ata_link", NULL, NULL, NULL);
-
-static void ata_tlink_release(struct device *dev)
-{
-}
-
-/**
- * ata_is_link -- check if a struct device represents a ATA link
- * @dev: device to check
- *
- * Returns:
- * %1 if the device represents a ATA link, %0 else
- */
-static int ata_is_link(const struct device *dev)
-{
- return dev->release == ata_tlink_release;
-}
-
-static int ata_tlink_match(struct attribute_container *cont,
- struct device *dev)
-{
- struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
- if (!ata_is_link(dev))
- return 0;
- return &i->link_attr_cont.ac == cont;
-}
-
-/**
- * ata_tlink_delete -- remove ATA LINK
- * @link: ATA LINK to remove
- *
- * Removes the specified ATA LINK. remove associated ATA device(s) as well.
- */
-void ata_tlink_delete(struct ata_link *link)
-{
- struct device *dev = &link->tdev;
- struct ata_device *ata_dev;
-
- ata_for_each_dev(ata_dev, link, ALL) {
- ata_tdev_delete(ata_dev);
- }
-
- transport_remove_device(dev);
- device_del(dev);
- transport_destroy_device(dev);
- put_device(dev);
-}
-
-/**
- * ata_tlink_add -- initialize a transport ATA link structure
- * @link: allocated ata_link structure.
- *
- * Initialize an ATA LINK structure for sysfs. It will be added in the
- * device tree below the ATA PORT it belongs to.
- *
- * Returns %0 on success
- */
-int ata_tlink_add(struct ata_link *link)
-{
- struct device *dev = &link->tdev;
- struct ata_port *ap = link->ap;
- struct ata_device *ata_dev;
- int error;
-
- device_initialize(dev);
- dev->parent = &ap->tdev;
- dev->release = ata_tlink_release;
- if (ata_is_host_link(link))
- dev_set_name(dev, "link%d", ap->print_id);
- else
- dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
-
- transport_setup_device(dev);
-
- error = device_add(dev);
- if (error) {
- goto tlink_err;
- }
-
- error = transport_add_device(dev);
- if (error)
- goto tlink_transport_err;
- transport_configure_device(dev);
-
- ata_for_each_dev(ata_dev, link, ALL) {
- error = ata_tdev_add(ata_dev);
- if (error) {
- goto tlink_dev_err;
- }
- }
- return 0;
- tlink_dev_err:
- while (--ata_dev >= link->device) {
- ata_tdev_delete(ata_dev);
- }
- transport_remove_device(dev);
- tlink_transport_err:
- device_del(dev);
- tlink_err:
- transport_destroy_device(dev);
- put_device(dev);
- return error;
-}
-
-/*
* ATA device attributes
*/
@@ -617,10 +482,10 @@ show_ata_dev_trim(struct device *dev,
if (!ata_id_has_trim(ata_dev->id))
mode = "unsupported";
- else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+ else if (ata_dev->quirks & ATA_QUIRK_NOTRIM)
mode = "forced_unsupported";
- else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
- mode = "forced_unqueued";
+ else if (ata_dev->quirks & ATA_QUIRK_NO_NCQ_TRIM)
+ mode = "forced_unqueued";
else if (ata_fpdma_dsm_supported(ata_dev))
mode = "queued";
else
@@ -643,9 +508,9 @@ static void ata_tdev_release(struct device *dev)
* @dev: device to check
*
* Returns:
- * %1 if the device represents a ATA device, %0 else
+ * true if the device represents an ATA device, false otherwise
*/
-static int ata_is_ata_dev(const struct device *dev)
+static bool ata_is_ata_dev(const struct device *dev)
{
return dev->release == ata_tdev_release;
}
@@ -653,21 +518,22 @@ static int ata_is_ata_dev(const struct device *dev)
static int ata_tdev_match(struct attribute_container *cont,
struct device *dev)
{
- struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
+ struct ata_internal *i = to_ata_internal(ata_scsi_transport_template);
+
if (!ata_is_ata_dev(dev))
return 0;
return &i->dev_attr_cont.ac == cont;
}
/**
- * ata_tdev_free -- free a ATA LINK
- * @dev: ATA PHY to free
+ * ata_tdev_free -- free an ATA transport device
+ * @dev: struct ata_device owning the transport device to free
*
- * Frees the specified ATA PHY.
+ * Free the ATA transport device for the specified ATA device.
*
* Note:
- * This function must only be called on a PHY that has not
- * successfully been added using ata_tdev_add().
+ * This function must only be called for an ATA transport device that has not
+ * yet successfully been added using ata_tdev_add().
*/
static void ata_tdev_free(struct ata_device *dev)
{
@@ -676,10 +542,10 @@ static void ata_tdev_free(struct ata_device *dev)
}
/**
- * ata_tdev_delete -- remove ATA device
- * @ata_dev: ATA device to remove
+ * ata_tdev_delete -- remove an ATA transport device
+ * @ata_dev: struct ata_device owning the transport device to delete
*
- * Removes the specified ATA device.
+ * Removes the ATA transport device for the specified ATA device.
*/
static void ata_tdev_delete(struct ata_device *ata_dev)
{
@@ -690,15 +556,14 @@ static void ata_tdev_delete(struct ata_device *ata_dev)
ata_tdev_free(ata_dev);
}
-
/**
- * ata_tdev_add -- initialize a transport ATA device structure.
- * @ata_dev: ata_dev structure.
+ * ata_tdev_add -- initialize an ATA transport device
+ * @ata_dev: struct ata_device owning the transport device to add
*
- * Initialize an ATA device structure for sysfs. It will be added in the
- * device tree below the ATA LINK device it belongs to.
+ * Initialize an ATA transport device for sysfs. It will be added in the
+ * device tree below the ATA link device it belongs to.
*
- * Returns %0 on success
+ * Returns %0 on success and a negative error code on error.
*/
static int ata_tdev_add(struct ata_device *ata_dev)
{
@@ -734,6 +599,136 @@ static int ata_tdev_add(struct ata_device *ata_dev)
return 0;
}
+/*
+ * ATA link attributes
+ */
+static int noop(int x)
+{
+ return x;
+}
+
+#define ata_link_show_linkspeed(field, format) \
+static ssize_t \
+show_ata_link_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct ata_link *link = transport_class_to_link(dev); \
+ \
+ return sprintf(buf, "%s\n", \
+ sata_spd_string(format(link->field))); \
+}
+
+#define ata_link_linkspeed_attr(field, format) \
+ ata_link_show_linkspeed(field, format) \
+static DEVICE_ATTR(field, 0444, show_ata_link_##field, NULL)
+
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
+
+static DECLARE_TRANSPORT_CLASS(ata_link_class,
+ "ata_link", NULL, NULL, NULL);
+
+static void ata_tlink_release(struct device *dev)
+{
+}
+
+/**
+ * ata_is_link -- check if a struct device represents an ATA link
+ * @dev: device to check
+ *
+ * Returns:
+ * true if the device represents an ATA link, false otherwise
+ */
+static bool ata_is_link(const struct device *dev)
+{
+ return dev->release == ata_tlink_release;
+}
+
+static int ata_tlink_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct ata_internal *i = to_ata_internal(ata_scsi_transport_template);
+
+ if (!ata_is_link(dev))
+ return 0;
+ return &i->link_attr_cont.ac == cont;
+}
+
+/**
+ * ata_tlink_delete -- remove an ATA link transport device
+ * @link: struct ata_link owning the link transport device to remove
+ *
+ * Removes the link transport device of the specified ATA link. This also
+ * removes the ATA device(s) associated with the link.
+ */
+void ata_tlink_delete(struct ata_link *link)
+{
+ struct device *dev = &link->tdev;
+ struct ata_device *ata_dev;
+
+ ata_for_each_dev(ata_dev, link, ALL) {
+ ata_tdev_delete(ata_dev);
+ }
+
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+ put_device(dev);
+}
+
+/**
+ * ata_tlink_add -- initialize an ATA link transport device
+ * @link: struct ata_link owning the link transport device to initialize
+ *
+ * Initialize an ATA link transport device for sysfs. It will be added in the
+ * device tree below the ATA port it belongs to.
+ *
+ * Returns %0 on success and a negative error code on error.
+ */
+int ata_tlink_add(struct ata_link *link)
+{
+ struct device *dev = &link->tdev;
+ struct ata_port *ap = link->ap;
+ struct ata_device *ata_dev;
+ int error;
+
+ device_initialize(dev);
+ dev->parent = &ap->tdev;
+ dev->release = ata_tlink_release;
+ if (ata_is_host_link(link))
+ dev_set_name(dev, "link%d", ap->print_id);
+ else
+ dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
+
+ transport_setup_device(dev);
+
+ error = device_add(dev);
+ if (error)
+ goto tlink_err;
+
+ error = transport_add_device(dev);
+ if (error)
+ goto tlink_transport_err;
+ transport_configure_device(dev);
+
+ ata_for_each_dev(ata_dev, link, ALL) {
+ error = ata_tdev_add(ata_dev);
+ if (error)
+ goto tlink_dev_err;
+ }
+ return 0;
+ tlink_dev_err:
+ while (--ata_dev >= link->device)
+ ata_tdev_delete(ata_dev);
+ transport_remove_device(dev);
+ tlink_transport_err:
+ device_del(dev);
+ tlink_err:
+ transport_destroy_device(dev);
+ put_device(dev);
+ return error;
+}
/*
* Setup / Teardown code
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index eefda51f97d3..4b83b517caec 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -112,7 +112,7 @@ static bool zpready(struct ata_device *dev)
if (!ret || sense_key != NOT_READY)
return false;
- sense_buf = dev->link->ap->sector_buf;
+ sense_buf = dev->sector_buf;
ret = atapi_eh_request_sense(dev, sense_buf, sense_key);
if (ret)
return false;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 6abf265f626e..0337be4faec7 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -38,6 +38,12 @@ extern int libata_noacpi;
extern int libata_allow_tpm;
extern const struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
+
+static inline bool ata_sstatus_online(u32 sstatus)
+{
+ return (sstatus & 0xf) == 0x3;
+}
+
#ifdef CONFIG_ATA_FORCE
extern void ata_force_cbl(struct ata_port *ap);
#else
@@ -65,7 +71,7 @@ extern bool ata_dev_power_init_tf(struct ata_device *dev,
struct ata_taskfile *tf, bool set_active);
extern void ata_dev_power_set_standby(struct ata_device *dev);
extern void ata_dev_power_set_active(struct ata_device *dev);
-extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+void ata_dev_free_resources(struct ata_device *dev);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,
u8 subcmd, u8 action);
@@ -84,9 +90,25 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern const char *sata_spd_string(unsigned int spd);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
+void ata_dev_cleanup_cdl_resources(struct ata_device *dev);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+/* libata-sata.c */
+#ifdef CONFIG_SATA_HOST
+int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
+int ata_eh_get_ncq_success_sense(struct ata_link *link);
+#else
+static inline int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
+{
+ return -EOPNOTSUPP;
+}
+static inline int ata_eh_get_ncq_success_sense(struct ata_link *link)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
extern unsigned int ata_acpi_gtf_filter;
@@ -124,7 +146,6 @@ extern void ata_scsi_set_sense_information(struct ata_device *dev,
const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
-extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
@@ -162,6 +183,7 @@ extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
void *arg);
+enum scsi_disposition ata_eh_decide_disposition(struct ata_queued_cmd *qc);
extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key);
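The new ata_sstatus_online() helper above decodes the SATA SStatus register: bits 3:0 form the DET field, and the value 3 means a device is detected and PHY communication is established. A quick illustration with assumed register values:

	u32 sstatus = 0x123;		/* DET = 0x3: link online */
	ata_sstatus_online(sstatus);	/* -> true */

	sstatus = 0x001;		/* DET = 0x1: device present, no PHY comm */
	ata_sstatus_online(sstatus);	/* -> false */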
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 027cf67101ef..3163c8d9cef5 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -8,9 +8,9 @@
* PIO mode and smarter silicon.
*
* The practical upshot of this is that we must always tune the
- * drive for the right PIO mode. We must also ignore all the blacklists
- * and the drive bus mastering DMA information. Also to confuse matters
- * further we can do DMA on PIO only drives.
+ * drive for the right PIO mode and ignore the drive bus mastering DMA
+ * information. Also, to confuse matters further, we can do DMA on PIO-only
+ * drives.
*
* DMA on the 5510 also requires we disable_hlt() during DMA on early
* revisions.
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index c84a20892f1b..f3f5b2b0ecc9 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -44,8 +44,8 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
-#include <linux/platform_data/dma-ep93xx.h>
#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
@@ -126,7 +126,7 @@ enum {
};
struct ep93xx_pata_data {
- const struct platform_device *pdev;
+ struct platform_device *pdev;
void __iomem *ide_base;
struct ata_timing t;
bool iordy;
@@ -135,9 +135,7 @@ struct ep93xx_pata_data {
unsigned long udma_out_phys;
struct dma_chan *dma_rx_channel;
- struct ep93xx_dma_data dma_rx_data;
struct dma_chan *dma_tx_channel;
- struct ep93xx_dma_data dma_tx_data;
};
static void ep93xx_pata_clear_regs(void __iomem *base)
@@ -637,20 +635,13 @@ static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data)
}
}
-static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param)
+static int ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
{
- if (ep93xx_dma_chan_is_m2p(chan))
- return false;
-
- chan->private = filter_param;
- return true;
-}
-
-static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
-{
- const struct platform_device *pdev = drv_data->pdev;
+ struct platform_device *pdev = drv_data->pdev;
+ struct device *dev = &pdev->dev;
dma_cap_mask_t mask;
struct dma_slave_config conf;
+ int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -660,22 +651,16 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
* to request only one channel, and reprogram it's direction at
* start of new transfer.
*/
- drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
- drv_data->dma_rx_data.name = "ep93xx-pata-rx";
- drv_data->dma_rx_channel = dma_request_channel(mask,
- ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
- if (!drv_data->dma_rx_channel)
- return;
-
- drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
- drv_data->dma_tx_data.name = "ep93xx-pata-tx";
- drv_data->dma_tx_channel = dma_request_channel(mask,
- ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
- if (!drv_data->dma_tx_channel) {
- dma_release_channel(drv_data->dma_rx_channel);
- return;
+ drv_data->dma_rx_channel = dma_request_chan(dev, "rx");
+ if (IS_ERR(drv_data->dma_rx_channel))
+ return dev_err_probe(dev, PTR_ERR(drv_data->dma_rx_channel),
+ "rx DMA setup failed\n");
+
+ drv_data->dma_tx_channel = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(drv_data->dma_tx_channel)) {
+ ret = dev_err_probe(dev, PTR_ERR(drv_data->dma_tx_channel),
+ "tx DMA setup failed\n");
+ goto fail_release_rx;
}
/* Configure receive channel direction and source address */
@@ -683,10 +668,10 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = drv_data->udma_in_phys;
conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
- dev_err(&pdev->dev, "failed to configure rx dma channel\n");
- ep93xx_pata_release_dma(drv_data);
- return;
+ ret = dmaengine_slave_config(drv_data->dma_rx_channel, &conf);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to configure rx dma channel");
+ goto fail_release_dma;
}
/* Configure transmit channel direction and destination address */
@@ -694,10 +679,20 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = drv_data->udma_out_phys;
conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
- dev_err(&pdev->dev, "failed to configure tx dma channel\n");
- ep93xx_pata_release_dma(drv_data);
+ ret = dmaengine_slave_config(drv_data->dma_tx_channel, &conf);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to configure tx dma channel");
+ goto fail_release_dma;
}
+
+ return 0;
+
+fail_release_rx:
+ dma_release_channel(drv_data->dma_rx_channel);
+fail_release_dma:
+ ep93xx_pata_release_dma(drv_data);
+
+ return ret;
}
static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
@@ -884,8 +879,6 @@ static const struct scsi_host_template ep93xx_pata_sht = {
static struct ata_port_operations ep93xx_pata_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_noop_qc_prep,
-
.softreset = ep93xx_pata_softreset,
.hardreset = ATA_OP_NULL,
@@ -927,34 +920,26 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
void __iomem *ide_base;
int err;
- err = ep93xx_ide_acquire_gpio(pdev);
- if (err)
- return err;
-
/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto err_rel_gpio;
- }
+ if (irq < 0)
+ return irq;
ide_base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
- if (IS_ERR(ide_base)) {
- err = PTR_ERR(ide_base);
- goto err_rel_gpio;
- }
+ if (IS_ERR(ide_base))
+ return PTR_ERR(ide_base);
drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
- if (!drv_data) {
- err = -ENOMEM;
- goto err_rel_gpio;
- }
+ if (!drv_data)
+ return -ENOMEM;
drv_data->pdev = pdev;
drv_data->ide_base = ide_base;
drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN;
drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT;
- ep93xx_pata_dma_init(drv_data);
+ err = ep93xx_pata_dma_init(drv_data);
+ if (err)
+ return err;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
@@ -1005,8 +990,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
err_rel_dma:
ep93xx_pata_release_dma(drv_data);
-err_rel_gpio:
- ep93xx_ide_release_gpio(pdev);
return err;
}
@@ -1018,12 +1001,18 @@ static void ep93xx_pata_remove(struct platform_device *pdev)
ata_host_detach(host);
ep93xx_pata_release_dma(drv_data);
ep93xx_pata_clear_regs(drv_data->ide_base);
- ep93xx_ide_release_gpio(pdev);
}
+static const struct of_device_id ep93xx_pata_of_ids[] = {
+ { .compatible = "cirrus,ep9312-pata" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_pata_of_ids);
+
static struct platform_driver ep93xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
+ .of_match_table = ep93xx_pata_of_ids,
},
.probe = ep93xx_pata_probe,
.remove_new = ep93xx_pata_remove,
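The ep93xx rework above also illustrates the move from filter-based dma_request_channel() to named channels. dma_request_chan() looks the channel up by the device's DT "dmas"/"dma-names" properties and returns an ERR_PTR on failure, which combines naturally with dev_err_probe() to handle -EPROBE_DEFER quietly. The general shape, as a sketch with assumed names:

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan),
				     "failed to get rx DMA channel\n");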
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 4d6ef90ccc77..73a9a5109238 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -549,6 +549,7 @@ static const struct of_device_id pata_ftide010_of_match[] = {
{ .compatible = "faraday,ftide010", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, pata_ftide010_of_match);
static struct platform_driver pata_ftide010_driver = {
.driver = {
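This one-line addition, like the matching ones for pata_ixp4xx_cf and sata_gemini below, lets udev autoload the module: MODULE_DEVICE_TABLE(of, ...) exports the compatible strings as module aliases (roughly of:N*T*C<compatible>*), which are matched against the MODALIAS uevent when the device is created. The usual pattern, sketched with a hypothetical compatible:

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "vendor,example" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, example_of_match);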
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index bdccd1ba1524..5280e9960025 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -170,8 +170,8 @@ static const char * const bad_ata66_3[] = {
NULL
};
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
- const char * const list[])
+static int hpt_dma_broken(const struct ata_device *dev, char *modestr,
+ const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i;
@@ -197,11 +197,11 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
static unsigned int hpt366_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+ if (hpt_dma_broken(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
- if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
+ if (hpt_dma_broken(adev, "UDMA3", bad_ata66_3))
mask &= ~(0xF8 << ATA_SHIFT_UDMA);
- if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
+ if (hpt_dma_broken(adev, "UDMA4", bad_ata66_4))
mask &= ~(0xF0 << ATA_SHIFT_UDMA);
} else if (adev->class == ATA_DEV_ATAPI)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index c0329cf01135..4af22b819416 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -218,8 +218,8 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
return 0xffffffffU; /* silence compiler warning */
}
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
- const char * const list[])
+static int hpt_dma_broken(const struct ata_device *dev, char *modestr,
+ const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i;
@@ -281,9 +281,9 @@ static const char * const bad_ata100_5[] = {
static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+ if (hpt_dma_broken(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
- if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+ if (hpt_dma_broken(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
@@ -300,7 +300,7 @@ static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask)
static unsigned int hpt370a_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
- if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+ if (hpt_dma_broken(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 9cfb064782c3..61d8760f09d9 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -328,8 +328,6 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
static struct ata_port_operations pata_icside_port_ops = {
.inherits = &ata_bmdma_port_ops,
- /* no need to build any PRD tables for DMA */
- .qc_prep = ata_noop_qc_prep,
.sff_data_xfer = ata_sff_data_xfer32,
.bmdma_setup = pata_icside_bmdma_setup,
.bmdma_start = pata_icside_bmdma_start,
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 2fe3fb6102ce..042f6ad1f7c6 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -519,9 +519,9 @@ static void it821x_dev_config(struct ata_device *adev)
}
/* This is a controller firmware triggered funny, don't
report the drive faulty! */
- adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
+ adev->quirks &= ~ATA_QUIRK_DIAGNOSTIC;
/* No HPA in 'smart' mode */
- adev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ adev->quirks |= ATA_QUIRK_BROKEN_HPA;
}
/**
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 246bb4f8f1f7..8a9ee828478f 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -290,6 +290,7 @@ static const struct of_device_id ixp4xx_pata_of_match[] = {
{ .compatible = "intel,ixp4xx-compact-flash", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, ixp4xx_pata_of_match);
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 6c317a461a1f..3f9258677915 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -620,7 +620,6 @@ static struct ata_port_operations mpc52xx_ata_port_ops = {
.bmdma_start = mpc52xx_bmdma_start,
.bmdma_stop = mpc52xx_bmdma_stop,
.bmdma_status = mpc52xx_bmdma_status,
- .qc_prep = ata_noop_qc_prep,
};
static int mpc52xx_ata_init_one(struct device *dev,
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 2884acfc4863..0bb9607e7348 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -789,7 +789,6 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
static struct ata_port_operations octeon_cf_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = octeon_cf_check_atapi_dma,
- .qc_prep = ata_noop_qc_prep,
.qc_issue = octeon_cf_qc_issue,
.sff_dev_select = octeon_cf_dev_select,
.sff_irq_on = octeon_cf_ata_port_noaction,
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 549ff24a9823..4edddf6bcc15 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -46,10 +46,11 @@
#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
-/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
- * can overrun their FIFOs when used with the CSB5 */
-
-static const char *csb_bad_ata100[] = {
+/*
+ * Seagate Barracuda ATA IV Family drives in UDMA mode 5
+ * can overrun their FIFOs when used with the CSB5.
+ */
+static const char * const csb_bad_ata100[] = {
"ST320011A",
"ST340016A",
"ST360021A",
@@ -163,10 +164,11 @@ static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned in
* @adev: ATA device
* @mask: Mask of proposed modes
*
- * Check the blacklist and disable UDMA5 if matched
+ * Check the list of devices with broken UDMA5 and
+ * disable UDMA5 if matched.
*/
-
-static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask)
+static unsigned int serverworks_csb_filter(struct ata_device *adev,
+ unsigned int mask)
{
const char *p;
char model_num[ATA_ID_PROD_LEN + 1];
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 4c270999ba3c..f574e3c3f5b4 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -417,6 +417,7 @@ static const struct of_device_id gemini_sata_of_match[] = {
{ .compatible = "cortina,gemini-sata-bridge", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, gemini_sata_of_match);
static struct platform_driver gemini_sata_driver = {
.driver = {
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index cc77c0248284..3a99f66198a9 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -128,7 +128,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
static const struct sil_drivelist {
const char *product;
unsigned int quirk;
-} sil_blacklist [] = {
+} sil_quirks[] = {
{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
{ "ST330013AS", SIL_QUIRK_MOD15WRITE },
{ "ST340017AS", SIL_QUIRK_MOD15WRITE },
@@ -600,8 +600,8 @@ static void sil_thaw(struct ata_port *ap)
* list, and apply the fixups to only the specific
* devices/hosts/firmwares that need it.
*
- * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
- * The Maxtor quirk is in the blacklist, but I'm keeping the original
+ * 20040111 - Seagate drives affected by the Mod15Write bug are quirked.
+ * The Maxtor quirk is in sil_quirks, but I'm keeping the original
* pessimistic fix for the following reasons...
* - There seems to be less info on it, only one device gleaned off the
* Windows driver, maybe only one is affected. More info would be greatly
@@ -616,13 +616,13 @@ static void sil_dev_config(struct ata_device *dev)
unsigned char model_num[ATA_ID_PROD_LEN + 1];
/* This controller doesn't support trim */
- dev->horkage |= ATA_HORKAGE_NOTRIM;
+ dev->quirks |= ATA_QUIRK_NOTRIM;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
- for (n = 0; sil_blacklist[n].product; n++)
- if (!strcmp(sil_blacklist[n].product, model_num)) {
- quirks = sil_blacklist[n].quirk;
+ for (n = 0; sil_quirks[n].product; n++)
+ if (!strcmp(sil_quirks[n].product, model_num)) {
+ quirks = sil_quirks[n].quirk;
break;
}
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index bb9463814454..19b619376d48 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -526,7 +526,6 @@ static const struct file_operations charlcd_fops = {
.write = charlcd_write,
.open = charlcd_open,
.release = charlcd_release,
- .llseek = no_llseek,
};
static struct miscdevice charlcd_dev = {
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 2b8fd6bb7da0..064eb52ff7e2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -226,6 +226,7 @@ config GENERIC_ARCH_TOPOLOGY
config GENERIC_ARCH_NUMA
bool
+ select NUMA_MEMBLKS
help
Enable support for generic NUMA implementation. Currently, RISC-V
and ARM64 use it.
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index 555aee3ee8e7..e18701676426 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -12,16 +12,12 @@
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/numa_memblks.h>
#include <asm/sections.h>
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-nodemask_t numa_nodes_parsed __initdata;
static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
-static int numa_distance_cnt;
-static u8 *numa_distance;
bool numa_off;
static __init int numa_parse_early_param(char *opt)
@@ -30,6 +26,8 @@ static __init int numa_parse_early_param(char *opt)
return -EINVAL;
if (str_has_prefix(opt, "off"))
numa_off = true;
+ if (!strncmp(opt, "fake=", 5))
+ return numa_emu_cmdline(opt + 5);
return 0;
}
@@ -61,6 +59,7 @@ EXPORT_SYMBOL(cpumask_of_node);
#endif
+#ifndef CONFIG_NUMA_EMU
static void numa_update_cpu(unsigned int cpu, bool remove)
{
int nid = cpu_to_node(cpu);
@@ -83,6 +82,7 @@ void numa_remove_cpu(unsigned int cpu)
{
numa_update_cpu(cpu, true);
}
+#endif
void numa_clear_node(unsigned int cpu)
{
@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
-int __init early_cpu_to_node(int cpu)
+int early_cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
@@ -189,174 +189,24 @@ void __init setup_per_cpu_areas(void)
}
#endif
-/**
- * numa_add_memblk() - Set node id to memblk
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- int ret;
-
- ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
- if (ret < 0) {
- pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
- start, (end - 1), nid);
- return ret;
- }
-
- node_set(nid, numa_nodes_parsed);
- return ret;
-}
-
/*
* Initialize NODE_DATA for a node on the local memory
*/
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
- u64 nd_pa;
- void *nd;
- int tnid;
-
if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, nid);
+ alloc_node_data(nid);
- nd = __va(nd_pa);
-
- /* report and initialize */
- pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
NODE_DATA(nid)->node_id = nid;
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
-/*
- * numa_free_distance
- *
- * The current table is freed.
- */
-void __init numa_free_distance(void)
-{
- size_t size;
-
- if (!numa_distance)
- return;
-
- size = numa_distance_cnt * numa_distance_cnt *
- sizeof(numa_distance[0]);
-
- memblock_free(numa_distance, size);
- numa_distance_cnt = 0;
- numa_distance = NULL;
-}
-
-/*
- * Create a new NUMA distance table.
- */
-static int __init numa_alloc_distance(void)
-{
- size_t size;
- int i, j;
-
- size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
- numa_distance = memblock_alloc(size, PAGE_SIZE);
- if (WARN_ON(!numa_distance))
- return -ENOMEM;
-
- numa_distance_cnt = nr_node_ids;
-
- /* fill with the default distances */
- for (i = 0; i < numa_distance_cnt; i++)
- for (j = 0; j < numa_distance_cnt; j++)
- numa_distance[i * numa_distance_cnt + j] = i == j ?
- LOCAL_DISTANCE : REMOTE_DISTANCE;
-
- pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
-
- return 0;
-}
-
-/**
- * numa_set_distance() - Set inter node NUMA distance from node to node.
- * @from: the 'from' node to set distance
- * @to: the 'to' node to set distance
- * @distance: NUMA distance
- *
- * Set the distance from node @from to @to to @distance.
- * If distance table doesn't exist, a warning is printed.
- *
- * If @from or @to is higher than the highest known node or lower than zero
- * or @distance doesn't make sense, the call is ignored.
- */
-void __init numa_set_distance(int from, int to, int distance)
-{
- if (!numa_distance) {
- pr_warn_once("Warning: distance table not allocated yet\n");
- return;
- }
-
- if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
- from < 0 || to < 0) {
- pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- if ((u8)distance != distance ||
- (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- numa_distance[from * numa_distance_cnt + to] = distance;
-}
-
-/*
- * Return NUMA distance @from to @to
- */
-int __node_distance(int from, int to)
-{
- if (from >= numa_distance_cnt || to >= numa_distance_cnt)
- return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
- return numa_distance[from * numa_distance_cnt + to];
-}
-EXPORT_SYMBOL(__node_distance);
-
static int __init numa_register_nodes(void)
{
int nid;
- struct memblock_region *mblk;
-
- /* Check that valid nid is set to memblks */
- for_each_mem_region(mblk) {
- int mblk_nid = memblock_get_region_node(mblk);
- phys_addr_t start = mblk->base;
- phys_addr_t end = mblk->base + mblk->size - 1;
-
- if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
- pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
- mblk_nid, &start, &end);
- return -EINVAL;
- }
- }
/* Finally register nodes. */
for_each_node_mask(nid, numa_nodes_parsed) {
@@ -381,11 +231,7 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
- ret = numa_alloc_distance();
- if (ret < 0)
- return ret;
-
- ret = init_func();
+ ret = numa_memblks_init(init_func, /* memblock_force_top_down */ false);
if (ret < 0)
goto out_free_distance;
@@ -403,7 +249,7 @@ static int __init numa_init(int (*init_func)(void))
return 0;
out_free_distance:
- numa_free_distance();
+ numa_reset_distance();
return ret;
}
@@ -433,6 +279,7 @@ static int __init dummy_numa_init(void)
pr_err("NUMA init failed\n");
return ret;
}
+ node_set(0, numa_nodes_parsed);
numa_off = true;
return 0;
@@ -475,3 +322,54 @@ void __init arch_numa_init(void)
numa_init(dummy_numa_init);
}
+
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids)
+{
+ int i, j;
+
+ /*
+ * Transform cpu_to_node_map table to use emulated nids by
+ * reverse-mapping phys_nid. The maps should always exist but fall
+ * back to zero just in case.
+ */
+ for (i = 0; i < ARRAY_SIZE(cpu_to_node_map); i++) {
+ if (cpu_to_node_map[i] == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < nr_emu_nids; j++)
+ if (cpu_to_node_map[i] == emu_nid_to_phys[j])
+ break;
+ cpu_to_node_map[i] = j < nr_emu_nids ? j : 0;
+ }
+}
+
+u64 __init numa_emu_dma_end(void)
+{
+ return memblock_start_of_DRAM() + SZ_4G;
+}
+
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
+{
+ struct cpumask *mask;
+
+ if (node == NUMA_NO_NODE)
+ return;
+
+ mask = node_to_cpumask_map[node];
+ if (!cpumask_available(mask)) {
+ pr_err("node_to_cpumask_map[%i] NULL\n", node);
+ dump_stack();
+ return;
+ }
+
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+
+ pr_debug("%s cpu %d node %d: mask now %*pbl\n",
+ enable ? "numa_add_cpu" : "numa_remove_cpu",
+ cpu, node, cpumask_pr_args(mask));
+}
+#endif /* CONFIG_NUMA_EMU */
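With numa_emu_cmdline() wired into numa_parse_early_param() above, NUMA emulation becomes reachable from the kernel command line on the architectures using generic NUMA. Assuming the syntax carries over unchanged from x86:

	numa=off	# disable NUMA
	numa=fake=4	# emulate 4 NUMA nodes on top of the real topology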
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 01ef796c2055..b6f941a6ab69 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -346,8 +346,7 @@ attribute_container_device_trigger_safe(struct device *dev,
* @fn: the function to execute for each classdev.
*
* This function is for executing a trigger when you need to know both
- * the container and the classdev. If you only care about the
- * container, then use attribute_container_trigger() instead.
+ * the container and the classdev.
*/
void
attribute_container_device_trigger(struct device *dev,
@@ -379,33 +378,6 @@ attribute_container_device_trigger(struct device *dev,
}
/**
- * attribute_container_trigger - trigger a function for each matching container
- *
- * @dev: The generic device to activate the trigger for
- * @fn: the function to trigger
- *
- * This routine triggers a function that only needs to know the
- * matching containers (not the classdev) associated with a device.
- * It is more lightweight than attribute_container_device_trigger, so
- * should be used in preference unless the triggering function
- * actually needs to know the classdev.
- */
-void
-attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *))
-{
- struct attribute_container *cont;
-
- mutex_lock(&attribute_container_mutex);
- list_for_each_entry(cont, &attribute_container_list, node) {
- if (cont->match(cont, dev))
- fn(cont, dev);
- }
- mutex_unlock(&attribute_container_mutex);
-}
-
-/**
* attribute_container_add_attrs - add attributes
*
* @classdev: The class device
@@ -459,24 +431,6 @@ attribute_container_add_class_device(struct device *classdev)
}
/**
- * attribute_container_add_class_device_adapter - simple adapter for triggers
- *
- * @cont: the container to register.
- * @dev: the generic device to activate the trigger for
- * @classdev: the class device to add
- *
- * This function is identical to attribute_container_add_class_device except
- * that it is designed to be called from the triggers
- */
-int
-attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev)
-{
- return attribute_container_add_class_device(classdev);
-}
-
-/**
* attribute_container_remove_attrs - remove any attribute files
*
* @classdev: The class device to remove the files from
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 54b92839e05c..7823888af4f6 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -352,7 +352,7 @@ EXPORT_SYMBOL_GPL(__auxiliary_device_add);
*/
struct auxiliary_device *auxiliary_find_device(struct device *start,
const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct device *dev;
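auxiliary_find_device() now takes the shared device_match_t callback type instead of an open-coded function-pointer signature, matching the bus_find_device() and class_find_device() conversions below. The type (as defined in <linux/device.h>) and a hypothetical matcher:

	typedef int (*device_match_t)(struct device *dev, const void *data);

	static int match_by_name(struct device *dev, const void *data)
	{
		return !strcmp(dev_name(dev), data);	/* example only */
	}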
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0b53593372d7..8cf04a557bdb 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -145,7 +145,7 @@ void auxiliary_bus_init(void);
static inline void auxiliary_bus_init(void) { }
#endif
-struct kobject *virtual_device_parent(struct device *dev);
+struct kobject *virtual_device_parent(void);
int bus_add_device(struct device *dev);
void bus_probe_device(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index ffea0728b8b2..657c93c38b0d 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -152,7 +152,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
+ /* return -EIO for reading a bus attribute without show() */
+ ssize_t ret = -EIO;
if (bus_attr->show)
ret = bus_attr->show(subsys_priv->bus, buf);
@@ -164,7 +165,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
- ssize_t ret = 0;
+ /* return -EIO for writing a bus attribute without store() */
+ ssize_t ret = -EIO;
if (bus_attr->store)
ret = bus_attr->store(subsys_priv->bus, buf, count);
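With the two hunks above, reading or writing a bus attribute that lacks the corresponding show()/store() method now fails with -EIO instead of silently succeeding with no data. Attributes typically define only the direction they support, e.g. a read-only attribute (sketch; names assumed):

	static ssize_t example_show(const struct bus_type *bus, char *buf)
	{
		return sysfs_emit(buf, "ok\n");
	}
	static BUS_ATTR_RO(example);	/* a write now returns -EIO */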
@@ -389,7 +391,7 @@ EXPORT_SYMBOL_GPL(bus_for_each_dev);
*/
struct device *bus_find_device(const struct bus_type *bus,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -920,6 +922,8 @@ bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
kset_unregister(&priv->subsys);
+ /* The above kset_unregister() will kfree @priv */
+ priv = NULL;
out:
kfree(priv);
return retval;
@@ -1294,7 +1298,7 @@ int subsys_virtual_register(const struct bus_type *subsys,
{
struct kobject *virtual_dir;
- virtual_dir = virtual_device_parent(NULL);
+ virtual_dir = virtual_device_parent();
if (!virtual_dir)
return -ENOMEM;
@@ -1385,8 +1389,13 @@ int __init buses_init(void)
return -ENOMEM;
system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
- if (!system_kset)
+ if (!system_kset) {
+ /* Do error handling here as devices_init() does */
+ kset_unregister(bus_kset);
+ bus_kset = NULL;
+ pr_err("%s: failed to create and add kset 'bus'\n", __func__);
return -ENOMEM;
+ }
return 0;
}
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 23b8cba4a2a3..7a7609298e18 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -202,29 +202,24 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
static int cache_setup_of_node(unsigned int cpu)
{
- struct device_node *np, *prev;
struct cacheinfo *this_leaf;
unsigned int index = 0;
- np = of_cpu_device_node_get(cpu);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (!np) {
pr_err("Failed to find cpu%d device node\n", cpu);
return -ENOENT;
}
if (!of_check_cache_nodes(np)) {
- of_node_put(np);
return -ENOENT;
}
- prev = np;
-
while (index < cache_leaves(cpu)) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
if (this_leaf->level != 1) {
+ struct device_node *prev __free(device_node) = np;
np = of_find_next_cache_node(np);
- of_node_put(prev);
- prev = np;
if (!np)
break;
}
@@ -233,8 +228,6 @@ static int cache_setup_of_node(unsigned int cpu)
index++;
}
- of_node_put(np);
-
if (index != cache_leaves(cpu)) /* not all OF nodes populated */
return -ENOENT;
@@ -243,17 +236,14 @@ static int cache_setup_of_node(unsigned int cpu)
static bool of_check_cache_nodes(struct device_node *np)
{
- struct device_node *next;
-
if (of_property_present(np, "cache-size") ||
of_property_present(np, "i-cache-size") ||
of_property_present(np, "d-cache-size") ||
of_property_present(np, "cache-unified"))
return true;
- next = of_find_next_cache_node(np);
+ struct device_node *next __free(device_node) = of_find_next_cache_node(np);
if (next) {
- of_node_put(next);
return true;
}
@@ -287,12 +277,10 @@ static int of_count_cache_leaves(struct device_node *np)
int init_of_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct device_node *np = of_cpu_device_node_get(cpu);
- struct device_node *prev = NULL;
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
unsigned int levels = 0, leaves, level;
if (!of_check_cache_nodes(np)) {
- of_node_put(np);
return -ENOENT;
}
@@ -300,30 +288,27 @@ int init_of_cache_level(unsigned int cpu)
if (leaves > 0)
levels = 1;
- prev = np;
- while ((np = of_find_next_cache_node(np))) {
- of_node_put(prev);
- prev = np;
+ while (1) {
+ struct device_node *prev __free(device_node) = np;
+ np = of_find_next_cache_node(np);
+ if (!np)
+ break;
+
if (!of_device_is_compatible(np, "cache"))
- goto err_out;
+ return -EINVAL;
if (of_property_read_u32(np, "cache-level", &level))
- goto err_out;
+ return -EINVAL;
if (level <= levels)
- goto err_out;
+ return -EINVAL;
leaves += of_count_cache_leaves(np);
levels = level;
}
- of_node_put(np);
this_cpu_ci->num_levels = levels;
this_cpu_ci->num_leaves = leaves;
return 0;
-
-err_out:
- of_node_put(np);
- return -EINVAL;
}
#else
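The cacheinfo conversion above leans on scope-based cleanup: tagging a pointer with __free(device_node) arranges for of_node_put() to run automatically when the variable goes out of scope, so the early-return paths need no explicit puts. A minimal sketch of the pattern (node path assumed):

	int example(void)
	{
		struct device_node *np __free(device_node) =
			of_find_node_by_path("/cpus");

		if (!np)
			return -ENOENT;
		/* use np; of_node_put(np) runs automatically on return */
		return 0;
	}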
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 7b38fdf8e1d7..cb5359235c70 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -183,6 +183,17 @@ int class_register(const struct class *cls)
pr_debug("device class '%s': registering\n", cls->name);
+ if (cls->ns_type && !cls->namespace) {
+ pr_err("%s: class '%s' does not have namespace\n",
+ __func__, cls->name);
+ return -EINVAL;
+ }
+ if (!cls->ns_type && cls->namespace) {
+ pr_err("%s: class '%s' does not have ns_type\n",
+ __func__, cls->name);
+ return -EINVAL;
+ }
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -433,8 +444,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
* code. There's no locking restriction.
*/
struct device *class_find_device(const struct class *class, const struct device *start,
- const void *data,
- int (*match)(struct device *, const void *))
+ const void *data, device_match_t match)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
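The new class_register() checks above enforce that ns_type and the namespace() callback come as a pair. A namespace-aware class supplies both, along the lines of this sketch (modeled loosely on the networking class; the ops pointer and callback here are assumptions):

	static const void *example_namespace(const struct device *dev)
	{
		return NULL;	/* would return the owning namespace */
	}

	static struct class example_class = {
		.name		= "example",
		.ns_type	= &net_ns_type_operations,
		.namespace	= example_namespace,
	};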
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8c0733d3aad8..a4c853411a6b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -9,29 +9,30 @@
*/
#include <linux/acpi.h>
+#include <linux/blkdev.h>
+#include <linux/cleanup.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kdev_t.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
#include <linux/pm_runtime.h>
-#include <linux/netdevice.h>
#include <linux/rcupdate.h>
-#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
-#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
#include "physical_location.h"
@@ -97,12 +98,9 @@ static int __fwnode_link_add(struct fwnode_handle *con,
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
u8 flags)
{
- int ret;
+ guard(mutex)(&fwnode_link_lock);
- mutex_lock(&fwnode_link_lock);
- ret = __fwnode_link_add(con, sup, flags);
- mutex_unlock(&fwnode_link_lock);
- return ret;
+ return __fwnode_link_add(con, sup, flags);
}
/**
@@ -143,10 +141,10 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
__fwnode_link_del(link);
- mutex_unlock(&fwnode_link_lock);
}
/**
@@ -159,10 +157,10 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
struct fwnode_link *link, *tmp;
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
__fwnode_link_del(link);
- mutex_unlock(&fwnode_link_lock);
}
/**
@@ -563,20 +561,11 @@ static struct class devlink_class = {
static int devlink_add_symlinks(struct device *dev)
{
+ char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
int ret;
- size_t len;
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
struct device *con = link->consumer;
- char *buf;
-
- len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
- strlen(dev_bus_name(con)) + strlen(dev_name(con)));
- len += strlen(":");
- len += strlen("supplier:") + 1;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
if (ret)
@@ -586,58 +575,64 @@ static int devlink_add_symlinks(struct device *dev)
if (ret)
goto err_con;
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
+ buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ if (!buf_con) {
+ ret = -ENOMEM;
+ goto err_con_dev;
+ }
+
+ ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con);
if (ret)
goto err_con_dev;
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
+ buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ if (!buf_sup) {
+ ret = -ENOMEM;
+ goto err_sup_dev;
+ }
+
+ ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup);
if (ret)
goto err_sup_dev;
goto out;
err_sup_dev:
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- sysfs_remove_link(&sup->kobj, buf);
+ sysfs_remove_link(&sup->kobj, buf_con);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
- kfree(buf);
return ret;
}
static void devlink_remove_symlinks(struct device *dev)
{
+ char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL;
struct device_link *link = to_devlink(dev);
- size_t len;
struct device *sup = link->supplier;
struct device *con = link->consumer;
- char *buf;
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");
- len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
- strlen(dev_bus_name(con)) + strlen(dev_name(con)));
- len += strlen(":");
- len += strlen("supplier:") + 1;
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf) {
- WARN(1, "Unable to properly free device link symlinks!\n");
- return;
- }
-
if (device_is_registered(con)) {
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
- sysfs_remove_link(&con->kobj, buf);
+ buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ if (!buf_sup)
+ goto out;
+ sysfs_remove_link(&con->kobj, buf_sup);
}
- snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
- sysfs_remove_link(&sup->kobj, buf);
- kfree(buf);
+
+ buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ if (!buf_con)
+ goto out;
+ sysfs_remove_link(&sup->kobj, buf_con);
+
+ return;
+
+out:
+ WARN(1, "Unable to properly free device link symlinks!\n");
}
static struct class_interface devlink_class_intf = {
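Many of the core.c hunks in this patch replace open-coded mutex_lock()/mutex_unlock() pairs with guard(mutex) and scoped_guard() from <linux/cleanup.h>: the lock is released automatically when the scope ends, so early returns cannot leak it. A minimal sketch:

	static int example(struct mutex *lock, bool ready)
	{
		guard(mutex)(lock);	/* dropped on every return path */

		if (!ready)
			return -EAGAIN;
		return 0;
	}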
@@ -678,6 +673,9 @@ postcore_initcall(devlink_class_init);
* @supplier: Supplier end of the link.
* @flags: Link flags.
*
+ * Return: On success, a device_link struct will be returned.
+ * On error or invalid flag settings, NULL will be returned.
+ *
* The caller is responsible for the proper synchronization of the link creation
* with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
* runtime PM framework to take the link into account. Second, if the
@@ -1061,20 +1059,16 @@ int device_links_check_suppliers(struct device *dev)
* Device waiting for supplier to become available is not allowed to
* probe.
*/
- mutex_lock(&fwnode_link_lock);
- sup_fw = fwnode_links_check_suppliers(dev->fwnode);
- if (sup_fw) {
- if (!dev_is_best_effort(dev)) {
- fwnode_ret = -EPROBE_DEFER;
- dev_err_probe(dev, -EPROBE_DEFER,
- "wait for supplier %pfwf\n", sup_fw);
- } else {
- fwnode_ret = -EAGAIN;
+ scoped_guard(mutex, &fwnode_link_lock) {
+ sup_fw = fwnode_links_check_suppliers(dev->fwnode);
+ if (sup_fw) {
+ if (dev_is_best_effort(dev))
+ fwnode_ret = -EAGAIN;
+ else
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for supplier %pfwf\n", sup_fw);
}
}
- mutex_unlock(&fwnode_link_lock);
- if (fwnode_ret == -EPROBE_DEFER)
- return fwnode_ret;
device_links_write_lock();
@@ -1093,10 +1087,8 @@ int device_links_check_suppliers(struct device *dev)
}
device_links_missing_supplier(dev);
- dev_err_probe(dev, -EPROBE_DEFER,
- "supplier %s not ready\n",
- dev_name(link->supplier));
- ret = -EPROBE_DEFER;
+ ret = dev_err_probe(dev, -EPROBE_DEFER,
+ "supplier %s not ready\n", dev_name(link->supplier));
break;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
@@ -1249,9 +1241,8 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
bool val;
device_lock(dev);
- mutex_lock(&fwnode_link_lock);
- val = !!fwnode_links_check_suppliers(dev->fwnode);
- mutex_unlock(&fwnode_link_lock);
+ scoped_guard(mutex, &fwnode_link_lock)
+ val = !!fwnode_links_check_suppliers(dev->fwnode);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
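guard() and scoped_guard() come from the same <linux/cleanup.h> machinery: the mutex is released automatically when the guarded scope ends, which is what lets the conversions above take the lock for exactly the statements that need it and simply return from inside the scope. A hedged sketch (names are illustrative):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* stands in for fwnode_link_lock */

static bool check_flag(const bool *flag)
{
	bool val;

	scoped_guard(mutex, &example_lock)
		val = *flag;	/* lock held only for this statement */

	return val;		/* lock already released here */
}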
@@ -1324,13 +1315,15 @@ void device_links_driver_bound(struct device *dev)
*/
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
+
fwnode_links_purge_suppliers(dev->fwnode);
- mutex_lock(&fwnode_link_lock);
+
+ guard(mutex)(&fwnode_link_lock);
+
fwnode_for_each_available_child_node(dev->fwnode, child)
__fw_devlink_pickup_dangling_consumers(child,
dev->fwnode);
__fw_devlink_link_to_consumers(dev);
- mutex_unlock(&fwnode_link_lock);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
@@ -2339,10 +2332,10 @@ static void fw_devlink_link_device(struct device *dev)
fw_devlink_parse_fwtree(fwnode);
- mutex_lock(&fwnode_link_lock);
+ guard(mutex)(&fwnode_link_lock);
+
__fw_devlink_link_to_consumers(dev);
__fw_devlink_link_to_suppliers(dev, fwnode);
- mutex_unlock(&fwnode_link_lock);
}
/* Device links support end. */
@@ -2591,7 +2584,7 @@ static const void *device_namespace(const struct kobject *kobj)
const struct device *dev = kobj_to_dev(kobj);
const void *ns = NULL;
- if (dev->class && dev->class->ns_type)
+ if (dev->class && dev->class->namespace)
ns = dev->class->namespace(dev);
return ns;
@@ -3170,7 +3163,7 @@ void device_initialize(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_initialize);
-struct kobject *virtual_device_parent(struct device *dev)
+struct kobject *virtual_device_parent(void)
{
static struct kobject *virtual_dir = NULL;
@@ -3248,7 +3241,7 @@ static struct kobject *get_device_parent(struct device *dev,
* in a "glue" directory to prevent namespace collisions.
*/
if (parent == NULL)
- parent_kobj = virtual_device_parent(dev);
+ parent_kobj = virtual_device_parent();
else if (parent->class && !dev->class->ns_type) {
subsys_put(sp);
return &parent->kobj;
@@ -4003,7 +3996,7 @@ int device_for_each_child(struct device *parent, void *data,
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4033,7 +4026,7 @@ int device_for_each_child_reverse(struct device *parent, void *data,
struct device *child;
int error = 0;
- if (!parent->p)
+ if (!parent || !parent->p)
return 0;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4067,7 +4060,7 @@ struct device *device_find_child(struct device *parent, void *data,
struct klist_iter i;
struct device *child;
- if (!parent)
+ if (!parent || !parent->p)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
@@ -4515,9 +4508,11 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
+ struct subsys_private *sp = NULL;
struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
+ bool is_link_renamed = false;
dev = get_device(dev);
if (!dev)
@@ -4532,7 +4527,7 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- struct subsys_private *sp = class_to_subsys(dev->class);
+ sp = class_to_subsys(dev->class);
if (!sp) {
error = -EINVAL;
@@ -4541,16 +4536,19 @@ int device_rename(struct device *dev, const char *new_name)
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
new_name, kobject_namespace(kobj));
- subsys_put(sp);
if (error)
goto out;
+
+ is_link_renamed = true;
}
error = kobject_rename(kobj, new_name);
- if (error)
- goto out;
-
out:
+ if (error && is_link_renamed)
+ sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
+ old_device_name, kobject_namespace(kobj));
+ subsys_put(sp);
+
put_device(dev);
kfree(old_device_name);
@@ -4872,7 +4870,7 @@ set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
else
return;
- strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
+ strscpy(dev_info->subsystem, subsys);
/*
* Add device identifier DEVICE=:
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9b745ba54de1..f0e4b4aba885 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -248,7 +248,7 @@ static int deferred_devs_show(struct seq_file *s, void *data)
list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
seq_printf(s, "%s\t%s", dev_name(curr->device),
- curr->device->p->deferred_probe_reason ?: "\n");
+ curr->deferred_probe_reason ?: "\n");
mutex_unlock(&deferred_probe_mutex);
@@ -393,6 +393,7 @@ bool device_is_bound(struct device *dev)
{
return dev->p && klist_node_attached(&dev->p->knode_driver);
}
+EXPORT_SYMBOL_GPL(device_is_bound);
static void driver_bound(struct device *dev)
{
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index a2ce0ead06a6..2152eec0c135 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1231,6 +1231,6 @@ void devm_free_percpu(struct device *dev, void __percpu *pdata)
* devm_free_pages() does.
*/
WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
- (__force void *)pdata));
+ (void *)(__force unsigned long)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
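On the devres change: __percpu pointers carry a sparse address-space attribute, so converting one to a plain void * goes through unsigned long with __force, which is exactly the new cast above. Illustrative sketch:

#include <linux/percpu.h>

/* Strip the __percpu address-space tag without a sparse warning. */
static void *percpu_to_cookie(int __percpu *p)
{
	return (void *)(__force unsigned long)p;
}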
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 88c6fd1f1992..b4eb5b89c4ee 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
*/
struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data))
+ device_match_t match)
{
struct klist_iter i;
struct device *dev;
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index a03ee4b11134..324a9a3c087a 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -849,6 +849,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
{}
#endif
+/*
+ * Reject firmware file names with ".." path components.
+ * There are drivers that construct firmware file names from device-supplied
+ * strings, and we don't want some device to be able to tell us "I would like to
+ * be sent my firmware from ../../../etc/shadow, please".
+ *
+ * Search for ".." surrounded by either '/' or start/end of string.
+ *
+ * This intentionally only looks at the firmware name, not at the firmware base
+ * directory or at symlink contents.
+ */
+static bool name_contains_dotdot(const char *name)
+{
+ size_t name_len = strlen(name);
+
+ return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
+ strstr(name, "/../") != NULL ||
+ (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
+}
+
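A quick sanity check of the rules above (hypothetical helper living next to name_contains_dotdot(), not part of the patch): only ".." as a complete path component is rejected.

static void __init test_name_contains_dotdot(void)
{
	WARN_ON(!name_contains_dotdot(".."));
	WARN_ON(!name_contains_dotdot("../fw.bin"));
	WARN_ON(!name_contains_dotdot("vendor/../fw.bin"));
	WARN_ON(!name_contains_dotdot("vendor/fw/.."));

	WARN_ON(name_contains_dotdot("foo/bar..bin"));	/* ".." inside a name is fine */
	WARN_ON(name_contains_dotdot("foo.bin"));
}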
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -869,6 +889,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
+ if (name_contains_dotdot(name)) {
+ dev_warn(device,
+ "Firmware load for '%s' refused, path contains '..' component\n",
+ name);
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = _request_firmware_prepare(&fw, name, device, buf, size,
offset, opt_flags);
if (ret <= 0) /* error or already assigned */
@@ -946,6 +974,8 @@ out:
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
+ * It must not contain any ".." path components - "foo/bar..bin" is
+ * allowed, but "foo/../bar.bin" is not.
*
* Caller must hold the reference count of @device.
*
diff --git a/drivers/base/module.c b/drivers/base/module.c
index f742ad2a21da..c4eaa1158d54 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -66,27 +66,31 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
driver_name = make_driver_name(drv);
if (!driver_name) {
ret = -ENOMEM;
- goto out;
+ goto out_remove_kobj;
}
module_create_drivers_dir(mk);
if (!mk->drivers_dir) {
ret = -EINVAL;
- goto out;
+ goto out_free_driver_name;
}
ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
if (ret)
- goto out;
+ goto out_remove_drivers_dir;
kfree(driver_name);
return 0;
-out:
- sysfs_remove_link(&drv->p->kobj, "module");
+
+out_remove_drivers_dir:
sysfs_remove_link(mk->drivers_dir, driver_name);
+
+out_free_driver_name:
kfree(driver_name);
+out_remove_kobj:
+ sysfs_remove_link(&drv->p->kobj, "module");
return ret;
}
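The relabelled gotos restore the usual LIFO unwind convention: each label undoes only the steps that had already succeeded, in reverse order. A generic sketch with hypothetical step functions:

int step_a(void);
int step_b(void);
int step_c(void);
void undo_step_a(void);
void undo_step_b(void);

static int setup_example(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto undo_a;

	ret = step_c();
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
	return ret;
}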
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4c3ee6521ba5..6f2a33722c52 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1474,7 +1474,7 @@ static const struct dev_pm_ops platform_dev_pm_ops = {
USE_PLATFORM_PM_SLEEP_OPS
};
-struct bus_type platform_bus_type = {
+const struct bus_type platform_bus_type = {
.name = "platform",
.dev_groups = platform_dev_groups,
.match = platform_match,
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index ed3be52ab63d..8540052d37c5 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -334,7 +334,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
}
/* If the root port is capable of returning Config Request
- * Retry Status (CRS) Completion Status to software then
+ * Retry Status (RRS) Completion Status to software then
* enable the feature.
*/
static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
@@ -348,10 +348,10 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
NULL);
root_cap = cap_ptr + PCI_EXP_RTCAP;
bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
- if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
- /* Enable CRS software visibility */
+ if (val16 & BCMA_CORE_PCI_RC_RRS_VISIBILITY) {
+ /* Enable Configuration RRS Software Visibility */
root_ctrl = cap_ptr + PCI_EXP_RTCTL;
- val16 = PCI_EXP_RTCTL_CRSSVE;
+ val16 = PCI_EXP_RTCTL_RRS_SVE;
bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
sizeof(u16));
@@ -360,7 +360,7 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
* 100 ms wait time from the end of Reset. If the device is
* not done with its internal initialization, it must at
* least return a completion TLP, with a completion status
- * of "Configuration Request Retry Status (CRS)". The root
+ * of "Configuration Request Retry Status (RRS)". The root
* complex must complete the request to the host by returning
* a read-data value of 0001h for the Vendor ID field and
* all 1s for any additional bytes included in the request.
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 449123eb54bf..0d74d75260ef 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3399,10 +3399,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
- if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
+ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
+ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
return;
+ }
- spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
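The drbd change closes a small race: the early-exit test reads md.uuid[UI_BITMAP], which is protected by uuid_lock, so it has to be evaluated with the lock already held. The shape of the fix, sketched generically:

#include <linux/spinlock.h>

/* Illustrative only: 'state' is assumed to be protected by 'lock'. */
static void maybe_update(spinlock_t *lock, u64 *state, u64 val)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (*state == 0 && val == 0) {
		/* nothing to do; decided race-free under the lock */
		spin_unlock_irqrestore(lock, flags);
		return;
	}
	*state = val;
	spin_unlock_irqrestore(lock, flags);
}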
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 11901f2812ad..223faa9d5ffd 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2259,14 +2259,12 @@ static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_registers,
- .llseek = no_llseek,
};
static const struct file_operations mtip_flags_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = mtip_hw_read_flags,
- .llseek = no_llseek,
};
static void mtip_hw_debugfs_init(struct driver_data *dd)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3edb37a41312..499c110465e3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2835,7 +2835,6 @@ static const struct file_operations pkt_ctl_fops = {
.compat_ioctl = pkt_ctl_compat_ioctl,
#endif
.owner = THIS_MODULE,
- .llseek = no_llseek,
};
static struct miscdevice pkt_misc = {
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index bca06bfb4bc3..a6c8e5cc6051 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1983,7 +1983,6 @@ static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
.release = ublk_ch_release,
- .llseek = no_llseek,
.read_iter = ublk_ch_read_iter,
.write_iter = ublk_ch_write_iter,
.uring_cmd = ublk_ch_uring_cmd,
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index eacf1cba7bf4..6aea609b795c 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -2,8 +2,6 @@
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && MMU
- depends on HAVE_ZSMALLOC
- depends on CRYPTO_LZO || CRYPTO_ZSTD || CRYPTO_LZ4 || CRYPTO_LZ4HC || CRYPTO_842
select ZSMALLOC
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
@@ -16,6 +14,49 @@ config ZRAM
See Documentation/admin-guide/blockdev/zram.rst for more information.
+config ZRAM_BACKEND_LZ4
+ bool "lz4 compression support"
+ depends on ZRAM
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+
+config ZRAM_BACKEND_LZ4HC
+ bool "lz4hc compression support"
+ depends on ZRAM
+ select LZ4HC_COMPRESS
+ select LZ4_DECOMPRESS
+
+config ZRAM_BACKEND_ZSTD
+ bool "zstd compression support"
+ depends on ZRAM
+ select ZSTD_COMPRESS
+ select ZSTD_DECOMPRESS
+
+config ZRAM_BACKEND_DEFLATE
+ bool "deflate compression support"
+ depends on ZRAM
+ select ZLIB_DEFLATE
+ select ZLIB_INFLATE
+
+config ZRAM_BACKEND_842
+ bool "842 compression support"
+ depends on ZRAM
+ select 842_COMPRESS
+ select 842_DECOMPRESS
+
+config ZRAM_BACKEND_FORCE_LZO
+ depends on ZRAM
+ def_bool !ZRAM_BACKEND_LZ4 && !ZRAM_BACKEND_LZ4HC && \
+ !ZRAM_BACKEND_ZSTD && !ZRAM_BACKEND_DEFLATE && \
+ !ZRAM_BACKEND_842
+
+config ZRAM_BACKEND_LZO
+ bool "lzo and lzo-rle compression support" if !ZRAM_BACKEND_FORCE_LZO
+ depends on ZRAM
+ default ZRAM_BACKEND_FORCE_LZO
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+
choice
prompt "Default zram compressor"
default ZRAM_DEF_COMP_LZORLE
@@ -23,38 +64,44 @@ choice
config ZRAM_DEF_COMP_LZORLE
bool "lzo-rle"
- depends on CRYPTO_LZO
+ depends on ZRAM_BACKEND_LZO
-config ZRAM_DEF_COMP_ZSTD
- bool "zstd"
- depends on CRYPTO_ZSTD
+config ZRAM_DEF_COMP_LZO
+ bool "lzo"
+ depends on ZRAM_BACKEND_LZO
config ZRAM_DEF_COMP_LZ4
bool "lz4"
- depends on CRYPTO_LZ4
-
-config ZRAM_DEF_COMP_LZO
- bool "lzo"
- depends on CRYPTO_LZO
+ depends on ZRAM_BACKEND_LZ4
config ZRAM_DEF_COMP_LZ4HC
bool "lz4hc"
- depends on CRYPTO_LZ4HC
+ depends on ZRAM_BACKEND_LZ4HC
+
+config ZRAM_DEF_COMP_ZSTD
+ bool "zstd"
+ depends on ZRAM_BACKEND_ZSTD
+
+config ZRAM_DEF_COMP_DEFLATE
+ bool "deflate"
+ depends on ZRAM_BACKEND_DEFLATE
config ZRAM_DEF_COMP_842
bool "842"
- depends on CRYPTO_842
+ depends on ZRAM_BACKEND_842
endchoice
config ZRAM_DEF_COMP
string
default "lzo-rle" if ZRAM_DEF_COMP_LZORLE
- default "zstd" if ZRAM_DEF_COMP_ZSTD
- default "lz4" if ZRAM_DEF_COMP_LZ4
default "lzo" if ZRAM_DEF_COMP_LZO
+ default "lz4" if ZRAM_DEF_COMP_LZ4
default "lz4hc" if ZRAM_DEF_COMP_LZ4HC
+ default "zstd" if ZRAM_DEF_COMP_ZSTD
+ default "deflate" if ZRAM_DEF_COMP_DEFLATE
default "842" if ZRAM_DEF_COMP_842
+ default "unset-value"
config ZRAM_WRITEBACK
bool "Write back incompressible or idle page to backing device"
diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile
index de9e457907b1..0fdefd576691 100644
--- a/drivers/block/zram/Makefile
+++ b/drivers/block/zram/Makefile
@@ -1,4 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
+
zram-y := zcomp.o zram_drv.o
+zram-$(CONFIG_ZRAM_BACKEND_LZO) += backend_lzorle.o backend_lzo.o
+zram-$(CONFIG_ZRAM_BACKEND_LZ4) += backend_lz4.o
+zram-$(CONFIG_ZRAM_BACKEND_LZ4HC) += backend_lz4hc.o
+zram-$(CONFIG_ZRAM_BACKEND_ZSTD) += backend_zstd.o
+zram-$(CONFIG_ZRAM_BACKEND_DEFLATE) += backend_deflate.o
+zram-$(CONFIG_ZRAM_BACKEND_842) += backend_842.o
+
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/block/zram/backend_842.c b/drivers/block/zram/backend_842.c
new file mode 100644
index 000000000000..10d9d5c60f53
--- /dev/null
+++ b/drivers/block/zram/backend_842.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sw842.h>
+#include <linux/vmalloc.h>
+
+#include "backend_842.h"
+
+static void release_params_842(struct zcomp_params *params)
+{
+}
+
+static int setup_params_842(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static void destroy_842(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int create_842(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static int compress_842(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ unsigned int dlen = req->dst_len;
+ int ret;
+
+ ret = sw842_compress(req->src, req->src_len, req->dst, &dlen,
+ ctx->context);
+ if (ret == 0)
+ req->dst_len = dlen;
+ return ret;
+}
+
+static int decompress_842(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ unsigned int dlen = req->dst_len;
+
+ return sw842_decompress(req->src, req->src_len, req->dst, &dlen);
+}
+
+const struct zcomp_ops backend_842 = {
+ .compress = compress_842,
+ .decompress = decompress_842,
+ .create_ctx = create_842,
+ .destroy_ctx = destroy_842,
+ .setup_params = setup_params_842,
+ .release_params = release_params_842,
+ .name = "842",
+};
diff --git a/drivers/block/zram/backend_842.h b/drivers/block/zram/backend_842.h
new file mode 100644
index 000000000000..4dc85c188799
--- /dev/null
+++ b/drivers/block/zram/backend_842.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_842_H__
+#define __BACKEND_842_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_842;
+
+#endif /* __BACKEND_842_H__ */
diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c
new file mode 100644
index 000000000000..0f7f252c12f4
--- /dev/null
+++ b/drivers/block/zram/backend_deflate.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+
+#include "backend_deflate.h"
+
+/*
+ * Use the same window-bits value as the crypto API deflate driver; it is
+ * passed negated to zlib below to select a raw deflate stream (no zlib
+ * header/trailer).
+ */
+#define DEFLATE_DEF_WINBITS 11
+#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
+
+struct deflate_ctx {
+ struct z_stream_s cctx;
+ struct z_stream_s dctx;
+};
+
+static void deflate_release_params(struct zcomp_params *params)
+{
+}
+
+static int deflate_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ params->level = Z_DEFAULT_COMPRESSION;
+
+ return 0;
+}
+
+static void deflate_destroy(struct zcomp_ctx *ctx)
+{
+ struct deflate_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ if (zctx->cctx.workspace) {
+ zlib_deflateEnd(&zctx->cctx);
+ vfree(zctx->cctx.workspace);
+ }
+ if (zctx->dctx.workspace) {
+ zlib_inflateEnd(&zctx->dctx);
+ vfree(zctx->dctx.workspace);
+ }
+ kfree(zctx);
+}
+
+static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct deflate_ctx *zctx;
+ size_t sz;
+ int ret;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL);
+ zctx->cctx.workspace = vzalloc(sz);
+ if (!zctx->cctx.workspace)
+ goto error;
+
+ ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (ret != Z_OK)
+ goto error;
+
+ sz = zlib_inflate_workspacesize();
+ zctx->dctx.workspace = vzalloc(sz);
+ if (!zctx->dctx.workspace)
+ goto error;
+
+ ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS);
+ if (ret != Z_OK)
+ goto error;
+
+ return 0;
+
+error:
+ deflate_destroy(ctx);
+ return -EINVAL;
+}
+
+static int deflate_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct deflate_ctx *zctx = ctx->context;
+ struct z_stream_s *deflate;
+ int ret;
+
+ deflate = &zctx->cctx;
+ ret = zlib_deflateReset(deflate);
+ if (ret != Z_OK)
+ return -EINVAL;
+
+ deflate->next_in = (u8 *)req->src;
+ deflate->avail_in = req->src_len;
+ deflate->next_out = (u8 *)req->dst;
+ deflate->avail_out = req->dst_len;
+
+ ret = zlib_deflate(deflate, Z_FINISH);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dst_len = deflate->total_out;
+ return 0;
+}
+
+static int deflate_decompress(struct zcomp_params *params,
+ struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct deflate_ctx *zctx = ctx->context;
+ struct z_stream_s *inflate;
+ int ret;
+
+ inflate = &zctx->dctx;
+
+ ret = zlib_inflateReset(inflate);
+ if (ret != Z_OK)
+ return -EINVAL;
+
+ inflate->next_in = (u8 *)req->src;
+ inflate->avail_in = req->src_len;
+ inflate->next_out = (u8 *)req->dst;
+ inflate->avail_out = req->dst_len;
+
+ ret = zlib_inflate(inflate, Z_SYNC_FLUSH);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ return 0;
+}
+
+const struct zcomp_ops backend_deflate = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .create_ctx = deflate_create,
+ .destroy_ctx = deflate_destroy,
+ .setup_params = deflate_setup_params,
+ .release_params = deflate_release_params,
+ .name = "deflate",
+};
diff --git a/drivers/block/zram/backend_deflate.h b/drivers/block/zram/backend_deflate.h
new file mode 100644
index 000000000000..a39ac12b114c
--- /dev/null
+++ b/drivers/block/zram/backend_deflate.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_DEFLATE_H__
+#define __BACKEND_DEFLATE_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_deflate;
+
+#endif /* __BACKEND_DEFLATE_H__ */
diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c
new file mode 100644
index 000000000000..847f3334eb38
--- /dev/null
+++ b/drivers/block/zram/backend_lz4.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "backend_lz4.h"
+
+struct lz4_ctx {
+ void *mem;
+
+ LZ4_streamDecode_t *dstrm;
+ LZ4_stream_t *cstrm;
+};
+
+static void lz4_release_params(struct zcomp_params *params)
+{
+}
+
+static int lz4_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ params->level = LZ4_ACCELERATION_DEFAULT;
+
+ return 0;
+}
+
+static void lz4_destroy(struct zcomp_ctx *ctx)
+{
+ struct lz4_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ vfree(zctx->mem);
+ kfree(zctx->dstrm);
+ kfree(zctx->cstrm);
+ kfree(zctx);
+}
+
+static int lz4_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct lz4_ctx *zctx;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ zctx->mem = vmalloc(LZ4_MEM_COMPRESS);
+ if (!zctx->mem)
+ goto error;
+ } else {
+ zctx->dstrm = kzalloc(sizeof(*zctx->dstrm), GFP_KERNEL);
+ if (!zctx->dstrm)
+ goto error;
+
+ zctx->cstrm = kzalloc(sizeof(*zctx->cstrm), GFP_KERNEL);
+ if (!zctx->cstrm)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ lz4_destroy(ctx);
+ return -ENOMEM;
+}
+
+static int lz4_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->cstrm) {
+ ret = LZ4_compress_fast(req->src, req->dst, req->src_len,
+ req->dst_len, params->level,
+ zctx->mem);
+ } else {
+ /* Cstrm needs to be reset */
+ ret = LZ4_loadDict(zctx->cstrm, params->dict, params->dict_sz);
+ if (ret != params->dict_sz)
+ return -EINVAL;
+ ret = LZ4_compress_fast_continue(zctx->cstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len, params->level);
+ }
+ if (!ret)
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int lz4_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->dstrm) {
+ ret = LZ4_decompress_safe(req->src, req->dst, req->src_len,
+ req->dst_len);
+ } else {
+ /* Dstrm needs to be reset */
+ ret = LZ4_setStreamDecode(zctx->dstrm, params->dict,
+ params->dict_sz);
+ if (!ret)
+ return -EINVAL;
+ ret = LZ4_decompress_safe_continue(zctx->dstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len);
+ }
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_lz4 = {
+ .compress = lz4_compress,
+ .decompress = lz4_decompress,
+ .create_ctx = lz4_create,
+ .destroy_ctx = lz4_destroy,
+ .setup_params = lz4_setup_params,
+ .release_params = lz4_release_params,
+ .name = "lz4",
+};
diff --git a/drivers/block/zram/backend_lz4.h b/drivers/block/zram/backend_lz4.h
new file mode 100644
index 000000000000..c11fa602a703
--- /dev/null
+++ b/drivers/block/zram/backend_lz4.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZ4_H__
+#define __BACKEND_LZ4_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lz4;
+
+#endif /* __BACKEND_LZ4_H__ */
diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c
new file mode 100644
index 000000000000..5f37d5abcaeb
--- /dev/null
+++ b/drivers/block/zram/backend_lz4hc.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "backend_lz4hc.h"
+
+struct lz4hc_ctx {
+ void *mem;
+
+ LZ4_streamDecode_t *dstrm;
+ LZ4_streamHC_t *cstrm;
+};
+
+static void lz4hc_release_params(struct zcomp_params *params)
+{
+}
+
+static int lz4hc_setup_params(struct zcomp_params *params)
+{
+ if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ params->level = LZ4HC_DEFAULT_CLEVEL;
+
+ return 0;
+}
+
+static void lz4hc_destroy(struct zcomp_ctx *ctx)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ kfree(zctx->dstrm);
+ kfree(zctx->cstrm);
+ vfree(zctx->mem);
+ kfree(zctx);
+}
+
+static int lz4hc_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct lz4hc_ctx *zctx;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ zctx->mem = vmalloc(LZ4HC_MEM_COMPRESS);
+ if (!zctx->mem)
+ goto error;
+ } else {
+ zctx->dstrm = kzalloc(sizeof(*zctx->dstrm), GFP_KERNEL);
+ if (!zctx->dstrm)
+ goto error;
+
+ zctx->cstrm = kzalloc(sizeof(*zctx->cstrm), GFP_KERNEL);
+ if (!zctx->cstrm)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ lz4hc_destroy(ctx);
+ return -ENOMEM;
+}
+
+static int lz4hc_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->cstrm) {
+ ret = LZ4_compress_HC(req->src, req->dst, req->src_len,
+ req->dst_len, params->level,
+ zctx->mem);
+ } else {
+ /* Cstrm needs to be reset */
+ LZ4_resetStreamHC(zctx->cstrm, params->level);
+ ret = LZ4_loadDictHC(zctx->cstrm, params->dict,
+ params->dict_sz);
+ if (ret != params->dict_sz)
+ return -EINVAL;
+ ret = LZ4_compress_HC_continue(zctx->cstrm, req->src, req->dst,
+ req->src_len, req->dst_len);
+ }
+ if (!ret)
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int lz4hc_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct lz4hc_ctx *zctx = ctx->context;
+ int ret;
+
+ if (!zctx->dstrm) {
+ ret = LZ4_decompress_safe(req->src, req->dst, req->src_len,
+ req->dst_len);
+ } else {
+ /* Dstrm needs to be reset */
+ ret = LZ4_setStreamDecode(zctx->dstrm, params->dict,
+ params->dict_sz);
+ if (!ret)
+ return -EINVAL;
+ ret = LZ4_decompress_safe_continue(zctx->dstrm, req->src,
+ req->dst, req->src_len,
+ req->dst_len);
+ }
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_lz4hc = {
+ .compress = lz4hc_compress,
+ .decompress = lz4hc_decompress,
+ .create_ctx = lz4hc_create,
+ .destroy_ctx = lz4hc_destroy,
+ .setup_params = lz4hc_setup_params,
+ .release_params = lz4hc_release_params,
+ .name = "lz4hc",
+};
diff --git a/drivers/block/zram/backend_lz4hc.h b/drivers/block/zram/backend_lz4hc.h
new file mode 100644
index 000000000000..6de03551ed4d
--- /dev/null
+++ b/drivers/block/zram/backend_lz4hc.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZ4HC_H__
+#define __BACKEND_LZ4HC_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lz4hc;
+
+#endif /* __BACKEND_LZ4HC_H__ */
diff --git a/drivers/block/zram/backend_lzo.c b/drivers/block/zram/backend_lzo.c
new file mode 100644
index 000000000000..4c906beaae6b
--- /dev/null
+++ b/drivers/block/zram/backend_lzo.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "backend_lzo.h"
+
+static void lzo_release_params(struct zcomp_params *params)
+{
+}
+
+static int lzo_setup_params(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static int lzo_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lzo_destroy(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int lzo_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_1_compress(req->src, req->src_len, req->dst,
+ &req->dst_len, ctx->context);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+static int lzo_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_decompress_safe(req->src, req->src_len,
+ req->dst, &req->dst_len);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+const struct zcomp_ops backend_lzo = {
+ .compress = lzo_compress,
+ .decompress = lzo_decompress,
+ .create_ctx = lzo_create,
+ .destroy_ctx = lzo_destroy,
+ .setup_params = lzo_setup_params,
+ .release_params = lzo_release_params,
+ .name = "lzo",
+};
diff --git a/drivers/block/zram/backend_lzo.h b/drivers/block/zram/backend_lzo.h
new file mode 100644
index 000000000000..93d54749e63c
--- /dev/null
+++ b/drivers/block/zram/backend_lzo.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZO_H__
+#define __BACKEND_LZO_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lzo;
+
+#endif /* __BACKEND_LZO_H__ */
diff --git a/drivers/block/zram/backend_lzorle.c b/drivers/block/zram/backend_lzorle.c
new file mode 100644
index 000000000000..10640c96cbfc
--- /dev/null
+++ b/drivers/block/zram/backend_lzorle.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/lzo.h>
+
+#include "backend_lzorle.h"
+
+static void lzorle_release_params(struct zcomp_params *params)
+{
+}
+
+static int lzorle_setup_params(struct zcomp_params *params)
+{
+ return 0;
+}
+
+static int lzorle_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ ctx->context = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!ctx->context)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lzorle_destroy(struct zcomp_ctx *ctx)
+{
+ kfree(ctx->context);
+}
+
+static int lzorle_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzorle1x_1_compress(req->src, req->src_len, req->dst,
+ &req->dst_len, ctx->context);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+static int lzorle_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ int ret;
+
+ ret = lzo1x_decompress_safe(req->src, req->src_len,
+ req->dst, &req->dst_len);
+ return ret == LZO_E_OK ? 0 : ret;
+}
+
+const struct zcomp_ops backend_lzorle = {
+ .compress = lzorle_compress,
+ .decompress = lzorle_decompress,
+ .create_ctx = lzorle_create,
+ .destroy_ctx = lzorle_destroy,
+ .setup_params = lzorle_setup_params,
+ .release_params = lzorle_release_params,
+ .name = "lzo-rle",
+};
diff --git a/drivers/block/zram/backend_lzorle.h b/drivers/block/zram/backend_lzorle.h
new file mode 100644
index 000000000000..6ecb163b09f1
--- /dev/null
+++ b/drivers/block/zram/backend_lzorle.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_LZORLE_H__
+#define __BACKEND_LZORLE_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_lzorle;
+
+#endif /* __BACKEND_LZORLE_H__ */
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
new file mode 100644
index 000000000000..1184c0036f44
--- /dev/null
+++ b/drivers/block/zram/backend_zstd.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/zstd.h>
+
+#include "backend_zstd.h"
+
+struct zstd_ctx {
+ zstd_cctx *cctx;
+ zstd_dctx *dctx;
+ void *cctx_mem;
+ void *dctx_mem;
+};
+
+struct zstd_params {
+ zstd_custom_mem custom_mem;
+ zstd_cdict *cdict;
+ zstd_ddict *ddict;
+ zstd_parameters cprm;
+};
+
+/*
+ * For C/D dictionaries we need to provide zstd with zstd_custom_mem,
+ * which zstd uses internally to allocate/free memory when needed.
+ *
+ * This means that allocator.customAlloc() can be called from zcomp_compress()
+ * under local-lock (per-CPU compression stream), in which case we must use
+ * GFP_ATOMIC.
+ *
+ * Another complication here is that we can be configured as a swap device.
+ */
+static void *zstd_custom_alloc(void *opaque, size_t size)
+{
+ if (!preemptible())
+ return kvzalloc(size, GFP_ATOMIC);
+
+ return kvzalloc(size, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN);
+}
+
+static void zstd_custom_free(void *opaque, void *address)
+{
+ kvfree(address);
+}
+
+static void zstd_release_params(struct zcomp_params *params)
+{
+ struct zstd_params *zp = params->drv_data;
+
+ params->drv_data = NULL;
+ if (!zp)
+ return;
+
+ zstd_free_cdict(zp->cdict);
+ zstd_free_ddict(zp->ddict);
+ kfree(zp);
+}
+
+static int zstd_setup_params(struct zcomp_params *params)
+{
+ zstd_compression_parameters prm;
+ struct zstd_params *zp;
+
+ zp = kzalloc(sizeof(*zp), GFP_KERNEL);
+ if (!zp)
+ return -ENOMEM;
+
+ params->drv_data = zp;
+ if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ params->level = zstd_default_clevel();
+
+ zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
+
+ zp->custom_mem.customAlloc = zstd_custom_alloc;
+ zp->custom_mem.customFree = zstd_custom_free;
+
+ prm = zstd_get_cparams(params->level, PAGE_SIZE,
+ params->dict_sz);
+
+ zp->cdict = zstd_create_cdict_byreference(params->dict,
+ params->dict_sz,
+ prm,
+ zp->custom_mem);
+ if (!zp->cdict)
+ goto error;
+
+ zp->ddict = zstd_create_ddict_byreference(params->dict,
+ params->dict_sz,
+ zp->custom_mem);
+ if (!zp->ddict)
+ goto error;
+
+ return 0;
+
+error:
+ zstd_release_params(params);
+ return -EINVAL;
+}
+
+static void zstd_destroy(struct zcomp_ctx *ctx)
+{
+ struct zstd_ctx *zctx = ctx->context;
+
+ if (!zctx)
+ return;
+
+ /*
+ * If ->cctx_mem and ->dctx_mem were allocated then we didn't use
+ * a C/D dictionary and ->cctx / ->dctx were "embedded" into these
+ * buffers.
+ *
+ * Otherwise, we need to explicitly release ->cctx / ->dctx.
+ */
+ if (zctx->cctx_mem)
+ vfree(zctx->cctx_mem);
+ else
+ zstd_free_cctx(zctx->cctx);
+
+ if (zctx->dctx_mem)
+ vfree(zctx->dctx_mem);
+ else
+ zstd_free_dctx(zctx->dctx);
+
+ kfree(zctx);
+}
+
+static int zstd_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
+{
+ struct zstd_ctx *zctx;
+ zstd_parameters prm;
+ size_t sz;
+
+ zctx = kzalloc(sizeof(*zctx), GFP_KERNEL);
+ if (!zctx)
+ return -ENOMEM;
+
+ ctx->context = zctx;
+ if (params->dict_sz == 0) {
+ prm = zstd_get_params(params->level, PAGE_SIZE);
+ sz = zstd_cctx_workspace_bound(&prm.cParams);
+ zctx->cctx_mem = vzalloc(sz);
+ if (!zctx->cctx_mem)
+ goto error;
+
+ zctx->cctx = zstd_init_cctx(zctx->cctx_mem, sz);
+ if (!zctx->cctx)
+ goto error;
+
+ sz = zstd_dctx_workspace_bound();
+ zctx->dctx_mem = vzalloc(sz);
+ if (!zctx->dctx_mem)
+ goto error;
+
+ zctx->dctx = zstd_init_dctx(zctx->dctx_mem, sz);
+ if (!zctx->dctx)
+ goto error;
+ } else {
+ struct zstd_params *zp = params->drv_data;
+
+ zctx->cctx = zstd_create_cctx_advanced(zp->custom_mem);
+ if (!zctx->cctx)
+ goto error;
+
+ zctx->dctx = zstd_create_dctx_advanced(zp->custom_mem);
+ if (!zctx->dctx)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ zstd_release_params(params);
+ zstd_destroy(ctx);
+ return -EINVAL;
+}
+
+static int zstd_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct zstd_params *zp = params->drv_data;
+ struct zstd_ctx *zctx = ctx->context;
+ size_t ret;
+
+ if (params->dict_sz == 0)
+ ret = zstd_compress_cctx(zctx->cctx, req->dst, req->dst_len,
+ req->src, req->src_len, &zp->cprm);
+ else
+ ret = zstd_compress_using_cdict(zctx->cctx, req->dst,
+ req->dst_len, req->src,
+ req->src_len,
+ zp->cdict);
+ if (zstd_is_error(ret))
+ return -EINVAL;
+ req->dst_len = ret;
+ return 0;
+}
+
+static int zstd_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req)
+{
+ struct zstd_params *zp = params->drv_data;
+ struct zstd_ctx *zctx = ctx->context;
+ size_t ret;
+
+ if (params->dict_sz == 0)
+ ret = zstd_decompress_dctx(zctx->dctx, req->dst, req->dst_len,
+ req->src, req->src_len);
+ else
+ ret = zstd_decompress_using_ddict(zctx->dctx, req->dst,
+ req->dst_len, req->src,
+ req->src_len, zp->ddict);
+ if (zstd_is_error(ret))
+ return -EINVAL;
+ return 0;
+}
+
+const struct zcomp_ops backend_zstd = {
+ .compress = zstd_compress,
+ .decompress = zstd_decompress,
+ .create_ctx = zstd_create,
+ .destroy_ctx = zstd_destroy,
+ .setup_params = zstd_setup_params,
+ .release_params = zstd_release_params,
+ .name = "zstd",
+};
diff --git a/drivers/block/zram/backend_zstd.h b/drivers/block/zram/backend_zstd.h
new file mode 100644
index 000000000000..10fdfff1ec1c
--- /dev/null
+++ b/drivers/block/zram/backend_zstd.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __BACKEND_ZSTD_H__
+#define __BACKEND_ZSTD_H__
+
+#include "zcomp.h"
+
+extern const struct zcomp_ops backend_zstd;
+
+#endif /* __BACKEND_ZSTD_H__ */
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 8237b08c49d8..bb514403e305 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -1,7 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- */
#include <linux/kernel.h>
#include <linux/string.h>
@@ -15,91 +12,97 @@
#include "zcomp.h"
-static const char * const backends[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_LZO)
- "lzo",
- "lzo-rle",
+#include "backend_lzo.h"
+#include "backend_lzorle.h"
+#include "backend_lz4.h"
+#include "backend_lz4hc.h"
+#include "backend_zstd.h"
+#include "backend_deflate.h"
+#include "backend_842.h"
+
+static const struct zcomp_ops *backends[] = {
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
+ &backend_lzorle,
+ &backend_lzo,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
- "lz4",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
+ &backend_lz4,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
- "lz4hc",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
+ &backend_lz4hc,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_842)
- "842",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
+ &backend_zstd,
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
- "zstd",
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
+ &backend_deflate,
#endif
+#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
+ &backend_842,
+#endif
+ NULL
};
-static void zcomp_strm_free(struct zcomp_strm *zstrm)
+static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
- if (!IS_ERR_OR_NULL(zstrm->tfm))
- crypto_free_comp(zstrm->tfm);
+ comp->ops->destroy_ctx(&zstrm->ctx);
vfree(zstrm->buffer);
- zstrm->tfm = NULL;
zstrm->buffer = NULL;
}
-/*
- * Initialize zcomp_strm structure with ->tfm initialized by backend, and
- * ->buffer. Return a negative value on error.
- */
-static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
+static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
{
- zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
+ int ret;
+
+ ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
+ if (ret)
+ return ret;
+
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
zstrm->buffer = vzalloc(2 * PAGE_SIZE);
- if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
- zcomp_strm_free(zstrm);
+ if (!zstrm->buffer) {
+ zcomp_strm_free(comp, zstrm);
return -ENOMEM;
}
return 0;
}
+static const struct zcomp_ops *lookup_backend_ops(const char *comp)
+{
+ int i = 0;
+
+ while (backends[i]) {
+ if (sysfs_streq(comp, backends[i]->name))
+ break;
+ i++;
+ }
+ return backends[i];
+}
+
bool zcomp_available_algorithm(const char *comp)
{
- /*
- * Crypto does not ignore a trailing new line symbol,
- * so make sure you don't supply a string containing
- * one.
- * This also means that we permit zcomp initialisation
- * with any compressing algorithm known to crypto api.
- */
- return crypto_has_comp(comp, 0, 0) == 1;
+ return lookup_backend_ops(comp) != NULL;
}
/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
- bool known_algorithm = false;
ssize_t sz = 0;
int i;
- for (i = 0; i < ARRAY_SIZE(backends); i++) {
- if (!strcmp(comp, backends[i])) {
- known_algorithm = true;
+ for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
+ if (!strcmp(comp, backends[i]->name)) {
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", backends[i]);
+ "[%s] ", backends[i]->name);
} else {
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "%s ", backends[i]);
+ "%s ", backends[i]->name);
}
}
- /*
- * Out-of-tree module known to crypto api or a missing
- * entry in `backends'.
- */
- if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", comp);
-
sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
return sz;
}
@@ -115,38 +118,34 @@ void zcomp_stream_put(struct zcomp *comp)
local_unlock(&comp->stream->lock);
}
-int zcomp_compress(struct zcomp_strm *zstrm,
- const void *src, unsigned int *dst_len)
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len)
{
- /*
- * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
- * because sometimes we can endup having a bigger compressed data
- * due to various reasons: for example compression algorithms tend
- * to add some padding to the compressed buffer. Speaking of padding,
- * comp algorithm `842' pads the compressed length to multiple of 8
- * and returns -ENOSP when the dst memory is not big enough, which
- * is not something that ZRAM wants to see. We can handle the
- * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
- * receive -ERRNO from the compressing backend we can't help it
- * anymore. To make `842' happy we need to tell the exact size of
- * the dst buffer, zram_drv will take care of the fact that
- * compressed buffer is too big.
- */
- *dst_len = PAGE_SIZE * 2;
+ struct zcomp_req req = {
+ .src = src,
+ .dst = zstrm->buffer,
+ .src_len = PAGE_SIZE,
+ .dst_len = 2 * PAGE_SIZE,
+ };
+ int ret;
- return crypto_comp_compress(zstrm->tfm,
- src, PAGE_SIZE,
- zstrm->buffer, dst_len);
+ ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
+ if (!ret)
+ *dst_len = req.dst_len;
+ return ret;
}
-int zcomp_decompress(struct zcomp_strm *zstrm,
- const void *src, unsigned int src_len, void *dst)
+int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst)
{
- unsigned int dst_len = PAGE_SIZE;
-
- return crypto_comp_decompress(zstrm->tfm,
- src, src_len,
- dst, &dst_len);
+ struct zcomp_req req = {
+ .src = src,
+ .dst = dst,
+ .src_len = src_len,
+ .dst_len = PAGE_SIZE,
+ };
+
+ return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
@@ -158,7 +157,7 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
zstrm = per_cpu_ptr(comp->stream, cpu);
local_lock_init(&zstrm->lock);
- ret = zcomp_strm_init(zstrm, comp);
+ ret = zcomp_strm_init(comp, zstrm);
if (ret)
pr_err("Can't allocate a compression stream\n");
return ret;
@@ -170,11 +169,11 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
struct zcomp_strm *zstrm;
zstrm = per_cpu_ptr(comp->stream, cpu);
- zcomp_strm_free(zstrm);
+ zcomp_strm_free(comp, zstrm);
return 0;
}
-static int zcomp_init(struct zcomp *comp)
+static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
int ret;
@@ -182,12 +181,19 @@ static int zcomp_init(struct zcomp *comp)
if (!comp->stream)
return -ENOMEM;
+ comp->params = params;
+ ret = comp->ops->setup_params(comp->params);
+ if (ret)
+ goto cleanup;
+
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
if (ret < 0)
goto cleanup;
+
return 0;
cleanup:
+ comp->ops->release_params(comp->params);
free_percpu(comp->stream);
return ret;
}
@@ -195,37 +201,35 @@ cleanup:
void zcomp_destroy(struct zcomp *comp)
{
cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ comp->ops->release_params(comp->params);
free_percpu(comp->stream);
kfree(comp);
}
-/*
- * search available compressors for requested algorithm.
- * allocate new zcomp and initialize it. return compressing
- * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
- * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error, or any other error potentially
- * returned by zcomp_init().
- */
-struct zcomp *zcomp_create(const char *alg)
+struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
{
struct zcomp *comp;
int error;
/*
- * Crypto API will execute /sbin/modprobe if the compression module
- * is not loaded yet. We must do it here, otherwise we are about to
- * call /sbin/modprobe under CPU hot-plug lock.
+ * The backends array has a sentinel NULL value, so the minimum
+ * size is 1. For the array to be valid it should, apart from the
+ * sentinel NULL element, have at least one compression backend
+ * selected.
*/
- if (!zcomp_available_algorithm(alg))
- return ERR_PTR(-EINVAL);
+ BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);
comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
if (!comp)
return ERR_PTR(-ENOMEM);
- comp->name = alg;
- error = zcomp_init(comp);
+ comp->ops = lookup_backend_ops(alg);
+ if (!comp->ops) {
+ kfree(comp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ error = zcomp_init(comp, params);
if (error) {
kfree(comp);
return ERR_PTR(error);
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index e9fe63da0e9b..ad5762813842 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -1,24 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2014 Sergey Senozhatsky.
- */
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
+
#include <linux/local_lock.h>
+#define ZCOMP_PARAM_NO_LEVEL INT_MIN
+
+/*
+ * Immutable driver (backend) parameters. The driver may attach private
+ * data to it (e.g. driver representation of the dictionary, etc.).
+ *
+ * This data is kept per-comp and is shared among execution contexts.
+ */
+struct zcomp_params {
+ void *dict;
+ size_t dict_sz;
+ s32 level;
+
+ void *drv_data;
+};
+
+/*
+ * Run-time driver context - scratch buffers, etc. It is modified during
+ * request execution (compression/decompression) and cannot be shared, so
+ * it lives in a per-CPU area.
+ */
+struct zcomp_ctx {
+ void *context;
+};
+
struct zcomp_strm {
- /* The members ->buffer and ->tfm are protected by ->lock. */
local_lock_t lock;
- /* compression/decompression buffer */
+ /* compression buffer */
void *buffer;
- struct crypto_comp *tfm;
+ struct zcomp_ctx ctx;
+};
+
+struct zcomp_req {
+ const unsigned char *src;
+ const size_t src_len;
+
+ unsigned char *dst;
+ size_t dst_len;
+};
+
+struct zcomp_ops {
+ int (*compress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req);
+ int (*decompress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
+ struct zcomp_req *req);
+
+ int (*create_ctx)(struct zcomp_params *params, struct zcomp_ctx *ctx);
+ void (*destroy_ctx)(struct zcomp_ctx *ctx);
+
+ int (*setup_params)(struct zcomp_params *params);
+ void (*release_params)(struct zcomp_params *params);
+
+ const char *name;
};
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm __percpu *stream;
- const char *name;
+ const struct zcomp_ops *ops;
+ struct zcomp_params *params;
struct hlist_node node;
};
@@ -27,16 +73,15 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
ssize_t zcomp_available_show(const char *comp, char *buf);
bool zcomp_available_algorithm(const char *comp);
-struct zcomp *zcomp_create(const char *alg);
+struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
void zcomp_destroy(struct zcomp *comp);
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
void zcomp_stream_put(struct zcomp *comp);
-int zcomp_compress(struct zcomp_strm *zstrm,
- const void *src, unsigned int *dst_len);
-
-int zcomp_decompress(struct zcomp_strm *zstrm,
- const void *src, unsigned int src_len, void *dst);
+int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len);
+int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst);
#endif /* _ZCOMP_H_ */
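For orientation, a sketch of the smallest possible backend against the ops table above: a hypothetical pass-through "copy" backend. It is not part of the patch (real backends such as backend_lzo.c follow the same shape) and would still need an entry in the backends[] array in zcomp.c plus a Kconfig knob to be selectable.

#include <linux/string.h>
#include "zcomp.h"

static int copy_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
			 struct zcomp_req *req)
{
	if (req->dst_len < req->src_len)
		return -EINVAL;
	memcpy(req->dst, req->src, req->src_len);
	req->dst_len = req->src_len;
	return 0;
}

static int copy_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
			   struct zcomp_req *req)
{
	if (req->dst_len < req->src_len)
		return -EINVAL;
	memcpy(req->dst, req->src, req->src_len);
	return 0;
}

/* No per-CPU scratch state and no tunable params are needed. */
static int copy_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
{
	return 0;
}

static void copy_destroy(struct zcomp_ctx *ctx)
{
}

static int copy_setup_params(struct zcomp_params *params)
{
	return 0;
}

static void copy_release_params(struct zcomp_params *params)
{
}

const struct zcomp_ops backend_copy = {
	.compress	= copy_compress,
	.decompress	= copy_decompress,
	.create_ctx	= copy_create,
	.destroy_ctx	= copy_destroy,
	.setup_params	= copy_setup_params,
	.release_params	= copy_release_params,
	.name		= "copy",
};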
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 84cd62efac0f..ad9c9bc3ccfc 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -33,6 +33,7 @@
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>
+#include <linux/kernel_read_file.h>
#include "zram_drv.h"
@@ -998,6 +999,103 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
return 0;
}
+static void comp_params_reset(struct zram *zram, u32 prio)
+{
+ struct zcomp_params *params = &zram->params[prio];
+
+ vfree(params->dict);
+ params->level = ZCOMP_PARAM_NO_LEVEL;
+ params->dict_sz = 0;
+ params->dict = NULL;
+}
+
+static int comp_params_store(struct zram *zram, u32 prio, s32 level,
+ const char *dict_path)
+{
+ ssize_t sz = 0;
+
+ comp_params_reset(zram, prio);
+
+ if (dict_path) {
+ sz = kernel_read_file_from_path(dict_path, 0,
+ &zram->params[prio].dict,
+ INT_MAX,
+ NULL,
+ READING_POLICY);
+ if (sz < 0)
+ return -EINVAL;
+ }
+
+ zram->params[prio].dict_sz = sz;
+ zram->params[prio].level = level;
+ return 0;
+}
+
+static ssize_t algorithm_params_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
+ char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct zram *zram = dev_to_zram(dev);
+ int ret;
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ if (!val || !*val)
+ return -EINVAL;
+
+ if (!strcmp(param, "priority")) {
+ ret = kstrtoint(val, 10, &prio);
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!strcmp(param, "level")) {
+ ret = kstrtoint(val, 10, &level);
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!strcmp(param, "algo")) {
+ algo = val;
+ continue;
+ }
+
+ if (!strcmp(param, "dict")) {
+ dict_path = val;
+ continue;
+ }
+ }
+
+ /* Lookup priority by algorithm name */
+ if (algo) {
+ s32 p;
+
+ prio = -EINVAL;
+ for (p = ZRAM_PRIMARY_COMP; p < ZRAM_MAX_COMPS; p++) {
+ if (!zram->comp_algs[p])
+ continue;
+
+ if (!strcmp(zram->comp_algs[p], algo)) {
+ prio = p;
+ break;
+ }
+ }
+ }
+
+ if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
+ return -EINVAL;
+
+ ret = comp_params_store(zram, prio, level, dict_path);
+ return ret ? ret : len;
+}
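For completeness, a hypothetical userspace snippet driving the new attribute; the device path, level, and dictionary path are assumptions, and the named algorithm must already be selected via comp_algorithm or recomp_algorithm:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* e.g. set level 9 and a dictionary for an already-selected zstd */
	const char *params = "algo=zstd level=9 dict=/etc/zram/zstd.dict";
	int fd = open("/sys/block/zram0/algorithm_params", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, params, strlen(params)) < 0)
		perror("write");
	close(fd);
	return 0;
}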
+
static ssize_t comp_algorithm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1330,7 +1428,8 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
ret = 0;
} else {
dst = kmap_local_page(page);
- ret = zcomp_decompress(zstrm, src, size, dst);
+ ret = zcomp_decompress(zram->comps[prio], zstrm,
+ src, size, dst);
kunmap_local(dst);
zcomp_stream_put(zram->comps[prio]);
}
@@ -1417,7 +1516,8 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
src = kmap_local_page(page);
- ret = zcomp_compress(zstrm, src, &comp_len);
+ ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
+ src, &comp_len);
kunmap_local(src);
if (unlikely(ret)) {
@@ -1604,7 +1704,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
src = kmap_local_page(page);
- ret = zcomp_compress(zstrm, src, &comp_len_new);
+ ret = zcomp_compress(zram->comps[prio], zstrm,
+ src, &comp_len_new);
kunmap_local(src);
if (ret) {
@@ -1757,6 +1858,18 @@ static ssize_t recompress_store(struct device *dev,
algo = val;
continue;
}
+
+ if (!strcmp(param, "priority")) {
+ ret = kstrtouint(val, 10, &prio);
+ if (ret)
+ return ret;
+
+ if (prio == ZRAM_PRIMARY_COMP)
+ prio = ZRAM_SECONDARY_COMP;
+
+ prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+ continue;
+ }
}
if (threshold >= huge_class_size)
@@ -1979,6 +2092,15 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram_slot_unlock(zram, index);
}
+static void zram_comp_params_reset(struct zram *zram)
+{
+ u32 prio;
+
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+ comp_params_reset(zram, prio);
+ }
+}
+
static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
@@ -1992,6 +2114,15 @@ static void zram_destroy_comps(struct zram *zram)
zcomp_destroy(comp);
zram->num_active_comps--;
}
+
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+ /* Do not free statically defined compression algorithms */
+ if (zram->comp_algs[prio] != default_compressor)
+ kfree(zram->comp_algs[prio]);
+ zram->comp_algs[prio] = NULL;
+ }
+
+ zram_comp_params_reset(zram);
}
static void zram_reset_device(struct zram *zram)
@@ -2049,7 +2180,8 @@ static ssize_t disksize_store(struct device *dev,
if (!zram->comp_algs[prio])
continue;
- comp = zcomp_create(zram->comp_algs[prio]);
+ comp = zcomp_create(zram->comp_algs[prio],
+ &zram->params[prio]);
if (IS_ERR(comp)) {
pr_err("Cannot initialise %s compressing backend\n",
zram->comp_algs[prio]);
@@ -2152,6 +2284,7 @@ static DEVICE_ATTR_RW(writeback_limit_enable);
static DEVICE_ATTR_RW(recomp_algorithm);
static DEVICE_ATTR_WO(recompress);
#endif
+static DEVICE_ATTR_WO(algorithm_params);
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -2179,6 +2312,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_recomp_algorithm.attr,
&dev_attr_recompress.attr,
#endif
+ &dev_attr_algorithm_params.attr,
NULL,
};
@@ -2254,6 +2388,7 @@ static int zram_add(void)
if (ret)
goto out_cleanup_disk;
+ zram_comp_params_reset(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
zram_debugfs_register(zram);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 531cefc66668..cfc8c059db63 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -106,6 +106,7 @@ struct zram {
struct zram_table_entry *table;
struct zs_pool *mem_pool;
struct zcomp *comps[ZRAM_MAX_COMPS];
+ struct zcomp_params params[ZRAM_MAX_COMPS];
struct gendisk *disk;
/* Prevent concurrent execution of device init */
struct rw_semaphore init_lock;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 43e9ac5a3324..aa6af351d02d 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -679,7 +679,6 @@ static const struct file_operations vhci_fops = {
.poll = vhci_poll,
.open = vhci_open,
.release = vhci_release,
- .llseek = no_llseek,
};
static struct miscdevice vhci_miscdev = {
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index dd68b8191a0a..930d8a3ba722 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -309,7 +309,7 @@ static struct attribute *fsl_mc_bus_attrs[] = {
ATTRIBUTE_GROUPS(fsl_mc_bus);
-struct bus_type fsl_mc_bus_type = {
+const struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index ce7d2e62c2f1..a9b1f8beee7b 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -1464,7 +1464,7 @@ static int mhi_match(struct device *dev, const struct device_driver *drv)
return 0;
};
-struct bus_type mhi_bus_type = {
+const struct bus_type mhi_bus_type = {
.name = "mhi",
.dev_name = "mhi",
.match = mhi_match,
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index aaad40a07f69..d057e877932e 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -9,7 +9,7 @@
#include "../common.h"
-extern struct bus_type mhi_bus_type;
+extern const struct bus_type mhi_bus_type;
/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET 0xb0
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 14a11880bcea..9938bb034c1c 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -26,6 +26,7 @@
/* PCI VID definitions */
#define PCI_VENDOR_ID_THALES 0x1269
#define PCI_VENDOR_ID_QUECTEL 0x1eac
+#define PCI_VENDOR_ID_NETPRISMA 0x203e
#define MHI_EDL_DB 91
#define MHI_EDL_COOKIE 0xEDEDEDED
@@ -433,8 +434,8 @@ static const struct mhi_controller_config modem_foxconn_sdx72_config = {
static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.name = "foxconn-sdx55",
- .fw = "qcom/sdx55m/sbl1.mbn",
- .edl = "qcom/sdx55m/edl.mbn",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -444,8 +445,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
.name = "foxconn-t99w175",
- .fw = "qcom/sdx55m/sbl1.mbn",
- .edl = "qcom/sdx55m/edl.mbn",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -455,8 +456,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
.name = "foxconn-dw5930e",
- .fw = "qcom/sdx55m/sbl1.mbn",
- .edl = "qcom/sdx55m/edl.mbn",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -466,6 +467,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
.name = "foxconn-t99w368",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -475,6 +478,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
.name = "foxconn-t99w373",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -484,6 +489,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
.name = "foxconn-t99w510",
+ .edl = "qcom/sdx24m/foxconn/prog_firehose_sdx24.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -493,6 +500,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
.name = "foxconn-dw5932e",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -502,7 +511,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
.name = "foxconn-t99w515",
- .edl = "fox/sdx72m/edl.mbn",
+ .edl = "qcom/sdx72m/foxconn/edl.mbn",
.edl_trigger = true,
.config = &modem_foxconn_sdx72_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
@@ -513,7 +522,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
.name = "foxconn-dw5934e",
- .edl = "fox/sdx72m/edl.mbn",
+ .edl = "qcom/sdx72m/foxconn/edl.mbn",
.edl_trigger = true,
.config = &modem_foxconn_sdx72_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
@@ -680,6 +689,35 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
.mru_default = 32768,
};
+static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
+ .name = "telit-fe990a",
+ .config = &modem_telit_fn990_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+};
+
+static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
+ .name = "netprisma-lcur57",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
+static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
+ .name = "netprisma-fcun69",
+ .edl = "qcom/prog_firehose_sdx6x.elf",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
/* Keep the list sorted based on the PID. A new VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
@@ -697,9 +735,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FN990 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
- /* Telit FE990 */
+ /* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
- .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
@@ -778,6 +816,12 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* T99W175 (sdx55), HP variant */
{ PCI_DEVICE(0x03f0, 0x0a6c),
.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
+ /* NETPRISMA LCUR57 (SDX24) */
+ { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1000),
+ .driver_data = (kernel_ulong_t) &mhi_netprisma_lcur57_info },
+ /* NETPRISMA FCUN69 (SDX6X) */
+ { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1001),
+ .driver_data = (kernel_ulong_t) &mhi_netprisma_fcun69_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 8412406c4f1d..6276551d7968 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -484,7 +484,6 @@ static const struct file_operations input_fops = {
.owner = THIS_MODULE,
.open = moxtet_debug_open,
.read = input_read,
- .llseek = no_llseek,
};
static ssize_t output_read(struct file *file, char __user *buf, size_t len,
@@ -549,7 +548,6 @@ static const struct file_operations output_fops = {
.open = moxtet_debug_open,
.read = output_read,
.write = output_write,
- .llseek = no_llseek,
};
static int moxtet_register_debugfs(struct moxtet *moxtet)
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index 1eedc5eeb315..e760f8d347cc 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -27,10 +27,6 @@
#include "bitfield.h"
#include "mcdi.h"
-struct cdx_mcdi_copy_buffer {
- struct cdx_dword buffer[DIV_ROUND_UP(MCDI_CTL_SDU_LEN_MAX, 4)];
-};
-
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
static int cdx_mcdi_rpc_async_internal(struct cdx_mcdi *cdx,
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 69314532f38c..9fed9706d9cd 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -111,7 +111,6 @@ static irqreturn_t ac_interrupt(int, void *);
static const struct file_operations ac_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = ac_read,
.write = ac_write,
.unlocked_ioctl = ac_ioctl,
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index a4f4291b4492..44a1cdbd4bfb 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -353,7 +353,6 @@ static const struct file_operations ds1620_fops = {
.open = ds1620_open,
.read = ds1620_read,
.unlocked_ioctl = ds1620_unlocked_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice ds1620_miscdev = {
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 5a1a73310e97..27f5f9d19531 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -107,7 +107,6 @@ static const struct file_operations dtlk_fops =
.unlocked_ioctl = dtlk_ioctl,
.open = dtlk_open,
.release = dtlk_release,
- .llseek = no_llseek,
};
/* local prototypes */
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index da32e8ed0830..e904e476e49a 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -700,7 +700,6 @@ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations hpet_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = hpet_read,
.poll = hpet_poll,
.unlocked_ioctl = hpet_ioctl,
@@ -808,7 +807,7 @@ int hpet_alloc(struct hpet_data *hdp)
struct hpets *hpetp;
struct hpet __iomem *hpet;
static struct hpets *last;
- unsigned long period;
+ u32 period;
unsigned long long temp;
u32 remainder;
@@ -865,11 +864,11 @@ int hpet_alloc(struct hpet_data *hdp)
do_div(temp, period);
hpetp->hp_tick_freq = temp; /* ticks per second */
- printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
+ printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
hpetp->hp_ntimer > 1 ? "s" : "");
for (i = 0; i < hpetp->hp_ntimer; i++)
- printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+ printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
temp = hpetp->hp_tick_freq;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 9a459257489f..335eea80054e 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -903,7 +903,6 @@ static const struct file_operations ipmi_wdog_fops = {
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
- .llseek = no_llseek,
};
static struct miscdevice ipmi_wdog_miscdev = {
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index c39a836ebd15..5f4696813cea 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -235,7 +235,6 @@ static const struct file_operations pc8736x_gpio_fileops = {
.open = pc8736x_gpio_open,
.write = nsc_gpio_write,
.read = nsc_gpio_read,
- .llseek = no_llseek,
};
static void __init pc8736x_init_shadow(void)
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index eaff98dbaa8c..d1dfbd8d4d42 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -786,7 +786,6 @@ static const struct class ppdev_class = {
static const struct file_operations pp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = pp_read,
.write = pp_write,
.poll = pp_poll,
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 9f701dcba95c..700e6affea6f 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -68,7 +68,6 @@ static const struct file_operations scx200_gpio_fileops = {
.read = nsc_gpio_read,
.open = scx200_gpio_open,
.release = scx200_gpio_release,
- .llseek = no_llseek,
};
static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index bb5115b1736a..0f8185e541ed 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1054,7 +1054,6 @@ static const struct file_operations sonypi_misc_fops = {
.release = sonypi_misc_release,
.fasync = sonypi_misc_fasync,
.unlocked_ioctl = sonypi_misc_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice sonypi_misc_device = {
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index e2c0baa69fef..97c94b5e9340 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -59,7 +59,6 @@ static int tpm_release(struct inode *inode, struct file *file)
const struct file_operations tpm_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = tpm_open,
.read = tpm_common_read,
.write = tpm_common_write,
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 11c502039faf..8fe4a01eea12 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -243,7 +243,6 @@ static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
static const struct file_operations vtpm_proxy_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = vtpm_proxy_fops_read,
.write = vtpm_proxy_fops_write,
.poll = vtpm_proxy_fops_poll,
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index eef0fb06ea83..c25df7ea064e 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -46,7 +46,6 @@ static int tpmrm_release(struct inode *inode, struct file *file)
const struct file_operations tpmrm_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = tpmrm_open,
.read = tpm_common_read,
.write = tpm_common_write,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index de7d720d99fa..99a7f2441e70 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1093,7 +1093,6 @@ static const struct file_operations port_fops = {
.poll = port_fops_poll,
.release = port_fops_release,
.fasync = port_fops_fasync,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig
index efa12ac2b3f2..54ece9207055 100644
--- a/drivers/clk/.kunitconfig
+++ b/drivers/clk/.kunitconfig
@@ -1,6 +1,8 @@
CONFIG_KUNIT=y
+CONFIG_OF=y
CONFIG_COMMON_CLK=y
CONFIG_CLK_KUNIT_TEST=y
+CONFIG_CLK_FIXED_RATE_KUNIT_TEST=y
CONFIG_CLK_GATE_KUNIT_TEST=y
CONFIG_CLK_FD_KUNIT_TEST=y
CONFIG_UML_PCI_OVER_VIRTIO=n
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 983ef4f36d8c..299bc678ed1b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -218,6 +218,14 @@ config COMMON_CLK_EN7523
This driver provides the fixed clocks and gates present on Airoha
ARM silicon.
+config COMMON_CLK_EP93XX
+ tristate "Clock driver for Cirrus Logic ep93xx SoC"
+ depends on ARCH_EP93XX || COMPILE_TEST
+ select AUXILIARY_BUS
+ select REGMAP_MMIO
+ help
+ This driver supports the SoC clocks on the Cirrus Logic ep93xx.
+
config COMMON_CLK_FSL_FLEXSPI
tristate "Clock driver for FlexSPI on Layerscape SoCs"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
@@ -509,9 +517,20 @@ config CLK_KUNIT_TEST
tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
+ select OF_OVERLAY if OF
+ select DTC
help
Kunit tests for the common clock framework.
+config CLK_FIXED_RATE_KUNIT_TEST
+ tristate "Basic fixed rate clk type KUnit test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select OF_OVERLAY if OF
+ select DTC
+ help
+ KUnit tests for the basic fixed rate clk type.
+
config CLK_GATE_KUNIT_TEST
tristate "Basic gate type Kunit test" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index f793a16cad40..fb8878a5d7d9 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -2,10 +2,14 @@
# common clock types
obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o
-obj-$(CONFIG_CLK_KUNIT_TEST) += clk_test.o
+obj-$(CONFIG_CLK_KUNIT_TEST) += clk-test.o
+clk-test-y := clk_test.o \
+ kunit_clk_parent_data_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
+obj-$(CONFIG_CLK_FIXED_RATE_KUNIT_TEST) += clk-fixed-rate-test.o
+clk-fixed-rate-test-y := clk-fixed-rate_test.o kunit_clk_fixed_rate_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-gate.o
obj-$(CONFIG_CLK_GATE_KUNIT_TEST) += clk-gate_test.o
obj-$(CONFIG_COMMON_CLK) += clk-multiplier.o
@@ -18,6 +22,11 @@ ifeq ($(CONFIG_OF), y)
obj-$(CONFIG_COMMON_CLK) += clk-conf.o
endif
+# KUnit specific helpers
+ifeq ($(CONFIG_COMMON_CLK), y)
+obj-$(CONFIG_KUNIT) += clk_kunit_helpers.o
+endif
+
# hardware specific clock types
# please keep this section sorted lexicographically by file path name
obj-$(CONFIG_COMMON_CLK_APPLE_NCO) += clk-apple-nco.o
@@ -30,6 +39,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
+obj-$(CONFIG_COMMON_CLK_EP93XX) += clk-ep93xx.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 89061b85e7d2..8e3684ba2c74 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o dt-compat.
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o dt-compat.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o dt-compat.o
obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
+obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o
obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index ff65f7b916f0..fda041102224 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -23,9 +23,6 @@
#define UPLL_DIV 2
#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
-#define FCORE_MIN (600000000)
-#define FCORE_MAX (1200000000)
-
#define PLL_MAX_ID 7
struct sam9x60_pll_core {
@@ -76,9 +73,15 @@ static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw,
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_frac *frac = to_sam9x60_frac(core);
+ unsigned long freq;
- return parent_rate * (frac->mul + 1) +
+ freq = parent_rate * (frac->mul + 1) +
DIV_ROUND_CLOSEST_ULL((u64)parent_rate * frac->frac, (1 << 22));
+
+ if (core->layout->div2)
+ freq >>= 1;
+
+ return freq;
}
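
/*
 * Worked example (illustrative, not from this patch): with a 24 MHz parent,
 * mul = 49 and frac = 0, the core output is 24 MHz * 50 = 1200 MHz; layouts
 * with the div2 bit set report half of that, 600 MHz.
 */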
static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
@@ -194,7 +197,8 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
unsigned long nmul = 0;
unsigned long nfrac = 0;
- if (rate < FCORE_MIN || rate > FCORE_MAX)
+ if (rate < core->characteristics->core_output[0].min ||
+ rate > core->characteristics->core_output[0].max)
return -ERANGE;
/*
@@ -214,7 +218,8 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
}
/* Check if the resulting rate is valid. */
- if (tmprate < FCORE_MIN || tmprate > FCORE_MAX)
+ if (tmprate < core->characteristics->core_output[0].min ||
+ tmprate > core->characteristics->core_output[0].max)
return -ERANGE;
if (update) {
@@ -433,6 +438,12 @@ static unsigned long sam9x60_div_pll_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST_ULL(parent_rate, (div->div + 1));
}
+static unsigned long sam9x60_fixed_div_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate >> 1;
+}
+
static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
unsigned long *parent_rate,
unsigned long rate)
@@ -607,6 +618,16 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.restore_context = sam9x60_div_pll_restore_context,
};
+static const struct clk_ops sam9x60_fixed_div_pll_ops = {
+ .prepare = sam9x60_div_pll_prepare,
+ .unprepare = sam9x60_div_pll_unprepare,
+ .is_prepared = sam9x60_div_pll_is_prepared,
+ .recalc_rate = sam9x60_fixed_div_pll_recalc_rate,
+ .round_rate = sam9x60_div_pll_round_rate,
+ .save_context = sam9x60_div_pll_save_context,
+ .restore_context = sam9x60_div_pll_restore_context,
+};
+
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
@@ -669,7 +690,8 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
goto free;
}
- ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
+ ret = sam9x60_frac_pll_compute_mul_frac(&frac->core,
+ characteristics->core_output[0].min,
parent_rate, true);
if (ret < 0) {
hw = ERR_PTR(ret);
@@ -725,10 +747,14 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
else
init.parent_names = &parent_name;
init.num_parents = 1;
- if (flags & CLK_SET_RATE_GATE)
+
+ if (layout->div2)
+ init.ops = &sam9x60_fixed_div_pll_ops;
+ else if (flags & CLK_SET_RATE_GATE)
init.ops = &sam9x60_div_pll_ops;
else
init.ops = &sam9x60_div_pll_ops_chg;
+
init.flags = flags;
div->core.id = id;
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index a32dc2111b90..f5a5f9ba7634 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -563,9 +563,10 @@ of_at91_clk_pll_get_characteristics(struct device_node *np)
if (num_cells < 2 || num_cells > 4)
return NULL;
- if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp))
+ num_output = of_property_count_u32_elems(np, "atmel,pll-clk-output-ranges");
+ if (num_output <= 0)
return NULL;
- num_output = tmp / (sizeof(u32) * num_cells);
+ num_output /= num_cells;
characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
if (!characteristics)
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 0f52e80bcd49..4fb29ca111f7 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -64,6 +64,7 @@ struct clk_pll_layout {
u8 frac_shift;
u8 div_shift;
u8 endiv_shift;
+ u8 div2;
};
extern const struct clk_pll_layout at91rm9200_pll_layout;
@@ -75,6 +76,7 @@ struct clk_pll_characteristics {
struct clk_range input;
int num_output;
const struct clk_range *output;
+ const struct clk_range *core_output;
u16 *icpll;
u8 *out;
u8 upll : 1;
@@ -119,6 +121,22 @@ struct at91_clk_pms {
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
+
+#define PMC_INIT_TABLE(_table, _count) \
+ do { \
+ u8 _i; \
+ for (_i = 0; _i < (_count); _i++) \
+ (_table)[_i] = _i; \
+ } while (0)
+
+#define PMC_FILL_TABLE(_to, _from, _count) \
+ do { \
+ u8 _i; \
+ for (_i = 0; _i < (_count); _i++) { \
+ (_to)[_i] = (_from)[_i]; \
+ } \
+ } while (0)
+
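+/*
+ * Illustrative expansion (not part of this patch): for u32 mux_table[6],
+ * PMC_INIT_TABLE(mux_table, 4) sets entries 0..3 to the values 0..3, and
+ * PMC_FILL_TABLE(&mux_table[4], src, 2) copies src[0] and src[1] into the
+ * two remaining slots.
+ */
+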
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
unsigned int nperiph, unsigned int ngck,
unsigned int npck);
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index e309cbf3cb9a..db6db9e2073e 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -26,10 +26,16 @@ static const struct clk_range plla_outputs[] = {
{ .min = 2343750, .max = 1200000000 },
};
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
static const struct clk_pll_characteristics plla_characteristics = {
.input = { .min = 12000000, .max = 48000000 },
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
+ .core_output = core_outputs,
};
static const struct clk_range upll_outputs[] = {
@@ -40,6 +46,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.input = { .min = 12000000, .max = 48000000 },
.num_output = ARRAY_SIZE(upll_outputs),
.output = upll_outputs,
+ .core_output = core_outputs,
.upll = true,
};
diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
new file mode 100644
index 000000000000..cbb8b220f16b
--- /dev/null
+++ b/drivers/clk/at91/sam9x7.c
@@ -0,0 +1,946 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAM9X7 PMC code.
+ *
+ * Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Varshini Rajendran <varshini.rajendran@microchip.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(mck_lock);
+
+/**
+ * enum pll_ids - PLL clocks identifiers
+ * @PLL_ID_PLLA: PLLA identifier
+ * @PLL_ID_UPLL: UPLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_LVDS: LVDS PLL identifier
+ * @PLL_ID_PLLA_DIV2: PLLA DIV2 identifier
+ * @PLL_ID_MAX: Maximum PLL identifier
+ */
+enum pll_ids {
+ PLL_ID_PLLA,
+ PLL_ID_UPLL,
+ PLL_ID_AUDIO,
+ PLL_ID_LVDS,
+ PLL_ID_PLLA_DIV2,
+ PLL_ID_MAX,
+};
+
+/**
+ * enum pll_type - PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV,
+};
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 32000000, .max = 266666667 },
+ .divisors = { 1, 2, 4, 3, 5},
+ .have_div3_pres = 1,
+};
+
+static const struct clk_master_layout sam9x7_master_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Fractional PLL core output range. */
+static const struct clk_range plla_core_outputs[] = {
+ { .min = 375000000, .max = 1600000000 },
+};
+
+static const struct clk_range upll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range lvdspll_core_outputs[] = {
+ { .min = 400000000, .max = 800000000 },
+};
+
+static const struct clk_range audiopll_core_outputs[] = {
+ { .min = 400000000, .max = 800000000 },
+};
+
+static const struct clk_range plladiv2_core_outputs[] = {
+ { .min = 375000000, .max = 1600000000 },
+};
+
+/* Fractional PLL output range. */
+static const struct clk_range plla_outputs[] = {
+ { .min = 732421, .max = 800000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 300000000, .max = 600000000 },
+};
+
+static const struct clk_range lvdspll_outputs[] = {
+ { .min = 10000000, .max = 800000000 },
+};
+
+static const struct clk_range audiopll_outputs[] = {
+ { .min = 10000000, .max = 800000000 },
+};
+
+static const struct clk_range plladiv2_outputs[] = {
+ { .min = 366210, .max = 400000000 },
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .core_output = plla_core_outputs,
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .core_output = upll_core_outputs,
+ .upll = true,
+};
+
+static const struct clk_pll_characteristics lvdspll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(lvdspll_outputs),
+ .output = lvdspll_outputs,
+ .core_output = lvdspll_core_outputs,
+};
+
+static const struct clk_pll_characteristics audiopll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(audiopll_outputs),
+ .output = audiopll_outputs,
+ .core_output = audiopll_core_outputs,
+};
+
+static const struct clk_pll_characteristics plladiv2_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(plladiv2_outputs),
+ .output = plladiv2_outputs,
+ .core_output = plladiv2_core_outputs,
+};
+
+/* Layout for fractional PLL ID PLLA. */
+static const struct clk_pll_layout plla_frac_layout = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+ .div2 = 1,
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_frac_layout = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+/* Layout for DIV PLLs. */
+static const struct clk_pll_layout pll_divpmc_layout = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIV PLL ID PLLADIV2. */
+static const struct clk_pll_layout plladiv2_divpmc_layout = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+ .div2 = 1,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_divio_layout = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/*
+ * PLL clocks description
+ * @n: clock name
+ * @p: clock parent
+ * @l: clock layout
+ * @t: clock type
+ * @c: pll characteristics
+ * @f: clock flags
+ * @eid: export index in sam9x7->chws[] array
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ const struct clk_pll_layout *l;
+ u8 t;
+ const struct clk_pll_characteristics *c;
+ unsigned long f;
+ u8 eid;
+} sam9x7_plls[][3] = {
+ [PLL_ID_PLLA] = {
+ {
+ .n = "plla_fracck",
+ .p = "mainck",
+ .l = &plla_frac_layout,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds plla_divpmcck, which feeds the CPU. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .c = &plla_characteristics,
+ },
+
+ {
+ .n = "plla_divpmcck",
+ .p = "plla_fracck",
+ .l = &pll_divpmc_layout,
+ .t = PLL_TYPE_DIV,
+ /* This feeds the CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_PLLACK,
+ .c = &plla_characteristics,
+ },
+ },
+
+ [PLL_ID_UPLL] = {
+ {
+ .n = "upll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ .c = &upll_characteristics,
+ },
+
+ {
+ .n = "upll_divpmcck",
+ .p = "upll_fracck",
+ .l = &pll_divpmc_layout,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_UTMI,
+ .c = &upll_characteristics,
+ },
+ },
+
+ [PLL_ID_AUDIO] = {
+ {
+ .n = "audiopll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .f = CLK_SET_RATE_GATE,
+ .c = &audiopll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ },
+
+ {
+ .n = "audiopll_divpmcck",
+ .p = "audiopll_fracck",
+ .l = &pll_divpmc_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &audiopll_characteristics,
+ .eid = PMC_AUDIOPMCPLL,
+ .t = PLL_TYPE_DIV,
+ },
+
+ {
+ .n = "audiopll_diviock",
+ .p = "audiopll_fracck",
+ .l = &pll_divio_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &audiopll_characteristics,
+ .eid = PMC_AUDIOIOPLL,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+
+ [PLL_ID_LVDS] = {
+ {
+ .n = "lvdspll_fracck",
+ .p = "main_osc",
+ .l = &pll_frac_layout,
+ .f = CLK_SET_RATE_GATE,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ },
+
+ {
+ .n = "lvdspll_divpmcck",
+ .p = "lvdspll_fracck",
+ .l = &pll_divpmc_layout,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .c = &lvdspll_characteristics,
+ .eid = PMC_LVDSPLL,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+
+ [PLL_ID_PLLA_DIV2] = {
+ {
+ .n = "plla_div2pmcck",
+ .p = "plla_fracck",
+ .l = &plladiv2_divpmc_layout,
+ /*
+ * This may feed critical parts of the system like timers.
+ * It should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .c = &plladiv2_characteristics,
+ .eid = PMC_PLLADIV2,
+ .t = PLL_TYPE_DIV,
+ },
+ },
+};
+
+static const struct clk_programmable_layout sam9x7_programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+static const struct clk_pcr_layout sam9x7_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+ unsigned long flags;
+} sam9x7_systemck[] = {
+ /*
+ * ddrck feeds the DDR controller and is enabled by the bootloader, so we
+ * need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "ddrck", .p = "masterck_div", .id = 2, .flags = CLK_IS_CRITICAL },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+/*
+ * Peripheral clocks description
+ * @n: clock name
+ * @f: clock flags
+ * @id: peripheral id
+ */
+static const struct {
+ char *n;
+ unsigned long f;
+ u8 id;
+} sam9x7_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "flex0_clk", .id = 5, },
+ { .n = "flex1_clk", .id = 6, },
+ { .n = "flex2_clk", .id = 7, },
+ { .n = "flex3_clk", .id = 8, },
+ { .n = "flex6_clk", .id = 9, },
+ { .n = "flex7_clk", .id = 10, },
+ { .n = "flex8_clk", .id = 11, },
+ { .n = "sdmmc0_clk", .id = 12, },
+ { .n = "flex4_clk", .id = 13, },
+ { .n = "flex5_clk", .id = 14, },
+ { .n = "flex9_clk", .id = 15, },
+ { .n = "flex10_clk", .id = 16, },
+ { .n = "tcb0_clk", .id = 17, },
+ { .n = "pwm_clk", .id = 18, },
+ { .n = "adc_clk", .id = 19, },
+ { .n = "dma0_clk", .id = 20, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "udphs_clk", .id = 23, },
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "lcd_clk", .id = 25, },
+ { .n = "sdmmc1_clk", .id = 26, },
+ { .n = "ssc_clk", .id = 28, },
+ { .n = "can0_clk", .id = 29, },
+ { .n = "can1_clk", .id = 30, },
+ { .n = "flex11_clk", .id = 32, },
+ { .n = "flex12_clk", .id = 33, },
+ { .n = "i2s_clk", .id = 34, },
+ { .n = "qspi_clk", .id = 35, },
+ { .n = "gfx2d_clk", .id = 36, },
+ { .n = "pit64b0_clk", .id = 37, },
+ { .n = "trng_clk", .id = 38, },
+ { .n = "aes_clk", .id = 39, },
+ { .n = "tdes_clk", .id = 40, },
+ { .n = "sha_clk", .id = 41, },
+ { .n = "classd_clk", .id = 42, },
+ { .n = "isi_clk", .id = 43, },
+ { .n = "pioD_clk", .id = 44, },
+ { .n = "tcb1_clk", .id = 45, },
+ { .n = "dbgu_clk", .id = 47, },
+ /*
+ * mpddr_clk feeds the DDR controller and is enabled by the bootloader, so
+ * we need to keep it enabled in case there is no Linux consumer for it.
+ */
+ { .n = "mpddr_clk", .id = 49, .f = CLK_IS_CRITICAL },
+ { .n = "csi2dc_clk", .id = 52, },
+ { .n = "csi4l_clk", .id = 53, },
+ { .n = "dsi4l_clk", .id = 54, },
+ { .n = "lvdsc_clk", .id = 56, },
+ { .n = "pit64b1_clk", .id = 58, },
+ { .n = "puf_clk", .id = 59, },
+ { .n = "gmactsu_clk", .id = 67, },
+};
+
+/*
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: id in parent array of changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sam9x7_gck[] = {
+ {
+ .n = "flex0_gclk",
+ .id = 5,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex1_gclk",
+ .id = 6,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex2_gclk",
+ .id = 7,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex3_gclk",
+ .id = 8,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex6_gclk",
+ .id = 9,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex7_gclk",
+ .id = 10,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex8_gclk",
+ .id = 11,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "sdmmc0_gclk",
+ .id = 12,
+ .r = { .max = 105000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex4_gclk",
+ .id = 13,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex5_gclk",
+ .id = 14,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex9_gclk",
+ .id = 15,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex10_gclk",
+ .id = 16,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "tcb0_gclk",
+ .id = 17,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "adc_gclk",
+ .id = 19,
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "lcd_gclk",
+ .id = 25,
+ .r = { .max = 75000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "sdmmc1_gclk",
+ .id = 26,
+ .r = { .max = 105000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mcan0_gclk",
+ .id = 29,
+ .r = { .max = 80000000 },
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mcan1_gclk",
+ .id = 30,
+ .r = { .max = 80000000 },
+ .pp = { "upll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex11_gclk",
+ .id = 32,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "flex12_gclk",
+ .id = 33,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "i2s_gclk",
+ .id = 34,
+ .r = { .max = 100000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "qspi_gclk",
+ .id = 35,
+ .r = { .max = 200000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "pit64b0_gclk",
+ .id = 37,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "classd_gclk",
+ .id = 42,
+ .r = { .max = 100000000 },
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "tcb1_gclk",
+ .id = 45,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "dbgu_gclk",
+ .id = 47,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "mipiphy_gclk",
+ .id = 55,
+ .r = { .max = 27000000 },
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "pit64b1_gclk",
+ .id = 58,
+ .pp = { "plla_div2pmcck", },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN,
+ },
+
+ {
+ .n = "gmac_gclk",
+ .id = 67,
+ .pp = { "audiopll_divpmcck", "plla_div2pmcck", },
+ .pp_mux_table = { 6, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN,
+ },
+};
+
+static void __init sam9x7_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *td_slck_name, *md_slck_name, *mainxtal_name;
+ struct pmc_data *sam9x7_pmc;
+ const char *parent_names[9];
+ void **clk_mux_buffer = NULL;
+ int clk_mux_buffer_size = 0;
+ struct clk_hw *main_osc_hw;
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i, j;
+
+ i = of_property_match_string(np, "clock-names", "td_slck");
+ if (i < 0)
+ return;
+
+ td_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "md_slck");
+ if (i < 0)
+ return;
+
+ md_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sam9x7_pmc = pmc_data_allocate(PMC_LVDSPLL + 1,
+ nck(sam9x7_systemck),
+ nck(sam9x7_periphck),
+ nck(sam9x7_gck), 8);
+ if (!sam9x7_pmc)
+ return;
+
+ clk_mux_buffer = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sam9x7_gck)),
+ GFP_KERNEL);
+ if (!clk_mux_buffer)
+ goto err_free;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, NULL, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+ main_osc_hw = hw;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, NULL, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < 3; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sam9x7_plls[i][j].n)
+ continue;
+
+ switch (sam9x7_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ if (!strcmp(sam9x7_plls[i][j].p, "mainck"))
+ parent_hw = sam9x7_pmc->chws[PMC_MAIN];
+ else if (!strcmp(sam9x7_plls[i][j].p, "main_osc"))
+ parent_hw = main_osc_hw;
+ else
+ parent_hw = __clk_get_hw(of_clk_get_by_name
+ (np, sam9x7_plls[i][j].p));
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock,
+ sam9x7_plls[i][j].n,
+ sam9x7_plls[i][j].p,
+ parent_hw, i,
+ sam9x7_plls[i][j].c,
+ sam9x7_plls[i][j].l,
+ sam9x7_plls[i][j].f);
+ break;
+
+ case PLL_TYPE_DIV:
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock,
+ sam9x7_plls[i][j].n,
+ sam9x7_plls[i][j].p, NULL, i,
+ sam9x7_plls[i][j].c,
+ sam9x7_plls[i][j].l,
+ sam9x7_plls[i][j].f, 0);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ if (sam9x7_plls[i][j].eid)
+ sam9x7_pmc->chws[sam9x7_plls[i][j].eid] = hw;
+ }
+ }
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plla_divpmcck";
+ parent_names[3] = "upll_divpmcck";
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names, NULL, &sam9x7_master_layout,
+ &mck_characteristics, &mck_lock);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres", NULL, &sam9x7_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "plla_divpmcck";
+ parent_names[1] = "upll_divpmcck";
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "masterck_div";
+ parent_names[4] = "plla_divpmcck";
+ parent_names[5] = "upll_divpmcck";
+ parent_names[6] = "audiopll_divpmcck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, NULL, 7, i,
+ &sam9x7_programmable_layout,
+ NULL);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->pchws[i] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x7_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sam9x7_systemck[i].n,
+ sam9x7_systemck[i].p, NULL,
+ sam9x7_systemck[i].id,
+ sam9x7_systemck[i].flags);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->shws[sam9x7_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x7_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sam9x7_pcr_layout,
+ sam9x7_periphck[i].n,
+ "masterck_div", NULL,
+ sam9x7_periphck[i].id,
+ &range, INT_MIN,
+ sam9x7_periphck[i].f);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->phws[sam9x7_periphck[i].id] = hw;
+ }
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "masterck_div";
+ for (i = 0; i < ARRAY_SIZE(sam9x7_gck); i++) {
+ u8 num_parents = 4 + sam9x7_gck[i].pp_count;
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ PMC_INIT_TABLE(mux_table, 4);
+ PMC_FILL_TABLE(&mux_table[4], sam9x7_gck[i].pp_mux_table,
+ sam9x7_gck[i].pp_count);
+ PMC_FILL_TABLE(&parent_names[4], sam9x7_gck[i].pp,
+ sam9x7_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sam9x7_pcr_layout,
+ sam9x7_gck[i].n,
+ parent_names, NULL, mux_table,
+ num_parents,
+ sam9x7_gck[i].id,
+ &sam9x7_gck[i].r,
+ sam9x7_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x7_pmc->ghws[sam9x7_gck[i].id] = hw;
+ clk_mux_buffer[clk_mux_buffer_size++] = mux_table;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sam9x7_pmc);
+ kfree(clk_mux_buffer);
+
+ return;
+
+err_free:
+ if (clk_mux_buffer) {
+ for (i = 0; i < clk_mux_buffer_size; i++)
+ kfree(clk_mux_buffer[i]);
+ kfree(clk_mux_buffer);
+ }
+ kfree(sam9x7_pmc);
+}
+
+/* Some clks are used as a clocksource */
+CLK_OF_DECLARE(sam9x7_pmc, "microchip,sam9x7-pmc", sam9x7_pmc_setup);
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 91b5c6f14819..8385badc1c70 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -16,21 +16,6 @@
#include "pmc.h"
-#define SAMA7G5_INIT_TABLE(_table, _count) \
- do { \
- u8 _i; \
- for (_i = 0; _i < (_count); _i++) \
- (_table)[_i] = _i; \
- } while (0)
-
-#define SAMA7G5_FILL_TABLE(_to, _from, _count) \
- do { \
- u8 _i; \
- for (_i = 0; _i < (_count); _i++) { \
- (_to)[_i] = (_from)[_i]; \
- } \
- } while (0)
-
static DEFINE_SPINLOCK(pmc_pll_lock);
static DEFINE_SPINLOCK(pmc_mck0_lock);
static DEFINE_SPINLOCK(pmc_mckX_lock);
@@ -66,6 +51,7 @@ enum pll_component_id {
PLL_COMPID_FRAC,
PLL_COMPID_DIV0,
PLL_COMPID_DIV1,
+ PLL_COMPID_MAX,
};
/*
@@ -116,11 +102,17 @@ static const struct clk_range pll_outputs[] = {
{ .min = 2343750, .max = 1200000000 },
};
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
/* CPU PLL characteristics. */
static const struct clk_pll_characteristics cpu_pll_characteristics = {
.input = { .min = 12000000, .max = 50000000 },
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
+ .core_output = core_outputs,
};
/* PLL characteristics. */
@@ -128,6 +120,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.input = { .min = 12000000, .max = 50000000 },
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
+ .core_output = core_outputs,
};
/*
@@ -165,7 +158,7 @@ static struct sama7g5_pll {
u8 t;
u8 eid;
u8 safe_div;
-} sama7g5_plls[][PLL_ID_MAX] = {
+} sama7g5_plls[][PLL_COMPID_MAX] = {
[PLL_ID_CPU] = {
[PLL_COMPID_FRAC] = {
.n = "cpupll_fracck",
@@ -1038,7 +1031,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
sama7g5_pmc->chws[PMC_MAIN] = hw;
for (i = 0; i < PLL_ID_MAX; i++) {
- for (j = 0; j < 3; j++) {
+ for (j = 0; j < PLL_COMPID_MAX; j++) {
struct clk_hw *parent_hw;
if (!sama7g5_plls[i][j].n)
@@ -1112,17 +1105,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 3);
- SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
- sama7g5_mckx[i].ep_count);
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
+ sama7g5_mckx[i].ep_count);
for (j = 0; j < sama7g5_mckx[i].ep_count; j++) {
u8 pll_id = sama7g5_mckx[i].ep[j].pll_id;
u8 pll_compid = sama7g5_mckx[i].ep[j].pll_compid;
tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw;
}
- SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
- sama7g5_mckx[i].ep_count);
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7g5_mckx[i].ep_count);
hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n,
num_parents, NULL, parent_hws, mux_table,
@@ -1208,17 +1201,17 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 3);
- SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
- sama7g5_gck[i].pp_count);
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
+ sama7g5_gck[i].pp_count);
for (j = 0; j < sama7g5_gck[i].pp_count; j++) {
u8 pll_id = sama7g5_gck[i].pp[j].pll_id;
u8 pll_compid = sama7g5_gck[i].pp[j].pll_compid;
tmp_parent_hws[j] = sama7g5_plls[pll_id][pll_compid].hw;
}
- SAMA7G5_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
- sama7g5_gck[i].pp_count);
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7g5_gck[i].pp_count);
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
&sama7g5_pcr_layout,
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 2334e6c334cf..9667ce898428 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -215,7 +215,7 @@ static struct platform_driver i2s_pll_clk_driver = {
.of_match_table = i2s_pll_clk_id,
},
.probe = i2s_pll_clk_probe,
- .remove_new = i2s_pll_clk_remove,
+ .remove = i2s_pll_clk_remove,
};
module_platform_driver(i2s_pll_clk_driver);
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
index 3cb235df9d37..e79720e85685 100644
--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -110,7 +110,7 @@ MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids);
static struct platform_driver clk_dvp_driver = {
.probe = clk_dvp_probe,
- .remove_new = clk_dvp_remove,
+ .remove = clk_dvp_remove,
.driver = {
.name = "brcm2711-dvp",
.of_match_table = clk_dvp_dt_ids,
diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
index 84f2af736ee8..83ef41d618be 100644
--- a/drivers/clk/bcm/clk-bcm53573-ilp.c
+++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
@@ -112,7 +112,7 @@ static void bcm53573_ilp_init(struct device_node *np)
goto err_free_ilp;
}
- ilp->regmap = syscon_node_to_regmap(of_get_parent(np));
+ ilp->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(ilp->regmap)) {
err = PTR_ERR(ilp->regmap);
goto err_free_ilp;
diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c
index 36c7b302e396..d6d857474436 100644
--- a/drivers/clk/bcm/clk-bcm63xx-gate.c
+++ b/drivers/clk/bcm/clk-bcm63xx-gate.c
@@ -567,7 +567,7 @@ static const struct of_device_id clk_bcm63xx_dt_ids[] = {
static struct platform_driver clk_bcm63xx = {
.probe = clk_bcm63xx_probe,
- .remove_new = clk_bcm63xx_remove,
+ .remove = clk_bcm63xx_remove,
.driver = {
.name = "bcm63xx-clock",
.of_match_table = clk_bcm63xx_dt_ids,
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 4d411408e4af..a18a8768feb4 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -458,7 +458,7 @@ static struct platform_driver raspberrypi_clk_driver = {
.of_match_table = raspberrypi_clk_match,
},
.probe = raspberrypi_clk_probe,
- .remove_new = raspberrypi_clk_remove,
+ .remove = raspberrypi_clk_remove,
};
module_platform_driver(raspberrypi_clk_driver);
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 058420562020..303a0bb26e54 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/printk.h>
+#include <linux/slab.h>
static int __set_clk_parents(struct device_node *node, bool clk_supplier)
{
@@ -81,11 +82,44 @@ err:
static int __set_clk_rates(struct device_node *node, bool clk_supplier)
{
struct of_phandle_args clkspec;
- int rc, index = 0;
+ int rc, count, count_64, index;
struct clk *clk;
- u32 rate;
+ u64 *rates_64 __free(kfree) = NULL;
+ u32 *rates __free(kfree) = NULL;
+
+ count = of_property_count_u32_elems(node, "assigned-clock-rates");
+ count_64 = of_property_count_u64_elems(node, "assigned-clock-rates-u64");
+ if (count_64 > 0) {
+ count = count_64;
+ rates_64 = kcalloc(count, sizeof(*rates_64), GFP_KERNEL);
+ if (!rates_64)
+ return -ENOMEM;
+
+ rc = of_property_read_u64_array(node,
+ "assigned-clock-rates-u64",
+ rates_64, count);
+ } else if (count > 0) {
+ rates = kcalloc(count, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(node, "assigned-clock-rates",
+ rates, count);
+ } else {
+ return 0;
+ }
+
+ if (rc)
+ return rc;
+
+ for (index = 0; index < count; index++) {
+ unsigned long rate;
+
+ if (rates_64)
+ rate = rates_64[index];
+ else
+ rate = rates[index];
- of_property_for_each_u32(node, "assigned-clock-rates", rate) {
if (rate) {
rc = of_parse_phandle_with_args(node, "assigned-clocks",
"#clock-cells", index, &clkspec);
@@ -112,12 +146,11 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
rc = clk_set_rate(clk, rate);
if (rc < 0)
- pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
+ pr_err("clk: couldn't set %s clk rate to %lu (%d), current rate: %lu\n",
__clk_get_name(clk), rate, rc,
clk_get_rate(clk));
clk_put(clk);
}
- index++;
}
return 0;
}
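
/*
 * Illustrative device tree input consumed above (hypothetical phandles, not
 * part of this patch):
 *
 *	assigned-clocks = <&clkc 1>, <&clkc 2>;
 *	assigned-clock-rates-u64 = /bits/ 64 <5000000000 0>;
 *
 * When "assigned-clock-rates-u64" is present it takes precedence over the
 * 32-bit "assigned-clock-rates", allowing rates above ~4.29 GHz; a rate of
 * zero leaves that clock untouched.
 */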
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 90e6078fb6e1..82ae1f26e634 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -99,6 +99,34 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = __devm_clk_get(dev, id, clk_get_optional, NULL,
+ clk_disable_unprepare);
+ if (IS_ERR(clk))
+ return ERR_CAST(clk);
+
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ goto out_put_clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_put_clk;
+
+ return clk;
+
+out_put_clk:
+ devm_clk_put(dev, clk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled_with_rate);
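+
+/*
+ * Illustrative consumer usage of the new helper (assumed clock name, not
+ * part of this patch):
+ *
+ *	clk = devm_clk_get_optional_enabled_with_rate(&pdev->dev, "ref",
+ *						      25000000);
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);
+ *
+ * On success the clock is set to 25 MHz, prepared and enabled, and is
+ * automatically disabled, unprepared and put on driver detach.
+ */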
+
struct clk_bulk_devres {
struct clk_bulk_data *clks;
int num_clks;
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
new file mode 100644
index 000000000000..f888aed79b11
--- /dev/null
+++ b/drivers/clk/clk-ep93xx.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Clock control for Cirrus EP93xx chips.
+ * Copyright (C) 2021 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Based on a rewrite of arch/arm/mach-ep93xx/clock.c:
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ */
+#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
+
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+#include <dt-bindings/clock/cirrus,ep9301-syscon.h>
+
+#include <asm/div64.h>
+
+#define EP93XX_EXT_CLK_RATE 14745600
+#define EP93XX_EXT_RTC_RATE 32768
+
+#define EP93XX_SYSCON_POWER_STATE 0x00
+#define EP93XX_SYSCON_PWRCNT 0x04
+#define EP93XX_SYSCON_PWRCNT_UARTBAUD BIT(29)
+#define EP93XX_SYSCON_PWRCNT_USH_EN 28
+#define EP93XX_SYSCON_PWRCNT_DMA_M2M1 27
+#define EP93XX_SYSCON_PWRCNT_DMA_M2M0 26
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P8 25
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P9 24
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P6 23
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P7 22
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P4 21
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P5 20
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P2 19
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P3 18
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P0 17
+#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 16
+#define EP93XX_SYSCON_CLKSET1 0x20
+#define EP93XX_SYSCON_CLKSET1_NBYP1 BIT(23)
+#define EP93XX_SYSCON_CLKSET2 0x24
+#define EP93XX_SYSCON_CLKSET2_NBYP2 BIT(19)
+#define EP93XX_SYSCON_CLKSET2_PLL2_EN BIT(18)
+#define EP93XX_SYSCON_DEVCFG 0x80
+#define EP93XX_SYSCON_DEVCFG_U3EN 24
+#define EP93XX_SYSCON_DEVCFG_U2EN 20
+#define EP93XX_SYSCON_DEVCFG_U1EN 18
+#define EP93XX_SYSCON_VIDCLKDIV 0x84
+#define EP93XX_SYSCON_CLKDIV_ENABLE 15
+#define EP93XX_SYSCON_CLKDIV_ESEL BIT(14)
+#define EP93XX_SYSCON_CLKDIV_PSEL BIT(13)
+#define EP93XX_SYSCON_CLKDIV_MASK GENMASK(14, 13)
+#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8
+#define EP93XX_SYSCON_I2SCLKDIV 0x8c
+#define EP93XX_SYSCON_I2SCLKDIV_SENA 31
+#define EP93XX_SYSCON_I2SCLKDIV_ORIDE BIT(29)
+#define EP93XX_SYSCON_I2SCLKDIV_SPOL BIT(19)
+#define EP93XX_SYSCON_KEYTCHCLKDIV 0x90
+#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN 31
+#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV 16
+#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN 15
+#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV 0
+#define EP93XX_SYSCON_CHIPID 0x94
+#define EP93XX_SYSCON_CHIPID_ID 0x9213
+
+#define EP93XX_FIXED_CLK_COUNT 21
+
+static const char ep93xx_adc_divisors[] = { 16, 4 };
+static const char ep93xx_sclk_divisors[] = { 2, 4 };
+static const char ep93xx_lrclk_divisors[] = { 32, 64, 128 };
+
+struct ep93xx_clk {
+ struct clk_hw hw;
+ u16 idx;
+ u16 reg;
+ u32 mask;
+ u8 bit_idx;
+ u8 shift;
+ u8 width;
+ u8 num_div;
+ const char *div;
+};
+
+struct ep93xx_clk_priv {
+ spinlock_t lock;
+ struct ep93xx_regmap_adev *aux_dev;
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *map;
+ struct clk_hw *fixed[EP93XX_FIXED_CLK_COUNT];
+ struct ep93xx_clk reg[];
+};
+
+static struct ep93xx_clk *ep93xx_clk_from(struct clk_hw *hw)
+{
+ return container_of(hw, struct ep93xx_clk, hw);
+}
+
+static struct ep93xx_clk_priv *ep93xx_priv_from(struct ep93xx_clk *clk)
+{
+ return container_of(clk, struct ep93xx_clk_priv, reg[clk->idx]);
+}
+
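+/*
+ * Register writes go through the auxiliary device's write callback
+ * rather than regmap_write(): the EP93xx syscon registers sit behind a
+ * software lock, and the callback is expected to perform the unlock
+ * sequence under the shared lock (assumption based on the
+ * ep93xx_regmap_adev interface).
+ */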
+static void ep93xx_clk_write(struct ep93xx_clk_priv *priv, unsigned int reg, unsigned int val)
+{
+ struct ep93xx_regmap_adev *aux = priv->aux_dev;
+
+ aux->write(aux->map, aux->lock, reg, val);
+}
+
+static int ep93xx_clk_is_enabled(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+
+ return !!(val & BIT(clk->bit_idx));
+}
+
+static int ep93xx_clk_enable(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val |= BIT(clk->bit_idx);
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static void ep93xx_clk_disable(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~BIT(clk->bit_idx);
+
+ ep93xx_clk_write(priv, clk->reg, val);
+}
+
+static const struct clk_ops clk_ep93xx_gate_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+};
+
+static int ep93xx_clk_register_gate(struct ep93xx_clk *clk,
+ const char *name,
+ struct clk_parent_data *parent_data,
+ unsigned long flags,
+ unsigned int reg,
+ u8 bit_idx)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_ep93xx_gate_ops;
+ init.flags = flags;
+ init.parent_data = parent_data;
+ init.num_parents = 1;
+
+ clk->reg = reg;
+ clk->bit_idx = bit_idx;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
+
+static u8 ep93xx_mux_get_parent(struct clk_hw *hw)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+
+ val &= EP93XX_SYSCON_CLKDIV_MASK;
+
+ switch (val) {
+ case EP93XX_SYSCON_CLKDIV_ESEL:
+ return 1; /* PLL1 */
+ case EP93XX_SYSCON_CLKDIV_MASK:
+ return 2; /* PLL2 */
+ default:
+ return 0; /* XTALI */
+ }
+}
+
+static int ep93xx_mux_set_parent_lock(struct clk_hw *hw, u8 index)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+
+ if (index >= 3)
+ return -EINVAL;
+
+ guard(spinlock_irqsave)(&priv->lock);
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~(EP93XX_SYSCON_CLKDIV_MASK);
+ val |= index > 0 ? EP93XX_SYSCON_CLKDIV_ESEL : 0;
+ val |= index > 1 ? EP93XX_SYSCON_CLKDIV_PSEL : 0;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static bool is_best(unsigned long rate, unsigned long now,
+ unsigned long best)
+{
+ return abs_diff(rate, now) < abs_diff(rate, best);
+}
+
+static int ep93xx_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long best_rate = 0, actual_rate, mclk_rate;
+ unsigned long rate = req->rate;
+ struct clk_hw *parent_best = NULL;
+ unsigned long parent_rate_best;
+ unsigned long parent_rate;
+ int div, pdiv;
+ unsigned int i;
+
+ /*
+ * Try the two PLLs and the external clock. Because the valid
+ * predividers are 2, 2.5 and 3, we multiply all the clock rates
+ * by 2 to avoid floating-point math.
+ *
+ * This is based on the algorithm in the ep93xx raster guide:
+ * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf
+ */
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+
+ parent_rate = clk_hw_get_rate(parent);
+ mclk_rate = parent_rate * 2;
+
+ /* Try each predivider value */
+ for (pdiv = 4; pdiv <= 6; pdiv++) {
+ div = DIV_ROUND_CLOSEST(mclk_rate, rate * pdiv);
+ if (!in_range(div, 1, 127))
+ continue;
+
+ actual_rate = DIV_ROUND_CLOSEST(mclk_rate, pdiv * div);
+ if (is_best(rate, actual_rate, best_rate)) {
+ best_rate = actual_rate;
+ parent_rate_best = parent_rate;
+ parent_best = parent;
+ }
+ }
+ }
+
+ if (!parent_best)
+ return -EINVAL;
+
+ req->best_parent_rate = parent_rate_best;
+ req->best_parent_hw = parent_best;
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static unsigned long ep93xx_ddiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ unsigned int pdiv, div;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ pdiv = (val >> EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) & GENMASK(1, 0);
+ div = val & GENMASK(6, 0);
+ if (!div)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(parent_rate * 2, (pdiv + 3) * div);
+}
+
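+/*
+ * The predivider search below mirrors ep93xx_mux_determine_rate().
+ * Worked example (illustrative): with a 400 MHz parent, mclk_rate is
+ * 800 MHz; a 25 MHz request finds pdiv = 4 (predivider 2) and
+ * div = 800 MHz / (25 MHz * 4) = 8, giving exactly 400 / (2 * 8) = 25 MHz.
+ */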
+static int ep93xx_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ int pdiv, div, npdiv, ndiv;
+ unsigned long actual_rate, mclk_rate, rate_err = ULONG_MAX;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ mclk_rate = parent_rate * 2;
+
+ for (pdiv = 4; pdiv <= 6; pdiv++) {
+ div = DIV_ROUND_CLOSEST(mclk_rate, rate * pdiv);
+ if (!in_range(div, 1, 127))
+ continue;
+
+ actual_rate = DIV_ROUND_CLOSEST(mclk_rate, pdiv * div);
+ if (abs_diff(actual_rate, rate) < rate_err) {
+ npdiv = pdiv - 3;
+ ndiv = div;
+ rate_err = abs_diff(actual_rate, rate);
+ }
+ }
+
+ if (rate_err == ULONG_MAX)
+ return -EINVAL;
+
+ /*
+ * Clear old dividers.
+ * Bit 7 is reserved bit in all ClkDiv registers.
+ */
+ val &= ~(GENMASK(9, 0) & ~BIT(7));
+
+ /* Set the new pdiv and div bits for the new clock rate */
+ val |= (npdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | ndiv;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ddiv_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+ .get_parent = ep93xx_mux_get_parent,
+ .set_parent = ep93xx_mux_set_parent_lock,
+ .determine_rate = ep93xx_mux_determine_rate,
+ .recalc_rate = ep93xx_ddiv_recalc_rate,
+ .set_rate = ep93xx_ddiv_set_rate,
+};
+
+static int ep93xx_clk_register_ddiv(struct ep93xx_clk *clk,
+ const char *name,
+ struct clk_parent_data *parent_data,
+ u8 num_parents,
+ unsigned int reg,
+ u8 bit_idx)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_ddiv_ops;
+ init.flags = 0;
+ init.parent_data = parent_data;
+ init.num_parents = num_parents;
+
+ clk->reg = reg;
+ clk->bit_idx = bit_idx;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
+
+static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ u32 val;
+ u8 index;
+
+ regmap_read(priv->map, clk->reg, &val);
+ index = (val & clk->mask) >> clk->shift;
+ if (index >= clk->num_div)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]);
+}
+
+static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ unsigned long best = 0, now;
+ unsigned int i;
+
+ for (i = 0; i < clk->num_div; i++) {
+ if ((rate * clk->div[i]) == *parent_rate)
+ return rate;
+
+ now = DIV_ROUND_CLOSEST(*parent_rate, clk->div[i]);
+ if (!best || is_best(rate, now, best))
+ best = now;
+ }
+
+ return best;
+}
+
+static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ep93xx_clk *clk = ep93xx_clk_from(hw);
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ unsigned int i;
+ u32 val;
+
+ regmap_read(priv->map, clk->reg, &val);
+ val &= ~clk->mask;
+ for (i = 0; i < clk->num_div; i++)
+ if (rate == DIV_ROUND_CLOSEST(parent_rate, clk->div[i]))
+ break;
+
+ if (i == clk->num_div)
+ return -EINVAL;
+
+ val |= i << clk->shift;
+
+ ep93xx_clk_write(priv, clk->reg, val);
+
+ return 0;
+}
+
+static const struct clk_ops ep93xx_div_ops = {
+ .enable = ep93xx_clk_enable,
+ .disable = ep93xx_clk_disable,
+ .is_enabled = ep93xx_clk_is_enabled,
+ .recalc_rate = ep93xx_div_recalc_rate,
+ .round_rate = ep93xx_div_round_rate,
+ .set_rate = ep93xx_div_set_rate,
+};
+
+static int ep93xx_register_div(struct ep93xx_clk *clk,
+ const char *name,
+ const struct clk_parent_data *parent_data,
+ unsigned int reg,
+ u8 enable_bit,
+ u8 shift,
+ u8 width,
+ const char *clk_divisors,
+ u8 num_div)
+{
+ struct ep93xx_clk_priv *priv = ep93xx_priv_from(clk);
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &ep93xx_div_ops;
+ init.flags = 0;
+ init.parent_data = parent_data;
+ init.num_parents = 1;
+
+ clk->reg = reg;
+ clk->bit_idx = enable_bit;
+ clk->mask = GENMASK(shift + width - 1, shift);
+ clk->shift = shift;
+ clk->div = clk_divisors;
+ clk->num_div = num_div;
+ clk->hw.init = &init;
+
+ return devm_clk_hw_register(priv->dev, &clk->hw);
+}
+
+struct ep93xx_gate {
+ unsigned int idx;
+ unsigned int bit;
+ const char *name;
+};
+
+static const struct ep93xx_gate ep93xx_uarts[] = {
+ { EP93XX_CLK_UART1, EP93XX_SYSCON_DEVCFG_U1EN, "uart1" },
+ { EP93XX_CLK_UART2, EP93XX_SYSCON_DEVCFG_U2EN, "uart2" },
+ { EP93XX_CLK_UART3, EP93XX_SYSCON_DEVCFG_U3EN, "uart3" },
+};
+
+static int ep93xx_uart_clock_init(struct ep93xx_clk_priv *priv)
+{
+ struct clk_parent_data parent_data = { };
+ unsigned int i, idx, clk_uart_div;
+ int ret;
+ struct ep93xx_clk *clk;
+ u32 val;
+
+ regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val);
+ if (val & EP93XX_SYSCON_PWRCNT_UARTBAUD)
+ clk_uart_div = 1;
+ else
+ clk_uart_div = 2;
+
+ priv->fixed[EP93XX_CLK_UART] =
+ devm_clk_hw_register_fixed_factor_index(priv->dev, "uart",
+ 0, /* XTALI external clock */
+ 0, 1, clk_uart_div);
+ if (IS_ERR(priv->fixed[EP93XX_CLK_UART]))
+ return PTR_ERR(priv->fixed[EP93XX_CLK_UART]);
+
+ parent_data.hw = priv->fixed[EP93XX_CLK_UART];
+
+ /* Parent the UART gate clocks to the common uart clock */
+ for (i = 0; i < ARRAY_SIZE(ep93xx_uarts); i++) {
+ idx = ep93xx_uarts[i].idx - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_gate(clk,
+ ep93xx_uarts[i].name,
+ &parent_data, CLK_SET_RATE_PARENT,
+ EP93XX_SYSCON_DEVCFG,
+ ep93xx_uarts[i].bit);
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "failed to register uart[%d] clock\n", i);
+ }
+
+ return 0;
+}
+
+static const struct ep93xx_gate ep93xx_dmas[] = {
+ { EP93XX_CLK_M2M0, EP93XX_SYSCON_PWRCNT_DMA_M2M0, "m2m0" },
+ { EP93XX_CLK_M2M1, EP93XX_SYSCON_PWRCNT_DMA_M2M1, "m2m1" },
+ { EP93XX_CLK_M2P0, EP93XX_SYSCON_PWRCNT_DMA_M2P0, "m2p0" },
+ { EP93XX_CLK_M2P1, EP93XX_SYSCON_PWRCNT_DMA_M2P1, "m2p1" },
+ { EP93XX_CLK_M2P2, EP93XX_SYSCON_PWRCNT_DMA_M2P2, "m2p2" },
+ { EP93XX_CLK_M2P3, EP93XX_SYSCON_PWRCNT_DMA_M2P3, "m2p3" },
+ { EP93XX_CLK_M2P4, EP93XX_SYSCON_PWRCNT_DMA_M2P4, "m2p4" },
+ { EP93XX_CLK_M2P5, EP93XX_SYSCON_PWRCNT_DMA_M2P5, "m2p5" },
+ { EP93XX_CLK_M2P6, EP93XX_SYSCON_PWRCNT_DMA_M2P6, "m2p6" },
+ { EP93XX_CLK_M2P7, EP93XX_SYSCON_PWRCNT_DMA_M2P7, "m2p7" },
+ { EP93XX_CLK_M2P8, EP93XX_SYSCON_PWRCNT_DMA_M2P8, "m2p8" },
+ { EP93XX_CLK_M2P9, EP93XX_SYSCON_PWRCNT_DMA_M2P9, "m2p9" },
+};
+
+static int ep93xx_dma_clock_init(struct ep93xx_clk_priv *priv)
+{
+ struct clk_parent_data parent_data = { };
+ unsigned int i, idx;
+
+ parent_data.hw = priv->fixed[EP93XX_CLK_HCLK];
+ for (i = 0; i < ARRAY_SIZE(ep93xx_dmas); i++) {
+ idx = ep93xx_dmas[i].idx;
+ priv->fixed[idx] = devm_clk_hw_register_gate_parent_data(priv->dev,
+ ep93xx_dmas[i].name,
+ &parent_data, 0,
+ priv->base + EP93XX_SYSCON_PWRCNT,
+ ep93xx_dmas[i].bit,
+ 0,
+ &priv->lock);
+ if (IS_ERR(priv->fixed[idx]))
+ return PTR_ERR(priv->fixed[idx]);
+ }
+
+ return 0;
+}
+
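+/*
+ * Binding indices below EP93XX_CLK_UART1 map to the fixed/fixed-factor
+ * clks in priv->fixed[]; indices from EP93XX_CLK_UART1 up to
+ * EP93XX_CLK_I2S_LRCLK map to the register-backed clks in priv->reg[].
+ */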
+static struct clk_hw *of_clk_ep93xx_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct ep93xx_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < EP93XX_CLK_UART1)
+ return priv->fixed[idx];
+
+ if (idx <= EP93XX_CLK_I2S_LRCLK)
+ return &priv->reg[idx - EP93XX_CLK_UART1].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * PLL rate = 14.7456 MHz * (X1FBD + 1) * (X2FBD + 1) / (X2IPD + 1) / 2^PS
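+ *
+ * Example (illustrative values): X1FBD = 19, X2FBD = 9, X2IPD = 7,
+ * PS = 0 gives 14.7456 MHz * 20 * 10 / 8 = 368.64 MHz.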
+ */
+static unsigned long calc_pll_rate(u64 rate, u32 config_word)
+{
+ rate *= ((config_word >> 11) & GENMASK(4, 0)) + 1; /* X1FBD */
+ rate *= ((config_word >> 5) & GENMASK(5, 0)) + 1; /* X2FBD */
+ do_div(rate, (config_word & GENMASK(4, 0)) + 1); /* X2IPD */
+ rate >>= (config_word >> 16) & GENMASK(1, 0); /* PS */
+
+ return rate;
+}
+
+static int ep93xx_plls_init(struct ep93xx_clk_priv *priv)
+{
+ const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
+ const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
+ const char pclk_divisors[] = { 1, 2, 4, 8 };
+ struct clk_parent_data xtali = { .index = 0 };
+ unsigned int clk_f_div, clk_h_div, clk_p_div;
+ unsigned long clk_pll1_rate, clk_pll2_rate;
+ struct device *dev = priv->dev;
+ struct clk_hw *hw, *pll1;
+ u32 value;
+
+ /* Determine the bootloader configured pll1 rate */
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET1, &value);
+
+ if (value & EP93XX_SYSCON_CLKSET1_NBYP1)
+ clk_pll1_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
+ else
+ clk_pll1_rate = EP93XX_EXT_CLK_RATE;
+
+ pll1 = devm_clk_hw_register_fixed_rate_parent_data(dev, "pll1", &xtali,
+ 0, clk_pll1_rate);
+ if (IS_ERR(pll1))
+ return PTR_ERR(pll1);
+
+ priv->fixed[EP93XX_CLK_PLL1] = pll1;
+
+ /* Initialize the pll1 derived clocks */
+ clk_f_div = fclk_divisors[(value >> 25) & GENMASK(2, 0)];
+ clk_h_div = hclk_divisors[(value >> 20) & GENMASK(2, 0)];
+ clk_p_div = pclk_divisors[(value >> 18) & GENMASK(1, 0)];
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "fclk", pll1, 0, 1, clk_f_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_FCLK] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "hclk", pll1, 0, 1, clk_h_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_HCLK] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "pclk", hw, 0, 1, clk_p_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PCLK] = hw;
+
+ /* Determine the bootloader configured pll2 rate */
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value);
+ if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2))
+ clk_pll2_rate = EP93XX_EXT_CLK_RATE;
+ else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN)
+ clk_pll2_rate = calc_pll_rate(EP93XX_EXT_CLK_RATE, value);
+ else
+ clk_pll2_rate = 0;
+
+ hw = devm_clk_hw_register_fixed_rate_parent_data(dev, "pll2", &xtali,
+ 0, clk_pll2_rate);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PLL2] = hw;
+
+ return 0;
+}
+
+static int ep93xx_clk_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+ struct clk_parent_data xtali = { .index = 0 };
+ struct clk_parent_data ddiv_pdata[3] = { };
+ unsigned int clk_spi_div, clk_usb_div;
+ struct clk_parent_data pdata = {};
+ struct device *dev = &adev->dev;
+ struct ep93xx_clk_priv *priv;
+ struct ep93xx_clk *clk;
+ struct clk_hw *hw;
+ unsigned int idx;
+ int ret;
+ u32 value;
+
+ priv = devm_kzalloc(dev, struct_size(priv, reg, 10), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ priv->dev = dev;
+ priv->aux_dev = rdev;
+ priv->map = rdev->map;
+ priv->base = rdev->base;
+
+ ret = ep93xx_plls_init(priv);
+ if (ret)
+ return ret;
+
+ regmap_read(priv->map, EP93XX_SYSCON_CLKSET2, &value);
+ clk_usb_div = (value >> 28 & GENMASK(3, 0)) + 1;
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, "usb_clk",
+ priv->fixed[EP93XX_CLK_PLL2], 0, 1,
+ clk_usb_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_USB] = hw;
+
+ ret = ep93xx_uart_clock_init(priv);
+ if (ret)
+ return ret;
+
+ ret = ep93xx_dma_clock_init(priv);
+ if (ret)
+ return ret;
+
+ clk_spi_div = id->driver_data;
+ hw = devm_clk_hw_register_fixed_factor_index(dev, "ep93xx-spi.0",
+ 0, /* XTALI external clock */
+ 0, 1, clk_spi_div);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_SPI] = hw;
+
+ /* PWM clock */
+ hw = devm_clk_hw_register_fixed_factor_index(dev, "pwm_clk", 0, /* XTALI external clock */
+ 0, 1, 1);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_PWM] = hw;
+
+ /* USB host (OHCI) gate clock, parented to usb_clk */
+ pdata.hw = priv->fixed[EP93XX_CLK_USB];
+ hw = devm_clk_hw_register_gate_parent_data(priv->dev, "ohci-platform", &pdata,
+ 0, priv->base + EP93XX_SYSCON_PWRCNT,
+ EP93XX_SYSCON_PWRCNT_USH_EN, 0,
+ &priv->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ priv->fixed[EP93XX_CLK_USB] = hw;
+
+ ddiv_pdata[0].index = 0; /* XTALI external clock */
+ ddiv_pdata[1].hw = priv->fixed[EP93XX_CLK_PLL1];
+ ddiv_pdata[2].hw = priv->fixed[EP93XX_CLK_PLL2];
+
+ /* touchscreen/ADC clock */
+ idx = EP93XX_CLK_ADC - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_register_div(clk, "ep93xx-adc", &xtali,
+ EP93XX_SYSCON_KEYTCHCLKDIV,
+ EP93XX_SYSCON_KEYTCHCLKDIV_TSEN,
+ EP93XX_SYSCON_KEYTCHCLKDIV_ADIV,
+ 1,
+ ep93xx_adc_divisors,
+ ARRAY_SIZE(ep93xx_adc_divisors));
+
+
+ /* keypad clock */
+ idx = EP93XX_CLK_KEYPAD - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_register_div(clk, "ep93xx-keypad", &xtali,
+ EP93XX_SYSCON_KEYTCHCLKDIV,
+ EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
+ EP93XX_SYSCON_KEYTCHCLKDIV_KDIV,
+ 1,
+ ep93xx_adc_divisors,
+ ARRAY_SIZE(ep93xx_adc_divisors));
+ if (ret)
+ return ret;
+
+ /*
+ * On reset both PDIV and VDIV are set to zero. PDIV zero means the
+ * clock is disabled, and VDIV must not be zero, so set both the
+ * video and i2s dividers to their minimum values:
+ * ENA - enable the clock divider
+ * PDIV - 00 disables the clock
+ * VDIV - at least 2
+ */
+
+ /* Check and enable video clk registers */
+ regmap_read(priv->map, EP93XX_SYSCON_VIDCLKDIV, &value);
+ value |= BIT(EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
+ ep93xx_clk_write(priv, EP93XX_SYSCON_VIDCLKDIV, value);
+
+ /* Check and enable i2s clk registers */
+ regmap_read(priv->map, EP93XX_SYSCON_I2SCLKDIV, &value);
+ value |= BIT(EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | 2;
+
+ /*
+ * Override the SAI_MSTR_CLK_CFG from the I2S block and use the
+ * I2SClkDiv Register settings. LRCLK transitions on the falling SCLK
+ * edge.
+ */
+ value |= EP93XX_SYSCON_I2SCLKDIV_ORIDE | EP93XX_SYSCON_I2SCLKDIV_SPOL;
+ ep93xx_clk_write(priv, EP93XX_SYSCON_I2SCLKDIV, value);
+
+ /* video clk */
+ idx = EP93XX_CLK_VIDEO - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_ddiv(clk, "ep93xx-fb",
+ ddiv_pdata, ARRAY_SIZE(ddiv_pdata),
+ EP93XX_SYSCON_VIDCLKDIV,
+ EP93XX_SYSCON_CLKDIV_ENABLE);
+ if (ret)
+ return ret;
+
+ /* i2s clk */
+ idx = EP93XX_CLK_I2S_MCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ ret = ep93xx_clk_register_ddiv(clk, "mclk",
+ ddiv_pdata, ARRAY_SIZE(ddiv_pdata),
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_CLKDIV_ENABLE);
+ if (ret)
+ return ret;
+
+ /* i2s sclk */
+ idx = EP93XX_CLK_I2S_SCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ pdata.hw = &priv->reg[EP93XX_CLK_I2S_MCLK - EP93XX_CLK_UART1].hw;
+ ret = ep93xx_register_div(clk, "sclk", &pdata,
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_I2SCLKDIV_SENA,
+ 16, /* EP93XX_I2SCLKDIV_SDIV_SHIFT */
+ 1, /* EP93XX_I2SCLKDIV_SDIV_WIDTH */
+ ep93xx_sclk_divisors,
+ ARRAY_SIZE(ep93xx_sclk_divisors));
+ if (ret)
+ return ret;
+
+ /* i2s lrclk */
+ idx = EP93XX_CLK_I2S_LRCLK - EP93XX_CLK_UART1;
+ clk = &priv->reg[idx];
+ clk->idx = idx;
+ pdata.hw = &priv->reg[EP93XX_CLK_I2S_SCLK - EP93XX_CLK_UART1].hw;
+ ret = ep93xx_register_div(clk, "lrclk", &pdata,
+ EP93XX_SYSCON_I2SCLKDIV,
+ EP93XX_SYSCON_I2SCLKDIV_SENA,
+ 17, /* EP93XX_I2SCLKDIV_LRDIV32_SHIFT */
+ 2, /* EP93XX_I2SCLKDIV_LRDIV32_WIDTH */
+ ep93xx_lrclk_divisors,
+ ARRAY_SIZE(ep93xx_lrclk_divisors));
+ if (ret)
+ return ret;
+
+ /*
+ * The IrDA clock follows the same pattern, but the original clock
+ * driver had no init code for it.
+ */
+ return devm_of_clk_add_hw_provider(priv->dev, of_clk_ep93xx_get, priv);
+}
+
+static const struct auxiliary_device_id ep93xx_clk_ids[] = {
+ { .name = "soc_ep93xx.clk-ep93xx", .driver_data = 2, },
+ { .name = "soc_ep93xx.clk-ep93xx.e2", .driver_data = 1, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ep93xx_clk_ids);
+
+static struct auxiliary_driver ep93xx_clk_driver = {
+ .probe = ep93xx_clk_probe,
+ .id_table = ep93xx_clk_ids,
+};
+module_auxiliary_driver(ep93xx_clk_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nikita Shubin <nikita.shubin@maquefel.me>");
+MODULE_DESCRIPTION("Clock control for Cirrus EP93xx chips");
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index fe0500a1af3e..8fba63fc70c5 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -405,7 +405,7 @@ static struct platform_driver of_fixed_factor_clk_driver = {
.of_match_table = of_fixed_factor_clk_ids,
},
.probe = of_fixed_factor_clk_probe,
- .remove_new = of_fixed_factor_clk_remove,
+ .remove = of_fixed_factor_clk_remove,
};
builtin_platform_driver(of_fixed_factor_clk_driver);
#endif
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index 0e08cb22c196..3bfcf4cd98a2 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -91,7 +91,7 @@ static struct platform_driver of_fixed_mmio_clk_driver = {
.of_match_table = of_fixed_mmio_clk_ids,
},
.probe = of_fixed_mmio_clk_probe,
- .remove_new = of_fixed_mmio_clk_remove,
+ .remove = of_fixed_mmio_clk_remove,
};
module_platform_driver(of_fixed_mmio_clk_driver);
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 3481eb8cdeb3..6b4f76b9c4da 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -232,7 +232,7 @@ static struct platform_driver of_fixed_clk_driver = {
.of_match_table = of_fixed_clk_ids,
},
.probe = of_fixed_clk_probe,
- .remove_new = of_fixed_clk_remove,
+ .remove = of_fixed_clk_remove,
};
builtin_platform_driver(of_fixed_clk_driver);
#endif
diff --git a/drivers/clk/clk-fixed-rate_test.c b/drivers/clk/clk-fixed-rate_test.c
new file mode 100644
index 000000000000..0e04c10a21aa
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate_test.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for clk fixed rate basic type
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <kunit/clk.h>
+#include <kunit/of.h>
+#include <kunit/platform_device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+
+#include "clk-fixed-rate_test.h"
+
+/**
+ * struct clk_hw_fixed_rate_kunit_params - Parameters to pass to __clk_hw_register_fixed_rate()
+ * @dev: device registering clk
+ * @np: device_node of device registering clk
+ * @name: name of clk
+ * @parent_name: parent name of clk
+ * @parent_hw: clk_hw pointer to parent of clk
+ * @parent_data: parent_data describing parent of clk
+ * @flags: clk framework flags
+ * @fixed_rate: frequency of clk
+ * @fixed_accuracy: accuracy of clk
+ * @clk_fixed_flags: fixed rate specific clk flags
+ */
+struct clk_hw_fixed_rate_kunit_params {
+ struct device *dev;
+ struct device_node *np;
+ const char *name;
+ const char *parent_name;
+ const struct clk_hw *parent_hw;
+ const struct clk_parent_data *parent_data;
+ unsigned long flags;
+ unsigned long fixed_rate;
+ unsigned long fixed_accuracy;
+ unsigned long clk_fixed_flags;
+};
+
+static int
+clk_hw_register_fixed_rate_kunit_init(struct kunit_resource *res, void *context)
+{
+ struct clk_hw_fixed_rate_kunit_params *params = context;
+ struct clk_hw *hw;
+
+ hw = __clk_hw_register_fixed_rate(params->dev, params->np,
+ params->name,
+ params->parent_name,
+ params->parent_hw,
+ params->parent_data,
+ params->flags,
+ params->fixed_rate,
+ params->fixed_accuracy,
+ params->clk_fixed_flags,
+ false);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ res->data = hw;
+
+ return 0;
+}
+
+static void clk_hw_register_fixed_rate_kunit_exit(struct kunit_resource *res)
+{
+ struct clk_hw *hw = res->data;
+
+ clk_hw_unregister_fixed_rate(hw);
+}
+
+/**
+ * clk_hw_register_fixed_rate_kunit() - Test managed __clk_hw_register_fixed_rate()
+ * @test: The test context
+ * @params: Arguments to __clk_hw_register_fixed_rate()
+ *
+ * Return: Registered fixed rate clk_hw or ERR_PTR on failure
+ */
+static struct clk_hw *
+clk_hw_register_fixed_rate_kunit(struct kunit *test,
+ struct clk_hw_fixed_rate_kunit_params *params)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_alloc_resource(test,
+ clk_hw_register_fixed_rate_kunit_init,
+ clk_hw_register_fixed_rate_kunit_exit,
+ GFP_KERNEL, params);
+ if (!hw)
+ return ERR_PTR(-EINVAL);
+
+ return hw;
+}
+
+/**
+ * clk_hw_unregister_fixed_rate_kunit() - Test managed clk_hw_unregister_fixed_rate()
+ * @test: The test context
+ * @hw: fixed rate clk to unregister upon test completion
+ *
+ * Automatically unregister @hw when @test is complete via
+ * clk_hw_unregister_fixed_rate().
+ *
+ * Return: 0 on success or negative errno on failure
+ */
+static int clk_hw_unregister_fixed_rate_kunit(struct kunit *test, struct clk_hw *hw)
+{
+ if (!kunit_alloc_resource(test, NULL,
+ clk_hw_register_fixed_rate_kunit_exit,
+ GFP_KERNEL, hw))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Test that clk_get_rate() on a fixed rate clk registered with
+ * clk_hw_register_fixed_rate() gets the proper frequency.
+ */
+static void clk_fixed_rate_rate_test(struct kunit *test)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+ const unsigned long fixed_rate = 230000;
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", NULL, 0, fixed_rate);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_prepared_enabled_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, fixed_rate, clk_get_rate(clk));
+}
+
+/*
+ * Test that clk_get_accuracy() on a fixed rate clk registered via
+ * clk_hw_register_fixed_rate_with_accuracy() gets the proper accuracy.
+ */
+static void clk_fixed_rate_accuracy_test(struct kunit *test)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+ const unsigned long fixed_accuracy = 5000;
+
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, "test-fixed-rate",
+ NULL, 0, 0,
+ fixed_accuracy);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, fixed_accuracy, clk_get_accuracy(clk));
+}
+
+/* Test suite for a fixed rate clk without any parent */
+static struct kunit_case clk_fixed_rate_test_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_rate_test),
+ KUNIT_CASE(clk_fixed_rate_accuracy_test),
+ {}
+};
+
+static struct kunit_suite clk_fixed_rate_suite = {
+ .name = "clk_fixed_rate",
+ .test_cases = clk_fixed_rate_test_cases,
+};
+
+/*
+ * Test that clk_get_parent() on a fixed rate clk gets the proper parent.
+ */
+static void clk_fixed_rate_parent_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *expected_parent, *actual_parent;
+ struct clk *clk;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ expected_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", parent_name, 0, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ actual_parent = clk_get_parent(clk);
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+/*
+ * Test that clk_get_rate() on a fixed rate clk ignores the parent rate.
+ */
+static void clk_fixed_rate_parent_rate_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *clk;
+ const unsigned long expected_rate = 1405;
+ const unsigned long parent_rate = 90402;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ .fixed_rate = parent_rate,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ hw = clk_hw_register_fixed_rate(NULL, "test-fixed-rate", parent_name, 0,
+ expected_rate);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_prepared_enabled_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, expected_rate, clk_get_rate(clk));
+}
+
+/*
+ * Test that clk_get_accuracy() on a fixed rate clk ignores the parent accuracy.
+ */
+static void clk_fixed_rate_parent_accuracy_test(struct kunit *test)
+{
+ struct clk_hw *hw, *parent_hw;
+ struct clk *clk;
+ const unsigned long expected_accuracy = 900;
+ const unsigned long parent_accuracy = 24000;
+ const char *parent_name = "test-fixed-rate-parent";
+ struct clk_hw_fixed_rate_kunit_params parent_params = {
+ .name = parent_name,
+ .fixed_accuracy = parent_accuracy,
+ };
+
+ parent_hw = clk_hw_register_fixed_rate_kunit(test, &parent_params);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+ KUNIT_ASSERT_STREQ(test, parent_name, clk_hw_get_name(parent_hw));
+
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL, "test-fixed-rate",
+ parent_name, 0, 0,
+ expected_accuracy);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_unregister_fixed_rate_kunit(test, hw));
+
+ clk = clk_hw_get_clk_kunit(test, hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, expected_accuracy, clk_get_accuracy(clk));
+}
+
+/* Test suite for a fixed rate clk with a parent */
+static struct kunit_case clk_fixed_rate_parent_test_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_parent_test),
+ KUNIT_CASE(clk_fixed_rate_parent_rate_test),
+ KUNIT_CASE(clk_fixed_rate_parent_accuracy_test),
+ {}
+};
+
+static struct kunit_suite clk_fixed_rate_parent_suite = {
+ .name = "clk_fixed_rate_parent",
+ .test_cases = clk_fixed_rate_parent_test_cases,
+};
+
+struct clk_fixed_rate_of_test_context {
+ struct device *dev;
+ struct platform_driver pdrv;
+ struct completion probed;
+};
+
+static inline struct clk_fixed_rate_of_test_context *
+pdev_to_clk_fixed_rate_of_test_context(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct clk_fixed_rate_of_test_context,
+ pdrv);
+}
+
+/*
+ * Test that of_fixed_clk_setup() registers a fixed rate clk with the proper
+ * rate.
+ */
+static void clk_fixed_rate_of_probe_test(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx = test->priv;
+ struct device *dev = ctx->dev;
+ struct clk *clk;
+
+ clk = clk_get_kunit(test, dev, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_ASSERT_EQ(test, 0, clk_prepare_enable_kunit(test, clk));
+ KUNIT_EXPECT_EQ(test, TEST_FIXED_FREQUENCY, clk_get_rate(clk));
+}
+
+/*
+ * Test that of_fixed_clk_setup() registers a fixed rate clk with the proper
+ * accuracy.
+ */
+static void clk_fixed_rate_of_accuracy_test(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx = test->priv;
+ struct device *dev = ctx->dev;
+ struct clk *clk;
+
+ clk = clk_get_kunit(test, dev, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ KUNIT_EXPECT_EQ(test, TEST_FIXED_ACCURACY, clk_get_accuracy(clk));
+}
+
+static struct kunit_case clk_fixed_rate_of_cases[] = {
+ KUNIT_CASE(clk_fixed_rate_of_probe_test),
+ KUNIT_CASE(clk_fixed_rate_of_accuracy_test),
+ {}
+};
+
+static int clk_fixed_rate_of_test_probe(struct platform_device *pdev)
+{
+ struct clk_fixed_rate_of_test_context *ctx;
+
+ ctx = pdev_to_clk_fixed_rate_of_test_context(pdev);
+ ctx->dev = &pdev->dev;
+ complete(&ctx->probed);
+
+ return 0;
+}
+
+static int clk_fixed_rate_of_init(struct kunit *test)
+{
+ struct clk_fixed_rate_of_test_context *ctx;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,single-clk-consumer" },
+ { }
+ };
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_fixed_rate_test));
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ ctx->pdrv.probe = clk_fixed_rate_of_test_probe;
+ ctx->pdrv.driver.of_match_table = match_table;
+ ctx->pdrv.driver.name = __func__;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+ init_completion(&ctx->probed);
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NE(test, 0, wait_for_completion_timeout(&ctx->probed, HZ));
+
+ return 0;
+}
+
+static struct kunit_suite clk_fixed_rate_of_suite = {
+ .name = "clk_fixed_rate_of",
+ .init = clk_fixed_rate_of_init,
+ .test_cases = clk_fixed_rate_of_cases,
+};
+
+kunit_test_suites(
+ &clk_fixed_rate_suite,
+ &clk_fixed_rate_of_suite,
+ &clk_fixed_rate_parent_suite,
+);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for clk fixed rate basic type");
diff --git a/drivers/clk/clk-fixed-rate_test.h b/drivers/clk/clk-fixed-rate_test.h
new file mode 100644
index 000000000000..e0d28e5b6081
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate_test.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_FIXED_RATE_TEST_H
+#define _CLK_FIXED_RATE_TEST_H
+
+#define TEST_FIXED_FREQUENCY 50000000
+#define TEST_FIXED_ACCURACY 300
+
+#endif
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 99b271c1278a..c997e7491996 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -1405,16 +1405,12 @@ static int lmk04832_probe(struct spi_device *spi)
lmk->dev = &spi->dev;
- lmk->oscin = devm_clk_get(lmk->dev, "oscin");
+ lmk->oscin = devm_clk_get_enabled(lmk->dev, "oscin");
if (IS_ERR(lmk->oscin)) {
dev_err(lmk->dev, "failed to get oscin clock\n");
return PTR_ERR(lmk->oscin);
}
- ret = clk_prepare_enable(lmk->oscin);
- if (ret)
- return ret;
-
lmk->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
GPIOD_OUT_LOW);
@@ -1422,14 +1418,14 @@ static int lmk04832_probe(struct spi_device *spi)
sizeof(struct lmk_dclk), GFP_KERNEL);
if (!lmk->dclk) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
lmk->clkout = devm_kcalloc(lmk->dev, info->num_channels,
sizeof(*lmk->clkout), GFP_KERNEL);
if (!lmk->clkout) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
lmk->clk_data = devm_kzalloc(lmk->dev, struct_size(lmk->clk_data, hws,
@@ -1437,7 +1433,7 @@ static int lmk04832_probe(struct spi_device *spi)
GFP_KERNEL);
if (!lmk->clk_data) {
ret = -ENOMEM;
- goto err_disable_oscin;
+ return ret;
}
device_property_read_u32(lmk->dev, "ti,vco-hz", &lmk->vco_rate);
@@ -1465,7 +1461,7 @@ static int lmk04832_probe(struct spi_device *spi)
dev_err(lmk->dev, "missing reg property in child: %s\n",
child->full_name);
of_node_put(child);
- goto err_disable_oscin;
+ return ret;
}
of_property_read_u32(child, "ti,clkout-fmt",
@@ -1486,7 +1482,7 @@ static int lmk04832_probe(struct spi_device *spi)
__func__, PTR_ERR(lmk->regmap));
ret = PTR_ERR(lmk->regmap);
- goto err_disable_oscin;
+ return ret;
}
regmap_write(lmk->regmap, LMK04832_REG_RST3W, LMK04832_BIT_RESET);
@@ -1496,7 +1492,7 @@ static int lmk04832_probe(struct spi_device *spi)
&rdbk_pin);
ret = lmk04832_set_spi_rdbk(lmk, rdbk_pin);
if (ret)
- goto err_disable_oscin;
+ return ret;
}
regmap_bulk_read(lmk->regmap, LMK04832_REG_ID_PROD_MSB, &tmp, 3);
@@ -1504,13 +1500,13 @@ static int lmk04832_probe(struct spi_device *spi)
dev_err(lmk->dev, "unsupported device type: pid 0x%04x, maskrev 0x%02x\n",
tmp[0] << 8 | tmp[1], tmp[2]);
ret = -EINVAL;
- goto err_disable_oscin;
+ return ret;
}
ret = lmk04832_register_vco(lmk);
if (ret) {
dev_err(lmk->dev, "failed to init device clock path\n");
- goto err_disable_oscin;
+ return ret;
}
if (lmk->vco_rate) {
@@ -1518,21 +1514,21 @@ static int lmk04832_probe(struct spi_device *spi)
ret = clk_set_rate(lmk->vco.clk, lmk->vco_rate);
if (ret) {
dev_err(lmk->dev, "failed to set VCO rate\n");
- goto err_disable_oscin;
+ return ret;
}
}
ret = lmk04832_register_sclk(lmk);
if (ret) {
dev_err(lmk->dev, "failed to init SYNC/SYSREF clock path\n");
- goto err_disable_oscin;
+ return ret;
}
for (i = 0; i < info->num_channels; i++) {
ret = lmk04832_register_clkout(lmk, i);
if (ret) {
dev_err(lmk->dev, "failed to register clk %d\n", i);
- goto err_disable_oscin;
+ return ret;
}
}
@@ -1541,24 +1537,12 @@ static int lmk04832_probe(struct spi_device *spi)
lmk->clk_data);
if (ret) {
dev_err(lmk->dev, "failed to add provider (%d)\n", ret);
- goto err_disable_oscin;
+ return ret;
}
spi_set_drvdata(spi, lmk);
return 0;
-
-err_disable_oscin:
- clk_disable_unprepare(lmk->oscin);
-
- return ret;
-}
-
-static void lmk04832_remove(struct spi_device *spi)
-{
- struct lmk04832 *lmk = spi_get_drvdata(spi);
-
- clk_disable_unprepare(lmk->oscin);
}
static const struct spi_device_id lmk04832_id[] = {
@@ -1579,7 +1563,6 @@ static struct spi_driver lmk04832_driver = {
.of_match_table = lmk04832_of_id,
},
.probe = lmk04832_probe,
- .remove = lmk04832_remove,
.id_table = lmk04832_id,
};
module_spi_driver(lmk04832_driver);
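
The conversion works because devm_clk_get_enabled() is roughly shorthand for the following sketch (devres bookkeeping elided):

	clk = devm_clk_get(dev, "oscin");
	ret = clk_prepare_enable(clk);
	/* plus an automatic clk_disable_unprepare() before the clk is put */

With teardown handled by devres, every error path can simply return and the remove callback goes away.
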
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 5efb10776ae5..39049f62dbbb 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -281,7 +281,7 @@ static struct platform_driver palmas_clks_driver = {
.of_match_table = palmas_clks_of_match,
},
.probe = palmas_clks_probe,
- .remove_new = palmas_clks_remove,
+ .remove = palmas_clks_remove,
};
module_platform_driver(palmas_clks_driver);
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 3dd2b83d0404..bd4f21c22004 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -142,7 +142,7 @@ MODULE_DEVICE_TABLE(of, clk_pwm_dt_ids);
static struct platform_driver clk_pwm_driver = {
.probe = clk_pwm_probe,
- .remove_new = clk_pwm_remove,
+ .remove = clk_pwm_remove,
.driver = {
.name = "pwm-clock",
.of_match_table = clk_pwm_dt_ids,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 38c456540d1b..014db6386624 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -263,7 +263,7 @@ static struct platform_driver s2mps11_clk_driver = {
.name = "s2mps11-clk",
},
.probe = s2mps11_clk_probe,
- .remove_new = s2mps11_clk_remove,
+ .remove = s2mps11_clk_remove,
.id_table = s2mps11_clk_id,
};
module_platform_driver(s2mps11_clk_driver);
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index d86a02563f6c..15510c2ff21c 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -156,13 +156,13 @@ static void scmi_clk_atomic_disable(struct clk_hw *hw)
scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
}
-static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
+static int __scmi_clk_is_enabled(struct clk_hw *hw, bool atomic)
{
int ret;
bool enabled = false;
struct scmi_clk *clk = to_scmi_clk(hw);
- ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
+ ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, atomic);
if (ret)
dev_warn(clk->dev,
"Failed to get state for clock ID %d\n", clk->id);
@@ -170,6 +170,16 @@ static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
return !!enabled;
}
+static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
+{
+ return __scmi_clk_is_enabled(hw, ATOMIC);
+}
+
+static int scmi_clk_is_enabled(struct clk_hw *hw)
+{
+ return __scmi_clk_is_enabled(hw, NOT_ATOMIC);
+}
+
static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
int ret;
@@ -285,6 +295,8 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
ops->is_enabled = scmi_clk_atomic_is_enabled;
+ else
+ ops->is_prepared = scmi_clk_is_enabled;
/* Rate ops */
ops->recalc_rate = scmi_clk_recalc_rate;
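
The non-atomic query may sleep, so it is wired to .is_prepared rather than .is_enabled: the clk core calls .is_enabled with the enable spinlock held, while .is_prepared runs under the prepare mutex where sleeping is allowed. A sketch of the resulting wiring:

	if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
		ops->is_enabled = scmi_clk_atomic_is_enabled;	/* atomic ctx */
	else
		ops->is_prepared = scmi_clk_is_enabled;		/* may sleep */
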
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 108b697bd317..19d530d52e64 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -303,7 +303,7 @@ static struct platform_driver scpi_clocks_driver = {
.of_match_table = scpi_clocks_ids,
},
.probe = scpi_clocks_probe,
- .remove_new = scpi_clocks_remove,
+ .remove = scpi_clocks_remove,
};
module_platform_driver(scpi_clocks_driver);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 285ed1ad8a37..d02451f951cf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4762,7 +4762,7 @@ void __clk_put(struct clk *clk)
clk->exclusive_count = 0;
}
- hlist_del(&clk->clks_node);
+ clk_core_unlink_consumer(clk);
/* If we had any boundaries on that clock, let's drop them. */
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
@@ -5232,7 +5232,7 @@ static int of_parse_clkspec(const struct device_node *np, int index,
* clocks.
*/
np = np->parent;
- if (np && !of_get_property(np, "clock-ranges", NULL))
+ if (np && !of_property_present(np, "clock-ranges"))
break;
index = 0;
}
diff --git a/drivers/clk/clk_kunit_helpers.c b/drivers/clk/clk_kunit_helpers.c
new file mode 100644
index 000000000000..52fd25594c96
--- /dev/null
+++ b/drivers/clk/clk_kunit_helpers.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit helpers for clk providers and consumers
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <kunit/clk.h>
+#include <kunit/resource.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_disable_unprepare_wrapper,
+ clk_disable_unprepare, struct clk *);
+/**
+ * clk_prepare_enable_kunit() - Test managed clk_prepare_enable()
+ * @test: The test context
+ * @clk: clk to prepare and enable
+ *
+ * Return: 0 on success, or negative errno on failure.
+ */
+int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_disable_unprepare_wrapper,
+ clk);
+}
+EXPORT_SYMBOL_GPL(clk_prepare_enable_kunit);
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_put_wrapper, clk_put, struct clk *);
+
+static struct clk *__clk_get_kunit(struct kunit *test, struct clk *clk)
+{
+ int ret;
+
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = kunit_add_action_or_reset(test, clk_put_wrapper, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+
+/**
+ * clk_get_kunit() - Test managed clk_get()
+ * @test: The test context
+ * @dev: device for clock "consumer"
+ * @con_id: clock consumer ID
+ *
+ * Just like clk_get(), except the clk is managed by the test case and is
+ * automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, con_id);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(clk_get_kunit);
+
+/**
+ * of_clk_get_kunit() - Test managed of_clk_get()
+ * @test: The test context
+ * @np: device_node for clock "consumer"
+ * @index: index in 'clocks' property of @np
+ *
+ * Just like of_clk_get(), except the clk is managed by the test case and is
+ * automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+of_clk_get_kunit(struct kunit *test, struct device_node *np, int index)
+{
+ struct clk *clk;
+
+ clk = of_clk_get(np, index);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(of_clk_get_kunit);
+
+/**
+ * clk_hw_get_clk_kunit() - Test managed clk_hw_get_clk()
+ * @test: The test context
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Just like clk_hw_get_clk(), except the clk is managed by the test case and
+ * is automatically put with clk_put() after the test case concludes.
+ *
+ * Return: new clk consumer or ERR_PTR on failure.
+ */
+struct clk *
+clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_hw_get_clk(hw, con_id);
+
+ return __clk_get_kunit(test, clk);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_clk_kunit);
+
+/**
+ * clk_hw_get_clk_prepared_enabled_kunit() - Test managed clk_hw_get_clk() + clk_prepare_enable()
+ * @test: The test context
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Just like
+ *
+ * .. code-block:: c
+ *
+ * struct clk *clk = clk_hw_get_clk(...);
+ * clk_prepare_enable(clk);
+ *
+ * except the clk is managed by the test case and is automatically disabled and
+ * unprepared with clk_disable_unprepare() and put with clk_put() after the
+ * test case concludes.
+ *
+ * Return: new clk consumer that is prepared and enabled or ERR_PTR on failure.
+ */
+struct clk *
+clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
+ const char *con_id)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = clk_hw_get_clk_kunit(test, hw, con_id);
+ if (IS_ERR(clk))
+ return clk;
+
+ ret = clk_prepare_enable_kunit(test, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_clk_prepared_enabled_kunit);
+
+KUNIT_DEFINE_ACTION_WRAPPER(clk_hw_unregister_wrapper,
+ clk_hw_unregister, struct clk_hw *);
+
+/**
+ * clk_hw_register_kunit() - Test managed clk_hw_register()
+ * @test: The test context
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * Just like clk_hw_register(), except the clk registration is managed by the
+ * test case and is automatically unregistered after the test case concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw)
+{
+ int ret;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_kunit);
+
+/**
+ * of_clk_hw_register_kunit() - Test managed of_clk_hw_register()
+ * @test: The test context
+ * @node: device_node of device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * Just like of_clk_hw_register(), except the clk registration is managed by
+ * the test case and is automatically unregistered after the test case
+ * concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struct clk_hw *hw)
+{
+ int ret;
+
+ ret = of_clk_hw_register(node, hw);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, clk_hw_unregister_wrapper, hw);
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_register_kunit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit helpers for clk providers and consumers");
diff --git a/drivers/clk/clk_parent_data_test.h b/drivers/clk/clk_parent_data_test.h
new file mode 100644
index 000000000000..eedd53ae910d
--- /dev/null
+++ b/drivers/clk/clk_parent_data_test.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_PARENT_DATA_TEST_H
+#define _CLK_PARENT_DATA_TEST_H
+
+#define CLK_PARENT_DATA_1MHZ_NAME "1mhz_fixed_legacy"
+#define CLK_PARENT_DATA_PARENT1 "parent_fwname"
+#define CLK_PARENT_DATA_PARENT2 "50"
+#define CLK_PARENT_DATA_50MHZ_NAME "50_clk"
+
+#endif
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index fbbea66d9cba..41fc8eba3418 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -4,12 +4,19 @@
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
/* Needed for clk_hw_get_clk() */
#include "clk.h"
+#include <kunit/clk.h>
+#include <kunit/of.h>
+#include <kunit/platform_device.h>
#include <kunit/test.h>
+#include "clk_parent_data_test.h"
+
static const struct clk_ops empty_clk_ops = { };
#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
@@ -2659,6 +2666,448 @@ static struct kunit_suite clk_mux_no_reparent_test_suite = {
.test_cases = clk_mux_no_reparent_test_cases,
};
+struct clk_register_clk_parent_data_test_case {
+ const char *desc;
+ struct clk_parent_data pdata;
+};
+
+static void
+clk_register_clk_parent_data_test_case_to_desc(
+ const struct clk_register_clk_parent_data_test_case *t, char *desc)
+{
+ strcpy(desc, t->desc);
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_of_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::index.
+ */
+ .desc = "clk_parent_data_of_index_test",
+ .pdata.index = 0,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::fwname.
+ */
+ .desc = "clk_parent_data_of_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::name.
+ */
+ .desc = "clk_parent_data_of_name_test",
+ /* The index must be negative to indicate firmware not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct
+ * clk_parent_data::{fw_name,name}.
+ */
+ .desc = "clk_parent_data_of_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct clk_parent_data::{index,name}.
+ * Index takes priority.
+ */
+ .desc = "clk_parent_data_of_index_name_priority_test",
+ .pdata.index = 0,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device_node can
+ * find a parent based on struct
+ * clk_parent_data::{index,fwname,name}. The fw_name takes
+ * priority over index and name.
+ */
+ .desc = "clk_parent_data_of_index_fwname_name_priority_test",
+ .pdata.index = 1,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/**
+ * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
+ * @np: device node of clk under test
+ * @hw: clk_hw for clk under test
+ */
+struct clk_register_clk_parent_data_of_ctx {
+ struct device_node *np;
+ struct clk_hw hw;
+};
+
+static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_of_ctx *ctx;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
+ if (!ctx->np)
+ return -ENODEV;
+
+ of_node_put_kunit(test, ctx->np);
+
+ return 0;
+}
+
+/*
+ * Test that a clk registered with a struct device_node can find a parent based on
+ * struct clk_parent_data when the hw member isn't set.
+ */
+static void clk_register_clk_parent_data_of_test(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
+ struct clk_hw *parent_hw;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_init_data init = { };
+ struct clk *expected_parent, *actual_parent;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
+
+ expected_parent = of_clk_get_kunit(test, ctx->np, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ test_param = test->param_value;
+ init.parent_data = &test_param->pdata;
+ init.num_parents = 1;
+ init.name = "parent_data_of_test_clk";
+ init.ops = &clk_dummy_single_parent_ops;
+ ctx->hw.init = &init;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
+
+ parent_hw = clk_hw_get_parent(&ctx->hw);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+
+ actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
+ clk_register_clk_parent_data_of_test_gen_params),
+ {}
+};
+
+/*
+ * Test suite for registering clks with struct clk_parent_data and a struct
+ * device_node.
+ */
+static struct kunit_suite clk_register_clk_parent_data_of_suite = {
+ .name = "clk_register_clk_parent_data_of",
+ .init = clk_register_clk_parent_data_of_test_init,
+ .test_cases = clk_register_clk_parent_data_of_test_cases,
+};
+
+/**
+ * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
+ * @dev: device of clk under test
+ * @hw: clk_hw for clk under test
+ * @pdrv: driver to attach to find @dev
+ */
+struct clk_register_clk_parent_data_device_ctx {
+ struct device *dev;
+ struct clk_hw hw;
+ struct platform_driver pdrv;
+};
+
+static inline struct clk_register_clk_parent_data_device_ctx *
+clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct clk_register_clk_parent_data_device_ctx, pdrv);
+}
+
+static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
+{
+ struct clk_register_clk_parent_data_device_ctx *ctx;
+
+ ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
+ ctx->dev = &pdev->dev;
+
+ return 0;
+}
+
+static void clk_register_clk_parent_data_device_driver(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
+
+ ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
+ ctx->pdrv.driver.of_match_table = match_table;
+ ctx->pdrv.driver.name = __func__;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_device_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::index.
+ */
+ .desc = "clk_parent_data_device_index_test",
+ .pdata.index = 1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::fw_name.
+ */
+ .desc = "clk_parent_data_device_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::name.
+ */
+ .desc = "clk_parent_data_device_name_test",
+ /* The index must be negative to indicate the firmware node is not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::{fw_name,name}.
+ */
+ .desc = "clk_parent_data_device_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::{index,name}. Index
+ * takes priority.
+ */
+ .desc = "clk_parent_data_device_index_name_priority_test",
+ .pdata.index = 1,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::{index,fw_name,name}.
+ * The fw_name takes priority over index and name.
+ */
+ .desc = "clk_parent_data_device_index_fwname_name_priority_test",
+ .pdata.index = 0,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
+ clk_register_clk_parent_data_device_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/*
+ * Test that a clk registered with a struct device can find a parent based on
+ * struct clk_parent_data when the hw member isn't set.
+ */
+static void clk_register_clk_parent_data_device_test(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_device_ctx *ctx;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_hw *parent_hw;
+ struct clk_init_data init = { };
+ struct clk *expected_parent, *actual_parent;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ clk_register_clk_parent_data_device_driver(test);
+
+ expected_parent = clk_get_kunit(test, ctx->dev, "50");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+
+ test_param = test->param_value;
+ init.parent_data = &test_param->pdata;
+ init.num_parents = 1;
+ init.name = "parent_data_device_test_clk";
+ init.ops = &clk_dummy_single_parent_ops;
+ ctx->hw.init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+
+ parent_hw = clk_hw_get_parent(&ctx->hw);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
+
+ actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
+}
+
+static const struct clk_register_clk_parent_data_test_case
+clk_register_clk_parent_data_device_hw_cases[] = {
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw.
+ */
+ .desc = "clk_parent_data_device_hw_index_test",
+ /* The index must be negative to indicate the firmware node is not used */
+ .pdata.index = -1,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when
+ * struct clk_parent_data::fw_name is set.
+ */
+ .desc = "clk_parent_data_device_hw_fwname_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::name is set.
+ */
+ .desc = "clk_parent_data_device_hw_name_test",
+ /* The index must be negative to indicate the firmware node is not used */
+ .pdata.index = -1,
+ .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::{fw_name,name} are set.
+ */
+ .desc = "clk_parent_data_device_hw_fwname_name_test",
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when struct
+ * clk_parent_data::index is set. The hw pointer takes
+ * priority.
+ */
+ .desc = "clk_parent_data_device_hw_index_priority_test",
+ .pdata.index = 0,
+ },
+ {
+ /*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw when
+ * struct clk_parent_data::{index,fw_name,name} are set.
+ * The hw pointer takes priority over everything else.
+ */
+ .desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
+ .pdata.index = 0,
+ .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
+ .pdata.name = "not_matching",
+ },
+};
+
+KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
+ clk_register_clk_parent_data_device_hw_cases,
+ clk_register_clk_parent_data_test_case_to_desc)
+
+/*
+ * Test that a clk registered with a struct device can find a
+ * parent based on struct clk_parent_data::hw.
+ */
+static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
+{
+ struct clk_register_clk_parent_data_device_ctx *ctx;
+ const struct clk_register_clk_parent_data_test_case *test_param;
+ struct clk_dummy_context *parent;
+ struct clk_hw *parent_hw;
+ struct clk_parent_data pdata = { };
+ struct clk_init_data init = { };
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ clk_register_clk_parent_data_device_driver(test);
+
+ parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_hw = &parent->hw;
+ parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
+ &clk_dummy_rate_ops, 0);
+
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
+
+ test_param = test->param_value;
+ memcpy(&pdata, &test_param->pdata, sizeof(pdata));
+ pdata.hw = parent_hw;
+ init.parent_data = &pdata;
+ init.num_parents = 1;
+ init.ops = &clk_dummy_single_parent_ops;
+ init.name = "parent_data_device_hw_test_clk";
+ ctx->hw.init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
+}
+
+static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
+ clk_register_clk_parent_data_device_test_gen_params),
+ KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
+ clk_register_clk_parent_data_device_hw_test_gen_params),
+ {}
+};
+
+static int clk_register_clk_parent_data_device_init(struct kunit *test)
+{
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
+
+ return 0;
+}
+
+/*
+ * Test suite for registering clks with struct clk_parent_data and a struct
+ * device.
+ */
+static struct kunit_suite clk_register_clk_parent_data_device_suite = {
+ .name = "clk_register_clk_parent_data_device",
+ .init = clk_register_clk_parent_data_device_init,
+ .test_cases = clk_register_clk_parent_data_device_test_cases,
+};
+
kunit_test_suites(
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
@@ -2671,8 +3120,10 @@ kunit_test_suites(
&clk_range_test_suite,
&clk_range_maximize_test_suite,
&clk_range_minimize_test_suite,
+ &clk_register_clk_parent_data_of_suite,
+ &clk_register_clk_parent_data_device_suite,
&clk_single_parent_mux_test_suite,
- &clk_uncached_test_suite
+ &clk_uncached_test_suite,
);
MODULE_DESCRIPTION("Kunit tests for clk framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index ec60ecb517f1..a5109fe8b16e 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -513,8 +513,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev,
fck_clk = devm_clk_get(dev, "fck");
if (IS_ERR(fck_clk)) {
- dev_err_probe(dev, PTR_ERR(fck_clk), "Missing fck clock\n");
- return ERR_CAST(fck_clk);
+ return dev_err_cast_probe(dev, fck_clk, "Missing fck clock\n");
}
usb0 = devm_kzalloc(dev, sizeof(*usb0), GFP_KERNEL);
@@ -749,11 +748,9 @@ static int da8xx_cfgchip_probe(struct platform_device *pdev)
clk_init = device_get_match_data(dev);
if (clk_init) {
- struct device_node *parent;
+ struct device_node *parent __free(device_node) = of_get_parent(dev->of_node);
- parent = of_get_parent(dev->of_node);
regmap = syscon_node_to_regmap(parent);
- of_node_put(parent);
} else if (pdev->id_entry && pdata) {
clk_init = (void *)pdev->id_entry->driver_data;
regmap = pdata->cfgchip;
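
A condensed sketch of the two idioms the da8xx-cfgchip conversion above adopts: dev_err_cast_probe() logs (handling probe deferral) and returns the ERR_CAST() of the error pointer in one statement, and __free(device_node) drops the of_get_parent() reference automatically when the variable leaves scope. The example_* functions are hypothetical.

	#include <linux/clk.h>
	#include <linux/cleanup.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>

	/* Error-path logging and cast in one step. */
	static struct clk *example_get_fck(struct device *dev)
	{
		struct clk *fck = devm_clk_get(dev, "fck");

		if (IS_ERR(fck))
			return dev_err_cast_probe(dev, fck, "Missing fck clock\n");

		return fck;
	}

	/* Scope-based of_node_put(): the reference is released on every
	 * return path without an explicit call. */
	static struct regmap *example_get_parent_regmap(struct device *dev)
	{
		struct device_node *parent __free(device_node) =
			of_get_parent(dev->of_node);

		return syscon_node_to_regmap(parent);
	}
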
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
index 141b727ff60d..0c50acd8543a 100644
--- a/drivers/clk/hisilicon/clk-hi3519.c
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -179,7 +179,7 @@ MODULE_DEVICE_TABLE(of, hi3519_clk_match_table);
static struct platform_driver hi3519_clk_driver = {
.probe = hi3519_clk_probe,
- .remove_new = hi3519_clk_remove,
+ .remove = hi3519_clk_remove,
.driver = {
.name = "hi3519-clk",
.of_match_table = hi3519_clk_match_table,
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
index c79a94f6d9d2..f297fb25c512 100644
--- a/drivers/clk/hisilicon/clk-hi3559a.c
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
@@ -407,7 +407,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct hi3559av100_clk_pll *clk = to_pll_clk(hw);
- u64 frac_val, fbdiv_val, refdiv_val;
+ u64 frac_val, fbdiv_val;
u32 postdiv1_val, postdiv2_val;
u32 val;
u64 tmp, rate;
@@ -435,14 +435,13 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
val = readl_relaxed(clk->ctrl_reg2);
val = val >> clk->refdiv_shift;
val &= ((1 << clk->refdiv_width) - 1);
- refdiv_val = val;
/* rate = 24000000 * (fbdiv + frac / (1<<24) ) / refdiv */
rate = 0;
tmp = 24000000 * fbdiv_val + (24000000 * frac_val) / (1 << 24);
rate += tmp;
- do_div(rate, refdiv_val);
- do_div(rate, postdiv1_val * postdiv2_val);
+ rate = div_u64(rate, val);
+ rate = div_u64(rate, postdiv1_val * postdiv2_val);
return rate;
}
@@ -818,7 +817,7 @@ static void hi3559av100_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3559av100_crg_driver = {
.probe = hi3559av100_crg_probe,
- .remove_new = hi3559av100_crg_remove,
+ .remove = hi3559av100_crg_remove,
.driver = {
.name = "hi3559av100-clock",
.of_match_table = hi3559av100_crg_match_table,
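
A short sketch of why the hi3559a hunk above switches from do_div() to div_u64(): do_div(x, d) divides x in place and truncates the divisor to 32 bits, which made the removed u64 refdiv_val temporary both unnecessary and suspect, while div_u64() takes an explicit u32 divisor and returns the quotient directly. Values and the function name are illustrative only.

	#include <linux/math64.h>
	#include <linux/types.h>

	static u64 example_pll_rate(u64 vco, u32 refdiv, u32 postdiv1, u32 postdiv2)
	{
		/* div_u64() returns the quotient, so it chains cleanly */
		u64 rate = div_u64(vco, refdiv);

		return div_u64(rate, postdiv1 * postdiv2);
	}
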
diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
index e602e65fbc38..b66140f74c51 100644
--- a/drivers/clk/hisilicon/crg-hi3516cv300.c
+++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
@@ -294,7 +294,7 @@ static void hi3516cv300_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3516cv300_crg_driver = {
.probe = hi3516cv300_crg_probe,
- .remove_new = hi3516cv300_crg_remove,
+ .remove = hi3516cv300_crg_remove,
.driver = {
.name = "hi3516cv300-crg",
.of_match_table = hi3516cv300_crg_match_table,
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index f651b197e45a..8eabd1cc229f 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -377,7 +377,7 @@ static void hi3798cv200_crg_remove(struct platform_device *pdev)
static struct platform_driver hi3798cv200_crg_driver = {
.probe = hi3798cv200_crg_probe,
- .remove_new = hi3798cv200_crg_remove,
+ .remove = hi3798cv200_crg_remove,
.driver = {
.name = "hi3798cv200-crg",
.of_match_table = hi3798cv200_crg_match_table,
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 6da0fba68225..6ff6d934848a 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -81,6 +81,7 @@ config CLK_IMX8MP
tristate "IMX8MP CCM Clock Driver"
depends on ARCH_MXC || COMPILE_TEST
select MXC_CLK
+ select AUXILIARY_BUS if RESET_CONTROLLER
help
Build the driver for i.MX8MP CCM Clock Driver
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index e208ddc51133..8ed2e0ad2769 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -14,6 +14,7 @@
#include "../clk-fractional-divider.h"
#include "clk.h"
+#define PCG_PR_MASK BIT(31)
#define PCG_PCS_SHIFT 24
#define PCG_PCS_MASK 0x7
#define PCG_CGC_SHIFT 30
@@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
struct clk_hw *hw;
u32 val;
+ val = readl(reg);
+ if (!(val & PCG_PR_MASK)) {
+ pr_info("PCC PR is 0 for clk:%s, bypass\n", name);
+ return NULL;
+ }
+
if (mux_present) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 8cc07d056a83..f187582ba491 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = {
.determine_rate = imx8m_clk_composite_mux_determine_rate,
};
+static int imx8m_clk_composite_gate_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ val = readl(gate->reg);
+ val |= BIT(gate->bit_idx);
+ writel(val, gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void imx8m_clk_composite_gate_disable(struct clk_hw *hw)
+{
+ /* Deliberately a no-op: the composite clk requires a disable hook,
+ * but the clk must not be gated while the M core may be using it. */
+}
+
+static const struct clk_ops imx8m_clk_composite_gate_ops = {
+ .enable = imx8m_clk_composite_gate_enable,
+ .disable = imx8m_clk_composite_gate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
struct clk_hw *__imx8m_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
@@ -217,6 +245,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
struct clk_mux *mux;
const struct clk_ops *divider_ops;
const struct clk_ops *mux_ops;
+ const struct clk_ops *gate_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -257,20 +286,22 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
div->flags = CLK_DIVIDER_ROUND_CLOSEST;
/* always register the gate, but use no-op disable ops if the M core is booted */
- if (!mcore_booted) {
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate)
- goto free_div;
-
- gate_hw = &gate->hw;
- gate->reg = reg;
- gate->bit_idx = PCG_CGC_SHIFT;
- gate->lock = &imx_ccm_lock;
- }
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto free_div;
+
+ gate_hw = &gate->hw;
+ gate->reg = reg;
+ gate->bit_idx = PCG_CGC_SHIFT;
+ gate->lock = &imx_ccm_lock;
+ if (!mcore_booted)
+ gate_ops = &clk_gate_ops;
+ else
+ gate_ops = &imx8m_clk_composite_gate_ops;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, mux_ops, div_hw,
- divider_ops, gate_hw, &clk_gate_ops, flags);
+ divider_ops, gate_hw, gate_ops, flags);
if (IS_ERR(hw))
goto free_gate;
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index 81164bdcd6cc..6c6c5a30f328 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{
+ /*
+ * Skip disabling the root clock gate if the mcore is booted,
+ * since the root clock may still be in use by the mcore.
+ */
+ if (mcore_booted)
+ return;
+
imx93_clk_composite_gate_endisable(hw, 0);
}
@@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ro_ops, div_hw,
&clk_divider_ro_ops, NULL, NULL, flags);
- } else if (!mcore_booted) {
+ } else {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
goto fail;
@@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
&imx93_clk_composite_divider_ops, gate_hw,
&imx93_clk_composite_gate_ops,
flags | CLK_SET_RATE_NO_REPARENT);
- } else {
- hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
- mux_hw, &imx93_clk_composite_mux_ops, div_hw,
- &imx93_clk_composite_divider_ops, NULL,
- &imx93_clk_composite_gate_ops,
- flags | CLK_SET_RATE_NO_REPARENT);
}
if (IS_ERR(hw))
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 44462ab50e51..591e0364ee5c 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -78,6 +78,7 @@ struct clk_fracn_gppll {
* The Fvco should be in range 2.5Ghz to 5Ghz
*/
static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
+ PLL_FRACN_GP(1039500000U, 173, 25, 100, 1, 4),
PLL_FRACN_GP(650000000U, 162, 50, 100, 0, 6),
PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
PLL_FRACN_GP(560000000U, 140, 0, 1, 0, 6),
@@ -106,6 +107,7 @@ static const struct imx_fracn_gppll_rate_table int_tbl[] = {
PLL_FRACN_GP_INTEGER(1700000000U, 141, 1, 2),
PLL_FRACN_GP_INTEGER(1400000000U, 175, 1, 3),
PLL_FRACN_GP_INTEGER(900000000U, 150, 1, 4),
+ PLL_FRACN_GP_INTEGER(800000000U, 200, 1, 6),
};
struct imx_fracn_gppll_clk imx_fracn_gppll_integer = {
@@ -291,6 +293,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
if (val & POWERUP_MASK)
return 0;
+ if (pll->flags & CLK_FRACN_GPPLL_FRACN)
+ writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR),
+ pll->base + PLL_NUMERATOR);
+
val |= CLKMUX_BYPASS;
writel_relaxed(val, pll->base + PLL_CTRL);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index f9394e94f69d..05c7a82b751f 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);
- clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk);
- clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk);
imx_register_uart_clocks();
}
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 2b77d1fc7bb9..99adc55e3f5d 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -498,14 +498,14 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel), CLK_SET_PARENT_GATE);
- hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE | CLK_SET_RATE_PARENT);
hws[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel), CLK_SET_PARENT_GATE);
diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
index 1bdb480cc96c..6c351050b82a 100644
--- a/drivers/clk/imx/clk-imx8-acm.c
+++ b/drivers/clk/imx/clk-imx8-acm.c
@@ -54,10 +54,12 @@ struct clk_imx8_acm_sel {
* struct imx8_acm_soc_data - soc specific data
* @sels: pointer to struct clk_imx8_acm_sel
* @num_sels: number of items in @sels
+ * @mclk_sels: pointer to the SoC's mclk parent-data table (imx8qm/qxp/dxl_mclk_sels)
*/
struct imx8_acm_soc_data {
struct clk_imx8_acm_sel *sels;
unsigned int num_sels;
+ struct clk_parent_data *mclk_sels;
};
/**
@@ -111,11 +113,14 @@ static const struct clk_parent_data imx8qm_mclk_out_sels[] = {
{ .fw_name = "sai6_rx_bclk" },
};
-static const struct clk_parent_data imx8qm_mclk_sels[] = {
+#define ACM_AUD_CLK0_SEL_INDEX 2
+#define ACM_AUD_CLK1_SEL_INDEX 3
+
+static struct clk_parent_data imx8qm_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static const struct clk_parent_data imx8qm_asrc_mux_clk_sels[] = {
@@ -176,11 +181,11 @@ static const struct clk_parent_data imx8qxp_mclk_out_sels[] = {
{ .fw_name = "sai4_rx_bclk" },
};
-static const struct clk_parent_data imx8qxp_mclk_sels[] = {
+static struct clk_parent_data imx8qxp_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static struct clk_imx8_acm_sel imx8qxp_sels[] = {
@@ -228,11 +233,11 @@ static const struct clk_parent_data imx8dxl_mclk_out_sels[] = {
{ .index = -1 },
};
-static const struct clk_parent_data imx8dxl_mclk_sels[] = {
+static struct clk_parent_data imx8dxl_mclk_sels[] = {
{ .fw_name = "aud_pll_div_clk0_lpcg_clk" },
{ .fw_name = "aud_pll_div_clk1_lpcg_clk" },
- { .fw_name = "acm_aud_clk0_sel" },
- { .fw_name = "acm_aud_clk1_sel" },
+ { }, /* clk_hw pointer of "acm_aud_clk0_sel" */
+ { }, /* clk_hw pointer of "acm_aud_clk1_sel" */
};
static struct clk_imx8_acm_sel imx8dxl_sels[] = {
@@ -375,6 +380,18 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX_ADMA_ACM_CLK_END);
goto err_clk_register;
}
+
+ /*
+ * IMX_ADMA_ACM_AUD_CLK0_SEL and IMX_ADMA_ACM_AUD_CLK1_SEL are
+ * registered before the clocks that use them as parents. Once they
+ * are registered, store their clk_hw pointers in the
+ * imx8qm/qxp/dxl_mclk_sels tables.
+ */
+ if (sels[i].clkid == IMX_ADMA_ACM_AUD_CLK0_SEL)
+ priv->soc_data->mclk_sels[ACM_AUD_CLK0_SEL_INDEX].hw =
+ hws[IMX_ADMA_ACM_AUD_CLK0_SEL];
+ if (sels[i].clkid == IMX_ADMA_ACM_AUD_CLK1_SEL)
+ priv->soc_data->mclk_sels[ACM_AUD_CLK1_SEL_INDEX].hw =
+ hws[IMX_ADMA_ACM_AUD_CLK1_SEL];
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
@@ -406,16 +423,19 @@ static void imx8_acm_clk_remove(struct platform_device *pdev)
static const struct imx8_acm_soc_data imx8qm_acm_data = {
.sels = imx8qm_sels,
.num_sels = ARRAY_SIZE(imx8qm_sels),
+ .mclk_sels = imx8qm_mclk_sels,
};
static const struct imx8_acm_soc_data imx8qxp_acm_data = {
.sels = imx8qxp_sels,
.num_sels = ARRAY_SIZE(imx8qxp_sels),
+ .mclk_sels = imx8qxp_mclk_sels,
};
static const struct imx8_acm_soc_data imx8dxl_acm_data = {
.sels = imx8dxl_sels,
.num_sels = ARRAY_SIZE(imx8dxl_sels),
+ .mclk_sels = imx8dxl_mclk_sels,
};
static const struct of_device_id imx8_acm_match[] = {
@@ -468,7 +488,7 @@ static struct platform_driver imx8_acm_clk_driver = {
.pm = &imx8_acm_pm_ops,
},
.probe = imx8_acm_clk_probe,
- .remove_new = imx8_acm_clk_remove,
+ .remove = imx8_acm_clk_remove,
};
module_platform_driver(imx8_acm_clk_driver);
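
The ACM change above works because struct clk_parent_data offers several ways to name a parent, and a .hw pointer, once filled in, takes priority over all of them (the precedence the KUnit tests earlier in this diff exercise). A small illustrative sketch, with made-up names:

	#include <linux/clk-provider.h>

	static struct clk_parent_data example_parents[] = {
		{ .fw_name = "bus" },			/* DT clock-names lookup */
		{ .index = 1 },				/* DT clocks phandle by position */
		{ .name = "osc_24m", .index = -1 },	/* legacy global-name lookup */
		{ .hw = NULL },	/* filled at runtime, as the ACM driver does;
				 * wins over every other field once set */
	};
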
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 075f643e3f35..342049b847b9 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -432,7 +432,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
- hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 4bd1ed11353b..ab77e148e70c 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -583,6 +583,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
+ hws[IMX8MN_CLK_SAI7_IPG] = imx_clk_hw_gate2_shared2("sai7_ipg_clk", "ipg_audio_root", base + 0x4650, 0, &share_count_sai7);
hws[IMX8MN_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index b381d6f784c8..b2cb157703c5 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -5,6 +5,7 @@
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -13,6 +14,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/imx8mp-clock.h>
@@ -154,6 +156,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
PDM_SEL, 2, 0 \
}
+#define CLK_GATE_PARENT(gname, cname, pname) \
+ { \
+ gname"_cg", \
+ IMX8MP_CLK_AUDIOMIX_##cname, \
+ { .fw_name = pname, .name = pname }, NULL, 1, \
+ CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \
+ 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \
+ }
+
struct clk_imx8mp_audiomix_sel {
const char *name;
int clkid;
@@ -171,14 +182,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("earc", EARC_IPG),
CLK_GATE("ocrama", OCRAMA_IPG),
CLK_GATE("aud2htx", AUD2HTX_IPG),
- CLK_GATE("earc_phy", EARC_PHY),
+ CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT),
CLK_GATE("dsp", DSP_ROOT),
CLK_GATE("dspdbg", DSPDBG_ROOT),
CLK_GATE("edma", EDMA_ROOT),
- CLK_GATE("audpll", AUDPLL_ROOT),
+ CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
CLK_GATE("mu2", MU2_ROOT),
CLK_GATE("mu3", MU3_ROOT),
CLK_PDM,
@@ -217,6 +228,63 @@ struct clk_imx8mp_audiomix_priv {
struct clk_hw_onecell_data clk_data;
};
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
+
+static void clk_imx8mp_audiomix_reset_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static void clk_imx8mp_audiomix_reset_adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ struct clk_imx8mp_audiomix_priv *priv)
+{
+ struct auxiliary_device *adev __free(kfree) = NULL;
+ int ret;
+
+ if (!of_property_present(dev->of_node, "#reset-cells"))
+ return 0;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->name = "reset";
+ adev->dev.parent = dev;
+ adev->dev.release = clk_imx8mp_audiomix_reset_adev_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(dev, clk_imx8mp_audiomix_reset_unregister_adev,
+ no_free_ptr(adev));
+}
+
+#else /* !CONFIG_RESET_CONTROLLER */
+
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+							  struct clk_imx8mp_audiomix_priv *priv)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_RESET_CONTROLLER */
+
static void clk_imx8mp_audiomix_save_restore(struct device *dev, bool save)
{
struct clk_imx8mp_audiomix_priv *priv = dev_get_drvdata(dev);
@@ -269,12 +337,12 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(sels); i++) {
if (sels[i].num_parents == 1) {
hw = devm_clk_hw_register_gate_parent_data(dev,
- sels[i].name, &sels[i].parent, 0,
+ sels[i].name, &sels[i].parent, CLK_SET_RATE_PARENT,
base + sels[i].reg, sels[i].shift, 0, NULL);
} else {
hw = devm_clk_hw_register_mux_parent_data_table(dev,
sels[i].name, sels[i].parents,
- sels[i].num_parents, 0,
+ sels[i].num_parents, CLK_SET_RATE_PARENT,
base + sels[i].reg,
sels[i].shift, sels[i].width,
0, NULL, NULL);
@@ -317,7 +385,8 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS] = hw;
hw = devm_clk_hw_register_gate(dev, "sai_pll_out", "sai_pll_bypass",
- 0, base + SAI_PLL_GNRL_CTL, 13,
+ CLK_SET_RATE_PARENT,
+ base + SAI_PLL_GNRL_CTL, 13,
0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
@@ -326,7 +395,8 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
clk_hw_data->hws[IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT] = hw;
hw = devm_clk_hw_register_fixed_factor(dev, "sai_pll_out_div2",
- "sai_pll_out", 0, 1, 2);
+ "sai_pll_out",
+ CLK_SET_RATE_PARENT, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_clk_register;
@@ -337,6 +407,10 @@ static int clk_imx8mp_audiomix_probe(struct platform_device *pdev)
if (ret)
goto err_clk_register;
+ ret = clk_imx8mp_audiomix_reset_controller_register(dev, priv);
+ if (ret)
+ goto err_clk_register;
+
pm_runtime_put_sync(dev);
return 0;
@@ -380,7 +454,7 @@ MODULE_DEVICE_TABLE(of, clk_imx8mp_audiomix_of_match);
static struct platform_driver clk_imx8mp_audiomix_driver = {
.probe = clk_imx8mp_audiomix_probe,
- .remove_new = clk_imx8mp_audiomix_remove,
+ .remove = clk_imx8mp_audiomix_remove,
.driver = {
.name = "imx8mp-audio-blk-ctrl",
.of_match_table = clk_imx8mp_audiomix_of_match,
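
The reset-controller registration above leans on the __free(kfree)/no_free_ptr() ownership-transfer idiom from linux/cleanup.h. A minimal sketch of the pattern, with hypothetical example_* names:

	#include <linux/cleanup.h>
	#include <linux/device.h>
	#include <linux/slab.h>

	struct example_obj {
		int id;
	};

	static void example_free(void *data)
	{
		kfree(data);
	}

	static int example_register(struct device *dev)
	{
		/* Freed automatically on any early return */
		struct example_obj *obj __free(kfree) =
			kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return -ENOMEM;

		/* no_free_ptr() disarms the scope-based cleanup once the
		 * devres action takes ownership of the allocation. */
		return devm_add_action_or_reset(dev, example_free,
						no_free_ptr(obj));
	}
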
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 670aa2bab301..516dbd170c8a 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -547,12 +547,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
- hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300);
+ hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite_bus_flags("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300, CLK_SET_RATE_PARENT);
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
- hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
- hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+ hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+ hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
@@ -609,7 +609,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80);
- hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00);
+ hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite_bus_flags("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00, CLK_SET_RATE_PARENT);
hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80);
hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00);
hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", imx8mp_memrepair_sels, ccm_base + 0xbf80);
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 7d8883916cac..3ae162625bb1 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -71,7 +71,7 @@ static const char *const lvds0_sels[] = {
"clk_dummy",
"clk_dummy",
"clk_dummy",
- "mipi0_lvds_bypass_clk",
+ "lvds0_bypass_clk",
};
static const char *const lvds1_sels[] = {
@@ -79,7 +79,7 @@ static const char *const lvds1_sels[] = {
"clk_dummy",
"clk_dummy",
"clk_dummy",
- "mipi1_lvds_bypass_clk",
+ "lvds1_bypass_clk",
};
static const char * const mipi_sels[] = {
@@ -90,6 +90,22 @@ static const char * const mipi_sels[] = {
"clk_dummy",
};
+static const char * const mipi0_phy_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+ "mipi_pll_div2_clk",
+ "clk_dummy",
+ "mipi0_bypass_clk",
+};
+
+static const char * const mipi1_phy_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+ "mipi_pll_div2_clk",
+ "clk_dummy",
+ "mipi1_bypass_clk",
+};
+
static const char * const lcd_sels[] = {
"clk_dummy",
"clk_dummy",
@@ -170,8 +186,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
- imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
/* Audio SS */
imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
@@ -206,42 +222,41 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
/* Display controller SS */
- imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
- imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
- imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS);
/* MIPI-LVDS SS */
imx_clk_scu("mipi0_bypass_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu("mipi0_pixel_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER);
- imx_clk_scu("mipi0_lvds_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("mipi0_lvds_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
- imx_clk_scu2("mipi0_lvds_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
+ imx_clk_scu2("mipi0_pixel_clk", mipi0_phy_sels, ARRAY_SIZE(mipi0_phy_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("lvds0_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lvds0_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
+ imx_clk_scu2("lvds0_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
imx_clk_scu2("mipi0_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_MST_BUS);
imx_clk_scu2("mipi0_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_SLV_BUS);
- imx_clk_scu2("mipi0_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY);
+ imx_clk_scu2("mipi0_dsi_phy_clk", mipi0_phy_sels, ARRAY_SIZE(mipi0_phy_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY);
imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi0_pwm0_clk", IMX_SC_R_MIPI_0_PWM_0, IMX_SC_PM_CLK_PER);
imx_clk_scu("mipi1_bypass_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu("mipi1_pixel_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER);
- imx_clk_scu("mipi1_lvds_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("mipi1_lvds_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
- imx_clk_scu2("mipi1_lvds_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
-
+ imx_clk_scu2("mipi1_pixel_clk", mipi1_phy_sels, ARRAY_SIZE(mipi1_phy_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("lvds1_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu2("lvds1_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
+ imx_clk_scu2("lvds1_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
imx_clk_scu2("mipi1_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_MST_BUS);
imx_clk_scu2("mipi1_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_SLV_BUS);
- imx_clk_scu2("mipi1_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PHY);
+ imx_clk_scu2("mipi1_dsi_phy_clk", mipi1_phy_sels, ARRAY_SIZE(mipi1_phy_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PHY);
imx_clk_scu("mipi1_i2c0_clk", IMX_SC_R_MIPI_1_I2C_0, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi1_i2c1_clk", IMX_SC_R_MIPI_1_I2C_1, IMX_SC_PM_CLK_MISC2);
imx_clk_scu("mipi1_pwm0_clk", IMX_SC_R_MIPI_1_PWM_0, IMX_SC_PM_CLK_PER);
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index 74f595f9e5e3..19a62da74be4 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -248,6 +248,35 @@ static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = {
.clk_reg_offset = 0,
};
+static const struct imx95_blk_ctl_clk_dev_data netcmix_clk_dev_data[] = {
+ [IMX95_CLK_NETCMIX_ENETC0_RMII] = {
+ .name = "enetc0_rmii_sel",
+ .parent_names = (const char *[]){"ext_enetref", "enetref"},
+ .num_parents = 2,
+ .reg = 4,
+ .bit_idx = 5,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+ [IMX95_CLK_NETCMIX_ENETC1_RMII] = {
+ .name = "enetc1_rmii_sel",
+ .parent_names = (const char *[]){"ext_enetref", "enetref"},
+ .num_parents = 2,
+ .reg = 4,
+ .bit_idx = 10,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data netcmix_dev_data = {
+ .num_clks = ARRAY_SIZE(netcmix_clk_dev_data),
+ .clk_dev_data = netcmix_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -419,6 +448,7 @@ static const struct of_device_id imx95_bc_of_match[] = {
{ .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data },
{ .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data },
{ .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data },
+ { .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data},
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx95_bc_of_match);
diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c
index 08d155feb035..efd1ac9d8eeb 100644
--- a/drivers/clk/imx/clk-imxrt1050.c
+++ b/drivers/clk/imx/clk-imxrt1050.c
@@ -176,6 +176,7 @@ static struct platform_driver imxrt1050_clk_driver = {
};
module_platform_driver(imxrt1050_clk_driver);
+MODULE_DESCRIPTION("NXP i.MX RT1050 clock driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jesse Taube <Mr.Bossman075@gmail.com>");
MODULE_AUTHOR("Giulio Benetti <giulio.benetti@benettiengineering.com>");
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index e35496af5ceb..df83bd939492 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -226,4 +226,5 @@ static int __init imx_clk_disable_uart(void)
late_initcall_sync(imx_clk_disable_uart);
#endif
+MODULE_DESCRIPTION("Common clock support for NXP i.MX SoC family");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index adb7ad649a0d..aa5202f284f3 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -442,6 +442,10 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
_imx8m_clk_hw_composite(name, parent_names, reg, \
IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
+#define imx8m_clk_hw_composite_bus_flags(name, parent_names, reg, flags) \
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT | flags)
+
#define imx8m_clk_hw_composite_bus_critical(name, parent_names, reg) \
_imx8m_clk_hw_composite(name, parent_names, reg, \
IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 5cefc30a843e..c5894fc9395e 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -707,7 +707,7 @@ static void ti_sci_clk_remove(struct platform_device *pdev)
static struct platform_driver ti_sci_clk_driver = {
.probe = ti_sci_clk_probe,
- .remove_new = ti_sci_clk_remove,
+ .remove = ti_sci_clk_remove,
.driver = {
.name = "ti-sci-clk",
.of_match_table = of_match_ptr(ti_sci_clk_of_match),
diff --git a/drivers/clk/kunit_clk_fixed_rate_test.dtso b/drivers/clk/kunit_clk_fixed_rate_test.dtso
new file mode 100644
index 000000000000..d838ce766fa2
--- /dev/null
+++ b/drivers/clk/kunit_clk_fixed_rate_test.dtso
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "clk-fixed-rate_test.h"
+
+&{/} {
+ fixed_50MHz: kunit-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <TEST_FIXED_FREQUENCY>;
+ clock-accuracy = <TEST_FIXED_ACCURACY>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,single-clk-consumer";
+ clocks = <&fixed_50MHz>;
+ };
+};
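
A hedged sketch of how a test might consume this overlay, following the pattern of the clk_parent_data tests earlier in this diff and using the of_overlay_apply_kunit()/of_clk_get_kunit() helpers they rely on; the header paths and the test body are assumptions, not part of this commit:

	#include <kunit/clk.h>
	#include <kunit/of.h>
	#include <kunit/test.h>
	#include <linux/clk.h>
	#include <linux/of.h>

	#include "clk-fixed-rate_test.h"

	static void example_fixed_rate_of_test(struct kunit *test)
	{
		struct device_node *np;
		struct clk *clk;

		KUNIT_ASSERT_EQ(test, 0,
				of_overlay_apply_kunit(test, kunit_clk_fixed_rate_test));

		np = of_find_compatible_node(NULL, NULL, "test,single-clk-consumer");
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
		of_node_put_kunit(test, np);

		clk = of_clk_get_kunit(test, np, 0);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);

		KUNIT_EXPECT_EQ(test, clk_get_rate(clk),
				(unsigned long)TEST_FIXED_FREQUENCY);
	}
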
diff --git a/drivers/clk/kunit_clk_parent_data_test.dtso b/drivers/clk/kunit_clk_parent_data_test.dtso
new file mode 100644
index 000000000000..7d3ed9a5a2e8
--- /dev/null
+++ b/drivers/clk/kunit_clk_parent_data_test.dtso
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "clk_parent_data_test.h"
+
+&{/} {
+ fixed_50: kunit-clock-50MHz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = CLK_PARENT_DATA_50MHZ_NAME;
+ };
+
+ fixed_parent: kunit-clock-1MHz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = CLK_PARENT_DATA_1MHZ_NAME;
+ };
+
+ kunit-clock-controller {
+ compatible = "test,clk-parent-data";
+ clocks = <&fixed_parent>, <&fixed_50>;
+ clock-names = CLK_PARENT_DATA_PARENT1, CLK_PARENT_DATA_PARENT2;
+ #clock-cells = <1>;
+ };
+};
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 15859132c769..425c69cfb105 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -158,7 +158,7 @@ static void clk_mt2701_aud_remove(struct platform_device *pdev)
static struct platform_driver clk_mt2701_aud_drv = {
.probe = clk_mt2701_aud_probe,
- .remove_new = clk_mt2701_aud_remove,
+ .remove = clk_mt2701_aud_remove,
.driver = {
.name = "clk-mt2701-aud",
.of_match_table = of_match_clk_mt2701_aud,
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index e203dca70786..5da3eabffd3e 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_bdp);
static struct platform_driver clk_mt2701_bdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-bdp",
.of_match_table = of_match_clk_mt2701_bdp,
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index f6e1fdc9ee0a..608252e73f24 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_eth);
static struct platform_driver clk_mt2701_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-eth",
.of_match_table = of_match_clk_mt2701_eth,
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 5e04975433ea..b3e18b6db75d 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_g3d);
static struct platform_driver clk_mt2701_g3d_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-g3d",
.of_match_table = of_match_clk_mt2701_g3d,
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index c7b38d066403..000e00576052 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_hif);
static struct platform_driver clk_mt2701_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-hif",
.of_match_table = of_match_clk_mt2701_hif,
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index ce13b79a7994..875594bc9dcb 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_img);
static struct platform_driver clk_mt2701_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-img",
.of_match_table = of_match_clk_mt2701_img,
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index 903592be56b5..bc68fa718878 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt2701_mm_id_table);
static struct platform_driver clk_mt2701_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt2701-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index 591091fb2151..94db86f8d0a4 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -52,7 +52,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2701_vdec);
static struct platform_driver clk_mt2701_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-vdec",
.of_match_table = of_match_clk_mt2701_vdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
index 66987d205eee..a60622d251ff 100644
--- a/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt2712-apmixedsys.c
@@ -156,7 +156,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_apmixed);
static struct platform_driver clk_mt2712_apmixed_drv = {
.probe = clk_mt2712_apmixed_probe,
- .remove_new = clk_mt2712_apmixed_remove,
+ .remove = clk_mt2712_apmixed_remove,
.driver = {
.name = "clk-mt2712-apmixed",
.of_match_table = of_match_clk_mt2712_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
index 93c5453e4392..c838311a0c51 100644
--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -69,7 +69,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_bdp);
static struct platform_driver clk_mt2712_bdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-bdp",
.of_match_table = of_match_clk_mt2712_bdp,
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
index 84abd0515fd2..bedebf86b0b5 100644
--- a/drivers/clk/mediatek/clk-mt2712-img.c
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_img);
static struct platform_driver clk_mt2712_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-img",
.of_match_table = of_match_clk_mt2712_img,
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
index 89be9082adba..1a73474b2f99 100644
--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_jpgdec);
static struct platform_driver clk_mt2712_jpgdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-jpgdec",
.of_match_table = of_match_clk_mt2712_jpgdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
index f7e0d0ebf665..c1bb45c7469e 100644
--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_mfg);
static struct platform_driver clk_mt2712_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-mfg",
.of_match_table = of_match_clk_mt2712_mfg,
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 248529d3134d..32ecb949f7eb 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -121,7 +121,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt2712_mm_id_table);
static struct platform_driver clk_mt2712_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt2712-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
index a063f1f0aa52..a766342fbafa 100644
--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_vdec);
static struct platform_driver clk_mt2712_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-vdec",
.of_match_table = of_match_clk_mt2712_vdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
index 5b15df0a26f5..fc193dc8e8f6 100644
--- a/drivers/clk/mediatek/clk-mt2712-venc.c
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712_venc);
static struct platform_driver clk_mt2712_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-venc",
.of_match_table = of_match_clk_mt2712_venc,
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 91af45160aa4..964c92130e3c 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -993,7 +993,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt2712);
static struct platform_driver clk_mt2712_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712",
.of_match_table = of_match_clk_mt2712,
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
index 3e481c697eff..2be1458087e6 100644
--- a/drivers/clk/mediatek/clk-mt6765-audio.c
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -69,7 +69,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_audio);
static struct platform_driver clk_mt6765_audio_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-audio",
.of_match_table = of_match_clk_mt6765_audio,
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
index fed9c789d9fa..2a7f30dc85bb 100644
--- a/drivers/clk/mediatek/clk-mt6765-cam.c
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -50,7 +50,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_cam);
static struct platform_driver clk_mt6765_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-cam",
.of_match_table = of_match_clk_mt6765_cam,
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
index 34bb89ffd2dd..ff857852cfb0 100644
--- a/drivers/clk/mediatek/clk-mt6765-img.c
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_img);
static struct platform_driver clk_mt6765_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-img",
.of_match_table = of_match_clk_mt6765_img,
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
index 957eb494fee5..8261dfd12a9a 100644
--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mipi0a);

static struct platform_driver clk_mt6765_mipi0a_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mipi0a",
.of_match_table = of_match_clk_mt6765_mipi0a,
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
index 099540fcfc76..e525919f9e81 100644
--- a/drivers/clk/mediatek/clk-mt6765-mm.c
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -72,7 +72,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_mm);

static struct platform_driver clk_mt6765_mm_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mm",
.of_match_table = of_match_clk_mt6765_mm,
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
index 64f3451d0aee..f309d1090cda 100644
--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6765_vcodec);

static struct platform_driver clk_mt6765_vcodec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-vcodec",
.of_match_table = of_match_clk_mt6765_vcodec,
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 3d23b8e29af6..8ed318bd7765 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_aud);

static struct platform_driver clk_mt6779_aud_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-aud",
.of_match_table = of_match_clk_mt6779_aud,
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index e76b2c4f548e..f397b55606de 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_cam);

static struct platform_driver clk_mt6779_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-cam",
.of_match_table = of_match_clk_mt6779_cam,
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index 0c5971f3966a..474a59a4ca9e 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_img);

static struct platform_driver clk_mt6779_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-img",
.of_match_table = of_match_clk_mt6779_img,
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index 9c1a9f1b0f3e..c2314654f43a 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_ipe);

static struct platform_driver clk_mt6779_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-ipe",
.of_match_table = of_match_clk_mt6779_ipe,
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index 3cc82b59117f..21793cb6e6e3 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_mfg);

static struct platform_driver clk_mt6779_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-mfg",
.of_match_table = of_match_clk_mt6779_mfg,
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index 97d437a6f98f..30bbab308388 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt6779_mm_id_table);

static struct platform_driver clk_mt6779_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt6779-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index a9122e627aa5..458d012f023c 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -56,7 +56,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_vdec);

static struct platform_driver clk_mt6779_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-vdec",
.of_match_table = of_match_clk_mt6779_vdec,
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index 2cd032648eb1..70cebc274031 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779_venc);

static struct platform_driver clk_mt6779_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-venc",
.of_match_table = of_match_clk_mt6779_venc,
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 819253b97a02..86732f5acf93 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -1305,7 +1305,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6779);

static struct platform_driver clk_mt6779_infra_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-infra",
.of_match_table = of_match_clk_mt6779_infra,
diff --git a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
index 8c65974ed9b8..91665d7f125e 100644
--- a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
@@ -201,7 +201,7 @@ static void clk_mt6795_apmixed_remove(struct platform_device *pdev)

static struct platform_driver clk_mt6795_apmixed_drv = {
.probe = clk_mt6795_apmixed_probe,
- .remove_new = clk_mt6795_apmixed_remove,
+ .remove = clk_mt6795_apmixed_remove,
.driver = {
.name = "clk-mt6795-apmixed",
.of_match_table = of_match_clk_mt6795_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c
index 06d7fdf3098b..e4559569f5b0 100644
--- a/drivers/clk/mediatek/clk-mt6795-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c
@@ -144,7 +144,7 @@ static struct platform_driver clk_mt6795_infracfg_drv = {
.of_match_table = of_match_clk_mt6795_infracfg,
},
.probe = clk_mt6795_infracfg_probe,
- .remove_new = clk_mt6795_infracfg_remove,
+ .remove = clk_mt6795_infracfg_remove,
};
module_platform_driver(clk_mt6795_infracfg_drv);

diff --git a/drivers/clk/mediatek/clk-mt6795-mfg.c b/drivers/clk/mediatek/clk-mt6795-mfg.c
index dff6a6ded837..1d658bb19e82 100644
--- a/drivers/clk/mediatek/clk-mt6795-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-mfg.c
@@ -43,7 +43,7 @@ static struct platform_driver clk_mt6795_mfg_drv = {
.of_match_table = of_match_clk_mt6795_mfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_mfg_drv);

diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c
index dd1708d689dc..733d0e2021fc 100644
--- a/drivers/clk/mediatek/clk-mt6795-mm.c
+++ b/drivers/clk/mediatek/clk-mt6795-mm.c
@@ -93,7 +93,7 @@ static struct platform_driver clk_mt6795_mm_drv = {
},
.id_table = clk_mt6795_mm_id_table,
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
};
module_platform_driver(clk_mt6795_mm_drv);

diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c
index 3f6bea418a5a..d48240eb2a67 100644
--- a/drivers/clk/mediatek/clk-mt6795-pericfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c
@@ -153,7 +153,7 @@ static struct platform_driver clk_mt6795_pericfg_drv = {
.of_match_table = of_match_clk_mt6795_pericfg,
},
.probe = clk_mt6795_pericfg_probe,
- .remove_new = clk_mt6795_pericfg_remove,
+ .remove = clk_mt6795_pericfg_remove,
};
module_platform_driver(clk_mt6795_pericfg_drv);

diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c
index be595853a925..9c6d63a80b19 100644
--- a/drivers/clk/mediatek/clk-mt6795-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c
@@ -547,7 +547,7 @@ static struct platform_driver clk_mt6795_topckgen_drv = {
.of_match_table = of_match_clk_mt6795_topckgen,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_topckgen_drv);

diff --git a/drivers/clk/mediatek/clk-mt6795-vdecsys.c b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
index 9e91d6f7f5bf..f2968f859dca 100644
--- a/drivers/clk/mediatek/clk-mt6795-vdecsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6795_vdecsys);

static struct platform_driver clk_mt6795_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6795-vdecsys",
.of_match_table = of_match_clk_mt6795_vdecsys,
diff --git a/drivers/clk/mediatek/clk-mt6795-vencsys.c b/drivers/clk/mediatek/clk-mt6795-vencsys.c
index bd81e80b744f..2f8d48da1a85 100644
--- a/drivers/clk/mediatek/clk-mt6795-vencsys.c
+++ b/drivers/clk/mediatek/clk-mt6795-vencsys.c
@@ -43,7 +43,7 @@ static struct platform_driver clk_mt6795_vencsys_drv = {
.of_match_table = of_match_clk_mt6795_vencsys,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt6795_vencsys_drv);

diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
index 0ec0cf2154dc..338c69234f24 100644
--- a/drivers/clk/mediatek/clk-mt6797-img.c
+++ b/drivers/clk/mediatek/clk-mt6797-img.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_img);

static struct platform_driver clk_mt6797_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-img",
.of_match_table = of_match_clk_mt6797_img,
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index f5701e965792..ddb40b8a1a7d 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt6797_mm_id_table);

static struct platform_driver clk_mt6797_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt6797-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
index c967d5e25c7d..d832f48123f5 100644
--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
@@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_vdec);

static struct platform_driver clk_mt6797_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-vdec",
.of_match_table = of_match_clk_mt6797_vdec,
diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
index f6fac5db65b0..fd4446f4a9d7 100644
--- a/drivers/clk/mediatek/clk-mt6797-venc.c
+++ b/drivers/clk/mediatek/clk-mt6797-venc.c
@@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt6797_venc);

static struct platform_driver clk_mt6797_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-venc",
.of_match_table = of_match_clk_mt6797_venc,
diff --git a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
index 1b8f859b6b6c..2350592d9a93 100644
--- a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
@@ -137,7 +137,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_apmixed);

static struct platform_driver clk_mt7622_apmixed_drv = {
.probe = clk_mt7622_apmixed_probe,
- .remove_new = clk_mt7622_apmixed_remove,
+ .remove = clk_mt7622_apmixed_remove,
.driver = {
.name = "clk-mt7622-apmixed",
.of_match_table = of_match_clk_mt7622_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index b7bf626e4d14..931a0598e598 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -149,7 +149,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_aud);

static struct platform_driver clk_mt7622_aud_drv = {
.probe = clk_mt7622_aud_probe,
- .remove_new = clk_mt7622_aud_remove,
+ .remove = clk_mt7622_aud_remove,
.driver = {
.name = "clk-mt7622-aud",
.of_match_table = of_match_clk_mt7622_aud,
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index fa4876317a8d..1c1033a92c46 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -79,7 +79,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_eth);

static struct platform_driver clk_mt7622_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7622-eth",
.of_match_table = of_match_clk_mt7622_eth,
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 8e57582454c2..5bcfe12c4fd0 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7622_hif);

static struct platform_driver clk_mt7622_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7622-hif",
.of_match_table = of_match_clk_mt7622_hif,
diff --git a/drivers/clk/mediatek/clk-mt7622-infracfg.c b/drivers/clk/mediatek/clk-mt7622-infracfg.c
index 6bc911cb29a6..cfdf3b07c3e0 100644
--- a/drivers/clk/mediatek/clk-mt7622-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7622-infracfg.c
@@ -118,7 +118,7 @@ static struct platform_driver clk_mt7622_infracfg_drv = {
.of_match_table = of_match_clk_mt7622_infracfg,
},
.probe = clk_mt7622_infracfg_probe,
- .remove_new = clk_mt7622_infracfg_remove,
+ .remove = clk_mt7622_infracfg_remove,
};
module_platform_driver(clk_mt7622_infracfg_drv);

diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 27781a62a131..f62b03abab4f 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -524,7 +524,7 @@ static struct platform_driver clk_mt7622_drv = {
.of_match_table = of_match_clk_mt7622,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7622_drv)

diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index 96d1a82ad75f..3fdc2d7d4274 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7629_hif);

static struct platform_driver clk_mt7629_hif_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7629-hif",
.of_match_table = of_match_clk_mt7629_hif,
diff --git a/drivers/clk/mediatek/clk-mt7981-eth.c b/drivers/clk/mediatek/clk-mt7981-eth.c
index e8cb247db0ce..906aec9ddff5 100644
--- a/drivers/clk/mediatek/clk-mt7981-eth.c
+++ b/drivers/clk/mediatek/clk-mt7981-eth.c
@@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_eth);

static struct platform_driver clk_mt7981_eth_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-eth",
.of_match_table = of_match_clk_mt7981_eth,
diff --git a/drivers/clk/mediatek/clk-mt7981-infracfg.c b/drivers/clk/mediatek/clk-mt7981-infracfg.c
index b2b055151297..0487b6bb80ae 100644
--- a/drivers/clk/mediatek/clk-mt7981-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7981-infracfg.c
@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_infracfg);

static struct platform_driver clk_mt7981_infracfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-infracfg",
.of_match_table = of_match_clk_mt7981_infracfg,
diff --git a/drivers/clk/mediatek/clk-mt7981-topckgen.c b/drivers/clk/mediatek/clk-mt7981-topckgen.c
index 72f2f4f30e85..1943f11e47c1 100644
--- a/drivers/clk/mediatek/clk-mt7981-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7981-topckgen.c
@@ -413,7 +413,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7981_topckgen);

static struct platform_driver clk_mt7981_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7981-topckgen",
.of_match_table = of_match_clk_mt7981_topckgen,
diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
index 7ab78e0f49a1..4514d42c0829 100644
--- a/drivers/clk/mediatek/clk-mt7986-eth.c
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -92,7 +92,7 @@ static struct platform_driver clk_mt7986_eth_drv = {
.of_match_table = of_match_clk_mt7986_eth,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7986_eth_drv);

diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
index cb8ab3e53abf..732c65e616de 100644
--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -177,7 +177,7 @@ static struct platform_driver clk_mt7986_infracfg_drv = {
.of_match_table = of_match_clk_mt7986_infracfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7986_infracfg_drv);

diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
index b644b4ca4710..2dd30da306d9 100644
--- a/drivers/clk/mediatek/clk-mt7986-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
@@ -306,7 +306,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7986_topckgen);

static struct platform_driver clk_mt7986_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7986-topckgen",
.of_match_table = of_match_clk_mt7986_topckgen,
diff --git a/drivers/clk/mediatek/clk-mt7988-eth.c b/drivers/clk/mediatek/clk-mt7988-eth.c
index adf4a9d39b38..7d9463688be2 100644
--- a/drivers/clk/mediatek/clk-mt7988-eth.c
+++ b/drivers/clk/mediatek/clk-mt7988-eth.c
@@ -142,7 +142,7 @@ static struct platform_driver clk_mt7988_eth_drv = {
.of_match_table = of_match_clk_mt7988_eth,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_eth_drv);

diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
index 6c2bebabb4de..ef8267319d91 100644
--- a/drivers/clk/mediatek/clk-mt7988-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -292,7 +292,7 @@ static struct platform_driver clk_mt7988_infracfg_drv = {
.of_match_table = of_match_clk_mt7988_infracfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_infracfg_drv);

diff --git a/drivers/clk/mediatek/clk-mt7988-topckgen.c b/drivers/clk/mediatek/clk-mt7988-topckgen.c
index 7300e9694582..50e02cc7a214 100644
--- a/drivers/clk/mediatek/clk-mt7988-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7988-topckgen.c
@@ -315,7 +315,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt7988_topckgen);

static struct platform_driver clk_mt7988_topckgen_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt7988-topckgen",
.of_match_table = of_match_clk_mt7988_topckgen,
diff --git a/drivers/clk/mediatek/clk-mt7988-xfipll.c b/drivers/clk/mediatek/clk-mt7988-xfipll.c
index 9b9ca5471158..f941e4d3ef28 100644
--- a/drivers/clk/mediatek/clk-mt7988-xfipll.c
+++ b/drivers/clk/mediatek/clk-mt7988-xfipll.c
@@ -74,7 +74,7 @@ static struct platform_driver clk_mt7988_xfipll_drv = {
.of_match_table = of_match_clk_mt7988_xfipll,
},
.probe = clk_mt7988_xfipll_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt7988_xfipll_drv);

diff --git a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
index 41bb2d2e2ea7..bdadc35c64cb 100644
--- a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
@@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8135_apmixed);

static struct platform_driver clk_mt8135_apmixed_drv = {
.probe = clk_mt8135_apmixed_probe,
- .remove_new = clk_mt8135_apmixed_remove,
+ .remove = clk_mt8135_apmixed_remove,
.driver = {
.name = "clk-mt8135-apmixed",
.of_match_table = of_match_clk_mt8135_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 019af88d7f9c..084e48a554c2 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -558,7 +558,7 @@ static struct platform_driver clk_mt8135_drv = {
.of_match_table = of_match_clk_mt8135,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8135_drv);

diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
index d1a42ff549c1..d6cff4bdf4cb 100644
--- a/drivers/clk/mediatek/clk-mt8167-aud.c
+++ b/drivers/clk/mediatek/clk-mt8167-aud.c
@@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_audsys);

static struct platform_driver clk_mt8167_audsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-audsys",
.of_match_table = of_match_clk_mt8167_audsys,
diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
index 888ac3bdeacb..42d38ae94b69 100644
--- a/drivers/clk/mediatek/clk-mt8167-img.c
+++ b/drivers/clk/mediatek/clk-mt8167-img.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_imgsys);

static struct platform_driver clk_mt8167_imgsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-imgsys",
.of_match_table = of_match_clk_mt8167_imgsys,
diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
index e873766f130c..1ef37a3e6851 100644
--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_mfgcfg);

static struct platform_driver clk_mt8167_mfgcfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-mfgcfg",
.of_match_table = of_match_clk_mt8167_mfgcfg,
diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
index 38deedffaacf..cef66ee836f3 100644
--- a/drivers/clk/mediatek/clk-mt8167-mm.c
+++ b/drivers/clk/mediatek/clk-mt8167-mm.c
@@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8167_mm_id_table);

static struct platform_driver clk_mt8167_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8167-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
index c3c892bb8334..e3769bc556a9 100644
--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
@@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167_vdec);

static struct platform_driver clk_mt8167_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167-vdecsys",
.of_match_table = of_match_clk_mt8167_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
index 5c94995f859c..c64d918c37de 100644
--- a/drivers/clk/mediatek/clk-mt8167.c
+++ b/drivers/clk/mediatek/clk-mt8167.c
@@ -887,7 +887,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8167);

static struct platform_driver clk_mt8167_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8167",
.of_match_table = of_match_clk_mt8167,
diff --git a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
index 6cab483b8e1e..95385bb67d55 100644
--- a/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-apmixedsys.c
@@ -207,7 +207,7 @@ static void clk_mt8173_apmixed_remove(struct platform_device *pdev)

static struct platform_driver clk_mt8173_apmixed_drv = {
.probe = clk_mt8173_apmixed_probe,
- .remove_new = clk_mt8173_apmixed_remove,
+ .remove = clk_mt8173_apmixed_remove,
.driver = {
.name = "clk-mt8173-apmixed",
.of_match_table = of_match_clk_mt8173_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8173-img.c b/drivers/clk/mediatek/clk-mt8173-img.c
index 1011b9ab3dad..6db2b9ab2bc9 100644
--- a/drivers/clk/mediatek/clk-mt8173-img.c
+++ b/drivers/clk/mediatek/clk-mt8173-img.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_imgsys);

static struct platform_driver clk_mt8173_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8173-imgsys",
.of_match_table = of_match_clk_mt8173_imgsys,
diff --git a/drivers/clk/mediatek/clk-mt8173-infracfg.c b/drivers/clk/mediatek/clk-mt8173-infracfg.c
index ecc8b0063ea5..fa2d1d557e04 100644
--- a/drivers/clk/mediatek/clk-mt8173-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt8173-infracfg.c
@@ -156,7 +156,7 @@ static struct platform_driver clk_mt8173_infracfg_drv = {
.of_match_table = of_match_clk_mt8173_infracfg,
},
.probe = clk_mt8173_infracfg_probe,
- .remove_new = clk_mt8173_infracfg_remove,
+ .remove = clk_mt8173_infracfg_remove,
};
module_platform_driver(clk_mt8173_infracfg_drv);

diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
index fd903bee328f..26d27250b914 100644
--- a/drivers/clk/mediatek/clk-mt8173-mm.c
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -106,7 +106,7 @@ static struct platform_driver clk_mt8173_mm_drv = {
},
.id_table = clk_mt8173_mm_id_table,
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
};
module_platform_driver(clk_mt8173_mm_drv);

diff --git a/drivers/clk/mediatek/clk-mt8173-pericfg.c b/drivers/clk/mediatek/clk-mt8173-pericfg.c
index 783efed3f254..bebda74d0f43 100644
--- a/drivers/clk/mediatek/clk-mt8173-pericfg.c
+++ b/drivers/clk/mediatek/clk-mt8173-pericfg.c
@@ -115,7 +115,7 @@ static struct platform_driver clk_mt8173_pericfg_drv = {
.of_match_table = of_match_clk_mt8173_pericfg,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_pericfg_drv);

diff --git a/drivers/clk/mediatek/clk-mt8173-topckgen.c b/drivers/clk/mediatek/clk-mt8173-topckgen.c
index 6bb7ffd74487..42c37541cebb 100644
--- a/drivers/clk/mediatek/clk-mt8173-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8173-topckgen.c
@@ -646,7 +646,7 @@ static struct platform_driver clk_mt8173_topckgen_drv = {
.of_match_table = of_match_clk_mt8173_topckgen,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_topckgen_drv);

diff --git a/drivers/clk/mediatek/clk-mt8173-vdecsys.c b/drivers/clk/mediatek/clk-mt8173-vdecsys.c
index 011e3812156f..625ca0b09cc2 100644
--- a/drivers/clk/mediatek/clk-mt8173-vdecsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-vdecsys.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8173_vdecsys);

static struct platform_driver clk_mt8173_vdecsys_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8173-vdecsys",
.of_match_table = of_match_clk_mt8173_vdecsys,
diff --git a/drivers/clk/mediatek/clk-mt8173-vencsys.c b/drivers/clk/mediatek/clk-mt8173-vencsys.c
index 1bf84ae6a0bc..87755dd1a337 100644
--- a/drivers/clk/mediatek/clk-mt8173-vencsys.c
+++ b/drivers/clk/mediatek/clk-mt8173-vencsys.c
@@ -57,7 +57,7 @@ static struct platform_driver clk_mt8173_vencsys_drv = {
.of_match_table = of_match_clk_mt8173_vencsys,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8173_vencsys_drv);

diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
index 30a20e8ba84b..011d329ad30e 100644
--- a/drivers/clk/mediatek/clk-mt8183-audio.c
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_audio);

static struct platform_driver clk_mt8183_audio_drv = {
.probe = clk_mt8183_audio_probe,
- .remove_new = clk_mt8183_audio_remove,
+ .remove = clk_mt8183_audio_remove,
.driver = {
.name = "clk-mt8183-audio",
.of_match_table = of_match_clk_mt8183_audio,
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
index f16c3aa3c911..c7642085f8de 100644
--- a/drivers/clk/mediatek/clk-mt8183-cam.c
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -51,7 +51,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_cam);

static struct platform_driver clk_mt8183_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-cam",
.of_match_table = of_match_clk_mt8183_cam,
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
index 32ee6a1867fc..ee92459c74ca 100644
--- a/drivers/clk/mediatek/clk-mt8183-img.c
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -51,7 +51,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_img);

static struct platform_driver clk_mt8183_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-img",
.of_match_table = of_match_clk_mt8183_img,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
index dc2916c4e0dc..6831747f123b 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu0.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core0);

static struct platform_driver clk_mt8183_ipu_core0_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core0",
.of_match_table = of_match_clk_mt8183_ipu_core0,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
index 9c63e4c592d0..ecf434432e7b 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu1.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_core1);

static struct platform_driver clk_mt8183_ipu_core1_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core1",
.of_match_table = of_match_clk_mt8183_ipu_core1,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
index 54a50eda1719..c1a770ba3245 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_adl);

static struct platform_driver clk_mt8183_ipu_adl_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_adl",
.of_match_table = of_match_clk_mt8183_ipu_adl,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
index 99a817d3be6c..f0e72e6edb7a 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_ipu_conn);

static struct platform_driver clk_mt8183_ipu_conn_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_conn",
.of_match_table = of_match_clk_mt8183_ipu_conn,
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index b1e802bbfaef..be44889783ff 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_mfg);

static struct platform_driver clk_mt8183_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-mfg",
.of_match_table = of_match_clk_mt8183_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 59acf1e2951b..0f132f05fa8b 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -95,7 +95,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8183_mm_id_table);

static struct platform_driver clk_mt8183_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8183-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
index 48a8ef3f69aa..43bf34077b16 100644
--- a/drivers/clk/mediatek/clk-mt8183-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_vdec);

static struct platform_driver clk_mt8183_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-vdec",
.of_match_table = of_match_clk_mt8183_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
index 8f36688dfa14..c3d99b3b8ff7 100644
--- a/drivers/clk/mediatek/clk-mt8183-venc.c
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183_venc);

static struct platform_driver clk_mt8183_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-venc",
.of_match_table = of_match_clk_mt8183_venc,
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 27eee4ef2c0f..aa7cc7709b2d 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -899,7 +899,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8183);

static struct platform_driver clk_mt8183_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183",
.of_match_table = of_match_clk_mt8183,
diff --git a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
index 6f7127003e4f..4b2b16578232 100644
--- a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c
@@ -185,7 +185,7 @@ static void clk_mt8186_apmixed_remove(struct platform_device *pdev)

static struct platform_driver clk_mt8186_apmixed_drv = {
.probe = clk_mt8186_apmixed_probe,
- .remove_new = clk_mt8186_apmixed_remove,
+ .remove = clk_mt8186_apmixed_remove,
.driver = {
.name = "clk-mt8186-apmixed",
.of_match_table = of_match_clk_mt8186_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8186-cam.c b/drivers/clk/mediatek/clk-mt8186-cam.c
index 0082f0d9286b..2ddd5f90377f 100644
--- a/drivers/clk/mediatek/clk-mt8186-cam.c
+++ b/drivers/clk/mediatek/clk-mt8186-cam.c
@@ -82,7 +82,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_cam);

static struct platform_driver clk_mt8186_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-cam",
.of_match_table = of_match_clk_mt8186_cam,
diff --git a/drivers/clk/mediatek/clk-mt8186-img.c b/drivers/clk/mediatek/clk-mt8186-img.c
index 0583a18805ce..5e466e1f5f44 100644
--- a/drivers/clk/mediatek/clk-mt8186-img.c
+++ b/drivers/clk/mediatek/clk-mt8186-img.c
@@ -60,7 +60,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_img);

static struct platform_driver clk_mt8186_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-img",
.of_match_table = of_match_clk_mt8186_img,
diff --git a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
index 2a2a6bb23205..75abb871044c 100644
--- a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c
@@ -59,7 +59,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_imp_iic_wrap);

static struct platform_driver clk_mt8186_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-imp_iic_wrap",
.of_match_table = of_match_clk_mt8186_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
index d7239875fb15..8d9d86a510ff 100644
--- a/drivers/clk/mediatek/clk-mt8186-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c
@@ -231,7 +231,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_infra_ao);

static struct platform_driver clk_mt8186_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-infra-ao",
.of_match_table = of_match_clk_mt8186_infra_ao,
diff --git a/drivers/clk/mediatek/clk-mt8186-ipe.c b/drivers/clk/mediatek/clk-mt8186-ipe.c
index 77bdd2806517..f66a0aeaa6b3 100644
--- a/drivers/clk/mediatek/clk-mt8186-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8186-ipe.c
@@ -47,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_ipe);

static struct platform_driver clk_mt8186_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-ipe",
.of_match_table = of_match_clk_mt8186_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c
index eb54ccb77b74..d1640e4dc2ad 100644
--- a/drivers/clk/mediatek/clk-mt8186-mcu.c
+++ b/drivers/clk/mediatek/clk-mt8186-mcu.c
@@ -60,7 +60,7 @@ static struct platform_driver clk_mt8186_mcu_drv = {
.of_match_table = of_match_clk_mt8186_mcu,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8186_mcu_drv);

diff --git a/drivers/clk/mediatek/clk-mt8186-mdp.c b/drivers/clk/mediatek/clk-mt8186-mdp.c
index fb47d6bacf7f..01561cf902c4 100644
--- a/drivers/clk/mediatek/clk-mt8186-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8186-mdp.c
@@ -72,7 +72,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mdp);

static struct platform_driver clk_mt8186_mdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-mdp",
.of_match_table = of_match_clk_mt8186_mdp,
diff --git a/drivers/clk/mediatek/clk-mt8186-mfg.c b/drivers/clk/mediatek/clk-mt8186-mfg.c
index 64cdee1fddd4..3f21b1f222e1 100644
--- a/drivers/clk/mediatek/clk-mt8186-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8186-mfg.c
@@ -41,7 +41,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_mfg);

static struct platform_driver clk_mt8186_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-mfg",
.of_match_table = of_match_clk_mt8186_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c
index 403566187e64..fc8488c44866 100644
--- a/drivers/clk/mediatek/clk-mt8186-mm.c
+++ b/drivers/clk/mediatek/clk-mt8186-mm.c
@@ -71,7 +71,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8186_mm_id_table);

static struct platform_driver clk_mt8186_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8186-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c
index eb9f51e77ca8..14f1cbdbbd13 100644
--- a/drivers/clk/mediatek/clk-mt8186-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c
@@ -725,7 +725,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_topck);

static struct platform_driver clk_mt8186_topck_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-topck",
.of_match_table = of_match_clk_mt8186_topck,
diff --git a/drivers/clk/mediatek/clk-mt8186-vdec.c b/drivers/clk/mediatek/clk-mt8186-vdec.c
index 25465704ddfb..522b8c952969 100644
--- a/drivers/clk/mediatek/clk-mt8186-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8186-vdec.c
@@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_vdec);

static struct platform_driver clk_mt8186_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-vdec",
.of_match_table = of_match_clk_mt8186_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8186-venc.c b/drivers/clk/mediatek/clk-mt8186-venc.c
index 647dd66a3ce0..c0c98bc75112 100644
--- a/drivers/clk/mediatek/clk-mt8186-venc.c
+++ b/drivers/clk/mediatek/clk-mt8186-venc.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_venc);

static struct platform_driver clk_mt8186_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-venc",
.of_match_table = of_match_clk_mt8186_venc,
diff --git a/drivers/clk/mediatek/clk-mt8186-wpe.c b/drivers/clk/mediatek/clk-mt8186-wpe.c
index 47f96e088361..babd7b2778c2 100644
--- a/drivers/clk/mediatek/clk-mt8186-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8186-wpe.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8186_wpe);

static struct platform_driver clk_mt8186_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8186-wpe",
.of_match_table = of_match_clk_mt8186_wpe,
diff --git a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
index 5ac035bbe684..dcde2187d24a 100644
--- a/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
+++ b/drivers/clk/mediatek/clk-mt8188-adsp_audio26m.c
@@ -40,7 +40,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_adsp_audio26m);

static struct platform_driver clk_mt8188_adsp_audio26m_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-adsp_audio26m",
.of_match_table = of_match_clk_mt8188_adsp_audio26m,
diff --git a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
index 85d573d96081..21d7a9a2ab1a 100644
--- a/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8188-apmixedsys.c
@@ -145,7 +145,7 @@ static void clk_mt8188_apmixed_remove(struct platform_device *pdev)

static struct platform_driver clk_mt8188_apmixed_drv = {
.probe = clk_mt8188_apmixed_probe,
- .remove_new = clk_mt8188_apmixed_remove,
+ .remove = clk_mt8188_apmixed_remove,
.driver = {
.name = "clk-mt8188-apmixed",
.of_match_table = of_match_clk_mt8188_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8188-cam.c b/drivers/clk/mediatek/clk-mt8188-cam.c
index a6a6581f0461..7500bd25387f 100644
--- a/drivers/clk/mediatek/clk-mt8188-cam.c
+++ b/drivers/clk/mediatek/clk-mt8188-cam.c
@@ -109,7 +109,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_cam);

static struct platform_driver clk_mt8188_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-cam",
.of_match_table = of_match_clk_mt8188_cam,
diff --git a/drivers/clk/mediatek/clk-mt8188-ccu.c b/drivers/clk/mediatek/clk-mt8188-ccu.c
index 9532fc652f01..1566fc437ea3 100644
--- a/drivers/clk/mediatek/clk-mt8188-ccu.c
+++ b/drivers/clk/mediatek/clk-mt8188-ccu.c
@@ -39,7 +39,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ccu);

static struct platform_driver clk_mt8188_ccu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-ccu",
.of_match_table = of_match_clk_mt8188_ccu,
diff --git a/drivers/clk/mediatek/clk-mt8188-img.c b/drivers/clk/mediatek/clk-mt8188-img.c
index 00ad6d7884ae..cb2fbd4136b9 100644
--- a/drivers/clk/mediatek/clk-mt8188-img.c
+++ b/drivers/clk/mediatek/clk-mt8188-img.c
@@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imgsys_main);

static struct platform_driver clk_mt8188_imgsys_main_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-imgsys_main",
.of_match_table = of_match_clk_mt8188_imgsys_main,
diff --git a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
index 7b713f4cd662..14a4b575b583 100644
--- a/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8188-imp_iic_wrap.c
@@ -71,7 +71,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_imp_iic_wrap);

static struct platform_driver clk_mt8188_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-imp_iic_wrap",
.of_match_table = of_match_clk_mt8188_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8188-infra_ao.c b/drivers/clk/mediatek/clk-mt8188-infra_ao.c
index face3e191464..b9bc8fcc2ade 100644
--- a/drivers/clk/mediatek/clk-mt8188-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8188-infra_ao.c
@@ -213,7 +213,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_infra_ao);

static struct platform_driver clk_mt8188_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-infra_ao",
.of_match_table = of_match_clk_mt8188_infra_ao,
diff --git a/drivers/clk/mediatek/clk-mt8188-ipe.c b/drivers/clk/mediatek/clk-mt8188-ipe.c
index fa439af34359..8f1933b71e28 100644
--- a/drivers/clk/mediatek/clk-mt8188-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8188-ipe.c
@@ -41,7 +41,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_ipe);

static struct platform_driver clk_mt8188_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-ipe",
.of_match_table = of_match_clk_mt8188_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8188-mfg.c b/drivers/clk/mediatek/clk-mt8188-mfg.c
index ec562e7d459d..2ddfb1a3de47 100644
--- a/drivers/clk/mediatek/clk-mt8188-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8188-mfg.c
@@ -38,7 +38,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_mfgcfg);

static struct platform_driver clk_mt8188_mfgcfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-mfgcfg",
.of_match_table = of_match_clk_mt8188_mfgcfg,
diff --git a/drivers/clk/mediatek/clk-mt8188-peri_ao.c b/drivers/clk/mediatek/clk-mt8188-peri_ao.c
index e4339885b062..639865335fc8 100644
--- a/drivers/clk/mediatek/clk-mt8188-peri_ao.c
+++ b/drivers/clk/mediatek/clk-mt8188-peri_ao.c
@@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_peri_ao);

static struct platform_driver clk_mt8188_peri_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-peri_ao",
.of_match_table = of_match_clk_mt8188_peri_ao,
diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c
index 2ccc8a1c98f9..c4baf4076ed6 100644
--- a/drivers/clk/mediatek/clk-mt8188-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c
@@ -1347,7 +1347,7 @@ static void clk_mt8188_topck_remove(struct platform_device *pdev)

static struct platform_driver clk_mt8188_topck_drv = {
.probe = clk_mt8188_topck_probe,
- .remove_new = clk_mt8188_topck_remove,
+ .remove = clk_mt8188_topck_remove,
.driver = {
.name = "clk-mt8188-topck",
.of_match_table = of_match_clk_mt8188_topck,
diff --git a/drivers/clk/mediatek/clk-mt8188-vdec.c b/drivers/clk/mediatek/clk-mt8188-vdec.c
index bf388997c3f8..f48f0716d7c2 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdec.c
@@ -81,7 +81,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_vdec);

static struct platform_driver clk_mt8188_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-vdec",
.of_match_table = of_match_clk_mt8188_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo0.c b/drivers/clk/mediatek/clk-mt8188-vdo0.c
index 935371fbf1d2..017d6662589b 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdo0.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo0_id_table);

static struct platform_driver clk_mt8188_vdo0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vdo0",
},
diff --git a/drivers/clk/mediatek/clk-mt8188-vdo1.c b/drivers/clk/mediatek/clk-mt8188-vdo1.c
index fb24c9026fd8..4fa355f8f0c2 100644
--- a/drivers/clk/mediatek/clk-mt8188-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8188-vdo1.c
@@ -144,7 +144,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vdo1_id_table);

static struct platform_driver clk_mt8188_vdo1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vdo1",
},
diff --git a/drivers/clk/mediatek/clk-mt8188-venc.c b/drivers/clk/mediatek/clk-mt8188-venc.c
index 4df8d4e05159..01e971545506 100644
--- a/drivers/clk/mediatek/clk-mt8188-venc.c
+++ b/drivers/clk/mediatek/clk-mt8188-venc.c
@@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_venc1);

static struct platform_driver clk_mt8188_venc1_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-venc1",
.of_match_table = of_match_clk_mt8188_venc1,
diff --git a/drivers/clk/mediatek/clk-mt8188-vpp0.c b/drivers/clk/mediatek/clk-mt8188-vpp0.c
index 310792108793..cd2579b7b9c3 100644
--- a/drivers/clk/mediatek/clk-mt8188-vpp0.c
+++ b/drivers/clk/mediatek/clk-mt8188-vpp0.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp0_id_table);

static struct platform_driver clk_mt8188_vpp0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vpp0",
},
diff --git a/drivers/clk/mediatek/clk-mt8188-vpp1.c b/drivers/clk/mediatek/clk-mt8188-vpp1.c
index 0aa10aaa0292..0e1bd8306e8a 100644
--- a/drivers/clk/mediatek/clk-mt8188-vpp1.c
+++ b/drivers/clk/mediatek/clk-mt8188-vpp1.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8188_vpp1_id_table);

static struct platform_driver clk_mt8188_vpp1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8188-vpp1",
},
diff --git a/drivers/clk/mediatek/clk-mt8188-wpe.c b/drivers/clk/mediatek/clk-mt8188-wpe.c
index fbac440363cc..d709bb1ee1d6 100644
--- a/drivers/clk/mediatek/clk-mt8188-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8188-wpe.c
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8188_wpe);

static struct platform_driver clk_mt8188_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8188-wpe",
.of_match_table = of_match_clk_mt8188_wpe,
diff --git a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
index 3590932acc63..0b66a27e4d5a 100644
--- a/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8192-apmixedsys.c
@@ -206,7 +206,7 @@ static struct platform_driver clk_mt8192_apmixed_drv = {
.of_match_table = of_match_clk_mt8192_apmixed,
},
.probe = clk_mt8192_apmixed_probe,
- .remove_new = clk_mt8192_apmixed_remove,
+ .remove = clk_mt8192_apmixed_remove,
};
module_platform_driver(clk_mt8192_apmixed_drv);
MODULE_DESCRIPTION("MediaTek MT8192 apmixed clocks driver");
diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c
index b438ebad998d..f3ebf8713fbb 100644
--- a/drivers/clk/mediatek/clk-mt8192-aud.c
+++ b/drivers/clk/mediatek/clk-mt8192-aud.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_aud);

static struct platform_driver clk_mt8192_aud_drv = {
.probe = clk_mt8192_aud_probe,
- .remove_new = clk_mt8192_aud_remove,
+ .remove = clk_mt8192_aud_remove,
.driver = {
.name = "clk-mt8192-aud",
.of_match_table = of_match_clk_mt8192_aud,
diff --git a/drivers/clk/mediatek/clk-mt8192-cam.c b/drivers/clk/mediatek/clk-mt8192-cam.c
index 3eed4a7b6d8e..891d2f88d9cf 100644
--- a/drivers/clk/mediatek/clk-mt8192-cam.c
+++ b/drivers/clk/mediatek/clk-mt8192-cam.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_cam);

static struct platform_driver clk_mt8192_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-cam",
.of_match_table = of_match_clk_mt8192_cam,
diff --git a/drivers/clk/mediatek/clk-mt8192-img.c b/drivers/clk/mediatek/clk-mt8192-img.c
index 13a435332752..c08e831125a5 100644
--- a/drivers/clk/mediatek/clk-mt8192-img.c
+++ b/drivers/clk/mediatek/clk-mt8192-img.c
@@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_img);

static struct platform_driver clk_mt8192_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-img",
.of_match_table = of_match_clk_mt8192_img,
diff --git a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
index 45585f2edd50..0f9530d9263c 100644
--- a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
@@ -111,7 +111,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_imp_iic_wrap);

static struct platform_driver clk_mt8192_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-imp_iic_wrap",
.of_match_table = of_match_clk_mt8192_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8192-ipe.c b/drivers/clk/mediatek/clk-mt8192-ipe.c
index da2e2d83cd25..c932b8b20edc 100644
--- a/drivers/clk/mediatek/clk-mt8192-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8192-ipe.c
@@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_ipe);
static struct platform_driver clk_mt8192_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-ipe",
.of_match_table = of_match_clk_mt8192_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8192-mdp.c b/drivers/clk/mediatek/clk-mt8192-mdp.c
index be674d6c31d7..30334ebca864 100644
--- a/drivers/clk/mediatek/clk-mt8192-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8192-mdp.c
@@ -74,7 +74,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mdp);
static struct platform_driver clk_mt8192_mdp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mdp",
.of_match_table = of_match_clk_mt8192_mdp,
diff --git a/drivers/clk/mediatek/clk-mt8192-mfg.c b/drivers/clk/mediatek/clk-mt8192-mfg.c
index 2da969f4ca6b..9d176659e8a2 100644
--- a/drivers/clk/mediatek/clk-mt8192-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8192-mfg.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_mfg);
static struct platform_driver clk_mt8192_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mfg",
.of_match_table = of_match_clk_mt8192_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c
index 2b9c1c4524c2..bda4406e1304 100644
--- a/drivers/clk/mediatek/clk-mt8192-mm.c
+++ b/drivers/clk/mediatek/clk-mt8192-mm.c
@@ -93,7 +93,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8192_mm_id_table);
static struct platform_driver clk_mt8192_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8192-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index bc5ce987b76c..04a66220f269 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -56,7 +56,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_msdc);
static struct platform_driver clk_mt8192_msdc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-msdc",
.of_match_table = of_match_clk_mt8192_msdc,
diff --git a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
index e017d30a8832..f9e4c16573e2 100644
--- a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_scp_adsp);
static struct platform_driver clk_mt8192_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-scp_adsp",
.of_match_table = of_match_clk_mt8192_scp_adsp,
diff --git a/drivers/clk/mediatek/clk-mt8192-vdec.c b/drivers/clk/mediatek/clk-mt8192-vdec.c
index fcb34b1dcdab..9c10161807b2 100644
--- a/drivers/clk/mediatek/clk-mt8192-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8192-vdec.c
@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_vdec);
static struct platform_driver clk_mt8192_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-vdec",
.of_match_table = of_match_clk_mt8192_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8192-venc.c b/drivers/clk/mediatek/clk-mt8192-venc.c
index 98d58a9397cd..0b01e2b7f036 100644
--- a/drivers/clk/mediatek/clk-mt8192-venc.c
+++ b/drivers/clk/mediatek/clk-mt8192-venc.c
@@ -45,7 +45,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8192_venc);
static struct platform_driver clk_mt8192_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-venc",
.of_match_table = of_match_clk_mt8192_venc,
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index bce2298ebc8d..50b43807c60c 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -1026,7 +1026,7 @@ static struct platform_driver clk_mt8192_drv = {
.of_match_table = of_match_clk_mt8192,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8192_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
index 049ae8123e34..282a3137dc89 100644
--- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
@@ -223,7 +223,7 @@ static void clk_mt8195_apmixed_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8195_apmixed_drv = {
.probe = clk_mt8195_apmixed_probe,
- .remove_new = clk_mt8195_apmixed_remove,
+ .remove = clk_mt8195_apmixed_remove,
.driver = {
.name = "clk-mt8195-apmixed",
.of_match_table = of_match_clk_mt8195_apmixed,
diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
index b1b562e44cb4..8b45a3fad02f 100644
--- a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
+++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
@@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_apusys_pll);
static struct platform_driver clk_mt8195_apusys_pll_drv = {
.probe = clk_mt8195_apusys_pll_probe,
- .remove_new = clk_mt8195_apusys_pll_remove,
+ .remove = clk_mt8195_apusys_pll_remove,
.driver = {
.name = "clk-mt8195-apusys_pll",
.of_match_table = of_match_clk_mt8195_apusys_pll,
diff --git a/drivers/clk/mediatek/clk-mt8195-cam.c b/drivers/clk/mediatek/clk-mt8195-cam.c
index 7c8f77817616..02cb20c2948b 100644
--- a/drivers/clk/mediatek/clk-mt8195-cam.c
+++ b/drivers/clk/mediatek/clk-mt8195-cam.c
@@ -135,7 +135,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_cam);
static struct platform_driver clk_mt8195_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-cam",
.of_match_table = of_match_clk_mt8195_cam,
diff --git a/drivers/clk/mediatek/clk-mt8195-ccu.c b/drivers/clk/mediatek/clk-mt8195-ccu.c
index f78afd7b6ade..22cd1cb070f1 100644
--- a/drivers/clk/mediatek/clk-mt8195-ccu.c
+++ b/drivers/clk/mediatek/clk-mt8195-ccu.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ccu);
static struct platform_driver clk_mt8195_ccu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-ccu",
.of_match_table = of_match_clk_mt8195_ccu,
diff --git a/drivers/clk/mediatek/clk-mt8195-img.c b/drivers/clk/mediatek/clk-mt8195-img.c
index a59c082ef522..11beba4b2ac2 100644
--- a/drivers/clk/mediatek/clk-mt8195-img.c
+++ b/drivers/clk/mediatek/clk-mt8195-img.c
@@ -89,7 +89,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_img);
static struct platform_driver clk_mt8195_img_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-img",
.of_match_table = of_match_clk_mt8195_img,
diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
index 54557f1b0681..8711b18b1576 100644
--- a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
@@ -59,7 +59,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_imp_iic_wrap);
static struct platform_driver clk_mt8195_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-imp_iic_wrap",
.of_match_table = of_match_clk_mt8195_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index 165fe92c6f61..bb648a88e43a 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -233,7 +233,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_infra_ao);
static struct platform_driver clk_mt8195_infra_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-infra_ao",
.of_match_table = of_match_clk_mt8195_infra_ao,
diff --git a/drivers/clk/mediatek/clk-mt8195-ipe.c b/drivers/clk/mediatek/clk-mt8195-ipe.c
index 38a23d88370b..b1af00348a86 100644
--- a/drivers/clk/mediatek/clk-mt8195-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8195-ipe.c
@@ -44,7 +44,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_ipe);
static struct platform_driver clk_mt8195_ipe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-ipe",
.of_match_table = of_match_clk_mt8195_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c
index e19968eeb346..07c358db1af9 100644
--- a/drivers/clk/mediatek/clk-mt8195-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8195-mfg.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_mfg);
static struct platform_driver clk_mt8195_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-mfg",
.of_match_table = of_match_clk_mt8195_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8195-peri_ao.c b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
index fc341030f10b..b743eb60a30b 100644
--- a/drivers/clk/mediatek/clk-mt8195-peri_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_peri_ao);
static struct platform_driver clk_mt8195_peri_ao_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-peri_ao",
.of_match_table = of_match_clk_mt8195_peri_ao,
diff --git a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
index 1f37bde97d90..bc73fccd0515 100644
--- a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
@@ -40,7 +40,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_scp_adsp);
static struct platform_driver clk_mt8195_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-scp_adsp",
.of_match_table = of_match_clk_mt8195_scp_adsp,
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index 704498c40349..b1f44b873354 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -1354,7 +1354,7 @@ static void clk_mt8195_topck_remove(struct platform_device *pdev)
static struct platform_driver clk_mt8195_topck_drv = {
.probe = clk_mt8195_topck_probe,
- .remove_new = clk_mt8195_topck_remove,
+ .remove = clk_mt8195_topck_remove,
.driver = {
.name = "clk-mt8195-topck",
.of_match_table = of_match_clk_mt8195_topck,
diff --git a/drivers/clk/mediatek/clk-mt8195-vdec.c b/drivers/clk/mediatek/clk-mt8195-vdec.c
index 9e4cc1a82cbe..0bad706047c9 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdec.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_vdec);
static struct platform_driver clk_mt8195_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-vdec",
.of_match_table = of_match_clk_mt8195_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
index 6e9c3ef19502..581d99f8c254 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -106,7 +106,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo0_id_table);
static struct platform_driver clk_mt8195_vdo0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vdo0",
},
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
index 422e5729386c..7f8b1a8967bd 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -133,7 +133,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vdo1_id_table);
static struct platform_driver clk_mt8195_vdo1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vdo1",
},
diff --git a/drivers/clk/mediatek/clk-mt8195-venc.c b/drivers/clk/mediatek/clk-mt8195-venc.c
index db7a6ce97ed0..3b52ff025d5e 100644
--- a/drivers/clk/mediatek/clk-mt8195-venc.c
+++ b/drivers/clk/mediatek/clk-mt8195-venc.c
@@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_venc);
static struct platform_driver clk_mt8195_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-venc",
.of_match_table = of_match_clk_mt8195_venc,
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp0.c b/drivers/clk/mediatek/clk-mt8195-vpp0.c
index 77d9aaf47a25..0e3e1dd7977c 100644
--- a/drivers/clk/mediatek/clk-mt8195-vpp0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vpp0.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp0_id_table);
static struct platform_driver clk_mt8195_vpp0_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vpp0",
},
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp1.c b/drivers/clk/mediatek/clk-mt8195-vpp1.c
index 18ca8f1d9538..fb7b7aef0bba 100644
--- a/drivers/clk/mediatek/clk-mt8195-vpp1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vpp1.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8195_vpp1_id_table);
static struct platform_driver clk_mt8195_vpp1_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8195-vpp1",
},
diff --git a/drivers/clk/mediatek/clk-mt8195-wpe.c b/drivers/clk/mediatek/clk-mt8195-wpe.c
index 9c45a2fed0ce..315b93bbfcdc 100644
--- a/drivers/clk/mediatek/clk-mt8195-wpe.c
+++ b/drivers/clk/mediatek/clk-mt8195-wpe.c
@@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8195_wpe);
static struct platform_driver clk_mt8195_wpe_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8195-wpe",
.of_match_table = of_match_clk_mt8195_wpe,
diff --git a/drivers/clk/mediatek/clk-mt8365-apu.c b/drivers/clk/mediatek/clk-mt8365-apu.c
index 934060e6d9e9..2583c4704ffa 100644
--- a/drivers/clk/mediatek/clk-mt8365-apu.c
+++ b/drivers/clk/mediatek/clk-mt8365-apu.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_apu);
static struct platform_driver clk_mt8365_apu_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-apu",
.of_match_table = of_match_clk_mt8365_apu,
diff --git a/drivers/clk/mediatek/clk-mt8365-cam.c b/drivers/clk/mediatek/clk-mt8365-cam.c
index c8fe5f5bb06c..89d2bd50263b 100644
--- a/drivers/clk/mediatek/clk-mt8365-cam.c
+++ b/drivers/clk/mediatek/clk-mt8365-cam.c
@@ -48,7 +48,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_cam);
static struct platform_driver clk_mt8365_cam_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-cam",
.of_match_table = of_match_clk_mt8365_cam,
diff --git a/drivers/clk/mediatek/clk-mt8365-mfg.c b/drivers/clk/mediatek/clk-mt8365-mfg.c
index 5355f725363d..41bcd389119c 100644
--- a/drivers/clk/mediatek/clk-mt8365-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8365-mfg.c
@@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_mfg);
static struct platform_driver clk_mt8365_mfg_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-mfg",
.of_match_table = of_match_clk_mt8365_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
index 8201949bfdae..56fb2a43ecd0 100644
--- a/drivers/clk/mediatek/clk-mt8365-mm.c
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(platform, clk_mt8365_mm_id_table);
static struct platform_driver clk_mt8365_mm_drv = {
.probe = mtk_clk_pdev_probe,
- .remove_new = mtk_clk_pdev_remove,
+ .remove = mtk_clk_pdev_remove,
.driver = {
.name = "clk-mt8365-mm",
},
diff --git a/drivers/clk/mediatek/clk-mt8365-vdec.c b/drivers/clk/mediatek/clk-mt8365-vdec.c
index 1be0b3faa2c3..f5d0518bc2e0 100644
--- a/drivers/clk/mediatek/clk-mt8365-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8365-vdec.c
@@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_vdec);
static struct platform_driver clk_mt8365_vdec_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-vdec",
.of_match_table = of_match_clk_mt8365_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8365-venc.c b/drivers/clk/mediatek/clk-mt8365-venc.c
index 4228ddec5657..35abd908537c 100644
--- a/drivers/clk/mediatek/clk-mt8365-venc.c
+++ b/drivers/clk/mediatek/clk-mt8365-venc.c
@@ -43,7 +43,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8365_venc);
static struct platform_driver clk_mt8365_venc_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8365-venc",
.of_match_table = of_match_clk_mt8365_venc,
diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
index 485b525b8acd..e7952121112e 100644
--- a/drivers/clk/mediatek/clk-mt8365.c
+++ b/drivers/clk/mediatek/clk-mt8365.c
@@ -809,7 +809,7 @@ static struct platform_driver clk_mt8365_drv = {
.of_match_table = of_match_clk_mt8365,
},
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
};
module_platform_driver(clk_mt8365_drv);
diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
index 53e1866fb8e2..6227635fd5a1 100644
--- a/drivers/clk/mediatek/clk-mt8516-aud.c
+++ b/drivers/clk/mediatek/clk-mt8516-aud.c
@@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8516_aud);
static struct platform_driver clk_mt8516_aud_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8516-aud",
.of_match_table = of_match_clk_mt8516_aud,
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
index b8ae837c59dc..21eb052b0a53 100644
--- a/drivers/clk/mediatek/clk-mt8516.c
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -669,7 +669,7 @@ MODULE_DEVICE_TABLE(of, of_match_clk_mt8516);
static struct platform_driver clk_mt8516_drv = {
.probe = mtk_clk_simple_probe,
- .remove_new = mtk_clk_simple_remove,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8516",
.of_match_table = of_match_clk_mt8516,
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 290ceda84ce4..2e3303975096 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -110,65 +110,6 @@ static int reset_xlate(struct reset_controller_dev *rcdev,
return data->desc->rst_idx_map[reset_spec->args[0]];
}
-int mtk_register_reset_controller(struct device_node *np,
- const struct mtk_clk_rst_desc *desc)
-{
- struct regmap *regmap;
- const struct reset_control_ops *rcops = NULL;
- struct mtk_clk_rst_data *data;
- int ret;
-
- if (!desc) {
- pr_err("mtk clock reset desc is NULL\n");
- return -EINVAL;
- }
-
- switch (desc->version) {
- case MTK_RST_SIMPLE:
- rcops = &mtk_reset_ops;
- break;
- case MTK_RST_SET_CLR:
- rcops = &mtk_reset_ops_set_clr;
- break;
- default:
- pr_err("Unknown reset version %d\n", desc->version);
- return -EINVAL;
- }
-
- regmap = device_node_to_regmap(np);
- if (IS_ERR(regmap)) {
- pr_err("Cannot find regmap for %pOF: %pe\n", np, regmap);
- return -EINVAL;
- }
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->desc = desc;
- data->regmap = regmap;
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.ops = rcops;
- data->rcdev.of_node = np;
-
- if (data->desc->rst_idx_map_nr > 0) {
- data->rcdev.of_reset_n_cells = 1;
- data->rcdev.nr_resets = desc->rst_idx_map_nr;
- data->rcdev.of_xlate = reset_xlate;
- } else {
- data->rcdev.nr_resets = desc->rst_bank_nr * RST_NR_PER_BANK;
- }
-
- ret = reset_controller_register(&data->rcdev);
- if (ret) {
- pr_err("could not register reset controller: %d\n", ret);
- kfree(data);
- return ret;
- }
-
- return 0;
-}
-
int mtk_register_reset_controller_with_dev(struct device *dev,
const struct mtk_clk_rst_desc *desc)
{
@@ -198,7 +139,7 @@ int mtk_register_reset_controller_with_dev(struct device *dev,
regmap = device_node_to_regmap(np);
if (IS_ERR(regmap)) {
dev_err(dev, "Cannot find regmap %pe\n", regmap);
- return -EINVAL;
+ return PTR_ERR(regmap);
}
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
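
Besides deleting the now-unused device-less `mtk_register_reset_controller()` above, the surviving `_with_dev` path stops flattening a failed regmap lookup to `-EINVAL` and instead forwards the errno encoded in the error pointer. A sketch of the idiom, with a hypothetical `claim_regmap()` wrapper:

	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>

	static int claim_regmap(struct device_node *np)
	{
		struct regmap *map = device_node_to_regmap(np);

		/* IS_ERR() spots an ERR_PTR(); PTR_ERR() recovers the errno. */
		if (IS_ERR(map))
			return PTR_ERR(map);	/* e.g. -ENODEV, passed through unchanged */

		return 0;
	}
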
diff --git a/drivers/clk/mediatek/reset.h b/drivers/clk/mediatek/reset.h
index 6a58a3d59165..562ffd290a22 100644
--- a/drivers/clk/mediatek/reset.h
+++ b/drivers/clk/mediatek/reset.h
@@ -60,16 +60,6 @@ struct mtk_clk_rst_data {
};
/**
- * mtk_register_reset_controller - Register MediaTek clock reset controller
- * @np: Pointer to device node.
- * @desc: Constant pointer to description of clock reset.
- *
- * Return: 0 on success and errorno otherwise.
- */
-int mtk_register_reset_controller(struct device_node *np,
- const struct mtk_clk_rst_desc *desc);
-
-/**
 * mtk_register_reset_controller_with_dev - Register MediaTek clock reset controller with device
 * @dev: Pointer to device.
* @desc: Constant pointer to description of clock reset.
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index 99b5bc450446..7aa6abb2eb1f 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -2183,7 +2183,7 @@ static struct clk_regmap *const a1_periphs_regmaps[] = {
&dmc_sel2,
};
-static struct regmap_config a1_periphs_regmap_cfg = {
+static const struct regmap_config a1_periphs_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -2246,3 +2246,4 @@ MODULE_DESCRIPTION("Amlogic A1 Peripherals Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index a16e537d139a..8e5a42d1afbb 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -295,7 +295,7 @@ static struct clk_regmap *const a1_pll_regmaps[] = {
&hifi_pll,
};
-static struct regmap_config a1_pll_regmap_cfg = {
+static const struct regmap_config a1_pll_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -360,3 +360,4 @@ MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index fa1dcb7f91e4..1dabc81535a6 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -342,3 +342,4 @@ module_platform_driver(axg_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index e03a5bf899c0..beda86349389 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -753,6 +753,9 @@ static struct clk_regmap toddr_d =
AUD_PCLK_GATE(toddr_d, AUDIO_CLK_GATE_EN1, 1);
static struct clk_regmap loopback_b =
AUD_PCLK_GATE(loopback_b, AUDIO_CLK_GATE_EN1, 2);
+static struct clk_regmap earcrx =
+ AUD_PCLK_GATE(earcrx, AUDIO_CLK_GATE_EN1, 6);
+
static struct clk_regmap sm1_mst_a_mclk_sel =
AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
@@ -766,6 +769,10 @@ static struct clk_regmap sm1_mst_e_mclk_sel =
AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
static struct clk_regmap sm1_mst_f_mclk_sel =
AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+static struct clk_regmap sm1_earcrx_cmdc_clk_sel =
+ AUD_MST_MCLK_MUX(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL);
+static struct clk_regmap sm1_earcrx_dmac_clk_sel =
+ AUD_MST_MCLK_MUX(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL);
static struct clk_regmap sm1_mst_a_mclk_div =
AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
@@ -779,6 +786,11 @@ static struct clk_regmap sm1_mst_e_mclk_div =
AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
static struct clk_regmap sm1_mst_f_mclk_div =
AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+static struct clk_regmap sm1_earcrx_cmdc_clk_div =
+ AUD_MST_MCLK_DIV(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL);
+static struct clk_regmap sm1_earcrx_dmac_clk_div =
+ AUD_MST_MCLK_DIV(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL);
+
static struct clk_regmap sm1_mst_a_mclk =
AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
@@ -792,6 +804,10 @@ static struct clk_regmap sm1_mst_e_mclk =
AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
static struct clk_regmap sm1_mst_f_mclk =
AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+static struct clk_regmap sm1_earcrx_cmdc_clk =
+ AUD_MST_MCLK_GATE(earcrx_cmdc_clk, AUDIO_EARCRX_CMDC_CLK_CTRL);
+static struct clk_regmap sm1_earcrx_dmac_clk =
+ AUD_MST_MCLK_GATE(earcrx_dmac_clk, AUDIO_EARCRX_DMAC_CLK_CTRL);
static struct clk_regmap sm1_tdm_mclk_pad_0 = AUD_TDM_PAD_CTRL(
tdm_mclk_pad_0, AUDIO_SM1_MST_PAD_CTRL0, 0, mclk_pad_ctrl_parent_data);
@@ -1232,6 +1248,13 @@ static struct clk_hw *sm1_audio_hw_clks[] = {
[AUD_CLKID_SYSCLK_A_EN] = &sm1_sysclk_a_en.hw,
[AUD_CLKID_SYSCLK_B_DIV] = &sm1_sysclk_b_div.hw,
[AUD_CLKID_SYSCLK_B_EN] = &sm1_sysclk_b_en.hw,
+ [AUD_CLKID_EARCRX] = &earcrx.hw,
+ [AUD_CLKID_EARCRX_CMDC_SEL] = &sm1_earcrx_cmdc_clk_sel.hw,
+ [AUD_CLKID_EARCRX_CMDC_DIV] = &sm1_earcrx_cmdc_clk_div.hw,
+ [AUD_CLKID_EARCRX_CMDC] = &sm1_earcrx_cmdc_clk.hw,
+ [AUD_CLKID_EARCRX_DMAC_SEL] = &sm1_earcrx_dmac_clk_sel.hw,
+ [AUD_CLKID_EARCRX_DMAC_DIV] = &sm1_earcrx_dmac_clk_div.hw,
+ [AUD_CLKID_EARCRX_DMAC] = &sm1_earcrx_dmac_clk.hw,
};
@@ -1646,6 +1669,13 @@ static struct clk_regmap *const sm1_clk_regmaps[] = {
&sm1_sysclk_a_en,
&sm1_sysclk_b_div,
&sm1_sysclk_b_en,
+ &earcrx,
+ &sm1_earcrx_cmdc_clk_sel,
+ &sm1_earcrx_cmdc_clk_div,
+ &sm1_earcrx_cmdc_clk,
+ &sm1_earcrx_dmac_clk_sel,
+ &sm1_earcrx_dmac_clk_div,
+ &sm1_earcrx_dmac_clk,
};
struct axg_audio_reset_data {
@@ -1726,11 +1756,10 @@ static const struct reset_control_ops axg_audio_rstc_ops = {
.status = axg_audio_reset_status,
};
-static const struct regmap_config axg_audio_regmap_cfg = {
+static struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
struct audioclk_data {
@@ -1739,6 +1768,7 @@ struct audioclk_data {
struct meson_clk_hw_data hw_clks;
unsigned int reset_offset;
unsigned int reset_num;
+ unsigned int max_register;
};
static int axg_audio_clkc_probe(struct platform_device *pdev)
@@ -1760,6 +1790,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ axg_audio_regmap_cfg.max_register = data->max_register;
map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg);
if (IS_ERR(map)) {
dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map));
@@ -1828,6 +1859,7 @@ static const struct audioclk_data axg_audioclk_data = {
.hws = axg_audio_hw_clks,
.num = ARRAY_SIZE(axg_audio_hw_clks),
},
+ .max_register = AUDIO_CLK_PDMIN_CTRL1,
};
static const struct audioclk_data g12a_audioclk_data = {
@@ -1839,6 +1871,7 @@ static const struct audioclk_data g12a_audioclk_data = {
},
.reset_offset = AUDIO_SW_RESET,
.reset_num = 26,
+ .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
static const struct audioclk_data sm1_audioclk_data = {
@@ -1850,6 +1883,7 @@ static const struct audioclk_data sm1_audioclk_data = {
},
.reset_offset = AUDIO_SM1_SW_RESET0,
.reset_num = 39,
+ .max_register = AUDIO_EARCRX_DMAC_CLK_CTRL,
};
static const struct of_device_id clkc_match_table[] = {
@@ -1878,3 +1912,4 @@ module_platform_driver(axg_audio_driver);
MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
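
Note the inversion in axg-audio.c relative to the other meson controllers: `axg_audio_regmap_cfg` drops `const` so probe can stamp in a per-variant `max_register` pulled from the matched `audioclk_data`, letting AXG, G12A and SM1 describe different register windows with one config. A sketch of the pattern under those assumptions (hypothetical names, reusing the `audioclk_data` layout from the hunk above); the cost is a mutable file-scope config instead of a const one:

	#include <linux/err.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static struct regmap_config cfg = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		/* .max_register is filled in per variant at probe time */
	};

	static int sketch_probe(struct platform_device *pdev)
	{
		const struct audioclk_data *data = of_device_get_match_data(&pdev->dev);
		void __iomem *regs = devm_platform_ioremap_resource(pdev, 0);
		struct regmap *map;

		if (IS_ERR(regs))
			return PTR_ERR(regs);

		cfg.max_register = data->max_register;	/* SoC-specific ceiling */
		map = devm_regmap_init_mmio(&pdev->dev, regs, &cfg);

		return PTR_ERR_OR_ZERO(map);
	}
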
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
index 01a3da19933e..9e7765b630c9 100644
--- a/drivers/clk/meson/axg-audio.h
+++ b/drivers/clk/meson/axg-audio.h
@@ -64,5 +64,7 @@
#define AUDIO_SM1_SW_RESET1 0x02C
#define AUDIO_CLK81_CTRL 0x030
#define AUDIO_CLK81_EN 0x034
+#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0
+#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4
#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 065b5f198297..757c7a28c53d 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -2187,3 +2187,4 @@ module_platform_driver(axg_driver);
MODULE_DESCRIPTION("Amlogic AXG Main Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c
index 56b33d23c317..7dcbf4ebee07 100644
--- a/drivers/clk/meson/c3-peripherals.c
+++ b/drivers/clk/meson/c3-peripherals.c
@@ -2296,7 +2296,7 @@ static struct clk_regmap *const c3_periphs_clk_regmaps[] = {
&vapb,
};
-static struct regmap_config clkc_regmap_config = {
+static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -2364,3 +2364,4 @@ module_platform_driver(c3_peripherals_driver);
MODULE_DESCRIPTION("Amlogic C3 Peripherals Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c
index 6d5271c61d14..32bd2ed9d304 100644
--- a/drivers/clk/meson/c3-pll.c
+++ b/drivers/clk/meson/c3-pll.c
@@ -678,7 +678,7 @@ static struct clk_regmap *const c3_pll_clk_regmaps[] = {
&mclk1,
};
-static struct regmap_config clkc_regmap_config = {
+static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -745,3 +745,4 @@ module_platform_driver(c3_pll_driver);
MODULE_DESCRIPTION("Amlogic C3 PLL Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
index aa824b030cb8..6c1f58826e24 100644
--- a/drivers/clk/meson/clk-cpu-dyndiv.c
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -65,8 +65,9 @@ const struct clk_ops meson_clk_cpu_dyndiv_ops = {
.determine_rate = meson_clk_cpu_dyndiv_determine_rate,
.set_rate = meson_clk_cpu_dyndiv_set_rate,
};
-EXPORT_SYMBOL_GPL(meson_clk_cpu_dyndiv_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_cpu_dyndiv_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic CPU Dynamic Clock divider");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
index d46c02b51be5..913bf25d3771 100644
--- a/drivers/clk/meson/clk-dualdiv.c
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -130,14 +130,15 @@ const struct clk_ops meson_clk_dualdiv_ops = {
.determine_rate = meson_clk_dualdiv_determine_rate,
.set_rate = meson_clk_dualdiv_set_rate,
};
-EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, CLK_MESON);
const struct clk_ops meson_clk_dualdiv_ro_ops = {
.recalc_rate = meson_clk_dualdiv_recalc_rate,
};
-EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic dual divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index eae9b7dc5a6c..f639d56f0fd3 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -165,7 +165,7 @@ const struct clk_ops meson_clk_mpll_ro_ops = {
.recalc_rate = mpll_recalc_rate,
.determine_rate = mpll_determine_rate,
};
-EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ro_ops, CLK_MESON);
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
@@ -173,8 +173,9 @@ const struct clk_ops meson_clk_mpll_ops = {
.set_rate = mpll_set_rate,
.init = mpll_init,
};
-EXPORT_SYMBOL_GPL(meson_clk_mpll_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic MPLL driver");
MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index ff3f0b1a3ed1..c1526fbfb6c4 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -61,7 +61,7 @@ const struct clk_ops meson_clk_phase_ops = {
.get_phase = meson_clk_phase_get_phase,
.set_phase = meson_clk_phase_set_phase,
};
-EXPORT_SYMBOL_GPL(meson_clk_phase_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_phase_ops, CLK_MESON);
/*
* This is a special clock for the audio controller.
@@ -123,7 +123,7 @@ const struct clk_ops meson_clk_triphase_ops = {
.get_phase = meson_clk_triphase_get_phase,
.set_phase = meson_clk_triphase_set_phase,
};
-EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_triphase_ops, CLK_MESON);
/*
* This is a special clock for the audio controller.
@@ -178,9 +178,9 @@ const struct clk_ops meson_sclk_ws_inv_ops = {
.get_phase = meson_sclk_ws_inv_get_phase,
.set_phase = meson_sclk_ws_inv_set_phase,
};
-EXPORT_SYMBOL_GPL(meson_sclk_ws_inv_ops);
-
+EXPORT_SYMBOL_NS_GPL(meson_sclk_ws_inv_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic phase driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 467dc8b61a37..bc570a2ff3a3 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -472,7 +472,7 @@ const struct clk_ops meson_clk_pcie_pll_ops = {
.enable = meson_clk_pcie_pll_enable,
.disable = meson_clk_pll_disable
};
-EXPORT_SYMBOL_GPL(meson_clk_pcie_pll_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pcie_pll_ops, CLK_MESON);
const struct clk_ops meson_clk_pll_ops = {
.init = meson_clk_pll_init,
@@ -483,15 +483,16 @@ const struct clk_ops meson_clk_pll_ops = {
.enable = meson_clk_pll_enable,
.disable = meson_clk_pll_disable
};
-EXPORT_SYMBOL_GPL(meson_clk_pll_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, CLK_MESON);
const struct clk_ops meson_clk_pll_ro_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
.is_enabled = meson_clk_pll_is_enabled,
};
-EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ro_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic PLL driver");
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index ad116d24f700..07f7e441b916 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -49,12 +49,12 @@ const struct clk_ops clk_regmap_gate_ops = {
.disable = clk_regmap_gate_disable,
.is_enabled = clk_regmap_gate_is_enabled,
};
-EXPORT_SYMBOL_GPL(clk_regmap_gate_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, CLK_MESON);
const struct clk_ops clk_regmap_gate_ro_ops = {
.is_enabled = clk_regmap_gate_is_enabled,
};
-EXPORT_SYMBOL_GPL(clk_regmap_gate_ro_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, CLK_MESON);
static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw,
unsigned long prate)
@@ -125,13 +125,13 @@ const struct clk_ops clk_regmap_divider_ops = {
.determine_rate = clk_regmap_div_determine_rate,
.set_rate = clk_regmap_div_set_rate,
};
-EXPORT_SYMBOL_GPL(clk_regmap_divider_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, CLK_MESON);
const struct clk_ops clk_regmap_divider_ro_ops = {
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
};
-EXPORT_SYMBOL_GPL(clk_regmap_divider_ro_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ro_ops, CLK_MESON);
static u8 clk_regmap_mux_get_parent(struct clk_hw *hw)
{
@@ -174,13 +174,14 @@ const struct clk_ops clk_regmap_mux_ops = {
.set_parent = clk_regmap_mux_set_parent,
.determine_rate = clk_regmap_mux_determine_rate,
};
-EXPORT_SYMBOL_GPL(clk_regmap_mux_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, CLK_MESON);
const struct clk_ops clk_regmap_mux_ro_ops = {
.get_parent = clk_regmap_mux_get_parent,
};
-EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
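
Every meson file in this stretch gains the same pair: helper ops are exported with `EXPORT_SYMBOL_NS_GPL(sym, CLK_MESON)` and each module that consumes them declares `MODULE_IMPORT_NS(CLK_MESON)`. Namespaced exports behave like ordinary GPL exports, except that modpost flags any module using one without importing the namespace (roughly, "module X uses symbol Y from namespace CLK_MESON, but does not import it"), which keeps these clk internals from becoming ambient kernel API. A provider/consumer sketch with a hypothetical symbol name:

	/* provider.c */
	const struct clk_ops my_div_ops = {
		/* ... */
	};
	EXPORT_SYMBOL_NS_GPL(my_div_ops, CLK_MESON);

	/* consumer.c */
	MODULE_IMPORT_NS(CLK_MESON);	/* required before my_div_ops may be referenced */
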
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index a5f4d15d8396..f0a18d8c9fc2 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -477,3 +477,4 @@ module_platform_driver(g12a_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic G12A Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 4647e84d2502..02dda57105b1 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -5616,3 +5616,4 @@ module_platform_driver(g12a_driver);
MODULE_DESCRIPTION("Amlogic G12/SM1 Main Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 33fafbdf65c4..83b034157b35 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -303,3 +303,4 @@ module_platform_driver(gxbb_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index d3175e4335bb..f071faad1ebb 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -3571,3 +3571,4 @@ module_platform_driver(gxbb_driver);
MODULE_DESCRIPTION("Amlogic GXBB Main Clock Controller driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 2dd064201fae..053940ee8940 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -88,7 +88,8 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
}
-EXPORT_SYMBOL_GPL(meson_aoclkc_probe);
+EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, CLK_MESON);
MODULE_DESCRIPTION("Amlogic Always-ON Clock Controller helpers");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/meson-clkc-utils.c b/drivers/clk/meson/meson-clkc-utils.c
index 4dd5948b7ae4..a8cd2c21fab7 100644
--- a/drivers/clk/meson/meson-clkc-utils.c
+++ b/drivers/clk/meson/meson-clkc-utils.c
@@ -20,7 +20,8 @@ struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_da
return data->hws[idx];
}
-EXPORT_SYMBOL_GPL(meson_clk_hw_get);
+EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, CLK_MESON);
MODULE_DESCRIPTION("Amlogic Clock Controller Utilities");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index 570992eece86..66f79e384fe5 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -57,7 +57,8 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
}
-EXPORT_SYMBOL_GPL(meson_eeclkc_probe);
+EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, CLK_MESON);
MODULE_DESCRIPTION("Amlogic Main Clock Controller Helpers");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index 130c50554290..c930cf0614a0 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -3747,7 +3747,7 @@ static struct clk_regmap *const s4_periphs_clk_regmaps[] = {
&s4_adc_extclk_in_gate,
};
-static struct regmap_config clkc_regmap_config = {
+static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -3814,3 +3814,4 @@ module_platform_driver(s4_driver);
MODULE_DESCRIPTION("Amlogic S4 Peripherals Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index c2afade24f9f..b0258933fb9d 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -799,7 +799,7 @@ static const struct reg_sequence s4_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL0, .def = 0x00000543 },
};
-static struct regmap_config clkc_regmap_config = {
+static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -873,3 +873,4 @@ module_platform_driver(s4_driver);
MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index 987f5b06587c..ae03b048182f 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -247,8 +247,9 @@ const struct clk_ops meson_sclk_div_ops = {
.set_duty_cycle = sclk_div_set_duty_cycle,
.init = sclk_div_init,
};
-EXPORT_SYMBOL_GPL(meson_sclk_div_ops);
+EXPORT_SYMBOL_NS_GPL(meson_sclk_div_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic Sample divider driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c
index e886df55d6e3..36f637d2d01b 100644
--- a/drivers/clk/meson/vclk.c
+++ b/drivers/clk/meson/vclk.c
@@ -49,7 +49,7 @@ const struct clk_ops meson_vclk_gate_ops = {
.disable = meson_vclk_gate_disable,
.is_enabled = meson_vclk_gate_is_enabled,
};
-EXPORT_SYMBOL_GPL(meson_vclk_gate_ops);
+EXPORT_SYMBOL_NS_GPL(meson_vclk_gate_ops, CLK_MESON);
/* The VCLK Divider has supplementary reset & enable bits */
@@ -134,8 +134,9 @@ const struct clk_ops meson_vclk_div_ops = {
.disable = meson_vclk_div_disable,
.is_enabled = meson_vclk_div_is_enabled,
};
-EXPORT_SYMBOL_GPL(meson_vclk_div_ops);
+EXPORT_SYMBOL_NS_GPL(meson_vclk_div_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic vclk clock driver");
MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index ee129f86794d..486cf68fc97a 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -92,8 +92,9 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
const struct clk_ops meson_vid_pll_div_ro_ops = {
.recalc_rate = meson_vid_pll_div_recalc_rate,
};
-EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops);
+EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, CLK_MESON);
MODULE_DESCRIPTION("Amlogic video pll divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(CLK_MESON);
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index ae521aaf8cdc..88d798d510cd 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -436,7 +436,7 @@ static struct platform_driver mmp2_audio_clk_driver = {
.pm = &mmp2_audio_clk_pm_ops,
},
.probe = mmp2_audio_clk_probe,
- .remove_new = mmp2_audio_clk_remove,
+ .remove = mmp2_audio_clk_remove,
};
module_platform_driver(mmp2_audio_clk_driver);
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index 454d131f475e..07ac9e6937e5 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -447,7 +447,6 @@ struct clk *mmp_clk_register_mix(struct device *dev,
struct mmp_clk_mix *mix;
struct clk *clk;
struct clk_init_data init;
- size_t table_bytes;
mix = kzalloc(sizeof(*mix), GFP_KERNEL);
if (!mix)
@@ -461,8 +460,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
if (config->table) {
- table_bytes = sizeof(*config->table) * config->table_size;
- mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
+ mix->table = kmemdup_array(config->table, config->table_size,
+ sizeof(*mix->table), GFP_KERNEL);
if (!mix->table)
goto free_mix;
@@ -470,9 +469,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
}
if (config->mux_table) {
- table_bytes = sizeof(u32) * num_parents;
- mix->mux_table = kmemdup(config->mux_table, table_bytes,
- GFP_KERNEL);
+ mix->mux_table = kmemdup_array(config->mux_table, num_parents,
+ sizeof(*mix->mux_table), GFP_KERNEL);
if (!mix->mux_table) {
kfree(mix->table);
goto free_mix;
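
The clk-mix.c hunks swap open-coded `count * size` arithmetic plus `kmemdup()` for `kmemdup_array()`, which keeps the element count and size separate and checks the multiplication for overflow, mirroring `kcalloc()`. Before/after, assuming a caller-supplied `src` and `n`:

	/* before: manual multiplication, wraps silently on overflow */
	tbl = kmemdup(src, n * sizeof(*src), GFP_KERNEL);

	/* after: overflow-checked, and the element size is self-documenting */
	tbl = kmemdup_array(src, n, sizeof(*src), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
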
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 8701a58a5804..13906e31bef8 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -792,7 +792,7 @@ static void armada_3700_periph_clock_remove(struct platform_device *pdev)
static struct platform_driver armada_3700_periph_clock_driver = {
.probe = armada_3700_periph_clock_probe,
- .remove_new = armada_3700_periph_clock_remove,
+ .remove = armada_3700_periph_clock_remove,
.driver = {
.name = "marvell-armada-3700-periph-clock",
.of_match_table = armada_3700_periph_clock_of_match,
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index e94c336e0f1c..1a16f9c0b1d8 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -141,7 +141,7 @@ static const struct of_device_id armada_3700_tbg_clock_of_match[] = {
static struct platform_driver armada_3700_tbg_clock_driver = {
.probe = armada_3700_tbg_clock_probe,
- .remove_new = armada_3700_tbg_clock_remove,
+ .remove = armada_3700_tbg_clock_remove,
.driver = {
.name = "marvell-armada-3700-tbg-clock",
.of_match_table = armada_3700_tbg_clock_of_match,
diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
index 0e2e7d00ae11..ca88e5e78b06 100644
--- a/drivers/clk/mvebu/armada-37xx-xtal.c
+++ b/drivers/clk/mvebu/armada-37xx-xtal.c
@@ -77,7 +77,7 @@ static const struct of_device_id armada_3700_xtal_clock_of_match[] = {
static struct platform_driver armada_3700_xtal_clock_driver = {
.probe = armada_3700_xtal_clock_probe,
- .remove_new = armada_3700_xtal_clock_remove,
+ .remove = armada_3700_xtal_clock_remove,
.driver = {
.name = "marvell-armada-3700-xtal-clock",
.of_match_table = armada_3700_xtal_clock_of_match,
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 11ae28430dad..a3e2a09e2105 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -810,6 +810,14 @@ config SDX_GCC_75
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SM_CAMCC_4450
+ tristate "SM4450 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_4450
+ help
+ Support for the camera clock controller on SM4450 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_CAMCC_6350
tristate "SM6350 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -826,6 +834,16 @@ config SM_CAMCC_7150
Support for the camera clock controller on SM7150 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_8150
+ tristate "SM8150 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_8150
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ SM8150 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SM_CAMCC_8250
tristate "SM8250 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -858,6 +876,16 @@ config SM_CAMCC_8650
Support for the camera clock controller on SM8650 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_DISPCC_4450
+ tristate "SM4450 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_4450
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM4450 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_DISPCC_6115
tristate "SM6115 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -931,20 +959,10 @@ config SM_DISPCC_8450
config SM_DISPCC_8550
tristate "SM8550 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
- depends on SM_GCC_8550
+ depends on SM_GCC_8550 || SM_GCC_8650
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8550 devices.
- Say Y if you want to support display devices and functionality such as
- splash screen.
-
-config SM_DISPCC_8650
- tristate "SM8650 Display Clock Controller"
- depends on ARM64 || COMPILE_TEST
- select SM_GCC_8650
- help
- Support for the display clock controller on Qualcomm Technologies, Inc
- SM8650 devices.
+ SM8550 or SM8650 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
@@ -1054,6 +1072,15 @@ config SM_GCC_8650
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GPUCC_4450
+ tristate "SM4450 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_4450
+ help
+ Support for the graphics clock controller on SM4450 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_GPUCC_6115
tristate "SM6115 Graphics Clock Controller"
select SM_GCC_6115
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 0de5fce6113a..2b378667a63f 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -107,12 +107,15 @@ obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SDX_GCC_75) += gcc-sdx75.o
+obj-$(CONFIG_SM_CAMCC_4450) += camcc-sm4450.o
obj-$(CONFIG_SM_CAMCC_6350) += camcc-sm6350.o
obj-$(CONFIG_SM_CAMCC_7150) += camcc-sm7150.o
+obj-$(CONFIG_SM_CAMCC_8150) += camcc-sm8150.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_CAMCC_8550) += camcc-sm8550.o
obj-$(CONFIG_SM_CAMCC_8650) += camcc-sm8650.o
+obj-$(CONFIG_SM_DISPCC_4450) += dispcc-sm4450.o
obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
@@ -121,7 +124,6 @@ obj-$(CONFIG_SM_DISPCC_7150) += dispcc-sm7150.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
-obj-$(CONFIG_SM_DISPCC_8650) += dispcc-sm8650.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -134,6 +136,7 @@ obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
+obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index f9c5e296dba2..f43d455ab4b8 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -151,6 +151,7 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
}
static const struct of_device_id qcom_a53pll_match_table[] = {
+ { .compatible = "qcom,msm8226-a7pll" },
{ .compatible = "qcom,msm8916-a53pll" },
{ .compatible = "qcom,msm8939-a53pll" },
{ }
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index ce57b333ec99..ef31386831eb 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -128,7 +128,7 @@ static void qcom_apcs_msm8916_clk_remove(struct platform_device *pdev)
static struct platform_driver qcom_apcs_msm8916_clk_driver = {
.probe = qcom_apcs_msm8916_clk_probe,
- .remove_new = qcom_apcs_msm8916_clk_remove,
+ .remove = qcom_apcs_msm8916_clk_remove,
.driver = {
.name = "qcom-apcs-msm8916-clk",
},
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index d644e6e1f8b7..76ece6c4a969 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -131,7 +131,7 @@ static void qcom_apcs_sdx55_clk_remove(struct platform_device *pdev)
static struct platform_driver qcom_apcs_sdx55_clk_driver = {
.probe = qcom_apcs_sdx55_clk_probe,
- .remove_new = qcom_apcs_sdx55_clk_remove,
+ .remove = qcom_apcs_sdx55_clk_remove,
.driver = {
.name = "qcom-sdx55-acps-clk",
},
diff --git a/drivers/clk/qcom/camcc-sm4450.c b/drivers/clk/qcom/camcc-sm4450.c
new file mode 100644
index 000000000000..f8503ced3d05
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm4450.c
@@ -0,0 +1,1688 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm4450-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL1_OUT_MAIN,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct pll_vco rivian_evo_vco[] = {
+ { 864000000, 1056000000, 0 },
+};
+
+/* 1200.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000805,
+};
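+
+/*
+ * Assuming the usual 19.2 MHz TCXO reference on Qualcomm SoCs, the rate
+ * follows rate = ref * (l + alpha / 2^16), so here:
+ * 19.2 MHz * (0x3e + 0x8000 / 0x10000) = 19.2 MHz * 62.5 = 1200.0 MHz.
+ * The 600.0 MHz (l = 0x1f, alpha = 0x4000) and 960.0 MHz (l = 0x32,
+ * alpha = 0x0) configurations below follow the same arithmetic.
+ */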
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+/* 600.0 MHz Configuration */
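+/* 19.2 MHz TCXO * (0x1f + 0x4000 / 0x10000) = 19.2 MHz * 31.25 = 600 MHz */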
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+/* 960.0 MHz Configuration */
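+/* 19.2 MHz TCXO * 0x32 = 19.2 MHz * 50 = 960 MHz (alpha = 0, integer multiple) */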
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x90008820,
+ .config_ctl_hi_val = 0x00890263,
+ .config_ctl_hi1_val = 0x00000247,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00400000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = rivian_evo_vco,
+ .num_vco = ARRAY_SIZE(rivian_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = {
+ .offset = 0x2000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll2_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_rivian_evo_ops,
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
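+/* 19.2 MHz TCXO * (0x24 + 0x7555 / 0x10000) = 19.2 MHz * ~36.458 = ~700 MHz */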
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
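+/* Each parent_map entry pairs a P_* parent with its hardware mux select value */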
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2_out_even.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL4_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+ { .hw = &cam_cc_pll4.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL1_OUT_MAIN, 2 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_MAIN, 2 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
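+/*
+ * F(rate, parent, pre_div, m, n): the F() macro encodes the pre-divider as
+ * (2 * pre_div - 1), which is how half-integer dividers such as 2.5 fit in
+ * the frequency tables below.
+ */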
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
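+/* Shared RCGs park on the XO source while disabled instead of gating in place */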
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0xa004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x13014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x10004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x11004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xc054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cre_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x9028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x904c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_MAIN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0xa02c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xf014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
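+/* MCLKs use the M/N counter to divide the 960 MHz PLL2, e.g. 960 MHz * 1/50 = 19.2 MHz */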
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x8024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x8044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x8064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ope_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ope_0_clk_src = {
+ .cmd_rcgr = 0xb004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_ope_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0xa048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(432000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(548000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(630000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_tfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0xc02c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_clk_src = {
+ .cmd_rcgr = 0xd004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_tfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0xd024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
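+/* Branch clocks are simple gates, each enabled by bit 0 of its CBCR register */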
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0xa060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0xa044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+ .halt_reg = 0x13034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0x1302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_hf_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_hf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_sf_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x1101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x1401c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_ahb_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cre_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x9040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x9064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x9044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x9068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+ .halt_reg = 0xf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xf02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_cti_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ts_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_ahb_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_areg_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ope_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0x14018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0xf034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_ahb_clk = {
+ .halt_reg = 0xc070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0xc06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_csid_clk = {
+ .halt_reg = 0xc044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_ahb_clk = {
+ .halt_reg = 0xd048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_clk = {
+ .halt_reg = 0xd01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0xd044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_csid_clk = {
+ .halt_reg = 0xd03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
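+/* GDSC (globally distributed switch controller) power domain for the camera subsystem */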
+static struct gdsc cam_cc_camss_top_gdsc = {
+ .gdscr = 0x14004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_camss_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_sm4450_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_HF_CLK] = &cam_cc_camnoc_axi_hf_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_SF_CLK] = &cam_cc_camnoc_axi_sf_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CRE_AHB_CLK] = &cam_cc_cre_ahb_clk.clkr,
+ [CAM_CC_CRE_CLK] = &cam_cc_cre_clk.clkr,
+ [CAM_CC_CRE_CLK_SRC] = &cam_cc_cre_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+ [CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_OPE_0_AHB_CLK] = &cam_cc_ope_0_ahb_clk.clkr,
+ [CAM_CC_OPE_0_AREG_CLK] = &cam_cc_ope_0_areg_clk.clkr,
+ [CAM_CC_OPE_0_CLK] = &cam_cc_ope_0_clk.clkr,
+ [CAM_CC_OPE_0_CLK_SRC] = &cam_cc_ope_0_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+ [CAM_CC_TFE_0_AHB_CLK] = &cam_cc_tfe_0_ahb_clk.clkr,
+ [CAM_CC_TFE_0_CLK] = &cam_cc_tfe_0_clk.clkr,
+ [CAM_CC_TFE_0_CLK_SRC] = &cam_cc_tfe_0_clk_src.clkr,
+ [CAM_CC_TFE_0_CPHY_RX_CLK] = &cam_cc_tfe_0_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK] = &cam_cc_tfe_0_csid_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK_SRC] = &cam_cc_tfe_0_csid_clk_src.clkr,
+ [CAM_CC_TFE_1_AHB_CLK] = &cam_cc_tfe_1_ahb_clk.clkr,
+ [CAM_CC_TFE_1_CLK] = &cam_cc_tfe_1_clk.clkr,
+ [CAM_CC_TFE_1_CLK_SRC] = &cam_cc_tfe_1_clk_src.clkr,
+ [CAM_CC_TFE_1_CPHY_RX_CLK] = &cam_cc_tfe_1_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK] = &cam_cc_tfe_1_csid_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK_SRC] = &cam_cc_tfe_1_csid_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sm4450_gdscs[] = {
+ [CAM_CC_CAMSS_TOP_GDSC] = &cam_cc_camss_top_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_sm4450_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0xa000 },
+ [CAM_CC_CAMNOC_BCR] = { 0x13000 },
+ [CAM_CC_CAMSS_TOP_BCR] = { 0x14000 },
+ [CAM_CC_CCI_0_BCR] = { 0x10000 },
+ [CAM_CC_CCI_1_BCR] = { 0x11000 },
+ [CAM_CC_CPAS_BCR] = { 0x12000 },
+ [CAM_CC_CRE_BCR] = { 0x16000 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x9000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x9024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x9048 },
+ [CAM_CC_ICP_BCR] = { 0xf000 },
+ [CAM_CC_MCLK0_BCR] = { 0x8000 },
+ [CAM_CC_MCLK1_BCR] = { 0x8020 },
+ [CAM_CC_MCLK2_BCR] = { 0x8040 },
+ [CAM_CC_MCLK3_BCR] = { 0x8060 },
+ [CAM_CC_OPE_0_BCR] = { 0xb000 },
+ [CAM_CC_TFE_0_BCR] = { 0xc000 },
+ [CAM_CC_TFE_1_BCR] = { 0xd000 },
+};
+
+static const struct regmap_config cam_cc_sm4450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16024,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc cam_cc_sm4450_desc = {
+ .config = &cam_cc_sm4450_regmap_config,
+ .clks = cam_cc_sm4450_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sm4450_clocks),
+ .resets = cam_cc_sm4450_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sm4450_resets),
+ .gdscs = cam_cc_sm4450_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sm4450_gdscs),
+};
+
+static const struct of_device_id cam_cc_sm4450_match_table[] = {
+ { .compatible = "qcom,sm4450-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sm4450_match_table);
+
+static int cam_cc_sm4450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sm4450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
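+ /* Program each PLL with its initial configuration before registering the clocks */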
+ clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+
+ return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm4450_desc, regmap);
+}
+
+static struct platform_driver cam_cc_sm4450_driver = {
+ .probe = cam_cc_sm4450_probe,
+ .driver = {
+ .name = "camcc-sm4450",
+ .of_match_table = cam_cc_sm4450_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sm4450_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC SM4450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c
new file mode 100644
index 000000000000..bb3009818ad7
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sm8150.c
@@ -0,0 +1,2159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8150-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_IFACE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+};
+
+static const struct pll_vco regera_vco[] = {
+ { 600000000, 3300000000, 0 },
+};
+
+static const struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
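+/* 19.2 MHz TCXO * (0x3e + 0x8000 / 0x10000) = 1200 MHz */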
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00003100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
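+/* 19.2 MHz TCXO * 0x32 = 960 MHz (Regera PLL, alpha = 0) */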
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000807,
+ .config_ctl_hi_val = 0x00000011,
+ .config_ctl_hi1_val = 0x04300142,
+ .test_ctl_val = 0x04000400,
+ .test_ctl_hi_val = 0x00004000,
+ .test_ctl_hi1_val = 0x00000000,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000000,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = regera_vco,
+ .num_vco = ARRAY_SIZE(regera_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_regera_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
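+/* 19.2 MHz TCXO * (0x29 + 0xaaaa / 0x10000) = ~800 MHz */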
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x29,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x29,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll2_out_main.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(266666667, P_CAM_CC_PLL0_OUT_ODD, 1.5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0xc170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xc108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xc124,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xa064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x6004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x604c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x6070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x703c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+ .cmd_rcgr = 0xc0e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xc0b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(847000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(950000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
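+/*
+ * IFE0 is fed from its dedicated PLL3: every operating point above keeps
+ * a divider of 1 and relies on CLK_SET_RATE_PARENT to reprogram the PLL
+ * itself. IFE1 mirrors this arrangement with PLL4.
+ */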
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0xa03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(847000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(950000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xb010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xb034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_csid_clk_src = {
+ .cmd_rcgr = 0xc020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_clk_src = {
+ .cmd_rcgr = 0xc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = {
+ .cmd_rcgr = 0xc064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x8010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xc08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xc144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
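+/*
+ * The MCLK rates use the 8-bit M/N counter for fractional division; the
+ * 12 MHz and 24 MHz entries imply a 960 MHz PLL2 "early" output
+ * (960 MHz / 10 * 1/8 = 12 MHz).
+ */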
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(12000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 8),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_EARLY, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x5044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x5064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x7058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
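+/*
+ * Branch clocks are plain gates: bit 0 of the CBCR (enable_reg) gates the
+ * clock, and the same register (halt_reg) is polled to confirm the clock
+ * actually started or stopped.
+ */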
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x7070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x7054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xc18c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc18c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
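+/* No parent is listed: the DCD XO branch runs at the crystal rate, as the name suggests. */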
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0xc194,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc194,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xc120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xc13c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc13c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
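+/* BRANCH_HALT_DELAY: skip polling the halt bit and use a fixed delay instead. */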
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xc1c8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xc1c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xc168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x6064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x6088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x6020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x608c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x608c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+ .halt_reg = 0xc0f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+ .halt_reg = 0xc100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_uar_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0xc0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xc0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0xa080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0xa07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0xa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xb058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xb04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = {
+ .halt_reg = 0xc040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_csid_clk = {
+ .halt_reg = 0xc038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_clk = {
+ .halt_reg = 0xc060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = {
+ .halt_reg = 0xc084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_csid_clk = {
+ .halt_reg = 0xc07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+ .halt_reg = 0x9024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+ .halt_reg = 0x9010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xc0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xc15c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc15c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x505c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x507c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x507c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
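+/*
+ * titan_top_gdsc is the top-level power domain of the camera subsystem;
+ * the per-block GDSCs below name it as their parent so that genpd powers
+ * it up first.
+ */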
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xc1bc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_1_gdsc = {
+ .gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *cam_cc_sm8150_clocks[] = {
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+ [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+ [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK] = &cam_cc_ife_lite_0_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK_SRC] = &cam_cc_ife_lite_0_clk_src.clkr,
+ [CAM_CC_IFE_LITE_0_CPHY_RX_CLK] = &cam_cc_ife_lite_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK] = &cam_cc_ife_lite_0_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK_SRC] = &cam_cc_ife_lite_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CLK] = &cam_cc_ife_lite_1_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CLK_SRC] = &cam_cc_ife_lite_1_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CPHY_RX_CLK] = &cam_cc_ife_lite_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK] = &cam_cc_ife_lite_1_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK_SRC] = &cam_cc_ife_lite_1_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+ [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+ [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+ [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sm8150_gdscs[] = {
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [IPE_1_GDSC] = &ipe_1_gdsc,
+};
+
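+/*
+ * Each BCR sits at the base of its block's register region, e.g. 0x7000
+ * for BPS, whose GDSC and RCG follow at 0x7004 and 0x7010 above.
+ */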
+static const struct qcom_reset_map cam_cc_sm8150_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x7000 },
+ [CAM_CC_CAMNOC_BCR] = { 0xc16c },
+ [CAM_CC_CCI_BCR] = { 0xc104 },
+ [CAM_CC_CPAS_BCR] = { 0xc164 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x6000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x6024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x6048 },
+ [CAM_CC_CSI3PHY_BCR] = { 0x606c },
+ [CAM_CC_FD_BCR] = { 0xc0dc },
+ [CAM_CC_ICP_BCR] = { 0xc0b4 },
+ [CAM_CC_IFE_0_BCR] = { 0xa000 },
+ [CAM_CC_IFE_1_BCR] = { 0xb000 },
+ [CAM_CC_IFE_LITE_0_BCR] = { 0xc000 },
+ [CAM_CC_IFE_LITE_1_BCR] = { 0xc044 },
+ [CAM_CC_IPE_0_BCR] = { 0x8000 },
+ [CAM_CC_IPE_1_BCR] = { 0x9000 },
+ [CAM_CC_JPEG_BCR] = { 0xc088 },
+ [CAM_CC_LRME_BCR] = { 0xc140 },
+ [CAM_CC_MCLK0_BCR] = { 0x5000 },
+ [CAM_CC_MCLK1_BCR] = { 0x5020 },
+ [CAM_CC_MCLK2_BCR] = { 0x5040 },
+ [CAM_CC_MCLK3_BCR] = { 0x5060 },
+};
+
+static const struct regmap_config cam_cc_sm8150_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe004,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc cam_cc_sm8150_desc = {
+ .config = &cam_cc_sm8150_regmap_config,
+ .clks = cam_cc_sm8150_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sm8150_clocks),
+ .resets = cam_cc_sm8150_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sm8150_resets),
+ .gdscs = cam_cc_sm8150_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sm8150_gdscs),
+};
+
+static const struct of_device_id cam_cc_sm8150_match_table[] = {
+ { .compatible = "qcom,sm8150-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sm8150_match_table);
+
+static int cam_cc_sm8150_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
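+ /* Power up the clock controller before the register accesses below */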
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sm8150_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_trion_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_trion_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_regera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_trion_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_trion_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+
+ /* Keep the critical clock always-on */
+ qcom_branch_set_clk_en(regmap, 0xc1e4); /* cam_cc_gdsc_clk */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8150_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver cam_cc_sm8150_driver = {
+ .probe = cam_cc_sm8150_probe,
+ .driver = {
+ .name = "camcc-sm8150",
+ .of_match_table = cam_cc_sm8150_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sm8150_driver);
+
+MODULE_DESCRIPTION("QTI CAM_CC SM8150 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 31bf9d13f154..f9105443d7db 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@@ -1713,7 +1713,7 @@ static int __alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
if (ret < 0)
return ret;
- regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_update_bits(pll->clkr.regmap, PLL_L_VAL(pll), LUCID_EVO_PLL_L_VAL_MASK, l);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
/* Latch the PLL input */
@@ -1832,6 +1832,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
+/**
+ * clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll
+ *
+ * @pll: clk alpha pll
+ * @regmap: register map
+ * @config: configuration to apply for pll
+ */
+void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ /*
+ * If the bootloader left the PLL enabled it's likely that there are
+ * RCGs that will lock up if we disable the PLL below.
+ */
+ if (trion_pll_is_enabled(pll, regmap)) {
+ pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n");
+ return;
+ }
+
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll),
+ config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
+ config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll),
+ config->user_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll),
+ config->test_ctl_hi1_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+
+ /* Set operation mode to OFF */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ /* Place the PLL in STANDBY mode */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure);
+
static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
@@ -2674,3 +2726,33 @@ const struct clk_ops clk_alpha_pll_stromer_plus_ops = {
.set_rate = clk_alpha_pll_stromer_plus_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_plus_ops);
+
+void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll), config->user_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+
+ /* Set operation mode to STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+EXPORT_SYMBOL_GPL(clk_regera_pll_configure);
+
+const struct clk_ops clk_alpha_pll_regera_ops = {
+ .enable = clk_zonda_pll_enable,
+ .disable = clk_zonda_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_zonda_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_regera_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index df8f0fe15531..55eca04b23a1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -23,6 +23,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
+ CLK_ALPHA_PLL_TYPE_REGERA = CLK_ALPHA_PLL_TYPE_ZONDA,
CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
@@ -193,6 +194,8 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
+extern const struct clk_ops clk_alpha_pll_regera_ops;
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_huayra_2290_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -208,6 +211,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_lucid_ole_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -216,5 +221,7 @@ void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regm
const struct alpha_pll_config *config);
void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
index f5fd1ff9c6c9..ce4efcd995ea 100644
--- a/drivers/clk/qcom/clk-cbf-8996.c
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -346,7 +346,7 @@ MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table);
static struct platform_driver qcom_msm8996_cbf_driver = {
.probe = qcom_msm8996_cbf_probe,
- .remove_new = qcom_msm8996_cbf_remove,
+ .remove = qcom_msm8996_cbf_remove,
.driver = {
.name = "qcom-msm8996-cbf",
.of_match_table = qcom_msm8996_cbf_match_table,
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index bb82abeed88f..4acde937114a 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -263,6 +263,8 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
cmd_state = 0;
}
+ cmd_state = min(cmd_state, BCM_TCS_CMD_VOTE_MASK);
+
if (c->last_sent_aggr_state != cmd_state) {
cmd.addr = c->res_addr;
cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
diff --git a/drivers/clk/qcom/dispcc-sm4450.c b/drivers/clk/qcom/dispcc-sm4450.c
new file mode 100644
index 000000000000..98ba016bc57f
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm4450.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm4450-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
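+/* Frequency tables use F(rate, parent, divider, m, n) entries */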
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x82a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x80f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8114,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(380000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(506000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(608000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8098,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(200000000, P_DISP_CC_PLL1_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_DISP_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x80c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
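+/* Post-divider between the DSI byte clock RCG and its interface branch */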
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8110,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
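+/* Branch (gate) clocks */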
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot1_clk = {
+ .halt_reg = 0xa00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
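+/* GDSC power domains for the MDSS core */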
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
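+/* Lookup tables indexed by the constants from the DT binding header */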
+static struct clk_regmap *disp_cc_sm4450_clocks[] = {
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT1_CLK] = &disp_cc_mdss_rot1_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_sm4450_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_sm4450_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static const struct regmap_config disp_cc_sm4450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm4450_desc = {
+ .config = &disp_cc_sm4450_regmap_config,
+ .clks = disp_cc_sm4450_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm4450_clocks),
+ .resets = disp_cc_sm4450_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm4450_resets),
+ .gdscs = disp_cc_sm4450_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm4450_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm4450_match_table[] = {
+ { .compatible = "qcom,sm4450-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm4450_match_table);
+
+static int disp_cc_sm4450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm4450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
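+ /* Both PLLs run at 600 MHz, so disp_cc_pll0_config is applied to each */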
+ clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll0_config);
+
+ /* Keep the always-on sleep and XO clocks enabled */
+ qcom_branch_set_clk_en(regmap, 0xe070); /* DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
+
+ return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm4450_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sm4450_driver = {
+ .probe = disp_cc_sm4450_probe,
+ .driver = {
+ .name = "dispcc-sm4450",
+ .of_match_table = disp_cc_sm4450_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_sm4450_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC SM4450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 5a09009b7289..884bbd3fb305 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -849,6 +849,7 @@ static struct clk_branch disp_cc_mdss_dp_link1_intf_clk = {
&disp_cc_mdss_dp_link1_div_clk_src.clkr.hw,
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -884,6 +885,7 @@ static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
&disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1009,6 +1011,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
},
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1357,8 +1360,13 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
disp_cc_sm8250_clocks[DISP_CC_MDSS_EDP_GTC_CLK_SRC] = NULL;
}
- clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) {
+ clk_lucid_5lpe_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_5lpe_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ } else {
+ clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ }
/* Enable clock gating for MDP clocks */
regmap_update_bits(regmap, 0x8000, 0x10, 0x10);
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index 31ae46f180a5..7f9021ca0ecb 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -71,7 +71,7 @@ enum {
P_SLEEP_CLK,
};
-static const struct pll_vco lucid_ole_vco[] = {
+static struct pll_vco lucid_ole_vco[] = {
{ 249600000, 2000000000, 0 },
};
@@ -95,7 +95,7 @@ static struct clk_alpha_pll disp_cc_pll0 = {
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_pll0",
.parent_data = &(const struct clk_parent_data) {
.index = DT_BI_TCXO,
@@ -126,7 +126,7 @@ static struct clk_alpha_pll disp_cc_pll1 = {
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr = {
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_pll1",
.parent_data = &(const struct clk_parent_data) {
.index = DT_BI_TCXO,
@@ -196,7 +196,7 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = {
static const struct parent_map disp_cc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_DP0_PHY_PLL_LINK_CLK, 1 },
- { P_DP1_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
{ P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
{ P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
{ P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
@@ -213,7 +213,7 @@ static const struct clk_parent_data disp_cc_parent_data_4[] = {
static const struct parent_map disp_cc_parent_map_5[] = {
{ P_BI_TCXO, 0 },
- { P_DSI0_PHY_PLL_OUT_BYTECLK, 4 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
{ P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
};
@@ -286,7 +286,7 @@ static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_6,
.freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_ahb_clk_src",
.parent_data = disp_cc_parent_data_6,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
@@ -306,7 +306,7 @@ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -321,7 +321,7 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -336,7 +336,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_aux_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -350,7 +350,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_7,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk_src",
.parent_data = disp_cc_parent_data_7,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
@@ -365,7 +365,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_4,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel0_clk_src",
.parent_data = disp_cc_parent_data_4,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
@@ -380,7 +380,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_4,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel1_clk_src",
.parent_data = disp_cc_parent_data_4,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
@@ -395,12 +395,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_aux_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -409,7 +409,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
@@ -424,7 +424,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -439,7 +439,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -454,7 +454,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_aux_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -468,7 +468,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
@@ -483,7 +483,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -498,7 +498,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -513,7 +513,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_aux_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -527,7 +527,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
@@ -542,7 +542,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_1,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_pixel0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
@@ -557,12 +557,12 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_5,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc0_clk_src",
.parent_data = disp_cc_parent_data_5,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -572,12 +572,12 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_5,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc1_clk_src",
.parent_data = disp_cc_parent_data_5,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -594,13 +594,25 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
.cmd_rcgr = 0x80d8,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_8,
.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_clk_src",
.parent_data = disp_cc_parent_data_8,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
@@ -615,7 +627,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk0_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -630,7 +642,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_2,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk1_clk_src",
.parent_data = disp_cc_parent_data_2,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
@@ -645,7 +657,7 @@ static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_vsync_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
@@ -665,7 +677,7 @@ static struct clk_rcg2 disp_cc_sleep_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_9,
.freq_tbl = ftbl_disp_cc_sleep_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_sleep_clk_src",
.parent_data = disp_cc_parent_data_9,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
@@ -680,7 +692,7 @@ static struct clk_rcg2 disp_cc_xo_clk_src = {
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_xo_clk_src",
.parent_data = disp_cc_parent_data_0_ao,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
@@ -693,7 +705,7 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
.reg = 0x8120,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte0_clk_src.clkr.hw,
@@ -707,7 +719,7 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
.reg = 0x813c,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte1_clk_src.clkr.hw,
@@ -721,7 +733,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
.reg = 0x8188,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
@@ -736,7 +748,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
.reg = 0x821c,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
@@ -751,7 +763,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
.reg = 0x8250,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
@@ -766,7 +778,7 @@ static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
.reg = 0x82cc,
.shift = 0,
.width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
+ .clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_div_clk_src",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
@@ -783,7 +795,7 @@ static struct clk_branch disp_cc_mdss_accu_clk = {
.clkr = {
.enable_reg = 0xe058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_accu_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_xo_clk_src.clkr.hw,
@@ -801,7 +813,7 @@ static struct clk_branch disp_cc_mdss_ahb1_clk = {
.clkr = {
.enable_reg = 0xa020,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_ahb1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -819,7 +831,7 @@ static struct clk_branch disp_cc_mdss_ahb_clk = {
.clkr = {
.enable_reg = 0x80a4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_ahb_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -837,7 +849,7 @@ static struct clk_branch disp_cc_mdss_byte0_clk = {
.clkr = {
.enable_reg = 0x8028,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte0_clk_src.clkr.hw,
@@ -855,7 +867,7 @@ static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
.clkr = {
.enable_reg = 0x802c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte0_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte0_div_clk_src.clkr.hw,
@@ -873,7 +885,7 @@ static struct clk_branch disp_cc_mdss_byte1_clk = {
.clkr = {
.enable_reg = 0x8030,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte1_clk_src.clkr.hw,
@@ -891,7 +903,7 @@ static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
.clkr = {
.enable_reg = 0x8034,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_byte1_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_byte1_div_clk_src.clkr.hw,
@@ -909,7 +921,7 @@ static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
.clkr = {
.enable_reg = 0x8058,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
@@ -927,7 +939,7 @@ static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
.clkr = {
.enable_reg = 0x804c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
@@ -945,7 +957,7 @@ static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
.clkr = {
.enable_reg = 0x8040,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
@@ -963,7 +975,7 @@ static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
.clkr = {
.enable_reg = 0x8048,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
@@ -981,7 +993,7 @@ static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
.clkr = {
.enable_reg = 0x8050,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
@@ -999,7 +1011,7 @@ static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
.clkr = {
.enable_reg = 0x8054,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_pixel1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
@@ -1017,7 +1029,7 @@ static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
.clkr = {
.enable_reg = 0x8044,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
@@ -1035,7 +1047,7 @@ static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
.clkr = {
.enable_reg = 0x8074,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
@@ -1053,7 +1065,7 @@ static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
.clkr = {
.enable_reg = 0x8070,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
@@ -1071,7 +1083,7 @@ static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
.clkr = {
.enable_reg = 0x8064,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
@@ -1089,7 +1101,7 @@ static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
.clkr = {
.enable_reg = 0x806c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
@@ -1107,7 +1119,7 @@ static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
.clkr = {
.enable_reg = 0x805c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
@@ -1125,7 +1137,7 @@ static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
.clkr = {
.enable_reg = 0x8060,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_pixel1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
@@ -1143,7 +1155,7 @@ static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
.clkr = {
.enable_reg = 0x8068,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
@@ -1161,7 +1173,7 @@ static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
.clkr = {
.enable_reg = 0x808c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
@@ -1179,7 +1191,7 @@ static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
.clkr = {
.enable_reg = 0x8088,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
@@ -1197,7 +1209,7 @@ static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
.clkr = {
.enable_reg = 0x8080,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
@@ -1215,7 +1227,7 @@ static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
.clkr = {
.enable_reg = 0x8084,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
@@ -1233,7 +1245,7 @@ static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
.clkr = {
.enable_reg = 0x8078,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
@@ -1251,7 +1263,7 @@ static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
.clkr = {
.enable_reg = 0x807c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_pixel1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
@@ -1269,7 +1281,7 @@ static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
.clkr = {
.enable_reg = 0x809c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_aux_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
@@ -1287,7 +1299,7 @@ static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
.clkr = {
.enable_reg = 0x80a0,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_crypto_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
@@ -1305,7 +1317,7 @@ static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
.clkr = {
.enable_reg = 0x8094,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
@@ -1323,7 +1335,7 @@ static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
.clkr = {
.enable_reg = 0x8098,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_intf_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
@@ -1341,7 +1353,7 @@ static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
.clkr = {
.enable_reg = 0x8090,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_pixel0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
@@ -1359,7 +1371,7 @@ static struct clk_branch disp_cc_mdss_esc0_clk = {
.clkr = {
.enable_reg = 0x8038,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_esc0_clk_src.clkr.hw,
@@ -1377,7 +1389,7 @@ static struct clk_branch disp_cc_mdss_esc1_clk = {
.clkr = {
.enable_reg = 0x803c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_esc1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_esc1_clk_src.clkr.hw,
@@ -1395,7 +1407,7 @@ static struct clk_branch disp_cc_mdss_mdp1_clk = {
.clkr = {
.enable_reg = 0xa004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1413,7 +1425,7 @@ static struct clk_branch disp_cc_mdss_mdp_clk = {
.clkr = {
.enable_reg = 0x800c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1431,7 +1443,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
.clkr = {
.enable_reg = 0xa010,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_lut1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1449,7 +1461,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
.clkr = {
.enable_reg = 0x8018,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_mdp_lut_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_mdp_clk_src.clkr.hw,
@@ -1467,7 +1479,7 @@ static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
.clkr = {
.enable_reg = 0xc004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_non_gdsc_ahb_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -1485,7 +1497,7 @@ static struct clk_branch disp_cc_mdss_pclk0_clk = {
.clkr = {
.enable_reg = 0x8004,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk0_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_pclk0_clk_src.clkr.hw,
@@ -1503,7 +1515,7 @@ static struct clk_branch disp_cc_mdss_pclk1_clk = {
.clkr = {
.enable_reg = 0x8008,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_pclk1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_pclk1_clk_src.clkr.hw,
@@ -1521,7 +1533,7 @@ static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
.clkr = {
.enable_reg = 0xc00c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_rscc_ahb_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_ahb_clk_src.clkr.hw,
@@ -1539,7 +1551,7 @@ static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
.clkr = {
.enable_reg = 0xc008,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_rscc_vsync_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_vsync_clk_src.clkr.hw,
@@ -1557,7 +1569,7 @@ static struct clk_branch disp_cc_mdss_vsync1_clk = {
.clkr = {
.enable_reg = 0xa01c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_vsync1_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_vsync_clk_src.clkr.hw,
@@ -1575,7 +1587,7 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
.clkr = {
.enable_reg = 0x8024,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_vsync_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_mdss_vsync_clk_src.clkr.hw,
@@ -1593,7 +1605,7 @@ static struct clk_branch disp_cc_sleep_clk = {
.clkr = {
.enable_reg = 0xe074,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data) {
+ .hw.init = &(const struct clk_init_data) {
.name = "disp_cc_sleep_clk",
.parent_hws = (const struct clk_hw*[]) {
&disp_cc_sleep_clk_src.clkr.hw,
@@ -1611,7 +1623,7 @@ static struct gdsc mdss_gdsc = {
.name = "mdss_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
};
static struct gdsc mdss_int2_gdsc = {
@@ -1620,7 +1632,7 @@ static struct gdsc mdss_int2_gdsc = {
.name = "mdss_int2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
};
static struct clk_regmap *disp_cc_sm8550_clocks[] = {
@@ -1739,6 +1751,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = {
static const struct of_device_id disp_cc_sm8550_match_table[] = {
{ .compatible = "qcom,sm8550-dispcc" },
+ { .compatible = "qcom,sm8650-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8550_match_table);
@@ -1762,6 +1775,13 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
goto err_put_rpm;
}
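+ /* SM8650 differs in VCO range, MDP frequency table and DP1 link parent */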
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-dispcc")) {
+ lucid_ole_vco[0].max_freq = 2100000000;
+ disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650;
+ disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] =
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw;
+ }
+
clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
@@ -1795,5 +1815,5 @@ static struct platform_driver disp_cc_sm8550_driver = {
module_platform_driver(disp_cc_sm8550_driver);
-MODULE_DESCRIPTION("QTI DISPCC SM8550 Driver");
+MODULE_DESCRIPTION("QTI DISPCC SM8550 / SM8650 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c
deleted file mode 100644
index c9d2751f5cb8..000000000000
--- a/drivers/clk/qcom/dispcc-sm8650.c
+++ /dev/null
@@ -1,1796 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
- * Copyright (c) 2023, Linaro Ltd.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regmap.h>
-
-#include <dt-bindings/clock/qcom,sm8650-dispcc.h>
-
-#include "common.h"
-#include "clk-alpha-pll.h"
-#include "clk-branch.h"
-#include "clk-pll.h"
-#include "clk-rcg.h"
-#include "clk-regmap.h"
-#include "clk-regmap-divider.h"
-#include "reset.h"
-#include "gdsc.h"
-
-/* Need to match the order of clocks in DT binding */
-enum {
- DT_BI_TCXO,
- DT_BI_TCXO_AO,
- DT_AHB_CLK,
- DT_SLEEP_CLK,
-
- DT_DSI0_PHY_PLL_OUT_BYTECLK,
- DT_DSI0_PHY_PLL_OUT_DSICLK,
- DT_DSI1_PHY_PLL_OUT_BYTECLK,
- DT_DSI1_PHY_PLL_OUT_DSICLK,
-
- DT_DP0_PHY_PLL_LINK_CLK,
- DT_DP0_PHY_PLL_VCO_DIV_CLK,
- DT_DP1_PHY_PLL_LINK_CLK,
- DT_DP1_PHY_PLL_VCO_DIV_CLK,
- DT_DP2_PHY_PLL_LINK_CLK,
- DT_DP2_PHY_PLL_VCO_DIV_CLK,
- DT_DP3_PHY_PLL_LINK_CLK,
- DT_DP3_PHY_PLL_VCO_DIV_CLK,
-};
-
-#define DISP_CC_MISC_CMD 0xF000
-
-enum {
- P_BI_TCXO,
- P_DISP_CC_PLL0_OUT_MAIN,
- P_DISP_CC_PLL1_OUT_EVEN,
- P_DISP_CC_PLL1_OUT_MAIN,
- P_DP0_PHY_PLL_LINK_CLK,
- P_DP0_PHY_PLL_VCO_DIV_CLK,
- P_DP1_PHY_PLL_LINK_CLK,
- P_DP1_PHY_PLL_VCO_DIV_CLK,
- P_DP2_PHY_PLL_LINK_CLK,
- P_DP2_PHY_PLL_VCO_DIV_CLK,
- P_DP3_PHY_PLL_LINK_CLK,
- P_DP3_PHY_PLL_VCO_DIV_CLK,
- P_DSI0_PHY_PLL_OUT_BYTECLK,
- P_DSI0_PHY_PLL_OUT_DSICLK,
- P_DSI1_PHY_PLL_OUT_BYTECLK,
- P_DSI1_PHY_PLL_OUT_DSICLK,
- P_SLEEP_CLK,
-};
-
-static const struct pll_vco lucid_ole_vco[] = {
- { 249600000, 2100000000, 0 },
-};
-
-static const struct alpha_pll_config disp_cc_pll0_config = {
- .l = 0xd,
- .alpha = 0x6492,
- .config_ctl_val = 0x20485699,
- .config_ctl_hi_val = 0x00182261,
- .config_ctl_hi1_val = 0x82aa299c,
- .test_ctl_val = 0x00000000,
- .test_ctl_hi_val = 0x00000003,
- .test_ctl_hi1_val = 0x00009000,
- .test_ctl_hi2_val = 0x00000034,
- .user_ctl_val = 0x00000000,
- .user_ctl_hi_val = 0x00000005,
-};
-
-static struct clk_alpha_pll disp_cc_pll0 = {
- .offset = 0x0,
- .vco_table = lucid_ole_vco,
- .num_vco = ARRAY_SIZE(lucid_ole_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
- .clkr = {
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_pll0",
- .parent_data = &(const struct clk_parent_data) {
- .index = DT_BI_TCXO,
- },
- .num_parents = 1,
- .ops = &clk_alpha_pll_reset_lucid_ole_ops,
- },
- },
-};
-
-static const struct alpha_pll_config disp_cc_pll1_config = {
- .l = 0x1f,
- .alpha = 0x4000,
- .config_ctl_val = 0x20485699,
- .config_ctl_hi_val = 0x00182261,
- .config_ctl_hi1_val = 0x82aa299c,
- .test_ctl_val = 0x00000000,
- .test_ctl_hi_val = 0x00000003,
- .test_ctl_hi1_val = 0x00009000,
- .test_ctl_hi2_val = 0x00000034,
- .user_ctl_val = 0x00000000,
- .user_ctl_hi_val = 0x00000005,
-};
-
-static struct clk_alpha_pll disp_cc_pll1 = {
- .offset = 0x1000,
- .vco_table = lucid_ole_vco,
- .num_vco = ARRAY_SIZE(lucid_ole_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
- .clkr = {
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_pll1",
- .parent_data = &(const struct clk_parent_data) {
- .index = DT_BI_TCXO,
- },
- .num_parents = 1,
- .ops = &clk_alpha_pll_reset_lucid_ole_ops,
- },
- },
-};
-
-static const struct parent_map disp_cc_parent_map_0[] = {
- { P_BI_TCXO, 0 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_0[] = {
- { .index = DT_BI_TCXO },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
- { .index = DT_BI_TCXO_AO },
-};
-
-static const struct parent_map disp_cc_parent_map_1[] = {
- { P_BI_TCXO, 0 },
- { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
- { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
- { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_1[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
-};
-
-static const struct parent_map disp_cc_parent_map_2[] = {
- { P_BI_TCXO, 0 },
- { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
- { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
- { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
- { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_2[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
- { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
- { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
- { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
-};
-
-static const struct parent_map disp_cc_parent_map_3[] = {
- { P_BI_TCXO, 0 },
- { P_DP1_PHY_PLL_LINK_CLK, 2 },
- { P_DP2_PHY_PLL_LINK_CLK, 3 },
- { P_DP3_PHY_PLL_LINK_CLK, 4 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_3[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DP1_PHY_PLL_LINK_CLK },
- { .index = DT_DP2_PHY_PLL_LINK_CLK },
- { .index = DT_DP3_PHY_PLL_LINK_CLK },
-};
-
-static const struct parent_map disp_cc_parent_map_4[] = {
- { P_BI_TCXO, 0 },
- { P_DP0_PHY_PLL_LINK_CLK, 1 },
- { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
- { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
- { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
- { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_4[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DP0_PHY_PLL_LINK_CLK },
- { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
- { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
-};
-
-static const struct parent_map disp_cc_parent_map_5[] = {
- { P_BI_TCXO, 0 },
- { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
- { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_5[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
- { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
-};
-
-static const struct parent_map disp_cc_parent_map_6[] = {
- { P_BI_TCXO, 0 },
- { P_DISP_CC_PLL1_OUT_MAIN, 4 },
- { P_DISP_CC_PLL1_OUT_EVEN, 6 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_6[] = {
- { .index = DT_BI_TCXO },
- { .hw = &disp_cc_pll1.clkr.hw },
- { .hw = &disp_cc_pll1.clkr.hw },
-};
-
-static const struct parent_map disp_cc_parent_map_7[] = {
- { P_BI_TCXO, 0 },
- { P_DP0_PHY_PLL_LINK_CLK, 1 },
- { P_DP1_PHY_PLL_LINK_CLK, 2 },
- { P_DP2_PHY_PLL_LINK_CLK, 3 },
- { P_DP3_PHY_PLL_LINK_CLK, 4 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_7[] = {
- { .index = DT_BI_TCXO },
- { .index = DT_DP0_PHY_PLL_LINK_CLK },
- { .index = DT_DP1_PHY_PLL_LINK_CLK },
- { .index = DT_DP2_PHY_PLL_LINK_CLK },
- { .index = DT_DP3_PHY_PLL_LINK_CLK },
-};
-
-static const struct parent_map disp_cc_parent_map_8[] = {
- { P_BI_TCXO, 0 },
- { P_DISP_CC_PLL0_OUT_MAIN, 1 },
- { P_DISP_CC_PLL1_OUT_MAIN, 4 },
- { P_DISP_CC_PLL1_OUT_EVEN, 6 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_8[] = {
- { .index = DT_BI_TCXO },
- { .hw = &disp_cc_pll0.clkr.hw },
- { .hw = &disp_cc_pll1.clkr.hw },
- { .hw = &disp_cc_pll1.clkr.hw },
-};
-
-static const struct parent_map disp_cc_parent_map_9[] = {
- { P_SLEEP_CLK, 0 },
-};
-
-static const struct clk_parent_data disp_cc_parent_data_9[] = {
- { .index = DT_SLEEP_CLK },
-};
-
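Note: each parent_map array above pairs a software parent identifier (P_*) with the hardware mux select value for that RCG, while the matching clk_parent_data array names the same parents, in the same order, by DT clock index. A minimal sketch of the lookup the framework performs, modelled on qcom_find_src_index() in common.c (treat the exact helper name and field names as assumptions):

	/* Sketch: translate a P_* identifier into the mux field value by
	 * walking the parent_map; the array position doubles as the index
	 * into the corresponding clk_parent_data array. */
	static int find_mux_sel(const struct parent_map *map, int num, u8 src)
	{
		int i;

		for (i = 0; i < num; i++)
			if (map[i].src == src)
				return map[i].cfg;	/* value written to the mux */

		return -EINVAL;
	}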
-static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
- F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
- { }
-};
-
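Note: the F() entries in the tables above and below encode (rate, parent, pre-divider, M, N). In the qcom drivers' clk-rcg.h the pre-divider is stored in doubled-minus-one form, so a human-readable divider h becomes 2*h-1 in the register field; the sketch below reproduces that definition from memory, so verify against clk-rcg.h:

	/* freq_tbl entry builder: f = rate in Hz, s = P_* parent, h =
	 * pre-divider (stored as 2*h - 1), m/n = MND fractional divider
	 * (both 0 when the MND counter is unused). */
	#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }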
-static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
- .cmd_rcgr = 0x82e8,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_6,
- .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_ahb_clk_src",
- .parent_data = disp_cc_parent_data_6,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
- },
-};
-
-static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- { }
-};
-
-static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
- .cmd_rcgr = 0x8108,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_2,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte0_clk_src",
- .parent_data = disp_cc_parent_data_2,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
- .cmd_rcgr = 0x8124,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_2,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte1_clk_src",
- .parent_data = disp_cc_parent_data_2,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
- .cmd_rcgr = 0x81bc,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_aux_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
- .cmd_rcgr = 0x8170,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_7,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_link_clk_src",
- .parent_data = disp_cc_parent_data_7,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
- .cmd_rcgr = 0x818c,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_4,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
- .parent_data = disp_cc_parent_data_4,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
- .cmd_rcgr = 0x81a4,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_4,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
- .parent_data = disp_cc_parent_data_4,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
- .cmd_rcgr = 0x8220,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_aux_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
- .cmd_rcgr = 0x8204,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_link_clk_src",
- .parent_data = disp_cc_parent_data_3,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
- .cmd_rcgr = 0x81d4,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
- .cmd_rcgr = 0x81ec,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
- .cmd_rcgr = 0x8284,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_aux_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
- .cmd_rcgr = 0x8238,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_link_clk_src",
- .parent_data = disp_cc_parent_data_3,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
- .cmd_rcgr = 0x8254,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
- .cmd_rcgr = 0x826c,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
- .cmd_rcgr = 0x82d0,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_aux_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
- .cmd_rcgr = 0x82b4,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_3,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_link_clk_src",
- .parent_data = disp_cc_parent_data_3,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_byte2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
- .cmd_rcgr = 0x829c,
- .mnd_width = 16,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_1,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
- .parent_data = disp_cc_parent_data_1,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_dp_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
- .cmd_rcgr = 0x8140,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_5,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_esc0_clk_src",
- .parent_data = disp_cc_parent_data_5,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
- .cmd_rcgr = 0x8158,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_5,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_esc1_clk_src",
- .parent_data = disp_cc_parent_data_5,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
- { }
-};
-
-static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
- .cmd_rcgr = 0x80d8,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_8,
- .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp_clk_src",
- .parent_data = disp_cc_parent_data_8,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
- .cmd_rcgr = 0x80a8,
- .mnd_width = 8,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_2,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_pclk0_clk_src",
- .parent_data = disp_cc_parent_data_2,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_pixel_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
- .cmd_rcgr = 0x80c0,
- .mnd_width = 8,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_2,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_pclk1_clk_src",
- .parent_data = disp_cc_parent_data_2,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_pixel_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
- .cmd_rcgr = 0x80f0,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_vsync_clk_src",
- .parent_data = disp_cc_parent_data_0,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
- F(32000, P_SLEEP_CLK, 1, 0, 0),
- { }
-};
-
-static struct clk_rcg2 disp_cc_sleep_clk_src = {
- .cmd_rcgr = 0xe05c,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_9,
- .freq_tbl = ftbl_disp_cc_sleep_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_sleep_clk_src",
- .parent_data = disp_cc_parent_data_9,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 disp_cc_xo_clk_src = {
- .cmd_rcgr = 0xe03c,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_xo_clk_src",
- .parent_data = disp_cc_parent_data_0_ao,
- .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
- .reg = 0x8120,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte0_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_regmap_div_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
- .reg = 0x813c,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte1_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .ops = &clk_regmap_div_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
- .reg = 0x8188,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_link_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
- .reg = 0x821c,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_link_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
- .reg = 0x8250,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_link_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
-static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
- .reg = 0x82cc,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_link_div_clk_src",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
-static struct clk_branch disp_cc_mdss_accu_clk = {
- .halt_reg = 0xe058,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0xe058,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_accu_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_xo_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
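Note: for the clk_branch instances that follow, enabling means setting enable_mask in enable_reg and then, for BRANCH_HALT, polling the halt register until the hardware reports the clock running; BRANCH_HALT_VOTED only checks the shared voting bit. A rough sketch, assuming the usual Qualcomm CBCR layout where CLK_OFF is bit 31 (the real logic lives in clk-branch.c):

	/* Illustrative only: enable a branch and wait for it to run. */
	static int branch_enable_sketch(struct regmap *regmap, u32 cbcr)
	{
		u32 val;
		int ret;

		ret = regmap_update_bits(regmap, cbcr, BIT(0), BIT(0));
		if (ret)
			return ret;

		/* wait for CLK_OFF (bit 31) to clear once the branch runs */
		return regmap_read_poll_timeout(regmap, cbcr, val,
						!(val & BIT(31)), 1, 200);
	}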
-static struct clk_branch disp_cc_mdss_ahb1_clk = {
- .halt_reg = 0xa020,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa020,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_ahb1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_ahb_clk = {
- .halt_reg = 0x80a4,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x80a4,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_byte0_clk = {
- .halt_reg = 0x8028,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8028,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
- .halt_reg = 0x802c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x802c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte0_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_byte1_clk = {
- .halt_reg = 0x8030,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8030,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
- .halt_reg = 0x8034,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8034,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_byte1_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
- .halt_reg = 0x8058,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8058,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_aux_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
- .halt_reg = 0x804c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x804c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_crypto_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
- .halt_reg = 0x8040,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8040,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_link_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
- .halt_reg = 0x8048,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8048,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
- .halt_reg = 0x8050,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8050,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_pixel0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
- .halt_reg = 0x8054,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8054,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_pixel1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
- .halt_reg = 0x8044,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8044,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
- .halt_reg = 0x8074,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8074,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_aux_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
- .halt_reg = 0x8070,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8070,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_crypto_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
- .halt_reg = 0x8064,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8064,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_link_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
- .halt_reg = 0x806c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x806c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
- .halt_reg = 0x805c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x805c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_pixel0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
- .halt_reg = 0x8060,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8060,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_pixel1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
- .halt_reg = 0x8068,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8068,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
- .halt_reg = 0x808c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x808c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_aux_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
- .halt_reg = 0x8088,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8088,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_crypto_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
- .halt_reg = 0x8080,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8080,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_link_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
- .halt_reg = 0x8084,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8084,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
- .halt_reg = 0x8078,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8078,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_pixel0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
- .halt_reg = 0x807c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x807c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx2_pixel1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
- .halt_reg = 0x809c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x809c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_aux_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
- .halt_reg = 0x80a0,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x80a0,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_crypto_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
- .halt_reg = 0x8094,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8094,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_link_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
- .halt_reg = 0x8098,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8098,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_link_intf_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
- .halt_reg = 0x8090,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8090,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_dptx3_pixel0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_esc0_clk = {
- .halt_reg = 0x8038,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8038,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_esc0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_esc0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_esc1_clk = {
- .halt_reg = 0x803c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x803c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_esc1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_esc1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_mdp1_clk = {
- .halt_reg = 0xa004,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa004,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_mdp_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_mdp_clk = {
- .halt_reg = 0x800c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x800c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_mdp_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
- .halt_reg = 0xa010,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa010,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp_lut1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_mdp_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
- .halt_reg = 0x8018,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x8018,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_mdp_lut_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_mdp_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
- .halt_reg = 0xc004,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0xc004,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_non_gdsc_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_pclk0_clk = {
- .halt_reg = 0x8004,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8004,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_pclk0_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_pclk0_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_pclk1_clk = {
- .halt_reg = 0x8008,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8008,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_pclk1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_pclk1_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
- .halt_reg = 0xc00c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xc00c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_rscc_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
- .halt_reg = 0xc008,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xc008,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_rscc_vsync_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_vsync_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_vsync1_clk = {
- .halt_reg = 0xa01c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xa01c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_vsync1_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_vsync_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_vsync_clk = {
- .halt_reg = 0x8024,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x8024,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_mdss_vsync_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_mdss_vsync_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_sleep_clk = {
- .halt_reg = 0xe074,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0xe074,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "disp_cc_sleep_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &disp_cc_sleep_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct gdsc mdss_gdsc = {
- .gdscr = 0x9000,
- .pd = {
- .name = "mdss_gdsc",
- },
- .pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
-};
-
-static struct gdsc mdss_int2_gdsc = {
- .gdscr = 0xb000,
- .pd = {
- .name = "mdss_int2_gdsc",
- },
- .pwrsts = PWRSTS_OFF_ON,
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
-};
-
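Note: the two GDSCs above are the MDSS power domains; HW_CTRL lets the hardware collapse the domain on its own between frames, and RETAIN_FF_ENABLE preserves flip-flop state across a collapse. A hypothetical consumer never pokes the GDSC registers directly; it reaches the domain through runtime PM once the DT node lists it as a power-domain:

	#include <linux/pm_runtime.h>

	/* Hypothetical sketch: with mdss_gdsc as the device's power-domain,
	 * runtime PM powers the domain up and down transparently. */
	static int mdss_access_sketch(struct device *dev)
	{
		int ret;

		ret = pm_runtime_resume_and_get(dev);	/* GDSC on */
		if (ret < 0)
			return ret;

		/* ... MDSS register accesses happen here ... */

		pm_runtime_put(dev);			/* domain may collapse again */
		return 0;
	}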
-static struct clk_regmap *disp_cc_sm8650_clocks[] = {
- [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
- [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
- [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
- [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
- [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
- [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
- [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
- [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
- [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
- [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
- [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
- [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
- [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
- [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
- [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
- [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
- [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
- [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
- &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
- [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
- [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
- [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
- [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
- [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
- [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
- &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
- [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
- [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
- [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
- [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
- [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
- [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
- [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
- [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
- [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
- [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
- [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
- [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
- [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
- [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
- [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
- [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
- [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
- [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
- [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
- [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
- [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
- [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
- [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
- [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
- [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
- [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
- [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
- [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
- [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
- [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
- [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
- [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
- [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
- [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
- [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
- [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
- [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
- [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
- [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
- [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
- [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
- [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
-};
-
-static const struct qcom_reset_map disp_cc_sm8650_resets[] = {
- [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
- [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
- [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
-};
-
-static struct gdsc *disp_cc_sm8650_gdscs[] = {
- [MDSS_GDSC] = &mdss_gdsc,
- [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
-};
-
-static const struct regmap_config disp_cc_sm8650_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = 0x11008,
- .fast_io = true,
-};
-
-static struct qcom_cc_desc disp_cc_sm8650_desc = {
- .config = &disp_cc_sm8650_regmap_config,
- .clks = disp_cc_sm8650_clocks,
- .num_clks = ARRAY_SIZE(disp_cc_sm8650_clocks),
- .resets = disp_cc_sm8650_resets,
- .num_resets = ARRAY_SIZE(disp_cc_sm8650_resets),
- .gdscs = disp_cc_sm8650_gdscs,
- .num_gdscs = ARRAY_SIZE(disp_cc_sm8650_gdscs),
-};
-
-static const struct of_device_id disp_cc_sm8650_match_table[] = {
- { .compatible = "qcom,sm8650-dispcc" },
- { }
-};
-MODULE_DEVICE_TABLE(of, disp_cc_sm8650_match_table);
-
-static int disp_cc_sm8650_probe(struct platform_device *pdev)
-{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &disp_cc_sm8650_desc);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto err_put_rpm;
- }
-
- clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
-
- /* Enable clock gating for MDP clocks */
- regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
-
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8650_desc, regmap);
- if (ret)
- goto err_put_rpm;
-
- pm_runtime_put(&pdev->dev);
-
- return 0;
-
-err_put_rpm:
- pm_runtime_put_sync(&pdev->dev);
-
- return ret;
-}
-
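Note: the two regmap writes near the end of probe are the only direct register pokes; everything else is table-driven through the descriptor. qcom_branch_set_clk_en() amounts to forcing the enable bit of a branch CBCR so the clock stays up with no registered consumer; roughly, assuming the enable bit is BIT(0) as on the other Qualcomm branches:

	/* Sketch of the always-on helper used for DISP_CC_XO_CLK above. */
	static int set_branch_always_on(struct regmap *regmap, u32 cbcr)
	{
		return regmap_update_bits(regmap, cbcr, BIT(0), BIT(0));
	}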
-static struct platform_driver disp_cc_sm8650_driver = {
- .probe = disp_cc_sm8650_probe,
- .driver = {
- .name = "disp_cc-sm8650",
- .of_match_table = disp_cc_sm8650_match_table,
- },
-};
-
-module_platform_driver(disp_cc_sm8650_driver);
-
-MODULE_DESCRIPTION("QTI DISPCC SM8650 Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
index f98591148a97..9536b2b7d07c 100644
--- a/drivers/clk/qcom/gcc-ipq5332.c
+++ b/drivers/clk/qcom/gcc-ipq5332.c
@@ -4,12 +4,14 @@
*/
#include <linux/clk-provider.h>
+#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,ipq5332-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq5332.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -126,17 +128,6 @@ static struct clk_alpha_pll gpll4_main = {
.parent_data = &gcc_parent_data_xo,
.num_parents = 1,
.ops = &clk_alpha_pll_stromer_ops,
- /*
- * There are no consumers for this GPLL in kernel yet,
- * (will be added soon), so the clock framework
- * disables this source. But some of the clocks
- * initialized by boot loaders uses this source. So we
- * need to keep this clock ON. Add the
- * CLK_IGNORE_UNUSED flag so the clock will not be
- * disabled. Once the consumer in kernel is added, we
- * can get rid of this flag.
- */
- .flags = CLK_IGNORE_UNUSED,
},
},
};
@@ -3388,6 +3379,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
[GCC_QDSS_DAP_DIV_CLK_SRC] = &gcc_qdss_dap_div_clk_src.clkr,
[GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr,
[GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr,
+ [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr,
[GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
[GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
[GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
@@ -3628,6 +3620,24 @@ static const struct qcom_reset_map gcc_ipq5332_resets[] = {
[GCC_UNIPHY1_XPCS_ARES] = { 0x16060 },
};
+#define IPQ_APPS_ID 5332 /* icc_first_node_id: any ID unused by other icc providers */
+
+static struct qcom_icc_hws_data icc_ipq5332_hws[] = {
+ { MASTER_SNOC_PCIE3_1_M, SLAVE_SNOC_PCIE3_1_M, GCC_SNOC_PCIE3_1LANE_M_CLK },
+ { MASTER_ANOC_PCIE3_1_S, SLAVE_ANOC_PCIE3_1_S, GCC_SNOC_PCIE3_1LANE_S_CLK },
+ { MASTER_SNOC_PCIE3_2_M, SLAVE_SNOC_PCIE3_2_M, GCC_SNOC_PCIE3_2LANE_M_CLK },
+ { MASTER_ANOC_PCIE3_2_S, SLAVE_ANOC_PCIE3_2_S, GCC_SNOC_PCIE3_2LANE_S_CLK },
+ { MASTER_SNOC_USB, SLAVE_SNOC_USB, GCC_SNOC_USB_CLK },
+ { MASTER_NSSNOC_NSSCC, SLAVE_NSSNOC_NSSCC, GCC_NSSNOC_NSSCC_CLK },
+ { MASTER_NSSNOC_SNOC_0, SLAVE_NSSNOC_SNOC_0, GCC_NSSNOC_SNOC_CLK },
+ { MASTER_NSSNOC_SNOC_1, SLAVE_NSSNOC_SNOC_1, GCC_NSSNOC_SNOC_1_CLK },
+ { MASTER_NSSNOC_ATB, SLAVE_NSSNOC_ATB, GCC_NSSNOC_ATB_CLK },
+ { MASTER_NSSNOC_PCNOC_1, SLAVE_NSSNOC_PCNOC_1, GCC_NSSNOC_PCNOC_1_CLK },
+ { MASTER_NSSNOC_QOSGEN_REF, SLAVE_NSSNOC_QOSGEN_REF, GCC_NSSNOC_QOSGEN_REF_CLK },
+ { MASTER_NSSNOC_TIMEOUT_REF, SLAVE_NSSNOC_TIMEOUT_REF, GCC_NSSNOC_TIMEOUT_REF_CLK },
+ { MASTER_NSSNOC_XO_DCD, SLAVE_NSSNOC_XO_DCD, GCC_NSSNOC_XO_DCD_CLK },
+};
+
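Note: each entry in icc_ipq5332_hws ties one interconnect path (master node, slave node) to the GCC clock that gates that path, letting the clock controller double as a simple interconnect provider. The backing type is small; assuming the layout in drivers/clk/qcom/common.h:

	/* One hardware-clock-backed interconnect path. */
	struct qcom_icc_hws_data {
		int master_id;	/* MASTER_* node from the icc DT binding */
		int slave_id;	/* SLAVE_* node */
		int clk_id;	/* GCC_* clock that services the path */
	};

The matching .sync_state = icc_sync_state hookup further down lets the interconnect core drop the bootloader's bandwidth votes once every consumer has probed.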
static const struct regmap_config gcc_ipq5332_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3656,6 +3666,9 @@ static const struct qcom_cc_desc gcc_ipq5332_desc = {
.num_resets = ARRAY_SIZE(gcc_ipq5332_resets),
.clk_hws = gcc_ipq5332_hws,
.num_clk_hws = ARRAY_SIZE(gcc_ipq5332_hws),
+ .icc_hws = icc_ipq5332_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5332_hws),
+ .icc_first_node_id = IPQ_APPS_ID,
};
static int gcc_ipq5332_probe(struct platform_device *pdev)
@@ -3674,6 +3687,7 @@ static struct platform_driver gcc_ipq5332_driver = {
.driver = {
.name = "gcc-ipq5332",
.of_match_table = gcc_ipq5332_match_table,
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index 2e411d874662..ab0f7fc665a9 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -2684,7 +2684,7 @@ static struct clk_rcg2 lpass_q6_axim_clk_src = {
},
};
-static struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
+static const struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = {
F(24000000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0, 16, 0, 0),
{ }
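Note: this and the following hunks across gcc-ipq6018, gcc-ipq806x, gcc-ipq8074, gcc-mdm9615, gcc-msm8660, gcc-msm8960 and gcc-msm8994 are one mechanical cleanup: frequency tables are lookup data the clk framework only reads, so declaring them const lets the compiler place them in .rodata. The pattern, with hypothetical names:

	/* const: read-only rate table, placed in .rodata by the compiler. */
	static const struct freq_tbl ftbl_example_clk_src[] = {
		F(24000000, P_XO, 1, 0, 0),	/* 24 MHz straight from XO */
		F(50000000, P_GPLL0, 16, 0, 0),	/* 800 MHz GPLL0 / 16 */
		{ }				/* zero-rate sentinel ends the table */
	};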
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 974d01fd4381..9260e2fdb839 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -390,7 +390,7 @@ static const struct clk_parent_data gcc_pxo_pll3_pll0_pll14_pll18_pll11[] = {
};
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
{ 7372800, P_PLL8, 2, 24, 625 },
@@ -714,7 +714,7 @@ static struct clk_branch gsbi7_uart_clk = {
},
};
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 1100000, P_PXO, 1, 2, 49 },
{ 5400000, P_PXO, 1, 1, 5 },
{ 10800000, P_PXO, 1, 2, 5 },
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 32fd01ef469a..7258ba5c0900 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -1947,7 +1947,7 @@ static struct clk_regmap_div nss_port6_tx_div_clk_src = {
},
};
-static struct freq_tbl ftbl_crypto_clk_src[] = {
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
F(40000000, P_GPLL0_DIV2, 10, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
F(100000000, P_GPLL0, 8, 0, 0),
@@ -1968,7 +1968,7 @@ static struct clk_rcg2 crypto_clk_src = {
},
};
-static struct freq_tbl ftbl_gp_clk_src[] = {
+static const struct freq_tbl ftbl_gp_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
index 33987b957737..37fc5607b2d3 100644
--- a/drivers/clk/qcom/gcc-mdm9615.c
+++ b/drivers/clk/qcom/gcc-mdm9615.c
@@ -164,7 +164,7 @@ static const struct clk_parent_data gcc_cxo_pll14[] = {
{ .hw = &pll14_vote.hw },
};
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
{ 7372800, P_PLL8, 2, 24, 625 },
@@ -437,7 +437,7 @@ static struct clk_branch gsbi5_uart_clk = {
},
};
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 960000, P_CXO, 4, 1, 5 },
{ 4800000, P_CXO, 4, 0, 1 },
{ 9600000, P_CXO, 2, 0, 1 },
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index 67870c899ab9..a6a4477ccdef 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -82,7 +82,7 @@ static const struct clk_parent_data gcc_pxo_pll8_cxo[] = {
{ .fw_name = "cxo", .name = "cxo_board" },
};
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
{ 7372800, P_PLL8, 2, 24, 625 },
@@ -712,7 +712,7 @@ static struct clk_branch gsbi12_uart_clk = {
},
};
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 1100000, P_PXO, 1, 2, 49 },
{ 5400000, P_PXO, 1, 1, 5 },
{ 10800000, P_PXO, 1, 2, 5 },
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 6236a458e4eb..9ddce11db6df 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -328,7 +328,7 @@ static const struct clk_parent_data gcc_pxo_pll8_pll3[] = {
{ .hw = &pll3.clkr.hw },
};
-static struct freq_tbl clk_tbl_gsbi_uart[] = {
+static const struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
{ 7372800, P_PLL8, 2, 24, 625 },
@@ -958,7 +958,7 @@ static struct clk_branch gsbi12_uart_clk = {
},
};
-static struct freq_tbl clk_tbl_gsbi_qup[] = {
+static const struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 1100000, P_PXO, 1, 2, 49 },
{ 5400000, P_PXO, 1, 1, 5 },
{ 10800000, P_PXO, 1, 2, 5 },
@@ -2940,7 +2940,7 @@ static struct clk_branch adm0_pbus_clk = {
},
};
-static struct freq_tbl clk_tbl_ce3[] = {
+static const struct freq_tbl clk_tbl_ce3[] = {
{ 48000000, P_PLL8, 8 },
{ 100000000, P_PLL3, 12 },
{ 120000000, P_PLL3, 10 },
@@ -3761,7 +3761,7 @@ static void gcc_msm8960_remove(struct platform_device *pdev)
static struct platform_driver gcc_msm8960_driver = {
.probe = gcc_msm8960_probe,
- .remove_new = gcc_msm8960_remove,
+ .remove = gcc_msm8960_remove,
.driver = {
.name = "gcc-msm8960",
.of_match_table = gcc_msm8960_match_table,
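Note: the .remove_new to .remove rename reflects the platform-bus conversion to a void-returning remove callback; .remove_new was only a transitional name while both prototypes coexisted. The converted shape, with hypothetical driver names:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int gcc_example_probe(struct platform_device *pdev)
	{
		return 0;
	}

	/* remove now returns void: the core can do nothing useful with an
	 * error at teardown, so none can be reported. */
	static void gcc_example_remove(struct platform_device *pdev)
	{
	}

	static struct platform_driver gcc_example_driver = {
		.probe = gcc_example_probe,
		.remove = gcc_example_remove,
		.driver = {
			.name = "gcc-example",
		},
	};
	module_platform_driver(gcc_example_driver);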
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 80170a805c3b..6a6b7da2b151 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -112,7 +112,7 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
{ .hw = &gpll4.clkr.hw },
};
-static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+static const struct freq_tbl ftbl_ufs_axi_clk_src[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(150000000, P_GPLL0, 4, 0, 0),
@@ -136,7 +136,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
},
};
-static struct freq_tbl ftbl_usb30_master_clk_src[] = {
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(125000000, P_GPLL0, 1, 5, 24),
{ }
@@ -156,7 +156,7 @@ static struct clk_rcg2 usb30_master_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
{ }
@@ -175,7 +175,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -188,7 +188,7 @@ static struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
{ }
};
-static struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src_8992[] = {
+static const struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src_8992[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -226,7 +226,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -266,7 +266,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup3_4_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup3_4_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -333,7 +333,7 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -373,7 +373,7 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -400,7 +400,7 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
F(3686400, P_GPLL0, 1, 96, 15625),
F(7372800, P_GPLL0, 1, 192, 15625),
F(14745600, P_GPLL0, 1, 384, 15625),
@@ -516,7 +516,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp2_qup1_2_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp2_qup1_2_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -570,7 +570,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp2_qup3_4_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp2_qup3_4_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -678,7 +678,7 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = {
+static const struct freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -789,7 +789,7 @@ static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_gp1_clk_src[] = {
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -810,7 +810,7 @@ static struct clk_rcg2 gp1_clk_src = {
},
};
-static struct freq_tbl ftbl_gp2_clk_src[] = {
+static const struct freq_tbl ftbl_gp2_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -831,7 +831,7 @@ static struct clk_rcg2 gp2_clk_src = {
},
};
-static struct freq_tbl ftbl_gp3_clk_src[] = {
+static const struct freq_tbl ftbl_gp3_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -852,7 +852,7 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
-static struct freq_tbl ftbl_pcie_0_aux_clk_src[] = {
+static const struct freq_tbl ftbl_pcie_0_aux_clk_src[] = {
F(1011000, P_XO, 1, 1, 19),
{ }
};
@@ -872,7 +872,7 @@ static struct clk_rcg2 pcie_0_aux_clk_src = {
},
};
-static struct freq_tbl ftbl_pcie_pipe_clk_src[] = {
+static const struct freq_tbl ftbl_pcie_pipe_clk_src[] = {
F(125000000, P_XO, 1, 0, 0),
{ }
};
@@ -891,7 +891,7 @@ static struct clk_rcg2 pcie_0_pipe_clk_src = {
},
};
-static struct freq_tbl ftbl_pcie_1_aux_clk_src[] = {
+static const struct freq_tbl ftbl_pcie_1_aux_clk_src[] = {
F(1011000, P_XO, 1, 1, 19),
{ }
};
@@ -925,7 +925,7 @@ static struct clk_rcg2 pcie_1_pipe_clk_src = {
},
};
-static struct freq_tbl ftbl_pdm2_clk_src[] = {
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
F(60000000, P_GPLL0, 10, 0, 0),
{ }
};
@@ -943,7 +943,7 @@ static struct clk_rcg2 pdm2_clk_src = {
},
};
-static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
F(144000, P_XO, 16, 3, 25),
F(400000, P_XO, 12, 1, 4),
F(20000000, P_GPLL0, 15, 1, 2),
@@ -955,7 +955,7 @@ static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
{ }
};
-static struct freq_tbl ftbl_sdcc1_apps_clk_src_8992[] = {
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src_8992[] = {
F(144000, P_XO, 16, 3, 25),
F(400000, P_XO, 12, 1, 4),
F(20000000, P_GPLL0, 15, 1, 2),
@@ -981,7 +981,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
+static const struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
F(144000, P_XO, 16, 3, 25),
F(400000, P_XO, 12, 1, 4),
F(20000000, P_GPLL0, 15, 1, 2),
@@ -1034,7 +1034,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_tsif_ref_clk_src[] = {
+static const struct freq_tbl ftbl_tsif_ref_clk_src[] = {
F(105500, P_XO, 1, 1, 182),
{ }
};
@@ -1054,7 +1054,7 @@ static struct clk_rcg2 tsif_ref_clk_src = {
},
};
-static struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+static const struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
{ }
@@ -1073,7 +1073,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
},
};
-static struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+static const struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
F(1200000, P_XO, 16, 0, 0),
{ }
};
@@ -1092,7 +1092,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
},
};
-static struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
+static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
F(75000000, P_GPLL0, 8, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 4fc667b94cf2..aa3bd2777868 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -359,7 +359,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
},
};
-static struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(150000000, P_GPLL0, 4, 0, 0),
F(300000000, P_GPLL0, 2, 0, 0),
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 90b66caba2cd..c9701f7f6e18 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -2242,7 +2242,7 @@ static struct clk_branch gcc_hmss_trig_clk = {
},
};
-static struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
+static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
F( 300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
F( 600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
{ }
@@ -2922,6 +2922,43 @@ static struct clk_branch ssc_cnoc_ahbs_clk = {
},
};
+static struct clk_branch hlos1_vote_lpass_core_smmu_clk = {
+ .halt_reg = 0x7d010,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "hlos1_vote_lpass_core_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
+ .halt_reg = 0x7d014,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x8a040,
+ .clkr = {
+ .enable_reg = 0x8a040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
.gds_hw_ctrl = 0x0,
@@ -2953,6 +2990,26 @@ static struct gdsc usb_30_gdsc = {
.flags = VOTABLE,
};
+static struct gdsc hlos1_vote_lpass_adsp = {
+ .gdscr = 0x7d034,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "lpass_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_core = {
+ .gdscr = 0x7d038,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "lpass_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = ALWAYS_ON,
+};
+
static struct clk_regmap *gcc_msm8998_clocks[] = {
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
[BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
@@ -3133,12 +3190,17 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr,
[GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
[GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
+ [HLOS1_VOTE_LPASS_CORE_SMMU_CLK] = &hlos1_vote_lpass_core_smmu_clk.clkr,
+ [HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
};
static struct gdsc *gcc_msm8998_gdscs[] = {
[PCIE_0_GDSC] = &pcie_0_gdsc,
[UFS_GDSC] = &ufs_gdsc,
[USB_30_GDSC] = &usb_30_gdsc,
+ [LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp,
+ [LPASS_CORE_GDSC] = &hlos1_vote_lpass_core,
};
static const struct qcom_reset_map gcc_msm8998_resets[] = {
diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c
index ad135bfa4c76..31e788e22ab4 100644
--- a/drivers/clk/qcom/gcc-sc8180x.c
+++ b/drivers/clk/qcom/gcc-sc8180x.c
@@ -142,6 +142,23 @@ static struct clk_alpha_pll gpll7 = {
},
};
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_trion_ops,
+ },
+ },
+};
+
static const struct parent_map gcc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -241,7 +258,7 @@ static const struct parent_map gcc_parent_map_7[] = {
static const struct clk_parent_data gcc_parents_7[] = {
{ .fw_name = "bi_tcxo", },
{ .hw = &gpll0.clkr.hw },
- { .name = "gppl9" },
+ { .hw = &gpll9.clkr.hw },
{ .hw = &gpll4.clkr.hw },
{ .hw = &gpll0_out_even.clkr.hw },
};
@@ -260,28 +277,6 @@ static const struct clk_parent_data gcc_parents_8[] = {
{ .hw = &gpll0_out_even.clkr.hw },
};
-static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- { }
-};
-
-static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
- .cmd_rcgr = 0x48014,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
-};
-
static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
@@ -609,19 +604,29 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
{ }
};
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.cmd_rcgr = 0x17148,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -630,13 +635,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -645,13 +652,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -660,13 +669,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -675,13 +686,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -690,13 +703,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -705,13 +720,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s6_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -720,13 +737,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s7_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -735,13 +754,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -750,13 +771,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -765,13 +788,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -780,13 +805,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -795,13 +822,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -810,13 +839,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -825,13 +856,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s0_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -840,28 +873,33 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s1_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
.cmd_rcgr = 0x1e3a8,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s2_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -870,13 +908,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s3_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -885,13 +925,15 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s4_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -900,13 +942,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap2_s5_clk_src",
- .parent_data = gcc_parents_0,
- .num_parents = ARRAY_SIZE(gcc_parents_0),
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
};
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
@@ -916,7 +952,7 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0),
{ }
};
@@ -939,9 +975,8 @@ static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(9600000, P_BI_TCXO, 2, 0, 0),
F(19200000, P_BI_TCXO, 1, 0, 0),
- F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
- F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
{ }
};
@@ -1599,25 +1634,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
},
};
-/* For CPUSS functionality the AHB clock needs to be left enabled */
-static struct clk_branch gcc_cpuss_ahb_clk = {
- .halt_reg = 0x48000,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x52004,
- .enable_mask = BIT(21),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){
- &gcc_cpuss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_cpuss_rbcpr_clk = {
.halt_reg = 0x48008,
.halt_check = BRANCH_HALT,
@@ -3150,25 +3166,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
},
};
-/* For CPUSS functionality the SYS NOC clock needs to be left enabled */
-static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
- .halt_reg = 0x4819c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x52004,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sys_noc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){
- &gcc_cpuss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_tsif_ahb_clk = {
.halt_reg = 0x36004,
.halt_check = BRANCH_HALT,
@@ -4284,8 +4281,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
[GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
[GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
- [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
- [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
@@ -4422,7 +4417,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
[GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
[GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
- [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
[GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
[GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
[GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
@@ -4511,6 +4505,7 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
[GPLL1] = &gpll1.clkr,
[GPLL4] = &gpll4.clkr,
[GPLL7] = &gpll7.clkr,
+ [GPLL9] = &gpll9.clkr,
};
static const struct qcom_reset_map gcc_sc8180x_resets[] = {
@@ -4546,6 +4541,10 @@ static const struct qcom_reset_map gcc_sc8180x_resets[] = {
[GCC_USB3_PHY_SEC_BCR] = { 0x50018 },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c },
[GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x50024 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x50028 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x5002c },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x50030 },
[GCC_SDCC2_BCR] = { 0x14000 },
[GCC_SDCC4_BCR] = { 0x16000 },
[GCC_TSIF_BCR] = { 0x36000 },
@@ -4561,6 +4560,29 @@ static const struct qcom_reset_map gcc_sc8180x_resets[] = {
[GCC_VIDEO_AXI1_CLK_BCR] = { .reg = 0xb028, .bit = 2, .udelay = 150 },
};
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+};
+
static struct gdsc *gcc_sc8180x_gdscs[] = {
[EMAC_GDSC] = &emac_gdsc,
[PCIE_0_GDSC] = &pcie_0_gdsc,
@@ -4602,6 +4624,7 @@ MODULE_DEVICE_TABLE(of, gcc_sc8180x_match_table);
static int gcc_sc8180x_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
regmap = qcom_cc_map(pdev, &gcc_sc8180x_desc);
if (IS_ERR(regmap))
@@ -4623,6 +4646,11 @@ static int gcc_sc8180x_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
return qcom_cc_really_probe(&pdev->dev, &gcc_sc8180x_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
index 991cd8b8d597..1c59d70e0f96 100644
--- a/drivers/clk/qcom/gcc-sm8250.c
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -3226,7 +3226,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie_1_gdsc = {
@@ -3234,7 +3234,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie_2_gdsc = {
@@ -3242,7 +3242,7 @@ static struct gdsc pcie_2_gdsc = {
.pd = {
.name = "pcie_2_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc ufs_card_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index 639a9a955914..c445c271678a 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -2974,7 +2974,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc pcie_1_gdsc = {
@@ -2982,7 +2982,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc ufs_phy_gdsc = {
diff --git a/drivers/clk/qcom/gpucc-sm4450.c b/drivers/clk/qcom/gpucc-sm4450.c
new file mode 100644
index 000000000000..a14d0bb031ac
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm4450.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm4450-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_EVEN,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL0_OUT_ODD,
+ P_GPU_CC_PLL1_OUT_EVEN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_ODD,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+/* 680.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x23,
+ .alpha = 0x6aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 500.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_EVEN, 1 },
+ { P_GPU_CC_PLL0_OUT_ODD, 2 },
+ { P_GPU_CC_PLL1_OUT_EVEN, 3 },
+ { P_GPU_CC_PLL1_OUT_ODD, 4 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+};
+
+static const struct parent_map gpu_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+ F(340000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(500000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(605000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(765000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(850000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(955000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(1010000000, P_GPU_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x9070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(150000000, P_GPLL0_OUT_MAIN_DIV, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_3,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_xo_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_4,
+ .freq_tbl = ftbl_gpu_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_xo_clk_src",
+ .parent_data = gpu_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_demet_div_clk_src = {
+ .reg = 0x9054,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_demet_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x9430,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x942c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_xo_div_clk_src = {
+ .reg = 0x9050,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_xo_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+ .halt_reg = 0x919c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x919c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = {
+ .halt_reg = 0x91a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x91a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_slv_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x9130,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9130,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+ .halt_reg = 0x90b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_cxo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_ff_clk = {
+ .halt_reg = 0x90c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x90a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_rdvm_clk = {
+ .halt_reg = 0x90c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_rdvm_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .clk_dis_wait_val = 8,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gpu_cc_gx_gdsc = {
+ .gdscr = 0x905c,
+ .clamp_io_ctrl = 0x9504,
+ .resets = (unsigned int []){ GPU_CC_GX_BCR,
+ GPU_CC_ACD_BCR,
+ GPU_CC_GX_ACD_IROOT_BCR },
+ .reset_count = 3,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | SW_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm4450_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DEMET_DIV_CLK_SRC] = &gpu_cc_demet_div_clk_src.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+ [GPU_CC_GX_FF_CLK] = &gpu_cc_gx_ff_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_GX_GFX3D_RDVM_CLK] = &gpu_cc_gx_gfx3d_rdvm_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_XO_CLK_SRC] = &gpu_cc_xo_clk_src.clkr,
+ [GPU_CC_XO_DIV_CLK_SRC] = &gpu_cc_xo_div_clk_src.clkr,
+};
+
+static struct gdsc *gpu_cc_sm4450_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &gpu_cc_cx_gdsc,
+ [GPU_CC_GX_GDSC] = &gpu_cc_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_sm4450_resets[] = {
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x9104 },
+ [GPU_CC_GX_BCR] = { 0x9058 },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_RBCPR_BCR] = { 0x91e0 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+ [GPU_CC_GX_ACD_IROOT_BCR] = { 0x958c },
+};
+
+static const struct regmap_config gpu_cc_sm4450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x95c0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm4450_desc = {
+ .config = &gpu_cc_sm4450_regmap_config,
+ .clks = gpu_cc_sm4450_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm4450_clocks),
+ .resets = gpu_cc_sm4450_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm4450_resets),
+ .gdscs = gpu_cc_sm4450_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm4450_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm4450_match_table[] = {
+ { .compatible = "qcom,sm4450-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm4450_match_table);
+
+static int gpu_cc_sm4450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm4450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x93a4); /* GPU_CC_CB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+
+ return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm4450_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm4450_driver = {
+ .probe = gpu_cc_sm4450_probe,
+ .driver = {
+ .name = "gpucc-sm4450",
+ .of_match_table = gpu_cc_sm4450_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_sm4450_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC SM4450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index bf5320a43e8c..bbacd7fedb2f 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -70,7 +70,7 @@ static const struct clk_parent_data lcc_pxo_pll4[] = {
{ .fw_name = "pll4_vote", .name = "pll4_vote" },
};
-static struct freq_tbl clk_tbl_aif_mi2s[] = {
+static const struct freq_tbl clk_tbl_aif_mi2s[] = {
{ 1024000, P_PLL4, 4, 1, 96 },
{ 1411200, P_PLL4, 4, 2, 139 },
{ 1536000, P_PLL4, 4, 1, 64 },
@@ -214,7 +214,7 @@ static struct clk_regmap_mux mi2s_bit_clk = {
},
};
-static struct freq_tbl clk_tbl_pcm[] = {
+static const struct freq_tbl clk_tbl_pcm[] = {
{ 64000, P_PLL4, 4, 1, 1536 },
{ 128000, P_PLL4, 4, 1, 768 },
{ 256000, P_PLL4, 4, 1, 384 },
@@ -296,7 +296,7 @@ static struct clk_regmap_mux pcm_clk = {
},
};
-static struct freq_tbl clk_tbl_aif_osr[] = {
+static const struct freq_tbl clk_tbl_aif_osr[] = {
{ 2822400, P_PLL4, 1, 147, 20480 },
{ 4096000, P_PLL4, 1, 1, 96 },
{ 5644800, P_PLL4, 1, 147, 10240 },
@@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = {
},
};
-static struct freq_tbl clk_tbl_ahbix[] = {
+static const struct freq_tbl clk_tbl_ahbix[] = {
{ 131072000, P_PLL4, 1, 1, 3 },
{ },
};
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index d53bf315e9c3..7cba2ce3e408 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -57,7 +57,7 @@ static struct clk_parent_data lcc_pxo_pll4[] = {
{ .fw_name = "pll4_vote", .name = "pll4_vote" },
};
-static struct freq_tbl clk_tbl_aif_osr_492[] = {
+static const struct freq_tbl clk_tbl_aif_osr_492[] = {
{ 512000, P_PLL4, 4, 1, 240 },
{ 768000, P_PLL4, 4, 1, 160 },
{ 1024000, P_PLL4, 4, 1, 120 },
@@ -73,7 +73,7 @@ static struct freq_tbl clk_tbl_aif_osr_492[] = {
{ }
};
-static struct freq_tbl clk_tbl_aif_osr_393[] = {
+static const struct freq_tbl clk_tbl_aif_osr_393[] = {
{ 512000, P_PLL4, 4, 1, 192 },
{ 768000, P_PLL4, 4, 1, 128 },
{ 1024000, P_PLL4, 4, 1, 96 },
@@ -218,7 +218,7 @@ CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
-static struct freq_tbl clk_tbl_pcm_492[] = {
+static const struct freq_tbl clk_tbl_pcm_492[] = {
{ 256000, P_PLL4, 4, 1, 480 },
{ 512000, P_PLL4, 4, 1, 240 },
{ 768000, P_PLL4, 4, 1, 160 },
@@ -235,7 +235,7 @@ static struct freq_tbl clk_tbl_pcm_492[] = {
{ }
};
-static struct freq_tbl clk_tbl_pcm_393[] = {
+static const struct freq_tbl clk_tbl_pcm_393[] = {
{ 256000, P_PLL4, 4, 1, 384 },
{ 512000, P_PLL4, 4, 1, 192 },
{ 768000, P_PLL4, 4, 1, 128 },
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index c89700ab93f9..cc03722596a4 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -338,7 +338,7 @@ static struct clk_rcg2 mmss_ahb_clk_src = {
},
};
-static struct freq_tbl ftbl_mmss_axi_clk[] = {
+static const struct freq_tbl ftbl_mmss_axi_clk[] = {
F(19200000, P_XO, 1, 0, 0),
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
@@ -364,7 +364,7 @@ static struct clk_rcg2 mmss_axi_clk_src = {
},
};
-static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+static const struct freq_tbl ftbl_ocmemnoc_clk[] = {
F(19200000, P_XO, 1, 0, 0),
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
@@ -389,7 +389,7 @@ static struct clk_rcg2 ocmemnoc_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_csi0_3_clk[] = {
+static const struct freq_tbl ftbl_camss_csi0_3_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
{ }
@@ -447,7 +447,7 @@ static struct clk_rcg2 csi3_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
+static const struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
@@ -490,7 +490,7 @@ static struct clk_rcg2 vfe1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_mdp_clk[] = {
+static const struct freq_tbl ftbl_mdss_mdp_clk[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
F(75000000, P_GPLL0, 8, 0, 0),
@@ -530,7 +530,7 @@ static struct clk_rcg2 gfx3d_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
+static const struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
F(75000000, P_GPLL0, 8, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -607,7 +607,7 @@ static struct clk_rcg2 pclk1_clk_src = {
},
};
-static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
+static const struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
@@ -631,7 +631,7 @@ static struct clk_rcg2 vcodec0_clk_src = {
},
};
-static struct freq_tbl ftbl_avsync_vp_clk[] = {
+static const struct freq_tbl ftbl_avsync_vp_clk[] = {
F(150000000, P_GPLL0, 4, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
{ }
@@ -650,7 +650,7 @@ static struct clk_rcg2 vp_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_cci_cci_clk[] = {
+static const struct freq_tbl ftbl_camss_cci_cci_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -669,7 +669,7 @@ static struct clk_rcg2 cci_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_gp0_1_clk[] = {
+static const struct freq_tbl ftbl_camss_gp0_1_clk[] = {
F(10000, P_XO, 16, 1, 120),
F(24000, P_XO, 16, 1, 50),
F(6000000, P_GPLL0, 10, 1, 10),
@@ -707,7 +707,7 @@ static struct clk_rcg2 camss_gp1_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
+static const struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
F(4800000, P_XO, 4, 0, 0),
F(6000000, P_GPLL0, 10, 1, 10),
F(8000000, P_GPLL0, 15, 1, 5),
@@ -777,7 +777,7 @@ static struct clk_rcg2 mclk3_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
+static const struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
{ }
@@ -822,7 +822,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
+static const struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
F(133330000, P_GPLL0, 4.5, 0, 0),
F(266670000, P_MMPLL0, 3, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
@@ -871,7 +871,7 @@ static struct clk_rcg2 byte1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_edpaux_clk[] = {
+static const struct freq_tbl ftbl_mdss_edpaux_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -889,7 +889,7 @@ static struct clk_rcg2 edpaux_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_edplink_clk[] = {
+static const struct freq_tbl ftbl_mdss_edplink_clk[] = {
F(135000000, P_EDPLINK, 2, 0, 0),
F(270000000, P_EDPLINK, 11, 0, 0),
{ }
@@ -909,7 +909,7 @@ static struct clk_rcg2 edplink_clk_src = {
},
};
-static struct freq_tbl edp_pixel_freq_tbl[] = {
+static const struct freq_tbl edp_pixel_freq_tbl[] = {
{ .src = P_EDPVCO },
{ }
};
@@ -928,7 +928,7 @@ static struct clk_rcg2 edppixel_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -959,7 +959,7 @@ static struct clk_rcg2 esc1_clk_src = {
},
};
-static struct freq_tbl extpclk_freq_tbl[] = {
+static const struct freq_tbl extpclk_freq_tbl[] = {
{ .src = P_HDMIPLL },
{ }
};
@@ -978,7 +978,7 @@ static struct clk_rcg2 extpclk_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+static const struct freq_tbl ftbl_mdss_hdmi_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -996,7 +996,7 @@ static struct clk_rcg2 hdmi_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -1014,7 +1014,7 @@ static struct clk_rcg2 vsync_clk_src = {
},
};
-static struct freq_tbl ftbl_mmss_rbcpr_clk[] = {
+static const struct freq_tbl ftbl_mmss_rbcpr_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
{ }
};
@@ -1032,7 +1032,7 @@ static struct clk_rcg2 rbcpr_clk_src = {
},
};
-static struct freq_tbl ftbl_oxili_rbbmtimer_clk[] = {
+static const struct freq_tbl ftbl_oxili_rbbmtimer_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -1050,7 +1050,7 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
},
};
-static struct freq_tbl ftbl_vpu_maple_clk[] = {
+static const struct freq_tbl ftbl_vpu_maple_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
@@ -1073,7 +1073,7 @@ static struct clk_rcg2 maple_clk_src = {
},
};
-static struct freq_tbl ftbl_vpu_vdp_clk[] = {
+static const struct freq_tbl ftbl_vpu_vdp_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
@@ -1095,7 +1095,7 @@ static struct clk_rcg2 vdp_clk_src = {
},
};
-static struct freq_tbl ftbl_vpu_bus_clk[] = {
+static const struct freq_tbl ftbl_vpu_bus_clk[] = {
F(40000000, P_GPLL0, 15, 0, 0),
F(80000000, P_MMPLL0, 10, 0, 0),
{ }
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 1061322534c4..3f41249c5ae4 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -155,7 +155,7 @@ static const struct clk_parent_data mmcc_pxo_dsi1_dsi2_byte[] = {
{ .fw_name = "dsi2pllbyte", .name = "dsi2pllbyte" },
};
-static struct freq_tbl clk_tbl_cam[] = {
+static const struct freq_tbl clk_tbl_cam[] = {
{ 6000000, P_PLL8, 4, 1, 16 },
{ 8000000, P_PLL8, 4, 1, 12 },
{ 12000000, P_PLL8, 4, 1, 8 },
@@ -323,7 +323,7 @@ static struct clk_branch camclk2_clk = {
};
-static struct freq_tbl clk_tbl_csi[] = {
+static const struct freq_tbl clk_tbl_csi[] = {
{ 27000000, P_PXO, 1, 0, 0 },
{ 85330000, P_PLL8, 1, 2, 9 },
{ 177780000, P_PLL2, 1, 2, 9 },
@@ -715,7 +715,7 @@ static struct clk_pix_rdi csi_rdi2_clk = {
},
};
-static struct freq_tbl clk_tbl_csiphytimer[] = {
+static const struct freq_tbl clk_tbl_csiphytimer[] = {
{ 85330000, P_PLL8, 1, 2, 9 },
{ 177780000, P_PLL2, 1, 2, 9 },
{ }
@@ -808,7 +808,7 @@ static struct clk_branch csiphy2_timer_clk = {
},
};
-static struct freq_tbl clk_tbl_gfx2d[] = {
+static const struct freq_tbl clk_tbl_gfx2d[] = {
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54857000, P_PLL8, 1, 7),
@@ -948,7 +948,7 @@ static struct clk_branch gfx2d1_clk = {
},
};
-static struct freq_tbl clk_tbl_gfx3d[] = {
+static const struct freq_tbl clk_tbl_gfx3d[] = {
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54857000, P_PLL8, 1, 7),
@@ -968,7 +968,7 @@ static struct freq_tbl clk_tbl_gfx3d[] = {
{ }
};
-static struct freq_tbl clk_tbl_gfx3d_8064[] = {
+static const struct freq_tbl clk_tbl_gfx3d_8064[] = {
F_MN( 27000000, P_PXO, 0, 0),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54857000, P_PLL8, 1, 7),
@@ -1058,7 +1058,7 @@ static struct clk_branch gfx3d_clk = {
},
};
-static struct freq_tbl clk_tbl_vcap[] = {
+static const struct freq_tbl clk_tbl_vcap[] = {
F_MN( 27000000, P_PXO, 0, 0),
F_MN( 54860000, P_PLL8, 1, 7),
F_MN( 64000000, P_PLL8, 1, 6),
@@ -1149,7 +1149,7 @@ static struct clk_branch vcap_npl_clk = {
},
};
-static struct freq_tbl clk_tbl_ijpeg[] = {
+static const struct freq_tbl clk_tbl_ijpeg[] = {
{ 27000000, P_PXO, 1, 0, 0 },
{ 36570000, P_PLL8, 1, 2, 21 },
{ 54860000, P_PLL8, 7, 0, 0 },
@@ -1214,7 +1214,7 @@ static struct clk_branch ijpeg_clk = {
},
};
-static struct freq_tbl clk_tbl_jpegd[] = {
+static const struct freq_tbl clk_tbl_jpegd[] = {
{ 64000000, P_PLL8, 6 },
{ 76800000, P_PLL8, 5 },
{ 96000000, P_PLL8, 4 },
@@ -1264,7 +1264,7 @@ static struct clk_branch jpegd_clk = {
},
};
-static struct freq_tbl clk_tbl_mdp[] = {
+static const struct freq_tbl clk_tbl_mdp[] = {
{ 9600000, P_PLL8, 1, 1, 40 },
{ 13710000, P_PLL8, 1, 1, 28 },
{ 27000000, P_PXO, 1, 0, 0 },
@@ -1381,7 +1381,7 @@ static struct clk_branch mdp_vsync_clk = {
},
};
-static struct freq_tbl clk_tbl_rot[] = {
+static const struct freq_tbl clk_tbl_rot[] = {
{ 27000000, P_PXO, 1 },
{ 29540000, P_PLL8, 13 },
{ 32000000, P_PLL8, 12 },
@@ -1461,7 +1461,7 @@ static const struct clk_parent_data mmcc_pxo_hdmi[] = {
{ .fw_name = "hdmipll", .name = "hdmi_pll" },
};
-static struct freq_tbl clk_tbl_tv[] = {
+static const struct freq_tbl clk_tbl_tv[] = {
{ .src = P_HDMI_PLL, .pre_div = 1 },
{ }
};
@@ -1624,7 +1624,7 @@ static struct clk_branch hdmi_app_clk = {
},
};
-static struct freq_tbl clk_tbl_vcodec[] = {
+static const struct freq_tbl clk_tbl_vcodec[] = {
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 32000000, P_PLL8, 1, 12),
F_MN( 48000000, P_PLL8, 1, 8),
@@ -1699,7 +1699,7 @@ static struct clk_branch vcodec_clk = {
},
};
-static struct freq_tbl clk_tbl_vpe[] = {
+static const struct freq_tbl clk_tbl_vpe[] = {
{ 27000000, P_PXO, 1 },
{ 34909000, P_PLL8, 11 },
{ 38400000, P_PLL8, 10 },
@@ -1752,7 +1752,7 @@ static struct clk_branch vpe_clk = {
},
};
-static struct freq_tbl clk_tbl_vfe[] = {
+static const struct freq_tbl clk_tbl_vfe[] = {
{ 13960000, P_PLL8, 1, 2, 55 },
{ 27000000, P_PXO, 1, 0, 0 },
{ 36570000, P_PLL8, 1, 2, 21 },
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index d5bcb09ebd0c..169e85f60550 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -268,7 +268,7 @@ static struct clk_rcg2 mmss_ahb_clk_src = {
},
};
-static struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = {
+static const struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = {
F(19200000, P_XO, 1, 0, 0),
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
@@ -280,7 +280,7 @@ static struct freq_tbl ftbl_mmss_axi_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_mmss_axi_clk[] = {
+static const struct freq_tbl ftbl_mmss_axi_clk[] = {
F( 19200000, P_XO, 1, 0, 0),
F( 37500000, P_GPLL0, 16, 0, 0),
F( 50000000, P_GPLL0, 12, 0, 0),
@@ -306,7 +306,7 @@ static struct clk_rcg2 mmss_axi_clk_src = {
},
};
-static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+static const struct freq_tbl ftbl_ocmemnoc_clk[] = {
F( 19200000, P_XO, 1, 0, 0),
F( 37500000, P_GPLL0, 16, 0, 0),
F( 50000000, P_GPLL0, 12, 0, 0),
@@ -331,7 +331,7 @@ static struct clk_rcg2 ocmemnoc_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_csi0_3_clk[] = {
+static const struct freq_tbl ftbl_camss_csi0_3_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
{ }
@@ -389,7 +389,7 @@ static struct clk_rcg2 csi3_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = {
+static const struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
@@ -406,7 +406,7 @@ static struct freq_tbl ftbl_camss_vfe_vfe0_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
+static const struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(50000000, P_GPLL0, 12, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
@@ -449,7 +449,7 @@ static struct clk_rcg2 vfe1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = {
+static const struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
F(75000000, P_GPLL0, 8, 0, 0),
@@ -461,7 +461,7 @@ static struct freq_tbl ftbl_mdss_mdp_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_mdss_mdp_clk[] = {
+static const struct freq_tbl ftbl_mdss_mdp_clk[] = {
F(37500000, P_GPLL0, 16, 0, 0),
F(60000000, P_GPLL0, 10, 0, 0),
F(75000000, P_GPLL0, 8, 0, 0),
@@ -490,7 +490,7 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
+static const struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
F(75000000, P_GPLL0, 8, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
F(200000000, P_GPLL0, 3, 0, 0),
@@ -567,7 +567,7 @@ static struct clk_rcg2 pclk1_clk_src = {
},
};
-static struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = {
+static const struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = {
F(66700000, P_GPLL0, 9, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(133330000, P_MMPLL0, 6, 0, 0),
@@ -575,7 +575,7 @@ static struct freq_tbl ftbl_venus0_vcodec0_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
+static const struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
F(133330000, P_MMPLL0, 6, 0, 0),
@@ -599,7 +599,7 @@ static struct clk_rcg2 vcodec0_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_cci_cci_clk[] = {
+static const struct freq_tbl ftbl_camss_cci_cci_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -617,7 +617,7 @@ static struct clk_rcg2 cci_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_gp0_1_clk[] = {
+static const struct freq_tbl ftbl_camss_gp0_1_clk[] = {
F(10000, P_XO, 16, 1, 120),
F(24000, P_XO, 16, 1, 50),
F(6000000, P_GPLL0, 10, 1, 10),
@@ -655,14 +655,14 @@ static struct clk_rcg2 camss_gp1_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_mclk0_3_clk_msm8226[] = {
+static const struct freq_tbl ftbl_camss_mclk0_3_clk_msm8226[] = {
F(19200000, P_XO, 1, 0, 0),
F(24000000, P_GPLL0, 5, 1, 5),
F(66670000, P_GPLL0, 9, 0, 0),
{ }
};
-static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
+static const struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
F(4800000, P_XO, 4, 0, 0),
F(6000000, P_GPLL0, 10, 1, 10),
F(8000000, P_GPLL0, 15, 1, 5),
@@ -729,7 +729,7 @@ static struct clk_rcg2 mclk3_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
+static const struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
F(100000000, P_GPLL0, 6, 0, 0),
F(200000000, P_MMPLL0, 4, 0, 0),
{ }
@@ -774,7 +774,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
},
};
-static struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = {
+static const struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = {
F(133330000, P_GPLL0, 4.5, 0, 0),
F(150000000, P_GPLL0, 4, 0, 0),
F(266670000, P_MMPLL0, 3, 0, 0),
@@ -783,7 +783,7 @@ static struct freq_tbl ftbl_camss_vfe_cpp_clk_msm8226[] = {
{ }
};
-static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
+static const struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
F(133330000, P_GPLL0, 4.5, 0, 0),
F(266670000, P_MMPLL0, 3, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
@@ -805,7 +805,7 @@ static struct clk_rcg2 cpp_clk_src = {
},
};
-static struct freq_tbl byte_freq_tbl[] = {
+static const struct freq_tbl byte_freq_tbl[] = {
{ .src = P_DSI0PLL_BYTE },
{ }
};
@@ -838,7 +838,7 @@ static struct clk_rcg2 byte1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_edpaux_clk[] = {
+static const struct freq_tbl ftbl_mdss_edpaux_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -856,7 +856,7 @@ static struct clk_rcg2 edpaux_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_edplink_clk[] = {
+static const struct freq_tbl ftbl_mdss_edplink_clk[] = {
F(135000000, P_EDPLINK, 2, 0, 0),
F(270000000, P_EDPLINK, 11, 0, 0),
{ }
@@ -876,7 +876,7 @@ static struct clk_rcg2 edplink_clk_src = {
},
};
-static struct freq_tbl edp_pixel_freq_tbl[] = {
+static const struct freq_tbl edp_pixel_freq_tbl[] = {
{ .src = P_EDPVCO },
{ }
};
@@ -895,7 +895,7 @@ static struct clk_rcg2 edppixel_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -926,7 +926,7 @@ static struct clk_rcg2 esc1_clk_src = {
},
};
-static struct freq_tbl extpclk_freq_tbl[] = {
+static const struct freq_tbl extpclk_freq_tbl[] = {
{ .src = P_HDMIPLL },
{ }
};
@@ -945,7 +945,7 @@ static struct clk_rcg2 extpclk_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+static const struct freq_tbl ftbl_mdss_hdmi_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -963,7 +963,7 @@ static struct clk_rcg2 hdmi_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/mmcc-msm8994.c b/drivers/clk/qcom/mmcc-msm8994.c
index 78e5083eaf0f..f70d080bf51c 100644
--- a/drivers/clk/qcom/mmcc-msm8994.c
+++ b/drivers/clk/qcom/mmcc-msm8994.c
@@ -974,7 +974,7 @@ static struct clk_rcg2 byte1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -1005,7 +1005,7 @@ static struct clk_rcg2 esc1_clk_src = {
},
};
-static struct freq_tbl extpclk_freq_tbl[] = {
+static const struct freq_tbl extpclk_freq_tbl[] = {
{ .src = P_HDMIPLL },
{ }
};
@@ -1024,7 +1024,7 @@ static struct clk_rcg2 extpclk_clk_src = {
},
};
-static struct freq_tbl ftbl_hdmi_clk_src[] = {
+static const struct freq_tbl ftbl_hdmi_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -1042,7 +1042,7 @@ static struct clk_rcg2 hdmi_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a32c6eb8217..a742f848e4ee 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -734,7 +734,7 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct freq_tbl extpclk_freq_tbl[] = {
+static const struct freq_tbl extpclk_freq_tbl[] = {
{ .src = P_HDMIPLL },
{ }
};
@@ -753,7 +753,7 @@ static struct clk_rcg2 extpclk_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+static const struct freq_tbl ftbl_mdss_vsync_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -771,7 +771,7 @@ static struct clk_rcg2 vsync_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+static const struct freq_tbl ftbl_mdss_hdmi_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
@@ -815,7 +815,7 @@ static struct clk_rcg2 byte1_clk_src = {
},
};
-static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+static const struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
index 97d150b132a6..7c25a50cfa97 100644
--- a/drivers/clk/qcom/videocc-sm8550.c
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -449,7 +449,7 @@ static struct gdsc video_cc_mvs0_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs0c_gdsc.pd,
- .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
};
static struct gdsc video_cc_mvs1c_gdsc = {
@@ -474,7 +474,7 @@ static struct gdsc video_cc_mvs1_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs1c_gdsc.pd,
- .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
};
static struct clk_regmap *video_cc_sm8550_clocks[] = {
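Replacing HW_CTRL (hardware control latched on when the GDSC is brought up) with HW_CTRL_TRIGGER keeps the MVS domains under software control by default and only hands them to hardware when the consumer asks. A hedged sketch of how a video driver would drive that through the genpd hwmode API; the function names are invented:

#include <linux/pm_domain.h>

static int example_start_streaming(struct device *dev)
{
	/* let hardware sequence the GDSC while the firmware is running */
	return dev_pm_genpd_set_hwmode(dev, true);
}

static int example_stop_streaming(struct device *dev)
{
	/* back to software control before the domain is powered off */
	return dev_pm_genpd_set_hwmode(dev, false);
}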
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 4410d16de4e2..76791a1c50ac 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -40,6 +40,7 @@ config CLK_RENESAS
select CLK_R9A07G054 if ARCH_R9A07G054
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
+ select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -193,6 +194,10 @@ config CLK_R9A09G011
bool "RZ/V2M clock support" if COMPILE_TEST
select CLK_RZG2L
+config CLK_R9A09G057
+ bool "RZ/V2H(P) clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
@@ -228,6 +233,10 @@ config CLK_RZG2L
bool "RZ/{G2L,G2UL,G3S,V2L} family clock support" if COMPILE_TEST
select RESET_CONTROLLER
+config CLK_RZV2H
+ bool "RZ/V2H(P) family clock support" if COMPILE_TEST
+ select RESET_CONTROLLER
+
# Generic
config CLK_RENESAS_CPG_MSSR
bool "CPG/MSSR clock support" if COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index f7e18679c3b8..23d2e26051c8 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
+obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
@@ -46,6 +47,7 @@ obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN4_CPG) += rcar-gen4-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
obj-$(CONFIG_CLK_RZG2L) += rzg2l-cpg.o
+obj-$(CONFIG_CLK_RZV2H) += rzv2h-cpg.o
# Generic
obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5304c977562f..5bc473c2adb3 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -207,7 +207,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
clks[i] = ERR_PTR(-ENOENT);
- if (of_find_property(np, "clock-indices", &i))
+ if (of_property_present(np, "clock-indices"))
idxname = "clock-indices";
else
idxname = "renesas,clock-indices";
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index ff3f85e906fe..4c8e4c69c1bf 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -61,6 +61,11 @@ enum clk_ids {
DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
+#define CPG_PLL20CR 0x0834 /* PLL20 Control Register */
+#define CPG_PLL21CR 0x0838 /* PLL21 Control Register */
+#define CPG_PLL30CR 0x083c /* PLL30 Control Register */
+#define CPG_PLL31CR 0x0840 /* PLL31 Control Register */
+
static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -70,10 +75,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_PLL(".pll20", CLK_PLL20, 0x0834),
- DEF_PLL(".pll21", CLK_PLL21, 0x0838),
- DEF_PLL(".pll30", CLK_PLL30, 0x083c),
- DEF_PLL(".pll31", CLK_PLL31, 0x0840),
+ DEF_PLL(".pll20", CLK_PLL20, CPG_PLL20CR),
+ DEF_PLL(".pll21", CLK_PLL21, CPG_PLL21CR),
+ DEF_PLL(".pll30", CLK_PLL30, CPG_PLL30CR),
+ DEF_PLL(".pll31", CLK_PLL31, CPG_PLL31CR),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll20_div2", CLK_PLL20_DIV2, CLK_PLL20, 2, 1),
@@ -116,17 +121,17 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cl16mck", R8A779A0_CLK_CL16MCK, CLK_PLL1_DIV2, 64, 1),
- DEF_GEN4_SDH("sd0h", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870),
- DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870),
+ DEF_GEN4_SDH("sd0h", R8A779A0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR),
+ DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, CPG_SD0CKCR),
DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2,
R8A779A0_CLK_RPC),
- DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
- DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
- DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
- DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, 0x884),
+ DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR),
+ DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR),
+ DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, CPG_CSICKCR),
+ DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, CPG_DSIEXTCKCR),
DEF_GEN4_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
DEF_GEN4_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
@@ -253,12 +258,12 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 16, },
- { 1, 106, 1, 0, 0, 0, 0, 120, 1, 160, 1, 0, 0, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 32, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 128, 1, 192, 1, 16, },
+ { 1, 106, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 128, 1, 192, 1, 32, },
};
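As in the other R-Car Gen4 SoC files below, the table now keeps only the EXTAL, PLL1, PLL5 and OSC columns; the PLL2/3/4/6 settings are read back from the hardware by the fractional-PLL code instead of being fixed per mode-pin row. The row is still picked by the CPG_PLL_CONFIG_INDEX() macro shown above; a worked expansion, treating the all-zero row as an unsupported pin combination (an assumption):

#include <linux/bits.h>
#include <linux/types.h>

/*
 * MD14 lands in bit 1, MD13 in bit 0:
 *   MD14:MD13 = 0b10  ->  (BIT(14) >> 13) | 0  ->  index 2 (the zero row)
 */
static unsigned int pll_config_index(u32 md)
{
	return ((md & BIT(14)) >> 13) | ((md & BIT(13)) >> 13);
}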
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index cc06127406ab..f33342314b2e 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -57,12 +57,12 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
- DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
- DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
- DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
- DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_GEN4_PLL_F9_24(".pll1", 1, CLK_PLL1, CLK_MAIN),
+ DEF_GEN4_PLL_V9_24(".pll2", 2, CLK_PLL2, CLK_MAIN),
+ DEF_GEN4_PLL_V9_24(".pll3", 3, CLK_PLL3, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_GEN4_PLL_V9_24(".pll6", 6, CLK_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
@@ -115,13 +115,13 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_FIXED("sasyncperd2",R8A779F0_CLK_SASYNCPERD2, CLK_SASYNCPER,2, 1),
DEF_FIXED("sasyncperd4",R8A779F0_CLK_SASYNCPERD4, CLK_SASYNCPER,4, 1),
- DEF_GEN4_SDH("sd0h", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870),
- DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, 0x870),
+ DEF_GEN4_SDH("sd0h", R8A779F0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR),
+ DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, CPG_SD0CKCR),
DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC),
- DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+ DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR),
DEF_GEN4_OSC("osc", R8A779F0_CLK_OSC, CLK_EXTAL, 8),
DEF_GEN4_MDSEL("r", R8A779F0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
@@ -187,12 +187,12 @@ static const unsigned int r8a779f0_crit_mod_clks[] __initconst = {
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 200, 1, 150, 1, 200, 1, 0, 0, 200, 1, 134, 1, 15, },
- { 1, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 38, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 200, 1, 200, 1, 15, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 160, 1, 160, 1, 38, },
};
static int __init r8a779f0_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index c4b1938db76b..55c8dd032fc3 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -66,13 +66,13 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
- DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
- DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2_VAR, CLK_MAIN),
- DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
- DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN),
- DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_GEN4_PLL_F8_25(".pll1", 1, CLK_PLL1, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll2", 2, CLK_PLL2, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll3", 3, CLK_PLL3, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll4", 4, CLK_PLL4, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
@@ -146,14 +146,14 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1),
DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1),
DEF_FIXED("vcbusd2", R8A779G0_CLK_VCBUSD2, CLK_VC, 2, 1),
- DEF_DIV6P1("canfd", R8A779G0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
- DEF_DIV6P1("csi", R8A779G0_CLK_CSI, CLK_PLL5_DIV4, 0x880),
+ DEF_DIV6P1("canfd", R8A779G0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR),
+ DEF_DIV6P1("csi", R8A779G0_CLK_CSI, CLK_PLL5_DIV4, CPG_CSICKCR),
DEF_FIXED("dsiref", R8A779G0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1),
- DEF_DIV6P1("dsiext", R8A779G0_CLK_DSIEXT, CLK_PLL5_DIV4, 0x884),
+ DEF_DIV6P1("dsiext", R8A779G0_CLK_DSIEXT, CLK_PLL5_DIV4, CPG_DSIEXTCKCR),
- DEF_GEN4_SDH("sd0h", R8A779G0_CLK_SD0H, CLK_SDSRC, 0x870),
- DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, R8A779G0_CLK_SD0H, 0x870),
- DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+ DEF_GEN4_SDH("sd0h", R8A779G0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR),
+ DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, R8A779G0_CLK_SD0H, CPG_SD0CKCR),
+ DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR),
DEF_BASE("rpc", R8A779G0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779G0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779G0_CLK_RPC),
@@ -258,12 +258,12 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 16, },
- { 1, 160, 1, 170, 1, 160, 1, 120, 1, 160, 1, 140, 1, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 32, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 192, 1, 192, 1, 32, },
};
static int __init r8a779g0_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
index 16a2e26abcc7..e20c048bfa9b 100644
--- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -63,19 +63,19 @@ enum clk_ids {
MOD_CLK_BASE
};
-static const struct cpg_core_clk r8a779h0_core_clks[] = {
+static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
- DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
- DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
- DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN),
+ DEF_GEN4_PLL_F8_25(".pll1", 1, CLK_PLL1, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll2", 2, CLK_PLL2, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll3", 3, CLK_PLL3, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll4", 4, CLK_PLL4, CLK_MAIN),
DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
- DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+ DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
@@ -156,14 +156,14 @@ static const struct cpg_core_clk r8a779h0_core_clks[] = {
DEF_FIXED("viobusd2", R8A779H0_CLK_VIOBUSD2, CLK_VIOSRC, 2, 1),
DEF_FIXED("vcbusd1", R8A779H0_CLK_VCBUSD1, CLK_VCSRC, 1, 1),
DEF_FIXED("vcbusd2", R8A779H0_CLK_VCBUSD2, CLK_VCSRC, 2, 1),
- DEF_DIV6P1("canfd", R8A779H0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
- DEF_DIV6P1("csi", R8A779H0_CLK_CSI, CLK_PLL5_DIV4, 0x880),
+ DEF_DIV6P1("canfd", R8A779H0_CLK_CANFD, CLK_PLL5_DIV4, CPG_CANFDCKCR),
+ DEF_DIV6P1("csi", R8A779H0_CLK_CSI, CLK_PLL5_DIV4, CPG_CSICKCR),
DEF_FIXED("dsiref", R8A779H0_CLK_DSIREF, CLK_PLL5_DIV4, 48, 1),
- DEF_DIV6P1("dsiext", R8A779H0_CLK_DSIEXT, CLK_PLL5_DIV4, 0x884),
- DEF_DIV6P1("mso", R8A779H0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+ DEF_DIV6P1("dsiext", R8A779H0_CLK_DSIEXT, CLK_PLL5_DIV4, CPG_DSIEXTCKCR),
+ DEF_DIV6P1("mso", R8A779H0_CLK_MSO, CLK_PLL5_DIV4, CPG_MSOCKCR),
- DEF_GEN4_SDH("sd0h", R8A779H0_CLK_SD0H, CLK_SDSRC, 0x870),
- DEF_GEN4_SD("sd0", R8A779H0_CLK_SD0, R8A779H0_CLK_SD0H, 0x870),
+ DEF_GEN4_SDH("sd0h", R8A779H0_CLK_SD0H, CLK_SDSRC, CPG_SD0CKCR),
+ DEF_GEN4_SD("sd0", R8A779H0_CLK_SD0, R8A779H0_CLK_SD0H, CPG_SD0CKCR),
DEF_BASE("rpc", R8A779H0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779H0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779H0_CLK_RPC),
@@ -172,10 +172,11 @@ static const struct cpg_core_clk r8a779h0_core_clks[] = {
DEF_GEN4_MDSEL("r", R8A779H0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
};
-static const struct mssr_mod_clk r8a779h0_mod_clks[] = {
+static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("avb0:rgmii0", 211, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("avb1:rgmii1", 212, R8A779H0_CLK_S0D8_HSC),
DEF_MOD("avb2:rgmii2", 213, R8A779H0_CLK_S0D8_HSC),
+ DEF_MOD("canfd0", 328, R8A779H0_CLK_SASYNCPERD2),
DEF_MOD("csi40", 331, R8A779H0_CLK_CSI),
DEF_MOD("csi41", 400, R8A779H0_CLK_CSI),
DEF_MOD("hscif0", 514, R8A779H0_CLK_SASYNCPERD1),
@@ -195,6 +196,8 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] = {
DEF_MOD("msi3", 621, R8A779H0_CLK_MSO),
DEF_MOD("msi4", 622, R8A779H0_CLK_MSO),
DEF_MOD("msi5", 623, R8A779H0_CLK_MSO),
+ DEF_MOD("pcie0", 624, R8A779H0_CLK_S0D2_HSC),
+ DEF_MOD("pwm", 628, R8A779H0_CLK_SASYNCPERD4),
DEF_MOD("rpc-if", 629, R8A779H0_CLK_RPCD2),
DEF_MOD("scif0", 702, R8A779H0_CLK_SASYNCPERD4),
DEF_MOD("scif1", 703, R8A779H0_CLK_SASYNCPERD4),
@@ -252,12 +255,12 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] = {
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
- { 1, 192, 1, 240, 1, 192, 1, 240, 1, 192, 1, 168, 1, 16, },
- { 1, 160, 1, 200, 1, 160, 1, 200, 1, 160, 1, 140, 1, 19, },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- { 2, 192, 1, 240, 1, 192, 1, 240, 1, 192, 1, 168, 1, 32, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 192, 1, 192, 1, 32, },
};
static int __init r8a779h0_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 16acc95f3c62..c3c2b0c43983 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -52,6 +52,8 @@ enum clk_ids {
CLK_PLL5,
CLK_PLL5_500,
CLK_PLL5_250,
+ CLK_PLL5_FOUTPOSTDIV,
+ CLK_DSI_DIV,
#endif
CLK_PLL6,
CLK_PLL6_250,
@@ -120,6 +122,7 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
DEF_FIXED(".pll5_500", CLK_PLL5_500, CLK_PLL5, 1, 6),
DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_500, 1, 2),
+ DEF_PLL5_FOUTPOSTDIV(".pll5_foutpostdiv", CLK_PLL5_FOUTPOSTDIV, CLK_EXTAL),
#endif
DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
@@ -146,6 +149,8 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
#ifdef CONFIG_ARM64
DEF_FIXED("M2", R9A07G043_CLK_M2, CLK_PLL3_533, 1, 2),
DEF_FIXED("M2_DIV2", CLK_M2_DIV2, R9A07G043_CLK_M2, 1, 2),
+ DEF_DSI_DIV("DSI_DIV", CLK_DSI_DIV, CLK_PLL5_FOUTPOSTDIV, CLK_SET_RATE_PARENT),
+ DEF_FIXED("M3", R9A07G043_CLK_M3, CLK_DSI_DIV, 1, 1),
#endif
};
@@ -209,6 +214,12 @@ static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
0x564, 2),
DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
0x564, 3),
+ DEF_COUPLED("lcdc_clk_a", R9A07G043_LCDC_CLK_A, R9A07G043_CLK_M0,
+ 0x56c, 0),
+ DEF_COUPLED("lcdc_clk_p", R9A07G043_LCDC_CLK_P, R9A07G043_CLK_ZT,
+ 0x56c, 0),
+ DEF_MOD("lcdc_clk_d", R9A07G043_LCDC_CLK_D, R9A07G043_CLK_M3,
+ 0x56c, 1),
#endif
DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
0x570, 0),
@@ -309,6 +320,7 @@ static const struct rzg2l_reset r9a07g043_resets[] = {
DEF_RST(R9A07G043_CRU_CMN_RSTB, 0x864, 0),
DEF_RST(R9A07G043_CRU_PRESETN, 0x864, 1),
DEF_RST(R9A07G043_CRU_ARESETN, 0x864, 2),
+ DEF_RST(R9A07G043_LCDC_RESET_N, 0x86c, 0),
#endif
DEF_RST(R9A07G043_SSI0_RST_M2_REG, 0x870, 0),
DEF_RST(R9A07G043_SSI1_RST_M2_REG, 0x870, 1),
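The LCDC additions hang the pixel clock off the new CLK_PLL5_FOUTPOSTDIV -> DSI_DIV -> M3 chain, and DEF_COUPLED() registers the AXI and APB clocks behind a single enable bit (0x56c bit 0). A sketch of the coupled-gate semantics, assuming the sibling tracking rzg2l-cpg uses; the type and names are illustrative:

#include <linux/types.h>

struct coupled_half {
	struct coupled_half *sibling;
	bool enabled;
};

/* The shared bit is only really cleared once both halves are disabled. */
static bool coupled_disable(struct coupled_half *half)
{
	half->enabled = false;
	return !half->sibling->enabled;	/* true: write the hardware bit */
}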
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index a891bfc3ab5a..1ce40fb51f13 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -193,6 +193,7 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0),
DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1),
DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0),
+ DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1),
DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0),
DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1),
DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0),
@@ -207,6 +208,10 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
+ DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0),
+ DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1),
+ DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2),
+ DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3),
DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0),
DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0),
DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8),
@@ -226,10 +231,16 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A08G045_GIC600_DBG_GICRESET_N, 0x814, 1),
DEF_RST(R9A08G045_IA55_RESETN, 0x818, 0),
+ DEF_RST(R9A08G045_DMAC_ARESETN, 0x82c, 0),
+ DEF_RST(R9A08G045_DMAC_RST_ASYNC, 0x82c, 1),
DEF_RST(R9A08G045_WDT0_PRESETN, 0x848, 0),
DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
+ DEF_RST(R9A08G045_USB_U2H0_HRESETN, 0x878, 0),
+ DEF_RST(R9A08G045_USB_U2H1_HRESETN, 0x878, 1),
+ DEF_RST(R9A08G045_USB_U2P_EXL_SYSRST, 0x878, 2),
+ DEF_RST(R9A08G045_USB_PRESETN, 0x878, 3),
DEF_RST(R9A08G045_ETH0_RST_HW_N, 0x87c, 0),
DEF_RST(R9A08G045_ETH1_RST_HW_N, 0x87c, 1),
DEF_RST(R9A08G045_I2C0_MRST, 0x880, 0),
@@ -277,6 +288,15 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)),
RZG2L_PD_F_NONE),
+ DEF_PD("usb0", R9A08G045_PD_USB0,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("usb1", R9A08G045_PD_USB1,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)),
+ RZG2L_PD_F_NONE),
+ DEF_PD("usb-phy", R9A08G045_PD_USB_PHY,
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)),
+ RZG2L_PD_F_NONE),
DEF_PD("eth0", R9A08G045_PD_ETHER0,
DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)),
RZG2L_PD_F_NONE),
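Each new DEF_PD() entry ties a power domain to bits in CPG_BUS_PERI_COM_MSTOP, e.g. GENMASK(6, 5) parks both USB0 bus interfaces together while usb1 and the PHY get single bits. A sketch of how such a mask is typically applied, assuming the RZ convention that the upper 16 bits of these CPG registers act as a write-enable mask; the helper is invented:

#include <linux/io.h>
#include <linux/types.h>

/* Assert (stop) or release the MSTOP bits belonging to one domain. */
static void pd_set_mstop(void __iomem *base, u32 off, u16 mask, bool stop)
{
	/* bits [31:16] select which of the low bits the write affects */
	writel(((u32)mask << 16) | (stop ? mask : 0), base + off);
}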
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
new file mode 100644
index 000000000000..3ee32db5c0af
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) CPG driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g057-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
+ CLK_PLLCLN_DIV16,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14),
+};
+
+static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
+ DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
+ DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
+ DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
+ DEF_RST(7, 0, 3, 1), /* GTM_3_PRESETZ */
+ DEF_RST(7, 1, 3, 2), /* GTM_4_PRESETZ */
+ DEF_RST(7, 2, 3, 3), /* GTM_5_PRESETZ */
+ DEF_RST(7, 3, 3, 4), /* GTM_6_PRESETZ */
+ DEF_RST(7, 4, 3, 5), /* GTM_7_PRESETZ */
+ DEF_RST(7, 5, 3, 6), /* WDT_0_RESET */
+ DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
+ DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
+ DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+};
+
+const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g057_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g057_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g057_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g057_mod_clks),
+ .num_hw_mod_clks = 25 * 16,
+
+ /* Resets */
+ .resets = r9a09g057_resets,
+ .num_resets = ARRAY_SIZE(r9a09g057_resets),
+};
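Each DEF_MOD() entry above names its gate by a (CLKON register index, bit) pair plus a matching (CLKMON index, bit) pair, and num_hw_mod_clks = 25 * 16 sizes the ID space as 25 sixteen-bit registers. A sketch of the implied linearisation; the register naming and stride are assumptions, not taken from this patch:

/*
 * e.g. DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6):
 * gate in CLKON register 10, bit 6 -> ID 166; monitored in CLKMON
 * register 5, bit 6.
 */
static unsigned int rzv2h_mod_clk_id(unsigned int onindex,
				     unsigned int onbit)
{
	return onindex * 16 + onbit;
}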
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index 77a4bb3e17f3..31aa790fd003 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -45,7 +45,6 @@ static u32 cpg_mode __initdata;
#define CPG_PLL6CR1 0x8d8
#define CPG_PLLxCR0_KICK BIT(31)
-#define CPG_PLLxCR0_NI GENMASK(27, 20) /* Integer mult. factor */
#define CPG_PLLxCR0_SSMODE GENMASK(18, 16) /* PLL mode */
#define CPG_PLLxCR0_SSMODE_FM BIT(18) /* Fractional Multiplication */
#define CPG_PLLxCR0_SSMODE_DITH BIT(17) /* Frequency Dithering */
@@ -53,35 +52,57 @@ static u32 cpg_mode __initdata;
#define CPG_PLLxCR0_SSFREQ GENMASK(14, 8) /* SSCG Modulation Frequency */
#define CPG_PLLxCR0_SSDEPT GENMASK(6, 0) /* SSCG Modulation Depth */
-#define SSMODE_FM BIT(2) /* Fractional Multiplication */
-#define SSMODE_DITHER BIT(1) /* Frequency Dithering */
-#define SSMODE_CENTER BIT(0) /* Center (vs. Down) Spread Dithering */
+/* Fractional 8.25 PLL */
+#define CPG_PLLxCR0_NI8 GENMASK(27, 20) /* Integer mult. factor */
+#define CPG_PLLxCR1_NF25 GENMASK(24, 0) /* Fractional mult. factor */
+
+/* Fractional 9.24 PLL */
+#define CPG_PLLxCR0_NI9 GENMASK(28, 20) /* Integer mult. factor */
+#define CPG_PLLxCR1_NF24 GENMASK(23, 0) /* Fractional mult. factor */
+
+#define CPG_PLLxCR_STC GENMASK(30, 24) /* R-Car V3U PLLxCR */
+
+#define CPG_RPCCKCR 0x874 /* RPC Clock Freq. Control Register */
+
+#define CPG_SD0CKCR1 0x8a4 /* SD-IF0 Clock Freq. Control Reg. 1 */
+
+#define CPG_SD0CKCR1_SDSRC_SEL GENMASK(30, 29) /* SDSRC clock freq. select */
/* PLL Clocks */
struct cpg_pll_clk {
struct clk_hw hw;
void __iomem *pllcr0_reg;
+ void __iomem *pllcr1_reg;
void __iomem *pllecr_reg;
u32 pllecr_pllst_mask;
};
#define to_pll_clk(_hw) container_of(_hw, struct cpg_pll_clk, hw)
-static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long cpg_pll_8_25_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
- unsigned int mult;
-
- mult = FIELD_GET(CPG_PLLxCR0_NI, readl(pll_clk->pllcr0_reg)) + 1;
+ u32 cr0 = readl(pll_clk->pllcr0_reg);
+ unsigned int ni, nf;
+ unsigned long rate;
+
+ ni = (FIELD_GET(CPG_PLLxCR0_NI8, cr0) + 1) * 2;
+ rate = parent_rate * ni;
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
+ nf = FIELD_GET(CPG_PLLxCR1_NF25, readl(pll_clk->pllcr1_reg));
+ rate += mul_u64_u32_shr(parent_rate, nf, 24);
+ }
- return parent_rate * mult * 2;
+ return rate;
}
-static int cpg_pll_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
+static int cpg_pll_8_25_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned int min_mult, max_mult, mult;
+ struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
+ unsigned int min_mult, max_mult, ni, nf;
+ u32 cr0 = readl(pll_clk->pllcr0_reg);
unsigned long prate;
prate = req->best_parent_rate * 2;
@@ -90,28 +111,58 @@ static int cpg_pll_clk_determine_rate(struct clk_hw *hw,
if (max_mult < min_mult)
return -EINVAL;
- mult = DIV_ROUND_CLOSEST_ULL(req->rate, prate);
- mult = clamp(mult, min_mult, max_mult);
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
+ ni = div64_ul(req->rate, prate);
+ if (ni < min_mult) {
+ ni = min_mult;
+ nf = 0;
+ } else {
+ ni = min(ni, max_mult);
+ nf = div64_ul((u64)(req->rate - prate * ni) << 24,
+ req->best_parent_rate);
+ }
+ } else {
+ ni = DIV_ROUND_CLOSEST_ULL(req->rate, prate);
+ ni = clamp(ni, min_mult, max_mult);
+ nf = 0;
+ }
+ req->rate = prate * ni + mul_u64_u32_shr(req->best_parent_rate, nf, 24);
- req->rate = prate * mult;
return 0;
}
-static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int cpg_pll_8_25_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
- unsigned int mult;
+ unsigned long prate = parent_rate * 2;
+ u32 cr0 = readl(pll_clk->pllcr0_reg);
+ unsigned int ni, nf;
u32 val;
- mult = DIV_ROUND_CLOSEST_ULL(rate, parent_rate * 2);
- mult = clamp(mult, 1U, 256U);
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
+ ni = div64_ul(rate, prate);
+ if (ni < 1) {
+ ni = 1;
+ nf = 0;
+ } else {
+ ni = min(ni, 256U);
+ nf = div64_ul((u64)(rate - prate * ni) << 24,
+ parent_rate);
+ }
+ } else {
+ ni = DIV_ROUND_CLOSEST_ULL(rate, prate);
+ ni = clamp(ni, 1U, 256U);
+ }
if (readl(pll_clk->pllcr0_reg) & CPG_PLLxCR0_KICK)
return -EBUSY;
- cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI,
- FIELD_PREP(CPG_PLLxCR0_NI, mult - 1));
+ cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI8,
+ FIELD_PREP(CPG_PLLxCR0_NI8, ni - 1));
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM)
+ cpg_reg_modify(pll_clk->pllcr1_reg, CPG_PLLxCR1_NF25,
+ FIELD_PREP(CPG_PLLxCR1_NF25, nf));
/*
* Set KICK bit in PLLxCR0 to update hardware setting and wait for
@@ -132,22 +183,55 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val & pll_clk->pllecr_pllst_mask, 0, 1000);
}
-static const struct clk_ops cpg_pll_clk_ops = {
- .recalc_rate = cpg_pll_clk_recalc_rate,
- .determine_rate = cpg_pll_clk_determine_rate,
- .set_rate = cpg_pll_clk_set_rate,
+static const struct clk_ops cpg_pll_f8_25_clk_ops = {
+ .recalc_rate = cpg_pll_8_25_clk_recalc_rate,
+};
+
+static const struct clk_ops cpg_pll_v8_25_clk_ops = {
+ .recalc_rate = cpg_pll_8_25_clk_recalc_rate,
+ .determine_rate = cpg_pll_8_25_clk_determine_rate,
+ .set_rate = cpg_pll_8_25_clk_set_rate,
+};
+
+static unsigned long cpg_pll_9_24_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
+ u32 cr0 = readl(pll_clk->pllcr0_reg);
+ unsigned int ni, nf;
+ unsigned long rate;
+
+ ni = FIELD_GET(CPG_PLLxCR0_NI9, cr0) + 1;
+ rate = parent_rate * ni;
+ if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
+ nf = FIELD_GET(CPG_PLLxCR1_NF24, readl(pll_clk->pllcr1_reg));
+ rate += mul_u64_u32_shr(parent_rate, nf, 24);
+ } else {
+ rate *= 2;
+ }
+
+ return rate;
+}
+
+static const struct clk_ops cpg_pll_f9_24_clk_ops = {
+ .recalc_rate = cpg_pll_9_24_clk_recalc_rate,
};
static struct clk * __init cpg_pll_clk_register(const char *name,
const char *parent_name,
void __iomem *base,
- unsigned int cr0_offset,
- unsigned int cr1_offset,
- unsigned int index)
-
+ unsigned int index,
+ const struct clk_ops *ops)
{
- struct cpg_pll_clk *pll_clk;
+ static const struct { u16 cr0, cr1; } pll_cr_offsets[] __initconst = {
+ [1 - 1] = { CPG_PLL1CR0, CPG_PLL1CR1 },
+ [2 - 1] = { CPG_PLL2CR0, CPG_PLL2CR1 },
+ [3 - 1] = { CPG_PLL3CR0, CPG_PLL3CR1 },
+ [4 - 1] = { CPG_PLL4CR0, CPG_PLL4CR1 },
+ [6 - 1] = { CPG_PLL6CR0, CPG_PLL6CR1 },
+ };
struct clk_init_data init = {};
+ struct cpg_pll_clk *pll_clk;
struct clk *clk;
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
@@ -155,25 +239,23 @@ static struct clk * __init cpg_pll_clk_register(const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &cpg_pll_clk_ops;
+ init.ops = ops;
init.parent_names = &parent_name;
init.num_parents = 1;
pll_clk->hw.init = &init;
- pll_clk->pllcr0_reg = base + cr0_offset;
+ pll_clk->pllcr0_reg = base + pll_cr_offsets[index - 1].cr0;
+ pll_clk->pllcr1_reg = base + pll_cr_offsets[index - 1].cr1;
pll_clk->pllecr_reg = base + CPG_PLLECR;
pll_clk->pllecr_pllst_mask = CPG_PLLECR_PLLST(index);
- /* Disable Fractional Multiplication and Frequency Dithering */
- writel(0, base + cr1_offset);
- cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_SSMODE, 0);
-
clk = clk_register(NULL, &pll_clk->hw);
if (IS_ERR(clk))
kfree(pll_clk);
return clk;
}
+
/*
* Z0 Clock & Z1 Clock
*/
@@ -358,51 +440,41 @@ struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
div = cpg_pll_config->pll1_div;
break;
- case CLK_TYPE_GEN4_PLL2_VAR:
- /*
- * PLL2 is implemented as a custom clock, to change the
- * multiplier when cpufreq changes between normal and boost
- * modes.
- */
- return cpg_pll_clk_register(core->name, __clk_get_name(parent),
- base, CPG_PLL2CR0, CPG_PLL2CR1, 2);
-
- case CLK_TYPE_GEN4_PLL2:
- mult = cpg_pll_config->pll2_mult;
- div = cpg_pll_config->pll2_div;
- break;
-
- case CLK_TYPE_GEN4_PLL3:
- mult = cpg_pll_config->pll3_mult;
- div = cpg_pll_config->pll3_div;
- break;
-
- case CLK_TYPE_GEN4_PLL4:
- mult = cpg_pll_config->pll4_mult;
- div = cpg_pll_config->pll4_div;
- break;
-
case CLK_TYPE_GEN4_PLL5:
mult = cpg_pll_config->pll5_mult;
div = cpg_pll_config->pll5_div;
break;
- case CLK_TYPE_GEN4_PLL6:
- mult = cpg_pll_config->pll6_mult;
- div = cpg_pll_config->pll6_div;
- break;
-
case CLK_TYPE_GEN4_PLL2X_3X:
value = readl(base + core->offset);
- mult = (((value >> 24) & 0x7f) + 1) * 2;
+ mult = (FIELD_GET(CPG_PLLxCR_STC, value) + 1) * 2;
break;
+ case CLK_TYPE_GEN4_PLL_F8_25:
+ return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+ base, core->offset,
+ &cpg_pll_f8_25_clk_ops);
+
+ case CLK_TYPE_GEN4_PLL_V8_25:
+ return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+ base, core->offset,
+ &cpg_pll_v8_25_clk_ops);
+
+ case CLK_TYPE_GEN4_PLL_V9_24:
+ /* Variable fractional 9.24 is not yet supported, using fixed */
+ fallthrough;
+ case CLK_TYPE_GEN4_PLL_F9_24:
+ return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+ base, core->offset,
+ &cpg_pll_f9_24_clk_ops);
+
case CLK_TYPE_GEN4_Z:
return cpg_z_clk_register(core->name, __clk_get_name(parent),
base, core->div, core->offset);
case CLK_TYPE_GEN4_SDSRC:
- div = ((readl(base + SD0CKCR1) >> 29) & 0x03) + 4;
+ value = readl(base + CPG_SD0CKCR1);
+ div = FIELD_GET(CPG_SD0CKCR1_SDSRC_SEL, value) + 4;
break;
case CLK_TYPE_GEN4_SDH:
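The rework replaces the single integer-only PLL implementation with ops named after the hardware's multiplier formats: "8.25" means an 8-bit integer field (NI) plus a 25-bit fractional field (NF), "9.24" nine integer and 24 fractional bits, and the F/V prefix marks fixed versus variable (rate-settable) instances. A worked sketch of the 8.25 rate formula from cpg_pll_8_25_clk_recalc_rate() above, with invented register values:

#include <linux/types.h>

/*
 * parent_rate = 16666666 Hz, NI8 = 95, NF25 = 0:
 *   ni   = (95 + 1) * 2   = 192
 *   rate = 16666666 * 192 = 3199999872 Hz  (~3.2 GHz)
 *
 * NF25 is 25 bits wide but shifted right by 24, so the fractional term
 * spans [0, 2) reference periods, matching the doubled integer part.
 */
static u64 pll_8_25_rate(u64 parent_rate, u32 ni8, u32 nf25)
{
	return parent_rate * ((u64)(ni8 + 1) * 2) +
	       ((parent_rate * nf25) >> 24);
}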
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
index 006537e29e4e..717fd148464f 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.h
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -12,13 +12,12 @@
enum rcar_gen4_clk_types {
CLK_TYPE_GEN4_MAIN = CLK_TYPE_CUSTOM,
CLK_TYPE_GEN4_PLL1,
- CLK_TYPE_GEN4_PLL2,
- CLK_TYPE_GEN4_PLL2_VAR,
CLK_TYPE_GEN4_PLL2X_3X, /* r8a779a0 only */
- CLK_TYPE_GEN4_PLL3,
- CLK_TYPE_GEN4_PLL4,
CLK_TYPE_GEN4_PLL5,
- CLK_TYPE_GEN4_PLL6,
+ CLK_TYPE_GEN4_PLL_F8_25, /* Fixed fractional 8.25 PLL */
+ CLK_TYPE_GEN4_PLL_V8_25, /* Variable fractional 8.25 PLL */
+ CLK_TYPE_GEN4_PLL_F9_24, /* Fixed fractional 9.24 PLL */
+ CLK_TYPE_GEN4_PLL_V9_24, /* Variable fractional 9.24 PLL */
CLK_TYPE_GEN4_SDSRC,
CLK_TYPE_GEN4_SDH,
CLK_TYPE_GEN4_SD,
@@ -47,6 +46,18 @@ enum rcar_gen4_clk_types {
#define DEF_GEN4_OSC(_name, _id, _parent, _div) \
DEF_BASE(_name, _id, CLK_TYPE_GEN4_OSC, _parent, .div = _div)
+#define DEF_GEN4_PLL_F8_25(_name, _idx, _id, _parent) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_F8_25, _parent, .offset = _idx)
+
+#define DEF_GEN4_PLL_V8_25(_name, _idx, _id, _parent) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_V8_25, _parent, .offset = _idx)
+
+#define DEF_GEN4_PLL_F9_24(_name, _idx, _id, _parent) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_F9_24, _parent, .offset = _idx)
+
+#define DEF_GEN4_PLL_V9_24(_name, _idx, _id, _parent) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL_V9_24, _parent, .offset = _idx)
+
#define DEF_GEN4_Z(_name, _id, _type, _parent, _div, _offset) \
DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset)
@@ -54,21 +65,16 @@ struct rcar_gen4_cpg_pll_config {
u8 extal_div;
u8 pll1_mult;
u8 pll1_div;
- u8 pll2_mult;
- u8 pll2_div;
- u8 pll3_mult;
- u8 pll3_div;
- u8 pll4_mult;
- u8 pll4_div;
u8 pll5_mult;
u8 pll5_div;
- u8 pll6_mult;
- u8 pll6_div;
u8 osc_prediv;
};
-#define CPG_RPCCKCR 0x874
-#define SD0CKCR1 0x8a4
+#define CPG_SD0CKCR 0x870 /* SD-IF0 Clock Frequency Control Register */
+#define CPG_CANFDCKCR 0x878 /* CAN-FD Clock Frequency Control Register */
+#define CPG_MSOCKCR 0x87c /* MSIOF Clock Frequency Control Register */
+#define CPG_CSICKCR 0x880 /* CSI Clock Frequency Control Register */
+#define CPG_DSIEXTCKCR 0x884 /* DSI Clock Frequency Control Register */
struct clk *rcar_gen4_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
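
The F/V prefix in the new PLL clock types distinguishes fixed from variable (runtime-programmable) PLLs, and the "8.25"/"9.24" suffix names the fixed-point split of the multiplier: 8 or 9 integer bits over 25 or 24 fractional bits (V9.24 falls back to the fixed 9.24 ops for now, per the comment above). A hedged sketch of a rate derivation under that reading (variable names are hypothetical, not from the driver):

	#include <linux/math64.h>

	/* assumed 8.25 layout: 8-bit integer part, 25-bit fraction */
	u64 mult = ((u64)int_part << 25) | frac_part;
	unsigned long rate = mul_u64_u64_shr(parent_rate, mult, 25);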
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index de4896cf5f40..421ae973ea8e 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -212,7 +212,7 @@ static struct platform_driver rcar_usb2_clock_sel_driver = {
.pm = &rcar_usb2_clock_sel_pm_ops,
},
.probe = rcar_usb2_clock_sel_probe,
- .remove_new = rcar_usb2_clock_sel_remove,
+ .remove = rcar_usb2_clock_sel_remove,
};
builtin_platform_driver(rcar_usb2_clock_sel_driver);
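
The .remove_new to .remove rename is mechanical: .remove_new was the transitional name for the void-returning remove callback, and now that platform_driver::remove() itself returns void the driver moves back to the canonical member with an unchanged body. The signature the rename relies on:

	/* the driver's existing callback already returns void */
	static void rcar_usb2_clock_sel_remove(struct platform_device *pdev);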
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 04b78064d4e0..88bf39e8c79c 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -339,8 +339,7 @@ static const struct clk_ops rzg3s_div_clk_ops = {
};
static struct clk * __init
-rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
- void __iomem *base, struct rzg2l_cpg_priv *priv)
+rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
struct div_hw_data *div_hw_data;
struct clk_init_data init = {};
@@ -351,7 +350,7 @@ rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
u32 max = 0;
int ret;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -400,16 +399,15 @@ rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
- struct clk **clks,
- void __iomem *base,
struct rzg2l_cpg_priv *priv)
{
+ void __iomem *base = priv->base;
struct device *dev = priv->dev;
const struct clk *parent;
const char *parent_name;
struct clk_hw *clk_hw;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -440,7 +438,6 @@ rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
- void __iomem *base,
struct rzg2l_cpg_priv *priv)
{
const struct clk_hw *clk_hw;
@@ -448,7 +445,7 @@ rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
core->parent_names, core->num_parents,
core->flag,
- base + GET_REG_OFFSET(core->conf),
+ priv->base + GET_REG_OFFSET(core->conf),
GET_SHIFT(core->conf),
GET_WIDTH(core->conf),
core->mux_flags, &priv->rmw_lock);
@@ -508,7 +505,6 @@ static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
- void __iomem *base,
struct rzg2l_cpg_priv *priv)
{
struct sd_mux_hw_data *sd_mux_hw_data;
@@ -652,7 +648,6 @@ static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
- struct clk **clks,
struct rzg2l_cpg_priv *priv)
{
struct dsi_div_hw_data *clk_hw_data;
@@ -662,7 +657,7 @@ rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
struct clk_hw *clk_hw;
int ret;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -900,7 +895,6 @@ static const struct clk_ops rzg2l_cpg_sipll5_ops = {
static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
- struct clk **clks,
struct rzg2l_cpg_priv *priv)
{
const struct clk *parent;
@@ -910,7 +904,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
struct clk_hw *clk_hw;
int ret;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -1013,8 +1007,6 @@ static const struct clk_ops rzg3s_cpg_pll_ops = {
static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
- struct clk **clks,
- void __iomem *base,
struct rzg2l_cpg_priv *priv,
const struct clk_ops *ops)
{
@@ -1023,8 +1015,9 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
struct clk_init_data init;
const char *parent_name;
struct pll_clk *pll_clk;
+ int ret;
- parent = clks[core->parent & 0xffff];
+ parent = priv->clks[core->parent];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -1041,11 +1034,15 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->hw.init = &init;
pll_clk->conf = core->conf;
- pll_clk->base = base;
+ pll_clk->base = priv->base;
pll_clk->priv = priv;
pll_clk->type = core->type;
- return clk_register(NULL, &pll_clk->hw);
+ ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pll_clk->hw.clk;
}
static struct clk
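
Every rzg2l registration path in this file follows the same devres pattern: helpers now take only core and priv (base and clks come from priv), and clk_register() becomes devm_clk_hw_register(), tying each clock's lifetime to the CPG device so no explicit unregistration is needed. Reduced to a sketch:

	/* generic shape of the conversion used throughout this file */
	static struct clk *register_hw(struct device *dev, struct clk_hw *hw)
	{
		int ret = devm_clk_hw_register(dev, hw);

		return ret ? ERR_PTR(ret) : hw->clk;
	}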
@@ -1102,6 +1099,7 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
struct device *dev = priv->dev;
unsigned int id = core->id, div = core->div;
const char *parent_name;
+ struct clk_hw *clk_hw;
WARN_DEBUG(id >= priv->num_core_clks);
WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
@@ -1124,39 +1122,40 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
}
parent_name = __clk_get_name(parent);
- clk = clk_register_fixed_factor(NULL, core->name,
- parent_name, CLK_SET_RATE_PARENT,
- core->mult, div);
+ clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
+ CLK_SET_RATE_PARENT,
+ core->mult, div);
+ if (IS_ERR(clk_hw))
+ clk = ERR_CAST(clk_hw);
+ else
+ clk = clk_hw->clk;
break;
case CLK_TYPE_SAM_PLL:
- clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
- &rzg2l_cpg_pll_ops);
+ clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
break;
case CLK_TYPE_G3S_PLL:
- clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
- &rzg3s_cpg_pll_ops);
+ clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
break;
case CLK_TYPE_SIPLL5:
- clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
+ clk = rzg2l_cpg_sipll5_register(core, priv);
break;
case CLK_TYPE_DIV:
- clk = rzg2l_cpg_div_clk_register(core, priv->clks,
- priv->base, priv);
+ clk = rzg2l_cpg_div_clk_register(core, priv);
break;
case CLK_TYPE_G3S_DIV:
- clk = rzg3s_cpg_div_clk_register(core, priv->clks, priv->base, priv);
+ clk = rzg3s_cpg_div_clk_register(core, priv);
break;
case CLK_TYPE_MUX:
- clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
+ clk = rzg2l_cpg_mux_clk_register(core, priv);
break;
case CLK_TYPE_SD_MUX:
- clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
+ clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
break;
case CLK_TYPE_PLL5_4_MUX:
clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
break;
case CLK_TYPE_DSI_DIV:
- clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
+ clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
break;
default:
goto fail;
@@ -1337,6 +1336,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
struct clk *parent, *clk;
const char *parent_name;
unsigned int i;
+ int ret;
WARN_DEBUG(id < priv->num_core_clks);
WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
@@ -1380,10 +1380,13 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
clock->priv = priv;
clock->hw.init = &init;
- clk = clk_register(NULL, &clock->hw);
- if (IS_ERR(clk))
+ ret = devm_clk_hw_register(dev, &clock->hw);
+ if (ret) {
+ clk = ERR_PTR(ret);
goto fail;
+ }
+ clk = clock->hw.clk;
dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
priv->clks[id] = clk;
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
new file mode 100644
index 000000000000..b524a9d33610
--- /dev/null
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) Clock Pulse Generator
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ * Based on rzg2l-cpg.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2013 Ideas On Board SPRL
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+#include "rzv2h-cpg.h"
+
+#ifdef DEBUG
+#define WARN_DEBUG(x) WARN_ON(x)
+#else
+#define WARN_DEBUG(x) do { } while (0)
+#endif
+
+#define GET_CLK_ON_OFFSET(x) (0x600 + ((x) * 4))
+#define GET_CLK_MON_OFFSET(x) (0x800 + ((x) * 4))
+#define GET_RST_OFFSET(x) (0x900 + ((x) * 4))
+#define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4))
+
+#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
+#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
+#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
+#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val))
+
+#define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16)
+
+#define GET_MOD_CLK_ID(base, index, bit) \
+	((base) + ((index) * 16) + (bit))
+
+#define CPG_CLKSTATUS0 (0x700)
+
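
These helpers encode the RZ/V2H register layout: CLK_ON registers start at 0x600 with their monitors at 0x800, reset registers at 0x900 with monitors at 0xa00, and each 32-bit register controls 16 clocks (the upper half-word holds write-enable bits). Worked example:

	/* module clock at CLK_ON register 1, bit 3, with 10 core clocks */
	GET_MOD_CLK_ID(10, 1, 3);	/* 10 + 1 * 16 + 3 = 29 */
	GET_CLK_ON_OFFSET(1);		/* 0x604 */
	GET_CLK_MON_OFFSET(1);		/* 0x804 */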
+/**
+ * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
+ *
+ * @dev: CPG device
+ * @base: CPG register block base address
+ * @rmw_lock: protects register accesses
+ * @clks: Array containing all Core and Module Clocks
+ * @num_core_clks: Number of Core Clocks in clks[]
+ * @num_mod_clks: Number of Module Clocks in clks[]
+ * @resets: Array of resets
+ * @num_resets: Number of Module Resets in resets[]
+ * @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @rcdev: Reset controller entity
+ */
+struct rzv2h_cpg_priv {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t rmw_lock;
+
+ struct clk **clks;
+ unsigned int num_core_clks;
+ unsigned int num_mod_clks;
+ struct rzv2h_reset *resets;
+ unsigned int num_resets;
+ unsigned int last_dt_core_clk;
+
+ struct reset_controller_dev rcdev;
+};
+
+#define rcdev_to_priv(x) container_of(x, struct rzv2h_cpg_priv, rcdev)
+
+struct pll_clk {
+ struct rzv2h_cpg_priv *priv;
+ void __iomem *base;
+ struct clk_hw hw;
+ unsigned int conf;
+ unsigned int type;
+};
+
+#define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
+
+/**
+ * struct mod_clock - Module clock
+ *
+ * @priv: CPG private data
+ * @hw: handle between common and hardware-specific interfaces
+ * @on_index: CLK_ON register index
+ * @on_bit: ON/MON bit
+ * @mon_index: monitor register index
+ * @mon_bit: monitor bit
+ */
+struct mod_clock {
+ struct rzv2h_cpg_priv *priv;
+ struct clk_hw hw;
+ u8 on_index;
+ u8 on_bit;
+ s8 mon_index;
+ u8 mon_bit;
+};
+
+#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
+
+/**
+ * struct ddiv_clk - DDIV clock
+ *
+ * @priv: CPG private data
+ * @div: divider clk
+ * @mon: monitor bit in CPG_CLKSTATUS0 register
+ */
+struct ddiv_clk {
+ struct rzv2h_cpg_priv *priv;
+ struct clk_divider div;
+ u8 mon;
+};
+
+#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+
+static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ unsigned int clk1, clk2;
+ u64 rate;
+
+ if (!PLL_CLK_ACCESS(pll_clk->conf))
+ return 0;
+
+ clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
+ clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
+
+ rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
+ 16 + SDIV(clk2));
+
+ return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
+}
+
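
In words, the recalc above computes fout = parent * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV), with KDIV a signed 16-bit fractional correction. A worked example with illustrative values (not taken from a real RZ/V2H configuration):

	/*
	 * parent = 24 MHz, MDIV = 200, KDIV = 0, PDIV = 3, SDIV = 1:
	 * mul_u64_u32_shr(24000000, 200 << 16, 17) = 2400000000
	 * DIV_ROUND_CLOSEST_ULL(2400000000, 3)     = 800 MHz
	 */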
+static const struct clk_ops rzv2h_cpg_pll_ops = {
+ .recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
+};
+
+static struct clk * __init
+rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv,
+ const struct clk_ops *ops)
+{
+ void __iomem *base = priv->base;
+ struct device *dev = priv->dev;
+ struct clk_init_data init;
+ const struct clk *parent;
+ const char *parent_name;
+ struct pll_clk *pll_clk;
+ int ret;
+
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return ERR_PTR(-ENOMEM);
+
+ parent_name = __clk_get_name(parent);
+ init.name = core->name;
+ init.ops = ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll_clk->hw.init = &init;
+ pll_clk->conf = core->cfg.conf;
+ pll_clk->base = base;
+ pll_clk->priv = priv;
+ pll_clk->type = core->type;
+
+ ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pll_clk->hw.clk;
+}
+
+static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+
+ return divider_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags);
+}
+
+static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+
+ return divider_determine_rate(hw, req, divider->table, divider->width,
+ divider->flags);
+}
+
+static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
+{
+ u32 bitmask = BIT(mon);
+ u32 val;
+
+ return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
+}
+
+static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ struct ddiv_clk *ddiv = to_ddiv_clock(divider);
+ struct rzv2h_cpg_priv *priv = ddiv->priv;
+ unsigned long flags = 0;
+ int value;
+ u32 val;
+ int ret;
+
+ value = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ spin_lock_irqsave(divider->lock, flags);
+
+ ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
+ if (ret)
+ goto ddiv_timeout;
+
+ val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
+ val |= (u32)value << divider->shift;
+ writel(val, divider->reg);
+
+ ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
+ if (ret)
+ goto ddiv_timeout;
+
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+
+ddiv_timeout:
+ spin_unlock_irqrestore(divider->lock, flags);
+ return ret;
+}
+
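
set_rate serializes on rmw_lock and waits on the CPG_CLKSTATUS0 monitor bit twice: once so no earlier divider transition is still in flight, and once so the new value has latched. The register itself is write-enable protected per field; for example, the 3-bit divider at shift 8 used by CDDIV0_DIVCTL2 is updated as:

	val  = readl(reg) | DDIV_DIVCTL_WEN(8);	/* BIT(24) unlocks bits 10:8 */
	val &= ~GENMASK(10, 8);
	val |= new_div << 8;
	writel(val, reg);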
+static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
+ .recalc_rate = rzv2h_ddiv_recalc_rate,
+ .round_rate = rzv2h_ddiv_round_rate,
+ .determine_rate = rzv2h_ddiv_determine_rate,
+ .set_rate = rzv2h_ddiv_set_rate,
+};
+
+static struct clk * __init
+rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct ddiv cfg_ddiv = core->cfg.ddiv;
+ struct clk_init_data init = {};
+ struct device *dev = priv->dev;
+ u8 shift = cfg_ddiv.shift;
+ u8 width = cfg_ddiv.width;
+ const struct clk *parent;
+ const char *parent_name;
+ struct clk_divider *div;
+ struct ddiv_clk *ddiv;
+ int ret;
+
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ if ((shift + width) > 16)
+ return ERR_PTR(-EINVAL);
+
+ ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
+ if (!ddiv)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = core->name;
+ init.ops = &rzv2h_ddiv_clk_divider_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ ddiv->priv = priv;
+ ddiv->mon = cfg_ddiv.monbit;
+ div = &ddiv->div;
+ div->reg = priv->base + cfg_ddiv.offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = core->flag;
+ div->lock = &priv->rmw_lock;
+ div->hw.init = &init;
+ div->table = core->dtable;
+
+ ret = devm_clk_hw_register(dev, &div->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return div->hw.clk;
+}
+
+static struct clk
+*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ unsigned int clkidx = clkspec->args[1];
+ struct rzv2h_cpg_priv *priv = data;
+ struct device *dev = priv->dev;
+ const char *type;
+ struct clk *clk;
+
+ switch (clkspec->args[0]) {
+ case CPG_CORE:
+ type = "core";
+ if (clkidx > priv->last_dt_core_clk) {
+ dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
+ return ERR_PTR(-EINVAL);
+ }
+ clk = priv->clks[clkidx];
+ break;
+
+ case CPG_MOD:
+ type = "module";
+ if (clkidx >= priv->num_mod_clks) {
+ dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
+ return ERR_PTR(-EINVAL);
+ }
+ clk = priv->clks[priv->num_core_clks + clkidx];
+ break;
+
+ default:
+ dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR(clk))
+ dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
+ PTR_ERR(clk));
+ else
+ dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
+ clkspec->args[0], clkspec->args[1], clk,
+ clk_get_rate(clk));
+ return clk;
+}
+
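
Consumers address clocks with a two-cell specifier, <type index>: CPG_CORE indexes clks[] directly, while CPG_MOD entries sit after the core clocks. For example, with num_core_clks = 64:

	/* clocks = <&cpg CPG_CORE 5>  ->  priv->clks[5]      */
	/* clocks = <&cpg CPG_MOD 9>   ->  priv->clks[64 + 9] */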
+static void __init
+rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
+ unsigned int id = core->id, div = core->div;
+ struct device *dev = priv->dev;
+ const char *parent_name;
+ struct clk_hw *clk_hw;
+
+ WARN_DEBUG(id >= priv->num_core_clks);
+ WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
+
+ switch (core->type) {
+ case CLK_TYPE_IN:
+ clk = of_clk_get_by_name(priv->dev->of_node, core->name);
+ break;
+ case CLK_TYPE_FF:
+ WARN_DEBUG(core->parent >= priv->num_core_clks);
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent)) {
+ clk = parent;
+ goto fail;
+ }
+
+ parent_name = __clk_get_name(parent);
+ clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
+ parent_name, CLK_SET_RATE_PARENT,
+ core->mult, div);
+ if (IS_ERR(clk_hw))
+ clk = ERR_CAST(clk_hw);
+ else
+ clk = clk_hw->clk;
+ break;
+ case CLK_TYPE_PLL:
+ clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
+ break;
+ case CLK_TYPE_DDIV:
+ clk = rzv2h_cpg_ddiv_clk_register(core, priv);
+ break;
+ default:
+ goto fail;
+ }
+
+ if (IS_ERR_OR_NULL(clk))
+ goto fail;
+
+ dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
+ priv->clks[id] = clk;
+ return;
+
+fail:
+ dev_err(dev, "Failed to register core clock %s: %ld\n",
+ core->name, PTR_ERR(clk));
+}
+
+static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
+{
+ struct mod_clock *clock = to_mod_clock(hw);
+ unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
+ struct rzv2h_cpg_priv *priv = clock->priv;
+ u32 bitmask = BIT(clock->on_bit);
+ struct device *dev = priv->dev;
+ u32 value;
+ int error;
+
+ dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
+ enable ? "ON" : "OFF");
+
+ value = bitmask << 16;
+ if (enable)
+ value |= bitmask;
+
+ writel(value, priv->base + reg);
+
+ if (!enable || clock->mon_index < 0)
+ return 0;
+
+ reg = GET_CLK_MON_OFFSET(clock->mon_index);
+ bitmask = BIT(clock->mon_bit);
+ error = readl_poll_timeout_atomic(priv->base + reg, value,
+ value & bitmask, 0, 10);
+ if (error)
+ dev_err(dev, "Failed to enable CLK_ON %p\n",
+ priv->base + reg);
+
+ return error;
+}
+
+static int rzv2h_mod_clock_enable(struct clk_hw *hw)
+{
+ return rzv2h_mod_clock_endisable(hw, true);
+}
+
+static void rzv2h_mod_clock_disable(struct clk_hw *hw)
+{
+ rzv2h_mod_clock_endisable(hw, false);
+}
+
+static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
+{
+ struct mod_clock *clock = to_mod_clock(hw);
+ struct rzv2h_cpg_priv *priv = clock->priv;
+ u32 bitmask;
+ u32 offset;
+
+ if (clock->mon_index >= 0) {
+ offset = GET_CLK_MON_OFFSET(clock->mon_index);
+ bitmask = BIT(clock->mon_bit);
+ } else {
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+ }
+
+ return readl(priv->base + offset) & bitmask;
+}
+
+static const struct clk_ops rzv2h_mod_clock_ops = {
+ .enable = rzv2h_mod_clock_enable,
+ .disable = rzv2h_mod_clock_disable,
+ .is_enabled = rzv2h_mod_clock_is_enabled,
+};
+
+static void __init
+rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct mod_clock *clock = NULL;
+ struct device *dev = priv->dev;
+ struct clk_init_data init;
+ struct clk *parent, *clk;
+ const char *parent_name;
+ unsigned int id;
+ int ret;
+
+ id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
+ WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
+ WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
+ WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
+
+ parent = priv->clks[mod->parent];
+ if (IS_ERR(parent)) {
+ clk = parent;
+ goto fail;
+ }
+
+ clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ clk = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+
+ init.name = mod->name;
+ init.ops = &rzv2h_mod_clock_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ if (mod->critical)
+ init.flags |= CLK_IS_CRITICAL;
+
+ parent_name = __clk_get_name(parent);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->on_index = mod->on_index;
+ clock->on_bit = mod->on_bit;
+ clock->mon_index = mod->mon_index;
+ clock->mon_bit = mod->mon_bit;
+ clock->priv = priv;
+ clock->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &clock->hw);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto fail;
+ }
+
+ priv->clks[id] = clock->hw.clk;
+
+ return;
+
+fail:
+ dev_err(dev, "Failed to register module clock %s: %ld\n",
+ mod->name, PTR_ERR(clk));
+}
+
+static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
+ unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
+ u32 mask = BIT(priv->resets[id].reset_bit);
+ u8 monbit = priv->resets[id].mon_bit;
+ u32 value = mask << 16;
+
+ dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);
+
+ writel(value, priv->base + reg);
+
+ reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
+ mask = BIT(monbit);
+
+ return readl_poll_timeout_atomic(priv->base + reg, value,
+ value & mask, 10, 200);
+}
+
+static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
+ unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
+ u32 mask = BIT(priv->resets[id].reset_bit);
+ u8 monbit = priv->resets[id].mon_bit;
+ u32 value = (mask << 16) | mask;
+
+ dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);
+
+ writel(value, priv->base + reg);
+
+ reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
+ mask = BIT(monbit);
+
+ return readl_poll_timeout_atomic(priv->base + reg, value,
+ !(value & mask), 10, 200);
+}
+
+static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ int ret;
+
+ ret = rzv2h_cpg_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ return rzv2h_cpg_deassert(rcdev, id);
+}
+
+static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
+ unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
+ u8 monbit = priv->resets[id].mon_bit;
+
+ return !!(readl(priv->base + reg) & BIT(monbit));
+}
+
+static const struct reset_control_ops rzv2h_cpg_reset_ops = {
+ .reset = rzv2h_cpg_reset,
+ .assert = rzv2h_cpg_assert,
+ .deassert = rzv2h_cpg_deassert,
+ .status = rzv2h_cpg_status,
+};
+
+static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
+ unsigned int id = reset_spec->args[0];
+ u8 rst_index = id / 16;
+ u8 rst_bit = id % 16;
+ unsigned int i;
+
+ for (i = 0; i < rcdev->nr_resets; i++) {
+ if (rst_index == priv->resets[i].reset_index &&
+ rst_bit == priv->resets[i].reset_bit)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
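
DT encodes a reset as id = reset_index * 16 + reset_bit; of_xlate inverts that split and returns the position of the matching resets[] entry, which the assert/deassert paths then use. Worked example:

	/* reset specifier 35: index = 35 / 16 = 2, bit = 35 % 16 = 3;
	 * asserted via GET_RST_OFFSET(2) = 0x908 and monitored (for an
	 * entry with mon_index 2) via GET_RST_MON_OFFSET(2) = 0xa08 */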
+static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
+{
+ priv->rcdev.ops = &rzv2h_cpg_reset_ops;
+ priv->rcdev.of_node = priv->dev->of_node;
+ priv->rcdev.dev = priv->dev;
+ priv->rcdev.of_reset_n_cells = 1;
+ priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
+ priv->rcdev.nr_resets = priv->num_resets;
+
+ return devm_reset_controller_register(priv->dev, &priv->rcdev);
+}
+
+/**
+ * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
+ * @priv: pointer to CPG private data structure
+ * @genpd: generic PM domain
+ */
+struct rzv2h_cpg_pd {
+ struct rzv2h_cpg_priv *priv;
+ struct generic_pm_domain genpd;
+};
+
+static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args clkspec;
+ bool once = true;
+ struct clk *clk;
+ int error;
+ int i = 0;
+
+ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
+ &clkspec)) {
+ if (once) {
+ once = false;
+ error = pm_clk_create(dev);
+ if (error) {
+ of_node_put(clkspec.np);
+ goto err;
+ }
+ }
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ goto fail_destroy;
+ }
+
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n",
+ error);
+ goto fail_put;
+ }
+ i++;
+ }
+
+ return 0;
+
+fail_put:
+ clk_put(clk);
+
+fail_destroy:
+ pm_clk_destroy(dev);
+err:
+ return error;
+}
+
+static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
+{
+ if (!pm_clk_no_clocks(dev))
+ pm_clk_destroy(dev);
+}
+
+static void rzv2h_cpg_genpd_remove_simple(void *data)
+{
+ pm_genpd_remove(data);
+}
+
+static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct device_node *np = dev->of_node;
+ struct rzv2h_cpg_pd *pd;
+ int ret;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->genpd.name = np->name;
+ pd->priv = priv;
+ pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
+ pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
+ ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
+ if (ret)
+ return ret;
+
+ return of_genpd_add_provider_simple(np, &pd->genpd);
+}
+
+static void rzv2h_cpg_del_clk_provider(void *data)
+{
+ of_clk_del_provider(data);
+}
+
+static int __init rzv2h_cpg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct rzv2h_cpg_info *info;
+ struct rzv2h_cpg_priv *priv;
+ unsigned int nclks, i;
+ struct clk **clks;
+ int error;
+
+ info = of_device_get_match_data(dev);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->rmw_lock);
+
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ nclks = info->num_total_core_clks + info->num_hw_mod_clks;
+ clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
+ info->num_resets, GFP_KERNEL);
+ if (!priv->resets)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->clks = clks;
+ priv->num_core_clks = info->num_total_core_clks;
+ priv->num_mod_clks = info->num_hw_mod_clks;
+ priv->last_dt_core_clk = info->last_dt_core_clk;
+ priv->num_resets = info->num_resets;
+
+ for (i = 0; i < nclks; i++)
+ clks[i] = ERR_PTR(-ENOENT);
+
+ for (i = 0; i < info->num_core_clks; i++)
+ rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);
+
+ for (i = 0; i < info->num_mod_clks; i++)
+ rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);
+
+ error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
+ if (error)
+ return error;
+
+ error = rzv2h_cpg_add_pm_domains(priv);
+ if (error)
+ return error;
+
+ error = rzv2h_cpg_reset_controller_register(priv);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id rzv2h_cpg_match[] = {
+#ifdef CONFIG_CLK_R9A09G057
+ {
+ .compatible = "renesas,r9a09g057-cpg",
+ .data = &r9a09g057_cpg_info,
+ },
+#endif
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzv2h_cpg_driver = {
+ .driver = {
+ .name = "rzv2h-cpg",
+ .of_match_table = rzv2h_cpg_match,
+ },
+};
+
+static int __init rzv2h_cpg_init(void)
+{
+ return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
+}
+
+subsys_initcall(rzv2h_cpg_init);
+
+MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
new file mode 100644
index 000000000000..1bd406c69015
--- /dev/null
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/V2H(P) Clock Pulse Generator
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#ifndef __RENESAS_RZV2H_CPG_H__
+#define __RENESAS_RZV2H_CPG_H__
+
+/**
+ * struct ddiv - Structure for dynamic switching divider
+ *
+ * @offset: register offset
+ * @shift: position of the divider bit
+ * @width: width of the divider
+ * @monbit: monitor bit in CPG_CLKSTATUS0 register
+ */
+struct ddiv {
+ unsigned int offset:11;
+ unsigned int shift:4;
+ unsigned int width:4;
+ unsigned int monbit:5;
+};
+
+#define DDIV_PACK(_offset, _shift, _width, _monbit) \
+ ((struct ddiv){ \
+ .offset = _offset, \
+ .shift = _shift, \
+ .width = _width, \
+ .monbit = _monbit \
+ })
+
+#define CPG_CDDIV0 (0x400)
+
+#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
+
+/**
+ * Definitions of CPG Core Clocks
+ *
+ * These include:
+ * - Clock outputs exported to DT
+ * - External input clocks
+ * - Internal CPG clocks
+ */
+struct cpg_core_clk {
+ const char *name;
+ unsigned int id;
+ unsigned int parent;
+ unsigned int div;
+ unsigned int mult;
+ unsigned int type;
+ union {
+ unsigned int conf;
+ struct ddiv ddiv;
+ } cfg;
+ const struct clk_div_table *dtable;
+ u32 flag;
+};
+
+enum clk_types {
+ /* Generic */
+ CLK_TYPE_IN, /* External Clock Input */
+ CLK_TYPE_FF, /* Fixed Factor Clock */
+ CLK_TYPE_PLL,
+ CLK_TYPE_DDIV, /* Dynamic Switching Divider */
+};
+
+/* BIT(31) indicates if CLK1/2 are accessible or not */
+#define PLL_CONF(n) (BIT(31) | ((n) & ~GENMASK(31, 16)))
+#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0)
+#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16))
+#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4))
+
+#define DEF_TYPE(_name, _id, _type...) \
+ { .name = _name, .id = _id, .type = _type }
+#define DEF_BASE(_name, _id, _type, _parent...) \
+ DEF_TYPE(_name, _id, _type, .parent = _parent)
+#define DEF_PLL(_name, _id, _parent, _conf) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf)
+#define DEF_INPUT(_name, _id) \
+ DEF_TYPE(_name, _id, CLK_TYPE_IN)
+#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
+ DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
+#define DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_TYPE(_name, _id, CLK_TYPE_DDIV, \
+ .cfg.ddiv = _ddiv_packed, \
+ .parent = _parent, \
+ .dtable = _dtable, \
+ .flag = CLK_DIVIDER_HIWORD_MASK)
+
+/**
+ * struct rzv2h_mod_clk - Module Clocks definitions
+ *
+ * @name: name of the module clock
+ * @parent: id of parent clock
+ * @critical: flag to indicate the clock is critical
+ * @on_index: control register index
+ * @on_bit: ON bit
+ * @mon_index: monitor register index
+ * @mon_bit: monitor bit
+ */
+struct rzv2h_mod_clk {
+ const char *name;
+ u16 parent;
+ bool critical;
+ u8 on_index;
+ u8 on_bit;
+ s8 mon_index;
+ u8 mon_bit;
+};
+
+#define DEF_MOD_BASE(_name, _parent, _critical, _onindex, _onbit, _monindex, _monbit) \
+ { \
+ .name = (_name), \
+ .parent = (_parent), \
+ .critical = (_critical), \
+ .on_index = (_onindex), \
+ .on_bit = (_onbit), \
+ .mon_index = (_monindex), \
+ .mon_bit = (_monbit), \
+ }
+
+#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
+ DEF_MOD_BASE(_name, _parent, false, _onindex, _onbit, _monindex, _monbit)
+
+#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
+ DEF_MOD_BASE(_name, _parent, true, _onindex, _onbit, _monindex, _monbit)
+
+/**
+ * struct rzv2h_reset - Reset definitions
+ *
+ * @reset_index: reset register index
+ * @reset_bit: reset bit
+ * @mon_index: monitor register index
+ * @mon_bit: monitor bit
+ */
+struct rzv2h_reset {
+ u8 reset_index;
+ u8 reset_bit;
+ u8 mon_index;
+ u8 mon_bit;
+};
+
+#define DEF_RST_BASE(_resindex, _resbit, _monindex, _monbit) \
+ { \
+ .reset_index = (_resindex), \
+ .reset_bit = (_resbit), \
+ .mon_index = (_monindex), \
+ .mon_bit = (_monbit), \
+ }
+
+#define DEF_RST(_resindex, _resbit, _monindex, _monbit) \
+ DEF_RST_BASE(_resindex, _resbit, _monindex, _monbit)
+
+/**
+ * struct rzv2h_cpg_info - SoC-specific CPG Description
+ *
+ * @core_clks: Array of Core Clock definitions
+ * @num_core_clks: Number of entries in core_clks[]
+ * @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @num_total_core_clks: Total number of Core Clocks (exported + internal)
+ *
+ * @mod_clks: Array of Module Clock definitions
+ * @num_mod_clks: Number of entries in mod_clks[]
+ * @num_hw_mod_clks: Number of Module Clocks supported by the hardware
+ *
+ * @resets: Array of Module Reset definitions
+ * @num_resets: Number of entries in resets[]
+ */
+struct rzv2h_cpg_info {
+ /* Core Clocks */
+ const struct cpg_core_clk *core_clks;
+ unsigned int num_core_clks;
+ unsigned int last_dt_core_clk;
+ unsigned int num_total_core_clks;
+
+ /* Module Clocks */
+ const struct rzv2h_mod_clk *mod_clks;
+ unsigned int num_mod_clks;
+ unsigned int num_hw_mod_clks;
+
+ /* Resets */
+ const struct rzv2h_reset *resets;
+ unsigned int num_resets;
+};
+
+extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
+
+#endif /* __RENESAS_RZV2H_CPG_H__ */
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 9aad86925cd2..570ad90835d3 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -100,6 +100,13 @@ config CLK_RK3568
help
Build the driver for RK3568 Clock Driver.
+config CLK_RK3576
+ bool "Rockchip RK3576 clock controller support"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Build the driver for RK3576 Clock Driver.
+
config CLK_RK3588
bool "Rockchip RK3588 clock controller support"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 36894f6a7022..af2ade54a7ef 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -28,4 +28,5 @@ obj-$(CONFIG_CLK_RK3328) += clk-rk3328.o
obj-$(CONFIG_CLK_RK3368) += clk-rk3368.o
obj-$(CONFIG_CLK_RK3399) += clk-rk3399.o
obj-$(CONFIG_CLK_RK3568) += clk-rk3568.o
+obj-$(CONFIG_CLK_RK3576) += clk-rk3576.o rst-rk3576.o
obj-$(CONFIG_CLK_RK3588) += clk-rk3588.o rst-rk3588.o
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 606ce5458f54..fe76756e592e 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -914,7 +914,10 @@ static unsigned long rockchip_rk3588_pll_recalc_rate(struct clk_hw *hw, unsigned
}
rate64 = rate64 >> cur.s;
- return (unsigned long)rate64;
+ if (pll->type == pll_rk3588_ddr)
+ return (unsigned long)rate64 * 2;
+ else
+ return (unsigned long)rate64;
}
static int rockchip_rk3588_pll_set_params(struct rockchip_clk_pll *pll,
@@ -1167,6 +1170,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
break;
case pll_rk3588:
case pll_rk3588_core:
+ case pll_rk3588_ddr:
if (!pll->rate_table)
init.ops = &rockchip_rk3588_pll_clk_norate_ops;
else
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
index b58619eb412b..caf7c0e6e479 100644
--- a/drivers/clk/rockchip/clk-px30.c
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -1002,6 +1002,7 @@ static const char *const px30_cru_critical_clocks[] __initconst = {
static void __init px30_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1010,7 +1011,9 @@ static void __init px30_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(px30_clk_branches,
+ ARRAY_SIZE(px30_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
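
Instead of the hard-coded CLK_NR_CLKS from the binding header, each provider is now sized from its branch table. The helper's logic amounts to a scan for the largest clock id (simplified sketch; the real implementation in drivers/clk/rockchip/clk.c also inspects composite child branches):

	unsigned long max_id = 0;

	for (idx = 0; idx < nr_clk; idx++, list++)
		if (list->id > max_id)
			max_id = list->id;
	/* the provider is then sized max_id + 1 */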
@@ -1043,6 +1046,7 @@ CLK_OF_DECLARE(px30_cru, "rockchip,px30-cru", px30_clk_init);
static void __init px30_pmu_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clkpmu_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1051,7 +1055,9 @@ static void __init px30_pmu_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ clkpmu_nr_clks = rockchip_clk_find_max_clk_id(px30_clk_pmu_branches,
+ ARRAY_SIZE(px30_clk_pmu_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clkpmu_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip pmu clk init failed\n", __func__);
return;
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index d644bc155ec6..d341ce0708aa 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -436,6 +436,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
static void __init rk3036_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
struct clk *clk;
@@ -452,7 +453,9 @@ static void __init rk3036_clk_init(struct device_node *np)
writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
reg_base + RK2928_CLKSEL_CON(13));
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3036_clk_branches,
+ ARRAY_SIZE(rk3036_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index a24a35553e13..ed602c27b624 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -409,7 +409,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(29), 0, 3, DFLAGS),
DIV(0, "sclk_vop_pre", "sclk_vop_src", 0,
RK2928_CLKSEL_CON(27), 8, 8, DFLAGS),
- MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0,
+ MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
@@ -683,6 +683,7 @@ static const char *const rk3228_critical_clocks[] __initconst = {
static void __init rk3228_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -691,7 +692,9 @@ static void __init rk3228_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3228_clk_branches,
+ ARRAY_SIZE(rk3228_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index baa5aebd3277..90d329216064 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -932,6 +932,7 @@ static void __init rk3288_common_init(struct device_node *np,
enum rk3288_variant soc)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
rk3288_cru_base = of_iomap(np, 0);
if (!rk3288_cru_base) {
@@ -939,7 +940,9 @@ static void __init rk3288_common_init(struct device_node *np,
return;
}
- ctx = rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3288_clk_branches,
+ ARRAY_SIZE(rk3288_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, rk3288_cru_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(rk3288_cru_base);
diff --git a/drivers/clk/rockchip/clk-rk3308.c b/drivers/clk/rockchip/clk-rk3308.c
index db3396c3e6e9..95a9512a41a3 100644
--- a/drivers/clk/rockchip/clk-rk3308.c
+++ b/drivers/clk/rockchip/clk-rk3308.c
@@ -917,6 +917,7 @@ static const char *const rk3308_critical_clocks[] __initconst = {
static void __init rk3308_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -925,7 +926,9 @@ static void __init rk3308_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3308_clk_branches,
+ ARRAY_SIZE(rk3308_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 267ab54937d3..3bb87b27b662 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -881,6 +881,7 @@ static const char *const rk3328_critical_clocks[] __initconst = {
static void __init rk3328_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -889,7 +890,9 @@ static void __init rk3328_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3328_clk_branches,
+ ARRAY_SIZE(rk3328_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 2c50cc2cc6db..04391e4e2874 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -866,6 +866,7 @@ static const char *const rk3368_critical_clocks[] __initconst = {
static void __init rk3368_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -874,7 +875,9 @@ static void __init rk3368_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3368_clk_branches,
+ ARRAY_SIZE(rk3368_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 4f1a5782c230..c2b243d7a5e2 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -1531,6 +1531,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
static void __init rk3399_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1539,7 +1540,9 @@ static void __init rk3399_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3399_clk_branches,
+ ARRAY_SIZE(rk3399_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
@@ -1577,6 +1580,7 @@ CLK_OF_DECLARE(rk3399_cru, "rockchip,rk3399-cru", rk3399_clk_init);
static void __init rk3399_pmu_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clkpmu_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1585,7 +1589,9 @@ static void __init rk3399_pmu_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ clkpmu_nr_clks = rockchip_clk_find_max_clk_id(rk3399_clk_pmu_branches,
+ ARRAY_SIZE(rk3399_clk_pmu_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clkpmu_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip pmu clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c
new file mode 100644
index 000000000000..595e010341f7
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3576.c
@@ -0,0 +1,1818 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/clock/rockchip,rk3576-cru.h>
+#include "clk.h"
+
+#define RK3576_GRF_SOC_STATUS0 0x600
+#define RK3576_PMU0_GRF_OSC_CON6 0x18
+
+enum rk3576_plls {
+ bpll, lpll, vpll, aupll, cpll, gpll, ppll,
+};
+
+static struct rockchip_pll_rate_table rk3576_pll_rates[] = {
+ /* _mhz, _p, _m, _s, _k */
+ RK3588_PLL_RATE(2520000000, 2, 210, 0, 0),
+ RK3588_PLL_RATE(2496000000, 2, 208, 0, 0),
+ RK3588_PLL_RATE(2472000000, 2, 206, 0, 0),
+ RK3588_PLL_RATE(2448000000, 2, 204, 0, 0),
+ RK3588_PLL_RATE(2424000000, 2, 202, 0, 0),
+ RK3588_PLL_RATE(2400000000, 2, 200, 0, 0),
+ RK3588_PLL_RATE(2376000000, 2, 198, 0, 0),
+ RK3588_PLL_RATE(2352000000, 2, 196, 0, 0),
+ RK3588_PLL_RATE(2328000000, 2, 194, 0, 0),
+ RK3588_PLL_RATE(2304000000, 2, 192, 0, 0),
+ RK3588_PLL_RATE(2280000000, 2, 190, 0, 0),
+ RK3588_PLL_RATE(2256000000, 2, 376, 1, 0),
+ RK3588_PLL_RATE(2232000000, 2, 372, 1, 0),
+ RK3588_PLL_RATE(2208000000, 2, 368, 1, 0),
+ RK3588_PLL_RATE(2184000000, 2, 364, 1, 0),
+ RK3588_PLL_RATE(2160000000, 2, 360, 1, 0),
+ RK3588_PLL_RATE(2136000000, 2, 356, 1, 0),
+ RK3588_PLL_RATE(2112000000, 2, 352, 1, 0),
+ RK3588_PLL_RATE(2088000000, 2, 348, 1, 0),
+ RK3588_PLL_RATE(2064000000, 2, 344, 1, 0),
+ RK3588_PLL_RATE(2040000000, 2, 340, 1, 0),
+ RK3588_PLL_RATE(2016000000, 2, 336, 1, 0),
+ RK3588_PLL_RATE(1992000000, 2, 332, 1, 0),
+ RK3588_PLL_RATE(1968000000, 2, 328, 1, 0),
+ RK3588_PLL_RATE(1944000000, 2, 324, 1, 0),
+ RK3588_PLL_RATE(1920000000, 2, 320, 1, 0),
+ RK3588_PLL_RATE(1896000000, 2, 316, 1, 0),
+ RK3588_PLL_RATE(1872000000, 2, 312, 1, 0),
+ RK3588_PLL_RATE(1848000000, 2, 308, 1, 0),
+ RK3588_PLL_RATE(1824000000, 2, 304, 1, 0),
+ RK3588_PLL_RATE(1800000000, 2, 300, 1, 0),
+ RK3588_PLL_RATE(1776000000, 2, 296, 1, 0),
+ RK3588_PLL_RATE(1752000000, 2, 292, 1, 0),
+ RK3588_PLL_RATE(1728000000, 2, 288, 1, 0),
+ RK3588_PLL_RATE(1704000000, 2, 284, 1, 0),
+ RK3588_PLL_RATE(1680000000, 2, 280, 1, 0),
+ RK3588_PLL_RATE(1656000000, 2, 276, 1, 0),
+ RK3588_PLL_RATE(1632000000, 2, 272, 1, 0),
+ RK3588_PLL_RATE(1608000000, 2, 268, 1, 0),
+ RK3588_PLL_RATE(1584000000, 2, 264, 1, 0),
+ RK3588_PLL_RATE(1560000000, 2, 260, 1, 0),
+ RK3588_PLL_RATE(1536000000, 2, 256, 1, 0),
+ RK3588_PLL_RATE(1512000000, 2, 252, 1, 0),
+ RK3588_PLL_RATE(1488000000, 2, 248, 1, 0),
+ RK3588_PLL_RATE(1464000000, 2, 244, 1, 0),
+ RK3588_PLL_RATE(1440000000, 2, 240, 1, 0),
+ RK3588_PLL_RATE(1416000000, 2, 236, 1, 0),
+ RK3588_PLL_RATE(1392000000, 2, 232, 1, 0),
+ RK3588_PLL_RATE(1320000000, 2, 220, 1, 0),
+ RK3588_PLL_RATE(1200000000, 2, 200, 1, 0),
+ RK3588_PLL_RATE(1188000000, 2, 198, 1, 0),
+ RK3588_PLL_RATE(1100000000, 3, 550, 2, 0),
+ RK3588_PLL_RATE(1008000000, 2, 336, 2, 0),
+ RK3588_PLL_RATE(1000000000, 3, 500, 2, 0),
+ RK3588_PLL_RATE(983040000, 4, 655, 2, 23592),
+ RK3588_PLL_RATE(955520000, 3, 477, 2, 49806),
+ RK3588_PLL_RATE(903168000, 6, 903, 2, 11009),
+ RK3588_PLL_RATE(900000000, 2, 300, 2, 0),
+ RK3588_PLL_RATE(816000000, 2, 272, 2, 0),
+ RK3588_PLL_RATE(786432000, 2, 262, 2, 9437),
+ RK3588_PLL_RATE(786000000, 1, 131, 2, 0),
+ RK3588_PLL_RATE(785560000, 3, 392, 2, 51117),
+ RK3588_PLL_RATE(722534400, 8, 963, 2, 24850),
+ RK3588_PLL_RATE(600000000, 2, 200, 2, 0),
+ RK3588_PLL_RATE(594000000, 2, 198, 2, 0),
+ RK3588_PLL_RATE(408000000, 2, 272, 3, 0),
+ RK3588_PLL_RATE(312000000, 2, 208, 3, 0),
+ RK3588_PLL_RATE(216000000, 2, 288, 4, 0),
+ RK3588_PLL_RATE(96000000, 2, 256, 5, 0),
+ { /* sentinel */ },
+};
+
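
The table rows follow fout = 24 MHz * (_m + _k / 65536) / (_p * 2^_s), per the RK3588-style PLL these macros target. Spot-checking two entries:

	/* 2520 MHz:   24 MHz * 210 / (2 * 1)                    = 2520 MHz   */
	/* 983.04 MHz: 24 MHz * (655 + 23592 / 65536) / (4 * 4) ~= 983.04 MHz */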
+static struct rockchip_pll_rate_table rk3576_ppll_rates[] = {
+ /* _mhz, _p, _m, _s, _k */
+ RK3588_PLL_RATE(1300000000, 3, 325, 2, 0),
+ { /* sentinel */ },
+};
+
+#define RK3576_ACLK_M_BIGCORE_DIV_MASK 0x1f
+#define RK3576_ACLK_M_BIGCORE_DIV_SHIFT 0
+#define RK3576_ACLK_M_LITCORE_DIV_MASK 0x1f
+#define RK3576_ACLK_M_LITCORE_DIV_SHIFT 8
+#define RK3576_PCLK_DBG_LITCORE_DIV_MASK 0x1f
+#define RK3576_PCLK_DBG_LITCORE_DIV_SHIFT 0
+#define RK3576_ACLK_CCI_DIV_MASK 0x1f
+#define RK3576_ACLK_CCI_DIV_SHIFT 7
+#define RK3576_ACLK_CCI_MUX_MASK 0x3
+#define RK3576_ACLK_CCI_MUX_SHIFT 12
+
+#define RK3576_BIGCORE_CLKSEL2(_amcore) \
+{ \
+ .reg = RK3576_BIGCORE_CLKSEL_CON(2), \
+ .val = HIWORD_UPDATE(_amcore - 1, RK3576_ACLK_M_BIGCORE_DIV_MASK, \
+ RK3576_ACLK_M_BIGCORE_DIV_SHIFT), \
+}
+
+#define RK3576_LITCORE_CLKSEL1(_amcore) \
+{ \
+ .reg = RK3576_LITCORE_CLKSEL_CON(1), \
+ .val = HIWORD_UPDATE(_amcore - 1, RK3576_ACLK_M_LITCORE_DIV_MASK, \
+ RK3576_ACLK_M_LITCORE_DIV_SHIFT), \
+}
+
+#define RK3576_LITCORE_CLKSEL2(_pclkdbg) \
+{ \
+ .reg = RK3576_LITCORE_CLKSEL_CON(2), \
+ .val = HIWORD_UPDATE(_pclkdbg - 1, RK3576_PCLK_DBG_LITCORE_DIV_MASK, \
+ RK3576_PCLK_DBG_LITCORE_DIV_SHIFT), \
+}
+
+#define RK3576_CCI_CLKSEL4(_ccisel, _div) \
+{ \
+ .reg = RK3576_CCI_CLKSEL_CON(4), \
+ .val = HIWORD_UPDATE(_ccisel, RK3576_ACLK_CCI_MUX_MASK, \
+ RK3576_ACLK_CCI_MUX_SHIFT) | \
+ HIWORD_UPDATE(_div - 1, RK3576_ACLK_CCI_DIV_MASK, \
+ RK3576_ACLK_CCI_DIV_SHIFT), \
+}
+
+#define RK3576_CPUBCLK_RATE(_prate, _amcore) \
+{ \
+ .prate = _prate##U, \
+ .divs = { \
+ RK3576_BIGCORE_CLKSEL2(_amcore), \
+ }, \
+}
+
+#define RK3576_CPULCLK_RATE(_prate, _amcore, _pclkdbg, _ccisel) \
+{ \
+ .prate = _prate##U, \
+ .divs = { \
+ RK3576_LITCORE_CLKSEL1(_amcore), \
+ RK3576_LITCORE_CLKSEL2(_pclkdbg), \
+ }, \
+ .pre_muxs = { \
+ RK3576_CCI_CLKSEL4(2, 2), \
+ }, \
+ .post_muxs = { \
+ RK3576_CCI_CLKSEL4(_ccisel, 2), \
+ }, \
+}
+
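
All of these per-rate writes target Rockchip "hiword-mask" registers: bit n is only written when bit n+16 is set in the same access, so updates need no read-modify-write. Concretely:

	/* HIWORD_UPDATE(val, mask, shift) = (val << shift) | (mask << (shift + 16));
	 * RK3576_BIGCORE_CLKSEL2(2) writes HIWORD_UPDATE(1, 0x1f, 0) = 0x001f0001,
	 * i.e. divider bits 4:0 become 1 (/2) and every other field is untouched. */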
+static struct rockchip_cpuclk_rate_table rk3576_cpubclk_rates[] __initdata = {
+ RK3576_CPUBCLK_RATE(2496000000, 2),
+ RK3576_CPUBCLK_RATE(2400000000, 2),
+ RK3576_CPUBCLK_RATE(2304000000, 2),
+ RK3576_CPUBCLK_RATE(2208000000, 2),
+ RK3576_CPUBCLK_RATE(2184000000, 2),
+ RK3576_CPUBCLK_RATE(2088000000, 2),
+ RK3576_CPUBCLK_RATE(2040000000, 2),
+ RK3576_CPUBCLK_RATE(2016000000, 2),
+ RK3576_CPUBCLK_RATE(1992000000, 2),
+ RK3576_CPUBCLK_RATE(1896000000, 2),
+ RK3576_CPUBCLK_RATE(1800000000, 2),
+ RK3576_CPUBCLK_RATE(1704000000, 2),
+ RK3576_CPUBCLK_RATE(1608000000, 2),
+ RK3576_CPUBCLK_RATE(1584000000, 2),
+ RK3576_CPUBCLK_RATE(1560000000, 2),
+ RK3576_CPUBCLK_RATE(1536000000, 2),
+ RK3576_CPUBCLK_RATE(1512000000, 2),
+ RK3576_CPUBCLK_RATE(1488000000, 2),
+ RK3576_CPUBCLK_RATE(1464000000, 2),
+ RK3576_CPUBCLK_RATE(1440000000, 2),
+ RK3576_CPUBCLK_RATE(1416000000, 2),
+ RK3576_CPUBCLK_RATE(1392000000, 2),
+ RK3576_CPUBCLK_RATE(1368000000, 2),
+ RK3576_CPUBCLK_RATE(1344000000, 2),
+ RK3576_CPUBCLK_RATE(1320000000, 2),
+ RK3576_CPUBCLK_RATE(1296000000, 2),
+ RK3576_CPUBCLK_RATE(1272000000, 2),
+ RK3576_CPUBCLK_RATE(1248000000, 2),
+ RK3576_CPUBCLK_RATE(1224000000, 2),
+ RK3576_CPUBCLK_RATE(1200000000, 2),
+ RK3576_CPUBCLK_RATE(1104000000, 2),
+ RK3576_CPUBCLK_RATE(1008000000, 2),
+ RK3576_CPUBCLK_RATE(912000000, 2),
+ RK3576_CPUBCLK_RATE(816000000, 2),
+ RK3576_CPUBCLK_RATE(696000000, 2),
+ RK3576_CPUBCLK_RATE(600000000, 2),
+ RK3576_CPUBCLK_RATE(408000000, 2),
+ RK3576_CPUBCLK_RATE(312000000, 2),
+ RK3576_CPUBCLK_RATE(216000000, 2),
+ RK3576_CPUBCLK_RATE(96000000, 2),
+};
+
+static const struct rockchip_cpuclk_reg_data rk3576_cpubclk_data = {
+ .core_reg[0] = RK3576_BIGCORE_CLKSEL_CON(1),
+ .div_core_shift[0] = 7,
+ .div_core_mask[0] = 0x1f,
+ .num_cores = 1,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 12,
+ .mux_core_mask = 0x3,
+};
+
+static struct rockchip_cpuclk_rate_table rk3576_cpulclk_rates[] __initdata = {
+ RK3576_CPULCLK_RATE(2400000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2304000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2208000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2184000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2088000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2040000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(2016000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1992000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1896000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1800000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1704000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1608000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1584000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1560000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1536000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1512000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1488000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1464000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1440000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1416000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1392000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1368000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1344000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1320000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1296000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1272000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1248000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1224000000, 2, 6, 3),
+ RK3576_CPULCLK_RATE(1200000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(1104000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(1008000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(912000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(816000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(696000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(600000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(408000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(312000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(216000000, 2, 6, 2),
+ RK3576_CPULCLK_RATE(96000000, 2, 6, 2),
+};
+
+static const struct rockchip_cpuclk_reg_data rk3576_cpulclk_data = {
+ .core_reg[0] = RK3576_LITCORE_CLKSEL_CON(0),
+ .div_core_shift[0] = 7,
+ .div_core_mask[0] = 0x1f,
+ .num_cores = 1,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 12,
+ .mux_core_mask = 0x3,
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+PNAME(mux_pll_p) = { "xin24m", "xin32k" };
+PNAME(mux_24m_32k_p) = { "xin24m", "xin_osc0_div" };
+PNAME(mux_armclkl_p) = { "xin24m", "pll_lpll", "lpll" };
+PNAME(mux_armclkb_p) = { "xin24m", "pll_bpll", "bpll" };
+PNAME(gpll_24m_p) = { "gpll", "xin24m" };
+PNAME(cpll_24m_p) = { "cpll", "xin24m" };
+PNAME(gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(gpll_spll_p) = { "gpll", "spll" };
+PNAME(gpll_cpll_aupll_p) = { "gpll", "cpll", "aupll" };
+PNAME(gpll_cpll_24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(gpll_cpll_24m_spll_p) = { "gpll", "cpll", "xin24m", "spll" };
+PNAME(gpll_cpll_aupll_24m_p) = { "gpll", "cpll", "aupll", "xin24m" };
+PNAME(gpll_cpll_aupll_spll_p) = { "gpll", "cpll", "aupll", "spll" };
+PNAME(gpll_cpll_aupll_spll_lpll_p) = { "gpll", "cpll", "aupll", "spll", "lpll_dummy" };
+PNAME(gpll_cpll_spll_bpll_p) = { "gpll", "cpll", "spll", "bpll_dummy" };
+PNAME(gpll_cpll_lpll_bpll_p) = { "gpll", "cpll", "lpll_dummy", "bpll_dummy" };
+PNAME(gpll_spll_cpll_bpll_lpll_p) = { "gpll", "spll", "cpll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_cpll_vpll_aupll_24m_p) = { "gpll", "cpll", "vpll", "aupll", "xin24m" };
+PNAME(gpll_cpll_spll_aupll_bpll_p) = { "gpll", "cpll", "spll", "aupll", "bpll_dummy" };
+PNAME(gpll_cpll_spll_bpll_lpll_p) = { "gpll", "cpll", "spll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_cpll_spll_lpll_bpll_p) = { "gpll", "cpll", "spll", "lpll_dummy", "bpll_dummy" };
+PNAME(gpll_cpll_vpll_bpll_lpll_p) = { "gpll", "cpll", "vpll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_spll_aupll_bpll_lpll_p) = { "gpll", "spll", "aupll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_spll_isppvtpll_bpll_lpll_p) = { "gpll", "spll", "isp_pvtpll", "bpll_dummy", "lpll_dummy" };
+PNAME(gpll_cpll_spll_aupll_lpll_24m_p) = { "gpll", "cpll", "spll", "aupll", "lpll_dummy", "xin24m" };
+PNAME(gpll_cpll_spll_vpll_bpll_lpll_p) = { "gpll", "cpll", "spll", "vpll", "bpll_dummy", "lpll_dummy" };
+PNAME(cpll_vpll_lpll_bpll_p) = { "cpll", "vpll", "lpll_dummy", "bpll_dummy" };
+PNAME(mux_24m_ccipvtpll_gpll_lpll_p) = { "xin24m", "cci_pvtpll", "gpll", "lpll" };
+PNAME(mux_24m_spll_gpll_cpll_p) = { "xin24m", "spll", "gpll", "cpll" };
+PNAME(audio_frac_int_p) = { "xin24m", "clk_audio_frac_0", "clk_audio_frac_1", "clk_audio_frac_2",
+ "clk_audio_frac_3", "clk_audio_int_0", "clk_audio_int_1", "clk_audio_int_2" };
+PNAME(audio_frac_p) = { "clk_audio_frac_0", "clk_audio_frac_1", "clk_audio_frac_2", "clk_audio_frac_3" };
+PNAME(mux_100m_24m_p) = { "clk_cpll_div10", "xin24m" };
+PNAME(mux_100m_50m_24m_p) = { "clk_cpll_div10", "clk_cpll_div20", "xin24m" };
+PNAME(mux_100m_24m_lclk0_p) = { "clk_cpll_div10", "xin24m", "lclk_asrc_src_0" };
+PNAME(mux_100m_24m_lclk1_p) = { "clk_cpll_div10", "xin24m", "lclk_asrc_src_1" };
+PNAME(mux_150m_100m_50m_24m_p) = { "clk_gpll_div8", "clk_cpll_div10", "clk_cpll_div20", "xin24m" };
+PNAME(mux_200m_100m_50m_24m_p) = { "clk_gpll_div6", "clk_cpll_div10", "clk_cpll_div20", "xin24m" };
+PNAME(mux_400m_200m_100m_24m_p) = { "clk_gpll_div3", "clk_gpll_div6", "clk_cpll_div10", "xin24m" };
+PNAME(mux_500m_250m_100m_24m_p) = { "clk_cpll_div2", "clk_cpll_div4", "clk_cpll_div10", "xin24m" };
+PNAME(mux_600m_400m_300m_24m_p) = { "clk_gpll_div2", "clk_gpll_div3", "clk_gpll_div4", "xin24m" };
+PNAME(mux_350m_175m_116m_24m_p) = { "clk_spll_div2", "clk_spll_div4", "clk_spll_div6", "xin24m" };
+PNAME(mux_175m_116m_58m_24m_p) = { "clk_spll_div4", "clk_spll_div6", "clk_spll_div12", "xin24m" };
+PNAME(mux_116m_58m_24m_p) = { "clk_spll_div6", "clk_spll_div12", "xin24m" };
+PNAME(mclk_sai0_8ch_p) = { "mclk_sai0_8ch_src", "sai0_mclkin", "sai1_mclkin" };
+PNAME(mclk_sai1_8ch_p) = { "mclk_sai1_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai2_2ch_p) = { "mclk_sai2_2ch_src", "sai2_mclkin", "sai1_mclkin" };
+PNAME(mclk_sai3_2ch_p) = { "mclk_sai3_2ch_src", "sai3_mclkin", "sai1_mclkin" };
+PNAME(mclk_sai4_2ch_p) = { "mclk_sai4_2ch_src", "sai4_mclkin", "sai1_mclkin" };
+PNAME(mclk_sai5_8ch_p) = { "mclk_sai5_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai6_8ch_p) = { "mclk_sai6_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai7_8ch_p) = { "mclk_sai7_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai8_8ch_p) = { "mclk_sai8_8ch_src", "sai1_mclkin" };
+PNAME(mclk_sai9_8ch_p) = { "mclk_sai9_8ch_src", "sai1_mclkin" };
+PNAME(uart1_p) = { "clk_uart1_src_top", "xin24m" };
+PNAME(clk_gmac1_ptp_ref_src_p) = { "gpll", "cpll", "gmac1_ptp_refclk_in" };
+PNAME(clk_gmac0_ptp_ref_src_p) = { "gpll", "cpll", "gmac0_ptp_refclk_in" };
+PNAME(dclk_ebc_p) = { "gpll", "cpll", "vpll", "aupll", "lpll_dummy",
+ "dclk_ebc_frac", "xin24m" };
+PNAME(dclk_vp0_p) = { "dclk_vp0_src", "clk_hdmiphy_pixel0" };
+PNAME(dclk_vp1_p) = { "dclk_vp1_src", "clk_hdmiphy_pixel0" };
+PNAME(dclk_vp2_p) = { "dclk_vp2_src", "clk_hdmiphy_pixel0" };
+PNAME(clk_uart_p) = { "gpll", "cpll", "aupll", "xin24m", "clk_uart_frac_0",
+ "clk_uart_frac_1", "clk_uart_frac_2" };
+PNAME(clk_freq_pwm1_p) = { "sai0_mclkin", "sai1_mclkin", "sai2_mclkin",
+ "sai3_mclkin", "sai4_mclkin", "sai_sclkin_freq" };
+PNAME(clk_counter_pwm1_p) = { "sai0_mclkin", "sai1_mclkin", "sai2_mclkin",
+ "sai3_mclkin", "sai4_mclkin", "sai_sclkin_counter" };
+PNAME(sai_sclkin_freq_p) = { "sai0_sclk_in", "sai1_sclk_in", "sai2_sclk_in",
+ "sai3_sclk_in", "sai4_sclk_in" };
+PNAME(clk_ref_pcie0_phy_p) = { "clk_pcie_100m_src", "clk_pcie_100m_nduty_src",
+ "xin24m" };
+PNAME(hclk_vi_root_p) = { "clk_gpll_div6", "clk_cpll_div10",
+ "aclk_vi_root_inter", "xin24m" };
+PNAME(clk_ref_osc_mphy_p) = { "xin24m", "clk_gpio_mphy_i", "clk_ref_mphy_26m" };
+PNAME(mux_pmu200m_pmu100m_pmu50m_24m_p) = { "clk_200m_pmu_src", "clk_100m_pmu_src",
+ "clk_50m_pmu_src", "xin24m" };
+PNAME(mux_pmu100m_pmu50m_24m_p) = { "clk_100m_pmu_src", "clk_50m_pmu_src", "xin24m" };
+PNAME(mux_pmu100m_24m_32k_p) = { "clk_100m_pmu_src", "xin24m", "xin_osc0_div" };
+PNAME(clk_phy_ref_src_p) = { "xin24m", "clk_pmuphy_ref_src" };
+PNAME(clk_usbphy_ref_src_p) = { "usbphy0_24m", "usbphy1_24m" };
+PNAME(clk_cpll_ref_src_p) = { "xin24m", "clk_usbphy_ref_src" };
+PNAME(clk_aupll_ref_src_p) = { "xin24m", "clk_aupll_ref_io" };
+
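+/*
+ * The core PLLs (BPLL/LPLL) use the rk3588-style core PLL type and their
+ * own mode/control registers. GPLL and CPLL source most of the peripheral
+ * branches below and are marked CLK_IGNORE_UNUSED so the clk framework
+ * never gates them merely because no consumer has claimed them yet.
+ */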
+static struct rockchip_pll_clock rk3576_pll_clks[] __initdata = {
+ [bpll] = PLL(pll_rk3588_core, PLL_BPLL, "bpll", mux_pll_p,
+ 0, RK3576_PLL_CON(0),
+ RK3576_BPLL_MODE_CON0, 0, 15, 0, rk3576_pll_rates),
+ [lpll] = PLL(pll_rk3588_core, PLL_LPLL, "lpll", mux_pll_p,
+ 0, RK3576_LPLL_CON(16),
+ RK3576_LPLL_MODE_CON0, 0, 15, 0, rk3576_pll_rates),
+ [vpll] = PLL(pll_rk3588, PLL_VPLL, "vpll", mux_pll_p,
+ 0, RK3576_PLL_CON(88),
+ RK3576_MODE_CON0, 4, 15, 0, rk3576_pll_rates),
+ [aupll] = PLL(pll_rk3588, PLL_AUPLL, "aupll", mux_pll_p,
+ 0, RK3576_PLL_CON(96),
+ RK3576_MODE_CON0, 6, 15, 0, rk3576_pll_rates),
+ [cpll] = PLL(pll_rk3588, PLL_CPLL, "cpll", mux_pll_p,
+ CLK_IGNORE_UNUSED, RK3576_PLL_CON(104),
+ RK3576_MODE_CON0, 8, 15, 0, rk3576_pll_rates),
+ [gpll] = PLL(pll_rk3588, PLL_GPLL, "gpll", mux_pll_p,
+ CLK_IGNORE_UNUSED, RK3576_PLL_CON(112),
+ RK3576_MODE_CON0, 2, 15, 0, rk3576_pll_rates),
+ [ppll] = PLL(pll_rk3588_ddr, PLL_PPLL, "ppll", mux_pll_p,
+ CLK_IGNORE_UNUSED, RK3576_PMU_PLL_CON(128),
+ RK3576_MODE_CON0, 10, 15, 0, rk3576_ppll_rates),
+};
+
+static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
+ /*
+ * CRU Clock-Architecture
+ */
+ /* fixed */
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ COMPOSITE_FRAC(XIN_OSC0_DIV, "xin_osc0_div", "xin24m", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKSEL_CON(21), 0,
+ RK3576_PMU_CLKGATE_CON(7), 11, GFLAGS),
+
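+ /* fixed post-dividers of the PLLs, used as mux parents further down */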
+ FACTOR(0, "clk_spll_div12", "spll", 0, 1, 12),
+ FACTOR(0, "clk_spll_div6", "spll", 0, 1, 6),
+ FACTOR(0, "clk_spll_div4", "spll", 0, 1, 4),
+ FACTOR(0, "lpll_div2", "lpll", 0, 1, 2),
+ FACTOR(0, "bpll_div4", "bpll", 0, 1, 4),
+
+ /* top */
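+ /*
+  * COMPOSITE(id, name, parents, flags, selcon, mux_shift, mux_width,
+  * mux_flags, div_shift, div_width, div_flags, gatecon, gate_bit,
+  * gate_flags) combines a mux, a divider and a gate into one branch;
+  * the _NODIV, _NOMUX and _FRAC variants drop or replace components.
+  */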
+ COMPOSITE(CLK_CPLL_DIV20, "clk_cpll_div20", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(0), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE(CLK_CPLL_DIV10, "clk_cpll_div10", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(0), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV8, "clk_gpll_div8", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(1), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV6, "clk_gpll_div6", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(1), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE(CLK_CPLL_DIV4, "clk_cpll_div4", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(2), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV4, "clk_gpll_div4", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(2), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE(CLK_SPLL_DIV2, "clk_spll_div2", gpll_cpll_spll_bpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(3), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV3, "clk_gpll_div3", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(3), 12, 1, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE(CLK_CPLL_DIV2, "clk_cpll_div2", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(4), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE(CLK_GPLL_DIV2, "clk_gpll_div2", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(5), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE(CLK_SPLL_DIV1, "clk_spll_div1", gpll_cpll_spll_bpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(6), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NODIV(PCLK_TOP_ROOT, "pclk_top_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(8), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE(ACLK_TOP, "aclk_top", gpll_cpll_aupll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(9), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(1), 3, GFLAGS),
+ COMPOSITE(ACLK_TOP_MID, "aclk_top_mid", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(10), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE(ACLK_SECURE_HIGH, "aclk_secure_high", gpll_spll_aupll_bpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(10), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(1), 7, GFLAGS),
+ COMPOSITE_NODIV(HCLK_TOP, "hclk_top", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(19), 2, 2, MFLAGS,
+ RK3576_CLKGATE_CON(1), 14, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VO0VOP_CHANNEL, "hclk_vo0vop_channel", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(19), 6, 2, MFLAGS,
+ RK3576_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE(ACLK_VO0VOP_CHANNEL, "aclk_vo0vop_channel", gpll_cpll_lpll_bpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(19), 12, 2, MFLAGS, 8, 4, DFLAGS,
+ RK3576_CLKGATE_CON(2), 1, GFLAGS),
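+ /*
+  * Audio/UART fractional clocks: a separately registered source mux
+  * feeds a fractional divider, so the divider can be retuned without
+  * disturbing the parent selection.
+  */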
+ MUX(CLK_AUDIO_FRAC_0_SRC, "clk_audio_frac_0_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(13), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC_0, "clk_audio_frac_0", "clk_audio_frac_0_src", 0,
+ RK3576_CLKSEL_CON(12), 0,
+ RK3576_CLKGATE_CON(1), 10, GFLAGS),
+ MUX(CLK_AUDIO_FRAC_1_SRC, "clk_audio_frac_1_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(15), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC_1, "clk_audio_frac_1", "clk_audio_frac_1_src", 0,
+ RK3576_CLKSEL_CON(14), 0,
+ RK3576_CLKGATE_CON(1), 11, GFLAGS),
+ MUX(CLK_AUDIO_FRAC_2_SRC, "clk_audio_frac_2_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(17), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC_2, "clk_audio_frac_2", "clk_audio_frac_2_src", 0,
+ RK3576_CLKSEL_CON(16), 0,
+ RK3576_CLKGATE_CON(1), 12, GFLAGS),
+ MUX(CLK_AUDIO_FRAC_3_SRC, "clk_audio_frac_3_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(19), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC_3, "clk_audio_frac_3", "clk_audio_frac_3_src", 0,
+ RK3576_CLKSEL_CON(18), 0,
+ RK3576_CLKGATE_CON(1), 13, GFLAGS),
+ MUX(0, "clk_uart_frac_0_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(22), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC_0, "clk_uart_frac_0", "clk_uart_frac_0_src", 0,
+ RK3576_CLKSEL_CON(21), 0,
+ RK3576_CLKGATE_CON(2), 5, GFLAGS),
+ MUX(0, "clk_uart_frac_1_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(24), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC_1, "clk_uart_frac_1", "clk_uart_frac_1_src", 0,
+ RK3576_CLKSEL_CON(23), 0,
+ RK3576_CLKGATE_CON(2), 6, GFLAGS),
+ MUX(0, "clk_uart_frac_2_src", gpll_cpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(26), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC_2, "clk_uart_frac_2", "clk_uart_frac_2_src", 0,
+ RK3576_CLKSEL_CON(25), 0,
+ RK3576_CLKGATE_CON(2), 7, GFLAGS),
+ COMPOSITE(CLK_UART1_SRC_TOP, "clk_uart1_src_top", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(27), 13, 3, MFLAGS, 5, 8, DFLAGS,
+ RK3576_CLKGATE_CON(2), 13, GFLAGS),
+ COMPOSITE_NOMUX(CLK_AUDIO_INT_0, "clk_audio_int_0", "gpll", 0,
+ RK3576_CLKSEL_CON(28), 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(2), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_AUDIO_INT_1, "clk_audio_int_1", "cpll", 0,
+ RK3576_CLKSEL_CON(28), 5, 5, DFLAGS,
+ RK3576_CLKGATE_CON(2), 15, GFLAGS),
+ COMPOSITE_NOMUX(CLK_AUDIO_INT_2, "clk_audio_int_2", "aupll", 0,
+ RK3576_CLKSEL_CON(28), 10, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 0, GFLAGS),
+ COMPOSITE(CLK_PDM0_SRC_TOP, "clk_pdm0_src_top", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(29), 9, 3, MFLAGS, 0, 9, DFLAGS,
+ RK3576_CLKGATE_CON(3), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GMAC0_125M_SRC, "clk_gmac0_125m_src", "cpll", 0,
+ RK3576_CLKSEL_CON(30), 10, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 6, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GMAC1_125M_SRC, "clk_gmac1_125m_src", "cpll", 0,
+ RK3576_CLKSEL_CON(31), 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 7, GFLAGS),
+ COMPOSITE(LCLK_ASRC_SRC_0, "lclk_asrc_src_0", audio_frac_p, 0,
+ RK3576_CLKSEL_CON(31), 10, 2, MFLAGS, 5, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 10, GFLAGS),
+ COMPOSITE(LCLK_ASRC_SRC_1, "lclk_asrc_src_1", audio_frac_p, 0,
+ RK3576_CLKSEL_CON(32), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(3), 11, GFLAGS),
+ COMPOSITE(REF_CLK0_OUT_PLL, "ref_clk0_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0,
+ RK3576_CLKSEL_CON(33), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(4), 1, GFLAGS),
+ COMPOSITE(REF_CLK1_OUT_PLL, "ref_clk1_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0,
+ RK3576_CLKSEL_CON(34), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(REF_CLK2_OUT_PLL, "ref_clk2_out_pll", gpll_cpll_spll_aupll_lpll_24m_p, 0,
+ RK3576_CLKSEL_CON(35), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(4), 3, GFLAGS),
+ COMPOSITE(REFCLKO25M_GMAC0_OUT, "refclko25m_gmac0_out", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(36), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3576_CLKGATE_CON(5), 10, GFLAGS),
+ COMPOSITE(REFCLKO25M_GMAC1_OUT, "refclko25m_gmac1_out", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(36), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3576_CLKGATE_CON(5), 11, GFLAGS),
+ COMPOSITE(CLK_CIFOUT_OUT, "clk_cifout_out", gpll_cpll_24m_spll_p, 0,
+ RK3576_CLKSEL_CON(37), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(5), 12, GFLAGS),
+ GATE(CLK_GMAC0_RMII_CRU, "clk_gmac0_rmii_cru", "clk_cpll_div20", 0,
+ RK3576_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(CLK_GMAC1_RMII_CRU, "clk_gmac1_rmii_cru", "clk_cpll_div20", 0,
+ RK3576_CLKGATE_CON(5), 14, GFLAGS),
+ GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0,
+ RK3576_CLKGATE_CON(5), 15, GFLAGS),
+ COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE(CLK_MIPI_CAMERAOUT_M1, "clk_mipi_cameraout_m1", mux_24m_spll_gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(39), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE(CLK_MIPI_CAMERAOUT_M2, "clk_mipi_cameraout_m2", mux_24m_spll_gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(40), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE(MCLK_PDM0_SRC_TOP, "mclk_pdm0_src_top", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(41), 7, 3, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(6), 8, GFLAGS),
+
+ /* bus */
+ COMPOSITE_NODIV(HCLK_BUS_ROOT, "hclk_bus_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(55), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(11), 0, GFLAGS),
+ COMPOSITE_NODIV(PCLK_BUS_ROOT, "pclk_bus_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(55), 2, 2, MFLAGS,
+ RK3576_CLKGATE_CON(11), 1, GFLAGS),
+ COMPOSITE(ACLK_BUS_ROOT, "aclk_bus_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(55), 9, 1, MFLAGS, 4, 5, DFLAGS,
+ RK3576_CLKGATE_CON(11), 2, GFLAGS),
+ GATE(HCLK_CAN0, "hclk_can0", "hclk_bus_root", 0,
+ RK3576_CLKGATE_CON(11), 6, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(56), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(11), 7, GFLAGS),
+ GATE(HCLK_CAN1, "hclk_can1", "hclk_bus_root", 0,
+ RK3576_CLKGATE_CON(11), 8, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(56), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(11), 9, GFLAGS),
+ GATE(CLK_KEY_SHIFT, "clk_key_shift", "xin24m", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(11), 15, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 0, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 1, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 2, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 4, GFLAGS),
+ GATE(PCLK_I2C6, "pclk_i2c6", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 5, GFLAGS),
+ GATE(PCLK_I2C7, "pclk_i2c7", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 6, GFLAGS),
+ GATE(PCLK_I2C8, "pclk_i2c8", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 7, GFLAGS),
+ GATE(PCLK_I2C9, "pclk_i2c9", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 8, GFLAGS),
+ GATE(PCLK_WDT_BUSMCU, "pclk_wdt_busmcu", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(12), 9, GFLAGS),
+ GATE(TCLK_WDT_BUSMCU, "tclk_wdt_busmcu", "xin24m", 0,
+ RK3576_CLKGATE_CON(12), 10, GFLAGS),
+ GATE(ACLK_GIC, "aclk_gic", "aclk_bus_root", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(12), 11, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C1, "clk_i2c1", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(12), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C2, "clk_i2c2", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 2, 2, MFLAGS,
+ RK3576_CLKGATE_CON(12), 13, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C3, "clk_i2c3", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 4, 2, MFLAGS,
+ RK3576_CLKGATE_CON(12), 14, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C4, "clk_i2c4", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 6, 2, MFLAGS,
+ RK3576_CLKGATE_CON(12), 15, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C5, "clk_i2c5", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C6, "clk_i2c6", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C7, "clk_i2c7", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 12, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C8, "clk_i2c8", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(57), 14, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C9, "clk_i2c9", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(58), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(13), 4, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 6, GFLAGS),
+ COMPOSITE(CLK_SARADC, "clk_saradc", gpll_24m_p, 0,
+ RK3576_CLKSEL_CON(58), 12, 1, MFLAGS, 4, 8, DFLAGS,
+ RK3576_CLKGATE_CON(13), 7, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m", 0,
+ RK3576_CLKSEL_CON(59), 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(13), 9, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 10, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 13, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 14, GFLAGS),
+ GATE(PCLK_UART6, "pclk_uart6", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(13), 15, GFLAGS),
+ GATE(PCLK_UART7, "pclk_uart7", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(PCLK_UART8, "pclk_uart8", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(PCLK_UART9, "pclk_uart9", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(PCLK_UART10, "pclk_uart10", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(PCLK_UART11, "pclk_uart11", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(14), 4, GFLAGS),
+ COMPOSITE(SCLK_UART0, "sclk_uart0", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(60), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 5, GFLAGS),
+ COMPOSITE(SCLK_UART2, "sclk_uart2", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(61), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 6, GFLAGS),
+ COMPOSITE(SCLK_UART3, "sclk_uart3", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(62), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 9, GFLAGS),
+ COMPOSITE(SCLK_UART4, "sclk_uart4", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(63), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 12, GFLAGS),
+ COMPOSITE(SCLK_UART5, "sclk_uart5", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(64), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(14), 15, GFLAGS),
+ COMPOSITE(SCLK_UART6, "sclk_uart6", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(65), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 2, GFLAGS),
+ COMPOSITE(SCLK_UART7, "sclk_uart7", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(66), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 5, GFLAGS),
+ COMPOSITE(SCLK_UART8, "sclk_uart8", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(67), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 8, GFLAGS),
+ COMPOSITE(SCLK_UART9, "sclk_uart9", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(68), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 9, GFLAGS),
+ COMPOSITE(SCLK_UART10, "sclk_uart10", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(69), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 10, GFLAGS),
+ COMPOSITE(SCLK_UART11, "sclk_uart11", clk_uart_p, 0,
+ RK3576_CLKSEL_CON(70), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(15), 11, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(15), 13, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(15), 14, GFLAGS),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(15), 15, GFLAGS),
+ GATE(PCLK_SPI3, "pclk_spi3", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(16), 0, GFLAGS),
+ GATE(PCLK_SPI4, "pclk_spi4", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(16), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI0, "clk_spi0", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(70), 13, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI2, "clk_spi2", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 2, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI3, "clk_spi3", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 4, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI4, "clk_spi4", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 6, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(PCLK_WDT0, "pclk_wdt0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(16), 7, GFLAGS),
+ GATE(TCLK_WDT0, "tclk_wdt0", "xin24m", 0,
+ RK3576_CLKGATE_CON(16), 8, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(16), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM1, "clk_pwm1", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(16), 11, GFLAGS),
+ GATE(CLK_OSC_PWM1, "clk_osc_pwm1", "xin24m", 0,
+ RK3576_CLKGATE_CON(16), 13, GFLAGS),
+ GATE(CLK_RC_PWM1, "clk_rc_pwm1", "clk_pvtm_clkout", 0,
+ RK3576_CLKGATE_CON(16), 15, GFLAGS),
+ GATE(PCLK_BUSTIMER0, "pclk_bustimer0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(PCLK_BUSTIMER1, "pclk_bustimer1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(17), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_ROOT, "clk_timer0_root", mux_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(71), 14, 1, MFLAGS,
+ RK3576_CLKGATE_CON(17), 5, GFLAGS),
+ GATE(CLK_TIMER0, "clk_timer0", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 6, GFLAGS),
+ GATE(CLK_TIMER1, "clk_timer1", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 7, GFLAGS),
+ GATE(CLK_TIMER2, "clk_timer2", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(CLK_TIMER3, "clk_timer3", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 9, GFLAGS),
+ GATE(CLK_TIMER4, "clk_timer4", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "clk_timer0_root", 0,
+ RK3576_CLKGATE_CON(17), 11, GFLAGS),
+ GATE(PCLK_MAILBOX0, "pclk_mailbox0", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(17), 13, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(17), 15, GFLAGS),
+ GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m", 0,
+ RK3576_CLKGATE_CON(18), 0, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 1, GFLAGS),
+ GATE(DBCLK_GPIO2, "dbclk_gpio2", "xin24m", 0,
+ RK3576_CLKGATE_CON(18), 2, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 3, GFLAGS),
+ GATE(DBCLK_GPIO3, "dbclk_gpio3", "xin24m", 0,
+ RK3576_CLKGATE_CON(18), 4, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 5, GFLAGS),
+ GATE(DBCLK_GPIO4, "dbclk_gpio4", "xin24m", 0,
+ RK3576_CLKGATE_CON(18), 6, GFLAGS),
+ GATE(ACLK_DECOM, "aclk_decom", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 7, GFLAGS),
+ GATE(PCLK_DECOM, "pclk_decom", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(18), 8, GFLAGS),
+ COMPOSITE(DCLK_DECOM, "dclk_decom", gpll_spll_p, 0,
+ RK3576_CLKSEL_CON(72), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(18), 9, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER1_ROOT, "clk_timer1_root", mux_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(72), 6, 1, MFLAGS,
+ RK3576_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(CLK_TIMER6, "clk_timer6", "clk_timer1_root", 0,
+ RK3576_CLKGATE_CON(18), 11, GFLAGS),
+ COMPOSITE(CLK_TIMER7, "clk_timer7", mux_100m_24m_lclk0_p, 0,
+ RK3576_CLKSEL_CON(72), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(18), 12, GFLAGS),
+ COMPOSITE(CLK_TIMER8, "clk_timer8", mux_100m_24m_lclk1_p, 0,
+ RK3576_CLKSEL_CON(73), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(18), 13, GFLAGS),
+ GATE(CLK_TIMER9, "clk_timer9", "clk_timer1_root", 0,
+ RK3576_CLKGATE_CON(18), 14, GFLAGS),
+ GATE(CLK_TIMER10, "clk_timer10", "clk_timer1_root", 0,
+ RK3576_CLKGATE_CON(18), 15, GFLAGS),
+ GATE(CLK_TIMER11, "clk_timer11", "clk_timer1_root", 0,
+ RK3576_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(ACLK_DMAC0, "aclk_dmac0", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 2, GFLAGS),
+ GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 3, GFLAGS),
+ GATE(ACLK_SPINLOCK, "aclk_spinlock", "aclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(HCLK_I3C0, "hclk_i3c0", "hclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(HCLK_I3C1, "hclk_i3c1", "hclk_bus_root", 0,
+ RK3576_CLKGATE_CON(19), 9, GFLAGS),
+ COMPOSITE_NODIV(HCLK_BUS_CM0_ROOT, "hclk_bus_cm0_root", mux_400m_200m_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(73), 13, 2, MFLAGS,
+ RK3576_CLKGATE_CON(19), 10, GFLAGS),
+ GATE(FCLK_BUS_CM0_CORE, "fclk_bus_cm0_core", "hclk_bus_cm0_root", 0,
+ RK3576_CLKGATE_CON(19), 12, GFLAGS),
+ COMPOSITE(CLK_BUS_CM0_RTC, "clk_bus_cm0_rtc", mux_24m_32k_p, 0,
+ RK3576_CLKSEL_CON(74), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(19), 14, GFLAGS),
+ GATE(PCLK_PMU2, "pclk_pmu2", "pclk_bus_root", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(19), 15, GFLAGS),
+ GATE(PCLK_PWM2, "pclk_pwm2", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(20), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM2, "clk_pwm2", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(74), 6, 2, MFLAGS,
+ RK3576_CLKGATE_CON(20), 5, GFLAGS),
+ GATE(CLK_OSC_PWM2, "clk_osc_pwm2", "xin24m", 0,
+ RK3576_CLKGATE_CON(20), 7, GFLAGS),
+ GATE(CLK_RC_PWM2, "clk_rc_pwm2", "clk_pvtm_clkout", 0,
+ RK3576_CLKGATE_CON(20), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_FREQ_PWM1, "clk_freq_pwm1", clk_freq_pwm1_p, 0,
+ RK3576_CLKSEL_CON(74), 8, 3, MFLAGS,
+ RK3576_CLKGATE_CON(20), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_COUNTER_PWM1, "clk_counter_pwm1", clk_counter_pwm1_p, 0,
+ RK3576_CLKSEL_CON(74), 11, 3, MFLAGS,
+ RK3576_CLKGATE_CON(20), 9, GFLAGS),
+ COMPOSITE_NODIV(SAI_SCLKIN_FREQ, "sai_sclkin_freq", sai_sclkin_freq_p, 0,
+ RK3576_CLKSEL_CON(75), 0, 3, MFLAGS,
+ RK3576_CLKGATE_CON(20), 10, GFLAGS),
+ COMPOSITE_NODIV(SAI_SCLKIN_COUNTER, "sai_sclkin_counter", sai_sclkin_freq_p, 0,
+ RK3576_CLKSEL_CON(75), 3, 3, MFLAGS,
+ RK3576_CLKGATE_CON(20), 11, GFLAGS),
+ COMPOSITE(CLK_I3C0, "clk_i3c0", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(78), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(20), 12, GFLAGS),
+ COMPOSITE(CLK_I3C1, "clk_i3c1", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(78), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(20), 13, GFLAGS),
+ GATE(PCLK_CSIDPHY1, "pclk_csidphy1", "pclk_bus_root", 0,
+ RK3576_CLKGATE_CON(40), 2, GFLAGS),
+
+ /* cci */
+ COMPOSITE(PCLK_CCI_ROOT, "pclk_cci_root", mux_24m_ccipvtpll_gpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CCI_CLKSEL_CON(4), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CCI_CLKGATE_CON(1), 10, GFLAGS),
+ COMPOSITE(ACLK_CCI_ROOT, "aclk_cci_root", mux_24m_ccipvtpll_gpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CCI_CLKSEL_CON(4), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CCI_CLKGATE_CON(1), 11, GFLAGS),
+
+ /* center */
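+ /*
+  * COMPOSITE_DIV_OFFSET is a COMPOSITE whose divider field lives in a
+  * different CLKSEL register than its mux field.
+  */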
+ COMPOSITE_DIV_OFFSET(ACLK_CENTER_ROOT, "aclk_center_root", gpll_cpll_spll_aupll_bpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(168), 5, 3, MFLAGS,
+ RK3576_CLKSEL_CON(167), 9, 5, DFLAGS,
+ RK3576_CLKGATE_CON(72), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_CENTER_LOW_ROOT, "aclk_center_low_root", mux_500m_250m_100m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(168), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(72), 1, GFLAGS),
+ COMPOSITE_NODIV(HCLK_CENTER_ROOT, "hclk_center_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(168), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(72), 2, GFLAGS),
+ COMPOSITE_NODIV(PCLK_CENTER_ROOT, "pclk_center_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(168), 12, 2, MFLAGS,
+ RK3576_CLKGATE_CON(72), 3, GFLAGS),
+ GATE(ACLK_DMA2DDR, "aclk_dma2ddr", "aclk_center_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(72), 5, GFLAGS),
+ GATE(ACLK_DDR_SHAREMEM, "aclk_ddr_sharemem", "aclk_center_low_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(72), 6, GFLAGS),
+ GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_center_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(72), 10, GFLAGS),
+ GATE(PCLK_SHAREMEM, "pclk_sharemem", "pclk_center_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(72), 11, GFLAGS),
+
+ /* ddr */
+ COMPOSITE(PCLK_DDR_ROOT, "pclk_ddr_root", gpll_cpll_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(76), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(21), 0, GFLAGS),
+ GATE(PCLK_DDR_MON_CH0, "pclk_ddr_mon_ch0", "pclk_ddr_root", CLK_IGNORE_UNUSED,
+ RK3576_CLKGATE_CON(21), 1, GFLAGS),
+ COMPOSITE(HCLK_DDR_ROOT, "hclk_ddr_root", gpll_cpll_p, CLK_IGNORE_UNUSED,
+ RK3576_CLKSEL_CON(77), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(22), 11, GFLAGS),
+ GATE(FCLK_DDR_CM0_CORE, "fclk_ddr_cm0_core", "hclk_ddr_root", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(22), 15, GFLAGS),
+ COMPOSITE_NODIV(CLK_DDR_TIMER_ROOT, "clk_ddr_timer_root", mux_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(77), 6, 1, MFLAGS,
+ RK3576_CLKGATE_CON(23), 3, GFLAGS),
+ GATE(CLK_DDR_TIMER0, "clk_ddr_timer0", "clk_ddr_timer_root", 0,
+ RK3576_CLKGATE_CON(23), 4, GFLAGS),
+ GATE(CLK_DDR_TIMER1, "clk_ddr_timer1", "clk_ddr_timer_root", 0,
+ RK3576_CLKGATE_CON(23), 5, GFLAGS),
+ GATE(TCLK_WDT_DDR, "tclk_wdt_ddr", "xin24m", 0,
+ RK3576_CLKGATE_CON(23), 6, GFLAGS),
+ GATE(PCLK_WDT, "pclk_wdt", "pclk_ddr_root", 0,
+ RK3576_CLKGATE_CON(23), 7, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_ddr_root", 0,
+ RK3576_CLKGATE_CON(23), 8, GFLAGS),
+ COMPOSITE(CLK_DDR_CM0_RTC, "clk_ddr_cm0_rtc", mux_24m_32k_p, 0,
+ RK3576_CLKSEL_CON(77), 12, 1, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(23), 10, GFLAGS),
+
+ /* gpu */
+ COMPOSITE(CLK_GPU_SRC_PRE, "clk_gpu_src_pre", gpll_cpll_aupll_spll_lpll_p, 0,
+ RK3576_CLKSEL_CON(165), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(69), 1, GFLAGS),
+ GATE(CLK_GPU, "clk_gpu", "clk_gpu_src_pre", 0,
+ RK3576_CLKGATE_CON(69), 3, GFLAGS),
+ COMPOSITE_NODIV(PCLK_GPU_ROOT, "pclk_gpu_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(166), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(69), 8, GFLAGS),
+
+ /* npu */
+ COMPOSITE_NODIV(HCLK_RKNN_ROOT, "hclk_rknn_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(86), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(31), 4, GFLAGS),
+ COMPOSITE(CLK_RKNN_DSU0, "clk_rknn_dsu0", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(86), 7, 2, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(31), 5, GFLAGS),
+ GATE(ACLK_RKNN0, "aclk_rknn0", "clk_rknn_dsu0", 0,
+ RK3576_CLKGATE_CON(28), 9, GFLAGS),
+ GATE(ACLK_RKNN1, "aclk_rknn1", "clk_rknn_dsu0", 0,
+ RK3576_CLKGATE_CON(29), 0, GFLAGS),
+ COMPOSITE_NODIV(PCLK_NPUTOP_ROOT, "pclk_nputop_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(87), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(31), 8, GFLAGS),
+ GATE(PCLK_NPU_TIMER, "pclk_npu_timer", "pclk_nputop_root", 0,
+ RK3576_CLKGATE_CON(31), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_NPUTIMER_ROOT, "clk_nputimer_root", mux_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(87), 2, 1, MFLAGS,
+ RK3576_CLKGATE_CON(31), 11, GFLAGS),
+ GATE(CLK_NPUTIMER0, "clk_nputimer0", "clk_nputimer_root", 0,
+ RK3576_CLKGATE_CON(31), 12, GFLAGS),
+ GATE(CLK_NPUTIMER1, "clk_nputimer1", "clk_nputimer_root", 0,
+ RK3576_CLKGATE_CON(31), 13, GFLAGS),
+ GATE(PCLK_NPU_WDT, "pclk_npu_wdt", "pclk_nputop_root", 0,
+ RK3576_CLKGATE_CON(31), 14, GFLAGS),
+ GATE(TCLK_NPU_WDT, "tclk_npu_wdt", "xin24m", 0,
+ RK3576_CLKGATE_CON(31), 15, GFLAGS),
+ GATE(ACLK_RKNN_CBUF, "aclk_rknn_cbuf", "clk_rknn_dsu0", 0,
+ RK3576_CLKGATE_CON(32), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_NPU_CM0_ROOT, "hclk_npu_cm0_root", mux_400m_200m_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(87), 3, 2, MFLAGS,
+ RK3576_CLKGATE_CON(32), 5, GFLAGS),
+ GATE(FCLK_NPU_CM0_CORE, "fclk_npu_cm0_core", "hclk_npu_cm0_root", 0,
+ RK3576_CLKGATE_CON(32), 7, GFLAGS),
+ COMPOSITE(CLK_NPU_CM0_RTC, "clk_npu_cm0_rtc", mux_24m_32k_p, 0,
+ RK3576_CLKSEL_CON(87), 10, 1, MFLAGS, 5, 5, DFLAGS,
+ RK3576_CLKGATE_CON(32), 9, GFLAGS),
+ GATE(HCLK_RKNN_CBUF, "hclk_rknn_cbuf", "hclk_rknn_root", 0,
+ RK3576_CLKGATE_CON(32), 12, GFLAGS),
+
+ /* nvm */
+ COMPOSITE_NODIV(HCLK_NVM_ROOT, "hclk_nvm_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(88), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(33), 0, GFLAGS),
+ COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(88), 7, 1, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(33), 1, GFLAGS),
+ COMPOSITE(SCLK_FSPI_X2, "sclk_fspi_x2", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(89), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3576_CLKGATE_CON(33), 6, GFLAGS),
+ GATE(HCLK_FSPI, "hclk_fspi", "hclk_nvm_root", 0,
+ RK3576_CLKGATE_CON(33), 7, GFLAGS),
+ COMPOSITE(CCLK_SRC_EMMC, "cclk_src_emmc", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(89), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3576_CLKGATE_CON(33), 8, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_nvm_root", 0,
+ RK3576_CLKGATE_CON(33), 9, GFLAGS),
+ GATE(ACLK_EMMC, "aclk_emmc", "aclk_nvm_root", 0,
+ RK3576_CLKGATE_CON(33), 10, GFLAGS),
+ COMPOSITE_NODIV(BCLK_EMMC, "bclk_emmc", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(90), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(33), 11, GFLAGS),
+ GATE(TCLK_EMMC, "tclk_emmc", "xin24m", 0,
+ RK3576_CLKGATE_CON(33), 12, GFLAGS),
+
+ /* ufs & usb */
+ COMPOSITE(ACLK_UFS_ROOT, "aclk_ufs_root", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(115), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(47), 0, GFLAGS),
+ COMPOSITE(ACLK_USB_ROOT, "aclk_usb_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(115), 11, 1, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(47), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_USB_ROOT, "pclk_usb_root", mux_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(115), 12, 2, MFLAGS,
+ RK3576_CLKGATE_CON(47), 2, GFLAGS),
+ GATE(ACLK_USB3OTG0, "aclk_usb3otg0", "aclk_usb_root", 0,
+ RK3576_CLKGATE_CON(47), 5, GFLAGS),
+ GATE(CLK_REF_USB3OTG0, "clk_ref_usb3otg0", "xin24m", 0,
+ RK3576_CLKGATE_CON(47), 6, GFLAGS),
+ GATE(CLK_SUSPEND_USB3OTG0, "clk_suspend_usb3otg0", "xin24m", 0,
+ RK3576_CLKGATE_CON(47), 7, GFLAGS),
+ GATE(ACLK_MMU2, "aclk_mmu2", "aclk_usb_root", 0,
+ RK3576_CLKGATE_CON(47), 12, GFLAGS),
+ GATE(ACLK_SLV_MMU2, "aclk_slv_mmu2", "aclk_usb_root", 0,
+ RK3576_CLKGATE_CON(47), 13, GFLAGS),
+ GATE(ACLK_UFS_SYS, "aclk_ufs_sys", "aclk_ufs_root", 0,
+ RK3576_CLKGATE_CON(47), 15, GFLAGS),
+
+ /* vdec */
+ COMPOSITE_NODIV(HCLK_RKVDEC_ROOT, "hclk_rkvdec_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(110), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(45), 0, GFLAGS),
+ COMPOSITE(ACLK_RKVDEC_ROOT, "aclk_rkvdec_root", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(110), 7, 2, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(45), 1, GFLAGS),
+ COMPOSITE(ACLK_RKVDEC_ROOT_BAK, "aclk_rkvdec_root_bak", cpll_vpll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(110), 14, 2, MFLAGS, 9, 5, DFLAGS,
+ RK3576_CLKGATE_CON(45), 2, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_root", 0,
+ RK3576_CLKGATE_CON(45), 3, GFLAGS),
+ COMPOSITE(CLK_RKVDEC_HEVC_CA, "clk_rkvdec_hevc_ca", gpll_cpll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(111), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(45), 8, GFLAGS),
+ GATE(CLK_RKVDEC_CORE, "clk_rkvdec_core", "aclk_rkvdec_root", 0,
+ RK3576_CLKGATE_CON(45), 9, GFLAGS),
+
+ /* venc */
+ COMPOSITE_NODIV(HCLK_VEPU0_ROOT, "hclk_vepu0_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(124), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(51), 0, GFLAGS),
+ COMPOSITE(ACLK_VEPU0_ROOT, "aclk_vepu0_root", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(124), 7, 1, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(51), 1, GFLAGS),
+ COMPOSITE(CLK_VEPU0_CORE, "clk_vepu0_core", gpll_cpll_spll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(124), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(51), 6, GFLAGS),
+ GATE(HCLK_VEPU0, "hclk_vepu0", "hclk_vepu0_root", 0,
+ RK3576_CLKGATE_CON(51), 4, GFLAGS),
+ GATE(ACLK_VEPU0, "aclk_vepu0", "aclk_vepu0_root", 0,
+ RK3576_CLKGATE_CON(51), 5, GFLAGS),
+
+ /* vi */
+ COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_spll_isppvtpll_bpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(128), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(53), 0, GFLAGS),
+ COMPOSITE_NOMUX(ACLK_VI_ROOT_INTER, "aclk_vi_root_inter", "aclk_vi_root", 0,
+ RK3576_CLKSEL_CON(130), 10, 3, DFLAGS,
+ RK3576_CLKGATE_CON(54), 13, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", hclk_vi_root_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(128), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(53), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_VI_ROOT, "pclk_vi_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(128), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(53), 2, GFLAGS),
+ COMPOSITE(DCLK_VICAP, "dclk_vicap", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(129), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(53), 6, GFLAGS),
+ GATE(ACLK_VICAP, "aclk_vicap", "aclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 7, GFLAGS),
+ GATE(HCLK_VICAP, "hclk_vicap", "hclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 8, GFLAGS),
+ COMPOSITE(CLK_ISP_CORE, "clk_isp_core", gpll_spll_isppvtpll_bpll_lpll_p, 0,
+ RK3576_CLKSEL_CON(129), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(53), 9, GFLAGS),
+ GATE(CLK_ISP_CORE_MARVIN, "clk_isp_core_marvin", "clk_isp_core", 0,
+ RK3576_CLKGATE_CON(53), 10, GFLAGS),
+ GATE(CLK_ISP_CORE_VICAP, "clk_isp_core_vicap", "clk_isp_core", 0,
+ RK3576_CLKGATE_CON(53), 11, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 12, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 13, GFLAGS),
+ GATE(ACLK_VPSS, "aclk_vpss", "aclk_vi_root", 0,
+ RK3576_CLKGATE_CON(53), 15, GFLAGS),
+ GATE(HCLK_VPSS, "hclk_vpss", "hclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 0, GFLAGS),
+ GATE(CLK_CORE_VPSS, "clk_core_vpss", "clk_isp_core", 0,
+ RK3576_CLKGATE_CON(54), 1, GFLAGS),
+ GATE(PCLK_CSI_HOST_0, "pclk_csi_host_0", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 4, GFLAGS),
+ GATE(PCLK_CSI_HOST_1, "pclk_csi_host_1", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 5, GFLAGS),
+ GATE(PCLK_CSI_HOST_2, "pclk_csi_host_2", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 6, GFLAGS),
+ GATE(PCLK_CSI_HOST_3, "pclk_csi_host_3", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 7, GFLAGS),
+ GATE(PCLK_CSI_HOST_4, "pclk_csi_host_4", "pclk_vi_root", 0,
+ RK3576_CLKGATE_CON(54), 8, GFLAGS),
+ COMPOSITE_NODIV(ICLK_CSIHOST01, "iclk_csihost01", mux_400m_200m_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(130), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(54), 10, GFLAGS),
+ GATE(ICLK_CSIHOST0, "iclk_csihost0", "iclk_csihost01", 0,
+ RK3576_CLKGATE_CON(54), 11, GFLAGS),
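+
+ /* vop */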
+ COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", gpll_cpll_aupll_spll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(144), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(61), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(144), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(61), 2, GFLAGS),
+ COMPOSITE_NODIV(PCLK_VOP_ROOT, "pclk_vop_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(144), 12, 2, MFLAGS,
+ RK3576_CLKGATE_CON(61), 3, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vop_root", 0,
+ RK3576_CLKGATE_CON(61), 8, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vop_root", 0,
+ RK3576_CLKGATE_CON(61), 9, GFLAGS),
+ COMPOSITE(DCLK_VP0_SRC, "dclk_vp0_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(145), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(61), 10, GFLAGS),
+ COMPOSITE(DCLK_VP1_SRC, "dclk_vp1_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(146), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(61), 11, GFLAGS),
+ COMPOSITE(DCLK_VP2_SRC, "dclk_vp2_src", gpll_cpll_vpll_bpll_lpll_p, CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(147), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(61), 12, GFLAGS),
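+ /*
+  * Per-port output muxes: each dclk_vpN selects between its internal PLL
+  * path and the HDMI PHY pixel clock. CLK_SET_RATE_PARENT forwards rate
+  * requests to the selected parent, while CLK_SET_RATE_NO_REPARENT keeps
+  * rate changes from silently switching the source.
+  */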
+ COMPOSITE_NODIV(DCLK_VP0, "dclk_vp0", dclk_vp0_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(147), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(61), 13, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VP1, "dclk_vp1", dclk_vp1_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(147), 12, 1, MFLAGS,
+ RK3576_CLKGATE_CON(62), 0, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VP2, "dclk_vp2", dclk_vp2_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(147), 13, 1, MFLAGS,
+ RK3576_CLKGATE_CON(62), 1, GFLAGS),
+
+ /* vo0 */
+ COMPOSITE(ACLK_VO0_ROOT, "aclk_vo0_root", gpll_cpll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(149), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(63), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VO0_ROOT, "hclk_vo0_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(149), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(63), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_VO0_ROOT, "pclk_vo0_root", mux_150m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(149), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(63), 3, GFLAGS),
+ GATE(ACLK_HDCP0, "aclk_hdcp0", "aclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(63), 12, GFLAGS),
+ GATE(HCLK_HDCP0, "hclk_hdcp0", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(63), 13, GFLAGS),
+ GATE(PCLK_HDCP0, "pclk_hdcp0", "pclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(63), 14, GFLAGS),
+ GATE(CLK_TRNG0_SKP, "clk_trng0_skp", "aclk_hdcp0", 0,
+ RK3576_CLKGATE_CON(64), 4, GFLAGS),
+ GATE(PCLK_DSIHOST0, "pclk_dsihost0", "pclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(64), 5, GFLAGS),
+ COMPOSITE(CLK_DSIHOST0, "clk_dsihost0", gpll_cpll_spll_vpll_bpll_lpll_p, 0,
+ RK3576_CLKSEL_CON(151), 7, 3, MFLAGS, 0, 7, DFLAGS,
+ RK3576_CLKGATE_CON(64), 6, GFLAGS),
+ GATE(PCLK_HDMITX0, "pclk_hdmitx0", "pclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(64), 7, GFLAGS),
+ COMPOSITE(CLK_HDMITX0_EARC, "clk_hdmitx0_earc", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(151), 15, 1, MFLAGS, 10, 5, DFLAGS,
+ RK3576_CLKGATE_CON(64), 8, GFLAGS),
+ GATE(CLK_HDMITX0_REF, "clk_hdmitx0_ref", "aclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(64), 9, GFLAGS),
+ GATE(PCLK_EDP0, "pclk_edp0", "pclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(64), 13, GFLAGS),
+ GATE(CLK_EDP0_24M, "clk_edp0_24m", "xin24m", 0,
+ RK3576_CLKGATE_CON(64), 14, GFLAGS),
+ COMPOSITE_NODIV(CLK_EDP0_200M, "clk_edp0_200m", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(152), 1, 2, MFLAGS,
+ RK3576_CLKGATE_CON(64), 15, GFLAGS),
+ COMPOSITE(MCLK_SAI5_8CH_SRC, "mclk_sai5_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(154), 10, 3, MFLAGS, 2, 8, DFLAGS,
+ RK3576_CLKGATE_CON(65), 3, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI5_8CH, "mclk_sai5_8ch", mclk_sai5_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(154), 13, 1, MFLAGS,
+ RK3576_CLKGATE_CON(65), 4, GFLAGS),
+ GATE(HCLK_SAI5_8CH, "hclk_sai5_8ch", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(65), 5, GFLAGS),
+ COMPOSITE(MCLK_SAI6_8CH_SRC, "mclk_sai6_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(155), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(65), 7, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI6_8CH, "mclk_sai6_8ch", mclk_sai6_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(155), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(65), 8, GFLAGS),
+ GATE(HCLK_SAI6_8CH, "hclk_sai6_8ch", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(65), 9, GFLAGS),
+ GATE(HCLK_SPDIF_TX2, "hclk_spdif_tx2", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(65), 10, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX2, "mclk_spdif_tx2", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(156), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(65), 13, GFLAGS),
+ GATE(HCLK_SPDIF_RX2, "hclk_spdif_rx2", "hclk_vo0_root", 0,
+ RK3576_CLKGATE_CON(65), 14, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_RX2, "mclk_spdif_rx2", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(156), 13, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(65), 15, GFLAGS),
+
+ /* vo1 */
+ COMPOSITE(ACLK_VO1_ROOT, "aclk_vo1_root", gpll_cpll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(158), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(67), 1, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VO1_ROOT, "hclk_vo1_root", mux_200m_100m_50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(158), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(67), 2, GFLAGS),
+ COMPOSITE_NODIV(PCLK_VO1_ROOT, "pclk_vo1_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(158), 9, 2, MFLAGS,
+ RK3576_CLKGATE_CON(67), 3, GFLAGS),
+ COMPOSITE(MCLK_SAI8_8CH_SRC, "mclk_sai8_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(157), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(66), 1, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI8_8CH, "mclk_sai8_8ch", mclk_sai8_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(157), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(66), 2, GFLAGS),
+ GATE(HCLK_SAI8_8CH, "hclk_sai8_8ch", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(66), 0, GFLAGS),
+ COMPOSITE(MCLK_SAI7_8CH_SRC, "mclk_sai7_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(159), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(67), 8, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI7_8CH, "mclk_sai7_8ch", mclk_sai7_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(159), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(67), 9, GFLAGS),
+ GATE(HCLK_SAI7_8CH, "hclk_sai7_8ch", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(67), 10, GFLAGS),
+ GATE(HCLK_SPDIF_TX3, "hclk_spdif_tx3", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(67), 11, GFLAGS),
+ GATE(HCLK_SPDIF_TX4, "hclk_spdif_tx4", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(67), 12, GFLAGS),
+ GATE(HCLK_SPDIF_TX5, "hclk_spdif_tx5", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(67), 13, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX3, "mclk_spdif_tx3", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(160), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(67), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_AUX16MHZ_0, "clk_aux16mhz_0", "gpll", 0,
+ RK3576_CLKSEL_CON(161), 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(67), 15, GFLAGS),
+ GATE(ACLK_DP0, "aclk_dp0", "aclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 0, GFLAGS),
+ GATE(PCLK_DP0, "pclk_dp0", "pclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 1, GFLAGS),
+ GATE(ACLK_HDCP1, "aclk_hdcp1", "aclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 4, GFLAGS),
+ GATE(HCLK_HDCP1, "hclk_hdcp1", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 5, GFLAGS),
+ GATE(PCLK_HDCP1, "pclk_hdcp1", "pclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 6, GFLAGS),
+ GATE(CLK_TRNG1_SKP, "clk_trng1_skp", "aclk_hdcp1", 0,
+ RK3576_CLKGATE_CON(68), 7, GFLAGS),
+ GATE(HCLK_SAI9_8CH, "hclk_sai9_8ch", "hclk_vo1_root", 0,
+ RK3576_CLKGATE_CON(68), 9, GFLAGS),
+ COMPOSITE(MCLK_SAI9_8CH_SRC, "mclk_sai9_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(162), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(68), 10, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI9_8CH, "mclk_sai9_8ch", mclk_sai9_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(162), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(68), 11, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX4, "mclk_spdif_tx4", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(163), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(68), 12, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX5, "mclk_spdif_tx5", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(164), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(68), 13, GFLAGS),
+
+ /* vpu */
+ COMPOSITE(ACLK_VPU_ROOT, "aclk_vpu_root", gpll_spll_cpll_bpll_lpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(118), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(49), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VPU_MID_ROOT, "aclk_vpu_mid_root", mux_600m_400m_300m_24m_p, 0,
+ RK3576_CLKSEL_CON(118), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(49), 1, GFLAGS),
+ COMPOSITE_NODIV(HCLK_VPU_ROOT, "hclk_vpu_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(118), 10, 2, MFLAGS,
+ RK3576_CLKGATE_CON(49), 2, GFLAGS),
+ COMPOSITE(ACLK_JPEG_ROOT, "aclk_jpeg_root", gpll_cpll_aupll_spll_p, 0,
+ RK3576_CLKSEL_CON(119), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(49), 3, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VPU_LOW_ROOT, "aclk_vpu_low_root", mux_400m_200m_100m_24m_p, 0,
+ RK3576_CLKSEL_CON(119), 7, 2, MFLAGS,
+ RK3576_CLKGATE_CON(49), 4, GFLAGS),
+ GATE(HCLK_RGA2E_0, "hclk_rga2e_0", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(49), 13, GFLAGS),
+ GATE(ACLK_RGA2E_0, "aclk_rga2e_0", "aclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(49), 14, GFLAGS),
+ COMPOSITE(CLK_CORE_RGA2E_0, "clk_core_rga2e_0", gpll_spll_cpll_bpll_lpll_p, 0,
+ RK3576_CLKSEL_CON(120), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(49), 15, GFLAGS),
+ GATE(ACLK_JPEG, "aclk_jpeg", "aclk_jpeg_root", 0,
+ RK3576_CLKGATE_CON(50), 0, GFLAGS),
+ GATE(HCLK_JPEG, "hclk_jpeg", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 1, GFLAGS),
+ GATE(HCLK_VDPP, "hclk_vdpp", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 2, GFLAGS),
+ GATE(ACLK_VDPP, "aclk_vdpp", "aclk_vpu_mid_root", 0,
+ RK3576_CLKGATE_CON(50), 3, GFLAGS),
+ COMPOSITE(CLK_CORE_VDPP, "clk_core_vdpp", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(120), 13, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(50), 4, GFLAGS),
+ GATE(HCLK_RGA2E_1, "hclk_rga2e_1", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 5, GFLAGS),
+ GATE(ACLK_RGA2E_1, "aclk_rga2e_1", "aclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 6, GFLAGS),
+ COMPOSITE(CLK_CORE_RGA2E_1, "clk_core_rga2e_1", gpll_spll_cpll_bpll_lpll_p, 0,
+ RK3576_CLKSEL_CON(121), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(50), 7, GFLAGS),
+ MUX(0, "dclk_ebc_frac_src_p", gpll_cpll_vpll_aupll_24m_p, 0,
+ RK3576_CLKSEL_CON(123), 0, 3, MFLAGS),
+ COMPOSITE_FRAC(DCLK_EBC_FRAC_SRC, "dclk_ebc_frac_src", "dclk_ebc_frac_src_p", 0,
+ RK3576_CLKSEL_CON(122), 0,
+ RK3576_CLKGATE_CON(50), 9, GFLAGS),
+ GATE(ACLK_EBC, "aclk_ebc", "aclk_vpu_low_root", 0,
+ RK3576_CLKGATE_CON(50), 11, GFLAGS),
+ GATE(HCLK_EBC, "hclk_ebc", "hclk_vpu_root", 0,
+ RK3576_CLKGATE_CON(50), 10, GFLAGS),
+ COMPOSITE(DCLK_EBC, "dclk_ebc", dclk_ebc_p, CLK_SET_RATE_NO_REPARENT,
+ RK3576_CLKSEL_CON(123), 12, 3, MFLAGS, 3, 9, DFLAGS,
+ RK3576_CLKGATE_CON(50), 12, GFLAGS),
+
+ /* vepu */
+ COMPOSITE_NODIV(HCLK_VEPU1_ROOT, "hclk_vepu1_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(178), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(78), 0, GFLAGS),
+ COMPOSITE(ACLK_VEPU1_ROOT, "aclk_vepu1_root", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(180), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(79), 0, GFLAGS),
+ GATE(HCLK_VEPU1, "hclk_vepu1", "hclk_vepu1_root", 0,
+ RK3576_CLKGATE_CON(79), 3, GFLAGS),
+ GATE(ACLK_VEPU1, "aclk_vepu1", "aclk_vepu1_root", 0,
+ RK3576_CLKGATE_CON(79), 4, GFLAGS),
+ COMPOSITE(CLK_VEPU1_CORE, "clk_vepu1_core", gpll_cpll_spll_lpll_bpll_p, 0,
+ RK3576_CLKSEL_CON(180), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(79), 5, GFLAGS),
+
+ /* php */
+ COMPOSITE_NODIV(PCLK_PHP_ROOT, "pclk_php_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(92), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(34), 0, GFLAGS),
+ COMPOSITE(ACLK_PHP_ROOT, "aclk_php_root", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(92), 9, 1, MFLAGS, 4, 5, DFLAGS,
+ RK3576_CLKGATE_CON(34), 7, GFLAGS),
+ GATE(PCLK_PCIE0, "pclk_pcie0", "pclk_php_root", 0,
+ RK3576_CLKGATE_CON(34), 13, GFLAGS),
+ GATE(CLK_PCIE0_AUX, "clk_pcie0_aux", "xin24m", 0,
+ RK3576_CLKGATE_CON(34), 14, GFLAGS),
+ GATE(ACLK_PCIE0_MST, "aclk_pcie0_mst", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(34), 15, GFLAGS),
+ GATE(ACLK_PCIE0_SLV, "aclk_pcie0_slv", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 0, GFLAGS),
+ GATE(ACLK_PCIE0_DBI, "aclk_pcie0_dbi", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 1, GFLAGS),
+ GATE(ACLK_USB3OTG1, "aclk_usb3otg1", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 3, GFLAGS),
+ GATE(CLK_REF_USB3OTG1, "clk_ref_usb3otg1", "xin24m", 0,
+ RK3576_CLKGATE_CON(35), 4, GFLAGS),
+ GATE(CLK_SUSPEND_USB3OTG1, "clk_suspend_usb3otg1", "xin24m", 0,
+ RK3576_CLKGATE_CON(35), 5, GFLAGS),
+ GATE(ACLK_MMU0, "aclk_mmu0", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 11, GFLAGS),
+ GATE(ACLK_SLV_MMU0, "aclk_slv_mmu0", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 13, GFLAGS),
+ GATE(ACLK_MMU1, "aclk_mmu1", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(35), 14, GFLAGS),
+ GATE(ACLK_SLV_MMU1, "aclk_slv_mmu1", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 0, GFLAGS),
+ GATE(PCLK_PCIE1, "pclk_pcie1", "pclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 7, GFLAGS),
+ GATE(CLK_PCIE1_AUX, "clk_pcie1_aux", "xin24m", 0,
+ RK3576_CLKGATE_CON(36), 8, GFLAGS),
+ GATE(ACLK_PCIE1_MST, "aclk_pcie1_mst", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 9, GFLAGS),
+ GATE(ACLK_PCIE1_SLV, "aclk_pcie1_slv", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 10, GFLAGS),
+ GATE(ACLK_PCIE1_DBI, "aclk_pcie1_dbi", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(36), 11, GFLAGS),
+ COMPOSITE(CLK_RXOOB0, "clk_rxoob0", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(93), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3576_CLKGATE_CON(37), 0, GFLAGS),
+ COMPOSITE(CLK_RXOOB1, "clk_rxoob1", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(93), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3576_CLKGATE_CON(37), 1, GFLAGS),
+ GATE(CLK_PMALIVE0, "clk_pmalive0", "xin24m", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(37), 2, GFLAGS),
+ GATE(CLK_PMALIVE1, "clk_pmalive1", "xin24m", CLK_IS_CRITICAL,
+ RK3576_CLKGATE_CON(37), 3, GFLAGS),
+ GATE(ACLK_SATA0, "aclk_sata0", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(37), 4, GFLAGS),
+ GATE(ACLK_SATA1, "aclk_sata1", "aclk_php_root", 0,
+ RK3576_CLKGATE_CON(37), 5, GFLAGS),
+
+ /* audio */
+ COMPOSITE_NODIV(HCLK_AUDIO_ROOT, "hclk_audio_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(42), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(HCLK_ASRC_2CH_0, "hclk_asrc_2ch_0", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(HCLK_ASRC_2CH_1, "hclk_asrc_2ch_1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_ASRC_4CH_0, "hclk_asrc_4ch_0", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(HCLK_ASRC_4CH_1, "hclk_asrc_4ch_1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 6, GFLAGS),
+ COMPOSITE(CLK_ASRC_2CH_0, "clk_asrc_2ch_0", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(42), 7, 2, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 7, GFLAGS),
+ COMPOSITE(CLK_ASRC_2CH_1, "clk_asrc_2ch_1", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(42), 14, 2, MFLAGS, 9, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 8, GFLAGS),
+ COMPOSITE(CLK_ASRC_4CH_0, "clk_asrc_4ch_0", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(43), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 9, GFLAGS),
+ COMPOSITE(CLK_ASRC_4CH_1, "clk_asrc_4ch_1", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(43), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 10, GFLAGS),
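+ /*
+  * Each SAI mclk pairs an internal source (fractional/integer dividers)
+  * with a final mux that can also select the external mclkin pad;
+  * CLK_SET_RATE_PARENT lets rate requests reach the internal source.
+  */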
+ COMPOSITE(MCLK_SAI0_8CH_SRC, "mclk_sai0_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(44), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(7), 11, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI0_8CH, "mclk_sai0_8ch", mclk_sai0_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(44), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(7), 12, GFLAGS),
+ GATE(HCLK_SAI0_8CH, "hclk_sai0_8ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 13, GFLAGS),
+ GATE(HCLK_SPDIF_RX0, "hclk_spdif_rx0", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(7), 14, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_RX0, "mclk_spdif_rx0", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(45), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(7), 15, GFLAGS),
+ GATE(HCLK_SPDIF_RX1, "hclk_spdif_rx1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_RX1, "mclk_spdif_rx1", gpll_cpll_aupll_p, 0,
+ RK3576_CLKSEL_CON(45), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3576_CLKGATE_CON(8), 1, GFLAGS),
+ COMPOSITE(MCLK_SAI1_8CH_SRC, "mclk_sai1_8ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(46), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(8), 4, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI1_8CH, "mclk_sai1_8ch", mclk_sai1_8ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(46), 11, 1, MFLAGS,
+ RK3576_CLKGATE_CON(8), 5, GFLAGS),
+ GATE(HCLK_SAI1_8CH, "hclk_sai1_8ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(8), 6, GFLAGS),
+ COMPOSITE(MCLK_SAI2_2CH_SRC, "mclk_sai2_2ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(47), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI2_2CH, "mclk_sai2_2ch", mclk_sai2_2ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(47), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(8), 8, GFLAGS),
+ GATE(HCLK_SAI2_2CH, "hclk_sai2_2ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(8), 10, GFLAGS),
+ COMPOSITE(MCLK_SAI3_2CH_SRC, "mclk_sai3_2ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(48), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(8), 11, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI3_2CH, "mclk_sai3_2ch", mclk_sai3_2ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(48), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(HCLK_SAI3_2CH, "hclk_sai3_2ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(8), 14, GFLAGS),
+ COMPOSITE(MCLK_SAI4_2CH_SRC, "mclk_sai4_2ch_src", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(49), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(8), 15, GFLAGS),
+ COMPOSITE_NODIV(MCLK_SAI4_2CH, "mclk_sai4_2ch", mclk_sai4_2ch_p, CLK_SET_RATE_PARENT,
+ RK3576_CLKSEL_CON(49), 11, 2, MFLAGS,
+ RK3576_CLKGATE_CON(9), 0, GFLAGS),
+ GATE(HCLK_SAI4_2CH, "hclk_sai4_2ch", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(HCLK_ACDCDIG_DSM, "hclk_acdcdig_dsm", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 3, GFLAGS),
+ GATE(MCLK_ACDCDIG_DSM, "mclk_acdcdig_dsm", "mclk_sai4_2ch", 0,
+ RK3576_CLKGATE_CON(9), 4, GFLAGS),
+ COMPOSITE(CLK_PDM1, "clk_pdm1", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(50), 9, 3, MFLAGS, 0, 9, DFLAGS,
+ RK3576_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(HCLK_PDM1, "hclk_pdm1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 7, GFLAGS),
+ GATE(CLK_PDM1_OUT, "clk_pdm1_out", "clk_pdm1", 0,
+ RK3576_CLKGATE_CON(3), 5, GFLAGS),
+ COMPOSITE(MCLK_PDM1, "mclk_pdm1", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(51), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(9), 8, GFLAGS),
+ GATE(HCLK_SPDIF_TX0, "hclk_spdif_tx0", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 9, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX0, "mclk_spdif_tx0", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(52), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(9), 10, GFLAGS),
+ GATE(HCLK_SPDIF_TX1, "hclk_spdif_tx1", "hclk_audio_root", 0,
+ RK3576_CLKGATE_CON(9), 11, GFLAGS),
+ COMPOSITE(MCLK_SPDIF_TX1, "mclk_spdif_tx1", audio_frac_int_p, 0,
+ RK3576_CLKSEL_CON(53), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3576_CLKGATE_CON(9), 12, GFLAGS),
+ GATE(CLK_SAI1_MCLKOUT, "clk_sai1_mclkout", "mclk_sai1_8ch", 0,
+ RK3576_CLKGATE_CON(9), 13, GFLAGS),
+ GATE(CLK_SAI2_MCLKOUT, "clk_sai2_mclkout", "mclk_sai2_2ch", 0,
+ RK3576_CLKGATE_CON(9), 14, GFLAGS),
+ GATE(CLK_SAI3_MCLKOUT, "clk_sai3_mclkout", "mclk_sai3_2ch", 0,
+ RK3576_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(CLK_SAI4_MCLKOUT, "clk_sai4_mclkout", "mclk_sai4_2ch", 0,
+ RK3576_CLKGATE_CON(10), 0, GFLAGS),
+ GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0,
+ RK3576_CLKGATE_CON(10), 1, GFLAGS),
+
+ /* sdgmac */
+ COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(103), 0, 2, MFLAGS,
+ RK3576_CLKGATE_CON(42), 0, GFLAGS),
+ COMPOSITE(ACLK_SDGMAC_ROOT, "aclk_sdgmac_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(103), 7, 1, MFLAGS, 2, 5, DFLAGS,
+ RK3576_CLKGATE_CON(42), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_SDGMAC_ROOT, "pclk_sdgmac_root", mux_100m_50m_24m_p, 0,
+ RK3576_CLKSEL_CON(103), 8, 2, MFLAGS,
+ RK3576_CLKGATE_CON(42), 2, GFLAGS),
+ GATE(ACLK_GMAC0, "aclk_gmac0", "aclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 7, GFLAGS),
+ GATE(ACLK_GMAC1, "aclk_gmac1", "aclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 8, GFLAGS),
+ GATE(PCLK_GMAC0, "pclk_gmac0", "pclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 9, GFLAGS),
+ GATE(PCLK_GMAC1, "pclk_gmac1", "pclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 10, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDIO, "cclk_src_sdio", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(104), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3576_CLKGATE_CON(42), 11, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(42), 12, GFLAGS),
+ COMPOSITE(CLK_GMAC1_PTP_REF_SRC, "clk_gmac1_ptp_ref_src", clk_gmac1_ptp_ref_src_p, 0,
+ RK3576_CLKSEL_CON(104), 13, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(42), 15, GFLAGS),
+ COMPOSITE(CLK_GMAC0_PTP_REF_SRC, "clk_gmac0_ptp_ref_src", clk_gmac0_ptp_ref_src_p, 0,
+ RK3576_CLKSEL_CON(105), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 0, GFLAGS),
+ GATE(CLK_GMAC1_PTP_REF, "clk_gmac1_ptp_ref", "clk_gmac1_ptp_ref_src", 0,
+ RK3576_CLKGATE_CON(42), 13, GFLAGS),
+ GATE(CLK_GMAC0_PTP_REF, "clk_gmac0_ptp_ref", "clk_gmac0_ptp_ref_src", 0,
+ RK3576_CLKGATE_CON(42), 14, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDMMC0, "cclk_src_sdmmc0", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(105), 13, 2, MFLAGS, 7, 6, DFLAGS,
+ RK3576_CLKGATE_CON(43), 1, GFLAGS),
+ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 2, GFLAGS),
+ COMPOSITE(SCLK_FSPI1_X2, "sclk_fspi1_x2", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(106), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3576_CLKGATE_CON(43), 3, GFLAGS),
+ GATE(HCLK_FSPI1, "hclk_fspi1", "hclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 4, GFLAGS),
+ COMPOSITE(ACLK_DSMC_ROOT, "aclk_dsmc_root", gpll_cpll_p, CLK_IS_CRITICAL,
+ RK3576_CLKSEL_CON(106), 13, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 5, GFLAGS),
+ GATE(ACLK_DSMC, "aclk_dsmc", "aclk_dsmc_root", 0,
+ RK3576_CLKGATE_CON(43), 7, GFLAGS),
+ GATE(PCLK_DSMC, "pclk_dsmc", "pclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 8, GFLAGS),
+ COMPOSITE(CLK_DSMC_SYS, "clk_dsmc_sys", gpll_cpll_p, 0,
+ RK3576_CLKSEL_CON(107), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 9, GFLAGS),
+ GATE(HCLK_HSGPIO, "hclk_hsgpio", "hclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 10, GFLAGS),
+ COMPOSITE(CLK_HSGPIO_TX, "clk_hsgpio_tx", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(107), 11, 2, MFLAGS, 6, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 11, GFLAGS),
+ COMPOSITE(CLK_HSGPIO_RX, "clk_hsgpio_rx", gpll_cpll_24m_p, 0,
+ RK3576_CLKSEL_CON(108), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3576_CLKGATE_CON(43), 12, GFLAGS),
+ GATE(ACLK_HSGPIO, "aclk_hsgpio", "aclk_sdgmac_root", 0,
+ RK3576_CLKGATE_CON(43), 13, GFLAGS),
+
+ /* phpphy */
+ GATE(PCLK_PHPPHY_ROOT, "pclk_phpphy_root", "pclk_bus_root", CLK_IS_CRITICAL,
+ RK3576_PHP_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PCIE2_COMBOPHY0, "pclk_pcie2_combophy0", "pclk_phpphy_root", 0,
+ RK3576_PHP_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_PCIE2_COMBOPHY1, "pclk_pcie2_combophy1", "pclk_phpphy_root", 0,
+ RK3576_PHP_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PCIE_100M_SRC, "clk_pcie_100m_src", "ppll", 0,
+ RK3576_PHP_CLKSEL_CON(0), 2, 5, DFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PCIE_100M_NDUTY_SRC, "clk_pcie_100m_nduty_src", "ppll", 0,
+ RK3576_PHP_CLKSEL_CON(0), 7, 5, DFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_REF_PCIE0_PHY, "clk_ref_pcie0_phy", clk_ref_pcie0_phy_p, 0,
+ RK3576_PHP_CLKSEL_CON(0), 12, 2, MFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_REF_PCIE1_PHY, "clk_ref_pcie1_phy", clk_ref_pcie0_phy_p, 0,
+ RK3576_PHP_CLKSEL_CON(0), 14, 2, MFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_REF_MPHY_26M, "clk_ref_mphy_26m", "ppll", CLK_IS_CRITICAL,
+ RK3576_PHP_CLKSEL_CON(1), 0, 8, DFLAGS,
+ RK3576_PHP_CLKGATE_CON(1), 9, GFLAGS),
+
+ /* pmu */
+ GATE(CLK_200M_PMU_SRC, "clk_200m_pmu_src", "clk_gpll_div6", 0,
+ RK3576_PMU_CLKGATE_CON(3), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_100M_PMU_SRC, "clk_100m_pmu_src", "cpll", 0,
+ RK3576_PMU_CLKSEL_CON(4), 4, 5, DFLAGS,
+ RK3576_PMU_CLKGATE_CON(3), 3, GFLAGS),
+ FACTOR_GATE(CLK_50M_PMU_SRC, "clk_50m_pmu_src", "clk_100m_pmu_src", 0, 1, 2,
+ RK3576_PMU_CLKGATE_CON(3), 4, GFLAGS),
+ COMPOSITE_NODIV(HCLK_PMU1_ROOT, "hclk_pmu1_root", mux_pmu200m_pmu100m_pmu50m_24m_p, CLK_IS_CRITICAL,
+ RK3576_PMU_CLKSEL_CON(4), 0, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(3), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_PMU_CM0_ROOT, "hclk_pmu_cm0_root", mux_pmu200m_pmu100m_pmu50m_24m_p, 0,
+ RK3576_PMU_CLKSEL_CON(4), 2, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(3), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_PMU0_ROOT, "pclk_pmu0_root", mux_pmu100m_pmu50m_24m_p, 0,
+ RK3576_PMU_CLKSEL_CON(20), 0, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(PCLK_PMU0, "pclk_pmu0", "pclk_pmu0_root", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_PMU1_ROOT, "pclk_pmu1_root", "pclk_pmu0_root", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(7), 9, GFLAGS),
+ GATE(PCLK_PMU1, "pclk_pmu1", "pclk_pmu1_root", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(3), 15, GFLAGS),
+ GATE(CLK_PMU1, "clk_pmu1", "xin24m", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(4), 2, GFLAGS),
+ GATE(PCLK_PMUPHY_ROOT, "pclk_pmuphy_root", "pclk_pmu1_root", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(PCLK_HDPTX_APB, "pclk_hdptx_apb", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(PCLK_MIPI_DCPHY, "pclk_mipi_dcphy", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_CSIDPHY, "pclk_csidphy", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_USBDPPHY, "pclk_usbdpphy", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PMUPHY_REF_SRC, "clk_pmuphy_ref_src", "cpll", 0,
+ RK3576_PMU_CLKSEL_CON(0), 0, 5, DFLAGS,
+ RK3576_PMU_CLKGATE_CON(0), 13, GFLAGS),
+ GATE(CLK_USBDP_COMBO_PHY_IMMORTAL, "clk_usbdp_combo_phy_immortal", "xin24m", 0,
+ RK3576_PMU_CLKGATE_CON(0), 15, GFLAGS),
+ GATE(CLK_HDMITXHDP, "clk_hdmitxhdp", "xin24m", 0,
+ RK3576_PMU_CLKGATE_CON(1), 13, GFLAGS),
+ GATE(PCLK_MPHY, "pclk_mphy", "pclk_pmuphy_root", 0,
+ RK3576_PMU_CLKGATE_CON(2), 0, GFLAGS),
+ MUX(CLK_REF_OSC_MPHY, "clk_ref_osc_mphy", clk_ref_osc_mphy_p, 0,
+ RK3576_PMU_CLKSEL_CON(3), 0, 2, MFLAGS),
+ GATE(CLK_REF_UFS_CLKOUT, "clk_ref_ufs_clkout", "clk_ref_osc_mphy", 0,
+ RK3576_PMU_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(FCLK_PMU_CM0_CORE, "fclk_pmu_cm0_core", "hclk_pmu_cm0_root", 0,
+ RK3576_PMU_CLKGATE_CON(3), 12, GFLAGS),
+ COMPOSITE(CLK_PMU_CM0_RTC, "clk_pmu_cm0_rtc", mux_24m_32k_p, 0,
+ RK3576_PMU_CLKSEL_CON(4), 14, 1, MFLAGS, 9, 5, DFLAGS,
+ RK3576_PMU_CLKGATE_CON(3), 14, GFLAGS),
+ GATE(PCLK_PMU1WDT, "pclk_pmu1wdt", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 5, GFLAGS),
+ COMPOSITE_NODIV(TCLK_PMU1WDT, "tclk_pmu1wdt", mux_24m_32k_p, 0,
+ RK3576_PMU_CLKSEL_CON(4), 15, 1, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(PCLK_PMUTIMER, "pclk_pmutimer", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_PMUTIMER_ROOT, "clk_pmutimer_root", mux_pmu100m_24m_32k_p, 0,
+ RK3576_PMU_CLKSEL_CON(5), 0, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(CLK_PMUTIMER0, "clk_pmutimer0", "clk_pmutimer_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 9, GFLAGS),
+ GATE(CLK_PMUTIMER1, "clk_pmutimer1", "clk_pmutimer_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 10, GFLAGS),
+ GATE(PCLK_PMU1PWM, "pclk_pmu1pwm", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(4), 11, GFLAGS),
+ COMPOSITE_NODIV(CLK_PMU1PWM, "clk_pmu1pwm", mux_pmu100m_pmu50m_24m_p, 0,
+ RK3576_PMU_CLKSEL_CON(5), 2, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(4), 12, GFLAGS),
+ GATE(CLK_PMU1PWM_OSC, "clk_pmu1pwm_osc", "xin24m", 0,
+ RK3576_PMU_CLKGATE_CON(4), 13, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C0, "clk_i2c0", mux_pmu200m_pmu100m_pmu50m_24m_p, 0,
+ RK3576_PMU_CLKSEL_CON(6), 7, 2, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(5), 2, GFLAGS),
+ COMPOSITE_NODIV(SCLK_UART1, "sclk_uart1", uart1_p, 0,
+ RK3576_PMU_CLKSEL_CON(8), 0, 1, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(5), 5, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(5), 6, GFLAGS),
+ GATE(CLK_PDM0, "clk_pdm0", "clk_pdm0_src_top", 0,
+ RK3576_PMU_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(HCLK_PDM0, "hclk_pdm0", "hclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(MCLK_PDM0, "mclk_pdm0", "mclk_pdm0_src_top", 0,
+ RK3576_PMU_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(HCLK_VAD, "hclk_vad", "hclk_pmu1_root", 0,
+ RK3576_PMU_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(CLK_PDM0_OUT, "clk_pdm0_out", "clk_pdm0", 0,
+ RK3576_PMU_CLKGATE_CON(6), 8, GFLAGS),
+ COMPOSITE(CLK_HPTIMER_SRC, "clk_hptimer_src", cpll_24m_p, CLK_IS_CRITICAL,
+ RK3576_PMU_CLKSEL_CON(11), 6, 1, MFLAGS, 1, 5, DFLAGS,
+ RK3576_PMU_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pmu0_root", 0,
+ RK3576_PMU_CLKGATE_CON(7), 6, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", mux_24m_32k_p, 0,
+ RK3576_PMU_CLKSEL_CON(20), 2, 1, MFLAGS,
+ RK3576_PMU_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(CLK_OSC0_PMU1, "clk_osc0_pmu1", "xin24m", CLK_IS_CRITICAL,
+ RK3576_PMU_CLKGATE_CON(7), 8, GFLAGS),
+ GATE(CLK_PMU1PWM_RC, "clk_pmu1pwm_rc", "clk_pvtm_clkout", 0,
+ RK3576_PMU_CLKGATE_CON(5), 7, GFLAGS),
+
+ /* phy ref */
+ MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0,
+ RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS),
+ MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0,
+ RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS),
+ MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0,
+ RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS),
+ MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0,
+ RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS),
+
+ /* secure ns */
+ COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL,
+ RK3576_SECURE_NS_CLKSEL_CON(0), 0, 2, MFLAGS,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NODIV(HCLK_SECURE_NS, "hclk_secure_ns", mux_175m_116m_58m_24m_p, CLK_IS_CRITICAL,
+ RK3576_SECURE_NS_CLKSEL_CON(0), 2, 2, MFLAGS,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE_NODIV(PCLK_SECURE_NS, "pclk_secure_ns", mux_116m_58m_24m_p, CLK_IS_CRITICAL,
+ RK3576_SECURE_NS_CLKSEL_CON(0), 4, 2, MFLAGS,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(HCLK_CRYPTO_NS, "hclk_crypto_ns", "hclk_secure_ns", 0,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_secure_ns", 0,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 8, GFLAGS),
+ GATE(CLK_OTPC_NS, "clk_otpc_ns", "xin24m", 0,
+ RK3576_SECURE_NS_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(ACLK_CRYPTO_NS, "aclk_crypto_ns", "aclk_secure_s", 0,
+ RK3576_NON_SECURE_GATING_CON00, 14, GFLAGS),
+ GATE(HCLK_TRNG_NS, "hclk_trng_ns", "hclk_secure_s", 0,
+ RK3576_NON_SECURE_GATING_CON00, 13, GFLAGS),
+ GATE(CLK_PKA_CRYPTO_NS, "clk_pka_crypto_ns", "clk_pka_crypto_s", 0,
+ RK3576_NON_SECURE_GATING_CON00, 1, GFLAGS),
+
+ /* io */
+ GATE(CLK_VICAP_I0CLK, "clk_vicap_i0clk", "clk_csihost0_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 1, GFLAGS),
+ GATE(CLK_VICAP_I1CLK, "clk_vicap_i1clk", "clk_csihost1_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 2, GFLAGS),
+ GATE(CLK_VICAP_I2CLK, "clk_vicap_i2clk", "clk_csihost2_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 3, GFLAGS),
+ GATE(CLK_VICAP_I3CLK, "clk_vicap_i3clk", "clk_csihost3_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 4, GFLAGS),
+ GATE(CLK_VICAP_I4CLK, "clk_vicap_i4clk", "clk_csihost4_clkdata_i", 0,
+ RK3576_CLKGATE_CON(59), 5, GFLAGS),
+};
+
+static void __init rk3576_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
+ void __iomem *reg_base;
+ struct regmap *grf;
+
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches,
+ ARRAY_SIZE(rk3576_clk_branches)) + 1;
+
+ grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
+ if (IS_ERR(grf)) {
+ pr_err("%s: could not get PMU0 GRF syscon\n", __func__);
+ return;
+ }
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ ctx->grf = grf;
+
+ rockchip_clk_register_plls(ctx, rk3576_pll_clks,
+ ARRAY_SIZE(rk3576_pll_clks),
+ RK3576_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_armclk(ctx, ARMCLK_L, "armclk_l",
+ mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p),
+ &rk3576_cpulclk_data, rk3576_cpulclk_rates,
+ ARRAY_SIZE(rk3576_cpulclk_rates));
+ rockchip_clk_register_armclk(ctx, ARMCLK_B, "armclk_b",
+ mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
+ &rk3576_cpubclk_data, rk3576_cpubclk_rates,
+ ARRAY_SIZE(rk3576_cpubclk_rates));
+
+ rockchip_clk_register_branches(ctx, rk3576_clk_branches,
+ ARRAY_SIZE(rk3576_clk_branches));
+
+ rk3576_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init);
+
+struct clk_rk3576_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rk3576_inits clk_rk3576_cru_init = {
+ .inits = rk3576_clk_init,
+};
+
+static const struct of_device_id clk_rk3576_match_table[] = {
+ {
+ .compatible = "rockchip,rk3576-cru",
+ .data = &clk_rk3576_cru_init,
+ },
+ { }
+};
+
+static int clk_rk3576_probe(struct platform_device *pdev)
+{
+ const struct clk_rk3576_inits *init_data;
+ struct device *dev = &pdev->dev;
+
+ init_data = device_get_match_data(dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(dev->of_node);
+
+ return 0;
+}
+
+static struct platform_driver clk_rk3576_driver = {
+ .probe = clk_rk3576_probe,
+ .driver = {
+ .name = "clk-rk3576",
+ .of_match_table = clk_rk3576_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3576_driver, clk_rk3576_probe);
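
Reviewer sketch, not part of the patch: once the CRU above is registered,
peripheral drivers consume its clocks through the common clock framework.
A minimal, hypothetical consumer is shown below; the "hclk" name and the
device's clocks/clock-names DT wiring are illustrative assumptions.

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *hclk;

		/* resolve "hclk" against this device's clock-names property */
		hclk = devm_clk_get_enabled(&pdev->dev, "hclk");
		if (IS_ERR(hclk))
			return PTR_ERR(hclk);

		/* clock is now enabled; devres disables and puts it on unbind */
		return 0;
	}
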
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index b30279a96dc8..0ffaf639f807 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -526,7 +526,7 @@ PNAME(pmu_200m_100m_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src" };
PNAME(pmu_300m_24m_p) = { "clk_300m_src", "xin24m" };
PNAME(pmu_400m_24m_p) = { "clk_400m_src", "xin24m" };
PNAME(pmu_100m_50m_24m_src_p) = { "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" };
-PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "32k", "clk_pmu1_100m_src" };
+PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "xin32k", "clk_pmu1_100m_src" };
PNAME(hclk_pmu1_root_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" };
PNAME(hclk_pmu_cm0_root_p) = { "clk_pmu1_400m_src", "clk_pmu1_200m_src", "clk_pmu1_100m_src", "xin24m" };
PNAME(mclk_pdm0_p) = { "clk_pmu1_300m_src", "clk_pmu1_200m_src" };
@@ -2502,43 +2502,3 @@ static void __init rk3588_clk_init(struct device_node *np)
}
CLK_OF_DECLARE(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_init);
-
-struct clk_rk3588_inits {
- void (*inits)(struct device_node *np);
-};
-
-static const struct clk_rk3588_inits clk_3588_cru_init = {
- .inits = rk3588_clk_init,
-};
-
-static const struct of_device_id clk_rk3588_match_table[] = {
- {
- .compatible = "rockchip,rk3588-cru",
- .data = &clk_3588_cru_init,
- },
- { }
-};
-
-static int __init clk_rk3588_probe(struct platform_device *pdev)
-{
- const struct clk_rk3588_inits *init_data;
- struct device *dev = &pdev->dev;
-
- init_data = device_get_match_data(dev);
- if (!init_data)
- return -EINVAL;
-
- if (init_data->inits)
- init_data->inits(dev->of_node);
-
- return 0;
-}
-
-static struct platform_driver clk_rk3588_driver = {
- .driver = {
- .name = "clk-rk3588",
- .of_match_table = clk_rk3588_match_table,
- .suppress_bind_attrs = true,
- },
-};
-builtin_platform_driver_probe(clk_rk3588_driver, clk_rk3588_probe);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 73d2cbdc716b..2fa7253c73b2 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -450,12 +450,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
- struct clk *clk = NULL;
+ struct clk *clk;
unsigned int idx;
unsigned long flags;
for (idx = 0; idx < nr_clk; idx++, list++) {
flags = list->flags;
+ clk = NULL;
/* catch simple muxes */
switch (list->branch_type) {
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index fd3b476dedda..f1957e1c1178 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -235,6 +235,58 @@ struct clk;
#define RK3568_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180)
#define RK3568_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200)
+#define RK3576_PHP_CRU_BASE 0x8000
+#define RK3576_SECURE_NS_CRU_BASE 0x10000
+#define RK3576_PMU_CRU_BASE 0x20000
+#define RK3576_BIGCORE_CRU_BASE 0x38000
+#define RK3576_LITCORE_CRU_BASE 0x40000
+#define RK3576_CCI_CRU_BASE 0x48000
+
+#define RK3576_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3576_MODE_CON0 0x280
+#define RK3576_BPLL_MODE_CON0 (RK3576_BIGCORE_CRU_BASE + 0x280)
+#define RK3576_LPLL_MODE_CON0 (RK3576_LITCORE_CRU_BASE + 0x280)
+#define RK3576_PPLL_MODE_CON0 (RK3576_PHP_CRU_BASE + 0x280)
+#define RK3576_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
+#define RK3576_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
+#define RK3576_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3576_GLB_CNT_TH 0xc00
+#define RK3576_GLB_SRST_FST 0xc08
+#define RK3576_GLB_SRST_SND 0xc0c
+#define RK3576_GLB_RST_CON 0xc10
+#define RK3576_GLB_RST_ST 0xc04
+#define RK3576_SDIO_CON0 0xC24
+#define RK3576_SDIO_CON1 0xC28
+#define RK3576_SDMMC_CON0 0xC30
+#define RK3576_SDMMC_CON1 0xC34
+
+#define RK3576_PHP_CLKSEL_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0x300)
+#define RK3576_PHP_CLKGATE_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0x800)
+#define RK3576_PHP_SOFTRST_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE + 0xa00)
+
+#define RK3576_PMU_PLL_CON(x) ((x) * 0x4 + RK3576_PHP_CRU_BASE)
+#define RK3576_PMU_CLKSEL_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0x300)
+#define RK3576_PMU_CLKGATE_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0x800)
+#define RK3576_PMU_SOFTRST_CON(x) ((x) * 0x4 + RK3576_PMU_CRU_BASE + 0xa00)
+
+#define RK3576_SECURE_NS_CLKSEL_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0x300)
+#define RK3576_SECURE_NS_CLKGATE_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0x800)
+#define RK3576_SECURE_NS_SOFTRST_CON(x) ((x) * 0x4 + RK3576_SECURE_NS_CRU_BASE + 0xa00)
+
+#define RK3576_CCI_CLKSEL_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0x300)
+#define RK3576_CCI_CLKGATE_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0x800)
+#define RK3576_CCI_SOFTRST_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE + 0xa00)
+
+#define RK3576_BPLL_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE)
+#define RK3576_BIGCORE_CLKSEL_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0x300)
+#define RK3576_BIGCORE_CLKGATE_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0x800)
+#define RK3576_BIGCORE_SOFTRST_CON(x) ((x) * 0x4 + RK3576_BIGCORE_CRU_BASE + 0xa00)
+#define RK3576_LPLL_CON(x) ((x) * 0x4 + RK3576_CCI_CRU_BASE)
+#define RK3576_LITCORE_CLKSEL_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0x300)
+#define RK3576_LITCORE_CLKGATE_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0x800)
+#define RK3576_LITCORE_SOFTRST_CON(x) ((x) * 0x4 + RK3576_LITCORE_CRU_BASE + 0xa00)
+#define RK3576_NON_SECURE_GATING_CON00 0xc48
+
#define RK3588_PHP_CRU_BASE 0x8000
#define RK3588_PMU_CRU_BASE 0x30000
#define RK3588_BIGCORE0_CRU_BASE 0x50000
@@ -287,6 +339,7 @@ enum rockchip_pll_type {
pll_rk3399,
pll_rk3588,
pll_rk3588_core,
+ pll_rk3588_ddr,
};
#define RK3036_PLL_RATE(_rate, _refdiv, _fbdiv, _postdiv1, \
@@ -1025,6 +1078,7 @@ static inline void rockchip_register_softrst(struct device_node *np,
return rockchip_register_softrst_lut(np, NULL, num_regs, base, flags);
}
+void rk3576_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3588_rst_init(struct device_node *np, void __iomem *reg_base);
#endif
diff --git a/drivers/clk/rockchip/rst-rk3576.c b/drivers/clk/rockchip/rst-rk3576.c
new file mode 100644
index 000000000000..15cbb9bc0a41
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rk3576.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ * Based on Sebastian Reichel's implementation for RK3588
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rk3576-cru.h>
+#include "clk.h"
+
+/* 0x27200000 + 0x0A00 */
+#define RK3576_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit)
+/* 0x27208000 + 0x0A00 */
+#define RK3576_PHPCRU_RESET_OFFSET(id, reg, bit) [id] = (0x8000*4 + reg * 16 + bit)
+/* 0x27210000 + 0x0A00 */
+#define RK3576_SECURENSCRU_RESET_OFFSET(id, reg, bit) [id] = (0x10000*4 + reg * 16 + bit)
+/* 0x27220000 + 0x0A00 */
+#define RK3576_PMU1CRU_RESET_OFFSET(id, reg, bit) [id] = (0x20000*4 + reg * 16 + bit)
+
+/* mapping table for reset ID to register offset */
+static const int rk3576_register_offset[] = {
+ /* SOFTRST_CON01 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_TOP_BIU, 1, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_TOP_BIU, 1, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_TOP_MID_BIU, 1, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SECURE_HIGH_BIU, 1, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_TOP_BIU, 1, 14),
+
+ /* SOFTRST_CON02 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VO0VOP_CHANNEL_BIU, 2, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VO0VOP_CHANNEL_BIU, 2, 1),
+
+ /* SOFTRST_CON06 */
+ RK3576_CRU_RESET_OFFSET(SRST_BISRINTF, 6, 2),
+
+ /* SOFTRST_CON07 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_AUDIO_BIU, 7, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_2CH_0, 7, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_2CH_1, 7, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_4CH_0, 7, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ASRC_4CH_1, 7, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_ASRC_2CH_0, 7, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_ASRC_2CH_1, 7, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_ASRC_4CH_0, 7, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_ASRC_4CH_1, 7, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI0_8CH, 7, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI0_8CH, 7, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX0, 7, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX0, 7, 15),
+
+ /* SOFTRST_CON08 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX1, 8, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX1, 8, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI1_8CH, 8, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI1_8CH, 8, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI2_2CH, 8, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI2_2CH, 8, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI3_2CH, 8, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI3_2CH, 8, 14),
+
+ /* SOFTRST_CON09 */
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI4_2CH, 9, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI4_2CH, 9, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_H_ACDCDIG_DSM, 9, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_M_ACDCDIG_DSM, 9, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_PDM1, 9, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_PDM1, 9, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_M_PDM1, 9, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX0, 9, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX0, 9, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX1, 9, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX1, 9, 12),
+
+ /* SOFTRST_CON11 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 11, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 11, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CRU, 11, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_CAN0, 11, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_CAN0, 11, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_CAN1, 11, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_CAN1, 11, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2BUS, 11, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VCCIO_IOC, 11, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 11, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_KEY_SHIFT, 11, 15),
+
+ /* SOFTRST_CON12 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C1, 12, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C2, 12, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C3, 12, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C4, 12, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C5, 12, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C6, 12, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C7, 12, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C8, 12, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_I2C9, 12, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_P_WDT_BUSMCU, 12, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_T_WDT_BUSMCU, 12, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_GIC, 12, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C1, 12, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C2, 12, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C3, 12, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C4, 12, 15),
+
+ /* SOFTRST_CON13 */
+ RK3576_CRU_RESET_OFFSET(SRST_I2C5, 13, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C6, 13, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C7, 13, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C8, 13, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_I2C9, 13, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SARADC, 13, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_SARADC, 13, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_TSADC, 13, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_TSADC, 13, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART0, 13, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART2, 13, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART3, 13, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART4, 13, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART5, 13, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART6, 13, 15),
+
+ /* SOFTRST_CON14 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART7, 14, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART8, 14, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART9, 14, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART10, 14, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UART11, 14, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART0, 14, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART2, 14, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART3, 14, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART4, 14, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART5, 14, 15),
+
+ /* SOFTRST_CON15 */
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART6, 15, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART7, 15, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART8, 15, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART9, 15, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART10, 15, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_S_UART11, 15, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI0, 15, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI1, 15, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI2, 15, 15),
+
+ /* SOFTRST_CON16 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI3, 16, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SPI4, 16, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI0, 16, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI1, 16, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI2, 16, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI3, 16, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_SPI4, 16, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_WDT0, 16, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_T_WDT0, 16, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SYS_GRF, 16, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PWM1, 16, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_PWM1, 16, 11),
+
+ /* SOFTRST_CON17 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_BUSTIMER0, 17, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_BUSTIMER1, 17, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER0, 17, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER1, 17, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER2, 17, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER3, 17, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER4, 17, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER5, 17, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_P_BUSIOC, 17, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_P_MAILBOX0, 17, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPIO1, 17, 15),
+
+ /* SOFTRST_CON18 */
+ RK3576_CRU_RESET_OFFSET(SRST_GPIO1, 18, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPIO2, 18, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_GPIO2, 18, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPIO3, 18, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_GPIO3, 18, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPIO4, 18, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_GPIO4, 18, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DECOM, 18, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DECOM, 18, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_D_DECOM, 18, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER6, 18, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER7, 18, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER8, 18, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER9, 18, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER10, 18, 15),
+
+ /* SOFTRST_CON19 */
+ RK3576_CRU_RESET_OFFSET(SRST_TIMER11, 19, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DMAC0, 19, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DMAC1, 19, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DMAC2, 19, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SPINLOCK, 19, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_REF_PVTPLL_BUS, 19, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_I3C0, 19, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_I3C1, 19, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_H_BUS_CM0_BIU, 19, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_F_BUS_CM0_CORE, 19, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_T_BUS_CM0_JTAG, 19, 13),
+
+ /* SOFTRST_CON20 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2PMU, 20, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_INTMUX2DDR, 20, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_BUS, 20, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PWM2, 20, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_PWM2, 20, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_FREQ_PWM1, 20, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_COUNTER_PWM1, 20, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_I3C0, 20, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_I3C1, 20, 13),
+
+ /* SOFTRST_CON21 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_MON_CH0, 21, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_BIU, 21, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_UPCTL_CH0, 21, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_TM_DDR_MON_CH0, 21, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_BIU, 21, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_DFI_CH0, 21, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_MON_CH0, 21, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_HWLP_CH0, 21, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_MON_CH1, 21, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_HWLP_CH1, 21, 15),
+
+ /* SOFTRST_CON22 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_UPCTL_CH1, 22, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_TM_DDR_MON_CH1, 22, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_DFI_CH1, 22, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR01_MSCH0, 22, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR01_MSCH1, 22, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_MON_CH1, 22, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_SCRAMBLE_CH0, 22, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_SCRAMBLE_CH1, 22, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_P_AHB2APB, 22, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_AHB2APB, 22, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_H_DDR_BIU, 22, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_F_DDR_CM0_CORE, 22, 15),
+
+ /* SOFTRST_CON23 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR01_MSCH0, 23, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR01_MSCH1, 23, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_TIMER0, 23, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_TIMER1, 23, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_T_WDT_DDR, 23, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_WDT, 23, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_TIMER, 23, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_T_DDR_CM0_JTAG, 23, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DDR_GRF, 23, 11),
+
+ /* SOFTRST_CON25 */
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_UPCTL_CH0, 25, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_0_CH0, 25, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_1_CH0, 25, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_2_CH0, 25, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_3_CH0, 25, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_4_CH0, 25, 6),
+
+ /* SOFTRST_CON26 */
+ RK3576_CRU_RESET_OFFSET(SRST_DDR_UPCTL_CH1, 26, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_0_CH1, 26, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_1_CH1, 26, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_2_CH1, 26, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_3_CH1, 26, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_UPCTL_4_CH1, 26, 6),
+
+ /* SOFTRST_CON27 */
+ RK3576_CRU_RESET_OFFSET(SRST_REF_PVTPLL_DDR, 27, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_DDR, 27, 1),
+
+ /* SOFTRST_CON28 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN0, 28, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN0_BIU, 28, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_L_RKNN0_BIU, 28, 12),
+
+ /* SOFTRST_CON29 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN1, 29, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN1_BIU, 29, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_L_RKNN1_BIU, 29, 3),
+
+ /* SOFTRST_CON31 */
+ RK3576_CRU_RESET_OFFSET(SRST_NPU_DAP, 31, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_L_NPUSUBSYS_BIU, 31, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_NPUTOP_BIU, 31, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_NPU_TIMER, 31, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_NPUTIMER0, 31, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_NPUTIMER1, 31, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_P_NPU_WDT, 31, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_T_NPU_WDT, 31, 15),
+
+ /* SOFTRST_CON32 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNN_CBUF, 32, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RVCORE0, 32, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_NPU_GRF, 32, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_NPU, 32, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_NPU_PVTPLL, 32, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_NPU_CM0_BIU, 32, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_F_NPU_CM0_CORE, 32, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_T_NPU_CM0_JTAG, 32, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKNNTOP_BIU, 32, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RKNN_CBUF, 32, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RKNNTOP_BIU, 32, 13),
+
+ /* SOFTRST_CON33 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_NVM_BIU, 33, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_NVM_BIU, 33, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_S_FSPI, 33, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_H_FSPI, 33, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_C_EMMC, 33, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_EMMC, 33, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_A_EMMC, 33, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_B_EMMC, 33, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_T_EMMC, 33, 12),
+
+ /* SOFTRST_CON34 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_GRF, 34, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PHP_BIU, 34, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_PHP_BIU, 34, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PCIE0, 34, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_PCIE0_POWER_UP, 34, 15),
+
+ /* SOFTRST_CON35 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_USB3OTG1, 35, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_MMU0, 35, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU0, 35, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_A_MMU1, 35, 14),
+
+ /* SOFTRST_CON36 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU1, 36, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PCIE1, 36, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_PCIE1_POWER_UP, 36, 9),
+
+ /* SOFTRST_CON37 */
+ RK3576_CRU_RESET_OFFSET(SRST_RXOOB0, 37, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_RXOOB1, 37, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_PMALIVE0, 37, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_PMALIVE1, 37, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SATA0, 37, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SATA1, 37, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_ASIC1, 37, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_ASIC0, 37, 7),
+
+ /* SOFTRST_CON40 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSIDPHY1, 40, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_SCAN_CSIDPHY1, 40, 3),
+
+ /* SOFTRST_CON42 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_SDGMAC_GRF, 42, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SDGMAC_BIU, 42, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SDGMAC_BIU, 42, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SDGMAC_BIU, 42, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_GMAC0, 42, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_A_GMAC1, 42, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GMAC0, 42, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GMAC1, 42, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SDIO, 42, 12),
+
+ /* SOFTRST_CON43 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_SDMMC0, 43, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_S_FSPI1, 43, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_FSPI1, 43, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DSMC_BIU, 43, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DSMC, 43, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DSMC, 43, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_HSGPIO, 43, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_HSGPIO, 43, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HSGPIO, 43, 13),
+
+ /* SOFTRST_CON45 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_RKVDEC, 45, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RKVDEC_BIU, 45, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RKVDEC_BIU, 45, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_RKVDEC_HEVC_CA, 45, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_RKVDEC_CORE, 45, 9),
+
+ /* SOFTRST_CON47 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_USB_BIU, 47, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_P_USBUFS_BIU, 47, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_USB3OTG0, 47, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_UFS_BIU, 47, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_MMU2, 47, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_A_SLV_MMU2, 47, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_A_UFS_SYS, 47, 15),
+
+ /* SOFTRST_CON48 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_UFS, 48, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_USBUFS_GRF, 48, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_UFS_GRF, 48, 2),
+
+ /* SOFTRST_CON49 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VPU_BIU, 49, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_JPEG_BIU, 49, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RGA_BIU, 49, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VDPP_BIU, 49, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_A_EBC_BIU, 49, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RGA2E_0, 49, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RGA2E_0, 49, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_CORE_RGA2E_0, 49, 15),
+
+ /* SOFTRST_CON50 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_JPEG, 50, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_H_JPEG, 50, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VDPP, 50, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VDPP, 50, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_CORE_VDPP, 50, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_RGA2E_1, 50, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_RGA2E_1, 50, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_CORE_RGA2E_1, 50, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_EBC, 50, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_EBC, 50, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_D_EBC, 50, 12),
+
+ /* SOFTRST_CON51 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VEPU0_BIU, 51, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VEPU0_BIU, 51, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VEPU0, 51, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VEPU0, 51, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_VEPU0_CORE, 51, 6),
+
+ /* SOFTRST_CON53 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_VI_BIU, 53, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VI_BIU, 53, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VI_BIU, 53, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_D_VICAP, 53, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VICAP, 53, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VICAP, 53, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_ISP0, 53, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_ISP0_VICAP, 53, 11),
+
+ /* SOFTRST_CON54 */
+ RK3576_CRU_RESET_OFFSET(SRST_CORE_VPSS, 54, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_0, 54, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_1, 54, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_2, 54, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_3, 54, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CSI_HOST_4, 54, 8),
+
+ /* SOFTRST_CON59 */
+ RK3576_CRU_RESET_OFFSET(SRST_CIFIN, 59, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I0CLK, 59, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I1CLK, 59, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I2CLK, 59, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I3CLK, 59, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_VICAP_I4CLK, 59, 5),
+
+ /* SOFTRST_CON61 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_VOP_BIU, 61, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VOP2_BIU, 61, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VOP_BIU, 61, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VOP_BIU, 61, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VOP, 61, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VOP, 61, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_D_VP0, 61, 13),
+
+ /* SOFTRST_CON62 */
+ RK3576_CRU_RESET_OFFSET(SRST_D_VP1, 62, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_D_VP2, 62, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VOP2_BIU, 62, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VOPGRF, 62, 3),
+
+ /* SOFTRST_CON63 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VO0_BIU, 63, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VO0_BIU, 63, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HDCP0_BIU, 63, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VO0_GRF, 63, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HDCP0, 63, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_HDCP0, 63, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_HDCP0, 63, 14),
+
+ /* SOFTRST_CON64 */
+ RK3576_CRU_RESET_OFFSET(SRST_P_DSIHOST0, 64, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_DSIHOST0, 64, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_P_HDMITX0, 64, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_HDMITX0_REF, 64, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_EDP0, 64, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_EDP0_24M, 64, 14),
+
+ /* SOFTRST_CON65 */
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI5_8CH, 65, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI5_8CH, 65, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI6_8CH, 65, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI6_8CH, 65, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX2, 65, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX2, 65, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_RX2, 65, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_RX2, 65, 15),
+
+ /* SOFTRST_CON66 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI8_8CH, 66, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI8_8CH, 66, 2),
+
+ /* SOFTRST_CON67 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VO1_BIU, 67, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VO1_BIU, 67, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI7_8CH, 67, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI7_8CH, 67, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX3, 67, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX4, 67, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SPDIF_TX5, 67, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX3, 67, 14),
+
+ /* SOFTRST_CON68 */
+ RK3576_CRU_RESET_OFFSET(SRST_DP0, 68, 0),
+ RK3576_CRU_RESET_OFFSET(SRST_P_VO1_GRF, 68, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HDCP1_BIU, 68, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_HDCP1, 68, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_H_HDCP1, 68, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_HDCP1, 68, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_H_SAI9_8CH, 68, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SAI9_8CH, 68, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX4, 68, 12),
+ RK3576_CRU_RESET_OFFSET(SRST_M_SPDIF_TX5, 68, 13),
+
+ /* SOFTRST_CON69 */
+ RK3576_CRU_RESET_OFFSET(SRST_GPU, 69, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_S_GPU_BIU, 69, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_M0_GPU_BIU, 69, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPU_BIU, 69, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_GPU_GRF, 69, 13),
+ RK3576_CRU_RESET_OFFSET(SRST_GPU_PVTPLL, 69, 14),
+ RK3576_CRU_RESET_OFFSET(SRST_P_PVTPLL_GPU, 69, 15),
+
+ /* SOFTRST_CON72 */
+ RK3576_CRU_RESET_OFFSET(SRST_A_CENTER_BIU, 72, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DMA2DDR, 72, 5),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_SHAREMEM, 72, 6),
+ RK3576_CRU_RESET_OFFSET(SRST_A_DDR_SHAREMEM_BIU, 72, 7),
+ RK3576_CRU_RESET_OFFSET(SRST_H_CENTER_BIU, 72, 8),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CENTER_GRF, 72, 9),
+ RK3576_CRU_RESET_OFFSET(SRST_P_DMA2DDR, 72, 10),
+ RK3576_CRU_RESET_OFFSET(SRST_P_SHAREMEM, 72, 11),
+ RK3576_CRU_RESET_OFFSET(SRST_P_CENTER_BIU, 72, 12),
+
+ /* SOFTRST_CON75 */
+ RK3576_CRU_RESET_OFFSET(SRST_LINKSYM_HDMITXPHY0, 75, 1),
+
+ /* SOFTRST_CON78 */
+ RK3576_CRU_RESET_OFFSET(SRST_DP0_PIXELCLK, 78, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_PHY_DP0_TX, 78, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_DP1_PIXELCLK, 78, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_DP2_PIXELCLK, 78, 4),
+
+ /* SOFTRST_CON79 */
+ RK3576_CRU_RESET_OFFSET(SRST_H_VEPU1_BIU, 79, 1),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VEPU1_BIU, 79, 2),
+ RK3576_CRU_RESET_OFFSET(SRST_H_VEPU1, 79, 3),
+ RK3576_CRU_RESET_OFFSET(SRST_A_VEPU1, 79, 4),
+ RK3576_CRU_RESET_OFFSET(SRST_VEPU1_CORE, 79, 5),
+
+ /* PPLL_SOFTRST_CON00 */
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PHPPHY_CRU, 0, 1),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_APB2ASB_SLV_CHIP_TOP, 0, 3),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY0, 0, 5),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY0_GRF, 0, 6),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY1, 0, 7),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_P_PCIE2_COMBOPHY1_GRF, 0, 8),
+
+ /* PPLL_SOFTRST_CON01 */
+ RK3576_PHPCRU_RESET_OFFSET(SRST_PCIE0_PIPE_PHY, 1, 5),
+ RK3576_PHPCRU_RESET_OFFSET(SRST_PCIE1_PIPE_PHY, 1, 8),
+
+ /* SECURENS_SOFTRST_CON00 */
+ RK3576_SECURENSCRU_RESET_OFFSET(SRST_H_CRYPTO_NS, 0, 3),
+ RK3576_SECURENSCRU_RESET_OFFSET(SRST_H_TRNG_NS, 0, 4),
+ RK3576_SECURENSCRU_RESET_OFFSET(SRST_P_OTPC_NS, 0, 8),
+ RK3576_SECURENSCRU_RESET_OFFSET(SRST_OTPC_NS, 0, 9),
+
+ /* PMU1_SOFTRST_CON00 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_HDPTX_GRF, 0, 0),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_HDPTX_APB, 0, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_MIPI_DCPHY, 0, 2),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_DCPHY_GRF, 0, 3),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_BOT0_APB2ASB, 0, 4),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_BOT1_APB2ASB, 0, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USB2DEBUG, 0, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CSIPHY_GRF, 0, 7),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CSIPHY, 0, 8),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBPHY_GRF_0, 0, 9),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBPHY_GRF_1, 0, 10),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBDP_GRF, 0, 11),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_USBDPPHY, 0, 12),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_INIT, 0, 15),
+
+ /* PMU1_SOFTRST_CON01 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_CMN, 1, 0),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_LANE, 1, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_USBDP_COMBO_PHY_PCS, 1, 2),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_M_MIPI_DCPHY, 1, 3),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_S_MIPI_DCPHY, 1, 4),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_SCAN_CSIPHY, 1, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_VCCIO6_IOC, 1, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_OTGPHY_0, 1, 7),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_OTGPHY_1, 1, 8),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_INIT, 1, 9),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_CMN, 1, 10),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_HDPTX_LANE, 1, 11),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_HDMITXHDP, 1, 13),
+
+ /* PMU1_SOFTRST_CON02 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_MPHY_INIT, 2, 0),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_MPHY_GRF, 2, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_VCCIO7_IOC, 2, 3),
+
+ /* PMU1_SOFTRST_CON03 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PMU1_BIU, 3, 9),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_NIU, 3, 10),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PMU_CM0_BIU, 3, 11),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU_CM0_CORE, 3, 12),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU_CM0_JTAG, 3, 13),
+
+ /* PMU1_SOFTRST_CON04 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_CRU_PMU1, 4, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_GRF, 4, 3),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1_IOC, 4, 4),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1WDT, 4, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_T_PMU1WDT, 4, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMUTIMER, 4, 7),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMUTIMER0, 4, 9),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMUTIMER1, 4, 10),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU1PWM, 4, 11),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PMU1PWM, 4, 12),
+
+ /* PMU1_SOFTRST_CON05 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_I2C0, 5, 1),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_I2C0, 5, 2),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_S_UART1, 5, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_UART1, 5, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_PDM0, 5, 13),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_H_PDM0, 5, 15),
+
+ /* PMU1_SOFTRST_CON06 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_M_PDM0, 6, 0),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_H_VAD, 6, 1),
+
+ /* PMU1_SOFTRST_CON07 */
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU0GRF, 7, 4),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_PMU0IOC, 7, 5),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_P_GPIO0, 7, 6),
+ RK3576_PMU1CRU_RESET_OFFSET(SRST_DB_GPIO0, 7, 7),
+};
+
+void rk3576_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rk3576_register_offset,
+ ARRAY_SIZE(rk3576_register_offset),
+ reg_base + RK3576_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
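
Reviewer sketch, not part of the patch: the lut entries above appear to
encode 16 reset bits per 4-byte SOFTRST_CON register, relative to
SOFTRST_CON(0); that is why the PHP/SECURE_NS/PMU1 macros scale their CRU
base offsets by 4. A decode helper under that assumption:

	static inline void rk3576_softrst_decode(int lut_val,
						 unsigned int *byte_offset,
						 unsigned int *bit)
	{
		*byte_offset = (lut_val / 16) * 4;	/* 4-byte register stride */
		*bit = lut_val % 16;			/* 16 resets per register */
	}

For example, SRST_PCIE0_PIPE_PHY = 0x8000 * 4 + 1 * 16 + 5 decodes to byte
offset 0x8004 past RK3576_SOFTRST_CON(0) and bit 5, which lands on
RK3576_PHP_SOFTRST_CON(1) as defined in clk.h above.
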
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 3056944a5a54..f1ba48758c78 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index e44b172d7255..abd49edcf707 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -292,7 +292,7 @@ static struct platform_driver exynos_audss_clk_driver = {
.pm = &exynos_audss_clk_pm_ops,
},
.probe = exynos_audss_clk_probe,
- .remove_new = exynos_audss_clk_remove,
+ .remove = exynos_audss_clk_remove,
};
module_platform_driver(exynos_audss_clk_driver);
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 89cf2000884f..2ef5748c139b 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -241,7 +241,7 @@ static struct platform_driver exynos_clkout_driver = {
.pm = &exynos_clkout_pm_ops,
},
.probe = exynos_clkout_probe,
- .remove_new = exynos_clkout_remove,
+ .remove = exynos_clkout_remove,
};
module_platform_driver(exynos_clkout_driver);
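
Reviewer sketch, not part of the patch: the two one-line hunks above track
the platform core's rename of .remove_new back to .remove; the callback
keeps its void return type. Assuming that signature:

	static void example_remove(struct platform_device *pdev)
	{
		/* release resources; no error code can be returned */
	}

	static struct platform_driver example_driver = {
		.probe  = example_probe,
		.remove = example_remove,	/* formerly .remove_new */
	};
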
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
index f7d7427a558b..fc42251731ed 100644
--- a/drivers/clk/samsung/clk-exynos7885.c
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -17,10 +17,10 @@
#include "clk-exynos-arm64.h"
/* NOTE: Must be equal to the last clock ID increased by one */
-#define CLKS_NR_TOP (CLK_GOUT_FSYS_USB30DRD + 1)
+#define CLKS_NR_TOP (CLK_MOUT_SHARED1_PLL + 1)
#define CLKS_NR_CORE (CLK_GOUT_TREX_P_CORE_PCLK_P_CORE + 1)
#define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1)
-#define CLKS_NR_FSYS (CLK_GOUT_MMC_SDIO_SDCLKIN + 1)
+#define CLKS_NR_FSYS (CLK_FSYS_USB30DRD_REF_CLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -162,6 +162,10 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
NULL),
};
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
+
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
PNAME(mout_core_bus_p) = { "dout_shared0_div2", "dout_shared1_div2",
"dout_shared0_div3", "dout_shared0_div3" };
@@ -189,6 +193,12 @@ PNAME(mout_fsys_mmc_sdio_p) = { "dout_shared0_div2", "dout_shared1_div2" };
PNAME(mout_fsys_usb30drd_p) = { "dout_shared0_div4", "dout_shared1_div4" };
static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* TOP */
+ MUX(CLK_MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
@@ -232,17 +242,17 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
static const struct samsung_div_clock top_div_clks[] __initconst = {
/* TOP */
- DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "fout_shared0_pll",
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "mout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
- DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "fout_shared0_pll",
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "mout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "dout_shared0_div2",
CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
- DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "fout_shared0_pll",
+ DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "mout_shared0_pll",
CLK_CON_DIV_PLL_SHARED0_DIV5, 0, 3),
- DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "fout_shared1_pll",
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "mout_shared1_pll",
CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
- DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "fout_shared1_pll",
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "mout_shared1_pll",
CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
@@ -676,30 +686,56 @@ static const struct samsung_cmu_info core_cmu_info __initconst = {
/* ---- CMU_FSYS ------------------------------------------------------------ */
/* Register Offset definitions for CMU_FSYS (0x13400000) */
-#define PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER 0x0100
-#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER 0x0120
-#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER 0x0140
-#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER 0x0160
-#define PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER 0x0180
-#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK 0x2030
-#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN 0x2034
-#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK 0x2038
-#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN 0x203c
-#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK 0x2040
-#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN 0x2044
+#define PLL_LOCKTIME_PLL_USB 0x0000
+#define PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER 0x0140
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER 0x0160
+#define PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER 0x0180
+#define PLL_CON0_PLL_USB 0x01a0
+#define CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE 0x200c
+#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK 0x2030
+#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN 0x2034
+#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK 0x2038
+#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN 0x203c
+#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK 0x2040
+#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN 0x2044
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL 0x2068
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0 0x206c
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1 0x2070
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY 0x2074
+#define CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK 0x2078
static const unsigned long fsys_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_USB,
PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER,
PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER,
PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER,
PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER,
PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER,
+ PLL_CON0_PLL_USB,
+ CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE,
CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK,
CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN,
CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK,
CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN,
CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK,
CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY,
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK,
+};
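
The clk_regs list above is more than bookkeeping: the Samsung clk core snapshots exactly these offsets on suspend and writes them back on resume, so every register the driver touches must appear here. A rough sketch of the save side, assuming reg_base maps the CMU_FSYS block:

    /* Sketch only: approximates what the Samsung clk core's
     * suspend hook does with the clk_regs list.
     */
    u32 saved[ARRAY_SIZE(fsys_clk_regs)];
    size_t i;

    for (i = 0; i < ARRAY_SIZE(fsys_clk_regs); i++)
            saved[i] = readl(reg_base + fsys_clk_regs[i]);
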
+
+static const struct samsung_pll_rate_table pll_usb_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 50000000U, 400, 13, 4),
+};
+
+static const struct samsung_pll_clock fsys_pll_clks[] __initconst = {
+ PLL(pll_1418x, CLK_FOUT_USB_PLL, "fout_usb_pll", "oscclk",
+ PLL_LOCKTIME_PLL_USB, PLL_CON0_PLL_USB,
+ pll_usb_rate_table),
};
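
For the new USB PLL, the single rate-table entry can be sanity-checked against the standard pll35xx relation Fout = Fin * MDIV / (PDIV << SDIV), which PLL_35XX_RATE() encodes; a minimal standalone check (userspace sketch, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long fin = 26000000ULL;  /* oscclk, 26 MHz */
            unsigned int mdiv = 400, pdiv = 13, sdiv = 4;

            /* 26 MHz * 400 / (13 << 4) = 50 MHz */
            printf("%llu\n", fin * mdiv / (pdiv << sdiv));
            return 0;
    }
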
/* List of parent clocks for Muxes in CMU_FSYS */
@@ -708,6 +744,7 @@ PNAME(mout_fsys_mmc_card_user_p) = { "oscclk", "dout_fsys_mmc_card" };
PNAME(mout_fsys_mmc_embd_user_p) = { "oscclk", "dout_fsys_mmc_embd" };
PNAME(mout_fsys_mmc_sdio_user_p) = { "oscclk", "dout_fsys_mmc_sdio" };
PNAME(mout_fsys_usb30drd_user_p) = { "oscclk", "dout_fsys_usb30drd" };
+PNAME(mout_usb_pll_p) = { "oscclk", "fout_usb_pll" };
static const struct samsung_mux_clock fsys_mux_clks[] __initconst = {
MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user", mout_fsys_bus_user_p,
@@ -721,12 +758,16 @@ static const struct samsung_mux_clock fsys_mux_clks[] __initconst = {
MUX_F(CLK_MOUT_FSYS_MMC_SDIO_USER, "mout_fsys_mmc_sdio_user",
mout_fsys_mmc_sdio_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER,
4, 1, CLK_SET_RATE_PARENT, 0),
- MUX_F(CLK_MOUT_FSYS_USB30DRD_USER, "mout_fsys_usb30drd_user",
+ MUX(CLK_MOUT_FSYS_USB30DRD_USER, "mout_fsys_usb30drd_user",
mout_fsys_usb30drd_user_p, PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER,
- 4, 1, CLK_SET_RATE_PARENT, 0),
+ 4, 1),
+ nMUX_F(CLK_MOUT_USB_PLL, "mout_usb_pll", mout_usb_pll_p,
+ PLL_CON0_PLL_USB, 4, 1, CLK_SET_RATE_PARENT, 0),
};
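
Note the asymmetry introduced here: mout_fsys_usb30drd_user drops CLK_SET_RATE_PARENT, while the new mout_usb_pll uses nMUX_F(), the variant for muxes that may be reparented while a rate request propagates. With CLK_SET_RATE_PARENT on the PHY gate below, a consumer's rate request can walk up to fout_usb_pll; a hedged consumer-side sketch (the con-id is illustrative, not defined by this driver):

    struct clk *phy_clk = devm_clk_get(dev, "usb20phy_clkcore"); /* assumed name */

    if (IS_ERR(phy_clk))
            return PTR_ERR(phy_clk);
    /* Propagates through the gate and nMUX up to fout_usb_pll. */
    clk_set_rate(phy_clk, 50 * 1000 * 1000);
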
static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
+ GATE(CLK_FSYS_USB20PHY_CLKCORE, "clk_fsys_usb20phy_clkcore", "mout_usb_pll",
+ CLK_CON_GAT_CLK_FSYS_USB20PHY_CLKCORE, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_fsys_bus_user",
CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
@@ -742,9 +783,21 @@ static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
GATE(CLK_GOUT_MMC_SDIO_SDCLKIN, "gout_mmc_sdio_sdclkin",
"mout_fsys_mmc_sdio_user", CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN,
21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_FSYS_USB30DRD_ACLK_20PHYCTRL, "clk_fsys_usb30drd_aclk_20phyctrl",
+ "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_20PHYCTRL, 21, 0, 0),
+ GATE(CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_0, "clk_fsys_usb30drd_aclk_30phyctrl_0",
+ "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_0, 21, 0, 0),
+ GATE(CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_1, "clk_fsys_usb30drd_aclk_30phyctrl_1",
+ "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_ACLK_30PHYCTRL_1, 21, 0, 0),
+ GATE(CLK_FSYS_USB30DRD_BUS_CLK_EARLY, "clk_fsys_usb30drd_bus_clk_early",
+ "mout_fsys_bus_user", CLK_CON_GAT_GOUT_FSYS_USB30DRD_BUS_CLK_EARLY, 21, 0, 0),
+ GATE(CLK_FSYS_USB30DRD_REF_CLK, "clk_fsys_usb30drd_ref_clk", "mout_fsys_usb30drd_user",
+ CLK_CON_GAT_GOUT_FSYS_USB30DRD_REF_CLK, 21, 0, 0),
};
static const struct samsung_cmu_info fsys_cmu_info __initconst = {
+ .pll_clks = fsys_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(fsys_pll_clks),
.mux_clks = fsys_mux_clks,
.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks),
.gate_clks = fsys_gate_clks,
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index 6215471c4ac6..e00e213b1201 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -28,7 +28,7 @@
#define CLKS_NR_HSI (CLK_GOUT_HSI_CMU_HSI_PCLK + 1)
#define CLKS_NR_IS (CLK_GOUT_IS_SYSREG_PCLK + 1)
#define CLKS_NR_MFCMSCL (CLK_GOUT_MFCMSCL_SYSREG_PCLK + 1)
-#define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1)
+#define CLKS_NR_PERI (CLK_GOUT_BUSIF_TMU_PCLK + 1)
#define CLKS_NR_CORE (CLK_GOUT_SPDMA_CORE_ACLK + 1)
#define CLKS_NR_DPU (CLK_GOUT_DPU_SYSREG_PCLK + 1)
@@ -1921,6 +1921,7 @@ static const struct samsung_cmu_info mfcmscl_cmu_info __initconst = {
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0 0x200c
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1 0x2010
#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2 0x2014
+#define CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK 0x2018
#define CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK 0x2020
#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK 0x2024
#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK 0x2028
@@ -1957,6 +1958,7 @@ static const unsigned long peri_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0,
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1,
CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2,
+ CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK,
CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK,
CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK,
@@ -2068,6 +2070,9 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
GATE(CLK_GOUT_GPIO_PERI_PCLK, "gout_gpio_peri_pclk",
"mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_BUSIF_TMU_PCLK, "gout_busif_tmu_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_BUSIF_TMU_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info peri_cmu_info __initconst = {
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
index f04bacacab2c..5971e680e566 100644
--- a/drivers/clk/samsung/clk-exynosautov9.c
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -20,6 +20,7 @@
#define CLKS_NR_TOP (GOUT_CLKCMU_PERIS_BUS + 1)
#define CLKS_NR_BUSMC (CLK_GOUT_BUSMC_SPDMA_PCLK + 1)
#define CLKS_NR_CORE (CLK_GOUT_CORE_CMU_CORE_PCLK + 1)
+#define CLKS_NR_DPUM (CLK_GOUT_DPUM_SYSMMU_D3_CLK + 1)
#define CLKS_NR_FSYS0 (CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK + 1)
#define CLKS_NR_FSYS1 (CLK_GOUT_FSYS1_USB30_1_ACLK + 1)
#define CLKS_NR_FSYS2 (CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO + 1)
@@ -1076,6 +1077,85 @@ static const struct samsung_cmu_info core_cmu_info __initconst = {
.clk_name = "dout_clkcmu_core_bus",
};
+/* ---- CMU_DPUM ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_DPUM (0x18c00000) */
+#define PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER 0x0600
+#define CLK_CON_DIV_DIV_CLK_DPUM_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON 0x202c
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA 0x2030
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP 0x2034
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1 0x207c
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1 0x2084
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1 0x208c
+#define CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1 0x2094
+
+static const unsigned long dpum_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER,
+ CLK_CON_DIV_DIV_CLK_DPUM_BUSP,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1,
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1,
+};
+
+PNAME(mout_dpum_bus_user_p) = { "oscclk", "dout_clkcmu_dpum_bus" };
+
+static const struct samsung_mux_clock dpum_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_DPUM_BUS_USER, "mout_dpum_bus_user",
+ mout_dpum_bus_user_p, PLL_CON0_MUX_CLKCMU_DPUM_BUS_USER, 4, 1),
+};
+
+static const struct samsung_div_clock dpum_div_clks[] __initconst = {
+ DIV(CLK_DOUT_DPUM_BUSP, "dout_dpum_busp", "mout_dpum_bus_user",
+ CLK_CON_DIV_DIV_CLK_DPUM_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock dpum_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_DPUM_ACLK_DECON, "gout_dpum_decon_aclk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DECON, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_ACLK_DMA, "gout_dpum_dma_aclk", "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DMA, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_ACLK_DPP, "gout_dpum_dpp_aclk", "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_DPUM_IPCLKPORT_ACLK_DPP, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_SYSMMU_D0_CLK, "gout_dpum_sysmmu_d0_clk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D0_DPUM_IPCLKPORT_CLK_S1, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_SYSMMU_D1_CLK, "gout_dpum_sysmmu_d1_clk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D1_DPUM_IPCLKPORT_CLK_S1, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_SYSMMU_D2_CLK, "gout_dpum_sysmmu_d2_clk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D2_DPUM_IPCLKPORT_CLK_S1, 21,
+ 0, 0),
+ GATE(CLK_GOUT_DPUM_SYSMMU_D3_CLK, "gout_dpum_sysmmu_d3_clk",
+ "mout_dpum_bus_user",
+ CLK_CON_GAT_GOUT_BLK_DPUM_UID_SYSMMU_D3_DPUM_IPCLKPORT_CLK_S1, 21,
+ 0, 0),
+};
+
+static const struct samsung_cmu_info dpum_cmu_info __initconst = {
+ .mux_clks = dpum_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(dpum_mux_clks),
+ .div_clks = dpum_div_clks,
+ .nr_div_clks = ARRAY_SIZE(dpum_div_clks),
+ .gate_clks = dpum_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(dpum_gate_clks),
+ .nr_clk_ids = CLKS_NR_DPUM,
+ .clk_regs = dpum_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(dpum_clk_regs),
+ .clk_name = "bus",
+};
+
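The .clk_name field names the CMU's own bus clock; the arm64 registration helper looks it up from the CMU node and keeps it enabled while registering, so register access cannot hit a gated interconnect. Roughly (a sketch of the helper's behaviour, not its exact code):

    struct clk *bus = of_clk_get_by_name(np, "bus");

    if (!IS_ERR(bus))
            clk_prepare_enable(bus);  /* held across registration */
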
/* ---- CMU_FSYS0 ---------------------------------------------------------- */
/* Register Offset definitions for CMU_FSYS0 (0x17700000) */
@@ -2086,6 +2166,9 @@ static const struct of_device_id exynosautov9_cmu_of_match[] = {
.compatible = "samsung,exynosautov9-cmu-core",
.data = &core_cmu_info,
}, {
+ .compatible = "samsung,exynosautov9-cmu-dpum",
+ .data = &dpum_cmu_info,
+ }, {
.compatible = "samsung,exynosautov9-cmu-fsys0",
.data = &fsys0_cmu_info,
}, {
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
new file mode 100644
index 000000000000..7ba9748c0526
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -0,0 +1,1173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Sunyeal Hong <sunyeal.hong@samsung.com>
+ *
+ * Common Clock Framework support for ExynosAuto v920 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynosautov920.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1)
+#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1)
+
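As the NOTE says, each CLKS_NR_* constant is the highest dt-bindings clock ID in that CMU plus one: the core allocates a lookup table with that many clk_hw slots, indexed directly by ID. A rough equivalent of the allocation (sketch, not the exact implementation):

    /* One slot per possible ID; unregistered slots stay NULL and
     * of_clk lookups against them fail cleanly.
     */
    struct clk_hw **hws = kcalloc(CLKS_NR_TOP, sizeof(*hws), GFP_KERNEL);
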
+/* ---- CMU_TOP ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_TOP (0x11000000) */
+#define PLL_LOCKTIME_PLL_MMC 0x0004
+#define PLL_LOCKTIME_PLL_SHARED0 0x0008
+#define PLL_LOCKTIME_PLL_SHARED1 0x000c
+#define PLL_LOCKTIME_PLL_SHARED2 0x0010
+#define PLL_LOCKTIME_PLL_SHARED3 0x0014
+#define PLL_LOCKTIME_PLL_SHARED4 0x0018
+#define PLL_LOCKTIME_PLL_SHARED5 0x001c
+#define PLL_CON0_PLL_MMC 0x0140
+#define PLL_CON3_PLL_MMC 0x014c
+#define PLL_CON0_PLL_SHARED0 0x0180
+#define PLL_CON3_PLL_SHARED0 0x018c
+#define PLL_CON0_PLL_SHARED1 0x01c0
+#define PLL_CON3_PLL_SHARED1 0x01cc
+#define PLL_CON0_PLL_SHARED2 0x0200
+#define PLL_CON3_PLL_SHARED2 0x020c
+#define PLL_CON0_PLL_SHARED3 0x0240
+#define PLL_CON3_PLL_SHARED3 0x024c
+#define PLL_CON0_PLL_SHARED4 0x0280
+#define PLL_CON3_PLL_SHARED4 0x028c
+#define PLL_CON0_PLL_SHARED5 0x02c0
+#define PLL_CON3_PLL_SHARED5 0x02cc
+
+/* MUX */
+#define CLK_CON_MUX_MUX_CLKCMU_ACC_NOC 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_APM_NOC 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_NOC 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK0 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK1 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK2 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK3 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_NOC 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_NOC 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_ACC_ORB 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_GNPU_XMAA 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_ISP_NOC 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_M2M_NOC 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_MFC 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_WFD 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_MFD_NOC 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_MISC_NOC 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_SNW_NOC 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_SSP_NOC 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_TAA_NOC 0x10e8
+#define CLK_CON_MUX_MUX_CLK_CMU_NOCP 0x10ec
+#define CLK_CON_MUX_MUX_CLK_CMU_PLLCLKOUT 0x10f0
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10f4
+
+/* DIV */
+#define CLK_CON_DIV_CLKCMU_ACC_NOC 0x1800
+#define CLK_CON_DIV_CLKCMU_APM_NOC 0x1804
+#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1808
+#define CLK_CON_DIV_CLKCMU_AUD_NOC 0x180c
+#define CLK_CON_DIV_CLKCMU_CIS_MCLK0 0x1810
+#define CLK_CON_DIV_CLKCMU_CIS_MCLK1 0x1814
+#define CLK_CON_DIV_CLKCMU_CIS_MCLK2 0x1818
+#define CLK_CON_DIV_CLKCMU_CIS_MCLK3 0x181c
+#define CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER 0x1820
+#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG 0x1824
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1828
+#define CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER 0x182c
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x1830
+#define CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_DNC_NOC 0x183c
+#define CLK_CON_DIV_CLKCMU_DPTX_DPGTC 0x1840
+#define CLK_CON_DIV_CLKCMU_DPTX_DPOSC 0x1844
+#define CLK_CON_DIV_CLKCMU_DPTX_NOC 0x1848
+#define CLK_CON_DIV_CLKCMU_DPUB_DSIM 0x184c
+#define CLK_CON_DIV_CLKCMU_DPUB_NOC 0x1850
+#define CLK_CON_DIV_CLKCMU_DPUF0_NOC 0x1854
+#define CLK_CON_DIV_CLKCMU_DPUF1_NOC 0x1858
+#define CLK_CON_DIV_CLKCMU_DPUF2_NOC 0x185c
+#define CLK_CON_DIV_CLKCMU_DSP_NOC 0x1860
+#define CLK_CON_DIV_CLKCMU_G3D_NOCP 0x1864
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1868
+#define CLK_CON_DIV_CLKCMU_GNPU_NOC 0x186c
+#define CLK_CON_DIV_CLKCMU_HSI0_NOC 0x1870
+#define CLK_CON_DIV_CLKCMU_ACC_ORB 0x1874
+#define CLK_CON_DIV_CLKCMU_GNPU_XMAA 0x1878
+#define CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD 0x187c
+#define CLK_CON_DIV_CLKCMU_HSI1_NOC 0x1880
+#define CLK_CON_DIV_CLKCMU_HSI1_USBDRD 0x1884
+#define CLK_CON_DIV_CLKCMU_HSI2_ETHERNET 0x1888
+#define CLK_CON_DIV_CLKCMU_HSI2_NOC 0x188c
+#define CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS 0x1890
+#define CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD 0x1894
+#define CLK_CON_DIV_CLKCMU_ISP_NOC 0x1898
+#define CLK_CON_DIV_CLKCMU_M2M_JPEG 0x189c
+#define CLK_CON_DIV_CLKCMU_M2M_NOC 0x18a0
+#define CLK_CON_DIV_CLKCMU_MFC_MFC 0x18a4
+#define CLK_CON_DIV_CLKCMU_MFC_WFD 0x18a8
+#define CLK_CON_DIV_CLKCMU_MFD_NOC 0x18ac
+#define CLK_CON_DIV_CLKCMU_MIF_NOCP 0x18b0
+#define CLK_CON_DIV_CLKCMU_MISC_NOC 0x18b4
+#define CLK_CON_DIV_CLKCMU_NOCL0_NOC 0x18b8
+#define CLK_CON_DIV_CLKCMU_NOCL1_NOC 0x18bc
+#define CLK_CON_DIV_CLKCMU_NOCL2_NOC 0x18c0
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x18c4
+#define CLK_CON_DIV_CLKCMU_PERIC0_NOC 0x18c8
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18cc
+#define CLK_CON_DIV_CLKCMU_PERIC1_NOC 0x18d0
+#define CLK_CON_DIV_CLKCMU_SDMA_NOC 0x18d4
+#define CLK_CON_DIV_CLKCMU_SNW_NOC 0x18d8
+#define CLK_CON_DIV_CLKCMU_SSP_NOC 0x18dc
+#define CLK_CON_DIV_CLKCMU_TAA_NOC 0x18e0
+#define CLK_CON_DIV_CLK_ADD_CH_CLK 0x18e4
+#define CLK_CON_DIV_CLK_CMU_PLLCLKOUT 0x18e8
+#define CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST 0x18ec
+#define CLK_CON_DIV_DIV_CLK_CMU_NOCP 0x18f0
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_LOCKTIME_PLL_SHARED5,
+ PLL_CON0_PLL_MMC,
+ PLL_CON3_PLL_MMC,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
+ PLL_CON3_PLL_SHARED4,
+ PLL_CON0_PLL_SHARED5,
+ PLL_CON3_PLL_SHARED5,
+ CLK_CON_MUX_MUX_CLKCMU_ACC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_APM_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_MCLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC,
+ CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM,
+ CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_ACC_ORB,
+ CLK_CON_MUX_MUX_CLKCMU_GNPU_XMAA,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_ISP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG,
+ CLK_CON_MUX_MUX_CLKCMU_M2M_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_MFC,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MFD_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_MISC_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SNW_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_SSP_NOC,
+ CLK_CON_MUX_MUX_CLKCMU_TAA_NOC,
+ CLK_CON_MUX_MUX_CLK_CMU_NOCP,
+ CLK_CON_MUX_MUX_CLK_CMU_PLLCLKOUT,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_DIV_CLKCMU_ACC_NOC,
+ CLK_CON_DIV_CLKCMU_APM_NOC,
+ CLK_CON_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_CLKCMU_AUD_NOC,
+ CLK_CON_DIV_CLKCMU_CIS_MCLK0,
+ CLK_CON_DIV_CLKCMU_CIS_MCLK1,
+ CLK_CON_DIV_CLKCMU_CIS_MCLK2,
+ CLK_CON_DIV_CLKCMU_CIS_MCLK3,
+ CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER,
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER,
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_CLKCMU_DNC_NOC,
+ CLK_CON_DIV_CLKCMU_DPTX_DPGTC,
+ CLK_CON_DIV_CLKCMU_DPTX_DPOSC,
+ CLK_CON_DIV_CLKCMU_DPTX_NOC,
+ CLK_CON_DIV_CLKCMU_DPUB_DSIM,
+ CLK_CON_DIV_CLKCMU_DPUB_NOC,
+ CLK_CON_DIV_CLKCMU_DPUF0_NOC,
+ CLK_CON_DIV_CLKCMU_DPUF1_NOC,
+ CLK_CON_DIV_CLKCMU_DPUF2_NOC,
+ CLK_CON_DIV_CLKCMU_DSP_NOC,
+ CLK_CON_DIV_CLKCMU_G3D_NOCP,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_GNPU_NOC,
+ CLK_CON_DIV_CLKCMU_HSI0_NOC,
+ CLK_CON_DIV_CLKCMU_ACC_ORB,
+ CLK_CON_DIV_CLKCMU_GNPU_XMAA,
+ CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_NOC,
+ CLK_CON_DIV_CLKCMU_HSI1_USBDRD,
+ CLK_CON_DIV_CLKCMU_HSI2_ETHERNET,
+ CLK_CON_DIV_CLKCMU_HSI2_NOC,
+ CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS,
+ CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_ISP_NOC,
+ CLK_CON_DIV_CLKCMU_M2M_JPEG,
+ CLK_CON_DIV_CLKCMU_M2M_NOC,
+ CLK_CON_DIV_CLKCMU_MFC_MFC,
+ CLK_CON_DIV_CLKCMU_MFC_WFD,
+ CLK_CON_DIV_CLKCMU_MFD_NOC,
+ CLK_CON_DIV_CLKCMU_MIF_NOCP,
+ CLK_CON_DIV_CLKCMU_MISC_NOC,
+ CLK_CON_DIV_CLKCMU_NOCL0_NOC,
+ CLK_CON_DIV_CLKCMU_NOCL1_NOC,
+ CLK_CON_DIV_CLKCMU_NOCL2_NOC,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP,
+ CLK_CON_DIV_CLKCMU_PERIC0_NOC,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP,
+ CLK_CON_DIV_CLKCMU_PERIC1_NOC,
+ CLK_CON_DIV_CLKCMU_SDMA_NOC,
+ CLK_CON_DIV_CLKCMU_SNW_NOC,
+ CLK_CON_DIV_CLKCMU_SSP_NOC,
+ CLK_CON_DIV_CLKCMU_TAA_NOC,
+ CLK_CON_DIV_CLK_ADD_CH_CLK,
+ CLK_CON_DIV_CLK_CMU_PLLCLKOUT,
+ CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_DIV_CLK_CMU_NOCP,
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_531x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_531x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_531x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_531x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_531x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ PLL(pll_531x, FOUT_SHARED5_PLL, "fout_shared5_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED5, PLL_CON3_PLL_SHARED5, NULL),
+ PLL(pll_531x, FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_shared2_pll_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_shared3_pll_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_shared4_pll_p) = { "oscclk", "fout_shared4_pll" };
+PNAME(mout_shared5_pll_p) = { "oscclk", "fout_shared5_pll" };
+PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
+
+PNAME(mout_clkcmu_cmu_boost_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_cmu_cmuref_p) = { "oscclk", "dout_cmu_boost" };
+
+PNAME(mout_clkcmu_acc_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared5_div1",
+ "dout_shared3_div1", "oscclk" };
+
+PNAME(mout_clkcmu_acc_orb_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared1_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_apm_noc_p) = { "dout_shared2_div2", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_aud_cpu_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared4_div3" };
+
+PNAME(mout_clkcmu_aud_noc_p) = { "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared1_div2", "dout_shared2_div3" };
+
+PNAME(mout_clkcmu_cpucl0_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+
+PNAME(mout_clkcmu_cpucl0_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared2_div3", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_cpucl0_dbg_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared0_div4" };
+
+PNAME(mout_clkcmu_cpucl1_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+
+PNAME(mout_clkcmu_cpucl1_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared2_div3", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_cpucl2_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+
+PNAME(mout_clkcmu_cpucl2_cluster_p) = { "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2",
+ "dout_shared2_div3", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_dnc_noc_p) = { "dout_shared1_div2", "dout_shared2_div2",
+ "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared1_div3", "dout_shared2_div3",
+ "dout_shared1_div4", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_dptx_noc_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_dptx_dpgtc_p) = { "oscclk", "dout_shared2_div3",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_dptx_dposc_p) = { "oscclk", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_dpub_noc_p) = { "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4",
+ "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_dpub_dsim_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_dpuf_noc_p) = { "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4",
+ "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_dsp_noc_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "fout_shared5_pll", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_g3d_switch_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared4_div2" };
+
+PNAME(mout_clkcmu_g3d_nocp_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_gnpu_noc_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared2_div3",
+ "fout_shared5_pll", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_hsi0_noc_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_hsi1_noc_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_hsi1_usbdrd_p) = { "oscclk", "dout_shared2_div3",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_hsi1_mmc_card_p) = { "oscclk", "dout_shared2_div2",
+ "dout_shared4_div2", "fout_mmc_pll" };
+
+PNAME(mout_clkcmu_hsi2_noc_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_hsi2_noc_ufs_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div2" };
+
+PNAME(mout_clkcmu_hsi2_ufs_embd_p) = { "oscclk", "dout_shared2_div3",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_hsi2_ethernet_p) = { "oscclk", "dout_shared2_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+
+PNAME(mout_clkcmu_isp_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_m2m_noc_p) = { "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+
+PNAME(mout_clkcmu_m2m_jpeg_p) = { "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+
+PNAME(mout_clkcmu_mfc_mfc_p) = { "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+
+PNAME(mout_clkcmu_mfc_wfd_p) = { "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared2_div3", "dout_shared1_div4" };
+
+PNAME(mout_clkcmu_mfd_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_mif_switch_p) = { "fout_shared0_pll", "fout_shared1_pll",
+ "fout_shared2_pll", "fout_shared4_pll",
+ "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "fout_shared5_pll" };
+
+PNAME(mout_clkcmu_mif_nocp_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div4", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_misc_noc_p) = { "dout_shared4_div2", "dout_shared2_div3",
+ "dout_shared1_div4", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_nocl0_noc_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_nocl1_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_nocl2_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_peric0_noc_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_peric0_ip_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_peric1_noc_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_peric1_ip_p) = { "dout_shared2_div3", "dout_shared2_div4" };
+
+PNAME(mout_clkcmu_sdma_noc_p) = { "dout_shared1_div2", "dout_shared2_div2",
+ "dout_shared0_div3", "dout_shared4_div2",
+ "dout_shared1_div3", "dout_shared2_div3",
+ "dout_shared1_div4", "fout_shared3_pll" };
+
+PNAME(mout_clkcmu_snw_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+PNAME(mout_clkcmu_ssp_noc_p) = { "dout_shared2_div3", "dout_shared1_div4",
+ "dout_shared2_div2", "dout_shared4_div4" };
+
+PNAME(mout_clkcmu_taa_noc_p) = { "dout_shared2_div2", "dout_shared0_div3",
+ "dout_shared4_div2", "dout_shared1_div3",
+ "dout_shared2_div3", "fout_shared5_pll",
+ "fout_shared3_pll", "oscclk" };
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ MUX(MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(MOUT_SHARED2_PLL, "mout_shared2_pll", mout_shared2_pll_p,
+ PLL_CON0_PLL_SHARED2, 4, 1),
+ MUX(MOUT_SHARED3_PLL, "mout_shared3_pll", mout_shared3_pll_p,
+ PLL_CON0_PLL_SHARED3, 4, 1),
+ MUX(MOUT_SHARED4_PLL, "mout_shared4_pll", mout_shared4_pll_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+ MUX(MOUT_SHARED5_PLL, "mout_shared5_pll", mout_shared5_pll_p,
+ PLL_CON0_PLL_SHARED5, 4, 1),
+ MUX(MOUT_MMC_PLL, "mout_mmc_pll", mout_mmc_pll_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+
+ /* BOOST */
+ MUX(MOUT_CLKCMU_CMU_BOOST, "mout_clkcmu_cmu_boost",
+ mout_clkcmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(MOUT_CLKCMU_CMU_CMUREF, "mout_clkcmu_cmu_cmuref",
+ mout_clkcmu_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+
+ /* ACC */
+ MUX(MOUT_CLKCMU_ACC_NOC, "mout_clkcmu_acc_noc",
+ mout_clkcmu_acc_noc_p, CLK_CON_MUX_MUX_CLKCMU_ACC_NOC, 0, 3),
+ MUX(MOUT_CLKCMU_ACC_ORB, "mout_clkcmu_acc_orb",
+ mout_clkcmu_acc_orb_p, CLK_CON_MUX_MUX_CLKCMU_ACC_ORB, 0, 3),
+
+ /* APM */
+ MUX(MOUT_CLKCMU_APM_NOC, "mout_clkcmu_apm_noc",
+ mout_clkcmu_apm_noc_p, CLK_CON_MUX_MUX_CLKCMU_APM_NOC, 0, 2),
+
+ /* AUD */
+ MUX(MOUT_CLKCMU_AUD_CPU, "mout_clkcmu_aud_cpu",
+ mout_clkcmu_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 3),
+ MUX(MOUT_CLKCMU_AUD_NOC, "mout_clkcmu_aud_noc",
+ mout_clkcmu_aud_noc_p, CLK_CON_MUX_MUX_CLKCMU_AUD_NOC, 0, 2),
+
+ /* CPUCL0 */
+ MUX(MOUT_CLKCMU_CPUCL0_SWITCH, "mout_clkcmu_cpucl0_switch",
+ mout_clkcmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_CPUCL0_CLUSTER, "mout_clkcmu_cpucl0_cluster",
+ mout_clkcmu_cpucl0_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER,
+ 0, 3),
+ MUX(MOUT_CLKCMU_CPUCL0_DBG, "mout_clkcmu_cpucl0_dbg",
+ mout_clkcmu_cpucl0_dbg_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG,
+ 0, 2),
+
+ /* CPUCL1 */
+ MUX(MOUT_CLKCMU_CPUCL1_SWITCH, "mout_clkcmu_cpucl1_switch",
+ mout_clkcmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_CPUCL1_CLUSTER, "mout_clkcmu_cpucl1_cluster",
+ mout_clkcmu_cpucl1_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER,
+ 0, 3),
+
+ /* CPUCL2 */
+ MUX(MOUT_CLKCMU_CPUCL2_SWITCH, "mout_clkcmu_cpucl2_switch",
+ mout_clkcmu_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 2),
+ MUX(MOUT_CLKCMU_CPUCL2_CLUSTER, "mout_clkcmu_cpucl2_cluster",
+ mout_clkcmu_cpucl2_cluster_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_CLUSTER,
+ 0, 3),
+
+ /* DNC */
+ MUX(MOUT_CLKCMU_DNC_NOC, "mout_clkcmu_dnc_noc",
+ mout_clkcmu_dnc_noc_p, CLK_CON_MUX_MUX_CLKCMU_DNC_NOC, 0, 3),
+
+ /* DPTX */
+ MUX(MOUT_CLKCMU_DPTX_NOC, "mout_clkcmu_dptx_noc",
+ mout_clkcmu_dptx_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_NOC, 0, 2),
+ MUX(MOUT_CLKCMU_DPTX_DPGTC, "mout_clkcmu_dptx_dpgtc",
+ mout_clkcmu_dptx_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC, 0, 2),
+ MUX(MOUT_CLKCMU_DPTX_DPOSC, "mout_clkcmu_dptx_dposc",
+ mout_clkcmu_dptx_dposc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_DPOSC, 0, 1),
+
+ /* DPUB */
+ MUX(MOUT_CLKCMU_DPUB_NOC, "mout_clkcmu_dpub_noc",
+ mout_clkcmu_dpub_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_NOC, 0, 3),
+ MUX(MOUT_CLKCMU_DPUB_DSIM, "mout_clkcmu_dpub_dsim",
+ mout_clkcmu_dpub_dsim_p, CLK_CON_MUX_MUX_CLKCMU_DPUB_DSIM, 0, 1),
+
+ /* DPUF */
+ MUX(MOUT_CLKCMU_DPUF0_NOC, "mout_clkcmu_dpuf0_noc",
+ mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF0_NOC, 0, 3),
+ MUX(MOUT_CLKCMU_DPUF1_NOC, "mout_clkcmu_dpuf1_noc",
+ mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF1_NOC, 0, 3),
+ MUX(MOUT_CLKCMU_DPUF2_NOC, "mout_clkcmu_dpuf2_noc",
+ mout_clkcmu_dpuf_noc_p, CLK_CON_MUX_MUX_CLKCMU_DPUF2_NOC, 0, 3),
+
+ /* DSP */
+ MUX(MOUT_CLKCMU_DSP_NOC, "mout_clkcmu_dsp_noc",
+ mout_clkcmu_dsp_noc_p, CLK_CON_MUX_MUX_CLKCMU_DSP_NOC, 0, 3),
+
+ /* G3D */
+ MUX(MOUT_CLKCMU_G3D_SWITCH, "mout_clkcmu_g3d_switch",
+ mout_clkcmu_g3d_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH, 0, 2),
+ MUX(MOUT_CLKCMU_G3D_NOCP, "mout_clkcmu_g3d_nocp",
+ mout_clkcmu_g3d_nocp_p, CLK_CON_MUX_MUX_CLKCMU_G3D_NOCP, 0, 2),
+
+ /* GNPU */
+ MUX(MOUT_CLKCMU_GNPU_NOC, "mout_clkcmu_gnpu_noc",
+ mout_clkcmu_gnpu_noc_p, CLK_CON_MUX_MUX_CLKCMU_GNPU_NOC, 0, 3),
+
+ /* HSI0 */
+ MUX(MOUT_CLKCMU_HSI0_NOC, "mout_clkcmu_hsi0_noc",
+ mout_clkcmu_hsi0_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_NOC, 0, 2),
+
+ /* HSI1 */
+ MUX(MOUT_CLKCMU_HSI1_NOC, "mout_clkcmu_hsi1_noc",
+ mout_clkcmu_hsi1_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_NOC,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI1_USBDRD, "mout_clkcmu_hsi1_usbdrd",
+ mout_clkcmu_hsi1_usbdrd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_USBDRD,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI1_MMC_CARD, "mout_clkcmu_hsi1_mmc_card",
+ mout_clkcmu_hsi1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ 0, 2),
+
+ /* HSI2 */
+ MUX(MOUT_CLKCMU_HSI2_NOC, "mout_clkcmu_hsi2_noc",
+ mout_clkcmu_hsi2_noc_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI2_NOC_UFS, "mout_clkcmu_hsi2_noc_ufs",
+ mout_clkcmu_hsi2_noc_ufs_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_NOC_UFS,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI2_UFS_EMBD, "mout_clkcmu_hsi2_ufs_embd",
+ mout_clkcmu_hsi2_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD,
+ 0, 2),
+ MUX(MOUT_CLKCMU_HSI2_ETHERNET, "mout_clkcmu_hsi2_ethernet",
+ mout_clkcmu_hsi2_ethernet_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_ETHERNET,
+ 0, 2),
+
+ /* ISP */
+ MUX(MOUT_CLKCMU_ISP_NOC, "mout_clkcmu_isp_noc",
+ mout_clkcmu_isp_noc_p, CLK_CON_MUX_MUX_CLKCMU_ISP_NOC, 0, 3),
+
+ /* M2M */
+ MUX(MOUT_CLKCMU_M2M_NOC, "mout_clkcmu_m2m_noc",
+ mout_clkcmu_m2m_noc_p, CLK_CON_MUX_MUX_CLKCMU_M2M_NOC, 0, 2),
+ MUX(MOUT_CLKCMU_M2M_JPEG, "mout_clkcmu_m2m_jpeg",
+ mout_clkcmu_m2m_jpeg_p, CLK_CON_MUX_MUX_CLKCMU_M2M_JPEG, 0, 2),
+
+ /* MFC */
+ MUX(MOUT_CLKCMU_MFC_MFC, "mout_clkcmu_mfc_mfc",
+ mout_clkcmu_mfc_mfc_p, CLK_CON_MUX_MUX_CLKCMU_MFC_MFC, 0, 2),
+ MUX(MOUT_CLKCMU_MFC_WFD, "mout_clkcmu_mfc_wfd",
+ mout_clkcmu_mfc_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC_WFD, 0, 2),
+
+ /* MFD */
+ MUX(MOUT_CLKCMU_MFD_NOC, "mout_clkcmu_mfd_noc",
+ mout_clkcmu_mfd_noc_p, CLK_CON_MUX_MUX_CLKCMU_MFD_NOC, 0, 3),
+
+ /* MIF */
+ MUX(MOUT_CLKCMU_MIF_SWITCH, "mout_clkcmu_mif_switch",
+ mout_clkcmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(MOUT_CLKCMU_MIF_NOCP, "mout_clkcmu_mif_nocp",
+ mout_clkcmu_mif_nocp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_NOCP, 0, 2),
+
+ /* MISC */
+ MUX(MOUT_CLKCMU_MISC_NOC, "mout_clkcmu_misc_noc",
+ mout_clkcmu_misc_noc_p, CLK_CON_MUX_MUX_CLKCMU_MISC_NOC, 0, 2),
+
+ /* NOCL0 */
+ MUX(MOUT_CLKCMU_NOCL0_NOC, "mout_clkcmu_nocl0_noc",
+ mout_clkcmu_nocl0_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL0_NOC, 0, 3),
+
+ /* NOCL1 */
+ MUX(MOUT_CLKCMU_NOCL1_NOC, "mout_clkcmu_nocl1_noc",
+ mout_clkcmu_nocl1_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL1_NOC, 0, 3),
+
+ /* NOCL2 */
+ MUX(MOUT_CLKCMU_NOCL2_NOC, "mout_clkcmu_nocl2_noc",
+ mout_clkcmu_nocl2_noc_p, CLK_CON_MUX_MUX_CLKCMU_NOCL2_NOC, 0, 3),
+
+ /* PERIC0 */
+ MUX(MOUT_CLKCMU_PERIC0_NOC, "mout_clkcmu_peric0_noc",
+ mout_clkcmu_peric0_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_NOC, 0, 1),
+ MUX(MOUT_CLKCMU_PERIC0_IP, "mout_clkcmu_peric0_ip",
+ mout_clkcmu_peric0_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1),
+
+ /* PERIC1 */
+ MUX(MOUT_CLKCMU_PERIC1_NOC, "mout_clkcmu_peric1_noc",
+ mout_clkcmu_peric1_noc_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_NOC, 0, 1),
+ MUX(MOUT_CLKCMU_PERIC1_IP, "mout_clkcmu_peric1_ip",
+ mout_clkcmu_peric1_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1),
+
+ /* SDMA */
+ MUX(MOUT_CLKCMU_SDMA_NOC, "mout_clkcmu_sdma_noc",
+ mout_clkcmu_sdma_noc_p, CLK_CON_MUX_MUX_CLKCMU_SDMA_NOC, 0, 3),
+
+ /* SNW */
+ MUX(MOUT_CLKCMU_SNW_NOC, "mout_clkcmu_snw_noc",
+ mout_clkcmu_snw_noc_p, CLK_CON_MUX_MUX_CLKCMU_SNW_NOC, 0, 3),
+
+ /* SSP */
+ MUX(MOUT_CLKCMU_SSP_NOC, "mout_clkcmu_ssp_noc",
+ mout_clkcmu_ssp_noc_p, CLK_CON_MUX_MUX_CLKCMU_SSP_NOC, 0, 2),
+
+ /* TAA */
+ MUX(MOUT_CLKCMU_TAA_NOC, "mout_clkcmu_taa_noc",
+ mout_clkcmu_taa_noc_p, CLK_CON_MUX_MUX_CLKCMU_TAA_NOC, 0, 3),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+
+ /* BOOST */
+ DIV(DOUT_CLKCMU_CMU_BOOST, "dout_clkcmu_cmu_boost",
+ "mout_clkcmu_cmu_boost", CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST, 0, 2),
+
+ /* ACC */
+ DIV(DOUT_CLKCMU_ACC_NOC, "dout_clkcmu_acc_noc",
+ "mout_clkcmu_acc_noc", CLK_CON_DIV_CLKCMU_ACC_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_ACC_ORB, "dout_clkcmu_acc_orb",
+ "mout_clkcmu_acc_orb", CLK_CON_DIV_CLKCMU_ACC_ORB, 0, 4),
+
+ /* APM */
+ DIV(DOUT_CLKCMU_APM_NOC, "dout_clkcmu_apm_noc",
+ "mout_clkcmu_apm_noc", CLK_CON_DIV_CLKCMU_APM_NOC, 0, 3),
+
+ /* AUD */
+ DIV(DOUT_CLKCMU_AUD_CPU, "dout_clkcmu_aud_cpu",
+ "mout_clkcmu_aud_cpu", CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(DOUT_CLKCMU_AUD_NOC, "dout_clkcmu_aud_noc",
+ "mout_clkcmu_aud_noc", CLK_CON_DIV_CLKCMU_AUD_NOC, 0, 4),
+
+ /* CPUCL0 */
+ DIV(DOUT_CLKCMU_CPUCL0_SWITCH, "dout_clkcmu_cpucl0_switch",
+ "mout_clkcmu_cpucl0_switch",
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL0_CLUSTER, "dout_clkcmu_cpucl0_cluster",
+ "mout_clkcmu_cpucl0_cluster",
+ CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER, 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL0_DBG, "dout_clkcmu_cpucl0_dbg",
+ "mout_clkcmu_cpucl0_dbg",
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG, 0, 4),
+
+ /* CPUCL1 */
+ DIV(DOUT_CLKCMU_CPUCL1_SWITCH, "dout_clkcmu_cpucl1_switch",
+ "mout_clkcmu_cpucl1_switch",
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL1_CLUSTER, "dout_clkcmu_cpucl1_cluster",
+ "mout_clkcmu_cpucl1_cluster",
+ CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER, 0, 3),
+
+ /* CPUCL2 */
+ DIV(DOUT_CLKCMU_CPUCL2_SWITCH, "dout_clkcmu_cpucl2_switch",
+ "mout_clkcmu_cpucl2_switch",
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_CPUCL2_CLUSTER, "dout_clkcmu_cpucl2_cluster",
+ "mout_clkcmu_cpucl2_cluster",
+ CLK_CON_DIV_CLKCMU_CPUCL2_CLUSTER, 0, 3),
+
+ /* DNC */
+ DIV(DOUT_CLKCMU_DNC_NOC, "dout_clkcmu_dnc_noc",
+ "mout_clkcmu_dnc_noc", CLK_CON_DIV_CLKCMU_DNC_NOC, 0, 4),
+
+ /* DPTX */
+ DIV(DOUT_CLKCMU_DPTX_NOC, "dout_clkcmu_dptx_noc",
+ "mout_clkcmu_dptx_noc", CLK_CON_DIV_CLKCMU_DPTX_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_DPTX_DPGTC, "dout_clkcmu_dptx_dpgtc",
+ "mout_clkcmu_dptx_dpgtc", CLK_CON_DIV_CLKCMU_DPTX_DPGTC, 0, 3),
+ DIV(DOUT_CLKCMU_DPTX_DPOSC, "dout_clkcmu_dptx_dposc",
+ "mout_clkcmu_dptx_dposc", CLK_CON_DIV_CLKCMU_DPTX_DPOSC, 0, 5),
+
+ /* DPUB */
+ DIV(DOUT_CLKCMU_DPUB_NOC, "dout_clkcmu_dpub_noc",
+ "mout_clkcmu_dpub_noc", CLK_CON_DIV_CLKCMU_DPUB_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_DPUB_DSIM, "dout_clkcmu_dpub_dsim",
+ "mout_clkcmu_dpub_dsim", CLK_CON_DIV_CLKCMU_DPUB_DSIM, 0, 4),
+
+ /* DPUF */
+ DIV(DOUT_CLKCMU_DPUF0_NOC, "dout_clkcmu_dpuf0_noc",
+ "mout_clkcmu_dpuf0_noc", CLK_CON_DIV_CLKCMU_DPUF0_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_DPUF1_NOC, "dout_clkcmu_dpuf1_noc",
+ "mout_clkcmu_dpuf1_noc", CLK_CON_DIV_CLKCMU_DPUF1_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_DPUF2_NOC, "dout_clkcmu_dpuf2_noc",
+ "mout_clkcmu_dpuf2_noc", CLK_CON_DIV_CLKCMU_DPUF2_NOC, 0, 4),
+
+ /* DSP */
+ DIV(DOUT_CLKCMU_DSP_NOC, "dout_clkcmu_dsp_noc",
+ "mout_clkcmu_dsp_noc", CLK_CON_DIV_CLKCMU_DSP_NOC, 0, 4),
+
+ /* G3D */
+ DIV(DOUT_CLKCMU_G3D_SWITCH, "dout_clkcmu_g3d_switch",
+ "mout_clkcmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(DOUT_CLKCMU_G3D_NOCP, "dout_clkcmu_g3d_nocp",
+ "mout_clkcmu_g3d_nocp", CLK_CON_DIV_CLKCMU_G3D_NOCP, 0, 3),
+
+ /* GNPU */
+ DIV(DOUT_CLKCMU_GNPU_NOC, "dout_clkcmu_gnpu_noc",
+ "mout_clkcmu_gnpu_noc", CLK_CON_DIV_CLKCMU_GNPU_NOC, 0, 4),
+
+ /* HSI0 */
+ DIV(DOUT_CLKCMU_HSI0_NOC, "dout_clkcmu_hsi0_noc",
+ "mout_clkcmu_hsi0_noc", CLK_CON_DIV_CLKCMU_HSI0_NOC, 0, 4),
+
+ /* HSI1 */
+ DIV(DOUT_CLKCMU_HSI1_NOC, "dout_clkcmu_hsi1_noc",
+ "mout_clkcmu_hsi1_noc", CLK_CON_DIV_CLKCMU_HSI1_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_HSI1_USBDRD, "dout_clkcmu_hsi1_usbdrd",
+ "mout_clkcmu_hsi1_usbdrd", CLK_CON_DIV_CLKCMU_HSI1_USBDRD, 0, 4),
+ DIV(DOUT_CLKCMU_HSI1_MMC_CARD, "dout_clkcmu_hsi1_mmc_card",
+ "mout_clkcmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD, 0, 9),
+
+ /* HSI2 */
+ DIV(DOUT_CLKCMU_HSI2_NOC, "dout_clkcmu_hsi2_noc",
+ "mout_clkcmu_hsi2_noc", CLK_CON_DIV_CLKCMU_HSI2_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_HSI2_NOC_UFS, "dout_clkcmu_hsi2_noc_ufs",
+ "mout_clkcmu_hsi2_noc_ufs", CLK_CON_DIV_CLKCMU_HSI2_NOC_UFS, 0, 4),
+ DIV(DOUT_CLKCMU_HSI2_UFS_EMBD, "dout_clkcmu_hsi2_ufs_embd",
+ "mout_clkcmu_hsi2_ufs_embd", CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD, 0, 3),
+ DIV(DOUT_CLKCMU_HSI2_ETHERNET, "dout_clkcmu_hsi2_ethernet",
+ "mout_clkcmu_hsi2_ethernet", CLK_CON_DIV_CLKCMU_HSI2_ETHERNET, 0, 3),
+
+ /* ISP */
+ DIV(DOUT_CLKCMU_ISP_NOC, "dout_clkcmu_isp_noc",
+ "mout_clkcmu_isp_noc", CLK_CON_DIV_CLKCMU_ISP_NOC, 0, 4),
+
+ /* M2M */
+ DIV(DOUT_CLKCMU_M2M_NOC, "dout_clkcmu_m2m_noc",
+ "mout_clkcmu_m2m_noc", CLK_CON_DIV_CLKCMU_M2M_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_M2M_JPEG, "dout_clkcmu_m2m_jpeg",
+ "mout_clkcmu_m2m_jpeg", CLK_CON_DIV_CLKCMU_M2M_JPEG, 0, 4),
+
+ /* MFC */
+ DIV(DOUT_CLKCMU_MFC_MFC, "dout_clkcmu_mfc_mfc",
+ "mout_clkcmu_mfc_mfc", CLK_CON_DIV_CLKCMU_MFC_MFC, 0, 4),
+ DIV(DOUT_CLKCMU_MFC_WFD, "dout_clkcmu_mfc_wfd",
+ "mout_clkcmu_mfc_wfd", CLK_CON_DIV_CLKCMU_MFC_WFD, 0, 4),
+
+ /* MFD */
+ DIV(DOUT_CLKCMU_MFD_NOC, "dout_clkcmu_mfd_noc",
+ "mout_clkcmu_mfd_noc", CLK_CON_DIV_CLKCMU_MFD_NOC, 0, 4),
+
+ /* MIF */
+ DIV(DOUT_CLKCMU_MIF_NOCP, "dout_clkcmu_mif_nocp",
+ "mout_clkcmu_mif_nocp", CLK_CON_DIV_CLKCMU_MIF_NOCP, 0, 4),
+
+ /* MISC */
+ DIV(DOUT_CLKCMU_MISC_NOC, "dout_clkcmu_misc_noc",
+ "mout_clkcmu_misc_noc", CLK_CON_DIV_CLKCMU_MISC_NOC, 0, 4),
+
+ /* NOCL0 */
+ DIV(DOUT_CLKCMU_NOCL0_NOC, "dout_clkcmu_nocl0_noc",
+ "mout_clkcmu_nocl0_noc", CLK_CON_DIV_CLKCMU_NOCL0_NOC, 0, 4),
+
+ /* NOCL1 */
+ DIV(DOUT_CLKCMU_NOCL1_NOC, "dout_clkcmu_nocl1_noc",
+ "mout_clkcmu_nocl1_noc", CLK_CON_DIV_CLKCMU_NOCL1_NOC, 0, 4),
+
+ /* NOCL2 */
+ DIV(DOUT_CLKCMU_NOCL2_NOC, "dout_clkcmu_nocl2_noc",
+ "mout_clkcmu_nocl2_noc", CLK_CON_DIV_CLKCMU_NOCL2_NOC, 0, 4),
+
+ /* PERIC0 */
+ DIV(DOUT_CLKCMU_PERIC0_NOC, "dout_clkcmu_peric0_noc",
+ "mout_clkcmu_peric0_noc", CLK_CON_DIV_CLKCMU_PERIC0_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_PERIC0_IP, "dout_clkcmu_peric0_ip",
+ "mout_clkcmu_peric0_ip", CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4),
+
+ /* PERIC1 */
+ DIV(DOUT_CLKCMU_PERIC1_NOC, "dout_clkcmu_peric1_noc",
+ "mout_clkcmu_peric1_noc", CLK_CON_DIV_CLKCMU_PERIC1_NOC, 0, 4),
+ DIV(DOUT_CLKCMU_PERIC1_IP, "dout_clkcmu_peric1_ip",
+ "mout_clkcmu_peric1_ip", CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4),
+
+ /* SDMA */
+ DIV(DOUT_CLKCMU_SDMA_NOC, "dout_clkcmu_sdma_noc",
+ "mout_clkcmu_sdma_noc", CLK_CON_DIV_CLKCMU_SDMA_NOC, 0, 4),
+
+ /* SNW */
+ DIV(DOUT_CLKCMU_SNW_NOC, "dout_clkcmu_snw_noc",
+ "mout_clkcmu_snw_noc", CLK_CON_DIV_CLKCMU_SNW_NOC, 0, 4),
+
+ /* SSP */
+ DIV(DOUT_CLKCMU_SSP_NOC, "dout_clkcmu_ssp_noc",
+ "mout_clkcmu_ssp_noc", CLK_CON_DIV_CLKCMU_SSP_NOC, 0, 4),
+
+ /* TAA */
+ DIV(DOUT_CLKCMU_TAA_NOC, "dout_clkcmu_taa_noc",
+ "mout_clkcmu_taa_noc", CLK_CON_DIV_CLKCMU_TAA_NOC, 0, 4),
+};
+
+static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = {
+ FFACTOR(DOUT_SHARED0_DIV1, "dout_shared0_div1",
+ "mout_shared0_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED0_DIV2, "dout_shared0_div2",
+ "mout_shared0_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED0_DIV3, "dout_shared0_div3",
+ "mout_shared0_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED0_DIV4, "dout_shared0_div4",
+ "mout_shared0_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED1_DIV1, "dout_shared1_div1",
+ "mout_shared1_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED1_DIV2, "dout_shared1_div2",
+ "mout_shared1_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED1_DIV3, "dout_shared1_div3",
+ "mout_shared1_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED1_DIV4, "dout_shared1_div4",
+ "mout_shared1_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED2_DIV1, "dout_shared2_div1",
+ "mout_shared2_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED2_DIV2, "dout_shared2_div2",
+ "mout_shared2_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED2_DIV3, "dout_shared2_div3",
+ "mout_shared2_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED2_DIV4, "dout_shared2_div4",
+ "mout_shared2_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED3_DIV1, "dout_shared3_div1",
+ "mout_shared3_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED3_DIV2, "dout_shared3_div2",
+ "mout_shared3_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED3_DIV3, "dout_shared3_div3",
+ "mout_shared3_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED3_DIV4, "dout_shared3_div4",
+ "mout_shared3_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED4_DIV1, "dout_shared4_div1",
+ "mout_shared4_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED4_DIV2, "dout_shared4_div2",
+ "mout_shared4_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED4_DIV3, "dout_shared4_div3",
+ "mout_shared4_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED4_DIV4, "dout_shared4_div4",
+ "mout_shared4_pll", 1, 4, 0),
+ FFACTOR(DOUT_SHARED5_DIV1, "dout_shared5_div1",
+ "mout_shared5_pll", 1, 1, 0),
+ FFACTOR(DOUT_SHARED5_DIV2, "dout_shared5_div2",
+ "mout_shared5_pll", 1, 2, 0),
+ FFACTOR(DOUT_SHARED5_DIV3, "dout_shared5_div3",
+ "mout_shared5_pll", 1, 3, 0),
+ FFACTOR(DOUT_SHARED5_DIV4, "dout_shared5_div4",
+ "mout_shared5_pll", 1, 4, 0),
+};
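
Unlike the register-backed DIV() entries elsewhere, these FFACTOR() clocks carry no hardware state: the rate is unconditionally parent/div. In generic clk-framework terms one entry is roughly equivalent to:

    /* dout_shared0_div3 is always mout_shared0_pll / 3
     * (mult = 1, div = 3); nothing is programmed.
     */
    hw = clk_hw_register_fixed_factor(NULL, "dout_shared0_div3",
                                      "mout_shared0_pll", 0, 1, 3);
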
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = top_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynosautov920_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top",
+ exynosautov920_cmu_top_init);
+
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10800000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER 0x0610
+#define CLK_CON_MUX_MUX_CLK_PERIC0_I3C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI 0x1018
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI 0x101c
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI 0x1020
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI 0x1024
+#define CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C 0x1028
+#define CLK_CON_DIV_DIV_CLK_PERIC0_I3C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1828
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC0_I3C,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC0_I3C,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC0 */
+PNAME(mout_peric0_ip_user_p) = { "oscclk", "dout_clkcmu_peric0_ip" };
+PNAME(mout_peric0_noc_user_p) = { "oscclk", "dout_clkcmu_peric0_noc" };
+PNAME(mout_peric0_usi_p) = { "oscclk", "mout_peric0_ip_user" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_IP_USER, "mout_peric0_ip_user",
+ mout_peric0_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_NOC_USER, "mout_peric0_noc_user",
+ mout_peric0_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_NOC_USER, 4, 1),
+ /* USI00 ~ USI08 */
+ MUX(CLK_MOUT_PERIC0_USI00_USI, "mout_peric0_usi00_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USI, "mout_peric0_usi01_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USI, "mout_peric0_usi02_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USI, "mout_peric0_usi03_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI04_USI, "mout_peric0_usi04_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI05_USI, "mout_peric0_usi05_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI06_USI, "mout_peric0_usi06_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI06_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI07_USI, "mout_peric0_usi07_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI07_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC0_USI08_USI, "mout_peric0_usi08_usi",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI08_USI, 0, 1),
+ /* USI_I2C */
+ MUX(CLK_MOUT_PERIC0_USI_I2C, "mout_peric0_usi_i2c",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C, 0, 1),
+ /* USI_I3C */
+ MUX(CLK_MOUT_PERIC0_I3C, "mout_peric0_i3c",
+ mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_I3C, 0, 1),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ /* USI00 ~ USI08 */
+ DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi",
+ "mout_peric0_usi00_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi",
+ "mout_peric0_usi01_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi",
+ "mout_peric0_usi02_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi",
+ "mout_peric0_usi03_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi",
+ "mout_peric0_usi04_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi",
+ "mout_peric0_usi05_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI06_USI, "dout_peric0_usi06_usi",
+ "mout_peric0_usi06_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI06_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI07_USI, "dout_peric0_usi07_usi",
+ "mout_peric0_usi07_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI07_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI08_USI, "dout_peric0_usi08_usi",
+ "mout_peric0_usi08_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI08_USI,
+ 0, 4),
+ /* USI_I2C */
+ DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c",
+ "mout_peric0_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C, 0, 4),
+ /* USI_I3C */
+ DIV(CLK_DOUT_PERIC0_I3C, "dout_peric0_i3c",
+ "mout_peric0_i3c", CLK_CON_DIV_DIV_CLK_PERIC0_I3C, 0, 4),
+};
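
Each USI channel thus gets a two-way mux (oscclk or the IP clock handed down from CMU_TOP) followed by a 4-bit divider, so the channel rate is parent / (DIV + 1) with DIV in 0..15. A quick numeric check, with the input rate assumed rather than taken from this patch:

    unsigned long parent = 400000000UL;       /* assumed mout_peric0_ip_user */
    unsigned long usi00  = parent / (3 + 1);  /* DIV = 3 -> 100 MHz */
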
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "noc",
+};
+
+static int __init exynosautov920_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynosautov920_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynosautov920-cmu-peric0",
+ .data = &peric0_cmu_info,
+ },
+};
+
+static struct platform_driver exynosautov920_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynosautov920-cmu",
+ .of_match_table = exynosautov920_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynosautov920_cmu_probe,
+};
+
+static int __init exynosautov920_cmu_init(void)
+{
+ return platform_driver_register(&exynosautov920_cmu_driver);
+}
+core_initcall(exynosautov920_cmu_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 4be879ab917e..cca3e630922c 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -430,6 +430,9 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = {
#define PLL0822X_LOCK_STAT_SHIFT (29)
#define PLL0822X_ENABLE_SHIFT (31)
+/* PLL1418x is similar to PLL0822x, except that MDIV is one bit smaller */
+#define PLL1418X_MDIV_MASK (0x1FF)
+
static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -438,7 +441,10 @@ static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
u64 fvco = parent_rate;
pll_con3 = readl_relaxed(pll->con_reg);
- mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
+ if (pll->type != pll_1418x)
+ mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
+ else
+ mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL1418X_MDIV_MASK;
pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK;
sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
@@ -456,7 +462,12 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
{
const struct samsung_pll_rate_table *rate;
struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_con3;
+ u32 mdiv_mask, pll_con3;
+
+ if (pll->type != pll_1418x)
+ mdiv_mask = PLL0822X_MDIV_MASK;
+ else
+ mdiv_mask = PLL1418X_MDIV_MASK;
/* Get required rate settings from table */
rate = samsung_get_pll_settings(pll, drate);
@@ -468,7 +479,7 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
/* Change PLL PMS values */
pll_con3 = readl_relaxed(pll->con_reg);
- pll_con3 &= ~((PLL0822X_MDIV_MASK << PLL0822X_MDIV_SHIFT) |
+ pll_con3 &= ~((mdiv_mask << PLL0822X_MDIV_SHIFT) |
(PLL0822X_PDIV_MASK << PLL0822X_PDIV_SHIFT) |
(PLL0822X_SDIV_MASK << PLL0822X_SDIV_SHIFT));
pll_con3 |= (rate->mdiv << PLL0822X_MDIV_SHIFT) |
@@ -1261,6 +1272,47 @@ static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
.recalc_rate = samsung_pll2650xx_recalc_rate,
};
+/*
+ * PLL531X Clock Type
+ */
+/* Maximum lock time can be 500 * PDIV cycles */
+#define PLL531X_LOCK_FACTOR (500)
+#define PLL531X_MDIV_MASK (0x3FF)
+#define PLL531X_PDIV_MASK (0x3F)
+#define PLL531X_SDIV_MASK (0x7)
+#define PLL531X_FDIV_MASK (0xFFFFFFFF)
+#define PLL531X_MDIV_SHIFT (16)
+#define PLL531X_PDIV_SHIFT (8)
+#define PLL531X_SDIV_SHIFT (0)
+
+static unsigned long samsung_pll531x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pdiv, sdiv, fdiv, pll_con0, pll_con8;
+ u64 mdiv, fout = parent_rate;
+
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con8 = readl_relaxed(pll->con_reg + 20);
+ mdiv = (pll_con0 >> PLL531X_MDIV_SHIFT) & PLL531X_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL531X_PDIV_SHIFT) & PLL531X_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL531X_SDIV_SHIFT) & PLL531X_SDIV_MASK;
+ fdiv = (pll_con8 & PLL531X_FDIV_MASK);
+
+ if (fdiv >> 31)
+ mdiv--;
+
+ fout *= (mdiv << 24) + (fdiv >> 8);
+ do_div(fout, (pdiv << sdiv));
+ fout >>= 24;
+
+ return (unsigned long)fout;
+}
+
+static const struct clk_ops samsung_pll531x_clk_ops = {
+ .recalc_rate = samsung_pll531x_recalc_rate,
+};
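
Aside on the PLL531X math above: it is a fixed-point evaluation of
Fout = Fin * (MDIV + FDIV / 2^32) / (PDIV * 2^SDIV), where FDIV appears
to be a signed fraction; when its top bit is set, mdiv is decremented so
that mdiv + (fdiv - 2^32)/2^32 == (mdiv - 1) + fdiv/2^32 holds with an
unsigned fdiv. A minimal userspace sketch of the same arithmetic, using
hypothetical register values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t fin = 38400000;		/* parent rate in Hz, assumed */
		uint64_t mdiv = 154, fdiv = 0x80000000;	/* hypothetical divider values */
		uint32_t pdiv = 3, sdiv = 1;

		if (fdiv >> 31)		/* MSB set: negative fraction */
			mdiv--;

		uint64_t fout = fin * ((mdiv << 24) + (fdiv >> 8));
		fout /= (uint64_t)pdiv << sdiv;
		fout >>= 24;

		/* 38.4 MHz * 153.5 / 6 = 982.4 MHz */
		printf("fout = %llu Hz\n", (unsigned long long)fout);
		return 0;
	}
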
+
static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_clk)
{
@@ -1317,6 +1369,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
init.ops = &samsung_pll35xx_clk_ops;
break;
case pll_1417x:
+ case pll_1418x:
case pll_0818x:
case pll_0822x:
case pll_0516x:
@@ -1394,6 +1447,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll2650xx_clk_ops;
break;
+ case pll_531x:
+ init.ops = &samsung_pll531x_clk_ops;
+ break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, pll_clk->name);
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index ffd3d52c0dec..3481941ba07a 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -30,6 +30,7 @@ enum samsung_pll_type {
pll_2650x,
pll_2650xx,
pll_1417x,
+ pll_1418x,
pll_1450x,
pll_1451x,
pll_1452x,
@@ -41,6 +42,7 @@ enum samsung_pll_type {
pll_0516x,
pll_0517x,
pll_0518x,
+ pll_531x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
index d3c85421f948..8c4c3a958a9f 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, jh7110_ispcrg_match);
static struct platform_driver jh7110_ispcrg_driver = {
.probe = jh7110_ispcrg_probe,
- .remove_new = jh7110_ispcrg_remove,
+ .remove = jh7110_ispcrg_remove,
.driver = {
.name = "clk-starfive-jh7110-isp",
.of_match_table = jh7110_ispcrg_match,
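
The .remove_new -> .remove renames in this and the following clk hunks
are mechanical: .remove_new was the transitional name for the
void-returning remove callback while the old int-returning .remove was
phased out, and with .remove itself now returning void the drivers
switch back to the plain name. The callback shape is unchanged (sketch,
illustrative names):

	static void example_remove(struct platform_device *pdev)
	{
		/* undo probe; there is no error code to return anymore */
	}

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.remove	= example_remove,	/* formerly .remove_new */
	};
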
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
index 53f7af234cc2..04eeed199087 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
@@ -145,7 +145,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
/* enable power domain and clocks */
pm_runtime_enable(priv->dev);
- ret = pm_runtime_get_sync(priv->dev);
+ ret = pm_runtime_resume_and_get(priv->dev);
if (ret < 0)
return dev_err_probe(priv->dev, ret, "failed to turn on power\n");
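
The helper swap above changes error-path semantics, not just names:
pm_runtime_get_sync() raises the usage counter even when the resume
fails, so callers had to drop it manually, while
pm_runtime_resume_and_get() balances the counter itself on failure.
The two idioms, sketched:

	/* old: failure still leaves the counter raised */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* manual undo required */
		return ret;
	}

	/* new: counter already balanced on failure */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
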
@@ -223,7 +223,7 @@ MODULE_DEVICE_TABLE(of, jh7110_voutcrg_match);
static struct platform_driver jh7110_voutcrg_driver = {
.probe = jh7110_voutcrg_probe,
- .remove_new = jh7110_voutcrg_remove,
+ .remove = jh7110_voutcrg_remove,
.driver = {
.name = "clk-starfive-jh7110-vout",
.of_match_table = jh7110_voutcrg_match,
diff --git a/drivers/clk/stm32/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c
index 7e2337297402..5fcc4c77c11f 100644
--- a/drivers/clk/stm32/clk-stm32mp1.c
+++ b/drivers/clk/stm32/clk-stm32mp1.c
@@ -2354,7 +2354,7 @@ static struct platform_driver stm32mp1_rcc_clocks_driver = {
.of_match_table = stm32mp1_match_data,
},
.probe = stm32mp1_rcc_clocks_probe,
- .remove_new = stm32mp1_rcc_clocks_remove,
+ .remove = stm32mp1_rcc_clocks_remove,
};
static int __init stm32mp1_clocks_init(void)
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index a9be4b56b2b7..0251618b82c8 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -635,7 +635,7 @@ static const struct dev_pm_ops tegra124_dfll_pm_ops = {
static struct platform_driver tegra124_dfll_fcpu_driver = {
.probe = tegra124_dfll_fcpu_probe,
- .remove_new = tegra124_dfll_fcpu_remove,
+ .remove = tegra124_dfll_fcpu_remove,
.driver = {
.name = "tegra124-dfll",
.of_match_table = tegra124_dfll_fcpu_of_match,
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 6121020b4b38..e305fcbac647 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -934,7 +934,7 @@ static struct platform_driver ti_adpll_driver = {
.of_match_table = ti_adpll_match,
},
.probe = ti_adpll_probe,
- .remove_new = ti_adpll_remove,
+ .remove = ti_adpll_remove,
};
static int __init ti_adpll_init(void)
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index d964e3affd42..0eab7f3e2eab 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -240,6 +240,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
}
clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
if (IS_ERR(clk)) {
pr_err("%s: failed to get atl clock %d from provider\n",
__func__, i);
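
The added of_node_put() fixes a device-node refcount leak: the phandle
lookup that filled clkspec earlier in this probe (an
of_parse_phandle_with_args() call) takes a reference on clkspec.np, and
it must be dropped once the clock has been resolved. The general
pattern, with placeholder names:

	ret = of_parse_phandle_with_args(node, list_name, cells_name, i,
					 &clkspec);
	if (ret)
		return ret;

	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);	/* drop the ref the parse took */
	if (IS_ERR(clk))
		return PTR_ERR(clk);
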
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index 45adac1b4630..033d4f78edc8 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -110,7 +110,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
init.parent_names = parent_names;
init.num_parents = num;
- deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
+ deprecated = !of_property_present(node, "assigned-clock-parents");
for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
index e9cd80e085dc..3f929cf8dd2f 100644
--- a/drivers/clk/visconti/pll.c
+++ b/drivers/clk/visconti/pll.c
@@ -262,9 +262,9 @@ static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx,
for (len = 0; rate_table[len].rate != 0; )
len++;
pll->rate_count = len;
- pll->rate_table = kmemdup(rate_table,
- pll->rate_count * sizeof(struct visconti_pll_rate_table),
- GFP_KERNEL);
+ pll->rate_table = kmemdup_array(rate_table,
+ pll->rate_count, sizeof(*pll->rate_table),
+ GFP_KERNEL);
WARN(!pll->rate_table, "%s: could not allocate rate table for %s\n", __func__, name);
init.ops = &visconti_pll_ops;
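
kmemdup_array(src, count, size, gfp) is the overflow-checked form of
kmemdup(src, count * size, gfp): the size multiplication saturates on
overflow so the allocation fails instead of coming back undersized.
Equivalent call shapes:

	/* before: unchecked multiplication in the caller */
	p = kmemdup(src, n * sizeof(*p), GFP_KERNEL);

	/* after: multiplication checked inside the helper */
	p = kmemdup_array(src, n, sizeof(*p), GFP_KERNEL);
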
diff --git a/drivers/clk/x86/clk-fch.c b/drivers/clk/x86/clk-fch.c
index aed7d22fae63..cf5cd3ad4647 100644
--- a/drivers/clk/x86/clk-fch.c
+++ b/drivers/clk/x86/clk-fch.c
@@ -115,6 +115,6 @@ static struct platform_driver fch_clk_driver = {
.suppress_bind_attrs = true,
},
.probe = fch_clk_probe,
- .remove_new = fch_clk_remove,
+ .remove = fch_clk_remove,
};
builtin_platform_driver(fch_clk_driver);
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 5ec9255e33fa..99291ba65da7 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -373,6 +373,6 @@ static struct platform_driver plt_clk_driver = {
.name = "clk-pmc-atom",
},
.probe = plt_clk_probe,
- .remove_new = plt_clk_remove,
+ .remove = plt_clk_remove,
};
builtin_platform_driver(plt_clk_driver);
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 19eb3fb7ae31..7a0269bdfbb3 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -1257,7 +1257,7 @@ static struct platform_driver clk_wzrd_driver = {
.pm = &clk_wzrd_dev_pm_ops,
},
.probe = clk_wzrd_probe,
- .remove_new = clk_wzrd_remove,
+ .remove = clk_wzrd_remove,
};
module_platform_driver(clk_wzrd_driver);
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index d983fab12756..81501b48412e 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -729,7 +729,7 @@ static struct platform_driver xvcu_driver = {
.of_match_table = xvcu_of_id_table,
},
.probe = xvcu_probe,
- .remove_new = xvcu_remove,
+ .remove = xvcu_remove,
};
module_platform_driver(xvcu_driver);
diff --git a/drivers/comedi/drivers/ni_atmio.c b/drivers/comedi/drivers/ni_atmio.c
index 8876a1d24c56..330ae1c58800 100644
--- a/drivers/comedi/drivers/ni_atmio.c
+++ b/drivers/comedi/drivers/ni_atmio.c
@@ -79,6 +79,15 @@
#include "ni_stc.h"
+static const struct comedi_lrange range_ni_E_ao_ext = {
+ 4, {
+ BIP_RANGE(10),
+ UNI_RANGE(10),
+ RANGE_ext(-1, 1),
+ RANGE_ext(0, 1)
+ }
+};
+
/* AT specific setup */
static const struct ni_board_struct ni_boards[] = {
{
diff --git a/drivers/comedi/drivers/ni_mio_common.c b/drivers/comedi/drivers/ni_mio_common.c
index 980f309d6de7..3acb449d293c 100644
--- a/drivers/comedi/drivers/ni_mio_common.c
+++ b/drivers/comedi/drivers/ni_mio_common.c
@@ -166,15 +166,6 @@ static const struct comedi_lrange range_ni_M_ai_628x = {
}
};
-static const struct comedi_lrange range_ni_E_ao_ext = {
- 4, {
- BIP_RANGE(10),
- UNI_RANGE(10),
- RANGE_ext(-1, 1),
- RANGE_ext(0, 1)
- }
-};
-
static const struct comedi_lrange *const ni_range_lkup[] = {
[ai_gain_16] = &range_ni_E_ai,
[ai_gain_8] = &range_ni_E_ai_limited,
diff --git a/drivers/comedi/drivers/ni_pcimio.c b/drivers/comedi/drivers/ni_pcimio.c
index 0b055321023d..f63c390314e1 100644
--- a/drivers/comedi/drivers/ni_pcimio.c
+++ b/drivers/comedi/drivers/ni_pcimio.c
@@ -102,6 +102,15 @@
#define PCIDMA
+static const struct comedi_lrange range_ni_E_ao_ext = {
+ 4, {
+ BIP_RANGE(10),
+ UNI_RANGE(10),
+ RANGE_ext(-1, 1),
+ RANGE_ext(0, 1)
+ }
+};
+
/*
* These are not all the possible ao ranges for 628x boards.
* They can do OFFSET +- REFERENCE where OFFSET can be
diff --git a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
index d55521b5bdcb..892a66b2cea6 100644
--- a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
+++ b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
@@ -140,6 +140,11 @@ int main(void)
{
FILE *fp = fopen("ni_values.py", "w");
+ if (fp == NULL) {
+ fprintf(stderr, "Could not open file!\n");

+ return -1;
+ }
+
/* write route register values */
fprintf(fp, "ni_route_values = {\n");
for (int i = 0; ni_all_route_values[i]; ++i)
diff --git a/drivers/comedi/drivers/ni_stc.h b/drivers/comedi/drivers/ni_stc.h
index fbc0b753a0f5..7837e4683c6d 100644
--- a/drivers/comedi/drivers/ni_stc.h
+++ b/drivers/comedi/drivers/ni_stc.h
@@ -1137,6 +1137,4 @@ struct ni_private {
u8 rgout0_usage;
};
-static const struct comedi_lrange range_ni_E_ao_ext;
-
#endif /* _COMEDI_NI_STC_H */
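
Background for the comedi moves above: the line deleted from ni_stc.h
was a static tentative definition. In C, every translation unit that
includes such a header but never supplies the initializer silently gets
its own zero-filled object, which is why the real table is now defined
in each driver that uses it. A userspace illustration of the pitfall:

	#include <stdio.h>

	/* tentative definition: with no initializer anywhere in this
	 * translation unit, the object is silently zero-filled */
	static const int table[4];

	int main(void)
	{
		printf("%d %d %d %d\n", table[0], table[1], table[2], table[3]);
		return 0;	/* prints: 0 0 0 0 */
	}
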
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
index afc94d0062b1..3ee75e1a78cd 100644
--- a/drivers/counter/counter-chrdev.c
+++ b/drivers/counter/counter-chrdev.c
@@ -454,7 +454,6 @@ out_unlock:
static const struct file_operations counter_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = counter_chrdev_read,
.poll = counter_chrdev_poll,
.unlocked_ioctl = counter_chrdev_ioctl,
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index bafa32dd375d..1a5ad184d28f 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -224,9 +224,9 @@ static void __init cppc_freq_invariance_init(void)
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
*/
- .sched_runtime = 1000000,
- .sched_deadline = 10000000,
- .sched_period = 10000000,
+ .sched_runtime = NSEC_PER_MSEC,
+ .sched_deadline = 10 * NSEC_PER_MSEC,
+ .sched_period = 10 * NSEC_PER_MSEC,
};
int ret;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 94f23c6fc93b..08b1238bcd7b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -21,7 +21,7 @@ config CRYPTO_DEV_PADLOCK
(so called VIA PadLock ACE, Advanced Cryptography Engine)
that provides instructions for very fast cryptographic
operations with supported algorithms.
-
+
The instructions are used only when the CPU supports them.
Otherwise software encryption is used.
@@ -78,18 +78,79 @@ config ZCRYPT
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
- depends on ZCRYPT
help
- With this option enabled the pkey kernel module provides an API
+ With this option enabled the pkey kernel modules provide an API
for creation and handling of protected keys. Other parts of the
kernel or userspace applications may use these functions.
+ The protected key support is split across several modules:
+ - A pkey base and API kernel module (pkey.ko) which offers the
+ infrastructure for the pkey handler kernel modules, the ioctl
+ and the sysfs API and the in-kernel API to the crypto cipher
+ implementations using protected key.
+ - A pkey pckmo kernel module (pkey-pckmo.ko) which is automatically
+ loaded when pckmo support (that is, generation of protected keys
+ from clear key values) is available.
+ - A pkey CCA kernel module (pkey-cca.ko) which is automatically
+ loaded when a CEX crypto card is available.
+ - A pkey EP11 kernel module (pkey-ep11.ko) which is automatically
+ loaded when a CEX crypto card is available.
+
Select this option if you want to enable the kernel and userspace
- API for proteced key handling.
+ API for protected key handling.
+
+config PKEY_CCA
+ tristate "PKEY CCA support handler"
+ depends on PKEY
+ depends on ZCRYPT
+ help
+ This is the CCA support handler for deriving protected keys
+ from CCA (secure) keys. This handler also provides an alternative
+ way to make protected keys from clear key values.
+
+ The PKEY CCA support handler needs a Crypto Express card (CEX)
+ in CCA mode.
+
+ If you have selected the PKEY option then you should also enable
+ this option unless you are sure you never need to derive protected
+ keys from CCA key material.
+
+config PKEY_EP11
+ tristate "PKEY EP11 support handler"
+ depends on PKEY
+ depends on ZCRYPT
+ help
+ This is the EP11 support handler for deriving protected keys
+ from EP11 (secure) keys. This handler also provides an alternative
+ way to make protected keys from clear key values.
+
+ The PKEY EP11 support handler needs a Crypto Express card (CEX)
+ in EP11 mode.
+
+ If you have selected the PKEY option then you should also enable
+ this option unless you are sure you never need to derive protected
+ keys from EP11 key material.
+
+config PKEY_PCKMO
+ tristate "PKEY PCKMO support handler"
+ depends on PKEY
+ help
+ This is the PCKMO support handler for deriving protected keys
+ from clear key values via invoking the PCKMO instruction.
+
+ The PCKMO instruction can be enabled and disabled in the crypto
+ settings at the LPAR profile. This handler checks for availability
+ during initialization and, if built as a kernel module, unloads
+ itself when PCKMO is disabled.
+
+ The PCKMO way of deriving protected keys from clear key material
+ is used especially during the self-tests of protected key ciphers
+ like PAES, but the CCA and EP11 handlers provide alternative ways
+ to generate protected keys from clear key values.
- Please note that creation of protected keys from secure keys
- requires to have at least one CEX card in coprocessor mode
- available at runtime.
+ If you have selected the PKEY option then you should also enable
+ this option unless you are sure you never need to derive protected
+ keys from clear key values directly via PCKMO.
config CRYPTO_PAES_S390
tristate "PAES cipher algorithms"
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index fdd724228c2f..25c02e267258 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -708,6 +708,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
+ sg_num = pad_sg_nents(sg_num);
edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
if (!edesc)
return NULL;
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 99b5c25be079..29c192f20082 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -6,7 +6,7 @@ menuconfig CXL_BUS
select FW_UPLOAD
select PCI_DOE
select FIRMWARE_TABLE
- select NUMA_KEEP_MEMINFO if (NUMA && X86)
+ select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index bb83867d9fec..ef1621d40f05 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -9,13 +9,12 @@
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"
-#include "core.h"
struct dsmas_entry {
struct range dpa_range;
u8 handle;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
-
+ struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
int entries;
int qos_class;
};
@@ -163,7 +162,7 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
dslbis->data_type);
- cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
+ cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val);
return 0;
}
@@ -220,7 +219,7 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
xa_for_each(dsmas_xa, index, dent) {
int qos_class;
- cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
+ cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c);
dent->entries = 1;
rc = cxl_root->ops->qos_class(cxl_root,
&dent->coord[ACCESS_COORDINATE_CPU],
@@ -241,8 +240,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
struct cxl_dpa_perf *dpa_perf)
{
- for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
dpa_perf->coord[i] = dent->coord[i];
+ dpa_perf->cdat_coord[i] = dent->cdat_coord[i];
+ }
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->qos_class = dent->qos_class;
dev_dbg(dev,
@@ -546,19 +547,37 @@ void cxl_coordinates_combine(struct access_coordinate *out,
MODULE_IMPORT_NS(CXL);
-void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
+static void cxl_bandwidth_add(struct access_coordinate *coord,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ coord[i].read_bandwidth = c1[i].read_bandwidth +
+ c2[i].read_bandwidth;
+ coord[i].write_bandwidth = c1[i].write_bandwidth +
+ c2[i].write_bandwidth;
+ }
+}
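
Two combining rules are in play in this file: cxl_coordinates_combine()
takes the minimum of the bandwidths of two coordinate sets (a path is
only as fast as its slowest hop), while cxl_bandwidth_add() above sums
read and write bandwidth (sibling endpoints under one upstream port add
up). Reduced to a single number each:

	static unsigned int combine_bw(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;	/* slowest hop limits the path */
	}

	static unsigned int add_bw(unsigned int a, unsigned int b)
	{
		return a + b;		/* siblings aggregate at a shared port */
	}
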
+
+static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
+ struct resource *dpa_res)
{
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct range dpa = {
- .start = cxled->dpa_res->start,
- .end = cxled->dpa_res->end,
+ .start = dpa_res->start,
+ .end = dpa_res->end,
};
+
+ return range_contains(&perf->dpa_range, &dpa);
+}
+
+static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
+ enum cxl_decoder_mode mode)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_dpa_perf *perf;
- switch (cxlr->mode) {
+ switch (mode) {
case CXL_DECODER_RAM:
perf = &mds->ram_perf;
break;
@@ -566,12 +585,473 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
perf = &mds->pmem_perf;
break;
default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!dpa_perf_contains(perf, cxled->dpa_res))
+ return ERR_PTR(-EINVAL);
+
+ return perf;
+}
+
+/*
+ * Transient context holding the running bandwidth calculation while
+ * walking the port hierarchy to deal with a shared upstream link.
+ */
+struct cxl_perf_ctx {
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct cxl_port *port;
+};
+
+/**
+ * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray
+ * @cxlr: CXL region for the bandwidth calculation
+ * @cxled: endpoint decoder to start on
+ * @usp_xa: (output) the xarray that collects all the bandwidth coordinates
+ * indexed by the upstream device with data of 'struct cxl_perf_ctx'.
+ * @gp_is_root: (output) bool of whether the grandparent is cxl root.
+ *
+ * Return: 0 for success or -errno
+ *
+ * Collects aggregated endpoint bandwidth and stores it in an xarray
+ * indexed by the upstream device of the switch or the RP device. Each
+ * endpoint contributes the minimum of the DSLBIS bandwidth from the
+ * endpoint CDAT, the endpoint upstream link bandwidth, and the SSLBIS
+ * bandwidth from the switch CDAT for the switch upstream port to the
+ * downstream port associated with the endpoint. If the device is
+ * directly connected to an RP, no SSLBIS is involved.
+ */
+static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ struct xarray *usp_xa,
+ bool *gp_is_root)
+{
+ struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent);
+ struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent);
+ struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
+ struct access_coordinate pci_coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate sw_coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate ep_coord[ACCESS_COORDINATE_MAX];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct cxl_perf_ctx *perf_ctx;
+ struct cxl_dpa_perf *perf;
+ unsigned long index;
+ void *ptr;
+ int rc;
+
+ if (cxlds->rcd)
+ return -ENODEV;
+
+ perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ if (IS_ERR(perf))
+ return PTR_ERR(perf);
+
+ *gp_is_root = is_cxl_root(gp_port);
+
+ /*
+ * If the grandparent is cxl root, then index is the root port,
+ * otherwise it's the parent switch upstream device.
+ */
+ if (*gp_is_root)
+ index = (unsigned long)endpoint->parent_dport->dport_dev;
+ else
+ index = (unsigned long)parent_port->uport_dev;
+
+ perf_ctx = xa_load(usp_xa, index);
+ if (!perf_ctx) {
+ struct cxl_perf_ctx *c __free(kfree) =
+ kzalloc(sizeof(*perf_ctx), GFP_KERNEL);
+
+ if (!c)
+ return -ENOMEM;
+ ptr = xa_store(usp_xa, index, c, GFP_KERNEL);
+ if (xa_is_err(ptr))
+ return xa_err(ptr);
+ perf_ctx = no_free_ptr(c);
+ perf_ctx->port = parent_port;
+ }
+
+ /* Direct upstream link from EP bandwidth */
+ rc = cxl_pci_get_bandwidth(pdev, pci_coord);
+ if (rc < 0)
+ return rc;
+
+ /*
+ * Min of upstream link bandwidth and Endpoint CDAT bandwidth from
+ * DSLBIS.
+ */
+ cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord);
+
+ /*
+ * If grandparent port is root, then there's no switch involved and
+ * the endpoint is connected to a root port.
+ */
+ if (!*gp_is_root) {
+ /*
+ * Retrieve the switch SSLBIS for switch downstream port
+ * associated with the endpoint bandwidth.
+ */
+ rc = cxl_port_get_switch_dport_bandwidth(endpoint, sw_coord);
+ if (rc)
+ return rc;
+
+ /*
+ * Min of the earlier coordinates with the switch SSLBIS
+ * bandwidth
+ */
+ cxl_coordinates_combine(ep_coord, ep_coord, sw_coord);
+ }
+
+ /*
+ * Aggregate the computed bandwidth with the current aggregated bandwidth
+ * of the endpoints with the same switch upstream device or RP.
+ */
+ cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord);
+
+ return 0;
+}
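
Numerically, the per-endpoint rule above is "min along the path, then
sum at the shared parent". A small userspace model with made-up
bandwidth figures:

	#include <stdio.h>

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		/* per endpoint: CDAT (DSLBIS) bandwidth vs. upstream link, MB/s */
		unsigned int cdat_bw[2] = { 6000, 12000 };
		unsigned int link_bw[2] = { 8000, 8000 };
		unsigned int shared = 0;

		for (int i = 0; i < 2; i++)
			shared += min_u(cdat_bw[i], link_bw[i]);

		/* min(6000,8000) + min(12000,8000) = 14000 */
		printf("aggregate at shared upstream port: %u MB/s\n", shared);
		return 0;
	}
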
+
+static void free_perf_xa(struct xarray *xa)
+{
+ struct cxl_perf_ctx *ctx;
+ unsigned long index;
+
+ if (!xa)
return;
+
+ xa_for_each(xa, index, ctx)
+ kfree(ctx);
+ xa_destroy(xa);
+ kfree(xa);
+}
+DEFINE_FREE(free_perf_xa, struct xarray *, if (_T) free_perf_xa(_T))
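
DEFINE_FREE() above hooks free_perf_xa() into the scope-based cleanup
machinery of <linux/cleanup.h>, the pattern all of these gather
functions rely on: __free() arms a destructor that runs when the
variable goes out of scope, and no_free_ptr() disarms it when ownership
is handed out. In outline:

	struct xarray *xa __free(free_perf_xa) =
		kzalloc(sizeof(*xa), GFP_KERNEL);

	if (!xa)
		return ERR_PTR(-ENOMEM);	/* destructor is NULL-safe */
	xa_init(xa);

	if (error)
		return ERR_PTR(-ENXIO);		/* free_perf_xa(xa) runs here */

	return no_free_ptr(xa);			/* disarm; caller now owns xa */
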
+
+/**
+ * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray
+ * @cxlr: The region being operated on
+ * @input_xa: xarray indexed by upstream device of a switch with data of 'struct
+ * cxl_perf_ctx'
+ * @gp_is_root: (output) bool of whether the grandparent is cxl root.
+ *
+ * Return: an xarray of resulting cxl_perf_ctx per parent switch or root port
+ * or ERR_PTR(-errno)
+ *
+ * Iterate through the xarray. Take the minimum of the calculated
+ * downstream bandwidth, the upstream link bandwidth, and the SSLBIS of
+ * the upstream switch if one exists. Sum the resulting bandwidth under
+ * the switch upstream device or an RP device. The function can be
+ * called once per switch level when multiple levels are present.
+ */
+static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr,
+ struct xarray *input_xa,
+ bool *gp_is_root)
+{
+ struct xarray *res_xa __free(free_perf_xa) =
+ kzalloc(sizeof(*res_xa), GFP_KERNEL);
+ struct access_coordinate coords[ACCESS_COORDINATE_MAX];
+ struct cxl_perf_ctx *ctx, *us_ctx;
+ unsigned long index, us_index;
+ int dev_count = 0;
+ int gp_count = 0;
+ void *ptr;
+ int rc;
+
+ if (!res_xa)
+ return ERR_PTR(-ENOMEM);
+ xa_init(res_xa);
+
+ xa_for_each(input_xa, index, ctx) {
+ struct device *dev = (struct device *)index;
+ struct cxl_port *port = ctx->port;
+ struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+ struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent);
+ struct cxl_dport *dport = port->parent_dport;
+ bool is_root = false;
+
+ dev_count++;
+ if (is_cxl_root(gp_port)) {
+ is_root = true;
+ gp_count++;
+ }
+
+ /*
+ * If the grandparent is cxl root, then index is the root port,
+ * otherwise it's the parent switch upstream device.
+ */
+ if (is_root)
+ us_index = (unsigned long)port->parent_dport->dport_dev;
+ else
+ us_index = (unsigned long)parent_port->uport_dev;
+
+ us_ctx = xa_load(res_xa, us_index);
+ if (!us_ctx) {
+ struct cxl_perf_ctx *n __free(kfree) =
+ kzalloc(sizeof(*n), GFP_KERNEL);
+
+ if (!n)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = xa_store(res_xa, us_index, n, GFP_KERNEL);
+ if (xa_is_err(ptr))
+ return ERR_PTR(xa_err(ptr));
+ us_ctx = no_free_ptr(n);
+ us_ctx->port = parent_port;
+ }
+
+ /*
+ * If the device isn't an upstream PCIe port, there's something
+ * wrong with the topology.
+ */
+ if (!dev_is_pci(dev))
+ return ERR_PTR(-EINVAL);
+
+ /* Retrieve the upstream link bandwidth */
+ rc = cxl_pci_get_bandwidth(to_pci_dev(dev), coords);
+ if (rc)
+ return ERR_PTR(-ENXIO);
+
+ /*
+ * Take the min of downstream bandwidth and the upstream link
+ * bandwidth.
+ */
+ cxl_coordinates_combine(coords, coords, ctx->coord);
+
+ /*
+ * Take the min of the calculated bandwidth and the upstream
+ * switch SSLBIS bandwidth if there's a parent switch
+ */
+ if (!is_root)
+ cxl_coordinates_combine(coords, coords, dport->coord);
+
+ /*
+ * Aggregate the calculated bandwidth common to an upstream
+ * switch.
+ */
+ cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords);
}
+ /* Detect an asymmetric topology. */
+ if (gp_count) {
+ if (gp_count != dev_count) {
+ dev_dbg(&cxlr->dev,
+ "Asymmetric hierarchy detected, bandwidth not updated\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ *gp_is_root = true;
+ }
+
+ return no_free_ptr(res_xa);
+}
+
+/**
+ * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection
+ * @xa: xarray holding the cxl_perf_ctx with the bandwidth calculated
+ * below each root port device.
+ *
+ * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno)
+ */
+static struct xarray *cxl_rp_gather_bandwidth(struct xarray *xa)
+{
+ struct xarray *hb_xa __free(free_perf_xa) =
+ kzalloc(sizeof(*hb_xa), GFP_KERNEL);
+ struct cxl_perf_ctx *ctx;
+ unsigned long index;
+
+ if (!hb_xa)
+ return ERR_PTR(-ENOMEM);
+ xa_init(hb_xa);
+
+ xa_for_each(xa, index, ctx) {
+ struct cxl_port *port = ctx->port;
+ unsigned long hb_index = (unsigned long)port->uport_dev;
+ struct cxl_perf_ctx *hb_ctx;
+ void *ptr;
+
+ hb_ctx = xa_load(hb_xa, hb_index);
+ if (!hb_ctx) {
+ struct cxl_perf_ctx *n __free(kfree) =
+ kzalloc(sizeof(*n), GFP_KERNEL);
+
+ if (!n)
+ return ERR_PTR(-ENOMEM);
+ ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL);
+ if (xa_is_err(ptr))
+ return ERR_PTR(xa_err(ptr));
+ hb_ctx = no_free_ptr(n);
+ hb_ctx->port = port;
+ }
+
+ cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord);
+ }
+
+ return no_free_ptr(hb_xa);
+}
+
+/**
+ * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection
+ * @xa: xarray holding the cxl_perf_ctx with the bandwidth calculated
+ * below each host bridge.
+ *
+ * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno)
+ */
+static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa)
+{
+ struct xarray *mw_xa __free(free_perf_xa) =
+ kzalloc(sizeof(*mw_xa), GFP_KERNEL);
+ struct cxl_perf_ctx *ctx;
+ unsigned long index;
+
+ if (!mw_xa)
+ return ERR_PTR(-ENOMEM);
+ xa_init(mw_xa);
+
+ xa_for_each(xa, index, ctx) {
+ struct cxl_port *port = ctx->port;
+ struct cxl_port *parent_port;
+ struct cxl_perf_ctx *mw_ctx;
+ struct cxl_dport *dport;
+ unsigned long mw_index;
+ void *ptr;
+
+ parent_port = to_cxl_port(port->dev.parent);
+ mw_index = (unsigned long)parent_port->uport_dev;
+
+ mw_ctx = xa_load(mw_xa, mw_index);
+ if (!mw_ctx) {
+ struct cxl_perf_ctx *n __free(kfree) =
+ kzalloc(sizeof(*n), GFP_KERNEL);
+
+ if (!n)
+ return ERR_PTR(-ENOMEM);
+ ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL);
+ if (xa_is_err(ptr))
+ return ERR_PTR(xa_err(ptr));
+ mw_ctx = no_free_ptr(n);
+ }
+
+ dport = port->parent_dport;
+ cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord);
+ cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord);
+ }
+
+ return no_free_ptr(mw_xa);
+}
+
+/**
+ * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
+ * @cxlr: The region being operated on
+ * @input_xa: xarray holding cxl_perf_ctx with calculated bandwidth per ACPI0017 instance
+ */
+static void cxl_region_update_bandwidth(struct cxl_region *cxlr,
+ struct xarray *input_xa)
+{
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct cxl_perf_ctx *ctx;
+ unsigned long index;
+
+ memset(coord, 0, sizeof(coord));
+ xa_for_each(input_xa, index, ctx)
+ cxl_bandwidth_add(coord, coord, ctx->coord);
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth;
+ cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth;
+ }
+}
+
+/**
+ * cxl_region_shared_upstream_bandwidth_update - Recalculate the bandwidth for
+ * the region
+ * @cxlr: the cxl region to recalculate
+ *
+ * The function walks the topology from the bottom up and calculates the
+ * bandwidth. It starts at the endpoints, processes the switch levels if
+ * any, then the root port level and the host bridge level, and finally
+ * aggregates at the region.
+ */
+void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
+{
+ struct xarray *working_xa;
+ int root_count = 0;
+ bool is_root;
+ int rc;
+
+ lockdep_assert_held(&cxl_dpa_rwsem);
+
+ struct xarray *usp_xa __free(free_perf_xa) =
+ kzalloc(sizeof(*usp_xa), GFP_KERNEL);
+
+ if (!usp_xa)
+ return;
+
+ xa_init(usp_xa);
+
+ /* Collect bandwidth data from all the endpoints. */
+ for (int i = 0; i < cxlr->params.nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i];
+
+ is_root = false;
+ rc = cxl_endpoint_gather_bandwidth(cxlr, cxled, usp_xa, &is_root);
+ if (rc)
+ return;
+ root_count += is_root;
+ }
+
+ /* Detect an asymmetric hierarchy with some directly attached endpoints. */
+ if (root_count && root_count != cxlr->params.nr_targets) {
+ dev_dbg(&cxlr->dev,
+ "Asymmetric hierarchy detected, bandwidth not updated\n");
+ return;
+ }
+
+ /*
+ * Walk up one or more switches to deal with the bandwidth of the
+ * switches if they exist. Endpoints directly attached to RPs skip
+ * over this part.
+ */
+ if (!root_count) {
+ do {
+ working_xa = cxl_switch_gather_bandwidth(cxlr, usp_xa,
+ &is_root);
+ if (IS_ERR(working_xa))
+ return;
+ free_perf_xa(usp_xa);
+ usp_xa = working_xa;
+ } while (!is_root);
+ }
+
+ /* Handle the bandwidth at the root port of the hierarchy */
+ working_xa = cxl_rp_gather_bandwidth(usp_xa);
+ if (IS_ERR(working_xa))
+ return;
+ free_perf_xa(usp_xa);
+ usp_xa = working_xa;
+
+ /* Handle the bandwidth at the host bridge of the hierarchy */
+ working_xa = cxl_hb_gather_bandwidth(usp_xa);
+ if (IS_ERR(working_xa))
+ return;
+ free_perf_xa(usp_xa);
+ usp_xa = working_xa;
+
+ /*
+ * Aggregate all the bandwidth collected per CFMWS (ACPI0017) and
+ * update the region bandwidth with the final calculated values.
+ */
+ cxl_region_update_bandwidth(cxlr, usp_xa);
+}
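
The shape of the walk implemented above, condensed (each pass produces a
new xarray keyed one level further up):

	/*
	 * endpoints:    min(EP CDAT, EP link[, switch SSLBIS]); sum per USP/RP
	 * switches:     min(below, link[, parent SSLBIS]); sum per parent,
	 *               repeated until the root is reached
	 * root ports:   sum per host bridge
	 * host bridges: min with the host bridge dport coordinates;
	 *               sum per ACPI0017 instance
	 * region:       sum everything into cxlr->coord
	 */
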
+
+void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_dpa_perf *perf;
+
lockdep_assert_held(&cxl_dpa_rwsem);
- if (!range_contains(&perf->dpa_range, &dpa))
+ perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ if (IS_ERR(perf))
return;
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 72a506c9dbd0..0c62b4069ba0 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -103,9 +103,11 @@ enum cxl_poison_trace_type {
};
long cxl_pci_get_latency(struct pci_dev *pdev);
-
+int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
enum access_coordinate_class access);
bool cxl_need_node_perf_attrs_update(int nid);
+int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
+ struct access_coordinate *c);
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index e5cdeafdf76e..946f8e44455f 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -225,7 +225,7 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
/**
* cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
- * @mds: The driver data for the operation
+ * @cxl_mbox: CXL mailbox context
* @mbox_cmd: initialized command to execute
*
* Context: Any context.
@@ -241,19 +241,19 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
* error. While this distinction can be useful for commands from userspace, the
* kernel will only be able to use results when both are successful.
*/
-int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
+int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *mbox_cmd)
{
size_t out_size, min_out;
int rc;
- if (mbox_cmd->size_in > mds->payload_size ||
- mbox_cmd->size_out > mds->payload_size)
+ if (mbox_cmd->size_in > cxl_mbox->payload_size ||
+ mbox_cmd->size_out > cxl_mbox->payload_size)
return -E2BIG;
out_size = mbox_cmd->size_out;
min_out = mbox_cmd->min_out;
- rc = mds->mbox_send(mds, mbox_cmd);
+ rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
/*
* EIO is reserved for a payload size mismatch and mbox_send()
* may not return this error.
@@ -353,6 +353,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
struct cxl_memdev_state *mds, u16 opcode,
size_t in_size, size_t out_size, u64 in_payload)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
*mbox = (struct cxl_mbox_cmd) {
.opcode = opcode,
.size_in = in_size,
@@ -374,7 +375,7 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
/* Prepare to handle a full payload for variable sized output */
if (out_size == CXL_VARIABLE_PAYLOAD)
- mbox->size_out = mds->payload_size;
+ mbox->size_out = cxl_mbox->payload_size;
else
mbox->size_out = out_size;
@@ -398,6 +399,8 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
const struct cxl_send_command *send_cmd,
struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
if (send_cmd->raw.rsvd)
return -EINVAL;
@@ -406,7 +409,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
* gets passed along without further checking, so it must be
* validated here.
*/
- if (send_cmd->out.size > mds->payload_size)
+ if (send_cmd->out.size > cxl_mbox->payload_size)
return -EINVAL;
if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
@@ -494,6 +497,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
struct cxl_memdev_state *mds,
const struct cxl_send_command *send_cmd)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mem_command mem_cmd;
int rc;
@@ -505,7 +509,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
* supports, but output can be arbitrarily large (simply write out as
* much data as the hardware provides).
*/
- if (send_cmd->in.size > mds->payload_size)
+ if (send_cmd->in.size > cxl_mbox->payload_size)
return -EINVAL;
/* Sanitize and construct a cxl_mem_command */
@@ -542,7 +546,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);
/*
- * otherwise, return max(n_commands, total commands) cxl_command_info
+ * otherwise, return min(n_commands, total commands) cxl_command_info
* structures.
*/
cxl_for_each_cmd(cmd) {
@@ -591,6 +595,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
u64 out_payload, s32 *size_out,
u32 *retval)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct device *dev = mds->cxlds.dev;
int rc;
@@ -601,7 +606,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
cxl_mem_opcode_to_name(mbox_cmd->opcode),
mbox_cmd->opcode, mbox_cmd->size_in);
- rc = mds->mbox_send(mds, mbox_cmd);
+ rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
if (rc)
goto out;
@@ -659,11 +664,12 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
u32 *size, u8 *out)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
u32 remaining = *size;
u32 offset = 0;
while (remaining) {
- u32 xfer_size = min_t(u32, remaining, mds->payload_size);
+ u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
struct cxl_mbox_cmd mbox_cmd;
struct cxl_mbox_get_log log;
int rc;
@@ -682,7 +688,7 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
.payload_out = out,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
/*
* The output payload length that indicates the number
@@ -752,22 +758,23 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_supported_logs *ret;
struct cxl_mbox_cmd mbox_cmd;
int rc;
- ret = kvmalloc(mds->payload_size, GFP_KERNEL);
+ ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
- .size_out = mds->payload_size,
+ .size_out = cxl_mbox->payload_size,
.payload_out = ret,
/* At least the record number field must be valid */
.min_out = 2,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0) {
kvfree(ret);
return ERR_PTR(rc);
@@ -910,6 +917,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
enum cxl_event_log_type log,
struct cxl_get_event_payload *get_pl)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_clear_event_payload *payload;
u16 total = le16_to_cpu(get_pl->record_count);
u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
@@ -920,8 +928,8 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
int i;
/* Payload size may limit the max handles */
- if (pl_size > mds->payload_size) {
- max_handles = (mds->payload_size - sizeof(*payload)) /
+ if (pl_size > cxl_mbox->payload_size) {
+ max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
sizeof(__le16);
pl_size = struct_size(payload, handles, max_handles);
}
@@ -955,7 +963,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
if (i == max_handles) {
payload->nr_recs = i;
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
goto free_pl;
i = 0;
@@ -966,7 +974,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
if (i) {
payload->nr_recs = i;
mbox_cmd.size_in = struct_size(payload, handles, i);
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
goto free_pl;
}
@@ -979,6 +987,7 @@ free_pl:
static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
enum cxl_event_log_type type)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
@@ -995,11 +1004,11 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
.payload_in = &log_type,
.size_in = sizeof(log_type),
.payload_out = payload,
- .size_out = mds->payload_size,
+ .size_out = cxl_mbox->payload_size,
.min_out = struct_size(payload, records, 0),
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc) {
dev_err_ratelimited(dev,
"Event log '%d': Failed to query event records : %d",
@@ -1070,6 +1079,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
*/
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_partition_info pi;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -1079,7 +1089,7 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
.size_out = sizeof(pi),
.payload_out = &pi,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
return rc;
@@ -1106,6 +1116,7 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
*/
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify id;
struct cxl_mbox_cmd mbox_cmd;
@@ -1120,7 +1131,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
.size_out = sizeof(id),
.payload_out = &id,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return rc;
@@ -1148,6 +1159,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
int rc;
u32 sec_out = 0;
struct cxl_get_security_output {
@@ -1159,14 +1171,13 @@ static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
.size_out = sizeof(out),
};
struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
- struct cxl_dev_state *cxlds = &mds->cxlds;
if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
return -EINVAL;
- rc = cxl_internal_send_cmd(mds, &sec_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
if (rc < 0) {
- dev_err(cxlds->dev, "Failed to get security state : %d", rc);
+ dev_err(cxl_mbox->host, "Failed to get security state : %d", rc);
return rc;
}
@@ -1183,9 +1194,9 @@ static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
sec_out & CXL_PMEM_SEC_STATE_LOCKED)
return -EINVAL;
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0) {
- dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
+ dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc);
return rc;
}
@@ -1214,7 +1225,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
int rc;
/* synchronize with cxl_mem_probe() and decoder write operations */
- device_lock(&cxlmd->dev);
+ guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint;
down_read(&cxl_region_rwsem);
/*
@@ -1226,7 +1237,6 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
else
rc = -EBUSY;
up_read(&cxl_region_rwsem);
- device_unlock(&cxlmd->dev);
return rc;
}
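
guard(device)(...) above is the scope-based form of device_lock()/
device_unlock(): the lock is taken at the declaration and released
automatically on every return path, which is what let the explicit
device_unlock() call be deleted. Shape of the idiom (illustrative
helper name):

	static int some_op(struct cxl_memdev *cxlmd)
	{
		guard(device)(&cxlmd->dev);	/* device_lock(), auto-unlock */

		if (!cxlmd->endpoint)
			return -EBUSY;		/* unlocked here too */

		return do_work(cxlmd);		/* and here */
	}
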
@@ -1300,6 +1310,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
struct cxl_mbox_set_timestamp_in pi;
int rc;
@@ -1311,7 +1322,7 @@ int cxl_set_timestamp(struct cxl_memdev_state *mds)
.payload_in = &pi,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
/*
* Command is optional. Devices may have another way of providing
* a timestamp, or may return all 0s in timestamp fields.
@@ -1328,6 +1339,7 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_region *cxlr)
{
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
int nr_records = 0;
@@ -1346,12 +1358,12 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
.opcode = CXL_MBOX_OP_GET_POISON,
.size_in = sizeof(pi),
.payload_in = &pi,
- .size_out = mds->payload_size,
+ .size_out = cxl_mbox->payload_size,
.payload_out = po,
.min_out = struct_size(po, record, 0),
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
break;
@@ -1382,7 +1394,9 @@ static void free_poison_buf(void *buf)
/* Get Poison List output buffer is protected by mds->poison.lock */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
- mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
+ mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
if (!mds->poison.list_out)
return -ENOMEM;
@@ -1408,6 +1422,19 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds)
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
+int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
+{
+ if (!cxl_mbox || !host)
+ return -EINVAL;
+
+ cxl_mbox->host = host;
+ mutex_init(&cxl_mbox->mbox_mutex);
+ rcuwait_init(&cxl_mbox->mbox_wait);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, CXL);
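
For a transport driver, the refactor in this hunk series means the
mailbox context hangs off cxl_dev_state and is primed once; commands are
then issued against the mailbox rather than the memdev state. A
hypothetical call sequence following the fields this diff introduces:

	struct cxl_mailbox *mbox = &cxlds->cxl_mbox;
	int rc;

	rc = cxl_mailbox_init(mbox, cxlds->dev);
	if (rc)
		return rc;

	mbox->mbox_send = my_transport_send;	/* hypothetical transport op */
	mbox->payload_size = my_payload_size;	/* from hardware capabilities */

	rc = cxl_internal_send_cmd(mbox, &mbox_cmd);
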
+
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
struct cxl_memdev_state *mds;
@@ -1418,7 +1445,6 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- mutex_init(&mds->mbox_mutex);
mutex_init(&mds->event.log_lock);
mds->cxlds.dev = dev;
mds->cxlds.reg_map.host = dev;
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 0277726afd04..84fefb76dafa 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -58,7 +58,7 @@ static ssize_t payload_max_show(struct device *dev,
if (!mds)
return sysfs_emit(buf, "\n");
- return sysfs_emit(buf, "%zu\n", mds->payload_size);
+ return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -124,15 +124,16 @@ static ssize_t security_state_show(struct device *dev,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
unsigned long state = mds->security.state;
int rc = 0;
/* sync with latest submission state */
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_active)
rc = sysfs_emit(buf, "sanitize\n");
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
if (rc)
return rc;
@@ -277,7 +278,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_inject_poison inject;
struct cxl_poison_record record;
struct cxl_mbox_cmd mbox_cmd;
@@ -307,13 +308,13 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
.size_in = sizeof(inject),
.payload_in = &inject,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
goto out;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
- dev_warn_once(mds->cxlds.dev,
+ dev_warn_once(cxl_mbox->host,
"poison inject dpa:%#llx region: %s\n", dpa,
dev_name(&cxlr->dev));
@@ -332,7 +333,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_clear_poison clear;
struct cxl_poison_record record;
struct cxl_mbox_cmd mbox_cmd;
@@ -371,13 +372,13 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
.payload_in = &clear,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
goto out;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
- dev_warn_once(mds->cxlds.dev,
+ dev_warn_once(cxl_mbox->host,
"poison clear dpa:%#llx region: %s\n", dpa,
dev_name(&cxlr->dev));
@@ -714,6 +715,7 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
*/
static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_fw_info info;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -724,7 +726,7 @@ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
.payload_out = &info,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return rc;
@@ -748,6 +750,7 @@ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
*/
static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_activate_fw activate;
struct cxl_mbox_cmd mbox_cmd;
@@ -764,7 +767,7 @@ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
activate.action = CXL_FW_ACTIVATE_OFFLINE;
activate.slot = slot;
- return cxl_internal_send_cmd(mds, &mbox_cmd);
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
/**
@@ -779,6 +782,7 @@ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
*/
static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_transfer_fw *transfer;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -798,7 +802,7 @@ static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
kfree(transfer);
return rc;
}
@@ -829,12 +833,13 @@ static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
{
struct cxl_memdev_state *mds = fwl->dd_handle;
struct cxl_mbox_transfer_fw *transfer;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
if (!size)
return FW_UPLOAD_ERR_INVALID_SIZE;
mds->fw.oneshot = struct_size(transfer, data, size) <
- mds->payload_size;
+ cxl_mbox->payload_size;
if (cxl_mem_get_fw_info(mds))
return FW_UPLOAD_ERR_HW_ERROR;
@@ -854,6 +859,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
{
struct cxl_memdev_state *mds = fwl->dd_handle;
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct cxl_mbox_transfer_fw *transfer;
struct cxl_mbox_cmd mbox_cmd;
@@ -877,7 +883,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
* sizeof(*transfer) is 128. These constraints imply that @cur_size
* will always be 128b aligned.
*/
- cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));
+ cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer));
remaining = size - cur_size;
size_in = struct_size(transfer, data, cur_size);
@@ -921,7 +927,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
.poll_count = 30,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0) {
rc = FW_UPLOAD_ERR_RW_ERROR;
goto out_free;
@@ -1059,16 +1065,17 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
static void sanitize_teardown_notifier(void *data)
{
struct cxl_memdev_state *mds = data;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct kernfs_node *state;
/*
* Prevent new irq triggered invocations of the workqueue and
* flush inflight invocations.
*/
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
state = mds->security.sanitize_node;
mds->security.sanitize_node = NULL;
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
cancel_delayed_work_sync(&mds->security.poll_dwork);
sysfs_put(state);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 51132a575b27..5b46bc46aaa9 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -211,37 +211,6 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
-static int wait_for_valid(struct pci_dev *pdev, int d)
-{
- u32 val;
- int rc;
-
- /*
- * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
- * and Size Low registers are valid. Must be set within 1 second of
- * deassertion of reset to CXL device. Likely it is already set by the
- * time this runs, but otherwise give a 1.5 second timeout in case of
- * clock skew.
- */
- rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
- if (rc)
- return rc;
-
- if (val & CXL_DVSEC_MEM_INFO_VALID)
- return 0;
-
- msleep(1500);
-
- rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
- if (rc)
- return rc;
-
- if (val & CXL_DVSEC_MEM_INFO_VALID)
- return 0;
-
- return -ETIMEDOUT;
-}
-
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
struct pci_dev *pdev = to_pci_dev(cxlds->dev);
@@ -322,11 +291,13 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
-int cxl_dvsec_rr_decode(struct device *dev, int d,
+int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
int hdm_count, rc, i, ranges = 0;
+ int d = cxlds->cxl_dvsec;
u16 cap, ctrl;
if (!d) {
@@ -353,12 +324,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
if (!hdm_count || hdm_count > 2)
return -EINVAL;
- rc = wait_for_valid(pdev, d);
- if (rc) {
- dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
- return rc;
- }
-
/*
* The current DVSEC values are moot if the memory capability is
* disabled, and they will remain moot after the HDM Decoder
@@ -376,6 +341,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
u64 base, size;
u32 temp;
+ rc = cxl_dvsec_mem_range_valid(cxlds, i);
+ if (rc)
+ return rc;
+
rc = pci_read_config_dword(
pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
if (rc)
@@ -390,10 +359,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
if (!size) {
- info->dvsec_range[i] = (struct range) {
- .start = 0,
- .end = CXL_RESOURCE_NONE,
- };
continue;
}
@@ -411,12 +376,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
- info->dvsec_range[i] = (struct range) {
+ info->dvsec_range[ranges++] = (struct range) {
.start = base,
.end = base + size - 1
};
-
- ranges++;
}
info->ranges = ranges;
@@ -463,7 +426,15 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return -ENODEV;
}
- for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
+ if (!info->mem_enabled) {
+ rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enable_mem(&port->dev, cxlds);
+ }
+
+ for (i = 0, allowed = 0; i < info->ranges; i++) {
struct device *cxld_dev;
cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
@@ -477,7 +448,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
allowed++;
}
- if (!allowed && info->mem_enabled) {
+ if (!allowed) {
dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
return -ENXIO;
}
@@ -491,14 +462,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
* match. If at least one DVSEC range is enabled and allowed, skip HDM
* Decoder Capability Enable.
*/
- if (info->mem_enabled)
- return 0;
-
- rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
- if (rc)
- return rc;
-
- return devm_cxl_enable_mem(&port->dev, cxlds);
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
@@ -772,22 +736,20 @@ static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
{
- struct cxl_rcrb_info *ri = &dport->rcrb;
- void __iomem *dport_aer = NULL;
resource_size_t aer_phys;
struct device *host;
+ u16 aer_cap;
- if (dport->rch && ri->aer_cap) {
+ aer_cap = cxl_rcrb_to_aer(dport->dport_dev, dport->rcrb.base);
+ if (aer_cap) {
host = dport->reg_map.host;
- aer_phys = ri->aer_cap + ri->base;
- dport_aer = devm_cxl_iomap_block(host, aer_phys,
- sizeof(struct aer_capability_regs));
+ aer_phys = aer_cap + dport->rcrb.base;
+ dport->regs.dport_aer = devm_cxl_iomap_block(host, aer_phys,
+ sizeof(struct aer_capability_regs));
}
-
- dport->regs.dport_aer = dport_aer;
}
-static void cxl_dport_map_regs(struct cxl_dport *dport)
+static void cxl_dport_map_ras(struct cxl_dport *dport)
{
struct cxl_register_map *map = &dport->reg_map;
struct device *dev = dport->dport_dev;
@@ -797,22 +759,16 @@ static void cxl_dport_map_regs(struct cxl_dport *dport)
else if (cxl_map_component_regs(map, &dport->regs.component,
BIT(CXL_CM_CAP_CAP_ID_RAS)))
dev_dbg(dev, "Failed to map RAS capability.\n");
-
- if (dport->rch)
- cxl_dport_map_rch_aer(dport);
}
static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
{
void __iomem *aer_base = dport->regs.dport_aer;
- struct pci_host_bridge *bridge;
u32 aer_cmd_mask, aer_cmd;
if (!aer_base)
return;
- bridge = to_pci_host_bridge(dport->dport_dev);
-
/*
* Disable RCH root port command interrupts.
* CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
@@ -821,34 +777,35 @@ static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
* the root cmd register's interrupts is required. But, PCI spec
* shows these are disabled by default on reset.
*/
- if (bridge->native_aer) {
- aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
- PCI_ERR_ROOT_CMD_NONFATAL_EN |
- PCI_ERR_ROOT_CMD_FATAL_EN);
- aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
- aer_cmd &= ~aer_cmd_mask;
- writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
- }
+ aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+ PCI_ERR_ROOT_CMD_NONFATAL_EN |
+ PCI_ERR_ROOT_CMD_FATAL_EN);
+ aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+ aer_cmd &= ~aer_cmd_mask;
+ writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
}
-void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+/**
+ * cxl_dport_init_ras_reporting - Set up CXL RAS reporting on this dport
+ * @dport: the cxl_dport that needs to be initialized
+ * @host: host device for devm operations
+ */
+void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
{
- struct device *dport_dev = dport->dport_dev;
+ dport->reg_map.host = host;
+ cxl_dport_map_ras(dport);
if (dport->rch) {
- struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport_dev);
-
- if (host_bridge->native_aer)
- dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
- }
+ struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport->dport_dev);
- dport->reg_map.host = host;
- cxl_dport_map_regs(dport);
+ if (!host_bridge->native_aer)
+ return;
- if (dport->rch)
+ cxl_dport_map_rch_aer(dport);
cxl_disable_rch_root_ints(dport);
+ }
}
-EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, CXL);
static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
struct cxl_dport *dport)
@@ -915,15 +872,13 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
struct pci_dev *pdev = to_pci_dev(cxlds->dev);
struct aer_capability_regs aer_regs;
struct cxl_dport *dport;
- struct cxl_port *port;
int severity;
- port = cxl_pci_find_port(pdev, &dport);
+ struct cxl_port *port __free(put_cxl_port) =
+ cxl_pci_find_port(pdev, &dport);
if (!port)
return;
- put_device(&port->dev);
-
if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
return;
@@ -1076,3 +1031,26 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
__cxl_endpoint_decoder_reset_detected);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL);
+
+int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
+{
+ int speed, bw;
+ u16 lnksta;
+ u32 width;
+
+ speed = pcie_link_speed_mbps(pdev);
+ if (speed < 0)
+ return speed;
+ speed /= BITS_PER_BYTE;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+ bw = speed * width;
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].read_bandwidth = bw;
+ c[i].write_bandwidth = bw;
+ }
+
+ return 0;
+}
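
cxl_pci_get_bandwidth() derives a raw link bandwidth: pcie_link_speed_mbps()
reports the signaling rate in Mb/s, dividing by BITS_PER_BYTE converts that
to MB/s per lane, and multiplying by the negotiated link width (NLW) scales
it to the whole link. A worked example, under the assumption that
pcie_link_speed_mbps() returns the raw rate without subtracting encoding
overhead:

	/* hypothetical 16 GT/s (Gen4) x8 link */
	int speed = 16000 / BITS_PER_BYTE;	/* 2000 MB/s per lane */
	u32 width = 8;		/* FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta) */
	int bw = speed * width;	/* 16000 MB/s for the whole link */

The same value is then written to both read_bandwidth and write_bandwidth
for every access coordinate class.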
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 1d5007e3795a..e666ec6a9085 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -3,7 +3,6 @@
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
-#include <linux/einj-cxl.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -11,6 +10,7 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/node.h>
+#include <cxl/einj.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
@@ -828,27 +828,20 @@ static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport)
&cxl_einj_inject_fops);
}
-static struct cxl_port *__devm_cxl_add_port(struct device *host,
- struct device *uport_dev,
- resource_size_t component_reg_phys,
- struct cxl_dport *parent_dport)
+static int cxl_port_add(struct cxl_port *port,
+ resource_size_t component_reg_phys,
+ struct cxl_dport *parent_dport)
{
- struct cxl_port *port;
- struct device *dev;
+ struct device *dev __free(put_device) = &port->dev;
int rc;
- port = cxl_port_alloc(uport_dev, parent_dport);
- if (IS_ERR(port))
- return port;
-
- dev = &port->dev;
- if (is_cxl_memdev(uport_dev)) {
- struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev);
+ if (is_cxl_memdev(port->uport_dev)) {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
rc = dev_set_name(dev, "endpoint%d", port->id);
if (rc)
- goto err;
+ return rc;
/*
* The endpoint driver already enumerated the component and RAS
@@ -861,19 +854,41 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
} else if (parent_dport) {
rc = dev_set_name(dev, "port%d", port->id);
if (rc)
- goto err;
+ return rc;
rc = cxl_port_setup_regs(port, component_reg_phys);
if (rc)
- goto err;
- } else
+ return rc;
+ } else {
rc = dev_set_name(dev, "root%d", port->id);
- if (rc)
- goto err;
+ if (rc)
+ return rc;
+ }
rc = device_add(dev);
if (rc)
- goto err;
+ return rc;
+
+ /* Inhibit the cleanup function from being invoked on success */
+ dev = NULL;
+ return 0;
+}
+
+static struct cxl_port *__devm_cxl_add_port(struct device *host,
+ struct device *uport_dev,
+ resource_size_t component_reg_phys,
+ struct cxl_dport *parent_dport)
+{
+ struct cxl_port *port;
+ int rc;
+
+ port = cxl_port_alloc(uport_dev, parent_dport);
+ if (IS_ERR(port))
+ return port;
+
+ rc = cxl_port_add(port, component_reg_phys, parent_dport);
+ if (rc)
+ return ERR_PTR(rc);
rc = devm_add_action_or_reset(host, unregister_port, port);
if (rc)
@@ -891,10 +906,6 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));
return port;
-
-err:
- put_device(dev);
- return ERR_PTR(rc);
}
/**
@@ -941,7 +952,7 @@ struct cxl_root *devm_cxl_add_root(struct device *host,
port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(port))
- return (struct cxl_root *)port;
+ return ERR_CAST(port);
cxl_root = to_cxl_root(port);
cxl_root->ops = ops;
@@ -1258,18 +1269,13 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
static int add_ep(struct cxl_ep *new)
{
struct cxl_port *port = new->dport->port;
- int rc;
- device_lock(&port->dev);
- if (port->dead) {
- device_unlock(&port->dev);
+ guard(device)(&port->dev);
+ if (port->dead)
return -ENXIO;
- }
- rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
- GFP_KERNEL);
- device_unlock(&port->dev);
- return rc;
+ return xa_insert(&port->endpoints, (unsigned long)new->ep,
+ new, GFP_KERNEL);
}
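
The add_ep() rewrite uses the scope-based guard() helper from
<linux/cleanup.h>: guard(device)(&port->dev) acquires device_lock() at the
point of declaration and arranges for device_unlock() to run automatically
when the enclosing scope exits, on every return path. A sketch of the guard
type involved (paraphrased from include/linux/device.h; treat the exact
definition there as authoritative):

	DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))

	/* usage, as in add_ep() above: no unlock before the early return */
	guard(device)(&port->dev);
	if (port->dead)
		return -ENXIO;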
/**
@@ -1393,14 +1399,14 @@ static void delete_endpoint(void *data)
struct cxl_port *endpoint = cxlmd->endpoint;
struct device *host = endpoint_host(endpoint);
- device_lock(host);
- if (host->driver && !endpoint->dead) {
- devm_release_action(host, cxl_unlink_parent_dport, endpoint);
- devm_release_action(host, cxl_unlink_uport, endpoint);
- devm_release_action(host, unregister_port, endpoint);
+ scoped_guard(device, host) {
+ if (host->driver && !endpoint->dead) {
+ devm_release_action(host, cxl_unlink_parent_dport, endpoint);
+ devm_release_action(host, cxl_unlink_uport, endpoint);
+ devm_release_action(host, unregister_port, endpoint);
+ }
+ cxlmd->endpoint = NULL;
}
- cxlmd->endpoint = NULL;
- device_unlock(host);
put_device(&endpoint->dev);
put_device(host);
}
@@ -1477,12 +1483,11 @@ static void cxl_detach_ep(void *data)
.cxlmd = cxlmd,
.depth = i,
};
- struct device *dev;
struct cxl_ep *ep;
bool died = false;
- dev = bus_find_device(&cxl_bus_type, NULL, &ctx,
- port_has_memdev);
+ struct device *dev __free(put_device) =
+ bus_find_device(&cxl_bus_type, NULL, &ctx, port_has_memdev);
if (!dev)
continue;
port = to_cxl_port(dev);
@@ -1512,7 +1517,6 @@ static void cxl_detach_ep(void *data)
dev_name(&port->dev));
delete_switch_port(port);
}
- put_device(&port->dev);
device_unlock(&parent_port->dev);
}
}
@@ -1540,7 +1544,6 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
struct device *dport_dev)
{
struct device *dparent = grandparent(dport_dev);
- struct cxl_port *port, *parent_port = NULL;
struct cxl_dport *dport, *parent_dport;
resource_size_t component_reg_phys;
int rc;
@@ -1556,50 +1559,52 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return -ENXIO;
}
- parent_port = find_cxl_port(dparent, &parent_dport);
+ struct cxl_port *parent_port __free(put_cxl_port) =
+ find_cxl_port(dparent, &parent_dport);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
}
- device_lock(&parent_port->dev);
- if (!parent_port->dev.driver) {
- dev_warn(&cxlmd->dev,
- "port %s:%s disabled, failed to enumerate CXL.mem\n",
- dev_name(&parent_port->dev), dev_name(uport_dev));
- port = ERR_PTR(-ENXIO);
- goto out;
- }
+ /*
+ * Defined with __free() here so that the port's device reference is
+ * dropped before the parent_port reference is released.
+ */
+ struct cxl_port *port __free(put_cxl_port) = NULL;
+ scoped_guard(device, &parent_port->dev) {
+ if (!parent_port->dev.driver) {
+ dev_warn(&cxlmd->dev,
+ "port %s:%s disabled, failed to enumerate CXL.mem\n",
+ dev_name(&parent_port->dev), dev_name(uport_dev));
+ return -ENXIO;
+ }
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port) {
- component_reg_phys = find_component_registers(uport_dev);
- port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_dport);
- /* retry find to pick up the new dport information */
- if (!IS_ERR(port))
+ port = find_cxl_port_at(parent_port, dport_dev, &dport);
+ if (!port) {
+ component_reg_phys = find_component_registers(uport_dev);
+ port = devm_cxl_add_port(&parent_port->dev, uport_dev,
+ component_reg_phys, parent_dport);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ /* retry find to pick up the new dport information */
port = find_cxl_port_at(parent_port, dport_dev, &dport);
+ if (!port)
+ return -ENXIO;
+ }
}
-out:
- device_unlock(&parent_port->dev);
- if (IS_ERR(port))
- rc = PTR_ERR(port);
- else {
- dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport_dev));
- rc = cxl_add_ep(dport, &cxlmd->dev);
- if (rc == -EBUSY) {
- /*
- * "can't" happen, but this error code means
- * something to the caller, so translate it.
- */
- rc = -ENXIO;
- }
- put_device(&port->dev);
+ dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
+ dev_name(&port->dev), dev_name(port->uport_dev));
+ rc = cxl_add_ep(dport, &cxlmd->dev);
+ if (rc == -EBUSY) {
+ /*
+ * "can't" happen, but this error code means
+ * something to the caller, so translate it.
+ */
+ rc = -ENXIO;
}
- put_device(&parent_port->dev);
return rc;
}
@@ -1630,7 +1635,6 @@ retry:
struct device *dport_dev = grandparent(iter);
struct device *uport_dev;
struct cxl_dport *dport;
- struct cxl_port *port;
/*
* The terminal "grandparent" in PCI is NULL and @platform_bus
@@ -1649,7 +1653,8 @@ retry:
dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
- port = find_cxl_port(dport_dev, &dport);
+ struct cxl_port *port __free(put_cxl_port) =
+ find_cxl_port(dport_dev, &dport);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
@@ -1664,18 +1669,13 @@ retry:
* the parent_port lock as the current port may be being
* reaped.
*/
- if (rc && rc != -EBUSY) {
- put_device(&port->dev);
+ if (rc && rc != -EBUSY)
return rc;
- }
/* Any more ports to add between this one and the root? */
- if (!dev_is_cxl_root_child(&port->dev)) {
- put_device(&port->dev);
+ if (!dev_is_cxl_root_child(&port->dev))
continue;
- }
- put_device(&port->dev);
return 0;
}
@@ -1983,7 +1983,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
struct cxl_port *port;
- int rc;
if (WARN_ON_ONCE(!cxld))
return -EINVAL;
@@ -1993,11 +1992,8 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
- device_lock(&port->dev);
- rc = cxl_decoder_add_locked(cxld, target_map);
- device_unlock(&port->dev);
-
- return rc;
+ guard(device)(&port->dev);
+ return cxl_decoder_add_locked(cxld, target_map);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
@@ -2241,6 +2237,26 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
+int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
+ struct access_coordinate *c)
+{
+ struct cxl_dport *dport = port->parent_dport;
+
+ /* Check this port is connected to a switch DSP and not an RP */
+ if (parent_port_is_cxl_root(to_cxl_port(port->dev.parent)))
+ return -ENODEV;
+
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].read_bandwidth = dport->coord[i].read_bandwidth;
+ c[i].write_bandwidth = dport->coord[i].write_bandwidth;
+ }
+
+ return 0;
+}
+
/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
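
cxl_port_get_switch_dport_bandwidth() only answers for a port whose parent
dport is a switch downstream port: when the grandparent port is the CXL
root, the parent dport is a root port and -ENODEV is returned instead. A
hypothetical caller (variable names invented for illustration):

	struct access_coordinate c[ACCESS_COORDINATE_MAX];
	int rc;

	rc = cxl_port_get_switch_dport_bandwidth(endpoint_port, c);
	if (rc)
		return rc;	/* -ENODEV: parent is a root port, not a DSP */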
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 21ad5f242875..e701e4b04032 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -1983,6 +1983,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
* then the region is already committed.
*/
p->state = CXL_CONFIG_COMMIT;
+ cxl_region_shared_upstream_bandwidth_update(cxlr);
return 0;
}
@@ -2004,6 +2005,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (rc)
return rc;
p->state = CXL_CONFIG_ACTIVE;
+ cxl_region_shared_upstream_bandwidth_update(cxlr);
}
cxled->cxld.interleave_ways = p->interleave_ways;
@@ -2313,8 +2315,6 @@ static void unregister_region(void *_cxlr)
struct cxl_region_params *p = &cxlr->params;
int i;
- unregister_memory_notifier(&cxlr->memory_notifier);
- unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
device_del(&cxlr->dev);
/*
@@ -2391,18 +2391,6 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
return true;
}
-static int cxl_region_nid(struct cxl_region *cxlr)
-{
- struct cxl_region_params *p = &cxlr->params;
- struct resource *res;
-
- guard(rwsem_read)(&cxl_region_rwsem);
- res = p->res;
- if (!res)
- return NUMA_NO_NODE;
- return phys_to_target_node(res->start);
-}
-
static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
unsigned long action, void *arg)
{
@@ -2415,7 +2403,11 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
return NOTIFY_DONE;
- region_nid = cxl_region_nid(cxlr);
+ /*
+ * No need to hold cxl_region_rwsem; region parameters are stable
+ * within the cxl_region driver.
+ */
+ region_nid = phys_to_target_node(cxlr->params.res->start);
if (nid != region_nid)
return NOTIFY_DONE;
@@ -2434,7 +2426,11 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
int *adist = data;
int region_nid;
- region_nid = cxl_region_nid(cxlr);
+ /*
+ * No need to hold cxl_region_rwsem; region parameters are stable
+ * within the cxl_region driver.
+ */
+ region_nid = phys_to_target_node(cxlr->params.res->start);
if (nid != region_nid)
return NOTIFY_OK;
@@ -2484,14 +2480,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
if (rc)
goto err;
- cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
- cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
- register_memory_notifier(&cxlr->memory_notifier);
-
- cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
- cxlr->adist_notifier.priority = 100;
- register_mt_adistance_algorithm(&cxlr->adist_notifier);
-
rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
if (rc)
return ERR_PTR(rc);
@@ -3094,11 +3082,11 @@ static void cxlr_release_nvdimm(void *_cxlr)
struct cxl_region *cxlr = _cxlr;
struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
- device_lock(&cxl_nvb->dev);
- if (cxlr->cxlr_pmem)
- devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
- cxlr->cxlr_pmem);
- device_unlock(&cxl_nvb->dev);
+ scoped_guard(device, &cxl_nvb->dev) {
+ if (cxlr->cxlr_pmem)
+ devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
+ cxlr->cxlr_pmem);
+ }
cxlr->cxl_nvb = NULL;
put_device(&cxl_nvb->dev);
}
@@ -3134,13 +3122,14 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
dev_name(dev));
- device_lock(&cxl_nvb->dev);
- if (cxl_nvb->dev.driver)
- rc = devm_add_action_or_reset(&cxl_nvb->dev,
- cxlr_pmem_unregister, cxlr_pmem);
- else
- rc = -ENXIO;
- device_unlock(&cxl_nvb->dev);
+ scoped_guard(device, &cxl_nvb->dev) {
+ if (cxl_nvb->dev.driver)
+ rc = devm_add_action_or_reset(&cxl_nvb->dev,
+ cxlr_pmem_unregister,
+ cxlr_pmem);
+ else
+ rc = -ENXIO;
+ }
if (rc)
goto err_bridge;
@@ -3386,6 +3375,14 @@ static int is_system_ram(struct resource *res, void *arg)
return 1;
}
+static void shutdown_notifiers(void *_cxlr)
+{
+ struct cxl_region *cxlr = _cxlr;
+
+ unregister_memory_notifier(&cxlr->memory_notifier);
+ unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
+}
+
static int cxl_region_probe(struct device *dev)
{
struct cxl_region *cxlr = to_cxl_region(dev);
@@ -3421,6 +3418,18 @@ out:
if (rc)
return rc;
+ cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
+ cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
+ register_memory_notifier(&cxlr->memory_notifier);
+
+ cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
+ cxlr->adist_notifier.priority = 100;
+ register_mt_adistance_algorithm(&cxlr->adist_notifier);
+
+ rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr);
+ if (rc)
+ return rc;
+
switch (cxlr->mode) {
case CXL_DECODER_PMEM:
return devm_cxl_add_pmem_region(cxlr);
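
Net effect of the region.c hunks: the memory-online and adistance notifiers
are no longer registered when the region device is created, but in
cxl_region_probe() once the region is fully configured, and they are torn
down by a devm action on the region device instead of in
unregister_region(). Because the notifiers can now only run while the
cxl_region driver is bound, the region parameters are stable for that
duration, which is what allows cxl_region_nid() with its rwsem and NULL
check to be replaced by a direct phys_to_target_node() lookup. Condensed
shape of the new probe-side registration (from the hunks above):

	cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
	register_memory_notifier(&cxlr->memory_notifier);
	cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
	register_mt_adistance_algorithm(&cxlr->adist_notifier);
	/* shutdown_notifiers() unregisters both when cxlr->dev unbinds */
	rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr);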
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 9afb407d438f..0d8b810a51f0 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -744,6 +744,7 @@ struct cxl_root *find_cxl_root(struct cxl_port *port);
void put_cxl_root(struct cxl_root *cxl_root);
DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -762,9 +763,10 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
#ifdef CONFIG_PCIEAER_CXL
void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport);
+void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host);
#else
-static inline void cxl_setup_parent_dport(struct device *host,
- struct cxl_dport *dport) { }
+static inline void cxl_dport_init_ras_reporting(struct cxl_dport *dport,
+ struct device *host) { }
#endif
struct cxl_decoder *to_cxl_decoder(struct device *dev);
@@ -809,7 +811,7 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
-int cxl_dvsec_rr_decode(struct device *dev, int dvsec,
+int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info);
bool is_cxl_region(struct device *dev);
@@ -889,6 +891,7 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled);
+void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr);
void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
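
The new DEFINE_FREE(put_cxl_port, ...) enables the __free() declarations
used throughout this series: the cleanup runs put_device() on the port when
the variable goes out of scope, and the IS_ERR_OR_NULL() test makes it safe
for lookups that return NULL or an ERR_PTR(). Typical pattern, as at the
find_cxl_port() call sites above:

	struct cxl_port *port __free(put_cxl_port) =
		find_cxl_port(dport_dev, &dport);
	if (!port)
		return -ENXIO;
	/* use port; no explicit put_device(&port->dev) on any path */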
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index afb53d058d62..2a25d1957ddb 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -3,11 +3,12 @@
#ifndef __CXL_MEM_H__
#define __CXL_MEM_H__
#include <uapi/linux/cxl_mem.h>
+#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uuid.h>
-#include <linux/rcuwait.h>
-#include <linux/cxl-event.h>
#include <linux/node.h>
+#include <cxl/event.h>
+#include <cxl/mailbox.h>
#include "cxl.h"
/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -397,11 +398,13 @@ enum cxl_devtype {
* struct cxl_dpa_perf - DPA performance property entry
* @dpa_range: range for DPA address
* @coord: QoS performance data (i.e. latency, bandwidth)
+ * @cdat_coord: raw QoS performance data from CDAT
* @qos_class: QoS Class cookies
*/
struct cxl_dpa_perf {
struct range dpa_range;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX];
int qos_class;
};
@@ -424,6 +427,7 @@ struct cxl_dpa_perf {
* @ram_res: Active Volatile memory capacity configuration
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
+ * @cxl_mbox: CXL mailbox context
*/
struct cxl_dev_state {
struct device *dev;
@@ -438,8 +442,14 @@ struct cxl_dev_state {
struct resource ram_res;
u64 serial;
enum cxl_devtype type;
+ struct cxl_mailbox cxl_mbox;
};
+static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
+{
+ return dev_get_drvdata(cxl_mbox->host);
+}
+
/**
* struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data
*
@@ -448,11 +458,8 @@ struct cxl_dev_state {
* the functionality related to that like Identify Memory Device and Get
* Partition Info
* @cxlds: Core driver state common across Type-2 and Type-3 devices
- * @payload_size: Size of space for payload
- * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
* @lsa_size: Size of Label Storage Area
* (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
- * @mbox_mutex: Mutex to synchronize mailbox access.
* @firmware_version: Firmware version for the memory device.
* @enabled_cmds: Hardware commands found enabled in CEL.
* @exclusive_cmds: Commands that are kernel-internal only
@@ -470,17 +477,13 @@ struct cxl_dev_state {
* @poison: poison driver state info
* @security: security driver state info
* @fw: firmware upload / activation state
- * @mbox_wait: RCU wait for mbox send completely
- * @mbox_send: @dev specific transport for transmitting mailbox commands
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
*/
struct cxl_memdev_state {
struct cxl_dev_state cxlds;
- size_t payload_size;
size_t lsa_size;
- struct mutex mbox_mutex; /* Protects device mailbox and firmware */
char firmware_version[0x10];
DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
@@ -500,10 +503,6 @@ struct cxl_memdev_state {
struct cxl_poison_state poison;
struct cxl_security_state security;
struct cxl_fw_state fw;
-
- struct rcuwait mbox_wait;
- int (*mbox_send)(struct cxl_memdev_state *mds,
- struct cxl_mbox_cmd *cmd);
};
static inline struct cxl_memdev_state *
@@ -814,7 +813,7 @@ enum {
CXL_PMEM_SEC_PASS_USER,
};
-int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
+int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *cmd);
int cxl_dev_state_identify(struct cxl_memdev_state *mds);
int cxl_await_media_ready(struct cxl_dev_state *cxlds);
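
mbox_to_cxlds() recovers the device state from a mailbox context by reading
the drvdata of the mailbox's host device. This assumes the host device's
drvdata points at the struct cxl_dev_state before any mailbox callback runs;
the pci_get_drvdata() call added to cxl_dvsec_rr_decode() above relies on
the same convention. Sketch of the assumed pairing in a PCI probe path:

	pci_set_drvdata(pdev, cxlds);			/* host drvdata */
	rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev); /* records host */

	/* later, a transport callback recovers the device state: */
	struct cxl_dev_state *ds = mbox_to_cxlds(cxl_mbox);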
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 7de232eaeb17..a9fd5cd5a0d2 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -109,7 +109,6 @@ static int cxl_mem_probe(struct device *dev)
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *endpoint_parent;
- struct cxl_port *parent_port;
struct cxl_dport *dport;
struct dentry *dentry;
int rc;
@@ -146,7 +145,8 @@ static int cxl_mem_probe(struct device *dev)
if (rc)
return rc;
- parent_port = cxl_mem_find_port(cxlmd, &dport);
+ struct cxl_port *parent_port __free(put_cxl_port) =
+ cxl_mem_find_port(cxlmd, &dport);
if (!parent_port) {
dev_err(dev, "CXL port topology not found\n");
return -ENXIO;
@@ -166,22 +166,19 @@ static int cxl_mem_probe(struct device *dev)
else
endpoint_parent = &parent_port->dev;
- cxl_setup_parent_dport(dev, dport);
+ cxl_dport_init_ras_reporting(dport, dev);
- device_lock(endpoint_parent);
- if (!endpoint_parent->driver) {
- dev_err(dev, "CXL port topology %s not enabled\n",
- dev_name(endpoint_parent));
- rc = -ENXIO;
- goto unlock;
- }
+ scoped_guard(device, endpoint_parent) {
+ if (!endpoint_parent->driver) {
+ dev_err(dev, "CXL port topology %s not enabled\n",
+ dev_name(endpoint_parent));
+ return -ENXIO;
+ }
- rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
-unlock:
- device_unlock(endpoint_parent);
- put_device(&parent_port->dev);
- if (rc)
- return rc;
+ rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
+ if (rc)
+ return rc;
+ }
/*
* The kernel may be operating out of CXL memory on this device,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 4be35dc22202..37164174b5fb 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/io.h>
+#include <cxl/mailbox.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
@@ -124,6 +125,7 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
u16 opcode;
struct cxl_dev_id *dev_id = id;
struct cxl_dev_state *cxlds = dev_id->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
if (!cxl_mbox_background_complete(cxlds))
@@ -132,13 +134,13 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
if (opcode == CXL_MBOX_OP_SANITIZE) {
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_node)
mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
} else {
/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
- rcuwait_wake_up(&mds->mbox_wait);
+ rcuwait_wake_up(&cxl_mbox->mbox_wait);
}
return IRQ_HANDLED;
@@ -152,8 +154,9 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
struct cxl_memdev_state *mds =
container_of(work, typeof(*mds), security.poll_dwork.work);
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (cxl_mbox_background_complete(cxlds)) {
mds->security.poll_tmo_secs = 0;
if (mds->security.sanitize_node)
@@ -167,12 +170,12 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
mds->security.poll_tmo_secs = min(15 * 60, timeout);
schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
}
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
}
/**
* __cxl_pci_mbox_send_cmd() - Execute a mailbox command
- * @mds: The memory device driver data
+ * @cxl_mbox: CXL mailbox context
* @mbox_cmd: Command to send to the memory device.
*
* Context: Any context. Expects mbox_mutex to be held.
@@ -192,17 +195,18 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
* not need to coordinate with each other. The driver only uses the primary
* mailbox.
*/
-static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
+static int __cxl_pci_mbox_send_cmd(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *mbox_cmd)
{
- struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_dev_state *cxlds = mbox_to_cxlds(cxl_mbox);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
struct device *dev = cxlds->dev;
u64 cmd_reg, status_reg;
size_t out_len;
int rc;
- lockdep_assert_held(&mds->mbox_mutex);
+ lockdep_assert_held(&cxl_mbox->mbox_mutex);
/*
* Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
@@ -315,10 +319,10 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
timeout = mbox_cmd->poll_interval_ms;
for (i = 0; i < mbox_cmd->poll_count; i++) {
- if (rcuwait_wait_event_timeout(&mds->mbox_wait,
- cxl_mbox_background_complete(cxlds),
- TASK_UNINTERRUPTIBLE,
- msecs_to_jiffies(timeout)) > 0)
+ if (rcuwait_wait_event_timeout(&cxl_mbox->mbox_wait,
+ cxl_mbox_background_complete(cxlds),
+ TASK_UNINTERRUPTIBLE,
+ msecs_to_jiffies(timeout)) > 0)
break;
}
@@ -360,7 +364,7 @@ success:
*/
size_t n;
- n = min3(mbox_cmd->size_out, mds->payload_size, out_len);
+ n = min3(mbox_cmd->size_out, cxl_mbox->payload_size, out_len);
memcpy_fromio(mbox_cmd->payload_out, payload, n);
mbox_cmd->size_out = n;
} else {
@@ -370,14 +374,14 @@ success:
return 0;
}
-static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
+static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *cmd)
{
int rc;
- mutex_lock_io(&mds->mbox_mutex);
- rc = __cxl_pci_mbox_send_cmd(mds, cmd);
- mutex_unlock(&mds->mbox_mutex);
+ mutex_lock_io(&cxl_mbox->mbox_mutex);
+ rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
return rc;
}
@@ -385,6 +389,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
struct device *dev = cxlds->dev;
unsigned long timeout;
@@ -417,8 +422,8 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
return -ETIMEDOUT;
}
- mds->mbox_send = cxl_pci_mbox_send;
- mds->payload_size =
+ cxl_mbox->mbox_send = cxl_pci_mbox_send;
+ cxl_mbox->payload_size =
1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
/*
@@ -428,16 +433,15 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
* there's no point in going forward. If the size is too large, there's
* no harm in soft limiting it.
*/
- mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M);
- if (mds->payload_size < 256) {
+ cxl_mbox->payload_size = min_t(size_t, cxl_mbox->payload_size, SZ_1M);
+ if (cxl_mbox->payload_size < 256) {
dev_err(dev, "Mailbox is too small (%zub)",
- mds->payload_size);
+ cxl_mbox->payload_size);
return -ENXIO;
}
- dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
+ dev_dbg(dev, "Mailbox payload sized %zu", cxl_mbox->payload_size);
- rcuwait_init(&mds->mbox_wait);
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
/* background command interrupts are optional */
@@ -473,7 +477,6 @@ static bool is_cxl_restricted(struct pci_dev *pdev)
static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
struct cxl_register_map *map)
{
- struct cxl_port *port;
struct cxl_dport *dport;
resource_size_t component_reg_phys;
@@ -482,14 +485,12 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
.resource = CXL_RESOURCE_NONE,
};
- port = cxl_pci_find_port(pdev, &dport);
+ struct cxl_port *port __free(put_cxl_port) =
+ cxl_pci_find_port(pdev, &dport);
if (!port)
return -EPROBE_DEFER;
component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport);
-
- put_device(&port->dev);
-
if (component_reg_phys == CXL_RESOURCE_NONE)
return -ENXIO;
@@ -578,9 +579,10 @@ static void free_event_buf(void *buf)
*/
static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_get_event_payload *buf;
- buf = kvmalloc(mds->payload_size, GFP_KERNEL);
+ buf = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mds->event.buf = buf;
@@ -653,6 +655,7 @@ static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
struct cxl_event_interrupt_policy *policy)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_cmd mbox_cmd = {
.opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
.payload_out = policy,
@@ -660,7 +663,7 @@ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
};
int rc;
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
dev_err(mds->cxlds.dev,
"Failed to get event interrupt policy : %d", rc);
@@ -671,6 +674,7 @@ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
struct cxl_event_interrupt_policy *policy)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -687,7 +691,7 @@ static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
.size_in = sizeof(*policy),
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0) {
dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d",
rc);
@@ -786,6 +790,23 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
return 0;
}
+static int cxl_pci_type3_init_mailbox(struct cxl_dev_state *cxlds)
+{
+ int rc;
+
+ /*
+ * Fail the init if there's no mailbox. For a type3 this is out of spec.
+ */
+ if (!cxlds->reg_map.device_map.mbox.valid)
+ return -ENODEV;
+
+ rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
@@ -846,6 +867,10 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
+ rc = cxl_pci_type3_init_mailbox(cxlds);
+ if (rc)
+ return rc;
+
rc = cxl_await_media_ready(cxlds);
if (rc == 0)
cxlds->media_ready = true;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 4ef93da22335..a6538a5f5c9f 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -102,13 +102,15 @@ static int cxl_pmem_get_config_size(struct cxl_memdev_state *mds,
struct nd_cmd_get_config_size *cmd,
unsigned int buf_len)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
if (sizeof(*cmd) > buf_len)
return -EINVAL;
*cmd = (struct nd_cmd_get_config_size){
.config_size = mds->lsa_size,
.max_xfer =
- mds->payload_size - sizeof(struct cxl_mbox_set_lsa),
+ cxl_mbox->payload_size - sizeof(struct cxl_mbox_set_lsa),
};
return 0;
@@ -118,6 +120,7 @@ static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds,
struct nd_cmd_get_config_data_hdr *cmd,
unsigned int buf_len)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_lsa get_lsa;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -139,7 +142,7 @@ static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds,
.payload_out = cmd->out_buf,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
cmd->status = 0;
return rc;
@@ -149,6 +152,7 @@ static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
struct nd_cmd_set_config_hdr *cmd,
unsigned int buf_len)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_set_lsa *set_lsa;
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -175,7 +179,7 @@ static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
.size_in = struct_size(set_lsa, data, cmd->in_length),
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
/*
* Set "firmware" status (4-packed bytes at the end of the input
@@ -233,15 +237,13 @@ static int detach_nvdimm(struct device *dev, void *data)
if (!is_cxl_nvdimm(dev))
return 0;
- device_lock(dev);
- if (!dev->driver)
- goto out;
-
- cxl_nvd = to_cxl_nvdimm(dev);
- if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
- release = true;
-out:
- device_unlock(dev);
+ scoped_guard(device, dev) {
+ if (dev->driver) {
+ cxl_nvd = to_cxl_nvdimm(dev);
+ if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
+ release = true;
+ }
+ }
if (release)
device_release_driver(dev);
return 0;
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index d7d5d982ce69..861dde65768f 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -98,7 +98,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_port *root;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds->dev, cxlds->cxl_dvsec, &info);
+ rc = cxl_dvsec_rr_decode(cxlds->dev, port, &info);
if (rc < 0)
return rc;
diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c
index 21856a3f408e..452d1a9b9148 100644
--- a/drivers/cxl/security.c
+++ b/drivers/cxl/security.c
@@ -14,6 +14,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
unsigned long security_flags = 0;
struct cxl_get_security_output {
@@ -29,7 +30,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
.payload_out = &out,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return 0;
@@ -70,7 +71,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
struct cxl_set_pass set_pass;
@@ -87,7 +88,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
.payload_in = &set_pass,
};
- return cxl_internal_send_cmd(mds, &mbox_cmd);
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@ -96,7 +97,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_disable_pass dis_pass;
struct cxl_mbox_cmd mbox_cmd;
@@ -112,7 +113,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
.payload_in = &dis_pass,
};
- return cxl_internal_send_cmd(mds, &mbox_cmd);
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@ -131,12 +132,12 @@ static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_cmd mbox_cmd = {
.opcode = CXL_MBOX_OP_FREEZE_SECURITY,
};
- return cxl_internal_send_cmd(mds, &mbox_cmd);
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
@@ -144,7 +145,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
u8 pass[NVDIMM_PASSPHRASE_LEN];
struct cxl_mbox_cmd mbox_cmd;
int rc;
@@ -156,7 +157,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
.payload_in = pass,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return rc;
@@ -169,7 +170,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
{
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_cmd mbox_cmd;
struct cxl_pass_erase erase;
int rc;
@@ -185,7 +186,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
.payload_in = &erase,
};
- rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc < 0)
return rc;
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index a88744244149..d656e4c0eb84 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -30,7 +30,7 @@ config DEV_DAX_PMEM
config DEV_DAX_HMEM
tristate "HMEM DAX: direct access to 'specific purpose' memory"
depends on EFI_SOFT_RESERVE
- select NUMA_KEEP_MEMINFO if (NUMA && X86)
+ select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS
default DEV_DAX
help
EFI 2.8 platforms, and others, may advertise 'specific purpose'
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 2051e4f73c8a..9c1a729cd77e 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -235,9 +235,9 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
int id;
struct dev_dax *dev_dax = filp->private_data;
- dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) order:%d\n", current->comm,
- (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
- vmf->vma->vm_start, vmf->vma->vm_end, order);
+ dev_dbg(&dev_dax->dev, "%s: op=%s addr=%#lx order=%d\n", current->comm,
+ (vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
+ vmf->address & ~((1UL << (order + PAGE_SHIFT)) - 1), order);
id = dax_read_lock();
if (order == 0)
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index c74ac197d5fe..8a08ffde31e7 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -144,37 +144,38 @@ const struct dma_fence_ops dma_fence_array_ops = {
EXPORT_SYMBOL(dma_fence_array_ops);
/**
- * dma_fence_array_create - Create a custom fence array
+ * dma_fence_array_alloc - Allocate a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ *
+ * Return: the allocated dma fence array on success, NULL on failure
+ */
+struct dma_fence_array *dma_fence_array_alloc(int num_fences)
+{
+ struct dma_fence_array *array;
+
+ return kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
+}
+EXPORT_SYMBOL(dma_fence_array_alloc);
+
+/**
+ * dma_fence_array_init - Init a custom fence array
+ * @array: [in] dma fence array to arm
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
- * Allocate a dma_fence_array object and initialize the base fence with
- * dma_fence_init().
- * In case of error it returns NULL.
- *
- * The caller should allocate the fences array with num_fences size
- * and fill it with the fences it wants to add to the object. Ownership of this
- * array is taken and dma_fence_put() is used on each fence on release.
- *
- * If @signal_on_any is true the fence array signals if any fence in the array
- * signals, otherwise it signals when all fences in the array signal.
+ * Implementation of dma_fence_array_create() without allocation. Useful for
+ * initializing a preallocated dma fence array in reclaim or dma-fence
+ * signaling paths.
*/
-struct dma_fence_array *dma_fence_array_create(int num_fences,
- struct dma_fence **fences,
- u64 context, unsigned seqno,
- bool signal_on_any)
+void dma_fence_array_init(struct dma_fence_array *array,
+ int num_fences, struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
{
- struct dma_fence_array *array;
-
WARN_ON(!num_fences || !fences);
- array = kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
- if (!array)
- return NULL;
-
array->num_fences = num_fences;
spin_lock_init(&array->lock);
@@ -200,6 +201,41 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
*/
while (num_fences--)
WARN_ON(dma_fence_is_container(fences[num_fences]));
+}
+EXPORT_SYMBOL(dma_fence_array_init);
+
+/**
+ * dma_fence_array_create - Create a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ * @fences: [in] array containing the fences
+ * @context: [in] fence context to use
+ * @seqno: [in] sequence number to use
+ * @signal_on_any: [in] signal on any fence in the array
+ *
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and dma_fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
+{
+ struct dma_fence_array *array;
+
+ array = dma_fence_array_alloc(num_fences);
+ if (!array)
+ return NULL;
+
+ dma_fence_array_init(array, num_fences, fences,
+ context, seqno, signal_on_any);
return array;
}
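
The split gives callers two entry points: dma_fence_array_alloc() can run
early, where a GFP_KERNEL allocation is still allowed, and
dma_fence_array_init() arms the preallocated object later, inside reclaim or
fence-signaling critical sections where allocating would be forbidden.
Sketch of the intended usage (caller context hypothetical):

	/* outside the critical path: allocation may sleep */
	struct dma_fence_array *array = dma_fence_array_alloc(num_fences);
	if (!array)
		return -ENOMEM;

	/* inside the critical path: no allocation, just initialization */
	dma_fence_array_init(array, num_fences, fences, context, seqno, false);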
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 2298ca5e112e..3cbe87d4a464 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -7,17 +7,15 @@
*/
#include <linux/cdev.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
#include <linux/err.h>
-#include <linux/xarray.h>
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/nospec.h>
-#include <linux/uaccess.h>
#include <linux/syscalls.h>
-#include <linux/dma-heap.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
#include <uapi/linux/dma-heap.h>
#define DEVNAME "dma_heap"
@@ -28,9 +26,10 @@
* struct dma_heap - represents a dmabuf heap in the system
* @name: used for debugging/device-node name
* @ops: ops struct for this heap
- * @heap_devt heap device node
- * @list list head connecting to list of heaps
- * @heap_cdev heap char device
+ * @priv: private data for this heap
+ * @heap_devt: heap device node
+ * @list: list head connecting to list of heaps
+ * @heap_cdev: heap char device
*
* Represents a heap of memory from which buffers can be made.
*/
@@ -193,11 +192,11 @@ static const struct file_operations dma_heap_fops = {
};
/**
- * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * dma_heap_get_drvdata - get per-heap driver data
* @heap: DMA-Heap to retrieve private data for
*
* Returns:
- * The per-subdriver data for the heap.
+ * The per-heap data for the heap.
*/
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
@@ -205,8 +204,8 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
}
/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
+ * dma_heap_get_name - get heap name
+ * @heap: DMA-Heap to retrieve the name of
*
* Returns:
* The char* for the heap name.
@@ -216,6 +215,10 @@ const char *dma_heap_get_name(struct dma_heap *heap)
return heap->name;
}
+/**
+ * dma_heap_add - adds a heap to dmabuf heaps
+ * @exp_info: information needed to register this heap
+ */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
struct dma_heap *heap, *h, *err_ret;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cc0a62c34861..d9ec1e69e428 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -369,6 +369,15 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.
+config LOONGSON1_APB_DMA
+ tristate "Loongson1 APB DMA support"
+ depends on MACH_LOONGSON32 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This selects support for the APB DMA controller in Loongson1 SoCs,
+ which is required for Loongson1 NAND and audio support.
+
config LPC18XX_DMAMUX
bool "NXP LPC18xx/43xx DMA MUX for PL080"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -378,6 +387,15 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config LPC32XX_DMAMUX
+ bool "NXP LPC32xx DMA MUX for PL080"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on OF && AMBA_PL08X
+ select MFD_SYSCON
+ help
+ Support for PL080 multiplexed DMA request lines on
+ LPC32XX platform.
+
config LS2X_APB_DMA
tristate "Loongson LS2X APB DMA support"
depends on LOONGARCH || COMPILE_TEST
@@ -716,6 +734,8 @@ config XILINX_ZYNQMP_DPDMA
display driver.
# driver files
+source "drivers/dma/amd/Kconfig"
+
source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 374ea98faf43..ad6a03c052ec 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -49,7 +49,9 @@ obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o
obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
@@ -83,6 +85,7 @@ obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-$(CONFIG_INTEL_LDMA) += lgm/
+obj-y += amd/
obj-y += mediatek/
obj-y += qcom/
obj-y += stm32/
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5906eae26e2a..a58a1600dd65 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -112,7 +112,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
}
/**
- * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources
+ * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
* @adev: ACPI device to match with
* @adma: struct acpi_dma of the given DMA controller
*
@@ -305,7 +305,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
* found.
*
* Return:
- * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
+ * 0, if no information is available, -1 on mismatch, and 1 otherwise.
*/
static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
struct acpi_dma_spec *dma_spec)
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 0968176f323d..e6a6566b309e 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -153,7 +153,7 @@ struct msgdma_extended_desc {
/**
* struct msgdma_sw_desc - implements a sw descriptor
* @async_tx: support for the async_tx api
- * @hw_desc: assosiated HW descriptor
+ * @hw_desc: associated HW descriptor
* @node: node to move from the free list to the tx list
* @tx_list: transmit list node
*/
@@ -511,7 +511,7 @@ static void msgdma_copy_one(struct msgdma_device *mdev,
* of the DMA controller. The descriptor will get flushed to the
* FIFO, once the last word (control word) is written. Since we
* are not 100% sure that memcpy() writes all word in the "correct"
- * oder (address from low to high) on all architectures, we make
+ * order (address from low to high) on all architectures, we make
* sure this control word is written last by single coding it and
* adding some write-barriers here.
*/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 73a5cfb4da8a..38cdbca59485 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2006 ARM Ltd.
* Copyright (c) 2010 ST-Ericsson SA
- * Copyirght (c) 2017 Linaro Ltd.
+ * Copyright (c) 2017 Linaro Ltd.
*
* Author: Peter Pearse <peter.pearse@arm.com>
* Author: Linus Walleij <linus.walleij@linaro.org>
diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig
new file mode 100644
index 000000000000..7d1f51d69675
--- /dev/null
+++ b/drivers/dma/amd/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config AMD_QDMA
+ tristate "AMD Queue-based DMA"
+ depends on HAS_IOMEM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select REGMAP_MMIO
+ help
+ Enable support for the AMD Queue-based DMA subsystem. The primary
+ mechanism to transfer data using the QDMA is for the QDMA engine to
+ operate on instructions (descriptors) provided by the host operating
+ system. Using the descriptors, the QDMA can move data in either the
+ Host to Card (H2C) direction or the Card to Host (C2H) direction.
diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile
new file mode 100644
index 000000000000..37212be9364f
--- /dev/null
+++ b/drivers/dma/amd/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AMD_QDMA) += qdma/
diff --git a/drivers/dma/amd/qdma/Makefile b/drivers/dma/amd/qdma/Makefile
new file mode 100644
index 000000000000..011268fef377
--- /dev/null
+++ b/drivers/dma/amd/qdma/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AMD_QDMA) += amd-qdma.o
+
+amd-qdma-$(CONFIG_AMD_QDMA) := qdma.o qdma-comm-regs.o
diff --git a/drivers/dma/amd/qdma/qdma-comm-regs.c b/drivers/dma/amd/qdma/qdma-comm-regs.c
new file mode 100644
index 000000000000..9162f9d367cc
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma-comm-regs.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __QDMA_REGS_DEF_H
+#define __QDMA_REGS_DEF_H
+
+#include "qdma.h"
+
+const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX] = {
+ [QDMA_REGO_CTXT_DATA] = QDMA_REGO(0x804, 8),
+ [QDMA_REGO_CTXT_CMD] = QDMA_REGO(0x844, 1),
+ [QDMA_REGO_CTXT_MASK] = QDMA_REGO(0x824, 8),
+ [QDMA_REGO_MM_H2C_CTRL] = QDMA_REGO(0x1004, 1),
+ [QDMA_REGO_MM_C2H_CTRL] = QDMA_REGO(0x1204, 1),
+ [QDMA_REGO_QUEUE_COUNT] = QDMA_REGO(0x120, 1),
+ [QDMA_REGO_RING_SIZE] = QDMA_REGO(0x204, 1),
+ [QDMA_REGO_H2C_PIDX] = QDMA_REGO(0x18004, 1),
+ [QDMA_REGO_C2H_PIDX] = QDMA_REGO(0x18008, 1),
+ [QDMA_REGO_INTR_CIDX] = QDMA_REGO(0x18000, 1),
+ [QDMA_REGO_FUNC_ID] = QDMA_REGO(0x12c, 1),
+ [QDMA_REGO_ERR_INT] = QDMA_REGO(0xb04, 1),
+ [QDMA_REGO_ERR_STAT] = QDMA_REGO(0x248, 1),
+};
+
+const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX] = {
+ /* QDMA_REGO_CTXT_DATA fields */
+ [QDMA_REGF_IRQ_ENABLE] = QDMA_REGF(53, 53),
+ [QDMA_REGF_WBK_ENABLE] = QDMA_REGF(52, 52),
+ [QDMA_REGF_WBI_CHECK] = QDMA_REGF(34, 34),
+ [QDMA_REGF_IRQ_ARM] = QDMA_REGF(16, 16),
+ [QDMA_REGF_IRQ_VEC] = QDMA_REGF(138, 128),
+ [QDMA_REGF_IRQ_AGG] = QDMA_REGF(139, 139),
+ [QDMA_REGF_WBI_INTVL_ENABLE] = QDMA_REGF(35, 35),
+ [QDMA_REGF_MRKR_DISABLE] = QDMA_REGF(62, 62),
+ [QDMA_REGF_QUEUE_ENABLE] = QDMA_REGF(32, 32),
+ [QDMA_REGF_QUEUE_MODE] = QDMA_REGF(63, 63),
+ [QDMA_REGF_DESC_BASE] = QDMA_REGF(127, 64),
+ [QDMA_REGF_DESC_SIZE] = QDMA_REGF(49, 48),
+ [QDMA_REGF_RING_ID] = QDMA_REGF(47, 44),
+ [QDMA_REGF_QUEUE_BASE] = QDMA_REGF(11, 0),
+ [QDMA_REGF_QUEUE_MAX] = QDMA_REGF(44, 32),
+ [QDMA_REGF_FUNCTION_ID] = QDMA_REGF(24, 17),
+ [QDMA_REGF_INTR_AGG_BASE] = QDMA_REGF(66, 15),
+ [QDMA_REGF_INTR_VECTOR] = QDMA_REGF(11, 1),
+ [QDMA_REGF_INTR_SIZE] = QDMA_REGF(69, 67),
+ [QDMA_REGF_INTR_VALID] = QDMA_REGF(0, 0),
+ [QDMA_REGF_INTR_COLOR] = QDMA_REGF(14, 14),
+ [QDMA_REGF_INTR_FUNCTION_ID] = QDMA_REGF(125, 114),
+ /* QDMA_REGO_CTXT_CMD fields */
+ [QDMA_REGF_CMD_INDX] = QDMA_REGF(19, 7),
+ [QDMA_REGF_CMD_CMD] = QDMA_REGF(6, 5),
+ [QDMA_REGF_CMD_TYPE] = QDMA_REGF(4, 1),
+ [QDMA_REGF_CMD_BUSY] = QDMA_REGF(0, 0),
+ /* QDMA_REGO_QUEUE_COUNT fields */
+ [QDMA_REGF_QUEUE_COUNT] = QDMA_REGF(11, 0),
+ /* QDMA_REGO_ERR_INT fields */
+ [QDMA_REGF_ERR_INT_FUNC] = QDMA_REGF(11, 0),
+ [QDMA_REGF_ERR_INT_VEC] = QDMA_REGF(22, 12),
+ [QDMA_REGF_ERR_INT_ARM] = QDMA_REGF(24, 24),
+};
+
+#endif /* __QDMA_REGS_DEF_H */
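
The field table above describes bit positions inside an 8-word (256-bit)
context image rather than inside a single register: a QDMA_REGF(msb, lsb) span
may cross u32 boundaries, e.g. QDMA_REGF_DESC_BASE at bits 127:64. A hedged
illustration of how such a field lands in the array that qdma_reg_write()
later pushes through QDMA_REGO_CTXT_DATA (the general case, including
unaligned offsets, is handled by qdma_set_field() in qdma.c):

#include <linux/types.h>
#include <linux/kernel.h>

/* Sketch: packing a QDMA_REGF(127, 64) field such as DESC_BASE. */
static void sketch_pack_desc_base(u32 data[8], u64 desc_base)
{
	/* lsb 64 -> word 2, bit 0; msb 127 -> word 3, bit 31 */
	data[2] |= lower_32_bits(desc_base);	/* context bits 95..64 */
	data[3] |= upper_32_bits(desc_base);	/* context bits 127..96 */
}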
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
new file mode 100644
index 000000000000..b0a1f3ad851b
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for AMD Queue-based DMA Subsystem
+ *
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-map-ops.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/amd_qdma.h>
+#include <linux/regmap.h>
+
+#include "qdma.h"
+
+#define CHAN_STR(q) (((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
+#define QDMA_REG_OFF(d, r) ((d)->roffs[r].off)
+
+/* MMIO regmap config for all QDMA registers */
+static const struct regmap_config qdma_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan)
+{
+ return container_of(chan, struct qdma_queue, vchan.chan);
+}
+
+static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct qdma_mm_vdesc, vdesc);
+}
+
+static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev)
+{
+ u32 idx;
+
+ idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx;
+ qdev->qintr_ring_idx %= qdev->qintr_ring_num;
+
+ return idx;
+}
+
+static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data,
+ enum qdma_reg_fields field)
+{
+ const struct qdma_reg_field *f = &qdev->rfields[field];
+ u16 low_pos, hi_pos, low_bit, hi_bit;
+ u64 value = 0, mask;
+
+ low_pos = f->lsb / BITS_PER_TYPE(*data);
+ hi_pos = f->msb / BITS_PER_TYPE(*data);
+
+ if (low_pos == hi_pos) {
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+ hi_bit = f->msb % BITS_PER_TYPE(*data);
+ mask = GENMASK(hi_bit, low_bit);
+ value = (data[low_pos] & mask) >> low_bit;
+ } else if (hi_pos == low_pos + 1) {
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+ hi_bit = low_bit + (f->msb - f->lsb);
+ value = ((u64)data[hi_pos] << BITS_PER_TYPE(*data)) |
+ data[low_pos];
+ mask = GENMASK_ULL(hi_bit, low_bit);
+ value = (value & mask) >> low_bit;
+ } else {
+ hi_bit = f->msb % BITS_PER_TYPE(*data);
+ mask = GENMASK(hi_bit, 0);
+ value = data[hi_pos] & mask;
+ low_bit = f->msb - f->lsb - hi_bit;
+ value <<= low_bit;
+ low_bit -= 32;
+ value |= (u64)data[hi_pos - 1] << low_bit;
+ mask = GENMASK(31, 32 - low_bit);
+ value |= (data[hi_pos - 2] & mask) >> low_bit;
+ }
+
+ return value;
+}
+
+static void qdma_set_field(const struct qdma_device *qdev, u32 *data,
+ enum qdma_reg_fields field, u64 value)
+{
+ const struct qdma_reg_field *f = &qdev->rfields[field];
+ u16 low_pos, hi_pos, low_bit;
+
+ low_pos = f->lsb / BITS_PER_TYPE(*data);
+ hi_pos = f->msb / BITS_PER_TYPE(*data);
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+
+ data[low_pos++] |= value << low_bit;
+ if (low_pos <= hi_pos)
+ data[low_pos++] |= (u32)(value >> (32 - low_bit));
+ if (low_pos <= hi_pos)
+ data[low_pos] |= (u32)(value >> (64 - low_bit));
+}
+
+static inline int qdma_reg_write(const struct qdma_device *qdev,
+ const u32 *data, enum qdma_regs reg)
+{
+ const struct qdma_reg *r = &qdev->roffs[reg];
+ int ret;
+
+ if (r->count > 1)
+ ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count);
+ else
+ ret = regmap_write(qdev->regmap, r->off, *data);
+
+ return ret;
+}
+
+static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data,
+ enum qdma_regs reg)
+{
+ const struct qdma_reg *r = &qdev->roffs[reg];
+ int ret;
+
+ if (r->count > 1)
+ ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count);
+ else
+ ret = regmap_read(qdev->regmap, r->off, data);
+
+ return ret;
+}
+
+static int qdma_context_cmd_execute(const struct qdma_device *qdev,
+ enum qdma_ctxt_type type,
+ enum qdma_ctxt_cmd cmd, u16 index)
+{
+ u32 value = 0;
+ int ret;
+
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_INDX, index);
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_CMD, cmd);
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_TYPE, type);
+
+ ret = qdma_reg_write(qdev, &value, QDMA_REGO_CTXT_CMD);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(qdev->regmap,
+ QDMA_REG_OFF(qdev, QDMA_REGO_CTXT_CMD),
+ value,
+ !qdma_get_field(qdev, &value,
+ QDMA_REGF_CMD_BUSY),
+ QDMA_POLL_INTRVL_US,
+ QDMA_POLL_TIMEOUT_US);
+ if (ret) {
+ qdma_err(qdev, "Context command execution timed out");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qdma_context_write_data(const struct qdma_device *qdev,
+ const u32 *data)
+{
+ u32 mask[QDMA_CTXT_REGMAP_LEN];
+ int ret;
+
+ memset(mask, ~0, sizeof(mask));
+
+ ret = qdma_reg_write(qdev, mask, QDMA_REGO_CTXT_MASK);
+ if (ret)
+ return ret;
+
+ ret = qdma_reg_write(qdev, data, QDMA_REGO_CTXT_DATA);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void qdma_prep_sw_desc_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_sw_desc *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec);
+ qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid);
+
+ qdma_set_field(qdev, data, QDMA_REGF_DESC_SIZE, QDMA_DESC_SIZE_32B);
+ qdma_set_field(qdev, data, QDMA_REGF_RING_ID, QDMA_DEFAULT_RING_ID);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MODE, QDMA_QUEUE_OP_MM);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBK_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBI_CHECK, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_ARM, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_AGG, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBI_INTVL_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_MRKR_DISABLE, 1);
+}
+
+static void qdma_prep_intr_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_intr *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid);
+}
+
+static void qdma_prep_fmap_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_fmap *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MAX, ctxt->qmax);
+}
+
+/*
+ * Program the indirect context register space
+ *
+ * Once the queue is enabled, context is dynamically updated by hardware. Any
+ * modification of the context through this API when the queue is enabled can
+ * result in unexpected behavior. Reading the context when the queue is enabled
+ * is not recommended as it can result in reduced performance.
+ */
+static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type,
+ enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt)
+{
+ int ret;
+
+ mutex_lock(&qdev->ctxt_lock);
+ if (cmd == QDMA_CTXT_WRITE) {
+ ret = qdma_context_write_data(qdev, ctxt);
+ if (ret)
+ goto failed;
+ }
+
+ ret = qdma_context_cmd_execute(qdev, type, cmd, index);
+ if (ret)
+ goto failed;
+
+ if (cmd == QDMA_CTXT_READ) {
+ ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ mutex_unlock(&qdev->ctxt_lock);
+
+ return ret;
+}
+
+static int qdma_check_queue_status(struct qdma_device *qdev,
+ enum dma_transfer_direction dir, u16 qid)
+{
+ u32 status, data[QDMA_CTXT_REGMAP_LEN] = {0};
+ enum qdma_ctxt_type type;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV)
+ type = QDMA_CTXT_DESC_SW_H2C;
+ else
+ type = QDMA_CTXT_DESC_SW_C2H;
+ ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data);
+ if (ret)
+ return ret;
+
+ status = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE);
+ if (status) {
+ qdma_err(qdev, "queue %d already in use", qid);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qdma_clear_queue_context(const struct qdma_queue *queue)
+{
+ enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_PFTCH, };
+ enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_PFTCH, };
+ struct qdma_device *qdev = queue->qdev;
+ enum qdma_ctxt_type *type;
+ int ret, num, i;
+
+ if (queue->dir == DMA_MEM_TO_DEV) {
+ type = h2c_types;
+ num = ARRAY_SIZE(h2c_types);
+ } else {
+ type = c2h_types;
+ num = ARRAY_SIZE(c2h_types);
+ }
+ for (i = 0; i < num; i++) {
+ ret = qdma_prog_context(qdev, type[i], QDMA_CTXT_CLEAR,
+ queue->qid, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed to clear ctxt %d", type[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qdma_setup_fmap_context(struct qdma_device *qdev)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct qdma_ctxt_fmap fmap;
+ int ret;
+
+ ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_CLEAR,
+ qdev->fid, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed clearing context");
+ return ret;
+ }
+
+ fmap.qbase = 0;
+ fmap.qmax = qdev->chan_num * 2;
+ qdma_prep_fmap_context(qdev, &fmap, ctxt);
+ ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_WRITE,
+ qdev->fid, ctxt);
+ if (ret)
+ qdma_err(qdev, "Failed setup fmap, ret %d", ret);
+
+ return ret;
+}
+
+static int qdma_setup_queue_context(struct qdma_device *qdev,
+ const struct qdma_ctxt_sw_desc *sw_desc,
+ enum dma_transfer_direction dir, u16 qid)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ enum qdma_ctxt_type type;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV)
+ type = QDMA_CTXT_DESC_SW_H2C;
+ else
+ type = QDMA_CTXT_DESC_SW_C2H;
+
+ qdma_prep_sw_desc_context(qdev, sw_desc, ctxt);
+ /* Setup SW descriptor context */
+ ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt);
+ if (ret)
+ qdma_err(qdev, "Failed setup SW desc ctxt for queue: %d", qid);
+
+ return ret;
+}
+
+/*
+ * Enable or disable memory-mapped DMA engines
+ * 1: enable, 0: disable
+ */
+static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl)
+{
+ int ret;
+
+ ret = qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_H2C_CTRL);
+ ret |= qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_C2H_CTRL);
+
+ return ret;
+}
+
+static int qdma_get_hw_info(struct qdma_device *qdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
+ u32 value = 0;
+ int ret;
+
+ ret = qdma_reg_read(qdev, &value, QDMA_REGO_QUEUE_COUNT);
+ if (ret)
+ return ret;
+
+ value = qdma_get_field(qdev, &value, QDMA_REGF_QUEUE_COUNT) + 1;
+ if (pdata->max_mm_channels * 2 > value) {
+ qdma_err(qdev, "not enough hw queues %d", value);
+ return -EINVAL;
+ }
+ qdev->chan_num = pdata->max_mm_channels;
+
+ ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID);
+ if (ret)
+ return ret;
+
+ qdma_info(qdev, "max channel %d, function id %d",
+ qdev->chan_num, qdev->fid);
+
+ return 0;
+}
+
+static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx)
+{
+ struct qdma_device *qdev = queue->qdev;
+
+ return regmap_write(qdev->regmap, queue->pidx_reg,
+ pidx | QDMA_QUEUE_ARM_BIT);
+}
+
+static inline int qdma_update_cidx(const struct qdma_queue *queue,
+ u16 ridx, u16 cidx)
+{
+ struct qdma_device *qdev = queue->qdev;
+
+ return regmap_write(qdev->regmap, queue->cidx_reg,
+ ((u32)ridx << 16) | cidx);
+}
+
+/**
+ * qdma_free_vdesc - Free descriptor
+ * @vdesc: Virtual DMA descriptor
+ */
+static void qdma_free_vdesc(struct virt_dma_desc *vdesc)
+{
+ struct qdma_mm_vdesc *vd = to_qdma_vdesc(vdesc);
+
+ kfree(vd);
+}
+
+static int qdma_alloc_queues(struct qdma_device *qdev,
+ enum dma_transfer_direction dir)
+{
+ struct qdma_queue *q, **queues;
+ u32 i, pidx_base;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ queues = &qdev->h2c_queues;
+ pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_H2C_PIDX);
+ } else {
+ queues = &qdev->c2h_queues;
+ pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_C2H_PIDX);
+ }
+
+ *queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q),
+ GFP_KERNEL);
+ if (!*queues)
+ return -ENOMEM;
+
+ for (i = 0; i < qdev->chan_num; i++) {
+ ret = qdma_check_queue_status(qdev, dir, i);
+ if (ret)
+ return ret;
+
+ q = &(*queues)[i];
+ q->ring_size = QDMA_DEFAULT_RING_SIZE;
+ q->idx_mask = q->ring_size - 2;
+ q->qdev = qdev;
+ q->dir = dir;
+ q->qid = i;
+ q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE;
+ q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) +
+ i * QDMA_DMAP_REG_STRIDE;
+ q->vchan.desc_free = qdma_free_vdesc;
+ vchan_init(&q->vchan, &qdev->dma_dev);
+ }
+
+ return 0;
+}
+
+static int qdma_device_verify(struct qdma_device *qdev)
+{
+ u32 value;
+ int ret;
+
+ ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value);
+ if (ret)
+ return ret;
+
+ value = FIELD_GET(QDMA_IDENTIFIER_MASK, value);
+ if (value != QDMA_IDENTIFIER) {
+ qdma_err(qdev, "Invalid identifier");
+ return -ENODEV;
+ }
+ qdev->rfields = qdma_regfs_default;
+ qdev->roffs = qdma_regos_default;
+
+ return 0;
+}
+
+static int qdma_device_setup(struct qdma_device *qdev)
+{
+ struct device *dev = &qdev->pdev->dev;
+ u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
+ int ret = 0;
+
+ while (dev && get_dma_ops(dev))
+ dev = dev->parent;
+ if (!dev) {
+ qdma_err(qdev, "dma device not found");
+ return -EINVAL;
+ }
+ set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
+
+ ret = qdma_setup_fmap_context(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed setup fmap context");
+ return ret;
+ }
+
+ /* Setup global ring buffer size at QDMA_DEFAULT_RING_ID index */
+ ret = qdma_reg_write(qdev, &ring_sz, QDMA_REGO_RING_SIZE);
+ if (ret) {
+ qdma_err(qdev, "Failed to setup ring %d of size %ld",
+ QDMA_DEFAULT_RING_ID, QDMA_DEFAULT_RING_SIZE);
+ return ret;
+ }
+
+ /* Enable memory-mapped DMA engine in both directions */
+ ret = qdma_sgdma_control(qdev, 1);
+ if (ret) {
+ qdma_err(qdev, "Failed to SGDMA with error %d", ret);
+ return ret;
+ }
+
+ ret = qdma_alloc_queues(qdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret);
+ return ret;
+ }
+
+ ret = qdma_alloc_queues(qdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * qdma_free_queue_resources() - Free queue resources
+ * @chan: DMA channel
+ */
+static void qdma_free_queue_resources(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_device *qdev = queue->qdev;
+ struct device *dev = qdev->dma_dev.dev;
+
+ qdma_clear_queue_context(queue);
+ vchan_free_chan_resources(&queue->vchan);
+ dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+ queue->desc_base, queue->dma_desc_base);
+}
+
+/**
+ * qdma_alloc_queue_resources() - Allocate queue resources
+ * @chan: DMA channel
+ */
+static int qdma_alloc_queue_resources(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_device *qdev = queue->qdev;
+ struct qdma_ctxt_sw_desc desc;
+ size_t size;
+ int ret;
+
+ ret = qdma_clear_queue_context(queue);
+ if (ret)
+ return ret;
+
+ size = queue->ring_size * QDMA_MM_DESC_SIZE;
+ queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
+ &queue->dma_desc_base,
+ GFP_KERNEL);
+ if (!queue->desc_base) {
+ qdma_err(qdev, "Failed to allocate descriptor ring");
+ return -ENOMEM;
+ }
+
+ /* Setup SW descriptor queue context for DMA memory map */
+ desc.vec = qdma_get_intr_ring_idx(qdev);
+ desc.desc_base = queue->dma_desc_base;
+ ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid);
+ if (ret) {
+ qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
+ chan->name);
+ dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+ queue->dma_desc_base);
+ return ret;
+ }
+
+ queue->pidx = 0;
+ queue->cidx = 0;
+
+ return 0;
+}
+
+static bool qdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_queue_info *info = param;
+
+ return info->dir == queue->dir;
+}
+
+static int qdma_xfer_start(struct qdma_queue *queue)
+{
+ struct qdma_device *qdev = queue->qdev;
+ int ret;
+
+ if (!vchan_next_desc(&queue->vchan))
+ return 0;
+
+ qdma_dbg(qdev, "Tnx kickoff with P: %d for %s%d",
+ queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid);
+
+ ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx);
+ if (ret) {
+ qdma_err(qdev, "Failed to update PIDX to %d for %s queue: %d",
+ queue->pidx, CHAN_STR(queue), queue->qid);
+ }
+
+ return ret;
+}
+
+static void qdma_issue_pending(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->vchan.lock, flags);
+ if (vchan_issue_pending(&queue->vchan)) {
+ if (queue->submitted_vdesc) {
+ queue->issued_vdesc = queue->submitted_vdesc;
+ queue->submitted_vdesc = NULL;
+ }
+ qdma_xfer_start(queue);
+ }
+
+ spin_unlock_irqrestore(&queue->vchan.lock, flags);
+}
+
+static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
+{
+ struct qdma_mm_desc *desc;
+
+ if (((q->pidx + 1) & q->idx_mask) == q->cidx)
+ return NULL;
+
+ desc = q->desc_base + q->pidx;
+ q->pidx = (q->pidx + 1) & q->idx_mask;
+
+ return desc;
+}
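/*
 * Illustrative aside, not part of the patch: the ring-full test above
 * works because idx_mask is set to ring_size - 2 in qdma_alloc_queues()
 * and ring_size is a power of two plus one (QDMA_DEFAULT_RING_SIZE is
 * BIT(10) + 1), so pidx/cidx wrap over a power-of-two index space:
 *
 *	full  : ((pidx + 1) & idx_mask) == cidx
 *	empty : pidx == cidx
 *	used  : (pidx - cidx) & idx_mask
 *
 * The one hardware slot left out of the index space is presumably
 * reserved for the queue's status writeback entry.
 */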
+
+static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
+{
+ struct qdma_mm_desc *desc;
+ struct scatterlist *sg;
+ u64 addr, *src, *dst;
+ u32 rest, len;
+ int ret = 0;
+ u32 i;
+
+ if (!vdesc->sg_len)
+ return 0;
+
+ if (q->dir == DMA_MEM_TO_DEV) {
+ dst = &vdesc->dev_addr;
+ src = &addr;
+ } else {
+ dst = &addr;
+ src = &vdesc->dev_addr;
+ }
+
+ for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) {
+ addr = sg_dma_address(sg) + vdesc->sg_off;
+ rest = sg_dma_len(sg) - vdesc->sg_off;
+ while (rest) {
+ len = min_t(u32, rest, QDMA_MM_DESC_MAX_LEN);
+ desc = qdma_get_desc(q);
+ if (!desc) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ desc->src_addr = cpu_to_le64(*src);
+ desc->dst_addr = cpu_to_le64(*dst);
+ desc->len = cpu_to_le32(len);
+
+ vdesc->dev_addr += len;
+ vdesc->sg_off += len;
+ vdesc->pending_descs++;
+ addr += len;
+ rest -= len;
+ }
+ vdesc->sg_off = 0;
+ }
+out:
+ vdesc->sg_len -= i;
+ vdesc->pidx = q->pidx;
+ return ret;
+}
+
+static void qdma_fill_pending_vdesc(struct qdma_queue *q)
+{
+ struct virt_dma_chan *vc = &q->vchan;
+ struct qdma_mm_vdesc *vdesc = NULL;
+ struct virt_dma_desc *vd;
+ int ret;
+
+ if (!list_empty(&vc->desc_issued)) {
+ vd = &q->issued_vdesc->vdesc;
+ list_for_each_entry_from(vd, &vc->desc_issued, node) {
+ vdesc = to_qdma_vdesc(vd);
+ ret = qdma_hw_enqueue(q, vdesc);
+ if (ret) {
+ q->issued_vdesc = vdesc;
+ return;
+ }
+ }
+ q->issued_vdesc = vdesc;
+ }
+
+ if (list_empty(&vc->desc_submitted))
+ return;
+
+ if (q->submitted_vdesc)
+ vd = &q->submitted_vdesc->vdesc;
+ else
+ vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node);
+
+ list_for_each_entry_from(vd, &vc->desc_submitted, node) {
+ vdesc = to_qdma_vdesc(vd);
+ ret = qdma_hw_enqueue(q, vdesc);
+ if (ret)
+ break;
+ }
+ q->submitted_vdesc = vdesc;
+}
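/*
 * Illustrative aside, not part of the patch: the queue keeps two cursors
 * into the virt-dma lists. issued_vdesc records how far qdma_hw_enqueue()
 * has progressed through desc_issued, while submitted_vdesc does the same
 * for desc_submitted, so descriptors are staged into the hardware ring as
 * soon as they are submitted, before their cookies are issued. When
 * qdma_hw_enqueue() hits a full ring (-EBUSY), the cursor is parked and
 * qdma_queue_isr() resumes filling from it once completions free slots.
 */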
+
+static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct qdma_queue *q = to_qdma_queue(&vc->chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ vd = container_of(tx, struct virt_dma_desc, tx);
+ spin_lock_irqsave(&vc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
+ list_move_tail(&vd->node, &vc->desc_submitted);
+ qdma_fill_pending_vdesc(q);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct qdma_queue *q = to_qdma_queue(chan);
+ struct dma_async_tx_descriptor *tx;
+ struct qdma_mm_vdesc *vdesc;
+
+ vdesc = kzalloc(sizeof(*vdesc), GFP_NOWAIT);
+ if (!vdesc)
+ return NULL;
+ vdesc->sgl = sgl;
+ vdesc->sg_len = sg_len;
+ if (dir == DMA_MEM_TO_DEV)
+ vdesc->dev_addr = q->cfg.dst_addr;
+ else
+ vdesc->dev_addr = q->cfg.src_addr;
+
+ tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags);
+ tx->tx_submit = qdma_tx_submit;
+
+ return tx;
+}
+
+static int qdma_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct qdma_queue *q = to_qdma_queue(chan);
+
+ memcpy(&q->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int qdma_arm_err_intr(const struct qdma_device *qdev)
+{
+ u32 value = 0;
+
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid);
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx);
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_ARM, 1);
+
+ return qdma_reg_write(qdev, &value, QDMA_REGO_ERR_INT);
+}
+
+static irqreturn_t qdma_error_isr(int irq, void *data)
+{
+ struct qdma_device *qdev = data;
+ u32 err_stat = 0;
+ int ret;
+
+ ret = qdma_reg_read(qdev, &err_stat, QDMA_REGO_ERR_STAT);
+ if (ret) {
+ qdma_err(qdev, "read error state failed, ret %d", ret);
+ goto out;
+ }
+
+ qdma_err(qdev, "global error %d", err_stat);
+ ret = qdma_reg_write(qdev, &err_stat, QDMA_REGO_ERR_STAT);
+ if (ret)
+ qdma_err(qdev, "clear error state failed, ret %d", ret);
+
+out:
+ qdma_arm_err_intr(qdev);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qdma_queue_isr(int irq, void *data)
+{
+ struct qdma_intr_ring *intr = data;
+ struct qdma_queue *q = NULL;
+ struct qdma_device *qdev;
+ u32 index, comp_desc;
+ u64 intr_ent;
+ u8 color;
+ int ret;
+ u16 qid;
+
+ qdev = intr->qdev;
+ index = intr->cidx;
+ while (1) {
+ struct virt_dma_desc *vd;
+ struct qdma_mm_vdesc *vdesc;
+ unsigned long flags;
+ u32 cidx;
+
+ intr_ent = le64_to_cpu(intr->base[index]);
+ color = FIELD_GET(QDMA_INTR_MASK_COLOR, intr_ent);
+ if (color != intr->color)
+ break;
+
+ qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent);
+ if (FIELD_GET(QDMA_INTR_MASK_TYPE, intr_ent))
+ q = qdev->c2h_queues;
+ else
+ q = qdev->h2c_queues;
+ q += qid;
+
+ cidx = FIELD_GET(QDMA_INTR_MASK_CIDX, intr_ent);
+
+ spin_lock_irqsave(&q->vchan.lock, flags);
+ comp_desc = (cidx - q->cidx) & q->idx_mask;
+
+ vd = vchan_next_desc(&q->vchan);
+ if (!vd)
+ goto skip;
+
+ vdesc = to_qdma_vdesc(vd);
+ while (comp_desc > vdesc->pending_descs) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ comp_desc -= vdesc->pending_descs;
+ vd = vchan_next_desc(&q->vchan);
+ vdesc = to_qdma_vdesc(vd);
+ }
+ vdesc->pending_descs -= comp_desc;
+ if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ q->cidx = cidx;
+
+ qdma_fill_pending_vdesc(q);
+ qdma_xfer_start(q);
+
+skip:
+ spin_unlock_irqrestore(&q->vchan.lock, flags);
+
+ /*
+ * Wrap the index value and flip the expected color value if
+ * interrupt aggregation PIDX has wrapped around.
+ */
+ index++;
+ index &= QDMA_INTR_RING_IDX_MASK;
+ if (!index)
+ intr->color = !intr->color;
+ }
+
+ /*
+ * Update the software interrupt aggregation ring CIDX if a valid entry
+ * was found.
+ */
+ if (q) {
+ qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index);
+
+ /*
+ * Record the last read index of status descriptor from the
+ * interrupt aggregation ring.
+ */
+ intr->cidx = index;
+
+ ret = qdma_update_cidx(q, intr->ridx, index);
+ if (ret) {
+ qdma_err(qdev, "Failed to update IRQ CIDX");
+ return IRQ_NONE;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qdma_init_error_irq(struct qdma_device *qdev)
+{
+ struct device *dev = &qdev->pdev->dev;
+ int ret;
+ u32 vec;
+
+ vec = qdev->queue_irq_start - 1;
+
+ ret = devm_request_threaded_irq(dev, vec, NULL, qdma_error_isr,
+ IRQF_ONESHOT, "amd-qdma-error", qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to request error IRQ vector: %d", vec);
+ return ret;
+ }
+
+ ret = qdma_arm_err_intr(qdev);
+ if (ret)
+ qdma_err(qdev, "Failed to arm err interrupt, ret %d", ret);
+
+ return ret;
+}
+
+static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct device *dev = &qdev->pdev->dev;
+ struct qdma_intr_ring *ring;
+ struct qdma_ctxt_intr intr_ctxt;
+ u32 vector;
+ int ret, i;
+
+ qdev->qintr_ring_num = qdev->queue_irq_num;
+ qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num,
+ sizeof(*qdev->qintr_rings),
+ GFP_KERNEL);
+ if (!qdev->qintr_rings)
+ return -ENOMEM;
+
+ vector = qdev->queue_irq_start;
+ for (i = 0; i < qdev->qintr_ring_num; i++, vector++) {
+ ring = &qdev->qintr_rings[i];
+ ring->qdev = qdev;
+ ring->msix_id = qdev->err_irq_idx + i + 1;
+ ring->ridx = i;
+ ring->color = 1;
+ ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+ &ring->dev_base, GFP_KERNEL);
+ if (!ring->base) {
+ qdma_err(qdev, "Failed to alloc intr ring %d", i);
+ return -ENOMEM;
+ }
+ intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base);
+ intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096;
+ intr_ctxt.vec = ring->msix_id;
+ intr_ctxt.valid = true;
+ intr_ctxt.color = true;
+ ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_CLEAR, ring->ridx, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed clear intr ctx, ret %d", ret);
+ return ret;
+ }
+
+ qdma_prep_intr_context(qdev, &intr_ctxt, ctxt);
+ ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_WRITE, ring->ridx, ctxt);
+ if (ret) {
+ qdma_err(qdev, "Failed setup intr ctx, ret %d", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, vector, NULL,
+ qdma_queue_isr, IRQF_ONESHOT,
+ "amd-qdma-queue", ring);
+ if (ret) {
+ qdma_err(qdev, "Failed to request irq %d", vector);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qdma_intr_init(struct qdma_device *qdev)
+{
+ int ret;
+
+ ret = qdma_init_error_irq(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to init error IRQs, ret %d", ret);
+ return ret;
+ }
+
+ ret = qdmam_alloc_qintr_rings(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to init queue IRQs, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void amd_qdma_remove(struct platform_device *pdev)
+{
+ struct qdma_device *qdev = platform_get_drvdata(pdev);
+
+ qdma_sgdma_control(qdev, 0);
+ dma_async_device_unregister(&qdev->dma_dev);
+
+ mutex_destroy(&qdev->ctxt_lock);
+}
+
+static int amd_qdma_probe(struct platform_device *pdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct qdma_device *qdev;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+ if (!qdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, qdev);
+ qdev->pdev = pdev;
+ mutex_init(&qdev->ctxt_lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ qdma_err(qdev, "Failed to get IRQ resource");
+ ret = -ENODEV;
+ goto failed;
+ }
+ qdev->err_irq_idx = pdata->irq_index;
+ qdev->queue_irq_start = res->start + 1;
+ qdev->queue_irq_num = resource_size(res) - 1;
+
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ qdma_err(qdev, "Failed to map IO resource, err %d", ret);
+ goto failed;
+ }
+
+ qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &qdma_regmap_config);
+ if (IS_ERR(qdev->regmap)) {
+ ret = PTR_ERR(qdev->regmap);
+ qdma_err(qdev, "Regmap init failed, err %d", ret);
+ goto failed;
+ }
+
+ ret = qdma_device_verify(qdev);
+ if (ret)
+ goto failed;
+
+ ret = qdma_get_hw_info(qdev);
+ if (ret)
+ goto failed;
+
+ INIT_LIST_HEAD(&qdev->dma_dev.channels);
+
+ ret = qdma_device_setup(qdev);
+ if (ret)
+ goto failed;
+
+ ret = qdma_intr_init(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to initialize IRQs %d", ret);
+ goto failed_disable_engine;
+ }
+
+ dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask);
+
+ qdev->dma_dev.dev = &pdev->dev;
+ qdev->dma_dev.filter.map = pdata->device_map;
+ qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2;
+ qdev->dma_dev.filter.fn = qdma_filter_fn;
+ qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources;
+ qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources;
+ qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg;
+ qdev->dma_dev.device_config = qdma_device_config;
+ qdev->dma_dev.device_issue_pending = qdma_issue_pending;
+ qdev->dma_dev.device_tx_status = dma_cookie_status;
+ qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ ret = dma_async_device_register(&qdev->dma_dev);
+ if (ret) {
+ qdma_err(qdev, "Failed to register AMD QDMA: %d", ret);
+ goto failed_disable_engine;
+ }
+
+ return 0;
+
+failed_disable_engine:
+ qdma_sgdma_control(qdev, 0);
+failed:
+ mutex_destroy(&qdev->ctxt_lock);
+ qdma_err(qdev, "Failed to probe AMD QDMA driver");
+ return ret;
+}
+
+static struct platform_driver amd_qdma_driver = {
+ .driver = {
+ .name = "amd-qdma",
+ },
+ .probe = amd_qdma_probe,
+ .remove_new = amd_qdma_remove,
+};
+
+module_platform_driver(amd_qdma_driver);
+
+MODULE_DESCRIPTION("AMD QDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/amd/qdma/qdma.h b/drivers/dma/amd/qdma/qdma.h
new file mode 100644
index 000000000000..94089f1f0c11
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * DMA header for AMD Queue-based DMA Subsystem
+ *
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __QDMA_H
+#define __QDMA_H
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../../virt-dma.h"
+
+#define DISABLE 0
+#define ENABLE 1
+
+#define QDMA_MIN_IRQ 3
+#define QDMA_INTR_NAME_MAX_LEN 30
+#define QDMA_INTR_PREFIX "amd-qdma"
+
+#define QDMA_IDENTIFIER 0x1FD3
+#define QDMA_DEFAULT_RING_SIZE (BIT(10) + 1)
+#define QDMA_DEFAULT_RING_ID 0
+#define QDMA_POLL_INTRVL_US 10 /* 10us */
+#define QDMA_POLL_TIMEOUT_US (500 * 1000) /* 500ms */
+#define QDMA_DMAP_REG_STRIDE 16
+#define QDMA_CTXT_REGMAP_LEN 8 /* 8 regs */
+#define QDMA_MM_DESC_SIZE 32 /* Bytes */
+#define QDMA_MM_DESC_LEN_BITS 28
+#define QDMA_MM_DESC_MAX_LEN (BIT(QDMA_MM_DESC_LEN_BITS) - 1)
+#define QDMA_MIN_DMA_ALLOC_SIZE 4096
+#define QDMA_INTR_RING_SIZE BIT(13)
+#define QDMA_INTR_RING_IDX_MASK GENMASK(9, 0)
+#define QDMA_INTR_RING_BASE(_addr) ((_addr) >> 12)
+
+#define QDMA_IDENTIFIER_REGOFF 0x0
+#define QDMA_IDENTIFIER_MASK GENMASK(31, 16)
+#define QDMA_QUEUE_ARM_BIT BIT(16)
+
+#define qdma_err(qdev, fmt, args...) \
+ dev_err(&(qdev)->pdev->dev, fmt, ##args)
+
+#define qdma_dbg(qdev, fmt, args...) \
+ dev_dbg(&(qdev)->pdev->dev, fmt, ##args)
+
+#define qdma_info(qdev, fmt, args...) \
+ dev_info(&(qdev)->pdev->dev, fmt, ##args)
+
+enum qdma_reg_fields {
+ QDMA_REGF_IRQ_ENABLE,
+ QDMA_REGF_WBK_ENABLE,
+ QDMA_REGF_WBI_CHECK,
+ QDMA_REGF_IRQ_ARM,
+ QDMA_REGF_IRQ_VEC,
+ QDMA_REGF_IRQ_AGG,
+ QDMA_REGF_WBI_INTVL_ENABLE,
+ QDMA_REGF_MRKR_DISABLE,
+ QDMA_REGF_QUEUE_ENABLE,
+ QDMA_REGF_QUEUE_MODE,
+ QDMA_REGF_DESC_BASE,
+ QDMA_REGF_DESC_SIZE,
+ QDMA_REGF_RING_ID,
+ QDMA_REGF_CMD_INDX,
+ QDMA_REGF_CMD_CMD,
+ QDMA_REGF_CMD_TYPE,
+ QDMA_REGF_CMD_BUSY,
+ QDMA_REGF_QUEUE_COUNT,
+ QDMA_REGF_QUEUE_MAX,
+ QDMA_REGF_QUEUE_BASE,
+ QDMA_REGF_FUNCTION_ID,
+ QDMA_REGF_INTR_AGG_BASE,
+ QDMA_REGF_INTR_VECTOR,
+ QDMA_REGF_INTR_SIZE,
+ QDMA_REGF_INTR_VALID,
+ QDMA_REGF_INTR_COLOR,
+ QDMA_REGF_INTR_FUNCTION_ID,
+ QDMA_REGF_ERR_INT_FUNC,
+ QDMA_REGF_ERR_INT_VEC,
+ QDMA_REGF_ERR_INT_ARM,
+ QDMA_REGF_MAX
+};
+
+enum qdma_regs {
+ QDMA_REGO_CTXT_DATA,
+ QDMA_REGO_CTXT_CMD,
+ QDMA_REGO_CTXT_MASK,
+ QDMA_REGO_MM_H2C_CTRL,
+ QDMA_REGO_MM_C2H_CTRL,
+ QDMA_REGO_QUEUE_COUNT,
+ QDMA_REGO_RING_SIZE,
+ QDMA_REGO_H2C_PIDX,
+ QDMA_REGO_C2H_PIDX,
+ QDMA_REGO_INTR_CIDX,
+ QDMA_REGO_FUNC_ID,
+ QDMA_REGO_ERR_INT,
+ QDMA_REGO_ERR_STAT,
+ QDMA_REGO_MAX
+};
+
+struct qdma_reg_field {
+ u16 lsb; /* Least significant bit of field */
+ u16 msb; /* Most significant bit of field */
+};
+
+struct qdma_reg {
+ u32 off;
+ u32 count;
+};
+
+#define QDMA_REGF(_msb, _lsb) { \
+ .lsb = (_lsb), \
+ .msb = (_msb), \
+}
+
+#define QDMA_REGO(_off, _count) { \
+ .off = (_off), \
+ .count = (_count), \
+}
+
+enum qdma_desc_size {
+ QDMA_DESC_SIZE_8B,
+ QDMA_DESC_SIZE_16B,
+ QDMA_DESC_SIZE_32B,
+ QDMA_DESC_SIZE_64B,
+};
+
+enum qdma_queue_op_mode {
+ QDMA_QUEUE_OP_STREAM,
+ QDMA_QUEUE_OP_MM,
+};
+
+enum qdma_ctxt_type {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_WRB,
+ QDMA_CTXT_PFTCH,
+ QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_RSVD,
+ QDMA_CTXT_HOST_PROFILE,
+ QDMA_CTXT_TIMER,
+ QDMA_CTXT_FMAP,
+ QDMA_CTXT_FNC_STS,
+};
+
+enum qdma_ctxt_cmd {
+ QDMA_CTXT_CLEAR,
+ QDMA_CTXT_WRITE,
+ QDMA_CTXT_READ,
+ QDMA_CTXT_INVALIDATE,
+ QDMA_CTXT_MAX
+};
+
+struct qdma_ctxt_sw_desc {
+ u64 desc_base;
+ u16 vec;
+};
+
+struct qdma_ctxt_intr {
+ u64 agg_base;
+ u16 vec;
+ u32 size;
+ bool valid;
+ bool color;
+};
+
+struct qdma_ctxt_fmap {
+ u16 qbase;
+ u16 qmax;
+};
+
+struct qdma_device;
+
+struct qdma_mm_desc {
+ __le64 src_addr;
+ __le32 len;
+ __le32 reserved1;
+ __le64 dst_addr;
+ __le64 reserved2;
+} __packed;
+
+struct qdma_mm_vdesc {
+ struct virt_dma_desc vdesc;
+ struct qdma_queue *queue;
+ struct scatterlist *sgl;
+ u64 sg_off;
+ u32 sg_len;
+ u64 dev_addr;
+ u32 pidx;
+ u32 pending_descs;
+ struct dma_slave_config cfg;
+};
+
+#define QDMA_VDESC_QUEUED(vdesc) (!(vdesc)->sg_len)
+
+struct qdma_queue {
+ struct qdma_device *qdev;
+ struct virt_dma_chan vchan;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config cfg;
+ struct qdma_mm_desc *desc_base;
+ struct qdma_mm_vdesc *submitted_vdesc;
+ struct qdma_mm_vdesc *issued_vdesc;
+ dma_addr_t dma_desc_base;
+ u32 pidx_reg;
+ u32 cidx_reg;
+ u32 ring_size;
+ u32 idx_mask;
+ u16 qid;
+ u32 pidx;
+ u32 cidx;
+};
+
+struct qdma_intr_ring {
+ struct qdma_device *qdev;
+ __le64 *base;
+ dma_addr_t dev_base;
+ char msix_name[QDMA_INTR_NAME_MAX_LEN];
+ u32 msix_vector;
+ u16 msix_id;
+ u32 ring_size;
+ u16 ridx;
+ u16 cidx;
+ u8 color;
+};
+
+#define QDMA_INTR_MASK_PIDX GENMASK_ULL(15, 0)
+#define QDMA_INTR_MASK_CIDX GENMASK_ULL(31, 16)
+#define QDMA_INTR_MASK_DESC_COLOR GENMASK_ULL(32, 32)
+#define QDMA_INTR_MASK_STATE GENMASK_ULL(34, 33)
+#define QDMA_INTR_MASK_ERROR GENMASK_ULL(36, 35)
+#define QDMA_INTR_MASK_TYPE GENMASK_ULL(38, 38)
+#define QDMA_INTR_MASK_QID GENMASK_ULL(62, 39)
+#define QDMA_INTR_MASK_COLOR GENMASK_ULL(63, 63)
+
+struct qdma_device {
+ struct platform_device *pdev;
+ struct dma_device dma_dev;
+ struct regmap *regmap;
+ struct mutex ctxt_lock; /* protect ctxt registers */
+ const struct qdma_reg_field *rfields;
+ const struct qdma_reg *roffs;
+ struct qdma_queue *h2c_queues;
+ struct qdma_queue *c2h_queues;
+ struct qdma_intr_ring *qintr_rings;
+ u32 qintr_ring_num;
+ u32 qintr_ring_idx;
+ u32 chan_num;
+ u32 queue_irq_start;
+ u32 queue_irq_num;
+ u32 err_irq_idx;
+ u32 fid;
+};
+
+extern const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX];
+extern const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX];
+
+#endif /* __QDMA_H */
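
The QDMA_INTR_MASK_* definitions above describe one 64-bit status descriptor
in the interrupt aggregation ring. Since they are GENMASK_ULL() constants,
each field can be pulled out with FIELD_GET(); a hedged helper sketch — the
struct is purely illustrative:

#include <linux/bitfield.h>
#include <linux/types.h>

struct sketch_intr_ent {
	u16 cidx;	/* consumer index snapshot for the signalling queue */
	u32 qid;	/* queue the event belongs to */
	bool is_c2h;	/* TYPE bit: C2H queue when set, H2C otherwise */
	u8 color;	/* validity color, compared against the ring's state */
};

static inline struct sketch_intr_ent sketch_decode_intr_ent(u64 ent)
{
	return (struct sketch_intr_ent){
		.cidx	= FIELD_GET(QDMA_INTR_MASK_CIDX, ent),
		.qid	= FIELD_GET(QDMA_INTR_MASK_QID, ent),
		.is_c2h	= FIELD_GET(QDMA_INTR_MASK_TYPE, ent),
		.color	= FIELD_GET(QDMA_INTR_MASK_COLOR, ent),
	};
}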
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 40052d1bd0b5..baebddc740b0 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -339,7 +339,7 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
* @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
- * @all_chan_mask: all channels availlable in a mask
+ * @all_chan_mask: all channels available in a mask
* @lli_pool: hw lli table
* @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
@@ -668,7 +668,7 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
* CTRLA is read in turn, next the DSCR is read a second time. If the two
* consecutive read values of the DSCR are the same then we assume both refers
* to the very same LLI as well as the CTRLA value read inbetween does. For
- * cyclic tranfers, the assumption is that a full loop is "not so fast". If the
+ * cyclic transfers, the assumption is that a full loop is "not so fast". If the
* two DSCR values are different, we read again the CTRLA then the DSCR till two
* consecutive read values from DSCR are equal or till the maximum trials is
* reach. This algorithm is very unlikely not to find a stable value for DSCR.
@@ -700,7 +700,7 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
break;
/*
- * DSCR has changed inside the DMA controller, so the previouly
+ * DSCR has changed inside the DMA controller, so the previously
* read value of CTRLA may refer to an already processed
* descriptor hence could be outdated. We need to update ctrla
* to match the current descriptor.
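
The comment being fixed describes at_hdmac's residue algorithm: DSCR is read,
then CTRLA, then DSCR again, and only a stable DSCR pair proves CTRLA was
sampled within a single LLI. A hedged sketch of that double-read pattern —
channel_readl() and ATC_MAX_DSCR_TRIALS are the driver's own helpers, assumed
here:

/* Sketch: sample a consistent (DSCR, CTRLA) pair from a running channel. */
static int sketch_read_stable_pair(struct at_dma_chan *atchan,
				   u32 *dscr, u32 *ctrla)
{
	int tries = ATC_MAX_DSCR_TRIALS;
	u32 d1, d2;

	d1 = channel_readl(atchan, DSCR);
	while (tries--) {
		*ctrla = channel_readl(atchan, CTRLA);
		d2 = channel_readl(atchan, DSCR);
		if (d2 == d1) {		/* CTRLA belongs to this very LLI */
			*dscr = d2;
			return 0;
		}
		d1 = d2;		/* controller advanced; sample again */
	}

	return -ETIMEDOUT;
}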
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index fbaacb4c19b2..cfa6e1167a1f 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -15,7 +15,7 @@
* number of hardware rings over one or more SBA hardware devices. By
* design, the internal buffer size of SBA hardware device is limited
* but all offload operations supported by SBA can be broken down into
- * multiple small size requests and executed parallely on multiple SBA
+ * multiple small size requests and executed in parallel on multiple SBA
* hardware devices for achieving high through-put.
*
* The Broadcom SBA RAID driver does not require any register programming
@@ -135,7 +135,7 @@ struct sba_device {
u32 max_xor_srcs;
u32 max_resp_pool_size;
u32 max_cmds_pool_size;
- /* Maibox client and Mailbox channels */
+ /* Mailbox client and Mailbox channels */
struct mbox_client client;
struct mbox_chan *mchan;
struct device *mbox_dev;
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 9d74fe97452e..e1b92b4d7b05 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -369,7 +369,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
/* the last frame requires extra flags */
d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
- /* detect a size missmatch */
+ /* detect a size mismatch */
if (buf_len && (d->size != buf_len))
goto error_cb;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c380a4dda77a..c1357d7f3dc6 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1070,7 +1070,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
if (!name)
dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id);
else
- dev_set_name(&chan->dev->device, name);
+ dev_set_name(&chan->dev->device, "%s", name);
rc = device_register(&chan->dev->device);
if (rc)
goto err_out_ida;
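
The dev_set_name() one-liner closes a classic format-string hole: the function
is printf-like, so a channel name that happens to contain a conversion
specifier must never be passed as the format itself. A minimal illustration:

#include <linux/device.h>

static void sketch_set_chan_name(struct device *dev, const char *name)
{
	/* dev_set_name(dev, name);       BAD: a '%' in name gets parsed */
	dev_set_name(dev, "%s", name);	/* GOOD: name is treated as data */
}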
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 1f201a542b37..91b2fbc0b864 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -500,7 +500,7 @@ static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
per_sec *= val;
per_sec = INT_TO_FIXPT(per_sec);
- do_div(per_sec, runtime);
+ do_div(per_sec, (u32)runtime);
return per_sec;
}
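
do_div() performs a 64-by-32 division: the u64 dividend is updated in place
with the quotient and the 32-bit remainder is returned, so a 64-bit divisor
would be silently truncated — the added cast makes that narrowing of the s64
runtime explicit. A small usage sketch (values illustrative):

#include <asm/div64.h>
#include <linux/types.h>

static u64 sketch_bytes_per_sec(u64 bytes, s64 runtime_us)
{
	u64 per_sec = bytes * 1000000;	/* scale to per-second */

	/* quotient lands in per_sec; the remainder is the return value */
	do_div(per_sec, (u32)runtime_us);

	return per_sec;
}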
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d6c60635e90d..995427afe077 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -17,14 +17,15 @@
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/of_dma.h>
+#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/platform_data/dma-ep93xx.h>
-
#include "dmaengine.h"
/* M2P registers */
@@ -104,6 +105,31 @@
#define DMA_MAX_CHAN_BYTES 0xffff
#define DMA_MAX_CHAN_DESCRIPTORS 32
+/*
+ * M2P channels.
+ *
+ * Note that these values are also directly used for setting the PPALLOC
+ * register.
+ */
+#define EP93XX_DMA_I2S1 0
+#define EP93XX_DMA_I2S2 1
+#define EP93XX_DMA_AAC1 2
+#define EP93XX_DMA_AAC2 3
+#define EP93XX_DMA_AAC3 4
+#define EP93XX_DMA_I2S3 5
+#define EP93XX_DMA_UART1 6
+#define EP93XX_DMA_UART2 7
+#define EP93XX_DMA_UART3 8
+#define EP93XX_DMA_IRDA 9
+/* M2M channels */
+#define EP93XX_DMA_SSP 10
+#define EP93XX_DMA_IDE 11
+
+enum ep93xx_dma_type {
+ M2P_DMA,
+ M2M_DMA,
+};
+
struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
@@ -129,11 +155,17 @@ struct ep93xx_dma_desc {
struct list_head node;
};
+struct ep93xx_dma_chan_cfg {
+ u8 port;
+ enum dma_transfer_direction dir;
+};
+
/**
* struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
* @chan: dmaengine API channel
* @edma: pointer to the engine device
* @regs: memory mapped registers
+ * @dma_cfg: channel number, direction
* @irq: interrupt number of the channel
* @clk: clock used by this channel
* @tasklet: channel specific tasklet used for callbacks
@@ -157,14 +189,12 @@ struct ep93xx_dma_desc {
* descriptor in the chain. When a descriptor is moved to the @active queue,
* the first and chained descriptors are flattened into a single list.
*
- * @chan.private holds pointer to &struct ep93xx_dma_data which contains
- * necessary channel configuration information. For memcpy channels this must
- * be %NULL.
*/
struct ep93xx_dma_chan {
struct dma_chan chan;
const struct ep93xx_dma_engine *edma;
void __iomem *regs;
+ struct ep93xx_dma_chan_cfg dma_cfg;
int irq;
struct clk *clk;
struct tasklet_struct tasklet;
@@ -216,6 +246,11 @@ struct ep93xx_dma_engine {
struct ep93xx_dma_chan channels[] __counted_by(num_channels);
};
+struct ep93xx_edma_data {
+ u32 id;
+ size_t num_channels;
+};
+
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
return &edmac->chan.dev->device;
@@ -226,6 +261,31 @@ static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
return container_of(chan, struct ep93xx_dma_chan, chan);
}
+static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
+{
+ if (device_is_compatible(chan->device->dev, "cirrus,ep9301-dma-m2p"))
+ return true;
+
+ return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
+}
+
+/*
+ * ep93xx_dma_chan_direction - returns the direction the channel can be used in
+ *
+ * This function can be used in filter functions to find out whether the
+ * channel supports a given DMA direction. Only M2P channels have this
+ * limitation; for M2M channels the direction is configurable.
+ */
+static inline enum dma_transfer_direction
+ep93xx_dma_chan_direction(struct dma_chan *chan)
+{
+ if (!ep93xx_dma_chan_is_m2p(chan))
+ return DMA_TRANS_NONE;
+
+ /* even channels are for TX, odd for RX */
+ return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+}
+
/**
* ep93xx_dma_set_active - set new active descriptor chain
* @edmac: channel
@@ -318,10 +378,9 @@ static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_data *data = edmac->chan.private;
u32 control;
- writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
+ writel(edmac->dma_cfg.port & 0xf, edmac->regs + M2P_PPALLOC);
control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
| M2P_CONTROL_ENABLE;
@@ -458,16 +517,15 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
- const struct ep93xx_dma_data *data = edmac->chan.private;
u32 control = 0;
- if (!data) {
+ if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* This is memcpy channel, nothing to configure */
writel(control, edmac->regs + M2M_CONTROL);
return 0;
}
- switch (data->port) {
+ switch (edmac->dma_cfg.port) {
case EP93XX_DMA_SSP:
/*
* This was found via experimenting - anything less than 5
@@ -477,7 +535,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control = (5 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_NO_HDSK;
- if (data->direction == DMA_MEM_TO_DEV) {
+ if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
control |= M2M_CONTROL_DAH;
control |= M2M_CONTROL_TM_TX;
control |= M2M_CONTROL_RSS_SSPTX;
@@ -493,7 +551,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
* This IDE part is totally untested. Values below are taken
* from the EP93xx Users's Guide and might not be correct.
*/
- if (data->direction == DMA_MEM_TO_DEV) {
+ if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
/* Worst case from the UG */
control = (3 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_DAH;
@@ -548,7 +606,6 @@ static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_data *data = edmac->chan.private;
u32 control = readl(edmac->regs + M2M_CONTROL);
/*
@@ -574,7 +631,7 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
control |= M2M_CONTROL_ENABLE;
writel(control, edmac->regs + M2M_CONTROL);
- if (!data) {
+ if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
/*
* For memcpy channels the software trigger must be asserted
* in order to start the memcpy operation.
@@ -636,7 +693,7 @@ static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
*/
if (ep93xx_dma_advance_active(edmac)) {
m2m_fill_desc(edmac);
- if (done && !edmac->chan.private) {
+ if (done && edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* Software trigger for memcpy channel */
control = readl(edmac->regs + M2M_CONTROL);
control |= M2M_CONTROL_START;
@@ -841,7 +898,7 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
desc = container_of(tx, struct ep93xx_dma_desc, txd);
/*
- * If nothing is currently prosessed, we push this descriptor
+ * If nothing is currently processed, we push this descriptor
* directly to the hardware. Otherwise we put the descriptor
* to the pending queue.
*/
@@ -867,25 +924,22 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
- struct ep93xx_dma_data *data = chan->private;
const char *name = dma_chan_name(chan);
int ret, i;
/* Sanity check the channel parameters */
if (!edmac->edma->m2m) {
- if (!data)
- return -EINVAL;
- if (data->port < EP93XX_DMA_I2S1 ||
- data->port > EP93XX_DMA_IRDA)
+ if (edmac->dma_cfg.port < EP93XX_DMA_I2S1 ||
+ edmac->dma_cfg.port > EP93XX_DMA_IRDA)
return -EINVAL;
- if (data->direction != ep93xx_dma_chan_direction(chan))
+ if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan))
return -EINVAL;
} else {
- if (data) {
- switch (data->port) {
+ if (edmac->dma_cfg.dir != DMA_MEM_TO_MEM) {
+ switch (edmac->dma_cfg.port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (!is_slave_direction(data->direction))
+ if (!is_slave_direction(edmac->dma_cfg.dir))
return -EINVAL;
break;
default:
@@ -894,9 +948,6 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
}
}
- if (data && data->name)
- name = data->name;
-
ret = clk_prepare_enable(edmac->clk);
if (ret)
return ret;
@@ -1025,7 +1076,7 @@ fail:
* @chan: channel
* @sgl: list of buffers to transfer
* @sg_len: number of entries in @sgl
- * @dir: direction of tha DMA transfer
+ * @dir: direction of the DMA transfer
* @flags: flags for the descriptor
* @context: operation context (ignored)
*
@@ -1315,36 +1366,53 @@ static void ep93xx_dma_issue_pending(struct dma_chan *chan)
ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}
-static int __init ep93xx_dma_probe(struct platform_device *pdev)
+static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pdev)
{
- struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ const struct ep93xx_edma_data *data;
+ struct device *dev = &pdev->dev;
struct ep93xx_dma_engine *edma;
struct dma_device *dma_dev;
- int ret, i;
+ char dma_clk_name[5];
+ int i;
- edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL);
+ data = device_get_match_data(dev);
+ if (!data)
+ return ERR_PTR(dev_err_probe(dev, -ENODEV, "No device match found\n"));
+
+ edma = devm_kzalloc(dev, struct_size(edma, channels, data->num_channels),
+ GFP_KERNEL);
if (!edma)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+ edma->m2m = data->id;
+ edma->num_channels = data->num_channels;
dma_dev = &edma->dma_dev;
- edma->m2m = platform_get_device_id(pdev)->driver_data;
- edma->num_channels = pdata->num_channels;
INIT_LIST_HEAD(&dma_dev->channels);
- for (i = 0; i < pdata->num_channels; i++) {
- const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
+ for (i = 0; i < edma->num_channels; i++) {
struct ep93xx_dma_chan *edmac = &edma->channels[i];
edmac->chan.device = dma_dev;
- edmac->regs = cdata->base;
- edmac->irq = cdata->irq;
+ edmac->regs = devm_platform_ioremap_resource(pdev, i);
+ if (IS_ERR(edmac->regs))
+ return ERR_CAST(edmac->regs);
+
+ edmac->irq = fwnode_irq_get(dev_fwnode(dev), i);
+ if (edmac->irq < 0)
+ return ERR_PTR(edmac->irq);
+
edmac->edma = edma;
- edmac->clk = clk_get(NULL, cdata->name);
+ if (edma->m2m)
+ snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i);
+ else
+ snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i);
+
+ edmac->clk = devm_clk_get(dev, dma_clk_name);
if (IS_ERR(edmac->clk)) {
- dev_warn(&pdev->dev, "failed to get clock for %s\n",
- cdata->name);
- continue;
+ dev_err_probe(dev, PTR_ERR(edmac->clk),
+ "no %s clock found\n", dma_clk_name);
+ return ERR_CAST(edmac->clk);
}
spin_lock_init(&edmac->lock);
@@ -1357,6 +1425,90 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
&dma_dev->channels);
}
+ return edma;
+}
+
+static bool ep93xx_m2p_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_chan_cfg *cfg = filter_param;
+
+ if (cfg->dir != ep93xx_dma_chan_direction(chan))
+ return false;
+
+ echan->dma_cfg = *cfg;
+ return true;
+}
+
+static struct dma_chan *ep93xx_m2p_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = edma->dma_dev.cap_mask;
+ struct ep93xx_dma_chan_cfg dma_cfg;
+ u8 port = dma_spec->args[0];
+ u8 direction = dma_spec->args[1];
+
+ if (port > EP93XX_DMA_IRDA)
+ return NULL;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ dma_cfg.port = port;
+ dma_cfg.dir = direction;
+
+ return __dma_request_channel(&mask, ep93xx_m2p_dma_filter, &dma_cfg, ofdma->of_node);
+}
+
+static bool ep93xx_m2m_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_chan_cfg *cfg = filter_param;
+
+ echan->dma_cfg = *cfg;
+
+ return true;
+}
+
+static struct dma_chan *ep93xx_m2m_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = edma->dma_dev.cap_mask;
+ struct ep93xx_dma_chan_cfg dma_cfg;
+ u8 port = dma_spec->args[0];
+ u8 direction = dma_spec->args[1];
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ switch (port) {
+ case EP93XX_DMA_SSP:
+ case EP93XX_DMA_IDE:
+ break;
+ default:
+ return NULL;
+ }
+
+ dma_cfg.port = port;
+ dma_cfg.dir = direction;
+
+ return __dma_request_channel(&mask, ep93xx_m2m_dma_filter, &dma_cfg, ofdma->of_node);
+}
+
+static int ep93xx_dma_probe(struct platform_device *pdev)
+{
+ struct ep93xx_dma_engine *edma;
+ struct dma_device *dma_dev;
+ int ret;
+
+ edma = ep93xx_dma_of_probe(pdev);
+ if (IS_ERR(edma))
+ return PTR_ERR(edma);
+
+ dma_dev = &edma->dma_dev;
+
dma_cap_zero(dma_dev->cap_mask);
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
@@ -1393,21 +1545,46 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
}
ret = dma_async_device_register(dma_dev);
- if (unlikely(ret)) {
- for (i = 0; i < edma->num_channels; i++) {
- struct ep93xx_dma_chan *edmac = &edma->channels[i];
- if (!IS_ERR_OR_NULL(edmac->clk))
- clk_put(edmac->clk);
- }
- kfree(edma);
+ if (ret)
+ return ret;
+
+ if (edma->m2m) {
+ ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2m_dma_of_xlate,
+ edma);
} else {
- dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
- edma->m2m ? "M" : "P");
+ ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2p_dma_of_xlate,
+ edma);
}
+ if (ret)
+ goto err_dma_unregister;
+
+ dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", edma->m2m ? "M" : "P");
+
+ return 0;
+
+err_dma_unregister:
+ dma_async_device_unregister(dma_dev);
return ret;
}
+static const struct ep93xx_edma_data edma_m2p = {
+ .id = M2P_DMA,
+ .num_channels = 10,
+};
+
+static const struct ep93xx_edma_data edma_m2m = {
+ .id = M2M_DMA,
+ .num_channels = 2,
+};
+
+static const struct of_device_id ep93xx_dma_of_ids[] = {
+ { .compatible = "cirrus,ep9301-dma-m2p", .data = &edma_m2p },
+ { .compatible = "cirrus,ep9301-dma-m2m", .data = &edma_m2m },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_dma_of_ids);
+
static const struct platform_device_id ep93xx_dma_driver_ids[] = {
{ "ep93xx-dma-m2p", 0 },
{ "ep93xx-dma-m2m", 1 },
@@ -1417,15 +1594,13 @@ static const struct platform_device_id ep93xx_dma_driver_ids[] = {
static struct platform_driver ep93xx_dma_driver = {
.driver = {
.name = "ep93xx-dma",
+ .of_match_table = ep93xx_dma_of_ids,
},
.id_table = ep93xx_dma_driver_ids,
+ .probe = ep93xx_dma_probe,
};
-static int __init ep93xx_dma_module_init(void)
-{
- return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
-}
-subsys_initcall(ep93xx_dma_module_init);
+module_platform_driver(ep93xx_dma_driver);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
index 2c80077cb7c0..36c284a3d184 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -12,8 +12,8 @@ struct dpaa2_qdma_sd_d {
u32 rsv:32;
union {
struct {
- u32 ssd:12; /* souce stride distance */
- u32 sss:12; /* souce stride size */
+ u32 ssd:12; /* source stride distance */
+ u32 sss:12; /* source stride size */
u32 rsv1:8;
} sdf;
struct {
@@ -48,7 +48,7 @@ struct dpaa2_qdma_sd_d {
#define QDMA_SER_DISABLE (8) /* no notification */
#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
-#define QDMA_SER_BOTH (3 << 8) /* soruce and dest notification */
+#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
#define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */
#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index c66185c5a199..f9f1eda79254 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -100,6 +100,22 @@ static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
return fsl_edma_err_handler(irq, dev_id);
}
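+/*
+ * A srcid identifies a DMA request source and may only be routed to one
+ * channel at a time; this helper rejects a srcid that is already bound.
+ */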
+static bool fsl_edma_srcid_in_use(struct fsl_edma_engine *fsl_edma, u32 srcid)
+{
+ struct fsl_edma_chan *fsl_chan;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+
+ if (fsl_chan->srcid && srcid == fsl_chan->srcid) {
+ dev_err(&fsl_chan->pdev->dev, "srcid %u is already in use\n", srcid);
+ return true;
+ }
+ }
+ return false;
+}
+
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
@@ -117,6 +133,10 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count)
continue;
+
+ if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[1]))
+ return NULL;
+
if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
if (chan) {
@@ -153,7 +173,7 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
- mutex_lock(&fsl_edma->fsl_edma_mutex);
+ guard(mutex)(&fsl_edma->fsl_edma_mutex);
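+ /* The scope-based guard drops the mutex on every return path below. */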
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
device_node) {
@@ -161,6 +181,8 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
continue;
fsl_chan = to_fsl_edma_chan(chan);
+ if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[0]))
+ return NULL;
i = fsl_chan - fsl_edma->chans;
fsl_chan->priority = dma_spec->args[1];
@@ -177,18 +199,15 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
if (!b_chmux && i == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
} else if (b_chmux && !fsl_chan->srcid) {
/* if controller support channel mux, choose a free channel */
chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
fsl_chan->srcid = dma_spec->args[0];
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
}
}
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return NULL;
}
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index 4c47bff81064..25a4134be36b 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -677,7 +677,7 @@ static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
writel_relaxed(tmp, addr);
/*
- * 0 - dma should process FLR whith CPU.
+ * 0 - dma should process FLR with CPU.
* 1 - dma not process FLR, only cpu process FLR.
*/
addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE +
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index e3505e56784b..3c648308a54a 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -290,7 +290,7 @@ static void idma64_desc_fill(struct idma64_chan *idma64c,
desc->length += hw->len;
} while (i);
- /* Trigger an interrupt after the last block is transfered */
+ /* Trigger an interrupt after the last block is transferred */
lli->ctllo |= IDMA64C_CTLL_INT_EN;
/* Disable LLP transfer in the last block */
@@ -364,7 +364,7 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
if (!i)
return bytes;
- /* The current chunk is not fully transfered yet */
+ /* The current chunk is not fully transferred yet */
bytes += desc->hw[--i].len;
return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
@@ -598,9 +598,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev;
- ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
- if (ret)
- return ret;
+ dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
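+ /* dma_set_max_seg_size() can no longer fail, so its result needs no check. */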
ret = dma_async_device_register(&idma64->dma);
if (ret)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 5725ea82c409..234c1c658ec7 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -69,9 +69,15 @@ static struct idxd_driver_data idxd_driver_data[] = {
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
+ /* DSA on GNR-D platforms */
+ { PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
+ /* DSA on DMR platforms */
+ { PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },
/* IAX ver 1.0 platforms */
{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA on DMR platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index f511cf15845b..4b6af2f15d8a 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -449,8 +449,8 @@ static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
idxd_pmu->pmu.attr_groups = perfmon_attr_groups;
idxd_pmu->pmu.task_ctx_nr = perf_invalid_context;
idxd_pmu->pmu.event_init = perfmon_pmu_event_init;
- idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable,
- idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable,
+ idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable;
+ idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;
idxd_pmu->pmu.add = perfmon_pmu_event_add;
idxd_pmu->pmu.del = perfmon_pmu_event_del;
idxd_pmu->pmu.start = perfmon_pmu_event_start;
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 817a564413b0..94eca25ae9b9 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -134,7 +134,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
* completing the descriptor will return desc to allocator and
* the desc can be acquired by a different process and the
* desc->list can be modified. Delete desc from list so the
- * list trasversing does not get corrupted by the other process.
+ * list traversal does not get corrupted by the other process.
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ebf7c115d553..e913f0db99da 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -167,7 +167,6 @@ struct imxdma_channel {
enum imx_dma_type {
IMX1_DMA,
- IMX21_DMA,
IMX27_DMA,
};
@@ -195,8 +194,6 @@ static const struct of_device_id imx_dma_of_dev_id[] = {
{
.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
}, {
- .compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
- }, {
.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
}, {
/* sentinel */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 7b502b60b38b..cc9ddd6c325b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -905,7 +905,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR_VAL;
- /* validate the sources with the destintation page */
+ /* validate the sources with the destination page */
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
xor_val_srcs[i] = xor_srcs[i];
xor_val_srcs[i] = dest;
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 4117c7b67e9c..8173c3f1075a 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -107,7 +107,7 @@
* If header mode is set in DMA descriptor,
* If bit 30 is disabled, HDR_LEN must be configured according to channel
* requirement.
- * If bit 30 is enabled(checksum with heade mode), HDR_LEN has no need to
+ * If bit 30 is enabled (checksum with header mode), HDR_LEN has no need to
* be configured. It will enable check sum for switch
* If header mode is not set in DMA descriptor,
* This register setting doesn't matter
diff --git a/drivers/dma/loongson1-apb-dma.c b/drivers/dma/loongson1-apb-dma.c
new file mode 100644
index 000000000000..255fe7eca212
--- /dev/null
+++ b/drivers/dma/loongson1-apb-dma.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Loongson-1 APB DMA Controller
+ *
+ * Copyright (C) 2015-2024 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* Loongson-1 DMA Control Register */
+#define LS1X_DMA_CTRL 0x0
+
+/* DMA Control Register Bits */
+#define LS1X_DMA_STOP BIT(4)
+#define LS1X_DMA_START BIT(3)
+#define LS1X_DMA_ASK_VALID BIT(2)
+
+/* DMA Next Field Bits */
+#define LS1X_DMA_NEXT_VALID BIT(0)
+
+/* DMA Command Field Bits */
+#define LS1X_DMA_RAM2DEV BIT(12)
+#define LS1X_DMA_INT BIT(1)
+#define LS1X_DMA_INT_MASK BIT(0)
+
+#define LS1X_DMA_LLI_ALIGNMENT 64
+#define LS1X_DMA_LLI_ADDR_MASK GENMASK(31, __ffs(LS1X_DMA_LLI_ALIGNMENT))
+#define LS1X_DMA_MAX_CHANNELS 3
+
+enum ls1x_dmadesc_offsets {
+ LS1X_DMADESC_NEXT = 0,
+ LS1X_DMADESC_SADDR,
+ LS1X_DMADESC_DADDR,
+ LS1X_DMADESC_LENGTH,
+ LS1X_DMADESC_STRIDE,
+ LS1X_DMADESC_CYCLES,
+ LS1X_DMADESC_CMD,
+ LS1X_DMADESC_SIZE
+};
+
+struct ls1x_dma_lli {
+ unsigned int hw[LS1X_DMADESC_SIZE];
+ dma_addr_t phys;
+ struct list_head node;
+} __aligned(LS1X_DMA_LLI_ALIGNMENT);
+
+struct ls1x_dma_desc {
+ struct virt_dma_desc vd;
+ struct list_head lli_list;
+};
+
+struct ls1x_dma_chan {
+ struct virt_dma_chan vc;
+ struct dma_pool *lli_pool;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ unsigned int bus_width;
+ void __iomem *reg_base;
+ int irq;
+ bool is_cyclic;
+ struct ls1x_dma_lli *curr_lli;
+};
+
+struct ls1x_dma {
+ struct dma_device ddev;
+ unsigned int nr_chans;
+ struct ls1x_dma_chan chan[];
+};
+
+static irqreturn_t ls1x_dma_irq_handler(int irq, void *data);
+
+#define to_ls1x_dma_chan(dchan) \
+ container_of(dchan, struct ls1x_dma_chan, vc.chan)
+
+#define to_ls1x_dma_desc(d) \
+ container_of(d, struct ls1x_dma_desc, vd)
+
+static inline struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline int ls1x_dma_query(struct ls1x_dma_chan *chan,
+ dma_addr_t *lli_phys)
+{
+ struct dma_chan *dchan = &chan->vc.chan;
+ int val, ret;
+
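+ /*
+ * The lli is 64-byte aligned, so the low bits of its physical address
+ * are free to carry the ask/valid flag and the channel id written to
+ * the control register.
+ */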
+ val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
+ val |= LS1X_DMA_ASK_VALID;
+ val |= dchan->chan_id;
+ writel(val, chan->reg_base + LS1X_DMA_CTRL);
+ ret = readl_poll_timeout_atomic(chan->reg_base + LS1X_DMA_CTRL, val,
+ !(val & LS1X_DMA_ASK_VALID), 0, 3000);
+ if (ret)
+ dev_err(chan2dev(dchan), "failed to query DMA\n");
+
+ return ret;
+}
+
+static inline int ls1x_dma_start(struct ls1x_dma_chan *chan,
+ dma_addr_t *lli_phys)
+{
+ struct dma_chan *dchan = &chan->vc.chan;
+ struct device *dev = chan2dev(dchan);
+ int val, ret;
+
+ val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
+ val |= LS1X_DMA_START;
+ val |= dchan->chan_id;
+ writel(val, chan->reg_base + LS1X_DMA_CTRL);
+ ret = readl_poll_timeout(chan->reg_base + LS1X_DMA_CTRL, val,
+ !(val & LS1X_DMA_START), 0, 1000);
+ if (!ret)
+ dev_dbg(dev, "start DMA with lli_phys=%pad\n", lli_phys);
+ else
+ dev_err(dev, "failed to start DMA\n");
+
+ return ret;
+}
+
+static inline void ls1x_dma_stop(struct ls1x_dma_chan *chan)
+{
+ int val = readl(chan->reg_base + LS1X_DMA_CTRL);
+
+ writel(val | LS1X_DMA_STOP, chan->reg_base + LS1X_DMA_CTRL);
+}
+
+static void ls1x_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct device *dev = chan2dev(dchan);
+
+ dma_free_coherent(dev, sizeof(struct ls1x_dma_lli),
+ chan->curr_lli, chan->curr_lli->phys);
+ dma_pool_destroy(chan->lli_pool);
+ chan->lli_pool = NULL;
+ devm_free_irq(dev, chan->irq, chan);
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct device *dev = chan2dev(dchan);
+ dma_addr_t phys;
+ int ret;
+
+ ret = devm_request_irq(dev, chan->irq, ls1x_dma_irq_handler,
+ IRQF_SHARED, dma_chan_name(dchan), chan);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", chan->irq);
+ return ret;
+ }
+
+ chan->lli_pool = dma_pool_create(dma_chan_name(dchan), dev,
+ sizeof(struct ls1x_dma_lli),
+ __alignof__(struct ls1x_dma_lli), 0);
+ if (!chan->lli_pool)
+ return -ENOMEM;
+
+ /* allocate memory for querying the current lli */
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ chan->curr_lli = dma_alloc_coherent(dev, sizeof(struct ls1x_dma_lli),
+ &phys, GFP_KERNEL);
+ if (!chan->curr_lli) {
+ dma_pool_destroy(chan->lli_pool);
+ return -ENOMEM;
+ }
+ chan->curr_lli->phys = phys;
+
+ return 0;
+}
+
+static void ls1x_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(vd->tx.chan);
+ struct ls1x_dma_lli *lli, *_lli;
+
+ list_for_each_entry_safe(lli, _lli, &desc->lli_list, node) {
+ list_del(&lli->node);
+ dma_pool_free(chan->lli_pool, lli, lli->phys);
+ }
+
+ kfree(desc);
+}
+
+static struct ls1x_dma_desc *ls1x_dma_alloc_desc(void)
+{
+ struct ls1x_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&desc->lli_list);
+
+ return desc;
+}
+
+static int ls1x_dma_prep_lli(struct dma_chan *dchan, struct ls1x_dma_desc *desc,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, bool is_cyclic)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct ls1x_dma_lli *lli, *prev = NULL, *first = NULL;
+ struct device *dev = chan2dev(dchan);
+ struct list_head *pos = NULL;
+ struct scatterlist *sg;
+ unsigned int dev_addr, cmd, i;
+
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ dev_addr = chan->dst_addr;
+ chan->bus_width = chan->dst_addr_width;
+ cmd = LS1X_DMA_RAM2DEV | LS1X_DMA_INT;
+ break;
+ case DMA_DEV_TO_MEM:
+ dev_addr = chan->src_addr;
+ chan->bus_width = chan->src_addr_width;
+ cmd = LS1X_DMA_INT;
+ break;
+ default:
+ dev_err(dev, "unsupported DMA direction: %s\n",
+ dmaengine_get_direction_text(dir));
+ return -EINVAL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t buf_addr = sg_dma_address(sg);
+ size_t buf_len = sg_dma_len(sg);
+ dma_addr_t phys;
+
+ if (!is_dma_copy_aligned(dchan->device, buf_addr, 0, buf_len)) {
+ dev_err(dev, "buffer is not aligned\n");
+ return -EINVAL;
+ }
+
+ /* allocate HW descriptors */
+ lli = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT, &phys);
+ if (!lli) {
+ dev_err(dev, "failed to alloc lli %u\n", i);
+ return -ENOMEM;
+ }
+
+ /* setup HW descriptors */
+ lli->phys = phys;
+ lli->hw[LS1X_DMADESC_SADDR] = buf_addr;
+ lli->hw[LS1X_DMADESC_DADDR] = dev_addr;
+ lli->hw[LS1X_DMADESC_LENGTH] = buf_len / chan->bus_width;
+ lli->hw[LS1X_DMADESC_STRIDE] = 0;
+ lli->hw[LS1X_DMADESC_CYCLES] = 1;
+ lli->hw[LS1X_DMADESC_CMD] = cmd;
+
+ if (prev)
+ prev->hw[LS1X_DMADESC_NEXT] =
+ lli->phys | LS1X_DMA_NEXT_VALID;
+ prev = lli;
+
+ if (!first)
+ first = lli;
+
+ list_add_tail(&lli->node, &desc->lli_list);
+ }
+
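+ /* Close the ring for cyclic transfers: the last lli points back at the first. */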
+ if (is_cyclic) {
+ lli->hw[LS1X_DMADESC_NEXT] = first->phys | LS1X_DMA_NEXT_VALID;
+ chan->is_cyclic = is_cyclic;
+ }
+
+ list_for_each(pos, &desc->lli_list) {
+ lli = list_entry(pos, struct ls1x_dma_lli, node);
+ print_hex_dump_debug("LLI: ", DUMP_PREFIX_OFFSET, 16, 4,
+ lli, sizeof(*lli), false);
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+ls1x_dma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct ls1x_dma_desc *desc;
+
+ dev_dbg(chan2dev(dchan), "sg_len=%u flags=0x%lx dir=%s\n",
+ sg_len, flags, dmaengine_get_direction_text(dir));
+
+ desc = ls1x_dma_alloc_desc();
+ if (!desc)
+ return NULL;
+
+ if (ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, false)) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+ls1x_dma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct ls1x_dma_desc *desc;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(chan2dev(dchan),
+ "buf_len=%zu period_len=%zu flags=0x%lx dir=%s\n",
+ buf_len, period_len, flags, dmaengine_get_direction_text(dir));
+
+ desc = ls1x_dma_alloc_desc();
+ if (!desc)
+ return NULL;
+
+ /* allocate the scatterlist */
+ sg_len = buf_len / period_len;
+ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
+ if (!sgl) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ sg_init_table(sgl, sg_len);
+ for (i = 0; i < sg_len; ++i) {
+ sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(buf_addr)),
+ period_len, offset_in_page(buf_addr));
+ sg_dma_address(&sgl[i]) = buf_addr;
+ sg_dma_len(&sgl[i]) = period_len;
+ buf_addr += period_len;
+ }
+
+ ret = ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, true);
+ kfree(sgl);
+ if (ret) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
+}
+
+static int ls1x_dma_slave_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ chan->src_addr = config->src_addr;
+ chan->src_addr_width = config->src_addr_width;
+ chan->dst_addr = config->dst_addr;
+ chan->dst_addr_width = config->dst_addr_width;
+
+ return 0;
+}
+
+static int ls1x_dma_pause(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ int ret;
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+ /* save the current lli */
+ ret = ls1x_dma_query(chan, &chan->curr_lli->phys);
+ if (!ret)
+ ls1x_dma_stop(chan);
+
+ return ret;
+}
+
+static int ls1x_dma_resume(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+
+ return ls1x_dma_start(chan, &chan->curr_lli->phys);
+}
+
+static int ls1x_dma_terminate_all(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct virt_dma_desc *vd;
+ LIST_HEAD(head);
+
+ ls1x_dma_stop(chan);
+
+ scoped_guard(spinlock_irqsave, &chan->vc.lock) {
+ vd = vchan_next_desc(&chan->vc);
+ if (vd)
+ vchan_terminate_vdesc(vd);
+
+ vchan_get_all_descriptors(&chan->vc, &head);
+ }
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+
+ return 0;
+}
+
+static void ls1x_dma_synchronize(struct dma_chan *dchan)
+{
+ vchan_synchronize(to_virt_chan(dchan));
+}
+
+static enum dma_status ls1x_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct virt_dma_desc *vd;
+ enum dma_status status;
+ size_t bytes = 0;
+
+ status = dma_cookie_status(dchan, cookie, state);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ scoped_guard(spinlock_irqsave, &chan->vc.lock) {
+ vd = vchan_find_desc(&chan->vc, cookie);
+ if (vd) {
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_lli *lli;
+ dma_addr_t next_phys;
+
+ /* get the current lli */
+ if (ls1x_dma_query(chan, &chan->curr_lli->phys))
+ return status;
+
+ /* locate the current lli */
+ next_phys = chan->curr_lli->hw[LS1X_DMADESC_NEXT];
+ list_for_each_entry(lli, &desc->lli_list, node)
+ if (lli->hw[LS1X_DMADESC_NEXT] == next_phys)
+ break;
+
+ dev_dbg(chan2dev(dchan), "current lli_phys=%pad",
+ &lli->phys);
+
+ /* count the residues */
+ list_for_each_entry_from(lli, &desc->lli_list, node)
+ bytes += lli->hw[LS1X_DMADESC_LENGTH] *
+ chan->bus_width;
+ }
+ }
+
+ dma_set_residue(state, bytes);
+
+ return status;
+}
+
+static void ls1x_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+
+ if (vchan_issue_pending(&chan->vc)) {
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ if (vd) {
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_lli *lli;
+
+ lli = list_first_entry(&desc->lli_list,
+ struct ls1x_dma_lli, node);
+ ls1x_dma_start(chan, &lli->phys);
+ }
+ }
+}
+
+static irqreturn_t ls1x_dma_irq_handler(int irq, void *data)
+{
+ struct ls1x_dma_chan *chan = data;
+ struct dma_chan *dchan = &chan->vc.chan;
+ struct device *dev = chan2dev(dchan);
+ struct virt_dma_desc *vd;
+
+ scoped_guard(spinlock, &chan->vc.lock) {
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_warn(dev,
+ "IRQ %d with no active desc on channel %d\n",
+ irq, dchan->chan_id);
+ return IRQ_NONE;
+ }
+
+ if (chan->is_cyclic) {
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ }
+
+ dev_dbg(dev, "DMA IRQ %d on channel %d\n", irq, dchan->chan_id);
+
+ return IRQ_HANDLED;
+}
+
+static int ls1x_dma_chan_probe(struct platform_device *pdev,
+ struct ls1x_dma *dma)
+{
+ void __iomem *reg_base;
+ int id;
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ for (id = 0; id < dma->nr_chans; id++) {
+ struct ls1x_dma_chan *chan = &dma->chan[id];
+ char pdev_irqname[16];
+
+ snprintf(pdev_irqname, sizeof(pdev_irqname), "ch%d", id);
+ chan->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (chan->irq < 0)
+ return dev_err_probe(&pdev->dev, chan->irq,
+ "failed to get IRQ for ch%d\n",
+ id);
+
+ chan->reg_base = reg_base;
+ chan->vc.desc_free = ls1x_dma_free_desc;
+ vchan_init(&chan->vc, &dma->ddev);
+ }
+
+ return 0;
+}
+
+static void ls1x_dma_chan_remove(struct ls1x_dma *dma)
+{
+ int id;
+
+ for (id = 0; id < dma->nr_chans; id++) {
+ struct ls1x_dma_chan *chan = &dma->chan[id];
+
+ if (chan->vc.chan.device == &dma->ddev) {
+ list_del(&chan->vc.chan.device_node);
+ tasklet_kill(&chan->vc.task);
+ }
+ }
+}
+
+static int ls1x_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dma_device *ddev;
+ struct ls1x_dma *dma;
+ int ret;
+
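+ /* One IRQ is wired per DMA channel, so the IRQ count gives the channel count. */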
+ ret = platform_irq_count(pdev);
+ if (ret <= 0 || ret > LS1X_DMA_MAX_CHANNELS)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid number of IRQ channels: %d\n",
+ ret);
+
+ dma = devm_kzalloc(dev, struct_size(dma, chan, ret), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+ dma->nr_chans = ret;
+
+ /* initialize DMA device */
+ ddev = &dma->ddev;
+ ddev->dev = dev;
+ ddev->copy_align = DMAENGINE_ALIGN_4_BYTES;
+ ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ ddev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ ddev->device_alloc_chan_resources = ls1x_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = ls1x_dma_free_chan_resources;
+ ddev->device_prep_slave_sg = ls1x_dma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = ls1x_dma_prep_dma_cyclic;
+ ddev->device_config = ls1x_dma_slave_config;
+ ddev->device_pause = ls1x_dma_pause;
+ ddev->device_resume = ls1x_dma_resume;
+ ddev->device_terminate_all = ls1x_dma_terminate_all;
+ ddev->device_synchronize = ls1x_dma_synchronize;
+ ddev->device_tx_status = ls1x_dma_tx_status;
+ ddev->device_issue_pending = ls1x_dma_issue_pending;
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ INIT_LIST_HEAD(&ddev->channels);
+
+ /* initialize DMA channels */
+ ret = ls1x_dma_chan_probe(pdev, dma);
+ if (ret)
+ goto err;
+
+ ret = dmaenginem_async_device_register(ddev);
+ if (ret) {
+ dev_err(dev, "failed to register DMA device\n");
+ goto err;
+ }
+
+ ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
+ ddev);
+ if (ret) {
+ dev_err(dev, "failed to register DMA controller\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, dma);
+ dev_info(dev, "Loongson1 DMA driver registered\n");
+
+ return 0;
+
+err:
+ ls1x_dma_chan_remove(dma);
+
+ return ret;
+}
+
+static void ls1x_dma_remove(struct platform_device *pdev)
+{
+ struct ls1x_dma *dma = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ ls1x_dma_chan_remove(dma);
+}
+
+static const struct of_device_id ls1x_dma_match[] = {
+ { .compatible = "loongson,ls1b-apbdma" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls1x_dma_match);
+
+static struct platform_driver ls1x_dma_driver = {
+ .probe = ls1x_dma_probe,
+ .remove = ls1x_dma_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ls1x_dma_match,
+ },
+};
+
+module_platform_driver(ls1x_dma_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson-1 APB DMA Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/lpc32xx-dmamux.c b/drivers/dma/lpc32xx-dmamux.c
new file mode 100644
index 000000000000..351d7e23e615
--- /dev/null
+++ b/drivers/dma/lpc32xx-dmamux.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2024 Timesys Corporation <piotr.wojtaszczyk@timesys.com>
+//
+// Based on TI DMA Crossbar driver by:
+// Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+// Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define LPC32XX_SSP_CLK_CTRL 0x78
+#define LPC32XX_I2S_CLK_CTRL 0x7c
+
+struct lpc32xx_dmamux {
+ int signal;
+ char *name_sel0;
+ char *name_sel1;
+ int muxval;
+ int muxreg;
+ int bit;
+ bool busy;
+};
+
+struct lpc32xx_dmamux_data {
+ struct dma_router dmarouter;
+ struct regmap *reg;
+ spinlock_t lock; /* protects busy status flag */
+};
+
+/* From LPC32x0 User manual "3.2.1 DMA request signals" */
+static struct lpc32xx_dmamux lpc32xx_muxes[] = {
+ {
+ .signal = 3,
+ .name_sel0 = "spi2-rx-tx",
+ .name_sel1 = "ssp1-rx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 5,
+ },
+ {
+ .signal = 10,
+ .name_sel0 = "uart7-rx",
+ .name_sel1 = "i2s1-dma1",
+ .muxreg = LPC32XX_I2S_CLK_CTRL,
+ .bit = 4,
+ },
+ {
+ .signal = 11,
+ .name_sel0 = "spi1-rx-tx",
+ .name_sel1 = "ssp1-tx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 4,
+ },
+ {
+ .signal = 14,
+ .name_sel0 = "none",
+ .name_sel1 = "ssp0-rx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 3,
+ },
+ {
+ .signal = 15,
+ .name_sel0 = "none",
+ .name_sel1 = "ssp0-tx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 2,
+ },
+};
+
+static void lpc32xx_dmamux_release(struct device *dev, void *route_data)
+{
+ struct lpc32xx_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct lpc32xx_dmamux *mux = route_data;
+
+ dev_dbg(dev, "releasing dma request signal %d routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+
+ guard(spinlock)(&dmamux->lock);
+
+ mux->busy = false;
+}
+
+static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct device *dev = &pdev->dev;
+ struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ unsigned long flags;
+ struct lpc32xx_dmamux *mux = NULL;
+ int i;
+
+ if (dma_spec->args_count != 3) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
+ if (lpc32xx_muxes[i].signal == dma_spec->args[0]) {
+ mux = &lpc32xx_muxes[i];
+ break;
+ }
+ }
+ if (!mux) {
+ dev_err(&pdev->dev, "invalid mux request number: %d\n",
+ dma_spec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dma_spec->args[2] > 1) {
+ dev_err(&pdev->dev, "invalid dma mux value: %d\n",
+ dma_spec->args[2]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The of_node_put() will be done in the core for the node */
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+ if (mux->busy) {
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ dev_err(dev, "dma request signal %d busy, routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+ of_node_put(dma_spec->np);
+ return ERR_PTR(-EBUSY);
+ }
+
+ mux->busy = true;
+ mux->muxval = dma_spec->args[2] ? BIT(mux->bit) : 0;
+
+ regmap_update_bits(dmamux->reg, mux->muxreg, BIT(mux->bit), mux->muxval);
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
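+ /* The mux selector cell is consumed here; only the master's two cells remain. */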
+ dma_spec->args[2] = 0;
+ dma_spec->args_count = 2;
+
+ dev_dbg(dev, "dma request signal %d routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+
+ return mux;
+}
+
+static int lpc32xx_dmamux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct lpc32xx_dmamux_data *dmamux;
+
+ dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+ if (!dmamux)
+ return -ENOMEM;
+
+ dmamux->reg = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(dmamux->reg)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(dmamux->reg);
+ }
+
+ spin_lock_init(&dmamux->lock);
+ platform_set_drvdata(pdev, dmamux);
+ dmamux->dmarouter.dev = &pdev->dev;
+ dmamux->dmarouter.route_free = lpc32xx_dmamux_release;
+
+ return of_dma_router_register(np, lpc32xx_dmamux_reserve,
+ &dmamux->dmarouter);
+}
+
+static const struct of_device_id lpc32xx_dmamux_match[] = {
+ { .compatible = "nxp,lpc3220-dmamux" },
+ {},
+};
+
+static struct platform_driver lpc32xx_dmamux_driver = {
+ .probe = lpc32xx_dmamux_probe,
+ .driver = {
+ .name = "lpc32xx-dmamux",
+ .of_match_table = lpc32xx_dmamux_match,
+ },
+};
+
+static int __init lpc32xx_dmamux_init(void)
+{
+ return platform_driver_register(&lpc32xx_dmamux_driver);
+}
+arch_initcall(lpc32xx_dmamux_init);
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
index a49913f3ed3f..9652e8666722 100644
--- a/drivers/dma/ls2x-apb-dma.c
+++ b/drivers/dma/ls2x-apb-dma.c
@@ -33,11 +33,11 @@
#define LDMA_STOP BIT(4) /* DMA stop operation */
#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
-/* Bitfields in ndesc_addr field of HW decriptor */
+/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */
#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
-/* Bitfields in cmd field of HW decriptor */
+/* Bitfields in cmd field of HW descriptor */
#define LDMA_INT BIT(1) /* Enable DMA interrupts */
#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 529100c5b9f5..b69eabf12a24 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -518,7 +518,7 @@ mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
/* setup dma channel */
cvd[i]->ch = c;
- /* setup sourece, destination, and length */
+ /* setup source, destination, and length */
tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len;
cvd[i]->len = tlen;
cvd[i]->src = src;
@@ -617,7 +617,7 @@ static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c)
u32 i, min_refcnt = U32_MAX, refcnt;
unsigned long flags;
- /* allocate PC with the minimun refcount */
+ /* allocate PC with the minimum refcount */
for (i = 0; i < cqdma->dma_channels; ++i) {
refcnt = refcount_read(&cqdma->pc[i]->refcnt);
if (refcnt < min_refcnt) {
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 36ff11e909ea..58c7961ab9ad 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -226,7 +226,7 @@ struct mtk_hsdma_soc {
* @pc_refcnt: Track how many VCs are using the PC
* @lock: Lock protect agaisting multiple VCs access PC
* @soc: The pointer to area holding differences among
- * vaious platform
+ * various platforms
*/
struct mtk_hsdma_device {
struct dma_device ddev;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcd3b623ac6c..43efce77bb57 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -414,7 +414,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_chan_is_busy(mv_chan)) {
u32 current_desc = mv_chan_get_current_desc(mv_chan);
/*
- * and the curren desc is the end of the chain before
+ * and the current desc is the end of the chain before
* the append, then we need to start the channel
*/
if (current_desc == old_chain_tail->async_tx.phys)
@@ -1074,7 +1074,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
- /* discover transaction capabilites from the platform data */
+ /* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
INIT_LIST_HEAD(&dma_dev->channels);
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index d86086b05b0e..c87cefd38a07 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -99,7 +99,7 @@ struct mv_xor_device {
* @common: common dmaengine channel object members
* @slots_allocated: records the actual size of the descriptor slot pool
* @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
- * @op_in_desc: new mode of driver, each op is writen to descriptor.
+ * @op_in_desc: new mode of driver, each op is written to descriptor.
*/
struct mv_xor_chan {
int pending;
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 97ebc791a30b..c8c67f4d982c 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -175,7 +175,7 @@ struct mv_xor_v2_device {
* struct mv_xor_v2_sw_desc - implements a xor SW descriptor
* @idx: descriptor index
* @async_tx: support for the async_tx api
- * @hw_desc: assosiated HW descriptor
+ * @hw_desc: associated HW descriptor
* @free_list: node of the free SW descriprots list
*/
struct mv_xor_v2_sw_desc {
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index c08916339aa7..3b011a91d48e 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -897,7 +897,7 @@ static int nbpf_config(struct dma_chan *dchan,
/*
* We could check config->slave_id to match chan->terminal here,
* but with DT they would be coming from the same source, so
- * such a check would be superflous
+ * such a check would be superfluous
*/
chan->slave_dst_addr = config->dst_addr;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index e588fff9f21d..423442e55d36 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -26,7 +26,7 @@ static DEFINE_MUTEX(of_dma_lock);
*
* Finds a DMA controller with matching device node and number for dma cells
* in a list of registered DMA controllers. If a match is found a valid pointer
- * to the DMA data stored is retuned. A NULL pointer is returned if no match is
+ * to the DMA data stored is returned. A NULL pointer is returned if no match is
* found.
*/
static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec)
@@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
*
* This function can be used as the of xlate callback for DMA driver which wants
* to match the channel based on the channel id. When using this xlate function
- * the #dma-cells propety of the DMA controller dt node needs to be set to 1.
+ * the #dma-cells property of the DMA controller dt node needs to be set to 1.
* The data parameter of of_dma_controller_register must be a pointer to the
* dma_device struct the function should match upon.
*
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index e001f4f7aa64..aa436f9e3571 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1156,7 +1156,7 @@ static int owl_dma_probe(struct platform_device *pdev)
}
/*
- * Eventhough the DMA controller is capable of generating 4
+ * Even though the DMA controller is capable of generating 4
* IRQ's for DMA priority feature, we only use 1 IRQ for
* simplification.
*/
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 60c4de8dac1d..82a9fe88ad54 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3163,10 +3163,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
* This is the limit for transfers with a buswidth of 1, larger
* buswidths will have larger limits.
*/
- ret = dma_set_max_seg_size(&adev->dev, 1900800);
- if (ret)
- dev_err(&adev->dev, "unable to set the seg size\n");
-
+ dma_set_max_seg_size(&adev->dev, 1900800);
init_pl330_debugfs(pl330);
dev_info(&adev->dev,
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index bbb60a970dab..7b78759ac734 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -9,7 +9,7 @@
*/
/*
- * This driver supports the asynchrounous DMA copy and RAID engines available
+ * This driver supports the asynchronous DMA copy and RAID engines available
* on the AMCC PPC440SPe Processors.
* Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
* ADMA driver written by D.Williams.
diff --git a/drivers/dma/ppc4xx/dma.h b/drivers/dma/ppc4xx/dma.h
index 1ff4be23db0f..b5725481bfa6 100644
--- a/drivers/dma/ppc4xx/dma.h
+++ b/drivers/dma/ppc4xx/dma.h
@@ -14,7 +14,7 @@
/* Number of elements in the array with statical CDBs */
#define MAX_STAT_DMA_CDBS 16
-/* Number of DMA engines available on the contoller */
+/* Number of DMA engines available on the controller */
#define DMA_ENGINES_NUM 2
/* Maximum h/w supported number of destinations */
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index 21b4bf895200..39bc37268235 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -192,7 +192,7 @@ struct pt_cmd_queue {
/* Queue dma pool */
struct dma_pool *dma_pool;
- /* Queue base address (not neccessarily aligned)*/
+ /* Queue base address (not necessarily aligned) */
struct ptdma_desc *qbase;
/* Aligned queue start address (per requirement) */
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5e7d332731e0..d43a881e43b9 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -440,7 +440,7 @@ static void bam_reset(struct bam_device *bdev)
val |= BAM_EN;
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
- /* set descriptor threshhold, start with 4 bytes */
+ /* set descriptor threshold, start with 4 bytes */
writel_relaxed(DEFAULT_CNT_THRSHLD,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
@@ -667,7 +667,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
for_each_sg(sgl, sg, sg_len, i)
num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
- /* allocate enough room to accomodate the number of entries */
+ /* allocate enough room to accommodate the number of entries */
async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
GFP_NOWAIT);
@@ -1325,11 +1325,7 @@ static int bam_dma_probe(struct platform_device *pdev)
/* set max dma segment size */
bdev->common.dev = bdev->dev;
- ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
- if (ret) {
- dev_err(bdev->dev, "cannot set maximum segment size\n");
- goto err_bam_channel_exit;
- }
+ dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
platform_set_drvdata(pdev, bdev);
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index e6ebd688d746..52a7c8f2498f 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1856,7 +1856,7 @@ static void gpi_issue_pending(struct dma_chan *chan)
read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
- /* move all submitted discriptors to issued list */
+ /* move all submitted descriptors to issued list */
spin_lock_irqsave(&gchan->vc.lock, flags);
if (vchan_issue_pending(&gchan->vc))
vd = list_last_entry(&gchan->vc.desc_issued,
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index 53f4273b657c..c1db398adc84 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -650,7 +650,7 @@ static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
/*
* residue is either the full length if it is in the issued list, or 0
* if it is in progress. We have no reliable way of determining
- * anything inbetween
+ * anything in between
*/
dma_set_residue(txstate, residue);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 40482cb73d79..1094a2f82164 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1868,9 +1868,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
- ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
- if (ret)
- return ret;
+ dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
if (ret)
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 7cc9eb2217e8..8ead0a1fd237 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -318,7 +318,7 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan,
}
/*
- * Find a slave channel configuration from the contoller list by either a slave
+ * Find a slave channel configuration from the controller list by either a slave
* ID in the non-DT case, or by a MID/RID value in the DT case
*/
static const struct sh_dmae_slave_config *dmae_find_slave(
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2c489299148e..d52e1685aed5 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3632,11 +3632,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (ret)
goto destroy_cache;
- ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
- if (ret) {
- d40_err(dev, "Failed to set dma max seg size\n");
- goto destroy_cache;
- }
+ dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
d40_hw_init(base);
diff --git a/drivers/dma/ste_dma40.h b/drivers/dma/ste_dma40.h
index c697bfe16a01..a90c786acc1f 100644
--- a/drivers/dma/ste_dma40.h
+++ b/drivers/dma/ste_dma40.h
@@ -4,7 +4,7 @@
#define STE_DMA40_H
/*
- * Maxium size for a single dma descriptor
+ * Maximum size for a single dma descriptor
* Size is limited to 16 bits.
* Size is in the units of addr-widths (1,2,4,8 bytes)
* Larger transfers will be split up to multiple linked desc
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index c504e855eb02..2e30e9a94a1e 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -369,7 +369,7 @@ struct d40_phy_lli_bidir {
* @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
* @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
*
- * This struct must be 8 bytes aligned since it will be accessed directy by
+ * This struct must be 8 bytes aligned since it will be accessed directly by
* the DMA. Never add any none hw mapped registers to this struct.
*/
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index ac69778827f2..7d1acda2d72b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -463,7 +463,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
/*
* If interrupt is pending then do nothing as the ISR will handle
- * the programing for new request.
+ * the programming for new request.
*/
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
dev_err(tdc2dev(tdc),
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index d349c6d482ae..9062a237cd16 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -131,7 +131,6 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
struct device *xudma_get_device(struct udma_dev *ud);
struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud);
-void xudma_dev_put(struct udma_dev *ud);
u32 xudma_dev_get_psil_base(struct udma_dev *ud);
struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index fd4397adeb79..275848a9c450 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1742,7 +1742,7 @@ static int xgene_dma_probe(struct platform_device *pdev)
/* Initialize DMA channels software state */
xgene_dma_init_channels(pdma);
- /* Configue DMA rings */
+ /* Configure DMA rings */
ret = xgene_dma_init_rings(pdma);
if (ret)
goto err_clk_enable;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 36bd4825d389..be87764af9e8 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -149,7 +149,7 @@ struct xilinx_dpdma_chan;
* @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
* @next_desc: next descriptor 32 bit address
* @src_addr: payload source address (1st page, 32 LSB)
- * @addr_ext_23: payload source address (3nd and 3rd pages, 16 LSBs)
+ * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
* @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
* @src_addr2: payload source address (2nd page, 32 LSB)
* @src_addr3: payload source address (3rd page, 32 LSB)
@@ -210,7 +210,7 @@ struct xilinx_dpdma_tx_desc {
* @vchan: virtual DMA channel
* @reg: register base address
* @id: channel ID
- * @wait_to_stop: queue to wait for outstanding transacitons before stopping
+ * @wait_to_stop: queue to wait for outstanding transactions before stopping
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
@@ -671,6 +671,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
}
/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of each period in bytes
+ * @flags: tx flags argument passed in to prepare function
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, unsigned long flags)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ for (i = 0; i < periods; i++) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+
+ if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ goto error;
+ }
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
+ &buf_addr, 1);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = period_len;
+ hw_desc->hsize_stride =
+ FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
+ period_len) |
+ FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
+ period_len);
+ hw_desc->control = XILINX_DPDMA_DESC_CONTROL_PREEMBLE |
+ XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE |
+ XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+ buf_addr += period_len;
+ last = sw_desc;
+ }
+
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
+ if (chan->xdev->ext_addr)
+ last->hw.addr_ext |=
+ FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
+ upper_32_bits(sw_desc->dma_addr));
+
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
+
+ return NULL;
+}
+
+/**
* xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
* descriptor
* @chan: DPDMA channel
@@ -1189,6 +1267,23 @@ out_unlock:
/* -----------------------------------------------------------------------------
* DMA Engine Operations
*/
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ if (direction != DMA_MEM_TO_DEV)
+ return NULL;
+
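+ /* Only buffers that split into a whole number of periods are supported. */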
+ if (buf_len % period_len)
+ return NULL;
+
+ return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+ period_len, flags);
+}
static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
@@ -1672,6 +1767,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, ddev->cap_mask);
dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
dma_cap_set(DMA_REPEAT, ddev->cap_mask);
dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
@@ -1679,6 +1775,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+ ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
/* TODO: Can we achieve better granularity ? */
ddev->device_tx_status = dma_cookie_status;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index f31631bef961..9ae46f1198fe 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -22,10 +22,10 @@
#include "../dmaengine.h"
/* Register Offsets */
-#define ZYNQMP_DMA_ISR 0x100
-#define ZYNQMP_DMA_IMR 0x104
-#define ZYNQMP_DMA_IER 0x108
-#define ZYNQMP_DMA_IDS 0x10C
+#define ZYNQMP_DMA_ISR (chan->irq_offset + 0x100)
+#define ZYNQMP_DMA_IMR (chan->irq_offset + 0x104)
+#define ZYNQMP_DMA_IER (chan->irq_offset + 0x108)
+#define ZYNQMP_DMA_IDS (chan->irq_offset + 0x10c)
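+/*
+ * These interrupt registers are addressed relative to chan->irq_offset, so
+ * the macros may only be used where a struct zynqmp_dma_chan *chan is in
+ * scope.
+ */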
#define ZYNQMP_DMA_CTRL0 0x110
#define ZYNQMP_DMA_CTRL1 0x114
#define ZYNQMP_DMA_DATA_ATTR 0x120
@@ -145,6 +145,9 @@
#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \
async_tx)
+/* IRQ Register offset for Versal Gen 2 */
+#define IRQ_REG_OFFSET 0x308
+
/**
* struct zynqmp_dma_desc_ll - Hw linked list descriptor
* @addr: Buffer address
@@ -211,6 +214,7 @@ struct zynqmp_dma_desc_sw {
* @bus_width: Bus width
* @src_burst_len: Source burst length
* @dst_burst_len: Dest burst length
+ * @irq_offset: Irq register offset
*/
struct zynqmp_dma_chan {
struct zynqmp_dma_device *zdev;
@@ -235,6 +239,7 @@ struct zynqmp_dma_chan {
u32 bus_width;
u32 src_burst_len;
u32 dst_burst_len;
+ u32 irq_offset;
};
/**
@@ -253,6 +258,14 @@ struct zynqmp_dma_device {
struct clk *clk_apb;
};
+struct zynqmp_dma_config {
+ u32 offset;
+};
+
+static const struct zynqmp_dma_config versal2_dma_config = {
+ .offset = IRQ_REG_OFFSET,
+};
+
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
u64 value)
{
@@ -892,6 +905,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
{
struct zynqmp_dma_chan *chan;
struct device_node *node = pdev->dev.of_node;
+ const struct zynqmp_dma_config *match_data;
int err;
chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
@@ -919,6 +933,10 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
return -EINVAL;
}
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (match_data)
+ chan->irq_offset = match_data->offset;
+
chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
zdev->chan = chan;
tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
@@ -1161,6 +1179,7 @@ static void zynqmp_dma_remove(struct platform_device *pdev)
}
static const struct of_device_id zynqmp_dma_of_match[] = {
+ { .compatible = "amd,versal2-dma-1.0", .data = &versal2_dma_config },
{ .compatible = "xlnx,zynqmp-dma-1.0", },
{}
};
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 3da94b382292..a6f6d467aacf 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -75,6 +75,17 @@ config EXTCON_INTEL_MRFLD
Say Y here to enable extcon support for charger detection / control
on the Intel Merrifield Basin Cove PMIC.
+config EXTCON_LC824206XA
+ tristate "LC824206XA extcon Support"
+ depends on I2C
+ depends on POWER_SUPPLY
+ help
+ Say Y here to enable support for the ON Semiconductor LC824206XA
+ microUSB switch and accessory detector chip. The chip is fully
+ controlled using I2C and enables USB data, stereo and mono audio,
+ video, microphone and UART data to share a common connector port.
+
config EXTCON_MAX14577
tristate "Maxim MAX14577/77836 EXTCON Support"
depends on MFD_MAX14577
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index f779adb5e4c7..0d6d23faf748 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
obj-$(CONFIG_EXTCON_INTEL_MRFLD) += extcon-intel-mrfld.o
+obj-$(CONFIG_EXTCON_LC824206XA) += extcon-lc824206xa.o
obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
diff --git a/drivers/extcon/extcon-lc824206xa.c b/drivers/extcon/extcon-lc824206xa.c
new file mode 100644
index 000000000000..56938748aea8
--- /dev/null
+++ b/drivers/extcon/extcon-lc824206xa.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ON Semiconductor LC824206XA Micro USB Switch driver
+ *
+ * Copyright (c) 2024 Hans de Goede <hansg@kernel.org>
+ *
+ * ON Semiconductor has an "Advance Information" datasheet available
+ * (ENA2222-D.PDF), but no full datasheet. So there is no documentation
+ * available for the registers.
+ *
+ * This driver is based on the register info from the extcon-fsa9285.c driver,
+ * from the Lollipop Android sources for the Lenovo Yoga Tablet 2 (Pro)
+ * 830 / 1050 / 1380 models. Note that despite the name, this is actually a
+ * driver for the LC824206XA, not the FSA9285. The Android sources can be downloaded
+ * from Lenovo's support page for these tablets, filename:
+ * yoga_tab_2_osc_android_to_lollipop_201505.rar.
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/extcon-provider.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+
+/*
+ * Register defines. As mentioned above, there is no datasheet with register
+ * info, so this may not be 100% accurate.
+ */
+#define REG00 0x00
+#define REG00_INIT_VALUE 0x01
+
+#define REG_STATUS 0x01
+#define STATUS_OVP BIT(0)
+#define STATUS_DATA_SHORT BIT(1)
+#define STATUS_VBUS_PRESENT BIT(2)
+#define STATUS_USB_ID GENMASK(7, 3)
+#define STATUS_USB_ID_GND 0x80
+#define STATUS_USB_ID_ACA 0xf0
+#define STATUS_USB_ID_FLOAT 0xf8
+
+/*
+ * This controls the DP/DM muxes + other switches,
+ * meaning of individual bits is unknown.
+ */
+#define REG_SWITCH_CONTROL 0x02
+#define SWITCH_STEREO_MIC 0xc8
+#define SWITCH_USB_HOST 0xec
+#define SWITCH_DISCONNECTED 0xf8
+#define SWITCH_USB_DEVICE 0xfc
+
+/* 5 bits? ADC 0x10 GND, 0x1a-0x1f ACA, 0x1f float */
+#define REG_ID_PIN_ADC_VALUE 0x03
+
+/* Masks for all 3 interrupt registers */
+#define INTR_ID_PIN_CHANGE BIT(0)
+#define INTR_VBUS_CHANGE BIT(1)
+/* Both of these get set after a continuous mode ADC conversion */
+#define INTR_ID_PIN_ADC_INT1 BIT(2)
+#define INTR_ID_PIN_ADC_INT2 BIT(3)
+/* Charger type available in reg 0x09 */
+#define INTR_CHARGER_DET_DONE BIT(4)
+#define INTR_OVP BIT(5)
+
+/* There are 7 interrupt sources, the use of bit 6 is unknown (OCP?) */
+#define INTR_ALL GENMASK(6, 0)
+
+/* Unmask interrupts this driver cares about */
+#define INTR_MASK \
+ (INTR_ALL & ~(INTR_ID_PIN_CHANGE | INTR_VBUS_CHANGE | INTR_CHARGER_DET_DONE))
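+/*
+ * With the defines above this evaluates to 0x6c: only the ID pin change,
+ * Vbus change and charger-detect-done events raise the hardware IRQ.
+ */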
+
+/* Active (event happened and not cleared yet) interrupts */
+#define REG_INTR_STATUS 0x04
+
+/*
+ * Writing a 1 to a bit here clears it in INTR_STATUS. These bits do NOT
+ * auto-reset to 0, so they must be set back to 0 manually after clearing.
+ */
+#define REG_INTR_CLEAR 0x05
+
+/* Interrupts whose bit is set to 1 here will not raise the HW IRQ */
+#define REG_INTR_MASK 0x06
+
+/* ID pin ADC control, meaning of individual bits is unknown */
+#define REG_ID_PIN_ADC_CTRL 0x07
+#define ID_PIN_ADC_AUTO 0x40
+#define ID_PIN_ADC_CONTINUOUS 0x44
+
+#define REG_CHARGER_DET 0x08
+#define CHARGER_DET_ON BIT(0)
+#define CHARGER_DET_CDP_ON BIT(1)
+#define CHARGER_DET_CDP_VAL BIT(2)
+
+#define REG_CHARGER_TYPE 0x09
+#define CHARGER_TYPE_UNKNOWN 0x00
+#define CHARGER_TYPE_DCP 0x01
+#define CHARGER_TYPE_SDP_OR_CDP 0x04
+#define CHARGER_TYPE_QC 0x06
+
+#define REG10 0x10
+#define REG10_INIT_VALUE 0x00
+
+struct lc824206xa_data {
+ struct work_struct work;
+ struct i2c_client *client;
+ struct extcon_dev *edev;
+ struct power_supply *psy;
+ struct regulator *vbus_boost;
+ unsigned int usb_type;
+ unsigned int cable;
+ unsigned int previous_cable;
+ u8 switch_control;
+ u8 previous_switch_control;
+ bool vbus_ok;
+ bool vbus_boost_enabled;
+ bool fastcharge_over_miclr;
+};
+
+static const unsigned int lc824206xa_cables[] = {
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
+ EXTCON_CHG_USB_CDP,
+ EXTCON_CHG_USB_DCP,
+ EXTCON_CHG_USB_ACA,
+ EXTCON_CHG_USB_FAST,
+ EXTCON_NONE,
+};
+
+/* Read/write register helpers that add error logging to the SMBus byte functions */
+static int lc824206xa_read_reg(struct lc824206xa_data *data, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, reg);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error %d reading reg 0x%02x\n", ret, reg);
+
+ return ret;
+}
+
+static int lc824206xa_write_reg(struct lc824206xa_data *data, u8 reg, u8 val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, reg, val);
+ if (ret < 0)
+ dev_err(&data->client->dev, "Error %d writing reg 0x%02x\n", ret, reg);
+
+ return ret;
+}
+
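+/*
+ * Read the ID pin ADC value. The sequence below (switch the ADC to
+ * continuous mode, read the result, switch back to auto mode) is inferred
+ * from the register names and the Android sources; without a datasheet
+ * the exact semantics are an assumption.
+ */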
+static int lc824206xa_get_id(struct lc824206xa_data *data)
+{
+ int ret;
+
+ ret = lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_CONTINUOUS);
+ if (ret)
+ return ret;
+
+ ret = lc824206xa_read_reg(data, REG_ID_PIN_ADC_VALUE);
+
+ lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_AUTO);
+
+ return ret;
+}
+
+static void lc824206xa_set_vbus_boost(struct lc824206xa_data *data, bool enable)
+{
+ int ret;
+
+ if (data->vbus_boost_enabled == enable)
+ return;
+
+ if (enable)
+ ret = regulator_enable(data->vbus_boost);
+ else
+ ret = regulator_disable(data->vbus_boost);
+
+ if (ret == 0)
+ data->vbus_boost_enabled = enable;
+ else
+ dev_err(&data->client->dev, "Error updating Vbus boost regulator: %d\n", ret);
+}
+
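+/*
+ * Map the detected charger type to a power_supply usb_type, an extcon
+ * cable and a switch setting. For CHARGER_TYPE_SDP_OR_CDP a second
+ * detection phase (CHARGER_DET_CDP_ON, then checking CHARGER_DET_CDP_VAL
+ * after a 100 ms delay) distinguishes CDP from SDP.
+ */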
+static void lc824206xa_charger_detect(struct lc824206xa_data *data)
+{
+ int charger_type, ret;
+
+ charger_type = lc824206xa_read_reg(data, REG_CHARGER_TYPE);
+ if (charger_type < 0)
+ return;
+
+ dev_dbg(&data->client->dev, "charger type 0x%02x\n", charger_type);
+
+ switch (charger_type) {
+ case CHARGER_TYPE_UNKNOWN:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ /* Treat as SDP */
+ data->cable = EXTCON_CHG_USB_SDP;
+ data->switch_control = SWITCH_USB_DEVICE;
+ break;
+ case CHARGER_TYPE_SDP_OR_CDP:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ data->cable = EXTCON_CHG_USB_SDP;
+ data->switch_control = SWITCH_USB_DEVICE;
+
+ ret = lc824206xa_write_reg(data, REG_CHARGER_DET,
+ CHARGER_DET_CDP_ON | CHARGER_DET_ON);
+ if (ret < 0)
+ break;
+
+ msleep(100);
+ ret = lc824206xa_read_reg(data, REG_CHARGER_DET);
+ if (ret >= 0 && (ret & CHARGER_DET_CDP_VAL)) {
+ data->usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+ data->cable = EXTCON_CHG_USB_CDP;
+ }
+
+ lc824206xa_write_reg(data, REG_CHARGER_DET, CHARGER_DET_ON);
+ break;
+ case CHARGER_TYPE_DCP:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ data->cable = EXTCON_CHG_USB_DCP;
+ if (data->fastcharge_over_miclr)
+ data->switch_control = SWITCH_STEREO_MIC;
+ else
+ data->switch_control = SWITCH_DISCONNECTED;
+ break;
+ case CHARGER_TYPE_QC:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ data->cable = EXTCON_CHG_USB_DCP;
+ data->switch_control = SWITCH_DISCONNECTED;
+ break;
+ default:
+ dev_warn(&data->client->dev, "Unknown charger type: 0x%02x\n", charger_type);
+ break;
+ }
+}
+
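+/*
+ * Main worker: read the status register, resolve ambiguous ID readings
+ * through the ID pin ADC, then update the Vbus boost regulator, the
+ * DP/DM switch, the extcon cable state and the power_supply accordingly.
+ */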
+static void lc824206xa_work(struct work_struct *work)
+{
+ struct lc824206xa_data *data = container_of(work, struct lc824206xa_data, work);
+ bool vbus_boost_enable = false;
+ int status, id;
+
+ status = lc824206xa_read_reg(data, REG_STATUS);
+ if (status < 0)
+ return;
+
+ dev_dbg(&data->client->dev, "status 0x%02x\n", status);
+
+ data->vbus_ok = (status & (STATUS_VBUS_PRESENT | STATUS_OVP)) == STATUS_VBUS_PRESENT;
+
+ /* Read id pin ADC if necessary */
+ switch (status & STATUS_USB_ID) {
+ case STATUS_USB_ID_GND:
+ case STATUS_USB_ID_FLOAT:
+ break;
+ default:
+ /* Happens when the connector is inserted slowly, log at dbg level */
+ dev_dbg(&data->client->dev, "Unknown status 0x%02x\n", status);
+ fallthrough;
+ case STATUS_USB_ID_ACA:
+ id = lc824206xa_get_id(data);
+ dev_dbg(&data->client->dev, "RID 0x%02x\n", id);
+ switch (id) {
+ case 0x10:
+ status = STATUS_USB_ID_GND;
+ break;
+ case 0x18 ... 0x1e:
+ status = STATUS_USB_ID_ACA;
+ break;
+ case 0x1f:
+ status = STATUS_USB_ID_FLOAT;
+ break;
+ default:
+ dev_warn(&data->client->dev, "Unknown RID 0x%02x\n", id);
+ return;
+ }
+ }
+
+ /* Check for out of spec OTG charging hubs, treat as ACA */
+ if ((status & STATUS_USB_ID) == STATUS_USB_ID_GND &&
+ data->vbus_ok && !data->vbus_boost_enabled) {
+ dev_info(&data->client->dev, "Out of spec USB host adapter with Vbus present, not enabling 5V output\n");
+ status = STATUS_USB_ID_ACA;
+ }
+
+ switch (status & STATUS_USB_ID) {
+ case STATUS_USB_ID_ACA:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_ACA;
+ data->cable = EXTCON_CHG_USB_ACA;
+ data->switch_control = SWITCH_USB_HOST;
+ break;
+ case STATUS_USB_ID_GND:
+ data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ data->cable = EXTCON_USB_HOST;
+ data->switch_control = SWITCH_USB_HOST;
+ vbus_boost_enable = true;
+ break;
+ case STATUS_USB_ID_FLOAT:
+ /* When fast charging with Vbus > 5V, OVP will be set */
+ if (data->fastcharge_over_miclr &&
+ data->switch_control == SWITCH_STEREO_MIC &&
+ (status & STATUS_OVP)) {
+ data->cable = EXTCON_CHG_USB_FAST;
+ break;
+ }
+
+ if (data->vbus_ok) {
+ lc824206xa_charger_detect(data);
+ } else {
+ data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ data->cable = EXTCON_NONE;
+ data->switch_control = SWITCH_DISCONNECTED;
+ }
+ break;
+ }
+
+ lc824206xa_set_vbus_boost(data, vbus_boost_enable);
+
+ if (data->switch_control != data->previous_switch_control) {
+ lc824206xa_write_reg(data, REG_SWITCH_CONTROL, data->switch_control);
+ data->previous_switch_control = data->switch_control;
+ }
+
+ if (data->cable != data->previous_cable) {
+ extcon_set_state_sync(data->edev, data->previous_cable, false);
+ extcon_set_state_sync(data->edev, data->cable, true);
+ data->previous_cable = data->cable;
+ }
+
+ power_supply_changed(data->psy);
+}
+
+static irqreturn_t lc824206xa_irq(int irq, void *_data)
+{
+ struct lc824206xa_data *data = _data;
+ int intr_status;
+
+ intr_status = lc824206xa_read_reg(data, REG_INTR_STATUS);
+ if (intr_status < 0)
+ intr_status = INTR_ALL; /* Should never happen, clear all */
+
+ dev_dbg(&data->client->dev, "interrupt 0x%02x\n", intr_status);
+
+ lc824206xa_write_reg(data, REG_INTR_CLEAR, intr_status);
+ lc824206xa_write_reg(data, REG_INTR_CLEAR, 0);
+
+ schedule_work(&data->work);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Newer charger (power_supply) drivers expect the max input current to be
+ * provided by a parent power_supply device for the charger chip.
+ */
+static int lc824206xa_psy_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct lc824206xa_data *data = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = data->vbus_ok && !data->vbus_boost_enabled;
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = data->usb_type;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
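+ /*
+ * Values are in microamps. 1.5 A for CDP and 0.5 A for SDP follow
+ * the usual BC1.2 limits; the 2 A value for DCP/ACA is presumably a
+ * board-level choice, there is no datasheet to confirm it.
+ */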
+ switch (data->usb_type) {
+ case POWER_SUPPLY_USB_TYPE_DCP:
+ case POWER_SUPPLY_USB_TYPE_ACA:
+ val->intval = 2000000;
+ break;
+ case POWER_SUPPLY_USB_TYPE_CDP:
+ val->intval = 1500000;
+ break;
+ default:
+ val->intval = 500000;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const enum power_supply_property lc824206xa_psy_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static const struct power_supply_desc lc824206xa_psy_desc = {
+ .name = "lc824206xa-charger-detect",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_ACA) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
+ .properties = lc824206xa_psy_props,
+ .num_properties = ARRAY_SIZE(lc824206xa_psy_props),
+ .get_property = lc824206xa_psy_get_prop,
+};
+
+static int lc824206xa_probe(struct i2c_client *client)
+{
+ struct power_supply_config psy_cfg = { };
+ struct device *dev = &client->dev;
+ struct lc824206xa_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ INIT_WORK(&data->work, lc824206xa_work);
+ data->cable = EXTCON_NONE;
+ data->previous_cable = EXTCON_NONE;
+ data->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ /* Some designs use a custom fast-charge protocol over the mic L/R inputs */
+ data->fastcharge_over_miclr =
+ device_property_read_bool(dev, "onnn,enable-miclr-for-dcp");
+
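+ /* Boost regulator used to drive 5V on Vbus when in USB host mode */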
+ data->vbus_boost = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(data->vbus_boost))
+ return dev_err_probe(dev, PTR_ERR(data->vbus_boost),
+ "getting regulator\n");
+
+ /* Init: clear and mask interrupts, put the ADC in auto mode, enable charger detection */
+ ret = lc824206xa_write_reg(data, REG00, REG00_INIT_VALUE);
+ ret |= lc824206xa_write_reg(data, REG10, REG10_INIT_VALUE);
+ msleep(100);
+ ret |= lc824206xa_write_reg(data, REG_INTR_CLEAR, INTR_ALL);
+ ret |= lc824206xa_write_reg(data, REG_INTR_CLEAR, 0);
+ ret |= lc824206xa_write_reg(data, REG_INTR_MASK, INTR_MASK);
+ ret |= lc824206xa_write_reg(data, REG_ID_PIN_ADC_CTRL, ID_PIN_ADC_AUTO);
+ ret |= lc824206xa_write_reg(data, REG_CHARGER_DET, CHARGER_DET_ON);
+ if (ret)
+ return -EIO;
+
+ /* Initialize extcon device */
+ data->edev = devm_extcon_dev_allocate(dev, lc824206xa_cables);
+ if (IS_ERR(data->edev))
+ return PTR_ERR(data->edev);
+
+ ret = devm_extcon_dev_register(dev, data->edev);
+ if (ret)
+ return dev_err_probe(dev, ret, "registering extcon device\n");
+
+ psy_cfg.drv_data = data;
+ data->psy = devm_power_supply_register(dev, &lc824206xa_psy_desc, &psy_cfg);
+ if (IS_ERR(data->psy))
+ return dev_err_probe(dev, PTR_ERR(data->psy), "registering power supply\n");
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL, lc824206xa_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ KBUILD_MODNAME, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting IRQ\n");
+
+ /* Sync initial state */
+ schedule_work(&data->work);
+ return 0;
+}
+
+static const struct i2c_device_id lc824206xa_i2c_ids[] = {
+ { "lc824206xa" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lc824206xa_i2c_ids);
+
+static struct i2c_driver lc824206xa_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = lc824206xa_probe,
+ .id_table = lc824206xa_i2c_ids,
+};
+
+module_i2c_driver(lc824206xa_driver);
+
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("LC824206XA Micro USB Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index f8b99dd6cd82..01354b9de8b2 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -168,7 +168,6 @@ static size_t required_space(struct fw_descriptor *desc)
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
- int ret;
/*
* Check descriptor is valid; the length of all blocks in the
@@ -182,29 +181,25 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
if (i != desc->length)
return -EINVAL;
- mutex_lock(&card_mutex);
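+ // guard() from <linux/cleanup.h> drops the mutex automatically when it goes out of scope.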
+ guard(mutex)(&card_mutex);
- if (config_rom_length + required_space(desc) > 256) {
- ret = -EBUSY;
- } else {
- list_add_tail(&desc->link, &descriptor_list);
- config_rom_length += required_space(desc);
- descriptor_count++;
- if (desc->immediate > 0)
- descriptor_count++;
- update_config_roms();
- ret = 0;
- }
+ if (config_rom_length + required_space(desc) > 256)
+ return -EBUSY;
- mutex_unlock(&card_mutex);
+ list_add_tail(&desc->link, &descriptor_list);
+ config_rom_length += required_space(desc);
+ descriptor_count++;
+ if (desc->immediate > 0)
+ descriptor_count++;
+ update_config_roms();
- return ret;
+ return 0;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
- mutex_lock(&card_mutex);
+ guard(mutex)(&card_mutex);
list_del(&desc->link);
config_rom_length -= required_space(desc);
@@ -212,8 +207,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
if (desc->immediate > 0)
descriptor_count--;
update_config_roms();
-
- mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
@@ -381,11 +374,11 @@ static void bm_work(struct work_struct *work)
bm_id = be32_to_cpu(transaction_data[0]);
- spin_lock_irq(&card->lock);
- if (rcode == RCODE_COMPLETE && generation == card->generation)
- card->bm_node_id =
- bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
- spin_unlock_irq(&card->lock);
+ scoped_guard(spinlock_irq, &card->lock) {
+ if (rcode == RCODE_COMPLETE && generation == card->generation)
+ card->bm_node_id =
+ bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
+ }
if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
/* Somebody else is BM. Only act as IRM. */
@@ -578,25 +571,47 @@ void fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);
-int fw_card_add(struct fw_card *card,
- u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
+ unsigned int supported_isoc_contexts)
{
+ struct workqueue_struct *isoc_wq;
int ret;
+ // This workqueue should be:
+ // * != WQ_BH Sleepable.
+ // * == WQ_UNBOUND Any core can process data for isoc context. The
+ // implementation of a unit protocol could occupy the core
+ // for longer.
+ // * != WQ_MEM_RECLAIM Not used by any block device backend.
+ // * == WQ_FREEZABLE Isochronous communication occurs at regular intervals in real
+ // time, thus should be drained if possible at the freeze phase.
+ // * == WQ_HIGHPRI High priority to process semi-realtime timestamped data.
+ // * == WQ_SYSFS Parameters are available via sysfs.
+ // * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
+ // contexts if they are scheduled to the same cycle.
+ isoc_wq = alloc_workqueue("firewire-isoc-card%u",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ supported_isoc_contexts, card->index);
+ if (!isoc_wq)
+ return -ENOMEM;
+
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
- mutex_lock(&card_mutex);
+ guard(mutex)(&card_mutex);
generate_config_rom(card, tmp_config_rom);
ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret == 0)
- list_add_tail(&card->link, &card_list);
+ if (ret < 0) {
+ destroy_workqueue(isoc_wq);
+ return ret;
+ }
- mutex_unlock(&card_mutex);
+ card->isoc_wq = isoc_wq;
+ list_add_tail(&card->link, &card_list);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(fw_card_add);
@@ -714,29 +729,31 @@ EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
- unsigned long flags;
+
+ might_sleep();
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_schedule_bus_reset(card, false, true);
- mutex_lock(&card_mutex);
- list_del_init(&card->link);
- mutex_unlock(&card_mutex);
+ scoped_guard(mutex, &card_mutex)
+ list_del_init(&card->link);
/* Switch off most of the card driver interface. */
dummy_driver.free_iso_context = card->driver->free_iso_context;
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
+ drain_workqueue(card->isoc_wq);
- spin_lock_irqsave(&card->lock, flags);
- fw_destroy_nodes(card);
- spin_unlock_irqrestore(&card->lock, flags);
+ scoped_guard(spinlock_irqsave, &card->lock)
+ fw_destroy_nodes(card);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);
wait_for_completion(&card->done);
+ destroy_workqueue(card->isoc_wq);
+
WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 9a7dc90330a3..b360dca2c69e 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -14,7 +14,6 @@
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
-#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -37,6 +36,8 @@
#include "core.h"
#include <trace/events/firewire.h>
+#include "packet-header-definitions.h"
+
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
@@ -52,7 +53,7 @@ struct client {
spinlock_t lock;
bool in_shutdown;
- struct idr resource_idr;
+ struct xarray resource_xa;
struct list_head event_list;
wait_queue_head_t wait;
wait_queue_head_t tx_flush_wait;
@@ -137,8 +138,41 @@ struct iso_resource {
struct iso_resource_event *e_alloc, *e_dealloc;
};
+static struct address_handler_resource *to_address_handler_resource(struct client_resource *resource)
+{
+ return container_of(resource, struct address_handler_resource, resource);
+}
+
+static struct inbound_transaction_resource *to_inbound_transaction_resource(struct client_resource *resource)
+{
+ return container_of(resource, struct inbound_transaction_resource, resource);
+}
+
+static struct descriptor_resource *to_descriptor_resource(struct client_resource *resource)
+{
+ return container_of(resource, struct descriptor_resource, resource);
+}
+
+static struct iso_resource *to_iso_resource(struct client_resource *resource)
+{
+ return container_of(resource, struct iso_resource, resource);
+}
+
static void release_iso_resource(struct client *, struct client_resource *);
+static int is_iso_resource(const struct client_resource *resource)
+{
+ return resource->release == release_iso_resource;
+}
+
+static void release_transaction(struct client *client,
+ struct client_resource *resource);
+
+static int is_outbound_transaction_resource(const struct client_resource *resource)
+{
+ return resource->release == release_transaction;
+}
+
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
client_get(r->client);
@@ -146,13 +180,6 @@ static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
client_put(r->client);
}
-static void schedule_if_iso_resource(struct client_resource *resource)
-{
- if (resource->release == release_iso_resource)
- schedule_iso_resource(container_of(resource,
- struct iso_resource, resource), 0);
-}
-
/*
* dequeue_event() just kfree()'s the event, so the event has to be
* the first field in a struct XYZ_event.
@@ -269,7 +296,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
client->device = device;
spin_lock_init(&client->lock);
- idr_init(&client->resource_idr);
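+ // XA_FLAGS_ALLOC1 hands out handles starting at 1; XA_FLAGS_LOCK_BH takes
+ // the internal xa_lock with bottom halves disabled.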
+ xa_init_flags(&client->resource_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
init_waitqueue_head(&client->tx_flush_wait);
@@ -285,19 +312,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
static void queue_event(struct client *client, struct event *event,
void *data0, size_t size0, void *data1, size_t size1)
{
- unsigned long flags;
-
event->v[0].data = data0;
event->v[0].size = size0;
event->v[1].data = data1;
event->v[1].size = size1;
- spin_lock_irqsave(&client->lock, flags);
- if (client->in_shutdown)
- kfree(event);
- else
- list_add_tail(&event->link, &client->event_list);
- spin_unlock_irqrestore(&client->lock, flags);
+ scoped_guard(spinlock_irqsave, &client->lock) {
+ if (client->in_shutdown)
+ kfree(event);
+ else
+ list_add_tail(&event->link, &client->event_list);
+ }
wake_up_interruptible(&client->wait);
}
@@ -319,10 +344,10 @@ static int dequeue_event(struct client *client,
fw_device_is_shutdown(client->device))
return -ENODEV;
- spin_lock_irq(&client->lock);
- event = list_first_entry(&client->event_list, struct event, link);
- list_del(&event->link);
- spin_unlock_irq(&client->lock);
+ scoped_guard(spinlock_irq, &client->lock) {
+ event = list_first_entry(&client->event_list, struct event, link);
+ list_del(&event->link);
+ }
total = 0;
for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
@@ -354,7 +379,7 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
{
struct fw_card *card = client->device->card;
- spin_lock_irq(&card->lock);
+ guard(spinlock_irq)(&card->lock);
event->closure = client->bus_reset_closure;
event->type = FW_CDEV_EVENT_BUS_RESET;
@@ -364,8 +389,6 @@ static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
event->bm_node_id = card->bm_node_id;
event->irm_node_id = card->irm_node->node_id;
event->root_node_id = card->root_node->node_id;
-
- spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
@@ -373,22 +396,17 @@ static void for_each_client(struct fw_device *device,
{
struct client *c;
- mutex_lock(&device->client_list_mutex);
+ guard(mutex)(&device->client_list_mutex);
+
list_for_each_entry(c, &device->client_list, link)
callback(c);
- mutex_unlock(&device->client_list_mutex);
-}
-
-static int schedule_reallocations(int id, void *p, void *data)
-{
- schedule_if_iso_resource(p);
-
- return 0;
}
static void queue_bus_reset_event(struct client *client)
{
struct bus_reset_event *e;
+ struct client_resource *resource;
+ unsigned long index;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
@@ -399,9 +417,12 @@ static void queue_bus_reset_event(struct client *client)
queue_event(client, &e->event,
&e->reset, sizeof(e->reset), NULL, 0);
- spin_lock_irq(&client->lock);
- idr_for_each(&client->resource_idr, schedule_reallocations, client);
- spin_unlock_irq(&client->lock);
+ guard(spinlock_irq)(&client->lock);
+
+ xa_for_each(&client->resource_xa, index, resource) {
+ if (is_iso_resource(resource))
+ schedule_iso_resource(to_iso_resource(resource), 0);
+ }
}
void fw_device_cdev_update(struct fw_device *device)
@@ -452,23 +473,20 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
a->version = FW_CDEV_KERNEL_VERSION;
a->card = client->device->card->index;
- down_read(&fw_device_rwsem);
-
- if (a->rom != 0) {
- size_t want = a->rom_length;
- size_t have = client->device->config_rom_length * 4;
+ scoped_guard(rwsem_read, &fw_device_rwsem) {
+ if (a->rom != 0) {
+ size_t want = a->rom_length;
+ size_t have = client->device->config_rom_length * 4;
- ret = copy_to_user(u64_to_uptr(a->rom),
- client->device->config_rom, min(want, have));
+ ret = copy_to_user(u64_to_uptr(a->rom), client->device->config_rom,
+ min(want, have));
+ if (ret != 0)
+ return -EFAULT;
+ }
+ a->rom_length = client->device->config_rom_length * 4;
}
- a->rom_length = client->device->config_rom_length * 4;
-
- up_read(&fw_device_rwsem);
- if (ret != 0)
- return -EFAULT;
-
- mutex_lock(&client->device->client_list_mutex);
+ guard(mutex)(&client->device->client_list_mutex);
client->bus_reset_closure = a->bus_reset_closure;
if (a->bus_reset != 0) {
@@ -479,37 +497,36 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
if (ret == 0 && list_empty(&client->link))
list_add_tail(&client->link, &client->device->client_list);
- mutex_unlock(&client->device->client_list_mutex);
-
return ret ? -EFAULT : 0;
}
-static int add_client_resource(struct client *client,
- struct client_resource *resource, gfp_t gfp_mask)
+static int add_client_resource(struct client *client, struct client_resource *resource,
+ gfp_t gfp_mask)
{
- bool preload = gfpflags_allow_blocking(gfp_mask);
- unsigned long flags;
int ret;
- if (preload)
- idr_preload(gfp_mask);
- spin_lock_irqsave(&client->lock, flags);
+ scoped_guard(spinlock_irqsave, &client->lock) {
+ u32 index;
- if (client->in_shutdown)
- ret = -ECANCELED;
- else
- ret = idr_alloc(&client->resource_idr, resource, 0, 0,
- GFP_NOWAIT);
- if (ret >= 0) {
- resource->handle = ret;
- client_get(client);
- schedule_if_iso_resource(resource);
+ if (client->in_shutdown) {
+ ret = -ECANCELED;
+ } else {
+ if (gfpflags_allow_blocking(gfp_mask)) {
+ ret = xa_alloc(&client->resource_xa, &index, resource, xa_limit_32b,
+ GFP_NOWAIT);
+ } else {
+ ret = xa_alloc_bh(&client->resource_xa, &index, resource,
+ xa_limit_32b, GFP_NOWAIT);
+ }
+ }
+ if (ret >= 0) {
+ resource->handle = index;
+ client_get(client);
+ if (is_iso_resource(resource))
+ schedule_iso_resource(to_iso_resource(resource), 0);
+ }
}
- spin_unlock_irqrestore(&client->lock, flags);
- if (preload)
- idr_preload_end();
-
return ret < 0 ? ret : 0;
}
@@ -517,19 +534,19 @@ static int release_client_resource(struct client *client, u32 handle,
client_resource_release_fn_t release,
struct client_resource **return_resource)
{
+ unsigned long index = handle;
struct client_resource *resource;
- spin_lock_irq(&client->lock);
- if (client->in_shutdown)
- resource = NULL;
- else
- resource = idr_find(&client->resource_idr, handle);
- if (resource && resource->release == release)
- idr_remove(&client->resource_idr, handle);
- spin_unlock_irq(&client->lock);
+ scoped_guard(spinlock_irq, &client->lock) {
+ if (client->in_shutdown)
+ return -EINVAL;
- if (!(resource && resource->release == release))
- return -EINVAL;
+ resource = xa_load(&client->resource_xa, index);
+ if (!resource || resource->release != release)
+ return -EINVAL;
+
+ xa_erase(&client->resource_xa, handle);
+ }
if (return_resource)
*return_resource = resource;
@@ -551,13 +568,13 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts
{
struct outbound_transaction_event *e = data;
struct client *client = e->client;
- unsigned long flags;
+ unsigned long index = e->r.resource.handle;
- spin_lock_irqsave(&client->lock, flags);
- idr_remove(&client->resource_idr, e->r.resource.handle);
- if (client->in_shutdown)
- wake_up(&client->tx_flush_wait);
- spin_unlock_irqrestore(&client->lock, flags);
+ scoped_guard(spinlock_irqsave, &client->lock) {
+ xa_erase(&client->resource_xa, index);
+ if (client->in_shutdown)
+ wake_up(&client->tx_flush_wait);
+ }
switch (e->rsp.without_tstamp.type) {
case FW_CDEV_EVENT_RESPONSE:
@@ -599,13 +616,13 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
break;
+ }
default:
WARN_ON(1);
break;
}
- }
- /* Drop the idr's reference */
+ // Drop the xarray's reference.
client_put(client);
}
@@ -693,8 +710,7 @@ static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
static void release_request(struct client *client,
struct client_resource *resource)
{
- struct inbound_transaction_resource *r = container_of(resource,
- struct inbound_transaction_resource, resource);
+ struct inbound_transaction_resource *r = to_inbound_transaction_resource(resource);
if (r->is_fcp)
fw_request_put(r->request);
@@ -804,8 +820,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
static void release_address_handler(struct client *client,
struct client_resource *resource)
{
- struct address_handler_resource *r =
- container_of(resource, struct address_handler_resource, resource);
+ struct address_handler_resource *r = to_address_handler_resource(resource);
fw_core_remove_address_handler(&r->handler);
kfree(r);
@@ -869,8 +884,7 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
release_request, &resource) < 0)
return -EINVAL;
- r = container_of(resource, struct inbound_transaction_resource,
- resource);
+ r = to_inbound_transaction_resource(resource);
if (r->is_fcp) {
fw_request_put(r->request);
goto out;
@@ -904,8 +918,7 @@ static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
static void release_descriptor(struct client *client,
struct client_resource *resource)
{
- struct descriptor_resource *r =
- container_of(resource, struct descriptor_resource, resource);
+ struct descriptor_resource *r = to_descriptor_resource(resource);
fw_core_remove_descriptor(&r->descriptor);
kfree(r);
@@ -969,7 +982,7 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
struct client *client = data;
struct iso_interrupt_event *e;
- e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+ e = kmalloc(sizeof(*e) + header_length, GFP_KERNEL);
if (e == NULL)
return;
@@ -988,7 +1001,7 @@ static void iso_mc_callback(struct fw_iso_context *context,
struct client *client = data;
struct iso_interrupt_mc_event *e;
- e = kmalloc(sizeof(*e), GFP_ATOMIC);
+ e = kmalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
return;
@@ -1070,10 +1083,10 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
context->drop_overflow_headers = true;
- /* We only support one context at this time. */
- spin_lock_irq(&client->lock);
+ // We only support one context at this time.
+ guard(spinlock_irq)(&client->lock);
+
if (client->iso_context != NULL) {
- spin_unlock_irq(&client->lock);
fw_iso_context_destroy(context);
return -EBUSY;
@@ -1083,7 +1096,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
client->device->card,
iso_dma_direction(context));
if (ret < 0) {
- spin_unlock_irq(&client->lock);
fw_iso_context_destroy(context);
return ret;
@@ -1092,7 +1104,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
}
client->iso_closure = a->closure;
client->iso_context = context;
- spin_unlock_irq(&client->lock);
a->handle = 0;
@@ -1266,29 +1277,27 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
struct fw_card *card = client->device->card;
struct timespec64 ts = {0, 0};
u32 cycle_time = 0;
- int ret = 0;
+ int ret;
- local_irq_disable();
+ guard(irq)();
ret = fw_card_read_cycle_time(card, &cycle_time);
if (ret < 0)
- goto end;
+ return ret;
switch (a->clk_id) {
case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break;
case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break;
case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-end:
- local_irq_enable();
a->tv_sec = ts.tv_sec;
a->tv_nsec = ts.tv_nsec;
a->cycle_timer = cycle_time;
- return ret;
+ return 0;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
@@ -1311,28 +1320,28 @@ static void iso_resource_work(struct work_struct *work)
struct iso_resource *r =
container_of(work, struct iso_resource, work.work);
struct client *client = r->client;
+ unsigned long index = r->resource.handle;
int generation, channel, bandwidth, todo;
bool skip, free, success;
- spin_lock_irq(&client->lock);
- generation = client->device->generation;
- todo = r->todo;
- /* Allow 1000ms grace period for other reallocations. */
- if (todo == ISO_RES_ALLOC &&
- time_before64(get_jiffies_64(),
- client->device->card->reset_jiffies + HZ)) {
- schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
- skip = true;
- } else {
- /* We could be called twice within the same generation. */
- skip = todo == ISO_RES_REALLOC &&
- r->generation == generation;
+ scoped_guard(spinlock_irq, &client->lock) {
+ generation = client->device->generation;
+ todo = r->todo;
+ // Allow 1000ms grace period for other reallocations.
+ if (todo == ISO_RES_ALLOC &&
+ time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
+ schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
+ skip = true;
+ } else {
+ // We could be called twice within the same generation.
+ skip = todo == ISO_RES_REALLOC &&
+ r->generation == generation;
+ }
+ free = todo == ISO_RES_DEALLOC ||
+ todo == ISO_RES_ALLOC_ONCE ||
+ todo == ISO_RES_DEALLOC_ONCE;
+ r->generation = generation;
}
- free = todo == ISO_RES_DEALLOC ||
- todo == ISO_RES_ALLOC_ONCE ||
- todo == ISO_RES_DEALLOC_ONCE;
- r->generation = generation;
- spin_unlock_irq(&client->lock);
if (skip)
goto out;
@@ -1346,7 +1355,7 @@ static void iso_resource_work(struct work_struct *work)
todo == ISO_RES_ALLOC_ONCE);
/*
* Is this generation outdated already? As long as this resource sticks
- * in the idr, it will be scheduled again for a newer generation or at
+ * in the xarray, it will be scheduled again for a newer generation or at
* shutdown.
*/
if (channel == -EAGAIN &&
@@ -1355,24 +1364,20 @@ static void iso_resource_work(struct work_struct *work)
success = channel >= 0 || bandwidth > 0;
- spin_lock_irq(&client->lock);
- /*
- * Transit from allocation to reallocation, except if the client
- * requested deallocation in the meantime.
- */
- if (r->todo == ISO_RES_ALLOC)
- r->todo = ISO_RES_REALLOC;
- /*
- * Allocation or reallocation failure? Pull this resource out of the
- * idr and prepare for deletion, unless the client is shutting down.
- */
- if (r->todo == ISO_RES_REALLOC && !success &&
- !client->in_shutdown &&
- idr_remove(&client->resource_idr, r->resource.handle)) {
- client_put(client);
- free = true;
+ scoped_guard(spinlock_irq, &client->lock) {
+ // Transit from allocation to reallocation, except if the client
+ // requested deallocation in the meantime.
+ if (r->todo == ISO_RES_ALLOC)
+ r->todo = ISO_RES_REALLOC;
+ // Allocation or reallocation failure? Pull this resource out of the
+ // xarray and prepare for deletion, unless the client is shutting down.
+ if (r->todo == ISO_RES_REALLOC && !success &&
+ !client->in_shutdown &&
+ xa_erase(&client->resource_xa, index)) {
+ client_put(client);
+ free = true;
+ }
}
- spin_unlock_irq(&client->lock);
if (todo == ISO_RES_ALLOC && channel >= 0)
r->channels = 1ULL << channel;
@@ -1407,13 +1412,12 @@ static void iso_resource_work(struct work_struct *work)
static void release_iso_resource(struct client *client,
struct client_resource *resource)
{
- struct iso_resource *r =
- container_of(resource, struct iso_resource, resource);
+ struct iso_resource *r = to_iso_resource(resource);
+
+ guard(spinlock_irq)(&client->lock);
- spin_lock_irq(&client->lock);
r->todo = ISO_RES_DEALLOC;
schedule_iso_resource(r, 0);
- spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
@@ -1635,7 +1639,7 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
e->client = client;
e->p.speed = SCODE_100;
e->p.generation = a->generation;
- e->p.header[0] = TCODE_LINK_INTERNAL << 4;
+ async_header_set_tcode(e->p.header, TCODE_LINK_INTERNAL);
e->p.header[1] = a->data[0];
e->p.header[2] = a->data[1];
e->p.header_length = 12;
@@ -1676,26 +1680,22 @@ static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg
if (!client->device->is_local)
return -ENOSYS;
- spin_lock_irq(&card->lock);
+ guard(spinlock_irq)(&card->lock);
list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
client->phy_receiver_closure = a->closure;
- spin_unlock_irq(&card->lock);
-
return 0;
}
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
struct client *client;
- struct inbound_phy_packet_event *e;
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
+ guard(spinlock_irqsave)(&card->lock);
list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
- e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
if (e == NULL)
break;
@@ -1723,8 +1723,6 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
}
}
-
- spin_unlock_irqrestore(&card->lock, flags);
}
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
@@ -1821,16 +1819,15 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
if (ret < 0)
return ret;
- spin_lock_irq(&client->lock);
- if (client->iso_context) {
- ret = fw_iso_buffer_map_dma(&client->buffer,
- client->device->card,
- iso_dma_direction(client->iso_context));
- client->buffer_is_mapped = (ret == 0);
+ scoped_guard(spinlock_irq, &client->lock) {
+ if (client->iso_context) {
+ ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
+ iso_dma_direction(client->iso_context));
+ if (ret < 0)
+ goto fail;
+ client->buffer_is_mapped = true;
+ }
}
- spin_unlock_irq(&client->lock);
- if (ret < 0)
- goto fail;
ret = vm_map_pages_zero(vma, client->buffer.pages,
client->buffer.page_count);
@@ -1843,48 +1840,33 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-static int is_outbound_transaction_resource(int id, void *p, void *data)
+static bool has_outbound_transactions(struct client *client)
{
- struct client_resource *resource = p;
-
- return resource->release == release_transaction;
-}
-
-static int has_outbound_transactions(struct client *client)
-{
- int ret;
-
- spin_lock_irq(&client->lock);
- ret = idr_for_each(&client->resource_idr,
- is_outbound_transaction_resource, NULL);
- spin_unlock_irq(&client->lock);
+ struct client_resource *resource;
+ unsigned long index;
- return ret;
-}
+ guard(spinlock_irq)(&client->lock);
-static int shutdown_resource(int id, void *p, void *data)
-{
- struct client_resource *resource = p;
- struct client *client = data;
-
- resource->release(client, resource);
- client_put(client);
+ xa_for_each(&client->resource_xa, index, resource) {
+ if (is_outbound_transaction_resource(resource))
+ return true;
+ }
- return 0;
+ return false;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
struct client *client = file->private_data;
struct event *event, *next_event;
+ struct client_resource *resource;
+ unsigned long index;
- spin_lock_irq(&client->device->card->lock);
- list_del(&client->phy_receiver_link);
- spin_unlock_irq(&client->device->card->lock);
+ scoped_guard(spinlock_irq, &client->device->card->lock)
+ list_del(&client->phy_receiver_link);
- mutex_lock(&client->device->client_list_mutex);
- list_del(&client->link);
- mutex_unlock(&client->device->client_list_mutex);
+ scoped_guard(mutex, &client->device->client_list_mutex)
+ list_del(&client->link);
if (client->iso_context)
fw_iso_context_destroy(client->iso_context);
@@ -1892,15 +1874,17 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
if (client->buffer.pages)
fw_iso_buffer_destroy(&client->buffer, client->device->card);
- /* Freeze client->resource_idr and client->event_list */
- spin_lock_irq(&client->lock);
- client->in_shutdown = true;
- spin_unlock_irq(&client->lock);
+ // Freeze client->resource_xa and client->event_list.
+ scoped_guard(spinlock_irq, &client->lock)
+ client->in_shutdown = true;
wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
- idr_for_each(&client->resource_idr, shutdown_resource, client);
- idr_destroy(&client->resource_idr);
+ xa_for_each(&client->resource_xa, index, resource) {
+ resource->release(client, resource);
+ client_put(client);
+ }
+ xa_destroy(&client->resource_xa);
list_for_each_entry_safe(event, next_event, &client->event_list, link)
kfree(event);
@@ -1927,7 +1911,6 @@ static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
const struct file_operations fw_device_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = fw_device_op_open,
.read = fw_device_op_read,
.unlocked_ioctl = fw_device_op_ioctl,
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 00e9a13e6c45..a99fe35f1f0d 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
-#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -288,7 +287,7 @@ static ssize_t show_immediate(struct device *dev,
const u32 *directories[] = {NULL, NULL};
int i, value = -1;
- down_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
if (is_fw_unit(dev)) {
directories[0] = fw_unit(dev)->directory;
@@ -317,8 +316,6 @@ static ssize_t show_immediate(struct device *dev,
}
}
- up_read(&fw_device_rwsem);
-
if (value < 0)
return -ENOENT;
@@ -339,7 +336,7 @@ static ssize_t show_text_leaf(struct device *dev,
char dummy_buf[2];
int i, ret = -ENOENT;
- down_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
if (is_fw_unit(dev)) {
directories[0] = fw_unit(dev)->directory;
@@ -382,15 +379,14 @@ static ssize_t show_text_leaf(struct device *dev,
}
}
- if (ret >= 0) {
- /* Strip trailing whitespace and add newline. */
- while (ret > 0 && isspace(buf[ret - 1]))
- ret--;
- strcpy(buf + ret, "\n");
- ret++;
- }
+ if (ret < 0)
+ return ret;
- up_read(&fw_device_rwsem);
+ // Strip trailing whitespace and add newline.
+ while (ret > 0 && isspace(buf[ret - 1]))
+ ret--;
+ strcpy(buf + ret, "\n");
+ ret++;
return ret;
}
@@ -466,10 +462,10 @@ static ssize_t config_rom_show(struct device *dev,
struct fw_device *device = fw_device(dev);
size_t length;
- down_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
+
length = device->config_rom_length * 4;
memcpy(buf, device->config_rom, length);
- up_read(&fw_device_rwsem);
return length;
}
@@ -478,13 +474,10 @@ static ssize_t guid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
- int ret;
- down_read(&fw_device_rwsem);
- ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
- up_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
- return ret;
+ return sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
}
static ssize_t is_local_show(struct device *dev,
@@ -524,7 +517,8 @@ static ssize_t units_show(struct device *dev,
struct fw_csr_iterator ci;
int key, value, i = 0;
- down_read(&fw_device_rwsem);
+ guard(rwsem_read)(&fw_device_rwsem);
+
fw_csr_iterator_init(&ci, &device->config_rom[ROOT_DIR_OFFSET]);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (key != (CSR_UNIT | CSR_DIRECTORY))
@@ -533,7 +527,6 @@ static ssize_t units_show(struct device *dev,
if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
break;
}
- up_read(&fw_device_rwsem);
if (i)
buf[i - 1] = '\n';
@@ -571,7 +564,8 @@ static int read_rom(struct fw_device *device,
return rcode;
}
-#define MAX_CONFIG_ROM_SIZE 256
+// In quadlet (32-bit) units.
+#define MAX_CONFIG_ROM_SIZE ((CSR_CONFIG_ROM_END - CSR_CONFIG_ROM) / sizeof(u32))
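+// With CSR_CONFIG_ROM = 0x400 and CSR_CONFIG_ROM_END = 0x800 this works out
+// to (0x800 - 0x400) / 4 = 256 quadlets, matching the old literal.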
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
@@ -729,10 +723,10 @@ static int read_config_rom(struct fw_device *device, int generation)
goto out;
}
- down_write(&fw_device_rwsem);
- device->config_rom = new_rom;
- device->config_rom_length = length;
- up_write(&fw_device_rwsem);
+ scoped_guard(rwsem_write, &fw_device_rwsem) {
+ device->config_rom = new_rom;
+ device->config_rom_length = length;
+ }
kfree(old_rom);
ret = RCODE_COMPLETE;
@@ -813,24 +807,21 @@ static int shutdown_unit(struct device *device, void *data)
/*
* fw_device_rwsem acts as dual purpose mutex:
- * - serializes accesses to fw_device_idr,
* - serializes accesses to fw_device.config_rom/.config_rom_length and
* fw_unit.directory, unless those accesses happen at safe occasions
*/
DECLARE_RWSEM(fw_device_rwsem);
-DEFINE_IDR(fw_device_idr);
+DEFINE_XARRAY_ALLOC(fw_device_xa);
int fw_cdev_major;
struct fw_device *fw_device_get_by_devt(dev_t devt)
{
struct fw_device *device;
- down_read(&fw_device_rwsem);
- device = idr_find(&fw_device_idr, MINOR(devt));
+ device = xa_load(&fw_device_xa, MINOR(devt));
if (device)
fw_device_get(device);
- up_read(&fw_device_rwsem);
return device;
}
@@ -864,7 +855,6 @@ static void fw_device_shutdown(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
- int minor = MINOR(device->device.devt);
if (time_before64(get_jiffies_64(),
device->card->reset_jiffies + SHUTDOWN_DELAY)
@@ -882,9 +872,7 @@ static void fw_device_shutdown(struct work_struct *work)
device_for_each_child(&device->device, NULL, shutdown_unit);
device_unregister(&device->device);
- down_write(&fw_device_rwsem);
- idr_remove(&fw_device_idr, minor);
- up_write(&fw_device_rwsem);
+ xa_erase(&fw_device_xa, MINOR(device->device.devt));
fw_device_put(device);
}
@@ -893,16 +881,14 @@ static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
struct fw_card *card = device->card;
- unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled or while the
* bus manager work looks at this node.
*/
- spin_lock_irqsave(&card->lock, flags);
- device->node->data = NULL;
- spin_unlock_irqrestore(&card->lock, flags);
+ scoped_guard(spinlock_irqsave, &card->lock)
+ device->node->data = NULL;
fw_node_put(device->node);
kfree(device->config_rom);
@@ -942,59 +928,6 @@ static void fw_device_update(struct work_struct *work)
device_for_each_child(&device->device, NULL, update_unit);
}
-/*
- * If a device was pending for deletion because its node went away but its
- * bus info block and root directory header matches that of a newly discovered
- * device, revive the existing fw_device.
- * The newly allocated fw_device becomes obsolete instead.
- */
-static int lookup_existing_device(struct device *dev, void *data)
-{
- struct fw_device *old = fw_device(dev);
- struct fw_device *new = data;
- struct fw_card *card = new->card;
- int match = 0;
-
- if (!is_fw_device(dev))
- return 0;
-
- down_read(&fw_device_rwsem); /* serialize config_rom access */
- spin_lock_irq(&card->lock); /* serialize node access */
-
- if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
- atomic_cmpxchg(&old->state,
- FW_DEVICE_GONE,
- FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
- struct fw_node *current_node = new->node;
- struct fw_node *obsolete_node = old->node;
-
- new->node = obsolete_node;
- new->node->data = new;
- old->node = current_node;
- old->node->data = old;
-
- old->max_speed = new->max_speed;
- old->node_id = current_node->node_id;
- smp_wmb(); /* update node_id before generation */
- old->generation = card->generation;
- old->config_rom_retries = 0;
- fw_notice(card, "rediscovered device %s\n", dev_name(dev));
-
- old->workfn = fw_device_update;
- fw_schedule_device_work(old, 0);
-
- if (current_node == card->root_node)
- fw_schedule_bm_work(card, 0);
-
- match = 1;
- }
-
- spin_unlock_irq(&card->lock);
- up_read(&fw_device_rwsem);
-
- return match;
-}
-
enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
static void set_broadcast_channel(struct fw_device *device, int generation)
@@ -1055,13 +988,26 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen)
return 0;
}
+static int compare_configuration_rom(struct device *dev, void *data)
+{
+ const struct fw_device *old = fw_device(dev);
+ const u32 *config_rom = data;
+
+ if (!is_fw_device(dev))
+ return 0;
+
+ // Compare the bus information block and root_length/root_crc.
+ return !memcmp(old->config_rom, config_rom, 6 * 4);
+}
+
static void fw_device_init(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
- struct device *revived_dev;
- int minor, ret;
+ struct device *found;
+ u32 minor;
+ int ret;
/*
* All failure paths here set node->data to NULL, so that we
@@ -1087,24 +1033,62 @@ static void fw_device_init(struct work_struct *work)
return;
}
- revived_dev = device_find_child(card->device,
- device, lookup_existing_device);
- if (revived_dev) {
- put_device(revived_dev);
- fw_device_release(&device->device);
+ // If a device was pending for deletion because its node went away but its bus info block
+ // and root directory header match those of a newly discovered device, revive the
+ // existing fw_device. The newly allocated fw_device becomes obsolete instead.
+ //
+ // serialize config_rom access.
+ scoped_guard(rwsem_read, &fw_device_rwsem) {
+ found = device_find_child(card->device, (void *)device->config_rom,
+ compare_configuration_rom);
+ }
+ if (found) {
+ struct fw_device *reused = fw_device(found);
+
+ if (atomic_cmpxchg(&reused->state,
+ FW_DEVICE_GONE,
+ FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+ // serialize node access
+ scoped_guard(spinlock_irq, &card->lock) {
+ struct fw_node *current_node = device->node;
+ struct fw_node *obsolete_node = reused->node;
+
+ device->node = obsolete_node;
+ device->node->data = device;
+ reused->node = current_node;
+ reused->node->data = reused;
+
+ reused->max_speed = device->max_speed;
+ reused->node_id = current_node->node_id;
+ smp_wmb(); /* update node_id before generation */
+ reused->generation = card->generation;
+ reused->config_rom_retries = 0;
+ fw_notice(card, "rediscovered device %s\n",
+ dev_name(found));
+
+ reused->workfn = fw_device_update;
+ fw_schedule_device_work(reused, 0);
+
+ if (current_node == card->root_node)
+ fw_schedule_bm_work(card, 0);
+ }
- return;
+ put_device(found);
+ fw_device_release(&device->device);
+
+ return;
+ }
+
+ put_device(found);
}
device_initialize(&device->device);
fw_device_get(device);
- down_write(&fw_device_rwsem);
- minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
- GFP_KERNEL);
- up_write(&fw_device_rwsem);
- if (minor < 0)
+ // The index of the allocated entry is used as the minor identifier of the device node.
+ ret = xa_alloc(&fw_device_xa, &minor, device, XA_LIMIT(0, MINORMASK), GFP_KERNEL);
+ if (ret < 0)
goto error;
device->device.bus = &fw_bus_type;
@@ -1165,11 +1149,9 @@ static void fw_device_init(struct work_struct *work)
return;
error_with_cdev:
- down_write(&fw_device_rwsem);
- idr_remove(&fw_device_idr, minor);
- up_write(&fw_device_rwsem);
+ xa_erase(&fw_device_xa, minor);
error:
- fw_device_put(device); /* fw_device_idr's reference */
+ fw_device_put(device); // fw_device_xa's reference.
put_device(&device->device); /* our reference */
}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index b3eda38a36f3..a67493862c85 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -209,23 +209,63 @@ void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);
+/**
+ * fw_iso_context_flush_completions() - process isochronous context in current process context.
+ * @ctx: the isochronous context
+ *
+ * Process the isochronous context in the current process context. The registered callback function
+ * is called when a queued packet buffer with the interrupt flag is completed, either after
+ * transmission in the IT context or after being filled in the IR context. Additionally, the
+ * callback function is also called for the packet buffer completed at last. Furthermore, the
+ * callback function is called as well when the header buffer in the context becomes full. If it is
+ * required to process the context asynchronously, fw_iso_context_schedule_flush_completions() is
+ * available instead.
+ *
+ * Context: Process context. May sleep due to disable_work_sync().
+ */
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
+ int err;
+
trace_isoc_outbound_flush_completions(ctx);
trace_isoc_inbound_single_flush_completions(ctx);
trace_isoc_inbound_multiple_flush_completions(ctx);
- return ctx->card->driver->flush_iso_completions(ctx);
+ might_sleep();
+
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return 0;
+
+ disable_work_sync(&ctx->work);
+
+ err = ctx->card->driver->flush_iso_completions(ctx);
+
+ enable_work(&ctx->work);
+
+ return err;
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
+ int err;
+
trace_isoc_outbound_stop(ctx);
trace_isoc_inbound_single_stop(ctx);
trace_isoc_inbound_multiple_stop(ctx);
- return ctx->card->driver->stop_iso(ctx);
+ might_sleep();
+
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return 0;
+
+ err = ctx->card->driver->stop_iso(ctx);
+
+ cancel_work_sync(&ctx->work);
+
+ return err;
}
EXPORT_SYMBOL(fw_iso_context_stop);
@@ -375,9 +415,8 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
int irm_id, ret, c = -EINVAL;
- spin_lock_irq(&card->lock);
- irm_id = card->irm_node->node_id;
- spin_unlock_irq(&card->lock);
+ scoped_guard(spinlock_irq, &card->lock)
+ irm_id = card->irm_node->node_id;
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index b4e637aa6932..6adadb11962e 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -39,7 +39,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
node->port_count = port_count;
- refcount_set(&node->ref_count, 1);
+ kref_init(&node->kref);
INIT_LIST_HEAD(&node->link);
return node;
@@ -455,11 +455,10 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
int self_id_count, u32 *self_ids, bool bm_abdicate)
{
struct fw_node *local_node;
- unsigned long flags;
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
- spin_lock_irqsave(&card->lock, flags);
+ guard(spinlock_irqsave)(&card->lock);
/*
* If the selfID buffer is not the immediate successor of the
@@ -500,7 +499,5 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
} else {
update_tree(card, local_node);
}
-
- spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 4d2fc1f31fec..e141d24a7644 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -13,7 +13,6 @@
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
-#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -49,35 +48,31 @@ static int close_transaction(struct fw_transaction *transaction, struct fw_card
u32 response_tstamp)
{
struct fw_transaction *t = NULL, *iter;
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter == transaction) {
- if (!try_cancel_split_timeout(iter)) {
- spin_unlock_irqrestore(&card->lock, flags);
- goto timed_out;
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter == transaction) {
+ if (try_cancel_split_timeout(iter)) {
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
+ }
+ break;
}
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- break;
}
}
- spin_unlock_irqrestore(&card->lock, flags);
- if (t) {
- if (!t->with_tstamp) {
- t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
- } else {
- t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp,
- NULL, 0, t->callback_data);
- }
- return 0;
+ if (!t)
+ return -ENOENT;
+
+ if (!t->with_tstamp) {
+ t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
+ } else {
+ t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0,
+ t->callback_data);
}
- timed_out:
- return -ENOENT;
+ return 0;
}
/*
@@ -121,16 +116,13 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
{
struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
- unsigned long flags;
- spin_lock_irqsave(&card->lock, flags);
- if (list_empty(&t->link)) {
- spin_unlock_irqrestore(&card->lock, flags);
- return;
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ if (list_empty(&t->link))
+ return;
+ list_del(&t->link);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
}
- list_del(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
- spin_unlock_irqrestore(&card->lock, flags);
if (!t->with_tstamp) {
t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
@@ -143,20 +135,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
- unsigned long flags;
+ guard(spinlock_irqsave)(&card->lock);
- spin_lock_irqsave(&card->lock, flags);
-
- if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
- spin_unlock_irqrestore(&card->lock, flags);
+ if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
- }
t->is_split_transaction = true;
mod_timer(&t->split_timeout_timer,
jiffies + card->split_timeout_jiffies);
-
- spin_unlock_irqrestore(&card->lock, flags);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
@@ -464,7 +450,6 @@ static void transmit_phy_packet_callback(struct fw_packet *packet,
static struct fw_packet phy_config_packet = {
.header_length = 12,
- .header[0] = TCODE_LINK_INTERNAL << 4,
.payload_length = 0,
.speed = SCODE_100,
.callback = transmit_phy_packet_callback,
@@ -495,8 +480,9 @@ void fw_send_phy_config(struct fw_card *card,
phy_packet_phy_config_set_gap_count(&data, gap_count);
phy_packet_phy_config_set_gap_count_optimization(&data, true);
- mutex_lock(&phy_config_mutex);
+ guard(mutex)(&phy_config_mutex);
+ async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL);
phy_config_packet.header[1] = data;
phy_config_packet.header[2] = ~data;
phy_config_packet.generation = generation;
@@ -508,8 +494,6 @@ void fw_send_phy_config(struct fw_card *card,
card->driver->send_request(card, &phy_config_packet);
wait_for_completion_timeout(&phy_config_done, timeout);
-
- mutex_unlock(&phy_config_mutex);
}
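For reference, a PHY packet is a single quadlet followed by its bitwise inverse, which is what the header[1] = data / header[2] = ~data assignments encode. A small stand-alone sketch; the field positions follow the classic core-transaction.c macros (gap count at bits 16-21, the "set gap count" flag at bit 22), stated here from memory:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t data = 0;

	data |= 44u << 16;	/* gap_count = 44 */
	data |= 1u << 22;	/* "set gap count" flag */

	uint32_t inverse = ~data;

	/* The second quadlet must be the exact inverse of the first. */
	assert((data ^ inverse) == 0xffffffffu);
	printf("%08x %08x\n", data, inverse);
	return 0;
}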
static struct fw_address_handler *lookup_overlapping_address_handler(
@@ -598,7 +582,7 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
handler->length == 0)
return -EINVAL;
- spin_lock(&address_handler_list_lock);
+ guard(spinlock)(&address_handler_list_lock);
handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
@@ -617,8 +601,6 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
}
}
- spin_unlock(&address_handler_list_lock);
-
return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
@@ -634,9 +616,9 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
- spin_lock(&address_handler_list_lock);
- list_del_rcu(&handler->link);
- spin_unlock(&address_handler_list_lock);
+ scoped_guard(spinlock, &address_handler_list_lock)
+ list_del_rcu(&handler->link);
+
synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
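The removal path above pairs list_del_rcu() with synchronize_rcu() so the handler cannot be freed while a reader is still traversing the list. A minimal sketch of the reader/writer split, assuming <linux/rculist.h>:

struct handler {
	struct list_head link;
	void (*callback)(void *data);
};

static LIST_HEAD(handler_list);

static void run_handlers(void *data)
{
	struct handler *h;

	/* Readers traverse under RCU; no lock against writers needed. */
	scoped_guard(rcu) {
		list_for_each_entry_rcu(h, &handler_list, link)
			h->callback(data);
	}
}

static void remove_handler(struct handler *h)
{
	/* Unlink now; readers already in the loop may still see it. */
	list_del_rcu(&h->link);
	/* Wait until every such reader has left its RCU section. */
	synchronize_rcu();
	kfree(h);
}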
@@ -927,16 +909,14 @@ static void handle_exclusive_region_request(struct fw_card *card,
if (tcode == TCODE_LOCK_REQUEST)
tcode = 0x10 + async_header_get_extended_tcode(p->header);
- rcu_read_lock();
- handler = lookup_enclosing_address_handler(&address_handler_list,
- offset, request->length);
- if (handler)
- handler->address_callback(card, request,
- tcode, destination, source,
- p->generation, offset,
- request->data, request->length,
- handler->callback_data);
- rcu_read_unlock();
+ scoped_guard(rcu) {
+ handler = lookup_enclosing_address_handler(&address_handler_list, offset,
+ request->length);
+ if (handler)
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ }
if (!handler)
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
@@ -969,17 +949,14 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
- rcu_read_lock();
- list_for_each_entry_rcu(handler, &address_handler_list, link) {
- if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, request, tcode,
- destination, source,
- p->generation, offset,
- request->data,
- request->length,
- handler->callback_data);
+ scoped_guard(rcu) {
+ list_for_each_entry_rcu(handler, &address_handler_list, link) {
+ if (is_enclosing_handler(handler, offset, request->length))
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ }
}
- rcu_read_unlock();
fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -1024,7 +1001,6 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
struct fw_transaction *t = NULL, *iter;
- unsigned long flags;
u32 *data;
size_t data_length;
int tcode, tlabel, source, rcode;
@@ -1063,26 +1039,23 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter->node_id == source && iter->tlabel == tlabel) {
- if (!try_cancel_split_timeout(iter)) {
- spin_unlock_irqrestore(&card->lock, flags);
- goto timed_out;
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ list_for_each_entry(iter, &card->transaction_list, link) {
+ if (iter->node_id == source && iter->tlabel == tlabel) {
+ if (try_cancel_split_timeout(iter)) {
+ list_del_init(&iter->link);
+ card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ t = iter;
+ }
+ break;
}
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- break;
}
}
- spin_unlock_irqrestore(&card->lock, flags);
trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
p->timestamp, p->header, data, data_length / 4);
if (!t) {
- timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
@@ -1186,7 +1159,6 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
int reg = offset & ~CSR_REGISTER_BASE;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
- unsigned long flags;
switch (reg) {
case CSR_PRIORITY_BUDGET:
@@ -1228,10 +1200,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- spin_lock_irqsave(&card->lock, flags);
+ guard(spinlock_irqsave)(&card->lock);
+
card->split_timeout_hi = be32_to_cpu(*data) & 7;
update_split_timeout(card);
- spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1241,11 +1213,10 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- spin_lock_irqsave(&card->lock, flags);
- card->split_timeout_lo =
- be32_to_cpu(*data) & 0xfff80000;
+ guard(spinlock_irqsave)(&card->lock);
+
+ card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
update_split_timeout(card);
- spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
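The masks applied above follow the IEEE 1212 SPLIT_TIMEOUT layout: the low 3 bits of the hi register hold seconds and the top 13 bits of the lo register hold 125-microsecond cycles. A worked example of the arithmetic, assuming that layout:

#include <stdio.h>

int main(void)
{
	unsigned int hi = 0x00000000 & 7;		/* seconds */
	unsigned int lo = 0x19000000 & 0xfff80000;	/* top 13 bits kept */
	unsigned int cycles = hi * 8000 + (lo >> 19);	/* 800 */

	printf("%u cycles = %u us\n", cycles, cycles * 125);	/* 100000 us */
	return 0;
}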
@@ -1387,7 +1358,7 @@ static void __exit fw_core_cleanup(void)
unregister_chrdev(fw_cdev_major, "firewire");
bus_unregister(&fw_bus_type);
destroy_workqueue(fw_workqueue);
- idr_destroy(&fw_device_idr);
+ xa_destroy(&fw_device_xa);
}
module_init(fw_core_init);
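The IDR-to-xarray conversion above follows the usual migration recipe: xa_alloc() takes over index allocation, xa_load() the lookup, and xa_erase() the removal. A minimal sketch assuming <linux/xarray.h>, with illustrative names rather than the driver's own:

/* Illustrative names, not the driver's; assumes <linux/xarray.h>. */
static DEFINE_XARRAY_ALLOC(example_xa);

static int example_store(struct fw_device *device, u32 *id)
{
	/* Allocates an unused index, as idr_alloc() used to. */
	return xa_alloc(&example_xa, id, device, xa_limit_31b, GFP_KERNEL);
}

static struct fw_device *example_find(unsigned long id)
{
	return xa_load(&example_xa, id);	/* NULL if absent */
}

static void example_remove(unsigned long id)
{
	xa_erase(&example_xa, id);
}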
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 7c36d2628e37..0ae2c84ecafe 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -7,7 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
@@ -115,8 +115,8 @@ struct fw_card_driver {
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver, struct device *device);
-int fw_card_add(struct fw_card *card,
- u32 max_receive, u32 link_speed, u64 guid);
+int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
+ unsigned int supported_isoc_contexts);
void fw_core_remove_card(struct fw_card *card);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
@@ -133,7 +133,7 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
/* -device */
extern struct rw_semaphore fw_device_rwsem;
-extern struct idr fw_device_idr;
+extern struct xarray fw_device_xa;
extern int fw_cdev_major;
static inline struct fw_device *fw_device_get(struct fw_device *device)
@@ -159,6 +159,11 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction);
+static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func)
+{
+ INIT_WORK(&ctx->work, func);
+}
+
/* -topology */
@@ -183,7 +188,8 @@ struct fw_node {
* local node to this node. */
u8 max_depth:4; /* Maximum depth to any leaf node */
u8 max_hops:4; /* Max hops in this sub tree */
- refcount_t ref_count;
+
+ struct kref kref;
/* For serializing node topology into a list. */
struct list_head link;
@@ -196,15 +202,21 @@ struct fw_node {
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
- refcount_inc(&node->ref_count);
+ kref_get(&node->kref);
return node;
}
+static void release_node(struct kref *kref)
+{
+ struct fw_node *node = container_of(kref, struct fw_node, kref);
+
+ kfree(node);
+}
+
static inline void fw_node_put(struct fw_node *node)
{
- if (refcount_dec_and_test(&node->ref_count))
- kfree(node);
+ kref_put(&node->kref, release_node);
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
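The refcount_t-to-kref switch moves the free into a release callback that kref_put() invokes exactly once, as release_node() does above. A minimal sketch of the pattern, assuming <linux/kref.h> and <linux/slab.h>:

struct item {
	struct kref kref;
	u32 payload;
};

static struct item *item_create(void)
{
	struct item *item = kzalloc(sizeof(*item), GFP_KERNEL);

	if (item)
		kref_init(&item->kref);	/* refcount starts at 1 */
	return item;
}

static void item_release(struct kref *kref)
{
	kfree(container_of(kref, struct item, kref));
}

static void item_put(struct item *item)
{
	/* item_release() runs exactly once, on the final put. */
	kref_put(&item->kref, item_release);
}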
diff --git a/drivers/firewire/ohci-serdes-test.c b/drivers/firewire/ohci-serdes-test.c
index 304a09ff528e..258f668619ef 100644
--- a/drivers/firewire/ohci-serdes-test.c
+++ b/drivers/firewire/ohci-serdes-test.c
@@ -40,9 +40,75 @@ static void test_self_id_receive_buffer_deserialization(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0xf38b, timestamp);
}
+static void test_at_data_serdes(struct kunit *test)
+{
+ static const __le32 expected[] = {
+ cpu_to_le32(0x00020e80),
+ cpu_to_le32(0xffc2ffff),
+ cpu_to_le32(0xe0000000),
+ };
+ __le32 quadlets[] = {0, 0, 0};
+ bool has_src_bus_id = ohci1394_at_data_get_src_bus_id(expected);
+ unsigned int speed = ohci1394_at_data_get_speed(expected);
+ unsigned int tlabel = ohci1394_at_data_get_tlabel(expected);
+ unsigned int retry = ohci1394_at_data_get_retry(expected);
+ unsigned int tcode = ohci1394_at_data_get_tcode(expected);
+ unsigned int destination_id = ohci1394_at_data_get_destination_id(expected);
+ u64 destination_offset = ohci1394_at_data_get_destination_offset(expected);
+
+ KUNIT_EXPECT_FALSE(test, has_src_bus_id);
+ KUNIT_EXPECT_EQ(test, 0x02, speed);
+ KUNIT_EXPECT_EQ(test, 0x03, tlabel);
+ KUNIT_EXPECT_EQ(test, 0x02, retry);
+ KUNIT_EXPECT_EQ(test, 0x08, tcode);
+
+ ohci1394_at_data_set_src_bus_id(quadlets, has_src_bus_id);
+ ohci1394_at_data_set_speed(quadlets, speed);
+ ohci1394_at_data_set_tlabel(quadlets, tlabel);
+ ohci1394_at_data_set_retry(quadlets, retry);
+ ohci1394_at_data_set_tcode(quadlets, tcode);
+ ohci1394_at_data_set_destination_id(quadlets, destination_id);
+ ohci1394_at_data_set_destination_offset(quadlets, destination_offset);
+
+ KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static void test_it_data_serdes(struct kunit *test)
+{
+ static const __le32 expected[] = {
+ cpu_to_le32(0x000349a7),
+ cpu_to_le32(0x02300000),
+ };
+ __le32 quadlets[] = {0, 0};
+ unsigned int scode = ohci1394_it_data_get_speed(expected);
+ unsigned int tag = ohci1394_it_data_get_tag(expected);
+ unsigned int channel = ohci1394_it_data_get_channel(expected);
+ unsigned int tcode = ohci1394_it_data_get_tcode(expected);
+ unsigned int sync = ohci1394_it_data_get_sync(expected);
+ unsigned int data_length = ohci1394_it_data_get_data_length(expected);
+
+ KUNIT_EXPECT_EQ(test, 0x03, scode);
+ KUNIT_EXPECT_EQ(test, 0x01, tag);
+ KUNIT_EXPECT_EQ(test, 0x09, channel);
+ KUNIT_EXPECT_EQ(test, 0x0a, tcode);
+ KUNIT_EXPECT_EQ(test, 0x7, sync);
+ KUNIT_EXPECT_EQ(test, 0x0230, data_length);
+
+ ohci1394_it_data_set_speed(quadlets, scode);
+ ohci1394_it_data_set_tag(quadlets, tag);
+ ohci1394_it_data_set_channel(quadlets, channel);
+ ohci1394_it_data_set_tcode(quadlets, tcode);
+ ohci1394_it_data_set_sync(quadlets, sync);
+ ohci1394_it_data_set_data_length(quadlets, data_length);
+
+ KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
static struct kunit_case ohci_serdes_test_cases[] = {
KUNIT_CASE(test_self_id_count_register_deserialization),
KUNIT_CASE(test_self_id_receive_buffer_deserialization),
+ KUNIT_CASE(test_at_data_serdes),
+ KUNIT_CASE(test_it_data_serdes),
{}
};
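The IT test vector decodes by hand with the same masks the helpers use; a stand-alone check, assuming a little-endian host so the quadlets can be treated as plain integers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t q0 = 0x000349a7, q1 = 0x02300000;

	printf("spd=%u tag=%u chan=%u tcode=%#x sy=%u len=%#x\n",
	       (unsigned)((q0 & 0x00070000) >> 16),	/* 0x3 */
	       (unsigned)((q0 & 0x0000c000) >> 14),	/* 0x1 */
	       (unsigned)((q0 & 0x00003f00) >> 8),	/* 0x09 */
	       (unsigned)((q0 & 0x000000f0) >> 4),	/* 0x0a */
	       (unsigned)(q0 & 0x0000000f),		/* 0x7 */
	       (unsigned)((q1 & 0xffff0000) >> 16));	/* 0x0230 */
	return 0;
}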
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 314a29c0fd3e..7ee55c2804de 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -50,7 +50,6 @@ static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);
#define CREATE_TRACE_POINTS
#include <trace/events/firewire_ohci.h>
-#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
@@ -77,7 +76,7 @@ struct descriptor {
__le32 branch_address;
__le16 res_count;
__le16 transfer_status;
-} __attribute__((aligned(16)));
+} __aligned(16);
#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
@@ -162,13 +161,6 @@ struct context {
struct tasklet_struct tasklet;
};
-#define IT_HEADER_SY(v) ((v) << 0)
-#define IT_HEADER_TCODE(v) ((v) << 4)
-#define IT_HEADER_CHANNEL(v) ((v) << 8)
-#define IT_HEADER_TAG(v) ((v) << 14)
-#define IT_HEADER_SPEED(v) ((v) << 16)
-#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
-
struct iso_context {
struct fw_iso_context base;
struct context context;
@@ -182,7 +174,7 @@ struct iso_context {
u8 tags;
};
-#define CONFIG_ROM_SIZE 1024
+#define CONFIG_ROM_SIZE (CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)
struct fw_ohci {
struct fw_card card;
@@ -264,7 +256,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
#define OHCI1394_REGISTER_SIZE 0x800
#define OHCI1394_PCI_HCI_Control 0x40
#define SELF_ID_BUF_SIZE 0x800
-#define OHCI_TCODE_PHY_PACKET 0x0e
#define OHCI_VERSION_1_1 0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;
@@ -405,7 +396,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
+MODULE_PARM_DESC(debug, "Verbose logging, deprecated since kernel v6.11. (default = 0"
", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
@@ -532,20 +523,28 @@ static const char *evts[] = {
[0x1e] = "ack_type_error", [0x1f] = "-reserved-",
[0x20] = "pending/cancelled",
};
-static const char *tcodes[] = {
- [0x0] = "QW req", [0x1] = "BW req",
- [0x2] = "W resp", [0x3] = "-reserved-",
- [0x4] = "QR req", [0x5] = "BR req",
- [0x6] = "QR resp", [0x7] = "BR resp",
- [0x8] = "cycle start", [0x9] = "Lk req",
- [0xa] = "async stream packet", [0xb] = "Lk resp",
- [0xc] = "-reserved-", [0xd] = "-reserved-",
- [0xe] = "link internal", [0xf] = "-reserved-",
-};
static void log_ar_at_event(struct fw_ohci *ohci,
char dir, int speed, u32 *header, int evt)
{
+ static const char *const tcodes[] = {
+ [TCODE_WRITE_QUADLET_REQUEST] = "QW req",
+ [TCODE_WRITE_BLOCK_REQUEST] = "BW req",
+ [TCODE_WRITE_RESPONSE] = "W resp",
+ [0x3] = "-reserved-",
+ [TCODE_READ_QUADLET_REQUEST] = "QR req",
+ [TCODE_READ_BLOCK_REQUEST] = "BR req",
+ [TCODE_READ_QUADLET_RESPONSE] = "QR resp",
+ [TCODE_READ_BLOCK_RESPONSE] = "BR resp",
+ [TCODE_CYCLE_START] = "cycle start",
+ [TCODE_LOCK_REQUEST] = "Lk req",
+ [TCODE_STREAM_DATA] = "async stream packet",
+ [TCODE_LOCK_RESPONSE] = "Lk resp",
+ [0xc] = "-reserved-",
+ [0xd] = "-reserved-",
+ [TCODE_LINK_INTERNAL] = "link internal",
+ [0xf] = "-reserved-",
+ };
int tcode = async_header_get_tcode(header);
char specific[12];
@@ -586,7 +585,7 @@ static void log_ar_at_event(struct fw_ohci *ohci,
ohci_notice(ohci, "A%c %s, %s\n",
dir, evts[evt], tcodes[tcode]);
break;
- case 0xe:
+ case TCODE_LINK_INTERNAL:
ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
dir, evts[evt], header[1], header[2]);
break;
@@ -713,26 +712,20 @@ static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
struct fw_ohci *ohci = fw_ohci(card);
- int ret;
- mutex_lock(&ohci->phy_reg_mutex);
- ret = read_phy_reg(ohci, addr);
- mutex_unlock(&ohci->phy_reg_mutex);
+ guard(mutex)(&ohci->phy_reg_mutex);
- return ret;
+ return read_phy_reg(ohci, addr);
}
static int ohci_update_phy_reg(struct fw_card *card, int addr,
int clear_bits, int set_bits)
{
struct fw_ohci *ohci = fw_ohci(card);
- int ret;
- mutex_lock(&ohci->phy_reg_mutex);
- ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
- mutex_unlock(&ohci->phy_reg_mutex);
+ guard(mutex)(&ohci->phy_reg_mutex);
- return ret;
+ return update_phy_reg(ohci, addr, clear_bits, set_bits);
}
static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
@@ -939,7 +932,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_WRITE_RESPONSE:
case TCODE_READ_QUADLET_REQUEST:
- case OHCI_TCODE_PHY_PACKET:
+ case TCODE_LINK_INTERNAL:
p.header_length = 12;
p.payload_length = 0;
break;
@@ -967,7 +960,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
*/
- if (evt == OHCI1394_evt_no_status && tcode == OHCI1394_phy_tcode)
+ if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL)
p.ack = ACK_COMPLETE;
/*
@@ -1148,9 +1141,8 @@ static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
return d + z - 1;
}
-static void context_tasklet(unsigned long data)
+static void context_retire_descriptors(struct context *ctx)
{
- struct context *ctx = (struct context *) data;
struct descriptor *d, *last;
u32 address;
int z;
@@ -1179,18 +1171,31 @@ static void context_tasklet(unsigned long data)
break;
if (old_desc != desc) {
- /* If we've advanced to the next buffer, move the
- * previous buffer to the free list. */
- unsigned long flags;
+ // If we've advanced to the next buffer, move the previous buffer to the
+ // free list.
old_desc->used = 0;
- spin_lock_irqsave(&ctx->ohci->lock, flags);
+ guard(spinlock_irqsave)(&ctx->ohci->lock);
list_move_tail(&old_desc->list, &ctx->buffer_list);
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
}
ctx->last = last;
}
}
+static void context_tasklet(unsigned long data)
+{
+ struct context *ctx = (struct context *) data;
+
+ context_retire_descriptors(ctx);
+}
+
+static void ohci_isoc_context_work(struct work_struct *work)
+{
+ struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+ struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
+
+ context_retire_descriptors(&isoc_ctx->context);
+}
+
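This tasklet-to-work conversion follows the standard pattern: the per-context work item replaces tasklet_schedule() with queue_work(), and the handler recovers its context via container_of(). A minimal sketch, assuming <linux/workqueue.h> and the BH workqueues added in v6.9 (system_bh_wq); names are illustrative:

struct my_context {
	struct work_struct work;
	/* ...descriptor state... */
};

static void my_context_work(struct work_struct *work)
{
	struct my_context *ctx = container_of(work, struct my_context, work);

	/* Retire descriptors here; a BH workqueue keeps softirq-like
	 * latency while allowing the normal work-item API. */
	(void)ctx;
}

/* In the interrupt handler, queue_work() replaces tasklet_schedule(). */
static void my_irq_path(struct my_context *ctx)
{
	queue_work(system_bh_wq, &ctx->work);
}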
/*
* Allocate a new buffer and add it to the list of free buffers for this
* context. Must be called with ohci->lock held.
@@ -1402,12 +1407,6 @@ static int at_context_queue_packet(struct context *ctx,
d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
d[0].res_count = cpu_to_le16(packet->timestamp);
- /*
- * The DMA format for asynchronous link packets is different
- * from the IEEE1394 layout, so shift the fields around
- * accordingly.
- */
-
tcode = async_header_get_tcode(packet->header);
header = (__le32 *) &d[1];
switch (tcode) {
@@ -1420,11 +1419,21 @@ static int at_context_queue_packet(struct context *ctx,
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
- header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
- (packet->speed << 16));
- header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
- (packet->header[0] & 0xffff0000));
- header[2] = cpu_to_le32(packet->header[2]);
+ ohci1394_at_data_set_src_bus_id(header, false);
+ ohci1394_at_data_set_speed(header, packet->speed);
+ ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header));
+ ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header));
+ ohci1394_at_data_set_tcode(header, tcode);
+
+ ohci1394_at_data_set_destination_id(header,
+ async_header_get_destination(packet->header));
+
+ if (ctx == &ctx->ohci->at_response_ctx) {
+ ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
+ } else {
+ ohci1394_at_data_set_destination_offset(header,
+ async_header_get_offset(packet->header));
+ }
if (tcode_is_block_packet(tcode))
header[3] = cpu_to_le32(packet->header[3]);
@@ -1433,10 +1442,10 @@ static int at_context_queue_packet(struct context *ctx,
d[0].req_count = cpu_to_le16(packet->header_length);
break;
-
case TCODE_LINK_INTERNAL:
- header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
- (packet->speed << 16));
+ ohci1394_at_data_set_speed(header, packet->speed);
+ ohci1394_at_data_set_tcode(header, TCODE_LINK_INTERNAL);
+
header[1] = cpu_to_le32(packet->header[1]);
header[2] = cpu_to_le32(packet->header[2]);
d[0].req_count = cpu_to_le16(12);
@@ -1446,9 +1455,14 @@ static int at_context_queue_packet(struct context *ctx,
break;
case TCODE_STREAM_DATA:
- header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
- (packet->speed << 16));
- header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+ ohci1394_it_data_set_speed(header, packet->speed);
+ ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0]));
+ ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0]));
+ ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
+ ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0]));
+
+ ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0]));
+
d[0].req_count = cpu_to_le16(8);
break;
@@ -1873,13 +1887,15 @@ static int get_status_for_port(struct fw_ohci *ohci, int port_index,
{
int reg;
- mutex_lock(&ohci->phy_reg_mutex);
- reg = write_phy_reg(ohci, 7, port_index);
- if (reg >= 0)
+ scoped_guard(mutex, &ohci->phy_reg_mutex) {
+ reg = write_phy_reg(ohci, 7, port_index);
+ if (reg < 0)
+ return reg;
+
reg = read_phy_reg(ohci, 8);
- mutex_unlock(&ohci->phy_reg_mutex);
- if (reg < 0)
- return reg;
+ if (reg < 0)
+ return reg;
+ }
switch (reg & 0x0f) {
case 0x06:
@@ -1917,29 +1933,36 @@ static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
return i;
}
-static bool initiated_reset(struct fw_ohci *ohci)
+static int detect_initiated_reset(struct fw_ohci *ohci, bool *is_initiated_reset)
{
int reg;
- int ret = false;
- mutex_lock(&ohci->phy_reg_mutex);
- reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
- if (reg >= 0) {
- reg = read_phy_reg(ohci, 8);
- reg |= 0x40;
- reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
- if (reg >= 0) {
- reg = read_phy_reg(ohci, 12); /* read register 12 */
- if (reg >= 0) {
- if ((reg & 0x08) == 0x08) {
- /* bit 3 indicates "initiated reset" */
- ret = true;
- }
- }
- }
- }
- mutex_unlock(&ohci->phy_reg_mutex);
- return ret;
+ guard(mutex)(&ohci->phy_reg_mutex);
+
+ // Select page 7
+ reg = write_phy_reg(ohci, 7, 0xe0);
+ if (reg < 0)
+ return reg;
+
+ reg = read_phy_reg(ohci, 8);
+ if (reg < 0)
+ return reg;
+
+ // set PMODE bit
+ reg |= 0x40;
+ reg = write_phy_reg(ohci, 8, reg);
+ if (reg < 0)
+ return reg;
+
+ // read register 12
+ reg = read_phy_reg(ohci, 12);
+ if (reg < 0)
+ return reg;
+
+ // bit 3 indicates "initiated reset"
+	*is_initiated_reset = (reg & 0x08) == 0x08;
+
+ return 0;
}
/*
@@ -1949,7 +1972,8 @@ static bool initiated_reset(struct fw_ohci *ohci)
*/
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
- int reg, i, pos;
+ int reg, i, pos, err;
+ bool is_initiated_reset;
u32 self_id = 0;
// link active 1, speed 3, bridge 0, contender 1, more packets 0.
@@ -1978,7 +2002,6 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
for (i = 0; i < 3; i++) {
enum phy_packet_self_id_port_status status;
- int err;
err = get_status_for_port(ohci, i, &status);
if (err < 0)
@@ -1987,7 +2010,10 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
self_id_sequence_set_port_status(&self_id, 1, i, status);
}
- phy_packet_self_id_zero_set_initiated_reset(&self_id, initiated_reset(ohci));
+ err = detect_initiated_reset(ohci, &is_initiated_reset);
+ if (err < 0)
+ return err;
+ phy_packet_self_id_zero_set_initiated_reset(&self_id, is_initiated_reset);
pos = get_self_id_pos(ohci, self_id, self_id_count);
if (pos >= 0) {
@@ -2112,14 +2138,12 @@ static void bus_reset_work(struct work_struct *work)
return;
}
- /* FIXME: Document how the locking works. */
- spin_lock_irq(&ohci->lock);
-
- ohci->generation = -1; /* prevent AT packet queueing */
- context_stop(&ohci->at_request_ctx);
- context_stop(&ohci->at_response_ctx);
-
- spin_unlock_irq(&ohci->lock);
+ // FIXME: Document how the locking works.
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ ohci->generation = -1; // prevent AT packet queueing
+ context_stop(&ohci->at_request_ctx);
+ context_stop(&ohci->at_response_ctx);
+ }
/*
* Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
@@ -2129,53 +2153,42 @@ static void bus_reset_work(struct work_struct *work)
at_context_flush(&ohci->at_request_ctx);
at_context_flush(&ohci->at_response_ctx);
- spin_lock_irq(&ohci->lock);
-
- ohci->generation = generation;
- reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
- reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
-
- if (ohci->quirks & QUIRK_RESET_PACKET)
- ohci->request_generation = generation;
-
- /*
- * This next bit is unrelated to the AT context stuff but we
- * have to do it under the spinlock also. If a new config rom
- * was set up before this reset, the old one is now no longer
- * in use and we can free it. Update the config rom pointers
- * to point to the current config rom and clear the
- * next_config_rom pointer so a new update can take place.
- */
-
- if (ohci->next_config_rom != NULL) {
- if (ohci->next_config_rom != ohci->config_rom) {
- free_rom = ohci->config_rom;
- free_rom_bus = ohci->config_rom_bus;
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ ohci->generation = generation;
+ reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+
+ if (ohci->quirks & QUIRK_RESET_PACKET)
+ ohci->request_generation = generation;
+
+ // This next bit is unrelated to the AT context stuff but we have to do it under the
+ // spinlock also. If a new config rom was set up before this reset, the old one is
+ // now no longer in use and we can free it. Update the config rom pointers to point
+ // to the current config rom and clear the next_config_rom pointer so a new update
+ // can take place.
+ if (ohci->next_config_rom != NULL) {
+ if (ohci->next_config_rom != ohci->config_rom) {
+ free_rom = ohci->config_rom;
+ free_rom_bus = ohci->config_rom_bus;
+ }
+ ohci->config_rom = ohci->next_config_rom;
+ ohci->config_rom_bus = ohci->next_config_rom_bus;
+ ohci->next_config_rom = NULL;
+
+ // Restore config_rom image and manually update config_rom registers.
+ // Writing the header quadlet will indicate that the config rom is ready,
+ // so we do that last.
+ reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2]));
+ ohci->config_rom[0] = ohci->next_header;
+ reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header));
}
- ohci->config_rom = ohci->next_config_rom;
- ohci->config_rom_bus = ohci->next_config_rom_bus;
- ohci->next_config_rom = NULL;
-
- /*
- * Restore config_rom image and manually update
- * config_rom registers. Writing the header quadlet
- * will indicate that the config rom is ready, so we
- * do that last.
- */
- reg_write(ohci, OHCI1394_BusOptions,
- be32_to_cpu(ohci->config_rom[2]));
- ohci->config_rom[0] = ohci->next_header;
- reg_write(ohci, OHCI1394_ConfigROMhdr,
- be32_to_cpu(ohci->next_header));
- }
- if (param_remote_dma) {
- reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
- reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+ if (param_remote_dma) {
+ reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+ reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+ }
}
- spin_unlock_irq(&ohci->lock);
-
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
@@ -2198,6 +2211,11 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
+ if (unlikely(param_debug > 0)) {
+ dev_notice_ratelimited(ohci->card.device,
+				       "The debug parameter is superseded by tracepoint events, and deprecated.\n");
+ }
+
/*
* busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
@@ -2238,8 +2256,7 @@ static irqreturn_t irq_handler(int irq, void *data)
while (iso_event) {
i = ffs(iso_event) - 1;
- tasklet_schedule(
- &ohci->ir_context_list[i].context.tasklet);
+ fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base);
iso_event &= ~(1 << i);
}
}
@@ -2250,8 +2267,7 @@ static irqreturn_t irq_handler(int irq, void *data)
while (iso_event) {
i = ffs(iso_event) - 1;
- tasklet_schedule(
- &ohci->it_context_list[i].context.tasklet);
+ fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base);
iso_event &= ~(1 << i);
}
}
@@ -2264,13 +2280,11 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_read(ohci, OHCI1394_PostedWriteAddressLo);
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
- if (printk_ratelimit())
- ohci_err(ohci, "PCI posted write error\n");
+ dev_err_ratelimited(ohci->card.device, "PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
- if (printk_ratelimit())
- ohci_notice(ohci, "isochronous cycle too long\n");
+ dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n");
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleMaster);
}
@@ -2282,17 +2296,15 @@ static irqreturn_t irq_handler(int irq, void *data)
* stop active cycleMatch iso contexts now and restart
* them at least two cycles later. (FIXME?)
*/
- if (printk_ratelimit())
- ohci_notice(ohci, "isochronous cycle inconsistent\n");
+ dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n");
}
if (unlikely(event & OHCI1394_unrecoverableError))
handle_dead_contexts(ohci);
if (event & OHCI1394_cycle64Seconds) {
- spin_lock(&ohci->lock);
+ guard(spinlock)(&ohci->lock);
update_bus_time(ohci);
- spin_unlock(&ohci->lock);
} else
flush_writes(ohci);
@@ -2617,33 +2629,26 @@ static int ohci_set_config_rom(struct fw_card *card,
if (next_config_rom == NULL)
return -ENOMEM;
- spin_lock_irq(&ohci->lock);
-
- /*
- * If there is not an already pending config_rom update,
- * push our new allocation into the ohci->next_config_rom
- * and then mark the local variable as null so that we
- * won't deallocate the new buffer.
- *
- * OTOH, if there is a pending config_rom update, just
- * use that buffer with the new config_rom data, and
- * let this routine free the unused DMA allocation.
- */
-
- if (ohci->next_config_rom == NULL) {
- ohci->next_config_rom = next_config_rom;
- ohci->next_config_rom_bus = next_config_rom_bus;
- next_config_rom = NULL;
- }
-
- copy_config_rom(ohci->next_config_rom, config_rom, length);
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ // If there is not an already pending config_rom update, push our new allocation
+ // into the ohci->next_config_rom and then mark the local variable as null so that
+ // we won't deallocate the new buffer.
+ //
+ // OTOH, if there is a pending config_rom update, just use that buffer with the new
+ // config_rom data, and let this routine free the unused DMA allocation.
+ if (ohci->next_config_rom == NULL) {
+ ohci->next_config_rom = next_config_rom;
+ ohci->next_config_rom_bus = next_config_rom_bus;
+ next_config_rom = NULL;
+ }
- ohci->next_header = config_rom[0];
- ohci->next_config_rom[0] = 0;
+ copy_config_rom(ohci->next_config_rom, config_rom, length);
- reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+ ohci->next_header = config_rom[0];
+ ohci->next_config_rom[0] = 0;
- spin_unlock_irq(&ohci->lock);
+ reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+ }
/* If we didn't use the DMA allocation, delete it. */
if (next_config_rom != NULL) {
@@ -2713,7 +2718,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
int n, ret = 0;
if (param_remote_dma)
@@ -2724,12 +2728,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
* interrupt bit. Clear physReqResourceAllBuses on bus reset.
*/
- spin_lock_irqsave(&ohci->lock, flags);
+ guard(spinlock_irqsave)(&ohci->lock);
- if (ohci->generation != generation) {
- ret = -ESTALE;
- goto out;
- }
+ if (ohci->generation != generation)
+ return -ESTALE;
/*
* Note, if the node ID contains a non-local bus ID, physical DMA is
@@ -2743,8 +2745,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
flush_writes(ohci);
- out:
- spin_unlock_irqrestore(&ohci->lock, flags);
return ret;
}
@@ -2752,7 +2752,6 @@ static int ohci_enable_phys_dma(struct fw_card *card,
static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
u32 value;
switch (csr_offset) {
@@ -2776,16 +2775,14 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
return get_cycle_time(ohci);
case CSR_BUS_TIME:
- /*
- * We might be called just after the cycle timer has wrapped
- * around but just before the cycle64Seconds handler, so we
- * better check here, too, if the bus time needs to be updated.
- */
- spin_lock_irqsave(&ohci->lock, flags);
- value = update_bus_time(ohci);
- spin_unlock_irqrestore(&ohci->lock, flags);
- return value;
+ {
+ // We might be called just after the cycle timer has wrapped around but just before
+ // the cycle64Seconds handler, so we better check here, too, if the bus time needs
+ // to be updated.
+ guard(spinlock_irqsave)(&ohci->lock);
+ return update_bus_time(ohci);
+ }
case CSR_BUSY_TIMEOUT:
value = reg_read(ohci, OHCI1394_ATRetries);
return (value >> 4) & 0x0ffff00f;
@@ -2803,7 +2800,6 @@ static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
struct fw_ohci *ohci = fw_ohci(card);
- unsigned long flags;
switch (csr_offset) {
case CSR_STATE_CLEAR:
@@ -2839,12 +2835,11 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
break;
case CSR_BUS_TIME:
- spin_lock_irqsave(&ohci->lock, flags);
- ohci->bus_time = (update_bus_time(ohci) & 0x40) |
- (value & ~0x7f);
- spin_unlock_irqrestore(&ohci->lock, flags);
+ {
+ guard(spinlock_irqsave)(&ohci->lock);
+ ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f);
break;
-
+ }
case CSR_BUSY_TIMEOUT:
value = (value & 0xf) | ((value & 0xf) << 4) |
((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
@@ -2932,7 +2927,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
copy_iso_headers(ctx, (u32 *) (last + 1));
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
- flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
return 1;
}
@@ -2968,7 +2963,7 @@ static int handle_ir_buffer_fill(struct context *context,
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
trace_isoc_inbound_multiple_completions(&ctx->base, completed,
- FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
ctx->base.callback.mc(&ctx->base,
buffer_dma + completed,
@@ -3064,7 +3059,7 @@ static int handle_it_packet(struct context *context,
ctx->header_length += 4;
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
- flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT);
return 1;
}
@@ -3090,55 +3085,53 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
u32 *mask, regs;
int index, ret = -EBUSY;
- spin_lock_irq(&ohci->lock);
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ switch (type) {
+ case FW_ISO_CONTEXT_TRANSMIT:
+ mask = &ohci->it_context_mask;
+ callback = handle_it_packet;
+ index = ffs(*mask) - 1;
+ if (index >= 0) {
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoXmitContextBase(index);
+ ctx = &ohci->it_context_list[index];
+ }
+ break;
- switch (type) {
- case FW_ISO_CONTEXT_TRANSMIT:
- mask = &ohci->it_context_mask;
- callback = handle_it_packet;
- index = ffs(*mask) - 1;
- if (index >= 0) {
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoXmitContextBase(index);
- ctx = &ohci->it_context_list[index];
- }
- break;
+ case FW_ISO_CONTEXT_RECEIVE:
+ channels = &ohci->ir_context_channels;
+ mask = &ohci->ir_context_mask;
+ callback = handle_ir_packet_per_buffer;
+ index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ *channels &= ~(1ULL << channel);
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
- case FW_ISO_CONTEXT_RECEIVE:
- channels = &ohci->ir_context_channels;
- mask = &ohci->ir_context_mask;
- callback = handle_ir_packet_per_buffer;
- index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
- if (index >= 0) {
- *channels &= ~(1ULL << channel);
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoRcvContextBase(index);
- ctx = &ohci->ir_context_list[index];
- }
- break;
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ mask = &ohci->ir_context_mask;
+ callback = handle_ir_buffer_fill;
+ index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+ if (index >= 0) {
+ ohci->mc_allocated = true;
+ *mask &= ~(1 << index);
+ regs = OHCI1394_IsoRcvContextBase(index);
+ ctx = &ohci->ir_context_list[index];
+ }
+ break;
- case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- mask = &ohci->ir_context_mask;
- callback = handle_ir_buffer_fill;
- index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
- if (index >= 0) {
- ohci->mc_allocated = true;
- *mask &= ~(1 << index);
- regs = OHCI1394_IsoRcvContextBase(index);
- ctx = &ohci->ir_context_list[index];
+ default:
+ index = -1;
+ ret = -ENOSYS;
}
- break;
- default:
- index = -1;
- ret = -ENOSYS;
+ if (index < 0)
+ return ERR_PTR(ret);
}
- spin_unlock_irq(&ohci->lock);
-
- if (index < 0)
- return ERR_PTR(ret);
-
memset(ctx, 0, sizeof(*ctx));
ctx->header_length = 0;
ctx->header = (void *) __get_free_page(GFP_KERNEL);
@@ -3149,6 +3142,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
goto out_with_header;
+ fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);
if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
set_multichannel_mask(ohci, 0);
@@ -3160,20 +3154,18 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
out_with_header:
free_page((unsigned long)ctx->header);
out:
- spin_lock_irq(&ohci->lock);
-
- switch (type) {
- case FW_ISO_CONTEXT_RECEIVE:
- *channels |= 1ULL << channel;
- break;
+ scoped_guard(spinlock_irq, &ohci->lock) {
+ switch (type) {
+ case FW_ISO_CONTEXT_RECEIVE:
+ *channels |= 1ULL << channel;
+ break;
- case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- ohci->mc_allocated = false;
- break;
+ case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ ohci->mc_allocated = false;
+ break;
+ }
+ *mask |= 1 << index;
}
- *mask |= 1 << index;
-
- spin_unlock_irq(&ohci->lock);
return ERR_PTR(ret);
}
@@ -3248,7 +3240,6 @@ static int ohci_stop_iso(struct fw_iso_context *base)
}
flush_writes(ohci);
context_stop(&ctx->context);
- tasklet_kill(&ctx->context.tasklet);
return 0;
}
@@ -3257,14 +3248,13 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
{
struct fw_ohci *ohci = fw_ohci(base->card);
struct iso_context *ctx = container_of(base, struct iso_context, base);
- unsigned long flags;
int index;
ohci_stop_iso(base);
context_release(&ctx->context);
free_page((unsigned long)ctx->header);
- spin_lock_irqsave(&ohci->lock, flags);
+ guard(spinlock_irqsave)(&ohci->lock);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
@@ -3286,38 +3276,29 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
ohci->mc_allocated = false;
break;
}
-
- spin_unlock_irqrestore(&ohci->lock, flags);
}
static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
struct fw_ohci *ohci = fw_ohci(base->card);
- unsigned long flags;
- int ret;
switch (base->type) {
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+ {
+ guard(spinlock_irqsave)(&ohci->lock);
- spin_lock_irqsave(&ohci->lock, flags);
-
- /* Don't allow multichannel to grab other contexts' channels. */
+ // Don't allow multichannel to grab other contexts' channels.
if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
*channels = ohci->ir_context_channels;
- ret = -EBUSY;
+ return -EBUSY;
} else {
set_multichannel_mask(ohci, *channels);
- ret = 0;
+ return 0;
}
-
- spin_unlock_irqrestore(&ohci->lock, flags);
-
- break;
+ }
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-
- return ret;
}
#ifdef CONFIG_PM
@@ -3392,14 +3373,14 @@ static int queue_iso_transmit(struct iso_context *ctx,
d[0].branch_address = cpu_to_le32(d_bus | z);
header = (__le32 *) &d[1];
- header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
- IT_HEADER_TAG(p->tag) |
- IT_HEADER_TCODE(TCODE_STREAM_DATA) |
- IT_HEADER_CHANNEL(ctx->base.channel) |
- IT_HEADER_SPEED(ctx->base.speed));
- header[1] =
- cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
- p->payload_length));
+
+ ohci1394_it_data_set_speed(header, ctx->base.speed);
+ ohci1394_it_data_set_tag(header, p->tag);
+ ohci1394_it_data_set_channel(header, ctx->base.channel);
+ ohci1394_it_data_set_tcode(header, TCODE_STREAM_DATA);
+ ohci1394_it_data_set_sync(header, p->sy);
+
+ ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length);
}
if (p->header_length > 0) {
@@ -3587,24 +3568,19 @@ static int ohci_queue_iso(struct fw_iso_context *base,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
- unsigned long flags;
- int ret = -ENOSYS;
- spin_lock_irqsave(&ctx->context.ohci->lock, flags);
+ guard(spinlock_irqsave)(&ctx->context.ohci->lock);
+
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
- ret = queue_iso_transmit(ctx, packet, buffer, payload);
- break;
+ return queue_iso_transmit(ctx, packet, buffer, payload);
case FW_ISO_CONTEXT_RECEIVE:
- ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
- break;
+ return queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
- break;
+ return queue_iso_buffer_fill(ctx, packet, buffer, payload);
+ default:
+ return -ENOSYS;
}
- spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
-
- return ret;
}
static void ohci_flush_queue_iso(struct fw_iso_context *base)
@@ -3620,10 +3596,8 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
struct iso_context *ctx = container_of(base, struct iso_context, base);
int ret = 0;
- tasklet_disable_in_atomic(&ctx->context.tasklet);
-
if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
- context_tasklet((unsigned long)&ctx->context);
+ ohci_isoc_context_work(&base->work);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
@@ -3643,8 +3617,6 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
smp_mb__after_atomic();
}
- tasklet_enable(&ctx->context.tasklet);
-
return ret;
}
@@ -3863,7 +3835,7 @@ static int pci_probe(struct pci_dev *dev,
goto fail_msi;
}
- err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
+ err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir);
if (err)
goto fail_irq;
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index 71c2ed84cafb..218666cfe14a 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -153,7 +153,205 @@
#define OHCI1394_evt_unknown 0xe
#define OHCI1394_evt_flushed 0xf
-#define OHCI1394_phy_tcode 0xe
+
+// Asynchronous Transmit DMA.
+//
+// The content of the first two quadlets of data for AT DMA differs from the header of an IEEE 1394
+// asynchronous packet.
+
+#define OHCI1394_AT_DATA_Q0_srcBusID_MASK 0x00800000
+#define OHCI1394_AT_DATA_Q0_srcBusID_SHIFT 23
+#define OHCI1394_AT_DATA_Q0_spd_MASK 0x00070000
+#define OHCI1394_AT_DATA_Q0_spd_SHIFT 16
+#define OHCI1394_AT_DATA_Q0_tLabel_MASK 0x0000fc00
+#define OHCI1394_AT_DATA_Q0_tLabel_SHIFT 10
+#define OHCI1394_AT_DATA_Q0_rt_MASK 0x00000300
+#define OHCI1394_AT_DATA_Q0_rt_SHIFT 8
+#define OHCI1394_AT_DATA_Q0_tCode_MASK 0x000000f0
+#define OHCI1394_AT_DATA_Q0_tCode_SHIFT 4
+#define OHCI1394_AT_DATA_Q1_destinationId_MASK 0xffff0000
+#define OHCI1394_AT_DATA_Q1_destinationId_SHIFT 16
+#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK 0x0000ffff
+#define OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT 0
+#define OHCI1394_AT_DATA_Q1_rCode_MASK 0x0000f000
+#define OHCI1394_AT_DATA_Q1_rCode_SHIFT 12
+
+static inline bool ohci1394_at_data_get_src_bus_id(const __le32 *data)
+{
+	return !!((le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_srcBusID_MASK) >> OHCI1394_AT_DATA_Q0_srcBusID_SHIFT);
+}
+
+static inline void ohci1394_at_data_set_src_bus_id(__le32 *data, bool src_bus_id)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_srcBusID_MASK);
+ data[0] |= cpu_to_le32((src_bus_id << OHCI1394_AT_DATA_Q0_srcBusID_SHIFT) & OHCI1394_AT_DATA_Q0_srcBusID_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_speed(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_spd_MASK) >> OHCI1394_AT_DATA_Q0_spd_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_speed(__le32 *data, unsigned int scode)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_spd_MASK);
+ data[0] |= cpu_to_le32((scode << OHCI1394_AT_DATA_Q0_spd_SHIFT) & OHCI1394_AT_DATA_Q0_spd_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_tlabel(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tLabel_MASK) >> OHCI1394_AT_DATA_Q0_tLabel_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_tlabel(__le32 *data, unsigned int tlabel)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tLabel_MASK);
+ data[0] |= cpu_to_le32((tlabel << OHCI1394_AT_DATA_Q0_tLabel_SHIFT) & OHCI1394_AT_DATA_Q0_tLabel_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_retry(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_rt_MASK) >> OHCI1394_AT_DATA_Q0_rt_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_retry(__le32 *data, unsigned int retry)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_rt_MASK);
+ data[0] |= cpu_to_le32((retry << OHCI1394_AT_DATA_Q0_rt_SHIFT) & OHCI1394_AT_DATA_Q0_rt_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_tcode(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_AT_DATA_Q0_tCode_MASK) >> OHCI1394_AT_DATA_Q0_tCode_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_tcode(__le32 *data, unsigned int tcode)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_AT_DATA_Q0_tCode_MASK);
+ data[0] |= cpu_to_le32((tcode << OHCI1394_AT_DATA_Q0_tCode_SHIFT) & OHCI1394_AT_DATA_Q0_tCode_MASK);
+}
+
+static inline unsigned int ohci1394_at_data_get_destination_id(const __le32 *data)
+{
+ return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationId_MASK) >> OHCI1394_AT_DATA_Q1_destinationId_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_destination_id(__le32 *data, unsigned int destination_id)
+{
+ data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationId_MASK);
+ data[1] |= cpu_to_le32((destination_id << OHCI1394_AT_DATA_Q1_destinationId_SHIFT) & OHCI1394_AT_DATA_Q1_destinationId_MASK);
+}
+
+static inline u64 ohci1394_at_data_get_destination_offset(const __le32 *data)
+{
+ u64 hi = (u64)((le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK) >> OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT);
+ u64 lo = (u64)le32_to_cpu(data[2]);
+ return (hi << 32) | lo;
+}
+
+static inline void ohci1394_at_data_set_destination_offset(__le32 *data, u64 offset)
+{
+ u32 hi = (u32)(offset >> 32);
+ u32 lo = (u32)(offset & 0x00000000ffffffff);
+ data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK);
+ data[1] |= cpu_to_le32((hi << OHCI1394_AT_DATA_Q1_destinationOffsetHigh_SHIFT) & OHCI1394_AT_DATA_Q1_destinationOffsetHigh_MASK);
+ data[2] = cpu_to_le32(lo);
+}
+
+static inline unsigned int ohci1394_at_data_get_rcode(const __le32 *data)
+{
+ return (le32_to_cpu(data[1]) & OHCI1394_AT_DATA_Q1_rCode_MASK) >> OHCI1394_AT_DATA_Q1_rCode_SHIFT;
+}
+
+static inline void ohci1394_at_data_set_rcode(__le32 *data, unsigned int rcode)
+{
+ data[1] &= cpu_to_le32(~OHCI1394_AT_DATA_Q1_rCode_MASK);
+ data[1] |= cpu_to_le32((rcode << OHCI1394_AT_DATA_Q1_rCode_SHIFT) & OHCI1394_AT_DATA_Q1_rCode_MASK);
+}
+
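The getters and setters above make the quadlet layout explicit; in particular, the 48-bit destination offset is split across the low half of quadlet 1 and all of quadlet 2. A stand-alone sketch reproducing the AT test vector from ohci-serdes-test.c, assuming a little-endian host:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 0xffffe0000000ULL;	/* 48-bit destination offset */
	uint32_t q1 = 0, q2 = 0;

	q1 |= (0xffc2u << 16) & 0xffff0000u;		/* destination_id */
	q1 |= (uint32_t)(offset >> 32) & 0x0000ffffu;	/* offset, high 16 */
	q2 = (uint32_t)offset;				/* offset, low 32 */

	printf("q1=%08x q2=%08x\n", q1, q2);	/* ffc2ffff e0000000 */
	return 0;
}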
+// Isochronous Transmit DMA.
+//
+// The content of the first two quadlets of data for IT DMA differs from the header of an IEEE 1394
+// isochronous packet.
+
+#define OHCI1394_IT_DATA_Q0_spd_MASK 0x00070000
+#define OHCI1394_IT_DATA_Q0_spd_SHIFT 16
+#define OHCI1394_IT_DATA_Q0_tag_MASK 0x0000c000
+#define OHCI1394_IT_DATA_Q0_tag_SHIFT 14
+#define OHCI1394_IT_DATA_Q0_chanNum_MASK 0x00003f00
+#define OHCI1394_IT_DATA_Q0_chanNum_SHIFT 8
+#define OHCI1394_IT_DATA_Q0_tcode_MASK 0x000000f0
+#define OHCI1394_IT_DATA_Q0_tcode_SHIFT 4
+#define OHCI1394_IT_DATA_Q0_sy_MASK 0x0000000f
+#define OHCI1394_IT_DATA_Q0_sy_SHIFT 0
+#define OHCI1394_IT_DATA_Q1_dataLength_MASK 0xffff0000
+#define OHCI1394_IT_DATA_Q1_dataLength_SHIFT 16
+
+static inline unsigned int ohci1394_it_data_get_speed(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_spd_MASK) >> OHCI1394_IT_DATA_Q0_spd_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_speed(__le32 *data, unsigned int scode)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_spd_MASK);
+ data[0] |= cpu_to_le32((scode << OHCI1394_IT_DATA_Q0_spd_SHIFT) & OHCI1394_IT_DATA_Q0_spd_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_tag(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tag_MASK) >> OHCI1394_IT_DATA_Q0_tag_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_tag(__le32 *data, unsigned int tag)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tag_MASK);
+ data[0] |= cpu_to_le32((tag << OHCI1394_IT_DATA_Q0_tag_SHIFT) & OHCI1394_IT_DATA_Q0_tag_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_channel(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_chanNum_MASK) >> OHCI1394_IT_DATA_Q0_chanNum_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_channel(__le32 *data, unsigned int channel)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_chanNum_MASK);
+ data[0] |= cpu_to_le32((channel << OHCI1394_IT_DATA_Q0_chanNum_SHIFT) & OHCI1394_IT_DATA_Q0_chanNum_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_tcode(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_tcode_MASK) >> OHCI1394_IT_DATA_Q0_tcode_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_tcode(__le32 *data, unsigned int tcode)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_tcode_MASK);
+ data[0] |= cpu_to_le32((tcode << OHCI1394_IT_DATA_Q0_tcode_SHIFT) & OHCI1394_IT_DATA_Q0_tcode_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_sync(const __le32 *data)
+{
+ return (le32_to_cpu(data[0]) & OHCI1394_IT_DATA_Q0_sy_MASK) >> OHCI1394_IT_DATA_Q0_sy_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_sync(__le32 *data, unsigned int sync)
+{
+ data[0] &= cpu_to_le32(~OHCI1394_IT_DATA_Q0_sy_MASK);
+ data[0] |= cpu_to_le32((sync << OHCI1394_IT_DATA_Q0_sy_SHIFT) & OHCI1394_IT_DATA_Q0_sy_MASK);
+}
+
+static inline unsigned int ohci1394_it_data_get_data_length(const __le32 *data)
+{
+ return (le32_to_cpu(data[1]) & OHCI1394_IT_DATA_Q1_dataLength_MASK) >> OHCI1394_IT_DATA_Q1_dataLength_SHIFT;
+}
+
+static inline void ohci1394_it_data_set_data_length(__le32 *data, unsigned int data_length)
+{
+ data[1] &= cpu_to_le32(~OHCI1394_IT_DATA_Q1_dataLength_MASK);
+ data[1] |= cpu_to_le32((data_length << OHCI1394_IT_DATA_Q1_dataLength_SHIFT) & OHCI1394_IT_DATA_Q1_dataLength_MASK);
+}
// Self-ID DMA.
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 69c15135371c..88c5c4ff4bb6 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -2886,7 +2886,6 @@ static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
static const struct file_operations fops_reset_counts = {
.owner = THIS_MODULE,
.open = simple_open,
- .llseek = no_llseek,
.write = reset_all_on_write,
};
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 130d13e9cd6b..9e89a6a763da 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -950,7 +950,6 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -960,7 +959,6 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -977,7 +975,6 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1001,7 +998,6 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1025,7 +1021,6 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
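The .llseek removals in this series lean on the VFS change (v6.12 era, stated from memory) that made the explicit no_llseek pointer redundant: with .llseek left NULL, seekability is decided by the file's FMODE_LSEEK flag, so stream-style files reject seeks either way. A minimal sketch with hypothetical handler names:

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos);

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= example_write,
	/* no .llseek: equivalent to the removed no_llseek assignment */
};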
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 97bafb5f7038..0c17bdd388e1 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -309,7 +309,6 @@ static const struct file_operations efi_capsule_fops = {
.open = efi_capsule_open,
.write = efi_capsule_write,
.release = efi_capsule_release,
- .llseek = no_llseek,
};
static struct miscdevice efi_capsule_misc = {
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 7d2cdd9e2227..b69e68ef3f02 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -434,12 +434,17 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
- /* Fatal errors call __ghes_panic() before AER handler prints this */
- if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
- (gdata->error_severity & CPER_SEV_FATAL)) {
+ /*
+ * Print all valid AER info. Record may be from BERT (boot-time) or GHES (run-time).
+ *
+ * Fatal errors call __ghes_panic() before AER handler prints this.
+ */
+ if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
struct aer_capability_regs *aer;
aer = (struct aer_capability_regs *)pcie->aer_info;
+ printk("%saer_cor_status: 0x%08x, aer_cor_mask: 0x%08x\n",
+ pfx, aer->cor_status, aer->cor_mask);
printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
pfx, aer->uncor_status, aer->uncor_mask);
printk("%saer_uncor_severity: 0x%08x\n",
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index fdf07dd6f459..70490bf2697b 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -349,7 +349,7 @@ static void __init efi_debugfs_init(void)
int i = 0;
efi_debugfs = debugfs_create_dir("efi", NULL);
- if (IS_ERR_OR_NULL(efi_debugfs))
+ if (IS_ERR(efi_debugfs))
return;
for_each_efi_memory_desc(md) {
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index d33ccbc4a2c6..685098f9626f 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1229,7 +1229,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
struct efi_boot_memmap *map);
void process_unaccepted_memory(u64 start, u64 end);
-void accept_memory(phys_addr_t start, phys_addr_t end);
+void accept_memory(phys_addr_t start, unsigned long size);
void arch_accept_memory(phys_addr_t start, phys_addr_t end);
#endif
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index df3182f2e63a..1fd6823248ab 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -96,7 +96,7 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
}
/* Allocate space for the logs and copy them. */
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c
index c295ea3a6efc..757dbe734a47 100644
--- a/drivers/firmware/efi/libstub/unaccepted_memory.c
+++ b/drivers/firmware/efi/libstub/unaccepted_memory.c
@@ -177,9 +177,10 @@ void process_unaccepted_memory(u64 start, u64 end)
start / unit_size, (end - start) / unit_size);
}
-void accept_memory(phys_addr_t start, phys_addr_t end)
+void accept_memory(phys_addr_t start, unsigned long size)
{
unsigned long range_start, range_end;
+ phys_addr_t end = start + size;
unsigned long bitmap_size;
u64 unit_size;
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 47d67bb0a516..9e2628728aad 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -750,7 +750,6 @@ static const struct file_operations efi_test_fops = {
.unlocked_ioctl = efi_test_ioctl,
.open = efi_test_open,
.release = efi_test_close,
- .llseek = no_llseek,
};
static struct miscdevice efi_test_dev = {
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 50f6503fe49f..c2c067eff634 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -30,11 +30,12 @@ static LIST_HEAD(accepting_list);
* - memory that is below phys_base;
 * - memory that is above the memory that is addressable by the bitmap;
*/
-void accept_memory(phys_addr_t start, phys_addr_t end)
+void accept_memory(phys_addr_t start, unsigned long size)
{
struct efi_unaccepted_memory *unaccepted;
unsigned long range_start, range_end;
struct accept_range range, *entry;
+ phys_addr_t end = start + size;
unsigned long flags;
u64 unit_size;
@@ -74,13 +75,13 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
* "guard" page is accepted in addition to the memory that needs to be
* used:
*
- * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
- * checks up to end+unit_size if 'end' is aligned on a unit_size
- * boundary.
+ * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
+ * checks up to the next unit_size if 'start+size' is aligned on a
+ * unit_size boundary.
*
- * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
- * 'end' is aligned on a unit_size boundary. (immediately following
- * this comment)
+ * 2. Implicitly extend accept_memory(start, size) to the next unit_size
+ * if 'start+size' is aligned on a unit_size boundary. (immediately
+ * following this comment)
*/
if (!(end % unit_size))
end += unit_size;
@@ -156,9 +157,10 @@ retry:
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
-bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
+bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size)
{
struct efi_unaccepted_memory *unaccepted;
+ phys_addr_t end = start + size;
unsigned long flags;
bool ret = false;
u64 unit_size;
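
Both the stub and runtime implementations keep an internal end = start + size, so this is purely an interface change to a (start, size) pair; every caller converts accordingly. A hypothetical caller sketch:

/* hypothetical caller: the old (start, end) pair becomes (start, size) */
static void demo_accept_range(phys_addr_t range_start, phys_addr_t range_end)
{
	unsigned long size = range_end - range_start;

	if (range_contains_unaccepted_memory(range_start, size))
		accept_memory(range_start, size);
}
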
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 5f43dfa22f79..85c525745b31 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -452,7 +452,7 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
}
/* kobj_type: ties together all properties required to register an entry */
-static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+static const struct kobj_type fw_cfg_sysfs_entry_ktype = {
.default_groups = fw_cfg_sysfs_entry_groups,
.sysfs_ops = &fw_cfg_sysfs_attr_ops,
.release = fw_cfg_sysfs_release_entry,
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 525ebdc7ded5..f3bc0d427825 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -386,7 +386,6 @@ static const struct file_operations do_sign_fops = {
.open = rwtm_debug_open,
.read = do_sign_read,
.write = do_sign_write,
- .llseek = no_llseek,
};
static void rwtm_debugfs_release(void *root)
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 723ea0ad3f09..b08b4bb8f650 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -301,16 +301,17 @@ static irqreturn_t socfpga_fpga_isr(int irq, void *dev_id)
static int socfpga_fpga_wait_for_config_done(struct socfpga_fpga_priv *priv)
{
- int timeout, ret = 0;
+ int ret = 0;
+ long time_left;
socfpga_fpga_disable_irqs(priv);
init_completion(&priv->status_complete);
socfpga_fpga_enable_irqs(priv, SOCFPGA_FPGMGR_MON_CONF_DONE);
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&priv->status_complete,
msecs_to_jiffies(10));
- if (timeout == 0)
+ if (time_left == 0)
ret = -ETIMEDOUT;
socfpga_fpga_disable_irqs(priv);
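
Beyond the rename, the new type is the correct one: wait_for_completion_interruptible_timeout() returns a long that is 0 on timeout, negative (-ERESTARTSYS) if interrupted, or the remaining jiffies otherwise, so storing it in an int named "timeout" obscured the three-way outcome. The full pattern, as a sketch:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int demo_wait(struct completion *done)
{
	long time_left;

	time_left = wait_for_completion_interruptible_timeout(done,
						msecs_to_jiffies(10));
	if (time_left == 0)
		return -ETIMEDOUT;	/* timed out */
	if (time_left < 0)
		return time_left;	/* interrupted: -ERESTARTSYS */
	return 0;			/* completed in time */
}
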
diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c
index 2f7a24f23808..b9ab29809e96 100644
--- a/drivers/fpga/tests/fpga-bridge-test.c
+++ b/drivers/fpga/tests/fpga-bridge-test.c
@@ -23,6 +23,13 @@ struct bridge_ctx {
struct bridge_stats stats;
};
+/*
+ * Wrapper to avoid a cast warning when passing the action function directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister,
+ struct fpga_bridge *);
+
static int op_enable_set(struct fpga_bridge *bridge, bool enable)
{
struct bridge_stats *stats = bridge->priv;
@@ -50,6 +57,7 @@ static const struct fpga_bridge_ops fake_bridge_ops = {
static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *dev_name)
{
struct bridge_ctx *ctx;
+ int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -61,13 +69,10 @@ static struct bridge_ctx *register_test_bridge(struct kunit *test, const char *d
&ctx->stats);
KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));
- return ctx;
-}
+ ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge);
+ KUNIT_ASSERT_EQ(test, ret, 0);
-static void unregister_test_bridge(struct kunit *test, struct bridge_ctx *ctx)
-{
- fpga_bridge_unregister(ctx->bridge);
- kunit_device_unregister(test, ctx->dev);
+ return ctx;
}
static void fpga_bridge_test_get(struct kunit *test)
@@ -141,8 +146,6 @@ static void fpga_bridge_test_get_put_list(struct kunit *test)
fpga_bridges_put(&bridge_list);
KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list));
-
- unregister_test_bridge(test, ctx_1);
}
static int fpga_bridge_test_init(struct kunit *test)
@@ -152,11 +155,6 @@ static int fpga_bridge_test_init(struct kunit *test)
return 0;
}
-static void fpga_bridge_test_exit(struct kunit *test)
-{
- unregister_test_bridge(test, test->priv);
-}
-
static struct kunit_case fpga_bridge_test_cases[] = {
KUNIT_CASE(fpga_bridge_test_get),
KUNIT_CASE(fpga_bridge_test_toggle),
@@ -167,7 +165,6 @@ static struct kunit_case fpga_bridge_test_cases[] = {
static struct kunit_suite fpga_bridge_suite = {
.name = "fpga_bridge",
.init = fpga_bridge_test_init,
- .exit = fpga_bridge_test_exit,
.test_cases = fpga_bridge_test_cases,
};
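
KUNIT_DEFINE_ACTION_WRAPPER() is needed because deferred actions receive a void * argument; casting fpga_bridge_unregister(), which takes a struct fpga_bridge *, straight to kunit_action_t would trip -Wcast-function-type and CFI checks. The macro generates a typed thunk, roughly:

/* roughly what KUNIT_DEFINE_ACTION_WRAPPER(wrapper, orig, arg_type) expands to */
static void fpga_bridge_unregister_wrapper(void *in)
{
	struct fpga_bridge *bridge = in;

	fpga_bridge_unregister(bridge);
}
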
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
index 125b3a4d43c6..9cb37aefbac4 100644
--- a/drivers/fpga/tests/fpga-mgr-test.c
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -44,6 +44,16 @@ struct mgr_ctx {
struct mgr_stats stats;
};
+/*
+ * Wrappers to avoid cast warnings when passing action functions directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
+ struct sg_table *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free,
+ struct fpga_image_info *);
+
/**
* init_test_buffer() - Allocate and initialize a test image in a buffer.
* @test: KUnit test context object.
@@ -257,6 +267,9 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test)
KUNIT_ASSERT_EQ(test, ret, 0);
sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE);
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
ctx->img_info->sgt = sgt;
ret = fpga_mgr_load(ctx->mgr, ctx->img_info);
@@ -273,13 +286,12 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1);
KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_seq, ctx->stats.op_parse_header_seq + 2);
KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3);
-
- sg_free_table(ctx->img_info->sgt);
}
static int fpga_mgr_test_init(struct kunit *test)
{
struct mgr_ctx *ctx;
+ int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -294,19 +306,14 @@ static int fpga_mgr_test_init(struct kunit *test)
ctx->img_info = fpga_image_info_alloc(ctx->dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->img_info);
+ ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, ctx->img_info);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
test->priv = ctx;
return 0;
}
-static void fpga_mgr_test_exit(struct kunit *test)
-{
- struct mgr_ctx *ctx = test->priv;
-
- fpga_image_info_free(ctx->img_info);
- kunit_device_unregister(test, ctx->dev);
-}
-
static struct kunit_case fpga_mgr_test_cases[] = {
KUNIT_CASE(fpga_mgr_test_get),
KUNIT_CASE(fpga_mgr_test_lock),
@@ -318,7 +325,6 @@ static struct kunit_case fpga_mgr_test_cases[] = {
static struct kunit_suite fpga_mgr_suite = {
.name = "fpga_mgr",
.init = fpga_mgr_test_init,
- .exit = fpga_mgr_test_exit,
.test_cases = fpga_mgr_test_cases,
};
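
kunit_add_action_or_reset() ties the cleanup to the test's lifetime, which is why the explicit .exit callbacks could go away: the action runs automatically when the test finishes, pass or fail, in reverse registration order. If registering the action itself fails, the _or_reset variant invokes it immediately before returning the error, so nothing leaks. A fragment mirroring the usage above (test, sgt and ret assumed in scope):

ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, ret, 0);

/* if registration fails, sg_free_table_wrapper(sgt) has already been called */
ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
KUNIT_ASSERT_EQ(test, ret, 0);
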
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
index bcf0651df261..6a108cafded8 100644
--- a/drivers/fpga/tests/fpga-region-test.c
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -35,6 +35,19 @@ struct test_ctx {
struct mgr_stats mgr_stats;
};
+/*
+ * Wrappers to avoid cast warnings when passing action functions directly
+ * to kunit_add_action().
+ */
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_image_info_free_wrapper, fpga_image_info_free,
+ struct fpga_image_info *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_bridge_unregister_wrapper, fpga_bridge_unregister,
+ struct fpga_bridge *);
+
+KUNIT_DEFINE_ACTION_WRAPPER(fpga_region_unregister_wrapper, fpga_region_unregister,
+ struct fpga_region *);
+
static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
struct mgr_stats *stats = mgr->priv;
@@ -111,6 +124,9 @@ static void fpga_region_test_program_fpga(struct kunit *test)
img_info = fpga_image_info_alloc(ctx->mgr_dev);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info);
+ ret = kunit_add_action_or_reset(test, fpga_image_info_free_wrapper, img_info);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
img_info->buf = img_buf;
img_info->count = sizeof(img_buf);
@@ -130,8 +146,6 @@ static void fpga_region_test_program_fpga(struct kunit *test)
KUNIT_EXPECT_EQ(test, 2, ctx->bridge_stats.cycles_count);
fpga_bridges_put(&ctx->region->bridge_list);
-
- fpga_image_info_free(img_info);
}
/*
@@ -144,6 +158,7 @@ static int fpga_region_test_init(struct kunit *test)
{
struct test_ctx *ctx;
struct fpga_region_info region_info = { 0 };
+ int ret;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -164,6 +179,9 @@ static int fpga_region_test_init(struct kunit *test)
ctx->bridge_stats.enable = true;
+ ret = kunit_add_action_or_reset(test, fpga_bridge_unregister_wrapper, ctx->bridge);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
ctx->region_dev = kunit_device_register(test, "fpga-region-test-dev");
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_dev);
@@ -174,24 +192,14 @@ static int fpga_region_test_init(struct kunit *test)
ctx->region = fpga_region_register_full(ctx->region_dev, &region_info);
KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region));
+ ret = kunit_add_action_or_reset(test, fpga_region_unregister_wrapper, ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
test->priv = ctx;
return 0;
}
-static void fpga_region_test_exit(struct kunit *test)
-{
- struct test_ctx *ctx = test->priv;
-
- fpga_region_unregister(ctx->region);
- kunit_device_unregister(test, ctx->region_dev);
-
- fpga_bridge_unregister(ctx->bridge);
- kunit_device_unregister(test, ctx->bridge_dev);
-
- kunit_device_unregister(test, ctx->mgr_dev);
-}
-
static struct kunit_case fpga_region_test_cases[] = {
KUNIT_CASE(fpga_region_test_class_find),
KUNIT_CASE(fpga_region_test_program_fpga),
@@ -199,9 +207,8 @@ static struct kunit_case fpga_region_test_cases[] = {
};
static struct kunit_suite fpga_region_suite = {
- .name = "fpga_mgr",
+ .name = "fpga_region",
.init = fpga_region_test_init,
- .exit = fpga_region_test_exit,
.test_cases = fpga_region_test_cases,
};
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 0ac93183d201..4db3d80e10b0 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -387,7 +387,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
const char *why;
int err;
u32 intr_status;
- unsigned long timeout;
+ unsigned long time_left;
unsigned long flags;
struct scatterlist *sg;
int i;
@@ -427,8 +427,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
zynq_step_dma(priv);
spin_unlock_irqrestore(&priv->dma_lock, flags);
- timeout = wait_for_completion_timeout(&priv->dma_done,
- msecs_to_jiffies(DMA_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&priv->dma_done,
+ msecs_to_jiffies(DMA_TIMEOUT_MS));
spin_lock_irqsave(&priv->dma_lock, flags);
zynq_fpga_set_irq(priv, 0);
@@ -452,7 +452,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
if (priv->cur_sg ||
!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
- if (timeout == 0)
+ if (time_left == 0)
why = "DMA timed out";
else
why = "DMA did not complete";
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index 48f2ee0f78c4..883ef86ad3fc 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -206,7 +206,6 @@ static const struct file_operations gnss_fops = {
.read = gnss_read,
.write = gnss_write,
.poll = gnss_poll,
- .llseek = no_llseek,
};
static struct class *gnss_class;
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 6cedf46efec6..ab798c848215 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
@@ -19,29 +20,8 @@
#include <linux/bitops.h>
#include <linux/seq_file.h>
-#define EP93XX_GPIO_F_INT_STATUS 0x5c
-#define EP93XX_GPIO_A_INT_STATUS 0xa0
-#define EP93XX_GPIO_B_INT_STATUS 0xbc
-
-/* Maximum value for gpio line identifiers */
-#define EP93XX_GPIO_LINE_MAX 63
-
-/* Number of GPIO chips in EP93XX */
-#define EP93XX_GPIO_CHIP_NUM 8
-
-/* Maximum value for irq capable line identifiers */
-#define EP93XX_GPIO_LINE_MAX_IRQ 23
-
-#define EP93XX_GPIO_A_IRQ_BASE 64
-#define EP93XX_GPIO_B_IRQ_BASE 72
-/*
- * Static mapping of GPIO bank F IRQS:
- * F0..F7 (16..24) to irq 80..87.
- */
-#define EP93XX_GPIO_F_IRQ_BASE 80
-
struct ep93xx_gpio_irq_chip {
- u8 irq_offset;
+ void __iomem *base;
u8 int_unmasked;
u8 int_enabled;
u8 int_type1;
@@ -50,15 +30,11 @@ struct ep93xx_gpio_irq_chip {
};
struct ep93xx_gpio_chip {
+ void __iomem *base;
struct gpio_chip gc;
struct ep93xx_gpio_irq_chip *eic;
};
-struct ep93xx_gpio {
- void __iomem *base;
- struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM];
-};
-
#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
@@ -79,25 +55,23 @@ static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc
#define EP93XX_INT_RAW_STATUS_OFFSET 0x14
#define EP93XX_INT_DEBOUNCE_OFFSET 0x18
-static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg,
- struct ep93xx_gpio_irq_chip *eic)
+static void ep93xx_gpio_update_int_params(struct ep93xx_gpio_irq_chip *eic)
{
- writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
+ writeb_relaxed(0, eic->base + EP93XX_INT_EN_OFFSET);
writeb_relaxed(eic->int_type2,
- epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET);
+ eic->base + EP93XX_INT_TYPE2_OFFSET);
writeb_relaxed(eic->int_type1,
- epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET);
+ eic->base + EP93XX_INT_TYPE1_OFFSET);
writeb_relaxed(eic->int_unmasked & eic->int_enabled,
- epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
+ eic->base + EP93XX_INT_EN_OFFSET);
}
static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
unsigned int offset, bool enable)
{
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
int port_mask = BIT(offset);
@@ -106,53 +80,43 @@ static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
else
eic->int_debounce &= ~port_mask;
- writeb(eic->int_debounce,
- epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET);
+ writeb(eic->int_debounce, eic->base + EP93XX_INT_DEBOUNCE_OFFSET);
}
-static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
+static u32 ep93xx_gpio_ab_irq_handler(struct gpio_chip *gc)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
unsigned long stat;
int offset;
- chained_irq_enter(irqchip, desc);
-
- /*
- * Dispatch the IRQs to the irqdomain of each A and B
- * gpiochip irqdomains depending on what has fired.
- * The tricky part is that the IRQ line is shared
- * between bank A and B and each has their own gpiochip.
- */
- stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
+ stat = readb(eic->base + EP93XX_INT_STATUS_OFFSET);
for_each_set_bit(offset, &stat, 8)
- generic_handle_domain_irq(epg->gc[0].gc.irq.domain,
- offset);
+ generic_handle_domain_irq(gc->irq.domain, offset);
- stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
- for_each_set_bit(offset, &stat, 8)
- generic_handle_domain_irq(epg->gc[1].gc.irq.domain,
- offset);
+ return stat;
+}
- chained_irq_exit(irqchip, desc);
+static irqreturn_t ep93xx_ab_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_RETVAL(ep93xx_gpio_ab_irq_handler(dev_id));
}
static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
{
- /*
- * map discontiguous hw irq range to continuous sw irq range:
- *
- * IRQ_EP93XX_GPIO{0..7}MUX -> EP93XX_GPIO_LINE_F{0..7}
- */
struct irq_chip *irqchip = irq_desc_get_chip(desc);
- unsigned int irq = irq_desc_get_irq(desc);
- int port_f_idx = (irq & 7) ^ 4; /* {20..23,48..51} -> {0..7} */
- int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct gpio_irq_chip *gic = &gc->irq;
+ unsigned int parent = irq_desc_get_irq(desc);
+ unsigned int i;
chained_irq_enter(irqchip, desc);
- generic_handle_irq(gpio_irq);
+ for (i = 0; i < gic->num_parents; i++)
+ if (gic->parents[i] == parent)
+ break;
+
+ if (i < gic->num_parents)
+ generic_handle_domain_irq(gc->irq.domain, i);
+
chained_irq_exit(irqchip, desc);
}
@@ -160,54 +124,53 @@ static void ep93xx_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port_mask = BIT(d->irq & 7);
+ int port_mask = BIT(irqd_to_hwirq(d));
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
eic->int_type2 ^= port_mask; /* switch edge direction */
- ep93xx_gpio_update_int_params(epg, eic);
+ ep93xx_gpio_update_int_params(eic);
}
- writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
+ writeb(port_mask, eic->base + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port_mask = BIT(d->irq & 7);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int port_mask = BIT(hwirq);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
eic->int_type2 ^= port_mask; /* switch edge direction */
eic->int_unmasked &= ~port_mask;
- ep93xx_gpio_update_int_params(epg, eic);
+ ep93xx_gpio_update_int_params(eic);
- writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
- gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+ writeb(port_mask, eic->base + EP93XX_INT_EOI_OFFSET);
+ gpiochip_disable_irq(gc, hwirq);
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- eic->int_unmasked &= ~BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, eic);
- gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+ eic->int_unmasked &= ~BIT(hwirq);
+ ep93xx_gpio_update_int_params(eic);
+ gpiochip_disable_irq(gc, hwirq);
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, irqd_to_hwirq(d));
- eic->int_unmasked |= BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, eic);
+ gpiochip_enable_irq(gc, hwirq);
+ eic->int_unmasked |= BIT(hwirq);
+ ep93xx_gpio_update_int_params(eic);
}
/*
@@ -219,12 +182,11 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int offset = d->irq & 7;
- int port_mask = BIT(offset);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int port_mask = BIT(hwirq);
irq_flow_handler_t handler;
- gc->direction_input(gc, offset);
+ gc->direction_input(gc, hwirq);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
@@ -250,7 +212,7 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_BOTH:
eic->int_type1 |= port_mask;
/* set initial polarity based on current input level */
- if (gc->get(gc, offset))
+ if (gc->get(gc, hwirq))
eic->int_type2 &= ~port_mask; /* falling */
else
eic->int_type2 |= port_mask; /* rising */
@@ -264,51 +226,11 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
eic->int_enabled |= port_mask;
- ep93xx_gpio_update_int_params(epg, eic);
+ ep93xx_gpio_update_int_params(eic);
return 0;
}
-/*************************************************************************
- * gpiolib interface for EP93xx on-chip GPIOs
- *************************************************************************/
-struct ep93xx_gpio_bank {
- const char *label;
- int data;
- int dir;
- int irq;
- int base;
- bool has_irq;
- bool has_hierarchical_irq;
- unsigned int irq_base;
-};
-
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \
- { \
- .label = _label, \
- .data = _data, \
- .dir = _dir, \
- .irq = _irq, \
- .base = _base, \
- .has_irq = _has_irq, \
- .has_hierarchical_irq = _has_hier, \
- .irq_base = _irq_base, \
- }
-
-static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
- /* Bank A has 8 IRQs */
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, EP93XX_GPIO_A_IRQ_BASE),
- /* Bank B has 8 IRQs */
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, EP93XX_GPIO_B_IRQ_BASE),
- EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0),
- EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0),
- EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0),
- /* Bank F has 8 IRQs */
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, EP93XX_GPIO_F_IRQ_BASE),
- EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0),
- EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0),
-};
-
static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
unsigned long config)
{
@@ -342,115 +264,112 @@ static const struct irq_chip gpio_eic_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
- struct platform_device *pdev,
- struct ep93xx_gpio *epg,
- struct ep93xx_gpio_bank *bank)
+static int ep93xx_setup_irqs(struct platform_device *pdev,
+ struct ep93xx_gpio_chip *egc)
{
- void __iomem *data = epg->base + bank->data;
- void __iomem *dir = epg->base + bank->dir;
struct gpio_chip *gc = &egc->gc;
struct device *dev = &pdev->dev;
- struct gpio_irq_chip *girq;
- int err;
-
- err = bgpio_init(gc, dev, 1, data, NULL, NULL, dir, NULL, 0);
- if (err)
- return err;
-
- gc->label = bank->label;
- gc->base = bank->base;
-
- girq = &gc->irq;
- if (bank->has_irq || bank->has_hierarchical_irq) {
- gc->set_config = ep93xx_gpio_set_config;
- egc->eic = devm_kcalloc(dev, 1,
- sizeof(*egc->eic),
- GFP_KERNEL);
- if (!egc->eic)
- return -ENOMEM;
- egc->eic->irq_offset = bank->irq;
- gpio_irq_chip_set_chip(girq, &gpio_eic_irq_chip);
- }
+ struct gpio_irq_chip *girq = &gc->irq;
+ int ret, irq, i;
+ void __iomem *intr;
- if (bank->has_irq) {
- int ab_parent_irq = platform_get_irq(pdev, 0);
-
- girq->parent_handler = ep93xx_gpio_ab_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_level_irq;
- girq->parents[0] = ab_parent_irq;
- girq->first = bank->irq_base;
- }
+ intr = devm_platform_ioremap_resource_byname(pdev, "intr");
+ if (IS_ERR(intr))
+ return PTR_ERR(intr);
+
+ gc->set_config = ep93xx_gpio_set_config;
+ egc->eic = devm_kzalloc(dev, sizeof(*egc->eic), GFP_KERNEL);
+ if (!egc->eic)
+ return -ENOMEM;
+
+ egc->eic->base = intr;
+ gpio_irq_chip_set_chip(girq, &gpio_eic_irq_chip);
+ girq->num_parents = platform_irq_count(pdev);
+ if (girq->num_parents == 0)
+ return -EINVAL;
+
+ girq->parents = devm_kcalloc(dev, girq->num_parents, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
- /* Only bank F has especially funky IRQ handling */
- if (bank->has_hierarchical_irq) {
- int gpio_irq;
- int i;
+ if (girq->num_parents == 1) { /* A/B irqchips */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
- /*
- * FIXME: convert this to use hierarchical IRQ support!
- * this requires fixing the root irqchip to be hierarchical.
- */
+ ret = devm_request_irq(dev, irq, ep93xx_ab_irq_handler,
+ IRQF_SHARED, gc->label, gc);
+ if (ret)
+ return dev_err_probe(dev, ret, "requesting IRQ: %d\n", irq);
+
+ girq->parents[0] = irq;
+ } else { /* F irqchip */
girq->parent_handler = ep93xx_gpio_f_irq_handler;
- girq->num_parents = 8;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- /* Pick resources 1..8 for these IRQs */
+
for (i = 0; i < girq->num_parents; i++) {
- girq->parents[i] = platform_get_irq(pdev, i + 1);
- gpio_irq = bank->irq_base + i;
- irq_set_chip_data(gpio_irq, &epg->gc[5]);
- irq_set_chip_and_handler(gpio_irq,
- girq->chip,
- handle_level_irq);
- irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0)
+ continue;
+
+ girq->parents[i] = irq;
}
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_level_irq;
- girq->first = bank->irq_base;
+
+ girq->map = girq->parents;
}
- return devm_gpiochip_add_data(dev, gc, epg);
+ girq->default_type = IRQ_TYPE_NONE;
+ /* TODO: replace with handle_bad_irq() once we are fully hierarchical */
+ girq->handler = handle_simple_irq;
+
+ return 0;
}
static int ep93xx_gpio_probe(struct platform_device *pdev)
{
- struct ep93xx_gpio *epg;
- int i;
-
- epg = devm_kzalloc(&pdev->dev, sizeof(*epg), GFP_KERNEL);
- if (!epg)
+ struct ep93xx_gpio_chip *egc;
+ struct gpio_chip *gc;
+ void __iomem *data;
+ void __iomem *dir;
+ int ret;
+
+ egc = devm_kzalloc(&pdev->dev, sizeof(*egc), GFP_KERNEL);
+ if (!egc)
return -ENOMEM;
- epg->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(epg->base))
- return PTR_ERR(epg->base);
-
- for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
- struct ep93xx_gpio_chip *gc = &epg->gc[i];
- struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
-
- if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))
- dev_warn(&pdev->dev, "Unable to add gpio bank %s\n",
- bank->label);
+ data = devm_platform_ioremap_resource_byname(pdev, "data");
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ dir = devm_platform_ioremap_resource_byname(pdev, "dir");
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ gc = &egc->gc;
+ ret = bgpio_init(gc, &pdev->dev, 1, data, NULL, NULL, dir, NULL, 0);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "unable to init generic GPIO\n");
+
+ gc->label = dev_name(&pdev->dev);
+ if (platform_irq_count(pdev) > 0) {
+ dev_dbg(&pdev->dev, "setting up irqs for %s\n", dev_name(&pdev->dev));
+ ret = ep93xx_setup_irqs(pdev, egc);
+ if (ret)
+ dev_err_probe(&pdev->dev, ret, "setup irqs failed");
}
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, gc, egc);
}
+static const struct of_device_id ep93xx_gpio_match[] = {
+ { .compatible = "cirrus,ep9301-gpio" },
+ { /* sentinel */ }
+};
+
static struct platform_driver ep93xx_gpio_driver = {
.driver = {
.name = "gpio-ep93xx",
+ .of_match_table = ep93xx_gpio_match,
},
.probe = ep93xx_gpio_probe,
};
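
Since each bank is now its own platform device, banks A and B no longer share one chained handler fanning out to two gpiochips; the single parent interrupt is requested with IRQF_SHARED and the handler reports via IRQ_RETVAL(), i.e. IRQ_HANDLED only when this bank actually had a pending status bit. A sketch of that convention with a hypothetical status register:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t demo_shared_handler(int irq, void *dev_id)
{
	void __iomem *status_reg = dev_id;	/* hypothetical cookie */
	unsigned long stat = readb(status_reg);
	int bit;

	for_each_set_bit(bit, &stat, 8)
		;	/* dispatch per-line handling here */

	/* IRQ_HANDLED iff stat != 0, so other sharers still get polled */
	return IRQ_RETVAL(stat);
}
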
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 455eecf6380e..d39c6618bade 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -347,7 +347,6 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
.open = gpio_mockup_debugfs_open,
.read = gpio_mockup_debugfs_read,
.write = gpio_mockup_debugfs_write,
- .llseek = no_llseek,
.release = single_release,
};
diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c
index aed6d1f6cfc3..07e0d7180579 100644
--- a/drivers/gpio/gpio-sloppy-logic-analyzer.c
+++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c
@@ -217,7 +217,6 @@ static const struct file_operations fops_trigger = {
.owner = THIS_MODULE,
.open = trigger_open,
.write = trigger_write,
- .llseek = no_llseek,
.release = single_release,
};
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 5aac59de0d76..78c9d9ed3d68 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2842,7 +2842,6 @@ static const struct file_operations gpio_fileops = {
.poll = lineinfo_watch_poll,
.read = lineinfo_watch_read,
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = gpio_ioctl_compat,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 6b2c6b91f962..1cb5a4f19293 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -107,7 +107,7 @@ config DRM_KMS_HELPER
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
- depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE)
+ depends on DRM
select FONT_SUPPORT
help
Enable a drm panic handler, which will display a user-friendly message
@@ -149,6 +149,37 @@ config DRM_PANIC_SCREEN
or by writing to /sys/module/drm/parameters/panic_screen sysfs entry
Default is "user"
+config DRM_PANIC_SCREEN_QR_CODE
+ bool "Add a panic screen with a QR code"
+ depends on DRM_PANIC && RUST
+ help
+ This option adds a QR code generator, and a panic screen with a QR
+ code. The QR code will contain the last lines of kmsg and other debug
+ information. This makes it easier for the user to report a kernel
+ panic, with all debug information available.
+ To use this panic screen, also set DRM_PANIC_SCREEN to "qr_code"
+
+config DRM_PANIC_SCREEN_QR_CODE_URL
+ string "Base URL of the QR code in the panic screen"
+ depends on DRM_PANIC_SCREEN_QR_CODE
+ help
+ This option sets the base URL to report the kernel panic. If it is
+ set, the QR code will contain the URL, with the kmsg compressed with
+ zlib passed as a URL parameter. If it is empty, the QR code will
+ contain the kmsg as uncompressed text only.
+ There is demo code in JavaScript to decode and decompress the kmsg
+ data from the URL parameter at https://github.com/kdj0c/panic_report
+
+config DRM_PANIC_SCREEN_QR_VERSION
+ int "Maximum version (size) of the QR code."
+ depends on DRM_PANIC_SCREEN_QR_CODE
+ default 40
+ help
+ This option limits the version (or size) of the QR code. QR code
+ versions range from Version 1 (21x21) to Version 40 (177x177).
+ Smaller QR codes are easier to read but will contain less debugging
+ data. Default is 40.
+
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
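
Put together, a kernel configuration exercising the new screen would look something like the fragment below (the reporting URL is only an example):

CONFIG_RUST=y
CONFIG_DRM_PANIC=y
CONFIG_DRM_PANIC_SCREEN="qr_code"
CONFIG_DRM_PANIC_SCREEN_QR_CODE=y
CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL="https://example.com/panic_report"
CONFIG_DRM_PANIC_SCREEN_QR_VERSION=40
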
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index fa432a1ac9e2..784229d4504d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -89,6 +89,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
drm-$(CONFIG_DRM_PANIC) += drm_panic.o
+drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 38408e4e158e..c7b18c52825d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -39,23 +39,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
-I$(FULL_AMD_PATH)/amdkfd
-subdir-ccflags-y := -Wextra
-subdir-ccflags-y += -Wunused
-subdir-ccflags-y += -Wmissing-prototypes
-subdir-ccflags-y += -Wmissing-declarations
-subdir-ccflags-y += -Wmissing-include-dirs
-subdir-ccflags-y += -Wold-style-definition
-subdir-ccflags-y += -Wmissing-format-attribute
-# Need this to avoid recursive variable evaluation issues
-cond-flags := $(call cc-option, -Wunused-but-set-variable) \
- $(call cc-option, -Wunused-const-variable) \
- $(call cc-option, -Wstringop-truncation) \
- $(call cc-option, -Wpacked-not-aligned)
-subdir-ccflags-y += $(cond-flags)
-subdir-ccflags-y += -Wno-unused-parameter
-subdir-ccflags-y += -Wno-type-limits
-subdir-ccflags-y += -Wno-sign-compare
-subdir-ccflags-y += -Wno-missing-field-initializers
+# Locally disable W=1 warnings enabled in drm subsystem Makefile
subdir-ccflags-y += -Wno-override-init
subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 137a88b8de45..9b1e0ede05a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -118,6 +118,8 @@
#define MAX_GPU_INSTANCE 64
+#define GFX_SLICE_PERIOD msecs_to_jiffies(250)
+
struct amdgpu_gpu_instance {
struct amdgpu_device *adev;
int mgpu_fan_enabled;
@@ -235,6 +237,7 @@ extern int sched_policy;
extern bool debug_evictions;
extern bool no_system_mem_limit;
extern int halt_if_hws_hang;
+extern uint amdgpu_svm_default_granularity;
#else
static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
static const bool __maybe_unused debug_evictions; /* = false */
@@ -347,9 +350,9 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
-#define MAX_KIQ_REG_WAIT (amdgpu_sriov_vf(adev) ? 50000 : 5000) /* in usecs, extend for VF */
-#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
+#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
int amdgpu_device_ip_set_clockgating_state(void *dev,
@@ -823,17 +826,6 @@ struct amdgpu_mqd {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
-struct amdgpu_reset_info {
- /* reset dump register */
- u32 *reset_dump_reg_list;
- u32 *reset_dump_reg_value;
- int num_regs;
-
-#ifdef CONFIG_DEV_COREDUMP
- struct amdgpu_coredump_info *coredump_info;
-#endif
-};
-
/*
* Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
*/
@@ -1091,10 +1083,6 @@ struct amdgpu_device {
struct amdgpu_virt virt;
- /* link all shadow bo */
- struct list_head shadow_list;
- struct mutex shadow_list_lock;
-
/* record hw reset is performed */
bool has_hw_reset;
u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
@@ -1157,8 +1145,6 @@ struct amdgpu_device {
struct mutex benchmark_mutex;
- struct amdgpu_reset_info reset_info;
-
bool scpm_enabled;
uint32_t scpm_status;
@@ -1175,6 +1161,11 @@ struct amdgpu_device {
bool debug_disable_soft_recovery;
bool debug_use_vram_fw_buf;
bool debug_enable_ras_aca;
+ bool debug_exp_resets;
+
+ bool enforce_isolation[MAX_XCP];
+ /* protects cleaner-shader-based isolation between GFX and compute processes */
+ struct mutex enforce_isolation_mutex;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1484,7 +1475,6 @@ extern const int amdgpu_max_kms_ioctl;
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
-void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
@@ -1588,13 +1578,6 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
-#if defined(CONFIG_DRM_AMD_DC)
-int amdgpu_dm_display_resume(struct amdgpu_device *adev );
-#else
-static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
-#endif
-
-
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 19158cc30f31..2ca127173135 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -80,6 +80,9 @@ static void aca_banks_release(struct aca_banks *banks)
{
struct aca_bank_node *node, *tmp;
+ if (list_empty(&banks->list))
+ return;
+
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
@@ -453,13 +456,13 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er
switch (type) {
case ACA_ERROR_TYPE_UE:
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, count);
break;
case ACA_ERROR_TYPE_CE:
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, count);
break;
case ACA_ERROR_TYPE_DEFERRED:
- amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count);
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, count);
break;
default:
break;
@@ -508,7 +511,7 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h
return -EINVAL;
}
- /* udpate aca bank to aca source error_cache first */
+ /* update aca bank to aca source error_cache first */
ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL);
if (ret)
return ret;
@@ -562,9 +565,13 @@ static void aca_error_fini(struct aca_error *aerr)
struct aca_bank_error *bank_error, *tmp;
mutex_lock(&aerr->lock);
+ if (list_empty(&aerr->list))
+ goto out_unlock;
+
list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
aca_bank_error_remove(aerr, bank_error);
+out_unlock:
mutex_destroy(&aerr->lock);
}
@@ -680,6 +687,9 @@ static void aca_manager_fini(struct aca_handle_manager *mgr)
{
struct aca_handle *handle, *tmp;
+ if (list_empty(&mgr->list))
+ return;
+
list_for_each_entry_safe(handle, tmp, &mgr->list, node)
amdgpu_aca_remove_handle(handle);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 03205e3c3746..4f08b153cb66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -364,15 +364,15 @@ allocate_mem_reserve_bo_failed:
return r;
}
-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
- struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
+ struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
- amdgpu_bo_reserve(bo, true);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unpin(bo);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&(bo));
+ amdgpu_bo_reserve(*bo, true);
+ amdgpu_bo_kunmap(*bo);
+ amdgpu_bo_unpin(*bo);
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
}
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
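
Taking void **mem_obj lets the free path hand the caller's own pointer to amdgpu_bo_unref(), which clears it, so a stale handle can no longer alias a freed BO; any accidental reuse now shows up as a NULL dereference rather than a use-after-free. A hypothetical caller sketch (adev and size assumed in scope):

void *mqd_mem_obj = NULL;
uint64_t gpu_addr;
void *cpu_ptr;
int r;

r = amdgpu_amdkfd_alloc_gtt_mem(adev, size, &mqd_mem_obj,
				&gpu_addr, &cpu_ptr, true);
if (r)
	return r;

/* ... use the buffer ... */

amdgpu_amdkfd_free_gtt_mem(adev, &mqd_mem_obj);
/* mqd_mem_obj is NULL from here on */
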
@@ -783,22 +783,6 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
return 0;
}
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst, int hub_type)
-{
- if (!hub_type) {
- if (adev->gfxhub.funcs->query_utcl2_poison_status)
- return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst);
- else
- return false;
- } else {
- if (adev->mmhub.funcs->query_utcl2_poison_status)
- return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst);
- else
- return false;
- }
-}
-
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
return kgd2kfd_check_and_lock_kfd();
@@ -887,3 +871,21 @@ free_ring_funcs:
return r;
}
+
+/* Stop scheduling on KFD */
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ return kgd2kfd_stop_sched(adev->kfd.dev, node_id);
+}
+
+/* Start scheduling on KFD */
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ return kgd2kfd_start_sched(adev->kfd.dev, node_id);
+}
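
These wrappers give the GFX side a way to quiesce and resume KFD compute scheduling on a single partition, presumably for the enforce_isolation/cleaner-shader work added elsewhere in this series; both degrade to no-ops while KFD is not yet initialized. A hypothetical usage sketch:

int r;

r = amdgpu_amdkfd_stop_sched(adev, node_id);
if (r)
	return r;

/* ... perform work that requires compute queues to be idle ... */

r = amdgpu_amdkfd_start_sched(adev, node_id);
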
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index e7bb1ca35801..f9d119448442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -235,7 +235,7 @@ int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
@@ -264,6 +264,8 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
uint32_t *payload);
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
u32 inst);
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -322,7 +324,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence __rcu **ef);
@@ -345,11 +347,9 @@ void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *ad
pasid_notify pasid_fn, void *data, uint32_t reset);
bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
-bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
-bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst, int hub_type);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
@@ -426,6 +426,8 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
#else
static inline int kgd2kfd_init(void)
{
@@ -496,5 +498,15 @@ static inline int kgd2kfd_check_and_lock_kfd(void)
static inline void kgd2kfd_unlock_kfd(void)
{
}
+
+static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return 0;
+}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index aff08321e976..8dfdb18197c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -191,4 +191,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 3a3f3ce09f00..9435af2e6bdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
-#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include "amdgpu.h"
@@ -300,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
if (r)
goto out;
} else {
- drm_sched_start(&ring->sched, false);
+ drm_sched_start(&ring->sched);
}
}
@@ -418,5 +417,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
- .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index a5c7259cf2a3..e2ae714a700f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -541,5 +541,7 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
kgd_gfx_v9_4_3_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
- .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch
+ .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 3ab6c3aa0ad1..62176d607bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -1070,6 +1070,20 @@ static void program_trap_handler_settings(struct amdgpu_device *adev,
unlock_srbm(adev);
}
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ return 0;
+}
+
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -1097,4 +1111,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
.program_trap_handler_settings = program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v10_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
index 67bcaa3d4226..9efd2dd4fdd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -56,3 +56,12 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data);
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst);
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst,
+ unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 8c8437a4383f..c718bedda0ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -680,5 +680,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v10_set_address_watch,
- .clear_address_watch = kgd_gfx_v10_clear_address_watch
+ .clear_address_watch = kgd_gfx_v10_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v10_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index b61a32d6af4b..a4ba49cb22db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -786,6 +786,20 @@ static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev,
return 0;
}
+static uint64_t kgd_gfx_v11_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ return 0;
+}
+
+static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -808,5 +822,7 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v11_set_address_watch,
- .clear_address_watch = kgd_gfx_v11_clear_address_watch
+ .clear_address_watch = kgd_gfx_v11_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v11_hqd_reset
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 5a35a8ca8922..3bc0cbf45bc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -950,28 +950,30 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
* @inst: xcc's instance number on a multi-XCC setup
*/
static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
- int *wave_cnt, int *vmid, uint32_t inst)
+ struct kfd_cu_occupancy *queue_cnt, uint32_t inst)
{
int pipe_idx;
int queue_slot;
unsigned int reg_val;
-
+ unsigned int wave_cnt;
/*
* Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
* parameters to read out waves in flight. Get VMID if there are
* non-zero waves in flight.
*/
- *vmid = 0xFF;
- *wave_cnt = 0;
pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
- soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst);
- reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
- queue_slot);
- *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
- if (*wave_cnt != 0)
- *vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) &
- CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
+ soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, GET_INST(GC, inst));
+ reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot);
+ wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
+ if (wave_cnt != 0) {
+ queue_cnt->wave_cnt += wave_cnt;
+ queue_cnt->doorbell_off =
+ (RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL) &
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK) >>
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+ }
}
/**
@@ -981,9 +983,8 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
* or more queues running and submitting waves to compute units.
*
* @adev: Handle of device from which to get number of waves in flight
- * @pasid: Identifies the process for which this query call is invoked
- * @pasid_wave_cnt: Output parameter updated with number of waves in flight that
- * belong to process with given pasid
+ * @cu_occupancy: Array that gets filled with wave_cnt and doorbell offset
+ * for comparison later.
* @max_waves_per_cu: Output parameter updated with maximum number of waves
* possible per Compute Unit
* @inst: xcc's instance number on a multi-XCC setup
@@ -1011,34 +1012,28 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
* number of waves that are in flight for the queue at specified index. The
* index ranges from 0 to 7.
*
- * If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
- * of the wave(s).
+ * If non-zero waves are in flight, store the corresponding doorbell offset
+ * of the queue, along with the wave count.
*
- * Determine if VMID from above step maps to pasid provided as parameter. If
- * it matches agrregate the wave count. That the VMID will not match pasid is
- * a normal condition i.e. a device is expected to support multiple queues
- * from multiple proceses.
+ * Determine if the queue belongs to the process by comparing the doorbell
+ * offset against the process's queues. If it matches, aggregate the wave
+ * count for the process.
*
* Reading registers referenced above involves programming GRBM appropriately
*/
-void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
- int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst)
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
+ struct kfd_cu_occupancy *cu_occupancy,
+ int *max_waves_per_cu, uint32_t inst)
{
int qidx;
- int vmid;
int se_idx;
- int sh_idx;
int se_cnt;
- int sh_cnt;
- int wave_cnt;
int queue_map;
- int pasid_tmp;
int max_queue_cnt;
- int vmid_wave_cnt = 0;
DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);
lock_spi_csq_mutexes(adev);
- soc15_grbm_select(adev, 1, 0, 0, 0, inst);
+ soc15_grbm_select(adev, 1, 0, 0, 0, GET_INST(GC, inst));
/*
* Iterate through the shader engines and arrays of the device
@@ -1048,51 +1043,38 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
AMDGPU_MAX_QUEUES);
max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
- sh_cnt = adev->gfx.config.max_sh_per_se;
se_cnt = adev->gfx.config.max_shader_engines;
for (se_idx = 0; se_idx < se_cnt; se_idx++) {
- for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
+ amdgpu_gfx_select_se_sh(adev, se_idx, 0, 0xffffffff, inst);
+ queue_map = RREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_CSQ_WF_ACTIVE_STATUS);
+
+ /*
+ * Assumption: the queue map encodes the following schema: four
+ * pipes per micro-engine, with each pipe mapping eight queues.
+ * This schema holds for GFX9 devices and must be verified for
+ * newer device families.
+ */
+ for (qidx = 0; qidx < max_queue_cnt; qidx++) {
+ /* Skip queues that are not associated with
+ * compute functions
+ */
+ if (!test_bit(qidx, cp_queue_bitmap))
+ continue;
- amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst);
- queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS);
+ if (!(queue_map & (1 << qidx)))
+ continue;
- /*
- * Assumption: queue map encodes following schema: four
- * pipes per each micro-engine, with each pipe mapping
- * eight queues. This schema is true for GFX9 devices
- * and must be verified for newer device families
- */
- for (qidx = 0; qidx < max_queue_cnt; qidx++) {
-
- /* Skip qeueus that are not associated with
- * compute functions
- */
- if (!test_bit(qidx, cp_queue_bitmap))
- continue;
-
- if (!(queue_map & (1 << qidx)))
- continue;
-
- /* Get number of waves in flight and aggregate them */
- get_wave_count(adev, qidx, &wave_cnt, &vmid,
- inst);
- if (wave_cnt != 0) {
- pasid_tmp =
- RREG32(SOC15_REG_OFFSET(OSSSYS, inst,
- mmIH_VMID_0_LUT) + vmid);
- if (pasid_tmp == pasid)
- vmid_wave_cnt += wave_cnt;
- }
- }
+ /* Get number of waves in flight and aggregate them */
+ get_wave_count(adev, qidx, &cu_occupancy[qidx],
+ inst);
}
}
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst);
- soc15_grbm_select(adev, 0, 0, 0, 0, inst);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst));
unlock_spi_csq_mutexes(adev);
/* Update the output parameters and return */
- *pasid_wave_cnt = vmid_wave_cnt;
*max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
adev->gfx.cu_info.max_waves_per_simd;
}
@@ -1144,6 +1126,109 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
kgd_gfx_v9_unlock_srbm(adev, inst);
}
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ uint32_t low, high;
+ uint64_t queue_addr = 0;
+
+ if (!adev->debug_exp_resets &&
+ !adev->gfx.num_gfx_rings)
+ return 0;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+ if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+ goto unlock_out;
+
+ low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+ high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+ /* only concerned with user queues. */
+ if (!high)
+ goto unlock_out;
+
+ queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+unlock_out:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return queue_addr;
+}
+
+/* assumes the queue has already been acquired */
+static int kgd_gfx_v9_hqd_dequeue_wait(struct amdgpu_device *adev, uint32_t inst,
+ unsigned int utimeout)
+{
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ while (true) {
+ uint32_t temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
+
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ return 0;
+
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+
+ usleep_range(500, 1000);
+ }
+}
+
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ uint32_t low, high, pipe_reset_data = 0;
+ uint64_t queue_addr = 0;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+ if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+ goto unlock_out;
+
+ low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+ high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+ /* only concerned with user queues. */
+ if (!high)
+ goto unlock_out;
+
+ queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+ pr_debug("Attempting queue reset on XCC %i pipe id %i queue id %i\n",
+ inst, pipe_id, queue_id);
+
+ /* assume a previously issued dequeue request will take effect after reset */
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ if (!kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+ goto unlock_out;
+
+ pr_debug("Attempting pipe reset on XCC %i pipe id %i\n", inst, pipe_id);
+
+ pipe_reset_data = REG_SET_FIELD(pipe_reset_data, CP_MEC_CNTL, MEC_ME1_PIPE0_RESET, 1);
+ pipe_reset_data = pipe_reset_data << pipe_id;
+
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, pipe_reset_data);
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, 0);
+
+ if (kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+ queue_addr = 0;
+
+unlock_out:
+ pr_debug("queue reset on XCC %i pipe id %i queue id %i %s\n",
+ inst, pipe_id, queue_id, !!queue_addr ? "succeeded!" : "failed!");
+ amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return queue_addr;
+}
+
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -1172,4 +1257,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset
};
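
For illustration, a minimal sketch of the matching step this change enables on
the KFD side. The struct kfd_cu_occupancy members wave_cnt and doorbell_off
are taken from the patch above; the per-process doorbell array is a
hypothetical stand-in for KFD's queue bookkeeping:

	/* Sum the waves of all queues whose doorbell belongs to the process. */
	static int aggregate_process_waves(const struct kfd_cu_occupancy *occ,
					   int num_queues,
					   const uint32_t *proc_doorbells,
					   int num_proc_queues)
	{
		int waves = 0;
		int i, j;

		for (i = 0; i < num_queues; i++) {
			if (!occ[i].wave_cnt)
				continue;	/* queue was idle at sample time */
			for (j = 0; j < num_proc_queues; j++) {
				if (occ[i].doorbell_off == proc_doorbells[j]) {
					waves += occ[i].wave_cnt;
					break;
				}
			}
		}
		return waves;
	}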
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index ce424615f59b..b6a91a552aa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -52,8 +52,9 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid);
void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base);
-void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
- int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst);
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
+ struct kfd_cu_occupancy *cu_occupancy,
+ int *max_waves_per_cu, uint32_t inst);
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
uint32_t inst);
@@ -101,3 +102,12 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data);
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst);
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst,
+ unsigned int utimeout);
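
kgd_gfx_v9_hqd_dequeue_wait() above follows the stock poll-until-clear-or-
timeout idiom. A generic sketch of the same shape, assuming the usual
linux/jiffies.h and linux/delay.h environment, where read_reg() stands in for
the RREG32_SOC15() access and msecs_to_jiffies() replaces the open-coded
utimeout * HZ / 1000:

	/* Poll until (reg & mask) clears, or give up after timeout_ms. */
	static int poll_bit_clear(u32 (*read_reg)(void *ctx), void *ctx,
				  u32 mask, unsigned int timeout_ms)
	{
		unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

		for (;;) {
			if (!(read_reg(ctx) & mask))
				return 0;
			if (time_after(jiffies, end))
				return -ETIME;
			usleep_range(500, 1000);	/* back off between reads */
		}
	}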
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 11672bfe4fad..ce5ca304dba9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,7 +25,6 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
-#include <linux/fdtable.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
@@ -818,18 +817,13 @@ static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
if (!mem->dmabuf) {
struct amdgpu_device *bo_adev;
struct dma_buf *dmabuf;
- int r, fd;
bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
- r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
+ dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,
mem->gem_handle,
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
- DRM_RDWR : 0, &fd);
- if (r)
- return r;
- dmabuf = dma_buf_get(fd);
- close_fd(fd);
- if (WARN_ON_ONCE(IS_ERR(dmabuf)))
+ DRM_RDWR : 0);
+ if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
mem->dmabuf = dmabuf;
}
@@ -1252,7 +1246,7 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
return ret;
}
-static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync)
{
@@ -1260,11 +1254,18 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
struct amdgpu_device *adev = entry->adev;
struct amdgpu_vm *vm = bo_va->base.vm;
+ if (bo_va->queue_refcount) {
+ pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount);
+ return -EBUSY;
+ }
+
amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
amdgpu_sync_fence(sync, bo_va->last_pt_update);
+
+ return 0;
}
static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -1498,7 +1499,7 @@ static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
}
}
- ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
+ ret = amdgpu_bo_pin(bo, domain);
if (ret)
pr_err("Error in Pinning BO to domain: %d\n", domain);
@@ -2191,7 +2192,10 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
entry->va, entry->va + bo_size, entry);
- unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+ ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+ if (ret)
+ goto unreserve_out;
+
entry->is_mapped = false;
mem->mapped_to_gpu_memory--;
@@ -2226,11 +2230,12 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
/**
* amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
* @bo: Buffer object to be mapped
+ * @bo_gart: Returned reference to the GART-mapped BO
*
* Before return, bo reference count is incremented. To release the reference and unpin/
* unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
*/
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart)
{
int ret;
@@ -2257,7 +2262,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
amdgpu_bo_unreserve(bo);
- bo = amdgpu_bo_ref(bo);
+ *bo_gart = amdgpu_bo_ref(bo);
return 0;
@@ -3200,12 +3205,13 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
return 0;
}
-bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem)
{
+ struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
struct kfd_mem_attachment *entry;
list_for_each_entry(entry, &mem->attachments, list) {
- if (entry->is_mapped && entry->adev == adev)
+ if (entry->is_mapped && entry->bo_va->base.vm == vm)
return true;
}
return false;
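
The -EBUSY return added to unmap_bo_from_gpuvm() turns a silent teardown into
a refusal while a user-mode queue still references the mapping, and the unmap
ioctl above now propagates it. The guard-then-propagate shape in miniature;
the struct here is a hypothetical stand-in for the bo_va state:

	struct mapping {
		int queue_refcount;	/* > 0 while user queues reference this VA */
		bool is_mapped;
	};

	static int unmap_if_unused(struct mapping *m)
	{
		if (m->queue_refcount)
			return -EBUSY;	/* caller retries after the queues detach */
		m->is_mapped = false;
		return 0;
	}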
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 7dc102f0bc1d..0c8975ac5af9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1018,8 +1018,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_div = args.v3.ucPostDiv;
dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -1039,8 +1040,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_div = args.v5.ucPostDiv;
dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -1058,8 +1060,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -1070,8 +1073,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -1113,8 +1117,9 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -1211,8 +1216,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
@@ -1221,8 +1227,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
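
Every call site in this hunk gains the same failure check, so the pattern
could be centralized. A sketch of such a wrapper: amdgpu_atom_execute_table()
and struct atom_context come from the driver, while the _checked name is
hypothetical:

	static int amdgpu_atom_execute_table_checked(struct atom_context *ctx,
						     int index, void *params,
						     int size)
	{
		/* Map the atom interpreter's nonzero failure to -EINVAL. */
		if (amdgpu_atom_execute_table(ctx, index, (uint32_t *)params, size))
			return -EINVAL;
		return 0;
	}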
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 618e469e3622..45affc02548c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -87,8 +87,9 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
+ * For SR-IOV, the vBIOS image is also placed in VRAM for the VF.
*/
-static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
+static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
{
uint8_t __iomem *bios;
resource_size_t vram_base;
@@ -284,10 +285,6 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
acpi_status status;
bool found = false;
- /* ATRM is for the discrete card only */
- if (adev->flags & AMD_IS_APU)
- return false;
-
/* ATRM is for on-platform devices only */
if (dev_is_removable(&adev->pdev->dev))
return false;
@@ -343,11 +340,8 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return igp_read_bios_from_vram(adev);
- else
- return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
- false : amdgpu_asic_read_disabled_bios(adev);
+ return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
+ false : amdgpu_asic_read_disabled_bios(adev);
}
#ifdef CONFIG_ACPI
@@ -414,7 +408,36 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
#endif
-bool amdgpu_get_bios(struct amdgpu_device *adev)
+static bool amdgpu_get_bios_apu(struct amdgpu_device *adev)
+{
+ if (amdgpu_acpi_vfct_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VFCT\n");
+ goto success;
+ }
+
+ if (amdgpu_read_bios_from_vram(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
+ goto success;
+ }
+
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
+
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
+ return false;
+
+success:
+ return true;
+}
+
+static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
{
if (amdgpu_atrm_get_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from ATRM\n");
@@ -426,11 +449,17 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
- if (igp_read_bios_from_vram(adev)) {
+ /* this is required for SR-IOV */
+ if (amdgpu_read_bios_from_vram(adev)) {
dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
goto success;
}
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
if (amdgpu_read_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
goto success;
@@ -446,19 +475,28 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
- if (amdgpu_read_platform_bios(adev)) {
- dev_info(adev->dev, "Fetched VBIOS from platform\n");
- goto success;
- }
-
dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
return false;
success:
- adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10;
return true;
}
+bool amdgpu_get_bios(struct amdgpu_device *adev)
+{
+ bool found;
+
+ if (adev->flags & AMD_IS_APU)
+ found = amdgpu_get_bios_apu(adev);
+ else
+ found = amdgpu_get_bios_dgpu(adev);
+
+ if (found)
+ adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10;
+
+ return found;
+}
+
/* helper function for soc15 and onwards to read bios from rom */
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
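
amdgpu_get_bios_apu() and amdgpu_get_bios_dgpu() are ordered fallback chains
that differ only in membership and ordering. For illustration, the APU chain
re-expressed as a table; the fetch functions and messages are the ones used
above, the table form itself is not in the patch:

	static const struct {
		bool (*fetch)(struct amdgpu_device *adev);
		const char *src;
	} apu_bios_sources[] = {
		{ amdgpu_acpi_vfct_bios,      "VFCT" },
		{ amdgpu_read_bios_from_vram, "VRAM BAR" },
		{ amdgpu_read_bios,           "ROM BAR" },
		{ amdgpu_read_platform_bios,  "platform" },
	};

	static bool amdgpu_get_bios_apu_tabular(struct amdgpu_device *adev)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(apu_bios_sources); i++) {
			if (apu_bios_sources[i].fetch(adev)) {
				dev_info(adev->dev, "Fetched VBIOS from %s\n",
					 apu_bios_sources[i].src);
				return true;
			}
		}
		dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
		return false;
	}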
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index c3d89088123d..16153d275d7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -414,7 +414,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
amdgpu_ucode_release(&adev->pm.fw);
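
The cgs change above is format-string hardening rather than a functional
change: if the firmware name ever contained a conversion specifier it would be
interpreted when passed as the format, but not when passed behind "%s". A
contrived illustration with a made-up name:

	const char *fw_name = "amdgpu/%n_oops.bin";	/* hypothetical hostile name */

	/* fw_name is parsed as a format string; %n would be interpreted: */
	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);

	/* fw_name is plain data behind the "%s" format, so it stays inert: */
	err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);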
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index cae7479c3ecf..344e0a9ee08a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -249,11 +249,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
static struct edid *
amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
{
- if (adev->mode_info.bios_hardcoded_edid) {
- return kmemdup((unsigned char *)adev->mode_info.bios_hardcoded_edid,
- adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
- }
- return NULL;
+ return drm_edid_duplicate(drm_edid_raw(adev->mode_info.bios_hardcoded_edid));
}
static void amdgpu_connector_get_edid(struct drm_connector *connector)
@@ -442,6 +438,9 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ if (!mode)
+ return;
+
drm_mode_probed_add(connector, mode);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6dfdff58bffd..1e475eb01417 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -263,6 +263,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
if (size < sizeof(struct drm_amdgpu_bo_list_in))
goto free_partial_kdata;
+ /* Only a single BO list is allowed to simplify handling. */
+ if (p->bo_list) {
+ ret = -EINVAL;
+ goto free_partial_kdata;
+ }
+
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;
@@ -292,6 +296,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
num_ibs[i], &p->jobs[i]);
if (ret)
goto free_all_kdata;
+ p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
}
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -1106,7 +1111,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct amdgpu_ring *ring = to_amdgpu_ring(sched);
- if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+ if (amdgpu_vmid_uses_reserved(adev, vm, ring->vm_hub))
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0e1a11b6b989..cbef720de779 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2026,100 +2026,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
-static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
- char __user *buf, size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
- char reg_offset[12];
- int i, ret, len = 0;
-
- if (*pos)
- return 0;
-
- memset(reg_offset, 0, 12);
- ret = down_read_killable(&adev->reset_domain->sem);
- if (ret)
- return ret;
-
- for (i = 0; i < adev->reset_info.num_regs; i++) {
- sprintf(reg_offset, "0x%x\n", adev->reset_info.reset_dump_reg_list[i]);
- up_read(&adev->reset_domain->sem);
- if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
- return -EFAULT;
-
- len += strlen(reg_offset);
- ret = down_read_killable(&adev->reset_domain->sem);
- if (ret)
- return ret;
- }
-
- up_read(&adev->reset_domain->sem);
- *pos += len;
-
- return len;
-}
-
-static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
- const char __user *buf, size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
- char reg_offset[11];
- uint32_t *new = NULL, *tmp = NULL;
- unsigned int len = 0;
- int ret, i = 0;
-
- do {
- memset(reg_offset, 0, 11);
- if (copy_from_user(reg_offset, buf + len,
- min(10, (size-len)))) {
- ret = -EFAULT;
- goto error_free;
- }
-
- new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- goto error_free;
- }
- tmp = new;
- if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
- ret = -EINVAL;
- goto error_free;
- }
-
- len += ret;
- i++;
- } while (len < size);
-
- new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- goto error_free;
- }
- ret = down_write_killable(&adev->reset_domain->sem);
- if (ret)
- goto error_free;
-
- swap(adev->reset_info.reset_dump_reg_list, tmp);
- swap(adev->reset_info.reset_dump_reg_value, new);
- adev->reset_info.num_regs = i;
- up_write(&adev->reset_domain->sem);
- ret = size;
-
-error_free:
- if (tmp != new)
- kfree(tmp);
- kfree(new);
- return ret;
-}
-
-static const struct file_operations amdgpu_reset_dump_register_list = {
- .owner = THIS_MODULE,
- .read = amdgpu_reset_dump_register_list_read,
- .write = amdgpu_reset_dump_register_list_write,
- .llseek = default_llseek
-};
-
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
@@ -2204,8 +2110,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
&amdgpu_debugfs_vm_info_fops);
debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
&amdgpu_benchmark_fops);
- debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
- &amdgpu_reset_dump_register_list);
adev->debugfs_vbios_blob.data = adev->bios;
adev->debugfs_vbios_blob.size = adev->bios_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index f0a44d0dec27..5ac59b62020c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -28,8 +28,8 @@
#include "atom.h"
#ifndef CONFIG_DEV_COREDUMP
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
{
}
#else
@@ -203,7 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
struct amdgpu_coredump_info *coredump = data;
struct drm_print_iterator iter;
struct amdgpu_vm_fault_info *fault_info;
- int i, ver;
+ int ver;
iter.data = buffer;
iter.offset = 0;
@@ -236,7 +236,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "\nSOC Memory Information\n");
drm_printf(&p, "real vram size: %llu\n", coredump->adev->gmc.real_vram_size);
drm_printf(&p, "visible vram size: %llu\n", coredump->adev->gmc.visible_vram_size);
- drm_printf(&p, "visible vram size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
+ drm_printf(&p, "gtt size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
/* GDS Config */
drm_printf(&p, "\nGDS Config\n");
@@ -315,16 +315,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
}
}
- if (coredump->reset_vram_lost)
+ if (coredump->skip_vram_check)
+ drm_printf(&p, "VRAM lost check is skipped!\n");
+ else if (coredump->reset_vram_lost)
drm_printf(&p, "VRAM is lost due to GPU reset!\n");
- if (coredump->adev->reset_info.num_regs) {
- drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
-
- for (i = 0; i < coredump->adev->reset_info.num_regs; i++)
- drm_printf(&p, "0x%08x: 0x%08x\n",
- coredump->adev->reset_info.reset_dump_reg_list[i],
- coredump->adev->reset_info.reset_dump_reg_value[i]);
- }
return count - iter.remain;
}
@@ -334,12 +328,11 @@ static void amdgpu_devcoredump_free(void *data)
kfree(data);
}
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
{
- struct amdgpu_coredump_info *coredump;
struct drm_device *dev = adev_to_drm(adev);
- struct amdgpu_job *job = reset_context->job;
+ struct amdgpu_coredump_info *coredump;
struct drm_sched_job *s_job;
coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
@@ -349,11 +342,12 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
return;
}
+ coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
- if (reset_context->job && reset_context->job->vm) {
+ if (job && job->vm) {
+ struct amdgpu_vm *vm = job->vm;
struct amdgpu_task_info *ti;
- struct amdgpu_vm *vm = reset_context->job->vm;
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
index 52459512cb2b..ef9772c6bcc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
@@ -26,7 +26,6 @@
#define __AMDGPU_DEV_COREDUMP_H__
#include "amdgpu.h"
-#include "amdgpu_reset.h"
#ifdef CONFIG_DEV_COREDUMP
@@ -36,12 +35,12 @@ struct amdgpu_coredump_info {
struct amdgpu_device *adev;
struct amdgpu_task_info reset_task_info;
struct timespec64 reset_time;
+ bool skip_vram_check;
bool reset_vram_lost;
struct amdgpu_ring *ring;
};
#endif
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context);
-
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job);
#endif
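
Dropping the amdgpu_reset_context parameter lets callers that have no reset
context request a dump, and skip_vram_check annotates dumps taken where the
VRAM-lost check cannot run. Plausible call shapes under the new signature; the
first matches the reset path later in this series, the second caller is
hypothetical:

	amdgpu_coredump(adev, false, vram_lost, reset_context->job); /* full-reset path */
	amdgpu_coredump(adev, true, false, job);	/* e.g. a per-queue reset, VRAM check skipped */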
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bcacf2e35eba..c2394c8b4d6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1916,6 +1916,8 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
*/
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
+ int i;
+
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
@@ -1970,6 +1972,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
+ for (i = 0; i < MAX_XCP; i++)
+ adev->enforce_isolation[i] = !!enforce_isolation;
+
return 0;
}
@@ -2471,6 +2476,7 @@ out:
*/
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
struct pci_dev *parent;
int i, r;
bool total;
@@ -2608,7 +2614,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!total)
return -ENODEV;
- amdgpu_amdkfd_device_probe(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (ip_block->status.valid != false)
+ amdgpu_amdkfd_device_probe(adev);
+
adev->cg_flags &= amdgpu_cg_mask;
adev->pg_flags &= amdgpu_pg_mask;
@@ -3948,6 +3957,27 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
adev->ram_is_direct_mapped = true;
}
+#if defined(CONFIG_HSA_AMD_P2P)
+/**
+ * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return: true if device DMA (BAR) addresses are remapped by an IOMMU
+ */
+static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(adev->dev);
+ if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
+ domain->type == IOMMU_DOMAIN_DMA_FQ))
+ return true;
+
+ return false;
+}
+#endif
+
static const struct attribute *amdgpu_dev_attributes[] = {
&dev_attr_pcie_replay_count.attr,
NULL
@@ -4055,6 +4085,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->notifier_lock);
mutex_init(&adev->pm.stable_pstate_ctx_lock);
mutex_init(&adev->benchmark_mutex);
+ mutex_init(&adev->gfx.reset_sem_mutex);
+ /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
+ mutex_init(&adev->enforce_isolation_mutex);
+ mutex_init(&adev->gfx.kfd_sch_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4073,9 +4107,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->mm_stats.lock);
spin_lock_init(&adev->wb.lock);
- INIT_LIST_HEAD(&adev->shadow_list);
- mutex_init(&adev->shadow_list_lock);
-
INIT_LIST_HEAD(&adev->reset_list);
INIT_LIST_HEAD(&adev->ras_list);
@@ -4086,6 +4117,21 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ /*
+ * Initialize the enforce_isolation work structures for each XCP
+ * partition. This work handler is responsible for enforcing shader
+ * isolation on AMD GPUs. It counts the number of emitted fences for
+ * each GFX and compute ring. If there are any fences, it schedules
+ * the `enforce_isolation_work` to be run after a delay. If there are
+ * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
+ * runqueue.
+ */
+ for (i = 0; i < MAX_XCP; i++) {
+ INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
+ amdgpu_gfx_enforce_isolation_handler);
+ adev->gfx.enforce_isolation[i].adev = adev;
+ adev->gfx.enforce_isolation[i].xcp_id = i;
+ }
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
@@ -4482,6 +4528,9 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
+
+ if (adev->mman.initialized)
+ drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
/* make sure IB test finished before entering exclusive mode
@@ -4502,9 +4551,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->mman.initialized)
- drain_workqueue(adev->mman.bdev.wq);
-
if (adev->pm.sysfs_initialized)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
@@ -4981,80 +5027,6 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_recover_vram - Recover some VRAM contents
- *
- * @adev: amdgpu_device pointer
- *
- * Restores the contents of VRAM buffers from the shadows in GTT. Used to
- * restore things like GPUVM page tables after a GPU reset where
- * the contents of VRAM might be lost.
- *
- * Returns:
- * 0 on success, negative error code on failure.
- */
-static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
-{
- struct dma_fence *fence = NULL, *next = NULL;
- struct amdgpu_bo *shadow;
- struct amdgpu_bo_vm *vmbo;
- long r = 1, tmo;
-
- if (amdgpu_sriov_runtime(adev))
- tmo = msecs_to_jiffies(8000);
- else
- tmo = msecs_to_jiffies(100);
-
- dev_info(adev->dev, "recover vram bo from shadow start\n");
- mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
- /* If vm is compute context or adev is APU, shadow will be NULL */
- if (!vmbo->shadow)
- continue;
- shadow = vmbo->shadow;
-
- /* No need to recover an evicted BO */
- if (!shadow->tbo.resource ||
- shadow->tbo.resource->mem_type != TTM_PL_TT ||
- shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
- shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
- continue;
-
- r = amdgpu_bo_restore_shadow(shadow, &next);
- if (r)
- break;
-
- if (fence) {
- tmo = dma_fence_wait_timeout(fence, false, tmo);
- dma_fence_put(fence);
- fence = next;
- if (tmo == 0) {
- r = -ETIMEDOUT;
- break;
- } else if (tmo < 0) {
- r = tmo;
- break;
- }
- } else {
- fence = next;
- }
- }
- mutex_unlock(&adev->shadow_list_lock);
-
- if (fence)
- tmo = dma_fence_wait_timeout(fence, false, tmo);
- dma_fence_put(fence);
-
- if (r < 0 || tmo <= 0) {
- dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
- return -EIO;
- }
-
- dev_info(adev->dev, "recover vram bo from shadow done\n");
- return 0;
-}
-
-
-/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
* @adev: amdgpu_device pointer
@@ -5116,12 +5088,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
amdgpu_inc_vram_lost(adev);
- r = amdgpu_device_recover_vram(adev);
- }
- if (r)
- return r;
/* need to be called during full access so we can't do it later like
* bare-metal does.
@@ -5278,16 +5246,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
{
int i, r = 0;
struct amdgpu_job *job = NULL;
+ struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
if (reset_context->reset_req_dev == adev)
job = reset_context->job;
- if (amdgpu_sriov_vf(adev)) {
- /* stop the data exchange thread */
- amdgpu_virt_fini_data_exchange(adev);
- }
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_pre_reset(adev);
amdgpu_fence_driver_isr_toggle(adev, true);
@@ -5336,6 +5303,16 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
}
}
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
+ dev_info(tmp_adev->dev, "Dumping IP State\n");
+ /* Trigger ip dump before we reset the asic */
+ for (i = 0; i < tmp_adev->num_ip_blocks; i++)
+ if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
+ tmp_adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)tmp_adev);
+ dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
+ }
+
if (need_full_reset)
r = amdgpu_device_ip_suspend(adev);
if (need_full_reset)
@@ -5348,47 +5325,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
-static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
-{
- int i;
-
- lockdep_assert_held(&adev->reset_domain->sem);
-
- for (i = 0; i < adev->reset_info.num_regs; i++) {
- adev->reset_info.reset_dump_reg_value[i] =
- RREG32(adev->reset_info.reset_dump_reg_list[i]);
-
- trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
- adev->reset_info.reset_dump_reg_value[i]);
- }
-
- return 0;
-}
-
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
- uint32_t i;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
- if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
- amdgpu_reset_reg_dumps(tmp_adev);
-
- dev_info(tmp_adev->dev, "Dumping IP State\n");
- /* Trigger ip dump before we reset the asic */
- for (i = 0; i < tmp_adev->num_ip_blocks; i++)
- if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
- tmp_adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)tmp_adev);
- dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
- }
-
reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
@@ -5461,7 +5408,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
- amdgpu_coredump(tmp_adev, vram_lost, reset_context);
+ amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU reset!\n");
@@ -5513,7 +5460,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
* bad_page_threshold value to fix this once
* probing driver again.
*/
- if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
+ if (!amdgpu_ras_is_rma(tmp_adev)) {
/* must succeed. */
amdgpu_ras_resume(tmp_adev);
} else {
@@ -5541,9 +5488,7 @@ out:
}
}
- if (!r)
- r = amdgpu_device_recover_vram(tmp_adev);
- else
+ if (r)
tmp_adev->asic_reset_res = r;
}
@@ -5879,7 +5824,7 @@ skip_hw_reset:
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
@@ -5891,8 +5836,14 @@ skip_hw_reset:
tmp_adev->asic_reset_res = 0;
if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
+ /* bad news, how to tell it to userspace ?
+ * For a RAS error we should report the GPU's bad status instead
+ * of a reset failure.
+ */
+ if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
+ !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
@@ -6138,18 +6089,24 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
#ifdef CONFIG_HSA_AMD_P2P
- uint64_t address_mask = peer_adev->dev->dma_mask ?
- ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
- resource_size_t aper_limit =
- adev->gmc.aper_base + adev->gmc.aper_size - 1;
bool p2p_access =
!adev->gmc.xgmi.connected_to_cpu &&
!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
- return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
- adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
- !(adev->gmc.aper_base & address_mask ||
- aper_limit & address_mask));
+ bool is_large_bar = adev->gmc.visible_vram_size &&
+ adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
+ bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
+
+ if (!p2p_addressable) {
+ uint64_t address_mask = peer_adev->dev->dma_mask ?
+ ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
+ resource_size_t aper_limit =
+ adev->gmc.aper_base + adev->gmc.aper_size - 1;
+
+ p2p_addressable = !(adev->gmc.aper_base & address_mask ||
+ aper_limit & address_mask);
+ }
+ return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
#else
return false;
#endif
@@ -6374,7 +6331,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
amdgpu_device_unset_mp1_state(adev);
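
amdgpu_device_check_iommu_remap() asks the IOMMU layer whether the peer's DMA
addresses are translated; when they are, the BAR base/limit versus DMA-mask
geometry check is unnecessary. The check in isolation, same logic as the patch
above, extracted for clarity:

	#include <linux/iommu.h>

	static bool dev_dma_is_remapped(struct device *dev)
	{
		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

		/* DMA and DMA_FQ domains translate, so bus addresses are remapped. */
		return domain && (domain->type == IOMMU_DOMAIN_DMA ||
				  domain->type == IOMMU_DOMAIN_DMA_FQ);
	}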
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 092ec11258cd..b119d27271c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -233,6 +233,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
}
if (!adev->enable_virtual_display) {
+ new_abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(new_abo,
amdgpu_display_supported_domains(adev, new_abo->flags));
if (unlikely(r != 0)) {
@@ -1474,7 +1475,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
- connector->display_info.is_hdmi &&
+ connector && connector->display_info.is_hdmi &&
amdgpu_display_is_hdtv_mode(mode)))) {
if (amdgpu_encoder->underscan_hborder != 0)
amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
@@ -1759,6 +1760,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index d7ef8cbecf6c..81d9877c8735 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -117,9 +117,10 @@
* - 3.56.0 - Update IB start address and size alignment for decode and encode
* - 3.57.0 - Compute tunneling on GFX10+
* - 3.58.0 - Add GFX12 DCC support
+ * - 3.59.0 - Cleared VRAM
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 58
+#define KMS_DRIVER_MINOR 59
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -131,6 +132,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
+ AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -168,6 +170,16 @@ uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
bool enforce_isolation;
+
+/* Specifies the default granularity for SVM, used in buffer
+ * migration and restoration of backing memory when handling
+ * recoverable page faults.
+ *
+ * The value is given as log2 of the buffer size in pages; for a
+ * 2 MiB buffer (512 pages of 4 KiB) it computes to 9.
+ */
+uint amdgpu_svm_default_granularity = 9;
+
/*
* OverDrive(bit 14) disabled by default
* GFX DCS(bit 19) disabled by default
@@ -320,6 +332,13 @@ MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
/**
+ * DOC: svm_default_granularity (uint)
+ * Used in buffer migration and handling of recoverable page faults
+ */
+MODULE_PARM_DESC(svm_default_granularity, "SVM's default granularity as log2(pages), default 9 = 2^9 pages = 2 MiB");
+module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint, 0644);
+
+/**
* DOC: lockup_timeout (string)
* Set GPU scheduler timeout value in ms.
*
@@ -2199,6 +2218,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: enable RAS ACA\n");
adev->debug_enable_ras_aca = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_EXP_RESETS) {
+ pr_info("debug: enable experimental reset features\n");
+ adev->debug_exp_resets = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2954,7 +2978,6 @@ static const struct drm_driver amdgpu_kms_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
- .lastclose = amdgpu_driver_lastclose_kms,
.ioctls = amdgpu_ioctls_kms,
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
@@ -2981,7 +3004,6 @@ const struct drm_driver amdgpu_partition_driver = {
DRIVER_SYNCOBJ_TIMELINE,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
- .lastclose = amdgpu_driver_lastclose_kms,
.ioctls = amdgpu_ioctls_kms,
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
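
svm_default_granularity is log2 of a page count, so the buffer size it implies
is (1 << granularity) pages. A small conversion sketch; the 2 MiB figure for
the default of 9 assumes 4 KiB pages:

	static unsigned long svm_granularity_bytes(unsigned int granularity)
	{
		/* 9 -> 512 pages -> 2 MiB with PAGE_SIZE == 4 KiB */
		return (1UL << granularity) * PAGE_SIZE;
	}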
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 8283d682f543..7cc980bf4725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -55,8 +55,6 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0e617dff8765..1a5df8b94661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -43,8 +43,6 @@
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
-static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
-
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -87,11 +85,11 @@ static const struct vm_operations_struct amdgpu_gem_vm_ops = {
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
- struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
- if (robj) {
- amdgpu_hmm_unregister(robj);
- amdgpu_bo_unref(&robj);
+ if (aobj) {
+ amdgpu_hmm_unregister(aobj);
+ ttm_bo_put(&aobj->tbo);
}
}
@@ -126,7 +124,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
bo = &ubo->bo;
*obj = &bo->tbo.base;
- (*obj)->funcs = &amdgpu_gem_object_funcs;
return 0;
}
@@ -295,7 +292,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
return drm_gem_ttm_mmap(obj, vma);
}
-static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
.free = amdgpu_gem_object_free,
.open = amdgpu_gem_object_open,
.close = amdgpu_gem_object_close,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index f30264782ba2..3a8f57900a3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -33,6 +33,8 @@
#define AMDGPU_GEM_DOMAIN_MAX 0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
+extern const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1849510a308a..83e54697f0ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -24,10 +24,13 @@
*/
#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
@@ -882,8 +885,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
int r;
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
- if (!amdgpu_persistent_edc_harvesting_supported(adev))
- amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ if (r)
+ return r;
+ }
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
@@ -1027,7 +1033,10 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
pr_err("critical bug! too many kiq readers\n");
goto failed_unlock;
}
- amdgpu_ring_alloc(ring, 32);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r)
@@ -1093,7 +1102,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
}
spin_lock_irqsave(&kiq->ring_lock, flags);
- amdgpu_ring_alloc(ring, 32);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
amdgpu_ring_emit_wreg(ring, reg, v);
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r)
@@ -1129,6 +1141,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
failed_undo:
amdgpu_ring_undo(ring);
+failed_unlock:
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
dev_err(adev->dev, "failed to write reg:%x\n", reg);
@@ -1381,6 +1394,217 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
return sysfs_emit(buf, "%s\n", supported_partition);
}
+static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ struct drm_sched_entity entity;
+ struct dma_fence *f;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ int i, r;
+
+ /* Initialize the scheduler entity */
+ r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (r) {
+ dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
+ goto err;
+ }
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
+ 64, 0,
+ &job);
+ if (r)
+ goto err;
+
+ job->enforce_isolation = true;
+
+ ib = &job->ibs[0];
+ for (i = 0; i <= ring->funcs->align_mask; ++i)
+ ib->ptr[i] = ring->funcs->nop;
+ ib->length_dw = ring->funcs->align_mask + 1;
+
+ f = amdgpu_job_submit(job);
+
+ r = dma_fence_wait(f, false);
+ if (r)
+ goto err;
+
+ dma_fence_put(f);
+
+ /* Clean up the scheduler entity */
+ drm_sched_entity_destroy(&entity);
+ return 0;
+
+err:
+ return r;
+}
+
+static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
+{
+ int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ struct amdgpu_ring *ring;
+ int num_xcc_to_clear;
+ int i, r, xcc_id;
+
+ if (adev->gfx.num_xcc_per_xcp)
+ num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
+ else
+ num_xcc_to_clear = 1;
+
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
+ if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
+ r = amdgpu_gfx_run_cleaner_shader_job(ring);
+ if (r)
+ return r;
+ num_xcc_to_clear--;
+ break;
+ }
+ }
+ }
+
+ if (num_xcc_to_clear)
+ return -ENOENT;
+
+ return 0;
+}
+
+static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ long value;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret)
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ if (adev->xcp_mgr) {
+ if (value >= adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+ } else {
+ if (value > 1)
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_gfx_run_cleaner_shader(adev, value);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int i;
+ ssize_t size = 0;
+
+ if (adev->xcp_mgr) {
+ for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
+ size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
+ if (i < (adev->xcp_mgr->num_xcps - 1))
+ size += sysfs_emit_at(buf, size, " ");
+ }
+ buf[size++] = '\n';
+ } else {
+ size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
+ }
+
+ return size;
+}
+
+static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ long partition_values[MAX_XCP] = {0};
+ int ret, i, num_partitions;
+ const char *input_buf = buf;
+
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ ret = sscanf(input_buf, "%ld", &partition_values[i]);
+ if (ret <= 0)
+ break;
+
+ /* Move the pointer to the next value in the string */
+ input_buf = strchr(input_buf, ' ');
+ if (input_buf) {
+ input_buf++;
+ } else {
+ i++;
+ break;
+ }
+ }
+ num_partitions = i;
+
+ if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ if (!adev->xcp_mgr && num_partitions != 1)
+ return -EINVAL;
+
+ for (i = 0; i < num_partitions; i++) {
+ if (partition_values[i] != 0 && partition_values[i] != 1)
+ return -EINVAL;
+ }
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+
+ for (i = 0; i < num_partitions; i++) {
+ if (adev->enforce_isolation[i] && !partition_values[i]) {
+ /* Going from enabled to disabled */
+ amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
+ } else if (!adev->enforce_isolation[i] && partition_values[i]) {
+ /* Going from disabled to enabled */
+ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ }
+ adev->enforce_isolation[i] = partition_values[i];
+ }
+
+ mutex_unlock(&adev->enforce_isolation_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(run_cleaner_shader, 0200,
+ NULL, amdgpu_gfx_set_run_cleaner_shader);
+
+static DEVICE_ATTR(enforce_isolation, 0644,
+ amdgpu_gfx_get_enforce_isolation,
+ amdgpu_gfx_set_enforce_isolation);
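/*
 * Assuming the usual sysfs layout, these attributes appear under
 * /sys/class/drm/cardN/device/. Hypothetical usage:
 *   echo 1 > /sys/class/drm/card0/device/enforce_isolation
 *   echo 0 > /sys/class/drm/card0/device/run_cleaner_shader
 * (enforce_isolation takes one 0/1 flag per partition when an XCP
 * manager is present; run_cleaner_shader takes a partition index)
 */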
+
static DEVICE_ATTR(current_compute_partition, 0644,
amdgpu_gfx_get_current_compute_partition,
amdgpu_gfx_set_compute_partition);
@@ -1406,3 +1630,229 @@ void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
device_remove_file(adev->dev, &dev_attr_available_compute_partition);
}
+
+int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ if (r)
+ return r;
+ }
+
+ r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev))
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
+}
+
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return -EOPNOTSUPP;
+
+ return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.cleaner_shader_obj,
+ &adev->gfx.cleaner_shader_gpu_addr,
+ (void **)&adev->gfx.cleaner_shader_cpu_ptr);
+}
+
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
+ &adev->gfx.cleaner_shader_gpu_addr,
+ (void **)&adev->gfx.cleaner_shader_cpu_ptr);
+}
+
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size,
+ const void *cleaner_shader_ptr)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
+ memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
+ cleaner_shader_size);
+}
+
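A hedged sketch of how a GFX IP backend might consume the three helpers above during its software-init path; the gfx_vX_0 names and the shader array are placeholders, not symbols introduced by this patch.

/* Hypothetical consumer of the cleaner-shader helpers (illustrative). */
static const u32 gfx_vX_0_cleaner_shader_hex[] = { 0 /* shader binary */ };

static int gfx_vX_0_cleaner_shader_setup(struct amdgpu_device *adev)
{
        int r;

        adev->gfx.enable_cleaner_shader = true;

        /* reserve a kernel BO sized for the shader */
        r = amdgpu_gfx_cleaner_shader_sw_init(adev,
                        sizeof(gfx_vX_0_cleaner_shader_hex));
        if (r)
                return r;

        /* copy the binary into the BO via memcpy_toio() */
        amdgpu_gfx_cleaner_shader_init(adev,
                        sizeof(gfx_vX_0_cleaner_shader_hex),
                        gfx_vX_0_cleaner_shader_hex);
        return 0;
}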
+/**
+ * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the scheduler to control
+ * @enable: Whether to enable or disable the KFD scheduler
+ *
+ * This function is used to control the KFD (Kernel Fusion Driver) scheduler
+ * from the KGD. It is part of the cleaner shader feature. This function plays
+ * a key role in enforcing process isolation on the GPU.
+ *
+ * The function uses a reference count mechanism (kfd_sch_req_count) to keep
+ * track of the number of requests to enable the KFD scheduler. When a request
+ * to enable the KFD scheduler is made, the reference count is decremented.
+ * When the reference count reaches zero, a delayed work is scheduled to
+ * enforce isolation after a delay of GFX_SLICE_PERIOD.
+ *
+ * When a request to disable the KFD scheduler is made, the function first
+ * checks if the reference count is zero. If it is, it cancels the delayed work
+ * for enforcing isolation and checks if the KFD scheduler is active. If the
+ * KFD scheduler is active, it sends a request to stop the KFD scheduler and
+ * sets the KFD scheduler state to inactive. Then, it increments the reference
+ * count.
+ *
+ * The function is synchronized using the kfd_sch_mutex to ensure that the KFD
+ * scheduler state and reference count are updated atomically.
+ *
+ * Note: If the reference count is already zero when a request to enable the
+ * KFD scheduler is made, it means there's an imbalance bug somewhere. The
+ * function triggers a warning in this case.
+ */
+static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
+ bool enable)
+{
+ mutex_lock(&adev->gfx.kfd_sch_mutex);
+
+ if (enable) {
+ /* If the count is already 0, it means there's an imbalance bug somewhere.
+ * Note that the bug may be in a different caller than the one which triggers the
+ * WARN_ON_ONCE.
+ */
+ if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
+ goto unlock;
+ }
+
+ adev->gfx.kfd_sch_req_count[idx]--;
+
+ if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
+ adev->gfx.kfd_sch_inactive[idx]) {
+ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+ GFX_SLICE_PERIOD);
+ }
+ } else {
+ if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
+ if (!adev->gfx.kfd_sch_inactive[idx]) {
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.kfd_sch_inactive[idx] = true;
+ }
+ }
+
+ adev->gfx.kfd_sch_req_count[idx]++;
+ }
+
+unlock:
+ mutex_unlock(&adev->gfx.kfd_sch_mutex);
+}
+
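Note the count runs opposite to a conventional refcount: disable requests increment it and the scheduler is stopped on the first one, while enable requests decrement it and the delayed re-enable is armed only when it returns to zero. A reduced model of just that bookkeeping, with the locking and work queue stripped out (illustrative):

#include <stdbool.h>

struct sch_gate {
        unsigned int req_count; /* outstanding disable requests */
        bool inactive;          /* scheduler currently stopped */
};

static void gate_disable(struct sch_gate *g)
{
        if (g->req_count == 0 && !g->inactive)
                g->inactive = true;     /* first request stops the scheduler */
        g->req_count++;
}

/* Returns true when the caller should arm the delayed restart. */
static bool gate_enable(struct sch_gate *g)
{
        if (g->req_count == 0)
                return false;           /* imbalance: mirrors the WARN_ON_ONCE */
        g->req_count--;
        return g->req_count == 0 && g->inactive;
}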
+/**
+ * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
+ *
+ * @work: work_struct.
+ *
+ * This function is the work handler for enforcing shader isolation on AMD GPUs.
+ * It counts the emitted fences on every GFX and compute ring that belongs to
+ * the work item's partition. If any fences are still outstanding, it reschedules
+ * itself after a delay of `GFX_SLICE_PERIOD`; once none remain, it signals the
+ * Kernel Fusion Driver (KFD) to resume the runqueue. The function is
+ * synchronized using the `enforce_isolation_mutex`.
+ */
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
+{
+ struct amdgpu_isolation_work *isolation_work =
+ container_of(work, struct amdgpu_isolation_work, work.work);
+ struct amdgpu_device *adev = isolation_work->adev;
+ u32 i, idx, fences = 0;
+
+ if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = isolation_work->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
+ if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+ }
+ for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
+ if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+ }
+ if (fences) {
+ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+ GFX_SLICE_PERIOD);
+ } else {
+ /* Tell KFD to resume the runqueue */
+ if (adev->kfd.init_complete) {
+ WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+ amdgpu_amdkfd_start_sched(adev, idx);
+ adev->gfx.kfd_sch_inactive[idx] = false;
+ }
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 idx;
+
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = ring->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx]) {
+ if (adev->kfd.init_complete)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 idx;
+
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = ring->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx]) {
+ if (adev->kfd.init_complete)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
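These two helpers match the shape of the amdgpu_ring begin_use/end_use callbacks, so KFD queues are paused for the duration of kernel GFX work on an isolated partition. A hypothetical ring-funcs hookup (the gfx_vX_0 name is illustrative):

static const struct amdgpu_ring_funcs gfx_vX_0_ring_funcs_gfx = {
        /* ... emit/submit callbacks elided ... */
        .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
        .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};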
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 56cc58edbb4e..5644e10a86a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -34,6 +34,7 @@
#include "soc15.h"
#include "amdgpu_ras.h"
#include "amdgpu_ring_mux.h"
+#include "amdgpu_xcp.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -138,6 +139,10 @@ struct kiq_pm4_funcs {
void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
uint16_t pasid, uint32_t flush_type,
bool all_hub);
+ void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring,
+ uint32_t queue_type, uint32_t me_id,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid);
/* Packet sizes */
int set_resources_size;
int map_queues_size;
@@ -345,6 +350,12 @@ struct amdgpu_me {
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
};
+struct amdgpu_isolation_work {
+ struct amdgpu_device *adev;
+ u32 xcp_id;
+ struct delayed_work work;
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gfx_config config;
@@ -397,6 +408,7 @@ struct amdgpu_gfx {
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src bad_op_irq;
struct amdgpu_irq_src cp_ecc_error_irq;
struct amdgpu_irq_src sq_irq;
struct amdgpu_irq_src rlc_gc_fed_irq;
@@ -445,6 +457,21 @@ struct amdgpu_gfx {
uint32_t *ip_dump_core;
uint32_t *ip_dump_compute_queues;
uint32_t *ip_dump_gfx_queues;
+
+ struct mutex reset_sem_mutex;
+
+ /* cleaner shader */
+ struct amdgpu_bo *cleaner_shader_obj;
+ unsigned int cleaner_shader_size;
+ u64 cleaner_shader_gpu_addr;
+ void *cleaner_shader_cpu_ptr;
+ const void *cleaner_shader_ptr;
+ bool enable_cleaner_shader;
+ struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
+ /* Mutex for synchronizing KFD scheduler operations */
+ struct mutex kfd_sch_mutex;
+ u64 kfd_sch_req_count[MAX_XCP];
+ bool kfd_sch_inactive[MAX_XCP];
};
struct amdgpu_gfx_ras_reg_entry {
@@ -546,6 +573,17 @@ void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
void *ras_error_status,
void (*func)(struct amdgpu_device *adev, void *ras_error_status,
int xcc_id));
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size);
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size,
+ const void *cleaner_shader_ptr);
+int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev);
+void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
index 103a837ccc71..c7b44aeb671b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -38,8 +38,6 @@ struct amdgpu_gfxhub_funcs {
void (*mode2_save_regs)(struct amdgpu_device *adev);
void (*mode2_restore_regs)(struct amdgpu_device *adev);
void (*halt)(struct amdgpu_device *adev);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
- int xcc_id);
};
struct amdgpu_gfxhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index b49b3650fd62..17a19d49d30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -786,7 +786,8 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
goto failed_kiq;
might_sleep();
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b6a8bddada4c..92d27d32de41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -424,7 +424,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
+ if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
@@ -476,15 +476,19 @@ error:
/*
* amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @adev: amdgpu_device pointer
* @vm: the VM to check
* @vmhub: the VMHUB which will be used
*
* Returns: True if the VM will use a reserved VMID.
*/
-bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, unsigned int vmhub)
{
return vm->reserved_vmid[vmhub] ||
- (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
+ (adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
+ vm->root.bo->xcp_id : 0] &&
+ AMDGPU_IS_GFXHUB(vmhub));
}
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
@@ -600,9 +604,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
}
}
/* alloc a default reserved vmid to enforce isolation */
- if (enforce_isolation)
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
-
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i])
+ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 240fa6751260..4012fb2dd08a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -78,7 +78,8 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
-bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, unsigned int vmhub);
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
unsigned vmhub);
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index 44e2ea8c9728..b03664c66dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -49,6 +49,7 @@ struct amdgpu_isp {
const struct isp_funcs *funcs;
struct mfd_cell *isp_cell;
struct resource *isp_res;
+ struct resource *isp_i2c_res;
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 908e13455152..16f2605ac50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,6 +30,60 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
+#include "amdgpu_dev_coredump.h"
+#include "amdgpu_xgmi.h"
+
+static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
+
+ dev_info(adev->dev, "Dumping IP State\n");
+ for (i = 0; i < adev->num_ip_blocks; i++)
+ if (adev->ip_blocks[i].version->funcs->dump_ip_state)
+ adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)adev);
+ dev_info(adev->dev, "Dumping IP State Completed\n");
+
+ amdgpu_coredump(adev, true, false, job);
+}
+
+static void amdgpu_job_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_hive_info *hive = NULL;
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ /*
+ * Reuse the logic in amdgpu_device_gpu_recover() to build list of
+ * devices for core dump
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (!list_is_first(&adev->reset_list, &device_list))
+ list_rotate_to_front(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ } else {
+ list_add_tail(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ /* Do the coredump for each device */
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_job_do_core_dump(tmp_adev, job);
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
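For readers new to the hive handling: list_rotate_to_front() preserves the cyclic order of the list and only moves the starting point, so the device that hit the timeout is dumped before its XGMI peers.

/*
 * list_rotate_to_front(&adev->reset_list, &device_list) semantics:
 *   before: head -> A -> B -> C   (adev == B)
 *   after:  head -> B -> C -> A
 */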
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
@@ -48,9 +102,17 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
-
adev->job_hang = true;
+ /*
+ * Do the coredump immediately after a job timeout to get a very
+ * close dump/snapshot/representation of the GPU's current error status.
+ * Skip it for SRIOV, since VF FLR will be triggered by the host driver
+ * before the job timeout
+ */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_job_core_dump(adev, job);
+
if (amdgpu_gpu_recovery &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
@@ -72,6 +134,26 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+ /* attempt a per ring reset */
+ if (amdgpu_gpu_recovery &&
+ ring->funcs->reset) {
+ /* stop the scheduler, but don't mess with the
+ * bad job yet because if ring reset fails
+ * we'll fall back to full GPU reset.
+ */
+ drm_sched_wqueue_stop(&ring->sched);
+ r = amdgpu_ring_reset(ring, job->vmid);
+ if (!r) {
+ if (amdgpu_ring_sched_ready(ring))
+ drm_sched_stop(&ring->sched, s_job);
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ amdgpu_fence_driver_force_completion(ring);
+ if (amdgpu_ring_sched_ready(ring))
+ drm_sched_start(&ring->sched);
+ goto exit;
+ }
+ }
+
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
@@ -81,6 +163,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.src = AMDGPU_RESET_SRC_JOB;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /*
+ * Skip an unnecessary extra coredump here, since we have already
+ * captured a very close representation of the GPU's error status
+ */
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index a963a25ddd62..ce6b9ba967ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -76,6 +76,9 @@ struct amdgpu_job {
/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
+ /* enforce isolation */
+ bool enforce_isolation;
+
uint32_t num_ibs;
struct amdgpu_ib ibs[];
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 66782be5917b..016a6f6c4267 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -43,6 +43,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
#include "amd_pcie.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
@@ -778,6 +779,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
+ int ret = 0;
unsigned int n, alloc_size;
uint32_t *regs;
unsigned int se_num = (info->read_mmr_reg.instance >>
@@ -787,24 +789,37 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SH_INDEX_MASK;
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -ENOENT;
+
/* set full masks if the userspace set all bits
* in the bitfields
*/
- if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
se_num = 0xffffffff;
- else if (se_num >= AMDGPU_GFX_MAX_SE)
- return -EINVAL;
- if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
+ } else if (se_num >= AMDGPU_GFX_MAX_SE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
sh_num = 0xffffffff;
- else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
- return -EINVAL;
+ } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (info->read_mmr_reg.count > 128)
- return -EINVAL;
+ if (info->read_mmr_reg.count > 128) {
+ ret = -EINVAL;
+ goto out;
+ }
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
amdgpu_gfx_off_ctrl(adev, false);
@@ -816,13 +831,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
info->read_mmr_reg.dword_offset + i);
kfree(regs);
amdgpu_gfx_off_ctrl(adev, true);
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
}
amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
- return n ? -EFAULT : 0;
+ ret = (n ? -EFAULT : 0);
+out:
+ up_read(&adev->reset_domain->sem);
+ return ret;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
@@ -1269,23 +1288,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return 0;
}
-
-/*
- * Outdated mess for old drm with Xorg being in charge (void function now).
- */
-/**
- * amdgpu_driver_lastclose_kms - drm callback for last close
- *
- * @dev: drm dev pointer
- *
- * Switch vga_switcheroo state after last close (all asics).
- */
-void amdgpu_driver_lastclose_kms(struct drm_device *dev)
-{
- drm_fb_helper_lastclose(dev);
- vga_switcheroo_process_delayed_switch();
-}
-
/**
* amdgpu_driver_open_kms - drm callback for open
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 2542bd7aa7c7..18ee60378727 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -396,7 +396,6 @@ static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum
static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
struct mca_bank_set *mca_set, struct ras_err_data *err_data)
{
- struct ras_err_addr err_addr;
struct amdgpu_smuio_mcm_config_info mcm_info;
struct mca_bank_node *node, *tmp;
struct mca_bank_entry *entry;
@@ -421,27 +420,20 @@ static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_r
continue;
memset(&mcm_info, 0, sizeof(mcm_info));
- memset(&err_addr, 0, sizeof(err_addr));
mcm_info.socket_id = entry->info.socket_id;
mcm_info.die_id = entry->info.aid;
- if (blk == AMDGPU_RAS_BLOCK__UMC) {
- err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
- err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
- err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
- }
-
if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
amdgpu_ras_error_statistic_ue_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
} else {
if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
amdgpu_ras_error_statistic_de_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
else
amdgpu_ras_error_statistic_ce_count(err_data,
- &mcm_info, &err_addr, (uint64_t)count);
+ &mcm_info, (uint64_t)count);
}
amdgpu_mca_bank_set_remove_node(mca_set, node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 1cb1ec7beefe..10b61ff63802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -501,60 +501,50 @@ int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
- struct idr *idp;
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
struct mes_suspend_gang_input input;
- int r, pasid;
+ int r;
+
+ if (!amdgpu_mes_suspend_resume_all_supported(adev))
+ return 0;
+
+ memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
+ input.suspend_all_gangs = 1;
/*
* Avoid taking any other locks under MES lock to avoid circular
* lock dependencies.
*/
amdgpu_mes_lock(&adev->mes);
-
- idp = &adev->mes.pasid_idr;
-
- idr_for_each_entry(idp, process, pasid) {
- list_for_each_entry(gang, &process->gang_list, list) {
- r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
- if (r)
- DRM_ERROR("failed to suspend pasid %d gangid %d",
- pasid, gang->gang_id);
- }
- }
-
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
- return 0;
+ if (r)
+ DRM_ERROR("failed to suspend all gangs");
+
+ return r;
}
int amdgpu_mes_resume(struct amdgpu_device *adev)
{
- struct idr *idp;
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
struct mes_resume_gang_input input;
- int r, pasid;
+ int r;
+
+ if (!amdgpu_mes_suspend_resume_all_supported(adev))
+ return 0;
+
+ memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
+ input.resume_all_gangs = 1;
/*
* Avoid taking any other locks under MES lock to avoid circular
* lock dependencies.
*/
amdgpu_mes_lock(&adev->mes);
-
- idp = &adev->mes.pasid_idr;
-
- idr_for_each_entry(idp, process, pasid) {
- list_for_each_entry(gang, &process->gang_list, list) {
- r = adev->mes.funcs->resume_gang(&adev->mes, &input);
- if (r)
- DRM_ERROR("failed to resume pasid %d gangid %d",
- pasid, gang->gang_id);
- }
- }
-
+ r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
- return 0;
+ if (r)
+ DRM_ERROR("failed to resume all gangs");
+
+ return r;
}
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
@@ -793,6 +783,68 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
return 0;
}
+int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
+{
+ unsigned long flags;
+ struct amdgpu_mes_queue *queue;
+ struct amdgpu_mes_gang *gang;
+ struct mes_reset_queue_input queue_input;
+ int r;
+
+ /*
+ * Avoid taking any other locks under MES lock to avoid circular
+ * lock dependencies.
+ */
+ amdgpu_mes_lock(&adev->mes);
+
+ /* look up the queue in the idr list */
+ spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
+
+ queue = idr_find(&adev->mes.queue_id_idr, queue_id);
+ if (!queue) {
+ spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
+ amdgpu_mes_unlock(&adev->mes);
+ DRM_ERROR("queue id %d doesn't exist\n", queue_id);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
+
+ DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
+ queue->doorbell_off);
+
+ gang = queue->gang;
+ queue_input.doorbell_offset = queue->doorbell_off;
+ queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
+
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
+ queue_id);
+
+ amdgpu_mes_unlock(&adev->mes);
+
+ return 0;
+}
+
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+ int me_id, int pipe_id, int queue_id, int vmid)
+{
+ struct mes_reset_queue_input queue_input;
+ int r;
+
+ queue_input.queue_type = queue_type;
+ queue_input.use_mmio = true;
+ queue_input.me_id = me_id;
+ queue_input.pipe_id = pipe_id;
+ queue_input.queue_id = queue_id;
+ queue_input.vmid = vmid;
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
+ queue_id);
+ return r;
+}
+
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -838,6 +890,33 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
return r;
}
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int vmid,
+ bool use_mmio)
+{
+ struct mes_reset_legacy_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0, sizeof(queue_input));
+
+ queue_input.queue_type = ring->funcs->type;
+ queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.me_id = ring->me;
+ queue_input.pipe_id = ring->pipe;
+ queue_input.queue_id = ring->queue;
+ queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.wptr_addr = ring->wptr_gpu_addr;
+ queue_input.vmid = vmid;
+ queue_input.use_mmio = use_mmio;
+
+ r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset legacy queue\n");
+
+ return r;
+}
+
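A plausible caller for the new helper is a ring backend's .reset hook (the callback added to amdgpu_ring_funcs further down in this diff); the sketch below is hypothetical, including the gfx_vX_0 name and the capability guard.

static int gfx_vX_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
        struct amdgpu_device *adev = ring->adev;

        if (!adev->mes.funcs->reset_legacy_queue)
                return -EOPNOTSUPP;

        return amdgpu_mes_reset_legacy_queue(adev, ring, vmid, false);
}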
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
struct mes_misc_op_input op_input;
@@ -1533,7 +1612,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
}
- r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
@@ -1584,6 +1663,19 @@ out:
return r;
}
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
+{
+ uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ bool is_supported = false;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+ mes_rev >= 0x63)
+ is_supported = true;
+
+ return is_supported;
+}
+
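Decoding the gate, as a reading of the code above rather than new behavior:

/*
 * sched_version & AMDGPU_MES_VERSION_MASK extracts the MES firmware
 * revision, so suspend/resume-all is used only on GC 11.x parts
 * (11.0.0 <= ip < 12.0.0) running MES firmware rev >= 0x63; everything
 * else takes the early-return path in amdgpu_mes_suspend()/resume().
 */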
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index bcce1add4ef6..96788c0f42f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -249,6 +249,18 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
+struct mes_reset_queue_input {
+ uint32_t doorbell_offset;
+ uint64_t gang_context_addr;
+ bool use_mmio;
+ uint32_t queue_type;
+ uint32_t me_id;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint32_t xcc_id;
+ uint32_t vmid;
+};
+
struct mes_map_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
@@ -280,6 +292,18 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
+struct mes_reset_legacy_queue_input {
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ bool use_mmio;
+ uint32_t me_id;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint64_t mqd_addr;
+ uint64_t wptr_addr;
+ uint32_t vmid;
+};
+
enum mes_misc_opcode {
MES_MISC_OP_WRITE_REG,
MES_MISC_OP_READ_REG,
@@ -348,6 +372,12 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
+
+ int (*reset_legacy_queue)(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input);
+
+ int (*reset_hw_queue)(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
@@ -375,6 +405,9 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
struct amdgpu_mes_queue_properties *qprops,
int *queue_id);
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+ int me_id, int pipe_id, int queue_id, int vmid);
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -382,6 +415,10 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
u64 gpu_addr, u64 seq);
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int vmid,
+ bool use_mmio);
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
@@ -479,4 +516,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
memalloc_noreclaim_restore(mes->saved_flags);
mutex_unlock(&mes->mutex_hidden);
}
+
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 95d676ee207f..1ca9d4ed8063 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -63,8 +63,6 @@ struct amdgpu_mmhub_funcs {
uint64_t page_table_base);
void (*update_power_gating)(struct amdgpu_device *adev,
bool enable);
- bool (*query_utcl2_poison_status)(struct amdgpu_device *adev,
- int hub_inst);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d002b845d8ac..5e3faefc5510 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -51,6 +51,7 @@ struct amdgpu_encoder;
struct amdgpu_router;
struct amdgpu_hpd;
struct edid;
+struct drm_edid;
#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
@@ -326,8 +327,7 @@ struct amdgpu_mode_info {
/* FMT dithering */
struct drm_property *dither_property;
/* hardcoded DFP edid from BIOS */
- struct edid *bios_hardcoded_edid;
- int bios_hardcoded_edid_size;
+ const struct drm_edid *bios_hardcoded_edid;
/* firmware flags */
u32 firmware_flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e32161f6b67a..44819cdba7fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -77,24 +77,6 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_destroy(tbo);
}
-static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
- struct amdgpu_bo_vm *vmbo;
-
- bo = shadow_bo->parent;
- vmbo = to_amdgpu_bo_vm(bo);
- /* in case amdgpu_device_recover_vram got NULL of bo->parent */
- if (!list_empty(&vmbo->shadow_list)) {
- mutex_lock(&adev->shadow_list_lock);
- list_del_init(&vmbo->shadow_list);
- mutex_unlock(&adev->shadow_list_lock);
- }
-
- amdgpu_bo_destroy(tbo);
-}
-
/**
* amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
* @bo: buffer object to be checked
@@ -108,8 +90,7 @@ static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &amdgpu_bo_destroy ||
- bo->destroy == &amdgpu_bo_user_destroy ||
- bo->destroy == &amdgpu_bo_vm_destroy)
+ bo->destroy == &amdgpu_bo_user_destroy)
return true;
return false;
@@ -583,6 +564,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
+ bo->tbo.base.funcs = &amdgpu_gem_object_funcs;
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
bp->domain;
@@ -723,52 +705,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
- *
- * @vmbo: BO that will be inserted into the shadow list
- *
- * Insert a BO to the shadow list.
- */
-void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
-
- mutex_lock(&adev->shadow_list_lock);
- list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
- vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
- vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
- mutex_unlock(&adev->shadow_list_lock);
-}
-
-/**
- * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
- *
- * @shadow: &amdgpu_bo shadow to be restored
- * @fence: dma_fence associated with the operation
- *
- * Copies a buffer object's shadow content back to the object.
- * This is used for recovering a buffer from its shadow in case of a gpu
- * reset where vram context may be lost.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
-
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint64_t shadow_addr, parent_addr;
-
- shadow_addr = amdgpu_bo_gpu_offset(shadow);
- parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
-
- return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
- amdgpu_bo_size(shadow), NULL, fence,
- true, false, 0);
-}
-
-/**
* amdgpu_bo_kmap - map an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be mapped
* @ptr: kernel virtual address to be returned
@@ -851,7 +787,7 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_get(&bo->tbo);
+ drm_gem_object_get(&bo->tbo.base);
return bo;
}
@@ -863,40 +799,30 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
*/
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
- struct ttm_buffer_object *tbo;
-
if ((*bo) == NULL)
return;
- tbo = &((*bo)->tbo);
- ttm_bo_put(tbo);
+ drm_gem_object_put(&(*bo)->tbo.base);
*bo = NULL;
}
/**
- * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be pinned
* @domain: domain to be pinned to
- * @min_offset: the start of requested address range
- * @max_offset: the end of requested address range
*
- * Pins the buffer object according to requested domain and address range. If
- * the memory is unbound gart memory, binds the pages into gart table. Adjusts
- * pin_count and pin_size accordingly.
+ * Pins the buffer object according to requested domain. If the memory is
+ * unbound gart memory, binds the pages into gart table. Adjusts pin_count and
+ * pin_size accordingly.
*
* Pinning means to lock pages in memory along with keeping them at a fixed
* offset. It is required when a buffer can not be moved, for example, when
* a display buffer is being scanned out.
*
- * Compared with amdgpu_bo_pin(), this function gives more flexibility on
- * where to pin a buffer if there are specific restrictions on where a buffer
- * must be located.
- *
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset)
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { false, false };
@@ -905,9 +831,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
- if (WARN_ON_ONCE(min_offset > max_offset))
- return -EINVAL;
-
/* Check domain to be pinned to against preferred domains */
if (bo->preferred_domains & domain)
domain = bo->preferred_domains & domain;
@@ -933,14 +856,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
ttm_bo_pin(&bo->tbo);
-
- if (max_offset != 0) {
- u64 domain_start = amdgpu_ttm_domain_start(adev,
- mem_type);
- WARN_ON_ONCE(max_offset <
- (amdgpu_bo_gpu_offset(bo) - domain_start));
- }
-
return 0;
}
@@ -957,17 +872,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
- unsigned int fpfn, lpfn;
-
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = max_offset >> PAGE_SHIFT;
-
- if (fpfn > bo->placements[i].fpfn)
- bo->placements[i].fpfn = fpfn;
- if (!bo->placements[i].lpfn ||
- (lpfn && lpfn < bo->placements[i].lpfn))
- bo->placements[i].lpfn = lpfn;
-
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
bo->placements[i].mem_type == TTM_PL_VRAM)
bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
@@ -994,24 +898,6 @@ error:
}
/**
- * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
- * @bo: &amdgpu_bo buffer object to be pinned
- * @domain: domain to be pinned to
- *
- * A simple wrapper to amdgpu_bo_pin_restricted().
- * Provides a simpler API for buffers that do not have any strict restrictions
- * on where a buffer must be located.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
-{
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
-}
-
-/**
* amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be unpinned
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index bc42ccbde659..717e47b46167 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -90,6 +90,12 @@ struct amdgpu_bo_va {
bool cleared;
bool is_xgmi;
+
+ /*
+ * protected by vm reservation lock
+ * if non-zero, cannot unmap from GPU because user queues may still access it
+ */
+ unsigned int queue_refcount;
};
struct amdgpu_bo {
@@ -130,8 +136,6 @@ struct amdgpu_bo_user {
struct amdgpu_bo_vm {
struct amdgpu_bo bo;
- struct amdgpu_bo *shadow;
- struct list_head shadow_list;
struct amdgpu_vm_bo_base entries[];
};
@@ -269,22 +273,6 @@ static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}
-/**
- * amdgpu_bo_shadowed - check if the BO is shadowed
- *
- * @bo: BO to be tested.
- *
- * Returns:
- * NULL if not shadowed or else return a BO pointer.
- */
-static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
-{
- if (bo->tbo.type == ttm_bo_type_kernel)
- return to_amdgpu_bo_vm(bo)->shadow;
-
- return NULL;
-}
-
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
@@ -316,8 +304,6 @@ void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
-int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
@@ -343,9 +329,6 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats);
-void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
-int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
- struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index 0bb2466d539a..675aa138ea11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -94,7 +94,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n
ref_div_max = min(128 / post_div, ref_div_max);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = clamp(DIV_ROUND_CLOSEST(den, post_div), 1u, ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
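clamp() is a pure readability change here; a worked instance with illustrative numbers:

/*
 * clamp(v, lo, hi) == min(max(v, lo), hi). For den = 300, post_div = 4,
 * ref_div_max = 64: DIV_ROUND_CLOSEST(300, 4) = 75, clamped to 64.
 */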
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 189574d53ebd..0b28b2cf1517 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2853,7 +2853,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
if (ret)
return ret;
- /* Start rlc autoload after psp recieved all the gfx firmware */
+ /* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload_start(psp);
@@ -3425,9 +3425,11 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
- int err = 0;
+ const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
+ int fw_index, fw_bin_count, start_index = 0;
+ const struct psp_fw_bin_desc *fw_bin;
uint8_t *ucode_array_start_addr;
- int fw_index = 0;
+ int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
if (err)
@@ -3478,15 +3480,30 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
case 2:
sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
- if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
+ fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
+
+ if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
err = -EINVAL;
goto out;
}
- for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
- err = parse_sos_bin_descriptor(psp,
- &sos_hdr_v2_0->psp_fw_bin[fw_index],
+ if (sos_hdr_v2_0->header.header_version_minor == 1) {
+ sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
+
+ fw_bin = sos_hdr_v2_1->psp_fw_bin;
+
+ if (psp_is_aux_sos_load_required(psp))
+ start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
+ else
+ fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
+
+ } else {
+ fw_bin = sos_hdr_v2_0->psp_fw_bin;
+ }
+
+ for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
+ err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
sos_hdr_v2_0);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 74a96516c913..e8abbbcb4326 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -138,6 +138,7 @@ struct psp_funcs {
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
+ bool (*is_aux_sos_load_required)(struct psp_context *psp);
};
struct ta_funcs {
@@ -464,6 +465,9 @@ struct amdgpu_psp_funcs {
((psp)->funcs->fatal_error_recovery_quirk ? \
(psp)->funcs->fatal_error_recovery_quirk((psp)) : 0)
+#define psp_is_aux_sos_load_required(psp) \
+ ((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index d0307c55da50..1a1395c5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -882,7 +882,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (ret)
return ret;
- /* gfx block ras dsiable cmd must send to ras-ta */
+ /* gfx block ras disable cmd must send to ras-ta */
if (head->block == AMDGPU_RAS_BLOCK__GFX)
con->features |= BIT(head->block);
@@ -1223,11 +1223,11 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
amdgpu_ras_error_statistic_de_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->de_count);
+ &err_info->mcm_info, err_info->de_count);
amdgpu_ras_error_statistic_ce_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->ce_count);
+ &err_info->mcm_info, err_info->ce_count);
amdgpu_ras_error_statistic_ue_count(&obj->err_data,
- &err_info->mcm_info, NULL, err_info->ue_count);
+ &err_info->mcm_info, err_info->ue_count);
}
} else {
/* for legacy asic path which doesn't has error source info */
@@ -2153,7 +2153,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
/* gpu reset is fallback for failed and default cases.
* For RMA case, amdgpu_umc_poison_handler will handle gpu reset.
*/
- if (poison_stat && !con->is_rma) {
+ if (poison_stat && !amdgpu_ras_is_rma(adev)) {
event_id = amdgpu_ras_acquire_event_id(adev, type);
RAS_EVENT_LOG(adev, event_id,
"GPU reset for %s RAS poison consumption is issued!\n",
@@ -2881,9 +2881,6 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
{
mutex_init(&ecc_log->lock);
- /* Set any value as siphash key */
- memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key));
-
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
ecc_log->prev_de_queried_count = 0;
@@ -2948,7 +2945,7 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && con->is_rma)
+ if (err_cnt && amdgpu_ras_is_rma(adev))
amdgpu_ras_reset_gpu(adev);
amdgpu_ras_schedule_retirement_dwork(con,
@@ -3049,7 +3046,7 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
}
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
- if (reset_flags && !con->is_rma) {
+ if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
@@ -3195,7 +3192,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
* This calling fails when is_rma is true or
* ret != 0.
*/
- if (con->is_rma || ret)
+ if (amdgpu_ras_is_rma(adev) || ret)
goto free;
if (con->eeprom_control.ras_num_recs) {
@@ -3244,7 +3241,7 @@ out:
* Except error threshold exceeding case, other failure cases in this
* function would not fail amdgpu driver init.
*/
- if (!con->is_rma)
+ if (!amdgpu_ras_is_rma(adev))
ret = 0;
else
ret = -EINVAL;
@@ -3471,6 +3468,11 @@ init_ras_enabled_flag:
/* aca is disabled by default */
adev->aca.is_enabled = false;
+
+ /* the bad page feature is not applicable to this specific APP APU platform */
+ if (adev->gmc.is_app_apu &&
+ amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
+ amdgpu_bad_page_threshold = 0;
}
static void amdgpu_ras_counte_dw(struct work_struct *work)
@@ -4287,7 +4289,7 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
/* mode1 is the only selection for RMA status */
- if (ras->is_rma) {
+ if (amdgpu_ras_is_rma(adev)) {
ras->gpu_reset_flags = 0;
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
}
@@ -4611,8 +4613,6 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
if (!err_node)
return NULL;
- INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
-
memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
err_data->err_list_count++;
@@ -4622,21 +4622,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
return &err_node->err_info;
}
-void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
-{
- /* This function will be retired. */
- return;
-}
-
-void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
-{
- list_del(&mca_err_addr->node);
- kfree(mca_err_addr);
-}
-
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4650,9 +4638,6 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
if (!err_info)
return -EINVAL;
- if (err_addr && err_addr->err_status)
- amdgpu_ras_add_mca_err_addr(err_info, err_addr);
-
err_info->ue_count += count;
err_data->ue_count += count;
@@ -4660,8 +4645,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
}
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4682,8 +4667,8 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
}
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
{
struct ras_err_info *err_info;
@@ -4697,9 +4682,6 @@ int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
if (!err_info)
return -EINVAL;
- if (err_addr && err_addr->err_status)
- amdgpu_ras_add_mca_err_addr(err_info, err_addr);
-
err_info->de_count += count;
err_data->de_count += count;
@@ -4771,6 +4753,16 @@ static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
dev_info(adev->dev,
"socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
socket_id, aid_id, hbm_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
+ socket_id, aid_id, fw_status);
}
static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
@@ -4837,3 +4829,13 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
va_end(args);
}
+
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return false;
+
+ return con->is_rma;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index dcf1f3dbb5c4..669720a9c60a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -28,7 +28,6 @@
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/radix-tree.h>
-#include <linux/siphash.h>
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
#include "amdgpu_smuio.h"
@@ -47,6 +46,8 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
+#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
+#define AMDGPU_RAS_GPU_ERR_UNKNOWN(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
@@ -476,16 +477,15 @@ struct ras_err_pages {
};
struct ras_ecc_err {
- u64 hash_index;
uint64_t status;
uint64_t ipid;
uint64_t addr;
+ uint64_t pa_pfn;
struct ras_err_pages err_pages;
};
struct ras_ecc_log_info {
struct mutex lock;
- siphash_key_t ecc_key;
struct radix_tree_root de_page_tree;
uint64_t de_queried_count;
uint64_t prev_de_queried_count;
@@ -572,19 +572,11 @@ struct ras_fs_data {
char debugfs_name[32];
};
-struct ras_err_addr {
- struct list_head node;
- uint64_t err_status;
- uint64_t err_ipid;
- uint64_t err_addr;
-};
-
struct ras_err_info {
struct amdgpu_smuio_mcm_config_info mcm_info;
u64 ce_count;
u64 ue_count;
u64 de_count;
- struct list_head err_addr_list;
};
struct ras_err_node {
@@ -941,14 +933,14 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info,
- struct ras_err_addr *err_addr, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
const struct aca_info *aca_info, void *data);
@@ -957,12 +949,6 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
struct aca_handle *handle, char *buf, void *data);
-void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info,
- struct ras_err_addr *err_addr);
-
-void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
- struct ras_err_addr *mca_err_addr);
-
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
@@ -982,4 +968,5 @@ __printf(3, 4)
void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
const char *fmt, ...);
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index aab8077e5098..f28f6b4ba765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -58,7 +58,7 @@
#define EEPROM_I2C_MADDR_4 0x40000
/*
- * The 2 macros bellow represent the actual size in bytes that
+ * The 2 macros below represent the actual size in bytes that
* those entities occupy in the EEPROM memory.
* RAS_TABLE_RECORD_SIZE is different than sizeof(eeprom_table_record) which
* uses uint64 to store 6b fields such as retired_page.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4ae581f3fcb5..1cb920abc2fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -136,6 +136,12 @@ static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *doma
return queue_work(domain->wq, work);
}
+static inline bool amdgpu_reset_pending(struct amdgpu_reset_domain *domain)
+{
+ lockdep_assert_held(&domain->sem);
+ return rwsem_is_contended(&domain->sem);
+}
+
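The helper leans on rwsem_is_contended(): a reset worker queued on the write side makes it return true, so polling loops that hold the read side can bail out early instead of stalling the reset. The gmc change earlier in this diff is the intended pattern, condensed:

/*
 *      while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
 *             !amdgpu_reset_pending(adev->reset_domain))
 *              msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
 *
 * The lockdep assertion documents that callers must already hold the
 * reset-domain semaphore for reading.
 */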
void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e6344a6b0a9f..690976665cf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -144,8 +144,10 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
/* We pad to match fetch size */
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
- count %= ring->funcs->align_mask + 1;
- ring->funcs->insert_nop(ring, count);
+ count &= ring->funcs->align_mask;
+
+ if (count != 0)
+ ring->funcs->insert_nop(ring, count);
mb();
amdgpu_ring_set_wptr(ring);
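[Editorial note, worked example for the new padding math: with align_mask = 7 (8-dword fetch granularity) and wptr = 13, count = 8 - (13 & 7) = 3, and 3 & 7 = 3, so three NOPs are emitted. With wptr already aligned, count = 8 and 8 & 7 = 0, so insert_nop() is now skipped entirely, where the old `count %= align_mask + 1` form still made the call with a zero count. The & and % forms agree because align_mask + 1 is a power of two.]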
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 582053f1cd56..f93f51002201 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -235,6 +235,8 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
};
struct amdgpu_ring {
@@ -334,6 +336,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
+#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index d234b7ccfaaf..1c66da1c3fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -410,7 +410,7 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
- if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+ if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
return;
amdgpu_ring_mux_end_ib(mux, ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 863b2a34b2d6..b0a8abc7a8ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -22,7 +22,6 @@
* Authors: Andres Rodriguez <andresx7@gmail.com>
*/
-#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/pid.h>
@@ -43,10 +42,10 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
uint32_t id;
int r;
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- r = amdgpu_file_to_fpriv(f.file, &fpriv);
+ r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
if (r) {
fdput(f);
return r;
@@ -72,10 +71,10 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
struct amdgpu_ctx *ctx;
int r;
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- r = amdgpu_file_to_fpriv(f.file, &fpriv);
+ r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
if (r) {
fdput(f);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d3706a484870..087ce0f6fa07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -115,6 +115,7 @@ struct amdgpu_sdma {
bool has_page_queue;
struct ras_common_if *ras_if;
struct amdgpu_sdma_ras *ras;
+ uint32_t *ip_dump;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index bdf1ef825d89..c586ab4c911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -260,6 +260,36 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
}
+/**
+ * amdgpu_sync_kfd - sync to KFD fences
+ *
+ * @sync: sync object to add KFD fences to
+ * @resv: reservation object with KFD fences
+ *
+ * Extract all KFD fences and add them to the sync object.
+ */
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *f;
+ int r = 0;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
+ void *fence_owner = amdgpu_sync_get_owner(f);
+
+ if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
+ continue;
+
+ r = amdgpu_sync_fence(sync, f);
+ if (r)
+ break;
+ }
+ dma_resv_iter_end(&cursor);
+
+ return r;
+}
+
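[Editorial note: a minimal usage sketch for the new helper — a hypothetical wrapper, not part of this patch; its real caller is the amdgpu_vm_bo_update() change further down in this diff:

	/* Gather KFD fences on @bo and attach them to @job so the job
	 * waits for outstanding KFD work before running.
	 */
	static int example_sync_kfd(struct amdgpu_bo *bo,
				    struct amdgpu_job *job)
	{
		struct amdgpu_sync sync;
		int r;

		amdgpu_sync_create(&sync);
		r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
		if (!r)
			r = amdgpu_sync_push_to_job(&sync, job);
		amdgpu_sync_free(&sync);
		return r;
	}
]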
/* Free the entry back to the slab */
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index cf1e9e858efd..e3272dce798d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -51,6 +51,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b8bc7fa8c375..74adb983ab03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1970,7 +1970,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
- /* Initiailize doorbell pool on PCI BAR */
+ /* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
DRM_ERROR("Failed initializing doorbell heap.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 5bc37acd3981..4e23419b92d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -136,6 +136,14 @@ struct psp_firmware_header_v2_0 {
struct psp_fw_bin_desc psp_fw_bin[];
};
+/* version_major=2, version_minor=1 */
+struct psp_firmware_header_v2_1 {
+ struct common_firmware_header header;
+ uint32_t psp_fw_bin_count;
+ uint32_t psp_aux_fw_bin_index;
+ struct psp_fw_bin_desc psp_fw_bin[];
+};
+
/* version_major=1, version_minor=0 */
struct ta_firmware_header_v1_0 {
struct common_firmware_header header;
@@ -426,6 +434,7 @@ union amdgpu_firmware_header {
struct psp_firmware_header_v1_1 psp_v1_1;
struct psp_firmware_header_v1_3 psp_v1_3;
struct psp_firmware_header_v2_0 psp_v2_0;
+	struct psp_firmware_header_v2_1 psp_v2_1;
struct ta_firmware_header_v1_0 ta;
struct ta_firmware_header_v2_0 ta_v2_0;
struct gfx_firmware_header_v1_0 gfx;
@@ -447,7 +456,7 @@ union amdgpu_firmware_header {
uint8_t raw[0x100];
};
-#define UCODE_MAX_PSP_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc))
+#define UCODE_MAX_PSP_PACKAGING (((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc)) * 2)
/*
* fw loading support
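[Editorial note on the arithmetic: the macro still bounds psp_fw_bin_count by how many struct psp_fw_bin_desc entries fit in the union after the common header and the count word; the trailing * 2 appears to leave headroom for the aux packaging that the new v2_1 header addresses via psp_aux_fw_bin_index, rather than relying on the union itself growing.]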
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 2f84bdb8c594..bb7b9b2eaac1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -196,7 +196,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
amdgpu_umc_handle_bad_pages(adev, ras_error_status);
if ((err_data->ue_count || err_data->de_count) &&
- (reset || (con && con->is_rma))) {
+ (reset || amdgpu_ras_is_rma(adev))) {
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
@@ -204,55 +204,6 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
return AMDGPU_RAS_SUCCESS;
}
-int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- uint32_t reset, uint32_t timeout_ms)
-{
- struct ras_err_data err_data;
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- uint32_t timeout = timeout_ms;
-
- memset(&err_data, 0, sizeof(err_data));
- amdgpu_ras_error_data_init(&err_data);
-
- do {
-
- amdgpu_umc_handle_bad_pages(adev, &err_data);
-
- if (timeout && !err_data.de_count) {
- msleep(1);
- timeout--;
- }
-
- } while (timeout && !err_data.de_count);
-
- if (!timeout)
- dev_warn(adev->dev, "Can't find bad pages\n");
-
- if (err_data.de_count)
- dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count);
-
- if (obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
- obj->err_data.de_count += err_data.de_count;
- }
-
- amdgpu_ras_error_data_fini(&err_data);
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-
- if (reset || (err_data.err_addr_cnt && con && con->is_rma)) {
- con->gpu_reset_flags |= reset;
- amdgpu_ras_reset_gpu(adev);
- }
-
- return 0;
-}
-
int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset)
@@ -472,43 +423,6 @@ int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_umc_uint64_cmp(const void *a, const void *b)
-{
- uint64_t *addr_a = (uint64_t *)a;
- uint64_t *addr_b = (uint64_t *)b;
-
- if (*addr_a > *addr_b)
- return 1;
- else if (*addr_a < *addr_b)
- return -1;
- else
- return 0;
-}
-
-/* Use string hash to avoid logging the same bad pages repeatedly */
-int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
- uint64_t *pfns, int len, uint64_t *val)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- char buf[MAX_UMC_HASH_STRING_SIZE] = {0};
- int offset = 0, i = 0;
- uint64_t hash_val;
-
- if (!pfns || !len)
- return -EINVAL;
-
- sort(pfns, len, sizeof(uint64_t), amdgpu_umc_uint64_cmp, NULL);
-
- for (i = 0; i < len; i++)
- offset += snprintf(&buf[offset], sizeof(buf) - offset, "%llx", pfns[i]);
-
- hash_val = siphash(buf, offset, &con->umc_ecc_log.ecc_key);
-
- *val = hash_val;
-
- return 0;
-}
-
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
{
@@ -519,18 +433,10 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
ecc_log = &con->umc_ecc_log;
mutex_lock(&ecc_log->lock);
- ret = radix_tree_insert(ecc_tree, ecc_err->hash_index, ecc_err);
- if (!ret) {
- struct ras_err_pages *err_pages = &ecc_err->err_pages;
- int i;
-
- /* Reserve memory */
- for (i = 0; i < err_pages->count; i++)
- amdgpu_ras_reserve_page(adev, err_pages->pfn[i]);
-
+ ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
+ if (!ret)
radix_tree_tag_set(ecc_tree,
- ecc_err->hash_index, UMC_ECC_NEW_DETECTED_TAG);
- }
+ ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
mutex_unlock(&ecc_log->lock);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 5f50c69c3cec..ce4179db2a6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -127,13 +127,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data);
-int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- uint32_t reset, uint32_t timeout_ms);
-
int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
uint64_t status, uint64_t ipid, uint64_t addr);
-int amdgpu_umc_build_pages_hash(struct amdgpu_device *adev,
- uint64_t *pfns, int len, uint64_t *val);
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index fbc2852278e1..6162582d0aa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -587,7 +587,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
break;
}
- r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 07d930339b07..31fd30dcd593 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
@@ -1088,7 +1088,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
int r;
job->vm = NULL;
- ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 968ca2c84ef7..74fdbf71d95b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
@@ -749,7 +749,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
int i, r = 0;
job->vm = NULL;
- ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
for (idx = 0; idx < ib->length_dw;) {
uint32_t len = amdgpu_ib_get_value(ib, idx);
@@ -1044,7 +1043,6 @@ out:
if (!r) {
/* No error, free all destroyed handle slots */
tmp = destroyed;
- amdgpu_ib_free(p->adev, ib, NULL);
} else {
/* Error during parsing, free all allocated handle slots */
tmp = allocated;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c87d68d4be53..2a1f3dbb14d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -330,6 +330,9 @@ struct amdgpu_vcn {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
bool using_unified_queue;
+
+ /* IP reg dump */
+ uint32_t *ip_dump;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index b287a82e6177..b6397d3229e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -33,6 +33,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
+#include "amdgpu_dpm.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"
@@ -849,6 +850,13 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
return mode;
}
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev)
+{
+ /* stop the data exchange thread */
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR);
+}
+
void amdgpu_virt_post_reset(struct amdgpu_device *adev)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b42a8854dca0..b650a2032c42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -376,6 +376,7 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
uint32_t ucode_id);
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev);
void amdgpu_virt_post_reset(struct amdgpu_device *adev);
bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev);
bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 6415d0d039e1..d4c2afafbb73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -338,6 +338,7 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
+ rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
@@ -549,7 +550,7 @@ static int amdgpu_vkms_sw_fini(void *handle)
adev->mode_info.mode_config_initialized = false;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
kfree(adev->amdgpu_vkms_output);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a060c28f0877..6005280f5f38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -465,7 +465,6 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
{
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
struct amdgpu_vm_bo_base *bo_base;
- struct amdgpu_bo *shadow;
struct amdgpu_bo *bo;
int r;
@@ -486,16 +485,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
- shadow = amdgpu_bo_shadowed(bo);
r = validate(param, bo);
if (r)
return r;
- if (shadow) {
- r = validate(param, shadow);
- if (r)
- return r;
- }
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
@@ -681,6 +674,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
+ if (adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader &&
+ job->enforce_isolation)
+ ring->funcs->emit_cleaner_shader(ring);
+
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
return 0;
@@ -742,6 +740,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_emit_switch_buffer(ring);
}
+
amdgpu_ring_ib_end(ring);
return 0;
}
@@ -838,7 +837,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+ r = vm->update_funcs->prepare(&params, NULL);
if (r)
goto error;
@@ -902,10 +901,12 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
{
struct amdgpu_vm *vm = params->vm;
- if (!fence || !*fence)
+ tlb_cb->vm = vm;
+ if (!fence || !*fence) {
+ amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
return;
+ }
- tlb_cb->vm = vm;
if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
amdgpu_vm_tlb_seq_cb)) {
dma_fence_put(vm->last_tlb_flush);
@@ -933,7 +934,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
* @unlocked: unlocked invalidation during MM callback
* @flush_tlb: trigger tlb invalidation after update completed
* @allow_override: change MTYPE for local NUMA nodes
- * @resv: fences we need to sync to
+ * @sync: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
@@ -949,16 +950,16 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
 * 0 for success, negative error code for failure.
*/
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
- struct dma_resv *resv, uint64_t start, uint64_t last,
- uint64_t flags, uint64_t offset, uint64_t vram_base,
+ bool immediate, bool unlocked, bool flush_tlb,
+ bool allow_override, struct amdgpu_sync *sync,
+ uint64_t start, uint64_t last, uint64_t flags,
+ uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_tlb_seq_struct *tlb_cb;
struct amdgpu_vm_update_params params;
struct amdgpu_res_cursor cursor;
- enum amdgpu_sync_mode sync_mode;
int r, idx;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -991,14 +992,6 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.allow_override = allow_override;
INIT_LIST_HEAD(&params.tlb_flush_waitlist);
- /* Implicitly sync to command submissions in the same VM before
- * unmapping. Sync to moving fences before mapping.
- */
- if (!(flags & AMDGPU_PTE_VALID))
- sync_mode = AMDGPU_SYNC_EQ_OWNER;
- else
- sync_mode = AMDGPU_SYNC_EXPLICIT;
-
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
r = -EBUSY;
@@ -1013,7 +1006,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, resv, sync_mode);
+ r = vm->update_funcs->prepare(&params, sync);
if (r)
goto error_free;
@@ -1155,23 +1148,36 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
+ struct dma_fence **last_update;
dma_addr_t *pages_addr = NULL;
struct ttm_resource *mem;
- struct dma_fence **last_update;
+ struct amdgpu_sync sync;
bool flush_tlb = clear;
- bool uncached;
- struct dma_resv *resv;
uint64_t vram_base;
uint64_t flags;
+ bool uncached;
int r;
+ amdgpu_sync_create(&sync);
if (clear || !bo) {
mem = NULL;
- resv = vm->root.bo->tbo.base.resv;
+
+ /* Implicitly sync to command submissions in the same VM before
+ * unmapping.
+ */
+ r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+ AMDGPU_SYNC_EQ_OWNER, vm);
+ if (r)
+ goto error_free;
+ if (bo) {
+ r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
+ if (r)
+ goto error_free;
+ }
+
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- resv = bo->tbo.base.resv;
if (obj->import_attach && bo_va->is_xgmi) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
struct drm_gem_object *gobj = dma_buf->priv;
@@ -1185,6 +1191,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT))
pages_addr = bo->tbo.ttm->dma_address;
+
+ /* Implicitly sync to moving fences before mapping anything */
+ r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
+ AMDGPU_SYNC_EXPLICIT, vm);
+ if (r)
+ goto error_free;
}
if (bo) {
@@ -1234,12 +1246,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
trace_amdgpu_vm_bo_update(mapping);
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
- !uncached, resv, mapping->start, mapping->last,
- update_flags, mapping->offset,
- vram_base, mem, pages_addr,
- last_update);
+ !uncached, &sync, mapping->start,
+ mapping->last, update_flags,
+ mapping->offset, vram_base, mem,
+ pages_addr, last_update);
if (r)
- return r;
+ goto error_free;
}
/* If the BO is not in its preferred location add it back to
@@ -1267,7 +1279,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
trace_amdgpu_vm_bo_mapping(mapping);
}
- return 0;
+error_free:
+ amdgpu_sync_free(&sync);
+ return r;
}
/**
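[Editorial note, summarizing the hunks above and below: the implicit-sync policy that amdgpu_vm_update_range() used to derive from AMDGPU_PTE_VALID (AMDGPU_SYNC_EQ_OWNER before unmapping, AMDGPU_SYNC_EXPLICIT before mapping) moves into the callers, which now collect the relevant fences into an amdgpu_sync object up front and may mix in additional sources such as amdgpu_sync_kfd().]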
@@ -1414,25 +1428,34 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
- struct dma_resv *resv = vm->root.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping;
- uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
+ struct amdgpu_sync sync;
int r;
+
+ /*
+ * Implicitly sync to command submissions in the same VM before
+ * unmapping.
+ */
+ amdgpu_sync_create(&sync);
+ r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+ AMDGPU_SYNC_EQ_OWNER, vm);
+ if (r)
+ goto error_free;
+
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
- resv, mapping->start, mapping->last,
- init_pte_value, 0, 0, NULL, NULL,
- &f);
+ &sync, mapping->start, mapping->last,
+ 0, 0, 0, NULL, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
- return r;
+ goto error_free;
}
}
@@ -1443,7 +1466,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
dma_fence_put(f);
}
- return 0;
+error_free:
+ amdgpu_sync_free(&sync);
+ return r;
}
@@ -2123,10 +2148,6 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
{
struct amdgpu_vm_bo_base *bo_base;
- /* shadow bo doesn't have bo base, its validation needs its parent */
- if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
- bo = bo->parent;
-
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
@@ -2218,7 +2239,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
(1 << 30) - 1) >> 30;
vm_size = roundup_pow_of_two(
- min(max(phys_ram_gb * 3, min_vm_size), max_size));
+ clamp(phys_ram_gb * 3, min_vm_size, max_size));
}
adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
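[Editorial note: the clamp() form is behavior-preserving — min(max(x, lo), hi) equals clamp(x, lo, hi) whenever lo <= hi. For example, with 32 GiB of system RAM, phys_ram_gb * 3 = 96 and clamp(96, 8, 1024) = 96, rounded up to a 128 GiB VM size. Besides reading as intent, the kernel's clamp() strictly type-checks that lo and hi match the clamped value.]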
@@ -2421,6 +2442,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
+ ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
vm->is_compute_context = false;
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2454,7 +2477,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
root_bo = amdgpu_bo_ref(&root->bo);
r = amdgpu_bo_reserve(root_bo, true);
if (r) {
- amdgpu_bo_unref(&root->shadow);
amdgpu_bo_unref(&root_bo);
goto error_free_delayed;
}
@@ -2485,6 +2507,7 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_tlb_flush);
dma_fence_put(vm->last_unlocked);
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
amdgpu_vm_fini_entities(vm);
return r;
@@ -2546,11 +2569,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->last_update = dma_fence_get_stub();
vm->is_compute_context = true;
- /* Free the shadow bo for compute VM */
- amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
-
- goto unreserve_bo;
-
unreserve_bo:
amdgpu_bo_unreserve(vm->root.bo);
return r;
@@ -2641,6 +2659,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
}
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
}
/**
@@ -2754,6 +2773,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* amdgpu_vm_handle_fault - graceful handling of VM faults.
* @adev: amdgpu device pointer
* @pasid: PASID of the VM
+ * @ts: Timestamp of the fault
* @vmid: VMID, only used for GFX 9.4.3.
* @node_id: Node_id received in IH cookie. Only applicable for
* GFX 9.4.3.
@@ -2764,7 +2784,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* shouldn't be reported any more.
*/
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- u32 vmid, u32 node_id, uint64_t addr,
+ u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
bool write_fault)
{
bool is_compute_context = false;
@@ -2790,7 +2810,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
addr /= AMDGPU_GPU_PAGE_SIZE;
if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
- node_id, addr, write_fault)) {
+ node_id, addr, ts, write_fault)) {
amdgpu_bo_unref(&root);
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 046949c4b695..52dd7cdfdc81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -304,8 +304,8 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
- int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode);
+ int (*prepare)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_sync *sync);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -505,9 +505,10 @@ int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
- struct dma_resv *resv, uint64_t start, uint64_t last,
- uint64_t flags, uint64_t offset, uint64_t vram_base,
+ bool immediate, bool unlocked, bool flush_tlb,
+ bool allow_override, struct amdgpu_sync *sync,
+ uint64_t start, uint64_t last, uint64_t flags,
+ uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
@@ -558,7 +559,7 @@ amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- u32 vmid, u32 node_id, uint64_t addr,
+ u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
bool write_fault);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 3895bd7d176a..0c1ef5850a5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -39,20 +39,18 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
* amdgpu_vm_cpu_prepare - prepare page table update with the CPU
*
* @p: see amdgpu_vm_update_params definition
- * @resv: reservation object with embedded fence
- * @sync_mode: synchronization mode
+ * @sync: sync obj with fences to wait on
*
* Returns:
 * Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode)
+ struct amdgpu_sync *sync)
{
- if (!resv)
+ if (!sync)
return 0;
- return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
+ return amdgpu_sync_wait(sync, true);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index e39d6e7643bf..f78a0434a48f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -383,14 +383,6 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
- if (vmbo->shadow) {
- struct amdgpu_bo *shadow = vmbo->shadow;
-
- r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
- if (r)
- return r;
- }
-
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return -ENODEV;
@@ -403,7 +395,7 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+ r = vm->update_funcs->prepare(&params, NULL);
if (r)
goto exit;
@@ -448,10 +440,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int32_t xcp_id)
{
struct amdgpu_bo_param bp;
- struct amdgpu_bo *bo;
- struct dma_resv *resv;
unsigned int num_entries;
- int r;
memset(&bp, 0, sizeof(bp));
@@ -484,42 +473,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (vm->root.bo)
bp.resv = vm->root.bo->tbo.base.resv;
- r = amdgpu_bo_create_vm(adev, &bp, vmbo);
- if (r)
- return r;
-
- bo = &(*vmbo)->bo;
- if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
- (*vmbo)->shadow = NULL;
- return 0;
- }
-
- if (!bp.resv)
- WARN_ON(dma_resv_lock(bo->tbo.base.resv,
- NULL));
- resv = bp.resv;
- memset(&bp, 0, sizeof(bp));
- bp.size = amdgpu_vm_pt_size(adev, level);
- bp.domain = AMDGPU_GEM_DOMAIN_GTT;
- bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bp.type = ttm_bo_type_kernel;
- bp.resv = bo->tbo.base.resv;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
- bp.xcp_id_plus1 = xcp_id + 1;
-
- r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
-
- if (!resv)
- dma_resv_unlock(bo->tbo.base.resv);
-
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- amdgpu_bo_add_to_shadow_list(*vmbo);
-
- return 0;
+ return amdgpu_bo_create_vm(adev, &bp, vmbo);
}
/**
@@ -569,7 +523,6 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
return 0;
error_free_pt:
- amdgpu_bo_unref(&pt->shadow);
amdgpu_bo_unref(&pt_bo);
return r;
}
@@ -581,17 +534,10 @@ error_free_pt:
*/
static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
{
- struct amdgpu_bo *shadow;
-
if (!entry->bo)
return;
entry->bo->vm_bo = NULL;
- shadow = amdgpu_bo_shadowed(entry->bo);
- if (shadow) {
- ttm_bo_set_bulk_move(&shadow->tbo, NULL);
- amdgpu_bo_unref(&shadow);
- }
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
spin_lock(&entry->vm->status_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 9b748d7058b5..46d9fb433ab2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -35,16 +35,7 @@
*/
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
- int r;
-
- r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
- if (r)
- return r;
-
- if (table->shadow)
- r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
-
- return r;
+ return amdgpu_ttm_alloc_gart(&table->bo.tbo);
}
/* Allocate a new job for @count PTE updates */
@@ -77,32 +68,24 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
* amdgpu_vm_sdma_prepare - prepare SDMA command submission
*
* @p: see amdgpu_vm_update_params definition
- * @resv: reservation object with embedded fence
- * @sync_mode: synchronization mode
+ * @sync: amdgpu_sync object with fences to wait for
*
* Returns:
 * Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct dma_resv *resv,
- enum amdgpu_sync_mode sync_mode)
+ struct amdgpu_sync *sync)
{
- struct amdgpu_sync sync;
int r;
r = amdgpu_vm_sdma_alloc_job(p, 0);
if (r)
return r;
- if (!resv)
+ if (!sync)
return 0;
- amdgpu_sync_create(&sync);
- r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
- if (!r)
- r = amdgpu_sync_push_to_job(&sync, p->job);
- amdgpu_sync_free(&sync);
-
+ r = amdgpu_sync_push_to_job(sync, p->job);
if (r) {
p->num_dw_left = 0;
amdgpu_job_free(p->job);
@@ -273,17 +256,13 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (!p->pages_addr) {
/* set page commands needed */
- if (vmbo->shadow)
- amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
- count, incr, flags);
amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
incr, flags);
return 0;
}
/* copy commands needed */
- ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
- (vmbo->shadow ? 2 : 1);
+ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
/* for padding */
ndw -= 7;
@@ -298,8 +277,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
pte[i] |= flags;
}
- if (vmbo->shadow)
- amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
pe += nptes * 8;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 90138bc5f03d..32775260556f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -180,6 +180,6 @@ amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from)
#define for_each_xcp(xcp_mgr, xcp, i) \
for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
- xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
+ ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
#endif
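[Editorial note: a usage sketch for the fixed iterator — hypothetical loop body; this assumes amdgpu_get_next_xcp() returns the next valid partition at or after *from without advancing past it, so the new ++i in the step expression is what moves the cursor and prevents revisiting the same XCP forever:

	static int example_count_xcps(struct amdgpu_xcp_mgr *xcp_mgr)
	{
		struct amdgpu_xcp *xcp;
		int i, n = 0;

		for_each_xcp(xcp_mgr, xcp, i)
			n++;	/* with the old macro this never advanced */

		return n;
	}
]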
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 821ba2309dec..7de449fae1e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -1389,10 +1389,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
case ACA_ERROR_TYPE_UE:
- amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
+ amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
break;
case ACA_ERROR_TYPE_CE:
- amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
+ amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index fb2b394bb9c5..6e9eeaeb3de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -213,7 +213,7 @@ struct amd_sriov_msg_pf2vf_info {
uint32_t gpu_capacity;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
-};
+} __packed;
struct amd_sriov_msg_vf2pf_info_header {
/* the total structure size in byte */
@@ -273,7 +273,7 @@ struct amd_sriov_msg_vf2pf_info {
uint32_t mes_info_size;
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
-};
+} __packed;
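[Editorial note: marking the PF2VF/VF2PF exchange structures __packed rules out compiler-inserted padding; since every member is a fixed-width integer the layout is very likely unchanged in practice, and the attribute pins down the host/guest ABI explicitly.]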
/* mailbox messages sent from guest to host */
enum amd_sriov_mailbox_request_message {
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 228fd4dd32f1..5e8833e4fed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -75,6 +75,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_mask;
ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
return;
@@ -92,8 +94,6 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
- if (aqua_vanjaram_xcp_vcn_shared(adev))
- inst_mask = 1 << (inst_idx * 2);
break;
default:
DRM_ERROR("Not support ring type %d!", ring->funcs->type);
@@ -103,6 +103,10 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
ring->xcp_id = xcp_id;
+ dev_dbg(adev->dev, "ring:%s xcp_id:%u\n", ring->name,
+ ring->xcp_id);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
break;
}
}
@@ -390,38 +394,31 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
struct amdgpu_xcp_ip *ip)
{
struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_sdma, num_vcn, num_shared_vcn, num_xcp;
int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
- int num_sdma, num_vcn;
num_sdma = adev->sdma.num_instances;
num_vcn = adev->vcn.num_vcn_inst;
+ num_shared_vcn = 1;
+
+ num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
+ num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;
switch (xcp_mgr->mode) {
case AMDGPU_SPX_PARTITION_MODE:
- num_sdma_xcp = num_sdma;
- num_vcn_xcp = num_vcn;
- break;
case AMDGPU_DPX_PARTITION_MODE:
- num_sdma_xcp = num_sdma / 2;
- num_vcn_xcp = num_vcn / 2;
- break;
case AMDGPU_TPX_PARTITION_MODE:
- num_sdma_xcp = num_sdma / 3;
- num_vcn_xcp = num_vcn / 3;
- break;
case AMDGPU_QPX_PARTITION_MODE:
- num_sdma_xcp = num_sdma / 4;
- num_vcn_xcp = num_vcn / 4;
- break;
case AMDGPU_CPX_PARTITION_MODE:
- num_sdma_xcp = 2;
- num_vcn_xcp = num_vcn ? 1 : 0;
+ num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
+ num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
break;
default:
return -EINVAL;
}
- num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
+ if (num_vcn && num_xcp > num_vcn)
+ num_shared_vcn = num_xcp / num_vcn;
switch (ip_id) {
case AMDGPU_XCP_GFXHUB:
@@ -437,7 +434,8 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
break;
case AMDGPU_XCP_VCN:
- ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
+ ip->inst_mask =
+ XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
/* TODO : Assign IP funcs */
break;
default:
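[Editorial note, worked example of the new VCN sharing math: with 8 partitions (CPX on an 8-XCC part) and 4 VCN instances, num_vcn_xcp = DIV_ROUND_UP(4, 8) = 1 and num_shared_vcn = 8 / 4 = 2, so xcp_id / num_shared_vcn maps partitions 0-1 to VCN instance 0, partitions 2-3 to instance 1, and so on, replacing the old per-mode special cases with one uniform formula.]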
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 25feab188dfe..a51f3414b65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -215,7 +215,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
dig->bl_dev = bd;
bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
DRM_INFO("amdgpu atom DIG backlight initialized\n");
@@ -2064,27 +2064,25 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
- struct edid *edid;
- int edid_size =
- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
- edid = kmalloc(edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
- fake_edid_record->ucFakeEDIDLength);
-
- if (drm_edid_is_valid(edid)) {
- adev->mode_info.bios_hardcoded_edid = edid;
- adev->mode_info.bios_hardcoded_edid_size = edid_size;
- } else
- kfree(edid);
- }
+ const struct drm_edid *edid;
+ int edid_size;
+
+ if (fake_edid_record->ucFakeEDIDLength == 128)
+ edid_size = fake_edid_record->ucFakeEDIDLength;
+ else
+ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+ edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size);
+ if (drm_edid_valid(edid))
+ adev->mode_info.bios_hardcoded_edid = edid;
+ else
+ drm_edid_free(edid);
+ record += struct_size(fake_edid_record,
+ ucFakeEDIDString,
+ edid_size);
+ } else {
+ /* empty fake edid record must be 3 bytes long */
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
}
- record += fake_edid_record->ucFakeEDIDLength ?
- struct_size(fake_edid_record,
- ucFakeEDIDString,
- fake_edid_record->ucFakeEDIDLength) :
- /* empty fake edid record must be 3 bytes long */
- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 55982c0064b5..06088d52d81c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -364,6 +364,7 @@
* 1 - Stream
* 2 - Bypass
*/
+#define EOP_EXEC (1 << 28) /* For Trailing Fence */
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index dddb5fe16f2c..70c1399f738d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1881,6 +1881,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
return r;
if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
@@ -2401,6 +2402,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
@@ -2846,7 +2848,7 @@ static int dce_v10_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 11780e4d7e9f..f154c24499c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1931,6 +1931,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
return r;
if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
@@ -2485,6 +2486,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
@@ -2973,7 +2975,7 @@ static int dce_v11_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 05c0df97f01d..a7fcb135827f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1861,6 +1861,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
return r;
if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
@@ -2321,6 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
@@ -2745,7 +2747,7 @@ static int dce_v6_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index dc73e301d937..77ac3f114d24 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1828,6 +1828,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
return r;
if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
@@ -2320,6 +2321,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return ret;
}
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
@@ -2766,7 +2768,7 @@ static int dce_v8_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e444e621ddaa..45ed97038df0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4649,7 +4649,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -4662,7 +4662,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -4675,7 +4675,7 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -4741,6 +4741,13 @@ static int gfx_v10_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_10_1__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -5213,26 +5220,74 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
}
+static u32 gfx_v10_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v10_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
@@ -6637,13 +6692,13 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6695,7 +6750,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v10_0_gfx_init_queue(ring);
+ r = gfx_v10_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -6975,13 +7030,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!restore && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -7043,7 +7098,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v10_0_kcq_init_queue(ring);
+ r = gfx_v10_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -7369,6 +7424,7 @@ static int gfx_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
/* WA added for Vangogh asic fixing the SMU suspend failure
* It needs to set power gating again during gfxoff control
@@ -7679,6 +7735,10 @@ static int gfx_v10_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
return 0;
}
@@ -8889,7 +8949,9 @@ static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -9074,12 +9136,39 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v10_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -9088,17 +9177,75 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v10_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v10_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v10_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -9122,8 +9269,8 @@ static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -9150,6 +9297,15 @@ static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v10_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream \n");
+ gfx_v10_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -9244,6 +9400,174 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization up to 0x3ffe, followed by the remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
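A note on the packing above: a PM4 type-3 header PACKET3(op, C) declares C + 1 payload dwords, so the single NOP header lets the CP skip the whole run in one fetch instead of decoding a NOP per dword. Minimal dword accounting, as a sketch (annotation only, not part of the patch; the count-field rule is assumed from PM4 conventions):

	uint32_t hdr_count = min(num_nop - 2, 0x3ffe); /* payload dwords - 1 */
	uint32_t skipped   = hdr_count + 2;            /* header + payload dwords */
	/* for num_nop <= 0x4000 the one header covers the entire run;
	 * any excess is consumed as ordinary one-dword NOP headers */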
+
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ u64 addr;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ addr = amdgpu_bo_gpu_offset(ring->mqd_obj) +
+ offsetof(struct v10_gfx_mqd, cp_gfx_hqd_active);
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (ring->pipe == 0)
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << ring->queue);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << ring->queue);
+
+ gfx_v10_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ gfx_v10_0_wait_reg_mem(kiq_ring, 0, 1, 0,
+ lower_32_bits(addr), upper_32_bits(addr),
+ 0, 1, 0x20);
+ gfx_v10_0_ring_emit_reg_wait(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v10_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
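The new .reset hook is intended to be reached from a job-timeout path rather than called directly; a hedged sketch of such a caller (the handler shape and job_vmid are assumptions, not part of this patch):

	/* hypothetical timeout-handler fragment */
	if (ring->funcs->reset) {
		int r = ring->funcs->reset(ring, job_vmid); /* job_vmid: assumed */
		if (!r)
			return 0; /* queue recovered; skip the full GPU reset */
	}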
+
+static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ /* make sure dequeue is complete */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v10_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -9435,7 +9759,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
.test_ring = gfx_v10_0_ring_test_ring,
.test_ib = gfx_v10_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v10_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v10_0_ring_emit_sb,
.emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
@@ -9447,6 +9771,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
+ .reset = gfx_v10_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -9476,12 +9801,14 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
.test_ring = gfx_v10_0_ring_test_ring,
.test_ib = gfx_v10_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v10_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
+ .reset = gfx_v10_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -9536,6 +9863,11 @@ static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
.process = gfx_v10_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v10_0_bad_op_irq_funcs = {
+ .set = gfx_v10_0_set_bad_op_fault_state,
+ .process = gfx_v10_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
.set = gfx_v10_0_set_priv_inst_fault_state,
.process = gfx_v10_0_priv_inst_irq,
@@ -9557,6 +9889,9 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v10_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index dcef39907449..d3e8be82a172 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -481,6 +481,24 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
amdgpu_ring_write(ring, inv); /* poll interval */
}
+static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization up to 0x3ffe, followed by the remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -1484,7 +1502,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -1497,7 +1515,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -1510,7 +1528,7 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -1569,6 +1587,13 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
@@ -1953,26 +1978,74 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
gfx_v11_0_init_gds_vmid(adev);
}
+static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
@@ -3911,13 +3984,13 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v11_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -3953,7 +4026,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v11_0_gfx_init_queue(ring);
+ r = gfx_v11_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4248,13 +4321,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v11_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -4318,7 +4391,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v11_0_kcq_init_queue(ring);
+ r = gfx_v11_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4598,6 +4671,7 @@ static int gfx_v11_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -4668,8 +4742,8 @@ static int gfx_v11_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
- int req)
+int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req)
{
u32 i, tmp, val;
@@ -4707,6 +4781,8 @@ static int gfx_v11_0_soft_reset(void *handle)
int r, i, j, k;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
@@ -4714,8 +4790,6 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_set_safe_mode(adev, 0);
-
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
@@ -4740,8 +4814,10 @@ static int gfx_v11_0_soft_reset(void *handle)
mutex_unlock(&adev->srbm_mutex);
/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
- r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ r = gfx_v11_0_request_gfx_index_mutex(adev, true);
if (r) {
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
return r;
}
@@ -4755,7 +4831,8 @@ static int gfx_v11_0_soft_reset(void *handle)
RREG32_SOC15(GC, 0, regCP_VMID_RESET);
/* release the gfx mutex */
- r = gfx_v11_0_request_gfx_index_mutex(adev, 0);
+ r = gfx_v11_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
if (r) {
DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
return r;
@@ -4823,7 +4900,7 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_unset_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return gfx_v11_0_cp_resume(adev);
}
@@ -4954,6 +5031,9 @@ static int gfx_v11_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
return 0;
}
@@ -5843,6 +5923,9 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
+ if (adev->enable_mes)
+ return -EINVAL;
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -6008,7 +6091,9 @@ static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -6201,15 +6286,42 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6218,17 +6330,75 @@ static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6252,8 +6422,8 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -6281,6 +6451,15 @@ static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream \n");
+ gfx_v11_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -6367,6 +6546,99 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v11_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
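Unlike the gfx10 path, gfx11 hands the unmap/reset to the MES firmware and only re-initializes and remaps the MQD locally. A hedged sketch of what a register-driven fallback might look like (reading the final argument as a use_mmio selector is an assumption, not stated in this patch):

	/* hypothetical register-driven fallback */
	r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true /* use_mmio? */);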
+
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* make sure dequeue is complete */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v11_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kcq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -6556,7 +6828,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
.test_ring = gfx_v11_0_ring_test_ring,
.test_ib = gfx_v11_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v11_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
@@ -6568,6 +6840,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
+ .reset = gfx_v11_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6598,12 +6871,14 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
.test_ring = gfx_v11_0_ring_test_ring,
.test_ib = gfx_v11_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v11_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
+ .reset = gfx_v11_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
@@ -6658,6 +6933,11 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
.process = gfx_v11_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
+ .set = gfx_v11_0_set_bad_op_fault_state,
+ .process = gfx_v11_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.set = gfx_v11_0_set_priv_inst_fault_state,
.process = gfx_v11_0_priv_inst_irq,
@@ -6675,6 +6955,9 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
index 10cfc29c27c9..157a5c812259 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h
@@ -26,4 +26,7 @@
extern const struct amdgpu_ip_block_version gfx_v11_0_ip_block;
+int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
index 9cd221ed240c..999bb3cc88b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -97,7 +97,7 @@ static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
- if (con && !con->is_rma)
+ if (con && !amdgpu_ras_is_rma(adev))
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index e45d23e82878..47b47d21f464 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -202,12 +202,16 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
};
-static const struct soc15_reg_golden golden_settings_gc_12_0[] = {
+static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f),
SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020)
};
+static const struct soc15_reg_golden golden_settings_gc_12_0[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x00008000, 0x00008000),
+};
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -1281,7 +1285,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -1294,7 +1298,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -1307,7 +1311,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.me.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
adev->gfx.ip_dump_gfx_queues = NULL;
} else {
@@ -1355,6 +1359,13 @@ static int gfx_v12_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
@@ -1686,26 +1697,68 @@ static void gfx_v12_0_constants_init(struct amdgpu_device *adev)
gfx_v12_0_init_compute_vmid(adev);
}
+static u32 gfx_v12_0_get_cpg_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ if (me != 0)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
+ default:
+ return 0;
+ }
+}
+
+static u32 gfx_v12_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
- u32 tmp;
+ u32 tmp, cp_int_cntl_reg;
+ int i, j;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
-
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
- enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
- enable ? 1 : 0);
-
- WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
+ enable ? 1 : 0);
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
+ enable ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
+ }
+ }
+ }
}
static int gfx_v12_0_init_csb(struct amdgpu_device *adev)
@@ -2867,13 +2920,13 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v12_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v12_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -2909,7 +2962,7 @@ static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v12_0_gfx_init_queue(ring);
+ r = gfx_v12_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3213,13 +3266,13 @@ static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v12_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -3283,7 +3336,7 @@ static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v12_0_kcq_init_queue(ring);
+ r = gfx_v12_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3446,10 +3499,14 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_12_0,
+ (const u32)ARRAY_SIZE(golden_settings_gc_12_0));
+
if (adev->rev_id == 0)
soc15_program_register_sequence(adev,
- golden_settings_gc_12_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_12_0));
+ golden_settings_gc_12_0_rev0,
+ (const u32)ARRAY_SIZE(golden_settings_gc_12_0_rev0));
break;
default:
break;
@@ -3553,6 +3610,7 @@ static int gfx_v12_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -3672,6 +3730,10 @@ static int gfx_v12_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
return 0;
}
@@ -4447,6 +4509,9 @@ static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
+ if (adev->enable_mes)
+ return -EINVAL;
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -4563,7 +4628,9 @@ static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -4747,15 +4814,42 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -4764,17 +4858,75 @@ static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v12_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
+ cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -4798,8 +4950,8 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
case 0:
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
@@ -4827,6 +4979,15 @@ static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v12_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream \n");
+ gfx_v12_0_handle_priv_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -4859,6 +5020,24 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization up to 0x3ffe, followed by the remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4989,6 +5168,93 @@ static void gfx_v12_ip_dump(void *handle)
amdgpu_gfx_off_ctrl(adev, true);
}
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r) {
+ dev_err(adev->dev, "reset via MES failed %d\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v12_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r, i;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v12_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kcq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
.name = "gfx_v12_0",
.early_init = gfx_v12_0_early_init,
@@ -5040,7 +5306,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
.test_ring = gfx_v12_0_ring_test_ring,
.test_ib = gfx_v12_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v12_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl,
.init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec,
@@ -5051,6 +5317,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
+ .reset = gfx_v12_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
@@ -5078,12 +5345,14 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
.test_ring = gfx_v12_0_ring_test_ring,
.test_ib = gfx_v12_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v12_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
+ .reset = gfx_v12_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
@@ -5138,6 +5407,11 @@ static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = {
.process = gfx_v12_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v12_0_bad_op_irq_funcs = {
+ .set = gfx_v12_0_set_bad_op_fault_state,
+ .process = gfx_v12_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = {
.set = gfx_v12_0_set_priv_inst_fault_state,
.process = gfx_v12_0_priv_inst_irq,
@@ -5151,6 +5425,9 @@ static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d84589137df9..f146806c4633 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2114,6 +2114,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
+
/* Workaround for cache flush problems. First send a dummy EOP
* event down the pipe with seq one below.
*/
@@ -2133,7 +2135,8 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
- EVENT_INDEX(5)));
+ EVENT_INDEX(5) |
+ (exec ? EOP_EXEC : 0)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -4921,6 +4924,76 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
+static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+ int mem_space, int opt, uint32_t addr0,
+ uint32_t addr1, uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring,
+ /* memory (1) or register (0) */
+ (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+ WAIT_REG_MEM_OPERATION(opt) | /* wait */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(eng_sel)));
+
+ if (mem_space)
+ BUG_ON(addr0 & 0x3); /* Dword align */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ amdgpu_ring_write(ring, ref);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
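gfx_v7_0_wait_reg_mem mirrors the gfx10 helper, so the same packet can also poll memory rather than a register; a usage sketch with placeholder gpu_addr and seq (not taken from this patch):

	/* poll a dword in memory until it equals seq (function 3 = equal) */
	gfx_v7_0_wait_reg_mem(ring, 0 /* ME */, 1 /* memory */, 0,
			      lower_32_bits(gpu_addr), upper_32_bits(gpu_addr),
			      seq, 0xffffffff, 0x20 /* poll interval */);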
+
+static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
+ return -ENOMEM;
+ gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
+ gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -4972,6 +5045,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync,
+ .reset = gfx_v7_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5002,6 +5076,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
+ .soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b4658c7db0e1..bc8295812cc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6149,6 +6149,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
/* Workaround for cache flush problems. First send a dummy EOP
* event down the pipe with seq one below.
@@ -6172,7 +6173,8 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
EOP_TC_ACTION_EN |
EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
- EVENT_INDEX(5)));
+ EVENT_INDEX(5) |
+ (exec ? EOP_EXEC : 0)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -6380,6 +6382,34 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
amdgpu_ring_write(ring, val);
}
+static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+ int mem_space, int opt, uint32_t addr0,
+ uint32_t addr1, uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring,
+ /* memory (1) or register (0) */
+ (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+ WAIT_REG_MEM_OPERATION(opt) | /* wait */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(eng_sel)));
+
+ if (mem_space)
+ BUG_ON(addr0 & 0x3); /* Dword align */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ amdgpu_ring_write(ring, ref);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6856,6 +6886,48 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
+static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
+ return -ENOMEM;
+ gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
+ gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6923,6 +6995,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
+ .reset = gfx_v8_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6955,6 +7028,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
.emit_wave_limit = gfx_v8_0_emit_wave_limit,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 2929c8972ea7..23f0573ae47b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -50,6 +50,7 @@
#include "amdgpu_ring_mux.h"
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
+#include "gfx_v9_0_cleaner_shader.h"
#include "gfx_v9_4_2.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
@@ -893,10 +894,18 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid);
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring,
PACKET3_SET_RESOURCES_VMID_MASK(0) |
@@ -906,8 +915,8 @@ static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring,
upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
@@ -1004,12 +1013,47 @@ static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
+
+static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ unsigned int i;
+
+ /* enter safe mode */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ } else {
+ dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
+ }
+
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ /* exit safe mode */
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+}
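+/*
+ * Reset sequence sketch: with the RLC in safe mode and srbm_mutex held,
+ * the target queue is selected through GRBM, a dequeue is requested via
+ * CP_HQD_DEQUEUE_REQUEST (0x2 presumably selecting the reset-type request)
+ * together with SPI_COMPUTE_QUEUE_RESET, and CP_HQD_ACTIVE is polled until
+ * the HQD goes inactive or adev->usec_timeout microseconds elapse.
+ */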
+
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx_v9_0_kiq_set_resources,
.kiq_map_queues = gfx_v9_0_kiq_map_queues,
.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
.kiq_query_status = gfx_v9_0_kiq_query_status,
.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
+ .kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
@@ -1301,6 +1345,10 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
+ /* https://bbs.openkylin.top/t/topic/171497 */
+ { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
+ /* HP 705G4 DM with R5 2400G */
+ { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
{ 0, 0, 0, 0, 0 },
};
@@ -2129,7 +2177,7 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
adev->gfx.ip_dump_core = NULL;
} else {
@@ -2142,7 +2190,7 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
adev->gfx.ip_dump_compute_queues = NULL;
} else {
@@ -2174,6 +2222,12 @@ static int gfx_v9_0_sw_init(void *handle)
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
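+ /* no GC 9.0.x variants are wired up yet, so the cleaner shader stays
+ * disabled here; supported IP versions would get their own case entries */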
+
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -2182,6 +2236,13 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -2329,6 +2390,10 @@ static int gfx_v9_0_sw_init(void *handle)
gfx_v9_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -2364,6 +2429,8 @@ static int gfx_v9_0_sw_fini(void *handle)
}
gfx_v9_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
@@ -2634,7 +2701,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
- if(adev->gfx.num_gfx_rings)
+ if (adev->gfx.num_gfx_rings)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
@@ -3735,7 +3802,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -3747,8 +3814,8 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
*/
tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
- if (!tmp_mqd->cp_hqd_pq_control ||
- (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+ if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3812,7 +3879,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v9_0_kcq_init_queue(ring);
+ r = gfx_v9_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3908,6 +3975,9 @@ static int gfx_v9_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
+
if (!amdgpu_sriov_vf(adev))
gfx_v9_0_init_golden_registers(adev);
@@ -3937,6 +4007,7 @@ static int gfx_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
@@ -4747,6 +4818,10 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
r = gfx_v9_0_ecc_late_init(handle);
if (r)
return r;
@@ -5858,7 +5933,9 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
@@ -5929,17 +6006,95 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
}
}
+static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
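+/*
+ * A zero return doubles as "no register": the priv-reg and bad-opcode
+ * fault-state setters below skip any MEC/pipe combination for which no
+ * CP_ME1_PIPEx_INT_CNTL offset exists.
+ */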
+
static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
PRIV_REG_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl_reg, cp_int_cntl;
+ int i, j;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ /* MECs start at 1 */
+ cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
+
+ if (cp_int_cntl_reg) {
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
+ cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
+ }
+ }
+ }
break;
default:
break;
@@ -6121,6 +6276,15 @@ static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream\n");
+ gfx_v9_0_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -7001,6 +7165,157 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
}
+static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization till 0x3ffe, followed by remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
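+/*
+ * Example: num_nop = 5 emits a PACKET3_NOP header with count field 3,
+ * i.e. four payload dwords, and the loop supplies exactly those four;
+ * only when num_nop - 2 exceeds 0x3ffe do the leftover dwords fall past
+ * the header's payload, where each is itself a single-dword NOP packet.
+ */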
+
+static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ gfx_v9_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
+ return -ENOMEM;
+ gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+ ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
+ gfx_v9_0_ring_emit_reg_wait(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
+ gfx_v9_0_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
+
+ return amdgpu_ring_test_ring(ring);
+}
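+/*
+ * KGQ reset sketch: the KIQ latches the per-VMID reset request into
+ * CP_VMID_RESET under kiq->ring_lock; once amdgpu_ring_test_ring() shows
+ * the KIQ consumed it, the gfx ring itself re-signals its last fence with
+ * AMDGPU_FENCE_FLAG_EXEC, waits for the reset bits to clear, zeroes the
+ * register, and a final ring test proves the queue is functional again.
+ */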
+
+static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int i, r;
+
+ if (!adev->debug_exp_resets &&
+ !adev->gfx.num_gfx_rings)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ /* make sure dequeue is complete */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "failed to reserve mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v9_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
+ if (r) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ DRM_ERROR("fail to remap queue\n");
+ return r;
+ }
+ return amdgpu_ring_test_ring(ring);
+}
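+/*
+ * KCQ reset sketch: unmap the queue through the KIQ, poll CP_HQD_ACTIVE
+ * under GRBM select until the HQD drains, rebuild the MQD via
+ * gfx_v9_0_kcq_init_queue(ring, true) (the restore path added above, which
+ * bypasses the fresh-allocation branch), then remap through the KIQ and
+ * ring-test both rings before declaring the queue recovered.
+ */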
+
static void gfx_v9_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -7083,6 +7398,13 @@ static void gfx_v9_ip_dump(void *handle)
}
+static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
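+/*
+ * The packet is two dwords (header plus the reserved zero), which is why
+ * each emit_frame_size tally below grows by 2 for
+ * gfx_v9_0_ring_emit_cleaner_shader.
+ */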
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -7132,7 +7454,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
- 7, /* gfx_v9_0_emit_mem_sync */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7141,7 +7464,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
@@ -7153,6 +7476,10 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+ .reset = gfx_v9_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
@@ -7185,7 +7512,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
- 7, /* gfx_v9_0_emit_mem_sync */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7195,7 +7523,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
- .insert_nop = amdgpu_sw_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
@@ -7209,6 +7537,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.patch_cntl = gfx_v9_0_ring_patch_cntl,
.patch_de = gfx_v9_0_ring_patch_de_meta,
.patch_ce = gfx_v9_0_ring_patch_ce_meta,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -7229,7 +7560,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
- 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
+ 15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
+ 2, /* gfx_v9_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -7239,13 +7571,18 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
.emit_wave_limit = gfx_v9_0_emit_wave_limit,
+ .reset = gfx_v9_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -7303,6 +7640,11 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
.process = gfx_v9_0_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = {
+ .set = gfx_v9_0_set_bad_op_fault_state,
+ .process = gfx_v9_0_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
.set = gfx_v9_0_set_priv_inst_fault_state,
.process = gfx_v9_0_priv_inst_irq,
@@ -7322,6 +7664,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
new file mode 100644
index 000000000000..36c0292b5110
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/* Define the cleaner shader gfx_9_0 */
+static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
+ /* Add the cleaner shader code here */
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 20ea6cb01edf..c100845409f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -37,6 +37,7 @@
#include "gc/gc_9_4_3_sh_mask.h"
#include "gfx_v9_4_3.h"
+#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"
@@ -63,6 +64,98 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
#define NORMALIZE_XCC_REG_OFFSET(offset) \
(offset & 0xFFFF)
+static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
+ SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
+ /* SE status registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
+};
+
+static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
+ /* compute queue registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
+};
+
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
@@ -71,10 +164,18 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
+static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring,
PACKET3_SET_RESOURCES_VMID_MASK(0) |
@@ -84,8 +185,8 @@ static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring,
upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
@@ -182,12 +283,46 @@ static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
+static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ unsigned i;
+
+ /* enter safe mode */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);
+
+ if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ dev_err(adev->dev, "fail to wait on hqd deactive\n");
+ } else {
+ dev_err(adev->dev, "reset queue_type(%d) not supported\n\n", queue_type);
+ }
+
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ /* exit safe mode */
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+}
+
static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
.kiq_query_status = gfx_v9_4_3_kiq_query_status,
.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
+ .kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
@@ -885,11 +1020,59 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio, NULL);
}
+static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
+{
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+ uint32_t *ptr, num_xcc, inst;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
+ adev->gfx.ip_dump_core = NULL;
+ } else {
+ adev->gfx.ip_dump_core = ptr;
+ }
+
+ /* Allocate memory for compute queue registers for all the instances */
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+
+ ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
+ adev->gfx.ip_dump_compute_queues = NULL;
+ } else {
+ adev->gfx.ip_dump_compute_queues = ptr;
+ }
+}
+
static int gfx_v9_4_3_sw_init(void *handle)
{
int i, j, k, r, ring_id, xcc_id, num_xcc;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
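+ /* the cleaner shader needs MEC firmware support; 153 is presumably
+ * the first firmware version that provides it */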
+ if (adev->gfx.mec_fw_version >= 153) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
+
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -901,6 +1084,13 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
+ /* Bad opcode Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
+ GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
+ &adev->gfx.bad_op_irq);
+ if (r)
+ return r;
+
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
@@ -976,10 +1166,19 @@ static int gfx_v9_4_3_sw_init(void *handle)
return r;
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev)) {
r = amdgpu_gfx_sysfs_init(adev);
+ if (r)
+ return r;
+ }
- return r;
+ gfx_v9_4_3_alloc_ip_dump(adev);
+
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ return r;
+
+ return 0;
}
static int gfx_v9_4_3_sw_fini(void *handle)
@@ -997,11 +1196,17 @@ static int gfx_v9_4_3_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev, i);
}
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
if (!amdgpu_sriov_vf(adev))
amdgpu_gfx_sysfs_fini(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+
+ kfree(adev->gfx.ip_dump_core);
+ kfree(adev->gfx.ip_dump_compute_queues);
return 0;
}
@@ -1496,7 +1701,15 @@ static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
- (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
adev->gfx.kiq[xcc_id].ring.sched.ready = false;
}
udelay(50);
@@ -1910,7 +2123,7 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
return 0;
}
-static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
+static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -1922,8 +2135,8 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
*/
tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
- if (!tmp_mqd->cp_hqd_pq_control ||
- (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+ if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -2008,7 +2221,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -2035,6 +2248,8 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
if (r)
return r;
+ } else {
+ gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}
r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
@@ -2094,12 +2309,6 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable,
- int xcc_id)
-{
- gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id);
-}
-
static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
{
if (amdgpu_gfx_disable_kcq(adev, xcc_id))
@@ -2131,7 +2340,7 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
}
gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
- gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
+ gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}
static int gfx_v9_4_3_hw_init(void *handle)
@@ -2139,6 +2348,9 @@ static int gfx_v9_4_3_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
+
if (!amdgpu_sriov_vf(adev))
gfx_v9_4_3_init_golden_registers(adev);
@@ -2162,6 +2374,7 @@ static int gfx_v9_4_3_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
@@ -2327,6 +2540,10 @@ static int gfx_v9_4_3_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
+ if (r)
+ return r;
+
if (adev->gfx.ras &&
adev->gfx.ras->enable_watchdog_timer)
adev->gfx.ras->enable_watchdog_timer(adev);
@@ -2833,6 +3050,24 @@ static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
+static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
+ unsigned vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t value = 0;
+
+ if (!adev->debug_exp_resets)
+ return;
+
+ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
+}
+
static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
struct amdgpu_device *adev, int me, int pipe,
enum amdgpu_interrupt_state state, int xcc_id)
@@ -2886,21 +3121,103 @@ static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
}
}
+static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
+ int xcc_id, int me, int pipe)
+{
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+ if (me != 1)
+ return 0;
+
+ switch (pipe) {
+ case 0:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
+ case 1:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
+ case 2:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
+ case 3:
+ return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
+ default:
+ return 0;
+ }
+}
+
static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
- int i, num_xcc;
+ u32 mec_int_cntl_reg, mec_int_cntl;
+ int i, j, k, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- for (i = 0; i < num_xcc; i++)
+ for (i = 0; i < num_xcc; i++) {
WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (j = 0; j < adev->gfx.mec.num_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ /* MECs start at 1 */
+ mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
+
+ if (mec_int_cntl_reg) {
+ mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ?
+ 1 : 0);
+ WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
+ }
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 mec_int_cntl_reg, mec_int_cntl;
+ int i, j, k, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < num_xcc; i++) {
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ for (j = 0; j < adev->gfx.mec.num_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ /* MECs start at 1 */
+ mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
+
+ if (mec_int_cntl_reg) {
+ mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ OPCODE_ERROR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ?
+ 1 : 0);
+ WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
+ }
+ }
+ }
+ }
break;
default:
break;
@@ -3061,6 +3378,15 @@ static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal opcode in command stream\n");
+ gfx_v9_4_3_fault(adev, entry);
+ return 0;
+}
+
static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -3147,6 +3473,183 @@ static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
}
+static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
+ uint32_t pipe, uint32_t queue,
+ uint32_t xcc_id)
+{
+ int i, r;
+ /* make sure dequeue is complete */
+ gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ else
+ r = 0;
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
+
+ return r;
+}
+
+static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* TODO: check whether the gfx9.4.4 MEC firmware supports pipe reset as well. */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ adev->gfx.mec_fw_version >= 0x0000009b)
+ return true;
+ else
+ dev_warn_once(adev->dev, "Please use the latest MEC firmware to check whether pipe reset is supported\n");
+
+ return false;
+}
+
+static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe, clean_pipe;
+ int r;
+
+ if (!gfx_v9_4_3_pipe_reset_support(adev))
+ return -EINVAL;
+
+ gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+
+ reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (ring->pipe)
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ else
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ }
+
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
+
+ r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
+ return r;
+}
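+/*
+ * Pipe reset sketch: CP_MEC_CNTL is read once, a copy with the matching
+ * MEC_MEx_PIPEy_RESET bit set is written to assert the reset, and the
+ * unmodified copy is written right after to release it; completion is then
+ * confirmed by polling CP_HQD_ACTIVE through gfx_v9_4_3_unmap_done().
+ */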
+
+static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int r;
+
+ if (!adev->debug_exp_resets)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
+ ring->name);
+ goto pipe_reset;
+ }
+
+ r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
+ if (r)
+ dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
+
+pipe_reset:
+ if (r) {
+ r = gfx_v9_4_3_reset_hw_pipe(ring);
+ dev_info(adev->dev, "ring %s pipe reset %s\n", ring->name,
+ r ? "failed" : "succeeded");
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "failed to reserve mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
+ if (r) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ dev_err(adev->dev, "fail to remap queue\n");
+ return r;
+ }
+ return amdgpu_ring_test_ring(ring);
+}
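+/*
+ * Unlike the gfx_v9_0 variant, this reset escalates: if the KIQ ring test
+ * or the HQD-deactivate wait fails, it falls through to a full pipe reset
+ * via gfx_v9_4_3_reset_hw_pipe() before restoring the MQD and remapping
+ * the queue through the KIQ.
+ */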
+
enum amdgpu_gfx_cp_ras_mem_id {
AMDGPU_GFX_CP_MEM1 = 1,
AMDGPU_GFX_CP_MEM2,
@@ -3959,8 +4462,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
/* the caller should make sure initialize value of
* err_data->ue_count and err_data->ce_count
*/
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
}
static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
@@ -4062,6 +4565,151 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
}
+static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
+{
+ int i;
+
+ /* Header itself is a NOP packet */
+ if (num_nop == 1) {
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ return;
+ }
+
+ /* Max HW optimization till 0x3ffe, followed by remaining NOPs one at a time */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
+
+ /* Header is at index 0, followed by num_nop - 1 NOP packets */
+ for (i = 1; i < num_nop; i++)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i, j, k;
+ uint32_t xcc_id, xcc_offset, inst_offset;
+ uint32_t num_xcc, reg, num_inst;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+
+ if (!adev->gfx.ip_dump_core)
+ return;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ drm_printf(p, "Number of Instances:%d\n", num_xcc);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count;
+ drm_printf(p, "\nInstance id:%d\n", xcc_id);
+ for (i = 0; i < reg_count; i++)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_reg_list_9_4_3[i].reg_name,
+ adev->gfx.ip_dump_core[xcc_offset + i]);
+ }
+
+ /* print compute queue registers for all instances */
+ if (!adev->gfx.ip_dump_compute_queues)
+ return;
+
+ num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
+ num_xcc,
+ adev->gfx.mec.num_mec,
+ adev->gfx.mec.num_pipe_per_mec,
+ adev->gfx.mec.num_queue_per_pipe);
+
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count * num_inst;
+ inst_offset = 0;
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+ drm_printf(p,
+ "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
+ xcc_id, i, j, k);
+ for (reg = 0; reg < reg_count; reg++) {
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9_4_3[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
+ }
+ inst_offset += reg_count;
+ }
+ }
+ }
+ }
+}
+
+static void gfx_v9_4_3_ip_dump(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t i, j, k;
+ uint32_t num_xcc, reg, num_inst;
+ uint32_t xcc_id, xcc_offset, inst_offset;
+ uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
+
+ if (!adev->gfx.ip_dump_core)
+ return;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count;
+ for (i = 0; i < reg_count; i++)
+ adev->gfx.ip_dump_core[xcc_offset + i] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
+ GET_INST(GC, xcc_id)));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+
+ /* dump compute queue registers for all instances */
+ if (!adev->gfx.ip_dump_compute_queues)
+ return;
+
+ num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+ reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->srbm_mutex);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ xcc_offset = xcc_id * reg_count * num_inst;
+ inst_offset = 0;
+ for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+ for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+ for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+ /* ME0 is for GFX so start from 1 for CP */
+ soc15_grbm_select(adev, 1 + i, j, k, 0,
+ GET_INST(GC, xcc_id));
+
+ for (reg = 0; reg < reg_count; reg++) {
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(
+ gc_cp_reg_list_9_4_3[reg],
+ GET_INST(GC, xcc_id)));
+ }
+ inst_offset += reg_count;
+ }
+ }
+ }
+ }
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
+}
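+/*
+ * Dump buffer layout (mirroring gfx_v9_4_3_alloc_ip_dump): the core array
+ * is indexed as [xcc_id * reg_count + i]; the compute-queue array flattens
+ * [xcc][mec][pipe][queue][reg] through xcc_offset and inst_offset, so
+ * gfx_v9_4_3_ip_print() above can walk it back with the same arithmetic.
+ */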
+
+static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
.name = "gfx_v9_4_3",
.early_init = gfx_v9_4_3_early_init,
@@ -4078,8 +4726,8 @@ static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
.set_powergating_state = gfx_v9_4_3_set_powergating_state,
.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = gfx_v9_4_3_ip_dump,
+ .print_ip_state = gfx_v9_4_3_ip_print,
};
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
@@ -4101,7 +4749,8 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_4_3_emit_mem_sync */
5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
- 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
+ 15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
+ 2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
.emit_fence = gfx_v9_4_3_ring_emit_fence,
@@ -4111,13 +4760,18 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
.test_ring = gfx_v9_4_3_ring_test_ring,
.test_ib = gfx_v9_4_3_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = gfx_v9_4_3_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v9_4_3_ring_soft_recovery,
.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
+ .reset = gfx_v9_4_3_reset_kcq,
+ .emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
@@ -4172,6 +4826,11 @@ static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
.process = gfx_v9_4_3_priv_reg_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
+ .set = gfx_v9_4_3_set_bad_op_fault_state,
+ .process = gfx_v9_4_3_bad_op_irq,
+};
+
static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
.set = gfx_v9_4_3_set_priv_inst_fault_state,
.process = gfx_v9_4_3_priv_inst_irq,
@@ -4185,6 +4844,9 @@ static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
+ adev->gfx.bad_op_irq.num_types = 1;
+ adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
+
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm
new file mode 100644
index 000000000000..d5325ef80ab0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To enable this shader program for compilation, change this shader to main and the lower shader main to main_1.
+
+// MI300 : Clear SGPRs, VGPRs and LDS
+// Uses two kernels launched separately:
+// 1. Clean VGPRs, LDS, and lower SGPRs
+// Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
+// Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
+// Waves in the workgroup share the 64KB of LDS
+// Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
+// Each wave clears 128 VGPRs, so all 512 in the SIMD
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+// 2. Clean remaining SGPRs
+// Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU
+// Each wave allocates 96 SGPRs
+// CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
+// As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
+// Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
+// Instead, the shader starts with an S_SETHALT 1. Once all waves are launched, CP will send an unhalt command
+// The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
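+// Arithmetic check: kernel 1 covers 4 waves x 96 SGPRs = physical SGPRs 0-383 per SIMD,
+// kernel 2 covers 6 waves x 96 SGPRs = 576 registers, i.e. physical SGPRs 224-799.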
+
+shader main
+ asic(MI300)
+ type(CS)
+ wave_size(64)
+// Note: original source code from SQ team
+
+// (theoretical fastest = ~512 clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)
+
+ s_cmp_eq_u32 s0, 1 // If bit0 of sgpr0 is set (FW sets it via COMPUTE_USER_DATA_3), clear VGPRs and LDS
+ s_cbranch_scc0 label_0023 // scc = (s0 == 1); branch to the SGPR-only kernel when sgpr0 is not set
+ S_BARRIER
+
+ s_movk_i32 m0, 0x0000
+ s_mov_b32 s2, 0x00000078 // Loop 128/8=16 times (loop unrolled for performance)
+ //
+ // CLEAR VGPRs
+ //
+ s_set_gpr_idx_on s2, 0x8 // enable Dest VGPR indexing
+label_0005:
+ v_mov_b32 v0, 0
+ v_mov_b32 v1, 0
+ v_mov_b32 v2, 0
+ v_mov_b32 v3, 0
+ v_mov_b32 v4, 0
+ v_mov_b32 v5, 0
+ v_mov_b32 v6, 0
+ v_mov_b32 v7, 0
+ s_sub_u32 s2, s2, 8
+ s_set_gpr_idx_idx s2
+ s_cbranch_scc0 label_0005
+ s_set_gpr_idx_off
+
+ //
+ //
+
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s1 // sgpr1 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_cbranch_scc0 label_clean_sgpr_1 // Clean LDS if its first wave of ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread addresses one double-dword (8 bytes)
+ s_mov_b32 s2, 0x0000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
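+ // 64 passes, base advancing 0x400 bytes per pass: 64 x 1KB = 64KB total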
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+ //
+ // CLEAR SGPRs
+ //
+label_clean_sgpr_1:
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
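+ // s_movreld_b32 sN writes SGPR[m0+N]; m0 steps 92 -> 0 by 4, so 24 passes clear s0..s95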
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+s_endpgm
+
+label_0023:
+
+ s_sethalt 1
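+ // Halt here; CP sends the unhalt command once all waves have launched
+ // (S_BARRIER cannot be used with >16 waves per workgroup)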
+
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop1:
+
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop1
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0xee //set vcc to 0xee
+
+s_endpgm
+end
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h
new file mode 100644
index 000000000000..69aa567c6c1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Define the cleaner shader gfx_9_4_3 */
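+/* Assembled dwords of the MI300 cleaner shader source above */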
+static const u32 gfx_9_4_3_cleaner_shader_hex[] = {
+ 0xbf068100, 0xbf84003b,
+ 0xbf8a0000, 0xb07c0000,
+ 0xbe8200ff, 0x00000078,
+ 0xbf110802, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0x7e060280, 0x7e080280,
+ 0x7e0a0280, 0x7e0c0280,
+ 0x7e0e0280, 0x80828802,
+ 0xbe803202, 0xbf84fff5,
+ 0xbf9c0000, 0xbe8200ff,
+ 0x80000000, 0x86020102,
+ 0xbf840011, 0xbefe00c1,
+ 0xbeff00c1, 0xd28c0001,
+ 0x0001007f, 0xd28d0001,
+ 0x0002027e, 0x10020288,
+ 0xbe8200bf, 0xbefc00c1,
+ 0xd89c2000, 0x00020201,
+ 0xd89c6040, 0x00040401,
+ 0x320202ff, 0x00000400,
+ 0x80828102, 0xbf84fff8,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea0180,
+ 0xbeec0180, 0xbeee0180,
+ 0xbef00180, 0xbef20180,
+ 0xbef40180, 0xbef60180,
+ 0xbef80180, 0xbefa0180,
+ 0xbf810000, 0xbf8d0001,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea01ff,
+ 0x000000ee, 0xbf810000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index d200310d1731..0e3ddea7b8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -443,23 +443,6 @@ static void gfxhub_v1_0_init(struct amdgpu_device *adev)
mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
-static bool gfxhub_v1_0_query_utcl2_poison_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- u32 status = 0;
- struct amdgpu_vmhub *hub;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
- return false;
-
- hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
- status = RREG32(hub->vm_l2_pro_fault_status);
- /* reset page fault status */
- WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
-
- return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
-}
-
const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
@@ -468,5 +451,4 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
.init = gfxhub_v1_0_init,
.get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
- .query_utcl2_poison_status = gfxhub_v1_0_query_utcl2_poison_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 72109abe7c86..ed8e130c7d19 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -622,22 +622,6 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
-static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- u32 fed, status;
-
- status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS);
- fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
- if (!amdgpu_sriov_vf(adev)) {
- /* clear page fault status and address */
- WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
- regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
- }
-
- return fed;
-}
-
const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
@@ -646,7 +630,6 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
.init = gfxhub_v1_2_init,
.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
- .query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status,
};
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f0ceab3ce5bf..9784a2892185 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -132,7 +132,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Try to handle the recoverable page faults by filling page
* tables
*/
- if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault))
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
return 1;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index b73136d390cc..c76ac0dfe572 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -595,7 +595,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
cam_index = entry->src_data[2] & 0x3ff;
ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
- addr, write_fault);
+ addr, entry->timestamp, write_fault);
WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
if (ret)
return 1;
@@ -618,7 +618,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* tables
*/
if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
- addr, write_fault))
+ addr, entry->timestamp, write_fault))
return 1;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 077c6d920e27..e019249883fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -41,7 +41,7 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index a9ea23fa0def..ed7facacf2fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -32,7 +32,7 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index ab06c2b4b20b..33736d361dd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -35,7 +35,7 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 8d7d0813e331..1c99bb09e2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -32,7 +32,7 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 6c1891889c4d..d4f72e47ae9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -153,7 +153,7 @@ static void imu_v11_0_setup(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_16, imu_reg_val);
}
- //disble imu Rtavfs, SmsRepair, DfllBTC, and ClkB
+ //disable imu Rtavfs, SmsRepair, DfllBTC, and ClkB
imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10);
imu_reg_val |= 0x10007;
WREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10, imu_reg_val);
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
index aac107898bae..964c29ef25dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c
@@ -42,23 +42,23 @@ static const unsigned int isp_4_1_0_int_srcid[MAX_ISP410_INT_SRC] = {
static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
{
struct amdgpu_device *adev = isp->adev;
+ int idx, int_idx, num_res, r;
u64 isp_base;
- int int_idx;
- int r;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- isp->isp_res = kcalloc(MAX_ISP410_INT_SRC + 1, sizeof(struct resource),
+ num_res = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES + MAX_ISP410_INT_SRC;
+ isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
@@ -83,22 +83,53 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_res[0].start = isp_base;
isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END;
- for (int_idx = 0; int_idx < MAX_ISP410_INT_SRC; int_idx++) {
- isp->isp_res[int_idx + 1].name = "isp_4_1_0_irq";
- isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ;
- isp->isp_res[int_idx + 1].start =
+ isp->isp_res[1].name = "isp_4_1_phy0_reg";
+ isp->isp_res[1].flags = IORESOURCE_MEM;
+ isp->isp_res[1].start = isp_base + ISP410_PHY0_OFFSET;
+ isp->isp_res[1].end = isp_base + ISP410_PHY0_OFFSET + ISP410_PHY0_SIZE;
+
+ isp->isp_res[2].name = "isp_gpio_sensor0_reg";
+ isp->isp_res[2].flags = IORESOURCE_MEM;
+ isp->isp_res[2].start = isp_base + ISP410_GPIO_SENSOR0_OFFSET;
+ isp->isp_res[2].end = isp_base + ISP410_GPIO_SENSOR0_OFFSET +
+ ISP410_GPIO_SENSOR0_SIZE;
+
+ for (idx = MAX_ISP410_MEM_RES + MAX_ISP410_SENSOR_RES, int_idx = 0;
+ idx < num_res; idx++, int_idx++) {
+ isp->isp_res[idx].name = "isp_4_1_0_irq";
+ isp->isp_res[idx].flags = IORESOURCE_IRQ;
+ isp->isp_res[idx].start =
amdgpu_irq_create_mapping(adev, isp_4_1_0_int_srcid[int_idx]);
- isp->isp_res[int_idx + 1].end =
- isp->isp_res[int_idx + 1].start;
+ isp->isp_res[idx].end =
+ isp->isp_res[idx].start;
}
isp->isp_cell[0].name = "amd_isp_capture";
- isp->isp_cell[0].num_resources = MAX_ISP410_INT_SRC + 1;
+ isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource),
+ GFP_KERNEL);
+ if (!isp->isp_i2c_res) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_i2c_res[0].name = "isp_i2c0_reg";
+ isp->isp_i2c_res[0].flags = IORESOURCE_MEM;
+ isp->isp_i2c_res[0].start = isp_base + ISP410_I2C0_OFFSET;
+ isp->isp_i2c_res[0].end = isp_base + ISP410_I2C0_OFFSET + ISP410_I2C0_SIZE;
+
+ isp->isp_cell[1].name = "amd_isp_i2c_designware";
+ isp->isp_cell[1].num_resources = 1;
+ isp->isp_cell[1].resources = &isp->isp_i2c_res[0];
+ isp->isp_cell[1].platform_data = isp->isp_pdata;
+ isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
goto failure;
@@ -111,6 +142,7 @@ failure:
kfree(isp->isp_pdata);
kfree(isp->isp_res);
kfree(isp->isp_cell);
+ kfree(isp->isp_i2c_res);
return r;
}
@@ -122,6 +154,7 @@ static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
+ kfree(isp->isp_i2c_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
index 315f2822410c..7db24c0f1080 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h
@@ -32,8 +32,19 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
+#define MAX_ISP410_MEM_RES 2
+#define MAX_ISP410_SENSOR_RES 1
#define MAX_ISP410_INT_SRC 8
+#define ISP410_PHY0_OFFSET 0x66700
+#define ISP410_PHY0_SIZE 0xD30
+
+#define ISP410_I2C0_OFFSET 0x66400
+#define ISP410_I2C0_SIZE 0x100
+
+#define ISP410_GPIO_SENSOR0_OFFSET 0x6613C
+#define ISP410_GPIO_SENSOR0_SIZE 0x4
+
void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 4e17fa03f7b5..b56f27295468 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -42,23 +42,24 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
struct amdgpu_device *adev = isp->adev;
+ int idx, int_idx, num_res, r;
u64 isp_base;
- int int_idx;
- int r;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
isp_base = adev->rmmio_base;
- isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL);
+ isp->isp_cell = kcalloc(2, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__);
goto failure;
}
- isp->isp_res = kcalloc(MAX_ISP411_INT_SRC + 1, sizeof(struct resource),
+ num_res = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES + MAX_ISP411_INT_SRC;
+
+ isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
@@ -83,22 +84,52 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_res[0].start = isp_base;
isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END;
- for (int_idx = 0; int_idx < MAX_ISP411_INT_SRC; int_idx++) {
- isp->isp_res[int_idx + 1].name = "isp_4_1_1_irq";
- isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ;
- isp->isp_res[int_idx + 1].start =
+ isp->isp_res[1].name = "isp_4_1_1_phy0_reg";
+ isp->isp_res[1].flags = IORESOURCE_MEM;
+ isp->isp_res[1].start = isp_base + ISP411_PHY0_OFFSET;
+ isp->isp_res[1].end = isp_base + ISP411_PHY0_OFFSET + ISP411_PHY0_SIZE;
+
+ isp->isp_res[2].name = "isp_4_1_1_sensor0_reg";
+ isp->isp_res[2].flags = IORESOURCE_MEM;
+ isp->isp_res[2].start = isp_base + ISP411_GPIO_SENSOR0_OFFSET;
+ isp->isp_res[2].end = isp_base + ISP411_GPIO_SENSOR0_OFFSET +
+ ISP411_GPIO_SENSOR0_SIZE;
+
+ for (idx = MAX_ISP411_MEM_RES + MAX_ISP411_SENSOR_RES, int_idx = 0;
+ idx < num_res; idx++, int_idx++) {
+ isp->isp_res[idx].name = "isp_4_1_1_irq";
+ isp->isp_res[idx].flags = IORESOURCE_IRQ;
+ isp->isp_res[idx].start =
amdgpu_irq_create_mapping(adev, isp_4_1_1_int_srcid[int_idx]);
- isp->isp_res[int_idx + 1].end =
- isp->isp_res[int_idx + 1].start;
+ isp->isp_res[idx].end =
+ isp->isp_res[idx].start;
}
isp->isp_cell[0].name = "amd_isp_capture";
- isp->isp_cell[0].num_resources = MAX_ISP411_INT_SRC + 1;
+ isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1);
+ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
+ if (!isp->isp_i2c_res) {
+ r = -ENOMEM;
+ DRM_ERROR("%s: isp mfd res alloc failed\n", __func__);
+ goto failure;
+ }
+
+ isp->isp_i2c_res[0].name = "isp_i2c0_reg";
+ isp->isp_i2c_res[0].flags = IORESOURCE_MEM;
+ isp->isp_i2c_res[0].start = isp_base + ISP411_I2C0_OFFSET;
+ isp->isp_i2c_res[0].end = isp_base + ISP411_I2C0_OFFSET + ISP411_I2C0_SIZE;
+
+ isp->isp_cell[1].name = "amd_isp_i2c_designware";
+ isp->isp_cell[1].num_resources = 1;
+ isp->isp_cell[1].resources = &isp->isp_i2c_res[0];
+ isp->isp_cell[1].platform_data = isp->isp_pdata;
+ isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
+
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
DRM_ERROR("%s: add mfd hotplug device failed\n", __func__);
goto failure;
@@ -111,6 +142,7 @@ failure:
kfree(isp->isp_pdata);
kfree(isp->isp_res);
kfree(isp->isp_cell);
+ kfree(isp->isp_i2c_res);
return r;
}
@@ -122,6 +154,7 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
kfree(isp->isp_res);
kfree(isp->isp_cell);
kfree(isp->isp_pdata);
+ kfree(isp->isp_i2c_res);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
index dfb9522c9d6a..40887ddeb08c 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h
@@ -32,8 +32,19 @@
#include "ivsrcid/isp/irqsrcs_isp_4_1.h"
+#define MAX_ISP411_MEM_RES 2
+#define MAX_ISP411_SENSOR_RES 1
#define MAX_ISP411_INT_SRC 8
+#define ISP411_PHY0_OFFSET 0x66700
+#define ISP411_PHY0_SIZE 0xD30
+
+#define ISP411_I2C0_OFFSET 0x66400
+#define ISP411_I2C0_SIZE 0x100
+
+#define ISP411_GPIO_SENSOR0_OFFSET 0x6613C
+#define ISP411_GPIO_SENSOR0_SIZE 0x4
+
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index b55041f38cec..86958cb2c2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -59,6 +59,12 @@ static int amdgpu_ih_srcid_jpeg[] = {
VCN_4_0__SRCID__JPEG7_DECODE
};
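+/* Normalized (local) register offsets are required on SRIOV VFs and on GC 9.4.4 */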
+static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_vf(adev) ||
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4));
+}
+
/**
* jpeg_v4_0_3_early_init - set function pointers
*
@@ -734,32 +740,20 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
amdgpu_ring_write(ring, 0);
- if (ring->adev->jpeg.inst[ring->me].aid_id) {
- amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x4);
- } else {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x3fbc);
- if (ring->adev->jpeg.inst[ring->me].aid_id) {
- amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
- 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x0);
- } else {
- amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
- amdgpu_ring_write(ring, 0);
- }
-
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x1);
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
amdgpu_ring_write(ring, 0);
}
@@ -834,8 +828,8 @@ void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
{
uint32_t reg_offset;
- /* For VF, only local offsets should be used */
- if (amdgpu_sriov_vf(ring->adev))
+ /* Use normalized offsets if required */
+ if (jpeg_v4_0_3_normalizn_reqd(ring->adev))
reg = NORMALIZE_JPEG_REG_OFFSET(reg);
reg_offset = (reg << 2);
@@ -881,8 +875,8 @@ void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint
{
uint32_t reg_offset;
- /* For VF, only local offsets should be used */
- if (amdgpu_sriov_vf(ring->adev))
+ /* Use normalized offsets if required */
+ if (jpeg_v4_0_3_normalizn_reqd(ring->adev))
reg = NORMALIZE_JPEG_REG_OFFSET(reg);
reg_offset = (reg << 2);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 8aded0a67037..231a3d490ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
+#include "gfx_v11_0.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
@@ -160,7 +161,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
int api_status_off)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
- signed long timeout = 3000000; /* 3000 ms */
+ signed long timeout = 2100000; /* 2100 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring[0];
struct MES_API_STATUS *api_status;
@@ -360,6 +361,100 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = mes->adev;
+ uint32_t value;
+ int i, r = 0;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
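+ /* RLC safe mode keeps GFX out of power-saving states while the reset registers below are programmed */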
+
+ if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
+ me_id, pipe_id, queue_id, vmid);
+
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ gfx_v11_0_request_gfx_index_mutex(adev, true);
+ /* all se allow writes */
+ WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
+ gfx_v11_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ /* wait till dequeue take effects */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* wait till dequeue take effects */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
+static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ if (input->use_mmio)
+ return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
+ /*mes_reset_queue_pkt.reset_queue_only = 1;*/
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -421,13 +516,41 @@ static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
- return 0;
+ union MESAPI__SUSPEND mes_suspend_gang_pkt;
+
+ memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));
+
+ mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
+ mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
+ mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
+ mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
+ mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
+ offsetof(union MESAPI__SUSPEND, api_status));
}
static int mes_v11_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
- return 0;
+ union MESAPI__RESUME mes_resume_gang_pkt;
+
+ memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));
+
+ mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
+ mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
+ mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
+ offsetof(union MESAPI__RESUME, api_status));
}
static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
@@ -595,6 +718,43 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
+static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ if (input->use_mmio)
+ return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+
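+ /* legacy GFX queues are reset by pipe/queue id and MQD; other queues by doorbell offset */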
+ if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ mes_reset_queue_pkt.reset_legacy_gfx = 1;
+ mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
+ mes_reset_queue_pkt.queue_id_lp = input->queue_id;
+ mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
+ mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
+ mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
+ mes_reset_queue_pkt.vmid_id_lp = input->vmid;
+ } else {
+ mes_reset_queue_pkt.reset_queue_only = 1;
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ }
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -603,6 +763,8 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
+ .reset_legacy_queue = mes_v11_0_reset_legacy_queue,
+ .reset_hw_queue = mes_v11_0_reset_hw_queue,
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index a79a8adc3aa5..8d27421689c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -146,7 +146,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
int api_status_off)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
- signed long timeout = 3000000; /* 3000 ms */
+ signed long timeout = 2100000; /* 2100 ms */
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring[pipe];
spinlock_t *ring_lock = &mes->ring_lock[pipe];
@@ -350,6 +350,32 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+ int pipe;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
+ /*mes_reset_queue_pkt.reset_queue_only = 1;*/
+
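+ /* unified MES routes scheduler packets through the KIQ pipe */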
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -453,6 +479,11 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
union MESAPI__MISC misc_pkt;
int pipe;
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
memset(&misc_pkt, 0, sizeof(misc_pkt));
misc_pkt.header.type = MES_API_TYPE_SCHEDULER;
@@ -487,6 +518,7 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
break;
case MES_MISC_OP_SET_SHADER_DEBUGGER:
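+ /* the set-shader-debugger op is handled only on the scheduler pipe */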
+ pipe = AMDGPU_MES_SCHED_PIPE;
misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
misc_pkt.set_shader_debugger.process_context_addr =
input->set_shader_debugger.process_context_addr;
@@ -504,11 +536,6 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
return -EINVAL;
}
- if (mes->adev->enable_uni_mes)
- pipe = AMDGPU_MES_KIQ_PIPE;
- else
- pipe = AMDGPU_MES_SCHED_PIPE;
-
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&misc_pkt, sizeof(misc_pkt),
offsetof(union MESAPI__MISC, api_status));
@@ -582,6 +609,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.disable_mes_log = 1;
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
+ mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
/*
* Keep oversubscribe timer for sdma . When we have unmapped doorbell
@@ -676,6 +704,44 @@ static void mes_v12_0_enable_unmapped_doorbell_handling(
WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}
+static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
+ struct mes_reset_legacy_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+ int pipe;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+
+ if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ mes_reset_queue_pkt.reset_legacy_gfx = 1;
+ mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
+ mes_reset_queue_pkt.queue_id_lp = input->queue_id;
+ mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
+ mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
+ mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
+ mes_reset_queue_pkt.vmid_id_lp = input->vmid;
+ } else {
+ mes_reset_queue_pkt.reset_queue_only = 1;
+ mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
+ }
+
+ if (mes->adev->enable_uni_mes)
+ pipe = AMDGPU_MES_KIQ_PIPE;
+ else
+ pipe = AMDGPU_MES_SCHED_PIPE;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -684,6 +750,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.suspend_gang = mes_v12_0_suspend_gang,
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
+ .reset_legacy_queue = mes_v12_0_reset_legacy_queue,
+ .reset_hw_queue = mes_v12_0_reset_hw_queue,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 621761a17ac7..b01bb759d0f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -559,22 +559,6 @@ static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
}
-static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
- int hub_inst)
-{
- u32 fed, status;
-
- status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS);
- fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
- if (!amdgpu_sriov_vf(adev)) {
- /* clear page fault status and address */
- WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
- regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1);
- }
-
- return fed;
-}
-
const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.get_fb_location = mmhub_v1_8_get_fb_location,
.init = mmhub_v1_8_init,
@@ -584,7 +568,6 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
.set_clockgating = mmhub_v1_8_set_clockgating,
.get_clockgating = mmhub_v1_8_get_clockgating,
- .query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status,
};
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
@@ -670,8 +653,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
}
static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index caf616a2c8a6..1d099ffb3a5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -25,7 +25,7 @@
#define __MXGPU_NV_H__
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
-#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 15000
#define NV_MAILBOX_POLL_FLR_TIMEDOUT 10000
#define NV_MAILBOX_POLL_MSG_REP_MAX 11
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index fa479dfa1ec1..739fce4fa8fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -365,7 +365,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
} else {
- /* Disbale ASPM L1 */
+ /* Disable ASPM L1 */
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
/* Disable ASPM TxL0s */
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 1251ee38a676..51e470e8d67d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -81,6 +81,8 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin");
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
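+/* MP1 scratch register (byte offset) that reports the running PMFW version */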
+#define regMP1_PUB_SCRATCH0 0x3b10090
+
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -807,6 +809,20 @@ static bool psp_v13_0_get_ras_capability(struct psp_context *psp)
}
}
+static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ u32 pmfw_ver;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
+ return false;
+
+ /* load the aux (4e) version of sos if the pmfw version is less than 85.115.0 (0x557300) */
+ pmfw_ver = RREG32(regMP1_PUB_SCRATCH0 / 4);
+
+ return (pmfw_ver < 0x557300);
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -830,6 +846,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
+ .is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index aa637541da58..e65194fe94af 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -710,7 +710,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
if (ring->use_pollmem) {
- /*wptr polling is not enogh fast, directly clean the wptr register */
+ /* wptr polling is not fast enough; directly clear the wptr register */
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 772604feb6ac..23ef4eb36b40 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -72,6 +72,53 @@ MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin");
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL)
+};
+
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -1750,6 +1797,8 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1870,6 +1919,13 @@ static int sdma_v4_0_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1890,6 +1946,8 @@ static int sdma_v4_0_sw_fini(void *handle)
else
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -2292,6 +2350,48 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v4_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
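+ /* GFXOFF is disabled here so the register reads below return live values */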
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v4_0_get_reg_offset(adev, i,
+ sdma_reg_list_4_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.name = "sdma_v4_0",
.early_init = sdma_v4_0_early_init,
@@ -2308,6 +2408,8 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.set_clockgating_state = sdma_v4_0_set_clockgating_state,
.set_powergating_state = sdma_v4_0_set_powergating_state,
.get_clockgating_state = sdma_v4_0_get_clockgating_state,
+ .dump_ip_state = sdma_v4_0_dump_ip_state,
+ .print_ip_state = sdma_v4_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 2c55bfd935bb..c77889040760 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -46,6 +46,53 @@
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA_VM_CNTL)
+};
+
#define mmSMNAID_AID0_MCA_SMU 0x03b30400
#define WREG32_SDMA(instance, offset, value) \
@@ -1291,6 +1338,8 @@ static int sdma_v4_4_2_sw_init(void *handle)
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 aid_id;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
@@ -1386,6 +1435,13 @@ static int sdma_v4_4_2_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1406,6 +1462,8 @@ static int sdma_v4_4_2_sw_fini(void *handle)
else
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1799,6 +1857,48 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
+static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_4_2[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v4_4_2_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v4_4_2_get_reg_offset(adev, i,
+ sdma_reg_list_4_4_2[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
.name = "sdma_v4_4_2",
.early_init = sdma_v4_4_2_early_init,
@@ -1815,6 +1915,8 @@ const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
.set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
.set_powergating_state = sdma_v4_4_2_set_powergating_state,
.get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
+ .dump_ip_state = sdma_v4_4_2_dump_ip_state,
+ .print_ip_state = sdma_v4_4_2_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
@@ -2141,7 +2243,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
}
static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b7d33d78bce0..3e48ea38385d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -59,6 +59,55 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
+};
+
static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1341,6 +1390,8 @@ static int sdma_v5_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
@@ -1378,6 +1429,13 @@ static int sdma_v5_0_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1391,6 +1449,8 @@ static int sdma_v5_0_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, false);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1718,7 +1778,49 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
+static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v5_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v5_0_get_reg_offset(adev, i,
+ sdma_reg_list_5_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
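Note the bracketing with amdgpu_gfx_off_ctrl(): these SDMA registers sit behind the GC block, and MMIO access while GFXOFF is engaged is not reliable, so the dump pins the GFX domain on for the duration of the reads. The pattern in isolation (a sketch of the guard, using only the calls seen above):

amdgpu_gfx_off_ctrl(adev, false);  /* veto GFXOFF while registers are read */
/* ... RREG32() every register of every instance ... */
amdgpu_gfx_off_ctrl(adev, true);   /* drop the veto, allow GFXOFF again */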
+
+static const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.name = "sdma_v5_0",
.early_init = sdma_v5_0_early_init,
.late_init = NULL,
@@ -1734,6 +1836,8 @@ const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.set_clockgating_state = sdma_v5_0_set_clockgating_state,
.set_powergating_state = sdma_v5_0_set_powergating_state,
.get_clockgating_state = sdma_v5_0_get_clockgating_state,
+ .dump_ip_state = sdma_v5_0_dump_ip_state,
+ .print_ip_state = sdma_v5_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
index d4e3c2e696f6..2ab71f21755a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h
@@ -24,7 +24,6 @@
#ifndef __SDMA_V5_0_H__
#define __SDMA_V5_0_H__
-extern const struct amd_ip_funcs sdma_v5_0_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v5_0_ip_block;
#endif /* __SDMA_V5_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 2e72d445415f..bc9b240a3488 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -60,6 +60,55 @@ MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin");
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_2[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
+};
+
static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1224,6 +1273,8 @@ static int sdma_v5_2_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+ uint32_t *ptr;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1255,6 +1306,13 @@ static int sdma_v5_2_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1268,6 +1326,8 @@ static int sdma_v5_2_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, true);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1676,7 +1736,49 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
amdgpu_gfx_off_ctrl(adev, true);
}
-const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
+static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_2[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v5_2_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v5_2_get_reg_offset(adev, i,
+ sdma_reg_list_5_2[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
+static const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,
.late_init = NULL,
@@ -1692,6 +1794,8 @@ const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.set_clockgating_state = sdma_v5_2_set_clockgating_state,
.set_powergating_state = sdma_v5_2_set_powergating_state,
.get_clockgating_state = sdma_v5_2_get_clockgating_state,
+ .dump_ip_state = sdma_v5_2_dump_ip_state,
+ .print_ip_state = sdma_v5_2_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
index b70414fef2a1..863145b3a77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h
@@ -24,7 +24,6 @@
#ifndef __SDMA_V5_2_H__
#define __SDMA_V5_2_H__
-extern const struct amd_ip_funcs sdma_v5_2_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v5_2_ip_block;
#endif /* __SDMA_V5_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index dab4c2db8c9d..208a1fa9d4e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -57,6 +57,63 @@ MODULE_FIRMWARE("amdgpu/sdma_6_1_2.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_6_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_CHECKSUM),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS),
+};
+
static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1239,6 +1296,8 @@ static int sdma_v6_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
@@ -1274,6 +1333,13 @@ static int sdma_v6_0_sw_init(void *handle)
return -EINVAL;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1287,6 +1353,8 @@ static int sdma_v6_0_sw_fini(void *handle)
amdgpu_sdma_destroy_inst_ctx(adev, true);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1488,6 +1556,48 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
}
+static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_6_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v6_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v6_0_get_reg_offset(adev, i,
+ sdma_reg_list_6_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.name = "sdma_v6_0",
.early_init = sdma_v6_0_early_init,
@@ -1505,6 +1615,8 @@ const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.set_clockgating_state = sdma_v6_0_set_clockgating_state,
.set_powergating_state = sdma_v6_0_set_powergating_state,
.get_clockgating_state = sdma_v6_0_get_clockgating_state,
+ .dump_ip_state = sdma_v6_0_dump_ip_state,
+ .print_ip_state = sdma_v6_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index ecee9e7d7e4c..a8763496aed3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -51,6 +51,64 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_REV),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_VM_CNTL),
+ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
+ SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS),
+};
+
static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -1022,13 +1080,16 @@ static void sdma_v7_0_vm_copy_pte(struct amdgpu_ib *ib,
unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_CPV(1);
+
ib->ptr[ib->length_dw++] = bytes - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = 0;
}
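Setting SDMA_PKT_COPY_LINEAR_HEADER_CPV(1) marks the packet as carrying one extra trailing dword (emitted as 0 above), so the copy-PTE packet grows from 7 to 8 dwords; the matching copy_pte_num_dw bump appears further down in this file. The resulting layout, reconstructed from the code above:

/* COPY_LINEAR with CPV set, 8 dwords total:
 * dw0: header (OP_COPY | SUBOP_COPY_LINEAR | CPV)
 * dw1: byte count - 1
 * dw2: src/dst endian swap
 * dw3: src addr low    dw4: src addr high
 * dw5: pe  addr low    dw6: pe  addr high
 * dw7: extra CPV dword (0 here)
 */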
@@ -1217,6 +1278,8 @@ static int sdma_v7_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+ uint32_t *ptr;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
@@ -1247,6 +1310,13 @@ static int sdma_v7_0_sw_init(void *handle)
return r;
}
+ /* Allocate memory for SDMA IP Dump buffer */
+ ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr)
+ adev->sdma.ip_dump = ptr;
+ else
+ DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+
return r;
}
@@ -1263,6 +1333,8 @@ static int sdma_v7_0_sw_fini(void *handle)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
sdma_v12_0_free_ucode_buffer(adev);
+ kfree(adev->sdma.ip_dump);
+
return 0;
}
@@ -1466,6 +1538,48 @@ static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags)
{
}
+static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+ uint32_t instance_offset;
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ drm_printf(p, "\nInstance:%d\n", i);
+
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_7_0[j].reg_name,
+ adev->sdma.ip_dump[instance_offset + j]);
+ }
+}
+
+static void sdma_v7_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t instance_offset;
+ uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
+
+ if (!adev->sdma.ip_dump)
+ return;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ instance_offset = i * reg_count;
+ for (j = 0; j < reg_count; j++)
+ adev->sdma.ip_dump[instance_offset + j] =
+ RREG32(sdma_v7_0_get_reg_offset(adev, i,
+ sdma_reg_list_7_0[j].reg_offset));
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+}
+
const struct amd_ip_funcs sdma_v7_0_ip_funcs = {
.name = "sdma_v7_0",
.early_init = sdma_v7_0_early_init,
@@ -1483,6 +1597,8 @@ const struct amd_ip_funcs sdma_v7_0_ip_funcs = {
.set_clockgating_state = sdma_v7_0_set_clockgating_state,
.set_powergating_state = sdma_v7_0_set_powergating_state,
.get_clockgating_state = sdma_v7_0_get_clockgating_state,
+ .dump_ip_state = sdma_v7_0_dump_ip_state,
+ .print_ip_state = sdma_v7_0_print_ip_state,
};
static const struct amdgpu_ring_funcs sdma_v7_0_ring_funcs = {
@@ -1631,7 +1747,7 @@ static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
- .copy_pte_num_dw = 7,
+ .copy_pte_num_dw = 8,
.copy_pte = sdma_v7_0_vm_copy_pte,
.write_pte = sdma_v7_0_vm_write_pte,
.set_pte_pde = sdma_v7_0_vm_set_pte_pde,
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
index e4e30b9d481b..c04fdd2d5b38 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
@@ -60,7 +60,7 @@ static void smuio_v9_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *f
{
u32 data;
- /* CGTT_ROM_CLK_CTRL0 is not availabe for APUs */
+ /* CGTT_ROM_CLK_CTRL0 is not available for APUs */
if (adev->flags & AMD_IS_APU)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 282584a48be0..ef7c603b50ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -93,6 +93,10 @@ struct soc15_ras_field_entry {
#define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
+/* Override the instance id */
+#define SOC15_REG_ENTRY_OFFSET_INST(entry, inst) \
+ (adev->reg_offset[entry.hwip][inst][entry.seg] + entry.reg_offset)
+
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
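Unlike SOC15_REG_ENTRY_OFFSET(), which indexes reg_offset with the instance baked into the entry, the new _INST variant substitutes a caller-chosen instance. That is what lets the VCN dump code later in this series declare a single reg list against instance 0 and still read every instance, roughly:

/* Sketch: read register j of VCN instance i via the shared table. */
uint32_t val = RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));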
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index e74e1983da53..b9cbeb389edc 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -413,6 +413,10 @@
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+#define PACKET3_RUN_CLEANER_SHADER 0xD2
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index b0c3678cfb31..fd4c3d4f8387 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -250,13 +250,6 @@ static void soc24_program_aspm(struct amdgpu_device *adev)
adev->nbio.funcs->program_aspm(adev);
}
-static void soc24_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
-{
- adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
-}
-
const struct amdgpu_ip_block_version soc24_common_ip_block = {
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
@@ -454,6 +447,11 @@ static int soc24_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_get_irq(adev);
+ /* Enable selfring doorbell aperture late because doorbell BAR
+ * aperture will change if BAR resizing succeeds in gmc sw_init.
+ */
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
+
return 0;
}
@@ -491,7 +489,7 @@ static int soc24_common_hw_init(void *handle)
adev->df.funcs->hw_init(adev);
/* enable the doorbell aperture */
- soc24_enable_doorbell_aperture(adev, true);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, true);
return 0;
}
@@ -500,8 +498,13 @@ static int soc24_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */
- soc24_enable_doorbell_aperture(adev, false);
+ /* Disable the doorbell aperture and selfring doorbell aperture
+ * separately in hw_fini because soc24_enable_doorbell_aperture
+ * has been removed and there is no need to delay disabling
+ * selfring doorbell.
+ */
+ adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_put_irq(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 9dbb13adb661..1a8ea834efa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -157,9 +157,9 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
&de_count, umc_v12_0_is_deferred_error);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
- amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, de_count);
return 0;
}
@@ -225,26 +225,16 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
}
}
-static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
- struct ta_ras_query_address_input *addr_in,
- uint64_t *pfns, int len)
+static void umc_v12_0_dump_addr_info(struct amdgpu_device *adev,
+ struct ta_ras_query_address_output *addr_out,
+ uint64_t err_addr)
{
uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column, err_addr;
- struct ta_ras_query_address_output addr_out;
- uint32_t pos = 0;
-
- err_addr = addr_in->ma.err_addr;
- addr_in->addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
- return 0;
- }
+ uint64_t soc_pa, retired_page, column;
- soc_pa = addr_out.pa.pa;
- bank = addr_out.pa.bank;
- channel_index = addr_out.pa.channel_idx;
+ soc_pa = addr_out->pa.pa;
+ bank = addr_out->pa.bank;
+ channel_index = addr_out->pa.channel_idx;
col = (err_addr >> 1) & 0x1fULL;
row = (err_addr >> 10) & 0x3fffULL;
@@ -258,11 +248,6 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-
/* include column bit 0 and 1 */
col &= 0x3;
col |= (column << 2);
@@ -272,19 +257,74 @@ static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
/* shift R13 bit */
retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ retired_page, row_xor, col, bank, channel_index);
+ }
+}
+
+static int umc_v12_0_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len)
+{
+ uint64_t soc_pa, retired_page, column;
+ uint32_t pos = 0;
+
+ soc_pa = pa_addr;
+ /* clear [C3 C2] in soc physical address */
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
+ /* clear [C4] in soc physical address */
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
+ retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
+
+ if (pos >= len)
+ return 0;
+ pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+
+ /* shift R13 bit */
+ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
if (pos >= len)
return 0;
pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
}
return pos;
}
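Each normalized row address therefore fans out to at most 2 * UMC_V12_0_NA_MAP_PA_NUM pages: one per [C4 C3 C2] combination, doubled by the R13 flip inside the loop. Callers size their pfn arrays against that bound, e.g. (a sketch using only identifiers from this file):

/* Worst case: every column combination recorded twice (R13 and R13^1). */
const uint32_t max_pfns_per_row = 2 * UMC_V12_0_NA_MAP_PA_NUM;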
+static int umc_v12_0_convert_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ uint64_t *addr, bool dump_addr)
+{
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch;
+ addr_in.ma.umc_inst = umc;
+ addr_in.ma.node_inst = node;
+ addr_in.ma.socket_id = socket;
+ addr_in.addr_type = TA_RAS_MCA_TO_PA;
+ if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
+ return -EINVAL;
+ }
+
+ if (dump_addr)
+ umc_v12_0_dump_addr_info(adev, &addr_out, err_addr);
+
+ *addr = addr_out.pa.pa;
+
+ return 0;
+}
+
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
@@ -483,12 +523,10 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
uint16_t hwid, mcatype;
- struct ta_ras_query_address_input addr_in;
uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
- uint64_t err_addr, hash_val = 0;
+ uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
- int count;
- int ret;
+ int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
@@ -514,46 +552,25 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
MCA_IPID_2_UMC_CH(ipid),
err_addr);
- memset(page_pfn, 0, sizeof(page_pfn));
-
- memset(&addr_in, 0, sizeof(addr_in));
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
- addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
- addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
- addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);
-
- count = umc_v12_0_convert_err_addr(adev,
- &addr_in, page_pfn, ARRAY_SIZE(page_pfn));
- if (count <= 0) {
- dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
- return 0;
- }
-
- ret = amdgpu_umc_build_pages_hash(adev,
- page_pfn, count, &hash_val);
- if (ret) {
- dev_err(adev->dev, "Fail to build error pages hash\n");
+ ret = umc_v12_0_convert_mca_to_addr(adev,
+ err_addr, MCA_IPID_2_UMC_CH(ipid),
+ MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
+ MCA_IPID_2_SOCKET_ID(ipid), &pa_addr, true);
+ if (ret)
return ret;
- }
ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
if (!ecc_err)
return -ENOMEM;
- ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
- if (!ecc_err->err_pages.pfn) {
- kfree(ecc_err);
- return -ENOMEM;
- }
-
- memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
- ecc_err->err_pages.count = count;
-
- ecc_err->hash_index = hash_val;
ecc_err->status = status;
ecc_err->ipid = ipid;
ecc_err->addr = addr;
+ ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT;
+
+ /* If converted pa_pfn is 0, use pa C4 pfn. */
+ if (!ecc_err->pa_pfn)
+ ecc_err->pa_pfn = BIT_ULL(UMC_V12_0_PA_C4_BIT) >> AMDGPU_GPU_PAGE_SHIFT;
ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
if (ret) {
@@ -562,13 +579,25 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
else
dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);
- kfree(ecc_err->err_pages.pfn);
kfree(ecc_err);
return ret;
}
con->umc_ecc_log.de_queried_count++;
+ memset(page_pfn, 0, sizeof(page_pfn));
+ count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ pa_addr,
+ page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
+ return 0;
+ }
+
+ /* Reserve memory */
+ for (i = 0; i < count; i++)
+ amdgpu_ras_reserve_page(adev, page_pfn[i]);
+
/* The problem case is as follows:
* 1. GPU A triggers a gpu ras reset, and GPU A drives
* GPU B to also perform a gpu ras reset.
@@ -593,16 +622,21 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
struct ras_ecc_err *ecc_err, void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- uint32_t i = 0;
- int ret = 0;
+ uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
+ int ret, i, count;
if (!err_data || !ecc_err)
return -EINVAL;
- for (i = 0; i < ecc_err->err_pages.count; i++) {
+ memset(page_pfn, 0, sizeof(page_pfn));
+ count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
+ page_pfn, ARRAY_SIZE(page_pfn));
+
+ for (i = 0; i < count; i++) {
ret = amdgpu_umc_fill_error_record(err_data,
ecc_err->addr,
- ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
+ page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
MCA_IPID_2_UMC_CH(ecc_err->ipid),
MCA_IPID_2_UMC_INST(ecc_err->ipid));
if (ret)
@@ -636,7 +670,8 @@ static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
break;
}
- radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
+ radix_tree_tag_clear(ecc_tree,
+ entries[i]->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
}
mutex_unlock(&con->umc_ecc_log.lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index b4974793850b..be5598d76c1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -81,6 +81,11 @@
(((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
+ ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_R13_BIT)))
+
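The mask clears exactly the bits the retirement loop permutes (C2, C3, C4 and R13), so every page in one blast radius collapses to a single canonical pfn — the value update_ecc_status() stores in ecc_err->pa_pfn and uses as the radix-tree key. An illustration (a sketch, not code from the patch):

/* Addresses differing only in masked bits yield the same key. */
uint64_t key_a = UMC_V12_ADDR_MASK_BAD_COLS(pa) >> AMDGPU_GPU_PAGE_SHIFT;
uint64_t key_b = UMC_V12_ADDR_MASK_BAD_COLS(pa ^ (0x1ULL << UMC_V12_0_PA_C4_BIT))
                 >> AMDGPU_GPU_PAGE_SHIFT;
/* key_a == key_b */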
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 32517c364cf7..4bfba2931b08 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -950,7 +950,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
- .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
6 + /* vce_v3_0_emit_vm_flush */
4 + /* vce_v3_0_emit_pipeline_sync */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 06d787385ad4..0748bf44c880 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1102,7 +1102,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
.set_wptr = vce_v4_0_ring_set_wptr,
- .parse_cs = amdgpu_vce_ring_parse_cs_vm,
+ .patch_cs_in_place = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index a280b9fecb77..ecdfbfefd66a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -45,6 +45,42 @@
#define mmUVD_REG_XX_MASK_1_0 0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_1_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -90,6 +126,8 @@ static int vcn_v1_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
@@ -161,6 +199,14 @@ static int vcn_v1_0_sw_init(void *handle)
r = jpeg_v1_0_sw_init(handle);
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return r;
}
@@ -184,6 +230,8 @@ static int vcn_v1_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1877,6 +1925,66 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}
+static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_1_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v1_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_1_0[j], i));
+ }
+}
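The dump deliberately reads only mmUVD_POWER_STATUS on a gated instance, and print_ip_state re-derives the powered check from that saved word instead of touching hardware again; a saved status value of 1 is treated as powered down here. The gate in isolation (sketch):

/* Judge power state from the already-captured first dump word. */
bool powered = (adev->vcn.ip_dump[inst_off] &
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
if (powered) {
        /* remaining registers of instance i are safe to RREG32() */
}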
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1895,8 +2003,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
.set_powergating_state = vcn_v1_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v1_0_dump_ip_state,
+ .print_ip_state = vcn_v1_0_print_ip_state,
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index d3d096909a7f..bfd067e2d2f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -53,6 +53,42 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -96,6 +132,8 @@ static int vcn_v2_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
volatile struct amdgpu_fw_shared *fw_shared;
@@ -184,6 +222,15 @@ static int vcn_v2_0_sw_init(void *handle)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -213,6 +260,8 @@ static int vcn_v2_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1985,6 +2034,66 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
+static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v2_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2003,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_v2_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_0_dump_ip_state,
+ .print_ip_state = vcn_v2_0_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 96f60c303161..04e9e806e318 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -55,6 +55,43 @@
#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
@@ -122,6 +159,8 @@ static int vcn_v2_5_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, j, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
@@ -241,6 +280,15 @@ static int vcn_v2_5_sw_init(void *handle)
if (r)
return r;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -277,6 +325,8 @@ static int vcn_v2_5_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1876,6 +1926,66 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+ uint32_t inst_off, is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v2_5_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -1894,8 +2004,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_5_dump_ip_state,
+ .print_ip_state = vcn_v2_5_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -1916,8 +2026,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v2_5_dump_ip_state,
+ .print_ip_state = vcn_v2_5_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 24f947751c46..65dd68b32280 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -60,6 +60,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_3_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -126,6 +162,8 @@ static int vcn_v3_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+ uint32_t *ptr;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_vcn_sw_init(adev);
@@ -246,6 +284,15 @@ static int vcn_v3_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (ptr == NULL) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -284,6 +331,7 @@ static int vcn_v3_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
return r;
}
@@ -2203,6 +2251,67 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v3_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2221,8 +2330,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_v3_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v3_0_dump_ip_state,
+ .print_ip_state = vcn_v3_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
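
The IP-dump plumbing added above hangs off a single flat allocation: sw_init reserves one uint32_t per register per VCN instance, the dump fills instance i's window starting at i * reg_count, and the print walks the same windows. A minimal userspace sketch of that indexing, with an assumed two-instance part and an assumed register count:

    /* Sketch of the adev->vcn.ip_dump layout; not driver code. */
    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t *dump_slot(uint32_t *ip_dump, uint32_t reg_count,
                               int inst, int reg_idx)
    {
            /* matches inst_off = i * reg_count; slot = inst_off + j */
            return &ip_dump[inst * reg_count + reg_idx];
    }

    int main(void)
    {
            uint32_t reg_count = 33;  /* assumed ARRAY_SIZE(vcn_reg_list_3_0) */
            int num_inst = 2;         /* assumed adev->vcn.num_vcn_inst */
            uint32_t *buf = calloc(num_inst * reg_count, sizeof(uint32_t));

            if (!buf)
                    return 1;
            /* slot 0 of each window is UVD_POWER_STATUS, which the dump relies on */
            *dump_slot(buf, reg_count, 1, 0) = 0x2;
            free(buf);
            return 0;
    }
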
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 776c539bfdda..26c6f10a8c8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -52,6 +52,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -137,6 +173,8 @@ static int vcn_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -200,6 +238,15 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -239,6 +286,8 @@ static int vcn_v4_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -2109,6 +2158,67 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* regUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
+ i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2127,8 +2237,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_v4_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_dump_ip_state,
+ .print_ip_state = vcn_v4_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
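
Both the dump and the print path key off the first saved register: the masked UVD_POWER_STATUS field is compared against 1, and only instances where the field differs from 1 are treated as powered and fully read. A hedged sketch of that predicate, assuming the status field sits in the low bits of the register:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed value; the real mask comes from the UVD register headers. */
    #define UVD_POWER_STATUS_MASK 0x3u

    static bool vcn_is_powered(uint32_t power_status)
    {
            /* field value 1 == power gated; anything else counts as on */
            return (power_status & UVD_POWER_STATUS_MASK) != 1;
    }

    int main(void)
    {
            return vcn_is_powered(0x1) ? 1 : 0;  /* gated, so returns 0 */
    }
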
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 9bae95538b62..0fda70336300 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -45,6 +45,42 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
#define NORMALIZE_VCN_REG_OFFSET(offset) \
(offset & 0x1FFFF)
@@ -92,6 +128,8 @@ static int vcn_v4_0_3_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -159,6 +197,15 @@ static int vcn_v4_0_3_sw_init(void *handle)
}
}
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+
return 0;
}
@@ -194,6 +241,8 @@ static int vcn_v4_0_3_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1684,6 +1733,68 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}
+static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_3_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off, inst_id;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_id = GET_INST(VCN, i);
+ inst_off = i * reg_count;
+ /* regUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
+ inst_id));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
@@ -1702,8 +1813,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_v4_0_3_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_3_dump_ip_state,
+ .print_ip_state = vcn_v4_0_3_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
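
One detail specific to 4.0.3: the dump translates the logical instance number through GET_INST(VCN, i) before touching registers, while the ip_dump buffer stays indexed by the logical i. A small sketch of that logical/physical split; the mapping table and register read below are stand-ins for the partition-aware lookup the driver actually does:

    #include <stdint.h>

    /* Assumed logical-to-physical map; GET_INST(VCN, i) in the driver. */
    static const int vcn_log_to_phys[4] = { 0, 2, 1, 3 };

    static uint32_t read_vcn_reg(int phys_inst, uint32_t offset)
    {
            (void)phys_inst;
            (void)offset;
            return 0;       /* stand-in for RREG32(...) */
    }

    static void dump_one(uint32_t *ip_dump, uint32_t reg_count,
                         int logical, uint32_t offset, int j)
    {
            int phys = vcn_log_to_phys[logical];

            /* buffer window by logical index, register bank by physical */
            ip_dump[logical * reg_count + j] = read_vcn_reg(phys, offset);
    }

    int main(void)
    {
            uint32_t buf[2 * 33] = { 0 };   /* assumed 2 instances, 33 regs */

            dump_one(buf, 33, 1, 0, 0);
            return (int)buf[33];
    }
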
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 8d75061f9f38..9d4f5352a62c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -52,6 +52,42 @@
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_5[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -97,6 +133,8 @@ static int vcn_v4_0_5_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -168,6 +206,14 @@ static int vcn_v4_0_5_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return 0;
}
@@ -207,6 +253,8 @@ static int vcn_v4_0_5_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1347,170 +1395,6 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_5_limit_sched(struct amdgpu_cs_parser *p,
- struct amdgpu_job *job)
-{
- struct drm_gpu_scheduler **scheds;
-
- /* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
- return -EINVAL;
-
- /* if VCN0 is harvested, we can't support AV1 */
- if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
- return -EINVAL;
-
- scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
- [AMDGPU_RING_PRIO_0].sched;
- drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
- return 0;
-}
-
-static int vcn_v4_0_5_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
- uint64_t addr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_va_mapping *map;
- uint32_t *msg, num_buffers;
- struct amdgpu_bo *bo;
- uint64_t start, end;
- unsigned int i;
- void *ptr;
- int r;
-
- addr &= AMDGPU_GMC_HOLE_MASK;
- r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
- if (r) {
- DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
- return r;
- }
-
- start = map->start * AMDGPU_GPU_PAGE_SIZE;
- end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
- if (addr & 0x7) {
- DRM_ERROR("VCN messages must be 8 byte aligned!\n");
- return -EINVAL;
- }
-
- bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r) {
- DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, &ptr);
- if (r) {
- DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
- return r;
- }
-
- msg = ptr + addr - start;
-
- /* Check length */
- if (msg[1] > end - addr) {
- r = -EINVAL;
- goto out;
- }
-
- if (msg[3] != RDECODE_MSG_CREATE)
- goto out;
-
- num_buffers = msg[2];
- for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
- uint32_t offset, size, *create;
-
- if (msg[0] != RDECODE_MESSAGE_CREATE)
- continue;
-
- offset = msg[1];
- size = msg[2];
-
- if (offset + size > end) {
- r = -EINVAL;
- goto out;
- }
-
- create = ptr + addr + offset - start;
-
- /* H264, HEVC and VP9 can run on any instance */
- if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
- continue;
-
- r = vcn_v4_0_5_limit_sched(p, job);
- if (r)
- goto out;
- }
-
-out:
- amdgpu_bo_kunmap(bo);
- return r;
-}
-
-#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
-#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
-
-#define RADEON_VCN_ENGINE_INFO (0x30000001)
-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16
-
-#define RENCODE_ENCODE_STANDARD_AV1 2
-#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64
-
-/* return the offset in ib if id is found, -1 otherwise
- * to speed up the searching we only search upto max_offset
- */
-static int vcn_v4_0_5_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
-{
- int i;
-
- for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
- if (ib->ptr[i + 1] == id)
- return i;
- }
- return -1;
-}
-
-static int vcn_v4_0_5_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- struct amdgpu_ring *ring = amdgpu_job_ring(job);
- struct amdgpu_vcn_decode_buffer *decode_buffer;
- uint64_t addr;
- uint32_t val;
- int idx;
-
- /* The first instance can decode anything */
- if (!ring->me)
- return 0;
-
- /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
- idx = vcn_v4_0_5_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
- RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
- if (idx < 0) /* engine info is missing */
- return 0;
-
- val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
-
- if (!(decode_buffer->valid_buf_flag & 0x1))
- return 0;
-
- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo;
- return vcn_v4_0_5_dec_msg(p, job, addr);
- } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
- idx = vcn_v4_0_5_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
- RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
- if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
- return vcn_v4_0_5_limit_sched(p, job);
- }
- return 0;
-}
-
static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1518,7 +1402,6 @@ static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.get_rptr = vcn_v4_0_5_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_5_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_5_unified_ring_set_wptr,
- .patch_cs_in_place = vcn_v4_0_5_ring_patch_cs_in_place,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
@@ -1733,6 +1616,67 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v4_0_5_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* regUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
+ i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1751,8 +1695,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_v4_0_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v4_0_5_dump_ip_state,
+ .print_ip_state = vcn_v4_0_5_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 68c97fcd539b..c305386358b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -37,6 +37,40 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -83,6 +117,8 @@ static int vcn_v5_0_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -137,6 +173,14 @@ static int vcn_v5_0_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
return 0;
}
@@ -173,6 +217,8 @@ static int vcn_v5_0_0_sw_fini(void *handle)
r = amdgpu_vcn_sw_fini(adev);
+ kfree(adev->vcn.ip_dump);
+
return r;
}
@@ -1297,6 +1343,66 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
+static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t inst_off;
+ bool is_powered;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
+
+static void vcn_v5_0_dump_ip_state(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ bool is_powered;
+ uint32_t inst_off;
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * reg_count;
+ /* regUVD_POWER_STATUS is always readable and is the first element of the array */
+ adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
+
+ if (is_powered)
+ for (j = 1; j < reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
+ }
+}
+
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1315,8 +1421,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
+ .dump_ip_state = vcn_v5_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 80ce42aacc0c..b61f6b838ec2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -246,6 +246,7 @@
* 1 - Stream
* 2 - Bypass
*/
+#define EOP_EXEC (1 << 28) /* For Trailing Fence */
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
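
The new EOP_EXEC bit slots into the fence packet's control dword next to the existing DATA_SEL() field: bit 28 for the trailing-fence execute flag, bits 29 and up for data select. A tiny sketch of composing that dword, using only the two definitions shown here (the resulting value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define EOP_EXEC     (1u << 28)            /* trailing fence */
    #define DATA_SEL(x)  ((uint32_t)(x) << 29) /* 1 = send low 32-bit data */

    int main(void)
    {
            uint32_t control = EOP_EXEC | DATA_SEL(1);

            printf("eop control dword: 0x%08x\n", control);  /* 0x30000000 */
            return 0;
    }
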
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 32e5db509560..9044bdb38cf4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -36,7 +36,6 @@
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
#include <linux/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
@@ -247,14 +246,15 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
q_properties->queue_size = args->ring_size;
- q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
- q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
+ q_properties->read_ptr = (void __user *)args->read_pointer_address;
+ q_properties->write_ptr = (void __user *)args->write_pointer_address;
q_properties->eop_ring_buffer_address = args->eop_buffer_address;
q_properties->eop_ring_buffer_size = args->eop_buffer_size;
q_properties->ctx_save_restore_area_address =
args->ctx_save_restore_address;
q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
q_properties->ctl_stack_size = args->ctl_stack_size;
+ q_properties->sdma_engine_id = args->sdma_engine_id;
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
@@ -262,6 +262,8 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->type = KFD_QUEUE_TYPE_SDMA;
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
+ else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID)
+ q_properties->type = KFD_QUEUE_TYPE_SDMA_BY_ENG_ID;
else
return -ENOTSUPP;
@@ -306,7 +308,6 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
struct kfd_process_device *pdd;
struct queue_properties q_properties;
uint32_t doorbell_offset_in_process = 0;
- struct amdgpu_bo *wptr_bo = NULL;
memset(&q_properties, 0, sizeof(struct queue_properties));
@@ -334,6 +335,18 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
+ if (q_properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
+ int max_sdma_eng_id = kfd_get_num_sdma_engines(dev) +
+ kfd_get_num_xgmi_sdma_engines(dev) - 1;
+
+ if (q_properties.sdma_engine_id > max_sdma_eng_id) {
+ err = -EINVAL;
+ pr_err("sdma_engine_id %i exceeds maximum id of %i\n",
+ q_properties.sdma_engine_id, max_sdma_eng_id);
+ goto err_sdma_engine_id;
+ }
+ }
+
if (!pdd->qpd.proc_doorbells) {
err = kfd_alloc_process_doorbells(dev->kfd, pdd);
if (err) {
@@ -342,53 +355,17 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
}
}
- /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
- * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
- */
- if (dev->kfd->shared_resources.enable_mes &&
- ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
- >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
- struct amdgpu_bo_va_mapping *wptr_mapping;
- struct amdgpu_vm *wptr_vm;
-
- wptr_vm = drm_priv_to_vm(pdd->drm_priv);
- err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
- if (err)
- goto err_wptr_map_gart;
-
- wptr_mapping = amdgpu_vm_bo_lookup_mapping(
- wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
- amdgpu_bo_unreserve(wptr_vm->root.bo);
- if (!wptr_mapping) {
- pr_err("Failed to lookup wptr bo\n");
- err = -EINVAL;
- goto err_wptr_map_gart;
- }
-
- wptr_bo = wptr_mapping->bo_va->base.bo;
- if (wptr_bo->tbo.base.size > PAGE_SIZE) {
- pr_err("Requested GART mapping for wptr bo larger than one page\n");
- err = -EINVAL;
- goto err_wptr_map_gart;
- }
- if (dev->adev != amdgpu_ttm_adev(wptr_bo->tbo.bdev)) {
- pr_err("Queue memory allocated to wrong device\n");
- err = -EINVAL;
- goto err_wptr_map_gart;
- }
-
- err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo);
- if (err) {
- pr_err("Failed to map wptr bo to GART\n");
- goto err_wptr_map_gart;
- }
+ err = kfd_queue_acquire_buffers(pdd, &q_properties);
+ if (err) {
+ pr_debug("failed to acquire user queue buffers\n");
+ goto err_acquire_queue_buf;
}
pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
NULL, NULL, NULL, &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
@@ -422,9 +399,10 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
return 0;
err_create_queue:
- if (wptr_bo)
- amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
-err_wptr_map_gart:
+ kfd_queue_unref_bo_vas(pdd, &q_properties);
+ kfd_queue_release_buffers(pdd, &q_properties);
+err_acquire_queue_buf:
+err_sdma_engine_id:
err_bind_process:
err_pdd:
mutex_unlock(&p->mutex);
@@ -1422,8 +1400,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
if (err) {
- pr_err("Failed to unmap from gpu %d/%d\n",
- i, args->n_devices);
+ pr_debug("Failed to unmap from gpu %d/%d\n", i, args->n_devices);
goto unmap_memory_from_gpu_failed;
}
args->n_success = i+1;
@@ -1857,7 +1834,8 @@ static uint32_t get_process_num_bos(struct kfd_process *p)
}
static int criu_get_prime_handle(struct kgd_mem *mem,
- int flags, u32 *shared_fd)
+ int flags, u32 *shared_fd,
+ struct file **file)
{
struct dma_buf *dmabuf;
int ret;
@@ -1868,13 +1846,14 @@ static int criu_get_prime_handle(struct kgd_mem *mem,
return ret;
}
- ret = dma_buf_fd(dmabuf, flags);
+ ret = get_unused_fd_flags(flags);
if (ret < 0) {
pr_err("dmabuf create fd failed, ret:%d\n", ret);
goto out_free_dmabuf;
}
*shared_fd = ret;
+ *file = dmabuf->file;
return 0;
out_free_dmabuf:
@@ -1882,6 +1861,25 @@ out_free_dmabuf:
return ret;
}
+static void commit_files(struct file **files,
+ struct kfd_criu_bo_bucket *bo_buckets,
+ unsigned int count,
+ int err)
+{
+ while (count--) {
+ struct file *file = files[count];
+
+ if (!file)
+ continue;
+ if (err) {
+ fput(file);
+ put_unused_fd(bo_buckets[count].dmabuf_fd);
+ } else {
+ fd_install(bo_buckets[count].dmabuf_fd, file);
+ }
+ }
+}
+
static int criu_checkpoint_bos(struct kfd_process *p,
uint32_t num_bos,
uint8_t __user *user_bos,
@@ -1890,6 +1888,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
{
struct kfd_criu_bo_bucket *bo_buckets;
struct kfd_criu_bo_priv_data *bo_privs;
+ struct file **files = NULL;
int ret = 0, pdd_index, bo_index = 0, id;
void *mem;
@@ -1903,6 +1902,12 @@ static int criu_checkpoint_bos(struct kfd_process *p,
goto exit;
}
+ files = kvzalloc(num_bos * sizeof(struct file *), GFP_KERNEL);
+ if (!files) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
struct kfd_process_device *pdd = p->pdds[pdd_index];
struct amdgpu_bo *dumper_bo;
@@ -1945,7 +1950,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
ret = criu_get_prime_handle(kgd_mem,
bo_bucket->alloc_flags &
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
- &bo_bucket->dmabuf_fd);
+ &bo_bucket->dmabuf_fd, &files[bo_index]);
if (ret)
goto exit;
} else {
@@ -1963,7 +1968,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
for (i = 0; i < p->n_pdds; i++) {
- if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
+ if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->drm_priv, kgd_mem))
bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
}
@@ -1996,12 +2001,8 @@ static int criu_checkpoint_bos(struct kfd_process *p,
*priv_offset += num_bos * sizeof(*bo_privs);
exit:
- while (ret && bo_index--) {
- if (bo_buckets[bo_index].alloc_flags
- & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
- close_fd(bo_buckets[bo_index].dmabuf_fd);
- }
-
+ commit_files(files, bo_buckets, bo_index, ret);
+ kvfree(files);
kvfree(bo_buckets);
kvfree(bo_privs);
return ret;
@@ -2353,7 +2354,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
static int criu_restore_bo(struct kfd_process *p,
struct kfd_criu_bo_bucket *bo_bucket,
- struct kfd_criu_bo_priv_data *bo_priv)
+ struct kfd_criu_bo_priv_data *bo_priv,
+ struct file **file)
{
struct kfd_process_device *pdd;
struct kgd_mem *kgd_mem;
@@ -2405,7 +2407,7 @@ static int criu_restore_bo(struct kfd_process *p,
if (bo_bucket->alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
- &bo_bucket->dmabuf_fd);
+ &bo_bucket->dmabuf_fd, file);
if (ret)
return ret;
} else {
@@ -2422,6 +2424,7 @@ static int criu_restore_bos(struct kfd_process *p,
{
struct kfd_criu_bo_bucket *bo_buckets = NULL;
struct kfd_criu_bo_priv_data *bo_privs = NULL;
+ struct file **files = NULL;
int ret = 0;
uint32_t i = 0;
@@ -2435,6 +2438,12 @@ static int criu_restore_bos(struct kfd_process *p,
if (!bo_buckets)
return -ENOMEM;
+ files = kvzalloc(args->num_bos * sizeof(struct file *), GFP_KERNEL);
+ if (!files) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
ret = copy_from_user(bo_buckets, (void __user *)args->bos,
args->num_bos * sizeof(*bo_buckets));
if (ret) {
@@ -2460,7 +2469,7 @@ static int criu_restore_bos(struct kfd_process *p,
/* Create and map new BOs */
for (; i < args->num_bos; i++) {
- ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
+ ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i], &files[i]);
if (ret) {
pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
goto exit;
@@ -2475,11 +2484,8 @@ static int criu_restore_bos(struct kfd_process *p,
ret = -EFAULT;
exit:
- while (ret && i--) {
- if (bo_buckets[i].alloc_flags
- & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
- close_fd(bo_buckets[i].dmabuf_fd);
- }
+ commit_files(files, bo_buckets, i, ret);
+ kvfree(files);
kvfree(bo_buckets);
kvfree(bo_privs);
return ret;
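
The CRIU rework above splits dmabuf fd publication into two phases: criu_get_prime_handle() now only reserves an fd number with get_unused_fd_flags() and records the backing struct file, and commit_files() later either fd_install()s every reserved fd on success or releases them with fput()/put_unused_fd() on failure, so a partly failed checkpoint or restore never leaves half-valid fds visible to the process. A kernel-style sketch of the reserve-then-commit pattern in isolation (the four fd helpers are real kernel APIs; the wrapper functions are illustrative):

    #include <linux/fcntl.h>
    #include <linux/file.h>
    #include <linux/fs.h>

    /* Phase 1: reserve a number; nothing is visible to userspace yet. */
    static int reserve_fd(int *out_fd)
    {
            int fd = get_unused_fd_flags(O_CLOEXEC);

            if (fd < 0)
                    return fd;
            *out_fd = fd;
            return 0;
    }

    /* Phase 2: publish atomically, or unwind without ever exposing the fd. */
    static void commit_or_abort(struct file *f, int fd, int err)
    {
            if (err) {
                    fput(f);               /* drop the file reference */
                    put_unused_fd(fd);     /* return the reserved number */
            } else {
                    fd_install(fd, f);     /* fd becomes valid here */
            }
    }
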
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 34a282540c7e..312dfa84f29f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -365,47 +365,47 @@ static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_i
*watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;
- spin_lock(&pdd->dev->kfd->watch_points_lock);
+ spin_lock(&pdd->dev->watch_points_lock);
for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
/* device watchpoint in use so skip */
- if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1)
+ if ((pdd->dev->alloc_watch_ids >> i) & 0x1)
continue;
pdd->alloc_watch_ids |= 0x1 << i;
- pdd->dev->kfd->alloc_watch_ids |= 0x1 << i;
+ pdd->dev->alloc_watch_ids |= 0x1 << i;
*watch_id = i;
- spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ spin_unlock(&pdd->dev->watch_points_lock);
return 0;
}
- spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ spin_unlock(&pdd->dev->watch_points_lock);
return -ENOMEM;
}
static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
- spin_lock(&pdd->dev->kfd->watch_points_lock);
+ spin_lock(&pdd->dev->watch_points_lock);
/* process owns device watch point so safe to clear */
if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
pdd->alloc_watch_ids &= ~(0x1 << watch_id);
- pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id);
+ pdd->dev->alloc_watch_ids &= ~(0x1 << watch_id);
}
- spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ spin_unlock(&pdd->dev->watch_points_lock);
}
static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
bool owns_watch_id = false;
- spin_lock(&pdd->dev->kfd->watch_points_lock);
+ spin_lock(&pdd->dev->watch_points_lock);
owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
((pdd->alloc_watch_ids >> watch_id) & 0x1);
- spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ spin_unlock(&pdd->dev->watch_points_lock);
return owns_watch_id;
}
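
Functionally the allocator these hunks touch is unchanged; the patch only moves the lock and the alloc_watch_ids mask from the shared kfd_dev to the per-node kfd_node. The scheme itself is a small bitmap scan with the per-process and per-device masks updated together. A minimal userspace rendering (the MAX_WATCH_ADDRESSES value and the absence of locking are stand-ins):

    #include <stdio.h>

    #define MAX_WATCH_ADDRESSES 4   /* assumed per-device watchpoint count */

    static unsigned int dev_watch_ids;   /* pdd->dev->alloc_watch_ids */
    static unsigned int pdd_watch_ids;   /* pdd->alloc_watch_ids */

    static int get_watch_id(void)
    {
            /* in the driver this scan runs under dev->watch_points_lock */
            for (int i = 0; i < MAX_WATCH_ADDRESSES; i++) {
                    if ((dev_watch_ids >> i) & 0x1)
                            continue;       /* watchpoint in use, skip */
                    pdd_watch_ids |= 0x1u << i;
                    dev_watch_ids |= 0x1u << i;
                    return i;
            }
            return -1;      /* the driver returns -ENOMEM */
    }

    int main(void)
    {
            int a = get_watch_id();
            int b = get_watch_id();

            printf("allocated watch ids: %d, %d\n", a, b);  /* 0, 1 */
            return 0;
    }
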
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f4d20adaa068..fad1c8f2bc83 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -884,13 +884,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
dev_err(kfd_device, "Error initializing KFD node\n");
goto node_init_error;
}
+
+ spin_lock_init(&node->watch_points_lock);
+
kfd->nodes[i] = node;
}
svm_range_set_max_pages(kfd->adev);
- spin_lock_init(&kfd->watch_points_lock);
-
kfd->init_complete = true;
dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
kfd->adev->pdev->device);
@@ -907,7 +908,7 @@ node_alloc_error:
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
- amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
alloc_gtt_mem_failure:
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
@@ -925,7 +926,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd);
ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd);
- amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
}
kfree(kfd);
@@ -1445,6 +1446,45 @@ void kgd2kfd_unlock_kfd(void)
mutex_unlock(&kfd_processes_mutex);
}
+int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ struct kfd_node *node;
+ int ret;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ if (node_id >= kfd->num_nodes) {
+ dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
+ node_id, kfd->num_nodes - 1);
+ return -EINVAL;
+ }
+ node = kfd->nodes[node_id];
+
+ ret = node->dqm->ops.unhalt(node->dqm);
+ if (ret)
+ dev_err(kfd_device, "Error in starting scheduler\n");
+
+ return ret;
+}
+
+int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ struct kfd_node *node;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ if (node_id >= kfd->num_nodes) {
+ dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
+ node_id, kfd->num_nodes - 1);
+ return -EINVAL;
+ }
+
+ node = kfd->nodes[node_id];
+ return node->dqm->ops.halt(node->dqm);
+}
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
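
kgd2kfd_start_sched() and kgd2kfd_stop_sched() above are thin per-node wrappers over the dqm halt/unhalt ops introduced later in this diff: stop validates the node ID and calls dqm->ops.halt(), start calls dqm->ops.unhalt(). A hedged caller-side sketch of the intended pairing (the two entry points are real; the surrounding flow is illustrative):

    #include <linux/types.h>

    struct kfd_dev;
    int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
    int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);

    /* Quiesce one KFD node, do some work, then let user queues run again. */
    static int quiesce_node(struct kfd_dev *kfd, uint32_t node_id)
    {
            int r = kgd2kfd_stop_sched(kfd, node_id);   /* dqm->ops.halt() */

            if (r)
                    return r;
            /* ... no user queues are on the runlist here ... */
            return kgd2kfd_start_sched(kfd, node_id);   /* dqm->ops.unhalt() */
    }
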
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 4f48507418d2..648f40091aa3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -153,6 +153,20 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
static void kfd_hws_hang(struct device_queue_manager *dqm)
{
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+
+ /* Mark all device queues as reset. */
+ list_for_each_entry(cur, &dqm->queues, list) {
+ qpd = cur->qpd;
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
+
+ pdd->has_reset_queue = true;
+ }
+ }
+
/*
* Issue a GPU reset if HWS is unresponsive
*/
@@ -208,10 +222,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
queue_input.mqd_addr = q->gart_mqd_addr;
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
- if (q->wptr_bo) {
- wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
- queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
- }
+ wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
+ queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->properties.wptr_bo) + wptr_addr_off;
queue_input.is_kfd_process = 1;
queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
@@ -307,6 +319,46 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm)
return retval;
}
+static int suspend_all_queues_mes(struct device_queue_manager *dqm)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
+ int r = 0;
+
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EIO;
+
+ r = amdgpu_mes_suspend(adev);
+ up_read(&adev->reset_domain->sem);
+
+ if (r) {
+ dev_err(adev->dev, "failed to suspend gangs from MES\n");
+ dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
+ kfd_hws_hang(dqm);
+ }
+
+ return r;
+}
+
+static int resume_all_queues_mes(struct device_queue_manager *dqm)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
+ int r = 0;
+
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EIO;
+
+ r = amdgpu_mes_resume(adev);
+ up_read(&adev->reset_domain->sem);
+
+ if (r) {
+ dev_err(adev->dev, "failed to resume gangs from MES\n");
+ dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
+ kfd_hws_hang(dqm);
+ }
+
+ return r;
+}
+
static void increment_queue_count(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
@@ -880,6 +932,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
else if (prev_active)
retval = remove_queue_mes(dqm, q, &pdd->qpd);
+ /* queue is reset so inaccessible */
+ if (pdd->has_reset_queue) {
+ retval = -EACCES;
+ goto out_unlock;
+ }
+
if (retval) {
dev_err(dev, "unmap queue failed\n");
goto out_unlock;
@@ -1534,6 +1592,41 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_xgmi_sdma_engines(dqm->dev);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
+ int i, num_queues, num_engines, eng_offset = 0, start_engine;
+ bool free_bit_found = false, is_xgmi = false;
+
+ if (q->properties.sdma_engine_id < kfd_get_num_sdma_engines(dqm->dev)) {
+ num_queues = get_num_sdma_queues(dqm);
+ num_engines = kfd_get_num_sdma_engines(dqm->dev);
+ q->properties.type = KFD_QUEUE_TYPE_SDMA;
+ } else {
+ num_queues = get_num_xgmi_sdma_queues(dqm);
+ num_engines = kfd_get_num_xgmi_sdma_engines(dqm->dev);
+ eng_offset = kfd_get_num_sdma_engines(dqm->dev);
+ q->properties.type = KFD_QUEUE_TYPE_SDMA_XGMI;
+ is_xgmi = true;
+ }
+
+ /* Scan for an available bit based on the target engine ID. */
+ start_engine = q->properties.sdma_engine_id - eng_offset;
+ for (i = start_engine; i < num_queues; i += num_engines) {
+
+ if (!test_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap))
+ continue;
+
+ clear_bit(i, is_xgmi ? dqm->xgmi_sdma_bitmap : dqm->sdma_bitmap);
+ q->sdma_id = i;
+ q->properties.sdma_queue_id = q->sdma_id / num_engines;
+ free_bit_found = true;
+ break;
+ }
+
+ if (!free_bit_found) {
+ dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
+ q->properties.sdma_engine_id);
+ return -ENOMEM;
+ }
}
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
@@ -1626,10 +1719,64 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
return 0;
}
+/* halt_cpsch:
+ * Unmap queues so the scheduler doesn't keep running the remaining jobs in
+ * those queues. Then set dqm->sched_halt so no queues are mapped to the
+ * runlist until unhalt_cpsch is called.
+ */
+static int halt_cpsch(struct device_queue_manager *dqm)
+{
+ int ret = 0;
+
+ dqm_lock(dqm);
+ if (!dqm->sched_running) {
+ dqm_unlock(dqm);
+ return 0;
+ }
+
+ WARN_ONCE(dqm->sched_halt, "Scheduling is already on halt\n");
+
+ if (!dqm->is_hws_hang) {
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ ret = unmap_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD, false);
+ else
+ ret = remove_all_queues_mes(dqm);
+ }
+ dqm->sched_halt = true;
+ dqm_unlock(dqm);
+
+ return ret;
+}
+
+/* unhalt_cpsch
+ * Unset dqm->sched_halt and map queues back to the runlist
+ */
+static int unhalt_cpsch(struct device_queue_manager *dqm)
+{
+ int ret = 0;
+
+ dqm_lock(dqm);
+ if (!dqm->sched_running || !dqm->sched_halt) {
+ WARN_ONCE(!dqm->sched_halt, "Scheduling is not on halt.\n");
+ dqm_unlock(dqm);
+ return 0;
+ }
+ dqm->sched_halt = false;
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ ret = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD);
+ dqm_unlock(dqm);
+
+ return ret;
+}
+
static int start_cpsch(struct device_queue_manager *dqm)
{
struct device *dev = dqm->dev->adev->dev;
- int retval;
+ int retval, num_hw_queue_slots;
retval = 0;
@@ -1682,9 +1829,24 @@ static int start_cpsch(struct device_queue_manager *dqm)
&dqm->wait_times);
}
+ /* set up the per-queue reset detection buffer */
+ num_hw_queue_slots = dqm->dev->kfd->shared_resources.num_queue_per_pipe *
+ dqm->dev->kfd->shared_resources.num_pipe_per_mec *
+ NUM_XCC(dqm->dev->xcc_mask);
+
+ dqm->detect_hang_info_size = num_hw_queue_slots * sizeof(struct dqm_detect_hang_info);
+ dqm->detect_hang_info = kzalloc(dqm->detect_hang_info_size, GFP_KERNEL);
+
+ if (!dqm->detect_hang_info) {
+ retval = -ENOMEM;
+ goto fail_detect_hang_buffer;
+ }
+
dqm_unlock(dqm);
return 0;
+fail_detect_hang_buffer:
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
fail_allocate_vidmem:
fail_set_sched_resources:
if (!dqm->dev->kfd->shared_resources.enable_mes)
@@ -1715,6 +1877,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_uninit(&dqm->packet_mgr);
+ kfree(dqm->detect_hang_info);
+ dqm->detect_hang_info = NULL;
dqm_unlock(dqm);
return 0;
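
The detect_hang_info buffer sized in this hunk covers every possible HQD slot rather than just currently active queues: queues per pipe, times pipes per MEC, times the number of XCC instances. With assumed GFX9-style numbers the arithmetic looks like this:

    #include <stdio.h>

    struct dqm_detect_hang_info {
            int pipe_id, queue_id, xcc_id;
            unsigned long long queue_address;
    };

    int main(void)
    {
            int num_queue_per_pipe = 8;   /* assumed */
            int num_pipe_per_mec = 4;     /* assumed */
            int num_xcc = 1;              /* NUM_XCC(xcc_mask), assumed */
            int slots = num_queue_per_pipe * num_pipe_per_mec * num_xcc;

            printf("%d HQD slots, %zu bytes of hang info\n",
                   slots, slots * sizeof(struct dqm_detect_hang_info));
            return 0;
    }
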
@@ -1786,7 +1950,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
- q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
dqm_lock(dqm);
retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
dqm_unlock(dqm);
@@ -1913,7 +2078,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
struct device *dev = dqm->dev->adev->dev;
int retval;
- if (!dqm->sched_running)
+ if (!dqm->sched_running || dqm->sched_halt)
return 0;
if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
return 0;
@@ -1931,6 +2096,135 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
return retval;
}
+static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
+
+ dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid 0x%0x is reset\n",
+ q->properties.queue_id, q->process->pasid);
+
+ pdd->has_reset_queue = true;
+ if (q->properties.is_active) {
+ q->properties.is_active = false;
+ decrement_queue_count(dqm, qpd, q);
+ }
+}
+
+static int detect_queue_hang(struct device_queue_manager *dqm)
+{
+ int i;
+
+ /* hang detection should only be used in a dqm-locked queue reset */
+ if (WARN_ON(dqm->detect_hang_count > 0))
+ return 0;
+
+ memset(dqm->detect_hang_info, 0, dqm->detect_hang_info_size);
+
+ for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) {
+ uint32_t mec, pipe, queue;
+ int xcc_id;
+
+ mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
+ / dqm->dev->kfd->shared_resources.num_pipe_per_mec;
+
+ if (mec || !test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
+ continue;
+
+ amdgpu_queue_mask_bit_to_mec_queue(dqm->dev->adev, i, &mec, &pipe, &queue);
+
+ for_each_inst(xcc_id, dqm->dev->xcc_mask) {
+ uint64_t queue_addr = dqm->dev->kfd2kgd->hqd_get_pq_addr(
+ dqm->dev->adev, pipe, queue, xcc_id);
+ struct dqm_detect_hang_info hang_info;
+
+ if (!queue_addr)
+ continue;
+
+ hang_info.pipe_id = pipe;
+ hang_info.queue_id = queue;
+ hang_info.xcc_id = xcc_id;
+ hang_info.queue_address = queue_addr;
+
+ dqm->detect_hang_info[dqm->detect_hang_count] = hang_info;
+ dqm->detect_hang_count++;
+ }
+ }
+
+ return dqm->detect_hang_count;
+}
+
+static struct queue *find_queue_by_address(struct device_queue_manager *dqm, uint64_t queue_address)
+{
+ struct device_process_node *cur;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+
+ list_for_each_entry(cur, &dqm->queues, list) {
+ qpd = cur->qpd;
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (queue_address == q->properties.queue_address)
+ return q;
+ }
+ }
+
+ return NULL;
+}
+
+/* only for compute queue */
+static int reset_queues_on_hws_hang(struct device_queue_manager *dqm)
+{
+ int r = 0, reset_count = 0, i;
+
+ if (!dqm->detect_hang_info || dqm->is_hws_hang)
+ return -EIO;
+
+ /* assume dqm locked. */
+ if (!detect_queue_hang(dqm))
+ return -ENOTRECOVERABLE;
+
+ for (i = 0; i < dqm->detect_hang_count; i++) {
+ struct dqm_detect_hang_info hang_info = dqm->detect_hang_info[i];
+ struct queue *q = find_queue_by_address(dqm, hang_info.queue_address);
+ struct kfd_process_device *pdd;
+ uint64_t queue_addr = 0;
+
+ if (!q) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ pdd = kfd_get_process_device_data(dqm->dev, q->process);
+ if (!pdd) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ queue_addr = dqm->dev->kfd2kgd->hqd_reset(dqm->dev->adev,
+ hang_info.pipe_id, hang_info.queue_id, hang_info.xcc_id,
+ KFD_UNMAP_LATENCY_MS);
+
+ /* either reset failed or we reset an unexpected queue. */
+ if (queue_addr != q->properties.queue_address) {
+ r = -ENOTRECOVERABLE;
+ goto reset_fail;
+ }
+
+ set_queue_as_reset(dqm, q, &pdd->qpd);
+ reset_count++;
+ }
+
+ if (reset_count == dqm->detect_hang_count)
+ kfd_signal_reset_event(dqm->dev);
+ else
+ r = -ENOTRECOVERABLE;
+
+reset_fail:
+ dqm->detect_hang_count = 0;
+
+ return r;
+}
+
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
@@ -1981,11 +2275,14 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
- while (halt_if_hws_hang)
- schedule();
- kfd_hws_hang(dqm);
- retval = -ETIME;
- goto out;
+ if (reset_queues_on_hws_hang(dqm)) {
+ while (halt_if_hws_hang)
+ schedule();
+ dqm->is_hws_hang = true;
+ kfd_hws_hang(dqm);
+ retval = -ETIME;
+ goto out;
+ }
}
/* We need to reset the grace period value for this device */
@@ -2004,8 +2301,7 @@ out:
}
/* only for compute queue */
-static int reset_queues_cpsch(struct device_queue_manager *dqm,
- uint16_t pasid)
+static int reset_queues_cpsch(struct device_queue_manager *dqm, uint16_t pasid)
{
int retval;
@@ -2111,10 +2407,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
pdd->sdma_past_activity_counter += sdma_val;
}
- list_del(&q->list);
- qpd->queue_count--;
if (q->properties.is_active) {
decrement_queue_count(dqm, qpd, q);
+ q->properties.is_active = false;
if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
@@ -2125,6 +2420,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
retval = remove_queue_mes(dqm, q, qpd);
}
}
+ list_del(&q->list);
+ qpd->queue_count--;
/*
* Unconditionally decrement this counter, regardless of the queue's
@@ -2525,6 +2822,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
+ dqm->ops.halt = halt_cpsch;
+ dqm->ops.unhalt = unhalt_cpsch;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
@@ -2621,7 +2920,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
- amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
@@ -2633,6 +2932,95 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
kfree(dqm);
}
+int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct device_queue_manager *dqm = knode->dqm;
+ struct device *dev = dqm->dev->adev->dev;
+ struct qcm_process_device *qpd;
+ struct queue *q = NULL;
+ int ret = 0;
+
+ if (!p)
+ return -EINVAL;
+
+ dqm_lock(dqm);
+
+ pdd = kfd_get_process_device_data(dqm->dev, p);
+ if (pdd) {
+ qpd = &pdd->qpd;
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->doorbell_id == doorbell_id && q->properties.is_active) {
+ ret = suspend_all_queues_mes(dqm);
+ if (ret) {
+ dev_err(dev, "Suspending all queues failed");
+ goto out;
+ }
+
+ q->properties.is_evicted = true;
+ q->properties.is_active = false;
+ decrement_queue_count(dqm, qpd, q);
+
+ ret = remove_queue_mes(dqm, q, qpd);
+ if (ret) {
+ dev_err(dev, "Removing bad queue failed");
+ goto out;
+ }
+
+ ret = resume_all_queues_mes(dqm);
+ if (ret)
+ dev_err(dev, "Resuming all queues failed");
+
+ break;
+ }
+ }
+ }
+
+out:
+ dqm_unlock(dqm);
+ return ret;
+}
+
+static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct device *dev = dqm->dev->adev->dev;
+ int ret = 0;
+
+ /* Check if process is already evicted */
+ dqm_lock(dqm);
+ if (qpd->evicted) {
+ /* Increment the evicted count to make sure the
+	 * process stays evicted before it is terminated.
+ */
+ qpd->evicted++;
+ dqm_unlock(dqm);
+ goto out;
+ }
+ dqm_unlock(dqm);
+
+ ret = suspend_all_queues_mes(dqm);
+ if (ret) {
+ dev_err(dev, "Suspending all queues failed");
+ goto out;
+ }
+
+ ret = dqm->ops.evict_process_queues(dqm, qpd);
+ if (ret) {
+ dev_err(dev, "Evicting process queues failed");
+ goto out;
+ }
+
+ ret = resume_all_queues_mes(dqm);
+ if (ret)
+ dev_err(dev, "Resuming all queues failed");
+
+out:
+ return ret;
+}
+
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
{
struct kfd_process_device *pdd;
@@ -2643,8 +3031,13 @@ int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
return -EINVAL;
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
pdd = kfd_get_process_device_data(dqm->dev, p);
- if (pdd)
- ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+ if (pdd) {
+ if (dqm->dev->kfd->shared_resources.enable_mes)
+ ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
+ else
+ ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+ }
+
kfd_unref_process(p);
return ret;
@@ -3147,6 +3540,30 @@ int debug_refresh_runlist(struct device_queue_manager *dqm)
return debug_map_and_unlock(dqm);
}
+bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ int doorbell_off, u32 *queue_format)
+{
+ struct queue *q;
+ bool r = false;
+
+ if (!queue_format)
+ return r;
+
+ dqm_lock(dqm);
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->properties.doorbell_off == doorbell_off) {
+ *queue_format = q->properties.format;
+ r = true;
+ goto out;
+ }
+ }
+
+out:
+ dqm_unlock(dqm);
+ return r;
+}
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 3b9b8eabaacc..09ab36f8e8c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -106,6 +106,12 @@ union GRBM_GFX_INDEX_BITS {
* @uninitialize: Destroys all the device queue manager resources allocated in
* initialize routine.
*
+ * @halt: This routine unmaps queues from the runlist and sets the halt status
+ * to true so no more queues are mapped to the runlist until unhalt.
+ *
+ * @unhalt: This routine sets the halt status back to false and maps queues
+ * back to the runlist.
+ *
* @create_kernel_queue: Creates kernel queue. Used for debug queue.
*
* @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
@@ -153,6 +159,8 @@ struct device_queue_manager_ops {
int (*start)(struct device_queue_manager *dqm);
int (*stop)(struct device_queue_manager *dqm);
void (*uninitialize)(struct device_queue_manager *dqm);
+ int (*halt)(struct device_queue_manager *dqm);
+ int (*unhalt)(struct device_queue_manager *dqm);
int (*create_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd);
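
[Editor's note: for orientation, a minimal standalone model (userspace C, all
names illustrative) of how the new halt/unhalt pair is meant to bracket a
window in which nothing may be mapped to the runlist; the real ops of course
run under dqm locking.]

    #include <stdio.h>

    struct toy_dqm {
        int sched_halt;      /* mirrors dqm->sched_halt */
        int queues_mapped;   /* 1 while queues are on the runlist */
    };

    static int toy_halt(struct toy_dqm *dqm)
    {
        dqm->sched_halt = 1;     /* block further mapping */
        dqm->queues_mapped = 0;  /* unmap everything from the runlist */
        return 0;
    }

    static int toy_unhalt(struct toy_dqm *dqm)
    {
        dqm->sched_halt = 0;     /* allow mapping again */
        dqm->queues_mapped = 1;  /* map queues back to the runlist */
        return 0;
    }

    int main(void)
    {
        struct toy_dqm dqm = { 0, 1 };

        toy_halt(&dqm);   /* nothing is mapped while sched_halt is set */
        toy_unhalt(&dqm); /* normal scheduling resumes */
        printf("halt=%d mapped=%d\n", dqm.sched_halt, dqm.queues_mapped);
        return 0;
    }
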
@@ -210,6 +218,13 @@ struct device_queue_manager_asic_ops {
struct kfd_node *dev);
};
+struct dqm_detect_hang_info {
+ int pipe_id;
+ int queue_id;
+ int xcc_id;
+ uint64_t queue_address;
+};
+
/**
* struct device_queue_manager
*
@@ -257,6 +272,7 @@ struct device_queue_manager {
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
+ bool sched_halt;
/* used for GFX 9.4.3 only */
uint32_t current_logical_xcc_start;
@@ -264,6 +280,11 @@ struct device_queue_manager {
uint32_t wait_times;
wait_queue_head_t destroy_wait;
+
+ /* for per-queue reset support */
+ struct dqm_detect_hang_info *detect_hang_info;
+ size_t detect_hang_info_size;
+ int detect_hang_count;
};
void device_queue_manager_init_cik(
@@ -303,6 +324,9 @@ void set_queue_snapshot_entry(struct queue *q,
int debug_lock_and_unmap(struct device_queue_manager *dqm);
int debug_map_and_unlock(struct device_queue_manager *dqm);
int debug_refresh_runlist(struct device_queue_manager *dqm);
+bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ int doorbell_off, u32 *queue_format);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 9b33d9d2c9ad..ea3792249209 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -31,6 +31,7 @@
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
+#include "kfd_device_queue_manager.h"
#include <linux/device.h>
/*
@@ -1244,12 +1245,33 @@ void kfd_signal_reset_event(struct kfd_node *dev)
idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+ struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
continue;
}
+ if (unlikely(!pdd)) {
+ WARN_ONCE(1, "Could not get device data from pasid:0x%x\n", p->pasid);
+ continue;
+ }
+
+ if (dev->dqm->detect_hang_count && !pdd->has_reset_queue)
+ continue;
+
+ if (dev->dqm->detect_hang_count) {
+ struct amdgpu_task_info *ti;
+
+ ti = amdgpu_vm_get_task_info_pasid(dev->adev, p->pasid);
+ if (ti) {
+ dev_err(dev->adev->dev,
+ "Queues reset on process %s tid %d thread %s pid %d\n",
+ ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ amdgpu_vm_put_task_info(ti);
+ }
+ }
+
rcu_read_lock();
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 8e0d0356e810..37b69fe0ede3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -129,63 +129,6 @@ enum SQ_INTERRUPT_ERROR_TYPE {
KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
>> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
-static void event_interrupt_poison_consumption(struct kfd_node *dev,
- uint16_t pasid, uint16_t client_id)
-{
- enum amdgpu_ras_block block = 0;
- int old_poison, ret = -EINVAL;
- uint32_t reset = 0;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
-
- if (!p)
- return;
-
- /* all queues of a process will be unmapped in one time */
- old_poison = atomic_cmpxchg(&p->poison, 0, 1);
- kfd_unref_process(p);
- if (old_poison)
- return;
-
- switch (client_id) {
- case SOC15_IH_CLIENTID_SE0SH:
- case SOC15_IH_CLIENTID_SE1SH:
- case SOC15_IH_CLIENTID_SE2SH:
- case SOC15_IH_CLIENTID_SE3SH:
- case SOC15_IH_CLIENTID_UTCL2:
- ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
- block = AMDGPU_RAS_BLOCK__GFX;
- if (ret)
- reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
- break;
- case SOC15_IH_CLIENTID_SDMA0:
- case SOC15_IH_CLIENTID_SDMA1:
- case SOC15_IH_CLIENTID_SDMA2:
- case SOC15_IH_CLIENTID_SDMA3:
- case SOC15_IH_CLIENTID_SDMA4:
- block = AMDGPU_RAS_BLOCK__SDMA;
- reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
- break;
- default:
- break;
- }
-
- kfd_signal_poison_consumed_event(dev, pasid);
-
- /* resetting queue passes, do page retirement without gpu reset
- * resetting queue fails, fallback to gpu reset solution
- */
- if (!ret)
- dev_warn(dev->adev->dev,
- "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
- client_id);
- else
- dev_warn(dev->adev->dev,
- "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
- client_id);
-
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
-}
-
static bool event_interrupt_isr_v10(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
@@ -332,11 +275,6 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
WGP_ID),
sq_intr_err_type);
- if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
- sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
- event_interrupt_poison_consumption(dev, pasid, source_id);
- return;
- }
break;
default:
break;
@@ -362,38 +300,14 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_SDMA7) {
if (source_id == SOC15_INTSRC_SDMA_TRAP) {
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
- } else if (source_id == SOC15_INTSRC_SDMA_ECC) {
- event_interrupt_poison_consumption(dev, pasid, source_id);
- return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
- uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
- uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
- int hub_inst = 0;
struct kfd_hsa_memory_exception_data exception_data;
- /* gfxhub */
- if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
- hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
- node_id);
- if (hub_inst < 0)
- hub_inst = 0;
- }
-
- /* mmhub */
- if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
- hub_inst = node_id / 4;
-
- if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
- hub_inst, vmid_type)) {
- event_interrupt_poison_consumption(dev, pasid, client_id);
- return;
- }
-
info.vmid = vmid;
info.mc_id = client_id;
info.page_addr = ih_ring_entry[4] |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index f524a55eee11..b3f988b275a8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -330,11 +330,14 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
- KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
- kfd_set_dbg_ev_from_interrupt(dev, pasid,
- KFD_CTXID0_DOORBELL_ID(context_id0),
+ KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0))) {
+ u32 doorbell_id = KFD_CTXID0_DOORBELL_ID(context_id0);
+
+ kfd_set_dbg_ev_from_interrupt(dev, pasid, doorbell_id,
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
NULL, 0);
+ kfd_dqm_suspend_bad_queue_mes(dev, pasid, doorbell_id);
+ }
/* SDMA */
else if (source_id == SOC21_INTSRC_SDMA_TRAP)
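
[Editor's note: the v11 handler now pulls the doorbell id out of context_id0
once and reuses it for both the debug event and the bad-queue suspend. A
standalone sketch of that bitfield extraction; the 10-bit field at bit 0 is an
assumed layout for illustration only — the real shift/mask come from
KFD_CTXID0_DOORBELL_ID() in the v11 interrupt headers.]

    #include <stdint.h>
    #include <stdio.h>

    /* assumed field layout, for illustration only */
    #define DOORBELL_ID_SHIFT 0
    #define DOORBELL_ID_MASK  0x3ffu

    static uint32_t ctxid0_doorbell_id(uint32_t context_id0)
    {
        return (context_id0 >> DOORBELL_ID_SHIFT) & DOORBELL_ID_MASK;
    }

    int main(void)
    {
        uint32_t context_id0 = 0x00001234;

        /* report the debug event first, then suspend the bad queue */
        printf("doorbell_id = %u\n", ctxid0_doorbell_id(context_id0));
        return 0;
    }
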
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index a9c3580be8c9..d46a13156ee9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -167,11 +167,23 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
block = AMDGPU_RAS_BLOCK__GFX;
- if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
- reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
- else
+ if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
+ /* driver mode-2 for gfx poison is only supported by
+ * pmfw 0x00557300 and onwards */
+ if (dev->adev->pm.fw_version < 0x00557300)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ } else if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ /* driver mode-2 for gfx poison is only supported by
+ * pmfw 0x05550C00 and onwards */
+ if (dev->adev->pm.fw_version < 0x05550C00)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ } else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ }
break;
case SOC15_IH_CLIENTID_VMC:
case SOC15_IH_CLIENTID_VMC1:
@@ -184,11 +196,23 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
- if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
- reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
- else
+ if (amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2)) {
+		/* driver mode-2 for sdma poison is only supported by
+ * pmfw 0x00557300 and onwards */
+ if (dev->adev->pm.fw_version < 0x00557300)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ } else if (amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) {
+		/* driver mode-2 for sdma poison is only supported by
+ * pmfw 0x05550C00 and onwards */
+ if (dev->adev->pm.fw_version < 0x05550C00)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ } else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ }
break;
default:
dev_warn(dev->adev->dev,
@@ -431,25 +455,9 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
- uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
- uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
- int hub_inst = 0;
struct kfd_hsa_memory_exception_data exception_data;
- /* gfxhub */
- if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
- hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
- node_id);
- if (hub_inst < 0)
- hub_inst = 0;
- }
-
- /* mmhub */
- if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
- hub_inst = node_id / 4;
-
- if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
- hub_inst, vmid_type)) {
+ if (source_id == SOC15_INTSRC_VMC_UTCL2_POISON) {
event_interrupt_poison_consumption_v9(dev, pasid, client_id);
return;
}
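
[Editor's note: the two new ladders reduce to one rule — driver mode-2 reset
needs a minimum pmfw version that differs per IP. A standalone sketch of that
selection, using the thresholds quoted in the hunks above.]

    #include <stdint.h>
    #include <stdio.h>

    enum reset_mode { MODE1_RESET = 1, MODE2_RESET = 2 };

    static enum reset_mode pick_reset_mode(uint32_t fw_version,
                                           uint32_t min_mode2_fw)
    {
        /* older pmfw cannot do driver mode-2, fall back to mode-1 */
        return fw_version < min_mode2_fw ? MODE1_RESET : MODE2_RESET;
    }

    int main(void)
    {
        /* GC 9.4.3 / SDMA 4.4.2 threshold 0x00557300,
         * GC 9.4.4 / SDMA 4.4.5 threshold 0x05550C00 */
        printf("%d\n", pick_reset_mode(0x00557250, 0x00557300)); /* 1 */
        printf("%d\n", pick_reset_mode(0x05550C00, 0x05550C00)); /* 2 */
        return 0;
    }
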
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 50a81da43ce1..d9ae854b6908 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -225,7 +225,7 @@ void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, &mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj);
} else {
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
index d163d92a692f..2b72d5b4949b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -341,6 +341,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdmax_rlcx_doorbell_offset =
q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+ m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
+ << SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
+ & SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;
+
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 66c73825c0a0..84e8ea3a8a0c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -321,8 +321,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
struct v9_mqd *m = (struct v9_mqd *)mqd;
+ uint32_t doorbell_id = m->queue_doorbell_id0;
- return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
+ m->queue_doorbell_id0 = 0;
+
+ return kfd_check_hiq_mqd_doorbell_id(mm->dev, doorbell_id, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
@@ -624,6 +627,7 @@ static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd)
m = get_mqd(mqd + hiq_mqd_size * inst);
ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev,
m->queue_doorbell_id0, inst);
+ m->queue_doorbell_id0 = 0;
++inst;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 00776f08351c..1f9f5bfeaf86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -37,11 +37,14 @@ static int pm_map_process_v9(struct packet_manager *pm,
struct kfd_node *kfd = pm->dqm->dev;
struct kfd_process_device *pdd =
container_of(qpd, struct kfd_process_device, qpd);
+ struct amdgpu_device *adev = kfd->adev;
packet = (struct pm4_mes_map_process *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
+ if (adev->enforce_isolation[kfd->node_id])
+ packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
@@ -89,14 +92,18 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
struct pm4_mes_map_process_aldebaran *packet;
uint64_t vm_page_table_base_addr = qpd->page_table_base;
struct kfd_dev *kfd = pm->dqm->dev->kfd;
+ struct kfd_node *knode = pm->dqm->dev;
struct kfd_process_device *pdd =
container_of(qpd, struct kfd_process_device, qpd);
int i;
+ struct amdgpu_device *adev = kfd->adev;
packet = (struct pm4_mes_map_process_aldebaran *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process_aldebaran));
+ if (adev->enforce_isolation[knode->node_id])
+ packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
@@ -144,17 +151,22 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
int concurrent_proc_cnt = 0;
struct kfd_node *kfd = pm->dqm->dev;
+ struct amdgpu_device *adev = kfd->adev;
/* Determine the number of processes to map together to HW:
* it can not exceed the number of VMIDs available to the
* scheduler, and it is determined by the smaller of the number
* of processes in the runlist and kfd module parameter
* hws_max_conc_proc.
+	 * However, if enforce_isolation is set (run the LDS/VGPR/SGPR
+	 * cleaner shader between process switches), enable single-process
+	 * mode in HWS.
* Note: the arbitration between the number of VMIDs and
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- concurrent_proc_cnt = min(pm->dqm->processes_count,
+ concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ?
+ 1 : min(pm->dqm->processes_count,
kfd->max_proc_per_quantum);
packet = (struct pm4_mes_runlist *)buffer;
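
[Editor's note: the isolation case collapses the usual min() to one process
per runlist, since the cleaner shader must run between process switches. A
standalone model of the decision with made-up counts.]

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* mirrors the ternary in pm_runlist_v9() */
    static int concurrent_proc_cnt(int enforce_isolation, int processes_count,
                                   int max_proc_per_quantum)
    {
        return enforce_isolation ?
                1 : min_int(processes_count, max_proc_per_quantum);
    }

    int main(void)
    {
        printf("%d\n", concurrent_proc_cnt(0, 5, 2)); /* 2 */
        printf("%d\n", concurrent_proc_cnt(1, 5, 2)); /* 1 */
        return 0;
    }
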
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index 8b6b2bd5c148..cd8611401a66 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -145,8 +145,9 @@ struct pm4_mes_map_process {
union {
struct {
- uint32_t pasid:16;
- uint32_t reserved1:2;
+ uint32_t pasid:16; /* 0 - 15 */
+ uint32_t reserved1:1; /* 16 */
+ uint32_t exec_cleaner_shader:1; /* 17 */
uint32_t debug_vmid:4;
uint32_t new_debug:1;
uint32_t reserved2:1;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h
index 38f5cb6a222a..e0ed62c4ade0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h
@@ -37,7 +37,7 @@ struct pm4_mes_map_process_aldebaran {
struct {
uint32_t pasid:16; /* 0 - 15 */
uint32_t single_memops:1; /* 16 */
- uint32_t reserved1:1; /* 17 */
+ uint32_t exec_cleaner_shader:1; /* 17 */
uint32_t debug_vmid:4; /* 18 - 21 */
uint32_t new_debug:1; /* 22 */
uint32_t tmz:1; /* 23 */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 2b3ec92981e8..d6530febabad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -310,6 +310,10 @@ struct kfd_node {
struct kfd_local_mem_info local_mem_info;
struct kfd_dev *kfd;
+
+ /* Track per device allocated watch points */
+ uint32_t alloc_watch_ids;
+ spinlock_t watch_points_lock;
};
struct kfd_dev {
@@ -362,10 +366,6 @@ struct kfd_dev {
struct kfd_node *nodes[MAX_KFD_NODES];
unsigned int num_nodes;
- /* Track per device allocated watch points */
- uint32_t alloc_watch_ids;
- spinlock_t watch_points_lock;
-
/* Kernel doorbells for KFD device */
struct amdgpu_bo *doorbells;
@@ -414,13 +414,16 @@ enum kfd_unmap_queues_filter {
* @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
*
* @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
+ *
+ * @KFD_QUEUE_TYPE_SDMA_BY_ENG_ID: SDMA user mode queue with target SDMA engine ID.
*/
enum kfd_queue_type {
KFD_QUEUE_TYPE_COMPUTE,
KFD_QUEUE_TYPE_SDMA,
KFD_QUEUE_TYPE_HIQ,
KFD_QUEUE_TYPE_DIQ,
- KFD_QUEUE_TYPE_SDMA_XGMI
+ KFD_QUEUE_TYPE_SDMA_XGMI,
+ KFD_QUEUE_TYPE_SDMA_BY_ENG_ID
};
enum kfd_queue_format {
@@ -494,8 +497,8 @@ struct queue_properties {
uint64_t queue_size;
uint32_t priority;
uint32_t queue_percent;
- uint32_t *read_ptr;
- uint32_t *write_ptr;
+ void __user *read_ptr;
+ void __user *write_ptr;
void __iomem *doorbell_ptr;
uint32_t doorbell_off;
bool is_interop;
@@ -522,6 +525,12 @@ struct queue_properties {
uint64_t tba_addr;
uint64_t tma_addr;
uint64_t exception_status;
+
+ struct amdgpu_bo *wptr_bo;
+ struct amdgpu_bo *rptr_bo;
+ struct amdgpu_bo *ring_bo;
+ struct amdgpu_bo *eop_buf_bo;
+ struct amdgpu_bo *cwsr_bo;
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
@@ -604,7 +613,7 @@ struct queue {
uint64_t gang_ctx_gpu_addr;
void *gang_ctx_cpu_ptr;
- struct amdgpu_bo *wptr_bo;
+ struct amdgpu_bo *wptr_bo_gart;
};
enum KFD_MQD_TYPE {
@@ -837,6 +846,9 @@ struct kfd_process_device {
void *proc_ctx_bo;
uint64_t proc_ctx_gpu_addr;
void *proc_ctx_cpu_ptr;
+
+ /* Tracks queue reset status */
+ bool has_reset_queue;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -854,6 +866,14 @@ struct svm_range_list {
struct delayed_work restore_work;
DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
struct task_struct *faulting_task;
+	/* checkpoint ts decides if page fault recovery needs to be dropped */
+ uint64_t checkpoint_ts[MAX_GPU_INSTANCE];
+
+ /* Default granularity to use in buffer migration
+ * and restoration of backing memory while handling
+ * recoverable page faults
+ */
+ uint8_t default_granularity;
};
/* Process data */
@@ -1284,6 +1304,15 @@ int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
+int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
+ u64 expected_size);
+void kfd_queue_buffer_put(struct amdgpu_bo **bo);
+int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
+int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
+void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo);
+int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd,
+ struct queue_properties *properties);
+void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_node *dev);
@@ -1303,6 +1332,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
+int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id);
/* Process Queue Manager */
struct process_queue_node {
@@ -1320,7 +1350,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
- struct amdgpu_bo *wptr_bo,
const struct kfd_criu_queue_priv_data *q_data,
const void *restore_mqd,
const void *restore_ctl_stack,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 17e42161b015..d07acf1b2f93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -270,6 +270,11 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
struct kfd_node *dev = NULL;
struct kfd_process *proc = NULL;
struct kfd_process_device *pdd = NULL;
+ int i;
+ struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES];
+ u32 queue_format;
+
+ memset(cu_occupancy, 0x0, sizeof(cu_occupancy));
pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
dev = pdd->dev;
@@ -287,8 +292,29 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
/* Collect wave count from device if it supports */
wave_cnt = 0;
max_waves_per_cu = 0;
- dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
- &max_waves_per_cu, 0);
+
+ /*
+ * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
+	 * For AQL queues, because of cooperative dispatch we multiply the wave
+	 * count by the number of XCCs in the partition to get the total wave
+	 * count across all XCCs in the partition.
+	 * For PM4 queues, there is no cooperative dispatch, so wave_cnt stays as is.
+ */
+ dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy,
+ &max_waves_per_cu, ffs(dev->xcc_mask) - 1);
+
+ for (i = 0; i < AMDGPU_MAX_QUEUES; i++) {
+ if (cu_occupancy[i].wave_cnt != 0 &&
+ kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd,
+ cu_occupancy[i].doorbell_off,
+ &queue_format)) {
+ if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4))
+ wave_cnt += cu_occupancy[i].wave_cnt;
+ else
+ wave_cnt += (NUM_XCC(dev->xcc_mask) *
+ cu_occupancy[i].wave_cnt);
+ }
+ }
/* Translate wave count to number of compute units */
cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
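
[Editor's note: a standalone model of the aggregation above, with made-up
numbers — AQL waves are scaled by the XCC count, PM4 waves are not, and the
total is rounded up to compute units.]

    #include <stdio.h>

    int main(void)
    {
        int num_xcc = 4, max_waves_per_cu = 32;
        int aql_waves = 10, pm4_waves = 7;

        int wave_cnt = pm4_waves + num_xcc * aql_waves;  /* 7 + 40 = 47 */
        /* same ceiling division as the cu_cnt line above */
        int cu_cnt = (wave_cnt + max_waves_per_cu - 1) / max_waves_per_cu;

        printf("wave_cnt=%d cu_cnt=%d\n", wave_cnt, cu_cnt); /* 47, 2 */
        return 0;
    }
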
@@ -1048,7 +1074,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
if (pdd->dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
- pdd->proc_ctx_bo);
+ &pdd->proc_ctx_bo);
/*
* before destroying pdd, make sure to report availability
* for auto suspend
@@ -1851,6 +1877,8 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
goto fail;
}
n_evicted++;
+
+ pdd->dev->dqm->is_hws_hang = false;
}
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 21f5a1fb3bf8..01b960b15274 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -204,19 +204,23 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
}
if (dev->kfd->shared_resources.enable_mes) {
- amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo);
- if (pqn->q->wptr_bo)
- amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo_gart);
}
}
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
+ struct kfd_process_device *pdd;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
- if (pqn->q)
+ if (pqn->q) {
+ pdd = kfd_get_process_device_data(pqn->q->device, pqm->process);
+ kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
+ kfd_queue_release_buffers(pdd, &pqn->q->properties);
pqm_clean_queue_resource(pqm, pqn);
+ }
kfd_procfs_del_queue(pqn->q);
uninit_queue(pqn->q);
@@ -231,8 +235,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_node *dev, struct queue **q,
struct queue_properties *q_properties,
- struct file *f, struct amdgpu_bo *wptr_bo,
- unsigned int qid)
+ struct file *f, unsigned int qid)
{
int retval;
@@ -263,12 +266,32 @@ static int init_user_queue(struct process_queue_manager *pqm,
goto cleanup;
}
memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
- (*q)->wptr_bo = wptr_bo;
+
+	/* Starting with GFX11, wptr BOs must be mapped to GART so that MES can
+	 * detect work on unmapped queues under usermode queue oversubscription
+	 * (no aggregated doorbell)
+ */
+ if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
+ >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
+ if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
+ pr_err("Queue memory allocated to wrong device\n");
+ retval = -EINVAL;
+ goto free_gang_ctx_bo;
+ }
+
+ retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
+ &(*q)->wptr_bo_gart);
+ if (retval) {
+ pr_err("Failed to map wptr bo to GART\n");
+ goto free_gang_ctx_bo;
+ }
+ }
}
pr_debug("PQM After init queue");
return 0;
+free_gang_ctx_bo:
+	amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
cleanup:
uninit_queue(*q);
*q = NULL;
@@ -280,7 +303,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
- struct amdgpu_bo *wptr_bo,
const struct kfd_criu_queue_priv_data *q_data,
const void *restore_mqd,
const void *restore_ctl_stack,
@@ -345,13 +367,14 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
+ case KFD_QUEUE_TYPE_SDMA_BY_ENG_ID:
/* SDMA queues are always allocated statically no matter
* which scheduler mode is used. We also do not need to
* check whether a SDMA queue can be allocated here, because
* allocate_sdma_queue() in create_queue() has the
* corresponding check logic.
*/
- retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -372,7 +395,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -490,7 +513,10 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
}
if (pqn->q) {
- kfd_procfs_del_queue(pqn->q);
+ retval = kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
+ if (retval)
+ goto err_destroy_queue;
+
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
@@ -500,7 +526,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval != -ETIME)
goto err_destroy_queue;
}
-
+ kfd_procfs_del_queue(pqn->q);
+ kfd_queue_release_buffers(pdd, &pqn->q->properties);
pqm_clean_queue_resource(pqm, pqn);
uninit_queue(pqn->q);
}
@@ -524,11 +551,42 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm,
struct process_queue_node *pqn;
pqn = get_queue_by_qid(pqm, qid);
- if (!pqn) {
+ if (!pqn || !pqn->q) {
pr_debug("No queue %d exists for update operation\n", qid);
return -EFAULT;
}
+ /*
+	 * An update with a NULL ring address is used to disable the queue
+ */
+ if (p->queue_address && p->queue_size) {
+ struct kfd_process_device *pdd;
+ struct amdgpu_vm *vm;
+ struct queue *q = pqn->q;
+ int err;
+
+ pdd = kfd_get_process_device_data(q->device, q->process);
+ if (!pdd)
+ return -ENODEV;
+ vm = drm_priv_to_vm(pdd->drm_priv);
+ err = amdgpu_bo_reserve(vm->root.bo, false);
+ if (err)
+ return err;
+
+ if (kfd_queue_buffer_get(vm, (void *)p->queue_address, &p->ring_bo,
+ p->queue_size)) {
+ pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
+ p->queue_address, p->queue_size);
+			amdgpu_bo_unreserve(vm->root.bo);
+			return -EFAULT;
+ }
+
+ kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo);
+ kfd_queue_buffer_put(&pqn->q->properties.ring_bo);
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ pqn->q->properties.ring_bo = p->ring_bo;
+ }
+
pqn->q->properties.queue_address = p->queue_address;
pqn->q->properties.queue_size = p->queue_size;
pqn->q->properties.queue_percent = p->queue_percent;
@@ -971,7 +1029,7 @@ int kfd_criu_restore_queue(struct kfd_process *p,
print_queue_properties(&qp);
- ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack,
+ ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack,
NULL);
if (ret) {
pr_err("Failed to create new queue err:%d\n", ret);
@@ -988,6 +1046,7 @@ exit:
pr_debug("Queue id %d was restored successfully\n", queue_id);
kfree(q_data);
+ kfree(q_extra_data);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 0f6992b1895c..ad29634f8b44 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include "kfd_priv.h"
+#include "kfd_topology.h"
+#include "kfd_svm.h"
void print_queue_properties(struct queue_properties *q)
{
@@ -82,3 +84,374 @@ void uninit_queue(struct queue *q)
{
kfree(q);
}
+
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+
+static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size)
+{
+ struct kfd_process *p = pdd->process;
+ struct list_head update_list;
+ struct svm_range *prange;
+ int ret = -EINVAL;
+
+ INIT_LIST_HEAD(&update_list);
+ addr >>= PAGE_SHIFT;
+ size >>= PAGE_SHIFT;
+
+ mutex_lock(&p->svms.lock);
+
+ /*
+	 * The range may be split into multiple svm pranges aligned to the granularity boundary.
+ */
+ while (size) {
+ uint32_t gpuid, gpuidx;
+ int r;
+
+ prange = svm_range_from_addr(&p->svms, addr, NULL);
+ if (!prange)
+ break;
+
+ if (!prange->mapped_to_gpu)
+ break;
+
+ r = kfd_process_gpuid_from_node(p, pdd->dev, &gpuid, &gpuidx);
+ if (r < 0)
+ break;
+ if (!test_bit(gpuidx, prange->bitmap_access) &&
+ !test_bit(gpuidx, prange->bitmap_aip))
+ break;
+
+ if (!(prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED))
+ break;
+
+ list_add(&prange->update_list, &update_list);
+
+ if (prange->last - prange->start + 1 >= size) {
+ size = 0;
+ break;
+ }
+
+ size -= prange->last - prange->start + 1;
+ addr += prange->last - prange->start + 1;
+ }
+ if (size) {
+ pr_debug("[0x%llx 0x%llx] not registered\n", addr, addr + size - 1);
+ goto out_unlock;
+ }
+
+ list_for_each_entry(prange, &update_list, update_list)
+ atomic_inc(&prange->queue_refcount);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&p->svms.lock);
+ return ret;
+}
+
+static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size)
+{
+ struct kfd_process *p = pdd->process;
+ struct svm_range *prange, *pchild;
+ struct interval_tree_node *node;
+ unsigned long last;
+
+ addr >>= PAGE_SHIFT;
+ last = addr + (size >> PAGE_SHIFT) - 1;
+
+ mutex_lock(&p->svms.lock);
+
+ node = interval_tree_iter_first(&p->svms.objects, addr, last);
+ while (node) {
+ struct interval_tree_node *next_node;
+ unsigned long next_start;
+
+ prange = container_of(node, struct svm_range, it_node);
+ next_node = interval_tree_iter_next(node, addr, last);
+ next_start = min(node->last, last) + 1;
+
+ if (atomic_add_unless(&prange->queue_refcount, -1, 0)) {
+ list_for_each_entry(pchild, &prange->child_list, child_list)
+ atomic_add_unless(&pchild->queue_refcount, -1, 0);
+ }
+
+ node = next_node;
+ addr = next_start;
+ }
+
+ mutex_unlock(&p->svms.lock);
+}
+#else
+
+static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size)
+{
+ return -EINVAL;
+}
+
+static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size)
+{
+}
+
+#endif
+
+int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
+ u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ u64 user_addr;
+ u64 size;
+
+ user_addr = (u64)addr >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!mapping)
+ goto out_err;
+
+ if (user_addr != mapping->start ||
+ (size != 0 && user_addr + size - 1 != mapping->last)) {
+ pr_debug("expected size 0x%llx not equal to mapping addr 0x%llx size 0x%llx\n",
+ expected_size, mapping->start << AMDGPU_GPU_PAGE_SHIFT,
+ (mapping->last - mapping->start + 1) << AMDGPU_GPU_PAGE_SHIFT);
+ goto out_err;
+ }
+
+ *pbo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ mapping->bo_va->queue_refcount++;
+ return 0;
+
+out_err:
+ *pbo = NULL;
+ return -EINVAL;
+}
+
+/* FIXME: remove this function, just call amdgpu_bo_unref directly */
+void kfd_queue_buffer_put(struct amdgpu_bo **bo)
+{
+ amdgpu_bo_unref(bo);
+}
+
+int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
+{
+ struct kfd_topology_device *topo_dev;
+ struct amdgpu_vm *vm;
+ u32 total_cwsr_size;
+ int err;
+
+ topo_dev = kfd_topology_device_by_id(pdd->dev->id);
+ if (!topo_dev)
+ return -EINVAL;
+
+ vm = drm_priv_to_vm(pdd->drm_priv);
+ err = amdgpu_bo_reserve(vm->root.bo, false);
+ if (err)
+ return err;
+
+ err = kfd_queue_buffer_get(vm, properties->write_ptr, &properties->wptr_bo, PAGE_SIZE);
+ if (err)
+ goto out_err_unreserve;
+
+ err = kfd_queue_buffer_get(vm, properties->read_ptr, &properties->rptr_bo, PAGE_SIZE);
+ if (err)
+ goto out_err_unreserve;
+
+ err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
+ &properties->ring_bo, properties->queue_size);
+ if (err)
+ goto out_err_unreserve;
+
+ /* only compute queue requires EOP buffer and CWSR area */
+ if (properties->type != KFD_QUEUE_TYPE_COMPUTE)
+ goto out_unreserve;
+
+ /* EOP buffer is not required for all ASICs */
+ if (properties->eop_ring_buffer_address) {
+ if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
+ pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
+ properties->eop_buf_bo->tbo.base.size,
+ topo_dev->node_props.eop_buffer_size);
+ err = -EINVAL;
+ goto out_err_unreserve;
+ }
+ err = kfd_queue_buffer_get(vm, (void *)properties->eop_ring_buffer_address,
+ &properties->eop_buf_bo,
+ properties->eop_ring_buffer_size);
+ if (err)
+ goto out_err_unreserve;
+ }
+
+ if (properties->ctl_stack_size != topo_dev->node_props.ctl_stack_size) {
+ pr_debug("queue ctl stack size 0x%x not equal to node ctl stack size 0x%x\n",
+ properties->ctl_stack_size,
+ topo_dev->node_props.ctl_stack_size);
+ err = -EINVAL;
+ goto out_err_unreserve;
+ }
+
+ if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
+ pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+ properties->ctx_save_restore_area_size,
+ topo_dev->node_props.cwsr_size);
+ err = -EINVAL;
+ goto out_err_unreserve;
+ }
+
+ total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
+ * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
+
+ err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
+ &properties->cwsr_bo, total_cwsr_size);
+ if (!err)
+ goto out_unreserve;
+
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ err = kfd_queue_buffer_svm_get(pdd, properties->ctx_save_restore_area_address,
+ total_cwsr_size);
+ if (err)
+ goto out_err_release;
+
+ return 0;
+
+out_unreserve:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+
+out_err_unreserve:
+ amdgpu_bo_unreserve(vm->root.bo);
+out_err_release:
+ /* FIXME: make a _locked version of this that can be called before
+ * dropping the VM reservation.
+ */
+ kfd_queue_unref_bo_vas(pdd, properties);
+ kfd_queue_release_buffers(pdd, properties);
+ return err;
+}
+
+int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
+{
+ struct kfd_topology_device *topo_dev;
+ u32 total_cwsr_size;
+
+ kfd_queue_buffer_put(&properties->wptr_bo);
+ kfd_queue_buffer_put(&properties->rptr_bo);
+ kfd_queue_buffer_put(&properties->ring_bo);
+ kfd_queue_buffer_put(&properties->eop_buf_bo);
+ kfd_queue_buffer_put(&properties->cwsr_bo);
+
+ topo_dev = kfd_topology_device_by_id(pdd->dev->id);
+ if (!topo_dev)
+ return -EINVAL;
+ total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
+ * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
+
+ kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
+ return 0;
+}
+
+void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo)
+{
+ if (*bo) {
+ struct amdgpu_bo_va *bo_va;
+
+ bo_va = amdgpu_vm_bo_find(vm, *bo);
+ if (bo_va && bo_va->queue_refcount)
+ bo_va->queue_refcount--;
+ }
+}
+
+int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd,
+ struct queue_properties *properties)
+{
+ struct amdgpu_vm *vm;
+ int err;
+
+ vm = drm_priv_to_vm(pdd->drm_priv);
+ err = amdgpu_bo_reserve(vm->root.bo, false);
+ if (err)
+ return err;
+
+ kfd_queue_unref_bo_va(vm, &properties->wptr_bo);
+ kfd_queue_unref_bo_va(vm, &properties->rptr_bo);
+ kfd_queue_unref_bo_va(vm, &properties->ring_bo);
+ kfd_queue_unref_bo_va(vm, &properties->eop_buf_bo);
+ kfd_queue_unref_bo_va(vm, &properties->cwsr_bo);
+
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+}
+
+#define SGPR_SIZE_PER_CU 0x4000
+#define LDS_SIZE_PER_CU 0x10000
+#define HWREG_SIZE_PER_CU 0x1000
+#define DEBUGGER_BYTES_ALIGN 64
+#define DEBUGGER_BYTES_PER_WAVE 32
+
+static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
+{
+ u32 vgpr_size = 0x40000;
+
+ if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
+ gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
+ gfxv == 90008) /* GFX_VERSION_ARCTURUS */
+ vgpr_size = 0x80000;
+ else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */
+ gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */
+ gfxv == 120000 || /* GFX_VERSION_GFX1200 */
+ gfxv == 120001) /* GFX_VERSION_GFX1201 */
+ vgpr_size = 0x60000;
+
+ return vgpr_size;
+}
+
+#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv) \
+ (kfd_get_vgpr_size_per_cu(gfxv) + SGPR_SIZE_PER_CU +\
+ LDS_SIZE_PER_CU + HWREG_SIZE_PER_CU)
+
+#define CNTL_STACK_BYTES_PER_WAVE(gfxv) \
+ ((gfxv) >= 100100 ? 12 : 8) /* GFX_VERSION_NAVI10*/
+
+#define SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER 40
+
+void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
+{
+ struct kfd_node_properties *props = &dev->node_props;
+ u32 gfxv = props->gfx_target_version;
+ u32 ctl_stack_size;
+ u32 wg_data_size;
+ u32 wave_num;
+ u32 cu_num;
+
+ if (gfxv < 80001) /* GFX_VERSION_CARRIZO */
+ return;
+
+ cu_num = props->simd_count / props->simd_per_cu / NUM_XCC(dev->gpu->xcc_mask);
+ wave_num = (gfxv < 100100) ? /* GFX_VERSION_NAVI10 */
+ min(cu_num * 40, props->array_count / props->simd_arrays_per_engine * 512)
+ : cu_num * 32;
+
+ wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv), PAGE_SIZE);
+ ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8;
+ ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size,
+ PAGE_SIZE);
+
+ if ((gfxv / 10000 * 10000) == 100000) {
+ /* HW design limits control stack size to 0x7000.
+ * This is insufficient for theoretical PM4 cases
+ * but sufficient for AQL, limited by SPI events.
+ */
+ ctl_stack_size = min(ctl_stack_size, 0x7000);
+ }
+
+ props->ctl_stack_size = ctl_stack_size;
+ props->debug_memory_size = ALIGN(wave_num * DEBUGGER_BYTES_PER_WAVE, DEBUGGER_BYTES_ALIGN);
+ props->cwsr_size = ctl_stack_size + wg_data_size;
+
+ if (gfxv == 80002) /* GFX_VERSION_TONGA */
+ props->eop_buffer_size = 0x8000;
+ else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */
+ props->eop_buffer_size = 4096;
+ else if (gfxv >= 80000)
+ props->eop_buffer_size = 4096;
+}
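
[Editor's note: to see the sizing in one place, a standalone walk-through of
kfd_queue_ctx_save_restore_size() for a hypothetical gfxv = 110000 node with
48 CUs and one XCC; the per-CU constants are the ones defined above, the node
itself is made up.]

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))
    #define PAGE_SZ 4096u

    int main(void)
    {
        uint32_t cu_num = 48;
        uint32_t vgpr = 0x60000, sgpr = 0x4000, lds = 0x10000, hwreg = 0x1000;
        uint32_t wave_num = cu_num * 32;              /* gfxv >= 100100 path */

        uint32_t wg_data = ALIGN_UP(cu_num * (vgpr + sgpr + lds + hwreg), PAGE_SZ);
        uint32_t ctl = wave_num * 12 + 8;             /* 12 bytes/wave on gfx10+ */
        ctl = ALIGN_UP(40 + ctl, PAGE_SZ);            /* prepend save-area header */

        /* no 0x7000 clamp here: that applies to gfx10.x only */
        printf("ctl_stack=0x%x cwsr=0x%x\n", ctl, ctl + wg_data);
        return 0;
    }
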
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index ea6a8e43bd5b..de8b9abf7afc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -235,17 +235,16 @@ void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset,
amdgpu_reset_get_desc(reset_context, reset_cause,
sizeof(reset_cause));
- kfd_smi_event_add(0, dev, event, "%x %s\n",
- dev->reset_seq_num,
- reset_cause);
+ kfd_smi_event_add(0, dev, event, KFD_EVENT_FMT_UPDATE_GPU_RESET(
+ dev->reset_seq_num, reset_cause));
}
void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
uint64_t throttle_bitmask)
{
- kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n",
+ kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, KFD_EVENT_FMT_THERMAL_THROTTLING(
throttle_bitmask,
- amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
+ amdgpu_dpm_get_thermal_throttling_counter(dev->adev)));
}
void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
@@ -256,8 +255,8 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
if (task_info) {
/* Report VM faults from user applications, not retry from kernel */
if (task_info->pid)
- kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, "%x:%s\n",
- task_info->pid, task_info->task_name);
+ kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT(
+ task_info->pid, task_info->task_name));
amdgpu_vm_put_task_info(task_info);
}
}
@@ -267,16 +266,16 @@ void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
ktime_t ts)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START,
- "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid,
- address, node->id, write_fault ? 'W' : 'R');
+ KFD_EVENT_FMT_PAGEFAULT_START(ktime_to_ns(ts), pid,
+ address, node->id, write_fault ? 'W' : 'R'));
}
void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
unsigned long address, bool migration)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END,
- "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(),
- pid, address, node->id, migration ? 'M' : 'U');
+ KFD_EVENT_FMT_PAGEFAULT_END(ktime_get_boottime_ns(),
+ pid, address, node->id, migration ? 'M' : 'U'));
}
void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
@@ -286,9 +285,9 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START,
- "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n",
+ KFD_EVENT_FMT_MIGRATE_START(
ktime_get_boottime_ns(), pid, start, end - start,
- from, to, prefetch_loc, preferred_loc, trigger);
+ from, to, prefetch_loc, preferred_loc, trigger));
}
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
@@ -296,24 +295,24 @@ void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
uint32_t from, uint32_t to, uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
- "%lld -%d @%lx(%lx) %x->%x %d\n",
+ KFD_EVENT_FMT_MIGRATE_END(
ktime_get_boottime_ns(), pid, start, end - start,
- from, to, trigger);
+ from, to, trigger));
}
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION,
- "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid,
- node->id, trigger);
+ KFD_EVENT_FMT_QUEUE_EVICTION(ktime_get_boottime_ns(), pid,
+ node->id, trigger));
}
void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
- "%lld -%d %x\n", ktime_get_boottime_ns(), pid,
- node->id);
+ KFD_EVENT_FMT_QUEUE_RESTORE(ktime_get_boottime_ns(), pid,
+ node->id, 0));
}
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
@@ -330,8 +329,8 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
kfd_smi_event_add(p->lead_thread->pid, pdd->dev,
KFD_SMI_EVENT_QUEUE_RESTORE,
- "%lld -%d %x %c\n", ktime_get_boottime_ns(),
- p->lead_thread->pid, pdd->dev->id, 'R');
+ KFD_EVENT_FMT_QUEUE_RESTORE(ktime_get_boottime_ns(),
+ p->lead_thread->pid, pdd->dev->id, 'R'));
}
kfd_unref_process(p);
}
@@ -341,8 +340,8 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU,
- "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(),
- pid, address, last - address + 1, node->id, trigger);
+ KFD_EVENT_FMT_UNMAP_FROM_GPU(ktime_get_boottime_ns(),
+ pid, address, last - address + 1, node->id, trigger));
}
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index bd9c2921e0dc..04e746923697 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -309,12 +309,13 @@ static void svm_range_free(struct svm_range *prange, bool do_unmap)
}
static void
-svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
- uint8_t *granularity, uint32_t *flags)
+svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location,
+ int32_t *prefetch_loc, uint8_t *granularity,
+ uint32_t *flags)
{
*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
- *granularity = 9;
+ *granularity = svms->default_granularity;
*flags =
KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}
@@ -358,7 +359,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
MAX_GPU_INSTANCE);
- svm_range_set_default_attributes(&prange->preferred_loc,
+ svm_range_set_default_attributes(svms, &prange->preferred_loc,
&prange->prefetch_loc,
&prange->granularity, &prange->flags);
@@ -1051,6 +1052,7 @@ svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
new->mapped_to_gpu = old->mapped_to_gpu;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
+ atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
return 0;
}
@@ -1992,6 +1994,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
new->vram_pages = old->vram_pages;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
+ atomic_set(&new->queue_refcount, atomic_read(&old->queue_refcount));
return new;
}
@@ -2260,16 +2263,10 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
struct kfd_process_device *pdd;
struct kfd_process *p;
- int drain;
uint32_t i;
p = container_of(svms, struct kfd_process, svms);
-restart:
- drain = atomic_read(&svms->drain_pagefaults);
- if (!drain)
- return;
-
for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
if (!pdd)
@@ -2289,8 +2286,6 @@ restart:
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
- if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
- goto restart;
}
static void svm_range_deferred_list_work(struct work_struct *work)
@@ -2312,17 +2307,8 @@ static void svm_range_deferred_list_work(struct work_struct *work)
prange->start, prange->last, prange->work_item.op);
mm = prange->work_item.mm;
-retry:
- mmap_write_lock(mm);
- /* Checking for the need to drain retry faults must be inside
- * mmap write lock to serialize with munmap notifiers.
- */
- if (unlikely(atomic_read(&svms->drain_pagefaults))) {
- mmap_write_unlock(mm);
- svm_range_drain_retry_fault(svms);
- goto retry;
- }
+ mmap_write_lock(mm);
/* Remove from deferred_list must be inside mmap write lock, for
* two race cases:
@@ -2443,6 +2429,17 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
struct kfd_process *p;
unsigned long s, l;
bool unmap_parent;
+ uint32_t i;
+
+ if (atomic_read(&prange->queue_refcount)) {
+ int r;
+
+ pr_warn("Freeing queue vital buffer 0x%lx, queue evicted\n",
+ prange->start << PAGE_SHIFT);
+ r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
+ if (r)
+ pr_debug("failed %d to quiesce KFD queues\n", r);
+ }
p = kfd_lookup_process_by_mm(mm);
if (!p)
@@ -2452,11 +2449,38 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
prange, prange->start, prange->last, start, last);
- /* Make sure pending page faults are drained in the deferred worker
- * before the range is freed to avoid straggler interrupts on
- * unmapped memory causing "phantom faults".
+	/* Calculate timestamps used to decide which page faults must be
+	 * dropped or handled before unmapping pages from the gpu vm
*/
- atomic_inc(&svms->drain_pagefaults);
+ for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
+ struct kfd_process_device *pdd;
+ struct amdgpu_device *adev;
+ struct amdgpu_ih_ring *ih;
+ uint32_t checkpoint_wptr;
+
+ pdd = p->pdds[i];
+ if (!pdd)
+ continue;
+
+ adev = pdd->dev->adev;
+
+		/* Check and drain the ih1 ring if CAM is not available */
+ if (adev->irq.ih1.ring_size) {
+ ih = &adev->irq.ih1;
+ checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+ if (ih->rptr != checkpoint_wptr) {
+ svms->checkpoint_ts[i] =
+ amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
+ continue;
+ }
+ }
+
+		/* check if adev->irq.ih_soft is not empty */
+ ih = &adev->irq.ih_soft;
+ checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+ if (ih->rptr != checkpoint_wptr)
+ svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
+ }
unmap_parent = start <= prange->start && last >= prange->last;
@@ -2680,9 +2704,10 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
*is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
start_limit = max(vma->vm_start >> PAGE_SHIFT,
- (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
+ (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity));
end_limit = min(vma->vm_end >> PAGE_SHIFT,
- (unsigned long)ALIGN(addr + 1, 2UL << 8));
+ (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity));
+
/* First range that starts after the fault address */
node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
if (node) {
@@ -2897,7 +2922,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id,
- uint64_t addr, bool write_fault)
+ uint64_t addr, uint64_t ts, bool write_fault)
{
unsigned long start, last, size;
struct mm_struct *mm = NULL;
@@ -2907,7 +2932,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
ktime_t timestamp = ktime_get_boottime();
struct kfd_node *node;
int32_t best_loc;
- int32_t gpuidx = MAX_GPU_INSTANCE;
+ int32_t gpuid, gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
struct vm_area_struct *vma;
bool migration = false;
@@ -2928,11 +2953,38 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
if (atomic_read(&svms->drain_pagefaults)) {
- pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ pr_debug("page fault handling disabled, drop fault 0x%llx\n", addr);
r = 0;
goto out;
}
+ node = kfd_node_by_irq_ids(adev, node_id, vmid);
+ if (!node) {
+ pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
+ vmid);
+ r = -EFAULT;
+ goto out;
+ }
+
+ if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
+ pr_debug("failed to get gpuid/gpuidex for node_id: %d\n", node_id);
+ r = -EFAULT;
+ goto out;
+ }
+
+	/* check if this page fault timestamp is before svms->checkpoint_ts */
+ if (svms->checkpoint_ts[gpuidx] != 0) {
+ if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) {
+ pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ r = 0;
+ goto out;
+ } else
+			/* ts is now after svms->checkpoint_ts; reset svms->checkpoint_ts
+			 * to zero so a later ts wrap-around cannot give a wrong comparison
+ */
+ svms->checkpoint_ts[gpuidx] = 0;
+ }
+
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
r = -EFAULT;
@@ -2949,13 +3001,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
goto out;
}
- node = kfd_node_by_irq_ids(adev, node_id, vmid);
- if (!node) {
- pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
- vmid);
- r = -EFAULT;
- goto out;
- }
mmap_read_lock(mm);
retry_write_locked:
mutex_lock(&svms->lock);
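
[Editor's note: the drop/keep decision hinges on a wrap-safe comparison of
48-bit IH timestamps. A standalone model of an amdgpu_ih_ts_after()-style test
(shift both stamps into the top bits and subtract as signed), which also shows
why checkpoint_ts must be reset to 0 once a newer fault is seen.]

    #include <stdint.h>
    #include <stdio.h>

    /* returns nonzero if t2 is after t1, tolerating 48-bit wrap-around */
    static int ts_after(uint64_t t1, uint64_t t2)
    {
        return (int64_t)(t2 << 16) - (int64_t)(t1 << 16) > 0;
    }

    int main(void)
    {
        uint64_t checkpoint = 0xFFFFFFFFFFF0ull; /* near the 48-bit wrap */
        uint64_t fault_ts   = 0x0000000010ull;   /* wrapped, logically newer */

        /* a fault newer than the checkpoint must be handled, not dropped */
        printf("fault after checkpoint: %d\n", ts_after(checkpoint, fault_ts));
        return 0;
    }
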
@@ -3170,8 +3215,9 @@ void svm_range_list_fini(struct kfd_process *p)
/*
* Ensure no retry fault comes in afterwards, as page fault handler will
* not find kfd process and take mm lock to recover fault.
+	 * Stop kfd page fault handling, then wait until pending page faults are drained.
*/
- atomic_inc(&p->svms.drain_pagefaults);
+ atomic_set(&p->svms.drain_pagefaults, 1);
svm_range_drain_retry_fault(&p->svms);
list_for_each_entry_safe(prange, next, &p->svms.list, list) {
@@ -3205,6 +3251,12 @@ int svm_range_list_init(struct kfd_process *p)
if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
bitmap_set(svms->bitmap_supported, i, 1);
+ /* The default granularity value cannot exceed 0x1B: 2^0x1B is the
+ * number of pages supported by a 4-level paging table
+ */
+ svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B);
+ pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity);
+
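+ /* Illustrative sketch (not part of the patch): with granularity g, a
+ * fault address is expanded to a 2^g-page aligned window; e.g. for an
+ * assumed g = 9 (2 MiB with 4 KiB pages):
+ *
+ *	start = ALIGN_DOWN(addr, 1UL << 9);
+ *	last  = ALIGN(addr + 1, 1UL << 9) - 1;
+ */
+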
return 0;
}
@@ -3732,7 +3784,7 @@ svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
node = interval_tree_iter_first(&svms->objects, start, last);
if (!node) {
pr_debug("range attrs not found return default values\n");
- svm_range_set_default_attributes(&location, &prefetch_loc,
+ svm_range_set_default_attributes(svms, &location, &prefetch_loc,
&granularity, &flags_and);
flags_or = flags_and;
if (p->xnack_enabled)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 70c1776611c4..bddd24f04669 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -137,6 +137,7 @@ struct svm_range {
DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
bool mapped_to_gpu;
+ atomic_t queue_refcount;
};
static inline void svm_range_lock(struct svm_range *prange)
@@ -173,7 +174,7 @@ int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
- uint32_t vmid, uint32_t node_id, uint64_t addr,
+ uint32_t vmid, uint32_t node_id, uint64_t addr, uint64_t ts,
bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
@@ -224,7 +225,7 @@ static inline void svm_range_list_fini(struct kfd_process *p)
static inline int svm_range_restore_pages(struct amdgpu_device *adev,
unsigned int pasid,
uint32_t client_id, uint32_t node_id,
- uint64_t addr, bool write_fault)
+ uint64_t addr, uint64_t ts, bool write_fault)
{
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 6f89b06f89d3..3871591c9aec 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -292,6 +292,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
iolink->max_bandwidth);
sysfs_show_32bit_prop(buffer, offs, "recommended_transfer_size",
iolink->rec_transfer_size);
+ sysfs_show_32bit_prop(buffer, offs, "recommended_sdma_engine_id_mask",
+ iolink->rec_sdma_eng_id_mask);
sysfs_show_32bit_prop(buffer, offs, "flags", iolink->flags);
return offs;
@@ -1265,6 +1267,54 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
}
}
+#define REC_SDMA_NUM_GPU 8
+static const int rec_sdma_eng_map[REC_SDMA_NUM_GPU][REC_SDMA_NUM_GPU] = {
+ { -1, 14, 12, 2, 4, 8, 10, 6 },
+ { 14, -1, 2, 10, 8, 4, 6, 12 },
+ { 10, 2, -1, 12, 14, 6, 4, 8 },
+ { 2, 12, 10, -1, 6, 14, 8, 4 },
+ { 4, 8, 14, 6, -1, 10, 12, 2 },
+ { 8, 4, 6, 14, 12, -1, 2, 10 },
+ { 10, 6, 4, 8, 12, 2, -1, 14 },
+ { 6, 12, 8, 4, 2, 10, 14, -1 }};
+
+static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev,
+ struct kfd_iolink_properties *outbound_link,
+ struct kfd_iolink_properties *inbound_link)
+{
+ struct kfd_node *gpu = outbound_link->gpu;
+ struct amdgpu_device *adev = gpu->adev;
+ int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu &&
+ adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&
+ kfd_get_num_xgmi_sdma_engines(gpu) >= 14 &&
+ (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8);
+
+ if (support_rec_eng) {
+ int src_socket_id = adev->gmc.xgmi.physical_node_id;
+ int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id;
+
+ outbound_link->rec_sdma_eng_id_mask =
+ 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id];
+ inbound_link->rec_sdma_eng_id_mask =
+ 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id];
+ } else {
+ int num_sdma_eng = kfd_get_num_sdma_engines(gpu);
+ int i, eng_offset = 0;
+
+ if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
+ kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) {
+ eng_offset = num_sdma_eng;
+ num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu);
+ }
+
+ for (i = 0; i < num_sdma_eng; i++) {
+ outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
+ inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
+ }
+ }
+}
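+
+ /* Worked example (socket ids assumed): for traffic from socket 2 to
+ * socket 5, rec_sdma_eng_map[2][5] == 6, so
+ * outbound_link->rec_sdma_eng_id_mask = 1 << 6 = 0x40; the reverse
+ * direction uses rec_sdma_eng_map[5][2] == 6 as well.
+ */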
+
static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
{
struct kfd_iolink_properties *link, *inbound_link;
@@ -1303,6 +1353,7 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
+ kfd_set_recommended_sdma_engines(peer_dev, link, inbound_link);
}
}
@@ -2027,7 +2078,7 @@ int kfd_topology_add_device(struct kfd_node *gpu)
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
- if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
+ if (gpu->kfd->num_nodes > 1)
dev->node_props.location_id |= dev->gpu->node_id;
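	/* Editor's note (values illustrative): pci_dev_id() packs
	 * (bus << 8) | devfn, so OR-ing the per-partition node_id into the low
	 * bits gives each KFD node of a multi-node ASIC a distinct
	 * location_id, e.g. 0xC100 | 2 = 0xC102.
	 */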
dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
@@ -2120,6 +2171,8 @@ int kfd_topology_add_device(struct kfd_node *gpu)
dev->gpu->adev->gmc.xgmi.connected_to_cpu)
dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS;
+ kfd_queue_ctx_save_restore_size(dev);
+
kfd_debug_print_topology();
kfd_notify_gpu_change(gpu_id, 1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 2d1c9d771bef..155b5c410af1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -74,6 +74,10 @@ struct kfd_node_properties {
uint32_t num_sdma_xgmi_engines;
uint32_t num_sdma_queues_per_engine;
uint32_t num_cp_queues;
+ uint32_t cwsr_size;
+ uint32_t ctl_stack_size;
+ uint32_t eop_buffer_size;
+ uint32_t debug_memory_size;
char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
@@ -121,6 +125,7 @@ struct kfd_iolink_properties {
uint32_t min_bandwidth;
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
+ uint32_t rec_sdma_eng_id_mask;
uint32_t flags;
struct kfd_node *gpu;
struct kobject *kobj;
diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
index 10138676f27f..e5c0205f2618 100644
--- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
@@ -29,6 +29,7 @@
#define SOC15_INTSRC_CP_BAD_OPCODE 183
#define SOC15_INTSRC_SQ_INTERRUPT_MSG 239
#define SOC15_INTSRC_VMC_FAULT 0
+#define SOC15_INTSRC_VMC_UTCL2_POISON 1
#define SOC15_INTSRC_SDMA_TRAP 224
#define SOC15_INTSRC_SDMA_ECC 220
#define SOC21_INTSRC_SDMA_TRAP 49
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1e069fa5211e..6e79028c5d78 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -176,6 +176,7 @@ MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
+static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
@@ -807,6 +808,20 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
}
/**
+ * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * HPD sense changes can occur during low power states and need to be
+ * reported by firmware to the driver.
+ */
+static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
+}
+
+/**
* register_dmub_notify_callback - Sets callback for DMUB notify
* @adev: amdgpu_device pointer
* @type: Type of dmub notification
@@ -877,6 +892,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
"HPD_IRQ",
"SET_CONFIGC_REPLY",
"DPIA_NOTIFICATION",
+ "HPD_SENSE_NOTIFY",
};
do {
@@ -1740,7 +1756,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
/* Send the chunk */
ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
if (ret != DMUB_STATUS_OK)
- /* No need to free bb here since it shall be done unconditionally <elsewhere> */
+ /* No need to free bb here since it shall be done in dm_sw_fini() */
return NULL;
}
@@ -1755,25 +1771,41 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
static enum dmub_ips_disable_type dm_get_default_ips_mode(
struct amdgpu_device *adev)
{
- /*
- * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
- * cause a hard hang. A fix exists for newer PMFW.
- *
- * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
- * IPS state in all cases, except for s0ix and all displays off (DPMS),
- * where IPS2 is allowed.
- *
- * When checking pmfw version, use the major and minor only.
- */
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) &&
- (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
- return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
- return DMUB_IPS_ENABLE;
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 5, 0):
+ /*
+ * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
+ * cause a hard hang. A fix exists for newer PMFW.
+ *
+ * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
+ * IPS state in all cases, except for s0ix and all displays off (DPMS),
+ * where IPS2 is allowed.
+ *
+ * When checking pmfw version, use the major and minor only.
+ */
+ if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
+ ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
+ /*
+ * Other ASICs with DCN35 that have residency issues with
+ * IPS2 in idle.
+ * We want them to use IPS2 only in display off cases.
+ */
+ ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ break;
+ case IP_VERSION(3, 5, 1):
+ ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ break;
+ default:
+ /* ASICs older than DCN35 do not have IPSs */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
+ ret = DMUB_IPS_DISABLE_ALL;
+ break;
+ }
- /* ASICs older than DCN35 do not have IPSs */
- return DMUB_IPS_DISABLE_ALL;
+ return ret;
}
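
/* Editor's sketch (hypothetical helper, not in the patch): the PMFW gate
 * above masks out the build byte and compares major/minor only:
 *
 *	static bool pmfw_older_than(u32 fw_version, u32 major_minor)
 *	{
 *		return (fw_version & 0x00FFFF00) < major_minor;
 *	}
 *
 * e.g. pmfw_older_than(0x005D6201, 0x005D6300) is true, so the IPS2
 * workaround applies.
 */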
static int amdgpu_dm_init(struct amdgpu_device *adev)
@@ -1886,6 +1918,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
+ else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
+ init_data.flags.disable_ips = DMUB_IPS_ENABLE;
else
init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
@@ -2242,7 +2280,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu);
if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
@@ -2489,8 +2527,17 @@ static int dm_sw_init(void *handle)
static int dm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct dal_allocation *da;
+
+ list_for_each_entry(da, &adev->dm.da_list, list) {
+ if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) {
+ amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+ list_del(&da->list);
+ kfree(da);
+ break;
+ }
+ }
- kfree(adev->dm.bb_from_dmub);
adev->dm.bb_from_dmub = NULL;
kfree(adev->dm.dmub_fb_info);
@@ -2592,9 +2639,9 @@ static int dm_late_init(void *handle)
static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
{
+ u8 buf[UUID_SIZE];
+ guid_t guid;
int ret;
- u8 guid[16];
- u64 tmp64;
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
@@ -2615,26 +2662,27 @@ static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (ret != 16) {
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- if (memchr_inv(guid, 0, 16) == NULL) {
- tmp64 = get_jiffies_64();
- memcpy(&guid[0], &tmp64, sizeof(u64));
- memcpy(&guid[8], &tmp64, sizeof(u64));
+ import_guid(&guid, buf);
+
+ if (guid_is_null(&guid)) {
+ guid_gen(&guid);
+ export_guid(buf, &guid);
- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, buf, sizeof(buf));
- if (ret != 16) {
+ if (ret != sizeof(buf)) {
drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
goto out_fail;
}
}
- memcpy(mgr->mst_primary->guid, guid, 16);
+ guid_copy(&mgr->mst_primary->guid, &guid);
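+
+	/* The guid_t round-trip above uses <linux/uuid.h> helpers:
+	 * import_guid() turns the raw 16 DPCD bytes into a guid_t,
+	 * guid_gen() creates a random GUID when the hub lost its own, and
+	 * export_guid() serializes it back for the DPCD write.
+	 */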
out_fail:
mutex_unlock(&mgr->lock);
@@ -3230,8 +3278,11 @@ static int dm_resume(void *handle)
drm_connector_list_iter_end(&iter);
/* Force mode set in atomic commit */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
new_crtc_state->active_changed = true;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+ }
/*
* atomic_check is expected to create the dc states. We need to release
@@ -3787,6 +3838,12 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
return -EINVAL;
}
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
+ dmub_hpd_sense_callback, true)) {
+ DRM_ERROR("amdgpu: fail to register dmub hpd sense callback");
+ return -EINVAL;
+ }
}
list_for_each_entry(connector,
@@ -4428,6 +4485,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2)
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
@@ -4442,6 +4500,21 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
return;
amdgpu_acpi_get_backlight_caps(&caps);
+
+ /* validate that the firmware-provided caps are sane */
+ if (caps.caps_valid) {
+ int spread = caps.max_input_signal - caps.min_input_signal;
+
+ if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ caps.min_input_signal < 0 ||
+ spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
+ spread < AMDGPU_DM_MIN_SPREAD) {
+ DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
+ caps.min_input_signal, caps.max_input_signal);
+ caps.caps_valid = false;
+ }
+ }
+
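+ /* Worked example (values illustrative): with the defaults min=12 and
+ * max=255, AMDGPU_DM_MIN_SPREAD = (255 - 12) / 2 = 121. Firmware
+ * reporting min_input_signal=0, max_input_signal=100 yields spread 100,
+ * which is below 121, so the caps are discarded in favor of defaults.
+ */
+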
if (caps.caps_valid) {
dm->backlight_caps[bl_idx].caps_valid = true;
if (caps.aux_support)
@@ -4874,18 +4947,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* Determine whether to enable Replay support by default. */
if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
-/*
- * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344
- * case IP_VERSION(3, 1, 4):
- * case IP_VERSION(3, 1, 5):
- * case IP_VERSION(3, 1, 6):
- * case IP_VERSION(3, 2, 0):
- * case IP_VERSION(3, 2, 1):
- * case IP_VERSION(3, 5, 0):
- * case IP_VERSION(3, 5, 1):
- * replay_feature_enabled = true;
- * break;
- */
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ replay_feature_enabled = true;
+ break;
+
default:
replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
break;
@@ -4972,12 +5041,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
-
- /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
- * PSR is also supported.
- */
- if (link->psr_settings.psr_feature_enabled)
- adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
amdgpu_set_panel_orientation(&aconnector->base);
@@ -5182,7 +5245,7 @@ static int dm_init_microcode(struct amdgpu_device *adev)
/* ASIC doesn't support DMUB. */
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub);
return r;
}
@@ -6471,7 +6534,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ __func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
@@ -6490,7 +6554,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -7233,6 +7297,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
enum dc_status dc_result = DC_OK;
+ if (!dm_state)
+ return NULL;
+
do {
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
@@ -8270,7 +8337,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
- bool enable)
+ struct dm_crtc_state *acrtc_state)
{
/*
* We have no guarantee that the frontend index maps to the same
@@ -8282,9 +8349,31 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
amdgpu_display_crtc_idx_to_irq_type(
adev,
acrtc->crtc_id);
+ struct drm_vblank_crtc_config config = {0};
+ struct dc_crtc_timing *timing;
+ int offdelay;
+
+ if (acrtc_state) {
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
+ IP_VERSION(3, 5, 0) ||
+ acrtc_state->stream->link->psr_settings.psr_version <
+ DC_PSR_VERSION_UNSUPPORTED) {
+ timing = &acrtc_state->stream->timing;
+
+ /* at least 2 frames */
+ offdelay = DIV64_U64_ROUND_UP((u64)20 *
+ timing->v_total *
+ timing->h_total,
+ timing->pix_clk_100hz);
+
+ config.offdelay_ms = offdelay ?: 30;
+ } else {
+ config.disable_immediate = true;
+ }
+
+ drm_crtc_vblank_on_config(&acrtc->base,
+ &config);
- if (enable) {
- drm_crtc_vblank_on(&acrtc->base);
amdgpu_irq_get(
adev,
&adev->pageflip_irq,
@@ -8750,7 +8839,8 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
adev->dm.dc->caps.color.dpp.gamma_corr)
attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
- attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
+ if (afb)
+ attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
@@ -9340,7 +9430,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
if (acrtc)
old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- if (!acrtc->wb_enabled)
+ if (!acrtc || !acrtc->wb_enabled)
continue;
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -9358,7 +9448,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
if (old_crtc_state->active &&
(!new_crtc_state->active ||
drm_atomic_crtc_needs_modeset(new_crtc_state))) {
- manage_dm_interrupts(adev, acrtc, false);
+ manage_dm_interrupts(adev, acrtc, NULL);
dc_stream_release(dm_old_crtc_state->stream);
}
}
@@ -9744,9 +9834,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
- hdcp_update_display(
- adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
- new_con_state->hdcp_content_type, enable_encryption);
+ if (aconnector->dc_link)
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type, enable_encryption);
}
}
@@ -9873,7 +9964,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_crtc_needs_modeset(new_crtc_state))) {
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
- manage_dm_interrupts(adev, acrtc, true);
+ manage_dm_interrupts(adev, acrtc, dm_new_crtc_state);
}
/* Handle vrr on->off / off->on transitions */
amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
@@ -11651,7 +11742,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
- drm_dbg_atomic(dev, "compute_mst_dsc_configs_for_state() failed\n");
+ drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
goto fail;
}
@@ -11672,7 +11763,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
ret = drm_dp_mst_atomic_check(state);
if (ret) {
- drm_dbg_atomic(dev, "drm_dp_mst_atomic_check() failed\n");
+ drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
status = dc_validate_global_state(dc, dm_state->context, true);
@@ -11766,25 +11857,6 @@ fail:
return ret;
}
-static bool is_dp_capable_without_timing_msa(struct dc *dc,
- struct amdgpu_dm_connector *amdgpu_dm_connector)
-{
- u8 dpcd_data;
- bool capable = false;
-
- if (amdgpu_dm_connector->dc_link &&
- dm_helpers_dp_read_dpcd(
- NULL,
- amdgpu_dm_connector->dc_link,
- DP_DOWN_STREAM_PORT_COUNT,
- &dpcd_data,
- sizeof(dpcd_data))) {
- capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
- }
-
- return capable;
-}
-
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
unsigned int offset,
unsigned int total_length,
@@ -12087,8 +12159,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
- if (is_dp_capable_without_timing_msa(adev->dm.dc,
- amdgpu_dm_connector)) {
+ if (amdgpu_dm_connector->dc_link &&
+ amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
@@ -12170,7 +12242,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
}
}
- as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+ if (amdgpu_dm_connector->dc_link)
+ as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
@@ -12194,6 +12267,12 @@ update:
if (dm_con_state)
dm_con_state->freesync_capable = freesync_capable;
+ if (connector->state && amdgpu_dm_connector->dc_link && !freesync_capable &&
+ amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported) {
+ amdgpu_dm_connector->dc_link->replay_settings.config.replay_supported = false;
+ amdgpu_dm_connector->dc_link->replay_settings.replay_feature_enabled = false;
+ }
+
if (connector->vrr_capable_property)
drm_connector_set_vrr_capable_property(connector,
freesync_capable);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2d7755e2b6c3..15d4690c74d6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,7 +50,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 6
+#define AMDGPU_DMUB_NOTIFICATION_MAX 7
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 99014339aaa3..a2cf2c066a76 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -251,9 +251,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
else if (dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;
- dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
-
- DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+ if (dm->active_vblank_irq_count > 0) {
+ DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n");
+ dc_allow_idle_optimizations(dm->dc, false);
+ }
/*
* Control PSR based on vblank requirements from OS
@@ -272,6 +273,11 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->stream->link->replay_settings.replay_feature_enabled);
}
+ if (dm->active_vblank_irq_count == 0) {
+ DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n");
+ dc_allow_idle_optimizations(dm->dc, true);
+ }
+
mutex_unlock(&dm->dc_lock);
dc_stream_release(vblank_work->stream);
@@ -286,11 +292,14 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
struct amdgpu_display_manager *dm = &adev->dm;
struct vblank_control_work *work;
+ int irq_type;
int rc = 0;
if (acrtc->otg_inst == -1)
goto skip;
+ irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+
if (enable) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_crtc_vrr_active(acrtc_state))
@@ -303,13 +312,52 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
if (rc)
return rc;
- rc = (enable)
- ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
- : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
+ /* crtc vblank or vstartup interrupt */
+ if (enable) {
+ rc = amdgpu_irq_get(adev, &adev->crtc_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Get crtc_irq ret=%d\n", rc);
+ } else {
+ rc = amdgpu_irq_put(adev, &adev->crtc_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Put crtc_irq ret=%d\n", rc);
+ }
if (rc)
return rc;
+ /*
+ * hubp surface flip interrupt
+ *
+ * We have no guarantee that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ *
+ * TODO: Use a different interrupt or check DC itself for the mapping.
+ */
+ if (enable) {
+ rc = amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Get pageflip_irq ret=%d\n", rc);
+ } else {
+ rc = amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Put pageflip_irq ret=%d\n", rc);
+ }
+
+ if (rc)
+ return rc;
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* crtc vline0 interrupt, only available on DCN+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) != 0) {
+ if (enable) {
+ rc = amdgpu_irq_get(adev, &adev->vline0_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Get vline0_irq ret=%d\n", rc);
+ } else {
+ rc = amdgpu_irq_put(adev, &adev->vline0_irq, irq_type);
+ drm_dbg_vbl(crtc->dev, "Put vline0_irq ret=%d\n", rc);
+ }
+
+ if (rc)
+ return rc;
+ }
+#endif
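+
+	/* The enable/disable paths above pair amdgpu_irq_get()/amdgpu_irq_put()
+	 * per interrupt source: _get bumps a per-type refcount and unmasks the
+	 * source on the 0 -> 1 transition, _put drops it and masks on 1 -> 0,
+	 * so repeated enables stay balanced.
+	 */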
skip:
if (amdgpu_in_reset(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 62cb59f00929..db56b0aa5454 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3804,9 +3804,12 @@ static int trigger_hpd_mst_set(void *data, u64 val)
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
mutex_lock(&adev->dm.dc_lock);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
mutex_unlock(&adev->dm.dc_lock);
+ if (!ret)
+ DRM_ERROR("DM_MST: Failed to detect dc link!");
+
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0)
DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b490ae67b6be..50109d13d967 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -759,7 +759,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
uint8_t ret = 0;
drm_dbg_dp(aux->drm_dev,
- "Configure DSC to non-virtual dpcd synaptics\n");
+ "MST_DSC Configure DSC to non-virtual dpcd synaptics\n");
if (enable) {
/* When DSC is enabled on previous boot and reboot with the hub,
@@ -772,7 +772,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
apply_synaptics_fifo_reset_wa(aux);
ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
- DRM_INFO("Send DSC enable to synaptics\n");
+ DRM_INFO("MST_DSC Send DSC enable to synaptics\n");
} else {
/* Synaptics hub does not support virtual dpcd,
@@ -781,7 +781,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
*/
if (!stream->link->link_status.link_active) {
ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
- DRM_INFO("Send DSC disable to synaptics\n");
+ DRM_INFO("MST_DSC Send DSC disable to synaptics\n");
}
}
@@ -823,14 +823,14 @@ bool dm_helpers_dp_write_dsc_enable(
DP_DSC_ENABLE,
&enable_passthrough, 1);
drm_dbg_dp(dev,
- "Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
+ "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
ret);
}
ret = drm_dp_dpcd_write(aconnector->dsc_aux,
DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Sent DSC decoding enable to %s port, ret = %u\n",
+ "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n",
(port->passthrough_aux) ? "remote RX" :
"virtual dpcd",
ret);
@@ -838,7 +838,7 @@ bool dm_helpers_dp_write_dsc_enable(
ret = drm_dp_dpcd_write(aconnector->dsc_aux,
DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Sent DSC decoding disable to %s port, ret = %u\n",
+ "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n",
(port->passthrough_aux) ? "remote RX" :
"virtual dpcd",
ret);
@@ -848,7 +848,7 @@ bool dm_helpers_dp_write_dsc_enable(
DP_DSC_ENABLE,
&enable_passthrough, 1);
drm_dbg_dp(dev,
- "Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
+ "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
ret);
}
}
@@ -858,12 +858,12 @@ bool dm_helpers_dp_write_dsc_enable(
if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Send DSC %s to SST RX\n",
+ "SST_DSC Send DSC %s to SST RX\n",
enable_dsc ? "enable" : "disable");
} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Send DSC %s to DP-HDMI PCON\n",
+ "SST_DSC Send DSC %s to DP-HDMI PCON\n",
enable_dsc ? "enable" : "disable");
}
}
@@ -1286,3 +1286,15 @@ enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
return as_type;
}
+
+bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+}
+
+bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+} \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 2e9f6da1acdc..83a31b97e96b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -253,7 +253,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */
- if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+ if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux)
@@ -578,6 +578,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
if (!aconnector)
return NULL;
+ DRM_DEBUG_DRIVER("%s: Create aconnector 0x%p for port 0x%p\n", __func__, aconnector, port);
+
connector = &aconnector->base;
aconnector->mst_output_port = port;
aconnector->mst_root = master;
@@ -872,11 +874,11 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
if (params[i].sink) {
if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
params[i].sink->sink_signal != SIGNAL_TYPE_NONE)
- DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i,
+ DRM_DEBUG_DRIVER("MST_DSC %s i=%d dispname=%s\n", __func__, i,
params[i].sink->edid_caps.display_name);
}
- DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
+ DRM_DEBUG_DRIVER("MST_DSC dsc=%d bits_per_pixel=%d pbn=%d\n",
params[i].timing->flags.DSC,
params[i].timing->dsc_cfg.bits_per_pixel,
vars[i + k].pbn);
@@ -1054,6 +1056,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
@@ -1064,10 +1067,12 @@ static int try_disable_dsc(struct drm_atomic_state *state,
ret = drm_dp_mst_atomic_check(state);
if (ret == 0) {
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, greedily disable dsc\n", next_index);
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1082,6 +1087,15 @@ static int try_disable_dsc(struct drm_atomic_state *state,
return 0;
}
+static void log_dsc_params(int count, struct dsc_mst_fairness_vars *vars, int k)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ DRM_DEBUG_DRIVER("MST_DSC DSC params: stream #%d --- dsc_enabled = %d, bpp_x16 = %d, pbn = %d\n",
+ i, vars[i + k].dsc_enabled, vars[i + k].bpp_x16, vars[i + k].pbn);
+}
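+
+/* Example of the resulting log line (values illustrative):
+ *   MST_DSC DSC params: stream #0 --- dsc_enabled = 1, bpp_x16 = 128, pbn = 1200
+ * where bpp_x16 = 128 encodes 8 bits per pixel (bpp * 16).
+ */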
+
static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dc_link *dc_link,
@@ -1104,6 +1118,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return PTR_ERR(mst_state);
/* Set up params */
+ DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count);
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
@@ -1132,7 +1147,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
- dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
+ dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
@@ -1145,6 +1160,9 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
dc_link_get_highest_encoding_format(dc_link));
+ DRM_DEBUG_DRIVER("MST_DSC #%d stream 0x%p - max_kbps = %u, min_kbps = %u, uncompressed_kbps = %u\n",
+ count, stream, params[count].bw_range.max_kbps, params[count].bw_range.min_kbps,
+ params[count].bw_range.stream_kbps);
count++;
}
@@ -1159,6 +1177,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
*link_vars_start_index += count;
/* Try no compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
@@ -1177,7 +1196,10 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return ret;
}
+ log_dsc_params(count, vars, k);
+
/* Try max compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
@@ -1201,14 +1223,26 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret != 0)
return ret;
+ log_dsc_params(count, vars, k);
+
/* Optimize degree of compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try optimize compression\n");
ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("MST_DSC Failed to optimize compression\n");
return ret;
+ }
+ log_dsc_params(count, vars, k);
+
+ DRM_DEBUG_DRIVER("MST_DSC Try disable compression\n");
ret = try_disable_dsc(state, dc_link, params, vars, count, k);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("MST_DSC Failed to disable compression\n");
return ret;
+ }
+
+ log_dsc_params(count, vars, k);
set_dsc_configs_from_fairness_vars(params, vars, count, k);
@@ -1230,17 +1264,19 @@ static bool is_dsc_need_re_compute(
/* only check phy used by dsc mst branch */
if (dc_link->type != dc_connection_mst_branch)
- return false;
+ goto out;
/* add a check for older MST DSC with no virtual DPCDs */
if (needs_dsc_aux_workaround(dc_link) &&
(!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)))
- return false;
+ goto out;
for (i = 0; i < MAX_PIPES; i++)
stream_on_link[i] = NULL;
+ DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in new dc_state\n", __func__, dc_state->stream_count);
+
/* check if there is mode change in new request */
for (i = 0; i < dc_state->stream_count; i++) {
struct drm_crtc_state *new_crtc_state;
@@ -1250,6 +1286,8 @@ static bool is_dsc_need_re_compute(
if (!stream)
continue;
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC checking #%d stream 0x%p\n", __func__, __LINE__, i, stream);
+
/* check if stream using the same link for mst */
if (stream->link != dc_link)
continue;
@@ -1262,8 +1300,11 @@ static bool is_dsc_need_re_compute(
new_stream_on_link_num++;
new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
- if (!new_conn_state)
+ if (!new_conn_state) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_conn_state for stream 0x%p, aconnector 0x%p\n",
+ __func__, __LINE__, stream, aconnector);
continue;
+ }
if (IS_ERR(new_conn_state))
continue;
@@ -1272,21 +1313,36 @@ static bool is_dsc_need_re_compute(
continue;
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
- if (!new_crtc_state)
+ if (!new_crtc_state) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_crtc_state for crtc of stream 0x%p, aconnector 0x%p\n",
+ __func__, __LINE__, stream, aconnector);
continue;
+ }
if (IS_ERR(new_crtc_state))
continue;
if (new_crtc_state->enable && new_crtc_state->active) {
if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
- new_crtc_state->connectors_changed)
- return true;
+ new_crtc_state->connectors_changed) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required."
+ "stream 0x%p in new dc_state\n",
+ __func__, __LINE__, stream);
+ is_dsc_need_re_compute = true;
+ goto out;
+ }
}
}
- if (new_stream_on_link_num == 0)
- return false;
+ if (new_stream_on_link_num == 0) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no mode change request for streams in new dc_state\n",
+ __func__, __LINE__);
+ is_dsc_need_re_compute = false;
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in current dc_state\n",
+ __func__, dc->current_state->stream_count);
/* check current_state for streams on this link that are not in the
* new request state
@@ -1310,11 +1366,18 @@ static bool is_dsc_need_re_compute(
if (j == new_stream_on_link_num) {
/* not in new state */
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required."
+ "stream 0x%p in current dc_state but not in new dc_state\n",
+ __func__, __LINE__, stream);
is_dsc_need_re_compute = true;
break;
}
}
+out:
+ DRM_DEBUG_DRIVER("%s: MST_DSC dsc recompute %s\n",
+ __func__, is_dsc_need_re_compute ? "required" : "not required");
+
return is_dsc_need_re_compute;
}
@@ -1343,6 +1406,9 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ DRM_DEBUG_DRIVER("%s: MST_DSC compute mst dsc configs for stream 0x%p, aconnector 0x%p\n",
+ __func__, stream, aconnector);
+
if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
@@ -1375,8 +1441,11 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC Failed to request dsc hw resource for stream 0x%p\n",
+ __func__, __LINE__, stream);
return -EINVAL;
+ }
}
return ret;
@@ -1405,6 +1474,9 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ DRM_DEBUG_DRIVER("MST_DSC pre compute mst dsc configs for #%d stream 0x%p, aconnector 0x%p\n",
+ i, stream, aconnector);
+
if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
@@ -1494,12 +1566,12 @@ int pre_validate_dsc(struct drm_atomic_state *state,
int ret = 0;
if (!is_dsc_precompute_needed(state)) {
- DRM_INFO_ONCE("DSC precompute is not needed.\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dsc precompute is not needed\n", __func__, __LINE__);
return 0;
}
ret = dm_atomic_get_state(state, dm_state_ptr);
if (ret != 0) {
- DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dm_atomic_get_state() failed\n", __func__, __LINE__);
return ret;
}
dm_state = *dm_state_ptr;
@@ -1553,7 +1625,8 @@ int pre_validate_dsc(struct drm_atomic_state *state,
ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
if (ret != 0) {
- DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dsc pre_compute_mst_dsc_configs_for_state() failed\n",
+ __func__, __LINE__);
ret = -EINVAL;
goto clean_exit;
}
@@ -1567,12 +1640,15 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (local_dc_state->streams[i] &&
dc_is_timing_changed(stream, local_dc_state->streams[i])) {
- DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
+ DRM_INFO_ONCE("%s:%d MST_DSC crtc[%d] needs mode_change\n", __func__, __LINE__, i);
} else {
int ind = find_crtc_index_in_state_by_stream(state, stream);
- if (ind >= 0)
+ if (ind >= 0) {
+ DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n",
+ __func__, __LINE__, stream);
state->crtcs[ind].new_state->mode_changed = 0;
+ }
}
}
clean_exit:
@@ -1605,7 +1681,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
{
struct dc_dsc_policy dsc_policy = {0};
- dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
+ dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
dsc_policy.min_target_bpp * 16,
@@ -1697,7 +1773,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
if (stream_kbps <= end_to_end_bw_in_kbps) {
- DRM_DEBUG_DRIVER("No DSC needed. End-to-end bw sufficient.");
+ DRM_DEBUG_DRIVER("MST_DSC no dsc required. End-to-end bw sufficient\n");
return DC_OK;
}
@@ -1710,7 +1786,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
/* capable of dsc passthrough; dsc bitstream along the entire path */
if (aconnector->mst_output_port->passthrough_aux) {
if (bw_range.min_kbps > end_to_end_bw_in_kbps) {
- DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc passthrough and decode at endpoint"
+ "Max dsc compression bw can't fit into end-to-end bw\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
@@ -1721,7 +1798,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
/*Get last DP link BW capability*/
if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
if (stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("DSC decode at last link. Mode required bw can't fit into available bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
@@ -1734,7 +1812,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("DSC decode at last link. Max dsc compression can't fit into MST available bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Max dsc compression can't fit into MST available bw\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
@@ -1751,9 +1830,9 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("Require dsc and dsc config found\n");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc and dsc config found\n");
} else {
- DRM_DEBUG_DRIVER("Require dsc but can't find appropriate dsc config\n");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find appropriate dsc config\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1775,11 +1854,11 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (branch_max_throughput_mps != 0 &&
((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) {
- DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but max throughput mps fails\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
- DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config.");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find common dsc config\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 5cb11cc2d063..495e3cd70426 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -961,6 +961,7 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
+ rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
@@ -1283,6 +1284,7 @@ int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc
struct dc_cursor_position *position)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
int x, y;
int xorigin = 0, yorigin = 0;
@@ -1314,12 +1316,14 @@ int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc
y = 0;
}
position->enable = true;
- position->translate_by_source = true;
position->x = x;
position->y = y;
position->x_hotspot = xorigin;
position->y_hotspot = yorigin;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1))
+ position->translate_by_source = true;
+
return 0;
}
@@ -1377,7 +1381,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
adev->dm.dc->caps.color.dpp.gamma_corr)
attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
- attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
+ if (afb)
+ attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {
mutex_lock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 08c494a7a21b..0d5fefb0f591 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -114,6 +114,7 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
domain = amdgpu_display_supported_domains(adev, rbo->flags);
+ rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
r = amdgpu_bo_pin(rbo, domain);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 80069651def3..8992e697759f 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -35,7 +35,6 @@ DC_LIBS += dcn201
DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn31
-DC_LIBS += dcn314
DC_LIBS += dml
DC_LIBS += dml2
endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index e47e9db062f4..681799468487 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -569,7 +569,7 @@ static void calculate_bandwidth(
break;
}
data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
- /*clamp the partitions to the maxium number supported by the lb*/
+ /* clamp the partitions to the maximum number supported by the lb */
if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
data->lb_partitions_max[i] = bw_int_to_fixed(10);
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 506f82cd5cc6..88d3f9d7dd55 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -486,3 +486,30 @@ int dc_fixpt_s4d19(struct fixed31_32 arg)
else
return ux_dy(arg.value, 4, 19);
}
+
+struct fixed31_32 dc_fixpt_from_ux_dy(unsigned int value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct fixed31_32 fixpt_value = dc_fixpt_zero;
+ struct fixed31_32 fixpt_int_value = dc_fixpt_zero;
+ long long frac_mask = ((long long)1 << (long long)integer_bits) - 1;
+
+ fixpt_value.value = (long long)value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ frac_mask = frac_mask << fractional_bits;
+ fixpt_int_value.value = value & frac_mask;
+ fixpt_int_value.value <<= (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ fixpt_value.value |= fixpt_int_value.value;
+ return fixpt_value;
+}
+
+struct fixed31_32 dc_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct fixed31_32 fixpt_value = dc_fixpt_from_int(int_value);
+
+ fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ return fixpt_value;
+}
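+
+/* Illustrative use (field widths assumed): a u4.19 hardware sample equal to
+ * 1.0, i.e. raw value 1 << 19, converts as
+ *
+ *	struct fixed31_32 v = dc_fixpt_from_ux_dy(1 << 19, 4, 19);
+ *
+ * giving v.value == 1LL << 32, the fixed31_32 representation of 1.0.
+ */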
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 4254bdfefe38..7d18f372ce7a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -227,7 +227,7 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev = 0;
- if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev))
+ if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) && (bp->base.ctx->dc->ctx->dce_version <= DCN_VERSION_2_0))
BREAK_TO_DEBUGGER();
switch (crev) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index f770828df149..0e243f4344d0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -59,6 +59,7 @@ int clk_mgr_helper_get_active_display_cnt(
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
+ const struct dc_stream_status *stream_status = &context->stream_status[i];
/* Don't count SubVP phantom pipes as part of active
* display count
@@ -66,13 +67,7 @@ int clk_mgr_helper_get_active_display_cnt(
if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
- /*
- * Only notify active stream or virtual stream.
- * Need to notify virtual stream to work around
- * headless case. HPD does not fire when system is in
- * S0i2.
- */
- if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+ if (!stream->dpms_off || (stream_status && stream_status->plane_count))
display_count++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index 78df96882d6e..f8409453434c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -195,7 +195,7 @@ void dce11_pplib_apply_display_requirements(
* , then change minimum memory clock based on real-time bandwidth
* limitation.
*/
- if ((dc->ctx->asic_id.chip_family == FAMILY_AI) &&
+ if (dc->bw_vbios && (dc->ctx->asic_id.chip_family == FAMILY_AI) &&
ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) {
pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz,
(uint32_t) div64_s64(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 70ee0089a20d..b46a3afe48ca 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -120,25 +120,40 @@ static int dcn35_get_active_display_cnt_wa(
return display_count;
}
-
static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
+ if (dc->ctx->dce_environment == DCE_ENV_DIAG)
+ return;
+
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
-
+ bool stream_changed_otg_dig_on = false;
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
+ stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream &&
+ old_pipe->stream != new_pipe->stream &&
+ old_pipe->stream_res.tg == new_pipe->stream_res.tg &&
+ new_pipe->stream->link_enc && !new_pipe->stream->dpms_off &&
+ new_pipe->stream->link_enc->funcs->is_dig_enabled &&
+ new_pipe->stream->link_enc->funcs->is_dig_enabled(
+ new_pipe->stream->link_enc) &&
+ new_pipe->stream_res.stream_enc &&
+ new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
+ new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
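+		/* Restated (editor's note): the workaround below is skipped only
+		 * when the same timing generator is handed from one active stream
+		 * to another while its DIG link encoder and stream-encoder FIFO
+		 * are still enabled, i.e. a live display is being re-driven
+		 * rather than shut off.
+		 */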
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
- !pipe->stream->link_enc)) {
+ !pipe->stream->link_enc) && !stream_changed_otg_dig_on) {
+ /* This workaround should not trigger when a DIG is active */
if (disable) {
- if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->disable_crtc)
- pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else {
@@ -367,6 +382,9 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz)
+ new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz;
+
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
@@ -1082,7 +1100,7 @@ void dcn35_clk_mgr_construct(
clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
- DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_GART,
sizeof(struct dcn35_watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
@@ -1094,7 +1112,7 @@ void dcn35_clk_mgr_construct(
smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
- DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_GART,
sizeof(DpmClocks_t_dcn35),
&smu_dpm_clks.mc_address.quad_part);
@@ -1191,7 +1209,7 @@ void dcn35_clk_mgr_construct(
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
- dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
smu_dpm_clks.dpm_clks);
if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
@@ -1204,6 +1222,12 @@ void dcn35_clk_mgr_construct(
ctx->dc->debug.disable_dpp_power_gate = false;
ctx->dc->debug.disable_hubp_power_gate = false;
ctx->dc->debug.disable_dsc_power_gate = false;
+
+ /* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. */
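+ /* 0x005d0c00 == 93.12.0, assuming a major.minor.patch byte layout. */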
+ if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE &&
+ ctx->dce_version == DCN_VERSION_3_5 &&
+ ((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00))
+ ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
} else {
/*let's reset the config control flag*/
ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 45fe17a46890..8cfc5f435937 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -14,6 +14,7 @@
#include "core_types.h"
#include "dm_helpers.h"
#include "link.h"
+#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "dcn401_smu14_driver_if.h"
@@ -29,6 +30,7 @@
#define mmCLK01_CLK0_CLK2_DFS_CNTL 0x16E6F
#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E72
#define mmCLK01_CLK0_CLK4_DFS_CNTL 0x16E75
+#define mmCLK20_CLK2_CLK2_DFS_CNTL 0x1B051
#define CLK0_CLK_PLL_REQ__FbMult_int_MASK 0x000001ffUL
#define CLK0_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000f000UL
@@ -302,6 +304,197 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_build_wm_range_table(clk_mgr_base);
}
+static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t dprefclk_did = 0;
+ uint32_t dcfclk_did = 0;
+ uint32_t dtbclk_did = 0;
+ uint32_t dispclk_did = 0;
+ uint32_t dppclk_did = 0;
+ uint32_t fclk_did = 0;
+ uint32_t target_div = 0;
+
+ /* DFS Slice 0 is used for DISPCLK */
+ dispclk_did = REG_READ(CLK0_CLK0_DFS_CNTL);
+ /* DFS Slice 1 is used for DPPCLK */
+ dppclk_did = REG_READ(CLK0_CLK1_DFS_CNTL);
+ /* DFS Slice 2 is used for DPREFCLK */
+ dprefclk_did = REG_READ(CLK0_CLK2_DFS_CNTL);
+ /* DFS Slice 3 is used for DCFCLK */
+ dcfclk_did = REG_READ(CLK0_CLK3_DFS_CNTL);
+ /* DFS Slice 4 is used for DTBCLK */
+ dtbclk_did = REG_READ(CLK0_CLK4_DFS_CNTL);
+ /* DFS Slice 2 in the CLK2 bank is used for FCLK */
+ fclk_did = REG_READ(CLK2_CLK2_DFS_CNTL);
+
+ /* Convert DISPCLK DFS Slice DID to divider */
+ target_div = dentist_get_divider_from_did(dispclk_did);
+ //Get dispclk in khz
+ regs_and_bypass->dispclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert DPPCLK DFS Slice DID to divider */
+ target_div = dentist_get_divider_from_did(dppclk_did);
+ //Get dppclk in khz
+ regs_and_bypass->dppclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert DPREFCLK DFS Slice DID to divider */
+ target_div = dentist_get_divider_from_did(dprefclk_did);
+ //Get dprefclk in khz
+ regs_and_bypass->dprefclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert DCFCLK DFS Slice DID to divider */
+ target_div = dentist_get_divider_from_did(dcfclk_did);
+ //Get dcfclk in khz
+ regs_and_bypass->dcfclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert DTBCLK DFS Slice DID to divider */
+ target_div = dentist_get_divider_from_did(dtbclk_did);
+ //Get dtbclk in khz
+ regs_and_bypass->dtbclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ /* Convert FCLK DFS Slice DID to divider */
+ target_div = dentist_get_divider_from_did(fclk_did);
+ //Get fclk in khz
+ regs_and_bypass->fclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+}
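+/*
+ * Worked example (illustrative values only, assuming
+ * DENTIST_DIVIDER_RANGE_SCALE_FACTOR == 4 and that
+ * dentist_get_divider_from_did() returns the divider scaled by the same
+ * factor): with dentist_vco_freq_khz = 3600000 and a DID decoding to
+ * target_div = 12 (a divider of 3.0), the reported clock is
+ * (4 * 3600000) / 12 = 1200000 kHz.
+ */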
+
+static bool dcn401_check_native_scaling(struct pipe_ctx *pipe)
+{
+ bool is_native_scaling = false;
+ int width = pipe->plane_state->src_rect.width;
+ int height = pipe->plane_state->src_rect.height;
+
+ if (pipe->stream->timing.h_addressable == width &&
+ pipe->stream->timing.v_addressable == height &&
+ pipe->plane_state->dst_rect.width == width &&
+ pipe->plane_state->dst_rect.height == height)
+ is_native_scaling = true;
+
+ return is_native_scaling;
+}
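+/*
+ * Illustration (assumed values): a 1920x1080 src_rect composed onto a
+ * 1920x1080 dst_rect on a timing with 1920x1080 addressable pixels is
+ * "native"; any mismatch means the pipe is scaling, and the DPM test log
+ * below reports the plane as scaled.
+ */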
+
+static void dcn401_auto_dpm_test_log(
+ struct dc_clocks *new_clocks,
+ struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context)
+{
+ unsigned int mall_ss_size_bytes;
+ int dramclk_khz_override, fclk_khz_override, num_fclk_levels;
+
+ struct pipe_ctx *pipe_ctx_list[MAX_PIPES];
+ int active_pipe_count = 0;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
+ pipe_ctx_list[active_pipe_count] = pipe_ctx;
+ active_pipe_count++;
+ }
+ }
+
+ msleep(5);
+
+ mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
+
+ struct clk_log_info log_info = {0};
+ struct clk_state_registers_and_bypass clk_register_dump;
+
+ dcn401_dump_clk_registers(&clk_register_dump, &clk_mgr->base, &log_info);
+
+ // Overrides for these clocks in case there is no p_state change support
+ dramclk_khz_override = new_clocks->dramclk_khz;
+ fclk_khz_override = new_clocks->fclk_khz;
+
+ num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
+
+ if (!new_clocks->p_state_change_support)
+ dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000;
+
+ if (!new_clocks->fclk_p_state_change_support)
+ fclk_khz_override = clk_mgr->base.bw_params->clk_table.entries[num_fclk_levels].fclk_mhz * 1000;
+
+ ////////////////////////////////////////////////////////////////////////////
+ // IMPORTANT: When adding more clocks to these logs, do NOT put a newline
+ // anywhere other than at the very end of the string.
+ //
+ // Formatting example (make sure to have " - " between each entry):
+ //
+ // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
+ ////////////////////////////////////////////////////////////////////////////
+ if (active_pipe_count > 0 &&
+ new_clocks->dramclk_khz > 0 &&
+ new_clocks->fclk_khz > 0 &&
+ new_clocks->dcfclk_khz > 0 &&
+ new_clocks->dppclk_khz > 0) {
+
+ uint32_t pix_clk_list[MAX_PIPES] = {0};
+ int p_state_list[MAX_PIPES] = {0};
+ int disp_src_width_list[MAX_PIPES] = {0};
+ int disp_src_height_list[MAX_PIPES] = {0};
+ uint64_t disp_src_refresh_list[MAX_PIPES] = {0};
+ bool is_scaled_list[MAX_PIPES] = {0};
+
+ for (int i = 0; i < active_pipe_count; i++) {
+ struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i];
+ uint64_t refresh_rate;
+
+ pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz;
+ p_state_list[i] = curr_pipe_ctx->p_state_type;
+
+ refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ curr_pipe_ctx->stream->timing.v_total
+ * (uint64_t) curr_pipe_ctx->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total);
+ disp_src_refresh_list[i] = refresh_rate;
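+ /*
+ * Ceiling division: e.g. (assumed timing) pix_clk_100hz =
+ * 1485000 (148.5 MHz), h_total = 2200, v_total = 1125 gives
+ * 148500000 / (2200 * 1125) = 60 Hz; the "+ v_total * h_total
+ * - 1" term above rounds the result up.
+ */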
+
+ if (curr_pipe_ctx->plane_state) {
+ is_scaled_list[i] = !(dcn401_check_native_scaling(curr_pipe_ctx));
+ disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width;
+ disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height;
+ }
+ }
+
+ DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - "
+ "dcfclk:%d - dppclk:%d - dispclk_hw:%d - "
+ "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - "
+ "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - "
+ "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - "
+ "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - "
+ "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - "
+ "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - "
+ "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - "
+ "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n",
+ dramclk_khz_override,
+ fclk_khz_override,
+ new_clocks->dcfclk_khz,
+ new_clocks->dppclk_khz,
+ clk_register_dump.dispclk,
+ clk_register_dump.dppclk,
+ clk_register_dump.dprefclk,
+ clk_register_dump.dcfclk,
+ clk_register_dump.dtbclk,
+ clk_register_dump.fclk,
+ pix_clk_list[0], pix_clk_list[1], pix_clk_list[2], pix_clk_list[3],
+ mall_ss_size_bytes,
+ p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3],
+ disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0],
+ disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1],
+ disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2],
+ disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]);
+ }
+}
+
static void dcn401_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
struct dc_state *context,
int ref_dtbclk_khz)
@@ -324,10 +517,12 @@ static void dcn401_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr
if (!use_hpo_encoder)
continue;
- otg_master->clock_source->funcs->program_pix_clk(
+ if (otg_master->stream_res.pix_clk_params.controller_id > CONTROLLER_ID_UNDEFINED)
+ otg_master->clock_source->funcs->program_pix_clk(
otg_master->clock_source,
&otg_master->stream_res.pix_clk_params,
- dccg->ctx->dc->link_srv->dp_get_encoding_format(&otg_master->link_config.dp_link_settings),
+ dccg->ctx->dc->link_srv->dp_get_encoding_format(
+ &otg_master->link_config.dp_link_settings),
&otg_master->pll_settings);
}
}
@@ -738,12 +933,12 @@ static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned
static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
struct clk_mgr *clk_mgr_base,
struct dc_state *context,
+ struct dc_clocks *new_clocks,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal);
struct dc *dc = clk_mgr_base->ctx->dc;
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence;
bool enter_display_off = false;
bool update_active_fclk = false;
@@ -1025,13 +1220,13 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
static unsigned int dcn401_build_update_display_clocks_sequence(
struct clk_mgr *clk_mgr_base,
struct dc_state *context,
+ struct dc_clocks *new_clocks,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dcn401_clk_mgr *clk_mgr401 = TO_DCN401_CLK_MGR(clk_mgr_internal);
struct dc *dc = clk_mgr_base->ctx->dc;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dcn401_clk_mgr_block_sequence *block_sequence = clk_mgr401->block_sequence;
bool force_reset = false;
bool update_dispclk = false;
@@ -1171,9 +1366,6 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
unsigned int num_steps = 0;
- if (dc->work_arounds.skip_clock_update)
- return;
-
if (dc->debug.enable_legacy_clock_update) {
dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower);
return;
@@ -1182,6 +1374,7 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
/* build bandwidth related clocks update sequence */
num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base,
context,
+ &context->bw_ctx.bw.dcn.clk,
safe_to_lower);
/* execute sequence */
@@ -1190,10 +1383,15 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
/* build display related clocks update sequence */
num_steps = dcn401_build_update_display_clocks_sequence(clk_mgr_base,
context,
+ &context->bw_ctx.bw.dcn.clk,
safe_to_lower);
/* execute sequence */
dcn401_execute_block_sequence(clk_mgr_base, num_steps);
+
+ if (dc->config.enable_auto_dpm_test_logs)
+ dcn401_auto_dpm_test_log(&context->bw_ctx.bw.dcn.clk, TO_CLK_MGR_INTERNAL(clk_mgr_base), context);
+
}
@@ -1218,59 +1416,6 @@ static uint32_t dcn401_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_m
return dc_fixpt_floor(pll_req);
}
-static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
- struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- uint32_t dprefclk_did = 0;
- uint32_t dcfclk_did = 0;
- uint32_t dtbclk_did = 0;
- uint32_t dispclk_did = 0;
- uint32_t dppclk_did = 0;
- uint32_t target_div = 0;
-
- /* DFS Slice 0 is used for DISPCLK */
- dispclk_did = REG_READ(CLK0_CLK0_DFS_CNTL);
- /* DFS Slice 1 is used for DPPCLK */
- dppclk_did = REG_READ(CLK0_CLK1_DFS_CNTL);
- /* DFS Slice 2 is used for DPREFCLK */
- dprefclk_did = REG_READ(CLK0_CLK2_DFS_CNTL);
- /* DFS Slice 3 is used for DCFCLK */
- dcfclk_did = REG_READ(CLK0_CLK3_DFS_CNTL);
- /* DFS Slice 4 is used for DTBCLK */
- dtbclk_did = REG_READ(CLK0_CLK4_DFS_CNTL);
-
- /* Convert DISPCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dispclk_did);
- //Get dispclk in khz
- regs_and_bypass->dispclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-
- /* Convert DISPCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dppclk_did);
- //Get dppclk in khz
- regs_and_bypass->dppclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-
- /* Convert DPREFCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dprefclk_did);
- //Get dprefclk in khz
- regs_and_bypass->dprefclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-
- /* Convert DCFCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dcfclk_did);
- //Get dcfclk in khz
- regs_and_bypass->dcfclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-
- /* Convert DTBCLK DFS Slice DID to divider*/
- target_div = dentist_get_divider_from_did(dtbclk_did);
- //Get dtbclk in khz
- regs_and_bypass->dtbclk = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
-}
-
static void dcn401_clock_read_ss_info(struct clk_mgr_internal *clk_mgr)
{
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
@@ -1330,33 +1475,34 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn401_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ const struct dc *dc = clk_mgr->base.ctx->dc;
+ struct dc_state *context = dc->current_state;
+ struct dc_clocks new_clocks;
+ int num_steps;
if (!clk_mgr->smu_present || !dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
return;
+ /* build clock update */
+ memcpy(&new_clocks, &clk_mgr_base->clks, sizeof(struct dc_clocks));
+
if (current_mode) {
- if (clk_mgr_base->clks.p_state_change_support)
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
- else
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->max_memclk_mhz);
+ new_clocks.dramclk_khz = context->bw_ctx.bw.dcn.clk.dramclk_khz;
+ new_clocks.idle_dramclk_khz = context->bw_ctx.bw.dcn.clk.idle_dramclk_khz;
+ new_clocks.p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
} else {
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
+ new_clocks.dramclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz * 1000;
+ new_clocks.idle_dramclk_khz = new_clocks.dramclk_khz;
+ new_clocks.p_state_change_support = true;
}
-}
-
-/* Set max memclk to highest DPM value */
-static void dcn401_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- if (!clk_mgr->smu_present || !dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- return;
+ num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base,
+ context,
+ &new_clocks,
+ true);
- dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->max_memclk_mhz);
+ /* execute sequence */
+ dcn401_execute_block_sequence(clk_mgr_base, num_steps);
}
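/*
 * Sketch of the two paths above (as read from the code): current_mode pins
 * UCLK to the clocks the active state was validated with, while !current_mode
 * drops to the lowest DPM entry with p-state switching allowed; both are now
 * applied through the common block sequence.
 */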
/* Get current memclk states, update bounding box */
@@ -1487,7 +1633,6 @@ static struct clk_mgr_funcs dcn401_funcs = {
.init_clocks = dcn401_init_clocks,
.notify_wm_ranges = dcn401_notify_wm_ranges,
.set_hard_min_memclk = dcn401_set_hard_min_memclk,
- .set_hard_max_memclk = dcn401_set_hard_max_memclk,
.get_memclk_states_from_smu = dcn401_get_memclk_states_from_smu,
.are_clock_states_equal = dcn401_are_clock_states_equal,
.enable_pme_wa = dcn401_enable_pme_wa,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 85a2ef82afa5..5c39390ecbd5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1254,7 +1254,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
+ if (!dc->debug.using_dml2)
+ set_p_state_switch_method(dc, context, pipe);
dc_update_visual_confirm_color(dc, context, pipe);
}
@@ -1351,80 +1352,6 @@ static void disable_vbios_mode_if_required(
}
}
-/**
- * wait_for_blank_complete - wait for all active OPPs to finish pending blank
- * pattern updates
- *
- * @dc: [in] dc reference
- * @context: [in] hardware context in use
- */
-static void wait_for_blank_complete(struct dc *dc,
- struct dc_state *context)
-{
- struct pipe_ctx *opp_head;
- struct dce_hwseq *hws = dc->hwseq;
- int i;
-
- if (!hws->funcs.wait_for_blank_complete)
- return;
-
- for (i = 0; i < MAX_PIPES; i++) {
- opp_head = &context->res_ctx.pipe_ctx[i];
-
- if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
- dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
- continue;
-
- hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
- }
-}
-
-static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
-{
- struct pipe_ctx *otg_master;
- struct timing_generator *tg;
- int i;
-
- for (i = 0; i < MAX_PIPES; i++) {
- otg_master = &context->res_ctx.pipe_ctx[i];
- if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
- dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
- continue;
- tg = otg_master->stream_res.tg;
- if (tg->funcs->wait_odm_doublebuffer_pending_clear)
- tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
- }
-
- /* ODM update may require to reprogram blank pattern for each OPP */
- wait_for_blank_complete(dc, context);
-}
-
-static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
-{
- int i;
- PERF_TRACE();
- for (i = 0; i < MAX_PIPES; i++) {
- int count = 0;
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
- continue;
-
- /* Timeout 100 ms */
- while (count < 100000) {
- /* Must set to false to start with, due to OR in update function */
- pipe->plane_state->status.is_flip_pending = false;
- dc->hwss.update_pending_status(pipe);
- if (!pipe->plane_state->status.is_flip_pending)
- break;
- udelay(1);
- count++;
- }
- ASSERT(!pipe->plane_state->status.is_flip_pending);
- }
- PERF_TRACE();
-}
-
/* Public functions */
struct dc *dc_create(const struct dc_init_data *init_params)
@@ -1822,17 +1749,25 @@ bool dc_validate_boot_timing(const struct dc *dc,
tg->funcs->get_optc_source(tg,
&numOdmPipes, &id_src[0], &id_src[1]);
- if (numOdmPipes == 2)
+ if (numOdmPipes == 2) {
pix_clk_100hz *= 2;
- if (numOdmPipes == 4)
+ } else if (numOdmPipes == 4) {
pix_clk_100hz *= 4;
+ } else if (se && se->funcs->get_pixels_per_cycle) {
+ uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);
+
+ if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return false;
+
+ pix_clk_100hz *= pixels_per_cycle;
+ }
// Note: In rare cases, HW pixclk may differ from crtc's pixclk
// slightly due to rounding issues in 10 kHz units.
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
- if (!se->funcs->dp_get_pixel_format)
+ if (!se || !se->funcs->dp_get_pixel_format)
return false;
if (!se->funcs->dp_get_pixel_format(
@@ -2100,12 +2035,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
- wait_for_no_pipes_pending(dc, context);
+ hwss_wait_for_no_pipes_pending(dc, context);
/*
* optimized dispclk depends on ODM setup. Need to wait for ODM
* update pending complete before optimizing bandwidth.
*/
- wait_for_odm_update_pending_complete(dc, context);
+ hwss_wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -2441,7 +2376,7 @@ static bool is_surface_in_context(
return false;
}
-static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
+static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
@@ -2520,7 +2455,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
/* todo: below are HW dependent, we should add a hook to
* DCE/N resource and validated there.
*/
- if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (!dc->debug.skip_full_updated_if_possible) {
/* swizzled mode requires RQ to be setup properly,
* thus need to run DML to calculate RQ settings
*/
@@ -2612,7 +2547,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->raw = 0; // Reset all flags
- type = get_plane_info_update_type(u);
+ type = get_plane_info_update_type(dc, u);
elevate_update_type(&overall_type, type);
type = get_scaling_info_update_type(dc, u);
@@ -2661,6 +2596,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
elevate_update_type(&overall_type, UPDATE_TYPE_MED);
}
+ if (u->sdr_white_level_nits &&
+     u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
+ update_flags->bits.sdr_white_level_nits = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ }
+
if (u->cm2_params) {
if ((u->cm2_params->component_settings.shaper_3dlut_setting
!= u->surface->mcm_shaper_3dlut_setting)
@@ -2716,6 +2657,10 @@ static enum surface_update_type check_update_surfaces_for_stream(
overall_type = UPDATE_TYPE_FULL;
}
+ if (stream_update && stream_update->hw_cursor_req) {
+ overall_type = UPDATE_TYPE_FULL;
+ }
+
/* some stream updates require passive update */
if (stream_update) {
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
@@ -2751,6 +2696,9 @@ static enum surface_update_type check_update_surfaces_for_stream(
stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
su_flags->bits.fams_changed = 1;
+ if (stream_update->scaler_sharpener_update)
+ su_flags->bits.scaler_sharpener = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2934,6 +2882,10 @@ static void copy_surface_update_to_plane(
surface->hdr_mult =
srf_update->hdr_mult;
+ if (srf_update->sdr_white_level_nits)
+ surface->sdr_white_level_nits =
+ srf_update->sdr_white_level_nits;
+
if (srf_update->blend_tf)
memcpy(&surface->blend_tf, srf_update->blend_tf,
sizeof(surface->blend_tf));
@@ -3011,6 +2963,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_infopacket)
stream->vrr_infopacket = *update->vrr_infopacket;
+ if (update->hw_cursor_req)
+ stream->hw_cursor_req = *update->hw_cursor_req;
+
if (update->allow_freesync)
stream->allow_freesync = *update->allow_freesync;
@@ -3080,6 +3035,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config = NULL;
}
}
+ if (update->scaler_sharpener_update)
+ stream->scaler_sharpener_update = *update->scaler_sharpener_update;
}
static void backup_planes_and_stream_state(
@@ -3704,7 +3661,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
+ if (!dc->debug.using_dml2)
+ set_p_state_switch_method(dc, context, pipe);
if (dc->debug.visual_confirm)
dc_update_visual_confirm_color(dc, context, pipe);
@@ -3739,7 +3697,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
surface_count,
stream,
context);
- } else {
+ } else if (stream_status) {
build_dmub_cmd_list(dc,
srf_updates,
surface_count,
@@ -3769,47 +3727,6 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
-{
-/*
- * This function calls HWSS to wait for any potentially double buffered
- * operations to complete. It should be invoked as a pre-amble prior
- * to full update programming before asserting any HW locks.
- */
- int pipe_idx;
- int opp_inst;
- int opp_count = dc->res_pool->res_cap->num_opp;
- struct hubp *hubp;
- int mpcc_inst;
- const struct pipe_ctx *pipe_ctx;
-
- for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
- pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
-
- if (!pipe_ctx->stream)
- continue;
-
- if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
- pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
-
- hubp = pipe_ctx->plane_res.hubp;
- if (!hubp)
- continue;
-
- mpcc_inst = hubp->inst;
- // MPCC inst is equal to pipe index in practice
- for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
- if ((dc->res_pool->opps[opp_inst] != NULL) &&
- (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
- dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
- dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
- break;
- }
- }
- }
- wait_for_odm_update_pending_complete(dc, dc_context);
-}
-
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3833,13 +3750,14 @@ static void commit_planes_for_stream(struct dc *dc,
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
- wait_for_outstanding_hw_updates(dc, context);
+ hwss_process_outstanding_hw_updates(dc, dc->current_state);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->plane_state) {
- set_p_state_switch_method(dc, context, pipe);
+ if (!dc->debug.using_dml2)
+ set_p_state_switch_method(dc, context, pipe);
if (dc->debug.visual_confirm)
dc_update_visual_confirm_color(dc, context, pipe);
@@ -4127,7 +4045,8 @@ static void commit_planes_for_stream(struct dc *dc,
}
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (top_pipe_to_program &&
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
@@ -4335,7 +4254,8 @@ static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
dc->debug.force_disable_subvp = true;
for (i = 0; i < context->stream_count; i++) {
policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
- context->streams[i]->debug.force_odm_combine_segments = 0;
+ if (context->streams[i]->debug.allow_transition_for_forced_odm)
+ context->streams[i]->debug.force_odm_combine_segments = 0;
}
}
@@ -4686,7 +4606,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
return true;
}
-static void populate_fast_updates(struct dc_fast_update *fast_update,
+void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update)
@@ -4696,6 +4616,9 @@ static void populate_fast_updates(struct dc_fast_update *fast_update,
if (stream_update) {
fast_update[0].out_transfer_func = stream_update->out_transfer_func;
fast_update[0].output_csc_transform = stream_update->output_csc_transform;
+ } else {
+ fast_update[0].out_transfer_func = NULL;
+ fast_update[0].output_csc_transform = NULL;
}
for (i = 0; i < surface_count; i++) {
@@ -4729,6 +4652,26 @@ static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_c
return false;
}
+bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+{
+ int i;
+
+ if (fast_update[0].out_transfer_func ||
+ fast_update[0].output_csc_transform)
+ return true;
+
+ for (i = 0; i < surface_count; i++) {
+ if (fast_update[i].input_csc_color_matrix ||
+ fast_update[i].gamma ||
+ fast_update[i].gamut_remap_matrix ||
+ fast_update[i].coeff_reduction_factor ||
+ fast_update[i].cursor_csc_color_matrix)
+ return true;
+ }
+
+ return false;
+}
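+/*
+ * Illustration (assumed update): a flip that only changes the surface
+ * address sets none of the fields checked above, so this returns false;
+ * adding e.g. a gamma or gamut remap update makes it a fast non-address
+ * update and this returns true.
+ */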
+
static bool full_update_required(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -4746,6 +4689,8 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].scaling_info ||
(srf_updates[i].hdr_mult.value &&
srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
+ (srf_updates[i].sdr_white_level_nits &&
+ srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) ||
srf_updates[i].in_transfer_func ||
srf_updates[i].func_shaper ||
srf_updates[i].lut3d_func ||
@@ -4785,7 +4730,8 @@ static bool full_update_required(struct dc *dc,
stream_update->func_shaper ||
stream_update->lut3d_func ||
stream_update->pending_test_pattern ||
- stream_update->crtc_timing_adjust))
+ stream_update->crtc_timing_adjust ||
+ stream_update->scaler_sharpener_update))
return true;
if (stream) {
@@ -5233,6 +5179,8 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc_z10_restore(dc);
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
+
dc->hwss.init_hw(dc);
if (dc->hwss.init_sys_ctx != NULL &&
@@ -5244,6 +5192,8 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
default:
ASSERT(dc->current_state->stream_count == 0);
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
+
dc_state_destruct(dc->current_state);
break;
@@ -5382,7 +5332,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
if (allow == dc->idle_optimizations_allowed)
return;
- if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
+ if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL &&
+ dc->hwss.apply_idle_power_optimizations(dc, allow))
dc->idle_optimizations_allowed = allow;
}
@@ -5451,9 +5402,10 @@ static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memcl
hubp->funcs->set_blank_regs(hubp, true);
}
}
-
- dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
- dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
+ if (dc->clk_mgr->funcs->set_max_memclk)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
+ if (dc->clk_mgr->funcs->set_min_memclk)
+ dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -5502,7 +5454,7 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
if (p_state_change_support) {
- if (funcMin <= softMax)
+ if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
// else: No-Op
} else {
@@ -5512,7 +5464,7 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
}
} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
if (p_state_change_support) {
- if (funcMin <= softMax)
+ if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
// else: No-Op
} else {
@@ -5563,6 +5515,9 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
+ if (!dc->caps.dmcub_support)
+ return false;
+
switch (dc->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
@@ -5801,6 +5756,27 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
+ * dc_process_dmub_dpia_set_tps_notification - Submits TPS notification
+ *
+ * @dc: [in] dc structure
+ * @link_index: [in] link index
+ * @tps: [in] requested training pattern sequence (TPS)
+ *
+ * Submits a set_tps_notification command to DMUB via an inbox message.
+ */
+void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps)
+{
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.set_tps_notification.header.type = DMUB_CMD__DPIA;
+ cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION;
+ cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_tps_notification.tps_notification.tps = tps;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
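+/*
+ * Hypothetical usage (values assumed for illustration; the tps encoding is
+ * owned by the DMUB interface): ask DMUB to signal when training pattern 3
+ * is requested on the first link:
+ *
+ *   dc_process_dmub_dpia_set_tps_notification(dc, 0, 3);
+ */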
+
+/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
*
* @dc: [in] dc structure
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 87e36d51c56d..7ee2be8f82c4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -636,57 +636,59 @@ void hwss_build_fast_sequence(struct dc *dc,
while (current_pipe) {
current_mpc_pipe = current_pipe;
while (current_mpc_pipe) {
- if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
- block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
- (*num_steps)++;
- }
- if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc;
- block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips;
- block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
- (*num_steps)++;
- }
- if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
- if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
- stream_status->mall_stream_config.type == SUBVP_MAIN) {
- block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
- block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
- block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
- block_sequence[*num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ if (current_mpc_pipe->plane_state) {
+ if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) {
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
+ block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
+ (*num_steps)++;
+ }
+ if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) {
+ block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc;
+ block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips;
+ block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
+ (*num_steps)++;
+ }
+ if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
+ if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
+ stream_status->mall_stream_config.type == SUBVP_MAIN) {
+ block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
+ block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
+ block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
+ block_sequence[*num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ (*num_steps)++;
+ }
+
+ block_sequence[*num_steps].params.update_plane_addr_params.dc = dc;
+ block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR;
(*num_steps)++;
}
- block_sequence[*num_steps].params.update_plane_addr_params.dc = dc;
- block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR;
- (*num_steps)++;
- }
-
- if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) {
- block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc;
- block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state;
- block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
- (*num_steps)++;
- }
+ if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) {
+ block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc;
+ block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state;
+ block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
+ (*num_steps)++;
+ }
- if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) {
- block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
- (*num_steps)++;
- }
- if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) {
- block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].func = DPP_SETUP_DPP;
- (*num_steps)++;
- }
- if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) {
- block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe;
- block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
- (*num_steps)++;
+ if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) {
+ block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
+ (*num_steps)++;
+ }
+ if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) {
+ block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_SETUP_DPP;
+ (*num_steps)++;
+ }
+ if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) {
+ block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
+ (*num_steps)++;
+ }
}
if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) {
block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc;
@@ -901,12 +903,12 @@ void hwss_program_bias_and_scale(union block_sequence_params *params)
struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- struct dc_bias_and_scale bns_params = {0};
+ struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;
 // TODO: for CNVC set scale and bias registers if necessary
- build_prescale_params(&bns_params, plane_state);
- if (dpp->funcs->dpp_program_bias_and_scale)
+ if (dpp->funcs->dpp_program_bias_and_scale) {
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
}
void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params)
@@ -976,3 +978,126 @@ void get_surface_tile_visual_confirm_color(
break;
}
}
+
+/**
+ * hwss_wait_for_all_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+void hwss_wait_for_all_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require reprogramming the blank pattern for each OPP */
+ hwss_wait_for_all_blank_complete(dc, context);
+}
+
+void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ int count = 0;
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ /* Timeout 100 ms */
+ while (count < 100000) {
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (!pipe->plane_state->status.is_flip_pending)
+ break;
+ udelay(1);
+ count++;
+ }
+ ASSERT(!pipe->plane_state->status.is_flip_pending);
+ }
+}
+
+void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
+{
+/*
+ * This function calls HWSS to wait for any potentially double buffered
+ * operations to complete. It should be invoked as a preamble to full
+ * update programming, before asserting any HW locks.
+ */
+ int pipe_idx;
+ int opp_inst;
+ int opp_count = dc->res_pool->res_cap->num_opp;
+ struct hubp *hubp;
+ int mpcc_inst;
+ const struct pipe_ctx *pipe_ctx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+
+ mpcc_inst = hubp->inst;
+ // MPCC inst is equal to pipe index in practice
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if ((dc->res_pool->opps[opp_inst] != NULL) &&
+ (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ }
+ hwss_wait_for_odm_update_pending_complete(dc, dc_context);
+}
+
+void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
+{
+ /* wait for outstanding updates */
+ hwss_wait_for_outstanding_hw_updates(dc, dc_context);
+
+ /* perform outstanding post update programming */
+ if (dc->hwss.program_outstanding_updates)
+ dc->hwss.program_outstanding_updates(dc, dc_context);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index bcb5267b5a6b..c7599c40d4be 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -342,11 +342,6 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
- if (dc->debug.using_dml2)
- if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq)
- res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else
ASSERT_CRITICAL(false);
}
@@ -1511,8 +1506,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
- spl_out->scl_data.h_active = pipe_ctx->plane_res.scl_data.h_active;
- spl_out->scl_data.v_active = pipe_ctx->plane_res.scl_data.v_active;
// Convert pipe_ctx to respective input params for SPL
translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in);
@@ -3241,6 +3234,8 @@ static bool are_stream_backends_same(
bool dc_is_stream_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
+ if (!old_stream || !stream)
+ return false;
if (!are_stream_backends_same(old_stream, stream))
return false;
@@ -3771,8 +3766,10 @@ static bool planes_changed_for_existing_stream(struct dc_state *context,
}
}
- if (!stream_status)
+ if (!stream_status) {
ASSERT(0);
+ return false;
+ }
for (i = 0; i < set_count; i++)
if (set[i].stream == stream)
@@ -5164,7 +5161,7 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
if (sec_pipe->stream->timing.flags.DSC == 1) {
#if defined(CONFIG_DRM_AMD_DC_FP)
- dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+ dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, sec_pipe->stream_res.opp->inst);
#endif
ASSERT(sec_pipe->stream_res.dsc);
if (sec_pipe->stream_res.dsc == NULL)
@@ -5271,3 +5268,44 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes;
dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes;
}
+
+/* Returns number of DET segments allocated for a given OTG_MASTER pipe */
+int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ struct pipe_ctx *dpp_pipes[MAX_PIPES];
+
+ int dpp_count = 0;
+ int det_segments = 0;
+
+ if (!otg_master->stream)
+ return 0;
+
+ int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+ &state->res_ctx, opp_heads);
+
+ for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+ if (opp_heads[slice_idx]->plane_state) {
+ dpp_count = resource_get_dpp_pipes_for_opp_head(
+ opp_heads[slice_idx],
+ &state->res_ctx,
+ dpp_pipes);
+ for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++)
+ det_segments += dpp_pipes[dpp_idx]->hubp_regs.det_size;
+ }
+ }
+ return det_segments;
+}
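+/*
+ * Example (assumed configuration): an OTG master driving two ODM slices,
+ * each with a single DPP pipe whose programmed det_size is 3 segments,
+ * returns 2 * 3 = 6 DET segments for the stream.
+ */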
+
+bool resource_is_hpo_acquired(struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
+ if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index cd6570a1e20e..fe9f99f1bdf9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -61,6 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
+ notify->type == DMUB_NOTIFICATION_AUX_REPLY ||
notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index e990346e51f6..2597e3fd562b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -211,10 +211,16 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
dml2_opt->use_clock_dc_limits = false;
- dml2_create(dc, dml2_opt, &state->bw_ctx.dml2);
+ if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) {
+ dc_state_release(state);
+ return NULL;
+ }
dml2_opt->use_clock_dc_limits = true;
- dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source);
+ if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) {
+ dc_state_release(state);
+ return NULL;
+ }
}
#endif
@@ -961,10 +967,10 @@ bool dc_state_is_fams2_in_use(
bool is_fams2_in_use = false;
if (state)
- is_fams2_in_use |= state->bw_ctx.bw.dcn.fams2_stream_count > 0;
+ is_fams2_in_use |= state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
if (dc->current_state)
- is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0;
+ is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
return is_fams2_in_use;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 73cdebcd9f37..3992ad73165b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.291"
+#define DC_VER "3.2.301"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -261,10 +261,7 @@ struct dc_caps {
bool zstate_support;
bool ips_support;
uint32_t num_of_internal_disp;
- uint32_t max_dwb_htap;
- uint32_t max_dwb_vtap;
enum dp_protocol_version max_dp_protocol_version;
- bool spdif_aud;
unsigned int mall_size_per_mem_channel;
unsigned int mall_size_total;
unsigned int cursor_cache_size;
@@ -309,8 +306,6 @@ struct dc_bug_wa {
uint8_t dcfclk_ds: 1;
} clock_update_disable_mask;
bool skip_psr_ips_crtc_disable;
- //Customer Specific WAs
- uint32_t force_backlight_start_level;
};
struct dc_dcc_surface_param {
struct dc_size surface_size;
@@ -466,6 +461,8 @@ struct dc_config {
bool use_assr_psp_message;
bool support_edp0_on_dp1;
unsigned int enable_fpo_flicker_detection;
+ bool disable_hbr_audio_dp2;
+ bool consolidated_dpia_dp_lt;
};
enum visual_confirm {
@@ -513,6 +510,7 @@ enum in_game_fams_config {
INGAME_FAMS_SINGLE_DISP_ENABLE, // enable in-game fams
INGAME_FAMS_DISABLE, // disable in-game fams
INGAME_FAMS_MULTI_DISP_ENABLE, //enable in-game fams for multi-display
+ INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY, //enable in-game fams for multi-display only for clamped RR strategies
};
/**
@@ -764,7 +762,9 @@ union dpia_debug_options {
uint32_t extend_aux_rd_interval:1; /* bit 2 */
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
- uint32_t reserved:27;
+ uint32_t disable_usb4_pm_support:1; /* bit 5 */
+ uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
+ uint32_t reserved:25;
} bits;
uint32_t raw;
};
@@ -981,6 +981,7 @@ struct dc_debug_options {
bool disable_z10;
bool enable_z9_disable_interface;
bool psr_skip_crtc_disable;
+ uint32_t ips_skip_crtc_disable_mask;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
uint32_t fixed_vs_aux_delay_config_wa;
@@ -1053,8 +1054,13 @@ struct dc_debug_options {
unsigned int disable_spl;
unsigned int force_easf;
unsigned int force_sharpness;
+ unsigned int force_sharpness_level;
unsigned int force_lls;
bool notify_dpia_hr_bw;
+ bool enable_ips_visual_confirm;
+ unsigned int sharpen_policy;
+ unsigned int scale_to_sharpness_policy;
+ bool skip_full_updated_if_possible;
};
@@ -1268,6 +1274,7 @@ union surface_update_flags {
uint32_t tmz_changed:1;
uint32_t mcm_transfer_function_enable_change:1; /* disable or enable MCM transfer func */
uint32_t full_update:1;
+ uint32_t sdr_white_level_nits:1;
} bits;
uint32_t raw;
@@ -1291,7 +1298,7 @@ struct dc_plane_state {
struct dc_gamma gamma_correction;
struct dc_transfer_func in_transfer_func;
- struct dc_bias_and_scale *bias_and_scale;
+ struct dc_bias_and_scale bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
struct fixed31_32 hdr_mult;
@@ -1348,8 +1355,9 @@ struct dc_plane_state {
enum mpcc_movable_cm_location mcm_location;
struct dc_csc_transform cursor_csc_color_matrix;
bool adaptive_sharpness_en;
- unsigned int sharpnessX1000;
+ int sharpness_level;
enum linear_light_scaling linear_light_scaling;
+ unsigned int sdr_white_level_nits;
};
struct dc_plane_info {
@@ -1368,7 +1376,6 @@ struct dc_plane_info {
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
- bool front_buffer_rendering_active;
enum chroma_cositing cositing;
};
@@ -1508,6 +1515,7 @@ struct dc_surface_update {
*/
struct dc_cm2_parameters *cm2_params;
const struct dc_csc_transform *cursor_csc_color_matrix;
+ unsigned int sdr_white_level_nits;
};
/*
@@ -1585,6 +1593,12 @@ bool dc_acquire_release_mpc_3dlut(
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+
+bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count);
+void populate_fast_updates(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update);
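
Editorial sketch of a hypothetical caller for the two newly exported helpers; MAX_SURFACES and the surrounding commit flow are assumptions, not taken from this hunk:

struct dc_fast_update fast_update[MAX_SURFACES] = {0};

populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
if (fast_nonaddr_updates_exist(fast_update, surface_count)) {
        /* at least one fast update beyond plane address flips is pending */
}
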
/*
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
@@ -1756,6 +1770,7 @@ struct dc_link {
bool dongle_mode_timing_override;
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
+ bool disable_assr_for_uhbr;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -2513,6 +2528,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t mst_alloc_slots,
uint8_t *mst_slots_in_use);
+void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps);
+
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index ded13026c8ff..1e7de0f03290 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -979,6 +979,9 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr);
DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr);
DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size);
+ DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr);
+ DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr);
+ DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size);
DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled);
DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset);
DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset);
@@ -1282,7 +1285,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
union dmub_shared_state_ips_driver_signals new_signals;
DC_LOG_IPS(
- "%s wait idle (ips1_commit=%d ips2_commit=%d)",
+ "%s wait idle (ips1_commit=%u ips2_commit=%u)",
__func__,
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1328,7 +1331,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
}
DC_LOG_IPS(
- "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)",
+ "%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
__func__,
allow_idle,
ips_fw->signals.bits.ips1_commit,
@@ -1371,7 +1374,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv->driver_signals = ips_driver->signals;
DC_LOG_IPS(
- "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)",
+ "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
__func__,
ips_driver->signals.bits.allow_ips1,
ips_driver->signals.bits.allow_ips2,
@@ -1390,7 +1393,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
(!dc->debug.optimize_ips_handshake ||
ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
DC_LOG_IPS(
- "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)",
+ "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1399,7 +1402,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (ips_fw->signals.bits.ips2_commit) {
DC_LOG_IPS(
- "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)",
+ "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1407,7 +1410,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
DC_LOG_IPS(
- "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)",
+ "wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1415,14 +1418,14 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(dc->debug.ips2_entry_delay_us);
DC_LOG_IPS(
- "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)",
+ "exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
DC_LOG_IPS(
- "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)",
+ "wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1430,7 +1433,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(1);
DC_LOG_IPS(
- "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)",
+ "wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1438,7 +1441,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ASSERT(0);
DC_LOG_IPS(
- "resync inbox1 (ips1_commit=%d ips2_commit=%d)",
+ "resync inbox1 (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1449,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1) {
DC_LOG_IPS(
- "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)",
+ "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1457,7 +1460,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(1);
DC_LOG_IPS(
- "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)",
+ "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
}
@@ -1466,14 +1469,14 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)",
+ DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
__func__,
rcg_exit_count,
ips1_exit_count,
ips2_exit_count);
}
-void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
{
struct dmub_srv *dmub;
@@ -1482,12 +1485,38 @@ void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_c
dmub = dc_dmub_srv->dmub;
- if (powerState == DC_ACPI_CM_POWER_STATE_D0)
+ if (power_state == DC_ACPI_CM_POWER_STATE_D0)
dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
else
dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}
+void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
+ enum dc_acpi_cm_power_state power_state)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
+ cmd.idle_opt_set_dc_power_state.header.payload_bytes =
+ sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);
+
+ if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
+ } else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
+ } else {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
+ }
+
+ dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
{
volatile const struct dmub_shared_state_ips_fw *ips_fw;
@@ -1672,22 +1701,17 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
- /* send global configuration parameters */
- global_cmd->config.global.max_allow_delay_us = 100 * 1000; //100ms
- global_cmd->config.global.lock_wait_time_us = 5000; //5ms
- global_cmd->config.global.recovery_timeout_us = 5000; //5ms
- global_cmd->config.global.hwfq_flip_programming_delay_us = 100; //100us
-
- /* copy static feature configuration */
- global_cmd->config.global.features.all = dc->debug.fams2_config.all;
+ if (enable) {
+ /* send global configuration parameters */
+ memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config));
- /* apply feature configuration based on current driver state */
- global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
- global_cmd->config.global.features.bits.enable = enable;
+ /* copy static feature configuration overrides */
+ global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
+ global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
+ global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
- /* construct per-stream configs */
- if (enable) {
- for (i = 0; i < context->bw_ctx.bw.dcn.fams2_stream_count; i++) {
+ /* construct per-stream configs */
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config;
/* configure command header */
@@ -1702,12 +1726,15 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
}
}
- if (enable && context->bw_ctx.bw.dcn.fams2_stream_count) {
+ /* apply feature configuration based on current driver state */
+ global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ global_cmd->config.global.features.bits.enable = enable;
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
/* set multi pending for global, and unset for last stream cmd */
- global_cmd->config.global.num_streams = context->bw_ctx.bw.dcn.fams2_stream_count;
global_cmd->header.multi_cmd_pending = 1;
- cmd[context->bw_ctx.bw.dcn.fams2_stream_count].fams2_config.header.multi_cmd_pending = 0;
- num_cmds += context->bw_ctx.bw.dcn.fams2_stream_count;
+ cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
+ num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
}
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 580940222777..42f0cb672d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -109,7 +109,29 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait);
void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle);
-void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState);
+/**
+ * dc_dmub_srv_set_power_state() - Sets the power state for the DMUB service.
+ *
+ * Controls whether messaging the DMCUB or interfacing with it via HW register
+ * interaction is permissible.
+ *
+ * @dc_dmub_srv - The DC DMUB service pointer
+ * @power_state - the DC power state
+ */
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state);
+
+/**
+ * dc_dmub_srv_notify_fw_dc_power_state() - Notifies firmware of the DC power state.
+ *
+ * Differs from dc_dmub_srv_set_power_state in that it needs to access HW in order
+ * to message DMCUB about the state transition. It should be called after exiting
+ * D0 and before setting the D3 power state.
+ *
+ * @dc_dmub_srv - The DC DMUB service pointer
+ * @power_state - the DC power state
+ */
+void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
+ enum dc_acpi_cm_power_state power_state);
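
Per the kerneldoc above, the notification needs HW access, so it must precede the D3 transition of the service itself. A minimal D3-entry ordering sketch (the real call sites are not in this hunk):

/* notify DMCUB first (requires HW access), then block further traffic */
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
dc_dmub_srv_set_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
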
/**
* @dc_dmub_srv_should_detect() - Checks if link detection is required.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 519c3df78ee5..41bd95e9177a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -969,6 +969,14 @@ union dp_sink_video_fallback_formats {
uint8_t raw;
};
+union dpcd_max_uncompressed_pixel_rate_cap {
+ struct {
+ uint16_t max_uncompressed_pixel_rate_cap :15;
+ uint16_t valid :1;
+ } bits;
+ uint8_t raw[2];
+};
+
union dp_fec_capability1 {
struct {
uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1;
@@ -1170,6 +1178,7 @@ struct dpcd_caps {
struct dc_lttpr_caps lttpr_caps;
struct adaptive_sync_caps adaptive_sync_caps;
struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
+ union dpcd_max_uncompressed_pixel_rate_cap max_uncompressed_pixel_rate_cap;
union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
union dp_main_line_channel_coding_cap channel_coding_cap;
@@ -1340,6 +1349,9 @@ struct dp_trace {
#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110
#endif
+#ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP
+#define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP 0x221c
+#endif
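
A hedged sketch of how the new capability might be fetched with the existing core_link_read_dpcd helper; this call site is illustrative and not part of the patch:

union dpcd_max_uncompressed_pixel_rate_cap cap = { .raw = {0} };

if (core_link_read_dpcd(link, DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP,
                        cap.raw, sizeof(cap.raw)) == DC_OK &&
    cap.bits.valid)
        link->dpcd_caps.max_uncompressed_pixel_rate_cap = cap;
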
#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index fe3078b8789e..9014c2409817 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -59,6 +59,7 @@ struct dc_dsc_config_options {
uint32_t max_target_bpp_limit_override_x16;
uint32_t slice_height_granularity;
uint32_t dsc_force_odm_hslice_override;
+ bool force_dsc_when_not_needed;
};
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
@@ -100,7 +101,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
*/
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
- struct dc_dsc_policy *policy);
+ struct dc_dsc_policy *policy,
+ const enum dc_link_encoding_format link_encoding);
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 959ae0df1e56..c10567ec1c81 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -763,13 +763,6 @@ enum scanning_type {
SCANNING_TYPE_UNDEFINED
};
-enum chroma_cositing {
- CHROMA_COSITING_NONE,
- CHROMA_COSITING_LEFT,
- CHROMA_COSITING_TOPLEFT,
- CHROMA_COSITING_COUNT
-};
-
struct dc_crtc_timing_flags {
uint32_t INTERLACE :1;
uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 582606319764..603552dbd771 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -42,26 +42,26 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality,
static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality,
const struct spl_taps *spl_scaling_quality)
{
- scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c;
- scaling_quality->h_taps = spl_scaling_quality->h_taps;
- scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c;
- scaling_quality->v_taps = spl_scaling_quality->v_taps;
+ scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c + 1;
+ scaling_quality->h_taps = spl_scaling_quality->h_taps + 1;
+ scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c + 1;
+ scaling_quality->v_taps = spl_scaling_quality->v_taps + 1;
}
static void populate_ratios_from_splratios(struct scaling_ratios *ratios,
- const struct spl_ratios *spl_ratios)
+ const struct ratio *spl_ratios)
{
- ratios->horz = spl_ratios->horz;
- ratios->vert = spl_ratios->vert;
- ratios->horz_c = spl_ratios->horz_c;
- ratios->vert_c = spl_ratios->vert_c;
+ ratios->horz = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio >> 5, 3, 19);
+ ratios->vert = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio >> 5, 3, 19);
+ ratios->horz_c = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio_c >> 5, 3, 19);
+ ratios->vert_c = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio_c >> 5, 3, 19);
}
static void populate_inits_from_splinits(struct scl_inits *inits,
- const struct spl_inits *spl_inits)
+ const struct init *spl_inits)
{
- inits->h = spl_inits->h;
- inits->v = spl_inits->v;
- inits->h_c = spl_inits->h_c;
- inits->v_c = spl_inits->v_c;
+ inits->h = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int, spl_inits->h_filter_init_frac >> 5, 0, 19);
+ inits->v = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int, spl_inits->v_filter_init_frac >> 5, 0, 19);
+ inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
+ inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
}
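
The SPL output now carries ratios and inits as raw unsigned fixed point, which the translate layer widens into dc's fixed31_32 via dc_fixpt_from_ux_dy/dc_fixpt_from_int_dy. A minimal sketch of the underlying conversion, assuming struct fixed31_32 keeps 32 fraction bits in its value member (the in-tree helpers may differ in detail):

/* widen a u<x>.<dy> value (dy fraction bits) into s31.32 */
static inline struct fixed31_32 fixpt_from_fraction_bits(uint32_t value,
                                                         unsigned int dy)
{
        struct fixed31_32 res;

        res.value = (long long)value << (32 - dy); /* align binary points */
        return res;
}
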
/// @brief Translate SPL input parameters from pipe context
/// @param pipe_ctx
@@ -128,6 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->basic_out.always_scale = pipe_ctx->stream->ctx->dc->debug.always_scale;
// Make spl input basic output info alpha_en field point to plane res scl_data lb_params alpha_en
spl_in->basic_out.alpha_en = pipe_ctx->plane_res.scl_data.lb_params.alpha_en;
+ spl_in->basic_out.use_two_pixels_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing);
// Make spl input basic input info scaling quality field point to plane state scaling_quality
populate_spltaps_from_taps(&spl_in->scaling_quality, &plane_state->scaling_quality);
// Translate edge adaptive scaler preference
@@ -138,24 +139,36 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2)
spl_in->disable_easf = true;
/* Translate adaptive sharpening preference */
- if (pipe_ctx->stream->ctx->dc->debug.force_sharpness > 0) {
- spl_in->adaptive_sharpness.enable = (pipe_ctx->stream->ctx->dc->debug.force_sharpness > 1) ? true : false;
- if (pipe_ctx->stream->ctx->dc->debug.force_sharpness == 2)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW;
- else if (pipe_ctx->stream->ctx->dc->debug.force_sharpness == 3)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID;
- else if (pipe_ctx->stream->ctx->dc->debug.force_sharpness >= 4)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH;
- } else {
- spl_in->adaptive_sharpness.enable = plane_state->adaptive_sharpness_en;
- if (plane_state->sharpnessX1000 == 0)
+ unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness;
+ unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level;
+ if (sharpness_setting == SHARPNESS_HW_OFF)
+ spl_in->adaptive_sharpness.enable = false;
+ else if (sharpness_setting == SHARPNESS_ZERO) {
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = 0;
+ } else if (sharpness_setting == SHARPNESS_CUSTOM) {
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500;
+
+ if (force_sharpness_level > 0) {
+ if (force_sharpness_level > 10)
+ force_sharpness_level = 10;
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = force_sharpness_level;
+ } else if (!plane_state->adaptive_sharpness_en) {
spl_in->adaptive_sharpness.enable = false;
- else if (plane_state->sharpnessX1000 < 999)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW;
- else if (plane_state->sharpnessX1000 < 1999)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID;
- else // Any other value is high sharpness
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH;
+ spl_in->adaptive_sharpness.sharpness_level = 0;
+ } else {
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = plane_state->sharpness_level;
+ }
}
// Translate linear light scaling preference
if (pipe_ctx->stream->ctx->dc->debug.force_lls > 0)
@@ -171,6 +184,19 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->basic_in.tf_type = (enum spl_transfer_func_type) plane_state->in_transfer_func.type;
spl_in->basic_in.tf_predefined_type = (enum spl_transfer_func_predefined) plane_state->in_transfer_func.tf;
+ spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active;
+ spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active;
+
+ spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy;
+ spl_in->debug.scale_to_sharpness_policy =
+ (enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy;
+
+ /* Check if the stream is in fullscreen and if it is HDR.
+ * Use this to determine sharpness levels.
+ */
+ spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream);
+ spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream);
+ spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits;
}
/// @brief Translate SPL output parameters to pipe context
@@ -179,15 +205,15 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
void translate_SPL_out_params_to_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_out *spl_out)
{
// Make scaler data recout point to spl output field recout
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->scl_data.recout);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->dscl_prog_data->recout);
// Make scaler data ratios point to spl output field ratios
- populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->scl_data.ratios);
+ populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->dscl_prog_data->ratios);
// Make scaler data viewport point to spl output field viewport
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->scl_data.viewport);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->dscl_prog_data->viewport);
// Make scaler data viewport_c point to spl output field viewport_c
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->scl_data.viewport_c);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->dscl_prog_data->viewport_c);
// Make scaler data taps point to spl output field scaling taps
- populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->scl_data.taps);
+ populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->dscl_prog_data->taps);
// Make scaler data init point to spl output field init
- populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->scl_data.inits);
+ populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->dscl_prog_data->init);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
index c73d640c3632..eaa5c5373b28 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
@@ -6,6 +6,7 @@
#define __DC_SPL_TRANSLATE_H__
#include "dc.h"
#include "resource.h"
+#include "dm_helpers.h"
/* Map SPL input parameters to pipe context
* @pipe_ctx: pipe context
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 8ebd7e9e776e..14ea47eda0c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -142,6 +142,7 @@ union stream_update_flags {
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
+ uint32_t scaler_sharpener : 1;
} bits;
uint32_t raw;
@@ -159,6 +160,12 @@ struct test_pattern {
struct dc_stream_debug_options {
char force_odm_combine_segments;
+ /*
+ * When force_odm_combine_segments is non-zero, allow dc to
+ * temporarily transition to ODM bypass when a minimal transition state
+ * is required to prevent visual glitches from showing on the screen
+ */
+ char allow_transition_for_forced_odm;
};
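
An illustrative pairing of the two debug knobs, assuming dc_stream_state embeds them as stream->debug (hypothetical call site):

stream->debug.force_odm_combine_segments = 2;      /* force 2-segment ODM combine */
stream->debug.allow_transition_for_forced_odm = 1; /* allow ODM-bypass minimal transitions */
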
#define LUMINANCE_DATA_TABLE_SIZE 10
@@ -260,6 +267,8 @@ struct dc_stream_state {
struct dc_cursor_attributes cursor_attributes;
struct dc_cursor_position cursor_position;
+ bool hw_cursor_req;
+
uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
/* from stream struct */
@@ -300,6 +309,7 @@ struct dc_stream_state {
bool is_phantom;
struct luminance_data lumin_data;
+ bool scaler_sharpener_update;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -344,6 +354,8 @@ struct dc_stream_update {
struct dc_cursor_attributes *cursor_attributes;
struct dc_cursor_position *cursor_position;
+ bool *hw_cursor_req;
+ bool *scaler_sharpener_update;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index c550e8997033..fd6dca735714 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -590,6 +590,7 @@ enum dc_psr_state {
PSR_STATE5c,
PSR_STATE_HWLOCK_MGR,
PSR_STATE_POLLVUPDATE,
+ PSR_STATE_RELEASE_HWLOCK_MGR_FULL_FRAME,
PSR_STATE_INVALID = 0xFF
};
@@ -1049,6 +1050,23 @@ union replay_error_status {
unsigned char raw;
};
+union replay_low_refresh_rate_enable_options {
+ struct {
+ //BIT[0-3]: Replay Low Hz Support control
+ unsigned int ENABLE_LOW_RR_SUPPORT :1;
+ unsigned int RESERVED_1_3 :3;
+ //BIT[4-15]: Replay Low Hz Enable Scenarios
+ unsigned int ENABLE_STATIC_SCREEN :1;
+ unsigned int ENABLE_FULL_SCREEN_VIDEO :1;
+ unsigned int ENABLE_GENERAL_UI :1;
+ unsigned int RESERVED_7_15 :9;
+ //BIT[16-31]: Replay Low Hz Enable Check
+ unsigned int ENABLE_STATIC_FLICKER_CHECK :1;
+ unsigned int RESERVED_17_31 :15;
+ } bits;
+ unsigned int raw;
+};
+
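
Illustrative use of the new option word; the replay_config embedding is added below, while the dc_link::replay_settings.config path is an assumption of this sketch:

union replay_low_refresh_rate_enable_options opts = { .raw = 0 };

opts.bits.ENABLE_LOW_RR_SUPPORT = 1;
opts.bits.ENABLE_FULL_SCREEN_VIDEO = 1;
opts.bits.ENABLE_STATIC_FLICKER_CHECK = 1;
link->replay_settings.config.low_rr_enable_options = opts;
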
struct replay_config {
/* Replay feature is supported */
bool replay_supported;
@@ -1072,6 +1090,8 @@ struct replay_config {
bool replay_support_fast_resync_in_ultra_sleep_mode;
/* Replay error status */
union replay_error_status replay_error_status;
+ /* Replay Low Hz enable Options */
+ union replay_low_refresh_rate_enable_options low_rr_enable_options;
};
/* Replay feature flags*/
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index 1e0292861244..160c299419b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -328,6 +328,17 @@
type DPSTREAMCLK1_GATE_DISABLE;\
type DPSTREAMCLK2_GATE_DISABLE;\
type DPSTREAMCLK3_GATE_DISABLE;\
+ type SYMCLKA_FE_GATE_DISABLE;\
+ type SYMCLKB_FE_GATE_DISABLE;\
+ type SYMCLKC_FE_GATE_DISABLE;\
+ type SYMCLKD_FE_GATE_DISABLE;\
+ type SYMCLKE_FE_GATE_DISABLE;\
+ type SYMCLKA_GATE_DISABLE;\
+ type SYMCLKB_GATE_DISABLE;\
+ type SYMCLKC_GATE_DISABLE;\
+ type SYMCLKD_GATE_DISABLE;\
+ type SYMCLKE_GATE_DISABLE;\
+
#define DCCG401_REG_FIELD_LIST(type) \
type OTG0_TMDS_PIXEL_RATE_DIV;\
@@ -346,11 +357,7 @@
type SYMCLK32_LE3_SRC_SEL;\
type SYMCLK32_LE2_EN;\
type SYMCLK32_LE3_EN;\
- type DP_DTO_ENABLE[MAX_PIPES];\
- type DSCCLK0_DTO_DB_EN;\
- type DSCCLK1_DTO_DB_EN;\
- type DSCCLK2_DTO_DB_EN;\
- type DSCCLK3_DTO_DB_EN;
+ type DP_DTO_ENABLE[MAX_PIPES];
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 68cd3258f4a9..838d72eaa87f 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -24,6 +24,7 @@
#include "reg_helper.h"
#include "core_types.h"
+#include "resource.h"
#include "dcn35_dccg.h"
#define TO_DCN_DCCG(dccg)\
@@ -41,13 +42,1069 @@
#define DC_LOGGER \
dccg->ctx->logger
+enum symclk_fe_source {
+ SYMCLK_FE_SYMCLK_A = 0, // Select functional clock from backend symclk A
+ SYMCLK_FE_SYMCLK_B,
+ SYMCLK_FE_SYMCLK_C,
+ SYMCLK_FE_SYMCLK_D,
+ SYMCLK_FE_SYMCLK_E,
+ SYMCLK_FE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum symclk_be_source {
+ SYMCLK_BE_PHYCLK = 0, // Select phy clk when sym_clk_enable = 1
+ SYMCLK_BE_DPIACLK_810 = 4,
+ SYMCLK_BE_DPIACLK_162 = 5,
+ SYMCLK_BE_DPIACLK_540 = 6,
+ SYMCLK_BE_DPIACLK_270 = 7,
+ SYMCLK_BE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum physymclk_source {
+ PHYSYMCLK_PHYCLK = 0, // Select symclk as source of clock which is output to PHY through DCIO.
+ PHYSYMCLK_PHYD18CLK, // Select phyd18clk as the source of clock which is output to PHY through DCIO.
+ PHYSYMCLK_PHYD32CLK, // Select phyd32clk as the source of clock which is output to PHY through DCIO.
+ PHYSYMCLK_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum dtbclk_source {
+ DTBCLK_DPREFCLK = 0, // Selects source for DTBCLK_P# as DPREFCLK (src sel 0 and 1 are same)
+ DTBCLK_DPREFCLK_0, // Selects source for DTBCLK_P# as DPREFCLK (src sel 0 and 1 are same)
+ DTBCLK_DTBCLK0, // Selects source for DTBCLK_P# as DTBCLK0
+ DTBCLK_DTBCLK1, // Selects source for DTBCLK_P# as DTBCLK1
+ DTBCLK_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum dppclk_clock_source {
+ DPP_REFCLK = 0, // refclk is selected
+ DPP_DCCG_DTO, // Functional clock selected is DTO tuned DPPCLK
+};
+
+enum dp_stream_clk_source {
+ DP_STREAM_DTBCLK_P0 = 0, // Selects functional clock for DP_STREAM_CLK as DTBCLK_P#
+ DP_STREAM_DTBCLK_P1,
+ DP_STREAM_DTBCLK_P2,
+ DP_STREAM_DTBCLK_P3,
+ DP_STREAM_DTBCLK_P4,
+ DP_STREAM_DTBCLK_P5,
+ DP_STREAM_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum hdmi_char_clk {
+ HDMI_CHAR_PHYAD18CLK = 0, // Selects functional clock for hdmi_char_clk as UNIPHYA PHYD18CLK
+ HDMI_CHAR_PHYBD18CLK,
+ HDMI_CHAR_PHYCD18CLK,
+ HDMI_CHAR_PHYDD18CLK,
+ HDMI_CHAR_PHYED18CLK,
+ HDMI_CHAR_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum hdmi_stream_clk_source {
+ HDMI_STREAM_DTBCLK_P0 = 0, // Selects functional clock for HDMI_STREAM_CLK as DTBCLK_P#
+ HDMI_STREAM_DTBCLK_P1,
+ HDMI_STREAM_DTBCLK_P2,
+ HDMI_STREAM_DTBCLK_P3,
+ HDMI_STREAM_DTBCLK_P4,
+ HDMI_STREAM_DTBCLK_P5,
+ HDMI_STREAM_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum symclk32_se_clk_source {
+ SYMCLK32_SE_PHYAD32CLK = 0, // Selects functional clock for SYMCLK32 as UNIPHYA PHYD32CLK
+ SYMCLK32_SE_PHYBD32CLK,
+ SYMCLK32_SE_PHYCD32CLK,
+ SYMCLK32_SE_PHYDD32CLK,
+ SYMCLK32_SE_PHYED32CLK,
+ SYMCLK32_SE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum symclk32_le_clk_source {
+ SYMCLK32_LE_PHYAD32CLK = 0, // Selects functional clock for SYMCLK32 as UNIPHYA PHYD32CLK
+ SYMCLK32_LE_PHYBD32CLK,
+ SYMCLK32_LE_PHYCD32CLK,
+ SYMCLK32_LE_PHYDD32CLK,
+ SYMCLK32_LE_PHYED32CLK,
+ SYMCLK32_LE_REFCLK = 0xFF, // Arbitrary value to pass refclk selection in software
+};
+
+enum dsc_clk_source {
+ DSC_CLK_REF_CLK = 0, // Ref clock selected for DSC_CLK
+ DSC_DTO_TUNED_CK_GPU_DISCLK_3, // DTO divided clock selected as functional clock
+};
+
+
+static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
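
All of the RCG helpers in this file share one inverted polarity: the hardware fields are *_GATE_DISABLE bits, so enabling root clock gating writes 0 and disabling it writes 1. A one-line helper capturing that mapping (clarity sketch only; the driver writes the expression inline):

static inline uint32_t rcg_to_gate_disable(bool enable_rcg)
{
        /* GATE_DISABLE = 1 keeps the clock running; 0 permits gating */
        return enable_rcg ? 0 : 1;
}
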
+
+static void dccg35_set_symclk32_se_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se && enable)
+ return;
+
+ /* SYMCLK32_ROOT_SE#_GATE_DISABLE will clock gate in DCCG */
+ /* SYMCLK32_SE#_GATE_DISABLE will clock gate in HPO only */
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk32_le_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE0_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE1_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_physymclk_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 4:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk_fe_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk_fe && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKA_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKA_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKB_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKB_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKC_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKC_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKD_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKD_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 4:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKE_FE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKE_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk_be_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* TBD: add a symclk_be bit to the rcg control bits */
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk_fe && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKA_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKA_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKB_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKB_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKC_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKC_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKD_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKD_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 4:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKE_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_dtbclk_p_rcg(struct dccg *dccg, int inst, bool enable)
+{
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void dccg35_set_dppclk_rcg(struct dccg *dccg,
+ int inst, bool enable)
+{
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void dccg35_set_dpstreamclk_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK0_GATE_DISABLE, enable ? 0 : 1,
+ DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK1_GATE_DISABLE, enable ? 0 : 1,
+ DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK2_GATE_DISABLE, enable ? 0 : 1,
+ DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK3_GATE_DISABLE, enable ? 0 : 1,
+ DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_smclk32_se_rcg(
+ struct dccg *dccg,
+ int inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se && enable)
+ return;
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, enable ? 0 : 1,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, enable ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_dsc_clk_src_new(struct dccg *dccg, int inst, enum dsc_clk_source src)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* DSCCLK#_EN=0 switches to refclock from functional clock */
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, src);
+ break;
+ case 1:
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, src);
+ break;
+ case 2:
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, src);
+ break;
+ case 3:
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, src);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk32_se_src_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk32_se_clk_source src
+ )
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src,
+ SYMCLK32_SE0_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src,
+ SYMCLK32_SE1_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src,
+ SYMCLK32_SE2_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, (src == SYMCLK32_SE_REFCLK) ? 0 : src,
+ SYMCLK32_SE3_EN, (src == SYMCLK32_SE_REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static int
+dccg35_is_symclk32_se_src_functional_le_new(struct dccg *dccg, int symclk_32_se_inst, int symclk_32_le_inst)
+{
+ uint32_t en;
+ uint32_t src_sel;
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_GET_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, &src_sel, SYMCLK32_SE3_EN, &en);
+
+ if (en == 1 && src_sel == symclk_32_le_inst)
+ return 1;
+
+ return 0;
+}
+
+
+static void dccg35_set_symclk32_le_src_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk32_le_clk_source src)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE0_SRC_SEL, (src == SYMCLK32_LE_REFCLK) ? 0 : src,
+ SYMCLK32_LE0_EN, (src == SYMCLK32_LE_REFCLK) ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE1_SRC_SEL, (src == SYMCLK32_LE_REFCLK) ? 0 : src,
+ SYMCLK32_LE1_EN, (src == SYMCLK32_LE_REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dcn35_set_dppclk_src_new(struct dccg *dccg,
+ int inst, enum dppclk_clock_source src)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, src);
+ break;
+ case 1:
+ REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, src);
+ break;
+ case 2:
+ REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, src);
+ break;
+ case 3:
+ REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, src);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void dccg35_set_dtbclk_p_src_new(
+ struct dccg *dccg,
+ enum dtbclk_source src,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* If DTBCLK_P#_EN is 0, refclock is selected as the functional clock.
+ * If DTBCLK_P#_EN is 1, the clock selected by DTBCLK_P#_SRC_SEL is used.
+ */
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DTBCLK_P_CNTL,
+ DTBCLK_P0_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src,
+ DTBCLK_P0_EN, (src == DTBCLK_REFCLK) ? 0 : 1);
+ break;
+ case 1:
+ REG_UPDATE_2(DTBCLK_P_CNTL,
+ DTBCLK_P1_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src,
+ DTBCLK_P1_EN, (src == DTBCLK_REFCLK) ? 0 : 1);
+ break;
+ case 2:
+ REG_UPDATE_2(DTBCLK_P_CNTL,
+ DTBCLK_P2_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src,
+ DTBCLK_P2_EN, (src == DTBCLK_REFCLK) ? 0 : 1);
+ break;
+ case 3:
+ REG_UPDATE_2(DTBCLK_P_CNTL,
+ DTBCLK_P3_SRC_SEL, (src == DTBCLK_REFCLK) ? 0 : src,
+ DTBCLK_P3_EN, (src == DTBCLK_REFCLK) ? 0 : 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_dpstreamclk_src_new(
+ struct dccg *dccg,
+ enum dp_stream_clk_source src,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN,
+ (src == DP_STREAM_REFCLK) ? 0 : 1,
+ DPSTREAMCLK0_SRC_SEL,
+ (src == DP_STREAM_REFCLK) ? 0 : src);
+ break;
+ case 1:
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
+ (src == DP_STREAM_REFCLK) ? 0 : 1,
+ DPSTREAMCLK1_SRC_SEL,
+ (src == DP_STREAM_REFCLK) ? 0 : src);
+
+ break;
+ case 2:
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
+ (src == DP_STREAM_REFCLK) ? 0 : 1,
+ DPSTREAMCLK2_SRC_SEL,
+ (src == DP_STREAM_REFCLK) ? 0 : src);
+
+ break;
+ case 3:
+ REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
+ (src == DP_STREAM_REFCLK) ? 0 : 1,
+ DPSTREAMCLK3_SRC_SEL,
+ (src == DP_STREAM_REFCLK) ? 0 : src);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_physymclk_src_new(
+ struct dccg *dccg,
+ enum physymclk_source src,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYASYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ case 1:
+ REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYBSYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ case 2:
+ REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYCSYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ case 3:
+ REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYDSYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ case 4:
+ REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN,
+ (src == PHYSYMCLK_REFCLK) ? 0 : 1,
+ PHYESYMCLK_SRC_SEL,
+ (src == PHYSYMCLK_REFCLK) ? 0 : src);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg35_set_symclk_be_src_new(
+ struct dccg *dccg,
+ enum symclk_be_source src,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
+ SYMCLKA_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKA_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
+ SYMCLKB_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKB_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
+ SYMCLKC_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKC_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
+ SYMCLKD_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKD_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ case 4:
+ REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
+ SYMCLKE_CLOCK_ENABLE, (src == SYMCLK_BE_REFCLK) ? 0 : 1,
+ SYMCLKE_SRC_SEL, (src == SYMCLK_BE_REFCLK) ? 0 : src);
+ break;
+ }
+}
+
+static int dccg35_is_symclk_fe_src_functional_be(struct dccg *dccg,
+ int symclk_fe_inst,
+ int symclk_be_inst)
+{
+
+ uint32_t en = 0;
+ uint32_t src_sel = 0;
+
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (symclk_fe_inst) {
+ case 0:
+ REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_SRC_SEL, &src_sel, SYMCLKA_FE_EN, &en);
+ break;
+ case 1:
+ REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, &src_sel, SYMCLKB_FE_EN, &en);
+ break;
+ case 2:
+ REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, &src_sel, SYMCLKC_FE_EN, &en);
+ break;
+ case 3:
+ REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, &src_sel, SYMCLKD_FE_EN, &en);
+ break;
+ case 4:
+ REG_GET_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, &src_sel, SYMCLKE_FE_EN, &en);
+ break;
+ }
+
+ if (en == 1 && src_sel == symclk_be_inst)
+ return 1;
+
+ return 0;
+}
+
+static void dccg35_set_symclk_fe_src_new(struct dccg *dccg, enum symclk_fe_source src, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
+ SYMCLKA_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKA_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE,
+ SYMCLKB_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKB_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE,
+ SYMCLKC_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKC_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE,
+ SYMCLKD_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKD_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ case 4:
+ REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
+ SYMCLKE_FE_EN, (src == SYMCLK_FE_REFCLK) ? 0 : 1,
+ SYMCLKE_FE_SRC_SEL, (src == SYMCLK_FE_REFCLK) ? 0 : src);
+ break;
+ }
+}
+
+static uint32_t dccg35_is_fe_rcg(struct dccg *dccg, int inst)
+{
+ uint32_t enable = 0;
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKA_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ case 1:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKB_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ case 2:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKC_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ case 3:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKD_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ case 4:
+ REG_GET(DCCG_GATE_DISABLE_CNTL5,
+ SYMCLKE_FE_ROOT_GATE_DISABLE, &enable);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+ return enable;
+}
+
+static uint32_t dccg35_is_symclk32_se_rcg(struct dccg *dccg, int inst)
+{
+ uint32_t disable_l1 = 0;
+ uint32_t disable_l2 = 0;
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ switch (inst) {
+ case 0:
+ REG_GET_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, &disable_l1,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, &disable_l2);
+ break;
+ case 1:
+ REG_GET_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, &disable_l1,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, &disable_l2);
+ break;
+ case 2:
+ REG_GET_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, &disable_l1,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, &disable_l2);
+ break;
+ case 3:
+ REG_GET_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, &disable_l1,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, &disable_l2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+
+ /* return true if either block level or DCCG level gating is active */
+ return (disable_l1 | disable_l2);
+}
+
+static void dccg35_enable_symclk_fe_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk_fe_source src)
+{
+ dccg35_set_symclk_fe_rcg(dccg, inst, false);
+ dccg35_set_symclk_fe_src_new(dccg, src, inst);
+}
+
+static void dccg35_disable_symclk_fe_new(
+ struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_symclk_fe_src_new(dccg, SYMCLK_FE_REFCLK, inst);
+ dccg35_set_symclk_fe_rcg(dccg, inst, true);
+}
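
Editorial note on the enable/disable pairs above and below, expressed as the comment the code itself implies:

/* enable:  un-RCG first, then select the functional source;
 * disable: park the mux on refclk first, then RCG.
 * Either way a live functional clock is never root-gated. */
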
+
+static void dccg35_enable_symclk_be_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk_be_source src)
+{
+ dccg35_set_symclk_be_rcg(dccg, inst, false);
+ dccg35_set_symclk_be_src_new(dccg, inst, src);
+}
+
+static void dccg35_disable_symclk_be_new(
+ struct dccg *dccg,
+ int inst)
+{
+ int i;
+
+ /* Switch from functional clock to refclock */
+ dccg35_set_symclk_be_src_new(dccg, inst, SYMCLK_BE_REFCLK);
+
+ /* Check if any SEs are still connected to this LE and disable them */
+ for (i = 0; i < 4; i++) {
+ /* Make sure FE is not already in RCG */
+ if (dccg35_is_fe_rcg(dccg, i) == 0) {
+ if (dccg35_is_symclk_fe_src_functional_be(dccg, i, inst))
+ dccg35_disable_symclk_fe_new(dccg, i);
+ }
+ }
+ /* Safe to RCG SYMCLK */
+ dccg35_set_symclk_be_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_symclk32_se_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk32_se_clk_source src)
+{
+ dccg35_set_symclk32_se_rcg(dccg, inst, false);
+ dccg35_set_symclk32_se_src_new(dccg, inst, src);
+}
+
+static void dccg35_disable_symclk32_se_new(
+ struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_symclk32_se_src_new(dccg, SYMCLK32_SE_REFCLK, inst);
+ dccg35_set_symclk32_se_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_symclk32_le_new(
+ struct dccg *dccg,
+ int inst,
+ enum symclk32_le_clk_source src)
+{
+ dccg35_set_symclk32_le_rcg(dccg, inst, false);
+ dccg35_set_symclk32_le_src_new(dccg, inst, src);
+}
+
+static void dccg35_disable_symclk32_le_new(
+ struct dccg *dccg,
+ int inst)
+{
+ int i;
+
+ /* Switch from functional clock to refclock */
+ dccg35_set_symclk32_le_src_new(dccg, inst, SYMCLK32_LE_REFCLK);
+
+ /* Check if any SEs are connected and disable them as well */
+ for (i = 0; i < 4; i++) {
+ /* Make sure FE is not already in RCG */
+ if (dccg35_is_symclk32_se_rcg(dccg, i) == 0) {
+ /* Disable any SE connected to this LE before RCG */
+ if (dccg35_is_symclk32_se_src_functional_le_new(dccg, i, inst))
+ dccg35_disable_symclk32_se_new(dccg, i);
+ }
+ }
+ /* Safe to RCG SYM32_LE */
+ dccg35_set_symclk32_le_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_physymclk_new(struct dccg *dccg,
+ int inst,
+ enum physymclk_source src)
+{
+ dccg35_set_physymclk_rcg(dccg, inst, false);
+ dccg35_set_physymclk_src_new(dccg, src, inst);
+}
+
+static void dccg35_disable_physymclk_new(struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_physymclk_src_new(dccg, PHYSYMCLK_REFCLK, inst);
+ dccg35_set_physymclk_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_dpp_clk_new(
+ struct dccg *dccg,
+ int inst,
+ enum dppclk_clock_source src)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ /* Sanitize inst before use in array de-ref */
+ if (inst < 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ dccg35_set_dppclk_rcg(dccg, inst, false);
+ dcn35_set_dppclk_src_new(dccg, inst, src);
+ /* Switch DPP clock to DTO */
+ REG_SET_2(DPPCLK_DTO_PARAM[inst], 0,
+ DPPCLK0_DTO_PHASE, 0xFF,
+ DPPCLK0_DTO_MODULO, 0xFF);
+}
+
+static void dccg35_disable_dpp_clk_new(
+ struct dccg *dccg,
+ int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ /* Sanitize inst before use in array de-ref */
+ if (inst < 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ dcn35_set_dppclk_src_new(dccg, inst, DPP_REFCLK);
+ REG_SET_2(DPPCLK_DTO_PARAM[inst], 0,
+ DPPCLK0_DTO_PHASE, 0,
+ DPPCLK0_DTO_MODULO, 1);
+ dccg35_set_dppclk_rcg(dccg, inst, true);
+}
+
+static void dccg35_disable_dscclk_new(struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_dsc_clk_src_new(dccg, inst, DSC_CLK_REF_CLK);
+ dccg35_set_dsc_clk_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_dscclk_new(struct dccg *dccg,
+ int inst,
+ enum dsc_clk_source src)
+{
+ dccg35_set_dsc_clk_rcg(dccg, inst, false);
+ dccg35_set_dsc_clk_src_new(dccg, inst, src);
+}
+
+static void dccg35_enable_dtbclk_p_new(struct dccg *dccg,
+ enum dtbclk_source src,
+ int inst)
+{
+ dccg35_set_dtbclk_p_rcg(dccg, inst, false);
+ dccg35_set_dtbclk_p_src_new(dccg, src, inst);
+}
+
+static void dccg35_disable_dtbclk_p_new(struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_dtbclk_p_src_new(dccg, DTBCLK_REFCLK, inst);
+ dccg35_set_dtbclk_p_rcg(dccg, inst, true);
+}
+
+static void dccg35_disable_dpstreamclk_new(struct dccg *dccg,
+ int inst)
+{
+ dccg35_set_dpstreamclk_src_new(dccg, DP_STREAM_REFCLK, inst);
+ dccg35_set_dpstreamclk_rcg(dccg, inst, true);
+}
+
+static void dccg35_enable_dpstreamclk_new(struct dccg *dccg,
+ enum dp_stream_clk_source src,
+ int inst)
+{
+ dccg35_set_dpstreamclk_rcg(dccg, inst, false);
+ dccg35_set_dpstreamclk_src_new(dccg, src, inst);
+}
+
static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
uint32_t dispclk_rdivider_value = 0;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
- REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+ if (dispclk_rdivider_value != 0)
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
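
The new guard reads as "don't mirror an unprogrammed divider" (editorial inference; the patch carries no comment of its own):

/* a zero RDIVIDER readback means DENTIST is not programmed yet;
 * copying it would set an invalid zero WDIVIDER, so skip the resync */
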
static void dcn35_set_dppclk_enable(struct dccg *dccg,
@@ -657,6 +1714,12 @@ static void dccg35_disable_symclk32_se(
}
}
+static void dccg35_init_cb(struct dccg *dccg)
+{
+ (void)dccg;
+ /* Any RCG should be done when the driver enters low power mode */
+}
+
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -685,10 +1748,6 @@ void dccg35_init(struct dccg *dccg)
dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
}
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- for (otg_inst = 0; otg_inst < 4; otg_inst++)
- dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0);
-
/*
dccg35_enable_global_fgcg_rep(
dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits
@@ -869,47 +1928,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst,
}
/*get other front end connected to this backend*/
-static uint8_t dccg35_get_other_enabled_symclk_fe(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+static uint8_t dccg35_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst)
{
uint8_t num_enabled_symclk_fe = 0;
- uint32_t be_clk_en = 0, fe_clk_en[5] = {0}, be_clk_sel[5] = {0};
+ uint32_t fe_clk_en[5] = {0}, be_clk_sel[5] = {0};
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- switch (link_enc_inst) {
- case 0:
- REG_GET_3(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, &be_clk_en,
- SYMCLKA_FE_EN, &fe_clk_en[0],
- SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]);
- break;
- case 1:
- REG_GET_3(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, &be_clk_en,
- SYMCLKB_FE_EN, &fe_clk_en[1],
- SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]);
- break;
- case 2:
- REG_GET_3(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, &be_clk_en,
- SYMCLKC_FE_EN, &fe_clk_en[2],
- SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]);
- break;
- case 3:
- REG_GET_3(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, &be_clk_en,
- SYMCLKD_FE_EN, &fe_clk_en[3],
- SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]);
- break;
- case 4:
- REG_GET_3(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, &be_clk_en,
- SYMCLKE_FE_EN, &fe_clk_en[4],
- SYMCLKE_FE_SRC_SEL, &be_clk_sel[4]);
- break;
- }
- if (be_clk_en) {
- /* for DPMST, this backend could be used by multiple front end.
- only disable the backend if this stream_enc_ins is the last active stream enc connected to this back_end*/
- uint8_t i;
- for (i = 0; i != link_enc_inst && i < ARRAY_SIZE(fe_clk_en); i++) {
- if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
- num_enabled_symclk_fe++;
- }
+ REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0],
+ SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]);
+
+ REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1],
+ SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]);
+
+ REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2],
+ SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]);
+
+ REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3],
+ SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]);
+
+ REG_GET_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, &fe_clk_en[4],
+ SYMCLKE_FE_SRC_SEL, &be_clk_sel[4]);
+
+ uint8_t i;
+
+ for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) {
+ if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
+ num_enabled_symclk_fe++;
}
return num_enabled_symclk_fe;
}
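For DP MST a single backend (link encoder) clock can feed several front ends, so the caller only gates the backend once this count reaches zero. A standalone sketch of that reference-count-style check, with a hypothetical fe_state table standing in for the SYMCLK*_CLOCK_ENABLE register reads:

#include <stdint.h>
#include <stdio.h>

#define NUM_FE 5

struct fe_state {
        uint8_t enabled;        /* SYMCLK*_FE_EN */
        uint8_t src_sel;        /* SYMCLK*_FE_SRC_SEL: BE feeding this FE */
};

static uint8_t count_fes_on_be(const struct fe_state fe[NUM_FE], uint8_t be)
{
        uint8_t n = 0;
        int i;

        for (i = 0; i < NUM_FE; i++)
                if (fe[i].enabled && fe[i].src_sel == be)
                        n++;
        return n;
}

int main(void)
{
        struct fe_state fe[NUM_FE] = { {1, 0}, {1, 0} };        /* rest zeroed */

        if (count_fes_on_be(fe, 0) == 0)
                printf("BE0 can be disabled\n");
        else
                printf("BE0 still in use\n");   /* two FEs still select BE0 */
        return 0;
}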
@@ -957,9 +2001,9 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
break;
}
- /*check other enabled symclk fe */
- num_enabled_symclk_fe = dccg35_get_other_enabled_symclk_fe(dccg, stream_enc_inst, link_enc_inst);
- /*only turn off backend clk if other front end attachecd to this backend are all off,
+ /* check other enabled symclk fe connected to this be */
+ num_enabled_symclk_fe = dccg35_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst);
+ /* only turn off backend clk if all other front ends attached to this backend are off,
for mst, only turn off the backend if this is the last front end*/
if (num_enabled_symclk_fe == 0) {
switch (link_enc_inst) {
@@ -997,6 +2041,336 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
}
}
+static void dccg35_set_dpstreamclk_cb(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ int otg_inst,
+ int dp_hpo_inst)
+{
+
+ enum dtbclk_source dtb_clk_src;
+ enum dp_stream_clk_source dp_stream_clk_src;
+
+ switch (src) {
+ case REFCLK:
+ dtb_clk_src = DTBCLK_REFCLK;
+ dp_stream_clk_src = DP_STREAM_REFCLK;
+ break;
+ case DPREFCLK:
+ dtb_clk_src = DTBCLK_DPREFCLK;
+ dp_stream_clk_src = (enum dp_stream_clk_source)otg_inst;
+ break;
+ case DTBCLK0:
+ dtb_clk_src = DTBCLK_DTBCLK0;
+ dp_stream_clk_src = (enum dp_stream_clk_source)otg_inst;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (dtb_clk_src == DTBCLK_REFCLK &&
+ dp_stream_clk_src == DP_STREAM_REFCLK) {
+ dccg35_disable_dtbclk_p_new(dccg, otg_inst);
+ dccg35_disable_dpstreamclk_new(dccg, dp_hpo_inst);
+ } else {
+ dccg35_enable_dtbclk_p_new(dccg, dtb_clk_src, otg_inst);
+ dccg35_enable_dpstreamclk_new(dccg,
+ dp_stream_clk_src,
+ dp_hpo_inst);
+ }
+}
+
+static void dccg35_set_dpstreamclk_root_clock_gating_cb(
+ struct dccg *dccg,
+ int dp_hpo_inst,
+ bool power_on)
+{
+ /* power_on set indicates we need to ungate.
+ * Currently called from the optimize_bandwidth and prepare_bandwidth calls.
+ * Since the clock source is not passed, restore to refclock on ungate.
+ * Instance 0 is implied here since there is only one streamclock resource.
+ * Redundant, as gating when enabled is achieved through set_dpstreamclk.
+ */
+ if (power_on)
+ dccg35_enable_dpstreamclk_new(dccg,
+ DP_STREAM_REFCLK,
+ dp_hpo_inst);
+ else
+ dccg35_disable_dpstreamclk_new(dccg, dp_hpo_inst);
+}
+
+static void dccg35_update_dpp_dto_cb(struct dccg *dccg, int dpp_inst,
+ int req_dppclk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->dpp_clock_gated[dpp_inst]) {
+ /*
+ * Do not update the DPPCLK DTO if the clock is stopped.
+ */
+ return;
+ }
+
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // fixed modulo; phase is computed against 0xff
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
+
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
+ }
+
+ /* Enable DPP CLK DTO output */
+ dccg35_enable_dpp_clk_new(dccg, dpp_inst, DPP_DCCG_DTO);
+
+ /* Program DTO */
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
+ } else
+ dccg35_disable_dpp_clk_new(dccg, dpp_inst);
+
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+}
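The phase/modulo math divides the reference DPP clock down to the per-pipe rate, rounding the phase up so the DTO output never undershoots the request. A worked standalone example with illustrative clock values:

#include <stdio.h>

int main(void)
{
        int ref_dppclk = 600000;        /* kHz, illustrative global clock */
        int req_dppclk = 301000;        /* kHz, requested pipe clock */
        int modulo = 0xff;
        int phase = (modulo * req_dppclk + ref_dppclk - 1) / ref_dppclk;

        if (phase > 0xff)
                phase = 0xff;           /* DTO cannot exceed the ref clock */

        /* rounding up guarantees dto >= req */
        printf("phase=%d dto=%lld kHz (req %d kHz)\n", phase,
               (long long)ref_dppclk * phase / modulo, req_dppclk);
        return 0;
}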
+
+static void dccg35_dpp_root_clock_control_cb(
+ struct dccg *dccg,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ if (dccg->dpp_clock_gated[dpp_inst] == power_on)
+ return;
+ /* power_on set indicates we need to ungate.
+ * Currently called from the optimize_bandwidth and prepare_bandwidth calls.
+ * Since the clock source is not passed, restore to refclock on ungate.
+ * Redundant, as gating when enabled is achieved through update_dpp_dto.
+ */
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, !power_on);
+
+ dccg->dpp_clock_gated[dpp_inst] = !power_on;
+}
+
+static void dccg35_enable_symclk32_se_cb(
+ struct dccg *dccg,
+ int inst,
+ enum phyd32clk_clock_source phyd32clk)
+{
+ dccg35_enable_symclk32_se_new(dccg, inst, (enum symclk32_se_clk_source)phyd32clk);
+}
+
+static void dccg35_disable_symclk32_se_cb(struct dccg *dccg, int inst)
+{
+ dccg35_disable_symclk32_se_new(dccg, inst);
+}
+
+static void dccg35_enable_symclk32_le_cb(
+ struct dccg *dccg,
+ int inst,
+ enum phyd32clk_clock_source src)
+{
+ dccg35_enable_symclk32_le_new(dccg, inst, (enum symclk32_le_clk_source) src);
+}
+
+static void dccg35_disable_symclk32_le_cb(struct dccg *dccg, int inst)
+{
+ dccg35_disable_symclk32_le_new(dccg, inst);
+}
+
+static void dccg35_set_symclk32_le_root_clock_gating_cb(
+ struct dccg *dccg,
+ int inst,
+ bool power_on)
+{
+ /* power_on set indicates we need to ungate.
+ * Currently called from the optimize_bandwidth and prepare_bandwidth calls.
+ * Since the clock source is not passed, restore to refclock on ungate.
+ * Redundant, as gating when enabled is achieved through disable_symclk32_le.
+ */
+ if (power_on)
+ dccg35_enable_symclk32_le_new(dccg, inst, SYMCLK32_LE_REFCLK);
+ else
+ dccg35_disable_symclk32_le_new(dccg, inst);
+}
+
+static void dccg35_set_physymclk_cb(
+ struct dccg *dccg,
+ int inst,
+ enum physymclk_clock_source clk_src,
+ bool force_enable)
+{
+ /* force_enable = 0 indicates we can switch to ref clock */
+ if (force_enable)
+ dccg35_enable_physymclk_new(dccg, inst, (enum physymclk_source)clk_src);
+ else
+ dccg35_disable_physymclk_new(dccg, inst);
+}
+
+static void dccg35_set_physymclk_root_clock_gating_cb(
+ struct dccg *dccg,
+ int inst,
+ bool power_on)
+{
+ /* Redundant RCG already done in disable_physymclk
+ * power_on = 1 indicates we need to ungate
+ */
+ if (power_on)
+ dccg35_enable_physymclk_new(dccg, inst, PHYSYMCLK_REFCLK);
+ else
+ dccg35_disable_physymclk_new(dccg, inst);
+}
+
+static void dccg35_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int inst,
+ bool power_on)
+{
+ /* power_on set indicates we need to ungate.
+ * Currently called from the optimize_bandwidth and prepare_bandwidth calls.
+ * Since the clock source is not passed, restore to refclock on ungate.
+ * Redundant, as gating when enabled is achieved through disable_symclk32_le.
+ */
+ if (power_on)
+ dccg35_enable_symclk32_le_new(dccg, inst, SYMCLK32_LE_REFCLK);
+ else
+ dccg35_disable_symclk32_le_new(dccg, inst);
+}
+
+static void dccg35_set_dtbclk_p_src_cb(
+ struct dccg *dccg,
+ enum streamclk_source src,
+ uint32_t inst)
+{
+ if (src == DTBCLK0)
+ dccg35_enable_dtbclk_p_new(dccg, DTBCLK_DTBCLK0, inst);
+ else
+ dccg35_disable_dtbclk_p_new(dccg, inst);
+}
+
+static void dccg35_set_dtbclk_dto_cb(
+ struct dccg *dccg,
+ const struct dtbclk_dto_params *params)
+{
+ /* set_dtbclk_p_src is typically called earlier to switch to DTBCLK.
+ * If params->ref_dtbclk_khz and req_dtbclk_khz are 0, switch to refclock.
+ */
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ /* DTO Output Rate / Pixel Rate = 1/4 */
+ int req_dtbclk_khz = params->pixclk_khz / 4;
+
+ if (params->ref_dtbclk_khz && req_dtbclk_khz) {
+ uint32_t modulo, phase;
+
+ dccg35_enable_dtbclk_p_new(dccg, DTBCLK_DTBCLK0, params->otg_inst);
+
+ // phase / modulo = dtbclk / dtbclk ref
+ modulo = params->ref_dtbclk_khz * 1000;
+ phase = req_dtbclk_khz * 1000;
+
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase);
+
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 1);
+
+ REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1,
+ 1, 100);
+
+ /* program OTG_PIXEL_RATE_DIV for DIVK1 and DIVK2 fields */
+ dccg35_set_pixel_rate_div(dccg, params->otg_inst, PIXEL_RATE_DIV_BY_1, PIXEL_RATE_DIV_BY_1);
+
+ /* The recommended programming sequence for enabling the DTBCLK DTO to
+ * generate a valid pixel clock for the HPO DPSTREAM encoder specifies that
+ * the DTO source select should be set only after the DTO is enabled.
+ */
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ PIPE_DTO_SRC_SEL[params->otg_inst], 2);
+ } else {
+ dccg35_disable_dtbclk_p_new(dccg, params->otg_inst);
+
+ REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+ PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
+
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+ }
+}
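Because phase and modulo are the requested and reference rates scaled by the same factor, the DTO ratio reproduces the requested DTBCLK exactly, at one quarter of the pixel rate. A standalone sketch with illustrative numbers (594 MHz is used here only as a familiar pixel clock):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pixclk_khz = 594000;             /* illustrative pixel clock */
        uint32_t req_dtbclk_khz = pixclk_khz / 4; /* DTO out / pixel rate = 1/4 */
        uint32_t ref_dtbclk_khz = 600000;

        uint32_t modulo = ref_dtbclk_khz * 1000;
        uint32_t phase = req_dtbclk_khz * 1000;

        /* output = ref * phase / modulo = req, exactly */
        printf("dtbclk = %llu kHz\n",
               (unsigned long long)ref_dtbclk_khz * phase / modulo);
        return 0;
}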
+
+static void dccg35_disable_dscclk_cb(struct dccg *dccg,
+ int inst)
+{
+ dccg35_disable_dscclk_new(dccg, inst);
+}
+
+static void dccg35_enable_dscclk_cb(struct dccg *dccg, int inst)
+{
+ dccg35_enable_dscclk_new(dccg, inst, DSC_DTO_TUNED_CK_GPU_DISCLK_3);
+}
+
+static void dccg35_enable_symclk_se_cb(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
+{
+ /* Switch to functional clock if already not selected */
+ dccg35_enable_symclk_be_new(dccg, SYMCLK_BE_PHYCLK, link_enc_inst);
+
+ dccg35_enable_symclk_fe_new(dccg, stream_enc_inst, (enum symclk_fe_source) link_enc_inst);
+
+}
+
+static void dccg35_disable_symclk_se_cb(
+ struct dccg *dccg,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst)
+{
+ dccg35_disable_symclk_fe_new(dccg, stream_enc_inst);
+
+ /* DMU PHY sequence switches SYMCLK_BE (link_enc_inst) to ref clock once PHY is turned off */
+}
+
+void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating)
+{
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
+}
+
+static const struct dccg_funcs dccg35_funcs_new = {
+ .update_dpp_dto = dccg35_update_dpp_dto_cb,
+ .dpp_root_clock_control = dccg35_dpp_root_clock_control_cb,
+ .get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
+ .dccg_init = dccg35_init_cb,
+ .set_dpstreamclk = dccg35_set_dpstreamclk_cb,
+ .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating_cb,
+ .enable_symclk32_se = dccg35_enable_symclk32_se_cb,
+ .disable_symclk32_se = dccg35_disable_symclk32_se_cb,
+ .enable_symclk32_le = dccg35_enable_symclk32_le_cb,
+ .disable_symclk32_le = dccg35_disable_symclk32_le_cb,
+ .set_symclk32_le_root_clock_gating = dccg35_set_symclk32_le_root_clock_gating_cb,
+ .set_physymclk = dccg35_set_physymclk_cb,
+ .set_physymclk_root_clock_gating = dccg35_set_physymclk_root_clock_gating_cb,
+ .set_dtbclk_dto = dccg35_set_dtbclk_dto_cb,
+ .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
+ .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
+ .otg_add_pixel = dccg31_otg_add_pixel,
+ .otg_drop_pixel = dccg31_otg_drop_pixel,
+ .set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
+ .disable_dsc = dccg35_disable_dscclk_cb,
+ .enable_dsc = dccg35_enable_dscclk_cb,
+ .set_pixel_rate_div = dccg35_set_pixel_rate_div,
+ .get_pixel_rate_div = dccg35_get_pixel_rate_div,
+ .trigger_dio_fifo_resync = dccg35_trigger_dio_fifo_resync,
+ .set_valid_pixel_rate = dccg35_set_valid_pixel_rate,
+ .enable_symclk_se = dccg35_enable_symclk_se_cb,
+ .disable_symclk_se = dccg35_disable_symclk_se_cb,
+ .set_dtbclk_p_src = dccg35_set_dtbclk_p_src_cb,
+};
+
static const struct dccg_funcs dccg35_funcs = {
.update_dpp_dto = dccg35_update_dpp_dto,
.dpp_root_clock_control = dccg35_dpp_root_clock_control,
@@ -1026,6 +2400,7 @@ static const struct dccg_funcs dccg35_funcs = {
.enable_symclk_se = dccg35_enable_symclk_se,
.disable_symclk_se = dccg35_disable_symclk_se,
.set_dtbclk_p_src = dccg35_set_dtbclk_p_src,
+ .dccg_root_gate_disable_control = dccg35_root_gate_disable_control,
};
struct dccg *dccg35_create(
@@ -1041,6 +2416,10 @@ struct dccg *dccg35_create(
BREAK_TO_DEBUGGER();
return NULL;
}
+ (void)&dccg35_disable_symclk_be_new;
+ (void)&dccg35_set_symclk32_le_root_clock_gating;
+ (void)&dccg35_set_smclk32_se_rcg;
+ (void)&dccg35_funcs_new;
base = &dccg_dcn->base;
base->ctx = ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
index 1586a45ca3bd..51f98c5c51c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
@@ -241,6 +241,7 @@ struct dccg *dccg35_create(
void dccg35_init(struct dccg *dccg);
void dccg35_enable_global_fgcg_rep(struct dccg *dccg, bool value);
+void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
#endif //__DCN35_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 07f1f396ba52..0b889004509a 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -730,35 +730,35 @@ void dccg401_init(struct dccg *dccg)
}
}
-static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, bool enable)
+static void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- uint32_t phase = enable ? 1 : 0;
switch (inst) {
case 0:
- REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1, DSCCLK0_DTO_DB_EN, 1);
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
- DSCCLK0_DTO_PHASE, phase,
+ DSCCLK0_DTO_PHASE, 1,
DSCCLK0_DTO_MODULO, 1);
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
+
break;
case 1:
- REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1, DSCCLK1_DTO_DB_EN, 1);
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
- DSCCLK1_DTO_PHASE, phase,
+ DSCCLK1_DTO_PHASE, 1,
DSCCLK1_DTO_MODULO, 1);
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
break;
case 2:
- REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1, DSCCLK2_DTO_DB_EN, 1);
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
- DSCCLK2_DTO_PHASE, phase,
+ DSCCLK2_DTO_PHASE, 1,
DSCCLK2_DTO_MODULO, 1);
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
break;
case 3:
- REG_UPDATE_2(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1, DSCCLK3_DTO_DB_EN, 1);
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
- DSCCLK3_DTO_PHASE, phase,
+ DSCCLK3_DTO_PHASE, 1,
DSCCLK3_DTO_MODULO, 1);
+ REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -774,15 +774,27 @@ static void dccg401_set_ref_dscclk(struct dccg *dccg,
switch (dsc_inst) {
case 0:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0);
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 0);
break;
case 1:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0);
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 0);
break;
case 2:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0);
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 0);
break;
case 3:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0);
+ REG_UPDATE_2(DSCCLK3_DTO_PARAM,
+ DSCCLK3_DTO_PHASE, 0,
+ DSCCLK3_DTO_MODULO, 0);
break;
default:
return;
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index 8bcddc836347..a196ce9e8127 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -117,10 +117,6 @@
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_EN, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_EN, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK3_EN, mask_sh),\
- DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_DB_EN, mask_sh),\
- DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_DB_EN, mask_sh),\
- DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_DB_EN, mask_sh),\
- DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK3_DTO_DB_EN, mask_sh),\
DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index cf5f84fb9c69..eeed840073fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -630,6 +630,11 @@ void dce_aud_az_enable(struct audio *audio)
audio->inst, value);
}
+void dce_aud_az_disable_hbr_audio(struct audio *audio)
+{
+ set_high_bit_rate_capable(audio, false);
+}
+
void dce_aud_az_disable(struct audio *audio)
{
uint32_t value;
@@ -1293,6 +1298,7 @@ static const struct audio_funcs funcs = {
.az_enable = dce_aud_az_enable,
.az_disable = dce_aud_az_disable,
.az_configure = dce_aud_az_configure,
+ .az_disable_hbr_audio = dce_aud_az_disable_hbr_audio,
.destroy = dce_aud_destroy,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index 539f881928d1..1b7b8b079af4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -166,6 +166,7 @@ void dce_aud_hw_init(struct audio *audio);
void dce_aud_az_enable(struct audio *audio);
void dce_aud_az_disable(struct audio *audio);
+void dce_aud_az_disable_hbr_audio(struct audio *audio);
void dce_aud_az_configure(struct audio *audio,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index b8996d285f00..bb4ac5042c80 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -735,7 +735,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
(unsigned int) payload->mot);
if (payload->write)
dce_aux_log_payload(" write", payload->data, payload->length, 16);
- ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+
+ /* Check whether the AUX transaction should be processed via DMUB or by DCN directly */
+ if (ddc->ctx->dc->debug.enable_dmub_aux_for_legacy_ddc
+ || ddc->ddc_pin == NULL) {
+ ret = dce_aux_transfer_dmub_raw(ddc, payload, &operation_result);
+ } else {
+ ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+ }
+
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index ccf153b7a467..cae18f8c1c9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -94,6 +94,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
state = PSR_STATE_HWLOCK_MGR;
else if (raw_state == 0x61)
state = PSR_STATE_POLLVUPDATE;
+ else if (raw_state == 0x62)
+ state = PSR_STATE_RELEASE_HWLOCK_MGR_FULL_FRAME;
else
state = PSR_STATE_INVALID;
@@ -363,6 +365,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR;
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
copy_settings_data->debug.bitfields.force_full_frame_update = 0;
+ copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
if (psr_context->su_granularity_required == 0)
copy_settings_data->su_y_granularity = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 4d960dc5ce89..c31e4f26a305 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -12,6 +12,8 @@
#define MAX_PIPES 6
+#define GPINT_RETRY_NUM 20
+
static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
@@ -167,6 +169,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+ copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
+
copy_settings_data->flags.u32All = 0;
copy_settings_data->flags.bitfields.fec_enable_status = (link->fec_state == dc_link_fec_enabled);
copy_settings_data->flags.bitfields.dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1);
@@ -220,6 +224,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
uint32_t *residency, const bool is_start, enum pr_residency_mode mode)
{
uint16_t param = (uint16_t)(panel_inst << 8);
+ uint32_t i = 0;
switch (mode) {
case PR_RESIDENCY_MODE_PHY:
@@ -247,10 +252,17 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
if (is_start)
param |= REPLAY_RESIDENCY_ENABLE;
- // Send gpint command and wait for ack
- if (!dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param,
- residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- *residency = 0;
+ for (i = 0; i < GPINT_RETRY_NUM; i++) {
+ // Send gpint command and wait for ack
+ if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param,
+ residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return;
+
+ udelay(100);
+ }
+
+ // all GPINT retries failed; report zero residency
+ *residency = 0;
}
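The pattern here is retry-with-delay around a command that can transiently fail, falling back to a safe default once the budget is spent. A standalone sketch, with gpint_send() as a hypothetical stand-in for dc_wake_and_execute_gpint():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RETRY_NUM 20

static bool gpint_send(uint32_t *reply)
{
        static int calls;

        if (++calls < 3)
                return false;   /* simulate two transient failures */
        *reply = 42;
        return true;
}

int main(void)
{
        uint32_t residency = 0;
        int i;

        for (i = 0; i < RETRY_NUM; i++) {
                if (gpint_send(&residency))
                        break;
                /* the driver waits udelay(100) here between attempts */
        }
        if (i == RETRY_NUM)
                residency = 0;  /* budget exhausted: report zero */
        printf("residency=%u after %d attempt(s)\n", residency, i + 1);
        return 0;
}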
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 49bcfe6ec999..fa422a8cbced 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -1955,6 +1955,7 @@ void dce110_tg_program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 28c58f1dff2d..ee4de740aceb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -261,6 +261,7 @@ void dce110_tg_program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index bf35dc65ca29..9837dec837ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -438,6 +438,7 @@ static void dce110_timing_generator_v_program_timing(struct timing_generator *tg
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index eb3557965781..fcf59348eb62 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -697,6 +697,7 @@ static void dce120_tg_program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index c1a85ee374d9..e5fb0e8333e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -111,13 +111,14 @@ static void program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
if (!use_vbios)
program_pix_dur(tg, timing->pix_clk_100hz);
- dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios);
+ dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, 0, use_vbios);
}
static void dce60_timing_generator_enable_advanced_request(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 2df4654858be..003a9330c286 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -111,13 +111,14 @@ static void program_timing(struct timing_generator *tg,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
if (!use_vbios)
program_pix_dur(tg, timing->pix_clk_100hz);
- dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios);
+ dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, 0, use_vbios);
}
static void dce80_timing_generator_enable_advanced_request(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 9923d0d620d4..e1f6623d4936 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,8 +24,6 @@
DCN10 = dcn10_ipp.o \
dcn10_hw_sequencer_debug.o \
- dcn10_opp.o \
- dcn10_mpc.o \
dcn10_cm_common.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 0b49362f71b0..eaed5d1c398a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -591,6 +591,8 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
i += increment) {
if (j == hw_points - 1)
break;
+ if (i >= TRANSFER_FUNC_POINTS)
+ return false;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
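The added guard stops the resampling loop from indexing past the fixed-size transfer-function table when the computed stride overshoots. A standalone sketch of the same guard, with an illustrative table size and stride:

#include <stdbool.h>
#include <stdio.h>

#define POINTS 1025     /* stand-in for TRANSFER_FUNC_POINTS */

static bool resample(const int *src, int *dst, int hw_points, int increment)
{
        int i = 0, j;

        for (j = 0; j < hw_points; j++, i += increment) {
                if (i >= POINTS)
                        return false;   /* stride overshot the source table */
                dst[j] = src[i];
        }
        return true;
}

int main(void)
{
        static int src[POINTS], dst[256];

        printf("%s\n", resample(src, dst, 256, 4) ? "ok" : "out of range");
        return 0;
}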
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index b3aeabc4d605..25ba0d310d46 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: MIT
# Copyright © 2019-2024 Advanced Micro Devices, Inc. All rights reserved.
-DCN20 = dcn20_mpc.o dcn20_opp.o dcn20_mmhubbub.o \
- dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
+DCN20 = dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index 4c43af867d86..b17277de0340 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -23,15 +23,11 @@
#
#
-DCN30 := dcn30_mpc.o dcn30_vpg.o \
+DCN30 := dcn30_vpg.o \
dcn30_afmt.o \
- dcn30_dwb.o \
- dcn30_dwb_cm.o \
dcn30_cm_common.o \
dcn30_mmhubbub.o \
-
-
AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN30)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index b8327237ed44..f31f0e3abfc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -28,7 +28,7 @@
#include "reg_helper.h"
#include "dcn30/dcn30_dpp.h"
#include "basics/conversion.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#include "custom_float.h"
#define REG(reg) reg
@@ -177,6 +177,8 @@ bool cm3_helper_translate_curve_to_hw_format(
i += increment) {
if (j == hw_points)
break;
+ if (i >= TRANSFER_FUNC_POINTS)
+ return false;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
@@ -335,6 +337,8 @@ bool cm3_helper_translate_curve_to_degamma_hw_format(
i += increment) {
if (j == hw_points - 1)
break;
+ if (i >= TRANSFER_FUNC_POINTS)
+ return false;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index dc37dbf870df..fb4814ab3f05 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -3,7 +3,7 @@
#
# Makefile for dcn30.
-DCN301 = dcn301_dio_link_encoder.o dcn301_panel_cntl.o
+DCN301 = dcn301_panel_cntl.o
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
deleted file mode 100644
index a954e316aca2..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: MIT
-#
-# Copyright (C) 2021 Advanced Micro Devices, Inc. All the rights reserved
-#
-# Authors: AMD
-#
-# Makefile for dcn303.
-
-DCN3_03 = dcn303_init.o
-
-AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_03)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index e2601d0aba41..d510e4652c18 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -5,7 +5,7 @@
# Makefile for dcn31.
DCN31 = dcn31_panel_cntl.o \
- dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
+ dcn31_apg.o \
dcn31_afmt.o dcn31_vpg.o
AMD_DAL_DCN31 = $(addprefix $(AMDDALPATH)/dc/dcn31/,$(DCN31))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
deleted file mode 100644
index 15fdcf7c6466..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved.
-#
-# Makefile for dcn314.
-
-DCN314 = dcn314_dio_stream_encoder.o
-
-AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN314)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn401/Makefile b/drivers/gpu/drm/amd/display/dc/dcn401/Makefile
deleted file mode 100644
index ded1f3140beb..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn401/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2024 Advanced Micro Devices, Inc. All rights reserved.
-
-DCN401 += dcn401_dio_link_encoder.o
-DCN401 += dcn401_dio_stream_encoder.o
-DCN401 += dcn401_mpc.o
-
-AMD_DAL_DCN401 = $(addprefix $(AMDDALPATH)/dc/dcn401/,$(DCN401))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN401)
diff --git a/drivers/gpu/drm/amd/display/dc/dio/Makefile b/drivers/gpu/drm/amd/display/dc/dio/Makefile
index 67840e474d7a..0dfd480976f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dio/Makefile
@@ -52,6 +52,15 @@ AMD_DAL_DIO_DCN30 = $(addprefix $(AMDDALPATH)/dc/dio/dcn30/,$(DIO_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN30)
###############################################################################
+# DCN301
+###############################################################################
+DIO_DCN301 = dcn301_dio_link_encoder.o
+
+AMD_DAL_DIO_DCN301 = $(addprefix $(AMDDALPATH)/dc/dio/dcn301/,$(DIO_DCN301))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN301)
+
+###############################################################################
# DCN31
###############################################################################
DIO_DCN31 = dcn31_dio_link_encoder.o
@@ -61,6 +70,15 @@ AMD_DAL_DIO_DCN31 = $(addprefix $(AMDDALPATH)/dc/dio/dcn31/,$(DIO_DCN31))
AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN31)
###############################################################################
+# DCN314
+###############################################################################
+DIO_DCN314 = dcn314_dio_stream_encoder.o
+
+AMD_DAL_DIO_DCN314 = $(addprefix $(AMDDALPATH)/dc/dio/dcn314/,$(DIO_DCN314))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_DCN314)
+
+###############################################################################
# DCN32
###############################################################################
DIO_DCN32 = dcn32_dio_link_encoder.o dcn32_dio_stream_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c
index 1b39a6e8a1ac..1b39a6e8a1ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h
index 49f8d91d4951..49f8d91d4951 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index 5b343f745cf3..5b343f745cf3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h
index 86548be591be..86548be591be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c
index 05783daa62ac..2ed382a8e79c 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c
@@ -23,7 +23,6 @@
*
*/
-
#include "reg_helper.h"
#include "core_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index 6a179e5ab417..6ab2a218b769 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -22,7 +22,6 @@
*
*/
-
#include "dc_bios_types.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn314/dcn314_dio_stream_encoder.h"
@@ -392,6 +391,14 @@ static void enc35_reset_fifo(struct stream_encoder *enc, bool reset)
udelay(10);
}
+static bool enc35_is_fifo_enabled(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t enable_val;
+
+ REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &enable_val);
+ return enable_val != 0;
+}
void enc35_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -415,6 +422,24 @@ void enc35_enable_fifo(struct stream_encoder *enc)
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
}
+static uint32_t enc35_get_pixels_per_cycle(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t value;
+
+ REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, &value);
+
+ switch (value) {
+ case 0:
+ return 1;
+ case 1:
+ return 2;
+ default:
+ ASSERT_CRITICAL(false);
+ return 1;
+ }
+}
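A standalone sketch of the field decode above; the 0 -> 1 and 1 -> 2 mapping mirrors the switch, and any other value is treated as an unexpected hardware state:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pixels_per_cycle(uint32_t pixel_mode)
{
        switch (pixel_mode) {
        case 0:
                return 1;
        case 1:
                return 2;
        default:
                assert(0);      /* unexpected DIG_FIFO_OUTPUT_PIXEL_MODE */
                return 1;
        }
}

int main(void)
{
        printf("%u pixel(s) per cycle\n", pixels_per_cycle(1));
        return 0;
}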
+
static const struct stream_encoder_funcs dcn35_str_enc_funcs = {
.dp_set_odm_combine =
enc314_dp_set_odm_combine,
@@ -465,7 +490,9 @@ static const struct stream_encoder_funcs dcn35_str_enc_funcs = {
.set_input_mode = enc314_set_dig_input_mode,
.enable_fifo = enc35_enable_fifo,
.disable_fifo = enc35_disable_fifo,
+ .is_fifo_enabled = enc35_is_fifo_enabled,
.map_stream_to_link = enc35_stream_encoder_map_to_link,
+ .get_pixels_per_cycle = enc35_get_pixels_per_cycle,
};
void dcn35_dio_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 34adae7ab6e8..2e4a46f1b499 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -210,4 +210,7 @@ enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link);
enum dc_edid_status dm_helpers_get_sbios_edid(struct dc_link *link, struct dc_edid *edid);
+bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream);
+bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream);
+
#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 8a8efe408a9d..e9fea9c2162e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1132,7 +1132,8 @@ static void dcn20_adjust_freesync_v_startup(
patched_crtc_timing.v_addressable -
patched_crtc_timing.v_border_top;
- newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+ /* The new vstartup is 1 line before the vsync point */
+ newVstartup = asic_blank_end + 1;
*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
}
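A worked standalone example of the clamp above, with illustrative line counts: the candidate vstartup is one line past blank end, and it only ever raises the existing value:

#include <stdio.h>

int main(void)
{
        int asic_blank_end = 40;        /* illustrative line numbers */
        int vstartup_start = 35;

        /* new candidate: one line past blank end (1 line before vsync) */
        int new_vstartup = asic_blank_end + 1;

        vstartup_start = new_vstartup > vstartup_start ?
                         new_vstartup : vstartup_start;
        printf("vstartup_start=%d\n", vstartup_start);  /* 41 */
        return 0;
}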
@@ -1562,6 +1563,8 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256;
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
+ pipes[pipe_cnt].pipe.src.cur0_src_width = 0;
+ pipes[pipe_cnt].pipe.src.cur1_src_width = 0;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width; /*when is_hsplit != 1*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 7c56ad0f8812..4fce64a030b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
- unsigned int ret_val = 0;
+ unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)
@@ -313,9 +313,6 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
-
- if (req128_c && log2_swath_height_c > 0)
- log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
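The ret_val fallback in get_bytes_per_element() changes from 0 to 1 in this file, presumably because the value feeds divisions downstream where 0 would fault. A standalone sketch of why a non-zero fallback is the safer default (format codes are illustrative):

#include <stdio.h>

static unsigned int bytes_per_element(int fmt)
{
        switch (fmt) {
        case 16: return 2;
        case 32: return 4;
        case 64: return 8;
        default: return 1;      /* non-zero fallback avoids divide-by-zero */
        }
}

int main(void)
{
        unsigned int pitch = 4096;

        /* with a 0 fallback this division would fault on unknown formats */
        printf("elements=%u\n", pitch / bytes_per_element(-1));
        return 0;
}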
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 3d95bfa5aca2..3fa9a5da02f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
- unsigned int ret_val = 0;
+ unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)
@@ -313,9 +313,6 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
if (swath_height_c > 0)
log2_swath_height_c = dml_log2(swath_height_c);
-
- if (req128_c && log2_swath_height_c > 0)
- log2_swath_height_c -= 1;
}
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 98502a4f0567..9e1c18b90805 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -53,7 +53,7 @@ static void calculate_ttu_cursor(
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
- unsigned int ret_val = 0;
+ unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 0b132ce1d2cd..2b275e680379 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1924,15 +1924,6 @@ static unsigned int CalculateVMAndRowBytes(
*PixelPTEReqWidth = 32768.0 / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
- } else if (MacroTileSizeBytes == 4096) {
- PixelPTEReqHeightPTEs = 1;
- *PixelPTEReqHeight = MacroTileHeight;
- *PixelPTEReqWidth = 8 * *MacroTileWidth;
- *PTERequestSize = 64;
- if (ScanDirection != dm_vert)
- FractionOfPTEReturnDrop = 0;
- else
- FractionOfPTEReturnDrop = 7.0 / 8;
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 9d399c4ce957..6f490d8d7038 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -160,8 +160,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
.pct_ideal_dram_bw_after_urgent_strobe = 67.0,
.max_avg_sdp_bw_use_normal_percent = 80.0,
.max_avg_fabric_bw_use_normal_percent = 60.0,
@@ -871,8 +871,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
* for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
* and the max of (VBLANK blanking time, MALL region)).
*/
- if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
- subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
+ if (drr_timing &&
+ stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
+ subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
schedulable = true;
return schedulable;
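The added drr_timing check relies on && short-circuiting so the min_refresh_in_uhz dereference never runs on a NULL pointer. A standalone sketch of the same guard, with simplified types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct timing {
        unsigned int min_refresh_in_uhz;
};

static bool drr_schedulable(const struct timing *t, double stretched_us)
{
        /* t is checked first; && never evaluates the dereference on NULL */
        return t && stretched_us < (1.0 / t->min_refresh_in_uhz) * 1e12;
}

int main(void)
{
        struct timing drr = { .min_refresh_in_uhz = 48000000 };  /* 48 Hz */

        printf("%d %d\n", drr_schedulable(NULL, 16666.0),
               drr_schedulable(&drr, 16666.0));   /* prints: 0 1 */
        return 0;
}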
@@ -937,7 +938,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
- if (found) {
+ if (found && subvp_pipe) {
phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
main_timing = &subvp_pipe->stream->timing;
phantom_timing = &phantom_stream->timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 4297402bdab3..8839faf42207 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -139,8 +139,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
.pct_ideal_dram_bw_after_urgent_strobe = 67.0,
.max_avg_sdp_bw_use_normal_percent = 80.0,
.max_avg_fabric_bw_use_normal_percent = 60.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 410e4b671228..641a8cd019cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -523,6 +523,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_offset;
unsigned int vupdate_width;
unsigned int vready_offset;
+ unsigned int pstate_keepout;
unsigned char interlaced;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index dae13f202220..d8bfc85e5dcd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -39,7 +39,7 @@
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
- unsigned int ret_val = 0;
+ unsigned int ret_val = 1;
if (source_format == dm_444_16) {
if (!is_chroma)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index fea857214c0f..c4378e620cbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -35,8 +35,6 @@ frame_warn_flag := -Wframe-larger-than=2048
endif
endif
-# DRIVER_BUILD is mostly used in DML2.1 source
-subdir-ccflags-y += -DDRIVER_BUILD=1
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
@@ -81,13 +79,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization := $(dml2_
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
@@ -104,13 +100,11 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o :
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
@@ -126,13 +120,11 @@ DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
DML21 += src/dml2_core/dml2_core_factory.o
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
-DML21 += src/dml2_core/dml2_core_shared.o
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
DML21 += src/dml2_mcg/dml2_mcg_factory.o
DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn4.o
DML21 += src/dml2_pmo/dml2_pmo_factory.o
DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
DML21 += src/dml2_standalone_libraries/lib_float_math.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 547dfcc80fde..d851c081e376 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -8926,7 +8926,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
// The prefetch scheduling should only be calculated once as per AllowForPStateChangeOrStutterInVBlank requirement
// If the AllowForPStateChangeOrStutterInVBlank requirement is not strict (i.e. only try those power saving feature
- // if possible, then will try to program for the best power saving features in order of diffculty (dram, fclk, stutter)
+ // if possible, then will try to program for the best power saving features in order of difficulty (dram, fclk, stutter)
s->iteration = 0;
s->MaxTotalRDBandwidth = 0;
s->AllPrefetchModeTested = false;
@@ -9977,7 +9977,7 @@ void dml_core_get_row_heights(
dml_print("DML_DLG: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
#endif
- // just suppluy with enough parameters to calculate meta and dte
+ // just supply with enough parameters to calculate meta and dte
CalculateVMAndRowBytes(
0, // dml_bool_t ViewportStationary,
1, // dml_bool_t DCCEnable,
@@ -10110,7 +10110,7 @@ dml_bool_t dml_mode_support(
/// Note: In this function, it is assumed that DCFCLK, SOCCLK freq are the state values, and mode_program will just use the DML calculated DPPCLK and DISPCLK
/// @param mode_lib mode_lib data struct that house all the input/output/bbox and calculation values.
/// @param state_idx Power state idx chosen
-/// @param display_cfg Display Congiuration
+/// @param display_cfg Display Configuration
/// @param call_standalone Calling mode_programming without calling mode support. Some of the "support" struct member will be pre-calculated before doing mode programming
/// TODO: Add clk_cfg input, could be useful for standalone mode
dml_bool_t dml_mode_programming(
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 06387b8b0aee..8697eac1e1f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -31,7 +31,7 @@ static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_
else
soc_bb = &dml2_socbb_dcn401;
- qos_params = &dml_dcn401_soc_qos_params;
+ qos_params = &dml_dcn4_variant_a_soc_qos_params;
}
/* patch soc bb */
@@ -516,7 +516,7 @@ static void populate_dml21_stream_overrides_from_stream_state(
if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy ||
stream->debug.force_odm_combine_segments > 0)
stream_desc->overrides.disable_dynamic_odm = true;
- stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp;
+ stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req;
}
static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode)
@@ -725,18 +725,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
const struct scaler_data *scaler_data = get_scaler_data_for_plane(dml_ctx, plane_state, context);
struct dc_stream_state *stream = context->streams[stream_index];
- if (stream->cursor_attributes.color_format == CURSOR_MODE_MONO)
- plane->cursor.cursor_bpp = 2;
- else if (stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_1BIT_AND
- || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
- || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- plane->cursor.cursor_bpp = 32;
- } else if (stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED
- || stream->cursor_attributes.color_format == CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED) {
- plane->cursor.cursor_bpp = 64;
- } else
- plane->cursor.cursor_bpp = 32;
-
+ plane->cursor.cursor_bpp = 32;
plane->cursor.cursor_width = 256;
plane->cursor.num_cursors = 1;
@@ -788,6 +777,14 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
* certain cases. Hence do corrective active and disable scaling.
*/
plane->composition.scaler_info.enabled = false;
+ } else if ((plane_state->ctx->dc->config.use_spl == true) &&
+ (plane->composition.scaler_info.enabled == false)) {
+ /* To enable sharpener for 1:1, scaler must be enabled. If use_spl is set, then
+ * allow case where ratio is 1 but taps > 1
+ */
+ if ((scaler_data->taps.h_taps > 1) || (scaler_data->taps.v_taps > 1) ||
+ (scaler_data->taps.h_taps_c > 1) || (scaler_data->taps.v_taps_c > 1))
+ plane->composition.scaler_info.enabled = true;
}
/* always_scale is only used for debug purposes not used in production but has to be
@@ -827,6 +824,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
plane->tdlut.setup_for_tdlut = true;
+
switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.layout) {
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
@@ -836,6 +834,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->tdlut.tdlut_addressing_mode = dml2_tdlut_simple_linear;
break;
}
+
switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.size) {
case DC_CM2_GPU_MEM_SIZE_171717:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
@@ -844,8 +843,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
}
- } else
- plane->tdlut.setup_for_tdlut = false;
+ }
+ plane->tdlut.setup_for_tdlut |= dml_ctx->config.force_tdlut_enable;
plane->dynamic_meta_data.enable = false;
plane->dynamic_meta_data.lines_before_active_required = 0;
@@ -859,7 +858,9 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->immediate_flip = plane_state->flip_immediate;
- plane->composition.rect_out_height_spans_vactive = plane_state->dst_rect.height >= stream->timing.v_addressable;
+ plane->composition.rect_out_height_spans_vactive =
+ plane_state->dst_rect.height >= stream->timing.v_addressable &&
+ stream->dst.height >= stream->timing.v_addressable;
}
//TODO: Could possibly be moved to a common helper layer.
@@ -949,6 +950,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
int stream_index, plane_index;
int disp_cfg_stream_location, disp_cfg_plane_location;
struct dml2_display_cfg *dml_dispcfg = &dml_ctx->v21.display_config;
+ unsigned int plane_count = 0;
memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
@@ -958,6 +960,11 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->minimize_det_reallocation = true;
dml_dispcfg->overrides.enable_subvp_implicit_pmo = true;
+ if (in_dc->debug.disable_unbounded_requesting) {
+ dml_dispcfg->overrides.hw.force_unbounded_requesting.enable = true;
+ dml_dispcfg->overrides.hw.force_unbounded_requesting.value = false;
+ }
+
for (stream_index = 0; stream_index < context->stream_count; stream_index++) {
disp_cfg_stream_location = map_stream_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]);
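
The force_unbounded_requesting override added above follows the usual enable/value pair convention: enable means "take the forced value instead of the computed one", and value carries the forced setting. A generic sketch of how such a pair is consumed (names hypothetical; the consuming side is outside this hunk):

    /* Sketch of the enable/value override convention (names hypothetical). */
    struct dml_bool_override {
            bool enable; /* true: ignore the computed result */
            bool value;  /* forced setting used when enable is true */
    };

    static bool resolve_bool(bool computed, const struct dml_bool_override *ovr)
    {
            return ovr->enable ? ovr->value : computed;
    }

With disable_unbounded_requesting set, the hunk forces {enable = true, value = false}, pinning unbounded requesting off regardless of what DML would otherwise choose.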
@@ -1002,33 +1009,39 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].overrides.uclk_pstate_change_strategy =
dml21_force_pstate_method_to_uclk_state_change_strategy(dml_ctx->config.pmo.force_pstate_method_values[stream_index]);
}
+
+ plane_count++;
}
}
}
+ if (plane_count == 0) {
+ dml_dispcfg->overrides.all_streams_blanked = true;
+ }
+
return true;
}
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context)
{
/* TODO: these should be the max of active and svp prefetch; idle should be tracked separately */
- context->bw_ctx.bw.dcn.clk.dispclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dispclk_khz;
- context->bw_ctx.bw.dcn.clk.dcfclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.dcfclk_khz;
- context->bw_ctx.bw.dcn.clk.dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.uclk_khz;
- context->bw_ctx.bw.dcn.clk.fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.active.fclk_khz;
- context->bw_ctx.bw.dcn.clk.idle_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.idle.uclk_khz;
- context->bw_ctx.bw.dcn.clk.idle_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.idle.fclk_khz;
- context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.deepsleep_dcfclk_khz;
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.dcfclk_khz;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.uclk_khz;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.active.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.idle_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.idle.uclk_khz;
+ context->bw_ctx.bw.dcn.clk.idle_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.idle.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.deepsleep_dcfclk_khz;
context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = in_ctx->v21.mode_programming.programming->fclk_pstate_supported;
context->bw_ctx.bw.dcn.clk.p_state_change_support = in_ctx->v21.mode_programming.programming->uclk_pstate_supported;
- context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dtbrefclk_khz > 0;
- context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4.dtbrefclk_khz;
+ context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0;
+ context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz;
}
void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx)
{
struct dml2_core_internal_display_mode_lib *mode_lib = &in_ctx->v21.dml_init.dml2_instance->core_instance.clean_me_up.mode_lib;
- double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;;
+ double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
if (reg_set_idx >= DML2_DCHUB_WATERMARK_SET_NUM) {
/* invalid register set index */
@@ -1053,16 +1066,16 @@ static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_wat
switch (wm_index) {
case DML2_DCHUB_WATERMARK_SET_A:
- wm_regs = &watermarks->dcn4.a;
+ wm_regs = &watermarks->dcn4x.a;
break;
case DML2_DCHUB_WATERMARK_SET_B:
- wm_regs = &watermarks->dcn4.b;
+ wm_regs = &watermarks->dcn4x.b;
break;
case DML2_DCHUB_WATERMARK_SET_C:
- wm_regs = &watermarks->dcn4.c;
+ wm_regs = &watermarks->dcn4x.c;
break;
case DML2_DCHUB_WATERMARK_SET_D:
- wm_regs = &watermarks->dcn4.d;
+ wm_regs = &watermarks->dcn4x.d;
break;
case DML2_DCHUB_WATERMARK_SET_NUM:
default:
@@ -1110,10 +1123,11 @@ void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_
global_sync = &stream_programming->phantom_stream.global_sync;
}
- pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4.vstartup_lines;
- pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4.vupdate_offset_pixels;
- pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4.vupdate_vupdate_width_pixels;
- pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4.vready_offset_pixels;
+ pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4x.vstartup_lines;
+ pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4x.vupdate_offset_pixels;
+ pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4x.vupdate_vupdate_width_pixels;
+ pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4x.vready_offset_pixels;
+ pipe_ctx->pipe_dlg_param.pstate_keepout = global_sync->dcn4x.pstate_keepout_start_lines;
pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst;
@@ -1164,3 +1178,37 @@ void dml21_get_pipe_mcache_config(
mcache_pipe_config->plane1_enabled =
dml21_is_plane1_enabled(pln_prog->plane_descriptor->pixel_format);
}
+
+void dml21_set_dc_p_state_type(
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming,
+ bool sub_vp_enabled)
+{
+ switch (stream_programming->uclk_pstate_method) {
+ case dml2_uclk_pstate_support_method_vactive:
+ case dml2_uclk_pstate_support_method_fw_vactive_drr:
+ pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
+ break;
+ case dml2_uclk_pstate_support_method_vblank:
+ case dml2_uclk_pstate_support_method_fw_vblank_drr:
+ if (sub_vp_enabled)
+ pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
+ else
+ pipe_ctx->p_state_type = P_STATE_V_BLANK;
+ break;
+ case dml2_uclk_pstate_support_method_fw_subvp_phantom:
+ case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
+ pipe_ctx->p_state_type = P_STATE_SUB_VP;
+ break;
+ case dml2_uclk_pstate_support_method_fw_drr:
+ if (sub_vp_enabled)
+ pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
+ else
+ pipe_ctx->p_state_type = P_STATE_FPO;
+ break;
+ default:
+ pipe_ctx->p_state_type = P_STATE_UNKNOWN;
+ break;
+ }
+}
+
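For quick reference, the mapping implemented by dml21_set_dc_p_state_type above, condensed into a table (same cases as the switch, nothing new):

    /*
     * uclk_pstate_method                       sub_vp_enabled   p_state_type
     * ---------------------------------------  ---------------  ----------------------
     * vactive, fw_vactive_drr                  (ignored)        P_STATE_V_ACTIVE
     * vblank, fw_vblank_drr                    true             P_STATE_V_BLANK_SUB_VP
     * vblank, fw_vblank_drr                    false            P_STATE_V_BLANK
     * fw_subvp_phantom, fw_subvp_phantom_drr   (ignored)        P_STATE_SUB_VP
     * fw_drr                                   true             P_STATE_DRR_SUB_VP
     * fw_drr                                   false            P_STATE_FPO
     * anything else                            (ignored)        P_STATE_UNKNOWN
     */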
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 4cc0a1fbb93d..476a7f6e4875 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -26,4 +26,5 @@ void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_water
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
+void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index d276458e50fd..51d491bffa32 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -11,7 +11,6 @@
#include "dml2_core_dcn4_calcs.h"
-
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id)
{
int i;
@@ -280,6 +279,23 @@ bool check_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
dc_is_dp_signal(pipe_ctx->stream->signal));
}
+
+static bool is_sub_vp_enabled(struct dc *dc, struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && dc_state_get_paired_subvp_stream(context, pipe_ctx->stream) &&
+ dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog,
struct dml2_per_stream_programming *stream_prog)
{
@@ -310,12 +326,16 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
pipe_ctx->det_buffer_size_kb = pln_prog->pipe_regs[pipe_reg_index]->det_size * 64;
}
- pipe_ctx->plane_res.bw.dppclk_khz = pln_prog->min_clocks.dcn4.dppclk_khz;
+ pipe_ctx->plane_res.bw.dppclk_khz = pln_prog->min_clocks.dcn4x.dppclk_khz;
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipe_ctx->plane_res.bw.dppclk_khz)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipe_ctx->plane_res.bw.dppclk_khz;
dml21_populate_mall_allocation_size(context, dml_ctx, pln_prog, pipe_ctx);
memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[pipe_ctx->pipe_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
+
+ bool sub_vp_enabled = is_sub_vp_enabled(pipe_ctx->stream->ctx->dc, context);
+
+ dml21_set_dc_p_state_type(pipe_ctx, stream_prog, sub_vp_enabled);
}
static struct dc_stream_state *dml21_add_phantom_stream(struct dml2_context *dml_ctx,
@@ -459,94 +479,103 @@ void dml21_build_fams2_programming(const struct dc *dc,
struct dml2_context *dml_ctx)
{
int i, j, k;
+ unsigned int num_fams2_streams = 0;
/* reset fams2 data */
- context->bw_ctx.bw.dcn.fams2_stream_count = 0;
memset(&context->bw_ctx.bw.dcn.fams2_stream_params, 0, sizeof(struct dmub_fams2_stream_static_state) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
- if (!dml_ctx->v21.mode_programming.programming->fams2_required)
- return;
+ if (dml_ctx->v21.mode_programming.programming->fams2_required) {
+ for (i = 0; i < context->stream_count; i++) {
+ int dml_stream_idx;
+ struct dc_stream_state *phantom_stream;
+ struct dc_stream_status *phantom_status;
- for (i = 0; i < context->stream_count; i++) {
- int dml_stream_idx;
- struct dc_stream_state *phantom_stream;
- struct dc_stream_status *phantom_status;
-
- struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[context->bw_ctx.bw.dcn.fams2_stream_count];
-
- struct dc_stream_state *stream = context->streams[i];
-
- if (context->stream_status[i].plane_count == 0 ||
- dml_ctx->config.svp_pstate.callbacks.get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) {
- /* can ignore blanked or phantom streams */
- continue;
- }
+ struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[num_fams2_streams];
- dml_stream_idx = dml21_helper_find_dml_pipe_idx_by_stream_id(dml_ctx, stream->stream_id);
- if (dml_stream_idx < 0) {
- ASSERT(dml_stream_idx >= 0);
- continue;
- }
+ struct dc_stream_state *stream = context->streams[i];
- /* copy static state from PMO */
- memcpy(static_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params,
- sizeof(struct dmub_fams2_stream_static_state));
-
- /* get information from context */
- static_state->num_planes = context->stream_status[i].plane_count;
- static_state->otg_inst = context->stream_status[i].primary_otg_inst;
-
- /* populate pipe masks for planes */
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- for (k = 0; k < dc->res_pool->pipe_count; k++) {
- if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
- static_state->pipe_mask |= (1 << k);
- static_state->plane_pipe_masks[j] |= (1 << k);
- }
+ if (context->stream_status[i].plane_count == 0 ||
+ dml_ctx->config.svp_pstate.callbacks.get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) {
+ /* can ignore blanked or phantom streams */
+ continue;
}
- }
- /* get per method programming */
- switch (static_state->type) {
- case FAMS2_STREAM_TYPE_VBLANK:
- case FAMS2_STREAM_TYPE_VACTIVE:
- case FAMS2_STREAM_TYPE_DRR:
- break;
- case FAMS2_STREAM_TYPE_SUBVP:
- phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, stream);
- if (!phantom_stream)
- break;
+ dml_stream_idx = dml21_helper_find_dml_pipe_idx_by_stream_id(dml_ctx, stream->stream_id);
+ if (dml_stream_idx < 0) {
+ ASSERT(dml_stream_idx >= 0);
+ continue;
+ }
- phantom_status = dml_ctx->config.callbacks.get_stream_status(context, phantom_stream);
+ /* copy static state from PMO */
+ memcpy(static_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params,
+ sizeof(struct dmub_fams2_stream_static_state));
- /* phantom status should always be present */
- ASSERT(phantom_status);
- static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+ /* get information from context */
+ static_state->num_planes = context->stream_status[i].plane_count;
+ static_state->otg_inst = context->stream_status[i].primary_otg_inst;
- /* populate pipe masks for phantom planes */
- for (j = 0; j < phantom_status->plane_count; j++) {
+ /* populate pipe masks for planes */
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
for (k = 0; k < dc->res_pool->pipe_count; k++) {
if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
- static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k);
- static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
+ static_state->pipe_mask |= (1 << k);
+ static_state->plane_pipe_masks[j] |= (1 << k);
+ }
+ }
+ }
+
+ /* get per method programming */
+ switch (static_state->type) {
+ case FAMS2_STREAM_TYPE_VBLANK:
+ case FAMS2_STREAM_TYPE_VACTIVE:
+ case FAMS2_STREAM_TYPE_DRR:
+ break;
+ case FAMS2_STREAM_TYPE_SUBVP:
+ phantom_stream = dml_ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, stream);
+ if (!phantom_stream)
+ break;
+
+ phantom_status = dml_ctx->config.callbacks.get_stream_status(context, phantom_stream);
+
+ /* phantom status should always be present */
+ ASSERT(phantom_status);
+ static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+
+ /* populate pipe masks for phantom planes */
+ for (j = 0; j < phantom_status->plane_count; j++) {
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (context->res_ctx.pipe_ctx[k].stream &&
+ context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
+ static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k);
+ static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ }
}
}
+ break;
+ default:
+ ASSERT(false);
+ break;
}
- break;
- default:
- ASSERT(false);
- break;
+
+ num_fams2_streams++;
}
+ }
+
+ if (num_fams2_streams > 0) {
+ /* copy FAMS2 configuration */
+ memcpy(&context->bw_ctx.bw.dcn.fams2_global_config,
+ &dml_ctx->v21.mode_programming.programming->fams2_global_config,
+ sizeof(struct dmub_cmd_fams2_global_config));
- context->bw_ctx.bw.dcn.fams2_stream_count++;
+ context->bw_ctx.bw.dcn.fams2_global_config.num_streams = num_fams2_streams;
}
- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_stream_count > 0;
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
}
bool dml21_is_plane1_enabled(enum dml2_source_format_class source_format)
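
The rewritten loop above is largely re-indentation; the behavioral changes are that the stream count is now accumulated in a local instead of context->bw_ctx.bw.dcn.fams2_stream_count, the global config is copied only when at least one FAMS2 stream survives the filtering, and fw_based_mclk_switching keys off the global enable bit rather than the stream count. A condensed sketch of the resulting flow (helpers like for_each_stream and is_phantom are illustrative, not driver functions):

    /* Condensed sketch of dml21_build_fams2_programming after this change. */
    unsigned int n = 0;

    memset(stream_params, 0, sizeof(stream_params));
    memset(&global_config, 0, sizeof(global_config));

    if (programming->fams2_required) {
            for_each_stream(stream) {
                    if (is_blanked(stream) || is_phantom(stream))
                            continue; /* skip blanked or phantom streams */
                    copy_static_state(&stream_params[n], stream);
                    if (stream_params[n].type == FAMS2_STREAM_TYPE_SUBVP)
                            fill_phantom_masks(&stream_params[n], stream);
                    n++;
            }
    }

    if (n > 0) {
            global_config = programming->fams2_global_config;
            global_config.num_streams = n;
    }

    /* keyed off the global enable bit, no longer off n > 0 */
    fw_based_mclk_switching = global_config.features.bits.enable;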
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 41ecf00ed196..d35dd507cb9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -66,7 +66,9 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex
disable_fams2;
pmo_options->disable_fams2 = disable_fams2;
- pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming;
+ pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
+ in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
+ pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
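
The hunk above splits the old boolean disable_fams_gaming debug option into a three-way policy. Assuming the INGAME_FAMS_* enumerators used in the hunk, the resulting flag matrix is:

    /*
     * debug.disable_fams_gaming               drr_var when VRR   drr_clamped when VRR
     * --------------------------------------  -----------------  --------------------
     * INGAME_FAMS_DISABLE                     disabled           disabled
     * INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY     disabled           allowed
     * any other value                         allowed            allowed
     */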
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
index 521f77b8ac44..d82c681a5402 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
@@ -72,7 +72,7 @@ static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
.scaling_factor_mhz = 0,
},
.qos_params = {
- .dcn4 = {
+ .dcn4x = {
.df_qos_response_time_fclk_cycles = 300,
.max_round_trip_to_furthest_cs_fclk_cycles = 350,
.mall_overhead_fclk_cycles = 50,
@@ -128,7 +128,7 @@ static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
},
},
},
- .qos_type = dml2_qos_param_type_dcn4,
+ .qos_type = dml2_qos_param_type_dcn4x,
};
static const struct dml2_soc_bb dml2_socbb_dcn31 = {
@@ -228,7 +228,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn31 = {
.scaling_factor_mhz = 0,
},
.qos_params = {
- .dcn4 = {
+ .dcn4x = {
.df_qos_response_time_fclk_cycles = 300,
.max_round_trip_to_furthest_cs_fclk_cycles = 350,
.mall_overhead_fclk_cycles = 50,
@@ -332,7 +332,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn31 = {
},
},
},
- .qos_type = dml2_qos_param_type_dcn4,
+ .qos_type = dml2_qos_param_type_dcn4x,
},
.power_management_parameters = {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index fe07fcc3d0d5..8ef7977841de 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -8,7 +8,7 @@
#include "dml_top_soc_parameter_types.h"
-static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = {
+static const struct dml2_soc_qos_parameters dml_dcn4_variant_a_soc_qos_params = {
.derate_table = {
.system_active_urgent = {
.dram_derate_percent_pixel = 22,
@@ -52,7 +52,7 @@ static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = {
.scaling_factor_mhz = 0,
},
.qos_params = {
- .dcn4 = {
+ .dcn4x = {
.df_qos_response_time_fclk_cycles = 300,
.max_round_trip_to_furthest_cs_fclk_cycles = 350,
.mall_overhead_fclk_cycles = 50,
@@ -78,7 +78,7 @@ static const struct dml2_soc_qos_parameters dml_dcn401_soc_qos_params = {
},
},
},
- .qos_type = dml2_qos_param_type_dcn4,
+ .qos_type = dml2_qos_param_type_dcn4x,
};
static const struct dml2_soc_bb dml2_socbb_dcn401 = {
@@ -178,7 +178,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn401 = {
.scaling_factor_mhz = 0,
},
.qos_params = {
- .dcn4 = {
+ .dcn4x = {
.df_qos_response_time_fclk_cycles = 300,
.max_round_trip_to_furthest_cs_fclk_cycles = 350,
.mall_overhead_fclk_cycles = 50,
@@ -282,7 +282,7 @@ static const struct dml2_soc_bb dml2_socbb_dcn401 = {
},
},
},
- .qos_type = dml2_qos_param_type_dcn4,
+ .qos_type = dml2_qos_param_type_dcn4x,
},
.power_management_parameters = {
@@ -344,6 +344,9 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.config_return_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 22,
.compressed_buffer_segment_size_in_kbytes = 64,
+ .max_flip_time_us = 80,
+ .max_flip_time_lines = 32,
+ .hostvm_mode = 0,
.subvp_drr_scheduling_margin_us = 100,
.subvp_prefetch_end_to_mall_start_us = 15,
.subvp_fw_processing_delay = 15,
@@ -351,14 +354,18 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.fams2 = {
.max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 50,
- .vertical_interrupt_ack_delay_us = 18,
+ .scheduling_delay_us = 125,
+ .vertical_interrupt_ack_delay_us = 40,
.allow_programming_delay_us = 18,
.min_allow_width_us = 20,
.subvp_df_throttle_delay_us = 100,
- .subvp_programming_delay_us = 18,
+ .subvp_programming_delay_us = 200,
.subvp_prefetch_to_mall_delay_us = 18,
- .drr_programming_delay_us = 18,
+ .drr_programming_delay_us = 35,
+
+ .lock_timeout_us = 5000,
+ .recovery_timeout_us = 5000,
+ .flip_programming_delay_us = 300,
},
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
index a25f4e5977cf..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_H__
#define __DML_TOP_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 8247289ce7d3..83fc15bf13cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __dml2_TOP_DCHUB_REGISTERS_H__
#define __dml2_TOP_DCHUB_REGISTERS_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index daae77f2672b..b132f676a68d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_DISPLAY_CFG_TYPES_H__
#define __DML_TOP_DISPLAY_CFG_TYPES_H__
@@ -411,7 +410,6 @@ struct dml2_stream_parameters {
enum dml2_odm_mode odm_mode;
bool disable_dynamic_odm;
bool disable_subvp;
- bool disable_fams2_drr;
int minimum_vblank_idle_requirement_us;
bool minimize_active_latency_hiding;
@@ -478,6 +476,7 @@ struct dml2_display_cfg {
bool max_outstanding_when_urgent_expected_disable;
bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming
unsigned int best_effort_min_active_latency_hiding_us;
+ bool all_streams_blanked;
} overrides;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
index 2f444f448770..8f624a912e78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_POLICY_TYPES_H__
#define __DML_TOP_POLICY_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index 065b2afab6fb..ebd8abe894a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_SOC_PARAMETER_TYPES_H__
#define __DML_TOP_SOC_PARAMETER_TYPES_H__
@@ -27,7 +26,7 @@ struct dml2_soc_derates {
struct dml2_soc_derate_values system_idle_average;
};
-struct dml2_dcn3_soc_qos_params {
+struct dml2_dcn32x_soc_qos_params {
struct {
unsigned int base_latency_us;
unsigned int base_latency_pixel_vm_us;
@@ -53,7 +52,7 @@ struct dml2_dcn4_uclk_dpm_dependent_qos_params {
unsigned int average_latency_when_non_urgent_uclk_cycles;
};
-struct dml2_dcn4_soc_qos_params {
+struct dml2_dcn4x_soc_qos_params {
unsigned int df_qos_response_time_fclk_cycles;
unsigned int max_round_trip_to_furthest_cs_fclk_cycles;
unsigned int mall_overhead_fclk_cycles;
@@ -69,7 +68,7 @@ struct dml2_dcn4_soc_qos_params {
enum dml2_qos_param_type {
dml2_qos_param_type_dcn3,
- dml2_qos_param_type_dcn4
+ dml2_qos_param_type_dcn4x
};
struct dml2_soc_qos_parameters {
@@ -81,8 +80,8 @@ struct dml2_soc_qos_parameters {
} writeback;
union {
- struct dml2_dcn3_soc_qos_params dcn3;
- struct dml2_dcn4_soc_qos_params dcn4;
+ struct dml2_dcn32x_soc_qos_params dcn32x;
+ struct dml2_dcn4x_soc_qos_params dcn4x;
} qos_params;
enum dml2_qos_param_type qos_type;
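
The renamed union keeps the existing tagged-union idiom: qos_type selects which qos_params member is meaningful. A minimal consumer sketch under that assumption (field names taken from the hunks above):

    /* Sketch: dispatch on the QoS parameter flavor. */
    static unsigned int qos_mall_overhead_fclk_cycles(const struct dml2_soc_qos_parameters *qos)
    {
            switch (qos->qos_type) {
            case dml2_qos_param_type_dcn4x:
                    return qos->qos_params.dcn4x.mall_overhead_fclk_cycles;
            case dml2_qos_param_type_dcn3:
            default:
                    return 0; /* no equivalent field in the dcn32x variant */
            }
    }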
@@ -152,6 +151,7 @@ struct dml2_soc_bb {
double phy_downspread_percent;
double dcn_downspread_percent;
double dispclk_dppclk_vco_speed_mhz;
+ bool no_dfs;
bool do_urgent_latency_adjustment;
unsigned int mem_word_bytes;
unsigned int num_dcc_mcaches;
@@ -173,6 +173,7 @@ struct dml2_ip_capabilities {
unsigned int meta_fifo_size_in_kentries;
unsigned int compressed_buffer_segment_size_in_kbytes;
unsigned int max_flip_time_us;
+ unsigned int max_flip_time_lines;
unsigned int hostvm_mode;
unsigned int subvp_drr_scheduling_margin_us;
unsigned int subvp_prefetch_end_to_mall_start_us;
@@ -190,6 +191,10 @@ struct dml2_ip_capabilities {
unsigned int subvp_programming_delay_us;
unsigned int subvp_prefetch_to_mall_delay_us;
unsigned int drr_programming_delay_us;
+
+ unsigned int lock_timeout_us;
+ unsigned int recovery_timeout_us;
+ unsigned int flip_programming_delay_us;
} fams2;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 8aa77bb190ea..eeb96c455658 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -5,7 +5,6 @@
#ifndef __DML_TOP_TYPES_H__
#define __DML_TOP_TYPES_H__
-#include "dml_top_types.h"
#include "dml_top_display_cfg_types.h"
#include "dml_top_soc_parameter_types.h"
#include "dml_top_policy_types.h"
@@ -74,6 +73,7 @@ struct dml2_pmo_options {
bool disable_drr_var;
bool disable_drr_clamped;
bool disable_drr_var_when_var_active;
+ bool disable_drr_clamped_when_var_active;
bool disable_fams2;
bool disable_vactive_det_fill_bw_pad; /* dml2_project_dcn4x_stage2_auto_drr_svp and above only */
bool disable_dyn_odm;
@@ -228,7 +228,7 @@ struct dml2_per_plane_programming {
union {
struct {
unsigned long dppclk_khz;
- } dcn4;
+ } dcn4x;
} min_clocks;
struct dml2_mcache_surface_allocation mcache_allocation;
@@ -262,7 +262,8 @@ union dml2_global_sync_programming {
unsigned int vupdate_offset_pixels;
unsigned int vupdate_vupdate_width_pixels;
unsigned int vready_offset_pixels;
- } dcn4;
+ unsigned int pstate_keepout_start_lines;
+ } dcn4x;
};
struct dml2_per_stream_programming {
@@ -273,7 +274,7 @@ struct dml2_per_stream_programming {
unsigned long dscclk_khz;
unsigned long dtbclk_khz;
unsigned long phyclk_khz;
- } dcn4;
+ } dcn4x;
} min_clocks;
union dml2_global_sync_programming global_sync;
@@ -374,7 +375,7 @@ struct dml2_display_cfg_programming {
unsigned long dispclk_khz;
unsigned long dcfclk_deepsleep_khz;
unsigned long dpp_ref_khz;
- } dcn3;
+ } dcn32x;
struct {
struct {
unsigned long uclk_khz;
@@ -403,7 +404,7 @@ struct dml2_display_cfg_programming {
uint32_t dpprefclk_did;
uint32_t dtbrefclk_did;
} divider_ids;
- } dcn4;
+ } dcn4x;
} min_clocks;
bool uclk_pstate_supported;
@@ -411,6 +412,7 @@ struct dml2_display_cfg_programming {
/* indicates this configuration requires FW to support */
bool fams2_required;
+ struct dmub_cmd_fams2_global_config fams2_global_config;
struct {
bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 04edcde423a9..0aa4e4d343b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_internal_shared_types.h"
#include "dml2_core_shared_types.h"
#include "dml2_core_dcn4.h"
@@ -10,7 +9,7 @@
#include "dml2_debug.h"
#include "lib_float_math.h"
-struct dml2_core_ip_params core_dcn4_ip_caps_base = {
+static const struct dml2_core_ip_params core_dcn4_ip_caps_base = {
// Hardcoded values for DCN3x
.vblank_nom_default_us = 668,
.remote_iommu_outstanding_translations = 256,
@@ -70,6 +69,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.max_num_dp2p0_streams = 4,
.imall_supported = 1,
.max_flip_time_us = 80,
+ .max_flip_time_lines = 32,
.words_per_channel = 16,
.subvp_fw_processing_delay_us = 15,
@@ -77,84 +77,6 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.subvp_swath_height_margin_lines = 16,
};
-struct dml2_core_ip_params core_dcn4sw_ip_caps_base = {
- .vblank_nom_default_us = 668,
- .remote_iommu_outstanding_translations = 256,
- .rob_buffer_size_kbytes = 192,
- .config_return_buffer_size_in_kbytes = 1280,
- .config_return_buffer_segment_size_in_kbytes = 64,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .dpte_buffer_size_in_pte_reqs_luma = 68,
- .dpte_buffer_size_in_pte_reqs_chroma = 36,
- .pixel_chunk_size_kbytes = 8,
- .alpha_pixel_chunk_size_kbytes = 4,
- .min_pixel_chunk_size_bytes = 1024,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 1171920,
- .max_line_buffer_lines = 32,
- .writeback_interface_buffer_size_kbytes = 90,
-
- //Number of pipes after DCN Pipe harvesting
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .dppclk_delay_subtotal = 47,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 28,
- .dppclk_delay_cnvc_cursor = 6,
- .cursor_buffer_size = 24,
- .cursor_chunk_size = 2,
- .dispclk_delay_subtotal = 125,
- .max_inter_dcn_tile_repeaters = 8,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_buffer_size = 0,
- .num_dsc = 4,
- .maximum_dsc_bits_per_component = 12,
- .maximum_pixels_per_line_per_dsc_unit = 5760,
- .dsc422_native_support = true,
- .dcc_supported = true,
- .ptoi_supported = false,
-
- .cursor_64bpp_support = true,
- .dynamic_metadata_vm_enabled = false,
-
- .max_num_hdmi_frl_outputs = 1,
- .max_num_dp2p0_outputs = 4,
- .max_num_dp2p0_streams = 4,
- .imall_supported = 1,
- .max_flip_time_us = 80,
- .words_per_channel = 16,
-
- .subvp_fw_processing_delay_us = 15,
- .subvp_pstate_allow_width_us = 20,
- .subvp_swath_height_margin_lines = 16,
-
- .dcn_mrq_present = 1,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_zs = 64,
- .dcc_meta_buffer_size_bytes = 6272,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
-
- .dchub_arb_to_ret_delay = 102,
- .hostvm_mode = 1,
-};
-
static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *ip_caps, const struct dml2_core_ip_params *ip_params)
{
ip_caps->pipe_count = ip_params->max_num_dpp;
@@ -169,6 +91,7 @@ static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *i
ip_caps->meta_fifo_size_in_kentries = ip_params->meta_fifo_size_in_kentries;
ip_caps->compressed_buffer_segment_size_in_kbytes = ip_params->compressed_buffer_segment_size_in_kbytes;
ip_caps->max_flip_time_us = ip_params->max_flip_time_us;
+ ip_caps->max_flip_time_lines = ip_params->max_flip_time_lines;
ip_caps->hostvm_mode = ip_params->hostvm_mode;
// FIXME_STAGE2: cleanup after adding all dv override to ip_caps
@@ -192,6 +115,7 @@ static void patch_ip_params_with_ip_caps(struct dml2_core_ip_params *ip_params,
ip_params->meta_fifo_size_in_kentries = ip_caps->meta_fifo_size_in_kentries;
ip_params->compressed_buffer_segment_size_in_kbytes = ip_caps->compressed_buffer_segment_size_in_kbytes;
ip_params->max_flip_time_us = ip_caps->max_flip_time_us;
+ ip_params->max_flip_time_lines = ip_caps->max_flip_time_lines;
ip_params->hostvm_mode = ip_caps->hostvm_mode;
}
@@ -222,6 +146,7 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
}
memcpy(&core->clean_me_up.mode_lib.soc, in_out->soc_bb, sizeof(struct dml2_soc_bb));
+ memcpy(&core->clean_me_up.mode_lib.ip_caps, in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
return true;
}
@@ -246,10 +171,12 @@ static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *p
phantom->stream_index = phantom_stream_index;
phantom->overrides.refresh_from_mall = dml2_refresh_from_mall_mode_override_force_disable;
phantom->overrides.legacy_svp_config = dml2_svp_mode_override_phantom_pipe_no_data_return;
- phantom->composition.viewport.plane0.height = (long int unsigned) math_ceil2(
- (double)phantom->composition.viewport.plane0.height * (double)phantom_stream->timing.v_active / (double)main_stream->timing.v_active, 16.0);
- phantom->composition.viewport.plane1.height = (long int unsigned) math_ceil2(
- (double)phantom->composition.viewport.plane1.height * (double)phantom_stream->timing.v_active / (double)main_stream->timing.v_active, 16.0);
+ phantom->composition.viewport.plane0.height = (long int unsigned) math_min2(math_ceil2(
+ (double)main->composition.scaler_info.plane0.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+ (double)main->composition.viewport.plane0.height);
+ phantom->composition.viewport.plane1.height = (long int unsigned) math_min2(math_ceil2(
+ (double)main->composition.scaler_info.plane1.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+ (double)main->composition.viewport.plane1.height);
phantom->immediate_flip = false;
phantom->dynamic_meta_data.enable = false;
phantom->cursor.num_cursors = 0;
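
The new formula sizes the phantom viewport from the plane's vertical scaling ratio instead of the main/phantom v_active ratio, and clamps the result so the phantom never covers more than the main viewport. A worked example, assuming math_ceil2(x, g) rounds x up to a multiple of g and math_min2 takes the smaller value:

    /* Example: plane0 v_ratio = 1.5, phantom v_active = 120 lines,
     * main viewport plane0.height = 1080 lines.
     *
     *   raw     = 1.5 * 120            = 180.0
     *   rounded = math_ceil2(180, 16)  = 192   (next multiple of 16)
     *   height  = math_min2(192, 1080) = 192   (clamped to main viewport)
     */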
@@ -344,6 +271,8 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
// Check if FAMS2 is required
if (display_cfg->stage3.performed && display_cfg->stage3.success) {
programming->fams2_required = display_cfg->stage3.fams2_required;
+
+ dml2_core_calcs_get_global_fams2_programming(&core->clean_me_up.mode_lib, display_cfg, &programming->fams2_global_config);
}
// Only loop over all the main streams (the implicit svp streams will be packed as part of the main stream)
@@ -621,7 +550,7 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
l->mode_programming_ex_params.min_clk_table = in_out->instance->minimum_clock_table;
l->mode_programming_ex_params.cfg_support_info = in_out->cfg_support_info;
l->mode_programming_ex_params.programming = in_out->programming;
- l->mode_programming_ex_params.min_clk_index = lookup_uclk_dpm_index_by_freq(in_out->programming->min_clocks.dcn4.active.uclk_khz,
+ l->mode_programming_ex_params.min_clk_index = lookup_uclk_dpm_index_by_freq(in_out->programming->min_clocks.dcn4x.active.uclk_khz,
&core->clean_me_up.mode_lib.soc);
result = dml2_core_calcs_mode_programming_ex(&l->mode_programming_ex_params);
@@ -641,20 +570,20 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
for (plane_index = 0; plane_index < in_out->programming->display_config.num_planes; plane_index++) {
in_out->programming->plane_programming[plane_index].num_dpps_required = core->clean_me_up.mode_lib.mp.NoOfDPP[plane_index];
- if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
- else if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
- else if (in_out->programming->display_config.plane_descriptors->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
- else {
- if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive;
- else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank;
- else
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
- }
+ if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ else {
+ if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive;
+ else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank;
+ else
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
+ }
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
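
The substantive fix in the hunk above is the indexing: plane_descriptors->overrides... dereferences the array member as a pointer and therefore always inspects plane 0, while plane_descriptors[plane_index].overrides... reads the descriptor of the plane actually being iterated. In plain C terms:

    /*
     * For an array member, p->field is (&p[0])->field, so:
     *
     *   plane_descriptors->overrides.legacy_svp_config
     *       always evaluates plane 0's override, on every loop iteration;
     *   plane_descriptors[plane_index].overrides.legacy_svp_config
     *       evaluates the override of the plane under consideration.
     */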
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
index 235280c6dcf5..e62b2d3eeee6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_CORE_DCN4_H__
#define __DML2_CORE_DCN4_H__
bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 6f4026e396e0..3ea54fd52e46 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -8,35 +8,55 @@
#include "dml2_debug.h"
#include "lib_float_math.h"
#include "dml_top_types.h"
-#include "dml2_core_shared.h"
-#define DML_VM_PTE_ADL_PATCH_EN
-//#define DML_TVM_UPDATE_EN
-#define DML_TDLUT_ROW_BYTES_FIX_EN
-#define DML_REG_LIMIT_CLAMP_EN
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
#define DML_MAX_NUM_OF_SLICES_PER_DSC 4
-static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
+const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
+{
+ switch (bw_type) {
+ case (dml2_core_internal_bw_sdp):
+ return("dml2_core_internal_bw_sdp");
+ case (dml2_core_internal_bw_dram):
+ return("dml2_core_internal_bw_dram");
+ case (dml2_core_internal_bw_max):
+ return("dml2_core_internal_bw_max");
+ default:
+ return("dml2_core_internal_bw_unknown");
+ }
+}
+
+const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
+{
+ switch (dml2_core_internal_soc_state_type) {
+ case (dml2_core_internal_soc_state_sys_idle):
+ return("dml2_core_internal_soc_state_sys_idle");
+ case (dml2_core_internal_soc_state_sys_active):
+ return("dml2_core_internal_soc_state_sys_active");
+ case (dml2_core_internal_soc_state_svp_prefetch):
+ return("dml2_core_internal_soc_state_svp_prefetch");
+ case dml2_core_internal_soc_state_max:
+ default:
+ return("dml2_core_internal_soc_state_unknown");
+ }
+}
+
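The two string helpers above make debug traces self-describing. A typical call site, using the dml2_printf wrapper already used throughout this file (the specific trace line is illustrative):

    /* Sketch: log an enum by name instead of by raw value. */
    dml2_printf("DML: bw_type = %s\n",
                dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp));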
+static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned int *remainder)
+{
+ *remainder = ((dividend / divisor) - (int)(dividend / divisor) > 0);
+ return dividend / divisor;
+}
+
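One caveat on the helper just added: despite its name, *remainder is not the arithmetic remainder. It is set to 1 when the division leaves a fractional part and 0 when it divides evenly, while the quotient itself is returned as a double. A usage sketch under that reading:

    /* Sketch of dml2_core_div_rem() behavior as written above. */
    unsigned int rem;
    double q;

    q = dml2_core_div_rem(7.0, 2, &rem); /* q == 3.5, rem == 1 (has fraction) */
    q = dml2_core_div_rem(8.0, 2, &rem); /* q == 4.0, rem == 0 (divides evenly) */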
+static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
dml2_printf("DML: ===================================== \n");
dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
- if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
- if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
- if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
- if (!fail_only || support->DSCOnlyIfNecessaryWithBPP == 1)
- dml2_printf("DML: support: DSCOnlyIfNecessaryWithBPP = %d\n", support->DSCOnlyIfNecessaryWithBPP);
- if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
- if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ if (!fail_only || support->ViewportSizeSupport == 0)
+ dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
@@ -45,74 +65,87 @@ static void dml2_print_dml_mode_support_info(const struct dml2_core_internal_mod
dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ if (!fail_only || support->ExceededMultistreamSlots == 1)
+ dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
- if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
- if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
- if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
- if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
- if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
- if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
- if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
- if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
- if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
- if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
- if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
- if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
- if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ if (!fail_only || support->P2IWith420 == 1)
+ dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ if (!fail_only || support->DSC422NativeNotSupported == 1)
+ dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ if (!fail_only || support->DSCSlicesODMModeSupported == 0)
+ dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
- if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
+ dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
+ dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
+ dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
- if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
+ dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
- if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
+ dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ if (!fail_only || support->TotalAvailablePipesSupport == 0)
+ dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ if (!fail_only || support->NumberOfOTGSupport == 0)
+ dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
+ dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ if (!fail_only || support->NumberOfDP2p0Support == 0)
+ dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ if (!fail_only || support->EnoughWritebackUnits == 0)
+ dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
+ dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ if (!fail_only || support->WritebackLatencySupport == 0)
+ dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ if (!fail_only || support->CursorSupport == 0)
+ dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ if (!fail_only || support->PitchSupport == 0)
+ dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ if (!fail_only || support->ViewportExceedsSurface == 1)
+ dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
+ dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ if (!fail_only || support->AvgBandwidthSupport == 0)
+ dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
- if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ if (!fail_only || support->ExceededMALLSize == 1)
+ dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ if (!fail_only || support->g6_temp_read_support == 0)
+ dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ if (!fail_only || support->ImmediateFlipSupport == 0)
+ dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ if (!fail_only || support->LinkCapacitySupport == 0)
+ dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+
if (!fail_only || support->ModeSupport == 0)
dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
dml2_printf("DML: ===================================== \n");
}
@@ -235,6 +268,7 @@ dml_get_per_pipe_var_func(vstartup_calculated, unsigned int, mode_lib->mp.VStart
dml_get_per_pipe_var_func(vupdate_offset, unsigned int, mode_lib->mp.VUpdateOffsetPix);
dml_get_per_pipe_var_func(vupdate_width, unsigned int, mode_lib->mp.VUpdateWidthPix);
dml_get_per_pipe_var_func(vready_offset, unsigned int, mode_lib->mp.VReadyOffsetPix);
+dml_get_per_pipe_var_func(pstate_keepout_dst_lines, unsigned int, mode_lib->mp.pstate_keepout_dst_lines);
dml_get_per_pipe_var_func(det_stored_buffer_size_l_bytes, unsigned int, mode_lib->mp.DETBufferSizeY);
dml_get_per_pipe_var_func(det_stored_buffer_size_c_bytes, unsigned int, mode_lib->mp.DETBufferSizeC);
dml_get_per_pipe_var_func(det_buffer_size_kbytes, unsigned int, mode_lib->mp.DETBufferSizeInKByte);
@@ -480,7 +514,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode
default:
DML2_ASSERT(0);
return 256;
- };
+ }
}
static bool dml_is_vertical_rotation(enum dml2_rotation_angle Scan)
@@ -2051,7 +2085,11 @@ static void CalculateDCCConfiguration(
unsigned int full_swath_bytes_vert_wc_l;
unsigned int full_swath_bytes_vert_wc_c;
- yuv420 = dml_is_420(SourcePixelFormat);
+ if (dml_is_420(SourcePixelFormat))
+ yuv420 = 1;
+ else
+ yuv420 = 0;
+
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
@@ -2343,16 +2381,16 @@ static void calculate_mcache_row_bytes(
}
if (p->gpuvm_enable) {
- meta_per_mvmpg_per_channel = (float)vmpg_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float)vmpg_bytes / (float)256 / p->num_chans;
 //but using the est_blk_per_vmpg between 2 and 4, so as not to be as pessimistic
if (p->surf_vert && vmpg_bytes > blk_bytes) {
- meta_per_mvmpg_per_channel = (float)est_blk_per_vmpg * blk_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float)est_blk_per_vmpg * blk_bytes / (float)256 / p->num_chans;
}
*p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel)); // dcc_dr_oh_nom
} else {
- meta_per_mvmpg_per_channel = (float) blk_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float) blk_bytes / (float)256 / p->num_chans;
if (!p->surf_vert)
*p->dcc_dram_bw_nom_overhead_factor = 1 + 1.0 / 256.0;
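For orientation, a minimal standalone sketch of the nominal DCC DRAM overhead factor computed in this hunk (math_max2/math_ceil2 are modeled with libm here; the 256 B data-per-metadata-byte ratio comes from the code above, and the example values are illustration only):

    #include <math.h>

    /* 1 byte of DCC metadata covers 256 bytes of data; the per-channel
     * metadata footprint is rounded up to a whole memory word. */
    static double dcc_nom_overhead(double meta_per_mvmpg_per_channel,
                                   double mem_word_bytes)
    {
            double rounded = ceil(meta_per_mvmpg_per_channel / mem_word_bytes)
                             * mem_word_bytes;

            return 1.0 + fmax(1.0 / 256.0,
                              rounded / (256.0 * meta_per_mvmpg_per_channel));
    }
    /* e.g. dcc_nom_overhead(2.0, 32) = 1 + max(1/256, 32/512) = 1.0625 */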
@@ -2519,8 +2557,11 @@ static void calculate_mcache_setting(
l->luma_time_factor = (double)l->mvmpg_width_c / l->mvmpg_width_l * 2;
// The algorithm starts with computing a non-integer, avg_mcache_element_size_l/c:
- l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
- if (l->is_dual_plane) {
+ if (*p->num_mcaches_l) {
+ l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
+ }
+
+ if (l->is_dual_plane && *p->num_mcaches_c) {
l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
@@ -2649,9 +2690,9 @@ static double dml_get_return_bandwidth_available(
double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes;
double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes;
- double derate_sdp_factor = 1;
- double derate_fabric_factor = 1;
- double derate_dram_factor = 1;
+ double derate_sdp_factor;
+ double derate_fabric_factor;
+ double derate_dram_factor;
double derate_sdp_bandwidth;
double derate_fabric_bandwidth;
@@ -2851,16 +2892,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
s->HostVMDynamicLevels = CalculateHostVMDynamicLevels(p->display_cfg->gpuvm_enable, p->display_cfg->hostvm_enable, p->HostVMMinPageSize, p->display_cfg->hostvm_max_non_cached_page_table_levels);
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->hostvm_enable == true) {
+ if (p->display_cfg->gpuvm_enable == true) {
p->vm_group_bytes[k] = 512;
p->dpte_group_bytes[k] = 512;
- } else if (p->display_cfg->gpuvm_enable == true) {
- p->vm_group_bytes[k] = 2048;
- if (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes >= 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle)) {
- p->dpte_group_bytes[k] = 512;
- } else {
- p->dpte_group_bytes[k] = 2048;
- }
} else {
p->vm_group_bytes[k] = 0;
p->dpte_group_bytes[k] = 0;
@@ -3185,7 +3219,7 @@ static double CalculateUrgentLatency(
double fabric_max_transport_latency_margin)
{
double urgent_latency = 0;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
urgent_latency = (df_qos_response_time_fclk_cycles + mall_overhead_fclk_cycles) / FabricClock
+ max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1 + fabric_max_transport_latency_margin / 100.0)
+ urgent_ramp_uclk_cycles / uclk_freq_mhz * (1 + umc_urgent_ramp_latency_margin / 100.0);
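Worked numbers for the dcn4x branch above, as a sanity check (all inputs are made-up illustration values, not real QoS parameters; cycle counts divided by a clock in MHz yield microseconds):

    #include <stdio.h>

    int main(void)
    {
            double FabricClock = 1000.0, uclk_freq_mhz = 800.0;
            double df_qos_cycles = 50.0, mall_oh_cycles = 10.0;
            double round_trip_cycles = 100.0, ramp_cycles = 200.0;
            double fabric_margin = 10.0, umc_margin = 20.0;

            double urgent_latency =
                    (df_qos_cycles + mall_oh_cycles) / FabricClock                   /* 0.06 us */
                    + round_trip_cycles / FabricClock * (1 + fabric_margin / 100.0)  /* 0.11 us */
                    + ramp_cycles / uclk_freq_mhz * (1 + umc_margin / 100.0);        /* 0.30 us */

            printf("urgent_latency = %.3f us\n", urgent_latency); /* 0.470 us */
            return 0;
    }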
@@ -3196,7 +3230,7 @@ static double CalculateUrgentLatency(
}
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
@@ -3226,7 +3260,7 @@ static double CalculateTripToMemory(
double fabric_max_transport_latency_margin)
{
double trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
trip_to_memory_us = mall_overhead_fclk_cycles / FabricClock
+ max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
+ trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
@@ -3235,7 +3269,7 @@ static double CalculateTripToMemory(
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
@@ -3265,7 +3299,7 @@ static double CalculateMetaTripToMemory(
double fabric_max_transport_latency_margin)
{
double meta_trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
meta_trip_to_memory_us = meta_trip_to_memory_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
+ meta_trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
} else {
@@ -3273,7 +3307,7 @@ static double CalculateMetaTripToMemory(
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
@@ -3781,8 +3815,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
+ p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
if (p->SwathHeightC[k] == 0)
@@ -3841,7 +3875,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
*p->compbuf_reserved_space_64b = 2 * p->pixel_chunk_size_kbytes * 1024 / 64;
if (*p->UnboundedRequestEnabled) {
*p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
- (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / 64)), 1.0);
+ (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? MAXIMUMCOMPRESSION : 1) / 64)), 1.0);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
@@ -3852,21 +3886,20 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
#endif
*p->hw_debug5 = false;
- if (!p->mrq_present) {
- for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (!(*p->UnboundedRequestEnabled)
- && p->display_cfg->plane_descriptors[k].surface.dcc.enable
- && ((p->rob_buffer_size_kbytes * 1024 + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
- *p->hw_debug5 = true;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
+ for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
+ if (!(p->mrq_present) && (!(*p->UnboundedRequestEnabled)) && (TotalActiveDPP == 1)
+ && p->display_cfg->plane_descriptors[k].surface.dcc.enable
+ && ((p->rob_buffer_size_kbytes * 1024 * (p->mrq_present ? MAXIMUMCOMPRESSION : 1)
+ + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
+ *p->hw_debug5 = true;
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
+ dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
+ dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
+ dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
+ dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
#endif
- }
}
}
@@ -4559,15 +4592,6 @@ static void calculate_tdlut_setting(
return;
}
-
- if (!p->setup_for_tdlut) {
- *p->tdlut_groups_per_2row_ub = 0;
- *p->tdlut_opt_time = 0;
- *p->tdlut_drain_time = 0;
- *p->tdlut_bytes_per_group = 0;
- return;
- }
-
if (p->tdlut_mpc_width_flag) {
tdlut_mpc_width = 33;
tdlut_bytes_per_group_simple = 39*256;
@@ -4616,7 +4640,7 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
 //the delivery cycles are DispClk cycles per line * number of lines * number of slices
tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
- tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / 9.0;
+ tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
@@ -4627,7 +4651,7 @@ static void calculate_tdlut_setting(
//the tdlut is fetched during the 2 row times of prefetch.
if (p->setup_for_tdlut) {
- *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2(*p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
+ *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
}
@@ -4640,7 +4664,7 @@ static void calculate_tdlut_setting(
dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %u\n", __func__, p->tdlut_addressing_mode);
+ dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
@@ -4706,11 +4730,12 @@ static void CalculateTarb(
static double CalculateTWait(
long reserved_vblank_time_ns,
double UrgentLatency,
- double Ttrip)
+ double Ttrip,
+ double g6_temp_read_blackout_us)
{
double TWait;
double t_urg_trip = math_max2(UrgentLatency, Ttrip);
- TWait = reserved_vblank_time_ns/1000.0 + t_urg_trip;
+ TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns);
@@ -4858,13 +4883,23 @@ static double get_urgent_bandwidth_required(
}
if (!exclude_this_plane) {
- surface_required_bw[k] = math_max4(NumberOfDPP[k] * prefetch_vmrow_bw[k],
- l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur,
- l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre,
- (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k]);
+ l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
+ l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
+ l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
+ surface_required_bw[k] = math_max4(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
+ dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
+ dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
+ dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
+ dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
+ dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
+#endif
} else {
surface_required_bw[k] = 0.0;
}
@@ -4873,6 +4908,8 @@ static double get_urgent_bandwidth_required(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
+ dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
+ dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
@@ -4886,6 +4923,8 @@ static double get_urgent_bandwidth_required(
dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
+ dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
@@ -4964,7 +5003,7 @@ static void CalculateExtraLatency(
max_request_size_bytes = request_size_bytes_chroma[k];
}
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
*ExtraLatency = *ExtraLatency_sr;
if (max_oustanding_when_urgent_expected)
@@ -4980,11 +5019,14 @@ static void CalculateExtraLatency(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
+ dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
@@ -5021,6 +5063,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->trip_to_mem = 0.0;
*p->Tvm_trips = 0.0;
*p->Tr0_trips = 0.0;
+ s->Tvm_no_trip_oto = 0.0;
+ s->Tr0_no_trip_oto = 0.0;
s->Tvm_trips_rounded = 0.0;
s->Tr0_trips_rounded = 0.0;
s->max_Tsw = 0.0;
@@ -5037,7 +5081,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->bytes_pp = 0.0;
s->dep_bytes = 0.0;
s->min_Lsw_oto = 0.0;
+ s->min_Lsw_equ = 0.0;
s->Tsw_est1 = 0.0;
+ s->Tsw_est2 = 0.0;
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
@@ -5059,7 +5105,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: MaxVStartup = %u\n", __func__, p->MaxVStartup);
dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
@@ -5092,21 +5137,15 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->LineTime = p->myPipe->HTotal / p->myPipe->PixelClock;
s->trip_to_mem = p->Ttrip;
-#ifdef DML_TVM_UPDATE_EN
*p->Tvm_trips = p->ExtraLatencyPrefetch + math_max2(s->trip_to_mem * (p->display_cfg->gpuvm_max_page_table_levels * (s->HostVMDynamicLevelsTrips + 1)), p->Turg);
if (dcc_mrq_enable)
*p->Tvm_trips_flip = *p->Tvm_trips;
else
*p->Tvm_trips_flip = *p->Tvm_trips - s->trip_to_mem;
-#else
- *p->Tvm_trips = p->ExtraLatencyPrefetch + s->trip_to_mem * (p->display_cfg->gpuvm_max_page_table_levels * (s->HostVMDynamicLevelsTrips + 1));
- *p->Tvm_trips_flip = *p->Tvm_trips - s->trip_to_mem;
-#endif
*p->Tr0_trips_flip = s->trip_to_mem * (s->HostVMDynamicLevelsTrips + 1);
*p->Tr0_trips = math_max2(*p->Tr0_trips_flip, p->tdlut_opt_time / 2);
-#ifdef DML_TVM_UPDATE_EN
if (p->DynamicMetadataVMEnabled == true) {
*p->Tdmdl_vm = s->TWait_p + *p->Tvm_trips;
*p->Tdmdl = *p->Tdmdl_vm + p->Ttrip;
@@ -5114,15 +5153,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->Tdmdl_vm = 0;
*p->Tdmdl = s->TWait_p + p->ExtraLatencyPrefetch + p->Ttrip; // Tex
}
-#else
- if (p->DynamicMetadataVMEnabled == true) {
- *p->Tdmdl_vm = s->TWait_p + *p->Tvm_trips;
- *p->Tdmdl = *p->Tdmdl_vm + p->Ttrip;
- } else {
- *p->Tdmdl_vm = 0;
- *p->Tdmdl = p->TWait + p->ExtraLatencyPrefetch; // Tex
- }
-#endif
if (p->DynamicMetadataEnable == true) {
if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
@@ -5186,7 +5216,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
#endif
- s->NoTimeToPrefetch = false;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
@@ -5199,14 +5228,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
*p->Tvm_trips_flip_rounded = math_ceil2(4.0 * *p->Tvm_trips_flip / s->LineTime, 1.0) / 4.0 * s->LineTime;
} else {
-#ifdef DML_TVM_UPDATE_EN
if (p->DynamicMetadataEnable || dcc_mrq_enable || p->setup_for_tdlut)
s->Tvm_trips_rounded = math_max2(s->LineTime * math_ceil2(4.0*math_max3(p->ExtraLatencyPrefetch, p->Turg, s->trip_to_mem)/s->LineTime, 1)/4, s->LineTime/4.0);
else
- s->Tvm_trips_rounded = s->LineTime / 4.0;
-#else
- s->Tvm_trips_rounded = s->LineTime / 4.0;
-#endif
+ s->Tvm_trips_rounded = s->LineTime / 4.0;
*p->Tvm_trips_flip_rounded = s->LineTime / 4.0;
}
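Both branches above quantize trip times to quarter-line granularity, since the DST_Y_* fields are programmed in units of 0.25 line; the rounding in isolation (ceil stands in for math_ceil2):

    #include <math.h>

    /* round a time up to the next multiple of LineTime/4 */
    static double round_to_quarter_line(double t_us, double line_time_us)
    {
            return ceil(4.0 * t_us / line_time_us) / 4.0 * line_time_us;
    }
    /* e.g. line_time = 8 us, t = 9.1 us: ceil(4.55) = 5 -> 5/4 * 8 = 10 us */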
@@ -5235,16 +5260,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->Tno_bw = 0;
}
-#ifdef DML_TVM_UPDATE_EN
if (p->mrq_present || p->display_cfg->gpuvm_max_page_table_levels >= 3)
*p->Tno_bw_flip = *p->Tno_bw;
else
*p->Tno_bw_flip = 0; //because there is no 3DLUT for iFlip
-#else
- *p->Tno_bw_flip = 0;
- if (p->display_cfg->gpuvm_enable == true)
- *p->Tno_bw_flip = *p->Tno_bw;
-#endif
if (dml_is_420(p->myPipe->SourcePixelFormat)) {
s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC / 4.0;
@@ -5258,64 +5277,63 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
-#ifdef DML_TDLUT_ROW_BYTES_FIX_EN
s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
-#endif
s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
+ s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__;
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0);
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime);
+
vm_bytes = p->vm_bytes; // vm_bytes is dpde0_bytes_per_frame_ub_l + dpde0_bytes_per_frame_ub_c + 2*extra_dpde_bytes;
extra_tdpe_bytes = (unsigned int)math_max2(0, (p->display_cfg->gpuvm_max_page_table_levels - 1) * 128);
if (p->setup_for_tdlut)
vm_bytes = vm_bytes + p->tdlut_pte_bytes_per_frame + (p->display_cfg->gpuvm_enable ? extra_tdpe_bytes : 0);
-#ifdef DML_TDLUT_ROW_BYTES_FIX_EN
tdlut_row_bytes = (unsigned long) math_ceil2(p->tdlut_bytes_per_frame/2.0, 1.0);
-#else
- tdlut_row_bytes = p->tdlut_pte_bytes_per_frame;
-#endif
-#ifdef DML_REG_LIMIT_CLAMP_EN
s->prefetch_bw_oto = math_max3(s->prefetch_bw_oto,
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
-#endif
s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0;
if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_oto = math_max3(
- *p->Tvm_trips,
+ s->Tvm_no_trip_oto = math_max2(
*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto,
s->LineTime / 4.0);
+ s->Tvm_oto = math_max2(
+ *p->Tvm_trips,
+ s->Tvm_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
-#ifdef DML_TVM_UPDATE_EN
+ s->Tvm_no_trip_oto = s->Tvm_trips_rounded;
s->Tvm_oto = s->Tvm_trips_rounded;
-#else
- s->Tvm_oto = s->LineTime / 4.0;
-#endif
}
if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) {
- s->Tr0_oto = math_max3(
- *p->Tr0_trips,
+ s->Tr0_no_trip_oto = math_max2(
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
+ s->Tr0_oto = math_max2(
+ *p->Tr0_trips,
+ s->Tr0_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
- } else
- s->Tr0_oto = (s->LineTime - s->Tvm_oto) / 4.0;
+ } else {
+ s->Tr0_no_trip_oto = (s->LineTime - s->Tvm_oto) / 4.0;
+ s->Tr0_oto = s->Tr0_no_trip_oto;
+ }
s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0;
s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0;
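The new min_Lsw_equ mirrors min_Lsw_oto with its own VRatio cap; the shared shape of both bounds, as a sketch (names and the 2-line floor follow the hunk above; the __DML2_CALCS_MAX_VRATIO_PRE_* cap is passed in as a plain parameter here):

    #include <math.h>

    static double min_lsw(double prefetch_lines_y, double prefetch_lines_c,
                          double max_vratio_pre, double tdlut_drain_time_us,
                          double line_time_us)
    {
            double lsw = fmax(prefetch_lines_y, prefetch_lines_c) / max_vratio_pre;

            lsw = fmax(lsw, 2.0);                                 /* hw minimum */
            return fmax(lsw, tdlut_drain_time_us / line_time_us); /* 3D LUT drain */
    }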
@@ -5325,19 +5343,16 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
Lo = (unsigned int)(*p->DSTYAfterScaler + (double)*p->DSTXAfterScaler / (double)p->myPipe->HTotal);
//Tpre_equ in line time
-#ifdef DML_TVM_UPDATE_EN
if (p->DynamicMetadataVMEnabled && p->DynamicMetadataEnable)
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, *p->Tvm_trips) + s->TWait_p) / s->LineTime - Lo;
else
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, p->ExtraLatencyPrefetch) + s->TWait_p) / s->LineTime - Lo;
-#else
- s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(s->TWait_p + p->TCalc, *p->Tdmdl - p->Ttrip)) / s->LineTime - Lo;
-#endif
 s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // clamp to the U6.2 register limit for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
@@ -5375,6 +5390,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
s->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
+#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
@@ -5395,18 +5411,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
-
- s->dep_bytes = math_max2(vm_bytes * p->HostVMInefficiencyFactor, p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
-
- dml2_printf("DML::%s: dep_bytes: %f\n", __func__, s->dep_bytes);
- dml2_printf("DML::%s: prefetch_sw_bytes: %f\n", __func__, s->prefetch_sw_bytes);
dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
-
- if (s->prefetch_sw_bytes < s->dep_bytes) {
- s->prefetch_sw_bytes = 2 * s->dep_bytes;
- dml2_printf("DML::%s: bump prefetch_sw_bytes to %f\n", __func__, s->prefetch_sw_bytes);
- }
+ dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre));
+ dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+#endif
*p->dst_y_per_vm_vblank = 0;
*p->dst_y_per_row_vblank = 0;
@@ -5419,7 +5429,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// Tvm_trips_rounded is Tvm_trips ceiling to 1/4 line time
// Tr0_trips_rounded is Tr0_trips ceiling to 1/4 line time
 // So that means the calculated prefetch bw can be higher since the total time available for prefetch is less
- if (s->dst_y_prefetch_equ > 1) {
+ bool min_Lsw_equ_ok = s->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime;
+
+ if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok) {
s->prefetch_bw1 = 0.;
s->prefetch_bw2 = 0.;
s->prefetch_bw3 = 0.;
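The new min_Lsw_equ_ok gate rejects the equ schedule up front when the rounded prefetch window cannot fit one VM fetch, two row fetches and the minimum software prefetch; expressed in isolation (a sketch, not the driver's API):

    #include <stdbool.h>

    static bool equ_schedule_feasible(double tpre_rounded, double tvm_trips_rounded,
                                      double tr0_trips_rounded, double min_lsw_equ,
                                      double line_time)
    {
            return tpre_rounded >= tvm_trips_rounded + 2.0 * tr0_trips_rounded
                                   + min_lsw_equ * line_time;
    }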
@@ -5436,28 +5448,35 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw1 = 0;
dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
- if ((p->VStartup == p->MaxVStartup) && (s->Tsw_est1 / s->LineTime < s->min_Lsw_oto) && (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
+ if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
- (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
+ (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, s->Tpre_rounded);
- dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_oto * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
+ dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
// prefetch_bw2: VM + SW
- if (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded > 0)
+ if (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) {
s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - *p->Tno_bw - 2 * s->Tr0_trips_rounded);
- else
+ (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded);
+ s->Tsw_est2 = s->prefetch_sw_bytes / s->prefetch_bw2;
+ } else
s->prefetch_bw2 = 0;
+ dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
+ if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
+ s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
+ dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
+ }
+
// prefetch_bw3: 2*R0 + SW
if (s->Tpre_rounded - s->Tvm_trips_rounded > 0) {
s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + s->prefetch_sw_bytes) /
@@ -5467,8 +5486,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw3 = 0;
dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
- if (p->VStartup == p->MaxVStartup && (s->Tsw_est3 / s->LineTime < s->min_Lsw_oto) && ((s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_oto * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
+ if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
+ s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
@@ -5484,6 +5503,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
+ dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
@@ -5504,9 +5524,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
 // this is to make sure the equ bw won't be more aggressive than the latency-based requirement.
// check vm time >= vm_trips
// check r0 time >= r0_trips
+
+ double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
+
+ dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
+ dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
+
if (s->prefetch_bw1 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1 >= s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw1 >= s->Tr0_trips_rounded) {
+ double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1;
+ double row_transfer_time = total_row_bytes / s->prefetch_bw1;
+ dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
+ if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case1OK = true;
}
}
@@ -5516,8 +5545,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// check vm time >= vm_trips
// check r0 time < r0_trips
if (s->prefetch_bw2 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2 >= s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw2 < s->Tr0_trips_rounded) {
+ double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2;
+ double row_transfer_time = total_row_bytes / s->prefetch_bw2;
+ dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
+ if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) {
Case2OK = true;
}
}
@@ -5526,8 +5558,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// check vm time < vm_trips
// check r0 time >= r0_trips
if (s->prefetch_bw3 > 0) {
- if (*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3 < s->Tvm_trips_rounded &&
- (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw3 >= s->Tr0_trips_rounded) {
+ double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3;
+ double row_transfer_time = total_row_bytes / s->prefetch_bw3;
+ dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
+ if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case3OK = true;
}
}
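The three cases above all derive the same two transfer times and differ only in which side of the latency bound each must fall on; condensed into one hypothetical helper:

    #include <stdbool.h>

    struct bw_case { bool vm_ok; bool row_ok; };

    static struct bw_case check_prefetch_bw(double bw, double tno_bw,
                                            double vm_bytes_scaled,
                                            double total_row_bytes,
                                            double tvm_trips_rounded,
                                            double tr0_trips_rounded)
    {
            double vm_time = tno_bw + vm_bytes_scaled / bw;
            double row_time = total_row_bytes / bw;

            return (struct bw_case){
                    .vm_ok = vm_time >= tvm_trips_rounded,   /* Case1 and Case2 */
                    .row_ok = row_time >= tr0_trips_rounded, /* Case1 and Case3 */
            };
    }
    /* Case1OK: vm_ok && row_ok; Case2OK: vm_ok && !row_ok; Case3OK: !vm_ok && row_ok */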
@@ -5542,11 +5577,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw_equ = s->prefetch_bw4;
}
-#ifdef DML_REG_LIMIT_CLAMP_EN
s->prefetch_bw_equ = math_max3(s->prefetch_bw_equ,
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
-#endif
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
@@ -5578,6 +5611,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
+ // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
+ s->Lsw_equ = s->dst_y_prefetch_equ - math_ceil2(4.0 * (s->Tvm_equ + 2 * s->Tr0_equ) / s->LineTime, 1.0) / 4.0;
+
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
*p->dst_y_prefetch = s->dst_y_prefetch_oto;
@@ -5586,29 +5622,28 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_vm_no_trip_vblank = math_ceil2(4.0 * s->Tvm_no_trip_oto / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_row_no_trip_vblank = math_ceil2(4.0 * s->Tr0_no_trip_oto / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
-
} else {
*p->dst_y_prefetch = s->dst_y_prefetch_equ;
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- if (p->VStartup == p->MaxVStartup) {
- *p->dst_y_per_vm_vblank = math_floor2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_floor2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- } else {
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- }
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_vm_no_trip_vblank = *p->dst_y_per_vm_vblank;
+ s->dst_y_per_row_no_trip_vblank = *p->dst_y_per_row_vblank;
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
- // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
- s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw
+ /* take worst case Lsw to calculate bandwidth requirement regardless of schedule */
+ s->LinesToRequestPrefetchPixelData = math_min2(s->Lsw_equ, s->Lsw_oto); // Lsw
s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
*p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
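The cursor prefetch bandwidth above follows directly from the worst-case Lsw; a standalone sketch of those two lines:

    #include <math.h>

    static double cursor_prefetch_bw(unsigned int num_cursors,
                                     unsigned int cursor_bytes_per_chunk,
                                     unsigned int cursor_bytes_per_line,
                                     double lsw_lines, double line_time_us)
    {
            /* fetch at least one chunk, or four full lines, whichever is larger */
            double bytes = fmax((double)cursor_bytes_per_chunk,
                                4.0 * (double)cursor_bytes_per_line);

            return num_cursors * bytes / (lsw_lines * line_time_us);
    }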
@@ -5645,7 +5680,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
+ dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
*p->VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
@@ -5668,7 +5703,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
+ dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
*p->VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
@@ -5690,14 +5725,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
#endif
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: MyErr set, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
+ dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
+ dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
*p->VRatioPrefetchY = 0;
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
}
-
dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
@@ -5708,7 +5742,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
} else {
- dml2_printf("DML::%s: MyErr set, dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ __func__, min_Lsw_equ_ok, s->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5727,26 +5763,26 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (vm_bytes == 0) {
prefetch_vm_bw = 0;
- } else if (*p->dst_y_per_vm_vblank > 0) {
+ } else if (s->dst_y_per_vm_no_trip_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
- prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
+ prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (s->dst_y_per_vm_no_trip_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
+ dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
}
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
prefetch_row_bw = 0;
- } else if (*p->dst_y_per_row_vblank > 0) {
- prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
+ } else if (s->dst_y_per_row_no_trip_vblank > 0) {
+ prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (s->dst_y_per_row_no_trip_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
@@ -5756,7 +5792,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
prefetch_row_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: MyErr set. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
+ dml2_printf("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
}
*p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
@@ -5773,11 +5809,16 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
+ *p->prefetch_vmrow_bw = 0;
}
dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
+ dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
+ dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
+
return s->NoTimeToPrefetch;
}
@@ -6169,6 +6210,7 @@ static void CalculateFlipSchedule(
unsigned int dpte_row_height_chroma,
bool use_one_row_for_frame_flip,
unsigned int max_flip_time_us,
+ unsigned int max_flip_time_lines,
unsigned int per_pipe_flip_bytes,
unsigned int meta_row_bytes,
unsigned int meta_row_height,
@@ -6183,12 +6225,13 @@ static void CalculateFlipSchedule(
{
struct dml2_core_shared_CalculateFlipSchedule_locals *l = &s->CalculateFlipSchedule_locals;
- l->dual_plane = dml2_core_shared_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha;
+ l->dual_plane = dml_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha;
l->dpte_row_bytes = DPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
+ dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
@@ -6239,7 +6282,8 @@ static void CalculateFlipSchedule(
if (use_lb_flip_bw) {
 // For mode check, calculate the flip bw requirement with worst case flip time
- l->max_flip_time = math_min2(l->min_row_time, math_max2(Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded, (double)max_flip_time_us));
+ l->max_flip_time = math_min2(math_min2(l->min_row_time, (double)max_flip_time_lines * LineTime / VRatio),
+ math_max2(Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded, (double)max_flip_time_us));
//The lower bound on flip bandwidth
 // Note: get_urgent_bandwidth_required already considers dpte_row_bw and meta_row_bw in the bandwidth calculation, so leave final_flip_bw = 0 if iflip is not required
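The tightened flip-time budget above caps the window by both the row time and the new max_flip_time_lines limit (scaled from output lines back to source time by VRatio), while still honoring the latency floor; as a sketch:

    #include <math.h>

    static double worst_case_flip_time(double min_row_time_us, double max_flip_time_lines,
                                       double line_time_us, double vratio,
                                       double tvm_trips_flip_rounded,
                                       double tr0_trips_flip_rounded,
                                       double max_flip_time_us)
    {
            double upper = fmin(min_row_time_us,
                                max_flip_time_lines * line_time_us / vratio);
            double floor_us = fmax(tvm_trips_flip_rounded + 2.0 * tr0_trips_flip_rounded,
                                   max_flip_time_us);

            return fmin(upper, floor_us);
    }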
@@ -6257,7 +6301,7 @@ static void CalculateFlipSchedule(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_row_bytes);
+ dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
@@ -6268,6 +6312,7 @@ static void CalculateFlipSchedule(
dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
+ dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
}
#endif
l->lb_flip_bw = math_max3(l->lb_flip_bw,
@@ -6284,7 +6329,7 @@ static void CalculateFlipSchedule(
*dst_y_per_vm_flip = 1; // not used
*dst_y_per_row_flip = 1; // not used
- *ImmediateFlipSupportedForPipe = true;
+ *ImmediateFlipSupportedForPipe = l->min_row_time >= (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded);
} else {
if (iflip_enable) {
l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
@@ -6350,6 +6395,7 @@ static void CalculateFlipSchedule(
dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
+ dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
}
dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
@@ -6380,6 +6426,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitTime + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
p->Watermark->Z8StutterExitWatermark = p->mmSOCParameters.SRExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
p->Watermark->Z8StutterEnterPlusExitWatermark = p->mmSOCParameters.SREnterPlusExitZ8Time + p->mmSOCParameters.ExtraLatency_sr + 10 / p->DCFClkDeepSleep;
+ if (p->mmSOCParameters.qos_type == dml2_qos_param_type_dcn4x) {
+ p->Watermark->StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
+ p->Watermark->StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
+ p->Watermark->Z8StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
+ p->Watermark->Z8StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
+ }
p->Watermark->g6_temp_read_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
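On dcn4x QoS, all four stutter watermarks above receive the same additive padding; the adjustment in isolation (field names assumed from this hunk):

    static double pad_stutter_watermark(double watermark_us,
                                        double max_urgent_latency_us,
                                        double df_response_time_us)
    {
            return watermark_us + max_urgent_latency_us + df_response_time_us;
    }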
@@ -6541,7 +6593,8 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_unsupported;
if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_auto) {
- if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
+ if (p->display_cfg->overrides.all_streams_blanked ||
+ (s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency))
p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank_and_vactive;
else if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
@@ -6585,13 +6638,13 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->src_y_ahead_c = (unsigned int)(math_floor2(p->DETBufferSizeC[k] / p->BytePerPixelDETC[k] / p->SwathWidthC[k], p->SwathHeightC[k]) + s->LBLatencyHidingSourceLinesC[k]);
s->sub_vp_lines_c = s->src_y_pstate_c + s->src_y_ahead_c + p->meta_row_height_c[k];
- if (dml2_core_shared_is_420(p->display_cfg->plane_descriptors[k].pixel_format))
+ if (dml_is_420(p->display_cfg->plane_descriptors[k].pixel_format))
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, 2 * s->sub_vp_lines_c));
else
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, p->meta_row_height_c[k]);
+ dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
@@ -6856,7 +6909,8 @@ struct dml2_core_internal_g6_temp_read_blackouts_table {
} entries[DML_MAX_CLK_TABLE_SIZE];
};
-struct dml2_core_internal_g6_temp_read_blackouts_table core_dcn4_g6_temp_read_blackout_table = {
+static const struct dml2_core_internal_g6_temp_read_blackouts_table
+ core_dcn4_g6_temp_read_blackout_table = {
.entries = {
{
.uclk_khz = 96000,
@@ -6921,6 +6975,43 @@ static double get_g6_temp_read_blackout_us(
return (double)blackout_us;
}
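+/*
+ * Worst-case urgent latency in us: the UMC urgent latency at the selected
+ * uclk DPM (derated by umc_max_latency_margin) plus MALL overhead and the
+ * round trip to the furthest CS on the fabric (derated by
+ * fabric_max_transport_latency_margin).
+ */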
+static double get_max_urgent_latency_us(
+ struct dml2_dcn4x_soc_qos_params *dcn4x,
+ double uclk_freq_mhz,
+ double FabricClock,
+ unsigned int min_clk_index)
+{
+ double latency;
+
+ latency = dcn4x->per_uclk_dpm_params[min_clk_index].maximum_latency_when_urgent_uclk_cycles / uclk_freq_mhz
+ * (1 + dcn4x->umc_max_latency_margin / 100.0)
+ + dcn4x->mall_overhead_fclk_cycles / FabricClock
+ + dcn4x->max_round_trip_to_furthest_cs_fclk_cycles / FabricClock
+ * (1 + dcn4x->fabric_max_transport_latency_margin / 100.0);
+ return latency;
+}
+
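+/*
+ * Convert the DRAM clock-change watermark into a per-plane keepout in
+ * destination lines: ceil(watermark_us / line_time_us), where the line
+ * time is h_total * 1000 / pixel_clock_khz, clamped to v_total - 1.
+ * Phantom pipes are skipped.
+ */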
+static void calculate_pstate_keepout_dst_lines(
+ const struct dml2_display_cfg *display_cfg,
+ const struct dml2_core_internal_watermarks *watermarks,
+ unsigned int pstate_keepout_dst_lines[])
+{
+ const struct dml2_stream_parameters *stream_descriptor;
+ unsigned int i;
+
+ for (i = 0; i < display_cfg->num_planes; i++) {
+ if (!dml_is_phantom_pipe(&display_cfg->plane_descriptors[i])) {
+ stream_descriptor = &display_cfg->stream_descriptors[display_cfg->plane_descriptors[i].stream_index];
+
+ pstate_keepout_dst_lines[i] =
+ (unsigned int)math_ceil(watermarks->DRAMClockChangeWatermark / ((double)stream_descriptor->timing.h_total * 1000.0 / (double)stream_descriptor->timing.pixel_clock_khz));
+
+ if (pstate_keepout_dst_lines[i] > stream_descriptor->timing.v_total - 1) {
+ pstate_keepout_dst_lines[i] = stream_descriptor->timing.v_total - 1;
+ }
+ }
+ }
+}
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
@@ -6963,7 +7054,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000);
- mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params);
+ mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
@@ -6981,7 +7072,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
@@ -7126,7 +7216,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024.0 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
+ (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
mode_lib->ms.support.WritebackLatencySupport = false;
}
}
@@ -7202,17 +7292,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#if defined(DV_BUILD)
// Assume a memory config setting of 3 in 420 mode or get a new ip parameter that reflects the programming.
if (mode_lib->ms.BytePerPixelC[k] != 0.0 && display_cfg->plane_descriptors[k].pixel_format != dml2_rgbe_alpha) {
- lb_buffer_size_bits_luma = 34620 * 57;;
+ lb_buffer_size_bits_luma = 34620 * 57;
lb_buffer_size_bits_chroma = 13560 * 57;
}
#endif
*/
- mode_lib->ms.MaximumSwathWidthInLineBufferLuma = lb_buffer_size_bits_luma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ /
+ mode_lib->ms.MaximumSwathWidthInLineBufferLuma = lb_buffer_size_bits_luma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, 1.0) / 57 /
(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio, 1.0) - 2, 0.0));
if (mode_lib->ms.BytePerPixelC[k] == 0.0) {
mode_lib->ms.MaximumSwathWidthInLineBufferChroma = 0;
} else {
- mode_lib->ms.MaximumSwathWidthInLineBufferChroma = lb_buffer_size_bits_chroma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, 1.0) / 57 /*FIXME_STAGE2 was: LBBitPerPixel*/ /
+ mode_lib->ms.MaximumSwathWidthInLineBufferChroma = lb_buffer_size_bits_chroma * math_max2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, 1.0) / 57 /
(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps + math_max2(math_ceil2(display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio, 1.0) - 2, 0.0));
}
@@ -7231,10 +7321,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
/* Cursor Support Check */
mode_lib->ms.support.CursorSupport = true;
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].cursor.cursor_width > 0.0) {
- if (display_cfg->plane_descriptors[k].cursor.cursor_bpp == 64 && mode_lib->ip.cursor_64bpp_support == false) {
+ if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
+ if (display_cfg->plane_descriptors[k].cursor.cursor_bpp == 64 && mode_lib->ip.cursor_64bpp_support == false)
mode_lib->ms.support.CursorSupport = false;
- }
}
}
@@ -7295,7 +7384,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.ViewportExceedsSurface = false;
if (!display_cfg->overrides.hw.surface_viewport_size_check_disable) {
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].composition.viewport.plane0.width > display_cfg->plane_descriptors[k].surface.plane0.width || display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
+ if (display_cfg->plane_descriptors[k].composition.viewport.plane0.width > display_cfg->plane_descriptors[k].surface.plane0.width ||
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
mode_lib->ms.support.ViewportExceedsSurface = true;
#if defined(__DML_VBA_DEBUG__)
dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
@@ -7304,11 +7394,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
#endif
- if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
- if (display_cfg->plane_descriptors[k].composition.viewport.plane1.width > display_cfg->plane_descriptors[k].surface.plane1.width ||
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height > display_cfg->plane_descriptors[k].surface.plane1.height) {
- mode_lib->ms.support.ViewportExceedsSurface = true;
- }
+ }
+ if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
+ if (display_cfg->plane_descriptors[k].composition.viewport.plane1.width > display_cfg->plane_descriptors[k].surface.plane1.width ||
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height > display_cfg->plane_descriptors[k].surface.plane1.height) {
+ mode_lib->ms.support.ViewportExceedsSurface = true;
}
}
}
@@ -7466,6 +7556,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&mode_lib->ms.OutputRate[k],
&mode_lib->ms.RequiredSlots[k]);
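+ /*
+  * Fall back to the calculated OutputBpp when no value was set above,
+  * so DSCDelayRequirement() below sees the effective bits per pixel.
+  */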
+ if (s->OutputBpp[k] == 0.0) {
+ s->OutputBpp[k] = mode_lib->ms.OutputBpp[k];
+ }
+
if (mode_lib->ms.RequiresDSC[k] == false) {
mode_lib->ms.ODMMode[k] = s->ODMModeNoDSC;
mode_lib->ms.RequiredDISPCLKPerSurface[k] = s->RequiredDISPCLKPerSurfaceNoDSC;
@@ -7580,7 +7674,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
mode_lib->ip.writeback_line_buffer_buffer_size));
}
@@ -7665,8 +7759,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_420 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced == 1 && mode_lib->ip.ptoi_supported == true)
mode_lib->ms.support.P2IWith420 = true;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary && s->OutputBpp[k] != 0)
- mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP = true;
if ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable_if_necessary) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format == dml2_n422 && !mode_lib->ip.dsc422_native_support)
mode_lib->ms.support.DSC422NativeNotSupported = true;
@@ -7819,7 +7911,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.DSCDelay[k] = DSCDelayRequirement(mode_lib->ms.RequiresDSC[k],
mode_lib->ms.ODMMode[k],
mode_lib->ip.maximum_dsc_bits_per_component,
- mode_lib->ms.OutputBpp[k],
+ s->OutputBpp[k],
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8059,59 +8151,63 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.excess_vactive_fill_bw_c);
mode_lib->ms.UrgLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
mode_lib->ms.FabricClock,
mode_lib->ms.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->ms.TripToMemory = CalculateTripToMemory(
mode_lib->ms.UrgLatency,
mode_lib->ms.FabricClock,
mode_lib->ms.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->ms.TripToMemory = math_max2(mode_lib->ms.UrgLatency, mode_lib->ms.TripToMemory);
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
bool cursor_not_enough_urgent_latency_hiding = 0;
- calculate_cursor_req_attributes(
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- display_cfg->plane_descriptors[k].cursor.cursor_bpp,
- // output
- &s->cursor_lines_per_chunk[k],
- &s->cursor_bytes_per_line[k],
- &s->cursor_bytes_per_chunk[k],
- &s->cursor_bytes[k]);
-
- calculate_cursor_urgent_burst_factor(
- mode_lib->ip.cursor_buffer_size,
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- s->cursor_bytes_per_chunk[k],
- s->cursor_lines_per_chunk[k],
- line_time_us,
- mode_lib->ms.UrgLatency,
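+ /*
+  * Only derive cursor request attributes and the urgent burst factor
+  * when the plane actually has a cursor; otherwise
+  * UrgentBurstFactorCursor[k] is left as-is.
+  */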
+ if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
+ calculate_cursor_req_attributes(
+ display_cfg->plane_descriptors[k].cursor.cursor_width,
+ display_cfg->plane_descriptors[k].cursor.cursor_bpp,
+
+ // output
+ &s->cursor_lines_per_chunk[k],
+ &s->cursor_bytes_per_line[k],
+ &s->cursor_bytes_per_chunk[k],
+ &s->cursor_bytes[k]);
+
+ calculate_cursor_urgent_burst_factor(
+ mode_lib->ip.cursor_buffer_size,
+ display_cfg->plane_descriptors[k].cursor.cursor_width,
+ s->cursor_bytes_per_chunk[k],
+ s->cursor_lines_per_chunk[k],
+ line_time_us,
+ mode_lib->ms.UrgLatency,
+
+ // output
+ &mode_lib->ms.UrgentBurstFactorCursor[k],
+ &cursor_not_enough_urgent_latency_hiding);
+ }
- // output
- &mode_lib->ms.UrgentBurstFactorCursor[k],
- &cursor_not_enough_urgent_latency_hiding);
mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
#ifdef __DML_VBA_DEBUG__
@@ -8254,20 +8350,20 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = true;
mode_lib->ms.support.avg_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0);
+ = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
mode_lib->ms.support.avg_non_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0);
+ = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_luma[k]
/ (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
@@ -8287,7 +8383,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#endif
}
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4 && mode_lib->ms.BytePerPixelC[k] > 0) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x && mode_lib->ms.BytePerPixelC[k] > 0) {
outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_chroma[k]
/ (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
@@ -8460,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
{
mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
calculate_hostvm_inefficiency_factor(
&s->HostVMInefficiencyFactor,
&s->HostVMInefficiencyFactorPrefetch,
@@ -8501,10 +8596,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
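+ /*
+  * DCN3.x QoS: size the reordering buffer per memory channel using the
+  * worst of the pixel-only, pixel+VM and VM-only out-of-order return
+  * allowances.
+  */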
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
CalculateExtraLatency(
display_cfg,
mode_lib->ip.rob_buffer_size_kbytes,
- 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
s->ReorderingBytes,
mode_lib->ms.DCFCLK,
mode_lib->ms.FabricClock,
@@ -8540,7 +8640,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
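+ /*
+  * TWait now also absorbs the G6 temp-read blackout window for
+  * non-phantom planes on DRR-enabled streams (zero otherwise).
+  */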
mode_lib->ms.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory);
+ mode_lib->ms.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
@@ -8587,7 +8689,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->MaxVStartup = s->MaximumVStartup[k];
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
@@ -8669,8 +8770,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
+ dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
}
}
@@ -8683,20 +8784,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.VRatioInPrefetchSupported = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__) {
+ if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
mode_lib->ms.support.VRatioInPrefetchSupported = false;
+ dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
}
}
- s->AnyLinesForVMOrRowTooLarge = false;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.LinesForDPTERow[k] >= 16 || mode_lib->ms.LinesForVM[k] >= 32) {
- s->AnyLinesForVMOrRowTooLarge = true;
- }
- }
-
// Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok
if (mode_lib->ms.support.PrefetchSupported) {
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
@@ -8845,6 +8941,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.dpte_row_height_chroma[k],
mode_lib->ms.use_one_row_for_frame_flip[k],
mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
s->per_pipe_flip_bytes[k],
mode_lib->ms.meta_row_bytes[k],
s->meta_row_height_luma[k],
@@ -8932,6 +9029,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->mSOCParameters.USRRetrainingLatency = 0;
s->mSOCParameters.SMNLatency = 0;
s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
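+ /*
+  * Extra DCN4x-only QoS inputs consumed by the stutter watermark
+  * adjustment in CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport().
+  */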
+ s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
+ s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
CalculateWatermarks_params->display_cfg = display_cfg;
CalculateWatermarks_params->USRRetrainingRequired = false;
@@ -8951,7 +9051,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- //CalculateWatermarks_params->LBBitPerPixel = 57; // FIXME_STAGE2, need a new ip param?
CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
@@ -8979,29 +9078,24 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
- }
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
+ }
+ dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
// End of Prefetch Check
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
+ mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us;
//Re-ordering Buffer Support Check
- mode_lib->ms.support.max_urgent_latency_us
- = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0);
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= mode_lib->ms.support.max_urgent_latency_us) {
+ / mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= s->mSOCParameters.max_urgent_latency_us) {
mode_lib->ms.support.ROBSupport = true;
} else {
mode_lib->ms.support.ROBSupport = false;
}
} else {
- if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn3.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
+ if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
mode_lib->ms.support.ROBSupport = true;
} else {
mode_lib->ms.support.ROBSupport = false;
@@ -9024,15 +9118,12 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.dram_change_vactive_det_fill_delay_us);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.max_urgent_latency_us);
+ dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
#endif
/*Mode Support, Voltage State and SOC Configuration*/
{
- // s->dram_clock_change_support = 1;
- // s->f_clock_change_support = 1;
-
if (mode_lib->ms.support.ScaleRatioAndTapsSupport
&& mode_lib->ms.support.SourceFormatPixelAndScanSupport
&& mode_lib->ms.support.ViewportSizeSupport
@@ -9043,9 +9134,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&& !mode_lib->ms.support.ExceededMultistreamSlots
&& !mode_lib->ms.support.MSOOrODMSplitWithNonDPLink
&& !mode_lib->ms.support.NotEnoughLanesForMSO
- //&& mode_lib->ms.support.LinkCapacitySupport == true // FIXME_STAGE2
&& !mode_lib->ms.support.P2IWith420
- && !mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP
&& !mode_lib->ms.support.DSC422NativeNotSupported
&& mode_lib->ms.support.DSCSlicesODMModeSupported
&& !mode_lib->ms.support.NotEnoughDSCUnits
@@ -9113,7 +9202,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#if defined(__DML_VBA_DEBUG__)
if (!mode_lib->ms.support.ModeSupport)
- dml2_print_dml_mode_support_info(&mode_lib->ms.support, true);
+ dml2_print_mode_support_info(&mode_lib->ms.support, true);
dml2_printf("DML::%s: --- DONE --- \n", __func__);
#endif
@@ -9132,6 +9221,10 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
*in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
+
+ for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
+ dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+
dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
return result;
@@ -9373,11 +9466,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
} else {
dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma, 1.0));
}
-#ifdef DML_VM_PTE_ADL_PATCH_EN
if (dpte_groups_per_row_luma_ub <= 2) {
dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
}
-#endif
dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
@@ -9406,11 +9497,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
} else {
dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma, 1.0));
}
-#ifdef DML_VM_PTE_ADL_PATCH_EN
if (dpte_groups_per_row_chroma_ub <= 2) {
dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
}
-#endif
dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
@@ -9535,17 +9624,16 @@ static void CalculateVMGroupAndRequestTimes(
line_time = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz;
-#ifdef DML_VM_PTE_ADL_PATCH_EN
- if (num_group_per_lower_vm_stage_flip <= 2) {
- num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage_flip + 1;
- }
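+ /* Guard the divisions: a lower VM stage may have no groups at all. */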
+ if (num_group_per_lower_vm_stage_pref > 0)
+ TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref;
+ else
+ TimePerVMGroupVBlank[k] = 0;
+
+ if (num_group_per_lower_vm_stage_flip > 0)
+ TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip;
+ else
+ TimePerVMGroupFlip[k] = 0;
- if (num_group_per_lower_vm_stage_pref <= 2) {
- num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage_pref + 1;
- }
-#endif
- TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref;
- TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip;
if (num_req_per_lower_vm_stage_pref > 0)
TimePerVMRequestVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_req_per_lower_vm_stage_pref;
else
@@ -9599,10 +9687,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
bool FoundCriticalSurface = false;
double LastZ8StutterPeriod = 0;
- unsigned int SwathSizeCriticalSurface;
- unsigned int LastChunkOfSwathSize;
- unsigned int MissingPartOfLastSwathOfDETSize;
-
memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
@@ -9777,7 +9861,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
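+ /*
+  * The post-ROB part of the burst drains at the slower of the
+  * 64 bytes/cycle DCFCLK path and the (DCC-adjusted) return bandwidth,
+  * so take math_min2 of the two rates, not math_max2.
+  */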
l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
/ (p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
(*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer)
- / math_max2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
+ / math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
*p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
@@ -9871,19 +9955,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
- SwathSizeCriticalSurface = (unsigned int)(l->BytePerPixelYCriticalSurface * l->SwathHeightYCriticalSurface * math_ceil2(l->SwathWidthYCriticalSurface, l->BlockWidth256BytesYCriticalSurface));
- LastChunkOfSwathSize = SwathSizeCriticalSurface % (p->PixelChunkSizeInKByte * 1024);
- MissingPartOfLastSwathOfDETSize = (unsigned int)(math_ceil2(l->DETBufferSizeYCriticalSurface, SwathSizeCriticalSurface) - l->DETBufferSizeYCriticalSurface);
-
- *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface && (LastChunkOfSwathSize > 0) &&
- (LastChunkOfSwathSize <= 4096) && (MissingPartOfLastSwathOfDETSize > 0) && (MissingPartOfLastSwathOfDETSize <= LastChunkOfSwathSize));
+ *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SwathSizeCriticalSurface = %u\n", __func__, SwathSizeCriticalSurface);
dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: LastChunkOfSwathSize = %u\n", __func__, LastChunkOfSwathSize);
- dml2_printf("DML::%s: MissingPartOfLastSwathOfDETSize = %u\n", __func__, MissingPartOfLastSwathOfDETSize);
dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#endif
}
@@ -9928,14 +10004,14 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.num_active_pipes = dml_get_num_active_pipes(display_cfg->num_planes, cfg_support_info);
dml_calc_pipe_plane_mapping(cfg_support_info, mode_lib->mp.pipe_plane);
- mode_lib->mp.Dcfclk = programming->min_clocks.dcn4.active.dcfclk_khz / 1000.0;
- mode_lib->mp.FabricClock = programming->min_clocks.dcn4.active.fclk_khz / 1000.0;
- mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
- mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4.active.uclk_khz / 1000.0;
- mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4.dpprefclk_khz / 1000.0;
- s->SOCCLK = (double)programming->min_clocks.dcn4.socclk_khz / 1000;
- mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params);
- mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table);
+ mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0;
+ mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0;
+ mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
+ mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0;
+ mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0;
+ s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000;
+ mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4x.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
+ mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table);
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
@@ -9970,18 +10046,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
- mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4.dppclk_khz / 1000.0;
+ mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
dml2_assert(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4.dscclk_khz / 1000.0;
+ mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
- mode_lib->mp.Dispclk = programming->min_clocks.dcn4.dispclk_khz / 1000.0;
- mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4.deepsleep_dcfclk_khz / 1000.0;
+ mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
+ mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
dml2_assert(mode_lib->mp.Dcfclk > 0);
dml2_assert(mode_lib->mp.FabricClock > 0);
@@ -10462,11 +10538,16 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
}
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
CalculateExtraLatency(
display_cfg,
mode_lib->ip.rob_buffer_size_kbytes,
- 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles,
- s->ReorderBytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
mode_lib->mp.Dcfclk,
mode_lib->mp.FabricClock,
mode_lib->ip.pixel_chunk_size_kbytes,
@@ -10551,32 +10632,32 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.excess_vactive_fill_bw_c);
mode_lib->mp.UrgentLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->mp.TripToMemory = CalculateTripToMemory(
mode_lib->mp.UrgentLatency,
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->mp.TripToMemory = math_max2(mode_lib->mp.UrgentLatency, mode_lib->mp.TripToMemory);
@@ -10585,38 +10666,40 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.meta_trip_adder_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.meta_trip_adder_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
bool cursor_not_enough_urgent_latency_hiding = 0;
- double line_time_us;
+ double line_time_us = 0.0;
- calculate_cursor_req_attributes(
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- display_cfg->plane_descriptors[k].cursor.cursor_bpp,
+ line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
+ ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
+ calculate_cursor_req_attributes(
+ display_cfg->plane_descriptors[k].cursor.cursor_width,
+ display_cfg->plane_descriptors[k].cursor.cursor_bpp,
- // output
- &s->cursor_lines_per_chunk[k],
- &s->cursor_bytes_per_line[k],
- &s->cursor_bytes_per_chunk[k],
- &s->cursor_bytes[k]);
-
- line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
-
- calculate_cursor_urgent_burst_factor(
- mode_lib->ip.cursor_buffer_size,
- display_cfg->plane_descriptors[k].cursor.cursor_width,
- s->cursor_bytes_per_chunk[k],
- s->cursor_lines_per_chunk[k],
- line_time_us,
- mode_lib->mp.UrgentLatency,
+ // output
+ &s->cursor_lines_per_chunk[k],
+ &s->cursor_bytes_per_line[k],
+ &s->cursor_bytes_per_chunk[k],
+ &s->cursor_bytes[k]);
+
+ calculate_cursor_urgent_burst_factor(
+ mode_lib->ip.cursor_buffer_size,
+ display_cfg->plane_descriptors[k].cursor.cursor_width,
+ s->cursor_bytes_per_chunk[k],
+ s->cursor_lines_per_chunk[k],
+ line_time_us,
+ mode_lib->mp.UrgentLatency,
- // output
- &mode_lib->mp.UrgentBurstFactorCursor[k],
- &cursor_not_enough_urgent_latency_hiding);
+ // output
+ &mode_lib->mp.UrgentBurstFactorCursor[k],
+ &cursor_not_enough_urgent_latency_hiding);
+ }
mode_lib->mp.UrgentBurstFactorCursorPre[k] = mode_lib->mp.UrgentBurstFactorCursor[k];
CalculateUrgentBurstFactor(
@@ -10676,7 +10759,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->mp.UrgentLatency,
- mode_lib->mp.TripToMemory);
+ mode_lib->mp.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
myPipe->Dppclk = mode_lib->mp.Dppclk[k];
myPipe->Dispclk = mode_lib->mp.Dispclk;
@@ -10722,7 +10807,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
CalculatePrefetchSchedule_params->VStartup = s->MaxVStartupLines[k];
- CalculatePrefetchSchedule_params->MaxVStartup = s->MaxVStartupLines[k];
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
@@ -10808,9 +10892,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.dst_y_prefetch[k] < 2)
s->DestinationLineTimesForPrefetchLessThan2 = true;
- if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ ||
- mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__)
+ if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
s->VRatioPrefetchMoreThanMax = true;
+ dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ }
if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
@@ -10994,6 +11082,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.dpte_row_height_chroma[k],
mode_lib->mp.use_one_row_for_frame_flip[k],
mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
s->per_pipe_flip_bytes[k],
mode_lib->mp.meta_row_bytes[k],
mode_lib->mp.meta_row_height[k],
@@ -11143,6 +11232,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->mmSOCParameters.USRRetrainingLatency = 0;
s->mmSOCParameters.SMNLatency = 0;
s->mmSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
+ s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
+ s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mmSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
CalculateWatermarks_params->display_cfg = display_cfg;
CalculateWatermarks_params->USRRetrainingRequired = false;
@@ -11162,7 +11254,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateWatermarks_params->DETBufferSizeC = mode_lib->mp.DETBufferSizeC;
CalculateWatermarks_params->SwathHeightY = mode_lib->mp.SwathHeightY;
CalculateWatermarks_params->SwathHeightC = mode_lib->mp.SwathHeightC;
- //CalculateWatermarks_params->LBBitPerPixel = 57; //FIXME_STAGE2
CalculateWatermarks_params->SwathWidthY = mode_lib->mp.SwathWidthY;
CalculateWatermarks_params->SwathWidthC = mode_lib->mp.SwathWidthC;
CalculateWatermarks_params->BytePerPixelDETY = mode_lib->mp.BytePerPixelInDETY;
@@ -11203,6 +11294,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
}
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines);
+
dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
@@ -11491,9 +11584,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params)
{
+ dml2_printf("DML::%s: ------------- START ----------\n", __func__);
bool result = dml_core_mode_programming(in_out_params);
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
dml2_printf("DML::%s: result = %0d\n", __func__, result);
dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
return result;
@@ -12186,10 +12279,11 @@ void dml2_core_calcs_get_pipe_regs(const struct dml2_display_cfg *display_cfg,
void dml2_core_calcs_get_global_sync_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, union dml2_global_sync_programming *out, int pipe_index)
{
- out->dcn4.vready_offset_pixels = dml_get_vready_offset(mode_lib, pipe_index);
- out->dcn4.vstartup_lines = dml_get_vstartup_calculated(mode_lib, pipe_index);
- out->dcn4.vupdate_offset_pixels = dml_get_vupdate_offset(mode_lib, pipe_index);
- out->dcn4.vupdate_vupdate_width_pixels = dml_get_vupdate_width(mode_lib, pipe_index);
+ out->dcn4x.vready_offset_pixels = dml_get_vready_offset(mode_lib, pipe_index);
+ out->dcn4x.vstartup_lines = dml_get_vstartup_calculated(mode_lib, pipe_index);
+ out->dcn4x.vupdate_offset_pixels = dml_get_vupdate_offset(mode_lib, pipe_index);
+ out->dcn4x.vupdate_vupdate_width_pixels = dml_get_vupdate_width(mode_lib, pipe_index);
+ out->dcn4x.pstate_keepout_start_lines = dml_get_pstate_keepout_dst_lines(mode_lib, pipe_index);
}
void dml2_core_calcs_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index)
@@ -12197,6 +12291,25 @@ void dml2_core_calcs_get_stream_programming(const struct dml2_core_internal_disp
dml2_core_calcs_get_global_sync_programming(mode_lib, &out->global_sync, pipe_index);
}
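+/*
+ * Fill the DMUB FAMS2 global config from IP caps when stage 3 mode
+ * analysis decided FAMS2 is required; leave it disabled otherwise.
+ */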
+void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib,
+ const struct display_configuation_with_meta *display_cfg,
+ struct dmub_cmd_fams2_global_config *fams2_global_config)
+{
+ fams2_global_config->features.bits.enable = display_cfg->stage3.fams2_required;
+
+ if (fams2_global_config->features.bits.enable) {
+ fams2_global_config->features.bits.enable_stall_recovery = true;
+ fams2_global_config->features.bits.allow_delay_check_mode = FAMS2_ALLOW_DELAY_CHECK_FROM_START;
+
+ fams2_global_config->max_allow_delay_us = mode_lib->ip_caps.fams2.max_allow_delay_us;
+ fams2_global_config->lock_wait_time_us = mode_lib->ip_caps.fams2.lock_timeout_us;
+ fams2_global_config->recovery_timeout_us = mode_lib->ip_caps.fams2.recovery_timeout_us;
+ fams2_global_config->hwfq_flip_programming_delay_us = mode_lib->ip_caps.fams2.flip_programming_delay_us;
+
+ fams2_global_config->num_streams = display_cfg->display_config.num_streams;
+ }
+}
+
void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib,
const struct display_configuation_with_meta *display_cfg,
struct dmub_fams2_stream_static_state *fams2_programming,
@@ -12209,6 +12322,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
unsigned int i;
+ if (display_cfg->display_config.overrides.all_streams_blanked) {
+ /* stream is blanked, so do nothing */
+ return;
+ }
+
/* from display configuration */
fams2_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
fams2_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
@@ -12368,6 +12486,7 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
{
double phantom_processing_delay_pix;
unsigned int phantom_processing_delay_lines;
+ unsigned int phantom_min_v_active_lines;
unsigned int phantom_v_active_lines;
unsigned int phantom_v_startup_lines;
unsigned int phantom_v_blank_lines;
@@ -12377,14 +12496,16 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
phantom_processing_delay_pix = (double)((mode_lib->ip.subvp_fw_processing_delay_us + mode_lib->ip.subvp_pstate_allow_width_us) *
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.pixel_clock_khz / 1000));
phantom_processing_delay_lines = (unsigned int)(phantom_processing_delay_pix / (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total);
- dml2_core_shared_div_rem(phantom_processing_delay_pix,
+	dml2_core_utils_div_rem(phantom_processing_delay_pix,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[plane_index].stream_index].timing.h_total,
&rem);
if (rem)
phantom_processing_delay_lines++;
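/* Editor's illustrative example: 10 us of delay at a 500 MHz pixel clock
 * is 5000 pixels; with h_total = 4000 that is 1 full line plus a nonzero
 * remainder, so the count rounds up to 2 lines. */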
phantom_v_startup_lines = dml_get_plane_max_vstartup_lines(mode_lib, plane_index);
- phantom_v_active_lines = phantom_processing_delay_lines + dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index) + mode_lib->ip.subvp_swath_height_margin_lines;
+ phantom_min_v_active_lines = (unsigned int)math_ceil((double)dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index) /
+ display_cfg->plane_descriptors[plane_index].composition.scaler_info.plane0.v_ratio);
+ phantom_v_active_lines = phantom_processing_delay_lines + phantom_min_v_active_lines + mode_lib->ip.subvp_swath_height_margin_lines;
// phantom_vblank = max(vbp(vstartup) + vactive + vfp(always 1) + vsync(can be 1), main_vblank)
phantom_v_blank_lines = phantom_v_startup_lines + 1 + 1;
@@ -12396,8 +12517,8 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
// phantom_vtotal = vactive + vblank
out->phantom_v_total = phantom_v_active_lines + phantom_v_blank_lines;
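/* Editor's illustrative example (ignoring the main-vblank clamp noted
 * above): vstartup = 20 gives 22 blank lines, so 40 phantom active lines
 * would yield a phantom vtotal of 62. */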
- out->phantom_min_v_active = dml_get_plane_subviewport_lines_needed_in_mall(mode_lib, plane_index);
- out->phantom_v_startup = dml_get_plane_max_vstartup_lines(mode_lib, plane_index);
+ out->phantom_min_v_active = phantom_min_v_active_lines;
+ out->phantom_v_startup = phantom_v_startup_lines;
out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
#if defined(__DML_VBA_DEBUG__)
@@ -12418,7 +12539,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.ScaleRatioAndTapsSupport = mode_lib->ms.support.ScaleRatioAndTapsSupport;
out->informative.mode_support_info.SourceFormatPixelAndScanSupport = mode_lib->ms.support.SourceFormatPixelAndScanSupport;
out->informative.mode_support_info.P2IWith420 = mode_lib->ms.support.P2IWith420;
- out->informative.mode_support_info.DSCOnlyIfNecessaryWithBPP = mode_lib->ms.support.DSCOnlyIfNecessaryWithBPP;
+ out->informative.mode_support_info.DSCOnlyIfNecessaryWithBPP = false;
out->informative.mode_support_info.DSC422NativeNotSupported = mode_lib->ms.support.DSC422NativeNotSupported;
out->informative.mode_support_info.LinkRateDoesNotMatchDPVersion = mode_lib->ms.support.LinkRateDoesNotMatchDPVersion;
out->informative.mode_support_info.LinkRateForMultistreamNotIndicated = mode_lib->ms.support.LinkRateForMultistreamNotIndicated;
@@ -12611,7 +12732,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.cstate_max_cap_mode = dml_get_cstate_max_cap_mode(mode_lib);
- out->min_clocks.dcn4.dpprefclk_khz = (int unsigned)dml_get_global_dppclk_khz(mode_lib);
+ out->min_clocks.dcn4x.dpprefclk_khz = (int unsigned)dml_get_global_dppclk_khz(mode_lib);
out->informative.qos.max_active_fclk_change_latency_supported = dml_get_fclk_change_latency(mode_lib);
@@ -12724,13 +12845,13 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
}
}
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0);
+ out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
+ / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
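+	/* Editor's note (sketch of the expression above, not from this patch):
+	 * cycle counts divided by a clock in MHz yield microseconds, so this is
+	 * non_urgent_latency(uclk cycles)/uclk_MHz * (1 + umc_margin%)
+	 *   + mall_overhead(fclk cycles)/fclk_MHz
+	 *   + round_trip(fclk cycles)/fclk_MHz * (1 + fabric_margin%),
+	 * with each latency term carrying its own domain margin. */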
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
/ mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
out->informative.misc.ROBUrgencyAvoidance = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index b280ab573fbb..df2d1550a14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_CORE_DCN4_CALCS_H__
#define __DML2_CORE_DCN4_CALCS_H__
@@ -30,6 +29,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
void dml2_core_calcs_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_fams2_stream_static_state *fams2_programming, enum dml2_uclk_pstate_support_method pstate_method, int plane_index);
+void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_cmd_fams2_global_config *fams2_global_config);
void dml2_core_calcs_get_dpte_row_height(unsigned int *dpte_row_height, struct dml2_core_internal_display_mode_lib *mode_lib, bool is_plane1, enum dml2_source_format_class SourcePixelFormat, enum dml2_swizzle_mode SurfaceTiling, enum dml2_rotation_angle ScanDirection, unsigned int pitch, unsigned int GPUVMMinPageSizeKBytes);
void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
index f56abe9ab919..28394de02885 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_core_factory.h"
#include "dml2_core_dcn4.h"
#include "dml2_external_lib_deps.h"
@@ -11,7 +10,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
{
bool result = false;
-	if (out == 0)
+	if (!out)
return false;
memset(out, 0, sizeof(struct dml2_core_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
index 53636a8f52aa..411c514fe65c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_CORE_FACTORY_H__
#define __DML2_CORE_FACTORY_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
index 81f0a6f19f87..8f3c1c0b1cc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.c
@@ -779,7 +779,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
- mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params);
+ mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
@@ -1776,32 +1776,32 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
#endif
mode_lib->ms.UrgLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
mode_lib->ms.FabricClock,
mode_lib->ms.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].urgent_ramp_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->ms.TripToMemory = CalculateTripToMemory(
mode_lib->ms.UrgLatency,
mode_lib->ms.FabricClock,
mode_lib->ms.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->ms.TripToMemory = math_max2(mode_lib->ms.UrgLatency, mode_lib->ms.TripToMemory);
@@ -1995,21 +1995,21 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = true;
mode_lib->ms.support.avg_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0);
+ = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
mode_lib->ms.support.avg_non_urgent_latency_us
- = (mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_average_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_average_transport_latency_margin / 100.0);
+ = (mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].average_latency_when_non_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_average_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
double outstanding_latency_us = 0;
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_luma[k]
/ (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
@@ -2029,7 +2029,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
#endif
}
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4 && mode_lib->ms.BytePerPixelC[k] > 0) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x && mode_lib->ms.BytePerPixelC[k] > 0) {
outstanding_latency_us = (mode_lib->soc.max_outstanding_reqs * mode_lib->ms.support.request_size_bytes_chroma[k]
/ (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes));
@@ -2242,11 +2242,15 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
}
double min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
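+	/* Editor's note: for dcn3-type QoS the reordering allowance scales with
+	 * DRAM channel count times the worst of the three per-channel
+	 * out-of-order return sizes (illustrative only: 16 channels at 256
+	 * bytes each would give 4096 bytes). */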
CalculateExtraLatency(
display_cfg,
mode_lib->ip.rob_buffer_size_kbytes,
- 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
s->ReorderingBytes,
mode_lib->ms.DCFCLK,
mode_lib->ms.FabricClock,
@@ -2713,13 +2717,13 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
//Re-ordering Buffer Support Check
mode_lib->ms.support.max_urgent_latency_us
- = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0);
+ = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_urgent_uclk_cycles / mode_lib->ms.uclk_freq_mhz
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
/ mode_lib->ms.support.non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= mode_lib->ms.support.max_urgent_latency_us) {
mode_lib->ms.support.ROBSupport = true;
@@ -2727,7 +2731,7 @@ bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_ou
mode_lib->ms.support.ROBSupport = false;
}
} else {
- if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn3.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
+ if (mode_lib->ip.rob_buffer_size_kbytes * 1024 >= mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles * mode_lib->soc.fabric_datapath_to_dcn_data_return_bytes) {
mode_lib->ms.support.ROBSupport = true;
} else {
mode_lib->ms.support.ROBSupport = false;
@@ -5050,7 +5054,7 @@ static void calculate_mcache_row_bytes(
unsigned int meta_per_mvmpg_per_channel_ub = 0;
if (p->gpuvm_enable) {
- meta_per_mvmpg_per_channel = (float)vmpg_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float)vmpg_bytes / (float)256 / p->num_chans;
//but using the est_blk_per_vmpg between 2 and 4, to be less pessimistic
if (p->surf_vert && vmpg_bytes > blk_bytes) {
@@ -5059,7 +5063,7 @@ static void calculate_mcache_row_bytes(
*p->dcc_dram_bw_nom_overhead_factor = 1 + math_max2(1.0 / 256.0, math_ceil2(meta_per_mvmpg_per_channel, p->mem_word_bytes) / (256 * meta_per_mvmpg_per_channel)); // dcc_dr_oh_nom
} else {
- meta_per_mvmpg_per_channel = (float)blk_bytes / 256 / p->num_chans;
+ meta_per_mvmpg_per_channel = (float)blk_bytes / (float)256 / p->num_chans;
if (!p->surf_vert)
*p->dcc_dram_bw_nom_overhead_factor = 1 + 1.0 / 256.0;
@@ -5881,7 +5885,7 @@ static double CalculateUrgentLatency(
double fabric_max_transport_latency_margin)
{
double urgent_latency = 0;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
urgent_latency = (df_qos_response_time_fclk_cycles + mall_overhead_fclk_cycles) / FabricClock
+ max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1 + fabric_max_transport_latency_margin / 100.0)
+ urgent_ramp_uclk_cycles / uclk_freq_mhz * (1 + umc_urgent_ramp_latency_margin / 100.0);
@@ -5892,7 +5896,7 @@ static double CalculateUrgentLatency(
}
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
@@ -5922,7 +5926,7 @@ static double CalculateTripToMemory(
double fabric_max_transport_latency_margin)
{
double trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
trip_to_memory_us = mall_overhead_fclk_cycles / FabricClock
+ max_round_trip_to_furthest_cs_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
+ trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
@@ -5931,7 +5935,7 @@ static double CalculateTripToMemory(
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
@@ -5961,7 +5965,7 @@ static double CalculateMetaTripToMemory(
double fabric_max_transport_latency_margin)
{
double meta_trip_to_memory_us;
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
meta_trip_to_memory_us = meta_trip_to_memory_fclk_cycles / FabricClock * (1.0 + fabric_max_transport_latency_margin / 100.0)
+ meta_trip_to_memory_uclk_cycles / uclk_freq_mhz * (1.0 + umc_max_latency_margin / 100.0);
} else {
@@ -5969,7 +5973,7 @@ static double CalculateMetaTripToMemory(
}
#ifdef __DML_VBA_DEBUG__
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
@@ -6460,8 +6464,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = l->MaximumSwathHeightC[k] / 2;
l->RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
l->RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;;
+ p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
if (p->SwathHeightC[k] == 0)
@@ -7165,7 +7169,7 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
//the delivery cycles are DispClk cycles per line * number of lines * number of slices
tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width / 2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
- tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / 9.0;
+ tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
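+	/* Editor's note (inference, hedged): drain rate is bytes per line over
+	 * the ceil(width/2) dispclk cycles spent per delivered line; the old
+	 * hard-coded 9.0 appears to match only tdlut_mpc_width of 17 or 18,
+	 * since ceil(17/2) == ceil(18/2) == 9. */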
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
@@ -7485,7 +7489,7 @@ static void CalculateExtraLatency(
max_request_size_bytes = request_size_bytes_chroma[k];
}
- if (qos_type == dml2_qos_param_type_dcn4) {
+ if (qos_type == dml2_qos_param_type_dcn4x) {
*ExtraLatency_sr = dchub_arb_to_ret_delay / DCFCLK;
*ExtraLatency = *ExtraLatency_sr;
if (max_oustanding_when_urgent_expected)
@@ -7501,11 +7505,14 @@ static void CalculateExtraLatency(
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
+ dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
dml2_printf("DML::%s: max_oustanding_when_urgent_expected=%u\n", __func__, max_oustanding_when_urgent_expected);
dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
@@ -7739,7 +7746,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
-
s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
@@ -9304,6 +9310,10 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
dpte_groups_per_row_luma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_luma_ub[k] / (double)dpte_group_width_luma, 1.0));
}
+ if (dpte_groups_per_row_luma_ub <= 2) {
+ dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
+ }
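+	/* Editor's note: e.g. a computed count of 1 or 2 becomes 2 or 3; the
+	 * chroma path below applies the same bump. */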
+
dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
@@ -9332,6 +9342,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
} else {
dpte_groups_per_row_chroma_ub = (unsigned int)(math_ceil2((double)p->dpte_row_width_chroma_ub[k] / (double)dpte_group_width_chroma, 1.0));
}
+ if (dpte_groups_per_row_chroma_ub <= 2) {
+ dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
+ }
dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
@@ -9386,8 +9399,8 @@ static void CalculateVMGroupAndRequestTimes(
double TimePerVMRequestVBlank[],
double TimePerVMRequestFlip[])
{
- unsigned int num_group_per_lower_vm_stage = 0;
- unsigned int num_req_per_lower_vm_stage = 0;
+ unsigned int num_group_per_lower_vm_stage = 1;
+ unsigned int num_req_per_lower_vm_stage = 1;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
@@ -9451,6 +9464,14 @@ static void CalculateVMGroupAndRequestTimes(
double line_time = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz;
+ if (num_group_per_lower_vm_stage_flip <= 2) {
+ num_group_per_lower_vm_stage_flip = num_group_per_lower_vm_stage_flip + 1;
+ }
+
+ if (num_group_per_lower_vm_stage_pref <= 2) {
+ num_group_per_lower_vm_stage_pref = num_group_per_lower_vm_stage_pref + 1;
+ }
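+	/* Editor's note (assumption, rationale not stated in this patch):
+	 * padding counts of <= 2 before the divisions below keeps the
+	 * per-group VM times conservative for very small fetches. */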
+
TimePerVMGroupVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_group_per_lower_vm_stage_pref;
TimePerVMGroupFlip[k] = dst_y_per_vm_flip[k] * line_time / num_group_per_lower_vm_stage_flip;
TimePerVMRequestVBlank[k] = dst_y_per_vm_vblank[k] * line_time / num_req_per_lower_vm_stage_pref;
@@ -9814,14 +9835,14 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
mode_lib->mp.num_active_pipes = dml_get_num_active_pipes(display_cfg->num_planes, cfg_support_info);
dml_calc_pipe_plane_mapping(cfg_support_info, mode_lib->mp.pipe_plane);
- mode_lib->mp.Dcfclk = programming->min_clocks.dcn4.active.dcfclk_khz / 1000.0;
- mode_lib->mp.FabricClock = programming->min_clocks.dcn4.active.fclk_khz / 1000.0;
- mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
- mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4.active.uclk_khz / 1000.0;
- mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4.dpprefclk_khz / 1000.0;
- s->SOCCLK = (double)programming->min_clocks.dcn4.socclk_khz / 1000;
- mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params);
- mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4.active.uclk_khz, &mode_lib->soc.clk_table);
+ mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0;
+ mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0;
+ mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config);
+ mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0;
+ mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0;
+ s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000;
+ mode_lib->mp.qos_param_index = get_qos_param_index(programming->min_clocks.dcn4x.active.uclk_khz, mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params);
+ mode_lib->mp.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table);
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
@@ -9856,18 +9877,18 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
- mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4.dppclk_khz / 1000.0;
+ mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
dml2_assert(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4.dscclk_khz / 1000.0;
+ mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
- mode_lib->mp.Dispclk = programming->min_clocks.dcn4.dispclk_khz / 1000.0;
- mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4.deepsleep_dcfclk_khz / 1000.0;
+ mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
+ mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
dml2_assert(mode_lib->mp.Dcfclk > 0);
dml2_assert(mode_lib->mp.FabricClock > 0);
@@ -10388,11 +10409,16 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
}
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
CalculateExtraLatency(
display_cfg,
mode_lib->ip.rob_buffer_size_kbytes,
- 0, //mode_lib->soc.round_trip_ping_latency_dcfclk_cycles,
- s->ReorderBytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
mode_lib->mp.Dcfclk,
mode_lib->mp.FabricClock,
mode_lib->ip.pixel_chunk_size_kbytes,
@@ -10465,32 +10491,32 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
mode_lib->mp.WritebackDelay[k] = mode_lib->mp.WritebackDelay[j];
mode_lib->mp.UrgentLatency = CalculateUrgentLatency(
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_pixel_vm_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.base_latency_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_pixel_vm_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.base_latency_vm_us,
mode_lib->soc.do_urgent_latency_adjustment,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_fclk_us,
- mode_lib->soc.qos_parameters.qos_params.dcn3.urgent_latency_us.scaling_factor_mhz,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_fclk_us,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_latency_us.scaling_factor_mhz,
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.df_qos_response_time_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_urgent_ramp_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].urgent_ramp_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_urgent_ramp_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->mp.TripToMemory = CalculateTripToMemory(
mode_lib->mp.UrgentLatency,
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
mode_lib->mp.TripToMemory = math_max2(mode_lib->mp.UrgentLatency, mode_lib->mp.TripToMemory);
@@ -10499,10 +10525,10 @@ bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_e
mode_lib->mp.FabricClock,
mode_lib->mp.uclk_freq_mhz,
mode_lib->soc.qos_parameters.qos_type,
- mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.meta_trip_adder_fclk_cycles,
- mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin,
- mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin);
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].meta_trip_to_memory_uclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.meta_trip_adder_fclk_cycles,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin,
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
calculate_cursor_req_attributes(
@@ -11945,14 +11971,14 @@ void dml2_core_shared_get_pipe_regs(const struct dml2_display_cfg *display_cfg,
void dml2_core_shared_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index)
{
- // out->min_clocks.dcn4.dscclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); // FIXME_STAGE2
- // out->min_clocks.dcn4.dtbclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
- // out->min_clocks.dcn4.phyclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
-
- out->global_sync.dcn4.vready_offset_pixels = mode_lib->mp.VReadyOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4.vstartup_lines = mode_lib->mp.VStartup[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4.vupdate_offset_pixels = mode_lib->mp.VUpdateOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
- out->global_sync.dcn4.vupdate_vupdate_width_pixels = mode_lib->mp.VUpdateWidthPix[mode_lib->mp.pipe_plane[pipe_index]];
+ // out->min_clocks.dcn4x.dscclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000); // FIXME_STAGE2
+ // out->min_clocks.dcn4x.dtbclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
+ // out->min_clocks.dcn4x.phyclk_khz = (unsigned int)(dml_get_dscclk_calculated(mode_lib, pipe_index) * 1000);
+
+ out->global_sync.dcn4x.vready_offset_pixels = mode_lib->mp.VReadyOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
+ out->global_sync.dcn4x.vstartup_lines = mode_lib->mp.VStartup[mode_lib->mp.pipe_plane[pipe_index]];
+ out->global_sync.dcn4x.vupdate_offset_pixels = mode_lib->mp.VUpdateOffsetPix[mode_lib->mp.pipe_plane[pipe_index]];
+ out->global_sync.dcn4x.vupdate_vupdate_width_pixels = mode_lib->mp.VUpdateWidthPix[mode_lib->mp.pipe_plane[pipe_index]];
}
void dml2_core_shared_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_idx)
@@ -12255,7 +12281,7 @@ void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mo
out->informative.misc.cstate_max_cap_mode = mode_lib->mp.DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE;
- out->min_clocks.dcn4.dpprefclk_khz = (int unsigned)(mode_lib->mp.GlobalDPPCLK * 1000.0);
+ out->min_clocks.dcn4x.dpprefclk_khz = (int unsigned)(mode_lib->mp.GlobalDPPCLK * 1000.0);
out->informative.qos.max_active_fclk_change_latency_supported = mode_lib->mp.MaxActiveFCLKChangeLatencySupported;
@@ -12368,13 +12394,13 @@ void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mo
}
}
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4.fabric_max_transport_latency_margin / 100.0);
+ out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
+ / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4) {
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
/ mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
out->informative.misc.ROBUrgencyAvoidance = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h
deleted file mode 100644
index d76bda907ec8..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#ifndef __DML2_CORE_SHARED_H__
-#define __DML2_CORE_SHARED_H__
-
-#define __DML_VBA_DEBUG__
-#define __DML2_CALCS_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
-#define __DML2_CALCS_MAX_VRATIO_PRE_ENHANCE_PREFETCH_ACC__ 6.0 //<brief Prefetch schedule max vratio when enhance prefetch schedule acceleration is enabled and vstartup is earliest possible already
-#define __DML2_CALCS_DPP_INVALID__ 0
-#define __DML2_CALCS_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
-#define __DML2_CALCS_PIPE_NO_PLANE__ 99
-
-#include "dml2_core_shared_types.h"
-#include "dml2_internal_shared_types.h"
-
-double dml2_core_shared_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
-
-const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
-const char *dml2_core_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
-bool dml2_core_shared_is_420(enum dml2_source_format_class source_format);
-
-bool dml2_core_shared_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params);
-bool dml2_core_shared_mode_programming(struct dml2_core_calcs_mode_programming_ex *in_out_params);
-void dml2_core_shared_get_watermarks(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_watermark_regs *out);
-void dml2_core_shared_get_arb_params(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *out);
-void dml2_core_shared_get_pipe_regs(const struct dml2_display_cfg *display_cfg, struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_dchub_per_pipe_register_set *out, int pipe_index);
-void dml2_core_shared_get_stream_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_per_stream_programming *out, int pipe_index);
-void dml2_core_shared_get_mcache_allocation(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_mcache_surface_allocation *out, int plane_idx);
-void dml2_core_shared_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
-void dml2_core_shared_get_plane_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_plane_support_info *out, int plane_idx);
-void dml2_core_shared_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
-void dml2_core_shared_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out);
-void dml2_core_shared_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs, const struct dml2_get_cursor_dlg_reg *p);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 1343b744eeb3..cbdfbd5a0bde 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_CORE_SHARED_TYPES_H__
#define __DML2_CORE_SHARED_TYPES_H__
@@ -10,6 +9,15 @@
#include "dml_top_display_cfg_types.h"
#include "dml_top_types.h"
+#define __DML_VBA_DEBUG__
+#define __DML2_CALCS_MAX_VRATIO_PRE_OTO__ 4.0 //<brief max vratio for one-to-one prefetch bw scheduling
+#define __DML2_CALCS_MAX_VRATIO_PRE_EQU__ 6.0 //<brief max vratio for equalized prefetch bw scheduling
+#define __DML2_CALCS_MAX_VRATIO_PRE__ 8.0 //<brief max prefetch vratio register limit
+
+#define __DML2_CALCS_DPP_INVALID__ 0
+#define __DML2_CALCS_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calculation
+#define __DML2_CALCS_PIPE_NO_PLANE__ 99
+
struct dml2_core_ip_params {
unsigned int vblank_nom_default_us;
unsigned int remote_iommu_outstanding_translations;
@@ -70,6 +78,7 @@ struct dml2_core_ip_params {
unsigned int words_per_channel;
bool imall_supported;
unsigned int max_flip_time_us;
+ unsigned int max_flip_time_lines;
unsigned int subvp_swath_height_margin_lines;
unsigned int subvp_fw_processing_delay_us;
unsigned int subvp_pstate_allow_width_us;
@@ -782,6 +791,7 @@ struct dml2_core_internal_mode_program {
unsigned int VUpdateOffsetPix[DML2_MAX_PLANES];
unsigned int VUpdateWidthPix[DML2_MAX_PLANES];
unsigned int VReadyOffsetPix[DML2_MAX_PLANES];
+ unsigned int pstate_keepout_dst_lines[DML2_MAX_PLANES];
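+	/* Editor's note: filled by calculate_pstate_keepout_dst_lines() and
+	 * read back through dml_get_pstate_keepout_dst_lines(). */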
// Latency and Support
double MaxActiveFCLKChangeLatencySupported;
@@ -852,6 +862,9 @@ struct dml2_core_internal_SOCParametersList {
double USRRetrainingLatency;
double SMNLatency;
double g6_temp_read_blackout_us;
+ double max_urgent_latency_us;
+ double df_response_time_us;
+ enum dml2_qos_param_type qos_type;
};
struct dml2_core_calcs_mode_support_locals {
@@ -865,7 +878,7 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
- bool dummy_boolean[2];
+ bool dummy_boolean[3];
unsigned int dummy_integer[3];
unsigned int dummy_integer_array[36][DML2_MAX_PLANES];
enum dml2_odm_mode dummy_odm_mode[DML2_MAX_PLANES];
@@ -913,9 +926,7 @@ struct dml2_core_calcs_mode_support_locals {
double HostVMInefficiencyFactor;
double HostVMInefficiencyFactorPrefetch;
- unsigned int NextMaxVStartup;
unsigned int MaxVStartup;
- bool AnyLinesForVMOrRowTooLarge;
double PixelClockBackEndFactor;
unsigned int NumDSCUnitRequired;
@@ -975,7 +986,7 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int DSCFormatFactor;
struct dml2_core_internal_DmlPipe SurfaceParameters[DML2_MAX_PLANES];
- unsigned int ReorderBytes;
+ unsigned int ReorderingBytes;
double HostVMInefficiencyFactor;
double HostVMInefficiencyFactorPrefetch;
unsigned int TotalDCCActiveDPP;
@@ -1176,11 +1187,15 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double prefetch_bw_oto;
double Tvm_oto;
double Tr0_oto;
+ double Tvm_no_trip_oto;
+ double Tr0_no_trip_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingVM;
double TimeForFetchingRowInVBlank;
+ double dst_y_per_vm_no_trip_vblank;
+ double dst_y_per_row_no_trip_vblank;
double LinesToRequestPrefetchPixelData;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
@@ -1188,6 +1203,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double Tr0_trips_rounded;
double max_Tsw;
double Lsw_oto;
+ double Lsw_equ;
double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
@@ -1196,11 +1212,14 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double Tdmec;
double Tdmsks;
double prefetch_sw_bytes;
+ double total_row_bytes;
double prefetch_bw_pr;
double bytes_pp;
double dep_bytes;
double min_Lsw_oto;
+ double min_Lsw_equ;
double Tsw_est1;
+ double Tsw_est2;
double Tsw_est3;
double prefetch_bw1;
double prefetch_bw2;
@@ -1332,6 +1351,10 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double tmp_nom_adj_factor_p1;
double tmp_pref_adj_factor_p0;
double tmp_pref_adj_factor_p1;
+ double vm_row_bw;
+ double flip_and_active_bw;
+ double flip_and_prefetch_bw;
+ double active_and_excess_bw;
};
struct dml2_core_shared_calculate_peak_bandwidth_required_locals {
@@ -1688,7 +1711,6 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
enum dml2_output_format_class OutputFormat;
unsigned int MaxInterDCNTileRepeaters;
unsigned int VStartup;
- unsigned int MaxVStartup;
unsigned int HostVMMinPageSize;
bool DynamicMetadataEnable;
bool DynamicMetadataVMEnabled;
@@ -2010,6 +2032,7 @@ struct dml2_core_internal_scratch {
struct dml2_core_internal_display_mode_lib {
struct dml2_core_ip_params ip;
struct dml2_soc_bb soc;
+ struct dml2_ip_capabilities ip_caps;
//@brief Mode Support and Mode programming struct
// Used to hold input; intermediate and output of the calculations
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
new file mode 100644
index 000000000000..ab229e1598ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_core_utils.h"
+
+double dml2_core_utils_div_rem(double dividend, unsigned int divisor, unsigned int *remainder)
+{
+ *remainder = ((dividend / divisor) - (int)(dividend / divisor) > 0);
+ return dividend / divisor;
+}
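+/*
+ * Editor's note: despite the name, *remainder is only a 0/1 flag marking a
+ * nonzero fractional part, not a true modulus; callers test it for
+ * truthiness, as in the phantom-line rounding in dml2_core_dcn4_calcs.c.
+ */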
+
+const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
+{
+ switch (bw_type) {
+ case (dml2_core_internal_bw_sdp):
+ return("dml2_core_internal_bw_sdp");
+ case (dml2_core_internal_bw_dram):
+ return("dml2_core_internal_bw_dram");
+ case (dml2_core_internal_bw_max):
+ return("dml2_core_internal_bw_max");
+ default:
+ return("dml2_core_internal_bw_unknown");
+ }
+}
+
+bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
+{
+ bool val = false;
+
+ switch (source_format) {
+ case dml2_444_8:
+ val = 0;
+ break;
+ case dml2_444_16:
+ val = 0;
+ break;
+ case dml2_444_32:
+ val = 0;
+ break;
+ case dml2_444_64:
+ val = 0;
+ break;
+ case dml2_420_8:
+ val = 1;
+ break;
+ case dml2_420_10:
+ val = 1;
+ break;
+ case dml2_420_12:
+ val = 1;
+ break;
+ case dml2_rgbe_alpha:
+ val = 0;
+ break;
+ case dml2_rgbe:
+ val = 0;
+ break;
+ case dml2_mono_8:
+ val = 0;
+ break;
+ case dml2_mono_16:
+ val = 0;
+ break;
+ default:
+ DML2_ASSERT(0);
+ break;
+ }
+ return val;
+}
+
+void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
+{
+ dml2_printf("DML: ===================================== \n");
+ dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
+ dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
+ dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ if (!fail_only || support->ViewportSizeSupport == 0)
+ dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
+ dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
+ dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
+ dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
+ dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ if (!fail_only || support->ExceededMultistreamSlots == 1)
+ dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
+ dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ if (!fail_only || support->NotEnoughLanesForMSO == 1)
+ dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ if (!fail_only || support->P2IWith420 == 1)
+ dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ if (!fail_only || support->DSC422NativeNotSupported == 1)
+ dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ if (!fail_only || support->DSCSlicesODMModeSupported == 0)
+ dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ if (!fail_only || support->NotEnoughDSCUnits == 1)
+ dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ if (!fail_only || support->NotEnoughDSCSlices == 1)
+ dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
+ dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
+ dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
+ dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
+ dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
+ dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
+ dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ if (!fail_only || support->ROBSupport == 0)
+ dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ if (!fail_only || support->OutstandingRequestsSupport == 0)
+ dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
+ dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
+ dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ if (!fail_only || support->TotalAvailablePipesSupport == 0)
+ dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ if (!fail_only || support->NumberOfOTGSupport == 0)
+ dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
+ dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ if (!fail_only || support->NumberOfDP2p0Support == 0)
+ dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ if (!fail_only || support->EnoughWritebackUnits == 0)
+ dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
+ dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ if (!fail_only || support->WritebackLatencySupport == 0)
+ dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ if (!fail_only || support->CursorSupport == 0)
+ dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ if (!fail_only || support->PitchSupport == 0)
+ dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ if (!fail_only || support->ViewportExceedsSurface == 1)
+ dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ if (!fail_only || support->PrefetchSupported == 0)
+ dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
+ dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ if (!fail_only || support->AvgBandwidthSupport == 0)
+ dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ if (!fail_only || support->DynamicMetadataSupported == 0)
+ dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ if (!fail_only || support->VRatioInPrefetchSupported == 0)
+ dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ if (!fail_only || support->ExceededMALLSize == 1)
+ dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ if (!fail_only || support->g6_temp_read_support == 0)
+ dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ if (!fail_only || support->ImmediateFlipSupport == 0)
+ dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ if (!fail_only || support->LinkCapacitySupport == 0)
+ dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+
+ if (!fail_only || support->ModeSupport == 0)
+ dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ dml2_printf("DML: ===================================== \n");
+}
+
+const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
+{
+ switch (dml2_core_internal_soc_state_type) {
+ case dml2_core_internal_soc_state_sys_idle:
+ return "dml2_core_internal_soc_state_sys_idle";
+ case dml2_core_internal_soc_state_sys_active:
+ return "dml2_core_internal_soc_state_sys_active";
+ case dml2_core_internal_soc_state_svp_prefetch:
+ return "dml2_core_internal_soc_state_svp_prefetch";
+ case dml2_core_internal_soc_state_max:
+ default:
+ return "dml2_core_internal_soc_state_unknown";
+ }
+}
+
+void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
+{
+ for (unsigned int k = 0; k < display_cfg->num_planes; k++) {
+ double bpc = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.bpc;
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_disable) {
+ switch (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format) {
+ case dml2_444:
+ out_bpp[k] = bpc * 3;
+ break;
+ case dml2_s422:
+ case dml2_n422:
+ out_bpp[k] = bpc * 2;
+ break;
+ case dml2_420:
+ default:
+ out_bpp[k] = bpc * 1.5;
+ break;
+ }
+ } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable == dml2_dsc_enable) {
+ out_bpp[k] = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.dsc_compressed_bpp_x16 / 16;
+ } else {
+ out_bpp[k] = 0;
+ }
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
+#endif
+ }
+}
+
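+/*
+ * Round num to a multiple of 'multiple', up or down; a multiple of 0 returns
+ * num unchanged, e.g. round_to_multiple(7, 4, true) == 8 and
+ * round_to_multiple(7, 4, false) == 4.
+ */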
+unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int multiple, bool up)
+{
+ unsigned int remainder;
+
+ if (multiple == 0)
+ return num;
+
+ remainder = num % multiple;
+ if (remainder == 0)
+ return num;
+
+ if (up)
+ return (num + multiple - remainder);
+ else
+ return (num - remainder);
+}
+
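+/* The active pipe count is the sum of DPPs used over all planes. */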
+unsigned int dml2_core_util_get_num_active_pipes(unsigned int num_planes, const struct core_display_cfg_support_info *cfg_support_info)
+{
+ unsigned int num_active_pipes = 0;
+
+ for (unsigned int k = 0; k < num_planes; k++) {
+ num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
+ }
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
+#endif
+ return num_active_pipes;
+}
+
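+/*
+ * Build the pipe-to-plane map: a plane using N DPPs claims N consecutive
+ * pipe slots and unused slots keep __DML2_CALCS_PIPE_NO_PLANE__, e.g.
+ * dpps_used = {2, 1} yields pipe_plane = {0, 0, 1, ...}.
+ */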
+void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane)
+{
+ unsigned int pipe_idx = 0;
+
+ for (unsigned int k = 0; k < DML2_MAX_PLANES; ++k) {
+ pipe_plane[k] = __DML2_CALCS_PIPE_NO_PLANE__;
+ }
+
+ for (unsigned int plane_idx = 0; plane_idx < DML2_MAX_PLANES; plane_idx++) {
+ for (int i = 0; i < cfg_support_info->plane_support_info[plane_idx].dpps_used; i++) {
+ pipe_plane[pipe_idx] = plane_idx;
+ pipe_idx++;
+ }
+ }
+}
+
+bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg)
+{
+ bool is_phantom = false;
+
+ if (plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe ||
+ plane_cfg->overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return) {
+ is_phantom = true;
+ }
+
+ return is_phantom;
+}
+
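+/*
+ * Tile block size in bytes for the given swizzle mode; an unknown mode
+ * asserts and falls back to 256 bytes.
+ */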
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
+{
+ switch (sw_mode) {
+ case dml2_sw_linear:
+ case dml2_sw_256b_2d:
+ case dml2_gfx11_sw_linear:
+ return 256;
+ case dml2_sw_4kb_2d:
+ return 4096;
+ case dml2_sw_64kb_2d:
+ case dml2_gfx11_sw_64kb_d:
+ case dml2_gfx11_sw_64kb_d_t:
+ case dml2_gfx11_sw_64kb_d_x:
+ case dml2_gfx11_sw_64kb_r_x:
+ return 65536;
+ case dml2_sw_256kb_2d:
+ case dml2_gfx11_sw_256kb_d_x:
+ case dml2_gfx11_sw_256kb_r_x:
+ return 262144;
+ default:
+ DML2_ASSERT(0);
+ return 256;
+ }
+}
+
+bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan)
+{
+ return Scan == dml2_rotation_90 || Scan == dml2_rotation_270;
+}
+
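+/*
+ * Map a swizzle mode to its GFX IP version: the dml2_sw_* modes are GFX12
+ * style, the dml2_gfx11_sw_* modes are GFX11.
+ */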
+unsigned int dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
+{
+ unsigned int version = 0;
+
+ if (sw_mode == dml2_sw_linear ||
+ sw_mode == dml2_sw_256b_2d ||
+ sw_mode == dml2_sw_4kb_2d ||
+ sw_mode == dml2_sw_64kb_2d ||
+ sw_mode == dml2_sw_256kb_2d) {
+ version = 12;
+ } else if (sw_mode == dml2_gfx11_sw_linear ||
+ sw_mode == dml2_gfx11_sw_64kb_d ||
+ sw_mode == dml2_gfx11_sw_64kb_d_t ||
+ sw_mode == dml2_gfx11_sw_64kb_d_x ||
+ sw_mode == dml2_gfx11_sw_64kb_r_x ||
+ sw_mode == dml2_gfx11_sw_256kb_d_x ||
+ sw_mode == dml2_gfx11_sw_256kb_r_x) {
+ version = 11;
+ } else {
+ dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML2_ASSERT(0);
+ }
+
+ return version;
+}
+
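+/*
+ * Return the index of the highest uclk DPM level whose minimum_uclk_khz does
+ * not exceed uclk_freq_khz. Levels are in ascending order and a zero minimum
+ * terminates the table, e.g. with minimums {200000, 400000, 800000} a
+ * 500000 kHz uclk maps to index 1.
+ */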
+unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params)
+{
+ unsigned int i;
+ unsigned int index = 0;
+
+ for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
+ dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+
+ if (i == 0)
+ index = 0;
+ else
+ index = i - 1;
+
+ if (uclk_freq_khz < per_uclk_dpm_params[i].minimum_uclk_khz ||
+ per_uclk_dpm_params[i].minimum_uclk_khz == 0) {
+ break;
+ }
+ }
+#if defined(__DML_VBA_DEBUG__)
+ dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
+ dml2_printf("DML::%s: index = %d\n", __func__, index);
+#endif
+ return index;
+}
+
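+/*
+ * Look up the DPM index whose uclk exactly equals uclk_freq_khz; the value
+ * is expected to come from the clock table, so a miss trips the assert.
+ */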
+unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
+{
+ unsigned int i;
+ bool clk_entry_found = false;
+
+ for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
+ dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+
+ if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
+ clk_entry_found = true;
+ break;
+ }
+ }
+
+ DML2_ASSERT(clk_entry_found);
+#if defined(__DML_VBA_DEBUG__)
+ dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ dml2_printf("DML::%s: index = %d\n", __func__, i);
+#endif
+ return i;
+}
+
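+/* Dual-plane sources are the three 4:2:0 formats plus RGBE with alpha. */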
+bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
+{
+ bool ret_val = false;
+
+ if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
+ ret_val = true;
+
+ return ret_val;
+}
+
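+/* For non-zero a, return an approximate log2(a) minus subtrahend; 0 maps to 0. */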
+unsigned int dml2_core_utils_log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend)
+{
+ if (a == 0)
+ return 0;
+
+ return (math_log2_approx(a) - subtrahend);
+}
+
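+/*
+ * A phantom stream copies its main stream, with the vertical timing taken
+ * from the SubVP metadata and DRR forced off.
+ */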
+static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters *phantom, const struct dml2_stream_parameters *main,
+ const struct dml2_implicit_svp_meta *meta)
+{
+ memcpy(phantom, main, sizeof(struct dml2_stream_parameters));
+
+ phantom->timing.v_total = meta->v_total;
+ phantom->timing.v_active = meta->v_active;
+ phantom->timing.v_front_porch = meta->v_front_porch;
+ phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
+ phantom->timing.drr_config.enabled = false;
+}
+
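+/*
+ * A phantom plane mirrors its main plane but targets the phantom stream:
+ * the viewport height shrinks to the scaled phantom v_active (clamped to the
+ * main viewport), and flips, cursors, dynamic metadata and TDLUT are disabled.
+ */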
+static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *phantom, const struct dml2_plane_parameters *main,
+ const struct dml2_stream_parameters *phantom_stream, int phantom_stream_index, const struct dml2_stream_parameters *main_stream)
+{
+ memcpy(phantom, main, sizeof(struct dml2_plane_parameters));
+
+ phantom->stream_index = phantom_stream_index;
+ phantom->overrides.refresh_from_mall = dml2_refresh_from_mall_mode_override_force_disable;
+ phantom->overrides.legacy_svp_config = dml2_svp_mode_override_phantom_pipe_no_data_return;
+ phantom->composition.viewport.plane0.height = (long int unsigned) math_min2(math_ceil2(
+ (double)main->composition.scaler_info.plane0.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+ (double)main->composition.viewport.plane0.height);
+ phantom->composition.viewport.plane1.height = (long int unsigned) math_min2(math_ceil2(
+ (double)main->composition.scaler_info.plane1.v_ratio * (double)phantom_stream->timing.v_active, 16.0),
+ (double)main->composition.viewport.plane1.height);
+ phantom->immediate_flip = false;
+ phantom->dynamic_meta_data.enable = false;
+ phantom->cursor.num_cursors = 0;
+ phantom->cursor.cursor_width = 0;
+ phantom->tdlut.setup_for_tdlut = false;
+}
+
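+/*
+ * Expand an implicit SubVP config: for every stream and plane with valid SVP
+ * metadata, append a phantom copy to the expanded display config and record
+ * the main<->phantom index mappings in scratch.
+ */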
+void dml2_core_utils_expand_implict_subvp(const struct display_configuation_with_meta *display_cfg, struct dml2_display_cfg *svp_expanded_display_cfg,
+ struct dml2_core_scratch *scratch)
+{
+ unsigned int stream_index, plane_index;
+ const struct dml2_plane_parameters *main_plane;
+ const struct dml2_stream_parameters *main_stream;
+ const struct dml2_stream_parameters *phantom_stream;
+
+ memcpy(svp_expanded_display_cfg, &display_cfg->display_config, sizeof(struct dml2_display_cfg));
+ memset(scratch->main_stream_index_from_svp_stream_index, 0, sizeof(int) * DML2_MAX_PLANES);
+ memset(scratch->svp_stream_index_from_main_stream_index, 0, sizeof(int) * DML2_MAX_PLANES);
+ memset(scratch->main_plane_index_to_phantom_plane_index, 0, sizeof(int) * DML2_MAX_PLANES);
+
+ if (!display_cfg->display_config.overrides.enable_subvp_implicit_pmo)
+ return;
+
+ /* disable unbounded requesting for all planes until stage 3 has been performed */
+ if (!display_cfg->stage3.performed) {
+ svp_expanded_display_cfg->overrides.hw.force_unbounded_requesting.enable = true;
+ svp_expanded_display_cfg->overrides.hw.force_unbounded_requesting.value = false;
+ }
+ // Create the phantom streams
+ for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
+ main_stream = &display_cfg->display_config.stream_descriptors[stream_index];
+ scratch->main_stream_index_from_svp_stream_index[stream_index] = stream_index;
+ scratch->svp_stream_index_from_main_stream_index[stream_index] = stream_index;
+
+ if (display_cfg->stage3.stream_svp_meta[stream_index].valid) {
+ // Create the phantom stream
+ create_phantom_stream_from_main_stream(&svp_expanded_display_cfg->stream_descriptors[svp_expanded_display_cfg->num_streams],
+ main_stream, &display_cfg->stage3.stream_svp_meta[stream_index]);
+
+ // Associate this phantom stream to the main stream
+ scratch->main_stream_index_from_svp_stream_index[svp_expanded_display_cfg->num_streams] = stream_index;
+ scratch->svp_stream_index_from_main_stream_index[stream_index] = svp_expanded_display_cfg->num_streams;
+
+ // Increment num streams
+ svp_expanded_display_cfg->num_streams++;
+ }
+ }
+
+ // Create the phantom planes
+ for (plane_index = 0; plane_index < display_cfg->display_config.num_planes; plane_index++) {
+ main_plane = &display_cfg->display_config.plane_descriptors[plane_index];
+
+ if (display_cfg->stage3.stream_svp_meta[main_plane->stream_index].valid) {
+ main_stream = &display_cfg->display_config.stream_descriptors[main_plane->stream_index];
+ phantom_stream = &svp_expanded_display_cfg->stream_descriptors[scratch->svp_stream_index_from_main_stream_index[main_plane->stream_index]];
+ create_phantom_plane_from_main_plane(&svp_expanded_display_cfg->plane_descriptors[svp_expanded_display_cfg->num_planes],
+ main_plane, phantom_stream, scratch->svp_stream_index_from_main_stream_index[main_plane->stream_index], main_stream);
+
+ // Associate this phantom plane to the main plane
+ scratch->phantom_plane_index_to_main_plane_index[svp_expanded_display_cfg->num_planes] = plane_index;
+ scratch->main_plane_index_to_phantom_plane_index[plane_index] = svp_expanded_display_cfg->num_planes;
+
+ // Increment num planes
+ svp_expanded_display_cfg->num_planes++;
+
+ // Adjust the main plane settings
+ svp_expanded_display_cfg->plane_descriptors[plane_index].overrides.legacy_svp_config = dml2_svp_mode_override_main_pipe;
+ }
+ }
+}
+
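+/*
+ * Any real output encoder (DP, eDP, HDMI, HDMI FRL) needs a stream encoder;
+ * dml2_none does not.
+ */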
+bool dml2_core_utils_is_stream_encoder_required(const struct dml2_stream_parameters *stream_descriptor)
+{
+ switch (stream_descriptor->output.output_encoder) {
+ case dml2_dp:
+ case dml2_dp2p0:
+ case dml2_edp:
+ case dml2_hdmi:
+ case dml2_hdmifrl:
+ return true;
+ case dml2_none:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_encoder_dsc_capable(const struct dml2_stream_parameters *stream_descriptor)
+{
+ switch (stream_descriptor->output.output_encoder) {
+ case dml2_dp:
+ case dml2_dp2p0:
+ case dml2_edp:
+ case dml2_hdmifrl:
+ return true;
+ case dml2_hdmi:
+ case dml2_none:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_dio_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+ switch (stream_descriptor->output.output_encoder) {
+ case dml2_dp:
+ case dml2_edp:
+ return true;
+ case dml2_dp2p0:
+ case dml2_hdmi:
+ case dml2_hdmifrl:
+ case dml2_none:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_hpo_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+ switch (stream_descriptor->output.output_encoder) {
+ case dml2_dp2p0:
+ return true;
+ case dml2_dp:
+ case dml2_edp:
+ case dml2_hdmi:
+ case dml2_hdmifrl:
+ case dml2_none:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_dp_encoder(const struct dml2_stream_parameters *stream_descriptor)
+{
+ return dml2_core_utils_is_dio_dp_encoder(stream_descriptor)
+ || dml2_core_utils_is_hpo_dp_encoder(stream_descriptor);
+}
+
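+/* HBR, HBR2 and HBR3 use 8b/10b channel coding; the UHBR rates use 128b/132b. */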
+bool dml2_core_utils_is_dp_8b_10b_link_rate(enum dml2_output_link_dp_rate rate)
+{
+ switch (rate) {
+ case dml2_dp_rate_hbr:
+ case dml2_dp_rate_hbr2:
+ case dml2_dp_rate_hbr3:
+ return true;
+ case dml2_dp_rate_na:
+ case dml2_dp_rate_uhbr10:
+ case dml2_dp_rate_uhbr13p5:
+ case dml2_dp_rate_uhbr20:
+ default:
+ return false;
+ }
+}
+
+bool dml2_core_utils_is_dp_128b_132b_link_rate(enum dml2_output_link_dp_rate rate)
+{
+ switch (rate) {
+ case dml2_dp_rate_uhbr10:
+ case dml2_dp_rate_uhbr13p5:
+ case dml2_dp_rate_uhbr20:
+ return true;
+ case dml2_dp_rate_hbr:
+ case dml2_dp_rate_hbr2:
+ case dml2_dp_rate_hbr3:
+ case dml2_dp_rate_na:
+ default:
+ return false;
+ }
+}
+
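+/* Split covers ODM 1-to-2 and the MSO 1-to-2/1-to-4 modes; combine modes are not splits. */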
+bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode)
+{
+ switch (odm_mode) {
+ case dml2_odm_mode_split_1to2:
+ case dml2_odm_mode_mso_1to2:
+ case dml2_odm_mode_mso_1to4:
+ return true;
+ case dml2_odm_mode_auto:
+ case dml2_odm_mode_bypass:
+ case dml2_odm_mode_combine_2to1:
+ case dml2_odm_mode_combine_3to1:
+ case dml2_odm_mode_combine_4to1:
+ default:
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
new file mode 100644
index 000000000000..a5cc6a07167a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_CORE_UTILS_H__
+#define __DML2_CORE_UTILS_H__
+#include "dml2_internal_shared_types.h"
+#include "dml2_debug.h"
+#include "lib_float_math.h"
+
+double dml2_core_utils_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
+const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
+bool dml2_core_utils_is_420(enum dml2_source_format_class source_format);
+void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only);
+const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
+void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg);
+unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int multiple, bool up);
+unsigned int dml2_core_util_get_num_active_pipes(unsigned int num_planes, const struct core_display_cfg_support_info *cfg_support_info);
+void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane);
+bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode);
+bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
+unsigned int dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
+unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params);
+unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table);
+bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format);
+unsigned int dml2_core_utils_log_and_substract_if_non_zero(unsigned int a, unsigned int subtrahend);
+void dml2_core_utils_expand_implict_subvp(const struct display_configuation_with_meta *display_cfg, struct dml2_display_cfg *svp_expanded_display_cfg,
+ struct dml2_core_scratch *scratch);
+bool dml2_core_utils_is_stream_encoder_required(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_encoder_dsc_capable(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_dp_encoder(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_dio_dp_encoder(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_hpo_dp_encoder(const struct dml2_stream_parameters *stream_descriptor);
+bool dml2_core_utils_is_dp_8b_10b_link_rate(enum dml2_output_link_dp_rate rate);
+bool dml2_core_utils_is_dp_128b_132b_link_rate(enum dml2_output_link_dp_rate rate);
+bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode);
+
+#endif /* __DML2_CORE_UTILS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index c94c4f32c957..8869ea089312 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_dpmm_dcn4.h"
#include "dml2_internal_shared_types.h"
#include "dml_top_types.h"
@@ -83,9 +82,9 @@ static void calculate_system_active_minimums(struct dml2_dpmm_map_mode_to_soc_dp
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
- in_out->programming->min_clocks.dcn4.active.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
- in_out->programming->min_clocks.dcn4.active.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
- in_out->programming->min_clocks.dcn4.active.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
+ in_out->programming->min_clocks.dcn4x.active.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.active.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.active.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
@@ -123,9 +122,9 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
- in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
- in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
- in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
@@ -147,9 +146,9 @@ static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_
get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
- in_out->programming->min_clocks.dcn4.idle.uclk_khz = dml_round_up(min_uclk_avg > min_uclk_latency ? min_uclk_avg : min_uclk_latency);
- in_out->programming->min_clocks.dcn4.idle.fclk_khz = dml_round_up(min_fclk_avg > min_fclk_latency ? min_fclk_avg : min_fclk_latency);
- in_out->programming->min_clocks.dcn4.idle.dcfclk_khz = dml_round_up(min_dcfclk_avg > min_dcfclk_latency ? min_dcfclk_avg : min_dcfclk_latency);
+ in_out->programming->min_clocks.dcn4x.idle.uclk_khz = dml_round_up(min_uclk_avg > min_uclk_latency ? min_uclk_avg : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.idle.fclk_khz = dml_round_up(min_fclk_avg > min_fclk_latency ? min_fclk_avg : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.idle.dcfclk_khz = dml_round_up(min_dcfclk_avg > min_dcfclk_latency ? min_dcfclk_avg : min_dcfclk_latency);
}
static bool add_margin_and_round_to_dfs_grainularity(double clock_khz, double margin, unsigned long vco_freq_khz, unsigned long *rounded_khz, uint32_t *divider_id)
@@ -204,6 +203,26 @@ static bool add_margin_and_round_to_dfs_grainularity(double clock_khz, double ma
return true;
}
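+/*
+ * Without a DFS, DISPCLK, DPPREFCLK and DTBREFCLK all derive from one shared
+ * PLL: the PLL runs at the largest requested clock (at least 600 MHz, rounded
+ * up to the next MHz) and each output clock divides it by an integer capped
+ * at 32.
+ */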
+static bool round_to_non_dfs_granularity(unsigned long dispclk_khz, unsigned long dpprefclk_khz, unsigned long dtbrefclk_khz,
+ unsigned long *rounded_dispclk_khz, unsigned long *rounded_dpprefclk_khz, unsigned long *rounded_dtbrefclk_khz)
+{
+ unsigned long pll_frequency_khz;
+
+ pll_frequency_khz = (unsigned long) math_max2(600000, math_ceil2(math_max3(dispclk_khz, dpprefclk_khz, dtbrefclk_khz), 1000));
+
+ *rounded_dispclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dispclk_khz, 32);
+
+ *rounded_dpprefclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dpprefclk_khz, 32);
+
+ if (dtbrefclk_khz > 0) {
+ *rounded_dtbrefclk_khz = pll_frequency_khz / (unsigned long) math_min2(pll_frequency_khz / dtbrefclk_khz, 32);
+ } else {
+ *rounded_dtbrefclk_khz = 0;
+ }
+
+ return true;
+}
+
static bool round_up_and_copy_to_next_dpm(unsigned long min_value, unsigned long *rounded_value, const struct dml2_clk_table *clock_table)
{
bool result = false;
@@ -233,25 +252,25 @@ static bool map_soc_min_clocks_to_dpm_fine_grained(struct dml2_display_cfg_progr
{
bool result;
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.dcfclk_khz, &state_table->dcfclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.dcfclk_khz, &state_table->dcfclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.fclk_khz, &state_table->fclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.fclk_khz, &state_table->fclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.active.uclk_khz, &state_table->uclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.active.uclk_khz, &state_table->uclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.dcfclk_khz, &state_table->dcfclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.dcfclk_khz, &state_table->dcfclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.fclk_khz, &state_table->fclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.fclk_khz, &state_table->fclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.svp_prefetch.uclk_khz, &state_table->uclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch.uclk_khz, &state_table->uclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.dcfclk_khz, &state_table->dcfclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.dcfclk_khz, &state_table->dcfclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.fclk_khz, &state_table->fclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.fclk_khz, &state_table->fclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.idle.uclk_khz, &state_table->uclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.uclk_khz, &state_table->uclk);
return result;
}
@@ -263,12 +282,12 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro
result = false;
for (index = 0; index < state_table->uclk.num_clk_values; index++) {
- if (display_cfg->min_clocks.dcn4.active.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] &&
- display_cfg->min_clocks.dcn4.active.fclk_khz <= state_table->fclk.clk_values_khz[index] &&
- display_cfg->min_clocks.dcn4.active.uclk_khz <= state_table->uclk.clk_values_khz[index]) {
- display_cfg->min_clocks.dcn4.active.dcfclk_khz = state_table->dcfclk.clk_values_khz[index];
- display_cfg->min_clocks.dcn4.active.fclk_khz = state_table->fclk.clk_values_khz[index];
- display_cfg->min_clocks.dcn4.active.uclk_khz = state_table->uclk.clk_values_khz[index];
+ if (display_cfg->min_clocks.dcn4x.active.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] &&
+ display_cfg->min_clocks.dcn4x.active.fclk_khz <= state_table->fclk.clk_values_khz[index] &&
+ display_cfg->min_clocks.dcn4x.active.uclk_khz <= state_table->uclk.clk_values_khz[index]) {
+ display_cfg->min_clocks.dcn4x.active.dcfclk_khz = state_table->dcfclk.clk_values_khz[index];
+ display_cfg->min_clocks.dcn4x.active.fclk_khz = state_table->fclk.clk_values_khz[index];
+ display_cfg->min_clocks.dcn4x.active.uclk_khz = state_table->uclk.clk_values_khz[index];
result = true;
break;
}
@@ -277,12 +296,12 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro
if (result) {
result = false;
for (index = 0; index < state_table->uclk.num_clk_values; index++) {
- if (display_cfg->min_clocks.dcn4.idle.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] &&
- display_cfg->min_clocks.dcn4.idle.fclk_khz <= state_table->fclk.clk_values_khz[index] &&
- display_cfg->min_clocks.dcn4.idle.uclk_khz <= state_table->uclk.clk_values_khz[index]) {
- display_cfg->min_clocks.dcn4.idle.dcfclk_khz = state_table->dcfclk.clk_values_khz[index];
- display_cfg->min_clocks.dcn4.idle.fclk_khz = state_table->fclk.clk_values_khz[index];
- display_cfg->min_clocks.dcn4.idle.uclk_khz = state_table->uclk.clk_values_khz[index];
+ if (display_cfg->min_clocks.dcn4x.idle.dcfclk_khz <= state_table->dcfclk.clk_values_khz[index] &&
+ display_cfg->min_clocks.dcn4x.idle.fclk_khz <= state_table->fclk.clk_values_khz[index] &&
+ display_cfg->min_clocks.dcn4x.idle.uclk_khz <= state_table->uclk.clk_values_khz[index]) {
+ display_cfg->min_clocks.dcn4x.idle.dcfclk_khz = state_table->dcfclk.clk_values_khz[index];
+ display_cfg->min_clocks.dcn4x.idle.fclk_khz = state_table->fclk.clk_values_khz[index];
+ display_cfg->min_clocks.dcn4x.idle.uclk_khz = state_table->uclk.clk_values_khz[index];
result = true;
break;
}
@@ -290,9 +309,9 @@ static bool map_soc_min_clocks_to_dpm_coarse_grained(struct dml2_display_cfg_pro
}
// SVP is not supported on any coarse grained SoCs
- display_cfg->min_clocks.dcn4.svp_prefetch.dcfclk_khz = 0;
- display_cfg->min_clocks.dcn4.svp_prefetch.fclk_khz = 0;
- display_cfg->min_clocks.dcn4.svp_prefetch.uclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch.fclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch.uclk_khz = 0;
return result;
}
@@ -325,30 +344,30 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
result = map_soc_min_clocks_to_dpm_coarse_grained(display_cfg, state_table);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dispclk_khz, &state_table->dispclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.deepsleep_dcfclk_khz, &state_table->dcfclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk);
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
if (result)
- result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4.dppclk_khz, &state_table->dppclk);
+ result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk);
}
for (i = 0; i < display_cfg->display_config.num_streams; i++) {
if (result)
- result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dscclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.dscclk_khz, &state_table->dscclk);
+ result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dscclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.dscclk_khz, &state_table->dscclk);
if (result)
- result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dtbclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.dtbclk_khz, &state_table->dtbclk);
+ result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].dtbclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.dtbclk_khz, &state_table->dtbclk);
if (result)
- result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].phyclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4.phyclk_khz, &state_table->phyclk);
+ result = round_up_and_copy_to_next_dpm(mode_support_result->per_stream[i].phyclk_khz, &display_cfg->stream_programming[i].min_clocks.dcn4x.phyclk_khz, &state_table->phyclk);
}
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dpprefclk_khz, &state_table->dppclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dpprefclk_khz, &state_table->dppclk);
if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4.dtbrefclk_khz, &state_table->dtbclk);
+ result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dtbrefclk_khz, &state_table->dtbclk);
return result;
}
@@ -516,15 +535,15 @@ static bool determine_power_management_features_with_fams(struct dml2_dpmm_map_m
static void clamp_uclk_to_max(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
- in_out->programming->min_clocks.dcn4.active.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
- in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
- in_out->programming->min_clocks.dcn4.idle.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.active.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.idle.uclk_khz = in_out->soc_bb->clk_table.uclk.clk_values_khz[in_out->soc_bb->clk_table.uclk.num_clk_values - 1];
}
static void clamp_fclk_to_max(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
{
- in_out->programming->min_clocks.dcn4.active.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];
- in_out->programming->min_clocks.dcn4.idle.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.active.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];
+ in_out->programming->min_clocks.dcn4x.idle.fclk_khz = in_out->soc_bb->clk_table.fclk.clk_values_khz[in_out->soc_bb->clk_table.fclk.num_clk_values - 1];
}
static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
@@ -540,14 +559,14 @@ static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_o
// In NV4, there's no support for FCLK or DCFCLK DPM change before SVP prefetch starts, therefore
// active minimums must be boosted to prefetch minimums
- if (in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz > in_out->programming->min_clocks.dcn4.active.uclk_khz)
- in_out->programming->min_clocks.dcn4.active.uclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.uclk_khz;
+ if (in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz > in_out->programming->min_clocks.dcn4x.active.uclk_khz)
+ in_out->programming->min_clocks.dcn4x.active.uclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz;
- if (in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz > in_out->programming->min_clocks.dcn4.active.fclk_khz)
- in_out->programming->min_clocks.dcn4.active.fclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.fclk_khz;
+ if (in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz > in_out->programming->min_clocks.dcn4x.active.fclk_khz)
+ in_out->programming->min_clocks.dcn4x.active.fclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz;
- if (in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz > in_out->programming->min_clocks.dcn4.active.dcfclk_khz)
- in_out->programming->min_clocks.dcn4.active.dcfclk_khz = in_out->programming->min_clocks.dcn4.svp_prefetch.dcfclk_khz;
+ if (in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz > in_out->programming->min_clocks.dcn4x.active.dcfclk_khz)
+ in_out->programming->min_clocks.dcn4x.active.dcfclk_khz = in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz;
// need some massaging for the dispclk ramping cases:
dispclk_khz = mode_support_result->global.dispclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0) * (1.0 + in_out->ip->dispclk_ramp_margin_percent / 100.0);
@@ -556,34 +575,42 @@ static bool map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_o
// but still the required dispclk can be more than the maximum dispclk speed:
dispclk_khz = math_max2(dispclk_khz, mode_support_result->global.dispclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0));
- add_margin_and_round_to_dfs_grainularity(dispclk_khz, 0.0,
- (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dispclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dispclk_did);
-
// DPP Ref is always set to max of all DPP clocks
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
- if (in_out->programming->min_clocks.dcn4.dpprefclk_khz < mode_support_result->per_plane[i].dppclk_khz)
- in_out->programming->min_clocks.dcn4.dpprefclk_khz = mode_support_result->per_plane[i].dppclk_khz;
+ if (in_out->programming->min_clocks.dcn4x.dpprefclk_khz < mode_support_result->per_plane[i].dppclk_khz)
+ in_out->programming->min_clocks.dcn4x.dpprefclk_khz = mode_support_result->per_plane[i].dppclk_khz;
}
+ in_out->programming->min_clocks.dcn4x.dpprefclk_khz = (unsigned long) (in_out->programming->min_clocks.dcn4x.dpprefclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0));
- add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4.dpprefclk_khz, in_out->soc_bb->dcn_downspread_percent / 100.0,
- (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dpprefclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dpprefclk_did);
-
+ // DTB Ref is always set to max of all DTB clocks
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
- in_out->programming->plane_programming[i].min_clocks.dcn4.dppclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4.dpprefclk_khz / 255.0
- * math_ceil2(in_out->display_cfg->mode_support_result.per_plane[i].dppclk_khz * (1.0 + in_out->soc_bb->dcn_downspread_percent / 100.0) * 255.0 / in_out->programming->min_clocks.dcn4.dpprefclk_khz, 1.0));
+ if (in_out->programming->min_clocks.dcn4x.dtbrefclk_khz < mode_support_result->per_stream[i].dtbclk_khz)
+ in_out->programming->min_clocks.dcn4x.dtbrefclk_khz = mode_support_result->per_stream[i].dtbclk_khz;
}
+ in_out->programming->min_clocks.dcn4x.dtbrefclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4x.dtbrefclk_khz * (1 + in_out->soc_bb->dcn_downspread_percent / 100.0));
- // DTB Ref is always set to max of all DTB clocks
- for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
- if (in_out->programming->min_clocks.dcn4.dtbrefclk_khz < mode_support_result->per_stream[i].dtbclk_khz)
- in_out->programming->min_clocks.dcn4.dtbrefclk_khz = mode_support_result->per_stream[i].dtbclk_khz;
+ if (in_out->soc_bb->no_dfs) {
+ round_to_non_dfs_granularity((unsigned long)dispclk_khz, in_out->programming->min_clocks.dcn4x.dpprefclk_khz, in_out->programming->min_clocks.dcn4x.dtbrefclk_khz,
+ &in_out->programming->min_clocks.dcn4x.dispclk_khz, &in_out->programming->min_clocks.dcn4x.dpprefclk_khz, &in_out->programming->min_clocks.dcn4x.dtbrefclk_khz);
+ } else {
+ add_margin_and_round_to_dfs_grainularity(dispclk_khz, 0.0,
+ (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dispclk_khz, &in_out->programming->min_clocks.dcn4x.divider_ids.dispclk_did);
+
+ add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4x.dpprefclk_khz, 0.0,
+ (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dpprefclk_khz, &in_out->programming->min_clocks.dcn4x.divider_ids.dpprefclk_did);
+
+ add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4x.dtbrefclk_khz, 0.0,
+ (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4x.dtbrefclk_khz, &in_out->programming->min_clocks.dcn4x.divider_ids.dtbrefclk_did);
}
- add_margin_and_round_to_dfs_grainularity(in_out->programming->min_clocks.dcn4.dtbrefclk_khz, in_out->soc_bb->dcn_downspread_percent / 100.0,
- (unsigned long)(in_out->soc_bb->dispclk_dppclk_vco_speed_mhz * 1000), &in_out->programming->min_clocks.dcn4.dtbrefclk_khz, &in_out->programming->min_clocks.dcn4.divider_ids.dtbrefclk_did);
- in_out->programming->min_clocks.dcn4.deepsleep_dcfclk_khz = mode_support_result->global.dcfclk_deepsleep_khz;
- in_out->programming->min_clocks.dcn4.socclk_khz = mode_support_result->global.socclk_khz;
+ for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
+ in_out->programming->plane_programming[i].min_clocks.dcn4x.dppclk_khz = (unsigned long)(in_out->programming->min_clocks.dcn4x.dpprefclk_khz / 255.0
+ * math_ceil2(in_out->display_cfg->mode_support_result.per_plane[i].dppclk_khz * (1.0 + in_out->soc_bb->dcn_downspread_percent / 100.0) * 255.0 / in_out->programming->min_clocks.dcn4x.dpprefclk_khz, 1.0));
+ }
+
+ in_out->programming->min_clocks.dcn4x.deepsleep_dcfclk_khz = mode_support_result->global.dcfclk_deepsleep_khz;
+ in_out->programming->min_clocks.dcn4x.socclk_khz = mode_support_result->global.socclk_khz;
result = map_min_clocks_to_dpm(mode_support_result, in_out->programming, &in_out->soc_bb->clk_table);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index 3afb69dfd040..b165c58dfd11 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_DPMM_DCN4_H__
#define __DML2_DPMM_DCN4_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index 2c983daf2dad..3861bc6c9621 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_dpmm_factory.h"
#include "dml2_dpmm_dcn4.h"
#include "dml2_external_lib_deps.h"
@@ -21,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
{
bool result = false;
- if (!out)
+ if (out == 0)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
index 80b44b4c2e68..20ba2e446f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_DPMM_FACTORY_H__
#define __DML2_DPMM_FACTORY_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index 5d8887ac766d..f4b1a7d02d42 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_mcg_dcn4.h"
#include "dml_top_soc_parameter_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index 19d178651435..02da6f45cbf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_MCG_DCN4_H__
#define __DML2_MCG_DCN4_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
index 55085b85f8ed..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_mcg_factory.h"
#include "dml2_mcg_dcn4.h"
#include "dml2_external_lib_deps.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
index 5dfdfed04e22..ad307deca3b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_MCG_FACTORY_H__
#define __DML2_MCG_FACTORY_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index 671f9ac2627c..a31db5742675 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -2,22 +2,17 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_pmo_factory.h"
#include "dml2_pmo_dcn3.h"
static void sort(double *list_a, int list_a_size)
{
- double temp;
// Bubble sort list_a into ascending order
for (int i = 0; i < list_a_size - 1; i++) {
// Swap adjacent out-of-order pairs so larger values settle at the end
for (int j = 0; j < list_a_size - 1; j++) {
- if (list_a[j] > list_a[j + 1]) {
- temp = list_a[j];
- list_a[j] = list_a[j + 1];
- list_a[j + 1] = temp;
- }
+ if (list_a[j] > list_a[j + 1])
+ swap(list_a[j], list_a[j + 1]);
}
}
}
@@ -502,7 +497,6 @@ bool pmo_dcn3_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in
in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
result = false;
} else {
- free_pipes -= planes_on_stream;
break;
}
} else {
@@ -671,7 +665,7 @@ bool pmo_dcn3_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_su
struct dml2_pmo_instance *pmo = in_out->instance;
unsigned int stream_index;
bool success = false;
- bool reached_end = true;
+ bool reached_end;
memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
index cc350f88d4d2..f00bd9e72a86 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_PMO_DCN3_H__
#define __DML2_PMO_DCN3_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c
deleted file mode 100644
index 8952dd7e36cb..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.c
+++ /dev/null
@@ -1,1250 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#include "dml2_pmo_factory.h"
-#include "dml2_pmo_dcn4.h"
-
-static const int MIN_VACTIVE_MARGIN_US = 100; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
-static const int SUBVP_DRR_MARGIN_US = 100;
-
-static const enum dml2_pmo_pstate_strategy full_strategy_list_1_display[][4] = {
- // VActive Preferred
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then SVP
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then VBlank
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Finally DRR
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-};
-
-static const int full_strategy_list_1_display_size = sizeof(full_strategy_list_1_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4);
-
-static const enum dml2_pmo_pstate_strategy full_strategy_list_2_display[][4] = {
- // VActive only is preferred
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then VActive + VBlank
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then VBlank only
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then SVP + VBlank
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then SVP + SVP
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Finally DRR + DRR
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-};
-
-static const int full_strategy_list_2_display_size = sizeof(full_strategy_list_2_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4);
-
-static const enum dml2_pmo_pstate_strategy full_strategy_list_3_display[][4] = {
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // All VActive
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 1 VBlank
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
-
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 2 VBlank
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
-
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // VActive + 3 VBlank
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // All VBlank
-
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // All DRR
-};
-
-static const int full_strategy_list_3_display_size = sizeof(full_strategy_list_3_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4);
-
-static const enum dml2_pmo_pstate_strategy full_strategy_list_4_display[][4] = {
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // All VActive
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // VActive + 1 VBlank
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive },
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive },
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
-
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // VActive + 2 VBlank
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive },
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive },
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
-
-// { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 3 VBlank
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
-// { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive },
-
-	{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // All VBlank
-
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // All DRR
-};
-
-static const int full_strategy_list_4_display_size = sizeof(full_strategy_list_4_display) / (sizeof(enum dml2_pmo_pstate_strategy) * 4);
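/*
 * Editor's note — illustrative sketch, not part of the patch: every
 * *_size constant above uses the same compile-time idiom, dividing the
 * total array size by the size of one 4-entry row to get the number of
 * strategies in the table. The names below are hypothetical.
 */
enum example_method { M_A, M_B };

static const enum example_method example_list[][4] = {
	{ M_A, M_A, M_B, M_B },
	{ M_B, M_B, M_A, M_A },
};

static const int example_list_size = sizeof(example_list) / (sizeof(enum example_method) * 4); /* == 2 */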
-
-static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated)
-{
- bool result = true;
-
- if (*odm_mode == dml2_odm_mode_auto) {
- switch (odms_calculated) {
- case 1:
- *odm_mode = dml2_odm_mode_bypass;
- break;
- case 2:
- *odm_mode = dml2_odm_mode_combine_2to1;
- break;
- case 3:
- *odm_mode = dml2_odm_mode_combine_3to1;
- break;
- case 4:
- *odm_mode = dml2_odm_mode_combine_4to1;
- break;
- default:
- result = false;
- break;
- }
- }
-
- if (result) {
- if (*odm_mode == dml2_odm_mode_bypass) {
- *odm_mode = dml2_odm_mode_combine_2to1;
- } else if (*odm_mode == dml2_odm_mode_combine_2to1) {
- *odm_mode = dml2_odm_mode_combine_3to1;
- } else if (*odm_mode == dml2_odm_mode_combine_3to1) {
- *odm_mode = dml2_odm_mode_combine_4to1;
- } else {
- result = false;
- }
- }
-
- return result;
-}
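/*
 * Editor's note — usage sketch under the assumption that the caller
 * retries validation after each step: starting from dml2_odm_mode_auto
 * with odms_calculated == 1, the first call maps auto -> bypass and then
 * immediately promotes bypass -> combine_2to1; the next two calls step to
 * combine_3to1 and combine_4to1, and the call after that returns false
 * because no higher combine factor exists.
 */
enum dml2_odm_mode mode = dml2_odm_mode_auto;

while (increase_odm_combine_factor(&mode, 1)) {
	/* re-run mode support at the new ODM combine factor here */
}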
-
-static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
-{
- if (*mpc_combine_factor < limit) {
- (*mpc_combine_factor)++;
- return true;
- }
-
- return false;
-}
-
-static int count_planes_with_stream_index(const struct dml2_display_cfg *display_cfg, unsigned int stream_index)
-{
- unsigned int i;
- int count;
-
- count = 0;
- for (i = 0; i < display_cfg->num_planes; i++) {
- if (display_cfg->plane_descriptors[i].stream_index == stream_index)
- count++;
- }
-
- return count;
-}
-
-static bool optimize_dcc_mcache_no_odm(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out,
- int free_pipes)
-{
- struct dml2_pmo_instance *pmo = in_out->instance;
-
- unsigned int i;
- bool result = true;
-
- for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
-		// For pipes that failed the DCC mcache check, we want to increase the pipe count.
-		// The logic for doing this depends on how many pipes are already being used,
-		// and whether it's MPCC or ODM combine.
- if (!in_out->dcc_mcache_supported[i]) {
- // For the general case of "n displays", we can only optimize streams with an ODM combine factor of 1
- if (in_out->cfg_support_info->stream_support_info[in_out->optimized_display_cfg->plane_descriptors[i].stream_index].odms_used == 1) {
- in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor =
- in_out->cfg_support_info->plane_support_info[i].dpps_used;
- // For each plane that is not passing mcache validation, just add another pipe to it, up to the limit.
- if (free_pipes > 0) {
- if (!increase_mpc_combine_factor(&in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor,
- pmo->mpc_combine_limit)) {
- // We've reached max pipes allocatable to a single plane, so we fail.
- result = false;
- break;
- } else {
- // Successfully added another pipe to this failing plane.
- free_pipes--;
- }
- } else {
- // No free pipes to add.
- result = false;
- break;
- }
- } else {
- // If the stream of this plane needs ODM combine, no further optimization can be done.
- result = false;
- break;
- }
- }
- }
-
- return result;
-}
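/*
 * Editor's note — worked trace with hypothetical numbers: three planes,
 * plane 1 fails the mcache check, free_pipes == 2, mpc_combine_limit == 2,
 * and plane 1 currently uses 1 DPP with no ODM combine. The loop bumps its
 * mpcc_combine_factor 1 -> 2 and decrements free_pipes 2 -> 1, so the
 * function returns true. Had plane 1 already been at the combine limit, or
 * free_pipes been 0, it would return false instead.
 */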
-
-bool pmo_dcn4_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out)
-{
- struct dml2_pmo_instance *pmo = in_out->instance;
-
- unsigned int i, used_pipes, free_pipes, planes_on_stream;
- bool result;
-
- if (in_out->display_config != in_out->optimized_display_cfg) {
- memcpy(in_out->optimized_display_cfg, in_out->display_config, sizeof(struct dml2_display_cfg));
- }
-
-	// Count the number of free pipes, and check if any ODM combine is in use.
- used_pipes = 0;
- for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
- used_pipes += in_out->cfg_support_info->plane_support_info[i].dpps_used;
- }
- free_pipes = pmo->ip_caps->pipe_count - used_pipes;
-
- // Optimization loop
- // The goal here is to add more pipes to any planes
- // which are failing mcache admissibility
- result = true;
-
- // The optimization logic depends on whether ODM combine is enabled, and the stream count.
- if (in_out->optimized_display_cfg->num_streams > 1) {
-		// If there are multiple streams, we can only optimize mcache failures on planes
-		// that are not ODM combined.
-
- result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
- } else if (in_out->optimized_display_cfg->num_streams == 1) {
-		// In single-stream cases, we can still optimize mcache failures when ODM combine
-		// is in use, with some additional logic.
-
- if (in_out->cfg_support_info->stream_support_info[0].odms_used > 1) {
- // If ODM combine is enabled, then the logic is to increase ODM combine factor.
-
- // Optimization for streams with > 1 ODM combine factor is only supported for single display.
- planes_on_stream = count_planes_with_stream_index(in_out->optimized_display_cfg, 0);
-
- for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
-				// For pipes that failed the DCC mcache check, we want to increase the pipe count.
-				// The logic for doing this depends on how many pipes are already being used,
-				// and whether it's MPCC or ODM combine.
- if (!in_out->dcc_mcache_supported[i]) {
- // Increasing ODM combine factor on a stream requires a free pipe for each plane on the stream.
- if (free_pipes >= planes_on_stream) {
- if (!increase_odm_combine_factor(&in_out->optimized_display_cfg->stream_descriptors[i].overrides.odm_mode,
- in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
- result = false;
- } else {
- free_pipes -= planes_on_stream;
- break;
- }
- } else {
- result = false;
- break;
- }
- }
- }
- } else {
- // If ODM combine is not enabled, then we can actually use the same logic as before.
-
- result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
- }
- } else {
- result = true;
- }
-
- return result;
-}
-
-bool pmo_dcn4_initialize(struct dml2_pmo_initialize_in_out *in_out)
-{
- struct dml2_pmo_instance *pmo = in_out->instance;
-
- pmo->soc_bb = in_out->soc_bb;
- pmo->ip_caps = in_out->ip_caps;
- pmo->mpc_combine_limit = 2;
- pmo->odm_combine_limit = 4;
- pmo->mcg_clock_table_size = in_out->mcg_clock_table_size;
-
- pmo->fams_params.v1.subvp.fw_processing_delay_us = 10;
- pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us = 50;
- pmo->fams_params.v1.subvp.refresh_rate_limit_max = 175;
- pmo->fams_params.v1.subvp.refresh_rate_limit_min = 0;
-
- pmo->options = in_out->options;
-
- return true;
-}
-
-static bool is_h_timing_divisible_by(const struct dml2_timing_cfg *timing, unsigned char denominator)
-{
- /*
-	 * Htotal, Hblank start/end, and Hsync start/end must all be divisible
-	 * by the denominator for the horizontal timing params to be considered
-	 * divisible by it. Hsync start is always 0, so only Hsync width needs
-	 * checking.
- */
- unsigned long h_blank_start = timing->h_total - timing->h_front_porch;
-
- return (timing->h_total % denominator == 0) &&
- (h_blank_start % denominator == 0) &&
- (timing->h_blank_end % denominator == 0) &&
- (timing->h_sync_width % denominator == 0);
-}
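/*
 * Editor's note — worked example with a hypothetical timing: h_total = 4400,
 * h_front_porch = 90 (so h_blank_start = 4310), h_blank_end = 384 and
 * h_sync_width = 88. All four checked values are even, so the timing is
 * divisible by 2 and eligible for ODM 2:1; but 4310 % 4 == 2, so the same
 * check with denominator 4 fails and ODM 3:1/4:1 would be rejected.
 */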
-
-static bool is_dp_encoder(enum dml2_output_encoder_class encoder_type)
-{
- switch (encoder_type) {
- case dml2_dp:
- case dml2_edp:
- case dml2_dp2p0:
- case dml2_none:
- return true;
- case dml2_hdmi:
- case dml2_hdmifrl:
- default:
- return false;
- }
-}
-
-bool pmo_dcn4_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
-{
- unsigned int i;
- const struct dml2_display_cfg *display_config =
- &in_out->base_display_config->display_config;
- const struct dml2_core_mode_support_result *mode_support_result =
- &in_out->base_display_config->mode_support_result;
-
- if (in_out->instance->options->disable_dyn_odm ||
- (in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1))
- return false;
-
- for (i = 0; i < display_config->num_planes; i++)
- /*
-		 * vmin optimization is required to be seamlessly switched off
-		 * at any time when the new configuration is no longer
-		 * supported. However, switching from ODM combine to MPC combine
-		 * is not always seamless. When there are not enough free pipes,
-		 * we will have to use the same secondary OPP heads as secondary
-		 * DPP pipes in MPC combine in the new state. This transition is
-		 * expected to cause glitches. To avoid the transition, we only
-		 * allow vmin optimization if the stream's base configuration
-		 * doesn't require MPC combine. This condition checks if MPC
-		 * combine is enabled; if so, do not optimize the stream.
- */
- if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 &&
- mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1)
- in_out->base_display_config->stage4.unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;
-
- for (i = 0; i < display_config->num_streams; i++) {
- if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm)
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
- else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid &&
- in_out->instance->options->disable_dyn_odm_for_stream_with_svp)
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
- /*
- * ODM Combine requires horizontal timing divisible by 2 so each
- * ODM segment has the same size.
- */
- else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2))
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
- /*
-		 * Our hardware supports seamless ODM transitions for DP encoders
- * only.
- */
- else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder))
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
- }
-
- return true;
-}
-
-bool pmo_dcn4_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out)
-{
- bool is_vmin = true;
-
- if (in_out->vmin_limits->dispclk_khz > 0 &&
- in_out->display_config->mode_support_result.global.dispclk_khz > in_out->vmin_limits->dispclk_khz)
- is_vmin = false;
-
- return is_vmin;
-}
-
-static int find_highest_odm_load_stream_index(
- const struct dml2_display_cfg *display_config,
- const struct dml2_core_mode_support_result *mode_support_result)
-{
- unsigned int i;
- int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
-
- for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
- / mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
- if (odm_load > highest_odm_load) {
- highest_odm_load_index = i;
- highest_odm_load = odm_load;
- }
- }
-
- return highest_odm_load_index;
-}
-
-bool pmo_dcn4_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out)
-{
- int stream_index;
- const struct dml2_display_cfg *display_config =
- &in_out->base_display_config->display_config;
- const struct dml2_core_mode_support_result *mode_support_result =
- &in_out->base_display_config->mode_support_result;
- unsigned int odms_used;
- struct dml2_stream_parameters *stream_descriptor;
- bool optimizable = false;
-
- /*
-	 * The stream with the highest ODM load must be optimizable to continue,
-	 * as dispclk is bounded by it.
- */
- stream_index = find_highest_odm_load_stream_index(display_config,
- mode_support_result);
-
- if (stream_index < 0 ||
- in_out->base_display_config->stage4.unoptimizable_streams[stream_index])
- return false;
-
- odms_used = mode_support_result->cfg_support_info.stream_support_info[stream_index].odms_used;
- if ((int)odms_used >= in_out->instance->odm_combine_limit)
- return false;
-
- memcpy(in_out->optimized_display_config,
- in_out->base_display_config,
- sizeof(struct display_configuation_with_meta));
-
- stream_descriptor = &in_out->optimized_display_config->display_config.stream_descriptors[stream_index];
- while (!optimizable && increase_odm_combine_factor(
- &stream_descriptor->overrides.odm_mode,
- odms_used)) {
- switch (stream_descriptor->overrides.odm_mode) {
- case dml2_odm_mode_combine_2to1:
- optimizable = true;
- break;
- case dml2_odm_mode_combine_3to1:
- /*
-			 * In ODM Combine 3:1, the OTG_valid_pixel rate is 1/4 of the
-			 * actual pixel rate, so the horizontal timing must be
-			 * divisible by 4.
- */
- if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
- if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
- /*
- * DSC h slice count must be divisible
- * by 3.
- */
- if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 3 == 0)
- optimizable = true;
- } else {
- optimizable = true;
- }
- }
- break;
- case dml2_odm_mode_combine_4to1:
- /*
-			 * In ODM Combine 4:1, the OTG_valid_pixel rate is 1/4 of the
-			 * actual pixel rate, so the horizontal timing must be
-			 * divisible by 4.
- */
- if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
- if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
- /*
- * DSC h slice count must be divisible
- * by 4.
- */
- if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 4 == 0)
- optimizable = true;
- } else {
- optimizable = true;
- }
- }
- break;
- case dml2_odm_mode_auto:
- case dml2_odm_mode_bypass:
- case dml2_odm_mode_split_1to2:
- case dml2_odm_mode_mso_1to2:
- case dml2_odm_mode_mso_1to4:
- default:
- break;
- }
- }
-
- return optimizable;
-}
-
-static bool are_timings_trivially_synchronizable(const struct display_configuation_with_meta *display_config, int mask)
-{
- unsigned char i;
- bool identical = true;
- bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
-
-	// Create a remap array to enable simple iteration through only the masked stream indices
- for (i = 0; i < display_config->display_config.num_streams; i++) {
- if (mask & (0x1 << i)) {
- remap_array[remap_array_size++] = i;
- }
- }
-
- // 0 or 1 display is always trivially synchronizable
- if (remap_array_size <= 1)
- return true;
-
- for (i = 1; i < remap_array_size; i++) {
- if (memcmp(&display_config->display_config.stream_descriptors[remap_array[i - 1]].timing,
- &display_config->display_config.stream_descriptors[remap_array[i]].timing,
- sizeof(struct dml2_timing_cfg))) {
- identical = false;
- break;
- }
- }
-
- for (i = 0; i < remap_array_size; i++) {
- if (display_config->display_config.stream_descriptors[remap_array[i]].timing.drr_config.enabled) {
- contains_drr = true;
- break;
- }
- }
-
- return !contains_drr && identical;
-}
-
-static void set_bit_in_bitfield(unsigned int *bit_field, unsigned int bit_offset)
-{
- *bit_field = *bit_field | (0x1 << bit_offset);
-}
-
-static bool is_bit_set_in_bitfield(unsigned int bit_field, unsigned int bit_offset)
-{
- if (bit_field & (0x1 << bit_offset))
- return true;
-
- return false;
-}
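/*
 * Editor's note — usage sketch: stream and plane membership is tracked as
 * bits in a plain unsigned int, so masks compose with bitwise OR and a
 * membership query is a single AND.
 */
unsigned int svp_stream_mask = 0;

set_bit_in_bitfield(&svp_stream_mask, 0); /* stream 0 uses SubVP */
set_bit_in_bitfield(&svp_stream_mask, 2); /* stream 2 uses SubVP */
/* is_bit_set_in_bitfield(svp_stream_mask, 1) evaluates to false */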
-
-static bool are_all_timings_drr_enabled(const struct display_configuation_with_meta *display_config, int mask)
-{
- unsigned char i;
- for (i = 0; i < DML2_MAX_PLANES; i++) {
- if (is_bit_set_in_bitfield(mask, i)) {
- if (!display_config->display_config.stream_descriptors[i].timing.drr_config.enabled)
- return false;
- }
- }
-
- return true;
-}
-
-static void insert_into_candidate_list(const enum dml2_pmo_pstate_strategy *per_stream_pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
-{
- int stream_index;
-
- scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = true;
-
- for (stream_index = 0; stream_index < stream_count; stream_index++) {
- scratch->pmo_dcn4.per_stream_pstate_strategy[scratch->pmo_dcn4.num_pstate_candidates][stream_index] = per_stream_pstate_strategy[stream_index];
-
- if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank)
- scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = false;
- }
-
- scratch->pmo_dcn4.num_pstate_candidates++;
-}
-
-static bool all_planes_match_strategy(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_strategy strategy)
-{
- unsigned char i;
- enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na;
-
- if (strategy == dml2_pmo_pstate_strategy_vactive)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
- else if (strategy == dml2_pmo_pstate_strategy_vblank)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
- else if (strategy == dml2_pmo_pstate_strategy_fw_svp)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
- else if (strategy == dml2_pmo_pstate_strategy_fw_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_drr;
-
- for (i = 0; i < DML2_MAX_PLANES; i++) {
- if (is_bit_set_in_bitfield(plane_mask, i)) {
- if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto &&
- display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != matching_strategy)
- return false;
- }
- }
-
- return true;
-}
-
-static bool subvp_subvp_schedulable(struct dml2_pmo_instance *pmo, const struct display_configuation_with_meta *display_cfg,
- unsigned char *svp_stream_indicies, char svp_stream_count)
-{
- struct dml2_pmo_scratch *s = &pmo->scratch;
- int i;
- int microschedule_lines, time_us, refresh_hz;
- int max_microschedule_us = 0;
- int vactive1_us, vactive2_us, vblank1_us, vblank2_us;
-
- const struct dml2_timing_cfg *svp_timing1 = 0;
- const struct dml2_implicit_svp_meta *svp_meta1 = 0;
-
- const struct dml2_timing_cfg *svp_timing2 = 0;
-
- if (svp_stream_count <= 1)
- return true;
- else if (svp_stream_count > 2)
- return false;
-
-	/* Loop to calculate the maximum microschedule time between the two SubVP streams,
-	 * and to reject any stream whose refresh rate is outside the supported range.
- */
- for (i = 0; i < svp_stream_count; i++) {
- svp_timing1 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[i]].timing;
- svp_meta1 = &s->pmo_dcn4.stream_svp_meta[svp_stream_indicies[i]];
-
- microschedule_lines = svp_meta1->v_active;
-
- // Round up when calculating microschedule time (+ 1 at the end)
- time_us = (int)((microschedule_lines * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000 +
- pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us + pmo->fams_params.v1.subvp.fw_processing_delay_us + 1);
-
- if (time_us > max_microschedule_us)
- max_microschedule_us = time_us;
-
- refresh_hz = (int)((double)(svp_timing1->pixel_clock_khz * 1000) / (svp_timing1->v_total * svp_timing1->h_total));
-
- if (refresh_hz < pmo->fams_params.v1.subvp.refresh_rate_limit_min ||
- refresh_hz > pmo->fams_params.v1.subvp.refresh_rate_limit_max) {
- return false;
- }
- }
-
- svp_timing1 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[0]].timing;
- svp_meta1 = &s->pmo_dcn4.stream_svp_meta[svp_stream_indicies[0]];
-
- vactive1_us = (int)((svp_timing1->v_active * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000);
-
- vblank1_us = (int)(((svp_timing1->v_total - svp_timing1->v_active) * svp_timing1->h_total) / (double)(svp_timing1->pixel_clock_khz * 1000) * 1000000);
-
- svp_timing2 = &display_cfg->display_config.stream_descriptors[svp_stream_indicies[1]].timing;
-
- vactive2_us = (int)((svp_timing2->v_active * svp_timing2->h_total) / (double)(svp_timing2->pixel_clock_khz * 1000) * 1000000);
-
- vblank2_us = (int)(((svp_timing2->v_total - svp_timing2->v_active) * svp_timing2->h_total) / (double)(svp_timing2->pixel_clock_khz * 1000) * 1000000);
-
- if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us &&
- (vactive2_us - vblank1_us) / 2 > max_microschedule_us)
- return true;
-
- return false;
-}
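/*
 * Editor's note — worked example with a hypothetical phantom timing:
 * h_total = 4400, pixel_clock_khz = 594000, microschedule_lines = 200.
 * One line takes 4400 / 594e6 s ~= 7.41 us, so the phantom active region
 * takes 200 * 4400 / 594000 ~= 1481.5 us; adding the 50 us
 * prefetch_end_to_mall_start_us, the 10 us fw_processing_delay_us and the
 * +1 round-up term gives time_us == 1542. The pair is then schedulable
 * only if, for each stream, (its vactive - the other stream's vblank) / 2
 * exceeds this maximum microschedule time.
 */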
-
-static bool validate_svp_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int svp_stream_mask)
-{
- bool result = false;
- unsigned char stream_index;
-
- unsigned char svp_stream_indicies[2] = { 0 };
- unsigned char svp_stream_count = 0;
-
- // Find the SVP streams, store only the first 2, but count all of them
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) {
- if (svp_stream_count < 2)
- svp_stream_indicies[svp_stream_count] = stream_index;
-
- svp_stream_count++;
- }
- }
-
- if (svp_stream_count == 1) {
-		result = true; // 1 SVP is always co-functional
- } else if (svp_stream_count == 2) {
- result = subvp_subvp_schedulable(pmo, display_cfg, svp_stream_indicies, svp_stream_count);
- }
-
- return result;
-}
-
-static bool validate_drr_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int drr_stream_mask)
-{
- unsigned char stream_index;
- int drr_stream_count = 0;
-
-	// Find the DRR streams and count all of them
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (is_bit_set_in_bitfield(drr_stream_mask, stream_index)) {
- drr_stream_count++;
- }
- }
-
- return drr_stream_count <= 4;
-}
-
-static bool validate_svp_drr_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int svp_stream_mask, int drr_stream_mask)
-{
- unsigned char stream_index;
- int drr_stream_count = 0;
- int svp_stream_count = 0;
-
- int prefetch_us = 0;
- int mall_region_us = 0;
- int drr_frame_us = 0; // nominal frame time
- int subvp_active_us = 0;
- int stretched_drr_us = 0;
- int drr_stretched_vblank_us = 0;
- int max_vblank_mallregion = 0;
-
- const struct dml2_timing_cfg *svp_timing = 0;
- const struct dml2_timing_cfg *drr_timing = 0;
- const struct dml2_implicit_svp_meta *svp_meta = 0;
-
- bool schedulable = false;
-
-	// Find the SVP and DRR streams and count them
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) {
- svp_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing;
- svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- svp_stream_count++;
- }
- if (is_bit_set_in_bitfield(drr_stream_mask, stream_index)) {
- drr_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing;
- drr_stream_count++;
- }
- }
-
- if (svp_stream_count == 1 && drr_stream_count == 1 && svp_timing != drr_timing) {
- prefetch_us = (int)((svp_meta->v_total - svp_meta->v_front_porch)
- * svp_timing->h_total / (double)(svp_timing->pixel_clock_khz * 1000) * 1000000 +
- pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us);
-
- subvp_active_us = (int)(svp_timing->v_active * svp_timing->h_total /
- (double)(svp_timing->pixel_clock_khz * 1000) * 1000000);
-
- drr_frame_us = (int)(drr_timing->v_total * drr_timing->h_total /
- (double)(drr_timing->pixel_clock_khz * 1000) * 1000000);
-
-		// P-State allow width and FW delays are already included in phantom_timing->v_addressable
- mall_region_us = (int)(svp_meta->v_active * svp_timing->h_total /
- (double)(svp_timing->pixel_clock_khz * 1000) * 1000000);
-
- stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
-
- drr_stretched_vblank_us = (int)((drr_timing->v_total - drr_timing->v_active) * drr_timing->h_total /
- (double)(drr_timing->pixel_clock_khz * 1000) * 1000000 + (stretched_drr_us - drr_frame_us));
-
- max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
-
- /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
- * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
- * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
- * and the max of (VBLANK blanking time, MALL region)).
- */
- if (stretched_drr_us < (1 / (double)drr_timing->drr_config.min_refresh_uhz) * 1000000 * 1000000 &&
- subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
- schedulable = true;
- }
-
- return schedulable;
-}
-
-static bool validate_svp_vblank_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int svp_stream_mask, int vblank_stream_mask)
-{
- unsigned char stream_index;
- int vblank_stream_count = 0;
- int svp_stream_count = 0;
-
- const struct dml2_timing_cfg *svp_timing = 0;
- const struct dml2_timing_cfg *vblank_timing = 0;
- const struct dml2_implicit_svp_meta *svp_meta = 0;
-
- int prefetch_us = 0;
- int mall_region_us = 0;
- int vblank_frame_us = 0;
- int subvp_active_us = 0;
- int vblank_blank_us = 0;
- int max_vblank_mallregion = 0;
-
- bool schedulable = false;
-
-	// Find the SVP and VBlank streams and count them
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (is_bit_set_in_bitfield(svp_stream_mask, stream_index)) {
- svp_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing;
- svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- svp_stream_count++;
- }
- if (is_bit_set_in_bitfield(vblank_stream_mask, stream_index)) {
- vblank_timing = &display_cfg->display_config.stream_descriptors[stream_index].timing;
- vblank_stream_count++;
- }
- }
-
- if (svp_stream_count == 1 && vblank_stream_count > 0) {
-		// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe.
-		// Also include the prefetch-end to MALL-start delay time.
- prefetch_us = (int)((svp_meta->v_total - svp_meta->v_front_porch) * svp_timing->h_total
- / (double)(svp_timing->pixel_clock_khz * 1000) * 1000000 +
- pmo->fams_params.v1.subvp.prefetch_end_to_mall_start_us);
-
-		// P-State allow width and FW delays are already included in phantom_timing->v_addressable
- mall_region_us = (int)(svp_meta->v_active * svp_timing->h_total /
- (double)(svp_timing->pixel_clock_khz * 1000) * 1000000);
-
- vblank_frame_us = (int)(vblank_timing->v_total * vblank_timing->h_total /
- (double)(vblank_timing->pixel_clock_khz * 1000) * 1000000);
-
- vblank_blank_us = (int)((vblank_timing->v_total - vblank_timing->v_active) * vblank_timing->h_total /
- (double)(vblank_timing->pixel_clock_khz * 1000) * 1000000);
-
- subvp_active_us = (int)(svp_timing->v_active * svp_timing->h_total /
- (double)(svp_timing->pixel_clock_khz * 1000) * 1000000);
-
- max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us;
-
- // Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
- // and the max of (VBLANK blanking time, MALL region)
-		// TODO: Possibly add some margin (i.e. the condition below should be [...] > X instead of [...] > 0)
- if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0)
- schedulable = true;
- }
- return schedulable;
-}
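/*
 * Editor's note — worked example with hypothetical values (all in us):
 * subvp_active_us = 7000, prefetch_us = 1500, vblank_frame_us = 4000,
 * mall_region_us = 900 and vblank_blank_us = 600, so
 * max_vblank_mallregion = 900. Then 7000 - 1500 - 4000 - 900 = 600 > 0,
 * and the SVP + VBlank combination is schedulable.
 */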
-
-static bool validate_drr_vblank_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, int drr_stream_mask, int vblank_stream_mask)
-{
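-	// DRR + VBlank is not currently supported as a cofunctional combination.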
- return false;
-}
-
-static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg, const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[4])
-{
- struct dml2_pmo_scratch *s = &pmo->scratch;
-
- unsigned char stream_index = 0;
-
- unsigned int svp_count = 0;
- unsigned int svp_stream_mask = 0;
- unsigned int drr_count = 0;
- unsigned int drr_stream_mask = 0;
- unsigned int vactive_count = 0;
- unsigned int vactive_stream_mask = 0;
- unsigned int vblank_count = 0;
- unsigned int vblank_stream_mask = 0;
-
- bool strategy_matches_forced_requirements = true;
-
- bool admissible = false;
-
- // Tabulate everything
- for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
-
- if (!all_planes_match_strategy(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
- per_stream_pstate_strategy[stream_index])) {
- strategy_matches_forced_requirements = false;
- break;
- }
-
- if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
- svp_count++;
- set_bit_in_bitfield(&svp_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- drr_count++;
- set_bit_in_bitfield(&drr_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vactive) {
- vactive_count++;
- set_bit_in_bitfield(&vactive_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank) {
- vblank_count++;
- set_bit_in_bitfield(&vblank_stream_mask, stream_index);
- }
- }
-
- if (!strategy_matches_forced_requirements)
- return false;
-
- // Check for trivial synchronization for vblank
- if (vblank_count > 0 && (pmo->options->disable_vblank || !are_timings_trivially_synchronizable(display_cfg, vblank_stream_mask)))
- return false;
-
- if (svp_count > 0 && pmo->options->disable_svp)
- return false;
-
- if (drr_count > 0 && (pmo->options->disable_drr_var || !are_all_timings_drr_enabled(display_cfg, drr_stream_mask)))
- return false;
-
-	// Validate FAMS admissibility
- if (svp_count == 0 && drr_count == 0) {
- // No FAMS
- admissible = true;
- } else {
- admissible = false;
- if (svp_count > 0 && drr_count == 0 && vactive_count == 0 && vblank_count == 0) {
- // All SVP
- admissible = validate_svp_cofunctionality(pmo, display_cfg, svp_stream_mask);
- } else if (svp_count == 0 && drr_count > 0 && vactive_count == 0 && vblank_count == 0) {
- // All DRR
- admissible = validate_drr_cofunctionality(pmo, display_cfg, drr_stream_mask);
- } else if (svp_count > 0 && drr_count > 0 && vactive_count == 0 && vblank_count == 0) {
- // SVP + DRR
- admissible = validate_svp_drr_cofunctionality(pmo, display_cfg, svp_stream_mask, drr_stream_mask);
- } else if (svp_count > 0 && drr_count == 0 && vactive_count == 0 && vblank_count > 0) {
- // SVP + VBlank
- admissible = validate_svp_vblank_cofunctionality(pmo, display_cfg, svp_stream_mask, vblank_stream_mask);
- } else if (svp_count == 0 && drr_count > 0 && vactive_count == 0 && vblank_count > 0) {
- // DRR + VBlank
- admissible = validate_drr_vblank_cofunctionality(pmo, display_cfg, drr_stream_mask, vblank_stream_mask);
- }
- }
-
- return admissible;
-}
-
-static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
-{
- unsigned char i;
- int min_vactive_margin_us = 0xFFFFFFF;
-
- for (i = 0; i < DML2_MAX_PLANES; i++) {
- if (is_bit_set_in_bitfield(plane_mask, i)) {
- if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active < min_vactive_margin_us)
- min_vactive_margin_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active;
- }
- }
-
- return min_vactive_margin_us;
-}
-
-bool pmo_dcn4_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
-{
- struct dml2_pmo_instance *pmo = in_out->instance;
- struct dml2_optimization_stage3_state *state = &in_out->base_display_config->stage3;
- struct dml2_pmo_scratch *s = &pmo->scratch;
-
- struct display_configuation_with_meta *display_config;
- const struct dml2_plane_parameters *plane_descriptor;
- const enum dml2_pmo_pstate_strategy (*strategy_list)[4] = 0;
- unsigned int strategy_list_size = 0;
- unsigned int plane_index, stream_index, i;
-
- state->performed = true;
-
- display_config = in_out->base_display_config;
- display_config->display_config.overrides.enable_subvp_implicit_pmo = true;
-
- memset(s, 0, sizeof(struct dml2_pmo_scratch));
-
- pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
- pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size - 1;
- pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
-
- // First build the stream plane mask (array of bitfields indexed by stream, indicating plane mapping)
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- plane_descriptor = &display_config->display_config.plane_descriptors[plane_index];
-
- set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index);
-
- state->pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
- }
-
- // Figure out which streams can do vactive, and also build up implicit SVP meta
- for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >=
- MIN_VACTIVE_MARGIN_US)
- set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
-
- s->pmo_dcn4.stream_svp_meta[stream_index].valid = true;
- s->pmo_dcn4.stream_svp_meta[stream_index].v_active =
- display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_active;
- s->pmo_dcn4.stream_svp_meta[stream_index].v_total =
- display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_total;
- s->pmo_dcn4.stream_svp_meta[stream_index].v_front_porch = 1;
- }
-
- switch (display_config->display_config.num_streams) {
- case 1:
- strategy_list = full_strategy_list_1_display;
- strategy_list_size = full_strategy_list_1_display_size;
- break;
- case 2:
- strategy_list = full_strategy_list_2_display;
- strategy_list_size = full_strategy_list_2_display_size;
- break;
- case 3:
- strategy_list = full_strategy_list_3_display;
- strategy_list_size = full_strategy_list_3_display_size;
- break;
- case 4:
- strategy_list = full_strategy_list_4_display;
- strategy_list_size = full_strategy_list_4_display_size;
- break;
- default:
- strategy_list_size = 0;
- break;
- }
-
- if (strategy_list_size == 0)
- return false;
-
- s->pmo_dcn4.num_pstate_candidates = 0;
-
- for (i = 0; i < strategy_list_size && i < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) {
- if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, strategy_list[i])) {
- insert_into_candidate_list(strategy_list[i], display_config->display_config.num_streams, s);
- }
- }
-
- if (s->pmo_dcn4.num_pstate_candidates > 0) {
-		// There's one special case:
-		// If the first entry in the candidate list is all-VActive, we can consider it already "tested", so the current index is 0.
-		// Otherwise the current index should be -1, because the optimization must run at least once.
- s->pmo_dcn4.cur_pstate_candidate = 0;
- for (i = 0; i < display_config->display_config.num_streams; i++) {
- if (s->pmo_dcn4.per_stream_pstate_strategy[0][i] != dml2_pmo_pstate_strategy_vactive) {
- s->pmo_dcn4.cur_pstate_candidate = -1;
- break;
- }
- }
- return true;
- } else {
- return false;
- }
-}
-
-static void reset_display_configuration(struct display_configuation_with_meta *display_config)
-{
- unsigned int plane_index;
- unsigned int stream_index;
- struct dml2_plane_parameters *plane;
-
- for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- display_config->stage3.stream_svp_meta[stream_index].valid = false;
- }
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- plane = &display_config->display_config.plane_descriptors[plane_index];
-
- // Unset SubVP
- plane->overrides.legacy_svp_config = dml2_svp_mode_override_auto;
-
- // Remove reserve time
- plane->overrides.reserved_vblank_time_ns = 0;
-
- // Reset strategy to auto
- plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto;
-
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_not_supported;
- }
-}
-
-static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- unsigned char plane_index;
- struct dml2_plane_parameters *plane;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- plane = &display_config->display_config.plane_descriptors[plane_index];
-
- // Setup DRR
- plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
-
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_drr;
- }
- }
-}
-
-static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- unsigned char plane_index;
- int stream_index = -1;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom;
- }
- }
-
- if (stream_index >= 0) {
- display_config->stage3.stream_svp_meta[stream_index].valid = true;
- display_config->stage3.stream_svp_meta[stream_index].v_active =
- display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_active;
- display_config->stage3.stream_svp_meta[stream_index].v_total =
- display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index].phantom_v_total;
- display_config->stage3.stream_svp_meta[stream_index].v_front_porch = 1;
- }
-}
-
-static void setup_planes_for_vblank_by_mask(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- unsigned char plane_index;
- struct dml2_plane_parameters *plane;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- plane = &display_config->display_config.plane_descriptors[plane_index];
-
- // Setup reserve time
- plane->overrides.reserved_vblank_time_ns = 400 * 1000;
-
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank;
- }
- }
-}
-
-static void setup_planes_for_vactive_by_mask(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- unsigned char plane_index;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
- }
- }
-}
-
-static bool setup_display_config(struct display_configuation_with_meta *display_config, struct dml2_pmo_scratch *scratch, int strategy_index)
-{
- bool success = true;
- unsigned char stream_index;
-
- reset_display_configuration(display_config);
-
- for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_na) {
- success = false;
- break;
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vblank) {
- setup_planes_for_vblank_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
- setup_planes_for_svp_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- setup_planes_for_drr_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vactive) {
- setup_planes_for_vactive_by_mask(display_config, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- }
- }
-
- return success;
-}
-
-static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
-{
- int min_time_us = 0xFFFFFF;
- unsigned char plane_index = 0;
-
- for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
- if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
- if (min_time_us > (display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000))
- min_time_us = display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
- }
- }
- return min_time_us;
-}
-
-bool pmo_dcn4_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out)
-{
- bool p_state_supported = true;
- unsigned int stream_index;
- struct dml2_pmo_scratch *s = &in_out->instance->scratch;
-
- if (s->pmo_dcn4.cur_pstate_candidate < 0)
- return false;
-
- for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
-
- if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vactive) {
- if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_US) {
- p_state_supported = false;
- break;
- }
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vblank) {
- if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
- in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) {
- p_state_supported = false;
- break;
- }
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
- if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
- p_state_supported = false;
- break;
- }
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- if (!all_planes_match_strategy(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr)) {
- p_state_supported = false;
- break;
- }
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_na) {
- p_state_supported = false;
- break;
- }
- }
-
- return p_state_supported;
-}
-
-bool pmo_dcn4_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out)
-{
- bool success = false;
- struct dml2_pmo_scratch *s = &in_out->instance->scratch;
-
- memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
-
- if (in_out->last_candidate_failed) {
- if (s->pmo_dcn4.allow_state_increase_for_strategy[s->pmo_dcn4.cur_pstate_candidate] &&
- s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index) {
- s->pmo_dcn4.cur_latency_index++;
-
- success = true;
- }
- }
-
- if (!success) {
- s->pmo_dcn4.cur_latency_index = s->pmo_dcn4.min_latency_index;
- s->pmo_dcn4.cur_pstate_candidate++;
-
- if (s->pmo_dcn4.cur_pstate_candidate < s->pmo_dcn4.num_pstate_candidates) {
- success = true;
- }
- }
-
- if (success) {
- in_out->optimized_display_config->stage3.min_clk_index_for_latency = s->pmo_dcn4.cur_latency_index;
- setup_display_config(in_out->optimized_display_config, &in_out->instance->scratch, in_out->instance->scratch.pmo_dcn4.cur_pstate_candidate);
- }
-
- return success;
-}
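/*
 * Editor's note — search-order sketch: the optimizer walks a two-level
 * space. When the last candidate failed, it first tries bumping
 * cur_latency_index (only if the current strategy allows a state
 * increase); once latency indices are exhausted it resets latency to the
 * minimum and advances to the next pstate candidate, returning false when
 * both dimensions run out.
 */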
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h
deleted file mode 100644
index 09cacc933d21..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-
-#ifndef __DML2_PMO_DCN4_H__
-#define __DML2_PMO_DCN4_H__
-
-#include "dml2_internal_shared_types.h"
-
-bool pmo_dcn4_initialize(struct dml2_pmo_initialize_in_out *in_out);
-
-bool pmo_dcn4_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out);
-
-bool pmo_dcn4_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out);
-bool pmo_dcn4_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out);
-bool pmo_dcn4_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out);
-
-bool pmo_dcn4_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out);
-bool pmo_dcn4_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out);
-bool pmo_dcn4_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out);
-
-bool pmo_dcn4_unit_test(void);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 6547cc2c2a77..1cf9015e854a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -1,122 +1,181 @@
-/*
-* Copyright 2022 Advanced Micro Devices, Inc.
-*
-* Permission is hereby granted, free of charge, to any person obtaining a
-* copy of this software and associated documentation files (the "Software"),
-* to deal in the Software without restriction, including without limitation
-* the rights to use, copy, modify, merge, publish, distribute, sublicense,
-* and/or sell copies of the Software, and to permit persons to whom the
-* Software is furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in
-* all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-* OTHER DEALINGS IN THE SOFTWARE.
-*
-* Authors: AMD
-*
-*/
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_pmo_factory.h"
-#include "dml2_pmo_dcn4.h"
#include "dml2_debug.h"
#include "lib_float_math.h"
#include "dml2_pmo_dcn4_fams2.h"
 static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than a non-zero margin because DET buffer granularity can alter vactive latency hiding
-static const enum dml2_pmo_pstate_strategy base_strategy_list_1_display[][PMO_DCN4_MAX_DISPLAYS] = {
+static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
// VActive Preferred
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then SVP
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then VBlank
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Finally DRR
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
+
+ // Then DRR
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // Finally VBlank, but allow base clocks for latency to increase
+ /*
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+ */
};
-static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
+static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / sizeof(struct dml2_pmo_pstate_strategy);
-static const enum dml2_pmo_pstate_strategy base_strategy_list_2_display[][PMO_DCN4_MAX_DISPLAYS] = {
+static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
// VActive only is preferred
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then VActive + VBlank
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
// Then VBlank only
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
// Then SVP + VBlank
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
// Then SVP + DRR
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then SVP + SVP
- { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
// Then DRR + VActive
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Then DRR + VBlank
- //{ dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
-
- // Finally DRR + DRR
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // Then DRR + DRR
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // Finally VBlank, but allow base clocks for latency to increase
+ /*
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+ */
};
-static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
-
-static const enum dml2_pmo_pstate_strategy base_strategy_list_3_display[][PMO_DCN4_MAX_DISPLAYS] = {
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // All VActive
-
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // VActive + 1 VBlank
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na }, // VActive + 2 VBlank
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na }, // All VBlank
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // VBlank + 1 DRR
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // VBlank + 2 DRR
-
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na }, // All DRR
+static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / sizeof(struct dml2_pmo_pstate_strategy);
+
+static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
+ // All VActive
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // VActive + 1 VBlank
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
+
+ // All VBlank
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = false,
+ },
+
+ // All DRR
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+
+ // All VBlank, with state increase allowed
+ /*
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .allow_state_increase = true,
+ },
+ */
};
-static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
-
-static const enum dml2_pmo_pstate_strategy base_strategy_list_4_display[][PMO_DCN4_MAX_DISPLAYS] = {
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive }, // All VActive
-
- { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank }, // VActive + 1 VBlank
-
- //{ dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 2 VBlank
-
- //{ dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // VActive + 3 VBlank
-
- { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank }, // All Vblank
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 1 DRR
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 2 DRR
-
- //{ dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // VBlank + 3 DRR
-
- { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr }, // All DRR
+static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / sizeof(struct dml2_pmo_pstate_strategy);
+
+static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
+ // All VActive
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive },
+ .allow_state_increase = true,
+ },
+
+ // VActive + 1 VBlank
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
+ .allow_state_increase = false,
+ },
+
+ // All VBlank
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .allow_state_increase = false,
+ },
+
+ // All DRR
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr },
+ .allow_state_increase = true,
+ },
+
+ // All VBlank, with state increase allowed
+ /*
+ {
+ .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .allow_state_increase = true,
+ },
+ */
};
-static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / (sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
+static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / sizeof(struct dml2_pmo_pstate_strategy);
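With each table entry now a struct dml2_pmo_pstate_strategy instead of a row of enums, the element-count idiom simplifies to a plain division by the struct size. A minimal standalone sketch of the pattern, with illustrative names rather than the kernel's types:

#include <stdio.h>

enum method { VACTIVE, VBLANK, DRR, NA };

struct strategy {
	enum method per_stream_method[4];
	int allow_state_increase;
};

static const struct strategy table[] = {
	{ { VACTIVE, VACTIVE, NA, NA }, 1 },
	{ { DRR, DRR, NA, NA }, 1 },
};

int main(void)
{
	/* mirrors sizeof(base_strategy_list_N_display) / sizeof(struct ...) */
	printf("%zu entries\n", sizeof(table) / sizeof(table[0]));
	return 0;
}

In-kernel code would normally spell this with the ARRAY_SIZE() macro; the open-coded division above is what this diff uses.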
static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated)
@@ -275,7 +334,6 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o
in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
result = false;
} else {
- free_pipes -= planes_on_stream;
break;
}
} else {
@@ -296,9 +354,9 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o
return result;
}
-static enum dml2_pmo_pstate_strategy convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_strategy base_strategy)
+static enum dml2_pmo_pstate_method convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_method base_strategy)
{
- enum dml2_pmo_pstate_strategy variant_strategy = 0;
+ enum dml2_pmo_pstate_method variant_strategy = 0;
switch (base_strategy) {
case dml2_pmo_pstate_strategy_vactive:
@@ -327,11 +385,9 @@ static enum dml2_pmo_pstate_strategy convert_strategy_to_drr_variant(const enum
return variant_strategy;
}
-static enum dml2_pmo_pstate_strategy(*get_expanded_strategy_list(
- struct dml2_pmo_init_data *init_data,
- int stream_count))[PMO_DCN4_MAX_DISPLAYS]
+static struct dml2_pmo_pstate_strategy *get_expanded_strategy_list(struct dml2_pmo_init_data *init_data, int stream_count)
{
- enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
switch (stream_count) {
case 1:
@@ -361,23 +417,23 @@ static unsigned int get_num_expanded_strategies(
}
static void insert_strategy_into_expanded_list(
- const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS],
+ const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy,
int stream_count,
struct dml2_pmo_init_data *init_data)
{
- enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
expanded_strategy_list = get_expanded_strategy_list(init_data, stream_count);
if (expanded_strategy_list) {
- memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++],
- per_stream_pstate_strategy,
- sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
+ memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
+
+ init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++;
}
}
static void expand_base_strategy(struct dml2_pmo_instance *pmo,
- const enum dml2_pmo_pstate_strategy base_strategy_list[PMO_DCN4_MAX_DISPLAYS],
+ const struct dml2_pmo_pstate_strategy *base_strategy,
unsigned int stream_count)
{
bool skip_to_next_stream;
@@ -386,19 +442,21 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
unsigned int i, j;
unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
unsigned int stream_iteration_indices[PMO_DCN4_MAX_DISPLAYS] = { 0 };
- enum dml2_pmo_pstate_strategy cur_strategy_list[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ struct dml2_pmo_pstate_strategy cur_strategy_list = { 0 };
/* determine number of displays per method */
for (i = 0; i < stream_count; i++) {
/* increment the count of the earliest index with the same method */
for (j = 0; j < stream_count; j++) {
- if (base_strategy_list[i] == base_strategy_list[j]) {
+ if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
num_streams_per_method[j] = num_streams_per_method[j] + 1;
break;
}
}
}
+ cur_strategy_list.allow_state_increase = base_strategy->allow_state_increase;
+
i = 0;
/* uses a while loop instead of recursion to build permutations of base strategy */
while (stream_iteration_indices[0] < stream_count) {
@@ -409,12 +467,12 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
/* determine what to do for this iteration */
if (stream_iteration_indices[i] < stream_count && num_streams_per_method[stream_iteration_indices[i]] != 0) {
/* decrement count and assign method */
- cur_strategy_list[i] = base_strategy_list[stream_iteration_indices[i]];
+ cur_strategy_list.per_stream_pstate_method[i] = base_strategy->per_stream_pstate_method[stream_iteration_indices[i]];
num_streams_per_method[stream_iteration_indices[i]] -= 1;
if (i >= stream_count - 1) {
/* insert into strategy list */
- insert_strategy_into_expanded_list(cur_strategy_list, stream_count, &pmo->init_data);
+ insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, &pmo->init_data);
expanded_strategy_added = true;
} else {
/* skip to next stream */
@@ -450,55 +508,122 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
}
}
-static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
- const enum dml2_pmo_pstate_strategy base_strategy_list[PMO_DCN4_MAX_DISPLAYS],
+
+static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy,
+ const struct dml2_pmo_pstate_strategy *variant_strategy,
+ unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
+ unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
unsigned int stream_count)
{
+ bool valid = true;
unsigned int i;
- bool variant_found = false;
- enum dml2_pmo_pstate_strategy cur_strategy_list[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ /* check all restrictions are met */
+ for (i = 0; i < stream_count; i++) {
+ /* vblank + vblank_drr variants are invalid */
+ if (base_strategy->per_stream_pstate_method[i] == dml2_pmo_pstate_strategy_vblank &&
+ ((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) ||
+ num_streams_per_variant_method[i] > 1)) {
+ valid = false;
+ break;
+ }
+ }
- /* setup variant list as base to start */
- memcpy(cur_strategy_list, base_strategy_list, sizeof(enum dml2_pmo_pstate_strategy) * PMO_DCN4_MAX_DISPLAYS);
+ return valid;
+}
+
+static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
+ const struct dml2_pmo_pstate_strategy *base_strategy,
+ unsigned int stream_count)
+{
+ bool variant_found;
+ unsigned int i, j;
+ unsigned int method_index;
+ unsigned int stream_index;
+ unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
+ enum dml2_pmo_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
+ struct dml2_pmo_pstate_strategy variant_strategy = { 0 };
+ /* determine number of displays per method */
for (i = 0; i < stream_count; i++) {
- cur_strategy_list[i] = convert_strategy_to_drr_variant(base_strategy_list[i]);
+ /* increment the count of the earliest index with the same method */
+ for (j = 0; j < stream_count; j++) {
+ if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
+ num_streams_per_method[j] = num_streams_per_method[j] + 1;
+ break;
+ }
+ }
+
+ per_stream_variant_method[i] = convert_strategy_to_drr_variant(base_strategy->per_stream_pstate_method[i]);
+ }
+ memcpy(num_streams_per_base_method, num_streams_per_method, sizeof(unsigned int) * PMO_DCN4_MAX_DISPLAYS);
+
+ memcpy(&variant_strategy, base_strategy, sizeof(struct dml2_pmo_pstate_strategy));
+
+ method_index = 0;
+ /* uses a while loop instead of recursion to build permutations of base strategy */
+ while (num_streams_per_base_method[0] > 0 || method_index != 0) {
+ if (method_index == stream_count) {
+ /* construct variant strategy */
+ variant_found = false;
+ stream_index = 0;
+
+ for (i = 0; i < stream_count; i++) {
+ for (j = 0; j < num_streams_per_base_method[i]; j++) {
+ variant_strategy.per_stream_pstate_method[stream_index++] = base_strategy->per_stream_pstate_method[i];
+ }
+
+ for (j = 0; j < num_streams_per_variant_method[i]; j++) {
+ variant_strategy.per_stream_pstate_method[stream_index++] = per_stream_variant_method[i];
+ if (base_strategy->per_stream_pstate_method[i] != per_stream_variant_method[i]) {
+ variant_found = true;
+ }
+ }
+ }
- if (cur_strategy_list[i] != base_strategy_list[i]) {
- variant_found = true;
+ if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) {
+ expand_base_strategy(pmo, &variant_strategy, stream_count);
+ }
+
+ /* rollback to earliest method with bases remaining */
+ for (method_index = stream_count - 1; method_index > 0; method_index--) {
+ if (num_streams_per_base_method[method_index]) {
+ /* bases remaining */
+ break;
+ } else {
+ /* reset counters */
+ num_streams_per_base_method[method_index] = num_streams_per_method[method_index];
+ num_streams_per_variant_method[method_index] = 0;
+ }
+ }
}
- if (i == stream_count - 1 && variant_found) {
- insert_strategy_into_expanded_list(cur_strategy_list, stream_count, &pmo->init_data);
+ if (num_streams_per_base_method[method_index]) {
+ num_streams_per_base_method[method_index]--;
+ num_streams_per_variant_method[method_index]++;
+
+ method_index++;
+ } else if (method_index != 0) {
+ method_index++;
}
}
}
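The rewritten expand_variant_strategy no longer emits a single all-DRR variant; it walks every way of flipping some streams of each base method over to the method's DRR variant, using per-method counters with rollback instead of recursion. A standalone sketch of the same enumeration idea, assuming that counting interpretation (names illustrative, not the kernel code):

#include <stdio.h>

#define GROUPS 2

int main(void)
{
	const int count[GROUPS] = { 2, 1 }; /* e.g. 2x vactive, 1x vblank */
	int variant[GROUPS] = { 0 };

	for (;;) {
		int i, any = 0;

		for (i = 0; i < GROUPS; i++)
			any |= variant[i];
		if (any) /* skip the no-variant combination */
			printf("variants per group: %d %d\n",
			       variant[0], variant[1]);

		/* mixed-radix increment with per-group limit count[i] */
		for (i = GROUPS - 1; i >= 0; i--) {
			if (++variant[i] <= count[i])
				break;
			variant[i] = 0;
		}
		if (i < 0)
			break;
	}
	return 0;
}

For the counts {2, 1} this emits five combinations, matching the 3 * 2 - 1 non-trivial ways to split streams between base and variant methods.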
static void expand_base_strategies(
struct dml2_pmo_instance *pmo,
- const enum dml2_pmo_pstate_strategy(*base_strategies_list)[PMO_DCN4_MAX_DISPLAYS],
+ const struct dml2_pmo_pstate_strategy *base_strategies_list,
const unsigned int num_base_strategies,
unsigned int stream_count)
{
unsigned int i;
- unsigned int num_pre_variant_strategies;
- enum dml2_pmo_pstate_strategy(*expanded_strategy_list)[PMO_DCN4_MAX_DISPLAYS];
/* expand every explicit base strategy (except all DRR) */
- for (i = 0; i < num_base_strategies - 1; i++) {
- expand_base_strategy(pmo, base_strategies_list[i], stream_count);
- }
-
- /* expand base strategies to DRR variants */
- num_pre_variant_strategies = get_num_expanded_strategies(&pmo->init_data, stream_count);
- expanded_strategy_list = get_expanded_strategy_list(&pmo->init_data, stream_count);
- for (i = 0; i < num_pre_variant_strategies; i++) {
- expand_variant_strategy(pmo, expanded_strategy_list[i], stream_count);
+ for (i = 0; i < num_base_strategies; i++) {
+ expand_base_strategy(pmo, &base_strategies_list[i], stream_count);
+ expand_variant_strategy(pmo, &base_strategies_list[i], stream_count);
}
-
- /* add back all DRR */
- insert_strategy_into_expanded_list(base_strategies_list[num_base_strategies - 1], stream_count, &pmo->init_data);
}
bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
@@ -546,8 +671,6 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
/* populate list */
expand_base_strategies(pmo, base_strategy_list_4_display, base_strategy_list_4_display_size, 4);
break;
- default:
- break;
}
}
@@ -591,6 +714,8 @@ bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
&in_out->base_display_config->display_config;
const struct dml2_core_mode_support_result *mode_support_result =
&in_out->base_display_config->mode_support_result;
+ struct dml2_optimization_stage4_state *state =
+ &in_out->base_display_config->stage4;
if (in_out->instance->options->disable_dyn_odm ||
(in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1))
@@ -611,28 +736,30 @@ bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
*/
if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 &&
mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1)
- in_out->base_display_config->stage4.unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;
+ state->unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;
for (i = 0; i < display_config->num_streams; i++) {
if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm)
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
+ state->unoptimizable_streams[i] = true;
else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid &&
in_out->instance->options->disable_dyn_odm_for_stream_with_svp)
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
+ state->unoptimizable_streams[i] = true;
/*
* ODM Combine requires horizontal timing divisible by 2 so each
* ODM segment has the same size.
*/
else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2))
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
+ state->unoptimizable_streams[i] = true;
/*
* Our hardware supports seamless ODM transitions for DP encoders
* only.
*/
else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder))
- in_out->base_display_config->stage4.unoptimizable_streams[i] = true;
+ state->unoptimizable_streams[i] = true;
}
+ state->performed = true;
+
return true;
}
@@ -783,6 +910,7 @@ static void build_synchronized_timing_groups(
/* clear all group masks */
memset(s->pmo_dcn4.synchronized_timing_group_masks, 0, sizeof(s->pmo_dcn4.synchronized_timing_group_masks));
memset(s->pmo_dcn4.group_is_drr_enabled, 0, sizeof(s->pmo_dcn4.group_is_drr_enabled));
+ memset(s->pmo_dcn4.group_is_drr_active, 0, sizeof(s->pmo_dcn4.group_is_drr_active));
memset(s->pmo_dcn4.group_line_time_us, 0, sizeof(s->pmo_dcn4.group_line_time_us));
s->pmo_dcn4.num_timing_groups = 0;
@@ -804,15 +932,19 @@ static void build_synchronized_timing_groups(
/* if drr is in use, timing is not synchronizable */
if (master_timing->drr_config.enabled) {
s->pmo_dcn4.group_is_drr_enabled[timing_group_idx] = true;
+ s->pmo_dcn4.group_is_drr_active[timing_group_idx] = !master_timing->drr_config.disallowed &&
+ (master_timing->drr_config.drr_active_fixed || master_timing->drr_config.drr_active_variable);
continue;
}
/* find synchronizable timing groups */
for (j = i + 1; j < display_config->display_config.num_streams; j++) {
if (memcmp(master_timing,
- &display_config->display_config.stream_descriptors[j].timing,
- sizeof(struct dml2_timing_cfg)) == 0 &&
- display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) {
+ &display_config->display_config.stream_descriptors[j].timing,
+ sizeof(struct dml2_timing_cfg)) == 0 &&
+ display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder &&
+ (display_config->display_config.stream_descriptors[i].output.output_encoder != dml2_hdmi || // hdmi requires formats to match
+ display_config->display_config.stream_descriptors[i].output.output_format == display_config->display_config.stream_descriptors[j].output.output_format)) {
set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
set_bit_in_bitfield(&stream_mapped_mask, j);
}
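The widened condition in build_synchronized_timing_groups reads as: two streams may share a timing group only when their timings are byte-identical, their encoders match, and, for HDMI, their output formats match as well. A hedged sketch of that predicate with illustrative types:

#include <stdbool.h>
#include <string.h>

enum encoder { DP, HDMI };
enum format { RGB, YCBCR420 };

struct timing { unsigned int h_total, v_total; };
struct stream {
	struct timing timing;
	enum encoder encoder;
	enum format format;
};

static bool can_synchronize(const struct stream *a, const struct stream *b)
{
	if (memcmp(&a->timing, &b->timing, sizeof(a->timing)) != 0)
		return false;
	if (a->encoder != b->encoder)
		return false;
	/* HDMI additionally requires matching output formats */
	return a->encoder != HDMI || a->format == b->format;
}

int main(void)
{
	struct stream a = { { 1920, 1080 }, HDMI, RGB };
	struct stream b = a;

	return can_synchronize(&a, &b) ? 0 : 1;
}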
@@ -884,7 +1016,7 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
stream_descriptor = &display_config->display_config.stream_descriptors[i];
stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
- if (!stream_descriptor->timing.drr_config.enabled || stream_descriptor->overrides.disable_fams2_drr)
+ if (!stream_descriptor->timing.drr_config.enabled)
return false;
/* cannot support required vtotal */
@@ -967,35 +1099,24 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
return true;
}
-static void insert_into_candidate_list(const enum dml2_pmo_pstate_strategy *per_stream_pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
+static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
{
- int stream_index;
-
- scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = true;
-
- for (stream_index = 0; stream_index < stream_count; stream_index++) {
- scratch->pmo_dcn4.per_stream_pstate_strategy[scratch->pmo_dcn4.num_pstate_candidates][stream_index] = per_stream_pstate_strategy[stream_index];
-
- if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr)
- scratch->pmo_dcn4.allow_state_increase_for_strategy[scratch->pmo_dcn4.num_pstate_candidates] = false;
- }
-
+ scratch->pmo_dcn4.pstate_strategy_candidates[scratch->pmo_dcn4.num_pstate_candidates] = *pstate_strategy;
scratch->pmo_dcn4.num_pstate_candidates++;
}
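Candidate insertion collapses to a single struct assignment because allow_state_increase now travels inside the strategy rather than being recomputed from the per-stream methods; in C, assigning a struct copies any arrays embedded in it. A tiny sketch with illustrative types:

#include <stdio.h>

struct strategy { int per_stream_method[4]; int allow_state_increase; };

static void insert_candidate(struct strategy *list, int *count,
			     const struct strategy *s)
{
	list[(*count)++] = *s; /* copies the array and the flag together */
}

int main(void)
{
	struct strategy candidates[8];
	int num = 0;
	const struct strategy s = { { 1, 2, 0, 0 }, 1 };

	insert_candidate(candidates, &num, &s);
	printf("%d candidate(s), flag %d\n", num,
	       candidates[0].allow_state_increase);
	return 0;
}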
-static bool all_planes_match_strategy(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_strategy strategy)
+static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_method method)
{
unsigned char i;
enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na;
- if (strategy == dml2_pmo_pstate_strategy_vactive || strategy == dml2_pmo_pstate_strategy_fw_vactive_drr)
+ if (method == dml2_pmo_pstate_strategy_vactive || method == dml2_pmo_pstate_strategy_fw_vactive_drr)
matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
- else if (strategy == dml2_pmo_pstate_strategy_vblank || strategy == dml2_pmo_pstate_strategy_fw_vblank_drr)
+ else if (method == dml2_pmo_pstate_strategy_vblank || method == dml2_pmo_pstate_strategy_fw_vblank_drr)
matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
- else if (strategy == dml2_pmo_pstate_strategy_fw_svp)
+ else if (method == dml2_pmo_pstate_strategy_fw_svp)
matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
- else if (strategy == dml2_pmo_pstate_strategy_fw_drr)
+ else if (method == dml2_pmo_pstate_strategy_fw_drr)
matching_strategy = dml2_uclk_pstate_change_strategy_force_drr;
for (i = 0; i < DML2_MAX_PLANES; i++) {
@@ -1027,12 +1148,12 @@ static void build_method_scheduling_params(
static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
- enum dml2_pmo_pstate_strategy stream_pstate_strategy,
+ enum dml2_pmo_pstate_method stream_pstate_method,
int stream_idx)
{
struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
- switch (stream_pstate_strategy) {
+ switch (stream_pstate_method) {
case dml2_pmo_pstate_strategy_vactive:
case dml2_pmo_pstate_strategy_fw_vactive_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
@@ -1063,7 +1184,7 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
static bool is_timing_group_schedulable(
struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS],
+ const struct dml2_pmo_pstate_strategy *pstate_strategy,
const unsigned int timing_group_idx,
struct dml2_fams2_per_method_common_meta *group_fams2_meta)
{
@@ -1082,7 +1203,7 @@ static bool is_timing_group_schedulable(
}
/* init allow start and end lines for timing group */
- stream_method_fams2_meta = get_per_method_common_meta(pmo, per_stream_pstate_strategy[base_stream_idx], base_stream_idx);
+ stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
if (!stream_method_fams2_meta)
return false;
@@ -1091,9 +1212,9 @@ static bool is_timing_group_schedulable(
group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
- stream_method_fams2_meta = get_per_method_common_meta(pmo, per_stream_pstate_strategy[i], i);
+ stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
if (!stream_method_fams2_meta)
- continue;
+ return false;
if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
@@ -1123,7 +1244,7 @@ static bool is_timing_group_schedulable(
static bool is_config_schedulable(
struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS])
+ const struct dml2_pmo_pstate_strategy *pstate_strategy)
{
unsigned int i, j;
bool schedulable;
@@ -1146,7 +1267,7 @@ static bool is_config_schedulable(
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
- if (!is_timing_group_schedulable(pmo, display_cfg, per_stream_pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
+ if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
/* synchronized timing group was not schedulable */
schedulable = false;
break;
@@ -1248,7 +1369,7 @@ static bool is_config_schedulable(
unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
- s->pmo_dcn4.group_is_drr_enabled[sorted_ip1]) {
+ (s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
schedulable = false;
break;
}
@@ -1260,8 +1381,8 @@ static bool is_config_schedulable(
/* STAGE 4: When using HW exclusive modes, check disallow alignments are within allowed threshold */
if (s->pmo_dcn4.num_timing_groups == 2 &&
- !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, per_stream_pstate_strategy[0]) &&
- !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, per_stream_pstate_strategy[1])) {
+ !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[0]) &&
+ !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[1])) {
double period_ratio;
double max_shift_us;
double shift_per_period;
@@ -1290,44 +1411,48 @@ static bool is_config_schedulable(
}
static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
- const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_strategy stream_pstate_strategy,
- unsigned int stream_index)
+ const struct display_configuation_with_meta *display_cfg,
+ const enum dml2_pmo_pstate_method stream_pstate_method,
+ unsigned int stream_index)
{
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index];
bool strategy_matches_drr_requirements = true;
/* check if strategy is compatible with stream drr capability and strategy */
- if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_strategy) &&
+ if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
display_cfg->display_config.num_streams > 1 &&
stream_descriptor->timing.drr_config.enabled &&
(stream_descriptor->timing.drr_config.drr_active_fixed || stream_descriptor->timing.drr_config.drr_active_variable)) {
/* DRR is active, so config may become unschedulable */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_strategy) &&
- is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_strategy) &&
+ } else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
+ is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
stream_descriptor->timing.drr_config.enabled &&
stream_descriptor->timing.drr_config.drr_active_variable) {
/* DRR is variable, fw exclusive methods require DRR to be clamped */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_strategy) &&
+ } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
pmo->options->disable_drr_var_when_var_active &&
stream_descriptor->timing.drr_config.enabled &&
stream_descriptor->timing.drr_config.drr_active_variable) {
/* DRR variable is active, but policy blocks DRR for p-state when this happens */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_strategy) &&
+ } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
(pmo->options->disable_drr_var ||
!stream_descriptor->timing.drr_config.enabled ||
stream_descriptor->timing.drr_config.disallowed)) {
/* DRR variable strategies are disallowed due to settings or policy */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_strategy) &&
- (pmo->options->disable_drr_clamped ||
- !stream_descriptor->timing.drr_config.enabled)) {
+ } else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_method) &&
+ (pmo->options->disable_drr_clamped ||
+ (!stream_descriptor->timing.drr_config.enabled ||
+ (!stream_descriptor->timing.drr_config.drr_active_fixed && !stream_descriptor->timing.drr_config.drr_active_variable)) ||
+ (pmo->options->disable_drr_clamped_when_var_active &&
+ stream_descriptor->timing.drr_config.enabled &&
+ stream_descriptor->timing.drr_config.drr_active_variable))) {
/* DRR fixed strategies are disallowed due to settings or policy */
strategy_matches_drr_requirements = false;
- } else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_strategy) &&
+ } else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
pmo->options->disable_fams2) {
/* FW modes require FAMS2 */
strategy_matches_drr_requirements = false;
@@ -1338,7 +1463,7 @@ static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[PMO_DCN4_MAX_DISPLAYS])
+ const struct dml2_pmo_pstate_strategy *pstate_strategy)
{
struct dml2_pmo_scratch *s = &pmo->scratch;
@@ -1359,28 +1484,28 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
// Tabulate everything
for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
- if (!all_planes_match_strategy(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
- per_stream_pstate_strategy[stream_index])) {
+ if (!all_planes_match_method(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
+ pstate_strategy->per_stream_pstate_method[stream_index])) {
strategy_matches_forced_requirements = false;
break;
}
strategy_matches_drr_requirements &=
- stream_matches_drr_policy(pmo, display_cfg, per_stream_pstate_strategy[stream_index], stream_index);
+ stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index);
- if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
svp_count++;
set_bit_in_bitfield(&svp_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
drr_count++;
set_bit_in_bitfield(&drr_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vactive ||
- per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
vactive_count++;
set_bit_in_bitfield(&vactive_stream_mask, stream_index);
- } else if (per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- per_stream_pstate_strategy[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
vblank_count++;
set_bit_in_bitfield(&vblank_stream_mask, stream_index);
}
@@ -1389,7 +1514,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
if (!strategy_matches_forced_requirements || !strategy_matches_drr_requirements)
return false;
- if (vactive_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask)))
+ if (vactive_count > 0 && !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask))
return false;
if (vblank_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vblank(pmo, display_cfg, vblank_stream_mask)))
@@ -1401,7 +1526,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
if (svp_count > 0 && (pmo->options->disable_svp || !all_timings_support_svp(pmo, display_cfg, svp_stream_mask)))
return false;
- return is_config_schedulable(pmo, display_cfg, per_stream_pstate_strategy);
+ return is_config_schedulable(pmo, display_cfg, pstate_strategy);
}
static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
@@ -1457,6 +1582,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
(stream_fams2_meta->nom_vtotal * timing->h_total);
stream_fams2_meta->nom_frame_time_us =
(double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
+ stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
if (stream_descriptor->timing.drr_config.enabled == true) {
if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
@@ -1510,7 +1636,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
- timing->v_blank_end + timing->v_active -
+ stream_fams2_meta->vblank_start -
stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
} else {
stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
@@ -1520,8 +1646,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
/* vblank */
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline =
- timing->v_blank_end + timing->v_active;
+ stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
@@ -1555,8 +1680,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
stream_fams2_meta->allow_to_target_delay_otg_vlines;
stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
- stream_fams2_meta->nom_vtotal -
- timing->v_front_porch -
+ stream_fams2_meta->vblank_start -
stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
@@ -1565,20 +1689,21 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
stream_fams2_meta->method_drr.programming_delay_otg_vlines =
(unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
stream_fams2_meta->method_drr.common.allow_start_otg_vline =
- stream_fams2_meta->nom_vtotal +
+ stream_fams2_meta->vblank_start +
stream_fams2_meta->allow_to_target_delay_otg_vlines;
stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
if (display_config->display_config.num_streams <= 1) {
/* only need to stretch vblank for blackout time */
stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->method_drr.common.allow_start_otg_vline +
+ stream_fams2_meta->nom_vtotal +
+ stream_fams2_meta->allow_to_target_delay_otg_vlines +
stream_fams2_meta->min_allow_width_otg_vlines +
stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
} else {
/* multi display needs to always be schedulable */
stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->method_drr.common.allow_start_otg_vline +
- stream_fams2_meta->nom_vtotal +
+ stream_fams2_meta->nom_vtotal * 2 +
+ stream_fams2_meta->allow_to_target_delay_otg_vlines +
stream_fams2_meta->min_allow_width_otg_vlines +
stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
}
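The DRR stretch arithmetic becomes explicit here: a lone display stretches the nominal frame by the allow-to-target delay, the minimum allow width, and the blackout time, while multi-display adds a full extra nominal frame so the schedule always fits. A sketch of the computation as I read the hunk (all quantities in OTG vlines, names illustrative):

#include <stdio.h>

static unsigned int stretched_vtotal(unsigned int nom_vtotal,
				     unsigned int allow_to_target_delay,
				     unsigned int min_allow_width,
				     unsigned int blackout,
				     int num_streams)
{
	if (num_streams <= 1)
		/* only need to stretch vblank for the blackout time */
		return nom_vtotal + allow_to_target_delay +
		       min_allow_width + blackout;
	/* multi-display must always be schedulable: add a full frame */
	return nom_vtotal * 2 + allow_to_target_delay +
	       min_allow_width + blackout;
}

int main(void)
{
	/* illustrative nominal vtotal and delays */
	printf("single: %u, multi: %u\n",
	       stretched_vtotal(1125, 2, 10, 46, 1),
	       stretched_vtotal(1125, 2, 10, 46, 2));
	return 0;
}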
@@ -1611,7 +1736,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
struct display_configuation_with_meta *display_config;
const struct dml2_plane_parameters *plane_descriptor;
- const enum dml2_pmo_pstate_strategy(*strategy_list)[PMO_DCN4_MAX_DISPLAYS] = NULL;
+ const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
unsigned int strategy_list_size = 0;
unsigned char plane_index, stream_index, i;
@@ -1623,6 +1748,10 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
memset(s, 0, sizeof(struct dml2_pmo_scratch));
+ if (display_config->display_config.overrides.all_streams_blanked) {
+ return true;
+ }
+
pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size;
pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
@@ -1652,6 +1781,9 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
build_synchronized_timing_groups(pmo, display_config);
strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
+ if (!strategy_list)
+ return false;
+
strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
if (strategy_list_size == 0)
@@ -1660,8 +1792,8 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
s->pmo_dcn4.num_pstate_candidates = 0;
for (i = 0; i < strategy_list_size && s->pmo_dcn4.num_pstate_candidates < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) {
- if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, strategy_list[i])) {
- insert_into_candidate_list(strategy_list[i], display_config->display_config.num_streams, s);
+ if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, &strategy_list[i])) {
+ insert_into_candidate_list(&strategy_list[i], display_config->display_config.num_streams, s);
}
}
@@ -1778,7 +1910,8 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
plane = &display_config->display_config.plane_descriptors[plane_index];
- plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
+ plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0,
+ plane->overrides.reserved_vblank_time_ns);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank;
@@ -1857,26 +1990,26 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (pmo->scratch.pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_na) {
+ if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
success = false;
break;
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vactive) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive) {
setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_vblank) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank) {
setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
fams2_required = true;
setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
fams2_required = true;
setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
fams2_required = true;
setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
fams2_required = true;
setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.per_stream_pstate_strategy[strategy_index][stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
fams2_required = true;
setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
}
@@ -1917,6 +2050,10 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
int MIN_VACTIVE_MARGIN_DRR = 0;
int REQUIRED_RESERVED_TIME = 0;
+ if (in_out->base_display_config->display_config.overrides.all_streams_blanked) {
+ return true;
+ }
+
MIN_VACTIVE_MARGIN_VBLANK = INT_MIN;
MIN_VACTIVE_MARGIN_DRR = INT_MIN;
REQUIRED_RESERVED_TIME = (int)in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us;
@@ -1927,34 +2064,34 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
- if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vactive ||
- s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_vblank ||
- s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
REQUIRED_RESERVED_TIME ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- if (!all_planes_match_strategy(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) ||
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.per_stream_pstate_strategy[s->pmo_dcn4.cur_pstate_candidate][stream_index] == dml2_pmo_pstate_strategy_na) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
p_state_supported = false;
break;
}
@@ -1971,8 +2108,8 @@ bool pmo_dcn4_fams2_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pst
memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
if (in_out->last_candidate_failed) {
- if (s->pmo_dcn4.allow_state_increase_for_strategy[s->pmo_dcn4.cur_pstate_candidate] &&
- s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index) {
+ if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].allow_state_increase &&
+ s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index - 1) {
s->pmo_dcn4.cur_latency_index++;
success = true;
@@ -2060,15 +2197,15 @@ bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in
unsigned int i;
- for (i = 0; i < in_out->base_display_config->display_config.num_streams; i++) {
+ for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) {
if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
pmo->scratch.pmo_dcn4.z8_vblank_optimizable &&
- in_out->base_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
+ in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * 1000) {
success = false;
break;
}
if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
- in_out->base_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us) {
+ in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * 1000) {
success = false;
break;
}
@@ -2087,8 +2224,11 @@ bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in
if (!in_out->last_candidate_failed) {
if (pmo->scratch.pmo_dcn4.cur_stutter_candidate < pmo->scratch.pmo_dcn4.num_stutter_candidates) {
- for (i = 0; i < in_out->optimized_display_config->display_config.num_streams; i++) {
- in_out->optimized_display_config->display_config.stream_descriptors[i].overrides.minimum_vblank_idle_requirement_us = pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate];
+ for (i = 0; i < in_out->optimized_display_config->display_config.num_planes; i++) {
+ /* take the max of the current and the optimal reserved time */
+ in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns =
+ (long)math_max2(pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate] * 1000,
+ in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns);
}
success = true;
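Both the vblank setup and the stutter optimization now clamp the plane's reserved vblank time with math_max2, so a later pass can only grow a reservation, never shrink one made earlier. A minimal sketch of the clamp (illustrative helper; the kernel uses math_max2):

#include <stdio.h>

static long reserve_vblank_time_ns(long current_ns, double required_us)
{
	long required_ns = (long)(required_us * 1000.0);

	/* a reservation may only grow, never shrink */
	return required_ns > current_ns ? required_ns : current_ns;
}

int main(void)
{
	printf("%ld\n", reserve_vblank_time_ns(600000, 400.0)); /* stays 600000 */
	printf("%ld\n", reserve_vblank_time_ns(100000, 400.0)); /* grows to 400000 */
	return 0;
}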
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 75175d93add4..0c25bd3e9ac0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_PMO_FAMS2_DCN4_H__
#define __DML2_PMO_FAMS2_DCN4_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
index e0b9ece7901d..add51d41a515 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -2,10 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_pmo_factory.h"
#include "dml2_pmo_dcn4_fams2.h"
-#include "dml2_pmo_dcn4.h"
#include "dml2_pmo_dcn3.h"
#include "dml2_external_lib_deps.h"
@@ -28,15 +26,15 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
{
bool result = false;
- if (!out)
+ if (out == 0)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
switch (project_id) {
case dml2_project_dcn4x_stage1:
- out->initialize = pmo_dcn4_initialize;
- out->optimize_dcc_mcache = pmo_dcn4_optimize_dcc_mcache;
+ out->initialize = pmo_dcn4_fams2_initialize;
+ out->optimize_dcc_mcache = pmo_dcn4_fams2_optimize_dcc_mcache;
result = true;
break;
case dml2_project_dcn4x_stage2:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
index 9d3dc5e94be1..7218de1824cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_PMO_FACTORY_H__
#define __DML2_PMO_FACTORY_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
index e73579f1a88e..e17b5ceba447 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "lib_float_math.h"
#define ASSERT(condition)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
index 537cf6fd4c15..e13b0c5939b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __LIB_FLOAT_MATH_H__
#define __LIB_FLOAT_MATH_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
index 1b6dbfaa7ae8..d0e026d981b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_top_optimization.h"
#include "dml2_internal_shared_types.h"
#include "dml_top_mcache.h"
@@ -220,7 +219,6 @@ bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization
copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
lowest_state = 0;
- cur_state = 0;
while (highest_state > lowest_state) {
cur_state = (highest_state + lowest_state) / 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
index 1536afcbf73a..9f22ab33eab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_TOP_OPTIMIZATION_H__
#define __DML2_TOP_OPTIMIZATION_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
index 2fb3e2f45e07..f9f8869cd8b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_internal_shared_types.h"
#include "dml_top.h"
#include "dml2_mcg_factory.h"
@@ -28,6 +27,7 @@ bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
bool result = false;
memset(l, 0, sizeof(struct dml2_initialize_instance_locals));
+ memset(dml, 0, sizeof(struct dml2_instance));
memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb));
@@ -96,14 +96,12 @@ bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
{
struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
- /* Borrow the build_mode_programming_locals programming struct for DPMM call. */
- struct dml2_display_cfg_programming *dpmm_programming = dml->scratch.build_mode_programming_locals.mode_programming_params.programming;
+ struct dml2_display_cfg_programming *dpmm_programming = &dml->dpmm_instance.dpmm_scratch.programming;
bool result = false;
bool mcache_success = false;
- if (dpmm_programming)
- memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
+ memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
@@ -130,7 +128,7 @@ bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
/*
* Call DPMM to map all requirements to minimum clock state
*/
- if (result && dpmm_programming) {
+ if (result) {
l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
l->dppm_map_mode_params.programming = dpmm_programming;
@@ -268,9 +266,18 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase);
- if (vmin_success) {
+ if (l->optimized_display_config_with_meta.stage4.performed) {
+ /*
+		 * When 'performed' is true, the optimization has been applied to
+		 * optimized_display_config_with_meta and it has passed mode
+		 * support. However, it may or may not pass the test function
+		 * that checks whether actual Vmin is reached. As long as the
+		 * voltage is optimized there is still a power benefit even if
+		 * Vmin is not reached, so in this case we still copy this
+		 * optimization into the base display config.
+ */
memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
- l->base_display_config_with_meta.stage4.success = true;
+ l->base_display_config_with_meta.stage4.success = vmin_success;
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
index 7afd417071a5..a342ebfbe4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_debug.h"
#include "dml_top_mcache.h"
@@ -143,12 +142,12 @@ static unsigned int count_elements_in_span(int *array, unsigned int array_size,
while (span_start_index < array_size) {
for (i = span_start_index; i < array_size; i++) {
- if (array[i] - span_start_value > span) {
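+			/* Elements within 'span' of the start value extend the current run; the first element beyond it ends the run */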
+ if (array[i] - span_start_value <= span) {
if (i - span_start_index + 1 > greatest_element_count) {
greatest_element_count = i - span_start_index + 1;
}
+ } else
break;
- }
}
span_start_index++;
@@ -208,9 +207,9 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi
int temp, p0shift, p1shift;
unsigned int plane_index = 0;
unsigned int i;
- char odm_combine_factor = 1;
- char mpc_combine_factor = 1;
- char num_dpps;
+ unsigned int odm_combine_factor;
+ unsigned int mpc_combine_factor;
+ unsigned int num_dpps;
unsigned int num_boundaries;
enum dml2_scaling_transform scaling_transform;
const struct dml2_plane_parameters *plane;
@@ -227,10 +226,10 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi
plane = &params->display_cfg->plane_descriptors[plane_index];
stream = &params->display_cfg->stream_descriptors[plane->stream_index];
- odm_combine_factor = (char)params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
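+		/* num_dpps is the pipe count used by this plane: the ODM combine factor, or the MPC combine factor when ODM is not used */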
+ num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
if (odm_combine_factor == 1)
- mpc_combine_factor = (char)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
+ num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
else
mpc_combine_factor = 1;
@@ -260,13 +259,13 @@ bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissabi
	// The last element in the unshifted boundary array will always be the first pixel outside the
	// plane, which means there's no mcache associated with it, so -1
num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
- if (count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
- num_boundaries, max_per_pipe_vp_p0) <= 1) {
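+		/* Pass only if at most one mcache boundary lies within a per-pipe viewport span and the boundary count does not exceed the DPP count */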
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
+ num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) {
p0pass = true;
}
num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
- if (count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
- num_boundaries, max_per_pipe_vp_p1) <= 1) {
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
+ num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) {
p1pass = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
index bb12e4c30690..7b1f6f7143d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_TOP_MCACHE_H__
#define __DML_TOP_MCACHE_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
index de7d8a6a2d3d..e9b8e10695ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml2_debug.h"
int dml2_printf(const char *format, ...)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index 0403238df107..d51a1b6c62f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_DEBUG_H__
#define __DML2_DEBUG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index 5632cdacb7f4..aeac9f159fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML2_INTERNAL_SHARED_TYPES_H__
#define __DML2_INTERNAL_SHARED_TYPES_H__
@@ -107,10 +106,16 @@ struct dml2_dpmm_map_watermarks_params_in_out {
struct dml2_display_cfg_programming *programming;
};
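+/* Scratch owned by the DPMM instance, e.g. programming output for mode-support checks */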
+struct dml2_dpmm_scratch {
+ struct dml2_display_cfg_programming programming;
+};
+
struct dml2_dpmm_instance {
bool (*map_mode_to_soc_dpm)(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool (*map_watermarks)(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
bool (*unit_test)(void);
+
+ struct dml2_dpmm_scratch dpmm_scratch;
};
/*
@@ -266,6 +271,7 @@ struct dml2_fams2_meta {
unsigned int contention_delay_otg_vlines;
unsigned int min_allow_width_otg_vlines;
unsigned int nom_vtotal;
+ unsigned int vblank_start;
double nom_refresh_rate_hz;
double nom_frame_time_us;
unsigned int max_vtotal;
@@ -594,7 +600,7 @@ struct dml2_pmo_optimize_for_stutter_in_out {
struct display_configuation_with_meta *optimized_display_config;
};
-enum dml2_pmo_pstate_strategy {
+enum dml2_pmo_pstate_method {
dml2_pmo_pstate_strategy_na = 0,
/* hw exclusive modes */
dml2_pmo_pstate_strategy_vactive = 1,
@@ -612,6 +618,11 @@ enum dml2_pmo_pstate_strategy {
dml2_pmo_pstate_strategy_reserved_fw_drr_var = 22,
};
+struct dml2_pmo_pstate_strategy {
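+	/* p-state method applied to each stream in this candidate strategy */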
+ enum dml2_pmo_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
+ bool allow_state_increase;
+};
+
#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw - dml2_pmo_pstate_strategy_na + 1)) - 1) << dml2_pmo_pstate_strategy_na)
#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
#define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_clamped - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
@@ -634,8 +645,7 @@ struct dml2_pmo_scratch {
int stream_mask;
} pmo_dcn3;
struct {
- enum dml2_pmo_pstate_strategy per_stream_pstate_strategy[DML2_MAX_PLANES][DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
- bool allow_state_increase_for_strategy[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
+ struct dml2_pmo_pstate_strategy pstate_strategy_candidates[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
int num_pstate_candidates;
int cur_pstate_candidate;
@@ -661,6 +671,7 @@ struct dml2_pmo_scratch {
unsigned int num_timing_groups;
unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
bool group_is_drr_enabled[DML2_MAX_PLANES];
+ bool group_is_drr_active[DML2_MAX_PLANES];
double group_line_time_us[DML2_MAX_PLANES];
/* scheduling check locals */
@@ -676,10 +687,10 @@ struct dml2_pmo_init_data {
union {
struct {
/* populated once during initialization */
- enum dml2_pmo_pstate_strategy expanded_strategy_list_1_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2][PMO_DCN4_MAX_DISPLAYS];
- enum dml2_pmo_pstate_strategy expanded_strategy_list_2_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2 * 2][PMO_DCN4_MAX_DISPLAYS];
- enum dml2_pmo_pstate_strategy expanded_strategy_list_3_display[PMO_DCN4_MAX_BASE_STRATEGIES * 6 * 2][PMO_DCN4_MAX_DISPLAYS];
- enum dml2_pmo_pstate_strategy expanded_strategy_list_4_display[PMO_DCN4_MAX_BASE_STRATEGIES * 24 * 2][PMO_DCN4_MAX_DISPLAYS];
+ struct dml2_pmo_pstate_strategy expanded_strategy_list_1_display[PMO_DCN4_MAX_BASE_STRATEGIES * 2];
+ struct dml2_pmo_pstate_strategy expanded_strategy_list_2_display[PMO_DCN4_MAX_BASE_STRATEGIES * 4 * 4];
+ struct dml2_pmo_pstate_strategy expanded_strategy_list_3_display[PMO_DCN4_MAX_BASE_STRATEGIES * 6 * 6 * 6];
+ struct dml2_pmo_pstate_strategy expanded_strategy_list_4_display[PMO_DCN4_MAX_BASE_STRATEGIES * 8 * 8 * 8 * 8];
unsigned int num_expanded_strategies_per_list[PMO_DCN4_MAX_DISPLAYS];
} pmo_dcn4;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
index b566f53608c6..140ec01545db 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
@@ -101,6 +101,7 @@ struct dml2_wrapper_scratch {
struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping;
bool enable_flexible_pipe_mapping;
bool plane_duplicate_exists;
+ int hpo_stream_to_link_encoder_mapping[MAX_HPO_DP2_ENCODERS];
};
struct dml2_helper_det_policy_scratch {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 8b9dcee77266..bde4250853b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -733,7 +733,7 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st *
}
static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *out, unsigned int location,
- const struct dc_stream_state *in, const struct pipe_ctx *pipe)
+ const struct dc_stream_state *in, const struct pipe_ctx *pipe, struct dml2_context *dml2)
{
unsigned int output_bpc;
@@ -746,8 +746,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
- if (is_dp2p0_output_encoder(pipe))
- out->OutputEncoder[location] = dml_dp2p0;
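+		/* An HPO (DP2p0) stream encoder is mapped: program dml_dp2p0 at the mapped link encoder instance */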
+ if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:
out->OutputEncoder[location] = dml_edp;
@@ -953,7 +953,9 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
}
-static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
+static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
+ const struct dc_stream_state *in,
+ const struct soc_bounding_box_st *soc)
{
dml_uint_t width, height;
@@ -970,7 +972,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
- out->GPUVMMinPageSizeKBytes[location] = 256;
+ out->GPUVMMinPageSizeKBytes[location] = soc->gpuvm_min_page_size_kbytes;
out->ViewportWidth[location] = width;
out->ViewportHeight[location] = height;
@@ -1007,7 +1009,9 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->ScalerEnabled[location] = false;
}
-static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context)
+static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location,
+ const struct dc_plane_state *in, struct dc_state *context,
+ const struct soc_bounding_box_st *soc)
{
struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
if (!scaler_data)
@@ -1018,7 +1022,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
- out->GPUVMMinPageSizeKBytes[location] = 256;
+ out->GPUVMMinPageSizeKBytes[location] = soc->gpuvm_min_page_size_kbytes;
out->ViewportWidth[location] = scaler_data->viewport.width;
out->ViewportHeight[location] = scaler_data->viewport.height;
@@ -1193,6 +1197,7 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
plane_index = 0;
}
}
+
static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out,
unsigned int location, const struct dc_stream_state *in)
{
@@ -1233,6 +1238,30 @@ static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cf
}
}
}
+
+static void dml2_map_hpo_stream_encoder_to_hpo_link_encoder_index(struct dml2_context *dml2, struct dc_state *context)
+{
+ int i;
+ struct pipe_ctx *current_pipe_context;
+
+	/* Scratch gets reset to zero in dml, but a link encoder instance can legitimately be zero, so initialize the mapping entries to -1 */
+ for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
+ dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[i] = -1;
+ }
+
+	/* If an HPO stream encoder is allocated to a pipe, get the instance of its allocated HPO link encoder */
+ for (i = 0; i < MAX_PIPES; i++) {
+ current_pipe_context = &context->res_ctx.pipe_ctx[i];
+ if (current_pipe_context->stream &&
+ current_pipe_context->stream_res.hpo_dp_stream_enc &&
+ current_pipe_context->link_res.hpo_dp_link_enc &&
+ dc_is_dp_signal(current_pipe_context->stream->signal)) {
+ dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[current_pipe_context->stream_res.hpo_dp_stream_enc->inst] =
+ current_pipe_context->link_res.hpo_dp_link_enc->inst;
+ }
+ }
+}
+
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
int i = 0, j = 0, k = 0;
@@ -1256,6 +1285,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
dml2->v20.dml_core_ctx.policy.AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter;
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
+ dml2_map_hpo_stream_encoder_to_hpo_link_encoder_index(dml2, context);
for (i = 0; i < context->stream_count; i++) {
current_pipe_context = NULL;
@@ -1276,7 +1306,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
/*Call site for populate_dml_writeback_cfg_from_stream_state*/
populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback,
disp_cfg_stream_location, context->streams[i]);
@@ -1299,7 +1329,8 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
populate_dummy_dml_surface_cfg(&dml_dispcfg->surface, disp_cfg_plane_location, context->streams[i]);
- populate_dummy_dml_plane_cfg(&dml_dispcfg->plane, disp_cfg_plane_location, context->streams[i]);
+ populate_dummy_dml_plane_cfg(&dml_dispcfg->plane, disp_cfg_plane_location,
+ context->streams[i], &dml2->v20.dml_core_ctx.soc);
dml_dispcfg->plane.BlendingAndTiming[disp_cfg_plane_location] = disp_cfg_stream_location;
@@ -1315,7 +1346,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
- populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context);
+ populate_dml_plane_cfg_from_plane_state(
+ &dml_dispcfg->plane, disp_cfg_plane_location,
+ context->stream_status[i].plane_states[j], context,
+ &dml2->v20.dml_core_ctx.soc);
if (stream_mall_type == SUBVP_MAIN) {
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
@@ -1337,7 +1371,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (j >= 1) {
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context, dml2);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index 92238ff333a4..9a33158b63bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -161,14 +161,6 @@ bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
- /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */
- if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) {
- return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->link_res.hpo_dp_link_enc &&
- dc_is_dp_signal(pipe_ctx->stream->signal) &&
- (pipe_ctx->stream->link->remote_sinks[0]->sink_id == pipe_ctx->stream->sink->sink_id));
- }
-
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
pipe_ctx->link_res.hpo_dp_link_enc &&
dc_is_dp_signal(pipe_ctx->stream->signal));
@@ -421,7 +413,7 @@ unsigned int dml2_calc_max_scaled_time(
void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx)
{
- int i, j = 0;;
+ int i, j = 0;
struct mcif_arb_params *wb_arb_params = NULL;
struct dcn_bw_writeback *bw_writeback = NULL;
enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /*for now*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index d5dcc8b77281..866b0abcff1b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -575,7 +575,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
unsigned int lowest_state_idx = 0;
out_clks.p_state_supported = true;
- out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dispclk_mhz * 1000;
+	out_clks.dispclk_khz = 0; /* No requirement; the lowest state index generally reports the maximum dispclk. */
out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 023325e8f6e2..0f944fcfd5a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -236,6 +236,7 @@ struct dml2_configuration_options {
bool use_clock_dc_limits;
bool gpuvm_enable;
+ bool force_tdlut_enable;
struct dml2_soc_bb *bb_from_dmub;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
index f2a2d53e9689..f8f6019d8304 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c
@@ -684,9 +684,6 @@ void dpp1_set_degamma(
BREAK_TO_DEBUGGER();
break;
}
-
- REG_SEQ_SUBMIT();
- REG_SEQ_WAIT_DONE();
}
void dpp1_degamma_ram_select(
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index e16274fee31d..8473c694bfdc 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -59,6 +59,31 @@ void dpp35_dppclk_control(
DISPCLK_R_GATE_DISABLE, 0);
}
+void dpp35_program_bias_and_scale_fcnv(
+ struct dpp *dpp_base,
+ struct dc_bias_and_scale *params)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ if (!params->bias_and_scale_valid) {
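+		/* No valid bias/scale provided: program zero bias and the default scale */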
+ REG_SET(FCNV_FP_BIAS_R, 0, FCNV_FP_BIAS_R, 0);
+ REG_SET(FCNV_FP_BIAS_G, 0, FCNV_FP_BIAS_G, 0);
+ REG_SET(FCNV_FP_BIAS_B, 0, FCNV_FP_BIAS_B, 0);
+
+ REG_SET(FCNV_FP_SCALE_R, 0, FCNV_FP_SCALE_R, 0x1F000);
+ REG_SET(FCNV_FP_SCALE_G, 0, FCNV_FP_SCALE_G, 0x1F000);
+ REG_SET(FCNV_FP_SCALE_B, 0, FCNV_FP_SCALE_B, 0x1F000);
+ } else {
+ REG_SET(FCNV_FP_BIAS_R, 0, FCNV_FP_BIAS_R, params->bias_red);
+ REG_SET(FCNV_FP_BIAS_G, 0, FCNV_FP_BIAS_G, params->bias_green);
+ REG_SET(FCNV_FP_BIAS_B, 0, FCNV_FP_BIAS_B, params->bias_blue);
+
+ REG_SET(FCNV_FP_SCALE_R, 0, FCNV_FP_SCALE_R, params->scale_red);
+ REG_SET(FCNV_FP_SCALE_G, 0, FCNV_FP_SCALE_G, params->scale_green);
+ REG_SET(FCNV_FP_SCALE_B, 0, FCNV_FP_SCALE_B, params->scale_blue);
+ }
+}
+
static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
.dpp_read_state = dpp30_read_state,
@@ -81,7 +106,7 @@ static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
.dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
- .dpp_program_bias_and_scale = NULL,
+ .dpp_program_bias_and_scale = dpp35_program_bias_and_scale_fcnv,
.dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
.set_cursor_attributes = dpp3_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
index 135872d88219..3ca339a16e5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h
@@ -61,4 +61,7 @@ bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx,
void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable);
+void dpp35_program_bias_and_scale_fcnv(struct dpp *dpp_base,
+ struct dc_bias_and_scale *bias_and_scale);
+
#endif // __DCN35_DPP_H
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 7cae18fd7be9..97bf26fa3573 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -30,6 +30,7 @@
#include "basics/conversion.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_dpp.h"
+#include "dcn35/dcn35_dpp.h"
#define REG(reg)\
dpp->tf_regs->reg
@@ -240,7 +241,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
.dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND)
- .dpp_program_bias_and_scale = NULL,
+ .dpp_program_bias_and_scale = dpp35_program_bias_and_scale_fcnv,
.dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
.set_cursor_attributes = dpp401_set_cursor_attributes,
.set_cursor_position = dpp401_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index d0f8c9ff5232..3b6ca7974e18 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -120,11 +120,10 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
+	// DCN4 should always do cursor degamma for cursor color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
- cur_rom_en = 1;
- }
+ cur_rom_en = 1;
}
REG_UPDATE_3(CURSOR0_CONTROL,
@@ -246,16 +245,6 @@ void dpp401_set_cursor_matrix(
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix)
{
- struct dpp_input_csc_matrix cursor_tbl_entry;
- unsigned int i;
-
- if (cursor_csc_color_matrix.enable_adjustment == true) {
- for (i = 0; i < 12; i++)
- cursor_tbl_entry.regval[i] = cursor_csc_color_matrix.matrix[i];
-
- cursor_tbl_entry.color_space = color_space;
- dpp401_program_cursor_csc(dpp_base, color_space, &cursor_tbl_entry);
- } else {
- dpp401_program_cursor_csc(dpp_base, color_space, NULL);
- }
+	// Since we don't have cursor matrix information, force bypass mode by passing in an unknown color space
+ dpp401_program_cursor_csc(dpp_base, COLOR_SPACE_UNKNOWN, NULL);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 505929800426..5105fd580017 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -280,7 +280,8 @@ static void dpp401_dscl_set_scaler_filter(
static void dpp401_dscl_set_scl_filter(
struct dcn401_dpp *dpp,
const struct scaler_data *scl_data,
- bool chroma_coef_mode)
+ bool chroma_coef_mode,
+ bool force_coeffs_update)
{
bool h_2tap_hardcode_coef_en = false;
bool v_2tap_hardcode_coef_en = false;
@@ -343,7 +344,7 @@ static void dpp401_dscl_set_scl_filter(
|| (filter_v_c && (filter_v_c != dpp->filter_v_c));
}
- if (filter_updated) {
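+	/* Reload coefficient RAM when the filters changed or when the caller forces an update */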
+ if ((filter_updated) || (force_coeffs_update)) {
uint32_t scl_mode = REG_READ(SCL_MODE);
if (!h_2tap_hardcode_coef_en && filter_h) {
@@ -656,274 +657,252 @@ static void dpp401_dscl_set_recout(struct dcn401_dpp *dpp,
RECOUT_HEIGHT, recout->height);
}
/**
- * dpp401_dscl_program_easf - Program EASF
+ * dpp401_dscl_program_easf_v - Program EASF_V
*
* @dpp_base: High level DPP struct
 * @scl_data: scaler_data info
*
- * This is the primary function to program EASF
+ * This is the primary function to program vertical EASF registers
*
*/
-static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data)
+static void dpp401_dscl_program_easf_v(struct dpp *dpp_base, const struct scaler_data *scl_data)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
PERF_TRACE();
- REG_UPDATE(DSCL_SC_MODE,
- SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode);
- REG_UPDATE(DSCL_SC_MODE,
- SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en);
/* DSCL_EASF_V_MODE */
- REG_UPDATE(DSCL_EASF_V_MODE,
- SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en);
- REG_UPDATE(DSCL_EASF_V_MODE,
- SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor);
- REG_UPDATE(DSCL_EASF_V_MODE,
+ REG_SET_3(DSCL_EASF_V_MODE, 0,
+ SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en,
+ SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor,
SCL_EASF_V_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_v_ring);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
+
+ if (!scl_data->dscl_prog_data.easf_v_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* DSCL_EASF_V_BF_CNTL */
+ REG_SET_6(DSCL_EASF_V_BF_CNTL, 0,
+ SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en,
+ SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode,
+ SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode,
+ SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain,
+ SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain,
SCL_EASF_V_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_v_bf2_roc_gain);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
- SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
+ /* DSCL_EASF_V_RINGEST_3TAP_CNTLn */
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL1, 0,
+ SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt,
SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt_max);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
- SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL2, 0,
+ SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope,
SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt1_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
- SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL3, 0,
+ SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope,
SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_offset);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
- SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
+ /* DSCL_EASF_V_RINGEST_EVENTAP_REDUCE */
+ REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, 0,
+ SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1,
SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg2);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
- SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
+ /* DSCL_EASF_V_RINGEST_EVENTAP_GAIN */
+ REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_GAIN, 0,
+ SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1,
SCL_EASF_V_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain2);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
+ /* DSCL_EASF_V_BF_FINAL_MAX_MIN */
+ REG_SET_4(DSCL_EASF_V_BF_FINAL_MAX_MIN, 0,
+ SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa,
+ SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb,
+ SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina,
SCL_EASF_V_BF_MINB, scl_data->dscl_prog_data.easf_v_bf_minb);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
- SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
- SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
+ /* DSCL_EASF_V_BF1_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG0, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0,
+ SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0,
SCL_EASF_V_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
- SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
- SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG1, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1,
+ SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1,
SCL_EASF_V_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
- SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
- SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG2, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2,
+ SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2,
SCL_EASF_V_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
- SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
- SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG3, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3,
+ SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3,
SCL_EASF_V_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
- SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
- SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG4, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4,
+ SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4,
SCL_EASF_V_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
- SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
- SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG5, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5,
+ SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5,
SCL_EASF_V_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
- SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
- SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG6, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6,
+ SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6,
SCL_EASF_V_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7,
- SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7,
+ REG_SET_2(DSCL_EASF_V_BF1_PWL_SEG7, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7,
SCL_EASF_V_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg7);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
- SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
- SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
+ /* DSCL_EASF_V_BF3_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG0, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0,
+ SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0,
SCL_EASF_V_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
- SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
- SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG1, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1,
+ SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1,
SCL_EASF_V_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
- SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
- SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG2, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2,
+ SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2,
SCL_EASF_V_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
- SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
- SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG3, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3,
+ SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3,
SCL_EASF_V_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
- SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
- SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG4, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4,
+ SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4,
SCL_EASF_V_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5,
- SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5,
+ REG_SET_2(DSCL_EASF_V_BF3_PWL_SEG5, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5,
SCL_EASF_V_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set5);
+ PERF_TRACE();
+}
+/**
+ * dpp401_dscl_program_easf_h - Program EASF_H
+ *
+ * @dpp_base: High level DPP struct
+ * @scl_data: scaler_data info
+ *
+ * This is the primary function to program horizontal EASF registers
+ *
+ */
+static void dpp401_dscl_program_easf_h(struct dpp *dpp_base, const struct scaler_data *scl_data)
+{
+ struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+
+ PERF_TRACE();
/* DSCL_EASF_H_MODE */
- REG_UPDATE(DSCL_EASF_H_MODE,
- SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en);
- REG_UPDATE(DSCL_EASF_H_MODE,
- SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor);
- REG_UPDATE(DSCL_EASF_H_MODE,
+ REG_SET_3(DSCL_EASF_H_MODE, 0,
+ SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en,
+ SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor,
SCL_EASF_H_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_h_ring);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
+
+ if (!scl_data->dscl_prog_data.easf_h_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* DSCL_EASF_H_BF_CNTL */
+ REG_SET_6(DSCL_EASF_H_BF_CNTL, 0,
+ SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en,
+ SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode,
+ SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode,
+ SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain,
+ SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain,
SCL_EASF_H_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_h_bf2_roc_gain);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
- SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
+ /* DSCL_EASF_H_RINGEST_EVENTAP_REDUCE */
+ REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, 0,
+ SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1,
SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg2);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
- SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
+ /* DSCL_EASF_H_RINGEST_EVENTAP_GAIN */
+ REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_GAIN, 0,
+ SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1,
SCL_EASF_H_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain2);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
+ /* DSCL_EASF_H_BF_FINAL_MAX_MIN */
+ REG_SET_4(DSCL_EASF_H_BF_FINAL_MAX_MIN, 0,
+ SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa,
+ SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb,
+ SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina,
SCL_EASF_H_BF_MINB, scl_data->dscl_prog_data.easf_h_bf_minb);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
- SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
- SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
+ /* DSCL_EASF_H_BF1_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG0, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0,
+ SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0,
SCL_EASF_H_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
- SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
- SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG1, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1,
+ SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1,
SCL_EASF_H_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
- SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
- SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG2, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2,
+ SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2,
SCL_EASF_H_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
- SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
- SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG3, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3,
+ SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3,
SCL_EASF_H_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
- SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
- SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG4, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4,
+ SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4,
SCL_EASF_H_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
- SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
- SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG5, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5,
+ SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5,
SCL_EASF_H_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
- SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
- SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG6, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6,
+ SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6,
SCL_EASF_H_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7,
- SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7,
+ REG_SET_2(DSCL_EASF_H_BF1_PWL_SEG7, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7,
SCL_EASF_H_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg7);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
- SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
- SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
+ /* DSCL_EASF_H_BF3_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG0, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0,
+ SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0,
SCL_EASF_H_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
- SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
- SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG1, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1,
+ SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1,
SCL_EASF_H_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
- SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
- SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG2, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2,
+ SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2,
SCL_EASF_H_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
- SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
- SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG3, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3,
+ SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3,
SCL_EASF_H_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
- SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
- SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG4, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4,
+ SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4,
SCL_EASF_H_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5,
- SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5,
+ REG_SET_2(DSCL_EASF_H_BF3_PWL_SEG5, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5,
SCL_EASF_H_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set5);
+ PERF_TRACE();
+}
+/**
+ * dpp401_dscl_program_easf - Program EASF
+ *
+ * @dpp_base: High level DPP struct
+ * @scl_data: scaler_data info
+ *
+ * This is the primary function to program EASF
+ *
+ */
+static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data)
+{
+ struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+
+ PERF_TRACE();
+ /* DSCL_SC_MODE */
+ REG_SET_2(DSCL_SC_MODE, 0,
+ SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode,
+ SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en);
/* DSCL_EASF_SC_MATRIX_C0C1, DSCL_EASF_SC_MATRIX_C2C3 */
- REG_UPDATE(DSCL_SC_MATRIX_C0C1,
- SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0);
- REG_UPDATE(DSCL_SC_MATRIX_C0C1,
+ REG_SET_2(DSCL_SC_MATRIX_C0C1, 0,
+ SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0,
SCL_SC_MATRIX_C1, scl_data->dscl_prog_data.easf_matrix_c1);
- REG_UPDATE(DSCL_SC_MATRIX_C2C3,
- SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2);
- REG_UPDATE(DSCL_SC_MATRIX_C2C3,
+ REG_SET_2(DSCL_SC_MATRIX_C2C3, 0,
+ SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2,
SCL_SC_MATRIX_C3, scl_data->dscl_prog_data.easf_matrix_c3);
+ dpp401_dscl_program_easf_v(dpp_base, scl_data);
+ dpp401_dscl_program_easf_h(dpp_base, scl_data);
PERF_TRACE();
}
/**
@@ -958,10 +937,11 @@ static void dpp401_dscl_set_isharp_filter(
REG_UPDATE(ISHARP_DELTA_CTRL,
ISHARP_DELTA_LUT_HOST_SELECT, 0);
+ /* LUT data write is auto-indexed. Write index once */
+ REG_SET(ISHARP_DELTA_INDEX, 0,
+ ISHARP_DELTA_INDEX, 0);
for (level = 0; level < NUM_LEVELS; level++) {
filter_data = filter[level];
- REG_SET(ISHARP_DELTA_INDEX, 0,
- ISHARP_DELTA_INDEX, level);
REG_SET(ISHARP_DELTA_DATA, 0,
ISHARP_DELTA_DATA, filter_data);
}
@@ -971,112 +951,83 @@ static void dpp401_dscl_set_isharp_filter(
*
* @dpp_base: High level DPP struct
 * @scl_data: scaler_data info
+ * @program_isharp_1dlut: flag to program isharp 1D LUT
+ * @bs_coeffs_updated: blur and scale coefficients update flag
*
* This is the primary function to program isharp
*
*/
static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
- const struct scaler_data *scl_data)
+ const struct scaler_data *scl_data,
+ bool program_isharp_1dlut,
+ bool *bs_coeffs_updated)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+ *bs_coeffs_updated = false;
PERF_TRACE();
- /* ISHARP_EN */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_EN, scl_data->dscl_prog_data.isharp_en);
- /* ISHARP_NOISEDET_EN */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable);
- /* ISHARP_NOISEDET_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
- /* ISHARP_NOISEDET_UTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
- /* ISHARP_NOISEDET_DTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
- /* ISHARP_NOISEDET_UTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
- /* ISHARP_NOISEDET_DTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
+ /* ISHARP_MODE */
+ REG_SET_6(ISHARP_MODE, 0,
+ ISHARP_EN, scl_data->dscl_prog_data.isharp_en,
+ ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable,
+ ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode,
+ ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode,
+ ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode,
+ ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
+
+ /* Skip remaining register programming if ISHARP is disabled */
+ if (!scl_data->dscl_prog_data.isharp_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* ISHARP_NOISEDET_THRESHOLD */
+ REG_SET_2(ISHARP_NOISEDET_THRESHOLD, 0,
+ ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold,
ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
- /* ISHARP_NOISEDET_PWL_START_IN */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
- ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in);
- /* ISHARP_NOISEDET_PWL_END_IN */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
- ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in);
- /* ISHARP_NOISEDET_PWL_SLOPE */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
+
+ /* ISHARP_NOISE_GAIN_PWL */
+ REG_SET_3(ISHARP_NOISE_GAIN_PWL, 0,
+ ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in,
+ ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in,
ISHARP_NOISEDET_PWL_SLOPE, scl_data->dscl_prog_data.isharp_noise_det.pwl_slope);
- /* ISHARP_LBA_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode);
+
/* ISHARP_LBA: IN_SEG, BASE_SEG, SLOPE_SEG */
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
- ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
- ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
+ REG_SET_3(ISHARP_LBA_PWL_SEG0, 0,
+ ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0],
+ ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0],
ISHARP_LBA_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.isharp_lba.slope_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
- ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
- ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
+ REG_SET_3(ISHARP_LBA_PWL_SEG1, 0,
+ ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1],
+ ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1],
ISHARP_LBA_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.isharp_lba.slope_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
- ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
- ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
+ REG_SET_3(ISHARP_LBA_PWL_SEG2, 0,
+ ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2],
+ ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2],
ISHARP_LBA_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.isharp_lba.slope_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
- ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
- ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
+ REG_SET_3(ISHARP_LBA_PWL_SEG3, 0,
+ ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3],
+ ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3],
ISHARP_LBA_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.isharp_lba.slope_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
- ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
- ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
+ REG_SET_3(ISHARP_LBA_PWL_SEG4, 0,
+ ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4],
+ ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4],
ISHARP_LBA_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.isharp_lba.slope_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG5,
- ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG5,
+ REG_SET_2(ISHARP_LBA_PWL_SEG5, 0,
+ ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5],
ISHARP_LBA_PWL_BASE_SEG5, scl_data->dscl_prog_data.isharp_lba.base_seg[5]);
- /* ISHARP_FMT_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode);
- /* ISHARP_FMT_NORM */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
/* ISHARP_DELTA_LUT */
- dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
- /* ISHARP_NLDELTA_SCLIP_EN_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p);
- /* ISHARP_NLDELTA_SCLIP_PIVOT_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p);
- /* ISHARP_NLDELTA_SCLIP_SLOPE_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p);
- /* ISHARP_NLDELTA_SCLIP_EN_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n);
- /* ISHARP_NLDELTA_SCLIP_PIVOT_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n);
- /* ISHARP_NLDELTA_SCLIP_SLOPE_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
+ if (!program_isharp_1dlut)
+ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
+
+ /* ISHARP_NLDELTA_SOFT_CLIP */
+ REG_SET_6(ISHARP_NLDELTA_SOFT_CLIP, 0,
+ ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p,
+ ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p,
+ ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p,
+ ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n,
+ ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n,
ISHARP_NLDELTA_SCLIP_SLOPE_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_n);
/* Blur and Scale Coefficients - SCL_COEF_RAM_TAP_SELECT */
@@ -1086,12 +1037,14 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
dpp, scl_data->taps.v_taps,
SCL_COEF_VERTICAL_BLUR_SCALE,
scl_data->dscl_prog_data.filter_blur_scale_v);
+ *bs_coeffs_updated = true;
}
if (scl_data->dscl_prog_data.filter_blur_scale_h) {
dpp401_dscl_set_scaler_filter(
dpp, scl_data->taps.h_taps,
SCL_COEF_HORIZONTAL_BLUR_SCALE,
scl_data->dscl_prog_data.filter_blur_scale_h);
+ *bs_coeffs_updated = true;
}
}
PERF_TRACE();
@@ -1122,12 +1075,29 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+ bool program_isharp_1dlut = false;
+ bool bs_coeffs_updated = false;
+
if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
return;
PERF_TRACE();
+ /* If only the sharpness level has changed, update just the 1D LUT and return early */
+ if (scl_data->dscl_prog_data.isharp_en &&
+ (dpp->scl_data.dscl_prog_data.sharpness_level
+ != scl_data->dscl_prog_data.sharpness_level)) {
+ /* ISHARP_DELTA_LUT */
+ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
+ dpp->scl_data.dscl_prog_data.sharpness_level = scl_data->dscl_prog_data.sharpness_level;
+ dpp->scl_data.dscl_prog_data.isharp_delta = scl_data->dscl_prog_data.isharp_delta;
+
+ if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
+ return;
+ program_isharp_1dlut = true;
+ }
+
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
@@ -1181,7 +1151,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
- dpp401_dscl_program_isharp(dpp_base, scl_data);
+ dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
return;
}
@@ -1208,12 +1178,18 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
SCL_V_NUM_TAPS_C, v_num_taps_c,
SCL_H_NUM_TAPS_C, h_num_taps_c);
- dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+ /* ISharp configuration
+ * - B&S coeffs are written to the same coeff RAM as the WB scaler coeffs
+ * - the coeff RAM toggle happens as part of EASF programming
+ * - if only the B&S coeffs are being programmed, the WB scaler coeffs
+ *   must be reprogrammed and the coeff RAM toggled together
+ */
+ //if (dpp->base.ctx->dc->config.prefer_easf)
+ dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
+
+ dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr, bs_coeffs_updated);
/* Edge adaptive scaler function configuration */
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_program_easf(dpp_base, scl_data);
- /* isharp configuration */
- //if (dpp->base.ctx->dc->config.prefer_easf)
- dpp401_dscl_program_isharp(dpp_base, scl_data);
PERF_TRACE();
}
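
[Editor's note] Most of the dpp401_dscl churn above collapses runs of single-field REG_UPDATE calls into one REG_SET_N call per register, trading several read-modify-write cycles for a single composed write. A rough self-contained sketch of the difference; the helpers below are simplified stand-ins for the DC register macros, which really take shift/mask tables from the register headers.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg; /* stand-in for one MMIO register */

static uint32_t mmio_read(void) { return fake_reg; }
static void mmio_write(uint32_t val) { fake_reg = val; }

/* REG_UPDATE-style: one full read-modify-write cycle per field */
static void update_field(uint32_t mask, int shift, uint32_t v)
{
	uint32_t tmp = mmio_read();                   /* read   */
	tmp = (tmp & ~mask) | ((v << shift) & mask);  /* modify */
	mmio_write(tmp);                              /* write  */
}

/* REG_SET_2-style: compose both fields, then a single register write */
static void set_two_fields(uint32_t m0, int s0, uint32_t v0,
			   uint32_t m1, int s1, uint32_t v1)
{
	uint32_t tmp = 0; /* REG_SET seeds from a given base value, 0 here */

	tmp |= (v0 << s0) & m0;
	tmp |= (v1 << s1) & m1;
	mmio_write(tmp); /* one MMIO access instead of two RMW cycles */
}

int main(void)
{
	update_field(0x00ff, 0, 0x12); /* two RMW cycles ... */
	update_field(0xff00, 8, 0x34);
	printf("rmw: 0x%04x\n", fake_reg);

	set_two_fields(0x00ff, 0, 0x12, 0xff00, 8, 0x34); /* ... vs one write */
	printf("set: 0x%04x\n", fake_reg);
	return 0;
}
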
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index a1727e5bf024..ebd5df1a36e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -668,6 +668,7 @@ static bool decide_dsc_bandwidth_range(
*/
static bool decide_dsc_target_bpp_x16(
const struct dc_dsc_policy *policy,
+ const struct dc_dsc_config_options *options,
const struct dsc_enc_caps *dsc_common_caps,
const int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
@@ -682,7 +683,7 @@ static bool decide_dsc_target_bpp_x16(
if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
num_slices_h, dsc_common_caps, timing, link_encoding, &range)) {
if (target_bandwidth_kbps >= range.stream_kbps) {
- if (policy->enable_dsc_when_not_needed)
+ if (policy->enable_dsc_when_not_needed || options->force_dsc_when_not_needed)
/* enable max bpp even if DSC is not needed */
*target_bpp_x16 = range.max_target_bpp_x16;
} else if (target_bandwidth_kbps >= range.max_kbps) {
@@ -882,7 +883,7 @@ static bool setup_dsc_config(
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
- dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy);
+ dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy, link_encoding);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -1080,6 +1081,7 @@ static bool setup_dsc_config(
if (target_bandwidth_kbps > 0) {
is_dsc_possible = decide_dsc_target_bpp_x16(
&policy,
+ options,
&dsc_common_caps,
target_bandwidth_kbps,
timing,
@@ -1171,7 +1173,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
- struct dc_dsc_policy *policy)
+ struct dc_dsc_policy *policy,
+ const enum dc_link_encoding_format link_encoding)
{
uint32_t bpc = 0;
@@ -1235,10 +1238,7 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
policy->max_target_bpp = max_target_bpp_limit_override_x16 / 16;
/* enable DSC when not needed, default false */
- if (dsc_policy_enable_dsc_when_not_needed)
- policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed;
- else
- policy->enable_dsc_when_not_needed = false;
+ policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed;
}
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit)
@@ -1267,4 +1267,5 @@ void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_
options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine;
options->max_target_bpp_limit_override_x16 = 0;
options->slice_height_granularity = 1;
+ options->force_dsc_when_not_needed = false;
}
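
[Editor's note] The dc_dsc.c change threads a new per-call option, force_dsc_when_not_needed, into the bpp decision so DSC can stay enabled even when the link already has enough bandwidth, independently of the global policy flag. A hypothetical condensed view of that decision; the struct layouts and helper here are simplified stand-ins, not the real dc_dsc types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct policy  { bool enable_dsc_when_not_needed; };
struct options { bool force_dsc_when_not_needed; };

/* Returns the target bpp (x16 fixed point), or 0 to leave DSC off. */
static uint32_t pick_target_bpp_x16(const struct policy *p,
				    const struct options *o,
				    uint32_t stream_kbps,
				    uint32_t link_kbps,
				    uint32_t max_bpp_x16)
{
	if (link_kbps >= stream_kbps) {
		/* Link already fits the stream: compress only if asked to. */
		if (p->enable_dsc_when_not_needed || o->force_dsc_when_not_needed)
			return max_bpp_x16;
		return 0;
	}
	/* Otherwise DSC is required; the real code scales bpp to the link. */
	return max_bpp_x16;
}

int main(void)
{
	struct policy  p = { .enable_dsc_when_not_needed = false };
	struct options o = { .force_dsc_when_not_needed = true };

	/* Link fits the stream, but the option forces DSC on at max bpp. */
	printf("bpp_x16 = %u\n",
	       pick_target_bpp_x16(&p, &o, 8000000, 9000000, 16 * 16));
	return 0;
}
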
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 6acb6699f146..61678b0a5a1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -27,7 +27,7 @@ static void dsc401_disconnect(struct display_stream_compressor *dsc);
static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
-const struct dsc_funcs dcn401_dsc_funcs = {
+static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_get_enc_caps = dsc401_get_enc_caps,
.dsc_read_state = dsc401_read_state,
.dsc_validate_stream = dsc401_validate_stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/Makefile b/drivers/gpu/drm/amd/display/dc/dwb/Makefile
index 16f7a454fed9..3952ba4cd508 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dwb/Makefile
@@ -25,6 +25,15 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN30
+###############################################################################
+DWB_DCN30 = dcn30_dwb.o dcn30_dwb_cm.o
+
+AMD_DAL_DWB_DCN30 = $(addprefix $(AMDDALPATH)/dc/dwb/dcn30/,$(DWB_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DWB_DCN30)
+
+###############################################################################
# DCN35
###############################################################################
DWB_DCN35 = dcn35_dwb.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
index bd98b327a6c7..bd98b327a6c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
index fae98cf52020..fae98cf52020 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
index 0f3f7c5fbaec..0f3f7c5fbaec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c
index 03a50c32fcfe..03a50c32fcfe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c
index b23a809999ed..d5e8294f5a16 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c
@@ -21,7 +21,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "reg_helper.h"
#include "dcn35_dwb.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c
index 46415cab23ab..928abca18a18 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c
@@ -86,7 +86,13 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(2),
ddc_data_regs_dcn2(3),
ddc_data_regs_dcn2(4),
-// ddc_data_regs_dcn2(5),
+ {
+ // add a dummy entry for cases where no such port exists
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ },
{
// add a dummy entry for cases where no such port exists
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
@@ -107,7 +113,13 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(2),
ddc_clk_regs_dcn2(3),
ddc_clk_regs_dcn2(4),
-// ddc_clk_regs_dcn2(5),
+ {
+ // add a dummy entry for cases where no such port exists
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ },
{
// add a dummy entry for cases where no such port exists
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
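
[Editor's note] The dummy entries added above exist because these register tables are indexed by DDC port number; dropping port 5 outright would shift every later entry. A small standalone illustration of the index-stable-table idea; the struct and macros are simplified stand-ins for the real ddc_registers definitions.

#include <stdio.h>

struct ddc_regs { unsigned int ddc_setup; };

#define DDC_REGS(id)   { .ddc_setup = 0x100 + (id) }
#define DDC_REGS_DUMMY { .ddc_setup = 0 } /* no such port */

static const struct ddc_regs ddc_data_regs[] = {
	DDC_REGS(1), DDC_REGS(2), DDC_REGS(3), DDC_REGS(4),
	DDC_REGS_DUMMY, /* port 5 absent on this ASIC */
	DDC_REGS_DUMMY, /* port 6 absent */
};

int main(void)
{
	/* Lookup stays a direct index even with absent ports. */
	printf("port 3 setup reg: 0x%x\n", ddc_data_regs[3 - 1].ddc_setup);
	printf("port 5 setup reg: 0x%x (dummy)\n", ddc_data_regs[5 - 1].ddc_setup);
	return 0;
}
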
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/Makefile b/drivers/gpu/drm/amd/display/dc/hpo/Makefile
index c248bd86b477..7f2c9ee0dff1 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hpo/Makefile
@@ -25,6 +25,21 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN30
+###############################################################################
+
+AMD_DAL_HPO_DCN30 = $(addprefix $(AMDDALPATH)/dc/hpo/dcn30/,$(HPO_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HPO_DCN30)
+###############################################################################
+# DCN31
+###############################################################################
+HPO_DCN31 = dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o
+
+AMD_DAL_HPO_DCN31 = $(addprefix $(AMDDALPATH)/dc/hpo/dcn31/,$(HPO_DCN31))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HPO_DCN31)
+###############################################################################
# DCN32
###############################################################################
HPO_DCN32 = dcn32_hpo_dp_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
index 03b4ac2f1991..03b4ac2f1991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
index 51f5781325e8..51f5781325e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
index 678db949cfe3..678db949cfe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h
index 82c3b3ac1f0d..82c3b3ac1f0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 6293173ba2b9..5eb3da8d5206 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -545,6 +545,7 @@ static void hubbub35_init(struct hubbub *hubbub)
DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 256,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 256);
+ memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate));
}
/*static void hubbub35_set_request_limit(struct hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 181041d6d177..37d26fa0b6fb 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -75,108 +75,108 @@ bool hubbub401_program_urgent_watermarks(
/* Repeat for water mark set A and B */
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.urgent > hubbub2->watermarks.dcn4.a.urgent) {
- hubbub2->watermarks.dcn4.a.urgent = watermarks->dcn4.a.urgent;
+ if (safe_to_lower || watermarks->dcn4x.a.urgent > hubbub2->watermarks.dcn4x.a.urgent) {
+ hubbub2->watermarks.dcn4x.a.urgent = watermarks->dcn4x.a.urgent;
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
- DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, watermarks->dcn4.a.urgent);
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, watermarks->dcn4x.a.urgent);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.a.urgent, watermarks->dcn4.a.urgent);
- } else if (watermarks->dcn4.a.urgent < hubbub2->watermarks.dcn4.a.urgent)
+ watermarks->dcn4x.a.urgent, watermarks->dcn4x.a.urgent);
+ } else if (watermarks->dcn4x.a.urgent < hubbub2->watermarks.dcn4x.a.urgent)
wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor. */
- if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_flip
- > hubbub2->watermarks.dcn4.a.frac_urg_bw_flip) {
- hubbub2->watermarks.dcn4.a.frac_urg_bw_flip = watermarks->dcn4.a.frac_urg_bw_flip;
+ if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_flip
+ > hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip) {
+ hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip = watermarks->dcn4x.a.frac_urg_bw_flip;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->dcn4.a.frac_urg_bw_flip);
- } else if (watermarks->dcn4.a.frac_urg_bw_flip
- < hubbub2->watermarks.dcn4.a.frac_urg_bw_flip)
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->dcn4x.a.frac_urg_bw_flip);
+ } else if (watermarks->dcn4x.a.frac_urg_bw_flip
+ < hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_nom
- > hubbub2->watermarks.dcn4.a.frac_urg_bw_nom) {
- hubbub2->watermarks.dcn4.a.frac_urg_bw_nom = watermarks->dcn4.a.frac_urg_bw_nom;
+ if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_nom
+ > hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom) {
+ hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom = watermarks->dcn4x.a.frac_urg_bw_nom;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->dcn4.a.frac_urg_bw_nom);
- } else if (watermarks->dcn4.a.frac_urg_bw_nom
- < hubbub2->watermarks.dcn4.a.frac_urg_bw_nom)
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->dcn4x.a.frac_urg_bw_nom);
+ } else if (watermarks->dcn4x.a.frac_urg_bw_nom
+ < hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.frac_urg_bw_mall
- > hubbub2->watermarks.dcn4.a.frac_urg_bw_mall) {
- hubbub2->watermarks.dcn4.a.frac_urg_bw_mall = watermarks->dcn4.a.frac_urg_bw_mall;
+ if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_mall
+ > hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall) {
+ hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall = watermarks->dcn4x.a.frac_urg_bw_mall;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, watermarks->dcn4.a.frac_urg_bw_mall);
- } else if (watermarks->dcn4.a.frac_urg_bw_mall < hubbub2->watermarks.dcn4.a.frac_urg_bw_mall)
+ DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, watermarks->dcn4x.a.frac_urg_bw_mall);
+ } else if (watermarks->dcn4x.a.frac_urg_bw_mall < hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem) {
- hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem = watermarks->dcn4.a.refcyc_per_trip_to_mem;
+ if (safe_to_lower || watermarks->dcn4x.a.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem) {
+ hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem = watermarks->dcn4x.a.refcyc_per_trip_to_mem;
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
- DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, watermarks->dcn4.a.refcyc_per_trip_to_mem);
- } else if (watermarks->dcn4.a.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4.a.refcyc_per_trip_to_mem)
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, watermarks->dcn4x.a.refcyc_per_trip_to_mem);
+ } else if (watermarks->dcn4x.a.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem) {
- hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem = watermarks->dcn4.a.refcyc_per_meta_trip_to_mem;
+ if (safe_to_lower || watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem) {
+ hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem = watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem;
REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, 0,
- DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, watermarks->dcn4.a.refcyc_per_meta_trip_to_mem);
- } else if (watermarks->dcn4.a.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4.a.refcyc_per_meta_trip_to_mem)
+ DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem);
+ } else if (watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.urgent > hubbub2->watermarks.dcn4.b.urgent) {
- hubbub2->watermarks.dcn4.b.urgent = watermarks->dcn4.b.urgent;
+ if (safe_to_lower || watermarks->dcn4x.b.urgent > hubbub2->watermarks.dcn4x.b.urgent) {
+ hubbub2->watermarks.dcn4x.b.urgent = watermarks->dcn4x.b.urgent;
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
- DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, watermarks->dcn4.b.urgent);
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, watermarks->dcn4x.b.urgent);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.b.urgent, watermarks->dcn4.b.urgent);
- } else if (watermarks->dcn4.b.urgent < hubbub2->watermarks.dcn4.b.urgent)
+ watermarks->dcn4x.b.urgent, watermarks->dcn4x.b.urgent);
+ } else if (watermarks->dcn4x.b.urgent < hubbub2->watermarks.dcn4x.b.urgent)
wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor. */
- if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_flip
- > hubbub2->watermarks.dcn4.b.frac_urg_bw_flip) {
- hubbub2->watermarks.dcn4.b.frac_urg_bw_flip = watermarks->dcn4.b.frac_urg_bw_flip;
+ if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_flip
+ > hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip) {
+ hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip = watermarks->dcn4x.b.frac_urg_bw_flip;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->dcn4.b.frac_urg_bw_flip);
- } else if (watermarks->dcn4.b.frac_urg_bw_flip
- < hubbub2->watermarks.dcn4.b.frac_urg_bw_flip)
+ DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->dcn4x.b.frac_urg_bw_flip);
+ } else if (watermarks->dcn4x.b.frac_urg_bw_flip
+ < hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_nom
- > hubbub2->watermarks.dcn4.b.frac_urg_bw_nom) {
- hubbub2->watermarks.dcn4.b.frac_urg_bw_nom = watermarks->dcn4.b.frac_urg_bw_nom;
+ if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_nom
+ > hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom) {
+ hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom = watermarks->dcn4x.b.frac_urg_bw_nom;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->dcn4.b.frac_urg_bw_nom);
- } else if (watermarks->dcn4.b.frac_urg_bw_nom
- < hubbub2->watermarks.dcn4.b.frac_urg_bw_nom)
+ DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->dcn4x.b.frac_urg_bw_nom);
+ } else if (watermarks->dcn4x.b.frac_urg_bw_nom
+ < hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.frac_urg_bw_mall
- > hubbub2->watermarks.dcn4.b.frac_urg_bw_mall) {
- hubbub2->watermarks.dcn4.b.frac_urg_bw_mall = watermarks->dcn4.b.frac_urg_bw_mall;
+ if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_mall
+ > hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall) {
+ hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall = watermarks->dcn4x.b.frac_urg_bw_mall;
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, 0,
- DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, watermarks->dcn4.b.frac_urg_bw_mall);
- } else if (watermarks->dcn4.b.frac_urg_bw_mall < hubbub2->watermarks.dcn4.b.frac_urg_bw_mall)
+ DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, watermarks->dcn4x.b.frac_urg_bw_mall);
+ } else if (watermarks->dcn4x.b.frac_urg_bw_mall < hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem) {
- hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem = watermarks->dcn4.b.refcyc_per_trip_to_mem;
+ if (safe_to_lower || watermarks->dcn4x.b.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem) {
+ hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem = watermarks->dcn4x.b.refcyc_per_trip_to_mem;
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
- DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, watermarks->dcn4.b.refcyc_per_trip_to_mem);
- } else if (watermarks->dcn4.b.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4.b.refcyc_per_trip_to_mem)
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, watermarks->dcn4x.b.refcyc_per_trip_to_mem);
+ } else if (watermarks->dcn4x.b.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem) {
- hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem = watermarks->dcn4.b.refcyc_per_meta_trip_to_mem;
+ if (safe_to_lower || watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem) {
+ hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem = watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem;
REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, 0,
- DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, watermarks->dcn4.b.refcyc_per_meta_trip_to_mem);
- } else if (watermarks->dcn4.b.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4.b.refcyc_per_meta_trip_to_mem)
+ DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem);
+ } else if (watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem)
wm_pending = true;
return wm_pending;
@@ -192,89 +192,89 @@ bool hubbub401_program_stutter_watermarks(
bool wm_pending = false;
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.sr_enter
- > hubbub2->watermarks.dcn4.a.sr_enter) {
- hubbub2->watermarks.dcn4.a.sr_enter =
- watermarks->dcn4.a.sr_enter;
+ if (safe_to_lower || watermarks->dcn4x.a.sr_enter
+ > hubbub2->watermarks.dcn4x.a.sr_enter) {
+ hubbub2->watermarks.dcn4x.a.sr_enter =
+ watermarks->dcn4x.a.sr_enter;
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, watermarks->dcn4.a.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, watermarks->dcn4x.a.sr_enter);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.a.sr_enter, watermarks->dcn4.a.sr_enter);
+ watermarks->dcn4x.a.sr_enter, watermarks->dcn4x.a.sr_enter);
// On dGPU, Z states are N/A, so program the other 3 Stutter Enter wm A registers with the same value
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, watermarks->dcn4.a.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, watermarks->dcn4x.a.sr_enter);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, watermarks->dcn4.a.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, watermarks->dcn4x.a.sr_enter);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, watermarks->dcn4.a.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, watermarks->dcn4x.a.sr_enter);
- } else if (watermarks->dcn4.a.sr_enter
- < hubbub2->watermarks.dcn4.a.sr_enter)
+ } else if (watermarks->dcn4x.a.sr_enter
+ < hubbub2->watermarks.dcn4x.a.sr_enter)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.a.sr_exit
- > hubbub2->watermarks.dcn4.a.sr_exit) {
- hubbub2->watermarks.dcn4.a.sr_exit =
- watermarks->dcn4.a.sr_exit;
+ if (safe_to_lower || watermarks->dcn4x.a.sr_exit
+ > hubbub2->watermarks.dcn4x.a.sr_exit) {
+ hubbub2->watermarks.dcn4x.a.sr_exit =
+ watermarks->dcn4x.a.sr_exit;
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, watermarks->dcn4.a.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, watermarks->dcn4x.a.sr_exit);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.a.sr_exit, watermarks->dcn4.a.sr_exit);
+ watermarks->dcn4x.a.sr_exit, watermarks->dcn4x.a.sr_exit);
// On dGPU, Z states are N/A, so program the other 3 Stutter Exit wm A registers with the same value
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, watermarks->dcn4.a.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, watermarks->dcn4x.a.sr_exit);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, watermarks->dcn4.a.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, watermarks->dcn4x.a.sr_exit);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, watermarks->dcn4.a.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, watermarks->dcn4x.a.sr_exit);
- } else if (watermarks->dcn4.a.sr_exit
- < hubbub2->watermarks.dcn4.a.sr_exit)
+ } else if (watermarks->dcn4x.a.sr_exit
+ < hubbub2->watermarks.dcn4x.a.sr_exit)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.sr_enter
- > hubbub2->watermarks.dcn4.b.sr_enter) {
- hubbub2->watermarks.dcn4.b.sr_enter =
- watermarks->dcn4.b.sr_enter;
+ if (safe_to_lower || watermarks->dcn4x.b.sr_enter
+ > hubbub2->watermarks.dcn4x.b.sr_enter) {
+ hubbub2->watermarks.dcn4x.b.sr_enter =
+ watermarks->dcn4x.b.sr_enter;
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, watermarks->dcn4.b.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, watermarks->dcn4x.b.sr_enter);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.b.sr_enter, watermarks->dcn4.b.sr_enter);
+ watermarks->dcn4x.b.sr_enter, watermarks->dcn4x.b.sr_enter);
// On dGPU, Z states are N/A, so program the other 3 Stutter Enter wm B registers with the same value
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, watermarks->dcn4.b.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, watermarks->dcn4x.b.sr_enter);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, watermarks->dcn4.b.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, watermarks->dcn4x.b.sr_enter);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, watermarks->dcn4.b.sr_enter);
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, watermarks->dcn4x.b.sr_enter);
- } else if (watermarks->dcn4.b.sr_enter
- < hubbub2->watermarks.dcn4.b.sr_enter)
+ } else if (watermarks->dcn4x.b.sr_enter
+ < hubbub2->watermarks.dcn4x.b.sr_enter)
wm_pending = true;
- if (safe_to_lower || watermarks->dcn4.b.sr_exit
- > hubbub2->watermarks.dcn4.b.sr_exit) {
- hubbub2->watermarks.dcn4.b.sr_exit =
- watermarks->dcn4.b.sr_exit;
+ if (safe_to_lower || watermarks->dcn4x.b.sr_exit
+ > hubbub2->watermarks.dcn4x.b.sr_exit) {
+ hubbub2->watermarks.dcn4x.b.sr_exit =
+ watermarks->dcn4x.b.sr_exit;
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, watermarks->dcn4.b.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, watermarks->dcn4x.b.sr_exit);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
- watermarks->dcn4.b.sr_exit, watermarks->dcn4.b.sr_exit);
+ watermarks->dcn4x.b.sr_exit, watermarks->dcn4x.b.sr_exit);
// On dGPU, Z states are N/A, so program the other 3 Stutter Exit wm B registers with the same value
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, watermarks->dcn4.b.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, watermarks->dcn4x.b.sr_exit);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, watermarks->dcn4.b.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, watermarks->dcn4x.b.sr_exit);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, watermarks->dcn4.b.sr_exit);
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, watermarks->dcn4x.b.sr_exit);
- } else if (watermarks->dcn4.b.sr_exit
- < hubbub2->watermarks.dcn4.b.sr_exit)
+ } else if (watermarks->dcn4x.b.sr_exit
+ < hubbub2->watermarks.dcn4x.b.sr_exit)
wm_pending = true;
return wm_pending;
@@ -292,116 +292,116 @@ bool hubbub401_program_pstate_watermarks(
/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.uclk_pstate
- > hubbub2->watermarks.dcn4.a.uclk_pstate) {
- hubbub2->watermarks.dcn4.a.uclk_pstate =
- watermarks->dcn4.a.uclk_pstate;
+ if (safe_to_lower || watermarks->dcn4x.a.uclk_pstate
+ > hubbub2->watermarks.dcn4x.a.uclk_pstate) {
+ hubbub2->watermarks.dcn4x.a.uclk_pstate =
+ watermarks->dcn4x.a.uclk_pstate;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4.a.uclk_pstate);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4x.a.uclk_pstate);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.uclk_pstate, watermarks->dcn4.a.uclk_pstate);
- } else if (watermarks->dcn4.a.uclk_pstate
- < hubbub2->watermarks.dcn4.a.uclk_pstate)
+ watermarks->dcn4x.a.uclk_pstate, watermarks->dcn4x.a.uclk_pstate);
+ } else if (watermarks->dcn4x.a.uclk_pstate
+ < hubbub2->watermarks.dcn4x.a.uclk_pstate)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.uclk_pstate
- > hubbub2->watermarks.dcn4.b.uclk_pstate) {
- hubbub2->watermarks.dcn4.b.uclk_pstate =
- watermarks->dcn4.b.uclk_pstate;
+ if (safe_to_lower || watermarks->dcn4x.b.uclk_pstate
+ > hubbub2->watermarks.dcn4x.b.uclk_pstate) {
+ hubbub2->watermarks.dcn4x.b.uclk_pstate =
+ watermarks->dcn4x.b.uclk_pstate;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4.b.uclk_pstate);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4x.b.uclk_pstate);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.uclk_pstate, watermarks->dcn4.b.uclk_pstate);
- } else if (watermarks->dcn4.b.uclk_pstate
- < hubbub2->watermarks.dcn4.b.uclk_pstate)
+ watermarks->dcn4x.b.uclk_pstate, watermarks->dcn4x.b.uclk_pstate);
+ } else if (watermarks->dcn4x.b.uclk_pstate
+ < hubbub2->watermarks.dcn4x.b.uclk_pstate)
wm_pending = true;
/* Section for UCLK_PSTATE_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */
- if (safe_to_lower || watermarks->dcn4.a.temp_read_or_ppt
- > hubbub2->watermarks.dcn4.a.temp_read_or_ppt) {
- hubbub2->watermarks.dcn4.a.temp_read_or_ppt =
- watermarks->dcn4.a.temp_read_or_ppt;
+ if (safe_to_lower || watermarks->dcn4x.a.temp_read_or_ppt
+ > hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) {
+ hubbub2->watermarks.dcn4x.a.temp_read_or_ppt =
+ watermarks->dcn4x.a.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, 0,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4.a.temp_read_or_ppt);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4x.a.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.temp_read_or_ppt, watermarks->dcn4.a.temp_read_or_ppt);
- } else if (watermarks->dcn4.a.temp_read_or_ppt
- < hubbub2->watermarks.dcn4.a.temp_read_or_ppt)
+ watermarks->dcn4x.a.temp_read_or_ppt, watermarks->dcn4x.a.temp_read_or_ppt);
+ } else if (watermarks->dcn4x.a.temp_read_or_ppt
+ < hubbub2->watermarks.dcn4x.a.temp_read_or_ppt)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.temp_read_or_ppt
- > hubbub2->watermarks.dcn4.b.temp_read_or_ppt) {
- hubbub2->watermarks.dcn4.b.temp_read_or_ppt =
- watermarks->dcn4.b.temp_read_or_ppt;
+ if (safe_to_lower || watermarks->dcn4x.b.temp_read_or_ppt
+ > hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) {
+ hubbub2->watermarks.dcn4x.b.temp_read_or_ppt =
+ watermarks->dcn4x.b.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, 0,
- DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4.b.temp_read_or_ppt);
+ DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4x.b.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.temp_read_or_ppt, watermarks->dcn4.b.temp_read_or_ppt);
- } else if (watermarks->dcn4.b.temp_read_or_ppt
- < hubbub2->watermarks.dcn4.b.temp_read_or_ppt)
+ watermarks->dcn4x.b.temp_read_or_ppt, watermarks->dcn4x.b.temp_read_or_ppt);
+ } else if (watermarks->dcn4x.b.temp_read_or_ppt
+ < hubbub2->watermarks.dcn4x.b.temp_read_or_ppt)
wm_pending = true;
/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.fclk_pstate
- > hubbub2->watermarks.dcn4.a.fclk_pstate) {
- hubbub2->watermarks.dcn4.a.fclk_pstate =
- watermarks->dcn4.a.fclk_pstate;
+ if (safe_to_lower || watermarks->dcn4x.a.fclk_pstate
+ > hubbub2->watermarks.dcn4x.a.fclk_pstate) {
+ hubbub2->watermarks.dcn4x.a.fclk_pstate =
+ watermarks->dcn4x.a.fclk_pstate;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
- DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4.a.fclk_pstate);
+ DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4x.a.fclk_pstate);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.fclk_pstate, watermarks->dcn4.a.fclk_pstate);
- } else if (watermarks->dcn4.a.fclk_pstate
- < hubbub2->watermarks.dcn4.a.fclk_pstate)
+ watermarks->dcn4x.a.fclk_pstate, watermarks->dcn4x.a.fclk_pstate);
+ } else if (watermarks->dcn4x.a.fclk_pstate
+ < hubbub2->watermarks.dcn4x.a.fclk_pstate)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.fclk_pstate
- > hubbub2->watermarks.dcn4.b.fclk_pstate) {
- hubbub2->watermarks.dcn4.b.fclk_pstate =
- watermarks->dcn4.b.fclk_pstate;
+ if (safe_to_lower || watermarks->dcn4x.b.fclk_pstate
+ > hubbub2->watermarks.dcn4x.b.fclk_pstate) {
+ hubbub2->watermarks.dcn4x.b.fclk_pstate =
+ watermarks->dcn4x.b.fclk_pstate;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
- DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4.b.fclk_pstate);
+ DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4x.b.fclk_pstate);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.fclk_pstate, watermarks->dcn4.b.fclk_pstate);
- } else if (watermarks->dcn4.b.fclk_pstate
- < hubbub2->watermarks.dcn4.b.fclk_pstate)
+ watermarks->dcn4x.b.fclk_pstate, watermarks->dcn4x.b.fclk_pstate);
+ } else if (watermarks->dcn4x.b.fclk_pstate
+ < hubbub2->watermarks.dcn4x.b.fclk_pstate)
wm_pending = true;
/* Section for FCLK_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */
- if (safe_to_lower || watermarks->dcn4.a.temp_read_or_ppt
- > hubbub2->watermarks.dcn4.a.temp_read_or_ppt) {
- hubbub2->watermarks.dcn4.a.temp_read_or_ppt =
- watermarks->dcn4.a.temp_read_or_ppt;
+ if (safe_to_lower || watermarks->dcn4x.a.temp_read_or_ppt
+ > hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) {
+ hubbub2->watermarks.dcn4x.a.temp_read_or_ppt =
+ watermarks->dcn4x.a.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, 0,
- DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4.a.temp_read_or_ppt);
+ DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4x.a.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.temp_read_or_ppt, watermarks->dcn4.a.temp_read_or_ppt);
- } else if (watermarks->dcn4.a.temp_read_or_ppt
- < hubbub2->watermarks.dcn4.a.temp_read_or_ppt)
+ watermarks->dcn4x.a.temp_read_or_ppt, watermarks->dcn4x.a.temp_read_or_ppt);
+ } else if (watermarks->dcn4x.a.temp_read_or_ppt
+ < hubbub2->watermarks.dcn4x.a.temp_read_or_ppt)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.temp_read_or_ppt
- > hubbub2->watermarks.dcn4.b.temp_read_or_ppt) {
- hubbub2->watermarks.dcn4.b.temp_read_or_ppt =
- watermarks->dcn4.b.temp_read_or_ppt;
+ if (safe_to_lower || watermarks->dcn4x.b.temp_read_or_ppt
+ > hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) {
+ hubbub2->watermarks.dcn4x.b.temp_read_or_ppt =
+ watermarks->dcn4x.b.temp_read_or_ppt;
REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, 0,
- DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4.b.temp_read_or_ppt);
+ DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4x.b.temp_read_or_ppt);
DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.temp_read_or_ppt, watermarks->dcn4.b.temp_read_or_ppt);
- } else if (watermarks->dcn4.b.temp_read_or_ppt
- < hubbub2->watermarks.dcn4.b.temp_read_or_ppt)
+ watermarks->dcn4x.b.temp_read_or_ppt, watermarks->dcn4x.b.temp_read_or_ppt);
+ } else if (watermarks->dcn4x.b.temp_read_or_ppt
+ < hubbub2->watermarks.dcn4x.b.temp_read_or_ppt)
wm_pending = true;
return wm_pending;
@@ -418,29 +418,29 @@ bool hubbub401_program_usr_watermarks(
bool wm_pending = false;
/* clock state A */
- if (safe_to_lower || watermarks->dcn4.a.usr
- > hubbub2->watermarks.dcn4.a.usr) {
- hubbub2->watermarks.dcn4.a.usr = watermarks->dcn4.a.usr;
+ if (safe_to_lower || watermarks->dcn4x.a.usr
+ > hubbub2->watermarks.dcn4x.a.usr) {
+ hubbub2->watermarks.dcn4x.a.usr = watermarks->dcn4x.a.usr;
REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
- DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, watermarks->dcn4.a.usr);
+ DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, watermarks->dcn4x.a.usr);
DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.a.usr, watermarks->dcn4.a.usr);
- } else if (watermarks->dcn4.a.usr
- < hubbub2->watermarks.dcn4.a.usr)
+ watermarks->dcn4x.a.usr, watermarks->dcn4x.a.usr);
+ } else if (watermarks->dcn4x.a.usr
+ < hubbub2->watermarks.dcn4x.a.usr)
wm_pending = true;
/* clock state B */
- if (safe_to_lower || watermarks->dcn4.b.usr
- > hubbub2->watermarks.dcn4.b.usr) {
- hubbub2->watermarks.dcn4.b.usr = watermarks->dcn4.b.usr;
+ if (safe_to_lower || watermarks->dcn4x.b.usr
+ > hubbub2->watermarks.dcn4x.b.usr) {
+ hubbub2->watermarks.dcn4x.b.usr = watermarks->dcn4x.b.usr;
REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
- DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, watermarks->dcn4.b.usr);
+ DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, watermarks->dcn4x.b.usr);
DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
- watermarks->dcn4.b.usr, watermarks->dcn4.b.usr);
- } else if (watermarks->dcn4.b.usr
- < hubbub2->watermarks.dcn4.b.usr)
+ watermarks->dcn4x.b.usr, watermarks->dcn4x.b.usr);
+ } else if (watermarks->dcn4x.b.usr
+ < hubbub2->watermarks.dcn4x.b.usr)
wm_pending = true;
return wm_pending;
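
[Editor's note] All four watermark functions above repeat the same safe_to_lower idiom: raise a watermark immediately, but defer lowering it until the caller signals it is safe, reporting a pending update otherwise; the dcn4 -> dcn4x rename in the hunks is mechanical and leaves that idiom unchanged. A condensed sketch of the idiom, using generic stand-in types rather than the real watermark structs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wm { uint32_t urgent; };

static void program_urgent_wm(uint32_t value) { (void)value; /* MMIO write */ }

/* Raising a watermark is always safe; lowering one is deferred until the
 * caller says bandwidth has already been raised (safe_to_lower). */
static bool program_watermark(struct wm *cur, const struct wm *req,
			      bool safe_to_lower)
{
	bool wm_pending = false;

	if (safe_to_lower || req->urgent > cur->urgent) {
		cur->urgent = req->urgent;
		program_urgent_wm(cur->urgent);
	} else if (req->urgent < cur->urgent) {
		wm_pending = true; /* retry once lowering becomes safe */
	}
	return wm_pending;
}

int main(void)
{
	struct wm cur = { .urgent = 100 };
	const struct wm req = { .urgent = 80 };

	/* Lowering requested but not yet safe: nothing programmed, pending set. */
	printf("pending = %d\n", program_watermark(&cur, &req, false));
	/* Now safe: the lower value is programmed. */
	printf("pending = %d\n", program_watermark(&cur, &req, true));
	return 0;
}
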
@@ -1170,6 +1170,28 @@ static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned comp
}
}
+static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ switch (hubp_inst) {
+ case 0:
+ REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100000); /* 1 vupdate at 10 Hz */
+ break;
+ case 1:
+ REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100000);
+ break;
+ case 2:
+ REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100000);
+ break;
+ case 3:
+ REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100000);
+ break;
+ default:
+ break;
+ }
+}
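
[Editor's note] dcn401_wait_for_det_update relies on REG_WAIT, which polls a register field until it matches an expected value or a retry budget runs out; the (1, 100000) arguments above are a 1 us poll interval with 100000 attempts, roughly the 100 ms of one vupdate at 10 Hz. A minimal sketch of that polling loop; the helper names are hypothetical, not the DC macro.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t det_size_current;  /* stand-in for DETn_SIZE_CURRENT */

static void sleep_us(unsigned int us) { (void)us; /* no-op in this sketch */ }

/* Poll until the hardware reports the requested DET size, or time out.
 * delay_us/max_tries mirror REG_WAIT's last two arguments. */
static bool wait_for_det_size(uint32_t expected,
			      unsigned int delay_us, unsigned int max_tries)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		if (det_size_current == expected)
			return true;
		sleep_us(delay_us);
	}
	return false; /* caller may log or ignore the timeout */
}

int main(void)
{
	det_size_current = 64;
	/* 1 us between polls, 100000 tries ~= 100 ms, one vupdate at 10 Hz */
	printf("%s\n", wait_for_det_size(64, 1, 100000) ? "done" : "timeout");
	return 0;
}
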
+
static const struct hubbub_funcs hubbub4_01_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -1192,6 +1214,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.set_request_limit = hubbub32_set_request_limit,
.program_det_segments = dcn401_program_det_segments,
.program_compbuf_segments = dcn401_program_compbuf_segments,
+ .wait_for_det_update = dcn401_wait_for_det_update,
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index bf399819ca80..22ac2b7e49ae 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -749,7 +749,8 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
if (flip_pending)
return true;
- if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ if (hubp &&
+ earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 6bba020ad6fb..0637e4c552d8 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -927,7 +927,8 @@ bool hubp2_is_flip_pending(struct hubp *hubp)
if (flip_pending)
return true;
- if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ if (hubp &&
+ earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
return true;
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 771fcd0d3b99..d1f05b82b3dd 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -188,7 +188,7 @@ void hubp35_program_surface_config(
hubp35_program_pixel_format(hubp, format);
}
-struct hubp_funcs dcn35_hubp_funcs = {
+static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index eb0da6c6b87c..b1ebf5053b4f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -725,8 +725,8 @@ void hubp401_cursor_set_position(
CURSOR_ENABLE, cur_en);
REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
+ CURSOR_X_POSITION, x_pos,
+ CURSOR_Y_POSITION, y_pos);
REG_SET_2(CURSOR_HOT_SPOT, 0,
CURSOR_HOT_SPOT_X, pos->x_hotspot,
@@ -990,7 +990,6 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_soft_reset = hubp31_soft_reset,
.hubp_set_flip_int = hubp401_set_flip_int,
.hubp_in_blank = hubp401_in_blank,
- .hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp401_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 1f2eb2f727dc..4fbed0298adf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -57,6 +57,7 @@
#include "panel_cntl.h"
#include "dc_state_priv.h"
#include "dpcd_defs.h"
+#include "dsc.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -949,7 +950,7 @@ void dce110_edp_backlight_control(
{
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
- uint8_t pwrseq_instance;
+ uint8_t pwrseq_instance = 0;
unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
unsigned int post_T7_delay = OLED_POST_T7_DELAY;
@@ -1002,7 +1003,8 @@ void dce110_edp_backlight_control(
*/
/* dc_service_sleep_in_milliseconds(50); */
/*edp 1.2*/
- pwrseq_instance = link->panel_cntl->pwrseq_inst;
+ if (link->panel_cntl)
+ pwrseq_instance = link->panel_cntl->pwrseq_inst;
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) {
if (!link->dc->config.edp_no_power_sequencing)
@@ -1231,20 +1233,21 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
* has changed or they enter protection state and hang.
*/
msleep(60);
- } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
- if (!link->dc->config.edp_no_power_sequencing) {
- /*
- * Sometimes, DP receiver chip power-controlled externally by an
- * Embedded Controller could be treated and used as eDP,
- * if it drives mobile display. In this case,
- * we shouldn't be doing power-sequencing, hence we can skip
- * waiting for T9-ready.
- */
- link->dc->link_srv->edp_receiver_ready_T9(link);
- }
}
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ !link->dc->config.edp_no_power_sequencing) {
+ /*
+ * Sometimes a DP receiver chip that is power-controlled externally
+ * by an Embedded Controller can be treated and used as eDP if it
+ * drives a mobile display. In that case we shouldn't be doing
+ * power-sequencing, so we can skip waiting for T9-ready.
+ */
+ link->dc->link_srv->edp_receiver_ready_T9(link);
+ }
+
}
@@ -1549,6 +1552,7 @@ static enum dc_status dce110_enable_stream_timing(
0,
0,
0,
+ 0,
pipe_ctx->stream->signal,
true);
}
@@ -1597,6 +1601,11 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
&audio_output.crtc_info,
&pipe_ctx->stream->audio_info,
&audio_output.dp_link_info);
+
+ if (dc->config.disable_hbr_audio_dp2)
+ if (pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio &&
+ dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio(pipe_ctx->stream_res.audio);
}
/* make sure no pipes syncd to the pipe being enabled */
@@ -1815,6 +1824,48 @@ static void get_edp_links_with_sink(
}
}
+static void clean_up_dsc_blocks(struct dc *dc)
+{
+ struct display_stream_compressor *dsc = NULL;
+ struct timing_generator *tg = NULL;
+ struct stream_encoder *se = NULL;
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+ int i;
+
+ if (dc->ctx->dce_version != DCN_VERSION_3_5 &&
+ dc->ctx->dce_version != DCN_VERSION_3_51)
+ return;
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct dcn_dsc_state s = {0};
+
+ dsc = dc->res_pool->dscs[i];
+ dsc->funcs->dsc_read_state(dsc, &s);
+ if (s.dsc_fw_en) {
+ /* disable DSC in OPTC */
+ if (i < dc->res_pool->timing_generator_count) {
+ tg = dc->res_pool->timing_generators[i];
+ tg->funcs->set_dsc_config(tg, OPTC_DSC_DISABLED, 0, 0);
+ }
+ /* disable DSC in stream encoder */
+ if (i < dc->res_pool->stream_enc_count) {
+ se = dc->res_pool->stream_enc[i];
+ se->funcs->dp_set_dsc_config(se, OPTC_DSC_DISABLED, 0, 0);
+ se->funcs->dp_set_dsc_pps_info_packet(se, false, NULL, true);
+ }
+ /* disable DSC block */
+ if (dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, dsc->inst);
+ dsc->funcs->dsc_disable(dsc);
+
+ /* power down DSC */
+ if (pg_cntl != NULL)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, dsc->inst, false);
+ }
+ }
+}
+
/*
* When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
* 1. Power down all DC HW blocks
@@ -1838,6 +1889,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
DC_LOGGER_INIT();
@@ -1914,13 +1966,22 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
hws->funcs.edp_backlight_control(edp_link_with_sink, false);
}
/* resume from S3, no VBIOS posting, no need to power down again */
- clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+ if (dcb && dcb->funcs && !dcb->funcs->is_accelerated_mode(dcb))
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
power_down_all_hw_blocks(dc);
+
+ /* DSC could have been enabled on eDP during VBIOS post,
+ * so clean up the DSC blocks if eDP is linked but not active.
+ */
+ if (edp_link_with_sink && (edp_stream_num == 0))
+ clean_up_dsc_blocks(dc);
+
disable_vga_and_power_gate_all_controllers(dc);
if (edp_link_with_sink && !keep_edp_vdd_on)
dc->hwss.edp_power_control(edp_link_with_sink, false);
- clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+ if (dcb && dcb->funcs && !dcb->funcs->is_accelerated_mode(dcb))
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
}
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
}
@@ -2035,13 +2096,20 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
* as well.
*/
for (i = 0; i < num_pipes; i++) {
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
-
- if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
+ /* dc_state_destruct() might null the stream resources, so fetch tg
+ * here first to avoid a race condition. The lifetime of the pointee
+ * itself (the timing_generator object) is not a problem here.
+ */
+ struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
+
+ if ((tg != NULL) && tg->funcs) {
+ if (tg->funcs->set_drr)
+ tg->funcs->set_drr(tg, &params);
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ if (tg->funcs->set_static_screen_control)
+ tg->funcs->set_static_screen_control(
+ tg, event_triggers, num_frames);
+ }
}
}
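The snapshot-then-guard idiom above generalizes to any stream resource that dc_state_destruct() may null concurrently; a minimal sketch (safe_set_drr() is a hypothetical wrapper, not part of this patch):

static void safe_set_drr(struct pipe_ctx *pipe_ctx, struct drr_params *params)
{
	/* read the pointer once; never re-read pipe_ctx->stream_res.tg */
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (tg && tg->funcs && tg->funcs->set_drr)
		tg->funcs->set_drr(tg, params);
}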
@@ -2340,19 +2408,6 @@ static void dce110_setup_audio_dto(
}
}
-static bool dce110_is_hpo_enabled(struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
- if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) {
- return true;
- }
- }
-
- return false;
-}
-
enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context)
@@ -2361,8 +2416,8 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc_bios *dcb = dc->ctx->dc_bios;
enum dc_status status;
int i;
- bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state);
- bool is_hpo_enabled = dce110_is_hpo_enabled(context);
+ bool was_hpo_acquired = resource_is_hpo_acquired(dc->current_state);
+ bool is_hpo_acquired = resource_is_hpo_acquired(context);
/* reset syncd pipes from disabled pipes */
if (dc->config.use_pipe_ctx_sync_logic)
@@ -2405,8 +2460,8 @@ enum dc_status dce110_apply_ctx_to_hw(
dce110_setup_audio_dto(dc, context);
- if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled) {
- dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled);
+ if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_acquired != is_hpo_acquired) {
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_acquired);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2438,7 +2493,7 @@ enum dc_status dce110_apply_ctx_to_hw(
#ifdef CONFIG_DRM_AMD_DC_FP
if (hws->funcs.resync_fifo_dccg_dio)
- hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i);
#endif
}
@@ -3312,7 +3367,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
static const struct hwseq_private_funcs dce110_private_funcs = {
.init_pipes = init_pipes,
- .update_plane_addr = update_plane_addr,
.set_input_transfer_func = dce110_set_input_transfer_func,
.set_output_transfer_func = dce110_set_output_transfer_func,
.power_down = dce110_power_down,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 1d2be574f668..a6a1db5ba8ba 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1005,6 +1005,7 @@ enum dc_status dcn10_enable_stream_timing(
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout,
pipe_ctx->stream->signal,
true);
@@ -1554,7 +1555,7 @@ void dcn10_init_hw(struct dc *dc)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
/* Align bw context with hw config when system resume. */
- if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
+ if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
}
@@ -1674,7 +1675,7 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
@@ -1697,10 +1698,10 @@ void dcn10_power_down_on_boot(struct dc *dc)
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwseq->funcs.edp_backlight_control &&
- dc->hwss.power_down &&
+ dc->hwseq->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwseq->funcs.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ dc->hwseq->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
for (i = 0; i < dc->link_count; i++) {
@@ -1708,8 +1709,8 @@ void dcn10_power_down_on_boot(struct dc *dc)
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ dc->hwseq->funcs.power_down) {
+ dc->hwseq->funcs.power_down(dc);
break;
}
@@ -2585,8 +2586,11 @@ static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_co
while (top->top_pipe)
top = top->top_pipe; // Traverse to top pipe_ctx
- if (top->plane_state && top->plane_state->layer_index == 0)
- return true; // Front MPO plane not hidden
+ if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha)
+ // Global alpha is used by the top plane for PIP overlay;
+ // pre-multiplied/per-pixel alpha is used by MPO.
+ // Check the top plane's global alpha to confirm layer_index > 0 was not caused by PIP
+ return true; // MPO in use and front plane not hidden
}
}
return false;
@@ -2914,7 +2918,7 @@ static void dcn10_update_dchubp_dpp(
hubp->power_gated = false;
- hws->funcs.update_plane_addr(dc, pipe_ctx);
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
if (is_pipe_tree_visible(pipe_ctx))
hubp->funcs->set_blank(hubp, false);
@@ -2997,7 +3001,8 @@ void dcn10_program_pipe(
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
index a5bdac79a744..5e51e1761707 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
@@ -78,7 +78,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
@@ -92,7 +91,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
static const struct hwseq_private_funcs dcn10_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn10_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.program_pipe = dcn10_program_pipe,
.update_mpcc = dcn10_update_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 2532ad410cb5..a80c08582932 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -909,6 +909,7 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout,
pipe_ctx->stream->signal,
true);
@@ -1044,7 +1045,8 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
/*
* if the above branch is not taken, 'params' stays 0 and the block is set to bypass
*/
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ if (mpc->funcs->set_output_gamma)
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
return true;
}
@@ -1698,7 +1700,7 @@ static void dcn20_update_dchubp_dpp(
plane_state->update_flags.bits.input_csc_change ||
plane_state->update_flags.bits.color_space_change ||
plane_state->update_flags.bits.coeff_reduction_change) {
- struct dc_bias_and_scale bns_params = {0};
+ struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;
// program the input csc
dpp->funcs->dpp_setup(dpp,
@@ -1715,7 +1717,6 @@ static void dcn20_update_dchubp_dpp(
}
if (dpp->funcs->dpp_program_bias_and_scale) {
//TODO :for CNVC set scale and bias registers if necessary
- build_prescale_params(&bns_params, plane_state);
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
}
@@ -1825,7 +1826,7 @@ static void dcn20_update_dchubp_dpp(
params.subvp_save_surf_addr.subvp_index = pipe_ctx->subvp_index;
hwss_subvp_save_surf_addr(&params);
}
- hws->funcs.update_plane_addr(dc, pipe_ctx);
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
}
if (pipe_ctx->update_flags.bits.enable)
@@ -1886,7 +1887,8 @@ static void dcn20_program_pipe(
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
@@ -1921,22 +1923,28 @@ static void dcn20_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
- if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ if (pipe_ctx->update_flags.raw ||
+ (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
+ pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
- if (pipe_ctx->update_flags.bits.enable
- || pipe_ctx->plane_state->update_flags.bits.hdr_mult)
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (hws->funcs.populate_mcm_luts) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
+ if (pipe_ctx->plane_state) {
+ hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
+ pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
+ }
}
- if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
- pipe_ctx->plane_state->update_flags.bits.lut_3d)
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
@@ -1946,7 +1954,8 @@ static void dcn20_program_pipe(
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
pipe_ctx->stream->update_flags.bits.out_tf ||
- pipe_ctx->plane_state->update_flags.bits.output_tf_change)
+ (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -1970,7 +1979,7 @@ static void dcn20_program_pipe(
}
/* Set ABM pipe after other pipe configurations done */
- if (pipe_ctx->plane_state->visible) {
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
if (pipe_ctx->stream_res.abm) {
dc->hwss.set_pipe(pipe_ctx);
pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
@@ -2186,9 +2195,9 @@ static void post_unlock_reset_opp(struct dc *dc,
* yet power gated.
*/
dsc->funcs->dsc_wait_disconnect_pending_clear(dsc);
+ dsc->funcs->dsc_disable(dsc);
if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, dsc->inst);
- dsc->funcs->dsc_disable(dsc);
}
}
}
@@ -2283,6 +2292,9 @@ void dcn20_post_unlock_program_front_end(
}
}
+ if (!hwseq)
+ return;
+
/* P-State support transitions:
* Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
* FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
@@ -2290,7 +2302,7 @@ void dcn20_post_unlock_program_front_end(
* FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
* FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
*/
- if (hwseq && hwseq->funcs.update_force_pstate)
+ if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
/* Only program the MALL registers after all the main and phantom pipes
@@ -2459,7 +2471,8 @@ bool dcn20_update_bandwidth(
calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
@@ -2529,6 +2542,9 @@ bool dcn20_wait_for_blank_complete(
{
int counter;
+ if (!opp)
+ return false;
+
for (counter = 0; counter < 1000; counter++) {
if (!opp->funcs->dpg_is_pending(opp))
break;
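The guard above makes dcn20_wait_for_blank_complete() fail cleanly on a NULL opp and bounds the wait to 1000 polls; the same bounded-poll shape, as a standalone sketch (the 100 us delay is an assumed placeholder, not taken from this patch):

static bool poll_dpg_clear(struct output_pixel_processor *opp)
{
	int counter;

	if (!opp)
		return false;

	for (counter = 0; counter < 1000; counter++) {
		if (!opp->funcs->dpg_is_pending(opp))
			return true;	/* blanked */
		udelay(100);		/* assumed poll interval */
	}
	return false;			/* timed out */
}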
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index ef6488165b8f..32707b344f0b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
@@ -105,7 +105,6 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
static const struct hwseq_private_funcs dcn20_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn20_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
index a13bf6c9386e..78351408e864 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
@@ -96,7 +96,6 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
static const struct hwseq_private_funcs dcn201_private_funcs = {
.init_pipes = NULL,
- .update_plane_addr = dcn201_update_plane_addr,
.plane_atomic_disconnect = dcn201_plane_atomic_disconnect,
.program_pipe = dcn10_program_pipe,
.update_mpcc = dcn201_update_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
index 3dfac372d165..e044e9e0a3a1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
@@ -93,7 +93,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -109,7 +108,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
static const struct hwseq_private_funcs dcn21_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn20_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index eaeeade31ed7..bded33575493 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -398,7 +398,11 @@ bool dcn30_set_output_transfer_func(struct dc *dc,
}
}
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ if (mpc->funcs->set_output_gamma)
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ else
+ DC_LOG_ERROR("%s: set_output_gamma function pointer is NULL.\n", __func__);
+
return ret;
}
@@ -451,7 +455,7 @@ bool dcn30_mmhubbub_warmup(
struct mcif_wb *mcif_wb;
struct mcif_warmup_params warmup_params = {0};
unsigned int i, i_buf;
- /*make sure there is no active DWB eanbled */
+ /* make sure there is no active DWB enabled */
for (i = 0; i < num_dwb; i++) {
dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) {
@@ -625,7 +629,7 @@ void dcn30_init_hw(struct dc *dc)
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// Initialize the dccg
@@ -731,10 +735,10 @@ void dcn30_init_hw(struct dc *dc)
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
+ hws->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ hws->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
for (i = 0; i < dc->link_count; i++) {
@@ -742,8 +746,8 @@ void dcn30_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ hws->funcs.power_down) {
+ hws->funcs.power_down(dc);
break;
}
@@ -786,11 +790,12 @@ void dcn30_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
//if softmax is enabled then hardmax will be set by a different call
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
+ !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 4b32497c09d0..2a8dc40d2847 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -113,7 +113,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
static const struct hwseq_private_funcs dcn30_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 97e33eb7ac5a..93e49d87a67c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
@@ -111,7 +111,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
static const struct hwseq_private_funcs dcn301_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 746c522adf84..3d4b31bd9946 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -256,10 +256,10 @@ void dcn31_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 9cb7afe0e731..56f3c70d4b55 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -98,7 +98,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -112,11 +111,11 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
+ .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 388404cdeeaa..4e93eeedfc1b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -355,14 +355,18 @@ void dcn314_calculate_pix_rate_divider(
}
}
-void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
struct pipe_ctx *pipe = NULL;
bool otg_disabled[MAX_PIPES] = {false};
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ } else {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ }
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
@@ -377,7 +381,10 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc
hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (otg_disabled[i]) {
int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
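Both loops now pick the pipe from the incoming context for indices already reprogrammed this pass and from the committed state otherwise; a hypothetical helper expressing that selection (sketch only, not part of this patch):

static struct pipe_ctx *select_resync_pipe(struct dc *dc, struct dc_state *context,
		unsigned int i, unsigned int current_pipe_idx)
{
	/* pipes up to and including current_pipe_idx were already
	 * programmed from the new context; later ones still reflect
	 * dc->current_state */
	if (i <= current_pipe_idx)
		return &context->res_ctx.pipe_ctx[i];
	return &dc->current_state->res_ctx.pipe_ctx[i];
}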
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
index fb4f90f61b22..2305ad282f21 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
@@ -41,7 +41,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
void dcn314_calculate_pix_rate_divider(struct dc *dc, struct dc_state *context, const struct dc_stream_state *stream);
-void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
+void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx);
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 7a8db4b81471..68e6de6b5758 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -100,7 +100,6 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -115,11 +114,11 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider,
+ .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn30_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 05d8f81daa06..2e8c9f738259 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -582,7 +582,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
}
}
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ if (mpc->funcs->set_output_gamma)
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+
return ret;
}
@@ -779,7 +781,7 @@ void dcn32_init_hw(struct dc *dc)
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// Initialize the dccg
@@ -901,10 +903,10 @@ void dcn32_init_hw(struct dc *dc)
if (edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
+ hws->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ hws->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
}
}
@@ -914,8 +916,8 @@ void dcn32_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ hws->funcs.power_down) {
+ hws->funcs.power_down(dc);
break;
}
@@ -958,10 +960,11 @@ void dcn32_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
+ !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
@@ -982,8 +985,19 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
- if (dc->ctx->dmub_srv->dmub->fw_version <
+ /* for DCN401 testing only */
+ dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
+ if (dc->caps.dmub_caps.fams_ver == 2) {
+ /* FAMS2 is enabled */
+ dc->debug.fams2_config.bits.enable &= true;
+ } else if (dc->ctx->dmub_srv->dmub->fw_version <
DMUB_FW_VERSION(7, 0, 35)) {
+ /* FAMS2 is disabled */
+ dc->debug.fams2_config.bits.enable = false;
+ if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box) {
+ /* update bounding box if FAMS2 disabled */
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ }
dc->debug.force_disable_subvp = true;
dc->debug.disable_fpo_optimizations = true;
}
@@ -1018,6 +1032,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
@@ -1029,24 +1057,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
- if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst, true);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
- if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, true);
}
- dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
- dsc_cfg.pic_width *= opp_cnt;
-
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
-
/* Enable DSC in OPTC */
DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
@@ -1060,13 +1084,9 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
OPTC_DSC_DISABLED, 0, 0);
/* only disconnect DSC block, DSC is disabled when OPP head pipe is reset */
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, pipe_ctx->stream_res.dsc->inst, false);
- dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
+ dsc->funcs->dsc_disconnect(pipe_ctx->stream_res.dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
ASSERT(odm_pipe->stream_res.dsc);
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_pipe->stream_res.dsc->inst, false);
odm_pipe->stream_res.dsc->funcs->dsc_disconnect(odm_pipe->stream_res.dsc);
}
}
@@ -1137,10 +1157,7 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
- struct dccg *dccg = dc->res_pool->dccg;
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst, false);
/* disconnect DSC block from stream */
dsc->funcs->dsc_disconnect(dsc);
}
@@ -1212,20 +1229,27 @@ void dcn32_calculate_pix_rate_divider(
}
}
-void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
struct pipe_ctx *pipe = NULL;
bool otg_disabled[MAX_PIPES] = {false};
+ struct dc_state *dc_state = NULL;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc_state = context;
+ } else {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc_state = dc->current_state;
+ }
if (!resource_is_pipe_type(pipe, OTG_MASTER))
continue;
if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
- && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) {
+ && dc_state_get_pipe_subvp_type(dc_state, pipe) != SUBVP_PHANTOM) {
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
@@ -1235,7 +1259,10 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (i <= current_pipe_idx)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (otg_disabled[i]) {
int opp_inst[MAX_PIPES] = { pipe->stream_res.opp->inst };
@@ -1583,7 +1610,7 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
#ifdef CONFIG_DRM_AMD_DC_FP
if (hws->funcs.resync_fifo_dccg_dio)
- hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context, i);
#endif
}
}
@@ -1717,6 +1744,28 @@ void dcn32_blank_phantom(struct dc *dc,
hws->funcs.wait_for_blank_complete(opp);
}
+/* Phantom stream IDs can change often, but can be identical between contexts.
+ * This function checks whether the streams are identical, to avoid
+ * redundant pipe transitions.
+ */
+static bool is_subvp_phantom_topology_transition_seamless(
+ const struct dc_state *cur_ctx,
+ const struct dc_state *new_ctx,
+ const struct pipe_ctx *cur_pipe,
+ const struct pipe_ctx *new_pipe)
+{
+ enum mall_stream_type cur_pipe_type = dc_state_get_pipe_subvp_type(cur_ctx, cur_pipe);
+ enum mall_stream_type new_pipe_type = dc_state_get_pipe_subvp_type(new_ctx, new_pipe);
+
+ const struct dc_stream_state *cur_paired_stream = dc_state_get_paired_subvp_stream(cur_ctx, cur_pipe->stream);
+ const struct dc_stream_state *new_paired_stream = dc_state_get_paired_subvp_stream(new_ctx, new_pipe->stream);
+
+ return cur_pipe_type == SUBVP_PHANTOM &&
+ cur_pipe_type == new_pipe_type &&
+ cur_paired_stream && new_paired_stream &&
+ cur_paired_stream->stream_id == new_paired_stream->stream_id;
+}
+
bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx)
@@ -1735,7 +1784,8 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
continue;
else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) {
if (resource_is_pipe_type(new_pipe, OTG_MASTER))
- if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id)
+ if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id ||
+ is_subvp_phantom_topology_transition_seamless(cur_ctx, new_ctx, cur_pipe, new_pipe))
/* OTG master with the same stream is seamless */
continue;
} else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) {
@@ -1821,3 +1871,13 @@ void dcn32_interdependent_update_lock(struct dc *dc,
dc->hwss.pipe_control_lock(dc, pipe, false);
}
}
+
+void dcn32_program_outstanding_updates(struct dc *dc,
+ struct dc_state *context)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* update compbuf if required */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index db562e45d6ff..cac4a08b92a4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -75,7 +75,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div);
-void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx);
void dcn32_subvp_pipe_control_lock(struct dc *dc,
struct dc_state *context,
@@ -133,4 +133,8 @@ void dcn32_prepare_bandwidth(struct dc *dc,
void dcn32_interdependent_update_lock(struct dc *dc,
struct dc_state *context, bool lock);
+
+void dcn32_program_outstanding_updates(struct dc *dc,
+ struct dc_state *context);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 5c50458b12cb..3422b564ae98 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -120,11 +120,11 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.blank_phantom = dcn32_blank_phantom,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .program_outstanding_updates = dcn32_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
@@ -163,7 +163,6 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
- .populate_mcm_luts = dcn401_populate_mcm_luts,
};
void dcn32_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index d5e9aec52a05..bd309dbdf7b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -147,37 +147,6 @@ void dcn35_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
}
- if (!dc->debug.disable_clock_gate) {
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
- PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYESYMCLK_ROOT_GATE_DISABLE, 1);
-
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4,
- DPIASYMCLK0_GATE_DISABLE, 0,
- DPIASYMCLK1_GATE_DISABLE, 0,
- DPIASYMCLK2_GATE_DISABLE, 0,
- DPIASYMCLK3_GATE_DISABLE, 0);
-
- REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF);
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
- DTBCLK_P0_GATE_DISABLE, 0,
- DTBCLK_P1_GATE_DISABLE, 0,
- DTBCLK_P2_GATE_DISABLE, 0,
- DTBCLK_P3_GATE_DISABLE, 0);
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
- DPSTREAMCLK0_GATE_DISABLE, 0,
- DPSTREAMCLK1_GATE_DISABLE, 0,
- DPSTREAMCLK2_GATE_DISABLE, 0,
- DPSTREAMCLK3_GATE_DISABLE, 0);
-
- }
-
// Initialize the dccg
if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
@@ -235,7 +204,7 @@ void dcn35_init_hw(struct dc *dc)
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
*/
- if (res_pool->hubbub->funcs->dchubbub_init)
+ if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -271,6 +240,10 @@ void dcn35_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
+ if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
+ for (i = 0; i < res_pool->pipe_count; i++)
+ res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
+ }
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -305,20 +278,6 @@ void dcn35_init_hw(struct dc *dc)
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0,
- SYMCLKB_FE_GATE_DISABLE, 0,
- SYMCLKC_FE_GATE_DISABLE, 0,
- SYMCLKD_FE_GATE_DISABLE, 0,
- SYMCLKE_FE_GATE_DISABLE, 0);
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0);
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0,
- SYMCLKB_GATE_DISABLE, 0,
- SYMCLKC_GATE_DISABLE, 0,
- SYMCLKD_GATE_DISABLE, 0,
- SYMCLKE_GATE_DISABLE, 0);
-
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
@@ -328,10 +287,10 @@ void dcn35_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
+ if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
@@ -375,7 +334,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
@@ -629,10 +601,10 @@ void dcn35_power_down_on_boot(struct dc *dc)
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwseq->funcs.edp_backlight_control &&
- dc->hwss.power_down &&
+ dc->hwseq->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwseq->funcs.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ dc->hwseq->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
for (i = 0; i < dc->link_count; i++) {
@@ -640,8 +612,8 @@ void dcn35_power_down_on_boot(struct dc *dc)
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ dc->hwseq->funcs.power_down) {
+ dc->hwseq->funcs.power_down(dc);
break;
}
@@ -1024,9 +996,6 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
update_state->pg_res_update[PG_HPO] = true;
- if (hpo_frl_stream_enc_acquired)
- update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
-
update_state->pg_res_update[PG_DWB] = true;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1041,7 +1010,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.hubp)
update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;
- if (pipe_ctx->plane_res.dpp)
+ if (pipe_ctx->plane_res.dpp && pipe_ctx->plane_res.hubp)
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
@@ -1469,10 +1438,9 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
- struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
-
- if (dc->debug.static_screen_wait_frames) {
+ if (pipe_ctx[i]->stream && pipe_ctx[i]->stream->ctx->dc->debug.static_screen_wait_frames) {
+ struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+ struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);
if (frame_rate >= 120 && dc->caps.ips_support &&
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 428912f37129..2bbf1fef94fd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -101,7 +101,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -124,11 +123,11 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .program_outstanding_updates = dcn32_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
.init_pipes = dcn35_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 55e791552bca..d00822e8daa5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -100,7 +100,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
@@ -123,11 +122,12 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
+ .program_outstanding_updates = dcn32_program_outstanding_updates,
+ .setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
.init_pipes = dcn35_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 2c50c0f745a0..0b743669f23b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -221,8 +221,9 @@ void dcn401_init_hw(struct dc *dc)
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
+ int current_dchub_ref_freq = 0;
- if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) {
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// mark dcmode limits present if any clock has distinct AC and DC values from SMU
@@ -264,6 +265,8 @@ void dcn401_init_hw(struct dc *dc)
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
+ current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+
(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
@@ -354,10 +357,10 @@ void dcn401_init_hw(struct dc *dc)
if (edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
+ hws->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
+ hws->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
}
}
@@ -367,8 +370,8 @@ void dcn401_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
+ hws->funcs.power_down) {
+ hws->funcs.power_down(dc);
break;
}
@@ -413,12 +416,9 @@ void dcn401_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
- if (dc->clk_mgr->funcs->notify_wm_ranges)
+ if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
- if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
- dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
-
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
@@ -436,9 +436,12 @@ void dcn401_init_hw(struct dc *dc)
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
dc->debug.fams2_config.bits.enable &= dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver == 2;
- if (!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box) {
- /* update bounding box if FAMS2 disabled */
- dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
+ || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
+ /* update bounding box if FAMS2 disabled, or if dchub clk has changed */
+ if (dc->clk_mgr)
+ dc->res_pool->funcs->update_bw_bounding_box(dc,
+ dc->clk_mgr->bw_params);
}
}
}
@@ -498,6 +501,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
bool is_17x17x17 = true;
+ bool rval;
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
@@ -507,11 +511,10 @@ void dcn401_populate_mcm_luts(struct dc *dc,
if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
- dc->ctx,
+ rval = cm3_helper_translate_curve_to_hw_format(
mcm_luts.lut1d_func,
&dpp_base->regamma_params, false);
- m_lut_params.pwl = &dpp_base->regamma_params;
+ m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
if (mpc->funcs->populate_lut)
@@ -528,11 +531,10 @@ void dcn401_populate_mcm_luts(struct dc *dc,
m_lut_params.pwl = &mcm_luts.shaper->pwl;
else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
ASSERT(false);
- cm_helper_translate_curve_to_hw_format(
- dc->ctx,
+ rval = cm3_helper_translate_curve_to_hw_format(
mcm_luts.shaper,
&dpp_base->regamma_params, true);
- m_lut_params.pwl = &dpp_base->regamma_params;
+ m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
if (mpc->funcs->populate_lut)
@@ -668,47 +670,40 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- bool result = true;
+ bool result;
const struct pwl_params *lut_params = NULL;
+ bool rval;
mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
// 1D LUT
- if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) {
- if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf.pwl;
- else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = &dpp_base->regamma_params;
- }
- result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
- lut_params = NULL;
+ if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->blend_tf.pwl;
+ else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ lut_params = rval ? &dpp_base->regamma_params : NULL;
}
+ result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
+ lut_params = NULL;
// Shaper
- if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) {
- if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func.pwl;
- else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- ASSERT(false);
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = &dpp_base->shaper_params;
- }
-
- result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
+ if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->in_shaper_func.pwl;
+ else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ // TODO: dpp_base replace
+ rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ lut_params = rval ? &dpp_base->shaper_params : NULL;
}
+ result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
// 3D
- if (plane_state->mcm_shaper_3dlut_setting == DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL) {
+ if (mpc->funcs->program_3dlut) {
if (plane_state->lut3d_func.state.bits.initialized == 1)
- result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
+ result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
else
- result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
+ result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
}
return result;
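Each LUT stage above keeps the translated params only when cm3_helper_translate_curve_to_hw_format() succeeds, then accumulates the program_* results with &=. The success-or-NULL guard, as a sketch (translate_or_null() is a hypothetical helper, not part of this patch):

static const struct pwl_params *translate_or_null(const struct dc_transfer_func *tf,
		struct pwl_params *storage, bool fixpoint)
{
	/* on failure, return NULL so the caller programs bypass
	 * instead of stale register params */
	return cm3_helper_translate_curve_to_hw_format(tf, storage, fixpoint) ?
			storage : NULL;
}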
@@ -742,7 +737,9 @@ bool dcn401_set_output_transfer_func(struct dc *dc,
}
}
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ if (mpc->funcs->set_output_gamma)
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+
return ret;
}
@@ -871,6 +868,7 @@ enum dc_status dcn401_enable_stream_timing(
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout,
pipe_ctx->stream->signal,
true);
@@ -1115,10 +1113,10 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror,
.stream = pipe_ctx->stream
};
+ struct rect odm_slice_src = { 0 };
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
(pipe_ctx->prev_odm_pipe != NULL);
int prev_odm_width = 0;
- int prev_odm_offset = 0;
struct pipe_ctx *prev_odm_pipe = NULL;
bool mpc_combine_on = false;
int bottom_pipe_x_pos = 0;
@@ -1183,12 +1181,12 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
prev_odm_pipe = pipe_ctx->prev_odm_pipe;
while (prev_odm_pipe != NULL) {
- prev_odm_width += prev_odm_pipe->plane_res.scl_data.recout.width;
- prev_odm_offset += prev_odm_pipe->plane_res.scl_data.recout.x;
+ odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
+ prev_odm_width += odm_slice_src.width;
prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
}
- x_pos -= (prev_odm_width + prev_odm_offset);
+ x_pos -= (prev_odm_width);
}
/* If the position is negative then we need to add to the hotspot
@@ -1311,8 +1309,10 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
for (i = 0; i < dc->current_state->stream_count; i++) {
/* MALL SS messaging is not supported with PSR at this time */
if (dc->current_state->streams[i] != NULL &&
- dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
+ DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
return false;
+ }
}
memset(&cmd, 0, sizeof(cmd));
@@ -1322,8 +1322,9 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
if (enable) {
if (dcn401_check_no_memory_request_for_cab(dc)) {
/* 1. Check no memory request case for CAB.
- * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message
+ * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
*/
+ DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
} else {
/* 2. Check if all surfaces can fit in CAB.
@@ -1351,13 +1352,16 @@ bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
cmd.cab.cab_alloc_ways = ways;
+ DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
} else {
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
+ DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
}
}
} else {
/* Disable CAB */
cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
+ DC_LOG_MALL("idle optimization disabled\n");
}
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
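
The enable path above picks exactly one of three DMUB CAB sub-commands: no-DCN-request when nothing needs memory, fit-in-CAB when the required ways fit the cache, and not-fit otherwise. A compact sketch of that three-way decision, with a hypothetical enum standing in for the DMUB sub-types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the three CAB sub-commands used above. */
enum cab_action { CAB_NO_DCN_REQ, CAB_FIT_IN_CAB, CAB_NOT_FIT_IN_CAB };

static enum cab_action pick_cab_action(bool no_memory_request,
				       unsigned ways_needed,
				       unsigned cache_num_ways)
{
	if (no_memory_request)
		return CAB_NO_DCN_REQ;		/* nothing to cache at all */
	if (ways_needed <= cache_num_ways)
		return CAB_FIT_IN_CAB;		/* allocate ways_needed ways */
	return CAB_NOT_FIT_IN_CAB;		/* fall back, no MALL SS */
}

int main(void)
{
	printf("%d\n", pick_cab_action(false, 12, 16)); /* fits: action 1 */
	return 0;
}
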
@@ -1395,10 +1399,10 @@ void dcn401_prepare_bandwidth(struct dc *dc,
{
struct hubbub *hubbub = dc->res_pool->hubbub;
bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- unsigned int compbuf_size_kb = 0;
+ unsigned int compbuf_size = 0;
- /* Any transition into or out of a FAMS config should disable MCLK switching first to avoid hangs */
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ /* Any transition into P-State support should disable MCLK switching first to avoid hangs */
+ if (p_state_change_support) {
dc->optimized_required = true;
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
}
@@ -1425,10 +1429,10 @@ void dcn401_prepare_bandwidth(struct dc *dc,
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
- compbuf_size_kb = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
+ compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
+ dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
- hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size_kb, false);
+ hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
}
if (dc->debug.fams2_config.bits.enable) {
@@ -1437,7 +1441,7 @@ void dcn401_prepare_bandwidth(struct dc *dc,
dcn401_fams2_global_control_lock(dc, context, false);
}
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+ if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
/* After disabling P-State, restore the original value to ensure we get the correct P-State
* on the next optimize. */
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
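
The prepare-bandwidth change above keys the MCLK-switch disable on the computed p_state_change_support rather than on FAMS state: the value is saved, cleared for the transition window, and restored afterwards so the next optimize pass sees the original result. A toy sketch of that save/clear/restore pattern:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Save the computed value, force MCLK switching off for the
	 * transition, then restore so the next optimize sees the truth. */
	bool p_state_change_support = true;	/* from bandwidth calc */
	bool saved = p_state_change_support;

	if (p_state_change_support)
		p_state_change_support = false;	/* transition window */

	/* ... program hardware with switching disabled ... */

	if (saved != p_state_change_support)
		p_state_change_support = saved;	/* restore */

	printf("%d\n", p_state_change_support);
	return 0;
}
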
@@ -1530,7 +1534,7 @@ void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool en
if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
return;
- fams2_required = context->bw_ctx.bw.dcn.fams2_stream_count > 0;
+ fams2_required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_required);
}
@@ -1542,7 +1546,6 @@ static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
struct pipe_ctx *old_pipe;
struct pipe_ctx *new_pipe;
struct pipe_ctx *old_opp_heads[MAX_PIPES];
- struct dccg *dccg = dc->res_pool->dccg;
struct pipe_ctx *old_otg_master;
int old_opp_head_count = 0;
@@ -1568,12 +1571,9 @@ static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
for (i = 0; i < old_opp_head_count; i++) {
old_pipe = old_opp_heads[i];
new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
- if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
- dccg->funcs->set_dto_dscclk(dccg,
- old_pipe->stream_res.dsc->inst, false);
+ if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
old_pipe->stream_res.dsc->funcs->dsc_disconnect(
old_pipe->stream_res.dsc);
- }
}
}
}
@@ -1659,7 +1659,7 @@ void dcn401_hardware_release(struct dc *dc)
*/
if (dc->current_state) {
if ((!dc->clk_mgr->clks.p_state_change_support ||
- dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0) &&
+ dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, true, true);
@@ -1669,3 +1669,104 @@ void dcn401_hardware_release(struct dc *dc)
}
}
+void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ struct pipe_ctx *dpp_pipes[MAX_PIPES];
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+ int dpp_count = 0;
+
+ if (!otg_master->stream)
+ return;
+
+ int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+ &context->res_ctx, opp_heads);
+
+ for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+ if (opp_heads[slice_idx]->plane_state) {
+ dpp_count = resource_get_dpp_pipes_for_opp_head(
+ opp_heads[slice_idx],
+ &context->res_ctx,
+ dpp_pipes);
+ for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
+ struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
+ if (dpp_pipe && hubbub &&
+ dpp_pipe->plane_res.hubp &&
+ hubbub->funcs->wait_for_det_update)
+ hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
+ }
+ }
+ }
+}
+
+void dcn401_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i = 0;
+ struct pipe_ctx *pipe = NULL;
+ struct timing_generator *tg = NULL;
+ bool pipe_unlocked[MAX_PIPES] = {0};
+
+ if (lock) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ }
+ } else {
+ /* Unlock pipes based on the change in DET allocation instead of pipe index.
+ * This prevents over-allocation of DET during the unlock process,
+ * e.g. 2 pipe config with different streams with a max of 20 DET segments
+ * Before: After:
+ * - Pipe0: 10 DET segments - Pipe0: 12 DET segments
+ * - Pipe1: 10 DET segments - Pipe1: 8 DET segments
+ * If Pipe0 gets updated first, 22 DET segments will be allocated
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+ int current_pipe_idx = i;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ pipe_unlocked[i] = true;
+ continue;
+ }
+
+ // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared
+ struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream);
+
+ if (old_otg_master)
+ current_pipe_idx = old_otg_master->pipe_idx;
+ if (resource_calculate_det_for_stream(context, pipe) <
+ resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) {
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ pipe_unlocked[i] = true;
+ dcn401_wait_for_det_buffer_update(dc, context, pipe);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (pipe_unlocked[i])
+ continue;
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+ }
+}
+
+void dcn401_program_outstanding_updates(struct dc *dc,
+ struct dc_state *context)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* update compbuf if required */
+ if (hubbub->funcs->program_compbuf_segments)
+ hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
+}
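
The two-pass unlock above implements the rule described in the comment: pipes whose DET allocation shrinks are unlocked first (and their DET update awaited), so pipes that grow never push the transient total past the budget. A standalone walk-through of the 20-segment example from the comment, with the numbers hard-coded for illustration:

#include <stdio.h>

#define NUM_PIPES 2

/* Illustrative DET segment counts from the comment above. */
static const int det_before[NUM_PIPES] = { 10, 10 };
static const int det_after[NUM_PIPES]  = { 12, 8 };

int main(void)
{
	int unlocked[NUM_PIPES] = { 0 };

	/* Pass 1: pipes whose DET allocation shrinks are unlocked first,
	 * and we wait for their DET update before continuing. */
	for (int i = 0; i < NUM_PIPES; i++)
		if (det_after[i] < det_before[i]) {
			printf("unlock pipe %d (shrinks %d -> %d), wait for DET update\n",
			       i, det_before[i], det_after[i]);
			unlocked[i] = 1;
		}

	/* Pass 2: growing pipes can now take the freed segments without
	 * transiently exceeding the 20-segment budget. */
	for (int i = 0; i < NUM_PIPES; i++)
		if (!unlocked[i])
			printf("unlock pipe %d (grows %d -> %d)\n",
			       i, det_before[i], det_after[i]);
	return 0;
}
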
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 8e9c1c17aa66..a27e62081685 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -81,4 +81,7 @@ void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
+void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
+void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
+void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index 6a768702c7bd..a2ca07235c83 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -38,7 +38,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn32_interdependent_update_lock,
+ .interdependent_update_lock = dcn401_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn401_prepare_bandwidth,
.optimize_bandwidth = dcn401_optimize_bandwidth,
@@ -99,12 +99,11 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.fams2_global_control_lock = dcn401_fams2_global_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
- .power_down = dce110_power_down,
+ .program_outstanding_updates = dcn401_program_outstanding_updates,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
.init_pipes = dcn10_init_pipes,
- .update_plane_addr = dcn20_update_plane_addr,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
.update_mpcc = dcn20_update_mpcc,
.set_input_transfer_func = dcn32_set_input_transfer_func,
@@ -115,8 +114,6 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .disable_stream_gating = dcn20_disable_stream_gating,
- .enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn32_init_blank,
@@ -136,12 +133,11 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn401_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
- .update_force_pstate = dcn32_update_force_pstate,
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
- .populate_mcm_luts = dcn401_populate_mcm_luts,
+ .populate_mcm_luts = NULL,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index d05be65a2256..ac9205625623 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -240,7 +240,6 @@ struct hw_sequencer_funcs {
void (*program_triplebuffer)(const struct dc *dc,
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
- void (*power_down)(struct dc *dc);
void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
/* Pipe Lock Related */
@@ -460,6 +459,9 @@ struct hw_sequencer_funcs {
bool enable);
void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
+ void (*program_outstanding_updates)(struct dc *dc,
+ struct dc_state *context);
+ void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
};
void color_space_to_black_color(
@@ -520,6 +522,21 @@ void hwss_build_fast_sequence(struct dc *dc,
struct dc_stream_status *stream_status,
struct dc_state *context);
+void hwss_wait_for_all_blank_complete(struct dc *dc,
+ struct dc_state *context);
+
+void hwss_wait_for_odm_update_pending_complete(struct dc *dc,
+ struct dc_state *context);
+
+void hwss_wait_for_no_pipes_pending(struct dc *dc,
+ struct dc_state *context);
+
+void hwss_wait_for_outstanding_hw_updates(struct dc *dc,
+ struct dc_state *dc_context);
+
+void hwss_process_outstanding_hw_updates(struct dc *dc,
+ struct dc_state *dc_context);
+
void hwss_send_dmcub_cmd(union block_sequence_params *params);
void hwss_program_manual_trigger(union block_sequence_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 7ac3f2a09487..0ac675456979 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -76,8 +76,6 @@ struct hwseq_private_funcs {
void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*init_pipes)(struct dc *dc, struct dc_state *context);
void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context);
- void (*update_plane_addr)(const struct dc *dc,
- struct pipe_ctx *pipe_ctx);
void (*plane_atomic_disconnect)(struct dc *dc,
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
@@ -170,7 +168,8 @@ struct hwseq_private_funcs {
unsigned int *k1_div,
unsigned int *k2_div);
void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ unsigned int current_pipe_idx);
enum dc_status (*apply_single_controller_ctx_to_hw)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 4c8e6436c7e1..bfb8b8502d20 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -534,8 +534,8 @@ struct dcn_bw_output {
unsigned int legacy_svp_drr_stream_index;
bool legacy_svp_drr_stream_index_valid;
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
+ struct dmub_cmd_fams2_global_config fams2_global_config;
struct dmub_fams2_stream_static_state fams2_stream_params[DML2_MAX_PLANES];
- unsigned fams2_stream_count;
struct dml2_display_arb_regs arb_regs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
index b6203253111c..8c18efc2aa70 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -46,6 +46,8 @@ struct audio_funcs {
const struct audio_info *audio_info,
const struct audio_dp_link_info *dp_link_info);
+ void (*az_disable_hbr_audio)(struct audio *audio);
+
void (*wall_dto_setup)(struct audio *audio,
enum signal_type signal,
const struct audio_crtc_info *crtc_info,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index d5fefce3e74b..2d06067ff36d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -29,9 +29,6 @@
#include "dc.h"
#include "dm_pp_smu.h"
-#define DCN_MINIMUM_DISPCLK_Khz 100000
-#define DCN_MINIMUM_DPPCLK_Khz 100000
-
/* Constants */
#define DDR4_DRAM_WIDTH 64
#define WM_A 0
@@ -180,6 +177,7 @@ struct clk_state_registers_and_bypass {
uint32_t dispclk;
uint32_t dppclk;
uint32_t dtbclk;
+ uint32_t fclk;
uint32_t dppclk_bypass;
uint32_t dcfclk_bypass;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 12282f96dfe1..c2dd061892f4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -191,7 +191,8 @@ enum dentist_divider_range {
CLK_SR_DCN401(CLK0_CLK1_DFS_CNTL, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK2_DFS_CNTL, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK3_DFS_CNTL, CLK01, 0), \
- CLK_SR_DCN401(CLK0_CLK4_DFS_CNTL, CLK01, 0)
+ CLK_SR_DCN401(CLK0_CLK4_DFS_CNTL, CLK01, 0), \
+ CLK_SR_DCN401(CLK2_CLK2_DFS_CNTL, CLK20, 0)
#define CLK_COMMON_MASK_SH_LIST_DCN401(mask_sh) \
CLK_COMMON_MASK_SH_LIST_DCN321(mask_sh)
@@ -235,6 +236,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK2_DFS_CNTL;
uint32_t CLK1_CLK3_DFS_CNTL;
uint32_t CLK1_CLK4_DFS_CNTL;
+ uint32_t CLK2_CLK2_DFS_CNTL;
uint32_t CLK1_CLK0_CURRENT_CNT;
uint32_t CLK1_CLK1_CURRENT_CNT;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 4fb1aacee894..e94e9ba60f55 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -211,11 +211,9 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
- void (*set_dto_dscclk)(
- struct dccg *dccg,
- uint32_t dsc_inst,
- bool enable);
+ void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
+ void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index dd2b2864876c..67c32401893e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -227,6 +227,7 @@ struct hubbub_funcs {
void (*get_mall_en)(struct hubbub *hubbub, unsigned int *mall_in_use);
void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg);
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
+ void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 27bba47186e9..41c76ba9ba56 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -217,12 +217,13 @@ enum optc_dsc_mode {
};
struct dc_bias_and_scale {
- uint16_t scale_red;
- uint16_t bias_red;
- uint16_t scale_green;
- uint16_t bias_green;
- uint16_t scale_blue;
- uint16_t bias_blue;
+ uint32_t scale_red;
+ uint32_t bias_red;
+ uint32_t scale_green;
+ uint32_t bias_green;
+ uint32_t scale_blue;
+ uint32_t bias_blue;
+ bool bias_and_scale_valid;
};
enum test_pattern_dyn_range {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 5f6c7daa14d9..a8b44f398ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -63,7 +63,7 @@ union dcn_watermark_set {
struct dml2_dchub_watermark_regs b;
struct dml2_dchub_watermark_regs c;
struct dml2_dchub_watermark_regs d;
- } dcn4; //dcn4+
+ } dcn4x; //dcn4+
};
struct dce_watermarks {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 287bf8a90ff6..03cbcbb36f1c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -65,6 +65,7 @@ struct optc {
int vupdate_offset;
int vupdate_width;
int vready_offset;
+ int pstate_keepout;
struct dc_crtc_timing orginal_patched_timing;
enum signal_type signal;
};
@@ -110,6 +111,7 @@ void optc1_program_timing(struct timing_generator *optc,
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios);
@@ -127,7 +129,8 @@ void optc1_program_global_sync(struct timing_generator *optc,
int vready_offset,
int vstartup_start,
int vupdate_offset,
- int vupdate_width);
+ int vupdate_width,
+ int pstate_keepout);
bool optc1_disable_crtc(struct timing_generator *optc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index e5e11c84e9e2..fe7f3137f228 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -271,7 +271,9 @@ struct stream_encoder_funcs {
struct stream_encoder *enc, unsigned int pix_per_container);
void (*enable_fifo)(struct stream_encoder *enc);
void (*disable_fifo)(struct stream_encoder *enc);
+ bool (*is_fifo_enabled)(struct stream_encoder *enc);
void (*map_stream_to_link)(struct stream_encoder *enc, uint32_t stream_enc_inst, uint32_t link_enc_inst);
+ uint32_t (*get_pixels_per_cycle)(struct stream_encoder *enc);
};
struct hpo_dp_stream_encoder_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 0f453452234c..3d4c8bd42b49 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -172,6 +172,7 @@ struct timing_generator_funcs {
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios
);
@@ -256,7 +257,8 @@ struct timing_generator_funcs {
int vready_offset,
int vstartup_start,
int vupdate_offset,
- int vupdate_width);
+ int vupdate_width,
+ int pstate_keepout);
void (*enable_optc_clock)(struct timing_generator *tg, bool enable);
void (*program_stereo)(struct timing_generator *tg,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 28da1dddf0a0..45262cba675e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -245,16 +245,6 @@ struct transform_funcs {
void (*set_cursor_attributes)(
struct transform *xfm_base,
const struct dc_cursor_attributes *attr);
-
- bool (*transform_program_blnd_lut)(
- struct transform *xfm,
- const struct pwl_params *params);
- bool (*transform_program_shaper_lut)(
- struct transform *xfm,
- const struct pwl_params *params);
- bool (*transform_program_3dlut)(
- struct transform *xfm,
- struct tetrahedral_params *params);
};
const uint16_t *get_filter_2tap_16p(void);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 96d40d33a1f9..cd1157d225ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -639,4 +639,11 @@ struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx);
* @dml2_options: struct to hold callbacks
*/
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options);
+
+/*
+ * Calculate total DET allocated for all pipes for a given OTG_MASTER pipe
+ */
+int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master);
+
+bool resource_is_hpo_acquired(struct dc_state *context);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 555c1c484cfd..ff8fe1a94965 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -67,6 +67,8 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
+ bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state);
+ bool is_hpo_acquired;
uint8_t count;
int i;
@@ -83,6 +85,12 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i]);
}
+ if (link->dc->hwss.setup_hpo_hw_control) {
+ is_hpo_acquired = resource_is_hpo_acquired(state);
+ if (was_hpo_acquired != is_hpo_acquired)
+ link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+ }
+
for (i = count-1; i >= 0; i--)
link_set_dpms_on(state, pipes[i]);
}
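
Because a retrain can move the stream between DIO and HPO encoders, the sequence above samples HPO acquisition before tearing the link down and again before bringing it back up, and only toggles the HPO HW control on a change. A minimal sketch of that compare-and-toggle, with setup_hpo_hw_control stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for dc->hwss.setup_hpo_hw_control. */
static void setup_hpo_hw_control(bool enable)
{
	printf("HPO hw control: %s\n", enable ? "enable" : "disable");
}

int main(void)
{
	bool was_hpo_acquired = false;	/* sampled before link_set_dpms_off */
	bool is_hpo_acquired  = true;	/* sampled after re-deciding the path */

	/* Toggle only on a state change, mirroring the retrain sequence. */
	if (was_hpo_acquired != is_hpo_acquired)
		setup_hpo_hw_control(is_hpo_acquired);
	return 0;
}
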
@@ -804,8 +812,11 @@ bool dp_set_test_pattern(
break;
}
+ if (!pipe_ctx->stream)
+ return false;
+
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_lock(pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index b76737b7b9e4..3e47a6735912 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -74,7 +74,10 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
- if (stream_enc && stream_enc->funcs->disable_fifo)
+ if (!stream_enc)
+ return;
+
+ if (stream_enc->funcs->disable_fifo)
stream_enc->funcs->disable_fifo(stream_enc);
if (stream_enc->funcs->set_input_mode)
stream_enc->funcs->set_input_mode(stream_enc, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 46fb3649bc86..6499807af72a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -50,8 +50,31 @@ static void update_dpia_stream_allocation_table(struct dc_link *link,
DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
status, mst_alloc_slots, prev_mst_slots_in_use);
- ASSERT(link_enc);
- link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
+ if (link_enc)
+ link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
+}
+
+static void set_dio_dpia_link_test_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ if (tp_params->dp_phy_pattern != DP_TEST_PATTERN_VIDEO_MODE)
+ return;
+
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (!link_enc)
+ return;
+
+ link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+}
+
+static void set_dio_dpia_lane_settings(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings,
+ const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
}
static const struct link_hwss dpia_link_hwss = {
@@ -65,8 +88,8 @@ static const struct link_hwss dpia_link_hwss = {
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
- .set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
- .set_dp_lane_settings = set_dio_dp_lane_settings,
+ .set_dp_link_test_pattern = set_dio_dpia_link_test_pattern,
+ .set_dp_lane_settings = set_dio_dpia_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
},
};
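
The DPIA variant above forwards only a return to video mode to the DIO encoder and makes the lane-settings hook a no-op, presumably because the tunnel's PHY is not driven through the DIO path. A tiny sketch of the forwarding guard, with an invented pattern enum:

#include <stdio.h>

enum { PATTERN_VIDEO_MODE = 0, PATTERN_TPS1 = 1 };

/* Hypothetical mirror of the DPIA override: only a return to video
 * mode reaches the DIO encoder; PHY test patterns are not forwarded. */
static void dpia_set_link_test_pattern(int pattern)
{
	if (pattern != PATTERN_VIDEO_MODE)
		return;
	printf("forwarding video-mode pattern to link encoder\n");
}

int main(void)
{
	dpia_set_link_test_pattern(PATTERN_TPS1);	/* ignored */
	dpia_set_link_test_pattern(PATTERN_VIDEO_MODE);	/* forwarded */
	return 0;
}
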
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index e1257404357b..cec68c5dba13 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -28,6 +28,8 @@
#include "dccg.h"
#include "clk_mgr.h"
+#define DC_LOGGER link->ctx->logger
+
void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size)
{
@@ -108,6 +110,11 @@ void enable_hpo_dp_link_output(struct dc_link *link,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
+ if (!link_res->hpo_dp_link_enc) {
+ DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__);
+ return;
+ }
+
if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
link->dc->res_pool->dccg,
@@ -124,6 +131,11 @@ void disable_hpo_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
+ if (!link_res->hpo_dp_link_enc) {
+ DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__);
+ return;
+ }
+
link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
link_res->hpo_dp_link_enc->funcs->disable_link_phy(
link_res->hpo_dp_link_enc, signal);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index bba644024780..d21ee9d12d26 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -863,7 +863,6 @@ static bool detect_link_and_local_sink(struct dc_link *link,
struct dc_sink *prev_sink = NULL;
struct dpcd_caps prev_dpcd_caps;
enum dc_connection_type new_connection_type = dc_connection_none;
- enum dc_connection_type pre_connection_type = link->type;
const uint32_t post_oui_delay = 30; // 30ms
DC_LOGGER_INIT(link->ctx->logger);
@@ -965,7 +964,6 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (!detect_dp(link, &sink_caps, reason)) {
- link->type = pre_connection_type;
if (prev_sink)
dc_sink_release(prev_sink);
@@ -1191,8 +1189,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
//sink can only use supported link rate table, we are forced to enable it
if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
link->panel_config.ilr.optimize_edp_link_rate = true;
- if (edp_is_ilr_optimization_enabled(link))
- link->reported_link_cap.link_rate = get_max_link_rate_from_ilr_table(link);
+ link->reported_link_cap.link_rate = get_max_edp_link_rate(link);
}
} else {
@@ -1299,8 +1296,7 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
link->dpcd_caps.is_mst_capable)
is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
- if (is_local_sink_detect_success &&
- pre_link_type == dc_connection_mst_branch &&
+ if (pre_link_type == dc_connection_mst_branch &&
link->type != dc_connection_mst_branch)
is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 65607589495f..c4e03482ba9a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -817,17 +817,17 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
- if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst, true);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
+ if (should_use_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
- if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, true);
}
dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
dsc_cfg.pic_width *= opp_cnt;
@@ -879,19 +879,32 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* disable DSC block */
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, pipe_ctx->stream_res.dsc->inst, false);
- pipe_ctx->stream_res.dsc->funcs->dsc_disconnect(pipe_ctx->stream_res.dsc);
- if (dccg->funcs->set_ref_dscclk)
- dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst);
- pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- if (dccg->funcs->set_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_pipe->stream_res.dsc->inst, false);
+ for (odm_pipe = pipe_ctx; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.dsc->funcs->dsc_disconnect(odm_pipe->stream_res.dsc);
+ /*
+ * TODO - dsc_disconnect is a double-buffered register.
+ * By the time we call dsc_disable, DSC may still remain
+ * connected to the OPP. In this case OPTC will no longer
+ * get correct pixel data because DSCC is off. However,
+ * we also can't wait for the pending disconnect to
+ * complete, because this function can be called with or
+ * without the OTG master lock acquired. When the lock is
+ * acquired we will never see the pending state complete
+ * until we release the lock later, so there is no easy
+ * way to solve this problem, especially when the lock is
+ * held. DSC is a front-end HW block and should be
+ * programmed as part of the front-end sequence, where the
+ * commit sequence (without the lock) and the update
+ * sequence (with the lock) are completely separated.
+ * However, because we are programming DSC as part of the
+ * back-end link programming sequence, we don't know
+ * whether the front-end OPTC master lock is acquired. The
+ * back end should be agnostic to the front-end lock; DSC
+ * programming shouldn't belong to this sequence.
+ */
+ odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
if (dccg->funcs->set_ref_dscclk)
dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst);
- odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
}
}
}
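
The DSC enable path above now raises the DTO DSCCLK before dsc_set_config/dsc_enable for the OPP head and every ODM pipe (previously the clock was switched afterwards), and the disable path walks all ODM pipes with disconnect-then-disable. A sketch of the enable-side ordering, with the DCCG/DSC calls stubbed out:

#include <stdio.h>

/* Hypothetical stubs for the DCCG/DSC calls used above. */
static void set_dto_dscclk(int inst)	{ printf("dscclk DTO on, inst %d\n", inst); }
static void dsc_set_config(int inst)	{ printf("dsc_set_config, inst %d\n", inst); }
static void dsc_enable(int inst)	{ printf("dsc_enable, inst %d\n", inst); }

int main(void)
{
	/* New ordering: the DSC block only gets register writes once its
	 * clock is already running, for the OPP head and each ODM pipe. */
	for (int inst = 0; inst < 2; inst++) {
		set_dto_dscclk(inst);
		dsc_set_config(inst);
		dsc_enable(inst);
	}
	return 0;
}
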
@@ -2345,7 +2358,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ else if (dc_is_dp_sst_signal(pipe_ctx->stream->signal) &&
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, false);
@@ -2578,7 +2591,7 @@ void link_set_dpms_on(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ else if (dc_is_dp_sst_signal(pipe_ctx->stream->signal) &&
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 8246006857b3..5e1b5ab9fbc6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -385,7 +385,7 @@ static void link_destruct(struct dc_link *link)
if (link->panel_cntl)
link->panel_cntl->funcs->destroy(&link->panel_cntl);
- if (link->link_enc) {
+ if (link->link_enc && !link->is_dig_mapping_flexible) {
/* Update link encoder resource tracking variables. These are used for
* the dynamic assignment of link encoders to streams. Virtual links
* are not assigned encoder resources on creation.
@@ -524,6 +524,7 @@ static bool construct_phy(struct dc_link *link,
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_MXM:
case CONNECTOR_ID_USBC:
link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 1aed55b0ab6a..60f15a9ba7a5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -287,6 +287,13 @@ static bool dp_validate_mode_timing(
req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link));
max_bw = dp_link_bandwidth_kbps(link, link_setting);
+ bool is_max_uncompressed_pixel_rate_exceeded = link->dpcd_caps.max_uncompressed_pixel_rate_cap.bits.valid &&
+ timing->pix_clk_100hz > link->dpcd_caps.max_uncompressed_pixel_rate_cap.bits.max_uncompressed_pixel_rate_cap * 10000;
+
+ if (is_max_uncompressed_pixel_rate_exceeded && !timing->flags.DSC) {
+ return false;
+ }
+
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
* initial link training (to get
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 46bb7a855bc2..d78c8ec4de79 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -212,6 +212,13 @@ static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in
case 10000000:
link_rate = LINK_RATE_UHBR10; // UHBR10 - 10.0 Gbps/Lane
break;
+ case 13500000:
+ link_rate = LINK_RATE_UHBR13_5; // UHBR13.5 - 13.5 Gbps/Lane
+ break;
+ case 20000000:
+ link_rate = LINK_RATE_UHBR20; // UHBR20 - 20.0 Gbps/Lane
+ break;
+
default:
link_rate = LINK_RATE_UNKNOWN;
break;
@@ -541,6 +548,23 @@ static enum dc_link_rate increase_link_rate(struct dc_link *link,
}
}
+static void increase_edp_link_rate(struct dc_link *link,
+ struct dc_link_settings *current_link_setting)
+{
+ if (current_link_setting->use_link_rate_set) {
+ if (current_link_setting->link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting->link_rate_set++;
+ current_link_setting->link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting->link_rate_set];
+ } else {
+ current_link_setting->use_link_rate_set = false;
+ current_link_setting->link_rate = LINK_RATE_UHBR10;
+ }
+ } else {
+ current_link_setting->link_rate = increase_link_rate(link, current_link_setting->link_rate);
+ }
+}
+
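
increase_edp_link_rate consolidates the rate stepping used by the eDP decide loops below: while use_link_rate_set holds, it walks the sink's ILR table; once the table is exhausted it leaves ILR mode and jumps to UHBR10 so the generic ladder can take over. A standalone trace of that walk (the table values are invented, and the bounds check is simplified for the sketch):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative ILR table (invented rate codes). */
static const int ilr_table[] = { 162, 270, 540, 810 };
static const int ilr_count = 4;

int main(void)
{
	bool use_link_rate_set = true;
	int link_rate_set = 0;
	int link_rate = ilr_table[0];

	/* Step the rate a few times, as the eDP decide loops do. */
	for (int step = 0; step < 6; step++) {
		if (use_link_rate_set && link_rate_set + 1 < ilr_count) {
			link_rate = ilr_table[++link_rate_set];
		} else if (use_link_rate_set) {
			/* Table exhausted: leave ILR mode, jump to UHBR10. */
			use_link_rate_set = false;
			link_rate = 1000;	/* stand-in for LINK_RATE_UHBR10 */
		} else {
			break;			/* generic ladder would continue here */
		}
		printf("rate -> %d\n", link_rate);
	}
	return 0;
}
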
static bool decide_fallback_link_setting_max_bw_policy(
struct dc_link *link,
const struct dc_link_settings *max,
@@ -759,14 +783,7 @@ bool edp_decide_link_settings(struct dc_link *link,
increase_lane_count(
current_link_setting.lane_count);
} else {
- if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- } else
- break;
+ increase_edp_link_rate(link, &current_link_setting);
}
}
return false;
@@ -818,9 +835,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
if (policy) {
/* minimize lane */
if (current_link_setting.link_rate < max_link_rate) {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
+ increase_edp_link_rate(link, &current_link_setting);
} else {
if (current_link_setting.lane_count <
link->verified_link_cap.lane_count) {
@@ -839,9 +854,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
increase_lane_count(
current_link_setting.lane_count);
} else {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
+ increase_edp_link_rate(link, &current_link_setting);
current_link_setting.lane_count =
initial_link_setting.lane_count;
}
@@ -874,18 +887,15 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
}
if (policy) {
/* minimize lane */
- if (current_link_setting.link_rate_set <
- link->dpcd_caps.edp_supported_link_rates_count
- && current_link_setting.link_rate < max_link_rate) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ if (current_link_setting.link_rate < max_link_rate) {
+ increase_edp_link_rate(link, &current_link_setting);
} else {
if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
current_link_setting.lane_count =
increase_lane_count(
current_link_setting.lane_count);
current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+ current_link_setting.use_link_rate_set = initial_link_setting.use_link_rate_set;
current_link_setting.link_rate =
link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
} else
@@ -899,13 +909,8 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
increase_lane_count(
current_link_setting.lane_count);
} else {
- if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- } else
+ increase_edp_link_rate(link, &current_link_setting);
+ if (current_link_setting.link_rate == LINK_RATE_UNKNOWN)
break;
}
}
@@ -1166,6 +1171,8 @@ static void get_active_converter_info(
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
hdmi_encoded_link_bw);
+ DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
}
if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
@@ -1541,7 +1548,11 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
(dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
- ASSERT(0);
+ /* If you see this message consistently, either the host platform has the FIXED_VS flag
+ * incorrectly configured or the sink device is returning an invalid count.
+ */
+ DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.",
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
@@ -1931,6 +1942,11 @@ static bool retrieve_link_cap(struct dc_link *link)
DC_LOG_DP2("\tFEC aggregated error counters are supported");
}
+ core_link_read_dpcd(link,
+ DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP,
+ link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
+ sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
+
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
@@ -2254,7 +2270,7 @@ bool dp_verify_link_cap_with_retries(
memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
- if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
+ if (link->link_enc && (!link_detect_connection_type(link, &type) || type == dc_connection_none)) {
link->verified_link_cap = fail_safe_link_settings;
break;
} else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 988999c44475..27b881f947e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -515,6 +515,41 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
return align_status.bits.INTERLANE_ALIGN_DONE == 1;
}
+bool dp_check_interlane_aligned(union lane_align_status_updated align_status,
+ struct dc_link *link,
+ uint8_t retries)
+{
+ /* Take into consideration the corner case for DP 1.4a LL Compliance CTS, as USB4
+ * has to share encoders, unlike DP and USBC
+ */
+ return (dp_is_interlane_aligned(align_status) ||
+ (link->skip_fallback_on_link_loss && retries));
+}
+
+uint32_t dp_get_eq_aux_rd_interval(
+ const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t offset,
+ uint8_t retries)
+{
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (offset == 0 && retries == 1 && lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ return max(lt_settings->eq_pattern_time, (uint32_t) DPIA_CLK_SYNC_DELAY);
+ else
+ return dpia_get_eq_aux_rd_interval(link, lt_settings, offset);
+ } else if (is_repeater(lt_settings, offset))
+ return dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+ else
+ return lt_settings->eq_pattern_time;
+}
+
+bool dp_check_dpcd_reqeust_status(const struct dc_link *link,
+ enum dc_status status)
+{
+ return (status != DC_OK && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA);
+}
+
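
The three helpers above fold USB4/DPIA corner cases into the shared 8b/10b sequence: interlane alignment tolerates a retry when fallback-on-link-loss is skipped, the EQ read interval covers the DPIA clock-sync delay on the first DPRX retry in non-transparent mode, and a failed DPCD request aborts training only for DPIA endpoints. A simplified, hypothetical mirror of the interval selection:

#include <stdio.h>
#include <stdint.h>

#define DPIA_CLK_SYNC_DELAY 16000u

/* Simplified, hypothetical mirror of dp_get_eq_aux_rd_interval(). */
static uint32_t eq_aux_rd_interval(int is_dpia, int offset, int retries,
				   int non_transparent, uint32_t eq_pattern_time,
				   uint32_t repeater_interval)
{
	if (is_dpia) {
		/* First retry against DPRX in non-transparent mode must
		 * also cover the USB4 clock-sync packets. */
		if (offset == 0 && retries == 1 && non_transparent)
			return eq_pattern_time > DPIA_CLK_SYNC_DELAY ?
			       eq_pattern_time : DPIA_CLK_SYNC_DELAY;
		return repeater_interval;	/* dpia_get_eq_aux_rd_interval() path */
	}
	return offset ? repeater_interval : eq_pattern_time;
}

int main(void)
{
	printf("%u\n", eq_aux_rd_interval(1, 0, 1, 1, 400, 4000));
	return 0;
}
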
enum link_training_result dp_check_link_loss_status(
struct dc_link *link,
const struct link_training_settings *link_training_setting)
@@ -973,13 +1008,17 @@ void repeater_training_done(struct dc_link *link, uint32_t offset)
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
-static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
+static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
{
+ enum dc_status status;
uint8_t sink_status = 0;
uint8_t i;
/* clear training pattern set */
- dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+ status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+ if (dp_check_dpcd_reqeust_status(link, status))
+ return LINK_TRAINING_ABORT;
if (encoding == DP_128b_132b_ENCODING) {
/* poll for intra-hop disable */
@@ -990,6 +1029,8 @@ static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding
fsleep(1000);
}
}
+
+ return LINK_TRAINING_SUCCESS;
}
enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
@@ -1013,17 +1054,18 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
return status;
}
-void dpcd_set_training_pattern(
+enum dc_status dpcd_set_training_pattern(
struct dc_link *link,
enum dc_dp_training_pattern training_pattern)
{
+ enum dc_status status;
union dpcd_training_pattern dpcd_pattern = {0};
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dp_training_pattern_to_dpcd_training_pattern(
link, training_pattern);
- core_link_write_dpcd(
+ status = core_link_write_dpcd(
link,
DP_TRAINING_PATTERN_SET,
&dpcd_pattern.raw,
@@ -1033,6 +1075,8 @@ void dpcd_set_training_pattern(
__func__,
DP_TRAINING_PATTERN_SET,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+
+ return status;
}
enum dc_status dpcd_set_link_settings(
@@ -1185,6 +1229,13 @@ void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ dpia_set_tps_notification(
+ link,
+ lt_settings,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET,
+ offset);
+
if (is_repeater(lt_settings, offset)) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
__func__,
@@ -1455,7 +1506,8 @@ static enum link_training_result dp_transition_to_video_idle(
*/
if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
msleep(5);
- status = dp_check_link_loss_status(link, lt_settings);
+ if (!link->skip_fallback_on_link_loss)
+ status = dp_check_link_loss_status(link, lt_settings);
}
return status;
}
@@ -1521,7 +1573,9 @@ enum link_training_result dp_perform_link_training(
ASSERT(0);
/* exit training mode */
- dpcd_exit_training_mode(link, encoding);
+ if ((dpcd_exit_training_mode(link, encoding) != LINK_TRAINING_SUCCESS || status == LINK_TRAINING_ABORT) &&
+ link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ dpia_training_abort(link, &lt_settings, 0);
/* switch to video idle */
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
@@ -1599,8 +1653,7 @@ bool perform_link_training_with_retries(
dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
return true;
} else {
- /** @todo Consolidate USB4 DP and DPx.x training. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (!link->dc->config.consolidated_dpia_dp_lt && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
status = dpia_perform_link_training(
link,
&pipe_ctx->link_res,
@@ -1629,8 +1682,17 @@ bool perform_link_training_with_retries(
dp_trace_lt_total_count_increment(link, false);
dp_trace_lt_result_update(link, status, false);
dp_trace_set_lt_end_timestamp(link, false);
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
+ // Update verified link settings to the current ones,
+ // because DPIA LT might fall back to a lower link setting.
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
+ }
return true;
+ }
}
fail_count++;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 851bd17317a0..0b18aa35c33c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -55,7 +55,7 @@ void dp_set_hw_test_pattern(
uint8_t *custom_pattern,
uint32_t custom_pattern_size);
-void dpcd_set_training_pattern(
+enum dc_status dpcd_set_training_pattern(
struct dc_link *link,
enum dc_dp_training_pattern training_pattern);
@@ -182,4 +182,18 @@ uint32_t dp_translate_training_aux_read_interval(
uint8_t dp_get_nibble_at_index(const uint8_t *buf,
uint32_t index);
+
+bool dp_check_interlane_aligned(union lane_align_status_updated align_status,
+ struct dc_link *link,
+ uint8_t retries);
+
+uint32_t dp_get_eq_aux_rd_interval(
+ const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t offset,
+ uint8_t retries);
+
+bool dp_check_dpcd_reqeust_status(const struct dc_link *link,
+ enum dc_status status);
+
#endif /* __DC_LINK_DP_TRAINING_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 2b4c15b0b407..3bdce32a85e3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -157,6 +157,7 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence(
struct link_training_settings *lt_settings,
uint32_t offset)
{
+ enum dc_status status;
uint32_t retries_cr;
uint32_t retry_count;
uint32_t wait_time_microsec;
@@ -216,7 +217,7 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence(
/* 4. Read lane status and requested drive
* settings as set by the sink
*/
- dp_get_lane_status_and_lane_adjust(
+ status = dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
@@ -224,6 +225,9 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence(
dpcd_lane_adjust,
offset);
+ if (dp_check_dpcd_reqeust_status(link, status))
+ return LINK_TRAINING_ABORT;
+
/* 5. check CR done*/
if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__);
@@ -273,6 +277,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
struct link_training_settings *lt_settings,
uint32_t offset)
{
+ enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
uint32_t retries_ch_eq;
uint32_t wait_time_microsec;
@@ -308,12 +313,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
dpcd_set_lane_settings(link, lt_settings, offset);
/* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- if (is_repeater(lt_settings, offset))
- wait_time_microsec =
- dp_translate_training_aux_read_interval(
- link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+ wait_time_microsec = dp_get_eq_aux_rd_interval(link, lt_settings, offset, retries_ch_eq);
dp_wait_for_training_aux_rd_interval(
link,
@@ -322,7 +322,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
/* 4. Read lane status and requested
* drive settings as set by the sink*/
- dp_get_lane_status_and_lane_adjust(
+ status = dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
@@ -330,6 +330,9 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
dpcd_lane_adjust,
offset);
+ if (dp_check_dpcd_reqeust_status(link, status))
+ return LINK_TRAINING_ABORT;
+
/* 5. check CR done*/
if (!dp_is_cr_done(lane_count, dpcd_lane_status))
return dpcd_lane_status[0].bits.CR_DONE_0 ?
@@ -339,7 +342,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence(
/* 6. check CHEQ done*/
if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated))
+ dp_check_interlane_aligned(dpcd_lane_status_updated, link, retries_ch_eq))
return LINK_TRAINING_SUCCESS;
/* 7. update VS/PE/PC2 in lt_settings*/
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index cd1975c03f38..39e4b7dc9588 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -43,9 +43,6 @@
#define DC_LOGGER \
link->ctx->logger
-/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
-#define DPIA_CLK_SYNC_DELAY 16000
-
/* Extend interval between training status checks for manual testing. */
#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000
@@ -566,28 +563,6 @@ static enum link_training_result dpia_training_cr_phase(
return result;
}
-/* Return status read interval during equalization phase. */
-static uint32_t dpia_get_eq_aux_rd_interval(
- const struct dc_link *link,
- const struct link_training_settings *lt_settings,
- uint32_t hop)
-{
- uint32_t wait_time_microsec;
-
- if (hop == DPRX)
- wait_time_microsec = lt_settings->eq_pattern_time;
- else
- wait_time_microsec =
- dp_translate_training_aux_read_interval(
- link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]);
-
- /* Check debug option for extending aux read interval. */
- if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval)
- wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US;
-
- return wait_time_microsec;
-}
-
/* Execute equalization phase of link training for specified hop in display
* path in non-transparent mode:
* - driver issues both DPCD and SET_CONFIG transactions.
@@ -936,6 +911,22 @@ static enum link_training_result dpia_training_end(
return result;
}
+/* Return status read interval during equalization phase. */
+uint32_t dpia_get_eq_aux_rd_interval(
+ const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ /* Check debug option for extending aux read interval. */
+ if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval)
+ return DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US;
+ else if (hop == DPRX)
+ return lt_settings->eq_pattern_time;
+ else
+ return dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]);
+}
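
For context, the equalization loop consumes this helper roughly as follows; a minimal sketch, assuming only dpia_get_eq_aux_rd_interval and dp_wait_for_training_aux_rd_interval from this diff (the surrounding caller is illustrative):

/* Sketch: wait out the per-hop EQ read interval before polling status.
 * DPRX (hop 0) uses eq_pattern_time; LTTPR hops use their advertised
 * AUX_RD_INTERVAL; the debug bit overrides both with 60 s.
 */
static void eq_wait_example(struct dc_link *link,
		struct link_training_settings *lt_settings, uint32_t hop)
{
	uint32_t wait_us = dpia_get_eq_aux_rd_interval(link, lt_settings, hop);

	dp_wait_for_training_aux_rd_interval(link, wait_us);
}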
+
/* When aborting training of specified hop in display path, clean up by:
* - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET.
* - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0.
@@ -943,7 +934,7 @@ static enum link_training_result dpia_training_end(
* @param link DPIA link being trained.
* @param hop Hop in display path. DPRX = 0.
*/
-static void dpia_training_abort(
+void dpia_training_abort(
struct dc_link *link,
struct link_training_settings *lt_settings,
uint32_t hop)
@@ -968,7 +959,26 @@ static void dpia_training_abort(
core_link_write_dpcd(link, dpcd_tps_offset, &data, 1);
core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1);
core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1);
- core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
+
+ if (!link->dc->config.consolidated_dpia_dp_lt)
+ core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
+}
+
+void dpia_set_tps_notification(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint8_t pattern,
+ uint32_t hop)
+{
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+
+ if (lt_settings->lttpr_mode != LTTPR_MODE_NON_TRANSPARENT || pattern == DPCD_TRAINING_PATTERN_VIDEOIDLE)
+ return;
+
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ if (hop != repeater_cnt)
+ dc_process_dmub_dpia_set_tps_notification(link->ctx->dc, link->link_index, pattern);
}
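
A quick illustration of the gating in dpia_set_tps_notification; a hedged sketch where repeater_cnt is assumed to parse to 2 and pattern stands in for a training-pattern value from the same enum as DPCD_TRAINING_PATTERN_VIDEOIDLE:

/* Non-transparent LTTPR mode, repeater_cnt == 2:
 *   hop 0 and hop 1 -> DMUB TPS notification is sent
 *   hop 2           -> skipped (hop == repeater_cnt)
 * Any VIDEOIDLE pattern, or a link not in non-transparent mode,
 * returns early and never notifies.
 */
dpia_set_tps_notification(link, lt_settings, pattern, 1 /* hop */);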
enum link_training_result dpia_perform_link_training(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
index b39fb9faf1c2..9f4eceb494c2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
@@ -28,6 +28,9 @@
#define __DC_LINK_DP_TRAINING_DPIA_H__
#include "link_dp_training.h"
+/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
+#define DPIA_CLK_SYNC_DELAY 16000
+
/* Train DP tunneling link for USB4 DPIA display endpoint.
 * DPIA equivalent of dc_link_dp_perform_link_training.
* Aborts link training upon detection of sink unplug.
@@ -38,4 +41,20 @@ enum link_training_result dpia_perform_link_training(
const struct dc_link_settings *link_setting,
bool skip_video_pattern);
+void dpia_training_abort(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop);
+
+uint32_t dpia_get_eq_aux_rd_interval(
+ const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t hop);
+
+void dpia_set_tps_notification(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint8_t pattern,
+ uint32_t offset);
+
#endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index bf820d2b4dc4..3aa05a2be6c0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -305,16 +305,17 @@ bool edp_is_ilr_optimization_enabled(struct dc_link *link)
return true;
}
-enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link)
+enum dc_link_rate get_max_edp_link_rate(struct dc_link *link)
{
- enum dc_link_rate link_rate = link->reported_link_cap.link_rate;
+ enum dc_link_rate max_ilr_rate = LINK_RATE_UNKNOWN;
+ enum dc_link_rate max_non_ilr_rate = dp_get_max_link_cap(link).link_rate;
for (int i = 0; i < link->dpcd_caps.edp_supported_link_rates_count; i++) {
- if (link_rate < link->dpcd_caps.edp_supported_link_rates[i])
- link_rate = link->dpcd_caps.edp_supported_link_rates[i];
+ if (max_ilr_rate < link->dpcd_caps.edp_supported_link_rates[i])
+ max_ilr_rate = link->dpcd_caps.edp_supported_link_rates[i];
}
- return link_rate;
+ return (max_ilr_rate > max_non_ilr_rate ? max_ilr_rate : max_non_ilr_rate);
}
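
The renamed helper now folds the non-ILR cap into the result; a minimal sketch of the post-change invariant, reusing dp_get_max_link_cap and ASSERT as they appear elsewhere in this diff:

/* Example: if the ILR table tops out at 5.4 Gbps (LINK_RATE_HIGH2) but
 * dp_get_max_link_cap() reports 8.1 Gbps (LINK_RATE_HIGH3), the function
 * now returns LINK_RATE_HIGH3 instead of the ILR maximum.
 */
enum dc_link_rate max_rate = get_max_edp_link_rate(link);

ASSERT(max_rate >= dp_get_max_link_cap(link).link_rate);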
bool edp_is_ilr_optimization_required(struct dc_link *link,
@@ -1167,6 +1168,9 @@ static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (link_res->hpo_dp_link_enc) {
+ if (link->wa_flags.disable_assr_for_uhbr)
+ return;
+
link_enc_index = link_res->hpo_dp_link_enc->inst;
use_hpo_dp_link_enc = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 8df8ac5bde5b..30dc8c24c008 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -69,7 +69,7 @@ bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
bool edp_is_ilr_optimization_enabled(struct dc_link *link);
-enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link);
+enum dc_link_rate get_max_edp_link_rate(struct dc_link *link);
bool edp_backlight_enable_aux(struct dc_link *link, bool enable);
void edp_add_delay_for_T9(struct dc_link *link);
bool edp_receiver_ready_T9(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
index 505bc0517e08..eab196c57c6c 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile
@@ -25,6 +25,15 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN20
+###############################################################################
+MMHUBBUB_DCN20 = dcn20_mmhubbub.o
+
+AMD_DAL_MMHUBBUB_DCN20 = $(addprefix $(AMDDALPATH)/dc/mmhubbub/dcn20/,$(MMHUBBUB_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MMHUBBUB_DCN20)
+
+###############################################################################
# DCN32
###############################################################################
MMHUBBUB_DCN32 = dcn32_mmhubbub.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
index 259a98e4ee2c..259a98e4ee2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h
index 5ab32aa51e13..5ab32aa51e13 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
index 7f7458c07e2a..5402c3529f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile
@@ -25,6 +25,33 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN10
+###############################################################################
+MPC_DCN10 = dcn10_mpc.o
+
+AMD_DAL_MPC_DCN10 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn10/,$(MPC_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN10)
+
+###############################################################################
+# DCN20
+###############################################################################
+MPC_DCN20 = dcn20_mpc.o
+
+AMD_DAL_MPC_DCN20 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn20/,$(MPC_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN20)
+
+###############################################################################
+# DCN30
+###############################################################################
+MPC_DCN30 = dcn30_mpc.o
+
+AMD_DAL_MPC_DCN30 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn30/,$(MPC_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN30)
+
+###############################################################################
# DCN32
###############################################################################
MPC_DCN32 = dcn32_mpc.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
index f2f55565e98a..f2f55565e98a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
index dbfffc6383dc..dbfffc6383dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c
index ea73473b970a..ea73473b970a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h
index 496658f420db..496658f420db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index 3aeb85ec40b0..fe26fde12eeb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -25,7 +25,7 @@
#include "reg_helper.h"
#include "dcn30_mpc.h"
-#include "dcn30_cm_common.h"
+#include "dcn30/dcn30_cm_common.h"
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index ce93003dae01..ce93003dae01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
diff --git a/drivers/gpu/drm/amd/display/dc/opp/Makefile b/drivers/gpu/drm/amd/display/dc/opp/Makefile
index fbfb3c3ad819..1be76754db30 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/opp/Makefile
@@ -25,6 +25,22 @@
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
+# DCN10
+###############################################################################
+OPP_DCN10 = dcn10_opp.o
+
+AMD_DAL_OPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/opp/dcn10/,$(OPP_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPP_DCN10)
+###############################################################################
+# DCN20
+###############################################################################
+OPP_DCN20 = dcn20_opp.o
+
+AMD_DAL_OPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/opp/dcn20/,$(OPP_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPP_DCN20)
+###############################################################################
# DCN35
###############################################################################
OPP_DCN35 = dcn35_opp.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
index 71e9288d60ed..71e9288d60ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
index c87de68a509e..c87de68a509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
index f5fe0cac7cb0..f5fe0cac7cb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
index 34936e6c49f3..34936e6c49f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 94427875bcdd..097d06023e64 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -65,7 +65,8 @@ void optc1_program_global_sync(
int vready_offset,
int vstartup_start,
int vupdate_offset,
- int vupdate_width)
+ int vupdate_width,
+ int pstate_keepout)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -73,6 +74,7 @@ void optc1_program_global_sync(
optc1->vstartup_start = vstartup_start;
optc1->vupdate_offset = vupdate_offset;
optc1->vupdate_width = vupdate_width;
+ optc1->pstate_keepout = pstate_keepout;
if (optc1->vstartup_start == 0) {
BREAK_TO_DEBUGGER();
@@ -146,6 +148,7 @@ void optc1_setup_vertical_interrupt2(
* @vstartup_start: Vstartup period.
* @vupdate_offset: Vupdate starting position.
* @vupdate_width: Vupdate duration.
+ * @pstate_keepout: Determines low power mode (p-state) timing during refresh.
* @signal: DC signal types.
* @use_vbios: to program timings from BIOS command table.
*
@@ -157,6 +160,7 @@ void optc1_program_timing(
int vstartup_start,
int vupdate_offset,
int vupdate_width,
+ int pstate_keepout,
const enum signal_type signal,
bool use_vbios)
{
@@ -177,6 +181,7 @@ void optc1_program_timing(
optc1->vstartup_start = vstartup_start;
optc1->vupdate_offset = vupdate_offset;
optc1->vupdate_width = vupdate_width;
+ optc1->pstate_keepout = pstate_keepout;
patched_crtc_timing = *dc_crtc_timing;
apply_front_porch_workaround(&patched_crtc_timing);
optc1->orginal_patched_timing = patched_crtc_timing;
@@ -282,7 +287,8 @@ void optc1_program_timing(
vready_offset,
vstartup_start,
vupdate_offset,
- vupdate_width);
+ vupdate_width,
+ pstate_keepout);
optc->funcs->set_vtg_params(optc, dc_crtc_timing, true);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 369a13244e5e..b7a57f98553d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -201,6 +201,7 @@ struct dcn_optc_registers {
uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK;
uint32_t OPTC_CLOCK_CONTROL;
uint32_t OPTC_WIDTH_CONTROL2;
+ uint32_t OTG_PSTATE_REGISTER;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -590,7 +591,11 @@ struct dcn_optc_registers {
type OTG_V_COUNT_STOP_TIMER;
#define TG_REG_FIELD_LIST_DCN401(type) \
- type OPTC_SEGMENT_WIDTH_LAST;
+ type OPTC_SEGMENT_WIDTH_LAST;\
+ type OTG_PSTATE_KEEPOUT_START;\
+ type OTG_PSTATE_EXTEND;\
+ type OTG_UNBLANK;\
+ type OTG_PSTATE_ALLOW_WIDTH_MIN;
struct dcn_optc_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 6bbbf313b2bb..4b6446ed4ce4 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -149,7 +149,9 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
return true;
}
-
+/*
+ * optc31_immediate_disable_crtc - temporarily disable the timing generator
+ * without resetting ODM.
+ */
bool optc31_immediate_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -162,10 +164,12 @@ bool optc31_immediate_disable_crtc(struct timing_generator *optc)
VTG0_ENABLE, 0);
/* CRTC disabled, so disable clock. */
- REG_WAIT(OTG_CLOCK_CONTROL,
+ if (optc->ctx->dce_environment != DCE_ENV_DIAG)
+ REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+
/* clear the false state */
optc1_clear_optc_underflow(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 9f5c2efa7560..a5d6a7dca554 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -396,13 +396,47 @@ void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, i
}
}
+static void optc401_program_global_sync(
+ struct timing_generator *optc,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ int pstate_keepout)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ optc1->vready_offset = vready_offset;
+ optc1->vstartup_start = vstartup_start;
+ optc1->vupdate_offset = vupdate_offset;
+ optc1->vupdate_width = vupdate_width;
+ optc1->pstate_keepout = pstate_keepout;
+
+ if (optc1->vstartup_start == 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ REG_SET(OTG_VSTARTUP_PARAM, 0,
+ VSTARTUP_START, optc1->vstartup_start);
+
+ REG_SET_2(OTG_VUPDATE_PARAM, 0,
+ VUPDATE_OFFSET, optc1->vupdate_offset,
+ VUPDATE_WIDTH, optc1->vupdate_width);
+
+ REG_SET(OTG_VREADY_PARAM, 0,
+ VREADY_OFFSET, optc1->vready_offset);
+
+ REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout);
+}
+
static struct timing_generator_funcs dcn401_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
- .program_global_sync = optc1_program_global_sync,
+ .program_global_sync = optc401_program_global_sync,
.enable_crtc = optc401_enable_crtc,
.disable_crtc = optc401_disable_crtc,
.phantom_crtc_post_enable = optc401_phantom_crtc_post_enable,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
index 3114ecef332a..bb13a645802d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
@@ -155,7 +155,11 @@
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
- SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, mask_sh),\
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_EXTEND, mask_sh),\
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_UNBLANK, mask_sh),\
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh)
void dcn401_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index 4860bb2531a1..09320344d8e9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -198,8 +198,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351)
###############################################################################
-###############################################################################
-
RESOURCE_DCN401 = dcn401_resource.o
AMD_DAL_RESOURCE_DCN401 = $(addprefix $(AMDDALPATH)/dc/resource/dcn401/,$(RESOURCE_DCN401))
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index fe518fd27b08..91da5cf85b69 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -1163,6 +1163,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
0,
0,
0,
+ 0,
pipe_ctx->stream->signal,
false);
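
Every pre-existing caller of program_timing gains one argument; legacy ASICs pass 0 so no keepout is programmed. A sketch of the updated call shape (the argument comments are illustrative, not from the diff):

tg->funcs->program_timing(tg, &stream->timing,
		0,	/* vready_offset */
		0,	/* vstartup_start */
		0,	/* vupdate_offset */
		0,	/* vupdate_width */
		0,	/* pstate_keepout: 0 keeps legacy behavior */
		pipe_ctx->stream->signal,
		false);	/* use_vbios */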
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 88afb2a30eef..162856c523e4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -1067,7 +1067,10 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
struct dm_pp_clock_levels clks = {0};
int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
- if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
+ if (!dc->bw_vbios)
+ return;
+
+ if (dc->bw_vbios->memory_type == bw_def_hbm)
memory_type_multiplier = MEMORY_TYPE_HBM;
/*do system clock TODO PPLIB: after PPLIB implement,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 5e7cfa8e8ec9..eea2b3b307cd 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2040,6 +2040,7 @@ bool dcn20_fast_validate_bw(
{
bool out = false;
int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@@ -2064,7 +2065,7 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index 131d98025bd4..fc54483b9104 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -1007,8 +1007,10 @@ static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer(
struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
- if (!head_pipe)
+ if (!head_pipe) {
ASSERT(0);
+ return NULL;
+ }
if (!idle_pipe)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 8663cbc3d1cf..347e6aaea582 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -774,6 +774,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
{
bool out = false;
int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@@ -816,7 +817,7 @@ bool dcn21_fast_validate_bw(struct dc *dc,
goto validate_fail;
}
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 5d1801dce273..ac8cb20e2e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1948,6 +1948,7 @@ static bool dcn31_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+ dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 969658313fd6..a124ad9bd108 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1651,6 +1651,9 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
else
phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state);
+ if (!phantom_plane)
+ continue;
+
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
sizeof(phantom_plane->scaling_quality));
@@ -1717,6 +1720,9 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
// be a valid candidate for SubVP (i.e. has a plane, stream, doesn't
// already have phantom pipe assigned, etc.) by previous checks.
phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index);
+ if (!phantom_stream)
+ return;
+
dcn32_enable_phantom_plane(dc, context, phantom_stream, index);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2220,6 +2226,7 @@ static bool dcn32_resource_construct(
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
+ dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2671,8 +2678,10 @@ static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
int head_index;
- if (!head_pipe)
+ if (!head_pipe) {
ASSERT(0);
+ return NULL;
+ }
/*
* Modified from dcn20_acquire_idle_pipe_for_layer
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index fee67fbab8e2..7901792afb7b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -505,6 +505,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \
SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \
SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \
+ SRI_ARR(CM_TEST_DEBUG_INDEX, CM, id), \
+ SRI_ARR(CM_TEST_DEBUG_DATA, CM, id), \
SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \
SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI_ARR(CURSOR0_CONTROL, CNVC_CUR, id), \
@@ -761,6 +763,7 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \
SRI_ARR(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \
SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_TEST_DEBUG_BUS_ROTATE, DSCC, id), \
SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \
SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \
SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
@@ -1185,6 +1188,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \
SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), SR(DCHUBBUB_ARB_SAT_LEVEL), \
SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_TEST_DEBUG_INDEX), \
+ SR(DCHUBBUB_TEST_DEBUG_DATA), \
SR(DCHUBBUB_SOFT_RESET), SR(DCHUBBUB_CRC_CTRL), \
SR(DCN_VM_FB_LOCATION_BASE), SR(DCN_VM_FB_LOCATION_TOP), \
SR(DCN_VM_FB_OFFSET), SR(DCN_VM_AGP_BOT), SR(DCN_VM_AGP_TOP), \
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
index d184105ce2b3..f5a4e97c40ce 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
@@ -218,12 +218,12 @@ bool dcn32_is_center_timing(struct pipe_ctx *pipe)
pipe->stream->timing.v_addressable != pipe->stream->src.height) {
is_center_timing = true;
}
- }
- if (pipe->plane_state) {
- if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
- pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
- is_center_timing = true;
+ if (pipe->plane_state) {
+ if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
+ pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
+ is_center_timing = true;
+ }
}
}
@@ -663,7 +663,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ pipe->stream->timing.v_total * (unsigned long long)pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
@@ -724,7 +724,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ pipe->stream->timing.v_total * (unsigned long long)pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
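
Both loops compute a rounded-up refresh rate with 64-bit intermediates; the added (unsigned long long) cast keeps v_total * h_total from overflowing 32-bit arithmetic. A standalone sketch of the same calculation:

/* refresh_hz = ceil(pix_clk_100hz * 100 / (v_total * h_total)),
 * via the usual (num + den - 1) / den trick; dividing by v_total and
 * then h_total, as the code above does, equals dividing by the product.
 */
static inline uint32_t refresh_rate_hz(uint64_t pix_clk_100hz,
		uint32_t v_total, uint32_t h_total)
{
	uint64_t den = (uint64_t)v_total * h_total;

	return (uint32_t)((pix_clk_100hz * 100 + den - 1) / den);
}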
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 8e0588b1cf30..827a94f84f10 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1783,6 +1783,7 @@ static bool dcn321_resource_construct(
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
+ dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ddf251901fb3..893a9d9ee870 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -786,6 +786,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dmub_reallow_idle = false,
.static_screen_wait_frames = 2,
.disable_timeout = true,
+ .min_disp_clk_khz = 50000,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1899,6 +1900,7 @@ static bool dcn35_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+ dc->config.disable_hbr_audio_dp2 = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2153,6 +2155,7 @@ static bool dcn35_resource_construct(
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+ dc->dml2_options.override_det_buffer_size_kbytes = true;
if (dc->config.sdpif_request_limit_words_per_umc == 0)
dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 4c5e722baa3a..da9101b83e8c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -736,7 +736,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.hdmichar = true,
.dpstream = true,
.symclk32_se = true,
- .symclk32_le = true,
+ .symclk32_le = false,
.symclk_fe = true,
.physymclk = false,
.dpiasymclk = true,
@@ -2133,6 +2133,7 @@ static bool dcn351_resource_construct(
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/
+ dc->dml2_options.override_det_buffer_size_kbytes = true;
if (dc->config.sdpif_request_limit_words_per_umc == 0)
dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 34b02147881d..9d56fbdcd06a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -76,6 +76,9 @@
#include "dml2/dml2_wrapper.h"
+#include "spl/dc_spl_scl_easf_filters.h"
+#include "spl/dc_spl_isharp_filters.h"
+
#define DC_LOGGER_INIT(logger)
enum dcn401_clk_src_array_id {
@@ -1188,7 +1191,7 @@ static struct stream_encoder *dcn401_stream_encoder_create(
vpg = dcn401_vpg_create(ctx, vpg_inst);
afmt = dcn401_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt) {
+ if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
@@ -1822,6 +1825,7 @@ static bool dcn401_resource_construct(
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev))
dc->caps.dcc_plane_width_limit = 7680;
@@ -2099,6 +2103,7 @@ static bool dcn401_resource_construct(
dc->dml2_options.use_native_soc_bb_construction = true;
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.map_dc_pipes_with_callbacks = true;
+ dc->dml2_options.force_tdlut_enable = true;
resource_init_common_dml2_callbacks(dc, &dc->dml2_options);
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
@@ -2124,6 +2129,10 @@ static bool dcn401_resource_construct(
dc->dml2_options.max_segments_per_hubp = 20;
dc->dml2_options.det_segment_size = DCN4_01_CRB_SEGMENT_SIZE_KB;
+ /* SPL */
+ spl_init_easf_filter_coeffs();
+ spl_init_blur_scale_coeffs();
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index bb46f30d11d0..514d1ce20df9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -536,7 +536,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, inst), \
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
- SRI_ARR(OTG_DRR_CONTROL, OTG, inst)
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst)
/* HUBBUB */
#define HUBBUB_REG_LIST_DCN4_01_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/spl/Makefile b/drivers/gpu/drm/amd/display/dc/spl/Makefile
index 89cad60b1a10..5edf3c6cf3e2 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/spl/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'spl' sub-component of DAL.
# It provides the scaling library interface.
-SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_isharp_filters.o
+SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_easf_filters.o dc_spl_isharp_filters.o dc_spl_filters.o spl_fixpt31_32.o spl_custom_float.o
AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/spl/,$(SPL))
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index e3e20cd86af6..014e8a296f0c 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -4,9 +4,11 @@
#include "dc_spl.h"
#include "dc_spl_scl_filters.h"
+#include "dc_spl_scl_easf_filters.h"
#include "dc_spl_isharp_filters.h"
+#include "spl_debug.h"
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
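
IDENTITY_RATIO tests for exactly 1.0 in U2.19 fixed point: 1.0 in fixed 31.32 is 1LL << 32, and truncated to U2.19 it becomes 1 << 19. A minimal sanity sketch (truncation semantics assumed to match the dc_fixpt helpers being replaced):

struct spl_fixed31_32 one = spl_fixpt_one;	/* value == 1LL << 32 */

/* true: 1.0 encodes to exactly 1 << 19 in U2.19 */
bool identity = IDENTITY_RATIO(one);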
#define MIN_VIEWPORT_SIZE 12
static struct spl_rect intersect_rec(const struct spl_rect *r0, const struct spl_rect *r1)
@@ -107,26 +109,26 @@ static struct spl_rect calculate_plane_rec_in_timing_active(
const struct spl_rect *stream_src = &spl_in->basic_out.src_rect;
const struct spl_rect *stream_dst = &spl_in->basic_out.dst_rect;
struct spl_rect rec_out = {0};
- struct fixed31_32 temp;
+ struct spl_fixed31_32 temp;
- temp = dc_fixpt_from_fraction(rec_in->x * (long long)stream_dst->width,
+ temp = spl_fixpt_from_fraction(rec_in->x * (long long)stream_dst->width,
stream_src->width);
- rec_out.x = stream_dst->x + dc_fixpt_round(temp);
+ rec_out.x = stream_dst->x + spl_fixpt_round(temp);
- temp = dc_fixpt_from_fraction(
+ temp = spl_fixpt_from_fraction(
(rec_in->x + rec_in->width) * (long long)stream_dst->width,
stream_src->width);
- rec_out.width = stream_dst->x + dc_fixpt_round(temp) - rec_out.x;
+ rec_out.width = stream_dst->x + spl_fixpt_round(temp) - rec_out.x;
- temp = dc_fixpt_from_fraction(rec_in->y * (long long)stream_dst->height,
+ temp = spl_fixpt_from_fraction(rec_in->y * (long long)stream_dst->height,
stream_src->height);
- rec_out.y = stream_dst->y + dc_fixpt_round(temp);
+ rec_out.y = stream_dst->y + spl_fixpt_round(temp);
- temp = dc_fixpt_from_fraction(
+ temp = spl_fixpt_from_fraction(
(rec_in->y + rec_in->height) * (long long)stream_dst->height,
stream_src->height);
- rec_out.height = stream_dst->y + dc_fixpt_round(temp) - rec_out.y;
+ rec_out.height = stream_dst->y + spl_fixpt_round(temp) - rec_out.y;
return rec_out;
}
@@ -144,7 +146,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
mpc_rec.height = plane_clip_rec->height;
mpc_rec.y = plane_clip_rec->y;
- ASSERT(mpc_slice_count == 1 ||
+ SPL_ASSERT(mpc_slice_count == 1 ||
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE ||
mpc_rec.width % 2 == 0);
@@ -157,7 +159,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
}
if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM) {
- ASSERT(mpc_rec.height % 2 == 0);
+ SPL_ASSERT(mpc_rec.height % 2 == 0);
mpc_rec.height /= 2;
}
return mpc_rec;
@@ -197,7 +199,7 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i
return spl_in->basic_out.odm_slice_rect;
}
-static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_recout(struct spl_in *spl_in, struct spl_scratch *spl_scratch, struct spl_out *spl_out)
{
/*
* A plane clip represents the desired plane size and position in Stream
@@ -340,20 +342,23 @@ static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out)
/* shift the overlapping area so it is with respect to current
* ODM slice's position
*/
- spl_out->scl_data.recout = shift_rec(
+ spl_scratch->scl_data.recout = shift_rec(
&overlapping_area,
-odm_slice.x, -odm_slice.y);
- spl_out->scl_data.recout.height -=
+ spl_scratch->scl_data.recout.height -=
spl_in->debug.visual_confirm_base_offset;
- spl_out->scl_data.recout.height -=
+ spl_scratch->scl_data.recout.height -=
spl_in->debug.visual_confirm_dpp_offset;
} else
/* if there is no overlap, zero recout */
- memset(&spl_out->scl_data.recout, 0,
+ memset(&spl_scratch->scl_data.recout, 0,
sizeof(struct spl_rect));
}
+
/* Calculate scaling ratios */
-static void spl_calculate_scaling_ratios(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_scaling_ratios(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch,
+ struct spl_out *spl_out)
{
const int in_w = spl_in->basic_out.src_rect.width;
const int in_h = spl_in->basic_out.src_rect.height;
@@ -364,59 +369,75 @@ static void spl_calculate_scaling_ratios(struct spl_in *spl_in, struct spl_out *
/*Swap surf_src height and width since scaling ratios are in recout rotation*/
if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 ||
spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270)
- swap(surf_src.height, surf_src.width);
+ spl_swap(surf_src.height, surf_src.width);
- spl_out->scl_data.ratios.horz = dc_fixpt_from_fraction(
+ spl_scratch->scl_data.ratios.horz = spl_fixpt_from_fraction(
surf_src.width,
spl_in->basic_in.dst_rect.width);
- spl_out->scl_data.ratios.vert = dc_fixpt_from_fraction(
+ spl_scratch->scl_data.ratios.vert = spl_fixpt_from_fraction(
surf_src.height,
spl_in->basic_in.dst_rect.height);
if (spl_in->basic_out.view_format == SPL_VIEW_3D_SIDE_BY_SIDE)
- spl_out->scl_data.ratios.horz.value *= 2;
+ spl_scratch->scl_data.ratios.horz.value *= 2;
else if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM)
- spl_out->scl_data.ratios.vert.value *= 2;
+ spl_scratch->scl_data.ratios.vert.value *= 2;
- spl_out->scl_data.ratios.vert.value = div64_s64(
- spl_out->scl_data.ratios.vert.value * in_h, out_h);
- spl_out->scl_data.ratios.horz.value = div64_s64(
- spl_out->scl_data.ratios.horz.value * in_w, out_w);
+ spl_scratch->scl_data.ratios.vert.value = spl_div64_s64(
+ spl_scratch->scl_data.ratios.vert.value * in_h, out_h);
+ spl_scratch->scl_data.ratios.horz.value = spl_div64_s64(
+ spl_scratch->scl_data.ratios.horz.value * in_w, out_w);
- spl_out->scl_data.ratios.horz_c = spl_out->scl_data.ratios.horz;
- spl_out->scl_data.ratios.vert_c = spl_out->scl_data.ratios.vert;
+ spl_scratch->scl_data.ratios.horz_c = spl_scratch->scl_data.ratios.horz;
+ spl_scratch->scl_data.ratios.vert_c = spl_scratch->scl_data.ratios.vert;
if (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
|| spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) {
- spl_out->scl_data.ratios.horz_c.value /= 2;
- spl_out->scl_data.ratios.vert_c.value /= 2;
+ spl_scratch->scl_data.ratios.horz_c.value /= 2;
+ spl_scratch->scl_data.ratios.vert_c.value /= 2;
}
- spl_out->scl_data.ratios.horz = dc_fixpt_truncate(
- spl_out->scl_data.ratios.horz, 19);
- spl_out->scl_data.ratios.vert = dc_fixpt_truncate(
- spl_out->scl_data.ratios.vert, 19);
- spl_out->scl_data.ratios.horz_c = dc_fixpt_truncate(
- spl_out->scl_data.ratios.horz_c, 19);
- spl_out->scl_data.ratios.vert_c = dc_fixpt_truncate(
- spl_out->scl_data.ratios.vert_c, 19);
+ spl_scratch->scl_data.ratios.horz = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.horz, 19);
+ spl_scratch->scl_data.ratios.vert = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.vert, 19);
+ spl_scratch->scl_data.ratios.horz_c = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.horz_c, 19);
+ spl_scratch->scl_data.ratios.vert_c = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.vert_c, 19);
+
+ /*
+ * Some coefficient tables and registers are indexed by the
+ * output/input ratio, but the ratios above are input/output,
+ * so store 1/ratio in recip_ratios for those lookups.
+ */
+ spl_scratch->scl_data.recip_ratios.horz = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.horz);
+ spl_scratch->scl_data.recip_ratios.vert = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.vert);
+ spl_scratch->scl_data.recip_ratios.horz_c = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.horz_c);
+ spl_scratch->scl_data.recip_ratios.vert_c = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.vert_c);
}
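
Since the stored ratios are input/output, the reciprocal gives the output/input form those tables want; e.g. for a 2x horizontal upscale (a sketch with illustrative sizes):

/* src 960 wide -> dst 1920 wide:
 *   ratios.horz       = 960/1920 = 0.5  (input/output)
 *   recip_ratios.horz = 1/0.5    = 2.0  (output/input, for lookups)
 */
struct spl_fixed31_32 ratio = spl_fixpt_from_fraction(960, 1920);
struct spl_fixed31_32 recip = spl_fixpt_recip(ratio);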
+
/* Calculate Viewport size */
-static void spl_calculate_viewport_size(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_viewport_size(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
{
- spl_out->scl_data.viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.horz,
- spl_out->scl_data.recout.width));
- spl_out->scl_data.viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.vert,
- spl_out->scl_data.recout.height));
- spl_out->scl_data.viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.horz_c,
- spl_out->scl_data.recout.width));
- spl_out->scl_data.viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.vert_c,
- spl_out->scl_data.recout.height));
+ spl_scratch->scl_data.viewport.width = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.horz,
+ spl_scratch->scl_data.recout.width));
+ spl_scratch->scl_data.viewport.height = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.vert,
+ spl_scratch->scl_data.recout.height));
+ spl_scratch->scl_data.viewport_c.width = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.horz_c,
+ spl_scratch->scl_data.recout.width));
+ spl_scratch->scl_data.viewport_c.height = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.vert_c,
+ spl_scratch->scl_data.recout.height));
if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 ||
spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270) {
- swap(spl_out->scl_data.viewport.width, spl_out->scl_data.viewport.height);
- swap(spl_out->scl_data.viewport_c.width, spl_out->scl_data.viewport_c.height);
+ spl_swap(spl_scratch->scl_data.viewport.width, spl_scratch->scl_data.viewport.height);
+ spl_swap(spl_scratch->scl_data.viewport_c.width, spl_scratch->scl_data.viewport_c.height);
}
}
+
static void spl_get_vp_scan_direction(enum spl_rotation_angle rotation,
bool horizontal_mirror,
bool *orthogonal_rotation,
@@ -440,6 +461,7 @@ static void spl_get_vp_scan_direction(enum spl_rotation_angle rotation,
if (horizontal_mirror)
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+
/*
* We completely calculate vp offset, size and inits here based entirely on scaling
* ratios and recout for pixel perfect pipe combine.
@@ -449,13 +471,13 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
int recout_size,
int src_size,
int taps,
- struct fixed31_32 ratio,
- struct fixed31_32 init_adj,
- struct fixed31_32 *init,
+ struct spl_fixed31_32 ratio,
+ struct spl_fixed31_32 init_adj,
+ struct spl_fixed31_32 *init,
int *vp_offset,
int *vp_size)
{
- struct fixed31_32 temp;
+ struct spl_fixed31_32 temp;
int int_part;
/*
@@ -468,33 +490,33 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
* init_bot = init + scaling_ratio
* to get pixel perfect combine add the fraction from calculating vp offset
*/
- temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full);
- *vp_offset = dc_fixpt_floor(temp);
+ temp = spl_fixpt_mul_int(ratio, recout_offset_within_recout_full);
+ *vp_offset = spl_fixpt_floor(temp);
temp.value &= 0xffffffff;
- *init = dc_fixpt_add(dc_fixpt_div_int(dc_fixpt_add_int(ratio, taps + 1), 2), temp);
- *init = dc_fixpt_add(*init, init_adj);
- *init = dc_fixpt_truncate(*init, 19);
+ *init = spl_fixpt_add(spl_fixpt_div_int(spl_fixpt_add_int(ratio, taps + 1), 2), temp);
+ *init = spl_fixpt_add(*init, init_adj);
+ *init = spl_fixpt_truncate(*init, 19);
/*
* If viewport has non 0 offset and there are more taps than covered by init then
* we should decrease the offset and increase init so we are never sampling
* outside of viewport.
*/
- int_part = dc_fixpt_floor(*init);
+ int_part = spl_fixpt_floor(*init);
if (int_part < taps) {
int_part = taps - int_part;
if (int_part > *vp_offset)
int_part = *vp_offset;
*vp_offset -= int_part;
- *init = dc_fixpt_add_int(*init, int_part);
+ *init = spl_fixpt_add_int(*init, int_part);
}
/*
* If taps are sampling outside of viewport at end of recout and there are more pixels
* available in the surface we should increase the viewport size, regardless set vp to
* only what is used.
*/
- temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1));
- *vp_size = dc_fixpt_floor(temp);
+ temp = spl_fixpt_add(*init, spl_fixpt_mul_int(ratio, recout_size - 1));
+ *vp_size = spl_fixpt_floor(temp);
if (*vp_size + *vp_offset > src_size)
*vp_size = src_size - *vp_offset;
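
For intuition about the init formula above: with a 1:1 ratio, 4 taps, and a zero pixel-perfect fraction, init = (1.0 + 4 + 1) / 2 = 3.0. A standalone sketch of that arithmetic using the SPL helpers from this diff:

struct spl_fixed31_32 ratio = spl_fixpt_one;
int taps = 4;

/* (ratio + taps + 1) / 2 == 3.0 for a 1:1 ratio with 4 taps */
struct spl_fixed31_32 init = spl_fixpt_div_int(
		spl_fixpt_add_int(ratio, taps + 1), 2);
/* spl_fixpt_floor(init) == 3 */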
@@ -509,15 +531,24 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
static bool spl_is_yuv420(enum spl_pixel_format format)
{
- if ((format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN) &&
- (format <= SPL_PIXEL_FORMAT_VIDEO_END))
+ if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
+ (format <= SPL_PIXEL_FORMAT_420BPP10))
+ return true;
+
+ return false;
+}
+
+static bool spl_is_rgb8(enum spl_pixel_format format)
+{
+ if (format == SPL_PIXEL_FORMAT_ARGB8888)
return true;
return false;
}
/*Calculate inits and viewport */
-static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch)
{
struct spl_rect src = spl_in->basic_in.src_rect;
struct spl_rect recout_dst_in_active_timing;
@@ -528,11 +559,11 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
int vpc_div = (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
|| spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- struct fixed31_32 init_adj_h = dc_fixpt_zero;
- struct fixed31_32 init_adj_v = dc_fixpt_zero;
+ struct spl_fixed31_32 init_adj_h = spl_fixpt_zero;
+ struct spl_fixed31_32 init_adj_v = spl_fixpt_zero;
recout_clip_in_active_timing = shift_rec(
- &spl_out->scl_data.recout, odm_slice.x, odm_slice.y);
+ &spl_scratch->scl_data.recout, odm_slice.x, odm_slice.y);
recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
spl_in, &spl_in->basic_in.dst_rect);
overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing,
@@ -555,8 +586,8 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
&flip_horz_scan_dir);
if (orthogonal_rotation) {
- swap(src.width, src.height);
- swap(flip_vert_scan_dir, flip_horz_scan_dir);
+ spl_swap(src.width, src.height);
+ spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
}
if (spl_is_yuv420(spl_in->basic_in.format)) {
@@ -568,17 +599,17 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
switch (spl_in->basic_in.cositing) {
case CHROMA_COSITING_LEFT:
- init_adj_h = dc_fixpt_zero;
- init_adj_v = dc_fixpt_from_fraction(sign, 2);
+ init_adj_h = spl_fixpt_zero;
+ init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
case CHROMA_COSITING_NONE:
- init_adj_h = dc_fixpt_from_fraction(sign, 2);
- init_adj_v = dc_fixpt_from_fraction(sign, 2);
+ init_adj_h = spl_fixpt_from_fraction(sign, 4);
+ init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
case CHROMA_COSITING_TOPLEFT:
default:
- init_adj_h = dc_fixpt_zero;
- init_adj_v = dc_fixpt_zero;
+ init_adj_h = spl_fixpt_zero;
+ init_adj_v = spl_fixpt_zero;
break;
}
}
@@ -586,59 +617,60 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
- spl_out->scl_data.recout.width,
+ spl_scratch->scl_data.recout.width,
src.width,
- spl_out->scl_data.taps.h_taps,
- spl_out->scl_data.ratios.horz,
- dc_fixpt_zero,
- &spl_out->scl_data.inits.h,
- &spl_out->scl_data.viewport.x,
- &spl_out->scl_data.viewport.width);
+ spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.ratios.horz,
+ spl_fixpt_zero,
+ &spl_scratch->scl_data.inits.h,
+ &spl_scratch->scl_data.viewport.x,
+ &spl_scratch->scl_data.viewport.width);
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
- spl_out->scl_data.recout.width,
+ spl_scratch->scl_data.recout.width,
src.width / vpc_div,
- spl_out->scl_data.taps.h_taps_c,
- spl_out->scl_data.ratios.horz_c,
+ spl_scratch->scl_data.taps.h_taps_c,
+ spl_scratch->scl_data.ratios.horz_c,
init_adj_h,
- &spl_out->scl_data.inits.h_c,
- &spl_out->scl_data.viewport_c.x,
- &spl_out->scl_data.viewport_c.width);
+ &spl_scratch->scl_data.inits.h_c,
+ &spl_scratch->scl_data.viewport_c.x,
+ &spl_scratch->scl_data.viewport_c.width);
spl_calculate_init_and_vp(
flip_vert_scan_dir,
recout_clip_in_recout_dst.y,
- spl_out->scl_data.recout.height,
+ spl_scratch->scl_data.recout.height,
src.height,
- spl_out->scl_data.taps.v_taps,
- spl_out->scl_data.ratios.vert,
- dc_fixpt_zero,
- &spl_out->scl_data.inits.v,
- &spl_out->scl_data.viewport.y,
- &spl_out->scl_data.viewport.height);
+ spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.ratios.vert,
+ spl_fixpt_zero,
+ &spl_scratch->scl_data.inits.v,
+ &spl_scratch->scl_data.viewport.y,
+ &spl_scratch->scl_data.viewport.height);
spl_calculate_init_and_vp(
flip_vert_scan_dir,
recout_clip_in_recout_dst.y,
- spl_out->scl_data.recout.height,
+ spl_scratch->scl_data.recout.height,
src.height / vpc_div,
- spl_out->scl_data.taps.v_taps_c,
- spl_out->scl_data.ratios.vert_c,
+ spl_scratch->scl_data.taps.v_taps_c,
+ spl_scratch->scl_data.ratios.vert_c,
init_adj_v,
- &spl_out->scl_data.inits.v_c,
- &spl_out->scl_data.viewport_c.y,
- &spl_out->scl_data.viewport_c.height);
+ &spl_scratch->scl_data.inits.v_c,
+ &spl_scratch->scl_data.viewport_c.y,
+ &spl_scratch->scl_data.viewport_c.height);
if (orthogonal_rotation) {
- swap(spl_out->scl_data.viewport.x, spl_out->scl_data.viewport.y);
- swap(spl_out->scl_data.viewport.width, spl_out->scl_data.viewport.height);
- swap(spl_out->scl_data.viewport_c.x, spl_out->scl_data.viewport_c.y);
- swap(spl_out->scl_data.viewport_c.width, spl_out->scl_data.viewport_c.height);
+ spl_swap(spl_scratch->scl_data.viewport.x, spl_scratch->scl_data.viewport.y);
+ spl_swap(spl_scratch->scl_data.viewport.width, spl_scratch->scl_data.viewport.height);
+ spl_swap(spl_scratch->scl_data.viewport_c.x, spl_scratch->scl_data.viewport_c.y);
+ spl_swap(spl_scratch->scl_data.viewport_c.width, spl_scratch->scl_data.viewport_c.height);
}
- spl_out->scl_data.viewport.x += src.x;
- spl_out->scl_data.viewport.y += src.y;
- ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
- spl_out->scl_data.viewport_c.x += src.x / vpc_div;
- spl_out->scl_data.viewport_c.y += src.y / vpc_div;
+ spl_scratch->scl_data.viewport.x += src.x;
+ spl_scratch->scl_data.viewport.y += src.y;
+ SPL_ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
+ spl_scratch->scl_data.viewport_c.x += src.x / vpc_div;
+ spl_scratch->scl_data.viewport_c.y += src.y / vpc_div;
}
+
static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
{
/*
@@ -647,7 +679,7 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
* This may break with rotation, good thing we aren't mixing hw rotation and 3d
*/
if (spl_in->basic_in.mpc_combine_v) {
- ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
+ SPL_ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
(spl_in->basic_out.view_format != SPL_VIEW_3D_TOP_AND_BOTTOM &&
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE));
if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM)
@@ -665,6 +697,7 @@ static void spl_clamp_viewport(struct spl_rect *viewport)
if (viewport->width < MIN_VIEWPORT_SIZE)
viewport->width = MIN_VIEWPORT_SIZE;
}
+
static bool spl_dscl_is_420_format(enum spl_pixel_format format)
{
if (format == SPL_PIXEL_FORMAT_420BPP8 ||
@@ -673,6 +706,7 @@ static bool spl_dscl_is_420_format(enum spl_pixel_format format)
else
return false;
}
+
static bool spl_dscl_is_video_format(enum spl_pixel_format format)
{
if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
@@ -681,17 +715,21 @@ static bool spl_dscl_is_video_format(enum spl_pixel_format format)
else
return false;
}
+
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
- const struct spl_scaler_data *data)
+ const struct spl_scaler_data *data,
+ bool enable_isharp, bool enable_easf)
{
- const long long one = dc_fixpt_one.value;
+ const long long one = spl_fixpt_one.value;
enum spl_pixel_format pixel_format = spl_in->basic_in.format;
+ /* Bypass only when the ratio is 1:1 and neither ISHARP nor forced scaling is enabled */
if (data->ratios.horz.value == one
&& data->ratios.vert.value == one
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
- && !spl_in->basic_out.always_scale)
+ && !spl_in->basic_out.always_scale
+ && !enable_isharp)
return SCL_MODE_SCALING_444_BYPASS;
if (!spl_dscl_is_420_format(pixel_format)) {
@@ -700,69 +738,214 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
else
return SCL_MODE_SCALING_444_RGB_ENABLE;
}
- if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return SCL_MODE_SCALING_420_LUMA_BYPASS;
- if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return SCL_MODE_SCALING_420_CHROMA_BYPASS;
+
+ /* Bypass the YUV luma or chroma path at 1:1 when neither ISHARP
+ * nor EASF is enabled
+ */
+ if ((!enable_isharp) && (!enable_easf)) {
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ return SCL_MODE_SCALING_420_LUMA_BYPASS;
+ if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
+ return SCL_MODE_SCALING_420_CHROMA_BYPASS;
+ }
return SCL_MODE_SCALING_420_YCBCR_ENABLE;
}
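
The resulting mode selection, summarized for a few representative inputs (a sketch; the enable flags are passed by the caller as shown in the signature above):

/* all ratios 1:1, RGB, no always_scale, no ISHARP -> 444 bypass */
enum scl_mode m0 = spl_get_dscl_mode(spl_in, data, false, false);
/* m0 == SCL_MODE_SCALING_444_BYPASS */

/* same 1:1 RGB surface with ISHARP on -> the scaler stays enabled */
enum scl_mode m1 = spl_get_dscl_mode(spl_in, data, true, false);
/* m1 == SCL_MODE_SCALING_444_RGB_ENABLE */

/* 4:2:0 with luma at 1:1 (chroma is then 0.5:1) and no ISHARP/EASF
 * -> SCL_MODE_SCALING_420_LUMA_BYPASS */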
+
+static bool spl_choose_lls_policy(enum spl_pixel_format format,
+ enum spl_transfer_func_type tf_type,
+ enum spl_transfer_func_predefined tf_predefined_type,
+ enum linear_light_scaling *lls_pref)
+{
+ if (spl_is_yuv420(format)) {
+ *lls_pref = LLS_PREF_NO;
+ if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
+ (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
+ return true;
+ } else { /* RGB or YUV444 */
+ if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
+ (tf_type == SPL_TF_TYPE_BYPASS)) {
+ *lls_pref = LLS_PREF_YES;
+ return true;
+ }
+ }
+ *lls_pref = LLS_PREF_NO;
+ return false;
+}
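
The helper both reports whether a policy could be chosen and writes the preference through the out-parameter; a hedged usage sketch (tf_predefined stands in for the caller's predefined-TF value and is illustrative):

enum linear_light_scaling lls = LLS_PREF_DONT_CARE;

/* RGB with a bypass transfer function -> linear-light scaling */
bool chosen = spl_choose_lls_policy(SPL_PIXEL_FORMAT_ARGB8888,
		SPL_TF_TYPE_BYPASS, tf_predefined, &lls);
/* chosen == true, lls == LLS_PREF_YES */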
+
+/* Decide whether EASF should be skipped */
+static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
+{
+ int vratio = 0;
+ int hratio = 0;
+ bool skip_easf = false;
+ bool lls_enable_easf = true;
+
+ if (spl_in->disable_easf)
+ skip_easf = true;
+
+ vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz);
+
+ /*
+ * No EASF support for downscaling > 2:1
+ * EASF support for upscaling or downscaling up to 2:1
+ */
+ if ((vratio > 2) || (hratio > 2))
+ skip_easf = true;
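+	/* e.g. a hypothetical 1.5:1 downscale gives spl_fixpt_ceil() == 2 and
+	 * keeps EASF eligible, while a 2.4:1 downscale gives 3 and skips it
+	 */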
+
+ /*
+ * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
+ * function to determine whether to use LINEAR or NONLINEAR scaling
+ */
+ if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
+ lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
+ spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
+ &spl_in->lls_pref);
+
+ if (!lls_enable_easf)
+ skip_easf = true;
+
+ /* Check for linear scaling or EASF preferred */
+ if (spl_in->lls_pref != LLS_PREF_YES && !spl_in->prefer_easf)
+ skip_easf = true;
+
+ return skip_easf;
+}
+
+/* Check if video is in fullscreen mode */
+static bool spl_is_video_fullscreen(struct spl_in *spl_in)
+{
+ if (spl_is_yuv420(spl_in->basic_in.format) && spl_in->is_fullscreen)
+ return true;
+ return false;
+}
+
+static bool spl_get_isharp_en(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch)
+{
+ bool enable_isharp = false;
+ int vratio = 0;
+ int hratio = 0;
+ struct spl_taps taps = spl_scratch->scl_data.taps;
+ bool fullscreen = spl_is_video_fullscreen(spl_in);
+
+ /* Return if adaptive sharpness is disabled */
+ if (spl_in->adaptive_sharpness.enable == false)
+ return enable_isharp;
+
+ vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz);
+
+ /* No iSHARP support for downscaling */
+ if (vratio > 1 || hratio > 1)
+ return enable_isharp;
+
+ // Scaling is up to 1:1 (no scaling) or upscaling
+
+ /*
+ * Apply sharpness to RGB and YUV (NV12/P010)
+ * surfaces based on policy setting
+ */
+ if (!spl_is_yuv420(spl_in->basic_in.format) &&
+ (spl_in->debug.sharpen_policy == SHARPEN_YUV))
+ return enable_isharp;
+ else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) &&
+ (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV))
+ return enable_isharp;
+ else if (!spl_in->is_fullscreen &&
+ spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL)
+ return enable_isharp;
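+	/* i.e. SHARPEN_YUV sharpens YUV surfaces only,
+	 * SHARPEN_RGB_FULLSCREEN_YUV sharpens RGB always but YUV only in
+	 * fullscreen, and SHARPEN_FULLSCREEN_ALL sharpens any format only in
+	 * fullscreen
+	 */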
+
+ /*
+	 * Apply sharpness only if the filter supports 4 or 6 horizontal
+	 * taps AND 3, 4 or 6 vertical taps
+ */
+ if ((taps.h_taps == 4 || taps.h_taps == 6) &&
+ (taps.v_taps == 3 || taps.v_taps == 4 || taps.v_taps == 6))
+ enable_isharp = true;
+
+ return enable_isharp;
+}
+
/* Calculate optimal number of taps */
static bool spl_get_optimal_number_of_taps(
- int max_downscale_src_width, struct spl_in *spl_in, struct spl_out *spl_out,
- const struct spl_taps *in_taps)
+ int max_downscale_src_width, struct spl_in *spl_in, struct spl_scratch *spl_scratch,
+ const struct spl_taps *in_taps, bool *enable_easf_v, bool *enable_easf_h,
+ bool *enable_isharp)
{
int num_part_y, num_part_c;
int max_taps_y, max_taps_c;
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
+ bool skip_easf = false;
- if (spl_out->scl_data.viewport.width > spl_out->scl_data.h_active &&
+ if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
- spl_out->scl_data.viewport.width > max_downscale_src_width)
+ spl_scratch->scl_data.viewport.width > max_downscale_src_width)
return false;
+
+	/* Determine whether EASF must be skipped (enable_easf() returns the skip decision) */
+ skip_easf = enable_easf(spl_in, spl_scratch);
+
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
* taps = 4 for upscaling
*/
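+	/* e.g. a hypothetical 3:1 downscale gives min(2 * 3, 8) = 6 taps,
+	 * while a 5:1 downscale clamps at 8 (min(10, 8)); upscaling always
+	 * defaults to 4 taps
+	 */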
- if (in_taps->h_taps == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.horz) > 1)
- spl_out->scl_data.taps.h_taps = min(2 * dc_fixpt_ceil(spl_out->scl_data.ratios.horz), 8);
- else
- spl_out->scl_data.taps.h_taps = 4;
- } else
- spl_out->scl_data.taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) > 1)
- spl_out->scl_data.taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(
- spl_out->scl_data.ratios.vert, 2)), 8);
- else
- spl_out->scl_data.taps.v_taps = 4;
- } else
- spl_out->scl_data.taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) > 1)
- spl_out->scl_data.taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(
- spl_out->scl_data.ratios.vert_c, 2)), 8);
- else
- spl_out->scl_data.taps.v_taps_c = 4;
- } else
- spl_out->scl_data.taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.horz_c) > 1)
- spl_out->scl_data.taps.h_taps_c = min(2 * dc_fixpt_ceil(spl_out->scl_data.ratios.horz_c), 8);
+ if (skip_easf) {
+ if (in_taps->h_taps == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
+ spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.horz), 8);
+ else
+ spl_scratch->scl_data.taps.h_taps = 4;
+ } else
+ spl_scratch->scl_data.taps.h_taps = in_taps->h_taps;
+ if (in_taps->v_taps == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1)
+ spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
+ spl_scratch->scl_data.ratios.vert, 2)), 8);
+ else
+ spl_scratch->scl_data.taps.v_taps = 4;
+ } else
+ spl_scratch->scl_data.taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1)
+ spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
+ spl_scratch->scl_data.ratios.vert_c, 2)), 8);
+ else
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+ } else
+ spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1)
+ spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.horz_c), 8);
+ else
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+ } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ /* Only 1 and even h_taps_c are supported by hw */
+ spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
else
- spl_out->scl_data.taps.h_taps_c = 4;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- spl_out->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- spl_out->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+ spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+ } else {
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
+ spl_scratch->scl_data.taps.h_taps = 6;
+ spl_scratch->scl_data.taps.v_taps = 6;
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+ } else { /* RGB */
+ spl_scratch->scl_data.taps.h_taps = 6;
+ spl_scratch->scl_data.taps.v_taps = 6;
+ spl_scratch->scl_data.taps.h_taps_c = 6;
+ spl_scratch->scl_data.taps.v_taps_c = 6;
+ }
+ }
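+	/* In the EASF path above, taps are pinned to the 6- and 4-tap
+	 * combinations the EASF filters support (see the LB support comment
+	 * below)
+	 */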
/*Ensure we can support the requested number of vtaps*/
- min_taps_y = dc_fixpt_ceil(spl_out->scl_data.ratios.vert);
- min_taps_c = dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c);
+ min_taps_y = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ min_taps_c = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c);
/* Use LB_MEMORY_CONFIG_3 for 4:2:0 */
if ((spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8)
@@ -771,16 +954,16 @@ static bool spl_get_optimal_number_of_taps(
else
lb_config = LB_MEMORY_CONFIG_0;
// Determine max vtap support by calculating how much line buffer can fit
- spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_out->scl_data,
+ spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data,
lb_config, &num_part_y, &num_part_c);
/* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) > 2)
- max_taps_y = num_part_y - (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) - 2);
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2)
+ max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
else
max_taps_y = num_part_y;
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) > 2)
- max_taps_c = num_part_c - (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) - 2);
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 2)
+ max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
else
max_taps_c = num_part_c;
@@ -789,48 +972,108 @@ static bool spl_get_optimal_number_of_taps(
else if (max_taps_c < min_taps_c)
return false;
- if (spl_out->scl_data.taps.v_taps > max_taps_y)
- spl_out->scl_data.taps.v_taps = max_taps_y;
-
- if (spl_out->scl_data.taps.v_taps_c > max_taps_c)
- spl_out->scl_data.taps.v_taps_c = max_taps_c;
- if (spl_in->prefer_easf) {
- // EASF can be enabled only for taps 3,4,6
- // If optimal no of taps is 5, then set it to 4
- // If optimal no of taps is 7 or 8, then set it to 6
- if (spl_out->scl_data.taps.v_taps == 5)
- spl_out->scl_data.taps.v_taps = 4;
- if (spl_out->scl_data.taps.v_taps == 7 || spl_out->scl_data.taps.v_taps == 8)
- spl_out->scl_data.taps.v_taps = 6;
-
- if (spl_out->scl_data.taps.v_taps_c == 5)
- spl_out->scl_data.taps.v_taps_c = 4;
- if (spl_out->scl_data.taps.v_taps_c == 7 || spl_out->scl_data.taps.v_taps_c == 8)
- spl_out->scl_data.taps.v_taps_c = 6;
-
- if (spl_out->scl_data.taps.h_taps == 5)
- spl_out->scl_data.taps.h_taps = 4;
- if (spl_out->scl_data.taps.h_taps == 7 || spl_out->scl_data.taps.h_taps == 8)
- spl_out->scl_data.taps.h_taps = 6;
-
- if (spl_out->scl_data.taps.h_taps_c == 5)
- spl_out->scl_data.taps.h_taps_c = 4;
- if (spl_out->scl_data.taps.h_taps_c == 7 || spl_out->scl_data.taps.h_taps_c == 8)
- spl_out->scl_data.taps.h_taps_c = 6;
+ if (spl_scratch->scl_data.taps.v_taps > max_taps_y)
+ spl_scratch->scl_data.taps.v_taps = max_taps_y;
+
+ if (spl_scratch->scl_data.taps.v_taps_c > max_taps_c)
+ spl_scratch->scl_data.taps.v_taps_c = max_taps_c;
+ if (!skip_easf) {
+ /*
+ * RGB ( L + NL ) and Linear HDR support 6x6, 6x4, 6x3, 4x4, 4x3
+ * NL YUV420 only supports 6x6, 6x4 for Y and 4x4 for UV
+ *
+ * If LB does not support 3, 4, or 6 taps, then disable EASF_V
+ * and only enable EASF_H. So for RGB, support 6x2, 4x2
+ * and for NL YUV420, support 6x2 for Y and 4x2 for UV
+ *
+ * All other cases, have to disable EASF_V and EASF_H
+ *
+ * If optimal no of taps is 5, then set it to 4
+		 * If optimal no of taps is 7 or 8, nothing to adjust since the EASF path caps taps at 6
+ *
+ */
+ if (spl_scratch->scl_data.taps.v_taps == 5)
+ spl_scratch->scl_data.taps.v_taps = 4;
+
+ if (spl_scratch->scl_data.taps.v_taps_c == 5)
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+
+ if (spl_scratch->scl_data.taps.h_taps == 5)
+ spl_scratch->scl_data.taps.h_taps = 4;
+
+ if (spl_scratch->scl_data.taps.h_taps_c == 5)
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if ((spl_scratch->scl_data.taps.h_taps <= 4) ||
+ (spl_scratch->scl_data.taps.h_taps_c <= 3)) {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else if ((spl_scratch->scl_data.taps.v_taps <= 3) ||
+ (spl_scratch->scl_data.taps.v_taps_c <= 3)) {
+ *enable_easf_v = false;
+ *enable_easf_h = true;
+ } else {
+ *enable_easf_v = true;
+ *enable_easf_h = true;
+ }
+ SPL_ASSERT((spl_scratch->scl_data.taps.v_taps > 1) &&
+ (spl_scratch->scl_data.taps.v_taps_c > 1));
+ } else { /* RGB */
+ if (spl_scratch->scl_data.taps.h_taps <= 3) {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else if (spl_scratch->scl_data.taps.v_taps < 3) {
+ *enable_easf_v = false;
+ *enable_easf_h = true;
+ } else {
+ *enable_easf_v = true;
+ *enable_easf_h = true;
+ }
+ SPL_ASSERT(spl_scratch->scl_data.taps.v_taps > 1);
+ }
+ } else {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
	} // end of if (!skip_easf)
- if (!spl_in->basic_out.always_scale) {
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.horz))
- spl_out->scl_data.taps.h_taps = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.vert))
- spl_out->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.horz_c))
- spl_out->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.vert_c))
- spl_out->scl_data.taps.v_taps_c = 1;
+
+	/* The sharpener requires the scaler to be enabled, even at 1:1.
+	 * Check whether ISHARP can be enabled; if it cannot, then for 1:1
+	 * collapse taps to 1 and disable EASF. For the 2:1 YUV case where
+	 * chroma is 1:1, collapse the chroma taps to 1 when EASF is disabled.
+	 */
+
+ *enable_isharp = spl_get_isharp_en(spl_in, spl_scratch);
+ if (!*enable_isharp && !spl_in->basic_out.always_scale) {
+ if ((IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) {
+ spl_scratch->scl_data.taps.h_taps = 1;
+ spl_scratch->scl_data.taps.v_taps = 1;
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ spl_scratch->scl_data.taps.h_taps_c = 1;
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ spl_scratch->scl_data.taps.v_taps_c = 1;
+
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else {
+ if ((!*enable_easf_h) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)))
+ spl_scratch->scl_data.taps.h_taps_c = 1;
+
+ if ((!*enable_easf_v) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
+ spl_scratch->scl_data.taps.v_taps_c = 1;
+ }
}
return true;
}
+
static void spl_set_black_color_data(enum spl_pixel_format format,
struct scl_black_color *scl_black_color)
{
@@ -848,38 +1091,38 @@ static void spl_set_black_color_data(enum spl_pixel_format format,
static void spl_set_manual_ratio_init_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *scl_data)
{
- struct fixed31_32 bot;
+ struct spl_fixed31_32 bot;
- dscl_prog_data->ratios.h_scale_ratio = dc_fixpt_u3d19(scl_data->ratios.horz) << 5;
- dscl_prog_data->ratios.v_scale_ratio = dc_fixpt_u3d19(scl_data->ratios.vert) << 5;
- dscl_prog_data->ratios.h_scale_ratio_c = dc_fixpt_u3d19(scl_data->ratios.horz_c) << 5;
- dscl_prog_data->ratios.v_scale_ratio_c = dc_fixpt_u3d19(scl_data->ratios.vert_c) << 5;
+ dscl_prog_data->ratios.h_scale_ratio = spl_fixpt_u3d19(scl_data->ratios.horz) << 5;
+ dscl_prog_data->ratios.v_scale_ratio = spl_fixpt_u3d19(scl_data->ratios.vert) << 5;
+ dscl_prog_data->ratios.h_scale_ratio_c = spl_fixpt_u3d19(scl_data->ratios.horz_c) << 5;
+ dscl_prog_data->ratios.v_scale_ratio_c = spl_fixpt_u3d19(scl_data->ratios.vert_c) << 5;
/*
* 0.24 format for fraction, first five bits zeroed
*/
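+	/* e.g. spl_fixpt_u0d19() yields a 19-bit fraction; the << 5 left-aligns
+	 * it in the 24-bit register field, leaving the low five bits zero
+	 */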
dscl_prog_data->init.h_filter_init_frac =
- dc_fixpt_u0d19(scl_data->inits.h) << 5;
+ spl_fixpt_u0d19(scl_data->inits.h) << 5;
dscl_prog_data->init.h_filter_init_int =
- dc_fixpt_floor(scl_data->inits.h);
+ spl_fixpt_floor(scl_data->inits.h);
dscl_prog_data->init.h_filter_init_frac_c =
- dc_fixpt_u0d19(scl_data->inits.h_c) << 5;
+ spl_fixpt_u0d19(scl_data->inits.h_c) << 5;
dscl_prog_data->init.h_filter_init_int_c =
- dc_fixpt_floor(scl_data->inits.h_c);
+ spl_fixpt_floor(scl_data->inits.h_c);
dscl_prog_data->init.v_filter_init_frac =
- dc_fixpt_u0d19(scl_data->inits.v) << 5;
+ spl_fixpt_u0d19(scl_data->inits.v) << 5;
dscl_prog_data->init.v_filter_init_int =
- dc_fixpt_floor(scl_data->inits.v);
+ spl_fixpt_floor(scl_data->inits.v);
dscl_prog_data->init.v_filter_init_frac_c =
- dc_fixpt_u0d19(scl_data->inits.v_c) << 5;
+ spl_fixpt_u0d19(scl_data->inits.v_c) << 5;
dscl_prog_data->init.v_filter_init_int_c =
- dc_fixpt_floor(scl_data->inits.v_c);
-
- bot = dc_fixpt_add(scl_data->inits.v, scl_data->ratios.vert);
- dscl_prog_data->init.v_filter_init_bot_frac = dc_fixpt_u0d19(bot) << 5;
- dscl_prog_data->init.v_filter_init_bot_int = dc_fixpt_floor(bot);
- bot = dc_fixpt_add(scl_data->inits.v_c, scl_data->ratios.vert_c);
- dscl_prog_data->init.v_filter_init_bot_frac_c = dc_fixpt_u0d19(bot) << 5;
- dscl_prog_data->init.v_filter_init_bot_int_c = dc_fixpt_floor(bot);
+ spl_fixpt_floor(scl_data->inits.v_c);
+
+ bot = spl_fixpt_add(scl_data->inits.v, scl_data->ratios.vert);
+ dscl_prog_data->init.v_filter_init_bot_frac = spl_fixpt_u0d19(bot) << 5;
+ dscl_prog_data->init.v_filter_init_bot_int = spl_fixpt_floor(bot);
+ bot = spl_fixpt_add(scl_data->inits.v_c, scl_data->ratios.vert_c);
+ dscl_prog_data->init.v_filter_init_bot_frac_c = spl_fixpt_u0d19(bot) << 5;
+ dscl_prog_data->init.v_filter_init_bot_int_c = spl_fixpt_floor(bot);
}
static void spl_set_taps_data(struct dscl_prog_data *dscl_prog_data,
@@ -890,77 +1133,28 @@ static void spl_set_taps_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->taps.v_taps_c = scl_data->taps.v_taps_c - 1;
dscl_prog_data->taps.h_taps_c = scl_data->taps.h_taps_c - 1;
}
-static const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
-{
- if (taps == 8)
- return spl_get_filter_8tap_64p(ratio);
- else if (taps == 7)
- return spl_get_filter_7tap_64p(ratio);
- else if (taps == 6)
- return spl_get_filter_6tap_64p(ratio);
- else if (taps == 5)
- return spl_get_filter_5tap_64p(ratio);
- else if (taps == 4)
- return spl_get_filter_4tap_64p(ratio);
- else if (taps == 3)
- return spl_get_filter_3tap_64p(ratio);
- else if (taps == 2)
- return spl_get_filter_2tap_64p();
- else if (taps == 1)
- return NULL;
- else {
- /* should never happen, bug */
- return NULL;
- }
-}
-static void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
- const struct spl_scaler_data *data)
-{
- dscl_prog_data->filter_h = spl_dscl_get_filter_coeffs_64p(
- data->taps.h_taps, data->ratios.horz);
- dscl_prog_data->filter_v = spl_dscl_get_filter_coeffs_64p(
- data->taps.v_taps, data->ratios.vert);
- dscl_prog_data->filter_h_c = spl_dscl_get_filter_coeffs_64p(
- data->taps.h_taps_c, data->ratios.horz_c);
- dscl_prog_data->filter_v_c = spl_dscl_get_filter_coeffs_64p(
- data->taps.v_taps_c, data->ratios.vert_c);
-}
-
-static const uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
-{
- if ((taps == 3) || (taps == 4) || (taps == 6))
- return spl_get_filter_isharp_bs_4tap_64p();
- else {
- /* should never happen, bug */
- return NULL;
- }
-}
-static void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
- const struct spl_scaler_data *data)
-{
- dscl_prog_data->filter_blur_scale_h = spl_dscl_get_blur_scale_coeffs_64p(
- data->taps.h_taps);
- dscl_prog_data->filter_blur_scale_v = spl_dscl_get_blur_scale_coeffs_64p(
- data->taps.v_taps);
-}
/* Populate dscl prog data structure from scaler data calculated by SPL */
-static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_scratch *spl_scratch,
+ struct spl_out *spl_out, bool enable_easf_v, bool enable_easf_h, bool enable_isharp)
{
struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data;
- const struct spl_scaler_data *data = &spl_out->scl_data;
+ const struct spl_scaler_data *data = &spl_scratch->scl_data;
struct scl_black_color *scl_black_color = &dscl_prog_data->scl_black_color;
+ bool enable_easf = enable_easf_v || enable_easf_h;
+
// Set values for recout
- dscl_prog_data->recout = spl_out->scl_data.recout;
+ dscl_prog_data->recout = spl_scratch->scl_data.recout;
// Set values for MPC Size
- dscl_prog_data->mpc_size.width = spl_out->scl_data.h_active;
- dscl_prog_data->mpc_size.height = spl_out->scl_data.v_active;
+ dscl_prog_data->mpc_size.width = spl_scratch->scl_data.h_active;
+ dscl_prog_data->mpc_size.height = spl_scratch->scl_data.v_active;
// SCL_MODE - Set SCL_MODE data
- dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data);
+ dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data, enable_isharp,
+ enable_easf);
// SCL_BLACK_COLOR
spl_set_black_color_data(spl_in->basic_in.format, scl_black_color);
@@ -971,103 +1165,140 @@ static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_out *spl_ou
// Set HTaps/VTaps
spl_set_taps_data(dscl_prog_data, data);
// Set viewport
- dscl_prog_data->viewport = spl_out->scl_data.viewport;
+ dscl_prog_data->viewport = spl_scratch->scl_data.viewport;
// Set viewport_c
- dscl_prog_data->viewport_c = spl_out->scl_data.viewport_c;
+ dscl_prog_data->viewport_c = spl_scratch->scl_data.viewport_c;
// Set filters data
- spl_set_filters_data(dscl_prog_data, data);
+ spl_set_filters_data(dscl_prog_data, data, enable_easf_v, enable_easf_h);
}
-/* Enable EASF ?*/
-static bool enable_easf(int scale_ratio, int taps,
- enum linear_light_scaling lls_pref, bool prefer_easf)
+
+/* Calculate C0-C3 coefficients based on HDR_mult */
+static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint32_t sdr_white_level_nits)
{
- // Is downscaling > 6:1 ?
- if (scale_ratio > 6) {
- // END - No EASF support for downscaling > 6:1
- return false;
- }
- // Is upscaling or downscaling up to 2:1?
- if (scale_ratio <= 2) {
- // Is linear scaling or EASF preferred?
- if (lls_pref == LLS_PREF_YES || prefer_easf) {
- // LB support taps 3, 4, 6
- if (taps == 3 || taps == 4 || taps == 6) {
- // END - EASF supported
- return true;
- }
- }
- }
- // END - EASF not supported
- return false;
+ struct spl_fixed31_32 hdr_mult, c0_mult, c1_mult, c2_mult;
+ struct spl_fixed31_32 c0_calc, c1_calc, c2_calc;
+ struct spl_custom_float_format fmt;
+ uint32_t hdr_multx100_int;
+
+ if ((sdr_white_level_nits >= 80) && (sdr_white_level_nits <= 480))
+ hdr_multx100_int = sdr_white_level_nits * 100 / 80;
+ else
+ hdr_multx100_int = 100; /* default for 80 nits otherwise */
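+	/* e.g. a hypothetical sdr_white_level_nits of 240 gives
+	 * hdr_multx100_int = 240 * 100 / 80 = 300, i.e. an HDR multiplier of
+	 * 3.0, so C0 becomes 3.0 * 0.2126 * 2^14/125 ~= 83.60 before packing
+	 */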
+
+ hdr_mult = spl_fixpt_from_fraction((long long)hdr_multx100_int, 100LL);
+ c0_mult = spl_fixpt_from_fraction(2126LL, 10000LL);
+ c1_mult = spl_fixpt_from_fraction(7152LL, 10000LL);
+ c2_mult = spl_fixpt_from_fraction(722LL, 10000LL);
+
+ c0_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c0_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
+ c1_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c1_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
+ c2_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c2_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
+
+ fmt.exponenta_bits = 5;
+ fmt.mantissa_bits = 10;
+ fmt.sign = true;
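+	/* sign + 5-bit exponent + 10-bit mantissa, i.e. the fp1.5.10 layout
+	 * used by the EASF matrix registers below
+	 */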
+
+ // fp1.5.10, C0 coefficient (LN_rec709: HDR_MULT * 0.212600 * 2^14/125)
+ spl_convert_to_custom_float_format(c0_calc, &fmt, &dscl_prog_data->easf_matrix_c0);
+ // fp1.5.10, C1 coefficient (LN_rec709: HDR_MULT * 0.715200 * 2^14/125)
+ spl_convert_to_custom_float_format(c1_calc, &fmt, &dscl_prog_data->easf_matrix_c1);
+ // fp1.5.10, C2 coefficient (LN_rec709: HDR_MULT * 0.072200 * 2^14/125)
+ spl_convert_to_custom_float_format(c2_calc, &fmt, &dscl_prog_data->easf_matrix_c2);
+ dscl_prog_data->easf_matrix_c3 = 0x0; // fp1.5.10, C3 coefficient
}
+
/* Set EASF data */
-static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
- bool enable_easf_v, bool enable_easf_h, enum linear_light_scaling lls_pref,
- enum spl_pixel_format format)
+static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *spl_out, bool enable_easf_v,
+ bool enable_easf_h, enum linear_light_scaling lls_pref,
+ enum spl_pixel_format format, enum system_setup setup,
+ uint32_t sdr_white_level_nits)
{
- if (spl_is_yuv420(format)) /* TODO: 0 = RGB, 1 = YUV */
- dscl_prog_data->easf_matrix_mode = 1;
- else
- dscl_prog_data->easf_matrix_mode = 0;
-
+ struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data;
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
- dscl_prog_data->easf_v_sharp_factor = 1;
+ dscl_prog_data->easf_v_sharp_factor = 0;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
- dscl_prog_data->easf_v_bf3_mode = 2; // 2-bit, BF3 chroma mode correction calculation mode
- dscl_prog_data->easf_v_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
- dscl_prog_data->easf_v_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
- dscl_prog_data->easf_v_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+ /* 2-bit, BF3 chroma mode correction calculation mode */
+ dscl_prog_data->easf_v_bf3_mode = spl_get_v_bf3_mode(
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ minCoef ]*/
dscl_prog_data->easf_v_ringest_3tap_dntilt_uptilt =
- 0x9F00;// FP1.5.10 [minCoef] (-0.036109167214271)
+ spl_get_3tap_dntilt_uptilt_offset(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTiltMaxVal ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt_max =
- 0x24FE; // FP1.5.10 [upTiltMaxVal] ( 0.904556445553545)
+ spl_get_3tap_uptilt_maxval(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ dnTiltSlope ]*/
dscl_prog_data->easf_v_ringest_3tap_dntilt_slope =
- 0x3940; // FP1.5.10 [dnTiltSlope] ( 0.910488988173371)
+ spl_get_3tap_dntilt_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt1Slope ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt1_slope =
- 0x359C; // FP1.5.10 [upTilt1Slope] ( 0.125620179040899)
+ spl_get_3tap_uptilt1_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt2Slope ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt2_slope =
- 0x359C; // FP1.5.10 [upTilt2Slope] ( 0.006786817723568)
+ spl_get_3tap_uptilt2_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt2Offset ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt2_offset =
- 0x9F00; // FP1.5.10 [upTilt2Offset] (-0.006139059716651)
+ spl_get_3tap_uptilt2_offset(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+		/* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [V_REDUCER_GAIN4] */
dscl_prog_data->easf_v_ringest_eventap_reduceg1 =
- 0x4000; // FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4]
+ spl_get_reducer_gain4(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] */
dscl_prog_data->easf_v_ringest_eventap_reduceg2 =
- 0x4100; // FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6]
+ spl_get_reducer_gain6(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */
dscl_prog_data->easf_v_ringest_eventap_gain1 =
- 0xB058; // FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024
+ spl_get_gainRing4(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */
dscl_prog_data->easf_v_ringest_eventap_gain2 =
- 0xA640; // FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024
+ spl_get_gainRing6(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
		dscl_prog_data->easf_v_bf_maxa = 63; //Vertical Max BF value A in U0.6 format. Selected if V_FCNTL == 0
		dscl_prog_data->easf_v_bf_maxb = 63; //Vertical Max BF value B in U0.6 format. Selected if V_FCNTL == 1
		dscl_prog_data->easf_v_bf_mina = 0;	//Vertical Min BF value A in U0.6 format. Selected if V_FCNTL == 0
		dscl_prog_data->easf_v_bf_minb = 0;	//Vertical Min BF value B in U0.6 format. Selected if V_FCNTL == 1
- dscl_prog_data->easf_v_bf1_pwl_in_seg0 = -512; // S0.10, BF1 PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_in_seg1 = -20; // S0.10, BF1 PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = -56; // S7.3, BF1 Slope PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = -48; // S7.3, BF1 Slope PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = -240; // S7.3, BF1 Slope PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = -160; // S7.3, BF1 Slope PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
- dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
if (lls_pref == LLS_PREF_YES) {
+ dscl_prog_data->easf_v_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_v_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_v_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+
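+			/* Negative PWL values below appear to be encoded as 11-bit
+			 * two's complement, e.g. 0x600 = 1536 - 2048 = -512 in S0.10
+			 * and 0x7C8 = -56 in S7.3; S0.6 bases use 7 bits (0x4E = -50)
+			 */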
+ dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512
+ dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20
+ dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = 0x7C8; // S7.3, BF1 Slope PWL Segment 3 = -56
+ dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = 0x7D0; // S7.3, BF1 Slope PWL Segment 4 = -48
+ dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = 0x710; // S7.3, BF1 Slope PWL Segment 5 = -240
+ dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = 0x760; // S7.3, BF1 Slope PWL Segment 6 = -160
+ dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_v_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_slope_set0 = 0x12C5; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1088,13 +1319,41 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
0x136B; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_v_bf3_pwl_in_set4 =
0x0C37; // FP0.6.6, BF3 Input value PWL Segment 4 (0.125 * 125^3)
- dscl_prog_data->easf_v_bf3_pwl_base_set4 = -50; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf3_pwl_base_set4 = 0x4E; // S0.6, BF3 Base PWL Segment 4 = -50
dscl_prog_data->easf_v_bf3_pwl_slope_set4 =
0x1200; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_v_bf3_pwl_in_set5 =
0x0CF7; // FP0.6.6, BF3 Input value PWL Segment 5 (1.0 * 125^3)
- dscl_prog_data->easf_v_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} else {
+ dscl_prog_data->easf_v_bf2_flat1_gain = 13; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_v_bf2_flat2_gain = 15; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_v_bf2_roc_gain = 14; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x440; // S0.10, BF1 PWL Segment 0 = -960
+ dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 2; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7C4; // S0.10, BF1 PWL Segment 1 = -60
+ dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 109; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 48; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = 0x7ED; // S7.3, BF1 Slope PWL Segment 3 = -19
+ dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 96; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = 0x7F0; // S7.3, BF1 Slope PWL Segment 4 = -16
+ dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 144; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = 0x7B0; // S7.3, BF1 Slope PWL Segment 5 = -80
+ dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 192; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = 0x7CB; // S7.3, BF1 Slope PWL Segment 6 = -53
+ dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 240; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_v_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_slope_set0 = 0x0000; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1113,11 +1372,11 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
0x1878; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_v_bf3_pwl_in_set4 =
0x0761; // FP0.6.6, BF3 Input value PWL Segment 4 (0.375)
- dscl_prog_data->easf_v_bf3_pwl_base_set4 = -60; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf3_pwl_base_set4 = 0x44; // S0.6, BF3 Base PWL Segment 4 = -60
dscl_prog_data->easf_v_bf3_pwl_slope_set4 = 0x1760; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_v_bf3_pwl_in_set5 =
0x0780; // FP0.6.6, BF3 Input value PWL Segment 5 (0.5)
- dscl_prog_data->easf_v_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
}
} else
dscl_prog_data->easf_v_en = false;
@@ -1125,52 +1384,63 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
- dscl_prog_data->easf_h_sharp_factor = 1;
+ dscl_prog_data->easf_h_sharp_factor = 0;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
0xF; // 4-bit, BF2 calculation mode
- dscl_prog_data->easf_h_bf3_mode =
- 2; // 2-bit, BF3 chroma mode correction calculation mode
- dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
- dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
- dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+ /* 2-bit, BF3 chroma mode correction calculation mode */
+ dscl_prog_data->easf_h_bf3_mode = spl_get_h_bf3_mode(
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] */
dscl_prog_data->easf_h_ringest_eventap_reduceg1 =
- 0x4000; // FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4]
+ spl_get_reducer_gain4(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+		/* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [H_REDUCER_GAIN6] */
dscl_prog_data->easf_h_ringest_eventap_reduceg2 =
- 0x4100; // FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6]
+ spl_get_reducer_gain6(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */
dscl_prog_data->easf_h_ringest_eventap_gain1 =
- 0xB058; // FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024
+ spl_get_gainRing4(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */
dscl_prog_data->easf_h_ringest_eventap_gain2 =
- 0xA640; // FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024
+ spl_get_gainRing6(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
		dscl_prog_data->easf_h_bf_maxa = 63; //Horz Max BF value A in U0.6 format. Selected if H_FCNTL==0
		dscl_prog_data->easf_h_bf_maxb = 63; //Horz Max BF value B in U0.6 format. Selected if H_FCNTL==1
		dscl_prog_data->easf_h_bf_mina = 0;	//Horz Min BF value A in U0.6 format. Selected if H_FCNTL==0
		dscl_prog_data->easf_h_bf_minb = 0;	//Horz Min BF value B in U0.6 format. Selected if H_FCNTL==1
- dscl_prog_data->easf_h_bf1_pwl_in_seg0 = -512; // S0.10, BF1 PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_in_seg1 = -20; // S0.10, BF1 PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = -56; // S7.3, BF1 Slope PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = -48; // S7.3, BF1 Slope PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = -240; // S7.3, BF1 Slope PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = -160; // S7.3, BF1 Slope PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
- dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
if (lls_pref == LLS_PREF_YES) {
+ dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_h_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512
+ dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20
+ dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = 0x7C8; // S7.3, BF1 Slope PWL Segment 3 = -56
+ dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = 0x7D0; // S7.3, BF1 Slope PWL Segment 4 = -48
+ dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = 0x710; // S7.3, BF1 Slope PWL Segment 5 = -240
+ dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = 0x760; // S7.3, BF1 Slope PWL Segment 6 = -160
+ dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_h_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_slope_set0 = 0x12C5; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1188,12 +1458,40 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_h_bf3_pwl_slope_set3 = 0x136B; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_h_bf3_pwl_in_set4 =
0x0C37; // FP0.6.6, BF3 Input value PWL Segment 4 (0.125 * 125^3)
- dscl_prog_data->easf_h_bf3_pwl_base_set4 = -50; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf3_pwl_base_set4 = 0x4E; // S0.6, BF3 Base PWL Segment 4 = -50
dscl_prog_data->easf_h_bf3_pwl_slope_set4 = 0x1200; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_h_bf3_pwl_in_set5 =
0x0CF7; // FP0.6.6, BF3 Input value PWL Segment 5 (1.0 * 125^3)
- dscl_prog_data->easf_h_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} else {
+ dscl_prog_data->easf_h_bf2_flat1_gain = 13; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_h_bf2_flat2_gain = 15; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_h_bf2_roc_gain = 14; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_h_bf1_pwl_in_seg0 = 0x440; // S0.10, BF1 PWL Segment 0 = -960
+ dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 2; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_in_seg1 = 0x7C4; // S0.10, BF1 PWL Segment 1 = -60
+ dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 109; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 48; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = 0x7ED; // S7.3, BF1 Slope PWL Segment 3 = -19
+ dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 96; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = 0x7F0; // S7.3, BF1 Slope PWL Segment 4 = -16
+ dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 144; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = 0x7B0; // S7.3, BF1 Slope PWL Segment 5 = -80
+ dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 192; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = 0x7CB; // S7.3, BF1 Slope PWL Segment 6 = -53
+ dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 240; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_h_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_slope_set0 = 0x0000; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1211,25 +1509,30 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_h_bf3_pwl_slope_set3 = 0x1878; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_h_bf3_pwl_in_set4 =
0x0761; // FP0.6.6, BF3 Input value PWL Segment 4 (0.375)
- dscl_prog_data->easf_h_bf3_pwl_base_set4 = -60; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf3_pwl_base_set4 = 0x44; // S0.6, BF3 Base PWL Segment 4 = -60
dscl_prog_data->easf_h_bf3_pwl_slope_set4 = 0x1760; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_h_bf3_pwl_in_set5 =
0x0780; // FP0.6.6, BF3 Input value PWL Segment 5 (0.5)
- dscl_prog_data->easf_h_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} // if (lls_pref == LLS_PREF_YES)
} else
dscl_prog_data->easf_h_en = false;
if (lls_pref == LLS_PREF_YES) {
dscl_prog_data->easf_ltonl_en = 1; // Linear input
- dscl_prog_data->easf_matrix_c0 =
- 0x504E; // fp1.5.10, C0 coefficient (LN_BT2020: 0.2627 * (2^14)/125 = 34.43750000)
- dscl_prog_data->easf_matrix_c1 =
- 0x558E; // fp1.5.10, C1 coefficient (LN_BT2020: 0.6780 * (2^14)/125 = 88.87500000)
- dscl_prog_data->easf_matrix_c2 =
- 0x47C6; // fp1.5.10, C2 coefficient (LN_BT2020: 0.0593 * (2^14)/125 = 7.77343750)
- dscl_prog_data->easf_matrix_c3 =
- 0x0; // fp1.5.10, C3 coefficient
+ if ((setup == HDR_L) && (spl_is_rgb8(format))) {
+ /* Calculate C0-C3 coefficients based on HDR multiplier */
+ spl_calculate_c0_c3_hdr(dscl_prog_data, sdr_white_level_nits);
+ } else { // HDR_L ( DWM ) and SDR_L
+ dscl_prog_data->easf_matrix_c0 =
+ 0x4EF7; // fp1.5.10, C0 coefficient (LN_rec709: 0.2126 * (2^14)/125 = 27.86590720)
+ dscl_prog_data->easf_matrix_c1 =
+ 0x55DC; // fp1.5.10, C1 coefficient (LN_rec709: 0.7152 * (2^14)/125 = 93.74269440)
+ dscl_prog_data->easf_matrix_c2 =
+ 0x48BB; // fp1.5.10, C2 coefficient (LN_rec709: 0.0722 * (2^14)/125 = 9.46339840)
+ dscl_prog_data->easf_matrix_c3 =
+ 0x0; // fp1.5.10, C3 coefficient
+ }
} else {
dscl_prog_data->easf_ltonl_en = 0; // Non-Linear input
dscl_prog_data->easf_matrix_c0 =
@@ -1241,27 +1544,43 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_matrix_c3 =
0x0; // fp1.5.10, C3 coefficient
}
+
+ if (spl_is_yuv420(format)) { /* TODO: 0 = RGB, 1 = YUV */
+ dscl_prog_data->easf_matrix_mode = 1;
+ /*
+ * 2-bit, BF3 chroma mode correction calculation mode
+ * Needs to be disabled for YUV420 mode
+ * Override lookup value
+ */
+ dscl_prog_data->easf_v_bf3_mode = 0;
+ dscl_prog_data->easf_h_bf3_mode = 0;
+ } else
+ dscl_prog_data->easf_matrix_mode = 0;
+
}
+
/*Set isharp noise detection */
-static void spl_set_isharp_noise_det_mode(struct dscl_prog_data *dscl_prog_data)
+static void spl_set_isharp_noise_det_mode(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data)
{
// ISHARP_NOISEDET_MODE
// 0: 3x5 as VxH
// 1: 4x5 as VxH
// 2:
// 3: 5x5 as VxH
- if (dscl_prog_data->taps.v_taps == 6)
- dscl_prog_data->isharp_noise_det.mode = 3; // ISHARP_NOISEDET_MODE
- else if (dscl_prog_data->taps.h_taps == 4)
- dscl_prog_data->isharp_noise_det.mode = 1; // ISHARP_NOISEDET_MODE
- else if (dscl_prog_data->taps.h_taps == 3)
- dscl_prog_data->isharp_noise_det.mode = 0; // ISHARP_NOISEDET_MODE
+ if (data->taps.v_taps == 6)
+ dscl_prog_data->isharp_noise_det.mode = 3;
+ else if (data->taps.v_taps == 4)
+ dscl_prog_data->isharp_noise_det.mode = 1;
+ else if (data->taps.v_taps == 3)
+ dscl_prog_data->isharp_noise_det.mode = 0;
};
/* Set Sharpener data */
static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
struct adaptive_sharpness adp_sharpness, bool enable_isharp,
enum linear_light_scaling lls_pref, enum spl_pixel_format format,
- const struct spl_scaler_data *data)
+ const struct spl_scaler_data *data, struct spl_fixed31_32 ratio,
+ enum system_setup setup, enum scale_to_sharpness_policy scale_to_sharpness_policy)
{
/* Turn off sharpener if not required */
if (!enable_isharp) {
@@ -1269,11 +1588,18 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
return;
}
+ spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness,
+ scale_to_sharpness_policy);
+ dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup);
+ dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level;
+
dscl_prog_data->isharp_en = 1; // ISHARP_EN
- dscl_prog_data->isharp_noise_det.enable = 1; // ISHARP_NOISEDET_EN
	// Enable noise detection and set ISHARP_NOISEDET_MODE only when h_taps == 6
- if (dscl_prog_data->taps.h_taps == 6)
- spl_set_isharp_noise_det_mode(dscl_prog_data); // ISHARP_NOISEDET_MODE
+ if (data->taps.h_taps == 6) {
+ dscl_prog_data->isharp_noise_det.enable = 1; /* ISHARP_NOISEDET_EN */
+ spl_set_isharp_noise_det_mode(dscl_prog_data, data); /* ISHARP_NOISEDET_MODE */
+ } else
+ dscl_prog_data->isharp_noise_det.enable = 0; // ISHARP_NOISEDET_EN
// Program noise detection threshold
dscl_prog_data->isharp_noise_det.uthreshold = 24; // ISHARP_NOISEDET_UTHRE
dscl_prog_data->isharp_noise_det.dthreshold = 4; // ISHARP_NOISEDET_DTHRE
@@ -1282,48 +1608,86 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->isharp_noise_det.pwl_end_in = 13; // ISHARP_NOISEDET_PWL_END_IN
dscl_prog_data->isharp_noise_det.pwl_slope = 1623; // ISHARP_NOISEDET_PWL_SLOPE
- if ((lls_pref == LLS_PREF_NO) && !spl_is_yuv420(format)) /* ISHARP_FMT_MODE */
+ if (lls_pref == LLS_PREF_NO) /* ISHARP_FMT_MODE */
dscl_prog_data->isharp_fmt.mode = 1;
else
dscl_prog_data->isharp_fmt.mode = 0;
dscl_prog_data->isharp_fmt.norm = 0x3C00; // ISHARP_FMT_NORM
dscl_prog_data->isharp_lba.mode = 0; // ISHARP_LBA_MODE
- // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
- dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[0] = 32; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
- dscl_prog_data->isharp_lba.in_seg[1] = 256; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
- dscl_prog_data->isharp_lba.in_seg[2] = 614; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[2] = -20; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
- dscl_prog_data->isharp_lba.in_seg[3] = 1023; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
- dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
- dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
- switch (adp_sharpness.sharpness) {
- case SHARPNESS_LOW:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_0p5x();
- break;
- case SHARPNESS_MID:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_1p0x();
- break;
- case SHARPNESS_HIGH:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_2p0x();
- break;
- default:
- BREAK_TO_DEBUGGER();
+
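+	/* ISHARP Local Brightness Adjustment: a piecewise-linear curve
+	 * (INPUT/BASE/SLOPE per segment) that presumably scales sharpening
+	 * strength as a function of local luma; the segment tables below are
+	 * selected per SDR_L/HDR_L/other setup
+	 */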
+ if (setup == SDR_L) {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 62; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 130; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 450; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x18D; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -115
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+ dscl_prog_data->isharp_lba.in_seg[3] = 520; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+ dscl_prog_data->isharp_lba.in_seg[4] = 520; // ISHARP LBA PWL for Seg 4.INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+ dscl_prog_data->isharp_lba.in_seg[5] = 520; // ISHARP LBA PWL for Seg 5.INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
+ } else if (setup == HDR_L) {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 32; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 254; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 559; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x10C; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -244
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+ dscl_prog_data->isharp_lba.in_seg[3] = 592; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+ dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+ dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
+ } else {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 40; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 204; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 818; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x1D9; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -39
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+ dscl_prog_data->isharp_lba.in_seg[3] = 1023; // ISHARP LBA PWL for Seg 3. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+ dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+ dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
}
// Program the nldelta soft clip values
@@ -1346,59 +1710,6 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
// Set the values as per lookup table
spl_set_blur_scale_data(dscl_prog_data, data);
}
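An aside on the S5.3 encodings used above (an inference from the annotated values, not text from the patch): the slope fields appear to be 9-bit two's-complement numbers with three fraction bits, which is how 0x18D, 0x10C and 0x1D9 decode to the -115, -244 and -39 noted in the comments. A minimal decode sketch:

static int s5_3_to_signed_units(unsigned int reg)
{
	int v = (int)(reg & 0x1FF);	/* sign + 5 integer + 3 fraction bits */

	if (v & 0x100)			/* sign bit set: apply two's complement */
		v -= 0x200;
	return v;			/* real-valued slope is v / 8.0 */
}

For example, s5_3_to_signed_units(0x18D) returns -115, i.e. a slope of -115/8 = -14.375.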
-static bool spl_get_isharp_en(struct adaptive_sharpness adp_sharpness,
- int vscale_ratio, int hscale_ratio, struct spl_taps taps,
- enum spl_pixel_format format)
-{
- bool enable_isharp = false;
-
- if (adp_sharpness.enable == false)
- return enable_isharp; // Return if adaptive sharpness is disabled
- // Is downscaling ?
- if (vscale_ratio > 1 || hscale_ratio > 1) {
- // END - No iSHARP support for downscaling
- return enable_isharp;
- }
- // Scaling is up to 1:1 (no scaling) or upscaling
-
- /* Only apply sharpness to NV12 and not P010 */
- if (format != SPL_PIXEL_FORMAT_420BPP8)
- return enable_isharp;
-
- // LB support horizontal taps 4,6 or vertical taps 3, 4, 6
- if (taps.h_taps == 4 || taps.h_taps == 6 ||
- taps.v_taps == 3 || taps.v_taps == 4 || taps.v_taps == 6) {
- // END - iSHARP supported
- enable_isharp = true;
- }
- return enable_isharp;
-}
-
-static bool spl_choose_lls_policy(enum spl_pixel_format format,
- enum spl_transfer_func_type tf_type,
- enum spl_transfer_func_predefined tf_predefined_type,
- enum linear_light_scaling *lls_pref)
-{
- if (spl_is_yuv420(format)) {
- *lls_pref = LLS_PREF_NO;
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) || (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
- return true;
- } else { /* RGB or YUV444 */
- if (tf_type == SPL_TF_TYPE_PREDEFINED) {
- if ((tf_predefined_type == SPL_TRANSFER_FUNCTION_HLG) ||
- (tf_predefined_type == SPL_TRANSFER_FUNCTION_HLG12))
- *lls_pref = LLS_PREF_NO;
- else
- *lls_pref = LLS_PREF_YES;
- return true;
- } else if (tf_type == SPL_TF_TYPE_BYPASS) {
- *lls_pref = LLS_PREF_YES;
- return true;
- }
- }
- *lls_pref = LLS_PREF_NO;
- return false;
-}
/* Calculate scaler parameters */
bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
@@ -1406,65 +1717,75 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
bool res = false;
bool enable_easf_v = false;
bool enable_easf_h = false;
- bool lls_enable_easf = true;
- const struct spl_scaler_data *data = &spl_out->scl_data;
+ int vratio = 0;
+ int hratio = 0;
+ struct spl_scratch spl_scratch;
+ struct spl_fixed31_32 isharp_scale_ratio;
+ enum system_setup setup;
+ bool enable_isharp = false;
+ const struct spl_scaler_data *data = &spl_scratch.scl_data;
+
+ memset(&spl_scratch, 0, sizeof(struct spl_scratch));
+ spl_scratch.scl_data.h_active = spl_in->h_active;
+ spl_scratch.scl_data.v_active = spl_in->v_active;
+
// All SPL calls
/* recout calculation */
/* depends on h_active */
- spl_calculate_recout(spl_in, spl_out);
+ spl_calculate_recout(spl_in, &spl_scratch, spl_out);
/* depends on pixel format */
- spl_calculate_scaling_ratios(spl_in, spl_out);
+ spl_calculate_scaling_ratios(spl_in, &spl_scratch, spl_out);
/* depends on scaling ratios and recout, does not calculate offset yet */
- spl_calculate_viewport_size(spl_in, spl_out);
+ spl_calculate_viewport_size(spl_in, &spl_scratch);
res = spl_get_optimal_number_of_taps(
spl_in->basic_out.max_downscale_src_width, spl_in,
- spl_out, &spl_in->scaling_quality);
+ &spl_scratch, &spl_in->scaling_quality, &enable_easf_v,
+ &enable_easf_h, &enable_isharp);
/*
* Depends on recout, scaling ratios, h_active and taps
* May need to re-check lb size after this in some obscure scenario
*/
if (res)
- spl_calculate_inits_and_viewports(spl_in, spl_out);
+ spl_calculate_inits_and_viewports(spl_in, &spl_scratch);
// Handle 3d recout
- spl_handle_3d_recout(spl_in, &spl_out->scl_data.recout);
+ spl_handle_3d_recout(spl_in, &spl_scratch.scl_data.recout);
// Clamp
- spl_clamp_viewport(&spl_out->scl_data.viewport);
+ spl_clamp_viewport(&spl_scratch.scl_data.viewport);
if (!res)
return res;
- /*
- * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
- * function to determine whether to use LINEAR or NONLINEAR scaling
- */
- if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
- lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
- spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
- &spl_in->lls_pref);
-
// Save all calculated parameters in dscl_prog_data structure to program hw registers
- spl_set_dscl_prog_data(spl_in, spl_out);
+ spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp);
- int vratio = dc_fixpt_ceil(spl_out->scl_data.ratios.vert);
- int hratio = dc_fixpt_ceil(spl_out->scl_data.ratios.horz);
- if (!lls_enable_easf || spl_in->disable_easf) {
- enable_easf_v = false;
- enable_easf_h = false;
+ if (spl_in->lls_pref == LLS_PREF_YES) {
+ if (spl_in->is_hdr_on)
+ setup = HDR_L;
+ else
+ setup = SDR_L;
} else {
- /* Enable EASF on vertical? */
- enable_easf_v = enable_easf(vratio, spl_out->scl_data.taps.v_taps, spl_in->lls_pref, spl_in->prefer_easf);
- /* Enable EASF on horizontal? */
- enable_easf_h = enable_easf(hratio, spl_out->scl_data.taps.h_taps, spl_in->lls_pref, spl_in->prefer_easf);
+ if (spl_in->is_hdr_on)
+ setup = HDR_NL;
+ else
+ setup = SDR_NL;
}
+
// Set EASF
- spl_set_easf_data(spl_out->dscl_prog_data, enable_easf_v, enable_easf_h, spl_in->lls_pref,
- spl_in->basic_in.format);
+ spl_set_easf_data(&spl_scratch, spl_out, enable_easf_v, enable_easf_h, spl_in->lls_pref,
+ spl_in->basic_in.format, setup, spl_in->sdr_white_level_nits);
+
// Set iSHARP
- bool enable_isharp = spl_get_isharp_en(spl_in->adaptive_sharpness, vratio, hratio,
- spl_out->scl_data.taps, spl_in->basic_in.format);
+ vratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.horz);
+ if (vratio <= hratio)
+ isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.vert;
+ else
+ isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.horz;
+
spl_set_isharp_data(spl_out->dscl_prog_data, spl_in->adaptive_sharpness, enable_isharp,
- spl_in->lls_pref, spl_in->basic_in.format, data);
+ spl_in->lls_pref, spl_in->basic_in.format, data, isharp_scale_ratio, setup,
+ spl_in->debug.scale_to_sharpness_policy);
return res;
}
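For readability, the setup selection above reduces to the following mapping; pick_setup() is purely illustrative and not a helper in this patch:

static enum system_setup pick_setup(bool linear_scaling, bool is_hdr_on)
{
	if (linear_scaling)			/* lls_pref == LLS_PREF_YES */
		return is_hdr_on ? HDR_L : SDR_L;
	return is_hdr_on ? HDR_NL : SDR_NL;	/* nonlinear light scaling */
}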
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
index f1fd3eb92f8a..205e59a2a8ee 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
@@ -9,16 +9,8 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/* SPL interfaces */
bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out);
-#ifdef __cplusplus
-}
-#endif
-
#endif /* __DC_SPL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c
new file mode 100644
index 000000000000..99238644e0a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dc_spl_filters.h"
+
+void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter,
+ uint16_t *s1_12_filter, int num_taps)
+{
+ int num_entries = NUM_PHASES_COEFF * num_taps;
+ int i;
+
+ for (i = 0; i < num_entries; i++)
+ *(s1_12_filter + i) = *(s1_10_filter + i) * 4;
+}
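Multiplying by 4 is a two-bit left shift that moves the binary point from 10 to 12 fraction bits while leaving the represented coefficient unchanged. A worked example (the value is illustrative):

/* 0x0200 in S1.10 is 0x200 / 1024 = 0.5; after * 4 it becomes 0x0800,
 * which in S1.12 is 0x800 / 4096 = 0.5 - the same coefficient, now in
 * the 12-fraction-bit format expected downstream. */
uint16_t s1_12_coeff = (uint16_t)(0x0200 * 4);	/* == 0x0800 */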
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h
new file mode 100644
index 000000000000..20439cdbdb10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DC_SPL_FILTERS_H__
+#define __DC_SPL_FILTERS_H__
+
+#include "dc_spl_types.h"
+
+#define NUM_PHASES_COEFF 33
+
+void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter,
+ uint16_t *s1_12_filter, int num_taps);
+
+#endif /* __DC_SPL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
index 8bc838c7c3c5..e0572252c640 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
@@ -2,7 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include "dc_spl_types.h"
+#include "spl_debug.h"
+#include "dc_spl_filters.h"
#include "dc_spl_isharp_filters.h"
//========================================
@@ -16,7 +17,7 @@
// C_start = 40.000000
// C_end = 64.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_0[32] = {
+static const uint32_t filter_isharp_1D_lut_0[ISHARP_LUT_TABLE_SIZE] = {
0x02010000,
0x0A070503,
0x1614100D,
@@ -62,7 +63,7 @@ static const uint32_t filter_isharp_1D_lut_0[32] = {
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_0p5x[32] = {
+static const uint32_t filter_isharp_1D_lut_0p5x[ISHARP_LUT_TABLE_SIZE] = {
0x00000000,
0x02020101,
0x06050403,
@@ -107,7 +108,7 @@ static const uint32_t filter_isharp_1D_lut_0p5x[32] = {
// C_start = 96.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_1p0x[32] = {
+static const uint32_t filter_isharp_1D_lut_1p0x[ISHARP_LUT_TABLE_SIZE] = {
0x01000000,
0x05040302,
0x0B0A0806,
@@ -152,7 +153,7 @@ static const uint32_t filter_isharp_1D_lut_1p0x[32] = {
// C_start = 96.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_1p5x[32] = {
+static const uint32_t filter_isharp_1D_lut_1p5x[ISHARP_LUT_TABLE_SIZE] = {
0x01010000,
0x07050402,
0x110F0C0A,
@@ -197,7 +198,7 @@ static const uint32_t filter_isharp_1D_lut_1p5x[32] = {
// C_start = 40.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_2p0x[32] = {
+static const uint32_t filter_isharp_1D_lut_2p0x[ISHARP_LUT_TABLE_SIZE] = {
0x02010000,
0x0A070503,
0x1614100D,
@@ -231,6 +232,53 @@ static const uint32_t filter_isharp_1D_lut_2p0x[32] = {
0x080B0D0E,
0x00020406,
};
+//========================================
+// Delta Gain 1DLUT
+// LUT content is packed as 4 bytes into one DWORD/entry
+// A_start = 0.000000
+// A_end = 10.000000
+// A_gain = 3.000000
+// B_start = 11.000000
+// B_end = 127.000000
+// C_start = 40.000000
+// C_end = 127.000000
+//========================================
+static const uint32_t filter_isharp_1D_lut_3p0x[ISHARP_LUT_TABLE_SIZE] = {
+0x03010000,
+0x0F0B0805,
+0x211E1813,
+0x2B292624,
+0x3533302E,
+0x3E3C3A37,
+0x46444240,
+0x4D4B4A48,
+0x5352504F,
+0x59575655,
+0x5D5C5B5A,
+0x61605F5E,
+0x64646362,
+0x66666565,
+0x68686767,
+0x68686868,
+0x68686868,
+0x67676868,
+0x65656666,
+0x62636464,
+0x5E5F6061,
+0x5A5B5C5D,
+0x55565759,
+0x4F505253,
+0x484A4B4D,
+0x40424446,
+0x373A3C3E,
+0x2E303335,
+0x2426292B,
+0x191B1E21,
+0x0D101316,
+0x0003060A,
+};
+
+//========================================
// Wide scaler coefficients
//========================================================
// <using> gen_scaler_coeffs.m
@@ -285,7 +333,7 @@ static const uint16_t filter_isharp_wide_6tap_64p[198] = {
// <CoefType> Blur & Scale LPF
// <CoefQuant> S1.10
//========================================================
-static const uint16_t filter_isharp_bs_4tap_64p[198] = {
+static const uint16_t filter_isharp_bs_4tap_in_6_64p[198] = {
0x0000, 0x00E5, 0x0237, 0x00E4, 0x0000, 0x0000,
0x0000, 0x00DE, 0x0237, 0x00EB, 0x0000, 0x0000,
0x0000, 0x00D7, 0x0236, 0x00F2, 0x0001, 0x0000,
@@ -320,6 +368,147 @@ static const uint16_t filter_isharp_bs_4tap_64p[198] = {
0x0000, 0x003B, 0x01CF, 0x01C2, 0x0034, 0x0000,
0x0000, 0x0037, 0x01C9, 0x01C9, 0x0037, 0x0000
};
+//========================================================
+// <using> gen_BlurScale_coeffs.m
+// <date> 25-Apr-2022
+// <num_taps> 4
+// <num_phases> 64
+// <CoefType> Blur & Scale LPF
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t filter_isharp_bs_4tap_64p[132] = {
+0x00E5, 0x0237, 0x00E4, 0x0000,
+0x00DE, 0x0237, 0x00EB, 0x0000,
+0x00D7, 0x0236, 0x00F2, 0x0001,
+0x00D0, 0x0235, 0x00FA, 0x0001,
+0x00C9, 0x0234, 0x0101, 0x0002,
+0x00C2, 0x0233, 0x0108, 0x0003,
+0x00BB, 0x0232, 0x0110, 0x0003,
+0x00B5, 0x0230, 0x0117, 0x0004,
+0x00AE, 0x022E, 0x011F, 0x0005,
+0x00A8, 0x022C, 0x0126, 0x0006,
+0x00A2, 0x022A, 0x012D, 0x0007,
+0x009C, 0x0228, 0x0134, 0x0008,
+0x0096, 0x0225, 0x013C, 0x0009,
+0x0090, 0x0222, 0x0143, 0x000B,
+0x008A, 0x021F, 0x014B, 0x000C,
+0x0085, 0x021C, 0x0151, 0x000E,
+0x007F, 0x0218, 0x015A, 0x000F,
+0x007A, 0x0215, 0x0160, 0x0011,
+0x0074, 0x0211, 0x0168, 0x0013,
+0x006F, 0x020D, 0x016F, 0x0015,
+0x006A, 0x0209, 0x0176, 0x0017,
+0x0065, 0x0204, 0x017E, 0x0019,
+0x0060, 0x0200, 0x0185, 0x001B,
+0x005C, 0x01FB, 0x018C, 0x001D,
+0x0057, 0x01F6, 0x0193, 0x0020,
+0x0053, 0x01F1, 0x019A, 0x0022,
+0x004E, 0x01EC, 0x01A1, 0x0025,
+0x004A, 0x01E6, 0x01A8, 0x0028,
+0x0046, 0x01E1, 0x01AF, 0x002A,
+0x0042, 0x01DB, 0x01B6, 0x002D,
+0x003F, 0x01D5, 0x01BB, 0x0031,
+0x003B, 0x01CF, 0x01C2, 0x0034,
+0x0037, 0x01C9, 0x01C9, 0x0037,
+};
+//========================================================
+// <using> gen_BlurScale_coeffs.m
+// <date> 09-Jun-2022
+// <num_taps> 3
+// <num_phases> 64
+// <CoefType> Blur & Scale LPF
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t filter_isharp_bs_3tap_64p[99] = {
+0x0200, 0x0200, 0x0000,
+0x01F6, 0x0206, 0x0004,
+0x01EC, 0x020B, 0x0009,
+0x01E2, 0x0211, 0x000D,
+0x01D8, 0x0216, 0x0012,
+0x01CE, 0x021C, 0x0016,
+0x01C4, 0x0221, 0x001B,
+0x01BA, 0x0226, 0x0020,
+0x01B0, 0x022A, 0x0026,
+0x01A6, 0x022F, 0x002B,
+0x019C, 0x0233, 0x0031,
+0x0192, 0x0238, 0x0036,
+0x0188, 0x023C, 0x003C,
+0x017E, 0x0240, 0x0042,
+0x0174, 0x0244, 0x0048,
+0x016A, 0x0248, 0x004E,
+0x0161, 0x024A, 0x0055,
+0x0157, 0x024E, 0x005B,
+0x014D, 0x0251, 0x0062,
+0x0144, 0x0253, 0x0069,
+0x013A, 0x0256, 0x0070,
+0x0131, 0x0258, 0x0077,
+0x0127, 0x025B, 0x007E,
+0x011E, 0x025C, 0x0086,
+0x0115, 0x025E, 0x008D,
+0x010B, 0x0260, 0x0095,
+0x0102, 0x0262, 0x009C,
+0x00F9, 0x0263, 0x00A4,
+0x00F0, 0x0264, 0x00AC,
+0x00E7, 0x0265, 0x00B4,
+0x00DF, 0x0264, 0x00BD,
+0x00D6, 0x0265, 0x00C5,
+0x00CD, 0x0266, 0x00CD,
+};
+
+/* Converted Blur & Scale coeff tables from S1.10 to S1.12 */
+static uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198];
+static uint16_t filter_isharp_bs_4tap_64p_s1_12[132];
+static uint16_t filter_isharp_bs_3tap_64p_s1_12[99];
+
+/* Pre-generated 1DLUT for given setup and sharpness level */
+struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = {
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+};
+
+struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = {
+ {1125, 1000, 0},
+ {11, 10, 1},
+ {1075, 1000, 2},
+ {105, 100, 3},
+ {1025, 1000, 4},
+ {1, 1, 5},
+};
+
const uint32_t *spl_get_filter_isharp_1D_lut_0(void)
{
return filter_isharp_1D_lut_0;
@@ -340,11 +529,229 @@ const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void)
{
return filter_isharp_1D_lut_2p0x;
}
+const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void)
+{
+ return filter_isharp_1D_lut_3p0x;
+}
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void)
{
return filter_isharp_wide_6tap_64p;
}
-const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
+uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void)
+{
+ return filter_isharp_bs_4tap_in_6_64p_s1_12;
+}
+uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
{
- return filter_isharp_bs_4tap_64p;
+ return filter_isharp_bs_4tap_64p_s1_12;
}
+uint16_t *spl_get_filter_isharp_bs_3tap_64p(void)
+{
+ return filter_isharp_bs_3tap_64p_s1_12;
+}
+
+static unsigned int spl_calculate_sharpness_level_adj(struct spl_fixed31_32 ratio)
+{
+ int j;
+ struct spl_fixed31_32 ratio_level;
+ struct scale_ratio_to_sharpness_level_adj *lookup_ptr;
+ unsigned int sharpness_level_down_adj;
+
+ /*
+ * Adjust sharpness level based on the current scaling ratio.
+ *
+ * There are five discrete ratio thresholds; the sharpness level is
+ * stepped down by one more level each time the scaling ratio drops
+ * below a threshold:
+ *
+ * 1.125 upscale and higher - no adjustment
+ * 1.100 - under 1.125 - adjust level down 1
+ * 1.075 - under 1.100 - adjust level down 2
+ * 1.050 - under 1.075 - adjust level down 3
+ * 1.025 - under 1.050 - adjust level down 4
+ * 1.000 - under 1.025 - adjust level down 5
+ */
+ j = 0;
+ sharpness_level_down_adj = 0;
+ lookup_ptr = sharpness_level_adj;
+ while (j < NUM_SHARPNESS_ADJ_LEVELS) {
+ ratio_level = spl_fixpt_from_fraction(lookup_ptr->ratio_numer,
+ lookup_ptr->ratio_denom);
+ if (ratio.value >= ratio_level.value) {
+ sharpness_level_down_adj = lookup_ptr->level_down_adj;
+ break;
+ }
+ lookup_ptr++;
+ j++;
+ }
+ return sharpness_level_down_adj;
+}
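A worked pass through the threshold walk, using the sharpness_level_adj table defined earlier:

/* For ratio = 1.06 (assumed input):
 *   1.06 >= 1.125 ? no
 *   1.06 >= 1.100 ? no
 *   1.06 >= 1.075 ? no
 *   1.06 >= 1.050 ? yes -> return level_down_adj = 3
 * A downscale ratio (under 1.000) matches no entry and the
 * function returns the initial value of 0. */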
+
+static unsigned int spl_calculate_sharpness_level(struct spl_fixed31_32 ratio,
+ int discrete_sharpness_level, enum system_setup setup,
+ struct spl_sharpness_range sharpness_range,
+ enum scale_to_sharpness_policy scale_to_sharpness_policy)
+{
+ unsigned int sharpness_level = 0;
+ unsigned int sharpness_level_down_adj = 0;
+
+ int min_sharpness, max_sharpness, mid_sharpness;
+
+ /*
+ * Adjust the sharpness level if the policy requires adjusting it
+ * based on the scale ratio. Depending on the ratio, the sharpness
+ * level may be stepped down. A sharpness value of 0 is never
+ * selected, so the lowest discrete level is 1 when min_sharpness
+ * is 0, and 0 otherwise.
+ *
+ * If the policy is not required, this code may be removed at a
+ * later date.
+ */
+ switch (setup) {
+
+ case HDR_L:
+ min_sharpness = sharpness_range.hdr_rgb_min;
+ max_sharpness = sharpness_range.hdr_rgb_max;
+ mid_sharpness = sharpness_range.hdr_rgb_mid;
+ if (scale_to_sharpness_policy == SCALE_TO_SHARPNESS_ADJ_ALL)
+ sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio);
+ break;
+ case HDR_NL:
+ /* currently no use case, use Non-linear SDR values for now */
+ case SDR_NL:
+ min_sharpness = sharpness_range.sdr_yuv_min;
+ max_sharpness = sharpness_range.sdr_yuv_max;
+ mid_sharpness = sharpness_range.sdr_yuv_mid;
+ if (scale_to_sharpness_policy >= SCALE_TO_SHARPNESS_ADJ_YUV)
+ sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio);
+ break;
+ case SDR_L:
+ default:
+ min_sharpness = sharpness_range.sdr_rgb_min;
+ max_sharpness = sharpness_range.sdr_rgb_max;
+ mid_sharpness = sharpness_range.sdr_rgb_mid;
+ if (scale_to_sharpness_policy == SCALE_TO_SHARPNESS_ADJ_ALL)
+ sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio);
+ break;
+ }
+
+ if ((min_sharpness == 0) && (sharpness_level_down_adj >= discrete_sharpness_level))
+ discrete_sharpness_level = 1;
+ else if (sharpness_level_down_adj >= discrete_sharpness_level)
+ discrete_sharpness_level = 0;
+ else
+ discrete_sharpness_level -= sharpness_level_down_adj;
+
+ int lower_half_step_size = (mid_sharpness - min_sharpness) / 5;
+ int upper_half_step_size = (max_sharpness - mid_sharpness) / 5;
+
+ // lower half linear approximation
+ if (discrete_sharpness_level < 5)
+ sharpness_level = min_sharpness + (lower_half_step_size * discrete_sharpness_level);
+ // upper half linear approximation
+ else
+ sharpness_level = mid_sharpness + (upper_half_step_size * (discrete_sharpness_level - 5));
+
+ return sharpness_level;
+}
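The final mapping is piecewise linear in two halves of five steps each. With hypothetical range values of min = 0, mid = 500 and max = 1000:

/* lower_half_step_size = (500 - 0) / 5 = 100
 * upper_half_step_size = (1000 - 500) / 5 = 100
 * discrete level 3 -> 0 + 3 * 100 = 300
 * discrete level 7 -> 500 + (7 - 5) * 100 = 700
 * (both in the sharpnessX1000 units consumed by the caller) */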
+
+void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup,
+ struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy)
+{
+ uint8_t *byte_ptr_1dlut_src, *byte_ptr_1dlut_dst;
+ struct spl_fixed31_32 sharp_base, sharp_calc, sharp_level;
+ int j;
+ int size_1dlut;
+ int sharp_calc_int;
+ uint32_t filter_pregen_store[ISHARP_LUT_TABLE_SIZE];
+
+ /* Custom sharpnessX1000 value */
+ unsigned int sharpnessX1000 = spl_calculate_sharpness_level(ratio,
+ sharpness.sharpness_level, setup,
+ sharpness.sharpness_range, scale_to_sharpness_policy);
+ sharp_level = spl_fixpt_from_fraction(sharpnessX1000, 1000);
+
+ /*
+ * Check whether the pregen 1dlut table was already calculated for
+ * this sharpness; if the numer/denom differ, fall through and
+ * recalculate it
+ */
+ if ((filter_isharp_1D_lut_pregen[setup].sharpness_numer == sharpnessX1000) &&
+ (filter_isharp_1D_lut_pregen[setup].sharpness_denom == 1000))
+ return;
+
+ /*
+ * Calculate LUT_128_gained with this equation:
+ *
+ * LUT_128_gained[i] = (uint8)(0.5 + min(255,(double)(LUT_128[i])*sharpLevel/iGain))
+ * where LUT_128[i] is contents of 3p0x isharp 1dlut
+ * where sharpLevel is desired sharpness level
+ * where iGain is base sharpness level 3.0
+ * where LUT_128_gained[i] is adjusted 1dlut value based on desired sharpness level
+ */
+ byte_ptr_1dlut_src = (uint8_t *)filter_isharp_1D_lut_3p0x;
+ byte_ptr_1dlut_dst = (uint8_t *)filter_pregen_store;
+ size_1dlut = sizeof(filter_isharp_1D_lut_3p0x);
+ memset(byte_ptr_1dlut_dst, 0, size_1dlut);
+ for (j = 0; j < size_1dlut; j++) {
+ sharp_base = spl_fixpt_from_int((int)*byte_ptr_1dlut_src);
+ sharp_calc = spl_fixpt_mul(sharp_base, sharp_level);
+ sharp_calc = spl_fixpt_div(sharp_calc, spl_fixpt_from_int(3));
+ sharp_calc = spl_fixpt_min(spl_fixpt_from_int(255), sharp_calc);
+ sharp_calc = spl_fixpt_add(sharp_calc, spl_fixpt_from_fraction(1, 2));
+ sharp_calc_int = spl_fixpt_floor(sharp_calc);
+ /* Clamp it at 0x7F so it doesn't wrap */
+ if (sharp_calc_int > 127)
+ sharp_calc_int = 127;
+ *byte_ptr_1dlut_dst = (uint8_t)sharp_calc_int;
+
+ byte_ptr_1dlut_src++;
+ byte_ptr_1dlut_dst++;
+ }
+
+ /* Update 1dlut table and sharpness level */
+ memcpy((void *)filter_isharp_1D_lut_pregen[setup].value, (void *)filter_pregen_store, size_1dlut);
+ filter_isharp_1D_lut_pregen[setup].sharpness_numer = sharpnessX1000;
+ filter_isharp_1D_lut_pregen[setup].sharpness_denom = 1000;
+}
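The fixed-point loop above implements the commented LUT_128_gained equation; a floating-point rendering (a sketch only, not the driver code) makes the order of the clamp, round and wrap guard explicit:

static uint8_t gain_lut_entry(uint8_t lut_3p0x_byte, double sharp_level)
{
	double v = (double)lut_3p0x_byte * sharp_level / 3.0;	/* iGain = 3.0 */

	if (v > 255.0)
		v = 255.0;		/* min(255, ...) */
	v += 0.5;			/* round half up; the cast floors */
	if (v >= 128.0)
		v = 127.0;		/* clamp at 0x7F so packed bytes don't wrap */
	return (uint8_t)v;
}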
+
+uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup)
+{
+ return filter_isharp_1D_lut_pregen[setup].value;
+}
+
+void spl_init_blur_scale_coeffs(void)
+{
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_3tap_64p,
+ filter_isharp_bs_3tap_64p_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_64p,
+ filter_isharp_bs_4tap_64p_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_in_6_64p,
+ filter_isharp_bs_4tap_in_6_64p_s1_12, 6);
+}
+
+uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
+{
+ if (taps == 3)
+ return spl_get_filter_isharp_bs_3tap_64p();
+ else if (taps == 4)
+ return spl_get_filter_isharp_bs_4tap_64p();
+ else if (taps == 6)
+ return spl_get_filter_isharp_bs_4tap_in_6_64p();
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data)
+{
+ dscl_prog_data->filter_blur_scale_h =
+ spl_dscl_get_blur_scale_coeffs_64p(data->taps.h_taps);
+
+ dscl_prog_data->filter_blur_scale_v =
+ spl_dscl_get_blur_scale_coeffs_64p(data->taps.v_taps);
+}
+
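One ordering note, inferred from the zero-initialized arrays rather than stated in the patch: the *_s1_12 tables start out as all zeros, so spl_init_blur_scale_coeffs() has to run before the pointers stored by spl_set_blur_scale_data() are consumed, roughly:

spl_init_blur_scale_coeffs();			/* populate the S1.12 copies once */
spl_set_blur_scale_data(dscl_prog_data, data);	/* then hand out their pointers */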
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
index 1aaf4c50c1bc..afcc66206ca2 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
@@ -7,11 +7,45 @@
#include "dc_spl_types.h"
+#define ISHARP_LUT_TABLE_SIZE 32
const uint32_t *spl_get_filter_isharp_1D_lut_0(void);
const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void);
-const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
+const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void);
+uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void);
+uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
+uint16_t *spl_get_filter_isharp_bs_3tap_64p(void);
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void);
+uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
+
+#define NUM_SHARPNESS_ADJ_LEVELS 6
+struct scale_ratio_to_sharpness_level_adj {
+ unsigned int ratio_numer;
+ unsigned int ratio_denom;
+ unsigned int level_down_adj; /* adjust sharpness level down */
+};
+
+struct isharp_1D_lut_pregen {
+ unsigned int sharpness_numer;
+ unsigned int sharpness_denom;
+ uint32_t value[ISHARP_LUT_TABLE_SIZE];
+};
+
+enum system_setup {
+ SDR_NL = 0,
+ SDR_L,
+ HDR_NL,
+ HDR_L,
+ NUM_SHARPNESS_SETUPS
+};
+
+void spl_init_blur_scale_coeffs(void);
+void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data);
+
+void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup,
+ struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy);
+uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup);
#endif /* __DC_SPL_ISHARP_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c
new file mode 100644
index 000000000000..09bf82f7d468
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c
@@ -0,0 +1,1726 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_debug.h"
+#include "dc_spl_filters.h"
+#include "dc_spl_scl_filters.h"
+#include "dc_spl_scl_easf_filters.h"
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_30[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F6, 0x0206, 0x0004,
+ 0x01EC, 0x020B, 0x0009,
+ 0x01E2, 0x0211, 0x000D,
+ 0x01D8, 0x0216, 0x0012,
+ 0x01CE, 0x021C, 0x0016,
+ 0x01C4, 0x0221, 0x001B,
+ 0x01BA, 0x0226, 0x0020,
+ 0x01B0, 0x022A, 0x0026,
+ 0x01A6, 0x022F, 0x002B,
+ 0x019C, 0x0233, 0x0031,
+ 0x0192, 0x0238, 0x0036,
+ 0x0188, 0x023C, 0x003C,
+ 0x017E, 0x0240, 0x0042,
+ 0x0174, 0x0244, 0x0048,
+ 0x016A, 0x0248, 0x004E,
+ 0x0161, 0x024A, 0x0055,
+ 0x0157, 0x024E, 0x005B,
+ 0x014D, 0x0251, 0x0062,
+ 0x0144, 0x0253, 0x0069,
+ 0x013A, 0x0256, 0x0070,
+ 0x0131, 0x0258, 0x0077,
+ 0x0127, 0x025B, 0x007E,
+ 0x011E, 0x025C, 0x0086,
+ 0x0115, 0x025E, 0x008D,
+ 0x010B, 0x0260, 0x0095,
+ 0x0102, 0x0262, 0x009C,
+ 0x00F9, 0x0263, 0x00A4,
+ 0x00F0, 0x0264, 0x00AC,
+ 0x00E7, 0x0265, 0x00B4,
+ 0x00DF, 0x0264, 0x00BD,
+ 0x00D6, 0x0265, 0x00C5,
+ 0x00CD, 0x0266, 0x00CD,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_40[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F6, 0x0206, 0x0004,
+ 0x01EB, 0x020E, 0x0007,
+ 0x01E1, 0x0214, 0x000B,
+ 0x01D7, 0x021A, 0x000F,
+ 0x01CD, 0x0220, 0x0013,
+ 0x01C2, 0x0226, 0x0018,
+ 0x01B8, 0x022C, 0x001C,
+ 0x01AE, 0x0231, 0x0021,
+ 0x01A3, 0x0237, 0x0026,
+ 0x0199, 0x023C, 0x002B,
+ 0x018F, 0x0240, 0x0031,
+ 0x0185, 0x0245, 0x0036,
+ 0x017A, 0x024A, 0x003C,
+ 0x0170, 0x024F, 0x0041,
+ 0x0166, 0x0253, 0x0047,
+ 0x015C, 0x0257, 0x004D,
+ 0x0152, 0x025A, 0x0054,
+ 0x0148, 0x025E, 0x005A,
+ 0x013E, 0x0261, 0x0061,
+ 0x0134, 0x0264, 0x0068,
+ 0x012B, 0x0266, 0x006F,
+ 0x0121, 0x0269, 0x0076,
+ 0x0117, 0x026C, 0x007D,
+ 0x010E, 0x026E, 0x0084,
+ 0x0104, 0x0270, 0x008C,
+ 0x00FB, 0x0271, 0x0094,
+ 0x00F2, 0x0272, 0x009C,
+ 0x00E9, 0x0273, 0x00A4,
+ 0x00E0, 0x0274, 0x00AC,
+ 0x00D7, 0x0275, 0x00B4,
+ 0x00CE, 0x0275, 0x00BD,
+ 0x00C5, 0x0276, 0x00C5,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_50[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F5, 0x0209, 0x0002,
+ 0x01EA, 0x0211, 0x0005,
+ 0x01DF, 0x021A, 0x0007,
+ 0x01D4, 0x0222, 0x000A,
+ 0x01C9, 0x022A, 0x000D,
+ 0x01BE, 0x0232, 0x0010,
+ 0x01B3, 0x0239, 0x0014,
+ 0x01A8, 0x0241, 0x0017,
+ 0x019D, 0x0248, 0x001B,
+ 0x0192, 0x024F, 0x001F,
+ 0x0187, 0x0255, 0x0024,
+ 0x017C, 0x025C, 0x0028,
+ 0x0171, 0x0262, 0x002D,
+ 0x0166, 0x0268, 0x0032,
+ 0x015B, 0x026E, 0x0037,
+ 0x0150, 0x0273, 0x003D,
+ 0x0146, 0x0278, 0x0042,
+ 0x013B, 0x027D, 0x0048,
+ 0x0130, 0x0282, 0x004E,
+ 0x0126, 0x0286, 0x0054,
+ 0x011B, 0x028A, 0x005B,
+ 0x0111, 0x028D, 0x0062,
+ 0x0107, 0x0290, 0x0069,
+ 0x00FD, 0x0293, 0x0070,
+ 0x00F3, 0x0296, 0x0077,
+ 0x00E9, 0x0298, 0x007F,
+ 0x00DF, 0x029A, 0x0087,
+ 0x00D5, 0x029C, 0x008F,
+ 0x00CC, 0x029D, 0x0097,
+ 0x00C3, 0x029E, 0x009F,
+ 0x00BA, 0x029E, 0x00A8,
+ 0x00B1, 0x029E, 0x00B1,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_60[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F4, 0x020B, 0x0001,
+ 0x01E8, 0x0216, 0x0002,
+ 0x01DC, 0x0221, 0x0003,
+ 0x01D0, 0x022B, 0x0005,
+ 0x01C4, 0x0235, 0x0007,
+ 0x01B8, 0x0240, 0x0008,
+ 0x01AC, 0x0249, 0x000B,
+ 0x01A0, 0x0253, 0x000D,
+ 0x0194, 0x025C, 0x0010,
+ 0x0188, 0x0265, 0x0013,
+ 0x017C, 0x026E, 0x0016,
+ 0x0170, 0x0277, 0x0019,
+ 0x0164, 0x027F, 0x001D,
+ 0x0158, 0x0287, 0x0021,
+ 0x014C, 0x028F, 0x0025,
+ 0x0140, 0x0297, 0x0029,
+ 0x0135, 0x029D, 0x002E,
+ 0x0129, 0x02A4, 0x0033,
+ 0x011D, 0x02AB, 0x0038,
+ 0x0112, 0x02B0, 0x003E,
+ 0x0107, 0x02B5, 0x0044,
+ 0x00FC, 0x02BA, 0x004A,
+ 0x00F1, 0x02BF, 0x0050,
+ 0x00E6, 0x02C3, 0x0057,
+ 0x00DB, 0x02C7, 0x005E,
+ 0x00D1, 0x02CA, 0x0065,
+ 0x00C7, 0x02CC, 0x006D,
+ 0x00BD, 0x02CE, 0x0075,
+ 0x00B3, 0x02D0, 0x007D,
+ 0x00A9, 0x02D2, 0x0085,
+ 0x00A0, 0x02D2, 0x008E,
+ 0x0097, 0x02D2, 0x0097,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_70[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F3, 0x020D, 0x0000,
+ 0x01E5, 0x021B, 0x0000,
+ 0x01D8, 0x0228, 0x0000,
+ 0x01CB, 0x0235, 0x0000,
+ 0x01BD, 0x0243, 0x0000,
+ 0x01B0, 0x024F, 0x0001,
+ 0x01A2, 0x025C, 0x0002,
+ 0x0195, 0x0268, 0x0003,
+ 0x0187, 0x0275, 0x0004,
+ 0x017A, 0x0280, 0x0006,
+ 0x016D, 0x028C, 0x0007,
+ 0x015F, 0x0298, 0x0009,
+ 0x0152, 0x02A2, 0x000C,
+ 0x0145, 0x02AD, 0x000E,
+ 0x0138, 0x02B7, 0x0011,
+ 0x012B, 0x02C0, 0x0015,
+ 0x011E, 0x02CA, 0x0018,
+ 0x0111, 0x02D3, 0x001C,
+ 0x0105, 0x02DB, 0x0020,
+ 0x00F8, 0x02E3, 0x0025,
+ 0x00EC, 0x02EA, 0x002A,
+ 0x00E0, 0x02F1, 0x002F,
+ 0x00D5, 0x02F6, 0x0035,
+ 0x00C9, 0x02FC, 0x003B,
+ 0x00BE, 0x0301, 0x0041,
+ 0x00B3, 0x0305, 0x0048,
+ 0x00A8, 0x0309, 0x004F,
+ 0x009E, 0x030C, 0x0056,
+ 0x0094, 0x030E, 0x005E,
+ 0x008A, 0x0310, 0x0066,
+ 0x0081, 0x0310, 0x006F,
+ 0x0077, 0x0312, 0x0077,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_80[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F1, 0x0210, 0x0FFF,
+ 0x01E2, 0x0220, 0x0FFE,
+ 0x01D2, 0x0232, 0x0FFC,
+ 0x01C3, 0x0241, 0x0FFC,
+ 0x01B4, 0x0251, 0x0FFB,
+ 0x01A4, 0x0262, 0x0FFA,
+ 0x0195, 0x0271, 0x0FFA,
+ 0x0186, 0x0281, 0x0FF9,
+ 0x0176, 0x0291, 0x0FF9,
+ 0x0167, 0x02A0, 0x0FF9,
+ 0x0158, 0x02AE, 0x0FFA,
+ 0x0149, 0x02BD, 0x0FFA,
+ 0x013A, 0x02CB, 0x0FFB,
+ 0x012C, 0x02D7, 0x0FFD,
+ 0x011D, 0x02E5, 0x0FFE,
+ 0x010F, 0x02F1, 0x0000,
+ 0x0101, 0x02FD, 0x0002,
+ 0x00F3, 0x0308, 0x0005,
+ 0x00E5, 0x0313, 0x0008,
+ 0x00D8, 0x031D, 0x000B,
+ 0x00CB, 0x0326, 0x000F,
+ 0x00BE, 0x032F, 0x0013,
+ 0x00B2, 0x0337, 0x0017,
+ 0x00A6, 0x033E, 0x001C,
+ 0x009A, 0x0345, 0x0021,
+ 0x008F, 0x034A, 0x0027,
+ 0x0084, 0x034F, 0x002D,
+ 0x0079, 0x0353, 0x0034,
+ 0x006F, 0x0356, 0x003B,
+ 0x0065, 0x0358, 0x0043,
+ 0x005C, 0x0359, 0x004B,
+ 0x0053, 0x035A, 0x0053,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_90[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01EE, 0x0214, 0x0FFE,
+ 0x01DC, 0x0228, 0x0FFC,
+ 0x01CA, 0x023C, 0x0FFA,
+ 0x01B9, 0x024F, 0x0FF8,
+ 0x01A7, 0x0262, 0x0FF7,
+ 0x0195, 0x0276, 0x0FF5,
+ 0x0183, 0x028A, 0x0FF3,
+ 0x0172, 0x029C, 0x0FF2,
+ 0x0160, 0x02AF, 0x0FF1,
+ 0x014F, 0x02C2, 0x0FEF,
+ 0x013E, 0x02D4, 0x0FEE,
+ 0x012D, 0x02E5, 0x0FEE,
+ 0x011C, 0x02F7, 0x0FED,
+ 0x010C, 0x0307, 0x0FED,
+ 0x00FB, 0x0318, 0x0FED,
+ 0x00EC, 0x0327, 0x0FED,
+ 0x00DC, 0x0336, 0x0FEE,
+ 0x00CD, 0x0344, 0x0FEF,
+ 0x00BE, 0x0352, 0x0FF0,
+ 0x00B0, 0x035E, 0x0FF2,
+ 0x00A2, 0x036A, 0x0FF4,
+ 0x0095, 0x0375, 0x0FF6,
+ 0x0088, 0x037F, 0x0FF9,
+ 0x007B, 0x0388, 0x0FFD,
+ 0x006F, 0x0391, 0x0000,
+ 0x0064, 0x0397, 0x0005,
+ 0x0059, 0x039D, 0x000A,
+ 0x004E, 0x03A3, 0x000F,
+ 0x0045, 0x03A6, 0x0015,
+ 0x003B, 0x03A9, 0x001C,
+ 0x0033, 0x03AA, 0x0023,
+ 0x002A, 0x03AC, 0x002A,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_1_00[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01EB, 0x0217, 0x0FFE,
+ 0x01D5, 0x022F, 0x0FFC,
+ 0x01C0, 0x0247, 0x0FF9,
+ 0x01AB, 0x025E, 0x0FF7,
+ 0x0196, 0x0276, 0x0FF4,
+ 0x0181, 0x028D, 0x0FF2,
+ 0x016C, 0x02A5, 0x0FEF,
+ 0x0158, 0x02BB, 0x0FED,
+ 0x0144, 0x02D1, 0x0FEB,
+ 0x0130, 0x02E8, 0x0FE8,
+ 0x011C, 0x02FE, 0x0FE6,
+ 0x0109, 0x0313, 0x0FE4,
+ 0x00F6, 0x0328, 0x0FE2,
+ 0x00E4, 0x033C, 0x0FE0,
+ 0x00D2, 0x034F, 0x0FDF,
+ 0x00C0, 0x0363, 0x0FDD,
+ 0x00B0, 0x0374, 0x0FDC,
+ 0x009F, 0x0385, 0x0FDC,
+ 0x0090, 0x0395, 0x0FDB,
+ 0x0081, 0x03A4, 0x0FDB,
+ 0x0072, 0x03B3, 0x0FDB,
+ 0x0064, 0x03C0, 0x0FDC,
+ 0x0057, 0x03CC, 0x0FDD,
+ 0x004B, 0x03D6, 0x0FDF,
+ 0x003F, 0x03E0, 0x0FE1,
+ 0x0034, 0x03E8, 0x0FE4,
+ 0x002A, 0x03EF, 0x0FE7,
+ 0x0020, 0x03F5, 0x0FEB,
+ 0x0017, 0x03FA, 0x0FEF,
+ 0x000F, 0x03FD, 0x0FF4,
+ 0x0007, 0x03FF, 0x0FFA,
+ 0x0000, 0x0400, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_30[132] = {
+ 0x0104, 0x01F8, 0x0104, 0x0000,
+ 0x00FE, 0x01F7, 0x010A, 0x0001,
+ 0x00F8, 0x01F6, 0x010F, 0x0003,
+ 0x00F2, 0x01F5, 0x0114, 0x0005,
+ 0x00EB, 0x01F4, 0x011B, 0x0006,
+ 0x00E5, 0x01F3, 0x0120, 0x0008,
+ 0x00DF, 0x01F2, 0x0125, 0x000A,
+ 0x00DA, 0x01F0, 0x012A, 0x000C,
+ 0x00D4, 0x01EE, 0x0130, 0x000E,
+ 0x00CE, 0x01ED, 0x0135, 0x0010,
+ 0x00C8, 0x01EB, 0x013A, 0x0013,
+ 0x00C2, 0x01E9, 0x0140, 0x0015,
+ 0x00BD, 0x01E7, 0x0145, 0x0017,
+ 0x00B7, 0x01E5, 0x014A, 0x001A,
+ 0x00B1, 0x01E2, 0x0151, 0x001C,
+ 0x00AC, 0x01E0, 0x0155, 0x001F,
+ 0x00A7, 0x01DD, 0x015A, 0x0022,
+ 0x00A1, 0x01DB, 0x015F, 0x0025,
+ 0x009C, 0x01D8, 0x0165, 0x0027,
+ 0x0097, 0x01D5, 0x016A, 0x002A,
+ 0x0092, 0x01D2, 0x016E, 0x002E,
+ 0x008C, 0x01CF, 0x0174, 0x0031,
+ 0x0087, 0x01CC, 0x0179, 0x0034,
+ 0x0083, 0x01C9, 0x017D, 0x0037,
+ 0x007E, 0x01C5, 0x0182, 0x003B,
+ 0x0079, 0x01C2, 0x0187, 0x003E,
+ 0x0074, 0x01BE, 0x018C, 0x0042,
+ 0x0070, 0x01BA, 0x0190, 0x0046,
+ 0x006B, 0x01B7, 0x0195, 0x0049,
+ 0x0066, 0x01B3, 0x019A, 0x004D,
+ 0x0062, 0x01AF, 0x019E, 0x0051,
+ 0x005E, 0x01AB, 0x01A2, 0x0055,
+ 0x005A, 0x01A6, 0x01A6, 0x005A,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_40[132] = {
+ 0x00FB, 0x0209, 0x00FC, 0x0000,
+ 0x00F5, 0x0209, 0x0101, 0x0001,
+ 0x00EE, 0x0208, 0x0108, 0x0002,
+ 0x00E8, 0x0207, 0x010E, 0x0003,
+ 0x00E2, 0x0206, 0x0114, 0x0004,
+ 0x00DB, 0x0205, 0x011A, 0x0006,
+ 0x00D5, 0x0204, 0x0120, 0x0007,
+ 0x00CF, 0x0203, 0x0125, 0x0009,
+ 0x00C9, 0x0201, 0x012C, 0x000A,
+ 0x00C3, 0x01FF, 0x0132, 0x000C,
+ 0x00BD, 0x01FD, 0x0138, 0x000E,
+ 0x00B7, 0x01FB, 0x013E, 0x0010,
+ 0x00B1, 0x01F9, 0x0144, 0x0012,
+ 0x00AC, 0x01F7, 0x0149, 0x0014,
+ 0x00A6, 0x01F4, 0x0150, 0x0016,
+ 0x00A0, 0x01F2, 0x0156, 0x0018,
+ 0x009B, 0x01EF, 0x015C, 0x001A,
+ 0x0095, 0x01EC, 0x0162, 0x001D,
+ 0x0090, 0x01E9, 0x0168, 0x001F,
+ 0x008B, 0x01E6, 0x016D, 0x0022,
+ 0x0085, 0x01E3, 0x0173, 0x0025,
+ 0x0080, 0x01DF, 0x0179, 0x0028,
+ 0x007B, 0x01DC, 0x017E, 0x002B,
+ 0x0076, 0x01D8, 0x0184, 0x002E,
+ 0x0071, 0x01D4, 0x018A, 0x0031,
+ 0x006D, 0x01D1, 0x018E, 0x0034,
+ 0x0068, 0x01CD, 0x0193, 0x0038,
+ 0x0063, 0x01C8, 0x019A, 0x003B,
+ 0x005F, 0x01C4, 0x019E, 0x003F,
+ 0x005B, 0x01C0, 0x01A3, 0x0042,
+ 0x0056, 0x01BB, 0x01A9, 0x0046,
+ 0x0052, 0x01B7, 0x01AD, 0x004A,
+ 0x004E, 0x01B2, 0x01B2, 0x004E,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_50[132] = {
+ 0x00E5, 0x0236, 0x00E5, 0x0000,
+ 0x00DE, 0x0235, 0x00ED, 0x0000,
+ 0x00D7, 0x0235, 0x00F4, 0x0000,
+ 0x00D0, 0x0235, 0x00FB, 0x0000,
+ 0x00C9, 0x0234, 0x0102, 0x0001,
+ 0x00C2, 0x0233, 0x010A, 0x0001,
+ 0x00BC, 0x0232, 0x0111, 0x0001,
+ 0x00B5, 0x0230, 0x0119, 0x0002,
+ 0x00AE, 0x022F, 0x0121, 0x0002,
+ 0x00A8, 0x022D, 0x0128, 0x0003,
+ 0x00A2, 0x022B, 0x012F, 0x0004,
+ 0x009B, 0x0229, 0x0137, 0x0005,
+ 0x0095, 0x0226, 0x013F, 0x0006,
+ 0x008F, 0x0224, 0x0146, 0x0007,
+ 0x0089, 0x0221, 0x014E, 0x0008,
+ 0x0083, 0x021E, 0x0155, 0x000A,
+ 0x007E, 0x021B, 0x015C, 0x000B,
+ 0x0078, 0x0217, 0x0164, 0x000D,
+ 0x0072, 0x0213, 0x016D, 0x000E,
+ 0x006D, 0x0210, 0x0173, 0x0010,
+ 0x0068, 0x020C, 0x017A, 0x0012,
+ 0x0063, 0x0207, 0x0182, 0x0014,
+ 0x005E, 0x0203, 0x0189, 0x0016,
+ 0x0059, 0x01FE, 0x0191, 0x0018,
+ 0x0054, 0x01F9, 0x0198, 0x001B,
+ 0x0050, 0x01F4, 0x019F, 0x001D,
+ 0x004B, 0x01EF, 0x01A6, 0x0020,
+ 0x0047, 0x01EA, 0x01AC, 0x0023,
+ 0x0043, 0x01E4, 0x01B3, 0x0026,
+ 0x003F, 0x01DF, 0x01B9, 0x0029,
+ 0x003B, 0x01D9, 0x01C0, 0x002C,
+ 0x0037, 0x01D3, 0x01C6, 0x0030,
+ 0x0033, 0x01CD, 0x01CD, 0x0033,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_60[132] = {
+ 0x00C8, 0x026F, 0x00C9, 0x0000,
+ 0x00C0, 0x0270, 0x00D1, 0x0FFF,
+ 0x00B8, 0x0270, 0x00D9, 0x0FFF,
+ 0x00B1, 0x0270, 0x00E1, 0x0FFE,
+ 0x00A9, 0x026F, 0x00EB, 0x0FFD,
+ 0x00A2, 0x026E, 0x00F3, 0x0FFD,
+ 0x009A, 0x026D, 0x00FD, 0x0FFC,
+ 0x0093, 0x026C, 0x0105, 0x0FFC,
+ 0x008C, 0x026A, 0x010F, 0x0FFB,
+ 0x0085, 0x0268, 0x0118, 0x0FFB,
+ 0x007E, 0x0265, 0x0122, 0x0FFB,
+ 0x0078, 0x0263, 0x012A, 0x0FFB,
+ 0x0071, 0x0260, 0x0134, 0x0FFB,
+ 0x006B, 0x025C, 0x013E, 0x0FFB,
+ 0x0065, 0x0259, 0x0147, 0x0FFB,
+ 0x005F, 0x0255, 0x0151, 0x0FFB,
+ 0x0059, 0x0251, 0x015A, 0x0FFC,
+ 0x0054, 0x024D, 0x0163, 0x0FFC,
+ 0x004E, 0x0248, 0x016D, 0x0FFD,
+ 0x0049, 0x0243, 0x0176, 0x0FFE,
+ 0x0044, 0x023E, 0x017F, 0x0FFF,
+ 0x003F, 0x0238, 0x0189, 0x0000,
+ 0x003A, 0x0232, 0x0193, 0x0001,
+ 0x0036, 0x022C, 0x019C, 0x0002,
+ 0x0031, 0x0226, 0x01A5, 0x0004,
+ 0x002D, 0x021F, 0x01AF, 0x0005,
+ 0x0029, 0x0218, 0x01B8, 0x0007,
+ 0x0025, 0x0211, 0x01C1, 0x0009,
+ 0x0022, 0x020A, 0x01C9, 0x000B,
+ 0x001E, 0x0203, 0x01D2, 0x000D,
+ 0x001B, 0x01FB, 0x01DA, 0x0010,
+ 0x0018, 0x01F3, 0x01E3, 0x0012,
+ 0x0015, 0x01EB, 0x01EB, 0x0015,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_70[132] = {
+ 0x00A3, 0x02B9, 0x00A4, 0x0000,
+ 0x009A, 0x02BA, 0x00AD, 0x0FFF,
+ 0x0092, 0x02BA, 0x00B6, 0x0FFE,
+ 0x0089, 0x02BA, 0x00C1, 0x0FFC,
+ 0x0081, 0x02B9, 0x00CB, 0x0FFB,
+ 0x0079, 0x02B8, 0x00D5, 0x0FFA,
+ 0x0071, 0x02B7, 0x00DF, 0x0FF9,
+ 0x0069, 0x02B5, 0x00EA, 0x0FF8,
+ 0x0062, 0x02B3, 0x00F4, 0x0FF7,
+ 0x005B, 0x02B0, 0x00FF, 0x0FF6,
+ 0x0054, 0x02AD, 0x010B, 0x0FF4,
+ 0x004D, 0x02A9, 0x0117, 0x0FF3,
+ 0x0046, 0x02A5, 0x0123, 0x0FF2,
+ 0x0040, 0x02A1, 0x012D, 0x0FF2,
+ 0x003A, 0x029C, 0x0139, 0x0FF1,
+ 0x0034, 0x0297, 0x0145, 0x0FF0,
+ 0x002F, 0x0292, 0x0150, 0x0FEF,
+ 0x0029, 0x028C, 0x015C, 0x0FEF,
+ 0x0024, 0x0285, 0x0169, 0x0FEE,
+ 0x001F, 0x027F, 0x0174, 0x0FEE,
+ 0x001B, 0x0278, 0x017F, 0x0FEE,
+ 0x0016, 0x0270, 0x018D, 0x0FED,
+ 0x0012, 0x0268, 0x0199, 0x0FED,
+ 0x000E, 0x0260, 0x01A4, 0x0FEE,
+ 0x000B, 0x0258, 0x01AF, 0x0FEE,
+ 0x0007, 0x024F, 0x01BC, 0x0FEE,
+ 0x0004, 0x0246, 0x01C7, 0x0FEF,
+ 0x0001, 0x023D, 0x01D3, 0x0FEF,
+ 0x0FFE, 0x0233, 0x01DF, 0x0FF0,
+ 0x0FFC, 0x0229, 0x01EA, 0x0FF1,
+ 0x0FFA, 0x021F, 0x01F4, 0x0FF3,
+ 0x0FF8, 0x0215, 0x01FF, 0x0FF4,
+ 0x0FF6, 0x020A, 0x020A, 0x0FF6,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_80[132] = {
+ 0x0075, 0x0315, 0x0076, 0x0000,
+ 0x006C, 0x0316, 0x007F, 0x0FFF,
+ 0x0062, 0x0316, 0x008A, 0x0FFE,
+ 0x0059, 0x0315, 0x0096, 0x0FFC,
+ 0x0050, 0x0314, 0x00A1, 0x0FFB,
+ 0x0048, 0x0312, 0x00AD, 0x0FF9,
+ 0x0040, 0x0310, 0x00B8, 0x0FF8,
+ 0x0038, 0x030D, 0x00C5, 0x0FF6,
+ 0x0030, 0x030A, 0x00D1, 0x0FF5,
+ 0x0029, 0x0306, 0x00DE, 0x0FF3,
+ 0x0022, 0x0301, 0x00EB, 0x0FF2,
+ 0x001C, 0x02FC, 0x00F8, 0x0FF0,
+ 0x0015, 0x02F7, 0x0106, 0x0FEE,
+ 0x0010, 0x02F1, 0x0112, 0x0FED,
+ 0x000A, 0x02EA, 0x0121, 0x0FEB,
+ 0x0005, 0x02E3, 0x012F, 0x0FE9,
+ 0x0000, 0x02DB, 0x013D, 0x0FE8,
+ 0x0FFB, 0x02D3, 0x014C, 0x0FE6,
+ 0x0FF7, 0x02CA, 0x015A, 0x0FE5,
+ 0x0FF3, 0x02C1, 0x0169, 0x0FE3,
+ 0x0FF0, 0x02B7, 0x0177, 0x0FE2,
+ 0x0FEC, 0x02AD, 0x0186, 0x0FE1,
+ 0x0FE9, 0x02A2, 0x0196, 0x0FDF,
+ 0x0FE7, 0x0297, 0x01A4, 0x0FDE,
+ 0x0FE4, 0x028C, 0x01B3, 0x0FDD,
+ 0x0FE2, 0x0280, 0x01C2, 0x0FDC,
+ 0x0FE0, 0x0274, 0x01D0, 0x0FDC,
+ 0x0FDF, 0x0268, 0x01DE, 0x0FDB,
+ 0x0FDD, 0x025B, 0x01EE, 0x0FDA,
+ 0x0FDC, 0x024E, 0x01FC, 0x0FDA,
+ 0x0FDB, 0x0241, 0x020A, 0x0FDA,
+ 0x0FDB, 0x0233, 0x0218, 0x0FDA,
+ 0x0FDA, 0x0226, 0x0226, 0x0FDA,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_90[132] = {
+ 0x003F, 0x0383, 0x003E, 0x0000,
+ 0x0034, 0x0383, 0x004A, 0x0FFF,
+ 0x002B, 0x0383, 0x0054, 0x0FFE,
+ 0x0021, 0x0381, 0x0061, 0x0FFD,
+ 0x0019, 0x037F, 0x006C, 0x0FFC,
+ 0x0010, 0x037C, 0x0079, 0x0FFB,
+ 0x0008, 0x0378, 0x0086, 0x0FFA,
+ 0x0001, 0x0374, 0x0093, 0x0FF8,
+ 0x0FFA, 0x036E, 0x00A1, 0x0FF7,
+ 0x0FF3, 0x0368, 0x00B0, 0x0FF5,
+ 0x0FED, 0x0361, 0x00BF, 0x0FF3,
+ 0x0FE8, 0x035A, 0x00CD, 0x0FF1,
+ 0x0FE2, 0x0352, 0x00DC, 0x0FF0,
+ 0x0FDE, 0x0349, 0x00EB, 0x0FEE,
+ 0x0FD9, 0x033F, 0x00FC, 0x0FEC,
+ 0x0FD5, 0x0335, 0x010D, 0x0FE9,
+ 0x0FD2, 0x032A, 0x011D, 0x0FE7,
+ 0x0FCF, 0x031E, 0x012E, 0x0FE5,
+ 0x0FCC, 0x0312, 0x013F, 0x0FE3,
+ 0x0FCA, 0x0305, 0x0150, 0x0FE1,
+ 0x0FC8, 0x02F8, 0x0162, 0x0FDE,
+ 0x0FC6, 0x02EA, 0x0174, 0x0FDC,
+ 0x0FC5, 0x02DC, 0x0185, 0x0FDA,
+ 0x0FC4, 0x02CD, 0x0197, 0x0FD8,
+ 0x0FC3, 0x02BE, 0x01AA, 0x0FD5,
+ 0x0FC3, 0x02AF, 0x01BB, 0x0FD3,
+ 0x0FC3, 0x029F, 0x01CD, 0x0FD1,
+ 0x0FC3, 0x028E, 0x01E0, 0x0FCF,
+ 0x0FC3, 0x027E, 0x01F2, 0x0FCD,
+ 0x0FC4, 0x026D, 0x0203, 0x0FCC,
+ 0x0FC5, 0x025C, 0x0215, 0x0FCA,
+ 0x0FC6, 0x024B, 0x0227, 0x0FC8,
+ 0x0FC7, 0x0239, 0x0239, 0x0FC7,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_1_00[132] = {
+ 0x0000, 0x0400, 0x0000, 0x0000,
+ 0x0FF6, 0x03FF, 0x000B, 0x0000,
+ 0x0FED, 0x03FE, 0x0015, 0x0000,
+ 0x0FE4, 0x03FB, 0x0022, 0x0FFF,
+ 0x0FDC, 0x03F7, 0x002E, 0x0FFF,
+ 0x0FD5, 0x03F2, 0x003B, 0x0FFE,
+ 0x0FCE, 0x03EC, 0x0048, 0x0FFE,
+ 0x0FC8, 0x03E5, 0x0056, 0x0FFD,
+ 0x0FC3, 0x03DC, 0x0065, 0x0FFC,
+ 0x0FBE, 0x03D3, 0x0075, 0x0FFA,
+ 0x0FB9, 0x03C9, 0x0085, 0x0FF9,
+ 0x0FB6, 0x03BE, 0x0094, 0x0FF8,
+ 0x0FB2, 0x03B2, 0x00A6, 0x0FF6,
+ 0x0FB0, 0x03A5, 0x00B7, 0x0FF4,
+ 0x0FAD, 0x0397, 0x00CA, 0x0FF2,
+ 0x0FAB, 0x0389, 0x00DC, 0x0FF0,
+ 0x0FAA, 0x0379, 0x00EF, 0x0FEE,
+ 0x0FA9, 0x0369, 0x0102, 0x0FEC,
+ 0x0FA9, 0x0359, 0x0115, 0x0FE9,
+ 0x0FA9, 0x0348, 0x0129, 0x0FE6,
+ 0x0FA9, 0x0336, 0x013D, 0x0FE4,
+ 0x0FA9, 0x0323, 0x0153, 0x0FE1,
+ 0x0FAA, 0x0310, 0x0168, 0x0FDE,
+ 0x0FAC, 0x02FD, 0x017C, 0x0FDB,
+ 0x0FAD, 0x02E9, 0x0192, 0x0FD8,
+ 0x0FAF, 0x02D5, 0x01A7, 0x0FD5,
+ 0x0FB1, 0x02C0, 0x01BD, 0x0FD2,
+ 0x0FB3, 0x02AC, 0x01D2, 0x0FCF,
+ 0x0FB5, 0x0296, 0x01E9, 0x0FCC,
+ 0x0FB8, 0x0281, 0x01FE, 0x0FC9,
+ 0x0FBA, 0x026C, 0x0214, 0x0FC6,
+ 0x0FBD, 0x0256, 0x022A, 0x0FC3,
+ 0x0FC0, 0x0240, 0x0240, 0x0FC0,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_30[198] = {
+ 0x004B, 0x0100, 0x0169, 0x0101, 0x004B, 0x0000,
+ 0x0049, 0x00FD, 0x0169, 0x0103, 0x004E, 0x0000,
+ 0x0047, 0x00FA, 0x0169, 0x0106, 0x0050, 0x0000,
+ 0x0045, 0x00F7, 0x0168, 0x0109, 0x0052, 0x0001,
+ 0x0043, 0x00F5, 0x0168, 0x010B, 0x0054, 0x0001,
+ 0x0040, 0x00F2, 0x0168, 0x010E, 0x0057, 0x0001,
+ 0x003E, 0x00EF, 0x0168, 0x0110, 0x0059, 0x0002,
+ 0x003C, 0x00EC, 0x0167, 0x0113, 0x005C, 0x0002,
+ 0x003A, 0x00E9, 0x0167, 0x0116, 0x005E, 0x0002,
+ 0x0038, 0x00E6, 0x0166, 0x0118, 0x0061, 0x0003,
+ 0x0036, 0x00E3, 0x0165, 0x011C, 0x0063, 0x0003,
+ 0x0034, 0x00E0, 0x0165, 0x011D, 0x0066, 0x0004,
+ 0x0033, 0x00DD, 0x0164, 0x0120, 0x0068, 0x0004,
+ 0x0031, 0x00DA, 0x0163, 0x0122, 0x006B, 0x0005,
+ 0x002F, 0x00D7, 0x0163, 0x0125, 0x006D, 0x0005,
+ 0x002D, 0x00D3, 0x0162, 0x0128, 0x0070, 0x0006,
+ 0x002B, 0x00D0, 0x0161, 0x012A, 0x0073, 0x0007,
+ 0x002A, 0x00CD, 0x0160, 0x012D, 0x0075, 0x0007,
+ 0x0028, 0x00CA, 0x015F, 0x012F, 0x0078, 0x0008,
+ 0x0026, 0x00C7, 0x015E, 0x0131, 0x007B, 0x0009,
+ 0x0025, 0x00C4, 0x015D, 0x0133, 0x007E, 0x0009,
+ 0x0023, 0x00C1, 0x015C, 0x0136, 0x0080, 0x000A,
+ 0x0022, 0x00BE, 0x015A, 0x0138, 0x0083, 0x000B,
+ 0x0020, 0x00BB, 0x0159, 0x013A, 0x0086, 0x000C,
+ 0x001F, 0x00B8, 0x0158, 0x013B, 0x0089, 0x000D,
+ 0x001E, 0x00B5, 0x0156, 0x013E, 0x008C, 0x000D,
+ 0x001C, 0x00B2, 0x0155, 0x0140, 0x008F, 0x000E,
+ 0x001B, 0x00AF, 0x0153, 0x0143, 0x0091, 0x000F,
+ 0x0019, 0x00AC, 0x0152, 0x0145, 0x0094, 0x0010,
+ 0x0018, 0x00A9, 0x0150, 0x0147, 0x0097, 0x0011,
+ 0x0017, 0x00A6, 0x014F, 0x0148, 0x009A, 0x0012,
+ 0x0016, 0x00A3, 0x014D, 0x0149, 0x009D, 0x0014,
+ 0x0015, 0x00A0, 0x014B, 0x014B, 0x00A0, 0x0015,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_40[198] = {
+ 0x0028, 0x0106, 0x01A3, 0x0107, 0x0028, 0x0000,
+ 0x0026, 0x0102, 0x01A3, 0x010A, 0x002B, 0x0000,
+ 0x0024, 0x00FE, 0x01A3, 0x010F, 0x002D, 0x0FFF,
+ 0x0022, 0x00FA, 0x01A3, 0x0113, 0x002F, 0x0FFF,
+ 0x0021, 0x00F6, 0x01A3, 0x0116, 0x0031, 0x0FFF,
+ 0x001F, 0x00F2, 0x01A2, 0x011B, 0x0034, 0x0FFE,
+ 0x001D, 0x00EE, 0x01A2, 0x011F, 0x0036, 0x0FFE,
+ 0x001B, 0x00EA, 0x01A1, 0x0123, 0x0039, 0x0FFE,
+ 0x0019, 0x00E6, 0x01A1, 0x0127, 0x003B, 0x0FFE,
+ 0x0018, 0x00E2, 0x01A0, 0x012A, 0x003E, 0x0FFE,
+ 0x0016, 0x00DE, 0x01A0, 0x012E, 0x0041, 0x0FFD,
+ 0x0015, 0x00DA, 0x019F, 0x0132, 0x0043, 0x0FFD,
+ 0x0013, 0x00D6, 0x019E, 0x0136, 0x0046, 0x0FFD,
+ 0x0012, 0x00D2, 0x019D, 0x0139, 0x0049, 0x0FFD,
+ 0x0010, 0x00CE, 0x019C, 0x013D, 0x004C, 0x0FFD,
+ 0x000F, 0x00CA, 0x019A, 0x0141, 0x004F, 0x0FFD,
+ 0x000E, 0x00C6, 0x0199, 0x0144, 0x0052, 0x0FFD,
+ 0x000D, 0x00C2, 0x0197, 0x0148, 0x0055, 0x0FFD,
+ 0x000B, 0x00BE, 0x0196, 0x014C, 0x0058, 0x0FFD,
+ 0x000A, 0x00BA, 0x0195, 0x014F, 0x005B, 0x0FFD,
+ 0x0009, 0x00B6, 0x0193, 0x0153, 0x005E, 0x0FFD,
+ 0x0008, 0x00B2, 0x0191, 0x0157, 0x0061, 0x0FFD,
+ 0x0007, 0x00AE, 0x0190, 0x015A, 0x0064, 0x0FFD,
+ 0x0006, 0x00AA, 0x018E, 0x015D, 0x0068, 0x0FFD,
+ 0x0005, 0x00A6, 0x018C, 0x0161, 0x006B, 0x0FFD,
+ 0x0005, 0x00A2, 0x0189, 0x0164, 0x006F, 0x0FFD,
+ 0x0004, 0x009E, 0x0187, 0x0167, 0x0072, 0x0FFE,
+ 0x0003, 0x009A, 0x0185, 0x016B, 0x0075, 0x0FFE,
+ 0x0002, 0x0096, 0x0183, 0x016E, 0x0079, 0x0FFE,
+ 0x0002, 0x0093, 0x0180, 0x016F, 0x007D, 0x0FFF,
+ 0x0001, 0x008F, 0x017E, 0x0173, 0x0080, 0x0FFF,
+ 0x0001, 0x008B, 0x017B, 0x0175, 0x0084, 0x0000,
+ 0x0000, 0x0087, 0x0179, 0x0179, 0x0087, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_50[198] = {
+ 0x0000, 0x0107, 0x01F3, 0x0106, 0x0000, 0x0000,
+ 0x0FFE, 0x0101, 0x01F3, 0x010D, 0x0002, 0x0FFF,
+ 0x0FFD, 0x00FB, 0x01F3, 0x0113, 0x0003, 0x0FFF,
+ 0x0FFC, 0x00F6, 0x01F3, 0x0118, 0x0005, 0x0FFE,
+ 0x0FFA, 0x00F0, 0x01F3, 0x011E, 0x0007, 0x0FFE,
+ 0x0FF9, 0x00EB, 0x01F2, 0x0124, 0x0009, 0x0FFD,
+ 0x0FF8, 0x00E5, 0x01F2, 0x0129, 0x000B, 0x0FFD,
+ 0x0FF7, 0x00E0, 0x01F1, 0x012F, 0x000D, 0x0FFC,
+ 0x0FF6, 0x00DA, 0x01F0, 0x0135, 0x0010, 0x0FFB,
+ 0x0FF5, 0x00D4, 0x01EF, 0x013B, 0x0012, 0x0FFB,
+ 0x0FF4, 0x00CF, 0x01EE, 0x0141, 0x0014, 0x0FFA,
+ 0x0FF3, 0x00C9, 0x01ED, 0x0147, 0x0017, 0x0FF9,
+ 0x0FF2, 0x00C4, 0x01EB, 0x014C, 0x001A, 0x0FF9,
+ 0x0FF1, 0x00BF, 0x01EA, 0x0152, 0x001C, 0x0FF8,
+ 0x0FF1, 0x00B9, 0x01E8, 0x0157, 0x001F, 0x0FF8,
+ 0x0FF0, 0x00B4, 0x01E6, 0x015D, 0x0022, 0x0FF7,
+ 0x0FF0, 0x00AE, 0x01E4, 0x0163, 0x0025, 0x0FF6,
+ 0x0FEF, 0x00A9, 0x01E2, 0x0168, 0x0028, 0x0FF6,
+ 0x0FEF, 0x00A4, 0x01DF, 0x016E, 0x002B, 0x0FF5,
+ 0x0FEF, 0x009F, 0x01DD, 0x0172, 0x002E, 0x0FF5,
+ 0x0FEE, 0x009A, 0x01DA, 0x0178, 0x0032, 0x0FF4,
+ 0x0FEE, 0x0094, 0x01D8, 0x017E, 0x0035, 0x0FF3,
+ 0x0FEE, 0x008F, 0x01D5, 0x0182, 0x0039, 0x0FF3,
+ 0x0FEE, 0x008A, 0x01D2, 0x0188, 0x003C, 0x0FF2,
+ 0x0FEE, 0x0085, 0x01CF, 0x018C, 0x0040, 0x0FF2,
+ 0x0FEE, 0x0081, 0x01CB, 0x0191, 0x0044, 0x0FF1,
+ 0x0FEE, 0x007C, 0x01C8, 0x0196, 0x0047, 0x0FF1,
+ 0x0FEE, 0x0077, 0x01C4, 0x019C, 0x004B, 0x0FF0,
+ 0x0FEE, 0x0072, 0x01C1, 0x01A0, 0x004F, 0x0FF0,
+ 0x0FEE, 0x006E, 0x01BD, 0x01A4, 0x0053, 0x0FF0,
+ 0x0FEE, 0x0069, 0x01B9, 0x01A9, 0x0058, 0x0FEF,
+ 0x0FEE, 0x0065, 0x01B5, 0x01AD, 0x005C, 0x0FEF,
+ 0x0FEF, 0x0060, 0x01B1, 0x01B1, 0x0060, 0x0FEF,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_60[198] = {
+ 0x0FD9, 0x00FB, 0x0258, 0x00FB, 0x0FD9, 0x0000,
+ 0x0FD9, 0x00F3, 0x0258, 0x0102, 0x0FDA, 0x0000,
+ 0x0FD8, 0x00EB, 0x0258, 0x010B, 0x0FDB, 0x0FFF,
+ 0x0FD8, 0x00E3, 0x0258, 0x0112, 0x0FDC, 0x0FFF,
+ 0x0FD8, 0x00DC, 0x0257, 0x011B, 0x0FDC, 0x0FFE,
+ 0x0FD7, 0x00D4, 0x0256, 0x0123, 0x0FDE, 0x0FFE,
+ 0x0FD7, 0x00CD, 0x0255, 0x012B, 0x0FDF, 0x0FFD,
+ 0x0FD7, 0x00C5, 0x0254, 0x0133, 0x0FE0, 0x0FFD,
+ 0x0FD7, 0x00BE, 0x0252, 0x013C, 0x0FE1, 0x0FFC,
+ 0x0FD7, 0x00B6, 0x0251, 0x0143, 0x0FE3, 0x0FFC,
+ 0x0FD8, 0x00AF, 0x024F, 0x014B, 0x0FE4, 0x0FFB,
+ 0x0FD8, 0x00A8, 0x024C, 0x0154, 0x0FE6, 0x0FFA,
+ 0x0FD8, 0x00A1, 0x024A, 0x015B, 0x0FE8, 0x0FFA,
+ 0x0FD9, 0x009A, 0x0247, 0x0163, 0x0FEA, 0x0FF9,
+ 0x0FD9, 0x0093, 0x0244, 0x016C, 0x0FEC, 0x0FF8,
+ 0x0FD9, 0x008C, 0x0241, 0x0174, 0x0FEF, 0x0FF7,
+ 0x0FDA, 0x0085, 0x023E, 0x017B, 0x0FF1, 0x0FF7,
+ 0x0FDB, 0x007F, 0x023A, 0x0183, 0x0FF3, 0x0FF6,
+ 0x0FDB, 0x0078, 0x0237, 0x018B, 0x0FF6, 0x0FF5,
+ 0x0FDC, 0x0072, 0x0233, 0x0192, 0x0FF9, 0x0FF4,
+ 0x0FDD, 0x006C, 0x022F, 0x0199, 0x0FFC, 0x0FF3,
+ 0x0FDD, 0x0065, 0x022A, 0x01A3, 0x0FFF, 0x0FF2,
+ 0x0FDE, 0x005F, 0x0226, 0x01AA, 0x0002, 0x0FF1,
+ 0x0FDF, 0x005A, 0x0221, 0x01B0, 0x0006, 0x0FF0,
+ 0x0FE0, 0x0054, 0x021C, 0x01B7, 0x0009, 0x0FF0,
+ 0x0FE1, 0x004E, 0x0217, 0x01BE, 0x000D, 0x0FEF,
+ 0x0FE2, 0x0048, 0x0212, 0x01C6, 0x0010, 0x0FEE,
+ 0x0FE3, 0x0043, 0x020C, 0x01CD, 0x0014, 0x0FED,
+ 0x0FE4, 0x003E, 0x0207, 0x01D3, 0x0018, 0x0FEC,
+ 0x0FE5, 0x0039, 0x0200, 0x01DA, 0x001D, 0x0FEB,
+ 0x0FE6, 0x0034, 0x01FA, 0x01E1, 0x0021, 0x0FEA,
+ 0x0FE7, 0x002F, 0x01F5, 0x01E7, 0x0025, 0x0FE9,
+ 0x0FE8, 0x002A, 0x01EE, 0x01EE, 0x002A, 0x0FE8,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_70[198] = {
+ 0x0FC0, 0x00DA, 0x02CC, 0x00DA, 0x0FC0, 0x0000,
+ 0x0FC1, 0x00D0, 0x02CC, 0x00E4, 0x0FBF, 0x0000,
+ 0x0FC2, 0x00C6, 0x02CB, 0x00EF, 0x0FBE, 0x0000,
+ 0x0FC3, 0x00BC, 0x02CA, 0x00F9, 0x0FBE, 0x0000,
+ 0x0FC4, 0x00B2, 0x02C9, 0x0104, 0x0FBD, 0x0000,
+ 0x0FC5, 0x00A8, 0x02C7, 0x010F, 0x0FBD, 0x0000,
+ 0x0FC7, 0x009F, 0x02C5, 0x0119, 0x0FBC, 0x0000,
+ 0x0FC8, 0x0095, 0x02C3, 0x0124, 0x0FBC, 0x0000,
+ 0x0FC9, 0x008C, 0x02C0, 0x012F, 0x0FBC, 0x0000,
+ 0x0FCB, 0x0083, 0x02BD, 0x0139, 0x0FBC, 0x0000,
+ 0x0FCC, 0x007A, 0x02BA, 0x0144, 0x0FBC, 0x0000,
+ 0x0FCE, 0x0072, 0x02B6, 0x014D, 0x0FBD, 0x0000,
+ 0x0FD0, 0x0069, 0x02B2, 0x0159, 0x0FBD, 0x0FFF,
+ 0x0FD1, 0x0061, 0x02AD, 0x0164, 0x0FBE, 0x0FFF,
+ 0x0FD3, 0x0059, 0x02A9, 0x016E, 0x0FBF, 0x0FFE,
+ 0x0FD4, 0x0051, 0x02A4, 0x017A, 0x0FBF, 0x0FFE,
+ 0x0FD6, 0x0049, 0x029E, 0x0184, 0x0FC1, 0x0FFE,
+ 0x0FD8, 0x0042, 0x0299, 0x018E, 0x0FC2, 0x0FFD,
+ 0x0FD9, 0x003A, 0x0293, 0x019B, 0x0FC3, 0x0FFC,
+ 0x0FDB, 0x0033, 0x028D, 0x01A4, 0x0FC5, 0x0FFC,
+ 0x0FDC, 0x002D, 0x0286, 0x01AF, 0x0FC7, 0x0FFB,
+ 0x0FDE, 0x0026, 0x0280, 0x01BA, 0x0FC8, 0x0FFA,
+ 0x0FE0, 0x001F, 0x0279, 0x01C4, 0x0FCB, 0x0FF9,
+ 0x0FE1, 0x0019, 0x0272, 0x01CE, 0x0FCD, 0x0FF9,
+ 0x0FE3, 0x0013, 0x026A, 0x01D9, 0x0FCF, 0x0FF8,
+ 0x0FE4, 0x000D, 0x0263, 0x01E3, 0x0FD2, 0x0FF7,
+ 0x0FE6, 0x0008, 0x025B, 0x01EC, 0x0FD5, 0x0FF6,
+ 0x0FE7, 0x0002, 0x0253, 0x01F7, 0x0FD8, 0x0FF5,
+ 0x0FE9, 0x0FFD, 0x024A, 0x0202, 0x0FDB, 0x0FF3,
+ 0x0FEA, 0x0FF8, 0x0242, 0x020B, 0x0FDF, 0x0FF2,
+ 0x0FEC, 0x0FF3, 0x0239, 0x0215, 0x0FE2, 0x0FF1,
+ 0x0FED, 0x0FEF, 0x0230, 0x021E, 0x0FE6, 0x0FF0,
+ 0x0FEF, 0x0FEB, 0x0226, 0x0226, 0x0FEB, 0x0FEF,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_80[198] = {
+ 0x0FBF, 0x00A1, 0x0340, 0x00A1, 0x0FBF, 0x0000,
+ 0x0FC1, 0x0095, 0x0340, 0x00AD, 0x0FBC, 0x0001,
+ 0x0FC4, 0x0089, 0x033E, 0x00BA, 0x0FBA, 0x0001,
+ 0x0FC6, 0x007D, 0x033D, 0x00C6, 0x0FB8, 0x0002,
+ 0x0FC9, 0x0072, 0x033A, 0x00D3, 0x0FB6, 0x0002,
+ 0x0FCC, 0x0067, 0x0338, 0x00DF, 0x0FB3, 0x0003,
+ 0x0FCE, 0x005C, 0x0334, 0x00EE, 0x0FB1, 0x0003,
+ 0x0FD1, 0x0051, 0x0331, 0x00FA, 0x0FAF, 0x0004,
+ 0x0FD3, 0x0047, 0x032D, 0x0108, 0x0FAD, 0x0004,
+ 0x0FD6, 0x003D, 0x0328, 0x0116, 0x0FAB, 0x0004,
+ 0x0FD8, 0x0033, 0x0323, 0x0123, 0x0FAA, 0x0005,
+ 0x0FDB, 0x002A, 0x031D, 0x0131, 0x0FA8, 0x0005,
+ 0x0FDD, 0x0021, 0x0317, 0x013F, 0x0FA7, 0x0005,
+ 0x0FDF, 0x0018, 0x0311, 0x014D, 0x0FA5, 0x0006,
+ 0x0FE2, 0x0010, 0x030A, 0x015A, 0x0FA4, 0x0006,
+ 0x0FE4, 0x0008, 0x0302, 0x0169, 0x0FA3, 0x0006,
+ 0x0FE6, 0x0000, 0x02FB, 0x0177, 0x0FA2, 0x0006,
+ 0x0FE8, 0x0FF9, 0x02F3, 0x0185, 0x0FA1, 0x0006,
+ 0x0FEB, 0x0FF1, 0x02EA, 0x0193, 0x0FA1, 0x0006,
+ 0x0FED, 0x0FEB, 0x02E1, 0x01A1, 0x0FA0, 0x0006,
+ 0x0FEE, 0x0FE4, 0x02D8, 0x01B0, 0x0FA0, 0x0006,
+ 0x0FF0, 0x0FDE, 0x02CE, 0x01BE, 0x0FA0, 0x0006,
+ 0x0FF2, 0x0FD8, 0x02C5, 0x01CB, 0x0FA0, 0x0006,
+ 0x0FF4, 0x0FD3, 0x02BA, 0x01D8, 0x0FA1, 0x0006,
+ 0x0FF6, 0x0FCD, 0x02B0, 0x01E7, 0x0FA1, 0x0005,
+ 0x0FF7, 0x0FC8, 0x02A5, 0x01F5, 0x0FA2, 0x0005,
+ 0x0FF9, 0x0FC4, 0x029A, 0x0202, 0x0FA3, 0x0004,
+ 0x0FFA, 0x0FC0, 0x028E, 0x0210, 0x0FA4, 0x0004,
+ 0x0FFB, 0x0FBC, 0x0283, 0x021D, 0x0FA6, 0x0003,
+ 0x0FFD, 0x0FB8, 0x0276, 0x022A, 0x0FA8, 0x0003,
+ 0x0FFE, 0x0FB4, 0x026B, 0x0237, 0x0FAA, 0x0002,
+ 0x0FFF, 0x0FB1, 0x025E, 0x0245, 0x0FAC, 0x0001,
+ 0x0000, 0x0FAE, 0x0252, 0x0252, 0x0FAE, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_90[198] = {
+ 0x0FD8, 0x0055, 0x03A7, 0x0054, 0x0FD8, 0x0000,
+ 0x0FDB, 0x0047, 0x03A7, 0x0063, 0x0FD4, 0x0000,
+ 0x0FDF, 0x003B, 0x03A5, 0x006F, 0x0FD1, 0x0001,
+ 0x0FE2, 0x002E, 0x03A3, 0x007E, 0x0FCD, 0x0002,
+ 0x0FE5, 0x0022, 0x03A0, 0x008D, 0x0FCA, 0x0002,
+ 0x0FE8, 0x0017, 0x039D, 0x009B, 0x0FC6, 0x0003,
+ 0x0FEB, 0x000C, 0x0398, 0x00AC, 0x0FC2, 0x0003,
+ 0x0FEE, 0x0001, 0x0394, 0x00BA, 0x0FBF, 0x0004,
+ 0x0FF1, 0x0FF7, 0x038E, 0x00CA, 0x0FBB, 0x0005,
+ 0x0FF4, 0x0FED, 0x0388, 0x00DA, 0x0FB8, 0x0005,
+ 0x0FF6, 0x0FE4, 0x0381, 0x00EB, 0x0FB4, 0x0006,
+ 0x0FF9, 0x0FDB, 0x037A, 0x00FA, 0x0FB1, 0x0007,
+ 0x0FFB, 0x0FD3, 0x0372, 0x010B, 0x0FAD, 0x0008,
+ 0x0FFD, 0x0FCB, 0x0369, 0x011D, 0x0FAA, 0x0008,
+ 0x0000, 0x0FC3, 0x0360, 0x012E, 0x0FA6, 0x0009,
+ 0x0002, 0x0FBC, 0x0356, 0x013F, 0x0FA3, 0x000A,
+ 0x0003, 0x0FB6, 0x034C, 0x0150, 0x0FA0, 0x000B,
+ 0x0005, 0x0FB0, 0x0341, 0x0162, 0x0F9D, 0x000B,
+ 0x0007, 0x0FAA, 0x0336, 0x0173, 0x0F9A, 0x000C,
+ 0x0008, 0x0FA5, 0x032A, 0x0185, 0x0F97, 0x000D,
+ 0x000A, 0x0FA0, 0x031E, 0x0197, 0x0F94, 0x000D,
+ 0x000B, 0x0F9B, 0x0311, 0x01A9, 0x0F92, 0x000E,
+ 0x000C, 0x0F97, 0x0303, 0x01BC, 0x0F8F, 0x000F,
+ 0x000D, 0x0F94, 0x02F6, 0x01CD, 0x0F8D, 0x000F,
+ 0x000E, 0x0F91, 0x02E8, 0x01DE, 0x0F8B, 0x0010,
+ 0x000F, 0x0F8E, 0x02D9, 0x01F1, 0x0F89, 0x0010,
+ 0x0010, 0x0F8B, 0x02CA, 0x0202, 0x0F88, 0x0011,
+ 0x0010, 0x0F89, 0x02BB, 0x0214, 0x0F87, 0x0011,
+ 0x0011, 0x0F87, 0x02AB, 0x0226, 0x0F86, 0x0011,
+ 0x0011, 0x0F86, 0x029C, 0x0236, 0x0F85, 0x0012,
+ 0x0011, 0x0F85, 0x028B, 0x0249, 0x0F84, 0x0012,
+ 0x0012, 0x0F84, 0x027B, 0x0259, 0x0F84, 0x0012,
+ 0x0012, 0x0F84, 0x026A, 0x026A, 0x0F84, 0x0012,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_1_00[198] = {
+ 0x0000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000,
+ 0x0003, 0x0FF3, 0x0400, 0x000D, 0x0FFD, 0x0000,
+ 0x0006, 0x0FE7, 0x03FE, 0x001C, 0x0FF9, 0x0000,
+ 0x0009, 0x0FDB, 0x03FC, 0x002B, 0x0FF5, 0x0000,
+ 0x000C, 0x0FD0, 0x03F9, 0x003A, 0x0FF1, 0x0000,
+ 0x000E, 0x0FC5, 0x03F5, 0x004A, 0x0FED, 0x0001,
+ 0x0011, 0x0FBB, 0x03F0, 0x005A, 0x0FE9, 0x0001,
+ 0x0013, 0x0FB2, 0x03EB, 0x006A, 0x0FE5, 0x0001,
+ 0x0015, 0x0FA9, 0x03E4, 0x007B, 0x0FE1, 0x0002,
+ 0x0017, 0x0FA1, 0x03DD, 0x008D, 0x0FDC, 0x0002,
+ 0x0018, 0x0F99, 0x03D4, 0x00A0, 0x0FD8, 0x0003,
+ 0x001A, 0x0F92, 0x03CB, 0x00B2, 0x0FD3, 0x0004,
+ 0x001B, 0x0F8C, 0x03C1, 0x00C6, 0x0FCE, 0x0004,
+ 0x001C, 0x0F86, 0x03B7, 0x00D9, 0x0FC9, 0x0005,
+ 0x001D, 0x0F80, 0x03AB, 0x00EE, 0x0FC4, 0x0006,
+ 0x001E, 0x0F7C, 0x039F, 0x0101, 0x0FBF, 0x0007,
+ 0x001F, 0x0F78, 0x0392, 0x0115, 0x0FBA, 0x0008,
+ 0x001F, 0x0F74, 0x0385, 0x012B, 0x0FB5, 0x0008,
+ 0x0020, 0x0F71, 0x0376, 0x0140, 0x0FB0, 0x0009,
+ 0x0020, 0x0F6E, 0x0367, 0x0155, 0x0FAB, 0x000B,
+ 0x0020, 0x0F6C, 0x0357, 0x016B, 0x0FA6, 0x000C,
+ 0x0020, 0x0F6A, 0x0347, 0x0180, 0x0FA2, 0x000D,
+ 0x0020, 0x0F69, 0x0336, 0x0196, 0x0F9D, 0x000E,
+ 0x0020, 0x0F69, 0x0325, 0x01AB, 0x0F98, 0x000F,
+ 0x001F, 0x0F68, 0x0313, 0x01C3, 0x0F93, 0x0010,
+ 0x001F, 0x0F69, 0x0300, 0x01D8, 0x0F8F, 0x0011,
+ 0x001E, 0x0F69, 0x02ED, 0x01EF, 0x0F8B, 0x0012,
+ 0x001D, 0x0F6A, 0x02D9, 0x0205, 0x0F87, 0x0014,
+ 0x001D, 0x0F6C, 0x02C5, 0x021A, 0x0F83, 0x0015,
+ 0x001C, 0x0F6E, 0x02B1, 0x0230, 0x0F7F, 0x0016,
+ 0x001B, 0x0F70, 0x029C, 0x0247, 0x0F7B, 0x0017,
+ 0x001A, 0x0F72, 0x0287, 0x025D, 0x0F78, 0x0018,
+ 0x0019, 0x0F75, 0x0272, 0x0272, 0x0F75, 0x0019,
+};
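The `<CoefQuant> S1.10` header lines describe the storage format: each coefficient is a 12-bit two's-complement fixed-point number with one sign bit, one integer bit and ten fractional bits. 0x0400 is therefore exactly 1.0 — see the identity phase `0x0000, 0x0000, 0x0400, ...` that opens the 1.00 table — and 0x0FFF is -1/1024. A decoding sketch for offline inspection (editorial; the kernel code itself avoids floating point):

	/* Decode an S1.10 coefficient for inspection, using the same 12-bit
	 * sign extension as in the normalization check above.
	 * 0x0400 -> 1.0, 0x0FFF -> -1/1024 = -0.0009765625. */
	static double s1_10_to_double(uint16_t coeff)
	{
		int16_t v = (int16_t)(coeff << 4) >> 4;

		return v / 1024.0;
	}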
+
+/* Converted scaler coeff tables from S1.10 to S1.12 */
+static uint16_t easf_filter_3tap_64p_ratio_0_30_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_40_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_50_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_60_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_70_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_80_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_90_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_1_00_s1_12[99];
+static uint16_t easf_filter_4tap_64p_ratio_0_30_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_40_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_50_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_60_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_70_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_80_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_90_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_1_00_s1_12[132];
+static uint16_t easf_filter_6tap_64p_ratio_0_30_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_40_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_50_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_60_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_70_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_80_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_90_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_1_00_s1_12[198];
+
+struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0002},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0002},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
+ {3, 10, 0x4100},
+ {4, 10, 0x4100},
+ {5, 10, 0x4100},
+ {6, 10, 0x4100},
+ {7, 10, 0x4100},
+ {8, 10, 0x4100},
+ {9, 10, 0x4100},
+ {1, 1, 0x4100},
+ {-1, -1, 0x4100},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
+ {3, 10, 0x4000},
+ {4, 10, 0x4000},
+ {5, 10, 0x4000},
+ {6, 10, 0x4000},
+ {7, 10, 0x4000},
+ {8, 10, 0x4000},
+ {9, 10, 0x4000},
+ {1, 1, 0x4000},
+ {-1, -1, 0x4000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x251F},
+ {5, 10, 0x291F},
+ {6, 10, 0xA51F},
+ {7, 10, 0xA51F},
+ {8, 10, 0xAA66},
+ {9, 10, 0xA51F},
+ {1, 1, 0xA640},
+ {-1, -1, 0xA640},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x9600},
+ {5, 10, 0xA460},
+ {6, 10, 0xA8E0},
+ {7, 10, 0xAC00},
+ {8, 10, 0xAD20},
+ {9, 10, 0xAFC0},
+ {1, 1, 0xB058},
+ {-1, -1, 0xB058},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
+ {3, 10, 0x4100},
+ {4, 10, 0x4100},
+ {5, 10, 0x4100},
+ {6, 10, 0x4100},
+ {7, 10, 0x4100},
+ {8, 10, 0x4100},
+ {9, 10, 0x4100},
+ {1, 1, 0x4100},
+ {-1, -1, 0x4100},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
+ {3, 10, 0x4000},
+ {4, 10, 0x4000},
+ {5, 10, 0x4000},
+ {6, 10, 0x4000},
+ {7, 10, 0x4000},
+ {8, 10, 0x4000},
+ {9, 10, 0x4000},
+ {1, 1, 0x4000},
+ {-1, -1, 0x4000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x9900},
+ {7, 10, 0xA100},
+ {8, 10, 0xA8C0},
+ {9, 10, 0xAB20},
+ {1, 1, 0xAC00},
+ {-1, -1, 0xAC00},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x4100},
+ {9, 10, 0x9F00},
+ {1, 1, 0xA4C0},
+ {-1, -1, 0xA8D8},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x4000},
+ {9, 10, 0x24FE},
+ {1, 1, 0x2D64},
+ {-1, -1, 0x3ADB},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
+ {3, 10, 0x3800},
+ {4, 10, 0x3800},
+ {5, 10, 0x3800},
+ {6, 10, 0x3800},
+ {7, 10, 0x3800},
+ {8, 10, 0x3886},
+ {9, 10, 0x3940},
+ {1, 1, 0x3A4E},
+ {-1, -1, 0x3B66},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
+ {3, 10, 0x3800},
+ {4, 10, 0x3800},
+ {5, 10, 0x3800},
+ {6, 10, 0x3800},
+ {7, 10, 0x3800},
+ {8, 10, 0x36F4},
+ {9, 10, 0x359C},
+ {1, 1, 0x3360},
+ {-1, -1, 0x2F20},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x359C},
+ {1, 1, 0x31F0},
+ {-1, -1, 0x1F00},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x9F00},
+ {1, 1, 0xA400},
+ {-1, -1, 0x9E00},
+};
+
+void spl_init_easf_filter_coeffs(void)
+{
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_30,
+ easf_filter_3tap_64p_ratio_0_30_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_40,
+ easf_filter_3tap_64p_ratio_0_40_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_50,
+ easf_filter_3tap_64p_ratio_0_50_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_60,
+ easf_filter_3tap_64p_ratio_0_60_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_70,
+ easf_filter_3tap_64p_ratio_0_70_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_80,
+ easf_filter_3tap_64p_ratio_0_80_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_90,
+ easf_filter_3tap_64p_ratio_0_90_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_1_00,
+ easf_filter_3tap_64p_ratio_1_00_s1_12, 3);
+
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_30,
+ easf_filter_4tap_64p_ratio_0_30_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_40,
+ easf_filter_4tap_64p_ratio_0_40_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_50,
+ easf_filter_4tap_64p_ratio_0_50_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_60,
+ easf_filter_4tap_64p_ratio_0_60_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_70,
+ easf_filter_4tap_64p_ratio_0_70_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_80,
+ easf_filter_4tap_64p_ratio_0_80_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_90,
+ easf_filter_4tap_64p_ratio_0_90_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_1_00,
+ easf_filter_4tap_64p_ratio_1_00_s1_12, 4);
+
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_30,
+ easf_filter_6tap_64p_ratio_0_30_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_40,
+ easf_filter_6tap_64p_ratio_0_40_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_50,
+ easf_filter_6tap_64p_ratio_0_50_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_60,
+ easf_filter_6tap_64p_ratio_0_60_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_70,
+ easf_filter_6tap_64p_ratio_0_70_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_80,
+ easf_filter_6tap_64p_ratio_0_80_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_90,
+ easf_filter_6tap_64p_ratio_0_90_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_1_00,
+ easf_filter_6tap_64p_ratio_1_00_s1_12, 6);
+}
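convert_filter_s1_10_to_s1_12() is defined earlier in this patch, outside this excerpt. Since S1.12 merely adds two fractional bits, the conversion reduces to a multiply by four, which also preserves two's-complement negatives once the hardware masks the wider field (0x0FFF * 4 = 0x3FFC, still -1/1024 in S1.12). A plausible sketch of the converter — an assumption for illustration, not the committed definition:

	/* Hypothetical sketch: widen each S1.10 coefficient to S1.12 by
	 * shifting in two fractional zero bits. Each 64-phase table stores
	 * 33 rows (the filter is symmetric), so num_taps scales the count. */
	static void convert_s1_10_to_s1_12_sketch(const uint16_t *s1_10,
						  uint16_t *s1_12, int num_taps)
	{
		int i;

		for (i = 0; i < 33 * num_taps; i++)
			s1_12[i] = s1_10[i] * 4; /* same as << 2 */
	}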
+
+uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_3tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_3tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_3tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_3tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_3tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_3tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_3tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_3tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_4tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_4tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_4tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_4tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_4tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_4tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_4tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_4tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_6tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_6tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_6tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_6tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_6tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_6tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_6tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_6tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
+{
+ if (taps == 6)
+ return spl_get_easf_filter_6tap_64p(ratio);
+ else if (taps == 4)
+ return spl_get_easf_filter_4tap_64p(ratio);
+ else if (taps == 3)
+ return spl_get_easf_filter_3tap_64p(ratio);
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
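Selection is thus two-level: the tap count picks the table family and the (reciprocal) scale ratio picks the row group — each ratio_X table covers ratios in the tenth just below X, with the 1.00 table catching everything from 0.9 up. A hedged usage sketch (assumes spl_init_easf_filter_coeffs() has already populated the S1.12 copies):

	/* Hypothetical call site: 6-tap EASF coefficients for a reciprocal
	 * ratio (output/input) of 0.45; 0.45 < 5/10, so the ratio_0_50
	 * S1.12 table is returned. */
	uint16_t *coeffs = spl_dscl_get_easf_filter_coeffs_64p(6,
		spl_fixpt_from_fraction(45, 100));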
+
+void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data, bool enable_easf_v,
+ bool enable_easf_h)
+{
+ /*
+ * Legacy coefficients were calculated with scaling ratio = input / output;
+ * the new EASF coefficients are indexed by the reciprocal,
+ * output / input (hence recip_ratios below).
+ */
+ if (enable_easf_h) {
+ dscl_prog_data->filter_h = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.h_taps, data->recip_ratios.horz);
+
+ dscl_prog_data->filter_h_c = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.h_taps_c, data->recip_ratios.horz_c);
+ } else {
+ dscl_prog_data->filter_h = spl_dscl_get_filter_coeffs_64p(
+ data->taps.h_taps, data->ratios.horz);
+
+ dscl_prog_data->filter_h_c = spl_dscl_get_filter_coeffs_64p(
+ data->taps.h_taps_c, data->ratios.horz_c);
+ }
+ if (enable_easf_v) {
+ dscl_prog_data->filter_v = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.v_taps, data->recip_ratios.vert);
+
+ dscl_prog_data->filter_v_c = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.v_taps_c, data->recip_ratios.vert_c);
+ } else {
+ dscl_prog_data->filter_v = spl_dscl_get_filter_coeffs_64p(
+ data->taps.v_taps, data->ratios.vert);
+
+ dscl_prog_data->filter_v_c = spl_dscl_get_filter_coeffs_64p(
+ data->taps.v_taps_c, data->ratios.vert_c);
+ }
+}
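The ratio comment above is the crux of the EASF branches: the legacy spl_dscl_get_filter_coeffs_64p() tables are keyed by input/output, while the EASF tables are keyed by output/input, so callers must fill recip_ratios beforehand. A one-line caller-side sketch (the scl_data name is hypothetical) using the spl_fixpt_recip() helper added later in this patch:

	/* Hypothetical setup: a 2:1 horizontal downscale has
	 * ratios.horz == 2.0; the EASF lookup wants its reciprocal, 0.5. */
	scl_data->recip_ratios.horz = spl_fixpt_recip(scl_data->ratios.horz);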
+
+static uint32_t spl_easf_get_scale_ratio_to_reg_value(struct spl_fixed31_32 ratio,
+ struct scale_ratio_to_reg_value_lookup *lookup_table_base_ptr,
+ unsigned int num_entries)
+{
+ unsigned int count = 0;
+ uint32_t value = 0;
+ struct scale_ratio_to_reg_value_lookup *lookup_table_index_ptr;
+
+ lookup_table_index_ptr = (lookup_table_base_ptr + num_entries - 1);
+ value = lookup_table_index_ptr->reg_value;
+
+ while (count < num_entries) {
+ lookup_table_index_ptr = (lookup_table_base_ptr + count);
+ if (lookup_table_index_ptr->numer < 0)
+ break;
+
+ if (ratio.value < spl_fixpt_from_fraction(
+ lookup_table_index_ptr->numer,
+ lookup_table_index_ptr->denom).value) {
+ value = lookup_table_index_ptr->reg_value;
+ break;
+ }
+
+ count++;
+ }
+ return value;
+}
+
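The scan relies on two table invariants: entries are ordered by ascending threshold, and a {-1, -1} catch-all terminates the list. `value` defaults to that last entry's reg_value; the first entry whose numer/denom threshold exceeds the ratio wins, and a negative numer ends the scan with the default. A worked sketch against easf_gain_ring6_6tap_lookup above:

	/* Hypothetical usage: 0.85 < 9/10, so this returns 0xA51F from the
	 * {9, 10} row of easf_gain_ring6_6tap_lookup; a ratio >= 1 would
	 * fall through to the {-1, -1} default, 0xA640. */
	uint32_t reg = spl_easf_get_scale_ratio_to_reg_value(
		spl_fixpt_from_fraction(85, 100),
		easf_gain_ring6_6tap_lookup,
		sizeof(easf_gain_ring6_6tap_lookup) /
			sizeof(struct scale_ratio_to_reg_value_lookup));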
+uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries = sizeof(easf_v_bf3_mode_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_v_bf3_mode_lookup, num_entries);
+ return value;
+}
+
+uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries = sizeof(easf_h_bf3_mode_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_h_bf3_mode_lookup, num_entries);
+ return value;
+}
+
+uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_reducer_gain6_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain6_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_reducer_gain6_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain6_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_reducer_gain4_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain4_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_reducer_gain4_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain4_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_gain_ring6_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring6_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_gain_ring6_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring6_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_gain_ring4_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring4_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_gain_ring4_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring4_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_dntilt_uptilt_offset_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_dntilt_uptilt_offset_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt_maxval_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt_maxval_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_dntilt_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_dntilt_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt1_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt1_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt2_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt2_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+
+uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt2_offset_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt2_offset_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h
new file mode 100644
index 000000000000..8bb2b8108e38
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DC_SPL_SCL_EASF_FILTERS_H__
+#define __DC_SPL_SCL_EASF_FILTERS_H__
+
+#include "dc_spl_types.h"
+
+struct scale_ratio_to_reg_value_lookup {
+ int numer;
+ int denom;
+ const uint32_t reg_value;
+};
+
+void spl_init_easf_filter_coeffs(void);
+uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
+void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data, bool enable_easf_v,
+ bool enable_easf_h);
+
+uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio);
+uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio);
+uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio);
+
+#endif /* __DC_SPL_SCL_EASF_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
index c174b2e8a150..b02c7b0b262b 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
@@ -2,7 +2,7 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include "dc_spl_types.h"
+#include "spl_debug.h"
#include "dc_spl_scl_filters.h"
//=========================================
// <num_taps> = 2
@@ -1318,97 +1318,97 @@ static const uint16_t filter_8tap_64p_183[264] = {
0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
};
-const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_3tap_16p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_3tap_16p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_3tap_16p_149;
else
return filter_3tap_16p_183;
}
-const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_3tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_3tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_3tap_64p_149;
else
return filter_3tap_64p_183;
}
-const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_4tap_16p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_4tap_16p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_4tap_16p_149;
else
return filter_4tap_16p_183;
}
-const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_4tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_4tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_4tap_64p_149;
else
return filter_4tap_64p_183;
}
-const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_5tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_5tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_5tap_64p_149;
else
return filter_5tap_64p_183;
}
-const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_6tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_6tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_6tap_64p_149;
else
return filter_6tap_64p_183;
}
-const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_7tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_7tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_7tap_64p_149;
else
return filter_7tap_64p_183;
}
-const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_8tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_8tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_8tap_64p_149;
else
return filter_8tap_64p_183;
@@ -1423,3 +1423,29 @@ const uint16_t *spl_get_filter_2tap_64p(void)
{
return filter_2tap_64p;
}
+
+const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
+{
+ if (taps == 8)
+ return spl_get_filter_8tap_64p(ratio);
+ else if (taps == 7)
+ return spl_get_filter_7tap_64p(ratio);
+ else if (taps == 6)
+ return spl_get_filter_6tap_64p(ratio);
+ else if (taps == 5)
+ return spl_get_filter_5tap_64p(ratio);
+ else if (taps == 4)
+ return spl_get_filter_4tap_64p(ratio);
+ else if (taps == 3)
+ return spl_get_filter_3tap_64p(ratio);
+ else if (taps == 2)
+ return spl_get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
index 6d96aca53b24..48202bc4f81e 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
@@ -7,53 +7,16 @@
#include "dc_spl_types.h"
-const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio);
+const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_2tap_16p(void);
const uint16_t *spl_get_filter_2tap_64p(void);
-const uint16_t *spl_get_filter_3tap_16p_upscale(void);
-const uint16_t *spl_get_filter_3tap_16p_116(void);
-const uint16_t *spl_get_filter_3tap_16p_149(void);
-const uint16_t *spl_get_filter_3tap_16p_183(void);
+const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_16p_upscale(void);
-const uint16_t *spl_get_filter_4tap_16p_116(void);
-const uint16_t *spl_get_filter_4tap_16p_149(void);
-const uint16_t *spl_get_filter_4tap_16p_183(void);
-
-const uint16_t *spl_get_filter_3tap_64p_upscale(void);
-const uint16_t *spl_get_filter_3tap_64p_116(void);
-const uint16_t *spl_get_filter_3tap_64p_149(void);
-const uint16_t *spl_get_filter_3tap_64p_183(void);
-
-const uint16_t *spl_get_filter_4tap_64p_upscale(void);
-const uint16_t *spl_get_filter_4tap_64p_116(void);
-const uint16_t *spl_get_filter_4tap_64p_149(void);
-const uint16_t *spl_get_filter_4tap_64p_183(void);
-
-const uint16_t *spl_get_filter_5tap_64p_upscale(void);
-const uint16_t *spl_get_filter_5tap_64p_116(void);
-const uint16_t *spl_get_filter_5tap_64p_149(void);
-const uint16_t *spl_get_filter_5tap_64p_183(void);
-
-const uint16_t *spl_get_filter_6tap_64p_upscale(void);
-const uint16_t *spl_get_filter_6tap_64p_116(void);
-const uint16_t *spl_get_filter_6tap_64p_149(void);
-const uint16_t *spl_get_filter_6tap_64p_183(void);
-
-const uint16_t *spl_get_filter_7tap_64p_upscale(void);
-const uint16_t *spl_get_filter_7tap_64p_116(void);
-const uint16_t *spl_get_filter_7tap_64p_149(void);
-const uint16_t *spl_get_filter_7tap_64p_183(void);
-
-const uint16_t *spl_get_filter_8tap_64p_upscale(void);
-const uint16_t *spl_get_filter_8tap_64p_116(void);
-const uint16_t *spl_get_filter_8tap_64p_149(void);
-const uint16_t *spl_get_filter_8tap_64p_183(void);
#endif /* __DC_SPL_SCL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 201201d3f55b..2a74ff5fdfdb 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -2,30 +2,15 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include "os_types.h"
-#include "dc_hw_types.h"
-#ifndef ASSERT
-#define ASSERT(_bool) (void *)0
-#endif
-#include "include/fixed31_32.h" // fixed31_32 and related functions
#ifndef __DC_SPL_TYPES_H__
#define __DC_SPL_TYPES_H__
-enum lb_memory_config {
- /* Enable all 3 pieces of memory */
- LB_MEMORY_CONFIG_0 = 0,
-
- /* Enable only the first piece of memory */
- LB_MEMORY_CONFIG_1 = 1,
-
- /* Enable only the second piece of memory */
- LB_MEMORY_CONFIG_2 = 2,
-
- /* Only applicable in 4:2:0 mode, enable all 3 pieces of memory and the
- * last piece of chroma memory used for the luma storage
- */
- LB_MEMORY_CONFIG_3 = 3
-};
+#include "spl_os_types.h" // swap
+#ifndef SPL_ASSERT
+#define SPL_ASSERT(_bool) ((void *)0)
+#endif
+#include "spl_fixpt31_32.h" // fixed31_32 and related functions
+#include "spl_custom_float.h" // custom float and related functions
struct spl_size {
uint32_t width;
@@ -39,16 +24,16 @@ struct spl_rect {
};
struct spl_ratios {
- struct fixed31_32 horz;
- struct fixed31_32 vert;
- struct fixed31_32 horz_c;
- struct fixed31_32 vert_c;
+ struct spl_fixed31_32 horz;
+ struct spl_fixed31_32 vert;
+ struct spl_fixed31_32 horz_c;
+ struct spl_fixed31_32 vert_c;
};
struct spl_inits {
- struct fixed31_32 h;
- struct fixed31_32 h_c;
- struct fixed31_32 v;
- struct fixed31_32 v_c;
+ struct spl_fixed31_32 h;
+ struct spl_fixed31_32 h_c;
+ struct spl_fixed31_32 v;
+ struct spl_fixed31_32 v_c;
};
struct spl_taps {
@@ -81,6 +66,8 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_420BPP10,
/*end of pixel format definition*/
SPL_PIXEL_FORMAT_INVALID,
+ SPL_PIXEL_FORMAT_422BPP8,
+ SPL_PIXEL_FORMAT_422BPP10,
SPL_PIXEL_FORMAT_GRPH_BEGIN = SPL_PIXEL_FORMAT_INDEX8,
SPL_PIXEL_FORMAT_GRPH_END = SPL_PIXEL_FORMAT_FP16,
SPL_PIXEL_FORMAT_VIDEO_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
@@ -88,6 +75,22 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_UNKNOWN
};
+enum lb_memory_config {
+ /* Enable all 3 pieces of memory */
+ LB_MEMORY_CONFIG_0 = 0,
+
+ /* Enable only the first piece of memory */
+ LB_MEMORY_CONFIG_1 = 1,
+
+ /* Enable only the second piece of memory */
+ LB_MEMORY_CONFIG_2 = 2,
+
+ /* Only applicable in 4:2:0 mode, enable all 3 pieces of memory and the
+ * last piece of chroma memory used for the luma storage
+ */
+ LB_MEMORY_CONFIG_3 = 3
+};
+
/* Rotation angle */
enum spl_rotation_angle {
SPL_ROTATION_ANGLE_0 = 0,
@@ -120,6 +123,13 @@ enum spl_color_space {
SPL_COLOR_SPACE_YCBCR709_BLACK,
};
+enum chroma_cositing {
+ CHROMA_COSITING_NONE,
+ CHROMA_COSITING_LEFT,
+ CHROMA_COSITING_TOPLEFT,
+ CHROMA_COSITING_COUNT
+};
+
// Scratch space for calculating scaler params
struct spl_scaler_data {
int h_active;
@@ -129,6 +139,7 @@ struct spl_scaler_data {
struct spl_rect viewport_c;
struct spl_rect recout;
struct spl_ratios ratios;
+ struct spl_ratios recip_ratios;
struct spl_inits inits;
};
@@ -396,13 +407,19 @@ struct dscl_prog_data {
/* blur and scale filter */
const uint16_t *filter_blur_scale_v;
const uint16_t *filter_blur_scale_h;
+ int sharpness_level; /* Track sharpness level */
};
/* SPL input and output definitions */
-// SPL outputs struct
-struct spl_out {
+// SPL scratch struct
+struct spl_scratch {
// Pack all SPL outputs in scl_data
struct spl_scaler_data scl_data;
+};
+
+/* SPL input and output definitions */
+// SPL outputs struct
+struct spl_out {
// Pack all output need to program hw registers
struct dscl_prog_data *dscl_prog_data;
};
@@ -444,20 +461,43 @@ struct basic_out {
bool alpha_en;
bool use_two_pixels_per_container;
};
-enum explicit_sharpness {
- SHARPNESS_LOW = 0,
- SHARPNESS_MID,
- SHARPNESS_HIGH
-};
-struct adaptive_sharpness {
+enum sharpness_setting {
+ SHARPNESS_HW_OFF = 0,
+ SHARPNESS_ZERO,
+ SHARPNESS_CUSTOM
+};
+struct spl_sharpness_range {
+ int sdr_rgb_min;
+ int sdr_rgb_max;
+ int sdr_rgb_mid;
+ int sdr_yuv_min;
+ int sdr_yuv_max;
+ int sdr_yuv_mid;
+ int hdr_rgb_min;
+ int hdr_rgb_max;
+ int hdr_rgb_mid;
+};
+struct adaptive_sharpness {
bool enable;
- enum explicit_sharpness sharpness;
+ int sharpness_level;
+ struct spl_sharpness_range sharpness_range;
};
enum linear_light_scaling { // convert it in translation logic
LLS_PREF_DONT_CARE = 0,
LLS_PREF_YES,
LLS_PREF_NO
};
+enum sharpen_policy {
+ SHARPEN_ALWAYS = 0,
+ SHARPEN_YUV = 1,
+ SHARPEN_RGB_FULLSCREEN_YUV = 2,
+ SHARPEN_FULLSCREEN_ALL = 3
+};
+enum scale_to_sharpness_policy {
+ NO_SCALE_TO_SHARPNESS_ADJ = 0,
+ SCALE_TO_SHARPNESS_ADJ_YUV = 1,
+ SCALE_TO_SHARPNESS_ADJ_ALL = 2
+};
struct spl_funcs {
void (*spl_calc_lb_num_partitions)
(bool alpha_en,
@@ -470,6 +510,8 @@ struct spl_funcs {
struct spl_debug {
int visual_confirm_base_offset;
int visual_confirm_dpp_offset;
+ enum sharpen_policy sharpen_policy;
+ enum scale_to_sharpness_policy scale_to_sharpness_policy;
};
struct spl_in {
@@ -485,6 +527,11 @@ struct spl_in {
bool prefer_easf;
bool disable_easf;
struct spl_debug debug;
+ bool is_fullscreen;
+ bool is_hdr_on;
+ int h_active;
+ int v_active;
+ int sdr_white_level_nits;
};
// end of SPL inputs
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c
new file mode 100644
index 000000000000..be2f34d034c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_debug.h"
+#include "spl_custom_float.h"
+
+static bool spl_build_custom_float(struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ bool *negative,
+ uint32_t *mantissa,
+ uint32_t *exponenta)
+{
+ uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
+
+ const struct spl_fixed31_32 mantissa_constant_plus_max_fraction =
+ spl_fixpt_from_fraction((1LL << (format->mantissa_bits + 1)) - 1,
+ 1LL << format->mantissa_bits);
+
+ struct spl_fixed31_32 mantiss;
+
+ if (spl_fixpt_eq(value, spl_fixpt_zero)) {
+ *negative = false;
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ if (spl_fixpt_lt(value, spl_fixpt_zero)) {
+ *negative = format->sign;
+ value = spl_fixpt_neg(value);
+ } else {
+ *negative = false;
+ }
+
+ if (spl_fixpt_lt(value, spl_fixpt_one)) {
+ uint32_t i = 1;
+
+ do {
+ value = spl_fixpt_shl(value, 1);
+ ++i;
+ } while (spl_fixpt_lt(value, spl_fixpt_one));
+
+ --i;
+
+ if (exp_offset <= i) {
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ *exponenta = exp_offset - i;
+ } else if (spl_fixpt_le(mantissa_constant_plus_max_fraction, value)) {
+ uint32_t i = 1;
+
+ do {
+ value = spl_fixpt_shr(value, 1);
+ ++i;
+ } while (spl_fixpt_lt(mantissa_constant_plus_max_fraction, value));
+
+ *exponenta = exp_offset + i - 1;
+ } else {
+ *exponenta = exp_offset;
+ }
+
+ mantiss = spl_fixpt_sub(value, spl_fixpt_one);
+
+ if (spl_fixpt_lt(mantiss, spl_fixpt_zero) ||
+ spl_fixpt_lt(spl_fixpt_one, mantiss))
+ mantiss = spl_fixpt_zero;
+ else
+ mantiss = spl_fixpt_shl(mantiss, format->mantissa_bits);
+
+ *mantissa = spl_fixpt_floor(mantiss);
+
+ return true;
+}
+
+static bool spl_setup_custom_float(const struct spl_custom_float_format *format,
+ bool negative,
+ uint32_t mantissa,
+ uint32_t exponenta,
+ uint32_t *result)
+{
+ uint32_t i = 0;
+ uint32_t j = 0;
+ uint32_t value = 0;
+
+ /* Verification code: once the calculation has been validated,
+ * these clamps can be removed.
+ */
+
+ const uint32_t mantissa_mask =
+ (1 << (format->mantissa_bits + 1)) - 1;
+
+ const uint32_t exponenta_mask =
+ (1 << (format->exponenta_bits + 1)) - 1;
+
+ if (mantissa & ~mantissa_mask) {
+ SPL_BREAK_TO_DEBUGGER();
+ mantissa = mantissa_mask;
+ }
+
+ if (exponenta & ~exponenta_mask) {
+ SPL_BREAK_TO_DEBUGGER();
+ exponenta = exponenta_mask;
+ }
+
+ /* end of verification code */
+
+ while (i < format->mantissa_bits) {
+ uint32_t mask = 1 << i;
+
+ if (mantissa & mask)
+ value |= mask;
+
+ ++i;
+ }
+
+ while (j < format->exponenta_bits) {
+ uint32_t mask = 1 << j;
+
+ if (exponenta & mask)
+ value |= mask << i;
+
+ ++j;
+ }
+
+ if (negative && format->sign)
+ value |= 1 << (i + j);
+
+ *result = value;
+
+ return true;
+}
+
+bool spl_convert_to_custom_float_format(struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ uint32_t *result)
+{
+ uint32_t mantissa;
+ uint32_t exponenta;
+ bool negative;
+
+ return spl_build_custom_float(value, format, &negative, &mantissa, &exponenta) &&
+ spl_setup_custom_float(format,
+ negative,
+ mantissa,
+ exponenta,
+ result);
+}
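The two halves split the conversion: spl_build_custom_float() normalizes the fixed-point value into a biased exponent (bias = 2^(exponent_bits-1) - 1) and a fractional mantissa, and spl_setup_custom_float() packs the fields as mantissa | exponent << mantissa_bits, with the sign bit on top. A worked sketch with a hypothetical 12/6-bit format:

	/* Hypothetical format: 12 mantissa bits, 6 exponent bits, signed.
	 * For value == 1.0: bias = (1 << 5) - 1 = 31 and mantissa = 0,
	 * so the packed result is 31 << 12 == 0x1F000. */
	struct spl_custom_float_format fmt = {
		.mantissa_bits = 12,
		.exponenta_bits = 6,
		.sign = true,
	};
	uint32_t packed;
	bool ok = spl_convert_to_custom_float_format(spl_fixpt_one, &fmt, &packed);
	/* ok == true, packed == 0x1F000 */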
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h
new file mode 100644
index 000000000000..cdc4e107b9de
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef SPL_CUSTOM_FLOAT_H_
+#define SPL_CUSTOM_FLOAT_H_
+
+#include "spl_os_types.h"
+#include "spl_fixpt31_32.h"
+
+struct spl_custom_float_format {
+ uint32_t mantissa_bits;
+ uint32_t exponenta_bits;
+ bool sign;
+};
+
+struct spl_custom_float_value {
+ uint32_t mantissa;
+ uint32_t exponenta;
+ uint32_t value;
+ bool negative;
+};
+
+bool spl_convert_to_custom_float_format(
+ struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ uint32_t *result);
+
+#endif //SPL_CUSTOM_FLOAT_H_
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
new file mode 100644
index 000000000000..5696dafd0894
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef SPL_DEBUG_H
+#define SPL_DEBUG_H
+
+#ifdef SPL_ASSERT
+#undef SPL_ASSERT
+#endif
+#define SPL_ASSERT(b)
+
+#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0)
+
+#ifdef SPL_DALMSG
+#undef SPL_DALMSG
+#endif
+#define SPL_DALMSG(b)
+
+#ifdef SPL_DAL_ASSERT_MSG
+#undef SPL_DAL_ASSERT_MSG
+#endif
+#define SPL_DAL_ASSERT_MSG(b, m)
+
+#endif // SPL_DEBUG_H
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
new file mode 100644
index 000000000000..a95565df5487
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_fixpt31_32.h"
+
+static const struct spl_fixed31_32 spl_fixpt_two_pi = { 26986075409LL };
+static const struct spl_fixed31_32 spl_fixpt_ln2 = { 2977044471LL };
+static const struct spl_fixed31_32 spl_fixpt_ln2_div_2 = { 1488522236LL };
+
+static inline unsigned long long abs_i64(
+ long long arg)
+{
+ if (arg > 0)
+ return (unsigned long long)arg;
+ else
+ return (unsigned long long)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ */
+static inline unsigned long long complete_integer_division_u64(
+ unsigned long long dividend,
+ unsigned long long divisor,
+ unsigned long long *remainder)
+{
+ unsigned long long result;
+
+ SPL_ASSERT(divisor);
+
+ result = spl_div64_u64_rem(dividend, divisor, remainder);
+
+ return result;
+}
+
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+ ((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator)
+{
+ struct spl_fixed31_32 res;
+
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+
+ unsigned long long arg1_value = arg1_negative ? -numerator : numerator;
+ unsigned long long arg2_value = arg2_negative ? -denominator : denominator;
+
+ unsigned long long remainder;
+
+ /* determine integer part */
+
+ unsigned long long res_value = complete_integer_division_u64(
+ arg1_value, arg2_value, &remainder);
+
+ SPL_ASSERT(res_value <= LONG_MAX);
+
+ /* determine fractional part */
+ {
+ unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ unsigned long long summand = (remainder << 1) >= arg2_value;
+
+ SPL_ASSERT(res_value <= LLONG_MAX - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (long long)res_value;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
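The loop is plain binary long division: each pass shifts the remainder left and emits one of the 32 fractional bits, and the final `summand` rounds the last bit against what is left over. A worked instance (values checked by hand; editorial, not from the patch):

	/* spl_fixpt_from_fraction(1, 3): integer part 0; the division loop
	 * emits the repeating pattern 0101..., giving fractional bits
	 * 0x55555555; the final remainder is 1 and (1 << 1) < 3, so there
	 * is no round-up. Result: res.value == 0x0000000055555555,
	 * i.e. ~0.3333333333. */
	struct spl_fixed31_32 third = spl_fixpt_from_fraction(1, 3);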
+
+struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value;
+ unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+ unsigned long long arg1_int = GET_INTEGER_PART(arg1_value);
+ unsigned long long arg2_int = GET_INTEGER_PART(arg2_value);
+
+ unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ unsigned long long tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ SPL_ASSERT(res.value <= (long long)LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (unsigned long long)spl_fixpt_half.value);
+
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
+
+struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res;
+
+ unsigned long long arg_value = abs_i64(arg.value);
+
+ unsigned long long arg_int = GET_INTEGER_PART(arg_value);
+
+ unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+ unsigned long long tmp;
+
+ res.value = arg_int * arg_int;
+
+ SPL_ASSERT(res.value <= (long long)LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg_int * arg_fra;
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg_fra * arg_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (unsigned long long)spl_fixpt_half.value);
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ return res;
+}
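+
+/*
+ * Note: the arg_int * arg_fra cross term above is intentionally added
+ * twice, since (i + f)^2 = i^2 + 2*i*f + f^2.
+ */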
+
+struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
+{
+ /*
+ * @note
+	 * This would be a good candidate for Newton's method
+ */
+
+ ASSERT(arg.value);
+
+ return spl_fixpt_from_fraction(
+ spl_fixpt_one.value,
+ arg.value);
+}
+
+struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 square;
+
+ struct spl_fixed31_32 res = spl_fixpt_one;
+
+ int n = 27;
+
+ struct spl_fixed31_32 arg_norm = arg;
+
+ if (spl_fixpt_le(
+ spl_fixpt_two_pi,
+ spl_fixpt_abs(arg))) {
+ arg_norm = spl_fixpt_sub(
+ arg_norm,
+ spl_fixpt_mul_int(
+ spl_fixpt_two_pi,
+ (int)spl_div64_s64(
+ arg_norm.value,
+ spl_fixpt_two_pi.value)));
+ }
+
+ square = spl_fixpt_sqr(arg_norm);
+
+ do {
+ res = spl_fixpt_sub(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n > 2);
+
+ if (arg.value != arg_norm.value)
+ res = spl_fixpt_div(
+ spl_fixpt_mul(res, arg_norm),
+ arg);
+
+ return res;
+}
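+
+/*
+ * The loop above evaluates the Taylor series of sinc in Horner form:
+ * sinc(x) = 1 - x^2/(3*2) * (1 - x^2/(5*4) * (1 - x^2/(7*6) * (...)))
+ *         = 1 - x^2/3! + x^4/5! - x^6/7! + ...
+ * starting at n = 27 and stepping n down by 2 each iteration.
+ */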
+
+struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg)
+{
+ return spl_fixpt_mul(
+ arg,
+ spl_fixpt_sinc(arg));
+}
+
+struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg)
+{
+ /* TODO implement argument normalization */
+
+ const struct spl_fixed31_32 square = spl_fixpt_sqr(arg);
+
+ struct spl_fixed31_32 res = spl_fixpt_one;
+
+ int n = 26;
+
+ do {
+ res = spl_fixpt_sub(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n != 0);
+
+ return res;
+}
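+
+/*
+ * Same Horner scheme as spl_fixpt_sinc() above, starting at n = 26, which
+ * yields cos(x) = 1 - x^2/2! + x^4/4! - x^6/6! + ...
+ */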
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as Taylor series.
+ */
+static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg)
+{
+ unsigned int n = 9;
+
+ struct spl_fixed31_32 res = spl_fixpt_from_fraction(
+ n + 2,
+ n + 1);
+ /* TODO find correct res */
+
+ ASSERT(spl_fixpt_lt(arg, spl_fixpt_one));
+
+ do
+ res = spl_fixpt_add(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ arg,
+ res),
+ n));
+ while (--n != 1);
+
+ return spl_fixpt_add(
+ spl_fixpt_one,
+ spl_fixpt_mul(
+ arg,
+ res));
+}
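+
+/*
+ * The loop above builds exp(arg) in Horner form:
+ * exp(x) ~= 1 + x * (1 + x/2 * (1 + x/3 * (... (1 + x/9 * res0) ...)))
+ * where res0 = (n + 2) / (n + 1) approximates the truncated tail.
+ */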
+
+struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
+{
+ /*
+ * @brief
+ * Main equation is:
+ * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+ * where m = round(x / ln(2)), r = x - m * ln(2)
+ */
+
+ if (spl_fixpt_le(
+ spl_fixpt_ln2_div_2,
+ spl_fixpt_abs(arg))) {
+ int m = spl_fixpt_round(
+ spl_fixpt_div(
+ arg,
+ spl_fixpt_ln2));
+
+ struct spl_fixed31_32 r = spl_fixpt_sub(
+ arg,
+ spl_fixpt_mul_int(
+ spl_fixpt_ln2,
+ m));
+
+ ASSERT(m != 0);
+
+ ASSERT(spl_fixpt_lt(
+ spl_fixpt_abs(r),
+ spl_fixpt_one));
+
+ if (m > 0)
+ return spl_fixpt_shl(
+ fixed31_32_exp_from_taylor_series(r),
+ (unsigned char)m);
+ else
+ return spl_fixpt_div_int(
+ fixed31_32_exp_from_taylor_series(r),
+ 1LL << -m);
+ } else if (arg.value != 0)
+ return fixed31_32_exp_from_taylor_series(arg);
+ else
+ return spl_fixpt_one;
+}
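+
+/*
+ * Example of the range reduction (illustrative): for arg = 3,
+ * m = round(3 / ln(2)) = 4 and r = 3 - 4 * ln(2) ~= 0.2274, so
+ * exp(3) = (1 << 4) * exp(0.2274) ~= 16 * 1.2554 ~= 20.086.
+ */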
+
+struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res = spl_fixpt_neg(spl_fixpt_one);
+ /* TODO improve 1st estimation */
+
+ struct spl_fixed31_32 error;
+
+ ASSERT(arg.value > 0);
+ /* TODO if arg is negative, return NaN */
+ /* TODO if arg is zero, return -INF */
+
+ do {
+ struct spl_fixed31_32 res1 = spl_fixpt_add(
+ spl_fixpt_sub(
+ res,
+ spl_fixpt_one),
+ spl_fixpt_div(
+ arg,
+ spl_fixpt_exp(res)));
+
+ error = spl_fixpt_sub(
+ res,
+ res1);
+
+ res = res1;
+ /* TODO determine max_allowed_error based on quality of exp() */
+ } while (abs_i64(error.value) > 100ULL);
+
+ return res;
+}
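+
+/*
+ * The loop above is Newton's method applied to f(y) = exp(y) - arg:
+ * y' = y - (exp(y) - arg) / exp(y) = y - 1 + arg / exp(y),
+ * which is exactly the res1 computation.
+ */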
+
+/* This function is a generic helper that translates a fixed-point value to
+ * the specified integer format, consisting of an integer_bits-wide integer
+ * part and a fractional_bits-wide fractional part. For example,
+ * spl_fixpt_u2d19 uses it to produce a 2-bit integer part and a 19-bit
+ * fractional part in 32 bits. It is used in HW programming (scaler).
+ */
+
+static inline unsigned int ux_dy(
+ long long value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ /* 1. create mask of integer part */
+ unsigned int result = (1 << integer_bits) - 1;
+ /* 2. mask out fractional part */
+ unsigned int fractional_part = FRACTIONAL_PART_MASK & value;
+	/* 3. shrink fixed point integer part to be of integer_bits width */
+ result &= GET_INTEGER_PART(value);
+ /* 4. make space for fractional part to be filled in after integer */
+ result <<= fractional_bits;
+	/* 5. shrink fixed point fractional part to be of fractional_bits width */
+ fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits;
+ /* 6. merge the result */
+ return result | fractional_part;
+}
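+
+/*
+ * Example (illustrative): for value == 0x180000000 (1.5 in 31.32 format),
+ * ux_dy(value, 2, 19) keeps integer part 1, shifts it left 19 bits
+ * (0x80000), takes the top 19 fractional bits (0x40000) and returns
+ * 0xC0000, i.e. 1.5 in U2.19.
+ */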
+
+static inline unsigned int clamp_ux_dy(
+ long long value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits,
+ unsigned int min_clamp)
+{
+ unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
+
+ if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
+ return (1 << (integer_bits + fractional_bits)) - 1;
+ else if (truncated_val > min_clamp)
+ return truncated_val;
+ else
+ return min_clamp;
+}
+
+unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 4, 19);
+}
+
+unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 3, 19);
+}
+
+unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 2, 19);
+}
+
+unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 0, 19);
+}
+
+unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 14, 1);
+}
+
+unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 10, 1);
+}
+
+int spl_fixpt_s4d19(struct spl_fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19);
+ else
+ return ux_dy(arg.value, 4, 19);
+}
+
+struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct spl_fixed31_32 fixpt_value = spl_fixpt_zero;
+ struct spl_fixed31_32 fixpt_int_value = spl_fixpt_zero;
+	long long int_mask = ((long long)1 << (long long)integer_bits) - 1;
+
+	fixpt_value.value = (long long)value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+	int_mask = int_mask << fractional_bits;
+	fixpt_int_value.value = value & int_mask;
+ fixpt_int_value.value <<= (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ fixpt_value.value |= fixpt_int_value.value;
+ return fixpt_value;
+}
+
+struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct spl_fixed31_32 fixpt_value = spl_fixpt_from_int(int_value);
+
+ fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ return fixpt_value;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
new file mode 100644
index 000000000000..8a045e2f8699
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __SPL_FIXED31_32_H__
+#define __SPL_FIXED31_32_H__
+
+#include "os_types.h"
+#include "spl_os_types.h" // swap
+#ifndef ASSERT
+#define ASSERT(_bool) ((void)0)
+#endif
+
+#ifndef LLONG_MAX
+#define LLONG_MAX 9223372036854775807ll
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1ll)
+#endif
+
+#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
+
+/*
+ * @brief
+ * Arithmetic operations on real numbers
+ * represented as fixed-point numbers.
+ * There are: 1 bit for the sign,
+ * 31 bits for the integer part,
+ * 32 bits for the fractional part.
+ *
+ * @note
+ * Currently, overflows and underflows are asserted;
+ * no special result returned.
+ */
+
+struct spl_fixed31_32 {
+ long long value;
+};
+
+/*
+ * @brief
+ * Useful constants
+ */
+
+static const struct spl_fixed31_32 spl_fixpt_zero = { 0 };
+static const struct spl_fixed31_32 spl_fixpt_epsilon = { 1LL };
+static const struct spl_fixed31_32 spl_fixpt_half = { 0x80000000LL };
+static const struct spl_fixed31_32 spl_fixpt_one = { 0x100000000LL };
+
+/*
+ * @brief
+ * Initialization routines
+ */
+
+/*
+ * @brief
+ * result = numerator / denominator
+ */
+struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator);
+
+/*
+ * @brief
+ * result = arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_from_int(int arg)
+{
+ struct spl_fixed31_32 res;
+
+ res.value = (long long) arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ return res;
+}
+
+/*
+ * @brief
+ * Unary operators
+ */
+
+/*
+ * @brief
+ * result = -arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_neg(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res;
+
+ res.value = -arg.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = abs(arg) := (arg >= 0) ? arg : -arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_abs(struct spl_fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return spl_fixpt_neg(arg);
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary relational operators
+ */
+
+/*
+ * @brief
+ * result = arg1 < arg2
+ */
+static inline bool spl_fixpt_lt(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 <= arg2
+ */
+static inline bool spl_fixpt_le(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 == arg2
+ */
+static inline bool spl_fixpt_eq(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+/*
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_min(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg1;
+ else
+ return arg2;
+}
+
+/*
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+static inline struct spl_fixed31_32 spl_fixpt_max(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg2;
+ else
+ return arg1;
+}
+
+/*
+ * @brief
+ * | min_value, when arg <= min_value
+ * result = | arg, when min_value < arg < max_value
+ * | max_value, when arg >= max_value
+ */
+static inline struct spl_fixed31_32 spl_fixpt_clamp(
+ struct spl_fixed31_32 arg,
+ struct spl_fixed31_32 min_value,
+ struct spl_fixed31_32 max_value)
+{
+ if (spl_fixpt_le(arg, min_value))
+ return min_value;
+ else if (spl_fixpt_le(max_value, arg))
+ return max_value;
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary shift operators
+ */
+
+/*
+ * @brief
+ * result = arg << shift
+ */
+static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
+{
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
+
+ arg.value = arg.value << shift;
+
+ return arg;
+}
+
+/*
+ * @brief
+ * result = arg >> shift
+ */
+static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift)
+{
+ bool negative = arg.value < 0;
+
+ if (negative)
+ arg.value = -arg.value;
+ arg.value = arg.value >> shift;
+ if (negative)
+ arg.value = -arg.value;
+ return arg;
+}
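+
+/*
+ * The shift above is applied to the magnitude, so negative values are
+ * rounded toward zero, mirroring the positive case.
+ */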
+
+/*
+ * @brief
+ * Binary additive operators
+ */
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_add_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_add(arg1, spl_fixpt_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_sub_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_sub(arg1, spl_fixpt_from_int(arg2));
+}
+
+/*
+ * @brief
+ * Binary multiplicative operators
+ */
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_mul_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_mul(arg1, spl_fixpt_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_div_int(struct spl_fixed31_32 arg1, long long arg2)
+{
+ return spl_fixpt_from_fraction(arg1.value, spl_fixpt_from_int((int)arg2).value);
+}
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_div(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return spl_fixpt_from_fraction(arg1.value, arg2.value);
+}
+
+/*
+ * @brief
+ * Reciprocal function
+ */
+
+/*
+ * @brief
+ * result = reciprocal(arg) := 1 / arg
+ *
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Trigonometric functions
+ */
+
+/*
+ * @brief
+ * result = sinc(arg) := sin(arg) / arg
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = sin(arg)
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = cos(arg)
+ *
+ * @note
+ * Argument specified in radians;
+ * it must be in the [-2pi...2pi] range -
+ * passing arguments outside that range
+ * will produce incorrect results!
+ */
+struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Transcendent functions
+ */
+
+/*
+ * @brief
+ * result = exp(arg)
+ *
+ * @note
+ * Currently, the function is verified for abs(arg) <= 1.
+ */
+struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = log(arg)
+ *
+ * @note
+ * Currently, abs(arg) should be less than 1
+ * and no normalization is done.
+ * No special actions are taken
+ * in case of invalid argument(s). Take care!
+ */
+struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Power function
+ */
+
+/*
+ * @brief
+ * result = pow(arg1, arg2)
+ *
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
+static inline struct spl_fixed31_32 spl_fixpt_pow(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value == 0)
+ return arg2.value == 0 ? spl_fixpt_one : spl_fixpt_zero;
+
+ return spl_fixpt_exp(
+ spl_fixpt_mul(
+ spl_fixpt_log(arg1),
+ arg2));
+}
+
+/*
+ * @brief
+ * Rounding functions
+ */
+
+/*
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
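+ *
+ * @note
+ * For negative arguments this implementation truncates toward zero,
+ * e.g. -1.5 yields -1 rather than -2.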
+ */
+static inline int spl_fixpt_floor(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+static inline int spl_fixpt_round(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ const long long summand = spl_fixpt_half.value;
+
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ const long long summand = spl_fixpt_one.value -
+ spl_fixpt_epsilon.value;
+
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/* The following functions are used in scaler HW programming to convert a
+ * fixed-point value to a format with 2 bits of integer part and 19 bits of
+ * fractional part. The same applies for u0d19: 0 bits of integer part and
+ * 19 bits of fractional part.
+ */
+
+unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg);
+
+int spl_fixpt_s4d19(struct spl_fixed31_32 arg);
+
+static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg, unsigned int frac_bits)
+{
+ bool negative = arg.value < 0;
+
+ if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) {
+ ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ return arg;
+ }
+
+ if (negative)
+ arg.value = -arg.value;
+ arg.value &= (~0ULL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits);
+ if (negative)
+ arg.value = -arg.value;
+ return arg;
+}
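+
+/*
+ * Truncation is applied to the magnitude, so the result is symmetric
+ * around zero: e.g. truncating -1.75 to one fractional bit yields -1.5.
+ */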
+
+struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, unsigned int integer_bits, unsigned int fractional_bits);
+struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
new file mode 100644
index 000000000000..709706ed4f2c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+/* Copyright 2019 Raptor Engineering, LLC */
+
+#ifndef _SPL_OS_TYPES_H_
+#define _SPL_OS_TYPES_H_
+
+#include <linux/slab.h>
+#include <linux/kgdb.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+/*
+ * General debug capabilities
+ */
+#define SPL_BREAK_TO_DEBUGGER() ASSERT(0)
+
+static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
+{
+ return div_u64_rem(dividend, divisor, remainder);
+}
+
+static inline uint64_t spl_div_u64(uint64_t dividend, uint32_t divisor)
+{
+ return div_u64(dividend, divisor);
+}
+
+static inline uint64_t spl_div64_u64(uint64_t dividend, uint64_t divisor)
+{
+ return div64_u64(dividend, divisor);
+}
+
+static inline uint64_t spl_div64_u64_rem(uint64_t dividend, uint64_t divisor, uint64_t *remainder)
+{
+ return div64_u64_rem(dividend, divisor, remainder);
+}
+
+static inline int64_t spl_div64_s64(int64_t dividend, int64_t divisor)
+{
+ return div64_s64(dividend, divisor);
+}
+
+#define spl_swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+#ifndef spl_min
+#define spl_min(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#endif /* _SPL_OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 6589bb9aea6b..fe5b6f7a3eb1 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -300,6 +300,7 @@ struct dmub_srv_hw_params {
enum dmub_ips_disable_type disable_ips;
bool disallow_phy_access;
bool disable_sldo_opt;
+ bool enable_non_transparent_setconfig;
};
/**
@@ -330,6 +331,9 @@ struct dmub_diagnostic_data {
uint32_t inbox0_rptr;
uint32_t inbox0_wptr;
uint32_t inbox0_size;
+ uint32_t outbox1_rptr;
+ uint32_t outbox1_wptr;
+ uint32_t outbox1_size;
uint32_t gpint_datain0;
struct dmub_srv_debug timeout_info;
uint8_t is_dmcub_enabled : 1;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 5ff0a865705f..ebcf68bfae2b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -111,7 +111,7 @@
#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2)
/* Trace buffer offset for entry */
-#define TRACE_BUFFER_ENTRY_OFFSET 16
+#define TRACE_BUFFER_ENTRY_OFFSET 16
/**
* Maximum number of dirty rects supported by FW.
@@ -336,6 +336,10 @@ union dmub_psr_debug_flags {
*/
uint32_t back_to_back_flip : 1;
+ /**
+ * Enable visual confirm for IPS
+ */
+ uint32_t enable_ips_visual_confirm : 1;
} bitfields;
/**
@@ -678,7 +682,7 @@ union dmub_fw_boot_options {
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
- uint32_t reserved0: 1;
+	uint32_t enable_non_transparent_setconfig: 1; /* 1 if DPIA uses the conventional DP link-training flow */
uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
@@ -1304,6 +1308,7 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0,
DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1,
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
+ DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3,
};
/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */
@@ -1875,7 +1880,12 @@ enum dmub_cmd_idle_opt_type {
/**
* DCN hardware notify idle.
*/
- DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2
+ DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2,
+
+ /**
+ * DCN hardware notify power state.
+ */
+ DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
};
/**
@@ -1903,6 +1913,33 @@ struct dmub_rb_cmd_idle_opt_dcn_notify_idle {
};
/**
+ * enum dmub_idle_opt_dc_power_state - DC power states.
+ */
+enum dmub_idle_opt_dc_power_state {
+ DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN = 0,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D0 = 1,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D1 = 2,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D2 = 4,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D3 = 8,
+};
+
+/**
+ * struct dmub_idle_opt_set_dc_power_state_data - Data passed to FW in a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+struct dmub_idle_opt_set_dc_power_state_data {
+ uint8_t power_state; /**< power state */
+ uint8_t pad[3]; /**< padding */
+};
+
+/**
+ * struct dmub_rb_cmd_idle_opt_set_dc_power_state - Data passed to FW in a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+struct dmub_rb_cmd_idle_opt_set_dc_power_state {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_idle_opt_set_dc_power_state_data data;
+};
+
+/**
* struct dmub_clocks - Clock update notification.
*/
struct dmub_clocks {
@@ -2103,6 +2140,24 @@ struct dmub_rb_cmd_set_mst_alloc_slots {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__SET_TPS_NOTIFICATION command.
+ */
+struct dmub_cmd_tps_notification_data {
+ uint8_t instance; /* DPIA instance */
+ uint8_t tps; /* requested training pattern */
+ uint8_t reserved1;
+ uint8_t reserved2;
+};
+
+/**
+ * DMUB command structure for SET_TPS_NOTIFICATION command.
+ */
+struct dmub_rb_cmd_set_tps_notification {
+ struct dmub_cmd_header header; /* header */
+ struct dmub_cmd_tps_notification_data tps_notification; /* set tps_notification data */
+};
+
+/**
* DMUB command structure for DPIA HPD int enable control.
*/
struct dmub_rb_cmd_dpia_hpd_int_enable {
@@ -3024,14 +3079,6 @@ struct dmub_cmd_update_dirty_rect_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
- /**
- * 16-bit value dicated by driver that indicates the coasting vtotal high byte part.
- */
- uint16_t coasting_vtotal_high;
- /**
- * Explicit padding to 4 byte boundary.
- */
- uint8_t pad[2];
};
/**
@@ -5277,6 +5324,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_set_mst_alloc_slots set_mst_alloc_slots;
/**
+ * Definition of a DMUB_CMD__DPIA_SET_TPS_NOTIFICATION command.
+ */
+ struct dmub_rb_cmd_set_tps_notification set_tps_notification;
+ /**
* Definition of a DMUB_CMD__EDID_CEA command.
*/
struct dmub_rb_cmd_edid_cea edid_cea;
@@ -5302,6 +5353,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command.
*/
struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle;
+ /**
+ * Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+ struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state;
/*
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 662c34e9495c..d9f31b191c69 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -449,6 +449,10 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index e1da270502cc..9600b7f858b0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -459,6 +459,10 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 916ed022e96b..2ccad79053c5 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -425,6 +425,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.ips_disable = params->disable_ips;
boot_options.bits.ips_sequential_ono = params->ips_sequential_ono;
boot_options.bits.disable_sldo_opt = params->disable_sldo_opt;
+ boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -502,6 +503,10 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index cf139e9cc20e..39a8cb6d7523 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -444,6 +444,10 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnost
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index d4cf7ead1d87..990fa1f19c22 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -531,4 +531,10 @@ static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigne
return arg;
}
+struct fixed31_32 dc_fixpt_from_ux_dy(unsigned int value, unsigned int integer_bits, unsigned int fractional_bits);
+struct fixed31_32 dc_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 83479951732a..a48d564d1660 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -61,6 +61,7 @@
#define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__)
#define DC_LOG_DSC(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
+#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__)
#define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index a40e6590215a..bbd259cea4f4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -134,7 +134,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
v_total = div64_u64(div64_u64(((unsigned long long)(
frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
- stream->timing.h_total), 1000000);
+ stream->timing.h_total) + 500000, 1000000);
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 1e495e884484..8bc377560787 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -432,18 +432,18 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
&input->bstatus_read, &status,
- hdcp, "bstatus_read"))
- goto out;
- if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ hdcp, "bstatus_read");
+
+ mod_hdcp_execute_and_set(check_link_integrity_dp,
&input->link_integrity_check, &status,
- hdcp, "link_integrity_check"))
- goto out;
- if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ hdcp, "link_integrity_check");
+
+ mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
&input->reauth_request_check, &status,
- hdcp, "reauth_request_check"))
- goto out;
+ hdcp, "reauth_request_check");
+
out:
return status;
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index f5b725f10a7c..3f91926a50e9 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -61,7 +61,7 @@ enum amd_apu_flags {
* acquires the list of IP blocks for the GPU in use on initialization.
* It can then operate on this list to perform standard driver operations
* such as: init, fini, suspend, resume, etc.
-*
+*
*
* IP block implementations are named using the following convention:
* <functionality>_v<version> (E.g.: gfx_v6_0).
@@ -85,7 +85,7 @@ enum amd_apu_flags {
* @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler
* @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine
* @AMD_IP_BLOCK_TYPE_VPE: Video Processing Engine
-* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Schduler for Multimedia
+* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Scheduler for Multimedia
* @AMD_IP_BLOCK_TYPE_ISP: Image Signal Processor
* @AMD_IP_BLOCK_TYPE_NUM: Total number of IP block types
*/
@@ -251,19 +251,92 @@ enum DC_FEATURE_MASK {
DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
+/**
+ * enum DC_DEBUG_MASK - Bits that are useful for debugging the Display Core IP
+ */
enum DC_DEBUG_MASK {
+ /**
+ * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting
+ */
DC_DISABLE_PIPE_SPLIT = 0x1,
+
+ /**
+ * @DC_DISABLE_STUTTER: If set, disable memory stutter mode
+ */
DC_DISABLE_STUTTER = 0x2,
+
+ /**
+ * @DC_DISABLE_DSC: If set, disable display stream compression
+ */
DC_DISABLE_DSC = 0x4,
+
+ /**
+ * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+ */
DC_DISABLE_CLOCK_GATING = 0x8,
+
+ /**
+ * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU
+ */
DC_DISABLE_PSR = 0x10,
+
+ /**
+ * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+ * if mclk switch in vblank is possible
+ */
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
+
+ /**
+ * @DC_DISABLE_MPO: If set, disable multi-plane offloading
+ */
DC_DISABLE_MPO = 0x40,
+
+ /**
+ * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA
+ */
DC_ENABLE_DPIA_TRACE = 0x80,
+
+ /**
+ * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+ * does not default to it.
+ */
DC_ENABLE_DML2 = 0x100,
+
+ /**
+ * @DC_DISABLE_PSR_SU: If set, disable PSR SU
+ */
DC_DISABLE_PSR_SU = 0x200,
+
+ /**
+ * @DC_DISABLE_REPLAY: If set, disable Panel Replay
+ */
DC_DISABLE_REPLAY = 0x400,
+
+ /**
+ * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+ * If more than one IPS debug bit is set, the lowest bit takes
+ * precedence. For example, if DC_FORCE_IPS_ENABLE and
+ * DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes
+ * precedence.
+ */
DC_DISABLE_IPS = 0x800,
+
+ /**
+ * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time,
+ * *except* when driver goes into suspend.
+ */
+ DC_DISABLE_IPS_DYNAMIC = 0x1000,
+
+ /**
+ * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if
+ * there is an enabled display. Otherwise, enable all IPS.
+ */
+ DC_DISABLE_IPS2_DYNAMIC = 0x2000,
+
+ /**
+ * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
+ */
+ DC_FORCE_IPS_ENABLE = 0x4000,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
index 8ee3149df5b7..2ef1273e65ab 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h
@@ -340,8 +340,6 @@
#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x00000009
#define UVD_LMI_CTRL__RFU_MASK 0xf8000000L
-#define UVD_LMI_CTRL__RFU_MASK 0xfc000000L
-#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001a
#define UVD_LMI_CTRL__RFU__SHIFT 0x0000001b
#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x00000015
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 6d094cf3587d..e3e635a31b8a 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -71,6 +71,11 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
+struct kfd_cu_occupancy {
+ u32 wave_cnt;
+ u32 doorbell_off;
+};
+
/**
* enum kfd_sched_policy
*
@@ -313,11 +318,18 @@ struct kfd2kgd_calls {
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data);
- void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
- int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
+ void (*get_cu_occupancy)(struct amdgpu_device *adev,
+ struct kfd_cu_occupancy *cu_occupancy,
+ int *max_waves_per_cu, uint32_t inst);
void (*program_trap_handler_settings)(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
uint32_t inst);
+ uint64_t (*hqd_get_pq_addr)(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst);
+ uint64_t (*hqd_reset)(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout);
};
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 4b20e2274313..19a48d98830a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -218,6 +218,7 @@ enum pp_mp1_state {
PP_MP1_STATE_SHUTDOWN,
PP_MP1_STATE_UNLOAD,
PP_MP1_STATE_RESET,
+ PP_MP1_STATE_FLR,
};
enum pp_df_cstate {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 8b7d6ed7e2ed..9dc82f4d7c93 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -168,7 +168,11 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (pp_funcs && pp_funcs->set_mp1_state) {
+ if (mp1_state == PP_MP1_STATE_FLR) {
+ /* VF lost access to SMU */
+ if (amdgpu_sriov_vf(adev))
+ adev->pm.dpm_enabled = false;
+ } else if (pp_funcs && pp_funcs->set_mp1_state) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_mp1_state(
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
index ca1c7ae8d146..f06b29e33ba4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
@@ -1183,6 +1183,8 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
fw_info = smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, FirmwareInfo),
&size, &frev, &crev);
+ PP_ASSERT_WITH_CODE(fw_info != NULL,
+ "Missing firmware info!", return -EINVAL);
if ((fw_info->ucTableFormatRevision == 1)
&& (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V1_4)))
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 6e717ddbb029..9ace863792d4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -2934,9 +2934,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
- vega10_enable_smc_features(hwmgr, false, feature_mask);
-
- return 0;
+ return vega10_enable_smc_features(hwmgr, false, feature_mask);
}
/**
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 2cf951184561..bb3bc68dfc39 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1257,7 +1257,6 @@ static int smu_sw_init(void *handle)
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
@@ -1265,6 +1264,7 @@ static int smu_sw_init(void *handle)
smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 0b3c2f54a343..822c6425d90e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xC
+#define SMU_METRICS_TABLE_VERSION 0xD
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -227,6 +227,10 @@ typedef struct __attribute__((packed, aligned(4))) {
// PCIE LINK Speed and width
uint32_t PCIeLinkSpeed;
uint32_t PCIeLinkWidth;
+
+ // PER XCD ACTIVITY
+ uint32_t GfxBusy[8];
+ uint64_t GfxBusyAcc[8];
} MetricsTableX_t;
typedef struct __attribute__((packed, aligned(4))) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 076620fa3ef5..16af1a329621 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1989,7 +1989,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
- "MEMLK",
+ "MEMCLK",
activity_monitor.Mem_FPS,
activity_monitor.Mem_MinFreqStep,
activity_monitor.Mem_MinActiveFreqType,
@@ -2051,7 +2051,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
activity_monitor.Soc_PD_Data_error_coeff = input[8];
activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
break;
- case 2: /* Memlk */
+ case 2: /* Memclk */
activity_monitor.Mem_FPS = input[1];
activity_monitor.Mem_MinFreqStep = input[2];
activity_monitor.Mem_MinActiveFreqType = input[3];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 0d3e1a121b67..9c3c48297cba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1691,7 +1691,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
- "MEMLK",
+ "MEMCLK",
activity_monitor->Mem_FPS,
activity_monitor->Mem_MinFreqStep,
activity_monitor->Mem_MinActiveFreqType,
@@ -1756,7 +1756,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
activity_monitor->Fclk_PD_Data_error_coeff = input[8];
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
break;
- case 2: /* Memlk */
+ case 2: /* Memclk */
activity_monitor->Mem_FPS = input[1];
activity_monitor->Mem_MinFreqStep = input[2];
activity_monitor->Mem_MinActiveFreqType = input[3];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index a887ab945dfa..1d024b122b0c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2569,10 +2569,14 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
}
}
- return smu_cmn_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
workload_mask,
NULL);
+ if (!ret)
+ smu->workload_mask = workload_mask;
+
+ return ret;
}
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 9974c9f8135e..55ed6247eb61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -2107,8 +2107,12 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
}
mutex_lock(&adev->pm.mutex);
r = smu_v13_0_6_request_i2c_xfer(smu, req);
- if (r)
- goto fail;
+ if (r) {
+ /* Retry once, in case of an i2c collision */
+ r = smu_v13_0_6_request_i2c_xfer(smu, req);
+ if (r)
+ goto fail;
+ }
for (c = i = 0; i < num_msgs; i++) {
if (!(msg[i].flags & I2C_M_RD)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 7bc95c404377..b891a5e0a396 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2501,8 +2501,11 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp
return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
1 << workload_type, NULL);
+
if (ret)
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+ else
+ smu->workload_mask = (1 << workload_type);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 09973615f210..865e916fc425 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -452,17 +452,26 @@ int smu_v14_0_init_smc_tables(struct smu_context *smu)
ret = -ENOMEM;
goto err3_out;
}
+
+ smu_table->user_overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->user_overdrive_table) {
+ ret = -ENOMEM;
+ goto err4_out;
+ }
}
smu_table->combo_pptable =
kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
if (!smu_table->combo_pptable) {
ret = -ENOMEM;
- goto err4_out;
+ goto err5_out;
}
return 0;
+err5_out:
+ kfree(smu_table->user_overdrive_table);
err4_out:
kfree(smu_table->boot_overdrive_table);
err3_out:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 2b45adecbed2..5899d01fa73d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -68,6 +68,18 @@
#define DEBUGSMC_MSG_Mode1Reset 2
#define LINK_SPEED_MAX 3
+#define PP_OD_FEATURE_GFXCLK_FMIN 0
+#define PP_OD_FEATURE_GFXCLK_FMAX 1
+#define PP_OD_FEATURE_UCLK_FMIN 2
+#define PP_OD_FEATURE_UCLK_FMAX 3
+#define PP_OD_FEATURE_GFX_VF_CURVE 4
+#define PP_OD_FEATURE_FAN_CURVE_TEMP 5
+#define PP_OD_FEATURE_FAN_CURVE_PWM 6
+#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
+#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
+#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
+#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
+
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -212,6 +224,7 @@ static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(I2C_COMMANDS),
TAB_MAP(ECCINFO),
+ TAB_MAP(OVERDRIVE),
};
static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -1040,16 +1053,97 @@ static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
value);
}
+static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
+ int od_feature_bit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+
+ return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
+}
+
+static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
+ int od_feature_bit,
+ int32_t *min,
+ int32_t *max)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+ int32_t od_min_setting, od_max_setting;
+
+ switch (od_feature_bit) {
+ case PP_OD_FEATURE_GFXCLK_FMIN:
+ od_min_setting = overdrive_lowerlimits->GfxclkFmin;
+ od_max_setting = overdrive_upperlimits->GfxclkFmin;
+ break;
+ case PP_OD_FEATURE_GFXCLK_FMAX:
+ od_min_setting = overdrive_lowerlimits->GfxclkFmax;
+ od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ break;
+ case PP_OD_FEATURE_UCLK_FMIN:
+ od_min_setting = overdrive_lowerlimits->UclkFmin;
+ od_max_setting = overdrive_upperlimits->UclkFmin;
+ break;
+ case PP_OD_FEATURE_UCLK_FMAX:
+ od_min_setting = overdrive_lowerlimits->UclkFmax;
+ od_max_setting = overdrive_upperlimits->UclkFmax;
+ break;
+ case PP_OD_FEATURE_GFX_VF_CURVE:
+ od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary[0];
+ od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary[0];
+ break;
+ case PP_OD_FEATURE_FAN_CURVE_TEMP:
+ od_min_setting = overdrive_lowerlimits->FanLinearTempPoints[0];
+ od_max_setting = overdrive_upperlimits->FanLinearTempPoints[0];
+ break;
+ case PP_OD_FEATURE_FAN_CURVE_PWM:
+ od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints[0];
+ od_max_setting = overdrive_upperlimits->FanLinearPwmPoints[0];
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
+ od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
+ od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
+ od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
+ break;
+ case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
+ od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
+ od_max_setting = overdrive_upperlimits->FanTargetTemperature;
+ break;
+ case PP_OD_FEATURE_FAN_MINIMUM_PWM:
+ od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
+ od_max_setting = overdrive_upperlimits->FanMinimumPwm;
+ break;
+ default:
+ od_min_setting = od_max_setting = INT_MAX;
+ break;
+ }
+
+ if (min)
+ *min = od_min_setting;
+ if (max)
+ *max = od_max_setting;
+}
+
static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
struct smu_14_0_dpm_table *single_dpm_table;
struct smu_14_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
int i, curr_freq, size = 0;
+ int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -1170,6 +1264,183 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
"*" : "");
break;
+ case SMU_OD_SCLK:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_GFXCLK_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ od_table->OverDriveTable.GfxclkFmin,
+ od_table->OverDriveTable.GfxclkFmax);
+ break;
+
+ case SMU_OD_MCLK:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_UCLK_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
+ od_table->OverDriveTable.UclkFmin,
+ od_table->OverDriveTable.UclkFmax);
+ break;
+
+ case SMU_OD_VDDGFX_OFFSET:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "%dmV\n",
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
+ break;
+
+ case SMU_OD_FAN_CURVE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
+ size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
+ i,
+ (int)od_table->OverDriveTable.FanLinearTempPoints[i],
+ (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
+ min_value, max_value);
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
+ min_value, max_value);
+
+ break;
+
+ case SMU_OD_ACOUSTIC_LIMIT:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_ACOUSTIC_TARGET:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_TARGET_TEMPERATURE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanTargetTemperature);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_MINIMUM_PWM:
+ if (!smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanMinimumPwm);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_RANGE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
+ !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
+ !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+
+ if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMIN,
+ &min_value,
+ NULL);
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ NULL,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMIN,
+ &min_value,
+ NULL);
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMAX,
+ NULL,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
+ min_value, max_value);
+ }
+ break;
+
default:
break;
}
@@ -1411,7 +1682,27 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- // TODO
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
+ uint32_t power_limit;
+ uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+
+ if (smu_v14_0_get_current_power_limit(smu, &power_limit))
+ power_limit = smu->adev->pm.ac_power ?
+ skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
+ skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
+
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+ if (max_power_limit)
+ *max_power_limit = msg_limit;
+
+ if (min_power_limit)
+ *min_power_limit = 0;
return 0;
}
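
The limits computed here surface through amdgpu's hwmon interface as the power1_cap, power1_cap_max and power1_cap_min attributes (values in microwatts). A minimal userspace sketch, assuming hwmon0 happens to be the GPU's hwmon device on this system:

#include <stdio.h>

int main(void)
{
	unsigned long long uw;
	/* hwmon index is an assumption; enumerate /sys/class/hwmon to find amdgpu */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_cap", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &uw) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("current power cap: %llu W\n", uw / 1000000);
	return 0;
}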
@@ -1570,10 +1861,14 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
- return smu_cmn_send_smc_msg_with_param(smu,
+ ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
1 << workload_type,
NULL);
+ if (!ret)
+ smu->workload_mask = 1 << workload_type;
+
+ return ret;
}
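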
static int smu_v14_0_2_baco_enter(struct smu_context *smu)
@@ -1917,6 +2212,594 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
+ od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
+ od_table->OverDriveTable.UclkFmax);
+}
+
+static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ int ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_OVERDRIVE,
+ 0,
+ (void *)od_table,
+ true);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+
+ return ret;
+}
+
+static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (smu_v14_0_2_is_od_feature_supported(smu,
+ PP_OD_FEATURE_FAN_CURVE_BIT))
+ adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_CURVE_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
+ OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+}
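
The OD_OPS_SUPPORT_* bits set above advertise the fan-control knobs to amdgpu's sysfs layer, which is expected to gate each gpu_od fan node on the matching bit. An illustrative consumer-side check (a sketch, not the actual amdgpu_pm.c code):

static bool fan_curve_is_supported(struct amdgpu_device *adev)
{
	return !!(adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE);
}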
+
+static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ int ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_OVERDRIVE,
+ 0,
+ (void *)od_table,
+ false);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
+
+ return ret;
+}
+
+static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
+{
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
+ OverDriveTableExternal_t *boot_od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
+ OverDriveTableExternal_t *user_od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
+ OverDriveTableExternal_t user_od_table_bak;
+ int ret;
+ int i;
+
+ ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table);
+ if (ret)
+ return ret;
+
+ smu_v14_0_2_dump_od_table(smu, boot_od_table);
+
+ memcpy(od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+
+ /*
+ * For S3/S4/Runpm resume, we need to set up those overdrive tables again,
+ * but we have to preserve the user-defined values in "user_od_table".
+ */
+ if (!smu->adev->in_suspend) {
+ memcpy(user_od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+ smu->user_dpm_profile.user_od = false;
+ } else if (smu->user_dpm_profile.user_od) {
+ memcpy(&user_od_table_bak,
+ user_od_table,
+ sizeof(OverDriveTableExternal_t));
+ memcpy(user_od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+ user_od_table->OverDriveTable.GfxclkFmin =
+ user_od_table_bak.OverDriveTable.GfxclkFmin;
+ user_od_table->OverDriveTable.GfxclkFmax =
+ user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.UclkFmin =
+ user_od_table_bak.OverDriveTable.UclkFmin;
+ user_od_table->OverDriveTable.UclkFmax =
+ user_od_table_bak.OverDriveTable.UclkFmax;
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
+ user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
+ user_od_table->OverDriveTable.FanLinearTempPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
+ user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
+ }
+ user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
+ user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
+ user_od_table->OverDriveTable.FanTargetTemperature =
+ user_od_table_bak.OverDriveTable.FanTargetTemperature;
+ user_od_table->OverDriveTable.FanMinimumPwm =
+ user_od_table_bak.OverDriveTable.FanMinimumPwm;
+ }
+
+ smu_v14_0_2_set_supported_od_feature_mask(smu);
+
+ return 0;
+}
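
The resume path above boils down to: snapshot the user table, reload the boot defaults into it, then copy back only the user-tunable fields. A standalone sketch of that pattern with illustrative field names (this is not driver code):

struct od_settings {
	int gfxclk_fmin, gfxclk_fmax;
	int uclk_fmin, uclk_fmax;
	int fan_minimum_pwm;
};

static void restore_user_fields(struct od_settings *user,
				const struct od_settings *boot)
{
	struct od_settings bak = *user;	/* snapshot user values */

	*user = *boot;			/* reset to boot defaults */
	user->gfxclk_fmin = bak.gfxclk_fmin;
	user->gfxclk_fmax = bak.gfxclk_fmax;
	user->uclk_fmin = bak.uclk_fmin;
	user->uclk_fmax = bak.uclk_fmax;
	user->fan_minimum_pwm = bak.fan_minimum_pwm;
}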
+
+static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table = table_context->overdrive_table;
+ OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
+ int res;
+
+ user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
+ BIT(PP_OD_FEATURE_UCLK_BIT) |
+ BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
+ user_od_table->OverDriveTable.FeatureCtrlMask = 0;
+ if (res == 0)
+ memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
+
+ return res;
+}
+
+static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *boot_overdrive_table =
+ (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ switch (input) {
+ case PP_OD_EDIT_FAN_CURVE:
+ for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
+ od_table->OverDriveTable.FanLinearTempPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
+ od_table->OverDriveTable.FanLinearPwmPoints[i] =
+ boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
+ }
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ od_table->OverDriveTable.AcousticLimitRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ od_table->OverDriveTable.AcousticTargetRpmThreshold =
+ boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ od_table->OverDriveTable.FanTargetTemperature =
+ boot_overdrive_table->OverDriveTable.FanTargetTemperature;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ od_table->OverDriveTable.FanMinimumPwm =
+ boot_overdrive_table->OverDriveTable.FanMinimumPwm;
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+ default:
+ dev_info(adev->dev, "Invalid table index: %ld\n", input);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[],
+ uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t offset_of_voltageoffset;
+ int32_t minimum, maximum;
+ uint32_t feature_ctrlmask;
+ int i, ret = 0;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+ dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMIN,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.GfxclkFmin = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ break;
+
+ case 1:
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ break;
+
+ default:
+ dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ }
+
+ if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
+ dev_err(adev->dev,
+ "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
+ (uint32_t)od_table->OverDriveTable.GfxclkFmin,
+ (uint32_t)od_table->OverDriveTable.GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
+ dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMIN,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.UclkFmin = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
+ break;
+
+ case 1:
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.UclkFmax = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
+ break;
+
+ default:
+ dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ }
+
+ if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
+ dev_err(adev->dev,
+ "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
+ (uint32_t)od_table->OverDriveTable.UclkFmin,
+ (uint32_t)od_table->OverDriveTable.UclkFmax);
+ return -EINVAL;
+ }
+ break;
+
+ case PP_OD_EDIT_VDDGFX_OFFSET:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
+ dev_warn(adev->dev, "Gfx offset setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_CURVE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
+ input[0] < 0)
+ return -EINVAL;
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_TEMP,
+ &minimum,
+ &maximum);
+ if (input[1] < minimum ||
+ input[1] > maximum) {
+ dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
+ input[1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_CURVE_PWM,
+ &minimum,
+ &maximum);
+ if (input[2] < minimum ||
+ input[2] > maximum) {
+ dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
+ input[2], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
+ od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
+ od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_LIMIT:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_ACOUSTIC_TARGET:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanTargetTemperature = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_MINIMUM_PWM:
+ if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
+ dev_warn(adev->dev, "Fan curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_MINIMUM_PWM,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanMinimumPwm = input[0];
+ od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size == 1) {
+ ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
+ if (ret)
+ return ret;
+ } else {
+ feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
+ memcpy(od_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t));
+ od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ }
+ fallthrough;
+ case PP_OD_COMMIT_DPM_TABLE:
+ /*
+ * The `uint32_t FeatureCtrlMask` member below tells the PMFW
+ * which settings this single operation targets.
+ * It does not carry the actual values of the user's custom
+ * settings, so we do not cache it.
+ */
+ offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
+ if (memcmp((u8 *)od_table + offset_of_voltageoffset,
+ table_context->user_overdrive_table + offset_of_voltageoffset,
+ sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
+ smu_v14_0_2_dump_od_table(smu, od_table);
+
+ ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ od_table->OverDriveTable.FeatureCtrlMask = 0;
+ memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
+ (u8 *)od_table + offset_of_voltageoffset,
+ sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
+
+ if (!memcmp(table_context->user_overdrive_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t)))
+ smu->user_dpm_profile.user_od = false;
+ else
+ smu->user_dpm_profile.user_od = true;
+ }
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
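
These PP_OD_EDIT_* commands are driven from amdgpu's documented pp_od_clk_voltage sysfs file: "s <index> <MHz>" and "m <index> <MHz>" lines stage SCLK/MCLK limits, "c" commits them and "r" restores the defaults. A hedged userspace sketch (card0 and the clock values are assumptions):

#include <stdio.h>

static int od_write(const char *cmd)
{
	FILE *f = fopen("/sys/class/drm/card0/device/pp_od_clk_voltage", "w");

	if (!f)
		return -1;
	fputs(cmd, f);		/* each write carries one command */
	return fclose(f);
}

int main(void)
{
	od_write("s 0 500\n");	/* stage SCLK min = 500 MHz */
	od_write("s 1 2500\n");	/* stage SCLK max = 2500 MHz */
	return od_write("c\n");	/* commit -> PP_OD_COMMIT_DPM_TABLE */
}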
+
+static int smu_v14_0_2_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v14_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
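
As a worked example of the overdrive branch above (illustrative values): with a stock msg_limit of 220 W and a requested cap of 242 W, Ppt is programmed as (242 * 100) / 220 - 100 = 10, i.e. a +10% boost over the stock socket power limit.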
+
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
@@ -1955,13 +2838,16 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
.get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
+ .set_default_od_settings = smu_v14_0_2_set_default_od_settings,
+ .restore_user_od_settings = smu_v14_0_2_restore_user_od_settings,
+ .od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table,
.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
.set_performance_level = smu_v14_0_set_performance_level,
.gfx_off_control = smu_v14_0_gfx_off_control,
.get_unique_id = smu_v14_0_2_get_unique_id,
.get_power_limit = smu_v14_0_2_get_power_limit,
- .set_power_limit = smu_v14_0_set_power_limit,
+ .set_power_limit = smu_v14_0_2_set_power_limit,
.set_power_source = smu_v14_0_set_power_source,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
.set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index d794c076bc24..47da848fa3fc 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -11,6 +11,8 @@ ast-y := \
ast_main.o \
ast_mm.o \
ast_mode.o \
- ast_post.o
+ ast_post.o \
+ ast_sil164.o \
+ ast_vga.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index e6c7f0d64e99..00b364f9a71e 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -4,87 +4,94 @@
#include <linux/firmware.h>
#include <linux/delay.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "ast_drv.h"
-bool ast_astdp_is_connected(struct ast_device *ast)
+static bool ast_astdp_is_connected(struct ast_device *ast)
{
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING))
- return false;
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))
- return false;
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS))
+ if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD))
return false;
return true;
}
-int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
+static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
- struct ast_device *ast = to_ast_device(dev);
- u8 i = 0, j = 0;
+ struct ast_device *ast = data;
+ size_t rdlen = round_up(len, 4);
+ int ret = 0;
+ unsigned int i;
+
+ if (block > 0)
+ return -EIO; /* extension headers not supported */
/*
- * CRD1[b5]: DP MCU FW is executing
- * CRDC[b0]: DP link success
- * CRDF[b0]: DP HPD
- * CRE5[b0]: Host reading EDID process is done
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
*/
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- ASTDP_HOST_EDID_READ_DONE_MASK))) {
- goto err_astdp_edid_not_ready;
- }
+ mutex_lock(&ast->modeset_lock);
+
+ /* Start reading EDID data */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, (u8)~AST_IO_VGACRE5_EDID_READ_DONE, 0x00);
+
+ for (i = 0; i < rdlen; i += 4) {
+ unsigned int offset;
+ unsigned int j;
+ u8 ediddata[4];
+ u8 vgacre4;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- 0x00);
+ offset = (i + block * EDID_LENGTH) / 4;
+ if (offset >= 64) {
+ ret = -EIO;
+ goto out;
+ }
+ vgacre4 = offset;
- for (i = 0; i < 32; i++) {
/*
* CRE4[7:0]: Read-Pointer for EDID (Unit: 4bytes); valid range: 0~64
*/
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE4,
- ASTDP_AND_CLEAR_MASK, (u8)i);
- j = 0;
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe4, vgacre4);
/*
* CRD7[b0]: valid flag for EDID
* CRD6[b0]: mirror read pointer for EDID
*/
- while ((ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD7,
- ASTDP_EDID_VALID_FLAG_MASK) != 0x01) ||
- (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD6,
- ASTDP_EDID_READ_POINTER_MASK) != i)) {
+ for (j = 0; j < 200; ++j) {
+ u8 vgacrd7, vgacrd6;
+
/*
 * Delays get longer with each retry.
- * 1. The Delays are often 2 loops when users request "Display Settings"
+ *
+ * 1. No delay on the first try.
+ * 2. The delays are often 2 loops when users request "Display Settings"
 * via right-click of the mouse.
- * 2. The Delays are often longer a lot when system resume from S3/S4.
+ * 3. The delays are often much longer when the system resumes from S3/S4.
*/
- mdelay(j+1);
-
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1,
- ASTDP_MCU_FW_EXECUTING) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC,
- ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))) {
- goto err_astdp_jump_out_loop_of_edid;
+ if (j)
+ mdelay(j + 1);
+
+ /* Wait for EDID offset to show up in mirror register */
+ vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7);
+ if (vgacrd7 & AST_IO_VGACRD7_EDID_VALID_FLAG) {
+ vgacrd6 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd6);
+ if (vgacrd6 == offset)
+ break;
}
-
- j++;
- if (j > 200)
- goto err_astdp_jump_out_loop_of_edid;
+ }
+ if (j == 200) {
+ ret = -EBUSY;
+ goto out;
}
- *(ediddata) = ast_get_index_reg_mask(ast, AST_IO_VGACRI,
- 0xD8, ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 1) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD9,
- ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 2) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDA,
- ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 3) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDB,
- ASTDP_EDID_READ_DATA_MASK);
+ ediddata[0] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd8);
+ ediddata[1] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd9);
+ ediddata[2] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xda);
+ ediddata[3] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xdb);
if (i == 31) {
/*
@@ -96,69 +103,53 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
 * Byte 126 indicates the number of extensions to
 * follow. 0 represents no extensions.
*/
- *(ediddata + 3) = *(ediddata + 3) + *(ediddata + 2);
- *(ediddata + 2) = 0;
+ ediddata[3] = ediddata[3] + ediddata[2];
+ ediddata[2] = 0;
}
- ediddata += 4;
+ memcpy(buf, ediddata, min((len - i), 4));
+ buf += 4;
}
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
+out:
+ /* Signal end of reading */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, (u8)~AST_IO_VGACRE5_EDID_READ_DONE,
+ AST_IO_VGACRE5_EDID_READ_DONE);
- return 0;
+ mutex_unlock(&ast->modeset_lock);
-err_astdp_jump_out_loop_of_edid:
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
- return (~(j+256) + 1);
-
-err_astdp_edid_not_ready:
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING)))
- return (~0xD1 + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS)))
- return (~0xDC + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)))
- return (~0xDF + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, ASTDP_HOST_EDID_READ_DONE_MASK)))
- return (~0xE5 + 1);
-
- return 0;
+ return ret;
}
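
The function above follows the block-read contract of drm_edid_read_custom(): fill len bytes of EDID block 'block' into buf and return 0, or a negative errno on failure. A skeleton of such a callback, with my_device and my_hw_read as hypothetical stand-ins:

static int my_read_edid_block(void *context, u8 *buf,
			      unsigned int block, size_t len)
{
	struct my_device *mydev = context;	/* hypothetical context type */

	if (block > 0)
		return -EIO;	/* base block only, as in the AST callback */

	/* hypothetical hardware accessor */
	return my_hw_read(mydev, buf, block * EDID_LENGTH, len);
}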
/*
* Launch Aspeed DP
*/
-void ast_dp_launch(struct drm_device *dev)
+int ast_dp_launch(struct ast_device *ast)
{
- u32 i = 0;
- u8 bDPExecute = 1;
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
+ unsigned int i = 10;
- // Wait one second then timeout.
- while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
- ASTDP_MCU_FW_EXECUTING) {
- i++;
- // wait 100 ms
- msleep(100);
+ while (i) {
+ u8 vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
- if (i >= 10) {
- // DP would not be ready.
- bDPExecute = 0;
+ if (vgacrd1 & AST_IO_VGACRD1_MCU_FW_EXECUTING)
break;
- }
+ --i;
+ msleep(100);
}
-
- if (!bDPExecute)
+ if (!i) {
drm_err(dev, "Wait DPMCU executing timeout\n");
+ return -ENODEV;
+ }
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5,
+ (u8) ~AST_IO_VGACRE5_EDID_READ_DONE,
+ AST_IO_VGACRE5_EDID_READ_DONE);
+
+ return 0;
}
-bool ast_dp_power_is_on(struct ast_device *ast)
+static bool ast_dp_power_is_on(struct ast_device *ast)
{
u8 vgacre3;
@@ -167,7 +158,7 @@ bool ast_dp_power_is_on(struct ast_device *ast)
return !(vgacre3 & AST_DP_PHY_SLEEP);
}
-void ast_dp_power_on_off(struct drm_device *dev, bool on)
+static void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
// Read and Turn off DP PHY sleep
@@ -179,11 +170,29 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on)
// DP Power on/off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3);
+
+ msleep(50);
}
+static void ast_dp_link_training(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ u8 vgacrdc;
+
+ if (i)
+ msleep(100);
+ vgacrdc = ast_get_index_reg(ast, AST_IO_VGACRI, 0xdc);
+ if (vgacrdc & AST_IO_VGACRDC_LINK_SUCCESS)
+ return;
+ }
+ drm_err(dev, "Link training failed\n");
+}
-void ast_dp_set_on_off(struct drm_device *dev, bool on)
+static void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
@@ -192,21 +201,17 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
- // If DP plug in and link successful then check video on / off status
- if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)) {
- video_on_off <<= 4;
- while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF,
+ video_on_off <<= 4;
+ while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF,
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
- // wait 1 ms
- mdelay(1);
- if (++i > 200)
- break;
- }
+ // wait 1 ms
+ mdelay(1);
+ if (++i > 200)
+ break;
}
}
-void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
+static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
{
struct ast_device *ast = to_ast_device(crtc->dev);
@@ -279,3 +284,188 @@ void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mo
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE1, ASTDP_AND_CLEAR_MASK, ASTDP_MISC1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE2, ASTDP_AND_CLEAR_MASK, ModeIdx);
}
+
+static void ast_wait_for_vretrace(struct ast_device *ast)
+{
+ unsigned long timeout = jiffies + HZ;
+ u8 vgair1;
+
+ do {
+ vgair1 = ast_io_read8(ast, AST_IO_VGAIR1_R);
+ } while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout));
+}
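
The helper above is a bounded busy-wait on the vertical-retrace status bit. A sketch of the same wait expressed with the generic iopoll helper, assuming a sleeping context (one-second timeout, polling every millisecond):

#include <linux/iopoll.h>

static int ast_wait_for_vretrace_poll(struct ast_device *ast)
{
	u8 vgair1;

	return read_poll_timeout(ast_io_read8, vgair1,
				 vgair1 & AST_IO_VGAIR1_VREFRESH,
				 1000, USEC_PER_SEC, false,
				 ast, AST_IO_VGAIR1_R);
}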
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs ast_astdp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
+ struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
+
+ ast_dp_set_mode(crtc, vbios_mode_info);
+}
+
+static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = encoder->dev;
+ struct ast_device *ast = to_ast_device(dev);
+ struct ast_connector *ast_connector = &ast->output.astdp.connector;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ ast_dp_power_on_off(dev, AST_DP_POWER_ON);
+ ast_dp_link_training(ast);
+
+ ast_wait_for_vretrace(ast);
+ ast_dp_set_on_off(dev, 1);
+ }
+}
+
+static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = encoder->dev;
+
+ ast_dp_set_on_off(dev, 0);
+ ast_dp_power_on_off(dev, AST_DP_POWER_OFF);
+}
+
+static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = {
+ .atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set,
+ .atomic_enable = ast_astdp_encoder_helper_atomic_enable,
+ .atomic_disable = ast_astdp_encoder_helper_atomic_disable,
+};
+
+/*
+ * Connector
+ */
+
+static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ int count;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ struct ast_device *ast = to_ast_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ drm_edid = drm_edid_read_custom(connector, ast_astdp_read_edid_block, ast);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+ } else {
+ drm_edid_connector_update(connector, NULL);
+
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, 4096, 4096);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct drm_device *dev = connector->dev;
+ struct ast_device *ast = to_ast_device(connector->dev);
+ enum drm_connector_status status = connector_status_disconnected;
+ bool power_is_on;
+
+ mutex_lock(&ast->modeset_lock);
+
+ power_is_on = ast_dp_power_is_on(ast);
+ if (!power_is_on)
+ ast_dp_power_on_off(dev, true);
+
+ if (ast_astdp_is_connected(ast))
+ status = connector_status_connected;
+
+ if (!power_is_on && status == connector_status_disconnected)
+ ast_dp_power_on_off(dev, false);
+
+ mutex_unlock(&ast->modeset_lock);
+
+ if (status != ast_connector->physical_status)
+ ++connector->epoch_counter;
+ ast_connector->physical_status = status;
+
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
+ .get_modes = ast_astdp_connector_helper_get_modes,
+ .detect_ctx = ast_astdp_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs ast_astdp_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
+}
+
+int ast_astdp_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.astdp.encoder;
+ struct ast_connector *ast_connector = &ast->output.astdp.connector;
+ struct drm_connector *connector = &ast_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ drm_encoder_helper_add(encoder, &ast_astdp_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_astdp_connector_init(dev, connector);
+ if (ret)
+ return ret;
+ ast_connector->physical_status = connector->status;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
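
A sketch of how this init function would be selected alongside the other outputs during mode-setting initialization, assuming the driver's usual dispatch on the detected TX chip (the surrounding context is illustrative):

	if (ast->tx_chip_types & AST_TX_NONE_BIT) {
		ret = ast_vga_output_init(ast);
		if (ret)
			return ret;
	}
	if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
		ret = ast_sil164_output_init(ast);
		if (ret)
			return ret;
	}
	if (ast->tx_chip_types & AST_TX_DP501_BIT) {
		ret = ast_dp501_output_init(ast);
		if (ret)
			return ret;
	}
	if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
		ret = ast_astdp_output_init(ast);
		if (ret)
			return ret;
	}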
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 9a4c3a0963f9..e4c636f45082 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -4,6 +4,11 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
#include "ast_drv.h"
MODULE_FIRMWARE("ast_dp501_fw.bin");
@@ -170,7 +175,7 @@ static void clear_cmd(struct ast_device *ast)
}
#endif
-void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
+static void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
{
ast_write_cmd(dev, 0x40);
ast_write_data(dev, mode);
@@ -272,7 +277,7 @@ static bool ast_launch_m68k(struct drm_device *dev)
return true;
}
-bool ast_dp501_is_connected(struct ast_device *ast)
+static bool ast_dp501_is_connected(struct ast_device *ast)
{
u32 boot_address, offset, data;
@@ -313,32 +318,30 @@ bool ast_dp501_is_connected(struct ast_device *ast)
return true;
}
-bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+static int ast_dp501_read_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
- struct ast_device *ast = to_ast_device(dev);
- u32 i, boot_address, offset, data;
- u32 *pEDIDidx;
+ struct ast_device *ast = data;
+ size_t rdlen = round_up(len, 4);
+ u32 i, boot_address, offset, ediddata;
- if (!ast_dp501_is_connected(ast))
- return false;
+ if (block > (512 / EDID_LENGTH))
+ return -EIO;
+
+ offset = AST_DP501_EDID_DATA + block * EDID_LENGTH;
if (ast->config_mode == ast_use_p2a) {
boot_address = get_fw_base(ast);
- /* Read EDID */
- offset = AST_DP501_EDID_DATA;
- for (i = 0; i < 128; i += 4) {
- data = ast_mindwm(ast, boot_address + offset + i);
- pEDIDidx = (u32 *)(ediddata + i);
- *pEDIDidx = data;
+ for (i = 0; i < rdlen; i += 4) {
+ ediddata = ast_mindwm(ast, boot_address + offset + i);
+ memcpy(buf, &ediddata, min((len - i), 4));
+ buf += 4;
}
} else {
- /* Read EDID */
- offset = AST_DP501_EDID_DATA;
- for (i = 0; i < 128; i += 4) {
- data = readl(ast->dp501_fw_buf + offset + i);
- pEDIDidx = (u32 *)(ediddata + i);
- *pEDIDidx = data;
+ for (i = 0; i < rdlen; i += 4) {
+ ediddata = readl(ast->dp501_fw_buf + offset + i);
+ memcpy(buf, &ediddata, min((len - i), 4));
+ buf += 4;
}
}
@@ -470,3 +473,144 @@ void ast_init_3rdtx(struct drm_device *dev)
}
}
}
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs ast_dp501_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = encoder->dev;
+
+ ast_set_dp501_video_output(dev, 1);
+}
+
+static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = encoder->dev;
+
+ ast_set_dp501_video_output(dev, 0);
+}
+
+static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = {
+ .atomic_enable = ast_dp501_encoder_helper_atomic_enable,
+ .atomic_disable = ast_dp501_encoder_helper_atomic_disable,
+};
+
+/*
+ * Connector
+ */
+
+static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ int count;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ struct ast_device *ast = to_ast_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ drm_edid = drm_edid_read_custom(connector, ast_dp501_read_edid_block, ast);
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+ } else {
+ drm_edid_connector_update(connector, NULL);
+
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, 4096, 4096);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct ast_device *ast = to_ast_device(connector->dev);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ if (ast_dp501_is_connected(ast))
+ status = connector_status_connected;
+
+ if (status != ast_connector->physical_status)
+ ++connector->epoch_counter;
+ ast_connector->physical_status = status;
+
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
+ .get_modes = ast_dp501_connector_helper_get_modes,
+ .detect_ctx = ast_dp501_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs ast_dp501_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
+}
+
+int ast_dp501_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.dp501.encoder;
+ struct ast_connector *ast_connector = &ast->output.dp501.connector;
+ struct drm_connector *connector = &ast_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ drm_encoder_helper_add(encoder, &ast_dp501_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_dp501_connector_init(dev, connector);
+ if (ret)
+ return ret;
+ ast_connector->physical_status = connector->status;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 225817087b4d..3a908bb015fe 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -287,9 +287,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- regs = pcim_iomap(pdev, 1, 0);
- if (!regs)
- return -EIO;
+ regs = pcim_iomap_region(pdev, 1, "ast");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
if (pdev->revision >= 0x40) {
/*
@@ -311,9 +311,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (len < AST_IO_MM_LENGTH)
return -EIO;
- ioregs = pcim_iomap(pdev, 2, 0);
- if (!ioregs)
- return -EIO;
+ ioregs = pcim_iomap_region(pdev, 2, "ast");
+ if (IS_ERR(ioregs))
+ return PTR_ERR(ioregs);
} else {
/*
* Anything else is best effort.
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 47bab5596c16..91fe07cf7b07 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -147,18 +147,19 @@ static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
}
/*
- * BMC
+ * Connector
*/
-struct ast_bmc_connector {
+struct ast_connector {
struct drm_connector base;
- struct drm_connector *physical_connector;
+
+ enum drm_connector_status physical_status;
};
-static inline struct ast_bmc_connector *
-to_ast_bmc_connector(struct drm_connector *connector)
+static inline struct ast_connector *
+to_ast_connector(struct drm_connector *connector)
{
- return container_of(connector, struct ast_bmc_connector, base);
+ return container_of(connector, struct ast_connector, base);
}
/*
@@ -192,24 +193,20 @@ struct ast_device {
struct {
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_connector connector;
} vga;
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_connector connector;
} sil164;
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_connector connector;
} dp501;
struct {
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct ast_connector connector;
} astdp;
- struct {
- struct drm_encoder encoder;
- struct ast_bmc_connector bmc_connector;
- } bmc;
} output;
bool support_wide_screen;
@@ -460,21 +457,17 @@ void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
void ast_patch_ahb_2500(void __iomem *regs);
+
+int ast_vga_output_init(struct ast_device *ast);
+int ast_sil164_output_init(struct ast_device *ast);
+
/* ast dp501 */
-void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
-bool ast_dp501_is_connected(struct ast_device *ast);
-bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
-u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
+int ast_dp501_output_init(struct ast_device *ast);
/* aspeed DP */
-bool ast_astdp_is_connected(struct ast_device *ast);
-int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
-void ast_dp_launch(struct drm_device *dev);
-bool ast_dp_power_is_on(struct ast_device *ast);
-void ast_dp_power_on_off(struct drm_device *dev, bool no);
-void ast_dp_set_on_off(struct drm_device *dev, bool no);
-void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
+int ast_dp_launch(struct ast_device *ast);
+int ast_astdp_output_init(struct ast_device *ast);
#endif
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 0637abb70361..d836f2a4f9f3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -115,8 +115,10 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
} else if (IS_AST_GEN7(ast)) {
if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) ==
ASTDP_DPMCU_TX) {
- ast->tx_chip_types = AST_TX_ASTDP_BIT;
- ast_dp_launch(&ast->base);
+ int ret = ast_dp_launch(ast);
+
+ if (!ret)
+ ast->tx_chip_types = AST_TX_ASTDP_BIT;
}
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 049ee1477c33..ed496fb32bf3 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -34,10 +34,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
@@ -47,7 +45,6 @@
#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
-#include "ast_ddc.h"
#include "ast_drv.h"
#include "ast_tables.h"
@@ -1311,571 +1308,6 @@ static int ast_crtc_init(struct drm_device *dev)
}
/*
- * VGA Encoder
- */
-
-static const struct drm_encoder_funcs ast_vga_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-/*
- * VGA Connector
- */
-
-static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
- .get_modes = drm_connector_helper_get_modes,
- .detect_ctx = drm_connector_helper_detect_from_ddc,
-};
-
-static const struct drm_connector_funcs ast_vga_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- struct ast_device *ast = to_ast_device(dev);
- struct i2c_adapter *ddc;
- int ret;
-
- ddc = ast_ddc_create(ast);
- if (IS_ERR(ddc)) {
- ret = PTR_ERR(ddc);
- drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
- return ret;
- }
-
- ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA, ddc);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
-static int ast_vga_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.vga.encoder;
- struct drm_connector *connector = &ast->output.vga.connector;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_vga_connector_init(dev, connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * SIL164 Encoder
- */
-
-static const struct drm_encoder_funcs ast_sil164_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-/*
- * SIL164 Connector
- */
-
-static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
- .get_modes = drm_connector_helper_get_modes,
- .detect_ctx = drm_connector_helper_detect_from_ddc,
-};
-
-static const struct drm_connector_funcs ast_sil164_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- struct ast_device *ast = to_ast_device(dev);
- struct i2c_adapter *ddc;
- int ret;
-
- ddc = ast_ddc_create(ast);
- if (IS_ERR(ddc)) {
- ret = PTR_ERR(ddc);
- drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
- return ret;
- }
-
- ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
- DRM_MODE_CONNECTOR_DVII, ddc);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
-static int ast_sil164_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.sil164.encoder;
- struct drm_connector *connector = &ast->output.sil164.connector;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_sil164_connector_init(dev, connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * DP501 Encoder
- */
-
-static const struct drm_encoder_funcs ast_dp501_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = encoder->dev;
-
- ast_set_dp501_video_output(dev, 1);
-}
-
-static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = encoder->dev;
-
- ast_set_dp501_video_output(dev, 0);
-}
-
-static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = {
- .atomic_enable = ast_dp501_encoder_helper_atomic_enable,
- .atomic_disable = ast_dp501_encoder_helper_atomic_disable,
-};
-
-/*
- * DP501 Connector
- */
-
-static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector)
-{
- void *edid;
- bool succ;
- int count;
-
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (!edid)
- goto err_drm_connector_update_edid_property;
-
- succ = ast_dp501_read_edid(connector->dev, edid);
- if (!succ)
- goto err_kfree;
-
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-
-err_kfree:
- kfree(edid);
-err_drm_connector_update_edid_property:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
-static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct ast_device *ast = to_ast_device(connector->dev);
-
- if (ast_dp501_is_connected(ast))
- return connector_status_connected;
- return connector_status_disconnected;
-}
-
-static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
- .get_modes = ast_dp501_connector_helper_get_modes,
- .detect_ctx = ast_dp501_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs ast_dp501_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- int ret;
-
- ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
-static int ast_dp501_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.dp501.encoder;
- struct drm_connector *connector = &ast->output.dp501.connector;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret)
- return ret;
- drm_encoder_helper_add(encoder, &ast_dp501_encoder_helper_funcs);
-
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_dp501_connector_init(dev, connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * ASPEED Display-Port Encoder
- */
-
-static const struct drm_encoder_funcs ast_astdp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct drm_crtc *crtc = crtc_state->crtc;
- struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
- struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
-
- ast_dp_set_mode(crtc, vbios_mode_info);
-}
-
-static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = encoder->dev;
- struct ast_device *ast = to_ast_device(dev);
-
- ast_dp_power_on_off(dev, AST_DP_POWER_ON);
- ast_wait_for_vretrace(ast);
- ast_dp_set_on_off(dev, 1);
-}
-
-static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = encoder->dev;
-
- ast_dp_set_on_off(dev, 0);
- ast_dp_power_on_off(dev, AST_DP_POWER_OFF);
-}
-
-static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = {
- .atomic_mode_set = ast_astdp_encoder_helper_atomic_mode_set,
- .atomic_enable = ast_astdp_encoder_helper_atomic_enable,
- .atomic_disable = ast_astdp_encoder_helper_atomic_disable,
-};
-
-/*
- * ASPEED Display-Port Connector
- */
-
-static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
-{
- void *edid;
- struct drm_device *dev = connector->dev;
- struct ast_device *ast = to_ast_device(dev);
-
- int succ;
- int count;
-
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (!edid)
- goto err_drm_connector_update_edid_property;
-
- /*
- * Protect access to I/O registers from concurrent modesetting
- * by acquiring the I/O-register lock.
- */
- mutex_lock(&ast->modeset_lock);
-
- succ = ast_astdp_read_edid(connector->dev, edid);
- if (succ < 0)
- goto err_mutex_unlock;
-
- mutex_unlock(&ast->modeset_lock);
-
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-
-err_mutex_unlock:
- mutex_unlock(&ast->modeset_lock);
- kfree(edid);
-err_drm_connector_update_edid_property:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
-static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct drm_device *dev = connector->dev;
- struct ast_device *ast = to_ast_device(connector->dev);
- enum drm_connector_status status = connector_status_disconnected;
- struct drm_connector_state *connector_state = connector->state;
- bool is_active = false;
-
- mutex_lock(&ast->modeset_lock);
-
- if (connector_state && connector_state->crtc) {
- struct drm_crtc_state *crtc_state = connector_state->crtc->state;
-
- if (crtc_state && crtc_state->active)
- is_active = true;
- }
-
- if (!is_active && !ast_dp_power_is_on(ast)) {
- ast_dp_power_on_off(dev, true);
- msleep(50);
- }
-
- if (ast_astdp_is_connected(ast))
- status = connector_status_connected;
-
- if (!is_active && status == connector_status_disconnected)
- ast_dp_power_on_off(dev, false);
-
- mutex_unlock(&ast->modeset_lock);
-
- return status;
-}
-
-static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
- .get_modes = ast_astdp_connector_helper_get_modes,
- .detect_ctx = ast_astdp_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs ast_astdp_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- int ret;
-
- ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
-static int ast_astdp_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.astdp.encoder;
- struct drm_connector *connector = &ast->output.astdp.connector;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret)
- return ret;
- drm_encoder_helper_add(encoder, &ast_astdp_encoder_helper_funcs);
-
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_astdp_connector_init(dev, connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * BMC virtual Connector
- */
-
-static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector);
- struct drm_connector *physical_connector = bmc_connector->physical_connector;
-
- /*
- * Most user-space compositors cannot handle more than one connected
- * connector per CRTC. Hence, we only mark the BMC as connected if the
- * physical connector is disconnected. If the physical connector's status
- * is connected or unknown, the BMC remains disconnected. This has no
- * effect on the output of the BMC.
- *
- * FIXME: Remove this logic once user-space compositors can handle more
- * than one connector per CRTC. The BMC should always be connected.
- */
-
- if (physical_connector && physical_connector->status == connector_status_disconnected)
- return connector_status_connected;
-
- return connector_status_disconnected;
-}
-
-static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
-{
- return drm_add_modes_noedid(connector, 4096, 4096);
-}
-
-static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
- .get_modes = ast_bmc_connector_helper_get_modes,
- .detect_ctx = ast_bmc_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs ast_bmc_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int ast_bmc_connector_init(struct drm_device *dev,
- struct ast_bmc_connector *bmc_connector,
- struct drm_connector *physical_connector)
-{
- struct drm_connector *connector = &bmc_connector->base;
- int ret;
-
- ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
-
- bmc_connector->physical_connector = physical_connector;
-
- return 0;
-}
-
-static int ast_bmc_output_init(struct ast_device *ast,
- struct drm_connector *physical_connector)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.bmc.encoder;
- struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector;
- struct drm_connector *connector = &bmc_connector->base;
- int ret;
-
- ret = drm_encoder_init(dev, encoder,
- &ast_bmc_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, "ast_bmc");
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
* Mode config
*/
@@ -1926,7 +1358,6 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
- struct drm_connector *physical_connector = NULL;
int ret;
ret = drmm_mutex_init(dev, &ast->modeset_lock);
@@ -1971,29 +1402,22 @@ int ast_mode_config_init(struct ast_device *ast)
ret = ast_vga_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.vga.connector;
}
if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.sil164.connector;
}
if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.dp501.connector;
}
if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ret = ast_astdp_output_init(ast);
if (ret)
return ret;
- physical_connector = &ast->output.astdp.connector;
}
- ret = ast_bmc_output_init(ast, physical_connector);
- if (ret)
- return ret;
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 22f548805dfb..65755798ab94 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -351,7 +351,7 @@ void ast_post_gpu(struct drm_device *dev)
if (IS_AST_GEN7(ast)) {
if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
- ast_dp_launch(dev);
+ ast_dp_launch(ast);
} else if (ast->config_mode == ast_use_p2a) {
if (IS_AST_GEN6(ast))
ast_post_chip_2500(dev);
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 75671d345057..040961cc1a19 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -37,6 +37,12 @@
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
+#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5)
+#define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0)
+#define AST_IO_VGACRDC_LINK_SUCCESS BIT(0)
+#define AST_IO_VGACRDF_HPD BIT(0)
+#define AST_IO_VGACRE5_EDID_READ_DONE BIT(0)
+
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
@@ -67,18 +73,6 @@
#define AST_DP_VIDEO_ENABLE BIT(0)
/*
- * CRD1[b5]: DP MCU FW is executing
- * CRDC[b0]: DP link success
- * CRDF[b0]: DP HPD
- * CRE5[b0]: Host reading EDID process is done
- */
-#define ASTDP_MCU_FW_EXECUTING BIT(5)
-#define ASTDP_LINK_SUCCESS BIT(0)
-#define ASTDP_HPD BIT(0)
-#define ASTDP_HOST_EDID_READ_DONE BIT(0)
-#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0)
-
-/*
* CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE
* Precondition: A. ~AST_DP_PHY_SLEEP &&
* B. DP_HPD &&
@@ -86,10 +80,6 @@
*/
#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4)
-#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0)
-#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0)
-#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0)
-
/*
* ASTDP setmode registers:
 * CRE0[7:0]: MISC0 (0x00: 18-bpp or 0x20: 24-bpp)
diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c
new file mode 100644
index 000000000000..496c7120e515
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_sil164.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "ast_ddc.h"
+#include "ast_drv.h"
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs ast_sil164_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Connector
+ */
+
+static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ int count;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ count = drm_connector_helper_get_modes(connector);
+ } else {
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, 4096, 4096);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+static int ast_sil164_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ enum drm_connector_status status;
+
+ status = drm_connector_helper_detect_from_ddc(connector, ctx, force);
+
+ if (status != ast_connector->physical_status)
+ ++connector->epoch_counter;
+ ast_connector->physical_status = status;
+
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = {
+ .get_modes = ast_sil164_connector_helper_get_modes,
+ .detect_ctx = ast_sil164_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs ast_sil164_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+ struct ast_device *ast = to_ast_device(dev);
+ struct i2c_adapter *ddc;
+ int ret;
+
+ ddc = ast_ddc_create(ast);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII, ddc);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
+}
+
+int ast_sil164_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.sil164.encoder;
+ struct ast_connector *ast_connector = &ast->output.sil164.connector;
+ struct drm_connector *connector = &ast_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_sil164_connector_init(dev, connector);
+ if (ret)
+ return ret;
+ ast_connector->physical_status = connector->status;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c
new file mode 100644
index 000000000000..3e815da43fbd
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_vga.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "ast_ddc.h"
+#include "ast_drv.h"
+
+/*
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs ast_vga_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Connector
+ */
+
+static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ int count;
+
+ if (ast_connector->physical_status == connector_status_connected) {
+ count = drm_connector_helper_get_modes(connector);
+ } else {
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, 4096, 4096);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+static int ast_vga_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ enum drm_connector_status status;
+
+ status = drm_connector_helper_detect_from_ddc(connector, ctx, force);
+
+ if (status != ast_connector->physical_status)
+ ++connector->epoch_counter;
+ ast_connector->physical_status = status;
+
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = {
+ .get_modes = ast_vga_connector_helper_get_modes,
+ .detect_ctx = ast_vga_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs ast_vga_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector)
+{
+ struct ast_device *ast = to_ast_device(dev);
+ struct i2c_adapter *ddc;
+ int ret;
+
+ ddc = ast_ddc_create(ast);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, ddc);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
+}
+
+int ast_vga_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.vga.encoder;
+ struct ast_connector *ast_connector = &ast->output.vga.connector;
+ struct drm_connector *connector = &ast_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = ast_vga_connector_init(dev, connector);
+ if (ret)
+ return ret;
+ ast_connector->physical_status = connector->status;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
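
Both new output files above implement the same probe policy: detect_ctx() caches the DDC-probed state in physical_status and bumps the connector's epoch_counter so userspace re-queries modes, yet always reports the connector as connected so a headless BMC keeps a lit display pipe. A condensed sketch of the pattern, using the names from the code above:

static int example_detect_ctx(struct drm_connector *connector,
			      struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct ast_connector *ast_connector = to_ast_connector(connector);
	enum drm_connector_status status;

	/* Probe the monitor over DDC; may report disconnected or unknown. */
	status = drm_connector_helper_detect_from_ddc(connector, ctx, force);

	/* A changed epoch_counter invalidates modes cached by userspace. */
	if (status != ast_connector->physical_status)
		++connector->epoch_counter;
	ast_connector->physical_status = status;

	/* Always connected: the BMC scans out even without a monitor. */
	return connector_status_connected;
}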
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index ddf1e4424ffd..bfa88409a7ff 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -36,11 +36,6 @@
static const bool verify_fast_training;
-struct bridge_init {
- struct i2c_client *client;
- struct device_node *node;
-};
-
static void analogix_dp_init_dp(struct analogix_dp_device *dp)
{
analogix_dp_reset(dp);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 88e4aa5830f3..a2e9bb485c36 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1647,25 +1647,15 @@ static int anx7625_get_swing_setting(struct device *dev,
{
int num_regs;
- if (of_get_property(dev->of_node,
- "analogix,lane0-swing", &num_regs)) {
- if (num_regs > DP_TX_SWING_REG_CNT)
- num_regs = DP_TX_SWING_REG_CNT;
-
+ num_regs = of_property_read_variable_u8_array(dev->of_node, "analogix,lane0-swing",
+ pdata->lane0_reg_data, 1, DP_TX_SWING_REG_CNT);
+ if (num_regs > 0)
pdata->dp_lane0_swing_reg_cnt = num_regs;
- of_property_read_u8_array(dev->of_node, "analogix,lane0-swing",
- pdata->lane0_reg_data, num_regs);
- }
-
- if (of_get_property(dev->of_node,
- "analogix,lane1-swing", &num_regs)) {
- if (num_regs > DP_TX_SWING_REG_CNT)
- num_regs = DP_TX_SWING_REG_CNT;
+ num_regs = of_property_read_variable_u8_array(dev->of_node, "analogix,lane1-swing",
+ pdata->lane1_reg_data, 1, DP_TX_SWING_REG_CNT);
+ if (num_regs > 0)
pdata->dp_lane1_swing_reg_cnt = num_regs;
- of_property_read_u8_array(dev->of_node, "analogix,lane1-swing",
- pdata->lane1_reg_data, num_regs);
- }
return 0;
}
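
The helper adopted above, of_property_read_variable_u8_array(), reads between sz_min and sz_max bytes from a DT property and returns the element count, or a negative errno when the property is absent or out of range; that is why a single `if (num_regs > 0)` check replaces the old length-clamping sequence. A minimal sketch of the contract; the property name and destination buffer are hypothetical:

#include <linux/of.h>

u8 swing[DP_TX_SWING_REG_CNT];	/* bound taken from the driver above */
int n;

/* "vendor,example-swing" is a made-up property for illustration. */
n = of_property_read_variable_u8_array(dev->of_node, "vendor,example-swing",
				       swing, 1, ARRAY_SIZE(swing));
if (n > 0)
	swing_cnt = n;	/* n valid entries now sit in swing[] */
/* n <= 0: property missing or malformed; keep the driver defaults. */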
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 1e1c06fdf206..87b8545fccc0 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -460,6 +460,8 @@ struct it6505 {
bool enable_drv_hold;
const struct drm_edid *cached_edid;
+
+ int irq;
};
struct it6505_step_train_para {
@@ -2624,6 +2626,8 @@ static int it6505_poweron(struct it6505 *it6505)
it6505_init(it6505);
it6505_lane_off(it6505);
+ enable_irq(it6505->irq);
+
return 0;
}
@@ -2640,6 +2644,8 @@ static int it6505_poweroff(struct it6505 *it6505)
return 0;
}
+ disable_irq_nosync(it6505->irq);
+
if (pdata->gpiod_reset)
gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
@@ -3389,7 +3395,7 @@ static int it6505_i2c_probe(struct i2c_client *client)
struct it6505 *it6505;
struct device *dev = &client->dev;
struct extcon_dev *extcon;
- int err, intp_irq;
+ int err;
it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
if (!it6505)
@@ -3430,17 +3436,18 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505_parse_dt(it6505);
- intp_irq = client->irq;
+ it6505->irq = client->irq;
- if (!intp_irq) {
+ if (!it6505->irq) {
dev_err(dev, "Failed to get INTP IRQ");
err = -ENODEV;
return err;
}
- err = devm_request_threaded_irq(&client->dev, intp_irq, NULL,
+ err = devm_request_threaded_irq(&client->dev, it6505->irq, NULL,
it6505_int_threaded_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
"it6505-intp", it6505);
if (err) {
dev_err(dev, "Failed to request INTP threaded IRQ: %d", err);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 1a9defa15663..e265ab3c8c92 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -422,22 +422,6 @@ static const struct drm_connector_funcs lt8912_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static enum drm_mode_status
-lt8912_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- if (mode->clock > 150000)
- return MODE_CLOCK_HIGH;
-
- if (mode->hdisplay > 1920)
- return MODE_BAD_HVALUE;
-
- if (mode->vdisplay > 1080)
- return MODE_BAD_VVALUE;
-
- return MODE_OK;
-}
-
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
const struct drm_edid *drm_edid;
@@ -463,7 +447,6 @@ static int lt8912_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = {
.get_modes = lt8912_connector_get_modes,
- .mode_valid = lt8912_connector_mode_valid,
};
static void lt8912_bridge_mode_set(struct drm_bridge *bridge,
@@ -605,6 +588,23 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
drm_bridge_hpd_disable(lt->hdmi_port);
}
+static enum drm_mode_status
+lt8912_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > 150000)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 1920)
+ return MODE_BAD_HVALUE;
+
+ if (mode->vdisplay > 1080)
+ return MODE_BAD_VVALUE;
+
+ return MODE_OK;
+}
+
static enum drm_connector_status
lt8912_bridge_detect(struct drm_bridge *bridge)
{
@@ -635,6 +635,7 @@ static const struct drm_edid *lt8912_bridge_edid_read(struct drm_bridge *bridge,
static const struct drm_bridge_funcs lt8912_bridge_funcs = {
.attach = lt8912_bridge_attach,
.detach = lt8912_bridge_detach,
+ .mode_valid = lt8912_bridge_mode_valid,
.mode_set = lt8912_bridge_mode_set,
.enable = lt8912_bridge_enable,
.detect = lt8912_bridge_detect,
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 4e802b54a1cb..4d1d40e1f1b4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -23,6 +23,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -34,7 +35,7 @@
struct lt9611uxc {
struct device *dev;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_bridge *next_bridge;
struct regmap *regmap;
/* Protects all accesses to registers by stopping the on-chip MCU */
@@ -120,11 +121,6 @@ static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge)
return container_of(bridge, struct lt9611uxc, bridge);
}
-static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector)
-{
- return container_of(connector, struct lt9611uxc, connector);
-}
-
static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc)
{
mutex_lock(&lt9611uxc->ocm_lock);
@@ -171,20 +167,14 @@ static void lt9611uxc_hpd_work(struct work_struct *work)
struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
bool connected;
- if (lt9611uxc->connector.dev) {
- if (lt9611uxc->connector.dev->mode_config.funcs)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- } else {
-
- mutex_lock(&lt9611uxc->ocm_lock);
- connected = lt9611uxc->hdmi_connected;
- mutex_unlock(&lt9611uxc->ocm_lock);
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
- drm_bridge_hpd_notify(&lt9611uxc->bridge,
- connected ?
- connector_status_connected :
- connector_status_disconnected);
- }
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
}
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
@@ -289,82 +279,13 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
return dsi;
}
-static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
-{
- struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
- const struct drm_edid *drm_edid;
- int count;
-
- drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
- drm_edid_connector_update(connector, drm_edid);
- count = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
-
- return count;
-}
-
-static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector,
- bool force)
-{
- struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
-
- return lt9611uxc->bridge.funcs->detect(&lt9611uxc->bridge);
-}
-
-static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode);
-
- return lt9611uxc_mode ? MODE_OK : MODE_BAD;
-}
-
-static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = {
- .get_modes = lt9611uxc_connector_get_modes,
- .mode_valid = lt9611uxc_connector_mode_valid,
-};
-
-static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = lt9611uxc_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc)
-{
- int ret;
-
- lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_helper_add(&lt9611uxc->connector,
- &lt9611uxc_bridge_connector_helper_funcs);
- ret = drm_connector_init(bridge->dev, &lt9611uxc->connector,
- &lt9611uxc_bridge_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
-
- return drm_connector_attach_encoder(&lt9611uxc->connector, bridge->encoder);
-}
-
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- int ret;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
- ret = lt9611uxc_connector_init(bridge, lt9611uxc);
- if (ret < 0)
- return ret;
- }
- return 0;
+ return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ bridge, flags);
}
static enum drm_mode_status
@@ -525,7 +446,7 @@ static int lt9611uxc_parse_dt(struct device *dev,
lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1);
- return 0;
+ return drm_of_find_panel_or_bridge(dev->of_node, 2, -1, NULL, &lt9611uxc->next_bridge);
}
static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc)
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 8d54091ec66e..5f05647a3bea 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -289,13 +289,13 @@ static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
- /*
- * Adjusting input polarity based on the video mode results in
- * a black screen so always pick active low:
- */
nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
+ dsi->mode.flags & DRM_MODE_FLAG_PVSYNC ?
+ NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH :
NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
+ dsi->mode.flags & DRM_MODE_FLAG_PHSYNC ?
+ NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH :
NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h
index a247a8a11c7c..61e7d65cb1eb 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.h
+++ b/drivers/gpu/drm/bridge/nwl-dsi.h
@@ -30,11 +30,11 @@
#define NWL_DSI_PIXEL_FORMAT 0x20c
#define NWL_DSI_VSYNC_POLARITY 0x210
#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0
-#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(0)
#define NWL_DSI_HSYNC_POLARITY 0x214
#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0
-#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(0)
#define NWL_DSI_VIDEO_MODE 0x218
#define NWL_DSI_HFP 0x21c
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 9f2bc932c371..0031f3c54882 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -138,9 +138,6 @@ struct dw_hdmi {
struct platform_device *audio;
struct platform_device *cec;
struct device *dev;
- struct clk *isfr_clk;
- struct clk *iahb_clk;
- struct clk *cec_clk;
struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
@@ -3326,6 +3323,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
struct device_node *ddc_node;
struct dw_hdmi_cec_data cec;
struct dw_hdmi *hdmi;
+ struct clk *clk;
struct resource *iores = NULL;
int irq;
int ret;
@@ -3405,50 +3403,27 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->regm = plat_data->regm;
}
- hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr");
- if (IS_ERR(hdmi->isfr_clk)) {
- ret = PTR_ERR(hdmi->isfr_clk);
+ clk = devm_clk_get_enabled(hdmi->dev, "isfr");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret);
goto err_res;
}
- ret = clk_prepare_enable(hdmi->isfr_clk);
- if (ret) {
- dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret);
- goto err_res;
- }
-
- hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb");
- if (IS_ERR(hdmi->iahb_clk)) {
- ret = PTR_ERR(hdmi->iahb_clk);
+ clk = devm_clk_get_enabled(hdmi->dev, "iahb");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(hdmi->dev, "Unable to get HDMI iahb clk: %d\n", ret);
- goto err_isfr;
- }
-
- ret = clk_prepare_enable(hdmi->iahb_clk);
- if (ret) {
- dev_err(hdmi->dev, "Cannot enable HDMI iahb clock: %d\n", ret);
- goto err_isfr;
+ goto err_res;
}
- hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
- if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
- hdmi->cec_clk = NULL;
- } else if (IS_ERR(hdmi->cec_clk)) {
- ret = PTR_ERR(hdmi->cec_clk);
+ clk = devm_clk_get_optional_enabled(hdmi->dev, "cec");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
if (ret != -EPROBE_DEFER)
dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
ret);
-
- hdmi->cec_clk = NULL;
- goto err_iahb;
- } else {
- ret = clk_prepare_enable(hdmi->cec_clk);
- if (ret) {
- dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
- ret);
- goto err_iahb;
- }
+ goto err_res;
}
/* Product and revision IDs */
@@ -3462,12 +3437,12 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
dev_err(dev, "Unsupported HDMI controller (%04x:%02x:%02x)\n",
hdmi->version, prod_id0, prod_id1);
ret = -ENODEV;
- goto err_iahb;
+ goto err_res;
}
ret = dw_hdmi_detect_phy(hdmi);
if (ret < 0)
- goto err_iahb;
+ goto err_res;
dev_info(dev, "Detected HDMI TX controller v%x.%03x %s HDCP (%s)\n",
hdmi->version >> 12, hdmi->version & 0xfff,
@@ -3479,14 +3454,14 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto err_iahb;
+ goto err_res;
}
ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq,
dw_hdmi_irq, IRQF_SHARED,
dev_name(dev), hdmi);
if (ret)
- goto err_iahb;
+ goto err_res;
/*
* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
@@ -3603,11 +3578,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
return hdmi;
-err_iahb:
- clk_disable_unprepare(hdmi->iahb_clk);
- clk_disable_unprepare(hdmi->cec_clk);
-err_isfr:
- clk_disable_unprepare(hdmi->isfr_clk);
err_res:
i2c_put_adapter(hdmi->ddc);
@@ -3627,10 +3597,6 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi)
/* Disable all interrupts */
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
- clk_disable_unprepare(hdmi->iahb_clk);
- clk_disable_unprepare(hdmi->isfr_clk);
- clk_disable_unprepare(hdmi->cec_clk);
-
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
else
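
The dw-hdmi cleanup rests on devm_clk_get_enabled() and devm_clk_get_optional_enabled(), which acquire a clock, prepare-enable it, and register devres unwind actions, so neither the probe error paths nor remove() need explicit clk_disable_unprepare() calls. A minimal sketch of the pattern under those assumptions (dev_err_probe() used here for brevity, unlike the driver's dev_err/goto style):

struct clk *clk;

clk = devm_clk_get_enabled(dev, "isfr");
if (IS_ERR(clk))
	return dev_err_probe(dev, PTR_ERR(clk), "Unable to get isfr clk\n");

/* Optional clock: returns NULL (not an error) when the DT omits it. */
clk = devm_clk_get_optional_enabled(dev, "cec");
if (IS_ERR(clk))
	return dev_err_probe(dev, PTR_ERR(clk), "Cannot get cec clock\n");

/* No explicit disable anywhere: devres unwinds on detach or probe failure. */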
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index c4e9d96933dc..0fb02e4e7f4e 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -722,7 +722,12 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
{
- dsi_write(dsi, DSI_PCKHDL_CFG, CRC_RX_EN | ECC_RX_EN | BTA_EN);
+ u32 val = CRC_RX_EN | ECC_RX_EN | BTA_EN | EOTP_TX_EN;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ val &= ~EOTP_TX_EN;
+
+ dsi_write(dsi, DSI_PCKHDL_CFG, val);
}
static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
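
The packet-handler tweak above defaults to EoTp transmission enabled and clears EOTP_TX_EN only when the peripheral sets MIPI_DSI_MODE_NO_EOT_PACKET. From the peripheral side that flag is just part of the advertised mode_flags; a hypothetical panel driver would opt out of EoTp roughly like this:

/* Sketch: a DSI peripheral opting out of end-of-transmission packets.
 * Everything besides the standard mode flags is hypothetical. */
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
		  MIPI_DSI_MODE_NO_EOT_PACKET;	/* host clears EOTP_TX_EN */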
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index b8b7a227addf..290e2532fab1 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -241,6 +241,10 @@
/* Link Training */
#define DP0_SRCCTRL 0x06a0
+#define DP0_SRCCTRL_PRE1 GENMASK(29, 28)
+#define DP0_SRCCTRL_SWG1 GENMASK(25, 24)
+#define DP0_SRCCTRL_PRE0 GENMASK(21, 20)
+#define DP0_SRCCTRL_SWG0 GENMASK(17, 16)
#define DP0_SRCCTRL_SCRMBLDIS BIT(13)
#define DP0_SRCCTRL_EN810B BIT(12)
#define DP0_SRCCTRL_NOTP (0 << 8)
@@ -278,6 +282,8 @@
#define AUDIFDATA6 0x0720 /* DP0 Audio Info Frame Bytes 27 to 24 */
#define DP1_SRCCTRL 0x07a0 /* DP1 Control Register */
+#define DP1_SRCCTRL_PRE GENMASK(21, 20)
+#define DP1_SRCCTRL_SWG GENMASK(17, 16)
/* PHY */
#define DP_PHY_CTRL 0x0800
@@ -369,6 +375,7 @@ struct tc_data {
u32 rev;
u8 assr;
+ u8 pre_emphasis[2];
struct gpio_desc *sd_gpio;
struct gpio_desc *reset_gpio;
@@ -1090,13 +1097,17 @@ static int tc_main_link_enable(struct tc_data *tc)
return ret;
}
- ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc));
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL,
+ tc_srcctrl(tc) |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
- ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0) |
+ FIELD_PREP(DP1_SRCCTRL_PRE, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1188,8 +1199,10 @@ static int tc_main_link_enable(struct tc_data *tc)
goto err_dpcd_write;
/* Reset voltage-swing & pre-emphasis */
- tmp[0] = tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
- DP_TRAIN_PRE_EMPH_LEVEL_0;
+ tmp[0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
+ FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, tc->pre_emphasis[0]);
+ tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
+ FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, tc->pre_emphasis[1]);
ret = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
@@ -1213,7 +1226,9 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = regmap_write(tc->regmap, DP0_SRCCTRL,
tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
DP0_SRCCTRL_AUTOCORRECT |
- DP0_SRCCTRL_TP1);
+ DP0_SRCCTRL_TP1 |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1248,7 +1263,9 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = regmap_write(tc->regmap, DP0_SRCCTRL,
tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
DP0_SRCCTRL_AUTOCORRECT |
- DP0_SRCCTRL_TP2);
+ DP0_SRCCTRL_TP2 |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1274,7 +1291,9 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Clear Training Pattern, set AutoCorrect Mode = 1 */
ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) |
- DP0_SRCCTRL_AUTOCORRECT);
+ DP0_SRCCTRL_AUTOCORRECT |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -2363,6 +2382,18 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return -EINVAL;
}
mode |= BIT(endpoint.port);
+
+ if (endpoint.port == 2) {
+ of_property_read_u8_array(node, "toshiba,pre-emphasis",
+ tc->pre_emphasis,
+ ARRAY_SIZE(tc->pre_emphasis));
+
+ if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 ||
+ tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) {
+ dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n");
+ return -EINVAL;
+ }
+ }
}
if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp) {
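
All the SRCCTRL writes above compose their register values with FIELD_PREP() over GENMASK() masks, which shifts a small value into its field position without hand-written shift constants. A self-contained sketch of the idiom; the field names and variables are hypothetical stand-ins for the DP0_SRCCTRL_PRE0/PRE1 fields:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_PRE0	GENMASK(21, 20)	/* 2-bit field at bits 21:20 */
#define EXAMPLE_PRE1	GENMASK(29, 28)	/* 2-bit field at bits 29:28 */

/* Pack two 0..2 pre-emphasis levels into one register word. */
u32 srcctrl = base_bits |
	      FIELD_PREP(EXAMPLE_PRE0, pre_emphasis[0]) |
	      FIELD_PREP(EXAMPLE_PRE1, pre_emphasis[1]);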
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index 4140303d6260..66e70ced796f 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -187,6 +187,7 @@ CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
+CONFIG_DRM_ANALOGIX_ANX7625=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index d6edf3635b23..2a94f54ce4cf 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -28,6 +28,14 @@ debian/x86_64_test-vk:
rules:
- when: never
+debian/arm64_test-vk:
+ rules:
+ - when: never
+
+debian/arm64_test-gl:
+ rules:
+ - when: never
+
fedora/x86_64_build:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index 80fb0f57ae46..eca47d4f816f 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,13 +1,13 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha e2b9c5a9e3e4f9b532067af8022eaef8d6fc6c00
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha d9849ac46623797a9f56fb9d46dc52460ac477de
- UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
+ UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 0df7b9b97f9da0e364f5ee30fe331004b8c86b56
+ IGT_VERSION: f13702b8e4e847c56da3ef6f0969065d686049c5
- DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git
+ DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.15.0
FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
@@ -85,22 +85,24 @@ include:
- project: *drm-ci-project-path
ref: *drm-ci-commit-sha
file:
+ - '/.gitlab-ci/container/gitlab-ci.yml'
- '/.gitlab-ci/farm-rules.yml'
+ - '/.gitlab-ci/lava/lava-gitlab-ci.yml'
- '/.gitlab-ci/test-source-dep.yml'
- - '/.gitlab-ci/container/gitlab-ci.yml'
- '/.gitlab-ci/test/gitlab-ci.yml'
- - '/.gitlab-ci/lava/lava-gitlab-ci.yml'
- - '/src/microsoft/ci/gitlab-ci-inc.yml'
- - '/src/gallium/drivers/zink/ci/gitlab-ci-inc.yml'
+ - '/src/amd/ci/gitlab-ci-inc.yml'
+ - '/src/freedreno/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/crocus/ci/gitlab-ci-inc.yml'
- - '/src/gallium/drivers/softpipe/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/llvmpipe/ci/gitlab-ci-inc.yml'
- - '/src/gallium/drivers/virgl/ci/gitlab-ci-inc.yml'
- '/src/gallium/drivers/nouveau/ci/gitlab-ci-inc.yml'
+ - '/src/gallium/drivers/softpipe/ci/gitlab-ci-inc.yml'
+ - '/src/gallium/drivers/virgl/ci/gitlab-ci-inc.yml'
+ - '/src/gallium/drivers/zink/ci/gitlab-ci-inc.yml'
- '/src/gallium/frontends/lavapipe/ci/gitlab-ci-inc.yml'
+ - '/src/gallium/frontends/rusticl/ci/gitlab-ci.yml'
- '/src/intel/ci/gitlab-ci-inc.yml'
- - '/src/freedreno/ci/gitlab-ci-inc.yml'
- - '/src/amd/ci/gitlab-ci-inc.yml'
+ - '/src/microsoft/ci/gitlab-ci-inc.yml'
+ - '/src/nouveau/ci/gitlab-ci-inc.yml'
- '/src/virtio/ci/gitlab-ci-inc.yml'
- drivers/gpu/drm/ci/image-tags.yml
- drivers/gpu/drm/ci/container.yml
@@ -121,8 +123,9 @@ stages:
- mediatek
- meson
- msm
+ - panfrost
+ - powervr
- rockchip
- - virtio-gpu
- software-driver
# YAML anchors for rule conditions
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 79f41d7da772..f38836ec837c 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -20,16 +20,6 @@ cat /sys/kernel/debug/dri/*/state
set -e
case "$DRIVER_NAME" in
- rockchip|meson)
- export IGT_FORCE_DRIVER="panfrost"
- ;;
- mediatek)
- if [ "$GPU_VERSION" = "mt8173" ]; then
- export IGT_FORCE_DRIVER=${DRIVER_NAME}
- elif [ "$GPU_VERSION" = "mt8183" ]; then
- export IGT_FORCE_DRIVER="panfrost"
- fi
- ;;
amdgpu|vkms)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
mv /install/modules/lib/modules/* /lib/modules/. || true
@@ -80,6 +70,7 @@ igt-runner \
--igt-folder /igt/libexec/igt-gpu-tools \
--caselist $TESTLIST \
--output /results \
+ -vvvv \
$IGT_SKIPS \
$IGT_FLAKES \
$IGT_FAILS \
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 13eda37bdf05..2c340d063a96 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,15 +1,15 @@
variables:
- CONTAINER_TAG: "2024-05-09-mesa-uprev"
+ CONTAINER_TAG: "2024-08-07-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
- DEBIAN_BUILD_TAG: "2024-06-10-vkms"
+ DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
- KERNEL_ROOTFS_TAG: "2023-10-06-amd"
+ KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
- DEBIAN_X86_64_TEST_GL_TAG: "${CONTAINER_TAG}"
+ DEBIAN_TEST_GL_TAG: "${CONTAINER_TAG}"
   ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 0707fa706a48..6add15083c78 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -44,6 +44,7 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--first-stage-init artifacts/ci-common/init-stage1.sh \
--ci-project-dir "${CI_PROJECT_DIR}" \
--device-type "${DEVICE_TYPE}" \
+ --farm "${FARM}" \
--dtb-filename "${DTB}" \
--jwt-file "${S3_JWT_FILE}" \
--kernel-image-name "${KERNEL_IMAGE_NAME}" \
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index ee908b66aad2..09d8447840e9 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -10,6 +10,7 @@
.lava-test:
extends:
- .test-rules
+ timeout: "1h30m"
script:
# Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
- rm -rf install
@@ -69,8 +70,9 @@
.baremetal-igt-arm64:
extends:
- .baremetal-test-arm64
- - .use-debian/arm64_test
+ - .use-debian/baremetal_arm64_test
- .test-rules
+ timeout: "1h30m"
variables:
FDO_CI_CONCURRENT: 10
HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
@@ -79,7 +81,7 @@
BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
FARM: google
needs:
- - debian/arm64_test
+ - debian/baremetal_arm64_test
- job: testing:arm64
artifacts: false
- igt:arm64
@@ -160,38 +162,61 @@ msm:sdm845:
script:
- ./install/bare-metal/cros-servo.sh
-rockchip:rk3288:
- extends:
- - .lava-igt:arm32
+.rockchip-device:
+ variables:
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+
+.rockchip-display:
stage: rockchip
variables:
DRIVER_NAME: rockchip
+
+.rk3288:
+ extends:
+ - .lava-igt:arm32
+ - .rockchip-device
+ variables:
DEVICE_TYPE: rk3288-veyron-jaq
- DTB: ${DEVICE_TYPE}
- BOOT_METHOD: depthcharge
- KERNEL_IMAGE_TYPE: "zimage"
GPU_VERSION: rk3288
+ KERNEL_IMAGE_TYPE: "zimage"
RUNNER_TAG: mesa-ci-x86-64-lava-rk3288-veyron-jaq
-rockchip:rk3399:
+.rk3399:
extends:
- .lava-igt:arm64
- stage: rockchip
+ - .rockchip-device
parallel: 2
variables:
- DRIVER_NAME: rockchip
DEVICE_TYPE: rk3399-gru-kevin
- DTB: ${DEVICE_TYPE}
- BOOT_METHOD: depthcharge
- KERNEL_IMAGE_TYPE: ""
GPU_VERSION: rk3399
+ KERNEL_IMAGE_TYPE: ""
RUNNER_TAG: mesa-ci-x86-64-lava-rk3399-gru-kevin
+rockchip:rk3288:
+ extends:
+ - .rk3288
+ - .rockchip-display
+
+panfrost:rk3288:
+ extends:
+ - .rk3288
+ - .panfrost-gpu
+
+rockchip:rk3399:
+ extends:
+ - .rk3399
+ - .rockchip-display
+
+panfrost:rk3399:
+ extends:
+ - .rk3399
+ - .panfrost-gpu
+
.i915:
extends:
- .lava-igt:x86_64
stage: i915
- timeout: "1h30m"
variables:
DRIVER_NAME: i915
DTB: ""
@@ -280,65 +305,117 @@ amdgpu:stoney:
GPU_VERSION: stoney
RUNNER_TAG: mesa-ci-x86-64-lava-hp-11A-G6-EE-grunt
-.mediatek:
+.mediatek-device:
extends:
- .lava-igt:arm64
stage: mediatek
variables:
- DRIVER_NAME: mediatek
DTB: ${DEVICE_TYPE}
BOOT_METHOD: depthcharge
KERNEL_IMAGE_TYPE: ""
-mediatek:mt8173:
+.mediatek-display:
+ stage: mediatek
+ variables:
+ DRIVER_NAME: mediatek
+
+.powervr-gpu:
+ stage: powervr
+ variables:
+ DRIVER_NAME: powervr
+
+.panfrost-gpu:
+ stage: panfrost
+ variables:
+ DRIVER_NAME: panfrost
+
+.mt8173:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 4
variables:
DEVICE_TYPE: mt8173-elm-hana
GPU_VERSION: mt8173
RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana
-mediatek:mt8183:
+.mt8183:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 3
variables:
DEVICE_TYPE: mt8183-kukui-jacuzzi-juniper-sku16
GPU_VERSION: mt8183
RUNNER_TAG: mesa-ci-x86-64-lava-mt8183-kukui-jacuzzi-juniper-sku16
+mediatek:mt8173:
+ extends:
+ - .mt8173
+ - .mediatek-display
+
+powervr:mt8173:
+ extends:
+ - .mt8173
+ - .powervr-gpu
+ rules:
+ # TODO: the powervr driver was merged into the Linux kernel, but there's no MediaTek support yet
+ # Remove the rule once MediaTek support is added for powervr
+ - when: never
+
+mediatek:mt8183:
+ extends:
+ - .mt8183
+ - .mediatek-display
+
+panfrost:mt8183:
+ extends:
+ - .mt8183
+ - .panfrost-gpu
+
# drm-mtk doesn't even probe yet in mainline for mt8192
.mediatek:mt8192:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 3
variables:
DEVICE_TYPE: mt8192-asurada-spherion-r0
GPU_VERSION: mt8192
RUNNER_TAG: mesa-ci-x86-64-lava-mt8192-asurada-spherion-r0
-.meson:
+.meson-device:
extends:
- .lava-igt:arm64
- stage: meson
variables:
- DRIVER_NAME: meson
DTB: ${DEVICE_TYPE}
BOOT_METHOD: u-boot
KERNEL_IMAGE_TYPE: "image"
-meson:g12b:
+.meson-display:
+ stage: meson
+ variables:
+ DRIVER_NAME: meson
+
+.g12b:
extends:
- - .meson
+ - .meson-device
parallel: 3
variables:
DEVICE_TYPE: meson-g12b-a311d-khadas-vim3
GPU_VERSION: g12b
RUNNER_TAG: mesa-ci-x86-64-lava-meson-g12b-a311d-khadas-vim3
+meson:g12b:
+ extends:
+ - .g12b
+ - .meson-display
+
+panfrost:g12b:
+ extends:
+ - .g12b
+ - .panfrost-gpu
+
virtio_gpu:none:
stage: software-driver
+ timeout: "1h30m"
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
@@ -361,6 +438,7 @@ virtio_gpu:none:
vkms:none:
stage: software-driver
+ timeout: "1h30m"
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index e8c2f4044a92..8e2fed6d76a3 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -30,6 +30,7 @@ kms_cursor_crc@cursor-random-64x64,Fail
kms_cursor_crc@cursor-size-change,Fail
kms_cursor_crc@cursor-sliding-64x21,Fail
kms_cursor_crc@cursor-sliding-64x64,Fail
+kms_cursor_edge_walk@64x64-left-edge,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_lease@lease-uevent,Fail
@@ -37,4 +38,3 @@ kms_plane@pixel-format,Fail
kms_plane_cursor@primary,Fail
kms_rotation_crc@primary-rotation-180,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
index ea512ff8c352..e4faa96fa000 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -1,8 +1,20 @@
# Board Name: hp-11A-G6-EE-grunt
# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_async_flips@async-flip-with-page-flip-events
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_async_flips@crc
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/amd-gfx/3542730f-b8d7-404d-a947-b7a5e95d661c@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_plane@pixel-format-source-clamping
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index 3a2ce45d3cb9..f41b3e112976 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -2,9 +2,9 @@
.*suspend.*
# Skip driver specific tests
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -13,6 +13,7 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index 6641520ac587..9b84f68a5122 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -6,11 +6,11 @@ i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_fb_coherency@memset-crc,Crash
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -33,16 +33,20 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_pm_rpm@modeset-lpsp-stress,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
index 0a76547a103d..581f0da4d0f2 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -1,9 +1,48 @@
# Board Name: asus-C433TA-AJ0005-rammus
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
i915_hangman@engine-engine-error
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
i915_hangman@gt-engine-hang
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_async_flips@crc
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_universal_plane@cursor-fb-leak
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_sysfs_edid_timing
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+i915_hangman@engine-engine-hang
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_pm_rpm@modeset-lpsp-stress
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 5663ed0420a7..5186ba3dbbc6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -5,9 +5,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -19,6 +19,7 @@ gem_.*
i915_pm_rc6_residency.*
i915_suspend.*
kms_scaling_modes.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
index cb010c153a6a..4663d4d13f35 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: asus-C523NA-A20057-coral
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_fb_coherency@memset-crc
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index ab588e7a447c..4f50e0240ff4 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -7,9 +7,9 @@ kms_3d
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 26cd62bbf30a..2723e2832797 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -9,11 +9,10 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
kms_fb_coherency@memset-crc,Crash
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -41,20 +40,25 @@ kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@fbc-plane-move-sf-dmg-area,Timeout
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
+kms_psr2_sf@plane-move-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
-kms_psr@psr-sprite-render,Timeout
+kms_rotation_crc@primary-rotation-180,Timeout
kms_setmode@basic,Fail
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
index bb560ff1e2cd..58a6001abb28 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -1,6 +1,13 @@
# Board Name: asus-C436FA-Flip-hatch
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_plane_alpha_blend@constant-alpha-min
+
+# Board Name: asus-C436FA-Flip-hatch
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index 93b7736fffbb..9d753d97c9ab 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -3,9 +3,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -19,6 +19,7 @@ i915_suspend.*
xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index fca15b487929..4821c9adefd1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,20 +1,16 @@
core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
-kms_big_fb@linear-16bpp-rotate-0,Fail
-kms_big_fb@linear-16bpp-rotate-180,Fail
-kms_big_fb@linear-32bpp-rotate-0,Fail
-kms_big_fb@linear-32bpp-rotate-180,Fail
-kms_big_fb@linear-8bpp-rotate-0,Fail
-kms_big_fb@linear-8bpp-rotate-180,Fail
-kms_big_fb@linear-max-hw-stride-32bpp-rotate-0,Fail
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_draw_crc@draw-method-render,Fail
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
+kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
@@ -26,6 +22,7 @@ kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
@@ -38,19 +35,24 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@legacy-planes,Timeout
kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
index 58fc424f8a42..077886b76093 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
@@ -1,7 +1,13 @@
# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
core_hotunplug@unplug-rescan
+
+# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
+# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_fb_coherency@memset-crc
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index b3226b2d9ba1..9c64146aed90 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -6,9 +6,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..1de04a3308c4 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -17,10 +17,12 @@ perf@i915-ref-count,Fail
perf_pmu@busy-accuracy-50,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
+prime_busy@after,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
+testdisplay,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
index 6cf1fed2e575..549501e40461 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: hp-x360-14-G1-sona
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
prime_busy@hang
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index f0cf8a6dda25..6ec2f83ffe13 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -6,9 +6,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 9a50e894c3e7..e728ccc62326 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,34 +1,39 @@
-api_intel_bb@blit-noreloc-keep-cache,Timeout
+api_intel_allocator@simple-allocator,Timeout
+api_intel_bb@object-reloc-keep-cache,Timeout
api_intel_bb@offset-control,Timeout
-api_intel_bb@render-ccs,Timeout
-core_getclient,Timeout
-core_hotunplug@hotreplug-lateclose,Timeout
-drm_read@short-buffer-block,Timeout
+core_auth@getclient-simple,Timeout
+core_hotunplug@hotunbind-rebind,Timeout
+debugfs_test@read_all_entries_display_on,Timeout
+drm_read@invalid-buffer,Timeout
drm_read@short-buffer-nonblock,Timeout
-dumb_buffer@map-uaf,Timeout
gen3_render_tiledx_blits,Timeout
gen7_exec_parse@basic-allocation,Timeout
gen7_exec_parse@batch-without-end,Timeout
gen9_exec_parse@batch-invalid-length,Timeout
gen9_exec_parse@bb-secure,Timeout
+gen9_exec_parse@secure-batches,Timeout
+gen9_exec_parse@shadow-peek,Timeout
+gen9_exec_parse@unaligned-jump,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-i915_pciid,Timeout
i915_query@engine-info,Timeout
+i915_query@query-topology-kernel-writes,Timeout
+i915_query@test-query-geometry-subslices,Timeout
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
-perf_pmu@busy,Timeout
perf_pmu@enable-race,Timeout
perf_pmu@event-wait,Timeout
perf_pmu@gt-awake,Timeout
+perf_pmu@interrupts,Timeout
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
prime_mmap@test_map_unmap,Timeout
+prime_mmap@test_refcounting,Timeout
prime_self_import@basic-with_one_bo,Timeout
-syncobj_basic@bad-destroy,Timeout
+syncobj_basic@bad-flags-fd-to-handle,Timeout
syncobj_eventfd@invalid-bad-pad,Timeout
syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout
syncobj_wait@invalid-signal-illegal-handle,Timeout
@@ -37,7 +42,9 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
+template@A,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
+xe_module_load@many-reload,Fail
xe_module_load@reload,Fail
xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index e600782ef96a..b47df5855e8d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -12,9 +12,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 7582d313dd9b..2adae2175501 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -7,18 +7,10 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
-kms_big_fb@linear-16bpp-rotate-0,Fail
-kms_big_fb@linear-16bpp-rotate-180,Fail
-kms_big_fb@linear-32bpp-rotate-0,Fail
-kms_big_fb@linear-32bpp-rotate-180,Fail
-kms_big_fb@linear-8bpp-rotate-0,Fail
-kms_big_fb@linear-8bpp-rotate-180,Fail
-kms_big_fb@linear-max-hw-stride-32bpp-rotate-0,Fail
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_draw_crc@draw-method-render,Fail
+kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -40,6 +32,7 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
@@ -47,9 +40,13 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
index 1167a58c7dd1..60b8d1c64e70 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: dell-latitude-5400-8665U-sarien
# Bug Report: https://lore.kernel.org/intel-gfx/af4ca4df-a3ef-4943-bdbf-4c3af2c333af@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_pm_rpm@modeset-lpsp-stress
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 20bd91525f45..29bff8922ae1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -3,9 +3,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -17,6 +17,7 @@ gem_.*
i915_pm_rc6_residency.*
i915_suspend.*
kms_flip.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index cc5e9c1c2d57..a14349a1967f 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -5,8 +5,15 @@ device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
fbdev@eof,Fail
fbdev@read,Fail
-fbdev@unaligned-write,Fail
kms_3d,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-2-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
@@ -27,4 +34,3 @@ kms_properties@get_properties-sanity-atomic,Fail
kms_properties@plane-properties-atomic,Fail
kms_properties@plane-properties-legacy,Fail
kms_rmfb@close-fd,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 395ac0463404..2e5bf6ae25f2 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -1,11 +1,41 @@
# Board Name: mt8173-elm-hana
# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
core_setmaster_vs_auth
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
dumb_buffer@create-clear
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
fbdev@unaligned-write
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
fbdev@write
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@cursor-vs-flip-atomic-transitions
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_prop_blob@invalid-set-prop
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index 0c6108392140..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -1,8 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -10,6 +10,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 9ef460646d76..8cb2cb67853d 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,8 +1,22 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
+core_setmaster@master-drop-set-shared-fd,Fail
+device_reset@cold-reset-bound,Fail
+device_reset@reset-bound,Fail
+device_reset@unbind-cold-reset-rebind,Fail
+device_reset@unbind-reset-rebind,Fail
+dumb_buffer@create-clear,Crash
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+fbdev@eof,Fail
+fbdev@pan,Fail
+fbdev@read,Fail
+fbdev@unaligned-read,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-suspend,Fail
+kms_lease@lease-uevent,Fail
+kms_properties@plane-properties-atomic,Fail
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
new file mode 100644
index 000000000000..df7e5ce7a036
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -0,0 +1,20 @@
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@linear-tiling-1-displays-2560x1440p
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/0b2a1899-15dd-42fa-8f63-ea0ca28dbb17@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+fbdev@write
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index 715b9a8f4997..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -1,7 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,9 +10,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
index 9ef460646d76..328967d3e23d 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
@@ -1,8 +1,13 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+kms_3d,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_cursor_legacy@single-bo,Fail
+kms_cursor_legacy@single-move,Fail
+kms_cursor_legacy@torture-bo,Fail
+kms_cursor_legacy@torture-move,Fail
+kms_lease@lease-uevent,Fail
+kms_properties@connector-properties-atomic,Fail
+kms_properties@connector-properties-legacy,Fail
+kms_properties@get_properties-sanity-atomic,Fail
+kms_properties@get_properties-sanity-non-atomic,Fail
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 715b9a8f4997..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -1,7 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,9 +10,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 6e7fd1ccd1e3..4ac46168eff3 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -4,12 +4,8 @@ device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
-kms_cursor_legacy@forked-move,Fail
-kms_cursor_legacy@single-bo,Fail
kms_cursor_legacy@torture-bo,Fail
-kms_cursor_legacy@torture-move,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
-msm_mapping@ring,Fail
-tools_test@tools_test,Fail
+msm/msm_mapping@ring,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index ff12202abb6e..1674c8e214d6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -1,7 +1,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,6 +9,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
index 46ca69ce2ffe..bd0653caf7a0 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
@@ -5,4 +5,3 @@ device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
kms_lease@lease-uevent,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
index a275584c8bbb..123d92cb4470 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: apq8096-db820c
# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
dumb_buffer@create-clear
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 1c45fc6c512d..5550be5486ed 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -4,7 +4,7 @@ kms_cursor_legacy@all-pipes-torture-move
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -23,4 +24,4 @@ core_hotunplug.*
# *** gpu fault: ttbr0=00000001030ea000 iova=0000000001074000 dir=WRITE type=PERMISSION source=1f030000 (0,0,0,0)
# msm_mdp 901000.display-controller: RBBM | ME master split | status=0x701000B0
# watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [kworker/u16:3:46]
-msm_mapping@shadow
+msm/msm_mapping@shadow
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
index eb7a3886d397..d42004cd6977 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -3,13 +3,11 @@ device_reset@reset-bound,Fail
device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
-kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
kms_color@ctm-blue-to-red,Fail
kms_color@ctm-green-to-red,Fail
-kms_color@ctm-max,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
@@ -21,72 +19,6 @@ kms_content_protection@lic-type-1,Crash
kms_content_protection@srm,Crash
kms_content_protection@type1,Crash
kms_content_protection@uevent,Crash
-kms_cursor_crc@cursor-alpha-opaque,Fail
-kms_cursor_crc@cursor-alpha-transparent,Fail
-kms_cursor_crc@cursor-dpms,Fail
-kms_cursor_crc@cursor-offscreen-128x128,Fail
-kms_cursor_crc@cursor-offscreen-128x42,Fail
-kms_cursor_crc@cursor-offscreen-256x256,Fail
-kms_cursor_crc@cursor-offscreen-256x85,Fail
-kms_cursor_crc@cursor-offscreen-32x10,Fail
-kms_cursor_crc@cursor-offscreen-32x32,Fail
-kms_cursor_crc@cursor-offscreen-512x170,Fail
-kms_cursor_crc@cursor-offscreen-512x512,Fail
-kms_cursor_crc@cursor-offscreen-64x21,Fail
-kms_cursor_crc@cursor-offscreen-64x64,Fail
-kms_cursor_crc@cursor-onscreen-128x128,Fail
-kms_cursor_crc@cursor-onscreen-128x42,Fail
-kms_cursor_crc@cursor-onscreen-256x256,Fail
-kms_cursor_crc@cursor-onscreen-256x85,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Fail
-kms_cursor_crc@cursor-onscreen-32x32,Fail
-kms_cursor_crc@cursor-onscreen-512x170,Fail
-kms_cursor_crc@cursor-onscreen-512x512,Fail
-kms_cursor_crc@cursor-onscreen-64x21,Fail
-kms_cursor_crc@cursor-onscreen-64x64,Fail
-kms_cursor_crc@cursor-random-128x128,Fail
-kms_cursor_crc@cursor-random-128x42,Fail
-kms_cursor_crc@cursor-random-256x256,Fail
-kms_cursor_crc@cursor-random-256x85,Fail
-kms_cursor_crc@cursor-random-32x10,Fail
-kms_cursor_crc@cursor-random-32x32,Fail
-kms_cursor_crc@cursor-random-512x170,Fail
-kms_cursor_crc@cursor-random-512x512,Fail
-kms_cursor_crc@cursor-random-64x21,Fail
-kms_cursor_crc@cursor-random-64x64,Fail
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
-kms_cursor_crc@cursor-rapid-movement-256x85,Fail
-kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
-kms_cursor_crc@cursor-rapid-movement-64x64,Fail
-kms_cursor_crc@cursor-size-change,Fail
-kms_cursor_crc@cursor-sliding-128x128,Fail
-kms_cursor_crc@cursor-sliding-128x42,Fail
-kms_cursor_crc@cursor-sliding-256x256,Fail
-kms_cursor_crc@cursor-sliding-256x85,Fail
-kms_cursor_crc@cursor-sliding-32x10,Fail
-kms_cursor_crc@cursor-sliding-32x32,Fail
-kms_cursor_crc@cursor-sliding-512x170,Fail
-kms_cursor_crc@cursor-sliding-512x512,Fail
-kms_cursor_crc@cursor-sliding-64x21,Fail
-kms_cursor_crc@cursor-sliding-64x64,Fail
-kms_cursor_edge_walk@128x128-left-edge,Fail
-kms_cursor_edge_walk@128x128-right-edge,Fail
-kms_cursor_edge_walk@128x128-top-bottom,Fail
-kms_cursor_edge_walk@128x128-top-edge,Fail
-kms_cursor_edge_walk@256x256-left-edge,Fail
-kms_cursor_edge_walk@256x256-right-edge,Fail
-kms_cursor_edge_walk@256x256-top-bottom,Fail
-kms_cursor_edge_walk@256x256-top-edge,Fail
-kms_cursor_edge_walk@64x64-left-edge,Fail
-kms_cursor_edge_walk@64x64-right-edge,Fail
-kms_cursor_edge_walk@64x64-top-bottom,Fail
-kms_cursor_edge_walk@64x64-top-edge,Fail
kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail
kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail
kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail
@@ -100,92 +32,14 @@ kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
kms_display_modes@extended-mode-basic,Fail
kms_flip@2x-flip-vs-modeset-vs-hang,Fail
kms_flip@2x-flip-vs-panning-vs-hang,Fail
-kms_flip@absolute-wf_vblank,Fail
-kms_flip@absolute-wf_vblank-interruptible,Fail
-kms_flip@basic-flip-vs-wf_vblank,Fail
-kms_flip@basic-plain-flip,Fail
-kms_flip@blocking-absolute-wf_vblank,Fail
-kms_flip@blocking-absolute-wf_vblank-interruptible,Fail
-kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Fail
-kms_flip@dpms-off-confusion,Fail
-kms_flip@dpms-off-confusion-interruptible,Fail
-kms_flip@dpms-vs-vblank-race,Fail
-kms_flip@dpms-vs-vblank-race-interruptible,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
-kms_flip@flip-vs-expired-vblank,Fail
-kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning,Fail
-kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-rmfb,Fail
-kms_flip@flip-vs-rmfb-interruptible,Fail
-kms_flip@flip-vs-wf_vblank-interruptible,Fail
-kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
-kms_flip@plain-flip-fb-recreate,Fail
-kms_flip@plain-flip-fb-recreate-interruptible,Fail
-kms_flip@plain-flip-interruptible,Fail
-kms_flip@plain-flip-ts-check,Fail
-kms_flip@plain-flip-ts-check-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
-kms_flip@wf_vblank-ts-check-interruptible,Fail
-kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Fail
-kms_lease@setcrtc-implicit-plane,Fail
-kms_lease@simple-lease,Fail
kms_multipipe_modeset@basic-max-pipe-crc-check,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
-kms_pipe_crc_basic@disable-crc-after-crtc,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
-kms_pipe_crc_basic@read-crc,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Fail
-kms_plane@pixel-format-source-clamping,Fail
-kms_plane@plane-panning-bottom-right,Fail
-kms_plane@plane-panning-top-left,Fail
-kms_plane@plane-position-covered,Fail
-kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-mid,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@primary,Fail
kms_plane_lowres@tiling-none,Fail
-kms_plane_multiple@tiling-none,Fail
kms_rmfb@close-fd,Fail
-kms_rotation_crc@cursor-rotation-180,Fail
-kms_rotation_crc@primary-rotation-180,Fail
-kms_sequence@get-busy,Fail
-kms_sequence@get-forked,Fail
-kms_sequence@get-forked-busy,Fail
-kms_sequence@get-idle,Fail
-kms_sequence@queue-busy,Fail
-kms_sequence@queue-idle,Fail
-kms_vblank@accuracy-idle,Fail
-kms_vblank@crtc-id,Fail
-kms_vblank@query-busy,Fail
-kms_vblank@query-forked,Fail
-kms_vblank@query-forked-busy,Fail
-kms_vblank@query-idle,Fail
kms_vblank@ts-continuation-dpms-rpm,Fail
-kms_vblank@ts-continuation-idle,Fail
-kms_vblank@ts-continuation-modeset,Fail
-kms_vblank@ts-continuation-modeset-rpm,Fail
-kms_vblank@wait-busy,Fail
-kms_vblank@wait-forked,Fail
-kms_vblank@wait-forked-busy,Fail
-kms_vblank@wait-idle,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index 6dec63d48cfb..d74e04405e65 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -1,8 +1,20 @@
# Board Name: sc7180-trogdor-kingoftown
# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_mapping@shadow
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-32
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
# Failure Rate: 50
-msm_mapping@shadow
-msm_shrink@copy-gpu-oom-32
-msm_shrink@copy-gpu-oom-8
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-8
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index 68c96005ba54..c2833eee1c4b 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -4,7 +4,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -19,3 +20,6 @@ core_hotunplug.*
# Timeout occurs
kms_flip@2x-wf_vblank-ts-check
+
+# Hangs the machine
+kms_cursor_crc@cursor-random-max-size
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
index eb7a3886d397..d42004cd6977 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -3,13 +3,11 @@ device_reset@reset-bound,Fail
device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
-kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
kms_color@ctm-blue-to-red,Fail
kms_color@ctm-green-to-red,Fail
-kms_color@ctm-max,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
@@ -21,72 +19,6 @@ kms_content_protection@lic-type-1,Crash
kms_content_protection@srm,Crash
kms_content_protection@type1,Crash
kms_content_protection@uevent,Crash
-kms_cursor_crc@cursor-alpha-opaque,Fail
-kms_cursor_crc@cursor-alpha-transparent,Fail
-kms_cursor_crc@cursor-dpms,Fail
-kms_cursor_crc@cursor-offscreen-128x128,Fail
-kms_cursor_crc@cursor-offscreen-128x42,Fail
-kms_cursor_crc@cursor-offscreen-256x256,Fail
-kms_cursor_crc@cursor-offscreen-256x85,Fail
-kms_cursor_crc@cursor-offscreen-32x10,Fail
-kms_cursor_crc@cursor-offscreen-32x32,Fail
-kms_cursor_crc@cursor-offscreen-512x170,Fail
-kms_cursor_crc@cursor-offscreen-512x512,Fail
-kms_cursor_crc@cursor-offscreen-64x21,Fail
-kms_cursor_crc@cursor-offscreen-64x64,Fail
-kms_cursor_crc@cursor-onscreen-128x128,Fail
-kms_cursor_crc@cursor-onscreen-128x42,Fail
-kms_cursor_crc@cursor-onscreen-256x256,Fail
-kms_cursor_crc@cursor-onscreen-256x85,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Fail
-kms_cursor_crc@cursor-onscreen-32x32,Fail
-kms_cursor_crc@cursor-onscreen-512x170,Fail
-kms_cursor_crc@cursor-onscreen-512x512,Fail
-kms_cursor_crc@cursor-onscreen-64x21,Fail
-kms_cursor_crc@cursor-onscreen-64x64,Fail
-kms_cursor_crc@cursor-random-128x128,Fail
-kms_cursor_crc@cursor-random-128x42,Fail
-kms_cursor_crc@cursor-random-256x256,Fail
-kms_cursor_crc@cursor-random-256x85,Fail
-kms_cursor_crc@cursor-random-32x10,Fail
-kms_cursor_crc@cursor-random-32x32,Fail
-kms_cursor_crc@cursor-random-512x170,Fail
-kms_cursor_crc@cursor-random-512x512,Fail
-kms_cursor_crc@cursor-random-64x21,Fail
-kms_cursor_crc@cursor-random-64x64,Fail
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
-kms_cursor_crc@cursor-rapid-movement-256x85,Fail
-kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
-kms_cursor_crc@cursor-rapid-movement-64x64,Fail
-kms_cursor_crc@cursor-size-change,Fail
-kms_cursor_crc@cursor-sliding-128x128,Fail
-kms_cursor_crc@cursor-sliding-128x42,Fail
-kms_cursor_crc@cursor-sliding-256x256,Fail
-kms_cursor_crc@cursor-sliding-256x85,Fail
-kms_cursor_crc@cursor-sliding-32x10,Fail
-kms_cursor_crc@cursor-sliding-32x32,Fail
-kms_cursor_crc@cursor-sliding-512x170,Fail
-kms_cursor_crc@cursor-sliding-512x512,Fail
-kms_cursor_crc@cursor-sliding-64x21,Fail
-kms_cursor_crc@cursor-sliding-64x64,Fail
-kms_cursor_edge_walk@128x128-left-edge,Fail
-kms_cursor_edge_walk@128x128-right-edge,Fail
-kms_cursor_edge_walk@128x128-top-bottom,Fail
-kms_cursor_edge_walk@128x128-top-edge,Fail
-kms_cursor_edge_walk@256x256-left-edge,Fail
-kms_cursor_edge_walk@256x256-right-edge,Fail
-kms_cursor_edge_walk@256x256-top-bottom,Fail
-kms_cursor_edge_walk@256x256-top-edge,Fail
-kms_cursor_edge_walk@64x64-left-edge,Fail
-kms_cursor_edge_walk@64x64-right-edge,Fail
-kms_cursor_edge_walk@64x64-top-bottom,Fail
-kms_cursor_edge_walk@64x64-top-edge,Fail
kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail
kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail
kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail
@@ -100,92 +32,14 @@ kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
kms_display_modes@extended-mode-basic,Fail
kms_flip@2x-flip-vs-modeset-vs-hang,Fail
kms_flip@2x-flip-vs-panning-vs-hang,Fail
-kms_flip@absolute-wf_vblank,Fail
-kms_flip@absolute-wf_vblank-interruptible,Fail
-kms_flip@basic-flip-vs-wf_vblank,Fail
-kms_flip@basic-plain-flip,Fail
-kms_flip@blocking-absolute-wf_vblank,Fail
-kms_flip@blocking-absolute-wf_vblank-interruptible,Fail
-kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Fail
-kms_flip@dpms-off-confusion,Fail
-kms_flip@dpms-off-confusion-interruptible,Fail
-kms_flip@dpms-vs-vblank-race,Fail
-kms_flip@dpms-vs-vblank-race-interruptible,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
-kms_flip@flip-vs-expired-vblank,Fail
-kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning,Fail
-kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-rmfb,Fail
-kms_flip@flip-vs-rmfb-interruptible,Fail
-kms_flip@flip-vs-wf_vblank-interruptible,Fail
-kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
-kms_flip@plain-flip-fb-recreate,Fail
-kms_flip@plain-flip-fb-recreate-interruptible,Fail
-kms_flip@plain-flip-interruptible,Fail
-kms_flip@plain-flip-ts-check,Fail
-kms_flip@plain-flip-ts-check-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
-kms_flip@wf_vblank-ts-check-interruptible,Fail
-kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Fail
-kms_lease@setcrtc-implicit-plane,Fail
-kms_lease@simple-lease,Fail
kms_multipipe_modeset@basic-max-pipe-crc-check,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
-kms_pipe_crc_basic@disable-crc-after-crtc,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
-kms_pipe_crc_basic@read-crc,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Fail
-kms_plane@pixel-format-source-clamping,Fail
-kms_plane@plane-panning-bottom-right,Fail
-kms_plane@plane-panning-top-left,Fail
-kms_plane@plane-position-covered,Fail
-kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-mid,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@primary,Fail
kms_plane_lowres@tiling-none,Fail
-kms_plane_multiple@tiling-none,Fail
kms_rmfb@close-fd,Fail
-kms_rotation_crc@cursor-rotation-180,Fail
-kms_rotation_crc@primary-rotation-180,Fail
-kms_sequence@get-busy,Fail
-kms_sequence@get-forked,Fail
-kms_sequence@get-forked-busy,Fail
-kms_sequence@get-idle,Fail
-kms_sequence@queue-busy,Fail
-kms_sequence@queue-idle,Fail
-kms_vblank@accuracy-idle,Fail
-kms_vblank@crtc-id,Fail
-kms_vblank@query-busy,Fail
-kms_vblank@query-forked,Fail
-kms_vblank@query-forked-busy,Fail
-kms_vblank@query-idle,Fail
kms_vblank@ts-continuation-dpms-rpm,Fail
-kms_vblank@ts-continuation-idle,Fail
-kms_vblank@ts-continuation-modeset,Fail
-kms_vblank@ts-continuation-modeset-rpm,Fail
-kms_vblank@wait-busy,Fail
-kms_vblank@wait-forked,Fail
-kms_vblank@wait-forked-busy,Fail
-kms_vblank@wait-idle,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index dcb24b835dc3..cd3d3b0befe4 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -1,6 +1,13 @@
# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
-msm_mapping@shadow
+msm/msm_mapping@shadow
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_lease@page-flip-implicit-plane
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index 1168c53acd2d..7c69c1f1d55b 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -4,7 +4,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
index 8f010c8a9c4f..770a1c685fde 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -33,4 +33,3 @@ kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
kms_plane_cursor@overlay,Fail
kms_plane_cursor@viewport,Fail
kms_rmfb@close-fd,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 2c5f62b07632..2aa96b1241c3 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -1,19 +1,118 @@
# Board Name: sdm845-cheza-r3
# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_cursor_legacy@basic-flip-after-cursor-atomic
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-after-cursor-legacy
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-after-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-before-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
-msm_shrink@copy-gpu-32
-msm_shrink@copy-gpu-oom-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@short-flip-before-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/661483c8-ad82-400d-bcd8-e94986d20d7d@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+msm/msm_shrink@copy-mmap-oom-8
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 5185212c8fb2..90651048ab61 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -4,12 +4,12 @@ kms_bw.*
# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches
# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
-msm_mapping@*
+msm/msm_mapping@*
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -17,6 +17,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
new file mode 100644
index 000000000000..3c7e494857b5
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -0,0 +1,23 @@
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
new file mode 100644
index 000000000000..3c7e494857b5
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -0,0 +1,23 @@
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
new file mode 100644
index 000000000000..4a2f4b6b14c1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Crash
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
new file mode 100644
index 000000000000..feeed89b6c3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -0,0 +1,26 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt
new file mode 100644
index 000000000000..ac4f8f7244d4
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt
@@ -0,0 +1,6 @@
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/5cc34a8b-c1fa-4744-9031-2d33ecf41011@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+panfrost/panfrost_submit@pan-unhandled-pagefault
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
new file mode 100644
index 000000000000..feeed89b6c3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -0,0 +1,26 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt
index e9994c9db799..5e6d48d98e4e 100644
--- a/drivers/gpu/drm/ci/xfails/requirements.txt
+++ b/drivers/gpu/drm/ci/xfails/requirements.txt
@@ -11,7 +11,7 @@ requests==2.31.0
requests-toolbelt==1.0.0
ruamel.yaml==0.17.32
ruamel.yaml.clib==0.2.7
-setuptools==68.0.0
+setuptools==70.0.0
tenacity==8.2.3
urllib3==2.0.7
wheel==0.41.1
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index f9b99bf27105..ea7b2ceb95b9 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -1,8 +1,18 @@
+core_setmaster@master-drop-set-root,Crash
+core_setmaster@master-drop-set-user,Crash
+core_setmaster_vs_auth,Crash
+device_reset@cold-reset-bound,Crash
+device_reset@reset-bound,Crash
+device_reset@unbind-cold-reset-rebind,Crash
+device_reset@unbind-reset-rebind,Crash
dumb_buffer@create-clear,Crash
-dumb_buffer@create-valid-dumb,Crash
dumb_buffer@invalid-bpp,Crash
-dumb_buffer@map-invalid-size,Crash
-dumb_buffer@map-uaf,Crash
-dumb_buffer@map-valid,Crash
-panfrost_prime@gem-prime-import,Crash
-tools_test@tools_test,Crash
+fbdev@pan,Crash
+kms_cursor_crc@cursor-onscreen-32x10,Crash
+kms_cursor_crc@cursor-onscreen-32x32,Crash
+kms_cursor_crc@cursor-random-32x10,Crash
+kms_cursor_crc@cursor-sliding-32x32,Crash
+kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_prop_blob@invalid-set-prop,Crash
+kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
new file mode 100644
index 000000000000..7ede273aab20
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
@@ -0,0 +1,6 @@
+# Board Name: rk3288-veyron-jaq
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-atomic
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index 6d3757dca83b..eb16b29dee48 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -1,60 +1,11 @@
# Suspend to RAM seems to be broken on this machine
.*suspend.*
-# Too unstable, machine ends up hanging after lots of Oopses
-kms_cursor_legacy.*
-
-# Started hanging the machine on Linux 5.19-rc2:
-#
-# [IGT] kms_plane_lowres: executing
-# [IGT] kms_plane_lowres: starting subtest pipe-F-tiling-y
-# [IGT] kms_plane_lowres: exiting, ret=77
-# Console: switching to colour frame buffer device 170x48
-# rockchip-drm display-subsystem: [drm] *ERROR* flip_done timed out
-# rockchip-drm display-subsystem: [drm] *ERROR* [CRTC:35:crtc-0] commit wait timed out
-# BUG: spinlock bad magic on CPU#3, kms_plane_lowre/482
-# 8<--- cut here ---
-# Unable to handle kernel paging request at virtual address 7812078e
-# [7812078e] *pgd=00000000
-# Internal error: Oops: 5 [#1] SMP ARM
-# Modules linked in:
-# CPU: 3 PID: 482 Comm: kms_plane_lowre Tainted: G W 5.19.0-rc2-323596-g00535de92171 #1
-# Hardware name: Rockchip (Device Tree)
-# Process kms_plane_lowre (pid: 482, stack limit = 0x1193ac2b)
-# spin_dump from do_raw_spin_lock+0xa4/0xe8
-# do_raw_spin_lock from wait_for_completion_timeout+0x2c/0x120
-# wait_for_completion_timeout from drm_crtc_commit_wait+0x18/0x7c
-# drm_crtc_commit_wait from drm_atomic_helper_wait_for_dependencies+0x44/0x168
-# drm_atomic_helper_wait_for_dependencies from commit_tail+0x34/0x180
-# commit_tail from drm_atomic_helper_commit+0x164/0x18c
-# drm_atomic_helper_commit from drm_atomic_commit+0xac/0xe4
-# drm_atomic_commit from drm_client_modeset_commit_atomic+0x23c/0x284
-# drm_client_modeset_commit_atomic from drm_client_modeset_commit_locked+0x60/0x1c8
-# drm_client_modeset_commit_locked from drm_client_modeset_commit+0x24/0x40
-# drm_client_modeset_commit from drm_fbdev_client_restore+0x58/0x94
-# drm_fbdev_client_restore from drm_client_dev_restore+0x70/0xbc
-# drm_client_dev_restore from drm_release+0xf4/0x114
-# drm_release from __fput+0x74/0x240
-# __fput from task_work_run+0x84/0xb4
-# task_work_run from do_exit+0x34c/0xa20
-# do_exit from do_group_exit+0x34/0x98
-# do_group_exit from __wake_up_parent+0x0/0x18
-# Code: e595c008 12843d19 03e00000 03093168 (15940508)
-# ---[ end trace 0000000000000000 ]---
-# note: kms_plane_lowre[482] exited with preempt_count 1
-# Fixing recursive fault but reboot is needed!
-kms_plane_lowres@pipe-F-tiling-y
-
-# Take too long, we have only two machines, and these are very flaky
-kms_cursor_crc.*
-
-# Machine is hanging in this test, so skip it
-kms_pipe_crc_basic@disable-crc-after-crtc
-
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -62,9 +13,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 9ef460646d76..9309ff15e23a 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -1,8 +1,84 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
+device_reset@cold-reset-bound,Fail
+device_reset@reset-bound,Fail
+device_reset@unbind-cold-reset-rebind,Fail
+device_reset@unbind-reset-rebind,Fail
+dumb_buffer@create-clear,Crash
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+kms_atomic_transition@modeset-transition,Fail
+kms_atomic_transition@modeset-transition-fencing,Fail
+kms_atomic_transition@plane-toggle-modeset-transition,Fail
+kms_color@gamma,Fail
+kms_color@legacy-gamma,Fail
+kms_cursor_crc@cursor-alpha-opaque,Fail
+kms_cursor_crc@cursor-alpha-transparent,Fail
+kms_cursor_crc@cursor-dpms,Fail
+kms_cursor_crc@cursor-offscreen-32x10,Fail
+kms_cursor_crc@cursor-offscreen-32x32,Fail
+kms_cursor_crc@cursor-offscreen-64x21,Fail
+kms_cursor_crc@cursor-offscreen-64x64,Fail
+kms_cursor_crc@cursor-onscreen-32x10,Fail
+kms_cursor_crc@cursor-onscreen-32x32,Fail
+kms_cursor_crc@cursor-onscreen-64x21,Fail
+kms_cursor_crc@cursor-onscreen-64x64,Fail
+kms_cursor_crc@cursor-random-32x10,Fail
+kms_cursor_crc@cursor-random-32x32,Fail
+kms_cursor_crc@cursor-random-64x21,Fail
+kms_cursor_crc@cursor-random-64x64,Fail
+kms_cursor_crc@cursor-rapid-movement-32x10,Fail
+kms_cursor_crc@cursor-rapid-movement-32x32,Fail
+kms_cursor_crc@cursor-rapid-movement-64x21,Fail
+kms_cursor_crc@cursor-rapid-movement-64x64,Fail
+kms_cursor_crc@cursor-size-change,Fail
+kms_cursor_crc@cursor-sliding-32x10,Fail
+kms_cursor_crc@cursor-sliding-32x32,Fail
+kms_cursor_crc@cursor-sliding-64x21,Fail
+kms_cursor_crc@cursor-sliding-64x64,Fail
+kms_cursor_edge_walk@64x64-left-edge,Fail
+kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
+kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@dpms-vs-vblank-race,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@modeset-vs-vblank-race,Fail
+kms_flip@modeset-vs-vblank-race-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_flip@wf_vblank-ts-check,Fail
+kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
+kms_pipe_crc_basic@disable-crc-after-crtc,Fail
+kms_pipe_crc_basic@nonblocking-crc,Fail
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
+kms_pipe_crc_basic@read-crc,Fail
+kms_pipe_crc_basic@read-crc-frame-sequence,Fail
+kms_plane@pixel-format,Crash
+kms_plane@pixel-format-source-clamping,Crash
+kms_plane@plane-panning-bottom-right,Fail
+kms_plane@plane-panning-top-left,Fail
+kms_plane@plane-position-covered,Fail
+kms_plane@plane-position-hole,Fail
+kms_plane@plane-position-hole-dpms,Fail
+kms_plane_cursor@primary,Fail
+kms_plane_multiple@tiling-none,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@universal-plane-functional,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 742c27d9a598..d98f6a17343c 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -1,6 +1,48 @@
# Board Name: rk3399-gru-kevin
-# Bug Report: https://lore.kernel.org/dri-devel/5cc34a8b-c1fa-4744-9031-2d33ecf41011@collabora.com/T/#u
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
# Failure Rate: 50
-panfrost_submit@pan-unhandled-pagefault
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@linear-tiling-1-displays-2560x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@nonblocking-modeset-vs-cursor-atomic
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@dpms-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-absolute-wf_vblank-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-wf_vblank-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_setmode@basic
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/3e267d0c-fde4-4533-b001-6ab7d7c03546@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@connected-linear-tiling-1-displays-2560x1440p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index 5c52b25b4213..eb16b29dee48 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -1,13 +1,11 @@
# Suspend to RAM seems to be broken on this machine
.*suspend.*
-# Too unstable, machine ends up hanging after lots of Oopses
-kms_cursor_legacy.*
-
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -15,9 +13,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index fdf09fe11566..c72fee70e739 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -3,6 +3,70 @@ kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@size-max,Fail
kms_addfb_basic@too-high,Fail
kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-10-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-10-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-10-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-10-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-11-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-11-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-11-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-11-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-12-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-12-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-12-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-12-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-13-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-13-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-13-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-13-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-14-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-14-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-14-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-14-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-15-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-15-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-15-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-15-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-16-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-16-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-16-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-16-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-2-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-3-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-4-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-4-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-5-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-5-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-5-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-5-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-6-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-6-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-6-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-6-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-7-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-7-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-7-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-7-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-8-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-8-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-8-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-8-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-9-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-9-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-9-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-9-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
@@ -123,4 +187,3 @@ kms_vblank@wait-forked,Fail
kms_vblank@wait-forked-busy,Fail
kms_vblank@wait-idle,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index e0ca4fadb84f..9c9e048725f8 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -7,9 +7,9 @@ kms_flip@flip-vs-suspend.*
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -18,6 +18,7 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
index 691c383b21a0..5408110f4c60 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
@@ -41,12 +41,8 @@ kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-suspend,Timeout
-kms_flip@flip-vs-suspend-interruptible,Timeout
-kms_flip@plain-flip-fb-recreate,Fail
kms_lease@lease-uevent,Fail
kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
kms_writeback@writeback-check-output,Fail
kms_writeback@writeback-check-output-XRGB2101010,Fail
kms_writeback@writeback-fb-id,Fail
@@ -54,4 +50,3 @@ kms_writeback@writeback-fb-id-XRGB2101010,Fail
kms_writeback@writeback-invalid-parameters,Fail
kms_writeback@writeback-pixel-formats,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index eeaa1d5825af..62428f3c8f31 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -67,3 +67,24 @@ kms_flip@flip-vs-absolute-wf_vblank-interruptible
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_flip@flip-vs-blocking-wf-vblank
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-varying-size
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-expired-vblank
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/61ed26af-062c-443c-9df2-d1ee319f3fb0@collabora.com/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index fd5d1271115f..5ccc771fbb36 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -104,11 +104,112 @@ kms_cursor_crc@cursor-rapid-movement-256x85
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
# CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0
+kms_cursor_crc@cursor-onscreen-256x256
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 PID: 1913 Comm: kworker/u8:6 Not tainted 6.10.0-rc5-g8a28e73ebead #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb477409fbd58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffff8b124a242000
+# RDX: 00000000000000ff RSI: ffff8b124a243ff8 RDI: ffff8b124a244000
+# RBP: 0000000000000002 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffff8b124a244000 R11: 0000000000000000 R12: ffff8b1249282f30
+# R13: 0000000000000002 R14: 0000000000000002 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffff8b126bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000107a86000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x344/0x4e0 [vkms]
+# ? compose_active_planes+0x32f/0x4e0 [vkms]
+# ? srso_return_thunk+0x5/0x5f
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x350
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 0000000000000018
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb477409fbd58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffff8b124a242000
+# RDX: 00000000000000ff RSI: ffff8b124a243ff8 RDI: ffff8b124a244000
+# RBP: 0000000000000002 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffff8b124a244000 R11: 0000000000000000 R12: ffff8b1249282f30
+# R13: 0000000000000002 R14: 0000000000000002 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffff8b126bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000107a86000 CR4: 0000000000350ef0
+
+kms_cursor_edge_walk@128x128-right-edge
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 0 PID: 1911 Comm: kworker/u8:3 Not tainted 6.10.0-rc5-g5e7a002eefe5 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb2f040a43d58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa2c181792000
+# RDX: 0000000000000000 RSI: ffffa2c181793ff8 RDI: ffffa2c181790000
+# RBP: 0000000000000031 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810
+# R13: 0000000000000031 R14: 0000000000000031 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffffa2c1abc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000106768000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? mark_held_locks+0x49/0x80
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x344/0x4e0 [vkms]
+# ? compose_active_planes+0x32f/0x4e0 [vkms]
+# ? srso_return_thunk+0x5/0x5f
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x350
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 0000000000000018
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb2f040a43d58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa2c181792000
+# RDX: 0000000000000000 RSI: ffffa2c181793ff8 RDI: ffffa2c181790000
+# RBP: 0000000000000031 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810
+# R13: 0000000000000031 R14: 0000000000000031 R15: 0000000000000000
+
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -117,3 +218,4 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index d4c34f364140..6ee51003de3c 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2328,6 +2328,31 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
+ struct drm_dp_dpcd_ident *ident)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+
+ return ret < 0 ? ret : 0;
+}
+
+static void drm_dp_dump_desc(struct drm_dp_aux *aux,
+ const char *device_name, const struct drm_dp_desc *desc)
+{
+ const struct drm_dp_dpcd_ident *ident = &desc->ident;
+
+ drm_dbg_kms(aux->drm_dev,
+ "%s: %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+ aux->name, device_name,
+ (int)sizeof(ident->oui), ident->oui,
+ (int)strnlen(ident->device_id, sizeof(ident->device_id)), ident->device_id,
+ ident->hw_rev >> 4, ident->hw_rev & 0xf,
+ ident->sw_major_rev, ident->sw_minor_rev,
+ desc->quirks);
+}
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
@@ -2344,28 +2369,49 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
{
struct drm_dp_dpcd_ident *ident = &desc->ident;
unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
- int ret, dev_id_len;
+ int ret;
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+ ret = drm_dp_read_ident(aux, offset, ident);
if (ret < 0)
return ret;
desc->quirks = drm_dp_get_quirks(ident, is_branch);
- dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
-
- drm_dbg_kms(aux->drm_dev,
- "%s: DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
- aux->name, is_branch ? "branch" : "sink",
- (int)sizeof(ident->oui), ident->oui, dev_id_len,
- ident->device_id, ident->hw_rev >> 4, ident->hw_rev & 0xf,
- ident->sw_major_rev, ident->sw_minor_rev, desc->quirks);
+ drm_dp_dump_desc(aux, is_branch ? "DP branch" : "DP sink", desc);
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
/**
+ * drm_dp_dump_lttpr_desc - read and dump the DPCD descriptor for an LTTPR PHY
+ * @aux: DisplayPort AUX channel
+ * @dp_phy: LTTPR PHY instance
+ *
+ * Read the DPCD LTTPR PHY descriptor for @dp_phy and print a debug message
+ * with its details to dmesg.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy)
+{
+ struct drm_dp_desc desc = {};
+ int ret;
+
+ if (drm_WARN_ON(aux->drm_dev, dp_phy < DP_PHY_LTTPR1 || dp_phy > DP_MAX_LTTPR_COUNT))
+ return -EINVAL;
+
+ ret = drm_dp_read_ident(aux, DP_OUI_PHY_REPEATER(dp_phy), &desc.ident);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_dump_desc(aux, drm_dp_phy_name(dp_phy), &desc);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dump_lttpr_desc);
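
A hedged sketch of a caller: a driver that has already read the LTTPR count from DPCD could dump every PHY repeater's descriptor for debugging. The function and the lttpr_count parameter below are illustrative; DP_PHY_LTTPR() is the existing helper mapping an index to the drm_dp_phy enum.

/* Illustrative only: dump all LTTPR descriptors after LTTPR detection. */
static void example_dump_lttprs(struct drm_dp_aux *aux, int lttpr_count)
{
	for (int i = 0; i < lttpr_count; i++) {
		int ret = drm_dp_dump_lttpr_desc(aux, DP_PHY_LTTPR(i));

		if (ret < 0)
			drm_dbg_kms(aux->drm_dev,
				    "%s: LTTPR %d ident read failed (%d)\n",
				    aux->name, i, ret);
	}
}
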
+
+/**
* drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment
* @dsc_dpcd: DSC capabilities from DPCD
*
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index fc2ceae61db2..a040d7dfced1 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -89,7 +89,7 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
- u8 *guid);
+ guid_t *guid);
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
@@ -801,7 +801,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_
int idx = 1;
int i;
- memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
+ import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]);
idx += 16;
repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
idx++;
@@ -829,7 +829,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_
idx++;
if (idx > raw->curlen)
goto fail_len;
- memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
+ import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -1029,7 +1029,7 @@ static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mg
msg->req_type = (raw->msg[0] & 0x7f);
if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
- memcpy(msg->u.nak.guid, &raw->msg[1], 16);
+ import_guid(&msg->u.nak.guid, &raw->msg[1]);
msg->u.nak.reason = raw->msg[17];
msg->u.nak.nak_data = raw->msg[18];
return false;
@@ -1078,7 +1078,7 @@ drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_
if (idx > raw->curlen)
goto fail_len;
- memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
+ import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -1107,7 +1107,7 @@ static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst
if (idx > raw->curlen)
goto fail_len;
- memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
+ import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]);
idx += 16;
if (idx > raw->curlen)
goto fail_len;
@@ -2174,20 +2174,24 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
offset, size, buffer);
}
-static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
{
int ret = 0;
- memcpy(mstb->guid, guid, 16);
+ guid_copy(&mstb->guid, guid);
+
+ if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ u8 buf[UUID_SIZE];
+
+ export_guid(buf, &mstb->guid);
- if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
ret = drm_dp_send_dpcd_write(mstb->mgr,
mstb->port_parent,
- DP_GUID, 16, mstb->guid);
+ DP_GUID, sizeof(buf), buf);
} else {
ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, mstb->guid, 16);
+ DP_GUID, buf, sizeof(buf));
}
}
@@ -2339,7 +2343,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps = 0, ret;
+ int ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
bool new_mcs = 0;
bool created = false, send_link_addr = false, changed = false;
@@ -2372,7 +2376,6 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
*/
drm_modeset_lock(&mgr->base.lock, NULL);
- old_ddps = port->ddps;
changed = port->ddps != port_msg->ddps ||
(port->ddps &&
(port->ldps != port_msg->legacy_device_plug_status ||
@@ -2407,15 +2410,13 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
* Reprobe PBN caps on both hotplug, and when re-probing the link
* for our parent mstb
*/
- if (old_ddps != port->ddps || !created) {
- if (port->ddps && !port->input) {
- ret = drm_dp_send_enum_path_resources(mgr, mstb,
- port);
- if (ret == 1)
- changed = true;
- } else {
- port->full_pbn = 0;
- }
+ if (port->ddps && !port->input) {
+ ret = drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
+ if (ret == 1)
+ changed = true;
+ } else {
+ port->full_pbn = 0;
}
ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
@@ -2570,9 +2571,9 @@ out:
return mstb;
}
-static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
- struct drm_dp_mst_branch *mstb,
- const uint8_t *guid)
+static struct drm_dp_mst_branch *
+get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb,
+ const guid_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
@@ -2580,10 +2581,9 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
if (!mstb)
return NULL;
- if (memcmp(mstb->guid, guid, 16) == 0)
+ if (guid_equal(&mstb->guid, guid))
return mstb;
-
list_for_each_entry(port, &mstb->ports, next) {
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
@@ -2596,7 +2596,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
- const uint8_t *guid)
+ const guid_t *guid)
{
struct drm_dp_mst_branch *mstb;
int ret;
@@ -2692,18 +2692,18 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
- u8 *guid)
+static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr)
{
- u64 salt;
+ queue_work(system_long_wq, &mgr->work);
+}
- if (memchr_inv(guid, 0, 16))
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ guid_t *guid)
+{
+ if (!guid_is_null(guid))
return true;
- salt = get_jiffies_64();
-
- memcpy(&guid[0], &salt, sizeof(u64));
- memcpy(&guid[8], &salt, sizeof(u64));
+ guid_gen(guid);
return false;
}
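
The raw u8[16] buffers give way to guid_t and the <linux/uuid.h> helpers throughout this patch. A minimal sketch of the round-trip pattern, with illustrative function and variable names:

#include <linux/uuid.h>

/* Sketch: import a GUID from wire bytes, generate one if it is null
 * (as drm_dp_validate_guid() above now does), and export it back for
 * a DPCD write - mirroring drm_dp_check_mstb_guid(). */
static bool example_refresh_guid(const u8 *raw, guid_t *stored)
{
	u8 out[UUID_SIZE];
	guid_t incoming;

	import_guid(&incoming, raw);	/* wire bytes -> guid_t */

	if (guid_is_null(&incoming))
		guid_gen(&incoming);	/* random RFC 4122 GUID */

	if (guid_equal(stored, &incoming))
		return false;		/* nothing to update */

	guid_copy(stored, &incoming);
	export_guid(out, stored);	/* guid_t -> wire bytes */
	/* ... write 'out' over DPCD here ... */
	return true;
}
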
@@ -2943,7 +2943,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
drm_dp_dump_link_address(mgr, reply);
- ret = drm_dp_check_mstb_guid(mstb, reply->guid);
+ ret = drm_dp_check_mstb_guid(mstb, &reply->guid);
if (ret) {
char buf[64];
@@ -3685,7 +3685,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* Write reset payload */
drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
- queue_work(system_long_wq, &mgr->work);
+ drm_dp_mst_queue_probe_work(mgr);
ret = 0;
} else {
@@ -3724,6 +3724,33 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
}
/**
+ * drm_dp_mst_topology_queue_probe - Queue a topology probe
+ * @mgr: manager to probe
+ *
+ * Queue a work to probe the MST topology. Drivers should call this only to
+ * sync the topology's HW->SW state after the MST link's parameters have
+ * changed in a way that the state could have become out-of-sync. This is the
+ * case, for instance, when the link rate between the source and the first
+ * downstream branch device has switched between UHBR and non-UHBR rates.
+ * Apart from those cases - for instance when a sink gets plugged/unplugged
+ * to/from a port - the SW state will get updated automatically via MST UP
+ * message notifications.
+ */
+void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+
+ if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary))
+ goto out_unlock;
+
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+ drm_dp_mst_queue_probe_work(mgr);
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
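
A sketch of the intended caller, per the kernel-doc above: after the driver changes link parameters in a way that can desync the topology state (for instance a UHBR <-> non-UHBR retrain), it queues a full reprobe. Names are illustrative.

/* Illustrative only: resync MST SW state after a main-link rate change. */
static void example_link_retrained(struct drm_dp_mst_topology_mgr *mgr,
				   bool uhbr_rate_changed)
{
	if (uhbr_rate_changed)
		drm_dp_mst_topology_queue_probe(mgr);
}
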
+
+/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
*
@@ -3770,8 +3797,9 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
bool sync)
{
+ u8 buf[UUID_SIZE];
+ guid_t guid;
int ret;
- u8 guid[16];
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
@@ -3792,13 +3820,15 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (ret != 16) {
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ import_guid(&guid, buf);
+
+ ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid);
if (ret) {
drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
goto out_fail;
@@ -3809,7 +3839,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
* state of our in-memory topology back into sync with reality. So,
* restart the probing process as if we're probing a new hub
*/
- queue_work(system_long_wq, &mgr->work);
+ drm_dp_mst_queue_probe_work(mgr);
mutex_unlock(&mgr->lock);
if (sync) {
@@ -3976,12 +4006,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
bool hotplug = false, dowork = false;
if (hdr->broadcast) {
- const u8 *guid = NULL;
+ const guid_t *guid = NULL;
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
- guid = msg->u.conn_stat.guid;
+ guid = &msg->u.conn_stat.guid;
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
- guid = msg->u.resource_stat.guid;
+ guid = &msg->u.resource_stat.guid;
if (guid)
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
@@ -4963,7 +4993,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
for (i = 0x3; i < 0x8 && buf[i]; i++)
- seq_printf(m, "%c", buf[i]);
+ seq_putc(m, buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
if (dump_dp_payload_table(mgr, buf))
@@ -5569,7 +5599,6 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);
* drm_dp_atomic_release_time_slots()
*
* Returns:
- *
* 0 if the new state is valid, negative error code otherwise.
*/
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
@@ -5606,7 +5635,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
* topology object.
*
* RETURNS:
- *
* The MST topology state or error pointer.
*/
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
@@ -5626,7 +5654,6 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
* topology object.
*
* Returns:
- *
* The old MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
@@ -5651,7 +5678,6 @@ EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
* topology object.
*
* Returns:
- *
* The new MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 6e516c39a372..0fc99da93afe 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -63,7 +63,6 @@ EXPORT_SYMBOL(__drm_crtc_commit_free);
* hardware and flipped to.
*
* Returns:
- *
* 0 on success, a negative error code otherwise.
*/
int drm_crtc_commit_wait(struct drm_crtc_commit *commit)
@@ -337,7 +336,6 @@ EXPORT_SYMBOL(__drm_atomic_state_free);
* not created by userspace through an IOCTL call.
*
* Returns:
- *
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
@@ -518,7 +516,6 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
* is consistent.
*
* Returns:
- *
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
@@ -828,7 +825,6 @@ EXPORT_SYMBOL(drm_atomic_private_obj_fini);
* object lock to make sure that the state is consistent.
*
* RETURNS:
- *
* Either the allocated state or the error code encoded into a pointer.
*/
struct drm_private_state *
@@ -1061,7 +1057,6 @@ EXPORT_SYMBOL(drm_atomic_get_new_crtc_for_encoder);
* make sure that the state is consistent.
*
* Returns:
- *
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted. All other errors are fatal.
@@ -1169,7 +1164,6 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
* state is consistent.
*
* Returns:
- *
* Either the allocated state or the error code encoded into the pointer. When
* the error is EDEADLK then the w/w mutex code has detected a deadlock and the
* entire atomic sequence must be restarted.
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index fb97b51b38f1..43cdf39019a4 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2266,7 +2266,6 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
* automatically.
*
* Returns:
- *
* 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
* -ENOMEM on allocation failures and -EINTR when a signal is pending.
*/
@@ -3009,7 +3008,6 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
* don't pass the right state structures to the callbacks.
*
* Returns:
- *
* Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
* waiting for the previous commits has been interrupted.
*/
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index d44f055dbe3e..c6af46dd02bf 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -353,8 +353,13 @@ err_reset_bridge:
bridge->encoder = NULL;
list_del(&bridge->chain_node);
- DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
- bridge->of_node, encoder->name, ret);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
+ bridge->of_node, encoder->name, ret);
+ else
+ dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
+ "failed to attach bridge %pOF to encoder %s\n",
+ bridge->of_node, encoder->name);
return ret;
}
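
This split keeps probe deferral quiet: dev_err_probe() logs -EPROBE_DEFER at debug level only and records the reason for the devices_deferred debugfs report, while real errors still reach dmesg. Since dev_err_probe() returns the error it was given, the more common idiom folds the report into the return - a sketch with an assumed helper:

/* Illustrative only; example_get_clock() is an assumed helper. */
static int example_get_clock(struct device *dev)
{
	return -EPROBE_DEFER;	/* pretend the provider isn't ready yet */
}

static int example_probe_step(struct device *dev)
{
	int ret = example_get_clock(dev);

	if (ret)
		return dev_err_probe(dev, ret, "failed to get clock\n");

	return 0;
}
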
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index ab6ab7ff7ea8..fc35f47e2849 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -426,6 +426,8 @@ static void drm_connector_cleanup_action(struct drm_device *dev,
*
* The connector structure should be allocated with drmm_kzalloc().
*
+ * The @drm_connector_funcs.destroy hook must be NULL.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -474,6 +476,8 @@ EXPORT_SYMBOL(drmm_connector_init);
*
* The connector structure should be allocated with drmm_kzalloc().
*
+ * The @drm_connector_funcs.destroy hook must be NULL.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -2315,24 +2319,71 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* DOC: standard connector properties
*
* Colorspace:
- * This property helps select a suitable colorspace based on the sink
- * capability. Modern sink devices support wider gamut like BT2020.
- * This helps switch to BT2020 mode if the BT2020 encoded video stream
- * is being played by the user, same for any other colorspace. Thereby
- * giving a good visual experience to users.
- *
- * The expectation from userspace is that it should parse the EDID
- * and get supported colorspaces. Use this property and switch to the
- * one supported. Sink supported colorspaces should be retrieved by
- * userspace from EDID and driver will not explicitly expose them.
- *
- * Basically the expectation from userspace is:
- * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
- * colorspace
- * - Set this new property to let the sink know what it
- * converted the CRTC output to.
- * - This property is just to inform sink what colorspace
- * source is trying to drive.
+ * This property is used to inform the driver about the color encoding
+ * user space configured the pixel operation properties to produce.
+ * The variants set the colorimetry, transfer characteristics, and which
+ * YCbCr conversion should be used when necessary.
+ * The transfer characteristics from HDR_OUTPUT_METADATA take precedence
+ * over this property.
+ * User space always configures the pixel operation properties to produce
+ * full quantization range data (see the Broadcast RGB property).
+ *
+ * Drivers inform the sink about what colorimetry, transfer
+ * characteristics, YCbCr conversion, and quantization range to expect
+ * (this can depend on the output mode, output format and other
+ * properties). Drivers also convert the user space provided data to what
+ * the sink expects.
+ *
+ * User space has to check if the sink supports all of the possible
+ * colorimetries that the driver is allowed to pick by parsing the EDID.
+ *
+ * For historical reasons this property exposes a number of variants which
+ * result in undefined behavior.
+ *
+ * Default:
+ * The behavior is driver-specific.
+ *
+ * BT2020_RGB:
+ *
+ * BT2020_YCC:
+ * User space configures the pixel operation properties to produce
+ * RGB content with Rec. ITU-R BT.2020 colorimetry, Rec.
+ * ITU-R BT.2020 (Table 4, RGB) transfer characteristics and full
+ * quantization range.
+ * User space can use the HDR_OUTPUT_METADATA property to set the
+ * transfer characteristics to PQ (Rec. ITU-R BT.2100 Table 4) or
+ * HLG (Rec. ITU-R BT.2100 Table 5), in which case user space
+ * configures pixel operation properties to produce content with
+ * the respective transfer characteristics.
+ * User space has to make sure the sink supports Rec.
+ * ITU-R BT.2020 R'G'B' and Rec. ITU-R BT.2020 Y'C'BC'R
+ * colorimetry.
+ * Drivers can configure the sink to use an RGB format, tell the
+ * sink to expect Rec. ITU-R BT.2020 R'G'B' colorimetry and convert
+ * to the appropriate quantization range.
+ * Drivers can configure the sink to use a YCbCr format, tell the
+ * sink to expect Rec. ITU-R BT.2020 Y'C'BC'R colorimetry, convert
+ * to YCbCr using the Rec. ITU-R BT.2020 non-constant luminance
+ * conversion matrix and convert to the appropriate quantization
+ * range.
+ * The variants BT2020_RGB and BT2020_YCC are equivalent and the
+ * driver chooses between RGB and YCbCr on its own.
+ *
+ * SMPTE_170M_YCC:
+ * BT709_YCC:
+ * XVYCC_601:
+ * XVYCC_709:
+ * SYCC_601:
+ * opYCC_601:
+ * opRGB:
+ * BT2020_CYCC:
+ * DCI-P3_RGB_D65:
+ * DCI-P3_RGB_Theater:
+ * RGB_WIDE_FIXED:
+ * RGB_WIDE_FLOAT:
+ *
+ * BT601_YCC:
+ * The behavior is undefined.
*
 * Because HDMI and DP have different colorspaces,
* drm_mode_create_hdmi_colorspace_property() is used for HDMI connector and
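
From user space the property is set like any other connector property. A libdrm sketch under the assumption that the sink's EDID has already been checked for BT.2020 support, as the text above requires (error handling trimmed):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch: find the "Colorspace" enum on a connector and pick BT2020_RGB. */
static int set_colorspace_bt2020(int fd, uint32_t connector_id)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	int ret = -1;

	for (uint32_t i = 0; props && i < props->count_props; i++) {
		drmModePropertyRes *prop =
			drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "Colorspace")) {
			for (int j = 0; j < prop->count_enums; j++)
				if (!strcmp(prop->enums[j].name, "BT2020_RGB"))
					ret = drmModeObjectSetProperty(fd,
						connector_id,
						DRM_MODE_OBJECT_CONNECTOR,
						prop->prop_id,
						prop->enums[j].value);
		}
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return ret;
}
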
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 1f73b8d6d750..89706aa8232f 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -315,4 +315,19 @@ drm_edid_load_firmware(struct drm_connector *connector)
}
#endif
+/* drm_panic.c */
+#ifdef CONFIG_DRM_PANIC
+bool drm_panic_is_enabled(struct drm_device *dev);
+void drm_panic_register(struct drm_device *dev);
+void drm_panic_unregister(struct drm_device *dev);
+void drm_panic_init(void);
+void drm_panic_exit(void);
+#else
+static inline bool drm_panic_is_enabled(struct drm_device *dev) { return false; }
+static inline void drm_panic_register(struct drm_device *dev) {}
+static inline void drm_panic_unregister(struct drm_device *dev) {}
+static inline void drm_panic_init(void) {}
+static inline void drm_panic_exit(void) {}
+#endif
+
#endif /* __DRM_CRTC_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index 9d01d762801f..b4fd43783c50 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -33,9 +33,6 @@ validate_displayid(const u8 *displayid, int length, int idx)
if (IS_ERR(base))
return base;
- DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
- base->rev, base->bytes, base->prod_id, base->ext_count);
-
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 93543071a500..ac30b0ec9d93 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,6 +34,7 @@
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>
+#include <linux/xarray.h>
#include <drm/drm_accel.h>
#include <drm/drm_cache.h>
@@ -54,8 +55,7 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
-static DEFINE_SPINLOCK(drm_minor_lock);
-static struct idr drm_minors_idr;
+DEFINE_XARRAY_ALLOC(drm_minors_xa);
/*
* If the drm core fails to init for whatever reason,
@@ -83,6 +83,18 @@ DEFINE_STATIC_SRCU(drm_unplug_srcu);
* registered and unregistered dynamically according to device-state.
*/
+static struct xarray *drm_minor_get_xa(enum drm_minor_type type)
+{
+ if (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER)
+ return &drm_minors_xa;
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+ else if (type == DRM_MINOR_ACCEL)
+ return &accel_minors_xa;
+#endif
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
enum drm_minor_type type)
{
@@ -101,25 +113,31 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
struct drm_minor *minor = data;
- unsigned long flags;
WARN_ON(dev != minor->dev);
put_device(minor->kdev);
- if (minor->type == DRM_MINOR_ACCEL) {
- accel_minor_remove(minor->index);
- } else {
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- }
+ xa_erase(drm_minor_get_xa(minor->type), minor->index);
}
+/*
+ * DRM used to support 64 devices; for backwards compatibility we need to maintain the
+ * minor allocation scheme where minors 0-63 are primary nodes, 64-127 are control nodes,
+ * and 128-191 are render nodes.
+ * After reaching that limit, minors are allocated dynamically - first come, first served.
+ * Accel nodes use a distinct major, so their minors are allocated in a continuous 0-MAX
+ * range.
+ */
+#define DRM_MINOR_LIMIT(t) ({ \
+ typeof(t) _t = (t); \
+ _t == DRM_MINOR_ACCEL ? XA_LIMIT(0, ACCEL_MAX_MINORS) : XA_LIMIT(64 * _t, 64 * _t + 63); \
+})
+#define DRM_EXTENDED_MINOR_LIMIT XA_LIMIT(192, (1 << MINORBITS) - 1)
+
static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
- unsigned long flags;
int r;
minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
@@ -129,25 +147,14 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
minor->type = type;
minor->dev = dev;
- idr_preload(GFP_KERNEL);
- if (type == DRM_MINOR_ACCEL) {
- r = accel_minor_alloc();
- } else {
- spin_lock_irqsave(&drm_minor_lock, flags);
- r = idr_alloc(&drm_minors_idr,
- NULL,
- 64 * type,
- 64 * (type + 1),
- GFP_NOWAIT);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- }
- idr_preload_end();
-
+ r = xa_alloc(drm_minor_get_xa(type), &minor->index,
+ NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL);
+ if (r == -EBUSY && (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER))
+ r = xa_alloc(&drm_minors_xa, &minor->index,
+ NULL, DRM_EXTENDED_MINOR_LIMIT, GFP_KERNEL);
if (r < 0)
return r;
- minor->index = r;
-
r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
if (r)
return r;
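
Expanding DRM_MINOR_LIMIT() with the enum values from include/drm/drm_file.h (DRM_MINOR_PRIMARY == 0, DRM_MINOR_RENDER == 2) and MINORBITS == 20 gives the following ranges; the 65th card node now falls back to a dynamic minor instead of failing:

/* Effective allocation ranges (derived from the macros above):
 *
 *   DRM_MINOR_LIMIT(DRM_MINOR_PRIMARY) -> XA_LIMIT(0, 63)     card0..card63
 *   DRM_MINOR_LIMIT(DRM_MINOR_RENDER)  -> XA_LIMIT(128, 191)  renderD128..renderD191
 *   DRM_EXTENDED_MINOR_LIMIT           -> XA_LIMIT(192, 1048575)
 *   DRM_MINOR_LIMIT(DRM_MINOR_ACCEL)   -> XA_LIMIT(0, ACCEL_MAX_MINORS)
 *                                         (separate major, separate XArray)
 */
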
@@ -163,7 +170,7 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
- unsigned long flags;
+ void *entry;
int ret;
DRM_DEBUG("\n");
@@ -186,13 +193,12 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
goto err_debugfs;
/* replace NULL with @minor so lookups will succeed from now on */
- if (minor->type == DRM_MINOR_ACCEL) {
- accel_minor_replace(minor, minor->index);
- } else {
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_replace(&drm_minors_idr, minor, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
+ entry = xa_store(drm_minor_get_xa(type), minor->index, minor, GFP_KERNEL);
+ if (xa_is_err(entry)) {
+ ret = xa_err(entry);
+ goto err_debugfs;
}
+ WARN_ON(entry);
DRM_DEBUG("new minor registered %d\n", minor->index);
return 0;
@@ -205,20 +211,13 @@ err_debugfs:
static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
- unsigned long flags;
minor = *drm_minor_get_slot(dev, type);
if (!minor || !device_is_registered(minor->kdev))
return;
/* replace @minor with NULL so lookups will fail from now on */
- if (minor->type == DRM_MINOR_ACCEL) {
- accel_minor_replace(NULL, minor->index);
- } else {
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_replace(&drm_minors_idr, NULL, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- }
+ xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL);
device_del(minor->kdev);
dev_set_drvdata(minor->kdev, NULL); /* safety belt */
@@ -234,16 +233,15 @@ static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type typ
* minor->dev pointer will stay valid! However, the device may get unplugged and
* unregistered while you hold the minor.
*/
-struct drm_minor *drm_minor_acquire(unsigned int minor_id)
+struct drm_minor *drm_minor_acquire(struct xarray *minor_xa, unsigned int minor_id)
{
struct drm_minor *minor;
- unsigned long flags;
- spin_lock_irqsave(&drm_minor_lock, flags);
- minor = idr_find(&drm_minors_idr, minor_id);
+ xa_lock(minor_xa);
+ minor = xa_load(minor_xa, minor_id);
if (minor)
drm_dev_get(minor->dev);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
+ xa_unlock(minor_xa);
if (!minor) {
return ERR_PTR(-ENODEV);
@@ -1036,7 +1034,7 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
DRM_DEBUG("\n");
- minor = drm_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -1067,11 +1065,12 @@ static const struct file_operations drm_stub_fops = {
static void drm_core_exit(void)
{
drm_privacy_screen_lookup_exit();
+ drm_panic_exit();
accel_core_exit();
unregister_chrdev(DRM_MAJOR, "drm");
debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
- idr_destroy(&drm_minors_idr);
+ WARN_ON(!xa_empty(&drm_minors_xa));
drm_connector_ida_destroy();
}
@@ -1080,7 +1079,6 @@ static int __init drm_core_init(void)
int ret;
drm_connector_ida_init();
- idr_init(&drm_minors_idr);
drm_memcpy_init_early();
ret = drm_sysfs_init();
@@ -1099,6 +1097,8 @@ static int __init drm_core_init(void)
if (ret < 0)
goto error;
+ drm_panic_init();
+
drm_privacy_screen_lookup_init();
drm_core_init_complete = true;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f68a41eeb1fa..855beafb76ff 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1817,7 +1817,7 @@ static int edid_block_tag(const void *_block)
static bool edid_block_is_zero(const void *edid)
{
- return !memchr_inv(edid, 0, EDID_LENGTH);
+ return mem_is_zero(edid, EDID_LENGTH);
}
static bool drm_edid_eq(const struct drm_edid *drm_edid,
@@ -1966,22 +1966,14 @@ static void edid_block_dump(const char *level, const void *block, int block_num)
block, EDID_LENGTH, false);
}
-/**
- * drm_edid_block_valid - Sanity check the EDID block (base or extension)
- * @_block: pointer to raw EDID block
- * @block_num: type of block to validate (0 for base, extension otherwise)
- * @print_bad_edid: if true, dump bad EDID blocks to the console
- * @edid_corrupt: if true, the header or checksum is invalid
- *
+/*
* Validate a base or extension EDID block and optionally dump bad blocks to
* the console.
- *
- * Return: True if the block is valid, false otherwise.
*/
-bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
- bool *edid_corrupt)
+static bool drm_edid_block_valid(void *_block, int block_num, bool print_bad_edid,
+ bool *edid_corrupt)
{
- struct edid *block = (struct edid *)_block;
+ struct edid *block = _block;
enum edid_block_status status;
bool is_base_block = block_num == 0;
bool valid;
@@ -2024,7 +2016,6 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
return valid;
}
-EXPORT_SYMBOL(drm_edid_block_valid);
/**
* drm_edid_is_valid - sanity check EDID data
@@ -6629,6 +6620,11 @@ static void update_displayid_info(struct drm_connector *connector,
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] DisplayID extension version 0x%02x, primary use 0x%02x\n",
+ connector->base.id, connector->name,
+ displayid_version(&iter),
+ displayid_primary_use(&iter));
if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
(displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR ||
displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR))
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 2da094bdf8a4..18e366cc4993 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -145,8 +145,7 @@ static int drm_exec_obj_locked(struct drm_exec *exec,
size_t size = exec->max_objects * sizeof(void *);
void *tmp;
- tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE,
- GFP_KERNEL);
+ tmp = kvrealloc(exec->objects, size + PAGE_SIZE, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 56ac37ea2f27..29c53f9f449c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -44,6 +44,7 @@
#include <drm/drm_vblank.h>
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
static bool drm_fbdev_emulation = true;
module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
@@ -88,14 +89,6 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* interfaces. Drivers that use one of the shared memory managers, TTM, SHMEM,
* DMA, should instead use the corresponding fbdev emulation.
*
- * Existing fbdev implementations should restore the fbdev console by using
- * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
- * They should also notify the fb helper code from updates to the output
- * configuration by using drm_fb_helper_output_poll_changed() as their
- * &drm_mode_config_funcs.output_poll_changed callback. New implementations
- * of fbdev should be build on top of struct &drm_client_funcs, which handles
- * this automatically. Setting the old callbacks should be avoided.
- *
* For suspend/resume consider using drm_mode_config_helper_suspend() and
* drm_mode_config_helper_resume() which takes care of fbdev as well.
*
@@ -259,12 +252,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
+ * This helper should be called from fbdev emulation's &drm_client_funcs.restore
+ * callback. It ensures that the user isn't greeted with a black screen when the
+ * userspace compositor releases the display device.
*
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
*/
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
@@ -527,6 +520,7 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
fb_helper->info = info;
info->skip_vt_switch = true;
+ info->skip_panic = drm_panic_is_enabled(fb_helper->dev);
return info;
err_release:
@@ -2001,26 +1995,11 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
* drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
* @dev: DRM device
*
- * This function can be used as the &drm_driver->lastclose callback for drivers
- * that only need to call drm_fb_helper_restore_fbdev_mode_unlocked().
+ * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked()
+ * instead.
*/
void drm_fb_helper_lastclose(struct drm_device *dev)
{
drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
}
EXPORT_SYMBOL(drm_fb_helper_lastclose);
-
-/**
- * drm_fb_helper_output_poll_changed - DRM mode config \.output_poll_changed
- * helper for fbdev emulation
- * @dev: DRM device
- *
- * This function can be used as the
- * &drm_mode_config_funcs.output_poll_changed callback for drivers that only
- * need to call drm_fbdev.hotplug_event().
- */
-void drm_fb_helper_output_poll_changed(struct drm_device *dev)
-{
- drm_fb_helper_hotplug_event(dev->fb_helper);
-}
-EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
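
A minimal sketch of the &drm_client_funcs.restore callback that the updated kernel-doc points to, assuming the usual embedding of the client in struct drm_fb_helper (names are hypothetical):

	static int example_client_restore(struct drm_client_dev *client)
	{
		struct drm_fb_helper *fb_helper =
			container_of(client, struct drm_fb_helper, client);

		return drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
	}

	static const struct drm_client_funcs example_client_funcs = {
		.owner   = THIS_MODULE,
		.restore = example_client_restore,
	};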
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index f8de3cba1a08..ad1dc638c83b 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -38,6 +38,7 @@
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drm_client.h>
#include <drm/drm_drv.h>
@@ -62,15 +63,6 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
if (dev->driver->load || dev->driver->unload)
return true;
- /*
- * Drivers with the lastclose callback assume that it's synchronized
- * against concurrent opens, which again needs the BKL. The proper fix
- * is to use the drm_client infrastructure with proper locking for each
- * client.
- */
- if (dev->driver->lastclose)
- return true;
-
return false;
}
@@ -111,7 +103,6 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
* .poll = drm_poll,
* .read = drm_read,
- * .llseek = no_llseek,
* .mmap = drm_gem_mmap,
* };
*
@@ -356,7 +347,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
* resources for it. It also calls the &drm_driver.open driver callback.
*
* RETURNS:
- *
* 0 on success or negative errno value on failure.
*/
int drm_open(struct inode *inode, struct file *filp)
@@ -365,7 +355,7 @@ int drm_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int retcode;
- minor = drm_minor_acquire(iminor(inode));
+ minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
if (IS_ERR(minor))
return PTR_ERR(minor);
@@ -396,15 +386,12 @@ err_undo:
}
EXPORT_SYMBOL(drm_open);
-void drm_lastclose(struct drm_device * dev)
+static void drm_lastclose(struct drm_device *dev)
{
- drm_dbg_core(dev, "\n");
-
- if (dev->driver->lastclose)
- dev->driver->lastclose(dev);
- drm_dbg_core(dev, "driver lastclose completed\n");
-
drm_client_dev_restore(dev);
+
+ if (dev_is_pci(dev->dev))
+ vga_switcheroo_process_delayed_switch();
}
/**
@@ -413,12 +400,11 @@ void drm_lastclose(struct drm_device * dev)
* @filp: file pointer.
*
* This function must be used by drivers as their &file_operations.release
- * method. It frees any resources associated with the open file, and calls the
- * &drm_driver.postclose driver callback. If this is the last open file for the
- * DRM device also proceeds to call the &drm_driver.lastclose driver callback.
+ * method. It frees any resources associated with the open file. If this
+ * is the last open file for the DRM device, it also restores the active
+ * in-kernel DRM client.
*
* RETURNS:
- *
* Always succeeds and returns 0.
*/
int drm_release(struct inode *inode, struct file *filp)
@@ -485,12 +471,10 @@ void drm_file_update_pid(struct drm_file *filp)
*
* This function may be used by drivers as their &file_operations.release
* method. It frees any resources associated with the open file prior to taking
- * the drm_global_mutex, which then calls the &drm_driver.postclose driver
- * callback. If this is the last open file for the DRM device also proceeds to
- * call the &drm_driver.lastclose driver callback.
+ * the drm_global_mutex. If this is the last open file for the DRM device, it
+ * then restores the active in-kernel DRM client.
*
* RETURNS:
- *
* Always succeeds and returns 0.
*/
int drm_release_noglobal(struct inode *inode, struct file *filp)
@@ -533,7 +517,6 @@ EXPORT_SYMBOL(drm_release_noglobal);
* safety.
*
* RETURNS:
- *
* Number of bytes read (always aligned to full events, and can be 0) or a
* negative error code on failure.
*/
@@ -619,7 +602,6 @@ EXPORT_SYMBOL(drm_read);
* See also drm_read().
*
* RETURNS:
- *
* Mask of POLL flags indicating the current status of the file.
*/
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
@@ -657,7 +639,6 @@ EXPORT_SYMBOL(drm_poll);
* already hold &drm_device.event_lock.
*
* RETURNS:
- *
* 0 on success or a negative error code on failure.
*/
int drm_event_reserve_init_locked(struct drm_device *dev,
@@ -699,7 +680,6 @@ EXPORT_SYMBOL(drm_event_reserve_init_locked);
* drm_event_reserve_init_locked() instead.
*
* RETURNS:
- *
* 0 on success or a negative error code on failure.
*/
int drm_event_reserve_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d4bbc5d109c8..149b8e25da5b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -689,7 +689,6 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
* For a single handle lookup, use drm_gem_object_lookup().
*
* Returns:
- *
* @objs filled in with GEM object pointers. Returned GEM objects need to be
* released with drm_gem_object_put(). -ENOENT is returned on a lookup
* failure. 0 is returned on success.
@@ -737,12 +736,11 @@ EXPORT_SYMBOL(drm_gem_objects_lookup);
* @filp: DRM file private date
* @handle: userspace handle
*
- * Returns:
+ * If looking up an array of handles, use drm_gem_objects_lookup().
*
+ * Returns:
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
- *
- * If looking up an array of handles, use drm_gem_objects_lookup().
*/
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
@@ -763,7 +761,6 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
* @timeout: timeout value in jiffies or zero to return immediately
*
* Returns:
- *
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
* greater than 0 on success.
*/
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 690505a1f7a5..1705bfc90b1e 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -53,7 +53,6 @@ extern struct mutex drm_global_mutex;
bool drm_dev_needs_global_mutex(struct drm_device *dev);
struct drm_file *drm_file_alloc(struct drm_minor *minor);
void drm_file_free(struct drm_file *file);
-void drm_lastclose(struct drm_device *dev);
#ifdef CONFIG_PCI
@@ -81,10 +80,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle);
-/* drm_drv.c */
-struct drm_minor *drm_minor_acquire(unsigned int minor_id);
-void drm_minor_release(struct drm_minor *minor);
-
/* drm_managed.c */
void drm_managed_release(struct drm_device *dev);
void drmm_add_final_kfree(struct drm_device *dev, void *container);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 969cfd5a01ae..2bc3973d35a1 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -603,6 +603,8 @@ EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);
* mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_turn_on_peripheral_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
@@ -652,6 +654,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
* @pps_selector: Select PPS from the table of pre-stored or uploaded PPS entries
*
* Enable or disable Display Stream Compression on the peripheral.
+ * This function is deprecated. Use mipi_dsi_compression_mode_ext_multi() instead.
*
* Return: 0 on success or a negative error code on failure.
*/
@@ -703,6 +706,7 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
* @pps: VESA DSC 1.1 Picture Parameter Set
*
* Transmit the VESA DSC 1.1 Picture Parameter Set to the peripheral.
+ * This function is deprecated. Use mipi_dsi_picture_parameter_set_multi() instead.
*
* Return: 0 on success or a negative error code on failure.
*/
@@ -1037,6 +1041,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_read);
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_nop_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
@@ -1055,6 +1061,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_nop);
* mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_soft_reset_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
@@ -1124,6 +1132,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
* display module except interface communication
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_enter_sleep_mode_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
@@ -1143,6 +1153,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
* module
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_exit_sleep_mode_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
@@ -1162,6 +1174,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
* display device
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_display_off_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
@@ -1181,6 +1195,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
* display device
* @dsi: DSI peripheral device
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_display_on_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
@@ -1202,6 +1218,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
* @start: first column of frame memory
* @end: last column of frame memory
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_column_address_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
@@ -1226,6 +1245,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
* @start: first page of frame memory
* @end: last page of frame memory
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_page_address_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
@@ -1268,6 +1290,8 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
* @dsi: DSI peripheral device
* @mode: the Tearing Effect Output Line mode
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_tear_on_multi() instead.
+ *
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
@@ -1291,6 +1315,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
* @dsi: DSI peripheral device
* @format: pixel format
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_pixel_format_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
@@ -1312,6 +1339,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
* @dsi: DSI peripheral device
* @scanline: scanline to use as trigger
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_tear_scanline_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure
*/
int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
@@ -1334,6 +1364,9 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
* @dsi: DSI peripheral device
* @brightness: brightness value
*
+ * This function is deprecated. Use mipi_dsi_dcs_set_display_brightness_multi()
+ * instead.
+ *
* Return: 0 on success or a negative error code on failure.
*/
int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
@@ -1639,6 +1672,198 @@ void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx,
}
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on_multi);
+/**
+ * mipi_dsi_turn_on_peripheral_multi() - sends a Turn On Peripheral command
+ * @ctx: Context for multiple DSI transactions
+ *
+ * Like mipi_dsi_turn_on_peripheral() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_turn_on_peripheral(dsi);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to turn on peripheral: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral_multi);
+
+/**
+ * mipi_dsi_dcs_soft_reset_multi() - perform a software reset of the display module
+ * @ctx: Context for multiple DSI transactions
+ *
+ * Like mipi_dsi_dcs_soft_reset() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_soft_reset(dsi);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to mipi_dsi_dcs_soft_reset: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset_multi);
+
+/**
+ * mipi_dsi_dcs_set_display_brightness_multi() - sets the brightness value of
+ * the display
+ * @ctx: Context for multiple DSI transactions
+ * @brightness: brightness value
+ *
+ * Like mipi_dsi_dcs_set_display_brightness() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx,
+ u16 brightness)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to write display brightness: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_multi);
+
+/**
+ * mipi_dsi_dcs_set_pixel_format_multi() - sets the pixel format for the RGB image
+ * data used by the interface
+ * @ctx: Context for multiple DSI transactions
+ * @format: pixel format
+ *
+ * Like mipi_dsi_dcs_set_pixel_format() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx,
+ u8 format)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, format);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to set pixel format: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format_multi);
+
+/**
+ * mipi_dsi_dcs_set_column_address_multi() - define the column extent of the
+ * frame memory accessed by the host processor
+ * @ctx: Context for multiple DSI transactions
+ * @start: first column of frame memory
+ * @end: last column of frame memory
+ *
+ * Like mipi_dsi_dcs_set_column_address() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_column_address(dsi, start, end);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to set column address: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address_multi);
+
+/**
+ * mipi_dsi_dcs_set_page_address_multi() - define the page extent of the
+ * frame memory accessed by the host processor
+ * @ctx: Context for multiple DSI transactions
+ * @start: first page of frame memory
+ * @end: last page of frame memory
+ *
+ * Like mipi_dsi_dcs_set_page_address() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, start, end);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to set page address: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address_multi);
+
+/**
+ * mipi_dsi_dcs_set_tear_scanline_multi() - set the scanline to use as trigger for
+ * the Tearing Effect output signal of the display module
+ * @ctx: Context for multiple DSI transactions
+ * @scanline: scanline to use as trigger
+ *
+ * Like mipi_dsi_dcs_set_tear_scanline() but deals with errors in a way that
+ * makes it convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
+ u16 scanline)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_dcs_set_tear_scanline(dsi, scanline);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "Failed to set tear scanline: %d\n",
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline_multi);
+
static int mipi_dsi_drv_probe(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
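
A sketch of how a panel driver might chain the new *_multi helpers: ctx.accum_err records the first failure and turns every later call into a no-op, so a single check at the end suffices (panel name and addresses are hypothetical):

	static int example_panel_setup(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context ctx = { .dsi = dsi };

		mipi_dsi_dcs_soft_reset_multi(&ctx);
		mipi_dsi_dcs_set_column_address_multi(&ctx, 0, 1079);
		mipi_dsi_dcs_set_page_address_multi(&ctx, 0, 1919);
		mipi_dsi_dcs_set_display_brightness_multi(&ctx, 255);
		mipi_dsi_turn_on_peripheral_multi(&ctx);

		/* any failure above has already been logged */
		return ctx.accum_err;
	}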
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 568972258222..37d2e0a4ef4b 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -456,6 +456,8 @@ int drmm_mode_config_init(struct drm_device *dev)
if (ret == -EDEADLK)
ret = drm_modeset_backoff(&modeset_ctx);
+ might_fault();
+
ww_acquire_init(&resv_ctx, &reservation_ww_class);
ret = dma_resv_lock(&resv, &resv_ctx);
if (ret == -EDEADLK)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 1a0890083aee..6ba167a33461 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -539,7 +539,6 @@ static int fill_analog_mode(struct drm_device *dev,
* to reach those resolutions.
*
* Returns:
- *
* A pointer to the mode, allocated with drm_mode_create(). Returns NULL
* on error.
*/
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index cfbe020de54e..19ab0a794add 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -161,6 +161,15 @@ int drm_panel_unprepare(struct drm_panel *panel)
if (!panel)
return -EINVAL;
+ /*
+	 * If you are seeing the warning below, it likely means one of two things:
+	 *   - Your panel driver incorrectly calls drm_panel_unprepare() in its
+	 *     shutdown routine. You should remove that call.
+	 *   - You are using panel-edp or panel-simple and your DRM modeset
+	 *     driver's shutdown() callback happened after the panel's shutdown().
+	 *     In this case the warning is harmless, though ideally you should
+ * figure out how to reverse the order of the shutdown() callbacks.
+ */
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
return 0;
@@ -245,6 +254,15 @@ int drm_panel_disable(struct drm_panel *panel)
if (!panel)
return -EINVAL;
+ /*
+	 * If you are seeing the warning below, it likely means one of two things:
+	 *   - Your panel driver incorrectly calls drm_panel_disable() in its
+	 *     shutdown routine. You should remove that call.
+	 *   - You are using panel-edp or panel-simple and your DRM modeset
+	 *     driver's shutdown() callback happened after the panel's shutdown().
+	 *     In this case the warning is harmless, though ideally you should
+ * figure out how to reverse the order of the shutdown() callbacks.
+ */
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
return 0;
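
The comments above assume the modeset driver shuts the display pipeline down itself; a typical sketch of such a shutdown path, using the standard atomic helper (driver names are hypothetical):

	static void example_platform_shutdown(struct platform_device *pdev)
	{
		struct drm_device *drm = platform_get_drvdata(pdev);

		/* disables all CRTCs, which in turn disables and unprepares the panel */
		drm_atomic_helper_shutdown(drm);
	}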
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 948aed00595e..74412b7bf936 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -18,6 +18,8 @@
#include <linux/overflow.h>
#include <linux/printk.h>
#include <linux/types.h>
+#include <linux/utsname.h>
+#include <linux/zlib.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
@@ -26,6 +28,9 @@
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
+#include <drm/drm_rect.h>
+
+#include "drm_crtc_internal.h"
MODULE_AUTHOR("Jocelyn Falempe");
MODULE_DESCRIPTION("DRM panic handler");
@@ -76,11 +81,15 @@ struct drm_panic_line {
#define PANIC_LINE(s) {.len = sizeof(s) - 1, .txt = s}
static struct drm_panic_line panic_msg[] = {
- PANIC_LINE("KERNEL PANIC !"),
+ PANIC_LINE("KERNEL PANIC!"),
PANIC_LINE(""),
PANIC_LINE("Please reboot your computer."),
+ PANIC_LINE(""),
+ PANIC_LINE(""), /* will be replaced by the panic description */
};
+static const size_t panic_msg_lines = ARRAY_SIZE(panic_msg);
+
static const struct drm_panic_line logo_ascii[] = {
PANIC_LINE(" .--. _"),
PANIC_LINE(" |o_o | | |"),
@@ -91,6 +100,8 @@ static const struct drm_panic_line logo_ascii[] = {
PANIC_LINE(" \\___)=(___/"),
};
+static const size_t logo_ascii_lines = ARRAY_SIZE(logo_ascii);
+
#if defined(CONFIG_LOGO) && !defined(MODULE)
static const struct linux_logo *logo_mono;
@@ -249,20 +260,20 @@ static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, i
static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
const u8 *sbuf8, unsigned int spitch,
unsigned int height, unsigned int width,
- u16 fg16)
+ unsigned int scale, u16 fg16)
{
unsigned int y, x;
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y))
+ if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
}
static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
const u8 *sbuf8, unsigned int spitch,
unsigned int height, unsigned int width,
- u32 fg32)
+ unsigned int scale, u32 fg32)
{
unsigned int y, x;
@@ -270,7 +281,7 @@ static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
for (x = 0; x < width; x++) {
u32 off = y * dpitch + x * 3;
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) {
+ if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
/* write blue-green-red to output in little endianness */
iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
@@ -283,24 +294,25 @@ static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
const u8 *sbuf8, unsigned int spitch,
unsigned int height, unsigned int width,
- u32 fg32)
+ unsigned int scale, u32 fg32)
{
unsigned int y, x;
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y))
+ if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
}
static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip,
- const u8 *sbuf8, unsigned int spitch, u32 fg_color)
+ const u8 *sbuf8, unsigned int spitch, unsigned int scale,
+ u32 fg_color)
{
unsigned int y, x;
for (y = 0; y < drm_rect_height(clip); y++)
for (x = 0; x < drm_rect_width(clip); x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y))
+ if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
@@ -310,18 +322,22 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
* @clip: destination rectangle
* @sbuf8: source buffer, in monochrome format, 8 pixels per byte.
* @spitch: source pitch in bytes
+ * @scale: integer scale, the source buffer is scale times smaller than the
+ * destination rectangle
* @fg_color: foreground color, in destination format
*
* This can be used to draw a font character, which is a monochrome image, to a
* framebuffer in other supported format.
*/
static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
- const u8 *sbuf8, unsigned int spitch, u32 fg_color)
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int scale, u32 fg_color)
+
{
struct iosys_map map;
if (sb->set_pixel)
- return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, fg_color);
+ return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color);
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -329,15 +345,15 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), fg_color);
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 3:
drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), fg_color);
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 4:
drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), fg_color);
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
default:
WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]);
@@ -477,39 +493,51 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
for (j = 0; j < line_len; j++) {
src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
rec.x2 = rec.x1 + font->width;
- drm_panic_blit(sb, &rec, src, font_pitch, color);
+ drm_panic_blit(sb, &rec, src, font_pitch, 1, color);
rec.x1 += font->width;
}
}
}
+static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *font)
+{
+ if (logo_mono) {
+ drm_rect_init(rect, 0, 0, logo_mono->width, logo_mono->height);
+ } else {
+ int logo_width = get_max_line_len(logo_ascii, logo_ascii_lines) * font->width;
+
+ drm_rect_init(rect, 0, 0, logo_width, logo_ascii_lines * font->height);
+ }
+}
+
+static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
+ const struct font_desc *font, u32 fg_color)
+{
+ if (logo_mono)
+ drm_panic_blit(sb, rect, logo_mono->data,
+ DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
+ else
+ draw_txt_rectangle(sb, font, logo_ascii, logo_ascii_lines, false, rect,
+ fg_color);
+}
+
static void draw_panic_static_user(struct drm_scanout_buffer *sb)
{
- size_t msg_lines = ARRAY_SIZE(panic_msg);
- size_t logo_ascii_lines = ARRAY_SIZE(logo_ascii);
u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg;
- unsigned int logo_width, logo_height;
+ unsigned int msg_width, msg_height;
if (!font)
return;
r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
+ drm_panic_logo_rect(&r_logo, font);
- if (logo_mono) {
- logo_width = logo_mono->width;
- logo_height = logo_mono->height;
- } else {
- logo_width = get_max_line_len(logo_ascii, logo_ascii_lines) * font->width;
- logo_height = logo_ascii_lines * font->height;
- }
-
- r_logo = DRM_RECT_INIT(0, 0, logo_width, logo_height);
- r_msg = DRM_RECT_INIT(0, 0,
- min(get_max_line_len(panic_msg, msg_lines) * font->width, sb->width),
- min(msg_lines * font->height, sb->height));
+ msg_width = min(get_max_line_len(panic_msg, panic_msg_lines) * font->width, sb->width);
+ msg_height = min(panic_msg_lines * font->height, sb->height);
+ r_msg = DRM_RECT_INIT(0, 0, msg_width, msg_height);
/* Center the panic message */
drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2, (sb->height - r_msg.y2) / 2);
@@ -517,16 +545,10 @@ static void draw_panic_static_user(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
- if ((r_msg.x1 >= logo_width || r_msg.y1 >= logo_height) &&
- logo_width <= sb->width && logo_height <= sb->height) {
- if (logo_mono)
- drm_panic_blit(sb, &r_logo, logo_mono->data, DIV_ROUND_UP(logo_width, 8),
- fg_color);
- else
- draw_txt_rectangle(sb, font, logo_ascii, logo_ascii_lines, false, &r_logo,
- fg_color);
- }
- draw_txt_rectangle(sb, font, panic_msg, msg_lines, true, &r_msg, fg_color);
+ if (!drm_rect_overlap(&r_logo, &r_msg))
+ drm_panic_logo_draw(sb, &r_logo, font, fg_color);
+
+ draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
}
/*
@@ -608,6 +630,233 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
}
}
+#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
+/*
+ * It is unwise to allocate memory in the panic callback, so the buffers are
+ * pre-allocated. Only two buffers and the zlib workspace are needed, used
+ * as follows:
+ * 1) kmsg messages are dumped in buffer1
+ * 2) kmsg is zlib-compressed into buffer2
+ * 3) compressed kmsg is encoded as QR-code Numeric stream in buffer1
+ * 4) QR-code image is generated in buffer2
+ * The maximum QR code size is V40 (177x177): 4071 bytes for the image,
+ * 2956 bytes for the data segments.
+ *
+ * Typically, ~7500 bytes of kmsg are compressed into ~2800 bytes, which fits
+ * in a V40 QR code (177x177).
+ *
+ * If CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL is not set, the kmsg data is put
+ * directly in the QR code, and the buffers are used as follows:
+ * 1) kmsg messages are dumped in buffer1
+ * 2) kmsg message is encoded as byte stream in buffer2
+ * 3) QR-code image is generated in buffer1
+ */
+
+static uint panic_qr_version = CONFIG_DRM_PANIC_SCREEN_QR_VERSION;
+module_param(panic_qr_version, uint, 0644);
+MODULE_PARM_DESC(panic_qr_version, "maximum version (size) of the QR code");
+
+#define MAX_QR_DATA 2956
+#define MAX_ZLIB_RATIO 3
+#define QR_BUFFER1_SIZE (MAX_ZLIB_RATIO * MAX_QR_DATA) /* Must also be > 4071 */
+#define QR_BUFFER2_SIZE 4096
+#define QR_MARGIN 4 /* 4 modules of foreground color around the qr code */
+
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
+
+static char *qrbuf1;
+static char *qrbuf2;
+static struct z_stream_s stream;
+
+static void __init drm_panic_qr_init(void)
+{
+ qrbuf1 = kmalloc(QR_BUFFER1_SIZE, GFP_KERNEL);
+ qrbuf2 = kmalloc(QR_BUFFER2_SIZE, GFP_KERNEL);
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+ GFP_KERNEL);
+}
+
+static void drm_panic_qr_exit(void)
+{
+ kfree(qrbuf1);
+ qrbuf1 = NULL;
+ kfree(qrbuf2);
+ qrbuf2 = NULL;
+ kfree(stream.workspace);
+ stream.workspace = NULL;
+}
+
+extern size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
+
+extern u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
+ u8 *tmp, size_t tmp_size);
+
+static int drm_panic_get_qr_code_url(u8 **qr_image)
+{
+ struct kmsg_dump_iter iter;
+ char url[256];
+ size_t kmsg_len, max_kmsg_size;
+ char *kmsg;
+ int max_qr_data_size, url_len;
+
+ url_len = snprintf(url, sizeof(url), CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL "?a=%s&v=%s&zl=",
+ utsname()->machine, utsname()->release);
+
+ max_qr_data_size = drm_panic_qr_max_data_size(panic_qr_version, url_len);
+ max_kmsg_size = min(MAX_ZLIB_RATIO * max_qr_data_size, QR_BUFFER1_SIZE);
+
+ /* get kmsg to buffer 1 */
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false, qrbuf1, max_kmsg_size, &kmsg_len);
+
+ if (!kmsg_len)
+ return -ENODATA;
+ kmsg = qrbuf1;
+
+try_again:
+ if (zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK)
+ return -EINVAL;
+
+ stream.next_in = kmsg;
+ stream.avail_in = kmsg_len;
+ stream.total_in = 0;
+ stream.next_out = qrbuf2;
+ stream.avail_out = QR_BUFFER2_SIZE;
+ stream.total_out = 0;
+
+ if (zlib_deflate(&stream, Z_FINISH) != Z_STREAM_END)
+ return -EINVAL;
+
+ if (zlib_deflateEnd(&stream) != Z_OK)
+ return -EINVAL;
+
+ if (stream.total_out > max_qr_data_size) {
+ /* too much data for the QR code, so skip the first line and try again */
+ kmsg = strchr(kmsg, '\n');
+ if (!kmsg)
+ return -EINVAL;
+ /* skip the first \n */
+ kmsg += 1;
+ kmsg_len = strlen(kmsg);
+ goto try_again;
+ }
+ *qr_image = qrbuf2;
+
+ /* generate qr code image in buffer2 */
+ return drm_panic_qr_generate(url, qrbuf2, stream.total_out, QR_BUFFER2_SIZE,
+ qrbuf1, QR_BUFFER1_SIZE);
+}
+
+static int drm_panic_get_qr_code_raw(u8 **qr_image)
+{
+ struct kmsg_dump_iter iter;
+ size_t kmsg_len;
+ size_t max_kmsg_size = min(drm_panic_qr_max_data_size(panic_qr_version, 0),
+ QR_BUFFER1_SIZE);
+
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false, qrbuf1, max_kmsg_size, &kmsg_len);
+ if (!kmsg_len)
+ return -ENODATA;
+
+ *qr_image = qrbuf1;
+ return drm_panic_qr_generate(NULL, qrbuf1, kmsg_len, QR_BUFFER1_SIZE,
+ qrbuf2, QR_BUFFER2_SIZE);
+}
+
+static int drm_panic_get_qr_code(u8 **qr_image)
+{
+ if (strlen(CONFIG_DRM_PANIC_SCREEN_QR_CODE_URL) > 0)
+ return drm_panic_get_qr_code_url(qr_image);
+ else
+ return drm_panic_get_qr_code_raw(qr_image);
+}
+
+/*
+ * Draw the panic message at the center of the screen, with a QR Code
+ */
+static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
+{
+ u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
+ u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas;
+ unsigned int max_qr_size, scale;
+ unsigned int msg_width, msg_height;
+ int qr_width, qr_canvas_width, qr_pitch, v_margin;
+ u8 *qr_image;
+
+ if (!font || !qrbuf1 || !qrbuf2 || !stream.workspace)
+ return -ENOMEM;
+
+ r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
+
+ drm_panic_logo_rect(&r_logo, font);
+
+ msg_width = min(get_max_line_len(panic_msg, panic_msg_lines) * font->width, sb->width);
+ msg_height = min(panic_msg_lines * font->height, sb->height);
+ r_msg = DRM_RECT_INIT(0, 0, msg_width, msg_height);
+
+ max_qr_size = min(3 * sb->width / 4, 3 * sb->height / 4);
+
+ qr_width = drm_panic_get_qr_code(&qr_image);
+ if (qr_width <= 0)
+ return -ENOSPC;
+
+ qr_canvas_width = qr_width + QR_MARGIN * 2;
+ scale = max_qr_size / qr_canvas_width;
+	/* The QR code is not readable if not scaled by at least 2 */
+ if (scale < 2)
+ return -ENOSPC;
+
+ pr_debug("QR width %d and scale %d\n", qr_width, scale);
+ r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
+
+ v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+
+ drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
+ r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
+ qr_width * scale, qr_width * scale);
+
+ /* Center the panic message */
+ drm_rect_translate(&r_msg, (sb->width - r_msg.x2) / 2,
+ 3 * v_margin + drm_rect_height(&r_qr_canvas));
+
+ /* Fill with the background color, and draw text on top */
+ drm_panic_fill(sb, &r_screen, bg_color);
+
+ if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+ drm_panic_logo_draw(sb, &r_logo, font, fg_color);
+
+ draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
+
+ /* Draw the qr code */
+ qr_pitch = DIV_ROUND_UP(qr_width, 8);
+ drm_panic_fill(sb, &r_qr_canvas, fg_color);
+ drm_panic_fill(sb, &r_qr, bg_color);
+ drm_panic_blit(sb, &r_qr, qr_image, qr_pitch, scale, fg_color);
+ return 0;
+}
+
+static void draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
+{
+ if (_draw_panic_static_qr_code(sb))
+ draw_panic_static_user(sb);
+}
+#else
+static void draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
+{
+ draw_panic_static_user(sb);
+}
+
+static void drm_panic_qr_init(void) {}
+static void drm_panic_qr_exit(void) {}
+#endif
+
/*
* drm_panic_is_format_supported()
* @format: a fourcc color code
@@ -626,12 +875,38 @@ static void draw_panic_dispatch(struct drm_scanout_buffer *sb)
{
if (!strcmp(drm_panic_screen, "kmsg")) {
draw_panic_static_kmsg(sb);
+ } else if (!strcmp(drm_panic_screen, "qr_code")) {
+ draw_panic_static_qr_code(sb);
} else {
draw_panic_static_user(sb);
}
}
-static void draw_panic_plane(struct drm_plane *plane)
+static void drm_panic_set_description(const char *description)
+{
+ u32 len;
+
+ if (description) {
+ struct drm_panic_line *desc_line = &panic_msg[panic_msg_lines - 1];
+
+ desc_line->txt = description;
+ len = strlen(description);
+ /* ignore the last newline character */
+ if (len && description[len - 1] == '\n')
+ len -= 1;
+ desc_line->len = len;
+ }
+}
+
+static void drm_panic_clear_description(void)
+{
+ struct drm_panic_line *desc_line = &panic_msg[panic_msg_lines - 1];
+
+ desc_line->len = 0;
+ desc_line->txt = NULL;
+}
+
+static void draw_panic_plane(struct drm_plane *plane, const char *description)
{
struct drm_scanout_buffer sb = { };
int ret;
@@ -640,6 +915,8 @@ static void draw_panic_plane(struct drm_plane *plane)
if (!drm_panic_trylock(plane->dev, flags))
return;
+ drm_panic_set_description(description);
+
ret = plane->helper_private->get_scanout_buffer(plane, &sb);
if (!ret && drm_panic_is_format_supported(sb.format)) {
@@ -647,6 +924,7 @@ static void draw_panic_plane(struct drm_plane *plane)
if (plane->helper_private->panic_flush)
plane->helper_private->panic_flush(plane);
}
+ drm_panic_clear_description();
drm_panic_unlock(plane->dev, flags);
}
@@ -655,12 +933,12 @@ static struct drm_plane *to_drm_plane(struct kmsg_dumper *kd)
return container_of(kd, struct drm_plane, kmsg_panic);
}
-static void drm_panic(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
+static void drm_panic(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail)
{
struct drm_plane *plane = to_drm_plane(dumper);
- if (reason == KMSG_DUMP_PANIC)
- draw_panic_plane(plane);
+ if (detail->reason == KMSG_DUMP_PANIC)
+ draw_panic_plane(plane, detail->description);
}
@@ -680,7 +958,7 @@ static ssize_t debugfs_trigger_write(struct file *file, const char __user *user_
if (kstrtobool_from_user(user_buf, count, &run) == 0 && run) {
struct drm_plane *plane = file->private_data;
- draw_panic_plane(plane);
+ draw_panic_plane(plane, "Test from debugfs");
}
return count;
}
@@ -704,6 +982,26 @@ static void debugfs_register_plane(struct drm_plane *plane, int index) {}
#endif /* CONFIG_DRM_PANIC_DEBUG */
/**
+ * drm_panic_is_enabled() - Check whether drm_panic is usable on a device
+ * @dev: the drm device that may support drm_panic
+ *
+ * Returns: true if the drm device supports drm_panic
+ */
+bool drm_panic_is_enabled(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+
+ if (!dev->mode_config.num_total_plane)
+ return false;
+
+ drm_for_each_plane(plane, dev)
+ if (plane->helper_private && plane->helper_private->get_scanout_buffer)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(drm_panic_is_enabled);
+
+/**
* drm_panic_register() - Initialize DRM panic for a device
* @dev: the drm device on which the panic screen will be displayed.
*/
@@ -730,7 +1028,6 @@ void drm_panic_register(struct drm_device *dev)
if (registered_plane)
drm_info(dev, "Registered %d planes with drm panic\n", registered_plane);
}
-EXPORT_SYMBOL(drm_panic_register);
/**
* drm_panic_unregister()
@@ -749,4 +1046,19 @@ void drm_panic_unregister(struct drm_device *dev)
kmsg_dump_unregister(&plane->kmsg_panic);
}
}
-EXPORT_SYMBOL(drm_panic_unregister);
+
+/**
+ * drm_panic_init() - Initialize DRM panic.
+ */
+void __init drm_panic_init(void)
+{
+ drm_panic_qr_init();
+}
+
+/**
+ * drm_panic_exit() - Free the resources allocated by drm_panic_init()
+ */
+void drm_panic_exit(void)
+{
+ drm_panic_qr_exit();
+}
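
For context, a plane opts into the panic screen by implementing the get_scanout_buffer plane helper that drm_panic_is_enabled() and drm_panic_register() look for; a rough sketch with hypothetical driver fields:

	static int example_get_scanout_buffer(struct drm_plane *plane,
					      struct drm_scanout_buffer *sb)
	{
		struct example_device *edev = to_example_device(plane->dev);

		iosys_map_set_vaddr(&sb->map[0], edev->vram);
		sb->format = edev->format; /* const struct drm_format_info * */
		sb->pitch[0] = edev->pitch;
		sb->width = edev->width;
		sb->height = edev->height;

		return 0;
	}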
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
new file mode 100644
index 000000000000..1ef56cb07dfb
--- /dev/null
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: MIT
+
+//! This is a simple QR encoder for DRM panic.
+//!
+//! It is called from a panic handler, so it shouldn't allocate memory and
+//! does all the work on the stack or on the provided buffers. For
+//! simplification, it only supports low error correction, and applies the
+//! first mask (checkerboard). It will draw the smallest QR code that can
+//! contain the string passed as parameter. To get the most compact
+//! QR code, the start of the URL is encoded as binary, and the
+//! compressed kmsg is encoded as numeric.
+//!
+//! The binary data must be a valid URL parameter, so the easiest way is
+//! to use base64 encoding. But that wastes 25% of the data space, so the
+//! whole stack trace won't fit in the QR code. Instead, every 13 bits of
+//! input are encoded as 4 decimal digits, and the efficient numeric mode
+//! then encodes 3 decimal digits into 10 bits. This turns 39 bits of
+//! compressed data into 12 decimal digits, taking 40 bits in the QR code,
+//! so only ~2.5% is wasted. And the digits are a valid URL parameter, so
+//! the website can reverse the process to recover the binary data.
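+//!
+//! For example (illustrative values): three 13-bit chunks with values
+//! 8191, 0 and 123 become the digit string "819100000123", which the
+//! numeric mode packs as four groups of 3 digits at 10 bits each, so
+//! 39 bits of input cost exactly 40 bits of output.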
+//!
+//! Inspired by these 3 projects, all under MIT license:
+//!
+//! * <https://github.com/kennytm/qrcode-rust>
+//! * <https://github.com/erwanvivien/fast_qr>
+//! * <https://github.com/bjguillot/qr>
+
+use core::cmp;
+use kernel::str::CStr;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
+struct Version(usize);
+
+// Generator polynomials for ECC, only those that are needed for low quality.
+const P7: [u8; 7] = [87, 229, 146, 149, 238, 102, 21];
+const P10: [u8; 10] = [251, 67, 46, 61, 118, 70, 64, 94, 32, 45];
+const P15: [u8; 15] = [
+ 8, 183, 61, 91, 202, 37, 51, 58, 58, 237, 140, 124, 5, 99, 105,
+];
+const P18: [u8; 18] = [
+ 215, 234, 158, 94, 184, 97, 118, 170, 79, 187, 152, 148, 252, 179, 5, 98, 96, 153,
+];
+const P20: [u8; 20] = [
+ 17, 60, 79, 50, 61, 163, 26, 187, 202, 180, 221, 225, 83, 239, 156, 164, 212, 212, 188, 190,
+];
+const P22: [u8; 22] = [
+ 210, 171, 247, 242, 93, 230, 14, 109, 221, 53, 200, 74, 8, 172, 98, 80, 219, 134, 160, 105,
+ 165, 231,
+];
+const P24: [u8; 24] = [
+ 229, 121, 135, 48, 211, 117, 251, 126, 159, 180, 169, 152, 192, 226, 228, 218, 111, 0, 117,
+ 232, 87, 96, 227, 21,
+];
+const P26: [u8; 26] = [
+ 173, 125, 158, 2, 103, 182, 118, 17, 145, 201, 111, 28, 165, 53, 161, 21, 245, 142, 13, 102,
+ 48, 227, 153, 145, 218, 70,
+];
+const P28: [u8; 28] = [
+ 168, 223, 200, 104, 224, 234, 108, 180, 110, 190, 195, 147, 205, 27, 232, 201, 21, 43, 245, 87,
+ 42, 195, 212, 119, 242, 37, 9, 123,
+];
+const P30: [u8; 30] = [
+ 41, 173, 145, 152, 216, 31, 179, 182, 50, 48, 110, 86, 239, 96, 222, 125, 42, 173, 226, 193,
+ 224, 130, 156, 37, 251, 216, 238, 40, 192, 180,
+];
+
+/// QR Code parameters for Low quality ECC:
+/// - Error Correction polynomial.
+/// - Number of blocks in group 1.
+/// - Number of blocks in group 2.
+/// - Block size in group 1.
+///
+/// (Block size in group 2 is one more than group 1).
+struct VersionParameter(&'static [u8], u8, u8, u8);
+const VPARAM: [VersionParameter; 40] = [
+ VersionParameter(&P7, 1, 0, 19), // V1
+ VersionParameter(&P10, 1, 0, 34), // V2
+ VersionParameter(&P15, 1, 0, 55), // V3
+ VersionParameter(&P20, 1, 0, 80), // V4
+ VersionParameter(&P26, 1, 0, 108), // V5
+ VersionParameter(&P18, 2, 0, 68), // V6
+ VersionParameter(&P20, 2, 0, 78), // V7
+ VersionParameter(&P24, 2, 0, 97), // V8
+ VersionParameter(&P30, 2, 0, 116), // V9
+ VersionParameter(&P18, 2, 2, 68), // V10
+ VersionParameter(&P20, 4, 0, 81), // V11
+ VersionParameter(&P24, 2, 2, 92), // V12
+ VersionParameter(&P26, 4, 0, 107), // V13
+ VersionParameter(&P30, 3, 1, 115), // V14
+ VersionParameter(&P22, 5, 1, 87), // V15
+ VersionParameter(&P24, 5, 1, 98), // V16
+ VersionParameter(&P28, 1, 5, 107), // V17
+ VersionParameter(&P30, 5, 1, 120), // V18
+ VersionParameter(&P28, 3, 4, 113), // V19
+ VersionParameter(&P28, 3, 5, 107), // V20
+ VersionParameter(&P28, 4, 4, 116), // V21
+ VersionParameter(&P28, 2, 7, 111), // V22
+ VersionParameter(&P30, 4, 5, 121), // V23
+ VersionParameter(&P30, 6, 4, 117), // V24
+ VersionParameter(&P26, 8, 4, 106), // V25
+ VersionParameter(&P28, 10, 2, 114), // V26
+ VersionParameter(&P30, 8, 4, 122), // V27
+ VersionParameter(&P30, 3, 10, 117), // V28
+ VersionParameter(&P30, 7, 7, 116), // V29
+ VersionParameter(&P30, 5, 10, 115), // V30
+ VersionParameter(&P30, 13, 3, 115), // V31
+ VersionParameter(&P30, 17, 0, 115), // V32
+ VersionParameter(&P30, 17, 1, 115), // V33
+ VersionParameter(&P30, 13, 6, 115), // V34
+ VersionParameter(&P30, 12, 7, 121), // V35
+ VersionParameter(&P30, 6, 14, 121), // V36
+ VersionParameter(&P30, 17, 4, 122), // V37
+ VersionParameter(&P30, 4, 18, 122), // V38
+ VersionParameter(&P30, 20, 4, 117), // V39
+ VersionParameter(&P30, 19, 6, 118), // V40
+];
+
+const MAX_EC_SIZE: usize = 30;
+const MAX_BLK_SIZE: usize = 123;
+
+/// Positions of the alignment pattern grid.
+const ALIGNMENT_PATTERNS: [&[u8]; 40] = [
+ &[],
+ &[6, 18],
+ &[6, 22],
+ &[6, 26],
+ &[6, 30],
+ &[6, 34],
+ &[6, 22, 38],
+ &[6, 24, 42],
+ &[6, 26, 46],
+ &[6, 28, 50],
+ &[6, 30, 54],
+ &[6, 32, 58],
+ &[6, 34, 62],
+ &[6, 26, 46, 66],
+ &[6, 26, 48, 70],
+ &[6, 26, 50, 74],
+ &[6, 30, 54, 78],
+ &[6, 30, 56, 82],
+ &[6, 30, 58, 86],
+ &[6, 34, 62, 90],
+ &[6, 28, 50, 72, 94],
+ &[6, 26, 50, 74, 98],
+ &[6, 30, 54, 78, 102],
+ &[6, 28, 54, 80, 106],
+ &[6, 32, 58, 84, 110],
+ &[6, 30, 58, 86, 114],
+ &[6, 34, 62, 90, 118],
+ &[6, 26, 50, 74, 98, 122],
+ &[6, 30, 54, 78, 102, 126],
+ &[6, 26, 52, 78, 104, 130],
+ &[6, 30, 56, 82, 108, 134],
+ &[6, 34, 60, 86, 112, 138],
+ &[6, 30, 58, 86, 114, 142],
+ &[6, 34, 62, 90, 118, 146],
+ &[6, 30, 54, 78, 102, 126, 150],
+ &[6, 24, 50, 76, 102, 128, 154],
+ &[6, 28, 54, 80, 106, 132, 158],
+ &[6, 32, 58, 84, 110, 136, 162],
+ &[6, 26, 54, 82, 110, 138, 166],
+ &[6, 30, 58, 86, 114, 142, 170],
+];
+
+/// Version information for format V7-V40.
+const VERSION_INFORMATION: [u32; 34] = [
+ 0b00_0111_1100_1001_0100,
+ 0b00_1000_0101_1011_1100,
+ 0b00_1001_1010_1001_1001,
+ 0b00_1010_0100_1101_0011,
+ 0b00_1011_1011_1111_0110,
+ 0b00_1100_0111_0110_0010,
+ 0b00_1101_1000_0100_0111,
+ 0b00_1110_0110_0000_1101,
+ 0b00_1111_1001_0010_1000,
+ 0b01_0000_1011_0111_1000,
+ 0b01_0001_0100_0101_1101,
+ 0b01_0010_1010_0001_0111,
+ 0b01_0011_0101_0011_0010,
+ 0b01_0100_1001_1010_0110,
+ 0b01_0101_0110_1000_0011,
+ 0b01_0110_1000_1100_1001,
+ 0b01_0111_0111_1110_1100,
+ 0b01_1000_1110_1100_0100,
+ 0b01_1001_0001_1110_0001,
+ 0b01_1010_1111_1010_1011,
+ 0b01_1011_0000_1000_1110,
+ 0b01_1100_1100_0001_1010,
+ 0b01_1101_0011_0011_1111,
+ 0b01_1110_1101_0111_0101,
+ 0b01_1111_0010_0101_0000,
+ 0b10_0000_1001_1101_0101,
+ 0b10_0001_0110_1111_0000,
+ 0b10_0010_1000_1011_1010,
+ 0b10_0011_0111_1001_1111,
+ 0b10_0100_1011_0000_1011,
+ 0b10_0101_0100_0010_1110,
+ 0b10_0110_1010_0110_0100,
+ 0b10_0111_0101_0100_0001,
+ 0b10_1000_1100_0110_1001,
+];
+
+/// Format info for low quality ECC.
+const FORMAT_INFOS_QR_L: [u16; 8] = [
+ 0x77c4, 0x72f3, 0x7daa, 0x789d, 0x662f, 0x6318, 0x6c41, 0x6976,
+];
+
+impl Version {
+    /// Returns the smallest QR version that can hold these segments.
+ fn from_segments(segments: &[&Segment<'_>]) -> Option<Version> {
+ for v in (1..=40).map(|k| Version(k)) {
+ if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() {
+ return Some(v);
+ }
+ }
+ None
+ }
+
+ fn width(&self) -> u8 {
+ (self.0 as u8) * 4 + 17
+ }
+
+ fn max_data(&self) -> usize {
+ self.g1_blk_size() * self.g1_blocks() + (self.g1_blk_size() + 1) * self.g2_blocks()
+ }
+
+ fn ec_size(&self) -> usize {
+ VPARAM[self.0 - 1].0.len()
+ }
+
+ fn g1_blocks(&self) -> usize {
+ VPARAM[self.0 - 1].1 as usize
+ }
+
+ fn g2_blocks(&self) -> usize {
+ VPARAM[self.0 - 1].2 as usize
+ }
+
+ fn g1_blk_size(&self) -> usize {
+ VPARAM[self.0 - 1].3 as usize
+ }
+
+ fn alignment_pattern(&self) -> &'static [u8] {
+ &ALIGNMENT_PATTERNS[self.0 - 1]
+ }
+
+ fn poly(&self) -> &'static [u8] {
+ VPARAM[self.0 - 1].0
+ }
+
+ fn version_info(&self) -> u32 {
+ if *self >= Version(7) {
+ VERSION_INFORMATION[self.0 - 7]
+ } else {
+ 0
+ }
+ }
+}
+
+/// Exponential table for Galois Field GF(256).
+const EXP_TABLE: [u8; 256] = [
+ 1, 2, 4, 8, 16, 32, 64, 128, 29, 58, 116, 232, 205, 135, 19, 38, 76, 152, 45, 90, 180, 117,
+ 234, 201, 143, 3, 6, 12, 24, 48, 96, 192, 157, 39, 78, 156, 37, 74, 148, 53, 106, 212, 181,
+ 119, 238, 193, 159, 35, 70, 140, 5, 10, 20, 40, 80, 160, 93, 186, 105, 210, 185, 111, 222, 161,
+ 95, 190, 97, 194, 153, 47, 94, 188, 101, 202, 137, 15, 30, 60, 120, 240, 253, 231, 211, 187,
+ 107, 214, 177, 127, 254, 225, 223, 163, 91, 182, 113, 226, 217, 175, 67, 134, 17, 34, 68, 136,
+ 13, 26, 52, 104, 208, 189, 103, 206, 129, 31, 62, 124, 248, 237, 199, 147, 59, 118, 236, 197,
+ 151, 51, 102, 204, 133, 23, 46, 92, 184, 109, 218, 169, 79, 158, 33, 66, 132, 21, 42, 84, 168,
+ 77, 154, 41, 82, 164, 85, 170, 73, 146, 57, 114, 228, 213, 183, 115, 230, 209, 191, 99, 198,
+ 145, 63, 126, 252, 229, 215, 179, 123, 246, 241, 255, 227, 219, 171, 75, 150, 49, 98, 196, 149,
+ 55, 110, 220, 165, 87, 174, 65, 130, 25, 50, 100, 200, 141, 7, 14, 28, 56, 112, 224, 221, 167,
+ 83, 166, 81, 162, 89, 178, 121, 242, 249, 239, 195, 155, 43, 86, 172, 69, 138, 9, 18, 36, 72,
+ 144, 61, 122, 244, 245, 247, 243, 251, 235, 203, 139, 11, 22, 44, 88, 176, 125, 250, 233, 207,
+ 131, 27, 54, 108, 216, 173, 71, 142, 1,
+];
+
+/// Reverse exponential table for Galois Field GF(256).
+const LOG_TABLE: [u8; 256] = [
+ 175, 0, 1, 25, 2, 50, 26, 198, 3, 223, 51, 238, 27, 104, 199, 75, 4, 100, 224, 14, 52, 141,
+ 239, 129, 28, 193, 105, 248, 200, 8, 76, 113, 5, 138, 101, 47, 225, 36, 15, 33, 53, 147, 142,
+ 218, 240, 18, 130, 69, 29, 181, 194, 125, 106, 39, 249, 185, 201, 154, 9, 120, 77, 228, 114,
+ 166, 6, 191, 139, 98, 102, 221, 48, 253, 226, 152, 37, 179, 16, 145, 34, 136, 54, 208, 148,
+ 206, 143, 150, 219, 189, 241, 210, 19, 92, 131, 56, 70, 64, 30, 66, 182, 163, 195, 72, 126,
+ 110, 107, 58, 40, 84, 250, 133, 186, 61, 202, 94, 155, 159, 10, 21, 121, 43, 78, 212, 229, 172,
+ 115, 243, 167, 87, 7, 112, 192, 247, 140, 128, 99, 13, 103, 74, 222, 237, 49, 197, 254, 24,
+ 227, 165, 153, 119, 38, 184, 180, 124, 17, 68, 146, 217, 35, 32, 137, 46, 55, 63, 209, 91, 149,
+ 188, 207, 205, 144, 135, 151, 178, 220, 252, 190, 97, 242, 86, 211, 171, 20, 42, 93, 158, 132,
+ 60, 57, 83, 71, 109, 65, 162, 31, 45, 67, 216, 183, 123, 164, 118, 196, 23, 73, 236, 127, 12,
+ 111, 246, 108, 161, 59, 82, 41, 157, 85, 170, 251, 96, 134, 177, 187, 204, 62, 90, 203, 89, 95,
+ 176, 156, 169, 160, 81, 11, 245, 22, 235, 122, 117, 44, 215, 79, 174, 213, 233, 230, 231, 173,
+ 232, 116, 214, 244, 234, 168, 80, 88, 175,
+];
+
+// 4-bit segment header.
+const MODE_STOP: u16 = 0;
+const MODE_NUMERIC: u16 = 1;
+const MODE_BINARY: u16 = 4;
+/// Padding bytes.
+const PADDING: [u8; 2] = [236, 17];
+
+/// Get the next 13 bits of data, starting at the specified offset (in bits).
+fn get_next_13b(data: &[u8], offset: usize) -> Option<(u16, usize)> {
+ if offset < data.len() * 8 {
+ let size = cmp::min(13, data.len() * 8 - offset);
+ let byte_off = offset / 8;
+ let bit_off = offset % 8;
+ // `b` is 20 at max (`bit_off` <= 7 and `size` <= 13).
+ let b = (bit_off + size) as u16;
+
+ let first_byte = (data[byte_off] << bit_off >> bit_off) as u16;
+
+ let number = match b {
+ 0..=8 => first_byte >> (8 - b),
+ 9..=16 => (first_byte << (b - 8)) + (data[byte_off + 1] >> (16 - b)) as u16,
+ _ => {
+ (first_byte << (b - 8))
+ + ((data[byte_off + 1] as u16) << (b - 16))
+ + (data[byte_off + 2] >> (24 - b)) as u16
+ }
+ };
+ Some((number, size))
+ } else {
+ None
+ }
+}
+
+/// Number of bits to encode characters in numeric mode.
+const NUM_CHARS_BITS: [usize; 4] = [0, 4, 7, 10];
+const POW10: [u16; 4] = [1, 10, 100, 1000];
+
+enum Segment<'a> {
+ Numeric(&'a [u8]),
+ Binary(&'a [u8]),
+}
+
+impl Segment<'_> {
+ fn get_header(&self) -> (u16, usize) {
+ match self {
+ Segment::Binary(_) => (MODE_BINARY, 4),
+ Segment::Numeric(_) => (MODE_NUMERIC, 4),
+ }
+ }
+
+ // Returns the size of the length field in bits, depending on QR Version.
+ fn length_bits_count(&self, version: Version) -> usize {
+ let Version(v) = version;
+ match self {
+ Segment::Binary(_) => match v {
+ 1..=9 => 8,
+ _ => 16,
+ },
+ Segment::Numeric(_) => match v {
+ 1..=9 => 10,
+ 10..=26 => 12,
+ _ => 14,
+ },
+ }
+ }
+
+ // Number of characters in the segment.
+ fn character_count(&self) -> usize {
+ match self {
+ Segment::Binary(data) => data.len(),
+ Segment::Numeric(data) => {
+ let data_bits = data.len() * 8;
+ let last_chars = match data_bits % 13 {
+ 1 => 1,
+ k => (k + 1) / 3,
+ };
+ // 4 decimal digits per 13 bits + remainder.
+ 4 * (data_bits / 13) + last_chars
+ }
+ }
+ }
+
+ fn get_length_field(&self, version: Version) -> (u16, usize) {
+ (
+ self.character_count() as u16,
+ self.length_bits_count(version),
+ )
+ }
+
+ fn total_size_bits(&self, version: Version) -> usize {
+ let data_size = match self {
+ Segment::Binary(data) => data.len() * 8,
+ Segment::Numeric(_) => {
+ let digits = self.character_count();
+ 10 * (digits / 3) + NUM_CHARS_BITS[digits % 3]
+ }
+ };
+ // header + length + data.
+ 4 + self.length_bits_count(version) + data_size
+ }
+
+ fn iter(&self) -> SegmentIterator<'_> {
+ SegmentIterator {
+ segment: self,
+ offset: 0,
+ carry: 0,
+ carry_len: 0,
+ }
+ }
+}
+
+struct SegmentIterator<'a> {
+ segment: &'a Segment<'a>,
+ offset: usize,
+ carry: u16,
+ carry_len: usize,
+}
+
+impl Iterator for SegmentIterator<'_> {
+ type Item = (u16, usize);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match self.segment {
+ Segment::Binary(data) => {
+ if self.offset < data.len() {
+ let byte = data[self.offset] as u16;
+ self.offset += 1;
+ Some((byte, 8))
+ } else {
+ None
+ }
+ }
+ Segment::Numeric(data) => {
+ if self.carry_len == 3 {
+ let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
+ self.carry_len = 0;
+ self.carry = 0;
+ Some(out)
+ } else if let Some((bits, size)) = get_next_13b(data, self.offset) {
+ self.offset += size;
+ let new_chars = match size {
+ 1 => 1,
+ k => (k + 1) / 3,
+ };
+ if self.carry_len + new_chars > 3 {
+ self.carry_len = new_chars + self.carry_len - 3;
+ let out = (
+ self.carry * POW10[new_chars - self.carry_len]
+ + bits / POW10[self.carry_len],
+ NUM_CHARS_BITS[3],
+ );
+ self.carry = bits % POW10[self.carry_len];
+ Some(out)
+ } else {
+ let out = (
+ self.carry * POW10[new_chars] + bits,
+ NUM_CHARS_BITS[self.carry_len + new_chars],
+ );
+ self.carry_len = 0;
+ Some(out)
+ }
+ } else if self.carry_len > 0 {
+ let out = (self.carry, NUM_CHARS_BITS[self.carry_len]);
+ self.carry_len = 0;
+ Some(out)
+ } else {
+ None
+ }
+ }
+ }
+ }
+}
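+
+// A short worked trace for `Segment::Numeric(&[0x12, 0x34])`: the first 13
+// bits are 0b0_0010_0100_0110 = 582, i.e. the digits "0582". The iterator
+// emits "058" as (58, 10 bits) and keeps "2" as carry; the remaining 3
+// bits (0b100 = 4) complete it to "24", emitted as (24, 7 bits). That's 5
+// digits in total, matching `character_count()`: 4 * (16 / 13) + (3 + 1) / 3 = 5.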
+
+struct EncodedMsg<'a> {
+ data: &'a mut [u8],
+ ec_size: usize,
+ g1_blocks: usize,
+ g2_blocks: usize,
+ g1_blk_size: usize,
+ g2_blk_size: usize,
+ poly: &'static [u8],
+ version: Version,
+}
+
+/// Data to be put in the QR code, with correct segment encoding, padding, and
+/// Error Correction Code (ECC).
+impl EncodedMsg<'_> {
+ fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> {
+ let version = Version::from_segments(segments)?;
+ let ec_size = version.ec_size();
+ let g1_blocks = version.g1_blocks();
+ let g2_blocks = version.g2_blocks();
+ let g1_blk_size = version.g1_blk_size();
+ let g2_blk_size = g1_blk_size + 1;
+ let poly = version.poly();
+
+ // Clear the output buffer.
+ data.fill(0);
+
+ let mut em = EncodedMsg {
+ data,
+ ec_size,
+ g1_blocks,
+ g2_blocks,
+ g1_blk_size,
+ g2_blk_size,
+ poly,
+ version,
+ };
+ em.encode(segments);
+ Some(em)
+ }
+
+ /// Push bits of data at an offset (in bits).
+ fn push(&mut self, offset: &mut usize, bits: (u16, usize)) {
+ let (number, len_bits) = bits;
+ let byte_off = *offset / 8;
+ let bit_off = *offset % 8;
+ let b = bit_off + len_bits;
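+
+ // e.g. pushing `(0b101, 3)` at `*offset == 6` gives `b == 9`: the
+ // `(_, 9..=16)` arm below ORs `0b101 >> 1` (bits "10") into bits 6..=7
+ // of the current byte, and writes `(0b101 << 7) as u8` (the final "1")
+ // into the top bit of the next byte.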
+
+ match (bit_off, b) {
+ (0, 0..=8) => {
+ self.data[byte_off] = (number << (8 - b)) as u8;
+ }
+ (0, _) => {
+ self.data[byte_off] = (number >> (b - 8)) as u8;
+ self.data[byte_off + 1] = (number << (16 - b)) as u8;
+ }
+ (_, 0..=8) => {
+ self.data[byte_off] |= (number << (8 - b)) as u8;
+ }
+ (_, 9..=16) => {
+ self.data[byte_off] |= (number >> (b - 8)) as u8;
+ self.data[byte_off + 1] = (number << (16 - b)) as u8;
+ }
+ _ => {
+ self.data[byte_off] |= (number >> (b - 8)) as u8;
+ self.data[byte_off + 1] = (number >> (b - 16)) as u8;
+ self.data[byte_off + 2] = (number << (24 - b)) as u8;
+ }
+ }
+ *offset += len_bits;
+ }
+
+ fn add_segments(&mut self, segments: &[&Segment<'_>]) {
+ let mut offset: usize = 0;
+
+ for s in segments.iter() {
+ self.push(&mut offset, s.get_header());
+ self.push(&mut offset, s.get_length_field(self.version));
+ for bits in s.iter() {
+ self.push(&mut offset, bits);
+ }
+ }
+ self.push(&mut offset, (MODE_STOP, 4));
+
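+ // Pad the remaining bytes by alternating the two spec-defined padding
+ // bytes 0b1110_1100 (236) and 0b0001_0001 (17). The XOR with
+ // `pad_offset & 1` makes the sequence start with 236 whatever the parity
+ // of the first free byte.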
+ let pad_offset = (offset + 7) / 8;
+ for i in pad_offset..self.version.max_data() {
+ self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
+ }
+ }
+
+ fn error_code_for_blocks(&mut self, offset: usize, size: usize, ec_offset: usize) {
+ let mut tmp: [u8; MAX_BLK_SIZE + MAX_EC_SIZE] = [0; MAX_BLK_SIZE + MAX_EC_SIZE];
+
+ tmp[0..size].copy_from_slice(&self.data[offset..offset + size]);
+ for i in 0..size {
+ let lead_coeff = tmp[i] as usize;
+ if lead_coeff == 0 {
+ continue;
+ }
+ let log_lead_coeff = usize::from(LOG_TABLE[lead_coeff]);
+ for (u, &v) in tmp[i + 1..].iter_mut().zip(self.poly.iter()) {
+ *u ^= EXP_TABLE[(usize::from(v) + log_lead_coeff) % 255];
+ }
+ }
+ self.data[ec_offset..ec_offset + self.ec_size]
+ .copy_from_slice(&tmp[size..size + self.ec_size]);
+ }
+
+ fn compute_error_code(&mut self) {
+ let mut offset = 0;
+ let mut ec_offset = self.g1_blocks * self.g1_blk_size + self.g2_blocks * self.g2_blk_size;
+
+ for _ in 0..self.g1_blocks {
+ self.error_code_for_blocks(offset, self.g1_blk_size, ec_offset);
+ offset += self.g1_blk_size;
+ ec_offset += self.ec_size;
+ }
+ for _ in 0..self.g2_blocks {
+ self.error_code_for_blocks(offset, self.g2_blk_size, ec_offset);
+ offset += self.g2_blk_size;
+ ec_offset += self.ec_size;
+ }
+ }
+
+ fn encode(&mut self, segments: &[&Segment<'_>]) {
+ self.add_segments(segments);
+ self.compute_error_code();
+ }
+
+ fn iter(&self) -> EncodedMsgIterator<'_> {
+ EncodedMsgIterator {
+ em: self,
+ offset: 0,
+ }
+ }
+}
+
+/// Iterator that returns the data in the interleaved order needed by the QR code.
+struct EncodedMsgIterator<'a> {
+ em: &'a EncodedMsg<'a>,
+ offset: usize,
+}
+
+impl Iterator for EncodedMsgIterator<'_> {
+ type Item = u8;
+
+ // Send the bytes in interleaved order: first byte of the first block of
+ // group 1, then first byte of the second block of group 1, ...
+ fn next(&mut self) -> Option<Self::Item> {
+ let em = self.em;
+ let blocks = em.g1_blocks + em.g2_blocks;
+ let g1_end = em.g1_blocks * em.g1_blk_size;
+ let g2_end = g1_end + em.g2_blocks * em.g2_blk_size;
+ let ec_end = g2_end + em.ec_size * blocks;
+
+ if self.offset >= ec_end {
+ return None;
+ }
+
+ let offset = if self.offset < em.g1_blk_size * blocks {
+ // group1 and group2 interleaved
+ let blk = self.offset % blocks;
+ let blk_off = self.offset / blocks;
+ if blk < em.g1_blocks {
+ blk * em.g1_blk_size + blk_off
+ } else {
+ g1_end + em.g2_blk_size * (blk - em.g1_blocks) + blk_off
+ }
+ } else if self.offset < g2_end {
+ // last byte of group2 blocks
+ let blk2 = self.offset - blocks * em.g1_blk_size;
+ em.g1_blk_size * em.g1_blocks + blk2 * em.g2_blk_size + em.g2_blk_size - 1
+ } else {
+ // EC blocks
+ let ec_offset = self.offset - g2_end;
+ let blk = ec_offset % blocks;
+ let blk_off = ec_offset / blocks;
+
+ g2_end + blk * em.ec_size + blk_off
+ };
+ self.offset += 1;
+ Some(em.data[offset])
+ }
+}
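+
+// As an illustration, take a toy layout (not a real QR version) with one
+// group-1 block of 2 bytes and one group-2 block of 3 bytes, i.e.
+// data = [a0, a1, b0, b1, b2, ec...]: the iterator yields a0, b0, a1, b1,
+// then the extra last byte b2 of the group-2 block, and finally the EC
+// bytes of both blocks interleaved the same way.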
+
+/// A QR code image, encoded as a linear binary framebuffer.
+/// 1 bit per module (pixel), and each new line starts at the next byte
+/// boundary. Max width is 177 for a V40 QR code, so `u8` is enough for
+/// the coordinates.
+struct QrImage<'a> {
+ data: &'a mut [u8],
+ width: u8,
+ stride: u8,
+ version: Version,
+}
+
+impl QrImage<'_> {
+ fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
+ let width = em.version.width();
+ let stride = (width + 7) / 8;
+ let data = qrdata;
+
+ let mut qr_image = QrImage {
+ data,
+ width,
+ stride,
+ version: em.version,
+ };
+ qr_image.draw_all(em.iter());
+ qr_image
+ }
+
+ fn clear(&mut self) {
+ self.data.fill(0);
+ }
+
+ // Set a pixel to the light color.
+ fn set(&mut self, x: u8, y: u8) {
+ let off = y as usize * self.stride as usize + x as usize / 8;
+ let mut v = self.data[off];
+ v |= 0x80 >> (x % 8);
+ self.data[off] = v;
+ }
+
+ // Invert a module color.
+ fn xor(&mut self, x: u8, y: u8) {
+ let off = y as usize * self.stride as usize + x as usize / 8;
+ self.data[off] ^= 0x80 >> (x % 8);
+ }
+
+ // Draw a light square with its top-left corner at (x, y).
+ fn draw_square(&mut self, x: u8, y: u8, size: u8) {
+ for k in 0..size {
+ self.set(x + k, y);
+ self.set(x, y + k + 1);
+ self.set(x + size, y + k);
+ self.set(x + k + 1, y + size);
+ }
+ }
+
+ // Finder patterns: 3 8x8 squares in the corners.
+ fn draw_finders(&mut self) {
+ self.draw_square(1, 1, 4);
+ self.draw_square(self.width - 6, 1, 4);
+ self.draw_square(1, self.width - 6, 4);
+ for k in 0..8 {
+ self.set(k, 7);
+ self.set(self.width - k - 1, 7);
+ self.set(k, self.width - 8);
+ }
+ for k in 0..7 {
+ self.set(7, k);
+ self.set(self.width - 8, k);
+ self.set(7, self.width - 1 - k);
+ }
+ }
+
+ fn is_finder(&self, x: u8, y: u8) -> bool {
+ let end = self.width - 8;
+ (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8)
+ }
+
+ // Alignment patterns: 5x5 squares in a grid.
+ fn draw_alignments(&mut self) {
+ let positions = self.version.alignment_pattern();
+ for &x in positions.iter() {
+ for &y in positions.iter() {
+ if !self.is_finder(x, y) {
+ self.draw_square(x - 1, y - 1, 2);
+ }
+ }
+ }
+ }
+
+ fn is_alignment(&self, x: u8, y: u8) -> bool {
+ let positions = self.version.alignment_pattern();
+ for &ax in positions.iter() {
+ for &ay in positions.iter() {
+ if self.is_finder(ax, ay) {
+ continue;
+ }
+ if x >= ax - 2 && x <= ax + 2 && y >= ay - 2 && y <= ay + 2 {
+ return true;
+ }
+ }
+ }
+ false
+ }
+
+ // Timing patterns: 2 dotted lines between the finder patterns.
+ fn draw_timing_patterns(&mut self) {
+ let end = self.width - 8;
+
+ for x in (9..end).step_by(2) {
+ self.set(x, 6);
+ self.set(6, x);
+ }
+ }
+
+ fn is_timing(&self, x: u8, y: u8) -> bool {
+ x == 6 || y == 6
+ }
+
+ // Mask info: 15 bits around the finders, written twice for redundancy.
+ fn draw_maskinfo(&mut self) {
+ let info: u16 = FORMAT_INFOS_QR_L[0];
+ let mut skip = 0;
+
+ for k in 0..7 {
+ if k == 6 {
+ skip = 1;
+ }
+ if info & (1 << (14 - k)) == 0 {
+ self.set(k + skip, 8);
+ self.set(8, self.width - 1 - k);
+ }
+ }
+ skip = 0;
+ for k in 0..8 {
+ if k == 2 {
+ skip = 1;
+ }
+ if info & (1 << (7 - k)) == 0 {
+ self.set(8, 8 - skip - k);
+ self.set(self.width - 8 + k, 8);
+ }
+ }
+ }
+
+ fn is_maskinfo(&self, x: u8, y: u8) -> bool {
+ let end = self.width - 8;
+ // Count the dark module as mask info.
+ (x <= 8 && y == 8) || (y <= 8 && x == 8) || (x == 8 && y >= end) || (x >= end && y == 8)
+ }
+
+ // Version info: 18 bits written twice, close to the finders.
+ fn draw_version_info(&mut self) {
+ let vinfo = self.version.version_info();
+ let pos = self.width - 11;
+
+ if vinfo != 0 {
+ for x in 0..3 {
+ for y in 0..6 {
+ if vinfo & (1 << (x + y * 3)) == 0 {
+ self.set(x + pos, y);
+ self.set(y, x + pos);
+ }
+ }
+ }
+ }
+ }
+
+ fn is_version_info(&self, x: u8, y: u8) -> bool {
+ let vinfo = self.version.version_info();
+ let pos = self.width - 11;
+
+ vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6))
+ }
+
+ // Returns true if the module is reserved (not usable for data or EC).
+ fn is_reserved(&self, x: u8, y: u8) -> bool {
+ self.is_alignment(x, y)
+ || self.is_finder(x, y)
+ || self.is_timing(x, y)
+ || self.is_maskinfo(x, y)
+ || self.is_version_info(x, y)
+ }
+
+ // Last module to draw, at the bottom-left corner.
+ fn is_last(&self, x: u8, y: u8) -> bool {
+ x == 0 && y == self.width - 1
+ }
+
+ // Move to the next module according to the QR code filling order,
+ // from the bottom-right corner to the bottom-left corner.
+ fn next(&self, x: u8, y: u8) -> (u8, u8) {
+ let x_adj = if x <= 6 { x + 1 } else { x };
+ let column_type = (self.width - x_adj) % 4;
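+
+ // e.g. from the bottom-right corner (w-1, w-1), the walk goes
+ // (w-2, w-1) -> (w-1, w-2) -> (w-2, w-2) -> ...: each 2-module-wide
+ // column is filled bottom-up, the next one top-down, and `x_adj` makes
+ // the walk skip the vertical timing pattern at x == 6.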
+
+ match column_type {
+ 2 if y > 0 => (x + 1, y - 1),
+ 0 if y < self.width - 1 => (x + 1, y + 1),
+ 0 | 2 if x == 7 => (x - 2, y),
+ _ => (x - 1, y),
+ }
+ }
+
+ // Find the next module that can hold data.
+ fn next_available(&self, x: u8, y: u8) -> (u8, u8) {
+ let (mut x, mut y) = self.next(x, y);
+ while self.is_reserved(x, y) && !self.is_last(x, y) {
+ (x, y) = self.next(x, y);
+ }
+ (x, y)
+ }
+
+ fn draw_data(&mut self, data: impl Iterator<Item = u8>) {
+ let (mut x, mut y) = (self.width - 1, self.width - 1);
+ for byte in data {
+ for s in 0..8 {
+ if byte & (0x80 >> s) == 0 {
+ self.set(x, y);
+ }
+ (x, y) = self.next_available(x, y);
+ }
+ }
+ // Set the remaining modules (0, 3 or 7 depending on the version),
+ // because 0 corresponds to a light module.
+ while !self.is_last(x, y) {
+ if !self.is_reserved(x, y) {
+ self.set(x, y);
+ }
+ (x, y) = self.next(x, y);
+ }
+ }
+
+ // Apply checkerboard mask to all non-reserved modules.
+ fn apply_mask(&mut self) {
+ for x in 0..self.width {
+ for y in 0..self.width {
+ if (x ^ y) % 2 == 0 && !self.is_reserved(x, y) {
+ self.xor(x, y);
+ }
+ }
+ }
+ }
+
+ // Draw the QR code with the provided data iterator.
+ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
+ // First clear the image, as it may already contain some data.
+ self.clear();
+ self.draw_finders();
+ self.draw_alignments();
+ self.draw_timing_patterns();
+ self.draw_version_info();
+ self.draw_data(data);
+ self.draw_maskinfo();
+ self.apply_mask();
+ }
+}
+
+/// C entry point for the Rust QR code generator.
+///
+/// Write the QR code image into the data buffer, and return the QR code
+/// width, or 0 if the data doesn't fit in a QR code.
+///
+/// * `url`: The base URL of the QR code. It will be encoded as a binary
+/// segment.
+/// * `data`: A pointer to the binary data to be encoded. If `url` is NULL, it
+/// will be encoded as a binary segment, otherwise it will be encoded
+/// efficiently as a numeric segment and appended to the URL.
+/// * `data_len`: Length of the data to be encoded; it must be less than
+/// `data_size`.
+/// * `data_size`: Size of the `data` buffer; it must be at least 4071 bytes
+/// to hold a V40 QR code. It will then be overwritten with the QR code image.
+/// * `tmp`: A temporary buffer that the QR code encoder will use, to write
+/// the segments and ECC.
+/// * `tmp_size`: Size of the temporary buffer; it must be at least 3706 bytes
+/// long for V40.
+///
+/// # Safety
+///
+/// * `url` must be null or point at a nul-terminated string.
+/// * `data` must be valid for reading and writing for `data_size` bytes.
+/// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
+///
+/// They must remain valid for the duration of the function call.
+#[no_mangle]
+pub unsafe extern "C" fn drm_panic_qr_generate(
+ url: *const i8,
+ data: *mut u8,
+ data_len: usize,
+ data_size: usize,
+ tmp: *mut u8,
+ tmp_size: usize,
+) -> u8 {
+ if data_size < 4071 || tmp_size < 3706 || data_len > data_size {
+ return 0;
+ }
+ // SAFETY: The caller ensures that `data` is a valid pointer for reading and
+ // writing `data_size` bytes.
+ let data_slice: &mut [u8] = unsafe { core::slice::from_raw_parts_mut(data, data_size) };
+ // SAFETY: The caller ensures that `tmp` is a valid pointer for reading and
+ // writing `tmp_size` bytes.
+ let tmp_slice: &mut [u8] = unsafe { core::slice::from_raw_parts_mut(tmp, tmp_size) };
+ if url.is_null() {
+ match EncodedMsg::new(&[&Segment::Binary(&data_slice[0..data_len])], tmp_slice) {
+ None => 0,
+ Some(em) => {
+ let qr_image = QrImage::new(&em, data_slice);
+ qr_image.width
+ }
+ }
+ } else {
+ // SAFETY: The caller ensures that `url` is a valid pointer to a
+ // nul-terminated string.
+ let url_cstr: &CStr = unsafe { CStr::from_char_ptr(url) };
+ let segments = &[
+ &Segment::Binary(url_cstr.as_bytes()),
+ &Segment::Numeric(&data_slice[0..data_len]),
+ ];
+ match EncodedMsg::new(segments, tmp_slice) {
+ None => 0,
+ Some(em) => {
+ let qr_image = QrImage::new(&em, data_slice);
+ qr_image.width
+ }
+ }
+ }
+}
+
+/// Returns the maximum data size that can fit in a QR code of this version.
+/// * `version`: QR code version, between 1 and 40.
+/// * `url_len`: Length of the URL.
+///
+/// * If `url_len` > 0, subtract the header/length of the 2 segments, and
+/// account for the conversion of the data to numeric encoding.
+/// * If `url_len` == 0, only subtract 3 bytes for the single binary segment.
+#[no_mangle]
+pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
+ if version < 1 || version > 40 {
+ return 0;
+ }
+ let max_data = Version(version as usize).max_data();
+
+ if url_len > 0 {
+ // Binary segment (URL) 4 + 16 bits, numeric segment (kmsg) 4 + 12 bits => 5 bytes.
+ if url_len + 5 >= max_data {
+ 0
+ } else {
+ let max = max_data - url_len - 5;
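+ // 13 input bits expand to 4 digits, i.e. 40/3 encoded bits on
+ // average, so only about 39/40 of the remaining capacity carries
+ // actual data.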
+ (max * 39) / 40
+ }
+ } else {
+ // Remove 3 bytes for the binary segment (header 4 bits, length 16 bits, stop 4 bits).
+ max_data - 3
+ }
+}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 03bd3c7bd0dc..0e3f8adf162f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -410,22 +410,30 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
}
/**
- * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
* @handle: buffer handle to export
* @flags: flags like DRM_CLOEXEC
- * @prime_fd: pointer to storage for the fd id of the create dma-buf
*
* This is the PRIME export function which must be used mandatorily by GEM
* drivers to ensure correct lifetime management of the underlying GEM object.
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
+ *
+ * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it
+ * has created, without attaching it to any file descriptors. The difference
+ * between those two is similar to that between anon_inode_getfile() and
+ * anon_inode_getfd(); insertion into the descriptor table is something
+ * you cannot revert if any cleanup is needed, so the descriptor-returning
+ * variants should only be used when you are past the last failure exit
+ * and the only thing left is passing the new file descriptor to userland.
+ * When all you need is the object itself or when you need to do something
+ * else that might fail, use that one instead.
*/
-int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
- uint32_t flags,
- int *prime_fd)
+ uint32_t flags)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -434,14 +442,14 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
mutex_lock(&file_priv->prime.lock);
obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
- ret = -ENOENT;
+ dmabuf = ERR_PTR(-ENOENT);
goto out_unlock;
}
dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
if (dmabuf) {
get_dma_buf(dmabuf);
- goto out_have_handle;
+ goto out;
}
mutex_lock(&dev->object_name_lock);
@@ -463,7 +471,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
*/
- ret = PTR_ERR(dmabuf);
mutex_unlock(&dev->object_name_lock);
goto out;
}
@@ -478,34 +485,51 @@ out_have_obj:
ret = drm_prime_add_buf_handle(&file_priv->prime,
dmabuf, handle);
mutex_unlock(&dev->object_name_lock);
- if (ret)
- goto fail_put_dmabuf;
-
-out_have_handle:
- ret = dma_buf_fd(dmabuf, flags);
- /*
- * We must _not_ remove the buffer from the handle cache since the newly
- * created dma buf is already linked in the global obj->dma_buf pointer,
- * and that is invariant as long as a userspace gem handle exists.
- * Closing the handle will clean out the cache anyway, so we don't leak.
- */
- if (ret < 0) {
- goto fail_put_dmabuf;
- } else {
- *prime_fd = ret;
- ret = 0;
+ if (ret) {
+ dma_buf_put(dmabuf);
+ dmabuf = ERR_PTR(ret);
}
-
- goto out;
-
-fail_put_dmabuf:
- dma_buf_put(dmabuf);
out:
drm_gem_object_put(obj);
out_unlock:
mutex_unlock(&file_priv->prime.lock);
+ return dmabuf;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf);
- return ret;
+/**
+ * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @handle: buffer handle to export
+ * @flags: flags like DRM_CLOEXEC
+ * @prime_fd: pointer to storage for the fd id of the created dma-buf
+ *
+ * This is the PRIME export function which must be used by GEM
+ * drivers to ensure correct lifetime management of the underlying GEM object.
+ * The actual exporting from GEM object to a dma-buf is done through the
+ * &drm_gem_object_funcs.export callback.
+ */
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
+{
+ struct dma_buf *dmabuf;
+ int fd = get_unused_fd_flags(flags);
+
+ if (fd < 0)
+ return fd;
+
+ dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags);
+ if (IS_ERR(dmabuf)) {
+ put_unused_fd(fd);
+ return PTR_ERR(dmabuf);
+ }
+
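+	/*
+	 * Past the last failure exit: fd_install() does not fail, so the
+	 * new descriptor can safely be published to userspace.
+	 */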
+ fd_install(fd, dmabuf->file);
+ *prime_fd = fd;
+ return 0;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index cf24dfdeb6b2..0081190201a7 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -100,8 +100,9 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str)
copy = iterator->remain;
/* Copy out the bit of the string that we need */
- memcpy(iterator->data,
- str + (iterator->start - iterator->offset), copy);
+ if (iterator->data)
+ memcpy(iterator->data,
+ str + (iterator->start - iterator->offset), copy);
iterator->offset = iterator->start + copy;
iterator->remain -= copy;
@@ -110,7 +111,8 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str)
len = min_t(ssize_t, strlen(str), iterator->remain);
- memcpy(iterator->data + pos, str, len);
+ if (iterator->data)
+ memcpy(iterator->data + pos, str, len);
iterator->offset += len;
iterator->remain -= len;
@@ -140,8 +142,9 @@ void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
ssize_t pos = iterator->offset - iterator->start;
- snprintf(((char *) iterator->data) + pos,
- iterator->remain, "%pV", vaf);
+ if (iterator->data)
+ snprintf(((char *) iterator->data) + pos,
+ iterator->remain, "%pV", vaf);
iterator->offset += len;
iterator->remain -= len;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bb49d552e671..92f21764246f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -714,7 +714,7 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
* @dev: drm_device whose connector state changed
*
* This function fires off the uevent for userspace and also calls the
- * output_poll_changed function, which is most commonly used to inform the fbdev
+ * client hotplug function, which is most commonly used to inform the fbdev
* emulation code and allow it to update the fbcon output configuration.
*
* Drivers should call this from their hotplug handling code when a change is
@@ -730,11 +730,7 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
*/
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
- /* send a uevent + call fbdev */
drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
-
drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
@@ -750,11 +746,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- /* send a uevent + call fbdev */
drm_sysfs_connector_hotplug_event(connector);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
-
drm_client_dev_hotplug(dev);
}
EXPORT_SYMBOL(drm_kms_helper_connector_hotplug_event);
@@ -888,7 +880,7 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
* disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
*
* If however, the polling was never initialized, this call will trigger a
- * warning and return
+ * warning and return.
*
* Note that calls to enable and disable polling must be strictly ordered, which
* is automatically the case when they're only call from suspend/resume
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 85c79a38c13a..492acce0516f 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -85,7 +85,6 @@ static u32 clip_scaled(int src, int dst, int *clip)
* factors from @src to @dst.
*
* RETURNS:
- *
* %true if rectangle @dst is still visible after being clipped,
* %false otherwise.
*/
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4fcfc0b9b386..8e3d2d7060f8 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -715,16 +715,16 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
struct fd f = fdget(fd);
int ret;
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
- if (f.file->f_op != &drm_syncobj_file_fops) {
+ if (fd_file(f)->f_op != &drm_syncobj_file_fops) {
fdput(f);
return -EINVAL;
}
/* take a reference to put in the idr */
- syncobj = f.file->private_data;
+ syncobj = fd_file(f)->private_data;
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index cc3571e25a9a..94e45ed6869d 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -131,7 +131,7 @@
* guaranteed to be enabled.
*
* On many hardware disabling the vblank interrupt cannot be done in a race-free
- * manner, see &drm_driver.vblank_disable_immediate and
+ * manner, see &drm_vblank_crtc_config.disable_immediate and
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
@@ -686,7 +686,6 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* drm_atomic_helper_calc_timestamping_constants().
*
* Returns:
- *
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
@@ -831,7 +830,6 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
* drm_atomic_helper_calc_timestamping_constants().
*
* Returns:
- *
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
@@ -1241,6 +1239,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
+ int vblank_offdelay = vblank->config.offdelay_ms;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
@@ -1250,13 +1249,13 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&vblank->refcount)) {
- if (drm_vblank_offdelay == 0)
+ if (!vblank_offdelay)
return;
- else if (drm_vblank_offdelay < 0)
+ else if (vblank_offdelay < 0)
vblank_disable_fn(&vblank->disable_timer);
- else if (!dev->vblank_disable_immediate)
+ else if (!vblank->config.disable_immediate)
mod_timer(&vblank->disable_timer,
- jiffies + ((drm_vblank_offdelay * HZ)/1000));
+ jiffies + ((vblank_offdelay * HZ) / 1000));
}
}
@@ -1265,7 +1264,8 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
- * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ * if possible. Disable interrupts after &drm_vblank_crtc_config.offdelay_ms
+ * milliseconds.
*/
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
@@ -1466,16 +1466,20 @@ void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
/**
- * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * drm_crtc_vblank_on_config - enable vblank events on a CRTC with custom
+ * configuration options
* @crtc: CRTC in question
+ * @config: Vblank configuration value
*
- * This functions restores the vblank interrupt state captured with
- * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
- * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
- * unbalanced and so can also be unconditionally called in driver load code to
- * reflect the current hardware state of the crtc.
+ * See drm_crtc_vblank_on(). In addition, this function allows you to provide a
+ * custom vblank configuration for a given CRTC.
+ *
+ * Note that @config is copied; the pointer does not need to stay valid beyond
+ * this function call. For details of the parameters see
+ * struct drm_vblank_crtc_config.
*/
-void drm_crtc_vblank_on(struct drm_crtc *crtc)
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
@@ -1488,6 +1492,8 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
+ vblank->config = *config;
+
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
@@ -1500,10 +1506,33 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
- if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
+ if (atomic_read(&vblank->refcount) != 0 || !vblank->config.offdelay_ms)
drm_WARN_ON(dev, drm_vblank_enable(dev, pipe));
spin_unlock_irq(&dev->vbl_lock);
}
+EXPORT_SYMBOL(drm_crtc_vblank_on_config);
+
+/**
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
+ * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
+ * unbalanced and so can also be unconditionally called in driver load code to
+ * reflect the current hardware state of the crtc.
+ *
+ * Note that unlike in drm_crtc_vblank_on_config(), default values are used.
+ */
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ const struct drm_vblank_crtc_config config = {
+ .offdelay_ms = drm_vblank_offdelay,
+ .disable_immediate = crtc->dev->vblank_disable_immediate
+ };
+
+ drm_crtc_vblank_on_config(crtc, &config);
+}
EXPORT_SYMBOL(drm_crtc_vblank_on);
static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
@@ -1556,16 +1585,21 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
*
* Note that drivers must have race-free high-precision timestamping support,
* i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and
- * &drm_driver.vblank_disable_immediate must be set to indicate the
+ * &drm_vblank_crtc_config.disable_immediate must be set to indicate the
* time-stamping functions are race-free against vblank hardware counter
* increments.
*/
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
- WARN_ON_ONCE(!crtc->funcs->get_vblank_timestamp);
- WARN_ON_ONCE(!crtc->dev->vblank_disable_immediate);
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
+
+ drm_WARN_ON_ONCE(dev, !crtc->funcs->get_vblank_timestamp);
+ drm_WARN_ON_ONCE(dev, vblank->inmodeset);
+ drm_WARN_ON_ONCE(dev, !vblank->config.disable_immediate);
- drm_vblank_restore(crtc->dev, drm_crtc_index(crtc));
+ drm_vblank_restore(dev, pipe);
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
@@ -1754,7 +1788,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
/* If the counter is currently enabled and accurate, short-circuit
* queries to return the cached timestamp of the last vblank.
*/
- if (dev->vblank_disable_immediate &&
+ if (vblank->config.disable_immediate &&
drm_wait_vblank_is_query(vblwait) &&
READ_ONCE(vblank->enabled)) {
drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
@@ -1918,8 +1952,8 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
* been signaled. The disable has to be last (after
* drm_handle_vblank_events) so that the timestamp is always accurate.
*/
- disable_irq = (dev->vblank_disable_immediate &&
- drm_vblank_offdelay > 0 &&
+ disable_irq = (vblank->config.disable_immediate &&
+ vblank->config.offdelay_ms > 0 &&
!atomic_read(&vblank->refcount));
drm_handle_vblank_events(dev, pipe);
@@ -1992,7 +2026,8 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
pipe = drm_crtc_index(crtc);
vblank = drm_crtc_vblank_crtc(crtc);
- vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled);
+ vblank_enabled = READ_ONCE(vblank->config.disable_immediate) &&
+ READ_ONCE(vblank->enabled);
if (!vblank_enabled) {
ret = drm_crtc_vblank_get(crtc);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 62dcfdc7894d..ab9ca4824b62 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -72,7 +72,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
drm_sched_resubmit_jobs(&gpu->sched);
- drm_sched_start(&gpu->sched, true);
+ drm_sched_start(&gpu->sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 81d501efd013..23646e55f142 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -254,10 +254,6 @@ static inline int exynos_drm_check_fimc_device(struct device *dev)
}
#endif
-int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
- bool nonblock);
-
-
extern struct platform_driver fimd_driver;
extern struct platform_driver exynos5433_decon_driver;
extern struct platform_driver decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 142184c8c3bc..4d7ea65b7dd8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1125,7 +1125,7 @@ static void fimc_abort(struct exynos_drm_ipp *ipp,
}
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = fimc_commit,
.abort = fimc_abort,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1b111e2c3347..59fa22050717 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1162,7 +1162,7 @@ static void gsc_abort(struct exynos_drm_ipp *ipp,
}
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = gsc_commit,
.abort = gsc_abort,
};
@@ -1174,7 +1174,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- ctx->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index a9d469896824..2788105ac780 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -403,7 +403,7 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
return 0;
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = scaler_commit,
};
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 3adc2c9ab72d..f3a4517bdf27 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -568,7 +568,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
dev->dev, "I2C bus registration failed.\n");
goto err_encoder_cleanup;
}
- gma_encoder->i2c_bus->slave_addr = 0x2C;
+ gma_encoder->i2c_bus->target_addr = 0x2C;
dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
/*
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8245b5603d2c..d5924ca3ed05 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -14,8 +14,8 @@
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#define SLAVE_ADDR1 0x70
-#define SLAVE_ADDR2 0x72
+#define TARGET_ADDR1 0x70
+#define TARGET_ADDR2 0x72
static void *find_section(struct bdb_header *bdb, int section_id)
{
@@ -357,10 +357,10 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
/* skip the device block if device type is invalid */
continue;
}
- if (p_child->slave_addr != SLAVE_ADDR1 &&
- p_child->slave_addr != SLAVE_ADDR2) {
+ if (p_child->target_addr != TARGET_ADDR1 &&
+ p_child->target_addr != TARGET_ADDR2) {
/*
- * If the slave address is neither 0x70 nor 0x72,
+ * If the target address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
@@ -371,22 +371,22 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
}
- DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ DRM_DEBUG_KMS("the SDVO device with target addr %2x is found on"
" %s port\n",
- p_child->slave_addr,
+ p_child->target_addr,
(p_child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
if (!p_mapping->initialized) {
p_mapping->dvo_port = p_child->dvo_port;
- p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->target_addr = p_child->target_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->i2c_pin = p_child->i2c_pin;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
- p_mapping->slave_addr,
+ p_mapping->target_addr,
p_mapping->dvo_wiring,
p_mapping->ddc_pin,
p_mapping->i2c_pin);
@@ -394,10 +394,10 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (p_child->slave2_addr) {
+ if (p_child->target2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ DRM_DEBUG_KMS("there exists the target2_addr. Maybe this"
" is a SDVO device with multiple inputs.\n");
}
count++;
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 0e6facf21e33..b5adea2a20c3 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -186,13 +186,13 @@ struct child_device_config {
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
- u8 slave_addr;
+ u8 target_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
u8 dvo2_port;
u8 i2c2_pin;
- u8 slave2_addr;
+ u8 target2_addr;
u8 ddc2_pin;
u8 capabilities;
u8 dvo_wiring;/* See DEVICE_WIRE_* above */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index aa45509859f2..ee8b047587f2 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -333,7 +333,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
clear_err:
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
- * BUS_ERROR raised by the slave's NAK.
+ * BUS_ERROR raised by the target's NAK.
*/
GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 83c17689c454..bddf89b82fec 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -202,7 +202,7 @@ struct psb_intel_opregion {
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
- u8 slave_addr;
+ u8 target_addr;
u8 dvo_wiring;
u8 i2c_pin;
u8 i2c_speed;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index c111e933e1ed..2499fd6a80c9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -80,7 +80,7 @@ struct psb_intel_mode_device {
struct gma_i2c_chan {
struct i2c_adapter base;
struct i2c_algo_bit_data algo;
- u8 slave_addr;
+ u8 target_addr;
/* for getting at dev. private (mmio etc.) */
struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 8d1be94a443b..138f153d38ba 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -97,7 +97,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
struct i2c_msg msgs[] = {
{
- .addr = lvds_i2c_bus->slave_addr,
+ .addr = lvds_i2c_bus->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -710,7 +710,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
dev->dev, "I2C bus registration failed.\n");
goto err_encoder_cleanup;
}
- lvds_priv->i2c_bus->slave_addr = 0x2C;
+ lvds_priv->i2c_bus->target_addr = 0x2C;
dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
/*
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index e4f914deceba..8dafff963ca8 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -70,7 +70,7 @@ struct psb_intel_sdvo {
struct gma_encoder base;
struct i2c_adapter *i2c;
- u8 slave_addr;
+ u8 target_addr;
struct i2c_adapter ddc;
@@ -259,13 +259,13 @@ static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 a
{
struct i2c_msg msgs[] = {
{
- .addr = psb_intel_sdvo->slave_addr,
+ .addr = psb_intel_sdvo->target_addr,
.flags = 0,
.len = 1,
.buf = &addr,
},
{
- .addr = psb_intel_sdvo->slave_addr,
+ .addr = psb_intel_sdvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = ch,
@@ -463,14 +463,14 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c
psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].addr = psb_intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2 *i;
buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
buf[2*i + 1] = ((u8*)args)[i];
}
- msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].addr = psb_intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2*i;
@@ -479,12 +479,12 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c
/* the following two are to read the response */
status = SDVO_I2C_CMD_STATUS;
- msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+1].addr = psb_intel_sdvo->target_addr;
msgs[i+1].flags = 0;
msgs[i+1].len = 1;
msgs[i+1].buf = &status;
- msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+2].addr = psb_intel_sdvo->target_addr;
msgs[i+2].flags = I2C_M_RD;
msgs[i+2].len = 1;
msgs[i+2].buf = &status;
@@ -1899,7 +1899,7 @@ psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int devi
}
static u8
-psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+psb_intel_sdvo_get_target_addr(struct drm_device *dev, int sdvo_reg)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
@@ -1913,14 +1913,14 @@ psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
}
/* If the BIOS described our SDVO device, take advantage of it. */
- if (my_mapping->slave_addr)
- return my_mapping->slave_addr;
+ if (my_mapping->target_addr)
+ return my_mapping->target_addr;
/* If the BIOS only described a different SDVO device, use the
* address that it isn't using.
*/
- if (other_mapping->slave_addr) {
- if (other_mapping->slave_addr == 0x70)
+ if (other_mapping->target_addr) {
+ if (other_mapping->target_addr == 0x70)
return 0x72;
else
return 0x70;
@@ -2446,7 +2446,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return false;
psb_intel_sdvo->sdvo_reg = sdvo_reg;
- psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ psb_intel_sdvo->target_addr = psb_intel_sdvo_get_target_addr(dev, sdvo_reg) >> 1;
psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
kfree(psb_intel_sdvo);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 207aa3f660b0..6b566f3aeecb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -57,7 +57,6 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
-int hibmc_mm_init(struct hibmc_drm_private *hibmc);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
#endif
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index d0c3880d7f80..493e730c685b 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -170,13 +170,13 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = &addr,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = val,
@@ -189,7 +189,7 @@ static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
u8 buf[2] = { addr, val };
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = buf,
@@ -197,7 +197,7 @@ static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
}
-/** Probes for a CH7017 on the given bus and slave address. */
+/** Probes for a CH7017 on the given bus and target address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
@@ -227,13 +227,13 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
break;
default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
- "slave %d.\n",
- val, adapter->name, dvo->slave_addr);
+ "target %d.\n",
+ val, adapter->name, dvo->target_addr);
goto fail;
}
DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
- str, adapter->name, dvo->slave_addr);
+ str, adapter->name, dvo->target_addr);
return true;
fail:
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 2e8e85da5a40..534b8544e0a4 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -153,13 +153,13 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -176,7 +176,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
@@ -188,7 +188,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -202,7 +202,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
@@ -229,8 +229,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
- DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n",
- vendor, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s target %d.\n",
+ vendor, adapter->name, dvo->target_addr);
goto out;
}
@@ -240,8 +240,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
devid = ch7xxx_get_did(device);
if (!devid) {
- DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n",
- device, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s target %d.\n",
+ device, adapter->name, dvo->target_addr);
goto out;
}
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index eef72bb3b767..0d5cce6051b1 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -198,7 +198,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 0,
},
@@ -209,7 +209,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD | I2C_M_NOSTART,
.len = 2,
.buf = in_buf,
@@ -226,7 +226,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
"%s:%02x.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
@@ -238,7 +238,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[3];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 3,
.buf = out_buf,
@@ -253,13 +253,13 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
-/* Probes the given bus and slave address for an ivch */
+/* Probes the given bus and target address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
@@ -283,10 +283,10 @@ static bool ivch_init(struct intel_dvo_device *dvo,
* very unique, check that the value in the base address field matches
* the address it's responding on.
*/
- if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->target_addr) {
DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
"(%d vs %d)\n",
- (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+ (temp & VR00_BASE_ADDRESS_MASK), dvo->target_addr);
goto out;
}
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 21486008dae9..9d47f8a93e94 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -398,13 +398,13 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -422,7 +422,7 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
if (!ns->quiet) {
DRM_DEBUG_KMS
("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
- adapter->name, dvo->slave_addr);
+ adapter->name, dvo->target_addr);
}
return false;
@@ -441,7 +441,7 @@ static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
u8 out_buf[2];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -456,7 +456,7 @@ static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
if (!ns->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
@@ -487,8 +487,8 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (NS2501_VID & 0xff)) {
- DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
- ch, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Target %d.\n",
+ ch, adapter->name, dvo->target_addr);
goto out;
}
@@ -496,8 +496,8 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (NS2501_DID & 0xff)) {
- DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
- ch, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Target %d.\n",
+ ch, adapter->name, dvo->target_addr);
goto out;
}
ns->quiet = false;
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 6c461024c8e3..a8dd40c00997 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -79,13 +79,13 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -102,7 +102,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
@@ -113,7 +113,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -127,7 +127,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
@@ -153,8 +153,8 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_VID & 0xff)) {
- DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
- ch, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Target %d.\n",
+ ch, adapter->name, dvo->target_addr);
goto out;
}
@@ -162,8 +162,8 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_DID & 0xff)) {
- DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
- ch, adapter->name, dvo->slave_addr);
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Target %d.\n",
+ ch, adapter->name, dvo->target_addr);
goto out;
}
sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 0939e097f4f9..d9a0cd753a87 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -100,13 +100,13 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
struct i2c_msg msgs[] = {
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -123,7 +123,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
}
@@ -134,7 +134,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
struct i2c_msg msg = {
- .addr = dvo->slave_addr,
+ .addr = dvo->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -148,7 +148,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
- addr, adapter->name, dvo->slave_addr);
+ addr, adapter->name, dvo->target_addr);
}
return false;
@@ -183,15 +183,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
- "Slave %d.\n",
- id, adapter->name, dvo->slave_addr);
+ "Target %d.\n",
+ id, adapter->name, dvo->target_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
- "Slave %d.\n",
- id, adapter->name, dvo->slave_addr);
+ "Target %d.\n",
+ id, adapter->name, dvo->target_addr);
goto out;
}
tfp->quiet = false;
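tfp410_init() above probes by reading the vendor and device IDs and bailing quietly on a mismatch, since an absent chip is a normal outcome while probing. A sketch of that probe shape, built on the dvo_example_readb() sketch earlier; register offsets and the expected ID are left as parameters rather than guessed:

static bool example_probe_id(struct intel_dvo_device *dvo, u8 id_lo_reg,
			     u8 id_hi_reg, u16 expect)
{
	u8 lo, hi;

	/* quiet failure: no error spam while the chip may simply be absent */
	if (!dvo_example_readb(dvo, id_lo_reg, &lo) ||
	    !dvo_example_readb(dvo, id_hi_reg, &hi))
		return false;

	return (((u16)hi << 8) | lo) == expect;
}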
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index a8e746a0f670..526c8c4d7b53 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -89,6 +89,7 @@ void g4x_dp_set_clock(struct intel_encoder *encoder,
static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
@@ -118,7 +119,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
@@ -140,7 +141,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe),
+ intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe),
TRANS_DP_ENH_FRAMING,
pipe_config->enhanced_framing ?
TRANS_DP_ENH_FRAMING : 0);
@@ -166,9 +167,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
+ bool cur_state = intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(dev_priv, cur_state != state,
"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
@@ -179,7 +181,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
- bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
+ struct intel_display *display = &dev_priv->display;
+ bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
I915_STATE_WARN(dev_priv, cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -191,6 +194,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -198,7 +202,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+ drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
@@ -208,8 +212,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(500);
/*
@@ -223,14 +227,15 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
intel_dp->DP |= DP_PLL_ENABLE;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(200);
}
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -238,22 +243,23 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
+ drm_dbg_kms(display->drm, "disabling eDP PLL\n");
intel_dp->DP &= ~DP_PLL_ENABLE;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(200);
}
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
enum port port, enum pipe *pipe)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe p;
- for_each_pipe(dev_priv, p) {
- u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
+ for_each_pipe(display, p) {
+ u32 val = intel_de_read(display, TRANS_DP_CTL(p));
if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
*pipe = p;
@@ -261,7 +267,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
}
}
- drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+ drm_dbg_kms(display->drm, "No pipe for DP port %c found\n",
port_name(port));
/* must initialize pipe to something for the asserts */
@@ -274,10 +280,11 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
+ struct intel_display *display = &dev_priv->display;
bool ret;
u32 val;
- val = intel_de_read(dev_priv, dp_reg);
+ val = intel_de_read(display, dp_reg);
ret = val & DP_PORT_EN;
@@ -333,6 +340,7 @@ static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state)
static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
@@ -344,12 +352,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
else
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- tmp = intel_de_read(dev_priv, intel_dp->output_reg);
+ tmp = intel_de_read(display, intel_dp->output_reg);
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp = intel_de_read(dev_priv,
+ u32 trans_dp = intel_de_read(display,
TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_ENH_FRAMING)
@@ -390,7 +398,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
g4x_dp_get_m_n(pipe_config);
if (port == PORT_A) {
- if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -410,17 +418,18 @@ static void
intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
- if (drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, intel_dp->output_reg) &
+ if (drm_WARN_ON(display->drm,
+ (intel_de_read(display, intel_dp->output_reg) &
DP_PORT_EN) == 0))
return;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -430,12 +439,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_dp->DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -454,12 +463,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
DP_LINK_TRAIN_PAT_1;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_dp->DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -480,7 +489,7 @@ static void g4x_dp_audio_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (!crtc_state->has_audio)
@@ -488,7 +497,7 @@ static void g4x_dp_audio_enable(struct intel_encoder *encoder,
/* Enable audio presence detect */
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -497,7 +506,7 @@ static void g4x_dp_audio_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (!old_crtc_state->has_audio)
@@ -507,7 +516,7 @@ static void g4x_dp_audio_disable(struct intel_encoder *encoder,
/* Disable audio presence detect */
intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE;
- intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
}
static void intel_disable_dp(struct intel_atomic_state *state,
@@ -596,7 +605,7 @@ cpt_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
@@ -615,8 +624,8 @@ cpt_set_link_train(struct intel_dp *intel_dp,
return;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void
@@ -624,7 +633,7 @@ g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
@@ -643,14 +652,14 @@ g4x_set_link_train(struct intel_dp *intel_dp,
return;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/* enable with pattern 1 (as per spec) */
@@ -665,8 +674,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
*/
intel_dp->DP |= DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_atomic_state *state,
@@ -674,12 +683,13 @@ static void intel_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
+ u32 dp_reg = intel_de_read(display, intel_dp->output_reg);
intel_wakeref_t wakeref;
- if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
+ if (drm_WARN_ON(display->drm, dp_reg & DP_PORT_EN))
return;
with_intel_pps_lock(intel_dp, wakeref) {
@@ -1026,21 +1036,21 @@ static void
g4x_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = g4x_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
@@ -1074,21 +1084,21 @@ static void
snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = snb_cpu_edp_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
@@ -1126,21 +1136,21 @@ static void
ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = ivb_cpu_edp_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/*
@@ -1185,15 +1195,15 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 bit;
switch (encoder->hpd_pin) {
@@ -1211,15 +1221,15 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT(dev_priv)) & bit;
+ return intel_de_read(display, PORT_HOTPLUG_STAT(display)) & bit;
}
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static void g4x_dp_suspend_complete(struct intel_encoder *encoder)
@@ -1241,7 +1251,8 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum pipe pipe;
@@ -1254,10 +1265,11 @@ enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder->dev);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
intel_dp->reset_link_params = true;
@@ -1279,6 +1291,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg, enum port port)
{
+ struct intel_display *display = &dev_priv->display;
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -1288,11 +1301,11 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
if (!assert_port_valid(dev_priv, port))
return false;
- devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ devdata = intel_bios_encoder_data_lookup(display, port);
/* FIXME bail? */
if (!devdata)
- drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n",
+ drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n",
port_name(port));
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
@@ -1312,7 +1325,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
mutex_init(&dig_port->hdcp_mutex);
- if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
"DP %c", port_name(port)))
goto err_encoder_init;
@@ -1396,7 +1409,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
- if (HAS_GMCH(dev_priv)) {
+ if (HAS_GMCH(display)) {
dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
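Every g4x_dp.c hunk above follows the same conversion: obtain a struct intel_display via to_intel_display() (which accepts encoders, connectors, intel_dp, crtc states, and so on), then hand it to the intel_de_*() register helpers and to drm_dbg_kms() through display->drm. A hypothetical function showing the converted shape (example_toggle_port() is not a kernel function):

static void example_toggle_port(struct intel_dp *intel_dp, bool enable)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 val;

	val = intel_de_read(display, intel_dp->output_reg);
	if (enable)
		val |= DP_PORT_EN;
	else
		val &= ~DP_PORT_EN;
	intel_de_write(display, intel_dp->output_reg, val);
	/* the posting read flushes the write before any udelay() that follows */
	intel_de_posting_read(display, intel_dp->output_reg);

	drm_dbg_kms(display->drm, "port %s\n", str_enabled_disabled(enable));
}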
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 8096492b3fad..46f23bdb4c17 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -686,6 +686,7 @@ static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
+ struct intel_display *display = &dev_priv->display;
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -697,7 +698,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
if (!assert_hdmi_port_valid(dev_priv, port))
return;
- devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ devdata = intel_bios_encoder_data_lookup(display, port);
/* FIXME bail? */
if (!devdata)
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 2b7c3d270b17..15cda57fbc91 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -4028,7 +4028,7 @@ void i9xx_wm_init(struct drm_i915_private *dev_priv)
dev_priv->display.funcs.wm = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
if (!pnv_get_cxsr_latency(dev_priv)) {
- drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
/* Disable CxSR and never update its watermark again */
intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.funcs.wm = &nop_funcs;
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ae8f6617aa70..293efc1f841d 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -27,6 +27,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
#include "i915_reg.h"
@@ -330,7 +331,7 @@ static int afe_clk(struct intel_encoder *encoder,
int bpp;
if (crtc_state->dsc.compression_enable)
- bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
+ bpp = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -863,7 +864,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* compressed and non-compressed bpp.
*/
if (crtc_state->dsc.compression_enable) {
- mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
+ mul = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16);
div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
}
@@ -887,7 +888,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
int bpp, line_time_us, byte_clk_period_ns;
if (crtc_state->dsc.compression_enable)
- bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
+ bpp = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1470,7 +1471,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
if (pipe_config->dsc.compressed_bpp_x16) {
- int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
+ int div = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16);
int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
adjusted_mode->crtc_htotal =
@@ -1944,6 +1945,7 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
void icl_dsi_init(struct drm_i915_private *dev_priv,
const struct intel_bios_encoder_data *devdata)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
@@ -2007,7 +2009,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
+ intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, NULL);
mutex_lock(&dev_priv->drm.mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
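The icl_dsi.c hunks swap the driver-local to_bpp_int() for fxp_q4_to_int() from <drm/drm_fixed.h> (hence the new include): compressed_bpp_x16 carries bits-per-pixel in Q4 fixed point, i.e. in sixteenths. A sketch of the convention, with illustrative macro bodies rather than the authoritative header ones:

/* Q4: the low 4 bits are the fraction, so 1 unit = 1/16 bpp. */
#define EXAMPLE_Q4_FROM_INT(x)	((x) << 4)
#define EXAMPLE_Q4_TO_INT(x)	((x) >> 4)

/* 12.5 bpp is stored as 12.5 * 16 = 200; truncating back gives 12. */
/* EXAMPLE_Q4_TO_INT(EXAMPLE_Q4_FROM_INT(12)) == 12 */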
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 0aa3999374e2..c3b29a331d72 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -183,9 +183,9 @@ void intel_unregister_dsm_handler(void)
{
}
-void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
+void intel_dsm_get_bios_data_funcs_supported(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
acpi_handle dhandle;
union acpi_object *obj;
@@ -263,15 +263,14 @@ static u32 acpi_display_type(struct intel_connector *connector)
return display_type;
}
-void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
+void intel_acpi_device_id_update(struct intel_display *display)
{
- struct drm_device *drm_dev = &dev_priv->drm;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 display_index[16] = {};
/* Populate the ACPI IDs for all connectors for a given drm_device */
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
u32 device_id, type;
@@ -288,10 +287,10 @@ void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
}
/* NOTE: The connector order must be final before this is called. */
-void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
+void intel_acpi_assign_connector_fwnodes(struct intel_display *display)
{
+ struct drm_device *drm_dev = display->drm;
struct drm_connector_list_iter conn_iter;
- struct drm_device *drm_dev = &i915->drm;
struct fwnode_handle *fwnode = NULL;
struct drm_connector *connector;
struct acpi_device *adev;
@@ -333,7 +332,7 @@ void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
fwnode_handle_put(fwnode);
}
-void intel_acpi_video_register(struct drm_i915_private *i915)
+void intel_acpi_video_register(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
@@ -347,7 +346,7 @@ void intel_acpi_video_register(struct drm_i915_private *i915)
* a native backlight later and acpi_video_register_backlight() should
* only be called after any native backlights have been registered.
*/
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_panel *panel = &to_intel_connector(connector)->panel;
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 6a0007452f95..788a63071661 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -6,26 +6,26 @@
#ifndef __INTEL_ACPI_H__
#define __INTEL_ACPI_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef CONFIG_ACPI
void intel_register_dsm_handler(void);
void intel_unregister_dsm_handler(void);
-void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915);
-void intel_acpi_device_id_update(struct drm_i915_private *i915);
-void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915);
-void intel_acpi_video_register(struct drm_i915_private *i915);
+void intel_dsm_get_bios_data_funcs_supported(struct intel_display *display);
+void intel_acpi_device_id_update(struct intel_display *display);
+void intel_acpi_assign_connector_fwnodes(struct intel_display *display);
+void intel_acpi_video_register(struct intel_display *display);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
static inline
-void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { return; }
+void intel_dsm_get_bios_data_funcs_supported(struct intel_display *display) { return; }
static inline
-void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
+void intel_acpi_device_id_update(struct intel_display *display) { return; }
static inline
-void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; }
+void intel_acpi_assign_connector_fwnodes(struct intel_display *display) { return; }
static inline
-void intel_acpi_video_register(struct drm_i915_private *i915) { return; }
+void intel_acpi_video_register(struct intel_display *display) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
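The intel_acpi.h hunks show the header-side half of such a conversion: because every parameter is a pointer, the opaque forward declaration simply changes from struct drm_i915_private to struct intel_display with no new include, and the #else stubs keep CONFIG_ACPI=n builds compiling. A hypothetical caller, using the same &dev_priv->display upcast seen in g4x_dp_init() above:

static void example_acpi_setup(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = &dev_priv->display;

	/* NOTE: the connector order must be final before fwnode assignment */
	intel_acpi_device_id_update(display);
	intel_acpi_assign_connector_fwnodes(display);
}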
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 10689480338e..186cf4833f71 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -139,7 +139,7 @@ static int
_lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int aux_less_wake_time, aux_less_wake_lines, silence_period,
lfps_half_cycle;
@@ -158,7 +158,7 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
return false;
- if (i915->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
intel_dp->alpm_parameters.aux_less_wake_lines = aux_less_wake_lines;
@@ -171,10 +171,10 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int check_entry_lines;
- if (DISPLAY_VER(i915) < 20)
+ if (DISPLAY_VER(display) < 20)
return true;
/* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
@@ -187,7 +187,7 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
return false;
- if (i915->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
check_entry_lines = 15;
intel_dp->alpm_parameters.check_entry_lines = check_entry_lines;
@@ -212,9 +212,9 @@ static int tgl_io_buffer_wake_time(void)
static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return tgl_io_buffer_wake_time();
else
return skl_io_buffer_wake_time();
@@ -223,7 +223,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
int tfw_exit_latency = 20; /* eDP spec */
int phy_wake = 4; /* eDP spec */
@@ -236,9 +236,9 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
fast_wake_time = precharge + preamble + phy_wake +
tfw_exit_latency;
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
max_wake_lines = 68;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
max_wake_lines = 12;
else
max_wake_lines = 8;
@@ -255,7 +255,7 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
return false;
- if (i915->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
@@ -269,7 +269,7 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
@@ -277,10 +277,10 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_is_edp(intel_dp))
return;
- if (DISPLAY_VER(i915) < 20)
+ if (DISPLAY_VER(display) < 20)
return;
- if (!intel_dp_as_sdp_supported(intel_dp))
+ if (!intel_dp->as_sdp_supported)
return;
if (crtc_state->has_psr)
@@ -309,13 +309,13 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
static void lnl_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
- if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.sel_update_enabled &&
- !intel_dp_is_edp(intel_dp)))
+ if (DISPLAY_VER(display) < 20 ||
+ (!intel_dp->psr.sel_update_enabled && !intel_dp_is_edp(intel_dp)))
return;
/*
@@ -329,16 +329,16 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
- intel_de_write(dev_priv,
- PORT_ALPM_CTL(dev_priv, port),
+ intel_de_write(display,
+ PORT_ALPM_CTL(display, port),
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
PORT_ALPM_CTL_SILENCE_PERIOD(
intel_dp->alpm_parameters.silence_period_sym_clocks));
- intel_de_write(dev_priv,
- PORT_ALPM_LFPS_CTL(dev_priv, port),
+ intel_de_write(display,
+ PORT_ALPM_LFPS_CTL(display, port),
PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
@@ -356,7 +356,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
- intel_de_write(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder), alpm_ctl);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
@@ -368,14 +368,14 @@ void intel_alpm_configure(struct intel_dp *intel_dp,
static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
enum transcoder cpu_transcoder;
u32 alpm_ctl;
int ret;
- ret = drm_modeset_lock_single_interruptible(&dev_priv->drm.mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (ret)
return ret;
@@ -387,14 +387,14 @@ static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
crtc_state = to_intel_crtc_state(crtc->state);
cpu_transcoder = crtc_state->cpu_transcoder;
- alpm_ctl = intel_de_read(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder));
+ alpm_ctl = intel_de_read(display, ALPM_CTL(display, cpu_transcoder));
seq_printf(m, "LOBF status: %s\n", str_enabled_disabled(alpm_ctl & ALPM_CTL_LOBF_ENABLE));
seq_printf(m, "Aux-wake alpm status: %s\n",
str_enabled_disabled(!(alpm_ctl & ALPM_CTL_ALPM_AUX_LESS_ENABLE)));
seq_printf(m, "Aux-less alpm status: %s\n",
str_enabled_disabled(alpm_ctl & ALPM_CTL_ALPM_AUX_LESS_ENABLE));
out:
- drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return ret;
}
@@ -403,10 +403,10 @@ DEFINE_SHOW_ATTRIBUTE(i915_edp_lobf_info);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct dentry *root = connector->base.debugfs_entry;
- if (DISPLAY_VER(i915) < 20 ||
+ if (DISPLAY_VER(display) < 20 ||
connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
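intel_alpm.c repeats the to_intel_display() substitution, including for DISPLAY_VER() checks and module parameters (display->params). A sketch of the version-gated limit selection used in intel_alpm_compute_params() above (the helper name is illustrative; the limits are the ones visible in the hunk):

static int example_max_wake_lines(struct intel_display *display)
{
	/* newer display generations allow progressively more wake lines */
	if (DISPLAY_VER(display) >= 20)
		return 68;
	else if (DISPLAY_VER(display) >= 12)
		return 12;
	else
		return 8;
}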
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 76aa10b6f647..12d6ed940751 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -276,7 +276,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->do_async_flip = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- crtc_state->dsb = NULL;
+ crtc_state->dsb_color_vblank = NULL;
+ crtc_state->dsb_color_commit = NULL;
return &crtc_state->uapi;
}
@@ -310,7 +311,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
- drm_WARN_ON(crtc->dev, crtc_state->dsb);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_color_commit);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
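The intel_atomic.c hunks split the single dsb pointer into dsb_color_vblank and dsb_color_commit: duplication must clear both so a copied state never aliases a live DSB buffer, and destruction warns if either is still set. A sketch of the duplicate-side reset (illustrative helper, not a kernel function):

static void example_clear_dsb(struct intel_crtc_state *crtc_state)
{
	/* a duplicated state starts with no DSB buffers of its own */
	crtc_state->dsb_color_vblank = NULL;
	crtc_state->dsb_color_commit = NULL;
}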
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index b9bafec06fb8..f5e7eefab2f1 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -26,6 +26,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_fixed.h>
#include <drm/intel/i915_component.h>
#include "i915_drv.h"
@@ -452,8 +453,8 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
lanes = crtc_state->lane_count;
drm_dbg_kms(&i915->drm,
- "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n",
- h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk);
+ "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " FXP_Q4_FMT " cdclk = %u\n",
+ h_active, link_clk, lanes, FXP_Q4_ARGS(vdsc_bppx16), cdclk);
if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk))
return 0;
@@ -979,7 +980,8 @@ retry:
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t ret;
/* Catch potential impedance mismatches before they occur! */
@@ -1011,7 +1013,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
static void i915_audio_component_put_power(struct device *kdev,
unsigned long cookie)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--i915->display.audio.power_refcount == 0)
@@ -1024,7 +1027,8 @@ static void i915_audio_component_put_power(struct device *kdev,
static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long cookie;
if (DISPLAY_VER(i915) < 9)
@@ -1052,7 +1056,8 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
/* Get CDCLK in kHz */
static int i915_audio_component_get_cdclk_freq(struct device *kdev)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915)))
return -ENODEV;
@@ -1111,7 +1116,8 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int cpu_transcoder, int rate)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = i915->display.audio.component;
const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
@@ -1153,7 +1159,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
int cpu_transcoder, bool *enabled,
unsigned char *buf, int max_bytes)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_audio_state *audio_state;
int ret = 0;
@@ -1188,24 +1195,25 @@ static const struct drm_audio_component_ops i915_audio_component_ops = {
.get_eld = i915_audio_component_get_eld,
};
-static int i915_audio_component_bind(struct device *i915_kdev,
+static int i915_audio_component_bind(struct device *drv_kdev,
struct device *hda_kdev, void *data)
{
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
int i;
if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
if (drm_WARN_ON(&i915->drm,
- !device_link_add(hda_kdev, i915_kdev,
+ !device_link_add(hda_kdev, drv_kdev,
DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(&i915->drm);
acomp->base.ops = &i915_audio_component_ops;
- acomp->base.dev = i915_kdev;
+ acomp->base.dev = drv_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
@@ -1215,11 +1223,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
return 0;
}
-static void i915_audio_component_unbind(struct device *i915_kdev,
+static void i915_audio_component_unbind(struct device *drv_kdev,
struct device *hda_kdev, void *data)
{
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
drm_modeset_lock_all(&i915->drm);
acomp->base.ops = NULL;
@@ -1227,7 +1236,7 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
i915->display.audio.component = NULL;
drm_modeset_unlock_all(&i915->drm);
- device_link_remove(hda_kdev, i915_kdev);
+ device_link_remove(hda_kdev, drv_kdev);
if (i915->display.audio.power_refcount)
drm_err(&i915->drm, "audio power refcount %d after unbind\n",
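In intel_audio.c the component callbacks now recover the display from the bare struct device (to_intel_display(kdev)), and the renamed bind arguments make the device-link direction explicit: the HDA controller is the consumer, the GPU the supplier, so power-management ordering is preserved. A minimal sketch of that pairing (example_bind() is hypothetical):

static int example_bind(struct device *drv_kdev, struct device *hda_kdev)
{
	/* consumer first, supplier second */
	if (!device_link_add(hda_kdev, drv_kdev, DL_FLAG_STATELESS))
		return -ENOMEM;
	return 0;
}

/* teardown mirrors it: device_link_remove(hda_kdev, drv_kdev); */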
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 6c3333136737..9e05745d797d 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -455,7 +455,7 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
mutex_lock(&i915->display.backlight.lock);
if (panel->backlight.device)
- panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
+ panel->backlight.device->props.power = BACKLIGHT_POWER_OFF;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
@@ -773,7 +773,7 @@ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
panel->backlight.enabled = true;
if (panel->backlight.device)
- panel->backlight.device->props.power = FB_BLANK_UNBLANK;
+ panel->backlight.device->props.power = BACKLIGHT_POWER_ON;
}
void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
@@ -870,12 +870,12 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
*/
if (panel->backlight.enabled) {
if (panel->backlight.power) {
- bool enable = bd->props.power == FB_BLANK_UNBLANK &&
+ bool enable = bd->props.power == BACKLIGHT_POWER_ON &&
bd->props.brightness != 0;
panel->backlight.power(connector, enable);
}
} else {
- bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.power = BACKLIGHT_POWER_OFF;
}
drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
@@ -945,9 +945,9 @@ int intel_backlight_device_register(struct intel_connector *connector)
props.max_brightness);
if (panel->backlight.enabled)
- props.power = FB_BLANK_UNBLANK;
+ props.power = BACKLIGHT_POWER_ON;
else
- props.power = FB_BLANK_POWERDOWN;
+ props.power = BACKLIGHT_POWER_OFF;
name = kstrdup_const("intel_backlight", GFP_KERNEL);
if (!name)
@@ -1011,7 +1011,7 @@ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq),
pwm_freq_hz);
}
@@ -1073,7 +1073,7 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq),
pwm_freq_hz * 128);
}
@@ -1091,7 +1091,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_PINEVIEW(i915))
- clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
+ clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
else
clock = KHz(i915->display.cdclk.hw.cdclk);
@@ -1109,7 +1109,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_G4X(i915))
- clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
+ clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
else
clock = KHz(i915->display.cdclk.hw.cdclk);
@@ -1133,7 +1133,7 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
+ clock = KHz(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq);
mul = 128;
}
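The backlight hunks make two orthogonal swaps: the fbdev FB_BLANK_* power constants become the backlight subsystem's BACKLIGHT_POWER_* ones, and rawclk_freq moves from RUNTIME_INFO() to DISPLAY_RUNTIME_INFO(). The *_hz_to_pwm() helpers themselves remain a plain clock/frequency division; a sketch with made-up numbers:

static u32 example_hz_to_pwm(u32 rawclk_khz, u32 pwm_freq_hz, u32 mul)
{
	/* e.g. 24000 kHz source, 200 Hz PWM, mul 128 -> divider 938 */
	return DIV_ROUND_CLOSEST(rawclk_khz * 1000, pwm_freq_hz * mul);
}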
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index ec1e3a380360..bed485374ab0 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -30,6 +30,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -65,15 +66,15 @@
/* Wrapper for VBT child device config */
struct intel_bios_encoder_data {
- struct drm_i915_private *i915;
+ struct intel_display *display;
struct child_device_config child;
struct dsc_compression_parameters_entry *dsc;
struct list_head node;
};
-#define SLAVE_ADDR1 0x70
-#define SLAVE_ADDR2 0x72
+#define TARGET_ADDR1 0x70
+#define TARGET_ADDR2 0x72
/* Get BDB block size given a pointer to Block ID. */
static u32 _get_blocksize(const u8 *block_base)
@@ -144,12 +145,12 @@ struct bdb_block_entry {
};
static const void *
-bdb_find_section(struct drm_i915_private *i915,
+bdb_find_section(struct intel_display *display,
enum bdb_block_id section_id)
{
struct bdb_block_entry *entry;
- list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) {
+ list_for_each_entry(entry, &display->vbt.bdb_blocks, node) {
if (entry->section_id == section_id)
return entry->data + 3;
}
@@ -199,12 +200,12 @@ static const struct {
.min_size = sizeof(struct bdb_generic_dtd), },
};
-static size_t lfp_data_min_size(struct drm_i915_private *i915)
+static size_t lfp_data_min_size(struct intel_display *display)
{
const struct bdb_lfp_data_ptrs *ptrs;
size_t size;
- ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS);
if (!ptrs)
return 0;
@@ -359,7 +360,7 @@ static void next_lfp_data_ptr(struct lfp_data_ptr_table *next,
next->offset = prev->offset + size;
}
-static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
+static void *generate_lfp_data_ptrs(struct intel_display *display,
const void *bdb)
{
int i, size, table_size, block_size, offset, fp_timing_size;
@@ -373,7 +374,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
* include block 41 and thus we don't need to
* generate one.
*/
- if (i915->display.vbt.version < 155)
+ if (display->vbt.version < 155)
return NULL;
fp_timing_size = 38;
@@ -382,7 +383,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
if (!block)
return NULL;
- drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n");
+ drm_dbg_kms(display->drm, "Generating LFP data table pointers\n");
block_size = get_blocksize(block);
@@ -450,7 +451,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
}
static void
-init_bdb_block(struct drm_i915_private *i915,
+init_bdb_block(struct intel_display *display,
const void *bdb, enum bdb_block_id section_id,
size_t min_size)
{
@@ -463,14 +464,14 @@ init_bdb_block(struct drm_i915_private *i915,
/* Modern VBTs lack the LFP data table pointers block, make one up */
if (!block && section_id == BDB_LFP_DATA_PTRS) {
- temp_block = generate_lfp_data_ptrs(i915, bdb);
+ temp_block = generate_lfp_data_ptrs(display, bdb);
if (temp_block)
block = temp_block + 3;
}
if (!block)
return;
- drm_WARN(&i915->drm, min_size == 0,
+ drm_WARN(display->drm, min_size == 0,
"Block %d min_size is zero\n", section_id);
block_size = get_blocksize(block);
@@ -494,20 +495,22 @@ init_bdb_block(struct drm_i915_private *i915,
kfree(temp_block);
- drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n",
+ drm_dbg_kms(display->drm,
+ "Found BDB block %d (size %zu, min size %zu)\n",
section_id, block_size, min_size);
if (section_id == BDB_LFP_DATA_PTRS &&
!fixup_lfp_data_ptrs(bdb, entry->data + 3)) {
- drm_err(&i915->drm, "VBT has malformed LFP data table pointers\n");
+ drm_err(display->drm,
+ "VBT has malformed LFP data table pointers\n");
kfree(entry);
return;
}
- list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks);
+ list_add_tail(&entry->node, &display->vbt.bdb_blocks);
}
-static void init_bdb_blocks(struct drm_i915_private *i915,
+static void init_bdb_blocks(struct intel_display *display,
const void *bdb)
{
int i;
@@ -517,14 +520,14 @@ static void init_bdb_blocks(struct drm_i915_private *i915,
size_t min_size = bdb_blocks[i].min_size;
if (section_id == BDB_LFP_DATA)
- min_size = lfp_data_min_size(i915);
+ min_size = lfp_data_min_size(display);
- init_bdb_block(i915, bdb, section_id, min_size);
+ init_bdb_block(display, bdb, section_id, min_size);
}
}
static void
-fill_detail_timing_data(struct drm_i915_private *i915,
+fill_detail_timing_data(struct intel_display *display,
struct drm_display_mode *panel_fixed_mode,
const struct bdb_edid_dtd *dvo_timing)
{
@@ -567,12 +570,12 @@ fill_detail_timing_data(struct drm_i915_private *i915,
/* Some VBTs have bogus h/vsync_end values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) {
- drm_dbg_kms(&i915->drm, "reducing hsync_end %d->%d\n",
+ drm_dbg_kms(display->drm, "reducing hsync_end %d->%d\n",
panel_fixed_mode->hsync_end, panel_fixed_mode->htotal);
panel_fixed_mode->hsync_end = panel_fixed_mode->htotal;
}
if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) {
- drm_dbg_kms(&i915->drm, "reducing vsync_end %d->%d\n",
+ drm_dbg_kms(display->drm, "reducing vsync_end %d->%d\n",
panel_fixed_mode->vsync_end, panel_fixed_mode->vtotal);
panel_fixed_mode->vsync_end = panel_fixed_mode->vtotal;
}
@@ -617,26 +620,26 @@ get_lfp_data_tail(const struct bdb_lfp_data *data,
return NULL;
}
-static int opregion_get_panel_type(struct drm_i915_private *i915,
+static int opregion_get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
- return intel_opregion_get_panel_type(i915);
+ return intel_opregion_get_panel_type(display);
}
-static int vbt_get_panel_type(struct drm_i915_private *i915,
+static int vbt_get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
const struct bdb_lfp_options *lfp_options;
- lfp_options = bdb_find_section(i915, BDB_LFP_OPTIONS);
+ lfp_options = bdb_find_section(display, BDB_LFP_OPTIONS);
if (!lfp_options)
return -1;
if (lfp_options->panel_type > 0xf &&
lfp_options->panel_type != 0xff) {
- drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n",
+ drm_dbg_kms(display->drm, "Invalid VBT panel type 0x%x\n",
lfp_options->panel_type);
return -1;
}
@@ -644,12 +647,13 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
if (devdata && devdata->child.handle == DEVICE_HANDLE_LFP2)
return lfp_options->panel_type2;
- drm_WARN_ON(&i915->drm, devdata && devdata->child.handle != DEVICE_HANDLE_LFP1);
+ drm_WARN_ON(display->drm,
+ devdata && devdata->child.handle != DEVICE_HANDLE_LFP1);
return lfp_options->panel_type;
}
-static int pnpid_get_panel_type(struct drm_i915_private *i915,
+static int pnpid_get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
@@ -668,14 +672,14 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
product_id_nodate.week_of_manufacture = 0;
product_id_nodate.year_of_manufacture = 0;
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "EDID");
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, "EDID");
drm_edid_print_product_id(&p, &product_id, true);
- ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS);
if (!ptrs)
return -1;
- data = bdb_find_section(i915, BDB_LFP_DATA);
+ data = bdb_find_section(display, BDB_LFP_DATA);
if (!data)
return -1;
@@ -699,7 +703,7 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
return best;
}
-static int fallback_get_panel_type(struct drm_i915_private *i915,
+static int fallback_get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
@@ -713,13 +717,13 @@ enum panel_type {
PANEL_TYPE_FALLBACK,
};
-static int get_panel_type(struct drm_i915_private *i915,
+static int get_panel_type(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback)
{
struct {
const char *name;
- int (*get_panel_type)(struct drm_i915_private *i915,
+ int (*get_panel_type)(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid, bool use_fallback);
int panel_type;
@@ -744,14 +748,14 @@ static int get_panel_type(struct drm_i915_private *i915,
int i;
for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
- panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
+ panel_types[i].panel_type = panel_types[i].get_panel_type(display, devdata,
drm_edid, use_fallback);
- drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
+ drm_WARN_ON(display->drm, panel_types[i].panel_type > 0xf &&
panel_types[i].panel_type != 0xff);
if (panel_types[i].panel_type >= 0)
- drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n",
+ drm_dbg_kms(display->drm, "Panel type (%s): %d\n",
panel_types[i].name, panel_types[i].panel_type);
}
@@ -766,7 +770,7 @@ static int get_panel_type(struct drm_i915_private *i915,
else
i = PANEL_TYPE_FALLBACK;
- drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n",
+ drm_dbg_kms(display->drm, "Selected panel type (%s): %d\n",
panel_types[i].name, panel_types[i].panel_type);
return panel_types[i].panel_type;
@@ -784,14 +788,14 @@ static bool panel_bool(unsigned int value, int panel_type)
/* Parse general panel options */
static void
-parse_panel_options(struct drm_i915_private *i915,
+parse_panel_options(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_lfp_options *lfp_options;
int panel_type = panel->vbt.panel_type;
int drrs_mode;
- lfp_options = bdb_find_section(i915, BDB_LFP_OPTIONS);
+ lfp_options = bdb_find_section(display, BDB_LFP_OPTIONS);
if (!lfp_options)
return;
@@ -815,23 +819,23 @@ parse_panel_options(struct drm_i915_private *i915,
switch (drrs_mode) {
case 0:
panel->vbt.drrs_type = DRRS_TYPE_STATIC;
- drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
+ drm_dbg_kms(display->drm, "DRRS supported mode is static\n");
break;
case 2:
panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DRRS supported mode is seamless\n");
break;
default:
panel->vbt.drrs_type = DRRS_TYPE_NONE;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DRRS not supported (VBT input)\n");
break;
}
}
static void
-parse_lfp_panel_dtd(struct drm_i915_private *i915,
+parse_lfp_panel_dtd(struct intel_display *display,
struct intel_panel *panel,
const struct bdb_lfp_data *lfp_data,
const struct bdb_lfp_data_ptrs *lfp_data_ptrs)
@@ -849,11 +853,11 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(i915, panel_fixed_mode, panel_dvo_timing);
+ fill_detail_timing_data(display, panel_fixed_mode, panel_dvo_timing);
panel->vbt.lfp_vbt_mode = panel_fixed_mode;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(panel_fixed_mode));
@@ -865,14 +869,14 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT initial LVDS value %x\n",
panel->vbt.bios_lvds_val);
}
}
static void
-parse_lfp_data(struct drm_i915_private *i915,
+parse_lfp_data(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_lfp_data *data;
@@ -882,41 +886,41 @@ parse_lfp_data(struct drm_i915_private *i915,
struct drm_printer p;
int panel_type = panel->vbt.panel_type;
- ptrs = bdb_find_section(i915, BDB_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(display, BDB_LFP_DATA_PTRS);
if (!ptrs)
return;
- data = bdb_find_section(i915, BDB_LFP_DATA);
+ data = bdb_find_section(display, BDB_LFP_DATA);
if (!data)
return;
if (!panel->vbt.lfp_vbt_mode)
- parse_lfp_panel_dtd(i915, panel, data, ptrs);
+ parse_lfp_panel_dtd(display, panel, data, ptrs);
pnp_id = get_lfp_pnp_id(data, ptrs, panel_type);
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, "Panel");
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, "Panel");
drm_edid_print_product_id(&p, pnp_id, false);
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
return;
- drm_dbg_kms(&i915->drm, "Panel name: %.*s\n",
+ drm_dbg_kms(display->drm, "Panel name: %.*s\n",
(int)sizeof(tail->panel_name[0].name),
tail->panel_name[panel_type].name);
- if (i915->display.vbt.version >= 188) {
+ if (display->vbt.version >= 188) {
panel->vbt.seamless_drrs_min_refresh_rate =
tail->seamless_drrs_min_refresh_rate[panel_type];
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Seamless DRRS min refresh rate: %d Hz\n",
panel->vbt.seamless_drrs_min_refresh_rate);
}
}
static void
-parse_generic_dtd(struct drm_i915_private *i915,
+parse_generic_dtd(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_generic_dtd *generic_dtd;
@@ -932,20 +936,20 @@ parse_generic_dtd(struct drm_i915_private *i915,
* first on VBT >= 229, but still fall back to trying the old LFP
* block if that fails.
*/
- if (i915->display.vbt.version < 229)
+ if (display->vbt.version < 229)
return;
- generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD);
+ generic_dtd = bdb_find_section(display, BDB_GENERIC_DTD);
if (!generic_dtd)
return;
if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
- drm_err(&i915->drm, "GDTD size %u is too small.\n",
+ drm_err(display->drm, "GDTD size %u is too small.\n",
generic_dtd->gdtd_size);
return;
} else if (generic_dtd->gdtd_size !=
sizeof(struct generic_dtd_entry)) {
- drm_err(&i915->drm, "Unexpected GDTD size %u\n",
+ drm_err(display->drm, "Unexpected GDTD size %u\n",
generic_dtd->gdtd_size);
/* DTD has unknown fields, but keep going */
}
@@ -953,7 +957,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
num_dtd = (get_blocksize(generic_dtd) -
sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
if (panel->vbt.panel_type >= num_dtd) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Panel type %d not found in table of %d DTD's\n",
panel->vbt.panel_type, num_dtd);
return;
@@ -998,7 +1002,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(panel_fixed_mode));
@@ -1006,7 +1010,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
}
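A rough illustration of the bounds math in parse_generic_dtd() above; the struct layout and sizes here are invented, not the real VBT definitions. The block declares its own per-entry size, the entry count falls out of the block size, and panel_type is validated against it before indexing.

#include <stdio.h>
#include <stddef.h>

struct generic_dtd_header {
	unsigned char gdtd_size;	/* per-entry size, declared by the VBT */
};

int main(void)
{
	/* hypothetical block: header plus three 26-byte entries */
	size_t block_size = sizeof(struct generic_dtd_header) + 3 * 26;
	struct generic_dtd_header hdr = { .gdtd_size = 26 };
	int panel_type = 5;
	int num_dtd;

	num_dtd = (block_size - sizeof(struct generic_dtd_header)) / hdr.gdtd_size;

	if (panel_type >= num_dtd)
		printf("Panel type %d not found in table of %d DTDs\n",
		       panel_type, num_dtd);
	return 0;
}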
static void
-parse_lfp_backlight(struct drm_i915_private *i915,
+parse_lfp_backlight(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_lfp_backlight *backlight_data;
@@ -1014,12 +1018,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
u16 level;
- backlight_data = bdb_find_section(i915, BDB_LFP_BACKLIGHT);
+ backlight_data = bdb_find_section(display, BDB_LFP_BACKLIGHT);
if (!backlight_data)
return;
if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported backlight data entry size %u\n",
backlight_data->entry_size);
return;
@@ -1029,7 +1033,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!panel->vbt.backlight.present) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PWM backlight not present in VBT (type %u)\n",
entry->type);
return;
@@ -1037,7 +1041,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
panel->vbt.backlight.controller = 0;
- if (i915->display.vbt.version >= 191) {
+ if (display->vbt.version >= 191) {
const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
@@ -1048,14 +1052,14 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
- if (i915->display.vbt.version >= 234) {
+ if (display->vbt.version >= 234) {
u16 min_level;
bool scale;
level = backlight_data->brightness_level[panel_type].level;
min_level = backlight_data->brightness_min_level[panel_type].level;
- if (i915->display.vbt.version >= 236)
+ if (display->vbt.version >= 236)
scale = backlight_data->brightness_precision_bits[panel_type] == 16;
else
scale = level > 255;
@@ -1064,7 +1068,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
min_level = min_level / 255;
if (min_level > 255) {
- drm_warn(&i915->drm, "Brightness min level > 255\n");
+ drm_warn(display->drm, "Brightness min level > 255\n");
level = 255;
}
panel->vbt.backlight.min_brightness = min_level;
@@ -1076,13 +1080,13 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.min_brightness = entry->min_brightness;
}
- if (i915->display.vbt.version >= 239)
+ if (display->vbt.version >= 239)
panel->vbt.backlight.hdr_dpcd_refresh_timeout =
DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100);
else
panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
panel->vbt.backlight.pwm_freq_hz,
@@ -1093,16 +1097,16 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
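Worked numbers may help for the brightness scaling above. On VBT >= 236 a precision-bits field says whether levels are 16 bit; a 16-bit value is scaled down by dividing by 255, and since 65535 / 255 = 257 the result can still overflow the 8-bit range, which is what the drm_warn and clamp guard against. A standalone sketch of just the arithmetic, with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t level = 0xFFFF;	/* hypothetical 16-bit VBT level */
	uint16_t min_level = 0xCCCC;	/* hypothetical 16-bit minimum */
	int scale = 1;			/* precision bits == 16 */

	if (scale) {
		level = level / 255;		/* 65535 / 255 = 257 */
		min_level = min_level / 255;	/* 52428 / 255 = 205 */
	}

	/* 257 does not fit the 8-bit brightness range, hence the clamp. */
	if (level > 255)
		level = 255;

	printf("level %d, min %d\n", level, min_level);
	return 0;
}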
static void
-parse_sdvo_lvds_data(struct drm_i915_private *i915,
+parse_sdvo_lvds_data(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_sdvo_lvds_dtd *dtd;
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915->display.params.vbt_sdvo_panel_type;
+ index = display->params.vbt_sdvo_panel_type;
if (index == -2) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Ignore SDVO LVDS mode from BIOS VBT tables.\n");
return;
}
@@ -1110,14 +1114,14 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915,
if (index == -1) {
const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
- sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS);
+ sdvo_lvds_options = bdb_find_section(display, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
- dtd = bdb_find_section(i915, BDB_SDVO_LVDS_DTD);
+ dtd = bdb_find_section(display, BDB_SDVO_LVDS_DTD);
if (!dtd)
return;
@@ -1128,7 +1132,8 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915,
* it here to be sure.
*/
if (index >= ARRAY_SIZE(dtd->dtd)) {
- drm_err(&i915->drm, "index %d is larger than dtd->dtd[4] array\n",
+ drm_err(display->drm,
+ "index %d is larger than dtd->dtd[4] array\n",
index);
return;
}
@@ -1137,19 +1142,19 @@ parse_sdvo_lvds_data(struct drm_i915_private *i915,
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(i915, panel_fixed_mode, &dtd->dtd[index]);
+ fill_detail_timing_data(display, panel_fixed_mode, &dtd->dtd[index]);
panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found SDVO LVDS mode in BIOS VBT tables: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(panel_fixed_mode));
}
-static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
+static int intel_bios_ssc_frequency(struct intel_display *display,
bool alternate)
{
- switch (DISPLAY_VER(i915)) {
+ switch (DISPLAY_VER(display)) {
case 2:
return alternate ? 66667 : 48000;
case 3:
@@ -1161,45 +1166,46 @@ static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
}
static void
-parse_general_features(struct drm_i915_private *i915)
+parse_general_features(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_general_features *general;
- general = bdb_find_section(i915, BDB_GENERAL_FEATURES);
+ general = bdb_find_section(display, BDB_GENERAL_FEATURES);
if (!general)
return;
- i915->display.vbt.int_tv_support = general->int_tv_support;
+ display->vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
- if (i915->display.vbt.version >= 155 &&
- (HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
- i915->display.vbt.int_crt_support = general->int_crt_support;
- i915->display.vbt.lvds_use_ssc = general->enable_ssc;
- i915->display.vbt.lvds_ssc_freq =
- intel_bios_ssc_frequency(i915, general->ssc_freq);
- i915->display.vbt.display_clock_mode = general->display_clock_mode;
- i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
- if (i915->display.vbt.version >= 181) {
- i915->display.vbt.orientation = general->rotate_180 ?
+ if (display->vbt.version >= 155 &&
+ (HAS_DDI(display) || IS_VALLEYVIEW(i915)))
+ display->vbt.int_crt_support = general->int_crt_support;
+ display->vbt.lvds_use_ssc = general->enable_ssc;
+ display->vbt.lvds_ssc_freq =
+ intel_bios_ssc_frequency(display, general->ssc_freq);
+ display->vbt.display_clock_mode = general->display_clock_mode;
+ display->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (display->vbt.version >= 181) {
+ display->vbt.orientation = general->rotate_180 ?
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
DRM_MODE_PANEL_ORIENTATION_NORMAL;
} else {
- i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ display->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- if (i915->display.vbt.version >= 249 && general->afc_startup_config) {
- i915->display.vbt.override_afc_startup = true;
- i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
+ if (display->vbt.version >= 249 && general->afc_startup_config) {
+ display->vbt.override_afc_startup = true;
+ display->vbt.override_afc_startup_val = general->afc_startup_config == 1 ? 0 : 7;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- i915->display.vbt.int_tv_support,
- i915->display.vbt.int_crt_support,
- i915->display.vbt.lvds_use_ssc,
- i915->display.vbt.lvds_ssc_freq,
- i915->display.vbt.display_clock_mode,
- i915->display.vbt.fdi_rx_polarity_inverted);
+ display->vbt.int_tv_support,
+ display->vbt.int_crt_support,
+ display->vbt.lvds_use_ssc,
+ display->vbt.lvds_ssc_freq,
+ display->vbt.display_clock_mode,
+ display->vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -1209,7 +1215,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
}
static void
-parse_sdvo_device_mapping(struct drm_i915_private *i915)
+parse_sdvo_device_mapping(struct intel_display *display)
{
const struct intel_bios_encoder_data *devdata;
int count = 0;
@@ -1218,19 +1224,19 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
* Only parse SDVO mappings on gens that could have SDVO. This isn't
* accurate and doesn't have to be, as long as it's not too strict.
*/
- if (!IS_DISPLAY_VER(i915, 3, 7)) {
- drm_dbg_kms(&i915->drm, "Skipping SDVO device mapping\n");
+ if (!IS_DISPLAY_VER(display, 3, 7)) {
+ drm_dbg_kms(display->drm, "Skipping SDVO device mapping\n");
return;
}
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
struct sdvo_device_mapping *mapping;
- if (child->slave_addr != SLAVE_ADDR1 &&
- child->slave_addr != SLAVE_ADDR2) {
+ if (child->target_addr != TARGET_ADDR1 &&
+ child->target_addr != TARGET_ADDR2) {
/*
- * If the slave address is neither 0x70 nor 0x72,
+ * If the target address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
@@ -1238,39 +1244,39 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
if (child->dvo_port != DEVICE_PORT_DVOB &&
child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Incorrect SDVO port. Skip it\n");
continue;
}
- drm_dbg_kms(&i915->drm,
- "the SDVO device with slave addr %2x is found on"
+ drm_dbg_kms(display->drm,
+ "the SDVO device with target addr %2x is found on"
" %s port\n",
- child->slave_addr,
+ child->target_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1];
+ mapping = &display->vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
- mapping->slave_addr = child->slave_addr;
+ mapping->target_addr = child->target_addr;
mapping->dvo_wiring = child->dvo_wiring;
mapping->ddc_pin = child->ddc_pin;
mapping->i2c_pin = child->i2c_pin;
mapping->initialized = 1;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
- mapping->dvo_port, mapping->slave_addr,
+ mapping->dvo_port, mapping->target_addr,
mapping->dvo_wiring, mapping->ddc_pin,
mapping->i2c_pin);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (child->slave2_addr) {
+ if (child->target2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- drm_dbg_kms(&i915->drm,
- "there exists the slave2_addr. Maybe this"
+ drm_dbg_kms(display->drm,
+ "there exists the target2_addr. Maybe this"
" is a SDVO device with multiple inputs.\n");
}
count++;
@@ -1278,28 +1284,28 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
if (!count) {
/* No SDVO device info is found */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"No SDVO device info is found in VBT\n");
}
}
static void
-parse_driver_features(struct drm_i915_private *i915)
+parse_driver_features(struct intel_display *display)
{
const struct bdb_driver_features *driver;
- driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(display, BDB_DRIVER_FEATURES);
if (!driver)
return;
- if (DISPLAY_VER(i915) >= 5) {
+ if (DISPLAY_VER(display) >= 5) {
/*
* Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
* to mean "eDP". The VBT spec doesn't agree with that
* interpretation, but real world VBTs seem to.
*/
if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
- i915->display.vbt.int_lvds_support = 0;
+ display->vbt.int_lvds_support = 0;
} else {
/*
* FIXME it's not clear which BDB version has the LVDS config
@@ -1312,25 +1318,25 @@ parse_driver_features(struct drm_i915_private *i915)
* in the wild with the bits correctly populated. Version
* 108 (on i85x) does not have the bits correctly populated.
*/
- if (i915->display.vbt.version >= 134 &&
+ if (display->vbt.version >= 134 &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
- i915->display.vbt.int_lvds_support = 0;
+ display->vbt.int_lvds_support = 0;
}
}
static void
-parse_panel_driver_features(struct drm_i915_private *i915,
+parse_panel_driver_features(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_driver_features *driver;
- driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(display, BDB_DRIVER_FEATURES);
if (!driver)
return;
- if (i915->display.vbt.version < 228) {
- drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
+ if (display->vbt.version < 228) {
+ drm_dbg_kms(display->drm, "DRRS State Enabled:%d\n",
driver->drrs_enabled);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
@@ -1354,7 +1360,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
}
static void
-parse_power_conservation_features(struct drm_i915_private *i915,
+parse_power_conservation_features(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_lfp_power *power;
@@ -1362,10 +1368,10 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.vrr = true; /* matches Windows behaviour */
- if (i915->display.vbt.version < 228)
+ if (display->vbt.version < 228)
return;
- power = bdb_find_section(i915, BDB_LFP_POWER);
+ power = bdb_find_section(display, BDB_LFP_POWER);
if (!power)
return;
@@ -1388,16 +1394,16 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.drrs_type = DRRS_TYPE_NONE;
}
- if (i915->display.vbt.version >= 232)
+ if (display->vbt.version >= 232)
panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type);
- if (i915->display.vbt.version >= 233)
+ if (display->vbt.version >= 233)
panel->vbt.vrr = panel_bool(power->vrr_feature_enabled,
panel_type);
}
static void
-parse_edp(struct drm_i915_private *i915,
+parse_edp(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_edp *edp;
@@ -1405,7 +1411,7 @@ parse_edp(struct drm_i915_private *i915,
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
- edp = bdb_find_section(i915, BDB_EDP);
+ edp = bdb_find_section(display, BDB_EDP);
if (!edp)
return;
@@ -1427,7 +1433,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.pps = *edp_pps;
- if (i915->display.vbt.version >= 224) {
+ if (display->vbt.version >= 224) {
panel->vbt.edp.rate =
edp->edp_fast_link_training_rate[panel_type] * 20;
} else {
@@ -1442,7 +1448,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.rate = 540000;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT has unknown eDP link rate value %u\n",
edp_link_params->rate);
break;
@@ -1460,7 +1466,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.lanes = 4;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT has unknown eDP lane count value %u\n",
edp_link_params->lanes);
break;
@@ -1480,7 +1486,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT has unknown eDP pre-emphasis value %u\n",
edp_link_params->preemphasis);
break;
@@ -1500,19 +1506,19 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT has unknown eDP voltage swing value %u\n",
edp_link_params->vswing);
break;
}
- if (i915->display.vbt.version >= 173) {
+ if (display->vbt.version >= 173) {
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
- if (i915->display.params.edp_vswing) {
+ if (display->params.edp_vswing) {
panel->vbt.edp.low_vswing =
- i915->display.params.edp_vswing == 1;
+ display->params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
panel->vbt.edp.low_vswing = vswing == 0;
@@ -1522,26 +1528,27 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.drrs_msa_timing_delay =
panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2);
- if (i915->display.vbt.version >= 244)
+ if (display->vbt.version >= 244)
panel->vbt.edp.max_link_rate =
edp->edp_max_port_link_rate[panel_type] * 20;
- if (i915->display.vbt.version >= 251)
+ if (display->vbt.version >= 251)
panel->vbt.edp.dsc_disable =
panel_bool(edp->edp_dsc_disable, panel_type);
}
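parse_edp() leans on two small helpers, panel_bits() and panel_bool(), whose definitions sit outside this hunk. Assuming they behave the way the call sites suggest (each panel's field is packed N bits wide into one word, indexed by panel_type, just like the open-coded vswing nibble extraction above), a self-contained sketch:

#include <stdio.h>

static unsigned int panel_bits(unsigned int value, int panel_type, int num_bits)
{
	return (value >> (panel_type * num_bits)) & ((1 << num_bits) - 1);
}

static int panel_bool(unsigned int value, int panel_type)
{
	return panel_bits(value, panel_type, 1);
}

int main(void)
{
	unsigned int vswing_preemph = 0x00000F10;	/* hypothetical word */

	/* panel 0 -> nibble 0x0, panel 1 -> 0x1, panel 2 -> 0xF */
	printf("panel 2 vswing: %u\n", panel_bits(vswing_preemph, 2, 4));
	printf("panel 1 bool:   %d\n", panel_bool(0x2, 1));
	return 0;
}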
static void
-parse_psr(struct drm_i915_private *i915,
+parse_psr(struct intel_display *display,
struct intel_panel *panel)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_psr *psr;
const struct psr_table *psr_table;
int panel_type = panel->vbt.panel_type;
- psr = bdb_find_section(i915, BDB_PSR);
+ psr = bdb_find_section(display, BDB_PSR);
if (!psr) {
- drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
+ drm_dbg_kms(display->drm, "No PSR BDB found.\n");
return;
}
@@ -1558,8 +1565,8 @@ parse_psr(struct drm_i915_private *i915,
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
* Old decimal value is wake up time in multiples of 100 us.
*/
- if (i915->display.vbt.version >= 205 &&
- (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
+ if (display->vbt.version >= 205 &&
+ (DISPLAY_VER(display) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
panel->vbt.psr.tp1_wakeup_time_us = 500;
@@ -1571,7 +1578,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp1_wakeup_time);
fallthrough;
@@ -1591,7 +1598,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp2_tp3_wakeup_time);
fallthrough;
@@ -1604,7 +1611,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
- if (i915->display.vbt.version >= 226) {
+ if (display->vbt.version >= 226) {
u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = panel_bits(wakeup_time, panel_type, 2);
@@ -1630,13 +1637,13 @@ parse_psr(struct drm_i915_private *i915,
}
}
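The two wakeup-time encodings handled above can be condensed into one helper for illustration. The mapping {0: 500, 1: 100, 2: 2500, 3: 0} us and the old "multiples of 100 us" rule are taken straight from the comment in the hunk; the helper name is invented.

#include <stdio.h>

/* Sketch of the two TP wakeup-time encodings (name is a stand-in). */
static int tp_wakeup_time_us(int raw, int new_encoding)
{
	if (!new_encoding)
		return raw * 100;	/* old: decimal, multiples of 100 us */

	switch (raw) {			/* new: enumerated values */
	case 0: return 500;
	case 1: return 100;
	case 3: return 0;
	default:			/* out of range: use the max */
	case 2: return 2500;
	}
}

int main(void)
{
	printf("old raw 7 -> %d us\n", tp_wakeup_time_us(7, 0));	/* 700 */
	printf("new raw 1 -> %d us\n", tp_wakeup_time_us(1, 1));	/* 100 */
	return 0;
}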
-static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
+static void parse_dsi_backlight_ports(struct intel_display *display,
struct intel_panel *panel,
enum port port)
{
- enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+ enum port port_bc = DISPLAY_VER(display) >= 11 ? PORT_B : PORT_C;
- if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) {
+ if (!panel->vbt.dsi.config->dual_link || display->vbt.version < 197) {
panel->vbt.dsi.bl_ports = BIT(port);
if (panel->vbt.dsi.config->cabc_supported)
panel->vbt.dsi.cabc_ports = BIT(port);
@@ -1676,7 +1683,7 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
}
static void
-parse_mipi_config(struct drm_i915_private *i915,
+parse_mipi_config(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_mipi_config *start;
@@ -1686,27 +1693,19 @@ parse_mipi_config(struct drm_i915_private *i915,
enum port port;
/* parse MIPI blocks only if LFP type is MIPI */
- if (!intel_bios_is_dsi_present(i915, &port))
+ if (!intel_bios_is_dsi_present(display, &port))
return;
/* Initialize this to undefined indicating no generic MIPI support */
panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
- /* Block #40 is already parsed and panel_fixed_mode is
- * stored in i915->lfp_vbt_mode
- * resuse this when needed
- */
-
- /* Parse #52 for panel index used from panel_type already
- * parsed
- */
- start = bdb_find_section(i915, BDB_MIPI_CONFIG);
+ start = bdb_find_section(display, BDB_MIPI_CONFIG);
if (!start) {
- drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
+ drm_dbg_kms(display->drm, "No MIPI config BDB found");
return;
}
- drm_dbg(&i915->drm, "Found MIPI Config block, panel index = %d\n",
+ drm_dbg(display->drm, "Found MIPI Config block, panel index = %d\n",
panel_type);
/*
@@ -1727,7 +1726,7 @@ parse_mipi_config(struct drm_i915_private *i915,
return;
}
- parse_dsi_backlight_ports(i915, panel, port);
+ parse_dsi_backlight_ports(display, panel, port);
/* FIXME is the 90 vs. 270 correct? */
switch (config->rotation) {
@@ -1759,7 +1758,7 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Find the sequence block and size for the given panel. */
static const u8 *
-find_panel_sequence_block(struct drm_i915_private *i915,
+find_panel_sequence_block(struct intel_display *display,
const struct bdb_mipi_sequence *sequence,
u16 panel_id, u32 *seq_size)
{
@@ -1777,7 +1776,8 @@ find_panel_sequence_block(struct drm_i915_private *i915,
for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
if (index + header_size > total) {
- drm_err(&i915->drm, "Invalid sequence block (header)\n");
+ drm_err(display->drm,
+ "Invalid sequence block (header)\n");
return NULL;
}
@@ -1790,7 +1790,7 @@ find_panel_sequence_block(struct drm_i915_private *i915,
index += header_size;
if (index + current_size > total) {
- drm_err(&i915->drm, "Invalid sequence block\n");
+ drm_err(display->drm, "Invalid sequence block\n");
return NULL;
}
@@ -1802,12 +1802,13 @@ find_panel_sequence_block(struct drm_i915_private *i915,
index += current_size;
}
- drm_err(&i915->drm, "Sequence block detected but no valid configuration\n");
+ drm_err(display->drm,
+ "Sequence block detected but no valid configuration\n");
return NULL;
}
-static int goto_next_sequence(struct drm_i915_private *i915,
+static int goto_next_sequence(struct intel_display *display,
const u8 *data, int index, int total)
{
u16 len;
@@ -1838,7 +1839,7 @@ static int goto_next_sequence(struct drm_i915_private *i915,
len = *(data + index + 6) + 7;
break;
default:
- drm_err(&i915->drm, "Unknown operation byte\n");
+ drm_err(display->drm, "Unknown operation byte\n");
return 0;
}
}
@@ -1846,7 +1847,7 @@ static int goto_next_sequence(struct drm_i915_private *i915,
return 0;
}
-static int goto_next_sequence_v3(struct drm_i915_private *i915,
+static int goto_next_sequence_v3(struct intel_display *display,
const u8 *data, int index, int total)
{
int seq_end;
@@ -1858,7 +1859,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
* checking on the structure.
*/
if (total < 5) {
- drm_err(&i915->drm, "Too small sequence size\n");
+ drm_err(display->drm, "Too small sequence size\n");
return 0;
}
@@ -1875,7 +1876,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
seq_end = index + size_of_sequence;
if (seq_end > total) {
- drm_err(&i915->drm, "Invalid sequence size\n");
+ drm_err(display->drm, "Invalid sequence size\n");
return 0;
}
@@ -1885,7 +1886,8 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
if (operation_byte == MIPI_SEQ_ELEM_END) {
if (index != seq_end) {
- drm_err(&i915->drm, "Invalid element structure\n");
+ drm_err(display->drm,
+ "Invalid element structure\n");
return 0;
}
return index;
@@ -1907,7 +1909,7 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
case MIPI_SEQ_ELEM_PMIC:
break;
default:
- drm_err(&i915->drm, "Unknown operation byte %u\n",
+ drm_err(display->drm, "Unknown operation byte %u\n",
operation_byte);
break;
}
@@ -1920,13 +1922,13 @@ static int goto_next_sequence_v3(struct drm_i915_private *i915,
* Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
* skip all delay + gpio operands and stop at the first DSI packet op.
*/
-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
+static int get_init_otp_deassert_fragment_len(struct intel_display *display,
struct intel_panel *panel)
{
const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
int index, len;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!data || panel->vbt.dsi.seq_version != 1))
return 0;
@@ -1955,7 +1957,7 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+static void vlv_fixup_mipi_sequences(struct intel_display *display,
struct intel_panel *panel)
{
u8 *init_otp;
@@ -1973,11 +1975,11 @@ static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
return;
/* The deassert-sequence ends at the first DSI packet */
- len = get_init_otp_deassert_fragment_len(i915, panel);
+ len = get_init_otp_deassert_fragment_len(display, panel);
if (!len)
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Using init OTP fragment to deassert reset\n");
/* Copy the fragment, update seq byte and terminate it */
@@ -2010,29 +2012,32 @@ static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
* or examine the contents of the sequences to
* avoid false positives?
*/
-static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+static void icl_fixup_mipi_sequences(struct intel_display *display,
struct intel_panel *panel)
{
if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
- drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+ drm_dbg_kms(display->drm,
+ "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
}
}
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
+static void fixup_mipi_sequences(struct intel_display *display,
struct intel_panel *panel)
{
- if (DISPLAY_VER(i915) >= 11)
- icl_fixup_mipi_sequences(i915, panel);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 11)
+ icl_fixup_mipi_sequences(display, panel);
else if (IS_VALLEYVIEW(i915))
- vlv_fixup_mipi_sequences(i915, panel);
+ vlv_fixup_mipi_sequences(display, panel);
}
static void
-parse_mipi_sequence(struct drm_i915_private *i915,
+parse_mipi_sequence(struct intel_display *display,
struct intel_panel *panel)
{
int panel_type = panel->vbt.panel_type;
@@ -2046,25 +2051,25 @@ parse_mipi_sequence(struct drm_i915_private *i915,
if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
- sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE);
+ sequence = bdb_find_section(display, BDB_MIPI_SEQUENCE);
if (!sequence) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"No MIPI Sequence found, parsing complete\n");
return;
}
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 4) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unable to parse MIPI Sequence Block v%u\n",
sequence->version);
return;
}
- drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n",
+ drm_dbg(display->drm, "Found MIPI sequence block v%u\n",
sequence->version);
- seq_data = find_panel_sequence_block(i915, sequence, panel_type, &seq_size);
+ seq_data = find_panel_sequence_block(display, sequence, panel_type, &seq_size);
if (!seq_data)
return;
@@ -2079,24 +2084,24 @@ parse_mipi_sequence(struct drm_i915_private *i915,
break;
if (seq_id >= MIPI_SEQ_MAX) {
- drm_err(&i915->drm, "Unknown sequence %u\n",
+ drm_err(display->drm, "Unknown sequence %u\n",
seq_id);
goto err;
}
/* Log about presence of sequences we won't run. */
if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported sequence %u\n", seq_id);
panel->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
- index = goto_next_sequence_v3(i915, data, index, seq_size);
+ index = goto_next_sequence_v3(display, data, index, seq_size);
else
- index = goto_next_sequence(i915, data, index, seq_size);
+ index = goto_next_sequence(display, data, index, seq_size);
if (!index) {
- drm_err(&i915->drm, "Invalid sequence %u\n",
+ drm_err(display->drm, "Invalid sequence %u\n",
seq_id);
goto err;
}
@@ -2106,9 +2111,9 @@ parse_mipi_sequence(struct drm_i915_private *i915,
panel->vbt.dsi.size = seq_size;
panel->vbt.dsi.seq_version = sequence->version;
- fixup_mipi_sequences(i915, panel);
+ fixup_mipi_sequences(display, panel);
- drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n");
+ drm_dbg(display->drm, "MIPI related VBT parsing complete\n");
return;
err:
@@ -2117,47 +2122,47 @@ err:
}
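Of the sequence-block walks above, the v3 format is the easier one to picture: each element is an operation byte followed by a size byte and payload, and the declared sequence size lets the walker verify that the END marker lands exactly on the boundary. A simplified sketch, reduced to the parts visible in these hunks (the real elements carry more structure):

#include <stdio.h>
#include <stdint.h>

#define SEQ_ELEM_END 0	/* stand-in for MIPI_SEQ_ELEM_END */

static int walk_v3(const uint8_t *data, int index, int seq_end)
{
	while (index < seq_end) {
		uint8_t op = data[index++];

		if (op == SEQ_ELEM_END)		/* must land on the boundary */
			return index == seq_end ? index : 0;

		index += 1 + data[index];	/* size byte + payload */
	}
	return 0;				/* ran past the boundary */
}

int main(void)
{
	/* two elements: op 2 with 1 payload byte, op 3 with 0, then END */
	const uint8_t seq[] = { 2, 1, 0xAA, 3, 0, SEQ_ELEM_END };

	printf("end index: %d\n", walk_v3(seq, 0, sizeof(seq)));
	return 0;
}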
static void
-parse_compression_parameters(struct drm_i915_private *i915)
+parse_compression_parameters(struct intel_display *display)
{
const struct bdb_compression_parameters *params;
struct intel_bios_encoder_data *devdata;
u16 block_size;
int index;
- if (i915->display.vbt.version < 198)
+ if (display->vbt.version < 198)
return;
- params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS);
+ params = bdb_find_section(display, BDB_COMPRESSION_PARAMETERS);
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT: unsupported compression param entry size\n");
return;
}
block_size = get_blocksize(params);
if (block_size < sizeof(*params)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT: expected 16 compression param entries\n");
return;
}
}
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
if (!child->compression_enable)
continue;
if (!params) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT: compression params not available\n");
continue;
}
if (child->compression_method_cps) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT: CPS compression not supported\n");
continue;
}
@@ -2169,12 +2174,12 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
}
-static u8 translate_iboost(struct drm_i915_private *i915, u8 val)
+static u8 translate_iboost(struct intel_display *display, u8 val)
{
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
if (val >= ARRAY_SIZE(mapping)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
return 0;
}
@@ -2231,8 +2236,9 @@ static const u8 adlp_ddc_pin_map[] = {
[GMBUS_PIN_12_TC4_ICP] = ADLP_DDC_BUS_PORT_TC4,
};
-static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
+static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *ddc_pin_map;
int i, n_entries;
@@ -2247,7 +2253,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
} else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) {
+ } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
@@ -2266,7 +2272,7 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
return i;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
vbt_pin);
return 0;
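The pin translation above is a reverse table lookup: each platform table is indexed by gmbus pin and stores the corresponding VBT DDC pin, so the VBT value is matched by scanning and the index is the answer. A toy version with an invented table:

#include <stdio.h>

int main(void)
{
	/* hypothetical table: index = gmbus pin, value = VBT DDC pin,
	 * 0 entries are unused slots */
	static const unsigned char ddc_pin_map[] = { 0, 1, 2, 0, 4, 3 };
	unsigned char vbt_pin = 3;

	for (unsigned int i = 0; i < sizeof(ddc_pin_map); i++) {
		if (ddc_pin_map[i] && ddc_pin_map[i] == vbt_pin) {
			printf("VBT pin %u -> gmbus pin %u\n", vbt_pin, i);
			return 0;
		}
	}
	printf("Ignoring alternate pin: VBT claims DDC pin %u\n", vbt_pin);
	return 0;
}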
@@ -2324,9 +2330,10 @@ static enum port __dvo_port_to_port(int n_ports, int n_dvo,
return PORT_NONE;
}
-static enum port dvo_port_to_port(struct drm_i915_private *i915,
+static enum port dvo_port_to_port(struct intel_display *display,
u8 dvo_port)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
/*
* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
@@ -2378,7 +2385,7 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
[PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
};
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping),
ARRAY_SIZE(xelpd_port_mapping[0]),
xelpd_port_mapping,
@@ -2401,13 +2408,13 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
}
static enum port
-dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+dsi_dvo_port_to_port(struct intel_display *display, u8 dvo_port)
{
switch (dvo_port) {
case DVO_PORT_MIPIA:
return PORT_A;
case DVO_PORT_MIPIC:
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return PORT_B;
else
return PORT_C;
@@ -2418,13 +2425,13 @@ dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
const struct child_device_config *child = &devdata->child;
enum port port;
- port = dvo_port_to_port(i915, child->dvo_port);
- if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
- port = dsi_dvo_port_to_port(i915, child->dvo_port);
+ port = dvo_port_to_port(display, child->dvo_port);
+ if (port == PORT_NONE && DISPLAY_VER(display) >= 11)
+ port = dsi_dvo_port_to_port(display, child->dvo_port);
return port;
}
@@ -2469,10 +2476,10 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 216)
+ if (!devdata || devdata->display->vbt.version < 216)
return 0;
- if (devdata->i915->display.vbt.version >= 230)
+ if (devdata->display->vbt.version >= 230)
return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate);
else
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
@@ -2480,7 +2487,7 @@ int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 244)
+ if (!devdata || devdata->display->vbt.version < 244)
return 0;
return devdata->child.dp_max_lane_count + 1;
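Note the idiom in these accessors: a field that only exists from some BDB version on reads as "not present" (0) on anything older, and dp_max_lane_count is stored minus one. A compressed sketch with a stand-in struct:

#include <stdio.h>

struct fake_vbt {
	int version;
	int dp_max_lane_count;	/* stored as lanes minus one */
};

static int max_lane_count(const struct fake_vbt *v)
{
	if (!v || v->version < 244)	/* field only exists from v244 on */
		return 0;		/* 0: fall back to platform default */
	return v->dp_max_lane_count + 1;
}

int main(void)
{
	struct fake_vbt old_vbt = { .version = 230, .dp_max_lane_count = 3 };
	struct fake_vbt new_vbt = { .version = 251, .dp_max_lane_count = 3 };

	printf("v230: %d lanes\n", max_lane_count(&old_vbt));	/* 0 */
	printf("v251: %d lanes\n", max_lane_count(&new_vbt));	/* 4 */
	return 0;
}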
@@ -2489,10 +2496,10 @@ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
bool is_hdmi;
- if (port != PORT_A || DISPLAY_VER(i915) >= 12)
+ if (port != PORT_A || DISPLAY_VER(display) >= 12)
return;
if (!intel_bios_encoder_supports_dvi(devdata))
@@ -2500,7 +2507,7 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
- drm_dbg_kms(&i915->drm, "VBT claims port A supports DVI%s, ignoring\n",
+ drm_dbg_kms(display->drm, "VBT claims port A supports DVI%s, ignoring\n",
is_hdmi ? "/HDMI" : "");
devdata->child.device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
@@ -2510,7 +2517,8 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
enum port port)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_bios_encoder_supports_dvi(devdata))
return;
@@ -2521,7 +2529,8 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
* up to 11, whereas the BDW max is 9.
*/
if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) {
- drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
+ drm_dbg_kms(display->drm,
+ "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
port_name(port), devdata->child.hdmi_level_shifter_value, 9);
devdata->child.hdmi_level_shifter_value = 9;
@@ -2569,14 +2578,14 @@ intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
bool
intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
{
- return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon;
+ return devdata && HAS_LSPCON(devdata->display) && devdata->child.lspcon;
}
/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 158 ||
- DISPLAY_VER(devdata->i915) >= 14)
+ if (!devdata || devdata->display->vbt.version < 158 ||
+ DISPLAY_VER(devdata->display) >= 14)
return -1;
return devdata->child.hdmi_level_shifter_value;
@@ -2584,7 +2593,7 @@ int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 204)
+ if (!devdata || devdata->display->vbt.version < 204)
return 0;
switch (devdata->child.hdmi_max_data_rate) {
@@ -2606,8 +2615,9 @@ int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata
}
}
-static bool is_port_valid(struct drm_i915_private *i915, enum port port)
+static bool is_port_valid(struct intel_display *display, enum port port)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
/*
* On some ICL SKUs port F is not present, but broken VBTs mark
* the port as present. Only try to initialize port F for the
@@ -2621,7 +2631,7 @@ static bool is_port_valid(struct drm_i915_private *i915, enum port port)
static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
const struct child_device_config *child = &devdata->child;
bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
@@ -2641,7 +2651,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
supports_tbt = intel_bios_encoder_supports_tbt(devdata);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d DP++:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
intel_bios_encoder_supports_dp_dual_mode(devdata),
@@ -2651,33 +2661,33 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
hdmi_level_shift = intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT HDMI level shift: %d\n",
port_name(port), hdmi_level_shift);
}
max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata);
if (max_tmds_clock)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
/* I_boost config for SKL and above */
dp_boost_level = intel_bios_dp_boost_level(devdata);
if (dp_boost_level)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT (e)DP boost level: %d\n",
port_name(port), dp_boost_level);
hdmi_boost_level = intel_bios_hdmi_boost_level(devdata);
if (hdmi_boost_level)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT HDMI boost level: %d\n",
port_name(port), hdmi_boost_level);
dp_max_link_rate = intel_bios_dp_max_link_rate(devdata);
if (dp_max_link_rate)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %c VBT DP max link rate: %d\n",
port_name(port), dp_max_link_rate);
@@ -2685,22 +2695,22 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
* FIXME need to implement support for VBT
* vswing/preemph tables should this ever trigger.
*/
- drm_WARN(&i915->drm, child->use_vbt_vswing,
+ drm_WARN(display->drm, child->use_vbt_vswing,
"Port %c asks to use VBT vswing/preemph tables\n",
port_name(port));
}
static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = devdata->i915;
+ struct intel_display *display = devdata->display;
enum port port;
port = intel_bios_encoder_port(devdata);
if (port == PORT_NONE)
return;
- if (!is_port_valid(i915, port)) {
- drm_dbg_kms(&i915->drm,
+ if (!is_port_valid(display, port)) {
+ drm_dbg_kms(display->drm,
"VBT reports port %c as supported, but that can't be true: skipping\n",
port_name(port));
return;
@@ -2710,22 +2720,24 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
sanitize_hdmi_level_shift(devdata, port);
}
-static bool has_ddi_port_info(struct drm_i915_private *i915)
+static bool has_ddi_port_info(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 5 || IS_G4X(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ return DISPLAY_VER(display) >= 5 || IS_G4X(i915);
}
-static void parse_ddi_ports(struct drm_i915_private *i915)
+static void parse_ddi_ports(struct intel_display *display)
{
struct intel_bios_encoder_data *devdata;
- if (!has_ddi_port_info(i915))
+ if (!has_ddi_port_info(display))
return;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+ list_for_each_entry(devdata, &display->vbt.display_devices, node)
parse_ddi_port(devdata);
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+ list_for_each_entry(devdata, &display->vbt.display_devices, node)
print_ddi_port(devdata);
}
@@ -2751,27 +2763,27 @@ static int child_device_expected_size(u16 version)
return 22;
}
-static bool child_device_size_valid(struct drm_i915_private *i915, int size)
+static bool child_device_size_valid(struct intel_display *display, int size)
{
int expected_size;
- expected_size = child_device_expected_size(i915->display.vbt.version);
+ expected_size = child_device_expected_size(display->vbt.version);
if (expected_size < 0) {
expected_size = sizeof(struct child_device_config);
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Expected child device config size for VBT version %u not known; assuming %d\n",
- i915->display.vbt.version, expected_size);
+ display->vbt.version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (size != expected_size)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unexpected child device config size %d (expected %d for VBT version %u)\n",
- size, expected_size, i915->display.vbt.version);
+ size, expected_size, display->vbt.version);
/* The legacy sized child device config is the minimum we need. */
if (size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Child device config size %d is too small.\n",
size);
return false;
@@ -2781,8 +2793,9 @@ static bool child_device_size_valid(struct drm_i915_private *i915, int size)
}
static void
-parse_general_definitions(struct drm_i915_private *i915)
+parse_general_definitions(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_general_definitions *defs;
struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
@@ -2790,27 +2803,27 @@ parse_general_definitions(struct drm_i915_private *i915)
u16 block_size;
int bus_pin;
- defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS);
+ defs = bdb_find_section(display, BDB_GENERAL_DEFINITIONS);
if (!defs) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"No general definition block is found, no devices defined.\n");
return;
}
block_size = get_blocksize(defs);
if (block_size < sizeof(*defs)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"General definitions block too small (%u)\n",
block_size);
return;
}
bus_pin = defs->crt_ddc_gmbus_pin;
- drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
+ drm_dbg_kms(display->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(i915, bus_pin))
- i915->display.vbt.crt_ddc_pin = bus_pin;
+ display->vbt.crt_ddc_pin = bus_pin;
- if (!child_device_size_valid(i915, defs->child_dev_size))
+ if (!child_device_size_valid(display, defs->child_dev_size))
return;
/* get the number of child device */
@@ -2821,7 +2834,7 @@ parse_general_definitions(struct drm_i915_private *i915)
if (!child->device_type)
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found VBT child device with type 0x%x\n",
child->device_type);
@@ -2829,7 +2842,7 @@ parse_general_definitions(struct drm_i915_private *i915)
if (!devdata)
break;
- devdata->i915 = i915;
+ devdata->display = display;
/*
* Copy as much as we know (sizeof) and is available
@@ -2839,37 +2852,39 @@ parse_general_definitions(struct drm_i915_private *i915)
memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
+ list_add_tail(&devdata->node, &display->vbt.display_devices);
}
- if (list_empty(&i915->display.vbt.display_devices))
- drm_dbg_kms(&i915->drm,
+ if (list_empty(&display->vbt.display_devices))
+ drm_dbg_kms(display->drm,
"no child dev is parsed from VBT\n");
}
/* Common defaults which may be overridden by VBT. */
static void
-init_vbt_defaults(struct drm_i915_private *i915)
+init_vbt_defaults(struct intel_display *display)
{
- i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ display->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
- i915->display.vbt.int_tv_support = 1;
- i915->display.vbt.int_crt_support = 1;
+ display->vbt.int_tv_support = 1;
+ display->vbt.int_crt_support = 1;
/* driver features */
- i915->display.vbt.int_lvds_support = 1;
+ display->vbt.int_lvds_support = 1;
/* Default to using SSC */
- i915->display.vbt.lvds_use_ssc = 1;
+ display->vbt.lvds_use_ssc = 1;
/*
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
- i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
- !HAS_PCH_SPLIT(i915));
- drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n",
- i915->display.vbt.lvds_ssc_freq);
+ display->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(display,
+ !HAS_PCH_SPLIT(i915));
+ drm_dbg_kms(display->drm, "Set default to SSC at %d kHz\n",
+ display->vbt.lvds_ssc_freq);
}
/* Common defaults which may be overridden by VBT. */
@@ -2885,12 +2900,13 @@ init_vbt_panel_defaults(struct intel_panel *panel)
/* Defaults to initialize only if there is no VBT. */
static void
-init_vbt_missing_defaults(struct drm_i915_private *i915)
+init_vbt_missing_defaults(struct intel_display *display)
{
- unsigned int ports = DISPLAY_RUNTIME_INFO(i915)->port_mask;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask;
enum port port;
- if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
+ if (!HAS_DDI(display) && !IS_CHERRYVIEW(i915))
return;
for_each_port_masked(port, ports) {
@@ -2910,7 +2926,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
if (!devdata)
break;
- devdata->i915 = i915;
+ devdata->display = display;
child = &devdata->child;
if (port == PORT_F)
@@ -2929,15 +2945,15 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
if (port == PORT_A)
child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR;
- list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
+ list_add_tail(&devdata->node, &display->vbt.display_devices);
- drm_dbg_kms(&i915->drm,
- "Generating default VBT child device with type 0x04%x on port %c\n",
+ drm_dbg_kms(display->drm,
+ "Generating default VBT child device with type 0x%04x on port %c\n",
child->device_type, port_name(port));
}
/* Bypass some minimum baseline VBT version checks */
- i915->display.vbt.version = 155;
+ display->vbt.version = 155;
}
static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
@@ -2949,13 +2965,13 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
/**
* intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
- * @i915: the device
+ * @display: display device
* @buf: pointer to a buffer to validate
* @size: size of the buffer
*
* Returns true on valid VBT.
*/
-bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
+bool intel_bios_is_valid_vbt(struct intel_display *display,
const void *buf, size_t size)
{
const struct vbt_header *vbt = buf;
@@ -2965,17 +2981,18 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
return false;
if (sizeof(struct vbt_header) > size) {
- drm_dbg_kms(&i915->drm, "VBT header incomplete\n");
+ drm_dbg_kms(display->drm, "VBT header incomplete\n");
return false;
}
if (memcmp(vbt->signature, "$VBT", 4)) {
- drm_dbg_kms(&i915->drm, "VBT invalid signature\n");
+ drm_dbg_kms(display->drm, "VBT invalid signature\n");
return false;
}
if (vbt->vbt_size > size) {
- drm_dbg_kms(&i915->drm, "VBT incomplete (vbt_size overflows)\n");
+ drm_dbg_kms(display->drm,
+ "VBT incomplete (vbt_size overflows)\n");
return false;
}
@@ -2985,48 +3002,48 @@ bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
vbt->bdb_offset,
sizeof(struct bdb_header),
size)) {
- drm_dbg_kms(&i915->drm, "BDB header incomplete\n");
+ drm_dbg_kms(display->drm, "BDB header incomplete\n");
return false;
}
bdb = get_bdb_header(vbt);
if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
- drm_dbg_kms(&i915->drm, "BDB incomplete\n");
+ drm_dbg_kms(display->drm, "BDB incomplete\n");
return false;
}
return vbt;
}
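The validator above layers its checks: the header must fit, the signature must match, the declared vbt_size must fit, and then the BDB header and BDB block are range-checked against the buffer. Each range check has to be overflow-safe; a sketch of the idea behind range_overflows_t (simplified, the real i915 macro is type-parameterised):

#include <stdio.h>
#include <stddef.h>

static int range_overflows(size_t start, size_t len, size_t total)
{
	return start > total || len > total - start;
}

int main(void)
{
	size_t buf_size = 4096;

	/* e.g. bdb_offset + sizeof(bdb_header) must stay inside the buffer */
	printf("%d\n", range_overflows(4090, 24, buf_size));	/* 1: reject */
	printf("%d\n", range_overflows(1024, 24, buf_size));	/* 0: ok */
	return 0;
}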
-static struct vbt_header *firmware_get_vbt(struct drm_i915_private *i915,
+static struct vbt_header *firmware_get_vbt(struct intel_display *display,
size_t *size)
{
struct vbt_header *vbt = NULL;
const struct firmware *fw = NULL;
- const char *name = i915->display.params.vbt_firmware;
+ const char *name = display->params.vbt_firmware;
int ret;
if (!name || !*name)
return NULL;
- ret = request_firmware(&fw, name, i915->drm.dev);
+ ret = request_firmware(&fw, name, display->drm->dev);
if (ret) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Requesting VBT firmware \"%s\" failed (%d)\n",
name, ret);
return NULL;
}
- if (intel_bios_is_valid_vbt(i915, fw->data, fw->size)) {
+ if (intel_bios_is_valid_vbt(display, fw->data, fw->size)) {
vbt = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (vbt) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Found valid VBT firmware \"%s\"\n", name);
if (size)
*size = fw->size;
}
} else {
- drm_dbg_kms(&i915->drm, "Invalid VBT firmware \"%s\"\n",
+ drm_dbg_kms(display->drm, "Invalid VBT firmware \"%s\"\n",
name);
}
@@ -3042,9 +3059,10 @@ static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER);
}
-static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915,
+static struct vbt_header *spi_oprom_get_vbt(struct intel_display *display,
size_t *size)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 count, data, found, store = 0;
u32 static_region, oprom_offset;
u32 oprom_size = 0x200000;
@@ -3081,10 +3099,10 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915,
for (count = 0; count < vbt_size; count += 4)
*(vbt + store++) = intel_spi_read(&i915->uncore, found + count);
- if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
+ if (!intel_bios_is_valid_vbt(display, vbt, vbt_size))
goto err_free_vbt;
- drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
+ drm_dbg_kms(display->drm, "Found valid VBT in SPI flash\n");
if (size)
*size = vbt_size;
@@ -3097,10 +3115,10 @@ err_not_found:
return NULL;
}
-static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
+static struct vbt_header *oprom_get_vbt(struct intel_display *display,
size_t *sizep)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
void __iomem *p = NULL, *oprom;
struct vbt_header *vbt;
u16 vbt_size;
@@ -3124,13 +3142,13 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
goto err_unmap_oprom;
if (sizeof(struct vbt_header) > size) {
- drm_dbg(&i915->drm, "VBT header incomplete\n");
+ drm_dbg(display->drm, "VBT header incomplete\n");
goto err_unmap_oprom;
}
vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
if (vbt_size > size) {
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"VBT incomplete (vbt_size overflows)\n");
goto err_unmap_oprom;
}
@@ -3142,7 +3160,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
memcpy_fromio(vbt, p, vbt_size);
- if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
+ if (!intel_bios_is_valid_vbt(display, vbt, vbt_size))
goto err_free_vbt;
pci_unmap_rom(pdev, oprom);
@@ -3150,7 +3168,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915,
if (sizep)
*sizep = vbt_size;
- drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
+ drm_dbg_kms(display->drm, "Found valid VBT in PCI ROM\n");
return vbt;
@@ -3162,16 +3180,17 @@ err_unmap_oprom:
return NULL;
}
-static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915,
+static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display,
size_t *sizep)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
intel_wakeref_t wakeref;
- vbt = firmware_get_vbt(i915, sizep);
+ vbt = firmware_get_vbt(display, sizep);
if (!vbt)
- vbt = intel_opregion_get_vbt(i915, sizep);
+ vbt = intel_opregion_get_vbt(display, sizep);
/*
* If the OpRegion does not have VBT, look in SPI flash
@@ -3179,76 +3198,77 @@ static const struct vbt_header *intel_bios_get_vbt(struct drm_i915_private *i915
*/
if (!vbt && IS_DGFX(i915))
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vbt = spi_oprom_get_vbt(i915, sizep);
+ vbt = spi_oprom_get_vbt(display, sizep);
if (!vbt)
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vbt = oprom_get_vbt(i915, sizep);
+ vbt = oprom_get_vbt(display, sizep);
return vbt;
}
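
The lookup order implemented by intel_bios_get_vbt() above is: the vbt_firmware module parameter, then the ACPI OpRegion, then SPI flash (discrete GPUs only, under a runtime PM wakeref), and finally the PCI option ROM. As a rough, self-contained model of that fallback chain; the stub fetchers are hypothetical stand-ins for firmware_get_vbt(), intel_opregion_get_vbt(), spi_oprom_get_vbt() and oprom_get_vbt(), not driver code:

/* Userspace model of the VBT source priority, for illustration only. */
#include <stddef.h>
#include <stdio.h>

static const char *from_firmware(void) { return NULL; } /* vbt_firmware= */
static const char *from_opregion(void) { return NULL; } /* ACPI OpRegion */
static const char *from_spi(int dgfx)  { return dgfx ? "spi" : NULL; }
static const char *from_oprom(void)    { return "oprom"; } /* PCI ROM */

static const char *get_vbt(int dgfx)
{
	const char *vbt;

	vbt = from_firmware();           /* 1: user-supplied override */
	if (!vbt)
		vbt = from_opregion();   /* 2: ACPI OpRegion */
	if (!vbt)
		vbt = from_spi(dgfx);    /* 3: SPI flash, discrete only */
	if (!vbt)
		vbt = from_oprom();      /* 4: PCI option ROM */
	return vbt;
}

int main(void)
{
	printf("dgfx=0 -> %s\n", get_vbt(0)); /* "oprom" */
	printf("dgfx=1 -> %s\n", get_vbt(1)); /* "spi" */
	return 0;
}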
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
- * @i915: i915 device instance
+ * @display: display device instance
*
* Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
* was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
* initialize some defaults if the VBT is not present at all.
*/
-void intel_bios_init(struct drm_i915_private *i915)
+void intel_bios_init(struct intel_display *display)
{
const struct vbt_header *vbt;
const struct bdb_header *bdb;
- INIT_LIST_HEAD(&i915->display.vbt.display_devices);
- INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks);
+ INIT_LIST_HEAD(&display->vbt.display_devices);
+ INIT_LIST_HEAD(&display->vbt.bdb_blocks);
- if (!HAS_DISPLAY(i915)) {
- drm_dbg_kms(&i915->drm,
+ if (!HAS_DISPLAY(display)) {
+ drm_dbg_kms(display->drm,
"Skipping VBT init due to disabled display.\n");
return;
}
- init_vbt_defaults(i915);
+ init_vbt_defaults(display);
- vbt = intel_bios_get_vbt(i915, NULL);
+ vbt = intel_bios_get_vbt(display, NULL);
if (!vbt)
goto out;
bdb = get_bdb_header(vbt);
- i915->display.vbt.version = bdb->version;
+ display->vbt.version = bdb->version;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version);
+ (int)sizeof(vbt->signature), vbt->signature,
+ display->vbt.version);
- init_bdb_blocks(i915, bdb);
+ init_bdb_blocks(display, bdb);
/* Grab useful general definitions */
- parse_general_features(i915);
- parse_general_definitions(i915);
- parse_driver_features(i915);
+ parse_general_features(display);
+ parse_general_definitions(display);
+ parse_driver_features(display);
/* Depends on child device list */
- parse_compression_parameters(i915);
+ parse_compression_parameters(display);
out:
if (!vbt) {
- drm_info(&i915->drm,
+ drm_info(display->drm,
"Failed to find VBIOS tables (VBT)\n");
- init_vbt_missing_defaults(i915);
+ init_vbt_missing_defaults(display);
}
/* Further processing on pre-parsed or generated child device data */
- parse_sdvo_device_mapping(i915);
- parse_ddi_ports(i915);
+ parse_sdvo_device_mapping(display);
+ parse_ddi_ports(display);
kfree(vbt);
}
-static void intel_bios_init_panel(struct drm_i915_private *i915,
+static void intel_bios_init_panel(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid,
@@ -3256,63 +3276,64 @@ static void intel_bios_init_panel(struct drm_i915_private *i915,
{
/* already have it? */
if (panel->vbt.panel_type >= 0) {
- drm_WARN_ON(&i915->drm, !use_fallback);
+ drm_WARN_ON(display->drm, !use_fallback);
return;
}
- panel->vbt.panel_type = get_panel_type(i915, devdata,
+ panel->vbt.panel_type = get_panel_type(display, devdata,
drm_edid, use_fallback);
if (panel->vbt.panel_type < 0) {
- drm_WARN_ON(&i915->drm, use_fallback);
+ drm_WARN_ON(display->drm, use_fallback);
return;
}
init_vbt_panel_defaults(panel);
- parse_panel_options(i915, panel);
- parse_generic_dtd(i915, panel);
- parse_lfp_data(i915, panel);
- parse_lfp_backlight(i915, panel);
- parse_sdvo_lvds_data(i915, panel);
- parse_panel_driver_features(i915, panel);
- parse_power_conservation_features(i915, panel);
- parse_edp(i915, panel);
- parse_psr(i915, panel);
- parse_mipi_config(i915, panel);
- parse_mipi_sequence(i915, panel);
+ parse_panel_options(display, panel);
+ parse_generic_dtd(display, panel);
+ parse_lfp_data(display, panel);
+ parse_lfp_backlight(display, panel);
+ parse_sdvo_lvds_data(display, panel);
+ parse_panel_driver_features(display, panel);
+ parse_power_conservation_features(display, panel);
+ parse_edp(display, panel);
+ parse_psr(display, panel);
+ parse_mipi_config(display, panel);
+ parse_mipi_sequence(display, panel);
}
-void intel_bios_init_panel_early(struct drm_i915_private *i915,
+void intel_bios_init_panel_early(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata)
{
- intel_bios_init_panel(i915, panel, devdata, NULL, false);
+ intel_bios_init_panel(display, panel, devdata, NULL, false);
}
-void intel_bios_init_panel_late(struct drm_i915_private *i915,
+void intel_bios_init_panel_late(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid)
{
- intel_bios_init_panel(i915, panel, devdata, drm_edid, true);
+ intel_bios_init_panel(display, panel, devdata, drm_edid, true);
}
/**
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
- * @i915: i915 device instance
+ * @display: display device instance
*/
-void intel_bios_driver_remove(struct drm_i915_private *i915)
+void intel_bios_driver_remove(struct intel_display *display)
{
struct intel_bios_encoder_data *devdata, *nd;
struct bdb_block_entry *entry, *ne;
- list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry_safe(devdata, nd, &display->vbt.display_devices,
+ node) {
list_del(&devdata->node);
kfree(devdata->dsc);
kfree(devdata);
}
- list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) {
+ list_for_each_entry_safe(entry, ne, &display->vbt.bdb_blocks, node) {
list_del(&entry->node);
kfree(entry);
}
@@ -3336,22 +3357,22 @@ void intel_bios_fini_panel(struct intel_panel *panel)
/**
* intel_bios_is_tv_present - is integrated TV present in VBT
- * @i915: i915 device instance
+ * @display: display device instance
*
* Return true if TV is present. If no child devices were parsed from VBT,
* assume TV is present.
*/
-bool intel_bios_is_tv_present(struct drm_i915_private *i915)
+bool intel_bios_is_tv_present(struct intel_display *display)
{
const struct intel_bios_encoder_data *devdata;
- if (!i915->display.vbt.int_tv_support)
+ if (!display->vbt.int_tv_support)
return false;
- if (list_empty(&i915->display.vbt.display_devices))
+ if (list_empty(&display->vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
/*
@@ -3377,20 +3398,21 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
/**
* intel_bios_is_lvds_present - is LVDS present in VBT
- * @i915: i915 device instance
+ * @display: display device instance
* @i2c_pin: i2c pin for LVDS if present
*
* Return true if LVDS is present. If no child devices were parsed from VBT,
* assume LVDS is present.
*/
-bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
+bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
- if (list_empty(&i915->display.vbt.display_devices))
+ if (list_empty(&display->vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
/* If the device type is not LFP, continue.
@@ -3417,7 +3439,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- return intel_opregion_vbt_present(i915);
+ return intel_opregion_vbt_present(display);
}
return false;
@@ -3425,25 +3447,25 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
/**
* intel_bios_is_port_present - is the specified digital port present
- * @i915: i915 device instance
+ * @display: display device instance
* @port: port to check
*
* Return true if the device in %port is present.
*/
-bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
+bool intel_bios_is_port_present(struct intel_display *display, enum port port)
{
const struct intel_bios_encoder_data *devdata;
- if (WARN_ON(!has_ddi_port_info(i915)))
+ if (WARN_ON(!has_ddi_port_info(display)))
return true;
- if (!is_port_valid(i915, port))
+ if (!is_port_valid(display, port))
return false;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
- if (dvo_port_to_port(i915, child->dvo_port) == port)
+ if (dvo_port_to_port(display, child->dvo_port) == port)
return true;
}
@@ -3474,32 +3496,32 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
/**
* intel_bios_is_dsi_present - is DSI present in VBT
- * @i915: i915 device instance
+ * @display: display device instance
* @port: port for DSI if present
*
* Return true if DSI is present, and return the port in %port.
*/
-bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
+bool intel_bios_is_dsi_present(struct intel_display *display,
enum port *port)
{
const struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
u8 dvo_port = child->dvo_port;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
- drm_dbg_kms(&i915->drm,
+ if (dsi_dvo_port_to_port(display, dvo_port) == PORT_NONE) {
+ drm_dbg_kms(display->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
continue;
}
if (port)
- *port = dsi_dvo_port_to_port(i915, dvo_port);
+ *port = dsi_dvo_port_to_port(display, dvo_port);
return true;
}
@@ -3510,7 +3532,7 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
struct dsc_compression_parameters_entry *dsc,
int dsc_max_bpc)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
int bpc = 8;
@@ -3524,13 +3546,13 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
else if (dsc->support_8bpc && dsc_max_bpc >= 8)
bpc = 8;
else
- drm_dbg_kms(&i915->drm, "VBT: Unsupported BPC %d for DSC\n",
+ drm_dbg_kms(display->drm, "VBT: Unsupported BPC %d for DSC\n",
dsc_max_bpc);
crtc_state->pipe_bpp = bpc * 3;
- crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp,
- VBT_DSC_MAX_BPP(dsc->max_bpp)));
+ crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp)));
/*
* FIXME: This is ugly, and slice count should take DSC engine
@@ -3545,14 +3567,16 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
} else {
/* FIXME */
if (!(dsc->slices_per_line & BIT(0)))
- drm_dbg_kms(&i915->drm, "VBT: Unsupported DSC slice count for DSI\n");
+ drm_dbg_kms(display->drm,
+ "VBT: Unsupported DSC slice count for DSI\n");
crtc_state->dsc.slice_count = 1;
}
if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
crtc_state->dsc.slice_count != 0)
- drm_dbg_kms(&i915->drm, "VBT: DSC hdisplay %d not divisible by slice count %d\n",
+ drm_dbg_kms(display->drm,
+ "VBT: DSC hdisplay %d not divisible by slice count %d\n",
crtc_state->hw.adjusted_mode.crtc_hdisplay,
crtc_state->dsc.slice_count);
@@ -3576,16 +3600,16 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
const struct child_device_config *child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
+ if (dsi_dvo_port_to_port(display, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;
@@ -3645,12 +3669,13 @@ static const u8 direct_aux_ch_map[] = {
[AUX_CH_I] = DP_AUX_I, /* aka AUX_CH_USBC6 */
};
-static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel)
+static enum aux_ch map_aux_ch(struct intel_display *display, u8 aux_channel)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *aux_ch_map;
int i, n_entries;
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
aux_ch_map = adlp_aux_ch_map;
n_entries = ARRAY_SIZE(adlp_aux_ch_map);
} else if (IS_ALDERLAKE_S(i915)) {
@@ -3669,7 +3694,7 @@ static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel)
return i;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Ignoring alternate AUX CH: VBT claims AUX 0x%x, which is not valid for this platform\n",
aux_channel);
@@ -3681,22 +3706,22 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
if (!devdata || !devdata->child.aux_channel)
return AUX_CH_NONE;
- return map_aux_ch(devdata->i915, devdata->child.aux_channel);
+ return map_aux_ch(devdata->display, devdata->child.aux_channel);
}
bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915;
+ struct intel_display *display;
u8 aux_channel;
int count = 0;
if (!devdata || !devdata->child.aux_channel)
return false;
- i915 = devdata->i915;
+ display = devdata->display;
aux_channel = devdata->child.aux_channel;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
if (intel_bios_encoder_supports_dp(devdata) &&
aux_channel == devdata->child.aux_channel)
count++;
@@ -3707,18 +3732,18 @@ bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devda
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->display->vbt.version < 196 || !devdata->child.iboost)
return 0;
- return translate_iboost(devdata->i915, devdata->child.dp_iboost_level);
+ return translate_iboost(devdata->display, devdata->child.dp_iboost_level);
}
int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->display->vbt.version < 196 || !devdata->child.iboost)
return 0;
- return translate_iboost(devdata->i915, devdata->child.hdmi_iboost_level);
+ return translate_iboost(devdata->display, devdata->child.hdmi_iboost_level);
}
int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
@@ -3726,17 +3751,17 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
if (!devdata || !devdata->child.ddc_pin)
return 0;
- return map_ddc_pin(devdata->i915, devdata->child.ddc_pin);
+ return map_ddc_pin(devdata->display, devdata->child.ddc_pin);
}
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c;
+ return devdata->display->vbt.version >= 195 && devdata->child.dp_usb_type_c;
}
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
+ return devdata->display->vbt.version >= 209 && devdata->child.tbt;
}
bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata)
@@ -3750,11 +3775,11 @@ bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata
}
const struct intel_bios_encoder_data *
-intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
+intel_bios_encoder_data_lookup(struct intel_display *display, enum port port)
{
struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ list_for_each_entry(devdata, &display->vbt.display_devices, node) {
if (intel_bios_encoder_port(devdata) == port)
return devdata;
}
@@ -3762,23 +3787,23 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
return NULL;
}
-void intel_bios_for_each_encoder(struct drm_i915_private *i915,
- void (*func)(struct drm_i915_private *i915,
+void intel_bios_for_each_encoder(struct intel_display *display,
+ void (*func)(struct intel_display *display,
const struct intel_bios_encoder_data *devdata))
{
struct intel_bios_encoder_data *devdata;
- list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
- func(i915, devdata);
+ list_for_each_entry(devdata, &display->vbt.display_devices, node)
+ func(display, devdata);
}
static int intel_bios_vbt_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
const void *vbt;
size_t vbt_size;
- vbt = intel_bios_get_vbt(i915, &vbt_size);
+ vbt = intel_bios_get_vbt(display, &vbt_size);
if (vbt) {
seq_write(m, vbt, vbt_size);
@@ -3790,10 +3815,10 @@ static int intel_bios_vbt_show(struct seq_file *m, void *unused)
DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
-void intel_bios_debugfs_register(struct drm_i915_private *i915)
+void intel_bios_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
- i915, &intel_bios_vbt_fops);
+ display, &intel_bios_vbt_fops);
}
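
intel_bios_vbt_show() above follows the standard seq_file show pattern: DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt) generates the intel_bios_vbt_fops passed to debugfs_create_file(), and whatever pointer is registered as the file's data comes back as m->private. A minimal sketch of the same pattern; my_vbt is an invented name for illustration, not a real i915 attribute, and the sketch assumes the i915 display headers for struct intel_display:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int my_vbt_show(struct seq_file *m, void *unused)
{
	struct intel_display *display = m->private; /* data round-trips */

	seq_printf(m, "vbt version: %d\n", display->vbt.version);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_vbt); /* generates my_vbt_fops */

/* Registration, e.g. from a debugfs_register() hook:
 * debugfs_create_file("my_vbt", 0444, minor->debugfs_root,
 *                     display, &my_vbt_fops);
 */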
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 06a51be4afd8..8b703f6cfe17 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -33,9 +33,9 @@
#include <linux/types.h>
struct drm_edid;
-struct drm_i915_private;
struct intel_bios_encoder_data;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
struct intel_panel;
enum aux_ch;
@@ -232,28 +232,28 @@ struct mipi_pps_data {
u16 panel_power_cycle_delay;
} __packed;
-void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
+void intel_bios_init(struct intel_display *display);
+void intel_bios_init_panel_early(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata);
-void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
+void intel_bios_init_panel_late(struct intel_display *display,
struct intel_panel *panel,
const struct intel_bios_encoder_data *devdata,
const struct drm_edid *drm_edid);
void intel_bios_fini_panel(struct intel_panel *panel);
-void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
-bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
+void intel_bios_driver_remove(struct intel_display *display);
+bool intel_bios_is_valid_vbt(struct intel_display *display,
const void *buf, size_t size);
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_tv_present(struct intel_display *display);
+bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct intel_display *display, enum port port);
+bool intel_bios_is_dsi_present(struct intel_display *display, enum port *port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
const struct intel_bios_encoder_data *
-intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
+intel_bios_encoder_data_lookup(struct intel_display *display, enum port port);
bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata);
@@ -277,10 +277,10 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata);
-void intel_bios_for_each_encoder(struct drm_i915_private *i915,
- void (*func)(struct drm_i915_private *i915,
+void intel_bios_for_each_encoder(struct intel_display *display,
+ void (*func)(struct intel_display *display,
const struct intel_bios_encoder_data *devdata));
-void intel_bios_debugfs_register(struct drm_i915_private *i915);
+void intel_bios_debugfs_register(struct intel_display *display);
#endif /* _INTEL_BIOS_H_ */
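
The header hunk above captures the mechanical shape of the whole series: forward-declare struct intel_display in place of struct drm_i915_private, take the display pointer in every entry point, log via display->drm, and bridge back to the core device only where non-display facilities (uncore, runtime PM) are still needed. A bare-bones sketch of the pattern, with the types reduced to essentials and to_i915() re-derived as a container_of-style stand-in for the driver's helper:

#include <stddef.h>

struct drm_device { int dummy; };
struct drm_i915_private { struct drm_device drm; };
struct intel_display { struct drm_device *drm; };

/* container_of-style stand-in; assumes drm is embedded in i915 */
static struct drm_i915_private *to_i915(struct drm_device *drm)
{
	return (struct drm_i915_private *)((char *)drm -
		offsetof(struct drm_i915_private, drm));
}

/* Before: void intel_bios_foo(struct drm_i915_private *i915);
 * After: display-first, with an explicit bridge where still needed.
 */
static void intel_bios_foo(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	(void)i915; /* only used for not-yet-converted facilities */
}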
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 16d5550f7e5e..aa3ba66c5307 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -23,7 +23,10 @@
#include <linux/time.h>
+#include <drm/drm_fixed.h>
+
#include "soc/intel_dram.h"
+
#include "hsw_ips.h"
#include "i915_reg.h"
#include "intel_atomic.h"
@@ -2750,7 +2753,7 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
*/
int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
int min_cdclk_bj =
- (to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
+ (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
pixel_clock) / (2 * bigjoiner_interface_bits);
min_cdclk = max(min_cdclk, min_cdclk_bj);
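
The to_bpp_x16()/to_bpp_int_roundup() helpers are replaced here by the generic Q4 fixed-point helpers from <drm/drm_fixed.h>: link bpp values are stored as integers scaled by 16, i.e. with four fractional bits. A self-contained model of the conversions used above; the real macros live in drm_fixed.h, so treat these as illustrative re-derivations:

#include <assert.h>

/* Q4 fixed point: value * 16, four fractional bits. */
static int fxp_q4_from_int(int val)          { return val << 4; }
static int fxp_q4_to_int(int val_q4)         { return val_q4 >> 4; }
static int fxp_q4_to_int_roundup(int val_q4) { return (val_q4 + 0xf) >> 4; }

int main(void)
{
	int bpp_x16 = fxp_q4_from_int(18); /* 18.0000 bpp -> 288 */

	assert(bpp_x16 == 288);
	assert(fxp_q4_to_int(bpp_x16 + 5) == 18);         /* truncates */
	assert(fxp_q4_to_int_roundup(bpp_x16 + 5) == 19); /* rounds up */
	return 0;
}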
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 7ac50aacec73..5d701f48351b 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1313,8 +1313,8 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (crtc_state->dsb)
- intel_dsb_reg_write(crtc_state->dsb, reg, val);
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val);
else
intel_de_write_fw(i915, reg, val);
}
@@ -1337,15 +1337,15 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
* unless we either write each entry twice,
* or use non-posted writes
*/
- if (crtc_state->dsb)
- intel_dsb_nonpost_start(crtc_state->dsb);
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_nonpost_start(crtc_state->dsb_color_vblank);
for (i = 0; i < 256; i++)
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
- if (crtc_state->dsb)
- intel_dsb_nonpost_end(crtc_state->dsb);
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_nonpost_end(crtc_state->dsb_color_vblank);
}
static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
@@ -1870,7 +1870,7 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (crtc_state->dsb)
+ if (crtc_state->dsb_color_vblank)
return;
i915->display.funcs.color->load_luts(crtc_state);
@@ -1890,8 +1890,8 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_commit_arm(crtc_state);
- if (crtc_state->dsb)
- intel_dsb_commit(crtc_state->dsb, true);
+ if (crtc_state->dsb_color_commit)
+ intel_dsb_commit(crtc_state->dsb_color_commit, false);
}
void intel_color_post_update(const struct intel_crtc_state *crtc_state)
@@ -1919,33 +1919,51 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
return;
- crtc_state->dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
- if (!crtc_state->dsb)
+ crtc_state->dsb_color_vblank = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
+ if (!crtc_state->dsb_color_vblank)
return;
i915->display.funcs.color->load_luts(crtc_state);
- intel_dsb_finish(crtc_state->dsb);
+ intel_dsb_finish(crtc_state->dsb_color_vblank);
+
+ crtc_state->dsb_color_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 16);
+ if (!crtc_state->dsb_color_commit) {
+ intel_dsb_cleanup(crtc_state->dsb_color_vblank);
+ crtc_state->dsb_color_vblank = NULL;
+ return;
+ }
+
+ intel_dsb_chain(state, crtc_state->dsb_color_commit,
+ crtc_state->dsb_color_vblank, true);
+
+ intel_dsb_finish(crtc_state->dsb_color_commit);
}
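
The color commit path now uses two chained display state buffers (DSBs) instead of one: dsb_color_vblank carries the LUT writes, while the small dsb_color_commit buffer merely chains to it, so the arm step can trigger the LUT update to run at vblank without further CPU involvement. A condensed view of the lifecycle, distilled from the hunks above as a reading aid rather than compilable code; the final intel_dsb_chain() argument is presumably the wait-for-vblank flag:

struct intel_dsb *vblank, *commit;

/* prepare phase (intel_color_prepare_commit) */
vblank = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);
/* ->load_luts() emits the LUT register writes into 'vblank' */
intel_dsb_finish(vblank);

commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 16);
intel_dsb_chain(state, commit, vblank, true); /* 'commit' runs 'vblank' */
intel_dsb_finish(commit);

/* arm phase (intel_color_commit_arm): non-blocking trigger */
intel_dsb_commit(commit, false);

/* completion (intel_color_wait_commit / intel_color_cleanup_commit) */
intel_dsb_wait(commit);
intel_dsb_wait(vblank);
intel_dsb_cleanup(commit);
intel_dsb_cleanup(vblank);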
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
{
- if (!crtc_state->dsb)
- return;
+ if (crtc_state->dsb_color_commit) {
+ intel_dsb_cleanup(crtc_state->dsb_color_commit);
+ crtc_state->dsb_color_commit = NULL;
+ }
- intel_dsb_cleanup(crtc_state->dsb);
- crtc_state->dsb = NULL;
+ if (crtc_state->dsb_color_vblank) {
+ intel_dsb_cleanup(crtc_state->dsb_color_vblank);
+ crtc_state->dsb_color_vblank = NULL;
+ }
}
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb)
- intel_dsb_wait(crtc_state->dsb);
+ if (crtc_state->dsb_color_commit)
+ intel_dsb_wait(crtc_state->dsb_color_commit);
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_wait(crtc_state->dsb_color_vblank);
}
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->dsb;
+ return crtc_state->dsb_color_vblank;
}
static bool intel_can_preload_luts(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 143d66951631..3252dab56430 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -159,9 +159,11 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
{
- bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A);
- bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D);
- bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+ struct intel_display *display = &i915->display;
+
+ bool ddi_a_present = intel_bios_is_port_present(display, PORT_A);
+ bool ddi_d_present = intel_bios_is_port_present(display, PORT_D);
+ bool dsi_present = intel_bios_is_dsi_present(display, NULL);
/*
* VBT's 'dvo port' field for child devices references the DDI, not
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 6df526e189b5..705ec5ad385c 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -10,6 +10,7 @@
#include "intel_crtc_state_dump.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
+#include "intel_vdsc.h"
#include "intel_vrr.h"
static void intel_dump_crtc_timings(struct drm_printer *p,
@@ -369,6 +370,8 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
else if (IS_VALLEYVIEW(i915))
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
+ intel_vdsc_state_dump(&p, 0, pipe_config);
+
dump_planes:
if (!state)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index a07aca96e551..b1c294236cc8 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -916,7 +916,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port,
* instead of a specific AUX_IO_<port> reference without powering up any
* extra wells.
*/
- if (intel_encoder_can_psr(&dig_port->base))
+ if (intel_psr_needs_aux_io_power(&dig_port->base, crtc_state))
return intel_display_power_aux_io_domain(i915, dig_port->aux_ch);
else if (DISPLAY_VER(i915) < 14 &&
(intel_crtc_has_dp_encoder(crtc_state) ||
@@ -1400,7 +1400,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int i;
for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
@@ -1408,7 +1408,7 @@ static int translate_signal_level(struct intel_dp *intel_dp,
return i;
}
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"Unsupported voltage swing/pre-emphasis level: 0x%x\n",
signal_levels);
@@ -2211,14 +2211,14 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
const struct intel_crtc_state *crtc_state,
bool enable)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!crtc_state->vrr.enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s MSA_TIMING_PAR_IGNORE in the sink\n",
str_enable_disable(enable));
}
@@ -2227,20 +2227,20 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!crtc_state->fec_enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION,
enable ? DP_FEC_READY : 0) <= 0)
- drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n",
+ drm_dbg_kms(display->drm, "Failed to set FEC_READY to %s in the sink\n",
enable ? "enabled" : "disabled");
if (enable &&
drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS,
DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0)
- drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n");
+ drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n");
}
static int read_fec_detected_status(struct drm_dp_aux *aux)
@@ -4172,7 +4172,8 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
crtc_state);
- if (intel_encoder_is_dp(encoder))
+ if ((crtc_state && intel_crtc_has_dp_encoder(crtc_state)) ||
+ (!crtc_state && intel_encoder_is_dp(encoder)))
intel_dp_sync_state(encoder, crtc_state);
}
@@ -4853,9 +4854,10 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port)
return false;
}
-void intel_ddi_init(struct drm_i915_private *dev_priv,
+void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp;
@@ -4898,7 +4900,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
* driver. In that case we should skip initializing the corresponding
* outputs.
*/
- if (intel_hti_uses_phy(dev_priv, phy)) {
+ if (intel_hti_uses_phy(display, phy)) {
drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
port_name(port), phy_name(phy));
return;
@@ -4972,7 +4974,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
} else {
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c", port_name(port), phy_name(phy));
+ "DDI %c/PHY %c", port_name(port), phy_name(phy));
}
intel_encoder_link_check_init(encoder, intel_ddi_link_check);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 434de7196875..6d85422bdefe 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -15,6 +15,7 @@ struct intel_bios_encoder_data;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
@@ -53,7 +54,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port);
-void intel_ddi_init(struct drm_i915_private *dev_priv,
+void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index c2c388212e2e..b4ef4d59da1a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -39,6 +39,7 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -1014,9 +1015,14 @@ static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}
-static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
+static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
if (!new_crtc_state->hw.active)
return false;
@@ -1026,9 +1032,14 @@ static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
vrr_params_changed(old_crtc_state, new_crtc_state)));
}
-static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
+bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
if (!old_crtc_state->hw.active)
return false;
@@ -1181,7 +1192,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- if (vrr_disabling(old_crtc_state, new_crtc_state)) {
+ if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
}
@@ -4669,11 +4680,11 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
- if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) {
+ if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s] Link bpp limited to " BPP_X16_FMT "\n",
+ "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
crtc->base.base.id, crtc->base.name,
- BPP_X16_ARGS(crtc_state->max_link_bpp_x16));
+ FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
crtc_state->bw_constrained = true;
}
@@ -5100,7 +5111,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (current_config->name != pipe_config->name) { \
BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
__stringify(name) " is not bool"); \
- pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
+ pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
str_yes_no(current_config->name), \
str_yes_no(pipe_config->name)); \
@@ -6249,6 +6260,8 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
break;
default:
drm_dbg_kms(&i915->drm,
@@ -6830,8 +6843,6 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -6844,7 +6855,7 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
!intel_crtc_needs_modeset(new_crtc_state))
skl_detach_scalers(new_crtc_state);
- if (vrr_enabling(old_crtc_state, new_crtc_state))
+ if (intel_crtc_vrr_enabling(state, crtc))
intel_vrr_enable(new_crtc_state);
}
@@ -6944,7 +6955,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
*
* FIXME Should be synchronized with the start of vblank somehow...
*/
- if (vrr_enabling(old_crtc_state, new_crtc_state) ||
+ if (intel_crtc_vrr_enabling(state, crtc) ||
new_crtc_state->update_m_n || new_crtc_state->update_lrr)
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
@@ -7502,7 +7513,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*
* FIXME get rid of this funny new->old swapping
*/
- old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
+ old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
+ old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit);
}
/* Underruns don't always raise interrupts, so check manually */
@@ -7777,10 +7789,11 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port)
void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
if (!HAS_DISPLAY(dev_priv))
return;
@@ -7789,7 +7802,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (intel_ddi_crt_present(dev_priv))
intel_crt_init(dev_priv);
- intel_bios_for_each_encoder(dev_priv, intel_ddi_init);
+ intel_bios_for_each_encoder(display, intel_ddi_init);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
vlv_dsi_init(dev_priv);
@@ -7851,14 +7864,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* HDMI ports that the VBT claim are DP or eDP.
*/
has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
- has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ has_port = intel_bios_is_port_present(display, PORT_B);
if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
- has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ has_port = intel_bios_is_port_present(display, PORT_C);
if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
@@ -7869,7 +7882,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* eDP not supported on port D,
* so no need to worry about it
*/
- has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ has_port = intel_bios_is_port_present(display, PORT_D);
if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
@@ -7923,7 +7936,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
g4x_dp_init(dev_priv, DP_D, PORT_D);
if (SUPPORTS_TV(dev_priv))
- intel_tv_init(dev_priv);
+ intel_tv_init(display);
} else if (DISPLAY_VER(dev_priv) == 2) {
if (IS_I85X(dev_priv))
intel_lvds_init(dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index b0cf6ca70952..b21d9578d5db 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -532,6 +532,9 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
void intel_update_watermarks(struct drm_i915_private *i915);
+bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+
/* modesetting */
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 pipe_mask);
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 7715fc329057..0a711114ff2b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -237,7 +237,7 @@ struct intel_vbt_data {
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
- u8 slave_addr;
+ u8 target_addr;
u8 dvo_wiring;
u8 i2c_pin;
u8 ddc_pin;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 91757fed9c6d..f5f618199d39 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -36,6 +36,7 @@
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_vdsc.h"
#include "intel_wm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -492,7 +493,7 @@ static void crtc_updates_info(struct seq_file *m,
seq_printf(m, "%sMax update: %lluns\n",
hdr, crtc->debug.vbl.max);
seq_printf(m, "%sAverage update: %lluns\n",
- hdr, div64_u64(crtc->debug.vbl.sum, count));
+ hdr, div64_u64(crtc->debug.vbl.sum, count));
seq_printf(m, "%sOverruns > %uus: %u\n",
hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
}
@@ -551,6 +552,7 @@ static void crtc_updates_add(struct intel_crtc *crtc)
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_encoder *encoder;
@@ -581,6 +583,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
crtc_state->joiner_pipes,
intel_crtc_is_joiner_secondary(crtc_state) ? "slave" : "master");
+ intel_vdsc_state_dump(&p, 1, crtc_state);
+
for_each_intel_encoder_mask(&dev_priv->drm, encoder,
crtc_state->uapi.encoder_mask)
intel_encoder_info(m, crtc, encoder);
@@ -1008,7 +1012,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
}
- intel_fbc_reset_underrun(dev_priv);
+ intel_fbc_reset_underrun(&dev_priv->display);
return cnt;
}
@@ -1045,6 +1049,7 @@ static const struct {
void intel_display_debugfs_register(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct drm_minor *minor = i915->drm.primary;
int i;
@@ -1060,15 +1065,15 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
- intel_bios_debugfs_register(i915);
+ intel_bios_debugfs_register(display);
intel_cdclk_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
- intel_fbc_debugfs_register(i915);
+ intel_fbc_debugfs_register(display);
intel_hpd_debugfs_register(i915);
- intel_opregion_debugfs_register(i915);
- intel_psr_debugfs_register(i915);
+ intel_opregion_debugfs_register(display);
+ intel_psr_debugfs_register(display);
intel_wm_debugfs_register(i915);
- intel_display_debugfs_params(i915);
+ intel_display_debugfs_params(display);
}
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index f35718748555..ec3ed29a83c9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -151,13 +151,13 @@ intel_display_debugfs_create_uint(const char *name, umode_t mode,
} while (0)
/* add a subdirectory with files for each intel display param */
-void intel_display_debugfs_params(struct drm_i915_private *i915)
+void intel_display_debugfs_params(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
struct dentry *dir;
char dirname[16];
- snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name);
+ snprintf(dirname, sizeof(dirname), "%s_params", display->drm->driver->name);
dir = debugfs_lookup(dirname, minor->debugfs_root);
if (!dir)
dir = debugfs_create_dir(dirname, minor->debugfs_root);
@@ -171,7 +171,7 @@ void intel_display_debugfs_params(struct drm_i915_private *i915)
*/
#define REGISTER(T, x, unused, mode, ...) _intel_display_param_create_file( \
- dir, #x, mode, &i915->display.params.x);
+ dir, #x, mode, &display->params.x);
INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER);
#undef REGISTER
}
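
The REGISTER/INTEL_DISPLAY_PARAMS_FOR_EACH pair above is the classic X-macro idiom: the parameter list is written once as a higher-order macro, and each consumer supplies its own per-entry expansion. A self-contained toy version with made-up parameter names:

#include <stdio.h>

/* One central list; consumers pass the per-entry macro X. */
#define MY_PARAMS_FOR_EACH(X) \
	X(int,  enable_fbc) \
	X(int,  enable_psr) \
	X(char, panel_type)

/* Expansion 1: declare the struct fields. */
#define DECLARE_FIELD(T, name) T name;
struct my_params { MY_PARAMS_FOR_EACH(DECLARE_FIELD) };
#undef DECLARE_FIELD

/* Expansion 2: emit one statement per parameter. */
#define PRINT_FIELD(T, name) printf(#name "\n");
int main(void)
{
	MY_PARAMS_FOR_EACH(PRINT_FIELD)
	return 0;
}
#undef PRINT_FIELD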
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
index 1e9945a4044c..a1120915a5a8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__
#define __INTEL_DISPLAY_DEBUGFS_PARAMS__
-struct drm_i915_private;
+struct intel_display;
-void intel_display_debugfs_params(struct drm_i915_private *i915);
+void intel_display_debugfs_params(struct intel_display *display);
#endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index dd7dce4b0e7a..1b46ba985580 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -16,14 +16,25 @@
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
+#include "intel_step.h"
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for display info");
+struct stepping_desc {
+ const enum intel_step *map; /* revid to step map */
+ size_t size; /* map size */
+};
+
+#define STEP_INFO(_map) \
+ .step_info.map = _map, \
+ .step_info.size = ARRAY_SIZE(_map)
+
struct subplatform_desc {
enum intel_display_subplatform subplatform;
const char *name;
const u16 *pciidlist;
+ struct stepping_desc step_info;
};
struct platform_desc {
@@ -31,6 +42,7 @@ struct platform_desc {
const char *name;
const struct subplatform_desc *subplatforms;
const struct intel_display_device_info *info; /* NULL for GMD ID */
+ struct stepping_desc step_info;
};
#define PLATFORM(_platform) \
@@ -610,6 +622,13 @@ static const u16 skl_ulx_ids[] = {
0
};
+static const enum intel_step skl_steppings[] = {
+ [0x6] = STEP_G0,
+ [0x7] = STEP_H0,
+ [0x9] = STEP_J0,
+ [0xA] = STEP_I1,
+};
+
static const struct platform_desc skl_desc = {
PLATFORM(SKYLAKE),
.subplatforms = (const struct subplatform_desc[]) {
@@ -618,6 +637,7 @@ static const struct platform_desc skl_desc = {
{},
},
.info = &skl_display,
+ STEP_INFO(skl_steppings),
};
static const u16 kbl_ult_ids[] = {
@@ -634,6 +654,16 @@ static const u16 kbl_ulx_ids[] = {
0
};
+static const enum intel_step kbl_steppings[] = {
+ [1] = STEP_B0,
+ [2] = STEP_B0,
+ [3] = STEP_B0,
+ [4] = STEP_C0,
+ [5] = STEP_B1,
+ [6] = STEP_B1,
+ [7] = STEP_C0,
+};
+
static const struct platform_desc kbl_desc = {
PLATFORM(KABYLAKE),
.subplatforms = (const struct subplatform_desc[]) {
@@ -642,6 +672,7 @@ static const struct platform_desc kbl_desc = {
{},
},
.info = &skl_display,
+ STEP_INFO(kbl_steppings),
};
static const u16 cfl_ult_ids[] = {
@@ -706,6 +737,13 @@ static const struct platform_desc cml_desc = {
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C)
+static const enum intel_step bxt_steppings[] = {
+ [0xA] = STEP_C0,
+ [0xB] = STEP_C0,
+ [0xC] = STEP_D0,
+ [0xD] = STEP_E0,
+};
+
static const struct platform_desc bxt_desc = {
PLATFORM(BROXTON),
.info = &(const struct intel_display_device_info) {
@@ -714,6 +752,11 @@ static const struct platform_desc bxt_desc = {
.__runtime_defaults.ip.ver = 9,
},
+ STEP_INFO(bxt_steppings),
+};
+
+static const enum intel_step glk_steppings[] = {
+ [3] = STEP_B0,
};
static const struct platform_desc glk_desc = {
@@ -725,6 +768,7 @@ static const struct platform_desc glk_desc = {
.__runtime_defaults.ip.ver = 10,
},
+ STEP_INFO(glk_steppings),
};
#define ICL_DISPLAY \
@@ -773,6 +817,10 @@ static const u16 icl_port_f_ids[] = {
0
};
+static const enum intel_step icl_steppings[] = {
+ [7] = STEP_D0,
+};
+
static const struct platform_desc icl_desc = {
PLATFORM(ICELAKE),
.subplatforms = (const struct subplatform_desc[]) {
@@ -784,6 +832,7 @@ static const struct platform_desc icl_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
},
+ STEP_INFO(icl_steppings),
};
static const struct intel_display_device_info jsl_ehl_display = {
@@ -792,14 +841,21 @@ static const struct intel_display_device_info jsl_ehl_display = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D),
};
+static const enum intel_step jsl_ehl_steppings[] = {
+ [0] = STEP_A0,
+ [1] = STEP_B0,
+};
+
static const struct platform_desc jsl_desc = {
PLATFORM(JASPERLAKE),
.info = &jsl_ehl_display,
+ STEP_INFO(jsl_ehl_steppings),
};
static const struct platform_desc ehl_desc = {
PLATFORM(ELKHARTLAKE),
.info = &jsl_ehl_display,
+ STEP_INFO(jsl_ehl_steppings),
};
#define XE_D_DISPLAY \
@@ -850,10 +906,23 @@ static const u16 tgl_uy_ids[] = {
0
};
+static const enum intel_step tgl_steppings[] = {
+ [0] = STEP_B0,
+ [1] = STEP_D0,
+};
+
+static const enum intel_step tgl_uy_steppings[] = {
+ [0] = STEP_A0,
+ [1] = STEP_C0,
+ [2] = STEP_C0,
+ [3] = STEP_D0,
+};
+
static const struct platform_desc tgl_desc = {
PLATFORM(TIGERLAKE),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids },
+ { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids,
+ STEP_INFO(tgl_uy_steppings) },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -866,6 +935,12 @@ static const struct platform_desc tgl_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4) | BIT(PORT_TC5) | BIT(PORT_TC6),
},
+ STEP_INFO(tgl_steppings),
+};
+
+static const enum intel_step dg1_steppings[] = {
+ [0] = STEP_A0,
+ [1] = STEP_B0,
};
static const struct platform_desc dg1_desc = {
@@ -876,6 +951,13 @@ static const struct platform_desc dg1_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
BIT(PORT_TC1) | BIT(PORT_TC2),
},
+ STEP_INFO(dg1_steppings),
+};
+
+static const enum intel_step rkl_steppings[] = {
+ [0] = STEP_A0,
+ [1] = STEP_B0,
+ [4] = STEP_C0,
};
static const struct platform_desc rkl_desc = {
@@ -892,6 +974,7 @@ static const struct platform_desc rkl_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
BIT(PORT_TC1) | BIT(PORT_TC2),
},
+ STEP_INFO(rkl_steppings),
};
static const u16 adls_rpls_ids[] = {
@@ -899,10 +982,24 @@ static const u16 adls_rpls_ids[] = {
0
};
+static const enum intel_step adl_s_steppings[] = {
+ [0x0] = STEP_A0,
+ [0x1] = STEP_A2,
+ [0x4] = STEP_B0,
+ [0x8] = STEP_B0,
+ [0xC] = STEP_C0,
+};
+
+static const enum intel_step adl_s_rpl_s_steppings[] = {
+ [0x4] = STEP_D0,
+ [0xC] = STEP_C0,
+};
+
static const struct platform_desc adl_s_desc = {
PLATFORM(ALDERLAKE_S),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids },
+ { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids,
+ STEP_INFO(adl_s_rpl_s_steppings) },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -913,6 +1010,7 @@ static const struct platform_desc adl_s_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) |
BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
},
+ STEP_INFO(adl_s_steppings),
};
#define XE_LPD_FEATURES \
@@ -986,15 +1084,34 @@ static const u16 adlp_rplp_ids[] = {
0
};
+static const enum intel_step adl_p_steppings[] = {
+ [0x0] = STEP_A0,
+ [0x4] = STEP_B0,
+ [0x8] = STEP_C0,
+ [0xC] = STEP_D0,
+};
+
+static const enum intel_step adl_p_adl_n_steppings[] = {
+ [0x0] = STEP_D0,
+};
+
+static const enum intel_step adl_p_rpl_pu_steppings[] = {
+ [0x4] = STEP_E0,
+};
+
static const struct platform_desc adl_p_desc = {
PLATFORM(ALDERLAKE_P),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids },
- { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids },
- { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids },
+ { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids,
+ STEP_INFO(adl_p_adl_n_steppings) },
+ { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids,
+ STEP_INFO(adl_p_rpl_pu_steppings) },
+ { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids,
+ STEP_INFO(adl_p_rpl_pu_steppings) },
{},
},
.info = &xe_lpd_display,
+ STEP_INFO(adl_p_steppings),
};
static const struct intel_display_device_info xe_hpd_display = {
@@ -1023,12 +1140,33 @@ static const u16 dg2_g12_ids[] = {
0
};
+static const enum intel_step dg2_g10_steppings[] = {
+ [0x0] = STEP_A0,
+ [0x1] = STEP_A0,
+ [0x4] = STEP_B0,
+ [0x8] = STEP_C0,
+};
+
+static const enum intel_step dg2_g11_steppings[] = {
+ [0x0] = STEP_B0,
+ [0x4] = STEP_C0,
+ [0x5] = STEP_C0,
+};
+
+static const enum intel_step dg2_g12_steppings[] = {
+ [0x0] = STEP_C0,
+ [0x1] = STEP_C0,
+};
+
static const struct platform_desc dg2_desc = {
PLATFORM(DG2),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids },
- { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids },
- { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids },
+ { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids,
+ STEP_INFO(dg2_g10_steppings) },
+ { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids,
+ STEP_INFO(dg2_g11_steppings) },
+ { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids,
+ STEP_INFO(dg2_g12_steppings) },
{},
},
.info = &xe_hpd_display,
@@ -1261,13 +1399,66 @@ find_subplatform_desc(struct pci_dev *pdev, const struct platform_desc *desc)
return NULL;
}
+static enum intel_step get_pre_gmdid_step(struct intel_display *display,
+ const struct stepping_desc *main,
+ const struct stepping_desc *sub)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ const enum intel_step *map = main->map;
+ int size = main->size;
+ int revision = pdev->revision;
+ enum intel_step step;
+
+ /* subplatform stepping info trumps main platform info */
+ if (sub && sub->map && sub->size) {
+ map = sub->map;
+ size = sub->size;
+ }
+
+ /* not all platforms define steppings, and it's fine */
+ if (!map || !size)
+ return STEP_NONE;
+
+ if (revision < size && map[revision] != STEP_NONE) {
+ step = map[revision];
+ } else {
+ drm_warn(display->drm, "Unknown revision 0x%02x\n", revision);
+
+ /*
+ * If we hit a gap in the revision to step map, use the information
+ * for the next revision.
+ *
+ * This may be wrong in all sorts of ways, especially if the
+ * steppings in the array are not monotonically increasing, but
+ * it's better than defaulting to 0.
+ */
+ while (revision < size && map[revision] == STEP_NONE)
+ revision++;
+
+ if (revision < size) {
+ drm_dbg_kms(display->drm, "Using display stepping for revision 0x%02x\n",
+ revision);
+ step = map[revision];
+ } else {
+ drm_dbg_kms(display->drm, "Using future display stepping\n");
+ step = STEP_FUTURE;
+ }
+ }
+
+ drm_WARN_ON(display->drm, step == STEP_NONE);
+
+ return step;
+}
+
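
The effect of the gap handling in get_pre_gmdid_step() is that a revision id falling between known entries borrows the next known stepping instead of silently defaulting to A0. A standalone model using the same shape as adl_s_steppings above, with the expected results worked out by hand:

#include <stdio.h>

enum step { STEP_NONE, STEP_A0, STEP_A2, STEP_B0, STEP_C0, STEP_FUTURE };

/* Sparse revid -> step map; revids 0x2-0x3, 0x5-0x7, 0x9-0xB are gaps. */
static const enum step map[] = {
	[0x0] = STEP_A0, [0x1] = STEP_A2, [0x4] = STEP_B0,
	[0x8] = STEP_B0, [0xC] = STEP_C0,
};

static enum step resolve(int rev)
{
	int n = sizeof(map) / sizeof(map[0]);

	if (rev < n && map[rev] != STEP_NONE)
		return map[rev];
	while (rev < n && map[rev] == STEP_NONE)
		rev++; /* borrow the next known entry */
	return rev < n ? map[rev] : STEP_FUTURE;
}

int main(void)
{
	printf("%d\n", resolve(0x2)); /* gap -> STEP_B0 (from revid 0x4) */
	printf("%d\n", resolve(0xD)); /* past the map -> STEP_FUTURE */
	return 0;
}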
void intel_display_device_probe(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
const struct subplatform_desc *subdesc;
+ enum intel_step step;
/* Add drm device backpointer as early as possible. */
i915->display.drm = &i915->drm;
@@ -1307,13 +1498,25 @@ void intel_display_device_probe(struct drm_i915_private *i915)
DISPLAY_RUNTIME_INFO(i915)->subplatform = subdesc->subplatform;
}
- if (ip_ver.ver || ip_ver.rel || ip_ver.step)
+ if (ip_ver.ver || ip_ver.rel || ip_ver.step) {
DISPLAY_RUNTIME_INFO(i915)->ip = ip_ver;
+ step = STEP_A0 + ip_ver.step;
+ if (step > STEP_FUTURE) {
+ drm_dbg_kms(display->drm, "Using future display stepping\n");
+ step = STEP_FUTURE;
+ }
+ } else {
+ step = get_pre_gmdid_step(display, &desc->step_info,
+ subdesc ? &subdesc->step_info : NULL);
+ }
- drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u\n",
+ DISPLAY_RUNTIME_INFO(i915)->step = step;
+
+ drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u stepping %s\n",
desc->name, subdesc ? "/" : "", subdesc ? subdesc->name : "",
pdev->device, DISPLAY_RUNTIME_INFO(i915)->ip.ver,
- DISPLAY_RUNTIME_INFO(i915)->ip.rel);
+ DISPLAY_RUNTIME_INFO(i915)->ip.rel,
+ step != STEP_NONE ? intel_step_name(step) : "N/A");
return;
@@ -1474,6 +1677,9 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
}
}
+ display_runtime->rawclk_freq = intel_read_rawclk(i915);
+ drm_dbg_kms(&i915->drm, "rawclk rate: %d kHz\n", display_runtime->rawclk_freq);
+
return;
display_fused_off:
@@ -1509,6 +1715,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "display version: %u\n",
runtime->ip.ver);
+ drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step));
+
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -1516,6 +1724,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp));
drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc));
drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc));
+
+ drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
/*
@@ -1529,9 +1739,11 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
*/
bool intel_display_device_enabled(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
/* Only valid when HAS_DISPLAY() is true */
- drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915));
+ drm_WARN_ON(display->drm, !HAS_DISPLAY(display));
- return !i915->display.params.disable_display &&
- !intel_opregion_headless_sku(i915);
+ return !display->params.disable_display &&
+ !intel_opregion_headless_sku(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 13453ea4daea..dfb0c8bf5ca2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -161,7 +161,7 @@ enum intel_display_subplatform {
#define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv)
/* Check that device has a display IP version within the specific range. */
-#define IS_DISPLAY_IP_RANGE(__i915, from, until) ( \
+#define IS_DISPLAY_VER_FULL(__i915, from, until) ( \
BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
(DISPLAY_VER_FULL(__i915) >= (from) && \
DISPLAY_VER_FULL(__i915) <= (until)))
@@ -175,14 +175,14 @@ enum intel_display_subplatform {
* hardware fix is present and the software workaround is no longer necessary.
* E.g.,
*
- * IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2)
- * IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER)
+ * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2)
+ * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER)
*
* "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
* stepping bound for the specified IP version.
*/
-#define IS_DISPLAY_IP_STEP(__i915, ipver, from, until) \
- (IS_DISPLAY_IP_RANGE((__i915), (ipver), (ipver)) && \
+#define IS_DISPLAY_VER_STEP(__i915, ipver, from, until) \
+ (IS_DISPLAY_VER_FULL((__i915), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__i915), (from), (until)))
#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info)
@@ -194,6 +194,12 @@ enum intel_display_subplatform {
#define IS_DISPLAY_VER(i915, from, until) \
(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
+#define INTEL_DISPLAY_STEP(__i915) (DISPLAY_RUNTIME_INFO(__i915)->step)
+
+#define IS_DISPLAY_STEP(__i915, since, until) \
+ (drm_WARN_ON(__to_intel_display(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
+ INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
+
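+
+Note the asymmetric bounds: IS_DISPLAY_STEP() is inclusive of since but
+exclusive of until, so a guard covering steppings A0 through B1 on display 14.0
+would read as in this sketch (apply_wa_foo() is hypothetical):
+
+	if (IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2))
+		apply_wa_foo(i915);	/* runs on A0..B1 only */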
struct intel_display_runtime_info {
enum intel_display_platform platform;
enum intel_display_subplatform subplatform;
@@ -201,8 +207,11 @@ struct intel_display_runtime_info {
struct intel_display_ip_ver {
u16 ver;
u16 rel;
- u16 step;
+ u16 step; /* hardware */
} ip;
+ int step; /* symbolic */
+
+ u32 rawclk_freq;
u8 pipe_mask;
u8 cpu_transcoder_mask;
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 794b4af38055..069426d9260b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -217,7 +217,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
return ret;
}
- intel_bios_init(i915);
+ intel_bios_init(display);
ret = intel_vga_register(i915);
if (ret)
@@ -265,7 +265,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
intel_init_quirks(display);
- intel_fbc_init(i915);
+ intel_fbc_init(display);
return 0;
@@ -275,7 +275,7 @@ cleanup_vga_client_pw_domain_dmc:
cleanup_vga:
intel_vga_unregister(i915);
cleanup_bios:
- intel_bios_driver_remove(i915);
+ intel_bios_driver_remove(display);
return ret;
}
@@ -416,7 +416,8 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915)
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
{
- struct drm_device *dev = &i915->drm;
+ struct intel_display *display = &i915->display;
+ struct drm_device *dev = display->drm;
enum pipe pipe;
int ret;
@@ -427,7 +428,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_panel_sanitize_ssc(i915);
- intel_pps_setup(i915);
+ intel_pps_setup(display);
intel_gmbus_setup(i915);
@@ -452,13 +453,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
if (i915->display.cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
- intel_hti_init(i915);
+ intel_hti_init(display);
/* Just disable it once at startup */
intel_vga_disable(i915);
intel_setup_outputs(i915);
- ret = intel_dp_tunnel_mgr_init(i915);
+ ret = intel_dp_tunnel_mgr_init(display);
if (ret)
goto err_hdcp;
@@ -466,7 +467,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
- intel_acpi_assign_connector_fwnodes(i915);
+ intel_acpi_assign_connector_fwnodes(display);
drm_modeset_unlock_all(dev);
intel_initial_plane_config(i915);
@@ -526,6 +527,7 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
void intel_display_driver_register(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS,
"i915 display info:");
@@ -533,8 +535,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
return;
/* Must be done after probing outputs */
- intel_opregion_register(i915);
- intel_acpi_video_register(i915);
+ intel_opregion_register(display);
+ intel_acpi_video_register(display);
intel_audio_init(i915);
@@ -578,6 +580,8 @@ void intel_display_driver_remove(struct drm_i915_private *i915)
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!HAS_DISPLAY(i915))
return;
@@ -598,7 +602,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_mode_config_cleanup(i915);
- intel_dp_tunnel_mgr_cleanup(i915);
+ intel_dp_tunnel_mgr_cleanup(display);
intel_overlay_cleanup(i915);
@@ -607,23 +611,27 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
destroy_workqueue(i915->display.wq.flip);
destroy_workqueue(i915->display.wq.modeset);
- intel_fbc_cleanup(i915);
+ intel_fbc_cleanup(&i915->display);
}
/* part #3: call after gem init */
void intel_display_driver_remove_nogem(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
intel_vga_unregister(i915);
- intel_bios_driver_remove(i915);
+ intel_bios_driver_remove(display);
}
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!HAS_DISPLAY(i915))
return;
@@ -643,7 +651,7 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
drm_atomic_helper_shutdown(&i915->drm);
acpi_video_unregister();
- intel_opregion_unregister(i915);
+ intel_opregion_unregister(display);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 5219ba295c74..73369847ed66 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -14,6 +14,7 @@
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
+#include "intel_dsb.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
@@ -270,10 +271,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_asle(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915))
return false;
- return intel_opregion_asle_present(i915);
+ return intel_opregion_asle_present(display);
}
/**
@@ -497,6 +500,8 @@ void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
+ struct intel_display *display = &dev_priv->display;
+
bool blc_event = false;
enum pipe pipe;
@@ -515,12 +520,13 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
}
void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
+ struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
@@ -539,7 +545,7 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
intel_gmbus_irq_handler(dev_priv);
@@ -570,6 +576,7 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
@@ -583,7 +590,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (pch_iir & SDE_GMBUS)
intel_gmbus_irq_handler(dev_priv);
@@ -658,6 +665,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
@@ -671,7 +679,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK_CPT)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (pch_iir & SDE_GMBUS_CPT)
intel_gmbus_irq_handler(dev_priv);
@@ -695,6 +703,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
@@ -702,10 +711,10 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (de_iir & DE_GSE)
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
if (de_iir & DE_POISON)
drm_err(&dev_priv->drm, "Poison interrupt\n");
@@ -743,6 +752,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
@@ -767,10 +777,10 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (de_iir & DE_GSE_IVB)
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
@@ -894,6 +904,7 @@ static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
+ struct intel_display *display = &dev_priv->display;
bool found = false;
if (DISPLAY_VER(dev_priv) >= 14) {
@@ -906,8 +917,15 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_pmdemand_irq_handler(dev_priv);
found = true;
}
+
+ if (iir & XELPDP_RM_TIMEOUT) {
+ u32 val = intel_uncore_read(&dev_priv->uncore,
+ RM_TIMEOUT_REG_CAPTURE);
+ drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
+ found = true;
+ }
} else if (iir & GEN8_DE_MISC_GSE) {
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(display);
found = true;
}
@@ -1049,6 +1067,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
+ struct intel_display *display = &dev_priv->display;
u32 iir;
enum pipe pipe;
@@ -1084,7 +1103,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
if (iir & gen8_de_port_aux_mask(dev_priv)) {
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
found = true;
}
@@ -1149,6 +1168,17 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
flip_done_handler(dev_priv, pipe);
+ if (HAS_DSB(dev_priv)) {
+ if (iir & GEN12_DSB_INT(INTEL_DSB_0))
+ intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
+
+ if (iir & GEN12_DSB_INT(INTEL_DSB_1))
+ intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
+
+ if (iir & GEN12_DSB_INT(INTEL_DSB_2))
+ intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
+ }
+
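+
+The three DSB interrupt checks are deliberately unrolled; assuming the
+INTEL_DSB_* ids are consecutive, an equivalent loop form would be:
+
+	/* Equivalent loop form (sketch); assumes consecutive DSB ids. */
+	for (int id = INTEL_DSB_0; id <= INTEL_DSB_2; id++)
+		if (iir & GEN12_DSB_INT(id))
+			intel_dsb_irq_handler(&dev_priv->display, pipe, id);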
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1211,8 +1241,10 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
+ struct intel_display *display = &i915->display;
+
if (iir & GEN11_GU_MISC_GSE)
- intel_opregion_asle_intr(i915);
+ intel_opregion_asle_intr(display);
}
void gen11_display_irq_handler(struct drm_i915_private *i915)
@@ -1680,6 +1712,7 @@ static void icp_irq_postinstall(struct drm_i915_private *i915);
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
@@ -1710,14 +1743,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 14) {
de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
- XELPDP_PMDEMAND_RSP;
+ XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
} else if (DISPLAY_VER(dev_priv) >= 11) {
enum port port;
- if (intel_bios_is_dsi_present(dev_priv, &port))
+ if (intel_bios_is_dsi_present(display, &port))
de_port_masked |= DSI0_TE | DSI1_TE;
}
+ if (HAS_DSB(dev_priv))
+ de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
+ GEN12_DSB_INT(INTEL_DSB_1) |
+ GEN12_DSB_INT(INTEL_DSB_2);
+
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK |
gen8_de_pipe_underrun_mask(dev_priv) |
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index e82bd72d32fa..1a45d300b6f0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -173,14 +173,14 @@ static void _param_print_charp(struct drm_printer *p, const char *driver_name,
/**
* intel_display_params_dump - dump intel display modparams
- * @i915: i915 device
+ * @display: display device
* @p: the &drm_printer
*
* Pretty printer for i915 modparams.
*/
-void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p)
+void intel_display_params_dump(struct intel_display *display, struct drm_printer *p)
{
-#define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x);
+#define PRINT(T, x, ...) _param_print(p, display->drm->driver->name, #x, display->params.x);
INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
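
The PRINT/INTEL_DISPLAY_PARAMS_FOR_EACH pairing is the classic X-macro pattern:
the parameter list is declared once and expanded with a different per-entry
macro at each use site. A self-contained illustration with hypothetical names:

	#include <stdio.h>

	#define PARAMS_FOR_EACH(param) \
		param(int, enable_foo) \
		param(bool, disable_bar)

	/* Expands to one printf per parameter name. */
	#define PRINT(T, x) printf("%s\n", #x);

	static void dump_params(void)
	{
		PARAMS_FOR_EACH(PRINT)
	}
	#undef PRINT
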
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 48c29c55c939..da8dc943234b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
struct drm_printer;
-struct drm_i915_private;
+struct intel_display;
/*
* Invoke param, a function-like macro, for each intel display param, with
@@ -56,7 +56,7 @@ struct intel_display_params {
};
#undef MEMBER
-void intel_display_params_dump(struct drm_i915_private *i915,
+void intel_display_params_dump(struct intel_display *display,
struct drm_printer *p);
void intel_display_params_copy(struct intel_display_params *dest);
void intel_display_params_free(struct intel_display_params *params);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index e288a1b21d7e..ef2fdbf97346 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -36,7 +36,7 @@
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
-const char *
+static const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
switch (domain) {
@@ -198,20 +198,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
}
}
-/**
- * __intel_display_power_is_enabled - unlocked check for a power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to check
- *
- * This is the unlocked version of intel_display_power_is_enabled() and should
- * only be used from error capture and recovery code where deadlocks are
- * possible.
- *
- * Returns:
- * True when the power domain is enabled, false otherwise.
- */
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_well *power_well;
bool is_enabled;
@@ -1696,7 +1684,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_dmc_load_program(dev_priv);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
- if (IS_DISPLAY_IP_RANGE(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
+ if (IS_DISPLAY_VER_FULL(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
@@ -1704,6 +1692,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* Wa_14011503030:xelpd */
if (DISPLAY_VER(dev_priv) == 13)
intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+
+ /* Wa_15013987218 */
+ if (DISPLAY_VER(dev_priv) == 20) {
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ 0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
+ }
}
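
For readers unfamiliar with the helper, intel_de_rmw() takes (device, register,
bits-to-clear, bits-to-set), so Wa_15013987218 above is a set-then-clear pulse
of the GMBUS unit clock-gate disable bit. In pseudocode:

	/* Pseudocode of the pulse performed by the two rmw calls above. */
	val = read(SOUTH_DSPCLK_GATE_D);
	write(SOUTH_DSPCLK_GATE_D, val | PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);	/* set */
	val = read(SOUTH_DSPCLK_GATE_D);
	write(SOUTH_DSPCLK_GATE_D, val & ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);	/* clear */
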
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index d6c2a5846bdc..425452c5a469 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -183,13 +183,8 @@ void intel_display_power_resume(struct drm_i915_private *i915);
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
u32 state);
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain);
-
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
intel_wakeref_t
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 919f712fef13..46e9eff12c23 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -861,6 +861,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
assert_can_enable_dc9(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
@@ -870,19 +872,21 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
* because PPS registers are always on.
*/
if (!HAS_PCH_SPLIT(dev_priv))
- intel_pps_reset_all(dev_priv);
+ intel_pps_reset_all(display);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
assert_can_disable_dc9(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
}
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1176,14 +1180,15 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
intel_de_write(dev_priv, CBR1_VLV, 0);
- drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ drm_WARN_ON(&dev_priv->drm, DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+ DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq,
1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1229,11 +1234,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_vga_redisable_power_on(dev_priv);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
}
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1241,7 +1248,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
- intel_pps_reset_all(dev_priv);
+ intel_pps_reset_all(display);
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index c2c347b22448..49e2e650ebcd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -83,7 +83,8 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
void intel_display_reset_finish(struct drm_i915_private *i915)
{
- struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx;
+ struct intel_display *display = &i915->display;
+ struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
@@ -94,7 +95,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
return;
- state = fetch_and_zero(&i915->display.restore.modeset_state);
+ state = fetch_and_zero(&display->restore.modeset_state);
if (!state)
goto unlock;
@@ -112,7 +113,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
* The display has been reset as well,
* so need a full re-initialization.
*/
- intel_pps_unlock_regs_wa(i915);
+ intel_pps_unlock_regs_wa(display);
intel_display_driver_init_hw(i915);
intel_clock_gating_init(i915);
intel_hpd_init(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index f9d3cc3c342b..f29e5dc3db91 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1396,8 +1396,8 @@ struct intel_crtc_state {
/* Only valid on TGL+ */
enum transcoder mst_master_transcoder;
- /* For DSB related info */
- struct intel_dsb *dsb;
+ /* For DSB based color LUT updates */
+ struct intel_dsb *dsb_color_vblank, *dsb_color_commit;
u32 psr2_man_track_ctl;
@@ -1754,6 +1754,7 @@ struct intel_dp {
u8 lane_count;
u8 sink_count;
bool link_trained;
+ bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
@@ -1777,10 +1778,30 @@ struct intel_dp {
int common_rates[DP_MAX_SUPPORTED_RATES];
struct {
/* TODO: move the rest of link specific fields to here */
+ /* common (rate, lane count) configs in bw order */
+ int num_configs;
+#define INTEL_DP_MAX_LANE_COUNT 4
+#define INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS (ilog2(INTEL_DP_MAX_LANE_COUNT) + 1)
+#define INTEL_DP_LANE_COUNT_EXP_BITS order_base_2(INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS)
+#define INTEL_DP_LINK_RATE_IDX_BITS (BITS_PER_TYPE(u8) - INTEL_DP_LANE_COUNT_EXP_BITS)
+#define INTEL_DP_MAX_LINK_CONFIGS (DP_MAX_SUPPORTED_RATES * \
+ INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS)
+ struct intel_dp_link_config {
+ u8 link_rate_idx:INTEL_DP_LINK_RATE_IDX_BITS;
+ u8 lane_count_exp:INTEL_DP_LANE_COUNT_EXP_BITS;
+ } configs[INTEL_DP_MAX_LINK_CONFIGS];
/* Max lane count for the current link */
int max_lane_count;
/* Max rate for the current link */
int max_rate;
+ /*
+ * Link parameters for which the MST topology was probed.
+ * Tracking these ensures that the MST path resources are
+ * re-enumerated whenever the link is retrained with new link
+ * parameters, as required by the DP standard.
+ */
+ int mst_probed_lane_count;
+ int mst_probed_rate;
int force_lane_count;
int force_rate;
bool retrain_disabled;
@@ -1806,6 +1827,7 @@ struct intel_dp {
/* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
+ bool as_sdp_supported;
struct drm_dp_tunnel *tunnel;
bool tunnel_suspended:1;
@@ -2063,8 +2085,6 @@ dp_to_lspcon(struct intel_dp *intel_dp)
return &dp_to_dig_port(intel_dp)->lspcon;
}
-#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -2182,35 +2202,18 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}
-static inline int to_bpp_int(int bpp_x16)
-{
- return bpp_x16 >> 4;
-}
-
-static inline int to_bpp_frac(int bpp_x16)
-{
- return bpp_x16 & 0xf;
-}
-
-#define BPP_X16_FMT "%d.%04d"
-#define BPP_X16_ARGS(bpp_x16) to_bpp_int(bpp_x16), (to_bpp_frac(bpp_x16) * 625)
-
-static inline int to_bpp_int_roundup(int bpp_x16)
-{
- return (bpp_x16 + 0xf) >> 4;
-}
-
-static inline int to_bpp_x16(int bpp)
-{
- return bpp << 4;
-}
-
/*
* Conversion functions/macros from various pointer types to struct
* intel_display pointer.
*/
#define __drm_device_to_intel_display(p) \
- (&to_i915(p)->display)
+ ((p) ? &to_i915(p)->display : NULL)
+#define __device_to_intel_display(p) \
+ __drm_device_to_intel_display(dev_get_drvdata(p))
+#define __pci_dev_to_intel_display(p) \
+ __drm_device_to_intel_display(pci_get_drvdata(p))
+#define __intel_atomic_state_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
#define __intel_connector_to_intel_display(p) \
__drm_device_to_intel_display((p)->base.dev)
#define __intel_crtc_to_intel_display(p) \
@@ -2234,6 +2237,9 @@ static inline int to_bpp_x16(int bpp)
#define to_intel_display(p) \
_Generic(*p, \
__assoc(drm_device, p), \
+ __assoc(device, p), \
+ __assoc(pci_dev, p), \
+ __assoc(intel_atomic_state, p), \
__assoc(intel_connector, p), \
__assoc(intel_crtc, p), \
__assoc(intel_crtc_state, p), \
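
to_intel_display() fans out over pointer types with C11 _Generic; the three new
entries extend the conversion to device, pci_dev and intel_atomic_state
pointers. The dispatch mechanism in miniature, with hypothetical types:

	struct foo { int x; };
	struct bar { int y; };

	/* Selects an expression from the static type of *p. */
	#define type_name(p) _Generic(*(p),	\
		struct foo: "foo",		\
		struct bar: "bar")
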
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index 63201d09852c..be644ab6ae00 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -6,8 +6,16 @@
#ifndef __INTEL_DISPLAY_WA_H__
#define __INTEL_DISPLAY_WA_H__
+#include <linux/types.h>
+
struct drm_i915_private;
void intel_display_wa_apply(struct drm_i915_private *i915);
+#ifdef I915
+static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; }
+#else
+bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915);
+#endif
+
#endif
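
The #ifdef I915 guard gives the shared display code two builds: the i915 build
compiles a constant-false inline (the workaround never applies there), while
the xe build links the real implementation. The pattern reduced to a sketch
with illustrative names:

	#ifdef DRIVER_A
	static inline bool needs_wa(void) { return false; }	/* never on driver A */
	#else
	bool needs_wa(void);	/* real check provided by the other driver */
	#endif
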
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 73977b173898..7c756d5ba2a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -391,7 +391,7 @@ static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *i915,
struct stepping_info *si)
{
- const char *step_name = intel_display_step_name(i915);
+ const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(i915));
si->stepping = step_name[0];
si->substepping = step_name[1];
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ebe7fe5417ae..90fa73575feb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/sort.h>
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
@@ -42,6 +43,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
@@ -88,6 +90,8 @@
#include "intel_vrr.h"
#include "intel_crtc_state_dump.h"
+#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
+
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -130,14 +134,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return dig_port->base.type == INTEL_OUTPUT_EDP;
}
-bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- return HAS_AS_SDP(i915) &&
- drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
-}
-
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
@@ -535,6 +531,10 @@ static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
/* The values must be in increasing order */
+ static const int bmg_rates[] = {
+ 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
+ 810000, 1000000, 1350000,
+ };
static const int mtl_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
810000, 1000000, 2000000,
@@ -565,8 +565,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
intel_dp->source_rates || intel_dp->num_source_rates);
if (DISPLAY_VER(dev_priv) >= 14) {
- source_rates = mtl_rates;
- size = ARRAY_SIZE(mtl_rates);
+ if (IS_BATTLEMAGE(dev_priv)) {
+ source_rates = bmg_rates;
+ size = ARRAY_SIZE(bmg_rates);
+ } else {
+ source_rates = mtl_rates;
+ size = ARRAY_SIZE(mtl_rates);
+ }
max_rate = mtl_max_source_rate(intel_dp);
} else if (DISPLAY_VER(dev_priv) >= 11) {
source_rates = icl_rates;
@@ -643,6 +648,106 @@ int intel_dp_rate_index(const int *rates, int len, int rate)
return -1;
}
+static int intel_dp_link_config_rate(struct intel_dp *intel_dp,
+ const struct intel_dp_link_config *lc)
+{
+ return intel_dp_common_rate(intel_dp, lc->link_rate_idx);
+}
+
+static int intel_dp_link_config_lane_count(const struct intel_dp_link_config *lc)
+{
+ return 1 << lc->lane_count_exp;
+}
+
+static int intel_dp_link_config_bw(struct intel_dp *intel_dp,
+ const struct intel_dp_link_config *lc)
+{
+ return drm_dp_max_dprx_data_rate(intel_dp_link_config_rate(intel_dp, lc),
+ intel_dp_link_config_lane_count(lc));
+}
+
+static int link_config_cmp_by_bw(const void *a, const void *b, const void *p)
+{
+ struct intel_dp *intel_dp = (struct intel_dp *)p; /* remove const */
+ const struct intel_dp_link_config *lc_a = a;
+ const struct intel_dp_link_config *lc_b = b;
+ int bw_a = intel_dp_link_config_bw(intel_dp, lc_a);
+ int bw_b = intel_dp_link_config_bw(intel_dp, lc_b);
+
+ if (bw_a != bw_b)
+ return bw_a - bw_b;
+
+ return intel_dp_link_config_rate(intel_dp, lc_a) -
+ intel_dp_link_config_rate(intel_dp, lc_b);
+}
+
+static void intel_dp_link_config_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_dp_link_config *lc;
+ int num_common_lane_configs;
+ int i;
+ int j;
+
+ if (drm_WARN_ON(&i915->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
+ return;
+
+ num_common_lane_configs = ilog2(intel_dp_max_common_lane_count(intel_dp)) + 1;
+
+ if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates * num_common_lane_configs >
+ ARRAY_SIZE(intel_dp->link.configs)))
+ return;
+
+ intel_dp->link.num_configs = intel_dp->num_common_rates * num_common_lane_configs;
+
+ lc = &intel_dp->link.configs[0];
+ for (i = 0; i < intel_dp->num_common_rates; i++) {
+ for (j = 0; j < num_common_lane_configs; j++) {
+ lc->lane_count_exp = j;
+ lc->link_rate_idx = i;
+
+ lc++;
+ }
+ }
+
+ sort_r(intel_dp->link.configs, intel_dp->link.num_configs,
+ sizeof(intel_dp->link.configs[0]),
+ link_config_cmp_by_bw, NULL,
+ intel_dp);
+}
+
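+
+The bitfield sizing behind the config table works out as follows, assuming
+DP_MAX_SUPPORTED_RATES is 8 as in the DRM DP helpers:
+
+	/*
+	 * Worked sizing (assumes DP_MAX_SUPPORTED_RATES == 8):
+	 *   INTEL_DP_MAX_SUPPORTED_LANE_CONFIGS = ilog2(4) + 1    = 3  (1, 2, 4 lanes)
+	 *   INTEL_DP_LANE_COUNT_EXP_BITS        = order_base_2(3) = 2
+	 *   INTEL_DP_LINK_RATE_IDX_BITS         = 8 - 2           = 6
+	 *   INTEL_DP_MAX_LINK_CONFIGS           = 8 * 3           = 24
+	 * so each intel_dp_link_config packs into a single byte.
+	 */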
+void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ const struct intel_dp_link_config *lc;
+
+ if (drm_WARN_ON(&i915->drm, idx < 0 || idx >= intel_dp->link.num_configs))
+ idx = 0;
+
+ lc = &intel_dp->link.configs[idx];
+
+ *link_rate = intel_dp_link_config_rate(intel_dp, lc);
+ *lane_count = intel_dp_link_config_lane_count(lc);
+}
+
+int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lane_count)
+{
+ int link_rate_idx = intel_dp_rate_index(intel_dp->common_rates, intel_dp->num_common_rates,
+ link_rate);
+ int lane_count_exp = ilog2(lane_count);
+ int i;
+
+ for (i = 0; i < intel_dp->link.num_configs; i++) {
+ const struct intel_dp_link_config *lc = &intel_dp->link.configs[i];
+
+ if (lc->lane_count_exp == lane_count_exp &&
+ lc->link_rate_idx == link_rate_idx)
+ return i;
+ }
+
+ return -1;
+}
+
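+
+Together these two helpers give callers a bandwidth-ordered walk over the
+config table, e.g. for stepping a link down after a training failure. A usage
+sketch (the fallback policy shown is illustrative, not the driver's):
+
+	/* Sketch: fall back to the next-lower-bandwidth link config. */
+	int idx = intel_dp_link_config_index(intel_dp, link_rate, lane_count);
+
+	if (idx > 0)
+		intel_dp_link_config_get(intel_dp, idx - 1,
+					 &link_rate, &lane_count);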
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -661,6 +766,8 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
+
+ intel_dp_link_config_init(intel_dp);
}
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
@@ -1599,8 +1706,8 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
int mode_rate, link_rate, link_avail;
- for (bpp = to_bpp_int(limits->link.max_bpp_x16);
- bpp >= to_bpp_int(limits->link.min_bpp_x16);
+ for (bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
+ bpp >= fxp_q4_to_int(limits->link.min_bpp_x16);
bpp -= 2 * 3) {
int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
@@ -1928,7 +2035,7 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
timeslots);
if (ret == 0) {
pipe_config->dsc.compressed_bpp_x16 =
- to_bpp_x16(valid_dsc_bpp[i]);
+ fxp_q4_from_int(valid_dsc_bpp[i]);
return 0;
}
}
@@ -1971,7 +2078,7 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
compressed_bppx16 >= dsc_min_bpp;
compressed_bppx16 -= bppx16_step) {
if (intel_dp->force_dsc_fractional_bpp_en &&
- !to_bpp_frac(compressed_bppx16))
+ !fxp_q4_to_frac(compressed_bppx16))
continue;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
@@ -1981,7 +2088,7 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
if (ret == 0) {
pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
if (intel_dp->force_dsc_fractional_bpp_en &&
- to_bpp_frac(compressed_bppx16))
+ fxp_q4_to_frac(compressed_bppx16))
drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
return 0;
@@ -2006,7 +2113,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
@@ -2018,7 +2125,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
adjusted_mode->hdisplay,
pipe_config->joiner_pipes);
dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
- dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
if (DISPLAY_VER(i915) >= 13)
return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
@@ -2168,20 +2275,20 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
pipe_config,
pipe_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
- dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
pipe_config->dsc.compressed_bpp_x16 =
- to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));
+ fxp_q4_from_int(max(dsc_min_bpp, dsc_max_bpp));
pipe_config->pipe_bpp = pipe_bpp;
@@ -2271,17 +2378,17 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm,
"Cannot compute valid DSC parameters for Input Bpp = %d"
- "Compressed BPP = " BPP_X16_FMT "\n",
+ "Compressed BPP = " FXP_Q4_FMT "\n",
pipe_config->pipe_bpp,
- BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
+ FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16));
return ret;
}
pipe_config->dsc.compression_enable = true;
drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
- "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
+ "Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
- BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
+ FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
pipe_config->dsc.slice_count);
return 0;
@@ -2313,15 +2420,15 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
int max_link_bpp_x16;
max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16,
- to_bpp_x16(limits->pipe.max_bpp));
+ fxp_q4_from_int(limits->pipe.max_bpp));
if (!dsc) {
- max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3));
+ max_link_bpp_x16 = rounddown(max_link_bpp_x16, fxp_q4_from_int(2 * 3));
- if (max_link_bpp_x16 < to_bpp_x16(limits->pipe.min_bpp))
+ if (max_link_bpp_x16 < fxp_q4_from_int(limits->pipe.min_bpp))
return false;
- limits->link.min_bpp_x16 = to_bpp_x16(limits->pipe.min_bpp);
+ limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
} else {
/*
* TODO: set the DSC link limits already here, atm these are
@@ -2334,7 +2441,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->link.max_bpp_x16 = max_link_bpp_x16;
drm_dbg_kms(&i915->drm,
- "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n",
+ "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " FXP_Q4_FMT "\n",
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
adjusted_mode->crtc_clock,
@@ -2342,7 +2449,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->max_lane_count,
limits->max_rate,
limits->pipe.max_bpp,
- BPP_X16_ARGS(limits->link.max_bpp_x16));
+ FXP_Q4_ARGS(limits->link.max_bpp_x16));
return true;
}
@@ -2394,7 +2501,7 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp = crtc_state->dsc.compression_enable ?
- to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
+ fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
crtc_state->pipe_bpp;
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
@@ -2473,10 +2580,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
}
drm_dbg_kms(&i915->drm,
- "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
+ "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
+ FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
intel_dp_config_required_rate(pipe_config),
intel_dp_max_link_data_rate(intel_dp,
pipe_config->port_clock,
@@ -2626,8 +2733,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!crtc_state->vrr.enable ||
- !intel_dp_as_sdp_supported(intel_dp))
+ if (!crtc_state->vrr.enable || !intel_dp->as_sdp_supported)
return;
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
@@ -2876,7 +2982,6 @@ static void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
drm_connector_put(&connector->base);
}
-/* NOTE: @state is only valid for MST links and can be %NULL for SST. */
void
intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -2885,18 +2990,19 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
struct intel_connector *connector;
struct intel_digital_connector_state *conn_state;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i;
+ if (intel_dp->needs_modeset_retry)
+ return;
+
+ intel_dp->needs_modeset_retry = true;
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
return;
}
- if (drm_WARN_ON(&i915->drm, !state))
- return;
-
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
if (!conn_state->base.crtc)
continue;
@@ -2968,8 +3074,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (pipe_config->dsc.compression_enable)
link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
else
- link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
- pipe_config->pipe_bpp));
+ link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(pipe_config->output_format,
+ pipe_config->pipe_bpp));
if (intel_dp->mso_link_count) {
int n = intel_dp->mso_link_count;
@@ -3024,6 +3130,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp->link_trained = false;
+ intel_dp->needs_modeset_retry = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
}
@@ -3032,6 +3139,8 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp)
{
intel_dp->link.max_lane_count = intel_dp_max_common_lane_count(intel_dp);
intel_dp->link.max_rate = intel_dp_max_common_rate(intel_dp);
+ intel_dp->link.mst_probed_lane_count = 0;
+ intel_dp->link.mst_probed_rate = 0;
intel_dp->link.retrain_disabled = false;
intel_dp->link.seq_train_failures = 0;
}
@@ -3367,8 +3476,11 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
- if (crtc_state)
+ if (crtc_state) {
intel_dp_reset_link_params(intel_dp);
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
+ intel_dp->link_trained = true;
+ }
}
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
@@ -3435,7 +3547,7 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
- int bw_gbps[] = {9, 18, 24, 32, 40, 48};
+ static const int bw_gbps[] = {9, 18, 24, 32, 40, 48};
int i;
for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
@@ -3955,6 +4067,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
drm_dp_is_branch(intel_dp->dpcd));
intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
+ intel_dp->colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+
/*
* Read the eDP display control registers.
*
@@ -4068,6 +4183,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
+ intel_dp->colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+
intel_dp_update_sink_caps(intel_dp);
}
@@ -4158,6 +4276,9 @@ intel_dp_mst_configure(struct intel_dp *intel_dp)
intel_dp->is_mst = intel_dp->mst_detect != DRM_DP_SST;
+ if (intel_dp->is_mst)
+ intel_dp_mst_prepare_probe(intel_dp);
+
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
/* Avoid stale info on the next detect cycle. */
@@ -4387,8 +4508,11 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
if (!enable && HAS_DSC(dev_priv))
val &= ~VDIP_ENABLE_PPS;
- /* When PSR is enabled, this routine doesn't disable VSC DIP */
- if (!crtc_state->has_psr)
+ /*
+ * Disable the VSC DIP when this routine is called to disable the
+ * SDPs, or when the pipe does not have PSR enabled.
+ */
+ if (!enable || !crtc_state->has_psr)
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
intel_de_write(dev_priv, reg, val);
@@ -5081,7 +5205,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
ack[3] |= DP_TUNNELING_IRQ;
}
- if (!memchr_inv(ack, 0, sizeof(ack)))
+ if (mem_is_zero(ack, sizeof(ack)))
break;
if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
@@ -5255,8 +5379,6 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc;
- bool mst_output = false;
u8 pipe_mask;
int ret;
@@ -5285,78 +5407,28 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp->link.force_retrain));
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
- mst_output = true;
- break;
- }
-
- /* Suppress underruns caused by re-training */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- if (crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), false);
- }
-
- /* TODO: use a modeset for SST as well. */
- if (mst_output) {
- ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx);
-
- if (ret && ret != -EDEADLK)
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] link retraining failed: %pe\n",
- encoder->base.base.id, encoder->base.name,
- ERR_PTR(ret));
-
- goto out;
- }
-
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- intel_dp->link_trained = false;
-
- intel_dp_check_frl_training(intel_dp);
- intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
- intel_dp_start_link_train(NULL, intel_dp, crtc_state);
- intel_dp_stop_link_train(intel_dp, crtc_state);
- break;
- }
-
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- /* Keep underrun reporting disabled until things are stable */
- intel_crtc_wait_for_next_vblank(crtc);
+ ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx);
+ if (ret == -EDEADLK)
+ return ret;
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), true);
- }
+ intel_dp->link.force_retrain = false;
-out:
- if (ret != -EDEADLK)
- intel_dp->link.force_retrain = false;
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] link retraining failed: %pe\n",
+ encoder->base.base.id, encoder->base.name,
+ ERR_PTR(ret));
return ret;
}
void intel_dp_link_check(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_modeset_acquire_ctx ctx;
int ret;
intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret)
ret = intel_dp_retrain_link(encoder, &ctx);
-
- drm_WARN_ON(&i915->drm, ret);
}
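
intel_dp_retrain_link() now returns -EDEADLK straight to the caller so the
intel_modeset_lock_ctx_retry() loop above can back off and re-run the body.
Open-coded, that contract is the standard DRM acquire-context pattern
(do_work_locked() is hypothetical):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = do_work_locked(&ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);	/* drop locks, wait for contender */
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
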
void intel_dp_check_link_state(struct intel_dp *intel_dp)
@@ -5906,6 +5978,15 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn
connector);
}
+static void
+intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ intel_dp->as_sdp_supported = HAS_AS_SDP(i915) &&
+ drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
+}
+
static int
intel_dp_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
@@ -5976,13 +6057,15 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
- intel_dp_mst_configure(intel_dp);
+ intel_dp_detect_sdp_caps(intel_dp);
if (intel_dp->reset_link_params) {
intel_dp_reset_link_params(intel_dp);
intel_dp->reset_link_params = false;
}
+ intel_dp_mst_configure(intel_dp);
+
intel_dp_print_rates(intel_dp);
if (intel_dp->is_mst) {
@@ -6453,8 +6536,9 @@ static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
{
+ struct intel_display *display = &i915->display;
const struct intel_bios_encoder_data *devdata =
- intel_bios_encoder_data_lookup(i915, port);
+ intel_bios_encoder_data_lookup(display, port);
return _intel_dp_is_port_edp(i915, devdata, port);
}
@@ -6557,6 +6641,7 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode;
@@ -6582,7 +6667,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
+ intel_bios_init_panel_early(display, &intel_connector->panel,
encoder->devdata);
if (!intel_pps_init(intel_dp)) {
@@ -6679,7 +6764,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
+ intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata,
IS_ERR(drm_edid) ? NULL : drm_edid);
intel_panel_add_edid_fixed_modes(intel_connector, true);
@@ -6852,9 +6937,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
- intel_dp->colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
-
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index a0f990a95ecc..1b9aaddd8c35 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -85,7 +85,6 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
-bool intel_dp_as_sdp_supported(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_has_dsc(const struct intel_connector *connector);
int intel_dp_link_symbol_size(int rate);
@@ -108,6 +107,8 @@ int intel_dp_max_common_rate(struct intel_dp *intel_dp);
int intel_dp_max_common_lane_count(struct intel_dp *intel_dp);
int intel_dp_common_rate(struct intel_dp *intel_dp, int index);
int intel_dp_rate_index(const int *rates, int len, int rate);
+int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lane_count);
+void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count);
void intel_dp_update_sink_caps(struct intel_dp *intel_dp);
void intel_dp_reset_link_params(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index be58185a77c0..04a7acd7f73c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -18,12 +18,12 @@
#define AUX_CH_NAME_BUFSIZE 6
-static const char *aux_ch_name(struct drm_i915_private *i915,
+static const char *aux_ch_name(struct intel_display *display,
char *buf, int size, enum aux_ch aux_ch)
{
- if (DISPLAY_VER(i915) >= 13 && aux_ch >= AUX_CH_D_XELPD)
+ if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
- else if (DISPLAY_VER(i915) >= 12 && aux_ch >= AUX_CH_USBC1)
+ else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
else
snprintf(buf, size, "%c", 'A' + aux_ch);
@@ -56,17 +56,18 @@ static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
const unsigned int timeout_ms = 10;
u32 status;
int ret;
- ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0,
+ ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
+ 0,
2, timeout_ms, &status);
if (ret == -ETIMEDOUT)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"%s: did not complete or timeout within %ums (status 0x%08x)\n",
intel_dp->aux.name, timeout_ms, status);
@@ -75,7 +76,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (index)
return 0;
@@ -84,12 +85,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 freq;
@@ -102,15 +103,16 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = i915->display.cdclk.hw.cdclk;
+ freq = display->cdclk.hw.cdclk;
else
- freq = RUNTIME_INFO(i915)->rawclk_freq;
+ freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
@@ -201,8 +203,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 unused)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 ret;
/*
@@ -227,7 +229,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
* Power request bit is already set during aux power well enable.
* Preserve the bit across aux transactions.
*/
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
return ret;
@@ -239,6 +241,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u8 *recv, int recv_size,
u32 aux_send_ctl_flags)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -297,7 +300,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = intel_de_read_notrace(i915, ch_ctl);
+ status = intel_de_read_notrace(display, ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -306,10 +309,10 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- const u32 status = intel_de_read(i915, ch_ctl);
+ const u32 status = intel_de_read(display, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"%s: not started (status 0x%08x)\n",
intel_dp->aux.name, status);
intel_dp->aux_busy_last_status = status;
@@ -320,7 +323,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
/* Only 5 data registers! */
- if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+ if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}
@@ -336,17 +339,17 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- intel_de_write(i915, ch_data[i >> 2],
+ intel_de_write(display, ch_data[i >> 2],
intel_dp_aux_pack(send + i,
send_bytes - i));
/* Send the command and wait for it to complete */
- intel_de_write(i915, ch_ctl, send_ctl);
+ intel_de_write(display, ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
- intel_de_write(i915, ch_ctl,
+ intel_de_write(display, ch_ctl,
status | DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
@@ -370,7 +373,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ drm_err(display->drm, "%s: not done (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -EBUSY;
goto out;
@@ -382,7 +385,7 @@ done:
* not connected.
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -EIO;
goto out;
@@ -393,7 +396,7 @@ done:
* -- don't fill the kernel log with these
*/
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -ETIMEDOUT;
goto out;
@@ -408,7 +411,7 @@ done:
* drm layer takes care for the necessary retries.
*/
if (recv_bytes == 0 || recv_bytes > 20) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"%s: Forbidden recv_bytes = %d on aux transaction\n",
intel_dp->aux.name, recv_bytes);
ret = -EBUSY;
@@ -419,7 +422,7 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]),
+ intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
@@ -468,7 +471,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
u32 flags = intel_dp_aux_xfer_flags(msg);
@@ -483,10 +486,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 2; /* 0 or 1 data bytes */
- if (drm_WARN_ON(&i915->drm, txsize > 20))
+ if (drm_WARN_ON(display->drm, txsize > 20))
return -E2BIG;
- drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+ drm_WARN_ON(display->drm, !msg->buffer != !msg->size);
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
@@ -511,7 +514,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
rxsize = msg->size + 1;
- if (drm_WARN_ON(&i915->drm, rxsize > 20))
+ if (drm_WARN_ON(display->drm, rxsize > 20))
return -E2BIG;
ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
@@ -721,7 +724,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -732,16 +735,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_CTL(i915, aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A);
+ return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
}
}
static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -752,10 +755,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index);
+ return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index);
+ return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
}
}
@@ -769,19 +772,20 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
char buf[AUX_CH_NAME_BUFSIZE];
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (DISPLAY_VER(i915) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
} else if (HAS_PCH_SPLIT(i915)) {
@@ -795,7 +799,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
}
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -804,17 +808,17 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- intel_dp->aux.drm_dev = &i915->drm;
+ intel_dp->aux.drm_dev = display->drm;
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
- aux_ch_name(i915, buf, sizeof(buf), aux_ch),
+ aux_ch_name(display, buf, sizeof(buf), aux_ch),
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
@@ -823,10 +827,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/* SKL has DDI E but no AUX E */
- if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
+ if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
return AUX_CH_A;
return (enum aux_ch)encoder->port;
@@ -836,10 +840,10 @@ static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder *encoder,
enum aux_ch aux_ch)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *other;
- for_each_intel_encoder(&i915->drm, other) {
+ for_each_intel_encoder(display->drm, other) {
if (other == encoder)
continue;
@@ -855,7 +859,7 @@ get_encoder_by_aux_ch(struct intel_encoder *encoder,
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *other;
const char *source;
enum aux_ch aux_ch;
@@ -876,23 +880,23 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
other = get_encoder_by_aux_ch(encoder, aux_ch);
if (other) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
- aux_ch_name(i915, buf, sizeof(buf), aux_ch),
+ aux_ch_name(display, buf, sizeof(buf), aux_ch),
other->base.base.id, other->base.name);
return AUX_CH_NONE;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
encoder->base.base.id, encoder->base.name,
- aux_ch_name(i915, buf, sizeof(buf), aux_ch), source);
+ aux_ch_name(display, buf, sizeof(buf), aux_ch), source);
return aux_ch;
}
-void intel_dp_aux_irq_handler(struct drm_i915_private *i915)
+void intel_dp_aux_irq_handler(struct intel_display *display)
{
- wake_up_all(&i915->display.gmbus.wait_queue);
+ wake_up_all(&display->gmbus.wait_queue);
}
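
Every intel_dp_aux.c hunk above is an instance of the same mechanical conversion: derive a struct intel_display from the nearest display object with to_intel_display(), use it for DISPLAY_VER(), the intel_de_* register accessors and drm_* logging, and re-derive the old drm_i915_private only where core-only checks such as PCH type remain. A minimal sketch of the pattern, assuming the i915 driver context (example_hook() is a hypothetical function name):

static void example_hook(struct intel_dp *intel_dp)
{
	/* new: display-scoped device pointer */
	struct intel_display *display = to_intel_display(intel_dp);
	/* old pointer, now derived, kept only for core-only checks */
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (DISPLAY_VER(display) >= 14)		/* was DISPLAY_VER(i915) */
		drm_dbg_kms(display->drm,	/* was &i915->drm */
			    "Xe LPD+ AUX path\n");

	if (HAS_PCH_SPLIT(i915))		/* PCH checks stay on i915 */
		drm_dbg_kms(display->drm, "PCH split platform\n");
}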
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 593f58fafab7..90ee1c5fae28 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
enum aux_ch;
-struct drm_i915_private;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -18,7 +18,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp);
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
-void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
+void intel_dp_aux_irq_handler(struct intel_display *display);
u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 8ce60d53dcde..33f72db99b58 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -109,7 +109,7 @@ static bool is_intel_tcon_cap(const u8 tcon_cap[4])
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_panel *panel = &connector->panel;
@@ -122,7 +122,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
if (ret != sizeof(tcon_cap))
return false;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n",
connector->base.base.id, connector->base.name,
is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]);
@@ -141,10 +142,10 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
* HDR static metadata we need to start maintaining table of
* ranges for such panels.
*/
- if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ if (display->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
- drm_info(&i915->drm,
+ drm_info(display->drm,
"[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
@@ -170,14 +171,15 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
static u32
intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
u8 tmp;
u8 buf[2] = {};
if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n",
connector->base.base.id, connector->base.name);
return 0;
}
@@ -195,7 +197,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf)) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n",
connector->base.base.id, connector->base.name);
return 0;
}
@@ -253,8 +256,8 @@ static void
intel_dp_aux_write_content_luminance(struct intel_connector *connector,
struct hdr_output_metadata *hdr_metadata)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
u8 buf[4];
@@ -270,7 +273,7 @@ intel_dp_aux_write_content_luminance(struct intel_connector *connector,
INTEL_EDP_HDR_CONTENT_LUMINANCE,
buf, sizeof(buf));
if (ret < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Content Luminance DPCD reg write failed, err:-%d\n",
ret);
}
@@ -280,7 +283,7 @@ intel_dp_aux_fill_hdr_tcon_params(const struct drm_connector_state *conn_state,
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/*
* According to spec segmented backlight needs to be set whenever panel is in
@@ -291,7 +294,7 @@ intel_dp_aux_fill_hdr_tcon_params(const struct drm_connector_state *conn_state,
*ctrl |= INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE;
}
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
*ctrl &= ~INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE;
if (panel->backlight.edp.intel_cap.supports_2020_gamut &&
@@ -311,9 +314,9 @@ static void
intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct hdr_output_metadata *hdr_metadata;
int ret;
@@ -323,7 +326,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
if (ret != 1) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n",
connector->base.base.id, connector->base.name, ret);
return;
}
@@ -346,7 +350,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
if (ctrl != old_ctrl &&
drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n",
+ drm_err(display->drm,
+ "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n",
connector->base.base.id, connector->base.name);
if (intel_dp_in_hdr_mode(conn_state)) {
@@ -377,7 +382,7 @@ static const char *dpcd_vs_pwm_str(bool aux)
static void
intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int ret;
@@ -392,7 +397,7 @@ intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector)
INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE,
buf, sizeof(buf));
if (ret < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Luminance DPCD reg write failed, err:-%d\n",
ret);
}
@@ -400,20 +405,21 @@ intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector)
static int
intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
struct drm_luminance_range_info *luminance_range =
&connector->base.display_info.luminance_range;
int ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n",
connector->base.base.id, connector->base.name,
dpcd_vs_pwm_str(panel->backlight.edp.intel_cap.sdr_uses_aux));
if (!panel->backlight.edp.intel_cap.sdr_uses_aux) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n",
connector->base.base.id, connector->base.name, ret);
return ret;
@@ -430,7 +436,8 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
intel_dp_aux_write_panel_luminance_override(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n",
connector->base.base.id, connector->base.name,
panel->backlight.min, panel->backlight.max);
@@ -501,9 +508,9 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 current_level;
u8 current_mode;
int ret;
@@ -514,17 +521,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
connector->base.base.id, connector->base.name,
dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
connector->base.base.id, connector->base.name,
dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
connector->base.base.id, connector->base.name, ret);
return ret;
@@ -553,7 +562,8 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
}
}
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n",
connector->base.base.id, connector->base.name);
return 0;
@@ -562,11 +572,12 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
static bool
intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
connector->base.base.id, connector->base.name);
return true;
}
@@ -591,16 +602,15 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_device *dev = connector->base.dev;
struct intel_panel *panel = &connector->panel;
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool try_intel_interface = false, try_vesa_interface = false;
/* Check the VBT and user's module parameters to figure out which
* interfaces to probe
*/
- switch (i915->display.params.enable_dpcd_backlight) {
+ switch (display->params.enable_dpcd_backlight) {
case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
case INTEL_DP_AUX_BACKLIGHT_AUTO:
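
One detail worth noting in the HDR hunks above: intel_dp_aux_hdr_get_backlight() reads the brightness as a two-byte nits value starting at INTEL_EDP_BRIGHTNESS_NITS_LSB. A minimal sketch of how such an LSB/MSB pair combines, assuming the little-endian layout the register name implies:

static u32 hdr_brightness_from_dpcd(const u8 buf[2])
{
	/* buf[0] = LSB register, buf[1] = the following MSB register */
	return (u32)buf[1] << 8 | buf[0];
}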
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index b0101d72b9c1..3425b3643143 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -152,7 +152,7 @@ int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915,  &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
if (ret)
return ret;
@@ -677,8 +677,15 @@ static
int intel_dp_hdcp2_get_capability(struct intel_connector *connector,
bool *capable)
{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_dp_aux *aux = &dig_port->dp.aux;
+ struct intel_digital_port *dig_port;
+ struct drm_dp_aux *aux;
+
+ *capable = false;
+ if (!intel_attached_encoder(connector))
+ return -EINVAL;
+
+ dig_port = intel_attached_dig_port(connector);
+ aux = &dig_port->dp.aux;
return _intel_dp_hdcp2_get_capability(aux, capable);
}
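
The intel_dp_hdcp2_get_capability() change above is a defensive-ordering fix: set the out-parameter to a safe default and bail out before touching the attached digital port, since intel_attached_dig_port() is only meaningful once an encoder is attached. Reduced to a sketch (query_cap() is a hypothetical name; the exact trigger for a missing encoder is not spelled out in the patch):

static int query_cap(struct intel_connector *connector, bool *capable)
{
	*capable = false;		/* safe default before any checks */

	if (!intel_attached_encoder(connector))
		return -EINVAL;		/* no encoder attached yet */

	/* only now is intel_attached_dig_port(connector) safe to use */
	return 0;
}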
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index d044c8e36bb3..40bedc31d6bf 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -21,6 +21,8 @@
* IN THE SOFTWARE.
*/
+#include <drm/display/drm_dp_helper.h>
+
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -37,13 +39,13 @@
drm_dp_phy_name(_dp_phy)
#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
- drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \
+ drm_dbg_kms(to_intel_display(_intel_dp)->drm, \
LT_MSG_PREFIX _format, \
LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)
#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
- drm_err(&dp_to_i915(_intel_dp)->drm, \
+ drm_err(to_intel_display(_intel_dp)->drm, \
LT_MSG_PREFIX _format, \
LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
else \
@@ -114,7 +116,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
- return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) != 1)
+ return false;
+
+ intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = val;
+
+ return true;
}
static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
@@ -174,7 +182,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
* still taking into account any LTTPR common lane- rate/count limits.
*/
if (lttpr_count < 0)
- return 0;
+ goto out_reset_lttpr_count;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
lt_dbg(intel_dp, DP_PHY_DPRX,
@@ -208,7 +216,8 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp_is_edp(intel_dp))
return 0;
@@ -217,7 +226,7 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))
+ if (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))
if (drm_dp_dpcd_probe(&intel_dp->aux,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
return -EIO;
@@ -248,7 +257,8 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
*/
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
int lttpr_count = 0;
/*
@@ -256,7 +266,7 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
if (!intel_dp_is_edp(intel_dp) &&
- (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
+ (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))) {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
@@ -319,10 +329,11 @@ static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
- drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
+ drm_WARN_ON_ONCE(display->drm,
+ lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
@@ -331,7 +342,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 voltage_max;
/*
@@ -343,7 +354,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
else
voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
@@ -353,7 +364,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 preemph_max;
/*
@@ -365,7 +376,7 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
else
preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
@@ -375,10 +386,11 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915);
+ DISPLAY_VER(display) >= 10 || IS_BROXTON(i915);
}
/* 128b/132b */
@@ -697,26 +709,28 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
return true;
}
-static void
-intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, bool is_vrr)
{
u8 link_config[2];
- link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
- link_config[1] = intel_dp_is_uhbr(crtc_state) ?
+ link_config[0] = is_vrr ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
+ link_config[1] = drm_dp_is_uhbr_rate(link_rate) ?
DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
}
-static void
-intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- u8 link_bw, u8 rate_select)
+static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
- u8 lane_count = crtc_state->lane_count;
+ intel_dp_link_training_set_mode(intel_dp,
+ crtc_state->port_clock, crtc_state->vrr.flipline);
+}
- if (crtc_state->enhanced_framing)
+void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
+ int link_bw, int rate_select, int lane_count,
+ bool enhanced_framing)
+{
+ if (enhanced_framing)
lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
if (link_bw) {
@@ -740,6 +754,14 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
}
}
+static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 link_bw, u8 rate_select)
+{
+ intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, crtc_state->lane_count,
+ crtc_state->enhanced_framing);
+}
+
/*
* Prepare link training by configuring the link parameters. On DDI platforms
* also enable the port here.
@@ -932,7 +954,8 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/* UHBR+ use separate 128b/132b TPS2 */
@@ -1152,6 +1175,36 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
return true;
}
+static bool reduce_link_params_in_bw_order(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int *new_link_rate, int *new_lane_count)
+{
+ int link_rate;
+ int lane_count;
+ int i;
+
+ i = intel_dp_link_config_index(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
+ for (i--; i >= 0; i--) {
+ intel_dp_link_config_get(intel_dp, i, &link_rate, &lane_count);
+
+ if ((intel_dp->link.force_rate &&
+ intel_dp->link.force_rate != link_rate) ||
+ (intel_dp->link.force_lane_count &&
+ intel_dp->link.force_lane_count != lane_count))
+ continue;
+
+ break;
+ }
+
+ if (i < 0)
+ return false;
+
+ *new_link_rate = link_rate;
+ *new_lane_count = lane_count;
+
+ return true;
+}
+
static int reduce_link_rate(struct intel_dp *intel_dp, int current_rate)
{
int rate_index;
@@ -1187,6 +1240,41 @@ static int reduce_lane_count(struct intel_dp *intel_dp, int current_lane_count)
return current_lane_count >> 1;
}
+static bool reduce_link_params_in_rate_lane_order(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int *new_link_rate, int *new_lane_count)
+{
+ int link_rate;
+ int lane_count;
+
+ lane_count = crtc_state->lane_count;
+ link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock);
+ if (link_rate < 0) {
+ lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count);
+ link_rate = intel_dp_max_common_rate(intel_dp);
+ }
+
+ if (lane_count < 0)
+ return false;
+
+ *new_link_rate = link_rate;
+ *new_lane_count = lane_count;
+
+ return true;
+}
+
+static bool reduce_link_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state,
+ int *new_link_rate, int *new_lane_count)
+{
+ /* TODO: Use the same fallback logic on SST as on MST. */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ return reduce_link_params_in_bw_order(intel_dp, crtc_state,
+ new_link_rate, new_lane_count);
+ else
+ return reduce_link_params_in_rate_lane_order(intel_dp, crtc_state,
+ new_link_rate, new_lane_count);
+}
+
static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1200,14 +1288,7 @@ static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return 0;
}
- new_lane_count = crtc_state->lane_count;
- new_link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock);
- if (new_link_rate < 0) {
- new_lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count);
- new_link_rate = intel_dp_max_common_rate(intel_dp);
- }
-
- if (new_lane_count < 0)
+ if (!reduce_link_params(intel_dp, crtc_state, &new_link_rate, &new_lane_count))
return -1;
if (intel_dp_is_edp(intel_dp) &&
@@ -1228,12 +1309,10 @@ static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return 0;
}
-/* NOTE: @state is only valid for MST links and can be %NULL for SST. */
static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
@@ -1249,11 +1328,6 @@ static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *
return false;
}
- if (drm_WARN_ON(&i915->drm,
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
- !state))
- return false;
-
/* Schedule a Hotplug Uevent to userspace to start modeset */
intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
@@ -1512,14 +1586,12 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
* retraining with reduced link rate/lane parameters if the link training
* fails.
* After calling this function intel_dp_stop_link_train() must be called.
- *
- * NOTE: @state is only valid for MST links and can be %NULL for SST.
*/
void intel_dp_start_link_train(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(state);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
bool passed;
@@ -1530,11 +1602,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
*/
int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
- if (drm_WARN_ON(&i915->drm,
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
- !state))
- return;
-
if (lttpr_count < 0)
/* Still continue with enabling the port and link training. */
lttpr_count = 0;
@@ -1569,7 +1636,7 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
* For test cases which rely on the link training or processing of HPDs
* ignore_long_hpd flag can unset from the testcase.
*/
- if (i915->display.hotplug.ignore_long_hpd) {
+ if (display->hotplug.ignore_long_hpd) {
lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
return;
}
@@ -1621,14 +1688,14 @@ static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *conn
static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int current_rate = -1;
int force_rate;
int err;
int i;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
@@ -1636,7 +1703,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
current_rate = intel_dp->link_rate;
force_rate = intel_dp->link.force_rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
seq_printf(m, "%sauto%s",
force_rate == 0 ? "[" : "",
@@ -1692,7 +1759,7 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int rate;
int err;
@@ -1701,14 +1768,14 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file,
if (rate < 0)
return rate;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp_reset_link_params(intel_dp);
intel_dp->link.force_rate = rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
*offp += len;
@@ -1719,14 +1786,14 @@ DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_link_rate);
static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int current_lane_count = -1;
int force_lane_count;
int err;
int i;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
@@ -1734,7 +1801,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
current_lane_count = intel_dp->lane_count;
force_lane_count = intel_dp->link.force_lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
seq_printf(m, "%sauto%s",
force_lane_count == 0 ? "[" : "",
@@ -1794,7 +1861,7 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int lane_count;
int err;
@@ -1803,14 +1870,14 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file,
if (lane_count < 0)
return lane_count;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp_reset_link_params(intel_dp);
intel_dp->link.force_lane_count = lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
*offp += len;
@@ -1821,17 +1888,17 @@ DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_lane_count);
static int i915_dp_max_link_rate_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.max_rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1840,17 +1907,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_link_rate_fops, i915_dp_max_link_rate_show,
static int i915_dp_max_lane_count_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.max_lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1859,17 +1926,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_lane_count_fops, i915_dp_max_lane_count_sho
static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.force_train_failure;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1877,20 +1944,20 @@ static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
static int i915_dp_force_link_training_failure_write(void *data, u64 val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
if (val > 2)
return -EINVAL;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp->link.force_train_failure = val;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1901,17 +1968,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_training_failure_fops,
static int i915_dp_force_link_retrain_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.force_retrain;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1919,17 +1986,17 @@ static int i915_dp_force_link_retrain_show(void *data, u64 *val)
static int i915_dp_force_link_retrain_write(void *data, u64 val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp->link.force_retrain = val;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
@@ -1942,17 +2009,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_retrain_fops,
static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
seq_printf(m, "%s\n", str_yes_no(intel_dp->link.retrain_disabled));
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
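
The fallback rework above leaves SST on the existing rate-first-then-lane-count order but switches MST to a bandwidth-ordered walk over (link rate, lane count) configs via the new intel_dp_link_config_index()/intel_dp_link_config_get() helpers, skipping configs that conflict with a debugfs-forced rate or lane count. A self-contained sketch of that walk, with an illustrative config table in place of the driver's real one:

#include <stdbool.h>

struct link_config {
	int rate;	/* link rate in kHz */
	int lanes;
};

/* illustrative table, sorted by rate * lanes ascending */
static const struct link_config cfgs[] = {
	{ 162000, 1 }, { 270000, 1 }, { 162000, 2 }, { 270000, 2 },
	{ 162000, 4 }, { 270000, 4 }, { 540000, 2 }, { 540000, 4 },
};

static bool reduce_in_bw_order(int cur_idx, int force_rate, int force_lanes,
			       int *new_rate, int *new_lanes)
{
	int i;

	for (i = cur_idx - 1; i >= 0; i--) {
		/* skip configs conflicting with debugfs-forced values */
		if (force_rate && force_rate != cfgs[i].rate)
			continue;
		if (force_lanes && force_lanes != cfgs[i].lanes)
			continue;

		*new_rate = cfgs[i].rate;
		*new_lanes = cfgs[i].lanes;
		return true;	/* next config down in bandwidth */
	}

	return false;		/* nothing lower left to try */
}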
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 42e7fc6cb171..2066b9146762 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -16,6 +16,12 @@ struct intel_dp;
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
+void intel_dp_link_training_set_mode(struct intel_dp *intel_dp,
+ int link_rate, bool is_vrr);
+void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
+ int link_bw, int rate_select, int lane_count,
+ bool enhanced_framing);
+
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
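
The two prototypes added above split the DPCD link programming out of the training sequence so it can be reused outside a modeset: per the .c hunks, intel_dp_link_training_set_mode() writes DP_DOWNSPREAD_CTRL (MSA-timing-ignore for VRR plus 128b/132b vs 8b/10b channel coding chosen from the rate) and intel_dp_link_training_set_bw() programs the lane count with optional enhanced framing alongside the link_bw or rate_select value. A hedged usage sketch, mirroring the MST caller added later in this patch:

/* hypothetical caller; assumes the i915 driver context */
static void example_program_link(struct intel_dp *intel_dp,
				 int link_rate, int lane_count)
{
	u8 link_bw, rate_select;

	intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);

	/* DP_DOWNSPREAD_CTRL: no VRR here, coding picked from the rate */
	intel_dp_link_training_set_mode(intel_dp, link_rate, false);

	/* lane count, enhanced framing, and link_bw or rate_select */
	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select,
				      lane_count,
				      drm_dp_enhanced_frame_cap(intel_dp->dpcd));
}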
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 17978a1f9ab0..15541932b809 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -43,6 +43,7 @@
#include "intel_dp_hdcp.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
+#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -211,8 +212,8 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
- link_bpp_x16 = to_bpp_x16(dsc ? bpp :
- intel_dp_output_bpp(crtc_state->output_format, bpp));
+ link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
+ intel_dp_output_bpp(crtc_state->output_format, bpp));
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
false, dsc, link_bpp_x16);
@@ -289,7 +290,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
if (!dsc)
crtc_state->pipe_bpp = bpp;
else
- crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp);
+ crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}
@@ -308,8 +309,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
* YUV420 is only half of the pipe bpp value.
*/
slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
- to_bpp_int(limits->link.max_bpp_x16),
- to_bpp_int(limits->link.min_bpp_x16),
+ fxp_q4_to_int(limits->link.max_bpp_x16),
+ fxp_q4_to_int(limits->link.min_bpp_x16),
limits,
conn_state, 2 * 3, false);
@@ -374,11 +375,11 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
crtc_state,
max_bpp / 3);
max_compressed_bpp = min(max_compressed_bpp,
- to_bpp_int(limits->link.max_bpp_x16));
+ fxp_q4_to_int(limits->link.max_bpp_x16));
min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
min_compressed_bpp = max(min_compressed_bpp,
- to_bpp_int_roundup(limits->link.min_bpp_x16));
+ fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
min_compressed_bpp, max_compressed_bpp);
@@ -478,10 +479,10 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
- if (limits->link.max_bpp_x16 < to_bpp_x16(24))
+ if (limits->link.max_bpp_x16 < fxp_q4_from_int(24))
return false;
- limits->link.min_bpp_x16 = to_bpp_x16(24);
+ limits->link.min_bpp_x16 = fxp_q4_from_int(24);
return true;
}
@@ -489,18 +490,18 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
if (limits->max_rate < 540000)
- min_bpp_x16 = to_bpp_x16(13);
+ min_bpp_x16 = fxp_q4_from_int(13);
else if (limits->max_rate < 810000)
- min_bpp_x16 = to_bpp_x16(10);
+ min_bpp_x16 = fxp_q4_from_int(10);
if (limits->link.min_bpp_x16 >= min_bpp_x16)
return true;
drm_dbg_kms(&i915->drm,
- "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n",
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name,
- BPP_X16_ARGS(min_bpp_x16));
+ FXP_Q4_ARGS(min_bpp_x16));
if (limits->link.max_bpp_x16 < min_bpp_x16)
return false;
@@ -1113,6 +1114,33 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
to_intel_crtc(pipe_config->uapi.crtc));
}
+static bool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp,
+ int link_rate, int lane_count)
+{
+ return intel_dp->link.mst_probed_rate == link_rate &&
+ intel_dp->link.mst_probed_lane_count == lane_count;
+}
+
+static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp,
+ int link_rate, int lane_count)
+{
+ intel_dp->link.mst_probed_rate = link_rate;
+ intel_dp->link.mst_probed_lane_count = lane_count;
+}
+
+static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (intel_mst_probed_link_params_valid(intel_dp,
+ crtc_state->port_clock, crtc_state->lane_count))
+ return;
+
+ drm_dp_mst_topology_queue_probe(&intel_dp->mst_mgr);
+
+ intel_mst_set_probed_link_params(intel_dp,
+ crtc_state->port_clock, crtc_state->lane_count);
+}
+
static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -1149,17 +1177,19 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_sink_enable_decompression(state, connector, pipe_config);
- if (first_mst_stream)
+ if (first_mst_stream) {
dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
+ intel_mst_reprobe_topology(intel_dp, pipe_config);
+ }
+
intel_dp->active_mst_links++;
ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (ret < 0)
- drm_dbg_kms(&dev_priv->drm, "Failed to create MST payload for %s: %d\n",
- connector->base.name, ret);
+ intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
/*
* Before Gen 12 this is not done as part of
@@ -1223,6 +1253,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
enum transcoder trans = pipe_config->cpu_transcoder;
bool first_mst_stream = intel_dp->active_mst_links == 1;
struct intel_crtc *pipe_crtc;
+ int ret;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
@@ -1254,8 +1285,11 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
- drm_dp_add_payload_part2(&intel_dp->mst_mgr,
- drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ ret = drm_dp_add_payload_part2(&intel_dp->mst_mgr,
+ drm_atomic_get_mst_payload_state(mst_state,
+ connector->port));
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
if (DISPLAY_VER(dev_priv) >= 12)
intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
@@ -1999,6 +2033,36 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
return false;
}
+/**
+ * intel_dp_mst_prepare_probe - Prepare an MST link for topology probing
+ * @intel_dp: DP port object
+ *
+ * Prepare an MST link for topology probing, programming the target
+ * link parameters to DPCD. This step is required for the enumeration
+ * of path resources during probing.
+ */
+void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
+{
+ int link_rate = intel_dp_max_link_rate(intel_dp);
+ int lane_count = intel_dp_max_lane_count(intel_dp);
+ u8 rate_select;
+ u8 link_bw;
+
+ if (intel_dp->link_trained)
+ return;
+
+ if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
+ return;
+
+ intel_dp_compute_rate(intel_dp, link_rate, &link_bw, &rate_select);
+
+ intel_dp_link_training_set_mode(intel_dp, link_rate, false);
+ intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, lane_count,
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd));
+
+ intel_mst_set_probed_link_params(intel_dp, link_rate, lane_count);
+}
+
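For reference, the probed-link-parameter bookkeeping above reduces to a small memoization pattern: re-probe only when the link rate or lane count changes, then record the values used. A minimal stand-alone sketch (the struct and callback are hypothetical stand-ins, not the driver's types):

	/* Hedged sketch: cache the last-probed link parameters so the
	 * expensive topology probe is queued only when they change. */
	struct link_cache { int rate; int lanes; };

	static void reprobe_if_needed(struct link_cache *c, int rate, int lanes,
				      void (*queue_probe)(void))
	{
		if (c->rate == rate && c->lanes == lanes)
			return;		/* parameters unchanged, skip the probe */
		queue_probe();		/* stands in for drm_dp_mst_topology_queue_probe() */
		c->rate = rate;
		c->lanes = lanes;
	}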
/*
* intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
* @intel_dp: DP port object
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 9e4c7679f1c3..8343804ce3f8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -27,6 +27,7 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits);
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp);
bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 6503abdc2b98..94198bc04939 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -69,7 +69,7 @@ static int get_current_link_bw(struct intel_dp *intel_dp,
static int update_tunnel_state(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool old_bw_below_dprx;
bool new_bw_below_dprx;
@@ -81,7 +81,7 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
if (ret < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -103,7 +103,7 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
!new_bw_below_dprx)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -121,20 +121,20 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
*/
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_crtc *crtc;
int tunnel_bw = 0;
int err;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int stream_bw = intel_dp_config_required_rate(crtc_state);
tunnel_bw += stream_bw;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -145,7 +145,7 @@ static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pi
err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
if (err) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -172,12 +172,12 @@ static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_dp_tunnel *tunnel;
int ret;
- tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr,
+ tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
&intel_dp->aux);
if (IS_ERR(tunnel))
return PTR_ERR(tunnel);
@@ -189,7 +189,7 @@ static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acqui
if (ret == -EOPNOTSUPP)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -266,14 +266,15 @@ bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
*/
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
+ drm_dbg_kms(display->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -295,7 +296,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool dpcd_updated)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -307,7 +308,8 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
intel_dp->tunnel_suspended = false;
- drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
+ drm_dbg_kms(display->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -347,7 +349,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
return;
out_err:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -369,12 +371,12 @@ add_inherited_tunnel(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_dp_tunnel *old_tunnel;
old_tunnel = get_inherited_tunnel(state, crtc);
if (old_tunnel) {
- drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
+ drm_WARN_ON(display->drm, old_tunnel != tunnel);
return 0;
}
@@ -394,7 +396,7 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_digital_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector =
to_intel_connector(old_conn_state->base.connector);
@@ -422,7 +424,7 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -441,12 +443,13 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
*/
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
enum pipe pipe;
if (!state->inherited_dp_tunnels)
return;
- for_each_pipe(to_i915(state->base.dev), pipe)
+ for_each_pipe(display, pipe)
if (state->inherited_dp_tunnels->ref[pipe].tunnel)
drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);
@@ -457,7 +460,7 @@ void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *s
static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
u32 pipe_mask;
int err;
@@ -466,7 +469,7 @@ static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *sta
if (err)
return err;
- drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
+ drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}
@@ -504,7 +507,7 @@ static int check_group_state(struct intel_atomic_state *state,
struct intel_connector *connector,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -512,7 +515,7 @@ static int check_group_state(struct intel_atomic_state *state,
if (!crtc_state->dp_tunnel_ref.tunnel)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -583,7 +586,7 @@ int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
const struct intel_connector *connector,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int required_rate = intel_dp_config_required_rate(crtc_state);
@@ -592,7 +595,7 @@ int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -708,7 +711,7 @@ static void queue_retry_work(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder;
encoder = intel_get_crtc_new_encoder(state, crtc_state);
@@ -716,7 +719,7 @@ static void queue_retry_work(struct intel_atomic_state *state,
if (!intel_digital_port_connected(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
drm_dp_tunnel_name(tunnel),
encoder->base.base.id,
@@ -765,7 +768,7 @@ void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
/**
* intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
- * @i915: i915 device object
+ * @display: display device
*
* Initialize the DP tunnel manager. The tunnel manager will support the
* detection/management of DP tunnels on all DP connectors, so the function
@@ -773,14 +776,14 @@ void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
*
* Return 0 in case of success, a negative error code otherwise.
*/
-int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+int intel_dp_tunnel_mgr_init(struct intel_display *display)
{
struct drm_dp_tunnel_mgr *tunnel_mgr;
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector;
int dp_connectors = 0;
- drm_connector_list_iter_begin(&i915->drm, &connector_list_iter);
+ drm_connector_list_iter_begin(display->drm, &connector_list_iter);
for_each_intel_connector_iter(connector, &connector_list_iter) {
if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -789,23 +792,23 @@ int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
}
drm_connector_list_iter_end(&connector_list_iter);
- tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors);
+ tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors);
if (IS_ERR(tunnel_mgr))
return PTR_ERR(tunnel_mgr);
- i915->display.dp_tunnel_mgr = tunnel_mgr;
+ display->dp_tunnel_mgr = tunnel_mgr;
return 0;
}
/**
* intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
- * @i915: i915 device object
+ * @display: display device
*
* Clean up the DP tunnel manager state.
*/
-void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915)
+void intel_dp_tunnel_mgr_cleanup(struct intel_display *display)
{
- drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr);
- i915->display.dp_tunnel_mgr = NULL;
+ drm_dp_tunnel_mgr_destroy(display->dp_tunnel_mgr);
+ display->dp_tunnel_mgr = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
index 08b2cba84af2..a0c00b7d3303 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -9,14 +9,13 @@
#include <linux/errno.h>
#include <linux/types.h>
-struct drm_i915_private;
struct drm_connector_state;
struct drm_modeset_acquire_ctx;
-
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
struct intel_link_bw_limits;
@@ -53,8 +52,8 @@ int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state);
-int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915);
-void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915);
+int intel_dp_tunnel_mgr_init(struct intel_display *display);
+void intel_dp_tunnel_mgr_cleanup(struct intel_display *display);
#else
@@ -121,12 +120,12 @@ intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
}
static inline int
-intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+intel_dp_tunnel_mgr_init(struct intel_display *display)
{
return 0;
}
-static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {}
+static inline void intel_dp_tunnel_mgr_cleanup(struct intel_display *display) {}
#endif /* CONFIG_DRM_I915_DP_TUNNEL */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index d67d5e2fd570..340dfce480b8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1823,6 +1823,7 @@ static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -1833,7 +1834,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
@@ -2004,6 +2005,7 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2012,7 +2014,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
/* Enable Refclk */
intel_de_write(dev_priv, DPLL(dev_priv, pipe),
@@ -2150,6 +2152,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2158,7 +2161,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
/* Enable Refclk and SSC */
intel_de_write(dev_priv, DPLL(dev_priv, pipe),
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 292d163036b1..f490b2157828 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -3339,6 +3339,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -3379,7 +3380,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
}
/* Eliminate DPLLs from consideration if reserved by HTI */
- dpll_mask &= ~intel_hti_dpll_mask(i915);
+ dpll_mask &= ~intel_hti_dpll_mask(display);
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 73a1918e2537..3a6d99044828 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -317,3 +317,7 @@ void intel_dpt_destroy(struct i915_address_space *vm)
i915_vm_put(&dpt->vm);
}
+u64 intel_dpt_offset(struct i915_vma *dpt_vma)
+{
+ return dpt_vma->node.start;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index ff18a525bfbe..1f88b0ee17e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -6,6 +6,8 @@
#ifndef __INTEL_DPT_H__
#define __INTEL_DPT_H__
+#include <linux/types.h>
+
struct drm_i915_private;
struct i915_address_space;
@@ -20,5 +22,6 @@ void intel_dpt_suspend(struct drm_i915_private *i915);
void intel_dpt_resume(struct drm_i915_private *i915);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
+u64 intel_dpt_offset(struct i915_vma *dpt_vma);
#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 2ab3765f6c06..da24e041d269 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -42,7 +43,8 @@ struct intel_dsb {
*/
unsigned int ins_start_offset;
- int dewake_scanline;
+ u32 chicken;
+ int hw_dewake_scanline;
};
/**
@@ -82,6 +84,93 @@ struct intel_dsb {
#define DSB_OPCODE_POLL 0xA
/* see DSB_REG_VALUE_MASK */
+static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /* VRR will be enabled afterwards, if necessary */
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return false;
+
+ /* VRR will have been disabled during intel_pre_plane_update() */
+ return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
+}
+
+static const struct intel_crtc_state *
+pre_commit_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /*
+ * During fastsets/etc. the transcoder is still
+ * running with the old timings at this point.
+ */
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return new_crtc_state;
+ else
+ return old_crtc_state;
+}
+
+static int dsb_vtotal(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+
+ if (pre_commit_is_vrr_active(state, crtc))
+ return crtc_state->vrr.vmax;
+ else
+ return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
+}
+
+static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ unsigned int latency = skl_watermark_max_latency(i915, 0);
+
+ return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
+ intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
+}
+
+static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+
+ return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
+}
+
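To make the dewake window concrete, a hedged numeric example (the mode and latency are made up):

	/* Hedged numeric example: vdisplay = 2160, vtotal = 2250 @ 60 Hz
	 * -> one scanline lasts ~1/(60*2250) ~ 7.4 us. A 150 us maximum
	 * watermark latency then converts to ~20 scanlines, so the dewake
	 * window is [2160 - 20, 2160) = scanlines 2140..2159, i.e. the
	 * last ~20 active lines before vblank. */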
+static int dsb_scanline_to_hw(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, int scanline)
+{
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ int vtotal = dsb_vtotal(state, crtc);
+
+ return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
+}
+
+static u32 dsb_chicken(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ if (pre_commit_is_vrr_active(state, crtc))
+ return DSB_SKIP_WAITS_EN |
+ DSB_CTRL_WAIT_SAFE_WINDOW |
+ DSB_CTRL_NO_WAIT_VBLANK |
+ DSB_INST_WAIT_SAFE_WINDOW |
+ DSB_INST_NO_WAIT_VBLANK;
+ else
+ return DSB_SKIP_WAITS_EN;
+}
+
static bool assert_dsb_has_room(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
@@ -281,6 +370,79 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb)
intel_dsb_noop(dsb, 4);
}
+static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb,
+ u32 opcode, int lower, int upper)
+{
+ u64 window = ((u64)upper << DSB_SCANLINE_UPPER_SHIFT) |
+ ((u64)lower << DSB_SCANLINE_LOWER_SHIFT);
+
+ intel_dsb_emit(dsb, lower_32_bits(window),
+ (opcode << DSB_OPCODE_SHIFT) |
+ upper_32_bits(window));
+}
+
+static void intel_dsb_wait_dsl(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int lower_in, int upper_in,
+ int lower_out, int upper_out)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+
+ lower_in = dsb_scanline_to_hw(state, crtc, lower_in);
+ upper_in = dsb_scanline_to_hw(state, crtc, upper_in);
+
+ lower_out = dsb_scanline_to_hw(state, crtc, lower_out);
+ upper_out = dsb_scanline_to_hw(state, crtc, upper_out);
+
+ if (upper_in >= lower_in)
+ intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_IN,
+ lower_in, upper_in);
+ else if (upper_out >= lower_out)
+ intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT,
+ lower_out, upper_out);
+ else
+ drm_WARN_ON(crtc->base.dev, 1); /* assert_dsl_ok() should have caught it already */
+}
+
+static void assert_dsl_ok(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int start, int end)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ int vtotal = dsb_vtotal(state, crtc);
+
+ /*
+ * Waiting for the entire frame doesn't make sense
+ * (IN == don't wait, OUT == wait forever).
+ */
+ drm_WARN(crtc->base.dev, (end - start + vtotal) % vtotal == vtotal - 1,
+ "[CRTC:%d:%s] DSB %d bad scanline window wait: %d-%d (vt=%d)\n",
+ crtc->base.base.id, crtc->base.name, dsb->id,
+ start, end, vtotal);
+}
+
+void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int start, int end)
+{
+ assert_dsl_ok(state, dsb, start, end);
+
+ intel_dsb_wait_dsl(state, dsb,
+ start, end,
+ end + 1, start - 1);
+}
+
+void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int start, int end)
+{
+ assert_dsl_ok(state, dsb, start, end);
+
+ intel_dsb_wait_dsl(state, dsb,
+ end + 1, start - 1,
+ start, end);
+}
+
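The IN/OUT selection above is easiest to see with numbers; a self-contained sketch under assumed values (vtotal = 1125, scanline_offset = 1, both made up):

	/* Hedged sketch of the wait-window arithmetic. */
	static int to_hw(int scanline, int vtotal, int offset)
	{
		/* same wraparound as dsb_scanline_to_hw() */
		return (scanline + vtotal - offset) % vtotal;
	}

	/* wait_scanline_in(start = 1120, end = 10) with vtotal = 1125,
	 * offset = 1: the IN window wraps in hardware space, since
	 * to_hw(1120) = 1119 > to_hw(10) = 9, so intel_dsb_wait_dsl()
	 * falls back to the complementary WAIT_DSL_OUT window
	 * [to_hw(11), to_hw(1119)] = [10, 1118] instead. */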
static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
u32 aligned_tail, tail;
@@ -302,8 +464,10 @@ void intel_dsb_finish(struct intel_dsb *dsb)
/*
* DSB_FORCE_DEWAKE remains active even after DSB is
* disabled, so make sure to clear it (if set during
- * intel_dsb_commit()).
+ * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
+ * well for good measure.
*/
+ intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
DSB_FORCE_DEWAKE, 0);
@@ -312,35 +476,109 @@ void intel_dsb_finish(struct intel_dsb *dsb)
intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}
-static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
+static u32 dsb_error_int_status(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- unsigned int latency = skl_watermark_max_latency(i915, 0);
- int vblank_start;
+ u32 errors;
- if (crtc_state->vrr.enable)
- vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- else
- vblank_start = intel_mode_vblank_start(adjusted_mode);
+ errors = DSB_GTT_FAULT_INT_STATUS |
+ DSB_RSPTIMEOUT_INT_STATUS |
+ DSB_POLL_ERR_INT_STATUS;
+
+ /*
+ * All the non-existing status bits operate as
+ * normal r/w bits, so any attempt to clear them
+ * will just end up setting them. Never do that, so that
+ * we don't mistake them for actual error interrupts.
+ */
+ if (DISPLAY_VER(display) >= 14)
+ errors |= DSB_ATS_FAULT_INT_STATUS;
- return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
+ return errors;
}
-static u32 dsb_chicken(struct intel_crtc *crtc)
+static u32 dsb_error_int_en(struct intel_display *display)
{
- if (crtc->mode_flags & I915_MODE_FLAG_VRR)
- return DSB_SKIP_WAITS_EN |
- DSB_CTRL_WAIT_SAFE_WINDOW |
- DSB_CTRL_NO_WAIT_VBLANK |
- DSB_INST_WAIT_SAFE_WINDOW |
- DSB_INST_NO_WAIT_VBLANK;
- else
- return DSB_SKIP_WAITS_EN;
+ u32 errors;
+
+ errors = DSB_GTT_FAULT_INT_EN |
+ DSB_RSPTIMEOUT_INT_EN |
+ DSB_POLL_ERR_INT_EN;
+
+ if (DISPLAY_VER(display) >= 14)
+ errors |= DSB_ATS_FAULT_INT_EN;
+
+ return errors;
+}
+
+static void _intel_dsb_chain(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ struct intel_dsb *chained_dsb,
+ u32 ctrl)
+{
+ struct intel_display *display = to_intel_display(state->base.dev);
+ struct intel_crtc *crtc = dsb->crtc;
+ enum pipe pipe = crtc->pipe;
+ u32 tail;
+
+ if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
+ return;
+
+ tail = chained_dsb->free_pos * 4;
+ if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
+ return;
+
+ intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
+ ctrl | DSB_ENABLE);
+
+ intel_dsb_reg_write(dsb, DSB_CHICKEN(pipe, chained_dsb->id),
+ dsb_chicken(state, crtc));
+
+ intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id),
+ dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
+ dsb_error_int_en(display));
+
+ if (ctrl & DSB_WAIT_FOR_VBLANK) {
+ int dewake_scanline = dsb_dewake_scanline_start(state, crtc);
+ int hw_dewake_scanline = dsb_scanline_to_hw(state, crtc, dewake_scanline);
+
+ intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
+ DSB_ENABLE_DEWAKE |
+ DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
+ }
+
+ intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
+ intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));
+
+ intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
+ intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);
+
+ if (ctrl & DSB_WAIT_FOR_VBLANK) {
+ /*
+ * Keep DEwake alive via the first DSB, in
+ * case we're already past dewake_scanline,
+ * and thus DSB_ENABLE_DEWAKE on the second
+ * DSB won't do its job.
+ */
+ intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(pipe, dsb->id),
+ DSB_FORCE_DEWAKE, DSB_FORCE_DEWAKE);
+
+ intel_dsb_wait_scanline_out(state, dsb,
+ dsb_dewake_scanline_start(state, crtc),
+ dsb_dewake_scanline_end(state, crtc));
+ }
+}
+
+void intel_dsb_chain(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ struct intel_dsb *chained_dsb,
+ bool wait_for_vblank)
+{
+ _intel_dsb_chain(state, dsb, chained_dsb,
+ wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}
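A plausible call sequence for the new chaining entry point (the ordering is an assumption for illustration; this patch only adds the helper):

	/* Hedged sketch: dsb0 programs and arms dsb1, which then runs at
	 * the next vblank. dsb0/dsb1 come from intel_dsb_prepare() in the
	 * caller (not shown). */
	intel_dsb_finish(dsb1);				/* seal the chained buffer first */
	intel_dsb_chain(state, dsb0, dsb1, true);	/* dsb0 enables dsb1 w/ DSB_WAIT_FOR_VBLANK */
	intel_dsb_finish(dsb0);
	intel_dsb_commit(dsb0, false);			/* kick off the chain from the CPU */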
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
- int dewake_scanline)
+ int hw_dewake_scanline)
{
struct intel_crtc *crtc = dsb->crtc;
struct intel_display *display = to_intel_display(crtc->base.dev);
@@ -361,15 +599,17 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
ctrl | DSB_ENABLE);
intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
- dsb_chicken(crtc));
+ dsb->chicken);
+
+ intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
+ dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
+ dsb_error_int_en(display));
intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
- if (dewake_scanline >= 0) {
- int diff, hw_dewake_scanline;
-
- hw_dewake_scanline = intel_crtc_scanline_to_hw(crtc, dewake_scanline);
+ if (hw_dewake_scanline >= 0) {
+ int diff, position;
intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
DSB_ENABLE_DEWAKE |
@@ -379,7 +619,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
* Force DEwake immediately if we're already past
* or close to racing past the target scanline.
*/
- diff = dewake_scanline - intel_get_crtc_scanline(crtc);
+ position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
+
+ diff = hw_dewake_scanline - position;
intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
(diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
DSB_BLOCK_DEWAKE_EXTENSION);
@@ -401,7 +643,7 @@ void intel_dsb_commit(struct intel_dsb *dsb,
{
_intel_dsb_commit(dsb,
wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
- wait_for_vblank ? dsb->dewake_scanline : -1);
+ wait_for_vblank ? dsb->hw_dewake_scanline : -1);
}
void intel_dsb_wait(struct intel_dsb *dsb)
@@ -430,6 +672,9 @@ void intel_dsb_wait(struct intel_dsb *dsb)
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);
+
+ intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
+ dsb_error_int_status(display) | DSB_PROG_INT_STATUS);
}
/**
@@ -451,8 +696,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
unsigned int max_cmds)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
intel_wakeref_t wakeref;
struct intel_dsb *dsb;
unsigned int size;
@@ -486,7 +729,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
dsb->size = size / 4; /* in dwords */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- dsb->dewake_scanline = intel_dsb_dewake_scanline(crtc_state);
+
+ dsb->chicken = dsb_chicken(state, crtc);
+ dsb->hw_dewake_scanline =
+ dsb_scanline_to_hw(state, crtc, dsb_dewake_scanline_start(state, crtc));
return dsb;
@@ -513,3 +759,18 @@ void intel_dsb_cleanup(struct intel_dsb *dsb)
intel_dsb_buffer_cleanup(&dsb->dsb_buf);
kfree(dsb);
}
+
+void intel_dsb_irq_handler(struct intel_display *display,
+ enum pipe pipe, enum intel_dsb_id dsb_id)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(display->drm), pipe);
+ u32 tmp, errors;
+
+ tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id));
+ intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp);
+
+ errors = tmp & dsb_error_int_status(display);
+ if (errors)
+ drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n",
+ crtc->base.base.id, crtc->base.name, dsb_id, errors);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index bb42749f2ea4..c352c12aa59f 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -13,8 +13,11 @@
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsb;
+enum pipe;
+
enum intel_dsb_id {
INTEL_DSB_0,
INTEL_DSB_1,
@@ -36,9 +39,22 @@ void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
void intel_dsb_noop(struct intel_dsb *dsb, int count);
void intel_dsb_nonpost_start(struct intel_dsb *dsb);
void intel_dsb_nonpost_end(struct intel_dsb *dsb);
+void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int lower, int upper);
+void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ int lower, int upper);
+void intel_dsb_chain(struct intel_atomic_state *state,
+ struct intel_dsb *dsb,
+ struct intel_dsb *chained_dsb,
+ bool wait_for_vblank);
void intel_dsb_commit(struct intel_dsb *dsb,
bool wait_for_vblank);
void intel_dsb_wait(struct intel_dsb *dsb);
+void intel_dsb_irq_handler(struct intel_display *display,
+ enum pipe pipe, enum intel_dsb_id dsb_id);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index e99c94edfaae..e8ba4ccd99d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -66,7 +66,7 @@ struct intel_dsi {
/* number of DSI lanes */
unsigned int lane_count;
- /* i2c bus associated with the slave device */
+ /* i2c bus associated with the target device */
int i2c_bus_num;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 072ef1d62bda..d8951464bd2b 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -56,7 +56,7 @@
#define MIPI_PORT_SHIFT 3
struct i2c_adapter_lookup {
- u16 slave_addr;
+ u16 target_addr;
struct intel_dsi *intel_dsi;
acpi_handle dev_handle;
};
@@ -443,7 +443,7 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
if (!i2c_acpi_get_i2c_resource(ares, &sb))
return 1;
- if (lookup->slave_addr != sb->slave_address)
+ if (lookup->target_addr != sb->slave_address)
return 1;
status = acpi_get_handle(lookup->dev_handle,
@@ -460,12 +460,12 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
}
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
- const u16 slave_addr)
+ const u16 target_addr)
{
struct drm_device *drm_dev = intel_dsi->base.base.dev;
struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev);
struct i2c_adapter_lookup lookup = {
- .slave_addr = slave_addr,
+ .target_addr = target_addr,
.intel_dsi = intel_dsi,
.dev_handle = acpi_device_handle(adev),
};
@@ -476,7 +476,7 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
}
#else
static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
- const u16 slave_addr)
+ const u16 target_addr)
{
}
#endif
@@ -488,17 +488,17 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
struct i2c_msg msg;
int ret;
u8 vbt_i2c_bus_num = *(data + 2);
- u16 slave_addr = *(u16 *)(data + 3);
+ u16 target_addr = *(u16 *)(data + 3);
u8 reg_offset = *(data + 5);
u8 payload_size = *(data + 6);
u8 *payload_data;
- drm_dbg_kms(&i915->drm, "bus %d client-addr 0x%02x reg 0x%02x data %*ph\n",
- vbt_i2c_bus_num, slave_addr, reg_offset, payload_size, data + 7);
+ drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
+ vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7);
if (intel_dsi->i2c_bus_num < 0) {
intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
- i2c_acpi_find_adapter(intel_dsi, slave_addr);
+ i2c_acpi_find_adapter(intel_dsi, target_addr);
}
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
@@ -514,7 +514,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
payload_data[0] = reg_offset;
memcpy(&payload_data[1], (data + 7), payload_size);
- msg.addr = slave_addr;
+ msg.addr = target_addr;
msg.flags = 0;
msg.len = payload_size + 1;
msg.buf = payload_data;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 091824334f26..12e7628cbecf 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -60,42 +60,42 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
.port = PORT_C,
- .slave_addr = SIL164_ADDR,
+ .target_addr = SIL164_ADDR,
.dev_ops = &sil164_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.port = PORT_C,
- .slave_addr = CH7xxx_ADDR,
+ .target_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.port = PORT_C,
- .slave_addr = 0x75, /* For some ch7010 */
+ .target_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
.port = PORT_A,
- .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+ .target_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410",
.port = PORT_C,
- .slave_addr = TFP410_ADDR,
+ .target_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
.port = PORT_C,
- .slave_addr = 0x75,
+ .target_addr = 0x75,
.gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops,
},
@@ -103,7 +103,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
.name = "ns2501",
.port = PORT_B,
- .slave_addr = NS2501_ADDR,
+ .target_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
},
};
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index af7b04539b93..4bf476656b8c 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -38,7 +38,7 @@ struct intel_dvo_device {
enum port port;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
- int slave_addr;
+ int target_addr;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index f23547a88b1f..5be7bb43e2e0 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -163,6 +163,14 @@ struct intel_modifier_desc {
static const struct intel_modifier_desc intel_modifiers[] = {
{
+ .modifier = I915_FORMAT_MOD_4_TILED_LNL_CCS,
+ .display_ver = { 20, -1 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4,
+ }, {
+ .modifier = I915_FORMAT_MOD_4_TILED_BMG_CCS,
+ .display_ver = { 14, -1 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_NEED64K_PHYS,
+ }, {
.modifier = I915_FORMAT_MOD_4_TILED_MTL_MC_CCS,
.display_ver = { 14, 14 },
.plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC,
@@ -412,6 +420,24 @@ bool intel_fb_is_mc_ccs_modifier(u64 modifier)
INTEL_PLANE_CAP_CCS_MC);
}
+/**
+ * intel_fb_needs_64k_phys - Check if modifier requires 64k physical placement.
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * %true if @modifier requires 64k aligned physical pages.
+ */
+bool intel_fb_needs_64k_phys(u64 modifier)
+{
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
+
+ if (!md)
+ return false;
+
+ return plane_caps_contain_any(md->plane_caps,
+ INTEL_PLANE_CAP_NEED64K_PHYS);
+}
+
static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
u8 display_ver_from, u8 display_ver_until)
{
@@ -437,6 +463,14 @@ static bool plane_has_modifier(struct drm_i915_private *i915,
HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes)
return false;
+ if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
+ (GRAPHICS_VER(i915) < 20 || !IS_DGFX(i915)))
+ return false;
+
+ if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
+ (GRAPHICS_VER(i915) < 20 || IS_DGFX(i915)))
+ return false;
+
return true;
}
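For reference, the two new checks above gate the Xe2 CCS modifiers by platform class:

	/* Derived from the checks above:
	 *   I915_FORMAT_MOD_4_TILED_BMG_CCS -> GRAPHICS_VER >= 20 && IS_DGFX(i915)
	 *       (discrete Xe2, e.g. Battlemage; also flagged NEED64K_PHYS)
	 *   I915_FORMAT_MOD_4_TILED_LNL_CCS -> GRAPHICS_VER >= 20 && !IS_DGFX(i915)
	 *       (integrated Xe2, e.g. Lunar Lake)
	 */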
@@ -653,6 +687,8 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
else
return 512;
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 6dee0c8b7f22..10de437e8ef8 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -28,11 +28,13 @@ struct intel_plane_state;
#define INTEL_PLANE_CAP_TILING_Y BIT(4)
#define INTEL_PLANE_CAP_TILING_Yf BIT(5)
#define INTEL_PLANE_CAP_TILING_4 BIT(6)
+#define INTEL_PLANE_CAP_NEED64K_PHYS BIT(7)
bool intel_fb_is_tiled_modifier(u64 modifier);
bool intel_fb_is_ccs_modifier(u64 modifier);
bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
bool intel_fb_is_mc_ccs_modifier(u64 modifier);
+bool intel_fb_needs_64k_phys(u64 modifier);
bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 67116c9f1464..52b79bacef4d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -56,17 +56,18 @@
#include "intel_display_device.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_fbc.h"
#include "intel_fbc_regs.h"
#include "intel_frontbuffer.h"
-#define for_each_fbc_id(__dev_priv, __fbc_id) \
+#define for_each_fbc_id(__display, __fbc_id) \
for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
- for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))
+ for_each_if(DISPLAY_RUNTIME_INFO(__display)->fbc_mask & BIT(__fbc_id))
-#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
- for_each_fbc_id((__dev_priv), (__fbc_id)) \
- for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])
+#define for_each_intel_fbc(__display, __fbc, __fbc_id) \
+ for_each_fbc_id((__display), (__fbc_id)) \
+ for_each_if((__fbc) = (__display)->fbc[(__fbc_id)])
struct intel_fbc_funcs {
void (*activate)(struct intel_fbc *fbc);
@@ -89,7 +90,7 @@ struct intel_fbc_state {
};
struct intel_fbc {
- struct drm_i915_private *i915;
+ struct intel_display *display;
const struct intel_fbc_funcs *funcs;
/*
@@ -139,21 +140,24 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane
return stride;
}
+static unsigned int intel_fbc_cfb_cpp(void)
+{
+ return 4; /* FBC always 4 bytes per pixel */
+}
+
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
+static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state)
{
- unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+ unsigned int cpp = intel_fbc_cfb_cpp();
return intel_fbc_plane_stride(plane_state) * cpp;
}
/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
+static unsigned int skl_fbc_min_cfb_stride(struct intel_display *display,
+ unsigned int cpp, unsigned int width)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
unsigned int limit = 4; /* 1:4 compression limit is the worst case */
- unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
- unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
unsigned int height = 4; /* FBC segment is 4 lines */
unsigned int stride;
@@ -164,7 +168,7 @@ static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane
* Wa_16011863758: icl+
* Avoid some hardware segment address miscalculation.
*/
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
stride += 64;
/*
@@ -178,40 +182,67 @@ static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane
}
/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
+static unsigned int _intel_fbc_cfb_stride(struct intel_display *display,
+ unsigned int cpp, unsigned int width,
+ unsigned int stride)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- unsigned int stride = _intel_fbc_cfb_stride(plane_state);
-
/*
* At least some of the platforms require each 4 line segment to
* be 512 byte aligned. Aligning each line to 512 bytes guarantees
* that regardless of the compression limit we choose later.
*/
- if (DISPLAY_VER(i915) >= 9)
- return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
+ if (DISPLAY_VER(display) >= 9)
+ return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(display, cpp, width));
else
return stride;
}
-static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
+static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
+ unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ unsigned int cpp = intel_fbc_cfb_cpp();
- if (DISPLAY_VER(i915) == 7)
- lines = min(lines, 2048);
- else if (DISPLAY_VER(i915) >= 8)
- lines = min(lines, 2560);
+ return _intel_fbc_cfb_stride(display, cpp, width, stride);
+}
- return lines * intel_fbc_cfb_stride(plane_state);
+/*
+ * Maximum height the hardware will compress; on HSW+,
+ * additional lines (up to the actual plane height) will
+ * remain uncompressed.
+ */
+static unsigned int intel_fbc_max_cfb_height(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 8)
+ return 2560;
+ else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ return 2048;
+ else
+ return 1536;
+}
+
+static unsigned int _intel_fbc_cfb_size(struct intel_display *display,
+ unsigned int height, unsigned int stride)
+{
+ return min(height, intel_fbc_max_cfb_height(display)) * stride;
+}
+
+static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ unsigned int height = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ return _intel_fbc_cfb_size(display, height, intel_fbc_cfb_stride(plane_state));
}
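A hedged worked example of the cfb sizing above (the plane dimensions are made up):

	/* 3840x2160 plane, cpp = 4 (FBC always compresses 4 bytes/px):
	 *   plane cfb stride = 3840 * 4          = 15360 bytes
	 *   aligned stride   = ALIGN(15360, 512) = 15360 (already aligned)
	 *   capped height    = min(2160, 2560)   = 2160 on DISPLAY_VER >= 8
	 *   cfb size         = 2160 * 15360      = 33177600 bytes (~31.6 MiB)
	 * (the skl+ min-stride workaround can raise the aligned stride further) */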
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
- unsigned int stride = _intel_fbc_cfb_stride(plane_state);
+ unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
/*
@@ -222,23 +253,31 @@ static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_s
* we always need to use the override there.
*/
if (stride != stride_aligned ||
- (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
+ (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
return stride_aligned * 4 / 64;
return 0;
}
+static bool intel_fbc_has_fences(struct intel_display *display)
+{
+ struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm);
+
+ return intel_gt_support_legacy_fencing(to_gt(i915));
+}
+
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int cfb_stride;
u32 fbc_ctl;
cfb_stride = fbc_state->cfb_stride / fbc->limit;
/* FBC_CTL wants 32B or 64B units */
- if (DISPLAY_VER(i915) == 2)
+ if (DISPLAY_VER(display) == 2)
cfb_stride = (cfb_stride / 32) - 1;
else
cfb_stride = (cfb_stride / 64) - 1;
@@ -272,21 +311,21 @@ static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 fbc_ctl;
/* Disable compression */
- fbc_ctl = intel_de_read(i915, FBC_CONTROL);
+ fbc_ctl = intel_de_read(display, FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
- intel_de_write(i915, FBC_CONTROL, fbc_ctl);
+ intel_de_write(display, FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_de_wait_for_clear(i915, FBC_STATUS,
+ if (intel_de_wait_for_clear(display, FBC_STATUS,
FBC_STAT_COMPRESSING, 10)) {
- drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
+ drm_dbg_kms(display->drm, "FBC idle timed out\n");
return;
}
}
@@ -294,32 +333,32 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
int i;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- intel_de_write(i915, FBC_TAG(i), 0);
+ intel_de_write(display, FBC_TAG(i), 0);
- if (DISPLAY_VER(i915) == 4) {
- intel_de_write(i915, FBC_CONTROL2,
+ if (DISPLAY_VER(display) == 4) {
+ intel_de_write(display, FBC_CONTROL2,
i965_fbc_ctl2(fbc));
- intel_de_write(i915, FBC_FENCE_OFF,
+ intel_de_write(display, FBC_FENCE_OFF,
fbc_state->fence_y_offset);
}
- intel_de_write(i915, FBC_CONTROL,
+ intel_de_write(display, FBC_CONTROL,
FBC_CTL_EN | i8xx_fbc_ctl(fbc));
}
static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
+ return intel_de_read(fbc->display, FBC_CONTROL) & FBC_CTL_EN;
}
static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, FBC_STATUS) &
+ return intel_de_read(fbc->display, FBC_STATUS) &
(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
}
@@ -327,7 +366,7 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
struct intel_fbc_state *fbc_state = &fbc->state;
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
- struct drm_i915_private *dev_priv = fbc->i915;
+ struct drm_i915_private *dev_priv = to_i915(fbc->display->drm);
intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane),
intel_de_read_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane)));
@@ -335,13 +374,14 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_fb),
U32_MAX));
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_llb),
U32_MAX));
@@ -364,7 +404,7 @@ static void i965_fbc_nuke(struct intel_fbc *fbc)
{
struct intel_fbc_state *fbc_state = &fbc->state;
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
- struct drm_i915_private *dev_priv = fbc->i915;
+ struct drm_i915_private *dev_priv = to_i915(fbc->display->drm);
intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
intel_de_read_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane)));
@@ -397,7 +437,8 @@ static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dpfc_ctl;
dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
@@ -409,7 +450,7 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
if (fbc_state->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;
- if (DISPLAY_VER(i915) < 6)
+ if (DISPLAY_VER(display) < 6)
dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
}
@@ -419,43 +460,43 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
static void g4x_fbc_activate(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, DPFC_FENCE_YOFF,
+ intel_de_write(display, DPFC_FENCE_YOFF,
fbc_state->fence_y_offset);
- intel_de_write(i915, DPFC_CONTROL,
+ intel_de_write(display, DPFC_CONTROL,
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
static void g4x_fbc_deactivate(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(display, DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(display, DPFC_CONTROL, dpfc_ctl);
}
}
static bool g4x_fbc_is_active(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(fbc->display, DPFC_CONTROL) & DPFC_CTL_EN;
}
static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+ return intel_de_read(fbc->display, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
}
static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, DPFC_CB_BASE,
+ intel_de_write(display, DPFC_CB_BASE,
i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
@@ -471,43 +512,43 @@ static const struct intel_fbc_funcs g4x_fbc_funcs = {
static void ilk_fbc_activate(struct intel_fbc *fbc)
{
struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
+ intel_de_write(display, ILK_DPFC_FENCE_YOFF(fbc->id),
fbc_state->fence_y_offset);
- intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
+ intel_de_write(display, ILK_DPFC_CONTROL(fbc->id),
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
+ dpfc_ctl = intel_de_read(display, ILK_DPFC_CONTROL(fbc->id));
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+ intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
}
}
static bool ilk_fbc_is_active(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
+ return intel_de_read(fbc->display, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
}
static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
+ return intel_de_read(fbc->display, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
}
static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id),
+ intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id),
i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
@@ -523,14 +564,14 @@ static const struct intel_fbc_funcs ilk_fbc_funcs = {
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 ctl = 0;
if (fbc_state->fence_id >= 0)
ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);
- intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
- intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
+ intel_de_write(display, SNB_DPFC_CTL_SA, ctl);
+ intel_de_write(display, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
}
static void snb_fbc_activate(struct intel_fbc *fbc)
@@ -542,10 +583,10 @@ static void snb_fbc_activate(struct intel_fbc *fbc)
static void snb_fbc_nuke(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
- intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
- intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
+ intel_de_write(display, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
+ intel_de_posting_read(display, MSG_FBC_REND_STATE(fbc->id));
}
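
snb_fbc_nuke() above also shows an MMIO idiom the conversion preserves: after writing MSG_FBC_REND_STATE, intel_de_posting_read() reads the same register back, forcing the posted (bus-buffered) write to reach the hardware before the function returns. A sketch of the pattern with hypothetical accessors — the register offset and bit value are illustrative only:

	#include <stdint.h>

	static volatile uint32_t fake_regs[256];	/* stand-in MMIO space */

	#define MSG_FBC_REND_STATE	0x10		/* illustrative offset */
	#define FBC_REND_NUKE		(1u << 2)	/* illustrative bit */

	static void mmio_write32(unsigned int reg, uint32_t val)
	{
		fake_regs[reg] = val;
	}

	static uint32_t mmio_read32(unsigned int reg)
	{
		return fake_regs[reg];
	}

	static void fbc_nuke(void)
	{
		mmio_write32(MSG_FBC_REND_STATE, FBC_REND_NUKE);
		/* Posting read: flush the write so the nuke is guaranteed
		 * to have hit the hardware before we go on. */
		(void)mmio_read32(MSG_FBC_REND_STATE);
	}

	int main(void)
	{
		fbc_nuke();
		return 0;
	}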
static const struct intel_fbc_funcs snb_fbc_funcs = {
@@ -560,20 +601,20 @@ static const struct intel_fbc_funcs snb_fbc_funcs = {
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 val = 0;
if (fbc_state->override_cfb_stride)
val |= FBC_STRIDE_OVERRIDE |
FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
- intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
+ intel_de_write(display, GLK_FBC_STRIDE(fbc->id), val);
}
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 val = 0;
/* Display WA #0529: skl, kbl, bxt. */
@@ -581,7 +622,7 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
val |= CHICKEN_FBC_STRIDE_OVERRIDE |
CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
- intel_de_rmw(i915, CHICKEN_MISC_4,
+ intel_de_rmw(display, CHICKEN_MISC_4,
CHICKEN_FBC_STRIDE_OVERRIDE |
CHICKEN_FBC_STRIDE_MASK, val);
}
@@ -589,7 +630,8 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
const struct intel_fbc_state *fbc_state = &fbc->state;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dpfc_ctl;
dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
@@ -597,7 +639,7 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
if (IS_IVYBRIDGE(i915))
dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
dpfc_ctl |= DPFC_CTL_PLANE_BINDING(fbc_state->plane->id);
if (fbc_state->fence_id >= 0)
@@ -611,35 +653,35 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
u32 dpfc_ctl;
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
glk_fbc_program_cfb_stride(fbc);
- else if (DISPLAY_VER(i915) == 9)
+ else if (DISPLAY_VER(display) == 9)
skl_fbc_program_cfb_stride(fbc);
- if (intel_gt_support_legacy_fencing(to_gt(i915)))
+ if (intel_fbc_has_fences(display))
snb_fbc_program_fence(fbc);
/* wa_14019417088 Alternative WA*/
dpfc_ctl = ivb_dpfc_ctl(fbc);
- if (DISPLAY_VER(i915) >= 20)
- intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
- intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
+ intel_de_write(display, ILK_DPFC_CONTROL(fbc->id),
DPFC_CTL_EN | dpfc_ctl);
}
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
{
- return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
+ return intel_de_read(fbc->display, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
}
static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
bool enable)
{
- intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
+ intel_de_rmw(fbc->display, ILK_DPFC_CONTROL(fbc->id),
DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
}
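
ivb_fbc_set_false_color() and the workaround hunks further down lean on intel_de_rmw(reg, clear, set): read the register, drop the bits in clear, OR in set, write the result back, and return the old value. Passing the same bit as the clear mask and as a conditional set value toggles exactly that bit while leaving the rest of the register alone. The semantics in a self-contained sketch (register storage is a stand-in):

	#include <stdint.h>

	static uint32_t reg_value;	/* stands in for one display register */

	#define DPFC_CTL_FALSE_COLOR	(1u << 10)	/* illustrative bit */

	/* Same contract as intel_de_rmw(): returns the pre-update value. */
	static uint32_t rmw32(uint32_t *reg, uint32_t clear, uint32_t set)
	{
		uint32_t old = *reg;

		*reg = (old & ~clear) | set;
		return old;
	}

	static void set_false_color(int enable)
	{
		rmw32(&reg_value, DPFC_CTL_FALSE_COLOR,
		      enable ? DPFC_CTL_FALSE_COLOR : 0);
	}

	int main(void)
	{
		set_false_color(1);
		set_false_color(0);
		return 0;
	}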
@@ -684,10 +726,10 @@ static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
static void intel_fbc_nuke(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
lockdep_assert_held(&fbc->lock);
- drm_WARN_ON(&i915->drm, fbc->flip_pending);
+ drm_WARN_ON(display->drm, fbc->flip_pending);
trace_intel_fbc_nuke(fbc->state.plane);
@@ -714,16 +756,19 @@ static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
fbc->no_fbc_reason = reason;
}
-static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
+static u64 intel_fbc_cfb_base_max(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
return BIT_ULL(28);
else
return BIT_ULL(32);
}
-static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
+static u64 intel_fbc_stolen_end(struct intel_display *display)
{
+ struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm);
u64 end;
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
@@ -731,12 +776,12 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(i915) ||
- (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
+ (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)))
end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
else
end = U64_MAX;
- return min(end, intel_fbc_cfb_base_max(i915));
+ return min(end, intel_fbc_cfb_base_max(display));
}
static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
@@ -744,8 +789,10 @@ static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
}
-static int intel_fbc_max_limit(struct drm_i915_private *i915)
+static int intel_fbc_max_limit(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(i915))
return 1;
@@ -760,8 +807,9 @@ static int intel_fbc_max_limit(struct drm_i915_private *i915)
static int find_compression_limit(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
- struct drm_i915_private *i915 = fbc->i915;
- u64 end = intel_fbc_stolen_end(i915);
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ u64 end = intel_fbc_stolen_end(display);
int ret, limit = min_limit;
size /= limit;
@@ -772,7 +820,7 @@ static int find_compression_limit(struct intel_fbc *fbc,
if (ret == 0)
return limit;
- for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
+ for (; limit <= intel_fbc_max_limit(display); limit <<= 1) {
ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
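
find_compression_limit() first tries to reserve the full CFB at the caller's minimum compression limit; when stolen memory is too tight it walks toward intel_fbc_max_limit(), halving the allocation while the limit doubles, trading guaranteed compression ratio for a smaller footprint. A self-contained model of the search — the stolen allocator is a stand-in, and note that the first retry runs with the limit still unchanged but the size already halved, exactly like the size >>= 1 inside the loop above:

	#include <stdio.h>

	static unsigned int avail = 1024;	/* pretend stolen space left */

	/* Stand-in for i915_gem_stolen_insert_node_in_range(). */
	static int try_reserve(unsigned int size)
	{
		return size <= avail ? 0 : -1;
	}

	static int find_compression_limit(unsigned int size, int min_limit,
					  int max_limit)
	{
		int limit = min_limit;

		size /= limit;
		if (try_reserve(size) == 0)
			return limit;

		for (; limit <= max_limit; limit <<= 1) {
			size >>= 1;
			if (try_reserve(size) == 0)
				return limit;
		}
		return 0;	/* nothing fits */
	}

	int main(void)
	{
		/* 4096 bytes don't fit in 1024: 4096 -> 2048 -> 1024,
		 * so this prints "limit = 2". */
		printf("limit = %d\n", find_compression_limit(4096, 1, 4));
		return 0;
	}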
@@ -785,15 +833,16 @@ static int find_compression_limit(struct intel_fbc *fbc,
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
i915_gem_stolen_node_allocated(&fbc->compressed_fb));
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
i915_gem_stolen_node_allocated(&fbc->compressed_llb));
- if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
+ if (DISPLAY_VER(display) < 5 && !IS_G4X(i915)) {
ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
4096, 4096);
if (ret)
@@ -804,12 +853,12 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
if (!ret)
goto err_llb;
else if (ret > min_limit)
- drm_info_once(&i915->drm,
+ drm_info_once(display->drm,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
fbc->limit = ret;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
return 0;
@@ -819,7 +868,8 @@ err_llb:
i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
if (i915_gem_stolen_initialized(i915))
- drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ drm_info_once(display->drm,
+ "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@@ -830,14 +880,15 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
/*
* WaFbcHighMemBwCorruptionAvoidance:skl,bxt
* Display WA #0883: skl,bxt
*/
- intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_DISABLE_DUMMY0);
}
@@ -847,24 +898,25 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
* WaFbcNukeOnHostModify:skl,kbl,cfl
* Display WA #0873: skl,kbl,cfl
*/
- intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_NUKE_ON_ANY_MODIFICATION);
}
/* Wa_1409120013:icl,jsl,tgl,dg1 */
- if (IS_DISPLAY_VER(i915, 11, 12))
- intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ if (IS_DISPLAY_VER(display, 11, 12))
+ intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915))
- intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
+ if (DISPLAY_VER(display) >= 11 && !IS_DG2(i915))
+ intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
@@ -875,12 +927,12 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}
-void intel_fbc_cleanup(struct drm_i915_private *i915)
+void intel_fbc_cleanup(struct intel_display *display)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id) {
+ for_each_intel_fbc(display, fbc, fbc_id) {
mutex_lock(&fbc->lock);
__intel_fbc_cleanup_cfb(fbc);
mutex_unlock(&fbc->lock);
@@ -932,15 +984,16 @@ static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return icl_fbc_stride_is_valid(plane_state);
- else if (DISPLAY_VER(i915) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
return skl_fbc_stride_is_valid(plane_state);
- else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
return g4x_fbc_stride_is_valid(plane_state);
- else if (DISPLAY_VER(i915) == 4)
+ else if (DISPLAY_VER(display) == 4)
return i965_fbc_stride_is_valid(plane_state);
else
return i8xx_fbc_stride_is_valid(plane_state);
@@ -948,7 +1001,7 @@ static bool stride_is_valid(const struct intel_plane_state *plane_state)
static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->format->format) {
@@ -958,7 +1011,7 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
- if (DISPLAY_VER(i915) == 2)
+ if (DISPLAY_VER(display) == 2)
return false;
return true;
default:
@@ -968,7 +1021,8 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->format->format) {
@@ -1003,11 +1057,12 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return lnl_fbc_pixel_format_is_valid(plane_state);
- else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
return g4x_fbc_pixel_format_is_valid(plane_state);
else
return i8xx_fbc_pixel_format_is_valid(plane_state);
@@ -1037,43 +1092,52 @@ static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return skl_fbc_rotation_is_valid(plane_state);
- else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
return g4x_fbc_rotation_is_valid(plane_state);
else
return i8xx_fbc_rotation_is_valid(plane_state);
}
+static void intel_fbc_max_surface_size(struct intel_display *display,
+ unsigned int *w, unsigned int *h)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 11) {
+ *w = 8192;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 10) {
+ *w = 5120;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 7) {
+ *w = 4096;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) {
+ *w = 4096;
+ *h = 2048;
+ } else {
+ *w = 2048;
+ *h = 1536;
+ }
+}
+
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
* the X and Y offset registers. That's why we include the src x/y offsets
* instead of just looking at the plane size.
*/
-static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
+static bool intel_fbc_surface_size_ok(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
unsigned int effective_w, effective_h, max_w, max_h;
- if (DISPLAY_VER(i915) >= 11) {
- max_w = 8192;
- max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 10) {
- max_w = 5120;
- max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 7) {
- max_w = 4096;
- max_h = 4096;
- } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
- max_w = 4096;
- max_h = 2048;
- } else {
- max_w = 2048;
- max_h = 1536;
- }
+ intel_fbc_max_surface_size(display, &max_w, &max_h);
effective_w = plane_state->view.color_plane[0].x +
(drm_rect_width(&plane_state->uapi.src) >> 16);
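
The coordinates in plane_state->uapi.src are 16.16 fixed point, so drm_rect_width(...) >> 16 recovers whole pixels, and the color-plane x/y offsets are added in because — per the comment above — hardware tracking watches everything from the programmed base address onward, not just the visible plane. The arithmetic in isolation:

	#include <stdio.h>

	/* 16.16 fixed point: high 16 bits whole pixels, low 16 fractional. */
	static unsigned int fp16_to_int(unsigned int v)
	{
		return v >> 16;
	}

	int main(void)
	{
		unsigned int src_w = 1920u << 16;	/* 1920.0 pixels */
		unsigned int x_off = 8;			/* plane x offset */
		unsigned int max_w = 4096, effective_w;

		/* The offset counts against the limit because tracking
		 * starts at the base address, not at the x/y offset. */
		effective_w = x_off + fp16_to_int(src_w);

		printf("effective_w=%u ok=%d\n",
		       effective_w, effective_w <= max_w);
		return 0;
	}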
@@ -1083,24 +1147,32 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
return effective_w <= max_w && effective_h <= max_h;
}
-static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
+static void intel_fbc_max_plane_size(struct intel_display *display,
+ unsigned int *w, unsigned int *h)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- unsigned int w, h, max_w, max_h;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (DISPLAY_VER(i915) >= 10) {
- max_w = 5120;
- max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
- max_w = 4096;
- max_h = 4096;
- } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
- max_w = 4096;
- max_h = 2048;
+ if (DISPLAY_VER(display) >= 10) {
+ *w = 5120;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(i915)) {
+ *w = 4096;
+ *h = 4096;
+ } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) {
+ *w = 4096;
+ *h = 2048;
} else {
- max_w = 2048;
- max_h = 1536;
+ *w = 2048;
+ *h = 1536;
}
+}
+
+static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
+ unsigned int w, h, max_w, max_h;
+
+ intel_fbc_max_plane_size(display, &max_w, &max_h);
w = drm_rect_width(&plane_state->uapi.src) >> 16;
h = drm_rect_height(&plane_state->uapi.src) >> 16;
@@ -1122,9 +1194,9 @@ static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return skl_fbc_tiling_valid(plane_state);
else
return i8xx_fbc_tiling_valid(plane_state);
@@ -1134,7 +1206,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *plane_state =
@@ -1152,8 +1224,8 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
- drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
- !intel_gt_support_legacy_fencing(to_gt(i915)));
+ drm_WARN_ON(display->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ !intel_fbc_has_fences(display));
if (plane_state->flags & PLANE_HAS_FENCE)
fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma);
@@ -1167,7 +1239,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
/*
* The use of a CPU fence is one of two ways to detect writes by the
@@ -1181,7 +1253,7 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
* so have no fence associated with it) due to aperture constraints
* at the time of pinning.
*/
- return DISPLAY_VER(i915) >= 9 ||
+ return DISPLAY_VER(display) >= 9 ||
(plane_state->flags & PLANE_HAS_FENCE &&
i915_vma_fence_id(plane_state->ggtt_vma) != -1);
}
@@ -1206,7 +1278,8 @@ static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state->base.dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1227,7 +1300,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!i915->display.params.enable_fbc) {
+ if (!display->params.enable_fbc) {
plane_state->no_fbc_reason = "disabled per module param or by default";
return 0;
}
@@ -1237,6 +1310,11 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+ if (intel_display_needs_wa_16023588340(i915)) {
+ plane_state->no_fbc_reason = "Wa_16023588340";
+ return 0;
+ }
+
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
plane_state->no_fbc_reason = "VT-d enabled";
@@ -1260,15 +1338,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*/
- if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_sel_update &&
+ if (IS_DISPLAY_VER(display, 12, 14) && crtc_state->has_sel_update &&
!crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR2 enabled";
return 0;
}
/* Wa_14016291713 */
- if ((IS_DISPLAY_VER(i915, 12, 13) ||
- IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) &&
+ if ((IS_DISPLAY_VER(display, 12, 13) ||
+ IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) &&
crtc_state->has_psr && !crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
return 0;
@@ -1294,7 +1372,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (DISPLAY_VER(i915) < 20 &&
+ if (DISPLAY_VER(display) < 20 &&
plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
fb->format->has_alpha) {
plane_state->no_fbc_reason = "per-pixel alpha not supported";
@@ -1306,7 +1384,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ if (!intel_fbc_surface_size_ok(plane_state)) {
plane_state->no_fbc_reason = "surface size too big";
return 0;
}
@@ -1316,14 +1394,14 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (DISPLAY_VER(i915) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
plane_state->view.color_plane[0].y & 3) {
plane_state->no_fbc_reason = "plane start Y offset misaligned";
return 0;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
(plane_state->view.color_plane[0].y +
(drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
@@ -1399,7 +1477,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state->base.dev);
struct intel_fbc *fbc = plane->fbc;
bool need_vblank_wait = false;
@@ -1425,7 +1503,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
* and skipping the extra vblank wait before the plane update
* if at least one frame has already passed.
*/
- if (fbc->activated && DISPLAY_VER(i915) >= 10)
+ if (fbc->activated && DISPLAY_VER(display) >= 10)
need_vblank_wait = true;
fbc->activated = false;
@@ -1459,13 +1537,13 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state,
static void __intel_fbc_disable(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
struct intel_plane *plane = fbc->state.plane;
lockdep_assert_held(&fbc->lock);
- drm_WARN_ON(&i915->drm, fbc->active);
+ drm_WARN_ON(display->drm, fbc->active);
- drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
+ drm_dbg_kms(display->drm, "Disabling FBC on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
__intel_fbc_cleanup_cfb(fbc);
@@ -1542,7 +1620,7 @@ void intel_fbc_invalidate(struct drm_i915_private *i915,
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id)
+ for_each_intel_fbc(&i915->display, fbc, fbc_id)
__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
}
@@ -1581,7 +1659,7 @@ void intel_fbc_flush(struct drm_i915_private *i915,
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id)
+ for_each_intel_fbc(&i915->display, fbc, fbc_id)
__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}
@@ -1606,7 +1684,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state->base.dev);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = plane->fbc;
@@ -1625,7 +1703,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
__intel_fbc_disable(fbc);
}
- drm_WARN_ON(&i915->drm, fbc->active);
+ drm_WARN_ON(display->drm, fbc->active);
fbc->no_fbc_reason = plane_state->no_fbc_reason;
if (fbc->no_fbc_reason)
@@ -1647,7 +1725,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
return;
}
- drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
+ drm_dbg_kms(display->drm, "Enabling FBC on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
@@ -1665,10 +1743,10 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
*/
void intel_fbc_disable(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc->base.dev);
struct intel_plane *plane;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_fbc *fbc = plane->fbc;
if (!fbc || plane->pipe != crtc->pipe)
@@ -1713,7 +1791,8 @@ void intel_fbc_update(struct intel_atomic_state *state,
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
mutex_lock(&fbc->lock);
@@ -1721,7 +1800,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
if (fbc->underrun_detected || !fbc->state.plane)
goto out;
- drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
+ drm_dbg_kms(display->drm, "Disabling FBC due to FIFO underrun.\n");
fbc->underrun_detected = true;
intel_fbc_deactivate(fbc, "FIFO underrun");
@@ -1734,14 +1813,14 @@ out:
static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
{
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
cancel_work_sync(&fbc->underrun_work);
mutex_lock(&fbc->lock);
if (fbc->underrun_detected) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Re-allowing FBC after fifo underrun\n");
fbc->no_fbc_reason = "FIFO underrun cleared";
}
@@ -1752,22 +1831,24 @@ static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
/*
* intel_fbc_reset_underrun - reset FBC fifo underrun status.
- * @i915: the i915 device
+ * @display: display
*
* See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
* want to re-enable FBC after an underrun to increase test coverage.
*/
-void intel_fbc_reset_underrun(struct drm_i915_private *i915)
+void intel_fbc_reset_underrun(struct intel_display *display)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id)
+ for_each_intel_fbc(display, fbc, fbc_id)
__intel_fbc_reset_underrun(fbc);
}
static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
+ struct drm_i915_private *i915 = to_i915(fbc->display->drm);
+
/*
* There's no guarantee that underrun_detected won't be set to true
* right after this check and before the work is scheduled, but that's
@@ -1779,12 +1860,12 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
if (READ_ONCE(fbc->underrun_detected))
return;
- queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
+ queue_work(i915->unordered_wq, &fbc->underrun_work);
}
/**
* intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
- * @i915: i915 device
+ * @display: display
*
* Without FBC, most underruns are harmless and don't really cause too many
* problems, except for an annoying message on dmesg. With FBC, underruns can
@@ -1796,12 +1877,12 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
*
* This function is called from the IRQ handler.
*/
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
+void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id)
+ for_each_intel_fbc(display, fbc, fbc_id)
__intel_fbc_handle_fifo_underrun_irq(fbc);
}
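
__intel_fbc_handle_fifo_underrun_irq() runs in IRQ context, so it only peeks at underrun_detected with READ_ONCE() and defers the real work to fbc->underrun_work; the race its comment calls benign stays benign because intel_fbc_underrun_work_fn() re-checks the flag under fbc->lock and bails out. A structure-only sketch of that split, with stand-ins for the workqueue and lock:

	#include <stdio.h>

	static int underrun_detected;
	static int work_queued;

	static void queue_work_stub(void)	/* stands in for queue_work() */
	{
		work_queued = 1;
	}

	static void irq_handler(void)
	{
		/* Racy peek: worst case we queue once more and the
		 * worker below notices there is nothing left to do. */
		if (underrun_detected)
			return;
		queue_work_stub();
	}

	static void underrun_work(void)
	{
		/* mutex_lock(&fbc->lock) in the real code */
		if (underrun_detected)
			return;		/* lost the race: nothing to do */
		underrun_detected = 1;
		printf("FBC deactivated after FIFO underrun\n");
		/* mutex_unlock(&fbc->lock) */
	}

	int main(void)
	{
		irq_handler();
		if (work_queued)
			underrun_work();
		irq_handler();	/* second underrun: early return */
		return 0;
	}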
@@ -1814,15 +1895,17 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
* space to change the value during runtime without sanitizing it again. IGT
* relies on being able to change i915.enable_fbc at runtime.
*/
-static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
+static int intel_sanitize_fbc_option(struct intel_display *display)
{
- if (i915->display.params.enable_fbc >= 0)
- return !!i915->display.params.enable_fbc;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->params.enable_fbc >= 0)
+ return !!display->params.enable_fbc;
- if (!HAS_FBC(i915))
+ if (!HAS_FBC(display))
return 0;
- if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
+ if (IS_BROADWELL(i915) || DISPLAY_VER(display) >= 9)
return 1;
return 0;
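
intel_sanitize_fbc_option() normalizes the tristate enable_fbc module parameter once at init: any non-negative value is a user override, and -1 (auto) falls back to the platform default — on only where HAS_FBC() holds and the hardware is Broadwell or display version 9 or newer. The same normalization in isolation:

	#include <stdio.h>

	struct platform { int has_fbc, display_ver, is_broadwell; };

	/* -1 = auto, 0 = force off, 1 = force on */
	static int sanitize_enable_fbc(int param, const struct platform *p)
	{
		if (param >= 0)
			return !!param;		/* user decision wins */
		if (!p->has_fbc)
			return 0;
		if (p->is_broadwell || p->display_ver >= 9)
			return 1;		/* default-on platforms */
		return 0;
	}

	int main(void)
	{
		struct platform skl = { .has_fbc = 1, .display_ver = 9 };

		printf("auto on skl -> %d\n", sanitize_enable_fbc(-1, &skl));
		printf("forced off  -> %d\n", sanitize_enable_fbc(0, &skl));
		return 0;
	}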
@@ -1833,9 +1916,10 @@ void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
plane->fbc = fbc;
}
-static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
+static struct intel_fbc *intel_fbc_create(struct intel_display *display,
enum intel_fbc_id fbc_id)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_fbc *fbc;
fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
@@ -1843,19 +1927,19 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
return NULL;
fbc->id = fbc_id;
- fbc->i915 = i915;
+ fbc->display = display;
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
- if (DISPLAY_VER(i915) >= 7)
+ if (DISPLAY_VER(display) >= 7)
fbc->funcs = &ivb_fbc_funcs;
- else if (DISPLAY_VER(i915) == 6)
+ else if (DISPLAY_VER(display) == 6)
fbc->funcs = &snb_fbc_funcs;
- else if (DISPLAY_VER(i915) == 5)
+ else if (DISPLAY_VER(display) == 5)
fbc->funcs = &ilk_fbc_funcs;
else if (IS_G4X(i915))
fbc->funcs = &g4x_fbc_funcs;
- else if (DISPLAY_VER(i915) == 4)
+ else if (DISPLAY_VER(display) == 4)
fbc->funcs = &i965_fbc_funcs;
else
fbc->funcs = &i8xx_fbc_funcs;
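
intel_fbc_create() binds one static intel_fbc_funcs table per hardware generation, so the rest of the file stays generation-agnostic behind fbc->funcs; the checks run newest-first, with IS_G4X() slotted between versions 5 and 4 because G4X reports display version 4 but has its own register layout. A compilable model of the dispatch, tables reduced to names:

	#include <stdio.h>

	struct fbc_funcs { const char *name; };

	static const struct fbc_funcs ivb_funcs  = { "ivb" };
	static const struct fbc_funcs snb_funcs  = { "snb" };
	static const struct fbc_funcs ilk_funcs  = { "ilk" };
	static const struct fbc_funcs g4x_funcs  = { "g4x" };
	static const struct fbc_funcs i965_funcs = { "i965" };
	static const struct fbc_funcs i8xx_funcs = { "i8xx" };

	static const struct fbc_funcs *pick_funcs(int ver, int is_g4x)
	{
		if (ver >= 7)
			return &ivb_funcs;
		else if (ver == 6)
			return &snb_funcs;
		else if (ver == 5)
			return &ilk_funcs;
		else if (is_g4x)
			return &g4x_funcs;	/* ver 4, but its own regs */
		else if (ver == 4)
			return &i965_funcs;
		else
			return &i8xx_funcs;
	}

	int main(void)
	{
		printf("%s\n", pick_funcs(4, 1)->name);	/* "g4x" */
		return 0;
	}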
@@ -1865,36 +1949,36 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
/**
* intel_fbc_init - Initialize FBC
- * @i915: the i915 device
+ * @display: display
*
* This function might be called during PM init process.
*/
-void intel_fbc_init(struct drm_i915_private *i915)
+void intel_fbc_init(struct intel_display *display)
{
enum intel_fbc_id fbc_id;
- i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915);
- drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
- i915->display.params.enable_fbc);
+ display->params.enable_fbc = intel_sanitize_fbc_option(display);
+ drm_dbg_kms(display->drm, "Sanitized enable_fbc value: %d\n",
+ display->params.enable_fbc);
- for_each_fbc_id(i915, fbc_id)
- i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
+ for_each_fbc_id(display, fbc_id)
+ display->fbc[fbc_id] = intel_fbc_create(display, fbc_id);
}
/**
* intel_fbc_sanitize - Sanitize FBC
- * @i915: the i915 device
+ * @display: display
*
* Make sure FBC is initially disabled, since we have no
* idea which parts of stolen memory it might otherwise be
* scribbling into.
*/
-void intel_fbc_sanitize(struct drm_i915_private *i915)
+void intel_fbc_sanitize(struct intel_display *display)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(i915, fbc, fbc_id) {
+ for_each_intel_fbc(display, fbc, fbc_id) {
if (intel_fbc_hw_is_active(fbc))
intel_fbc_hw_deactivate(fbc);
}
@@ -1903,11 +1987,12 @@ void intel_fbc_sanitize(struct drm_i915_private *i915)
static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_fbc *fbc = m->private;
- struct drm_i915_private *i915 = fbc->i915;
+ struct intel_display *display = fbc->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane *plane;
intel_wakeref_t wakeref;
- drm_modeset_lock_all(&i915->drm);
+ drm_modeset_lock_all(display->drm);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
mutex_lock(&fbc->lock);
@@ -1920,7 +2005,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
}
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -1936,7 +2021,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
mutex_unlock(&fbc->lock);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- drm_modeset_unlock_all(&i915->drm);
+ drm_modeset_unlock_all(display->drm);
return 0;
}
@@ -1993,12 +2078,12 @@ void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
}
/* FIXME: remove this once igt is on board with per-crtc stuff */
-void intel_fbc_debugfs_register(struct drm_i915_private *i915)
+void intel_fbc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
struct intel_fbc *fbc;
- fbc = i915->display.fbc[INTEL_FBC_A];
+ fbc = display->fbc[INTEL_FBC_A];
if (fbc)
intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 6720ec8ee8a2..ceae55458e14 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -13,6 +13,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_fbc;
struct intel_plane;
struct intel_plane_state;
@@ -31,9 +32,9 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_cleanup(struct drm_i915_private *dev_priv);
-void intel_fbc_sanitize(struct drm_i915_private *dev_priv);
+void intel_fbc_init(struct intel_display *display);
+void intel_fbc_cleanup(struct intel_display *display);
+void intel_fbc_sanitize(struct intel_display *display);
void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
@@ -43,9 +44,9 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane);
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915);
-void intel_fbc_reset_underrun(struct drm_i915_private *i915);
+void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display);
+void intel_fbc_reset_underrun(struct intel_display *display);
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc);
-void intel_fbc_debugfs_register(struct drm_i915_private *i915);
+void intel_fbc_debugfs_register(struct intel_display *display);
#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index d33befd7994d..222cd0e1a2bc 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -5,6 +5,8 @@
#include <linux/string_helpers.h>
+#include <drm/drm_fixed.h>
+
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
@@ -304,7 +306,7 @@ int intel_fdi_link_freq(struct drm_i915_private *i915,
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
{
int pipe_bpp = min(crtc_state->pipe_bpp,
- to_bpp_int(crtc_state->max_link_bpp_x16));
+ fxp_q4_to_int(crtc_state->max_link_bpp_x16));
pipe_bpp = rounddown(pipe_bpp, 2 * 3);
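
max_link_bpp_x16 carries link bpp in .4 binary fixed point (the value scaled by 16), and this hunk is purely a rename: the driver-local to_bpp_int()/to_bpp_x16() become the generic fxp_q4_to_int()/fxp_q4_from_int() helpers that the new <drm/drm_fixed.h> include provides, with the same shift arithmetic. Worked in isolation:

	#include <stdio.h>

	/* Q4 fixed point: value scaled by 16, low 4 bits fractional. */
	static int fxp_q4_from_int(int v) { return v << 4; }
	static int fxp_q4_to_int(int v)   { return v >> 4; }	/* truncates */

	int main(void)
	{
		int max_link_bpp_x16 = 294;	/* 294 / 16 = 18.375 bpp */
		int pipe_bpp = 24;

		/* Same clamp as intel_fdi_compute_pipe_bpp() above. */
		if (pipe_bpp > fxp_q4_to_int(max_link_bpp_x16))
			pipe_bpp = fxp_q4_to_int(max_link_bpp_x16);

		printf("pipe_bpp=%d (x16=%d)\n",
		       pipe_bpp, fxp_q4_from_int(pipe_bpp));
		return 0;
	}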
@@ -340,7 +342,7 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes = lane;
- intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
+ intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
lane, fdi_dotclock,
link_bw,
intel_dp_bw_fec_overhead(false),
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index e5e4ca7cc499..8949fbb1cc60 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -440,7 +440,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
}
- intel_fbc_handle_fifo_underrun_irq(dev_priv);
+ intel_fbc_handle_fifo_underrun_irq(&dev_priv->display);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 4923c340a0b6..af4576dee92a 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -83,6 +83,8 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
+ struct intel_display *display = &i915->display;
+
/* Delay flushing when rings are still busy.*/
spin_lock(&i915->display.fb_tracking.lock);
frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
@@ -96,7 +98,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
might_sleep();
intel_td_flush(i915);
intel_drrs_flush(i915, frontbuffer_bits);
- intel_psr_flush(i915, frontbuffer_bits, origin);
+ intel_psr_flush(display, frontbuffer_bits, origin);
intel_fbc_flush(i915, frontbuffer_bits, origin);
}
@@ -172,6 +174,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
+ struct intel_display *display = &i915->display;
if (origin == ORIGIN_CS) {
spin_lock(&i915->display.fb_tracking.lock);
@@ -183,7 +186,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
trace_intel_frontbuffer_invalidate(i915, frontbuffer_bits, origin);
might_sleep();
- intel_psr_invalidate(i915, frontbuffer_bits, origin);
+ intel_psr_invalidate(display, frontbuffer_bits, origin);
intel_drrs_invalidate(i915, frontbuffer_bits);
intel_fbc_invalidate(i915, frontbuffer_bits, origin);
}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 9c8e1e91ff1c..6470f75106bd 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -478,7 +478,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915,
/*
* HW spec says that 512Bytes in Burst read need special treatment.
* But it doesn't talk about other multiple of 256Bytes. And couldn't locate
- * an I2C slave, which supports such a lengthy burst read too for experiments.
+ * an I2C target, which supports such a lengthy burst read too for experiments.
*
* So until things get clarified on HW support, to avoid the burst read length
* in fold of 256Bytes except 512, max burst read length is fixed at 767Bytes.
@@ -701,7 +701,7 @@ clear_err:
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
- * BUS_ERROR raised by the slave's NAK.
+ * BUS_ERROR raised by the target's NAK.
*/
intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT);
intel_de_write_fw(i915, GMBUS1(i915), 0);
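
Besides the slave-to-target wording update (current I2C specification terminology), the comment above documents a reset idiom: pulsing GMBUS_SW_CLR_INT — write the bit, then write zero — resets the GMBUS controller and clears the BUS_ERROR raised by a target's NAK. The pulse, sketched against a stand-in register (bit position illustrative):

	#include <stdint.h>

	static uint32_t gmbus1;			/* stand-in for GMBUS1 */

	#define GMBUS_SW_CLR_INT	(1u << 31)	/* illustrative bit */

	static void gmbus_clear_error(void)
	{
		gmbus1 = GMBUS_SW_CLR_INT;	/* reset the controller */
		gmbus1 = 0;			/* re-arm it */
	}

	int main(void)
	{
		gmbus_clear_error();
		return 0;
	}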
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 3ebe035f382e..6980b98792c2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -42,11 +42,11 @@ intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
return;
if (DISPLAY_VER(dev_priv) >= 14) {
- if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER))
+ if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER))
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
0, HDCP_LINE_REKEY_DISABLE);
- else if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) ||
- IS_DISPLAY_IP_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER))
+ else if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) ||
+ IS_DISPLAY_VER_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER))
intel_de_rmw(dev_priv,
TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder),
0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
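
IS_DISPLAY_VER_STEP() is a straight rename of IS_DISPLAY_IP_STEP(): it matches a display IP version plus a stepping range that is inclusive of the first stepping and exclusive of the last, with STEP_FOREVER as an open upper bound — so (14, 0) with STEP_D0, STEP_FOREVER reads "display 14.0 from stepping D0 onward". The range logic, with illustrative step values:

	#include <stdio.h>

	#define IP_VER(maj, min)	(((maj) << 8) | (min))

	enum step { STEP_A0 = 0, STEP_B0 = 4, STEP_C0 = 8, STEP_D0 = 12,
		    STEP_FOREVER = 255 };	/* numbering illustrative */

	/* [first, last): first included, last excluded, so STEP_FOREVER
	 * acts as an open-ended upper bound. */
	static int is_display_ver_step(int ver, int step,
				       int want, int first, int last)
	{
		return ver == want && step >= first && step < last;
	}

	int main(void)
	{
		printf("%d\n", is_display_ver_step(IP_VER(14, 0), STEP_D0,
						   IP_VER(14, 0), STEP_D0,
						   STEP_FOREVER));
		return 0;
	}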
@@ -203,11 +203,16 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_get_capability(struct intel_connector *connector)
{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_digital_port *dig_port;
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bool capable = false;
u8 bksv[5];
+ if (!intel_attached_encoder(connector))
+ return capable;
+
+ dig_port = intel_attached_dig_port(connector);
+
if (!shim)
return capable;
@@ -2176,10 +2181,11 @@ static void intel_hdcp_check_work(struct work_struct *work)
DRM_HDCP_CHECK_PERIOD_MS);
}
-static int i915_hdcp_component_bind(struct device *i915_kdev,
+static int i915_hdcp_component_bind(struct device *drv_kdev,
struct device *mei_kdev, void *data)
{
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
mutex_lock(&i915->display.hdcp.hdcp_mutex);
@@ -2190,10 +2196,11 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
return 0;
}
-static void i915_hdcp_component_unbind(struct device *i915_kdev,
+static void i915_hdcp_component_unbind(struct device *drv_kdev,
struct device *mei_kdev, void *data)
{
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
mutex_lock(&i915->display.hdcp.hdcp_mutex);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
index 6548e71b4c49..35bdb532bbb3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
@@ -7,6 +7,7 @@
#include <drm/intel/i915_hdcp_interface.h>
#include "i915_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp_gsc_message.h"
int
@@ -15,17 +16,19 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = {};
struct wired_cmd_initiate_hdcp2_session_out session_init_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ake_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
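
Each intel_hdcp_gsc_* entry point in this file now opens with the same preamble: resolve struct intel_display from the device pointer, fail with -ENODEV if the DRM side never initialized, then derive i915 for the transport call. A hypothetical factoring of that repetition — hdcp_gsc_resolve() is an invented name, not in the diff, and the types here are stand-ins:

	#include <stdio.h>

	struct intel_display { const char *drm; };

	/* Illustration only; the kernel resolves via drvdata chains. */
	static struct intel_display *to_intel_display_stub(void *dev)
	{
		return (struct intel_display *)dev;
	}

	static int hdcp_gsc_resolve(void *dev, struct intel_display **out)
	{
		struct intel_display *display;

		if (!dev)
			return -22;			/* -EINVAL */
		display = to_intel_display_stub(dev);
		if (!display || !display->drm) {
			fprintf(stderr,
				"DRM not initialized, aborting HDCP.\n");
			return -19;			/* -ENODEV */
		}
		*out = display;
		return 0;
	}

	int main(void)
	{
		struct intel_display disp = { "i915" };
		struct intel_display *display;

		printf("resolve -> %d\n", hdcp_gsc_resolve(&disp, &display));
		return 0;
	}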
@@ -72,17 +75,19 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = {};
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
@@ -135,17 +140,19 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = {};
struct wired_cmd_ake_send_hprime_out send_hprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_hprime)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
@@ -183,17 +190,19 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = {};
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !pairing_info)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
@@ -234,17 +243,19 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
{
struct wired_cmd_init_locality_check_in lc_init_in = {};
struct wired_cmd_init_locality_check_out lc_init_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !lc_init_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
@@ -280,17 +291,19 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_validate_locality_in verify_lprime_in = {};
struct wired_cmd_validate_locality_out verify_lprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_lprime)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
@@ -330,17 +343,19 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
{
struct wired_cmd_get_session_key_in get_skey_in = {};
struct wired_cmd_get_session_key_out get_skey_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ske_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
@@ -382,17 +397,19 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
{
struct wired_cmd_verify_repeater_in verify_repeater_in = {};
struct wired_cmd_verify_repeater_out verify_repeater_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
@@ -442,6 +459,7 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
size_t cmd_size;
@@ -449,11 +467,12 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
if (!dev || !stream_ready || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
@@ -504,17 +523,19 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
{
struct wired_cmd_enable_auth_in enable_auth_in = {};
struct wired_cmd_enable_auth_out enable_auth_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
@@ -549,17 +570,19 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = {};
struct wired_cmd_close_session_out session_close_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 19498ee455fa..cd9ee171e0df 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -60,30 +60,25 @@
#include "intel_panel.h"
#include "intel_snps_phy.h"
-inline struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi)
-{
- return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev);
-}
-
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi);
+ struct intel_display *display = to_intel_display(intel_hdmi);
u32 enabled_bits;
- enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(display) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
+ drm_WARN(display->drm,
+ intel_de_read(display, intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
}
static void
-assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+assert_hdmi_transcoder_func_disabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ drm_WARN(display->drm,
+ intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_FUNC_ENABLE,
"HDMI transcoder function enabled, expecting disabled\n");
}
@@ -158,35 +153,35 @@ static u32 hsw_infoframe_enable(unsigned int type)
}
static i915_reg_t
-hsw_dip_data_reg(struct drm_i915_private *dev_priv,
+hsw_dip_data_reg(struct intel_display *display,
enum transcoder cpu_transcoder,
unsigned int type,
int i)
{
switch (type) {
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- return HSW_TVIDEO_DIP_GMP_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_GMP_DATA(display, cpu_transcoder, i);
case DP_SDP_VSC:
- return HSW_TVIDEO_DIP_VSC_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_VSC_DATA(display, cpu_transcoder, i);
case DP_SDP_ADAPTIVE_SYNC:
- return ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, cpu_transcoder, i);
+ return ADL_TVIDEO_DIP_AS_SDP_DATA(display, cpu_transcoder, i);
case DP_SDP_PPS:
- return ICL_VIDEO_DIP_PPS_DATA(dev_priv, cpu_transcoder, i);
+ return ICL_VIDEO_DIP_PPS_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
- return HSW_TVIDEO_DIP_AVI_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_AVI_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
- return HSW_TVIDEO_DIP_SPD_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_SPD_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_VENDOR:
- return HSW_TVIDEO_DIP_VS_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_VS_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_DRM:
- return GLK_TVIDEO_DIP_DRM_DATA(dev_priv, cpu_transcoder, i);
+ return GLK_TVIDEO_DIP_DRM_DATA(display, cpu_transcoder, i);
default:
MISSING_CASE(type);
return INVALID_MMIO_REG;
}
}
-static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
+static int hsw_dip_data_size(struct intel_display *display,
unsigned int type)
{
switch (type) {
@@ -197,7 +192,7 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return VIDEO_DIP_GMP_DATA_SIZE;
else
return VIDEO_DIP_DATA_SIZE;
@@ -211,12 +206,12 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+ u32 val = intel_de_read(display, VIDEO_DIP_CTL);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -224,22 +219,22 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_write(display, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, VIDEO_DIP_DATA, *data);
+ intel_de_write(display, VIDEO_DIP_DATA, *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, VIDEO_DIP_DATA, 0);
+ intel_de_write(display, VIDEO_DIP_DATA, 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
- intel_de_posting_read(dev_priv, VIDEO_DIP_CTL);
+ intel_de_write(display, VIDEO_DIP_CTL, val);
+ intel_de_posting_read(display, VIDEO_DIP_CTL);
}
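
g4x_write_infoframe() keeps a fixed sequence through the register-helper conversion: disable the packet type, reset the DIP data offset, stream the payload through the auto-incrementing VIDEO_DIP_DATA register, zero-pad the rest of the buffer so the hardware computes its ECC over fully-defined data, then re-enable the packet at vsync frequency. The same sequence against stand-in registers (buffer size and enable bit are assumptions of the sketch):

	#include <stdint.h>
	#include <string.h>

	#define DIP_DATA_SIZE		32		/* assumed buffer size */
	#define DIP_ENABLE_AVI		(1u << 21)	/* illustrative bit */

	static uint32_t dip_ctl;
	static uint8_t dip_buf[DIP_DATA_SIZE];
	static unsigned int dip_off;

	/* Stand-in for the auto-incrementing VIDEO_DIP_DATA register. */
	static void dip_data_write(uint32_t val)
	{
		memcpy(&dip_buf[dip_off], &val, 4);
		dip_off = (dip_off + 4) % DIP_DATA_SIZE;
	}

	static void write_infoframe(const uint32_t *data, unsigned int len)
	{
		unsigned int i;

		dip_ctl &= ~DIP_ENABLE_AVI;	/* disable while updating */
		dip_off = 0;			/* reset DIP data offset */

		for (i = 0; i < len; i += 4)
			dip_data_write(*data++);
		/* Pad every remaining byte to force a correct ECC. */
		for (; i < DIP_DATA_SIZE; i += 4)
			dip_data_write(0);

		dip_ctl |= DIP_ENABLE_AVI;	/* re-enable at vsync */
	}

	int main(void)
	{
		uint32_t avi[2] = { 0x000d0282, 0x12345678 };

		write_infoframe(avi, sizeof(avi));
		return 0;
	}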
static void g4x_read_infoframe(struct intel_encoder *encoder,
@@ -247,22 +242,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, VIDEO_DIP_CTL,
+ intel_de_rmw(display, VIDEO_DIP_CTL,
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
+ *data++ = intel_de_read(display, VIDEO_DIP_DATA);
}
static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = intel_de_read(display, VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -279,14 +274,14 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -294,23 +289,23 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void ibx_read_infoframe(struct intel_encoder *encoder,
@@ -318,25 +313,25 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(display, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -354,14 +349,14 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -372,23 +367,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void cpt_read_infoframe(struct intel_encoder *encoder,
@@ -396,24 +391,24 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(display, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(display, TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -428,14 +423,14 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -443,24 +438,24 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv,
+ intel_de_write(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv,
+ intel_de_write(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void vlv_read_infoframe(struct intel_encoder *encoder,
@@ -468,25 +463,25 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, VLV_TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv,
+ *data++ = intel_de_read(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(display, VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -504,75 +499,75 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(dev_priv, cpu_transcoder);
+ i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(display, cpu_transcoder);
int data_size;
int i;
- u32 val = intel_de_read(dev_priv, ctl_reg);
+ u32 val = intel_de_read(display, ctl_reg);
- data_size = hsw_dip_data_size(dev_priv, type);
+ data_size = hsw_dip_data_size(display, type);
- drm_WARN_ON(&dev_priv->drm, len > data_size);
+ drm_WARN_ON(display->drm, len > data_size);
val &= ~hsw_infoframe_enable(type);
- intel_de_write(dev_priv, ctl_reg, val);
+ intel_de_write(display, ctl_reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ intel_de_write(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < data_size; i += 4)
- intel_de_write(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ intel_de_write(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2),
0);
/* Wa_14013475917 */
- if (!(IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr &&
+ if (!(IS_DISPLAY_VER(display, 13, 14) && crtc_state->has_psr &&
!crtc_state->has_panel_replay && type == DP_SDP_VSC))
val |= hsw_infoframe_enable(type);
if (type == DP_SDP_VSC)
val |= VSC_DIP_HW_DATA_SW_HEADER;
- intel_de_write(dev_priv, ctl_reg, val);
- intel_de_posting_read(dev_priv, ctl_reg);
+ intel_de_write(display, ctl_reg, val);
+ intel_de_posting_read(display, ctl_reg);
}
void hsw_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type, void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 *data = frame;
int i;
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
+ *data++ = intel_de_read(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2));
}
static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv,
- HSW_TVIDEO_DIP_CTL(dev_priv, pipe_config->cpu_transcoder));
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = intel_de_read(display,
+ HSW_TVIDEO_DIP_CTL(display, pipe_config->cpu_transcoder));
u32 mask;
mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
mask |= VIDEO_DIP_ENABLE_DRM_GLK;
- if (HAS_AS_SDP(dev_priv))
+ if (HAS_AS_SDP(display))
mask |= VIDEO_DIP_ENABLE_AS_ADL;
return val & mask;
@@ -604,7 +599,7 @@ u32 intel_hdmi_infoframe_enable(unsigned int type)
u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val, ret = 0;
int i;
@@ -615,7 +610,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
unsigned int type = infoframe_type_to_idx[i];
- if (HAS_DDI(dev_priv)) {
+ if (HAS_DDI(display)) {
if (val & hsw_infoframe_enable(type))
ret |= BIT(i);
} else {
@@ -830,11 +825,11 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
- if (DISPLAY_VER(dev_priv) < 10)
+ if (DISPLAY_VER(display) < 10)
return true;
if (!crtc_state->has_infoframe)
@@ -848,13 +843,13 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"couldn't set HDR metadata in infoframe\n");
return false;
}
ret = hdmi_drm_infoframe_check(frame);
- if (drm_WARN_ON(&dev_priv->drm, ret))
+ if (drm_WARN_ON(display->drm, ret))
return false;
return true;
@@ -865,11 +860,11 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -889,21 +884,21 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (!(val & VIDEO_DIP_ENABLE))
return;
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"video DIP still enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"video DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
@@ -916,8 +911,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -977,6 +972,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -985,8 +981,8 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
return false;
- if (HAS_DDI(dev_priv))
- reg = HSW_TVIDEO_DIP_GCP(dev_priv, crtc_state->cpu_transcoder);
+ if (HAS_DDI(display))
+ reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -994,7 +990,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
- intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp);
+ intel_de_write(display, reg, crtc_state->infoframes.gcp);
return true;
}
@@ -1002,6 +998,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -1010,8 +1007,8 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
return;
- if (HAS_DDI(dev_priv))
- reg = HSW_TVIDEO_DIP_GCP(dev_priv, crtc_state->cpu_transcoder);
+ if (HAS_DDI(display))
+ reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -1019,7 +1016,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
else
return;
- crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg);
+ crtc_state->infoframes.gcp = intel_de_read(display, reg);
}
static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
@@ -1049,12 +1046,12 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1068,13 +1065,13 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ drm_WARN(display->drm, val & VIDEO_DIP_ENABLE,
"DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1089,8 +1086,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1108,11 +1105,11 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1125,8 +1122,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
@@ -1138,8 +1135,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1157,11 +1154,11 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1175,13 +1172,13 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ drm_WARN(display->drm, val & VIDEO_DIP_ENABLE,
"DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1196,8 +1193,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1215,12 +1212,12 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv,
+ struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display,
crtc_state->cpu_transcoder);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
- assert_hdmi_transcoder_func_disabled(dev_priv,
+ assert_hdmi_transcoder_func_disabled(display,
crtc_state->cpu_transcoder);
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -1229,16 +1226,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL);
if (!enable) {
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1256,16 +1253,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct intel_display *display = to_intel_display(hdmi);
struct i2c_adapter *ddc = hdmi->attached_connector->base.ddc;
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;
- drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
+ drm_dbg_kms(display->drm, "%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");
- drm_dp_dual_mode_set_tmds_output(&dev_priv->drm,
+ drm_dp_dual_mode_set_tmds_output(display->drm,
hdmi->dp_dual_mode.type, ddc, enable);
}
@@ -1331,7 +1328,7 @@ static
int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
u8 *an)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *ddc = hdmi->attached_connector->base.ddc;
int ret;
@@ -1339,14 +1336,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Write An over DDC failed (%d)\n",
ret);
return ret;
}
ret = intel_gmbus_output_aksv(ddc);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
+ drm_dbg_kms(display->drm, "Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -1355,13 +1352,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port,
u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read Bksv over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1370,13 +1367,14 @@ static
int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port,
u8 *bstatus)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm,
+ "Read bstatus over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1385,13 +1383,13 @@ static
int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read bcaps over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1403,13 +1401,13 @@ static
int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
u8 *ri_prime)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read Ri' over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1418,13 +1416,13 @@ static
int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
bool *ksv_ready)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read bcaps over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1436,12 +1434,12 @@ static
int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
@@ -1452,7 +1450,7 @@ static
int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
int i, u32 *part)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -1461,7 +1459,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm,
+ "Read V'[%d] over DDC failed (%d)\n",
i, ret);
return ret;
}
@@ -1469,15 +1468,15 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc);
u32 scanline;
int ret;
for (;;) {
- scanline = intel_de_read(dev_priv,
- PIPEDSL(dev_priv, crtc->pipe));
+ scanline = intel_de_read(display,
+ PIPEDSL(display, crtc->pipe));
if (scanline > 100 && scanline < 200)
break;
usleep_range(25, 50);
@@ -1486,7 +1485,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
false, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1494,7 +1493,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
true, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Enable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1507,6 +1506,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
enum transcoder cpu_transcoder,
bool enable)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1519,7 +1519,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
cpu_transcoder, enable,
TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
+ drm_err(display->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
return ret;
}
@@ -1539,6 +1539,7 @@ static
bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
@@ -1558,9 +1559,9 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
- drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
- port)));
+ drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1570,14 +1571,14 @@ static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int retry;
for (retry = 0; retry < 3; retry++)
if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
return true;
- drm_err(&i915->drm, "Link check failed\n");
+ drm_err(display->drm, "Link check failed\n");
return false;
}
@@ -1628,13 +1629,13 @@ hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
+ drm_dbg_kms(display->drm, "rx_status read failed. Err %d\n",
ret);
return ret;
}
@@ -1655,7 +1656,7 @@ static ssize_t
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
u8 msg_id, bool paired)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1670,7 +1671,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
!ret && msg_ready && msg_sz, timeout * 1000,
1000, 5 * 1000);
if (ret)
- drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
+ drm_dbg_kms(display->drm,
+ "msg_id: %d, ret: %d, timeout: %d\n",
msg_id, ret, timeout);
return ret ? ret : msg_sz;
@@ -1691,8 +1693,8 @@ static
int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
u8 msg_id, void *buf, size_t size)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
@@ -1708,7 +1710,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
* available buffer.
*/
if (ret > size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"msg_sz(%zd) is more than exp size(%zu)\n",
ret, size);
return -EINVAL;
@@ -1717,7 +1719,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret);
if (ret)
- drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
+ drm_dbg_kms(display->drm, "Failed to read msg_id: %d(%zd)\n",
msg_id, ret);
return ret;
@@ -1783,16 +1785,17 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_tmds_clock, vbt_max_tmds_clock;
- if (DISPLAY_VER(dev_priv) >= 13 || IS_ALDERLAKE_S(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || IS_ALDERLAKE_S(dev_priv))
max_tmds_clock = 600000;
- else if (DISPLAY_VER(dev_priv) >= 10)
+ else if (DISPLAY_VER(display) >= 10)
max_tmds_clock = 594000;
- else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv))
max_tmds_clock = 300000;
- else if (DISPLAY_VER(dev_priv) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
max_tmds_clock = 225000;
else
max_tmds_clock = 165000;
@@ -1848,7 +1851,8 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits,
bool has_hdmi_sink)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct intel_display *display = to_intel_display(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
@@ -1885,7 +1889,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
* FIXME: We will hopefully get an algorithmic way of programming
* the MPLLB for HDMI in the future.
*/
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock);
else if (IS_DG2(dev_priv))
return intel_snps_phy_check_hdmi_link_rate(clock);
@@ -1908,13 +1912,13 @@ int intel_hdmi_tmds_clock(int clock, int bpc,
return DIV_ROUND_CLOSEST(clock * bpc, 8);
}
-static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
+static bool intel_hdmi_source_bpc_possible(struct intel_display *display, int bpc)
{
switch (bpc) {
case 12:
- return !HAS_GMCH(i915);
+ return !HAS_GMCH(display);
case 10:
- return DISPLAY_VER(i915) >= 11;
+ return DISPLAY_VER(display) >= 11;
case 8:
return true;
default:
@@ -1960,7 +1964,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum drm_mode_status status = MODE_OK;
int bpc;
@@ -1973,7 +1977,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
for (bpc = 12; bpc >= 8; bpc -= 2) {
int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);
- if (!intel_hdmi_source_bpc_possible(i915, bpc))
+ if (!intel_hdmi_source_bpc_possible(display, bpc))
continue;
if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format))
@@ -1985,7 +1989,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
}
/* can never happen */
- drm_WARN_ON(&i915->drm, status == MODE_OK);
+ drm_WARN_ON(display->drm, status == MODE_OK);
return status;
}
@@ -1994,8 +1998,9 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum drm_mode_status status;
int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
@@ -2073,17 +2078,16 @@ bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc)
{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!intel_hdmi_source_bpc_possible(dev_priv, bpc))
+ if (!intel_hdmi_source_bpc_possible(display, bpc))
return false;
/* Display Wa_1405510057:icl,ehl */
if (intel_hdmi_is_ycbcr420(crtc_state) &&
- bpc == 10 && DISPLAY_VER(dev_priv) == 11 &&
+ bpc == 10 && DISPLAY_VER(display) == 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -2130,7 +2134,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpc, clock = adjusted_mode->crtc_clock;
@@ -2153,7 +2157,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
*/
crtc_state->pipe_bpp = min(crtc_state->pipe_bpp, bpc * 3);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"picking %d bpc for HDMI output (pipe bpp: %d)\n",
bpc, crtc_state->pipe_bpp);
@@ -2230,10 +2234,10 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct drm_display_info *info = &connector->base.display_info;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
int ret;
@@ -2241,7 +2245,7 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
intel_hdmi_sink_format(crtc_state, connector, ycbcr_420_only);
if (ycbcr_420_only && crtc_state->sink_format != INTEL_OUTPUT_FORMAT_YCBCR420) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
}
@@ -2302,7 +2306,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
@@ -2335,7 +2339,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (ret)
ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unsupported HDMI clock (%d kHz), rejecting mode\n",
pipe_config->hw.adjusted_mode.crtc_clock);
return ret;
@@ -2370,22 +2374,22 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
conn_state);
if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
+ drm_dbg_kms(display->drm, "bad AVI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
+ drm_dbg_kms(display->drm, "bad SPD infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
+ drm_dbg_kms(display->drm, "bad HDMI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
+ drm_dbg_kms(display->drm, "bad DRM infoframe\n");
return -EINVAL;
}
@@ -2418,13 +2422,14 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
static void
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
struct i2c_adapter *ddc = connector->ddc;
enum drm_dp_dual_mode_type type;
- type = drm_dp_dual_mode_detect(&dev_priv->drm, ddc);
+ type = drm_dp_dual_mode_detect(display->drm, ddc);
/*
* Type 1 DVI adaptors are not required to implement any
@@ -2438,7 +2443,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
if (!connector->force &&
intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
type = DRM_DP_DUAL_MODE_TYPE1_DVI;
} else {
@@ -2451,17 +2456,17 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
hdmi->dp_dual_mode.type = type;
hdmi->dp_dual_mode.max_tmds_clock =
- drm_dp_dual_mode_max_tmds_clock(&dev_priv->drm, type, ddc);
+ drm_dp_dual_mode_max_tmds_clock(display->drm, type, ddc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
drm_dp_get_dual_mode_type_name(type),
hdmi->dp_dual_mode.max_tmds_clock);
/* Older VBTs are often buggy and can't be trusted :( Play it safe. */
- if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) &&
!intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
hdmi->dp_dual_mode.max_tmds_clock = 0;
}
@@ -2470,6 +2475,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct i2c_adapter *ddc = connector->ddc;
@@ -2482,7 +2488,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
drm_edid = drm_edid_read_ddc(connector, ddc);
if (!drm_edid && !intel_gmbus_is_forced_bit(ddc)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(ddc, true);
drm_edid = drm_edid_read_ddc(connector, ddc);
@@ -2511,13 +2517,14 @@ intel_hdmi_set_edid(struct drm_connector *connector)
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
enum drm_connector_status status = connector_status_disconnected;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_device_enabled(dev_priv))
@@ -2528,7 +2535,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- if (DISPLAY_VER(dev_priv) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
!intel_digital_port_connected(encoder))
goto out;
@@ -2549,9 +2556,10 @@ out:
static void
intel_hdmi_force(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_driver_check_access(i915))
@@ -2608,9 +2616,9 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static int intel_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
return intel_digital_connector_atomic_check(connector, state);
else
return g4x_hdmi_connector_atomic_check(connector, state);
@@ -2625,7 +2633,7 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
@@ -2634,10 +2642,10 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_hdmi_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
drm_connector_attach_hdr_output_metadata_property(connector);
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
drm_connector_attach_max_bpc_property(connector, 8, 12);
}
@@ -2664,14 +2672,14 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool high_tmds_clock_ratio,
bool scrambling)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_scrambling *sink_scrambling =
&connector->display_info.hdmi.scdc.scrambling;
if (!sink_scrambling->supported)
return true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
connector->base.id, connector->name,
str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
@@ -2752,7 +2760,7 @@ static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
if (intel_encoder_is_combo(encoder))
@@ -2760,7 +2768,7 @@ static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
else if (intel_encoder_is_tc(encoder))
return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder);
- drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
+ drm_WARN(display->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
@@ -2808,10 +2816,11 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
- drm_WARN_ON(&i915->drm, encoder->port == PORT_A);
+ drm_WARN_ON(display->drm, encoder->port == PORT_A);
/*
* Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
@@ -2871,6 +2880,7 @@ static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 ddc_pin;
@@ -2880,7 +2890,7 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
ddc_pin = dg1_encoder_to_ddc_pin(encoder);
else if (IS_ROCKETLAKE(dev_priv))
ddc_pin = rkl_encoder_to_ddc_pin(encoder);
- else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
+ else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(dev_priv))
ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
HAS_PCH_TGP(dev_priv))
@@ -2902,10 +2912,11 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
static struct intel_encoder *
get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_encoder *other;
- for_each_intel_encoder(&i915->drm, other) {
+ for_each_intel_encoder(display->drm, other) {
struct intel_connector *connector;
if (other == encoder)
@@ -2925,6 +2936,7 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_encoder *other;
const char *source;
@@ -2939,20 +2951,22 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
}
if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Invalid DDC pin %d\n",
encoder->base.base.id, encoder->base.name, ddc_pin);
return 0;
}
other = get_encoder_by_ddc_pin(encoder, ddc_pin);
if (other) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name, ddc_pin,
other->base.base.id, other->base.name);
return 0;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Using DDC pin 0x%x (%s)\n",
encoder->base.base.id, encoder->base.name,
ddc_pin, source);
@@ -2962,6 +2976,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
void intel_infoframe_init(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *dev_priv =
to_i915(dig_port->base.base.dev);
@@ -2975,7 +2990,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->read_infoframe = g4x_read_infoframe;
dig_port->set_infoframes = g4x_set_infoframes;
dig_port->infoframes_enabled = g4x_infoframes_enabled;
- } else if (HAS_DDI(dev_priv)) {
+ } else if (HAS_DDI(display)) {
if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) {
dig_port->write_infoframe = lspcon_write_infoframe;
dig_port->read_infoframe = lspcon_read_infoframe;
@@ -3003,6 +3018,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_connector *connector = &intel_connector->base;
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct intel_encoder *intel_encoder = &dig_port->base;
@@ -3012,11 +3028,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct cec_connector_info conn_info;
u8 ddc_pin;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
- if (DISPLAY_VER(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
+ if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
if (drm_WARN(dev, dig_port->max_lanes < 4,
@@ -3036,18 +3052,18 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
connector->interlace_allowed = true;
connector->stereo_allowed = true;
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
connector->ycbcr_420_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_connector->base.polled = intel_connector->polled;
- if (HAS_DDI(dev_priv))
+ if (HAS_DDI(display))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -3061,7 +3077,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
int ret = intel_hdcp_init(intel_connector, dig_port,
&intel_hdmi_hdcp_shim);
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDCP init failed, skipping.\n");
}
@@ -3071,7 +3087,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
cec_notifier_conn_register(dev->dev, port_identifier(port),
&conn_info);
if (!intel_hdmi->cec_notifier)
- drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
+ drm_dbg_kms(display->drm, "CEC notifier get failed\n");
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 6b39df38d57a..9b97623665c5 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -58,6 +58,5 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
int src_max_slices, int src_max_slice_width,
int hdmi_max_slices, int hdmi_throughput);
int intel_hdmi_dsc_get_slice_height(int vactive);
-struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi);
#endif /* __INTEL_HDMI_H__ */
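The hunks above apply one mechanical pattern: display-only code derives a struct intel_display from whatever context it already holds via to_intel_display(), then hands that to the register and logging helpers, which is what allows the per-device helper intel_hdmi_to_i915() to be deleted here. Below is a minimal sketch of the pattern with simplified stand-in types; the real to_intel_display() is a type-generic macro and intel_de_read() takes an i915_reg_t, so this is illustrative, not the i915 code.

/* Sketch only: simplified stand-ins for the real i915 types. */
struct drm_device;

struct intel_display {
	struct drm_device *drm;	/* what drm_dbg_kms()/drm_WARN() want */
};

struct intel_encoder {
	struct intel_display *display;
};

static struct intel_display *to_intel_display(struct intel_encoder *encoder)
{
	return encoder->display;
}

/* Stand-in for an MMIO read keyed off the display device. */
static unsigned int intel_de_read(struct intel_display *display,
				  unsigned int reg)
{
	(void)display;
	(void)reg;
	return 0;
}

/* Before: struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); */
static unsigned int infoframes_enabled(struct intel_encoder *encoder,
				       unsigned int ctl_reg)
{
	struct intel_display *display = to_intel_display(encoder);

	return intel_de_read(display, ctl_reg);
}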
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index a1f07ee69a86..2c4e946d5575 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -456,6 +456,7 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
{
+ struct intel_display *display = &dev_priv->display;
u32 pin_mask = 0, long_mask = 0;
u32 hotplug_trigger;
@@ -477,7 +478,7 @@ void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
if ((IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
}
void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
@@ -513,6 +514,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
{
+ struct intel_display *display = &i915->display;
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
@@ -545,7 +547,7 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
}
if (trigger_aux)
- intel_dp_aux_irq_handler(i915);
+ intel_dp_aux_irq_handler(display);
if (!pin_mask && !trigger_aux)
drm_err(&i915->drm,
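The hotplug handlers above are transitional: they keep their drm_i915_private entry signature, derive the display pointer once at the top, and hand only that to already-converted callees such as intel_dp_aux_irq_handler(). A tiny sketch of that adapter step, with stand-in types:

    #include <stdio.h>

    struct intel_display { const char *name; };
    struct drm_i915_private { struct intel_display display; };

    static void aux_irq_handler(struct intel_display *display)
    {
        printf("aux irq on %s\n", display->name);
    }

    /* Legacy-signature entry point, converted internals. */
    static void hpd_irq_handler(struct drm_i915_private *i915)
    {
        struct intel_display *display = &i915->display;

        aux_irq_handler(display);
    }

    int main(void)
    {
        struct drm_i915_private i915 = { .display = { .name = "gpu0" } };

        hpd_irq_handler(&i915);
        return 0;
    }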
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index a92d008d4e6e..19d1f196d9fb 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -9,33 +9,33 @@
#include "intel_hti.h"
#include "intel_hti_regs.h"
-void intel_hti_init(struct drm_i915_private *i915)
+void intel_hti_init(struct intel_display *display)
{
/*
* If the platform has HTI, we need to find out whether it has reserved
* any display resources before we create our display outputs.
*/
- if (DISPLAY_INFO(i915)->has_hti)
- i915->display.hti.state = intel_de_read(i915, HDPORT_STATE);
+ if (DISPLAY_INFO(display)->has_hti)
+ display->hti.state = intel_de_read(display, HDPORT_STATE);
}
-bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
+bool intel_hti_uses_phy(struct intel_display *display, enum phy phy)
{
- if (drm_WARN_ON(&i915->drm, phy == PHY_NONE))
+ if (drm_WARN_ON(display->drm, phy == PHY_NONE))
return false;
- return i915->display.hti.state & HDPORT_ENABLED &&
- i915->display.hti.state & HDPORT_DDI_USED(phy);
+ return display->hti.state & HDPORT_ENABLED &&
+ display->hti.state & HDPORT_DDI_USED(phy);
}
-u32 intel_hti_dpll_mask(struct drm_i915_private *i915)
+u32 intel_hti_dpll_mask(struct intel_display *display)
{
- if (!(i915->display.hti.state & HDPORT_ENABLED))
+ if (!(display->hti.state & HDPORT_ENABLED))
return 0;
/*
* Note: This is subtle. The values must coincide with what's defined
* for the platform.
*/
- return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->display.hti.state);
+ return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, display->hti.state);
}
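The HTI queries above are pure bitmask decoding of the HDPORT_STATE value cached at init. A standalone sketch of the same pattern follows; the bit positions are made up for illustration, and field_get() only models what REG_FIELD_GET() does (mask out a field, shift it down by the mask's lowest set bit).

    #include <stdint.h>
    #include <stdio.h>

    #define HDPORT_ENABLED        (1u << 0)              /* hypothetical bit */
    #define HDPORT_DDI_USED(phy)  (1u << (1 + (phy)))    /* hypothetical layout */
    #define HDPORT_DPLL_USED_MASK 0xf000u                /* hypothetical mask */

    static uint32_t field_get(uint32_t mask, uint32_t reg)
    {
        return (reg & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
        uint32_t state = HDPORT_ENABLED | HDPORT_DDI_USED(2) | 0x5000u;

        printf("phy2 reserved: %d\n",
               !!(state & HDPORT_ENABLED && state & HDPORT_DDI_USED(2)));
        printf("dpll mask: 0x%x\n", field_get(HDPORT_DPLL_USED_MASK, state));
        return 0;
    }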
diff --git a/drivers/gpu/drm/i915/display/intel_hti.h b/drivers/gpu/drm/i915/display/intel_hti.h
index 2893d6668657..b692571c5558 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.h
+++ b/drivers/gpu/drm/i915/display/intel_hti.h
@@ -8,11 +8,11 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
enum phy;
-void intel_hti_init(struct drm_i915_private *i915);
-bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy);
-u32 intel_hti_dpll_mask(struct drm_i915_private *i915);
+void intel_hti_init(struct intel_display *display);
+bool intel_hti_uses_phy(struct intel_display *display, enum phy phy);
+u32 intel_hti_dpll_mask(struct intel_display *display);
#endif /* __INTEL_HTI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index dfd7d5e23f3f..e7a9b860fac6 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_fixed.h>
+
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -23,12 +25,13 @@
void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe;
limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state,
intel_crtc_for_pipe(i915, pipe));
@@ -67,12 +70,12 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
u8 pipe_mask,
const char *reason)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
enum pipe max_bpp_pipe = INVALID_PIPE;
struct intel_crtc *crtc;
int max_bpp_x16 = 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
int link_bpp_x16;
@@ -93,7 +96,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
* is based on the pipe bpp value, set the actual link bpp
* limit here once the MST BW allocation is fixed.
*/
- link_bpp_x16 = to_bpp_x16(crtc_state->pipe_bpp);
+ link_bpp_x16 = fxp_q4_from_int(crtc_state->pipe_bpp);
if (link_bpp_x16 > max_bpp_x16) {
max_bpp_x16 = link_bpp_x16;
@@ -134,7 +137,7 @@ intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
struct intel_link_bw_limits *new_limits,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
if (pipe == INVALID_PIPE)
return false;
@@ -143,7 +146,7 @@ intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
old_limits->max_bpp_x16[pipe])
return false;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
new_limits->bpp_limit_reached_pipes & BIT(pipe)))
return false;
@@ -176,7 +179,7 @@ static int check_all_link_config(struct intel_atomic_state *state,
}
static bool
-assert_link_limit_change_valid(struct drm_i915_private *i915,
+assert_link_limit_change_valid(struct intel_display *display,
const struct intel_link_bw_limits *old_limits,
const struct intel_link_bw_limits *new_limits)
{
@@ -184,14 +187,14 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
enum pipe pipe;
/* FEC can't be forced off after it was forced on. */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
(old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
old_limits->force_fec_pipes))
return false;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
/* The bpp limit can only decrease. */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
new_limits->max_bpp_x16[pipe] >
old_limits->max_bpp_x16[pipe]))
return false;
@@ -202,7 +205,7 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
}
/* At least one limit must change. */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!bpps_changed &&
new_limits->force_fec_pipes ==
old_limits->force_fec_pipes))
@@ -230,7 +233,7 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
int intel_link_bw_atomic_check(struct intel_atomic_state *state,
struct intel_link_bw_limits *new_limits)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_link_bw_limits old_limits = *new_limits;
int ret;
@@ -238,7 +241,7 @@ int intel_link_bw_atomic_check(struct intel_atomic_state *state,
if (ret != -EAGAIN)
return ret;
- if (!assert_link_limit_change_valid(i915, &old_limits, new_limits))
+ if (!assert_link_limit_change_valid(display, &old_limits, new_limits))
return -EINVAL;
return -EAGAIN;
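Besides the display conversion, this file switches from the driver-local to_bpp_x16() to fxp_q4_from_int() from the newly included <drm/drm_fixed.h>: link bpp values are ".4" binary fixed point, i.e. integers scaled by 16. A small sketch of what that representation means; the helper names here are local stand-ins, not the header's macros.

    #include <stdio.h>

    static int q4_from_int(int v)   { return v << 4; }   /* scale by 16 */
    static int q4_to_int(int q)     { return q >> 4; }
    static int q4_frac_x1000(int q) { return (q & 0xf) * 1000 / 16; }

    int main(void)
    {
        int bpp_x16 = q4_from_int(24);  /* 24 bpp -> 384 in q4 */

        bpp_x16 -= 8;                   /* reduce the limit by 0.5 bpp */
        printf("%d.%03d bpp (raw %d)\n",
               q4_to_int(bpp_x16), q4_frac_x1000(bpp_x16), bpp_x16);
        return 0;
    }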
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index 6b0ccfff59da..e69049cf178f 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -10,8 +10,6 @@
#include "intel_display_limits.h"
-struct drm_i915_private;
-
struct intel_atomic_state;
struct intel_crtc_state;
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index d5a0aecf3e8f..b457c69dc0be 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -48,23 +48,22 @@ struct drm_atomic_state *
intel_load_detect_get_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_encoder *encoder =
intel_attached_encoder(to_intel_connector(connector));
struct intel_crtc *possible_crtc;
struct intel_crtc *crtc = NULL;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_config *config = &dev->mode_config;
+ struct drm_mode_config *config = &display->drm->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
int ret;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.base.id, encoder->base.name);
- drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
+ drm_WARN_ON(display->drm, !drm_modeset_is_locked(&config->connection_mutex));
/*
* Algorithm gets a little messy:
@@ -89,7 +88,7 @@ intel_load_detect_get_pipe(struct drm_connector *connector,
}
/* Find an unused one (if possible) */
- for_each_intel_crtc(dev, possible_crtc) {
+ for_each_intel_crtc(display->drm, possible_crtc) {
if (!(encoder->base.possible_crtcs &
drm_crtc_mask(&possible_crtc->base)))
continue;
@@ -111,15 +110,15 @@ intel_load_detect_get_pipe(struct drm_connector *connector,
* If we didn't find an unused CRTC, don't use any.
*/
if (!crtc) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"no pipe available for load-detect\n");
ret = -ENODEV;
goto fail;
}
found:
- state = drm_atomic_state_alloc(dev);
- restore_state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(display->drm);
+ restore_state = drm_atomic_state_alloc(display->drm);
if (!state || !restore_state) {
ret = -ENOMEM;
goto fail;
@@ -164,7 +163,7 @@ found:
if (!ret)
ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to create a copy of old state to restore: %i\n",
ret);
goto fail;
@@ -172,7 +171,7 @@ found:
ret = drm_atomic_commit(state);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"failed to set mode on load-detect pipe\n");
goto fail;
}
@@ -204,13 +203,13 @@ void intel_load_detect_release_pipe(struct drm_connector *connector,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_encoder *intel_encoder =
intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
struct drm_encoder *encoder = &intel_encoder->base;
int ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
@@ -219,7 +218,7 @@ void intel_load_detect_release_pipe(struct drm_connector *connector,
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
if (ret)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Couldn't release load detect pipe: %i\n", ret);
drm_atomic_state_put(state);
}
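Load detect has to force a free CRTC on for probing and put everything back afterwards, which is why intel_load_detect_get_pipe() above allocates both a probe state and a restore_state copy of the old configuration. A userspace model of that snapshot/commit/restore dance, with stand-in types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct mode_state { int active; int mode_id; };

    static struct mode_state *snapshot(const struct mode_state *cur)
    {
        struct mode_state *s = malloc(sizeof(*s));

        if (s)
            memcpy(s, cur, sizeof(*s));
        return s;
    }

    static void commit(struct mode_state *hw, const struct mode_state *s)
    {
        *hw = *s;   /* stands in for drm_atomic_commit() */
    }

    int main(void)
    {
        struct mode_state hw = { .active = 0, .mode_id = 0 };
        struct mode_state probe = { .active = 1, .mode_id = 42 };
        struct mode_state *restore = snapshot(&hw);

        if (!restore)
            return 1;

        commit(&hw, &probe);    /* force a pipe for load detection */
        printf("probing with mode %d\n", hw.mode_id);

        commit(&hw, restore);   /* intel_load_detect_release_pipe() path */
        printf("restored active=%d\n", hw.active);
        free(restore);
        return 0;
    }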
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 8b26354d6e53..f9db867fae89 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -79,33 +79,33 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
{
- struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(dp);
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_dp_dpcd_ident *ident;
u32 vendor_oui;
- if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
- drm_err(&i915->drm, "Can't read description\n");
+ if (drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd))) {
+ drm_err(display->drm, "Can't read description\n");
return false;
}
- ident = &dp->desc.ident;
+ ident = &intel_dp->desc.ident;
vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
ident->oui[2];
switch (vendor_oui) {
case LSPCON_VENDOR_MCA_OUI:
lspcon->vendor = LSPCON_VENDOR_MCA;
- drm_dbg_kms(&i915->drm, "Vendor: Mega Chips\n");
+ drm_dbg_kms(display->drm, "Vendor: Mega Chips\n");
break;
case LSPCON_VENDOR_PARADE_OUI:
lspcon->vendor = LSPCON_VENDOR_PARADE;
- drm_dbg_kms(&i915->drm, "Vendor: Parade Tech\n");
+ drm_dbg_kms(display->drm, "Vendor: Parade Tech\n");
break;
default:
- drm_err(&i915->drm, "Invalid/Unknown vendor OUI\n");
+ drm_err(display->drm, "Invalid/Unknown vendor OUI\n");
return false;
}
@@ -123,7 +123,7 @@ static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 hdr_caps;
int ret;
@@ -131,10 +131,10 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
&hdr_caps, 1);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "HDR capability detection failed\n");
+ drm_dbg_kms(display->drm, "HDR capability detection failed\n");
lspcon->hdr_supported = false;
} else if (hdr_caps & 0x1) {
- drm_dbg_kms(&i915->drm, "LSPCON capable of HDR\n");
+ drm_dbg_kms(display->drm, "LSPCON capable of HDR\n");
lspcon->hdr_supported = true;
}
}
@@ -142,12 +142,12 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum drm_lspcon_mode current_mode;
struct i2c_adapter *ddc = &intel_dp->aux.ddc;
if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, ddc, &current_mode)) {
- drm_dbg_kms(&i915->drm, "Error reading LSPCON mode\n");
+ drm_dbg_kms(display->drm, "Error reading LSPCON mode\n");
return DRM_LSPCON_MODE_INVALID;
}
return current_mode;
@@ -169,23 +169,23 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode mode)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum drm_lspcon_mode current_mode;
current_mode = lspcon_get_current_mode(lspcon);
if (current_mode == mode)
goto out;
- drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n",
+ drm_dbg_kms(display->drm, "Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode,
lspcon_get_mode_settle_timeout(lspcon));
if (current_mode != mode)
- drm_err(&i915->drm, "LSPCON mode hasn't settled\n");
+ drm_err(display->drm, "LSPCON mode hasn't settled\n");
out:
- drm_dbg_kms(&i915->drm, "Current LSPCON mode %s\n",
+ drm_dbg_kms(display->drm, "Current LSPCON mode %s\n",
lspcon_mode_name(current_mode));
return current_mode;
@@ -195,46 +195,46 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode mode)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int err;
enum drm_lspcon_mode current_mode;
struct i2c_adapter *ddc = &intel_dp->aux.ddc;
err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, ddc, &current_mode);
if (err) {
- drm_err(&i915->drm, "Error reading LSPCON mode\n");
+ drm_err(display->drm, "Error reading LSPCON mode\n");
return err;
}
if (current_mode == mode) {
- drm_dbg_kms(&i915->drm, "Current mode = desired LSPCON mode\n");
+ drm_dbg_kms(display->drm, "Current mode = desired LSPCON mode\n");
return 0;
}
err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, ddc, mode);
if (err < 0) {
- drm_err(&i915->drm, "LSPCON mode change failed\n");
+ drm_err(display->drm, "LSPCON mode change failed\n");
return err;
}
lspcon->mode = mode;
- drm_dbg_kms(&i915->drm, "LSPCON mode changed done\n");
+ drm_dbg_kms(display->drm, "LSPCON mode changed done\n");
return 0;
}
static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 rev;
if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
&rev) != 1) {
- drm_dbg_kms(&i915->drm, "Native AUX CH down\n");
+ drm_dbg_kms(display->drm, "Native AUX CH down\n");
return false;
}
- drm_dbg_kms(&i915->drm, "Native AUX CH up, DPCD version: %d.%d\n",
+ drm_dbg_kms(display->drm, "Native AUX CH up, DPCD version: %d.%d\n",
rev >> 4, rev & 0xf);
return true;
@@ -242,12 +242,12 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
- int retry;
- enum drm_dp_dual_mode_type adaptor_type;
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct i2c_adapter *ddc = &intel_dp->aux.ddc;
+ enum drm_dp_dual_mode_type adaptor_type;
enum drm_lspcon_mode expected_mode;
+ int retry;
expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
@@ -263,13 +263,13 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
}
if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
- drm_dbg_kms(&i915->drm, "No LSPCON detected, found %s\n",
+ drm_dbg_kms(display->drm, "No LSPCON detected, found %s\n",
drm_dp_get_dual_mode_type_name(adaptor_type));
return false;
}
/* Yay ... got a LSPCON device */
- drm_dbg_kms(&i915->drm, "LSPCON detected\n");
+ drm_dbg_kms(display->drm, "LSPCON detected\n");
lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
/*
@@ -279,7 +279,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
*/
if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
- drm_err(&i915->drm, "LSPCON mode change to PCON failed\n");
+ drm_err(display->drm, "LSPCON mode change to PCON failed\n");
return false;
}
}
@@ -289,13 +289,13 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
unsigned long start = jiffies;
while (1) {
if (intel_digital_port_connected(&dig_port->base)) {
- drm_dbg_kms(&i915->drm, "LSPCON recovering in PCON mode after %u ms\n",
+ drm_dbg_kms(display->drm, "LSPCON recovering in PCON mode after %u ms\n",
jiffies_to_msecs(jiffies - start));
return;
}
@@ -306,7 +306,7 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
usleep_range(10000, 15000);
}
- drm_dbg_kms(&i915->drm, "LSPCON DP descriptor mismatch after resume\n");
+ drm_dbg_kms(display->drm, "LSPCON DP descriptor mismatch after resume\n");
}
static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
@@ -477,10 +477,10 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
- bool ret = true;
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+ bool ret = true;
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -492,7 +492,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
frame, len);
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- drm_dbg_kms(&i915->drm, "Update HDR metadata for lspcon\n");
+ drm_dbg_kms(display->drm, "Update HDR metadata for lspcon\n");
/* It uses the legacy hsw implementation for the same */
hsw_write_infoframe(encoder, crtc_state, type, frame, len);
break;
@@ -501,7 +501,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
}
if (!ret) {
- drm_err(&i915->drm, "Failed to write infoframes\n");
+ drm_err(display->drm, "Failed to write infoframes\n");
return;
}
}
@@ -522,17 +522,17 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- ssize_t ret;
- union hdmi_infoframe frame;
- u8 buf[VIDEO_DIP_DATA_SIZE];
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ union hdmi_infoframe frame;
+ u8 buf[VIDEO_DIP_DATA_SIZE];
+ ssize_t ret;
if (!lspcon->active) {
- drm_err(&i915->drm, "Writing infoframes while LSPCON disabled ?\n");
+ drm_err(display->drm, "Writing infoframes while LSPCON disabled ?\n");
return;
}
@@ -542,7 +542,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
conn_state->connector,
adjusted_mode);
if (ret < 0) {
- drm_err(&i915->drm, "couldn't fill AVI infoframe\n");
+ drm_err(display->drm, "couldn't fill AVI infoframe\n");
return;
}
@@ -583,7 +583,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
- drm_err(&i915->drm, "Failed to pack AVI IF\n");
+ drm_err(display->drm, "Failed to pack AVI IF\n");
return;
}
@@ -624,9 +624,9 @@ static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux)
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
bool infoframes_enabled;
u32 val = 0;
u32 mask, tmp;
@@ -640,8 +640,8 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
if (lspcon->hdr_supported) {
- tmp = intel_de_read(dev_priv,
- HSW_TVIDEO_DIP_CTL(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display,
+ HSW_TVIDEO_DIP_CTL(display, pipe_config->cpu_transcoder));
mask = VIDEO_DIP_ENABLE_GMP_HSW;
if (tmp & mask)
@@ -658,32 +658,32 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
bool lspcon_init(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector *connector = &intel_dp->attached_connector->base;
lspcon->active = false;
lspcon->mode = DRM_LSPCON_MODE_INVALID;
if (!lspcon_probe(lspcon)) {
- drm_err(&i915->drm, "Failed to probe lspcon\n");
+ drm_err(display->drm, "Failed to probe lspcon\n");
return false;
}
if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) {
- drm_err(&i915->drm, "LSPCON DPCD read failed\n");
+ drm_err(display->drm, "LSPCON DPCD read failed\n");
return false;
}
if (!lspcon_detect_vendor(lspcon)) {
- drm_err(&i915->drm, "LSPCON vendor detection failed\n");
+ drm_err(display->drm, "LSPCON vendor detection failed\n");
return false;
}
connector->ycbcr_420_allowed = true;
lspcon->active = true;
- drm_dbg_kms(&i915->drm, "Success: LSPCON init\n");
+ drm_dbg_kms(display->drm, "Success: LSPCON init\n");
return true;
}
@@ -697,9 +697,8 @@ u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
void lspcon_resume(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
enum drm_lspcon_mode expected_mode;
if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
@@ -707,7 +706,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
if (!lspcon->active) {
if (!lspcon_init(dig_port)) {
- drm_err(&i915->drm, "LSPCON init failed on port %c\n",
+ drm_err(display->drm, "LSPCON init failed on port %c\n",
port_name(dig_port->base.port));
return;
}
@@ -724,7 +723,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
return;
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
- drm_err(&i915->drm, "LSPCON resume failed\n");
+ drm_err(display->drm, "LSPCON resume failed\n");
else
- drm_dbg_kms(&i915->drm, "LSPCON resume success\n");
+ drm_dbg_kms(display->drm, "LSPCON resume success\n");
}
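lspcon_detect_vendor() above identifies the adaptor by packing the three DPCD ident OUI bytes big-endian into one word and matching it against known vendor prefixes. A standalone sketch of that decode; the OUI constants below are illustrative placeholders rather than the driver's values.

    #include <stdint.h>
    #include <stdio.h>

    #define VENDOR_A_OUI 0x0060ADu   /* placeholder */
    #define VENDOR_B_OUI 0x001CF8u   /* placeholder */

    static uint32_t oui_pack(const uint8_t oui[3])
    {
        return (oui[0] << 16) | (oui[1] << 8) | oui[2];
    }

    int main(void)
    {
        const uint8_t ident[3] = { 0x00, 0x60, 0xAD };

        switch (oui_pack(ident)) {
        case VENDOR_A_OUI:
            puts("vendor A");
            break;
        case VENDOR_B_OUI:
            puts("vendor B");
            break;
        default:
            puts("unknown vendor OUI");
        }
        return 0;
    }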
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 9f018503d4fd..fb4ed9f7855b 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -838,6 +838,7 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
*/
void intel_lvds_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_lvds_encoder *lvds_encoder;
struct intel_connector *connector;
const struct drm_edid *drm_edid;
@@ -872,7 +873,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
}
ddc_pin = GMBUS_PIN_PANEL;
- if (!intel_bios_is_lvds_present(i915, &ddc_pin)) {
+ if (!intel_bios_is_lvds_present(display, &ddc_pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT\n");
@@ -966,7 +967,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
} else {
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(i915, &connector->panel, NULL,
+ intel_bios_init_panel_late(display, &connector->panel, NULL,
IS_ERR(drm_edid) ? NULL : drm_edid);
/* Try EDID first */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index e1213f3d93cc..72694dde3c22 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -987,7 +987,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
}
}
- intel_fbc_sanitize(i915);
+ intel_fbc_sanitize(&i915->display);
intel_sanitize_plane_mapping(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 68bd5101ec89..ff11836459de 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -252,7 +252,7 @@ struct opregion_asle_ext {
#define OPREGION_SIZE (8 * 1024)
struct intel_opregion {
- struct drm_i915_private *i915;
+ struct intel_display *display;
struct opregion_header *header;
struct opregion_acpi *acpi;
@@ -268,9 +268,9 @@ struct intel_opregion {
struct notifier_block acpi_notifier;
};
-static int check_swsci_function(struct drm_i915_private *i915, u32 function)
+static int check_swsci_function(struct intel_display *display, u32 function)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
struct opregion_swsci *swsci;
u32 main_function, sub_function;
@@ -300,20 +300,20 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
return 0;
}
-static int swsci(struct drm_i915_private *dev_priv,
+static int swsci(struct intel_display *display,
u32 function, u32 parm, u32 *parm_out)
{
struct opregion_swsci *swsci;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u32 scic, dslp;
u16 swsci_val;
int ret;
- ret = check_swsci_function(dev_priv, function);
+ ret = check_swsci_function(display, function);
if (ret)
return ret;
- swsci = dev_priv->display.opregion->swsci;
+ swsci = display->opregion->swsci;
/* Driver sleep timeout in ms. */
dslp = swsci->dslp;
@@ -331,7 +331,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* The spec tells us to do this, but we are the only user... */
scic = swsci->scic;
if (scic & SWSCI_SCIC_INDICATOR) {
- drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n");
+ drm_dbg(display->drm, "SWSCI request already in progress\n");
return -EBUSY;
}
@@ -355,7 +355,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
- drm_dbg(&dev_priv->drm, "SWSCI request timed out\n");
+ drm_dbg(display->drm, "SWSCI request timed out\n");
return -ETIMEDOUT;
}
@@ -364,7 +364,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Note: scic == 0 is an error! */
if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
- drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic);
+ drm_dbg(display->drm, "SWSCI request error %u\n", scic);
return -EIO;
}
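swsci() above submits a request and then polls the status word until the in-progress indicator clears or the BIOS-supplied dslp timeout expires. A userspace model of that timed poll (the kernel's wait_for() does the real timed loop; everything here is a stand-in):

    #include <stdbool.h>
    #include <stdio.h>

    #define SCIC_INDICATOR 0x1u

    static unsigned int scic;           /* stands in for swsci->scic */
    static unsigned int fake_hw_ticks;  /* pretend BIOS work */

    static unsigned int read_scic(void)
    {
        if (fake_hw_ticks && --fake_hw_ticks == 0)
            scic &= ~SCIC_INDICATOR;    /* BIOS finished */
        return scic;
    }

    static bool poll_done(unsigned int timeout_ms)
    {
        for (unsigned int elapsed = 0; elapsed <= timeout_ms; elapsed++)
            if (!(read_scic() & SCIC_INDICATOR))
                return true;
        return false;
    }

    int main(void)
    {
        scic = SCIC_INDICATOR;  /* request submitted */
        fake_hw_ticks = 5;

        printf(poll_done(50) ? "SWSCI done\n" : "SWSCI timed out\n");
        return 0;
    }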
@@ -381,28 +381,28 @@ static int swsci(struct drm_i915_private *dev_priv,
#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
-int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+int intel_opregion_notify_encoder(struct intel_encoder *encoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 parm = 0;
u32 type = 0;
u32 port;
int ret;
/* don't care about old stuff for now */
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(display))
return 0;
/* Avoid port out of bounds checks if SWSCI isn't there. */
- ret = check_swsci_function(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE);
+ ret = check_swsci_function(display, SWSCI_SBCB_DISPLAY_POWER_STATE);
if (ret)
return ret;
- if (intel_encoder->type == INTEL_OUTPUT_DSI)
+ if (encoder->type == INTEL_OUTPUT_DSI)
port = 0;
else
- port = intel_encoder->port;
+ port = encoder->port;
if (port == PORT_E) {
port = 0;
@@ -419,17 +419,17 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
* number is out of bounds after mapping.
*/
if (port > 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
- intel_encoder->base.base.id, intel_encoder->base.name,
- port_name(intel_encoder->port), port);
+ encoder->base.base.id, encoder->base.name,
+ port_name(encoder->port), port);
return -EINVAL;
}
if (!enable)
parm |= 4 << 8;
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
type = DISPLAY_TYPE_CRT;
break;
@@ -444,15 +444,15 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
break;
default:
- drm_WARN_ONCE(&dev_priv->drm, 1,
+ drm_WARN_ONCE(display->drm, 1,
"unsupported intel_encoder type %d\n",
- intel_encoder->type);
+ encoder->type);
return -EINVAL;
}
parm |= type << (16 + port * 3);
- return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+ return swsci(display, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
}
static const struct {
@@ -466,33 +466,33 @@ static const struct {
{ PCI_D3cold, 0x04 },
};
-int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+int intel_opregion_notify_adapter(struct intel_display *display,
pci_power_t state)
{
int i;
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(display))
return 0;
for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
if (state == power_state_map[i].pci_power_state)
- return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ return swsci(display, SWSCI_SBCB_ADAPTER_POWER_STATE,
power_state_map[i].parm, NULL);
}
return -EINVAL;
}
-static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
+static u32 asle_set_backlight(struct intel_display *display, u32 bclp)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- struct opregion_asle *asle = dev_priv->display.opregion->asle;
+ struct opregion_asle *asle = display->opregion->asle;
- drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
+ drm_dbg(display->drm, "bclp = 0x%08x\n", bclp);
if (acpi_video_get_backlight_type() == acpi_backlight_native) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"opregion backlight request ignored\n");
return 0;
}
@@ -504,104 +504,104 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
if (bclp > 255)
return ASLC_BACKLIGHT_FAILED;
- drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
/*
* Update backlight on all connectors that support backlight (usually
* only one).
*/
- drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n",
+ drm_dbg_kms(display->drm, "updating opregion backlight %d/255\n",
bclp);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
intel_backlight_set_acpi(connector->base.state, bclp, 255);
drm_connector_list_iter_end(&conn_iter);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
- drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
-static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
+static u32 asle_set_als_illum(struct intel_display *display, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
- drm_dbg(&dev_priv->drm, "Illum is not supported\n");
+ drm_dbg(display->drm, "Illum is not supported\n");
return ASLC_ALS_ILLUM_FAILED;
}
-static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
+static u32 asle_set_pwm_freq(struct intel_display *display, u32 pfmb)
{
- drm_dbg(&dev_priv->drm, "PWM freq is not supported\n");
+ drm_dbg(display->drm, "PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
-static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
+static u32 asle_set_pfit(struct intel_display *display, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
- drm_dbg(&dev_priv->drm, "Pfit is not supported\n");
+ drm_dbg(display->drm, "Pfit is not supported\n");
return ASLC_PFIT_FAILED;
}
-static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
+static u32 asle_set_supported_rotation_angles(struct intel_display *display, u32 srot)
{
- drm_dbg(&dev_priv->drm, "SROT is not supported\n");
+ drm_dbg(display->drm, "SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
-static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_button_array(struct intel_display *display, u32 iuer)
{
if (!iuer)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (nothing)\n");
if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (rotation lock)\n");
if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (volume down)\n");
if (iuer & ASLE_IUER_VOLUME_UP_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (volume up)\n");
if (iuer & ASLE_IUER_WINDOWS_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (windows)\n");
if (iuer & ASLE_IUER_POWER_BTN)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Button array event is not supported (power)\n");
return ASLC_BUTTON_ARRAY_FAILED;
}
-static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_convertible(struct intel_display *display, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Convertible is not supported (clamshell)\n");
else
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Convertible is not supported (slate)\n");
return ASLC_CONVERTIBLE_FAILED;
}
-static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
+static u32 asle_set_docking(struct intel_display *display, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
- drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n");
+ drm_dbg(display->drm, "Docking is not supported (docked)\n");
else
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Docking is not supported (undocked)\n");
return ASLC_DOCKING_FAILED;
}
-static u32 asle_isct_state(struct drm_i915_private *dev_priv)
+static u32 asle_isct_state(struct intel_display *display)
{
- drm_dbg(&dev_priv->drm, "ISCT is not supported\n");
+ drm_dbg(display->drm, "ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
}
@@ -609,7 +609,7 @@ static void asle_work(struct work_struct *work)
{
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
- struct drm_i915_private *dev_priv = opregion->i915;
+ struct intel_display *display = opregion->display;
struct opregion_asle *asle = opregion->asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -620,50 +620,51 @@ static void asle_work(struct work_struct *work)
aslc_req = asle->aslc;
if (!(aslc_req & ASLC_REQ_MSK)) {
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"No request on ASLC interrupt 0x%08x\n", aslc_req);
return;
}
if (aslc_req & ASLC_SET_ALS_ILLUM)
- aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
+ aslc_stat |= asle_set_als_illum(display, asle->alsi);
if (aslc_req & ASLC_SET_BACKLIGHT)
- aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
+ aslc_stat |= asle_set_backlight(display, asle->bclp);
if (aslc_req & ASLC_SET_PFIT)
- aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
+ aslc_stat |= asle_set_pfit(display, asle->pfit);
if (aslc_req & ASLC_SET_PWM_FREQ)
- aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
+ aslc_stat |= asle_set_pwm_freq(display, asle->pfmb);
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
- aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
+ aslc_stat |= asle_set_supported_rotation_angles(display,
asle->srot);
if (aslc_req & ASLC_BUTTON_ARRAY)
- aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
+ aslc_stat |= asle_set_button_array(display, asle->iuer);
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
- aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
+ aslc_stat |= asle_set_convertible(display, asle->iuer);
if (aslc_req & ASLC_DOCKING_INDICATOR)
- aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
+ aslc_stat |= asle_set_docking(display, asle->iuer);
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
- aslc_stat |= asle_isct_state(dev_priv);
+ aslc_stat |= asle_isct_state(display);
asle->aslc = aslc_stat;
}
-bool intel_opregion_asle_present(struct drm_i915_private *i915)
+bool intel_opregion_asle_present(struct intel_display *display)
{
- return i915->display.opregion && i915->display.opregion->asle;
+ return display->opregion && display->opregion->asle;
}
-void intel_opregion_asle_intr(struct drm_i915_private *i915)
+void intel_opregion_asle_intr(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_opregion *opregion = display->opregion;
if (opregion && opregion->asle)
queue_work(i915->unordered_wq, &opregion->asle_work);
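asle_work() above walks the ASLC request bits, calls one handler per set bit, and ORs each handler's status code into a single reply word written back to asle->aslc. A minimal sketch of that accumulate-and-reply dispatch, with hypothetical bit values:

    #include <stdint.h>
    #include <stdio.h>

    #define ASLC_SET_BACKLIGHT (1u << 1)    /* hypothetical */
    #define ASLC_SET_PFIT      (1u << 2)    /* hypothetical */
    #define ASLC_PFIT_FAILED   (1u << 18)   /* hypothetical */

    static uint32_t set_backlight(uint32_t bclp) { (void)bclp; return 0; }
    static uint32_t set_pfit(uint32_t pfit) { (void)pfit; return ASLC_PFIT_FAILED; }

    int main(void)
    {
        uint32_t req = ASLC_SET_BACKLIGHT | ASLC_SET_PFIT;
        uint32_t stat = 0;

        if (req & ASLC_SET_BACKLIGHT)
            stat |= set_backlight(200);
        if (req & ASLC_SET_PFIT)
            stat |= set_pfit(0);

        printf("aslc reply 0x%08x\n", stat);
        return 0;
    }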
@@ -720,9 +721,9 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static void intel_didl_outputs(struct drm_i915_private *dev_priv)
+static void intel_didl_outputs(struct intel_display *display)
{
- struct intel_opregion *opregion = dev_priv->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
@@ -737,9 +738,9 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
- intel_acpi_device_id_update(dev_priv);
+ intel_acpi_device_id_update(display);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (i < max_outputs)
set_did(opregion, i, connector->acpi_device_id);
@@ -747,10 +748,10 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
}
drm_connector_list_iter_end(&conn_iter);
- drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i);
+ drm_dbg_kms(display->drm, "%d outputs detected\n", i);
if (i > max_outputs)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"More than %d outputs in connector list\n",
max_outputs);
@@ -759,9 +760,9 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
set_did(opregion, i, 0);
}
-static void intel_setup_cadls(struct drm_i915_private *dev_priv)
+static void intel_setup_cadls(struct intel_display *display)
{
- struct intel_opregion *opregion = dev_priv->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0;
@@ -776,7 +777,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
* Note that internal panels should be at the front of the connector
* list already, ensuring they're not left out.
*/
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (i >= ARRAY_SIZE(opregion->acpi->cadl))
break;
@@ -789,9 +790,9 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
opregion->acpi->cadl[i] = 0;
}
-static void swsci_setup(struct drm_i915_private *dev_priv)
+static void swsci_setup(struct intel_display *display)
{
- struct intel_opregion *opregion = dev_priv->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -800,7 +801,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
opregion->swsci_sbcb_sub_functions = 1;
/* We use GBDA to ask for supported GBDA calls. */
- if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ if (swsci(display, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
tmp <<= 1;
opregion->swsci_gbda_sub_functions |= tmp;
@@ -811,7 +812,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
* must not call interfaces that are not specifically requested by the
* bios.
*/
- if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(display, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
/* here, the bits already match sub-function codes */
opregion->swsci_sbcb_sub_functions |= tmp;
requested_callbacks = true;
@@ -822,7 +823,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
* the callback is _requested_. But we still can't call interfaces that
* are not requested.
*/
- if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(display, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
u32 low = tmp & 0x7ff;
u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -832,7 +833,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
if (requested_callbacks) {
u32 req = opregion->swsci_sbcb_sub_functions;
if ((req & tmp) != req)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n",
req, tmp);
/* XXX: for now, trust the requested callbacks */
@@ -842,7 +843,7 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
}
}
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
opregion->swsci_gbda_sub_functions,
opregion->swsci_sbcb_sub_functions);
@@ -867,10 +868,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-int intel_opregion_setup(struct drm_i915_private *dev_priv)
+int intel_opregion_setup(struct intel_display *display)
{
struct intel_opregion *opregion;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
@@ -885,10 +886,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
pci_read_config_dword(pdev, ASLS, &asls);
- drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n",
+ drm_dbg(display->drm, "graphic opregion physical addr: 0x%x\n",
asls);
if (asls == 0) {
- drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n");
+ drm_dbg(display->drm, "ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
@@ -896,8 +897,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (!opregion)
return -ENOMEM;
- opregion->i915 = dev_priv;
- dev_priv->display.opregion = opregion;
+ opregion->display = display;
+ display->opregion = opregion;
INIT_WORK(&opregion->asle_work, asle_work);
@@ -910,20 +911,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
memcpy(buf, base, sizeof(buf));
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
- drm_dbg(&dev_priv->drm, "opregion signature mismatch\n");
+ drm_dbg(display->drm, "opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
opregion->header = base;
- drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
+ drm_dbg(display->drm, "ACPI OpRegion version %u.%u.%u\n",
opregion->header->over.major,
opregion->header->over.minor,
opregion->header->over.revision);
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n");
+ drm_dbg(display->drm, "Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
/*
* Indicate we handle monitor hotplug events ourselves so we do
@@ -938,30 +939,30 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
u8 major = opregion->header->over.major;
if (major >= 3) {
- drm_err(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n");
+ drm_err(display->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n");
} else {
if (major >= 2)
- drm_dbg(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v2.x\n");
- drm_dbg(&dev_priv->drm, "SWSCI supported\n");
+ drm_dbg(display->drm, "SWSCI Mailbox #2 present for opregion v2.x\n");
+ drm_dbg(display->drm, "SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- swsci_setup(dev_priv);
+ swsci_setup(display);
}
}
if (mboxes & MBOX_ASLE) {
- drm_dbg(&dev_priv->drm, "ASLE supported\n");
+ drm_dbg(display->drm, "ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
}
if (mboxes & MBOX_ASLE_EXT) {
- drm_dbg(&dev_priv->drm, "ASLE extension supported\n");
+ drm_dbg(display->drm, "ASLE extension supported\n");
opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET;
}
if (mboxes & MBOX_BACKLIGHT) {
- drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
+ drm_dbg(display->drm, "Mailbox #2 for backlight present\n");
}
if (dmi_check_system(intel_no_opregion_vbt))
@@ -979,7 +980,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
*/
if (opregion->header->over.major > 2 ||
opregion->header->over.minor >= 1) {
- drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE);
+ drm_WARN_ON(display->drm, rvda < OPREGION_SIZE);
rvda += asls;
}
@@ -989,14 +990,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
- if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (intel_bios_is_valid_vbt(display, vbt, vbt_size)) {
+ drm_dbg_kms(display->drm,
"Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
goto out;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Invalid VBT in ACPI OpRegion (RVDA)\n");
memunmap(opregion->rvda);
opregion->rvda = NULL;
@@ -1014,13 +1015,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt_size = (mboxes & MBOX_ASLE_EXT) ?
OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
vbt_size -= OPREGION_VBT_OFFSET;
- if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (intel_bios_is_valid_vbt(display, vbt, vbt_size)) {
+ drm_dbg_kms(display->drm,
"Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
}
@@ -1031,7 +1032,7 @@ err_out:
memunmap(base);
err_memremap:
kfree(opregion);
- dev_priv->display.opregion = NULL;
+ display->opregion = NULL;
return err;
}
@@ -1054,25 +1055,25 @@ static const struct dmi_system_id intel_use_opregion_panel_type[] = {
};
int
-intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
+intel_opregion_get_panel_type(struct intel_display *display)
{
u32 panel_details;
int ret;
- ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+ ret = swsci(display, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret)
return ret;
ret = (panel_details >> 8) & 0xff;
if (ret > 0x10) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Invalid OpRegion panel type 0x%x\n", ret);
return -EINVAL;
}
/* fall back to VBT panel type? */
if (ret == 0x0) {
- drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n");
+ drm_dbg_kms(display->drm, "No panel type in OpRegion\n");
return -ENODEV;
}
@@ -1082,7 +1083,7 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* via a quirk list :(
*/
if (!dmi_check_system(intel_use_opregion_panel_type)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Ignoring OpRegion panel type (%d)\n", ret - 1);
return -ENODEV;
}
@@ -1092,7 +1093,7 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
/**
* intel_opregion_get_edid - Fetch EDID from ACPI OpRegion mailbox #5
- * @intel_connector: eDP connector
+ * @connector: eDP connector
*
* This reads the ACPI Opregion mailbox #5 to extract the EDID that is passed
* to it.
@@ -1101,11 +1102,10 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* The EDID in the OpRegion, or NULL if there is none or it's invalid.
*
*/
-const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
+const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_opregion *opregion = display->opregion;
const struct drm_edid *drm_edid;
const void *edid;
int len;
@@ -1117,13 +1117,13 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
/* Validity corresponds to number of 128-byte blocks */
len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128;
- if (!len || !memchr_inv(edid, 0, len))
+ if (!len || mem_is_zero(edid, len))
return NULL;
drm_edid = drm_edid_alloc(edid, len);
if (!drm_edid_valid(drm_edid)) {
- drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n");
+ drm_dbg_kms(display->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n");
drm_edid_free(drm_edid);
drm_edid = NULL;
}
@@ -1131,9 +1131,9 @@ const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_con
return drm_edid;
}
-bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+bool intel_opregion_vbt_present(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion || !opregion->vbt)
return false;
@@ -1141,9 +1141,9 @@ bool intel_opregion_vbt_present(struct drm_i915_private *i915)
return true;
}
-const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
+const void *intel_opregion_get_vbt(struct intel_display *display, size_t *size)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion || !opregion->vbt)
return NULL;
@@ -1154,9 +1154,9 @@ const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
return kmemdup(opregion->vbt, opregion->vbt_size, GFP_KERNEL);
}
-bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+bool intel_opregion_headless_sku(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
struct opregion_header *header;
if (!opregion)
@@ -1171,9 +1171,9 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915)
return opregion->header->pcon & PCON_HEADLESS_SKU;
}
-void intel_opregion_register(struct drm_i915_private *i915)
+void intel_opregion_register(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion)
return;
@@ -1184,16 +1184,16 @@ void intel_opregion_register(struct drm_i915_private *i915)
register_acpi_notifier(&opregion->acpi_notifier);
}
- intel_opregion_resume(i915);
+ intel_opregion_resume(display);
}
-static void intel_opregion_resume_display(struct drm_i915_private *i915)
+static void intel_opregion_resume_display(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (opregion->acpi) {
- intel_didl_outputs(i915);
- intel_setup_cadls(i915);
+ intel_didl_outputs(display);
+ intel_setup_cadls(display);
/*
* Notify BIOS we are ready to handle ACPI video ext notifs.
@@ -1210,25 +1210,25 @@ static void intel_opregion_resume_display(struct drm_i915_private *i915)
}
/* Some platforms abuse the _DSM to enable MUX */
- intel_dsm_get_bios_data_funcs_supported(i915);
+ intel_dsm_get_bios_data_funcs_supported(display);
}
-void intel_opregion_resume(struct drm_i915_private *i915)
+void intel_opregion_resume(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion)
return;
- if (HAS_DISPLAY(i915))
- intel_opregion_resume_display(i915);
+ if (HAS_DISPLAY(display))
+ intel_opregion_resume_display(display);
- intel_opregion_notify_adapter(i915, PCI_D0);
+ intel_opregion_notify_adapter(display, PCI_D0);
}
-static void intel_opregion_suspend_display(struct drm_i915_private *i915)
+static void intel_opregion_suspend_display(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
@@ -1239,24 +1239,24 @@ static void intel_opregion_suspend_display(struct drm_i915_private *i915)
opregion->acpi->drdy = 0;
}
-void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+void intel_opregion_suspend(struct intel_display *display, pci_power_t state)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion)
return;
- intel_opregion_notify_adapter(i915, state);
+ intel_opregion_notify_adapter(display, state);
- if (HAS_DISPLAY(i915))
- intel_opregion_suspend_display(i915);
+ if (HAS_DISPLAY(display))
+ intel_opregion_suspend_display(display);
}
-void intel_opregion_unregister(struct drm_i915_private *i915)
+void intel_opregion_unregister(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
- intel_opregion_suspend(i915, PCI_D1);
+ intel_opregion_suspend(display, PCI_D1);
if (!opregion)
return;
@@ -1267,9 +1267,9 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
}
}
-void intel_opregion_cleanup(struct drm_i915_private *i915)
+void intel_opregion_cleanup(struct intel_display *display)
{
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_opregion *opregion = display->opregion;
if (!opregion)
return;
@@ -1278,13 +1278,13 @@ void intel_opregion_cleanup(struct drm_i915_private *i915)
if (opregion->rvda)
memunmap(opregion->rvda);
kfree(opregion);
- i915->display.opregion = NULL;
+ display->opregion = NULL;
}
static int intel_opregion_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
- struct intel_opregion *opregion = i915->display.opregion;
+ struct intel_display *display = m->private;
+ struct intel_opregion *opregion = display->opregion;
if (opregion)
seq_write(m, opregion->header, OPREGION_SIZE);
@@ -1294,10 +1294,10 @@ static int intel_opregion_show(struct seq_file *m, void *unused)
DEFINE_SHOW_ATTRIBUTE(intel_opregion);
-void intel_opregion_debugfs_register(struct drm_i915_private *i915)
+void intel_opregion_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_opregion", 0444, minor->debugfs_root,
- i915, &intel_opregion_fops);
+ display, &intel_opregion_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 4b2b8e752632..8101eeebfd8b 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -28,88 +28,88 @@
#include <linux/pci.h>
#include <linux/types.h>
-struct drm_i915_private;
struct intel_connector;
+struct intel_display;
struct intel_encoder;
#ifdef CONFIG_ACPI
-int intel_opregion_setup(struct drm_i915_private *dev_priv);
-void intel_opregion_cleanup(struct drm_i915_private *i915);
+int intel_opregion_setup(struct intel_display *display);
+void intel_opregion_cleanup(struct intel_display *display);
-void intel_opregion_register(struct drm_i915_private *dev_priv);
-void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+void intel_opregion_register(struct intel_display *display);
+void intel_opregion_unregister(struct intel_display *display);
-void intel_opregion_resume(struct drm_i915_private *dev_priv);
-void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+void intel_opregion_resume(struct intel_display *display);
+void intel_opregion_suspend(struct intel_display *display,
pci_power_t state);
-bool intel_opregion_asle_present(struct drm_i915_private *i915);
-void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
-int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+bool intel_opregion_asle_present(struct intel_display *display);
+void intel_opregion_asle_intr(struct intel_display *display);
+int intel_opregion_notify_encoder(struct intel_encoder *encoder,
bool enable);
-int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+int intel_opregion_notify_adapter(struct intel_display *display,
pci_power_t state);
-int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+int intel_opregion_get_panel_type(struct intel_display *display);
const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
-bool intel_opregion_vbt_present(struct drm_i915_private *i915);
-const void *intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size);
+bool intel_opregion_vbt_present(struct intel_display *display);
+const void *intel_opregion_get_vbt(struct intel_display *display, size_t *size);
-bool intel_opregion_headless_sku(struct drm_i915_private *i915);
+bool intel_opregion_headless_sku(struct intel_display *display);
-void intel_opregion_debugfs_register(struct drm_i915_private *i915);
+void intel_opregion_debugfs_register(struct intel_display *display);
#else /* CONFIG_ACPI*/
-static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+static inline int intel_opregion_setup(struct intel_display *display)
{
return 0;
}
-static inline void intel_opregion_cleanup(struct drm_i915_private *i915)
+static inline void intel_opregion_cleanup(struct intel_display *display)
{
}
-static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_register(struct intel_display *display)
{
}
-static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_unregister(struct intel_display *display)
{
}
-static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_resume(struct intel_display *display)
{
}
-static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+static inline void intel_opregion_suspend(struct intel_display *display,
pci_power_t state)
{
}
-static inline bool intel_opregion_asle_present(struct drm_i915_private *i915)
+static inline bool intel_opregion_asle_present(struct intel_display *display)
{
return false;
}
-static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+static inline void intel_opregion_asle_intr(struct intel_display *display)
{
}
static inline int
-intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+intel_opregion_notify_encoder(struct intel_encoder *encoder, bool enable)
{
return 0;
}
static inline int
-intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct intel_display *display, pci_power_t state)
{
return 0;
}
-static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+static inline int intel_opregion_get_panel_type(struct intel_display *display)
{
return -ENODEV;
}
@@ -120,23 +120,23 @@ intel_opregion_get_edid(struct intel_connector *connector)
return NULL;
}
-static inline bool intel_opregion_vbt_present(struct drm_i915_private *i915)
+static inline bool intel_opregion_vbt_present(struct intel_display *display)
{
return false;
}
static inline const void *
-intel_opregion_get_vbt(struct drm_i915_private *i915, size_t *size)
+intel_opregion_get_vbt(struct intel_display *display, size_t *size)
{
return NULL;
}
-static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+static inline bool intel_opregion_headless_sku(struct intel_display *display)
{
return false;
}
-static inline void intel_opregion_debugfs_register(struct drm_i915_private *i915)
+static inline void intel_opregion_debugfs_register(struct intel_display *display)
{
}
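
The whole opregion conversion above follows one pattern: entry points stop taking the full driver private and accept a struct intel_display, from which the drm pointer and the opregion state hang directly (display->drm, display->opregion). Below is a minimal standalone sketch of that shape, using a C11 _Generic helper so different object types resolve to the same display context; the struct layouts and the helper body are illustrative assumptions, not the i915 definitions.

    /* Standalone sketch of the display-context pattern in this series.
     * Not the i915 implementation: types are trimmed stand-ins. */
    #include <stdio.h>

    struct drm_device { int primary_minor; };

    struct intel_display {
        struct drm_device *drm;   /* back-pointer, as display->drm above */
        void *opregion;           /* as display->opregion in the diff */
    };

    struct intel_encoder { struct intel_display *display; };
    struct intel_dp { struct intel_encoder base; };

    static struct intel_display *encoder_to_display(struct intel_encoder *e)
    {
        return e->display;
    }

    static struct intel_display *dp_to_display(struct intel_dp *dp)
    {
        return dp->base.display;
    }

    /* One entry point for "give me the display context", whatever we hold. */
    #define to_intel_display(p)                                  \
        _Generic((p),                                            \
                 struct intel_encoder *: encoder_to_display,     \
                 struct intel_dp *: dp_to_display)(p)

    int main(void)
    {
        struct drm_device drm = { .primary_minor = 0 };
        struct intel_display display = { .drm = &drm };
        struct intel_dp dp = { .base = { .display = &display } };

        struct intel_display *d = to_intel_display(&dp);
        printf("resolved display with drm minor %d\n", d->drm->primary_minor);
        return 0;
    }

The payoff is visible throughout the diff: helpers like pps_name() can derive everything from the object they are handed instead of having the caller plumb a device pointer through.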
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 0d48b9bec29c..f13ab680c2cf 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -358,6 +358,7 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state,
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -399,7 +400,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
if (intel_crtc_has_dp_encoder(crtc_state)) {
intel_pch_transcoder_set_m1_n1(crtc, &crtc_state->dp_m_n);
intel_pch_transcoder_set_m2_n2(crtc, &crtc_state->dp_m2_n2);
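
ilk_pch_enable() shows the transitional shape: both contexts are derived up front, so call sites can switch to the display parameter one at a time while unconverted helpers keep the old one. A compile-and-run sketch of that coexistence, with stand-in types; only assert_pps_unlocked()'s new signature is taken from the diff.

    #include <stdio.h>

    struct intel_display { int dummy; };
    struct drm_i915_private { struct intel_display display; };

    /* new-style callee: signature shape matches the converted one */
    static void assert_pps_unlocked(struct intel_display *display, int pipe)
    {
        (void)display;
        printf("checking PPS unlock for pipe %d\n", pipe);
    }

    /* old-style callee, not converted yet */
    static void legacy_helper(struct drm_i915_private *i915)
    {
        (void)i915;
    }

    static void enable_path(struct drm_i915_private *i915, int pipe)
    {
        struct intel_display *display = &i915->display;

        legacy_helper(i915);                /* still on the old parameter */
        assert_pps_unlocked(display, pipe); /* already converted */
    }

    int main(void)
    {
        struct drm_i915_private i915 = { 0 };

        enable_path(&i915, 0);
        return 0;
    }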
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 9ca981b7a12c..ceaf9e3147da 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -92,7 +92,7 @@ int intel_pmdemand_init(struct drm_i915_private *i915)
&pmdemand_state->base,
&intel_pmdemand_funcs);
- if (IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0))
+ if (IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0))
/* Wa_14016740474 */
intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
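
The only functional bit here is the macro rename; the guard still means "display IP version 14.0, stepping within A0 to C0" for Wa_14016740474. A sketch of that kind of gate follows; the half-open [since, until) reading matches how the kernel usually documents its stepping range checks, but treat it and all names below as assumptions.

    #include <stdbool.h>
    #include <stdio.h>

    enum step { STEP_A0, STEP_B0, STEP_C0, STEP_D0 };

    struct display_info {
        int ver_major, ver_minor;
        enum step step;
    };

    static bool display_ver_step(const struct display_info *d,
                                 int major, int minor,
                                 enum step since, enum step until)
    {
        return d->ver_major == major && d->ver_minor == minor &&
               d->step >= since && d->step < until;   /* [since, until) */
    }

    int main(void)
    {
        struct display_info d = { .ver_major = 14, .ver_minor = 0,
                                  .step = STEP_B0 };

        /* B0 is inside [A0, C0): the workaround would apply. */
        printf("apply Wa_14016740474: %d\n",
               display_ver_step(&d, 14, 0, STEP_A0, STEP_C0));
        return 0;
    }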
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 7ce926241e83..feddc30e3375 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -18,15 +18,18 @@
#include "intel_pps_regs.h"
#include "intel_quirks.h"
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+static void vlv_steal_power_sequencer(struct intel_display *display,
enum pipe pipe);
static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
-static const char *pps_name(struct drm_i915_private *i915,
- struct intel_pps *pps)
+static const char *pps_name(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_pps *pps = &intel_dp->pps;
+
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
switch (pps->pps_pipe) {
case INVALID_PIPE:
@@ -60,14 +63,15 @@ static const char *pps_name(struct drm_i915_private *i915,
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
/*
* See intel_pps_reset_all() why we need a power domain reference here.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
- mutex_lock(&dev_priv->display.pps.mutex);
+ mutex_lock(&display->pps.mutex);
return wakeref;
}
@@ -75,9 +79,10 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- mutex_unlock(&dev_priv->display.pps.mutex);
+ mutex_unlock(&display->pps.mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return 0;
@@ -86,7 +91,8 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -94,22 +100,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
- if (drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ if (drm_WARN(display->drm,
+ intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
"skipping %s kick due to [ENCODER:%d:%s] being active\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"kicking %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
@@ -119,7 +125,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
else
DP |= DP_PIPE_SEL(pipe);
- pll_enabled = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE;
+ pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
/*
* The DPLL for the pipe must be enabled for this to work.
@@ -130,7 +136,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
return;
@@ -143,14 +149,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
- intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(display, intel_dp->output_reg);
- intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(display, intel_dp->output_reg);
if (!pll_enabled) {
vlv_force_pll_off(dev_priv, pipe);
@@ -160,7 +166,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
}
}
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+static enum pipe vlv_find_free_pps(struct intel_display *display)
{
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
@@ -169,11 +175,11 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* We don't have power sequencer currently.
* Pick one that's not used by other ports.
*/
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.active_pipe != INVALID_PIPE &&
intel_dp->pps.active_pipe !=
intel_dp->pps.pps_pipe);
@@ -181,7 +187,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
if (intel_dp->pps.pps_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->pps.pps_pipe);
} else {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.pps_pipe != INVALID_PIPE);
if (intel_dp->pps.active_pipe != INVALID_PIPE)
@@ -198,36 +204,36 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
if (intel_dp->pps.pps_pipe != INVALID_PIPE)
return intel_dp->pps.pps_pipe;
- pipe = vlv_find_free_pps(dev_priv);
+ pipe = vlv_find_free_pps(display);
/*
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+ if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
pipe = PIPE_A;
- vlv_steal_power_sequencer(dev_priv, pipe);
+ vlv_steal_power_sequencer(display, pipe);
intel_dp->pps.pps_pipe = pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"picked %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
/* init power sequencer on this pipe and port */
@@ -246,13 +252,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int pps_idx = intel_dp->pps.pps_idx;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps.pps_reset)
return pps_idx;
@@ -268,37 +274,38 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
return pps_idx;
}
-typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx);
+typedef bool (*pps_check)(struct intel_display *display, int pps_idx);
-static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
{
- return intel_de_read(dev_priv, PP_STATUS(dev_priv, pps_idx)) & PP_ON;
+ return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
}
-static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
{
- return intel_de_read(dev_priv, PP_CONTROL(dev_priv, pps_idx)) & EDP_FORCE_VDD;
+ return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
}
-static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_any(struct intel_display *display, int pps_idx)
{
return true;
}
static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+vlv_initial_pps_pipe(struct intel_display *display,
enum port port, pps_check check)
{
enum pipe pipe;
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, pipe)) &
+ u32 port_sel = intel_de_read(display,
+ PP_ON_DELAYS(display, pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel != PANEL_PORT_SELECT_VLV(port))
continue;
- if (!check(dev_priv, pipe))
+ if (!check(display, pipe))
continue;
return pipe;
@@ -310,41 +317,43 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum port port = dig_port->base.port;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_has_vdd_on);
/* didn't find one? pick one with just the correct port */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_any);
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] no initial power sequencer\n",
dig_port->base.base.base.id, dig_port->base.base.name);
return;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] initial power sequencer: %s\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
}
-static int intel_num_pps(struct drm_i915_private *i915)
+static int intel_num_pps(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return 2;
@@ -365,23 +374,24 @@ static int intel_num_pps(struct drm_i915_private *i915)
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp->pps.pps_idx == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
INTEL_PCH_TYPE(i915) <= PCH_ADP)
- return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+ return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
}
static int
-bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
+bxt_initial_pps_idx(struct intel_display *display, pps_check check)
{
- int pps_idx, pps_num = intel_num_pps(i915);
+ int pps_idx, pps_num = intel_num_pps(display);
for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- if (check(i915, pps_idx))
+ if (check(display, pps_idx))
return pps_idx;
}
@@ -391,11 +401,12 @@ bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
static bool
pps_initial_setup(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- lockdep_assert_held(&i915->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
vlv_initial_power_sequencer_setup(intel_dp);
@@ -403,46 +414,47 @@ pps_initial_setup(struct intel_dp *intel_dp)
}
/* first ask the VBT */
- if (intel_num_pps(i915) > 1)
+ if (intel_num_pps(display) > 1)
intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
else
intel_dp->pps.pps_idx = 0;
- if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= intel_num_pps(i915)))
+ if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
intel_dp->pps.pps_idx = -1;
/* VBT wasn't parsed yet? pick one where the panel is on */
if (intel_dp->pps.pps_idx < 0)
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps.pps_idx < 0)
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
/* didn't find one? pick any */
if (intel_dp->pps.pps_idx < 0) {
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
encoder->base.base.id, encoder->base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] initial power sequencer: %s\n",
encoder->base.base.id, encoder->base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
}
return intel_pps_is_valid(intel_dp);
}
-void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+void intel_pps_reset_all(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
+ if (drm_WARN_ON(display->drm, !IS_LP(dev_priv)))
return;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/*
@@ -455,16 +467,16 @@ void intel_pps_reset_all(struct drm_i915_private *dev_priv)
* should use them always.
*/
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->pps.pps_reset = true;
else
intel_dp->pps.pps_pipe = INVALID_PIPE;
@@ -482,7 +494,8 @@ struct pps_registers {
static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pps_idx;
memset(regs, 0, sizeof(*regs));
@@ -494,17 +507,17 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
else
pps_idx = intel_dp->pps.pps_idx;
- regs->pp_ctrl = PP_CONTROL(dev_priv, pps_idx);
- regs->pp_stat = PP_STATUS(dev_priv, pps_idx);
- regs->pp_on = PP_ON_DELAYS(dev_priv, pps_idx);
- regs->pp_off = PP_OFF_DELAYS(dev_priv, pps_idx);
+ regs->pp_ctrl = PP_CONTROL(display, pps_idx);
+ regs->pp_stat = PP_STATUS(display, pps_idx);
+ regs->pp_on = PP_ON_DELAYS(display, pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(display, pps_idx);
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
- regs->pp_div = PP_DIVISOR(dev_priv, pps_idx);
+ regs->pp_div = PP_DIVISOR(display, pps_idx);
}
static i915_reg_t
@@ -529,49 +542,51 @@ _pp_stat_reg(struct intel_dp *intel_dp)
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
return false;
- return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+ return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
return false;
- return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+ return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- drm_WARN(&dev_priv->drm, 1,
+ drm_WARN(display->drm, 1,
"[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
- drm_dbg_kms(&dev_priv->drm,
+ pps_name(intel_dp));
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
- intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+ pps_name(intel_dp),
+ intel_de_read(display, _pp_stat_reg(intel_dp)),
+ intel_de_read(display, _pp_ctrl_reg(intel_dp)));
}
}
@@ -589,68 +604,71 @@ static void intel_pps_verify_state(struct intel_dp *intel_dp);
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask, u32 value)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
intel_pps_verify_state(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
mask, value,
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
- if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
+ drm_err(display->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
- drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+ drm_dbg_kms(display->drm, "Wait complete\n");
}
static void wait_panel_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power on\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power off time\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power cycle\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
/* take the difference of current time and panel power off time
* and then make panel wait for t11_t12 if needed. */
@@ -695,13 +713,13 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 control;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
@@ -716,13 +734,14 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
*/
bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return false;
@@ -733,16 +752,16 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -750,21 +769,22 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s panel power wasn't enabled\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
msleep(intel_dp->pps.panel_power_up_delay);
}
@@ -779,7 +799,8 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
*/
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
bool vdd;
@@ -792,27 +813,27 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp)
I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
}
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port =
- dp_to_dig_port(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+ drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);
if (!edp_have_panel_vdd(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -820,15 +841,16 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
/* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->pps.panel_power_off_time = ktime_get_boottime();
@@ -869,7 +891,8 @@ static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long delay;
/*
@@ -896,9 +919,10 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
*/
void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -907,7 +931,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
"[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
intel_dp->pps.want_panel_vdd = false;
@@ -919,25 +943,26 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
- if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
"[ENCODER:%d:%s] %s panel power already on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps)))
+ pps_name(intel_dp)))
return;
wait_panel_power_cycle(intel_dp);
@@ -947,24 +972,36 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
if (IS_IRONLAKE(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
+ /*
+ * WA: 22019252566
+ * Disable DPLS gating around power sequence.
+ */
+ if (IS_DISPLAY_VER(display, 13, 14))
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
+ 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+
pp |= PANEL_POWER_ON;
if (!IS_IRONLAKE(dev_priv))
pp |= PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
wait_panel_on(intel_dp);
intel_dp->pps.last_power_on = jiffies;
+ if (IS_DISPLAY_VER(display, 13, 14))
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
+ PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
+
if (IS_IRONLAKE(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
}
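
Beyond the parameter conversion, this hunk adds a workaround: on display versions 13 and 14, DPLS clock gating is disabled for the duration of the panel power-on sequence and restored once the panel reports on. A standalone sketch of that bracketing pattern; the register and bit names come from the diff, but the bit position and the MMIO plumbing are assumptions.

    #include <stdint.h>
    #include <stdio.h>

    #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1u << 29)  /* bit value assumed */

    static uint32_t south_dspclk_gate_d;  /* stand-in for the MMIO register */

    static void rmw(uint32_t *reg, uint32_t clear, uint32_t set)
    {
        *reg = (*reg & ~clear) | set;
    }

    static void panel_power_on_sequence(void)
    {
        printf("gate_d=%#x during power on\n", south_dspclk_gate_d);
    }

    int main(void)
    {
        /* disable DPLS gating around the power sequence ... */
        rmw(&south_dspclk_gate_d, 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
        panel_power_on_sequence();
        /* ... and restore gating afterwards */
        rmw(&south_dspclk_gate_d, PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
        printf("gate_d=%#x after restore\n", south_dspclk_gate_d);
        return 0;
    }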
@@ -981,24 +1018,25 @@ void intel_pps_on(struct intel_dp *intel_dp)
void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
- drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+ drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
"[ENCODER:%d:%s] %s need VDD to turn off panel\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -1010,8 +1048,8 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
intel_dp->pps.want_panel_vdd = false;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
wait_panel_off(intel_dp);
intel_dp->pps.panel_power_off_time = ktime_get_boottime();
@@ -1036,7 +1074,7 @@ void intel_pps_off(struct intel_dp *intel_dp)
/* Enable backlight in the panel power control. */
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
/*
@@ -1054,15 +1092,15 @@ void intel_pps_backlight_on(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
}
/* Disable backlight in the panel power control. */
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -1075,8 +1113,8 @@ void intel_pps_backlight_off(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
intel_dp->pps.last_backlight_off = jiffies;
@@ -1089,7 +1127,7 @@ void intel_pps_backlight_off(struct intel_dp *intel_dp)
*/
void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
@@ -1100,7 +1138,7 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
if (is_enabled == enable)
return;
- drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ drm_dbg_kms(display->drm, "panel power control backlight %s\n",
enable ? "enable" : "disable");
if (enable)
@@ -1111,14 +1149,14 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps.pps_pipe;
- i915_reg_t pp_on_reg = PP_ON_DELAYS(dev_priv, pipe);
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
return;
intel_pps_vdd_off_sync_unlocked(intel_dp);
@@ -1132,27 +1170,27 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"detaching %s from [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
- intel_de_write(dev_priv, pp_on_reg, 0);
- intel_de_posting_read(dev_priv, pp_on_reg);
+ intel_de_write(display, pp_on_reg, 0);
+ intel_de_posting_read(display, pp_on_reg);
intel_dp->pps.pps_pipe = INVALID_PIPE;
}
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+static void vlv_steal_power_sequencer(struct intel_display *display,
enum pipe pipe)
{
struct intel_encoder *encoder;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+ drm_WARN(display->drm, intel_dp->pps.active_pipe == pipe,
"stealing PPS %c from active [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1160,7 +1198,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
if (intel_dp->pps.pps_pipe != pipe)
continue;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"stealing PPS %c from [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1173,13 +1211,13 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
void vlv_pps_init(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
intel_dp->pps.pps_pipe != crtc->pipe) {
@@ -1195,7 +1233,7 @@ void vlv_pps_init(struct intel_encoder *encoder,
* We may be stealing the power
* sequencer from another port.
*/
- vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+ vlv_steal_power_sequencer(display, crtc->pipe);
intel_dp->pps.active_pipe = crtc->pipe;
@@ -1205,9 +1243,9 @@ void vlv_pps_init(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps.pps_pipe = crtc->pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"initializing %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
encoder->base.base.id, encoder->base.name);
/* init power sequencer on this pipe and port */
@@ -1217,10 +1255,11 @@ void vlv_pps_init(struct intel_encoder *encoder,
static void pps_vdd_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!edp_have_panel_vdd(intel_dp))
return;
@@ -1231,11 +1270,11 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ pps_name(intel_dp));
+ drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
}
@@ -1269,7 +1308,7 @@ static void pps_init_timestamps(struct intel_dp *intel_dp)
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 pp_on, pp_off, pp_ctl;
struct pps_registers regs;
@@ -1278,11 +1317,11 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
pp_ctl = ilk_get_pp_control(intel_dp);
/* Ensure PPS is unlocked */
- if (!HAS_DDI(dev_priv))
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+ if (!HAS_DDI(display))
+ intel_de_write(display, regs.pp_ctrl, pp_ctl);
- pp_on = intel_de_read(dev_priv, regs.pp_on);
- pp_off = intel_de_read(dev_priv, regs.pp_off);
+ pp_on = intel_de_read(display, regs.pp_on);
+ pp_off = intel_de_read(display, regs.pp_off);
/* Pull timing values out of registers */
seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
@@ -1293,7 +1332,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
- pp_div = intel_de_read(dev_priv, regs.pp_div);
+ pp_div = intel_de_read(display, regs.pp_div);
seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
@@ -1305,9 +1344,10 @@ static void
intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
const struct edp_power_seq *seq)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ drm_dbg_kms(display->drm,
+ "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
state_name,
seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
@@ -1315,7 +1355,7 @@ intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct edp_power_seq hw;
struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
@@ -1323,7 +1363,7 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
- drm_err(&i915->drm, "PPS state mismatch\n");
+ drm_err(display->drm, "PPS state mismatch\n");
intel_pps_dump_state(intel_dp, "sw", sw);
intel_pps_dump_state(intel_dp, "hw", &hw);
}
@@ -1338,9 +1378,9 @@ static bool pps_delays_valid(struct edp_power_seq *delays)
static void pps_init_delays_bios(struct intel_dp *intel_dp,
struct edp_power_seq *bios)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
@@ -1385,9 +1425,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
static void pps_init_delays_spec(struct intel_dp *intel_dp,
struct edp_power_seq *spec)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -1406,11 +1446,11 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
static void pps_init_delays(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* already initialized? */
if (pps_delays_valid(final))
@@ -1440,13 +1480,13 @@ static void pps_init_delays(struct intel_dp *intel_dp)
intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"panel power up delay %d, power down delay %d, power cycle delay %d\n",
intel_dp->pps.panel_power_up_delay,
intel_dp->pps.panel_power_down_delay,
intel_dp->pps.panel_power_cycle_delay);
- drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
intel_dp->pps.backlight_on_delay,
intel_dp->pps.backlight_off_delay);
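
pps_init_delays() merges three delay sources, the hardware readout (cur), the VBT (vbt) and the eDP spec limits (spec), into the final power-sequencing values logged above. The merge policy itself sits outside the diff context; the sketch below assumes "largest of readout and VBT, spec limit as fallback when both are zero", which may differ from the exact i915 rule.

    #include <stdio.h>

    struct edp_power_seq { int t1_t3, t8, t9, t10, t11_t12; };

    /* prefer the larger of readout and VBT; when both are zero,
     * fall back to the eDP-spec upper limit */
    static int pick(int cur, int vbt, int spec_limit)
    {
        int v = cur > vbt ? cur : vbt;

        return v ? v : spec_limit;
    }

    static void merge_delays(const struct edp_power_seq *cur,
                             const struct edp_power_seq *vbt,
                             const struct edp_power_seq *spec,
                             struct edp_power_seq *final)
    {
    #define merge(f) final->f = pick(cur->f, vbt->f, spec->f)
        merge(t1_t3); merge(t8); merge(t9); merge(t10); merge(t11_t12);
    #undef merge
    }

    int main(void)
    {
        struct edp_power_seq cur  = { 10, 2, 2, 5, 50 };
        struct edp_power_seq vbt  = { 21, 0, 5, 5, 60 };
        struct edp_power_seq spec = { 210, 50, 50, 500, 6000 };
        struct edp_power_seq final;

        merge_delays(&cur, &vbt, &spec, &final);
        printf("t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
               final.t1_t3, final.t8, final.t9, final.t10, final.t11_t12);
        return 0;
    }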
@@ -1469,14 +1509,15 @@ static void pps_init_delays(struct intel_dp *intel_dp)
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp_on, pp_off, port_sel = 0;
- int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+ int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
intel_pps_get_registers(intel_dp, &regs);
@@ -1495,16 +1536,16 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
if (force_disable_vdd) {
u32 pp = ilk_get_pp_control(intel_dp);
- drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ drm_WARN(display->drm, pp & PANEL_POWER_ON,
"Panel power already on\n");
if (pp & EDP_FORCE_VDD)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VDD already on, disabling first\n");
pp &= ~EDP_FORCE_VDD;
- intel_de_write(dev_priv, regs.pp_ctrl, pp);
+ intel_de_write(display, regs.pp_ctrl, pp);
}
pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
@@ -1535,32 +1576,33 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
pp_on |= port_sel;
- intel_de_write(dev_priv, regs.pp_on, pp_on);
- intel_de_write(dev_priv, regs.pp_off, pp_off);
+ intel_de_write(display, regs.pp_on, pp_on);
+ intel_de_write(display, regs.pp_off, pp_off);
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
if (i915_mmio_reg_valid(regs.pp_div))
- intel_de_write(dev_priv, regs.pp_div,
+ intel_de_write(display, regs.pp_div,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
else
- intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
+ intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(seq->t11_t12, 1000)));
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- intel_de_read(dev_priv, regs.pp_on),
- intel_de_read(dev_priv, regs.pp_off),
+ intel_de_read(display, regs.pp_on),
+ intel_de_read(display, regs.pp_off),
i915_mmio_reg_valid(regs.pp_div) ?
- intel_de_read(dev_priv, regs.pp_div) :
- (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+ intel_de_read(display, regs.pp_div) :
+ (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -1606,17 +1648,19 @@ bool intel_pps_init(struct intel_dp *intel_dp)
static void pps_init_late(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector = intel_dp->attached_connector;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return;
- if (intel_num_pps(i915) < 2)
+ if (intel_num_pps(display) < 2)
return;
- drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 &&
+ drm_WARN(display->drm,
+ connector->panel.vbt.backlight.controller >= 0 &&
intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
"[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
encoder->base.base.id, encoder->base.name,
@@ -1645,32 +1689,34 @@ void intel_pps_init_late(struct intel_dp *intel_dp)
}
}
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+void intel_pps_unlock_regs_wa(struct intel_display *display)
{
int pps_num;
int pps_idx;
- if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
+ if (!HAS_DISPLAY(display) || HAS_DDI(display))
return;
/*
* This w/a is needed at least on CPT/PPT, but to be sure apply it
* everywhere registers can be write protected.
*/
- pps_num = intel_num_pps(dev_priv);
+ pps_num = intel_num_pps(display);
for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, pps_idx),
+ intel_de_rmw(display, PP_CONTROL(display, pps_idx),
PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
}
-void intel_pps_setup(struct drm_i915_private *i915)
+void intel_pps_setup(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->display.pps.mmio_base = PCH_PPS_BASE;
+ display->pps.mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.pps.mmio_base = VLV_PPS_BASE;
+ display->pps.mmio_base = VLV_PPS_BASE;
else
- i915->display.pps.mmio_base = PPS_BASE;
+ display->pps.mmio_base = PPS_BASE;
}
static int intel_pps_show(struct seq_file *m, void *data)
@@ -1704,21 +1750,23 @@ void intel_pps_connector_debugfs_add(struct intel_connector *connector)
connector, &intel_pps_fops);
}
-void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
+void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
- if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
+ if (drm_WARN_ON(display->drm, HAS_DDI(display)))
return;
if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
- pp_reg = PP_CONTROL(dev_priv, 0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)) & PANEL_PORT_SELECT_MASK;
+ pp_reg = PP_CONTROL(display, 0);
+ port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
+ PANEL_PORT_SELECT_MASK;
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
@@ -1739,20 +1787,21 @@ void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
- pp_reg = PP_CONTROL(dev_priv, pipe);
+ pp_reg = PP_CONTROL(display, pipe);
panel_pipe = pipe;
} else {
u32 port_sel;
- pp_reg = PP_CONTROL(dev_priv, 0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)) & PANEL_PORT_SELECT_MASK;
+ pp_reg = PP_CONTROL(display, 0);
+ port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
+ PANEL_PORT_SELECT_MASK;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
- val = intel_de_read(dev_priv, pp_reg);
+ val = intel_de_read(display, pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
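/*
 * [Editor's aside] The check above treats the PPS registers as unlocked
 * when panel power is off or the unlock key is present. A standalone
 * model, with bit positions assumed for illustration (not taken from
 * i915_reg.h):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PANEL_POWER_ON    (1u << 0)       /* assumed bit layout */
#define PANEL_UNLOCK_MASK 0xffff0000u     /* assumed bit layout */
#define PANEL_UNLOCK_REGS (0xabcdu << 16) /* assumed unlock key */

static bool pps_locked(uint32_t val)
{
	/* locked only if the panel is powered and no unlock key is set */
	return (val & PANEL_POWER_ON) &&
	       (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS;
}

int main(void)
{
	printf("%d\n", pps_locked(PANEL_POWER_ON));                     /* 1: locked */
	printf("%d\n", pps_locked(PANEL_POWER_ON | PANEL_UNLOCK_REGS)); /* 0: unlocked */
	return 0;
}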
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index 07ef96ca8da2..0c5da83a559e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -11,9 +11,9 @@
#include "intel_wakeref.h"
enum pipe;
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -43,16 +43,16 @@ void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
bool intel_pps_init(struct intel_dp *intel_dp);
void intel_pps_init_late(struct intel_dp *intel_dp);
void intel_pps_encoder_reset(struct intel_dp *intel_dp);
-void intel_pps_reset_all(struct drm_i915_private *i915);
+void intel_pps_reset_all(struct intel_display *display);
void vlv_pps_init(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
-void intel_pps_setup(struct drm_i915_private *i915);
+void intel_pps_unlock_regs_wa(struct intel_display *display);
+void intel_pps_setup(struct intel_display *display);
void intel_pps_connector_debugfs_add(struct intel_connector *connector);
-void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
+void assert_pps_unlocked(struct intel_display *display, enum pipe pipe);
#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 9cb1cdaaeefa..136a0d6ca970 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -203,16 +203,35 @@ bool intel_encoder_can_psr(struct intel_encoder *encoder)
return false;
}
+bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * For PSR/PR modes, only eDP requires the AUX IO power to be enabled whenever
+ * the output is enabled. For non-eDP outputs the main link is always
+ * on, hence they don't require the HW-initiated AUX wake-up signaling used
+ * for eDP.
+ *
+ * TODO:
+ * - Consider leaving AUX IO disabled for eDP / PR as well, in case
+ * the ALPM with main-link off mode is not enabled.
+ * - Leave AUX IO enabled for DP / PR, once support for ALPM with
+ * main-link off mode is added for it and this mode gets enabled.
+ */
+ return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ intel_encoder_can_psr(encoder);
+}
+
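/*
 * [Editor's aside] A simplified model of the predicate above: AUX IO
 * power is needed only when the output is eDP and the encoder is PSR
 * capable. Types and names here are illustrative, not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

static bool needs_aux_io_power(bool output_is_edp, bool encoder_can_psr)
{
	/* non-eDP keeps the main link up, so no HW AUX wake-up is needed */
	return output_is_edp && encoder_can_psr;
}

int main(void)
{
	printf("eDP+PSR: %d\n", needs_aux_io_power(true, true));  /* 1 */
	printf("DP+PSR:  %d\n", needs_aux_io_power(false, true)); /* 0 */
	return 0;
}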
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (i915->display.params.enable_psr == -1)
+ if (display->params.enable_psr == -1)
return connector->panel.vbt.psr.enable;
- return i915->display.params.enable_psr;
+ return display->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -222,14 +241,14 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (i915->display.params.enable_psr == 1)
+ if (display->params.enable_psr == 1)
return false;
return true;
}
@@ -237,9 +256,9 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (i915->display.params.enable_psr != -1)
+ if (display->params.enable_psr != -1)
return false;
return true;
@@ -247,9 +266,9 @@ static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if ((i915->display.params.enable_psr != -1) ||
+ if ((display->params.enable_psr != -1) ||
(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
return false;
return true;
@@ -257,111 +276,111 @@ static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
EDP_PSR_ERROR(intel_dp->psr.transcoder);
}
static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
}
static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
}
static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
EDP_PSR_MASK(intel_dp->psr.transcoder);
}
-static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_ctl_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_CTL(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_CTL(display, cpu_transcoder);
else
return HSW_SRD_CTL;
}
-static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_debug_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_DEBUG(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_DEBUG(display, cpu_transcoder);
else
return HSW_SRD_DEBUG;
}
-static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_PERF_CNT(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_PERF_CNT(display, cpu_transcoder);
else
return HSW_SRD_PERF_CNT;
}
-static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_status_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_STATUS(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_STATUS(display, cpu_transcoder);
else
return HSW_SRD_STATUS;
}
-static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_imr_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 12)
- return TRANS_PSR_IMR(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ return TRANS_PSR_IMR(display, cpu_transcoder);
else
return EDP_PSR_IMR;
}
-static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_iir_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 12)
- return TRANS_PSR_IIR(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ return TRANS_PSR_IIR(display, cpu_transcoder);
else
return EDP_PSR_IIR;
}
-static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_AUX_CTL(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_AUX_CTL(display, cpu_transcoder);
else
return HSW_SRD_AUX_CTL;
}
-static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_aux_data_reg(struct intel_display *display,
enum transcoder cpu_transcoder, int i)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_AUX_DATA(dev_priv, cpu_transcoder, i);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
else
return HSW_SRD_AUX_DATA(i);
}
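/*
 * [Editor's aside] All of the helpers above follow one pattern: display
 * version 8+ (or 12+ for the IMR/IIR pair) selects a per-transcoder
 * register, older hardware a fixed HSW register. A reduced sketch of the
 * dispatch, with made-up register offsets:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t psr_ctl_offset(int display_ver, int transcoder)
{
	if (display_ver >= 8)
		return 0x60800 + 0x1000 * transcoder; /* assumed per-transcoder stride */
	return 0x64800;                               /* assumed fixed HSW offset */
}

int main(void)
{
	printf("ver 7:  %#x\n", psr_ctl_offset(7, 0));
	printf("ver 12: %#x\n", psr_ctl_offset(12, 1));
	return 0;
}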
static void psr_irq_control(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
@@ -373,80 +392,81 @@ static void psr_irq_control(struct intel_dp *intel_dp)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
psr_irq_pre_entry_bit_get(intel_dp);
- intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
psr_irq_mask_get(intel_dp), ~mask);
}
-static void psr_event_print(struct drm_i915_private *i915,
+static void psr_event_print(struct intel_display *display,
u32 val, bool sel_update_enabled)
{
- drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
+ drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
- drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
+ drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
- drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
+ drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
- drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
+ drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
- drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
+ drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
if (val & PSR_EVENT_GRAPHICS_RESET)
- drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
+ drm_dbg_kms(display->drm, "\tGraphics reset\n");
if (val & PSR_EVENT_PCH_INTERRUPT)
- drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
+ drm_dbg_kms(display->drm, "\tPCH interrupt\n");
if (val & PSR_EVENT_MEMORY_UP)
- drm_dbg_kms(&i915->drm, "\tMemory up\n");
+ drm_dbg_kms(display->drm, "\tMemory up\n");
if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
- drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
+ drm_dbg_kms(display->drm, "\tFront buffer modification\n");
if (val & PSR_EVENT_WD_TIMER_EXPIRE)
- drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
+ drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
- drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
+ drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
if (val & PSR_EVENT_REGISTER_UPDATE)
- drm_dbg_kms(&i915->drm, "\tRegister updated\n");
+ drm_dbg_kms(display->drm, "\tRegister updated\n");
if (val & PSR_EVENT_HDCP_ENABLE)
- drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
+ drm_dbg_kms(display->drm, "\tHDCP enabled\n");
if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
- drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
+ drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
if (val & PSR_EVENT_VBI_ENABLE)
- drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
+ drm_dbg_kms(display->drm, "\tVBI enabled\n");
if (val & PSR_EVENT_LPSP_MODE_EXIT)
- drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
+ drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
- drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
+ drm_dbg_kms(display->drm, "\tPSR disabled\n");
}
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
ktime_t time_ns = ktime_get();
if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
intel_dp->psr.last_exit = time_ns;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
- if (DISPLAY_VER(dev_priv) >= 9) {
+ if (DISPLAY_VER(display) >= 9) {
u32 val;
val = intel_de_rmw(dev_priv,
PSR_EVENT(dev_priv, cpu_transcoder),
0, 0);
- psr_event_print(dev_priv, val, intel_dp->psr.sel_update_enabled);
+ psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
}
}
if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
- drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
+ drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
intel_dp->psr.irq_aux_error = true;
@@ -459,7 +479,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* again, so we don't care about unmasking the interrupt
* or clearing irq_aux_error.
*/
- intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
0, psr_irq_psr_error_bit_get(intel_dp));
queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
@@ -468,14 +488,14 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
@@ -516,7 +536,7 @@ intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
*/
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
ssize_t r;
u16 w;
u8 y;
@@ -542,7 +562,7 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
intel_dp_get_su_x_granularity_offset(intel_dp),
&w, 2);
if (r != 2)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to read selective update x granularity\n");
/*
* Spec says that if the value read is 0 the default granularity should
@@ -555,7 +575,7 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
intel_dp_get_su_y_granularity_offset(intel_dp),
&y, 1);
if (r != 1) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to read selective update y granularity\n");
y = 4;
}
@@ -569,17 +589,17 @@ exit:
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (intel_dp_is_edp(intel_dp)) {
if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
return;
}
if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel doesn't support early transport, eDP Panel Replay not possible\n");
return;
}
@@ -590,7 +610,7 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel replay %sis supported by panel\n",
intel_dp->psr.sink_panel_replay_su_support ?
"selective_update " : "");
@@ -598,20 +618,19 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
static void _psr_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
+ drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -620,7 +639,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- if (DISPLAY_VER(i915) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
@@ -638,7 +657,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
*/
intel_dp->psr.sink_psr2_support = y_req &&
intel_alpm_aux_wake_supported(intel_dp);
- drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
+ drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
intel_dp->psr.sink_psr2_support ? "" : "not ");
}
}
@@ -663,7 +682,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 aux_clock_divider, aux_ctl;
/* write DP_SET_POWER=D0 */
@@ -679,7 +699,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
intel_de_write(dev_priv,
- psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
+ psr_aux_data_reg(display, cpu_transcoder, i >> 2),
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -694,15 +714,15 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
- intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
+ intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
aux_ctl);
}
static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (DISPLAY_VER(i915) < 20 || !intel_dp_is_edp(intel_dp) ||
+ if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
return false;
@@ -741,7 +761,7 @@ static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
static void _psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val = DP_PSR_ENABLE;
if (crtc_state->has_sel_update) {
@@ -750,7 +770,7 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
if (intel_dp->psr.link_standby)
val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (DISPLAY_VER(i915) >= 8)
+ if (DISPLAY_VER(display) >= 8)
val |= DP_PSR_CRC_VERIFICATION;
}
@@ -802,14 +822,15 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp,
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
val |= EDP_PSR_TP4_TIME_0us;
- if (dev_priv->display.params.psr_safest_params) {
+ if (display->params.psr_safest_params) {
val |= EDP_PSR_TP1_TIME_2500us;
val |= EDP_PSR_TP2_TP3_TIME_2500us;
goto check_tp3_sel;
@@ -854,8 +875,8 @@ check_tp3_sel:
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
int idle_frames;
/* Let's use 6 as the minimum to cover all known cases including the
@@ -864,7 +885,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
- if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+ if (drm_WARN_ON(display->drm, idle_frames > 0xf))
idle_frames = 0xf;
return idle_frames;
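/*
 * [Editor's aside] Worked example of the idle-frame computation above,
 * with assumed inputs: a VBT value of 2 and the worst-case sink sync
 * latency of 8 (the fallback used when the DPCD read fails).
 */
#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int vbt_idle_frames = 2;
	int sink_sync_latency = 8;

	int idle_frames = max_int(6, vbt_idle_frames);
	idle_frames = max_int(idle_frames, sink_sync_latency + 1);
	if (idle_frames > 0xf) /* the register field is 4 bits wide */
		idle_frames = 0xf;

	printf("idle frames: %d\n", idle_frames); /* max(6, 2, 9) = 9 */
	return 0;
}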
@@ -872,14 +893,15 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(display) < 20)
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
if (IS_HASWELL(dev_priv))
@@ -890,23 +912,23 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
val |= intel_psr1_get_tp_time(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
- intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
- if (dev_priv->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
@@ -950,7 +972,7 @@ static u8 frames_before_su_entry(struct intel_dp *intel_dp)
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
@@ -961,38 +983,39 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
val);
}
- intel_de_rmw(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder),
+ intel_de_rmw(display,
+ PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
- intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
+ intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
TRANS_DP2_PANEL_REPLAY_ENABLE);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
val |= EDP_SU_TRACK_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
+ if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
val |= EDP_Y_COORDINATE_ENABLE;
val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
val |= intel_psr2_get_tp_time(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 12 && DISPLAY_VER(dev_priv) < 20) {
+ if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
if (psr2_block_count(intel_dp) > 2)
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
else
@@ -1000,7 +1023,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1023,12 +1046,12 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
- } else if (DISPLAY_VER(dev_priv) >= 20) {
+ } else if (DISPLAY_VER(display) >= 20) {
val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
- } else if (DISPLAY_VER(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
}
@@ -1036,18 +1059,18 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- tmp = intel_de_read(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
- } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), 0);
+ tmp = intel_de_read(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
+ } else if (HAS_PSR2_SEL_FETCH(display)) {
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
}
if (intel_dp->psr.su_region_et_enabled)
@@ -1057,19 +1080,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
* recommends keeping this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
+ intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), val);
+ intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
}
static bool
-transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return cpu_transcoder == TRANSCODER_A;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
return cpu_transcoder == TRANSCODER_EDP;
else
return false;
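/*
 * [Editor's aside] The helper above reduces to a version-keyed table of
 * transcoders that can drive PSR2; a standalone restatement:
 */
#include <stdbool.h>
#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };

static bool has_psr2(int ver, bool is_adlp, enum transcoder t)
{
	if (is_adlp || ver >= 14)
		return t == TRANSCODER_A || t == TRANSCODER_B;
	if (ver >= 12)
		return t == TRANSCODER_A;
	if (ver >= 9)
		return t == TRANSCODER_EDP;
	return false;
}

int main(void)
{
	printf("%d\n", has_psr2(12, false, TRANSCODER_A));  /* 1 */
	printf("%d\n", has_psr2(9, false, TRANSCODER_EDP)); /* 1 */
	printf("%d\n", has_psr2(9, false, TRANSCODER_A));   /* 0 */
	return 0;
}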
@@ -1087,17 +1112,18 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- intel_de_rmw(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
EDP_PSR2_IDLE_FRAMES_MASK,
EDP_PSR2_IDLE_FRAMES(idle_frames));
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
psr2_program_idle_frames(intel_dp, 0);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
@@ -1105,7 +1131,8 @@ static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
@@ -1140,12 +1167,13 @@ static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -1155,9 +1183,10 @@ static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
u32 exit_scanlines;
/*
@@ -1181,7 +1210,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
return;
/*
@@ -1191,7 +1220,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
exit_scanlines =
intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
- if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
+ if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
return;
crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
@@ -1200,17 +1229,17 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (!dev_priv->display.params.enable_psr2_sel_fetch &&
+ if (!display->params.enable_psr2_sel_fetch &&
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 sel fetch not enabled, disabled by parameter\n");
return false;
}
if (crtc_state->uapi.async_flip) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 sel fetch not enabled, async flip enabled\n");
return false;
}
@@ -1221,7 +1250,8 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
@@ -1243,7 +1273,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -1264,8 +1294,8 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 hblank_total, hblank_ns, req_ns;
hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
@@ -1278,7 +1308,7 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
/* Not supported <13 / Wa_22012279113:adl-p */
- if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
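/*
 * [Editor's aside] The hblank duration computed above, in nanoseconds,
 * for an assumed 160-pixel hblank at a 148.5 MHz pixel clock. The driver
 * compares this against the time the PSR2 SDP needs (req_ns, derived
 * from link parameters not visible in this hunk).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hblank_total = 160;  /* crtc_hblank_end - crtc_hblank_start */
	uint64_t crtc_clock = 148500; /* pixel clock in kHz */

	uint64_t hblank_ns = 1000000ULL * hblank_total / crtc_clock;
	printf("hblank lasts ~%llu ns\n", (unsigned long long)hblank_ns); /* ~1077 */
	return 0;
}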
@@ -1288,12 +1318,12 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
int entry_setup_frames = 0;
if (psr_setup_time < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: Invalid PSR setup time (0x%02x)\n",
intel_dp->psr_dpcd[1]);
return -ETIME;
@@ -1301,14 +1331,14 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- if (DISPLAY_VER(i915) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
/* setup entry frames can be up to 3 frames */
entry_setup_frames = 1;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR setup entry frames %d\n",
entry_setup_frames);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup time (%d us) too long\n",
psr_setup_time);
return -ETIME;
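/*
 * [Editor's aside] The failing condition above converts the sink's PSR
 * setup time into scanlines and compares it with the vblank budget. A
 * worked example with assumed 1080p timings and a 330 us setup time:
 */
#include <stdio.h>

int main(void)
{
	int psr_setup_time_us = 330;
	int crtc_clock_khz = 148500; /* pixel clock */
	int htotal = 2200, vtotal = 1125, vdisplay = 1080;

	/* usecs -> scanlines, rounded up: pixels elapsed / pixels per line */
	long long setup_lines =
		((long long)psr_setup_time_us * crtc_clock_khz +
		 htotal * 1000LL - 1) / (htotal * 1000LL);
	int budget = vtotal - vdisplay - 1;

	/* 23 lines needed vs. 44 available: the setup time fits in vblank */
	printf("setup needs %lld lines, budget %d\n", setup_lines, budget);
	return 0;
}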
@@ -1322,7 +1352,7 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool aux_less)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
crtc_state->hw.adjusted_mode.crtc_vblank_start;
int wake_lines;
@@ -1330,7 +1360,7 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
if (aux_less)
wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
else
- wake_lines = DISPLAY_VER(i915) < 20 ?
+ wake_lines = DISPLAY_VER(display) < 20 ?
psr2_block_count_lines(intel_dp) :
intel_dp->alpm_parameters.io_wake_lines;
@@ -1348,16 +1378,16 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool aux_less)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, Unable to use long enough wake times\n");
return false;
}
if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, too short vblank time\n");
return false;
}
@@ -1368,7 +1398,8 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
@@ -1378,24 +1409,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* JSL and EHL only supports eDP 1.3 */
if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
+ drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
return false;
}
/* Wa_16011181250 */
if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG2(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
+ drm_dbg_kms(display->drm,
+ "PSR2 is defeatured for this platform\n");
return false;
}
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(display->drm,
+ "PSR2 not completely functional in this stepping\n");
return false;
}
- if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
+ drm_dbg_kms(display->drm,
"PSR2 not supported in transcoder %s\n",
transcoder_name(crtc_state->cpu_transcoder));
return false;
@@ -1407,28 +1440,28 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
- drm_dbg_kms(&dev_priv->drm,
+ (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
+ drm_dbg_kms(display->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
max_bpp = 30;
- } else if (DISPLAY_VER(dev_priv) >= 10) {
+ } else if (DISPLAY_VER(display) >= 10) {
psr_max_h = 4096;
psr_max_v = 2304;
max_bpp = 24;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
psr_max_h = 3640;
psr_max_v = 2304;
max_bpp = 24;
}
if (crtc_state->pipe_bpp > max_bpp) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, pipe bpp %d > max supported %d\n",
crtc_state->pipe_bpp, max_bpp);
return false;
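/*
 * [Editor's aside] The version checks above amount to a limits table for
 * PSR2; restated as data for clarity:
 */
#include <stdio.h>

struct psr2_limits { int max_h, max_v, max_bpp; };

static struct psr2_limits psr2_limits_for(int ver)
{
	if (ver >= 12) return (struct psr2_limits){ 5120, 3200, 30 };
	if (ver >= 10) return (struct psr2_limits){ 4096, 2304, 24 };
	if (ver == 9)  return (struct psr2_limits){ 3640, 2304, 24 };
	return (struct psr2_limits){ 0, 0, 0 }; /* no PSR2 */
}

int main(void)
{
	struct psr2_limits l = psr2_limits_for(12);
	printf("max %dx%d at %d bpp\n", l.max_h, l.max_v, l.max_bpp);
	return 0;
}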
@@ -1436,8 +1469,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm,
+ IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
}
@@ -1447,7 +1480,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->enable_psr2_sel_fetch &&
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
@@ -1462,18 +1495,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (HAS_PSR2_SEL_FETCH(dev_priv) &&
+ if (HAS_PSR2_SEL_FETCH(display) &&
!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
- !HAS_PSR_HW_TRACKING(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ !HAS_PSR_HW_TRACKING(display)) {
+ drm_dbg_kms(display->drm,
"Selective update not enabled, selective fetch not valid and no HW tracking available\n");
goto unsupported;
}
if (!psr2_global_enabled(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm, "Selective update disabled by flag\n");
+ drm_dbg_kms(display->drm,
+ "Selective update disabled by flag\n");
goto unsupported;
}
@@ -1481,23 +1515,23 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled, SDP indication do not fit in hblank\n");
goto unsupported;
}
- if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 ||
+ if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
!intel_dp->psr.sink_panel_replay_su_support))
goto unsupported;
if (crtc_state->crc_enabled) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled because it would inhibit pipe CRC calculation\n");
goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled, SU granularity not compatible\n");
goto unsupported;
}
@@ -1515,7 +1549,7 @@ unsupported:
static bool _psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
@@ -1534,7 +1568,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (entry_setup_frames >= 0) {
intel_dp->psr.entry_setup_frames = entry_setup_frames;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup timing not met\n");
return false;
}
@@ -1547,7 +1581,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1556,7 +1590,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
if (!panel_replay_global_enabled(intel_dp)) {
- drm_dbg_kms(&i915->drm, "Panel Replay disabled by flag\n");
+ drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
return false;
}
@@ -1567,7 +1601,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
/* 128b/132b Panel Replay is not supported on eDP */
if (intel_dp_is_uhbr(crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Replay is not supported with 128b/132b\n");
return false;
}
@@ -1578,7 +1612,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
(conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Replay is not supported with HDCP\n");
return false;
}
@@ -1586,6 +1620,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
if (!alpm_config_valid(intel_dp, crtc_state, true))
return false;
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
return true;
}
@@ -1593,22 +1633,22 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
if (!psr_global_enabled(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
+ drm_dbg_kms(display->drm, "PSR disabled by flag\n");
return;
}
if (intel_dp->psr.sink_not_reliable) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR sink implementation is not reliable\n");
return;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: Interlaced mode enabled\n");
return;
}
@@ -1619,7 +1659,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* PSR is a transcoder level feature.
*/
if (crtc_state->joiner_pipes) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR disabled due to joiner\n");
return;
}
@@ -1640,7 +1680,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
void intel_psr_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_dp *intel_dp;
@@ -1673,18 +1713,18 @@ void intel_psr_get_config(struct intel_encoder *encoder,
if (!intel_dp->psr.sel_update_enabled)
goto unlock;
- if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- val = intel_de_read(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
+ if (HAS_PSR2_SEL_FETCH(display)) {
+ val = intel_de_read(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder));
if (val & PSR2_MAN_TRK_CTL_ENABLE)
pipe_config->enable_psr2_sel_fetch = true;
}
pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv,
- TRANS_EXITLINE(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 12) {
+ val = intel_de_read(display,
+ TRANS_EXITLINE(display, cpu_transcoder));
pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
}
unlock:
@@ -1693,17 +1733,17 @@ unlock:
static void intel_psr_activate(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- drm_WARN_ON(&dev_priv->drm,
- transcoder_has_psr2(dev_priv, cpu_transcoder) &&
- intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder)) & EDP_PSR2_ENABLE);
+ drm_WARN_ON(display->drm,
+ transcoder_has_psr2(display, cpu_transcoder) &&
+ intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
- drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+ drm_WARN_ON(display->drm, intel_dp->psr.active);
lockdep_assert_held(&intel_dp->psr.lock);
@@ -1742,30 +1782,31 @@ static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
static void wm_optimization_wa(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool set_wa_bit = false;
/* Wa_14015648006 */
- if (IS_DISPLAY_VER(dev_priv, 11, 14))
+ if (IS_DISPLAY_VER(display, 11, 14))
set_wa_bit |= crtc_state->wm_level_disabled;
/* Wa_16013835468 */
- if (DISPLAY_VER(dev_priv) == 12)
+ if (DISPLAY_VER(display) == 12)
set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
crtc_state->hw.adjusted_mode.crtc_vdisplay;
if (set_wa_bit)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, wa_16013835468_bit_get(intel_dp));
else
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
wa_16013835468_bit_get(intel_dp), 0);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask = 0;
@@ -1773,7 +1814,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* Only HSW and BDW have PSR AUX registers that need to be set up.
* SKL+ use hardcoded values for PSR AUX transactions.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
hsw_psr_setup_aux(intel_dp);
/*
@@ -1790,7 +1831,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* Panel Replay on DP: No bits are applicable
* Panel Replay on eDP: All bits are applicable
*/
- if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
+ if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
mask = EDP_PSR_DEBUG_MASK_HPD;
if (intel_dp_is_edp(intel_dp)) {
@@ -1804,17 +1845,17 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* As a workaround leave LPSP unmasked to prevent PSR entry
* when external displays are active.
*/
- if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+ if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
mask |= EDP_PSR_DEBUG_MASK_LPSP;
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(display) < 20)
mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
/*
* No separate pipe reg write mask on hsw/bdw, so have to unmask all
* registers in order to keep the CURSURFLIVE tricks working :(
*/
- if (IS_DISPLAY_VER(dev_priv, 9, 10))
+ if (IS_DISPLAY_VER(display, 9, 10))
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
/* allow PSR with sprite enabled */
@@ -1822,7 +1863,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
}
- intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
+ intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
psr_irq_control(intel_dp);
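/*
 * [Editor's aside] The mask construction above, folded into one pure
 * function. Bit values are assumed for illustration; the selection logic
 * mirrors the version/platform checks in intel_psr_enable_source().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_HPD            (1u << 0) /* assumed bit assignments */
#define MASK_LPSP           (1u << 1)
#define MASK_MAX_SLEEP      (1u << 2)
#define MASK_DISP_REG_WRITE (1u << 3)

static uint32_t psr_debug_mask(int ver, bool is_edp, bool is_hsw_ult)
{
	uint32_t mask = 0;

	if (ver < 20 || is_edp)
		mask = MASK_HPD;
	if (is_edp) {
		if (ver >= 8 || is_hsw_ult)
			mask |= MASK_LPSP;
		if (ver < 20)
			mask |= MASK_MAX_SLEEP;
		if (ver == 9 || ver == 10)
			mask |= MASK_DISP_REG_WRITE;
	}
	return mask;
}

int main(void)
{
	printf("ver 12 eDP: %#x\n", psr_debug_mask(12, true, false)); /* 0x7 */
	return 0;
}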
@@ -1831,13 +1872,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* transcoder, EXITLINE will need to be unset when disabling PSR
*/
if (intel_dp->psr.dc3co_exitline)
- intel_de_rmw(dev_priv,
- TRANS_EXITLINE(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_EXITLINE(display, cpu_transcoder),
EXITLINE_MASK,
intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
- if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
+ if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
+ intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
@@ -1851,8 +1892,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
wm_optimization_wa(intel_dp, crtc_state);
if (intel_dp->psr.sel_update_enabled) {
- if (DISPLAY_VER(dev_priv) == 9)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ if (DISPLAY_VER(display) == 9)
+ intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0,
PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT);
@@ -1862,27 +1903,27 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* cause issues if non-supported panels are used.
*/
if (!intel_dp->psr.panel_replay_enabled &&
- (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ (IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv)))
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (!intel_dp->psr.panel_replay_enabled &&
- IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
+ IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0))
+ intel_de_rmw(display,
+ MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
+ intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
@@ -1897,11 +1938,11 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* first time that PSR HW tries to activate, so let's keep PSR disabled
* to avoid any rendering problems.
*/
- val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR interruption error set, not enabling PSR\n");
return false;
}
@@ -1913,11 +1954,11 @@ no_err:
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
+ drm_WARN_ON(display->drm, intel_dp->psr.enabled);
intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
@@ -1938,9 +1979,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
return;
if (intel_dp->psr.panel_replay_enabled) {
- drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
+ drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
} else {
- drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ drm_dbg_kms(display->drm, "Enabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
/*
@@ -1962,68 +2003,71 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
static void intel_psr_exit(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
if (!intel_dp->psr.active) {
- if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
+ if (transcoder_has_psr2(display, cpu_transcoder)) {
+ val = intel_de_read(display,
+ EDP_PSR2_CTL(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
}
- val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
+ val = intel_de_read(display,
+ psr_ctl_reg(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
return;
}
if (intel_dp->psr.panel_replay_enabled) {
- intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
+ intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
} else if (intel_dp->psr.sel_update_enabled) {
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
- val = intel_de_rmw(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ EDP_PSR2_CTL(display, cpu_transcoder),
EDP_PSR2_ENABLE, 0);
- drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
+ drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
- val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ psr_ctl_reg(display, cpu_transcoder),
EDP_PSR_ENABLE, 0);
- drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
+ drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
}
intel_dp->psr.active = false;
}
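
The drm_WARN_ON() checks in intel_psr_exit() rely on the read-modify-write helper returning the register's pre-write value. A tiny model of that contract (a sketch of the assumed semantics, not the driver's MMIO implementation):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Fake register backing store for the sketch. */
static uint32_t regs[16];

/* Clear 'clear', set 'set', and return what the register held
 * before the write -- the value the callers above warn on. */
static uint32_t de_rmw(unsigned int reg, uint32_t clear, uint32_t set)
{
	uint32_t old = regs[reg];

	regs[reg] = (old & ~clear) | set;
	return old;
}

#define EDP_PSR_ENABLE (1u << 31)

int main(void)
{
	regs[0] = EDP_PSR_ENABLE;

	/* Disable PSR; the old value proves it was enabled. */
	uint32_t val = de_rmw(0, EDP_PSR_ENABLE, 0);

	assert(val & EDP_PSR_ENABLE);
	printf("now 0x%08x\n", regs[0]);
	return 0;
}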
static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
i915_reg_t psr_status;
u32 psr_status_mask;
if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
intel_dp->psr.panel_replay_enabled)) {
- psr_status = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
+ psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = psr_status_reg(dev_priv, cpu_transcoder);
+ psr_status = psr_status_reg(display, cpu_transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
/* Wait till PSR is idle */
- if (intel_de_wait_for_clear(dev_priv, psr_status,
+ if (intel_de_wait_for_clear(display, psr_status,
psr_status_mask, 2000))
- drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
+ drm_err(display->drm, "Timed out waiting PSR idle state\n");
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
lockdep_assert_held(&intel_dp->psr.lock);
@@ -2032,9 +2076,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
return;
if (intel_dp->psr.panel_replay_enabled)
- drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
+ drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
else
- drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ drm_dbg_kms(display->drm, "Disabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
intel_psr_exit(intel_dp);
@@ -2044,19 +2088,19 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
* Wa_16013835468
* Wa_14015648006
*/
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
wa_16013835468_bit_get(intel_dp), 0);
if (intel_dp->psr.sel_update_enabled) {
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (!intel_dp->psr.panel_replay_enabled &&
- IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
+ IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0))
+ intel_de_rmw(display,
+ MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ intel_de_rmw(display, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
@@ -2065,12 +2109,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Panel Replay on eDP is always using ALPM aux less. */
if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- intel_de_rmw(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
- intel_de_rmw(dev_priv,
- PORT_ALPM_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ PORT_ALPM_CTL(display, cpu_transcoder),
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
}
@@ -2101,12 +2145,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!old_crtc_state->has_psr)
return;
- if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
+ if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp)))
return;
mutex_lock(&intel_dp->psr.lock);
@@ -2126,7 +2170,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2140,7 +2184,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
}
/* If we ever hit this, we will need to add refcount to pause/resume */
- drm_WARN_ON(&dev_priv->drm, psr->paused);
+ drm_WARN_ON(display->drm, psr->paused);
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
@@ -2177,45 +2221,53 @@ unlock:
mutex_unlock(&psr->lock);
}
-static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
PSR2_MAN_TRK_CTL_ENABLE;
}
-static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
-static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
-static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled)
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
- man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv));
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
+ man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_single_full_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display));
/*
* Display WA #0884: skl+
@@ -2230,20 +2282,20 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
* but testing proved that it works up to display version 13; newer
* platforms will need testing.
*/
- intel_de_write(dev_priv, CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
+ intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2253,36 +2305,37 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
break;
}
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder),
crtc_state->psr2_man_track_ctl);
if (!crtc_state->enable_psr2_su_region_et)
return;
- intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val = man_trk_ctl_enable_bit_get(dev_priv);
+ u32 val = man_trk_ctl_enable_bit_get(display);
/* SF partial frame enable has to be set even on full update */
- val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
+ val |= man_trk_ctl_partial_frame_bit_get(display);
if (full_update) {
- val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
- val |= man_trk_ctl_continuos_full_frame(dev_priv);
+ val |= man_trk_ctl_single_full_frame_bit_get(display);
+ val |= man_trk_ctl_continuos_full_frame(display);
goto exit;
}
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
@@ -2335,13 +2388,14 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
- (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
+ (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
y_alignment = vdsc_cfg->slice_height;
else
y_alignment = crtc_state->su_y_granularity;
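
ADLP (and display 14+) aligns the selective-update region to the DSC slice height when compression is on; otherwise the panel's y granularity is used. Either way the region's edges have to land on alignment boundaries, start rounded down and end rounded up. A small sketch of that rounding (assumed semantics, following the comment above):

#include <stdio.h>

/* Expand [y1, y2) outward so both edges sit on 'align'
 * boundaries: start rounds down, end rounds up. */
static void align_su_region(int *y1, int *y2, int align)
{
	*y1 -= *y1 % align;
	if (*y2 % align)
		*y2 += align - *y2 % align;
}

int main(void)
{
	int y1 = 13, y2 = 57;

	align_su_region(&y1, &y2, 8);
	printf("[%d, %d)\n", y1, y2);	/* [8, 64) */
	return 0;
}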
@@ -2429,6 +2483,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state;
@@ -2525,7 +2580,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
* calculation for those.
*/
if (crtc_state->psr2_su_area.y1 == -1) {
- drm_info_once(&dev_priv->drm,
+ drm_info_once(display->drm,
"Selective fetch area calculation failed in pipe %c\n",
pipe_name(crtc->pipe));
full_update = true;
@@ -2536,7 +2591,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
/* Wa_14014971492 */
if (!crtc_state->has_panel_replay &&
- ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ ((IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2622,6 +2677,7 @@ skip_sel_fetch_set_loop:
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -2629,7 +2685,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!HAS_PSR(i915))
+ if (!HAS_PSR(display))
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2670,7 +2726,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
void intel_psr_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
@@ -2686,13 +2742,14 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
mutex_lock(&psr->lock);
- drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
+ drm_WARN_ON(display->drm,
+ psr->enabled && !crtc_state->active_planes);
keep_disabled |= psr->sink_not_reliable;
keep_disabled |= !crtc_state->active_planes;
/* Display WA #1136: skl, bxt */
- keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
+ keep_disabled |= DISPLAY_VER(display) < 11 &&
crtc_state->wm_level_disabled;
if (!psr->enabled && !keep_disabled)
@@ -2717,7 +2774,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
/*
@@ -2725,14 +2782,14 @@ static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* As all higher states have bit 4 of the PSR2 state set, we can just wait for
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
- return intel_de_wait_for_clear(dev_priv,
- EDP_PSR2_STATUS(dev_priv, cpu_transcoder),
+ return intel_de_wait_for_clear(display,
+ EDP_PSR2_STATUS(display, cpu_transcoder),
EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
}
static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
/*
@@ -2741,18 +2798,11 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
- return intel_de_wait_for_clear(dev_priv,
- psr_status_reg(dev_priv, cpu_transcoder),
+ return intel_de_wait_for_clear(display,
+ psr_status_reg(display, cpu_transcoder),
EDP_PSR_STATUS_STATE_MASK, 50);
}
-static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
-{
- return intel_dp_is_edp(intel_dp) ?
- _psr2_ready_for_pipe_update_locked(intel_dp) :
- _psr1_ready_for_pipe_update_locked(intel_dp);
-}
-
/**
* intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
* @new_crtc_state: new CRTC state
@@ -2762,37 +2812,36 @@ static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
*/
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_encoder *encoder;
if (!new_crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int ret;
lockdep_assert_held(&intel_dp->psr.lock);
- if (!intel_dp->psr.enabled)
+ if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
continue;
- if (intel_dp->psr.panel_replay_enabled)
- ret = _panel_replay_ready_for_pipe_update_locked(intel_dp);
- else if (intel_dp->psr.sel_update_enabled)
+ if (intel_dp->psr.sel_update_enabled)
ret = _psr2_ready_for_pipe_update_locked(intel_dp);
else
ret = _psr1_ready_for_pipe_update_locked(intel_dp);
if (ret)
- drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
+ drm_err(display->drm,
+ "PSR wait timed out, atomic update may fail\n");
}
}
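
Both ready checks reduce to polling a status register until the masked state bits clear, bounded by a timeout, with any non-zero return reported by the caller. A minimal model of the intel_de_wait_for_clear() contract (illustrative only; the real helper sleeps rather than spinning a counter):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status = 0x8;	/* bit 3: still busy */

static uint32_t read_status(void)
{
	static int reads;

	/* Pretend the hardware goes idle after a few reads. */
	if (++reads == 3)
		fake_status = 0;
	return fake_status;
}

/* Poll until (status & mask) == 0 or the budget runs out;
 * 0 on success, negative on timeout, matching the callers'
 * "if (ret) drm_err(...)" shape. */
static int wait_for_clear(uint32_t mask, int tries)
{
	while (tries--)
		if (!(read_status() & mask))
			return 0;
	return -1;
}

int main(void)
{
	printf("wait: %d\n", wait_for_clear(0x8, 10));	/* 0 */
	return 0;
}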
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
i915_reg_t reg;
u32 mask;
@@ -2803,18 +2852,18 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
intel_dp->psr.panel_replay_enabled)) {
- reg = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
+ reg = EDP_PSR2_STATUS(display, cpu_transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = psr_status_reg(dev_priv, cpu_transcoder);
+ reg = psr_status_reg(display, cpu_transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
mutex_unlock(&intel_dp->psr.lock);
- err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
+ err = intel_de_wait_for_clear(display, reg, mask, 50);
if (err)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
@@ -2822,7 +2871,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
return err == 0 && intel_dp->psr.enabled;
}
-static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
+static int intel_psr_fastset_force(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct drm_modeset_acquire_ctx ctx;
@@ -2830,7 +2879,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
struct drm_connector *conn;
int err = 0;
- state = drm_atomic_state_alloc(&dev_priv->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
@@ -2840,7 +2889,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
to_intel_atomic_state(state)->internal = true;
retry:
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -2887,7 +2936,7 @@ retry:
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
@@ -2898,7 +2947,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
- drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
+ drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
}
@@ -2923,7 +2972,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
mutex_unlock(&intel_dp->psr.lock);
if (old_mode != mode || old_disable_bits != disable_bits)
- ret = intel_psr_fastset_force(dev_priv);
+ ret = intel_psr_fastset_force(display);
return ret;
}
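
intel_psr_debug_set() rejects any input carrying bits outside the mode mask and the known disable flags, so future additions to the debug ABI cannot be silently misparsed. The shape of that validation as a standalone sketch (the masks here are made up):

#include <stdint.h>
#include <stdio.h>

#define MODE_MASK	0x0fu
#define FLAG_DISABLE	0x10u

/* Accept only bits we understand; anything else is -EINVAL. */
static int debug_set(uint64_t val)
{
	if (val & ~(uint64_t)(MODE_MASK | FLAG_DISABLE))
		return -22;	/* -EINVAL */
	return 0;
}

int main(void)
{
	printf("%d %d\n", debug_set(0x11), debug_set(0x100)); /* 0 -22 */
	return 0;
}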
@@ -2975,7 +3024,7 @@ unlock:
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
@@ -2983,20 +3032,20 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* Send one update, otherwise lag is observed on screen */
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe),
0);
return;
}
- val = man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv);
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ val = man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display);
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
val);
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe), 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
} else {
intel_psr_exit(intel_dp);
@@ -3005,7 +3054,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
/**
* intel_psr_invalidate - Invalidate PSR
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the invalidate
*
@@ -3016,7 +3065,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+void intel_psr_invalidate(struct intel_display *display,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct intel_encoder *encoder;
@@ -3024,7 +3073,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
if (origin == ORIGIN_FLIP)
return;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3054,7 +3103,8 @@ static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
!intel_dp->psr.active)
@@ -3075,17 +3125,18 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0) {
- u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv);
+ u32 val = man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_single_full_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display);
/*
* Set psr2_sel_fetch_cff_enabled as false to allow selective
@@ -3093,11 +3144,11 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* SU configuration in case an update is sent for any reason after
* sff bit gets cleared by the HW on next vblank.
*/
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
val);
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe),
0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
@@ -3118,7 +3169,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
/**
* intel_psr_flush - Flush PSR
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -3129,12 +3180,12 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
-void intel_psr_flush(struct drm_i915_private *dev_priv,
+void intel_psr_flush(struct intel_display *display,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3183,11 +3234,12 @@ unlock:
*/
void intel_psr_init(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
+ if (!(HAS_PSR(display) || HAS_DP20(dev_priv)))
return;
/*
@@ -3199,21 +3251,21 @@ void intel_psr_init(struct intel_dp *intel_dp)
* So let's keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
* But GEN12 supports an instance of PSR registers per transcoder.
*/
- if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
- drm_dbg_kms(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
+ drm_dbg_kms(display->drm,
"PSR condition failed: Port not supported\n");
return;
}
if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
- DISPLAY_VER(dev_priv) >= 20)
+ DISPLAY_VER(display) >= 20)
intel_dp->psr.source_panel_replay_support = true;
- if (HAS_PSR(dev_priv) && intel_dp_is_edp(intel_dp))
+ if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
intel_dp->psr.source_support = true;
/* Set link_standby x link_off defaults */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
/* For platforms up to TGL, let's respect the VBT again */
intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
@@ -3250,7 +3302,7 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
static void psr_alpm_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_psr *psr = &intel_dp->psr;
u8 val;
@@ -3261,14 +3313,14 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
if (r != 1) {
- drm_err(&dev_priv->drm, "Error reading ALPM status\n");
+ drm_err(display->drm, "Error reading ALPM status\n");
return;
}
if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"ALPM lock timeout error, disabling PSR\n");
/* Clearing error */
@@ -3278,21 +3330,21 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
if (r != 1) {
- drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
+ drm_err(display->drm, "Error reading DP_PSR_ESI\n");
return;
}
if (val & DP_PSR_CAPS_CHANGE) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sink PSR capability changed, disabling PSR\n");
/* Clearing it */
@@ -3309,7 +3361,7 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
*/
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
@@ -3325,7 +3377,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Error reading PSR status or error status\n");
goto exit;
}
@@ -3338,20 +3390,20 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
!error_status)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR RFB storage error, disabling PSR\n");
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR VSC SDP uncorrectable error, disabling PSR\n");
if (error_status & DP_PSR_LINK_CRC_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR Link CRC error, disabling PSR\n");
if (error_status & ~errors)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"PSR_ERROR_STATUS unhandled errors %x\n",
error_status & ~errors);
/* clear status register */
@@ -3390,13 +3442,13 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
*/
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
if (!crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3413,13 +3465,13 @@ void intel_psr_lock(const struct intel_crtc_state *crtc_state)
*/
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
if (!crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3431,7 +3483,7 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
const char *status = "unknown";
u32 val, status_val;
@@ -3451,8 +3503,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ EDP_PSR2_STATUS(display, cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -3467,7 +3519,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ psr_status_reg(display, cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -3528,7 +3581,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
@@ -3553,20 +3607,20 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
}
if (psr->panel_replay_enabled) {
- val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
+ val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
if (intel_dp_is_edp(intel_dp))
- psr2_ctl = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv,
+ psr2_ctl = intel_de_read(display,
+ EDP_PSR2_CTL(display,
cpu_transcoder));
enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
} else if (psr->sel_update_enabled) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ EDP_PSR2_CTL(display, cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
@@ -3581,7 +3635,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
/*
* SKL+ perf counter is reset to 0 every time a DC state is entered
*/
- val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
seq_printf(m, "Performance counter: %u\n",
REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
@@ -3600,8 +3654,8 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
* frame boundary between register reads
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(dev_priv, cpu_transcoder, frame));
+ val = intel_de_read(display,
+ PSR2_SU_STATUS(display, cpu_transcoder, frame));
su_frames_val[frame / 3] = val;
}
@@ -3629,15 +3683,15 @@ unlock:
static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
struct intel_dp *intel_dp = NULL;
struct intel_encoder *encoder;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return -ENODEV;
/* Find the first EDP which supports PSR */
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
intel_dp = enc_to_intel_dp(encoder);
break;
}
@@ -3652,18 +3706,19 @@ DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
+ struct intel_display *display = data;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
intel_wakeref_t wakeref;
int ret = -ENODEV;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return ret;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+ drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
@@ -3679,13 +3734,13 @@ i915_edp_psr_debug_set(void *data, u64 val)
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
- struct drm_i915_private *dev_priv = data;
+ struct intel_display *display = data;
struct intel_encoder *encoder;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return -ENODEV;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
// TODO: split into each transcoder's PSR debug state
@@ -3700,15 +3755,15 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
-void intel_psr_debugfs_register(struct drm_i915_private *i915)
+void intel_psr_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
- i915, &i915_edp_psr_debug_fops);
+ display, &i915_edp_psr_debug_fops);
debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
- i915, &i915_edp_psr_status_fops);
+ display, &i915_edp_psr_status_fops);
}
static const char *psr_mode_str(struct intel_dp *intel_dp)
@@ -3789,6 +3844,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
@@ -3801,7 +3857,7 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(i915) || HAS_DP20(i915))
+ if (HAS_PSR(display) || HAS_DP20(i915))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index d483c85870e1..6eb5f15f674f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -11,11 +11,11 @@
enum fb_op_origin;
struct drm_connector;
struct drm_connector_state;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
struct intel_plane;
@@ -25,6 +25,8 @@ struct intel_plane_state;
(intel_dp)->psr.source_panel_replay_support)
bool intel_encoder_can_psr(struct intel_encoder *encoder);
+bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
@@ -35,10 +37,10 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+void intel_psr_invalidate(struct intel_display *display,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
-void intel_psr_flush(struct drm_i915_private *dev_priv,
+void intel_psr_flush(struct intel_display *display,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct intel_dp *intel_dp);
@@ -60,6 +62,6 @@ void intel_psr_resume(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
-void intel_psr_debugfs_register(struct drm_i915_private *i915);
+void intel_psr_debugfs_register(struct intel_display *display);
#endif /* __INTEL_PSR_H__ */
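
The header swaps one forward declaration for another; since the interface passes only pointers, struct intel_display can stay opaque to every includer and no header needs the full definition. A compact demonstration of why the forward declaration suffices (generic C, not the driver's types):

#include <stdio.h>

/* Interface side: an incomplete type is enough for pointer
 * parameters, which is all intel_psr.h traffics in. */
struct display;
static void psr_flush(struct display *d, unsigned int bits);

/* Implementation side owns the full definition. */
struct display { const char *name; };

static void psr_flush(struct display *d, unsigned int bits)
{
	printf("flush %s, bits 0x%x\n", d->name, bits);
}

int main(void)
{
	struct display d = { .name = "display0" };

	psr_flush(&d, ~0u);
	return 0;
}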
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index dfd8b4960e6d..29b56d53a340 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -282,7 +282,7 @@ void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
!memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) &&
(!memcmp(q->sink_device_id, ident->device_id,
sizeof(ident->device_id)) ||
- !memchr_inv(q->sink_device_id, 0, sizeof(q->sink_device_id))))
+ mem_is_zero(q->sink_device_id, sizeof(q->sink_device_id))))
q->hook(intel_dp);
}
}
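
The quirks change replaces the double negative !memchr_inv(p, 0, n) with mem_is_zero(p, n). The two are equivalent; the new spelling just reads in the positive. A portable model of the predicate:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* True iff every byte in p[0..n) is zero -- what the old
 * !memchr_inv(p, 0, n) expressed through a double negative. */
static bool mem_is_zero_model(const void *p, size_t n)
{
	const unsigned char *b = p;

	for (size_t i = 0; i < n; i++)
		if (b[i])
			return false;
	return true;
}

int main(void)
{
	unsigned char id[6] = { 0 };

	printf("%d\n", mem_is_zero_model(id, sizeof(id)));	/* 1 */
	id[3] = 0xaa;
	printf("%d\n", mem_is_zero_model(id, sizeof(id)));	/* 0 */
	return 0;
}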
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index d0d712405129..7cc519b402e9 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -95,7 +95,7 @@ struct intel_sdvo {
struct intel_encoder base;
struct i2c_adapter *i2c;
- u8 slave_addr;
+ u8 target_addr;
struct intel_sdvo_ddc ddc[3];
@@ -255,13 +255,13 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr,
+ .addr = intel_sdvo->target_addr,
.flags = 0,
.len = 1,
.buf = &addr,
},
{
- .addr = intel_sdvo->slave_addr,
+ .addr = intel_sdvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = ch,
@@ -483,14 +483,14 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].addr = intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2 *i;
buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
buf[2*i + 1] = ((u8*)args)[i];
}
- msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].addr = intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2*i;
@@ -499,12 +499,12 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
/* the following two are to read the response */
status = SDVO_I2C_CMD_STATUS;
- msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].addr = intel_sdvo->target_addr;
msgs[i+1].flags = 0;
msgs[i+1].len = 1;
msgs[i+1].buf = &status;
- msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].addr = intel_sdvo->target_addr;
msgs[i+2].flags = I2C_M_RD;
msgs[i+2].len = 1;
msgs[i+2].buf = &status;
@@ -2652,9 +2652,9 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo)
else
pin = GMBUS_PIN_DPB;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] I2C pin %d, slave addr 0x%x\n",
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] I2C pin %d, target addr 0x%x\n",
sdvo->base.base.base.id, sdvo->base.base.name,
- pin, sdvo->slave_addr);
+ pin, sdvo->target_addr);
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
@@ -2680,7 +2680,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo)
}
static u8
-intel_sdvo_get_slave_addr(struct intel_sdvo *sdvo)
+intel_sdvo_get_target_addr(struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev);
const struct sdvo_device_mapping *my_mapping, *other_mapping;
@@ -2694,15 +2694,15 @@ intel_sdvo_get_slave_addr(struct intel_sdvo *sdvo)
}
/* If the BIOS described our SDVO device, take advantage of it. */
- if (my_mapping->slave_addr)
- return my_mapping->slave_addr;
+ if (my_mapping->target_addr)
+ return my_mapping->target_addr;
/*
* If the BIOS only described a different SDVO device, use the
* address that it isn't using.
*/
- if (other_mapping->slave_addr) {
- if (other_mapping->slave_addr == 0x70)
+ if (other_mapping->target_addr) {
+ if (other_mapping->target_addr == 0x70)
return 0x72;
else
return 0x70;
@@ -2919,6 +2919,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
{
+ struct intel_display *display = to_intel_display(&intel_sdvo->base);
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_i915_private *i915 = to_i915(encoder->dev);
struct drm_connector *connector;
@@ -2946,7 +2947,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
- intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(display, &intel_connector->panel, NULL, NULL);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -3405,7 +3406,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
"SDVO %c", port_name(port));
intel_sdvo->sdvo_reg = sdvo_reg;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(intel_sdvo) >> 1;
+ intel_sdvo->target_addr = intel_sdvo_get_target_addr(intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(intel_sdvo);
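
The >> 1 suggests the BIOS mapping tables store the 8-bit bus address (7-bit address plus R/W bit) while i2c_msg.addr wants the 7-bit form; the rename from slave_addr to target_addr does not change that convention. A quick illustration with the 0x70/0x72 defaults from the code above:

#include <stdio.h>

int main(void)
{
	/* 8-bit bus addresses as found in the mapping tables. */
	unsigned char bus_addr[] = { 0x70, 0x72 };

	for (int i = 0; i < 2; i++) {
		/* Drop the R/W bit for i2c_msg.addr. */
		unsigned char addr7 = bus_addr[i] >> 1;

		printf("0x%02x -> 0x%02x\n", bus_addr[i], addr7);
	}
	return 0;
}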
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index f8cceb3e5d8e..e657b09ede99 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -48,9 +48,9 @@
#include "intel_sprite.h"
#include "intel_sprite_regs.h"
-static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
+static char sprite_name(struct intel_display *display, enum pipe pipe, int sprite)
{
- return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A';
+ return pipe * DISPLAY_RUNTIME_INFO(display)->num_sprites[pipe] + sprite + 'A';
}
static void i9xx_plane_linear_gamma(u16 gamma[8])
@@ -67,7 +67,7 @@ static void
chv_sprite_update_csc(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
/*
@@ -100,35 +100,35 @@ chv_sprite_update_csc(const struct intel_plane_state *plane_state)
if (!fb->format->is_yuv)
return;
- intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id),
+ intel_de_write_fw(display, SPCSCYGOFF(plane_id),
SPCSC_OOFF(0) | SPCSC_IOFF(0));
- intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id),
+ intel_de_write_fw(display, SPCSCCBOFF(plane_id),
SPCSC_OOFF(0) | SPCSC_IOFF(0));
- intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id),
+ intel_de_write_fw(display, SPCSCCROFF(plane_id),
SPCSC_OOFF(0) | SPCSC_IOFF(0));
- intel_de_write_fw(dev_priv, SPCSCC01(plane_id),
+ intel_de_write_fw(display, SPCSCC01(plane_id),
SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
- intel_de_write_fw(dev_priv, SPCSCC23(plane_id),
+ intel_de_write_fw(display, SPCSCC23(plane_id),
SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
- intel_de_write_fw(dev_priv, SPCSCC45(plane_id),
+ intel_de_write_fw(display, SPCSCC45(plane_id),
SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
- intel_de_write_fw(dev_priv, SPCSCC67(plane_id),
+ intel_de_write_fw(display, SPCSCC67(plane_id),
SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
- intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
+ intel_de_write_fw(display, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
- intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCYGICLAMP(plane_id),
SPCSC_IMAX(1023) | SPCSC_IMIN(0));
- intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCCBICLAMP(plane_id),
SPCSC_IMAX(512) | SPCSC_IMIN(-512));
- intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCCRICLAMP(plane_id),
SPCSC_IMAX(512) | SPCSC_IMIN(-512));
- intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCYGOCLAMP(plane_id),
SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCCBOCLAMP(plane_id),
SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id),
+ intel_de_write_fw(display, SPCSCCROCLAMP(plane_id),
SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
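
The nine CSC coefficients go out two per 32-bit register, SPCSCC01 through SPCSCC67, with the ninth alone in SPCSCC8. A model of that pairing, assuming C1 occupies the upper halfword and C0 the lower (the real field macros may differ):

#include <stdint.h>
#include <stdio.h>

#define CSC_C1(v) ((uint32_t)((v) & 0xffff) << 16)
#define CSC_C0(v) ((uint32_t)((v) & 0xffff))

int main(void)
{
	/* Identity-ish 3x3 matrix, row-major, 16-bit entries. */
	uint16_t csc[9] = { 0x1000, 0, 0, 0, 0x1000, 0, 0, 0, 0x1000 };

	/* Four paired registers plus one single register. */
	for (int i = 0; i < 8; i += 2)
		printf("SPCSCC%d%d = 0x%08x\n", i, i + 1,
		       CSC_C1(csc[i + 1]) | CSC_C0(csc[i]));
	printf("SPCSCC8   = 0x%08x\n", CSC_C0(csc[8]));
	return 0;
}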
@@ -139,7 +139,7 @@ static void
vlv_sprite_update_clrc(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -168,9 +168,9 @@ vlv_sprite_update_clrc(const struct intel_plane_state *plane_state)
}
/* FIXME these registers are single buffered :( */
- intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id),
+ intel_de_write_fw(display, SPCLRC0(pipe, plane_id),
SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
- intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id),
+ intel_de_write_fw(display, SPCLRC1(pipe, plane_id),
SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
@@ -357,7 +357,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -373,7 +373,7 @@ static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1),
+ intel_de_write_fw(display, SPGAMC(pipe, plane_id, i - 1),
gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
@@ -382,7 +382,7 @@ vlv_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
int crtc_x = plane_state->uapi.dst.x1;
@@ -390,11 +390,11 @@ vlv_sprite_update_noarm(struct intel_plane *plane,
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
- intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
+ intel_de_write_fw(display, SPSTRIDE(pipe, plane_id),
plane_state->view.color_plane[0].mapping_stride);
- intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
+ intel_de_write_fw(display, SPPOS(pipe, plane_id),
SP_POS_Y(crtc_y) | SP_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
+ intel_de_write_fw(display, SPSIZE(pipe, plane_id),
SP_HEIGHT(crtc_h - 1) | SP_WIDTH(crtc_w - 1));
}
@@ -403,6 +403,7 @@ vlv_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -420,18 +421,18 @@ vlv_sprite_update_arm(struct intel_plane *plane,
chv_sprite_update_csc(plane_state);
if (key->flags) {
- intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
+ intel_de_write_fw(display, SPKEYMINVAL(pipe, plane_id),
key->min_value);
- intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id),
+ intel_de_write_fw(display, SPKEYMSK(pipe, plane_id),
key->channel_mask);
- intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id),
+ intel_de_write_fw(display, SPKEYMAXVAL(pipe, plane_id),
key->max_value);
}
- intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
+ intel_de_write_fw(display, SPCONSTALPHA(pipe, plane_id), 0);
- intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
- intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id),
+ intel_de_write_fw(display, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(display, SPTILEOFF(pipe, plane_id),
SP_OFFSET_Y(y) | SP_OFFSET_X(x));
/*
@@ -439,8 +440,8 @@ vlv_sprite_update_arm(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl);
- intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
+ intel_de_write_fw(display, SPCNTR(pipe, plane_id), sprctl);
+ intel_de_write_fw(display, SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_sprite_update_clrc(plane_state);
@@ -451,18 +452,19 @@ static void
vlv_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
- intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0);
- intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0);
+ intel_de_write_fw(display, SPCNTR(pipe, plane_id), 0);
+ intel_de_write_fw(display, SPSURF(pipe, plane_id), 0);
}
static bool
vlv_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
@@ -474,7 +476,7 @@ vlv_sprite_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+ ret = intel_de_read(display, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
*pipe = plane->pipe;
@@ -766,7 +768,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
u16 gamma[18];
int i;
@@ -778,17 +780,17 @@ static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
- intel_de_write_fw(dev_priv, SPRGAMC(pipe, i),
+ intel_de_write_fw(display, SPRGAMC(pipe, i),
gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC16(pipe, 0), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC16(pipe, 1), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC16(pipe, 2), gamma[i]);
i++;
- intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]);
- intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC17(pipe, 0), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC17(pipe, 1), gamma[i]);
+ intel_de_write_fw(display, SPRGAMC17(pipe, 2), gamma[i]);
i++;
}
@@ -797,6 +799,7 @@ ivb_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
int crtc_x = plane_state->uapi.dst.x1;
@@ -812,14 +815,14 @@ ivb_sprite_update_noarm(struct intel_plane *plane,
SPRITE_SRC_WIDTH(src_w - 1) |
SPRITE_SRC_HEIGHT(src_h - 1);
- intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
+ intel_de_write_fw(display, SPRSTRIDE(pipe),
plane_state->view.color_plane[0].mapping_stride);
- intel_de_write_fw(dev_priv, SPRPOS(pipe),
+ intel_de_write_fw(display, SPRPOS(pipe),
SPRITE_POS_Y(crtc_y) | SPRITE_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, SPRSIZE(pipe),
+ intel_de_write_fw(display, SPRSIZE(pipe),
SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1));
if (IS_IVYBRIDGE(dev_priv))
- intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
+ intel_de_write_fw(display, SPRSCALE(pipe), sprscale);
}
static void
@@ -827,6 +830,7 @@ ivb_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -840,20 +844,20 @@ ivb_sprite_update_arm(struct intel_plane *plane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (key->flags) {
- intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
- intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
+ intel_de_write_fw(display, SPRKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(display, SPRKEYMSK(pipe),
key->channel_mask);
- intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(display, SPRKEYMAX(pipe), key->max_value);
}
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- intel_de_write_fw(dev_priv, SPROFFSET(pipe),
+ intel_de_write_fw(display, SPROFFSET(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
} else {
- intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset);
- intel_de_write_fw(dev_priv, SPRTILEOFF(pipe),
+ intel_de_write_fw(display, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, SPRTILEOFF(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
}
@@ -862,8 +866,8 @@ ivb_sprite_update_arm(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl);
- intel_de_write_fw(dev_priv, SPRSURF(pipe),
+ intel_de_write_fw(display, SPRCTL(pipe), sprctl);
+ intel_de_write_fw(display, SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
ivb_sprite_update_gamma(plane_state);
@@ -873,20 +877,22 @@ static void
ivb_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- intel_de_write_fw(dev_priv, SPRCTL(pipe), 0);
+ intel_de_write_fw(display, SPRCTL(pipe), 0);
/* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
- intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0);
- intel_de_write_fw(dev_priv, SPRSURF(pipe), 0);
+ intel_de_write_fw(display, SPRSCALE(pipe), 0);
+ intel_de_write_fw(display, SPRSURF(pipe), 0);
}
static bool
ivb_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -897,7 +903,7 @@ ivb_sprite_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+ ret = intel_de_read(display, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
*pipe = plane->pipe;
@@ -1073,7 +1079,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[8];
@@ -1088,7 +1094,7 @@ static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1),
+ intel_de_write_fw(display, DVSGAMC_G4X(pipe, i - 1),
gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
@@ -1103,7 +1109,7 @@ static void ilk_sprite_linear_gamma(u16 gamma[17])
static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[17];
@@ -1117,12 +1123,12 @@ static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
- intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i),
+ intel_de_write_fw(display, DVSGAMC_ILK(pipe, i),
gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
- intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
- intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
- intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+ intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ intel_de_write_fw(display, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
i++;
}
@@ -1131,7 +1137,7 @@ g4x_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
@@ -1146,13 +1152,13 @@ g4x_sprite_update_noarm(struct intel_plane *plane,
DVS_SRC_WIDTH(src_w - 1) |
DVS_SRC_HEIGHT(src_h - 1);
- intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
+ intel_de_write_fw(display, DVSSTRIDE(pipe),
plane_state->view.color_plane[0].mapping_stride);
- intel_de_write_fw(dev_priv, DVSPOS(pipe),
+ intel_de_write_fw(display, DVSPOS(pipe),
DVS_POS_Y(crtc_y) | DVS_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, DVSSIZE(pipe),
+ intel_de_write_fw(display, DVSSIZE(pipe),
DVS_HEIGHT(crtc_h - 1) | DVS_WIDTH(crtc_w - 1));
- intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
+ intel_de_write_fw(display, DVSSCALE(pipe), dvsscale);
}
static void
@@ -1160,6 +1166,7 @@ g4x_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -1173,14 +1180,14 @@ g4x_sprite_update_arm(struct intel_plane *plane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (key->flags) {
- intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
- intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
+ intel_de_write_fw(display, DVSKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(display, DVSKEYMSK(pipe),
key->channel_mask);
- intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(display, DVSKEYMAX(pipe), key->max_value);
}
- intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
- intel_de_write_fw(dev_priv, DVSTILEOFF(pipe),
+ intel_de_write_fw(display, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, DVSTILEOFF(pipe),
DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
/*
@@ -1188,8 +1195,8 @@ g4x_sprite_update_arm(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr);
- intel_de_write_fw(dev_priv, DVSSURF(pipe),
+ intel_de_write_fw(display, DVSCNTR(pipe), dvscntr);
+ intel_de_write_fw(display, DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
if (IS_G4X(dev_priv))
@@ -1202,19 +1209,20 @@ static void
g4x_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
- intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0);
+ intel_de_write_fw(display, DVSCNTR(pipe), 0);
/* Disable the scaler */
- intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0);
- intel_de_write_fw(dev_priv, DVSSURF(pipe), 0);
+ intel_de_write_fw(display, DVSSCALE(pipe), 0);
+ intel_de_write_fw(display, DVSSURF(pipe), 0);
}
static bool
g4x_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -1225,7 +1233,7 @@ g4x_sprite_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE;
+ ret = intel_de_read(display, DVSCNTR(plane->pipe)) & DVS_ENABLE;
*pipe = plane->pipe;
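The scaling path below rejects configurations outside a few hard numeric limits. Collected into a single predicate (the helper is illustrative; the values are taken from the checks in g4x_sprite_check_scaling() below):

    /* Illustrative summary of g4x_sprite_check_scaling()'s limits:
     * mode-dependent minimum source size, a 2048x2048 source ceiling,
     * and 4096-byte ceilings on both fetch width and stride. */
    static int g4x_scaled_src_ok(int src_w, int src_h,
                                 int min_width, int min_height,
                                 unsigned int width_bytes, unsigned int stride)
    {
            return src_w >= min_width && src_h >= min_height &&
                   src_w <= 2048 && src_h <= 2048 &&
                   width_bytes <= 4096 && stride <= 4096;
    }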
@@ -1255,7 +1263,7 @@ static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
const struct drm_rect *src = &plane_state->uapi.src;
const struct drm_rect *dst = &plane_state->uapi.dst;
@@ -1281,7 +1289,8 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (src_h & 1) {
- drm_dbg_kms(&i915->drm, "Source height must be even with interlaced modes\n");
+ drm_dbg_kms(display->drm,
+ "Source height must be even with interlaced modes\n");
return -EINVAL;
}
min_height = 6;
@@ -1293,19 +1302,22 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
if (src_w < min_width || src_h < min_height ||
src_w > 2048 || src_h > 2048) {
- drm_dbg_kms(&i915->drm, "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
+ drm_dbg_kms(display->drm,
+ "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
src_w, src_h, min_width, min_height, 2048, 2048);
return -EINVAL;
}
if (width_bytes > 4096) {
- drm_dbg_kms(&i915->drm, "Fetch width (%d) exceeds hardware max with scaling (%u)\n",
+ drm_dbg_kms(display->drm,
+ "Fetch width (%d) exceeds hardware max with scaling (%u)\n",
width_bytes, 4096);
return -EINVAL;
}
if (stride > 4096) {
- drm_dbg_kms(&i915->drm, "Stride (%u) exceeds hardware max with scaling (%u)\n",
+ drm_dbg_kms(display->drm,
+ "Stride (%u) exceeds hardware max with scaling (%u)\n",
stride, 4096);
return -EINVAL;
}
@@ -1317,6 +1329,7 @@ static int
g4x_sprite_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int min_scale = DRM_PLANE_NO_SCALING;
@@ -1324,7 +1337,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int ret;
if (g4x_fb_scalable(plane_state->hw.fb)) {
- if (DISPLAY_VER(dev_priv) < 7) {
+ if (DISPLAY_VER(display) < 7) {
min_scale = 1;
max_scale = 16 << 16;
} else if (IS_IVYBRIDGE(dev_priv)) {
@@ -1353,7 +1366,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) >= 7)
plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
else
plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
@@ -1364,6 +1377,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
unsigned int rotation = plane_state->hw.rotation;
@@ -1371,7 +1385,7 @@ int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
if (IS_CHERRYVIEW(dev_priv) &&
rotation & DRM_MODE_ROTATE_180 &&
rotation & DRM_MODE_REFLECT_X) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Cannot rotate and reflect at the same time\n");
return -EINVAL;
}
@@ -1573,6 +1587,7 @@ struct intel_plane *
intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int sprite)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
@@ -1604,7 +1619,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
}
plane_funcs = &vlv_sprite_funcs;
- } else if (DISPLAY_VER(dev_priv) >= 7) {
+ } else if (DISPLAY_VER(display) >= 7) {
plane->update_noarm = ivb_sprite_update_noarm;
plane->update_arm = ivb_sprite_update_arm;
plane->disable_arm = ivb_sprite_disable_arm;
@@ -1663,11 +1678,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
- ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ ret = drm_universal_plane_init(display->drm, &plane->base,
0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
- "sprite %c", sprite_name(dev_priv, pipe, sprite));
+ "sprite %c", sprite_name(display, pipe, sprite));
kfree(modifiers);
if (ret)
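The intel_sprite.c hunks above all apply one mechanical conversion: functions that only touch display registers derive struct intel_display instead of (or alongside) struct drm_i915_private. The before/after shape, excerpted from this file (not standalone code):

    /* Before: display register access through the full device struct. */
    struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
    intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl);

    /* After: the display-scoped struct is derived once and passed to the
     * register helpers; functions that still need dev_priv (IS_IVYBRIDGE(),
     * IS_G4X(), ...) keep both pointers side by side. */
    struct intel_display *display = to_intel_display(plane->base.dev);
    intel_de_write_fw(display, SPCNTR(pipe, plane_id), sprctl);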
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 9887967b2ca5..6f2ee7dbc43b 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -393,6 +393,9 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
u32 val;
+ if (DISPLAY_VER(i915) >= 14)
+ return;
+
drm_WARN_ON(&i915->drm,
lane_reversal && tc->mode != TC_PORT_LEGACY);
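The only functional change in this hunk is the early return: FIA lane-count programming is legacy and no longer applies from display version 14 onward. As a standalone predicate (hypothetical helper; the kernel open-codes the comparison):

    /* Hypothetical condensation of the guard added above. */
    static int fia_lane_programming_applies(int display_ver)
    {
            return display_ver < 14;
    }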
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 9df0f1263913..581844d1db9a 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -914,8 +914,8 @@ static struct intel_tv *intel_attached_tv(struct intel_connector *connector)
static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 tmp = intel_de_read(dev_priv, TV_CTL);
+ struct intel_display *display = to_intel_display(encoder);
+ u32 tmp = intel_de_read(display, TV_CTL);
*pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
@@ -928,13 +928,12 @@ intel_enable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(state);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
- intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE);
+ intel_de_rmw(display, TV_CTL, 0, TV_ENC_ENABLE);
}
static void
@@ -943,10 +942,9 @@ intel_disable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(state);
- intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0);
+ intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
@@ -960,9 +958,10 @@ static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
- int max_dotclk = i915->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
@@ -1092,6 +1091,7 @@ static void
intel_tv_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
@@ -1104,11 +1104,11 @@ intel_tv_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
- tv_ctl = intel_de_read(dev_priv, TV_CTL);
- hctl1 = intel_de_read(dev_priv, TV_H_CTL_1);
- hctl3 = intel_de_read(dev_priv, TV_H_CTL_3);
- vctl1 = intel_de_read(dev_priv, TV_V_CTL_1);
- vctl2 = intel_de_read(dev_priv, TV_V_CTL_2);
+ tv_ctl = intel_de_read(display, TV_CTL);
+ hctl1 = intel_de_read(display, TV_H_CTL_1);
+ hctl3 = intel_de_read(display, TV_H_CTL_3);
+ vctl1 = intel_de_read(display, TV_V_CTL_1);
+ vctl2 = intel_de_read(display, TV_V_CTL_2);
tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
@@ -1143,17 +1143,17 @@ intel_tv_get_config(struct intel_encoder *encoder,
break;
}
- tmp = intel_de_read(dev_priv, TV_WIN_POS);
+ tmp = intel_de_read(display, TV_WIN_POS);
xpos = tmp >> 16;
ypos = tmp & 0xffff;
- tmp = intel_de_read(dev_priv, TV_WIN_SIZE);
+ tmp = intel_de_read(display, TV_WIN_SIZE);
xsize = tmp >> 16;
ysize = tmp & 0xffff;
intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock);
- drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ drm_dbg_kms(display->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&mode));
intel_tv_scale_mode_horiz(&mode, hdisplay,
@@ -1171,10 +1171,10 @@ intel_tv_get_config(struct intel_encoder *encoder,
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
}
-static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv,
+static bool intel_tv_source_too_wide(struct intel_display *display,
int hdisplay)
{
- return DISPLAY_VER(dev_priv) == 3 && hdisplay > 1024;
+ return DISPLAY_VER(display) == 3 && hdisplay > 1024;
}
static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode,
@@ -1192,6 +1192,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state =
to_intel_atomic_state(pipe_config->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -1214,7 +1215,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n");
+ drm_dbg_kms(display->drm, "forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
pipe_config->port_clock = tv_mode->clock;
@@ -1228,14 +1229,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
+ if (intel_tv_source_too_wide(display, hdisplay) ||
!intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) {
int extra, top, bottom;
extra = adjusted_mode->crtc_vdisplay - vdisplay;
if (extra < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No vertical scaling for >1024 pixel wide modes\n");
return -EINVAL;
}
@@ -1269,7 +1270,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
tv_conn_state->bypass_vfilter = false;
}
- drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ drm_dbg_kms(display->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(adjusted_mode));
/*
@@ -1355,7 +1356,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
}
static void
-set_tv_mode_timings(struct drm_i915_private *dev_priv,
+set_tv_mode_timings(struct intel_display *display,
const struct tv_mode *tv_mode,
bool burst_ena)
{
@@ -1401,32 +1402,32 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
- intel_de_write(dev_priv, TV_H_CTL_1, hctl1);
- intel_de_write(dev_priv, TV_H_CTL_2, hctl2);
- intel_de_write(dev_priv, TV_H_CTL_3, hctl3);
- intel_de_write(dev_priv, TV_V_CTL_1, vctl1);
- intel_de_write(dev_priv, TV_V_CTL_2, vctl2);
- intel_de_write(dev_priv, TV_V_CTL_3, vctl3);
- intel_de_write(dev_priv, TV_V_CTL_4, vctl4);
- intel_de_write(dev_priv, TV_V_CTL_5, vctl5);
- intel_de_write(dev_priv, TV_V_CTL_6, vctl6);
- intel_de_write(dev_priv, TV_V_CTL_7, vctl7);
+ intel_de_write(display, TV_H_CTL_1, hctl1);
+ intel_de_write(display, TV_H_CTL_2, hctl2);
+ intel_de_write(display, TV_H_CTL_3, hctl3);
+ intel_de_write(display, TV_V_CTL_1, vctl1);
+ intel_de_write(display, TV_V_CTL_2, vctl2);
+ intel_de_write(display, TV_V_CTL_3, vctl3);
+ intel_de_write(display, TV_V_CTL_4, vctl4);
+ intel_de_write(display, TV_V_CTL_5, vctl5);
+ intel_de_write(display, TV_V_CTL_6, vctl6);
+ intel_de_write(display, TV_V_CTL_7, vctl7);
}
-static void set_color_conversion(struct drm_i915_private *dev_priv,
+static void set_color_conversion(struct intel_display *display,
const struct color_conversion *color_conversion)
{
- intel_de_write(dev_priv, TV_CSC_Y,
+ intel_de_write(display, TV_CSC_Y,
(color_conversion->ry << 16) | color_conversion->gy);
- intel_de_write(dev_priv, TV_CSC_Y2,
+ intel_de_write(display, TV_CSC_Y2,
(color_conversion->by << 16) | color_conversion->ay);
- intel_de_write(dev_priv, TV_CSC_U,
+ intel_de_write(display, TV_CSC_U,
(color_conversion->ru << 16) | color_conversion->gu);
- intel_de_write(dev_priv, TV_CSC_U2,
+ intel_de_write(display, TV_CSC_U2,
(color_conversion->bu << 16) | color_conversion->au);
- intel_de_write(dev_priv, TV_CSC_V,
+ intel_de_write(display, TV_CSC_V,
(color_conversion->rv << 16) | color_conversion->gv);
- intel_de_write(dev_priv, TV_CSC_V2,
+ intel_de_write(display, TV_CSC_V2,
(color_conversion->bv << 16) | color_conversion->av);
}
@@ -1435,6 +1436,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
@@ -1450,7 +1452,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
int xpos, ypos;
unsigned int xsize, ysize;
- tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ tv_ctl = intel_de_read(display, TV_CTL);
tv_ctl &= TV_CTL_SAVE;
switch (intel_tv->type) {
@@ -1525,21 +1527,21 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
if (IS_I915GM(dev_priv))
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
- set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
+ set_tv_mode_timings(display, tv_mode, burst_ena);
- intel_de_write(dev_priv, TV_SC_CTL_1, scctl1);
- intel_de_write(dev_priv, TV_SC_CTL_2, scctl2);
- intel_de_write(dev_priv, TV_SC_CTL_3, scctl3);
+ intel_de_write(display, TV_SC_CTL_1, scctl1);
+ intel_de_write(display, TV_SC_CTL_2, scctl2);
+ intel_de_write(display, TV_SC_CTL_3, scctl3);
- set_color_conversion(dev_priv, color_conversion);
+ set_color_conversion(display, color_conversion);
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000);
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write(display, TV_CLR_KNOBS, 0x00404000);
else
- intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000);
+ intel_de_write(display, TV_CLR_KNOBS, 0x00606000);
if (video_levels)
- intel_de_write(dev_priv, TV_CLR_LEVEL,
+ intel_de_write(display, TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
@@ -1548,7 +1550,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
tv_filter_ctl = TV_AUTO_SCALE;
if (tv_conn_state->bypass_vfilter)
tv_filter_ctl |= TV_V_FILTER_BYPASS;
- intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl);
+ intel_de_write(display, TV_FILTER_CTL_1, tv_filter_ctl);
xsize = tv_mode->hblank_start - tv_mode->hblank_end;
ysize = intel_tv_mode_vdisplay(tv_mode);
@@ -1559,31 +1561,32 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
conn_state->tv.margins.right);
ysize -= (tv_conn_state->margins.top +
tv_conn_state->margins.bottom);
- intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos);
- intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize);
+ intel_de_write(display, TV_WIN_POS, (xpos << 16) | ypos);
+ intel_de_write(display, TV_WIN_SIZE, (xsize << 16) | ysize);
j = 0;
for (i = 0; i < 60; i++)
- intel_de_write(dev_priv, TV_H_LUMA(i),
+ intel_de_write(display, TV_H_LUMA(i),
tv_mode->filter_table[j++]);
for (i = 0; i < 60; i++)
- intel_de_write(dev_priv, TV_H_CHROMA(i),
+ intel_de_write(display, TV_H_CHROMA(i),
tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- intel_de_write(dev_priv, TV_V_LUMA(i),
+ intel_de_write(display, TV_V_LUMA(i),
tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- intel_de_write(dev_priv, TV_V_CHROMA(i),
+ intel_de_write(display, TV_V_CHROMA(i),
tv_mode->filter_table[j++]);
- intel_de_write(dev_priv, TV_DAC,
- intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE);
- intel_de_write(dev_priv, TV_CTL, tv_ctl);
+ intel_de_write(display, TV_DAC,
+ intel_de_read(display, TV_DAC) & TV_DAC_SAVE);
+ intel_de_write(display, TV_CTL, tv_ctl);
}
static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1600,8 +1603,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
spin_unlock_irq(&dev_priv->irq_lock);
}
- save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC);
- save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ save_tv_dac = tv_dac = intel_de_read(display, TV_DAC);
+ save_tv_ctl = tv_ctl = intel_de_read(display, TV_CTL);
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
@@ -1627,15 +1630,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
- intel_de_write(dev_priv, TV_CTL, tv_ctl);
- intel_de_write(dev_priv, TV_DAC, tv_dac);
- intel_de_posting_read(dev_priv, TV_DAC);
+ intel_de_write(display, TV_CTL, tv_ctl);
+ intel_de_write(display, TV_DAC, tv_dac);
+ intel_de_posting_read(display, TV_DAC);
intel_crtc_wait_for_next_vblank(crtc);
type = -1;
- tv_dac = intel_de_read(dev_priv, TV_DAC);
- drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
+ tv_dac = intel_de_read(display, TV_DAC);
+ drm_dbg_kms(display->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
/*
* A B C
* 0 1 1 Composite
@@ -1643,25 +1646,25 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* 0 0 0 Component
*/
if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Detected Composite TV connection\n");
type = DRM_MODE_CONNECTOR_Composite;
} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Detected S-Video TV connection\n");
type = DRM_MODE_CONNECTOR_SVIDEO;
} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n");
+ drm_dbg_kms(display->drm, "Unrecognised TV connection\n");
type = -1;
}
- intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- intel_de_write(dev_priv, TV_CTL, save_tv_ctl);
- intel_de_posting_read(dev_priv, TV_CTL);
+ intel_de_write(display, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ intel_de_write(display, TV_CTL, save_tv_ctl);
+ intel_de_posting_read(display, TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
intel_crtc_wait_for_next_vblank(crtc);
@@ -1711,12 +1714,13 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name, force);
if (!intel_display_device_enabled(i915))
@@ -1791,7 +1795,7 @@ intel_tv_set_mode_type(struct drm_display_mode *mode,
static int
intel_tv_get_modes(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int i, count = 0;
@@ -1805,7 +1809,7 @@ intel_tv_get_modes(struct drm_connector *connector)
continue;
/* no vertical scaling with wide sources on gen3 */
- if (DISPLAY_VER(dev_priv) == 3 && input->w > 1024 &&
+ if (DISPLAY_VER(display) == 3 && input->w > 1024 &&
input->h > intel_tv_mode_vdisplay(tv_mode))
continue;
@@ -1822,7 +1826,8 @@ intel_tv_get_modes(struct drm_connector *connector)
*/
intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock);
if (count == 0) {
- drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ drm_dbg_kms(display->drm,
+ "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(mode));
}
intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
@@ -1887,7 +1892,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
static void intel_tv_add_properties(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_connector_state *conn_state = connector->state;
const char *tv_format_names[ARRAY_SIZE(tv_modes)];
int i;
@@ -1903,32 +1908,32 @@ static void intel_tv_add_properties(struct drm_connector *connector)
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
/* 1080p50/1080p60 not supported on gen3 */
- if (DISPLAY_VER(i915) == 3 && tv_modes[i].oversample == 1)
+ if (DISPLAY_VER(display) == 3 && tv_modes[i].oversample == 1)
break;
tv_format_names[i] = tv_modes[i].name;
}
- drm_mode_create_tv_properties_legacy(&i915->drm, i, tv_format_names);
+ drm_mode_create_tv_properties_legacy(display->drm, i, tv_format_names);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.legacy_tv_mode_property,
+ display->drm->mode_config.legacy_tv_mode_property,
conn_state->tv.legacy_mode);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_left_margin_property,
+ display->drm->mode_config.tv_left_margin_property,
conn_state->tv.margins.left);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_top_margin_property,
+ display->drm->mode_config.tv_top_margin_property,
conn_state->tv.margins.top);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_right_margin_property,
+ display->drm->mode_config.tv_right_margin_property,
conn_state->tv.margins.right);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_bottom_margin_property,
+ display->drm->mode_config.tv_bottom_margin_property,
conn_state->tv.margins.bottom);
}
void
-intel_tv_init(struct drm_i915_private *dev_priv)
+intel_tv_init(struct intel_display *display)
{
struct drm_connector *connector;
struct intel_tv *intel_tv;
@@ -1936,11 +1941,11 @@ intel_tv_init(struct drm_i915_private *dev_priv)
struct intel_connector *intel_connector;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
- if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ if ((intel_de_read(display, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
- if (!intel_bios_is_tv_present(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n");
+ if (!intel_bios_is_tv_present(display)) {
+ drm_dbg_kms(display->drm, "Integrated TV is not present.\n");
return;
}
@@ -1948,15 +1953,15 @@ intel_tv_init(struct drm_i915_private *dev_priv)
* Sanity check the TV output by checking to see if the
* DAC register holds a value
*/
- save_tv_dac = intel_de_read(dev_priv, TV_DAC);
+ save_tv_dac = intel_de_read(display, TV_DAC);
- intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
- tv_dac_on = intel_de_read(dev_priv, TV_DAC);
+ intel_de_write(display, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = intel_de_read(display, TV_DAC);
- intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- tv_dac_off = intel_de_read(dev_priv, TV_DAC);
+ intel_de_write(display, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = intel_de_read(display, TV_DAC);
- intel_de_write(dev_priv, TV_DAC, save_tv_dac);
+ intel_de_write(display, TV_DAC, save_tv_dac);
/*
* If the register does not hold the state change enable
@@ -1994,10 +1999,11 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
intel_connector->base.polled = intel_connector->polled;
- drm_connector_init(&dev_priv->drm, connector, &intel_tv_connector_funcs,
+ drm_connector_init(display->drm, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_tv_enc_funcs,
+ drm_encoder_init(display->drm, &intel_encoder->base,
+ &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC, "TV");
intel_encoder->compute_config = intel_tv_compute_config;
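The load-detect branches in intel_tv_detect_type() above map the three DAC sense bits onto a connector type per the truth table in the comment. Distilled into a standalone helper (hypothetical; the driver open-codes the tests against the TVDAC_*_SENSE masks):

    /* A nonzero input means that DAC sensed a load. 0 1 1 -> composite,
     * A without B -> s-video (C is don't-care), none -> component. */
    enum tv_conn { TV_NONE = -1, TV_COMPOSITE, TV_SVIDEO, TV_COMPONENT };

    static enum tv_conn decode_tv_sense(int a, int b, int c)
    {
            if (!a && b && c)
                    return TV_COMPOSITE;
            if (a && !b)
                    return TV_SVIDEO;
            if (!a && !b && !c)
                    return TV_COMPONENT;
            return TV_NONE;         /* unrecognised combination */
    }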
diff --git a/drivers/gpu/drm/i915/display/intel_tv.h b/drivers/gpu/drm/i915/display/intel_tv.h
index f08827b8bf2b..0f280f69e73c 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.h
+++ b/drivers/gpu/drm/i915/display/intel_tv.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_TV_H__
#define __INTEL_TV_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-void intel_tv_init(struct drm_i915_private *dev_priv);
+void intel_tv_init(struct intel_display *display);
#else
-static inline void intel_tv_init(struct drm_i915_private *dev_priv)
+static inline void intel_tv_init(struct intel_display *display)
{
}
#endif
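This header keeps the usual pattern for display code shared between kernel builds: a real prototype under #ifdef I915, an empty inline otherwise, so callers never carry their own guards. The pattern in general form (FEATURE and foo are placeholders):

    struct foo;

    #ifdef FEATURE
    void foo_init(struct foo *f);
    #else
    static inline void foo_init(struct foo *f)
    {
    }
    #endif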
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 5b065e1cd4e4..0b7f2134e441 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -67,8 +67,8 @@
*/
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ struct intel_display *display = to_intel_display(crtc->dev);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
const struct drm_display_mode *mode = &vblank->hwmode;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 pixel, vbl_start, hsync_start, htotal;
@@ -103,8 +103,8 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc)
* we get a low value that's stable across two reads of the high
* register.
*/
- frame = intel_de_read64_2x32(dev_priv, PIPEFRAMEPIXEL(dev_priv, pipe),
- PIPEFRAME(dev_priv, pipe));
+ frame = intel_de_read64_2x32(display, PIPEFRAMEPIXEL(display, pipe),
+ PIPEFRAME(display, pipe));
pixel = frame & PIPE_PIXEL_MASK;
frame = (frame >> PIPE_FRAME_LOW_SHIFT) & 0xffffff;
@@ -119,19 +119,19 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc)
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ struct intel_display *display = to_intel_display(crtc->dev);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
if (!vblank->max_vblank_count)
return 0;
- return intel_de_read(dev_priv, PIPE_FRMCOUNT_G4X(dev_priv, pipe));
+ return intel_de_read(display, PIPE_FRMCOUNT_G4X(display, pipe));
}
static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
const struct drm_display_mode *mode = &vblank->hwmode;
u32 htotal = mode->crtc_htotal;
@@ -150,16 +150,16 @@ static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
* pipe frame time stamp. The time stamp value
* is sampled at every start of vertical blank.
*/
- scan_prev_time = intel_de_read_fw(dev_priv,
+ scan_prev_time = intel_de_read_fw(display,
PIPE_FRMTMSTMP(crtc->pipe));
/*
* The TIMESTAMP_CTR register has the current
* time stamp value.
*/
- scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
+ scan_curr_time = intel_de_read_fw(display, IVB_TIMESTAMP_CTR);
- scan_post_time = intel_de_read_fw(dev_priv,
+ scan_post_time = intel_de_read_fw(display,
PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
@@ -190,8 +190,9 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
return scanline;
}
-static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
+int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/*
@@ -220,7 +221,7 @@ static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (DISPLAY_VER(i915) == 2)
+ if (DISPLAY_VER(display) == 2)
return -1;
else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return 2;
@@ -234,8 +235,7 @@ static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
*/
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
const struct drm_display_mode *mode = &vblank->hwmode;
enum pipe pipe = crtc->pipe;
@@ -249,7 +249,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vtotal = intel_mode_vtotal(mode);
- position = intel_de_read_fw(dev_priv, PIPEDSL(dev_priv, pipe)) & PIPEDSL_LINE_MASK;
+ position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
/*
* On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -263,13 +263,13 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
* problem. We may need to extend this to include other platforms,
* but so far testing only shows the problem on HSW.
*/
- if (HAS_DDI(dev_priv) && !position) {
+ if (HAS_DDI(display) && !position) {
int i, temp;
for (i = 0; i < 100; i++) {
udelay(1);
- temp = intel_de_read_fw(dev_priv,
- PIPEDSL(dev_priv, pipe)) & PIPEDSL_LINE_MASK;
+ temp = intel_de_read_fw(display,
+ PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
if (temp != position) {
position = temp;
break;
@@ -284,15 +284,6 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + vtotal + crtc->scanline_offset) % vtotal;
}
-int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline)
-{
- const struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
- const struct drm_display_mode *mode = &vblank->hwmode;
- int vtotal = intel_mode_vtotal(mode);
-
- return (scanline + vtotal - crtc->scanline_offset) % vtotal;
-}
-
/*
* The uncore version of the spin lock functions is used to decide
* whether we need to lock the uncore lock or not. This is only
@@ -303,41 +294,49 @@ int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline)
* all register accesses to the same cacheline to be serialized,
* otherwise they may hang.
*/
-static void intel_vblank_section_enter(struct drm_i915_private *i915)
+#ifdef I915
+static void intel_vblank_section_enter(struct intel_display *display)
__acquires(i915->uncore.lock)
{
-#ifdef I915
+ struct drm_i915_private *i915 = to_i915(display->drm);
spin_lock(&i915->uncore.lock);
-#endif
}
-static void intel_vblank_section_exit(struct drm_i915_private *i915)
+static void intel_vblank_section_exit(struct intel_display *display)
__releases(i915->uncore.lock)
{
-#ifdef I915
+ struct drm_i915_private *i915 = to_i915(display->drm);
spin_unlock(&i915->uncore.lock);
-#endif
+}
+#else
+static void intel_vblank_section_enter(struct intel_display *display)
+{
}
+static void intel_vblank_section_exit(struct intel_display *display)
+{
+}
+#endif
+
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- struct drm_device *dev = _crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(_crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = to_intel_crtc(_crtc);
enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
- bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
- IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
+ bool use_scanline_counter = DISPLAY_VER(display) >= 5 ||
+ IS_G4X(dev_priv) || DISPLAY_VER(display) == 2 ||
crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
- if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
- drm_dbg(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm, !mode->crtc_clock)) {
+ drm_dbg(display->drm,
"trying to get scanoutpos for disabled pipe %c\n",
pipe_name(pipe));
return false;
@@ -355,7 +354,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
* preemption disabled, so the following code must not block.
*/
local_irq_save(irqflags);
- intel_vblank_section_enter(dev_priv);
+ intel_vblank_section_enter(display);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -387,7 +386,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
* We can split this into vertical and horizontal
* scanout position.
*/
- position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(dev_priv, pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+ position = (intel_de_read_fw(display, PIPEFRAMEPIXEL(display, pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
/* convert to pixel counts */
vbl_start *= htotal;
@@ -423,7 +422,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
- intel_vblank_section_exit(dev_priv);
+ intel_vblank_section_exit(display);
local_irq_restore(irqflags);
/*
@@ -458,42 +457,42 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned long irqflags;
int position;
local_irq_save(irqflags);
- intel_vblank_section_enter(dev_priv);
+ intel_vblank_section_enter(display);
position = __intel_get_crtc_scanline(crtc);
- intel_vblank_section_exit(dev_priv);
+ intel_vblank_section_exit(display);
local_irq_restore(irqflags);
return position;
}
-static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+static bool pipe_scanline_is_moving(struct intel_display *display,
enum pipe pipe)
{
- i915_reg_t reg = PIPEDSL(dev_priv, pipe);
+ i915_reg_t reg = PIPEDSL(display, pipe);
u32 line1, line2;
- line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+ line1 = intel_de_read(display, reg) & PIPEDSL_LINE_MASK;
msleep(5);
- line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+ line2 = intel_de_read(display, reg) & PIPEDSL_LINE_MASK;
return line1 != line2;
}
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
/* Wait for the display line to settle/start moving */
- if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
- drm_err(&dev_priv->drm,
+ if (wait_for(pipe_scanline_is_moving(display, pipe) == state, 100))
+ drm_err(display->drm,
"pipe %c scanline %s wait timed out\n",
pipe_name(pipe), str_on_off(state));
}
@@ -511,8 +510,8 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
bool vrr_enable)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u8 mode_flags = crtc_state->mode_flags;
struct drm_display_mode adjusted_mode;
int vmax_vblank_start = 0;
@@ -521,7 +520,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
if (vrr_enable) {
- drm_WARN_ON(&i915->drm, (mode_flags & I915_MODE_FLAG_VRR) == 0);
+ drm_WARN_ON(display->drm,
+ (mode_flags & I915_MODE_FLAG_VRR) == 0);
adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
@@ -543,8 +543,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
* __intel_get_crtc_scanline()) with vblank_time_lock?
* Need to audit everything to make sure it's safe.
*/
- spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
- intel_vblank_section_enter(i915);
+ spin_lock_irqsave(&display->drm->vblank_time_lock, irqflags);
+ intel_vblank_section_enter(display);
drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
@@ -553,8 +553,8 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
crtc->mode_flags = mode_flags;
crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
- intel_vblank_section_exit(i915);
- spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
+ intel_vblank_section_exit(display);
+ spin_unlock_irqrestore(&display->drm->vblank_time_lock, irqflags);
}
int intel_mode_vdisplay(const struct drm_display_mode *mode)
@@ -652,14 +652,15 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
*/
if (intel_color_uses_dsb(new_crtc_state) ||
new_crtc_state->update_m_n || new_crtc_state->update_lrr)
- evade->min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+ evade->min -= intel_mode_vblank_start(adjusted_mode) -
+ intel_mode_vdisplay(adjusted_mode);
}
/* must be called with vblank interrupt already enabled! */
int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
{
struct intel_crtc *crtc = evade->crtc;
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
long timeout = msecs_to_jiffies_timeout(1);
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
@@ -681,7 +682,7 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
break;
if (!timeout) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Potential atomic update failure on pipe %c\n",
pipe_name(crtc->pipe));
break;
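The frame/pixel counter read near the top of this file depends on the low half being stable across two reads of the high half. A generic sketch of that read64-from-2x32 retry loop (read32() is a stand-in for the register accessor):

    #include <stdint.h>

    extern uint32_t read32(int reg);

    /* Retry until the high half is unchanged around the low-half read,
     * so the combined 64-bit value is consistent. */
    static uint64_t read64_2x32(int lo_reg, int hi_reg)
    {
            uint32_t lo, hi, old_hi;

            hi = read32(hi_reg);
            do {
                    old_hi = hi;
                    lo = read32(lo_reg);
                    hi = read32(hi_reg);
            } while (hi != old_hi);

            return ((uint64_t)hi << 32) | lo;
    }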
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 7e526f6861e4..6d7336256982 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -40,6 +40,6 @@ void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
bool vrr_enable);
-int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline);
+int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VBLANK_H__ */
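The header now exports intel_crtc_scanline_offset() and drops intel_crtc_scanline_to_hw(). Both directions of the underlying conversion appear in the intel_vblank.c diff above and reduce to simple modular arithmetic ("offset" is crtc->scanline_offset):

    /* hw_to_logical() matches __intel_get_crtc_scanline()'s final step;
     * logical_to_hw() is what the removed intel_crtc_scanline_to_hw()
     * computed. */
    static int hw_to_logical(int position, int vtotal, int offset)
    {
            return (position + vtotal + offset) % vtotal;
    }

    static int logical_to_hw(int scanline, int vtotal, int offset)
    {
            return (scanline + vtotal - offset) % vtotal;
    }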
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 1af8407e2081..42022756bbd5 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -493,7 +493,7 @@ struct child_device_config {
u16 addin_offset;
u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
u8 i2c_pin;
- u8 slave_addr;
+ u8 target_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
@@ -502,7 +502,7 @@ struct child_device_config {
struct {
u8 dvo2_port;
u8 i2c2_pin;
- u8 slave2_addr;
+ u8 target2_addr;
u8 ddc2_pin;
} __packed;
struct {
@@ -1080,6 +1080,8 @@ struct bdb_edp {
u16 edp_fast_link_training_rate[16]; /* 224+ */
u16 edp_max_port_link_rate[16]; /* 244+ */
u16 edp_dsc_disable; /* 251+ */
+ u16 t6_delay_support; /* 260+ */
+ u16 link_idle_time[16]; /* 260+ */
} __packed;
/*
@@ -1321,7 +1323,7 @@ struct als_data_entry {
} __packed;
struct aggressiveness_profile_entry {
- u8 dpst_aggressiveness : 4;
+ u8 dpst_aggressiveness : 4; /* (228/252)-256 */
u8 lace_aggressiveness : 4;
} __packed;
@@ -1330,12 +1332,27 @@ struct aggressiveness_profile2_entry {
u8 elp_aggressiveness : 4;
} __packed;
+struct aggressiveness_profile3_entry {
+ u8 apd_aggressiveness:4;
+ u8 pixoptix_aggressiveness:4;
+} __packed;
+
+struct aggressiveness_profile4_entry {
+ u8 xpst_aggressiveness:4;
+ u8 tcon_aggressiveness:4;
+} __packed;
+
+struct panel_identification {
+ u8 panel_technology:4;
+ u8 reserved:4;
+} __packed;
+
struct bdb_lfp_power {
struct lfp_power_features features; /* ???-227 */
struct als_data_entry als[5];
u8 lace_aggressiveness_profile:3; /* 210-227 */
u8 reserved1:5;
- u16 dpst; /* 228+ */
+ u16 dpst; /* 228-256 */
u16 psr; /* 228+ */
u16 drrs; /* 228+ */
u16 lace_support; /* 228+ */
@@ -1343,12 +1360,20 @@ struct bdb_lfp_power {
u16 dmrrs; /* 228+ */
u16 adb; /* 228+ */
u16 lace_enabled_status; /* 228+ */
- struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */
+ struct aggressiveness_profile_entry aggressiveness[16];
u16 hobl; /* 232+ */
u16 vrr_feature_enabled; /* 233+ */
- u16 elp; /* 247+ */
- u16 opst; /* 247+ */
- struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
+ u16 elp; /* 247-256 */
+ u16 opst; /* 247-256 */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247-256 */
+ u16 apd; /* 253-256 */
+ u16 pixoptix; /* 253-256 */
+ struct aggressiveness_profile3_entry aggressiveness3[16]; /* 253-256 */
+ struct panel_identification panel_identification[16]; /* 257+ */
+ u16 xpst_support; /* 257+ */
+ u16 tcon_based_backlight_optimization; /* 257+ */
+ struct aggressiveness_profile4_entry aggressiveness4[16]; /* 257+ */
+ u16 tcon_backlight_xpst_coexistence; /* 257+ */
} __packed;
/*
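The version ranges annotated on the bdb_lfp_power fields above ("228-256", "247-256", "257+", ...) are significant: a field is only meaningful when the VBT's BDB version falls inside its range. A sketch of how such annotations are typically honoured by a parser (the helper and its callers are illustrative, not the kernel's actual code):

    /* max_ver == 0 encodes an open-ended "N+" range. */
    static int bdb_field_valid(int bdb_version, int min_ver, int max_ver)
    {
            return bdb_version >= min_ver &&
                   (max_ver == 0 || bdb_version <= max_ver);
    }

    /* e.g. dpst ("228-256"):      bdb_field_valid(ver, 228, 256)
     *      xpst_support ("257+"): bdb_field_valid(ver, 257, 0)  */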
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index b9687b7692b8..2e849b015e74 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -8,6 +8,7 @@
#include <linux/limits.h>
#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_fixed.h>
#include "i915_drv.h"
#include "intel_crtc.h"
@@ -76,7 +77,7 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
static void
calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
{
- int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel);
+ int bpp = fxp_q4_to_int(vdsc_cfg->bits_per_pixel);
int bpc = vdsc_cfg->bits_per_component;
int qp_bpc_modifier = (bpc - 8) * 2;
int uncompressed_bpg_rate;
@@ -184,7 +185,7 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
}
} else {
/* fractional bpp part * 10000 (for precision up to 4 decimal places) */
- int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel);
+ int fractional_bits = fxp_q4_to_frac(vdsc_cfg->bits_per_pixel);
static const s8 ofs_und6[] = {
0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
@@ -263,7 +264,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
- u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
+ u16 compressed_bpp = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16);
int err;
int ret;
@@ -456,36 +457,30 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_PPS0_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_PPS0_VBR_ENABLE;
- drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 0, pps_val);
/* PPS 1 */
pps_val = DSC_PPS1_BPP(vdsc_cfg->bits_per_pixel);
- drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 1, pps_val);
/* PPS 2 */
pps_val = DSC_PPS2_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PPS2_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
- drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 2, pps_val);
/* PPS 3 */
pps_val = DSC_PPS3_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_PPS3_SLICE_WIDTH(vdsc_cfg->slice_width);
- drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 3, pps_val);
/* PPS 4 */
pps_val = DSC_PPS4_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_PPS4_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
- drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 4, pps_val);
/* PPS 5 */
pps_val = DSC_PPS5_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_PPS5_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
- drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 5, pps_val);
/* PPS 6 */
@@ -493,25 +488,21 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_PPS6_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
DSC_PPS6_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_PPS6_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
- drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 6, pps_val);
/* PPS 7 */
pps_val = DSC_PPS7_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_PPS7_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
- drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 7, pps_val);
/* PPS 8 */
pps_val = DSC_PPS8_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_PPS8_INITIAL_OFFSET(vdsc_cfg->initial_offset);
- drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 8, pps_val);
/* PPS 9 */
pps_val = DSC_PPS9_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_PPS9_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
- drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 9, pps_val);
/* PPS 10 */
@@ -519,7 +510,6 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_PPS10_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
DSC_PPS10_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_PPS10_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
- drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 10, pps_val);
/* PPS 16 */
@@ -528,31 +518,25 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
vdsc_cfg->slice_width) |
DSC_PPS16_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
- drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 16, pps_val);
if (DISPLAY_VER(dev_priv) >= 14) {
/* PPS 17 */
pps_val = DSC_PPS17_SL_BPG_OFFSET(vdsc_cfg->second_line_bpg_offset);
- drm_dbg_kms(&dev_priv->drm, "PPS17 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 17, pps_val);
/* PPS 18 */
pps_val = DSC_PPS18_NSL_BPG_OFFSET(vdsc_cfg->nsl_bpg_offset) |
DSC_PPS18_SL_OFFSET_ADJ(vdsc_cfg->second_line_offset_adj);
- drm_dbg_kms(&dev_priv->drm, "PPS18 = 0x%08x\n", pps_val);
intel_dsc_pps_write(crtc_state, 18, pps_val);
}
/* Populate the RC_BUF_THRESH registers */
memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
- for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
rc_buf_thresh_dword[i / 4] |=
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
- drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i,
- rc_buf_thresh_dword[i / 4]);
- }
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
@@ -599,7 +583,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
/* Populate the RC_RANGE_PARAMETERS registers */
memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
- for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++)
rc_range_params_dword[i / 2] |=
(u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
RC_BPG_OFFSET_SHIFT) |
@@ -607,9 +591,6 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
RC_MAX_QP_SHIFT) |
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
- drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i,
- rc_range_params_dword[i / 2]);
- }
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
@@ -989,3 +970,23 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
out:
intel_display_power_put(dev_priv, power_domain, wakeref);
}
+
+static void intel_vdsc_dump_state(struct drm_printer *p, int indent,
+ const struct intel_crtc_state *crtc_state)
+{
+ drm_printf_indent(p, indent,
+ "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, split: %s\n",
+ FXP_Q4_ARGS(crtc_state->dsc.compressed_bpp_x16),
+ crtc_state->dsc.slice_count,
+ str_yes_no(crtc_state->dsc.dsc_split));
+}
+
+void intel_vdsc_state_dump(struct drm_printer *p, int indent,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ intel_vdsc_dump_state(p, indent, crtc_state);
+ drm_dsc_dump_config(p, indent, &crtc_state->dsc.config);
+}
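The driver-local to_bpp_int()/to_bpp_frac() helpers give way to the shared fxp_q4_*() fixed-point helpers used above. A minimal sketch of the assumed Q4 semantics (the authoritative definitions live in the drm fixed-point headers): a Q4 value carries four fractional bits, i.e. bits-per-pixel scaled by 16, which is why the field is named compressed_bpp_x16.

/* Sketch only -- assumed shape of the fxp_q4_*() helpers */
static inline int q4_from_int(int bpp)    { return bpp << 4; }      /* 18 bpp -> 288 */
static inline int q4_to_int(int bpp_x16)  { return bpp_x16 >> 4; }  /* 288 -> 18 bpp */
static inline int q4_to_frac(int bpp_x16) { return bpp_x16 & 0xf; } /* in sixteenths */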
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 2cc41ff08909..290b2e9b3482 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+struct drm_printer;
+
enum transcoder;
struct intel_crtc;
struct intel_crtc_state;
@@ -27,5 +29,7 @@ void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_vdsc_state_dump(struct drm_printer *p, int indent,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5a0da64c7db3..9a51f5bac307 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -17,8 +17,8 @@
bool intel_vrr_is_capable(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
const struct drm_display_info *info = &connector->base.display_info;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp;
/*
@@ -43,7 +43,7 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
}
- return HAS_VRR(i915) &&
+ return HAS_VRR(display) &&
info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}
@@ -89,10 +89,9 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
*/
static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return crtc_state->vrr.guardband;
else
/* The hw imposes the extra scanline before frame start */
@@ -113,11 +112,11 @@ int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
static bool
is_cmrr_frac_required(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (!HAS_CMRR(i915))
+ if (!HAS_CMRR(display))
return false;
actual_refresh_k =
@@ -161,8 +160,7 @@ void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
@@ -186,7 +184,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (!crtc_state->vrr.in_range)
return;
- if (HAS_LRR(i915))
+ if (HAS_LRR(display))
crtc_state->update_lrr = true;
vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
@@ -233,8 +231,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
- if (intel_dp_as_sdp_supported(intel_dp) &&
- crtc_state->vrr.enable) {
+ if (intel_dp->as_sdp_supported && crtc_state->vrr.enable) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
crtc_state->hw.adjusted_mode.vsync_start);
@@ -247,7 +244,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* For XE_LPD+, we use guardband and pipeline override
* is deprecated.
*/
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
crtc_state->vrr.guardband =
crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
} else {
@@ -259,9 +256,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
else
@@ -272,7 +269,7 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
@@ -280,133 +277,130 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
* TGL: generate VRR "safe window" for DSB vblank waits
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
- if (IS_DISPLAY_VER(dev_priv, 12, 13))
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+ if (IS_DISPLAY_VER(display, 12, 13))
+ intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
if (!crtc_state->vrr.flipline) {
- intel_de_write(dev_priv,
- TRANS_VRR_CTL(dev_priv, cpu_transcoder), 0);
+ intel_de_write(display,
+ TRANS_VRR_CTL(display, cpu_transcoder), 0);
return;
}
if (crtc_state->cmrr.enable) {
- intel_de_write(dev_priv, TRANS_CMRR_M_HI(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_CMRR_M_HI(display, cpu_transcoder),
upper_32_bits(crtc_state->cmrr.cmrr_m));
- intel_de_write(dev_priv, TRANS_CMRR_M_LO(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_CMRR_M_LO(display, cpu_transcoder),
lower_32_bits(crtc_state->cmrr.cmrr_m));
- intel_de_write(dev_priv, TRANS_CMRR_N_HI(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_CMRR_N_HI(display, cpu_transcoder),
upper_32_bits(crtc_state->cmrr.cmrr_n));
- intel_de_write(dev_priv, TRANS_CMRR_N_LO(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_CMRR_N_LO(display, cpu_transcoder),
lower_32_bits(crtc_state->cmrr.cmrr_n));
}
- intel_de_write(dev_priv, TRANS_VRR_VMIN(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
crtc_state->vrr.vmin - 1);
- intel_de_write(dev_priv, TRANS_VRR_VMAX(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
crtc_state->vrr.vmax - 1);
- intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
trans_vrr_ctl(crtc_state));
- intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
crtc_state->vrr.flipline - 1);
}
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->vrr.enable)
return;
- intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN | TRANS_PUSH_SEND);
}
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->vrr.enable)
return false;
- return intel_de_read(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder)) & TRANS_PUSH_SEND;
+ return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND;
}
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->vrr.enable)
return;
- intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (HAS_AS_SDP(dev_priv))
- intel_de_write(dev_priv,
- TRANS_VRR_VSYNC(dev_priv, cpu_transcoder),
+ if (HAS_AS_SDP(display))
+ intel_de_write(display,
+ TRANS_VRR_VSYNC(display, cpu_transcoder),
VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
VRR_VSYNC_START(crtc_state->vrr.vsync_start));
if (crtc_state->cmrr.enable) {
- intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
trans_vrr_ctl(crtc_state));
} else {
- intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
}
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(old_crtc_state);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
if (!old_crtc_state->vrr.enable)
return;
- intel_de_write(dev_priv, TRANS_VRR_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(dev_priv,
- TRANS_VRR_STATUS(dev_priv, cpu_transcoder),
+ intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
- intel_de_write(dev_priv, TRANS_PUSH(dev_priv, cpu_transcoder), 0);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
- if (HAS_AS_SDP(dev_priv))
- intel_de_write(dev_priv,
- TRANS_VRR_VSYNC(dev_priv, cpu_transcoder), 0);
+ if (HAS_AS_SDP(display))
+ intel_de_write(display,
+ TRANS_VRR_VSYNC(display, cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 trans_vrr_ctl, trans_vrr_vsync;
- trans_vrr_ctl = intel_de_read(dev_priv,
- TRANS_VRR_CTL(dev_priv, cpu_transcoder));
+ trans_vrr_ctl = intel_de_read(display,
+ TRANS_VRR_CTL(display, cpu_transcoder));
crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
- if (HAS_CMRR(dev_priv))
+ if (HAS_CMRR(display))
crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);
if (crtc_state->cmrr.enable) {
crtc_state->cmrr.cmrr_n =
- intel_de_read64_2x32(dev_priv, TRANS_CMRR_N_LO(dev_priv, cpu_transcoder),
- TRANS_CMRR_N_HI(dev_priv, cpu_transcoder));
+ intel_de_read64_2x32(display, TRANS_CMRR_N_LO(display, cpu_transcoder),
+ TRANS_CMRR_N_HI(display, cpu_transcoder));
crtc_state->cmrr.cmrr_m =
- intel_de_read64_2x32(dev_priv, TRANS_CMRR_M_LO(dev_priv, cpu_transcoder),
- TRANS_CMRR_M_HI(dev_priv, cpu_transcoder));
+ intel_de_read64_2x32(display, TRANS_CMRR_M_LO(display, cpu_transcoder),
+ TRANS_CMRR_M_HI(display, cpu_transcoder));
}
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
crtc_state->vrr.guardband =
REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
else
@@ -415,21 +409,21 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) {
- crtc_state->vrr.flipline = intel_de_read(dev_priv,
- TRANS_VRR_FLIPLINE(dev_priv, cpu_transcoder)) + 1;
- crtc_state->vrr.vmax = intel_de_read(dev_priv,
- TRANS_VRR_VMAX(dev_priv, cpu_transcoder)) + 1;
- crtc_state->vrr.vmin = intel_de_read(dev_priv,
- TRANS_VRR_VMIN(dev_priv, cpu_transcoder)) + 1;
+ crtc_state->vrr.flipline = intel_de_read(display,
+ TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1;
+ crtc_state->vrr.vmax = intel_de_read(display,
+ TRANS_VRR_VMAX(display, cpu_transcoder)) + 1;
+ crtc_state->vrr.vmin = intel_de_read(display,
+ TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
}
if (crtc_state->vrr.enable) {
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- if (HAS_AS_SDP(dev_priv)) {
+ if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
- intel_de_read(dev_priv,
- TRANS_VRR_VSYNC(dev_priv, cpu_transcoder));
+ intel_de_read(display,
+ TRANS_VRR_VSYNC(display, cpu_transcoder));
crtc_state->vrr.vsync_start =
REG_FIELD_GET(VRR_VSYNC_START_MASK, trans_vrr_vsync);
crtc_state->vrr.vsync_end =
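Throughout intel_vrr.c, struct drm_i915_private gives way to struct intel_display, obtained once per function via to_intel_display(). A sketch of the resulting pattern, illustrative rather than a verbatim hunk from this patch:

/* Hypothetical function showing the converted idiom */
static int vrr_vblank_exit_sketch(const struct intel_crtc_state *crtc_state)
{
	/* Single conversion at the top; no detour through crtc->base.dev */
	struct intel_display *display = to_intel_display(crtc_state);

	if (DISPLAY_VER(display) >= 13)
		return crtc_state->vrr.guardband;

	return 0; /* pre-Xe_LPD pipeline-full path elided in this sketch */
}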
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index ba5a628b4757..17d4c880ecc4 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -14,6 +14,7 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
+#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -537,6 +538,8 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
/*
* Align to at least 4x1 main surface
* tiles (16K) to match 64B of AUX.
@@ -948,6 +951,9 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS:
return PLANE_CTL_TILED_4 | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_4_TILED_BMG_CCS:
+ case I915_FORMAT_MOD_4_TILED_LNL_CCS:
+ return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
@@ -1085,11 +1091,6 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(dev_priv) == 13)
plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
- if (GRAPHICS_VER(dev_priv) >= 20 &&
- fb->modifier == I915_FORMAT_MOD_4_TILED) {
- plane_ctl |= PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
- }
-
return plane_ctl;
}
@@ -1162,7 +1163,7 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
* within the DPT is always 0.
*/
drm_WARN_ON(&i915->drm, plane_state->dpt_vma &&
- plane_state->dpt_vma->node.start);
+ intel_dpt_offset(plane_state->dpt_vma));
drm_WARN_ON(&i915->drm, offset & 0x1fffff);
return offset >> 9;
} else {
@@ -2452,6 +2453,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (gen12_plane_has_mc_ccs(i915, plane_id))
caps |= INTEL_PLANE_CAP_CCS_MC;
+ if (DISPLAY_VER(i915) >= 14 && IS_DGFX(i915))
+ caps |= INTEL_PLANE_CAP_NEED64K_PHYS;
+
return caps;
}
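The dropped GRAPHICS_VER(i915) >= 20 special case in skl_plane_ctl() is subsumed by the explicit BMG/LNL CCS modifiers now handled in skl_plane_ctl_tiling(). A hypothetical predicate capturing the new rule:

/* Hypothetical helper: render decompression is keyed off explicit
 * CCS modifiers rather than a blanket graphics-version check.
 */
static bool modifier_needs_render_decomp(u64 modifier)
{
	switch (modifier) {
	case I915_FORMAT_MOD_4_TILED_BMG_CCS:
	case I915_FORMAT_MOD_4_TILED_LNL_CCS:
		return true;
	default:
		return false;
	}
}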
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index a2726364b34d..045c7cac166b 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -2830,17 +2830,17 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
}
/*
- * If Fixed Refresh Rate:
+ * If Fixed Refresh Rate, or the VRR case where Vmin == Vmax == Flipline:
* Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from
* watermark level1 and up and above. If watermark level 1 is
* invalid program it with all 1's.
* Program PKG_C_LATENCY Added Wake Time = DSB execution time
- * If Variable Refresh Rate:
+ * If Variable Refresh Rate, where Vmin, Vmax and Flipline are not all equal:
* Program DEEP PKG_C_LATENCY Pkg C with all 1's.
* Program PKG_C_LATENCY Added Wake Time = 0
*/
static void
-skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
+skl_program_dpkgc_latency(struct drm_i915_private *i915, bool enable_dpkgc)
{
u32 max_latency = 0;
u32 clear = 0, val = 0;
@@ -2849,15 +2849,15 @@ skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
if (DISPLAY_VER(i915) < 20)
return;
- if (vrr_enabled) {
- max_latency = LNL_PKG_C_LATENCY_MASK;
- added_wake_time = 0;
- } else {
+ if (enable_dpkgc) {
max_latency = skl_watermark_max_latency(i915, 1);
if (max_latency == 0)
max_latency = LNL_PKG_C_LATENCY_MASK;
added_wake_time = DSB_EXE_TIME +
i915->display.sagv.block_time_us;
+ } else {
+ max_latency = LNL_PKG_C_LATENCY_MASK;
+ added_wake_time = 0;
}
clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
@@ -2873,7 +2873,7 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
- bool vrr_enabled = false;
+ bool enable_dpkgc = false;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
@@ -2899,11 +2899,13 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- if (new_crtc_state->vrr.enable)
- vrr_enabled = true;
+ if ((new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
+ new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline) ||
+ !new_crtc_state->vrr.enable)
+ enable_dpkgc = true;
}
- skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled);
+ skl_program_dpkgc_latency(to_i915(state->base.dev), enable_dpkgc);
skl_print_wm_changes(state);
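Condensed into a hypothetical per-pipe predicate, the new rule reads: DPKGC latencies may be programmed whenever the pipe is effectively fixed-rate, which now includes VRR timings pinned so that Vmin, Vmax and Flipline coincide.

static bool can_enable_dpkgc(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->vrr.enable)
		return true; /* plain fixed refresh rate */

	/* VRR, but pinned: Vmin == Vmax == Flipline behaves as fixed rate */
	return crtc_state->vrr.vmin == crtc_state->vrr.vmax &&
	       crtc_state->vrr.vmin == crtc_state->vrr.flipline;
}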
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 931d2cf74ed8..d21f3fb39706 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1879,6 +1879,7 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -1890,7 +1891,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
- if (!intel_bios_is_dsi_present(dev_priv, &port))
+ if (!intel_bios_is_dsi_present(display, &port))
return;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
@@ -1945,7 +1946,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel_late(dev_priv, &connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(display, &connector->panel, NULL, NULL);
if (connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d54162ce0f99..a3b83cfe1726 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -12,8 +12,6 @@
#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>
-#include "display/intel_frontbuffer.h"
-
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
@@ -827,7 +825,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
struct i915_gem_context *ctx;
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
- if (unlikely(IS_ERR(ctx)))
+ if (IS_ERR(ctx))
return PTR_ERR(ctx);
eb->gem_context = ctx;
@@ -1533,7 +1531,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
u64_to_user_ptr(entry->relocs_ptr);
unsigned long remain = entry->relocation_count;
- if (unlikely(remain > N_RELOC(ULONG_MAX)))
+ if (unlikely(remain > N_RELOC(INT_MAX)))
return -EINVAL;
/*
@@ -1641,7 +1639,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
if (size == 0)
return 0;
- if (size > N_RELOC(ULONG_MAX))
+ if (size > N_RELOC(INT_MAX))
return -EINVAL;
addr = u64_to_user_ptr(entry->relocs_ptr);
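Bounding the relocation count by N_RELOC(INT_MAX) instead of N_RELOC(ULONG_MAX) keeps the count within int range, so later size arithmetic cannot overflow. A sketch of the check, assuming N_RELOC() divides a byte budget by the entry size:

#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))

static int check_reloc_count_sketch(unsigned long remain)
{
	/* After this, remain * sizeof(entry) stays below INT_MAX */
	if (remain > N_RELOC(INT_MAX))
		return -EINVAL;
	return 0;
}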
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index cac6d4184506..21274aa9bddd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -252,6 +252,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
+ unsigned long obj_offset;
resource_size_t iomap;
int err;
@@ -273,10 +274,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
iomap -= obj->mm.region->region.start;
}
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
/* PTEs are revoked in obj->ops->put_pages() */
err = remap_io_sg(area,
area->vm_start, area->vm_end - area->vm_start,
- obj->mm.pages->sgl, iomap);
+ obj->mm.pages->sgl, obj_offset, iomap);
if (area->vm_flags & VM_WRITE) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -293,8 +295,10 @@ out:
static void set_address_limits(struct vm_area_struct *area,
struct i915_vma *vma,
unsigned long obj_offset,
+ resource_size_t gmadr_start,
unsigned long *start_vaddr,
- unsigned long *end_vaddr)
+ unsigned long *end_vaddr,
+ unsigned long *pfn)
{
unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
long start, end; /* memory boundaries */
@@ -323,6 +327,10 @@ static void set_address_limits(struct vm_area_struct *area,
/* Let's move back into the "<< PAGE_SHIFT" domain */
*start_vaddr = (unsigned long)start << PAGE_SHIFT;
*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+
+ *pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+ *pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
+ *pfn += obj_offset - vma->gtt_view.partial.offset;
}
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
@@ -441,11 +449,13 @@ retry:
if (ret)
goto err_unpin;
- set_address_limits(area, vma, obj_offset, &start, &end);
-
- pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
- pfn += (start - area->vm_start) >> PAGE_SHIFT;
- pfn += obj_offset - vma->gtt_view.partial.offset;
+ /*
+ * Hand all the necessary parameters to this helper, which performs
+ * the arithmetic to derive the virtual address start and end as well
+ * as the PFN (Page Frame Number).
+ */
+ set_address_limits(area, vma, obj_offset, ggtt->gmadr.start,
+ &start, &end, &pfn);
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
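With the extra gmadr_start argument, set_address_limits() now derives the fault PFN alongside the address limits, keeping all of the arithmetic in one helper. Condensed from the code above:

/*
 * pfn  = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; aperture base page
 * pfn += (start_vaddr - area->vm_start) >> PAGE_SHIFT;        offset of the fault
 * pfn += obj_offset - vma->gtt_view.partial.offset;           partial-view bias
 */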
@@ -1071,9 +1081,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
rcu_read_lock();
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
- node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
+ node = drm_vma_offset_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
if (node && drm_vma_node_is_allowed(node, priv)) {
/*
* Skip 0-refcnted objects as it is in the process of being
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 5d7446a48ae7..3dc61cbd2e11 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -89,7 +89,6 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
* @handle: userspace handle
*
* Returns:
- *
* A pointer to the object named by the handle if such exists on @filp, NULL
* otherwise. This object is only valid whilst under the RCU read lock, and
* note carefully the object may be in the process of being destroyed.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 1495b6074492..68413c05c812 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -535,7 +535,7 @@ struct drm_i915_gem_object {
* I915_CACHE_NONE. The only exception is userptr objects, where we
* instead force I915_CACHE_LLC, but we also don't allow userspace to
* ever change the @cache_level for such objects. Another special case
- * is dma-buf, which doesn't rely on @cache_dirty, but there we
+ * is dma-buf, which doesn't rely on @cache_dirty, but there we
* always do a forced flush when acquiring the pages, if there is a
* chance that the pages can be read directly from main memory with
* the GPU.
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 3527b8f446fe..2fda549dd82d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -506,7 +506,7 @@ static int igt_dmabuf_export_vmap(void *arg)
goto out;
}
- if (memchr_inv(ptr, 0, dmabuf->size)) {
+ if (!mem_is_zero(ptr, dmabuf->size)) {
pr_err("Exported object not initialised to zero!\n");
err = -EINVAL;
goto out;
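mem_is_zero() reads better at the call site than a negated memchr_inv(); its assumed shape (matching the linux/string.h helper) is simply:

static inline bool mem_is_zero_sketch(const void *s, size_t n)
{
	return !memchr_inv(s, 0, n);
}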
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 3b740ca25000..4d30a86016f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -693,6 +693,8 @@ void intel_engines_release(struct intel_gt *gt)
memset(&engine->reset, 0, sizeof(engine->reset));
}
+
+ llist_del_all(&gt->i915->uabi_engines_llist);
}
void intel_engine_free_request_pool(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 206a5e0fedf1..d60a6ca0cae5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -12,7 +12,6 @@
#include <drm/intel/i915_drm.h>
#include <drm/intel/intel-gtt.h>
-#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
#include "intel_context.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 93bc1cc1ee7e..0ffba50981e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -418,7 +418,6 @@ out_unpin:
* For an untiled surface, this removes any existing fence.
*
* Returns:
- *
* 0 on success, negative error code on failure.
*/
int i915_vma_pin_fence(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 2bd8d98d2110..5394bc7d4daf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -220,6 +220,7 @@
#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define CMD_3DSTATE_MESH_CONTROL ((0x3 << 29) | (0x3 << 27) | (0x0 << 24) | (0x77 << 16) | (0x3))
#define XY_CTRL_SURF_INSTR_SIZE 5
#define MI_FLUSH_DW_SIZE 3
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index b5e114d284ad..998ca029b73a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -174,7 +174,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
-void intel_gt_release_all(struct drm_i915_private *i915);
#define for_each_gt(gt__, i915__, id__) \
for ((id__) = 0; \
@@ -208,4 +207,10 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
void intel_gt_bind_context_set_ready(struct intel_gt *gt);
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
+
+static inline void intel_gt_set_wedged_async(struct intel_gt *gt)
+{
+ queue_work(system_highpri_wq, &gt->wedge);
+}
+
#endif /* __INTEL_GT_H__ */
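intel_gt_set_wedged_async() lets callers that must not sleep or take runtime PM defer the wedge; the gt->wedge worker (see set_wedged_work() in intel_reset.c below) does the heavy lifting. Usage sketch with a hypothetical caller:

static void handle_fatal_irq_error(struct intel_gt *gt)
{
	/* Safe from atomic context: queues gt->wedge on system_highpri_wq */
	intel_gt_set_wedged_async(gt);
}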
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index e42b3a5d4e63..57a3c83d3655 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -1553,6 +1553,8 @@
#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
#define VLV_MEDIA_C0_COUNT _MMIO(0x13811c)
+#define PCU_PWM_FAN_SPEED _MMIO(0x138140)
+
#define GEN12_RPSTAT1 _MMIO(0x1381b4)
#define GEN12_VOLTAGE_MASK REG_GENMASK(10, 0)
#define GEN12_CAGF_MASK REG_GENMASK(19, 11)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index cfdd2ad5e954..bcee084b1f27 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -292,6 +292,8 @@ struct intel_gt {
struct gt_defaults defaults;
struct kobject *sysfs_defaults;
+ struct work_struct wedge;
+
struct i915_perf_gt perf;
/** link: &ggtt.gt_list */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 735cd23a43c6..8f1ea95471ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1013,6 +1013,15 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
GT_TRACE(gt, "end\n");
}
+static void set_wedged_work(struct work_struct *w)
+{
+ struct intel_gt *gt = container_of(w, struct intel_gt, wedge);
+ intel_wakeref_t wf;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wf)
+ __intel_gt_set_wedged(gt);
+}
+
void intel_gt_set_wedged(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
@@ -1614,6 +1623,7 @@ void intel_gt_init_reset(struct intel_gt *gt)
init_waitqueue_head(&gt->reset.queue);
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+ INIT_WORK(&gt->wedge, set_wedged_work);
/*
* While undesirable to wait inside the shrinker, complain anyway.
@@ -1640,7 +1650,7 @@ static void intel_wedge_me(struct work_struct *work)
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name);
- intel_gt_set_wedged(w->gt);
+ set_wedged_work(&w->gt->wedge);
}
void __intel_init_wedge(struct intel_wedge_me *w,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 09a287c1aedd..e539a656cfc3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -111,9 +111,8 @@ static void wa_init_finish(struct i915_wa_list *wal)
{
/* Trim unused entries. */
if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
- struct i915_wa *list = kmemdup(wal->list,
- wal->count * sizeof(*list),
- GFP_KERNEL);
+ struct i915_wa *list = kmemdup_array(wal->list, wal->count,
+ sizeof(*list), GFP_KERNEL);
if (list) {
kfree(wal->list);
@@ -974,7 +973,12 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
if (ret)
return ret;
- cs = intel_ring_begin(rq, (wal->count * 2 + 2));
+ if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
+ IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS)
+ cs = intel_ring_begin(rq, (wal->count * 2 + 6));
+ else
+ cs = intel_ring_begin(rq, (wal->count * 2 + 2));
+
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1004,6 +1008,15 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
}
*cs++ = MI_NOOP;
+ /* Wa_14019789679 */
+ if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
+ IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS) {
+ *cs++ = CMD_3DSTATE_MESH_CONTROL;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ }
+
intel_uncore_forcewake_put__locked(uncore, fw);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(wal->gt, flags);
@@ -2058,7 +2071,7 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/* Required by recommended tuning setting (not a workaround) */
whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
-
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
break;
default:
break;
@@ -2073,7 +2086,7 @@ static void xelpg_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/* Required by recommended tuning setting (not a workaround) */
whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
-
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
break;
default:
break;
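The dword budget handed to intel_ring_begin() must match exactly what gets emitted, so the Wa_14019789679 path reserves four extra dwords: the 3DSTATE_MESH_CONTROL header, two payload dwords, and the trailing MI_NOOP. As a hypothetical helper:

static u32 ctx_wa_ring_dwords(u32 wa_count, bool mesh_control_wa)
{
	/* 2 framing dwords + 2 per workaround, +4 for 3DSTATE_MESH_CONTROL */
	return wa_count * 2 + 2 + (mesh_control_wa ? 4 : 0);
}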
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 3eff364ccf3a..ca460cee4f8b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -336,7 +336,7 @@ static int clear(struct intel_migrate *migrate,
if (vaddr[x] != val) {
pr_err("%ps failed, (%u != %u), offset: %zu\n",
- fn, vaddr[x], val, x * sizeof(u32));
+ fn, vaddr[x], val, x * sizeof(u32));
igt_hexdump(vaddr + i * 1024, 4096);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index 021f51d9b456..aab2759067d2 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -530,9 +530,8 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
err_object:
kobject_put(kobj);
err_engine:
- dev_err(kdev, "Failed to add sysfs engine '%s'\n",
- engine->name);
- break;
+ dev_warn(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
}
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 37ff539a6963..0c709e6c15be 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -107,6 +107,7 @@ enum {
enum {
GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001,
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002,
+ GUC_WORKAROUND_KLV_AVOID_GFX_CLEAR_WHILE_ACTIVE = 0x9006,
};
#endif /* _ABI_GUC_KLVS_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5e60a34692af..097fc6bd1285 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -296,7 +296,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
/* Wa_16019325821 */
/* Wa_14019159160 */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
flags |= GUC_WA_RCS_CCS_SWITCHOUT;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 7995f059f30d..46fabbfc775e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -815,8 +815,7 @@ engine_instance_list:
return PAGE_ALIGN(total_size);
}
-static void guc_waklv_enable_simple(struct intel_guc *guc,
- u32 klv_id, u32 *offset, u32 *remain)
+static void guc_waklv_enable_simple(struct intel_guc *guc, u32 *offset, u32 *remain, u32 klv_id)
{
u32 size;
u32 klv_entry[] = {
@@ -850,19 +849,20 @@ static void guc_waklv_init(struct intel_guc *guc)
remain = guc_ads_waklv_size(guc);
/* Wa_14019159160 */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
- guc_waklv_enable_simple(guc,
- GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE,
- &offset, &remain);
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE);
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_AVOID_GFX_CLEAR_WHILE_ACTIVE);
+ }
/* Wa_16021333562 */
if ((GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 21, 1)) &&
(IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) ||
IS_MEDIA_GT_IP_RANGE(gt, IP_VER(13, 0), IP_VER(13, 0)) ||
IS_DG2(gt->i915)))
- guc_waklv_enable_simple(guc,
- GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
- &offset, &remain);
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED);
size = guc_ads_waklv_size(guc) - remain;
if (!size)
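Reordering guc_waklv_enable_simple()'s parameters puts the variable klv_id last, so back-to-back enables read naturally. Each zero-length workaround KLV is a single key/length dword; a sketch using the assumed GuC KLV field macros:

u32 klv_entry[] = {
	/* 16:16 key/length, zero payload dwords */
	FIELD_PREP(GUC_KLV_0_KEY, GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE) |
	FIELD_PREP(GUC_KLV_0_LEN, 0),
};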
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 908ebfa22933..ed979847187f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2014,11 +2014,12 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
/*
* Technically possible for either of these values to be non-zero here,
- * but very unlikely + harmless. Regardless let's add a warn so we can
+ * but very unlikely + harmless. Regardless let's add an error so we can
* see in CI if this happens frequently or as a precursor to taking down the
* machine.
*/
- GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
+ if (atomic_read(&guc->outstanding_submission_g2h))
+ guc_err(guc, "Unexpected outstanding GuC to Host response(s) in reset finish\n");
atomic_set(&guc->outstanding_submission_g2h, 0);
intel_guc_global_policies_update(guc);
@@ -4506,7 +4507,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
/* Wa_16019325821 */
/* Wa_14019159160 */
if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
- IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 7a63abf8f644..5b8080ec5315 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -99,7 +99,7 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index af9afdb53c7f..c022dc736045 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -42,8 +42,8 @@
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
#define gmbus1_total_byte_count(v) (((v) >> \
GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
-#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
-#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
+#define gmbus1_target_addr(v) (((v) & 0xff) >> 1)
+#define gmbus1_target_index(v) (((v) >> 8) & 0xff)
#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
/* GMBUS0 bits definitions */
@@ -54,7 +54,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
unsigned char chr = 0;
- if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
+ if (edid->state == I2C_NOT_SPECIFIED || !edid->target_selected) {
gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
return 0;
}
@@ -179,7 +179,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
- u32 slave_addr;
+ u32 target_addr;
u32 wvalue = *(u32 *)p_data;
if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
@@ -210,21 +210,21 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
i2c_edid->gmbus.total_byte_count =
gmbus1_total_byte_count(wvalue);
- slave_addr = gmbus1_slave_addr(wvalue);
+ target_addr = gmbus1_target_addr(wvalue);
/* vgpu gmbus only support EDID */
- if (slave_addr == EDID_ADDR) {
- i2c_edid->slave_selected = true;
- } else if (slave_addr != 0) {
+ if (target_addr == EDID_ADDR) {
+ i2c_edid->target_selected = true;
+ } else if (target_addr != 0) {
gvt_dbg_dpy(
- "vgpu%d: unsupported gmbus slave addr(0x%x)\n"
+ "vgpu%d: unsupported gmbus target addr(0x%x)\n"
" gmbus operations will be ignored.\n",
- vgpu->id, slave_addr);
+ vgpu->id, target_addr);
}
if (wvalue & GMBUS_CYCLE_INDEX)
i2c_edid->current_edid_read =
- gmbus1_slave_index(wvalue);
+ gmbus1_target_index(wvalue);
i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
switch (gmbus1_bus_cycle(wvalue)) {
@@ -523,7 +523,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
} else if (addr == EDID_ADDR) {
i2c_edid->state = I2C_AUX_CH;
i2c_edid->port = port_idx;
- i2c_edid->slave_selected = true;
+ i2c_edid->target_selected = true;
if (intel_vgpu_has_monitor_on_port(vgpu,
port_idx) &&
intel_vgpu_port_is_dp(vgpu, port_idx))
@@ -542,7 +542,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
return;
if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
- if (i2c_edid->edid_available && i2c_edid->slave_selected) {
+ if (i2c_edid->edid_available && i2c_edid->target_selected) {
unsigned char val = edid_get_byte(vgpu);
aux_data_for_write = (val << 16);
@@ -571,7 +571,7 @@ void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
edid->state = I2C_NOT_SPECIFIED;
edid->port = -1;
- edid->slave_selected = false;
+ edid->target_selected = false;
edid->edid_available = false;
edid->current_edid_read = 0;
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index dfe0cbc6aad8..c3b5a55aecb3 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -80,7 +80,7 @@ enum gmbus_cycle_type {
* R/W Protect
* Command and Status.
* bit0 is the direction bit: 1 is read; 0 is write.
- * bit1 - bit7 is slave 7-bit address.
+ * bit1 - bit7 is the 7-bit target address.
* bit16 - bit24 total byte count (ignore?)
*
* GMBUS2:
@@ -130,7 +130,7 @@ struct intel_vgpu_i2c_edid {
enum i2c_state state;
unsigned int port;
- bool slave_selected;
+ bool target_selected;
bool edid_available;
unsigned int current_edid_read;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 38830818c120..ca0fb126b02d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -425,6 +425,18 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
.release = intel_vgpu_reg_release_opregion,
};
+static bool edid_valid(const void *edid, size_t size)
+{
+ const struct drm_edid *drm_edid;
+ bool is_valid;
+
+ drm_edid = drm_edid_alloc(edid, size);
+ is_valid = drm_edid_valid(drm_edid);
+ drm_edid_free(drm_edid);
+
+ return is_valid;
+}
+
static int handle_edid_regs(struct intel_vgpu *vgpu,
struct vfio_edid_region *region, char *buf,
size_t count, u16 offset, bool is_write)
@@ -443,11 +455,7 @@ static int handle_edid_regs(struct intel_vgpu *vgpu,
switch (offset) {
case offsetof(struct vfio_region_gfx_edid, link_state):
if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
- if (!drm_edid_block_valid(
- (u8 *)region->edid_blob,
- 0,
- true,
- NULL)) {
+ if (!edid_valid(region->edid_blob, EDID_SIZE)) {
gvt_vgpu_err("invalid EDID blob\n");
return -EINVAL;
}
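Unlike drm_edid_block_valid(), which checked a single 128-byte block, drm_edid_valid() validates the EDID as a whole through the opaque drm_edid container built in the edid_valid() helper above. Typical call site:

if (!edid_valid(region->edid_blob, EDID_SIZE))
	return -EINVAL; /* reject the blob before reporting link-up */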
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d2bed466540a..908f910420c2 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -86,7 +86,7 @@ struct efp_child_device_config {
u8 skip2;
u8 dvo_port;
u8 i2c_pin; /* for add-in card */
- u8 slave_addr; /* for add-in card */
+ u8 target_addr; /* for add-in card */
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_config;
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 020f1aa28322..63874d385c6f 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -227,7 +227,7 @@ TRACE_EVENT(oos_sync,
#define GVT_CMD_STR_LEN 40
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
- u32 cmd_len, u32 buf_type, u32 buf_addr_type,
+ u32 cmd_len, u32 buf_type, u32 buf_addr_type,
void *workload, const char *cmd_name),
TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bc717cf544e4..f969f585d07b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,6 +66,7 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_display *display = &i915->display;
struct drm_printer p = drm_seq_file_printer(m);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
@@ -77,7 +78,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915->params, &p);
- intel_display_params_dump(i915, &p);
+ intel_display_params_dump(display, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index cf276299bccb..a40f05b993da 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -49,7 +49,7 @@
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_driver.h"
-#include "display/intel_display_types.h"
+#include "display/intel_display.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
@@ -58,10 +58,8 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
-#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
-#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -442,6 +440,7 @@ static int i915_pcode_init(struct drm_i915_private *i915)
*/
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
@@ -451,8 +450,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_full_ppgtt(dev_priv)) {
- i915_report_error(dev_priv,
- "incompatible vGPU found, support for isolated ppGTT required\n");
+ drm_err(&dev_priv->drm,
+ "incompatible vGPU found, support for isolated ppGTT required\n");
return -ENXIO;
}
}
@@ -465,8 +464,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
if (intel_vgpu_active(dev_priv) &&
!intel_vgpu_has_hwsp_emulation(dev_priv)) {
- i915_report_error(dev_priv,
- "old vGPU host found, support for HWSP emulation required\n");
+ drm_err(&dev_priv->drm,
+ "old vGPU host found, support for HWSP emulation required\n");
return -ENXIO;
}
}
@@ -542,7 +541,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_msi;
- intel_opregion_setup(dev_priv);
+ intel_opregion_setup(display);
ret = i915_pcode_init(dev_priv);
if (ret)
@@ -559,7 +558,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
return 0;
err_opregion:
- intel_opregion_cleanup(dev_priv);
+ intel_opregion_cleanup(display);
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -580,11 +579,12 @@ err_perf:
*/
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
i915_perf_fini(dev_priv);
- intel_opregion_cleanup(dev_priv);
+ intel_opregion_cleanup(display);
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -723,7 +723,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(i915))
return i915;
- pci_set_drvdata(pdev, i915);
+ pci_set_drvdata(pdev, &i915->drm);
/* Device parameters start as a copy of module parameters. */
i915_params_copy(&i915->params, &i915_modparams);
@@ -1014,6 +1014,7 @@ static int i915_drm_prepare(struct drm_device *dev)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
@@ -1049,7 +1050,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_display(dev_priv);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
- intel_opregion_suspend(dev_priv, opregion_target_state);
+ intel_opregion_suspend(display, opregion_target_state);
dev_priv->suspend_count++;
@@ -1138,6 +1139,7 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -1165,7 +1167,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_dmc_resume(dev_priv);
i915_restore_display(dev_priv);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
intel_init_pch_refclk(dev_priv);
@@ -1205,7 +1207,7 @@ static int i915_drm_resume(struct drm_device *dev)
}
intel_hpd_poll_disable(dev_priv);
- intel_opregion_resume(dev_priv);
+ intel_opregion_resume(display);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
@@ -1454,6 +1456,7 @@ static int i915_pm_restore(struct device *kdev)
static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
@@ -1528,7 +1531,7 @@ static int intel_runtime_suspend(struct device *kdev)
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it.
*/
- intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
+ intel_opregion_notify_adapter(display, PCI_D3hot);
} else {
/*
* current versions of firmware which depend on this opregion
@@ -1537,7 +1540,7 @@ static int intel_runtime_suspend(struct device *kdev)
* to distinguish it from notifications that might be sent via
* the suspend path.
*/
- intel_opregion_notify_adapter(dev_priv, PCI_D1);
+ intel_opregion_notify_adapter(display, PCI_D1);
}
assert_forcewakes_inactive(&dev_priv->uncore);
@@ -1552,6 +1555,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
@@ -1566,7 +1570,7 @@ static int intel_runtime_resume(struct device *kdev)
drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
disable_rpm_wakeref_asserts(rpm);
- intel_opregion_notify_adapter(dev_priv, PCI_D0);
+ intel_opregion_notify_adapter(display, PCI_D0);
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
@@ -1694,9 +1698,9 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 110340e02a02..39f6614a0a99 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -365,12 +365,16 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
- return dev_get_drvdata(kdev);
+ struct drm_device *drm = dev_get_drvdata(kdev);
+
+ return drm ? to_i915(drm) : NULL;
}
static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
- return pci_get_drvdata(pdev);
+ struct drm_device *drm = pci_get_drvdata(pdev);
+
+ return drm ? to_i915(drm) : NULL;
}
static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
@@ -408,14 +412,8 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
#define INTEL_REVID(i915) (to_pci_dev((i915)->drm.dev)->revision)
-#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
-#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step)
-
-#define IS_DISPLAY_STEP(__i915, since, until) \
- (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
- INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
#define IS_GRAPHICS_STEP(__i915, since, until) \
(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
@@ -425,10 +423,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
-#define IS_BASEDIE_STEP(__i915, since, until) \
- (drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \
- INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until))
-
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
enum intel_platform p)
@@ -680,9 +674,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
((sizes) & ~RUNTIME_INFO(i915)->page_sizes) == 0; \
})
-/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(i915) (IS_I830(i915) || IS_I845G(i915))
-
#define NEEDS_RC6_CTX_CORRUPTION_WA(i915) \
(IS_BROADWELL(i915) || GRAPHICS_VER(i915) == 9)
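Since drvdata now stores the struct drm_device instead of the i915 pointer, both accessors tolerate a NULL drvdata: PM callbacks can fire before probe has set it. Sketch of the pairing as assumed from the call sites above:

pci_set_drvdata(pdev, &i915->drm);                  /* at probe */

struct drm_device *drm = pci_get_drvdata(pdev);     /* in helpers */
struct drm_i915_private *i915 = drm ? to_i915(drm) : NULL;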
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1391c01d7663..070ab6546987 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -39,8 +39,6 @@
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
-#include "display/intel_display.h"
-
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7bd1861ddbdf..a9662cc6ed1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -15,7 +15,6 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
-#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 96c6cafd5b9e..6469b9bcf2ec 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -660,9 +660,10 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
const struct i915_params *params)
{
struct drm_printer p = i915_error_printer(m);
+ struct intel_display *display = &m->i915->display;
i915_params_dump(params, &p);
- intel_display_params_dump(m->i915, &p);
+ intel_display_params_dump(display, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 49db3e09826c..17d30f6b84b0 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -5,6 +5,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include "i915_drv.h"
@@ -36,6 +37,7 @@ struct hwm_reg {
i915_reg_t pkg_rapl_limit;
i915_reg_t energy_status_all;
i915_reg_t energy_status_tile;
+ i915_reg_t fan_speed;
};
struct hwm_energy_info {
@@ -43,11 +45,17 @@ struct hwm_energy_info {
long accum_energy; /* Accumulated energy for energy1_input */
};
+struct hwm_fan_info {
+ u32 reg_val_prev;
+ u64 time_prev;
+};
+
struct hwm_drvdata {
struct i915_hwmon *hwmon;
struct intel_uncore *uncore;
struct device *hwmon_dev;
struct hwm_energy_info ei; /* Energy info for energy1_input */
+ struct hwm_fan_info fi; /* Fan info for fan1_input */
char name[12];
int gt_n;
bool reset_in_progress;
@@ -276,6 +284,7 @@ static const struct hwmon_channel_info * const hwm_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
NULL
};
@@ -614,6 +623,69 @@ hwm_curr_write(struct hwm_drvdata *ddat, u32 attr, long val)
}
static umode_t
+hwm_fan_is_visible(const struct hwm_drvdata *ddat, u32 attr)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+
+ if (attr == hwmon_fan_input && i915_mmio_reg_valid(hwmon->rg.fan_speed))
+ return 0444;
+
+ return 0;
+}
+
+static int
+hwm_fan_input_read(struct hwm_drvdata *ddat, long *val)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+ struct hwm_fan_info *fi = &ddat->fi;
+ u64 rotations, time_now, time;
+ intel_wakeref_t wakeref;
+ u32 reg_val;
+ int ret = 0;
+
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ mutex_lock(&hwmon->hwmon_lock);
+
+ reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.fan_speed);
+ time_now = get_jiffies_64();
+
+ /*
+ * The HW register value is an accumulated count of pulses from the
+ * PWM fan, at a scale of two pulses per rotation.
+ */
+ rotations = (reg_val - fi->reg_val_prev) / 2;
+
+ time = jiffies_delta_to_msecs(time_now - fi->time_prev);
+ if (unlikely(!time)) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+
+ /*
+ * Calculate fan speed in RPM by averaging the rotations counted
+ * over the time elapsed between two subsequent readings.
+ * RPM = number of rotations * msecs per minute / time in msecs
+ */
+ *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
+
+ fi->reg_val_prev = reg_val;
+ fi->time_prev = time_now;
+exit:
+ mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
+ return ret;
+}
+
+static int
+hwm_fan_read(struct hwm_drvdata *ddat, u32 attr, long *val)
+{
+ if (attr == hwmon_fan_input)
+ return hwm_fan_input_read(ddat, val);
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t
hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -628,6 +700,8 @@ hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
return hwm_energy_is_visible(ddat, attr);
case hwmon_curr:
return hwm_curr_is_visible(ddat, attr);
+ case hwmon_fan:
+ return hwm_fan_is_visible(ddat, attr);
default:
return 0;
}
@@ -648,6 +722,8 @@ hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
return hwm_energy_read(ddat, attr, val);
case hwmon_curr:
return hwm_curr_read(ddat, attr, val);
+ case hwmon_fan:
+ return hwm_fan_read(ddat, attr, val);
default:
return -EOPNOTSUPP;
}
@@ -739,12 +815,14 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
+ hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED;
} else {
hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG;
hwmon->rg.energy_status_all = INVALID_MMIO_REG;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
+ hwmon->rg.fan_speed = INVALID_MMIO_REG;
}
with_intel_runtime_pm(uncore->rpm, wakeref) {
@@ -755,6 +833,16 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
if (i915_mmio_reg_valid(hwmon->rg.pkg_power_sku_unit))
val_sku_unit = intel_uncore_read(uncore,
hwmon->rg.pkg_power_sku_unit);
+
+ /*
+ * Store the initial fan register value, so that we can use it for
+ * initial fan speed calculation.
+ */
+ if (i915_mmio_reg_valid(hwmon->rg.fan_speed)) {
+ ddat->fi.reg_val_prev = intel_uncore_read(uncore,
+ hwmon->rg.fan_speed);
+ ddat->fi.time_prev = get_jiffies_64();
+ }
}
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
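
As a quick sanity check of the RPM formula in hwm_fan_input_read(): with two pulses per rotation, a delta of 200 register counts over 1000 ms is 100 rotations, giving 100 * 60000 / 1000 = 6000 RPM. The same computation in isolation (sketch only, hypothetical helper mirroring the driver code):

	static long fan_rpm(u32 reg_now, u32 reg_prev, u64 msecs)
	{
		u64 rotations = (reg_now - reg_prev) / 2;

		if (!msecs)		/* too soon to average, as in the driver */
			return -EAGAIN;

		/* RPM = rotations * msecs per minute / elapsed msecs */
		return DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), msecs);
	}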
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8059ac7e15fe..2321de48d169 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,7 +34,6 @@
#include <drm/drm_drv.h>
#include "display/intel_display_irq.h"
-#include "display/intel_display_types.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 7998bc74ab49..f5c97a620962 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -122,13 +122,15 @@ int remap_io_mapping(struct vm_area_struct *vma,
* @addr: target user address to start at
* @size: size of map area
* @sgl: Start sg entry
+ * @offset: offset from the start of the page
* @iobase: Use stored dma address offset by this address or pfn if -1
*
* Note: this is only safe if the mm semaphore is held when called.
*/
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
- struct scatterlist *sgl, resource_size_t iobase)
+ struct scatterlist *sgl, unsigned long offset,
+ resource_size_t iobase)
{
struct remap_pfn r = {
.mm = vma->vm_mm,
@@ -141,6 +143,14 @@ int remap_io_sg(struct vm_area_struct *vma,
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+ while (offset >= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT) {
+ offset -= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT;
+ r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase));
+ if (!r.sgt.sgp)
+ return -EINVAL;
+ }
+ r.sgt.curr = offset << PAGE_SHIFT;
+
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
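
The new offset argument is in pages from the start of the scatterlist: the loop above first skips whole sg entries, then parks r.sgt.curr inside the first partially consumed one. A hedged usage sketch (illustrative values; a real caller would come from the fault path):

	/* Map 16 pages of the object starting 4 pages in; iobase of -1 selects pfn mode. */
	err = remap_io_sg(vma, vma->vm_start, 16 << PAGE_SHIFT,
			  obj->mm.pages->sgl, 4, -1);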
diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h
index 04c8974d822b..69f9351b1a1c 100644
--- a/drivers/gpu/drm/i915/i915_mm.h
+++ b/drivers/gpu/drm/i915/i915_mm.h
@@ -30,6 +30,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
- struct scatterlist *sgl, resource_size_t iobase);
+ struct scatterlist *sgl, unsigned long offset,
+ resource_size_t iobase);
#endif /* __I915_MM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 65acd7bf75d0..7ed6d70389af 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -30,15 +30,20 @@ static int i915_check_nomodeset(void)
*/
if (i915_modparams.modeset == 0)
+ pr_warn("i915.modeset=0 is deprecated. Please use the 'nomodeset' kernel parameter instead.\n");
+ else if (i915_modparams.modeset != -1)
+ pr_warn("i915.modeset=%d is deprecated. Please remove it and the 'nomodeset' kernel parameter instead.\n",
+ i915_modparams.modeset);
+
+ if (i915_modparams.modeset == 0)
use_kms = false;
if (drm_firmware_drivers_only() && i915_modparams.modeset == -1)
use_kms = false;
if (!use_kms) {
- /* Silently fail loading to not upset userspace. */
DRM_DEBUG_DRIVER("KMS disabled.\n");
- return 1;
+ return -ENODEV;
}
return 0;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 316e55f3e87b..37746dd619fd 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -64,8 +64,7 @@ struct i915_params i915_modparams __read_mostly = {
*/
i915_param_named(modeset, int, 0400,
- "Use kernel modesetting [KMS] (0=disable, "
- "1=on, -1=force vga console preference [default])");
+ "Deprecated. Use the 'nomodeset' kernel parameter instead.");
i915_param_named_unsafe(reset, uint, 0400,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index ce4dfd65fafa..d37bb3a704d0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,7 +26,6 @@
#include <drm/drm_drv.h>
#include <drm/intel/i915_pciids.h>
-#include "display/intel_display.h"
#include "display/intel_display_driver.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_sa_media.h"
@@ -880,7 +879,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
{
struct drm_i915_private *i915;
- i915 = pci_get_drvdata(pdev);
+ i915 = pdev_to_i915(pdev);
if (!i915) /* driver load aborted, nothing to cleanup */
return;
@@ -1003,7 +1002,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (i915_inject_probe_failure(pci_get_drvdata(pdev))) {
+ if (i915_inject_probe_failure(pdev_to_i915(pdev))) {
i915_pci_remove(pdev);
return -ENODEV;
}
@@ -1025,7 +1024,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void i915_pci_shutdown(struct pci_dev *pdev)
{
- struct drm_i915_private *i915 = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
i915_driver_shutdown(i915);
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 025a79fe5920..2406cda75b7b 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3751,7 +3751,6 @@ static int i915_perf_release(struct inode *inode, struct file *file)
static const struct file_operations fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.release = i915_perf_release,
.poll = i915_perf_poll,
.read = i915_perf_read,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e3d79227e3c..41f4350a7c6c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2396,6 +2396,7 @@
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
+#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
#define MMIO_TIMEOUT_US(us) ((us) << 0)
/* interrupts */
@@ -2515,6 +2516,10 @@
#define GEN11_PIPE_PLANE7_FLIP_DONE REG_BIT(18) /* icl/tgl */
#define GEN11_PIPE_PLANE6_FLIP_DONE REG_BIT(17) /* icl/tgl */
#define GEN11_PIPE_PLANE5_FLIP_DONE REG_BIT(16) /* icl+ */
+#define GEN12_DSB_2_INT REG_BIT(15) /* tgl+ */
+#define GEN12_DSB_1_INT REG_BIT(14) /* tgl+ */
+#define GEN12_DSB_0_INT REG_BIT(13) /* tgl+ */
+#define GEN12_DSB_INT(dsb_id) REG_BIT(13 + (dsb_id))
#define GEN9_PIPE_CURSOR_FAULT REG_BIT(11) /* skl+ */
#define GEN9_PIPE_PLANE4_FAULT REG_BIT(10) /* skl+ */
#define GEN8_PIPE_CURSOR_FAULT REG_BIT(10) /* bdw */
@@ -2574,6 +2579,7 @@
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
#define GEN8_DE_MISC_IER _MMIO(0x4446c)
+#define XELPDP_RM_TIMEOUT REG_BIT(29)
#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
#define GEN8_DE_MISC_GSE REG_BIT(27)
#define GEN8_DE_EDP_PSR REG_BIT(19)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 613decd47760..8775beab9cb8 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -191,8 +191,8 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
i915_gpu_error_sysfs_teardown(dev_priv);
- device_remove_bin_file(kdev, &dpf_attrs_1);
- device_remove_bin_file(kdev, &dpf_attrs);
+ device_remove_bin_file(kdev, &dpf_attrs_1);
+ device_remove_bin_file(kdev, &dpf_attrs);
kobject_put(dev_priv->sysfs_gt);
}
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index f2ba51c20e97..2576f8f6c0f6 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -11,51 +11,10 @@
#include "i915_reg.h"
#include "i915_utils.h"
-#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
-
-void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...)
-{
- static bool shown_bug_once;
- struct device *kdev = dev_priv->drm.dev;
- bool is_error = level[1] <= KERN_ERR[1];
- bool is_debug = level[1] == KERN_DEBUG[1];
- struct va_format vaf;
- va_list args;
-
- if (is_debug && !drm_debug_enabled(DRM_UT_DRIVER))
- return;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (is_error)
- dev_printk(level, kdev, "%pV", &vaf);
- else
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
-
- va_end(args);
-
- if (is_error && !shown_bug_once) {
- /*
- * Ask the user to file a bug report for the error, except
- * if they may have caused the bug by fiddling with unsafe
- * module parameters.
- */
- if (!test_taint(TAINT_USER))
- dev_notice(kdev, "%s", FDO_BUG_MSG);
- shown_bug_once = true;
- }
-}
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint)
{
- __i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n",
- taint, (void *)_RET_IP_);
+ drm_notice(&i915->drm, "CI tainted: %#x by %pS\n",
+ taint, __builtin_return_address(0));
/* Failures that occur during fault injection testing are expected */
if (!i915_error_injected())
@@ -74,9 +33,9 @@ int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
if (++i915_probe_fail_count < i915_modparams.inject_probe_failure)
return 0;
- __i915_printk(i915, KERN_INFO,
- "Injecting failure %d at checkpoint %u [%s:%d]\n",
- err, i915_modparams.inject_probe_failure, func, line);
+ drm_info(&i915->drm, "Injecting failure %d at checkpoint %u [%s:%d]\n",
+ err, i915_modparams.inject_probe_failure, func, line);
+
i915_modparams.inject_probe_failure = 0;
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 06ec6ceb61d5..71bdc89bd621 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -45,13 +45,6 @@ struct timer_list;
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
-void __printf(3, 4)
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...);
-
-#define i915_report_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
-
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
@@ -69,9 +62,12 @@ bool i915_error_injected(void);
#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)
-#define i915_probe_error(i915, fmt, ...) \
- __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
+#define i915_probe_error(i915, fmt, ...) ({ \
+ if (i915_error_injected()) \
+ drm_dbg(&(i915)->drm, fmt, ##__VA_ARGS__); \
+ else \
+ drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
+})
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
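
Since i915_probe_error() is now a statement expression choosing between drm_dbg() and drm_err(), call sites stay unchanged; only the severity is demoted when the failure was injected. An illustrative call site (hypothetical):

	ret = intel_gt_init_hw(gt);
	if (ret)
		i915_probe_error(i915, "Failed to initialize GT hardware: %d\n", ret);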
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e356dfb883d3..6a6be8048aa8 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -389,7 +389,6 @@ void i915_vma_unpin_iomap(struct i915_vma *vma);
* i915_vma_unpin_fence().
*
* Returns:
- *
* True if the vma has a fence, false otherwise.
*/
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index eede5417cb3f..3c47c625993e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -108,8 +108,6 @@ void intel_device_info_print(const struct intel_device_info *info,
drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step));
drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step));
- drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step));
- drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step));
drm_printf(p, "gt: %d\n", info->gt);
drm_printf(p, "memory-regions: 0x%x\n", info->memory_regions);
@@ -124,7 +122,6 @@ void intel_device_info_print(const struct intel_device_info *info,
#undef PRINT_FLAG
drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu));
- drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
#define ID(id) (id)
@@ -377,10 +374,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
"Disabling ppGTT for VT-d support\n");
runtime->ppgtt_type = INTEL_PPGTT_NONE;
}
-
- runtime->rawclk_freq = intel_read_rawclk(dev_priv);
- drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
-
}
/*
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index df73ef94615d..643ff1bf74ee 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -207,8 +207,6 @@ struct intel_runtime_info {
u16 device_id;
- u32 rawclk_freq;
-
struct intel_step_info step;
unsigned int page_sizes; /* page sizes supported by the HW */
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index a5adfb5d8fd2..285b96fadfd5 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -23,8 +23,7 @@
* use a macro to define these to make it easier to identify the platforms
* where the two steppings can deviate.
*/
-#define COMMON_STEP(x) .graphics_step = STEP_##x, .display_step = STEP_##x, .media_step = STEP_##x
-#define COMMON_GT_MEDIA_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x
+#define COMMON_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x
static const struct intel_step_info skl_revids[] = {
[0x6] = { COMMON_STEP(G0) },
@@ -34,13 +33,13 @@ static const struct intel_step_info skl_revids[] = {
};
static const struct intel_step_info kbl_revids[] = {
- [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
- [2] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 },
- [3] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_B0 },
- [4] = { COMMON_GT_MEDIA_STEP(F0), .display_step = STEP_C0 },
- [5] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B1 },
- [6] = { COMMON_GT_MEDIA_STEP(D1), .display_step = STEP_B1 },
- [7] = { COMMON_GT_MEDIA_STEP(G0), .display_step = STEP_C0 },
+ [1] = { COMMON_STEP(B0) },
+ [2] = { COMMON_STEP(C0) },
+ [3] = { COMMON_STEP(D0) },
+ [4] = { COMMON_STEP(F0) },
+ [5] = { COMMON_STEP(C0) },
+ [6] = { COMMON_STEP(D1) },
+ [7] = { COMMON_STEP(G0) },
};
static const struct intel_step_info bxt_revids[] = {
@@ -64,16 +63,16 @@ static const struct intel_step_info jsl_ehl_revids[] = {
};
static const struct intel_step_info tgl_uy_revids[] = {
- [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
- [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 },
- [2] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
- [3] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
+ [0] = { COMMON_STEP(A0) },
+ [1] = { COMMON_STEP(B0) },
+ [2] = { COMMON_STEP(B1) },
+ [3] = { COMMON_STEP(C0) },
};
/* Same GT stepping between tgl_uy_revids and tgl_revids doesn't mean the same HW */
static const struct intel_step_info tgl_revids[] = {
- [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 },
- [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_D0 },
+ [0] = { COMMON_STEP(A0) },
+ [1] = { COMMON_STEP(B0) },
};
static const struct intel_step_info rkl_revids[] = {
@@ -88,49 +87,49 @@ static const struct intel_step_info dg1_revids[] = {
};
static const struct intel_step_info adls_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A2 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
+ [0xC] = { COMMON_STEP(D0) },
};
static const struct intel_step_info adlp_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
+ [0xC] = { COMMON_STEP(C0) },
};
static const struct intel_step_info dg2_g10_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A1) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
};
static const struct intel_step_info dg2_g11_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 },
- [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x5] = { COMMON_STEP(B1) },
};
static const struct intel_step_info dg2_g12_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_C0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A1) },
};
static const struct intel_step_info adls_rpls_revids[] = {
- [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_D0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 },
+ [0x4] = { COMMON_STEP(D0) },
+ [0xC] = { COMMON_STEP(D0) },
};
static const struct intel_step_info adlp_rplp_revids[] = {
- [0x4] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_E0 },
+ [0x4] = { COMMON_STEP(C0) },
};
static const struct intel_step_info adlp_n_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_D0 },
+ [0x0] = { COMMON_STEP(A0) },
};
static u8 gmd_to_intel_step(struct drm_i915_private *i915,
@@ -158,11 +157,6 @@ void intel_step_init(struct drm_i915_private *i915)
&RUNTIME_INFO(i915)->graphics.ip);
step.media_step = gmd_to_intel_step(i915,
&RUNTIME_INFO(i915)->media.ip);
- step.display_step = STEP_A0 + DISPLAY_RUNTIME_INFO(i915)->ip.step;
- if (step.display_step >= STEP_FUTURE) {
- drm_dbg(&i915->drm, "Using future display steppings\n");
- step.display_step = STEP_FUTURE;
- }
RUNTIME_INFO(i915)->step = step;
@@ -252,7 +246,6 @@ void intel_step_init(struct drm_i915_private *i915)
} else {
drm_dbg(&i915->drm, "Using future steppings\n");
step.graphics_step = STEP_FUTURE;
- step.display_step = STEP_FUTURE;
}
}
@@ -275,8 +268,3 @@ const char *intel_step_name(enum intel_step step)
return "**";
}
}
-
-const char *intel_display_step_name(struct drm_i915_private *i915)
-{
- return intel_step_name(RUNTIME_INFO(i915)->step.display_step);
-}
diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h
index b6f43b624774..22f1d6905160 100644
--- a/drivers/gpu/drm/i915/intel_step.h
+++ b/drivers/gpu/drm/i915/intel_step.h
@@ -16,9 +16,7 @@ struct intel_step_info {
* the expectation breaks gmd_to_intel_step().
*/
u8 graphics_step; /* Represents the compute tile on Xe_HPC */
- u8 display_step;
u8 media_step;
- u8 basedie_step;
};
#define STEP_ENUM_VAL(name) STEP_##name,
@@ -78,6 +76,5 @@ enum intel_step {
void intel_step_init(struct drm_i915_private *i915);
const char *intel_step_name(enum intel_step step);
-const char *intel_display_step_name(struct drm_i915_private *i915);
#endif /* __INTEL_STEP_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 2eba289d88ad..6aa179a3e92a 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -24,6 +24,7 @@
#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
+#include "gt/intel_gt.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
@@ -180,14 +181,16 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
return;
- if (fw_ack(d) == ~0)
+ if (fw_ack(d) == ~0) {
drm_err(&d->uncore->i915->drm,
"%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
intel_uncore_forcewake_domain_to_str(d->id));
- else
+ intel_gt_set_wedged_async(d->uncore->gt);
+ } else {
drm_err(&d->uncore->i915->drm,
"%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
+ }
add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ae6070b5bf07..f08f6674911e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -517,7 +517,7 @@ static int igt_mock_max_segment(void *arg)
if (!IS_ALIGNED(daddr, ps)) {
pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n",
- __func__, &daddr, ps);
+ __func__, &daddr, ps);
err = -EINVAL;
goto out_close;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 0bd29846873b..91794ca17a58 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -172,7 +172,7 @@ struct drm_i915_private *mock_gem_device(void)
return NULL;
}
- pci_set_drvdata(pdev, i915);
+ pci_set_drvdata(pdev, &i915->drm);
/* Device parameters start as a copy of module parameters. */
i915_params_copy(&i915->params, &i915_modparams);
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index ecdd5767d8ef..b574e23d484b 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -668,7 +668,7 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
void *padding_start = ((u8 *)instance) + union_offset + member_size;
size_t padding_size = union_size - member_size;
- return !memchr_inv(padding_start, 0, padding_size);
+ return mem_is_zero(padding_start, padding_size);
}
/**
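
mem_is_zero() is simply the readable spelling of !memchr_inv(..., 0, ...); the padding check keeps the same semantics. Equivalence sketch:

	#include <linux/string.h>

	static bool padding_is_clear(const void *start, size_t len)
	{
		/* identical to !memchr_inv(start, 0, len) */
		return mem_is_zero(start, len);
	}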
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 5ed9c98fb599..20cb46012082 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -782,7 +782,7 @@ static void pvr_queue_start(struct pvr_queue *queue)
}
}
- drm_sched_start(&queue->scheduler, true);
+ drm_sched_start(&queue->scheduler);
}
/**
@@ -842,7 +842,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job)
}
mutex_unlock(&pvr_dev->queues.lock);
- drm_sched_start(sched, true);
+ drm_sched_start(sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
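
drm_sched_start() has dropped its full_recovery bool, so timeout handlers now follow this shape (a sketch of the generic recovery pattern, not pvr-specific code):

	drm_sched_stop(sched, bad_job);
	/* ... reset the hardware and re-queue pending jobs ... */
	drm_sched_start(sched);		/* no 'full_recovery' argument anymore */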
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 55dedd73f528..91d7808a2d8d 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -34,7 +34,7 @@ struct imx_parallel_display_encoder {
struct imx_parallel_display {
struct device *dev;
- void *edid;
+ const struct drm_edid *drm_edid;
u32 bus_format;
u32 bus_flags;
struct drm_display_mode mode;
@@ -62,9 +62,9 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (num_modes > 0)
return num_modes;
- if (imxpd->edid) {
- drm_connector_update_edid_property(connector, imxpd->edid);
- num_modes = drm_add_edid_modes(connector, imxpd->edid);
+ if (imxpd->drm_edid) {
+ drm_edid_connector_update(connector, imxpd->drm_edid);
+ num_modes = drm_edid_connector_add_modes(connector);
}
if (np) {
@@ -331,7 +331,7 @@ static int imx_pd_probe(struct platform_device *pdev)
edidp = of_get_property(np, "edid", &edid_len);
if (edidp)
- imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL);
+ imxpd->drm_edid = drm_edid_alloc(edidp, edid_len);
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
@@ -355,7 +355,11 @@ static int imx_pd_probe(struct platform_device *pdev)
static void imx_pd_remove(struct platform_device *pdev)
{
+ struct imx_parallel_display *imxpd = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &imx_pd_ops);
+
+ drm_edid_free(imxpd->drm_edid);
}
static const struct of_device_id imx_pd_dt_ids[] = {
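
The struct drm_edid lifecycle used above, in isolation (sketch; drm_edid_free() is NULL-safe, so the remove path needs no check):

	const struct drm_edid *drm_edid = drm_edid_alloc(blob, blob_len);	/* copies the blob */

	drm_edid_connector_update(connector, drm_edid);
	num_modes = drm_edid_connector_add_modes(connector);
	/* ... */
	drm_edid_free(drm_edid);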
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index bbf3f8feab94..1a944edb6ddc 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -463,7 +463,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
lima_pm_idle(ldev);
drm_sched_resubmit_jobs(&pipe->base);
- drm_sched_start(&pipe->base, true);
+ drm_sched_start(&pipe->base);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
index 465f622ac05d..2e42c6970c9f 100644
--- a/drivers/gpu/drm/loongson/lsdc_ttm.c
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.c
@@ -341,16 +341,12 @@ void lsdc_bo_unpin(struct lsdc_bo *lbo)
void lsdc_bo_ref(struct lsdc_bo *lbo)
{
- struct ttm_buffer_object *tbo = &lbo->tbo;
-
- ttm_bo_get(tbo);
+ drm_gem_object_get(&lbo->tbo.base);
}
void lsdc_bo_unref(struct lsdc_bo *lbo)
{
- struct ttm_buffer_object *tbo = &lbo->tbo;
-
- ttm_bo_put(tbo);
+ drm_gem_object_put(&lbo->tbo.base);
}
int lsdc_bo_kmap(struct lsdc_bo *lbo)
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 6f34f573e127..175b00e5a253 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -69,6 +69,8 @@ struct mtk_crtc {
/* lock for display hardware access */
struct mutex hw_lock;
bool config_updating;
+ /* lock to protect config_updating against the cmdq callback */
+ spinlock_t config_lock;
};
struct mtk_crtc_state {
@@ -106,51 +108,18 @@ static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
+ unsigned long flags;
+
drm_crtc_handle_vblank(&mtk_crtc->base);
+
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
mtk_crtc_finish_page_flip(mtk_crtc);
mtk_crtc->pending_needs_vblank = false;
}
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
}
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
- size_t size)
-{
- struct device *dev;
- dma_addr_t dma_addr;
-
- pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base)
- return -ENOMEM;
-
- pkt->buf_size = size;
- pkt->cl = (void *)client;
-
- dev = client->chan->mbox->dev;
- dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
- kfree(pkt->va_base);
- return -ENOMEM;
- }
-
- pkt->pa_base = dma_addr;
-
- return 0;
-}
-
-static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
-{
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
- dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
- DMA_TO_DEVICE);
- kfree(pkt->va_base);
-}
-#endif
-
static void mtk_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
@@ -158,7 +127,7 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc)
mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
+ cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
if (mtk_crtc->cmdq_client.chan) {
mbox_free_channel(mtk_crtc->cmdq_client.chan);
@@ -308,12 +277,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client);
struct mtk_crtc_state *state;
unsigned int i;
+ unsigned long flags;
if (data->sta < 0)
return;
state = to_mtk_crtc_state(mtk_crtc->base.state);
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ if (mtk_crtc->config_updating) {
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ goto ddp_cmdq_cb_out;
+ }
+
state->pending_config = false;
if (mtk_crtc->pending_planes) {
@@ -340,6 +316,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
mtk_crtc->pending_async_planes = false;
}
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
+ddp_cmdq_cb_out:
+
mtk_crtc->cmdq_vblank_cnt = 0;
wake_up(&mtk_crtc->cb_blocking_queue);
}
@@ -449,6 +429,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
struct drm_crtc *crtc = &mtk_crtc->base;
+ unsigned long flags;
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
@@ -480,10 +461,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
pm_runtime_put(drm->dev);
if (crtc->state->event && !crtc->state->active) {
- spin_lock_irq(&crtc->dev->event_lock);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
}
@@ -569,9 +550,14 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
struct mtk_drm_private *priv = crtc->dev->dev_private;
unsigned int pending_planes = 0, pending_async_planes = 0;
int i;
+ unsigned long flags;
mutex_lock(&mtk_crtc->hw_lock);
+
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = true;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
if (needs_vblank)
mtk_crtc->pending_needs_vblank = true;
@@ -607,7 +593,7 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
- cmdq_pkt_finalize(cmdq_handle);
+ cmdq_pkt_eoc(cmdq_handle);
dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
cmdq_handle->pa_base,
cmdq_handle->cmd_buf_size,
@@ -625,7 +611,10 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
#endif
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mutex_unlock(&mtk_crtc->hw_lock);
}
@@ -925,7 +914,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_formats(comp),
- mtk_ddp_comp_get_num_formats(comp));
+ mtk_ddp_comp_get_num_formats(comp), i);
if (ret)
return ret;
@@ -1068,6 +1057,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
mutex_init(&mtk_crtc->hw_lock);
+ spin_lock_init(&mtk_crtc->config_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
i = priv->mbox_index++;
@@ -1094,9 +1084,9 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
} else {
- ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
- &mtk_crtc->cmdq_handle,
- PAGE_SIZE);
+ ret = cmdq_pkt_create(&mtk_crtc->cmdq_client,
+ &mtk_crtc->cmdq_handle,
+ PAGE_SIZE);
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
drm_crtc_index(&mtk_crtc->base));
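
The race being closed here is between the atomic-commit thread flipping config_updating and the cmdq mailbox callback consuming pending state. The pattern, reduced to its core (sketch):

	/* commit thread */
	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	mtk_crtc->config_updating = true;
	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

	/* callback: bail out while an update is still being assembled */
	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	if (mtk_crtc->config_updating) {
		spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
		return;
	}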
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 9d6d9fd8342e..89b439dcf3a6 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -56,8 +56,12 @@
#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
+#define OVL_CON_CLRFMT_MAN BIT(23)
#define OVL_CON_BYTE_SWAP BIT(24)
-#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
+
+/* OVL_CON_RGB_SWAP works only if OVL_CON_CLRFMT_MAN is enabled */
+#define OVL_CON_RGB_SWAP BIT(25)
+
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
@@ -65,6 +69,11 @@
#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_UYVY (4 << 12)
#define OVL_CON_CLRFMT_YUYV (5 << 12)
+#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
+#define OVL_CON_CLRFMT_PARGB8888 ((3 << 12) | OVL_CON_CLRFMT_MAN)
+#define OVL_CON_CLRFMT_PABGR8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_RGB_SWAP)
+#define OVL_CON_CLRFMT_PBGRA8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_PRGBA8888 (OVL_CON_CLRFMT_PABGR8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
0 : OVL_CON_CLRFMT_RGB)
#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
@@ -377,7 +386,8 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
+static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt,
+ unsigned int blend_mode)
{
/* The return values of the switch below, "MEM_MODE_INPUT_FORMAT_XXX",
* are defined in the MediaTek HW data sheet.
@@ -398,22 +408,30 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_RGBA1010102:
- return OVL_CON_CLRFMT_RGBA8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_RGBA8888 :
+ OVL_CON_CLRFMT_PRGBA8888;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- return OVL_CON_CLRFMT_BGRA8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_BGRA8888 :
+ OVL_CON_CLRFMT_PBGRA8888;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- return OVL_CON_CLRFMT_ARGB8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_ARGB8888 :
+ OVL_CON_CLRFMT_PARGB8888;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
- return OVL_CON_CLRFMT_ABGR8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_ABGR8888 :
+ OVL_CON_CLRFMT_PABGR8888;
case DRM_FORMAT_UYVY:
return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
case DRM_FORMAT_YUYV:
@@ -434,6 +452,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
unsigned int fmt = pending->format;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
+ unsigned int blend_mode = state->base.pixel_blend_mode;
unsigned int ignore_pixel_alpha = 0;
unsigned int con;
bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
@@ -452,7 +471,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
return;
}
- con = ovl_fmt_convert(ovl, fmt);
+ con = ovl_fmt_convert(ovl, fmt, blend_mode);
if (state->base.fb) {
con |= OVL_CON_AEN;
con |= state->base.alpha & OVL_CON_ALPHA;
@@ -463,7 +482,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
* For RGB888 related formats, whether CONST_BLD is enabled or not won't
* affect the result. Therefore we use !has_alpha as the condition.
*/
- if (state->base.fb && !state->base.fb->format->has_alpha)
+ if ((state->base.fb && !state->base.fb->format->has_alpha) ||
+ blend_mode == DRM_MODE_BLEND_PIXEL_NONE)
ignore_pixel_alpha = OVL_CONST_BLEND;
if (pending->rotation & DRM_MODE_REFLECT_Y) {
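
For the alpha-capable formats, the blend mode now picks between the classic color-format codes and the premultiplied variants, which are the same codes with OVL_CON_CLRFMT_MAN set. For example (values per the defines above):

	con = ovl_fmt_convert(ovl, DRM_FORMAT_ARGB8888, DRM_MODE_BLEND_PREMULTI);
	/* con == OVL_CON_CLRFMT_PARGB8888 == ((3 << 12) | OVL_CON_CLRFMT_MAN) */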
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 1a2a73757370..c6768210b08b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -17,7 +17,6 @@
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
-#include "mtk_crtc.h"
#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
@@ -494,12 +493,12 @@ static int compare_of(struct device *dev, void *data)
static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
- struct device_node *node, *parent;
+ struct device_node *parent;
struct platform_device *comp_pdev;
parent = dev->parent->parent->of_node->parent;
- for_each_child_of_node(parent, node) {
+ for_each_child_of_node_scoped(parent, node) {
const struct of_device_id *of_id;
enum mtk_ovl_adaptor_comp_type type;
int id;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 634bbba5d43f..07243f372260 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -341,14 +341,11 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
- if (of_find_property(dev->of_node, "mediatek,rdma-fifo-size", &ret)) {
- ret = of_property_read_u32(dev->of_node,
- "mediatek,rdma-fifo-size",
- &priv->fifo_size);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to get rdma fifo size\n");
- }
+ ret = of_property_read_u32(dev->of_node,
+ "mediatek,rdma-fifo-size",
+ &priv->fifo_size);
+ if (ret && (ret != -EINVAL))
+ return dev_err_probe(dev, ret, "Failed to get rdma fifo size\n");
/* Disable and clear pending interrupts */
writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 77b50c56c124..3e807195a0d0 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -559,11 +559,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
* Configure the DMA segment size to make sure we get contiguous IOVA
* when importing PRIME buffers.
*/
- ret = dma_set_max_seg_size(dma_dev, UINT_MAX);
- if (ret) {
- dev_err(dma_dev, "Failed to set DMA segment size\n");
- goto err_component_unbind;
- }
+ dma_set_max_seg_size(dma_dev, UINT_MAX);
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret < 0)
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index b6e3c011a12d..eeec641cab60 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -88,12 +88,15 @@
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
+#define HFP_HS_VB_PS_WC GENMASK(30, 16)
+#define HFP_HS_EN BIT(31)
#define DSI_CMDQ_SIZE 0x60
#define CMDQ_SIZE 0x3f
#define CMDQ_SIZE_SEL BIT(15)
#define DSI_HSTX_CKL_WC 0x64
+#define HSTX_CKL_WC GENMASK(15, 2)
#define DSI_RX_DATA0 0x74
#define DSI_RX_DATA1 0x78
@@ -187,6 +190,7 @@ struct mtk_dsi_driver_data {
bool has_shadow_ctl;
bool has_size_ctl;
bool cmdq_long_packet_ctl;
+ bool support_per_frame_lp;
};
struct mtk_dsi {
@@ -426,7 +430,75 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi, bool config_vact)
writel(ps_val, dsi->regs + DSI_PSCTRL);
}
-static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+static void mtk_dsi_config_vdo_timing_per_frame_lp(struct mtk_dsi *dsi)
+{
+ u32 horizontal_sync_active_byte;
+ u32 horizontal_backporch_byte;
+ u32 horizontal_frontporch_byte;
+ u32 hfp_byte_adjust, v_active_adjust;
+ u32 cklp_wc_min_adjust, cklp_wc_max_adjust;
+ u32 dsi_tmp_buf_bpp;
+ unsigned int da_hs_trail;
+ unsigned int ps_wc, hs_vb_ps_wc;
+ u32 v_active_roundup, hstx_cklp_wc;
+ u32 hstx_cklp_wc_max, hstx_cklp_wc_min;
+ struct videomode *vm = &dsi->vm;
+
+ if (dsi->format == MIPI_DSI_FMT_RGB565)
+ dsi_tmp_buf_bpp = 2;
+ else
+ dsi_tmp_buf_bpp = 3;
+
+ da_hs_trail = dsi->phy_timing.da_hs_trail;
+ ps_wc = vm->hactive * dsi_tmp_buf_bpp;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ horizontal_sync_active_byte =
+ vm->hsync_len * dsi_tmp_buf_bpp - 10;
+ horizontal_backporch_byte =
+ vm->hback_porch * dsi_tmp_buf_bpp - 10;
+ hfp_byte_adjust = 12;
+ v_active_adjust = 32 + horizontal_sync_active_byte;
+ cklp_wc_min_adjust = 12 + 2 + 4 + horizontal_sync_active_byte;
+ cklp_wc_max_adjust = 20 + 6 + 4 + horizontal_sync_active_byte;
+ } else {
+ horizontal_sync_active_byte = vm->hsync_len * dsi_tmp_buf_bpp - 4;
+ horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
+ dsi_tmp_buf_bpp - 10;
+ cklp_wc_min_adjust = 4;
+ cklp_wc_max_adjust = 12 + 4 + 4;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ hfp_byte_adjust = 18;
+ v_active_adjust = 28;
+ } else {
+ hfp_byte_adjust = 12;
+ v_active_adjust = 22;
+ }
+ }
+ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp - hfp_byte_adjust;
+ v_active_roundup = (v_active_adjust + horizontal_backporch_byte + ps_wc +
+ horizontal_frontporch_byte) % dsi->lanes;
+ if (v_active_roundup)
+ horizontal_backporch_byte += dsi->lanes - v_active_roundup;
+ hstx_cklp_wc_min = (DIV_ROUND_UP(cklp_wc_min_adjust, dsi->lanes) + da_hs_trail + 1)
+ * dsi->lanes / 6 - 1;
+ hstx_cklp_wc_max = (DIV_ROUND_UP((cklp_wc_max_adjust + horizontal_backporch_byte +
+ ps_wc), dsi->lanes) + da_hs_trail + 1) * dsi->lanes / 6 - 1;
+
+ hstx_cklp_wc = FIELD_PREP(HSTX_CKL_WC, (hstx_cklp_wc_min + hstx_cklp_wc_max) / 2);
+ writel(hstx_cklp_wc, dsi->regs + DSI_HSTX_CKL_WC);
+
+ hs_vb_ps_wc = ps_wc - (dsi->phy_timing.lpx + dsi->phy_timing.da_hs_exit +
+ dsi->phy_timing.da_hs_prepare + dsi->phy_timing.da_hs_zero + 2) * dsi->lanes;
+ horizontal_frontporch_byte |= FIELD_PREP(HFP_HS_EN, 1) |
+ FIELD_PREP(HFP_HS_VB_PS_WC, hs_vb_ps_wc);
+
+ writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
+ writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
+ writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
+}
+
+static void mtk_dsi_config_vdo_timing_per_line_lp(struct mtk_dsi *dsi)
{
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
@@ -436,7 +508,6 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 dsi_tmp_buf_bpp, data_phy_cycles;
u32 delta;
struct mtk_phy_timing *timing = &dsi->phy_timing;
-
struct videomode *vm = &dsi->vm;
if (dsi->format == MIPI_DSI_FMT_RGB565)
@@ -444,16 +515,6 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
else
dsi_tmp_buf_bpp = 3;
- writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
- writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
- writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
- writel(vm->vactive, dsi->regs + DSI_VACT_NL);
-
- if (dsi->driver_data->has_size_ctl)
- writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) |
- FIELD_PREP(DSI_WIDTH, vm->hactive),
- dsi->regs + DSI_SIZE_CON);
-
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -499,6 +560,26 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
+}
+
+static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+
+ writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
+ writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
+ writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
+ writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+
+ if (dsi->driver_data->has_size_ctl)
+ writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) |
+ FIELD_PREP(DSI_WIDTH, vm->hactive),
+ dsi->regs + DSI_SIZE_CON);
+
+ if (dsi->driver_data->support_per_frame_lp)
+ mtk_dsi_config_vdo_timing_per_frame_lp(dsi);
+ else
+ mtk_dsi_config_vdo_timing_per_line_lp(dsi);
mtk_dsi_ps_control(dsi, false);
}
@@ -1197,6 +1278,7 @@ static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
.has_shadow_ctl = true,
.has_size_ctl = true,
.cmdq_long_packet_ctl = true,
+ .support_per_frame_lp = true,
};
static const struct of_device_id mtk_dsi_of_match[] = {
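
To make the per-frame LP word counts concrete, a worked example with illustrative numbers (not taken from a datasheet): a 1920-wide RGB888 mode carries ps_wc = 1920 * 3 = 5760 payload bytes per line, and in burst mode the front porch is charged hfp_byte_adjust = 18 bytes:

	u32 dsi_tmp_buf_bpp = 3;				/* MIPI_DSI_FMT_RGB888 */
	u32 ps_wc = 1920 * dsi_tmp_buf_bpp;			/* 5760 */
	u32 hfp_byte = vm->hfront_porch * dsi_tmp_buf_bpp - 18;	/* burst mode */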
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index 9dfd13d32dfa..d1d9cf8b10e1 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -3,6 +3,7 @@
* Copyright (c) 2021 MediaTek Inc.
*/
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <linux/clk.h>
@@ -35,6 +36,7 @@
#define MIX_SRC_L0_EN BIT(0)
#define MIX_L_SRC_CON(n) (0x28 + 0x18 * (n))
#define NON_PREMULTI_SOURCE (2 << 12)
+#define PREMULTI_SOURCE (3 << 12)
#define MIX_L_SRC_SIZE(n) (0x30 + 0x18 * (n))
#define MIX_L_SRC_OFFSET(n) (0x34 + 0x18 * (n))
#define MIX_FUNC_DCM0 0x120
@@ -175,7 +177,13 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
alpha_con |= state->base.alpha & MIXER_ALPHA;
}
- if (state->base.fb && !state->base.fb->format->has_alpha) {
+ if (state->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ alpha_con |= PREMULTI_SOURCE;
+ else
+ alpha_con |= NON_PREMULTI_SOURCE;
+
+ if ((state->base.fb && !state->base.fb->format->has_alpha) ||
+ state->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
/*
* Mixer doesn't support CONST_BLD mode,
* use a trick to make the output equivalent
@@ -191,8 +199,7 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
mtk_ddp_write(cmdq_pkt, pending->height << 16 | align_width, &mixer->cmdq_base,
mixer->regs, MIX_L_SRC_SIZE(idx));
mtk_ddp_write(cmdq_pkt, offset, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_OFFSET(idx));
- mtk_ddp_write_mask(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx),
- 0x1ff);
+ mtk_ddp_write(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx));
mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &mixer->cmdq_base, mixer->regs, MIX_SRC_CON,
BIT(idx));
}
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 1723d4333f37..7d2cb4e0fafa 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -321,7 +321,7 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
- size_t num_formats)
+ size_t num_formats, unsigned int plane_idx)
{
int err;
@@ -338,6 +338,22 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
return err;
}
+ /*
+ * The hardware does not support repositioning planes by muxing: their
+ * Z-position is in fact fixed and the only way to change the actual
+ * order is to swap the contents of the entire register set of one
+ * overlay with another, which may be more expensive than desired.
+ *
+ * With no repositioning, the caller of this function guarantees that
+ * the plane_idx is correct. This means that, for example, the PRIMARY
+ * plane fed to this function will always have plane_idx zero.
+ */
+ err = drm_plane_create_zpos_immutable_property(plane, plane_idx);
+ if (err) {
+ DRM_ERROR("Failed to create zpos property for plane %u\n", plane_idx);
+ return err;
+ }
+
if (supported_rotations) {
err = drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
@@ -346,6 +362,17 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
DRM_INFO("Create rotation property failed\n");
}
+ err = drm_plane_create_alpha_property(plane);
+ if (err)
+ DRM_ERROR("failed to create property: alpha\n");
+
+ err = drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE));
+ if (err)
+ DRM_ERROR("failed to create property: blend_mode\n");
+
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
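
Callers of mtk_plane_init() now pass the fixed z-position explicitly; the primary plane is index zero and each subsequent overlay increments it, as in the mtk_crtc_init_comp_planes() hunk above. Illustrative call:

	ret = mtk_plane_init(drm_dev, plane, possible_crtcs, DRM_PLANE_TYPE_PRIMARY,
			     rotations, formats, num_formats, 0 /* plane_idx */);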
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 231bb7aac947..5b177eac67b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -49,6 +49,5 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
- size_t num_formats);
-
+ size_t num_formats, unsigned int plane_idx);
#endif
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index d1b25f9f6586..5a02203fad12 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -12,6 +12,7 @@ mgag200-y := \
mgag200_g200se.o \
mgag200_g200wb.o \
mgag200_mode.o \
+ mgag200_vga_bmc.o \
mgag200_vga.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_bmc.c b/drivers/gpu/drm/mgag200/mgag200_bmc.c
index 23ef85aa7e37..a689c71ff165 100644
--- a/drivers/gpu/drm/mgag200/mgag200_bmc.c
+++ b/drivers/gpu/drm/mgag200/mgag200_bmc.c
@@ -9,12 +9,7 @@
#include "mgag200_drv.h"
-static struct mgag200_bmc_connector *to_mgag200_bmc_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct mgag200_bmc_connector, base);
-}
-
-void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
+void mgag200_bmc_stop_scanout(struct mga_device *mdev)
{
u8 tmp;
int iter_max;
@@ -73,15 +68,10 @@ void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
}
}
-void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
+void mgag200_bmc_start_scanout(struct mga_device *mdev)
{
u8 tmp;
- /* Ensure that the vrsten and hrsten are set */
- WREG8(MGAREG_CRTCEXT_INDEX, 1);
- tmp = RREG8(MGAREG_CRTCEXT_DATA);
- WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
-
/* Assert rstlvl2 */
WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
tmp = RREG8(DAC_DATA);
@@ -107,100 +97,3 @@ void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
tmp &= ~0x10;
WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
}
-
-static const struct drm_encoder_funcs mgag200_bmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static int mgag200_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct mgag200_bmc_connector *bmc_connector = to_mgag200_bmc_connector(connector);
- struct drm_connector *physical_connector = bmc_connector->physical_connector;
-
- /*
- * Most user-space compositors cannot handle more than one connected
- * connector per CRTC. Hence, we only mark the BMC as connected if the
- * physical connector is disconnected. If the physical connector's status
- * is connected or unknown, the BMC remains disconnected. This has no
- * effect on the output of the BMC.
- *
- * FIXME: Remove this logic once user-space compositors can handle more
- * than one connector per CRTC. The BMC should always be connected.
- */
-
- if (physical_connector && physical_connector->status == connector_status_disconnected)
- return connector_status_connected;
-
- return connector_status_disconnected;
-}
-
-static int mgag200_bmc_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct mga_device *mdev = to_mga_device(dev);
- const struct mgag200_device_info *minfo = mdev->info;
-
- return drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay);
-}
-
-static const struct drm_connector_helper_funcs mgag200_bmc_connector_helper_funcs = {
- .get_modes = mgag200_bmc_connector_helper_get_modes,
- .detect_ctx = mgag200_bmc_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs mgag200_bmc_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int mgag200_bmc_connector_init(struct drm_device *dev,
- struct mgag200_bmc_connector *bmc_connector,
- struct drm_connector *physical_connector)
-{
- struct drm_connector *connector = &bmc_connector->base;
- int ret;
-
- ret = drm_connector_init(dev, connector, &mgag200_bmc_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- if (ret)
- return ret;
- drm_connector_helper_add(connector, &mgag200_bmc_connector_helper_funcs);
-
- bmc_connector->physical_connector = physical_connector;
-
- return 0;
-}
-
-int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector)
-{
- struct drm_device *dev = &mdev->base;
- struct drm_crtc *crtc = &mdev->crtc;
- struct drm_encoder *encoder;
- struct mgag200_bmc_connector *bmc_connector;
- struct drm_connector *connector;
- int ret;
-
- encoder = &mdev->output.bmc.encoder;
- ret = drm_encoder_init(dev, encoder, &mgag200_bmc_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- bmc_connector = &mdev->output.bmc.bmc_connector;
- ret = mgag200_bmc_connector_init(dev, bmc_connector, physical_connector);
- if (ret)
- return ret;
- connector = &bmc_connector->base;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 62080cf0f2da..6623ee4e3277 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_pciids.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -84,6 +85,34 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
return offset - 65536;
}
+static irqreturn_t mgag200_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc *crtc;
+ u32 status, ien;
+
+ status = RREG32(MGAREG_STATUS);
+
+ if (status & MGAREG_STATUS_VLINEPEN) {
+ ien = RREG32(MGAREG_IEN);
+ if (!(ien & MGAREG_IEN_VLINEIEN))
+ goto out;
+
+ crtc = drm_crtc_from_index(dev, 0);
+ if (WARN_ON_ONCE(!crtc))
+ goto out;
+ drm_crtc_handle_vblank(crtc);
+
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ return IRQ_HANDLED;
+ }
+
+out:
+ return IRQ_NONE;
+}
+
/*
* DRM driver
*/
@@ -167,6 +196,7 @@ int mgag200_device_init(struct mga_device *mdev,
const struct mgag200_device_funcs *funcs)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 crtcext3, misc;
int ret;
@@ -192,6 +222,16 @@ int mgag200_device_init(struct mga_device *mdev,
mutex_unlock(&mdev->rmmio_lock);
+ WREG32(MGAREG_IEN, 0);
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED,
+ dev->driver->name, dev);
+ if (ret) {
+ drm_err(dev, "Failed to acquire interrupt, error %d\n", ret);
+ return ret;
+ }
+
return 0;
}
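
The handler registered above follows the shared-interrupt contract: with
IRQF_SHARED, the handler may be invoked for another device on the same
line, so it must return IRQ_NONE unless its own status register shows a
pending event. A minimal sketch of that contract (the foo_* names and
registers are hypothetical stand-ins, not mgag200 code):

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct foo_device *fdev = arg;
	u32 status = readl(fdev->mmio + FOO_STATUS);

	/* Not our interrupt: let other handlers on the shared line run. */
	if (!(status & FOO_IRQ_PENDING))
		return IRQ_NONE;

	/* Acknowledge before returning so the line deasserts. */
	writel(FOO_IRQ_PENDING, fdev->mmio + FOO_ICLEAR);

	return IRQ_HANDLED;
}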
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 7f7dfbd0f013..4760ba92871b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -179,6 +179,8 @@ struct mgag200_crtc_state {
const struct drm_format_info *format;
struct mgag200_pll_values pixpllc;
+
+ bool set_vidrst;
};
static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_state *base)
@@ -186,11 +188,6 @@ static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_s
return container_of(base, struct mgag200_crtc_state, base);
}
-struct mgag200_bmc_connector {
- struct drm_connector base;
- struct drm_connector *physical_connector;
-};
-
enum mga_type {
G200_PCI,
G200_AGP,
@@ -214,8 +211,8 @@ struct mgag200_device_info {
*/
unsigned long max_mem_bandwidth;
- /* HW has external source (e.g., BMC) to synchronize with */
- bool has_vidrst:1;
+ /* Synchronize scanout with BMC */
+ bool sync_bmc:1;
struct {
unsigned data_bit:3;
@@ -230,13 +227,13 @@ struct mgag200_device_info {
};
#define MGAG200_DEVICE_INFO_INIT(_max_hdisplay, _max_vdisplay, _max_mem_bandwidth, \
- _has_vidrst, _i2c_data_bit, _i2c_clock_bit, \
+ _sync_bmc, _i2c_data_bit, _i2c_clock_bit, \
_bug_no_startadd) \
{ \
.max_hdisplay = (_max_hdisplay), \
.max_vdisplay = (_max_vdisplay), \
.max_mem_bandwidth = (_max_mem_bandwidth), \
- .has_vidrst = (_has_vidrst), \
+ .sync_bmc = (_sync_bmc), \
.i2c = { \
.data_bit = (_i2c_data_bit), \
.clock_bit = (_i2c_clock_bit), \
@@ -246,18 +243,6 @@ struct mgag200_device_info {
struct mgag200_device_funcs {
/*
- * Disables an external reset source (i.e., BMC) before programming
- * a new display mode.
- */
- void (*disable_vidrst)(struct mga_device *mdev);
-
- /*
- * Enables an external reset source (i.e., BMC) after programming
- * a new display mode.
- */
- void (*enable_vidrst)(struct mga_device *mdev);
-
- /*
* Validate that the given state can be programmed into PIXPLLC. On
* success, the calculated parameters should be stored in the CRTC's
* state in struct @mgag200_crtc_state.pixpllc.
@@ -293,10 +278,6 @@ struct mga_device {
struct drm_encoder encoder;
struct drm_connector connector;
} vga;
- struct {
- struct drm_encoder encoder;
- struct mgag200_bmc_connector bmc_connector;
- } bmc;
} output;
};
@@ -410,17 +391,24 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
#define MGAG200_CRTC_HELPER_FUNCS \
.mode_valid = mgag200_crtc_helper_mode_valid, \
.atomic_check = mgag200_crtc_helper_atomic_check, \
.atomic_flush = mgag200_crtc_helper_atomic_flush, \
.atomic_enable = mgag200_crtc_helper_atomic_enable, \
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable, \
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position
void mgag200_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+int mgag200_crtc_enable_vblank(struct drm_crtc *crtc);
+void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);
#define MGAG200_CRTC_FUNCS \
.reset = mgag200_crtc_reset, \
@@ -428,20 +416,26 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
.set_config = drm_atomic_helper_set_config, \
.page_flip = drm_atomic_helper_page_flip, \
.atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \
- .atomic_destroy_state = mgag200_crtc_atomic_destroy_state
+ .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \
+ .enable_vblank = mgag200_crtc_enable_vblank, \
+ .disable_vblank = mgag200_crtc_disable_vblank, \
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp
-void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode);
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
+ bool set_vidrst);
void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format);
void mgag200_enable_display(struct mga_device *mdev);
void mgag200_init_registers(struct mga_device *mdev);
int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available);
+/* mgag200_vga_bmc.c */
+int mgag200_vga_bmc_output_init(struct mga_device *mdev);
+
/* mgag200_vga.c */
int mgag200_vga_output_init(struct mga_device *mdev);
- /* mgag200_bmc.c */
-void mgag200_bmc_disable_vidrst(struct mga_device *mdev);
-void mgag200_bmc_enable_vidrst(struct mga_device *mdev);
-int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector);
+/* mgag200_bmc.c */
+void mgag200_bmc_stop_scanout(struct mga_device *mdev);
+void mgag200_bmc_start_scanout(struct mga_device *mdev);
#endif /* __MGAG200_DRV_H__ */
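
Taken together, the declarations above wire up the standard DRM vblank
plumbing: enable_vblank()/disable_vblank() gate the hardware interrupt,
while drm_crtc_vblank_helper_get_vblank_timestamp() derives precise
timestamps from the driver's get_scanout_position() helper. A hedged
sketch of the wiring (foo_* names are hypothetical; the drm_* symbols
are real DRM core APIs):

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	/* ... modeset hooks ... */
	.get_scanout_position = foo_crtc_get_scanout_position,
};

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* ... atomic state hooks ... */
	.enable_vblank = foo_crtc_enable_vblank,
	.disable_vblank = foo_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};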
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
index f874e2949840..77ce8d36cef0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -403,5 +404,9 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
index 52bf49ead5c5..09ced65c1d2f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -214,11 +215,7 @@ static int mgag200_g200eh_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -279,5 +276,9 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
index e7f89b2a59fd..5daa469137bd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -118,11 +119,7 @@ static int mgag200_g200eh3_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -184,5 +181,9 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index 4e8a1756138d..09cfffafe130 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -191,11 +192,8 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -209,8 +207,7 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
@@ -218,7 +215,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
@@ -257,11 +255,7 @@ static int mgag200_g200er_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -318,5 +312,9 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index d884f3cb0ec7..3d48baa91d8b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -192,11 +193,8 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -210,8 +208,7 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
@@ -219,7 +216,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
@@ -258,11 +256,7 @@ static int mgag200_g200ev_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -323,5 +317,9 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
index 839401e8b465..dabc778e64e8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -127,11 +128,7 @@ static int mgag200_g200ew3_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -146,8 +143,6 @@ static const struct mgag200_device_info mgag200_g200ew3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false);
static const struct mgag200_device_funcs mgag200_g200ew3_device_funcs = {
- .disable_vidrst = mgag200_bmc_disable_vidrst,
- .enable_vidrst = mgag200_bmc_enable_vidrst,
.pixpllc_atomic_check = mgag200_g200ew3_pixpllc_atomic_check,
.pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update, // same as G200WB
};
@@ -204,5 +199,9 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index a824bb8ad579..9dcbe8304271 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -323,11 +324,8 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -341,8 +339,7 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
@@ -350,7 +347,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
@@ -389,11 +387,7 @@ static int mgag200_g200se_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -523,5 +517,9 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
index 835df0f4fc13..83a24aedbf2f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -261,11 +262,7 @@ static int mgag200_g200wb_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -280,8 +277,6 @@ static const struct mgag200_device_info mgag200_g200wb_device_info =
MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false);
static const struct mgag200_device_funcs mgag200_g200wb_device_funcs = {
- .disable_vidrst = mgag200_bmc_disable_vidrst,
- .enable_vidrst = mgag200_bmc_enable_vidrst,
.pixpllc_atomic_check = mgag200_g200wb_pixpllc_atomic_check,
.pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update,
};
@@ -328,5 +323,9 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d4550e4b3b01..7159909aca1e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -22,6 +22,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "mgag200_ddc.h"
#include "mgag200_drv.h"
@@ -201,26 +202,39 @@ void mgag200_init_registers(struct mga_device *mdev)
WREG8(MGA_MISC_OUT, misc);
}
-void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode)
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
+ bool set_vidrst)
{
- const struct mgag200_device_info *info = mdev->info;
- unsigned int hdisplay, hsyncstart, hsyncend, htotal;
- unsigned int vdisplay, vsyncstart, vsyncend, vtotal;
+ unsigned int hdispend, hsyncstr, hsyncend, htotal, hblkstr, hblkend;
+ unsigned int vdispend, vsyncstr, vsyncend, vtotal, vblkstr, vblkend;
+ unsigned int linecomp;
u8 misc, crtcext1, crtcext2, crtcext5;
- hdisplay = mode->hdisplay / 8 - 1;
- hsyncstart = mode->hsync_start / 8 - 1;
- hsyncend = mode->hsync_end / 8 - 1;
- htotal = mode->htotal / 8 - 1;
-
+ hdispend = mode->crtc_hdisplay / 8 - 1;
+ hsyncstr = mode->crtc_hsync_start / 8 - 1;
+ hsyncend = mode->crtc_hsync_end / 8 - 1;
+ htotal = mode->crtc_htotal / 8 - 1;
/* Work around hardware quirk */
if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
htotal++;
+ hblkstr = mode->crtc_hblank_start / 8 - 1;
+ hblkend = htotal;
+
+ vdispend = mode->crtc_vdisplay - 1;
+ vsyncstr = mode->crtc_vsync_start - 1;
+ vsyncend = mode->crtc_vsync_end - 1;
+ vtotal = mode->crtc_vtotal - 2;
+ vblkstr = mode->crtc_vblank_start;
+ vblkend = vtotal + 1;
- vdisplay = mode->vdisplay - 1;
- vsyncstart = mode->vsync_start - 1;
- vsyncend = mode->vsync_end - 1;
- vtotal = mode->vtotal - 2;
+ /*
+ * There's no VBLANK interrupt on Matrox chipsets, so we use
+ * the VLINE interrupt instead. It triggers when the scanline
+ * counter reaches <linecomp>. For VBLANK, this is the first
+ * non-visible line at the bottom of the screen. Therefore,
+ * keep <linecomp> in sync with <vblkstr>.
+ */
+ linecomp = vblkstr;
misc = RREG8(MGA_MISC_IN);
@@ -235,45 +249,45 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
misc &= ~MGAREG_MISC_VSYNCPOL;
crtcext1 = (((htotal - 4) & 0x100) >> 8) |
- ((hdisplay & 0x100) >> 7) |
- ((hsyncstart & 0x100) >> 6) |
- (htotal & 0x40);
- if (info->has_vidrst)
+ ((hblkstr & 0x100) >> 7) |
+ ((hsyncstr & 0x100) >> 6) |
+ (hblkend & 0x40);
+ if (set_vidrst)
crtcext1 |= MGAREG_CRTCEXT1_VRSTEN |
MGAREG_CRTCEXT1_HRSTEN;
crtcext2 = ((vtotal & 0xc00) >> 10) |
- ((vdisplay & 0x400) >> 8) |
- ((vdisplay & 0xc00) >> 7) |
- ((vsyncstart & 0xc00) >> 5) |
- ((vdisplay & 0x400) >> 3);
+ ((vdispend & 0x400) >> 8) |
+ ((vblkstr & 0xc00) >> 7) |
+ ((vsyncstr & 0xc00) >> 5) |
+ ((linecomp & 0x400) >> 3);
crtcext5 = 0x00;
- WREG_CRT(0, htotal - 4);
- WREG_CRT(1, hdisplay);
- WREG_CRT(2, hdisplay);
- WREG_CRT(3, (htotal & 0x1F) | 0x80);
- WREG_CRT(4, hsyncstart);
- WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
- WREG_CRT(6, vtotal & 0xFF);
- WREG_CRT(7, ((vtotal & 0x100) >> 8) |
- ((vdisplay & 0x100) >> 7) |
- ((vsyncstart & 0x100) >> 6) |
- ((vdisplay & 0x100) >> 5) |
- ((vdisplay & 0x100) >> 4) | /* linecomp */
- ((vtotal & 0x200) >> 4) |
- ((vdisplay & 0x200) >> 3) |
- ((vsyncstart & 0x200) >> 2));
- WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
- ((vdisplay & 0x200) >> 3));
- WREG_CRT(16, vsyncstart & 0xFF);
- WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
- WREG_CRT(18, vdisplay & 0xFF);
- WREG_CRT(20, 0);
- WREG_CRT(21, vdisplay & 0xFF);
- WREG_CRT(22, (vtotal + 1) & 0xFF);
- WREG_CRT(23, 0xc3);
- WREG_CRT(24, vdisplay & 0xFF);
+ WREG_CRT(0x00, htotal - 4);
+ WREG_CRT(0x01, hdispend);
+ WREG_CRT(0x02, hblkstr);
+ WREG_CRT(0x03, (hblkend & 0x1f) | 0x80);
+ WREG_CRT(0x04, hsyncstr);
+ WREG_CRT(0x05, ((hblkend & 0x20) << 2) | (hsyncend & 0x1f));
+ WREG_CRT(0x06, vtotal & 0xff);
+ WREG_CRT(0x07, ((vtotal & 0x100) >> 8) |
+ ((vdispend & 0x100) >> 7) |
+ ((vsyncstr & 0x100) >> 6) |
+ ((vblkstr & 0x100) >> 5) |
+ ((linecomp & 0x100) >> 4) |
+ ((vtotal & 0x200) >> 4) |
+ ((vdispend & 0x200) >> 3) |
+ ((vsyncstr & 0x200) >> 2));
+ WREG_CRT(0x09, ((vblkstr & 0x200) >> 4) |
+ ((linecomp & 0x200) >> 3));
+ WREG_CRT(0x10, vsyncstr & 0xff);
+ WREG_CRT(0x11, (vsyncend & 0x0f) | 0x20);
+ WREG_CRT(0x12, vdispend & 0xff);
+ WREG_CRT(0x14, 0);
+ WREG_CRT(0x15, vblkstr & 0xff);
+ WREG_CRT(0x16, vblkend & 0xff);
+ WREG_CRT(0x17, 0xc3);
+ WREG_CRT(0x18, linecomp & 0xff);
WREG_ECRT(0x01, crtcext1);
WREG_ECRT(0x02, crtcext2);
@@ -631,6 +645,8 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
const struct drm_format_info *format = mgag200_crtc_state->format;
@@ -640,6 +656,18 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
else
mgag200_crtc_set_gamma_linear(mdev, format);
}
+
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, event);
+ else
+ drm_crtc_arm_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
}
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
@@ -652,11 +680,8 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -668,22 +693,41 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
struct mga_device *mdev = to_mga_device(crtc->dev);
- const struct mgag200_device_funcs *funcs = mdev->funcs;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
+ drm_crtc_vblank_off(crtc);
mgag200_disable_display(mdev);
+}
+
+bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 vcount;
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ if (stime)
+ *stime = ktime_get();
+
+ if (vpos) {
+ vcount = RREG32(MGAREG_VCOUNT);
+ *vpos = vcount & GENMASK(11, 0);
+ }
+
+ if (hpos)
+ *hpos = mode->htotal >> 1; // near middle of scanline on average
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
}
void mgag200_crtc_reset(struct drm_crtc *crtc)
@@ -717,6 +761,7 @@ struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc
new_mgag200_crtc_state->format = mgag200_crtc_state->format;
memcpy(&new_mgag200_crtc_state->pixpllc, &mgag200_crtc_state->pixpllc,
sizeof(new_mgag200_crtc_state->pixpllc));
+ new_mgag200_crtc_state->set_vidrst = mgag200_crtc_state->set_vidrst;
return &new_mgag200_crtc_state->base;
}
@@ -729,6 +774,30 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
kfree(mgag200_crtc_state);
}
+int mgag200_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 ien;
+
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ ien = RREG32(MGAREG_IEN);
+ ien |= MGAREG_IEN_VLINEIEN;
+ WREG32(MGAREG_IEN, ien);
+
+ return 0;
+}
+
+void mgag200_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 ien;
+
+ ien = RREG32(MGAREG_IEN);
+ ien &= ~(MGAREG_IEN_VLINEIEN);
+ WREG32(MGAREG_IEN, ien);
+}
+
/*
* Mode config
*/
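
The event handling added to atomic_flush() above is the canonical DRM
pattern for completing page-flip events. Restated with the reference
counting spelled out (all symbols are real DRM core APIs):

	spin_lock_irqsave(&dev->event_lock, flags);
	if (drm_crtc_vblank_get(crtc) != 0) {
		/* vblank interrupts unavailable: complete immediately */
		drm_crtc_send_vblank_event(crtc, event);
	} else {
		/*
		 * Deliver on the next vblank; arming consumes the
		 * reference taken by drm_crtc_vblank_get().
		 */
		drm_crtc_arm_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);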
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index aa73463674e4..d4fef8f25871 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -102,10 +102,17 @@
#define MGAREG_EXEC 0x0100
#define MGAREG_FIFOSTATUS 0x1e10
+
#define MGAREG_STATUS 0x1e14
+#define MGAREG_STATUS_VLINEPEN BIT(5)
+
#define MGAREG_CACHEFLUSH 0x1fff
+
#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_ICLEAR_VLINEICLR BIT(5)
+
#define MGAREG_IEN 0x1e1c
+#define MGAREG_IEN_VLINEIEN BIT(5)
#define MGAREG_VCOUNT 0x1e20
diff --git a/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
new file mode 100644
index 000000000000..a5a3ac108bd5
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mgag200_ddc.h"
+#include "mgag200_drv.h"
+
+static void mgag200_vga_bmc_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+
+ if (mdev->info->sync_bmc)
+ mgag200_bmc_stop_scanout(mdev);
+}
+
+static void mgag200_vga_bmc_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+
+ if (mdev->info->sync_bmc)
+ mgag200_bmc_start_scanout(mdev);
+}
+
+static int mgag200_vga_bmc_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *new_crtc_state,
+ struct drm_connector_state *new_connector_state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+
+ new_mgag200_crtc_state->set_vidrst = mdev->info->sync_bmc;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs mgag200_dac_encoder_helper_funcs = {
+ .atomic_disable = mgag200_vga_bmc_encoder_atomic_disable,
+ .atomic_enable = mgag200_vga_bmc_encoder_atomic_enable,
+ .atomic_check = mgag200_vga_bmc_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs mgag200_dac_encoder_funcs = {
+ .destroy = drm_encoder_cleanup
+};
+
+static int mgag200_vga_bmc_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct mga_device *mdev = to_mga_device(connector->dev);
+ const struct mgag200_device_info *minfo = mdev->info;
+ int count;
+
+ count = drm_connector_helper_get_modes(connector);
+
+ if (!count) {
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+/*
+ * There's no monitor connected if the DDC did not return an EDID. Still
+ * return 'connected' as there's always a BMC. Incrementing the connector's
+ * epoch counter triggers an update of the related properties.
+ */
+static int mgag200_vga_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ enum drm_connector_status old_status, status;
+
+ if (connector->edid_blob_ptr)
+ old_status = connector_status_connected;
+ else
+ old_status = connector_status_disconnected;
+
+ status = drm_connector_helper_detect_from_ddc(connector, ctx, force);
+
+ if (status != old_status)
+ ++connector->epoch_counter;
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs mgag200_vga_connector_helper_funcs = {
+ .get_modes = mgag200_vga_bmc_connector_helper_get_modes,
+ .detect_ctx = mgag200_vga_bmc_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs mgag200_vga_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+};
+
+int mgag200_vga_bmc_output_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct i2c_adapter *ddc;
+ int ret;
+
+ encoder = &mdev->output.vga.encoder;
+ ret = drm_encoder_init(dev, encoder, &mgag200_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_encoder_helper_add(encoder, &mgag200_dac_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ddc = mgag200_ddc_create(mdev);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ connector = &mdev->output.vga.connector;
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, ddc);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_vga_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
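
Since the VGA DDC lines have no hotplug interrupt, the new file relies
on the probe helper started by drm_kms_helper_poll_init() to call
.detect_ctx periodically; together with the epoch-counter bump on EDID
changes, this keeps the connector's properties fresh without user
intervention:

	/* No hotplug IRQ on the DDC lines; rely on periodic polling. */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;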
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index f5e2838c6a76..13110fcc46a8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -37,6 +37,7 @@ msm-display-$(CONFIG_DRM_MSM_HDMI) += \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8996.o \
+ hdmi/hdmi_phy_8998.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 0de8465b6cf0..2eb6c3e93748 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -42,6 +42,17 @@ static const struct adreno_info a3xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x03000620),
+ .family = ADRENO_3XX,
+ .revn = 308,
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
0x03020001,
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 5273dc849838..b46ff49f47cf 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -145,6 +145,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
+ } else if (adreno_is_a306a(adreno_gpu)) {
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010);
} else if (adreno_is_a320(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
@@ -237,7 +241,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
- if (adreno_is_a305b(adreno_gpu) || adreno_is_a306(adreno_gpu))
+ if (adreno_is_a305b(adreno_gpu) ||
+ adreno_is_a306(adreno_gpu) ||
+ adreno_is_a306a(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
else if (adreno_is_a320(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
@@ -334,8 +340,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
- if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
- adreno_is_a320(adreno_gpu)) {
+ if (adreno_is_a305(adreno_gpu) ||
+ adreno_is_a306(adreno_gpu) ||
+ adreno_is_a306a(adreno_gpu) ||
+ adreno_is_a320(adreno_gpu)) {
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
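diff note: the adreno_is_a306a() checks used above presumably key off the
catalog entry added earlier (chip id 0x03000620, revn 308 — A306A is the
internal name for the A308). A plausible shape for the helper, assuming
it wraps the existing adreno_is_revn():

static inline bool adreno_is_a306a(const struct adreno_gpu *gpu)
{
	/* A306A is marketed as A308; revn comes from the catalog */
	return adreno_is_revn(gpu, 308);
}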
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index c0b5373e90d7..e09044930547 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
struct drm_gem_object *obj;
uint32_t *ptr, dwords;
@@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
}
}
+ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
a5xx_flush(gpu, ring, true);
a5xx_preempt_trigger(gpu);
@@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
- /* Enable local preemption for finegrain preemption */
+ /*
+ * Disable local preemption by default because it requires
+ * user-space to be aware of it and to provide additional
+ * handling, such as restoring rendering state or performing
+ * various flushes on a switch.
+ */
OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
- OUT_RING(ring, 0x1);
+ OUT_RING(ring, 0x0);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
+ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
/*
* Execute a CACHE_FLUSH_TS event. This will ensure that the
@@ -1793,5 +1801,9 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->ubwc_config.highest_bank_bit = 14;
+ /* a5xx only supports UBWC 1.0, these are not configurable */
+ adreno_gpu->ubwc_config.macrotile_mode = 0;
+ adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+
return gpu;
}
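
last_seqno records the newest fence written to each ring at submit time;
the preemption code compares it against ring->memptrs->fence (the newest
*retired* fence) so that the active ring counts as empty once all of its
submissions have retired, even if its write pointer is ahead of the read
pointer. In predicate form (names as in the patch):

	/* The active ring is busy only while work is still unretired. */
	empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
	if (!empty && ring == a5xx_gpu->cur_ring)
		empty = (ring->memptrs->fence == a5xx_gpu->last_seqno[ring->id]);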
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index c7187bcc5e90..9c0d701fe4b8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -34,8 +34,10 @@ struct a5xx_gpu {
struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+ uint32_t last_seqno[MSM_GPU_MAX_RINGS];
atomic_t preempt_state;
+ spinlock_t preempt_start_lock;
struct timer_list preempt_timer;
struct drm_gem_object *shadow_bo;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index f58dd564d122..0469fea55010 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned long flags;
int i;
@@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
spin_lock_irqsave(&ring->preempt_lock, flags);
empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ if (!empty && ring == a5xx_gpu->cur_ring)
+ empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i];
spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty)
@@ -98,11 +102,18 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
return;
/*
+ * Serialize preemption start to ensure that we always make the
+ * decision based on the latest state. Otherwise we could get
+ * stuck in a lower-priority or empty ring.
+ */
+ spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags);
+
+ /*
* Try to start preemption by moving from NONE to START. If
* unsuccessful, a preemption is already in flight
*/
if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
- return;
+ goto out;
/* Get the next ring to preempt to */
ring = get_next_ring(gpu);
@@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
- return;
+ goto out;
}
+ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
+
/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->preempt_lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
@@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* And actually start the preemption */
gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+ return;
+
+out:
+ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
}
void a5xx_preempt_irq(struct msm_gpu *gpu)
@@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+ /*
+ * Try to trigger preemption again in case there was a submit or
+ * retire during ring switch
+ */
+ a5xx_preempt_trigger(gpu);
}
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
@@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
return;
for (i = 0; i < gpu->nr_rings; i++) {
+ a5xx_gpu->preempt[i]->data = 0;
+ a5xx_gpu->preempt[i]->info = 0;
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
@@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
}
+ spin_lock_init(&a5xx_gpu->preempt_start_lock);
timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}
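
The NONE->START transition guarded above is presumably try_preempt_state(),
a compare-and-swap on the a5xx preemption state machine; the new
preempt_start_lock ensures the ring choice is made against up-to-date
wptr/fence state rather than a snapshot that raced with a submit or
retire. A minimal sketch of such a transition helper, assuming it wraps
atomic_cmpxchg():

static inline bool try_transition(atomic_t *state, int old, int new)
{
	/* Succeeds for exactly one caller racing on the same transition. */
	return atomic_cmpxchg(state, old, new) == old;
}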
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 68ba9aed5506..0312b6ee0356 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -129,6 +129,59 @@ static const struct adreno_reglist a615_hwcg[] = {
{},
};
+static const struct adreno_reglist a620_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
static const struct adreno_reglist a630_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
@@ -448,7 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
- {REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
{}
@@ -491,7 +543,6 @@ static const u32 a630_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a630_protect, 32);
-/* These are for a620 and a650 */
static const u32 a650_protect_regs[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
@@ -636,6 +687,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a612_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00080000,
},
/*
* There are (at least) three SoCs implementing A610: SM6125
@@ -652,6 +705,35 @@ static const struct adreno_info a6xx_gpus[] = {
{ 127, 4 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010500),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 615,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a615_hwcg,
+ .protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
+ },
+ .speedbins = ADRENO_SPEEDBINS(
+ /*
+ * The default speed bin (0) has the same values as
+ * speed bin 90, which goes up to 432 MHz.
+ */
+ { 0, 0 },
+ { 90, 0 },
+ { 105, 1 },
+ { 146, 2 },
+ { 163, 3 },
+ ),
+ }, {
.machine = "qcom,sm7150",
.chip_ids = ADRENO_CHIP_IDS(0x06010800),
.family = ADRENO_6XX_GEN1,
@@ -667,6 +749,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -689,6 +773,8 @@ static const struct adreno_info a6xx_gpus[] = {
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -711,6 +797,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -733,6 +821,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -755,6 +845,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -764,6 +856,30 @@ static const struct adreno_info a6xx_gpus[] = {
{ 180, 1 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06020100),
+ .family = ADRENO_6XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a621_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a620_zap.mbn",
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a620_hwcg,
+ .protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00010000,
+ },
+ .address_space_size = SZ_16G,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 137, 1 },
+ ),
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
@@ -782,6 +898,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a630_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00180000,
},
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06040001),
@@ -799,6 +917,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -821,6 +941,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a650_hwcg,
.protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00300200,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
@@ -846,6 +968,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gmu_cgc_mode = 0x00020000,
+ .prim_fifo_threshold = 0x00300200,
},
.address_space_size = SZ_16G,
}, {
@@ -864,11 +988,14 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00200200,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
+ { 129, 4 },
{ 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */
{ 190, 1 },
),
@@ -888,6 +1015,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00200200,
},
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06090000),
@@ -905,6 +1034,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a690_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00800200,
},
.address_space_size = SZ_16G,
}
@@ -1165,6 +1296,8 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a702_hwcg,
.protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x0000c000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -1188,6 +1321,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.protect = &a730_protect,
+ .gmu_cgc_mode = 0x00020000,
},
.address_space_size = SZ_16G,
}, {
@@ -1207,6 +1341,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.gmu_chipid = 0x7020100,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
}, {
@@ -1225,6 +1360,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.gmu_chipid = 0x7050001,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_256G,
}, {
@@ -1243,6 +1379,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.gmu_chipid = 0x7090100,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
}
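
The recurring gmu_cgc_mode and prim_fifo_threshold additions move two
per-SKU magic numbers out of driver code and into the catalog, so
a6xx_set_hwcg() (patched below) can simply read them from
adreno_gpu->info->a6xx instead of special-casing GPU generations. A
hypothetical entry showing just the two new fields:

	.a6xx = &(const struct a6xx_info) {
		.hwcg = a630_hwcg,
		.protect = &a630_protect,
		/* clock-gating controller mode for this SKU */
		.gmu_cgc_mode = 0x00020202,
		/* primitive-FIFO depth tuning for this SKU */
		.prim_fifo_threshold = 0x00180000,
	},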
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index cb538a262d1c..37927bdd6fbe 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -423,6 +423,20 @@ static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
+static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
+ /*
+ * GEMNoC can power collapse whilst the GPU is being powered down, resulting
+ * in the power down sequence not being fully executed. That in turn can
+ * prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
+ */
+ if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
+}
+
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
@@ -456,6 +470,8 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
out:
+ a6xx_gemnoc_workaround(gmu);
+
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
@@ -525,8 +541,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (IS_ERR(pdcptr))
goto err;
- if (adreno_is_a650(adreno_gpu) ||
- adreno_is_a660_family(adreno_gpu) ||
+ if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu))
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
@@ -946,6 +961,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Force off SPTP in case the GMU is managing it */
a6xx_sptprac_disable(gmu);
+ a6xx_gemnoc_workaround(gmu);
+
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index bcaec86ac67a..06cab2c6fd66 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -402,7 +402,8 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
const struct adreno_reglist *reg;
unsigned int i;
- u32 val, clock_cntl_on, cgc_mode;
+ u32 cgc_delay, cgc_hyst;
+ u32 val, clock_cntl_on;
if (!(adreno_gpu->info->a6xx->hwcg || adreno_is_a7xx(adreno_gpu)))
return;
@@ -416,16 +417,15 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
else
clock_cntl_on = 0x8aa8aa82;
- if (adreno_is_a7xx(adreno_gpu)) {
- cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
-
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
- state ? cgc_mode : 0);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
- state ? 0x10111 : 0);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
- state ? 0x5555 : 0);
- }
+ cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
+ cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ state ? cgc_delay : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ state ? cgc_hyst : 0);
if (!adreno_gpu->info->a6xx->hwcg) {
gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
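[editor's note] The per-family if/else chain for the CGC mode value is replaced by a gmu_cgc_mode field carried in each catalog entry; only the delay/hyst values still come from a family check. A hedged sketch of the data-driven pattern (entry names and values illustrative, taken from the catalog hunks above):

#include <stdint.h>
#include <stdio.h>

/* Illustrative per-chip catalog entry, mirroring struct a6xx_info */
struct chip_info {
	const char *name;
	uint32_t gmu_cgc_mode;	/* written to ..._CGC_MODE_CNTL when HWCG is on */
};

static const struct chip_info catalog[] = {
	{ "a730", 0x00020000 },
	{ "a740", 0x00020202 },
};

static uint32_t cgc_mode_reg(const struct chip_info *info, int state)
{
	/* state == 0 clears the register, exactly as the patched code does */
	return state ? info->gmu_cgc_mode : 0;
}

int main(void)
{
	for (unsigned i = 0; i < 2; i++)
		printf("%s: on=%#x off=%#x\n", catalog[i].name,
		       cgc_mode_reg(&catalog[i], 1), cgc_mode_reg(&catalog[i], 0));
	return 0;
}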
@@ -493,24 +493,17 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
- /* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
gpu->ubwc_config.rgb565_predicator = 0;
- /* Unknown, introduced with A650 family */
gpu->ubwc_config.uavflagprd_inv = 0;
- /* Whether the minimum access length is 64 bits */
gpu->ubwc_config.min_acc_len = 0;
- /* Entirely magic, per-GPU-gen value */
- gpu->ubwc_config.ubwc_mode = 0;
- /*
- * The Highest Bank Bit value represents the bit of the highest DDR bank.
- * This should ideally use DRAM type detection.
- */
+ gpu->ubwc_config.ubwc_swizzle = 0x6;
+ gpu->ubwc_config.macrotile_mode = 0;
gpu->ubwc_config.highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
gpu->ubwc_config.highest_bank_bit = 13;
gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_mode = 1;
+ gpu->ubwc_config.ubwc_swizzle = 0x7;
}
if (adreno_is_a618(gpu))
@@ -523,9 +516,18 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;
+ if (adreno_is_a621(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 13;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ }
+
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;
+ if (adreno_is_a680(gpu))
+ gpu->ubwc_config.macrotile_mode = 1;
+
if (adreno_is_a650(gpu) ||
adreno_is_a660(gpu) ||
adreno_is_a690(gpu) ||
@@ -536,6 +538,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
}
if (adreno_is_7c3(gpu)) {
@@ -543,12 +546,12 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
}
if (adreno_is_a702(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_mode = 0;
}
}
@@ -564,21 +567,26 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
u32 hbb_hi = hbb >> 2;
u32 hbb_lo = hbb & 3;
+ u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
+ u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
+ level2_swizzling_dis << 12 |
adreno_gpu->ubwc_config.rgb565_predicator << 11 |
hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
- gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
+ level2_swizzling_dis << 6 | hbb_hi << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
- gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+ level2_swizzling_dis << 12 | hbb_hi << 10 |
adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
@@ -586,6 +594,9 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
+
+ gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
+ adreno_gpu->ubwc_config.macrotile_mode);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
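[editor's note] The ubwc_swizzle bitmask replaces the old boolean ubwc_mode: bit 0 becomes the legacy mode bit and the inverse of bit 1 becomes the level-2-swizzling-disable bit in the NC_MODE registers. A standalone check of the packing, with field offsets copied from the hunk above (the helper name is made up):

#include <stdint.h>
#include <stdio.h>

struct ubwc_cfg {
	uint32_t ubwc_swizzle, rgb565_predicator, amsbc, min_acc_len;
	uint32_t highest_bank_bit;
};

/* Pack REG_A6XX_RB_NC_MODE_CNTL the way the patched code does. */
static uint32_t rb_nc_mode_cntl(const struct ubwc_cfg *c)
{
	uint32_t hbb = c->highest_bank_bit - 13;
	uint32_t hbb_hi = hbb >> 2, hbb_lo = hbb & 3;
	uint32_t ubwc_mode = c->ubwc_swizzle & 1;
	uint32_t level2_swizzling_dis = !(c->ubwc_swizzle & 2);

	return level2_swizzling_dis << 12 | c->rgb565_predicator << 11 |
	       hbb_hi << 10 | c->amsbc << 4 | c->min_acc_len << 3 |
	       hbb_lo << 1 | ubwc_mode;
}

int main(void)
{
	/* 0x6 = levels 2+3 enabled, level 1 off: ubwc_mode=0, l2-dis=0 */
	struct ubwc_cfg c = { .ubwc_swizzle = 0x6, .highest_bank_bit = 15, .amsbc = 1 };

	printf("RB_NC_MODE_CNTL = %#x\n", rb_nc_mode_cntl(&c));	/* 0x14 */
	return 0;
}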
@@ -976,25 +987,11 @@ static int hw_init(struct msm_gpu *gpu)
} else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
- /* Setting the primFifo thresholds default values,
- * and vccCacheSkipDis=1 bit (0x200) for A640 and newer
- */
- if (adreno_is_a702(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x0000c000);
- else if (adreno_is_a690(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200);
- else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
- else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
- else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
- else if (adreno_is_a619(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
- else if (adreno_is_a610(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
- else if (!adreno_is_a7xx(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
+
+ /* Set the default primFifo threshold values */
+ if (adreno_gpu->info->a6xx->prim_fifo_threshold)
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL,
+ adreno_gpu->info->a6xx->prim_fifo_threshold);
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index e3e5c53ae8af..0fb7febf70e7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -22,6 +22,8 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
u32 gmu_chipid;
+ u32 gmu_cgc_mode;
+ u32 prim_fifo_threshold;
};
struct a6xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 789a11416f7a..0fcae53c0b14 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -388,18 +388,18 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
const u32 *debugbus_blocks, *gbif_debugbus_blocks;
int i;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
debugbus_blocks = gen7_0_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks);
gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
debugbus_blocks = gen7_2_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks);
gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
debugbus_blocks = gen7_9_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks);
gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks;
@@ -509,7 +509,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
const struct a6xx_debugbus_block *cx_debugbus_blocks;
if (adreno_is_a7xx(adreno_gpu)) {
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
+ BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
cx_debugbus_blocks = a7xx_cx_debugbus_blocks;
nr_cx_debugbus_blocks = ARRAY_SIZE(a7xx_cx_debugbus_blocks);
} else {
@@ -660,13 +660,16 @@ static void a7xx_get_dbgahb_clusters(struct msm_gpu *gpu,
const struct gen7_sptp_cluster_registers *dbgahb_clusters;
unsigned dbgahb_clusters_size;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
dbgahb_clusters = gen7_0_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_0_0_sptp_clusters);
- } else {
- BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
dbgahb_clusters = gen7_2_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_2_0_sptp_clusters);
+ } else {
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
+ dbgahb_clusters = gen7_9_0_sptp_clusters;
+ dbgahb_clusters_size = ARRAY_SIZE(gen7_9_0_sptp_clusters);
}
a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
@@ -818,14 +821,14 @@ static void a7xx_get_clusters(struct msm_gpu *gpu,
const struct gen7_cluster_registers *clusters;
unsigned clusters_size;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
clusters = gen7_0_0_clusters;
clusters_size = ARRAY_SIZE(gen7_0_0_clusters);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
clusters = gen7_2_0_clusters;
clusters_size = ARRAY_SIZE(gen7_2_0_clusters);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
clusters = gen7_9_0_clusters;
clusters_size = ARRAY_SIZE(gen7_9_0_clusters);
}
@@ -893,7 +896,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
return;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 3);
}
@@ -923,7 +926,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
datasize);
out:
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 0);
}
}
@@ -956,14 +959,14 @@ static void a7xx_get_shaders(struct msm_gpu *gpu,
unsigned num_shader_blocks;
int i;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
shader_blocks = gen7_0_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
shader_blocks = gen7_2_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
shader_blocks = gen7_9_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks);
}
@@ -1348,14 +1351,14 @@ static void a7xx_get_registers(struct msm_gpu *gpu,
const u32 *pre_crashdumper_regs;
const struct gen7_reg_list *reglist;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
reglist = gen7_0_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
reglist = gen7_2_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
reglist = gen7_9_0_reg_list;
pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers;
}
@@ -1405,8 +1408,7 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu,
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const u32 *regs;
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) ||
- adreno_is_a750(adreno_gpu)));
+ BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
regs = gen7_0_0_post_crashdumper_registers;
a7xx_get_ahb_gpu_registers(gpu,
@@ -1514,11 +1516,11 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
const struct a6xx_indexed_registers *indexed_regs;
int i, indexed_count, mempool_count;
- if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) {
+ if (adreno_gpu->info->family <= ADRENO_7XX_GEN2) {
indexed_regs = a7xx_indexed_reglist;
indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
indexed_regs = gen7_9_0_cp_indexed_reg_list;
indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list);
}
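[editor's note] Switching the crash-dump code from per-model predicates to ordered family comparisons means a new GEN2-class part is handled without touching every call site; the enum ordering (GEN1 < GEN2 < GEN3) is what makes the `<=` and `>` checks meaningful. The same dispatch, reduced to a sketch that assumes a similarly ordered enum:

#include <stdio.h>

enum family { ADRENO_7XX_GEN1, ADRENO_7XX_GEN2, ADRENO_7XX_GEN3 };

static const char *reglist_for(enum family f)
{
	if (f == ADRENO_7XX_GEN1)
		return "gen7_0_0_reg_list";
	if (f == ADRENO_7XX_GEN2)
		return "gen7_2_0_reg_list";
	/* anything else must be GEN3, as the BUG_ON() asserts */
	return "gen7_9_0_reg_list";
}

int main(void)
{
	for (enum family f = ADRENO_7XX_GEN1; f <= ADRENO_7XX_GEN3; f++)
		printf("family %d -> %s\n", f, reglist_for(f));
	return 0;
}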
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 260d66eccfec..9a327d543f27 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -1303,7 +1303,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
{ "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index ecc3fc5cec22..465a4cd14a43 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -379,6 +379,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_RAYTRACING:
*value = adreno_gpu->has_ray_tracing;
return 0;
+ case MSM_PARAM_UBWC_SWIZZLE:
+ *value = adreno_gpu->ubwc_config.ubwc_swizzle;
+ return 0;
+ case MSM_PARAM_MACROTILE_MODE:
+ *value = adreno_gpu->ubwc_config.macrotile_mode;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
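[editor's note] With these two cases added, userspace can query the UBWC swizzle and macrotile settings through the existing parameter ioctl (DRM_IOCTL_MSM_GET_PARAM with the MSM_PARAM_* values from msm_drm.h). A sketch of the consumer side; the stub below stands in for the real ioctl round-trip and the parameter IDs are placeholders:

#include <stdint.h>
#include <stdio.h>

/* Placeholder IDs; the real ones live in include/uapi/drm/msm_drm.h */
enum { PARAM_UBWC_SWIZZLE, PARAM_MACROTILE_MODE };

/* Stub standing in for the DRM_IOCTL_MSM_GET_PARAM round-trip */
static int get_param(unsigned param, uint64_t *value)
{
	static const uint64_t vals[] = { 0x6, 1 };	/* sample answers */
	*value = vals[param];
	return 0;
}

int main(void)
{
	uint64_t swizzle, macrotile;

	get_param(PARAM_UBWC_SWIZZLE, &swizzle);
	get_param(PARAM_MACROTILE_MODE, &macrotile);
	printf("UBWC swizzle %#llx, %s-channel macrotiling\n",
	       (unsigned long long)swizzle, macrotile ? "8" : "4");
	return 0;
}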
@@ -478,7 +484,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
- newname);
+ fwname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
@@ -688,11 +694,9 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
size = j + 1;
if (size) {
- state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
- if (state->ring[i].data) {
- memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL);
+ if (state->ring[i].data)
state->ring[i].data_size = size << 2;
- }
}
}
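[editor's note] kvmemdup() collapses the allocate-then-copy pair into one call; as before, data_size is only set when the allocation succeeds. A userspace analogue of the helper's contract:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kvmemdup(): NULL on allocation failure,
 * otherwise an independent copy of src[0..len). */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	return p ? memcpy(p, src, len) : NULL;
}

int main(void)
{
	const char ring[] = "ring contents";
	char *copy = memdup(ring, sizeof(ring));

	if (copy) {
		printf("%s\n", copy);
		free(copy);
	}
	return 0;
}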
@@ -1083,6 +1087,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->chip_id = config->chip_id;
gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
+ gpu->pdev = pdev;
/* Only handle the core clock when GMU is not in use (or is absent). */
if (adreno_has_gmu_wrapper(adreno_gpu) ||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 1ab523a163a0..58d7e7915c57 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -191,12 +191,42 @@ struct adreno_gpu {
const struct firmware *fw[ADRENO_FW_MAX];
struct {
+ /**
+ * @rgb565_predicator: Unknown, introduced with A650 family,
+ * related to UBWC mode/ver 4
+ */
u32 rgb565_predicator;
+ /** @uavflagprd_inv: Unknown, introduced with A650 family */
u32 uavflagprd_inv;
+ /** @min_acc_len: Whether the minimum access length is 64 bits */
u32 min_acc_len;
- u32 ubwc_mode;
+ /**
+ * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
+ *
+ * UBWC 1.0 always enables all three levels.
+ * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
+ * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
+ *
+ * This is a bitmask where BIT(0) enables level 1, BIT(1)
+ * controls level 2, and BIT(2) enables level 3.
+ */
+ u32 ubwc_swizzle;
+ /**
+ * @highest_bank_bit: Highest Bank Bit
+ *
+ * The Highest Bank Bit value represents the bit of the highest
+ * DDR bank. This should ideally use DRAM type detection.
+ */
u32 highest_bank_bit;
u32 amsbc;
+ /**
+ * @macrotile_mode: Macrotile Mode
+ *
+ * Whether to use 4-channel macrotiling mode or the newer
+ * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
+ * 4-channel and 1 is 8-channel.
+ */
+ u32 macrotile_mode;
} ubwc_config;
/*
@@ -294,6 +324,12 @@ static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 307);
}
+static inline bool adreno_is_a306a(const struct adreno_gpu *gpu)
+{
+ /* a306a (marketing name is a308) */
+ return adreno_is_revn(gpu, 308);
+}
+
static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 320);
@@ -384,6 +420,11 @@ static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}
+static inline int adreno_is_a621(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06020100;
+}
+
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);
@@ -433,7 +474,13 @@ static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
return adreno_is_a610(gpu) || adreno_is_a702(gpu);
}
-/* check for a615, a616, a618, a619 or any a630 derivatives */
+/* TODO: 615/616 */
+static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
+{
+ return adreno_is_a618(gpu) ||
+ adreno_is_a619(gpu);
+}
+
static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 145f3d5953a3..6ccfde82fecd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sm8150_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -290,6 +291,21 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = {
},
};
+static const struct dpu_wb_cfg sm8150_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm8150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -384,6 +400,8 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
.merge_3d = sm8150_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8150_wb),
+ .wb = sm8150_wb,
.intf_count = ARRAY_SIZE(sm8150_intf),
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index 9e3bec8bc121..bab19ddd1d4f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sc8180x_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -297,6 +298,21 @@ static const struct dpu_dsc_cfg sc8180x_dsc[] = {
},
};
+static const struct dpu_wb_cfg sc8180x_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -410,6 +426,8 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.pingpong = sc8180x_pp,
.merge_3d_count = ARRAY_SIZE(sc8180x_merge_3d),
.merge_3d = sc8180x_merge_3d,
+ .wb_count = ARRAY_SIZE(sc8180x_wb),
+ .wb = sc8180x_wb,
.intf_count = ARRAY_SIZE(sc8180x_intf),
.intf = sc8180x_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index 76b2ec0d2489..d039b96beb97 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6125_mdp = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -139,6 +140,21 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
},
};
+static const struct dpu_wb_cfg sm6125_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 2160,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6125_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -210,6 +226,8 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = {
.dspp = sm6125_dspp,
.pingpong_count = ARRAY_SIZE(sm6125_pp),
.pingpong = sm6125_pp,
+ .wb_count = ARRAY_SIZE(sm6125_wb),
+ .wb = sm6125_wb,
.intf_count = ARRAY_SIZE(sm6125_intf),
.intf = sm6125_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index e17a30be7525..0502cee2f116 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -26,6 +26,7 @@ static const struct dpu_mdp_cfg sm6350_mdp = {
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
@@ -145,6 +146,21 @@ static const struct dpu_dsc_cfg sm6350_dsc[] = {
},
};
+static const struct dpu_wb_cfg sm6350_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 1920,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -218,6 +234,8 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.dsc = sm6350_dsc,
.pingpong_count = ARRAY_SIZE(sm6350_pp),
.pingpong = sm6350_pp,
+ .wb_count = ARRAY_SIZE(sm6350_wb),
+ .wb = sm6350_wb,
.intf_count = ARRAY_SIZE(sm6350_intf),
.intf = sm6350_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index b26d5fe40c72..febc3e764a63 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -231,7 +231,7 @@ struct dpu_crtc_state {
container_of(x, struct dpu_crtc_state, base)
/**
- * dpu_crtc_frame_pending - retun the number of pending frames
+ * dpu_crtc_frame_pending - return the number of pending frames
* @crtc: Pointer to drm crtc object
*/
static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 648c8d0a4c36..dcb4fd85e73b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -96,14 +96,16 @@
#define INTF_SC7280_MASK (INTF_SC7180_MASK)
-#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
+#define WB_SDM845_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
BIT(DPU_WB_PIPE_ALPHA) | \
BIT(DPU_WB_XY_ROI_OFFSET) | \
BIT(DPU_WB_QOS) | \
BIT(DPU_WB_QOS_8LVL) | \
- BIT(DPU_WB_CDP) | \
+ BIT(DPU_WB_CDP))
+
+#define WB_SM8250_MASK (WB_SDM845_MASK | \
BIT(DPU_WB_INPUT_CTRL))
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
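[editor's note] Expressing WB_SM8250_MASK as WB_SDM845_MASK plus the one extra feature bit keeps the two in sync by construction. The composition pattern, reduced to its essentials (feature names here are illustrative):

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define FEAT_LINE_MODE		0
#define FEAT_INPUT_CTRL		7

#define WB_BASE_MASK		(BIT(FEAT_LINE_MODE))	/* ...plus the other shared bits */
#define WB_NEWER_MASK		(WB_BASE_MASK | BIT(FEAT_INPUT_CTRL))

int main(void)
{
	printf("base=%#x newer=%#x\n", WB_BASE_MASK, WB_NEWER_MASK);
	return 0;
}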
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 6e2ac50b94a4..0f40eea7f5e2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/bitfield.h>
+
#include <drm/drm_managed.h>
#include "dpu_hwio.h"
@@ -231,8 +233,38 @@ static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
}
+static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp,
+ enum dpu_dp_phy_sel phys[2])
+{
+ struct dpu_hw_blk_reg_map *c = &mdp->hw;
+ unsigned int intf;
+ u32 sel = 0;
+
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF0, phys[0]);
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF1, phys[1]);
+
+ for (intf = 0; intf < 2; intf++) {
+ switch (phys[intf]) {
+ case DPU_DP_PHY_0:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY0, intf + 1);
+ break;
+ case DPU_DP_PHY_1:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY1, intf + 1);
+ break;
+ case DPU_DP_PHY_2:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY2, intf + 1);
+ break;
+ default:
+ /* ignore */
+ break;
+ }
+ }
+
+ DPU_REG_WRITE(c, MDP_DP_PHY_INTF_SEL, sel);
+}
+
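[editor's note] The register encodes both directions of the mapping: which PHY each INTF drives (bits 5:0) and, one-based, which INTF feeds each PHY (bits 14:6). A standalone version of the packing with FIELD_PREP expanded by hand, using the mask layout from the dpu_hwio.h hunk further down:

#include <stdint.h>
#include <stdio.h>

enum phy_sel { PHY_NONE, PHY_0, PHY_1, PHY_2 };

static uint32_t dp_phy_intf_sel(const enum phy_sel phys[2])
{
	uint32_t sel = 0;

	sel |= (uint32_t)phys[0] << 0;	/* MDP_DP_PHY_INTF_SEL_INTF0: bits 2:0 */
	sel |= (uint32_t)phys[1] << 3;	/* MDP_DP_PHY_INTF_SEL_INTF1: bits 5:3 */

	for (unsigned i = 0; i < 2; i++) {
		switch (phys[i]) {
		case PHY_0: sel |= (i + 1) << 6;  break;	/* bits 8:6 */
		case PHY_1: sel |= (i + 1) << 9;  break;	/* bits 11:9 */
		case PHY_2: sel |= (i + 1) << 12; break;	/* bits 14:12 */
		default:    break;				/* PHY_NONE: leave 0 */
		}
	}
	return sel;
}

int main(void)
{
	enum phy_sel phys[2] = { PHY_0, PHY_1 };	/* the SC8180X mapping */

	printf("MDP_DP_PHY_INTF_SEL = %#x\n", dp_phy_intf_sel(phys));	/* 0x451 */
	return 0;
}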
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
- unsigned long cap)
+ unsigned long cap, const struct dpu_mdss_version *mdss_rev)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
@@ -245,6 +277,9 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->get_safe_status = dpu_hw_get_safe_status;
+ if (mdss_rev->core_major_ver >= 5)
+ ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel;
+
if (cap & BIT(DPU_MDP_AUDIO_SELECT))
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
@@ -252,7 +287,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_mdp *mdp;
@@ -270,7 +305,7 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
* Assign ops
*/
mdp->caps = cfg;
- _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev);
return mdp;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 5c9a7ede991e..f1ab9fd106e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -67,6 +67,13 @@ struct dpu_vsync_source_cfg {
enum dpu_vsync_source vsync_source;
};
+enum dpu_dp_phy_sel {
+ DPU_DP_PHY_NONE,
+ DPU_DP_PHY_0,
+ DPU_DP_PHY_1,
+ DPU_DP_PHY_2,
+};
+
/**
* struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
* Assumption is these functions will be called after clocks are enabled.
@@ -126,6 +133,13 @@ struct dpu_hw_mdp_ops {
struct dpu_danger_safe_status *status);
/**
+ * dp_phy_intf_sel - configure intf to phy mapping
+ * @mdp: mdp top context driver
+ * @phys: list of phys the DP interfaces should be connected to. 0 disables the INTF.
+ */
+ void (*dp_phy_intf_sel)(struct dpu_hw_mdp *mdp, enum dpu_dp_phy_sel phys[2]);
+
+ /**
* intf_audio_select - select the external interface for audio
* @mdp: mdp top context driver
*/
@@ -148,12 +162,12 @@ struct dpu_hw_mdp {
* @dev: Corresponding device for devres management
* @cfg: MDP TOP configuration from catalog
* @addr: Mapped register io address of MDP
- * @m: Pointer to mdss catalog data
+ * @mdss_rev: dpu core's major and minor versions
*/
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_version *mdss_rev);
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
index 5acd5683d25a..054fe097ebf8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -60,6 +60,13 @@
#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
#define DCE_SEL 0x450
+#define MDP_DP_PHY_INTF_SEL 0x460
+#define MDP_DP_PHY_INTF_SEL_INTF0 GENMASK(2, 0)
+#define MDP_DP_PHY_INTF_SEL_INTF1 GENMASK(5, 3)
+#define MDP_DP_PHY_INTF_SEL_PHY0 GENMASK(8, 6)
+#define MDP_DP_PHY_INTF_SEL_PHY1 GENMASK(11, 9)
+#define MDP_DP_PHY_INTF_SEL_PHY2 GENMASK(14, 12)
+
#define MDP_PERIPH_TOP0 MDP_WD_TIMER_0_CTL
#define MDP_PERIPH_TOP0_END CLK_CTRL3
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index d1e2143110f2..9bcae53c4f45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1146,7 +1146,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
dpu_kms->catalog->mdp,
dpu_kms->mmio,
- dpu_kms->catalog);
+ dpu_kms->catalog->mdss_ver);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
@@ -1181,6 +1181,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto err_pm_put;
}
+ /*
+ * We need to program DP <-> PHY relationship only for SC8180X since it
+ * has fewer DP controllers than DP PHYs.
+ * If any other platform requires the same kind of programming, or if
+ * the INTF <-> DP relationship isn't static anymore, this needs to be
+ * configured through the DT.
+ */
+ if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,sc8180x-dpu"))
+ dpu_kms->hw_mdp->ops.dp_phy_intf_sel(dpu_kms->hw_mdp, (unsigned int[]){ 1, 2, });
+
dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_intr)) {
rc = PTR_ERR(dpu_kms->hw_intr);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 3a7f7edda96b..500b7dc895d0 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -351,7 +351,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p,
drm_printf(p, "%s:%d\t%d\t%s\n",
pipe2name(pipe), j, inuse,
- plane ? plane->name : NULL);
+ plane ? plane->name : "(null)");
total += inuse;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 9622e58dce3e..e1228fb093ee 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -119,7 +119,7 @@ struct msm_dp_desc {
};
static const struct msm_dp_desc sc7180_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
@@ -130,9 +130,9 @@ static const struct msm_dp_desc sc7280_dp_descs[] = {
};
static const struct msm_dp_desc sc8180x_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
- { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1 },
- { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2 },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
{}
};
@@ -149,7 +149,7 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = {
};
static const struct msm_dp_desc sm8650_dp_descs[] = {
- { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0 },
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 3b59137ca674..031446c87dae 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
- } else {
+ } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
if (pll_freq <= 1000000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
@@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
+ } else {
+ /* 4.2, 4.3 */
+ if (pll_freq <= 1000000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq <= 3500000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
}
config->decimal_div_start = dec;
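[editor's note] After this change there are three explicit branches: pre-V4.1 PHYs, V4.1, and the 4.2/4.3 fallthrough, the latter two sharing the same frequency thresholds. The 4.2/4.3 mapping as a pure function, assuming the thresholds above are exhaustive:

#include <stdint.h>
#include <stdio.h>

/* pll_clock_inverters selection for the 4.2/4.3 branch of the patch */
static uint8_t inverters_v42(uint64_t pll_freq)
{
	if (pll_freq <= 1000000000ULL)
		return 0xa0;
	if (pll_freq <= 2500000000ULL)
		return 0x20;
	if (pll_freq <= 3500000000ULL)
		return 0x00;
	return 0x40;
}

int main(void)
{
	uint64_t samples[] = { 900000000ULL, 2000000000ULL,
			       3000000000ULL, 4000000000ULL };

	for (unsigned i = 0; i < 4; i++)
		printf("%llu Hz -> %#x\n",
		       (unsigned long long)samples[i], inverters_v42(samples[i]));
	return 0;
}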
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 24abcb7254cc..0bfee41c2e71 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -549,6 +549,7 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id msm_hdmi_dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8974_config },
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 4586baf36415..a62d2aedfbb7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -137,6 +137,7 @@ enum hdmi_phy_type {
MSM_HDMI_PHY_8960,
MSM_HDMI_PHY_8x74,
MSM_HDMI_PHY_8996,
+ MSM_HDMI_PHY_8998,
MSM_HDMI_PHY_MAX,
};
@@ -154,6 +155,7 @@ extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg;
struct hdmi_phy {
struct platform_device *pdev;
@@ -184,6 +186,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
#ifdef CONFIG_COMMON_CLK
int msm_hdmi_pll_8960_init(struct platform_device *pdev);
int msm_hdmi_pll_8996_init(struct platform_device *pdev);
+int msm_hdmi_pll_8998_init(struct platform_device *pdev);
#else
static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
{
@@ -194,6 +197,11 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
return -ENODEV;
}
+
+static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
#endif
/*
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 88a3423b7f24..95b3f7535d84 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -118,6 +118,9 @@ static int msm_hdmi_phy_pll_init(struct platform_device *pdev,
case MSM_HDMI_PHY_8996:
ret = msm_hdmi_pll_8996_init(pdev);
break;
+ case MSM_HDMI_PHY_8998:
+ ret = msm_hdmi_pll_8998_init(pdev);
+ break;
/*
* we don't have PLL support for these, don't report an error for now
*/
@@ -193,6 +196,8 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = {
.data = &msm_hdmi_phy_8x74_cfg },
{ .compatible = "qcom,hdmi-phy-8996",
.data = &msm_hdmi_phy_8996_cfg },
+ { .compatible = "qcom,hdmi-phy-8998",
+ .data = &msm_hdmi_phy_8998_cfg },
{}
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
new file mode 100644
index 000000000000..0e3a2b16a2ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Freebox SAS
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "hdmi.h"
+
+#define HDMI_VCO_MAX_FREQ 12000000000UL
+#define HDMI_VCO_MIN_FREQ 8000000000UL
+
+#define HDMI_PCLK_MAX_FREQ 600000000
+#define HDMI_PCLK_MIN_FREQ 25000000
+
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL
+#define HDMI_CORECLK_DIV 5
+#define HDMI_DEFAULT_REF_CLOCK 19200000
+#define HDMI_PLL_CMP_CNT 1024
+
+#define HDMI_PLL_POLL_MAX_READS 100
+#define HDMI_PLL_POLL_TIMEOUT_US 150
+
+#define HDMI_NUM_TX_CHANNEL 4
+
+struct hdmi_pll_8998 {
+ struct platform_device *pdev;
+ struct clk_hw clk_hw;
+ unsigned long rate;
+
+ /* pll mmio base */
+ void __iomem *mmio_qserdes_com;
+ /* tx channel base */
+ void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL];
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8998, clk_hw)
+
+struct hdmi_8998_phy_pll_reg_cfg {
+ u32 com_svs_mode_clk_sel;
+ u32 com_hsclk_sel;
+ u32 com_pll_cctrl_mode0;
+ u32 com_pll_rctrl_mode0;
+ u32 com_cp_ctrl_mode0;
+ u32 com_dec_start_mode0;
+ u32 com_div_frac_start1_mode0;
+ u32 com_div_frac_start2_mode0;
+ u32 com_div_frac_start3_mode0;
+ u32 com_integloop_gain0_mode0;
+ u32 com_integloop_gain1_mode0;
+ u32 com_lock_cmp_en;
+ u32 com_lock_cmp1_mode0;
+ u32 com_lock_cmp2_mode0;
+ u32 com_lock_cmp3_mode0;
+ u32 com_core_clk_en;
+ u32 com_coreclk_div_mode0;
+
+ u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_pre_driver_1[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_pre_driver_2[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_res_code_offset[HDMI_NUM_TX_CHANNEL];
+
+ u32 phy_mode;
+};
+
+struct hdmi_8998_post_divider {
+ u64 vco_freq;
+ int hsclk_divsel;
+ int vco_ratio;
+ int tx_band_sel;
+ int half_rate_mode;
+};
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8998 *pll)
+{
+ return platform_get_drvdata(pll->pdev);
+}
+
+static inline void hdmi_pll_write(struct hdmi_pll_8998 *pll, int offset,
+ u32 data)
+{
+ writel(data, pll->mmio_qserdes_com + offset);
+}
+
+static inline u32 hdmi_pll_read(struct hdmi_pll_8998 *pll, int offset)
+{
+ return readl(pll->mmio_qserdes_com + offset);
+}
+
+static inline void hdmi_tx_chan_write(struct hdmi_pll_8998 *pll, int channel,
+ int offset, int data)
+{
+ writel(data, pll->mmio_qserdes_tx[channel] + offset);
+}
+
+static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
+ bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x8;
+
+ return 0x30;
+}
+
+static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x16;
+
+ return 0x18;
+}
+
+static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x34;
+
+ return 0x2;
+}
+
+static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
+ bool gen_ssc)
+{
+ int digclk_divsel = bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+ u64 base;
+
+ if ((frac_start != 0) || gen_ssc)
+ base = 0x3F;
+ else
+ base = 0xC4;
+
+ base <<= (digclk_divsel == 2 ? 1 : 0);
+
+ return (base <= 2046 ? base : 2046);
+}
+
+static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
+{
+ u64 dividend = HDMI_PLL_CMP_CNT * fdata;
+ u32 divisor = ref_clk * 10;
+ u32 rem;
+
+ rem = do_div(dividend, divisor);
+ if (rem > (divisor >> 1))
+ dividend++;
+
+ return dividend - 1;
+}
+
+static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
+{
+ u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
+
+ do_div(fdata, HDMI_PLL_CMP_CNT);
+
+ return fdata;
+}
+
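[editor's note] pll_get_pll_cmp() is a rounded division minus one, cmp = round(1024 * fdata / (ref * 10)) - 1, and pll_cmp_to_fdata() is its approximate inverse. A quick check of the round-trip for the 19.2 MHz reference (sample fdata chosen for illustration):

#include <stdint.h>
#include <stdio.h>

#define REF_CLK		19200000ULL
#define CMP_CNT		1024ULL

static uint32_t pll_cmp(uint64_t fdata)
{
	uint64_t dividend = CMP_CNT * fdata;
	uint64_t divisor = REF_CLK * 10;
	uint64_t q = dividend / divisor, rem = dividend % divisor;

	if (rem > divisor / 2)		/* round to nearest, as the patch does */
		q++;
	return (uint32_t)(q - 1);
}

static uint64_t cmp_to_fdata(uint32_t cmp)
{
	return (uint64_t)cmp * REF_CLK * 10 / CMP_CNT;
}

int main(void)
{
	uint64_t fdata = 1485000000ULL;	/* 148.5 MHz pixel clock x 10 */
	uint32_t cmp = pll_cmp(fdata);	/* 7919 */

	/* the inverse is off by roughly one LSB of the comparator count */
	printf("cmp=%u, fdata back=%llu\n", cmp,
	       (unsigned long long)cmp_to_fdata(cmp));
	return 0;
}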
+#define HDMI_REF_CLOCK_HZ ((u64)19200000)
+#define HDMI_MHZ_TO_HZ ((u64)1000000)
+static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk)
+{
+ u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
+ 9, 10, 12, 15, 25};
+ u32 const band_list[] = {0, 1, 2, 3};
+ u32 const sz_ratio = ARRAY_SIZE(ratio_list);
+ u32 const sz_band = ARRAY_SIZE(band_list);
+ u32 const cmp_cnt = 1024;
+ u32 const th_min = 500, th_max = 1000;
+ u32 half_rate_mode = 0;
+ u32 list_elements;
+ int optimal_index;
+ u32 i, j, k;
+ u32 found_hsclk_divsel = 0, found_vco_ratio;
+ u32 found_tx_band_sel;
+ u64 const min_freq = HDMI_VCO_MIN_FREQ, max_freq = HDMI_VCO_MAX_FREQ;
+ u64 freq_list[ARRAY_SIZE(ratio_list) * ARRAY_SIZE(band_list)];
+ u64 found_vco_freq;
+ u64 freq_optimal;
+
+find_optimal_index:
+ freq_optimal = max_freq;
+ optimal_index = -1;
+ list_elements = 0;
+
+ for (i = 0; i < sz_ratio; i++) {
+ for (j = 0; j < sz_band; j++) {
+ u64 freq = div_u64(bclk, (1 << half_rate_mode));
+
+ freq *= (ratio_list[i] * (1 << band_list[j]));
+ freq_list[list_elements++] = freq;
+ }
+ }
+
+ for (k = 0; k < ARRAY_SIZE(freq_list); k++) {
+ u32 const clks_pll_div = 2, core_clk_div = 5;
+ u32 const rng1 = 16, rng2 = 8;
+ u32 th1, th2;
+ u64 core_clk, rvar1, rem;
+
+ core_clk = div_u64(freq_list[k],
+ ratio_list[k / sz_band] * clks_pll_div *
+ core_clk_div);
+
+ rvar1 = HDMI_REF_CLOCK_HZ * rng1 * HDMI_MHZ_TO_HZ;
+ rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
+ if (rem > ((cmp_cnt * core_clk) >> 1))
+ rvar1++;
+ th1 = rvar1;
+
+ rvar1 = HDMI_REF_CLOCK_HZ * rng2 * HDMI_MHZ_TO_HZ;
+ rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
+ if (rem > ((cmp_cnt * core_clk) >> 1))
+ rvar1++;
+ th2 = rvar1;
+
+ if (freq_list[k] >= min_freq &&
+ freq_list[k] <= max_freq) {
+ if ((th1 >= th_min && th1 <= th_max) ||
+ (th2 >= th_min && th2 <= th_max)) {
+ if (freq_list[k] <= freq_optimal) {
+ freq_optimal = freq_list[k];
+ optimal_index = k;
+ }
+ }
+ }
+ }
+
+ if (optimal_index == -1) {
+ if (!half_rate_mode) {
+ half_rate_mode = 1;
+ goto find_optimal_index;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ found_vco_ratio = ratio_list[optimal_index / sz_band];
+ found_tx_band_sel = band_list[optimal_index % sz_band];
+ found_vco_freq = freq_optimal;
+ }
+
+ switch (found_vco_ratio) {
+ case 1:
+ found_hsclk_divsel = 15;
+ break;
+ case 2:
+ found_hsclk_divsel = 0;
+ break;
+ case 3:
+ found_hsclk_divsel = 4;
+ break;
+ case 4:
+ found_hsclk_divsel = 8;
+ break;
+ case 5:
+ found_hsclk_divsel = 12;
+ break;
+ case 6:
+ found_hsclk_divsel = 1;
+ break;
+ case 9:
+ found_hsclk_divsel = 5;
+ break;
+ case 10:
+ found_hsclk_divsel = 2;
+ break;
+ case 12:
+ found_hsclk_divsel = 9;
+ break;
+ case 15:
+ found_hsclk_divsel = 13;
+ break;
+ case 25:
+ found_hsclk_divsel = 14;
+ break;
+ }
+
+ pd->vco_freq = found_vco_freq;
+ pd->tx_band_sel = found_tx_band_sel;
+ pd->vco_ratio = found_vco_ratio;
+ pd->hsclk_divsel = found_hsclk_divsel;
+
+ return 0;
+}
+
+static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk,
+ struct hdmi_8998_phy_pll_reg_cfg *cfg)
+{
+ struct hdmi_8998_post_divider pd;
+ u64 bclk;
+ u64 dec_start;
+ u64 frac_start;
+ u64 fdata;
+ u32 pll_divisor;
+ u32 rem;
+ u32 cpctrl;
+ u32 rctrl;
+ u32 cctrl;
+ u32 integloop_gain;
+ u32 pll_cmp;
+ int i, ret;
+
+ /* bit clk = 10 * pix_clk */
+ bclk = ((u64)pix_clk) * 10;
+
+ ret = pll_get_post_div(&pd, bclk);
+ if (ret)
+ return ret;
+
+ dec_start = pd.vco_freq;
+ pll_divisor = 4 * ref_clk;
+ do_div(dec_start, pll_divisor);
+
+ frac_start = pd.vco_freq * (1 << 20);
+
+ rem = do_div(frac_start, pll_divisor);
+ frac_start -= dec_start * (1 << 20);
+ if (rem > (pll_divisor >> 1))
+ frac_start++;
+
+ cpctrl = pll_get_cpctrl(frac_start, ref_clk, false);
+ rctrl = pll_get_rctrl(frac_start, false);
+ cctrl = pll_get_cctrl(frac_start, false);
+ integloop_gain = pll_get_integloop_gain(frac_start, bclk,
+ ref_clk, false);
+
+ fdata = pd.vco_freq;
+ do_div(fdata, pd.vco_ratio);
+
+ pll_cmp = pll_get_pll_cmp(fdata, ref_clk);
+
+ /* Convert these values to register specific values */
+ if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+ cfg->com_svs_mode_clk_sel = 1;
+ else
+ cfg->com_svs_mode_clk_sel = 2;
+
+ cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel);
+ cfg->com_pll_cctrl_mode0 = cctrl;
+ cfg->com_pll_rctrl_mode0 = rctrl;
+ cfg->com_cp_ctrl_mode0 = cpctrl;
+ cfg->com_dec_start_mode0 = dec_start;
+ cfg->com_div_frac_start1_mode0 = (frac_start & 0xff);
+ cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8);
+ cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16);
+ cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff);
+ cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8);
+ cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff);
+ cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8);
+ cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+ cfg->com_lock_cmp_en = 0x0;
+ cfg->com_core_clk_en = 0x2c;
+ cfg->com_coreclk_div_mode0 = HDMI_CORECLK_DIV;
+ cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x5 : 0x4;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+ cfg->tx_lx_tx_band[i] = pd.tx_band_sel;
+
+ if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x02;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x1C;
+ cfg->tx_lx_pre_driver_2[1] = 0x1C;
+ cfg->tx_lx_pre_driver_2[2] = 0x1C;
+ cfg->tx_lx_pre_driver_2[3] = 0x00;
+ cfg->tx_lx_res_code_offset[0] = 0x03;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x03;
+ } else if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x16;
+ cfg->tx_lx_pre_driver_2[1] = 0x16;
+ cfg->tx_lx_pre_driver_2[2] = 0x16;
+ cfg->tx_lx_pre_driver_2[3] = 0x18;
+ cfg->tx_lx_res_code_offset[0] = 0x03;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ } else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x0E;
+ cfg->tx_lx_pre_driver_2[1] = 0x0E;
+ cfg->tx_lx_pre_driver_2[2] = 0x0E;
+ cfg->tx_lx_pre_driver_2[3] = 0x0E;
+ cfg->tx_lx_res_code_offset[0] = 0x00;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ } else {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x16;
+ cfg->tx_lx_pre_driver_2[1] = 0x16;
+ cfg->tx_lx_pre_driver_2[2] = 0x16;
+ cfg->tx_lx_pre_driver_2[3] = 0x18;
+ cfg->tx_lx_res_code_offset[0] = 0x00;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ }
+
+ return 0;
+}
+
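[editor's note] dec_start and frac_start are the integer and 20-bit fractional parts of vco_freq / (4 * ref_clk), with round-to-nearest applied to the fraction. A worked check against the 19.2 MHz reference (the VCO target is illustrative):

#include <stdint.h>
#include <stdio.h>

#define REF_CLK		19200000ULL

int main(void)
{
	uint64_t vco_freq = 8910000000ULL;	/* illustrative VCO target */
	uint64_t pll_divisor = 4 * REF_CLK;	/* 76.8 MHz */
	uint64_t dec_start = vco_freq / pll_divisor;
	uint64_t frac = vco_freq * (1 << 20);
	uint64_t rem = frac % pll_divisor;

	frac = frac / pll_divisor - dec_start * (1 << 20);
	if (rem > pll_divisor / 2)		/* round to nearest */
		frac++;

	/* 116 + 16384/2^20 = 116.015625 = 8.91 GHz / 76.8 MHz */
	printf("dec_start=%llu frac_start=%llu\n",
	       (unsigned long long)dec_start, (unsigned long long)frac);
	return 0;
}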
+static int hdmi_8998_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ struct hdmi_8998_phy_pll_reg_cfg cfg = {};
+ int i, ret;
+
+ ret = pll_calculate(rate, parent_rate, &cfg);
+ if (ret) {
+ DRM_ERROR("PLL calculation failed\n");
+ return ret;
+ }
+
+ /* Initially shut down PHY */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x0);
+ udelay(500);
+
+ /* Power up sequence */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x1);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20);
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CMN_CTRL, 0x6);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_INTERFACE_SELECT_TX_BAND,
+ cfg.tx_lx_tx_band[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_CLKBUF_TERM_ENABLE,
+ 0x1);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_MODE,
+ 0x20);
+ }
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x02);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x0B);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E);
+
+ /* Bypass VCO calibration */
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SVS_MODE_CLK_SEL,
+ cfg.com_svs_mode_clk_sel);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_IVCO, 0x07);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_SEL, 0x30);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_HSCLK_SEL,
+ cfg.com_hsclk_sel);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP_EN,
+ cfg.com_lock_cmp_en);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_CCTRL_MODE0,
+ cfg.com_pll_cctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_RCTRL_MODE0,
+ cfg.com_pll_rctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CP_CTRL_MODE0,
+ cfg.com_cp_ctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DEC_START_MODE0,
+ cfg.com_dec_start_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0,
+ cfg.com_div_frac_start1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0,
+ cfg.com_div_frac_start2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0,
+ cfg.com_div_frac_start3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+ cfg.com_integloop_gain0_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+ cfg.com_integloop_gain1_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP1_MODE0,
+ cfg.com_lock_cmp1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP2_MODE0,
+ cfg.com_lock_cmp2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP3_MODE0,
+ cfg.com_lock_cmp3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORE_CLK_EN,
+ cfg.com_core_clk_en);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORECLK_DIV_MODE0,
+ cfg.com_coreclk_div_mode0);
+
+ /* TX lanes setup (TX 0/1/2/3) */
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_DRV_LVL,
+ cfg.tx_lx_tx_drv_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_EMP_POST1_LVL,
+ cfg.tx_lx_tx_emp_post1_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_PRE_DRIVER_1,
+ cfg.tx_lx_pre_driver_1[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_PRE_DRIVER_2,
+ cfg.tx_lx_pre_driver_2[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_DRV_LVL_RES_CODE_OFFSET,
+ cfg.tx_lx_res_code_offset[i]);
+ }
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_MODE, cfg.phy_mode);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_CONFIG,
+ 0x10);
+ }
+
+ /*
+ * Ensure that vco configuration gets flushed to hardware before
+ * enabling the PLL
+ */
+ wmb();
+
+ pll->rate = rate;
+
+ return 0;
+}
+
+static int hdmi_8998_phy_ready_status(struct hdmi_phy *phy)
+{
+ u32 nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ u32 status;
+ int phy_ready = 0;
+
+ while (nb_tries--) {
+ status = hdmi_phy_read(phy, REG_HDMI_8998_PHY_STATUS);
+ phy_ready = status & BIT(0);
+
+ if (phy_ready)
+ break;
+
+ udelay(timeout);
+ }
+
+ return phy_ready;
+}
+
+static int hdmi_8998_pll_lock_status(struct hdmi_pll_8998 *pll)
+{
+ u32 status;
+ int nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ int pll_locked = 0;
+
+ while (nb_tries--) {
+ status = hdmi_pll_read(pll,
+ REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout);
+ }
+
+ return pll_locked;
+}
+
+static int hdmi_8998_pll_prepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ int i, ret = 0;
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x1);
+ udelay(100);
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59);
+ udelay(100);
+
+ ret = hdmi_8998_pll_lock_status(pll);
+ if (!ret)
+ return ret;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_CONFIG, 0x1F);
+ }
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ ret = hdmi_8998_phy_ready_status(phy);
+ if (!ret)
+ return ret;
+
+ /* Restart the retiming buffer */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x58);
+ udelay(1);
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59);
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ return 0;
+}
+
+static long hdmi_8998_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ if (rate < HDMI_PCLK_MIN_FREQ)
+ return HDMI_PCLK_MIN_FREQ;
+ else if (rate > HDMI_PCLK_MAX_FREQ)
+ return HDMI_PCLK_MAX_FREQ;
+ else
+ return rate;
+}
+
+static unsigned long hdmi_8998_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+
+ return pll->rate;
+}
+
+static void hdmi_8998_pll_unprepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0);
+ usleep_range(100, 150);
+}
+
+static int hdmi_8998_pll_is_enabled(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ u32 status;
+ int pll_locked;
+
+ status = hdmi_pll_read(pll, REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ return pll_locked;
+}
+
+static const struct clk_ops hdmi_8998_pll_ops = {
+ .set_rate = hdmi_8998_pll_set_clk_rate,
+ .round_rate = hdmi_8998_pll_round_rate,
+ .recalc_rate = hdmi_8998_pll_recalc_rate,
+ .prepare = hdmi_8998_pll_prepare,
+ .unprepare = hdmi_8998_pll_unprepare,
+ .is_enabled = hdmi_8998_pll_is_enabled,
+};
+
+static const struct clk_init_data pll_init = {
+ .name = "hdmipll",
+ .ops = &hdmi_8998_pll_ops,
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+};
+
+int msm_hdmi_pll_8998_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_pll_8998 *pll;
+ int ret, i;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ pll->pdev = pdev;
+
+ pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll");
+ if (IS_ERR(pll->mmio_qserdes_com)) {
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ char name[32];
+
+ snprintf(name, sizeof(name), "hdmi_tx_l%d", i);
+
+ pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name);
+ if (IS_ERR(pll->mmio_qserdes_tx[i])) {
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+ }
+ pll->clk_hw.init = &pll_init;
+
+ ret = devm_clk_hw_register(dev, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const char * const hdmi_phy_8998_reg_names[] = {
+ "vddio",
+ "vcca",
+};
+
+static const char * const hdmi_phy_8998_clk_names[] = {
+ "iface", "ref", "xo",
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg = {
+ .type = MSM_HDMI_PHY_8998,
+ .reg_names = hdmi_phy_8998_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8998_reg_names),
+ .clk_names = hdmi_phy_8998_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8998_clk_names),
+};
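
Note on the hunk above: the clk_ops table is driven entirely by the common clock framework once devm_of_clk_add_hw_provider() exposes "hdmipll" to devicetree consumers. A minimal sketch of the consumer-side flow follows; the helper name and the "extp" connection id are hypothetical (the real lookup is done by the MSM HDMI controller through its own DT clock reference), and this is an illustration of CCF plumbing, not code from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Sketch only: how a DT consumer ends up exercising the clk_ops
 * registered above. "extp" is a placeholder connection id.
 */
static int hdmi_pclk_enable(struct device *dev, unsigned long pclk_hz)
{
	struct clk *pclk;
	int ret;

	pclk = devm_clk_get(dev, "extp");	/* resolves to "hdmipll" via DT */
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	/*
	 * clk_set_rate() funnels through .round_rate, which clamps the
	 * request into [HDMI_PCLK_MIN_FREQ, HDMI_PCLK_MAX_FREQ], then
	 * .set_rate (hdmi_8998_pll_set_clk_rate, earlier in this file).
	 */
	ret = clk_set_rate(pclk, pclk_hz);
	if (ret)
		return ret;

	/* clk_prepare_enable() runs .prepare: PHY_CFG power-up + lock poll. */
	return clk_prepare_enable(pclk);
}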
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 4494f6d1c7cb..7ab607252d18 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -357,12 +357,10 @@ void msm_debugfs_init(struct drm_minor *minor)
	if (priv->kms && priv->kms->funcs->debugfs_init)
		priv->kms->funcs->debugfs_init(priv->kms, minor);
-#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
				  &fail_gem_alloc);
	fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
				  &fail_gem_iova);
-#endif
 }
#endif
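
Note on this hunk and the msm_drv.c/msm_drv.h hunks below: the CONFIG_FAULT_INJECTION guards (and the local should_fail() fallback) can be dropped because <linux/fault-inject.h> now supplies no-op fallbacks when the option is disabled, so the call sites compile away to nothing. Roughly the shape of those fallbacks, paraphrased for illustration rather than copied verbatim from the header:

/*
 * Paraphrased: what <linux/fault-inject.h> provides for
 * CONFIG_FAULT_INJECTION=n builds, making #ifdef guards at
 * call sites unnecessary.
 */
#define DECLARE_FAULT_ATTR(name) struct fault_attr name

static inline bool should_fail(struct fault_attr *attr, ssize_t size)
{
	return false;			/* never inject a failure */
}

static inline struct dentry *
fault_create_debugfs_attr(const char *name, struct dentry *parent,
			  struct fault_attr *attr)
{
	return ERR_PTR(-ENODEV);	/* no debugfs knobs to create */
}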
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c33f4e3f822..8c13b08708d2 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>
@@ -58,10 +59,8 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
-#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
-#endif
static int msm_drm_uninit(struct device *dev)
{
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index be016d7b4ef1..2e28a1344636 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,12 +33,8 @@
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
-#ifdef CONFIG_FAULT_INJECTION
extern struct fault_attr fail_gem_alloc;
extern struct fault_attr fail_gem_iova;
-#else
-# define should_fail(attr, size) 0
-#endif
struct msm_kms;
struct msm_gpu;
@@ -215,8 +211,6 @@ struct msm_drm_private {
	struct notifier_block vmap_notifier;
	struct shrinker *shrinker;
-	struct drm_atomic_state *pm_state;
-
	/**
	 * hangcheck_period: For hang detection, in ms
	 *
@@ -254,8 +248,6 @@ void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
-void msm_atomic_state_clear(struct drm_atomic_state *state);
-void msm_atomic_state_free(struct drm_atomic_state *state);
int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3666b42b4ecd..a274b8466423 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -931,7 +931,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;
-	gpu->pdev = pdev;
	platform_set_drvdata(pdev, &gpu->adreno_smmu);
	msm_devfreq_init(gpu);
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 3d3da79fec2a..d3c7889aaf26 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -192,7 +192,6 @@ static const struct file_operations perf_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = perf_open,
	.read = perf_read,
-	.llseek = no_llseek,
	.release = perf_release,
};
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index ca44fd291c5b..39138e190cb9 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -227,7 +227,6 @@ static const struct file_operations rd_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rd_open,
	.read = rd_read,
-	.llseek = no_llseek,
	.release = rd_release,
};
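
Note on the two .llseek removals above: they are part of this cycle's tree-wide removal of no_llseek, after the VFS started deriving seekability from FMODE_LSEEK, which made the explicit stub redundant. A file that must reject lseek() now opts out at open time instead; a minimal sketch of that general pattern (not how msm_perf/msm_rd are actually wired, and the function name is invented):

/*
 * Sketch: clearing FMODE_LSEEK at open time replaces the old
 * .llseek = no_llseek stub for stream-like files.
 */
static int example_stream_open(struct inode *inode, struct file *file)
{
	return stream_open(inode, file);	/* clears FMODE_LSEEK */
}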
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 2dfe6913ab4f..97608603ea62 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -1198,6 +1198,1027 @@ to upconvert to 32b float internally?
<value value="0x3" name="TESS_CCW_TRIS"/>
</enum>
+<enum name="a7xx_cp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
+ <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
+ <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
+ <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
+ <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
+ <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
+ <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
+ <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
+ <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
+ <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
+ <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
+ <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
+ <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
+ <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
+ <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
+ <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
+ <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
+ <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
+ <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
+ <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
+ <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
+ <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
+ <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
+ <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
+</enum>
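
Note on the enum above and the ones that follow: each <enum> mirrors one hardware unit's performance-counter select mux (CP, RBBM, PC, VFD, HLSQ, ...). A profiler routes a select value into the unit's counter-select register and then samples the 64-bit counter. A rough, hypothetical sketch of that programming model; the REG_A7XX_* names below are placeholders for illustration only, not register definitions added by this patch (gpu_read()/gpu_write() are the msm_gpu.h MMIO helpers):

/*
 * Hypothetical sketch of perfcounter programming; the SEL/LO/HI
 * register names are invented for illustration.
 */
static u64 sample_cp_busy_cycles(struct msm_gpu *gpu)
{
	/* Route "CP busy cycles" onto CP counter 0. */
	gpu_write(gpu, REG_A7XX_CP_PERFCTR_CP_SEL_0, A7XX_PERF_CP_BUSY_CYCLES);

	/* Each counter is 64 bits wide, exposed as LO/HI halves. */
	return (u64)gpu_read(gpu, REG_A7XX_CP_PERFCTR_CP_LO_0) |
	       ((u64)gpu_read(gpu, REG_A7XX_CP_PERFCTR_CP_HI_0) << 32);
}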
+
+<enum name="a7xx_rbbm_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a7xx_pc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="A7XX_PERF_PC_RESERVED"/>
+ <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
+ <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
+ <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
+ <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
+ <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
+ <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
+ <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
+ <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
+ <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
+ <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
+ <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
+ <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
+ <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
+ <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
+ <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
+ <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
+ <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
+ <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
+ <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
+ <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
+ <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
+ <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
+ <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
+ <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
+ <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
+ <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
+ <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
+ <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
+ <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
+ <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
+ <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
+ <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
+ <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
+</enum>
+
+<enum name="a7xx_vfd_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
+ <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
+ <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
+</enum>
+
+<enum name="a7xx_hlsq_perfcounter_select">
+ <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+ <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
+ <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
+ <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
+ <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
+ <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
+ <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
+ <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
+ <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
+ <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
+ <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
+ <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
+ <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
+ <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
+ <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
+ <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
+ <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
+ <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
+ <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
+ <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
+ <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
+ <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
+ <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
+ <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
+ <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
+ <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
+ <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
+ <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
+ <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
+ <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
+ <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
+ <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
+ <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
+ <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
+ <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
+ <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
+</enum>
+
+<enum name="a7xx_vpc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
+ <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
+ <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
+ <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
+ <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
+ <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
+ <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
+ <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
+ <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
+ <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
+ <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
+ <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
+ <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
+ <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
+ <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
+ <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
+ <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
+</enum>
+
+<enum name="a7xx_tse_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
+ <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a7xx_ras_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
+ <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
+ <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
+ <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
+ <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
+ <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
+ <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
+ <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
+ <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
+ <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
+ <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
+ <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
+ <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
+ <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
+ <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
+ <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
+ <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
+ <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
+ <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
+
+</enum>
+
+<enum name="a7xx_uche_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
+ <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
+ <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
+ <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
+ <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
+ <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
+ <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
+ <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
+ <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
+ <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
+ <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
+ <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
+ <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
+ <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
+ <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
+ <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
+ <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
+ <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
+ <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
+ <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
+ <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
+</enum>
+
+<enum name="a7xx_tp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
+ <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
+ <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
+ <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
+ <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
+ <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
+ <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
+ <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
+ <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
+ <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
+ <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
+ <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
+ <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
+ <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
+ <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
+ <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
+ <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
+ <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
+ <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
+ <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
+ <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
+ <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
+ <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
+ <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
+ <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
+ <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
+ <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
+ <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
+ <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
+ <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
+ <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
+ <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
+ <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
+ <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
+ <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
+ <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
+ <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
+ <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
+ <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
+ <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
+ <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
+ <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
+ <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
+ <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
+ <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
+ <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
+ <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
+ <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
+ <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
+ <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
+ <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
+ <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
+ <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
+ <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
+ <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
+ <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
+ <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
+ <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
+ <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
+ <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
+ <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
+ <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
+ <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
+ <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
+ <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
+ <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
+ <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
+ <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
+ <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
+ <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
+ <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
+ <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
+ <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
+ <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
+ <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
+ <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
+ <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
+</enum>
+
+<enum name="a7xx_sp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
+ <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
+ <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
+ <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
+ <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
+ <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
+ <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
+ <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
+ <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
+ <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
+ <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
+ <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
+ <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
+ <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
+ <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
+ <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
+ <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
+ <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
+ <value value="99" name="A7XX_PERF_SP_QUADS"/>
+ <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
+ <value value="101" name="A7XX_PERF_SP_PIXELS"/>
+ <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
+ <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
+ <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
+ <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
+ <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
+ <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
+ <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
+ <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
+ <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
+ <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
+ <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
+ <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
+ <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
+ <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
+ <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
+ <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
+ <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
+ <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
+ <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
+ <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
+ <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
+ <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
+ <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
+ <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
+ <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
+ <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
+ <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
+ <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
+ <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
+ <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
+ <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
+ <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
+ <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
+ <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
+ <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
+ <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
+ <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
+ <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
+ <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
+ <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
+ <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
+ <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
+ <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
+ <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
+ <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
+ <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
+ <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
+ <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
+ <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
+</enum>
+
+<enum name="a7xx_rb_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="A7XX_PERF_RB_Z_READ"/>
+ <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
+ <value value="14" name="A7XX_PERF_RB_C_READ"/>
+ <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
+ <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
+ <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
+ <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
+ <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
+ <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
+ <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
+ <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
+ <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
+ <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
+ <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
+ <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
+</enum>
+
+<enum name="a7xx_vsc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
+ <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a7xx_ccu_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
+ <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
+ <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
+ <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
+ <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
+ <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
+ <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
+ <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
+ <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
+ <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
+ <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
+ <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
+ <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
+ <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
+ <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
+ <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
+</enum>
+
+<enum name="a7xx_lrz_perfcounter_select">
+ <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
+ <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
+ <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
+ <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
+</enum>
+
+<enum name="a7xx_cmp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
+ <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
+ <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
+ <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
+ <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
+ <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
+ <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
+ <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
+ <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
+ <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
+ <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
+ <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
+ <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
+</enum>
+
+<enum name="a7xx_gbif_perfcounter_select">
+ <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
+ <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
+ <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
+ <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
+ <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
+ <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
+ <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
+ <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
+ <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
+ <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
+ <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
+ <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
+ <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
+ <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
+ <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
+ <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
+ <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
+ <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
+ <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
+ <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
+ <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
+ <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
+ <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
+ <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
+ <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
+ <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
+ <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
+ <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
+ <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
+ <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
+ <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
+ <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
+ <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
+ <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
+ <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
+ <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
+ <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
+ <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
+ <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
+ <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
+ <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
+ <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
+ <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
+ <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
+ <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
+ <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
+ <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
+ <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
+ <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
+ <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
+ <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
+ <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
+ <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
+ <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
+ <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
+ <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
+</enum>
+
+<enum name="a7xx_ufc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
+ <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
+ <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
+ <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
+ <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
+ <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
+ <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
+ <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
+ <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
+ <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
+ <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
+ <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
+ <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
+ <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
+ <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
+ <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
+ <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
+ <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
+ <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
+ <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
+ <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
+ <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
+ <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
+ <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
+ <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
+ <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
+ <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
+ <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
+ <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
+ <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
+ <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
+ <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
+ <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
+ <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
+ <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
+ <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
+ <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
+ <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
+ <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
+ <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
+ <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
+ <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
+ <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
+ <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
+ <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
+ <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
+ <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
+ <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
+ <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
+ <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
+ <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
+ <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
+ <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
+ <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
+ <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
+ <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
+</enum>
+
<domain name="A6XX" width="32" prefix="variant" varset="chip">
<bitset name="A6XX_RBBM_INT_0_MASK" inline="no" varset="chip">
<bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
@@ -1584,7 +2605,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x050e" name="RBBM_PERFCTR_SRAM_INIT_CMD"/>
<reg32 offset="0x050f" name="RBBM_PERFCTR_SRAM_INIT_STATUS"/>
<reg32 offset="0x0533" name="RBBM_ISDB_CNT"/>
- <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL" variants="A7XX-"/>
+ <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL"/>
<reg32 offset="0x0535" name="RBBM_SNAPSHOT_STATUS" variants="A7XX-"/>
<!---
@@ -2184,13 +3205,28 @@ to upconvert to 32b float internally?
<value value="3" name="BUFFERS_IN_SYSMEM"/>
</enum>
+ <enum name="a6xx_lrz_feedback_mask">
+ <value value="0x0" name="LRZ_FEEDBACK_NONE"/>
+ <value value="0x1" name="LRZ_FEEDBACK_EARLY_Z"/>
+ <value value="0x2" name="LRZ_FEEDBACK_EARLY_LRZ_LATE_Z"/>
+ <!-- We don't have a flag type, and this flag combination is often used -->
+ <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_LRZ_LATE_Z"/>
+ <value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
+ </enum>
+
<reg32 offset="0x80a1" name="GRAS_BIN_CONTROL" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
+ <doc>Disable LRZ feedback writes</doc>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
<bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location" variants="A6XX"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <doc>
+ Allows draws that have GRAS_LRZ_CNTL.ENABLE but not
+ GRAS_LRZ_CNTL.LRZ_WRITE to contribute to LRZ during the RENDERING
+ pass. In sysmem mode GRAS_LRZ_CNTL.LRZ_WRITE is not considered.
+ </doc>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
<bitfield name="UNK27" pos="27"/>
</reg32>
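
[Aside, not part of the patch: the GRAS_BIN_CONTROL fields above use shr-encoded
widths, i.e. the register stores the bin size right-shifted by the declared shr
amount. A minimal C sketch of the packing, with a hypothetical helper name;
the masks follow the low/high bit ranges in the XML.]

    #include <stdint.h>

    /* Pack GRAS_BIN_CONTROL: BINW holds bin_w >> 5 in bits [5:0], BINH holds
     * bin_h >> 4 in bits [14:8], and the LRZ feedback mask (an
     * a6xx_lrz_feedback_mask value) sits in bits [26:24]. */
    static uint32_t gras_bin_control(uint32_t bin_w, uint32_t bin_h,
                                     uint32_t lrz_feedback)
    {
            uint32_t v = 0;

            v |= (bin_w >> 5) & 0x3f;
            v |= ((bin_h >> 4) & 0x7f) << 8;
            v |= (lrz_feedback & 0x7) << 24;

            return v;
    }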
@@ -2270,7 +3306,7 @@ to upconvert to 32b float internally?
- 0.0 if GREATER
- 1.0 if LESS
</doc>
- <bitfield name="FC_ENABLE" pos="3" type="boolean"/>
+ <bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/>
<!-- set when depth-test + depth-write enabled -->
<bitfield name="Z_TEST_ENABLE" pos="4" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/>
@@ -2284,7 +3320,7 @@ to upconvert to 32b float internally?
Disable LRZ based on previous direction and the current one.
If DIR_WRITE is not enabled - there is no write to direction buffer.
</doc>
- <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean"/>
+ <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/>
<bitfield name="Z_FUNC" low="11" high="13" type="adreno_compare_func" variants="A7XX-"/>
</reg32>
@@ -2357,7 +3393,10 @@ to upconvert to 32b float internally?
<bitfield name="BASE_MIP_LEVEL" low="28" high="31" type="uint"/>
</reg32>
- <reg32 offset="0x810b" name="GRAS_UNKNOWN_810B" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x810b" name="GRAS_LRZ_CNTL2" variants="A7XX-" usage="rp_blit">
+ <bitfield name="DISABLE_ON_WRONG_DIR" pos="0" type="boolean"/>
+ <bitfield name="FC_ENABLE" pos="1" type="boolean"/>
+ </reg32>
<!-- 0x810c-0x810f invalid -->
@@ -2366,7 +3405,10 @@ to upconvert to 32b float internally?
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
<reg32 offset="0x8111" name="GRAS_LRZ_CLEAR_DEPTH_F32" type="float" variants="A7XX-"/>
- <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
+ <bitfield name="UNK3" pos="3"/>
+ </reg32>
<!-- Always written together and always equal 09510840 00000a62 -->
<reg32 offset="0x8120" name="GRAS_UNKNOWN_8120" variants="A7XX-" usage="cmd"/>
@@ -2440,7 +3482,7 @@ to upconvert to 32b float internally?
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
<bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
<reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A7XX-" usage="rp_blit">
@@ -2448,7 +3490,7 @@ to upconvert to 32b float internally?
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
@@ -2605,6 +3647,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK10" pos="10"/>
<bitfield name="LOSSLESSCOMPEN" pos="11" type="boolean"/>
<bitfield name="COLOR_SWAP" low="13" high="14" type="a3xx_color_swap"/>
+ <bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
<!--
at least in gmem, things seem to be aligned to pitch of 64..
@@ -2770,6 +3813,7 @@ to upconvert to 32b float internally?
<bitfield name="COLOR_SWAP" low="5" high="6" type="a3xx_color_swap"/>
<bitfield name="COLOR_FORMAT" low="7" high="14" type="a6xx_format"/>
<bitfield name="UNK15" pos="15" type="boolean"/>
+ <bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
<reg64 offset="0x88d8" name="RB_BLIT_DST" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x88da" name="RB_BLIT_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
@@ -2886,13 +3930,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8c00" name="RB_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
<reg32 offset="0x8c01" name="RB_2D_UNKNOWN_8C01" low="0" high="31" usage="rp_blit"/>
- <bitset name="a6xx_2d_surf_info" inline="yes">
+ <bitset name="a6xx_2d_src_surf_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
<bitfield name="FLAGS" pos="12" type="boolean"/>
<bitfield name="SRGB" pos="13" type="boolean"/>
- <!-- the rest is only for src -->
<bitfield name="SAMPLES" low="14" high="15" type="a3xx_msaa_samples"/>
<bitfield name="FILTER" pos="16" type="boolean"/>
<bitfield name="UNK17" pos="17" type="boolean"/>
@@ -2903,11 +3946,21 @@ to upconvert to 32b float internally?
<bitfield name="UNK22" pos="22" type="boolean"/>
<bitfield name="UNK23" low="23" high="26"/>
<bitfield name="UNK28" pos="28" type="boolean"/>
+ <bitfield name="MUTABLEEN" pos="29" type="boolean" variants="A7XX-"/>
+ </bitset>
+
+ <bitset name="a6xx_2d_dst_surf_info" inline="yes">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
+ <bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
+ <bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
+ <bitfield name="FLAGS" pos="12" type="boolean"/>
+ <bitfield name="SRGB" pos="13" type="boolean"/>
+ <bitfield name="SAMPLES" low="14" high="15" type="a3xx_msaa_samples"/>
+ <bitfield name="MUTABLEEN" pos="17" type="boolean" variants="A7XX-"/>
</bitset>
<!-- 0x8c02-0x8c16 invalid -->
- <!-- TODO: RB_2D_DST_INFO has 17 valid bits (doesn't match a6xx_2d_surf_info) -->
- <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_surf_info" usage="rp_blit"/>
+ <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_dst_surf_info" usage="rp_blit"/>
<reg64 offset="0x8c18" name="RB_2D_DST" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8c1a" name="RB_2D_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12/IYUV): -->
@@ -2927,7 +3980,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x8c2d" name="RB_2D_SRC_SOLID_C1" usage="rp_blit"/>
<reg32 offset="0x8c2e" name="RB_2D_SRC_SOLID_C2" usage="rp_blit"/>
<reg32 offset="0x8c2f" name="RB_2D_SRC_SOLID_C3" usage="rp_blit"/>
- <!-- 0x8c34-0x8dff invalid -->
+
+ <reg32 offset="0x8c34" name="RB_UNKNOWN_8C34" variants="A7XX-" usage="cmd"/>
+
+ <!-- 0x8c35-0x8dff invalid -->
<!-- always 0x1 ? either doesn't exist for a650 or write-only: -->
<reg32 offset="0x8e01" name="RB_UNKNOWN_8E01" usage="cmd"/>
@@ -4275,7 +5331,7 @@ to upconvert to 32b float internally?
badly named or the functionality moved in a6xx. But downstream kernel
calls this "a6xx_sp_ps_tp_2d_cluster"
-->
- <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_surf_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xb4c1" name="SP_PS_2D_SRC_SIZE" variants="A6XX" usage="rp_blit">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
@@ -4286,7 +5342,7 @@ to upconvert to 32b float internally?
<bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
</reg32>
- <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_surf_info" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xb2c1" name="SP_PS_2D_SRC_SIZE" variants="A7XX">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
@@ -4329,7 +5385,12 @@ to upconvert to 32b float internally?
<!-- always 0x100000 or 0x1000000? -->
<reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="cmd"/>
<reg32 offset="0xb601" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd"/>
+ <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd">
+ <!-- Affects UBWC in some way: if BLIT_OP_SCALE is done with this bit set
+ and another blit is then done without it, the UBWC image may be copied incorrectly.
+ -->
+ <bitfield name="TP_UBWC_FLAG_HINT" pos="18" type="boolean"/>
+ </reg32>
<reg32 offset="0xb604" name="TPL1_NC_MODE_CNTL">
<bitfield name="MODE" pos="0" type="boolean"/>
<bitfield name="LOWER_BIT" low="1" high="2" type="uint"/>
@@ -4351,7 +5412,8 @@ to upconvert to 32b float internally?
<reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A7XX" usage="cmd"/>
<reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A7XX" usage="cmd"/>
- <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12"/>
+ <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12" variants="A6XX"/>
+ <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="18" variants="A7XX"/>
<!-- TODO: 4 more perfcntr sel at 0xb620 ? -->
@@ -4582,15 +5644,15 @@ to upconvert to 32b float internally?
<bitfield name="UNK6" pos="6" type="boolean"/>
</reg32>
- <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD">
+ <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD">
+ <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD">
+ <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD" variants="A6XX">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
@@ -4623,6 +5685,19 @@ to upconvert to 32b float internally?
<bitfield name="GFX_BINDLESS" low="14" high="18" type="hex"/>
</reg32>
+ <reg32 offset="0xab1c" name="HLSQ_DRAW_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0xab1d" name="HLSQ_DISPATCH_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0xab1e" name="HLSQ_EVENT_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="16" high="23"/>
+ <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
+ </reg32>
+
<reg32 offset="0xab1f" name="HLSQ_INVALIDATE_CMD" variants="A7XX-" usage="cmd">
<doc>
This register clears pending loads queued up by
@@ -4791,7 +5866,7 @@ to upconvert to 32b float internally?
<reg32 offset="3" name="3"/>
</domain>
-<domain name="A6XX_TEX_CONST" width="32">
+<domain name="A6XX_TEX_CONST" width="32" varset="chip">
<doc>Texture constant dwords</doc>
<enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
<value name="A6XX_TEX_X" value="0"/>
@@ -4831,6 +5906,7 @@ to upconvert to 32b float internally?
<reg32 offset="1" name="1">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
</reg32>
<reg32 offset="2" name="2">
<!--
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
index 6c81581016c7..1cf1b14fbd91 100644
--- a/drivers/gpu/drm/msm/registers/display/hdmi.xml
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -1012,4 +1012,93 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00110" name="TX_ALOG_INTF_OBSV"/>
</domain>
+<domain name="HDMI_8998_PHY" width="32">
+ <reg32 offset="0x00000" name="CFG"/>
+ <reg32 offset="0x00004" name="PD_CTL"/>
+ <reg32 offset="0x00010" name="MODE"/>
+ <reg32 offset="0x0005C" name="CLOCK"/>
+ <reg32 offset="0x00068" name="CMN_CTRL"/>
+ <reg32 offset="0x000B4" name="STATUS"/>
+</domain>
+
+<domain name="HDMI_8998_PHY_QSERDES_COM" width="32">
+ <reg32 offset="0x0000" name="ATB_SEL1"/>
+ <reg32 offset="0x0004" name="ATB_SEL2"/>
+ <reg32 offset="0x0008" name="FREQ_UPDATE"/>
+ <reg32 offset="0x000C" name="BG_TIMER"/>
+ <reg32 offset="0x0010" name="SSC_EN_CENTER"/>
+ <reg32 offset="0x0014" name="SSC_ADJ_PER1"/>
+ <reg32 offset="0x0018" name="SSC_ADJ_PER2"/>
+ <reg32 offset="0x001C" name="SSC_PER1"/>
+ <reg32 offset="0x0020" name="SSC_PER2"/>
+ <reg32 offset="0x0024" name="SSC_STEP_SIZE1"/>
+ <reg32 offset="0x0028" name="SSC_STEP_SIZE2"/>
+ <reg32 offset="0x002C" name="POST_DIV"/>
+ <reg32 offset="0x0030" name="POST_DIV_MUX"/>
+ <reg32 offset="0x0034" name="BIAS_EN_CLKBUFLR_EN"/>
+ <reg32 offset="0x0038" name="CLK_ENABLE1"/>
+ <reg32 offset="0x003C" name="SYS_CLK_CTRL"/>
+ <reg32 offset="0x0040" name="SYSCLK_BUF_ENABLE"/>
+ <reg32 offset="0x0044" name="PLL_EN"/>
+ <reg32 offset="0x0048" name="PLL_IVCO"/>
+ <reg32 offset="0x004C" name="CMN_IETRIM"/>
+ <reg32 offset="0x0050" name="CMN_IPTRIM"/>
+ <reg32 offset="0x0060" name="CP_CTRL_MODE0"/>
+ <reg32 offset="0x0064" name="CP_CTRL_MODE1"/>
+ <reg32 offset="0x0068" name="PLL_RCTRL_MODE0"/>
+ <reg32 offset="0x006C" name="PLL_RCTRL_MODE1"/>
+ <reg32 offset="0x0070" name="PLL_CCTRL_MODE0"/>
+ <reg32 offset="0x0074" name="PLL_CCTRL_MODE1"/>
+ <reg32 offset="0x0078" name="PLL_CNTRL"/>
+ <reg32 offset="0x007C" name="BIAS_EN_CTRL_BY_PSM"/>
+ <reg32 offset="0x0080" name="SYSCLK_EN_SEL"/>
+ <reg32 offset="0x0084" name="CML_SYSCLK_SEL"/>
+ <reg32 offset="0x0088" name="RESETSM_CNTRL"/>
+ <reg32 offset="0x008C" name="RESETSM_CNTRL2"/>
+ <reg32 offset="0x0090" name="LOCK_CMP_EN"/>
+ <reg32 offset="0x0094" name="LOCK_CMP_CFG"/>
+ <reg32 offset="0x0098" name="LOCK_CMP1_MODE0"/>
+ <reg32 offset="0x009C" name="LOCK_CMP2_MODE0"/>
+ <reg32 offset="0x00A0" name="LOCK_CMP3_MODE0"/>
+ <reg32 offset="0x00B0" name="DEC_START_MODE0"/>
+ <reg32 offset="0x00B4" name="DEC_START_MODE1"/>
+ <reg32 offset="0x00B8" name="DIV_FRAC_START1_MODE0"/>
+ <reg32 offset="0x00BC" name="DIV_FRAC_START2_MODE0"/>
+ <reg32 offset="0x00C0" name="DIV_FRAC_START3_MODE0"/>
+ <reg32 offset="0x00C4" name="DIV_FRAC_START1_MODE1"/>
+ <reg32 offset="0x00C8" name="DIV_FRAC_START2_MODE1"/>
+ <reg32 offset="0x00CC" name="DIV_FRAC_START3_MODE1"/>
+ <reg32 offset="0x00D0" name="INTEGLOOP_INITVAL"/>
+ <reg32 offset="0x00D4" name="INTEGLOOP_EN"/>
+ <reg32 offset="0x00D8" name="INTEGLOOP_GAIN0_MODE0"/>
+ <reg32 offset="0x00DC" name="INTEGLOOP_GAIN1_MODE0"/>
+ <reg32 offset="0x00E0" name="INTEGLOOP_GAIN0_MODE1"/>
+ <reg32 offset="0x00E4" name="INTEGLOOP_GAIN1_MODE1"/>
+ <reg32 offset="0x00E8" name="VCOCAL_DEADMAN_CTRL"/>
+ <reg32 offset="0x00EC" name="VCO_TUNE_CTRL"/>
+ <reg32 offset="0x00F0" name="VCO_TUNE_MAP"/>
+ <reg32 offset="0x0124" name="CMN_STATUS"/>
+ <reg32 offset="0x0128" name="RESET_SM_STATUS"/>
+ <reg32 offset="0x0138" name="CLK_SEL"/>
+ <reg32 offset="0x013C" name="HSCLK_SEL"/>
+ <reg32 offset="0x0148" name="CORECLK_DIV_MODE0"/>
+ <reg32 offset="0x0150" name="SW_RESET"/>
+ <reg32 offset="0x0154" name="CORE_CLK_EN"/>
+ <reg32 offset="0x0158" name="C_READY_STATUS"/>
+ <reg32 offset="0x015C" name="CMN_CONFIG"/>
+ <reg32 offset="0x0164" name="SVS_MODE_CLK_SEL"/>
+</domain>
+
+<domain name="HDMI_8998_PHY_TXn" width="32">
+ <reg32 offset="0x0000" name="EMP_POST1_LVL"/>
+ <reg32 offset="0x0008" name="INTERFACE_SELECT_TX_BAND"/>
+ <reg32 offset="0x000C" name="CLKBUF_TERM_ENABLE"/>
+ <reg32 offset="0x0014" name="DRV_LVL_RES_CODE_OFFSET"/>
+ <reg32 offset="0x0018" name="DRV_LVL"/>
+ <reg32 offset="0x001C" name="LANE_CONFIG"/>
+ <reg32 offset="0x0024" name="PRE_DRIVER_1"/>
+ <reg32 offset="0x0028" name="PRE_DRIVER_2"/>
+ <reg32 offset="0x002C" name="LANE_MODE"/>
+</domain>
+
</database>
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index 2541d2de4e45..dbd42cc1da87 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -407,8 +407,7 @@ static void lcdif_crtc_mode_set_nofb(struct drm_crtc_state *crtc_state,
struct drm_display_mode *m = &crtc_state->adjusted_mode;
DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
- m->crtc_clock,
- (int)(clk_get_rate(lcdif->clk) / 1000));
+ m->clock, (int)(clk_get_rate(lcdif->clk) / 1000));
DRM_DEV_DEBUG_DRIVER(drm->dev, "Bridge bus_flags: 0x%08X\n",
lcdif_crtc_state->bus_flags);
DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
@@ -538,7 +537,7 @@ static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_device *drm = lcdif->drm;
dma_addr_t paddr;
- clk_set_rate(lcdif->clk, m->crtc_clock * 1000);
+ clk_set_rate(lcdif->clk, m->clock * 1000);
pm_runtime_get_sync(drm->dev);
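
[Aside, not part of the patch: struct drm_display_mode carries two clock
fields. "clock" is the nominal pixel clock in kHz from the mode line, while
"crtc_clock" is the copy adjusted by drm_mode_set_crtcinfo() and can differ
for e.g. interlaced or double-scanned modes; the hunks above make lcdif use
the nominal value consistently. A minimal kernel-style sketch; the helper
name is hypothetical.]

    #include <drm/drm_modes.h>
    #include <linux/printk.h>

    /* Log both clock fields of a mode, in kHz. */
    static void report_mode_clocks(const struct drm_display_mode *m)
    {
            pr_info("nominal %d kHz, hw-adjusted %d kHz\n",
                    m->clock, m->crtc_clock);
    }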
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index c32c01827c1d..7b863355c5c6 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -25,7 +25,6 @@ nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
nouveau-y += nouveau_nvif.o
nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
-nouveau-y += nouveau_usif.o # userspace <-> nvif
nouveau-y += nouveau_vga.o
# DRM - memory management
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 4310ad71870b..67146f1e8482 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -118,8 +118,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
- struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
+ struct nvkm_clk *clk = nvxx_clk(drm);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -617,9 +617,15 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret == 0) {
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ }
+
+ drm_gem_object_get(&nvbo->bo.base);
+ disp->image[nv_crtc->index] = nvbo;
}
return ret;
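
[Aside, not part of the patch: the hunks above and below replace the
nouveau_bo_ref() helper with the generic GEM refcounting calls. A minimal
sketch of the get/put idiom, assuming only drm_gem_object_get() and
drm_gem_object_put() from <drm/drm_gem.h>; the pin/unpin handling in the
real code is omitted.]

    #include <drm/drm_gem.h>

    /* Swap the object published in *slot: drop the reference on the old
     * object, take one on new_obj before publishing it.  "slot" stands in
     * for disp->image[index] in the surrounding hunks. */
    static void swap_gem_slot(struct drm_gem_object **slot,
                              struct drm_gem_object *new_obj)
    {
            if (*slot)
                    drm_gem_object_put(*slot);

            drm_gem_object_get(new_obj);
            *slot = new_obj;
    }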
@@ -754,13 +760,17 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ disp->image[nv_crtc->index] = NULL;
+ }
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_fini(nv_crtc->cursor.nvbo);
nvif_event_dtor(&nv_crtc->vblank);
nvif_head_dtor(&nv_crtc->head);
kfree(nv_crtc);
@@ -794,9 +804,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
{
struct nv04_display *disp = nv04_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ disp->image[nv_crtc->index] = NULL;
+ }
}
static int
@@ -1042,7 +1057,7 @@ nv04_finish_page_flip(struct nouveau_channel *chan,
struct nv04_page_flip_state *ps)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_drm *drm = chan->cli->drm;
struct drm_device *dev = drm->dev;
struct nv04_page_flip_state *s;
unsigned long flags;
@@ -1098,9 +1113,9 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_fence **pfence)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_drm *drm = chan->cli->drm;
struct drm_device *dev = drm->dev;
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
unsigned long flags;
int ret;
@@ -1157,8 +1172,8 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
chan = drm->channel;
if (!chan)
return -ENODEV;
- cli = (void *)chan->user.client;
- push = chan->chan.push;
+ cli = chan->cli;
+ push = &chan->chan.push;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -1210,7 +1225,11 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
PUSH_NVSQ(push, NV05F, 0x0130, 0);
}
- nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+ if (dispnv04->image[head])
+ drm_gem_object_put(&dispnv04->image[head]->bo.base);
+
+ drm_gem_object_get(&new_bo->bo.base);
+ dispnv04->image[head] = new_bo;
ret = nv04_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
@@ -1329,7 +1348,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
}
if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_fini(nv_crtc->cursor.nvbo);
}
nv04_cursor_init(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index d6b8e0cce2ac..2e12bf136607 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -237,7 +237,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index d5b129dc623b..504c421aa176 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -626,7 +626,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
struct nvkm_i2c_bus_probe info[] = {
{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4b7497a8755c..f71199a39bc4 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -189,7 +189,6 @@ static void
nv04_display_destroy(struct drm_device *dev)
{
struct nv04_display *disp = nv04_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *encoder;
struct nouveau_crtc *nv_crtc;
@@ -206,15 +205,13 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_display(dev)->priv = NULL;
vfree(disp);
-
- nvif_object_unmap(&drm->client.device.object);
}
int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -229,8 +226,6 @@ nv04_display_create(struct drm_device *dev)
disp->drm = drm;
- nvif_object_map(&drm->client.device.object, NULL, 0);
-
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
nouveau_display(dev)->init = nv04_display_init;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 11a6663758ec..85ec0f534392 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -176,7 +176,7 @@ static inline void
nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
struct dcb_output *outp, int crtc)
{
- nvbios_init(&nvxx_bios(&nouveau_drm(dev)->client.device)->subdev, table,
+ nvbios_init(&nvxx_bios(nouveau_drm(dev))->subdev, table,
init.outp = outp;
init.head = crtc;
);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index f7d35657aa64..8b376f9c8746 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -166,7 +166,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
@@ -258,9 +258,8 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->client.device;
- struct nvkm_clk *clk = nvxx_clk(device);
- struct nvkm_bios *bios = nvxx_bios(device);
+ struct nvkm_clk *clk = nvxx_clk(drm);
+ struct nvkm_bios *bios = nvxx_bios(drm);
struct nvbios_pll pll_lim;
struct nvkm_pll_vals pv;
enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
@@ -470,7 +469,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
+ struct nvkm_clk *clk = nvxx_clk(drm);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
int i;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index de3ea731d6e6..d3014027a812 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -53,7 +53,7 @@ static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
if (bus) {
return nvkm_i2c_bus_probe(bus, "TV encoder",
@@ -205,7 +205,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
int type, ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 2033214c4b78..3ecb101d23e9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -47,7 +47,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -131,7 +131,7 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
if (device->quirk && device->quirk->tv_pin_mask) {
*pin_mask = device->quirk->tv_pin_mask;
@@ -369,7 +369,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 70c62b861276..a431f6c5f6fa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -35,7 +35,7 @@
int
base507c_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -48,7 +48,7 @@ base507c_update(struct nv50_wndw *wndw, u32 *interlock)
int
base507c_image_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -65,7 +65,7 @@ base507c_image_clr(struct nv50_wndw *wndw)
static int
base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -118,7 +118,7 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
base507c_xlut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -132,7 +132,7 @@ base507c_xlut_clr(struct nv50_wndw *wndw)
int
base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -158,7 +158,7 @@ base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
int
base507c_ntfy_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -171,7 +171,7 @@ base507c_ntfy_clr(struct nv50_wndw *wndw)
int
base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
@@ -195,7 +195,7 @@ base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
int
base507c_sema_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -208,7 +208,7 @@ base507c_sema_clr(struct nv50_wndw *wndw)
int
base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -307,7 +307,6 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
struct nvif_disp_chan_v0 args = {
.id = head,
};
- struct nouveau_display *disp = nouveau_display(drm->dev);
struct nv50_disp *disp50 = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
@@ -318,7 +317,7 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
+ ret = nv50_dmac_create(drm,
&oclass, head, &args, sizeof(args),
disp50->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 093d4ba6910e..4545cc5f3a14 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -28,7 +28,7 @@
static int
base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index e6b0417c325b..4a2d5a259e15 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -28,7 +28,7 @@
static int
base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 10)))
@@ -65,7 +65,7 @@ base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
base907c_xlut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 6)))
@@ -84,7 +84,7 @@ base907c_xlut_clr(struct nv50_wndw *wndw)
static int
base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 6)))
@@ -156,7 +156,7 @@ base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
static int
base907c_csc_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -170,7 +170,7 @@ base907c_csc_clr(struct nv50_wndw *wndw)
static int
base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e5bb5ca950c8..ce2cb78bbdd3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -33,7 +33,7 @@
int
core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
@@ -80,7 +80,7 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
int
core507d_read_caps(struct nv50_disp *disp)
{
- struct nvif_push *push = disp->core->chan.push;
+ struct nvif_push *push = &disp->core->chan.push;
int ret;
ret = PUSH_WAIT(push, 6);
@@ -130,7 +130,7 @@ core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
int
core507d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -166,7 +166,7 @@ core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
return -ENOMEM;
core->func = func;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &core->chan);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 42f877f2ced2..7f637b8830be 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -33,7 +33,7 @@
int
corec37d_wndw_owner(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
@@ -51,7 +51,7 @@ corec37d_wndw_owner(struct nv50_core *core)
int
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
@@ -127,7 +127,7 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
static int
corec37d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 53b1e2a569c1..421d0d57e1d8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -29,7 +29,7 @@
static int
corec57d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
index f9ad641555b7..a674ba435b05 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
@@ -26,7 +26,7 @@ static int
crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
@@ -74,7 +74,7 @@ crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source
static int
crc907d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
index f10f6c484408..4821ce32f9ed 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
@@ -15,7 +15,7 @@ static int
crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, i * 4) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
@@ -53,7 +53,7 @@ crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source
int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
index cc0130e3d496..ad591dcb0bc9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
@@ -13,7 +13,7 @@
static int crcc57d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
index 09de78d96679..99ae692f219e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
@@ -29,7 +29,7 @@ static int
dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
u32 sync = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
index 95efa625b691..74bc9f81e3f1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
@@ -29,7 +29,7 @@ static int
dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0efd6b4906cf..eed579a6c858 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -93,8 +93,11 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
ret = nvif_object_ctor(disp, "kmsChan", 0,
oclass[0], data, size,
&chan->user);
- if (ret == 0)
- nvif_object_map(&chan->user, NULL, 0);
+ if (ret == 0) {
+ ret = nvif_object_map(&chan->user, NULL, 0);
+ if (ret)
+ nvif_object_dtor(&chan->user);
+ }
nvif_object_sclass_put(&sclass);
return ret;
}
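
[Aside, not part of the patch: the hunk above adds a previously missing error
path, destroying the channel object when the follow-up mapping fails. A
standalone sketch of the shape of the fix, with made-up obj_* names.]

    struct obj { int ctor_ok, map_ok; };

    static int  obj_ctor(struct obj *o) { return o->ctor_ok ? 0 : -1; }
    static int  obj_map(struct obj *o)  { return o->map_ok  ? 0 : -1; }
    static void obj_dtor(struct obj *o) { (void)o; }

    /* Construct, then map; on map failure undo the construction so the
     * caller never sees a half-initialised object. */
    static int create_and_map(struct obj *o)
    {
            int ret = obj_ctor(o);

            if (ret == 0) {
                    ret = obj_map(o);
                    if (ret)
                            obj_dtor(o);
            }
            return ret;
    }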
@@ -124,20 +127,20 @@ nv50_dmac_destroy(struct nv50_dmac *dmac)
nv50_chan_destroy(&dmac->base);
- nvif_mem_dtor(&dmac->_push.mem);
+ nvif_mem_dtor(&dmac->push.mem);
}
static void
nv50_dmac_kick(struct nvif_push *push)
{
- struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), push);
- dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
+ dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
if (dmac->put != dmac->cur) {
/* Push buffer fetches are not coherent with BAR1, we need to ensure
* writes have been flushed right through to VRAM before writing PUT.
*/
- if (dmac->push->mem.type & NVIF_MEM_VRAM) {
+ if (dmac->push.mem.type & NVIF_MEM_VRAM) {
struct nvif_device *device = dmac->base.device;
nvif_wr32(&device->object, 0x070000, 0x00000001);
nvif_msec(device, 2000,
@@ -172,7 +175,7 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
if (get == 0) {
/* Corner-case, HW idle, but non-committed work pending. */
if (dmac->put == 0)
- nv50_dmac_kick(dmac->push);
+ nv50_dmac_kick(&dmac->push);
if (nvif_msec(dmac->base.device, 2000,
if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
@@ -181,7 +184,7 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
return -ETIMEDOUT;
}
- PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
+ PUSH_RSVD(&dmac->push, PUSH_JUMP(&dmac->push, 0));
dmac->cur = 0;
return 0;
}
@@ -189,19 +192,19 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
static int
nv50_dmac_wait(struct nvif_push *push, u32 size)
{
- struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), push);
int free;
if (WARN_ON(size > dmac->max))
return -EINVAL;
- dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
+ dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
if (dmac->cur + size >= dmac->max) {
int ret = nv50_dmac_wind(dmac);
if (ret)
return ret;
- push->cur = dmac->_push.mem.object.map.ptr;
+ push->cur = dmac->push.mem.object.map.ptr;
push->cur = push->cur + dmac->cur;
nv50_dmac_kick(push);
}
@@ -214,7 +217,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
return -ETIMEDOUT;
}
- push->bgn = dmac->_push.mem.object.map.ptr;
+ push->bgn = dmac->push.mem.object.map.ptr;
push->bgn = push->bgn + dmac->cur;
push->cur = push->bgn;
push->end = push->cur + free;
@@ -226,17 +229,16 @@ static int nv50_dmac_vram_pushbuf = -1;
module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
int
-nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+nv50_dmac_create(struct nouveau_drm *drm,
const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nvif_device *device = &drm->device;
+ struct nvif_object *disp = &drm->display->disp.object;
struct nvif_disp_chan_v0 *args = data;
u8 type = NVIF_MEM_COHERENT;
int ret;
- mutex_init(&dmac->lock);
-
/* Pascal added support for 47-bit physical addresses, but some
* parts of EVO still only accept 40-bit PAs.
*
@@ -250,18 +252,15 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
(nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
type |= NVIF_MEM_VRAM;
- ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
- &dmac->_push.mem);
+ ret = nvif_mem_ctor_map(&drm->mmu, "kmsChanPush", type, 0x1000, &dmac->push.mem);
if (ret)
return ret;
- dmac->ptr = dmac->_push.mem.object.map.ptr;
- dmac->_push.wait = nv50_dmac_wait;
- dmac->_push.kick = nv50_dmac_kick;
- dmac->push = &dmac->_push;
- dmac->push->bgn = dmac->_push.mem.object.map.ptr;
- dmac->push->cur = dmac->push->bgn;
- dmac->push->end = dmac->push->bgn;
+ dmac->push.wait = nv50_dmac_wait;
+ dmac->push.kick = nv50_dmac_kick;
+ dmac->push.bgn = dmac->push.mem.object.map.ptr;
+ dmac->push.cur = dmac->push.bgn;
+ dmac->push.end = dmac->push.bgn;
dmac->max = 0x1000/4 - 1;
/* EVO channels are affected by a HW bug where the last 12 DWORDs
@@ -270,7 +269,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (disp->oclass < GV100_DISP)
dmac->max -= 12;
- args->pushbuf = nvif_handle(&dmac->_push.mem.object);
+ args->pushbuf = nvif_handle(&dmac->push.mem.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
&dmac->base);
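
[Aside, not part of the patch: the refactor above embeds the nvif_push
directly in nv50_dmac instead of keeping a separate _push plus a pointer to
it, so the wait/kick callbacks recover the channel with container_of(). A
self-contained sketch with simplified stand-in types; the kernel's real
container_of() comes from <linux/container_of.h>.]

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct nvif_push { int cur; };          /* simplified stand-in */

    struct nv50_dmac_sketch {
            int cur, put, max;
            struct nvif_push push;          /* embedded, no separate allocation */
    };

    /* What nv50_dmac_kick()/nv50_dmac_wait() do after the refactor. */
    static struct nv50_dmac_sketch *dmac_from_push(struct nvif_push *push)
    {
            return container_of(push, struct nv50_dmac_sketch, push);
    }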
@@ -558,7 +557,7 @@ nv50_dac_create(struct nouveau_encoder *nv_encoder)
{
struct drm_connector *connector = &nv_encoder->conn->base;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus;
struct drm_encoder *encoder;
struct dcb_output *dcbe = nv_encoder->dcb;
@@ -593,8 +592,7 @@ static int
nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
bool *enabled, unsigned char *buf, int max_bytes)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
struct nouveau_crtc *nv_crtc;
@@ -639,18 +637,17 @@ static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
void *data)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_audio_component *acomp = data;
if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
return -ENOMEM;
- drm_modeset_lock_all(drm_dev);
+ drm_modeset_lock_all(drm->dev);
acomp->ops = &nv50_audio_component_ops;
acomp->dev = kdev;
drm->audio.component = acomp;
- drm_modeset_unlock_all(drm_dev);
+ drm_modeset_unlock_all(drm->dev);
return 0;
}
@@ -658,15 +655,14 @@ static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
void *data)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_audio_component *acomp = data;
- drm_modeset_lock_all(drm_dev);
+ drm_modeset_lock_all(drm->dev);
drm->audio.component = NULL;
acomp->ops = NULL;
acomp->dev = NULL;
- drm_modeset_unlock_all(drm_dev);
+ drm_modeset_unlock_all(drm->dev);
}
static const struct component_ops nv50_audio_component_bind_ops = {
@@ -1884,7 +1880,7 @@ nv50_sor_create(struct nouveau_encoder *nv_encoder)
struct drm_connector *connector = &nv_encoder->conn->base;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct drm_encoder *encoder;
struct dcb_output *dcbe = nv_encoder->dcb;
struct nv50_disp *disp = nv50_disp(connector->dev);
@@ -2051,7 +2047,7 @@ nv50_pior_create(struct nouveau_encoder *nv_encoder)
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
struct i2c_adapter *ddc;
@@ -2652,7 +2648,6 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
@@ -2819,7 +2814,7 @@ nv50_display_destroy(struct drm_device *dev)
nouveau_bo_unmap(disp->sync);
if (disp->sync)
nouveau_bo_unpin(disp->sync);
- nouveau_bo_ref(NULL, &disp->sync);
+ nouveau_bo_fini(disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
@@ -2862,7 +2857,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_bo_unpin(disp->sync);
}
if (ret)
- nouveau_bo_ref(NULL, &disp->sync);
+ nouveau_bo_fini(disp->sync);
}
if (ret)
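Both teardown paths in disp.c switch from nouveau_bo_ref(NULL, &disp->sync), which dropped the last reference and zeroed the caller's pointer, to nouveau_bo_fini(disp->sync), which presumably just releases the buffer; the pointer-clearing side effect is no longer needed here. The destroy path now reads:

    nouveau_bo_unmap(disp->sync);
    if (disp->sync)
            nouveau_bo_unpin(disp->sync);
    nouveau_bo_fini(disp->sync);    /* was: nouveau_bo_ref(NULL, &disp->sync) */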
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 5508a7cfd492..15f9242b72ac 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -62,18 +62,11 @@ struct nv50_chan {
struct nv50_dmac {
struct nv50_chan base;
- struct nvif_push _push;
- struct nvif_push *push;
- u32 *ptr;
+ struct nvif_push push;
struct nvif_object sync;
struct nvif_object vram;
- /* Protects against concurrent pushbuf access to this channel, lock is
- * grabbed by evo_wait (if the pushbuf reservation is successful) and
- * dropped again by evo_kick. */
- struct mutex lock;
-
u32 cur;
u32 put;
u32 max;
@@ -95,7 +88,7 @@ struct nv50_outp_atom {
} set, clr;
};
-int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+int nv50_dmac_create(struct nouveau_drm *,
const s32 *oclass, u8 head, void *data, u32 size,
s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
@@ -108,9 +101,6 @@ void nv50_dmac_destroy(struct nv50_dmac *);
*/
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
-u32 *evo_wait(struct nv50_dmac *, int nr);
-void evo_kick(u32 *, struct nv50_dmac *);
-
extern const u64 disp50xx_modifiers[];
extern const u64 disp90xx_modifiers[];
extern const u64 wndwc57e_modifiers[];
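This struct change is the core of the patch: nv50_dmac drops the _push/push pointer pair, the raw ptr alias, and the mutex that evo_wait()/evo_kick() used to take, leaving a directly embedded struct nvif_push. That is why every call site below changes from chan.push to &chan.push, and why the evo_wait()/evo_kick() declarations disappear. Before/after at a typical call site:

    struct nvif_push *push;

    push = nv50_disp(dev)->core->chan.push;     /* before: pointer member */
    push = &nv50_disp(dev)->core->chan.push;    /* after: embedded member */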
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 0edd4e520c8e..7fa1e0279d7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -29,7 +29,7 @@
int
head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -66,7 +66,7 @@ head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -94,7 +94,7 @@ head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -122,7 +122,7 @@ head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head507d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -139,7 +139,7 @@ head507d_curs_clr(struct nv50_head *head)
static int
head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -188,7 +188,7 @@ head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
int
head507d_core_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -202,7 +202,7 @@ head507d_core_clr(struct nv50_head *head)
static int
head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -278,7 +278,7 @@ head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head507d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -293,7 +293,7 @@ head507d_olut_clr(struct nv50_head *head)
static int
head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -345,7 +345,7 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
int
head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -400,7 +400,7 @@ head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index 194d1771c481..1545d576fe9c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -29,7 +29,7 @@
static int
head827d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head827d_curs_clr(struct nv50_head *head)
static int
head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -73,7 +73,7 @@ head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -110,7 +110,7 @@ head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head827d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -127,7 +127,7 @@ head827d_olut_clr(struct nv50_head *head)
static int
head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 18fe4c1e2d6a..6c9e0438e55c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -36,7 +36,7 @@
int
head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -57,7 +57,7 @@ head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -77,7 +77,7 @@ head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -95,7 +95,7 @@ head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -124,7 +124,7 @@ head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -152,7 +152,7 @@ head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -171,7 +171,7 @@ head907d_curs_clr(struct nv50_head *head)
int
head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -195,7 +195,7 @@ head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_core_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -209,7 +209,7 @@ head907d_core_clr(struct nv50_head *head)
int
head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -246,7 +246,7 @@ head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -263,7 +263,7 @@ head907d_olut_clr(struct nv50_head *head)
int
head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -322,7 +322,7 @@ bool head907d_ilut_check(int size)
int
head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -378,7 +378,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 4ce47b55f72c..2d9aee050510 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -30,7 +30,7 @@
static int
head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -77,7 +77,7 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index a4a3b78ea42c..2bcb3790fc10 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -30,7 +30,7 @@
static int
headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u8 depth;
int ret;
@@ -64,7 +64,7 @@ headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -85,7 +85,7 @@ headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -104,7 +104,7 @@ headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -122,7 +122,7 @@ headc37d_curs_clr(struct nv50_head *head)
int
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -161,7 +161,7 @@ headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
static int
headc37d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -175,7 +175,7 @@ headc37d_olut_clr(struct nv50_head *head)
static int
headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -209,7 +209,7 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
static int
headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -254,7 +254,7 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 53b1248c40ec..fde4087e7691 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -30,7 +30,7 @@
static int
headc57d_display_id(struct nv50_head *head, u32 display_id)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -43,7 +43,7 @@ headc57d_display_id(struct nv50_head *head, u32 display_id)
static int
headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u8 depth;
int ret;
@@ -78,7 +78,7 @@ headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -96,7 +96,7 @@ headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc57d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -110,7 +110,7 @@ headc57d_olut_clr(struct nv50_head *head)
static int
headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -201,7 +201,7 @@ headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
static int
headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index 797c1e4e0eaa..654e506f8431 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -33,7 +33,7 @@
int
ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -55,7 +55,7 @@ ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
@@ -159,7 +159,7 @@ ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index 02dc02d9260f..a5ae22ed663d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -32,7 +32,7 @@
static int
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
index 645130d18a99..8cf0e18fa596 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -29,7 +29,7 @@
static int
ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
index 17d230256bdd..79507d169778 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -30,7 +30,7 @@ static int
pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if (asyh) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
index ca73d7710885..08cc9845322e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -30,7 +30,7 @@ static int
sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if (asyh) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
index c86cd8fa61d6..23957cc8f326 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -32,7 +32,7 @@ static int
sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
index 9eaef34816da..da05d4614e00 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -29,7 +29,7 @@ static int
sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index ee76b091d4ef..7985da61aaac 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -31,7 +31,7 @@
static int
wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wimm.push;
+ struct nvif_push *push = &wndw->wimm.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -46,7 +46,7 @@ wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
static int
wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wimm.push;
+ struct nvif_push *push = &wndw->wimm.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -71,10 +71,9 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
struct nvif_disp_chan_v0 args = {
.id = wndw->id,
};
- struct nv50_disp *disp = nv50_disp(drm->dev);
int ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index b3deea5aca58..50a7b97d37a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -39,7 +39,7 @@ wndwc37e_csc_clr(struct nv50_wndw *wndw)
static int
wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -52,7 +52,7 @@ wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
wndwc37e_ilut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -65,7 +65,7 @@ wndwc37e_ilut_clr(struct nv50_wndw *wndw)
static int
wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -94,7 +94,7 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
int
wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 8)))
@@ -139,7 +139,7 @@ wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_image_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -156,7 +156,7 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
static int
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
@@ -209,7 +209,7 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -222,7 +222,7 @@ wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
int
wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
@@ -239,7 +239,7 @@ wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_sema_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -252,7 +252,7 @@ wndwc37e_sema_clr(struct nv50_wndw *wndw)
int
wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -268,7 +268,7 @@ wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -363,7 +363,7 @@ wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 1d214a4b960a..d1ca51aae58c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -32,7 +32,7 @@
static int
wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
@@ -81,7 +81,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
const u32 identity[12] = {
0x00010000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00010000, 0x00000000, 0x00000000,
@@ -99,7 +99,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -112,7 +112,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -125,7 +125,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
index 7a370fa1df20..52af293c98f4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -29,7 +29,7 @@
static int
wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index fa161b74d967..ea937fa7bc55 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -2,13 +2,6 @@
#ifndef __NVIF_CL0080_H__
#define __NVIF_CL0080_H__
-struct nv_device_v0 {
- __u8 version;
- __u8 priv;
- __u8 pad02[6];
- __u64 device; /* device identifier, ~0 for client default */
-};
-
#define NV_DEVICE_V0_INFO 0x00
#define NV_DEVICE_V0_TIME 0x01
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e668ab1664f0..824e052dcc25 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -7,9 +7,6 @@
#define NVIF_CLASS_CONTROL /* if0001.h */ -0x00000001
-#define NVIF_CLASS_PERFMON /* if0002.h */ -0x00000002
-#define NVIF_CLASS_PERFDOM /* if0003.h */ -0x00000003
-
#define NVIF_CLASS_SW_NV04 /* if0004.h */ -0x00000004
#define NVIF_CLASS_SW_NV10 /* if0005.h */ -0x00000005
#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 5d9395e651b6..03f1d564eb12 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -7,21 +7,12 @@
struct nvif_client {
struct nvif_object object;
const struct nvif_driver *driver;
- u64 version;
- u8 route;
};
-int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
- struct nvif_client *);
+int nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *);
void nvif_client_dtor(struct nvif_client *);
-int nvif_client_ioctl(struct nvif_client *, void *, u32);
int nvif_client_suspend(struct nvif_client *);
int nvif_client_resume(struct nvif_client *);
/*XXX*/
-#include <core/client.h>
-#define nvxx_client(a) ({ \
- struct nvif_client *_client = (a); \
- (struct nvkm_client *)_client->object.priv; \
-})
#endif
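nvif_client_ctor() loses its device identifier argument along with the version/route bookkeeping, nvif_client_ioctl(), and the nvxx_client() escape hatch; a client no longer binds to a device at construction time. New call shape (the parent pointer and name are placeholders):

    struct nvif_client client;
    int ret;

    ret = nvif_client_ctor(&parent->client, "exampleClient", &client);
    if (ret)
            return ret;
    /* device selection now happens at nvif_device_ctor() time, below */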
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index b0e59800a320..7877a2a79da9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -18,41 +18,8 @@ struct nvif_device {
struct nvif_user user;
};
-int nvif_device_ctor(struct nvif_object *, const char *name, u32 handle,
- s32 oclass, void *, u32, struct nvif_device *);
+int nvif_device_ctor(struct nvif_client *, const char *name, struct nvif_device *);
void nvif_device_dtor(struct nvif_device *);
+int nvif_device_map(struct nvif_device *);
u64 nvif_device_time(struct nvif_device *);
-
-/*XXX*/
-#include <subdev/bios.h>
-#include <subdev/fb.h>
-#include <subdev/bar.h>
-#include <subdev/gpio.h>
-#include <subdev/clk.h>
-#include <subdev/i2c.h>
-#include <subdev/timer.h>
-#include <subdev/therm.h>
-#include <subdev/pci.h>
-
-#define nvxx_device(a) ({ \
- struct nvif_device *_device = (a); \
- struct { \
- struct nvkm_object object; \
- struct nvkm_device *device; \
- } *_udevice = _device->object.priv; \
- _udevice->device; \
-})
-#define nvxx_bios(a) nvxx_device(a)->bios
-#define nvxx_fb(a) nvxx_device(a)->fb
-#define nvxx_gpio(a) nvxx_device(a)->gpio
-#define nvxx_clk(a) nvxx_device(a)->clk
-#define nvxx_i2c(a) nvxx_device(a)->i2c
-#define nvxx_iccsense(a) nvxx_device(a)->iccsense
-#define nvxx_therm(a) nvxx_device(a)->therm
-#define nvxx_volt(a) nvxx_device(a)->volt
-
-#include <engine/fifo.h>
-#include <engine/gr.h>
-
-#define nvxx_gr(a) nvxx_device(a)->gr
#endif
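Two changes land in this header. First, nvif_device_ctor() is rebuilt around an nvif_client parent, with mapping split out into an explicit nvif_device_map() step. Second, the entire nvxx_*() block of nvkm accessor macros is gone, which is what the earlier hunks were reacting to when they rewrote nvxx_i2c(&drm->client.device) as nvxx_i2c(drm); the surviving nvxx helpers presumably now key off struct nouveau_drm. Construction sketch against the new prototypes:

    struct nvif_device device;
    int ret;

    ret = nvif_device_ctor(&client, "exampleDevice", &device);
    if (ret == 0)
            ret = nvif_device_map(&device);   /* mapping is now explicit */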
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 7a3af05f7f98..7b08ff769039 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -8,20 +8,15 @@ struct nvif_driver {
const char *name;
int (*init)(const char *name, u64 device, const char *cfg,
const char *dbg, void **priv);
- void (*fini)(void *priv);
int (*suspend)(void *priv);
int (*resume)(void *priv);
int (*ioctl)(void *priv, void *data, u32 size, void **hack);
void __iomem *(*map)(void *priv, u64 handle, u32 size);
void (*unmap)(void *priv, void __iomem *ptr, u32 size);
- bool keep;
};
int nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
const char *name, u64 device, struct nvif_client *);
extern const struct nvif_driver nvif_driver_nvkm;
-extern const struct nvif_driver nvif_driver_drm;
-extern const struct nvif_driver nvif_driver_lib;
-extern const struct nvif_driver nvif_driver_null;
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
index f7b8f8f48760..c06383835337 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -5,16 +5,6 @@
struct nvif_client_v0 {
__u8 version;
__u8 pad01[7];
- __u64 device;
char name[32];
};
-
-#define NVIF_CLIENT_V0_DEVLIST 0x00
-
-struct nvif_client_devlist_v0 {
- __u8 version;
- __u8 count;
- __u8 pad02[6];
- __u64 device[];
-};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0002.h b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
deleted file mode 100644
index df2915d6a61e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvif/if0002.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVIF_IF0002_H__
-#define __NVIF_IF0002_H__
-
-#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
-#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
-#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
-
-struct nvif_perfmon_query_domain_v0 {
- __u8 version;
- __u8 id;
- __u8 counter_nr;
- __u8 iter;
- __u16 signal_nr;
- __u8 pad05[2];
- char name[64];
-};
-
-struct nvif_perfmon_query_signal_v0 {
- __u8 version;
- __u8 domain;
- __u16 iter;
- __u8 signal;
- __u8 source_nr;
- __u8 pad05[2];
- char name[64];
-};
-
-struct nvif_perfmon_query_source_v0 {
- __u8 version;
- __u8 domain;
- __u8 signal;
- __u8 iter;
- __u8 pad04[4];
- __u32 source;
- __u32 mask;
- char name[64];
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0003.h b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
deleted file mode 100644
index 78467da07c37..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvif/if0003.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVIF_IF0003_H__
-#define __NVIF_IF0003_H__
-
-struct nvif_perfdom_v0 {
- __u8 version;
- __u8 domain;
- __u8 mode;
- __u8 pad03[1];
- struct {
- __u8 signal[4];
- __u64 source[4][8];
- __u16 logic_op;
- } ctr[4];
-};
-
-#define NVIF_PERFDOM_V0_INIT 0x00
-#define NVIF_PERFDOM_V0_SAMPLE 0x01
-#define NVIF_PERFDOM_V0_READ 0x02
-
-struct nvif_perfdom_init {
-};
-
-struct nvif_perfdom_sample {
-};
-
-struct nvif_perfdom_read_v0 {
- __u8 version;
- __u8 pad01[7];
- __u32 ctr[4];
- __u32 clk;
- __u8 pad04[4];
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4e047bb1fc07..e825c8a1d9ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -2,17 +2,12 @@
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
-#define NVIF_VERSION_LATEST 0x0000000000000100ULL
-
struct nvif_ioctl_v0 {
__u8 version;
-#define NVIF_IOCTL_V0_NOP 0x00
#define NVIF_IOCTL_V0_SCLASS 0x01
#define NVIF_IOCTL_V0_NEW 0x02
#define NVIF_IOCTL_V0_DEL 0x03
#define NVIF_IOCTL_V0_MTHD 0x04
-#define NVIF_IOCTL_V0_RD 0x05
-#define NVIF_IOCTL_V0_WR 0x06
#define NVIF_IOCTL_V0_MAP 0x07
#define NVIF_IOCTL_V0_UNMAP 0x08
__u8 type;
@@ -28,10 +23,6 @@ struct nvif_ioctl_v0 {
__u8 data[]; /* ioctl data (below) */
};
-struct nvif_ioctl_nop_v0 {
- __u64 version;
-};
-
struct nvif_ioctl_sclass_v0 {
/* nvif_ioctl ... */
__u8 version;
@@ -67,24 +58,6 @@ struct nvif_ioctl_mthd_v0 {
__u8 data[]; /* method data (class.h) */
};
-struct nvif_ioctl_rd_v0 {
- /* nvif_ioctl ... */
- __u8 version;
- __u8 size;
- __u8 pad02[2];
- __u32 data;
- __u64 addr;
-};
-
-struct nvif_ioctl_wr_v0 {
- /* nvif_ioctl ... */
- __u8 version;
- __u8 size;
- __u8 pad02[2];
- __u32 data;
- __u64 addr;
-};
-
struct nvif_ioctl_map_v0 {
/* nvif_ioctl ... */
__u8 version;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index f52399caee82..8d205b6af46a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -34,8 +34,6 @@ void nvif_object_dtor(struct nvif_object *);
int nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
int nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
void nvif_object_sclass_put(struct nvif_sclass **);
-u32 nvif_object_rd(struct nvif_object *, int, u64);
-void nvif_object_wr(struct nvif_object *, int, u64, u32);
int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
int nvif_object_map_handle(struct nvif_object *, void *, u32,
u64 *handle, u64 *length);
@@ -47,20 +45,11 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_object(a) (a)->object
#define nvif_rd(a,f,b,c) ({ \
- struct nvif_object *_object = (a); \
- u32 _data; \
- if (likely(_object->map.ptr)) \
- _data = f((u8 __iomem *)_object->map.ptr + (c)); \
- else \
- _data = nvif_object_rd(_object, (b), (c)); \
+ u32 _data = f((u8 __iomem *)(a)->map.ptr + (c)); \
_data; \
})
#define nvif_wr(a,f,b,c,d) ({ \
- struct nvif_object *_object = (a); \
- if (likely(_object->map.ptr)) \
- f((d), (u8 __iomem *)_object->map.ptr + (c)); \
- else \
- nvif_object_wr(_object, (b), (c), (d)); \
+ f((d), (u8 __iomem *)(a)->map.ptr + (c)); \
})
#define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); })
#define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); })
@@ -69,7 +58,7 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c))
#define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c))
#define nvif_mask(a,b,c,d) ({ \
- struct nvif_object *__object = (a); \
+ typeof(a) __object = (a); \
u32 _addr = (b), _data = nvif_rd32(__object, _addr); \
nvif_wr32(__object, _addr, (_data & ~(c)) | (d)); \
_data; \
@@ -134,11 +123,4 @@ struct nvif_mclass {
#define NVIF_MR32(p,A...) DRF_MR(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
#define NVIF_MV32(p,A...) DRF_MV(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
#define NVIF_MD32(p,A...) DRF_MD(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
-
-/*XXX*/
-#include <core/object.h>
-#define nvxx_object(a) ({ \
- struct nvif_object *_object = (a); \
- (struct nvkm_object *)_object->priv; \
-})
#endif
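With the unmapped fallbacks removed (nvif_object_rd()/nvif_object_wr() above, and the matching RD/WR ioctl structs deleted from nvif/ioctl.h below), nvif_rd()/nvif_wr() keep only the fast path: they dereference map.ptr unconditionally. The contract is that an object must be CPU-mapped before the register accessors are used; a sketch, with the offset purely illustrative:

    u32 val;
    int ret;

    ret = nvif_object_map(&object, NULL, 0);   /* must succeed first */
    if (ret)
            return ret;

    val = nvif_rd32(&object, 0x100);           /* direct ioread32 on map.ptr */
    nvif_wr32(&object, 0x100, val | 0x1);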
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 429d0106c123..a2eaf3929ac3 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -34,4 +34,23 @@
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
+
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native ioread32be
+#define iowrite32_native iowrite32be
+#else
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native ioread32
+#define iowrite32_native iowrite32
+#endif
+
+#define iowrite64_native(v,p) do { \
+ u32 __iomem *_p = (u32 __iomem *)(p); \
+ u64 _v = (v); \
+ iowrite32_native(lower_32_bits(_v), &_p[0]); \
+ iowrite32_native(upper_32_bits(_v), &_p[1]); \
+} while(0)
#endif
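The endian-aware MMIO helpers move verbatim from nvkm/core/os.h (deleted from that header below) into nvif/os.h, so nvif-level code can use them without reaching into nvkm headers. Worth noting that iowrite64_native() is not a single 64-bit store; it issues two 32-bit native-endian writes with the low word at the lower address:

    u64 v = 0x1122334455667788ULL;
    u32 __iomem *p = regs;     /* hypothetical mapped register pair */

    iowrite64_native(v, p);
    /* equivalent to:
     *   iowrite32_native(0x55667788, &p[0]);
     *   iowrite32_native(0x11223344, &p[1]);
     */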
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 932c9fd0b2d8..15f27fdd877a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -22,7 +22,6 @@ struct nvkm_client {
int nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
int (*)(u64, void *, u32), struct nvkm_client **);
-struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle);
/* logging for client-facing objects */
#define nvif_printk(o,l,p,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index f057d348221e..46afb877a296 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -109,7 +109,6 @@ struct nvkm_device_chip {
};
struct nvkm_device *nvkm_device_find(u64 name);
-int nvkm_device_list(u64 *name, int size);
/* privileged register interface accessor macros */
#define nvkm_rd08(d,a) ioread8((d)->pri + (a))
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
index 30c17db483cb..9d2a1abf64f9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -46,7 +46,6 @@ NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM , struct nvkm_pm , pm)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index ed1f66360782..10107ef3ca49 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -15,8 +15,6 @@ struct nvkm_object {
struct list_head head;
struct list_head tree;
- u8 route;
- u64 token;
u64 object;
struct rb_node node;
};
@@ -35,12 +33,6 @@ struct nvkm_object_func {
int (*map)(struct nvkm_object *, void *argv, u32 argc,
enum nvkm_object_map *, u64 *addr, u64 *size);
int (*unmap)(struct nvkm_object *);
- int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
- int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
- int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
- int (*wr08)(struct nvkm_object *, u64 addr, u8 data);
- int (*wr16)(struct nvkm_object *, u64 addr, u16 data);
- int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
@@ -63,12 +55,6 @@ int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
enum nvkm_object_map *, u64 *addr, u64 *size);
int nvkm_object_unmap(struct nvkm_object *);
-int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8 *data);
-int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
-int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
-int nvkm_object_wr08(struct nvkm_object *, u64 addr, u8 data);
-int nvkm_object_wr16(struct nvkm_object *, u64 addr, u16 data);
-int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data);
int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
index 8e1b945d38f3..cad05f0e7948 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
@@ -21,8 +21,6 @@ struct nvkm_oclass {
const void *priv;
const void *engn;
u32 handle;
- u8 route;
- u64 token;
u64 object;
struct nvkm_client *client;
struct nvkm_object *parent;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 3fd5c007a663..9b05612e6490 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -3,25 +3,6 @@
#define __NVKM_OS_H__
#include <nvif/os.h>
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native ioread32be
-#define iowrite32_native iowrite32be
-#else
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native ioread32
-#define iowrite32_native iowrite32
-#endif
-
-#define iowrite64_native(v,p) do { \
- u32 __iomem *_p = (u32 __iomem *)(p); \
- u64 _v = (v); \
- iowrite32_native(lower_32_bits(_v), &_p[0]); \
- iowrite32_native(upper_32_bits(_v), &_p[1]); \
-} while(0)
-
struct nvkm_blob {
void *data;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
index b4b5df3e1610..7444c4d59e09 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -10,6 +10,5 @@ struct nvkm_device_pci {
};
int nvkm_device_pci_new(struct pci_dev *, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index ccee53d4e4ec..22f74fc88cd7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -51,6 +51,5 @@ struct nvkm_device_tegra_func {
int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
struct platform_device *,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
deleted file mode 100644
index af89d46ea360..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_H__
-#define __NVKM_PM_H__
-#include <core/engine.h>
-
-struct nvkm_pm {
- const struct nvkm_pm_func *func;
- struct nvkm_engine engine;
-
- struct {
- spinlock_t lock;
- struct nvkm_object *object;
- } client;
-
- struct list_head domains;
- struct list_head sources;
- u32 sequence;
-};
-
-int nv40_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int nv50_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int g84_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gt200_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gt215_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf100_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf108_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf117_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gk104_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d56909071de6..2a0617e5fe2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -46,23 +46,9 @@ nouveau_abi16(struct drm_file *file_priv)
struct nouveau_abi16 *abi16;
cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
if (cli->abi16) {
- struct nv_device_v0 args = {
- .device = ~0ULL,
- };
-
+ abi16->cli = cli;
INIT_LIST_HEAD(&abi16->channels);
-
- /* allocate device object targeting client's default
- * device (ie. the one that belongs to the fd it
- * opened)
- */
- if (nvif_device_ctor(&cli->base.object, "abi16Device",
- 0, NV_DEVICE, &args, sizeof(args),
- &abi16->device) == 0)
- return cli->abi16;
-
- kfree(cli->abi16);
- cli->abi16 = NULL;
+ INIT_LIST_HEAD(&abi16->objects);
}
}
return cli->abi16;
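nouveau_abi16() no longer allocates a real per-fd device object; the constructor shrinks to recording the cli backpointer and initialising two lists. Device queries formerly served by abi16->device are answered from cli->device in later hunks, and the new objects list backs the stub tracking introduced next. The per-fd setup now amounts to:

    cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
    if (cli->abi16) {
            abi16->cli = cli;
            INIT_LIST_HEAD(&abi16->channels);
            INIT_LIST_HEAD(&abi16->objects);
    }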
@@ -82,11 +68,72 @@ nouveau_abi16_get(struct drm_file *file_priv)
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
- struct nouveau_cli *cli = (void *)abi16->device.object.client;
+ struct nouveau_cli *cli = abi16->cli;
mutex_unlock(&cli->mutex);
return ret;
}
+/* Tracks objects created via the DRM_NOUVEAU_NVIF ioctl.
+ *
+ * The only two types of object that userspace ever allocated via this
+ * interface are 'device', in order to retrieve basic device info, and
+ * 'engine objects', which instantiate HW classes on a channel.
+ *
+ * The remainder of what used to be available via DRM_NOUVEAU_NVIF has
+ * been removed, but these object types need to be tracked to maintain
+ * compatibility with userspace.
+ */
+struct nouveau_abi16_obj {
+ enum nouveau_abi16_obj_type {
+ DEVICE,
+ ENGOBJ,
+ } type;
+ u64 object;
+
+ struct nvif_object engobj;
+
+ struct list_head head; /* protected by nouveau_abi16.cli.mutex */
+};
+
+static struct nouveau_abi16_obj *
+nouveau_abi16_obj_find(struct nouveau_abi16 *abi16, u64 object)
+{
+ struct nouveau_abi16_obj *obj;
+
+ list_for_each_entry(obj, &abi16->objects, head) {
+ if (obj->object == object)
+ return obj;
+ }
+
+ return NULL;
+}
+
+static void
+nouveau_abi16_obj_del(struct nouveau_abi16_obj *obj)
+{
+ list_del(&obj->head);
+ kfree(obj);
+}
+
+static struct nouveau_abi16_obj *
+nouveau_abi16_obj_new(struct nouveau_abi16 *abi16, enum nouveau_abi16_obj_type type, u64 object)
+{
+ struct nouveau_abi16_obj *obj;
+
+ obj = nouveau_abi16_obj_find(abi16, object);
+ if (obj)
+ return ERR_PTR(-EEXIST);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->type = type;
+ obj->object = object;
+ list_add_tail(&obj->head, &abi16->objects);
+ return obj;
+}
+
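The tracking above is deliberately minimal: a linear walk of abi16->objects keyed on the userspace token, serialised by cli->mutex, with ERR_PTR(-EEXIST) on duplicate tokens. That suffices because a client only ever creates one device stub and a handful of engine objects. The ioctl handlers at the end of this patch use it roughly as follows:

    struct nouveau_abi16_obj *obj;

    /* NVIF_IOCTL_V0_NEW: */
    obj = nouveau_abi16_obj_new(abi16, DEVICE, args->object);
    if (IS_ERR(obj))
            return PTR_ERR(obj);

    /* NVIF_IOCTL_V0_DEL: */
    obj = nouveau_abi16_obj_find(abi16, ioctl->object);
    if (obj)
            nouveau_abi16_obj_del(obj);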
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
@@ -164,17 +211,20 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
- struct nouveau_cli *cli = (void *)abi16->device.object.client;
+ struct nouveau_cli *cli = abi16->cli;
struct nouveau_abi16_chan *chan, *temp;
+ struct nouveau_abi16_obj *obj, *tmp;
+
+ /* cleanup objects */
+ list_for_each_entry_safe(obj, tmp, &abi16->objects, head) {
+ nouveau_abi16_obj_del(obj);
+ }
/* cleanup channels */
list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
nouveau_abi16_chan_fini(abi16, chan);
}
- /* destroy the device object */
- nvif_device_dtor(&abi16->device);
-
kfree(cli->abi16);
cli->abi16 = NULL;
}
@@ -199,8 +249,8 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
- struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
- struct nvkm_gr *gr = nvxx_gr(device);
+ struct nvkm_device *nvkm_device = nvxx_device(drm);
+ struct nvkm_gr *gr = nvxx_gr(drm);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -291,7 +341,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
- struct nvif_device *device;
+ struct nvif_device *device = &cli->device;
u64 engine, runm;
int ret;
@@ -308,7 +358,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
*/
__nouveau_cli_disable_uvmm_noinit(cli);
- device = &abi16->device;
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
/* hack to allow channel engine type specification on kepler */
@@ -356,7 +405,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
list_add(&chan->head, &abi16->channels);
/* create channel object and initialise dma and fence management */
- ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
+ ret = nouveau_channel_new(cli, false, runm, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
if (ret)
goto done;
@@ -458,44 +507,6 @@ nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
}
int
-nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
-{
- union {
- struct nvif_ioctl_v0 v0;
- } *args = data;
- struct nouveau_abi16_chan *chan;
- struct nouveau_abi16 *abi16;
- int ret = -ENOSYS;
-
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- switch (args->v0.type) {
- case NVIF_IOCTL_V0_NEW:
- case NVIF_IOCTL_V0_MTHD:
- case NVIF_IOCTL_V0_SCLASS:
- break;
- default:
- return -EACCES;
- }
- } else
- return ret;
-
- if (!(abi16 = nouveau_abi16(file_priv)))
- return -ENOMEM;
-
- if (args->v0.token != ~0ULL) {
- if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
- return -EINVAL;
- args->v0.object = nvif_handle(&chan->chan->user);
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
- return 0;
- }
-
- args->v0.object = nvif_handle(&abi16->device.object);
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
- return 0;
-}
-
-int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_free *req = data;
@@ -519,7 +530,6 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nvif_client *client;
struct nvif_sclass *sclass;
s32 oclass = 0;
int ret, i;
@@ -529,7 +539,6 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
if (init->handle == ~0)
return nouveau_abi16_put(abi16, -EINVAL);
- client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, init->channel);
if (!chan)
@@ -594,10 +603,8 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
list_add(&ntfy->head, &chan->notifiers);
- client->route = NVDRM_OBJECT_ABI16;
ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
oclass, NULL, 0, &ntfy->object);
- client->route = NVDRM_OBJECT_NVIF;
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -612,18 +619,17 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nvif_device *device = &abi16->device;
- struct nvif_client *client;
+ struct nvif_device *device;
struct nv_dma_v0 args = {};
int ret;
if (unlikely(!abi16))
return -ENOMEM;
+ device = &abi16->cli->device;
/* completely unnecessary for these chipsets... */
if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
return nouveau_abi16_put(abi16, -EINVAL);
- client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, info->channel);
if (!chan)
@@ -660,11 +666,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
args.limit += chan->ntfy->offset;
}
- client->route = NVDRM_OBJECT_ABI16;
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&ntfy->object);
- client->route = NVDRM_OBJECT_NVIF;
if (ret)
goto done;
@@ -704,3 +708,183 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, ret);
}
+
+static int
+nouveau_abi16_ioctl_mthd(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nouveau_cli *cli = abi16->cli;
+ struct nvif_ioctl_mthd_v0 *args;
+ struct nouveau_abi16_obj *obj;
+ struct nv_device_info_v0 *info;
+
+ if (ioctl->route || argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
+ obj = nouveau_abi16_obj_find(abi16, ioctl->object);
+ if (!obj || obj->type != DEVICE)
+ return -EINVAL;
+
+ if (args->method != NV_DEVICE_V0_INFO ||
+ argc != sizeof(*info))
+ return -EINVAL;
+
+ info = (void *)args->data;
+ if (info->version != 0x00)
+ return -EINVAL;
+
+ info = &cli->device.info;
+ memcpy(args->data, info, sizeof(*info));
+ return 0;
+}
+
+static int
+nouveau_abi16_ioctl_del(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nouveau_abi16_obj *obj;
+
+ if (ioctl->route || argc)
+ return -EINVAL;
+
+ obj = nouveau_abi16_obj_find(abi16, ioctl->object);
+ if (obj) {
+ if (obj->type == ENGOBJ)
+ nvif_object_dtor(&obj->engobj);
+ nouveau_abi16_obj_del(obj);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_abi16_ioctl_new(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nvif_ioctl_new_v0 *args;
+ struct nouveau_abi16_chan *chan;
+ struct nouveau_abi16_obj *obj;
+ int ret;
+
+ if (argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
+ if (args->version != 0)
+ return -EINVAL;
+
+ if (!ioctl->route) {
+ if (ioctl->object || args->oclass != NV_DEVICE)
+ return -EINVAL;
+
+ obj = nouveau_abi16_obj_new(abi16, DEVICE, args->object);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ return 0;
+ }
+
+ chan = nouveau_abi16_chan(abi16, ioctl->token);
+ if (!chan)
+ return -EINVAL;
+
+ obj = nouveau_abi16_obj_new(abi16, ENGOBJ, args->object);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", args->handle, args->oclass,
+ NULL, 0, &obj->engobj);
+ if (ret)
+ nouveau_abi16_obj_del(obj);
+
+ return ret;
+}
+
+static int
+nouveau_abi16_ioctl_sclass(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nvif_ioctl_sclass_v0 *args;
+ struct nouveau_abi16_chan *chan;
+ struct nvif_sclass *sclass;
+ int ret;
+
+ if (!ioctl->route || argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
+ if (argc != args->count * sizeof(args->oclass[0]))
+ return -EINVAL;
+
+ chan = nouveau_abi16_chan(abi16, ioctl->token);
+ if (!chan)
+ return -EINVAL;
+
+ ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < min_t(u8, args->count, ret); i++) {
+ args->oclass[i].oclass = sclass[i].oclass;
+ args->oclass[i].minver = sclass[i].minver;
+ args->oclass[i].maxver = sclass[i].maxver;
+ }
+ args->count = ret;
+
+ nvif_object_sclass_put(&sclass);
+ return 0;
+}
+
+int
+nouveau_abi16_ioctl(struct drm_file *filp, void __user *user, u32 size)
+{
+ struct nvif_ioctl_v0 *ioctl;
+ struct nouveau_abi16 *abi16;
+ u32 argc = size;
+ int ret;
+
+ if (argc < sizeof(*ioctl))
+ return -EINVAL;
+ argc -= sizeof(*ioctl);
+
+ ioctl = kmalloc(size, GFP_KERNEL);
+ if (!ioctl)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(ioctl, user, size))
+ goto done_free;
+
+ if (ioctl->version != 0x00 ||
+ (ioctl->route && ioctl->route != 0xff)) {
+ ret = -EINVAL;
+ goto done_free;
+ }
+
+ abi16 = nouveau_abi16_get(filp);
+ if (unlikely(!abi16)) {
+ ret = -ENOMEM;
+ goto done_free;
+ }
+
+ switch (ioctl->type) {
+ case NVIF_IOCTL_V0_SCLASS: ret = nouveau_abi16_ioctl_sclass(abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_NEW : ret = nouveau_abi16_ioctl_new (abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_DEL : ret = nouveau_abi16_ioctl_del (abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_MTHD : ret = nouveau_abi16_ioctl_mthd (abi16, ioctl, argc); break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ nouveau_abi16_put(abi16, 0);
+
+ if (ret == 0) {
+ if (copy_to_user(user, ioctl, size))
+ ret = -EFAULT;
+ }
+
+done_free:
+ kfree(ioctl);
+ return ret;
+}
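The nouveau_abi16_ioctl() entry point above replaces the old usif path with the standard kernel pattern for variable-size ioctl payloads: bounds-check the fixed header, copy the whole buffer into a kernel allocation, validate version/route, dispatch on type, and copy results back only on success. A minimal, self-contained sketch of that shape (demo_hdr, demo_ioctl and the handler stub are illustrative names, not the driver's):

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_hdr {
	__u8 version;
	__u8 type;
	__u8 pad[6];
	__u8 data[];		/* variable-length payload follows */
};

static int demo_ioctl(void __user *user, u32 size)
{
	struct demo_hdr *hdr;
	int ret;

	if (size < sizeof(*hdr))	/* header must fit before touching it */
		return -EINVAL;

	hdr = kmalloc(size, GFP_KERNEL);
	if (!hdr)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(hdr, user, size))
		goto done;

	switch (hdr->type) {		/* dispatch on the validated header */
	case 0:  ret = 0; break;	/* real handlers would go here */
	default: ret = -EINVAL; break;
	}

	/* write results back only when the handler succeeded */
	if (ret == 0 && copy_to_user(user, hdr, size))
		ret = -EFAULT;
done:
	kfree(hdr);
	return ret;
}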
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 661b901d8ecc..af6b4e1cefd2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -30,16 +30,16 @@ struct nouveau_abi16_chan {
};
struct nouveau_abi16 {
- struct nvif_device device;
+ struct nouveau_cli *cli;
struct list_head channels;
- u64 handles;
+ struct list_head objects;
};
struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *);
int nouveau_abi16_put(struct nouveau_abi16 *, int);
void nouveau_abi16_fini(struct nouveau_abi16 *);
s32 nouveau_abi16_swclass(struct nouveau_drm *);
-int nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
+int nouveau_abi16_ioctl(struct drm_file *, void __user *user, u32 size);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
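The new objects list added to struct nouveau_abi16 here is torn down in nouveau_abi16_fini() (first hunk of this file) with list_for_each_entry_safe(), which keeps a lookahead cursor so each entry can be freed while iterating. A stripped-down sketch of that teardown pattern, with illustrative demo_* names:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_obj {
	struct list_head head;	/* linked into the owner's list */
};

static void demo_fini(struct list_head *objects)
{
	struct demo_obj *obj, *tmp;

	/* tmp holds the next entry, so freeing obj is safe mid-walk */
	list_for_each_entry_safe(obj, tmp, objects, head) {
		list_del(&obj->head);
		kfree(obj);
	}
}

The plain list_for_each_entry() would read the next pointer from already-freed memory; presumably nouveau_abi16_obj_del() performs the list_del()/kfree() step in the real code.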
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8c3c1f1e01c5..c8335f5b49db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2015,7 +2015,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
struct nvbios *legacy = &drm->vbios;
memset(legacy, 0, sizeof(struct nvbios));
@@ -2086,7 +2086,7 @@ nouveau_bios_init(struct drm_device *dev)
/* only relevant for PCI devices */
if (!dev_is_pci(dev->dev) ||
- nvkm_gsp_rm(nvxx_device(&drm->client.device)->gsp))
+ nvkm_gsp_rm(nvxx_device(drm)->gsp))
return 0;
if (!NVInitVBIOS(dev))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 18eb061ccafb..62b5f5889041 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -48,6 +48,7 @@ struct bit_entry {
int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/conn.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 70fb003a6666..db961eade225 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -58,7 +58,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
@@ -109,7 +109,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 zeta)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -859,7 +859,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
struct nouveau_fence *fence;
int ret;
@@ -1171,7 +1171,7 @@ static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
struct nouveau_mem *mem = nouveau_mem(reg);
struct nvif_mmu *mmu = &drm->client.mmu;
int ret;
@@ -1291,7 +1291,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
int i, ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 4e891752c255..596a63a50a20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -53,25 +53,10 @@ nouveau_bo(struct ttm_buffer_object *bo)
return container_of(bo, struct nouveau_bo, bo);
}
-static inline int
-nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
+static inline void
+nouveau_bo_fini(struct nouveau_bo *bo)
{
- struct nouveau_bo *prev;
-
- if (!pnvbo)
- return -EINVAL;
- prev = *pnvbo;
-
- if (ref) {
- ttm_bo_get(&ref->bo);
- *pnvbo = nouveau_bo(&ref->bo);
- } else {
- *pnvbo = NULL;
- }
- if (prev)
- ttm_bo_put(&prev->bo);
-
- return 0;
+ ttm_bo_put(&bo->bo);
}
extern struct ttm_device_funcs nouveau_bo_driver;
@@ -115,35 +100,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
return ioptr;
}
-static inline void
-nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
-{
- if (*pnvbo) {
- nouveau_bo_unmap(*pnvbo);
- nouveau_bo_unpin(*pnvbo);
- nouveau_bo_ref(NULL, pnvbo);
- }
-}
-
-static inline int
-nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain,
- struct nouveau_bo **pnvbo)
-{
- int ret = nouveau_bo_new(cli, size, align, domain,
- 0, 0, NULL, NULL, pnvbo);
- if (ret == 0) {
- ret = nouveau_bo_pin(*pnvbo, domain, true);
- if (ret == 0) {
- ret = nouveau_bo_map(*pnvbo);
- if (ret == 0)
- return ret;
- nouveau_bo_unpin(*pnvbo);
- }
- nouveau_bo_ref(NULL, pnvbo);
- }
- return ret;
-}
-
int nv04_bo_move_init(struct nouveau_channel *, u32);
int nv04_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
struct ttm_resource *, struct ttm_resource *);
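One way to read the nouveau_bo_ref() removal above: the helper implemented general reference swapping (get the new BO, put the old one), but every remaining caller only ever passed ref == NULL, i.e. a final put. Collapsing it to nouveau_bo_fini() makes the intent explicit at the call sites, as in the nouveau_dmem.c hunk later in this diff:

/* before: drop the reference by "swapping in" NULL */
nouveau_bo_ref(NULL, &chunk->bo);

/* after: a plain final put of the buffer object */
nouveau_bo_fini(chunk->bo);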
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
index e2ce44adaa5c..0b6758e024a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo0039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
@@ -47,7 +47,7 @@ int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u32 src_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, old_reg);
u32 src_offset = old_reg->start << PAGE_SHIFT;
u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
@@ -96,7 +96,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 4);
@@ -104,6 +104,6 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
return ret;
PUSH_MTHD(push, NV039, SET_OBJECT, handle);
- PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->drm->ntfy.handle);
+ PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->cli->drm->ntfy.handle);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
index c6cf3629a9f9..c3de17548d97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo5039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
@@ -40,7 +40,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 length = new_reg->size;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
@@ -136,7 +136,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 6);
@@ -144,7 +144,7 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
return ret;
PUSH_MTHD(push, NV5039, SET_OBJECT, handle);
- PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->drm->ntfy.handle,
+ PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->cli->drm->ntfy.handle,
SET_CONTEXT_DMA_BUFFER_IN, chan->vram.handle,
SET_CONTEXT_DMA_BUFFER_OUT, chan->vram.handle);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
index 9b7ba31fae13..e6ef79de2498 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
@@ -37,7 +37,7 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 7);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
index a15a38a87a95..c4861d073ad4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
@@ -41,7 +41,7 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo9039.c b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
index d2bb2687d401..ad82269c7725 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo9039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
@@ -38,7 +38,7 @@ int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
struct nouveau_mem *mem = nouveau_mem(old_reg);
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
@@ -86,7 +86,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 2);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
index 4618f4f5ab56..5eaeef9d25e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
@@ -34,7 +34,7 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
index 07a5c6302c98..dff2ae0e1e45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
@@ -39,7 +39,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 10);
@@ -78,7 +78,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 2);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 7c97b2886807..2cb2e5675807 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -52,7 +52,7 @@ static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_channel *chan = container_of(event, typeof(*chan), kill);
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
@@ -66,7 +66,7 @@ int
nouveau_channel_idle(struct nouveau_channel *chan)
{
if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
struct nouveau_fence *fence = NULL;
int ret;
@@ -78,7 +78,7 @@ nouveau_channel_idle(struct nouveau_channel *chan)
if (ret) {
NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
- chan->chid, nvxx_client(&cli->base)->name);
+ chan->chid, cli->name);
return ret;
}
}
@@ -90,12 +90,10 @@ nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
- struct nouveau_cli *cli = (void *)chan->user.client;
-
if (chan->fence)
- nouveau_fence(chan->drm)->context_del(chan);
+ nouveau_fence(chan->cli->drm)->context_del(chan);
- if (cli)
+ if (nvif_object_constructed(&chan->user))
nouveau_svmm_part(chan->vmm->svmm, chan->inst);
nvif_object_dtor(&chan->blit);
@@ -110,7 +108,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->bo.pin_count)
nouveau_bo_unpin(chan->push.buffer);
- nouveau_bo_ref(NULL, &chan->push.buffer);
+ nouveau_bo_fini(chan->push.buffer);
kfree(chan);
}
*pchan = NULL;
@@ -119,33 +117,34 @@ nouveau_channel_del(struct nouveau_channel **pchan)
static void
nouveau_channel_kick(struct nvif_push *push)
{
- struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
- chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+ struct nouveau_channel *chan = container_of(push, typeof(*chan), chan.push);
+ chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
FIRE_RING(chan);
- chan->chan._push.bgn = chan->chan._push.cur;
+ chan->chan.push.bgn = chan->chan.push.cur;
}
static int
nouveau_channel_wait(struct nvif_push *push, u32 size)
{
- struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
+ struct nouveau_channel *chan = container_of(push, typeof(*chan), chan.push);
int ret;
- chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+ chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
ret = RING_SPACE(chan, size);
if (ret == 0) {
- chan->chan._push.bgn = chan->chan._push.mem.object.map.ptr;
- chan->chan._push.bgn = chan->chan._push.bgn + chan->dma.cur;
- chan->chan._push.cur = chan->chan._push.bgn;
- chan->chan._push.end = chan->chan._push.bgn + size;
+ chan->chan.push.bgn = chan->chan.push.mem.object.map.ptr;
+ chan->chan.push.bgn = chan->chan.push.bgn + chan->dma.cur;
+ chan->chan.push.cur = chan->chan.push.bgn;
+ chan->chan.push.end = chan->chan.push.bgn + size;
}
return ret;
}
static int
-nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
+nouveau_channel_prep(struct nouveau_cli *cli,
u32 size, struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_device *device = &cli->device;
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
u32 target;
@@ -155,8 +154,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
if (!chan)
return -ENOMEM;
- chan->device = device;
- chan->drm = drm;
+ chan->cli = cli;
chan->vmm = nouveau_cli_vmm(cli);
atomic_set(&chan->killed, 0);
@@ -178,13 +176,12 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
return ret;
}
- chan->chan._push.mem.object.parent = cli->base.object.parent;
- chan->chan._push.mem.object.client = &cli->base;
- chan->chan._push.mem.object.name = "chanPush";
- chan->chan._push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
- chan->chan._push.wait = nouveau_channel_wait;
- chan->chan._push.kick = nouveau_channel_kick;
- chan->chan.push = &chan->chan._push;
+ chan->chan.push.mem.object.parent = cli->base.object.parent;
+ chan->chan.push.mem.object.client = &cli->base;
+ chan->chan.push.mem.object.name = "chanPush";
+ chan->chan.push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
+ chan->chan.push.wait = nouveau_channel_wait;
+ chan->chan.push.kick = nouveau_channel_kick;
/* create dma object covering the *entire* memory space that the
* pushbuf lives in, this is because the GEM code requires that
@@ -218,8 +215,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nvxx_device(device)->func->
- resource_addr(nvxx_device(device), 1);
+ args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -228,12 +224,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.limit = device->info.ram_user - 1;
}
} else {
- if (chan->drm->agp.bridge) {
+ if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = chan->drm->agp.base;
- args.limit = chan->drm->agp.base +
- chan->drm->agp.size - 1;
+ args.start = drm->agp.base;
+ args.limit = drm->agp.base + drm->agp.size - 1;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -254,7 +249,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
}
static int
-nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
+nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
const struct nvif_mclass hosts[] = {
@@ -279,7 +274,7 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
struct nvif_chan_v0 chan;
char name[TASK_COMM_LEN+16];
} args;
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nvif_device *device = &cli->device;
struct nouveau_channel *chan;
const u64 plength = 0x10000;
const u64 ioffset = plength;
@@ -298,7 +293,7 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
size = ioffset + ilength;
/* allocate dma push buffer */
- ret = nouveau_channel_prep(drm, device, size, &chan);
+ ret = nouveau_channel_prep(cli, size, &chan);
*pchan = chan;
if (ret)
return ret;
@@ -363,8 +358,9 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
- struct nvif_device *device = chan->device;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_device *device = &cli->device;
struct nv_dma_v0 args = {};
int ret, i;
@@ -419,12 +415,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else
- if (chan->drm->agp.bridge) {
+ if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = chan->drm->agp.base;
- args.limit = chan->drm->agp.base +
- chan->drm->agp.size - 1;
+ args.start = drm->agp.base;
+ args.limit = drm->agp.base + drm->agp.size - 1;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -465,12 +460,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
chan->dma.cur = chan->dma.put;
chan->dma.free = chan->dma.max - chan->dma.cur;
- ret = PUSH_WAIT(chan->chan.push, NOUVEAU_DMA_SKIPS);
+ ret = PUSH_WAIT(&chan->chan.push, NOUVEAU_DMA_SKIPS);
if (ret)
return ret;
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- PUSH_DATA(chan->chan.push, 0x00000000);
+ PUSH_DATA(&chan->chan.push, 0x00000000);
/* allocate software object class (used for fences on <= nv05) */
if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
@@ -480,26 +475,25 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
if (ret)
return ret;
- ret = PUSH_WAIT(chan->chan.push, 2);
+ ret = PUSH_WAIT(&chan->chan.push, 2);
if (ret)
return ret;
- PUSH_NVSQ(chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
- PUSH_KICK(chan->chan.push);
+ PUSH_NVSQ(&chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
+ PUSH_KICK(&chan->chan.push);
}
/* initialise synchronisation */
- return nouveau_fence(chan->drm)->context_new(chan);
+ return nouveau_fence(drm)->context_new(chan);
}
int
-nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+nouveau_channel_new(struct nouveau_cli *cli,
bool priv, u64 runm, u32 vram, u32 gart, struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)device->object.client;
int ret;
- ret = nouveau_channel_ctor(drm, device, priv, runm, pchan);
+ ret = nouveau_channel_ctor(cli, priv, runm, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "channel create, %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 5de2ef4e98c2..016f668c0bc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -8,12 +8,10 @@ struct nvif_device;
struct nouveau_channel {
struct {
- struct nvif_push _push;
- struct nvif_push *push;
+ struct nvif_push push;
} chan;
- struct nvif_device *device;
- struct nouveau_drm *drm;
+ struct nouveau_cli *cli;
struct nouveau_vmm *vmm;
struct nvif_mem mem_userd;
@@ -62,7 +60,7 @@ struct nouveau_channel {
int nouveau_channels_init(struct nouveau_drm *);
void nouveau_channels_fini(struct nouveau_drm *);
-int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv, u64 runm,
+int nouveau_channel_new(struct nouveau_cli *, bool priv, u64 runm,
u32 vram, u32 gart, struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
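With struct nvif_push now embedded in the channel (rather than reached through a stored pointer), nouveau_chan.c recovers the channel from a push pointer via container_of(), and every caller takes &chan->chan.push, which is what all the nouveau_bo*.c hunks above are adjusting. A minimal sketch of the embedded-member recovery, with hypothetical demo_* types:

#include <linux/container_of.h>

struct demo_push {
	int cur;
};

struct demo_channel {
	struct {
		struct demo_push push;	/* embedded, as nvif_push now is */
	} chan;
};

static struct demo_channel *demo_chan(struct demo_push *push)
{
	/* maps &chan->chan.push back to chan; no back-pointer needed */
	return container_of(push, struct demo_channel, chan.push);
}

This removes the old _push/push pair and one level of indirection while keeping the kick/wait callbacks able to find their channel.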
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d4725a968827..e2fd561cd23f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -391,7 +391,6 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
};
@@ -446,10 +445,8 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
} while(0)
void
-nouveau_display_hpd_resume(struct drm_device *dev)
+nouveau_display_hpd_resume(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
if (drm->headless)
return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 2ab2ddb1eadf..1f506f8b289c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -45,7 +45,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
-void nouveau_display_hpd_resume(struct drm_device *dev);
+void nouveau_display_hpd_resume(struct nouveau_drm *);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index b01c029f3a90..a1f329ef0641 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -72,7 +72,7 @@ void
nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
bool no_prefetch)
{
- struct nvif_user *user = &chan->drm->client.device.user;
+ struct nvif_user *user = &chan->cli->drm->client.device.user;
struct nouveau_bo *pb = chan->push.buffer;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 6fb65b01d778..1f2d649f4b96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -294,7 +294,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
out_bo_unpin:
nouveau_bo_unpin(chunk->bo);
out_bo_free:
- nouveau_bo_ref(NULL, &chunk->bo);
+ nouveau_bo_fini(chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
@@ -426,7 +426,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
nouveau_bo_unpin(chunk->bo);
- nouveau_bo_ref(NULL, &chunk->bo);
+ nouveau_bo_fini(chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
@@ -443,7 +443,7 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper dst_aper, u64 dst_addr,
enum nouveau_aper src_aper, u64 src_addr)
{
- struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ struct nvif_push *push = &drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
@@ -516,7 +516,7 @@ static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
enum nouveau_aper dst_aper, u64 dst_addr)
{
- struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ struct nvif_push *push = &drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e243b42f8582..f6e78dba594f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -63,7 +63,6 @@
#include "nouveau_abi16.h"
#include "nouveau_fence.h"
#include "nouveau_debugfs.h"
-#include "nouveau_usif.h"
#include "nouveau_connector.h"
#include "nouveau_platform.h"
#include "nouveau_svm.h"
@@ -200,7 +199,6 @@ nouveau_cli_fini(struct nouveau_cli *cli)
flush_work(&cli->work);
WARN_ON(!list_empty(&cli->worker));
- usif_client_fini(cli);
if (cli->sched)
nouveau_sched_destroy(&cli->sched);
if (uvmm)
@@ -208,10 +206,11 @@ nouveau_cli_fini(struct nouveau_cli *cli)
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
+ cli->device.object.map.ptr = NULL;
nvif_device_dtor(&cli->device);
- mutex_lock(&cli->drm->master.lock);
+ mutex_lock(&cli->drm->client_mutex);
nvif_client_dtor(&cli->base);
- mutex_unlock(&cli->drm->master.lock);
+ mutex_unlock(&cli->drm->client_mutex);
}
static int
@@ -226,13 +225,6 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
{}
};
static const struct nvif_mclass
- mmus[] = {
- { NVIF_CLASS_MMU_GF100, -1 },
- { NVIF_CLASS_MMU_NV50 , -1 },
- { NVIF_CLASS_MMU_NV04 , -1 },
- {}
- };
- static const struct nvif_mclass
vmms[] = {
{ NVIF_CLASS_VMM_GP100, -1 },
{ NVIF_CLASS_VMM_GM200, -1 },
@@ -241,50 +233,33 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
{ NVIF_CLASS_VMM_NV04 , -1 },
{}
};
- u64 device = nouveau_name(drm->dev);
int ret;
snprintf(cli->name, sizeof(cli->name), "%s", sname);
cli->drm = drm;
mutex_init(&cli->mutex);
- usif_client_init(cli);
INIT_WORK(&cli->work, nouveau_cli_work);
INIT_LIST_HEAD(&cli->worker);
mutex_init(&cli->lock);
- if (cli == &drm->master) {
- ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
- cli->name, device, &cli->base);
- } else {
- mutex_lock(&drm->master.lock);
- ret = nvif_client_ctor(&drm->master.base, cli->name, device,
- &cli->base);
- mutex_unlock(&drm->master.lock);
- }
+ mutex_lock(&drm->client_mutex);
+ ret = nvif_client_ctor(&drm->_client, cli->name, &cli->base);
+ mutex_unlock(&drm->client_mutex);
if (ret) {
NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
goto done;
}
- ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
- &(struct nv_device_v0) {
- .device = ~0,
- .priv = true,
- }, sizeof(struct nv_device_v0),
- &cli->device);
+ ret = nvif_device_ctor(&cli->base, "drmDevice", &cli->device);
if (ret) {
NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
goto done;
}
- ret = nvif_mclass(&cli->device.object, mmus);
- if (ret < 0) {
- NV_PRINTK(err, cli, "No supported MMU class\n");
- goto done;
- }
+ cli->device.object.map.ptr = drm->device.object.map.ptr;
- ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
+ ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", drm->mmu.object.oclass,
&cli->mmu);
if (ret) {
NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
@@ -356,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
return;
}
- ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+ ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
@@ -384,7 +359,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
return;
}
- ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
+ ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_gr_fini(drm);
@@ -407,7 +382,8 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
}
if (ret == 0) {
- struct nvif_push *push = drm->channel->chan.push;
+ struct nvif_push *push = &drm->channel->chan.push;
+
ret = PUSH_WAIT(push, 8);
if (ret == 0) {
if (device->info.chipset >= 0x11) {
@@ -432,8 +408,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
* any GPU where it's possible we'll end up using M2MF for BO moves.
*/
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
- ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
- &drm->notify);
+ ret = nvkm_gpuobj_new(nvxx_device(drm), 32, 0, false, NULL, &drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_gr_fini(drm);
@@ -578,37 +553,70 @@ nouveau_parent = {
.errorf = nouveau_drm_errorf,
};
-static int
-nouveau_drm_device_init(struct drm_device *dev)
+static void
+nouveau_drm_device_fini(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm;
- int ret;
+ struct drm_device *dev = drm->dev;
+ struct nouveau_cli *cli, *temp_cli;
- if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
- return -ENOMEM;
- dev->dev_private = drm;
- drm->dev = dev;
+ if (nouveau_pmops_runtime()) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
- nvif_parent_ctor(&nouveau_parent, &drm->parent);
- drm->master.base.object.parent = &drm->parent;
+ nouveau_led_fini(dev);
+ nouveau_dmem_fini(drm);
+ nouveau_svm_fini(drm);
+ nouveau_hwmon_fini(dev);
+ nouveau_debugfs_fini(drm);
- drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
- WQ_MAX_ACTIVE);
- if (!drm->sched_wq) {
- ret = -ENOMEM;
- goto fail_alloc;
+ if (dev->mode_config.num_crtc)
+ nouveau_display_fini(dev, false, false);
+ nouveau_display_destroy(dev);
+
+ nouveau_accel_fini(drm);
+ nouveau_bios_takedown(dev);
+
+ nouveau_ttm_fini(drm);
+ nouveau_vga_fini(drm);
+
+ /*
+ * There may be existing clients from as-yet unclosed files. For now,
+ * clean them up here rather than deferring until the file is closed,
+	 * but this is likely not correct if we want to support hot-unplugging
+ * properly.
+ */
+ mutex_lock(&drm->clients_lock);
+ list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
+ list_del(&cli->head);
+ mutex_lock(&cli->mutex);
+ if (cli->abi16)
+ nouveau_abi16_fini(cli->abi16);
+ mutex_unlock(&cli->mutex);
+ nouveau_cli_fini(cli);
+ kfree(cli);
}
+ mutex_unlock(&drm->clients_lock);
- ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
- if (ret)
- goto fail_wq;
+ nouveau_cli_fini(&drm->client);
+ destroy_workqueue(drm->sched_wq);
+ mutex_destroy(&drm->clients_lock);
+}
+
+static int
+nouveau_drm_device_init(struct nouveau_drm *drm)
+{
+ struct drm_device *dev = drm->dev;
+ int ret;
+
+ drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
+ WQ_MAX_ACTIVE);
+ if (!drm->sched_wq)
+ return -ENOMEM;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
- goto fail_master;
-
- nvxx_client(&drm->client.base)->debug =
- nvkm_dbgopt(nouveau_debug, "DRM");
+ goto fail_wq;
INIT_LIST_HEAD(&drm->clients);
mutex_init(&drm->clients_lock);
@@ -658,6 +666,12 @@ nouveau_drm_device_init(struct drm_device *dev)
pm_runtime_put(dev->dev);
}
+ ret = drm_dev_register(drm->dev, 0);
+ if (ret) {
+ nouveau_drm_device_fini(drm);
+ return ret;
+ }
+
return 0;
fail_dispinit:
nouveau_display_destroy(dev);
@@ -669,67 +683,95 @@ fail_bios:
fail_ttm:
nouveau_vga_fini(drm);
nouveau_cli_fini(&drm->client);
-fail_master:
- nouveau_cli_fini(&drm->master);
fail_wq:
destroy_workqueue(drm->sched_wq);
-fail_alloc:
- nvif_parent_dtor(&drm->parent);
- kfree(drm);
return ret;
}
static void
-nouveau_drm_device_fini(struct drm_device *dev)
+nouveau_drm_device_del(struct nouveau_drm *drm)
{
- struct nouveau_cli *cli, *temp_cli;
- struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->dev)
+ drm_dev_put(drm->dev);
- if (nouveau_pmops_runtime()) {
- pm_runtime_get_sync(dev->dev);
- pm_runtime_forbid(dev->dev);
+ nvif_mmu_dtor(&drm->mmu);
+ nvif_device_dtor(&drm->device);
+ nvif_client_dtor(&drm->_client);
+ nvif_parent_dtor(&drm->parent);
+
+ mutex_destroy(&drm->client_mutex);
+ kfree(drm);
+}
+
+static struct nouveau_drm *
+nouveau_drm_device_new(const struct drm_driver *drm_driver, struct device *parent,
+ struct nvkm_device *device)
+{
+ static const struct nvif_mclass
+ mmus[] = {
+ { NVIF_CLASS_MMU_GF100, -1 },
+ { NVIF_CLASS_MMU_NV50 , -1 },
+ { NVIF_CLASS_MMU_NV04 , -1 },
+ {}
+ };
+ struct nouveau_drm *drm;
+ int ret;
+
+ drm = kzalloc(sizeof(*drm), GFP_KERNEL);
+ if (!drm)
+ return ERR_PTR(-ENOMEM);
+
+ drm->nvkm = device;
+
+ drm->dev = drm_dev_alloc(drm_driver, parent);
+ if (IS_ERR(drm->dev)) {
+ ret = PTR_ERR(drm->dev);
+ goto done;
}
- nouveau_led_fini(dev);
- nouveau_dmem_fini(drm);
- nouveau_svm_fini(drm);
- nouveau_hwmon_fini(dev);
- nouveau_debugfs_fini(drm);
+ drm->dev->dev_private = drm;
+ dev_set_drvdata(parent, drm);
- if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev, false, false);
- nouveau_display_destroy(dev);
+ nvif_parent_ctor(&nouveau_parent, &drm->parent);
+ mutex_init(&drm->client_mutex);
+ drm->_client.object.parent = &drm->parent;
- nouveau_accel_fini(drm);
- nouveau_bios_takedown(dev);
+ ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug, "drm",
+ nouveau_name(drm->dev), &drm->_client);
+ if (ret)
+ goto done;
- nouveau_ttm_fini(drm);
- nouveau_vga_fini(drm);
+ ret = nvif_device_ctor(&drm->_client, "drmDevice", &drm->device);
+ if (ret) {
+ NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+ goto done;
+ }
- /*
- * There may be existing clients from as-yet unclosed files. For now,
- * clean them up here rather than deferring until the file is closed,
- * but this likely not correct if we want to support hot-unplugging
- * properly.
- */
- mutex_lock(&drm->clients_lock);
- list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
- list_del(&cli->head);
- mutex_lock(&cli->mutex);
- if (cli->abi16)
- nouveau_abi16_fini(cli->abi16);
- mutex_unlock(&cli->mutex);
- nouveau_cli_fini(cli);
- kfree(cli);
+ ret = nvif_device_map(&drm->device);
+ if (ret) {
+ NV_ERROR(drm, "Failed to map PRI: %d\n", ret);
+ goto done;
}
- mutex_unlock(&drm->clients_lock);
- nouveau_cli_fini(&drm->client);
- nouveau_cli_fini(&drm->master);
- destroy_workqueue(drm->sched_wq);
- nvif_parent_dtor(&drm->parent);
- mutex_destroy(&drm->clients_lock);
- kfree(drm);
+ ret = nvif_mclass(&drm->device.object, mmus);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported MMU class\n");
+ goto done;
+ }
+
+ ret = nvif_mmu_ctor(&drm->device.object, "drmMmu", mmus[ret].oclass, &drm->mmu);
+ if (ret) {
+ NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+ goto done;
+ }
+
+done:
+ if (ret) {
+ nouveau_drm_device_del(drm);
+ drm = NULL;
+ }
+
+ return ret ? ERR_PTR(ret) : drm;
}
/*
@@ -774,8 +816,7 @@ nouveau_drm_device_fini(struct drm_device *dev)
static void quirk_broken_nv_runpm(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
struct pci_dev *bridge = pci_upstream_bridge(pdev);
if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
@@ -794,7 +835,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct nvkm_device *device;
- struct drm_device *drm_dev;
+ struct nouveau_drm *drm;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
@@ -803,31 +844,23 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
- ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
- true, false, 0, &device);
+ ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug, &device);
if (ret)
return ret;
- nvkm_device_del(&device);
-
/* Remove conflicting drivers (vesafb, efifb etc). */
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
if (ret)
return ret;
- ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
- true, true, ~0ULL, &device);
- if (ret)
- return ret;
-
pci_set_master(pdev);
if (nouveau_atomic)
driver_pci.driver_features |= DRIVER_ATOMIC;
- drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
- if (IS_ERR(drm_dev)) {
- ret = PTR_ERR(drm_dev);
+ drm = nouveau_drm_device_new(&driver_pci, &pdev->dev, device);
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
goto fail_nvkm;
}
@@ -835,69 +868,55 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm;
- pci_set_drvdata(pdev, drm_dev);
-
- ret = nouveau_drm_device_init(drm_dev);
+ ret = nouveau_drm_device_init(drm);
if (ret)
goto fail_pci;
- ret = drm_dev_register(drm_dev, pent->driver_data);
- if (ret)
- goto fail_drm_dev_init;
-
- if (nouveau_drm(drm_dev)->client.device.info.ram_size <= 32 * 1024 * 1024)
- drm_fbdev_ttm_setup(drm_dev, 8);
+ if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+ drm_fbdev_ttm_setup(drm->dev, 8);
else
- drm_fbdev_ttm_setup(drm_dev, 32);
+ drm_fbdev_ttm_setup(drm->dev, 32);
quirk_broken_nv_runpm(pdev);
return 0;
-fail_drm_dev_init:
- nouveau_drm_device_fini(drm_dev);
fail_pci:
pci_disable_device(pdev);
fail_drm:
- drm_dev_put(drm_dev);
+ nouveau_drm_device_del(drm);
fail_nvkm:
nvkm_device_del(&device);
return ret;
}
void
-nouveau_drm_device_remove(struct drm_device *dev)
+nouveau_drm_device_remove(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_client *client;
- struct nvkm_device *device;
+ struct nvkm_device *device = drm->nvkm;
- drm_dev_unplug(dev);
+ drm_dev_unplug(drm->dev);
- client = nvxx_client(&drm->client.base);
- device = nvkm_device_find(client->device);
-
- nouveau_drm_device_fini(dev);
- drm_dev_put(dev);
+ nouveau_drm_device_fini(drm);
+ nouveau_drm_device_del(drm);
nvkm_device_del(&device);
}
static void
nouveau_drm_remove(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
/* revert our workaround */
if (drm->old_pm_cap)
pdev->pm_cap = drm->old_pm_cap;
- nouveau_drm_device_remove(dev);
+ nouveau_drm_device_remove(drm);
pci_disable_device(pdev);
}
static int
-nouveau_do_suspend(struct drm_device *dev, bool runtime)
+nouveau_do_suspend(struct nouveau_drm *drm, bool runtime)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_device *dev = drm->dev;
struct ttm_resource_manager *man;
int ret;
@@ -939,7 +958,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
NV_DEBUG(drm, "suspending object tree...\n");
- ret = nvif_client_suspend(&drm->master.base);
+ ret = nvif_client_suspend(&drm->_client);
if (ret)
goto fail_client;
@@ -958,13 +977,13 @@ fail_display:
}
static int
-nouveau_do_resume(struct drm_device *dev, bool runtime)
+nouveau_do_resume(struct nouveau_drm *drm, bool runtime)
{
+ struct drm_device *dev = drm->dev;
int ret = 0;
- struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
- ret = nvif_client_resume(&drm->master.base);
+ ret = nvif_client_resume(&drm->_client);
if (ret) {
NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
return ret;
@@ -991,14 +1010,14 @@ int
nouveau_pmops_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
- ret = nouveau_do_suspend(drm_dev, false);
+ ret = nouveau_do_suspend(drm, false);
if (ret)
return ret;
@@ -1013,11 +1032,11 @@ int
nouveau_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
pci_set_power_state(pdev, PCI_D0);
@@ -1027,10 +1046,10 @@ nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev, false);
+ ret = nouveau_do_resume(drm, false);
/* Monitors may have been connected / disconnected during suspend */
- nouveau_display_hpd_resume(drm_dev);
+ nouveau_display_hpd_resume(drm);
return ret;
}
@@ -1038,17 +1057,17 @@ nouveau_pmops_resume(struct device *dev)
static int
nouveau_pmops_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return nouveau_do_suspend(drm_dev, false);
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+
+ return nouveau_do_suspend(drm, false);
}
static int
nouveau_pmops_thaw(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return nouveau_do_resume(drm_dev, false);
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+
+ return nouveau_do_resume(drm, false);
}
bool
@@ -1063,7 +1082,7 @@ static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
if (!nouveau_pmops_runtime()) {
@@ -1072,12 +1091,12 @@ nouveau_pmops_runtime_suspend(struct device *dev)
}
nouveau_switcheroo_optimus_dsm();
- ret = nouveau_do_suspend(drm_dev, true);
+ ret = nouveau_do_suspend(drm, true);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ drm->dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return ret;
}
@@ -1085,9 +1104,8 @@ static int
nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+ struct nvif_device *device = &drm->client.device;
int ret;
if (!nouveau_pmops_runtime()) {
@@ -1102,7 +1120,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev, true);
+ ret = nouveau_do_resume(drm, true);
if (ret) {
NV_ERROR(drm, "resume failed with: %d\n", ret);
return ret;
@@ -1110,10 +1128,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
- drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ drm->dev->switch_power_state = DRM_SWITCH_POWER_ON;
/* Monitors may have been connected / disconnected during suspend */
- nouveau_display_hpd_resume(drm_dev);
+ nouveau_display_hpd_resume(drm);
return ret;
}
@@ -1249,7 +1267,7 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
case DRM_NOUVEAU_NVIF:
- ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
+ ret = nouveau_abi16_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
break;
default:
ret = drm_ioctl(file, cmd, arg);
@@ -1286,7 +1304,6 @@ driver_stub = {
DRIVER_RENDER,
.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
- .lastclose = nouveau_vga_lastclose,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = nouveau_drm_debugfs_init,
@@ -1370,15 +1387,14 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
struct nvkm_device **pdevice)
{
- struct drm_device *drm;
+ struct nouveau_drm *drm;
int err;
- err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
- true, true, ~0ULL, pdevice);
+ err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug, pdevice);
if (err)
goto err_free;
- drm = drm_dev_alloc(&driver_platform, &pdev->dev);
+ drm = nouveau_drm_device_new(&driver_platform, &pdev->dev, *pdevice);
if (IS_ERR(drm)) {
err = PTR_ERR(drm);
goto err_free;
@@ -1388,12 +1404,10 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
if (err)
goto err_put;
- platform_set_drvdata(pdev, drm);
-
- return drm;
+ return drm->dev;
err_put:
- drm_dev_put(drm);
+ nouveau_drm_device_del(drm);
err_free:
nvkm_device_del(pdevice);
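The nouveau_drm.c refactor above splits the device lifecycle into symmetric pairs: nouveau_drm_device_new()/..._del() own allocation and the NVIF client/device/MMU handles, while ..._init()/..._fini() own driver bring-up, with drm_dev_register() folded into init. The probe error paths then unwind in strict reverse order of construction. A compilable sketch of that pairing, with stubbed demo_* stand-ins:

/* hypothetical stand-ins for the paired lifecycle functions */
static int  demo_device_new(void)  { return 0; }	/* alloc + client/device/mmu */
static int  demo_device_init(void) { return 0; }	/* ttm/display/accel + register */
static void demo_device_del(void)  { }			/* exact inverse of _new() */

static int demo_probe(void)
{
	int ret;

	ret = demo_device_new();
	if (ret)
		return ret;

	ret = demo_device_init();
	if (ret) {
		demo_device_del();	/* unwind in reverse order */
		return ret;
	}

	return 0;
}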
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 25fca98a20bc..685d6ca3d8aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -201,8 +201,13 @@ u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
#include <nvif/parent.h>
struct nouveau_drm {
+ struct nvkm_device *nvkm;
struct nvif_parent parent;
- struct nouveau_cli master;
+ struct mutex client_mutex;
+ struct nvif_client _client;
+ struct nvif_device device;
+ struct nvif_mmu mmu;
+
struct nouveau_cli client;
struct drm_device *dev;
@@ -326,25 +331,28 @@ bool nouveau_pmops_runtime(void);
struct drm_device *
nouveau_platform_device_create(const struct nvkm_device_tegra_func *,
struct platform_device *, struct nvkm_device **);
-void nouveau_drm_device_remove(struct drm_device *dev);
+void nouveau_drm_device_remove(struct nouveau_drm *);
#define NV_PRINTK(l,c,f,a...) do { \
struct nouveau_cli *_cli = (c); \
dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a); \
} while(0)
-#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
-#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
-#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
-#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
+#define NV_PRINTK_(l,drm,f,a...) do { \
+ dev_##l((drm)->nvkm->dev, "drm: "f, ##a); \
+} while(0)
+#define NV_FATAL(drm,f,a...) NV_PRINTK_(crit, (drm), f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK_(err, (drm), f, ##a)
+#define NV_WARN(drm,f,a...) NV_PRINTK_(warn, (drm), f, ##a)
+#define NV_INFO(drm,f,a...) NV_PRINTK_(info, (drm), f, ##a)
#define NV_DEBUG(drm,f,a...) do { \
if (drm_debug_enabled(DRM_UT_DRIVER)) \
- NV_PRINTK(info, &(drm)->client, f, ##a); \
+ NV_PRINTK_(info, (drm), f, ##a); \
} while(0)
#define NV_ATOMIC(drm,f,a...) do { \
if (drm_debug_enabled(DRM_UT_ATOMIC)) \
- NV_PRINTK(info, &(drm)->client, f, ##a); \
+ NV_PRINTK_(info, (drm), f, ##a); \
} while(0)
#define NV_PRINTK_ONCE(l,c,f,a...) NV_PRINTK(l##_once,c,f, ##a)
@@ -355,4 +363,41 @@ void nouveau_drm_device_remove(struct drm_device *dev);
extern int nouveau_modeset;
+/*XXX: Don't use these in new code.
+ *
+ * These accessors are used in a few places (mostly older code paths)
+ * to get direct access to NVKM structures, where a better-defined
+ * interface doesn't exist. Outside of the current use, these should
+ * not be relied on; such access should instead be implemented via NVIF.
+ *
+ * This is especially important when considering GSP-RM, as a lot of the
+ * modules don't exist, or are "stub" implementations that just allow
+ * the GSP-RM paths to be bootstrapped.
+ */
+#include <subdev/bios.h>
+#include <subdev/fb.h>
+#include <subdev/gpio.h>
+#include <subdev/clk.h>
+#include <subdev/i2c.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+static inline struct nvkm_device *
+nvxx_device(struct nouveau_drm *drm)
+{
+ return drm->nvkm;
+}
+
+#define nvxx_bios(a) nvxx_device(a)->bios
+#define nvxx_fb(a) nvxx_device(a)->fb
+#define nvxx_gpio(a) nvxx_device(a)->gpio
+#define nvxx_clk(a) nvxx_device(a)->clk
+#define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_iccsense(a) nvxx_device(a)->iccsense
+#define nvxx_therm(a) nvxx_device(a)->therm
+#define nvxx_volt(a) nvxx_device(a)->volt
+
+#include <engine/gr.h>
+
+#define nvxx_gr(a) nvxx_device(a)->gr
#endif
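Since nvxx_device() now just returns drm->nvkm, the nvxx_* accessors take a struct nouveau_drm * directly instead of digging through the client's nvif_device, which is why call sites all over this diff shrink, for example in the nouveau_bo.c hunk earlier:

struct nvkm_fb *fb = nvxx_fb(&drm->client.device);	/* before */
struct nvkm_fb *fb = nvxx_fb(drm);			/* after  */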
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 93f08f9479d8..09686d038d60 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -181,8 +181,9 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nouveau_fence_priv *priv = (void*)drm->fence;
struct {
struct nvif_event_v0 base;
struct nvif_chan_event_v0 host;
@@ -193,14 +194,14 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
- fctx->context = chan->drm->runl[chan->runlist].context_base + chan->chid;
+ fctx->context = drm->runl[chan->runlist].context_base + chan->chid;
- if (chan == chan->drm->cechan)
+ if (chan == drm->cechan)
strcpy(fctx->name, "copy engine channel");
- else if (chan == chan->drm->channel)
+ else if (chan == drm->channel)
strcpy(fctx->name, "generic kernel channel");
else
- strcpy(fctx->name, nvxx_client(&cli->base)->name);
+ strcpy(fctx->name, cli->name);
kref_init(&fctx->fence_ref);
if (!priv->uevent)
@@ -221,7 +222,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = unrcu_pointer(fence->channel);
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
+ struct nouveau_fence_priv *priv = (void*)chan->cli->drm->fence;
int ret;
fence->timeout = jiffies + (15 * HZ);
@@ -382,7 +383,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
continue;
- f = nouveau_local_fence(fence, chan->drm);
+ f = nouveau_local_fence(fence, chan->cli->drm);
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5a887d67dc0e..9ae2cee1c7c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -567,10 +567,11 @@ retry:
}
static int
-validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+validate_list(struct nouveau_channel *chan,
struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
@@ -642,7 +643,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return ret;
}
- ret = validate_list(chan, cli, &op->list, pbbo);
+ ret = validate_list(chan, &op->list, pbbo);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
@@ -870,7 +871,7 @@ revalidate:
}
} else
if (drm->client.device.info.chipset >= 0x25) {
- ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
+ ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
if (ret) {
NV_PRINTK(err, cli, "cal_space: %d\n", ret);
goto out;
@@ -880,11 +881,11 @@ revalidate:
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
- PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset);
+ PUSH_DATA(&chan->chan.push, 0);
}
} else {
- ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
+ ret = PUSH_WAIT(&chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
goto out;
@@ -913,10 +914,10 @@ revalidate:
push[i].length - 8) / 4, cmd);
}
- PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset);
+ PUSH_DATA(&chan->chan.push, 0);
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_DATA(&chan->chan.push, 0);
}
}
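The PUSH_WAIT()/PUSH_DATA() conversions in this file follow from chan->chan.push becoming an embedded structure rather than a pointer, so callers now pass its address. A hedged sketch under that assumption, using only the macros seen above (example_push_nops() is hypothetical):

/* Hypothetical example: reserve space and emit 'count' padding dwords. */
static int
example_push_nops(struct nouveau_channel *chan, int count)
{
	struct nvif_push *push = &chan->chan.push; /* was: chan->chan.push */
	int ret, i;

	ret = PUSH_WAIT(push, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		PUSH_DATA(push, 0);

	return 0;
}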
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index db30a4c2cd4d..5c07a9ee8b77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -52,7 +52,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -64,7 +64,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
if (kstrtol(buf, 10, &value))
@@ -85,7 +85,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -97,7 +97,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
if (kstrtol(buf, 10, &value))
@@ -118,7 +118,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY);
@@ -134,7 +134,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY);
@@ -150,7 +150,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
int ret;
@@ -173,7 +173,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
int ret;
@@ -247,7 +247,7 @@ static umode_t
nouveau_power_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
if (!iccsense || !iccsense->data_valid || list_empty(&iccsense->rails))
return 0;
@@ -272,7 +272,7 @@ static umode_t
nouveau_temp_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || nvkm_therm_temp_get(therm) < 0)
return 0;
@@ -296,7 +296,7 @@ static umode_t
nouveau_pwm_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || !therm->fan_get ||
therm->fan_get(therm) < 0)
@@ -315,7 +315,7 @@ static umode_t
nouveau_input_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_volt *volt = nvxx_volt(drm);
if (!volt || nvkm_volt_get(volt) < 0)
return 0;
@@ -335,7 +335,7 @@ static umode_t
nouveau_fan_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || nvkm_therm_fan_sense(therm) < 0)
return 0;
@@ -367,7 +367,7 @@ nouveau_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
if (!therm || !therm->attr_get)
@@ -416,7 +416,7 @@ nouveau_fan_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm)
return -EOPNOTSUPP;
@@ -439,7 +439,7 @@ nouveau_in_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_volt *volt = nvxx_volt(drm);
int ret;
if (!volt)
@@ -470,7 +470,7 @@ nouveau_pwm_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || !therm->fan_get)
return -EOPNOTSUPP;
@@ -496,7 +496,7 @@ nouveau_power_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
if (!iccsense)
return -EOPNOTSUPP;
@@ -525,7 +525,7 @@ nouveau_temp_write(struct device *dev, u32 attr, int channel, long val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_set)
return -EOPNOTSUPP;
@@ -559,7 +559,7 @@ nouveau_pwm_write(struct device *dev, u32 attr, int channel, long val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_set)
return -EOPNOTSUPP;
@@ -664,9 +664,9 @@ nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
+ struct nvkm_therm *therm = nvxx_therm(drm);
+ struct nvkm_volt *volt = nvxx_volt(drm);
const struct attribute_group *special_groups[N_ATTR_GROUPS];
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
index 2c5e0628da12..ac950518a820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.c
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -78,7 +78,7 @@ int
nouveau_led_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct dcb_gpio_func logo_led;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 25f31d5169e5..fac92fdbf9cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -78,20 +78,19 @@ nouveau_mem_map(struct nouveau_mem *mem,
void
nouveau_mem_fini(struct nouveau_mem *mem)
{
- nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
- nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
- mutex_lock(&mem->cli->drm->master.lock);
+ nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[1]);
+ nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[0]);
+ mutex_lock(&mem->drm->client_mutex);
nvif_mem_dtor(&mem->mem);
- mutex_unlock(&mem->cli->drm->master.lock);
+ mutex_unlock(&mem->drm->client_mutex);
}
int
nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- struct nouveau_cli *cli = mem->cli;
- struct nouveau_drm *drm = cli->drm;
- struct nvif_mmu *mmu = &cli->mmu;
+ struct nouveau_drm *drm = mem->drm;
+ struct nvif_mmu *mmu = &drm->mmu;
struct nvif_mem_ram_v0 args = {};
u8 type;
int ret;
@@ -114,11 +113,11 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
else
args.dma = tt->dma_address;
- mutex_lock(&drm->master.lock);
- ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
+ mutex_lock(&drm->client_mutex);
+ ret = nvif_mem_ctor_type(mmu, "ttmHostMem", mmu->mem, type, PAGE_SHIFT,
reg->size,
&args, sizeof(args), &mem->mem);
- mutex_unlock(&drm->master.lock);
+ mutex_unlock(&drm->client_mutex);
return ret;
}
@@ -126,16 +125,15 @@ int
nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- struct nouveau_cli *cli = mem->cli;
- struct nouveau_drm *drm = cli->drm;
- struct nvif_mmu *mmu = &cli->mmu;
+ struct nouveau_drm *drm = mem->drm;
+ struct nvif_mmu *mmu = &drm->mmu;
u64 size = ALIGN(reg->size, 1 << page);
int ret;
- mutex_lock(&drm->master.lock);
- switch (cli->mem->oclass) {
+ mutex_lock(&drm->client_mutex);
+ switch (mmu->mem) {
case NVIF_CLASS_MEM_GF100:
- ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
+ ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,
drm->ttm.type_vram, page, size,
&(struct gf100_mem_v0) {
.contig = contig,
@@ -143,7 +141,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
&mem->mem);
break;
case NVIF_CLASS_MEM_NV50:
- ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
+ ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,
drm->ttm.type_vram, page, size,
&(struct nv50_mem_v0) {
.bankswz = mmu->kind[mem->kind] == 2,
@@ -156,7 +154,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
WARN_ON(1);
break;
}
- mutex_unlock(&drm->master.lock);
+ mutex_unlock(&drm->client_mutex);
reg->start = mem->mem.addr >> PAGE_SHIFT;
return ret;
@@ -173,7 +171,7 @@ nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
}
int
-nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
+nouveau_mem_new(struct nouveau_drm *drm, u8 kind, u8 comp,
struct ttm_resource **res)
{
struct nouveau_mem *mem;
@@ -181,7 +179,7 @@ nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
- mem->cli = cli;
+ mem->drm = drm;
mem->kind = kind;
mem->comp = comp;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 5365a3d3a17f..a070ee049f6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -8,7 +8,7 @@ struct ttm_tt;
struct nouveau_mem {
struct ttm_resource base;
- struct nouveau_cli *cli;
+ struct nouveau_drm *drm;
u8 kind;
u8 comp;
struct nvif_mem mem;
@@ -21,7 +21,7 @@ nouveau_mem(struct ttm_resource *reg)
return container_of(reg, struct nouveau_mem, base);
}
-int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
+int nouveau_mem_new(struct nouveau_drm *, u8 kind, u8 comp,
struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource_manager *man,
struct ttm_resource *);
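With struct nouveau_mem now recording the nouveau_drm pointer, allocation callers pass drm directly, and teardown serializes on drm->client_mutex as the nouveau_mem.c hunks above show. A sketch of the new calling convention (example_mem_alloc() and the zero kind/comp values are illustrative only):

/* Hypothetical example: allocate a resource via the reworked API. */
static int
example_mem_alloc(struct nouveau_drm *drm, struct ttm_resource **res)
{
	/* was: nouveau_mem_new(&drm->master, kind, comp, res) */
	int ret = nouveau_mem_new(drm, 0 /* kind */, 0 /* comp */, res);

	if (ret)
		return ret;

	/* nouveau_mem_fini() later takes drm->client_mutex around
	 * nvif_mem_dtor(), via the drm pointer stored in the mem.
	 */
	return 0;
}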
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 1d49ebdfd5dc..adb802421fda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -35,7 +35,6 @@
#include <nvif/ioctl.h>
#include "nouveau_drv.h"
-#include "nouveau_usif.h"
static void
nvkm_client_unmap(void *priv, void __iomem *ptr, u32 size)
@@ -98,5 +97,4 @@ nvif_driver_nvkm = {
.ioctl = nvkm_client_ioctl,
.map = nvkm_client_map,
.unmap = nvkm_client_unmap,
- .keep = false,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index bf2dc7567ea4..829fdc6e4031 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -26,7 +26,6 @@ static int nouveau_platform_probe(struct platform_device *pdev)
const struct nvkm_device_tegra_func *func;
struct nvkm_device *device = NULL;
struct drm_device *drm;
- int ret;
func = of_device_get_match_data(&pdev->dev);
@@ -34,19 +33,14 @@ static int nouveau_platform_probe(struct platform_device *pdev)
if (IS_ERR(drm))
return PTR_ERR(drm);
- ret = drm_dev_register(drm, 0);
- if (ret < 0) {
- drm_dev_put(drm);
- return ret;
- }
-
return 0;
}
static void nouveau_platform_remove(struct platform_device *pdev)
{
- struct drm_device *dev = platform_get_drvdata(pdev);
- nouveau_drm_device_remove(dev);
+ struct nouveau_drm *drm = platform_get_drvdata(pdev);
+
+ nouveau_drm_device_remove(drm);
}
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 32fa2e273965..eb6c3f9a01f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -379,7 +379,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
else
NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
- drm_sched_start(sched, true);
+ drm_sched_start(sched);
return stat;
}
@@ -404,7 +404,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ const long timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
int ret;
if (!wq) {
@@ -418,7 +418,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
NOUVEAU_SCHED_PRIORITY_COUNT,
- credit_limit, 0, job_hang_limit,
+ credit_limit, 0, timeout,
NULL, NULL, "nouveau_sched", drm->dev->dev);
if (ret)
goto fail_wq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b14895f75b3c..bd870028514b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -43,7 +43,7 @@ nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resou
return ret;
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+ ret = nouveau_mem_map(mem, &drm->client.vmm.vmm, &mem->vma[0]);
if (ret) {
nouveau_mem_fini(mem);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 486f39f31a38..e244927eb5d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -73,7 +73,7 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
@@ -105,7 +105,7 @@ nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
@@ -132,13 +132,13 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
struct nouveau_mem *mem;
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
mem = nouveau_mem(*res);
ttm_resource_init(bo, place, *res);
- ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
+ ret = nvif_vmm_get(&drm->client.vmm.vmm, PTES, false, 12, 0,
(long)(*res)->size, &mem->vma[0]);
if (ret) {
nouveau_mem_del(man, *res);
@@ -261,7 +261,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
@@ -348,7 +348,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
nouveau_ttm_fini_vram(drm);
nouveau_ttm_fini_gtt(drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
deleted file mode 100644
index 002d1479ba89..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright 2014 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-
-#include "nouveau_drv.h"
-#include "nouveau_usif.h"
-#include "nouveau_abi16.h"
-
-#include <nvif/unpack.h>
-#include <nvif/client.h>
-#include <nvif/ioctl.h>
-
-#include <nvif/class.h>
-#include <nvif/cl0080.h>
-
-struct usif_object {
- struct list_head head;
- u8 route;
- u64 token;
-};
-
-static void
-usif_object_dtor(struct usif_object *object)
-{
- list_del(&object->head);
- kfree(object);
-}
-
-static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
-{
- struct nouveau_cli *cli = nouveau_cli(f);
- struct nvif_client *client = &cli->base;
- union {
- struct nvif_ioctl_new_v0 v0;
- } *args = data;
- struct usif_object *object;
- int ret = -ENOSYS;
-
- if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
- return ret;
-
- switch (args->v0.oclass) {
- case NV_DMA_FROM_MEMORY:
- case NV_DMA_TO_MEMORY:
- case NV_DMA_IN_MEMORY:
- return -EINVAL;
- case NV_DEVICE: {
- union {
- struct nv_device_v0 v0;
- } *args = data;
-
- if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
- return ret;
-
- args->v0.priv = false;
- break;
- }
- default:
- if (!parent_abi16)
- return -EINVAL;
- break;
- }
-
- if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
- return -ENOMEM;
- list_add(&object->head, &cli->objects);
-
- object->route = args->v0.route;
- object->token = args->v0.token;
- args->v0.route = NVDRM_OBJECT_USIF;
- args->v0.token = (unsigned long)(void *)object;
- ret = nvif_client_ioctl(client, argv, argc);
- if (ret) {
- usif_object_dtor(object);
- return ret;
- }
-
- args->v0.token = object->token;
- args->v0.route = object->route;
- return 0;
-}
-
-int
-usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
-{
- struct nouveau_cli *cli = nouveau_cli(filp);
- struct nvif_client *client = &cli->base;
- void *data = kmalloc(argc, GFP_KERNEL);
- u32 size = argc;
- union {
- struct nvif_ioctl_v0 v0;
- } *argv = data;
- struct usif_object *object;
- bool abi16 = false;
- u8 owner;
- int ret;
-
- if (ret = -ENOMEM, !argv)
- goto done;
- if (ret = -EFAULT, copy_from_user(argv, user, size))
- goto done;
-
- if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
- /* block access to objects not created via this interface */
- owner = argv->v0.owner;
- if (argv->v0.object == 0ULL &&
- argv->v0.type != NVIF_IOCTL_V0_DEL)
- argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
- else
- argv->v0.owner = NVDRM_OBJECT_USIF;
- } else
- goto done;
-
- /* USIF slightly abuses some return-only ioctl members in order
- * to provide interoperability with the older ABI16 objects
- */
- mutex_lock(&cli->mutex);
- if (argv->v0.route) {
- if (ret = -EINVAL, argv->v0.route == 0xff)
- ret = nouveau_abi16_usif(filp, argv, argc);
- if (ret) {
- mutex_unlock(&cli->mutex);
- goto done;
- }
-
- abi16 = true;
- }
-
- switch (argv->v0.type) {
- case NVIF_IOCTL_V0_NEW:
- ret = usif_object_new(filp, data, size, argv, argc, abi16);
- break;
- default:
- ret = nvif_client_ioctl(client, argv, argc);
- break;
- }
- if (argv->v0.route == NVDRM_OBJECT_USIF) {
- object = (void *)(unsigned long)argv->v0.token;
- argv->v0.route = object->route;
- argv->v0.token = object->token;
- if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) {
- list_del(&object->head);
- kfree(object);
- }
- } else {
- argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN;
- argv->v0.token = 0;
- }
- argv->v0.owner = owner;
- mutex_unlock(&cli->mutex);
-
- if (copy_to_user(user, argv, argc))
- ret = -EFAULT;
-done:
- kfree(argv);
- return ret;
-}
-
-void
-usif_client_fini(struct nouveau_cli *cli)
-{
- struct usif_object *object, *otemp;
-
- list_for_each_entry_safe(object, otemp, &cli->objects, head) {
- usif_object_dtor(object);
- }
-}
-
-void
-usif_client_init(struct nouveau_cli *cli)
-{
- INIT_LIST_HEAD(&cli->objects);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
deleted file mode 100644
index dc90d4a9d0d9..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_usif.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NOUVEAU_USIF_H__
-#define __NOUVEAU_USIF_H__
-
-void usif_client_init(struct nouveau_cli *);
-void usif_client_fini(struct nouveau_cli *);
-int usif_ioctl(struct drm_file *, void __user *, u32);
-int usif_notify(const void *, u32, const void *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index f8bf0ec26844..ab4e11dc0b8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -11,7 +11,7 @@
static unsigned int
nouveau_vga_set_decode(struct pci_dev *pdev, bool state)
{
- struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev));
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
struct nvif_object *device = &drm->client.device.object;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
@@ -34,7 +34,8 @@ static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+ struct drm_device *dev = drm->dev;
if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
return;
@@ -56,21 +57,23 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- drm_fb_helper_output_poll_changed(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+ struct drm_device *dev = drm->dev;
+
+ drm_client_dev_hotplug(dev);
}
static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return atomic_read(&dev->open_count) == 0;
+ return atomic_read(&drm->dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops
@@ -125,10 +128,3 @@ nouveau_vga_fini(struct nouveau_drm *drm)
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}
-
-
-void
-nouveau_vga_lastclose(struct drm_device *dev)
-{
- vga_switcheroo_process_delayed_switch();
-}
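The nouveau_vga.c hunks assume the PCI drvdata now holds the struct nouveau_drm pointer itself, with the DRM device recovered through drm->dev. A one-line sketch of that convention (example_drm_dev() is hypothetical):

/* Hypothetical example: recover the drm_device under the new convention. */
static struct drm_device *
example_drm_dev(struct pci_dev *pdev)
{
	struct nouveau_drm *drm = pci_get_drvdata(pdev); /* was: struct drm_device * */

	return drm->dev;
}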
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h
index 951a83f984dd..63be415d2a44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -4,6 +4,5 @@
void nouveau_vga_init(struct nouveau_drm *);
void nouveau_vga_fini(struct nouveau_drm *);
-void nouveau_vga_lastclose(struct drm_device *dev);
#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index cdbc75e3d1f6..fa5c6029f783 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -39,7 +39,7 @@ struct nv04_fence_priv {
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
- struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
+ struct nvif_push *push = &unrcu_pointer(fence->channel)->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index c6a0db5b9e21..8c73f40e3bda 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -32,7 +32,7 @@
int
nv10_fence_emit(struct nouveau_fence *fence)
{
- struct nvif_push *push = fence->channel->chan.push;
+ struct nvif_push *push = &fence->channel->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_MTHD(push, NV06E, SET_REFERENCE, fence->base.seqno);
@@ -88,7 +88,7 @@ nv10_fence_destroy(struct nouveau_drm *drm)
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
drm->fence = NULL;
kfree(priv);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 07c2e0878c24..d09bfd11369f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -36,11 +36,11 @@ int
nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = (void *)prev->user.client;
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nouveau_cli *cli = prev->cli;
+ struct nv10_fence_priv *priv = cli->drm->fence;
struct nv10_fence_chan *fctx = chan->fence;
- struct nvif_push *ppush = prev->chan.push;
- struct nvif_push *npush = chan->chan.push;
+ struct nvif_push *ppush = &prev->chan.push;
+ struct nvif_push *npush = &chan->chan.push;
u32 value;
int ret;
@@ -76,7 +76,7 @@ nv17_fence_sync(struct nouveau_fence *fence,
static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_priv *priv = chan->cli->drm->fence;
struct ttm_resource *reg = priv->bo->bo.resource;
struct nv10_fence_chan *fctx;
u32 start = reg->start * PAGE_SIZE;
@@ -141,7 +141,7 @@ nv17_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index ea1e1f480bfe..62e28dddf87c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -35,7 +35,7 @@
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_priv *priv = chan->cli->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = priv->bo->bo.resource;
u32 start = reg->start * PAGE_SIZE;
@@ -92,7 +92,7 @@ nv50_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 812b8c62eeba..aa7dd0c5d917 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -35,7 +35,7 @@
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 8);
if (ret == 0) {
PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
@@ -58,7 +58,7 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 7);
if (ret == 0) {
PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
@@ -79,7 +79,7 @@ nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static inline u32
nv84_fence_chid(struct nouveau_channel *chan)
{
- return chan->drm->runl[chan->runlist].chan_id_base + chan->chid;
+ return chan->cli->drm->runl[chan->runlist].chan_id_base + chan->chid;
}
static int
@@ -105,14 +105,14 @@ nv84_fence_sync(struct nouveau_fence *fence,
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
return nouveau_bo_rd32(priv->bo, nv84_fence_chid(chan) * 16/4);
}
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
nouveau_bo_wr32(priv->bo, nv84_fence_chid(chan) * 16 / 4, fctx->base.sequence);
@@ -127,7 +127,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
struct nv84_fence_chan *fctx;
int ret;
@@ -188,7 +188,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -232,7 +232,7 @@ nv84_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index e1461c0b0779..a5e98d0d4217 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -34,7 +34,7 @@
static int
nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 6);
if (ret == 0) {
PUSH_MTHD(push, NV906F, SEMAPHOREA,
@@ -57,7 +57,7 @@ nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static int
nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 5);
if (ret == 0) {
PUSH_MTHD(push, NV906F, SEMAPHOREA,
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 3a27245f467f..fdf5054ed7d8 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -30,12 +30,6 @@
#include <nvif/if0000.h>
int
-nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
-{
- return client->driver->ioctl(client->object.priv, data, size, NULL);
-}
-
-int
nvif_client_suspend(struct nvif_client *client)
{
return client->driver->suspend(client->object.priv);
@@ -51,22 +45,13 @@ void
nvif_client_dtor(struct nvif_client *client)
{
nvif_object_dtor(&client->object);
- if (client->driver) {
- if (client->driver->fini)
- client->driver->fini(client->object.priv);
- client->driver = NULL;
- }
+ client->driver = NULL;
}
int
-nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
- struct nvif_client *client)
+nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *client)
{
- struct nvif_client_v0 args = { .device = device };
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_nop_v0 nop;
- } nop = {};
+ struct nvif_client_v0 args = {};
int ret;
strscpy_pad(args.name, name, sizeof(args.name));
@@ -79,15 +64,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
client->object.client = client;
client->object.handle = ~0;
- client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
client->driver = parent->driver;
-
- if (ret == 0) {
- ret = nvif_client_ioctl(client, &nop, sizeof(nop));
- client->version = nop.nop.version;
- }
-
- if (ret)
- nvif_client_dtor(client);
- return ret;
+ return 0;
}
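nvif_client_ctor() now takes only a parent client and a name; the device handle, NOP version probe, and route setup are gone. A sketch of a caller under the new signature (example_client_new() and the client name are hypothetical):

/* Hypothetical example: construct a client against an existing parent. */
static int
example_client_new(struct nvif_client *parent, struct nvif_client *client)
{
	int ret = nvif_client_ctor(parent, "exampleClient", client);

	if (ret)
		return ret;

	/* client->driver is inherited from the parent; teardown remains
	 * nvif_client_dtor(client).
	 */
	return 0;
}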
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 8c3d883f3313..24880931039f 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -21,8 +21,8 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-
#include <nvif/device.h>
+#include <nvif/client.h>
u64
nvif_device_time(struct nvif_device *device)
@@ -38,6 +38,12 @@ nvif_device_time(struct nvif_device *device)
return device->user.func->time(&device->user);
}
+int
+nvif_device_map(struct nvif_device *device)
+{
+ return nvif_object_map(&device->object, NULL, 0);
+}
+
void
nvif_device_dtor(struct nvif_device *device)
{
@@ -48,11 +54,10 @@ nvif_device_dtor(struct nvif_device *device)
}
int
-nvif_device_ctor(struct nvif_object *parent, const char *name, u32 handle,
- s32 oclass, void *data, u32 size, struct nvif_device *device)
+nvif_device_ctor(struct nvif_client *client, const char *name, struct nvif_device *device)
{
- int ret = nvif_object_ctor(parent, name ? name : "nvifDevice", handle,
- oclass, data, size, &device->object);
+ int ret = nvif_object_ctor(&client->object, name ? name : "nvifDevice", 0,
+ 0x0080, NULL, 0, &device->object);
device->runlist = NULL;
device->user.func = NULL;
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.c b/drivers/gpu/drm/nouveau/nvif/driver.c
index 5e00dd07afed..78706e97a6a2 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.c
+++ b/drivers/gpu/drm/nouveau/nvif/driver.c
@@ -24,35 +24,17 @@
#include <nvif/driver.h>
#include <nvif/client.h>
-static const struct nvif_driver *
-nvif_driver[] = {
-#ifdef __KERNEL__
- &nvif_driver_nvkm,
-#else
- &nvif_driver_drm,
- &nvif_driver_lib,
- &nvif_driver_null,
-#endif
- NULL
-};
-
int
nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
const char *name, u64 device, struct nvif_client *client)
{
- int ret = -EINVAL, i;
+ int ret;
+
+ client->driver = &nvif_driver_nvkm;
- for (i = 0; (client->driver = nvif_driver[i]); i++) {
- if (!drv || !strcmp(client->driver->name, drv)) {
- ret = client->driver->init(name, device, cfg, dbg,
- &client->object.priv);
- if (ret == 0)
- break;
- client->driver->fini(client->object.priv);
- }
- }
+ ret = client->driver->init(name, device, cfg, dbg, &client->object.priv);
+ if (ret)
+ return ret;
- if (ret == 0)
- ret = nvif_client_ctor(client, name, device, client);
- return ret;
+ return nvif_client_ctor(client, name, client);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 1d19c87eaec1..0b87278ac0f8 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -40,7 +40,6 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
args->v0.object = nvif_handle(object);
else
args->v0.object = 0;
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
} else
return -ENOSYS;
@@ -98,43 +97,6 @@ nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
return ret;
}
-u32
-nvif_object_rd(struct nvif_object *object, int size, u64 addr)
-{
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_rd_v0 rd;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_RD,
- .rd.size = size,
- .rd.addr = addr,
- };
- int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret) {
- /*XXX: warn? */
- return 0;
- }
- return args.rd.data;
-}
-
-void
-nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data)
-{
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_wr_v0 wr;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_WR,
- .wr.size = size,
- .wr.addr = addr,
- .wr.data = data,
- };
- int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret) {
- /*XXX: warn? */
- }
-}
-
int
nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
{
@@ -299,8 +261,6 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
args->ioctl.version = 0;
args->ioctl.type = NVIF_IOCTL_V0_NEW;
args->new.version = 0;
- args->new.route = parent->client->route;
- args->new.token = nvif_handle(object);
args->new.object = nvif_handle(object);
args->new.handle = handle;
args->new.oclass = oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index c55662937ab2..72c88db627a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -42,7 +42,7 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){
args->v0.name[sizeof(args->v0.name) - 1] = 0;
- ret = nvkm_client_new(args->v0.name, args->v0.device, NULL,
+ ret = nvkm_client_new(args->v0.name, oclass->client->device, NULL,
NULL, oclass->client->event, &client);
if (ret)
return ret;
@@ -51,8 +51,6 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
client->object.client = oclass->client;
client->object.handle = oclass->handle;
- client->object.route = oclass->route;
- client->object.token = oclass->token;
client->object.object = oclass->object;
client->debug = oclass->client->debug;
*pobject = &client->object;
@@ -67,58 +65,6 @@ nvkm_uclient_sclass = {
.ctor = nvkm_uclient_new,
};
-static const struct nvkm_object_func nvkm_client;
-struct nvkm_client *
-nvkm_client_search(struct nvkm_client *client, u64 handle)
-{
- struct nvkm_object *object;
-
- object = nvkm_object_search(client, handle, &nvkm_client);
- if (IS_ERR(object))
- return (void *)object;
-
- return nvkm_client(object);
-}
-
-static int
-nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size)
-{
- union {
- struct nvif_client_devlist_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(&client->object, "client devlist size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- nvif_ioctl(&client->object, "client devlist vers %d count %d\n",
- args->v0.version, args->v0.count);
- if (size == sizeof(args->v0.device[0]) * args->v0.count) {
- ret = nvkm_device_list(args->v0.device, args->v0.count);
- if (ret >= 0) {
- args->v0.count = ret;
- ret = 0;
- }
- } else {
- ret = -EINVAL;
- }
- }
-
- return ret;
-}
-
-static int
-nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_client *client = nvkm_client(object);
- switch (mthd) {
- case NVIF_CLIENT_V0_DEVLIST:
- return nvkm_client_mthd_devlist(client, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
static int
nvkm_client_child_new(const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
@@ -144,12 +90,6 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
return 0;
}
-static int
-nvkm_client_fini(struct nvkm_object *object, bool suspend)
-{
- return 0;
-}
-
static void *
nvkm_client_dtor(struct nvkm_object *object)
{
@@ -159,8 +99,6 @@ nvkm_client_dtor(struct nvkm_object *object)
static const struct nvkm_object_func
nvkm_client = {
.dtor = nvkm_client_dtor,
- .fini = nvkm_client_fini,
- .mthd = nvkm_client_mthd,
.sclass = nvkm_client_child_get,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 0b33287e43a7..45051a1249da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -33,18 +33,7 @@ static int
nvkm_ioctl_nop(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_nop_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "nop size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
- args->v0.version = NVIF_VERSION_LATEST;
- }
-
- return ret;
+ return -ENOSYS;
}
#include <nvif/class.h>
@@ -112,10 +101,9 @@ nvkm_ioctl_new(struct nvkm_client *client,
nvif_ioctl(parent, "new size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- nvif_ioctl(parent, "new vers %d handle %08x class %08x "
- "route %02x token %llx object %016llx\n",
+ nvif_ioctl(parent, "new vers %d handle %08x class %08x object %016llx\n",
args->v0.version, args->v0.handle, args->v0.oclass,
- args->v0.route, args->v0.token, args->v0.object);
+ args->v0.object);
} else
return ret;
@@ -127,8 +115,6 @@ nvkm_ioctl_new(struct nvkm_client *client,
do {
memset(&oclass, 0x00, sizeof(oclass));
oclass.handle = args->v0.handle;
- oclass.route = args->v0.route;
- oclass.token = args->v0.token;
oclass.object = args->v0.object;
oclass.client = client;
oclass.parent = parent;
@@ -205,69 +191,14 @@ static int
nvkm_ioctl_rd(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_rd_v0 v0;
- } *args = data;
- union {
- u8 b08;
- u16 b16;
- u32 b32;
- } v;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "rd size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
- args->v0.version, args->v0.size, args->v0.addr);
- switch (args->v0.size) {
- case 1:
- ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
- args->v0.data = v.b08;
- break;
- case 2:
- ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
- args->v0.data = v.b16;
- break;
- case 4:
- ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
- args->v0.data = v.b32;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- }
-
- return ret;
+ return -ENOSYS;
}
static int
nvkm_ioctl_wr(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_wr_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "wr size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "wr vers %d size %d addr %016llx data %08x\n",
- args->v0.version, args->v0.size, args->v0.addr,
- args->v0.data);
- } else
- return ret;
-
- switch (args->v0.size) {
- case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
- case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
- case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
- default:
- break;
- }
-
- return -EINVAL;
+ return -ENOSYS;
}
static int
@@ -331,7 +262,7 @@ nvkm_ioctl_v0[] = {
static int
nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
- void *data, u32 size, u8 owner, u8 *route, u64 *token)
+ void *data, u32 size)
{
struct nvkm_object *object;
int ret;
@@ -342,13 +273,6 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
return PTR_ERR(object);
}
- if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
- nvif_ioctl(&client->object, "route != owner\n");
- return -EACCES;
- }
- *route = object->route;
- *token = object->token;
-
if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
if (nvkm_ioctl_v0[type].version == 0)
ret = nvkm_ioctl_v0[type].func(client, object, data, size);
@@ -374,8 +298,7 @@ nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
args->v0.version, args->v0.type, args->v0.object,
args->v0.owner);
ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
- data, size, args->v0.owner,
- &args->v0.route, &args->v0.token);
+ data, size);
}
if (ret != 1) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index aea3ba72027a..390c265cf8af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -133,54 +133,6 @@ nvkm_object_unmap(struct nvkm_object *object)
}
int
-nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- if (likely(object->func->rd08))
- return object->func->rd08(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- if (likely(object->func->rd16))
- return object->func->rd16(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- if (likely(object->func->rd32))
- return object->func->rd32(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- if (likely(object->func->wr08))
- return object->func->wr08(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- if (likely(object->func->wr16))
- return object->func->wr16(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- if (likely(object->func->wr32))
- return object->func->wr32(object, addr, data);
- return -ENODEV;
-}
-
-int
nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
int align, struct nvkm_gpuobj **pgpuobj)
{
@@ -313,8 +265,6 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
object->engine = nvkm_engine_ref(oclass->engine);
object->oclass = oclass->base.oclass;
object->handle = oclass->handle;
- object->route = oclass->route;
- object->token = oclass->token;
object->object = oclass->object;
INIT_LIST_HEAD(&object->head);
INIT_LIST_HEAD(&object->tree);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
index 3385528da650..5db80d1780f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -56,42 +56,6 @@ nvkm_oproxy_unmap(struct nvkm_object *object)
}
static int
-nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
@@ -197,12 +161,6 @@ nvkm_oproxy_func = {
.ntfy = nvkm_oproxy_ntfy,
.map = nvkm_oproxy_map,
.unmap = nvkm_oproxy_unmap,
- .rd08 = nvkm_oproxy_rd08,
- .rd16 = nvkm_oproxy_rd16,
- .rd32 = nvkm_oproxy_rd32,
- .wr08 = nvkm_oproxy_wr08,
- .wr16 = nvkm_oproxy_wr16,
- .wr32 = nvkm_oproxy_wr32,
.bind = nvkm_oproxy_bind,
.sclass = nvkm_oproxy_sclass,
.uevent = nvkm_oproxy_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
index ba9d9edaec75..cc254c390a57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
@@ -116,9 +116,9 @@ nvkm_uevent_ntfy(struct nvkm_event_ntfy *ntfy, u32 bits)
struct nvkm_client *client = uevent->object.client;
if (uevent->func)
- return uevent->func(uevent->parent, uevent->object.token, bits);
+ return uevent->func(uevent->parent, uevent->object.object, bits);
- return client->event(uevent->object.token, NULL, 0);
+ return client->event(uevent->object.object, NULL, 0);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index bfaaff645a34..2e48b0816670 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -19,7 +19,6 @@ include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
include $(src)/nvkm/engine/nvjpg/Kbuild
include $(src)/nvkm/engine/ofa/Kbuild
-include $(src)/nvkm/engine/pm/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sec2/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 31ed3da32fe7..9093d89b16f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -53,26 +53,6 @@ nvkm_device_find(u64 handle)
return device;
}
-int
-nvkm_device_list(u64 *name, int size)
-{
- struct nvkm_device *device;
- int nr = 0;
- mutex_lock(&nv_devices_mutex);
- list_for_each_entry(device, &nv_devices, head) {
- if (nr++ < size)
- name[nr - 1] = device->handle;
- }
- mutex_unlock(&nv_devices_mutex);
- return nr;
-}
-
-static const struct nvkm_device_chip
-null_chipset = {
- .name = "NULL",
- .bios = { 0x00000001, nvkm_bios_new },
-};
-
static const struct nvkm_device_chip
nv4_chipset = {
.name = "NV04",
@@ -490,7 +470,6 @@ nv40_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -516,7 +495,6 @@ nv41_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -542,7 +520,6 @@ nv42_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -568,7 +545,6 @@ nv43_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -594,7 +570,6 @@ nv44_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -620,7 +595,6 @@ nv45_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -646,7 +620,6 @@ nv46_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -672,7 +645,6 @@ nv47_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -698,7 +670,6 @@ nv49_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -724,7 +695,6 @@ nv4a_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -750,7 +720,6 @@ nv4b_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -776,7 +745,6 @@ nv4c_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -802,7 +770,6 @@ nv4e_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -831,7 +798,6 @@ nv50_chipset = {
.fifo = { 0x00000001, nv50_fifo_new },
.gr = { 0x00000001, nv50_gr_new },
.mpeg = { 0x00000001, nv50_mpeg_new },
- .pm = { 0x00000001, nv50_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -857,7 +823,6 @@ nv63_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -883,7 +848,6 @@ nv67_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -909,7 +873,6 @@ nv68_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -940,7 +903,6 @@ nv84_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -972,7 +934,6 @@ nv86_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1004,7 +965,6 @@ nv92_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1036,7 +996,6 @@ nv94_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1068,7 +1027,6 @@ nv96_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1100,7 +1058,6 @@ nv98_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1132,7 +1089,6 @@ nva0_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, gt200_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, gt200_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1167,7 +1123,6 @@ nva3_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1200,7 +1155,6 @@ nva5_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1233,7 +1187,6 @@ nva8_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1264,7 +1217,6 @@ nvaa_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1296,7 +1248,6 @@ nvac_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1330,7 +1281,6 @@ nvaf_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, mcp89_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1366,7 +1316,6 @@ nvc0_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1402,7 +1351,6 @@ nvc1_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf108_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1438,7 +1386,6 @@ nvc3_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1474,7 +1421,6 @@ nvc4_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1510,7 +1456,6 @@ nvc8_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1546,7 +1491,6 @@ nvce_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1582,7 +1526,6 @@ nvcf_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1617,7 +1560,6 @@ nvd7_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf117_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1653,7 +1595,6 @@ nvd9_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf117_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1690,7 +1631,6 @@ nve4_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1727,7 +1667,6 @@ nve6_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1764,7 +1703,6 @@ nve7_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1789,7 +1727,6 @@ nvea_chipset = {
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk20a_fifo_new },
.gr = { 0x00000001, gk20a_gr_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
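Every hunk above makes the same mechanical change: the .pm initializer is dropped from each nvXX_chipset table, so no chipset constructs a PM engine any more. Each table is a designated-initializer struct mapping an engine slot to an instance bitmask and a constructor; an omitted member is zero-initialized and therefore skipped at construction time. A reduced sketch of the idiom (all names here are illustrative stand-ins, not the real nvkm headers):

/* Reduced sketch of the chipset-table idiom; the real nvkm chipset
 * struct has many more engine slots. */
struct device;
struct engine;

struct engine_slot {
	unsigned int inst;	/* bitmask of instances to construct */
	int (*ctor)(struct device *, int inst, struct engine **);
};

struct chipset {
	const char *name;
	struct engine_slot gr;
	struct engine_slot pm;	/* removed entries stay zero-initialized */
};

static int gr_new_sketch(struct device *d, int i, struct engine **e)
{
	return 0;	/* placeholder constructor */
}

/* With the .pm initializer deleted, pm.inst stays 0 and the device
 * constructor's layout loop never builds a PM engine. */
static const struct chipset nv40_sketch = {
	.name = "NV40",
	.gr = { 0x00000001, gr_new_sketch },
};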
@@ -3104,7 +3041,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
const struct nvkm_device_quirk *quirk,
struct device *dev, enum nvkm_device_type type, u64 handle,
const char *name, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
@@ -3132,233 +3068,228 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
- if (detect || mmio) {
- device->pri = ioremap(mmio_base, mmio_size);
- if (device->pri == NULL) {
- nvdev_error(device, "unable to map PRI\n");
- ret = -ENOMEM;
- goto done;
- }
+ device->pri = ioremap(mmio_base, mmio_size);
+ if (device->pri == NULL) {
+ nvdev_error(device, "unable to map PRI\n");
+ ret = -ENOMEM;
+ goto done;
}
/* identify the chipset, and determine classes of subdev/engines */
- if (detect) {
- /* switch mmio to cpu's native endianness */
- if (!nvkm_device_endianness(device)) {
- nvdev_error(device,
- "Couldn't switch GPU to CPUs endianness\n");
- ret = -ENOSYS;
- goto done;
- }
- boot0 = nvkm_rd32(device, 0x000000);
-
- /* chipset can be overridden for devel/testing purposes */
- chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
- if (chipset) {
- u32 override_boot0;
-
- if (chipset >= 0x10) {
- override_boot0 = ((chipset & 0x1ff) << 20);
- override_boot0 |= 0x000000a1;
- } else {
- if (chipset != 0x04)
- override_boot0 = 0x20104000;
- else
- override_boot0 = 0x20004000;
- }
+ /* switch mmio to cpu's native endianness */
+ if (!nvkm_device_endianness(device)) {
+ nvdev_error(device,
+ "Couldn't switch GPU to CPUs endianness\n");
+ ret = -ENOSYS;
+ goto done;
+ }
- nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
- boot0, override_boot0);
- boot0 = override_boot0;
- }
+ boot0 = nvkm_rd32(device, 0x000000);
- /* determine chipset and derive architecture from it */
- if ((boot0 & 0x1f000000) > 0) {
- device->chipset = (boot0 & 0x1ff00000) >> 20;
- device->chiprev = (boot0 & 0x000000ff);
- switch (device->chipset & 0x1f0) {
- case 0x010: {
- if (0x461 & (1 << (device->chipset & 0xf)))
- device->card_type = NV_10;
- else
- device->card_type = NV_11;
- device->chiprev = 0x00;
- break;
- }
- case 0x020: device->card_type = NV_20; break;
- case 0x030: device->card_type = NV_30; break;
- case 0x040:
- case 0x060: device->card_type = NV_40; break;
- case 0x050:
- case 0x080:
- case 0x090:
- case 0x0a0: device->card_type = NV_50; break;
- case 0x0c0:
- case 0x0d0: device->card_type = NV_C0; break;
- case 0x0e0:
- case 0x0f0:
- case 0x100: device->card_type = NV_E0; break;
- case 0x110:
- case 0x120: device->card_type = GM100; break;
- case 0x130: device->card_type = GP100; break;
- case 0x140: device->card_type = GV100; break;
- case 0x160: device->card_type = TU100; break;
- case 0x170: device->card_type = GA100; break;
- case 0x190: device->card_type = AD100; break;
- default:
- break;
- }
- } else
- if ((boot0 & 0xff00fff0) == 0x20004000) {
- if (boot0 & 0x00f00000)
- device->chipset = 0x05;
+ /* chipset can be overridden for devel/testing purposes */
+ chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+ if (chipset) {
+ u32 override_boot0;
+
+ if (chipset >= 0x10) {
+ override_boot0 = ((chipset & 0x1ff) << 20);
+ override_boot0 |= 0x000000a1;
+ } else {
+ if (chipset != 0x04)
+ override_boot0 = 0x20104000;
else
- device->chipset = 0x04;
- device->card_type = NV_04;
+ override_boot0 = 0x20004000;
}
- switch (device->chipset) {
- case 0x004: device->chip = &nv4_chipset; break;
- case 0x005: device->chip = &nv5_chipset; break;
- case 0x010: device->chip = &nv10_chipset; break;
- case 0x011: device->chip = &nv11_chipset; break;
- case 0x015: device->chip = &nv15_chipset; break;
- case 0x017: device->chip = &nv17_chipset; break;
- case 0x018: device->chip = &nv18_chipset; break;
- case 0x01a: device->chip = &nv1a_chipset; break;
- case 0x01f: device->chip = &nv1f_chipset; break;
- case 0x020: device->chip = &nv20_chipset; break;
- case 0x025: device->chip = &nv25_chipset; break;
- case 0x028: device->chip = &nv28_chipset; break;
- case 0x02a: device->chip = &nv2a_chipset; break;
- case 0x030: device->chip = &nv30_chipset; break;
- case 0x031: device->chip = &nv31_chipset; break;
- case 0x034: device->chip = &nv34_chipset; break;
- case 0x035: device->chip = &nv35_chipset; break;
- case 0x036: device->chip = &nv36_chipset; break;
- case 0x040: device->chip = &nv40_chipset; break;
- case 0x041: device->chip = &nv41_chipset; break;
- case 0x042: device->chip = &nv42_chipset; break;
- case 0x043: device->chip = &nv43_chipset; break;
- case 0x044: device->chip = &nv44_chipset; break;
- case 0x045: device->chip = &nv45_chipset; break;
- case 0x046: device->chip = &nv46_chipset; break;
- case 0x047: device->chip = &nv47_chipset; break;
- case 0x049: device->chip = &nv49_chipset; break;
- case 0x04a: device->chip = &nv4a_chipset; break;
- case 0x04b: device->chip = &nv4b_chipset; break;
- case 0x04c: device->chip = &nv4c_chipset; break;
- case 0x04e: device->chip = &nv4e_chipset; break;
- case 0x050: device->chip = &nv50_chipset; break;
- case 0x063: device->chip = &nv63_chipset; break;
- case 0x067: device->chip = &nv67_chipset; break;
- case 0x068: device->chip = &nv68_chipset; break;
- case 0x084: device->chip = &nv84_chipset; break;
- case 0x086: device->chip = &nv86_chipset; break;
- case 0x092: device->chip = &nv92_chipset; break;
- case 0x094: device->chip = &nv94_chipset; break;
- case 0x096: device->chip = &nv96_chipset; break;
- case 0x098: device->chip = &nv98_chipset; break;
- case 0x0a0: device->chip = &nva0_chipset; break;
- case 0x0a3: device->chip = &nva3_chipset; break;
- case 0x0a5: device->chip = &nva5_chipset; break;
- case 0x0a8: device->chip = &nva8_chipset; break;
- case 0x0aa: device->chip = &nvaa_chipset; break;
- case 0x0ac: device->chip = &nvac_chipset; break;
- case 0x0af: device->chip = &nvaf_chipset; break;
- case 0x0c0: device->chip = &nvc0_chipset; break;
- case 0x0c1: device->chip = &nvc1_chipset; break;
- case 0x0c3: device->chip = &nvc3_chipset; break;
- case 0x0c4: device->chip = &nvc4_chipset; break;
- case 0x0c8: device->chip = &nvc8_chipset; break;
- case 0x0ce: device->chip = &nvce_chipset; break;
- case 0x0cf: device->chip = &nvcf_chipset; break;
- case 0x0d7: device->chip = &nvd7_chipset; break;
- case 0x0d9: device->chip = &nvd9_chipset; break;
- case 0x0e4: device->chip = &nve4_chipset; break;
- case 0x0e6: device->chip = &nve6_chipset; break;
- case 0x0e7: device->chip = &nve7_chipset; break;
- case 0x0ea: device->chip = &nvea_chipset; break;
- case 0x0f0: device->chip = &nvf0_chipset; break;
- case 0x0f1: device->chip = &nvf1_chipset; break;
- case 0x106: device->chip = &nv106_chipset; break;
- case 0x108: device->chip = &nv108_chipset; break;
- case 0x117: device->chip = &nv117_chipset; break;
- case 0x118: device->chip = &nv118_chipset; break;
- case 0x120: device->chip = &nv120_chipset; break;
- case 0x124: device->chip = &nv124_chipset; break;
- case 0x126: device->chip = &nv126_chipset; break;
- case 0x12b: device->chip = &nv12b_chipset; break;
- case 0x130: device->chip = &nv130_chipset; break;
- case 0x132: device->chip = &nv132_chipset; break;
- case 0x134: device->chip = &nv134_chipset; break;
- case 0x136: device->chip = &nv136_chipset; break;
- case 0x137: device->chip = &nv137_chipset; break;
- case 0x138: device->chip = &nv138_chipset; break;
- case 0x13b: device->chip = &nv13b_chipset; break;
- case 0x140: device->chip = &nv140_chipset; break;
- case 0x162: device->chip = &nv162_chipset; break;
- case 0x164: device->chip = &nv164_chipset; break;
- case 0x166: device->chip = &nv166_chipset; break;
- case 0x167: device->chip = &nv167_chipset; break;
- case 0x168: device->chip = &nv168_chipset; break;
- case 0x172: device->chip = &nv172_chipset; break;
- case 0x173: device->chip = &nv173_chipset; break;
- case 0x174: device->chip = &nv174_chipset; break;
- case 0x176: device->chip = &nv176_chipset; break;
- case 0x177: device->chip = &nv177_chipset; break;
- case 0x192: device->chip = &nv192_chipset; break;
- case 0x193: device->chip = &nv193_chipset; break;
- case 0x194: device->chip = &nv194_chipset; break;
- case 0x196: device->chip = &nv196_chipset; break;
- case 0x197: device->chip = &nv197_chipset; break;
- default:
- if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
- switch (device->chipset) {
- case 0x170: device->chip = &nv170_chipset; break;
- default:
- break;
- }
- }
+ nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+ boot0, override_boot0);
+ boot0 = override_boot0;
+ }
- if (!device->chip) {
- nvdev_error(device, "unknown chipset (%08x)\n", boot0);
- ret = -ENODEV;
- goto done;
- }
+ /* determine chipset and derive architecture from it */
+ if ((boot0 & 0x1f000000) > 0) {
+ device->chipset = (boot0 & 0x1ff00000) >> 20;
+ device->chiprev = (boot0 & 0x000000ff);
+ switch (device->chipset & 0x1f0) {
+ case 0x010: {
+ if (0x461 & (1 << (device->chipset & 0xf)))
+ device->card_type = NV_10;
+ else
+ device->card_type = NV_11;
+ device->chiprev = 0x00;
+ break;
+ }
+ case 0x020: device->card_type = NV_20; break;
+ case 0x030: device->card_type = NV_30; break;
+ case 0x040:
+ case 0x060: device->card_type = NV_40; break;
+ case 0x050:
+ case 0x080:
+ case 0x090:
+ case 0x0a0: device->card_type = NV_50; break;
+ case 0x0c0:
+ case 0x0d0: device->card_type = NV_C0; break;
+ case 0x0e0:
+ case 0x0f0:
+ case 0x100: device->card_type = NV_E0; break;
+ case 0x110:
+ case 0x120: device->card_type = GM100; break;
+ case 0x130: device->card_type = GP100; break;
+ case 0x140: device->card_type = GV100; break;
+ case 0x160: device->card_type = TU100; break;
+ case 0x170: device->card_type = GA100; break;
+ case 0x190: device->card_type = AD100; break;
+ default:
break;
}
+ } else
+ if ((boot0 & 0xff00fff0) == 0x20004000) {
+ if (boot0 & 0x00f00000)
+ device->chipset = 0x05;
+ else
+ device->chipset = 0x04;
+ device->card_type = NV_04;
+ }
- nvdev_info(device, "NVIDIA %s (%08x)\n",
- device->chip->name, boot0);
+ switch (device->chipset) {
+ case 0x004: device->chip = &nv4_chipset; break;
+ case 0x005: device->chip = &nv5_chipset; break;
+ case 0x010: device->chip = &nv10_chipset; break;
+ case 0x011: device->chip = &nv11_chipset; break;
+ case 0x015: device->chip = &nv15_chipset; break;
+ case 0x017: device->chip = &nv17_chipset; break;
+ case 0x018: device->chip = &nv18_chipset; break;
+ case 0x01a: device->chip = &nv1a_chipset; break;
+ case 0x01f: device->chip = &nv1f_chipset; break;
+ case 0x020: device->chip = &nv20_chipset; break;
+ case 0x025: device->chip = &nv25_chipset; break;
+ case 0x028: device->chip = &nv28_chipset; break;
+ case 0x02a: device->chip = &nv2a_chipset; break;
+ case 0x030: device->chip = &nv30_chipset; break;
+ case 0x031: device->chip = &nv31_chipset; break;
+ case 0x034: device->chip = &nv34_chipset; break;
+ case 0x035: device->chip = &nv35_chipset; break;
+ case 0x036: device->chip = &nv36_chipset; break;
+ case 0x040: device->chip = &nv40_chipset; break;
+ case 0x041: device->chip = &nv41_chipset; break;
+ case 0x042: device->chip = &nv42_chipset; break;
+ case 0x043: device->chip = &nv43_chipset; break;
+ case 0x044: device->chip = &nv44_chipset; break;
+ case 0x045: device->chip = &nv45_chipset; break;
+ case 0x046: device->chip = &nv46_chipset; break;
+ case 0x047: device->chip = &nv47_chipset; break;
+ case 0x049: device->chip = &nv49_chipset; break;
+ case 0x04a: device->chip = &nv4a_chipset; break;
+ case 0x04b: device->chip = &nv4b_chipset; break;
+ case 0x04c: device->chip = &nv4c_chipset; break;
+ case 0x04e: device->chip = &nv4e_chipset; break;
+ case 0x050: device->chip = &nv50_chipset; break;
+ case 0x063: device->chip = &nv63_chipset; break;
+ case 0x067: device->chip = &nv67_chipset; break;
+ case 0x068: device->chip = &nv68_chipset; break;
+ case 0x084: device->chip = &nv84_chipset; break;
+ case 0x086: device->chip = &nv86_chipset; break;
+ case 0x092: device->chip = &nv92_chipset; break;
+ case 0x094: device->chip = &nv94_chipset; break;
+ case 0x096: device->chip = &nv96_chipset; break;
+ case 0x098: device->chip = &nv98_chipset; break;
+ case 0x0a0: device->chip = &nva0_chipset; break;
+ case 0x0a3: device->chip = &nva3_chipset; break;
+ case 0x0a5: device->chip = &nva5_chipset; break;
+ case 0x0a8: device->chip = &nva8_chipset; break;
+ case 0x0aa: device->chip = &nvaa_chipset; break;
+ case 0x0ac: device->chip = &nvac_chipset; break;
+ case 0x0af: device->chip = &nvaf_chipset; break;
+ case 0x0c0: device->chip = &nvc0_chipset; break;
+ case 0x0c1: device->chip = &nvc1_chipset; break;
+ case 0x0c3: device->chip = &nvc3_chipset; break;
+ case 0x0c4: device->chip = &nvc4_chipset; break;
+ case 0x0c8: device->chip = &nvc8_chipset; break;
+ case 0x0ce: device->chip = &nvce_chipset; break;
+ case 0x0cf: device->chip = &nvcf_chipset; break;
+ case 0x0d7: device->chip = &nvd7_chipset; break;
+ case 0x0d9: device->chip = &nvd9_chipset; break;
+ case 0x0e4: device->chip = &nve4_chipset; break;
+ case 0x0e6: device->chip = &nve6_chipset; break;
+ case 0x0e7: device->chip = &nve7_chipset; break;
+ case 0x0ea: device->chip = &nvea_chipset; break;
+ case 0x0f0: device->chip = &nvf0_chipset; break;
+ case 0x0f1: device->chip = &nvf1_chipset; break;
+ case 0x106: device->chip = &nv106_chipset; break;
+ case 0x108: device->chip = &nv108_chipset; break;
+ case 0x117: device->chip = &nv117_chipset; break;
+ case 0x118: device->chip = &nv118_chipset; break;
+ case 0x120: device->chip = &nv120_chipset; break;
+ case 0x124: device->chip = &nv124_chipset; break;
+ case 0x126: device->chip = &nv126_chipset; break;
+ case 0x12b: device->chip = &nv12b_chipset; break;
+ case 0x130: device->chip = &nv130_chipset; break;
+ case 0x132: device->chip = &nv132_chipset; break;
+ case 0x134: device->chip = &nv134_chipset; break;
+ case 0x136: device->chip = &nv136_chipset; break;
+ case 0x137: device->chip = &nv137_chipset; break;
+ case 0x138: device->chip = &nv138_chipset; break;
+ case 0x13b: device->chip = &nv13b_chipset; break;
+ case 0x140: device->chip = &nv140_chipset; break;
+ case 0x162: device->chip = &nv162_chipset; break;
+ case 0x164: device->chip = &nv164_chipset; break;
+ case 0x166: device->chip = &nv166_chipset; break;
+ case 0x167: device->chip = &nv167_chipset; break;
+ case 0x168: device->chip = &nv168_chipset; break;
+ case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
+ case 0x174: device->chip = &nv174_chipset; break;
+ case 0x176: device->chip = &nv176_chipset; break;
+ case 0x177: device->chip = &nv177_chipset; break;
+ case 0x192: device->chip = &nv192_chipset; break;
+ case 0x193: device->chip = &nv193_chipset; break;
+ case 0x194: device->chip = &nv194_chipset; break;
+ case 0x196: device->chip = &nv196_chipset; break;
+ case 0x197: device->chip = &nv197_chipset; break;
+ default:
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+ switch (device->chipset) {
+ case 0x170: device->chip = &nv170_chipset; break;
+ default:
+ break;
+ }
+ }
- /* vGPU detection */
- boot1 = nvkm_rd32(device, 0x0000004);
- if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
- nvdev_info(device, "vGPUs are not supported\n");
+ if (!device->chip) {
+ nvdev_error(device, "unknown chipset (%08x)\n", boot0);
ret = -ENODEV;
goto done;
}
+ break;
+ }
- /* read strapping information */
- strap = nvkm_rd32(device, 0x101000);
+ nvdev_info(device, "NVIDIA %s (%08x)\n",
+ device->chip->name, boot0);
- /* determine frequency of timing crystal */
- if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
- (device->chipset >= 0x20 && device->chipset < 0x25))
- strap &= 0x00000040;
- else
- strap &= 0x00400040;
+ /* vGPU detection */
+ boot1 = nvkm_rd32(device, 0x0000004);
+ if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
+ nvdev_info(device, "vGPUs are not supported\n");
+ ret = -ENODEV;
+ goto done;
+ }
- switch (strap) {
- case 0x00000000: device->crystal = 13500; break;
- case 0x00000040: device->crystal = 14318; break;
- case 0x00400000: device->crystal = 27000; break;
- case 0x00400040: device->crystal = 25000; break;
- }
- } else {
- device->chip = &null_chipset;
+ /* read strapping information */
+ strap = nvkm_rd32(device, 0x101000);
+
+ /* determine frequency of timing crystal */
+	if (device->card_type <= NV_10 || device->chipset < 0x17 ||
+ (device->chipset >= 0x20 && device->chipset < 0x25))
+ strap &= 0x00000040;
+ else
+ strap &= 0x00400040;
+
+ switch (strap) {
+ case 0x00000000: device->crystal = 13500; break;
+ case 0x00000040: device->crystal = 14318; break;
+ case 0x00400000: device->crystal = 27000; break;
+ case 0x00400040: device->crystal = 25000; break;
}
if (!device->name)
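With the detect flag gone, the identification path above always runs. Two registers do the work: BOOT0, whose bits 28:20 carry the chipset id and whose low byte carries the revision, and the strap register, which selects the timing-crystal frequency. Distilled into standalone helpers, using exactly the bit layout shown in the hunk (purely illustrative, not the driver):

#include <stdint.h>

static void decode_boot0(uint32_t boot0, unsigned *chipset, unsigned *chiprev)
{
	*chipset = (boot0 & 0x1ff00000) >> 20;
	*chiprev =  boot0 & 0x000000ff;
}

/* Crystal decode for the "newer card" mask 0x00400040; older chips mask
 * with 0x00000040 only, as the code above shows. */
static int crystal_khz(uint32_t strap)
{
	switch (strap & 0x00400040) {
	case 0x00000000: return 13500;
	case 0x00000040: return 14318;
	case 0x00400000: return 27000;
	default:         return 25000;	/* 0x00400040 */
	}
}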
@@ -3368,7 +3299,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
nvkm_intr_ctor(device);
#define NVKM_LAYOUT_ONCE(type,data,ptr) \
- if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) { \
+ if (device->chip->ptr.inst) { \
WARN_ON(device->chip->ptr.inst != 0x00000001); \
ret = device->chip->ptr.ctor(device, (type), -1, &device->ptr); \
subdev = nvkm_device_subdev(device, (type), 0); \
@@ -3387,7 +3318,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
#define NVKM_LAYOUT_INST(type,data,ptr,cnt) \
WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \
for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \
- if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \
+ if (device->chip->ptr.inst & BIT(j)) { \
ret = device->chip->ptr.ctor(device, (type), (j), &device->ptr[j]); \
subdev = nvkm_device_subdev(device, (type), (j)); \
if (ret) { \
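Both layout macros now key off the chipset table alone: NVKM_LAYOUT_ONCE builds single-instance subdevs, while NVKM_LAYOUT_INST walks an instance bitmask for multi-instance ones. The bitmask walk, extracted into a runnable miniature (engine specifics omitted):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int inst = 0x00000005;	/* instances 0 and 2 enabled */
	unsigned int j;

	for (j = 0; inst && j < 32; j++)
		if (inst & BIT(j))
			printf("construct instance %u\n", j);
	return 0;
}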
@@ -3409,7 +3340,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
ret = nvkm_intr_install(device);
done:
- if (device->pri && (!mmio || ret)) {
+ if (ret && device->pri) {
iounmap(device->pri);
device->pri = NULL;
}
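The done: label now releases the PRI mapping only when construction failed; a successful constructor keeps it mapped for the device's lifetime, with teardown handled outside this diff. The resulting shape, sketched with a hypothetical detect_and_build() standing in for the body:

/* Sketch only: detect_and_build() is a stand-in for the rest of
 * nvkm_device_ctor(); ioremap/iounmap are the usual kernel calls. */
static int ctor_sketch(struct nvkm_device *device,
		       unsigned long base, unsigned long size)
{
	int ret;

	device->pri = ioremap(base, size);
	if (!device->pri)
		return -ENOMEM;

	ret = detect_and_build(device);		/* hypothetical */

	if (ret && device->pri) {		/* unmap only on failure */
		iounmap(device->pri);
		device->pri = NULL;
	}
	return ret;
}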
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index abccb2bb68a6..3ff6436007fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1626,7 +1626,6 @@ nvkm_device_pci_func = {
int
nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
const struct nvkm_device_quirk *quirk = NULL;
@@ -1680,8 +1679,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
pci_dev->bus->number << 16 |
PCI_SLOT(pci_dev->devfn) << 8 |
PCI_FUNC(pci_dev->devfn), name,
- cfg, dbg, detect, mmio, subdev_mask,
- &pdev->device);
+ cfg, dbg, &pdev->device);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index bf3176bec18a..e42b18820a95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -45,7 +45,6 @@
#include <engine/nvdec.h>
#include <engine/nvjpg.h>
#include <engine/ofa.h>
-#include <engine/pm.h>
#include <engine/sec.h>
#include <engine/sec2.h>
#include <engine/sw.h>
@@ -56,7 +55,6 @@ int nvkm_device_ctor(const struct nvkm_device_func *,
const struct nvkm_device_quirk *,
struct device *, enum nvkm_device_type, u64 handle,
const char *name, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *);
int nvkm_device_init(struct nvkm_device *);
int nvkm_device_fini(struct nvkm_device *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 87caa4a72921..d1c294f00665 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -237,7 +237,6 @@ int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
struct nvkm_device_tegra *tdev;
@@ -311,8 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
- cfg, dbg, detect, mmio, subdev_mask,
- &tdev->device);
+ cfg, dbg, &tdev->device);
if (ret)
goto powerdown;
@@ -333,7 +331,6 @@ int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
return -ENOSYS;
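tegra.c keeps two definitions of nvkm_device_tegra_new(): the real constructor when platform support is compiled in, and the stub above otherwise, so call sites link cleanly without #ifdefs. Written out with the post-patch signature, the stub is simply:

#include <errno.h>

struct nvkm_device_tegra_func;
struct platform_device;
struct nvkm_device;

int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}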
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 7fd4800a876a..d7f75b3a43c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -203,54 +203,6 @@ nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
}
static int
-nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd08(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd16(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd32(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr08(udev->device, addr, data);
- return 0;
-}
-
-static int
-nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr16(udev->device, addr, data);
- return 0;
-}
-
-static int
-nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr32(udev->device, addr, data);
- return 0;
-}
-
-static int
nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
@@ -322,8 +274,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
struct nvkm_engine *engine;
u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
(1ULL << NVKM_ENGINE_FIFO) |
- (1ULL << NVKM_ENGINE_DISP) |
- (1ULL << NVKM_ENGINE_PM);
+ (1ULL << NVKM_ENGINE_DISP);
const struct nvkm_device_oclass *sclass = NULL;
int i;
@@ -358,25 +309,11 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
}
static const struct nvkm_object_func
-nvkm_udevice_super = {
- .init = nvkm_udevice_init,
- .fini = nvkm_udevice_fini,
- .mthd = nvkm_udevice_mthd,
- .map = nvkm_udevice_map,
- .rd08 = nvkm_udevice_rd08,
- .rd16 = nvkm_udevice_rd16,
- .rd32 = nvkm_udevice_rd32,
- .wr08 = nvkm_udevice_wr08,
- .wr16 = nvkm_udevice_wr16,
- .wr32 = nvkm_udevice_wr32,
- .sclass = nvkm_udevice_child_get,
-};
-
-static const struct nvkm_object_func
nvkm_udevice = {
.init = nvkm_udevice_init,
.fini = nvkm_udevice_fini,
.mthd = nvkm_udevice_mthd,
+ .map = nvkm_udevice_map,
.sclass = nvkm_udevice_child_get,
};
@@ -384,38 +321,16 @@ static int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- union {
- struct nv_device_v0 v0;
- } *args = data;
struct nvkm_client *client = oclass->client;
- struct nvkm_object *parent = &client->object;
- const struct nvkm_object_func *func;
struct nvkm_udevice *udev;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create device size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create device v%d device %016llx\n",
- args->v0.version, args->v0.device);
- } else
- return ret;
-
-	/* give privileged clients register access */
- if (args->v0.priv)
- func = &nvkm_udevice_super;
- else
- func = &nvkm_udevice;
if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
return -ENOMEM;
- nvkm_object_ctor(func, oclass, &udev->object);
+ nvkm_object_ctor(&nvkm_udevice, oclass, &udev->object);
*pobject = &udev->object;
/* find the device that matches what the client requested */
- if (args->v0.device != ~0)
- udev->device = nvkm_device_find(args->v0.device);
- else
- udev->device = nvkm_device_find(client->device);
+ udev->device = nvkm_device_find(client->device);
if (!udev->device)
return -ENODEV;
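With the rd08/rd16/rd32/wr08/wr16/wr32 hooks deleted, user.c is left with a single function table; the .map hook, previously reserved for the privileged variant, now sits in the common table, so clients that need register access map the window rather than issuing read/write methods. The vtable idiom, reduced to a standalone sketch (types and names are stand-ins):

struct object;

struct object_func {		/* reduced stand-in for nvkm_object_func */
	int (*init)(struct object *);
	int (*fini)(struct object *);
	int (*map)(struct object *, unsigned long *addr, unsigned long *size);
};

static int udev_init(struct object *o) { return 0; }
static int udev_fini(struct object *o) { return 0; }
static int udev_map(struct object *o, unsigned long *addr, unsigned long *size)
{
	*addr = 0;		/* would return the register window here */
	*size = 0;
	return 0;
}

/* One table for all clients; the former "super" variant is gone. */
static const struct object_func udevice_sketch = {
	.init = udev_init,
	.fini = udev_fini,
	.map  = udev_map,
};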
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
index d5e18daed79f..4e43ee383c34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
@@ -27,28 +27,6 @@
#include <nvif/if0014.h>
static int
-nvkm_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
- struct nvkm_device *device = chan->disp->engine.subdev.device;
- u64 size, base = chan->func->user(chan, &size);
-
- *data = nvkm_rd32(device, base + addr);
- return 0;
-}
-
-static int
-nvkm_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
- struct nvkm_device *device = chan->disp->engine.subdev.device;
- u64 size, base = chan->func->user(chan, &size);
-
- nvkm_wr32(device, base + addr, data);
- return 0;
-}
-
-static int
nvkm_disp_chan_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **pevent)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
@@ -188,8 +166,6 @@ nvkm_disp_chan = {
.dtor = nvkm_disp_chan_dtor,
.init = nvkm_disp_chan_init,
.fini = nvkm_disp_chan_fini,
- .rd32 = nvkm_disp_chan_rd32,
- .wr32 = nvkm_disp_chan_wr32,
.ntfy = nvkm_disp_chan_ntfy,
.map = nvkm_disp_chan_map,
.sclass = nvkm_disp_chan_child_get,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
deleted file mode 100644
index 2cc8a5f6fe0c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/pm/base.o
-nvkm-y += nvkm/engine/pm/nv40.o
-nvkm-y += nvkm/engine/pm/nv50.o
-nvkm-y += nvkm/engine/pm/g84.o
-nvkm-y += nvkm/engine/pm/gt200.o
-nvkm-y += nvkm/engine/pm/gt215.o
-nvkm-y += nvkm/engine/pm/gf100.o
-nvkm-y += nvkm/engine/pm/gf108.o
-nvkm-y += nvkm/engine/pm/gf117.o
-nvkm-y += nvkm/engine/pm/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
deleted file mode 100644
index 131db2645f84..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ /dev/null
@@ -1,867 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/option.h>
-
-#include <nvif/class.h>
-#include <nvif/if0002.h>
-#include <nvif/if0003.h>
-#include <nvif/ioctl.h>
-#include <nvif/unpack.h>
-
-static u8
-nvkm_pm_count_perfdom(struct nvkm_pm *pm)
-{
- struct nvkm_perfdom *dom;
- u8 domain_nr = 0;
-
- list_for_each_entry(dom, &pm->domains, head)
- domain_nr++;
- return domain_nr;
-}
-
-static u16
-nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
-{
- u16 signal_nr = 0;
- int i;
-
- if (dom) {
- for (i = 0; i < dom->signal_nr; i++) {
- if (dom->signal[i].name)
- signal_nr++;
- }
- }
- return signal_nr;
-}
-
-static struct nvkm_perfdom *
-nvkm_perfdom_find(struct nvkm_pm *pm, int di)
-{
- struct nvkm_perfdom *dom;
- int tmp = 0;
-
- list_for_each_entry(dom, &pm->domains, head) {
- if (tmp++ == di)
- return dom;
- }
- return NULL;
-}
-
-static struct nvkm_perfsig *
-nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
-{
- struct nvkm_perfdom *dom = *pdom;
-
- if (dom == NULL) {
- dom = nvkm_perfdom_find(pm, di);
- if (dom == NULL)
- return NULL;
- *pdom = dom;
- }
-
- if (!dom->signal[si].name)
- return NULL;
- return &dom->signal[si];
-}
-
-static u8
-nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
-{
- u8 source_nr = 0, i;
-
- for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
- if (sig->source[i])
- source_nr++;
- }
- return source_nr;
-}
-
-static struct nvkm_perfsrc *
-nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
-{
- struct nvkm_perfsrc *src;
- bool found = false;
-	int tmp = 1; /* Source IDs start from 1 */
- u8 i;
-
- for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
- if (sig->source[i] == si) {
- found = true;
- break;
- }
- }
-
- if (found) {
- list_for_each_entry(src, &pm->sources, head) {
- if (tmp++ == si)
- return src;
- }
- }
-
- return NULL;
-}
-
-static int
-nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
-{
- struct nvkm_subdev *subdev = &pm->engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u32 mask, value;
- int i, j;
-
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 8 && ctr->source[i][j]; j++) {
- sig = nvkm_perfsig_find(pm, ctr->domain,
- ctr->signal[i], &dom);
- if (!sig)
- return -EINVAL;
-
- src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
- if (!src)
- return -EINVAL;
-
- /* set enable bit if needed */
- mask = value = 0x00000000;
- if (src->enable)
- mask = value = 0x80000000;
- mask |= (src->mask << src->shift);
- value |= ((ctr->source[i][j] >> 32) << src->shift);
-
- /* enable the source */
- nvkm_mask(device, src->addr, mask, value);
- nvkm_debug(subdev,
- "enabled source %08x %08x %08x\n",
- src->addr, mask, value);
- }
- }
- return 0;
-}
-
-static int
-nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
-{
- struct nvkm_subdev *subdev = &pm->engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u32 mask;
- int i, j;
-
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 8 && ctr->source[i][j]; j++) {
- sig = nvkm_perfsig_find(pm, ctr->domain,
- ctr->signal[i], &dom);
- if (!sig)
- return -EINVAL;
-
- src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
- if (!src)
- return -EINVAL;
-
- /* unset enable bit if needed */
- mask = 0x00000000;
- if (src->enable)
- mask = 0x80000000;
- mask |= (src->mask << src->shift);
-
- /* disable the source */
- nvkm_mask(device, src->addr, mask, 0);
- nvkm_debug(subdev, "disabled source %08x %08x\n",
- src->addr, mask);
- }
- }
- return 0;
-}
-
-/*******************************************************************************
- * Perfdom object classes
- ******************************************************************************/
-static int
-nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_init none;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS, i;
-
- nvif_ioctl(object, "perfdom init size %d\n", size);
- if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
- nvif_ioctl(object, "perfdom init\n");
- } else
- return ret;
-
- for (i = 0; i < 4; i++) {
- if (dom->ctr[i]) {
- dom->func->init(pm, dom, dom->ctr[i]);
-
- /* enable sources */
- nvkm_perfsrc_enable(pm, dom->ctr[i]);
- }
- }
-
- /* start next batch of counters for sampling */
- dom->func->next(pm, dom);
- return 0;
-}
-
-static int
-nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_sample none;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "perfdom sample size %d\n", size);
- if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
- nvif_ioctl(object, "perfdom sample\n");
- } else
- return ret;
- pm->sequence++;
-
- /* sample previous batch of counters */
- list_for_each_entry(dom, &pm->domains, head)
- dom->func->next(pm, dom);
-
- return 0;
-}
-
-static int
-nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_read_v0 v0;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS, i;
-
- nvif_ioctl(object, "perfdom read size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
- } else
- return ret;
-
- for (i = 0; i < 4; i++) {
- if (dom->ctr[i])
- dom->func->read(pm, dom, dom->ctr[i]);
- }
-
- if (!dom->clk)
- return -EAGAIN;
-
- for (i = 0; i < 4; i++)
- if (dom->ctr[i])
- args->v0.ctr[i] = dom->ctr[i]->ctr;
- args->v0.clk = dom->clk;
- return 0;
-}
-
-static int
-nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_perfdom *dom = nvkm_perfdom(object);
- switch (mthd) {
- case NVIF_PERFDOM_V0_INIT:
- return nvkm_perfdom_init(dom, data, size);
- case NVIF_PERFDOM_V0_SAMPLE:
- return nvkm_perfdom_sample(dom, data, size);
- case NVIF_PERFDOM_V0_READ:
- return nvkm_perfdom_read(dom, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
-static void *
-nvkm_perfdom_dtor(struct nvkm_object *object)
-{
- struct nvkm_perfdom *dom = nvkm_perfdom(object);
- struct nvkm_pm *pm = dom->perfmon->pm;
- int i;
-
- for (i = 0; i < 4; i++) {
- struct nvkm_perfctr *ctr = dom->ctr[i];
- if (ctr) {
- nvkm_perfsrc_disable(pm, ctr);
- if (ctr->head.next)
- list_del(&ctr->head);
- }
- kfree(ctr);
- }
-
- return dom;
-}
-
-static int
-nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
- struct nvkm_perfsig *signal[4], u64 source[4][8],
- u16 logic_op, struct nvkm_perfctr **pctr)
-{
- struct nvkm_perfctr *ctr;
- int i, j;
-
- if (!dom)
- return -EINVAL;
-
- ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
- if (!ctr)
- return -ENOMEM;
-
- ctr->domain = domain;
- ctr->logic_op = logic_op;
- ctr->slot = slot;
- for (i = 0; i < 4; i++) {
- if (signal[i]) {
- ctr->signal[i] = signal[i] - dom->signal;
- for (j = 0; j < 8; j++)
- ctr->source[i][j] = source[i][j];
- }
- }
- list_add_tail(&ctr->head, &dom->list);
-
- return 0;
-}
-
-static const struct nvkm_object_func
-nvkm_perfdom = {
- .dtor = nvkm_perfdom_dtor,
- .mthd = nvkm_perfdom_mthd,
-};
-
-static int
-nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
- const struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nvif_perfdom_v0 v0;
- } *args = data;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_object *parent = oclass->parent;
- struct nvkm_perfdom *sdom = NULL;
- struct nvkm_perfctr *ctr[4] = {};
- struct nvkm_perfdom *dom;
- int c, s, m;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create perfdom size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
- args->v0.version, args->v0.domain, args->v0.mode);
- } else
- return ret;
-
- for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
- struct nvkm_perfsig *sig[4] = {};
- u64 src[4][8] = {};
-
- for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
- sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
- args->v0.ctr[c].signal[s],
- &sdom);
- if (args->v0.ctr[c].signal[s] && !sig[s])
- return -EINVAL;
-
- for (m = 0; m < 8; m++) {
- src[s][m] = args->v0.ctr[c].source[s][m];
- if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
- src[s][m]))
- return -EINVAL;
- }
- }
-
- ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
- args->v0.ctr[c].logic_op, &ctr[c]);
- if (ret)
- return ret;
- }
-
- if (!sdom)
- return -EINVAL;
-
- if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
- dom->perfmon = perfmon;
- *pobject = &dom->object;
-
- dom->func = sdom->func;
- dom->addr = sdom->addr;
- dom->mode = args->v0.mode;
- for (c = 0; c < ARRAY_SIZE(ctr); c++)
- dom->ctr[c] = ctr[c];
- return 0;
-}
-
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-static int
-nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_domain_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_perfdom *dom;
- u8 domain_nr;
- int di, ret = -ENOSYS;
-
- nvif_ioctl(object, "perfmon query domain size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
- args->v0.version, args->v0.iter);
- di = (args->v0.iter & 0xff) - 1;
- } else
- return ret;
-
- domain_nr = nvkm_pm_count_perfdom(pm);
- if (di >= (int)domain_nr)
- return -EINVAL;
-
- if (di >= 0) {
- dom = nvkm_perfdom_find(pm, di);
- if (dom == NULL)
- return -EINVAL;
-
- args->v0.id = di;
- args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
- strscpy(args->v0.name, dom->name, sizeof(args->v0.name));
-
- /* Currently only global counters (PCOUNTER) are implemented
- * but this will be different for local counters (MP). */
- args->v0.counter_nr = 4;
- }
-
- if (++di < domain_nr) {
- args->v0.iter = ++di;
- return 0;
- }
-
- args->v0.iter = 0xff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_signal_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_device *device = pm->engine.subdev.device;
- struct nvkm_perfdom *dom;
- struct nvkm_perfsig *sig;
- const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
- const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
- int ret = -ENOSYS, si;
-
- nvif_ioctl(object, "perfmon query signal size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "perfmon query signal vers %d dom %d iter %04x\n",
- args->v0.version, args->v0.domain, args->v0.iter);
- si = (args->v0.iter & 0xffff) - 1;
- } else
- return ret;
-
- dom = nvkm_perfdom_find(pm, args->v0.domain);
- if (dom == NULL || si >= (int)dom->signal_nr)
- return -EINVAL;
-
- if (si >= 0) {
- sig = &dom->signal[si];
- if (raw || !sig->name) {
- snprintf(args->v0.name, sizeof(args->v0.name),
- "/%s/%02x", dom->name, si);
- } else {
- strscpy(args->v0.name, sig->name, sizeof(args->v0.name));
- }
-
- args->v0.signal = si;
- args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
- }
-
- while (++si < dom->signal_nr) {
- if (all || dom->signal[si].name) {
- args->v0.iter = ++si;
- return 0;
- }
- }
-
- args->v0.iter = 0xffff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_source_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u8 source_nr = 0;
- int si, ret = -ENOSYS;
-
- nvif_ioctl(object, "perfmon query source size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "perfmon source vers %d dom %d sig %02x iter %02x\n",
- args->v0.version, args->v0.domain, args->v0.signal,
- args->v0.iter);
- si = (args->v0.iter & 0xff) - 1;
- } else
- return ret;
-
- sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
- if (!sig)
- return -EINVAL;
-
- source_nr = nvkm_perfsig_count_perfsrc(sig);
- if (si >= (int)source_nr)
- return -EINVAL;
-
- if (si >= 0) {
- src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
- if (!src)
- return -EINVAL;
-
- args->v0.source = sig->source[si];
- args->v0.mask = src->mask;
- strscpy(args->v0.name, src->name, sizeof(args->v0.name));
- }
-
- if (++si < source_nr) {
- args->v0.iter = ++si;
- return 0;
- }
-
- args->v0.iter = 0xff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
- switch (mthd) {
- case NVIF_PERFMON_V0_QUERY_DOMAIN:
- return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
- case NVIF_PERFMON_V0_QUERY_SIGNAL:
- return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
- case NVIF_PERFMON_V0_QUERY_SOURCE:
- return nvkm_perfmon_mthd_query_source(perfmon, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
-static int
-nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
- return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
-}
-
-static int
-nvkm_perfmon_child_get(struct nvkm_object *object, int index,
- struct nvkm_oclass *oclass)
-{
- if (index == 0) {
- oclass->base.oclass = NVIF_CLASS_PERFDOM;
- oclass->base.minver = 0;
- oclass->base.maxver = 0;
- oclass->ctor = nvkm_perfmon_child_new;
- return 0;
- }
- return -EINVAL;
-}
-
-static void *
-nvkm_perfmon_dtor(struct nvkm_object *object)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
- struct nvkm_pm *pm = perfmon->pm;
- spin_lock(&pm->client.lock);
- if (pm->client.object == &perfmon->object)
- pm->client.object = NULL;
- spin_unlock(&pm->client.lock);
- return perfmon;
-}
-
-static const struct nvkm_object_func
-nvkm_perfmon = {
- .dtor = nvkm_perfmon_dtor,
- .mthd = nvkm_perfmon_mthd,
- .sclass = nvkm_perfmon_child_get,
-};
-
-static int
-nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_perfmon *perfmon;
-
- if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
- perfmon->pm = pm;
- *pobject = &perfmon->object;
- return 0;
-}
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static int
-nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_pm *pm = nvkm_pm(oclass->engine);
- int ret;
-
- ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
- if (ret)
- return ret;
-
- spin_lock(&pm->client.lock);
- if (pm->client.object == NULL)
- pm->client.object = *pobject;
- ret = (pm->client.object == *pobject) ? 0 : -EBUSY;
- spin_unlock(&pm->client.lock);
- return ret;
-}
-
-static const struct nvkm_device_oclass
-nvkm_pm_oclass = {
- .base.oclass = NVIF_CLASS_PERFMON,
- .base.minver = -1,
- .base.maxver = -1,
- .ctor = nvkm_pm_oclass_new,
-};
-
-static int
-nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
- const struct nvkm_device_oclass **class)
-{
- if (index == 0) {
- oclass->base = nvkm_pm_oclass.base;
- *class = &nvkm_pm_oclass;
- return index;
- }
- return 1;
-}
-
-static int
-nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
- const struct nvkm_specsrc *spec)
-{
- const struct nvkm_specsrc *ssrc;
- const struct nvkm_specmux *smux;
- struct nvkm_perfsrc *src;
- u8 source_nr = 0;
-
- if (!spec) {
- /* No sources are defined for this signal. */
- return 0;
- }
-
- ssrc = spec;
- while (ssrc->name) {
- smux = ssrc->mux;
- while (smux->name) {
- bool found = false;
- u8 source_id = 0;
- u32 len;
-
- list_for_each_entry(src, &pm->sources, head) {
- if (src->addr == ssrc->addr &&
- src->shift == smux->shift) {
- found = true;
- break;
- }
- source_id++;
- }
-
- if (!found) {
- src = kzalloc(sizeof(*src), GFP_KERNEL);
- if (!src)
- return -ENOMEM;
-
- src->addr = ssrc->addr;
- src->mask = smux->mask;
- src->shift = smux->shift;
- src->enable = smux->enable;
-
- len = strlen(ssrc->name) +
- strlen(smux->name) + 2;
- src->name = kzalloc(len, GFP_KERNEL);
- if (!src->name) {
- kfree(src);
- return -ENOMEM;
- }
- snprintf(src->name, len, "%s_%s", ssrc->name,
- smux->name);
-
- list_add_tail(&src->head, &pm->sources);
- }
-
- sig->source[source_nr++] = source_id + 1;
- smux++;
- }
- ssrc++;
- }
-
- return 0;
-}
-
-int
-nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
- u32 base, u32 size_unit, u32 size_domain,
- const struct nvkm_specdom *spec)
-{
- const struct nvkm_specdom *sdom;
- const struct nvkm_specsig *ssig;
- struct nvkm_perfdom *dom;
- int ret, i;
-
- for (i = 0; i == 0 || mask; i++) {
- u32 addr = base + (i * size_unit);
- if (i && !(mask & (1 << i)))
- continue;
-
- sdom = spec;
- while (sdom->signal_nr) {
- dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
- GFP_KERNEL);
- if (!dom)
- return -ENOMEM;
-
- if (mask) {
- snprintf(dom->name, sizeof(dom->name),
- "%s/%02x/%02x", name, i,
- (int)(sdom - spec));
- } else {
- snprintf(dom->name, sizeof(dom->name),
- "%s/%02x", name, (int)(sdom - spec));
- }
-
- list_add_tail(&dom->head, &pm->domains);
- INIT_LIST_HEAD(&dom->list);
- dom->func = sdom->func;
- dom->addr = addr;
- dom->signal_nr = sdom->signal_nr;
-
- ssig = (sdom++)->signal;
- while (ssig->name) {
- struct nvkm_perfsig *sig =
- &dom->signal[ssig->signal];
- sig->name = ssig->name;
- ret = nvkm_perfsrc_new(pm, sig, ssig->source);
- if (ret)
- return ret;
- ssig++;
- }
-
- addr += size_domain;
- }
-
- mask &= ~(1 << i);
- }
-
- return 0;
-}
-
-static int
-nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
-{
- struct nvkm_pm *pm = nvkm_pm(engine);
- if (pm->func->fini)
- pm->func->fini(pm);
- return 0;
-}
-
-static void *
-nvkm_pm_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_pm *pm = nvkm_pm(engine);
- struct nvkm_perfdom *dom, *next_dom;
- struct nvkm_perfsrc *src, *next_src;
-
- list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
- list_del(&dom->head);
- kfree(dom);
- }
-
- list_for_each_entry_safe(src, next_src, &pm->sources, head) {
- list_del(&src->head);
- kfree(src->name);
- kfree(src);
- }
-
- return pm;
-}
-
-static const struct nvkm_engine_func
-nvkm_pm = {
- .dtor = nvkm_pm_dtor,
- .fini = nvkm_pm_fini,
- .base.sclass = nvkm_pm_oclass_get,
-};
-
-int
-nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm *pm)
-{
- pm->func = func;
- INIT_LIST_HEAD(&pm->domains);
- INIT_LIST_HEAD(&pm->sources);
- spin_lock_init(&pm->client.lock);
- return nvkm_engine_ctor(&nvkm_pm, device, type, inst, true, &pm->engine);
-}
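The deleted base.c carried the whole PCOUNTER object model: an nvkm_pm owns a list of perfdoms, each perfdom holds an array of perfsigs, and each perfsig references up to eight perfsrc ids, with counters programmed and sampled in batches of four. One of its counting helpers, restated as standalone code:

#include <stddef.h>

#define SRC_PER_SIG 8		/* mirrors ARRAY_SIZE(sig->source) above */

struct perfsig_sketch {
	const char *name;	/* NULL name == unused signal slot */
	unsigned char source[SRC_PER_SIG];
};

/* Equivalent of nvkm_perfsig_count_perfsrc() from the deleted file. */
static unsigned count_sources(const struct perfsig_sketch *sig)
{
	unsigned n = 0;
	size_t i;

	for (i = 0; i < SRC_PER_SIG; i++)
		if (sig->source[i])
			n++;
	return n;
}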
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
deleted file mode 100644
index 0086d00eb162..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-g84_vfetch_sources[] = {
- { 0x400c0c, (const struct nvkm_specmux[]) {
- { 0x3, 0, "unk0" },
- {}
- }, "pgraph_vfetch_unk0c" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_prop_sources[] = {
- { 0x408e50, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_tpc0_prop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_tex_sources[] = {
- { 0x408808, (const struct nvkm_specmux[]) {
- { 0xfffff, 0, "unk0" },
- {}
- }, "pgraph_tpc0_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specdom
-g84_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xbd, "pc01_gr_idle" },
- { 0x5e, "pc01_strmout_00" },
- { 0x5f, "pc01_strmout_01" },
- { 0xd2, "pc01_trast_00" },
- { 0xd3, "pc01_trast_01" },
- { 0xd4, "pc01_trast_02" },
- { 0xd5, "pc01_trast_03" },
- { 0xd8, "pc01_trast_04" },
- { 0xd9, "pc01_trast_05" },
- { 0x5c, "pc01_vattr_00" },
- { 0x5d, "pc01_vattr_01" },
- { 0x66, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x67, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x68, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x69, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x76, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x77, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x78, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x79, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", nv50_zcull_sources },
- { 0x08, "pc01_zcull_01", nv50_zcull_sources },
- { 0x09, "pc01_zcull_02", nv50_zcull_sources },
- { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
- { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
- { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
- { 0xa4, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xa0, (const struct nvkm_specsig[]) {
- { 0x30, "pc02_crop_00", g84_crop_sources },
- { 0x31, "pc02_crop_01", g84_crop_sources },
- { 0x32, "pc02_crop_02", g84_crop_sources },
- { 0x33, "pc02_crop_03", g84_crop_sources },
- { 0x00, "pc02_prop_00", g84_prop_sources },
- { 0x01, "pc02_prop_01", g84_prop_sources },
- { 0x02, "pc02_prop_02", g84_prop_sources },
- { 0x03, "pc02_prop_03", g84_prop_sources },
- { 0x04, "pc02_prop_04", g84_prop_sources },
- { 0x05, "pc02_prop_05", g84_prop_sources },
- { 0x06, "pc02_prop_06", g84_prop_sources },
- { 0x07, "pc02_prop_07", g84_prop_sources },
- { 0x48, "pc02_tex_00", g84_tex_sources },
- { 0x49, "pc02_tex_01", g84_tex_sources },
- { 0x4a, "pc02_tex_02", g84_tex_sources },
- { 0x4b, "pc02_tex_03", g84_tex_sources },
- { 0x1a, "pc02_tex_04", g84_tex_sources },
- { 0x1b, "pc02_tex_05", g84_tex_sources },
- { 0x1c, "pc02_tex_06", g84_tex_sources },
- { 0x44, "pc02_zrop_00", nv50_zrop_sources },
- { 0x45, "pc02_zrop_01", nv50_zrop_sources },
- { 0x46, "pc02_zrop_02", nv50_zrop_sources },
- { 0x47, "pc02_zrop_03", nv50_zrop_sources },
- { 0x8c, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-g84_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(g84_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
deleted file mode 100644
index 8e02701def8e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "gf100.h"
-
-const struct nvkm_specsrc
-gf100_pbfb_sources[] = {
- { 0x10f100, (const struct nvkm_specmux[]) {
- { 0x1, 0, "unk0" },
- { 0x3f, 4, "unk4" },
- {}
- }, "pbfb_broadcast_pm_unk100" },
- {}
-};
-
-const struct nvkm_specsrc
-gf100_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 29, "unk29" },
- {}
- }, "pmfb0_pm_unk28" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_l1_sources[] = {
- { 0x5044a8, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_l1_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_tex_sources[] = {
- { 0x5042c0, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 8, "sel1", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_unk400_sources[] = {
- { 0x50440c, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_unk400_pm_mux" },
- {}
-};
-
-static const struct nvkm_specdom
-gf100_pm_hub[] = {
- {}
-};
-
-const struct nvkm_specdom
-gf100_pm_gpc[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x00, "gpc00_l1_00", gf100_l1_sources },
- { 0x01, "gpc00_l1_01", gf100_l1_sources },
- { 0x02, "gpc00_l1_02", gf100_l1_sources },
- { 0x03, "gpc00_l1_03", gf100_l1_sources },
- { 0x05, "gpc00_l1_04", gf100_l1_sources },
- { 0x06, "gpc00_l1_05", gf100_l1_sources },
- { 0x0a, "gpc00_tex_00", gf100_tex_sources },
- { 0x0b, "gpc00_tex_01", gf100_tex_sources },
- { 0x0c, "gpc00_tex_02", gf100_tex_sources },
- { 0x0d, "gpc00_tex_03", gf100_tex_sources },
- { 0x0e, "gpc00_tex_04", gf100_tex_sources },
- { 0x0f, "gpc00_tex_05", gf100_tex_sources },
- { 0x10, "gpc00_tex_06", gf100_tex_sources },
- { 0x11, "gpc00_tex_07", gf100_tex_sources },
- { 0x12, "gpc00_tex_08", gf100_tex_sources },
- { 0x26, "gpc00_unk400_00", gf100_unk400_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gf100_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x10, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x21, "part00_pmfb_00", gf100_pmfb_sources },
- { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
- { 0x00, "part00_pmfb_02", gf100_pmfb_sources },
- { 0x02, "part00_pmfb_03", gf100_pmfb_sources },
- { 0x01, "part00_pmfb_04", gf100_pmfb_sources },
- { 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
- { 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
- { 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
- { 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
- { 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
- { 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
- { 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static void
-gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- u32 log = ctr->logic_op;
- u32 src = 0x00000000;
- int i;
-
- for (i = 0; i < 4; i++)
- src |= ctr->signal[i] << (i * 8);
-
- nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
- nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
- nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
- nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
-}
-
-static void
-gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
-
- switch (ctr->slot) {
- case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
- case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
- case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
- case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
- }
- dom->clk = nvkm_rd32(device, dom->addr + 0x070);
-}
-
-static void
-gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
- nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
-}
-
-const struct nvkm_funcdom
-gf100_perfctr_func = {
- .init = gf100_perfctr_init,
- .read = gf100_perfctr_read,
- .next = gf100_perfctr_next,
-};
-
-static void
-gf100_pm_fini(struct nvkm_pm *pm)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
-}
-
-static const struct nvkm_pm_func
-gf100_pm_ = {
- .fini = gf100_pm_fini,
-};
-
-int
-gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- struct nvkm_pm *pm;
- u32 mask;
- int ret;
-
- if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
- return -ENOMEM;
-
- ret = nvkm_pm_ctor(&gf100_pm_, device, type, inst, pm);
- if (ret)
- return ret;
-
- /* HUB */
- ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
- func->doms_hub);
- if (ret)
- return ret;
-
- /* GPC */
- mask = (1 << nvkm_rd32(device, 0x022430)) - 1;
- mask &= ~nvkm_rd32(device, 0x022504);
- mask &= ~nvkm_rd32(device, 0x022584);
-
- ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
- 0x1000, 0x200, func->doms_gpc);
- if (ret)
- return ret;
-
- /* PART */
- mask = (1 << nvkm_rd32(device, 0x022438)) - 1;
- mask &= ~nvkm_rd32(device, 0x022548);
- mask &= ~nvkm_rd32(device, 0x0225c8);
-
- ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
- 0x1000, 0x200, func->doms_part);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct gf100_pm_func
-gf100_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf100_pm_hub,
- .doms_part = gf100_pm_part,
-};
-
-int
-gf100_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf100_pm, device, type, inst, ppm);
-}
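A note on the GPC/PART discovery logic in the constructor just removed: the exact meaning of the register offsets is reverse-engineered, but the arithmetic reads plainly — a unit count is read back and turned into an all-ones bitmask, then per-unit disable (floorsweeping) bits are cleared so no perfdom is instantiated for units the chip does not actually have. Sketch of the mask derivation, assuming 0x022430 holds the GPC count:

	mask  = (1 << nvkm_rd32(device, 0x022430)) - 1;	/* N units -> N low bits set */
	mask &= ~nvkm_rd32(device, 0x022504);		/* clear disabled units */
	mask &= ~nvkm_rd32(device, 0x022584);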
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
deleted file mode 100644
index bc4b014c4e8e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_NVC0_H__
-#define __NVKM_PM_NVC0_H__
-#include "priv.h"
-
-struct gf100_pm_func {
- const struct nvkm_specdom *doms_hub;
- const struct nvkm_specdom *doms_gpc;
- const struct nvkm_specdom *doms_part;
-};
-
-int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm **);
-
-extern const struct nvkm_funcdom gf100_perfctr_func;
-extern const struct nvkm_specdom gf100_pm_gpc[];
-
-extern const struct nvkm_specsrc gf100_pbfb_sources[];
-extern const struct nvkm_specsrc gf100_pmfb_sources[];
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
deleted file mode 100644
index 505565866b59..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2015 Samuel Pitoiset
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "gf100.h"
-
-static const struct nvkm_specdom
-gf108_pm_hub[] = {
- {}
-};
-
-static const struct nvkm_specdom
-gf108_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x14, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x15, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x20, "part00_pbfb_02", gf100_pbfb_sources },
- { 0x21, "part00_pbfb_03", gf100_pbfb_sources },
- { 0x01, "part00_pmfb_00", gf100_pmfb_sources },
- { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
- { 0x05, "part00_pmfb_02", gf100_pmfb_sources},
- { 0x07, "part00_pmfb_03", gf100_pmfb_sources },
- { 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
- { 0x12, "part00_pmfb_05", gf100_pmfb_sources },
- { 0x13, "part00_pmfb_06", gf100_pmfb_sources },
- { 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
- { 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
- { 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
- { 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
- { 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gf108_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf108_pm_hub,
- .doms_part = gf108_pm_part,
-};
-
-int
-gf108_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf108_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
deleted file mode 100644
index c61e8c010bb3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright 2015 Samuel Pitoiset
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "gf100.h"
-
-static const struct nvkm_specsrc
-gf117_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 28, "unk28" },
- {}
- }, "pmfb0_pm_unk28" },
- { 0x14125c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp0_pm_unk25c" },
- {}
-};
-
-static const struct nvkm_specdom
-gf117_pm_hub[] = {
- {}
-};
-
-static const struct nvkm_specdom
-gf117_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x12, "part00_pmfb_00", gf117_pmfb_sources },
- { 0x15, "part00_pmfb_01", gf117_pmfb_sources },
- { 0x16, "part00_pmfb_02", gf117_pmfb_sources },
- { 0x18, "part00_pmfb_03", gf117_pmfb_sources },
- { 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
- { 0x23, "part00_pmfb_05", gf117_pmfb_sources },
- { 0x24, "part00_pmfb_06", gf117_pmfb_sources },
- { 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
- { 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
- { 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
- { 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
- { 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gf117_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf117_pm_hub,
- .doms_part = gf117_pm_part,
-};
-
-int
-gf117_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf117_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
deleted file mode 100644
index 75bf3df1cb18..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "gf100.h"
-
-static const struct nvkm_specsrc
-gk104_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 28, "unk28" },
- {}
- }, "pmfb0_pm_unk28" },
- { 0x14125c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp0_pm_unk25c" },
- { 0x14165c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp1_pm_unk25c" },
- { 0x141a5c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp2_pm_unk25c" },
- { 0x141e5c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp3_pm_unk25c" },
- {}
-};
-
-static const struct nvkm_specsrc
-gk104_tex_sources[] = {
- { 0x5042c0, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 8, "sel1", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
- { 0x5042c8, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
- { 0x5042b8, (const struct nvkm_specmux[]) {
- { 0xff, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_hub[] = {
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x40, (const struct nvkm_specsig[]) {
- { 0x27, "hub01_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub02_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub03_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x40, (const struct nvkm_specsig[]) {
- { 0x03, "host_mmio_rd" },
- { 0x27, "hub04_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub05_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0xc0, (const struct nvkm_specsig[]) {
- { 0x74, "host_fb_rd3x" },
- { 0x75, "host_fb_rd3x_2" },
- { 0xa7, "hub06_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub07_user_0" },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_gpc[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0xc7, "gpc00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &gf100_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- { 0x00, "gpc02_tex_00", gk104_tex_sources },
- { 0x01, "gpc02_tex_01", gk104_tex_sources },
- { 0x02, "gpc02_tex_02", gk104_tex_sources },
- { 0x03, "gpc02_tex_03", gk104_tex_sources },
- { 0x04, "gpc02_tex_04", gk104_tex_sources },
- { 0x05, "gpc02_tex_05", gk104_tex_sources },
- { 0x06, "gpc02_tex_06", gk104_tex_sources },
- { 0x07, "gpc02_tex_07", gk104_tex_sources },
- { 0x08, "gpc02_tex_08", gk104_tex_sources },
- { 0x0a, "gpc02_tex_0a", gk104_tex_sources },
- { 0x0b, "gpc02_tex_0b", gk104_tex_sources },
- { 0x0d, "gpc02_tex_0c", gk104_tex_sources },
- { 0x0c, "gpc02_tex_0d", gk104_tex_sources },
- { 0x0e, "gpc02_tex_0e", gk104_tex_sources },
- { 0x0f, "gpc02_tex_0f", gk104_tex_sources },
- { 0x10, "gpc02_tex_10", gk104_tex_sources },
- { 0x11, "gpc02_tex_11", gk104_tex_sources },
- { 0x12, "gpc02_tex_12", gk104_tex_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_part[] = {
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
- { 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
- { 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
- { 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
- { 0x10, "part00_pmfb_04", gk104_pmfb_sources },
- { 0x12, "part00_pmfb_05", gk104_pmfb_sources },
- { 0x15, "part00_pmfb_06", gk104_pmfb_sources },
- { 0x16, "part00_pmfb_07", gk104_pmfb_sources },
- { 0x18, "part00_pmfb_08", gk104_pmfb_sources },
- { 0x21, "part00_pmfb_09", gk104_pmfb_sources },
- { 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
- { 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
- { 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
- { 0x47, "part00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "part01_user_0" },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gk104_pm = {
- .doms_gpc = gk104_pm_gpc,
- .doms_hub = gk104_pm_hub,
- .doms_part = gk104_pm_part,
-};
-
-int
-gk104_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gk104_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
deleted file mode 100644
index 25874c541486..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright 2015 Nouveau project
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-gt200_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x1f, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-const struct nvkm_specsrc
-gt200_prop_sources[] = {
- { 0x408750, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_tpc0_prop_pm_mux" },
- {}
-};
-
-const struct nvkm_specsrc
-gt200_tex_sources[] = {
- { 0x408508, (const struct nvkm_specmux[]) {
- { 0xfffff, 0, "unk0" },
- {}
- }, "pgraph_tpc0_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specdom
-gt200_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xc9, "pc01_gr_idle" },
- { 0x84, "pc01_strmout_00" },
- { 0x85, "pc01_strmout_01" },
- { 0xde, "pc01_trast_00" },
- { 0xdf, "pc01_trast_01" },
- { 0xe0, "pc01_trast_02" },
- { 0xe1, "pc01_trast_03" },
- { 0xe4, "pc01_trast_04" },
- { 0xe5, "pc01_trast_05" },
- { 0x82, "pc01_vattr_00" },
- { 0x83, "pc01_vattr_01" },
- { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", nv50_zcull_sources },
- { 0x08, "pc01_zcull_01", nv50_zcull_sources },
- { 0x09, "pc01_zcull_02", nv50_zcull_sources },
- { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
- { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
- { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
-
- { 0xb0, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0x55, "pc02_crop_00", gt200_crop_sources },
- { 0x56, "pc02_crop_01", gt200_crop_sources },
- { 0x57, "pc02_crop_02", gt200_crop_sources },
- { 0x58, "pc02_crop_03", gt200_crop_sources },
- { 0x00, "pc02_prop_00", gt200_prop_sources },
- { 0x01, "pc02_prop_01", gt200_prop_sources },
- { 0x02, "pc02_prop_02", gt200_prop_sources },
- { 0x03, "pc02_prop_03", gt200_prop_sources },
- { 0x04, "pc02_prop_04", gt200_prop_sources },
- { 0x05, "pc02_prop_05", gt200_prop_sources },
- { 0x06, "pc02_prop_06", gt200_prop_sources },
- { 0x07, "pc02_prop_07", gt200_prop_sources },
- { 0x78, "pc02_tex_00", gt200_tex_sources },
- { 0x79, "pc02_tex_01", gt200_tex_sources },
- { 0x7a, "pc02_tex_02", gt200_tex_sources },
- { 0x7b, "pc02_tex_03", gt200_tex_sources },
- { 0x32, "pc02_tex_04", gt200_tex_sources },
- { 0x33, "pc02_tex_05", gt200_tex_sources },
- { 0x34, "pc02_tex_06", gt200_tex_sources },
- { 0x74, "pc02_zrop_00", nv50_zrop_sources },
- { 0x75, "pc02_zrop_01", nv50_zrop_sources },
- { 0x76, "pc02_zrop_02", nv50_zrop_sources },
- { 0x77, "pc02_zrop_03", nv50_zrop_sources },
- { 0xec, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-gt200_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(gt200_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
deleted file mode 100644
index 54c23e2b6645..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-static const struct nvkm_specsrc
-gt215_zcull_sources[] = {
- { 0x402ca4, (const struct nvkm_specmux[]) {
- { 0x7fff, 0, "unk0" },
- { 0xff, 24, "unk24" },
- {}
- }, "pgraph_zcull_pm_unka4" },
- {}
-};
-
-static const struct nvkm_specdom
-gt215_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xcb, "pc01_gr_idle" },
- { 0x86, "pc01_strmout_00" },
- { 0x87, "pc01_strmout_01" },
- { 0xe0, "pc01_trast_00" },
- { 0xe1, "pc01_trast_01" },
- { 0xe2, "pc01_trast_02" },
- { 0xe3, "pc01_trast_03" },
- { 0xe6, "pc01_trast_04" },
- { 0xe7, "pc01_trast_05" },
- { 0x84, "pc01_vattr_00" },
- { 0x85, "pc01_vattr_01" },
- { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", gt215_zcull_sources },
- { 0x08, "pc01_zcull_01", gt215_zcull_sources },
- { 0x09, "pc01_zcull_02", gt215_zcull_sources },
- { 0x0a, "pc01_zcull_03", gt215_zcull_sources },
- { 0x0b, "pc01_zcull_04", gt215_zcull_sources },
- { 0x0c, "pc01_zcull_05", gt215_zcull_sources },
- { 0xb2, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x64, "pc02_crop_00", gt200_crop_sources },
- { 0x65, "pc02_crop_01", gt200_crop_sources },
- { 0x66, "pc02_crop_02", gt200_crop_sources },
- { 0x67, "pc02_crop_03", gt200_crop_sources },
- { 0x00, "pc02_prop_00", gt200_prop_sources },
- { 0x01, "pc02_prop_01", gt200_prop_sources },
- { 0x02, "pc02_prop_02", gt200_prop_sources },
- { 0x03, "pc02_prop_03", gt200_prop_sources },
- { 0x04, "pc02_prop_04", gt200_prop_sources },
- { 0x05, "pc02_prop_05", gt200_prop_sources },
- { 0x06, "pc02_prop_06", gt200_prop_sources },
- { 0x07, "pc02_prop_07", gt200_prop_sources },
- { 0x80, "pc02_tex_00", gt200_tex_sources },
- { 0x81, "pc02_tex_01", gt200_tex_sources },
- { 0x82, "pc02_tex_02", gt200_tex_sources },
- { 0x83, "pc02_tex_03", gt200_tex_sources },
- { 0x3a, "pc02_tex_04", gt200_tex_sources },
- { 0x3b, "pc02_tex_05", gt200_tex_sources },
- { 0x3c, "pc02_tex_06", gt200_tex_sources },
- { 0x7c, "pc02_zrop_00", nv50_zrop_sources },
- { 0x7d, "pc02_zrop_01", nv50_zrop_sources },
- { 0x7e, "pc02_zrop_02", nv50_zrop_sources },
- { 0x7f, "pc02_zrop_03", nv50_zrop_sources },
- { 0xcc, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-gt215_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(gt215_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
deleted file mode 100644
index eba5b3b79340..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-static void
-nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- u32 log = ctr->logic_op;
- u32 src = 0x00000000;
- int i;
-
- for (i = 0; i < 4; i++)
- src |= ctr->signal[i] << (i * 8);
-
- nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
- nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
- nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
-}
-
-static void
-nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
-
- switch (ctr->slot) {
- case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
- case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
- case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
- case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
- }
- dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
-}
-
-static void
-nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
-
- if (nv40pm->sequence != pm->sequence) {
- nvkm_wr32(device, 0x400084, 0x00000020);
- nv40pm->sequence = pm->sequence;
- }
-}
-
-const struct nvkm_funcdom
-nv40_perfctr_func = {
- .init = nv40_perfctr_init,
- .read = nv40_perfctr_read,
- .next = nv40_perfctr_next,
-};
-
-static const struct nvkm_pm_func
-nv40_pm_ = {
-};
-
-int
-nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- struct nv40_pm *pm;
- int ret;
-
- if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
- return -ENOMEM;
- *ppm = &pm->base;
-
- ret = nvkm_pm_ctor(&nv40_pm_, device, type, inst, &pm->base);
- if (ret)
- return ret;
-
- return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
-}
-
-static const struct nvkm_specdom
-nv40_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-nv40_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(nv40_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
deleted file mode 100644
index afb79843723d..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_NV40_H__
-#define __NVKM_PM_NV40_H__
-#define nv40_pm(p) container_of((p), struct nv40_pm, base)
-#include "priv.h"
-
-struct nv40_pm {
- struct nvkm_pm base;
- u32 sequence;
-};
-
-int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm **);
-extern const struct nvkm_funcdom nv40_perfctr_func;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
deleted file mode 100644
index bbd3404901f9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-nv50_zcull_sources[] = {
- { 0x402ca4, (const struct nvkm_specmux[]) {
- { 0x7fff, 0, "unk0" },
- {}
- }, "pgraph_zcull_pm_unka4" },
- {}
-};
-
-const struct nvkm_specsrc
-nv50_zrop_sources[] = {
- { 0x40708c, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0xf, 16, "sel1", true },
- {}
- }, "pgraph_rop0_zrop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_prop_sources[] = {
- { 0x40be50, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_tpc3_prop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0x7, 0, "sel0", true },
- { 0x7, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_tex_sources[] = {
- { 0x40b808, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pgraph_tpc3_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_vfetch_sources[] = {
- { 0x400c0c, (const struct nvkm_specmux[]) {
- { 0x1, 0, "unk0" },
- {}
- }, "pgraph_vfetch_unk0c" },
- {}
-};
-
-static const struct nvkm_specdom
-nv50_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xc8, "pc01_gr_idle" },
- { 0x7f, "pc01_strmout_00" },
- { 0x80, "pc01_strmout_01" },
- { 0xdc, "pc01_trast_00" },
- { 0xdd, "pc01_trast_01" },
- { 0xde, "pc01_trast_02" },
- { 0xdf, "pc01_trast_03" },
- { 0xe2, "pc01_trast_04" },
- { 0xe3, "pc01_trast_05" },
- { 0x7c, "pc01_vattr_00" },
- { 0x7d, "pc01_vattr_01" },
- { 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
- { 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
- { 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
- { 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
- { 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
- { 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
- { 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
- { 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
- { 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
- { 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
- { 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
- { 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
- { 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
- { 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
- { 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
- { 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
- { 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
- { 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
- { 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
- { 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
- { 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
- { 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
- { 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
- { 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
- { 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
- { 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
- { 0x20, "pc01_zcull_00", nv50_zcull_sources },
- { 0x21, "pc01_zcull_01", nv50_zcull_sources },
- { 0x22, "pc01_zcull_02", nv50_zcull_sources },
- { 0x23, "pc01_zcull_03", nv50_zcull_sources },
- { 0x24, "pc01_zcull_04", nv50_zcull_sources },
- { 0x25, "pc01_zcull_05", nv50_zcull_sources },
- { 0xae, "pc01_unk00" },
- { 0xee, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0x52, "pc02_crop_00", nv50_crop_sources },
- { 0x53, "pc02_crop_01", nv50_crop_sources },
- { 0x54, "pc02_crop_02", nv50_crop_sources },
- { 0x55, "pc02_crop_03", nv50_crop_sources },
- { 0x00, "pc02_prop_00", nv50_prop_sources },
- { 0x01, "pc02_prop_01", nv50_prop_sources },
- { 0x02, "pc02_prop_02", nv50_prop_sources },
- { 0x03, "pc02_prop_03", nv50_prop_sources },
- { 0x04, "pc02_prop_04", nv50_prop_sources },
- { 0x05, "pc02_prop_05", nv50_prop_sources },
- { 0x06, "pc02_prop_06", nv50_prop_sources },
- { 0x07, "pc02_prop_07", nv50_prop_sources },
- { 0x70, "pc02_tex_00", nv50_tex_sources },
- { 0x71, "pc02_tex_01", nv50_tex_sources },
- { 0x72, "pc02_tex_02", nv50_tex_sources },
- { 0x73, "pc02_tex_03", nv50_tex_sources },
- { 0x40, "pc02_tex_04", nv50_tex_sources },
- { 0x41, "pc02_tex_05", nv50_tex_sources },
- { 0x42, "pc02_tex_06", nv50_tex_sources },
- { 0x6c, "pc02_zrop_00", nv50_zrop_sources },
- { 0x6d, "pc02_zrop_01", nv50_zrop_sources },
- { 0x6e, "pc02_zrop_02", nv50_zrop_sources },
- { 0x6f, "pc02_zrop_03", nv50_zrop_sources },
- { 0xee, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-nv50_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(nv50_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
deleted file mode 100644
index c011227f7052..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_PRIV_H__
-#define __NVKM_PM_PRIV_H__
-#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
-#include <engine/pm.h>
-
-int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm *);
-
-struct nvkm_pm_func {
- void (*fini)(struct nvkm_pm *);
-};
-
-struct nvkm_perfctr {
- struct list_head head;
- u8 domain;
- u8 signal[4];
- u64 source[4][8];
- int slot;
- u32 logic_op;
- u32 ctr;
-};
-
-struct nvkm_specmux {
- u32 mask;
- u8 shift;
- const char *name;
- bool enable;
-};
-
-struct nvkm_specsrc {
- u32 addr;
- const struct nvkm_specmux *mux;
- const char *name;
-};
-
-struct nvkm_perfsrc {
- struct list_head head;
- char *name;
- u32 addr;
- u32 mask;
- u8 shift;
- bool enable;
-};
-
-extern const struct nvkm_specsrc nv50_zcull_sources[];
-extern const struct nvkm_specsrc nv50_zrop_sources[];
-extern const struct nvkm_specsrc g84_vfetch_sources[];
-extern const struct nvkm_specsrc gt200_crop_sources[];
-extern const struct nvkm_specsrc gt200_prop_sources[];
-extern const struct nvkm_specsrc gt200_tex_sources[];
-
-struct nvkm_specsig {
- u8 signal;
- const char *name;
- const struct nvkm_specsrc *source;
-};
-
-struct nvkm_perfsig {
- const char *name;
- u8 source[8];
-};
-
-struct nvkm_specdom {
- u16 signal_nr;
- const struct nvkm_specsig *signal;
- const struct nvkm_funcdom *func;
-};
-
-#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
-#include <core/object.h>
-
-struct nvkm_perfdom {
- struct nvkm_object object;
- struct nvkm_perfmon *perfmon;
- struct list_head head;
- struct list_head list;
- const struct nvkm_funcdom *func;
- struct nvkm_perfctr *ctr[4];
- char name[32];
- u32 addr;
- u8 mode;
- u32 clk;
- u16 signal_nr;
- struct nvkm_perfsig signal[] __counted_by(signal_nr);
-};
-
-struct nvkm_funcdom {
- void (*init)(struct nvkm_pm *, struct nvkm_perfdom *,
- struct nvkm_perfctr *);
- void (*read)(struct nvkm_pm *, struct nvkm_perfdom *,
- struct nvkm_perfctr *);
- void (*next)(struct nvkm_pm *, struct nvkm_perfdom *);
-};
-
-int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
- const struct nvkm_specdom *);
-
-#define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
-
-struct nvkm_perfmon {
- struct nvkm_object object;
- struct nvkm_pm *pm;
-};
-#endif
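That completes the removal of the NVKM performance-counter (PCOUNTER) engine. The deleted files all followed one pattern, worth keeping in mind while reading the hunks above: each per-chipset file declared const nvkm_specdom tables — one entry per counter domain, listing signal numbers, human-readable names and optional mux sources — plus a constructor handing those tables to the shared nvkm_pm_ctor()/nvkm_perfdom_new() core. A minimal sketch of that pattern, with a hypothetical chip name purely for illustration:

	/* hypothetical chip "foo", in the style of the deleted nv40-family files */
	static const struct nvkm_specdom
	foo_pm[] = {
		{ 0x20, (const struct nvkm_specsig[]) {
			{ 0x01, "pc00_some_signal" },	/* signal number and name */
			{}
		}, &nv40_perfctr_func },	/* per-domain register accessors */
		{}
	};

	int
	foo_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type,
		   int inst, struct nvkm_pm **ppm)
	{
		/* allocates the nvkm_pm, runs nvkm_pm_ctor() and registers a
		 * single "pc" perfdom built from the table above */
		return nv40_pm_new_(foo_pm, device, type, inst, ppm);
	}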
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 050ca7eafac5..5f8002f6bb7a 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -242,8 +242,7 @@ static void omapdss_walk_device(struct device *dev, struct device_node *node,
of_node_put(n);
- n = NULL;
- while ((n = of_graph_get_next_endpoint(node, n)) != NULL) {
+ for_each_endpoint_of_node(node, n) {
struct device_node *pn = of_graph_get_remote_port_parent(n);
if (!pn)
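The conversion above is behavior-preserving: for_each_endpoint_of_node() expands to the same of_graph_get_next_endpoint() walk, taking a reference on each endpoint and dropping the previous one as it advances, so the loop body only manages references it acquires itself (and must put the endpoint only if it breaks out of the loop early). A minimal usage sketch — the walk function name is illustrative:

	#include <linux/of_graph.h>

	static void walk_endpoints(struct device_node *node)
	{
		struct device_node *ep;

		for_each_endpoint_of_node(node, ep) {
			struct device_node *remote = of_graph_get_remote_port_parent(ep);

			if (!remote)
				continue;
			/* ... inspect the remote device ... */
			of_node_put(remote);	/* put our reference, not ep's */
		}
	}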
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 6598c9c08ba1..d3eac4817d76 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -695,6 +695,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (uintptr_t)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto err_alloc_workqueue;
+ }
mutex_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
@@ -753,6 +757,7 @@ err_gem_deinit:
drm_mode_config_cleanup(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
+err_alloc_workqueue:
omap_disconnect_pipelines(ddev);
drm_dev_put(ddev);
return ret;
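alloc_ordered_workqueue() returns NULL on allocation failure, which this probe path previously ignored. The placement of the new label is the important detail: err_alloc_workqueue sits after destroy_workqueue(priv->wq) in the unwind sequence, so the failure path skips destroying a workqueue that was never created while still running the remaining teardown. Reduced to its shape:

	priv->wq = alloc_ordered_workqueue("omapdrm", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto err_alloc_workqueue;	/* jump past destroy_workqueue() */
	}
	/* ... */
	destroy_workqueue(priv->wq);
	err_alloc_workqueue:
		omap_disconnect_pipelines(ddev);
		drm_dev_put(ddev);
		return ret;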
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 9f49b0189d3b..d3a9a9fafe4e 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -87,6 +87,15 @@ config DRM_PANEL_BOE_TV101WUM_NL6
Say Y here if you want to support for BOE TV101WUM and AUO KD101N80
45NA WUXGA PANEL DSI Video Mode panel
+config DRM_PANEL_BOE_TV101WUM_LL2
+ tristate "BOE TV101WUM LL2 1200x1920 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want support for the BOE TV101WUM-LL2
+ WUXGA DSI video mode panel
+
config DRM_PANEL_EBBG_FT8719
tristate "EBBG FT8719 panel driver"
depends on OF
@@ -784,7 +793,8 @@ config DRM_PANEL_SHARP_LS060T1SX01
config DRM_PANEL_SITRONIX_ST7701
tristate "Sitronix ST7701 panel driver"
depends on OF
- depends on DRM_MIPI_DSI
+ depends on SPI || DRM_MIPI_DSI
+ select DRM_MIPI_DBI if SPI
depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for the Sitronix
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 5581387707c6..987a08702410 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TH101MB31UIG002_28A) += panel-boe-th101mb31ig002-28a.o
+obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_LL2) += panel-boe-tv101wum-ll2.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index e77db8597eb7..7e66db4a88bb 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -377,6 +377,8 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
+ boe->panel.prepare_prev_first = true;
+
boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
if (IS_ERR(boe->panel.backlight))
return dev_err_probe(dev, PTR_ERR(boe->panel.backlight),
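Setting prepare_prev_first asks the DRM core to prepare the upstream controller before this panel's own prepare callback runs — for a DSI panel like this one, that typically means the DSI host is brought up (to the LP-11 state) before the panel's power-up and init sequence is sent.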
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
index 763e9f8342d3..0b87f1e6ecae 100644
--- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -16,12 +16,31 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_th101mb31ig002;
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ int (*init)(struct boe_th101mb31ig002 *ctx);
+ unsigned int lanes;
+ bool lp11_before_reset;
+ unsigned int vcioo_to_lp11_delay_ms;
+ unsigned int lp11_to_reset_delay_ms;
+ unsigned int backlight_off_to_display_off_delay_ms;
+ unsigned int enter_sleep_to_reset_down_delay_ms;
+ unsigned int power_off_delay_ms;
+};
struct boe_th101mb31ig002 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
+ const struct panel_desc *desc;
+
struct regulator *power;
struct gpio_desc *enable;
struct gpio_desc *reset;
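The hunk below converts the init sequence from individually error-checked mipi_dsi_dcs_write_seq() calls to the mipi_dsi_multi_context style: the first failure is latched in the context's accum_err field, every subsequent *_multi helper (and mipi_dsi_msleep()) becomes a no-op while an error is pending, and the caller checks the accumulated result once at the end. The resulting control flow, reduced to its skeleton:

	struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };

	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xab, 0xba);
	/* ... further writes, all skipped automatically after a failure ... */
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	return dsi_ctx.accum_err;	/* 0 on success, first error code otherwise */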
@@ -39,74 +58,123 @@ static void boe_th101mb31ig002_reset(struct boe_th101mb31ig002 *ctx)
usleep_range(5000, 6000);
}
-static int boe_th101mb31ig002_enable(struct drm_panel *panel)
+static int boe_th101mb31ig002_enable(struct boe_th101mb31ig002 *ctx)
{
- struct boe_th101mb31ig002 *ctx = container_of(panel,
- struct boe_th101mb31ig002,
- panel);
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xE0, 0xAB, 0xBA);
- mipi_dsi_dcs_write_seq(dsi, 0xE1, 0xBA, 0xAB);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x10, 0x01, 0x47, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C, 0x14, 0x04, 0x50, 0x50, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x56, 0x53, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33, 0x30, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0xB0, 0x00, 0x00, 0x10, 0x00, 0x10,
- 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x05, 0x12, 0x29, 0x49, 0x48, 0x00,
- 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x7C, 0x65, 0x55, 0x49, 0x46, 0x36,
- 0x3B, 0x24, 0x3D, 0x3C, 0x3D, 0x5C, 0x4C,
- 0x55, 0x47, 0x46, 0x39, 0x26, 0x06, 0x7C,
- 0x65, 0x55, 0x49, 0x46, 0x36, 0x3B, 0x24,
- 0x3D, 0x3C, 0x3D, 0x5C, 0x4C, 0x55, 0x47,
- 0x46, 0x39, 0x26, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0xFF, 0x87, 0x12, 0x34, 0x44, 0x44,
- 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0F,
- 0x00, 0x00, 0xC1);
- mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x54, 0x94, 0x02, 0x85, 0x9F, 0x00,
- 0x7F, 0x00, 0x54, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
- 0x22, 0x20, 0x44, 0xFF, 0x18, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC3, 0x86, 0x46, 0x05, 0x05, 0x1C, 0x1C,
- 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
- 0x0F, 0x0F, 0x0D, 0x0D, 0x13, 0x13, 0x11,
- 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC4, 0x07, 0x07, 0x04, 0x04, 0x1C, 0x1C,
- 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
- 0x0E, 0x0E, 0x0C, 0x0C, 0x12, 0x12, 0x10,
- 0x10, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC6, 0x2A, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0xC8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0xCA, 0xCB, 0x43);
- mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x0E, 0x4B, 0x4B, 0x20, 0x19, 0x6B,
- 0x06, 0xB3);
- mipi_dsi_dcs_write_seq(dsi, 0xD2, 0xE3, 0x2B, 0x38, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x00, 0x01, 0x00, 0x0E, 0x04, 0x44,
- 0x08, 0x10, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xE6, 0x80, 0x01, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xF0, 0x12, 0x03, 0x20, 0x00, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xF3, 0x00);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
-
- msleep(120);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set panel on: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xab, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0xba, 0xab);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x10, 0x01, 0x47, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x0c, 0x14, 0x04, 0x50, 0x50, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x56, 0x53, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x33, 0x30, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0xb0, 0x00, 0x00, 0x10, 0x00, 0x10,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x05, 0x12, 0x29, 0x49, 0x48, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x7c, 0x65, 0x55, 0x49, 0x46, 0x36,
+ 0x3b, 0x24, 0x3d, 0x3c, 0x3d, 0x5c, 0x4c,
+ 0x55, 0x47, 0x46, 0x39, 0x26, 0x06, 0x7c,
+ 0x65, 0x55, 0x49, 0x46, 0x36, 0x3b, 0x24,
+ 0x3d, 0x3c, 0x3d, 0x5c, 0x4c, 0x55, 0x47,
+ 0x46, 0x39, 0x26, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xff, 0x87, 0x12, 0x34, 0x44, 0x44,
+ 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0f,
+ 0x00, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x54, 0x94, 0x02, 0x85, 0x9f, 0x00,
+ 0x7f, 0x00, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
+ 0x22, 0x20, 0x44, 0xff, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x86, 0x46, 0x05, 0x05, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1f, 0x1f, 0x1e, 0x1e,
+ 0x0f, 0x0f, 0x0d, 0x0d, 0x13, 0x13, 0x11,
+ 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc4, 0x07, 0x07, 0x04, 0x04, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1f, 0x1f, 0x1e, 0x1e,
+ 0x0e, 0x0e, 0x0c, 0x0c, 0x12, 0x12, 0x10,
+ 0x10, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc6, 0x2a, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0xcb, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x0e, 0x4b, 0x4b, 0x20, 0x19, 0x6b,
+ 0x06, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0xe3, 0x2b, 0x38, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd4, 0x00, 0x01, 0x00, 0x0e, 0x04, 0x44,
+ 0x08, 0x10, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x80, 0x01, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x12, 0x03, 0x20, 0x00, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
- return 0;
+static int starry_er88577_init_cmd(struct boe_th101mb31ig002 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(70);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xab, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0xba, 0xab);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x10, 0x01, 0x47, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x0c, 0x14, 0x04, 0x50, 0x50, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x56, 0x53, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x33, 0x30, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0xb0, 0x00, 0x00, 0x10, 0x00, 0x10,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x05, 0x12, 0x29, 0x49, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x7c, 0x61, 0x4f, 0x42, 0x3e, 0x2d,
+ 0x31, 0x1a, 0x33, 0x33, 0x33, 0x52, 0x40,
+ 0x47, 0x38, 0x34, 0x26, 0x0e, 0x06, 0x7c,
+ 0x61, 0x4f, 0x42, 0x3e, 0x2d, 0x31, 0x1a,
+ 0x33, 0x33, 0x33, 0x52, 0x40, 0x47, 0x38,
+ 0x34, 0x26, 0x0e, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xcc, 0x76, 0x12, 0x34, 0x44, 0x44,
+ 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0f,
+ 0x00, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x54, 0x94, 0x02, 0x85, 0x9f, 0x00,
+ 0x6f, 0x00, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
+ 0x22, 0x20, 0x44, 0xff, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x87, 0x47, 0x05, 0x05, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1e, 0x1e, 0x1f, 0x1f,
+ 0x0f, 0x0f, 0x0d, 0x0d, 0x13, 0x13, 0x11,
+ 0x11, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc4, 0x06, 0x06, 0x04, 0x04, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1e, 0x1e, 0x1f, 0x1f,
+ 0x0e, 0x0e, 0x0c, 0x0c, 0x12, 0x12, 0x10,
+ 0x10, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0xcb, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x0e, 0x4b, 0x4b, 0x20, 0x19, 0x6b,
+ 0x06, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x40, 0x0d, 0xff, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0xe3, 0x2b, 0x38, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x33, 0x20, 0x3a, 0xd5, 0x86, 0xf3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd4, 0x00, 0x01, 0x00, 0x0e, 0x04, 0x44,
+ 0x08, 0x10, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x80, 0x09, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x12, 0x03, 0x20, 0x00, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
}
static int boe_th101mb31ig002_disable(struct drm_panel *panel)
@@ -114,21 +182,21 @@ static int boe_th101mb31ig002_disable(struct drm_panel *panel)
struct boe_th101mb31ig002 *ctx = container_of(panel,
struct boe_th101mb31ig002,
panel);
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(dev, "Failed to set panel off: %d\n", ret);
+ if (ctx->desc->backlight_off_to_display_off_delay_ms)
+ mipi_dsi_msleep(&dsi_ctx, ctx->desc->backlight_off_to_display_off_delay_ms);
- msleep(120);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ if (ctx->desc->enter_sleep_to_reset_down_delay_ms)
+ mipi_dsi_msleep(&dsi_ctx, ctx->desc->enter_sleep_to_reset_down_delay_ms);
+
+ return dsi_ctx.accum_err;
}
static int boe_th101mb31ig002_unprepare(struct drm_panel *panel)
@@ -141,6 +209,9 @@ static int boe_th101mb31ig002_unprepare(struct drm_panel *panel)
gpiod_set_value_cansleep(ctx->enable, 0);
regulator_disable(ctx->power);
+ if (ctx->desc->power_off_delay_ms)
+ msleep(ctx->desc->power_off_delay_ms);
+
return 0;
}
@@ -158,10 +229,25 @@ static int boe_th101mb31ig002_prepare(struct drm_panel *panel)
return ret;
}
+ if (ctx->desc->vcioo_to_lp11_delay_ms)
+ msleep(ctx->desc->vcioo_to_lp11_delay_ms);
+
+ if (ctx->desc->lp11_before_reset) {
+ ret = mipi_dsi_dcs_nop(ctx->dsi);
+ if (ret)
+ return ret;
+ }
+
+ if (ctx->desc->lp11_to_reset_delay_ms)
+ msleep(ctx->desc->lp11_to_reset_delay_ms);
+
gpiod_set_value_cansleep(ctx->enable, 1);
msleep(50);
boe_th101mb31ig002_reset(ctx);
- boe_th101mb31ig002_enable(panel);
+
+ ret = ctx->desc->init(ctx);
+ if (ret)
+ return ret;
return 0;
}
@@ -181,39 +267,62 @@ static const struct drm_display_mode boe_th101mb31ig002_default_mode = {
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
+static const struct panel_desc boe_th101mb31ig002_desc = {
+ .modes = &boe_th101mb31ig002_default_mode,
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_MODE_LPM,
+ .init = boe_th101mb31ig002_enable,
+};
+
+static const struct drm_display_mode starry_er88577_default_mode = {
+ .clock = (800 + 25 + 25 + 25) * (1280 + 20 + 4 + 12) * 60 / 1000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 25,
+ .hsync_end = 800 + 25 + 25,
+ .htotal = 800 + 25 + 25 + 25,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 12,
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc starry_er88577_desc = {
+ .modes = &starry_er88577_default_mode,
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init = starry_er88577_init_cmd,
+ .lp11_before_reset = true,
+ .vcioo_to_lp11_delay_ms = 5,
+ .lp11_to_reset_delay_ms = 50,
+ .backlight_off_to_display_off_delay_ms = 100,
+ .enter_sleep_to_reset_down_delay_ms = 100,
+ .power_off_delay_ms = 1000,
+};
+
static int boe_th101mb31ig002_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct boe_th101mb31ig002 *ctx = container_of(panel,
struct boe_th101mb31ig002,
panel);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev,
- &boe_th101mb31ig002_default_mode);
- if (!mode) {
- dev_err(panel->dev, "Failed to add mode %ux%u@%u\n",
- boe_th101mb31ig002_default_mode.hdisplay,
- boe_th101mb31ig002_default_mode.vdisplay,
- drm_mode_vrefresh(&boe_th101mb31ig002_default_mode));
- return -ENOMEM;
- }
-
- drm_mode_set_name(mode);
+ const struct drm_display_mode *desc_mode = ctx->desc->modes;
connector->display_info.bpc = 8;
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
-
/*
* TODO: Remove once all drm drivers call
* drm_connector_set_orientation_from_panel()
*/
drm_connector_set_panel_orientation(connector, ctx->orientation);
- drm_mode_probed_add(connector, mode);
-
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, desc_mode);
}
static enum drm_panel_orientation
@@ -237,6 +346,7 @@ static const struct drm_panel_funcs boe_th101mb31ig002_funcs = {
static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
{
struct boe_th101mb31ig002 *ctx;
+ const struct panel_desc *desc;
int ret;
ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
@@ -246,11 +356,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- dsi->lanes = 4;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_NO_EOT_PACKET |
- MIPI_DSI_MODE_LPM;
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->lanes = desc->lanes;
+ dsi->format = desc->format;
+ dsi->mode_flags = desc->mode_flags;
+ ctx->desc = desc;
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power))
@@ -262,7 +372,7 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->enable),
"Failed to get enable GPIO\n");
- ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_HIGH);
+ ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
"Failed to get reset GPIO\n");
@@ -302,7 +412,14 @@ static void boe_th101mb31ig002_dsi_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id boe_th101mb31ig002_of_match[] = {
- { .compatible = "boe,th101mb31ig002-28a", },
+ {
+ .compatible = "boe,th101mb31ig002-28a",
+ .data = &boe_th101mb31ig002_desc
+ },
+ {
+ .compatible = "starry,er88577",
+ .data = &starry_er88577_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_th101mb31ig002_of_match);
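The boe_th101mb31ig002 conversion above is a good illustration of the mipi_dsi_multi_context helpers from drm_mipi_dsi.h: the context latches the first transfer error in accum_err, every later *_multi call (and mipi_dsi_msleep()) becomes a no-op once an error is pending, and the whole sequence needs a single error check at the end instead of one per command. A minimal sketch of the pattern, using only helpers already present in this patch (the panel_power_on() name is hypothetical):

	static int panel_power_on(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

		/* Skipped automatically once dsi_ctx.accum_err is non-zero */
		mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
		mipi_dsi_msleep(&dsi_ctx, 120);	/* the delay is also skipped on error */
		mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

		/* One check at the end replaces per-command error handling */
		return dsi_ctx.accum_err;
	}

This is why the converted enable/disable paths can drop their dev_err()/return plumbing without losing error reporting: the multi helpers log the failing command themselves.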
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
new file mode 100644
index 000000000000..50e4a5341bc6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+// Copyright (c) 2013, The Linux Foundation. All rights reserved.
+// Copyright (c) 2024, Neil Armstrong <neil.armstrong@linaro.org>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_tv101wum_ll2 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data boe_tv101wum_ll2_supplies[] = {
+ { .supply = "vsp" },
+ { .supply = "vsn" },
+};
+
+static inline struct boe_tv101wum_ll2 *to_boe_tv101wum_ll2(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_tv101wum_ll2, panel);
+}
+
+static void boe_tv101wum_ll2_reset(struct boe_tv101wum_ll2 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ msleep(120);
+}
+
+static int boe_tv101wum_ll2_on(struct boe_tv101wum_ll2 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x5a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0xff, 0x81, 0x68, 0x6c, 0x22,
+ 0x6d, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x5a, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x2c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x5a, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa2, 0x38);
+
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x50, 0x5a, 0x0c);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x80, 0xfd);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x50, 0x00);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static void boe_tv101wum_ll2_off(struct boe_tv101wum_ll2 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 70);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x5a);
+
+ mipi_dsi_msleep(&dsi_ctx, 150);
+}
+
+static int boe_tv101wum_ll2_prepare(struct drm_panel *panel)
+{
+ struct boe_tv101wum_ll2 *ctx = to_boe_tv101wum_ll2(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(boe_tv101wum_ll2_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ boe_tv101wum_ll2_reset(ctx);
+
+ ret = boe_tv101wum_ll2_on(ctx);
+ if (ret < 0) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_tv101wum_ll2_supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_tv101wum_ll2_unprepare(struct drm_panel *panel)
+{
+ struct boe_tv101wum_ll2 *ctx = to_boe_tv101wum_ll2(panel);
+
+ /* Ignore errors here; set the GPIO and disable the regulators in any case */
+ boe_tv101wum_ll2_off(ctx);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(boe_tv101wum_ll2_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode boe_tv101wum_ll2_mode = {
+ .clock = (1200 + 27 + 8 + 12) * (1920 + 155 + 8 + 32) * 60 / 1000,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 27,
+ .hsync_end = 1200 + 27 + 8,
+ .htotal = 1200 + 27 + 8 + 12,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 155,
+ .vsync_end = 1920 + 155 + 8,
+ .vtotal = 1920 + 155 + 8 + 32,
+ .width_mm = 136,
+ .height_mm = 217,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int boe_tv101wum_ll2_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ /* We do not set display_info.bpc since it already defaults to 8 when left unset */
+ return drm_connector_helper_get_modes_fixed(connector, &boe_tv101wum_ll2_mode);
+}
+
+static const struct drm_panel_funcs boe_tv101wum_ll2_panel_funcs = {
+ .prepare = boe_tv101wum_ll2_prepare,
+ .unprepare = boe_tv101wum_ll2_unprepare,
+ .get_modes = boe_tv101wum_ll2_get_modes,
+};
+
+static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct boe_tv101wum_ll2 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ARRAY_SIZE(boe_tv101wum_ll2_supplies),
+ boe_tv101wum_ll2_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_HSE;
+
+ drm_panel_init(&ctx->panel, dev, &boe_tv101wum_ll2_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void boe_tv101wum_ll2_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_tv101wum_ll2 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id boe_tv101wum_ll2_of_match[] = {
+ { .compatible = "boe,tv101wum-ll2" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_tv101wum_ll2_of_match);
+
+static struct mipi_dsi_driver boe_tv101wum_ll2_driver = {
+ .probe = boe_tv101wum_ll2_probe,
+ .remove = boe_tv101wum_ll2_remove,
+ .driver = {
+ .name = "panel-boe-tv101wum_ll2",
+ .of_match_table = boe_tv101wum_ll2_of_match,
+ },
+};
+module_mipi_dsi_driver(boe_tv101wum_ll2_driver);
+
+MODULE_DESCRIPTION("DRM driver for BOE TV101WUM-LL2 Panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index ce919a980875..3e5b0d8636d0 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -54,12 +54,22 @@ struct boe_panel {
struct gpio_desc *enable_gpio;
};
+#define NT36523_DCS_SWITCH_PAGE 0xff
+
+#define nt36523_switch_page(ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(ctx, NT36523_DCS_SWITCH_PAGE, (page))
+
+static void nt36523_enable_reload_cmds(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+}
+
static int boe_tv110c9m_init(struct boe_panel *boe)
{
struct mipi_dsi_multi_context ctx = { .dsi = boe->dsi };
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0xd9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x78);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x5a);
@@ -99,16 +109,14 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
0x03, 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x21);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x21);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
@@ -119,89 +127,66 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x24);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x01, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x1c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x03, 0x1c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x04, 0x1d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x1d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x04);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x09, 0x0f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0a, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0b, 0x0e);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0c, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0d, 0x0d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0e, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0f, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x10, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x11, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x12, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x13, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x15, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x16, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x17, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x1c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x1c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1a, 0x1d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x1d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1c, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x04);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x0f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x20, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x21, 0x0e);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x0d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x27, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x28, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x29, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x33, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x34, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x37, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x38, 0x40);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x39, 0x00);
@@ -244,7 +229,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdb, 0x05);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdc, 0xa9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdd, 0x22);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdf, 0x05);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe0, 0xa9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe1, 0x05);
@@ -258,8 +242,9 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x8d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x8e, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x90);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x25);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x07);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x60);
@@ -281,26 +266,22 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0x60);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x62, 0x50);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xf1, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x64, 0x16);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x16);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6a, 0x16);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x70, 0x30);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa2, 0xf3);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa3, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa4, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa5, 0xff);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd6, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x26);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0xa1);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x04, 0x28);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0x30);
@@ -323,7 +304,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x7f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x65);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x65);
@@ -343,7 +323,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc9, 0x9e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xca, 0x4e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xcb, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa9, 0x49);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xaa, 0x4b);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xab, 0x48);
@@ -373,9 +352,9 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x4f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc4, 0x3a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x42);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x27);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x56, 0x06);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x59, 0x75);
@@ -394,17 +373,14 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x44);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x78, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x2f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x65);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0xf8);
@@ -415,30 +391,30 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x1a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x23);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x23);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xe0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xe0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0x60);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x16, 0xc0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xf0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x40);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x51, 0x00, 0xff);
@@ -464,13 +440,12 @@ static int inx_hj110iz_init(struct boe_panel *boe)
{
struct mipi_dsi_multi_context ctx = { .dsi = boe->dsi };
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0xd1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0xc0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x87);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x4b);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0d, 0x63);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0e, 0x91);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0f, 0x69);
@@ -482,10 +457,10 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x69, 0x98);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x75, 0xa2);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x77, 0xb3);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x43);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x24);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x91, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x92, 0x4c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x94, 0x86);
@@ -493,7 +468,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0xd0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x63, 0x70);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc2, 0xca);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x01, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x03);
@@ -538,7 +512,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x29, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x03);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x35);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x37, 0xa7);
@@ -546,7 +519,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x32);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3d, 0x12);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3f, 0x33);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x40, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x41, 0x40);
@@ -556,7 +528,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4a, 0x45);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4b, 0x45);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4c, 0x14);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4d, 0x21);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4e, 0x43);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4f, 0x65);
@@ -569,7 +540,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5c, 0x88);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5e, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5f, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7b, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7c, 0x00);
@@ -581,7 +551,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x82, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x97, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd7, 0x55);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd8, 0x55);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd9, 0x23);
@@ -609,43 +578,32 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x05, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
-
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x25);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xf1, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x20, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x27, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3f, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x40, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x43, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x44, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x45, 0x46);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x48, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x49, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5b, 0x80);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5c, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5d, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5e, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5f, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x60, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x62, 0x32);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6c, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6e, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x78, 0x00);
@@ -653,9 +611,8 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7b, 0xb0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x26);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0xa1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0a, 0xf4);
@@ -674,18 +631,15 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x86);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1a, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1c, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x31);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x62);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x06);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x31, 0x06);
@@ -693,11 +647,9 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x33, 0x11);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x34, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x67);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x39, 0x0b);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x06);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc8, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc9, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xca, 0x4e);
@@ -711,21 +663,18 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xaf, 0x39);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x38);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x27);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd0, 0x11);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd1, 0x54);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xde, 0x43);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdf, 0x02);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc0, 0x18);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc1, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc2, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x56, 0x06);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x59, 0x78);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5a, 0x00);
@@ -743,20 +692,17 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x44);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x98, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9b, 0xbe);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xab, 0x14);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbc, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbd, 0x28);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x2f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0xf8);
@@ -766,7 +712,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x1a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x1a);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x64, 0x96);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x65, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
@@ -783,14 +728,11 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x88, 0x96);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x89, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa2, 0x3f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa3, 0x30);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa4, 0xc0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa5, 0x03);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe8, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x97, 0x3c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x98, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x99, 0x95);
@@ -800,7 +742,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9d, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9e, 0x90);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
+ nt36523_switch_page(&ctx, 0x25);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x13, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0xd7);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdb, 0x02);
@@ -809,8 +751,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x5b);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
-
+ nt36523_switch_page(&ctx, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x24, 0x00, 0x38,
0x00, 0x4c, 0x00, 0x5e, 0x00, 0x6f, 0x00, 0x7e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x8c, 0x00, 0xbe, 0x00, 0xe5, 0x01, 0x27,
@@ -819,7 +760,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x00, 0x03, 0x31, 0x03, 0x40, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x62, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x27, 0x00, 0x3d,
0x00, 0x52, 0x00, 0x64, 0x00, 0x75, 0x00, 0x84);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x00, 0x93, 0x00, 0xc5, 0x00, 0xec, 0x01, 0x2c,
@@ -828,7 +768,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x01, 0x03, 0x31, 0x03, 0x41, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x63, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x2a, 0x00, 0x40,
0x00, 0x56, 0x00, 0x68, 0x00, 0x7a, 0x00, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0x98, 0x00, 0xc9, 0x00, 0xf1, 0x01, 0x30,
@@ -838,7 +777,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x66, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x21);
+ nt36523_switch_page(&ctx, 0x21);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x24, 0x00, 0x38,
0x00, 0x4c, 0x00, 0x5e, 0x00, 0x6f, 0x00, 0x7e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x8c, 0x00, 0xbe, 0x00, 0xe5, 0x01, 0x27,
@@ -847,7 +786,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x00, 0x03, 0x31, 0x03, 0x40, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x62, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x27, 0x00, 0x3d,
0x00, 0x52, 0x00, 0x64, 0x00, 0x75, 0x00, 0x84);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x00, 0x93, 0x00, 0xc5, 0x00, 0xec, 0x01, 0x2c,
@@ -856,7 +794,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x01, 0x03, 0x31, 0x03, 0x41, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x63, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x2a, 0x00, 0x40,
0x00, 0x56, 0x00, 0x68, 0x00, 0x7a, 0x00, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0x98, 0x00, 0xc9, 0x00, 0xf1, 0x01, 0x30,
@@ -866,22 +803,21 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x66, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xf0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
+ nt36523_switch_page(&ctx, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
-
+ nt36523_switch_page(&ctx, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x40);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
+ nt36523_switch_page(&ctx, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x02);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x03, 0xae, 0x1a, 0x04, 0x04);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 3a574a9b46e7..767e47a2b0c1 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -954,16 +954,24 @@ static void panel_edp_shutdown(struct device *dev)
* drm_atomic_helper_shutdown() at shutdown time and that should
* cause the panel to be disabled / unprepared if needed. For now,
* however, we'll keep these calls due to the sheer number of
- * different DRM modeset drivers used with panel-edp. The fact that
- * we're calling these and _also_ the drm_atomic_helper_shutdown()
- * will try to disable/unprepare means that we can get a warning about
- * trying to disable/unprepare an already disabled/unprepared panel,
- * but that's something we'll have to live with until we've confirmed
- * that all DRM modeset drivers are properly calling
- * drm_atomic_helper_shutdown().
+ * different DRM modeset drivers used with panel-edp. Once we've
+ * confirmed that all DRM modeset drivers using this panel properly
+ * call drm_atomic_helper_shutdown() we can simply delete the two
+ * calls below.
+ *
+ * TO BE EXPLICIT: THE CALLS BELOW SHOULDN'T BE COPIED TO ANY NEW
+ * PANEL DRIVERS.
+ *
+ * FIXME: If we still haven't figured out whether all DRM modeset
+ * drivers properly call drm_atomic_helper_shutdown(), but we _have_
+ * managed to make sure that DRM modeset drivers get their shutdown()
+ * callback before the panel's shutdown() callback (perhaps using a
+ * device link), we could add a WARN_ON here to help move forward.
*/
- drm_panel_disable(&panel->base);
- drm_panel_unprepare(&panel->base);
+ if (panel->base.enabled)
+ drm_panel_disable(&panel->base);
+ if (panel->base.prepared)
+ drm_panel_unprepare(&panel->base);
}
static void panel_edp_remove(struct device *dev)
@@ -1845,7 +1853,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1891,15 +1902,19 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ad, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a1b, &delay_200_500_e50, "NV133WUM-N63"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1915,8 +1930,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115d, &delay_200_500_e80_d50, "N116BCA-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@@ -1929,9 +1946,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
- EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "MB116AN01"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"),
@@ -1960,8 +1978,6 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('L', 'G', 'D', 0x05af, &delay_200_500_e200_d200, "Unknown"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x05f1, &delay_200_500_e200_d200, "Unknown"),
- EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
-
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"),
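For reference, the edp_panels[] additions above are matched by the EDID panel ID, which packs the three-character vendor code and the 16-bit product id into one u32 (drm_edid_encode_panel_id() in drm_edid.h builds the same key that EDP_PANEL_ENTRY stores). A rough sketch of the lookup side, modelled on panel-edp.c's own matching (details simplified):

	static const struct edp_panel_entry *find_edp_panel(u32 panel_id)
	{
		const struct edp_panel_entry *panel;

		/* panel_id is read from the panel's EDID at probe time */
		for (panel = edp_panels; panel->panel_id; panel++)
			if (panel->panel_id == panel_id)
				return panel;

		return NULL;
	}

So a new entry such as the NV116WHM-A4D line only takes effect for panels whose EDID reports exactly BOE/0x0cfa.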
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index cb9f46e853de..92b03a2f65a3 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -339,6 +339,156 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = {
.init_sequence = powkiddy_x55_init_sequence,
};
+static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* DCS commands do not seem to be sent correctly without this delay */
+ msleep(20);
+
+ /* 5.19.8 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
+
+ /* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
+ 0x71, 0x71, 0x57, 0x47);
+
+ /* 5.19.3 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
+
+ /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86);
+
+ /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
+
+ /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
+ 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
+ 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
+ 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
+ 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
+
+ /* 5.19.20 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
+ 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 5.19.21 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
+ 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
+ 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
+ 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
+ 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
+ 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
+ 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
+ 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
+ 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
+ 0x6b, 0x72, 0x7f, 0x7f);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (C0H) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x73);
+
+ /* Set CABC control (C9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC,
+ 0x76, 0x00, 0x30);
+
+ /* 5.19.17 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+ 0x0b);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (D4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0x02);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (D8h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (C6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
+ 0xed);
+
+ return 0;
+}
+
+static const struct drm_display_mode mchp_ac40t08a_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 12,
+ .hsync_end = 720 + 12 + 24,
+ .htotal = 720 + 12 + 24 + 12,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 13,
+ .vsync_end = 1280 + 14,
+ .vtotal = 1280 + 14 + 13,
+ .clock = 60226,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 76,
+ .height_mm = 132,
+};
+
+static const struct hx8394_panel_desc mchp_ac40t08a_desc = {
+ .mode = &mchp_ac40t08a_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = mchp_ac40t08a_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -486,7 +636,7 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset gpio\n");
@@ -555,6 +705,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
{ .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
+ { .compatible = "microchip,ac40t08a-mipi-panel", .data = &mchp_ac40t08a_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hx8394_of_match);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 775d5d5e828c..1fbc5d433d75 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -121,19 +121,19 @@ struct ili9341_config {
const struct drm_display_mode mode;
/* ca: TODO: need comments for this register */
u8 ca[ILI9341_CA_LEN];
- /* power_b: TODO: need comments for this register */
+ /* power_b: Power control B (CFh) */
u8 power_b[ILI9341_POWER_B_LEN];
- /* power_seq: TODO: need comments for this register */
+ /* power_seq: Power on sequence control (EDh) */
u8 power_seq[ILI9341_POWER_SEQ_LEN];
- /* dtca: TODO: need comments for this register */
+ /* dtca: Driver timing control A (E8h) */
u8 dtca[ILI9341_DTCA_LEN];
- /* dtcb: TODO: need comments for this register */
+ /* dtcb: Driver timing control B (EAh) */
u8 dtcb[ILI9341_DTCB_LEN];
- /* power_a: TODO: need comments for this register */
+ /* power_a: Power control A (CBh) */
u8 power_a[ILI9341_POWER_A_LEN];
/* frc: Frame Rate Control (In Normal Mode/Full Colors) (B1h) */
u8 frc[ILI9341_FRC_LEN];
- /* prc: TODO: need comments for this register */
+ /* prc: Pump ratio control (F7h) */
u8 prc;
/* dfc_1: B6h DISCTRL (Display Function Control) */
u8 dfc_1[ILI9341_DFC_1_LEN];
@@ -147,7 +147,7 @@ struct ili9341_config {
u8 vcom_2;
/* address_mode: Memory Access Control (36h) */
u8 address_mode;
- /* g3amma_en: TODO: need comments for this register */
+ /* g3amma_en: Enable 3G (F2h) */
u8 g3amma_en;
/* rgb_interface: RGB Interface Signal Control (B0h) */
u8 rgb_interface;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
index e4a44cd26c4d..a3c79ad99d0b 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
@@ -380,7 +380,172 @@ static const struct panel_desc com35h3p70ulc_desc = {
.lanes = 2,
};
+static void dmt028vghmcmi_1d_init(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x08, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x30, 0x03);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x31, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x60, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x61, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x62, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x63, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x40, 0x16);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x41, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x42, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x43, 0x83);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x44, 0x89);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x45, 0x8a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x46, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x47, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x50, 0x78);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x51, 0x78);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x52, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x55, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x56, 0x00);
+ /* Gamma settings */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa1, 0x09);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa2, 0x14);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa3, 0x09);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa4, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa5, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa6, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa7, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa8, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa9, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xaa, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xab, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xac, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xad, 0x19);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xae, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xaf, 0x00);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc1, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc2, 0x14);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc3, 0x11);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc4, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc5, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc6, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc7, 0x03);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc8, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc9, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xca, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcc, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcd, 0x15);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xce, 0x13);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcf, 0x00);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x17, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x02, 0x77);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xe1, 0x79);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x06);
+ /* GIP 0 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x00, 0x21);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x02, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x04, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x05, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x98);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x08, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x09, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0d, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x10, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x11, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x12, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x13, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x14, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x15, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x16, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x19, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1d, 0x00);
+ /* GIP 1 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x20, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x23);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x22, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x23, 0x67);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x24, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x25, 0x23);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x26, 0x45);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x27, 0x67);
+ /* GIP 2 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x30, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x31, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x32, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x33, 0xbc);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x34, 0xad);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x35, 0xda);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x36, 0xcb);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x37, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x38, 0x55);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x39, 0x76);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3a, 0x67);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3b, 0x88);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3c, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3d, 0x11);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3f, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x40, 0x22);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x52, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x54, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x00);
+};
+
+static const struct drm_display_mode dmt028vghmcmi_1d_default_mode = {
+ .clock = 22000,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 20,
+ .hsync_end = 480 + 20 + 4,
+ .htotal = 480 + 20 + 4 + 10,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 40,
+ .vsync_end = 640 + 40 + 4,
+ .vtotal = 640 + 40 + 4 + 20,
+
+ .width_mm = 53,
+ .height_mm = 79,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc dmt028vghmcmi_1d_desc = {
+ .init_sequence = dmt028vghmcmi_1d_init,
+ .display_mode = &dmt028vghmcmi_1d_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 2,
+};
+
static const struct of_device_id ili9806e_of_match[] = {
+ { .compatible = "densitron,dmt028vghmcmi-1d", .data = &dmt028vghmcmi_1d_desc },
{ .compatible = "ortustech,com35h3p70ulc", .data = &com35h3p70ulc_desc },
{ }
};
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index c6b669866fed..44897e5218a6 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -31,8 +31,6 @@ struct jadard_panel_desc {
bool reset_before_power_off_vcioo;
unsigned int vcioo_to_lp11_delay_ms;
unsigned int lp11_to_reset_delay_ms;
- unsigned int exit_sleep_to_display_on_delay_ms;
- unsigned int display_on_delay_ms;
unsigned int backlight_off_to_display_off_delay_ms;
unsigned int display_off_to_enter_sleep_delay_ms;
unsigned int enter_sleep_to_reset_down_delay_ms;
@@ -48,29 +46,22 @@ struct jadard {
struct gpio_desc *reset;
};
-static inline struct jadard *panel_to_jadard(struct drm_panel *panel)
+#define JD9365DA_DCS_SWITCH_PAGE 0xe0
+
+#define jd9365da_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, JD9365DA_DCS_SWITCH_PAGE, (page))
+
+static void jadard_enable_standard_cmds(struct mipi_dsi_multi_context *dsi_ctx)
{
- return container_of(panel, struct jadard, panel);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe1, 0x93);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe2, 0x65);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe3, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x80, 0x03);
}
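The new helper macro is purely mechanical: each jd9365da_switch_page() call expands to the same single-byte 0xE0 write the drivers open-coded before, so the conversions below change nothing on the wire. For illustration:

	/* jd9365da_switch_page(&dsi_ctx, 0x01) expands to: */
	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, JD9365DA_DCS_SWITCH_PAGE, 0x01);
	/* which is byte-identical to the old literal form: */
	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x01);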
-static int jadard_enable(struct drm_panel *panel)
+static inline struct jadard *panel_to_jadard(struct drm_panel *panel)
{
- struct jadard *jadard = panel_to_jadard(panel);
- struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
-
- msleep(120);
-
- mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
-
- if (jadard->desc->exit_sleep_to_display_on_delay_ms)
- mipi_dsi_msleep(&dsi_ctx, jadard->desc->exit_sleep_to_display_on_delay_ms);
-
- mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
-
- if (jadard->desc->display_on_delay_ms)
- mipi_dsi_msleep(&dsi_ctx, jadard->desc->display_on_delay_ms);
-
- return dsi_ctx.accum_err;
+ return container_of(panel, struct jadard, panel);
}
static int jadard_disable(struct drm_panel *panel)
@@ -189,7 +180,6 @@ static const struct drm_panel_funcs jadard_funcs = {
.disable = jadard_disable,
.unprepare = jadard_unprepare,
.prepare = jadard_prepare,
- .enable = jadard_enable,
.get_modes = jadard_get_modes,
.get_orientation = jadard_panel_get_orientation,
};
@@ -198,12 +188,10 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE3, 0xF8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x7E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x00);
@@ -276,7 +264,8 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x37);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x23);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x10);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x47);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x47);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x45);
@@ -360,13 +349,21 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7C, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7D, 0x03);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7E, 0x7B);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xB3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x60);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0E, 0x2A);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x59);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
return dsi_ctx.accum_err;
};
@@ -398,12 +395,10 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE3, 0xF8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x3B);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0C, 0x74);
@@ -471,7 +466,8 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x20);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x0F);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x00);
@@ -584,15 +580,23 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7A, 0x17);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7D, 0x14);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7E, 0x82);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xB3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x61);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0E, 0x48);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE6, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE7, 0x0C);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
return dsi_ctx.accum_err;
};
@@ -623,12 +627,10 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0xf8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0xc7);
@@ -694,7 +696,8 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x26);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x14);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x02);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x52);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x5f);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x5f);
@@ -808,12 +811,24 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x05);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0e);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xb3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x61);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x48);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
return dsi_ctx.accum_err;
};
@@ -843,8 +858,257 @@ static const struct jadard_panel_desc kingdisplay_kd101ne3_40ti_desc = {
.reset_before_power_off_vcioo = true,
.vcioo_to_lp11_delay_ms = 5,
.lp11_to_reset_delay_ms = 10,
- .exit_sleep_to_display_on_delay_ms = 120,
- .display_on_delay_ms = 20,
+ .backlight_off_to_display_off_delay_ms = 100,
+ .display_off_to_enter_sleep_delay_ms = 50,
+ .enter_sleep_to_reset_down_delay_ms = 100,
+};
+
+static int melfas_lmfbx101117480_init_cmds(struct jadard *jadard)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0xd7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0xd7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x7e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0xfd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3a, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3c, 0x7e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3d, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3e, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x1e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x6a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5e, 0x56);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x3e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6c, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x71, 0x56);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x72, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x73, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x74, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x79, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7a, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7b, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7c, 0x3e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x4a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x46);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x4d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x4b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x45);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x41);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x26, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2d, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x30, 0x37);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x31, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3a, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3c, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3d, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x42, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x45, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x46, 0x37);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4f, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x53, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x54, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0xb4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x2a);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2e, 0x03);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe7, 0x06);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+};
+
+static const struct jadard_panel_desc melfas_lmfbx101117480_desc = {
+ .mode = {
+ .clock = (800 + 24 + 24 + 24) * (1280 + 30 + 4 + 8) * 60 / 1000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 24,
+ .hsync_end = 800 + 24 + 24,
+ .htotal = 800 + 24 + 24 + 24,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 30,
+ .vsync_end = 1280 + 30 + 4,
+ .vtotal = 1280 + 30 + 4 + 8,
+
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init = melfas_lmfbx101117480_init_cmds,
+ .lp11_before_reset = true,
+ .reset_before_power_off_vcioo = true,
+ .vcioo_to_lp11_delay_ms = 5,
+ .lp11_to_reset_delay_ms = 10,
.backlight_off_to_display_off_delay_ms = 100,
.display_off_to_enter_sleep_delay_ms = 50,
.enter_sleep_to_reset_down_delay_ms = 100,
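(For reference, the inline clock expression in the new melfas mode evaluates to 872 * 1322 * 60 / 1000 = 69167 kHz — computed here, not stated in the patch — i.e. an 800x1280@60 pixel clock of about 69.2 MHz.)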
@@ -927,6 +1191,10 @@ static const struct of_device_id jadard_of_match[] = {
.data = &kingdisplay_kd101ne3_40ti_desc
},
{
+ .compatible = "melfas,lmfbx101117480",
+ .data = &melfas_lmfbx101117480_desc
+ },
+ {
.compatible = "radxa,display-10hd-ad001",
.data = &cz101b4001_desc
},
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 483dc88d16d8..4eb71e85e9e9 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -41,142 +41,89 @@ static void jdi_fhd_r63452_reset(struct jdi_fhd_r63452 *ctx)
static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
- mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
- mipi_dsi_generic_write_seq(dsi, 0xec,
- 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
- 0x13, 0x15, 0x68, 0x0b, 0xb5);
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd6, 0x01);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0xb5);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 0x0437);
- if (ret < 0) {
- dev_err(dev, "Failed to set column address: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 0x077f);
- if (ret < 0) {
- dev_err(dev, "Failed to set page address: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x0000);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear scanline: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x77);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x0437);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x077f);
+ mipi_dsi_dcs_set_tear_scanline_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00ff);
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x84, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 80);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x84, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc8, 0x11);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(80);
-
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
- mipi_dsi_generic_write_seq(dsi, 0xc8, 0x11);
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
-
- return 0;
+ return dsi_ctx.accum_err;
}
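This hunk is the template for the rest of the series. A minimal sketch of the accumulated-error pattern behind it (hypothetical_panel_on() is illustrative only, not part of any driver in this patch):

static int hypothetical_panel_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

	/*
	 * Every *_multi() helper becomes a no-op once accum_err is set,
	 * so the sequence needs no per-call checks or dev_err() logging.
	 */
	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 120);	/* the delay is also skipped on error */
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	/* 0 on success, otherwise the first error encountered */
	return dsi_ctx.accum_err;
}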
-static int jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx)
+static void jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
- mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
- mipi_dsi_generic_write_seq(dsi, 0xec,
- 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
- 0x13, 0x15, 0x68, 0x0b, 0x95);
- mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
-
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- usleep_range(2000, 3000);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
-
- return 0;
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd6, 0x01);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0x95);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 2000, 3000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int jdi_fhd_r63452_prepare(struct drm_panel *panel)
{
struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
jdi_fhd_r63452_reset(ctx);
ret = jdi_fhd_r63452_on(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ if (ret < 0)
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- return ret;
- }
- return 0;
+ return ret;
}
static int jdi_fhd_r63452_unprepare(struct drm_panel *panel)
{
struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = jdi_fhd_r63452_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ /*
+ * NOTE: We don't return an error here because, while the panel won't
+ * have been cleanly turned off, at least we've asserted the reset
+ * signal, so it should be safe to power it back on again later.
+ */
+ jdi_fhd_r63452_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index ea4a6bf6d35b..4db852ffb0f6 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -23,7 +23,7 @@
/* Manufacturer specific commands sent via DSI */
#define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
-#define MANTIX_CMD_INT_CANCEL 0x4C
+#define MANTIX_CMD_INT_CANCEL 0x4c
#define MANTIX_CMD_SPI_FINISH 0x90
struct mantix {
@@ -45,82 +45,57 @@ static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
return container_of(panel, struct mantix, panel);
}
-static int mantix_init_sequence(struct mantix *ctx)
+static void mantix_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- struct device *dev = ctx->dev;
-
/*
* Init sequence was supplied by the panel vendor.
*/
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A);
-
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_INT_CANCEL, 0x03);
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x03);
- mipi_dsi_generic_write_seq(dsi, 0x80, 0xA9, 0x00);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5a);
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x09);
- mipi_dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
- msleep(20);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_INT_CANCEL, 0x03);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5a, 0x03);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, 0x80, 0xa9, 0x00);
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
- mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
- msleep(20);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5a, 0x09);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
+ mipi_dsi_msleep(dsi_ctx, 20);
- dev_dbg(dev, "Panel init sequence done\n");
- return 0;
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_SPI_FINISH, 0xa5);
+ mipi_dsi_generic_write_seq_multi(dsi_ctx, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2f);
+ mipi_dsi_msleep(dsi_ctx, 20);
}
static int mantix_enable(struct drm_panel *panel)
{
struct mantix *ctx = panel_to_mantix(panel);
- struct device *dev = ctx->dev;
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- int ret;
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mantix_init_sequence(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- return ret;
- }
+ mantix_init_sequence(&dsi_ctx);
+ if (!dsi_ctx.accum_err)
+ dev_dbg(ctx->dev, "Panel init sequence done\n");
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode\n");
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret)
- return ret;
- usleep_range(10000, 12000);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 12000);
- ret = mipi_dsi_turn_on_peripheral(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to turn on peripheral\n");
- return ret;
- }
+ mipi_dsi_turn_on_peripheral_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int mantix_disable(struct drm_panel *panel)
{
struct mantix *ctx = panel_to_mantix(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(ctx->dev, "Failed to turn off the display: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
-
- return 0;
+ return dsi_ctx.accum_err;
}
static int mantix_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index 94d89ffd596b..5d115ecd5dd4 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -47,195 +47,196 @@ static inline struct panel_nv3051d *panel_to_panelnv3051d(struct drm_panel *pane
static int panel_nv3051d_init_sequence(struct panel_nv3051d *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
/*
* Init sequence was supplied by device vendor with no
* documentation.
*/
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xE3, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x12);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x1E);
- mipi_dsi_dcs_write_seq(dsi, 0x26, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x57);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x2A, 0xDF);
- mipi_dsi_dcs_write_seq(dsi, 0x38, 0x9C);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0xA7);
- mipi_dsi_dcs_write_seq(dsi, 0x3A, 0x53);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0x3C);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0xFE);
- mipi_dsi_dcs_write_seq(dsi, 0x5C, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x91, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0xA0, 0x55);
- mipi_dsi_dcs_write_seq(dsi, 0xA1, 0x50);
- mipi_dsi_dcs_write_seq(dsi, 0xA4, 0x9C);
- mipi_dsi_dcs_write_seq(dsi, 0xA7, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xA8, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xA9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xAA, 0xFC);
- mipi_dsi_dcs_write_seq(dsi, 0xAB, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xAC, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xAD, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xAE, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xAF, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0xB0, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33);
- mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0xD1, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x2B);
- mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C);
- mipi_dsi_dcs_write_seq(dsi, 0xD2, 0x0A);
- mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xD3, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xD6, 0x0D);
- mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0xD7, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xE1, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x0A);
- mipi_dsi_dcs_write_seq(dsi, 0xD8, 0x0A);
- mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xD9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xBD, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0xDD, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0xBC, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xDC, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xBB, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xDB, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xBA, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xDA, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xBE, 0x18);
- mipi_dsi_dcs_write_seq(dsi, 0xDE, 0x18);
- mipi_dsi_dcs_write_seq(dsi, 0xBF, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xDF, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xC0, 0x17);
- mipi_dsi_dcs_write_seq(dsi, 0xE0, 0x17);
- mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x3B);
- mipi_dsi_dcs_write_seq(dsi, 0xD5, 0x3C);
- mipi_dsi_dcs_write_seq(dsi, 0xB0, 0x0B);
- mipi_dsi_dcs_write_seq(dsi, 0xD0, 0x0C);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x61);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0xC7);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x83);
- mipi_dsi_dcs_write_seq(dsi, 0x30, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x31, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x61);
- mipi_dsi_dcs_write_seq(dsi, 0x35, 0xC5);
- mipi_dsi_dcs_write_seq(dsi, 0x36, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x37, 0x23);
- mipi_dsi_dcs_write_seq(dsi, 0x40, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x41, 0x83);
- mipi_dsi_dcs_write_seq(dsi, 0x42, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x43, 0x81);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x45, 0xF2);
- mipi_dsi_dcs_write_seq(dsi, 0x46, 0xF1);
- mipi_dsi_dcs_write_seq(dsi, 0x47, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0xF4);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0xF3);
- mipi_dsi_dcs_write_seq(dsi, 0x50, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x51, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x52, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x53, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0x54, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x55, 0xF6);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0xF5);
- mipi_dsi_dcs_write_seq(dsi, 0x57, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0xF8);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0xF7);
- mipi_dsi_dcs_write_seq(dsi, 0x7E, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x7F, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0xE0, 0x5A);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xBA, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xC7, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xCA, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0xCB, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0xCC, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xCE, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xCF, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xD0, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x81, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x84, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0x85, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x86, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x87, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x88, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x89, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x8A, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x97, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x9A, 0x0E);
- mipi_dsi_dcs_write_seq(dsi, 0x9B, 0x0F);
- mipi_dsi_dcs_write_seq(dsi, 0x9C, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x9D, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x9E, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x9F, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0xA0, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0xDA);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0xBA);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0xA8);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x9A);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x90);
- mipi_dsi_dcs_write_seq(dsi, 0x0A, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0x0B, 0x8F);
- mipi_dsi_dcs_write_seq(dsi, 0x0C, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x0D, 0x58);
- mipi_dsi_dcs_write_seq(dsi, 0x0E, 0x48);
- mipi_dsi_dcs_write_seq(dsi, 0x0F, 0x38);
- mipi_dsi_dcs_write_seq(dsi, 0x10, 0x2B);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
- mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x36, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x3A, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE3, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x1E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x26, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2A, 0xDF);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x9C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0xA7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3A, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x3C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0xFE);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5C, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA0, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA1, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA4, 0x9C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA7, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA8, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAA, 0xFC);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAB, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAC, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAD, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAE, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xAF, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB0, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB1, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB2, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB3, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB4, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB5, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB6, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB7, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB8, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB1, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD1, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB4, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD4, 0x2B);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB2, 0x0C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD2, 0x0A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB3, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD3, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB6, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD6, 0x0D);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB7, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD7, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xC1, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE1, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB8, 0x0A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD8, 0x0A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBD, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDD, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBC, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDC, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBB, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDB, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBA, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDA, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBE, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDE, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBF, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xDF, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xC0, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB5, 0x3B);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD5, 0x3C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB0, 0x0B);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD0, 0x0C);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x61);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0xC7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x83);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x30, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x31, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x2A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x61);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0xC5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0x83);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x42, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x45, 0xF2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x46, 0xF1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xF4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xF3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x53, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x54, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0xF6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0xF5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0xF8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0xF7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7E, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7F, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x5A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB1, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB4, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB5, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB6, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB7, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB8, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xB9, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xBA, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xC7, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCA, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCB, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCC, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCD, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCE, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xCF, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xD0, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x84, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x85, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x86, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x87, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x88, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x89, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8A, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9A, 0x0E);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9B, 0x0F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9C, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9D, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9E, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9F, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xA0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xDA);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0xBA);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0xA8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x9A);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0xFF);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x90);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0A, 0xFF);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0B, 0x8F);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0C, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0D, 0x58);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0E, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0F, 0x38);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x2B);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xFF, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3A, 0x70);
dev_dbg(ctx->dev, "Panel init sequence done\n");
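For reference (editorial note): mipi_dsi_dcs_write_seq_multi() is a macro from include/drm/drm_mipi_dsi.h; each call in the table above expands to roughly the following, so the whole sequence shares one accumulated error and needs no per-call checks:

	do {
		/* DCS command byte followed by its parameter(s) */
		static const u8 d[] = { 0x42, 0x80 };
		mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, d, ARRAY_SIZE(d));
	} while (0);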
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index d3bfdfc9cff6..57686340de49 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1166,7 +1166,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
bl->props.brightness = nt->conf->wrdisbv;
else
bl->props.brightness = 255;
- bl->props.power = FB_BLANK_POWERDOWN;
+ bl->props.power = BACKLIGHT_POWER_OFF;
nt->panel.backlight = bl;
}
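For context on this rename (editorial note): the BACKLIGHT_POWER_* constants in include/linux/backlight.h are defined to the FB_BLANK_* values they replace, so the substitution is one-to-one and existing comparisons keep their meaning:

	BACKLIGHT_POWER_ON	/* = FB_BLANK_UNBLANK (0) */
	BACKLIGHT_POWER_REDUCED	/* = FB_BLANK_NORMAL (1), deprecated */
	BACKLIGHT_POWER_OFF	/* = FB_BLANK_POWERDOWN (4) */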
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 028fdac293f7..b036208f9356 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -100,106 +100,87 @@ static void nt35950_reset(struct nt35950 *nt)
/*
* nt35950_set_cmd2_page - Select manufacturer control (CMD2) page
+ * @dsi_ctx: context for mipi_dsi functions
* @nt: Main driver structure
* @page: Page number (0-7)
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_set_cmd2_page(struct nt35950 *nt, u8 page)
+static void nt35950_set_cmd2_page(struct mipi_dsi_multi_context *dsi_ctx,
+ struct nt35950 *nt, u8 page)
{
const u8 mauc_cmd2_page[] = { MCS_CMD_MAUCCTR, 0x55, 0xaa, 0x52,
0x08, page };
- int ret;
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], mauc_cmd2_page,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, mauc_cmd2_page,
ARRAY_SIZE(mauc_cmd2_page));
- if (ret < 0)
- return ret;
-
- nt->last_page = page;
- return 0;
+ if (!dsi_ctx->accum_err)
+ nt->last_page = page;
}
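The rest of this conversion follows the same pattern: struct mipi_dsi_multi_context records the first failure in accum_err, and every *_multi() helper (including the sleep helpers) becomes a no-op once an error is pending. A minimal sketch of the calling convention (the helpers are the real API; the function itself is illustrative):

	static int example_panel_on(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

		mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
		mipi_dsi_msleep(&dsi_ctx, 120);	/* skipped if an error is pending */
		mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

		/* a single check replaces per-call error handling */
		return dsi_ctx.accum_err;
	}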
/*
* nt35950_set_data_compression - Set data compression mode
+ * @dsi_ctx: context for mipi_dsi functions
* @nt: Main driver structure
* @comp_mode: Compression mode
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_set_data_compression(struct nt35950 *nt, u8 comp_mode)
+static void nt35950_set_data_compression(struct mipi_dsi_multi_context *dsi_ctx,
+ struct nt35950 *nt, u8 comp_mode)
{
u8 cmd_data_compression[] = { MCS_PARAM_DATA_COMPRESSION, comp_mode };
u8 cmd_vesa_dsc_on[] = { MCS_PARAM_VESA_DSC_ON, !!comp_mode };
u8 cmd_vesa_dsc_setting[] = { MCS_PARAM_VESA_DSC_SETTING, 0x03 };
u8 last_page = nt->last_page;
- int ret;
/* Set CMD2 Page 0 if we're not there yet */
- if (last_page != 0) {
- ret = nt35950_set_cmd2_page(nt, 0);
- if (ret < 0)
- return ret;
- }
+ if (last_page != 0)
+ nt35950_set_cmd2_page(dsi_ctx, nt, 0);
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_data_compression,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_data_compression,
ARRAY_SIZE(cmd_data_compression));
- if (ret < 0)
- return ret;
-
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_on,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_vesa_dsc_on,
ARRAY_SIZE(cmd_vesa_dsc_on));
- if (ret < 0)
- return ret;
/* Set the vesa dsc setting on Page 4 */
- ret = nt35950_set_cmd2_page(nt, 4);
- if (ret < 0)
- return ret;
+ nt35950_set_cmd2_page(dsi_ctx, nt, 4);
/* Display Stream Compression setting, always 0x03 */
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_setting,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_vesa_dsc_setting,
ARRAY_SIZE(cmd_vesa_dsc_setting));
- if (ret < 0)
- return ret;
/* Get back to the previously set page */
- return nt35950_set_cmd2_page(nt, last_page);
+ nt35950_set_cmd2_page(dsi_ctx, nt, last_page);
}
/*
* nt35950_set_scaler - Enable/disable resolution upscaling
- * @nt: Main driver structure
+ * @dsi_ctx: context for mipi_dsi functions
* @scale_up: Scale up function control
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_set_scaler(struct nt35950 *nt, u8 scale_up)
+static void nt35950_set_scaler(struct mipi_dsi_multi_context *dsi_ctx,
+ u8 scale_up)
{
u8 cmd_scaler[] = { MCS_PARAM_SCALER_FUNCTION, scale_up };
- return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler,
- ARRAY_SIZE(cmd_scaler));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_scaler,
+ ARRAY_SIZE(cmd_scaler));
}
/*
* nt35950_set_scale_mode - Resolution upscaling mode
- * @nt: Main driver structure
+ * @dsi_ctx: context for mipi_dsi functions
* @mode: Scaler mode (MCS_DATA_COMPRESSION_*)
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_set_scale_mode(struct nt35950 *nt, u8 mode)
+static void nt35950_set_scale_mode(struct mipi_dsi_multi_context *dsi_ctx,
+ u8 mode)
{
u8 cmd_scaler[] = { MCS_PARAM_SCALEUP_MODE, mode };
- return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler,
- ARRAY_SIZE(cmd_scaler));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_scaler,
+ ARRAY_SIZE(cmd_scaler));
}
/*
* nt35950_inject_black_image - Display a completely black image
- * @nt: Main driver structure
+ * @dsi_ctx: context for mipi_dsi functions
*
* After IC setup, the attached panel may show random data
 * due to driver IC behavior changes (resolution, compression,
@@ -208,43 +189,34 @@ static int nt35950_set_scale_mode(struct nt35950 *nt, u8 mode)
* the display.
* It makes sense to push a black image before sending the sleep-out
* and display-on commands.
- *
- * Return: Number of transferred bytes or negative number on error
*/
-static int nt35950_inject_black_image(struct nt35950 *nt)
+static void nt35950_inject_black_image(struct mipi_dsi_multi_context *dsi_ctx)
{
const u8 cmd0_black_img[] = { 0x6f, 0x01 };
const u8 cmd1_black_img[] = { 0xf3, 0x10 };
u8 cmd_test[] = { 0xff, 0xaa, 0x55, 0xa5, 0x80 };
- int ret;
/* Enable test command */
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test));
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_test, ARRAY_SIZE(cmd_test));
/* Send a black image */
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd0_black_img,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd0_black_img,
ARRAY_SIZE(cmd0_black_img));
- if (ret < 0)
- return ret;
- ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd1_black_img,
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd1_black_img,
ARRAY_SIZE(cmd1_black_img));
- if (ret < 0)
- return ret;
/* Disable test command */
cmd_test[ARRAY_SIZE(cmd_test) - 1] = 0x00;
- return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_test, ARRAY_SIZE(cmd_test));
}
/*
* nt35950_set_dispout - Set Display Output register parameters
* @nt: Main driver structure
- *
- * Return: Number of transferred bytes or negative number on error
+ * @dsi_ctx: context for mipi_dsi functions
*/
-static int nt35950_set_dispout(struct nt35950 *nt)
+static void nt35950_set_dispout(struct mipi_dsi_multi_context *dsi_ctx,
+ struct nt35950 *nt)
{
u8 cmd_dispout[] = { MCS_PARAM_DISP_OUTPUT_CTRL, 0x00 };
const struct nt35950_panel_mode *mode_data = nt->desc->mode_data;
@@ -254,8 +226,8 @@ static int nt35950_set_dispout(struct nt35950 *nt)
if (mode_data[nt->cur_mode].enable_sram)
cmd_dispout[1] |= MCS_DISP_OUT_SRAM_EN;
- return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_dispout,
- ARRAY_SIZE(cmd_dispout));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, cmd_dispout,
+ ARRAY_SIZE(cmd_dispout));
}
static int nt35950_get_current_mode(struct nt35950 *nt)
@@ -284,78 +256,47 @@ static int nt35950_on(struct nt35950 *nt)
{
const struct nt35950_panel_mode *mode_data = nt->desc->mode_data;
struct mipi_dsi_device *dsi = nt->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
nt->cur_mode = nt35950_get_current_mode(nt);
nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = nt35950_set_cmd2_page(nt, 0);
- if (ret < 0)
- return ret;
+ nt35950_set_cmd2_page(&dsi_ctx, nt, 0);
+ nt35950_set_data_compression(&dsi_ctx, nt, mode_data[nt->cur_mode].compression);
+ nt35950_set_scale_mode(&dsi_ctx, mode_data[nt->cur_mode].scaler_mode);
+ nt35950_set_scaler(&dsi_ctx, mode_data[nt->cur_mode].scaler_on);
+ nt35950_set_dispout(&dsi_ctx, nt);
- ret = nt35950_set_data_compression(nt, mode_data[nt->cur_mode].compression);
- if (ret < 0)
- return ret;
-
- ret = nt35950_set_scale_mode(nt, mode_data[nt->cur_mode].scaler_mode);
- if (ret < 0)
- return ret;
-
- ret = nt35950_set_scaler(nt, mode_data[nt->cur_mode].scaler_on);
- if (ret < 0)
- return ret;
-
- ret = nt35950_set_dispout(nt);
- if (ret < 0)
- return ret;
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear scanline: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_tear_scanline_multi(&dsi_ctx, 0);
/* CMD2 Page 1 */
- ret = nt35950_set_cmd2_page(nt, 1);
- if (ret < 0)
- return ret;
+ nt35950_set_cmd2_page(&dsi_ctx, nt, 1);
/* Unknown command */
- mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd4, 0x88, 0x88);
/* CMD2 Page 7 */
- ret = nt35950_set_cmd2_page(nt, 7);
- if (ret < 0)
- return ret;
+ nt35950_set_cmd2_page(&dsi_ctx, nt, 7);
/* Enable SubPixel Rendering */
- mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PARAM_SPR_EN, 0x01);
/* SPR Mode: YYG Rainbow-RGB */
- mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PARAM_SPR_MODE,
+ MCS_SPR_MODE_YYG_RAINBOW_RGB);
/* CMD3 */
- ret = nt35950_inject_black_image(nt);
- if (ret < 0)
- return ret;
+ nt35950_inject_black_image(&dsi_ctx);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0)
- return ret;
- msleep(120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- return ret;
- msleep(120);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
nt->dsi[0]->mode_flags &= ~MIPI_DSI_MODE_LPM;
nt->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
@@ -363,30 +304,19 @@ static int nt35950_on(struct nt35950 *nt)
return 0;
}
-static int nt35950_off(struct nt35950 *nt)
+static void nt35950_off(struct nt35950 *nt)
{
- struct device *dev = &nt->dsi[0]->dev;
- int ret;
+ struct mipi_dsi_device *dsi = nt->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(nt->dsi[0]);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- goto set_lpm;
- }
- usleep_range(10000, 11000);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
- ret = mipi_dsi_dcs_enter_sleep_mode(nt->dsi[0]);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- goto set_lpm;
- }
- msleep(150);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 150);
-set_lpm:
nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
-
- return 0;
}
static int nt35950_sharp_init_vregs(struct nt35950 *nt, struct device *dev)
@@ -427,7 +357,6 @@ static int nt35950_sharp_init_vregs(struct nt35950 *nt, struct device *dev)
static int nt35950_prepare(struct drm_panel *panel)
{
struct nt35950 *nt = to_nt35950(panel);
- struct device *dev = &nt->dsi[0]->dev;
int ret;
ret = regulator_enable(nt->vregs[0].consumer);
@@ -452,10 +381,6 @@ static int nt35950_prepare(struct drm_panel *panel)
nt35950_reset(nt);
ret = nt35950_on(nt);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
- goto end;
- }
end:
if (ret < 0) {
@@ -469,12 +394,8 @@ end:
static int nt35950_unprepare(struct drm_panel *panel)
{
struct nt35950 *nt = to_nt35950(panel);
- struct device *dev = &nt->dsi[0]->dev;
- int ret;
- ret = nt35950_off(nt);
- if (ret < 0)
- dev_err(dev, "Failed to deinitialize panel: %d\n", ret);
+ nt35950_off(nt);
gpiod_set_value_cansleep(nt->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index e81a70147259..8c9e04207ba9 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -44,6 +44,16 @@ struct nt36672e_panel {
const struct panel_desc *desc;
};
+#define NT36672E_DCS_SWITCH_PAGE 0xff
+
+#define nt36672e_switch_page(ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(ctx, NT36672E_DCS_SWITCH_PAGE, (page))
+
+static void nt36672e_enable_reload_cmds(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+}
+
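The two helpers emit exactly the writes they replace throughout the init sequences below; for reference:

	nt36672e_switch_page(ctx, 0x10);	/* emits DCS 0xff, 0x10 */
	nt36672e_enable_reload_cmds(ctx);	/* emits DCS 0xfb, 0x01 */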
static inline struct nt36672e_panel *to_nt36672e_panel(struct drm_panel *panel)
{
return container_of(panel, struct nt36672e_panel, panel);
@@ -51,16 +61,16 @@ static inline struct nt36672e_panel *to_nt36672e_panel(struct drm_panel *panel)
static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
{
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+ nt36672e_switch_page(ctx, 0x10);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xc0, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xc1, 0x89, 0x28, 0x00, 0x08, 0x00, 0xaa, 0x02,
0x0e, 0x00, 0x2b, 0x00, 0x07, 0x0d, 0xb7, 0x0c, 0xb7);
-
mipi_dsi_dcs_write_seq_multi(ctx, 0xc2, 0x1b, 0xa0);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x20);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x66);
mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x40);
mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x38);
@@ -76,8 +86,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xf7, 0x54);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf8, 0x64);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf9, 0x54);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x24);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x0c);
mipi_dsi_dcs_write_seq_multi(ctx, 0x05, 0x1d);
@@ -139,8 +150,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xc9, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xd9, 0x80);
mipi_dsi_dcs_write_seq_multi(ctx, 0xe9, 0x02);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x25);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x25);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x22);
mipi_dsi_dcs_write_seq_multi(ctx, 0x19, 0xe4);
mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x40);
@@ -164,8 +176,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xd7, 0x80);
mipi_dsi_dcs_write_seq_multi(ctx, 0xef, 0x20);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x84);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x26);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x81, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x83, 0x01);
mipi_dsi_dcs_write_seq_multi(ctx, 0x84, 0x03);
@@ -185,8 +198,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x9c, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0x9d, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0x9e, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x27);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x68);
mipi_dsi_dcs_write_seq_multi(ctx, 0x20, 0x81);
mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x6a);
@@ -215,8 +229,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xe6, 0xd3);
mipi_dsi_dcs_write_seq_multi(ctx, 0xeb, 0x03);
mipi_dsi_dcs_write_seq_multi(ctx, 0xec, 0x28);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x2a);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x00, 0x91);
mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x20);
mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x50);
@@ -260,8 +275,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x8c, 0x7d);
mipi_dsi_dcs_write_seq_multi(ctx, 0x8d, 0x7d);
mipi_dsi_dcs_write_seq_multi(ctx, 0x8e, 0x7d);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x20);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
@@ -286,8 +302,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
mipi_dsi_dcs_write_seq_multi(ctx, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x21);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x21);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
@@ -312,8 +329,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
mipi_dsi_dcs_write_seq_multi(ctx, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x2c);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x2c);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x61, 0x1f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x62, 0x1f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x7e, 0x03);
@@ -327,12 +345,13 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x56, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x58, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x59, 0x0f);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0xf0);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+ nt36672e_switch_page(ctx, 0x10);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x51, 0xff);
mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x24);
mipi_dsi_dcs_write_seq_multi(ctx, 0x55, 0x01);
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 93183f30d7d6..a9b5dad70bc1 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -389,7 +389,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
return -ENXIO;
}
- if (bd->props.power <= FB_BLANK_NORMAL) {
+ if (bd->props.power <= BACKLIGHT_POWER_REDUCED) {
/* Power on the backlight with the requested brightness
		 * Note: we cannot use mipi_dsi_dcs_set_display_brightness()
		 * as the otm8009a driver supports only 8-bit brightness (1 param).
@@ -465,7 +465,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
ctx->bl_dev->props.brightness = OTM8009A_BACKLIGHT_DEFAULT;
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
ctx->bl_dev->props.type = BACKLIGHT_RAW;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 639a4fdf57bb..ab8b58545284 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -458,7 +458,7 @@ static int s6e3ha2_set_brightness(struct backlight_device *bl_dev)
return -EINVAL;
}
- if (bl_dev->props.power > FB_BLANK_NORMAL)
+ if (bl_dev->props.power > BACKLIGHT_POWER_REDUCED)
return -EPERM;
s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
@@ -508,7 +508,7 @@ static int s6e3ha2_disable(struct drm_panel *panel)
s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_off(dsi));
msleep(40);
- ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_REDUCED;
return 0;
}
@@ -554,7 +554,7 @@ static int s6e3ha2_prepare(struct drm_panel *panel)
if (ret < 0)
goto err;
- ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_REDUCED;
return 0;
@@ -601,7 +601,7 @@ static int s6e3ha2_enable(struct drm_panel *panel)
s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_on(dsi));
- ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_ON;
return 0;
}
@@ -729,7 +729,7 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.max_brightness = S6E3HA2_MAX_BRIGHTNESS;
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index 46d6f4a87bf7..ed53787d1dea 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -225,7 +225,7 @@ static int s6e63j0x03_disable(struct drm_panel *panel)
if (ret < 0)
return ret;
- ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_REDUCED;
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0)
@@ -245,7 +245,7 @@ static int s6e63j0x03_unprepare(struct drm_panel *panel)
if (ret < 0)
return ret;
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
return 0;
}
@@ -332,7 +332,7 @@ static int s6e63j0x03_prepare(struct drm_panel *panel)
if (ret < 0)
goto err;
- ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_REDUCED;
return 0;
@@ -393,7 +393,7 @@ static int s6e63j0x03_enable(struct drm_panel *panel)
if (ret < 0)
return ret;
- ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_ON;
return 0;
}
@@ -473,7 +473,7 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.max_brightness = MAX_BRIGHTNESS;
ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS;
- ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index bf40057c5cf3..86735430462f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -726,16 +726,24 @@ static void panel_simple_shutdown(struct device *dev)
* drm_atomic_helper_shutdown() at shutdown time and that should
* cause the panel to be disabled / unprepared if needed. For now,
* however, we'll keep these calls due to the sheer number of
- * different DRM modeset drivers used with panel-simple. The fact that
- * we're calling these and _also_ the drm_atomic_helper_shutdown()
- * will try to disable/unprepare means that we can get a warning about
- * trying to disable/unprepare an already disabled/unprepared panel,
- * but that's something we'll have to live with until we've confirmed
- * that all DRM modeset drivers are properly calling
- * drm_atomic_helper_shutdown().
+ * different DRM modeset drivers used with panel-simple. Once we've
+ * confirmed that all DRM modeset drivers using this panel properly
+ * call drm_atomic_helper_shutdown() we can simply delete the two
+ * calls below.
+ *
+ * TO BE EXPLICIT: THE CALLS BELOW SHOULDN'T BE COPIED TO ANY NEW
+ * PANEL DRIVERS.
+ *
+	 * FIXME: If we still haven't figured out whether all DRM modeset
+ * drivers properly call drm_atomic_helper_shutdown() but we _have_
+ * managed to make sure that DRM modeset drivers get their shutdown()
+ * callback before the panel's shutdown() callback (perhaps using
+ * device link), we could add a WARN_ON here to help move forward.
*/
- drm_panel_disable(&panel->base);
- drm_panel_unprepare(&panel->base);
+ if (panel->base.enabled)
+ drm_panel_disable(&panel->base);
+ if (panel->base.prepared)
+ drm_panel_unprepare(&panel->base);
}
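For reference, a minimal sketch of the pattern the comment above asks modeset drivers to adopt, after which the two conditional calls can be deleted (driver and variable names assumed; drm_atomic_helper_shutdown() is the real helper):

	static void example_platform_shutdown(struct platform_device *pdev)
	{
		struct drm_device *drm = platform_get_drvdata(pdev);

		/* disables and unprepares panels via the normal atomic path */
		drm_atomic_helper_shutdown(drm);
	}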
static void panel_simple_remove(struct device *dev)
@@ -2519,6 +2527,38 @@ static const struct panel_desc innolux_g070y2_l01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing innolux_g070ace_lh3_timing = {
+ .pixelclock = { 25200000, 25400000, 35700000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 30, 32, 87 },
+ .hback_porch = { 29, 31, 86 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 4, 5, 65 },
+ .vback_porch = { 3, 4, 65 },
+ .vsync_len = { 1, 1, 1 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g070ace_lh3 = {
+ .timings = &innolux_g070ace_lh3_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .delay = {
+ .prepare = 10,
+ .enable = 450,
+ .disable = 200,
+ .unprepare = 510,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
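As a sanity check on the typical timing column (editorial note, not part of the patch): htotal = 800 + 32 + 31 + 1 = 864 and vtotal = 480 + 5 + 4 + 1 = 490, so the typical refresh rate is 25400000 / (864 * 490) ≈ 60 Hz, as expected for an 800x480 LVDS panel of this class.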
static const struct drm_display_mode innolux_g070y2_t02_mode = {
.clock = 33333,
.hdisplay = 800,
@@ -3478,6 +3518,39 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing ontat_kd50g21_40nt_a1_timing = {
+ .pixelclock = { 30000000, 30000000, 50000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 1, 40, 255 },
+ .hback_porch = { 1, 40, 87 },
+ .hsync_len = { 1, 48, 87 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 1, 13, 255 },
+ .vback_porch = { 1, 29, 29 },
+ .vsync_len = { 3, 3, 31 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc ontat_kd50g21_40nt_a1 = {
+ .timings = &ontat_kd50g21_40nt_a1_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+ .delay = {
+ .prepare = 147, /* 5 VSDs */
+ .enable = 147, /* 5 VSDs */
+ .disable = 88, /* 3 VSDs */
+ .unprepare = 117, /* 4 VSDs */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
/*
* 800x480 CVT. The panel appears to be quite accepting, at least as far as
* pixel clocks, but this is the timing that was being used in the Adafruit
@@ -4727,6 +4800,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g070ace-l01",
.data = &innolux_g070ace_l01,
}, {
+ .compatible = "innolux,g070ace-lh3",
+ .data = &innolux_g070ace_lh3,
+ }, {
.compatible = "innolux,g070y2-l01",
.data = &innolux_g070y2_l01,
}, {
@@ -4838,6 +4914,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "olimex,lcd-olinuxino-43-ts",
.data = &olimex_lcd_olinuxino_43ts,
}, {
+ .compatible = "ontat,kd50g21-40nt-a1",
+ .data = &ontat_kd50g21_40nt_a1,
+ }, {
.compatible = "ontat,yx700wv03",
.data = &ontat_yx700wv03,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 421eb4592b61..eef03d04e0cd 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -4,6 +4,7 @@
* Author: Jagan Teki <jagan@amarulasolutions.com>
*/
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
@@ -14,79 +15,80 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
#include <video/mipi_display.h>
/* Command2 BKx selection command */
-#define DSI_CMD2BKX_SEL 0xFF
-#define DSI_CMD1 0
-#define DSI_CMD2 BIT(4)
-#define DSI_CMD2BK_MASK GENMASK(3, 0)
+#define ST7701_CMD2BKX_SEL 0xFF
+#define ST7701_CMD1 0
+#define ST7701_CMD2 BIT(4)
+#define ST7701_CMD2BK_MASK GENMASK(3, 0)
/* Command2, BK0 commands */
-#define DSI_CMD2_BK0_PVGAMCTRL 0xB0 /* Positive Voltage Gamma Control */
-#define DSI_CMD2_BK0_NVGAMCTRL 0xB1 /* Negative Voltage Gamma Control */
-#define DSI_CMD2_BK0_LNESET 0xC0 /* Display Line setting */
-#define DSI_CMD2_BK0_PORCTRL 0xC1 /* Porch control */
-#define DSI_CMD2_BK0_INVSEL 0xC2 /* Inversion selection, Frame Rate Control */
+#define ST7701_CMD2_BK0_PVGAMCTRL 0xB0 /* Positive Voltage Gamma Control */
+#define ST7701_CMD2_BK0_NVGAMCTRL 0xB1 /* Negative Voltage Gamma Control */
+#define ST7701_CMD2_BK0_LNESET 0xC0 /* Display Line setting */
+#define ST7701_CMD2_BK0_PORCTRL 0xC1 /* Porch control */
+#define ST7701_CMD2_BK0_INVSEL 0xC2 /* Inversion selection, Frame Rate Control */
/* Command2, BK1 commands */
-#define DSI_CMD2_BK1_VRHS 0xB0 /* Vop amplitude setting */
-#define DSI_CMD2_BK1_VCOM 0xB1 /* VCOM amplitude setting */
-#define DSI_CMD2_BK1_VGHSS 0xB2 /* VGH Voltage setting */
-#define DSI_CMD2_BK1_TESTCMD 0xB3 /* TEST Command Setting */
-#define DSI_CMD2_BK1_VGLS 0xB5 /* VGL Voltage setting */
-#define DSI_CMD2_BK1_PWCTLR1 0xB7 /* Power Control 1 */
-#define DSI_CMD2_BK1_PWCTLR2 0xB8 /* Power Control 2 */
-#define DSI_CMD2_BK1_SPD1 0xC1 /* Source pre_drive timing set1 */
-#define DSI_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
-#define DSI_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */
+#define ST7701_CMD2_BK1_VRHS 0xB0 /* Vop amplitude setting */
+#define ST7701_CMD2_BK1_VCOM 0xB1 /* VCOM amplitude setting */
+#define ST7701_CMD2_BK1_VGHSS 0xB2 /* VGH Voltage setting */
+#define ST7701_CMD2_BK1_TESTCMD 0xB3 /* TEST Command Setting */
+#define ST7701_CMD2_BK1_VGLS 0xB5 /* VGL Voltage setting */
+#define ST7701_CMD2_BK1_PWCTLR1 0xB7 /* Power Control 1 */
+#define ST7701_CMD2_BK1_PWCTLR2 0xB8 /* Power Control 2 */
+#define ST7701_CMD2_BK1_SPD1 0xC1 /* Source pre_drive timing set1 */
+#define ST7701_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
+#define ST7701_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */
/* Command2, BK0 bytes */
-#define DSI_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
-#define DSI_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC4_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC8_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC16_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC24_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC52_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC80_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC108_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC147_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC175_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC203_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC231_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC239_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC247_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC251_MASK GENMASK(5, 0)
-#define DSI_CMD2_BK0_GAMCTRL_VC255_MASK GENMASK(4, 0)
-#define DSI_CMD2_BK0_LNESET_LINE_MASK GENMASK(6, 0)
-#define DSI_CMD2_BK0_LNESET_LDE_EN BIT(7)
-#define DSI_CMD2_BK0_LNESET_LINEDELTA GENMASK(1, 0)
-#define DSI_CMD2_BK0_PORCTRL_VBP_MASK GENMASK(7, 0)
-#define DSI_CMD2_BK0_PORCTRL_VFP_MASK GENMASK(7, 0)
-#define DSI_CMD2_BK0_INVSEL_ONES_MASK GENMASK(5, 4)
-#define DSI_CMD2_BK0_INVSEL_NLINV_MASK GENMASK(2, 0)
-#define DSI_CMD2_BK0_INVSEL_RTNI_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
+#define ST7701_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC4_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC8_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC16_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC24_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC52_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC80_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC108_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC147_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC175_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC203_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC231_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC239_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC247_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC251_MASK GENMASK(5, 0)
+#define ST7701_CMD2_BK0_GAMCTRL_VC255_MASK GENMASK(4, 0)
+#define ST7701_CMD2_BK0_LNESET_LINE_MASK GENMASK(6, 0)
+#define ST7701_CMD2_BK0_LNESET_LDE_EN BIT(7)
+#define ST7701_CMD2_BK0_LNESET_LINEDELTA GENMASK(1, 0)
+#define ST7701_CMD2_BK0_PORCTRL_VBP_MASK GENMASK(7, 0)
+#define ST7701_CMD2_BK0_PORCTRL_VFP_MASK GENMASK(7, 0)
+#define ST7701_CMD2_BK0_INVSEL_ONES_MASK GENMASK(5, 4)
+#define ST7701_CMD2_BK0_INVSEL_NLINV_MASK GENMASK(2, 0)
+#define ST7701_CMD2_BK0_INVSEL_RTNI_MASK GENMASK(4, 0)
/* Command2, BK1 bytes */
-#define DSI_CMD2_BK1_VRHA_MASK GENMASK(7, 0)
-#define DSI_CMD2_BK1_VCOM_MASK GENMASK(7, 0)
-#define DSI_CMD2_BK1_VGHSS_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK1_TESTCMD_VAL BIT(7)
-#define DSI_CMD2_BK1_VGLS_ONES BIT(6)
-#define DSI_CMD2_BK1_VGLS_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK1_PWRCTRL1_AP_MASK GENMASK(7, 6)
-#define DSI_CMD2_BK1_PWRCTRL1_APIS_MASK GENMASK(3, 2)
-#define DSI_CMD2_BK1_PWRCTRL1_APOS_MASK GENMASK(1, 0)
-#define DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK GENMASK(5, 4)
-#define DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK GENMASK(1, 0)
-#define DSI_CMD2_BK1_SPD1_ONES_MASK GENMASK(6, 4)
-#define DSI_CMD2_BK1_SPD1_T2D_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK1_SPD2_ONES_MASK GENMASK(6, 4)
-#define DSI_CMD2_BK1_SPD2_T3D_MASK GENMASK(3, 0)
-#define DSI_CMD2_BK1_MIPISET1_ONES BIT(7)
-#define DSI_CMD2_BK1_MIPISET1_EOT_EN BIT(3)
+#define ST7701_CMD2_BK1_VRHA_MASK GENMASK(7, 0)
+#define ST7701_CMD2_BK1_VCOM_MASK GENMASK(7, 0)
+#define ST7701_CMD2_BK1_VGHSS_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK1_TESTCMD_VAL BIT(7)
+#define ST7701_CMD2_BK1_VGLS_ONES BIT(6)
+#define ST7701_CMD2_BK1_VGLS_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK1_PWRCTRL1_AP_MASK GENMASK(7, 6)
+#define ST7701_CMD2_BK1_PWRCTRL1_APIS_MASK GENMASK(3, 2)
+#define ST7701_CMD2_BK1_PWRCTRL1_APOS_MASK GENMASK(1, 0)
+#define ST7701_CMD2_BK1_PWRCTRL2_AVDD_MASK GENMASK(5, 4)
+#define ST7701_CMD2_BK1_PWRCTRL2_AVCL_MASK GENMASK(1, 0)
+#define ST7701_CMD2_BK1_SPD1_ONES_MASK GENMASK(6, 4)
+#define ST7701_CMD2_BK1_SPD1_T2D_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK1_SPD2_ONES_MASK GENMASK(6, 4)
+#define ST7701_CMD2_BK1_SPD2_T3D_MASK GENMASK(3, 0)
+#define ST7701_CMD2_BK1_MIPISET1_ONES BIT(7)
+#define ST7701_CMD2_BK1_MIPISET1_EOT_EN BIT(3)
#define CFIELD_PREP(_mask, _val) \
(((typeof(_mask))(_val) << (__builtin_ffsll(_mask) - 1)) & (_mask))
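Worked example (editorial note): unlike FIELD_PREP(), CFIELD_PREP() is a plain constant expression, so it can be used in the static gamma-table initializers below. For instance, CFIELD_PREP(GENMASK(7, 6), 0x2) shifts 0x2 left by __builtin_ffsll(0xc0) - 1 = 6 bits and masks the result, yielding 0x80.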
@@ -130,12 +132,16 @@ struct st7701_panel_desc {
struct st7701 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
+ struct mipi_dbi dbi;
const struct st7701_panel_desc *desc;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset;
unsigned int sleep_delay;
enum drm_panel_orientation orientation;
+
+ int (*write_command)(struct st7701 *st7701, u8 cmd, const u8 *seq,
+ size_t len);
};
static inline struct st7701 *panel_to_st7701(struct drm_panel *panel)
@@ -143,16 +149,22 @@ static inline struct st7701 *panel_to_st7701(struct drm_panel *panel)
return container_of(panel, struct st7701, panel);
}
-static inline int st7701_dsi_write(struct st7701 *st7701, const void *seq,
- size_t len)
+static int st7701_dsi_write(struct st7701 *st7701, u8 cmd, const u8 *seq,
+ size_t len)
+{
+ return mipi_dsi_dcs_write(st7701->dsi, cmd, seq, len);
+}
+
+static int st7701_dbi_write(struct st7701 *st7701, u8 cmd, const u8 *seq,
+ size_t len)
{
- return mipi_dsi_dcs_write_buffer(st7701->dsi, seq, len);
+ return mipi_dbi_command_stackbuf(&st7701->dbi, cmd, seq, len);
}
-#define ST7701_DSI(st7701, seq...) \
- { \
- const u8 d[] = { seq }; \
- st7701_dsi_write(st7701, d, ARRAY_SIZE(d)); \
+#define ST7701_WRITE(st7701, cmd, seq...) \
+ { \
+ const u8 d[] = { seq }; \
+ st7701->write_command(st7701, cmd, d, ARRAY_SIZE(d)); \
}
static u8 st7701_vgls_map(struct st7701 *st7701)
@@ -185,11 +197,11 @@ static void st7701_switch_cmd_bkx(struct st7701 *st7701, bool cmd2, u8 bkx)
u8 val;
if (cmd2)
- val = DSI_CMD2 | FIELD_PREP(DSI_CMD2BK_MASK, bkx);
+ val = ST7701_CMD2 | FIELD_PREP(ST7701_CMD2BK_MASK, bkx);
else
- val = DSI_CMD1;
+ val = ST7701_CMD1;
- ST7701_DSI(st7701, DSI_CMD2BKX_SEL, 0x77, 0x01, 0x00, 0x00, val);
+ ST7701_WRITE(st7701, ST7701_CMD2BKX_SEL, 0x77, 0x01, 0x00, 0x00, val);
}
static void st7701_init_sequence(struct st7701 *st7701)
@@ -199,22 +211,22 @@ static void st7701_init_sequence(struct st7701 *st7701)
const u8 linecount8 = mode->vdisplay / 8;
const u8 linecountrem2 = (mode->vdisplay % 8) / 2;
- ST7701_DSI(st7701, MIPI_DCS_SOFT_RESET, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_SOFT_RESET, 0x00);
/* We need to wait 5ms before sending new commands */
msleep(5);
- ST7701_DSI(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
msleep(st7701->sleep_delay);
/* Command2, BK0 */
st7701_switch_cmd_bkx(st7701, true, 0);
- mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_PVGAMCTRL,
- desc->pv_gamma, ARRAY_SIZE(desc->pv_gamma));
- mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_NVGAMCTRL,
- desc->nv_gamma, ARRAY_SIZE(desc->nv_gamma));
+ st7701->write_command(st7701, ST7701_CMD2_BK0_PVGAMCTRL, desc->pv_gamma,
+ ARRAY_SIZE(desc->pv_gamma));
+ st7701->write_command(st7701, ST7701_CMD2_BK0_NVGAMCTRL, desc->nv_gamma,
+ ARRAY_SIZE(desc->nv_gamma));
/*
* Vertical line count configuration:
* Line[6:0]: select number of vertical lines of the TFT matrix in
@@ -226,14 +238,14 @@ static void st7701_init_sequence(struct st7701 *st7701)
* Total number of vertical lines:
* LN = ((Line[6:0] + 1) * 8) + (LDE_EN ? Line_delta[1:0] * 2 : 0)
*/
- ST7701_DSI(st7701, DSI_CMD2_BK0_LNESET,
- FIELD_PREP(DSI_CMD2_BK0_LNESET_LINE_MASK, linecount8 - 1) |
- (linecountrem2 ? DSI_CMD2_BK0_LNESET_LDE_EN : 0),
- FIELD_PREP(DSI_CMD2_BK0_LNESET_LINEDELTA, linecountrem2));
- ST7701_DSI(st7701, DSI_CMD2_BK0_PORCTRL,
- FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VBP_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK0_LNESET,
+ FIELD_PREP(ST7701_CMD2_BK0_LNESET_LINE_MASK, linecount8 - 1) |
+ (linecountrem2 ? ST7701_CMD2_BK0_LNESET_LDE_EN : 0),
+ FIELD_PREP(ST7701_CMD2_BK0_LNESET_LINEDELTA, linecountrem2));
+ ST7701_WRITE(st7701, ST7701_CMD2_BK0_PORCTRL,
+ FIELD_PREP(ST7701_CMD2_BK0_PORCTRL_VBP_MASK,
mode->vtotal - mode->vsync_end),
- FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VFP_MASK,
+ FIELD_PREP(ST7701_CMD2_BK0_PORCTRL_VFP_MASK,
mode->vsync_start - mode->vdisplay));
/*
* Horizontal pixel count configuration:
@@ -241,70 +253,70 @@ static void st7701_init_sequence(struct st7701 *st7701)
* The PCLK is number of pixel clock per line, which matches
* mode htotal. The minimum is 512 PCLK.
*/
- ST7701_DSI(st7701, DSI_CMD2_BK0_INVSEL,
- DSI_CMD2_BK0_INVSEL_ONES_MASK |
- FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
- FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK0_INVSEL,
+ ST7701_CMD2_BK0_INVSEL_ONES_MASK |
+ FIELD_PREP(ST7701_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
+ FIELD_PREP(ST7701_CMD2_BK0_INVSEL_RTNI_MASK,
(clamp((u32)mode->htotal, 512U, 1008U) - 512) / 16));
/* Command2, BK1 */
st7701_switch_cmd_bkx(st7701, true, 1);
/* Vop = 3.5375V + (VRHA[7:0] * 0.0125V) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS,
- FIELD_PREP(DSI_CMD2_BK1_VRHA_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_VRHS,
+ FIELD_PREP(ST7701_CMD2_BK1_VRHA_MASK,
DIV_ROUND_CLOSEST(desc->vop_uv - 3537500, 12500)));
/* Vcom = 0.1V + (VCOM[7:0] * 0.0125V) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_VCOM,
- FIELD_PREP(DSI_CMD2_BK1_VCOM_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_VCOM,
+ FIELD_PREP(ST7701_CMD2_BK1_VCOM_MASK,
DIV_ROUND_CLOSEST(desc->vcom_uv - 100000, 12500)));
/* Vgh = 11.5V + (VGHSS[7:0] * 0.5V) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_VGHSS,
- FIELD_PREP(DSI_CMD2_BK1_VGHSS_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_VGHSS,
+ FIELD_PREP(ST7701_CMD2_BK1_VGHSS_MASK,
DIV_ROUND_CLOSEST(clamp(desc->vgh_mv,
(u16)11500,
(u16)17000) - 11500,
500)));
- ST7701_DSI(st7701, DSI_CMD2_BK1_TESTCMD, DSI_CMD2_BK1_TESTCMD_VAL);
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_TESTCMD, ST7701_CMD2_BK1_TESTCMD_VAL);
/* Vgl is non-linear */
- ST7701_DSI(st7701, DSI_CMD2_BK1_VGLS,
- DSI_CMD2_BK1_VGLS_ONES |
- FIELD_PREP(DSI_CMD2_BK1_VGLS_MASK, st7701_vgls_map(st7701)));
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_VGLS,
+ ST7701_CMD2_BK1_VGLS_ONES |
+ FIELD_PREP(ST7701_CMD2_BK1_VGLS_MASK, st7701_vgls_map(st7701)));
- ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR1,
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_AP_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_PWCTLR1,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL1_AP_MASK,
desc->gamma_op_bias) |
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APIS_MASK,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL1_APIS_MASK,
desc->input_op_bias) |
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APOS_MASK,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL1_APOS_MASK,
desc->output_op_bias));
/* Avdd = 6.2V + (AVDD[1:0] * 0.2V) , Avcl = -4.4V - (AVCL[1:0] * 0.2V) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR2,
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_PWCTLR2,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL2_AVDD_MASK,
DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
- FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
+ FIELD_PREP(ST7701_CMD2_BK1_PWRCTRL2_AVCL_MASK,
DIV_ROUND_CLOSEST(-4400 - desc->avcl_mv, 200)));
/* T2D = 0.2us * T2D[3:0] */
- ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
- DSI_CMD2_BK1_SPD1_ONES_MASK |
- FIELD_PREP(DSI_CMD2_BK1_SPD1_T2D_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_SPD1,
+ ST7701_CMD2_BK1_SPD1_ONES_MASK |
+ FIELD_PREP(ST7701_CMD2_BK1_SPD1_T2D_MASK,
DIV_ROUND_CLOSEST(desc->t2d_ns, 200)));
/* T3D = 4us + (0.8us * T3D[3:0]) */
- ST7701_DSI(st7701, DSI_CMD2_BK1_SPD2,
- DSI_CMD2_BK1_SPD2_ONES_MASK |
- FIELD_PREP(DSI_CMD2_BK1_SPD2_T3D_MASK,
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_SPD2,
+ ST7701_CMD2_BK1_SPD2_ONES_MASK |
+ FIELD_PREP(ST7701_CMD2_BK1_SPD2_T3D_MASK,
DIV_ROUND_CLOSEST(desc->t3d_ns - 4000, 800)));
- ST7701_DSI(st7701, DSI_CMD2_BK1_MIPISET1,
- DSI_CMD2_BK1_MIPISET1_ONES |
- (desc->eot_en ? DSI_CMD2_BK1_MIPISET1_EOT_EN : 0));
+ ST7701_WRITE(st7701, ST7701_CMD2_BK1_MIPISET1,
+ ST7701_CMD2_BK1_MIPISET1_ONES |
+ (desc->eot_en ? ST7701_CMD2_BK1_MIPISET1_EOT_EN : 0));
}
static void ts8550b_gip_sequence(struct st7701 *st7701)
@@ -313,89 +325,89 @@ static void ts8550b_gip_sequence(struct st7701 *st7701)
	 * ST7701_SPEC_V1.2 does not provide enough information about this
	 * specific command sequence, so it is taken from the vendor BSP driver.
*/
- ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
- ST7701_DSI(st7701, 0xE1, 0x0B, 0x00, 0x0D, 0x00, 0x0C, 0x00, 0x0E,
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x0B, 0x00, 0x0D, 0x00, 0x0C, 0x00, 0x0E,
0x00, 0x00, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE2, 0x33, 0x33, 0x44, 0x44, 0x64, 0x00, 0x66,
+ ST7701_WRITE(st7701, 0xE2, 0x33, 0x33, 0x44, 0x44, 0x64, 0x00, 0x66,
0x00, 0x65, 0x00, 0x67, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE5, 0x0C, 0x78, 0x3C, 0xA0, 0x0E, 0x78, 0x3C,
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0C, 0x78, 0x3C, 0xA0, 0x0E, 0x78, 0x3C,
0xA0, 0x10, 0x78, 0x3C, 0xA0, 0x12, 0x78, 0x3C, 0xA0);
- ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE8, 0x0D, 0x78, 0x3C, 0xA0, 0x0F, 0x78, 0x3C,
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x0D, 0x78, 0x3C, 0xA0, 0x0F, 0x78, 0x3C,
0xA0, 0x11, 0x78, 0x3C, 0xA0, 0x13, 0x78, 0x3C, 0xA0);
- ST7701_DSI(st7701, 0xEB, 0x02, 0x02, 0x39, 0x39, 0xEE, 0x44, 0x00);
- ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
- ST7701_DSI(st7701, 0xED, 0xFF, 0xF1, 0x04, 0x56, 0x72, 0x3F, 0xFF,
+ ST7701_WRITE(st7701, 0xEB, 0x02, 0x02, 0x39, 0x39, 0xEE, 0x44, 0x00);
+ ST7701_WRITE(st7701, 0xEC, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xED, 0xFF, 0xF1, 0x04, 0x56, 0x72, 0x3F, 0xFF,
0xFF, 0xFF, 0xFF, 0xF3, 0x27, 0x65, 0x40, 0x1F, 0xFF);
}
static void dmt028vghmcmi_1a_gip_sequence(struct st7701 *st7701)
{
- ST7701_DSI(st7701, 0xEE, 0x42);
- ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_WRITE(st7701, 0xEE, 0x42);
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
- ST7701_DSI(st7701, 0xE1,
+ ST7701_WRITE(st7701, 0xE1,
0x04, 0xA0, 0x06, 0xA0,
0x05, 0xA0, 0x07, 0xA0,
0x00, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE2,
+ ST7701_WRITE(st7701, 0xE2,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE3,
+ ST7701_WRITE(st7701, 0xE3,
0x00, 0x00, 0x22, 0x22);
- ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE5,
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5,
0x0C, 0x90, 0xA0, 0xA0,
0x0E, 0x92, 0xA0, 0xA0,
0x08, 0x8C, 0xA0, 0xA0,
0x0A, 0x8E, 0xA0, 0xA0);
- ST7701_DSI(st7701, 0xE6,
+ ST7701_WRITE(st7701, 0xE6,
0x00, 0x00, 0x22, 0x22);
- ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE8,
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8,
0x0D, 0x91, 0xA0, 0xA0,
0x0F, 0x93, 0xA0, 0xA0,
0x09, 0x8D, 0xA0, 0xA0,
0x0B, 0x8F, 0xA0, 0xA0);
- ST7701_DSI(st7701, 0xEB,
+ ST7701_WRITE(st7701, 0xEB,
0x00, 0x00, 0xE4, 0xE4,
0x44, 0x00, 0x00);
- ST7701_DSI(st7701, 0xED,
+ ST7701_WRITE(st7701, 0xED,
0xFF, 0xF5, 0x47, 0x6F,
0x0B, 0xA1, 0xAB, 0xFF,
0xFF, 0xBA, 0x1A, 0xB0,
0xF6, 0x74, 0x5F, 0xFF);
- ST7701_DSI(st7701, 0xEF,
+ ST7701_WRITE(st7701, 0xEF,
0x08, 0x08, 0x08, 0x40,
0x3F, 0x64);
st7701_switch_cmd_bkx(st7701, false, 0);
st7701_switch_cmd_bkx(st7701, true, 3);
- ST7701_DSI(st7701, 0xE6, 0x7C);
- ST7701_DSI(st7701, 0xE8, 0x00, 0x0E);
+ ST7701_WRITE(st7701, 0xE6, 0x7C);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x0E);
st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, 0x11);
+ ST7701_WRITE(st7701, 0x11);
msleep(120);
st7701_switch_cmd_bkx(st7701, true, 3);
- ST7701_DSI(st7701, 0xE8, 0x00, 0x0C);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x0C);
msleep(10);
- ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x00);
st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, 0x11);
+ ST7701_WRITE(st7701, 0x11);
msleep(120);
- ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x00);
st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, 0x3A, 0x70);
+ ST7701_WRITE(st7701, 0x3A, 0x70);
}
static void kd50t048a_gip_sequence(struct st7701 *st7701)
@@ -404,59 +416,108 @@ static void kd50t048a_gip_sequence(struct st7701 *st7701)
	 * ST7701_SPEC_V1.2 does not provide enough information about this
	 * specific command sequence, so it is taken from the vendor BSP driver.
*/
- ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
- ST7701_DSI(st7701, 0xE1, 0x08, 0x00, 0x0A, 0x00, 0x07, 0x00, 0x09,
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x08, 0x00, 0x0A, 0x00, 0x07, 0x00, 0x09,
0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ST7701_WRITE(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE5, 0x0E, 0x60, 0xA0, 0xA0, 0x10, 0x60, 0xA0,
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0E, 0x60, 0xA0, 0xA0, 0x10, 0x60, 0xA0,
0xA0, 0x0A, 0x60, 0xA0, 0xA0, 0x0C, 0x60, 0xA0, 0xA0);
- ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
- ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
- ST7701_DSI(st7701, 0xE8, 0x0D, 0x60, 0xA0, 0xA0, 0x0F, 0x60, 0xA0,
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x0D, 0x60, 0xA0, 0xA0, 0x0F, 0x60, 0xA0,
0xA0, 0x09, 0x60, 0xA0, 0xA0, 0x0B, 0x60, 0xA0, 0xA0);
- ST7701_DSI(st7701, 0xEB, 0x02, 0x01, 0xE4, 0xE4, 0x44, 0x00, 0x40);
- ST7701_DSI(st7701, 0xEC, 0x02, 0x01);
- ST7701_DSI(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x01, 0xFF, 0xFF,
+ ST7701_WRITE(st7701, 0xEB, 0x02, 0x01, 0xE4, 0xE4, 0x44, 0x00, 0x40);
+ ST7701_WRITE(st7701, 0xEC, 0x02, 0x01);
+ ST7701_WRITE(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x01, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0x10, 0x45, 0x67, 0x98, 0xBA);
}
static void rg_arc_gip_sequence(struct st7701 *st7701)
{
st7701_switch_cmd_bkx(st7701, true, 3);
- ST7701_DSI(st7701, 0xEF, 0x08);
+ ST7701_WRITE(st7701, 0xEF, 0x08);
st7701_switch_cmd_bkx(st7701, true, 0);
- ST7701_DSI(st7701, 0xC7, 0x04);
- ST7701_DSI(st7701, 0xCC, 0x38);
+ ST7701_WRITE(st7701, 0xC7, 0x04);
+ ST7701_WRITE(st7701, 0xCC, 0x38);
st7701_switch_cmd_bkx(st7701, true, 1);
- ST7701_DSI(st7701, 0xB9, 0x10);
- ST7701_DSI(st7701, 0xBC, 0x03);
- ST7701_DSI(st7701, 0xC0, 0x89);
- ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
- ST7701_DSI(st7701, 0xE1, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00,
+ ST7701_WRITE(st7701, 0xB9, 0x10);
+ ST7701_WRITE(st7701, 0xBC, 0x03);
+ ST7701_WRITE(st7701, 0xC0, 0x89);
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00,
0x00, 0x00, 0x20, 0x20);
- ST7701_DSI(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ST7701_WRITE(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x00);
- ST7701_DSI(st7701, 0xE4, 0x22, 0x00);
- ST7701_DSI(st7701, 0xE5, 0x04, 0x5C, 0xA0, 0xA0, 0x06, 0x5C, 0xA0,
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x33, 0x00);
+ ST7701_WRITE(st7701, 0xE4, 0x22, 0x00);
+ ST7701_WRITE(st7701, 0xE5, 0x04, 0x5C, 0xA0, 0xA0, 0x06, 0x5C, 0xA0,
0xA0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x00);
- ST7701_DSI(st7701, 0xE7, 0x22, 0x00);
- ST7701_DSI(st7701, 0xE8, 0x05, 0x5C, 0xA0, 0xA0, 0x07, 0x5C, 0xA0,
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x33, 0x00);
+ ST7701_WRITE(st7701, 0xE7, 0x22, 0x00);
+ ST7701_WRITE(st7701, 0xE8, 0x05, 0x5C, 0xA0, 0xA0, 0x07, 0x5C, 0xA0,
0xA0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xEB, 0x02, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00);
- ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
- ST7701_DSI(st7701, 0xED, 0xFA, 0x45, 0x0B, 0xFF, 0xFF, 0xFF, 0xFF,
+ ST7701_WRITE(st7701, 0xEB, 0x02, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xEC, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xED, 0xFA, 0x45, 0x0B, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xB0, 0x54, 0xAF);
- ST7701_DSI(st7701, 0xEF, 0x08, 0x08, 0x08, 0x45, 0x3F, 0x54);
+ ST7701_WRITE(st7701, 0xEF, 0x08, 0x08, 0x08, 0x45, 0x3F, 0x54);
+ st7701_switch_cmd_bkx(st7701, false, 0);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0x17);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);
+ ST7701_WRITE(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+ msleep(120);
+}
+
+static void rg28xx_gip_sequence(struct st7701 *st7701)
+{
+ st7701_switch_cmd_bkx(st7701, true, 3);
+ ST7701_WRITE(st7701, 0xEF, 0x08);
+
+ st7701_switch_cmd_bkx(st7701, true, 0);
+ ST7701_WRITE(st7701, 0xC3, 0x02, 0x10, 0x02);
+ ST7701_WRITE(st7701, 0xC7, 0x04);
+ ST7701_WRITE(st7701, 0xCC, 0x10);
+
+ st7701_switch_cmd_bkx(st7701, true, 1);
+ ST7701_WRITE(st7701, 0xEE, 0x42);
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x00, 0x02);
+
+ ST7701_WRITE(st7701, 0xE1, 0x04, 0xA0, 0x06, 0xA0, 0x05, 0xA0, 0x07, 0xA0,
+ 0x00, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x22, 0x22);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0C, 0x90, 0xA0, 0xA0, 0x0E, 0x92, 0xA0, 0xA0,
+ 0x08, 0x8C, 0xA0, 0xA0, 0x0A, 0x8E, 0xA0, 0xA0);
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x22, 0x22);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x0D, 0x91, 0xA0, 0xA0, 0x0F, 0x93, 0xA0, 0xA0,
+ 0x09, 0x8D, 0xA0, 0xA0, 0x0B, 0x8F, 0xA0, 0xA0);
+ ST7701_WRITE(st7701, 0xEB, 0x00, 0x00, 0xE4, 0xE4, 0x44, 0x00, 0x40);
+ ST7701_WRITE(st7701, 0xED, 0xFF, 0xF5, 0x47, 0x6F, 0x0B, 0xA1, 0xBA, 0xFF,
+ 0xFF, 0xAB, 0x1A, 0xB0, 0xF6, 0x74, 0x5F, 0xFF);
+ ST7701_WRITE(st7701, 0xEF, 0x08, 0x08, 0x08, 0x45, 0x3F, 0x54);
+
st7701_switch_cmd_bkx(st7701, false, 0);
- ST7701_DSI(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0x17);
- ST7701_DSI(st7701, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);
- ST7701_DSI(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+
+ st7701_switch_cmd_bkx(st7701, true, 3);
+ ST7701_WRITE(st7701, 0xE6, 0x16);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x0E);
+
+ st7701_switch_cmd_bkx(st7701, false, 0);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0x10);
+ ST7701_WRITE(st7701, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);
+
+ st7701_switch_cmd_bkx(st7701, true, 3);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x0C);
+ msleep(10);
+ ST7701_WRITE(st7701, 0xE8, 0x00, 0x00);
+ st7701_switch_cmd_bkx(st7701, false, 0);
}
static int st7701_prepare(struct drm_panel *panel)
@@ -490,7 +551,7 @@ static int st7701_enable(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
- ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_ON, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_DISPLAY_ON, 0x00);
return 0;
}
@@ -499,7 +560,7 @@ static int st7701_disable(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
- ST7701_DSI(st7701, MIPI_DCS_SET_DISPLAY_OFF, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_DISPLAY_OFF, 0x00);
return 0;
}
@@ -508,7 +569,7 @@ static int st7701_unprepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
- ST7701_DSI(st7701, MIPI_DCS_ENTER_SLEEP_MODE, 0x00);
+ ST7701_WRITE(st7701, MIPI_DCS_ENTER_SLEEP_MODE, 0x00);
msleep(st7701->sleep_delay);
@@ -539,7 +600,7 @@ static int st7701_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, desc_mode);
if (!mode) {
- dev_err(&st7701->dsi->dev, "failed to add mode %ux%u@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
desc_mode->hdisplay, desc_mode->vdisplay,
drm_mode_vrefresh(desc_mode));
return -ENOMEM;
@@ -602,62 +663,62 @@ static const struct st7701_panel_desc ts8550b_desc = {
	.panel_sleep_delay = 80, /* panel needs an extra 80 ms for the sleep-out cmd */
.pv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x23),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2b),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x23),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2b),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
},
.nv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x2) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x13),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x7),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x9),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x10),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2c),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0x2) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x7),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x9),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x10),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2c),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
},
.nlinv = 7,
.vop_uv = 4400000,
@@ -703,62 +764,62 @@ static const struct st7701_panel_desc dmt028vghmcmi_1a_desc = {
.panel_sleep_delay = 5, /* panel needs extra 5ms for sleep out cmd */
.pv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
},
.nv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
},
.nlinv = 1,
.vop_uv = 4800000,
@@ -802,62 +863,62 @@ static const struct st7701_panel_desc kd50t048a_desc = {
.panel_sleep_delay = 0,
.pv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x2),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1e),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x23),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x2),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1e),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x23),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
},
.nv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xc),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xc),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x3),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x24),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xc),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xc),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x24),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
},
.nlinv = 1,
.vop_uv = 4887500,
@@ -901,62 +962,62 @@ static const struct st7701_panel_desc rg_arc_desc = {
.panel_sleep_delay = 80,
.pv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1d),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x12),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0a),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x25),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x03),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1d),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x12),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0a),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x25),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x03),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
},
.nv_gamma = {
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1e),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x26),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x15),
-
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
- CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1e),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x26),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x15),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
},
.nlinv = 0,
.vop_uv = 4500000,
@@ -974,42 +1035,148 @@ static const struct st7701_panel_desc rg_arc_desc = {
.gip_sequence = rg_arc_gip_sequence,
};
-static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
+static const struct drm_display_mode rg28xx_mode = {
+ .clock = 22325,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 40,
+ .hsync_end = 480 + 40 + 4,
+ .htotal = 480 + 40 + 4 + 20,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 2,
+ .vsync_end = 640 + 2 + 40,
+ .vtotal = 640 + 2 + 40 + 16,
+
+ .width_mm = 44,
+ .height_mm = 58,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc rg28xx_desc = {
+ .mode = &rg28xx_mode,
+
+ .panel_sleep_delay = 80,
+
+ .pv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nlinv = 7,
+ .vop_uv = 4800000,
+ .vcom_uv = 1512500,
+ .vgh_mv = 15000,
+ .vgl_mv = -11730,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = rg28xx_gip_sequence,
+};
+
+static void st7701_cleanup(void *data)
+{
+ struct st7701 *st7701 = data;
+
+ drm_panel_remove(&st7701->panel);
+ drm_panel_disable(&st7701->panel);
+ drm_panel_unprepare(&st7701->panel);
+}
+
+static int st7701_probe(struct device *dev, int connector_type)
{
const struct st7701_panel_desc *desc;
struct st7701 *st7701;
int ret;
- st7701 = devm_kzalloc(&dsi->dev, sizeof(*st7701), GFP_KERNEL);
+ st7701 = devm_kzalloc(dev, sizeof(*st7701), GFP_KERNEL);
if (!st7701)
return -ENOMEM;
- desc = of_device_get_match_data(&dsi->dev);
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- dsi->format = desc->format;
- dsi->lanes = desc->lanes;
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -ENODEV;
st7701->supplies[0].supply = "VCC";
st7701->supplies[1].supply = "IOVCC";
- ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(st7701->supplies),
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(st7701->supplies),
st7701->supplies);
if (ret < 0)
return ret;
- st7701->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ st7701->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(st7701->reset)) {
- dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+ dev_err(dev, "Couldn't get our reset GPIO\n");
return PTR_ERR(st7701->reset);
}
- ret = of_drm_get_panel_orientation(dsi->dev.of_node, &st7701->orientation);
+ ret = of_drm_get_panel_orientation(dev->of_node, &st7701->orientation);
if (ret < 0)
- return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
+ return dev_err_probe(dev, ret, "Failed to get orientation\n");
- drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
- DRM_MODE_CONNECTOR_DSI);
+ drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
/*
* Once sleep out has been issued, the ST7701 IC is required to wait 120ms
@@ -1028,48 +1195,143 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
drm_panel_add(&st7701->panel);
- mipi_dsi_set_drvdata(dsi, st7701);
- st7701->dsi = dsi;
+ dev_set_drvdata(dev, st7701);
st7701->desc = desc;
- ret = mipi_dsi_attach(dsi);
- if (ret)
- goto err_attach;
+ return devm_add_action_or_reset(dev, st7701_cleanup, st7701);
+}
+
+static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct st7701 *st7701;
+ int err;
+
+ err = st7701_probe(&dsi->dev, DRM_MODE_CONNECTOR_DSI);
+ if (err)
+ return err;
+
+ st7701 = dev_get_drvdata(&dsi->dev);
+ st7701->dsi = dsi;
+ st7701->write_command = st7701_dsi_write;
+
+ if (!st7701->desc->lanes)
+ return dev_err_probe(&dsi->dev, -EINVAL, "This panel is not for MIPI DSI\n");
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->format = st7701->desc->format;
+ dsi->lanes = st7701->desc->lanes;
+
+ err = mipi_dsi_attach(dsi);
+ if (err)
+ return dev_err_probe(&dsi->dev, err, "Failed to init MIPI DSI\n");
return 0;
+}
-err_attach:
- drm_panel_remove(&st7701->panel);
- return ret;
+static int st7701_spi_probe(struct spi_device *spi)
+{
+ struct st7701 *st7701;
+ struct gpio_desc *dc;
+ int err;
+
+ err = st7701_probe(&spi->dev, DRM_MODE_CONNECTOR_DPI);
+ if (err)
+ return err;
+
+ st7701 = dev_get_drvdata(&spi->dev);
+ st7701->write_command = st7701_dbi_write;
+
+ dc = devm_gpiod_get_optional(&spi->dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc))
+ return dev_err_probe(&spi->dev, PTR_ERR(dc), "Failed to get GPIO for D/CX\n");
+
+ err = mipi_dbi_spi_init(spi, &st7701->dbi, dc);
+ if (err)
+ return dev_err_probe(&spi->dev, err, "Failed to init MIPI DBI\n");
+ st7701->dbi.read_commands = NULL;
+
+ return 0;
}
static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
{
- struct st7701 *st7701 = mipi_dsi_get_drvdata(dsi);
-
mipi_dsi_detach(dsi);
- drm_panel_remove(&st7701->panel);
}
-static const struct of_device_id st7701_of_match[] = {
+static const struct of_device_id st7701_dsi_of_match[] = {
{ .compatible = "anbernic,rg-arc-panel", .data = &rg_arc_desc },
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
{ }
};
-MODULE_DEVICE_TABLE(of, st7701_of_match);
+MODULE_DEVICE_TABLE(of, st7701_dsi_of_match);
+
+static const struct of_device_id st7701_spi_of_match[] = {
+ { .compatible = "anbernic,rg28xx-panel", .data = &rg28xx_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, st7701_spi_of_match);
+
+static const struct spi_device_id st7701_spi_ids[] = {
+ { "rg28xx-panel" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, st7701_spi_ids);
static struct mipi_dsi_driver st7701_dsi_driver = {
.probe = st7701_dsi_probe,
.remove = st7701_dsi_remove,
.driver = {
.name = "st7701",
- .of_match_table = st7701_of_match,
+ .of_match_table = st7701_dsi_of_match,
},
};
-module_mipi_dsi_driver(st7701_dsi_driver);
+
+static struct spi_driver st7701_spi_driver = {
+ .probe = st7701_spi_probe,
+ .id_table = st7701_spi_ids,
+ .driver = {
+ .name = "st7701",
+ .of_match_table = st7701_spi_of_match,
+ },
+};
+
+static int __init st7701_driver_init(void)
+{
+ int err;
+
+ if (IS_ENABLED(CONFIG_SPI)) {
+ err = spi_register_driver(&st7701_spi_driver);
+ if (err)
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
+ err = mipi_dsi_driver_register(&st7701_dsi_driver);
+ if (err) {
+ if (IS_ENABLED(CONFIG_SPI))
+ spi_unregister_driver(&st7701_spi_driver);
+ return err;
+ }
+ }
+
+ return 0;
+}
+module_init(st7701_driver_init);
+
+static void __exit st7701_driver_exit(void)
+{
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&st7701_dsi_driver);
+
+ if (IS_ENABLED(CONFIG_SPI))
+ spi_unregister_driver(&st7701_spi_driver);
+}
+module_exit(st7701_driver_exit);
MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_AUTHOR("Hironori KIKUCHI <kikuchan98@gmail.com>");
MODULE_DESCRIPTION("Sitronix ST7701 LCD Panel Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 73ba93ff00fe..217f03569494 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -342,7 +342,7 @@ static const struct backlight_ops acx565akm_bl_ops = {
static int acx565akm_backlight_init(struct acx565akm_panel *lcd)
{
struct backlight_properties props = {
- .power = FB_BLANK_UNBLANK,
+ .power = BACKLIGHT_POWER_ON,
.type = BACKLIGHT_RAW,
};
int ret;
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index f2198fa29735..104b2290560e 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -25,6 +25,12 @@ struct truly_nt35521 {
struct gpio_desc *blen_gpio;
};
+#define NT35521_DCS_SWITCH_PAGE 0xf0
+
+#define nt35521_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, NT35521_DCS_SWITCH_PAGE, \
+ 0x55, 0xaa, 0x52, 0x08, (page))
+
static inline
struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel)
{
@@ -48,7 +54,7 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x80);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x6f, 0x11, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf7, 0x20, 0x00);
@@ -59,7 +65,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbb, 0x11, 0x11);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbc, 0x00, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb6, 0x02);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
+
+ nt35521_switch_page(&dsi_ctx, 0x01);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x09, 0x09);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x09, 0x09);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbc, 0x8c, 0x00);
@@ -71,7 +78,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb4, 0x25, 0x25);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb9, 0x43, 0x43);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xba, 0x24, 0x24);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
+
+ nt35521_switch_page(&dsi_ctx, 0x02);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xee, 0x03);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0,
0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
@@ -103,7 +111,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
+
+ nt35521_switch_page(&dsi_ctx, 0x03);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x22, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x22, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
@@ -122,7 +131,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc5, 0xc0);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc6, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc7, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
+
+ nt35521_switch_page(&dsi_ctx, 0x05);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x17, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x17, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x17, 0x06);
@@ -178,7 +188,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xeb, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xec, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xed, 0x30);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
+
+ nt35521_switch_page(&dsi_ctx, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x31, 0x31);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x31, 0x31);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x2d, 0x2e);
@@ -235,10 +246,12 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x6f, 0x11);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf3, 0x01);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x35, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd9, 0x02, 0x03, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x6c, 0x21);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x35, 0x00);
diff --git a/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
index 0156689f41cd..c0c95355b743 100644
--- a/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
+++ b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
@@ -24,10 +24,10 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#define DSI_REG_MCAP 0xB0
-#define DSI_REG_IS 0xB3 /* Interface Setting */
-#define DSI_REG_IIS 0xB4 /* Interface ID Setting */
-#define DSI_REG_CTRL 0xB6
+#define DSI_REG_MCAP 0xb0
+#define DSI_REG_IS 0xb3 /* Interface Setting */
+#define DSI_REG_IIS 0xb4 /* Interface ID Setting */
+#define DSI_REG_CTRL 0xb6
enum {
IOVCC = 0,
@@ -52,92 +52,55 @@ static inline struct stk_panel *to_stk_panel(struct drm_panel *panel)
static int stk_panel_init(struct stk_panel *stk)
{
struct mipi_dsi_device *dsi = stk->dsi;
- struct device *dev = &stk->dsi->dev;
- int ret;
-
- ret = mipi_dsi_dcs_soft_reset(dsi);
- if (ret < 0) {
- dev_err(dev, "failed to mipi_dsi_dcs_soft_reset: %d\n", ret);
- return ret;
- }
- mdelay(5);
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "failed to set exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 5);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- mipi_dsi_generic_write_seq(dsi, DSI_REG_MCAP, 0x04);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, DSI_REG_MCAP, 0x04);
/* Interface setting, video mode */
- mipi_dsi_generic_write_seq(dsi, DSI_REG_IS, 0x14, 0x08, 0x00, 0x22, 0x00);
- mipi_dsi_generic_write_seq(dsi, DSI_REG_IIS, 0x0C, 0x00);
- mipi_dsi_generic_write_seq(dsi, DSI_REG_CTRL, 0x3A, 0xD3);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, DSI_REG_IS, 0x14, 0x08, 0x00, 0x22, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, DSI_REG_IIS, 0x0c, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, DSI_REG_CTRL, 0x3a, 0xd3);
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x77);
- if (ret < 0) {
- dev_err(dev, "failed to write display brightness: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x77);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ MIPI_DCS_WRITE_MEMORY_START);
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
- if (ret < 0) {
- dev_err(dev, "failed to set pixel format: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x77);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0, stk->mode->hdisplay - 1);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0, stk->mode->vdisplay - 1);
- ret = mipi_dsi_dcs_set_column_address(dsi, 0, stk->mode->hdisplay - 1);
- if (ret < 0) {
- dev_err(dev, "failed to set column address: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_page_address(dsi, 0, stk->mode->vdisplay - 1);
- if (ret < 0) {
- dev_err(dev, "failed to set page address: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return dsi_ctx.accum_err;
}
static int stk_panel_on(struct stk_panel *stk)
{
struct mipi_dsi_device *dsi = stk->dsi;
- struct device *dev = &stk->dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- dev_err(dev, "failed to set display on: %d\n", ret);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- mdelay(20);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- return ret;
+ return dsi_ctx.accum_err;
}
static void stk_panel_off(struct stk_panel *stk)
{
struct mipi_dsi_device *dsi = stk->dsi;
- struct device *dev = &stk->dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(dev, "failed to set display off: %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- dev_err(dev, "failed to enter sleep mode: %d\n", ret);
-
- msleep(100);
+ mipi_dsi_msleep(&dsi_ctx, 100);
}
static int stk_panel_unprepare(struct drm_panel *panel)
@@ -155,7 +118,6 @@ static int stk_panel_unprepare(struct drm_panel *panel)
static int stk_panel_prepare(struct drm_panel *panel)
{
struct stk_panel *stk = to_stk_panel(panel);
- struct device *dev = &stk->dsi->dev;
int ret;
gpiod_set_value(stk->reset_gpio, 0);
@@ -175,16 +137,12 @@ static int stk_panel_prepare(struct drm_panel *panel)
gpiod_set_value(stk->reset_gpio, 1);
mdelay(10);
ret = stk_panel_init(stk);
- if (ret < 0) {
- dev_err(dev, "failed to init panel: %d\n", ret);
+ if (ret < 0)
goto poweroff;
- }
ret = stk_panel_on(stk);
- if (ret < 0) {
- dev_err(dev, "failed to set panel on: %d\n", ret);
+ if (ret < 0)
goto poweroff;
- }
return 0;
@@ -250,18 +208,15 @@ static int dsi_dcs_bl_get_brightness(struct backlight_device *bl)
static int dsi_dcs_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {.dsi = dsi};
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
- if (ret < 0) {
- dev_err(dev, "failed to set DSI control: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, bl->props.brightness);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- return 0;
+ return dsi_ctx.accum_err;
}
static const struct backlight_ops dsi_bl_ops = {
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index 540099253e1b..17b8defe79c1 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -19,7 +19,13 @@ struct visionox_vtdr6130 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
struct gpio_desc *reset_gpio;
- struct regulator_bulk_data supplies[3];
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data visionox_vtdr6130_supplies[] = {
+ { .supply = "vddio" },
+ { .supply = "vci" },
+ { .supply = "vdd" },
};
static inline struct visionox_vtdr6130 *to_visionox_vtdr6130(struct drm_panel *panel)
@@ -40,123 +46,106 @@ static void visionox_vtdr6130_reset(struct visionox_vtdr6130 *ctx)
static int visionox_vtdr6130_on(struct visionox_vtdr6130 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret)
- return ret;
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0x6c, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x6f, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x70,
- 0x12, 0x00, 0x00, 0xab, 0x30, 0x80, 0x09, 0x60, 0x04,
- 0x38, 0x00, 0x28, 0x02, 0x1c, 0x02, 0x1c, 0x02, 0x00,
- 0x02, 0x0e, 0x00, 0x20, 0x03, 0xdd, 0x00, 0x07, 0x00,
- 0x0c, 0x02, 0x77, 0x02, 0x8b, 0x18, 0x00, 0x10, 0xf0,
- 0x07, 0x10, 0x20, 0x00, 0x06, 0x0f, 0x0f, 0x33, 0x0e,
- 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x69, 0x70, 0x77,
- 0x79, 0x7b, 0x7d, 0x7e, 0x02, 0x02, 0x22, 0x00, 0x2a,
- 0x40, 0x2a, 0xbe, 0x3a, 0xfc, 0x3a, 0xfa, 0x3a, 0xf8,
- 0x3b, 0x38, 0x3b, 0x78, 0x3b, 0xb6, 0x4b, 0xb6, 0x4b,
- 0xf4, 0x4b, 0xf4, 0x6c, 0x34, 0x84, 0x74, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xb1,
- 0x01, 0x38, 0x00, 0x14, 0x00, 0x1c, 0x00, 0x01, 0x66,
- 0x00, 0x14, 0x00, 0x14, 0x00, 0x01, 0x66, 0x00, 0x14,
- 0x05, 0xcc, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0xce,
- 0x09, 0x11, 0x09, 0x11, 0x08, 0xc1, 0x07, 0xfa, 0x05,
- 0xa4, 0x00, 0x3c, 0x00, 0x34, 0x00, 0x24, 0x00, 0x0c,
- 0x00, 0x0c, 0x04, 0x00, 0x35);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x03, 0x33);
- mipi_dsi_dcs_write_seq(dsi, 0xb4,
- 0x00, 0x33, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00,
- 0x3e, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb5,
- 0x00, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x06, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0x00, 0x08, 0x09, 0x09, 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xbc,
- 0x10, 0x00, 0x00, 0x06, 0x11, 0x09, 0x3b, 0x09, 0x47,
- 0x09, 0x47, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xbe,
- 0x10, 0x10, 0x00, 0x08, 0x22, 0x09, 0x19, 0x09, 0x25,
- 0x09, 0x25, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0xfa, 0x08, 0x08, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x81);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0xf9, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x51, 0x83);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xf8, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x9a);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(20);
-
- return 0;
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx,
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x12, 0x00, 0x00, 0xab,
+ 0x30, 0x80, 0x09, 0x60, 0x04, 0x38, 0x00,
+ 0x28, 0x02, 0x1c, 0x02, 0x1c, 0x02, 0x00,
+ 0x02, 0x0e, 0x00, 0x20, 0x03, 0xdd, 0x00,
+ 0x07, 0x00, 0x0c, 0x02, 0x77, 0x02, 0x8b,
+ 0x18, 0x00, 0x10, 0xf0, 0x07, 0x10, 0x20,
+ 0x00, 0x06, 0x0f, 0x0f, 0x33, 0x0e, 0x1c,
+ 0x2a, 0x38, 0x46, 0x54, 0x62, 0x69, 0x70,
+ 0x77, 0x79, 0x7b, 0x7d, 0x7e, 0x02, 0x02,
+ 0x22, 0x00, 0x2a, 0x40, 0x2a, 0xbe, 0x3a,
+ 0xfc, 0x3a, 0xfa, 0x3a, 0xf8, 0x3b, 0x38,
+ 0x3b, 0x78, 0x3b, 0xb6, 0x4b, 0xb6, 0x4b,
+ 0xf4, 0x4b, 0xf4, 0x6c, 0x34, 0x84, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xaa, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x01, 0x38, 0x00, 0x14,
+ 0x00, 0x1c, 0x00, 0x01, 0x66, 0x00, 0x14,
+ 0x00, 0x14, 0x00, 0x01, 0x66, 0x00, 0x14,
+ 0x05, 0xcc, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xaa, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xce, 0x09, 0x11, 0x09, 0x11,
+ 0x08, 0xc1, 0x07, 0xfa, 0x05, 0xa4, 0x00,
+ 0x3c, 0x00, 0x34, 0x00, 0x24, 0x00, 0x0c,
+ 0x00, 0x0c, 0x04, 0x00, 0x35);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xaa, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x03, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x33, 0x00, 0x00,
+ 0x00, 0x3e, 0x00, 0x00, 0x00, 0x3e, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0x09, 0x09, 0x09,
+ 0x09, 0x09, 0x09, 0x06, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0x00, 0x08, 0x09,
+ 0x09, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbc, 0x10, 0x00, 0x00, 0x06,
+ 0x11, 0x09, 0x3b, 0x09, 0x47, 0x09, 0x47,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbe, 0x10, 0x10, 0x00, 0x08,
+ 0x22, 0x09, 0x19, 0x09, 0x25, 0x09, 0x25,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfa, 0x08, 0x08, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xaa, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf9, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x51, 0x83);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x9a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x5a, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
}
-static int visionox_vtdr6130_off(struct visionox_vtdr6130 *ctx)
+static void visionox_vtdr6130_off(struct visionox_vtdr6130 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int visionox_vtdr6130_prepare(struct drm_panel *panel)
{
struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_vtdr6130_supplies),
ctx->supplies);
if (ret < 0)
return ret;
@@ -165,9 +154,9 @@ static int visionox_vtdr6130_prepare(struct drm_panel *panel)
ret = visionox_vtdr6130_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_vtdr6130_supplies),
+ ctx->supplies);
return ret;
}
@@ -177,16 +166,13 @@ static int visionox_vtdr6130_prepare(struct drm_panel *panel)
static int visionox_vtdr6130_unprepare(struct drm_panel *panel)
{
struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = visionox_vtdr6130_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ visionox_vtdr6130_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_vtdr6130_supplies),
+ ctx->supplies);
return 0;
}
@@ -266,12 +252,10 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->supplies[0].supply = "vddio";
- ctx->supplies[1].supply = "vci";
- ctx->supplies[2].supply = "vdd";
-
- ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
- ctx->supplies);
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ARRAY_SIZE(visionox_vtdr6130_supplies),
+ visionox_vtdr6130_supplies,
+ &ctx->supplies);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a61ef0af9a4e..df49d37d0e7e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -727,7 +727,7 @@ panfrost_reset(struct panfrost_device *pfdev,
/* Restart the schedulers */
for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_start(&pfdev->js->queue[i].sched, true);
+ drm_sched_start(&pfdev->js->queue[i].sched);
/* Re-enable job interrupts now that everything has been restarted. */
job_write(pfdev, JOB_INT_MASK,
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index cc6e13a97783..bbc12728437f 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -833,7 +833,7 @@ static void panthor_vm_stop(struct panthor_vm *vm)
static void panthor_vm_start(struct panthor_vm *vm)
{
- drm_sched_start(&vm->sched, true);
+ drm_sched_start(&vm->sched);
}
/**
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 12b272a912f8..91a31b70c037 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2538,7 +2538,7 @@ static void queue_start(struct panthor_queue *queue)
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);
- drm_sched_start(&queue->scheduler, true);
+ drm_sched_start(&queue->scheduler);
}
static void panthor_group_stop(struct panthor_group *group)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 03e6871b3065..d1c5e471bdca 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -249,7 +249,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
*/
if (bd->props.brightness == 0)
bd->props.brightness = RADEON_MAX_BL_LEVEL;
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
DRM_INFO("radeon atom DIG backlight initialized\n");
@@ -2179,7 +2179,7 @@ assigned:
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b5e96a8fc2c1..11a492f21157 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7585,7 +7585,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
+ drm_handle_vblank(rdev_to_drm(rdev), 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7615,7 +7615,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
+ drm_handle_vblank(rdev_to_drm(rdev), 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7645,7 +7645,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
+ drm_handle_vblank(rdev_to_drm(rdev), 2);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7675,7 +7675,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
+ drm_handle_vblank(rdev_to_drm(rdev), 3);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7705,7 +7705,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
+ drm_handle_vblank(rdev_to_drm(rdev), 4);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -7735,7 +7735,7 @@ restart_ih:
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
+ drm_handle_vblank(rdev_to_drm(rdev), 5);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -8581,7 +8581,7 @@ int cik_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 4c06f47453fd..d6ab93ed9ec4 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -91,7 +91,7 @@ struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
pin = &rdev->audio.pin[i];
pin_count = 0;
- list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) {
if (radeon_encoder_is_digital(encoder)) {
radeon_encoder = to_radeon_encoder(encoder);
dig = radeon_encoder->enc_priv;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c634dc28e6c3..bc4ab71613a5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1673,7 +1673,7 @@ void evergreen_pm_misc(struct radeon_device *rdev)
*/
void evergreen_pm_prepare(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -1698,7 +1698,7 @@ void evergreen_pm_prepare(struct radeon_device *rdev)
*/
void evergreen_pm_finish(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -1763,7 +1763,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
*/
void evergreen_hpd_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned enabled = 0;
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
@@ -1804,7 +1804,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
*/
void evergreen_hpd_fini(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned disabled = 0;
@@ -4753,7 +4753,7 @@ restart_ih:
event_name = "vblank";
if (rdev->irq.crtc_vblank_int[crtc_idx]) {
- drm_handle_vblank(rdev->ddev, crtc_idx);
+ drm_handle_vblank(rdev_to_drm(rdev), crtc_idx);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -5211,7 +5211,7 @@ int evergreen_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
/* initialize AGP */
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index e5577d2a19ef..a46613283393 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -397,7 +397,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
- unsigned long offset;
+ u64 offset;
int r;
mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
@@ -435,14 +435,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
return r;
}
- offset = track->cb_color_bo_offset[id] << 8;
+ offset = (u64)track->cb_color_bo_offset[id] << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d cb[%d] bo base %llu not aligned with %ld\n",
__func__, __LINE__, id, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->cb_color_bo[id])) {
/* old ddx are broken they allocate bo with w*h*bpp but
* program slice with ALIGN(h, 8), catch this and patch
@@ -450,14 +450,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
*/
if (!surf.mode) {
uint32_t *ib = p->ib.ptr;
- unsigned long tmp, nby, bsize, size, min = 0;
+ u64 tmp, nby, bsize, size, min = 0;
/* find the height the ddx wants */
if (surf.nby > 8) {
min = surf.nby - 8;
}
bsize = radeon_bo_size(track->cb_color_bo[id]);
- tmp = track->cb_color_bo_offset[id] << 8;
+ tmp = (u64)track->cb_color_bo_offset[id] << 8;
for (nby = surf.nby; nby > min; nby--) {
size = nby * surf.nbx * surf.bpe * surf.nsamples;
if ((tmp + size * mslice) <= bsize) {
@@ -469,7 +469,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
slice = ((nby * surf.nbx) / 64) - 1;
if (!evergreen_surface_check(p, &surf, "cb")) {
/* check if this one works */
- tmp += surf.layer_size * mslice;
+ tmp += (u64)surf.layer_size * mslice;
if (tmp <= bsize) {
ib[track->cb_color_slice_idx[id]] = slice;
goto old_ddx_ok;
@@ -478,9 +478,9 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
}
}
dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
- "offset %d, max layer %d, bo size %ld, slice %d)\n",
+ "offset %llu, max layer %d, bo size %ld, slice %d)\n",
__func__, __LINE__, id, surf.layer_size,
- track->cb_color_bo_offset[id] << 8, mslice,
+ (u64)track->cb_color_bo_offset[id] << 8, mslice,
radeon_bo_size(track->cb_color_bo[id]), slice);
dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
__func__, __LINE__, surf.nbx, surf.nby,
@@ -564,7 +564,7 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
- unsigned long offset;
+ u64 offset;
int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
@@ -610,18 +610,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
return r;
}
- offset = track->db_s_read_offset << 8;
+ offset = (u64)track->db_s_read_offset << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_s_read_bo)) {
dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
+ "offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_s_read_offset << 8, mslice,
+ (u64)track->db_s_read_offset << 8, mslice,
radeon_bo_size(track->db_s_read_bo));
dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
__func__, __LINE__, track->db_depth_size,
@@ -629,18 +629,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
return -EINVAL;
}
- offset = track->db_s_write_offset << 8;
+ offset = (u64)track->db_s_write_offset << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_s_write_bo)) {
dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
+ "offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_s_write_offset << 8, mslice,
+ (u64)track->db_s_write_offset << 8, mslice,
radeon_bo_size(track->db_s_write_bo));
return -EINVAL;
}
@@ -661,7 +661,7 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
struct evergreen_cs_track *track = p->track;
struct eg_surface surf;
unsigned pitch, slice, mslice;
- unsigned long offset;
+ u64 offset;
int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
@@ -708,34 +708,34 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
return r;
}
- offset = track->db_z_read_offset << 8;
+ offset = (u64)track->db_z_read_offset << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_z_read_bo)) {
dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
+ "offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_z_read_offset << 8, mslice,
+ (u64)track->db_z_read_offset << 8, mslice,
radeon_bo_size(track->db_z_read_bo));
return -EINVAL;
}
- offset = track->db_z_write_offset << 8;
+ offset = (u64)track->db_z_write_offset << 8;
if (offset & (surf.base_align - 1)) {
- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+ dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
return -EINVAL;
}
- offset += surf.layer_size * mslice;
+ offset += (u64)surf.layer_size * mslice;
if (offset > radeon_bo_size(track->db_z_write_bo)) {
dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
- "offset %ld, max layer %d, bo size %ld)\n",
+ "offset %llu, max layer %d, bo size %ld)\n",
__func__, __LINE__, surf.layer_size,
- (unsigned long)track->db_z_write_offset << 8, mslice,
+ (u64)track->db_z_write_offset << 8, mslice,
radeon_bo_size(track->db_z_write_bo));
return -EINVAL;
}
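Note on the evergreen_cs.c hunks above: they are all one pattern. cb_color_bo_offset[], db_s_*_offset and db_z_*_offset are 32-bit values, so `offset << 8` is evaluated in 32-bit arithmetic and can wrap to a small number before the bounds check against radeon_bo_size(), even when the result lands in a wider variable. Casting to u64 before the shift (and before the layer_size * mslice additions) closes the hole. A minimal userspace sketch of the failure mode, with a made-up offset value:

/* cc -o shift shift.c && ./shift */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bo_offset = 0x02000000;	/* offset register, 256-byte units */

	/* Pre-fix shape: the shift happens in 32-bit arithmetic and wraps
	 * to 0 even though the destination is 64 bits wide. */
	uint64_t wrapped = bo_offset << 8;

	/* Post-fix shape: widen the operand first, then shift. */
	uint64_t widened = (uint64_t)bo_offset << 8;

	printf("wrapped=%llu widened=%llu\n",	/* 0 vs 8589934592 */
	       (unsigned long long)wrapped,
	       (unsigned long long)widened);
	return 0;
}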
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 77aee99e473a..3890911fe693 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2360,7 +2360,7 @@ int cayman_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
/* initialize memory controller */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0b1e19345f43..80703417d8a1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -459,7 +459,7 @@ void r100_pm_misc(struct radeon_device *rdev)
*/
void r100_pm_prepare(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -490,7 +490,7 @@ void r100_pm_prepare(struct radeon_device *rdev)
*/
void r100_pm_finish(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -603,7 +603,7 @@ void r100_hpd_set_polarity(struct radeon_device *rdev,
*/
void r100_hpd_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned enable = 0;
@@ -626,7 +626,7 @@ void r100_hpd_init(struct radeon_device *rdev)
*/
void r100_hpd_fini(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned disable = 0;
@@ -798,7 +798,7 @@ int r100_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
+ drm_handle_vblank(rdev_to_drm(rdev), 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -807,7 +807,7 @@ int r100_irq_process(struct radeon_device *rdev)
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
+ drm_handle_vblank(rdev_to_drm(rdev), 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -1016,45 +1016,65 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
DRM_DEBUG_KMS("\n");
- if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
- (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
- (rdev->family == CHIP_RS200)) {
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RV200:
+ case CHIP_RS100:
+ case CHIP_RS200:
DRM_INFO("Loading R100 Microcode\n");
fw_name = FIRMWARE_R100;
- } else if ((rdev->family == CHIP_R200) ||
- (rdev->family == CHIP_RV250) ||
- (rdev->family == CHIP_RV280) ||
- (rdev->family == CHIP_RS300)) {
+ break;
+
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RV280:
+ case CHIP_RS300:
DRM_INFO("Loading R200 Microcode\n");
fw_name = FIRMWARE_R200;
- } else if ((rdev->family == CHIP_R300) ||
- (rdev->family == CHIP_R350) ||
- (rdev->family == CHIP_RV350) ||
- (rdev->family == CHIP_RV380) ||
- (rdev->family == CHIP_RS400) ||
- (rdev->family == CHIP_RS480)) {
+ break;
+
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
DRM_INFO("Loading R300 Microcode\n");
fw_name = FIRMWARE_R300;
- } else if ((rdev->family == CHIP_R420) ||
- (rdev->family == CHIP_R423) ||
- (rdev->family == CHIP_RV410)) {
+ break;
+
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
DRM_INFO("Loading R400 Microcode\n");
fw_name = FIRMWARE_R420;
- } else if ((rdev->family == CHIP_RS690) ||
- (rdev->family == CHIP_RS740)) {
+ break;
+
+ case CHIP_RS690:
+ case CHIP_RS740:
DRM_INFO("Loading RS690/RS740 Microcode\n");
fw_name = FIRMWARE_RS690;
- } else if (rdev->family == CHIP_RS600) {
+ break;
+
+ case CHIP_RS600:
DRM_INFO("Loading RS600 Microcode\n");
fw_name = FIRMWARE_RS600;
- } else if ((rdev->family == CHIP_RV515) ||
- (rdev->family == CHIP_R520) ||
- (rdev->family == CHIP_RV530) ||
- (rdev->family == CHIP_R580) ||
- (rdev->family == CHIP_RV560) ||
- (rdev->family == CHIP_RV570)) {
+ break;
+
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_R580:
+ case CHIP_RV560:
+ case CHIP_RV570:
DRM_INFO("Loading R500 Microcode\n");
fw_name = FIRMWARE_R520;
+ break;
+
+ default:
+ DRM_ERROR("Unsupported Radeon family %u\n", rdev->family);
+ return -EINVAL;
}
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
@@ -1471,7 +1491,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = R100_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
+ crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
@@ -3059,7 +3079,7 @@ DEFINE_SHOW_ATTRIBUTE(r100_debugfs_mc_info);
void r100_debugfs_rbbm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r100_rbbm_info", 0444, root, rdev,
&r100_debugfs_rbbm_info_fops);
@@ -3069,7 +3089,7 @@ void r100_debugfs_rbbm_init(struct radeon_device *rdev)
void r100_debugfs_cp_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r100_cp_ring_info", 0444, root, rdev,
&r100_debugfs_cp_ring_info_fops);
@@ -3081,7 +3101,7 @@ void r100_debugfs_cp_init(struct radeon_device *rdev)
void r100_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r100_mc_info", 0444, root, rdev,
&r100_debugfs_mc_info_fops);
@@ -3947,7 +3967,7 @@ int r100_resume(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* post */
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
/* Resume clock after posting */
r100_clock_startup(rdev);
/* Initialize surface registers */
@@ -4056,7 +4076,7 @@ int r100_init(struct radeon_device *rdev)
/* Set asic errata */
r100_errata(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
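The r100.c microcode hunk is a mechanical conversion of a long if/else-if chain on rdev->family into a switch, but it also adds behaviour the chain never had: an explicit default, so an unsupported family now fails early with -EINVAL instead of falling through with fw_name left NULL. Reduced to its shape, with two families standing in for the full table:

/* Sketch only; the CHIP_* values stand in for the driver's full set. */
enum chip_family { CHIP_R100, CHIP_R200 };

static const char *cp_fw_name(enum chip_family family)
{
	switch (family) {
	case CHIP_R100:
		return "radeon/R100_cp.bin";
	case CHIP_R200:
		return "radeon/R200_cp.bin";
	default:
		return NULL;	/* caller maps this to -EINVAL */
	}
}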
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1620f534f55f..05c13102a8cb 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -616,7 +616,7 @@ DEFINE_SHOW_ATTRIBUTE(rv370_debugfs_pcie_gart_info);
static void rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("rv370_pcie_gart_info", 0444, root, rdev,
&rv370_debugfs_pcie_gart_info_fops);
@@ -1452,7 +1452,7 @@ int r300_resume(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* post */
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
/* Resume clock after posting */
r300_clock_startup(rdev);
/* Initialize surface registers */
@@ -1538,7 +1538,7 @@ int r300_init(struct radeon_device *rdev)
/* Set asic errata */
r300_errata(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index a979662eaa73..9a31cdec6415 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -322,7 +322,7 @@ int r420_resume(struct radeon_device *rdev)
if (rdev->is_atom_bios) {
atom_asic_init(rdev->mode_info.atom_context);
} else {
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
}
/* Resume clock after posting */
r420_clock_resume(rdev);
@@ -414,7 +414,7 @@ int r420_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
@@ -493,7 +493,7 @@ DEFINE_SHOW_ATTRIBUTE(r420_debugfs_pipes_info);
void r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r420_pipes_info", 0444, root, rdev,
&r420_debugfs_pipes_info_fops);
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 6cbcaa845192..08e127b3249a 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -287,7 +287,7 @@ int r520_init(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
}
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 087d41e370fd..8b62f7faa5b9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -950,7 +950,7 @@ void r600_hpd_set_polarity(struct radeon_device *rdev,
void r600_hpd_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned enable = 0;
@@ -1017,7 +1017,7 @@ void r600_hpd_init(struct radeon_device *rdev)
void r600_hpd_fini(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned disable = 0;
@@ -3280,7 +3280,7 @@ int r600_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
if (rdev->flags & RADEON_IS_AGP) {
@@ -4136,7 +4136,7 @@ restart_ih:
DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
+ drm_handle_vblank(rdev_to_drm(rdev), 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -4166,7 +4166,7 @@ restart_ih:
DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
+ drm_handle_vblank(rdev_to_drm(rdev), 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -4358,7 +4358,7 @@ DEFINE_SHOW_ATTRIBUTE(r600_debugfs_mc_info);
static void r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("r600_mc_info", 0444, root, rdev,
&r600_debugfs_mc_info_fops);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 6cf54a747749..1b2d31c4d77c 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -884,7 +884,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = R600_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
+ crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 64980a61d38a..81d58ef667dd 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -153,7 +153,7 @@ void r600_dpm_print_ps_status(struct radeon_device *rdev,
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 vblank_in_pixels;
@@ -180,7 +180,7 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 vrefresh = 0;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f3551ebaa2f0..661f374f5f27 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -116,7 +116,7 @@ void r600_audio_update_hdmi(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct r600_audio_pin audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
bool changed = false;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 0999c8eaae94..fd8a4513025f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2297,7 +2297,7 @@ typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
struct radeon_device {
struct device *dev;
- struct drm_device *ddev;
+ struct drm_device ddev;
struct pci_dev *pdev;
#ifdef __alpha__
struct pci_controller *hose;
@@ -2476,6 +2476,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
+static inline struct drm_device *rdev_to_drm(struct radeon_device *rdev)
+{
+ return &rdev->ddev;
+}
+
/*
* Cast helper
*/
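This radeon.h hunk is the pivot of the whole series: drm_device stops being a separately allocated object behind an rdev->ddev pointer and becomes embedded in radeon_device, with rdev_to_drm() as the single accessor every other file is converted to. With an embedded member, both directions of the lookup are plain pointer arithmetic. A standalone sketch of the idea; drm_to_rdev() here is hypothetical, since the driver keeps using dev_private for the reverse path:

#include <stddef.h>

struct drm_device { int stub; };		/* stand-in for the real struct */

struct radeon_device {
	struct drm_device ddev;			/* embedded, not a pointer */
};

static inline struct drm_device *rdev_to_drm(struct radeon_device *rdev)
{
	return &rdev->ddev;			/* never NULL, no extra allocation */
}

/* Hypothetical reverse helper; same arithmetic as the kernel's container_of(). */
static inline struct radeon_device *drm_to_rdev(struct drm_device *ddev)
{
	return (struct radeon_device *)((char *)ddev -
					offsetof(struct radeon_device, ddev));
}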
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 603a78e41ba5..22ce61bdfc06 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -405,11 +405,11 @@ static int radeon_atif_handler(struct radeon_device *rdev,
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if ((rdev->flags & RADEON_IS_PX) &&
radeon_atpx_dgpu_req_power_for_displays()) {
- pm_runtime_get_sync(rdev->ddev->dev);
+ pm_runtime_get_sync(rdev_to_drm(rdev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(rdev->ddev);
- pm_runtime_mark_last_busy(rdev->ddev->dev);
- pm_runtime_put_autosuspend(rdev->ddev->dev);
+ drm_helper_hpd_irq_event(rdev_to_drm(rdev));
+ pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
+ pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
}
}
/* TODO: check other events */
@@ -736,7 +736,7 @@ int radeon_acpi_init(struct radeon_device *rdev)
struct radeon_encoder *target = NULL;
/* Find the encoder controlling the brightness */
- list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
+ list_for_each_entry(tmp, &rdev_to_drm(rdev)->mode_config.encoder_list,
head) {
struct radeon_encoder *enc = to_radeon_encoder(tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index a3d749e350f9..89d7b0e9e79f 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -161,7 +161,7 @@ struct radeon_agp_head *radeon_agp_head_init(struct drm_device *dev)
static int radeon_agp_head_acquire(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (!rdev->agp)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 10793a433bf5..81a0a91921b9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -187,7 +187,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
- rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
+ rdev->i2c_bus[i] = radeon_i2c_create(rdev_to_drm(rdev), &i2c, stmp);
}
gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
@@ -1716,27 +1716,25 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
- struct edid *edid;
- int edid_size =
- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
- edid = kmalloc(edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
- fake_edid_record->ucFakeEDIDLength);
-
- if (drm_edid_is_valid(edid)) {
- rdev->mode_info.bios_hardcoded_edid = edid;
- rdev->mode_info.bios_hardcoded_edid_size = edid_size;
- } else
- kfree(edid);
- }
+ const struct drm_edid *edid;
+ int edid_size;
+
+ if (fake_edid_record->ucFakeEDIDLength == 128)
+ edid_size = fake_edid_record->ucFakeEDIDLength;
+ else
+ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+ edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size);
+ if (drm_edid_valid(edid))
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ else
+ drm_edid_free(edid);
+ record += struct_size(fake_edid_record,
+ ucFakeEDIDString,
+ edid_size);
+ } else {
+ /* empty fake edid record must be 3 bytes long */
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
}
- record += fake_edid_record->ucFakeEDIDLength ?
- struct_size(fake_edid_record,
- ucFakeEDIDString,
- fake_edid_record->ucFakeEDIDLength) :
- /* empty fake edid record must be 3 bytes long */
- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
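The fake-EDID hunk above also spells out a quirk the old max()-based sizing glossed over: ucFakeEDIDLength appears to hold either a byte count (when the value is exactly 128) or a count of 128-byte EDID blocks, and the record pointer now advances by struct_size() of the computed byte length in both cases. The sizing rule in isolation, as a hypothetical helper:

/* Hypothetical helper restating the ATOM sizing rule used above. */
static int fake_edid_bytes(unsigned char len_field)
{
	/* Exactly 128 appears to mean "length in bytes"; any other
	 * non-zero value is a count of 128-byte EDID blocks. */
	if (len_field == 128)
		return 128;
	return len_field * 128;
}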
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 0bcd767b9f47..47aa06a9a942 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -196,7 +196,7 @@ static void radeon_audio_enable(struct radeon_device *rdev,
return;
if (rdev->mode_info.mode_config_initialized) {
- list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) {
if (radeon_encoder_is_digital(encoder)) {
radeon_encoder = to_radeon_encoder(encoder);
dig = radeon_encoder->enc_priv;
@@ -760,7 +760,7 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port,
if (!rdev->audio.enabled || !rdev->mode_info.mode_config_initialized)
return 0;
- list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+ list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) {
if (!radeon_encoder_is_digital(encoder))
continue;
radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 6952b1273b0f..df8d7f56b028 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -370,27 +370,22 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
int edid_info, size;
- struct edid *edid;
+ const struct drm_edid *edid;
unsigned char *raw;
- edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+ edid_info = combios_get_table_offset(rdev_to_drm(rdev), COMBIOS_HARDCODED_EDID_TABLE);
if (!edid_info)
return false;
raw = rdev->bios + edid_info;
size = EDID_LENGTH * (raw[0x7e] + 1);
- edid = kmalloc(size, GFP_KERNEL);
- if (edid == NULL)
- return false;
-
- memcpy((unsigned char *)edid, raw, size);
+ edid = drm_edid_alloc(raw, size);
- if (!drm_edid_is_valid(edid)) {
- kfree(edid);
+ if (!drm_edid_valid(edid)) {
+ drm_edid_free(edid);
return false;
}
rdev->mode_info.bios_hardcoded_edid = edid;
- rdev->mode_info.bios_hardcoded_edid_size = size;
return true;
}
@@ -398,18 +393,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
struct edid *
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
- struct edid *edid;
-
- if (rdev->mode_info.bios_hardcoded_edid) {
- edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((unsigned char *)edid,
- (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
- rdev->mode_info.bios_hardcoded_edid_size);
- return edid;
- }
- }
- return NULL;
+ return drm_edid_duplicate(drm_edid_raw(rdev->mode_info.bios_hardcoded_edid));
}
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
@@ -642,7 +626,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct radeon_i2c_bus_rec i2c;
u16 offset;
u8 id, blocks, clk, data;
@@ -670,7 +654,7 @@ static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct r
void radeon_combios_i2c_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct radeon_i2c_bus_rec i2c;
/* actual hw pads
@@ -812,7 +796,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
bool radeon_combios_sideport_present(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
u16 igp_info;
/* sideport is AMD only */
@@ -915,7 +899,7 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
@@ -2637,7 +2621,7 @@ static const char *thermal_controller_names[] = {
void radeon_combios_get_power_modes(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
u16 offset, misc, misc2 = 0;
u8 rev, tmp;
int state_index = 0;
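Both BIOS-EDID paths (radeon_atombios.c earlier and radeon_combios.c here) retire the hand-rolled kmalloc/memcpy/drm_edid_is_valid/kfree sequence in favour of the opaque struct drm_edid, which tracks its own length and therefore also retires bios_hardcoded_edid_size. A sketch of the lifecycle contract these hunks rely on, using the helpers from drm_edid.h:

#include <drm/drm_edid.h>

/* Sketch of the ownership rules, not driver code. */
static const struct drm_edid *take_bios_edid(const void *raw, size_t size)
{
	/* drm_edid_alloc() copies the raw bytes; it can return NULL. */
	const struct drm_edid *edid = drm_edid_alloc(raw, size);

	/* drm_edid_valid() treats NULL as invalid, so one test covers
	 * both allocation failure and a corrupt blob. */
	if (!drm_edid_valid(edid)) {
		drm_edid_free(edid);	/* NULL-safe */
		return NULL;
	}
	return edid;			/* owner ends with drm_edid_free() */
}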
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 69693ba5949e..528a8f3677c2 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -505,6 +505,9 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ if (!mode)
+ continue;
+
drm_mode_probed_add(connector, mode);
}
}
@@ -1056,7 +1059,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
*/
if ((!rdev->is_atom_bios) &&
(ret == connector_status_disconnected) &&
- rdev->mode_info.bios_hardcoded_edid_size) {
+ rdev->mode_info.bios_hardcoded_edid) {
ret = connector_status_connected;
}
@@ -1389,7 +1392,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
out:
if ((!rdev->is_atom_bios) &&
(ret == connector_status_disconnected) &&
- rdev->mode_info.bios_hardcoded_edid_size) {
+ rdev->mode_info.bios_hardcoded_edid) {
radeon_connector->use_digital = true;
ret = connector_status_connected;
}
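The radeon_connectors.c hunk plugs a plain NULL dereference: drm_cvt_mode() allocates a drm_display_mode and returns NULL on failure, and the old loop handed the result straight to drm_mode_probed_add(). Skipping the entry keeps the remaining common modes usable. The guarded loop in reduced form (common_modes[] is the driver's static table):

/* Reduced form of radeon_add_common_modes() after the fix. */
static void add_common_modes_sketch(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	int i;

	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
				    60, false, false, false);
		if (!mode)	/* allocation failed: skip, don't dereference */
			continue;

		drm_mode_probed_add(connector, mode);
	}
}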
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index afbb3a80c0c6..554b236c2328 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -760,7 +760,7 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
if (rdev->is_atom_bios)
atom_asic_init(rdev->mode_info.atom_context);
else
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
return true;
} else {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
@@ -980,7 +980,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = rdev->ddev;
+ atom_card_info->dev = rdev_to_drm(rdev);
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
/* needed for iio ops */
@@ -1005,7 +1005,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
mutex_init(&rdev->mode_info.atom_context->mutex);
mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
- radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+ radeon_atom_initialize_bios_scratch_regs(rdev_to_drm(rdev));
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
}
@@ -1049,7 +1049,7 @@ void radeon_atombios_fini(struct radeon_device *rdev)
*/
int radeon_combios_init(struct radeon_device *rdev)
{
- radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
+ radeon_combios_initialize_bios_scratch_regs(rdev_to_drm(rdev));
return 0;
}
@@ -1285,9 +1285,6 @@ int radeon_device_init(struct radeon_device *rdev,
bool runtime = false;
rdev->shutdown = false;
- rdev->dev = &pdev->dev;
- rdev->ddev = ddev;
- rdev->pdev = pdev;
rdev->flags = flags;
rdev->family = flags & RADEON_FAMILY_MASK;
rdev->is_atom_bios = false;
@@ -1847,7 +1844,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
downgrade_write(&rdev->exclusive_lock);
- drm_helper_resume_force_mode(rdev->ddev);
+ drm_helper_resume_force_mode(rdev_to_drm(rdev));
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 843383f7237f..8f5f8abcb1b4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -302,13 +302,13 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
return;
- spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ spin_lock_irqsave(&rdev_to_drm(rdev)->event_lock, flags);
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
"RADEON_FLIP_SUBMITTED(%d)\n",
radeon_crtc->flip_status,
RADEON_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags);
return;
}
@@ -334,7 +334,7 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
*/
if (update_pending &&
(DRM_SCANOUTPOS_VALID &
- radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ radeon_get_crtc_scanoutpos(rdev_to_drm(rdev), crtc_id,
GET_DISTANCE_TO_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
@@ -347,7 +347,7 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
*/
update_pending = 0;
}
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags);
if (!update_pending)
radeon_crtc_handle_flip(rdev, crtc_id);
}
@@ -370,14 +370,14 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
if (radeon_crtc == NULL)
return;
- spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ spin_lock_irqsave(&rdev_to_drm(rdev)->event_lock, flags);
work = radeon_crtc->flip_work;
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
"RADEON_FLIP_SUBMITTED(%d)\n",
radeon_crtc->flip_status,
RADEON_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags);
return;
}
@@ -389,7 +389,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
if (work->event)
drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&rdev_to_drm(rdev)->event_lock, flags);
drm_crtc_vblank_put(&radeon_crtc->base);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
@@ -408,7 +408,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct radeon_flip_work *work =
container_of(__work, struct radeon_flip_work, flip_work);
struct radeon_device *rdev = work->rdev;
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &radeon_crtc->base;
@@ -1401,7 +1401,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (rdev->is_atom_bios) {
rdev->mode_info.coherent_mode_property =
- drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1);
+ drm_property_create_range(rdev_to_drm(rdev), 0, "coherent", 0, 1);
if (!rdev->mode_info.coherent_mode_property)
return -ENOMEM;
}
@@ -1409,57 +1409,57 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (!ASIC_IS_AVIVO(rdev)) {
sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
rdev->mode_info.tmds_pll_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"tmds_pll",
radeon_tmds_pll_enum_list, sz);
}
rdev->mode_info.load_detect_property =
- drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
+ drm_property_create_range(rdev_to_drm(rdev), 0, "load detection", 0, 1);
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
- drm_mode_create_scaling_mode_property(rdev->ddev);
+ drm_mode_create_scaling_mode_property(rdev_to_drm(rdev));
sz = ARRAY_SIZE(radeon_tv_std_enum_list);
rdev->mode_info.tv_std_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"tv standard",
radeon_tv_std_enum_list, sz);
sz = ARRAY_SIZE(radeon_underscan_enum_list);
rdev->mode_info.underscan_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"underscan",
radeon_underscan_enum_list, sz);
rdev->mode_info.underscan_hborder_property =
- drm_property_create_range(rdev->ddev, 0,
+ drm_property_create_range(rdev_to_drm(rdev), 0,
"underscan hborder", 0, 128);
if (!rdev->mode_info.underscan_hborder_property)
return -ENOMEM;
rdev->mode_info.underscan_vborder_property =
- drm_property_create_range(rdev->ddev, 0,
+ drm_property_create_range(rdev_to_drm(rdev), 0,
"underscan vborder", 0, 128);
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
sz = ARRAY_SIZE(radeon_audio_enum_list);
rdev->mode_info.audio_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"audio",
radeon_audio_enum_list, sz);
sz = ARRAY_SIZE(radeon_dither_enum_list);
rdev->mode_info.dither_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"dither",
radeon_dither_enum_list, sz);
sz = ARRAY_SIZE(radeon_output_csc_enum_list);
rdev->mode_info.output_csc_property =
- drm_property_create_enum(rdev->ddev, 0,
+ drm_property_create_enum(rdev_to_drm(rdev), 0,
"output_csc",
radeon_output_csc_enum_list, sz);
@@ -1578,29 +1578,29 @@ int radeon_modeset_init(struct radeon_device *rdev)
int i;
int ret;
- drm_mode_config_init(rdev->ddev);
+ drm_mode_config_init(rdev_to_drm(rdev));
rdev->mode_info.mode_config_initialized = true;
- rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
+ rdev_to_drm(rdev)->mode_config.funcs = &radeon_mode_funcs;
if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600)
- rdev->ddev->mode_config.async_page_flip = true;
+ rdev_to_drm(rdev)->mode_config.async_page_flip = true;
if (ASIC_IS_DCE5(rdev)) {
- rdev->ddev->mode_config.max_width = 16384;
- rdev->ddev->mode_config.max_height = 16384;
+ rdev_to_drm(rdev)->mode_config.max_width = 16384;
+ rdev_to_drm(rdev)->mode_config.max_height = 16384;
} else if (ASIC_IS_AVIVO(rdev)) {
- rdev->ddev->mode_config.max_width = 8192;
- rdev->ddev->mode_config.max_height = 8192;
+ rdev_to_drm(rdev)->mode_config.max_width = 8192;
+ rdev_to_drm(rdev)->mode_config.max_height = 8192;
} else {
- rdev->ddev->mode_config.max_width = 4096;
- rdev->ddev->mode_config.max_height = 4096;
+ rdev_to_drm(rdev)->mode_config.max_width = 4096;
+ rdev_to_drm(rdev)->mode_config.max_height = 4096;
}
- rdev->ddev->mode_config.preferred_depth = 24;
- rdev->ddev->mode_config.prefer_shadow = 1;
+ rdev_to_drm(rdev)->mode_config.preferred_depth = 24;
+ rdev_to_drm(rdev)->mode_config.prefer_shadow = 1;
- rdev->ddev->mode_config.fb_modifiers_not_supported = true;
+ rdev_to_drm(rdev)->mode_config.fb_modifiers_not_supported = true;
ret = radeon_modeset_create_props(rdev);
if (ret) {
@@ -1618,11 +1618,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* allocate crtcs */
for (i = 0; i < rdev->num_crtc; i++) {
- radeon_crtc_init(rdev->ddev, i);
+ radeon_crtc_init(rdev_to_drm(rdev), i);
}
/* okay we should have all the bios connectors */
- ret = radeon_setup_enc_conn(rdev->ddev);
+ ret = radeon_setup_enc_conn(rdev_to_drm(rdev));
if (!ret) {
return ret;
}
@@ -1639,7 +1639,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
- drm_kms_helper_poll_init(rdev->ddev);
+ drm_kms_helper_poll_init(rdev_to_drm(rdev));
/* do pm late init */
ret = radeon_pm_late_init(rdev);
@@ -1650,15 +1650,15 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.mode_config_initialized) {
- drm_kms_helper_poll_fini(rdev->ddev);
+ drm_kms_helper_poll_fini(rdev_to_drm(rdev));
radeon_hpd_fini(rdev);
- drm_helper_force_disable_all(rdev->ddev);
+ drm_helper_force_disable_all(rdev_to_drm(rdev));
radeon_afmt_fini(rdev);
- drm_mode_config_cleanup(rdev->ddev);
+ drm_mode_config_cleanup(rdev_to_drm(rdev));
rdev->mode_info.mode_config_initialized = false;
}
- kfree(rdev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(rdev->mode_info.bios_hardcoded_edid);
/* free i2c buses */
radeon_i2c_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ac49779ed03d..e5a6f3e7c75b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -259,7 +259,8 @@ static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long flags = 0;
- struct drm_device *dev;
+ struct drm_device *ddev;
+ struct radeon_device *rdev;
int ret;
if (!ent)
@@ -300,28 +301,37 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- dev = drm_dev_alloc(&kms_driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ rdev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*rdev), ddev);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+ rdev->dev = &pdev->dev;
+ rdev->pdev = pdev;
+ ddev = rdev_to_drm(rdev);
+ ddev->dev_private = rdev;
ret = pci_enable_device(pdev);
if (ret)
goto err_free;
- pci_set_drvdata(pdev, dev);
+ pci_set_drvdata(pdev, ddev);
+
+ ret = radeon_driver_load_kms(ddev, flags);
+ if (ret)
+ goto err_agp;
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = drm_dev_register(ddev, flags);
if (ret)
goto err_agp;
- radeon_fbdev_setup(dev->dev_private);
+ radeon_fbdev_setup(ddev->dev_private);
return 0;
err_agp:
pci_disable_device(pdev);
err_free:
- drm_dev_put(dev);
return ret;
}
@@ -570,7 +580,6 @@ static const struct drm_ioctl_desc radeon_ioctls_kms[] = {
static const struct drm_driver kms_driver = {
.driver_features =
DRIVER_GEM | DRIVER_RENDER | DRIVER_MODESET,
- .load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
.unload = radeon_driver_unload_kms,
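The probe rework above is the standard devm_drm_dev_alloc() recipe for a driver whose drm_device is embedded: allocate the containing struct with the DRM core initializing the named member, wire up drvdata and dev_private, call the old .load body explicitly, and only then register. devres owns the final drm_dev_put(), so the error paths only undo what probe itself did. Condensed to its spine (error handling trimmed relative to the real function):

/* Condensed sketch of the converted probe path. */
static int probe_sketch(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct radeon_device *rdev;
	int ret;

	/* drm_device lives at rdev->ddev; lifetime is tied to &pdev->dev. */
	rdev = devm_drm_dev_alloc(&pdev->dev, &kms_driver,
				  struct radeon_device, ddev);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);

	rdev->dev = &pdev->dev;
	rdev->pdev = pdev;
	rdev_to_drm(rdev)->dev_private = rdev;
	pci_set_drvdata(pdev, rdev_to_drm(rdev));

	/* Former .load callback, now invoked before registration. */
	ret = radeon_driver_load_kms(rdev_to_drm(rdev), ent->driver_data);
	if (ret)
		return ret;

	return drm_dev_register(rdev_to_drm(rdev), ent->driver_data);
}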
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index 02bf25759059..fb70de29545c 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -67,7 +67,7 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
int height = mode_cmd->height;
u32 cpp;
- info = drm_get_format_info(rdev->ddev, mode_cmd);
+ info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@@ -148,15 +148,15 @@ static int radeon_fbdev_fb_open(struct fb_info *info, int user)
struct radeon_device *rdev = fb_helper->dev->dev_private;
int ret;
- ret = pm_runtime_get_sync(rdev->ddev->dev);
+ ret = pm_runtime_get_sync(rdev_to_drm(rdev)->dev);
if (ret < 0 && ret != -EACCES)
goto err_pm_runtime_mark_last_busy;
return 0;
err_pm_runtime_mark_last_busy:
- pm_runtime_mark_last_busy(rdev->ddev->dev);
- pm_runtime_put_autosuspend(rdev->ddev->dev);
+ pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
+ pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
return ret;
}
@@ -165,8 +165,8 @@ static int radeon_fbdev_fb_release(struct fb_info *info, int user)
struct drm_fb_helper *fb_helper = info->par;
struct radeon_device *rdev = fb_helper->dev->dev_private;
- pm_runtime_mark_last_busy(rdev->ddev->dev);
- pm_runtime_put_autosuspend(rdev->ddev->dev);
+ pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
+ pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
return 0;
}
@@ -236,7 +236,7 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper,
ret = -ENOMEM;
goto err_radeon_fbdev_destroy_pinned_object;
}
- ret = radeon_framebuffer_init(rdev->ddev, fb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto err_kfree;
@@ -374,12 +374,12 @@ void radeon_fbdev_setup(struct radeon_device *rdev)
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return;
- drm_fb_helper_prepare(rdev->ddev, fb_helper, bpp_sel, &radeon_fbdev_fb_helper_funcs);
+ drm_fb_helper_prepare(rdev_to_drm(rdev), fb_helper, bpp_sel, &radeon_fbdev_fb_helper_funcs);
- ret = drm_client_init(rdev->ddev, &fb_helper->client, "radeon-fbdev",
+ ret = drm_client_init(rdev_to_drm(rdev), &fb_helper->client, "radeon-fbdev",
&radeon_fbdev_client_funcs);
if (ret) {
- drm_err(rdev->ddev, "Failed to register client: %d\n", ret);
+ drm_err(rdev_to_drm(rdev), "Failed to register client: %d\n", ret);
goto err_drm_client_init;
}
@@ -394,13 +394,13 @@ err_drm_client_init:
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
- if (rdev->ddev->fb_helper)
- drm_fb_helper_set_suspend(rdev->ddev->fb_helper, state);
+ if (rdev_to_drm(rdev)->fb_helper)
+ drm_fb_helper_set_suspend(rdev_to_drm(rdev)->fb_helper, state);
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
- struct drm_fb_helper *fb_helper = rdev->ddev->fb_helper;
+ struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper;
struct drm_gem_object *gobj;
if (!fb_helper)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4fb780d96f32..daff61586be5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -150,7 +150,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
rdev->fence_context + ring,
seq);
radeon_fence_ring_emit(rdev, ring, *fence);
- trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
+ trace_radeon_fence_emit(rdev_to_drm(rdev), ring, (*fence)->seq);
radeon_fence_schedule_check(rdev, ring);
return 0;
}
@@ -489,7 +489,7 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
if (!target_seq[i])
continue;
- trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
+ trace_radeon_fence_wait_begin(rdev_to_drm(rdev), i, target_seq[i]);
radeon_irq_kms_sw_irq_get(rdev, i);
}
@@ -511,7 +511,7 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
continue;
radeon_irq_kms_sw_irq_put(rdev, i);
- trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
+ trace_radeon_fence_wait_end(rdev_to_drm(rdev), i, target_seq[i]);
}
return r;
@@ -995,7 +995,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(radeon_debugfs_gpu_reset_fops,
void radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("radeon_gpu_reset", 0444, root, rdev,
&radeon_debugfs_gpu_reset_fops);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index e66a230331ee..9735f4968b86 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -88,7 +88,7 @@ static void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_mn_unregister(robj);
- radeon_bo_unref(&robj);
+ ttm_bo_put(&robj->tbo);
}
}
@@ -899,7 +899,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("radeon_gem_info", 0444, root, rdev,
&radeon_debugfs_gem_info_fops);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 3d174390a8af..1f16619ed06e 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1011,7 +1011,7 @@ void radeon_i2c_add(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *rec,
const char *name)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
int i;
for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 63d914f3414d..1aa41cc3f991 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -309,7 +309,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_sa_info);
static void radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("radeon_sa_info", 0444, root, rdev,
&radeon_debugfs_sa_info_fops);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index c4dda908666c..9961251b44ba 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -80,7 +80,7 @@ static void radeon_hotplug_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
hotplug_work.work);
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -101,7 +101,7 @@ static void radeon_dp_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
dp_work);
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -197,7 +197,7 @@ static void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
static int radeon_irq_install(struct radeon_device *rdev, int irq)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
int ret;
if (irq == IRQ_NOTCONNECTED)
@@ -218,7 +218,7 @@ static int radeon_irq_install(struct radeon_device *rdev, int irq)
static void radeon_irq_uninstall(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
radeon_driver_irq_uninstall_kms(dev);
@@ -322,9 +322,9 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
spin_lock_init(&rdev->irq.lock);
/* Disable vblank irqs aggressively for power-saving */
- rdev->ddev->vblank_disable_immediate = true;
+ rdev_to_drm(rdev)->vblank_disable_immediate = true;
- r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
+ r = drm_vblank_init(rdev_to_drm(rdev), rdev->num_crtc);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index a16590c6247f..645e33bf7947 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -104,15 +104,9 @@ done_free:
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct radeon_device *rdev;
+ struct radeon_device *rdev = dev->dev_private;
int r, acpi_status;
- rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
- if (rdev == NULL) {
- return -ENOMEM;
- }
- dev->dev_private = (void *)rdev;
-
#ifdef __alpha__
rdev->hose = pdev->sysdata;
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index c4350ac2b3d2..d6aa1a3012a8 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -450,7 +450,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
}
bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
DRM_INFO("radeon legacy LVDS backlight initialized\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e0a5af180801..421c83fc70dc 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -39,6 +39,7 @@
#include <linux/i2c-algo-bit.h>
struct edid;
+struct drm_edid;
struct radeon_bo;
struct radeon_device;
@@ -262,8 +263,7 @@ struct radeon_mode_info {
/* Output CSC */
struct drm_property *output_csc_property;
/* hardcoded DFP edid from BIOS */
- struct edid *bios_hardcoded_edid;
- int bios_hardcoded_edid_size;
+ const struct drm_edid *bios_hardcoded_edid;
/* firmware flags */
u16 firmware_flags;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a955f8a2f7fe..d0e4b43d155c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -150,7 +150,7 @@ int radeon_bo_create(struct radeon_device *rdev,
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
+ drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
@@ -256,18 +256,15 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_get(&bo->tbo);
+ drm_gem_object_get(&bo->tbo.base);
return bo;
}
void radeon_bo_unref(struct radeon_bo **bo)
{
- struct ttm_buffer_object *tbo;
-
if ((*bo) == NULL)
return;
- tbo = &((*bo)->tbo);
- ttm_bo_put(tbo);
+ drm_gem_object_put(&(*bo)->tbo.base);
*bo = NULL;
}
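Taken together, the radeon_object.c and radeon_gem.c hunks move the driver's internal buffer references from TTM's counter onto the GEM object's, so every radeon_bo_ref() is visible at the GEM layer and the GEM free callback becomes the one place that drops the underlying TTM reference. The layering reduced to its calls (gem_to_radeon_bo() is the driver's existing cast helper):

/* Internal references now pin the GEM count... */
static struct radeon_bo *bo_ref_sketch(struct radeon_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

static void bo_unref_sketch(struct radeon_bo **bo)
{
	if (*bo == NULL)
		return;
	drm_gem_object_put(&(*bo)->tbo.base);	/* may trigger the GEM free callback */
	*bo = NULL;
}

/* ...and only the GEM free callback touches TTM's count. */
static void gem_free_sketch(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	radeon_mn_unregister(robj);
	ttm_bo_put(&robj->tbo);		/* final put destroys the BO */
}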
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 2d9d9f46f243..b4fb7e70320b 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -282,7 +282,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
if (rdev->irq.installed) {
i = 0;
- drm_for_each_crtc(crtc, rdev->ddev) {
+ drm_for_each_crtc(crtc, rdev_to_drm(rdev)) {
if (rdev->pm.active_crtcs & (1 << i)) {
/* This can fail if a modeset is in progress */
if (drm_crtc_vblank_get(crtc) == 0)
@@ -299,7 +299,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
if (rdev->irq.installed) {
i = 0;
- drm_for_each_crtc(crtc, rdev->ddev) {
+ drm_for_each_crtc(crtc, rdev_to_drm(rdev)) {
if (rdev->pm.req_vblank & (1 << i)) {
rdev->pm.req_vblank &= ~(1 << i);
drm_crtc_vblank_put(crtc);
@@ -671,7 +671,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
int temp;
/* Can't get temperature when the card is off */
@@ -715,7 +715,7 @@ static ssize_t radeon_hwmon_show_sclk(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
u32 sclk = 0;
/* Can't get clock frequency when the card is off */
@@ -740,7 +740,7 @@ static ssize_t radeon_hwmon_show_vddc(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
u16 vddc = 0;
/* Can't get vddc when the card is off */
@@ -1692,7 +1692,7 @@ void radeon_pm_fini(struct radeon_device *rdev)
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
@@ -1765,7 +1765,7 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_connector *radeon_connector;
@@ -1826,7 +1826,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
+ vbl_status = radeon_get_crtc_scanoutpos(rdev_to_drm(rdev),
crtc,
USE_REAL_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
@@ -1918,7 +1918,7 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
static int radeon_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct radeon_device *rdev = m->private;
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
@@ -1955,7 +1955,7 @@ DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_pm_info);
static void radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("radeon_pm_info", 0444, root, rdev,
&radeon_debugfs_pm_info_fops);
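
All of the radeon_pm.c hunks above are a mechanical rdev->ddev to rdev_to_drm(rdev) conversion. The helper itself is not shown in this diff; it is presumably a trivial accessor along these lines, which would let struct drm_device later be embedded in (rather than pointed to by) struct radeon_device without touching every call site again:

        /* Presumed shape of the accessor; not taken from this diff. */
        static inline struct drm_device *rdev_to_drm(struct radeon_device *rdev)
        {
                return rdev->ddev;      /* later: &rdev->ddev once embedded */
        }
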
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8d1d458286a8..581ae20c46e4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -550,7 +550,7 @@ static void radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_r
{
#if defined(CONFIG_DEBUG_FS)
const char *ring_name = radeon_debugfs_ring_idx_to_name(ring->idx);
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
if (ring_name)
debugfs_create_file(ring_name, 0444, root, ring,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5c65b6dfb99a..69d0c12fa419 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -682,8 +682,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
/* No other users of the address space, so set it to 0 */
r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
- rdev->ddev->anon_inode->i_mapping,
- rdev->ddev->vma_offset_manager,
+ rdev_to_drm(rdev)->anon_inode->i_mapping,
+ rdev_to_drm(rdev)->vma_offset_manager,
rdev->need_swiotlb,
dma_addressing_limited(&rdev->pdev->dev));
if (r) {
@@ -890,7 +890,7 @@ static const struct file_operations radeon_ttm_gtt_fops = {
static void radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct drm_minor *minor = rdev->ddev->primary;
+ struct drm_minor *minor = rdev_to_drm(rdev)->primary;
struct dentry *root = minor->debugfs_root;
debugfs_create_file("radeon_vram", 0444, root, rdev,
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d4d1501e6576..d6c18fd740ec 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -379,7 +379,7 @@ DEFINE_SHOW_ATTRIBUTE(rs400_debugfs_gart_info);
static void rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("rs400_gart_info", 0444, root, rdev,
&rs400_debugfs_gart_info_fops);
@@ -474,7 +474,7 @@ int rs400_resume(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* post */
- radeon_combios_asic_init(rdev->ddev);
+ radeon_combios_asic_init(rdev_to_drm(rdev));
/* Resume clock after posting */
r300_clock_startup(rdev);
/* Initialize surface registers */
@@ -552,7 +552,7 @@ int rs400_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5c162778899b..88c8e91ea651 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -321,7 +321,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
void rs600_pm_prepare(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -339,7 +339,7 @@ void rs600_pm_prepare(struct radeon_device *rdev)
void rs600_pm_finish(struct radeon_device *rdev)
{
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = rdev_to_drm(rdev);
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
@@ -408,7 +408,7 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
void rs600_hpd_init(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned enable = 0;
@@ -435,7 +435,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
void rs600_hpd_fini(struct radeon_device *rdev)
{
- struct drm_device *dev = rdev->ddev;
+ struct drm_device *dev = rdev_to_drm(rdev);
struct drm_connector *connector;
unsigned disable = 0;
@@ -797,7 +797,7 @@ int rs600_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
+ drm_handle_vblank(rdev_to_drm(rdev), 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -806,7 +806,7 @@ int rs600_irq_process(struct radeon_device *rdev)
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
+ drm_handle_vblank(rdev_to_drm(rdev), 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -1133,7 +1133,7 @@ int rs600_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize memory controller */
rs600_mc_init(rdev);
r100_debugfs_rbbm_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 14fb0819b8c1..016eb4992803 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -845,7 +845,7 @@ int rs690_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize memory controller */
rs690_mc_init(rdev);
rv515_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index bbc6ccabf788..1b4dfb645585 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -255,7 +255,7 @@ DEFINE_SHOW_ATTRIBUTE(rv515_debugfs_ga_info);
void rv515_debugfs(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
+ struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;
debugfs_create_file("rv515_pipes_info", 0444, root, rdev,
&rv515_debugfs_pipes_info_fops);
@@ -636,7 +636,7 @@ int rv515_init(struct radeon_device *rdev)
if (radeon_boot_test_post_card(rdev) == false)
return -EINVAL;
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 9ce12fa3c356..7d4b0bf59109 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1935,7 +1935,7 @@ int rv770_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
/* initialize AGP */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 15759c8ca5b7..6c95575ce109 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6277,7 +6277,7 @@ restart_ih:
event_name = "vblank";
if (rdev->irq.crtc_vblank_int[crtc_idx]) {
- drm_handle_vblank(rdev->ddev, crtc_idx);
+ drm_handle_vblank(rdev_to_drm(rdev), crtc_idx);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
@@ -6839,7 +6839,7 @@ int si_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
- radeon_get_clock_info(rdev->ddev);
+ radeon_get_clock_info(rdev_to_drm(rdev));
/* Fence driver */
radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/renesas/rcar-du/Kconfig b/drivers/gpu/drm/renesas/rcar-du/Kconfig
index 025677fe88d3..e1f41468a9a6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rcar-du/Kconfig
@@ -62,14 +62,6 @@ config DRM_RCAR_MIPI_DSI
select DRM_MIPI_DSI
select RESET_CONTROLLER
-config DRM_RZG2L_MIPI_DSI
- tristate "RZ/G2L MIPI DSI Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select DRM_MIPI_DSI
- help
- Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
-
config DRM_RCAR_VSP
bool "R-Car DU VSP Compositor Support" if ARM
default y if ARM64
diff --git a/drivers/gpu/drm/renesas/rcar-du/Makefile b/drivers/gpu/drm/renesas/rcar-du/Makefile
index b8f2c82651d9..6f132325c8b7 100644
--- a/drivers/gpu/drm/renesas/rcar-du/Makefile
+++ b/drivers/gpu/drm/renesas/rcar-du/Makefile
@@ -14,5 +14,3 @@ obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o
-
-obj-$(CONFIG_DRM_RZG2L_MIPI_DSI) += rzg2l_mipi_dsi.o
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index e1a6dd322caf..89bdb598e0ae 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -12,3 +12,11 @@ config DRM_RZG2L_DU
help
Choose this option if you have an RZ/G2L or compatible chipset.
If M is selected the module will be called rzg2l-du-drm.
+
+config DRM_RZG2L_MIPI_DSI
+ tristate "RZ/G2L MIPI DSI Encoder Support"
+ depends on DRM && DRM_BRIDGE && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select DRM_MIPI_DSI
+ help
+ Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
diff --git a/drivers/gpu/drm/renesas/rz-du/Makefile b/drivers/gpu/drm/renesas/rz-du/Makefile
index 663b82a2577f..2987900ea6b6 100644
--- a/drivers/gpu/drm/renesas/rz-du/Makefile
+++ b/drivers/gpu/drm/renesas/rz-du/Makefile
@@ -6,3 +6,5 @@ rzg2l-du-drm-y := rzg2l_du_crtc.o \
rzg2l-du-drm-$(CONFIG_VIDEO_RENESAS_VSP1) += rzg2l_du_vsp.o
obj-$(CONFIG_DRM_RZG2L_DU) += rzg2l-du-drm.o
+
+obj-$(CONFIG_DRM_RZG2L_MIPI_DSI) += rzg2l_mipi_dsi.o
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
index 6e7aac6219be..c4c1474d487e 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
@@ -28,6 +28,7 @@
#include "rzg2l_du_vsp.h"
#define DU_MCR0 0x00
+#define DU_MCR0_DPI_OE BIT(0)
#define DU_MCR0_DI_EN BIT(8)
#define DU_DITR0 0x10
@@ -216,9 +217,14 @@ static void rzg2l_du_crtc_put(struct rzg2l_du_crtc *rcrtc)
static void rzg2l_du_start_stop(struct rzg2l_du_crtc *rcrtc, bool start)
{
+ struct rzg2l_du_crtc_state *rstate = to_rzg2l_crtc_state(rcrtc->crtc.state);
struct rzg2l_du_device *rcdu = rcrtc->dev;
+ u32 val = DU_MCR0_DI_EN;
- writel(start ? DU_MCR0_DI_EN : 0, rcdu->mmio + DU_MCR0);
+ if (rstate->outputs & BIT(RZG2L_DU_OUTPUT_DPAD0))
+ val |= DU_MCR0_DPI_OE;
+
+ writel(start ? val : 0, rcdu->mmio + DU_MCR0);
}
static void rzg2l_du_crtc_start(struct rzg2l_du_crtc *rcrtc)
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index e5eca8691a33..bc7c381f92ac 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -25,6 +25,16 @@
* Device Information
*/
+static const struct rzg2l_du_device_info rzg2l_du_r9a07g043u_info = {
+ .channels_mask = BIT(0),
+ .routes = {
+ [RZG2L_DU_OUTPUT_DPAD0] = {
+ .possible_outputs = BIT(0),
+ .port = 0,
+ },
+ },
+};
+
static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
.channels_mask = BIT(0),
.routes = {
@@ -40,6 +50,7 @@ static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
};
static const struct of_device_id rzg2l_du_of_table[] = {
+ { .compatible = "renesas,r9a07g043u-du", .data = &rzg2l_du_r9a07g043u_info },
{ .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 07b312b6f81e..b99217b4e05d 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -183,7 +183,8 @@ static int rzg2l_du_encoders_init(struct rzg2l_du_device *rcdu)
/* Find the output route corresponding to the port number. */
for (i = 0; i < RZG2L_DU_OUTPUT_MAX; ++i) {
- if (rcdu->info->routes[i].port == ep.port) {
+ if (rcdu->info->routes[i].possible_outputs &&
+ rcdu->info->routes[i].port == ep.port) {
output = i;
break;
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index 10febea473cd..10febea473cd 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
diff --git a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
index 1dbc16ec64a4..1dbc16ec64a4 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 362c7951ca4a..d3341edfe4f4 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -262,7 +262,7 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
.mode_set = rockchip_dp_drm_encoder_mode_set,
.atomic_enable = rockchip_dp_drm_encoder_enable,
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index bd7aa891b839..b04538907f95 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -266,15 +266,6 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
mutex_lock(&dp->lock);
- if (dp->drm_edid) {
- /* FIXME: get rid of drm_edid_raw() */
- const struct edid *edid = drm_edid_raw(dp->drm_edid);
-
- DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
- edid->width_cm, edid->height_cm);
-
- }
-
ret = drm_edid_connector_add_modes(connector);
mutex_unlock(&dp->lock);
@@ -369,6 +360,7 @@ static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
+ const struct drm_display_info *info = &dp->connector.display_info;
int ret;
if (!cdn_dp_check_sink_connection(dp))
@@ -386,7 +378,11 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
cdn_dp_get_edid_block, dp);
drm_edid_connector_update(&dp->connector, dp->drm_edid);
- dp->sink_has_audio = dp->connector.display_info.has_audio;
+ dp->sink_has_audio = info->has_audio;
+
+ if (dp->drm_edid)
+ DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
+ info->width_mm / 10, info->height_mm / 10);
return 0;
}
@@ -969,21 +965,21 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* Not connected, notify userspace to disable the block */
if (!cdn_dp_connected_port(dp)) {
- DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
+ DRM_DEV_INFO(dp->dev, "Not connected; disabling cdn\n");
dp->connected = false;
/* Connected but not enabled, enable the block */
} else if (!dp->active) {
- DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
+ DRM_DEV_INFO(dp->dev, "Connected, not enabled; enabling cdn\n");
ret = cdn_dp_enable(dp);
if (ret) {
- DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "Enabling dp failed: %d\n", ret);
dp->connected = false;
}
/* Enabled and connected to a dongle without a sink, notify userspace */
} else if (!cdn_dp_check_sink_connection(dp)) {
- DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
+ DRM_DEV_INFO(dp->dev, "Connected without sink; assert hpd\n");
dp->connected = false;
/* Enabled and connected with a sink, re-train if requested */
@@ -992,11 +988,11 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
unsigned int lanes = dp->max_lanes;
struct drm_display_mode *mode = &dp->mode;
- DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
+ DRM_DEV_INFO(dp->dev, "Connected with sink; re-train link\n");
ret = cdn_dp_train_link(dp);
if (ret) {
dp->connected = false;
- DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "Training link failed: %d\n", ret);
goto out;
}
@@ -1006,9 +1002,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
ret = cdn_dp_config_video(dp);
if (ret) {
dp->connected = false;
- DRM_DEV_ERROR(dp->dev,
- "Failed to config video %d\n",
- ret);
+ DRM_DEV_ERROR(dp->dev, "Failed to configure video: %d\n", ret);
}
}
}
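
The cdn-dp changes above follow the struct drm_edid flow: the raw EDID is no longer poked at by hand, and the physical size is read back from display_info (in millimetres, hence the /10 to print centimetres). The generic shape of that flow, as a sketch rather than this driver's exact code:

        const struct drm_edid *drm_edid;
        int count;

        drm_edid = drm_edid_read(connector);            /* may be NULL */
        drm_edid_connector_update(connector, drm_edid); /* refreshes display_info */
        count = drm_edid_connector_add_modes(connector);
        drm_edid_free(drm_edid);                        /* NULL-safe */
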
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index fe33092abbe7..240552eb517f 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -61,11 +61,13 @@
* @lcdsel_grf_reg: grf register offset of lcdc select
* @lcdsel_big: reg value of selecting vop big for HDMI
* @lcdsel_lit: reg value of selecting vop little for HDMI
+ * @max_tmds_clock: maximum supported TMDS clock rate, in kHz
*/
struct rockchip_hdmi_chip_data {
int lcdsel_grf_reg;
u32 lcdsel_big;
u32 lcdsel_lit;
+ int max_tmds_clock;
};
struct rockchip_hdmi {
@@ -77,8 +79,6 @@ struct rockchip_hdmi {
struct clk *ref_clk;
struct clk *grf_clk;
struct dw_hdmi *hdmi;
- struct regulator *avdd_0v9;
- struct regulator *avdd_1v8;
struct phy *phy;
};
@@ -209,43 +209,40 @@ static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
{
struct device_node *np = hdmi->dev->of_node;
+ int ret;
hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get rockchip,grf\n");
+ drm_err(hdmi, "Unable to get rockchip,grf\n");
return PTR_ERR(hdmi->regmap);
}
- hdmi->ref_clk = devm_clk_get_optional(hdmi->dev, "ref");
+ hdmi->ref_clk = devm_clk_get_optional_enabled(hdmi->dev, "ref");
if (!hdmi->ref_clk)
- hdmi->ref_clk = devm_clk_get_optional(hdmi->dev, "vpll");
+ hdmi->ref_clk = devm_clk_get_optional_enabled(hdmi->dev, "vpll");
- if (PTR_ERR(hdmi->ref_clk) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(hdmi->ref_clk)) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get reference clock\n");
- return PTR_ERR(hdmi->ref_clk);
+ if (IS_ERR(hdmi->ref_clk)) {
+ ret = PTR_ERR(hdmi->ref_clk);
+ if (ret != -EPROBE_DEFER)
+ drm_err(hdmi, "failed to get reference clock\n");
+ return ret;
}
- hdmi->grf_clk = devm_clk_get(hdmi->dev, "grf");
- if (PTR_ERR(hdmi->grf_clk) == -ENOENT) {
- hdmi->grf_clk = NULL;
- } else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(hdmi->grf_clk)) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
- return PTR_ERR(hdmi->grf_clk);
+ hdmi->grf_clk = devm_clk_get_optional(hdmi->dev, "grf");
+ if (IS_ERR(hdmi->grf_clk)) {
+ ret = PTR_ERR(hdmi->grf_clk);
+ if (ret != -EPROBE_DEFER)
+ drm_err(hdmi, "failed to get grf clock\n");
+ return ret;
}
- hdmi->avdd_0v9 = devm_regulator_get(hdmi->dev, "avdd-0v9");
- if (IS_ERR(hdmi->avdd_0v9))
- return PTR_ERR(hdmi->avdd_0v9);
+ ret = devm_regulator_get_enable(hdmi->dev, "avdd-0v9");
+ if (ret)
+ return ret;
- hdmi->avdd_1v8 = devm_regulator_get(hdmi->dev, "avdd-1v8");
- if (IS_ERR(hdmi->avdd_1v8))
- return PTR_ERR(hdmi->avdd_1v8);
+ ret = devm_regulator_get_enable(hdmi->dev, "avdd-1v8");
- return 0;
+ return ret;
}
static enum drm_mode_status
@@ -259,6 +256,10 @@ dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
bool exact_match = hdmi->plat_data->phy_force_vendor;
int i;
+ if (hdmi->chip_data->max_tmds_clock &&
+ mode->clock > hdmi->chip_data->max_tmds_clock)
+ return MODE_CLOCK_HIGH;
+
if (hdmi->ref_clk) {
int rpclk = clk_round_rate(hdmi->ref_clk, pclk);
@@ -322,17 +323,16 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(hdmi->grf_clk);
if (ret < 0) {
- DRM_DEV_ERROR(hdmi->dev, "failed to enable grfclk %d\n", ret);
+ drm_err(hdmi, "failed to enable grfclk %d\n", ret);
return;
}
ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
if (ret != 0)
- DRM_DEV_ERROR(hdmi->dev, "Could not write to GRF: %d\n", ret);
+ drm_err(hdmi, "Could not write to GRF: %d\n", ret);
clk_disable_unprepare(hdmi->grf_clk);
- DRM_DEV_DEBUG(hdmi->dev, "vop %s output to hdmi\n",
- ret ? "LIT" : "BIG");
+ drm_dbg(hdmi, "vop %s output to hdmi\n", ret ? "LIT" : "BIG");
}
static int
@@ -362,6 +362,8 @@ static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data,
{
struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+ dw_hdmi_set_high_tmds_clock_ratio(dw_hdmi, display);
+
return phy_power_on(hdmi->phy);
}
@@ -434,6 +436,8 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
RK3328_HDMI_HPD_IOE));
+
+ dw_hdmi_rk3328_read_hpd(dw_hdmi, data);
}
static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {
@@ -446,13 +450,11 @@ static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {
static struct rockchip_hdmi_chip_data rk3228_chip_data = {
.lcdsel_grf_reg = -1,
+ .max_tmds_clock = 594000,
};
static const struct dw_hdmi_plat_data rk3228_hdmi_drv_data = {
.mode_valid = dw_hdmi_rockchip_mode_valid,
- .mpll_cfg = rockchip_mpll_cfg,
- .cur_ctr = rockchip_cur_ctr,
- .phy_config = rockchip_phy_config,
.phy_data = &rk3228_chip_data,
.phy_ops = &rk3228_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
@@ -463,6 +465,7 @@ static struct rockchip_hdmi_chip_data rk3288_chip_data = {
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
.lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
.lcdsel_lit = HIWORD_UPDATE(RK3288_HDMI_LCDC_SEL, RK3288_HDMI_LCDC_SEL),
+ .max_tmds_clock = 340000,
};
static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = {
@@ -483,13 +486,11 @@ static const struct dw_hdmi_phy_ops rk3328_hdmi_phy_ops = {
static struct rockchip_hdmi_chip_data rk3328_chip_data = {
.lcdsel_grf_reg = -1,
+ .max_tmds_clock = 594000,
};
static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
.mode_valid = dw_hdmi_rockchip_mode_valid,
- .mpll_cfg = rockchip_mpll_cfg,
- .cur_ctr = rockchip_cur_ctr,
- .phy_config = rockchip_phy_config,
.phy_data = &rk3328_chip_data,
.phy_ops = &rk3328_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
@@ -501,6 +502,7 @@ static struct rockchip_hdmi_chip_data rk3399_chip_data = {
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
.lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
.lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL),
+ .max_tmds_clock = 340000,
};
static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
@@ -514,6 +516,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3568_chip_data = {
.lcdsel_grf_reg = -1,
+ .max_tmds_clock = 340000,
};
static const struct dw_hdmi_plat_data rk3568_hdmi_drv_data = {
@@ -592,7 +595,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n");
+ drm_err(hdmi, "Unable to parse OF data\n");
return ret;
}
@@ -600,29 +603,10 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(hdmi->dev, "failed to get phy\n");
+ drm_err(hdmi, "failed to get phy\n");
return ret;
}
- ret = regulator_enable(hdmi->avdd_0v9);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to enable avdd0v9: %d\n", ret);
- goto err_avdd_0v9;
- }
-
- ret = regulator_enable(hdmi->avdd_1v8);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to enable avdd1v8: %d\n", ret);
- goto err_avdd_1v8;
- }
-
- ret = clk_prepare_enable(hdmi->ref_clk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI reference clock: %d\n",
- ret);
- goto err_clk;
- }
-
if (hdmi->chip_data == &rk3568_chip_data) {
regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1,
HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK |
@@ -651,12 +635,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
err_bind:
drm_encoder_cleanup(encoder);
- clk_disable_unprepare(hdmi->ref_clk);
-err_clk:
- regulator_disable(hdmi->avdd_1v8);
-err_avdd_1v8:
- regulator_disable(hdmi->avdd_0v9);
-err_avdd_0v9:
+
return ret;
}
@@ -667,10 +646,6 @@ static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
dw_hdmi_unbind(hdmi->hdmi);
drm_encoder_cleanup(&hdmi->encoder.encoder);
- clk_disable_unprepare(hdmi->ref_clk);
-
- regulator_disable(hdmi->avdd_1v8);
- regulator_disable(hdmi->avdd_0v9);
}
static const struct component_ops dw_hdmi_rockchip_ops = {
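
The parse_dt rework above leans on two devres helpers so the explicit enable/disable calls and the unwind labels in bind/unbind can be deleted. A minimal sketch of the pattern, under the assumption that both resources should stay on for the device's whole bound lifetime:

        struct clk *clk;
        int ret;

        /* Looked up and enabled in one step; devres disables on detach. */
        clk = devm_clk_get_optional_enabled(dev, "ref");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* Enabled for the device lifetime; no regulator pointer to keep. */
        ret = devm_regulator_get_enable(dev, "avdd-0v9");
        if (ret)
                return ret;
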
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index dec6913cec5b..42ef62aa0a1e 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -543,7 +543,7 @@ inno_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
+static const struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
.atomic_check = inno_hdmi_encoder_atomic_check,
.atomic_enable = inno_hdmi_encoder_enable,
.atomic_disable = inno_hdmi_encoder_disable,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index bbb9e0bf6804..8d566fcd80a2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -12,9 +12,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem.h>
+#include <linux/bits.h>
+#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/component.h>
#define ROCKCHIP_MAX_FB_BUFFER 3
#define ROCKCHIP_MAX_CONNECTOR 2
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a13473b2d54c..f161f40d8ce4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -396,8 +396,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
if (info->is_yuv)
is_yuv = true;
- if (dst_w > 3840) {
- DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
+ if (dst_w > 4096) {
+ DRM_DEV_ERROR(vop->dev, "Maximum dst width (4096) exceeded\n");
return;
}
@@ -1583,6 +1583,10 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
VOP_AFBC_SET(vop, enable, s->enable_afbc);
vop_cfg_done(vop);
+ /* Ack the DMA transfer of the previous frame (RK3066). */
+ if (VOP_HAS_REG(vop, common, dma_stop))
+ VOP_REG_SET(vop, common, dma_stop, 0);
+
spin_unlock(&vop->reg_lock);
/*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index b33e5bdc26be..0cf512cc1614 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -122,6 +122,7 @@ struct vop_common {
struct vop_reg lut_buffer_index;
struct vop_reg gate_en;
struct vop_reg mmu_en;
+ struct vop_reg dma_stop;
struct vop_reg out_mode;
struct vop_reg standby;
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index b9ee02061d5b..e2c6ba26f437 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -466,6 +466,7 @@ static const struct vop_output rk3066_output = {
};
static const struct vop_common rk3066_common = {
+ .dma_stop = VOP_REG(RK3066_SYS_CTRL0, 0x1, 0),
.standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
.out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
@@ -514,6 +515,7 @@ static const struct vop_data rk3066_vop = {
.output = &rk3066_output,
.win = rk3066_vop_win_data,
.win_size = ARRAY_SIZE(rk3066_vop_win_data),
+ .feature = VOP_FEATURE_INTERNAL_RGB,
.max_output = { 1920, 1080 },
};
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 7e90c9f95611..ab53ab486fe6 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -674,13 +674,11 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_start - recover jobs after a reset
*
* @sched: scheduler instance
- * @full_recovery: proceed with complete sched restart
*
*/
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
+void drm_sched_start(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
- int r;
/*
* Locking the list is not required here as the sched thread is parked
@@ -692,24 +690,17 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
atomic_add(s_job->credits, &sched->credit_count);
- if (!full_recovery)
+ if (!fence) {
+ drm_sched_job_done(s_job, -ECANCELED);
continue;
+ }
- if (fence) {
- r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_job_done_cb);
- if (r == -ENOENT)
- drm_sched_job_done(s_job, fence->error);
- else if (r)
- DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
- r);
- } else
- drm_sched_job_done(s_job, -ECANCELED);
+ if (dma_fence_add_callback(fence, &s_job->cb,
+ drm_sched_job_done_cb))
+ drm_sched_job_done(s_job, fence->error);
}
- if (full_recovery)
- drm_sched_start_timeout_unlocked(sched);
-
+ drm_sched_start_timeout_unlocked(sched);
drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
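
With full_recovery gone, drm_sched_start() always re-arms the timeout and restarts the work queue, and jobs whose parent fence is missing are completed with -ECANCELED. Callers simply drop the flag; a hypothetical caller before and after:

        /* before */
        drm_sched_start(&gpu->sched, true);
        /* after */
        drm_sched_start(&gpu->sched);
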
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 48a5d49fc131..68b8197b3dd1 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -582,7 +582,6 @@ MODULE_DEVICE_TABLE(of, dvo_of_match);
struct platform_driver sti_dvo_driver = {
.driver = {
.name = "sti-dvo",
- .owner = THIS_MODULE,
.of_match_table = dvo_of_match,
},
.probe = sti_dvo_probe,
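
This and the following sti/stm .owner removals are safe because the platform core already fills in the module owner at registration time, so the explicit initializer was redundant. The registration macro in platform_device.h has this shape:

        /* platform_device.h: registration supplies THIS_MODULE itself. */
        #define platform_driver_register(drv) \
                __platform_driver_register(drv, THIS_MODULE)
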
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 6ee35612a14e..f18faad974aa 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -807,7 +807,6 @@ MODULE_DEVICE_TABLE(of, hda_of_match);
struct platform_driver sti_hda_driver = {
.driver = {
.name = "sti-hda",
- .owner = THIS_MODULE,
.of_match_table = hda_of_match,
},
.probe = sti_hda_probe,
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 500936d5743c..847470f747c0 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -974,28 +974,32 @@ static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
{
+ const struct drm_display_info *info = &connector->display_info;
struct sti_hdmi_connector *hdmi_connector
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
int count;
DRM_DEBUG_DRIVER("\n");
- edid = drm_get_edid(connector, hdmi->ddc_adapt);
- if (!edid)
- goto fail;
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
- cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
+ cec_notifier_set_phys_addr(hdmi->notifier,
+ connector->display_info.source_physical_address);
+
+ if (!drm_edid)
+ goto fail;
- count = drm_add_edid_modes(connector, edid);
- drm_connector_update_edid_property(connector, edid);
+ count = drm_edid_connector_add_modes(connector);
DRM_DEBUG_KMS("%s : %dx%d cm\n",
- (connector->display_info.is_hdmi ? "hdmi monitor" : "dvi monitor"),
- edid->width_cm, edid->height_cm);
+ info->is_hdmi ? "hdmi monitor" : "dvi monitor",
+ info->width_mm / 10, info->height_mm / 10);
- kfree(edid);
+ drm_edid_free(drm_edid);
return count;
fail:
@@ -1485,7 +1489,6 @@ static void sti_hdmi_remove(struct platform_device *pdev)
struct platform_driver sti_hdmi_driver = {
.driver = {
.name = "sti-hdmi",
- .owner = THIS_MODULE,
.of_match_table = hdmi_of_match,
},
.probe = sti_hdmi_probe,
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 0fb48ac044d8..acbf70b95aeb 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1414,7 +1414,6 @@ MODULE_DEVICE_TABLE(of, hqvdp_of_match);
struct platform_driver sti_hqvdp_driver = {
.driver = {
.name = "sti-hqvdp",
- .owner = THIS_MODULE,
.of_match_table = hqvdp_of_match,
},
.probe = sti_hqvdp_probe,
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 64615638b79a..e714c232026c 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -886,7 +886,6 @@ MODULE_DEVICE_TABLE(of, tvout_of_match);
struct platform_driver sti_tvout_driver = {
.driver = {
.name = "sti-tvout",
- .owner = THIS_MODULE,
.of_match_table = tvout_of_match,
},
.probe = sti_tvout_probe,
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 5e5f82b6a5d9..5ba469b711b5 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -431,7 +431,6 @@ MODULE_DEVICE_TABLE(of, vtg_of_match);
struct platform_driver sti_vtg_driver = {
.driver = {
.name = "sti-vtg",
- .owner = THIS_MODULE,
.of_match_table = vtg_of_match,
},
.probe = vtg_probe,
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index e8523abef27a..e1232f74dfa5 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
#include "ltdc.h"
@@ -75,7 +76,7 @@ static int drv_load(struct drm_device *ddev)
DRM_DEBUG("%s\n", __func__);
- ldev = devm_kzalloc(ddev->dev, sizeof(*ldev), GFP_KERNEL);
+ ldev = drmm_kzalloc(ddev, sizeof(*ldev), GFP_KERNEL);
if (!ldev)
return -ENOMEM;
@@ -203,12 +204,14 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
ret = drm_dev_register(ddev, 0);
if (ret)
- goto err_put;
+ goto err_unload;
drm_fbdev_dma_setup(ddev, 16);
return 0;
+err_unload:
+ drv_unload(ddev);
err_put:
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 5576fdae4962..54a73753eff9 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -36,6 +36,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
#include <video/videomode.h>
@@ -169,6 +170,7 @@
#define IER_RRIE BIT(3) /* Register Reload Interrupt Enable */
#define IER_FUEIE BIT(6) /* Fifo Underrun Error Interrupt Enable */
#define IER_CRCIE BIT(7) /* CRC Error Interrupt Enable */
+#define IER_MASK (IER_LIE | IER_FUWIE | IER_TERRIE | IER_RRIE | IER_FUEIE | IER_CRCIE)
#define CPSR_CYPOS GENMASK(15, 0) /* Current Y position */
@@ -187,6 +189,7 @@
#define LXCR_COLKEN BIT(1) /* Color Keying Enable */
#define LXCR_CLUTEN BIT(4) /* Color Look-Up Table ENable */
#define LXCR_HMEN BIT(8) /* Horizontal Mirroring ENable */
+#define LXCR_MASK (LXCR_LEN | LXCR_COLKEN | LXCR_CLUTEN | LXCR_HMEN)
#define LXWHPCR_WHSTPOS GENMASK(11, 0) /* Window Horizontal StarT POSition */
#define LXWHPCR_WHSPPOS GENMASK(27, 16) /* Window Horizontal StoP POSition */
@@ -491,11 +494,6 @@ static inline struct ltdc_device *plane_to_ltdc(struct drm_plane *plane)
return (struct ltdc_device *)plane->dev->dev_private;
}
-static inline struct ltdc_device *encoder_to_ltdc(struct drm_encoder *enc)
-{
- return (struct ltdc_device *)enc->dev->dev_private;
-}
-
static inline enum ltdc_pix_fmt to_ltdc_pixelformat(u32 drm_fmt)
{
enum ltdc_pix_fmt pf;
@@ -784,7 +782,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
regmap_write(ldev->regmap, LTDC_BCCR, BCCR_BCBLACK);
/* Enable IRQ */
- regmap_set_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
+ regmap_set_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_TERRIE);
/* Commit shadow registers = update planes at next vblank */
if (!ldev->caps.plane_reg_shadow)
@@ -806,11 +804,10 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
/* Disable all layers */
for (layer_index = 0; layer_index < ldev->caps.nb_layers; layer_index++)
- regmap_write_bits(ldev->regmap, LTDC_L1CR + layer_index * LAY_OFS,
- LXCR_CLUTEN | LXCR_LEN, 0);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + layer_index * LAY_OFS, LXCR_MASK, 0);
- /* disable IRQ */
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
+ /* Disable IRQ */
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_TERRIE);
/* immediately commit disable of layers before switching off LTDC */
if (!ldev->caps.plane_reg_shadow)
@@ -1199,7 +1196,6 @@ static void ltdc_crtc_atomic_print_state(struct drm_printer *p,
}
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
@@ -1212,7 +1208,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
};
static const struct drm_crtc_funcs ltdc_crtc_with_crc_support_funcs = {
- .destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
@@ -1474,7 +1469,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
if (newstate->rotation & DRM_MODE_REFLECT_X)
val |= LXCR_HMEN;
- regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_MASK, val);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
@@ -1512,7 +1507,10 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
u32 lofs = plane->index * LAY_OFS;
/* Disable layer */
- regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, 0);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_MASK, 0);
+
+ /* Reset the layer transparency to hide any related background color */
+ regmap_write_bits(ldev->regmap, LTDC_L1CACR + lofs, LXCACR_CONSTA, 0x00);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
@@ -1545,7 +1543,6 @@ static void ltdc_plane_atomic_print_state(struct drm_printer *p,
static const struct drm_plane_funcs ltdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -1572,7 +1569,6 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
const u64 *modifiers = ltdc_format_modifiers;
u32 lofs = index * LAY_OFS;
u32 val;
- int ret;
/* Allocate the biggest size according to supported color formats */
formats = devm_kzalloc(dev, (ldev->caps.pix_fmt_nb +
@@ -1580,6 +1576,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) +
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) *
sizeof(*formats), GFP_KERNEL);
+ if (!formats)
+ return NULL;
for (i = 0; i < ldev->caps.pix_fmt_nb; i++) {
drm_fmt = ldev->caps.pix_fmt_drm[i];
@@ -1613,14 +1611,10 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
}
}
- plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return NULL;
-
- ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
- &ltdc_plane_funcs, formats, nb_fmt,
- modifiers, type, NULL);
- if (ret < 0)
+ plane = drmm_universal_plane_alloc(ddev, struct drm_plane, dev,
+ possible_crtcs, &ltdc_plane_funcs, formats,
+ nb_fmt, modifiers, type, NULL);
+ if (IS_ERR(plane))
return NULL;
if (ldev->caps.ycbcr_input) {
@@ -1643,15 +1637,6 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
return plane;
}
-static void ltdc_plane_destroy_all(struct drm_device *ddev)
-{
- struct drm_plane *plane, *plane_temp;
-
- list_for_each_entry_safe(plane, plane_temp,
- &ddev->mode_config.plane_list, head)
- drm_plane_cleanup(plane);
-}
-
static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
{
struct ltdc_device *ldev = ddev->dev_private;
@@ -1677,14 +1662,14 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
/* Init CRTC according to its hardware features */
if (ldev->caps.crc)
- ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
- &ltdc_crtc_with_crc_support_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+ &ltdc_crtc_with_crc_support_funcs, NULL);
else
- ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
- &ltdc_crtc_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+ &ltdc_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Can not initialize CRTC\n");
- goto cleanup;
+ return ret;
}
drm_crtc_helper_add(crtc, &ltdc_crtc_helper_funcs);
@@ -1698,9 +1683,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
for (i = 1; i < ldev->caps.nb_layers; i++) {
overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
if (!overlay) {
- ret = -ENOMEM;
DRM_ERROR("Can not create overlay plane %d\n", i);
- goto cleanup;
+ return -ENOMEM;
}
if (ldev->caps.dynamic_zorder)
drm_plane_create_zpos_property(overlay, i, 0, ldev->caps.nb_layers - 1);
@@ -1713,10 +1697,6 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
}
return 0;
-
-cleanup:
- ltdc_plane_destroy_all(ddev);
- return ret;
}
static void ltdc_encoder_disable(struct drm_encoder *encoder)
@@ -1776,23 +1756,19 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
struct drm_encoder *encoder;
int ret;
- encoder = devm_kzalloc(ddev->dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder)
- return -ENOMEM;
+ encoder = drmm_simple_encoder_alloc(ddev, struct drm_encoder, dev,
+ DRM_MODE_ENCODER_DPI);
+ if (IS_ERR(encoder))
+ return PTR_ERR(encoder);
encoder->possible_crtcs = CRTC_MASK;
encoder->possible_clones = 0; /* No cloning support */
- drm_simple_encoder_init(ddev, encoder, DRM_MODE_ENCODER_DPI);
-
drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- drm_encoder_cleanup(encoder);
+ if (ret)
return ret;
- }
DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
@@ -1962,8 +1938,7 @@ int ltdc_load(struct drm_device *ddev)
goto err;
if (panel) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = drmm_panel_bridge_add(ddev, panel);
if (IS_ERR(bridge)) {
DRM_ERROR("panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge);
@@ -2013,13 +1988,8 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
- /* Disable interrupts */
- if (ldev->caps.fifo_threshold)
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
- IER_TERRIE);
- else
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
- IER_TERRIE | IER_FUEIE);
+ /* Disable all interrupts */
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_MASK);
DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
@@ -2045,7 +2015,7 @@ int ltdc_load(struct drm_device *ddev)
}
}
- crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
+ crtc = drmm_kzalloc(ddev, sizeof(*crtc), GFP_KERNEL);
if (!crtc) {
DRM_ERROR("Failed to allocate crtc\n");
ret = -ENOMEM;
@@ -2072,9 +2042,6 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- for (i = 0; i < nb_endpoints; i++)
- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
-
clk_disable_unprepare(ldev->pixel_clk);
return ret;
@@ -2082,16 +2049,8 @@ err:
void ltdc_unload(struct drm_device *ddev)
{
- struct device *dev = ddev->dev;
- int nb_endpoints, i;
-
DRM_DEBUG_DRIVER("\n");
- nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
-
- for (i = 0; i < nb_endpoints; i++)
- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
-
pm_runtime_disable(ddev->dev);
}
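
The ltdc.c conversion above replaces devm_ allocations and manual drm_*_cleanup() calls with drm-managed (drmm_) equivalents, whose lifetime is tied to the drm_device rather than the underlying platform device. The basic pattern, sketched:

        /* Freed automatically when the last drm_device reference drops. */
        ldev = drmm_kzalloc(ddev, sizeof(*ldev), GFP_KERNEL);
        if (!ldev)
                return -ENOMEM;
        /*
         * Likewise, drmm_crtc_init_with_planes() and
         * drmm_universal_plane_alloc() need no .destroy callback and no
         * error-path cleanup loop.
         */
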
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 2fa2c81784e9..06f2d7a56cc9 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -1210,7 +1210,6 @@ static struct platform_driver lvds_platform_driver = {
.remove = lvds_remove,
.driver = {
.name = "stm32-display-lvds",
- .owner = THIS_MODULE,
.of_match_table = lvds_dt_ids,
},
};
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index bc08b813c197..c9eb329665ec 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1332,6 +1332,11 @@ static int host1x_drm_remove(struct host1x_device *dev)
return 0;
}
+static void host1x_drm_shutdown(struct host1x_device *dev)
+{
+ drm_atomic_helper_shutdown(dev_get_drvdata(&dev->dev));
+}
+
#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
@@ -1400,6 +1405,7 @@ static struct host1x_driver host1x_drm_driver = {
},
.probe = host1x_drm_probe,
.remove = host1x_drm_remove,
+ .shutdown = host1x_drm_shutdown,
.subdevs = host1x_drm_subdevs,
};
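
Wiring .shutdown to drm_atomic_helper_shutdown() ensures displays are turned off on reboot/poweroff even though the driver is never unbound. The same pattern on a plain platform driver would look like this (hypothetical names):

        static void foo_shutdown(struct platform_device *pdev)
        {
                drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
        }
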
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 682011166a8f..2f3781e04b0a 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -133,7 +133,7 @@ struct tegra_output {
struct drm_bridge *bridge;
struct drm_panel *panel;
struct i2c_adapter *ddc;
- const struct edid *edid;
+ const struct drm_edid *drm_edid;
struct cec_notifier *cec;
unsigned int hpd_irq;
struct gpio_desc *hpd_gpio;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 00c8564520e7..4de1ea0fc7c0 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -46,6 +46,7 @@ struct gr3d {
unsigned int nclocks;
struct reset_control_bulk_data resets[RST_GR3D_MAX];
unsigned int nresets;
+ struct dev_pm_domain_list *pd_list;
DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};
@@ -369,18 +370,12 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
return 0;
}
-static void gr3d_del_link(void *link)
-{
- device_link_del(link);
-}
-
static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
{
- static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
- const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
- struct device **opp_virt_devs, *pd_dev;
- struct device_link *link;
- unsigned int i;
+ struct dev_pm_domain_attach_data pd_data = {
+ .pd_names = (const char *[]) { "3d0", "3d1" },
+ .num_pd_names = 2,
+ };
int err;
err = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -414,29 +409,10 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
if (dev->pm_domain)
return 0;
- err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
- if (err)
+ err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
+ if (err < 0)
return err;
- for (i = 0; opp_genpd_names[i]; i++) {
- pd_dev = opp_virt_devs[i];
- if (!pd_dev) {
- dev_err(dev, "failed to get %s power domain\n",
- opp_genpd_names[i]);
- return -EINVAL;
- }
-
- link = device_link_add(dev, pd_dev, link_flags);
- if (!link) {
- dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
- return -EINVAL;
- }
-
- err = devm_add_action_or_reset(dev, gr3d_del_link, link);
- if (err)
- return err;
- }
-
return 0;
}
@@ -527,13 +503,13 @@ static int gr3d_probe(struct platform_device *pdev)
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
- return err;
+ goto err;
err = host1x_client_register(&gr3d->client.base);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto err;
}
/* initialize address register map */
@@ -541,6 +517,9 @@ static int gr3d_probe(struct platform_device *pdev)
set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
return 0;
+err:
+ dev_pm_domain_detach_list(gr3d->pd_list);
+ return err;
}
static void gr3d_remove(struct platform_device *pdev)
@@ -549,6 +528,7 @@ static void gr3d_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&gr3d->client.base);
+ dev_pm_domain_detach_list(gr3d->pd_list);
}
static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
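
gr3d now uses the generic multi-domain attach helper instead of open-coded device links, which also gives the probe error path a single detach call. The helper pair, sketched with this driver's domain names:

        struct dev_pm_domain_attach_data pd_data = {
                .pd_names = (const char *[]) { "3d0", "3d1" },
                .num_pd_names = 2,
        };
        struct dev_pm_domain_list *pd_list;
        int err;

        err = dev_pm_domain_attach_list(dev, &pd_data, &pd_list);
        if (err < 0)            /* >= 0 means that many domains attached */
                return err;
        /* ... */
        dev_pm_domain_detach_list(pd_list);     /* NULL-safe teardown */
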
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index f21e57e8599e..e0c2019a591b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -521,12 +521,11 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
{
- u64 tmp, tmp1, tmp2;
+ u64 tmp, tmp1;
tmp = (u64)dfixed_trunc(in);
- tmp2 = (u64)out;
- tmp1 = (tmp << NFB) + (tmp2 >> 1);
- do_div(tmp1, tmp2);
+ tmp1 = (tmp << NFB) + ((u64)out >> 1);
+ do_div(tmp1, out);
return lower_32_bits(tmp1);
}
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 4da3c3d1abbc..49e4f63a5550 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -21,7 +21,7 @@
int tegra_output_connector_get_modes(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
- struct edid *edid = NULL;
+ const struct drm_edid *drm_edid = NULL;
int err = 0;
/*
@@ -34,18 +34,17 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
return err;
}
- if (output->edid)
- edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+ if (output->drm_edid)
+ drm_edid = drm_edid_dup(output->drm_edid);
else if (output->ddc)
- edid = drm_get_edid(connector, output->ddc);
+ drm_edid = drm_edid_read_ddc(connector, output->ddc);
- cec_notifier_set_phys_addr_from_edid(output->cec, edid);
- drm_connector_update_edid_property(connector, edid);
+ drm_edid_connector_update(connector, drm_edid);
+ cec_notifier_set_phys_addr(output->cec,
+ connector->display_info.source_physical_address);
- if (edid) {
- err = drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ err = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
return err;
}
@@ -98,6 +97,7 @@ static irqreturn_t hpd_irq(int irq, void *data)
int tegra_output_probe(struct tegra_output *output)
{
struct device_node *ddc, *panel;
+ const void *edid;
unsigned long flags;
int err, size;
@@ -124,8 +124,6 @@ int tegra_output_probe(struct tegra_output *output)
return PTR_ERR(output->panel);
}
- output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
-
ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
if (ddc) {
output->ddc = of_get_i2c_adapter_by_node(ddc);
@@ -137,6 +135,9 @@ int tegra_output_probe(struct tegra_output *output)
}
}
+ edid = of_get_property(output->of_node, "nvidia,edid", &size);
+ output->drm_edid = drm_edid_alloc(edid, size);
+
output->hpd_gpio = devm_fwnode_gpiod_get(output->dev,
of_fwnode_handle(output->of_node),
"nvidia,hpd",
@@ -187,6 +188,8 @@ put_i2c:
if (output->ddc)
i2c_put_adapter(output->ddc);
+ drm_edid_free(output->drm_edid);
+
return err;
}
@@ -197,6 +200,8 @@ void tegra_output_remove(struct tegra_output *output)
if (output->ddc)
i2c_put_adapter(output->ddc);
+
+ drm_edid_free(output->drm_edid);
}
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
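
tegra_output now wraps the raw DT-provided EDID bytes in a struct drm_edid at probe time. drm_edid_alloc() copies the blob, so the of_get_property() pointer need not outlive probe, and drm_edid_free() is NULL-safe on the teardown paths. Sketch:

        const void *blob;
        int size;

        blob = of_get_property(np, "nvidia,edid", &size);
        output->drm_edid = drm_edid_alloc(blob, size);  /* NULL blob -> NULL */
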
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index d8d0e4d1682f..fd4215e2f982 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -23,29 +23,16 @@
#define TEST_BYTE 0xae
/*
- * Wrappers to avoid an explicit type casting when passing action
- * functions to kunit_add_action().
+ * Wrappers to avoid cast warnings when passing action functions
+ * directly to kunit_add_action().
*/
-static void kfree_wrapper(void *ptr)
-{
- const void *obj = ptr;
-
- kfree(obj);
-}
-
-static void sg_free_table_wrapper(void *ptr)
-{
- struct sg_table *sgt = ptr;
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
- sg_free_table(sgt);
-}
-
-static void drm_gem_shmem_free_wrapper(void *ptr)
-{
- struct drm_gem_shmem_object *shmem = ptr;
+KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
+ struct sg_table *);
- drm_gem_shmem_free(shmem);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(drm_gem_shmem_free_wrapper, drm_gem_shmem_free,
+ struct drm_gem_shmem_object *);
/*
* Test creating a shmem GEM object backed by shmem buffer. The test
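
KUNIT_DEFINE_ACTION_WRAPPER generates the same thunks the removed functions spelled out by hand; roughly, each use expands to:

        /* Rough expansion of KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *) */
        static void kfree_wrapper(void *in)
        {
                kfree((const void *)in);        /* one well-defined cast, no warning */
        }
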
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 68093d6b6b16..5f2d1b6f9ee9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -49,7 +49,7 @@ static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
if (backlight) {
backlight->props.power = mode == DRM_MODE_DPMS_ON ?
- FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ BACKLIGHT_POWER_ON : BACKLIGHT_POWER_OFF;
backlight_update_status(backlight);
}
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index e0defb1d134f..0bd7707c053e 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -464,7 +464,7 @@ static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
* Note this assumes this driver is only ever used with the Acer C120; if we
* add support for other devices, the vendor and model should be parameterized.
*/
-static struct edid gm12u320_edid = {
+static const struct edid gm12u320_edid = {
.header = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 },
.mfg_id = { 0x04, 0x72 }, /* "ACR" */
.prod_code = { 0x20, 0xc1 }, /* C120h */
@@ -523,8 +523,15 @@ static struct edid gm12u320_edid = {
static int gm12u320_conn_get_modes(struct drm_connector *connector)
{
- drm_connector_update_edid_property(connector, &gm12u320_edid);
- return drm_add_edid_modes(connector, &gm12u320_edid);
+ const struct drm_edid *drm_edid;
+ int count;
+
+ drm_edid = drm_edid_alloc(&gm12u320_edid, sizeof(gm12u320_edid));
+ drm_edid_connector_update(connector, drm_edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(drm_edid);
+
+ return count;
}
static const struct drm_connector_helper_funcs gm12u320_conn_helper_funcs = {
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index d1b32303d051..f0a7eb62116c 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -271,7 +271,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
man = ttm_manager_type(priv->ttm_dev, mem_type);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &man->lru[bo->priority]), 1);
+ list_is_last(&res1->lru.link, &man->lru[bo->priority]), 1);
ttm_resource_free(bo, &res2);
ttm_resource_free(bo, &res1);
@@ -308,11 +308,11 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
err = ttm_resource_alloc(bo, place, &res2);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1);
ttm_bo_unreserve(bo);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1);
ttm_resource_free(bo, &res1);
ttm_resource_free(bo, &res2);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index 9c2f13e53162..22260e7aea58 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -198,7 +198,7 @@ static void ttm_resource_fini_basic(struct kunit *test)
ttm_resource_init(bo, place, res);
ttm_resource_fini(man, res);
- KUNIT_ASSERT_TRUE(test, list_empty(&res->lru));
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru.link));
KUNIT_ASSERT_EQ(test, man->usage, 0);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2427be8bc97f..320592435252 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -224,80 +224,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
dma_resv_iter_end(&cursor);
}
-/**
- * ttm_bo_cleanup_refs
- * If bo idle, remove from lru lists, and unref.
- * If not idle, block if possible.
- *
- * Must be called with lru_lock and reservation held, this function
- * will drop the lru lock and optionally the reservation lock before returning.
- *
- * @bo: The buffer object to clean-up
- * @interruptible: Any sleeps should occur interruptibly.
- * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
- * @unlock_resv: Unlock the reservation lock as well.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
- bool unlock_resv)
-{
- struct dma_resv *resv = &bo->base._resv;
- int ret;
-
- if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
- ret = 0;
- else
- ret = -EBUSY;
-
- if (ret && !no_wait_gpu) {
- long lret;
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
-
- lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
- interruptible,
- 30 * HZ);
-
- if (lret < 0)
- return lret;
- else if (lret == 0)
- return -EBUSY;
-
- spin_lock(&bo->bdev->lru_lock);
- if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
- }
- ret = 0;
- }
-
- if (ret) {
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
- return ret;
- }
-
- spin_unlock(&bo->bdev->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
-
- return 0;
-}
-
/*
* Block for the dma_resv object to become idle, lock the buffer and clean up
* the resource and tt object.
@@ -506,150 +432,152 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
-/*
- * Check the target bo is allowable to be evicted or swapout, including cases:
- *
- * a. if share same reservation object with ctx->resv, have assumption
- * reservation objects should already be locked, so not lock again and
- * return true directly when either the opreation allow_reserved_eviction
- * or the target bo already is in delayed free list;
+/**
+ * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
+ * @bdev: The ttm device.
+ * @man: The manager whose bo to evict.
+ * @ctx: The TTM operation ctx governing the eviction.
*
- * b. Otherwise, trylock it.
+ * Return: 0 if successful or the resource disappeared. Negative error code
+ * on error.
*/
-static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- const struct ttm_place *place,
- bool *locked, bool *busy)
+int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
{
- bool ret = false;
+ struct ttm_resource_cursor cursor;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ unsigned int mem_type;
+ int ret = 0;
- if (bo->pin_count) {
- *locked = false;
- if (busy)
- *busy = false;
- return false;
+ spin_lock(&bdev->lru_lock);
+ res = ttm_resource_manager_first(man, &cursor);
+ ttm_resource_cursor_fini(&cursor);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_ref;
}
+ bo = res->bo;
+ if (!ttm_bo_get_unless_zero(bo))
+ goto out_no_ref;
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
+ if (ret)
+ goto out_no_lock;
+ if (!bo->resource || bo->resource->mem_type != mem_type)
+ goto out_bo_moved;
- if (bo->base.resv == ctx->resv) {
- dma_resv_assert_held(bo->base.resv);
- if (ctx->allow_res_evict)
- ret = true;
- *locked = false;
- if (busy)
- *busy = false;
+ if (bo->deleted) {
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (!ret)
+ ttm_bo_cleanup_memtype_use(bo);
} else {
- ret = dma_resv_trylock(bo->base.resv);
- *locked = ret;
- if (busy)
- *busy = !ret;
- }
-
- if (ret && place && (bo->resource->mem_type != place->mem_type ||
- !bo->bdev->funcs->eviction_valuable(bo, place))) {
- ret = false;
- if (*locked) {
- dma_resv_unlock(bo->base.resv);
- *locked = false;
- }
+ ret = ttm_bo_evict(bo, ctx);
}
+out_bo_moved:
+ dma_resv_unlock(bo->base.resv);
+out_no_lock:
+ ttm_bo_put(bo);
+ return ret;
+out_no_ref:
+ spin_unlock(&bdev->lru_lock);
return ret;
}
/**
- * ttm_mem_evict_wait_busy - wait for a busy BO to become available
- *
- * @busy_bo: BO which couldn't be locked with trylock
- * @ctx: operation context
- * @ticket: acquire ticket
- *
- * Try to lock a busy buffer object to avoid failing eviction.
+ * struct ttm_bo_evict_walk - Parameters for the evict walk.
*/
-static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
+struct ttm_bo_evict_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @place: The place passed to the resource allocation. */
+ const struct ttm_place *place;
+ /** @evictor: The buffer object we're trying to make room for. */
+ struct ttm_buffer_object *evictor;
+ /** @res: The allocated resource if any. */
+ struct ttm_resource **res;
+ /** @evicted: Number of successful evictions. */
+ unsigned long evicted;
+};
+
+static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- int r;
-
- if (!busy_bo || !ticket)
- return -EBUSY;
-
- if (ctx->interruptible)
- r = dma_resv_lock_interruptible(busy_bo->base.resv,
- ticket);
- else
- r = dma_resv_lock(busy_bo->base.resv, ticket);
-
- /*
- * TODO: It would be better to keep the BO locked until allocation is at
- * least tried one more time, but that would mean a much larger rework
- * of TTM.
- */
- if (!r)
- dma_resv_unlock(busy_bo->base.resv);
-
- return r == -EDEADLK ? -EBUSY : r;
-}
-
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
-{
- struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
- bool locked = false;
- int ret;
+ struct ttm_bo_evict_walk *evict_walk =
+ container_of(walk, typeof(*evict_walk), walk);
+ s64 lret;
- spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- bool busy;
-
- if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(res->bo->base.resv))
- busy_bo = res->bo;
- continue;
- }
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
+ return 0;
- if (ttm_bo_get_unless_zero(res->bo)) {
- bo = res->bo;
- break;
- }
- if (locked)
- dma_resv_unlock(res->bo->base.resv);
+ if (bo->deleted) {
+ lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ if (!lret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ lret = ttm_bo_evict(bo, walk->ctx);
}
- if (!bo) {
- if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
- busy_bo = NULL;
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
- if (busy_bo)
- ttm_bo_put(busy_bo);
- return ret;
- }
+ if (lret)
+ goto out;
- if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
- ctx->no_wait_gpu, locked);
- ttm_bo_put(bo);
- return ret;
- }
+ evict_walk->evicted++;
+ if (evict_walk->res)
+ lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
+ evict_walk->res);
+ if (lret == 0)
+ return 1;
+out:
+ /* -ENOSPC is non-fatal for the walk: map it to -EBUSY so the
+ * walk skips this bo and continues. Other errors terminate it. */
+ if (lret == -ENOSPC)
+ return -EBUSY;
- spin_unlock(&bdev->lru_lock);
+ return lret;
+}
- ret = ttm_bo_evict(bo, ctx);
- if (locked)
- ttm_bo_unreserve(bo);
- else
- ttm_bo_move_to_lru_tail_unlocked(bo);
+static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
+ .process_bo = ttm_bo_evict_cb,
+};
+
+static int ttm_bo_evict_alloc(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_buffer_object *evictor,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket,
+ struct ttm_resource **res)
+{
+ struct ttm_bo_evict_walk evict_walk = {
+ .walk = {
+ .ops = &ttm_evict_walk_ops,
+ .ctx = ctx,
+ .ticket = ticket,
+ },
+ .place = place,
+ .evictor = evictor,
+ .res = res,
+ };
+ s64 lret;
+
+ evict_walk.walk.trylock_only = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ if (lret || !ticket)
+ goto out;
- ttm_bo_put(bo);
- return ret;
+ /* If ticket-locking, repeat while making progress. */
+ evict_walk.walk.trylock_only = false;
+ do {
+ /* The walk may clear the evict_walk.walk.ticket field */
+ evict_walk.walk.ticket = ticket;
+ evict_walk.evicted = 0;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ } while (!lret && evict_walk.evicted);
+out:
+ if (lret < 0)
+ return lret;
+ if (lret == 0)
+ return -EBUSY;
+ return 0;
}
/**
@@ -760,6 +688,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ bool may_evict;
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
@@ -769,22 +698,21 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
TTM_PL_FLAG_FALLBACK))
continue;
- do {
- ret = ttm_resource_alloc(bo, place, res);
- if (unlikely(ret && ret != -ENOSPC))
+ may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
+ ret = ttm_resource_alloc(bo, place, res);
+ if (ret) {
+ if (ret != -ENOSPC)
return ret;
- if (likely(!ret) || !force_space)
- break;
-
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret == -EBUSY))
- break;
- if (unlikely(ret))
+ if (!may_evict)
+ continue;
+
+ ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
+ ticket, res);
+ if (ret == -EBUSY)
+ continue;
+ if (ret)
return ret;
- } while (1);
- if (ret)
- continue;
+ }
ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
@@ -1118,12 +1046,24 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags)
+/**
+ * struct ttm_bo_swapout_walk - Parameters for the swapout walk
+ */
+struct ttm_bo_swapout_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
+ gfp_t gfp_flags;
+};
+
+static s64
+ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- struct ttm_place place;
- bool locked;
- long ret;
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct ttm_bo_swapout_walk *swapout_walk =
+ container_of(walk, typeof(*swapout_walk), walk);
+ struct ttm_operation_ctx *ctx = walk->ctx;
+ s64 ret;
/*
* While the bo may already reside in SYSTEM placement, set
@@ -1131,28 +1071,29 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* The driver may use the fact that we're moving from SYSTEM
* as an indication that we're about to swap out.
*/
- memset(&place, 0, sizeof(place));
- place.mem_type = bo->resource->mem_type;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
- return -EBUSY;
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
+ ret = -EBUSY;
+ goto out;
+ }
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
- bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
- !ttm_bo_get_unless_zero(bo)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- return -EBUSY;
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
+ ret = -EBUSY;
+ goto out;
}
if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
- }
+ pgoff_t num_pages = bo->ttm->num_pages;
- /* TODO: Cleanup the locking */
- spin_unlock(&bo->bdev->lru_lock);
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
+
+ ttm_bo_cleanup_memtype_use(bo);
+ ret = num_pages;
+ goto out;
+ }
/*
* Move to system cached
@@ -1164,12 +1105,13 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
ret = ttm_resource_alloc(bo, &place, &evict_mem);
- if (unlikely(ret))
+ if (ret)
goto out;
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
- if (unlikely(ret != 0)) {
- WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
+ if (ret) {
+ WARN(ret == -EMULTIHOP,
+ "Unexpected multihop in swapout - likely driver bug.\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
@@ -1179,30 +1121,54 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* Make sure BO is idle.
*/
ret = ttm_bo_wait_ctx(bo, ctx);
- if (unlikely(ret != 0))
+ if (ret)
goto out;
ttm_bo_unmap_virtual(bo);
-
- /*
- * Swap out. Buffer will be swapped in again as soon as
- * anyone tries to access a ttm page.
- */
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
if (ttm_tt_is_populated(bo->ttm))
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+
out:
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
- /*
- * Unreserve without putting on LRU to avoid swapping out an
- * already swapped buffer.
- */
- if (locked)
- dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
+ return ret;
+}
+
+const struct ttm_lru_walk_ops ttm_swap_ops = {
+ .process_bo = ttm_bo_swapout_cb,
+};
+
+/**
+ * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
+ * @bdev: The ttm device.
+ * @ctx: The ttm_operation_ctx governing the swapout operation.
+ * @man: The resource manager whose resources / buffer objects are
+ * going to be swapped out.
+ * @gfp_flags: The gfp flags used for shmem page allocations.
+ * @target: The desired number of pages to swap out.
+ *
+ * Return: The number of pages actually swapped out, or a negative error code
+ * on error.
+ */
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target)
+{
+ struct ttm_bo_swapout_walk swapout_walk = {
+ .walk = {
+ .ops = &ttm_swap_ops,
+ .ctx = ctx,
+ .trylock_only = true,
+ },
+ .gfp_flags = gfp_flags,
+ };
+
+ return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
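With ttm_bo_cleanup_refs() and ttm_mem_evict_first() removed, eviction splits
into the single-shot ttm_bo_evict_first() for draining a manager and the
LRU-walk based ttm_bo_evict_alloc() for making room during allocation. A
sketch of draining with the new helper, mirroring the
ttm_resource_manager_evict_all() rework later in this patch
(ttm_bo_evict_first() returns -ENOENT once the LRU lists are empty):

    struct ttm_operation_ctx ctx = { .interruptible = false };
    int ret;

    do {
            ret = ttm_bo_evict_first(bdev, man, &ctx);
            cond_resched();
    } while (!ret);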
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0b3f4267130c..3c07f4712d5c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -768,3 +768,154 @@ error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
return ret;
}
+
+static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct ttm_operation_ctx *ctx = walk->ctx;
+
+ *needs_unlock = false;
+
+ if (dma_resv_trylock(bo->base.resv)) {
+ *needs_unlock = true;
+ return true;
+ }
+
+ if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
+ dma_resv_assert_held(bo->base.resv);
+ return true;
+ }
+
+ return false;
+}
+
+static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct dma_resv *resv = bo->base.resv;
+ int ret;
+
+ if (walk->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ else
+ ret = dma_resv_lock(resv, walk->ticket);
+
+ if (!ret) {
+ *needs_unlock = true;
+ /*
+ * Only a single ticketlock per loop. Ticketlocks are prone
+ * to return -EDEADLK, causing the eviction to fail, so
+ * after waiting for the ticketlock, revert to trylocking
+ * for this walk.
+ */
+ walk->ticket = NULL;
+ } else if (ret == -EDEADLK) {
+ /* Caller needs to exit the ww transaction. */
+ ret = -ENOSPC;
+ }
+
+ return ret;
+}
+
+static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
+{
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+}
+
+/**
+ * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
+ * valid items.
+ * @walk: describe the walks and actions taken
+ * @bdev: The TTM device.
+ * @man: The struct ttm_resource manager whose LRU lists we're walking.
+ * @target: The end condition for the walk.
+ *
+ * The LRU lists of @man are walked, and for each struct ttm_resource
+ * encountered, the corresponding ttm_buffer_object is locked and a
+ * reference is taken on it, and the LRU lock is dropped. The LRU lock may
+ * be dropped before locking and, in that case, it's verified that the item
+ * actually remains on the LRU list after the lock, and that the buffer
+ * object didn't switch resource in between.
+ *
+ * With a locked object, the actions indicated by @walk->process_bo are
+ * performed, and after that, the bo is unlocked, the refcount dropped and the
+ * next struct ttm_resource is processed. Here, the walker relies on
+ * TTM's restartable LRU list implementation.
+ *
+ * Typically @walk->process_bo() would return the number of pages evicted,
+ * swapped or shrunk, so that when the total exceeds @target, or when the
+ * LRU list has been walked in full, iteration is terminated. It's also
+ * terminated on error. Note that the definition of @target is up to the
+ * caller; it could have a different meaning than the number of pages.
+ *
+ * Note that, due to the way dma_resv individualization is done, locking
+ * needs to be done either with the LRU lock held (trylocking only) or with
+ * a reference held on the object.
+ *
+ * Return: The progress made towards target or negative error code on error.
+ */
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target)
+{
+ struct ttm_resource_cursor cursor;
+ struct ttm_resource *res;
+ s64 progress = 0;
+ s64 lret;
+
+ spin_lock(&bdev->lru_lock);
+ ttm_resource_manager_for_each_res(man, &cursor, res) {
+ struct ttm_buffer_object *bo = res->bo;
+ bool bo_needs_unlock = false;
+ bool bo_locked = false;
+ int mem_type;
+
+ /*
+ * Attempt a trylock before taking a reference on the bo,
+ * since if we do it the other way around, and the trylock fails,
+ * we need to drop the lru lock to put the bo.
+ */
+ if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+ bo_locked = true;
+ else if (!walk->ticket || walk->ctx->no_wait_gpu ||
+ walk->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+
+ lret = 0;
+ if (!bo_locked)
+ lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ if (!lret && bo->resource && bo->resource->mem_type == mem_type)
+ lret = walk->ops->process_bo(walk, bo);
+
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ ttm_bo_put(bo);
+ if (lret == -EBUSY || lret == -EALREADY)
+ lret = 0;
+ progress = (lret < 0) ? lret : progress + lret;
+
+ spin_lock(&bdev->lru_lock);
+ if (progress < 0 || progress >= target)
+ break;
+ }
+ ttm_resource_cursor_fini(&cursor);
+ spin_unlock(&bdev->lru_lock);
+
+ return progress;
+}
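The walker is driven entirely through struct ttm_lru_walk_ops, so a new user
only supplies a process_bo() callback. A minimal sketch with a hypothetical
callback, following the return convention in the kernel-doc above (progress in
pages, 0 to skip the bo, negative error to stop the walk):

    static s64 my_process_bo(struct ttm_lru_walk *walk,
                             struct ttm_buffer_object *bo)
    {
            if (bo->pin_count)
                    return 0;       /* Not eligible; keep walking. */

            /* ... evict, swap or shrink the bo here ... */
            return PFN_UP(bo->base.size);
    }

    static const struct ttm_lru_walk_ops my_ops = {
            .process_bo = my_process_bo,
    };

    struct ttm_lru_walk walk = {
            .ops = &my_ops,
            .ctx = ctx,
            .trylock_only = true,   /* Never sleep on bo locks. */
    };
    s64 progress = ttm_lru_walk_for_evict(&walk, bdev, man, target);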
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 434cf0258000..e7cc4954c1bc 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -148,35 +148,20 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
- struct ttm_resource_cursor cursor;
struct ttm_resource_manager *man;
- struct ttm_resource *res;
unsigned i;
- int ret;
+ s64 lret;
- spin_lock(&bdev->lru_lock);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- uint32_t num_pages;
-
- if (!bo || bo->resource != res)
- continue;
-
- num_pages = PFN_UP(bo->base.size);
- ret = ttm_bo_swapout(bo, ctx, gfp_flags);
- /* ttm_bo_swapout has dropped the lru_lock */
- if (!ret)
- return num_pages;
- if (ret != -EBUSY)
- return ret;
- }
+ lret = ttm_bo_swapout(bdev, ctx, man, gfp_flags, 1);
+ /* Can be both positive (num_pages) and negative (error) */
+ if (lret)
+ return lret;
}
- spin_unlock(&bdev->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
@@ -274,14 +259,14 @@ static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
struct ttm_resource *res;
spin_lock(&bdev->lru_lock);
- while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
+ while ((res = ttm_lru_first_res_or_null(list))) {
struct ttm_buffer_object *bo = res->bo;
/* Take ref against racing releases once lru_lock is unlocked */
if (!ttm_bo_get_unless_zero(bo))
continue;
- list_del_init(&res->lru);
+ list_del_init(&bo->resource->lru.link);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 6e1fd6985ffc..8504dbe19c1a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -91,7 +91,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
*/
if (order)
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
- __GFP_KSWAPD_RECLAIM;
+ __GFP_THISNODE;
if (!pool->use_dma_alloc) {
p = alloc_pages_node(pool->nid, gfp_flags, order);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 4a66b851b67d..6d764ba88aab 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -33,6 +33,68 @@
#include <drm/drm_util.h>
+/* Detach the cursor from the bulk move list */
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ cursor->bulk = NULL;
+ list_del_init(&cursor->bulk_link);
+}
+
+/* Move the cursor to the end of the bulk move list it's in */
+static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_lru_bulk_move_pos *pos;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ if (WARN_ON_ONCE(bulk != cursor->bulk)) {
+ list_del_init(&cursor->bulk_link);
+ return;
+ }
+
+ pos = &bulk->pos[cursor->mem_type][cursor->priority];
+ if (pos->last)
+ list_move(&cursor->hitch.link, &pos->last->lru.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/* Move all cursors attached to a bulk move to its end */
+static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_move_bulk_tail(bulk, cursor);
+}
+
+/* Remove all cursors from an empty bulk move list */
+static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/**
+ * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
+ * @cursor: The struct ttm_resource_cursor to finalize.
+ *
+ * The function pulls the LRU list cursor off any lists it was previously
+ * attached to. Needs to be called with the LRU lock held. The function
+ * can be called multiple times after each other.
+ */
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ list_del_init(&cursor->hitch.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
* @bulk: the structure to init
@@ -42,10 +104,28 @@
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
memset(bulk, 0, sizeof(*bulk));
+ INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
/**
+ * ttm_lru_bulk_move_fini - finalize a bulk move structure
+ * @bdev: The struct ttm_device
+ * @bulk: the structure to finalize
+ *
+ * Sanity checks that bulk moves don't have any
+ * resources left and hence no cursors attached.
+ */
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk)
+{
+ spin_lock(&bdev->lru_lock);
+ ttm_bulk_move_drop_cursors(bulk);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
+
+/**
* ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
*
* @bulk: bulk move structure
@@ -57,6 +137,7 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
unsigned i, j;
+ ttm_bulk_move_adjust_cursors(bulk);
for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
@@ -70,8 +151,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->last->bo->base.resv);
man = ttm_manager_type(pos->first->bo->bdev, i);
- list_bulk_move_tail(&man->lru[j], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
+ &pos->last->lru.link);
}
}
}
@@ -84,14 +165,38 @@ ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
return &bulk->pos[res->mem_type][res->bo->priority];
}
+/* Return the previous resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_prev_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+/* Return the next resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_next_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
if (pos->first == res)
- pos->first = list_next_entry(res, lru);
- list_move(&res->lru, &pos->last->lru);
+ pos->first = ttm_lru_next_res(res);
+ list_move(&res->lru.link, &pos->last->lru.link);
pos->last = res;
}
}
@@ -122,11 +227,11 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {
- pos->first = list_next_entry(res, lru);
+ pos->first = ttm_lru_next_res(res);
} else if (pos->last == res) {
- pos->last = list_prev_entry(res, lru);
+ pos->last = ttm_lru_prev_res(res);
} else {
- list_move(&res->lru, &pos->last->lru);
+ list_move(&res->lru.link, &pos->last->lru.link);
}
}
@@ -155,7 +260,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
if (bo->pin_count) {
- list_move_tail(&res->lru, &bdev->pinned);
+ list_move_tail(&res->lru.link, &bdev->pinned);
} else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
@@ -166,7 +271,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
struct ttm_resource_manager *man;
man = ttm_manager_type(bdev, res->mem_type);
- list_move_tail(&res->lru, &man->lru[bo->priority]);
+ list_move_tail(&res->lru.link, &man->lru[bo->priority]);
}
}
@@ -197,9 +302,9 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
if (bo->pin_count)
- list_add_tail(&res->lru, &bo->bdev->pinned);
+ list_add_tail(&res->lru.link, &bo->bdev->pinned);
else
- list_add_tail(&res->lru, &man->lru[bo->priority]);
+ list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
@@ -221,7 +326,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_device *bdev = man->bdev;
spin_lock(&bdev->lru_lock);
- list_del_init(&res->lru);
+ list_del_init(&res->lru.link);
man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
@@ -390,24 +495,11 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
};
struct dma_fence *fence;
int ret;
- unsigned i;
- /*
- * Can't use standard list traversal since we're unlocking.
- */
-
- spin_lock(&bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- while (!list_empty(&man->lru[i])) {
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
- NULL);
- if (ret)
- return ret;
- spin_lock(&bdev->lru_lock);
- }
- }
- spin_unlock(&bdev->lru_lock);
+ do {
+ ret = ttm_bo_evict_first(bdev, man, &ctx);
+ cond_resched();
+ } while (!ret);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
@@ -460,53 +552,106 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+static void
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+ struct ttm_lru_item *next_lru)
+{
+ struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+ struct ttm_lru_bulk_move *bulk = NULL;
+ struct ttm_buffer_object *bo = next->bo;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ bulk = bo->bulk_move;
+
+ if (cursor->bulk != bulk) {
+ if (bulk) {
+ list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
+ cursor->mem_type = next->mem_type;
+ } else {
+ list_del_init(&cursor->bulk_link);
+ }
+ cursor->bulk = bulk;
+ }
+}
+
/**
- * ttm_resource_manager_first
- *
+ * ttm_resource_manager_first() - Start iterating over the resources
+ * of a resource manager
* @man: resource manager to iterate over
* @cursor: cursor to record the position
*
- * Returns the first resource from the resource manager.
+ * Initializes the cursor and starts iterating. When done iterating,
+ * the caller must explicitly call ttm_resource_cursor_fini().
+ *
+ * Return: The first resource from the resource manager.
*/
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor)
{
- struct ttm_resource *res;
-
lockdep_assert_held(&man->bdev->lru_lock);
- for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
- return NULL;
+ return ttm_resource_manager_next(cursor);
}
/**
- * ttm_resource_manager_next
- *
- * @man: resource manager to iterate over
+ * ttm_resource_manager_next() - Continue iterating over the resource manager
+ * resources
* @cursor: cursor to record the position
- * @res: the current resource pointer
*
- * Returns the next resource from the resource manager.
+ * Return: the next resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res)
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
+ struct ttm_resource_manager *man = cursor->man;
+ struct ttm_lru_item *lru;
+
lockdep_assert_held(&man->bdev->lru_lock);
- list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
- return res;
+ for (;;) {
+ lru = &cursor->hitch;
+ list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru)) {
+ ttm_resource_cursor_check_bulk(cursor, lru);
+ list_move(&cursor->hitch.link, &lru->link);
+ return ttm_lru_item_to_res(lru);
+ }
+ }
+
+ if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
+ break;
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ ttm_resource_cursor_clear_bulk(cursor);
+ }
+
+ ttm_resource_cursor_fini(cursor);
- for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ return NULL;
+}
+
+/**
+ * ttm_lru_first_res_or_null() - Return the first resource on an lru list
+ * @head: The list head of the lru list.
+ *
+ * Return: Pointer to the first resource on the lru list or NULL if
+ * there is none.
+ */
+struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
+{
+ struct ttm_lru_item *lru;
+
+ list_for_each_entry(lru, head, link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
return NULL;
}
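The hitch-based cursor is what makes these walks restartable: the cursor is
itself a list item (TTM_LRU_HITCH) that marks the position while the LRU lock
is dropped, and bulk moves relocate attached cursors instead of invalidating
them. A sketch of the resulting iteration contract:

    struct ttm_resource_cursor cursor;
    struct ttm_resource *res;

    spin_lock(&bdev->lru_lock);
    ttm_resource_manager_for_each_res(man, &cursor, res) {
            /* The lru_lock may be dropped and re-taken in here;
             * the hitch keeps the iteration position valid. */
    }
    /* Always detach the hitch, also on early exit from the loop. */
    ttm_resource_cursor_fini(&cursor);
    spin_unlock(&bdev->lru_lock);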
diff --git a/drivers/gpu/drm/udl/udl_edid.c b/drivers/gpu/drm/udl/udl_edid.c
index d67e6bf1f2ae..12f48ae17073 100644
--- a/drivers/gpu/drm/udl/udl_edid.c
+++ b/drivers/gpu/drm/udl/udl_edid.c
@@ -69,7 +69,7 @@ bool udl_probe_edid(struct udl_device *udl)
* The adapter sends all-zeros if no monitor has been
* connected. We consider anything else a connection.
*/
- return !!memchr_inv(hdr, 0, sizeof(hdr));
+ return !mem_is_zero(hdr, sizeof(hdr));
}
const struct drm_edid *udl_edid_read(struct drm_connector *connector)
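mem_is_zero() is the readable spelling of the memchr_inv() idiom, assuming the
<linux/string.h> helper that returns true when all bytes are zero; the double
negation disappears:

    /* Before: return !!memchr_inv(hdr, 0, sizeof(hdr)); */
    return !mem_is_zero(hdr, sizeof(hdr));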
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 9eafe53a8f41..ebe52bef4ffb 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -26,6 +26,17 @@
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
+static enum drm_gem_object_status v3d_gem_status(struct drm_gem_object *obj)
+{
+ struct v3d_bo *bo = to_v3d_bo(obj);
+ enum drm_gem_object_status res = 0;
+
+ if (bo->base.pages)
+ res |= DRM_GEM_OBJECT_RESIDENT;
+
+ return res;
+}
+
/* Called DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -63,6 +74,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .status = v3d_gem_status,
.vm_ops = &drm_gem_shmem_vm_ops,
};
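The new .status callback feeds the common fdinfo memory accounting that
drm_show_memory_stats() (wired into v3d_show_fdinfo() in the next file) emits
per file. A driver that also supported madvise-style purging would extend the
hook along these lines (to_my_bo() and my_bo_is_purgeable() are hypothetical):

    static enum drm_gem_object_status my_gem_status(struct drm_gem_object *obj)
    {
            enum drm_gem_object_status s = 0;

            if (to_my_bo(obj)->pages)               /* Backed by memory. */
                    s |= DRM_GEM_OBJECT_RESIDENT;
            if (my_bo_is_purgeable(obj))
                    s |= DRM_GEM_OBJECT_PURGEABLE;

            return s;
    }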
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 5982941d933b..d7ff1f5fa481 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -95,7 +95,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
args->value = 1;
return 0;
case DRM_V3D_PARAM_MAX_PERF_COUNTERS:
- args->value = v3d->max_counters;
+ args->value = v3d->perfmon_info.max_counters;
return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
@@ -184,6 +184,8 @@ static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n",
v3d_queue_to_string(queue), jobs_completed);
}
+
+ drm_show_memory_stats(p, file);
}
static const struct file_operations v3d_drm_fops = {
@@ -301,12 +303,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ident3 = V3D_READ(V3D_HUB_IDENT3);
v3d->rev = V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV);
- if (v3d->ver >= 71)
- v3d->max_counters = V3D_V71_NUM_PERFCOUNTERS;
- else if (v3d->ver >= 42)
- v3d->max_counters = V3D_V42_NUM_PERFCOUNTERS;
- else
- v3d->max_counters = 0;
+ v3d_perfmon_init(v3d);
v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(v3d->reset)) {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a0febdb6f214..cf4b23369dc4 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -106,10 +106,7 @@ struct v3d_dev {
bool single_irq_line;
- /* Different revisions of V3D have different total number of performance
- * counters
- */
- unsigned int max_counters;
+ struct v3d_perfmon_info perfmon_info;
void __iomem *hub_regs;
void __iomem *core_regs[3];
@@ -353,13 +350,9 @@ struct v3d_timestamp_query {
struct drm_syncobj *syncobj;
};
-/* Number of perfmons required to handle all supported performance counters */
-#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_MAX_COUNTERS, \
- DRM_V3D_MAX_PERF_COUNTERS)
-
struct v3d_performance_query {
/* Performance monitor IDs for this query */
- u32 kperfmon_ids[V3D_MAX_PERFMONS];
+ u32 *kperfmon_ids;
/* Syncobj that indicates the query availability */
struct drm_syncobj *syncobj;
@@ -574,6 +567,7 @@ int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
/* v3d_perfmon.c */
+void v3d_perfmon_init(struct v3d_dev *v3d);
void v3d_perfmon_get(struct v3d_perfmon *perfmon);
void v3d_perfmon_put(struct v3d_perfmon *perfmon);
void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon);
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index b7d0b02e1a95..cd7f1eedf17f 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -195,6 +195,23 @@ static const struct v3d_perf_counter_desc v3d_v71_performance_counters[] = {
{"QPU", "QPU-stalls-other", "[QPU] Stalled qcycles waiting for any other reason (vary/W/Z)"},
};
+void v3d_perfmon_init(struct v3d_dev *v3d)
+{
+ const struct v3d_perf_counter_desc *counters = NULL;
+ unsigned int max = 0;
+
+ if (v3d->ver >= 71) {
+ counters = v3d_v71_performance_counters;
+ max = ARRAY_SIZE(v3d_v71_performance_counters);
+ } else if (v3d->ver >= 42) {
+ counters = v3d_v42_performance_counters;
+ max = ARRAY_SIZE(v3d_v42_performance_counters);
+ }
+
+ v3d->perfmon_info.max_counters = max;
+ v3d->perfmon_info.counters = counters;
+}
+
void v3d_perfmon_get(struct v3d_perfmon *perfmon)
{
if (perfmon)
@@ -321,7 +338,7 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data,
/* Make sure all counters are valid. */
for (i = 0; i < req->ncounters; i++) {
- if (req->counters[i] >= v3d->max_counters)
+ if (req->counters[i] >= v3d->perfmon_info.max_counters)
return -EINVAL;
}
@@ -416,25 +433,14 @@ int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (!v3d->perfmon_info.max_counters)
+ return -EOPNOTSUPP;
+
/* Make sure that the counter ID is valid */
- if (req->counter >= v3d->max_counters)
+ if (req->counter >= v3d->perfmon_info.max_counters)
return -EINVAL;
- BUILD_BUG_ON(ARRAY_SIZE(v3d_v42_performance_counters) !=
- V3D_V42_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(ARRAY_SIZE(v3d_v71_performance_counters) !=
- V3D_V71_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(V3D_MAX_COUNTERS < V3D_V42_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(V3D_MAX_COUNTERS < V3D_V71_NUM_PERFCOUNTERS);
- BUILD_BUG_ON((V3D_MAX_COUNTERS != V3D_V42_NUM_PERFCOUNTERS) &&
- (V3D_MAX_COUNTERS != V3D_V71_NUM_PERFCOUNTERS));
-
- if (v3d->ver >= 71)
- counter = &v3d_v71_performance_counters[req->counter];
- else if (v3d->ver >= 42)
- counter = &v3d_v42_performance_counters[req->counter];
- else
- return -EOPNOTSUPP;
+ counter = &v3d->perfmon_info.counters[req->counter];
strscpy(req->name, counter->name, sizeof(req->name));
strscpy(req->category, counter->category, sizeof(req->category));
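Hiding the per-version counter tables behind struct v3d_perfmon_info turns the
BUILD_BUG_ON() web and version checks into plain bounds-checked table lookups.
A sketch of the resulting access pattern:

    const struct v3d_perf_counter_desc *desc;

    if (counter >= v3d->perfmon_info.max_counters)
            return -EINVAL; /* Also covers pre-4.2 HW: max_counters is 0. */

    desc = &v3d->perfmon_info.counters[counter];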
diff --git a/drivers/gpu/drm/v3d/v3d_performance_counters.h b/drivers/gpu/drm/v3d/v3d_performance_counters.h
index 131b2909522a..d919a2fc9449 100644
--- a/drivers/gpu/drm/v3d/v3d_performance_counters.h
+++ b/drivers/gpu/drm/v3d/v3d_performance_counters.h
@@ -19,11 +19,17 @@ struct v3d_perf_counter_desc {
char description[256];
};
+struct v3d_perfmon_info {
+ /*
+ * Different revisions of V3D have different total number of
+ * performance counters.
+ */
+ unsigned int max_counters;
-#define V3D_V42_NUM_PERFCOUNTERS (87)
-#define V3D_V71_NUM_PERFCOUNTERS (93)
-
-/* Maximum number of performance counters supported by any version of V3D */
-#define V3D_MAX_COUNTERS (93)
+ /*
+ * Array of counters valid for the platform.
+ */
+ const struct v3d_perf_counter_desc *counters;
+};
#endif
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index ad1e6236ff6f..08d2a2739582 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -94,8 +94,10 @@ v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
if (query_info->queries) {
unsigned int i;
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
drm_syncobj_put(query_info->queries[i].syncobj);
+ kvfree(query_info->queries[i].kperfmon_ids);
+ }
kvfree(query_info->queries);
}
@@ -365,8 +367,7 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
- struct v3d_dev *v3d = job->base.v3d;
- u32 num_batches, *wg_counts;
+ u32 *wg_counts;
v3d_get_bo_vaddr(bo);
v3d_get_bo_vaddr(indirect);
@@ -379,17 +380,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
-
- num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
- (wg_counts[0] * wg_counts[1] * wg_counts[2]);
-
- /* V3D 7.1.6 and later don't subtract 1 from the number of batches */
- if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
- args->cfg[4] = num_batches - 1;
- else
- args->cfg[4] = num_batches;
-
- WARN_ON(args->cfg[4] == ~0);
+ args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
for (int i = 0; i < 3; i++) {
/* 0xffffffff indicates that the uniform rewrite is not needed */
@@ -443,18 +435,23 @@ v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
v3d_put_bo_vaddr(bo);
}
-static void
-write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
+static void write_to_buffer_32(u32 *dst, unsigned int idx, u32 value)
{
- if (do_64bit) {
- u64 *dst64 = (u64 *)dst;
+ dst[idx] = value;
+}
- dst64[idx] = value;
- } else {
- u32 *dst32 = (u32 *)dst;
+static void write_to_buffer_64(u64 *dst, unsigned int idx, u64 value)
+{
+ dst[idx] = value;
+}
- dst32[idx] = (u32)value;
- }
+static void
+write_to_buffer(void *dst, unsigned int idx, bool do_64bit, u64 value)
+{
+ if (do_64bit)
+ write_to_buffer_64(dst, idx, value);
+ else
+ write_to_buffer_32(dst, idx, value);
}
static void
@@ -527,18 +524,24 @@ v3d_reset_performance_queries(struct v3d_cpu_job *job)
}
static void
-v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
+v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
+ unsigned int query)
{
- struct v3d_performance_query_info *performance_query = &job->performance_query;
- struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_performance_query_info *performance_query =
+ &job->performance_query;
struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_performance_query *perf_query =
+ &performance_query->queries[query];
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_perfmon *perfmon;
- u64 counter_values[V3D_MAX_COUNTERS];
+ unsigned int i, j, offset;
+
+ for (i = 0, offset = 0;
+ i < performance_query->nperfmons;
+ i++, offset += DRM_V3D_MAX_PERF_COUNTERS) {
+ struct v3d_perfmon *perfmon;
- for (int i = 0; i < performance_query->nperfmons; i++) {
perfmon = v3d_perfmon_find(v3d_priv,
- performance_query->queries[query].kperfmon_ids[i]);
+ perf_query->kperfmon_ids[i]);
if (!perfmon) {
DRM_DEBUG("Failed to find perfmon.");
continue;
@@ -546,14 +549,18 @@ v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 quer
v3d_perfmon_stop(v3d, perfmon, true);
- memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
- perfmon->ncounters * sizeof(u64));
+ if (job->copy.do_64bit) {
+ for (j = 0; j < perfmon->ncounters; j++)
+ write_to_buffer_64(data, offset + j,
+ perfmon->values[j]);
+ } else {
+ for (j = 0; j < perfmon->ncounters; j++)
+ write_to_buffer_32(data, offset + j,
+ perfmon->values[j]);
+ }
v3d_perfmon_put(perfmon);
}
-
- for (int i = 0; i < performance_query->ncounters; i++)
- write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
}
static void
@@ -660,7 +667,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_start(&v3d->queue[q].sched, true);
+ drm_sched_start(&v3d->queue[q].sched);
}
mutex_unlock(&v3d->reset_lock);
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 4cdfabbf4964..d607aa9c4ec2 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -452,6 +452,7 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_timestamp_query timestamp;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
unsigned int i;
int err;
@@ -473,10 +474,10 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(timestamp.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(timestamp.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
offsets = u64_to_user_ptr(timestamp.offsets);
@@ -485,25 +486,26 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
for (i = 0; i < timestamp.count; i++) {
u32 offset, sync;
- if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ if (get_user(offset, offsets++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].offset = offset;
+ query_info->queries[i].offset = offset;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ if (get_user(sync, syncs++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
- job->timestamp_query.count = timestamp.count;
+ query_info->count = timestamp.count;
return 0;
@@ -519,6 +521,7 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
{
u32 __user *syncs;
struct drm_v3d_reset_timestamp_query reset;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
unsigned int i;
int err;
@@ -537,10 +540,10 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(reset.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
syncs = u64_to_user_ptr(reset.syncs);
@@ -548,20 +551,21 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
for (i = 0; i < reset.count; i++) {
u32 sync;
- job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+ query_info->queries[i].offset = reset.offset + 8 * i;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ if (get_user(sync, syncs++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
- job->timestamp_query.count = reset.count;
+ query_info->count = reset.count;
return 0;
@@ -578,6 +582,7 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_copy_timestamp_query copy;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
unsigned int i;
int err;
@@ -599,10 +604,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(copy.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
offsets = u64_to_user_ptr(copy.offsets);
@@ -611,25 +616,26 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
for (i = 0; i < copy.count; i++) {
u32 offset, sync;
- if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ if (get_user(offset, offsets++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].offset = offset;
+ query_info->queries[i].offset = offset;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ if (get_user(sync, syncs++)) {
err = -EFAULT;
goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->timestamp_query.queries[i].syncobj) {
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
- job->timestamp_query.count = copy.count;
+ query_info->count = copy.count;
job->copy.do_64bit = copy.do_64bit;
job->copy.do_partial = copy.do_partial;
@@ -645,95 +651,121 @@ error:
}
static int
-v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- struct v3d_cpu_job *job)
+v3d_copy_query_info(struct v3d_performance_query_info *query_info,
+ unsigned int count,
+ unsigned int nperfmons,
+ u32 __user *syncs,
+ u64 __user *kperfmon_ids,
+ struct drm_file *file_priv)
{
- u32 __user *syncs;
- u64 __user *kperfmon_ids;
- struct drm_v3d_reset_performance_query reset;
unsigned int i, j;
int err;
- if (!job) {
- DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
- return -EINVAL;
- }
-
- if (job->job_type) {
- DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&reset, ext, sizeof(reset)))
- return -EFAULT;
-
- if (reset.nperfmons > V3D_MAX_PERFMONS)
- return -EINVAL;
-
- job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
-
- job->performance_query.queries = kvmalloc_array(reset.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!job->performance_query.queries)
- return -ENOMEM;
-
- syncs = u64_to_user_ptr(reset.syncs);
- kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
-
- for (i = 0; i < reset.count; i++) {
- u32 sync;
- u64 ids;
+ for (i = 0; i < count; i++) {
+ struct v3d_performance_query *query = &query_info->queries[i];
u32 __user *ids_pointer;
- u32 id;
+ u32 sync, id;
+ u64 ids;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ if (get_user(sync, syncs++)) {
err = -EFAULT;
goto error;
}
- if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+ if (get_user(ids, kperfmon_ids++)) {
err = -EFAULT;
goto error;
}
+ query->kperfmon_ids =
+ kvmalloc_array(nperfmons,
+ sizeof(*query->kperfmon_ids),
+ GFP_KERNEL);
+ if (!query->kperfmon_ids) {
+ err = -ENOMEM;
+ goto error;
+ }
+
ids_pointer = u64_to_user_ptr(ids);
- for (j = 0; j < reset.nperfmons; j++) {
- if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+ for (j = 0; j < nperfmons; j++) {
+ if (get_user(id, ids_pointer++)) {
+ kvfree(query->kperfmon_ids);
err = -EFAULT;
goto error;
}
- job->performance_query.queries[i].kperfmon_ids[j] = id;
+ query->kperfmon_ids[j] = id;
}
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->performance_query.queries[i].syncobj) {
+ query->syncobj = drm_syncobj_find(file_priv, sync);
+ if (!query->syncobj) {
+ kvfree(query->kperfmon_ids);
err = -ENOENT;
goto error;
}
}
- job->performance_query.count = reset.count;
- job->performance_query.nperfmons = reset.nperfmons;
return 0;
error:
- v3d_performance_query_info_free(&job->performance_query, i);
+ v3d_performance_query_info_free(query_info, i);
return err;
}
static int
+v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ struct v3d_performance_query_info *query_info = &job->performance_query;
+ struct drm_v3d_reset_performance_query reset;
+ int err;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
+
+ query_info->queries =
+ kvmalloc_array(reset.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
+ return -ENOMEM;
+
+ err = v3d_copy_query_info(query_info,
+ reset.count,
+ reset.nperfmons,
+ u64_to_user_ptr(reset.syncs),
+ u64_to_user_ptr(reset.kperfmon_ids),
+ file_priv);
+ if (err)
+ return err;
+
+ query_info->count = reset.count;
+ query_info->nperfmons = reset.nperfmons;
+
+ return 0;
+}
+
+static int
v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
struct drm_v3d_extension __user *ext,
struct v3d_cpu_job *job)
{
- u32 __user *syncs;
- u64 __user *kperfmon_ids;
+ struct v3d_performance_query_info *query_info = &job->performance_query;
struct drm_v3d_copy_performance_query copy;
- unsigned int i, j;
int err;
if (!job) {
@@ -752,56 +784,27 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
if (copy.pad)
return -EINVAL;
- if (copy.nperfmons > V3D_MAX_PERFMONS)
- return -EINVAL;
-
job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
- job->performance_query.queries = kvmalloc_array(copy.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!job->performance_query.queries)
+ query_info->queries =
+ kvmalloc_array(copy.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
- syncs = u64_to_user_ptr(copy.syncs);
- kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
+ err = v3d_copy_query_info(query_info,
+ copy.count,
+ copy.nperfmons,
+ u64_to_user_ptr(copy.syncs),
+ u64_to_user_ptr(copy.kperfmon_ids),
+ file_priv);
+ if (err)
+ return err;
- for (i = 0; i < copy.count; i++) {
- u32 sync;
- u64 ids;
- u32 __user *ids_pointer;
- u32 id;
-
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- err = -EFAULT;
- goto error;
- }
-
- if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
- err = -EFAULT;
- goto error;
- }
-
- ids_pointer = u64_to_user_ptr(ids);
-
- for (j = 0; j < copy.nperfmons; j++) {
- if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
- err = -EFAULT;
- goto error;
- }
-
- job->performance_query.queries[i].kperfmon_ids[j] = id;
- }
-
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
- if (!job->performance_query.queries[i].syncobj) {
- err = -ENOENT;
- goto error;
- }
- }
- job->performance_query.count = copy.count;
- job->performance_query.nperfmons = copy.nperfmons;
- job->performance_query.ncounters = copy.ncounters;
+ query_info->count = copy.count;
+ query_info->nperfmons = copy.nperfmons;
+ query_info->ncounters = copy.ncounters;
job->copy.do_64bit = copy.do_64bit;
job->copy.do_partial = copy.do_partial;
@@ -810,10 +813,6 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
job->copy.stride = copy.stride;
return 0;
-
-error:
- v3d_performance_query_info_free(&job->performance_query, i);
- return err;
}
/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
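Both performance-query extensions now funnel through v3d_copy_query_info(),
which owns cleanup of partially-built state: on failure it calls
v3d_performance_query_info_free() for the entries initialized so far, which
also releases the queries array itself, so callers simply propagate the error.
A sketch of the calling contract (args stands in for either extension struct):

    err = v3d_copy_query_info(query_info, args.count, args.nperfmons,
                              u64_to_user_ptr(args.syncs),
                              u64_to_user_ptr(args.kperfmon_ids),
                              file_priv);
    if (err)
            return err;     /* query_info->queries already freed. */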
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index d4ade9325401..7f686a0190e6 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -114,6 +114,10 @@ int vbox_hw_init(struct vbox_private *vbox)
DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
+ ret = pcim_request_region(pdev, 0, "vboxvideo");
+ if (ret)
+ return ret;
+
/* Map guest-heap at end of vram */
vbox->guest_heap = pcim_iomap_range(pdev, 0,
GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 86d629e45307..3f72be7490d5 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -469,7 +469,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
if (IS_ERR(dma_obj)) {
struct drm_printer p = drm_info_printer(vc4->base.dev);
- DRM_ERROR("Failed to allocate from GEM DMA helper:\n");
+ drm_err(dev, "Failed to allocate from GEM DMA helper:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
@@ -702,7 +702,7 @@ static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
*/
ret = vc4_bo_inc_usecnt(bo);
if (ret) {
- DRM_ERROR("Failed to increment BO usecnt\n");
+ drm_err(obj->dev, "Failed to increment BO usecnt\n");
return ERR_PTR(ret);
}
@@ -1050,10 +1050,10 @@ static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
for (i = 0; i < vc4->num_labels; i++) {
if (vc4->bo_labels[i].num_allocated) {
- DRM_ERROR("Destroying BO cache with %d %s "
- "BOs still allocated\n",
- vc4->bo_labels[i].num_allocated,
- vc4->bo_labels[i].name);
+ drm_err(dev, "Destroying BO cache with %d %s "
+ "BOs still allocated\n",
+ vc4->bo_labels[i].num_allocated,
+ vc4->bo_labels[i].name);
}
if (is_user_label(i))
@@ -1083,7 +1083,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ drm_err(dev, "Failed to look up GEM BO %d\n", args->handle);
kfree(name);
return -ENOENT;
}
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 39152e755a13..a382dc4654bd 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -199,8 +199,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
DPI_FORMAT);
break;
default:
- DRM_ERROR("Unknown media bus format %d\n",
- bus_format);
+ drm_err(dev, "Unknown media bus format %d\n",
+ bus_format);
break;
}
}
@@ -236,11 +236,11 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
if (ret)
- DRM_ERROR("Failed to set clock rate: %d\n", ret);
+ drm_err(dev, "Failed to set clock rate: %d\n", ret);
ret = clk_prepare_enable(dpi->pixel_clock);
if (ret)
- DRM_ERROR("Failed to set clock rate: %d\n", ret);
+ drm_err(dev, "Failed to set clock rate: %d\n", ret);
drm_dev_exit(idx);
}
@@ -339,7 +339,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dpi->core_clock)) {
ret = PTR_ERR(dpi->core_clock);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get core clock: %d\n", ret);
+ drm_err(drm, "Failed to get core clock: %d\n", ret);
return ret;
}
@@ -347,13 +347,13 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dpi->pixel_clock)) {
ret = PTR_ERR(dpi->pixel_clock);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get pixel clock: %d\n", ret);
+ drm_err(drm, "Failed to get pixel clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(dpi->core_clock);
if (ret) {
- DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ drm_err(drm, "Failed to turn on core clock: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 46f6c4ce61c5..f5ccc1bf7a63 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -613,6 +613,7 @@ struct vc4_dsi {
static inline void
dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
{
+ struct drm_device *drm = dsi->bridge.dev;
struct dma_chan *chan = dsi->reg_dma_chan;
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
@@ -633,19 +634,19 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
dsi->reg_dma_paddr,
4, 0);
if (!tx) {
- DRM_ERROR("Failed to set up DMA register write\n");
+ drm_err(drm, "Failed to set up DMA register write\n");
return;
}
cookie = tx->tx_submit(tx);
ret = dma_submit_error(cookie);
if (ret) {
- DRM_ERROR("Failed to submit DMA: %d\n", ret);
+ drm_err(drm, "Failed to submit DMA: %d\n", ret);
return;
}
ret = dma_sync_wait(chan, cookie);
if (ret)
- DRM_ERROR("Failed to wait for DMA: %d\n", ret);
+ drm_err(drm, "Failed to wait for DMA: %d\n", ret);
}
#define DSI_READ(offset) \
@@ -893,7 +894,7 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
ret = pm_runtime_resume_and_get(dev);
if (ret) {
- DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
+ drm_err(bridge->dev, "Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
}
@@ -986,13 +987,14 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
ret = clk_prepare_enable(dsi->escape_clock);
if (ret) {
- DRM_ERROR("Failed to turn on DSI escape clock: %d\n", ret);
+ drm_err(bridge->dev, "Failed to turn on DSI escape clock: %d\n",
+ ret);
return;
}
ret = clk_prepare_enable(dsi->pll_phy_clock);
if (ret) {
- DRM_ERROR("Failed to turn on DSI PLL: %d\n", ret);
+ drm_err(bridge->dev, "Failed to turn on DSI PLL: %d\n", ret);
return;
}
@@ -1014,7 +1016,7 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
ret = clk_prepare_enable(dsi->pixel_clock);
if (ret) {
- DRM_ERROR("Failed to turn on DSI pixel clock: %d\n", ret);
+ drm_err(bridge->dev, "Failed to turn on DSI pixel clock: %d\n", ret);
return;
}
@@ -1172,6 +1174,7 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct vc4_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->bridge.dev;
struct mipi_dsi_packet packet;
u32 pkth = 0, pktc = 0;
int i, ret;
@@ -1303,8 +1306,8 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
DSI_RXPKT1H_BC_PARAM);
if (rxlen != msg->rx_len) {
- DRM_ERROR("DSI returned %db, expecting %db\n",
- rxlen, (int)msg->rx_len);
+ drm_err(drm, "DSI returned %db, expecting %db\n",
+ rxlen, (int)msg->rx_len);
ret = -ENXIO;
goto reset_fifo_and_return;
}
@@ -1326,7 +1329,7 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
return ret;
reset_fifo_and_return:
- DRM_ERROR("DSI transfer failed, resetting: %d\n", ret);
+ drm_err(drm, "DSI transfer failed, resetting: %d\n", ret);
DSI_PORT_WRITE(TXPKT1C, DSI_PORT_READ(TXPKT1C) & ~DSI_TXPKT1C_CMD_EN);
udelay(1);
@@ -1468,7 +1471,8 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
if (!(stat & bit))
return;
- DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
+ drm_err(dsi->bridge.dev, "DSI%d: %s error\n", dsi->variant->port,
+ type);
*ret = IRQ_HANDLED;
}
@@ -1687,7 +1691,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
&dsi->reg_dma_paddr,
GFP_KERNEL);
if (!dsi->reg_dma_mem) {
- DRM_ERROR("Failed to get DMA memory\n");
+ drm_err(drm, "Failed to get DMA memory\n");
return -ENOMEM;
}
@@ -1702,8 +1706,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dsi->reg_dma_chan)) {
ret = PTR_ERR(dsi->reg_dma_chan);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get DMA channel: %d\n",
- ret);
+ drm_err(drm, "Failed to get DMA channel: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 03648f954985..24fb1b57e1dd 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -832,8 +832,8 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
*/
temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
if (!temp) {
- DRM_ERROR("Failed to allocate storage for copying "
- "in bin/render CLs.\n");
+ drm_err(dev, "Failed to allocate storage for copying "
+ "in bin/render CLs.\n");
ret = -ENOMEM;
goto fail;
}
@@ -866,7 +866,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
if (IS_ERR(bo)) {
- DRM_ERROR("Couldn't allocate BO for binning\n");
+ drm_err(dev, "Couldn't allocate BO for binning\n");
ret = PTR_ERR(bo);
goto fail;
}
@@ -1153,10 +1153,9 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
}
exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
- if (!exec) {
- DRM_ERROR("malloc failure on exec struct\n");
+ if (!exec)
return -ENOMEM;
- }
+
exec->dev = vc4;
ret = vc4_v3d_pm_get(vc4);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index d57c4a5948c8..6611ab7c26a6 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -429,6 +429,7 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
{
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
enum drm_connector_status status = connector_status_disconnected;
+ int ret;
/*
* NOTE: This function should really take vc4_hdmi->mutex, but
@@ -441,7 +442,12 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
* the lock for now.
*/
- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+ if (ret) {
+ drm_err_once(connector->dev, "Failed to retain HDMI power domain: %d\n",
+ ret);
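+		/* Without its power domain the controller cannot sample HPD, so report "unknown". */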
+ return connector_status_unknown;
+ }
if (vc4_hdmi->hpd_gpio) {
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
@@ -698,7 +704,7 @@ static int vc4_hdmi_write_infoframe(struct drm_connector *connector,
ret = vc4_hdmi_stop_packet(vc4_hdmi, type, true);
if (ret) {
- DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
+ drm_err(drm, "Failed to wait for infoframe to go idle: %d\n", ret);
goto out;
}
@@ -734,7 +740,7 @@ static int vc4_hdmi_write_infoframe(struct drm_connector *connector,
ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
if (ret)
- DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
+ drm_err(drm, "Failed to wait for infoframe to start: %d\n", ret);
out:
drm_dev_exit(idx);
@@ -895,7 +901,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
if (ret < 0)
- DRM_ERROR("Failed to release power domain: %d\n", ret);
+ drm_err(drm, "Failed to release power domain: %d\n", ret);
drm_dev_exit(idx);
@@ -1437,7 +1443,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
- DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ drm_err(drm, "Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
}
@@ -1462,19 +1468,19 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
div_u64(tmds_char_rate, 100) * 101);
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
- DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
+ drm_err(drm, "Failed to set HSM clock rate: %d\n", ret);
goto err_put_runtime_pm;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
if (ret) {
- DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
+ drm_err(drm, "Failed to set pixel clock rate: %d\n", ret);
goto err_put_runtime_pm;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_clock);
if (ret) {
- DRM_ERROR("Failed to turn on pixel clock: %d\n", ret);
+ drm_err(drm, "Failed to turn on pixel clock: %d\n", ret);
goto err_put_runtime_pm;
}
@@ -1490,13 +1496,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
- DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+ drm_err(drm, "Failed to set pixel bvb clock rate: %d\n", ret);
goto err_disable_pixel_clock;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
- DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+ drm_err(drm, "Failed to turn on pixel bvb clock: %d\n", ret);
goto err_disable_pixel_clock;
}
@@ -2945,13 +2951,13 @@ static int vc4_hdmi_init_resources(struct drm_device *drm,
if (IS_ERR(vc4_hdmi->pixel_clock)) {
ret = PTR_ERR(vc4_hdmi->pixel_clock);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get pixel clock\n");
+ drm_err(drm, "Failed to get pixel clock\n");
return ret;
}
vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_clock)) {
- DRM_ERROR("Failed to get HDMI state machine clock\n");
+ drm_err(drm, "Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
@@ -3035,31 +3041,31 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(vc4_hdmi->hsm_clock)) {
- DRM_ERROR("Failed to get HDMI state machine clock\n");
+ drm_err(drm, "Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb");
if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) {
- DRM_ERROR("Failed to get pixel bvb clock\n");
+ drm_err(drm, "Failed to get pixel bvb clock\n");
return PTR_ERR(vc4_hdmi->pixel_bvb_clock);
}
vc4_hdmi->audio_clock = devm_clk_get(dev, "audio");
if (IS_ERR(vc4_hdmi->audio_clock)) {
- DRM_ERROR("Failed to get audio clock\n");
+ drm_err(drm, "Failed to get audio clock\n");
return PTR_ERR(vc4_hdmi->audio_clock);
}
vc4_hdmi->cec_clock = devm_clk_get(dev, "cec");
if (IS_ERR(vc4_hdmi->cec_clock)) {
- DRM_ERROR("Failed to get CEC clock\n");
+ drm_err(drm, "Failed to get CEC clock\n");
return PTR_ERR(vc4_hdmi->cec_clock);
}
vc4_hdmi->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(vc4_hdmi->reset)) {
- DRM_ERROR("Failed to get HDMI reset line\n");
+ drm_err(drm, "Failed to get HDMI reset line\n");
return PTR_ERR(vc4_hdmi->reset);
}
@@ -3215,14 +3221,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
if (!ddc_node) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
+ drm_err(drm, "Failed to find ddc node in device tree\n");
return -ENODEV;
}
vc4_hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
if (!vc4_hdmi->ddc) {
- DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
+ drm_err(drm, "Failed to get ddc i2c adapter by node\n");
return -EPROBE_DEFER;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 04af672caacb..2a835a5cff9d 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -191,8 +191,8 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
if (ret) {
- DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
- ret);
+ drm_err(&hvs->vc4->base, "Failed to allocate space for filter kernel: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 563b3dfeb9b9..ef93d8e22a35 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -76,7 +76,7 @@ vc4_overflow_mem_work(struct work_struct *work)
bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
if (bin_bo_slot < 0) {
- DRM_ERROR("Couldn't allocate binner overflow mem\n");
+ drm_err(&vc4->base, "Couldn't allocate binner overflow mem\n");
goto complete;
}
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 04ac7805e6d5..bf5c4e36c94e 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -441,21 +441,9 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
vc4->v3d = v3d;
v3d->vc4 = vc4;
- v3d->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(v3d->clk)) {
- int ret = PTR_ERR(v3d->clk);
-
- if (ret == -ENOENT) {
- /* bcm2835 didn't have a clock reference in the DT. */
- ret = 0;
- v3d->clk = NULL;
- } else {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get V3D clock: %d\n",
- ret);
- return ret;
- }
- }
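+	/* bcm2835 has no clock reference in its DT, so treat the clock as optional. */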
+ v3d->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(v3d->clk))
+ return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
ret = platform_get_irq(pdev, 0);
if (ret < 0)
@@ -471,8 +459,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
return ret;
if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
- DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
- V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
+ drm_err(drm, "V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
+ V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
ret = -EINVAL;
goto err_put_runtime_pm;
}
@@ -485,7 +473,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
ret = vc4_irq_install(drm, vc4->irq);
if (ret) {
- DRM_ERROR("Failed to install IRQ handler\n");
+ drm_err(drm, "Failed to install IRQ handler\n");
goto err_put_runtime_pm;
}
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 7dff3ca5af6b..0c17284bf6f5 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -65,7 +65,7 @@ utile_width(int cpp)
case 8:
return 2;
default:
- DRM_ERROR("unknown cpp: %d\n", cpp);
+ pr_err("unknown cpp: %d\n", cpp);
return 1;
}
}
@@ -82,7 +82,7 @@ utile_height(int cpp)
case 8:
return 4;
default:
- DRM_ERROR("unknown cpp: %d\n", cpp);
+ pr_err("unknown cpp: %d\n", cpp);
return 1;
}
}
@@ -390,8 +390,8 @@ validate_tile_binning_config(VALIDATE_ARGS)
bin_slot = vc4_v3d_get_bin_slot(vc4);
if (bin_slot < 0) {
if (bin_slot != -EINTR && bin_slot != -ERESTARTSYS) {
- DRM_ERROR("Failed to allocate binner memory: %d\n",
- bin_slot);
+ drm_err(dev, "Failed to allocate binner memory: %d\n",
+ bin_slot);
}
return bin_slot;
}
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 070813b8aff8..eb64e881051e 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -557,7 +557,7 @@ static void vc4_vec_encoder_disable(struct drm_encoder *encoder,
ret = pm_runtime_put(&vec->pdev->dev);
if (ret < 0) {
- DRM_ERROR("Failed to release power domain: %d\n", ret);
+ drm_err(drm, "Failed to release power domain: %d\n", ret);
goto err_dev_exit;
}
@@ -591,7 +591,7 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
ret = pm_runtime_resume_and_get(&vec->pdev->dev);
if (ret < 0) {
- DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ drm_err(drm, "Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
}
@@ -604,13 +604,13 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
*/
ret = clk_set_rate(vec->clock, 108000000);
if (ret) {
- DRM_ERROR("Failed to set clock rate: %d\n", ret);
+ drm_err(drm, "Failed to set clock rate: %d\n", ret);
goto err_put_runtime_pm;
}
ret = clk_prepare_enable(vec->clock);
if (ret) {
- DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ drm_err(drm, "Failed to turn on core clock: %d\n", ret);
goto err_put_runtime_pm;
}
@@ -806,7 +806,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vec->clock)) {
ret = PTR_ERR(vec->clock);
if (ret != -EPROBE_DEFER)
- DRM_ERROR("Failed to get clock: %d\n", ret);
+ drm_err(drm, "Failed to get clock: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 8dc9dc13896e..0c1a713b7b7b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -164,9 +164,11 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.cursor_width = 512;
dev->mode_config.cursor_height = 512;
- /* FIXME: There's a confusion between bpp and depth between this and
+ /*
+ * FIXME: There's a confusion between bpp and depth between this and
* fbdev helpers. We have to go with 0, meaning "pick the default",
- * which ix XRGB8888 in all cases. */
+ * which is XRGB8888 in all cases.
+ */
dev->mode_config.preferred_depth = 0;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 8f5710debb1e..5e46ea5b96dc 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -103,7 +103,6 @@ struct vkms_output {
struct drm_writeback_connector wb_connector;
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
- struct drm_pending_vblank_event *event;
/* ordered wq for composer_work */
struct workqueue_struct *composer_workq;
/* protects concurrent access to composer */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 36046b12f296..040b7f113a3b 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -75,7 +75,7 @@ static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixe
static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
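+	/* Pixel data is little-endian; __force keeps sparse quiet about the cast. */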
+ __le16 *pixels = (__force __le16 *)src_pixels;
out_pixel->a = le16_to_cpu(pixels[3]);
out_pixel->r = le16_to_cpu(pixels[2]);
@@ -85,7 +85,7 @@ static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_
static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
+ __le16 *pixels = (__force __le16 *)src_pixels;
out_pixel->a = (u16)0xffff;
out_pixel->r = le16_to_cpu(pixels[2]);
@@ -95,7 +95,7 @@ static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_
static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
+ __le16 *pixels = (__force __le16 *)src_pixels;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
@@ -178,7 +178,7 @@ static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel
static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
pixels[3] = cpu_to_le16(in_pixel->a);
pixels[2] = cpu_to_le16(in_pixel->r);
@@ -188,9 +188,9 @@ static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_p
static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
- pixels[3] = 0xffff;
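+	/* The X padding must be endian-converted like the colour channels. */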
+ pixels[3] = cpu_to_le16(0xffff);
pixels[2] = cpu_to_le16(in_pixel->r);
pixels[1] = cpu_to_le16(in_pixel->g);
pixels[0] = cpu_to_le16(in_pixel->b);
@@ -198,7 +198,7 @@ static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_p
static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index e97c9da451b3..edfd812e0f41 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -12,34 +12,15 @@ subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror
subdir-ccflags-y += -I$(obj) -I$(src)
# generated sources
-hostprogs := xe_gen_wa_oob
+hostprogs := xe_gen_wa_oob
generated_oob := $(obj)/generated/xe_wa_oob.c $(obj)/generated/xe_wa_oob.h
-
quiet_cmd_wa_oob = GEN $(notdir $(generated_oob))
cmd_wa_oob = mkdir -p $(@D); $^ $(generated_oob)
-
$(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
$(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
-uses_generated_oob := \
- $(obj)/xe_ggtt.o \
- $(obj)/xe_device.o \
- $(obj)/xe_gsc.o \
- $(obj)/xe_gt.o \
- $(obj)/xe_guc.o \
- $(obj)/xe_guc_ads.o \
- $(obj)/xe_guc_pc.o \
- $(obj)/xe_migrate.o \
- $(obj)/xe_pat.o \
- $(obj)/xe_ring_ops.o \
- $(obj)/xe_vm.o \
- $(obj)/xe_wa.o \
- $(obj)/xe_ttm_stolen_mgr.o
-
-$(uses_generated_oob): $(generated_oob)
-
# Please keep these build lists sorted!
# core driver code
@@ -47,7 +28,6 @@ $(uses_generated_oob): $(generated_oob)
xe-y += xe_bb.o \
xe_bo.o \
xe_bo_evict.o \
- xe_debugfs.o \
xe_devcoredump.o \
xe_device.o \
xe_device_sysfs.o \
@@ -60,12 +40,12 @@ xe-y += xe_bb.o \
xe_ggtt.o \
xe_gpu_scheduler.o \
xe_gsc.o \
+ xe_gsc_debugfs.o \
xe_gsc_proxy.o \
xe_gsc_submit.o \
xe_gt.o \
xe_gt_ccs_mode.o \
xe_gt_clock.o \
- xe_gt_debugfs.o \
xe_gt_freq.o \
xe_gt_idle.o \
xe_gt_mcr.o \
@@ -78,7 +58,6 @@ xe-y += xe_bb.o \
xe_guc_ads.o \
xe_guc_ct.o \
xe_guc_db_mgr.o \
- xe_guc_debugfs.o \
xe_guc_hwconfig.o \
xe_guc_id_mgr.o \
xe_guc_klv_helpers.o \
@@ -88,9 +67,9 @@ xe-y += xe_bb.o \
xe_heci_gsc.o \
xe_hw_engine.o \
xe_hw_engine_class_sysfs.o \
+ xe_hw_engine_group.o \
xe_hw_fence.o \
xe_huc.o \
- xe_huc_debugfs.o \
xe_irq.o \
xe_lrc.o \
xe_migrate.o \
@@ -126,7 +105,6 @@ xe-y += xe_bb.o \
xe_ttm_vram_mgr.o \
xe_tuning.o \
xe_uc.o \
- xe_uc_debugfs.o \
xe_uc_fw.o \
xe_vm.o \
xe_vram.o \
@@ -143,7 +121,6 @@ xe-$(CONFIG_HWMON) += xe_hwmon.o
# graphics virtualization (SR-IOV) support
xe-y += \
xe_gt_sriov_vf.o \
- xe_gt_sriov_vf_debugfs.o \
xe_guc_relay.o \
xe_memirq.o \
xe_sriov.o
@@ -152,7 +129,6 @@ xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
xe_gt_sriov_pf_config.o \
xe_gt_sriov_pf_control.o \
- xe_gt_sriov_pf_debugfs.o \
xe_gt_sriov_pf_monitor.o \
xe_gt_sriov_pf_policy.o \
xe_gt_sriov_pf_service.o \
@@ -194,6 +170,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/xe_display.o \
display/xe_display_misc.o \
display/xe_display_rps.o \
+ display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
display/xe_hdcp_gsc.o \
@@ -299,6 +276,16 @@ ifeq ($(CONFIG_DRM_FBDEV_EMULATION),y)
endif
ifeq ($(CONFIG_DEBUG_FS),y)
+ xe-y += xe_debugfs.o \
+ xe_gt_debugfs.o \
+ xe_gt_sriov_vf_debugfs.o \
+ xe_gt_stats.o \
+ xe_guc_debugfs.o \
+ xe_huc_debugfs.o \
+ xe_uc_debugfs.o
+
+ xe-$(CONFIG_PCI_IOV) += xe_gt_sriov_pf_debugfs.o
+
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_display_debugfs.o \
i915-display/intel_display_debugfs_params.o \
@@ -322,3 +309,6 @@ quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
+
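+# Any xe-y object may include the generated xe_wa_oob.h, so they all depend on it.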
+uses_generated_oob := $(addprefix $(obj)/, $(xe-y))
+$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 8f9f60b28306..6b30743a2f6c 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -351,6 +351,7 @@ enum xe_guc_klv_ids {
GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING = 0x9005,
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
+ GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
};
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 1f1ad4d3ef51..f27a2c75b56d 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -21,11 +21,6 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
-static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
-{
- return dev_get_drvdata(kdev);
-}
-
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
#define IS_I830(dev_priv) (dev_priv && 0)
@@ -80,14 +75,9 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_MOBILE(xe) (xe && 0)
-#define HAS_GMD_ID(xe) GRAPHICS_VERx100(xe) >= 1270
-
-/* Workarounds not handled yet */
-#define IS_DISPLAY_STEP(xe, first, last) ({u8 __step = (xe)->info.step.display; first <= __step && __step < last; })
-
-#define IS_LP(xe) (0)
-#define IS_GEN9_LP(xe) (0)
-#define IS_GEN9_BC(xe) (0)
+#define IS_LP(xe) ((xe) && 0)
+#define IS_GEN9_LP(xe) ((xe) && 0)
+#define IS_GEN9_BC(xe) ((xe) && 0)
#define IS_TIGERLAKE_UY(xe) (xe && 0)
#define IS_COMETLAKE_ULX(xe) (xe && 0)
@@ -115,9 +105,6 @@ struct i915_sched_attr {
};
#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)
-#define pdev_to_i915 pdev_to_xe_device
-#define RUNTIME_INFO(xe) (&(xe)->info.i915_runtime)
-
#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
#ifdef CONFIG_ARM64
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
index a20d2638ea7a..bdae8392e125 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
@@ -7,7 +7,8 @@
#define I915_VMA_H
#include <uapi/drm/i915_drm.h>
-#include <drm/drm_mm.h>
+
+#include "xe_ggtt_types.h"
/* We don't want these from i915_drm.h in case of Xe */
#undef I915_TILING_X
@@ -19,7 +20,7 @@ struct xe_bo;
struct i915_vma {
struct xe_bo *bo, *dpt;
- struct drm_mm_node node;
+ struct xe_ggtt_node *node;
};
#define i915_ggtt_clear_scanout(bo) do { } while (0)
@@ -28,7 +29,7 @@ struct i915_vma {
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
- return vma->node.start;
+ return vma->node->base.start;
}
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
index 0006ef812346..2cf13a572ab0 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
@@ -6,15 +6,9 @@
#ifndef __INTEL_STEP_H__
#define __INTEL_STEP_H__
-#include "xe_device_types.h"
#include "xe_step.h"
-#define intel_display_step_name xe_display_step_name
-
-static inline
-const char *xe_display_step_name(struct xe_device *xe)
-{
- return xe_step_name(xe->info.step.display);
-}
+#define intel_step xe_step
+#define intel_step_name xe_step_name
#endif /* __INTEL_STEP_H__ */
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index f835492f73fb..63ce97cc4cfe 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -7,6 +7,7 @@
#include <drm/ttm/ttm_bo.h>
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "xe_bo.h"
@@ -28,6 +29,14 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
int ret;
+ /*
+	 * Some modifiers require 64KiB physical alignment of VRAM pages;
+	 * reject BOs that were not created with that constraint.
+ */
+ if (XE_IOCTL_DBG(xe, intel_fb_needs_64k_phys(mode_cmd->modifier[0]) &&
+ !(bo->flags & XE_BO_FLAG_NEEDS_64K)))
+ return -EINVAL;
+
xe_bo_get(bo);
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 816ad13821a8..99499d6c0256 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -8,8 +8,10 @@
#include "intel_display_types.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
-#include "xe_gt.h"
#include "xe_ttm_stolen_mgr.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
@@ -37,7 +39,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe)) {
+ if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
@@ -48,6 +50,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
else
drm_info(&xe->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj));
}
+
if (IS_ERR(obj)) {
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index c860fda410c8..75736faf2a80 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -10,7 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
@@ -46,7 +46,7 @@ static bool has_display(struct xe_device *xe)
*/
bool xe_display_driver_probe_defer(struct pci_dev *pdev)
{
- if (!xe_modparam.enable_display)
+ if (!xe_modparam.probe_display)
return 0;
return intel_display_driver_probe_defer(pdev);
@@ -62,7 +62,7 @@ bool xe_display_driver_probe_defer(struct pci_dev *pdev)
*/
void xe_display_driver_set_hooks(struct drm_driver *driver)
{
- if (!xe_modparam.enable_display)
+ if (!xe_modparam.probe_display)
return;
driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
@@ -104,7 +104,7 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_power_domains_cleanup(xe);
@@ -112,7 +112,7 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
int xe_display_init_nommio(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return 0;
/* Fake uncore lock */
@@ -127,25 +127,27 @@ int xe_display_init_nommio(struct xe_device *xe)
static void xe_display_fini_noirq(void *arg)
{
struct xe_device *xe = arg;
+ struct intel_display *display = &xe->display;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_driver_remove_noirq(xe);
- intel_opregion_cleanup(xe);
+ intel_opregion_cleanup(display);
}
int xe_display_init_noirq(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
int err;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return 0;
intel_display_driver_early_probe(xe);
/* Early display init.. */
- intel_opregion_setup(xe);
+ intel_opregion_setup(display);
/*
* Fill the dram structure to get the system dram info. This will be
@@ -159,7 +161,7 @@ int xe_display_init_noirq(struct xe_device *xe)
err = intel_display_driver_probe_noirq(xe);
if (err) {
- intel_opregion_cleanup(xe);
+ intel_opregion_cleanup(display);
return err;
}
@@ -170,7 +172,7 @@ static void xe_display_fini_noaccel(void *arg)
{
struct xe_device *xe = arg;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_driver_remove_nogem(xe);
@@ -180,7 +182,7 @@ int xe_display_init_noaccel(struct xe_device *xe)
{
int err;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return 0;
err = intel_display_driver_probe_nogem(xe);
@@ -192,7 +194,7 @@ int xe_display_init_noaccel(struct xe_device *xe)
int xe_display_init(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return 0;
return intel_display_driver_probe(xe);
@@ -200,7 +202,7 @@ int xe_display_init(struct xe_device *xe)
void xe_display_fini(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_hpd_poll_fini(xe);
@@ -211,7 +213,7 @@ void xe_display_fini(struct xe_device *xe)
void xe_display_register(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_driver_register(xe);
@@ -221,7 +223,7 @@ void xe_display_register(struct xe_device *xe)
void xe_display_unregister(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_unregister_dsm_handler();
@@ -231,7 +233,7 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_driver_remove(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_driver_remove(xe);
@@ -241,7 +243,7 @@ void xe_display_driver_remove(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
if (master_ctl & DISPLAY_IRQ)
@@ -250,16 +252,18 @@ void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
{
- if (!xe->info.enable_display)
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
return;
if (gu_misc_iir & GU_MISC_GSE)
- intel_opregion_asle_intr(xe);
+ intel_opregion_asle_intr(display);
}
void xe_display_irq_reset(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
gen11_display_irq_reset(xe);
@@ -267,7 +271,7 @@ void xe_display_irq_reset(struct xe_device *xe)
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
if (gt->info.id == XE_GT0)
@@ -304,10 +308,23 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
}
}
+/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
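+	/* d3cold powers the device off entirely, so a full display suspend is required. */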
+ if (xe->d3cold.allowed)
+ xe_display_pm_suspend(xe, true);
+
+ intel_hpd_poll_enable(xe);
+}
+
void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
{
+ struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
/*
@@ -316,14 +333,11 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
*/
intel_power_domains_disable(xe);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
- if (has_display(xe)) {
+ if (!runtime && has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
- if (!runtime)
- intel_display_driver_disable_user_access(xe);
- }
-
- if (!runtime)
+ intel_display_driver_disable_user_access(xe);
intel_display_driver_suspend(xe);
+ }
xe_display_flush_cleanup_work(xe);
@@ -336,7 +350,7 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_encoder_suspend_all(&xe->display);
}
- intel_opregion_suspend(xe, s2idle ? PCI_D1 : PCI_D3cold);
+ intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
intel_dmc_suspend(xe);
}
@@ -344,7 +358,7 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
void xe_display_pm_suspend_late(struct xe_device *xe)
{
bool s2idle = suspend_to_idle();
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_power_domains_suspend(xe, s2idle);
@@ -352,9 +366,20 @@ void xe_display_pm_suspend_late(struct xe_device *xe)
intel_display_power_suspend_late(xe);
}
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ intel_hpd_poll_disable(xe);
+
+ if (xe->d3cold.allowed)
+ xe_display_pm_resume(xe, true);
+}
+
void xe_display_pm_resume_early(struct xe_device *xe)
{
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
return;
intel_display_power_resume_early(xe);
@@ -364,7 +389,9 @@ void xe_display_pm_resume_early(struct xe_device *xe)
void xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
- if (!xe->info.enable_display)
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
return;
intel_dmc_resume(xe);
@@ -380,17 +407,14 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
/* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(xe);
- if (!runtime)
+ if (!runtime && has_display(xe)) {
intel_display_driver_resume(xe);
-
- if (has_display(xe)) {
drm_kms_helper_poll_enable(&xe->drm);
- if (!runtime)
- intel_display_driver_enable_user_access(xe);
+ intel_display_driver_enable_user_access(xe);
+ intel_hpd_poll_disable(xe);
}
- intel_hpd_poll_disable(xe);
- intel_opregion_resume(xe);
+ intel_opregion_resume(display);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
@@ -408,7 +432,7 @@ int xe_display_probe(struct xe_device *xe)
{
int err;
- if (!xe->info.enable_display)
+ if (!xe->info.probe_display)
goto no_display;
intel_display_device_probe(xe);
@@ -421,7 +445,7 @@ int xe_display_probe(struct xe_device *xe)
return 0;
no_display:
- xe->info.enable_display = false;
+ xe->info.probe_display = false;
unset_display_features(xe);
return 0;
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 000fb5799df5..53d727fd792b 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -38,6 +38,8 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
void xe_display_pm_suspend_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+void xe_display_pm_runtime_suspend(struct xe_device *xe);
+void xe_display_pm_runtime_resume(struct xe_device *xe);
#else
@@ -67,6 +69,8 @@ static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
#endif /* CONFIG_DRM_XE_DISPLAY */
#endif /* _XE_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
new file mode 100644
index 000000000000..68e3d1959ad6
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "intel_display_wa.h"
+
+#include "xe_device.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
+
+bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+{
+ return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index ccd0d87d438a..f99d901a3214 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -9,7 +9,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
-#include "xe_gt.h"
u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index d7db44e79eaf..b58fc4ba2aac 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -12,7 +12,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
-#include "xe_gt.h"
#include "xe_pm.h"
static void
@@ -204,21 +203,28 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node.size && view->type == I915_GTT_VIEW_NORMAL) {
+ if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) {
vma->node = bo->ggtt_node;
} else if (view->type == I915_GTT_VIEW_NORMAL) {
u32 x, size = bo->ttm.base.size;
- ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
- align, 0);
- if (ret)
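+		/* The node is heap-allocated; xe_ggtt_node_fini() must free it if insertion fails. */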
+ vma->node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(vma->node)) {
+ ret = PTR_ERR(vma->node);
goto out_unlock;
+ }
+
+ ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+ if (ret) {
+ xe_ggtt_node_fini(vma->node);
+ goto out_unlock;
+ }
for (x = 0; x < size; x += XE_PAGE_SIZE) {
u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
xe->pat.idx[XE_CACHE_NONE]);
- ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node.start + x, pte);
+ ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte);
}
} else {
u32 i, ggtt_ofs;
@@ -227,12 +233,19 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
/* display seems to use tiles instead of bytes here, so convert it back.. */
u32 size = intel_rotation_info_size(rot_info) * XE_PAGE_SIZE;
- ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
- align, 0);
- if (ret)
+ vma->node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(vma->node)) {
+ ret = PTR_ERR(vma->node);
+ goto out_unlock;
+ }
+
+ ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+ if (ret) {
+ xe_ggtt_node_fini(vma->node);
goto out_unlock;
+ }
- ggtt_ofs = vma->node.start;
+ ggtt_ofs = vma->node->base.start;
for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
write_ggtt_rotated(bo, ggtt, &ggtt_ofs,
@@ -320,14 +333,11 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
- struct xe_device *xe = to_xe_device(vma->bo->ttm.base.dev);
- struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
-
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
- else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
- vma->bo->ggtt_node.start != vma->node.start)
- xe_ggtt_remove_node(ggtt, &vma->node, false);
+ else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) ||
+ vma->bo->ggtt_node->base.start != vma->node->base.start)
+ xe_ggtt_node_remove(vma->node, false);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
ttm_bo_unpin(&vma->bo->ttm);
@@ -377,8 +387,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
}
/*
- * For Xe introduce dummy intel_dpt_create which just return NULL and
- * intel_dpt_destroy which does nothing.
+ * For Xe introduce dummy intel_dpt_create which just returns NULL,
+ * intel_dpt_destroy which does nothing, and a fake intel_dpt_offset returning 0.
*/
struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
{
@@ -389,3 +399,8 @@ void intel_dpt_destroy(struct i915_address_space *vm)
{
return;
}
+
+u64 intel_dpt_offset(struct i915_vma *dpt_vma)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 990285aa9b26..6619a40aed15 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -16,7 +16,6 @@
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
-#include "xe_gt.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_uc_fw.h"
@@ -40,10 +39,14 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
+ struct xe_gsc *gsc = &gt->uc.gsc;
bool ret = true;
- if (!xe_uc_fw_is_enabled(&gt->uc.gsc.fw))
+	if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
+		drm_dbg_kms(&xe->drm,
+			    "GSC components not ready for HDCP2.x\n");
return false;
+ }
xe_pm_runtime_get(xe);
if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
@@ -53,7 +56,7 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe)
goto out;
}
- if (!xe_gsc_proxy_init_done(&gt->uc.gsc))
+ if (!xe_gsc_proxy_init_done(gsc))
ret = false;
xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 5eccd6abb3ef..a50ab9eae40a 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -18,6 +18,9 @@
#include "intel_frontbuffer.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
static bool
intel_reuse_initial_plane_obj(struct intel_crtc *this,
@@ -104,6 +107,9 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
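+		/* Skip the stolen-memory path when WA 22019338487 applies (see intel_fbdev_fb.c). */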
+ if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display))
+ return NULL;
+
/*
* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index c38db2a74614..81b71903675e 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -104,6 +104,7 @@
#define CSFE_CHICKEN1(base) XE_REG((base) + 0xd4, XE_REG_OPTION_MASKED)
#define GHWSP_CSB_REPORT_DIS REG_BIT(15)
#define PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS REG_BIT(14)
+#define CS_PRIORITY_MEM_READ REG_BIT(7)
#define FF_SLICE_CS_CHICKEN1(base) XE_REG((base) + 0xe0, XE_REG_OPTION_MASKED)
#define FFSC_PERCTX_PREEMPT_CTRL REG_BIT(14)
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index e2a925be137c..7702364b65f1 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -32,8 +32,12 @@
#define HECI1_FWSTS1_CURRENT_STATE_RESET 0
#define HECI1_FWSTS1_PROXY_STATE_NORMAL 5
#define HECI1_FWSTS1_INIT_COMPLETE REG_BIT(9)
+#define HECI_FWSTS2(base) XE_REG((base) + 0xc48)
+#define HECI_FWSTS3(base) XE_REG((base) + 0xc60)
+#define HECI_FWSTS4(base) XE_REG((base) + 0xc64)
#define HECI_FWSTS5(base) XE_REG((base) + 0xc68)
#define HECI1_FWSTS5_HUC_AUTH_DONE REG_BIT(19)
+#define HECI_FWSTS6(base) XE_REG((base) + 0xc6c)
#define HECI_H_GS1(base) XE_REG((base) + 0xc4c)
#define HECI_H_GS1_ER_PREP REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 3c2865040058..660ff42e45a6 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -80,7 +80,10 @@
#define LE_CACHEABILITY_MASK REG_GENMASK(1, 0)
#define LE_CACHEABILITY(value) REG_FIELD_PREP(LE_CACHEABILITY_MASK, value)
-#define XE2_GAMREQSTRM_CTRL XE_REG(0x4194)
+#define STATELESS_COMPRESSION_CTRL XE_REG_MCR(0x4148)
+#define UNIFIED_COMPRESSION_FORMAT REG_GENMASK(3, 0)
+
+#define XE2_GAMREQSTRM_CTRL XE_REG_MCR(0x4194)
#define CG_DIS_CNTLBUS REG_BIT(6)
#define CCS_AUX_INV XE_REG(0x4208)
@@ -91,6 +94,8 @@
#define VE1_AUX_INV XE_REG(0x42b8)
#define AUX_INV REG_BIT(0)
+#define XE2_LMEM_CFG XE_REG(0x48b0)
+
#define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4)
#define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910)
#define XEHP_FLAT_CCS_PTR REG_GENMASK(31, 8)
@@ -100,12 +105,14 @@
#define CHICKEN_RASTER_1 XE_REG_MCR(0x6204, XE_REG_OPTION_MASKED)
#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+#define DIS_CLIP_NEGATIVE_BOUNDING_BOX REG_BIT(6)
#define CHICKEN_RASTER_2 XE_REG_MCR(0x6208, XE_REG_OPTION_MASKED)
#define TBIMR_FAST_CLIP REG_BIT(5)
#define FF_MODE XE_REG_MCR(0x6210)
#define DIS_TE_AUTOSTRIP REG_BIT(31)
+#define VS_HIT_MAX_VALUE_MASK REG_GENMASK(25, 20)
#define DIS_MESH_PARTIAL_AUTOSTRIP REG_BIT(16)
#define DIS_MESH_AUTOSTRIP REG_BIT(15)
@@ -190,6 +197,7 @@
#define GSCPSMI_BASE XE_REG(0x880c)
#define CCCHKNREG1 XE_REG_MCR(0x8828)
+#define L3CMPCTRL REG_BIT(23)
#define ENCOMPPERFFIX REG_BIT(18)
/* Fuse readout registers for GT */
@@ -364,6 +372,9 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define L3SQCREG2 XE_REG_MCR(0xb104)
+#define COMPMEMRD256BOVRFETCHEN REG_BIT(20)
+
#define L3SQCREG3 XE_REG_MCR(0xb108)
#define COMPPWOVERFETCHEN REG_BIT(28)
@@ -403,6 +414,10 @@
#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
+#define LMEM_CFG XE_REG(0xcf58)
+#define LMEM_EN REG_BIT(31)
+#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
+
#define HALF_SLICE_CHICKEN5 XE_REG_MCR(0xe188, XE_REG_OPTION_MASKED)
#define DISABLE_SAMPLE_G_PERFORMANCE REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 23e33ec84902..dfa869f0dddd 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -15,8 +15,6 @@
#define GU_MISC_IRQ_OFFSET 0x444f0
#define GU_MISC_GSE REG_BIT(27)
-#define SOFTWARE_FLAGS_SPR33 XE_REG(0x4f084)
-
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
@@ -24,11 +22,14 @@
#define LMEM_INIT REG_BIT(7)
#define DRIVERFLR REG_BIT(31)
+#define XEHP_CLOCK_GATE_DIS XE_REG(0x101014)
+#define SGSI_SIDECLK_DIS REG_BIT(17)
+
#define GU_DEBUG XE_REG(0x101018)
#define DRIVERFLR_STATUS REG_BIT(31)
-#define XEHP_CLOCK_GATE_DIS XE_REG(0x101014)
-#define SGSI_SIDECLK_DIS REG_BIT(17)
+#define VIRTUAL_CTRL_REG XE_REG(0x10108c)
+#define GUEST_GTT_UPDATE_EN REG_BIT(8)
#define XEHP_MTCFG_ADDR XE_REG(0x101800)
#define TILE_COUNT REG_GENMASK(15, 8)
@@ -66,6 +67,9 @@
#define DISPLAY_IRQ REG_BIT(16)
#define GT_DW_IRQ(x) REG_BIT(x)
+#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
+#define VF_CAP REG_BIT(0)
+
#define PVC_RP_STATE_CAP XE_REG(0x281014)
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
deleted file mode 100644
index 017b4ddd1ecf..000000000000
--- a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _REGS_XE_SRIOV_REGS_H_
-#define _REGS_XE_SRIOV_REGS_H_
-
-#include "regs/xe_reg_defs.h"
-
-#define XE2_LMEM_CFG XE_REG(0x48b0)
-
-#define LMEM_CFG XE_REG(0xcf58)
-#define LMEM_EN REG_BIT(31)
-#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
-
-#define VIRTUAL_CTRL_REG XE_REG(0x10108c)
-#define GUEST_GTT_UPDATE_EN REG_BIT(8)
-
-#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
-#define VF_CAP REG_BIT(0)
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
index 6e58931fddd4..0e3408f4952c 100644
--- a/drivers/gpu/drm/xe/tests/Makefile
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -2,11 +2,7 @@
# "live" kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o
-xe_live_test-y = xe_live_test_mod.o \
- xe_bo_test.o \
- xe_dma_buf_test.o \
- xe_migrate_test.o \
- xe_mocs_test.o
+xe_live_test-y = xe_live_test_mod.o
# Normal kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 9f3c02826464..8dac069483e8 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_bo_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -36,7 +36,8 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
/* Optionally clear bo *and* CCS data in VRAM. */
if (clear) {
- fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource);
+ fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (IS_ERR(fence)) {
KUNIT_FAIL(test, "Failed to submit bo clear.\n");
return PTR_ERR(fence);
@@ -124,7 +125,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
kunit_info(test, "Testing system memory\n");
bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, bo_flags);
+ bo_flags);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "Failed to create bo.\n");
return;
@@ -154,12 +155,18 @@ out_unlock:
static int ccs_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
if (!xe_device_has_flat_ccs(xe)) {
- kunit_info(test, "Skipping non-flat-ccs device.\n");
+ kunit_skip(test, "non-flat-ccs device\n");
+ return 0;
+ }
+
+ /* For xe2+ dgfx, we don't handle ccs metadata */
+ if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) {
+ kunit_skip(test, "xe2+ dgfx device\n");
return 0;
}
@@ -177,11 +184,12 @@ static int ccs_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_ccs_migrate_kunit(struct kunit *test)
+static void xe_ccs_migrate_kunit(struct kunit *test)
{
- xe_call_for_each_device(ccs_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ ccs_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
{
@@ -198,7 +206,6 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
xe_vm_lock(vm, false);
bo = xe_bo_create_user(xe, NULL, vm, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device,
bo_flags);
xe_vm_unlock(vm);
if (IS_ERR(bo)) {
@@ -208,7 +215,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
external = xe_bo_create_user(xe, NULL, NULL, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, bo_flags);
+ bo_flags);
if (IS_ERR(external)) {
KUNIT_FAIL(test, "external bo create err=%pe\n", external);
goto cleanup_bo;
@@ -325,13 +332,12 @@ cleanup_bo:
static int evict_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
if (!IS_DGFX(xe)) {
- kunit_info(test, "Skipping non-discrete device %s.\n",
- dev_name(xe->drm.dev));
+ kunit_skip(test, "non-discrete device\n");
return 0;
}
@@ -345,8 +351,23 @@ static int evict_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_bo_evict_kunit(struct kunit *test)
+static void xe_bo_evict_kunit(struct kunit *test)
{
- xe_call_for_each_device(evict_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ evict_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_bo_evict_kunit);
+
+static struct kunit_case xe_bo_tests[] = {
+ KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_test_suite = {
+ .name = "xe_bo",
+ .test_cases = xe_bo_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c
deleted file mode 100644
index a324cde77db8..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_bo_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_bo_tests[] = {
- KUNIT_CASE(xe_ccs_migrate_kunit),
- KUNIT_CASE(xe_bo_evict_kunit),
- {}
-};
-
-static struct kunit_suite xe_bo_test_suite = {
- .name = "xe_bo",
- .test_cases = xe_bo_tests,
-};
-
-kunit_test_suite(xe_bo_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.h b/drivers/gpu/drm/xe/tests/xe_bo_test.h
deleted file mode 100644
index 0113ab45066a..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_BO_TEST_H_
-#define _XE_BO_TEST_H_
-
-struct kunit;
-
-void xe_ccs_migrate_kunit(struct kunit *test);
-void xe_bo_evict_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index e7f9b531c465..cedd3e88a6fb 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -3,12 +3,12 @@
* Copyright © 2022 Intel Corporation
*/
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_dma_buf_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
@@ -107,7 +107,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
struct drm_gem_object *import;
struct dma_buf *dmabuf;
@@ -126,7 +126,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
kunit_info(test, "running %s\n", __func__);
bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device, params->mem_mask);
+ params->mem_mask);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -258,7 +258,7 @@ static const struct dma_buf_test_params test_params[] = {
static int dma_buf_run_device(struct xe_device *xe)
{
const struct dma_buf_test_params *params;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
xe_pm_runtime_get(xe);
for (params = test_params; params->mem_mask; ++params) {
@@ -274,8 +274,22 @@ static int dma_buf_run_device(struct xe_device *xe)
return 0;
}
-void xe_dma_buf_kunit(struct kunit *test)
+static void xe_dma_buf_kunit(struct kunit *test)
{
- xe_call_for_each_device(dma_buf_run_device);
+ struct xe_device *xe = test->priv;
+
+ dma_buf_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit);
+
+static struct kunit_case xe_dma_buf_tests[] = {
+ KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_dma_buf_test_suite = {
+ .name = "xe_dma_buf",
+ .test_cases = xe_dma_buf_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
deleted file mode 100644
index 99cdb718b6c6..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_dma_buf_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_dma_buf_tests[] = {
- KUNIT_CASE(xe_dma_buf_kunit),
- {}
-};
-
-static struct kunit_suite xe_dma_buf_test_suite = {
- .name = "xe_dma_buf",
- .test_cases = xe_dma_buf_tests,
-};
-
-kunit_test_suite(xe_dma_buf_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
deleted file mode 100644
index e6b464ddd526..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_DMA_BUF_TEST_H_
-#define _XE_DMA_BUF_TEST_H_
-
-struct kunit;
-
-void xe_dma_buf_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
index fefe79b3b75a..bc5156966ce9 100644
--- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
@@ -12,7 +12,9 @@
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
+#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_pm.h"
/**
* xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test.
@@ -88,3 +90,40 @@ int xe_kunit_helper_xe_device_test_init(struct kunit *test)
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init);
+
+KUNIT_DEFINE_ACTION_WRAPPER(put_xe_pm_runtime, xe_pm_runtime_put, struct xe_device *);
+
+/**
+ * xe_kunit_helper_xe_device_live_test_init - Prepare a &xe_device for
+ * use in a live KUnit test.
+ * @test: the &kunit where live &xe_device will be used
+ *
+ * This function expects pointer to the &xe_device in the &test.param_value,
+ * like it is prepared by the &xe_pci_live_device_gen_param and stores that
+ * pointer as &kunit.priv to allow the test code to access it.
+ *
+ * This function makes sure that device is not wedged and then resumes it
+ * to avoid waking up the device inside the test. It uses deferred cleanup
+ * action to release a runtime_pm reference.
+ *
+ * This function can be used as custom implementation of &kunit_suite.init.
+ *
+ * This function uses KUNIT_ASSERT to detect any failures.
+ *
+ * Return: Always 0.
+ */
+int xe_kunit_helper_xe_device_live_test_init(struct kunit *test)
+{
+ struct xe_device *xe = xe_device_const_cast(test->param_value);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+ kunit_info(test, "running on %s device\n", xe->info.platform_name);
+
+ KUNIT_ASSERT_FALSE(test, xe_device_wedged(xe));
+ xe_pm_runtime_get(xe);
+ KUNIT_ASSERT_EQ(test, 0, kunit_add_action_or_reset(test, put_xe_pm_runtime, xe));
+
+ test->priv = xe;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_live_test_init);
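Together with the generator added to xe_pci.c further down, this helper defines the whole recipe for a live suite: the generator yields each bound &xe_device as a test parameter, and this init hook checks and resumes the device before stashing it in &kunit.priv. A minimal sketch of a suite wired this way, assuming the declarations from xe_kunit_helpers.h and xe_pci_test.h (the suite and case names are hypothetical):

	static void xe_sample_kunit(struct kunit *test)
	{
		/* Placed here by xe_kunit_helper_xe_device_live_test_init() */
		struct xe_device *xe = test->priv;

		KUNIT_EXPECT_NOT_NULL(test, xe);
	}

	static struct kunit_case xe_sample_tests[] = {
		KUNIT_CASE_PARAM(xe_sample_kunit, xe_pci_live_device_gen_param),
		{}
	};

	struct kunit_suite xe_sample_test_suite = {
		.name = "xe_sample",
		.test_cases = xe_sample_tests,
		.init = xe_kunit_helper_xe_device_live_test_init,
	};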
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
index 067a1babf049..83665f7b1254 100644
--- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
@@ -14,4 +14,6 @@ struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
struct device *dev);
int xe_kunit_helper_xe_device_test_init(struct kunit *test);
+int xe_kunit_helper_xe_device_live_test_init(struct kunit *test);
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index eb1ea99a5a8b..5f14737c8210 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -3,6 +3,17 @@
* Copyright © 2023 Intel Corporation
*/
#include <linux/module.h>
+#include <kunit/test.h>
+
+extern struct kunit_suite xe_bo_test_suite;
+extern struct kunit_suite xe_dma_buf_test_suite;
+extern struct kunit_suite xe_migrate_test_suite;
+extern struct kunit_suite xe_mocs_test_suite;
+
+kunit_test_suite(xe_bo_test_suite);
+kunit_test_suite(xe_dma_buf_test_suite);
+kunit_test_suite(xe_migrate_test_suite);
+kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
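With the suites compiled into the core module and only registered from here, adding another live suite becomes a two-line change in this file, plus an export on the suite itself. A sketch, with a hypothetical suite name:

	extern struct kunit_suite xe_sample_test_suite;

	kunit_test_suite(xe_sample_test_suite);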
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 962f6438e219..1a192a2a941b 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_migrate_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
@@ -105,7 +105,8 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
}
xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
- fence = xe_migrate_clear(m, remote, remote->ttm.resource);
+ fence = xe_migrate_clear(m, remote, remote->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
"Clearing remote small bo", test)) {
retval = xe_map_rd(xe, &remote->vmap, 0, u64);
@@ -279,7 +280,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
kunit_info(test, "Clearing small buffer object\n");
xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
expected = 0;
- fence = xe_migrate_clear(m, tiny, tiny->ttm.resource);
+ fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
goto out;
@@ -300,7 +302,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
kunit_info(test, "Clearing big buffer object\n");
xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
expected = 0;
- fence = xe_migrate_clear(m, big, big->ttm.resource);
+ fence = xe_migrate_clear(m, big, big->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
goto out;
@@ -334,7 +337,7 @@ vunmap:
static int migrate_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
@@ -354,8 +357,425 @@ static int migrate_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_migrate_sanity_kunit(struct kunit *test)
+static void xe_migrate_sanity_kunit(struct kunit *test)
{
- xe_call_for_each_device(migrate_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ migrate_test_run_device(xe);
+}
+
+static struct dma_fence *blt_copy(struct xe_tile *tile,
+ struct xe_bo *src_bo, struct xe_bo *dst_bo,
+ bool copy_only_ccs, const char *str, struct kunit *test)
+{
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_migrate *m = tile->migrate;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct dma_fence *fence = NULL;
+ u64 size = src_bo->size;
+ struct xe_res_cursor src_it, dst_it;
+ struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
+ u64 src_L0_ofs, dst_L0_ofs;
+ u32 src_L0_pt, dst_L0_pt;
+ u64 src_L0, dst_L0;
+ int err;
+ bool src_is_vram = mem_type_is_vram(src->mem_type);
+ bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+
+ if (!src_is_vram)
+ xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
+ else
+ xe_res_first(src, 0, size, &src_it);
+
+ if (!dst_is_vram)
+ xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
+ else
+ xe_res_first(dst, 0, size, &dst_it);
+
+ while (size) {
+ u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 flush_flags = 0;
+ u32 update_idx;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+ u32 pte_flags;
+
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+ dst_L0 = xe_migrate_res_sizes(m, &dst_it);
+
+ src_L0 = min(src_L0, dst_L0);
+
+ pte_flags = src_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
+ PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
+ batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ pte_flags = dst_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
+ PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
+ batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
+ &dst_L0_ofs, &dst_L0_pt, 0,
+ avail_pts, avail_pts);
+
+ /* Add copy commands size here */
+ batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
+ ((xe_device_has_flat_ccs(xe) && copy_only_ccs) ? EMIT_COPY_CCS_DW : 0);
+
+ bb = xe_bb_new(gt, batch_size, xe->info.has_usm);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ goto err_sync;
+ }
+
+ if (src_is_vram)
+ xe_res_next(&src_it, src_L0);
+ else
+ emit_pte(m, bb, src_L0_pt, src_is_vram, false,
+ &src_it, src_L0, src);
+
+ if (dst_is_vram)
+ xe_res_next(&dst_it, src_L0);
+ else
+ emit_pte(m, bb, dst_L0_pt, dst_is_vram, false,
+ &dst_it, src_L0, dst);
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+ if (!copy_only_ccs)
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
+
+ if (copy_only_ccs)
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ src_is_vram, dst_L0_ofs,
+ dst_is_vram, src_L0, dst_L0_ofs,
+ copy_only_ccs);
+
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, xe->info.has_usm),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, flush_flags);
+
+ mutex_lock(&m->job_mutex);
+ xe_sched_job_arm(job);
+ dma_fence_put(fence);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+ size -= src_L0;
+ continue;
+
+err:
+ xe_bb_free(bb, NULL);
+
+err_sync:
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+ return ERR_PTR(err);
+ }
+
+ return fence;
+}
+
+static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
+ struct kunit *test)
+{
+ struct dma_fence *fence;
+ u64 expected, retval;
+ long timeout;
+ long ret;
+
+ expected = 0xd0d0d0d0d0d0d0d0;
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+
+ fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ if (retval == expected)
+ KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Evict vram buffer object\n");
+ ret = xe_bo_evict(vram_bo, true);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to evict bo.\n");
+ return;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ return;
+ }
+
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ check(retval, expected, "Clear evicted vram data first value", test);
+ retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ check(retval, expected, "Clear evicted vram data last value", test);
+
+ fence = blt_copy(tile, vram_bo, ccs_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
+ check(retval, 0, "Clear ccs data first value", test);
+
+ retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ check(retval, 0, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Restore vram buffer object\n");
+ ret = xe_bo_validate(vram_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+ return;
+ }
+
+ /* Sync all migration blits */
+ timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ true,
+ 5 * HZ);
+ if (timeout <= 0) {
+ KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
+ return;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ return;
+ }
+
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ check(retval, expected, "Restored value must be equal to initial value", test);
+ retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ check(retval, expected, "Restored value must be equal to initial value", test);
+
+ fence = blt_copy(tile, vram_bo, ccs_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
+ check(retval, 0, "Clear ccs data first value", test);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ check(retval, 0, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit);
+
+static void test_clear(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test)
+{
+ struct dma_fence *fence;
+ u64 expected, retval;
+
+ expected = 0xd0d0d0d0d0d0d0d0;
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+
+ fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ if (retval == expected)
+ KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
+ }
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo, false, "Blit copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Decompressed value must be equal to initial value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Decompressed value must be equal to initial value", test);
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Clear vram buffer object\n");
+ expected = 0x0000000000000000;
+ fence = xe_migrate_clear(tile->migrate, vram_bo, vram_bo->ttm.resource,
+ XE_MIGRATE_CLEAR_FLAG_FULL);
+ if (sanity_fence_failed(xe, fence, "Clear vram_bo", test))
+ return;
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo,
+ false, "Blit copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Clear main buffer first value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Clear main buffer last value", test);
+ }
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Clear ccs data first value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+}
+
+static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
+ struct kunit *test)
+{
+ struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ long ret;
+
+ sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+
+ if (IS_ERR(sys_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(sys_bo));
+ return;
+ }
+
+ xe_bo_lock(sys_bo, false);
+ ret = xe_bo_validate(sys_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+ goto free_sysbo;
+ }
+
+ ret = xe_bo_vmap(sys_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+ goto free_sysbo;
+ }
+ xe_bo_unlock(sys_bo);
+
+ ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+
+ if (IS_ERR(ccs_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(ccs_bo));
+ goto free_sysbo;
+ }
+
+ xe_bo_lock(ccs_bo, false);
+ ret = xe_bo_validate(ccs_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+ goto free_ccsbo;
+ }
+
+ ret = xe_bo_vmap(ccs_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+ goto free_ccsbo;
+ }
+ xe_bo_unlock(ccs_bo);
+
+ vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ if (IS_ERR(vram_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(vram_bo));
+ goto free_ccsbo;
+ }
+
+ xe_bo_lock(vram_bo, false);
+ ret = xe_bo_validate(vram_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+ goto free_vrambo;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ goto free_vrambo;
+ }
+
+ test_clear(xe, tile, sys_bo, vram_bo, test);
+ test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
+ xe_bo_unlock(vram_bo);
+
+ xe_bo_lock(vram_bo, false);
+ xe_bo_vunmap(vram_bo);
+ xe_bo_unlock(vram_bo);
+
+ xe_bo_lock(ccs_bo, false);
+ xe_bo_vunmap(ccs_bo);
+ xe_bo_unlock(ccs_bo);
+
+ xe_bo_lock(sys_bo, false);
+ xe_bo_vunmap(sys_bo);
+ xe_bo_unlock(sys_bo);
+free_vrambo:
+ xe_bo_put(vram_bo);
+free_ccsbo:
+ xe_bo_put(ccs_bo);
+free_sysbo:
+ xe_bo_put(sys_bo);
+}
+
+static int validate_ccs_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_tile *tile;
+ int id;
+
+ if (!xe_device_has_flat_ccs(xe)) {
+ kunit_skip(test, "non-flat-ccs device\n");
+ return 0;
+ }
+
+ if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) {
+ kunit_skip(test, "non-xe2 discrete device\n");
+ return 0;
+ }
+
+ xe_pm_runtime_get(xe);
+
+ for_each_tile(tile, xe, id)
+ validate_ccs_test_run_tile(xe, tile, test);
+
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static void xe_validate_ccs_kunit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ validate_ccs_test_run_device(xe);
+}
+
+static struct kunit_case xe_migrate_tests[] = {
+ KUNIT_CASE_PARAM(xe_migrate_sanity_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_validate_ccs_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_migrate_test_suite = {
+ .name = "xe_migrate",
+ .test_cases = xe_migrate_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
deleted file mode 100644
index eb0d8963419c..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_migrate_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_migrate_tests[] = {
- KUNIT_CASE(xe_migrate_sanity_kunit),
- {}
-};
-
-static struct kunit_suite xe_migrate_test_suite = {
- .name = "xe_migrate",
- .test_cases = xe_migrate_tests,
-};
-
-kunit_test_suite(xe_migrate_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.h b/drivers/gpu/drm/xe/tests/xe_migrate_test.h
deleted file mode 100644
index 7c645c66824f..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_MIGRATE_TEST_H_
-#define _XE_MIGRATE_TEST_H_
-
-struct kunit;
-
-void xe_migrate_sanity_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 67c65e88c384..79be73b4a02b 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_mocs_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -23,7 +23,7 @@ struct live_mocs {
static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
{
unsigned int flags;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
memset(arg, 0, sizeof(*arg));
@@ -41,7 +41,7 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
static void read_l3cc_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u32 l3cc, l3cc_expected;
unsigned int i;
u32 reg_val;
@@ -78,7 +78,7 @@ static void read_l3cc_table(struct xe_gt *gt,
static void read_mocs_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u32 mocs, mocs_expected;
unsigned int i;
u32 reg_val;
@@ -134,11 +134,15 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_live_mocs_kernel_kunit(struct kunit *test)
+static void xe_live_mocs_kernel_kunit(struct kunit *test)
{
- xe_call_for_each_device(mocs_kernel_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "this test is N/A for VF");
+
+ mocs_kernel_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
static int mocs_reset_test_run_device(struct xe_device *xe)
{
@@ -148,7 +152,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
struct xe_gt *gt;
unsigned int flags;
int id;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
xe_pm_runtime_get(xe);
@@ -175,8 +179,26 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_live_mocs_reset_kunit(struct kunit *test)
+static void xe_live_mocs_reset_kunit(struct kunit *test)
{
- xe_call_for_each_device(mocs_reset_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "this test is N/A for VF");
+
+ mocs_reset_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit);
+
+static struct kunit_case xe_mocs_tests[] = {
+ KUNIT_CASE_PARAM(xe_live_mocs_kernel_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_live_mocs_reset_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_mocs_test_suite = {
+ .name = "xe_mocs",
+ .test_cases = xe_mocs_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_mocs_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
deleted file mode 100644
index 6315886b659e..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_mocs_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_mocs_tests[] = {
- KUNIT_CASE(xe_live_mocs_kernel_kunit),
- KUNIT_CASE(xe_live_mocs_reset_kunit),
- {}
-};
-
-static struct kunit_suite xe_mocs_test_suite = {
- .name = "xe_mocs",
- .test_cases = xe_mocs_tests,
-};
-
-kunit_test_suite(xe_mocs_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
deleted file mode 100644
index e7699d495411..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_MOCS_TEST_H_
-#define _XE_MOCS_TEST_H_
-
-struct kunit;
-
-void xe_live_mocs_kernel_kunit(struct kunit *test);
-void xe_live_mocs_reset_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index f62809ca8b51..67404863087e 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,58 +12,6 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
-struct kunit_test_data {
- int ndevs;
- xe_device_fn xe_fn;
-};
-
-static int dev_to_xe_device_fn(struct device *dev, void *__data)
-
-{
- struct drm_device *drm = dev_get_drvdata(dev);
- struct kunit_test_data *data = __data;
- int ret = 0;
- int idx;
-
- data->ndevs++;
-
- if (drm_dev_enter(drm, &idx))
- ret = data->xe_fn(to_xe_device(dev_get_drvdata(dev)));
- drm_dev_exit(idx);
-
- return ret;
-}
-
-/**
- * xe_call_for_each_device - Iterate over all devices this driver binds to
- * @xe_fn: Function to call for each device.
- *
- * This function iterated over all devices this driver binds to, and calls
- * @xe_fn: for each one of them. If the called function returns anything else
- * than 0, iteration is stopped and the return value is returned by this
- * function. Across each function call, drm_dev_enter() / drm_dev_exit() is
- * called for the corresponding drm device.
- *
- * Return: Number of devices iterated or
- * the error code of a call to @xe_fn returning an error code.
- */
-int xe_call_for_each_device(xe_device_fn xe_fn)
-{
- int ret;
- struct kunit_test_data data = {
- .xe_fn = xe_fn,
- .ndevs = 0,
- };
-
- ret = driver_for_each_device(&xe_pci_driver.driver, NULL,
- &data, dev_to_xe_device_fn);
-
- if (!data.ndevs)
- kunit_skip(current->kunit_test, "test runs only on hardware\n");
-
- return ret ?: data.ndevs;
-}
-
/**
* xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs
* @xe_fn: Function to call for each device.
@@ -167,3 +115,33 @@ done:
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
+
+/**
+ * xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters
+ * @prev: the previously returned value, or NULL for the first iteration
+ * @desc: the buffer for a parameter name
+ *
+ * Iterates over the available Xe devices on the system. Uses the device name
+ * as the parameter name.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next &struct xe_device ready to be used as a parameter
+ * or NULL if there are no more Xe devices on the system.
+ */
+const void *xe_pci_live_device_gen_param(const void *prev, char *desc)
+{
+ const struct xe_device *xe = prev;
+ struct device *dev = xe ? xe->drm.dev : NULL;
+ struct device *next;
+
+ next = driver_find_next_device(&xe_pci_driver.driver, dev);
+ if (dev)
+ put_device(dev);
+ if (!next)
+ return NULL;
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", dev_name(next));
+ return pdev_to_xe_device(to_pci_dev(next));
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_live_device_gen_param);
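The prev/desc contract implemented above is the same one that KUNIT_ARRAY_PARAM generates for static tables: KUnit passes NULL on the first call, then whatever the previous call returned, and stops when the generator returns NULL; desc may hold at most KUNIT_PARAM_DESC_SIZE bytes. For comparison, a minimal sketch of the same contract over a hypothetical static array:

	static const int sample_values[] = { 10, 20, 30 };

	static const void *sample_gen_param(const void *prev, char *desc)
	{
		/* Resume after @prev, or start from the first element. */
		const int *v = prev ? (const int *)prev + 1 : sample_values;

		if (v >= sample_values + ARRAY_SIZE(sample_values))
			return NULL;	/* iteration done */

		snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value-%d", *v);
		return v;
	}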
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index a6705a536391..744a37583d2d 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -16,7 +16,7 @@
static void check_graphics_ip(const struct xe_graphics_desc *graphics)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u64 mask = graphics->hw_engine_mask;
/* RCS, CCS, and BCS engines are allowed on the graphics IP */
@@ -30,7 +30,7 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics)
static void check_media_ip(const struct xe_media_desc *media)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u64 mask = media->hw_engine_mask;
/* VCS, VECS and GSCCS engines are allowed on the media IP */
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index f40dcec83992..ede46800aff1 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -19,7 +19,6 @@ typedef int (*xe_device_fn)(struct xe_device *);
typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *);
typedef void (*xe_media_fn)(const struct xe_media_desc *);
-int xe_call_for_each_device(xe_device_fn xe_fn);
void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
void xe_call_for_each_media_ip(xe_media_fn xe_fn);
@@ -35,4 +34,6 @@ struct xe_pci_fake_data {
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index f217445c246a..36a3b5420fef 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -31,16 +31,23 @@
#undef XE_REG_MCR
#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
-struct rtp_test_case {
+struct rtp_to_sr_test_case {
const char *name;
struct xe_reg expected_reg;
u32 expected_set_bits;
u32 expected_clr_bits;
- unsigned long expected_count;
+ unsigned long expected_count_sr_entries;
unsigned int expected_sr_errors;
+ unsigned long expected_active;
const struct xe_rtp_entry_sr *entries;
};
+struct rtp_test_case {
+ const char *name;
+ unsigned long expected_active;
+ const struct xe_rtp_entry *entries;
+};
+
static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
{
return true;
@@ -51,13 +58,14 @@ static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
return false;
}
-static const struct rtp_test_case cases[] = {
+static const struct rtp_to_sr_test_case rtp_to_sr_cases[] = {
{
.name = "coalesce-same-reg",
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0) | REG_BIT(1),
.expected_clr_bits = REG_BIT(0) | REG_BIT(1),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
/* Different bits on the same register: create a single entry */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -76,7 +84,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Don't coalesce second entry since rules don't match */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -95,7 +104,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
.expected_clr_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1) | BIT(2),
+ .expected_count_sr_entries = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("first"),
XE_RTP_RULES(FUNC(match_yes), OR, FUNC(match_no)),
@@ -121,7 +131,7 @@ static const struct rtp_test_case cases[] = {
{
.name = "match-or-xfail",
.expected_reg = REGULAR_REG1,
- .expected_count = 0,
+ .expected_count_sr_entries = 0,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("leading-or"),
XE_RTP_RULES(OR, FUNC(match_yes)),
@@ -148,7 +158,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Don't coalesce second entry due to one of the rules */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -167,7 +178,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 2,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 2,
/* Same bits on different registers are not coalesced */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -186,7 +198,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(1) | REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
/* Check clr vs set actions on different bits */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -207,7 +220,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = TEMP_FIELD,
.expected_clr_bits = TEMP_MASK,
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Check FIELD_SET works */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -225,7 +239,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -245,7 +260,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -265,7 +281,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1) | BIT(2),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 2,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -287,28 +304,35 @@ static const struct rtp_test_case cases[] = {
},
};
-static void xe_rtp_process_tests(struct kunit *test)
+static void xe_rtp_process_to_sr_tests(struct kunit *test)
{
- const struct rtp_test_case *param = test->param_value;
+ const struct rtp_to_sr_test_case *param = test->param_value;
struct xe_device *xe = test->priv;
struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
struct xe_reg_sr *reg_sr = &gt->reg_sr;
const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- unsigned long idx, count = 0;
+ unsigned long idx, count_sr_entries = 0, count_rtp_entries = 0, active = 0;
+
+ xe_reg_sr_init(reg_sr, "xe_rtp_to_sr_tests", xe);
+
+ while (param->entries[count_rtp_entries].rules)
+ count_rtp_entries++;
- xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
sr_entry = sre;
- count++;
+ count_sr_entries++;
}
- KUNIT_EXPECT_EQ(test, count, param->expected_count);
- if (count) {
+ KUNIT_EXPECT_EQ(test, active, param->expected_active);
+
+ KUNIT_EXPECT_EQ(test, count_sr_entries, param->expected_count_sr_entries);
+ if (count_sr_entries) {
KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
@@ -319,12 +343,162 @@ static void xe_rtp_process_tests(struct kunit *test)
KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors);
}
+/*
+ * Entries below follow the logic used with xe_wa_oob.rules:
+ * 1) Entries with an empty name are OR'ed: all entries since the last
+ *    named entry are marked active
+ * 2) There are no actions associated with the rules
+ */
+static const struct rtp_test_case rtp_cases[] = {
+ {
+ .name = "active1",
+ .expected_active = BIT(0),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "active2",
+ .expected_active = BIT(0) | BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "active-inactive",
+ .expected_active = BIT(0),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-active",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-1st_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_yes), OR,
+ FUNC(match_no), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-2nd_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_yes), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-last_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_no), OR,
+ FUNC(match_yes)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-no_or_active-inactive",
+ .expected_active = 0,
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_no), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+};
+
+static void xe_rtp_process_tests(struct kunit *test)
+{
+ const struct rtp_test_case *param = test->param_value;
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+ unsigned long count_rtp_entries = 0, active = 0;
+
+ while (param->entries[count_rtp_entries].rules)
+ count_rtp_entries++;
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
+ xe_rtp_process(&ctx, param->entries);
+
+ KUNIT_EXPECT_EQ(test, active, param->expected_active);
+}
+
+static void rtp_to_sr_desc(const struct rtp_to_sr_test_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(rtp_to_sr, rtp_to_sr_cases, rtp_to_sr_desc);
+
static void rtp_desc(const struct rtp_test_case *t, char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
-KUNIT_ARRAY_PARAM(rtp, cases, rtp_desc);
+KUNIT_ARRAY_PARAM(rtp, rtp_cases, rtp_desc);
static int xe_rtp_test_init(struct kunit *test)
{
@@ -357,6 +531,7 @@ static void xe_rtp_test_exit(struct kunit *test)
}
static struct kunit_case xe_rtp_tests[] = {
+ KUNIT_CASE_PARAM(xe_rtp_process_to_sr_tests, rtp_to_sr_gen_params),
KUNIT_CASE_PARAM(xe_rtp_process_tests, rtp_gen_params),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h
index 7a1ae213e750..9c23ad9dba8d 100644
--- a/drivers/gpu/drm/xe/tests/xe_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_test.h
@@ -9,8 +9,8 @@
#include <linux/types.h>
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
-#include <linux/sched.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
/*
* Each test that provides a kunit private test structure, place a test id
@@ -31,8 +31,6 @@ struct xe_test_priv {
#define XE_TEST_DECLARE(x) x
#define XE_TEST_ONLY(x) unlikely(x)
-#define XE_TEST_EXPORT
-#define xe_cur_kunit() current->kunit_test
/**
* xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by
@@ -48,10 +46,10 @@ xe_cur_kunit_priv(enum xe_test_priv_id id)
{
struct xe_test_priv *priv;
- if (!xe_cur_kunit())
+ if (!kunit_get_current_test())
return NULL;
- priv = xe_cur_kunit()->priv;
+ priv = kunit_get_current_test()->priv;
return priv->id == id ? priv : NULL;
}
@@ -59,8 +57,6 @@ xe_cur_kunit_priv(enum xe_test_priv_id id)
#define XE_TEST_DECLARE(x)
#define XE_TEST_ONLY(x) 0
-#define XE_TEST_EXPORT static
-#define xe_cur_kunit() NULL
#define xe_cur_kunit_priv(_id) NULL
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index 9d0c715142b9..c96d1fe34151 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -74,6 +74,7 @@ static const struct platform_test_case cases[] = {
GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+ GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
};
static void platform_desc(const struct platform_test_case *t, char *desc)
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index 8b0cc1bc9327..e22bbf57fca7 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -81,7 +81,7 @@
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define __xe_assert_msg(xe, condition, msg, arg...) ({ \
- (void)drm_WARN(&(xe)->drm, !(condition), "[" DRM_NAME "] Assertion `%s` failed!\n" msg, \
+ (void)drm_WARN(&(xe)->drm, !(condition), "Assertion `%s` failed!\n" msg, \
__stringify(condition), ## arg); \
})
#else
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index a13e0b3a169e..ef777dbdf4ec 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -65,7 +65,8 @@ __xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
{
u32 size = drm_suballoc_size(bb->bo);
- bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ if (bb->len == 0 || bb->cs[bb->len - 1] != MI_BATCH_BUFFER_END)
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
xe_gt_assert(q->gt, bb->len * 4 + bb_prefetch(q->gt) <= size);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 261d3d6c8a93..f379df3a12bf 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -13,7 +13,7 @@
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_dma_buf.h"
@@ -758,7 +758,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_assert(xe, migrate);
trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
- xe_pm_runtime_get_noresume(xe);
+ if (xe_rpm_reclaim_safe(xe)) {
+ /*
+ * We might be called through swapout in the validation path of
+ * another TTM device, so unconditionally acquire rpm here.
+ */
+ xe_pm_runtime_get(xe);
+ } else {
+ drm_WARN_ON(&xe->drm, handle_system_ccs);
+ xe_pm_runtime_get_noresume(xe);
+ }
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
/*
@@ -793,8 +802,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
}
}
} else {
- if (move_lacks_source)
- fence = xe_migrate_clear(migrate, bo, new_mem);
+ if (move_lacks_source) {
+ u32 flags = 0;
+
+ if (mem_type_is_vram(new_mem->mem_type))
+ flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
+ else if (handle_system_ccs)
+ flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
+
+ fence = xe_migrate_clear(migrate, bo, new_mem, flags);
+ }
else
fence = xe_migrate_copy(migrate, bo, bo, old_mem,
new_mem, handle_system_ccs);
@@ -1090,7 +1107,7 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
- if (bo->ggtt_node.size)
+ if (bo->ggtt_node && bo->ggtt_node->base.size)
xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
#ifdef CONFIG_PROC_FS
@@ -1264,13 +1281,14 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
- (flags & XE_BO_NEEDS_64K))) {
- aligned_size = ALIGN(size, SZ_64K);
+ (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) {
+ size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K;
+
+ aligned_size = ALIGN(size, align);
if (type != ttm_bo_type_device)
- size = ALIGN(size, SZ_64K);
+ size = ALIGN(size, align);
flags |= XE_BO_FLAG_INTERNAL_64K;
- alignment = SZ_64K >> PAGE_SHIFT;
-
+ alignment = align >> PAGE_SHIFT;
} else {
aligned_size = ALIGN(size, SZ_4K);
flags &= ~XE_BO_FLAG_INTERNAL_64K;
@@ -1490,11 +1508,10 @@ struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
u16 cpu_caching,
- enum ttm_bo_type type,
u32 flags)
{
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
- cpu_caching, type,
+ cpu_caching, ttm_bo_type_device,
flags | XE_BO_FLAG_USER);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
@@ -1989,6 +2006,13 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
+ /* CCS formats need physical placement at a 64K alignment in VRAM. */
+ if ((bo_flags & XE_BO_FLAG_VRAM_MASK) &&
+ (bo_flags & XE_BO_FLAG_SCANOUT) &&
+ !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) &&
+ IS_ALIGNED(args->size, SZ_64K))
+ bo_flags |= XE_BO_FLAG_NEEDS_64K;
+
if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
return -EINVAL;
@@ -2018,7 +2042,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
}
bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
- ttm_bo_type_device, bo_flags);
+ bo_flags);
if (vm)
xe_vm_unlock(vm);
@@ -2296,6 +2320,20 @@ void xe_bo_put_commit(struct llist_head *deferred)
drm_gem_object_free(&bo->ttm.base.refcount);
}
+void xe_bo_put(struct xe_bo *bo)
+{
+ might_sleep();
+ if (bo) {
+#ifdef CONFIG_PROC_FS
+ if (bo->client)
+ might_lock(&bo->client->bos_lock);
+#endif
+ if (bo->ggtt_node && bo->ggtt_node->ggtt)
+ might_lock(&bo->ggtt_node->ggtt->lock);
+ drm_gem_object_put(&bo->ttm.base);
+ }
+}
+
/**
* xe_bo_dumb_create - Create a dumb bo as backing for a fb
* @file_priv: ...
@@ -2324,7 +2362,6 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
bo = xe_bo_create_user(xe, NULL, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
- ttm_bo_type_device,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_NEEDS_CPU_ACCESS);
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 6de894c728f5..6e4be52306df 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -36,8 +36,9 @@
#define XE_BO_FLAG_PAGETABLE BIT(12)
#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
#define XE_BO_FLAG_NEEDS_UC BIT(14)
-#define XE_BO_NEEDS_64K BIT(15)
-#define XE_BO_FLAG_GGTT_INVALIDATE BIT(16)
+#define XE_BO_FLAG_NEEDS_64K BIT(15)
+#define XE_BO_FLAG_NEEDS_2M BIT(16)
+#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
@@ -86,7 +87,6 @@ struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
u16 cpu_caching,
- enum ttm_bo_type type,
u32 flags);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
@@ -126,11 +126,7 @@ static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
return bo;
}
-static inline void xe_bo_put(struct xe_bo *bo)
-{
- if (bo)
- drm_gem_object_put(&bo->ttm.base);
-}
+void xe_bo_put(struct xe_bo *bo);
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
@@ -194,9 +190,12 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
- XE_WARN_ON(bo->ggtt_node.size > bo->size);
- XE_WARN_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
- return bo->ggtt_node.start;
+ if (XE_WARN_ON(!bo->ggtt_node))
+ return 0;
+
+ XE_WARN_ON(bo->ggtt_node->base.size > bo->size);
+ XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32));
+ return bo->ggtt_node->base.start;
}
int xe_bo_vmap(struct xe_bo *bo);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 10450f1fbbde..2ed558ac2264 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -8,12 +8,13 @@
#include <linux/iosys-map.h>
-#include <drm/drm_mm.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
+#include "xe_ggtt_types.h"
+
struct xe_device;
struct xe_vm;
@@ -39,7 +40,7 @@ struct xe_bo {
/** @placement: current placement for this BO */
struct ttm_placement placement;
/** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
- struct drm_mm_node ggtt_node;
+ struct xe_ggtt_node *ggtt_node;
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
/** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
@@ -58,6 +59,8 @@ struct xe_bo {
#endif
/** @freed: List node for delayed put. */
struct llist_node freed;
+ /** @update_index: Update index if PT BO */
+ int update_index;
/** @created: Whether the bo has passed initial creation */
bool created;
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 1011e5d281fa..668615c6b172 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -6,6 +6,7 @@
#include "xe_debugfs.h"
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -26,10 +27,7 @@
#include "xe_vm.h"
#endif
-#ifdef CONFIG_FAULT_INJECTION
-#include <linux/fault-inject.h> /* XXX: fault-inject.h is broken */
DECLARE_FAULT_ATTR(gt_reset_failure);
-#endif
static struct xe_device *node_to_xe(struct drm_info_node *node)
{
@@ -47,10 +45,9 @@ static int info(struct seq_file *m, void *data)
drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100);
drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100);
- drm_printf(&p, "stepping G:%s M:%s D:%s B:%s\n",
+ drm_printf(&p, "stepping G:%s M:%s B:%s\n",
xe_step_name(xe->info.step.graphics),
xe_step_name(xe->info.step.media),
- xe_step_name(xe->info.step.display),
xe_step_name(xe->info.step.basedie));
drm_printf(&p, "is_dgfx %s\n", str_yes_no(xe->info.is_dgfx));
drm_printf(&p, "platform %d\n", xe->info.platform);
@@ -214,8 +211,5 @@ void xe_debugfs_register(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_debugfs_register(gt);
-#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
-#endif
-
}
diff --git a/drivers/gpu/drm/xe/xe_debugfs.h b/drivers/gpu/drm/xe/xe_debugfs.h
index 715b8e2e0bd9..17f4c2f1b5e4 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_debugfs.h
@@ -8,6 +8,10 @@
struct xe_device;
+#ifdef CONFIG_DEBUG_FS
void xe_debugfs_register(struct xe_device *xe);
+#else
+static inline void xe_debugfs_register(struct xe_device *xe) { }
+#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 62c2b10fbf1d..bdb76e834e4c 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -66,22 +66,9 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
-static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+ struct xe_devcoredump *coredump)
{
- struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
-
- /* keep going if fw fails as we still want to save the memory and SW data */
- if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
- xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
- xe_vm_snapshot_capture_delayed(ss->vm);
- xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
- xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
-}
-
-static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
- size_t count, void *data, size_t datalen)
-{
- struct xe_devcoredump *coredump = data;
struct xe_device *xe;
struct xe_devcoredump_snapshot *ss;
struct drm_printer p;
@@ -89,18 +76,11 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
struct timespec64 ts;
int i;
- if (!coredump)
- return -ENODEV;
-
xe = coredump_to_xe(coredump);
ss = &coredump->snapshot;
- /* Ensure delayed work is captured before continuing */
- flush_work(&ss->work);
-
iter.data = buffer;
- iter.offset = 0;
- iter.start = offset;
+ iter.start = 0;
iter.remain = count;
p = drm_coredump_printer(&iter);
@@ -134,10 +114,83 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
return count - iter.remain;
}
+static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
+{
+ int i;
+
+ xe_guc_ct_snapshot_free(ss->ct);
+ ss->ct = NULL;
+
+ xe_guc_exec_queue_snapshot_free(ss->ge);
+ ss->ge = NULL;
+
+ xe_sched_job_snapshot_free(ss->job);
+ ss->job = NULL;
+
+ for (i = 0; i < XE_NUM_HW_ENGINES; i++)
+ if (ss->hwe[i]) {
+ xe_hw_engine_snapshot_free(ss->hwe[i]);
+ ss->hwe[i] = NULL;
+ }
+
+ xe_vm_snapshot_free(ss->vm);
+ ss->vm = NULL;
+}
+
+static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+{
+ struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+ struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
+
+ /* keep going if fw fails as we still want to save the memory and SW data */
+ if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ xe_vm_snapshot_capture_delayed(ss->vm);
+ xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
+ xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+
+ /* Calculate devcoredump size */
+ ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
+
+ ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+ if (!ss->read.buffer)
+ return;
+
+ __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
+ xe_devcoredump_snapshot_free(ss);
+}
+
+static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct xe_devcoredump *coredump = data;
+ struct xe_devcoredump_snapshot *ss;
+ ssize_t byte_copied;
+
+ if (!coredump)
+ return -ENODEV;
+
+ ss = &coredump->snapshot;
+
+ /* Ensure delayed work is captured before continuing */
+ flush_work(&ss->work);
+
+ if (!ss->read.buffer)
+ return -ENODEV;
+
+ if (offset >= ss->read.size)
+ return 0;
+
+ byte_copied = count < ss->read.size - offset ? count :
+ ss->read.size - offset;
+ memcpy(buffer, ss->read.buffer + offset, byte_copied);
+
+ return byte_copied;
+}
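
The rework above separates formatting from reading: the deferred worker now renders the whole coredump once into ss->read.buffer, and the read callback only serves byte ranges out of that prebuilt buffer. A minimal userspace-style sketch of the same offset/count clamping, with hypothetical names:

#include <stddef.h>
#include <string.h>

static size_t ranged_read(char *dst, size_t count, size_t offset,
			  const char *src, size_t src_size)
{
	size_t copied;

	if (offset >= src_size)
		return 0;			/* read past EOF: nothing to copy */

	copied = src_size - offset;		/* bytes available after offset */
	if (copied > count)
		copied = count;			/* clamp to the caller's buffer */

	memcpy(dst, src + offset, copied);
	return copied;
}
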
+
static void xe_devcoredump_free(void *data)
{
struct xe_devcoredump *coredump = data;
- int i;
/* Our device is gone. Nothing to do... */
if (!data || !coredump_to_xe(coredump))
@@ -145,13 +198,8 @@ static void xe_devcoredump_free(void *data)
cancel_work_sync(&coredump->snapshot.work);
- xe_guc_ct_snapshot_free(coredump->snapshot.ct);
- xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
- xe_sched_job_snapshot_free(coredump->snapshot.job);
- for (i = 0; i < XE_NUM_HW_ENGINES; i++)
- if (coredump->snapshot.hwe[i])
- xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
- xe_vm_snapshot_free(coredump->snapshot.vm);
+ xe_devcoredump_snapshot_free(&coredump->snapshot);
+ kvfree(coredump->snapshot.read.buffer);
/* To prevent stale data on next snapshot, clear everything */
memset(&coredump->snapshot, 0, sizeof(coredump->snapshot));
@@ -171,7 +219,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
u32 adj_logical_mask = q->logical_mask;
u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
- struct task_struct *task = NULL;
int i;
bool cookie;
@@ -179,14 +226,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
- if (q->vm && q->vm->xef) {
- task = get_pid_task(q->vm->xef->drm->pid, PIDTYPE_PID);
- if (task)
- process_name = task->comm;
- }
+ if (q->vm && q->vm->xef)
+ process_name = q->vm->xef->process_name;
strscpy(ss->process_name, process_name);
- if (task)
- put_task_struct(task);
ss->gt = q->gt;
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
@@ -266,4 +308,5 @@ int xe_devcoredump_init(struct xe_device *xe)
{
return devm_add_action_or_reset(xe->drm.dev, xe_driver_devcoredump_fini, &xe->drm);
}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 923cdf72a816..440d05d77a5a 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -46,6 +46,14 @@ struct xe_devcoredump_snapshot {
struct xe_sched_job_snapshot *job;
/** @vm: Snapshot of VM state */
struct xe_vm_snapshot *vm;
+
+ /** @read: devcoredump in human readable format */
+ struct {
+ /** @read.size: size of devcoredump in human readable format */
+ ssize_t size;
+ /** @read.buffer: buffer of devcoredump in human readable format */
+ char *buffer;
+ } read;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 59ee84ac1937..70d4e4d46c3c 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -15,7 +15,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "display/xe_display.h"
#include "instructions/xe_gpu_commands.h"
@@ -37,6 +37,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
+#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
@@ -64,6 +65,7 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
struct xe_drm_client *client;
struct xe_file *xef;
int ret = -ENOMEM;
+ struct task_struct *task = NULL;
xef = kzalloc(sizeof(*xef), GFP_KERNEL);
if (!xef)
@@ -92,6 +94,13 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = xef;
kref_init(&xef->refcount);
+ task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
+ if (task) {
+ xef->process_name = kstrdup(task->comm, GFP_KERNEL);
+ xef->pid = task->pid;
+ put_task_struct(task);
+ }
+
return 0;
}
@@ -110,6 +119,7 @@ static void xe_file_destroy(struct kref *ref)
spin_unlock(&xe->clients.lock);
xe_drm_client_put(xef->client);
+ kfree(xef->process_name);
kfree(xef);
}
@@ -156,6 +166,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
* vm->lock taken during xe_exec_queue_kill().
*/
xa_for_each(&xef->exec_queue.xa, idx, q) {
+ if (q->vm && q->hwe->hw_engine_group)
+ xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
xe_exec_queue_kill(q);
xe_exec_queue_put(q);
}
@@ -535,7 +547,7 @@ static void update_device_info(struct xe_device *xe)
{
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
- xe->info.enable_display = 0;
+ xe->info.probe_display = 0;
xe->info.has_heci_gscfi = 0;
xe->info.skip_guc_pc = 1;
xe->info.skip_pcode = 1;
@@ -789,13 +801,22 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
+/**
+ * xe_device_wmb() - Device specific write memory barrier
+ * @xe: the &xe_device
+ *
+ * While wmb() is sufficient for a barrier if we use system memory, on discrete
+ * platforms with device memory we additionally need to issue a register write.
+ * Since it doesn't matter which register we write to, use the read-only VF_CAP
+ * register that is also marked as accessible by the VFs.
+ */
void xe_device_wmb(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
wmb();
if (IS_DGFX(xe))
- xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
+ xe_mmio_write32(gt, VF_CAP_REG, 0);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 533ccfb2567a..894f04770454 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -15,9 +15,23 @@ static inline struct xe_device *to_xe_device(const struct drm_device *dev)
return container_of(dev, struct xe_device, drm);
}
+static inline struct xe_device *kdev_to_xe_device(struct device *kdev)
+{
+ struct drm_device *drm = dev_get_drvdata(kdev);
+
+ return drm ? to_xe_device(drm) : NULL;
+}
+
static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev)
{
- return pci_get_drvdata(pdev);
+ struct drm_device *drm = pci_get_drvdata(pdev);
+
+ return drm ? to_xe_device(drm) : NULL;
+}
+
+static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe)
+{
+ return (struct xe_device *)xe;
}
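
kdev_to_xe_device() recovers the xe device from a bare struct device via drvdata, with a NULL check for the window where drvdata isn't set. A hedged usage sketch (the attribute is hypothetical, not part of this series):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct xe_device *xe = kdev_to_xe_device(dev);

	if (!xe)
		return -ENODEV;		/* drvdata not set yet or already cleared */

	return sysfs_emit(buf, "%d\n", xe->info.platform);
}
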
static inline struct xe_device *ttm_to_xe_device(struct ttm_device *ttm)
@@ -129,16 +143,6 @@ static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
void xe_device_assert_mem_access(struct xe_device *xe);
-static inline bool xe_device_in_fault_mode(struct xe_device *xe)
-{
- return xe->usm.num_vm_in_fault_mode != 0;
-}
-
-static inline bool xe_device_in_non_fault_mode(struct xe_device *xe)
-{
- return xe->usm.num_vm_in_non_fault_mode != 0;
-}
-
static inline bool xe_device_has_flat_ccs(struct xe_device *xe)
{
return xe->info.has_flat_ccs;
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9e5fdf96750b..ec7eb7811126 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -23,6 +23,10 @@
#include "xe_sriov_types.h"
#include "xe_step_types.h"
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#endif
+
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
#include "soc/intel_pch.h"
#include "intel_display_core.h"
@@ -40,6 +44,7 @@ struct xe_pat_ops;
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
+#define HAS_HECI_CSCFI(xe) ((xe)->info.has_heci_cscfi)
#define XE_VRAM_FLAGS_NEED64K BIT(0)
@@ -199,7 +204,7 @@ struct xe_tile {
struct xe_memirq memirq;
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
- struct drm_mm_node ggtt_balloon[2];
+ struct xe_ggtt_node *ggtt_balloon[2];
} vf;
} sriov;
@@ -283,26 +288,29 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
- /** @info.enable_display: display enabled */
- u8 enable_display:1;
+ /**
+ * @info.probe_display: Probe display hardware. If set to
+ * false, the driver will behave as if there is no display
+ * hardware present and will not try to read/write to it in any
+ * way. The display hardware, if it exists, will not be
+ * exposed to userspace and will be left untouched in whatever
+ * state the firmware or bootloader left it in.
+ */
+ u8 probe_display:1;
/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
/** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
+ /** @info.has_heci_cscfi: device has heci cscfi */
+ u8 has_heci_cscfi:1;
/** @info.skip_guc_pc: Skip GuC based PM feature init */
u8 skip_guc_pc:1;
/** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
u8 has_atomic_enable_pte_bit:1;
/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
u8 has_device_atomics_on_smem:1;
-
-#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
- struct {
- u32 rawclk_freq;
- } i915_runtime;
-#endif
} info;
/** @irq: device interrupt state */
@@ -360,10 +368,6 @@ struct xe_device {
struct xarray asid_to_vm;
/** @usm.next_asid: next ASID, used to cyclical alloc asids */
u32 next_asid;
- /** @usm.num_vm_in_fault_mode: number of VM in fault mode */
- u32 num_vm_in_fault_mode;
- /** @usm.num_vm_in_non_fault_mode: number of VM in non-fault mode */
- u32 num_vm_in_non_fault_mode;
/** @usm.lock: protects UM state */
struct mutex lock;
} usm;
@@ -483,6 +487,14 @@ struct xe_device {
int mode;
} wedged;
+#ifdef TEST_VM_OPS_ERROR
+ /**
+ * @vm_inject_error_position: inject errors at different places in VM
+ * bind IOCTL based on this value
+ */
+ u8 vm_inject_error_position;
+#endif
+
/* private: */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -573,6 +585,18 @@ struct xe_file {
/** @client: drm client */
struct xe_drm_client *client;
+ /**
+ * @process_name: process name for file handle, used to safely output
+ * during error situations where the xe file can outlive the process
+ */
+ char *process_name;
+
+ /**
+ * @pid: pid for file handle, used to safely output during error
+ * situations where the xe file can outlive the process
+ */
+ pid_t pid;
+
/** @refcount: ref count of this xe file */
struct kref refcount;
};
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 1af95b9b9171..c4add8b38bbd 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -5,7 +5,7 @@
#include "xe_drm_client.h"
#include <drm/drm_print.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -168,15 +168,10 @@ static void bo_meminfo(struct xe_bo *bo,
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
{
u64 sz = bo->size;
- u32 mem_type;
+ u32 mem_type = bo->ttm.resource->mem_type;
xe_bo_assert_held(bo);
- if (bo->placement.placement)
- mem_type = bo->placement.placement->mem_type;
- else
- mem_type = XE_PL_TT;
-
if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base))
stats[mem_type].shared += sz;
else
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index f36980aa26e6..7b38485817dc 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -8,12 +8,13 @@
#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/delay.h>
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
+#include "xe_hw_engine_group.h"
#include "xe_macros.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
@@ -124,6 +125,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
bool write_locked, skip_retry = false;
ktime_t end = 0;
int err = 0;
+ struct xe_hw_engine_group *group;
+ enum xe_hw_engine_group_execution_mode mode, previous_mode;
if (XE_IOCTL_DBG(xe, args->extensions) ||
XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
@@ -182,6 +185,15 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
}
+ group = q->hwe->hw_engine_group;
+ mode = xe_hw_engine_group_find_exec_mode(q);
+
+ if (mode == EXEC_MODE_DMA_FENCE) {
+ err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
+ if (err)
+ goto err_syncs;
+ }
+
retry:
if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
err = down_write_killable(&vm->lock);
@@ -199,7 +211,7 @@ retry:
downgrade_write(&vm->lock);
write_locked = false;
if (err)
- goto err_unlock_list;
+ goto err_hw_exec_mode;
}
if (!args->num_batch_buffer) {
@@ -312,6 +324,9 @@ retry:
spin_unlock(&xe->ttm.lru_lock);
}
+ if (mode == EXEC_MODE_LR)
+ xe_hw_engine_group_resume_faulting_lr_jobs(group);
+
err_repin:
if (!xe_vm_in_lr_mode(vm))
up_read(&vm->userptr.notifier_lock);
@@ -324,6 +339,9 @@ err_unlock_list:
up_read(&vm->lock);
if (err == -EAGAIN && !skip_retry)
goto retry;
+err_hw_exec_mode:
+ if (mode == EXEC_MODE_DMA_FENCE)
+ xe_hw_engine_group_put(group);
err_syncs:
while (num_syncs--)
xe_sync_entry_cleanup(&syncs[num_syncs]);
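
The group get/put added here brackets the whole submission: a dma-fence exec takes a mode reference up front and every failure after that point exits through err_hw_exec_mode so the reference is dropped exactly once. The shape of that unwinding, reduced to a hedged sketch with hypothetical helpers:

#include <stdbool.h>

struct group;
int group_get_mode(struct group *grp);	/* like xe_hw_engine_group_get_mode() */
void group_put(struct group *grp);	/* like xe_hw_engine_group_put() */
int do_submit(void);

static int submit_sketch(struct group *grp, bool dma_fence_mode)
{
	int err;

	if (dma_fence_mode) {
		err = group_get_mode(grp);
		if (err)
			return err;
	}

	err = do_submit();
	if (err)
		goto err_mode;

	return 0;

err_mode:
	if (dma_fence_mode)
		group_put(grp);		/* mirrors the new err_hw_exec_mode label */
	return err;
}
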
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 9731dcd0b1bd..7f28b7fc68d5 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -9,11 +9,12 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
+#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
@@ -73,6 +74,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->ops = gt->exec_queue_ops;
INIT_LIST_HEAD(&q->lr.link);
INIT_LIST_HEAD(&q->multi_gt_link);
+ INIT_LIST_HEAD(&q->hw_engine_group_link);
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
q->sched_props.preempt_timeout_us =
@@ -166,7 +168,8 @@ err_post_alloc:
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
- enum xe_engine_class class, u32 flags)
+ enum xe_engine_class class,
+ u32 flags, u64 extensions)
{
struct xe_hw_engine *hwe, *hwe0 = NULL;
enum xe_hw_engine_id id;
@@ -186,7 +189,56 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
if (!logical_mask)
return ERR_PTR(-ENODEV);
- return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
+ return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
+}
+
+/**
+ * xe_exec_queue_create_bind() - Create bind exec queue.
+ * @xe: Xe device.
+ * @tile: tile which bind exec queue belongs to.
+ * @flags: exec queue creation flags
+ * @extensions: exec queue creation extensions
+ *
+ * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
+ * for access to physical memory required for page table programming. On a
+ * faulting device the reserved copy engine instance must be used to avoid
+ * deadlocking (user binds cannot get stuck behind faults, as kernel binds which
+ * resolve faults depend on user binds). On non-faulting devices any copy engine
+ * can be used.
+ *
+ * Returns exec queue on success, ERR_PTR on failure
+ */
+struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
+ struct xe_tile *tile,
+ u32 flags, u64 extensions)
+{
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_exec_queue *q;
+ struct xe_vm *migrate_vm;
+
+ migrate_vm = xe_migrate_get_vm(tile->migrate);
+ if (xe->info.has_usm) {
+ struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
+ XE_ENGINE_CLASS_COPY,
+ gt->usm.reserved_bcs_instance,
+ false);
+
+ if (!hwe) {
+ xe_vm_put(migrate_vm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ q = xe_exec_queue_create(xe, migrate_vm,
+ BIT(hwe->logical_instance), 1, hwe,
+ flags, extensions);
+ } else {
+ q = xe_exec_queue_create_class(xe, gt, migrate_vm,
+ XE_ENGINE_CLASS_COPY, flags,
+ extensions);
+ }
+ xe_vm_put(migrate_vm);
+
+ return q;
}
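
A hedged usage sketch of the new helper (error handling only; the bind submission itself is elided):

static int bind_queue_example(struct xe_device *xe, struct xe_tile *tile)
{
	struct xe_exec_queue *q;

	q = xe_exec_queue_create_bind(xe, tile, EXEC_QUEUE_FLAG_VM, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	/* ... issue page-table binds on q ... */

	xe_exec_queue_put(q);	/* drop the creation reference */
	return 0;
}
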
void xe_exec_queue_destroy(struct kref *ref)
@@ -418,63 +470,6 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
return 0;
}
-static const enum xe_engine_class user_to_xe_engine_class[] = {
- [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
- [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
- [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
- [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
- [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
-};
-
-static struct xe_hw_engine *
-find_hw_engine(struct xe_device *xe,
- struct drm_xe_engine_class_instance eci)
-{
- u32 idx;
-
- if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
- return NULL;
-
- if (eci.gt_id >= xe->info.gt_count)
- return NULL;
-
- idx = array_index_nospec(eci.engine_class,
- ARRAY_SIZE(user_to_xe_engine_class));
-
- return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
- user_to_xe_engine_class[idx],
- eci.engine_instance, true);
-}
-
-static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
- struct drm_xe_engine_class_instance *eci,
- u16 width, u16 num_placements)
-{
- struct xe_hw_engine *hwe;
- enum xe_hw_engine_id id;
- u32 logical_mask = 0;
-
- if (XE_IOCTL_DBG(xe, width != 1))
- return 0;
- if (XE_IOCTL_DBG(xe, num_placements != 1))
- return 0;
- if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
- return 0;
-
- eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;
-
- for_each_hw_engine(hwe, gt, id) {
- if (xe_hw_engine_is_reserved(hwe))
- continue;
-
- if (hwe->class ==
- user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
- logical_mask |= BIT(hwe->logical_instance);
- }
-
- return logical_mask;
-}
-
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
struct drm_xe_engine_class_instance *eci,
u16 width, u16 num_placements)
@@ -497,7 +492,7 @@ static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
n = j * width + i;
- hwe = find_hw_engine(xe, eci[n]);
+ hwe = xe_hw_engine_lookup(xe, eci[n]);
if (XE_IOCTL_DBG(xe, !hwe))
return 0;
@@ -536,8 +531,9 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
struct drm_xe_engine_class_instance __user *user_eci =
u64_to_user_ptr(args->instances);
struct xe_hw_engine *hwe;
- struct xe_vm *vm, *migrate_vm;
+ struct xe_vm *vm;
struct xe_gt *gt;
+ struct xe_tile *tile;
struct xe_exec_queue *q = NULL;
u32 logical_mask;
u32 id;
@@ -562,37 +558,20 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
- for_each_gt(gt, xe, id) {
- struct xe_exec_queue *new;
- u32 flags;
-
- if (xe_gt_is_media_type(gt))
- continue;
-
- eci[0].gt_id = gt->info.id;
- logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
- args->width,
- args->num_placements);
- if (XE_IOCTL_DBG(xe, !logical_mask))
- return -EINVAL;
-
- hwe = find_hw_engine(xe, eci[0]);
- if (XE_IOCTL_DBG(xe, !hwe))
- return -EINVAL;
-
- /* The migration vm doesn't hold rpm ref */
- xe_pm_runtime_get_noresume(xe);
-
- flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
+ if (XE_IOCTL_DBG(xe, args->width != 1) ||
+ XE_IOCTL_DBG(xe, args->num_placements != 1) ||
+ XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
+ return -EINVAL;
- migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
- new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
- args->width, hwe, flags,
- args->extensions);
+ for_each_tile(tile, xe, id) {
+ struct xe_exec_queue *new;
+ u32 flags = EXEC_QUEUE_FLAG_VM;
- xe_pm_runtime_put(xe); /* now held by engine */
+ if (id)
+ flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
- xe_vm_put(migrate_vm);
+ new = xe_exec_queue_create_bind(xe, tile, flags,
+ args->extensions);
if (IS_ERR(new)) {
err = PTR_ERR(new);
if (q)
@@ -613,7 +592,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !logical_mask))
return -EINVAL;
- hwe = find_hw_engine(xe, eci[0]);
+ hwe = xe_hw_engine_lookup(xe, eci[0]);
if (XE_IOCTL_DBG(xe, !hwe))
return -EINVAL;
@@ -648,6 +627,12 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, err))
goto put_exec_queue;
}
+
+ if (q->vm && q->hwe->hw_engine_group) {
+ err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
+ if (err)
+ goto put_exec_queue;
+ }
}
mutex_lock(&xef->exec_queue.lock);
@@ -798,6 +783,15 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}
+/**
+ * xe_exec_queue_kill - permanently stop all execution from an exec queue
+ * @q: The exec queue
+ *
+ * This function permanently stops all activity on an exec queue. If the queue
+ * is actively executing on the HW, it will be kicked off the engine; any
+ * pending jobs are discarded and all future submissions are rejected.
+ * This function is safe to call multiple times.
+ */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
struct xe_exec_queue *eq = q, *next;
@@ -830,6 +824,9 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
+ if (q->vm && q->hwe->hw_engine_group)
+ xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
+
xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
@@ -841,10 +838,12 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
struct xe_vm *vm)
{
- if (q->flags & EXEC_QUEUE_FLAG_VM)
+ if (q->flags & EXEC_QUEUE_FLAG_VM) {
lockdep_assert_held(&vm->lock);
- else
+ } else {
xe_vm_assert_held(vm);
+ lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
+ }
}
/**
@@ -856,10 +855,7 @@ void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
xe_exec_queue_last_fence_lockdep_assert(q, vm);
- if (q->last_fence) {
- dma_fence_put(q->last_fence);
- q->last_fence = NULL;
- }
+ xe_exec_queue_last_fence_put_unlocked(q);
}
/**
@@ -902,6 +898,33 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
}
/**
+ * xe_exec_queue_last_fence_get_for_resume() - Get last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Get last fence, takes a ref. Only safe to be called in the context of
+ * resuming the hw engine group's long-running exec queue, when the group
+ * semaphore is held.
+ *
+ * Returns: last fence if not signaled, dma fence stub if signaled
+ */
+struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
+ struct xe_vm *vm)
+{
+ struct dma_fence *fence;
+
+ lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
+
+ if (q->last_fence &&
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
+ xe_exec_queue_last_fence_put_unlocked(q);
+
+ fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+ dma_fence_get(fence);
+ return fence;
+}
+
+/**
* xe_exec_queue_last_fence_set() - Set last fence
* @q: The exec queue
* @vm: The VM the engine does a bind or exec for
@@ -918,3 +941,26 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
xe_exec_queue_last_fence_put(q, vm);
q->last_fence = dma_fence_get(fence);
}
+
+/**
+ * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Returns:
+ * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
+ */
+int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
+{
+ struct dma_fence *fence;
+ int err = 0;
+
+ fence = xe_exec_queue_last_fence_get(q, vm);
+ if (fence) {
+ err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
+ 0 : -ETIME;
+ dma_fence_put(fence);
+ }
+
+ return err;
+}
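
A hedged usage sketch: refuse to proceed with a non-blocking operation while the queue's last fence is unsignaled. The -EAGAIN mapping is illustrative policy, and the caller is assumed to satisfy the locking rules of xe_exec_queue_last_fence_get():

static int try_nonblock_op(struct xe_exec_queue *q, struct xe_vm *vm)
{
	int err = xe_exec_queue_last_fence_test_dep(q, vm);

	if (err == -ETIME)
		return -EAGAIN;	/* unsignaled dependency: retry later */
	return err;		/* 0: safe to proceed without blocking */
}
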
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 289a3a51d2a2..90c7f73eab88 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -20,7 +20,11 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
- enum xe_engine_class class, u32 flags);
+ enum xe_engine_class class,
+ u32 flags, u64 extensions);
+struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
+ struct xe_tile *tile,
+ u32 flags, u64 extensions);
void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
@@ -73,8 +77,12 @@ void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
struct xe_vm *vm);
+struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
+ struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
struct dma_fence *fence);
+int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
+ struct xe_vm *vm);
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index f6ee0ae80fd6..7deb480e26af 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -140,6 +140,8 @@ struct xe_exec_queue {
* Protected by @vm's resv. Unused if @vm == NULL.
*/
u64 tlb_flush_seqno;
+ /** @hw_engine_group_link: link into exec queues in the same hw engine group */
+ struct list_head hw_engine_group_link;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc *lrc[];
};
@@ -169,9 +171,11 @@ struct xe_exec_queue_ops {
int (*suspend)(struct xe_exec_queue *q);
/**
* @suspend_wait: Wait for an exec queue to suspend executing, should be
- * call after suspend.
+ * called after suspend. In the dma-fencing path this must return within
+ * a reasonable amount of time; an -ETIME return indicates an error
+ * waiting for suspend, resulting in the associated VM being killed.
*/
- void (*suspend_wait)(struct xe_exec_queue *q);
+ int (*suspend_wait)(struct xe_exec_queue *q);
/**
* @resume: Resume exec queue execution, exec queue must be in a suspended
* state and dma fence returned from most recent suspend call must be
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index db906117db6d..6a59165b9569 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -123,8 +123,8 @@ static void __xe_execlist_port_idle(struct xe_execlist_port *port)
if (!port->running_exl)
return;
- xe_lrc_write_ring(port->hwe->kernel_lrc, noop, sizeof(noop));
- __start_lrc(port->hwe, port->hwe->kernel_lrc, 0);
+ xe_lrc_write_ring(port->lrc, noop, sizeof(noop));
+ __start_lrc(port->hwe, port->lrc, 0);
port->running_exl = NULL;
}
@@ -254,14 +254,22 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
{
struct drm_device *drm = &xe->drm;
struct xe_execlist_port *port;
- int i;
+ int i, err;
port = drmm_kzalloc(drm, sizeof(*port), GFP_KERNEL);
- if (!port)
- return ERR_PTR(-ENOMEM);
+ if (!port) {
+ err = -ENOMEM;
+ goto err;
+ }
port->hwe = hwe;
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K);
+ if (IS_ERR(port->lrc)) {
+ err = PTR_ERR(port->lrc);
+ goto err;
+ }
+
spin_lock_init(&port->lock);
for (i = 0; i < ARRAY_SIZE(port->active); i++)
INIT_LIST_HEAD(&port->active[i]);
@@ -277,6 +285,9 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
add_timer(&port->irq_fail);
return port;
+
+err:
+ return ERR_PTR(err);
}
void xe_execlist_port_destroy(struct xe_execlist_port *port)
@@ -287,6 +298,8 @@ void xe_execlist_port_destroy(struct xe_execlist_port *port)
spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
port->hwe->irq_handler = NULL;
spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
+
+ xe_lrc_put(port->lrc);
}
static struct dma_fence *
@@ -422,10 +435,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
/* NIY */
+ return 0;
}
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index f94bbf4c53e4..415140936f11 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -27,6 +27,8 @@ struct xe_execlist_port {
struct xe_execlist_exec_queue *running_exl;
struct timer_list irq_fail;
+
+ struct xe_lrc *lrc;
};
struct xe_execlist_exec_queue {
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 106ee2b027f0..904cf47925aa 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -97,19 +97,27 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
if (name) {
fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx);
- fprintf(csource, "{ XE_RTP_NAME(\"%s\"), XE_RTP_RULES(%s) },\n",
+
+ /* Close previous entry before starting a new one */
+ if (idx)
+ fprintf(csource, ") },\n");
+
+ fprintf(csource, "{ XE_RTP_NAME(\"%s\"),\n XE_RTP_RULES(%s",
name, rules);
+ idx++;
} else {
- fprintf(csource, "{ XE_RTP_NAME(NULL), XE_RTP_RULES(%s) },\n",
- rules);
+ fprintf(csource, ", OR,\n\t%s", rules);
}
- idx++;
lineno++;
if (!is_continuation)
prev_name = name;
}
+ /* Close last entry */
+ if (idx)
+ fprintf(csource, ") },\n");
+
fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx);
return 0;
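
With this change, a named line followed by unnamed continuation lines collapses into a single table entry whose rule sets are joined by OR, instead of one entry per input line. Following the fprintf formats above, the output for a hypothetical workaround with one continuation line looks like:

{ XE_RTP_NAME("WA_EXAMPLE"),
 XE_RTP_RULES(PLATFORM(FOO), OR,
	PLATFORM(BAR)) },
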
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 0cdbc1296e88..2895f154654c 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -30,6 +30,39 @@
#include "xe_wa.h"
#include "xe_wopcm.h"
+/**
+ * DOC: Global Graphics Translation Table (GGTT)
+ *
+ * The Xe GGTT implements support for a Global Virtual Address space that is used
+ * for resources that are accessible to privileged (i.e. kernel-mode) processes,
+ * and not tied to a specific user-level process. For example, the Graphics
+ * micro-Controller (GuC) and Display Engine (if present) utilize this Global
+ * address space.
+ *
+ * The Global GTT (GGTT) translates from the Global virtual address to a physical
+ * address that can be accessed by HW. The GGTT is a flat, single-level table.
+ *
+ * Xe implements a simplified version of the GGTT specifically managing only a
+ * certain range of it that goes from the Write Once Protected Content Memory (WOPCM)
+ * layout to a predefined GUC_GGTT_TOP. This approach avoids complications related to
+ * the GuC (Graphics Microcontroller) hardware limitations. The GuC address space
+ * is limited on both ends of the GGTT, because the GuC shim HW redirects
+ * accesses to those addresses to other HW areas instead of going through the
+ * GGTT. On the bottom end, the GuC can't access offsets below the WOPCM size,
+ * while on the top side the limit is fixed at GUC_GGTT_TOP. To keep things
+ * simple, instead of checking each object to see if they are accessed by GuC or
+ * not, we just exclude those areas from the allocator. Additionally, to simplify
+ * the driver load, we use the maximum WOPCM size in this logic instead of the
+ * programmed one, so we don't need to wait until the actual size to be
+ * programmed is determined (which requires FW fetch) before initializing the
+ * GGTT. These simplifications might waste space in the GGTT (about 20-25 MBs
+ * depending on the platform) but we can live with this. Another benefit of this
+ * is the GuC bootrom can't access anything below the WOPCM max size so anything
+ * the bootrom needs to access (e.g. an RSA key) needs to be placed in the GGTT
+ * above the WOPCM max size. Starting the GGTT allocations above the WOPCM max
+ * gives us the correct placement for free.
+ */
+
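
A hedged sketch of the managed range described above; the constants are placeholders, not the driver's real values:

#define WOPCM_MAX_SIZE	0x4000000ULL	/* hypothetical WOPCM max */
#define GUC_TOP		0xFEE00000ULL	/* hypothetical GUC_GGTT_TOP */

static unsigned long long managed_ggtt_size(unsigned long long ggtt_size)
{
	if (ggtt_size > GUC_TOP)
		ggtt_size = GUC_TOP;	/* GuC can't address anything above */

	/* the allocator then covers [WOPCM max, ggtt_size) */
	return ggtt_size - WOPCM_MAX_SIZE;
}
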
static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
u16 pat_index)
{
@@ -128,11 +161,12 @@ static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
struct xe_ggtt *ggtt = arg;
+ destroy_workqueue(ggtt->wq);
mutex_destroy(&ggtt->lock);
drm_mm_takedown(&ggtt->mm);
}
-static void ggtt_fini(struct drm_device *drm, void *arg)
+static void ggtt_fini(void *arg)
{
struct xe_ggtt *ggtt = arg;
@@ -164,12 +198,16 @@ static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
-/*
- * Early GGTT initialization, which allows to create new mappings usable by the
- * GuC.
- * Mappings are not usable by the HW engines, as it doesn't have scratch /
+/**
+ * xe_ggtt_init_early - Early GGTT initialization
+ * @ggtt: the &xe_ggtt to be initialized
+ *
+ * It allows the creation of new mappings usable by the GuC.
+ * Mappings are not usable by the HW engines, as the GGTT doesn't have scratch nor
* initial clear done to it yet. That will happen in the regular, non-early
- * GGTT init.
+ * GGTT initialization.
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
int xe_ggtt_init_early(struct xe_ggtt *ggtt)
{
@@ -194,29 +232,6 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
ggtt->flags |= XE_GGTT_FLAGS_64K;
- /*
- * 8B per entry, each points to a 4KB page.
- *
- * The GuC address space is limited on both ends of the GGTT, because
- * the GuC shim HW redirects accesses to those addresses to other HW
- * areas instead of going through the GGTT. On the bottom end, the GuC
- * can't access offsets below the WOPCM size, while on the top side the
- * limit is fixed at GUC_GGTT_TOP. To keep things simple, instead of
- * checking each object to see if they are accessed by GuC or not, we
- * just exclude those areas from the allocator. Additionally, to
- * simplify the driver load, we use the maximum WOPCM size in this logic
- * instead of the programmed one, so we don't need to wait until the
- * actual size to be programmed is determined (which requires FW fetch)
- * before initializing the GGTT. These simplifications might waste space
- * in the GGTT (about 20-25 MBs depending on the platform) but we can
- * live with this.
- *
- * Another benifit of this is the GuC bootrom can't access anything
- * below the WOPCM max size so anything the bootom needs to access (e.g.
- * a RSA key) needs to be placed in the GGTT above the WOPCM max size.
- * Starting the GGTT allocations above the WOPCM max give us the correct
- * placement for free.
- */
if (ggtt->size > GUC_GGTT_TOP)
ggtt->size = GUC_GGTT_TOP;
@@ -228,6 +243,8 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
else
ggtt->pt_ops = &xelp_pt_ops;
+ ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, 0);
+
drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
ggtt->size - xe_wopcm_size(xe));
mutex_init(&ggtt->lock);
@@ -262,6 +279,77 @@ static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
mutex_unlock(&ggtt->lock);
}
+static void ggtt_node_remove(struct xe_ggtt_node *node)
+{
+ struct xe_ggtt *ggtt = node->ggtt;
+ struct xe_device *xe = tile_to_xe(ggtt->tile);
+ bool bound;
+ int idx;
+
+ bound = drm_dev_enter(&xe->drm, &idx);
+
+ mutex_lock(&ggtt->lock);
+ if (bound)
+ xe_ggtt_clear(ggtt, node->base.start, node->base.size);
+ drm_mm_remove_node(&node->base);
+ node->base.size = 0;
+ mutex_unlock(&ggtt->lock);
+
+ if (!bound)
+ goto free_node;
+
+ if (node->invalidate_on_remove)
+ xe_ggtt_invalidate(ggtt);
+
+ drm_dev_exit(idx);
+
+free_node:
+ xe_ggtt_node_fini(node);
+}
+
+static void ggtt_node_remove_work_func(struct work_struct *work)
+{
+ struct xe_ggtt_node *node = container_of(work, typeof(*node),
+ delayed_removal_work);
+ struct xe_device *xe = tile_to_xe(node->ggtt->tile);
+
+ xe_pm_runtime_get(xe);
+ ggtt_node_remove(node);
+ xe_pm_runtime_put(xe);
+}
+
+/**
+ * xe_ggtt_node_remove - Remove a &xe_ggtt_node from the GGTT
+ * @node: the &xe_ggtt_node to be removed
+ * @invalidate: if node needs invalidation upon removal
+ */
+void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate)
+{
+ struct xe_ggtt *ggtt;
+ struct xe_device *xe;
+
+ if (!node || !node->ggtt)
+ return;
+
+ ggtt = node->ggtt;
+ xe = tile_to_xe(ggtt->tile);
+
+ node->invalidate_on_remove = invalidate;
+
+ if (xe_pm_runtime_get_if_active(xe)) {
+ ggtt_node_remove(node);
+ xe_pm_runtime_put(xe);
+ } else {
+ queue_work(ggtt->wq, &node->delayed_removal_work);
+ }
+}
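
Removal has to touch the hardware, so it either runs immediately when a runtime-PM reference can be taken without resuming, or is deferred to the dedicated workqueue whose worker may resume the device. The idiom as a hedged sketch with hypothetical helpers:

#include <stdbool.h>

struct node;
bool pm_get_if_active(struct node *n);	/* ref only if already resumed */
void pm_get(struct node *n);		/* may resume the device */
void pm_put(struct node *n);
void do_remove(struct node *n);
void defer_removal(struct node *n);	/* queues work calling removal_worker() */

static void remove_or_defer(struct node *n)
{
	if (pm_get_if_active(n)) {	/* fast path: device already awake */
		do_remove(n);
		pm_put(n);
	} else {
		defer_removal(n);
	}
}

static void removal_worker(struct node *n)
{
	pm_get(n);			/* full reference; resume allowed here */
	do_remove(n);
	pm_put(n);
}
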
+
+/**
+ * xe_ggtt_init - Regular non-early GGTT initialization
+ * @ggtt: the &xe_ggtt to be initialized
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int xe_ggtt_init(struct xe_ggtt *ggtt)
{
struct xe_device *xe = tile_to_xe(ggtt->tile);
@@ -289,7 +377,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
xe_ggtt_initial_clear(ggtt);
- return drmm_add_action_or_reset(&xe->drm, ggtt_fini, ggtt);
+ return devm_add_action_or_reset(xe->drm.dev, ggtt_fini, ggtt);
err:
ggtt->scratch = NULL;
return err;
@@ -314,26 +402,6 @@ static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
}
-void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
-{
- u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
- u64 addr, scratch_pte;
-
- scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, pat_index);
-
- printk("%sGlobal GTT:", prefix);
- for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
- unsigned int i = addr / XE_PAGE_SIZE;
-
- xe_tile_assert(ggtt->tile, addr <= U32_MAX);
- if (ggtt->gsm[i] == scratch_pte)
- continue;
-
- printk("%s ggtt[0x%08x] = 0x%016llx",
- prefix, (u32)addr, ggtt->gsm[i]);
- }
-}
-
static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
const struct drm_mm_node *node, const char *description)
{
@@ -347,88 +415,180 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
}
/**
- * xe_ggtt_balloon - prevent allocation of specified GGTT addresses
- * @ggtt: the &xe_ggtt where we want to make reservation
+ * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses
+ * @node: the &xe_ggtt_node to hold reserved GGTT node
* @start: the starting GGTT address of the reserved region
* @end: the end GGTT address of the reserved region
- * @node: the &drm_mm_node to hold reserved GGTT node
*
- * Use xe_ggtt_deballoon() to release a reserved GGTT node.
+ * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node)
+int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
{
+ struct xe_ggtt *ggtt = node->ggtt;
int err;
xe_tile_assert(ggtt->tile, start < end);
xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
- xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(node));
+ xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
- node->color = 0;
- node->start = start;
- node->size = end - start;
+ node->base.color = 0;
+ node->base.start = start;
+ node->base.size = end - start;
mutex_lock(&ggtt->lock);
- err = drm_mm_reserve_node(&ggtt->mm, node);
+ err = drm_mm_reserve_node(&ggtt->mm, &node->base);
mutex_unlock(&ggtt->lock);
if (xe_gt_WARN(ggtt->tile->primary_gt, err,
"Failed to balloon GGTT %#llx-%#llx (%pe)\n",
- node->start, node->start + node->size, ERR_PTR(err)))
+ node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
return err;
- xe_ggtt_dump_node(ggtt, node, "balloon");
+ xe_ggtt_dump_node(ggtt, &node->base, "balloon");
return 0;
}
/**
- * xe_ggtt_deballoon - release a reserved GGTT region
- * @ggtt: the &xe_ggtt where reserved node belongs
- * @node: the &drm_mm_node with reserved GGTT region
+ * xe_ggtt_node_remove_balloon - release a reserved GGTT region
+ * @node: the &xe_ggtt_node with reserved GGTT region
*
- * See xe_ggtt_balloon() for details.
+ * See xe_ggtt_node_insert_balloon() for details.
*/
-void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node)
+void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node)
{
- if (!drm_mm_node_allocated(node))
+ if (!node || !node->ggtt)
return;
- xe_ggtt_dump_node(ggtt, node, "deballoon");
+ if (!drm_mm_node_allocated(&node->base))
+ goto free_node;
- mutex_lock(&ggtt->lock);
- drm_mm_remove_node(node);
- mutex_unlock(&ggtt->lock);
+ xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");
+
+ mutex_lock(&node->ggtt->lock);
+ drm_mm_remove_node(&node->base);
+ mutex_unlock(&node->ggtt->lock);
+
+free_node:
+ xe_ggtt_node_fini(node);
}
-int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- u32 size, u32 align, u32 mm_flags)
+/**
+ * xe_ggtt_node_insert_locked - Locked version to insert a &xe_ggtt_node into the GGTT
+ * @node: the &xe_ggtt_node to be inserted
+ * @size: size of the node
+ * @align: alignment constraint of the node
+ * @mm_flags: flags to control the node behavior
+ *
+ * It cannot be called without first having called xe_ggtt_init() once.
+ * To be used in cases where ggtt->lock is already taken.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
+ u32 size, u32 align, u32 mm_flags)
{
- return drm_mm_insert_node_generic(&ggtt->mm, node, size, align, 0,
+ return drm_mm_insert_node_generic(&node->ggtt->mm, &node->base, size, align, 0,
mm_flags);
}
-int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- u32 size, u32 align)
+/**
+ * xe_ggtt_node_insert - Insert a &xe_ggtt_node into the GGTT
+ * @node: the &xe_ggtt_node to be inserted
+ * @size: size of the node
+ * @align: alignment constraint of the node
+ *
+ * It cannot be called without first having called xe_ggtt_init() once.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
{
int ret;
- mutex_lock(&ggtt->lock);
- ret = xe_ggtt_insert_special_node_locked(ggtt, node, size,
- align, DRM_MM_INSERT_HIGH);
- mutex_unlock(&ggtt->lock);
+ if (!node || !node->ggtt)
+ return -ENOENT;
+
+ mutex_lock(&node->ggtt->lock);
+ ret = xe_ggtt_node_insert_locked(node, size, align,
+ DRM_MM_INSERT_HIGH);
+ mutex_unlock(&node->ggtt->lock);
return ret;
}
+/**
+ * xe_ggtt_node_init - Initialize %xe_ggtt_node struct
+ * @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
+ *
+ * This function will allocate the struct %xe_ggtt_node and return its pointer.
+ * This struct will then be freed after the node removal upon xe_ggtt_node_remove()
+ * or xe_ggtt_node_remove_balloon().
+ * Having the %xe_ggtt_node struct allocated doesn't mean that the node is already
+ * allocated in the GGTT. Only xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(), or
+ * xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved in the GGTT.
+ *
+ * Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
+ **/
+struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
+{
+ struct xe_ggtt_node *node = kzalloc(sizeof(*node), GFP_NOFS);
+
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&node->delayed_removal_work, ggtt_node_remove_work_func);
+ node->ggtt = ggtt;
+
+ return node;
+}
+
+/**
+ * xe_ggtt_node_fini - Forcibly finalize %xe_ggtt_node struct
+ * @node: the &xe_ggtt_node to be freed
+ *
+ * If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
+ * or xe_ggtt_node_insert_balloon(), and this @node is not going to be reused, then
+ * this function needs to be called to free the %xe_ggtt_node struct.
+ **/
+void xe_ggtt_node_fini(struct xe_ggtt_node *node)
+{
+ kfree(node);
+}
+
+/**
+ * xe_ggtt_node_allocated - Check if node is allocated in GGTT
+ * @node: the &xe_ggtt_node to be inspected
+ *
+ * Return: True if allocated, False otherwise.
+ */
+bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
+{
+ if (!node || !node->ggtt)
+ return false;
+
+ return drm_mm_node_allocated(&node->base);
+}
+
+/**
+ * xe_ggtt_map_bo - Map the BO into GGTT
+ * @ggtt: the &xe_ggtt where node will be mapped
+ * @bo: the &xe_bo to be mapped
+ */
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
- u64 start = bo->ggtt_node.start;
+ u64 start;
u64 offset, pte;
+ if (XE_WARN_ON(!bo->ggtt_node))
+ return;
+
+ start = bo->ggtt_node->base.start;
+
for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
@@ -444,9 +604,9 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
alignment = SZ_64K;
- if (XE_WARN_ON(bo->ggtt_node.size)) {
+ if (XE_WARN_ON(bo->ggtt_node)) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
return 0;
}
@@ -455,69 +615,111 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
return err;
xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+
+ bo->ggtt_node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(bo->ggtt_node)) {
+ err = PTR_ERR(bo->ggtt_node);
+ bo->ggtt_node = NULL;
+ goto out;
+ }
+
mutex_lock(&ggtt->lock);
- err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
+ err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
alignment, 0, start, end, 0);
- if (!err)
+ if (err) {
+ xe_ggtt_node_fini(bo->ggtt_node);
+ bo->ggtt_node = NULL;
+ } else {
xe_ggtt_map_bo(ggtt, bo);
+ }
mutex_unlock(&ggtt->lock);
if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
xe_ggtt_invalidate(ggtt);
+
+out:
xe_pm_runtime_put(tile_to_xe(ggtt->tile));
return err;
}
+/**
+ * xe_ggtt_insert_bo_at - Insert BO at a specific GGTT space
+ * @ggtt: the &xe_ggtt where bo will be inserted
+ * @bo: the &xe_bo to be inserted
+ * @start: address where it will be inserted
+ * @end: end of the range where it will be inserted
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
}
+/**
+ * xe_ggtt_insert_bo - Insert BO into GGTT
+ * @ggtt: the &xe_ggtt where bo will be inserted
+ * @bo: the &xe_bo to be inserted
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
}
-void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- bool invalidate)
+/**
+ * xe_ggtt_remove_bo - Remove a BO from the GGTT
+ * @ggtt: the &xe_ggtt where node will be removed
+ * @bo: the &xe_bo to be removed
+ */
+void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- struct xe_device *xe = tile_to_xe(ggtt->tile);
- bool bound;
- int idx;
-
- bound = drm_dev_enter(&xe->drm, &idx);
- if (bound)
- xe_pm_runtime_get_noresume(xe);
-
- mutex_lock(&ggtt->lock);
- if (bound)
- xe_ggtt_clear(ggtt, node->start, node->size);
- drm_mm_remove_node(node);
- node->size = 0;
- mutex_unlock(&ggtt->lock);
-
- if (!bound)
+ if (XE_WARN_ON(!bo->ggtt_node))
return;
- if (invalidate)
- xe_ggtt_invalidate(ggtt);
+ /* This BO is not currently in the GGTT */
+ xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
- xe_pm_runtime_put(xe);
- drm_dev_exit(idx);
+ xe_ggtt_node_remove(bo->ggtt_node,
+ bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}
-void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+/**
+ * xe_ggtt_largest_hole - Largest GGTT hole
+ * @ggtt: the &xe_ggtt that will be inspected
+ * @alignment: minimum alignment
+ * @spare: if not NULL, on input the desired memory size to be spared; on output the adjusted possible spare
+ *
+ * Return: size of the largest contiguous GGTT region
+ */
+u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
{
- if (XE_WARN_ON(!bo->ggtt_node.size))
- return;
+ const struct drm_mm *mm = &ggtt->mm;
+ const struct drm_mm_node *entry;
+ u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
+ u64 hole_start, hole_end, hole_size;
+ u64 max_hole = 0;
- /* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
+ mutex_lock(&ggtt->lock);
- xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
- bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
+ drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+ hole_start = max(hole_start, hole_min_start);
+ hole_start = ALIGN(hole_start, alignment);
+ hole_end = ALIGN_DOWN(hole_end, alignment);
+ if (hole_start >= hole_end)
+ continue;
+ hole_size = hole_end - hole_start;
+ if (spare)
+ *spare -= min3(*spare, hole_size, max_hole);
+ max_hole = max(max_hole, hole_size);
+ }
+
+ mutex_unlock(&ggtt->lock);
+
+ return max_hole;
}
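
A worked example of the alignment clamping above, with made-up numbers: a hole [0x11000, 0x20000) under 64K alignment rounds its start up and its end down to the same address, so it is skipped entirely:

#include <assert.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	unsigned long long start = ALIGN_UP(0x11000ULL, 0x10000ULL);
	unsigned long long end   = ALIGN_DOWN(0x20000ULL, 0x10000ULL);

	assert(start == 0x20000 && end == 0x20000);	/* start >= end: skip */
	return 0;
}
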
#ifdef CONFIG_PCI_IOV
@@ -548,22 +750,28 @@ static void xe_ggtt_assign_locked(struct xe_ggtt *ggtt, const struct drm_mm_node
/**
* xe_ggtt_assign - assign a GGTT region to the VF
- * @ggtt: the &xe_ggtt where the node belongs
- * @node: the &drm_mm_node to update
+ * @node: the &xe_ggtt_node to update
* @vfid: the VF identifier
*
* This function is used by the PF driver to assign a GGTT region to the VF.
* In addition to PTE's VFID bits 11:2 also PRESENT bit 0 is set as on some
* platforms VFs can't modify that either.
*/
-void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid)
+void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
{
- mutex_lock(&ggtt->lock);
- xe_ggtt_assign_locked(ggtt, node, vfid);
- mutex_unlock(&ggtt->lock);
+ mutex_lock(&node->ggtt->lock);
+ xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
+ mutex_unlock(&node->ggtt->lock);
}
#endif
+/**
+ * xe_ggtt_dump - Dump GGTT for debug
+ * @ggtt: the &xe_ggtt to be dumped
+ * @p: the &drm_mm_printer helper handle to be used to dump the information
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
{
int err;
@@ -576,3 +784,43 @@ int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
mutex_unlock(&ggtt->lock);
return err;
}
+
+/**
+ * xe_ggtt_print_holes - Print holes
+ * @ggtt: the &xe_ggtt to be inspected
+ * @alignment: min alignment
+ * @p: the &drm_printer
+ *
+ * Print GGTT ranges that are available and return total size available.
+ *
+ * Return: Total available size.
+ */
+u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p)
+{
+ const struct drm_mm *mm = &ggtt->mm;
+ const struct drm_mm_node *entry;
+ u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile));
+ u64 hole_start, hole_end, hole_size;
+ u64 total = 0;
+ char buf[10];
+
+ mutex_lock(&ggtt->lock);
+
+ drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
+ hole_start = max(hole_start, hole_min_start);
+ hole_start = ALIGN(hole_start, alignment);
+ hole_end = ALIGN_DOWN(hole_end, alignment);
+ if (hole_start >= hole_end)
+ continue;
+ hole_size = hole_end - hole_start;
+ total += hole_size;
+
+ string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
+ hole_start, hole_end - 1, buf);
+ }
+
+ mutex_unlock(&ggtt->lock);
+
+ return total;
+}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index 6a96fd54bf60..27e7d67de004 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -12,28 +12,30 @@ struct drm_printer;
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
int xe_ggtt_init(struct xe_ggtt *ggtt);
-void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix);
-
-int xe_ggtt_balloon(struct xe_ggtt *ggtt, u64 start, u64 size, struct drm_mm_node *node);
-void xe_ggtt_deballoon(struct xe_ggtt *ggtt, struct drm_mm_node *node);
-
-int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- u32 size, u32 align);
-int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
- struct drm_mm_node *node,
- u32 size, u32 align, u32 mm_flags);
-void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
- bool invalidate);
+
+struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt);
+void xe_ggtt_node_fini(struct xe_ggtt_node *node);
+int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node,
+ u64 start, u64 size);
+void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node);
+
+int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align);
+int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
+ u32 size, u32 align, u32 mm_flags);
+void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
+bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end);
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare);
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
+u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p);
#ifdef CONFIG_PCI_IOV
-void xe_ggtt_assign(struct xe_ggtt *ggtt, const struct drm_mm_node *node, u16 vfid);
+void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index 2245d88d8f39..cb02b7994a9a 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -13,30 +13,70 @@
struct xe_bo;
struct xe_gt;
+/**
+ * struct xe_ggtt - Main GGTT struct
+ *
+ * In general, each tile can contain its own Global Graphics Translation Table
+ * (GGTT) instance.
+ */
struct xe_ggtt {
+ /** @tile: Back pointer to tile where this GGTT belongs */
struct xe_tile *tile;
-
+ /** @size: Total size of this GGTT */
u64 size;
#define XE_GGTT_FLAGS_64K BIT(0)
+ /**
+ * @flags: Flags for this GGTT
+ * Acceptable flags:
+ * - %XE_GGTT_FLAGS_64K - if the PTE size is 64K. Otherwise, the regular size is 4K.
+ */
unsigned int flags;
-
+ /** @scratch: Internal object allocation used as a scratch page */
struct xe_bo *scratch;
-
+ /** @lock: Mutex lock to protect GGTT data */
struct mutex lock;
-
+ /**
+ * @gsm: The iomem pointer to the actual location of the translation
+ * table located in the GSM for easy PTE manipulation
+ */
u64 __iomem *gsm;
-
+ /** @pt_ops: Page Table operations per platform */
const struct xe_ggtt_pt_ops *pt_ops;
-
+ /** @mm: The memory manager used to manage individual GGTT allocations */
struct drm_mm mm;
-
/** @access_count: counts GGTT writes */
unsigned int access_count;
+ /** @wq: Dedicated unordered work queue to process node removals */
+ struct workqueue_struct *wq;
+};
+
+/**
+ * struct xe_ggtt_node - A node in GGTT.
+ *
+ * This struct needs to be initialized (only once) with xe_ggtt_node_init() before any node
+ * insertion, reservation, or 'ballooning'.
+ * It will then be finalized by either xe_ggtt_node_remove() or xe_ggtt_node_remove_balloon().
+ */
+struct xe_ggtt_node {
+ /** @ggtt: Back pointer to the xe_ggtt where this region will be inserted */
+ struct xe_ggtt *ggtt;
+ /** @base: A drm_mm_node */
+ struct drm_mm_node base;
+ /** @delayed_removal_work: The work struct for the delayed removal */
+ struct work_struct delayed_removal_work;
+ /** @invalidate_on_remove: If it needs invalidation upon removal */
+ bool invalidate_on_remove;
};
+/**
+ * struct xe_ggtt_pt_ops - GGTT Page table operations
+ * These can vary from platform to platform.
+ */
struct xe_ggtt_pt_ops {
+ /** @pte_encode_bo: Encode PTE address for a given BO */
u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
+ /** @ggtt_set_pte: Directly write into GGTT's PTE */
void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
};
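Taken together, the kernel-doc above prescribes a strict node lifecycle: allocate once with xe_ggtt_node_init(), then insert (or balloon), and tear down via removal; xe_ggtt_node_fini() is only for the error path before the node was ever inserted. A minimal sketch of that flow, mirroring how pf_provision_vf_ggtt() uses the API later in this series (ggtt, size and alignment are assumed to be in scope):

    struct xe_ggtt_node *node;
    int err;

    node = xe_ggtt_node_init(ggtt);
    if (IS_ERR(node))
        return PTR_ERR(node);

    err = xe_ggtt_node_insert(node, size, alignment);
    if (err) {
        /* never inserted, so release the allocation directly */
        xe_ggtt_node_fini(node);
        return err;
    }

    /* ... use node->base.start and node->base.size ... */

    /* inserted nodes are torn down by removal, not by _fini() */
    xe_ggtt_node_remove(node, false);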
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index e4ad1d6ce1d5..c518d1d16d82 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -15,11 +15,11 @@ static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
struct xe_sched_msg *msg;
- spin_lock(&sched->base.job_list_lock);
+ xe_sched_msg_lock(sched);
msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
if (msg)
xe_sched_process_msg_queue(sched);
- spin_unlock(&sched->base.job_list_lock);
+ xe_sched_msg_unlock(sched);
}
static struct xe_sched_msg *
@@ -27,12 +27,12 @@ xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
struct xe_sched_msg *msg;
- spin_lock(&sched->base.job_list_lock);
+ xe_sched_msg_lock(sched);
msg = list_first_entry_or_null(&sched->msgs,
struct xe_sched_msg, link);
if (msg)
- list_del(&msg->link);
- spin_unlock(&sched->base.job_list_lock);
+ list_del_init(&msg->link);
+ xe_sched_msg_unlock(sched);
return msg;
}
@@ -93,9 +93,16 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg)
{
- spin_lock(&sched->base.job_list_lock);
- list_add_tail(&msg->link, &sched->msgs);
- spin_unlock(&sched->base.job_list_lock);
+ xe_sched_msg_lock(sched);
+ xe_sched_add_msg_locked(sched, msg);
+ xe_sched_msg_unlock(sched);
+}
+void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg)
+{
+ lockdep_assert_held(&sched->base.job_list_lock);
+
+ list_add_tail(&msg->link, &sched->msgs);
xe_sched_process_msg_queue(sched);
}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 10c6bb9c9386..cee9c6809fc0 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -24,6 +24,18 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
+void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg);
+
+static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
+{
+ spin_lock(&sched->base.job_list_lock);
+}
+
+static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
+{
+ spin_unlock(&sched->base.job_list_lock);
+}
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
{
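With the lock now wrapped in xe_sched_msg_lock()/xe_sched_msg_unlock(), callers that must inspect scheduler state and enqueue a message atomically can use xe_sched_add_msg_locked() inside the same critical section, while xe_sched_add_msg() stays the convenience wrapper. A hedged caller-side sketch (the condition check is illustrative):

    xe_sched_msg_lock(sched);
    if (needs_cleanup)          /* hypothetical caller-side state check */
        xe_sched_add_msg_locked(sched, msg);
    xe_sched_msg_unlock(sched);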
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 29f96f409391..6fbea70d3d36 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>
@@ -165,10 +166,11 @@ static int query_compatibility_version(struct xe_gsc *gsc)
return err;
}
- compat->major = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
- compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
+ compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major);
+ compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
+ compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
- xe_gt_info(gt, "found GSC cv%u.%u\n", compat->major, compat->minor);
+ xe_gt_info(gt, "found GSC cv%u.%u.%u\n", compat->major, compat->minor, compat->patch);
out_bo:
xe_bo_unpin_map_no_vm(bo);
@@ -333,9 +335,11 @@ static int gsc_er_complete(struct xe_gt *gt)
if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
/*
* XXX: we should trigger an FLR here, but we don't have support
- * for that yet.
+ * for that yet. Since we can't recover from the error, we
+ * declare the device as wedged.
*/
xe_gt_err(gt, "GSC ER timed out!\n");
+ xe_device_declare_wedged(gt_to_xe(gt));
return -EIO;
}
@@ -450,11 +454,6 @@ static void free_resources(void *arg)
xe_exec_queue_put(gsc->q);
gsc->q = NULL;
}
-
- if (gsc->private) {
- xe_bo_unpin_map_no_vm(gsc->private);
- gsc->private = NULL;
- }
}
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
@@ -474,10 +473,9 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
if (!hwe)
return -ENODEV;
- bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
- ttm_bo_type_kernel,
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT);
+ bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4M,
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -537,7 +535,10 @@ void xe_gsc_load_start(struct xe_gsc *gsc)
/* GSC FW survives GT reset and D3Hot */
if (gsc_fw_is_loaded(gt)) {
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
+ if (xe_gsc_proxy_init_done(gsc))
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
+ else
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
return;
}
@@ -589,3 +590,35 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
msleep(200);
}
}
+
+/**
+ * xe_gsc_print_info - print info about GSC FW status
+ * @gsc: the GSC structure
+ * @p: the printer to be used to print the info
+ */
+void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int err;
+
+ xe_uc_fw_print(&gsc->fw, p);
+
+ drm_printf(p, "\tfound security version %u\n", gsc->security_version);
+
+ if (!xe_uc_fw_is_enabled(&gsc->fw))
+ return;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (err)
+ return;
+
+ drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index 1c7a623faf11..e282b9ef6ec4 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_printer;
struct xe_gsc;
struct xe_gt;
struct xe_hw_engine;
@@ -21,4 +22,6 @@ void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
+void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_debugfs.c b/drivers/gpu/drm/xe/xe_gsc_debugfs.c
new file mode 100644
index 000000000000..461d7e99c2b3
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_debugfs.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gsc_debugfs.h"
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gsc.h"
+#include "xe_macros.h"
+#include "xe_pm.h"
+
+static struct xe_gt *
+gsc_to_gt(struct xe_gsc *gsc)
+{
+ return container_of(gsc, struct xe_gt, uc.gsc);
+}
+
+static struct xe_device *
+gsc_to_xe(struct xe_gsc *gsc)
+{
+ return gt_to_xe(gsc_to_gt(gsc));
+}
+
+static struct xe_gsc *node_to_gsc(struct drm_info_node *node)
+{
+ return node->info_ent->data;
+}
+
+static int gsc_info(struct seq_file *m, void *data)
+{
+ struct xe_gsc *gsc = node_to_gsc(m->private);
+ struct xe_device *xe = gsc_to_xe(gsc);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_pm_runtime_get(xe);
+ xe_gsc_print_info(gsc, &p);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"gsc_info", gsc_info, 0},
+};
+
+void xe_gsc_debugfs_register(struct xe_gsc *gsc, struct dentry *parent)
+{
+ struct drm_minor *minor = gsc_to_xe(gsc)->drm.primary;
+ struct drm_info_list *local;
+ int i;
+
+#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
+ local = drmm_kmalloc(&gsc_to_xe(gsc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
+ if (!local)
+ return;
+
+ memcpy(local, debugfs_list, DEBUGFS_SIZE);
+#undef DEBUGFS_SIZE
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
+ local[i].data = gsc;
+
+ drm_debugfs_create_files(local,
+ ARRAY_SIZE(debugfs_list),
+ parent, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc_debugfs.h b/drivers/gpu/drm/xe/xe_gsc_debugfs.h
new file mode 100644
index 000000000000..c2e2645dc705
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GSC_DEBUGFS_H_
+#define _XE_GSC_DEBUGFS_H_
+
+struct dentry;
+struct xe_gsc;
+
+void xe_gsc_debugfs_register(struct xe_gsc *gsc, struct dentry *parent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index aa812a2bc3ed..2d6ea8c01445 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -62,11 +62,6 @@ gsc_to_gt(struct xe_gsc *gsc)
return container_of(gsc, struct xe_gt, uc.gsc);
}
-static inline struct xe_device *kdev_to_xe(struct device *kdev)
-{
- return dev_get_drvdata(kdev);
-}
-
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
@@ -345,7 +340,7 @@ void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
struct device *mei_kdev, void *data)
{
- struct xe_device *xe = kdev_to_xe(xe_kdev);
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
struct xe_gt *gt = xe->tiles[0].media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
@@ -360,7 +355,7 @@ static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
struct device *mei_kdev, void *data)
{
- struct xe_device *xe = kdev_to_xe(xe_kdev);
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
struct xe_gt *gt = xe->tiles[0].media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
@@ -376,27 +371,6 @@ static const struct component_ops xe_gsc_proxy_component_ops = {
.unbind = xe_gsc_proxy_component_unbind,
};
-static void proxy_channel_free(struct drm_device *drm, void *arg)
-{
- struct xe_gsc *gsc = arg;
-
- if (!gsc->proxy.bo)
- return;
-
- if (gsc->proxy.to_csme) {
- kfree(gsc->proxy.to_csme);
- gsc->proxy.to_csme = NULL;
- gsc->proxy.from_csme = NULL;
- }
-
- if (gsc->proxy.bo) {
- iosys_map_clear(&gsc->proxy.to_gsc);
- iosys_map_clear(&gsc->proxy.from_gsc);
- xe_bo_unpin_map_no_vm(gsc->proxy.bo);
- gsc->proxy.bo = NULL;
- }
-}
-
static int proxy_channel_alloc(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
@@ -405,18 +379,15 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
struct xe_bo *bo;
void *csme;
- csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
+ csme = drmm_kzalloc(&xe->drm, GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
if (!csme)
return -ENOMEM;
- bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
- if (IS_ERR(bo)) {
- kfree(csme);
+ bo = xe_managed_bo_create_pin_map(xe, tile, GSC_PROXY_CHANNEL_SIZE,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
+ if (IS_ERR(bo))
return PTR_ERR(bo);
- }
gsc->proxy.bo = bo;
gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
@@ -424,7 +395,7 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
gsc->proxy.to_csme = csme;
gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;
- return drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index cb9df15e7137..f0dc2bf24c7b 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -8,7 +8,7 @@
#include <linux/minmax.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
@@ -109,9 +109,9 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
if (!xe_gt_is_media_type(gt)) {
xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
- reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg |= CG_DIS_CNTLBUS;
- xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
+ xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}
xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
@@ -133,9 +133,9 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
if (WARN_ON(err))
return;
- reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg &= ~CG_DIS_CNTLBUS;
- xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
+ xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}
@@ -555,7 +555,6 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
xe_gt_mcr_init_early(gt);
xe_pat_init(gt);
- xe_gt_enable_host_l2_vram(gt);
err = xe_uc_init(&gt->uc);
if (err)
@@ -567,6 +566,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
xe_gt_topology_init(gt);
xe_gt_mcr_init(gt);
+ xe_gt_enable_host_l2_vram(gt);
out_fw:
xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 8b1a5027dcf2..ee138e9768a2 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -6,6 +6,8 @@
#ifndef _XE_GT_H_
#define _XE_GT_H_
+#include <linux/fault-inject.h>
+
#include <drm/drm_util.h>
#include "xe_device.h"
@@ -19,19 +21,11 @@
#define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)
-#ifdef CONFIG_FAULT_INJECTION
-#include <linux/fault-inject.h> /* XXX: fault-inject.h is broken */
extern struct fault_attr gt_reset_failure;
static inline bool xe_fault_inject_gt_reset(void)
{
return should_fail(&gt_reset_failure, 1);
}
-#else
-static inline bool xe_fault_inject_gt_reset(void)
-{
- return false;
-}
-#endif
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_hwconfig(struct xe_gt *gt);
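Because <linux/fault-inject.h> now provides no-op stubs when CONFIG_FAULT_INJECTION is disabled, the #ifdef fallback is gone and callers may invoke the helper unconditionally. A sketch of a typical call site (the -ECANCELED mapping is an assumption for illustration, not taken from this hunk):

    if (xe_fault_inject_gt_reset()) {
        /* injected failure: behave as if the reset was aborted */
        err = -ECANCELED;
        goto err_fail;
    }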
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 5e7fd937917a..8f95d3a5949b 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -17,7 +17,9 @@
#include "xe_gt_mcr.h"
#include "xe_gt_sriov_pf_debugfs.h"
#include "xe_gt_sriov_vf_debugfs.h"
+#include "xe_gt_stats.h"
#include "xe_gt_topology.h"
+#include "xe_guc_hwconfig.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
#include "xe_macros.h"
@@ -269,6 +271,15 @@ static int vecs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_guc_hwconfig_dump(&gt->uc.guc, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
{"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
@@ -286,6 +297,8 @@ static const struct drm_info_list debugfs_list[] = {
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
{"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc},
{"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc},
+ {"stats", .show = xe_gt_debugfs_simple_show, .data = xe_gt_stats_print_info},
+ {"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
void xe_gt_debugfs_register(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 6d948a469126..7d7bd0be6233 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -8,8 +8,10 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
+#include "xe_guc_hwconfig.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
@@ -297,6 +299,36 @@ static void init_steering_mslice(struct xe_gt *gt)
static unsigned int dss_per_group(struct xe_gt *gt)
{
+ struct xe_guc *guc = &gt->uc.guc;
+ u32 max_slices = 0, max_subslices = 0;
+ int ret;
+
+ /*
+ * Try to query the GuC's hwconfig table for the maximum number of
+ * slices and subslices. These don't reflect the platform's actual
+ * slice/DSS counts, just the physical layout by which we should
+ * determine the steering targets. On older platforms with older GuC
+ * firmware releases these attributes may not be present in the table,
+ * in which case we fall back to the old hardcoded layouts.
+ */
+#define HWCONFIG_ATTR_MAX_SLICES 1
+#define HWCONFIG_ATTR_MAX_SUBSLICES 70
+
+ ret = xe_guc_hwconfig_lookup_u32(guc, HWCONFIG_ATTR_MAX_SLICES,
+ &max_slices);
+ if (ret < 0 || max_slices == 0)
+ goto fallback;
+
+ ret = xe_guc_hwconfig_lookup_u32(guc, HWCONFIG_ATTR_MAX_SUBSLICES,
+ &max_subslices);
+ if (ret < 0 || max_subslices == 0)
+ goto fallback;
+
+ return DIV_ROUND_UP(max_subslices, max_slices);
+
+fallback:
+ xe_gt_dbg(gt, "GuC hwconfig cannot provide dss/slice; using typical fallback values\n");
if (gt_to_xe(gt)->info.platform == XE_PVC)
return 8;
else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
@@ -314,16 +346,16 @@ static unsigned int dss_per_group(struct xe_gt *gt)
*/
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
{
- int dss_per_grp = dss_per_group(gt);
-
xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS);
- *group = dss / dss_per_grp;
- *instance = dss % dss_per_grp;
+ *group = dss / gt->steering_dss_per_grp;
+ *instance = dss % gt->steering_dss_per_grp;
}
static void init_steering_dss(struct xe_gt *gt)
{
+ gt->steering_dss_per_grp = dss_per_group(gt);
+
xe_gt_mcr_get_dss_steering(gt,
min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0)),
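To make the steering arithmetic concrete: if the hwconfig table reports max_slices = 2 and max_subslices = 16, dss_per_group() returns DIV_ROUND_UP(16, 2) = 8, which is cached in gt->steering_dss_per_grp; a DSS index of 11 then steers to group 11 / 8 = 1, instance 11 % 8 = 3. A short sketch with those illustrative values:

    u16 group, instance;

    /* assumes gt->steering_dss_per_grp == 8 for this example */
    xe_gt_mcr_get_dss_steering(gt, 11, &group, &instance);
    /* group == 1, instance == 3 */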
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index b2a7fa55bd18..00af059a8971 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -212,6 +212,12 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
* TODO: Change to read lock? Using write lock for simplicity.
*/
down_write(&vm->lock);
+
+ if (xe_vm_is_closed(vm)) {
+ err = -ENOENT;
+ goto unlock_vm;
+ }
+
vma = lookup_vma(vm, pf->page_addr);
if (!vma) {
err = -EINVAL;
@@ -287,7 +293,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
PFD_VIRTUAL_ADDR_LO_SHIFT;
pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
- PF_QUEUE_NUM_DW;
+ pf_queue->num_dw;
ret = true;
}
spin_unlock_irq(&pf_queue->lock);
@@ -299,7 +305,8 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
{
lockdep_assert_held(&pf_queue->lock);
- return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
+ return CIRC_SPACE(pf_queue->head, pf_queue->tail,
+ pf_queue->num_dw) <=
PF_MSG_LEN_DW;
}
@@ -312,22 +319,23 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
u32 asid;
bool full;
- /*
- * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
- */
- BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW);
-
if (unlikely(len != PF_MSG_LEN_DW))
return -EPROTO;
asid = FIELD_GET(PFD_ASID, msg[1]);
pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
+ /*
+ * The below logic doesn't work unless pf_queue->num_dw % PF_MSG_LEN_DW == 0
+ */
+ xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW));
+
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
if (!full) {
memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
- pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
+ pf_queue->head = (pf_queue->head + len) %
+ pf_queue->num_dw;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
@@ -394,18 +402,47 @@ static void pagefault_fini(void *arg)
destroy_workqueue(gt->usm.pf_wq);
}
+static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ xe_dss_mask_t all_dss;
+ int num_dss, num_eus;
+
+ bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
+ XE_MAX_DSS_FUSE_BITS);
+
+ num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
+ num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
+ XE_MAX_EU_FUSE_BITS) * num_dss;
+
+ /* user can issue separate page faults per EU and per CS */
+ pf_queue->num_dw =
+ (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+
+ pf_queue->gt = gt;
+ pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
+ sizeof(u32), GFP_KERNEL);
+ if (!pf_queue->data)
+ return -ENOMEM;
+
+ spin_lock_init(&pf_queue->lock);
+ INIT_WORK(&pf_queue->worker, pf_queue_work_func);
+
+ return 0;
+}
+
int xe_gt_pagefault_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- int i;
+ int i, ret = 0;
if (!xe->info.has_usm)
return 0;
for (i = 0; i < NUM_PF_QUEUE; ++i) {
- gt->usm.pf_queue[i].gt = gt;
- spin_lock_init(&gt->usm.pf_queue[i].lock);
- INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
+ ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
+ if (ret)
+ return ret;
}
for (i = 0; i < NUM_ACC_QUEUE; ++i) {
gt->usm.acc_queue[i].gt = gt;
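The sizing keeps the circular-buffer invariant by construction: num_dw is a multiple of PF_MSG_LEN_DW because it is computed as (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW, so the head/tail modulo arithmetic never splits a message across the wrap point. A worked example with illustrative topology numbers:

    /* e.g. 64 DSS with 8 EUs each => 512 EUs (illustrative values) */
    int num_eus = 64 * 8;
    u32 num_dw = (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;

    /* num_dw % PF_MSG_LEN_DW == 0 holds for any num_eus */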
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 9dbba9ab7a9a..905f409db74b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -5,10 +5,11 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_regs.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_mmio.h"
@@ -57,6 +58,10 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
if (err)
return err;
+ err = xe_gt_sriov_pf_control_init(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -93,4 +98,5 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
xe_gt_sriov_pf_config_restart(gt);
+ xe_gt_sriov_pf_control_restart(gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index b6f0a7299c03..8250ef71e685 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -29,6 +29,7 @@
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
+#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"
@@ -232,14 +233,14 @@ static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
{
u32 n = 0;
- if (drm_mm_node_allocated(&config->ggtt_region)) {
+ if (xe_ggtt_node_allocated(config->ggtt_region)) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region.start);
- cfg[n++] = upper_32_bits(config->ggtt_region.start);
+ cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
+ cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
- cfg[n++] = lower_32_bits(config->ggtt_region.size);
- cfg[n++] = upper_32_bits(config->ggtt_region.size);
+ cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
+ cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
}
return n;
@@ -276,6 +277,14 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
cfg[n++] = config->preempt_timeout;
+#define encode_threshold_config(TAG, ...) ({ \
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG); \
+ cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)]; \
+});
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
+#undef encode_threshold_config
+
return n;
}
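encode_threshold_config() leans on the X-macro style of MAKE_XE_GUC_KLV_THRESHOLDS_SET(): the set macro expands its argument once per threshold TAG, so adding a threshold to the set automatically extends both this encode path and the reset path added later in this patch. A reduced, hypothetical sketch of the pattern (the threshold names here are placeholders):

    #define FOR_EACH_THRESHOLD(X) \
        X(CAT_ERR) \
        X(PAGE_FAULT)

    #define encode_one(TAG) \
        cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG); \
        cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];

    FOR_EACH_THRESHOLD(encode_one)
    #undef encode_one
    #undef FOR_EACH_THRESHOLD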
@@ -369,29 +378,28 @@ static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u6
return err ?: err2;
}
-static void pf_release_ggtt(struct xe_tile *tile, struct drm_mm_node *node)
+static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
- struct xe_ggtt *ggtt = tile->mem.ggtt;
-
- if (drm_mm_node_allocated(node)) {
+ if (xe_ggtt_node_allocated(node)) {
/*
* explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
* is redundant, as PTE will be implicitly re-assigned to PF by
* the xe_ggtt_clear() called by below xe_ggtt_remove_node().
*/
- xe_ggtt_remove_node(ggtt, node, false);
+ xe_ggtt_node_remove(node, false);
}
}
static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
- pf_release_ggtt(gt_to_tile(gt), &config->ggtt_region);
+ pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
+ config->ggtt_region = NULL;
}
static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- struct drm_mm_node *node = &config->ggtt_region;
+ struct xe_ggtt_node *node;
struct xe_tile *tile = gt_to_tile(gt);
struct xe_ggtt *ggtt = tile->mem.ggtt;
u64 alignment = pf_get_ggtt_alignment(gt);
@@ -403,40 +411,48 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
size = round_up(size, alignment);
- if (drm_mm_node_allocated(node)) {
+ if (xe_ggtt_node_allocated(config->ggtt_region)) {
err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
if (unlikely(err))
return err;
- pf_release_ggtt(tile, node);
+ pf_release_vf_config_ggtt(gt, config);
}
- xe_gt_assert(gt, !drm_mm_node_allocated(node));
+ xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
if (!size)
return 0;
- err = xe_ggtt_insert_special_node(ggtt, node, size, alignment);
+ node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ err = xe_ggtt_node_insert(node, size, alignment);
if (unlikely(err))
- return err;
+ goto err;
- xe_ggtt_assign(ggtt, node, vfid);
+ xe_ggtt_assign(node, vfid);
xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
- vfid, node->start, node->start + node->size - 1);
+ vfid, node->base.start, node->base.start + node->base.size - 1);
- err = pf_distribute_config_ggtt(gt->tile, vfid, node->start, node->size);
+ err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
if (unlikely(err))
- return err;
+ goto err;
+ config->ggtt_region = node;
return 0;
+err:
+ xe_ggtt_node_fini(node);
+ return err;
}
static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- struct drm_mm_node *node = &config->ggtt_region;
+ struct xe_ggtt_node *node = config->ggtt_region;
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
- return drm_mm_node_allocated(node) ? node->size : 0;
+ return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}
/**
@@ -587,30 +603,11 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
- const struct drm_mm *mm = &ggtt->mm;
- const struct drm_mm_node *entry;
u64 alignment = pf_get_ggtt_alignment(gt);
u64 spare = pf_get_spare_ggtt(gt);
- u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
- u64 hole_start, hole_end, hole_size;
- u64 max_hole = 0;
+ u64 max_hole;
- mutex_lock(&ggtt->lock);
-
- drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
- hole_start = max(hole_start, hole_min_start);
- hole_start = ALIGN(hole_start, alignment);
- hole_end = ALIGN_DOWN(hole_end, alignment);
- if (hole_start >= hole_end)
- continue;
- hole_size = hole_end - hole_start;
- xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n",
- hole_start, hole_size / SZ_1K);
- spare -= min3(spare, hole_size, max_hole);
- max_hole = max(max_hole, hole_size);
- }
-
- mutex_unlock(&ggtt->lock);
+ max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);
xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
max_hole / SZ_1K, spare / SZ_1K);
@@ -1401,6 +1398,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
ALIGN(size, PAGE_SIZE),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
XE_BO_FLAG_PINNED);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1844,6 +1842,18 @@ u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
return value;
}
+static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+#define reset_threshold_config(TAG, ...) ({ \
+ config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0; \
+});
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
+#undef reset_threshold_config
+}
+
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
@@ -1859,6 +1869,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
pf_reset_config_sched(gt, config);
+ pf_reset_config_thresholds(gt, config);
}
/**
@@ -1892,6 +1903,87 @@ int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool forc
return force ? 0 : err;
}
+static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
+{
+ if (xe_ggtt_node_allocated(ggtt_region))
+ xe_ggtt_assign(ggtt_region, vfid);
+}
+
+static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
+{
+ struct xe_migrate *m = tile->migrate;
+ struct dma_fence *fence;
+ int err;
+
+ if (!bo)
+ return 0;
+
+ xe_bo_lock(bo, false);
+ fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ } else if (!fence) {
+ err = -ENOMEM;
+ } else {
+ long ret = dma_fence_wait_timeout(fence, false, timeout);
+
+ err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
+ dma_fence_put(fence);
+ if (!err)
+ xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
+ jiffies_to_msecs(timeout - ret));
+ }
+ xe_bo_unlock(bo);
+
+ return err;
+}
+
+static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ int err = 0;
+
+ /*
+ * Only GGTT and LMEM require clearing by the PF.
+ * GuC doorbell IDs and context IDs do not need any clearing.
+ */
+ if (!xe_gt_is_media_type(gt)) {
+ pf_sanitize_ggtt(config->ggtt_region, vfid);
+ if (IS_DGFX(xe))
+ err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
+ }
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @timeout: maximum timeout to wait for completion in jiffies
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
+{
+ int err;
+
+ xe_gt_assert(gt, vfid != PFID);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_sanitize_vf_resources(gt, vfid, timeout);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (unlikely(err))
+ xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
+ vfid, ERR_PTR(err));
+ return err;
+}
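A plausible call site is the FLR reset-config step, which matches the 5-second FLR_RESET_CONFIG timeout added to pf_get_default_timeout() later in this series; a hedged usage sketch (the timeout value is illustrative):

    /* scrub VF resources before re-provisioning (timeout illustrative) */
    err = xe_gt_sriov_pf_config_sanitize(gt, vfid, 5 * HZ);
    if (err)
        return err;     /* a notice has already been logged */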
+
/**
* xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
* @gt: the &xe_gt
@@ -2024,13 +2116,15 @@ int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
for (n = 1; n <= total_vfs; n++) {
config = &gt->sriov.pf.vfs[n].config;
- if (!drm_mm_node_allocated(&config->ggtt_region))
+ if (!xe_ggtt_node_allocated(config->ggtt_region))
continue;
- string_get_size(config->ggtt_region.size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
- n, config->ggtt_region.start,
- config->ggtt_region.start + config->ggtt_region.size - 1, buf);
+ n, config->ggtt_region->base.start,
+ config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
+ buf);
}
return 0;
@@ -2118,12 +2212,8 @@ int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
- const struct drm_mm *mm = &ggtt->mm;
- const struct drm_mm_node *entry;
u64 alignment = pf_get_ggtt_alignment(gt);
- u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt));
- u64 hole_start, hole_end, hole_size;
- u64 spare, avail, total = 0;
+ u64 spare, avail, total;
char buf[10];
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
@@ -2131,24 +2221,8 @@ int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_prin
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
spare = pf_get_spare_ggtt(gt);
+ total = xe_ggtt_print_holes(ggtt, alignment, p);
- mutex_lock(&ggtt->lock);
-
- drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
- hole_start = max(hole_start, hole_min_start);
- hole_start = ALIGN(hole_start, alignment);
- hole_end = ALIGN_DOWN(hole_end, alignment);
- if (hole_start >= hole_end)
- continue;
- hole_size = hole_end - hole_start;
- total += hole_size;
-
- string_get_size(hole_size, 1, STRING_UNITS_2, buf, sizeof(buf));
- drm_printf(p, "range:\t%#llx-%#llx\t(%s)\n",
- hole_start, hole_end - 1, buf);
- }
-
- mutex_unlock(&ggtt->lock);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index c0e6e4743dc2..42e64769f666 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -50,6 +50,7 @@ int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
enum xe_guc_klv_threshold_index index, u32 value);
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout);
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force);
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
index 7bc66656fcc7..2d3b73d78f14 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
@@ -6,8 +6,7 @@
#ifndef _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
#define _XE_GT_SRIOV_PF_CONFIG_TYPES_H_
-#include <drm/drm_mm.h>
-
+#include "xe_ggtt_types.h"
#include "xe_guc_klv_thresholds_set_types.h"
struct xe_bo;
@@ -19,7 +18,7 @@ struct xe_bo;
*/
struct xe_gt_sriov_config {
/** @ggtt_region: GGTT region assigned to the VF. */
- struct drm_mm_node ggtt_region;
+ struct xe_ggtt_node *ggtt_region;
/** @lmem_obj: LMEM allocation for use by the VF. */
struct xe_bo *lmem_obj;
/** @num_ctxs: number of GuC contexts IDs. */
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index ebf06e037750..02f7328bd6ce 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -3,11 +3,17 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <drm/drm_managed.h>
+
#include "abi/guc_actions_sriov_abi.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_monitor.h"
+#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
@@ -41,10 +47,6 @@ static int guc_action_vf_control_cmd(struct xe_guc *guc, u32 vfid, u32 cmd)
};
int ret;
- /* XXX those two commands are now sent from the G2H handler */
- if (cmd == GUC_PF_TRIGGER_VF_FLR_START || cmd == GUC_PF_TRIGGER_VF_FLR_FINISH)
- return xe_guc_ct_send_g2h_handler(&guc->ct, request, ARRAY_SIZE(request));
-
ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
return ret > 0 ? -EPROTO : ret;
}
@@ -54,6 +56,8 @@ static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
int err;
xe_gt_assert(gt, vfid != PFID);
+ xe_gt_sriov_dbg_verbose(gt, "sending VF%u control command %s\n",
+ vfid, control_cmd_to_string(cmd));
err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
if (unlikely(err))
@@ -88,6 +92,456 @@ static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
}
/**
+ * DOC: The VF state machine
+ *
+ * The simplified VF state machine could be presented as::
+ *
+ * pause--------------------------o
+ * / |
+ * / v
+ * (READY)<------------------resume-----(PAUSED)
+ * ^ \ / /
+ * | \ / /
+ * | stop---->(STOPPED)<----stop /
+ * | / /
+ * | / /
+ * o--------<-----flr /
+ * \ /
+ * o------<--------------------flr
+ *
+ * Where:
+ *
+ * * READY - represents a state in which VF is fully operable
+ * * PAUSED - represents a state in which VF activity is temporarily suspended
+ * * STOPPED - represents a state in which VF activity is definitely halted
+ * * pause - represents a request to temporarily suspend VF activity
+ * * resume - represents a request to resume VF activity
+ * * stop - represents a request to definitely halt VF activity
+ * * flr - represents a request to perform VF FLR to restore VF activity
+ *
+ * However, each state transition requires additional steps that involves
+ * communication with GuC that might fail or be interrupted by other requests::
+ *
+ * .................................WIP....
+ * : :
+ * pause--------------------->PAUSE_WIP----------------------------o
+ * / : / \ : |
+ * / : o----<---stop flr--o : |
+ * / : | \ / | : V
+ * (READY,RESUMED)<--------+------------RESUME_WIP<----+--<-----resume--(PAUSED)
+ * ^ \ \ : | | : / /
+ * | \ \ : | | : / /
+ * | \ \ : | | : / /
+ * | \ \ : o----<----------------------+--<-------stop /
+ * | \ \ : | | : /
+ * | \ \ : V | : /
+ * | \ stop----->STOP_WIP---------flr--->-----o : /
+ * | \ : | | : /
+ * | \ : | V : /
+ * | flr--------+----->----------------->FLR_WIP<-----flr
+ * | : | / ^ :
+ * | : | / | :
+ * o--------<-------:----+-----<----------------o | :
+ * : | | :
+ * :....|...........................|.....:
+ * | |
+ * V |
+ * (STOPPED)--------------------flr
+ *
+ * For details about each internal WIP state machine see:
+ *
+ * * `The VF PAUSE state machine`_
+ * * `The VF RESUME state machine`_
+ * * `The VF STOP state machine`_
+ * * `The VF FLR state machine`_
+ */
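All of the WIP sub-machines documented below share one primitive: each state is a bit in a per-VF unsigned long, and every transition is an atomic test_and_set_bit()/test_and_clear_bit(), so concurrent requests and the control worker can race without extra locking. A minimal sketch of the convention, using the helpers defined below:

    /* enter: true only for the one thread that actually set the bit */
    if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) {
        /* this thread owns the PAUSE flow */
    }

    /* exit: true only if the bit was still set */
    if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) {
        /* tear down any PAUSE sub-states */
    }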
+
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
+{
+ switch (bit) {
+#define CASE2STR(_X) \
+ case XE_GT_SRIOV_STATE_##_X: return #_X
+ CASE2STR(WIP);
+ CASE2STR(FLR_WIP);
+ CASE2STR(FLR_SEND_START);
+ CASE2STR(FLR_WAIT_GUC);
+ CASE2STR(FLR_GUC_DONE);
+ CASE2STR(FLR_RESET_CONFIG);
+ CASE2STR(FLR_RESET_DATA);
+ CASE2STR(FLR_RESET_MMIO);
+ CASE2STR(FLR_SEND_FINISH);
+ CASE2STR(FLR_FAILED);
+ CASE2STR(PAUSE_WIP);
+ CASE2STR(PAUSE_SEND_PAUSE);
+ CASE2STR(PAUSE_WAIT_GUC);
+ CASE2STR(PAUSE_GUC_DONE);
+ CASE2STR(PAUSE_FAILED);
+ CASE2STR(PAUSED);
+ CASE2STR(RESUME_WIP);
+ CASE2STR(RESUME_SEND_RESUME);
+ CASE2STR(RESUME_FAILED);
+ CASE2STR(RESUMED);
+ CASE2STR(STOP_WIP);
+ CASE2STR(STOP_SEND_STOP);
+ CASE2STR(STOP_FAILED);
+ CASE2STR(STOPPED);
+ CASE2STR(MISMATCH);
+#undef CASE2STR
+ default: return "?";
+ }
+}
+#endif
+
+static unsigned long pf_get_default_timeout(enum xe_gt_sriov_control_bits bit)
+{
+ switch (bit) {
+ case XE_GT_SRIOV_STATE_FLR_WAIT_GUC:
+ case XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC:
+ return HZ / 2;
+ case XE_GT_SRIOV_STATE_FLR_WIP:
+ case XE_GT_SRIOV_STATE_FLR_RESET_CONFIG:
+ return 5 * HZ;
+ default:
+ return HZ;
+ }
+}
+
+static struct xe_gt_sriov_control_state *pf_pick_vf_control(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));
+
+ return &gt->sriov.pf.vfs[vfid].control;
+}
+
+static unsigned long *pf_peek_vf_state(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ return &cs->state;
+}
+
+static bool pf_check_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ return test_bit(bit, pf_peek_vf_state(gt, vfid));
+}
+
+static void pf_dump_vf_state(struct xe_gt *gt, unsigned int vfid)
+{
+ unsigned long state = *pf_peek_vf_state(gt, vfid);
+ enum xe_gt_sriov_control_bits bit;
+
+ if (state) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %#lx%s%*pbl\n",
+ vfid, state, state ? " bits " : "",
+ (int)BITS_PER_LONG, &state);
+ for_each_set_bit(bit, &state, BITS_PER_LONG)
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d)\n",
+ vfid, control_bit_to_string(bit), bit);
+ } else {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state READY\n", vfid);
+ }
+}
+
+static bool pf_expect_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ bool result = pf_check_vf_state(gt, vfid, bit);
+
+ if (unlikely(!result))
+ pf_dump_vf_state(gt, vfid);
+
+ return result;
+}
+
+static bool pf_expect_vf_not_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ bool result = !pf_check_vf_state(gt, vfid, bit);
+
+ if (unlikely(!result))
+ pf_dump_vf_state(gt, vfid);
+
+ return result;
+}
+
+static bool pf_enter_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (!test_and_set_bit(bit, pf_peek_vf_state(gt, vfid))) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) enter\n",
+ vfid, control_bit_to_string(bit), bit);
+ return true;
+ }
+ return false;
+}
+
+static bool pf_exit_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (test_and_clear_bit(bit, pf_peek_vf_state(gt, vfid))) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) exit\n",
+ vfid, control_bit_to_string(bit), bit);
+ return true;
+ }
+ return false;
+}
+
+static void pf_escape_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (pf_exit_vf_state(gt, vfid, bit))
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) escaped by %ps\n",
+ vfid, control_bit_to_string(bit), bit,
+ __builtin_return_address(0));
+}
+
+static void pf_enter_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_MISMATCH)) {
+ xe_gt_sriov_dbg(gt, "VF%u state mismatch detected by %ps\n",
+ vfid, __builtin_return_address(0));
+ pf_dump_vf_state(gt, vfid);
+ }
+}
+
+static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_MISMATCH))
+ xe_gt_sriov_dbg(gt, "VF%u state mismatch cleared by %ps\n",
+ vfid, __builtin_return_address(0));
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED);
+}
+
+#define pf_enter_vf_state_machine_bug(gt, vfid) ({ \
+ pf_enter_vf_mismatch((gt), (vfid)); \
+})
+
+static void pf_queue_control_worker(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ queue_work(xe->sriov.wq, &gt->sriov.pf.control.worker);
+}
+
+static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ spin_lock(&pfc->lock);
+ list_move_tail(&gt->sriov.pf.vfs[vfid].control.link, &pfc->list);
+ spin_unlock(&pfc->lock);
+
+ pf_queue_control_worker(gt);
+}
+
+static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid);
+
+static bool pf_enter_vf_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ reinit_completion(&cs->done);
+ return true;
+ }
+ return false;
+}
+
+static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ pf_exit_vf_flr_wip(gt, vfid);
+ pf_exit_vf_stop_wip(gt, vfid);
+ pf_exit_vf_pause_wip(gt, vfid);
+ pf_exit_vf_resume_wip(gt, vfid);
+
+ complete_all(&cs->done);
+ }
+}
+
+static int pf_wait_vf_wip_done(struct xe_gt *gt, unsigned int vfid, unsigned long timeout)
+{
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ return wait_for_completion_timeout(&cs->done, timeout) ? 0 : -ETIMEDOUT;
+}
+
+static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+/**
+ * DOC: The VF PAUSE state machine
+ *
+ * The VF PAUSE state machine looks like::
+ *
+ * (READY,RESUMED)<-------------<---------------------o---------o
+ * | \ \
+ * pause \ \
+ * | \ \
+ * ....V...........................PAUSE_WIP........ \ \
+ * : \ : o \
+ * : \ o------<-----busy : | \
+ * : \ / / : | |
+ * : PAUSE_SEND_PAUSE ---failed--->----------o--->(PAUSE_FAILED) |
+ * : | \ : | |
+ * : acked rejected---->----------o--->(MISMATCH) /
+ * : | : /
+ * : v : /
+ * : PAUSE_WAIT_GUC : /
+ * : | : /
+ * : done : /
+ * : | : /
+ * : v : /
+ * : PAUSE_GUC_DONE o-----restart
+ * : / :
+ * : / :
+ * :....o..............o...............o...........:
+ * | | |
+ * completed flr stop
+ * | | |
+ * V .....V..... ......V.....
+ * (PAUSED) : FLR_WIP : : STOP_WIP :
+ * :.........: :..........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) {
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE);
+ }
+}
+
+static void pf_enter_vf_paused(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_pause_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_paused(gt, vfid);
+}
+
+static void pf_enter_vf_pause_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_pause_failed(gt, vfid);
+}
+
+static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
+ return false;
+
+ pf_enter_vf_pause_completed(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
+ pf_queue_vf(gt, vfid);
+}
+
+static void pf_enter_pause_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+}
+
+static bool pf_exit_pause_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
+}
+
+static void pf_enter_vf_pause_send_pause(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_pause_send_pause(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE))
+ return false;
+
+ /* GuC may actually send a PAUSE_DONE before we get a RESPONSE */
+ pf_enter_pause_wait_guc(gt, vfid);
+
+ err = pf_send_vf_pause(gt, vfid);
+ if (err) {
+ /* send failed, so we shouldn't expect PAUSE_DONE from GuC */
+ pf_exit_pause_wait_guc(gt, vfid);
+
+ if (err == -EBUSY)
+ pf_enter_vf_pause_send_pause(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_pause_rejected(gt, vfid);
+ else
+ pf_enter_vf_pause_failed(gt, vfid);
+ } else {
+ /*
+ * We have already moved to WAIT_GUC, maybe even to GUC_DONE,
+ * but since GuC didn't complain we may clear MISMATCH.
+ */
+ pf_exit_vf_mismatch(gt, vfid);
+ }
+
+ return true;
+}
+
+static bool pf_enter_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_pause_send_pause(gt, vfid);
+ return true;
+ }
+
+ return false;
+}
+
+/**
* xe_gt_sriov_pf_control_pause_vf - Pause a VF.
* @gt: the &xe_gt
* @vfid: the VF identifier
@@ -98,7 +552,140 @@ static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_pause(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_PAUSE_WIP);
+ int err;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid);
+ return -EPERM;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_dbg(gt, "VF%u was already paused!\n", vfid);
+ return -ESTALE;
+ }
+
+ if (!pf_enter_vf_pause_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u pause already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err) {
+ xe_gt_sriov_dbg(gt, "VF%u pause didn't finish in %u ms (%pe)\n",
+ vfid, jiffies_to_msecs(timeout), ERR_PTR(err));
+ return err;
+ }
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_info(gt, "VF%u paused!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u pause failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u pause was canceled!\n", vfid);
+ return -ECANCELED;
+}
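The return codes map the outcomes onto distinct errors: -EPERM (a stopped VF cannot be paused), -ESTALE (already paused), -EALREADY (a pause is in flight), -ETIMEDOUT from the wait, -EIO (GuC reported failure) and -ECANCELED (superseded by FLR or stop). A hedged caller sketch; treating the first two as success is a policy choice, not mandated here:

    err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
    if (err == -ESTALE || err == -EALREADY)
        err = 0;    /* already paused, or pause already in flight */
    if (err)
        return err;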
+
+/**
+ * DOC: The VF RESUME state machine
+ *
+ * The VF RESUME state machine looks like::
+ *
+ * (PAUSED)<-----------------<------------------------o
+ * | \
+ * resume \
+ * | \
+ * ....V............................RESUME_WIP...... \
+ * : \ : o
+ * : \ o-------<-----busy : |
+ * : \ / / : |
+ * : RESUME_SEND_RESUME ---failed--->--------o--->(RESUME_FAILED)
+ * : / \ : |
+ * : acked rejected---->---------o--->(MISMATCH)
+ * : / :
+ * :....o..............o...............o.....o.....:
+ * | | | \
+ * completed flr stop restart-->(READY)
+ * | | |
+ * V .....V..... ......V.....
+ * (RESUMED) : FLR_WIP : : STOP_WIP :
+ * :.........: :..........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_WIP))
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME);
+}
+
+static void pf_enter_vf_resumed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_resume_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_resumed(gt, vfid);
+}
+
+static void pf_enter_vf_resume_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_resume_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_resume_failed(gt, vfid);
+}
+
+static void pf_enter_vf_resume_send_resume(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_resume_send_resume(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME))
+ return false;
+
+ err = pf_send_vf_resume(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_resume_send_resume(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_resume_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_resume_failed(gt, vfid);
+ else
+ pf_enter_vf_resume_completed(gt, vfid);
+ return true;
+}
+
+static bool pf_enter_vf_resume_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_resume_send_resume(gt, vfid);
+ return true;
+ }
+
+ return false;
}
/**
@@ -112,7 +699,134 @@ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_resume(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_RESUME_WIP);
+ int err;
+
+ if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid);
+ return -EPERM;
+ }
+
+ if (!pf_enter_vf_resume_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u resume already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err)
+ return err;
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED)) {
+ xe_gt_sriov_info(gt, "VF%u resumed!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u resume failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u resume was canceled!\n", vfid);
+ return -ECANCELED;
+}
+
+/**
+ * DOC: The VF STOP state machine
+ *
+ * The VF STOP state machine looks like::
+ *
+ * (READY,PAUSED,RESUMED)<-------<--------------------o
+ * | \
+ * stop \
+ * | \
+ * ....V..............................STOP_WIP...... \
+ * : \ : o
+ * : \ o----<----busy : |
+ * : \ / / : |
+ * : STOP_SEND_STOP--------failed--->--------o--->(STOP_FAILED)
+ * : / \ : |
+ * : acked rejected-------->--------o--->(MISMATCH)
+ * : / :
+ * :....o..............o...............o...........:
+ * | | |
+ * completed flr restart
+ * | | |
+ * V .....V..... V
+ * (STOPPED) : FLR_WIP : (READY)
+ * :.........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_WIP))
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP);
+}
+
+static void pf_enter_vf_stopped(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_stop_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_stopped(gt, vfid);
+}
+
+static void pf_enter_vf_stop_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_stop_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_stop_failed(gt, vfid);
+}
+
+static void pf_enter_vf_stop_send_stop(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_stop_send_stop(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP))
+ return false;
+
+ err = pf_send_vf_stop(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_stop_send_stop(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_stop_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_stop_failed(gt, vfid);
+ else
+ pf_enter_vf_stop_completed(gt, vfid);
+ return true;
+}
+
+static bool pf_enter_vf_stop_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_stop_send_stop(gt, vfid);
+ return true;
+ }
+ return false;
}
/**
@@ -126,7 +840,280 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_stop(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_STOP_WIP);
+ int err;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_dbg(gt, "VF%u was already stopped!\n", vfid);
+ return -ESTALE;
+ }
+
+ if (!pf_enter_vf_stop_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u stop already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err)
+ return err;
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_info(gt, "VF%u stopped!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u stop failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u stop was canceled!\n", vfid);
+ return -ECANCELED;
+}
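
The pause, resume and stop entry points above share one contract: arm the relevant state machine, wait for the WIP bit to clear, then translate the terminal state into an errno. A minimal caller sketch for the stop path (the pf_try_stop_vf() wrapper is hypothetical; only the return codes come from the function above):

	/* Hypothetical wrapper: arm the STOP machine and map its outcomes. */
	static int pf_try_stop_vf(struct xe_gt *gt, unsigned int vfid)
	{
		int err = xe_gt_sriov_pf_control_stop_vf(gt, vfid);

		if (err == -ESTALE)	/* VF was already stopped */
			return 0;
		if (err == -EALREADY)	/* another stop is already in flight */
			return 0;
		return err;		/* -EIO, -ECANCELED or a wait error */
	}
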
+
+/**
+ * DOC: The VF FLR state machine
+ *
+ * The VF FLR state machine looks like::
+ *
+ * (READY,PAUSED,STOPPED)<------------<--------------o
+ * | \
+ * flr \
+ * | \
+ * ....V..........................FLR_WIP........... \
+ * : \ : \
+ * : \ o----<----busy : |
+ * : \ / / : |
+ * : FLR_SEND_START---failed----->-----------o--->(FLR_FAILED)<---o
+ * : | \ : | |
+ * : acked rejected----->-----------o--->(MISMATCH) |
+ * : | : ^ |
+ * : v : | |
+ * : FLR_WAIT_GUC : | |
+ * : | : | |
+ * : done : | |
+ * : | : | |
+ * : v : | |
+ * : FLR_GUC_DONE : | |
+ * : | : | |
+ * : FLR_RESET_CONFIG---failed--->-----------o--------+-----------o
+ * : | : | |
+ * : FLR_RESET_DATA : | |
+ * : | : | |
+ * : FLR_RESET_MMIO : | |
+ * : | : | |
+ * : | o----<----busy : | |
+ * : |/ / : | |
+ * : FLR_SEND_FINISH----failed--->-----------o--------+-----------o
+ * : / \ : |
+ * : acked rejected----->-----------o--------o
+ * : / :
+ * :....o..............................o...........:
+ * | |
+ * completed restart
+ * | /
+ * V /
+ * (READY)<----------<------------o
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_enter_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static void pf_enter_vf_flr_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) {
+ xe_gt_sriov_dbg(gt, "VF%u FLR is already in progress\n", vfid);
+ return;
+ }
+
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_flr_send_start(gt, vfid);
+}
+
+static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) {
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START);
+ }
+}
+
+static void pf_enter_vf_flr_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_ready(gt, vfid);
+}
+
+static void pf_enter_vf_flr_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ xe_gt_sriov_notice(gt, "VF%u FLR failed!\n", vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_flr_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_flr_failed(gt, vfid);
+}
+
+static void pf_enter_vf_flr_send_finish(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_send_finish(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH))
+ return false;
+
+ err = pf_send_vf_flr_finish(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_flr_send_finish(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_flr_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_flr_failed(gt, vfid);
+ else
+ pf_enter_vf_flr_completed(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
+ return false;
+
+ /* XXX: placeholder */
+
+ pf_enter_vf_flr_send_finish(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
+ return false;
+
+ xe_gt_sriov_pf_service_reset(gt, vfid);
+ xe_gt_sriov_pf_monitor_flr(gt, vfid);
+
+ pf_enter_vf_flr_reset_mmio(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_config(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_config(struct xe_gt *gt, unsigned int vfid)
+{
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_RESET_CONFIG);
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG))
+ return false;
+
+ err = xe_gt_sriov_pf_config_sanitize(gt, vfid, timeout);
+ if (err)
+ pf_enter_vf_flr_failed(gt, vfid);
+ else
+ pf_enter_vf_flr_reset_data(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC);
+}
+
+static bool pf_exit_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START))
+ return false;
+
+ /* GuC may actually send a FLR_DONE before we get a RESPONSE */
+ pf_enter_vf_flr_wait_guc(gt, vfid);
+
+ err = pf_send_vf_flr_start(gt, vfid);
+ if (err) {
+ /* send failed, so we shouldn't expect FLR_DONE from GuC */
+ pf_exit_vf_flr_wait_guc(gt, vfid);
+
+ if (err == -EBUSY)
+ pf_enter_vf_flr_send_start(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_flr_rejected(gt, vfid);
+ else
+ pf_enter_vf_flr_failed(gt, vfid);
+ } else {
+ /*
+ * we have already moved to WAIT_GUC, maybe even to GUC_DONE
+ * but since GuC didn't complain, we may clear MISMATCH
+ */
+ pf_exit_vf_mismatch(gt, vfid);
+ }
+
+ return true;
+}
+
+static bool pf_exit_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE))
+ return false;
+
+ pf_enter_vf_flr_reset_config(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE))
+ pf_queue_vf(gt, vfid);
}
/**
@@ -140,46 +1127,56 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid)
{
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_WIP);
int err;
- /* XXX pf_send_vf_flr_start() expects ct->lock */
- mutex_lock(&gt->uc.guc.ct.lock);
- err = pf_send_vf_flr_start(gt, vfid);
- mutex_unlock(&gt->uc.guc.ct.lock);
+ pf_enter_vf_flr_wip(gt, vfid);
- return err;
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err) {
+ xe_gt_sriov_notice(gt, "VF%u FLR didn't finish in %u ms (%pe)\n",
+ vfid, jiffies_to_msecs(timeout), ERR_PTR(err));
+ return err;
+ }
+
+ if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ return -EIO;
+
+ return 0;
}
/**
* DOC: The VF FLR Flow with GuC
*
- * PF GUC PCI
- * ========================================================
- * | | |
- * (1) | [ ] <----- FLR --|
- * | [ ] :
- * (2) [ ] <-------- NOTIFY FLR --[ ]
- * [ ] |
- * (3) [ ] |
- * [ ] |
- * [ ]-- START FLR ---------> [ ]
- * | [ ]
- * (4) | [ ]
- * | [ ]
- * [ ] <--------- FLR DONE -- [ ]
- * [ ] |
- * (5) [ ] |
- * [ ] |
- * [ ]-- FINISH FLR --------> [ ]
- * | |
- *
- * Step 1: PCI HW generates interrupt to the GuC about VF FLR
- * Step 2: GuC FW sends G2H notification to the PF about VF FLR
- * Step 2a: on some platforms G2H is only received from root GuC
- * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
- * Step 3a: on some platforms PF must send H2G to all other GuCs
- * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
- * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
+ * The VF FLR flow includes several steps::
+ *
+ * PF GUC PCI
+ * ========================================================
+ * | | |
+ * (1) | [ ] <----- FLR --|
+ * | [ ] :
+ * (2) [ ] <-------- NOTIFY FLR --[ ]
+ * [ ] |
+ * (3) [ ] |
+ * [ ] |
+ * [ ]-- START FLR ---------> [ ]
+ * | [ ]
+ * (4) | [ ]
+ * | [ ]
+ * [ ] <--------- FLR DONE -- [ ]
+ * [ ] |
+ * (5) [ ] |
+ * [ ] |
+ * [ ]-- FINISH FLR --------> [ ]
+ * | |
+ *
+ * * Step 1: PCI HW generates interrupt to the GuC about VF FLR
+ * * Step 2: GuC FW sends G2H notification to the PF about VF FLR
+ * * Step 2a: on some platforms G2H is only received from root GuC
+ * * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
+ * * Step 3a: on some platforms PF must send H2G to all other GuCs
+ * * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
+ * * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
*/
static bool needs_dispatch_flr(struct xe_device *xe)
@@ -197,19 +1194,41 @@ static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid)
if (needs_dispatch_flr(xe)) {
for_each_gt(gtit, xe, gtid)
- pf_send_vf_flr_start(gtit, vfid);
+ pf_enter_vf_flr_wip(gtit, vfid);
} else {
- pf_send_vf_flr_start(gt, vfid);
+ pf_enter_vf_flr_wip(gt, vfid);
}
}
static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid)
{
- pf_send_vf_flr_finish(gt, vfid);
+ if (!pf_exit_vf_flr_wait_guc(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "Received out of order 'VF%u FLR done'\n", vfid);
+ pf_enter_vf_mismatch(gt, vfid);
+ return;
+ }
+
+ pf_enter_vf_flr_guc_done(gt, vfid);
+}
+
+static void pf_handle_vf_pause_done(struct xe_gt *gt, u32 vfid)
+{
+ if (!pf_exit_pause_wait_guc(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "Received out of order 'VF%u PAUSE done'\n", vfid);
+ pf_enter_vf_mismatch(gt, vfid);
+ return;
+ }
+
+ pf_enter_vf_pause_guc_done(gt, vfid);
}
static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
{
+ xe_gt_sriov_dbg_verbose(gt, "received VF%u event %#x\n", vfid, eventid);
+
+ if (vfid > xe_gt_sriov_pf_get_totalvfs(gt))
+ return -EPROTO;
+
switch (eventid) {
case GUC_PF_NOTIFY_VF_FLR:
pf_handle_vf_flr(gt, vfid);
@@ -218,6 +1237,7 @@ static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
pf_handle_vf_flr_done(gt, vfid);
break;
case GUC_PF_NOTIFY_VF_PAUSE_DONE:
+ pf_handle_vf_pause_done(gt, vfid);
break;
case GUC_PF_NOTIFY_VF_FIXUP_DONE:
break;
@@ -276,3 +1296,159 @@ int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32
return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid);
}
+
+static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_flr_send_start(gt, vfid))
+ return true;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC)) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
+ control_bit_to_string(XE_GT_SRIOV_STATE_FLR_WAIT_GUC));
+ return false;
+ }
+
+ if (pf_exit_vf_flr_guc_done(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_config(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_data(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_mmio(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_send_finish(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_stop_send_stop(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_pause_send_pause(gt, vfid))
+ return true;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC)) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
+ control_bit_to_string(XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC));
+ return true;
+ }
+
+ if (pf_exit_vf_pause_guc_done(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_resume_send_resume(gt, vfid))
+ return true;
+
+ return false;
+}
+
+static unsigned int pf_control_state_index(struct xe_gt *gt,
+ struct xe_gt_sriov_control_state *cs)
+{
+ return container_of(cs, struct xe_gt_sriov_metadata, control) - gt->sriov.pf.vfs;
+}
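
pf_control_state_index() recovers the VFID purely by pointer arithmetic: the control state is embedded in the per-VF metadata array, so container_of() followed by subtraction of the array base yields the index. A self-contained userspace illustration of the same trick (all names here are hypothetical):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct meta {
		int other;
		int control;	/* stand-in for the embedded control state */
	};

	int main(void)
	{
		struct meta vfs[4];
		int *cs = &vfs[2].control;	/* e.g. taken off a work list */

		/* member pointer -> containing struct -> array index (VFID) */
		printf("%td\n", container_of(cs, struct meta, control) - vfs);
		return 0;
	}
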
+
+static void pf_worker_find_work(struct xe_gt *gt)
+{
+ struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;
+ struct xe_gt_sriov_control_state *cs;
+ unsigned int vfid;
+ bool empty;
+ bool more;
+
+ spin_lock(&pfc->lock);
+ cs = list_first_entry_or_null(&pfc->list, struct xe_gt_sriov_control_state, link);
+ if (cs)
+ list_del_init(&cs->link);
+ empty = list_empty(&pfc->list);
+ spin_unlock(&pfc->lock);
+
+ if (!cs)
+ return;
+
+ /* VF metadata structures are indexed by the VFID */
+ vfid = pf_control_state_index(gt, cs);
+ xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));
+
+ more = pf_process_vf_state_machine(gt, vfid);
+ if (more)
+ pf_queue_vf(gt, vfid);
+ else if (!empty)
+ pf_queue_control_worker(gt);
+}
+
+static void control_worker_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, struct xe_gt, sriov.pf.control.worker);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ pf_worker_find_work(gt);
+}
+
+static void pf_stop_worker(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ cancel_work_sync(&gt->sriov.pf.control.worker);
+}
+
+static void control_fini_action(struct drm_device *dev, void *data)
+{
+ struct xe_gt *gt = data;
+
+ pf_stop_worker(gt);
+}
+
+/**
+ * xe_gt_sriov_pf_control_init() - Initialize PF's control data.
+ * @gt: the &xe_gt
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, totalvfs;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ totalvfs = xe_sriov_pf_get_totalvfs(xe);
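+	/* entry 0 tracks the PF itself, entries 1..totalvfs track the VFs */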
+ for (n = 0; n <= totalvfs; n++) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, n);
+
+ init_completion(&cs->done);
+ INIT_LIST_HEAD(&cs->link);
+ }
+
+ spin_lock_init(&gt->sriov.pf.control.lock);
+ INIT_LIST_HEAD(&gt->sriov.pf.control.list);
+ INIT_WORK(&gt->sriov.pf.control.worker, control_worker_func);
+
+ return drmm_add_action_or_reset(&xe->drm, control_fini_action, gt);
+}
+
+/**
+ * xe_gt_sriov_pf_control_restart() - Restart SR-IOV control data after a GT reset.
+ * @gt: the &xe_gt
+ *
+ * Any per-VF status maintained by the PF or any ongoing VF control activity
+ * performed by the PF must be reset or cancelled when the GT is reset.
+ *
+ * This function is for PF only.
+ */
+void xe_gt_sriov_pf_control_restart(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, totalvfs;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ pf_stop_worker(gt);
+
+ totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ for (n = 1; n <= totalvfs; n++)
+ pf_enter_vf_ready(gt, n);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
index 405d1586f991..c85e64f099cc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
@@ -11,6 +11,9 @@
struct xe_gt;
+int xe_gt_sriov_pf_control_init(struct xe_gt *gt);
+void xe_gt_sriov_pf_control_restart(struct xe_gt *gt);
+
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
new file mode 100644
index 000000000000..11830aafea45
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONTROL_TYPES_H_
+#define _XE_GT_SRIOV_PF_CONTROL_TYPES_H_
+
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
+
+/**
+ * enum xe_gt_sriov_control_bits - Various bits used by the PF to represent a VF state
+ *
+ * @XE_GT_SRIOV_STATE_WIP: indicates that some operations are in progress.
+ * @XE_GT_SRIOV_STATE_FLR_WIP: indicates that a VF FLR is in progress.
+ * @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command.
+ * @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF is awaiting a response from the GuC.
+ * @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: indicates that the PF needs to clear the VF's resources.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_DATA: indicates that the PF needs to clear the VF's data.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_MMIO: indicates that the PF needs to reset the VF's registers.
+ * @XE_GT_SRIOV_STATE_FLR_SEND_FINISH: indicates that the PF wants to send a FLR FINISH message.
+ * @XE_GT_SRIOV_STATE_FLR_FAILED: indicates that VF FLR sequence failed.
+ * @XE_GT_SRIOV_STATE_PAUSE_WIP: indicates that a VF pause operation is in progress.
+ * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command.
+ * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF is awaiting a response from the GuC.
+ * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed.
+ * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused.
+ * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates that a VF resume operation is in progress.
+ * @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send a RESUME command.
+ * @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed.
+ * @XE_GT_SRIOV_STATE_RESUMED: indicates that the VF was resumed.
+ * @XE_GT_SRIOV_STATE_STOP_WIP: indicates that a VF stop operation is in progress.
+ * @XE_GT_SRIOV_STATE_STOP_SEND_STOP: indicates that the PF wants to send a STOP command.
+ * @XE_GT_SRIOV_STATE_STOP_FAILED: indicates that the VF stop operation has failed.
+ * @XE_GT_SRIOV_STATE_STOPPED: indicates that the VF was stopped.
+ * @XE_GT_SRIOV_STATE_MISMATCH: indicates that the PF has detected a VF state mismatch.
+ */
+enum xe_gt_sriov_control_bits {
+ XE_GT_SRIOV_STATE_WIP = 1,
+
+ XE_GT_SRIOV_STATE_FLR_WIP,
+ XE_GT_SRIOV_STATE_FLR_SEND_START,
+ XE_GT_SRIOV_STATE_FLR_WAIT_GUC,
+ XE_GT_SRIOV_STATE_FLR_GUC_DONE,
+ XE_GT_SRIOV_STATE_FLR_RESET_CONFIG,
+ XE_GT_SRIOV_STATE_FLR_RESET_DATA,
+ XE_GT_SRIOV_STATE_FLR_RESET_MMIO,
+ XE_GT_SRIOV_STATE_FLR_SEND_FINISH,
+ XE_GT_SRIOV_STATE_FLR_FAILED,
+
+ XE_GT_SRIOV_STATE_PAUSE_WIP,
+ XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE,
+ XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC,
+ XE_GT_SRIOV_STATE_PAUSE_GUC_DONE,
+ XE_GT_SRIOV_STATE_PAUSE_FAILED,
+ XE_GT_SRIOV_STATE_PAUSED,
+
+ XE_GT_SRIOV_STATE_RESUME_WIP,
+ XE_GT_SRIOV_STATE_RESUME_SEND_RESUME,
+ XE_GT_SRIOV_STATE_RESUME_FAILED,
+ XE_GT_SRIOV_STATE_RESUMED,
+
+ XE_GT_SRIOV_STATE_STOP_WIP,
+ XE_GT_SRIOV_STATE_STOP_SEND_STOP,
+ XE_GT_SRIOV_STATE_STOP_FAILED,
+ XE_GT_SRIOV_STATE_STOPPED,
+
+ XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1,
+};
+
+/**
+ * struct xe_gt_sriov_control_state - GT-level per-VF control state.
+ *
+ * Used by the PF driver to maintain per-VF control data.
+ */
+struct xe_gt_sriov_control_state {
+ /** @state: VF state bits */
+ unsigned long state;
+
+ /** @done: completion of async operations */
+ struct completion done;
+
+ /** @link: link into worker list */
+ struct list_head link;
+};
+
+/**
+ * struct xe_gt_sriov_pf_control - GT-level control data.
+ *
+ * Used by the PF driver to maintain its data.
+ */
+struct xe_gt_sriov_pf_control {
+	/** @worker: worker that executes VF operations */
+ struct work_struct worker;
+
+ /** @list: list of VF entries that have a pending work */
+ struct list_head list;
+
+ /** @lock: protects VF pending list */
+ spinlock_t lock;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 40cbaea3ef44..28e1b130bf87 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "xe_gt_sriov_pf_config_types.h"
+#include "xe_gt_sriov_pf_control_types.h"
#include "xe_gt_sriov_pf_monitor_types.h"
#include "xe_gt_sriov_pf_policy_types.h"
#include "xe_gt_sriov_pf_service_types.h"
@@ -23,6 +24,9 @@ struct xe_gt_sriov_metadata {
/** @monitor: per-VF monitoring data. */
struct xe_gt_sriov_monitor monitor;
+ /** @control: per-VF control data. */
+ struct xe_gt_sriov_control_state control;
+
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
};
@@ -30,12 +34,14 @@ struct xe_gt_sriov_metadata {
/**
* struct xe_gt_sriov_pf - GT level PF virtualization data.
* @service: service data.
+ * @control: control data.
* @policy: policy data.
* @spare: PF-only provisioning configuration.
* @vfs: metadata for all VFs.
*/
struct xe_gt_sriov_pf {
struct xe_gt_sriov_pf_service service;
+ struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 8892d6c2291e..4ebc82e607af 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -495,6 +495,25 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
return gt->sriov.vf.self_config.lmem_size;
}
+static struct xe_ggtt_node *
+vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
+{
+ struct xe_ggtt_node *node;
+ int err;
+
+ node = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(node))
+ return node;
+
+ err = xe_ggtt_node_insert_balloon(node, start, end);
+ if (err) {
+ xe_ggtt_node_fini(node);
+ return ERR_PTR(err);
+ }
+
+ return node;
+}
+
static int vf_balloon_ggtt(struct xe_gt *gt)
{
struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
@@ -502,7 +521,6 @@ static int vf_balloon_ggtt(struct xe_gt *gt)
struct xe_ggtt *ggtt = tile->mem.ggtt;
struct xe_device *xe = gt_to_xe(gt);
u64 start, end;
- int err;
xe_gt_assert(gt, IS_SRIOV_VF(xe));
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
@@ -528,35 +546,31 @@ static int vf_balloon_ggtt(struct xe_gt *gt)
start = xe_wopcm_size(xe);
end = config->ggtt_base;
if (end != start) {
- err = xe_ggtt_balloon(ggtt, start, end, &tile->sriov.vf.ggtt_balloon[0]);
- if (err)
- goto failed;
+ tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
}
start = config->ggtt_base + config->ggtt_size;
end = GUC_GGTT_TOP;
if (end != start) {
- err = xe_ggtt_balloon(ggtt, start, end, &tile->sriov.vf.ggtt_balloon[1]);
- if (err)
- goto deballoon;
+ tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
+ xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
+ }
}
return 0;
-
-deballoon:
- xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[0]);
-failed:
- return err;
}
static void deballoon_ggtt(struct drm_device *drm, void *arg)
{
struct xe_tile *tile = arg;
- struct xe_ggtt *ggtt = tile->mem.ggtt;
xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
- xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[1]);
- xe_ggtt_deballoon(ggtt, &tile->sriov.vf.ggtt_balloon[0]);
+ xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
}
/**
@@ -893,6 +907,32 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
}
/**
+ * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
+ * @gt: the &xe_gt
+ * @reg: the register to write
+ * @val: value to write
+ *
+ * This function is for VF use only.
+ * Currently it will trigger a WARN when running on a debug build.
+ */
+void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+{
+ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, !reg.vf);
+
+ /*
+ * In the future, we may want to handle selected writes to inaccessible
+ * registers in some custom way, but for now let's just log a warning
+	 * about such attempts, as we are likely doing something wrong.
+ */
+ xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
+ "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
+ val, reg.addr, addr - reg.addr);
+}
+
+/**
* xe_gt_sriov_vf_print_config - Print VF self config.
* @gt: the &xe_gt
* @p: the &drm_printer
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 0de7f8cbcfa6..e541ce57bec2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -22,6 +22,7 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
+void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
new file mode 100644
index 000000000000..c7364a5aef8f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/atomic.h>
+
+#include <drm/drm_print.h>
+
+#include "xe_gt.h"
+#include "xe_gt_stats.h"
+
+/**
+ * xe_gt_stats_incr - Increments the specified stats counter
+ * @gt: the &xe_gt
+ * @id: the &xe_gt_stats_id counter to increment
+ * @incr: amount to increment the counter by
+ *
+ * Increments the specified stats counter.
+ */
+void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
+{
+ if (id >= __XE_GT_STATS_NUM_IDS)
+ return;
+
+ atomic_add(incr, &gt->stats.counters[id]);
+}
+
+static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
+ "tlb_inval_count",
+};
+
+/**
+ * xe_gt_stats_print_info - Print the GT stats
+ * @gt: the &xe_gt
+ * @p: the &drm_printer to which the stats are printed
+ *
+ * This prints out all the available GT stats.
+ */
+int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p)
+{
+ enum xe_gt_stats_id id;
+
+ for (id = 0; id < __XE_GT_STATS_NUM_IDS; ++id)
+ drm_printf(p, "%s: %d\n", stat_description[id],
+ atomic_read(&gt->stats.counters[id]));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
new file mode 100644
index 000000000000..91d944f6c4e4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_STATS_H_
+#define _XE_GT_STATS_H_
+
+struct xe_gt;
+struct drm_printer;
+
+enum xe_gt_stats_id {
+ XE_GT_STATS_ID_TLB_INVAL,
+ /* must be the last entry */
+ __XE_GT_STATS_NUM_IDS,
+};
+
+#ifdef CONFIG_DEBUG_FS
+int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
+#else
+static inline void
+xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id,
+ int incr)
+{
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 87cb76a8718c..cca9cf536f76 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -12,6 +12,7 @@
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
+#include "xe_gt_stats.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
@@ -213,6 +214,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
gt->tlb_invalidation.seqno = 1;
}
mutex_unlock(&guc->ct.lock);
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 25ff03ab8448..0662f71c6ede 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -6,6 +6,7 @@
#include "xe_gt_topology.h"
#include <linux/bitmap.h>
+#include <linux/compiler.h>
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
@@ -31,7 +32,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
}
static void
-load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
+load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type)
{
struct xe_device *xe = gt_to_xe(gt);
u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
@@ -47,11 +48,13 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
if (GRAPHICS_VERx100(xe) < 1250)
reg_val = ~reg_val & XELP_EU_MASK;
- /* On PVC, one bit = one EU */
- if (GRAPHICS_VERx100(xe) == 1260) {
+ if (GRAPHICS_VERx100(xe) == 1260 || GRAPHICS_VER(xe) >= 20) {
+ /* SIMD16 EUs, one bit == one EU */
+ *eu_type = XE_GT_EU_TYPE_SIMD16;
val = reg_val;
} else {
- /* All other platforms, one bit = 2 EU */
+ /* SIMD8 EUs, one bit == 2 EU */
+ *eu_type = XE_GT_EU_TYPE_SIMD8;
for (i = 0; i < fls(reg_val); i++)
if (reg_val & BIT(i))
val |= 0x3 << 2 * i;
@@ -213,7 +216,7 @@ xe_gt_topology_init(struct xe_gt *gt)
XEHP_GT_COMPUTE_DSS_ENABLE,
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
XE2_GT_COMPUTE_DSS_2);
- load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
+ load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, &gt->fuse_topo.eu_type);
load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
@@ -221,6 +224,18 @@ xe_gt_topology_init(struct xe_gt *gt)
xe_gt_topology_dump(gt, &p);
}
+static const char *eu_type_to_str(enum xe_gt_eu_type eu_type)
+{
+ switch (eu_type) {
+ case XE_GT_EU_TYPE_SIMD16:
+ return "simd16";
+ case XE_GT_EU_TYPE_SIMD8:
+ return "simd8";
+ }
+
+ return NULL;
+}
+
void
xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
{
@@ -231,6 +246,8 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "EU mask per DSS: %*pb\n", XE_MAX_EU_FUSE_BITS,
gt->fuse_topo.eu_mask_per_dss);
+ drm_printf(p, "EU type: %s\n",
+ eu_type_to_str(gt->fuse_topo.eu_type));
drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
gt->fuse_topo.l3_bank_mask);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index c582541970df..3d1c51de0268 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -10,6 +10,7 @@
#include "xe_gt_idle_types.h"
#include "xe_gt_sriov_pf_types.h"
#include "xe_gt_sriov_vf_types.h"
+#include "xe_gt_stats.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_oa.h"
@@ -27,6 +28,11 @@ enum xe_gt_type {
XE_GT_TYPE_MEDIA,
};
+enum xe_gt_eu_type {
+ XE_GT_EU_TYPE_SIMD8,
+ XE_GT_EU_TYPE_SIMD16,
+};
+
#define XE_MAX_DSS_FUSE_REGS 3
#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
#define XE_MAX_EU_FUSE_REGS 1
@@ -128,6 +134,14 @@ struct xe_gt {
u8 has_indirect_ring_state:1;
} info;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ /** @stats: GT stats */
+ struct {
+ /** @stats.counters: counters for various GT stats */
+ atomic_t counters[__XE_GT_STATS_NUM_IDS];
+ } stats;
+#endif
+
/**
* @mmio: mmio info for GT. All GTs within a tile share the same
* register space, but have their own copy of GSI registers at a
@@ -233,9 +247,14 @@ struct xe_gt {
struct pf_queue {
/** @usm.pf_queue.gt: back pointer to GT */
struct xe_gt *gt;
-#define PF_QUEUE_NUM_DW 128
/** @usm.pf_queue.data: data in the page fault queue */
- u32 data[PF_QUEUE_NUM_DW];
+ u32 *data;
+ /**
+ * @usm.pf_queue.num_dw: number of DWORDS in the page
+ * fault queue. Dynamically calculated based on the number
+ * of compute resources available.
+ */
+ u32 num_dw;
/**
* @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
* moved by worker which processes faults (consumer).
@@ -337,6 +356,12 @@ struct xe_gt {
/** @fuse_topo.l3_bank_mask: L3 bank mask */
xe_l3_bank_mask_t l3_bank_mask;
+
+ /**
+ * @fuse_topo.eu_type: type/width of EU stored in
+ * fuse_topo.eu_mask_per_dss
+ */
+ enum xe_gt_eu_type eu_type;
} fuse_topo;
/** @steering: register steering for individual HW units */
@@ -351,6 +376,12 @@ struct xe_gt {
} steering[NUM_STEERING_TYPES];
/**
+ * @steering_dss_per_grp: number of DSS per steering group (gslice,
+ * cslice, etc.).
+ */
+ unsigned int steering_dss_per_grp;
+
+ /**
* @mcr_lock: protects the MCR_SELECTOR register for the duration
* of a steered operation
*/
@@ -370,8 +401,14 @@ struct xe_gt {
unsigned long *engine;
/** @wa_active.lrc: bitmap with active LRC workarounds */
unsigned long *lrc;
- /** @wa_active.oob: bitmap with active OOB workaroudns */
+ /** @wa_active.oob: bitmap with active OOB workarounds */
unsigned long *oob;
+ /**
+		 * @wa_active.oob_initialized: mark oob as initialized, to help
+		 * detect misuse of XE_WA() - it can only be called during
+		 * initialization, after the OOB WAs have been processed
+ */
+ bool oob_initialized;
} wa_active;
/** @user_engines: engines present in GT and available to userspace */
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index de0fe9e65746..52df28032a6f 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -350,6 +350,8 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
+ xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
+
ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc);
if (ret)
goto out;
@@ -358,8 +360,6 @@ int xe_guc_init(struct xe_guc *guc)
xe_guc_comm_init_early(guc);
- xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
-
return 0;
out:
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index e0bbf98f849d..42116b167c98 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -11,6 +11,18 @@
#include "xe_hw_engine_types.h"
#include "xe_macros.h"
+/*
+ * GuC version number components are defined to be only 8-bit size,
+ * so converting to a 32bit 8.8.8 integer allows simple (and safe)
+ * numerical comparisons.
+ */
+#define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
+#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
+#define GUC_SUBMIT_VER(guc) \
+ MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY])
+#define GUC_FIRMWARE_VER(guc) \
+ MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_RELEASE])
+
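
Since the packed form is a plain integer, firmware version gates reduce to a single comparison; a hedged usage sketch (the 1.3.0 threshold and the helper name are purely illustrative):

	/* Illustrative only: gate behaviour on the negotiated submission ABI. */
	static bool guc_submit_at_least_1_3(struct xe_guc *guc)
	{
		return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 3, 0);
	}
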
struct drm_printer;
void xe_guc_comm_init_early(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 1c60b685dbc6..d1902a8581ca 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -24,6 +24,7 @@
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
+#include "xe_uc_fw.h"
#include "xe_wa.h"
/* Slack of a few additional entries per engine */
@@ -367,6 +368,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
0xC40,
&offset, &remain);
+ if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406))
+ guc_waklv_enable_simple(ads,
+ GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
+ &offset, &remain);
+
size = guc_ads_waklv_size(ads) - remain;
if (!size)
return;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 64afc90ad2c5..f24dd5223926 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -105,12 +105,20 @@ ct_to_xe(struct xe_guc_ct *ct)
* enough space to avoid backpressure on the driver. We increase the size
* of the receive buffer (relative to the send) to ensure a G2H response
* CTB has a landing spot.
+ *
+ * In addition to submissions, the G2H buffer needs to be able to hold
+ * enough space for recoverable page fault notifications. The number of
+ * page faults is interrupt driven and can be as much as the number of
+ * compute resources available. However, most of the actual work for these
+ * is in a separate page fault worker thread. Therefore we only need to
+ * make sure the queue has enough space to handle all of the submissions
+ * and responses and an extra buffer for incoming page faults.
*/
#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE (SZ_4K)
-#define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE)
-#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4)
+#define CTB_G2H_BUFFER_SIZE (SZ_128K)
+#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
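
For reference, the numbers behind the resized defines (SZ_4K = 4 KiB, SZ_128K = 128 KiB):

	/* old: G2H ring = 4 * 4 KiB = 16 KiB, reserved room = 16 / 4 =  4 KiB
	 * new: G2H ring = 128 KiB,            reserved room = 128 / 2 = 64 KiB
	 * The reserved half is what absorbs bursts of unsolicited G2H traffic,
	 * such as the recoverable page-fault notifications described above.
	 */
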
/**
* xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
@@ -516,6 +524,7 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
lockdep_assert_held(&ct->fast_lock);
xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
+ xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);
ct->ctbs.g2h.info.space += g2h_len;
if (!--ct->g2h_outstanding)
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
index d9b570a154a2..af2c817d552c 100644
--- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
@@ -6,6 +6,7 @@
#include "xe_guc_hwconfig.h"
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
@@ -103,3 +104,99 @@ void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst)
xe_map_memcpy_from(xe, dst, &guc->hwconfig.bo->vmap, 0,
guc->hwconfig.size);
}
+
+void xe_guc_hwconfig_dump(struct xe_guc *guc, struct drm_printer *p)
+{
+ size_t size = xe_guc_hwconfig_size(guc);
+ u32 *hwconfig;
+ u64 num_dw;
+ u32 extra_bytes;
+ int i = 0;
+
+ if (size == 0) {
+ drm_printf(p, "No hwconfig available\n");
+ return;
+ }
+
+ num_dw = div_u64_rem(size, sizeof(u32), &extra_bytes);
+
+ hwconfig = kzalloc(size, GFP_KERNEL);
+ if (!hwconfig) {
+ drm_printf(p, "Error: could not allocate hwconfig memory\n");
+ return;
+ }
+
+ xe_guc_hwconfig_copy(guc, hwconfig);
+
+ /* An entry requires at least three dwords for key, length, value */
+ while (i + 3 <= num_dw) {
+ u32 attribute = hwconfig[i++];
+		 * we have already moved to WAIT_GUC, maybe even to GUC_DONE,
+		 * but since the GuC didn't complain, we may clear MISMATCH
+ if (i + len_dw > num_dw) {
+ drm_printf(p, "Error: Attribute %u is %u dwords, but only %llu remain\n",
+ attribute, len_dw, num_dw - i);
+ len_dw = num_dw - i;
+ }
+
+ /*
+ * If it's a single dword (as most hwconfig attributes are),
+ * then it's probably a number that makes sense to display
+ * in decimal form. In the rare cases where it's more than
+ * one dword, just print it in hex form and let the user
+ * figure out how to interpret it.
+ */
+ if (len_dw == 1)
+ drm_printf(p, "[%2u] = %u\n", attribute, hwconfig[i]);
+ else
+ drm_printf(p, "[%2u] = { %*ph }\n", attribute,
+ (int)(len_dw * sizeof(u32)), &hwconfig[i]);
+ i += len_dw;
+ }
+
+ if (i < num_dw || extra_bytes)
+ drm_printf(p, "Error: %llu extra bytes at end of hwconfig\n",
+ (num_dw - i) * sizeof(u32) + extra_bytes);
+
+ kfree(hwconfig);
+}
+
+/*
+ * Lookup a specific 32-bit attribute value in the GuC's hwconfig table.
+ */
+int xe_guc_hwconfig_lookup_u32(struct xe_guc *guc, u32 attribute, u32 *val)
+{
+ size_t size = xe_guc_hwconfig_size(guc);
+ u64 num_dw = div_u64(size, sizeof(u32));
+ u32 *hwconfig;
+ bool found = false;
+ int i = 0;
+
+ if (num_dw == 0)
+ return -EINVAL;
+
+ hwconfig = kzalloc(size, GFP_KERNEL);
+ if (!hwconfig)
+ return -ENOMEM;
+
+ xe_guc_hwconfig_copy(guc, hwconfig);
+
+ /* An entry requires at least three dwords for key, length, value */
+ while (i + 3 <= num_dw) {
+ u32 key = hwconfig[i++];
+ u32 len_dw = hwconfig[i++];
+
+ if (key != attribute) {
+ i += len_dw;
+ continue;
+ }
+
+ *val = hwconfig[i];
+ found = true;
+ break;
+ }
+
+ kfree(hwconfig);
+
+ return found ? 0 : -ENOENT;
+}
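
A hedged usage sketch of the new lookup helper (the wrapper and its fallback semantics are illustrative, not part of this patch):

	/* Illustrative caller: fetch a 32-bit hwconfig attribute with a fallback. */
	static u32 guc_hwconfig_attr_or(struct xe_guc *guc, u32 attribute, u32 fallback)
	{
		u32 val;

		return xe_guc_hwconfig_lookup_u32(guc, attribute, &val) ? fallback : val;
	}
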
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.h b/drivers/gpu/drm/xe/xe_guc_hwconfig.h
index b5794d641900..ab4e5038236e 100644
--- a/drivers/gpu/drm/xe/xe_guc_hwconfig.h
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.h
@@ -8,10 +8,13 @@
#include <linux/types.h>
+struct drm_printer;
struct xe_guc;
int xe_guc_hwconfig_init(struct xe_guc *guc);
u32 xe_guc_hwconfig_size(struct xe_guc *guc);
void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst);
+void xe_guc_hwconfig_dump(struct xe_guc *guc, struct drm_printer *p);
+int xe_guc_hwconfig_lookup_u32(struct xe_guc *guc, u32 attribute, u32 *val);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.c b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
index cd0549d0ef89..e845425d670b 100644
--- a/drivers/gpu/drm/xe/xe_guc_id_mgr.c
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
@@ -97,8 +97,8 @@ int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int limit)
if (ret)
return ret;
- xe_gt_info(idm_to_gt(idm), "using %u GUC ID%s\n",
- idm->total, str_plural(idm->total));
+ xe_gt_dbg(idm_to_gt(idm), "using %u GuC ID%s\n",
+ idm->total, str_plural(idm->total));
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index ccd574e948aa..034b29984d5e 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -1042,7 +1042,7 @@ static void xe_guc_pc_fini_hw(void *arg)
return;
XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
- XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
+ xe_guc_pc_gucrc_disable(pc);
XE_WARN_ON(xe_guc_pc_stop(pc));
/* Bind requested freq to mert_freq_cap before unload */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 59b36c7998c2..fbbe6a487bbb 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1071,7 +1071,9 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
struct xe_exec_queue *q = job->q;
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc *guc = exec_queue_to_guc(q);
+ const char *process_name = "no process";
int err = -ETIME;
+ pid_t pid = -1;
int i = 0;
bool wedged, skip_timeout_check;
@@ -1168,9 +1170,14 @@ trigger_reset:
goto sched_enable;
}
- xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+ if (q->vm && q->vm->xef) {
+ process_name = q->vm->xef->process_name;
+ pid = q->vm->xef->pid;
+ }
+ xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
- q->guc->id, q->flags);
+ q->guc->id, q->flags, process_name, pid);
+
trace_xe_sched_job_timedout(job);
if (!exec_queue_killed(q))
@@ -1312,6 +1319,15 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
kfree(msg);
}
+static void __suspend_fence_signal(struct xe_exec_queue *q)
+{
+ if (!q->guc->suspend_pending)
+ return;
+
+ WRITE_ONCE(q->guc->suspend_pending, false);
+ wake_up(&q->guc->suspend_wait);
+}
+
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
@@ -1321,9 +1337,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
guc_read_stopped(guc));
xe_assert(xe, q->guc->suspend_pending);
- q->guc->suspend_pending = false;
- smp_wmb();
- wake_up(&q->guc->suspend_wait);
+ __suspend_fence_signal(q);
}
static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
@@ -1360,9 +1374,11 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
struct xe_exec_queue *q = msg->private_data;
if (guc_exec_queue_allowed_to_change_state(q)) {
- q->guc->resume_time = RESUME_PENDING;
clear_exec_queue_suspended(q);
- enable_scheduling(q);
+ if (!exec_queue_enabled(q)) {
+ q->guc->resume_time = RESUME_PENDING;
+ enable_scheduling(q);
+ }
} else {
clear_exec_queue_suspended(q);
}
@@ -1372,6 +1388,8 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
#define SET_SCHED_PROPS 2
#define SUSPEND 3
#define RESUME 4
+#define OPCODE_MASK 0xf
+#define MSG_LOCKED BIT(8)
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{
@@ -1416,7 +1434,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
struct xe_device *xe = guc_to_xe(guc);
struct xe_guc_exec_queue *ge;
long timeout;
- int err;
+ int err, i;
xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
@@ -1428,6 +1446,9 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
ge->q = q;
init_waitqueue_head(&ge->suspend_wait);
+ for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
+ INIT_LIST_HEAD(&ge->static_msgs[i].link);
+
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
@@ -1480,6 +1501,7 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
{
trace_xe_exec_queue_kill(q);
set_exec_queue_killed(q);
+ __suspend_fence_signal(q);
xe_guc_exec_queue_trigger_cleanup(q);
}
@@ -1489,11 +1511,26 @@ static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg
xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
INIT_LIST_HEAD(&msg->link);
- msg->opcode = opcode;
+ msg->opcode = opcode & OPCODE_MASK;
msg->private_data = q;
trace_xe_sched_msg_add(msg);
- xe_sched_add_msg(&q->guc->sched, msg);
+ if (opcode & MSG_LOCKED)
+ xe_sched_add_msg_locked(&q->guc->sched, msg);
+ else
+ xe_sched_add_msg(&q->guc->sched, msg);
+}
+
+static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
+ struct xe_sched_msg *msg,
+ u32 opcode)
+{
+ if (!list_empty(&msg->link))
+ return false;
+
+ guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED);
+
+ return true;
}
#define STATIC_MSG_CLEANUP 0
@@ -1567,34 +1604,59 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
static int guc_exec_queue_suspend(struct xe_exec_queue *q)
{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
- if (exec_queue_killed_or_banned_or_wedged(q) || q->guc->suspend_pending)
+ if (exec_queue_killed_or_banned_or_wedged(q))
return -EINVAL;
- q->guc->suspend_pending = true;
- guc_exec_queue_add_msg(q, msg, SUSPEND);
+ xe_sched_msg_lock(sched);
+ if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
+ q->guc->suspend_pending = true;
+ xe_sched_msg_unlock(sched);
return 0;
}
-static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
+ int ret;
- wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
- guc_read_stopped(guc));
+ /*
+	 * We likely don't need to check exec_queue_killed() since we clear
+	 * suspend_pending upon kill, but to be paranoid about races in which
+	 * suspend_pending is set after the kill, also check for kill here.
+ */
+ ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
+ !READ_ONCE(q->guc->suspend_pending) ||
+ exec_queue_killed(q) ||
+ guc_read_stopped(guc),
+ HZ * 5);
+
+ if (!ret) {
+ xe_gt_warn(guc_to_gt(guc),
+ "Suspend fence, guc_id=%d, failed to respond",
+ q->guc->id);
+ /* XXX: Trigger GT reset? */
+ return -ETIME;
+ }
+
+ return ret < 0 ? ret : 0;
}
static void guc_exec_queue_resume(struct xe_exec_queue *q)
{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
xe_assert(xe, !q->guc->suspend_pending);
- guc_exec_queue_add_msg(q, msg, RESUME);
+ xe_sched_msg_lock(sched);
+ guc_exec_queue_try_add_msg(q, msg, RESUME);
+ xe_sched_msg_unlock(sched);
}
static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 1c9d38b6f5f1..65b2e147c4b9 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -92,7 +92,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- if (!HAS_HECI_GSCFI(xe))
+ if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
return;
if (heci_gsc->adev) {
@@ -177,12 +177,14 @@ void xe_heci_gsc_init(struct xe_device *xe)
const struct heci_gsc_def *def;
int ret;
- if (!HAS_HECI_GSCFI(xe))
+ if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
return;
heci_gsc->irq = -1;
- if (xe->info.platform == XE_PVC) {
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ def = &heci_gsc_def_dg2;
+ } else if (xe->info.platform == XE_PVC) {
def = &heci_gsc_def_pvc;
} else if (xe->info.platform == XE_DG2) {
def = &heci_gsc_def_dg2;
@@ -232,3 +234,23 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if (ret)
drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
}
+
+void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
+{
+ int ret;
+
+ if ((iir & CSC_IRQ_INTF(1)) == 0)
+ return;
+
+ if (!HAS_HECI_CSCFI(xe)) {
+ drm_warn_once(&xe->drm, "CSC irq: not supported");
+ return;
+ }
+
+ if (xe->heci_gsc.irq < 0)
+ return;
+
+ ret = generic_handle_irq(xe->heci_gsc.irq);
+ if (ret)
+ drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
+}
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h
index 9db454478fae..48b3b1838045 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.h
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.h
@@ -11,10 +11,15 @@ struct xe_device;
struct mei_aux_device;
/*
- * The HECI1 bit corresponds to bit15 and HECI2 to bit14.
+ * GSC HECI1 bit corresponds to bit15 and HECI2 to bit14.
* The reason for this is to allow growth for more interfaces in the future.
*/
-#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
+#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
+
+/*
+ * CSC HECI1 bit corresponds to bit9 and HECI2 to bit10.
+ */
+#define CSC_IRQ_INTF(_x) BIT(9 + (_x))
/**
* struct xe_heci_gsc - graphics security controller for xe, HECI interface
@@ -31,5 +36,6 @@ struct xe_heci_gsc {
void xe_heci_gsc_init(struct xe_device *xe);
void xe_heci_gsc_fini(struct xe_device *xe);
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir);
+void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir);
#endif /* __XE_HECI_GSC_DEV_H__ */
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index bec4366e5513..f5459f97af23 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -43,14 +43,6 @@ huc_to_guc(struct xe_huc *huc)
return &container_of(huc, struct xe_uc, huc)->guc;
}
-static void free_gsc_pkt(struct drm_device *drm, void *arg)
-{
- struct xe_huc *huc = arg;
-
- xe_bo_unpin_map_no_vm(huc->gsc_pkt);
- huc->gsc_pkt = NULL;
-}
-
#define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
static int huc_alloc_gsc_pkt(struct xe_huc *huc)
{
@@ -59,17 +51,16 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
struct xe_bo *bo;
/* we use a single object for both input and output */
- bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
- PXP43_HUC_AUTH_INOUT_SIZE * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
+ bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt),
+ PXP43_HUC_AUTH_INOUT_SIZE * 2,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
huc->gsc_pkt = bo;
- return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
+ return 0;
}
int xe_huc_init(struct xe_huc *huc)
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 07ed9fd28f19..c9c3beb3ce8d 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -5,7 +5,10 @@
#include "xe_hw_engine.h"
+#include <linux/nospec.h>
+
#include <drm/drm_managed.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
@@ -20,6 +23,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"
+#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
@@ -263,19 +267,28 @@ static const struct engine_info engine_infos[] = {
},
};
-static void hw_engine_fini(struct drm_device *drm, void *arg)
+static void hw_engine_fini(void *arg)
{
struct xe_hw_engine *hwe = arg;
if (hwe->exl_port)
xe_execlist_port_destroy(hwe->exl_port);
- xe_lrc_put(hwe->kernel_lrc);
hwe->gt = NULL;
}
-static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
- u32 val)
+/**
+ * xe_hw_engine_mmio_write32() - Write engine register
+ * @hwe: engine
+ * @reg: register to write into
+ * @val: desired 32-bit value to write
+ *
+ * This function will write val into an engine specific register.
+ * Forcewake must be held by the caller.
+ *
+ */
+void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
+ struct xe_reg reg, u32 val)
{
xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
@@ -285,7 +298,17 @@ static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
xe_mmio_write32(hwe->gt, reg, val);
}
-static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
+/**
+ * xe_hw_engine_mmio_read32() - Read engine register
+ * @hwe: engine
+ * @reg: register to read from
+ *
+ * This function will read from an engine specific register.
+ * Forcewake must be held by the caller.
+ *
+ * Return: value of the 32-bit register.
+ */
+u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
@@ -304,14 +327,14 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
xe_mmio_write32(hwe->gt, RCU_MODE,
_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
- hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
- hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
- xe_bo_ggtt_addr(hwe->hwsp));
- hw_engine_mmio_write32(hwe, RING_MODE(0),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
- hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
- _MASKED_BIT_DISABLE(STOP_RING));
- hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
+ xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
+ xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
+ xe_bo_ggtt_addr(hwe->hwsp));
+ xe_hw_engine_mmio_write32(hwe, RING_MODE(0),
+ _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+ xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
+ _MASKED_BIT_DISABLE(STOP_RING));
+ xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}
static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
@@ -425,6 +448,12 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
0xA,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ /* Enable Priority Mem Read */
+ { XE_RTP_NAME("Priority_Mem_Read"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
{}
};
@@ -528,21 +557,13 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
goto err_name;
}
- hwe->kernel_lrc = xe_lrc_create(hwe, NULL, SZ_16K);
- if (IS_ERR(hwe->kernel_lrc)) {
- err = PTR_ERR(hwe->kernel_lrc);
- goto err_hwsp;
- }
-
if (!xe_device_uc_enabled(xe)) {
hwe->exl_port = xe_execlist_port_create(xe, hwe);
if (IS_ERR(hwe->exl_port)) {
err = PTR_ERR(hwe->exl_port);
- goto err_kernel_lrc;
+ goto err_hwsp;
}
- }
-
- if (xe_device_uc_enabled(xe)) {
+ } else {
/* GSCCS has a special interrupt for reset */
if (hwe->class == XE_ENGINE_CLASS_OTHER)
hwe->irq_handler = xe_gsc_hwe_irq_handler;
@@ -555,10 +576,8 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
gt->usm.reserved_bcs_instance = hwe->instance;
- return drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
+ return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
-err_kernel_lrc:
- xe_lrc_put(hwe->kernel_lrc);
err_hwsp:
xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
@@ -761,6 +780,9 @@ int xe_hw_engines_init(struct xe_gt *gt)
}
hw_engine_setup_logical_mapping(gt);
+ err = xe_hw_engine_setup_groups(gt);
+ if (err)
+ return err;
return 0;
}
@@ -791,7 +813,7 @@ xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
unsigned int dss;
u16 group, instance;
- snapshot->reg.instdone.ring = hw_engine_mmio_read32(hwe, RING_INSTDONE(0));
+ snapshot->reg.instdone.ring = xe_hw_engine_mmio_read32(hwe, RING_INSTDONE(0));
if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
return;
@@ -887,53 +909,53 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
return snapshot;
snapshot->reg.ring_execlist_status =
- hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
- val = hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
+ xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
snapshot->reg.ring_execlist_status |= val << 32;
snapshot->reg.ring_execlist_sq_contents =
- hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
- val = hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
+ xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
snapshot->reg.ring_execlist_sq_contents |= val << 32;
- snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
- val = hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
+ snapshot->reg.ring_acthd = xe_hw_engine_mmio_read32(hwe, RING_ACTHD(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
snapshot->reg.ring_acthd |= val << 32;
- snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
- val = hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
+ snapshot->reg.ring_bbaddr = xe_hw_engine_mmio_read32(hwe, RING_BBADDR(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
snapshot->reg.ring_bbaddr |= val << 32;
snapshot->reg.ring_dma_fadd =
- hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
- val = hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
+ xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
snapshot->reg.ring_dma_fadd |= val << 32;
- snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
- snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
- snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0));
+ snapshot->reg.ring_hwstam = xe_hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
+ snapshot->reg.ring_hws_pga = xe_hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
+ snapshot->reg.ring_start = xe_hw_engine_mmio_read32(hwe, RING_START(0));
if (GRAPHICS_VERx100(hwe->gt->tile->xe) >= 2000) {
- val = hw_engine_mmio_read32(hwe, RING_START_UDW(0));
+ val = xe_hw_engine_mmio_read32(hwe, RING_START_UDW(0));
snapshot->reg.ring_start |= val << 32;
}
if (xe_gt_has_indirect_ring_state(hwe->gt)) {
snapshot->reg.indirect_ring_state =
- hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0));
+ xe_hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0));
}
snapshot->reg.ring_head =
- hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
+ xe_hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
snapshot->reg.ring_tail =
- hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
- snapshot->reg.ring_ctl = hw_engine_mmio_read32(hwe, RING_CTL(0));
+ xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
+ snapshot->reg.ring_ctl = xe_hw_engine_mmio_read32(hwe, RING_CTL(0));
snapshot->reg.ring_mi_mode =
- hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
- snapshot->reg.ring_mode = hw_engine_mmio_read32(hwe, RING_MODE(0));
- snapshot->reg.ring_imr = hw_engine_mmio_read32(hwe, RING_IMR(0));
- snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0));
- snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
- snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
- snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));
+ xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
+ snapshot->reg.ring_mode = xe_hw_engine_mmio_read32(hwe, RING_MODE(0));
+ snapshot->reg.ring_imr = xe_hw_engine_mmio_read32(hwe, RING_IMR(0));
+ snapshot->reg.ring_esr = xe_hw_engine_mmio_read32(hwe, RING_ESR(0));
+ snapshot->reg.ring_emr = xe_hw_engine_mmio_read32(hwe, RING_EMR(0));
+ snapshot->reg.ring_eir = xe_hw_engine_mmio_read32(hwe, RING_EIR(0));
+ snapshot->reg.ipehr = xe_hw_engine_mmio_read32(hwe, RING_IPEHR(0));
xe_hw_engine_snapshot_instdone_capture(hwe, snapshot);
if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
@@ -1135,3 +1157,41 @@ enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
{
return engine_infos[hwe->engine_id].domain;
}
+
+static const enum xe_engine_class user_to_xe_engine_class[] = {
+ [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
+ [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
+ [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
+ [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
+};
+
+/**
+ * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
+ * @xe: xe device
+ * @eci: engine class and instance
+ *
+ * This function finds the hardware engine for the given engine
+ * class and instance.
+ *
+ * Return: Pointer to the xe_hw_engine if found, NULL otherwise.
+ */
+struct xe_hw_engine *
+xe_hw_engine_lookup(struct xe_device *xe,
+ struct drm_xe_engine_class_instance eci)
+{
+ unsigned int idx;
+
+ if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
+ return NULL;
+
+ if (eci.gt_id >= xe->info.gt_count)
+ return NULL;
+
+ idx = array_index_nospec(eci.engine_class,
+ ARRAY_SIZE(user_to_xe_engine_class));
+
+ return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
+ user_to_xe_engine_class[idx],
+ eci.engine_instance, true);
+}
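Since @eci comes straight from userspace, the lookup pairs the bounds check with array_index_nospec() so the table index stays in range even under branch misspeculation (Spectre v1). A reduced sketch of the same pattern (the helper below is hypothetical):

	#include <linux/nospec.h>

	static enum xe_engine_class example_map_user_class(u16 user_class)
	{
		if (user_class >= ARRAY_SIZE(user_to_xe_engine_class))
			return XE_ENGINE_CLASS_MAX;	/* treated as invalid */

		/* Clamp the index before use so speculation cannot overshoot. */
		user_class = array_index_nospec(user_class,
						ARRAY_SIZE(user_to_xe_engine_class));

		return user_to_xe_engine_class[user_class];
	}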
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h
index 900c8c991430..022819a4a8eb 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine.h
@@ -9,6 +9,8 @@
#include "xe_hw_engine_types.h"
struct drm_printer;
+struct drm_xe_engine_class_instance;
+struct xe_device;
#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN
#define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN
@@ -62,6 +64,11 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p);
void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe);
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe);
+
+struct xe_hw_engine *
+xe_hw_engine_lookup(struct xe_device *xe,
+ struct drm_xe_engine_class_instance eci);
+
static inline bool xe_hw_engine_is_valid(struct xe_hw_engine *hwe)
{
return hwe->name;
@@ -71,4 +78,7 @@ const char *xe_hw_engine_class_to_str(enum xe_engine_class class);
u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe);
enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe);
+void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, u32 val);
+u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
new file mode 100644
index 000000000000..82750520a90a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_gt.h"
+#include "xe_hw_engine_group.h"
+#include "xe_vm.h"
+
+static void
+hw_engine_group_free(struct drm_device *drm, void *arg)
+{
+ struct xe_hw_engine_group *group = arg;
+
+ destroy_workqueue(group->resume_wq);
+ kfree(group);
+}
+
+static void
+hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
+{
+ struct xe_exec_queue *q;
+ struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work);
+ int err;
+ enum xe_hw_engine_group_execution_mode previous_mode;
+
+ err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
+ if (err)
+ return;
+
+ if (previous_mode == EXEC_MODE_LR)
+ goto put;
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (!xe_vm_in_fault_mode(q->vm))
+ continue;
+
+ q->ops->resume(q);
+ }
+
+put:
+ xe_hw_engine_group_put(group);
+}
+
+static struct xe_hw_engine_group *
+hw_engine_group_alloc(struct xe_device *xe)
+{
+ struct xe_hw_engine_group *group;
+ int err;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return ERR_PTR(-ENOMEM);
+
+ group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0);
+ if (!group->resume_wq) {
+ kfree(group);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_rwsem(&group->mode_sem);
+ INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
+ INIT_LIST_HEAD(&group->exec_queue_list);
+
+ err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group);
+ if (err)
+ return ERR_PTR(err);
+
+ return group;
+}
+
+/**
+ * xe_hw_engine_setup_groups() - Set up the hw engine groups for the gt
+ * @gt: The gt for which groups are setup
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_hw_engine_setup_groups(struct xe_gt *gt)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ group_rcs_ccs = hw_engine_group_alloc(xe);
+ /* Groups are drmm-managed; nothing to unwind on error */
+ if (IS_ERR(group_rcs_ccs))
+ return PTR_ERR(group_rcs_ccs);
+
+ group_bcs = hw_engine_group_alloc(xe);
+ if (IS_ERR(group_bcs))
+ return PTR_ERR(group_bcs);
+
+ group_vcs_vecs = hw_engine_group_alloc(xe);
+ if (IS_ERR(group_vcs_vecs))
+ return PTR_ERR(group_vcs_vecs);
+
+ for_each_hw_engine(hwe, gt, id) {
+ switch (hwe->class) {
+ case XE_ENGINE_CLASS_COPY:
+ hwe->hw_engine_group = group_bcs;
+ break;
+ case XE_ENGINE_CLASS_RENDER:
+ case XE_ENGINE_CLASS_COMPUTE:
+ hwe->hw_engine_group = group_rcs_ccs;
+ break;
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ hwe->hw_engine_group = group_vcs_vecs;
+ break;
+ case XE_ENGINE_CLASS_OTHER:
+ break;
+ default:
+ drm_warn(&xe->drm, "NOT POSSIBLE");
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xe_hw_engine_group_add_exec_queue() - Add an exec queue to a hw engine group
+ * @group: The hw engine group
+ * @q: The exec_queue
+ *
+ * Return: 0 on success, -EINTR if interrupted while taking the lock,
+ * or a negative error code if suspending a queue fails.
+ */
+int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
+{
+ int err;
+ struct xe_device *xe = gt_to_xe(q->gt);
+
+ xe_assert(xe, group);
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM));
+ xe_assert(xe, q->vm);
+
+ if (xe_vm_in_preempt_fence_mode(q->vm))
+ return 0;
+
+ err = down_write_killable(&group->mode_sem);
+ if (err)
+ return err;
+
+ if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) {
+ q->ops->suspend(q);
+ err = q->ops->suspend_wait(q);
+ if (err)
+ goto err_suspend;
+
+ xe_hw_engine_group_resume_faulting_lr_jobs(group);
+ }
+
+ list_add(&q->hw_engine_group_link, &group->exec_queue_list);
+ up_write(&group->mode_sem);
+
+ return 0;
+
+err_suspend:
+ up_write(&group->mode_sem);
+ return err;
+}
+
+/**
+ * xe_hw_engine_group_del_exec_queue() - Delete an exec queue from a hw engine group
+ * @group: The hw engine group
+ * @q: The exec_queue
+ */
+void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
+{
+ struct xe_device *xe = gt_to_xe(q->gt);
+
+ xe_assert(xe, group);
+ xe_assert(xe, q->vm);
+
+ down_write(&group->mode_sem);
+
+ if (!list_empty(&q->hw_engine_group_link))
+ list_del(&q->hw_engine_group_link);
+
+ up_write(&group->mode_sem);
+}
+
+/**
+ * xe_hw_engine_group_resume_faulting_lr_jobs() - Asynchronously resume the hw engine group's
+ * faulting LR jobs
+ * @group: The hw engine group
+ */
+void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group)
+{
+ queue_work(group->resume_wq, &group->resume_work);
+}
+
+/**
+ * xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group
+ * @group: The hw engine group
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
+{
+ int err;
+ struct xe_exec_queue *q;
+ bool need_resume = false;
+
+ lockdep_assert_held_write(&group->mode_sem);
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (!xe_vm_in_fault_mode(q->vm))
+ continue;
+
+ need_resume = true;
+ q->ops->suspend(q);
+ }
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (!xe_vm_in_fault_mode(q->vm))
+ continue;
+
+ err = q->ops->suspend_wait(q);
+ if (err)
+ goto err_suspend;
+ }
+
+ if (need_resume)
+ xe_hw_engine_group_resume_faulting_lr_jobs(group);
+
+ return 0;
+
+err_suspend:
+ /* The caller holds mode_sem and releases it on error */
+ return err;
+}
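The two loops above implement a fan-out/fan-in: every faulting queue gets its suspend issued before any wait, so the per-queue suspend latencies overlap instead of accumulating. A generic sketch of the pattern (helper name hypothetical):

	static int example_suspend_all(struct list_head *queues)
	{
		struct xe_exec_queue *q;
		int err;

		/* Phase 1: fire off every suspend without blocking. */
		list_for_each_entry(q, queues, hw_engine_group_link)
			q->ops->suspend(q);

		/* Phase 2: wait once per queue; latencies overlap. */
		list_for_each_entry(q, queues, hw_engine_group_link) {
			err = q->ops->suspend_wait(q);
			if (err)
				return err;
		}

		return 0;
	}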
+
+/**
+ * xe_hw_engine_group_wait_for_dma_fence_jobs() - Wait for dma fence jobs to complete
+ * @group: The hw engine group
+ *
+ * This function is not meant to be called directly from a user IOCTL as dma_fence_wait()
+ * is not interruptible.
+ *
+ * Return: 0 on success,
+ * -ETIME if waiting for one job failed
+ */
+static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group *group)
+{
+ long timeout;
+ struct xe_exec_queue *q;
+ struct dma_fence *fence;
+
+ lockdep_assert_held_write(&group->mode_sem);
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (xe_vm_in_lr_mode(q->vm))
+ continue;
+
+ fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
+ timeout = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+ if (timeout < 0)
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int switch_mode(struct xe_hw_engine_group *group)
+{
+ int err = 0;
+ enum xe_hw_engine_group_execution_mode new_mode;
+
+ lockdep_assert_held_write(&group->mode_sem);
+
+ switch (group->cur_mode) {
+ case EXEC_MODE_LR:
+ new_mode = EXEC_MODE_DMA_FENCE;
+ err = xe_hw_engine_group_suspend_faulting_lr_jobs(group);
+ break;
+ case EXEC_MODE_DMA_FENCE:
+ new_mode = EXEC_MODE_LR;
+ err = xe_hw_engine_group_wait_for_dma_fence_jobs(group);
+ break;
+ }
+
+ if (err)
+ return err;
+
+ group->cur_mode = new_mode;
+
+ return 0;
+}
+
+/**
+ * xe_hw_engine_group_get_mode() - Acquire the group in the given execution mode
+ * @group: The hw engine group
+ * @new_mode: The new execution mode
+ * @previous_mode: Pointer used to return the previous mode to the caller
+ *
+ * Return: 0 if successful, -EINTR if interrupted while locking, or a
+ * negative error code if switching the execution mode fails.
+ */
+int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
+ enum xe_hw_engine_group_execution_mode new_mode,
+ enum xe_hw_engine_group_execution_mode *previous_mode)
+__acquires(&group->mode_sem)
+{
+ int err = down_read_interruptible(&group->mode_sem);
+
+ if (err)
+ return err;
+
+ *previous_mode = group->cur_mode;
+
+ if (new_mode != group->cur_mode) {
+ up_read(&group->mode_sem);
+ err = down_write_killable(&group->mode_sem);
+ if (err)
+ return err;
+
+ if (new_mode != group->cur_mode) {
+ err = switch_mode(group);
+ if (err) {
+ up_write(&group->mode_sem);
+ return err;
+ }
+ }
+ downgrade_write(&group->mode_sem);
+ }
+
+ return err;
+}
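The function above uses the classic rwsem upgrade pattern: callers sharing the current mode hold the lock for read; a caller needing a different mode drops the read lock, takes the write lock, re-checks the mode (another writer may have switched it in the gap), switches if still needed, then downgrades so it continues as a reader. A generic sketch:

	#include <linux/rwsem.h>

	static void example_ensure_state(struct rw_semaphore *sem, bool *flag)
	{
		down_read(sem);
		if (!*flag) {
			up_read(sem);
			down_write(sem);
			if (!*flag)		/* re-check under the write lock */
				*flag = true;
			downgrade_write(sem);	/* continue holding it for read */
		}
		up_read(sem);
	}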
+
+/**
+ * xe_hw_engine_group_put() - Put the group
+ * @group: The hw engine group
+ */
+void xe_hw_engine_group_put(struct xe_hw_engine_group *group)
+__releases(&group->mode_sem)
+{
+ up_read(&group->mode_sem);
+}
+
+/**
+ * xe_hw_engine_group_find_exec_mode() - Find the execution mode for this exec queue
+ * @q: The exec_queue
+ *
+ * Return: EXEC_MODE_LR if @q's VM is in fault mode, EXEC_MODE_DMA_FENCE
+ * otherwise.
+ */
+enum xe_hw_engine_group_execution_mode
+xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q)
+{
+ if (xe_vm_in_fault_mode(q->vm))
+ return EXEC_MODE_LR;
+ else
+ return EXEC_MODE_DMA_FENCE;
+}
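Putting the pieces together, a submission path would bracket its work with get_mode()/put() so the whole group stays in one execution mode while the job runs. A hypothetical caller (names assumed, not part of this file):

	static int example_submit(struct xe_exec_queue *q)
	{
		struct xe_hw_engine_group *group = q->hwe->hw_engine_group;
		enum xe_hw_engine_group_execution_mode mode, previous;
		int err;

		mode = xe_hw_engine_group_find_exec_mode(q);
		err = xe_hw_engine_group_get_mode(group, mode, &previous);
		if (err)
			return err;

		/* ... submit; the group cannot change mode until the put ... */

		xe_hw_engine_group_put(group);
		return 0;
	}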
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.h b/drivers/gpu/drm/xe/xe_hw_engine_group.h
new file mode 100644
index 000000000000..797ee81acbf2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_HW_ENGINE_GROUP_H_
+#define _XE_HW_ENGINE_GROUP_H_
+
+#include "xe_hw_engine_group_types.h"
+
+struct drm_device;
+struct xe_exec_queue;
+struct xe_gt;
+
+int xe_hw_engine_setup_groups(struct xe_gt *gt);
+
+int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q);
+void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q);
+
+int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
+ enum xe_hw_engine_group_execution_mode new_mode,
+ enum xe_hw_engine_group_execution_mode *previous_mode);
+void xe_hw_engine_group_put(struct xe_hw_engine_group *group);
+
+enum xe_hw_engine_group_execution_mode
+xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q);
+void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group_types.h b/drivers/gpu/drm/xe/xe_hw_engine_group_types.h
new file mode 100644
index 000000000000..92b6e0712c03
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group_types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_HW_ENGINE_GROUP_TYPES_H_
+#define _XE_HW_ENGINE_GROUP_TYPES_H_
+
+#include "xe_force_wake_types.h"
+#include "xe_lrc_types.h"
+#include "xe_reg_sr_types.h"
+
+/**
+ * enum xe_hw_engine_group_execution_mode - possible execution modes of a hw
+ * engine group
+ *
+ * @EXEC_MODE_LR: execution in long-running mode
+ * @EXEC_MODE_DMA_FENCE: execution in dma fence mode
+ */
+enum xe_hw_engine_group_execution_mode {
+ EXEC_MODE_LR,
+ EXEC_MODE_DMA_FENCE,
+};
+
+/**
+ * struct xe_hw_engine_group - Hardware engine group
+ *
+ * hw engines belong to the same group if they share hardware resources in a way
+ * that prevents them from making progress when one is stuck on a page fault.
+ */
+struct xe_hw_engine_group {
+ /**
+ * @exec_queue_list: list of exec queues attached to this
+ * xe_hw_engine_group
+ */
+ struct list_head exec_queue_list;
+ /** @resume_work: worker to resume faulting LR exec queues */
+ struct work_struct resume_work;
+ /** @resume_wq: workqueue to resume faulting LR exec queues */
+ struct workqueue_struct *resume_wq;
+ /**
+ * @mode_sem: protects this group's hardware resources and ensures
+ * mutual exclusion between execution in faulting LR mode and
+ * execution in DMA-fence mode
+ */
+ struct rw_semaphore mode_sem;
+ /** @cur_mode: current execution mode of this hw engine group */
+ enum xe_hw_engine_group_execution_mode cur_mode;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 70e6434f150d..8be6d420ece4 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -136,8 +136,6 @@ struct xe_hw_engine {
enum xe_force_wake_domains domain;
/** @hwsp: hardware status page buffer object */
struct xe_bo *hwsp;
- /** @kernel_lrc: Kernel LRC (should be replaced /w an xe_engine) */
- struct xe_lrc *kernel_lrc;
/** @exl_port: execlists port */
struct xe_execlist_port *exl_port;
/** @fence_irq: fence IRQ to run when a hw engine IRQ is received */
@@ -150,6 +148,8 @@ struct xe_hw_engine {
struct xe_hw_engine_class_intf *eclass;
/** @oa_unit: oa unit for this hw engine */
struct xe_oa_unit *oa_unit;
+ /** @hw_engine_group: the group of hw engines this one belongs to */
+ struct xe_hw_engine_group *hw_engine_group;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 98e3ec08279e..aa11728e7e79 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -12,7 +12,6 @@
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
-#include "xe_gt.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
@@ -65,8 +64,8 @@ struct xe_hwmon_energy_info {
struct xe_hwmon {
/** @hwmon_dev: hwmon device for xe */
struct device *hwmon_dev;
- /** @gt: primary gt */
- struct xe_gt *gt;
+ /** @xe: Xe device */
+ struct xe_device *xe;
/** @hwmon_lock: lock for rw attributes*/
struct mutex hwmon_lock;
/** @scl_shift_power: pkg power unit */
@@ -82,7 +81,7 @@ struct xe_hwmon {
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
int channel)
{
- struct xe_device *xe = gt_to_xe(hwmon->gt);
+ struct xe_device *xe = hwmon->xe;
switch (hwmon_reg) {
case REG_PKG_RAPL_LIMIT:
@@ -148,8 +147,9 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val, min, max;
- struct xe_device *xe = gt_to_xe(hwmon->gt);
+ struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@@ -166,7 +166,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
mutex_lock(&hwmon->hwmon_lock);
- reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
/* Check if PL1 limit is disabled */
if (!(reg_val & PKG_PWR_LIM_1_EN)) {
*value = PL1_DISABLE;
@@ -176,7 +176,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
- reg_val = xe_mmio_read64_2x32(hwmon->gt, pkg_power_sku);
+ reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
@@ -190,6 +190,7 @@ unlock:
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
int ret = 0;
u64 reg_val;
struct xe_reg rapl_limit;
@@ -200,10 +201,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
if (value == PL1_DISABLE) {
- reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0);
- reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
if (reg_val & PKG_PWR_LIM_1_EN) {
- drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n");
+ drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
ret = -EOPNOTSUPP;
}
goto unlock;
@@ -212,7 +213,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
- reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
@@ -221,6 +222,7 @@ unlock:
static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
u64 reg_val;
@@ -229,7 +231,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
* for this register can be skipped.
* See xe_hwmon_power_is_visible.
*/
- reg_val = xe_mmio_read32(hwmon->gt, reg);
+ reg_val = xe_mmio_read32(mmio, reg);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
@@ -257,11 +259,12 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
u64 reg_val;
- reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel));
if (reg_val >= ei->reg_val_prev)
ei->accum_energy += reg_val - ei->reg_val_prev;
@@ -279,19 +282,20 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
char *buf)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
int sensor_index = to_sensor_dev_attr(attr)->index;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
+ r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
mutex_unlock(&hwmon->hwmon_lock);
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
@@ -319,6 +323,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
const char *buf, size_t count)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
@@ -371,16 +376,16 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_rmw32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
+ r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
PKG_PWR_LIM_1_TIME, rxy);
mutex_unlock(&hwmon->hwmon_lock);
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return count;
}
@@ -406,11 +411,11 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -435,20 +440,24 @@ static const struct hwmon_channel_info * const hwmon_info[] = {
};
/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
-static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
+static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
/* Avoid Illegal Subcommand error */
- if (gt_to_xe(gt)->info.platform == XE_DG2)
+ if (hwmon->xe->info.platform == XE_DG2)
return -ENXIO;
- return xe_pcode_read(gt_to_tile(gt), PCODE_MBOX(PCODE_POWER_SETUP,
+ return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_READ_I1, 0),
uval, NULL);
}
-static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
+static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
- return xe_pcode_write(gt_to_tile(gt), PCODE_MBOX(PCODE_POWER_SETUP,
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
+ return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
(uval & POWER_SETUP_I1_DATA_MASK));
}
@@ -461,7 +470,7 @@ static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
mutex_lock(&hwmon->hwmon_lock);
- ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
+ ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
if (ret)
goto unlock;
@@ -481,7 +490,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
mutex_lock(&hwmon->hwmon_lock);
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
- ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+ ret = xe_hwmon_pcode_write_i1(hwmon, uval);
mutex_unlock(&hwmon->hwmon_lock);
return ret;
@@ -489,9 +498,10 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u64 reg_val;
- reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
/* HW register value in units of 2.5 millivolt */
*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}
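A worked example of the conversion above, assuming SF_VOLTAGE == 1000 as elsewhere in this file: the register field counts 2.5 mV steps and hwmon's in_input is in millivolts, so a raw field of 600 reports 1500 mV:

	static long example_field_to_millivolts(u32 field)
	{
		/* 600 * 2500 / 1000 = 1500 mV, i.e. 1.5 V */
		return DIV_ROUND_CLOSEST((long)field * 2500, 1000);
	}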
@@ -510,7 +520,7 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
channel)) ? 0444 : 0;
case hwmon_power_crit:
if (channel == CHANNEL_PKG)
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
break;
case hwmon_power_label:
@@ -563,10 +573,10 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
switch (attr) {
case hwmon_curr_crit:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
case hwmon_curr_label:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
break;
default:
@@ -654,7 +664,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -674,7 +684,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -686,7 +696,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -706,7 +716,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -718,7 +728,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -732,7 +742,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -771,6 +781,7 @@ static const struct hwmon_chip_info hwmon_chip_info = {
static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u64 val_sku_unit = 0;
@@ -783,7 +794,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
*/
pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
if (xe_reg_is_valid(pkg_power_sku_unit)) {
- val_sku_unit = xe_mmio_read32(hwmon->gt, pkg_power_sku_unit);
+ val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
@@ -828,8 +839,8 @@ void xe_hwmon_register(struct xe_device *xe)
if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
return;
- /* primary GT to access device level properties */
- hwmon->gt = xe->tiles[0].primary_gt;
+ /* There's only one instance of hwmon per device */
+ hwmon->xe = xe;
xe_hwmon_get_preregistration_info(xe);
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 85733f993d09..5f2c368c35ad 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -459,6 +459,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* the primary tile.
*/
if (id == 0) {
+ if (HAS_HECI_CSCFI(xe))
+ xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
}
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 418661a88918..8999ac511555 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -7,7 +7,7 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
@@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 58121821f081..aec7db39c061 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -5,6 +5,8 @@
#include "xe_lrc.h"
+#include <generated/xe_wa_oob.h>
+
#include <linux/ascii85.h>
#include "instructions/xe_mi_commands.h"
@@ -24,6 +26,7 @@
#include "xe_memirq.h"
#include "xe_sriov.h"
#include "xe_vm.h"
+#include "xe_wa.h"
#define LRC_VALID BIT_ULL(0)
#define LRC_PRIVILEGE BIT_ULL(8)
@@ -1581,19 +1584,31 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b
int state_table_size = 0;
/*
- * At the moment we only need to emit non-register state for the RCS
- * engine.
+ * Wa_14019789679
+ *
+ * If the driver doesn't explicitly emit the SVG instructions while
+ * setting up the default LRC, the context switch will write 0's
+ * (noops) into the LRC memory rather than the expected instruction
+ * headers. Application contexts start out as a copy of the default
+ * LRC, and if they also do not emit specific settings for some SVG
+ * state, then on context restore they'll unintentionally inherit
+ * whatever state setting the previous context had programmed into the
+ * hardware (i.e., the lack of a 3DSTATE_* instruction in the LRC will
+ * prevent the hardware from resetting that state back to any specific
+ * value).
+ *
+ * The official workaround only requires emitting 3DSTATE_MESH_CONTROL
+ * since that's a specific state setting that can easily cause GPU
+ * hangs if unintentionally inherited. However, to be safe, we'll
+ * continue to emit all of the SVG state since it's best not to leak
+ * any of the state between contexts, even if that leakage is harmless.
*/
- if (q->hwe->class != XE_ENGINE_CLASS_RENDER)
- return;
-
- switch (GRAPHICS_VERx100(xe)) {
- case 1255:
- case 1270 ... 2004:
+ if (XE_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
state_table = xe_hpg_svg_state;
state_table_size = ARRAY_SIZE(xe_hpg_svg_state);
- break;
- default:
+ }
+
+ if (!state_table) {
xe_gt_dbg(gt, "No non-register state to emit on graphics ver %d.%02d\n",
GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
return;
@@ -1634,7 +1649,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
if (!snapshot)
return NULL;
- if (lrc->bo && lrc->bo->vm)
+ if (lrc->bo->vm)
xe_vm_get(lrc->bo->vm);
snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index c9f5673353ee..cfd31ae49cc1 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -10,7 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
@@ -73,6 +73,7 @@ struct xe_migrate {
#define NUM_PT_SLOTS 32
#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
#define MAX_NUM_PTE 512
+#define IDENTITY_OFFSET 256ULL
/*
* Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
@@ -84,15 +85,14 @@ struct xe_migrate {
#define MAX_PTE_PER_SDI 0x1FE
/**
- * xe_tile_migrate_engine() - Get this tile's migrate engine.
+ * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
* @tile: The tile.
*
- * Returns the default migrate engine of this tile.
- * TODO: Perhaps this function is slightly misplaced, and even unneeded?
+ * Returns the default migrate exec queue of this tile.
*
- * Return: The default migrate engine
+ * Return: The default migrate exec queue
*/
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
{
return tile->migrate->q;
}
@@ -121,14 +121,64 @@ static u64 xe_migrate_vm_addr(u64 slot, u32 level)
return (slot + 1ULL) << xe_pt_shift(level + 1);
}
-static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
+static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
{
/*
* Remove the DPA to get a correct offset into identity table for the
* migrate offset
*/
+ u64 identity_offset = IDENTITY_OFFSET;
+
+ if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
+ identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+
addr -= xe->mem.vram.dpa_base;
- return addr + (256ULL << xe_pt_shift(2));
+ return addr + (identity_offset << xe_pt_shift(2));
+}
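Since xe_pt_shift(2) is 30 on these platforms, IDENTITY_OFFSET selects 1 GiB slots: the plain identity map starts 256 GiB into the migrate VM, and on Xe2+ with flat CCS the compressed view starts one slot per GiB of VRAM above that. A minimal sketch of the arithmetic (helper hypothetical):

	static u64 example_identity_addr(u64 dpa, u64 dpa_base, bool compressed,
					 u64 vram_gib)
	{
		u64 slot = 256;			/* IDENTITY_OFFSET, in 1 GiB units */

		if (compressed)
			slot += vram_gib;	/* second map for compressed PAT */

		return (dpa - dpa_base) + (slot << 30);
	}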
+
+static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
+ u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
+{
+ u64 pos, ofs, flags;
+ u64 entry;
+ /* XXX: Unclear if this should be usable_size? */
+ u64 vram_limit = xe->mem.vram.actual_physical_size +
+ xe->mem.vram.dpa_base;
+ u32 level = 2;
+
+ ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
+ flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
+ true, 0);
+
+ xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+
+ /*
+ * Use 1GB pages when possible; the last chunk always uses 2M
+ * pages as mixing reserved memory (stolen, WOCPM) with a single
+ * mapping is not allowed on certain platforms.
+ */
+ for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+ pos += SZ_1G, ofs += 8) {
+ if (pos + SZ_1G >= vram_limit) {
+ entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
+ pat_index);
+ xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
+
+ flags = vm->pt_ops->pte_encode_addr(xe, 0,
+ pat_index,
+ level - 1,
+ true, 0);
+
+ for (ofs = pt_2m_ofs; pos < vram_limit;
+ pos += SZ_2M, ofs += 8)
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ break; /* Ensure the pos == vram_limit assert below holds */
+ }
+
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ }
+
+ xe_assert(xe, pos == vram_limit);
}
static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
@@ -137,11 +187,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
struct xe_device *xe = tile_to_xe(tile);
u16 pat_index = xe->pat.idx[XE_CACHE_WB];
u8 id = tile->id;
- u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level,
- num_setup = num_level + 1;
+ u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
+#define VRAM_IDENTITY_MAP_COUNT 2
+ u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
+#undef VRAM_IDENTITY_MAP_COUNT
u32 map_ofs, level, i;
struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
- u64 entry, pt30_ofs;
+ u64 entry, pt29_ofs;
/* Can't bump NUM_PT_SLOTS too high */
BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
@@ -161,9 +213,9 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (IS_ERR(bo))
return PTR_ERR(bo);
- /* PT31 reserved for 2M identity map */
- pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
- entry = vm->pt_ops->pde_encode_bo(bo, pt30_ofs, pat_index);
+ /* PT30 & PT31 reserved for 2M identity map */
+ pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
+ entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
@@ -215,12 +267,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
if (xe->info.has_usm) {
batch = tile->primary_gt->usm.bb_pool->bo;
batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
}
}
@@ -254,55 +306,36 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
- u64 pos, ofs, flags;
- /* XXX: Unclear if this should be usable_size? */
- u64 vram_limit = xe->mem.vram.actual_physical_size +
- xe->mem.vram.dpa_base;
-
- level = 2;
- ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
- flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
- true, 0);
+ u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
- xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+ xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
+ pat_index, pt30_ofs);
+ xe_assert(xe, xe->mem.vram.actual_physical_size <=
+ (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
/*
- * Use 1GB pages when possible, last chunk always use 2M
- * pages as mixing reserved memory (stolen, WOCPM) with a single
- * mapping is not allowed on certain platforms.
+ * Identity map the entire vram for compressed pat_index for xe2+
+ * if flat ccs is enabled.
*/
- for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
- pos += SZ_1G, ofs += 8) {
- if (pos + SZ_1G >= vram_limit) {
- u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
-
- entry = vm->pt_ops->pde_encode_bo(bo, pt31_ofs,
- pat_index);
- xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
-
- flags = vm->pt_ops->pte_encode_addr(xe, 0,
- pat_index,
- level - 1,
- true, 0);
-
- for (ofs = pt31_ofs; pos < vram_limit;
- pos += SZ_2M, ofs += 8)
- xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
- break; /* Ensure pos == vram_limit assert correct */
- }
-
- xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
+ u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
+ u64 vram_offset = IDENTITY_OFFSET +
+ DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
+
+ xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
+ IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
+ xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
+ comp_pat_index, pt31_ofs);
}
-
- xe_assert(xe, pos == vram_limit);
}
/*
* Example layout created above, with root level = 3:
* [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's
* [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
- * [PT9...PT27]: Userspace PT's for VM_BIND, 4 KiB PTE's
- * [PT28 = PDE 0] [PT29 = PDE 1] [PT30 = PDE 2] [PT31 = 2M vram identity map]
+ * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
+ * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
*
* This makes the lowest part of the VM point to the pagetables.
* Hence the lowest 2M in the vm should point to itself, with a few writes
@@ -348,6 +381,11 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
return logical_mask;
}
+static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
+{
+ return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
+}
+
/**
* xe_migrate_init() - Initialize a migrate context
* @tile: Back-pointer to the tile we're initializing for.
@@ -404,7 +442,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT);
+ EXEC_QUEUE_FLAG_PERMANENT, 0);
}
if (IS_ERR(m->q)) {
xe_vm_close_and_put(vm);
@@ -421,7 +459,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
return ERR_PTR(err);
if (IS_DGFX(xe)) {
- if (xe_device_has_flat_ccs(xe))
+ if (xe_migrate_needs_ccs_emit(xe))
/* min chunk size corresponds to 4K of CCS Metadata */
m->min_chunk_size = SZ_4K * SZ_64K /
xe_device_ccs_bytes(xe, SZ_64K);
@@ -475,20 +513,26 @@ static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
return cur->size >= size;
}
+#define PTE_UPDATE_FLAG_IS_VRAM BIT(0)
+#define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1)
+
static u32 pte_update_size(struct xe_migrate *m,
- bool is_vram,
+ u32 flags,
struct ttm_resource *res,
struct xe_res_cursor *cur,
u64 *L0, u64 *L0_ofs, u32 *L0_pt,
u32 cmd_size, u32 pt_ofs, u32 avail_pts)
{
u32 cmds = 0;
+ bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
+ bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
*L0_pt = pt_ofs;
if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
/* Offset into identity map. */
*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
- cur->start + vram_region_gpu_offset(res));
+ cur->start + vram_region_gpu_offset(res),
+ is_comp_pte);
cmds += cmd_size;
} else {
/* Clip L0 to available size */
@@ -661,7 +705,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
struct xe_gt *gt = m->tile->primary_gt;
u32 flush_flags = 0;
- if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
+ if (!copy_ccs && dst_is_indirect) {
/*
* If the src is already in vram, then it should already
* have been cleared by us, or has been populated by the
@@ -737,6 +781,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
+ bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
@@ -763,10 +809,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
struct xe_sched_job *job;
struct xe_bb *bb;
- u32 flush_flags;
+ u32 flush_flags = 0;
u32 update_idx;
u64 ccs_ofs, ccs_size;
u32 ccs_pt;
+ u32 pte_flags;
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
@@ -779,17 +826,20 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
src_L0 = min(src_L0, dst_L0);
- batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
+ pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
+ batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
&src_L0_ofs, &src_L0_pt, 0, 0,
avail_pts);
- batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
+ pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
&dst_L0_ofs, &dst_L0_pt, 0,
avail_pts, avail_pts);
if (copy_system_ccs) {
ccs_size = xe_device_ccs_bytes(xe, src_L0);
- batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
2 * avail_pts,
avail_pts);
@@ -798,7 +848,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -827,11 +877,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
- IS_DGFX(xe) ? src_is_vram : src_is_pltt,
- dst_L0_ofs,
- IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
- src_L0, ccs_ofs, copy_ccs);
+ if (xe_migrate_needs_ccs_emit(xe))
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ IS_DGFX(xe) ? src_is_vram : src_is_pltt,
+ dst_L0_ofs,
+ IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
+ src_L0, ccs_ofs, copy_ccs);
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
@@ -986,9 +1037,11 @@ static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
* @m: The migration context.
* @bo: The buffer object @dst is currently bound to.
* @dst: The dst TTM resource to be cleared.
+ * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
*
- * Clear the contents of @dst to zero. On flat CCS devices,
- * the CCS metadata is cleared to zero as well on VRAM destinations.
+ * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
+ * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
+ * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear the BO as well as its CCS metadata.
* TODO: Eliminate the @bo argument.
*
* Return: Pointer to a dma_fence representing the last clear batch, or
@@ -997,18 +1050,27 @@ static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
*/
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_bo *bo,
- struct ttm_resource *dst)
+ struct ttm_resource *dst,
+ u32 clear_flags)
{
bool clear_vram = mem_type_is_vram(dst->mem_type);
+ bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
+ bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
- bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false;
+ bool clear_only_system_ccs = false;
struct dma_fence *fence = NULL;
u64 size = bo->size;
struct xe_res_cursor src_it;
struct ttm_resource *src = dst;
int err;
+ if (WARN_ON(!clear_bo_data && !clear_ccs))
+ return NULL;
+
+ if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
+ clear_only_system_ccs = true;
+
if (!clear_vram)
xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
else
@@ -1022,6 +1084,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_sched_job *job;
struct xe_bb *bb;
u32 batch_size, update_idx;
+ u32 pte_flags;
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
@@ -1029,13 +1092,14 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
clear_L0 = xe_migrate_res_sizes(m, &src_it);
/* Calculate final sizes and batch size.. */
+ pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
batch_size = 2 +
- pte_update_size(m, clear_vram, src, &src_it,
+ pte_update_size(m, pte_flags, src, &src_it,
&clear_L0, &clear_L0_ofs, &clear_L0_pt,
- clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
+ clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
avail_pts);
- if (xe_device_has_flat_ccs(xe))
+ if (xe_migrate_needs_ccs_emit(xe))
batch_size += EMIT_COPY_CCS_DW;
/* Clear commands */
@@ -1054,16 +1118,16 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
xe_res_next(&src_it, clear_L0);
else
- emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
+ emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
&src_it, clear_L0, dst);
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- if (!clear_system_ccs)
+ if (clear_bo_data)
emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
- if (xe_device_has_flat_ccs(xe)) {
+ if (xe_migrate_needs_ccs_emit(xe)) {
emit_copy_ccs(gt, bb, clear_L0_ofs, true,
m->cleared_mem_ofs, false, clear_L0);
flush_flags = MI_FLUSH_DW_CCS;
@@ -1119,13 +1183,14 @@ err_sync:
return ERR_PTR(err);
}
- if (clear_system_ccs)
+ if (clear_ccs)
bo->ccs_cleared = true;
return fence;
}
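With the new flags a caller picks exactly what gets cleared. A hypothetical use for a freshly allocated VRAM BO, with XE_MIGRATE_CLEAR_FLAG_FULL standing for BO data plus CCS metadata per the kernel-doc above:

	static int example_clear_bo(struct xe_tile *tile, struct xe_bo *bo)
	{
		struct dma_fence *fence;

		fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
					 XE_MIGRATE_CLEAR_FLAG_FULL);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		dma_fence_wait(fence, false);	/* not interruptible */
		dma_fence_put(fence);
		return 0;
	}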
static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
+ const struct xe_vm_pgtable_update_op *pt_op,
const struct xe_vm_pgtable_update *update,
struct xe_migrate_pt_update *pt_update)
{
@@ -1146,7 +1211,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
if (!ppgtt_ofs)
ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
xe_bo_addr(update->pt_bo, 0,
- XE_PAGE_SIZE));
+ XE_PAGE_SIZE), false);
do {
u64 addr = ppgtt_ofs + ofs * 8;
@@ -1160,8 +1225,12 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
- ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
- update);
+ if (pt_op->bind)
+ ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
+ ofs, chunk, update);
+ else
+ ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
+ ofs, chunk, update);
bb->len += chunk * 2;
ofs += chunk;
@@ -1186,114 +1255,58 @@ struct migrate_test_params {
static struct dma_fence *
xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
- struct xe_vm *vm, struct xe_bo *bo,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates, bool wait_vm,
struct xe_migrate_pt_update *pt_update)
{
XE_TEST_DECLARE(struct migrate_test_params *test =
to_migrate_test_params
(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
- struct dma_fence *fence;
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &pt_update->vops->pt_update_ops[pt_update->tile_id];
int err;
- u32 i;
+ u32 i, j;
if (XE_TEST_ONLY(test && test->force_gpu))
return ERR_PTR(-ETIME);
- if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL))
- return ERR_PTR(-ETIME);
-
- if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP))
- return ERR_PTR(-ETIME);
-
if (ops->pre_commit) {
pt_update->job = NULL;
err = ops->pre_commit(pt_update);
if (err)
return ERR_PTR(err);
}
- for (i = 0; i < num_updates; i++) {
- const struct xe_vm_pgtable_update *update = &updates[i];
-
- ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
- update->ofs, update->qwords, update);
- }
- if (vm) {
- trace_xe_vm_cpu_bind(vm);
- xe_device_wmb(vm->xe);
- }
-
- fence = dma_fence_get_stub();
-
- return fence;
-}
-
-static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs)
-{
- struct dma_fence *fence;
- int i;
-
- for (i = 0; i < num_syncs; i++) {
- fence = syncs[i].fence;
-
- if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence->flags))
- return false;
- }
- if (q) {
- fence = xe_exec_queue_last_fence_get(q, vm);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- dma_fence_put(fence);
- return false;
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ const struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+
+ for (j = 0; j < pt_op->num_entries; j++) {
+ const struct xe_vm_pgtable_update *update =
+ &pt_op->entries[j];
+
+ if (pt_op->bind)
+ ops->populate(pt_update, m->tile,
+ &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords,
+ update);
+ else
+ ops->clear(pt_update, m->tile,
+ &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords, update);
}
- dma_fence_put(fence);
}
- return true;
+ trace_xe_vm_cpu_bind(vm);
+ xe_device_wmb(vm->xe);
+
+ return dma_fence_get_stub();
}
-/**
- * xe_migrate_update_pgtables() - Pipelined page-table update
- * @m: The migrate context.
- * @vm: The vm we'll be updating.
- * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
- * @q: The exec queue to be used for the update or NULL if the default
- * migration engine is to be used.
- * @updates: An array of update descriptors.
- * @num_updates: Number of descriptors in @updates.
- * @syncs: Array of xe_sync_entry to await before updating. Note that waits
- * will block the engine timeline.
- * @num_syncs: Number of entries in @syncs.
- * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
- * pointers to callback functions and, if subclassed, private arguments to
- * those.
- *
- * Perform a pipelined page-table update. The update descriptors are typically
- * built under the same lock critical section as a call to this function. If
- * using the default engine for the updates, they will be performed in the
- * order they grab the job_mutex. If different engines are used, external
- * synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
- * touch the same page-table, which might be a higher-level page-directory.
- * If no pipelining is needed, then updates may be performed by the cpu.
- *
- * Return: A dma_fence that, when signaled, indicates the update completion.
- */
-struct dma_fence *
-xe_migrate_update_pgtables(struct xe_migrate *m,
- struct xe_vm *vm,
- struct xe_bo *bo,
- struct xe_exec_queue *q,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates,
- struct xe_sync_entry *syncs, u32 num_syncs,
- struct xe_migrate_pt_update *pt_update)
+static struct dma_fence *
+__xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_migrate_pt_update *pt_update,
+ struct xe_vm_pgtable_update_ops *pt_update_ops)
{
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
struct xe_tile *tile = m->tile;
@@ -1302,59 +1315,53 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_sched_job *job;
struct dma_fence *fence;
struct drm_suballoc *sa_bo = NULL;
- struct xe_vma *vma = pt_update->vma;
struct xe_bb *bb;
- u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
+ u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
+ u32 num_updates = 0, current_update = 0;
u64 addr;
int err = 0;
- bool usm = !q && xe->info.has_usm;
- bool first_munmap_rebind = vma &&
- vma->gpuva.flags & XE_VMA_FIRST_REBIND;
- struct xe_exec_queue *q_override = !q ? m->q : q;
- u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+ bool is_migrate = pt_update_ops->q == m->q;
+ bool usm = is_migrate && xe->info.has_usm;
+
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
- /* Use the CPU if no in syncs and engine is idle */
- if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
- fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
- num_updates,
- first_munmap_rebind,
- pt_update);
- if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
- return fence;
+ num_updates += pt_op->num_entries;
+ for (j = 0; j < pt_op->num_entries; ++j) {
+ u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
+ MAX_PTE_PER_SDI);
+
+ /* align noop + MI_STORE_DATA_IMM cmd prefix */
+ batch_size += 4 * num_cmds + updates[j].qwords * 2;
+ }
}
/* fixed + PTE entries */
if (IS_DGFX(xe))
- batch_size = 2;
+ batch_size += 2;
else
- batch_size = 6 + num_updates * 2;
+ batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
+ num_updates * 2;
- for (i = 0; i < num_updates; i++) {
- u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
-
- /* align noop + MI_STORE_DATA_IMM cmd prefix */
- batch_size += 4 * num_cmds + updates[i].qwords * 2;
- }
-
- /*
- * XXX: Create temp bo to copy from, if batch_size becomes too big?
- *
- * Worst case: Sum(2 * (each lower level page size) + (top level page size))
- * Should be reasonably bound..
- */
- xe_tile_assert(tile, batch_size < SZ_128K);
-
- bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
+ bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb))
return ERR_CAST(bb);
/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
+ u32 ptes, ofs;
+
ppgtt_ofs = NUM_KERNEL_PDE - 1;
- if (q) {
- xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
+ if (!is_migrate) {
+ u32 num_units = DIV_ROUND_UP(num_updates,
+ NUM_VMUSA_WRITES_PER_UNIT);
- sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
+ if (num_units > m->vm_update_sa.size) {
+ err = -ENOBUFS;
+ goto err_bb;
+ }
+ sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
GFP_KERNEL, true, 0);
if (IS_ERR(sa_bo)) {
err = PTR_ERR(sa_bo);
@@ -1370,18 +1377,49 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
}
/* Map our PT's to gtt */
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
- bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
- bb->cs[bb->len++] = 0; /* upper_32_bits */
-
- for (i = 0; i < num_updates; i++) {
- struct xe_bo *pt_bo = updates[i].pt_bo;
+ i = 0;
+ j = 0;
+ ptes = num_updates;
+ ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
+ while (ptes) {
+ u32 chunk = min(MAX_PTE_PER_SDI, ptes);
+ u32 idx = 0;
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM |
+ MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = ofs;
+ bb->cs[bb->len++] = 0; /* upper_32_bits */
+
+ for (; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_bo *pt_bo = updates[j].pt_bo;
+
+ if (idx == chunk)
+ goto next_cmd;
+
+ xe_tile_assert(tile, pt_bo->size == SZ_4K);
+
+ /* Map a PT at most once */
+ if (pt_bo->update_index < 0)
+ pt_bo->update_index = current_update;
+
+ addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
+ XE_CACHE_WB, 0);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ }
- xe_tile_assert(tile, pt_bo->size == SZ_4K);
+ j = 0;
+ }
- addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
- bb->cs[bb->len++] = lower_32_bits(addr);
- bb->cs[bb->len++] = upper_32_bits(addr);
+next_cmd:
+ ptes -= chunk;
+ ofs += chunk * sizeof(u64);
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
@@ -1389,19 +1427,36 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
- for (i = 0; i < num_updates; i++)
- write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
- &updates[i], pt_update);
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (j = 0; j < pt_op->num_entries; ++j) {
+ struct xe_bo *pt_bo = updates[j].pt_bo;
+
+ write_pgtable(tile, bb, addr +
+ pt_bo->update_index * XE_PAGE_SIZE,
+ pt_op, &updates[j], pt_update);
+ }
+ }
} else {
/* phys pages, no preamble required */
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- for (i = 0; i < num_updates; i++)
- write_pgtable(tile, bb, 0, &updates[i], pt_update);
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (j = 0; j < pt_op->num_entries; ++j)
+ write_pgtable(tile, bb, 0, pt_op, &updates[j],
+ pt_update);
+ }
}
- job = xe_bb_create_migration_job(q ?: m->q, bb,
+ job = xe_bb_create_migration_job(pt_update_ops->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
@@ -1409,46 +1464,20 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
goto err_sa;
}
- /* Wait on BO move */
- if (bo) {
- err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL);
- if (err)
- goto err_job;
- }
-
- /*
- * Munmap style VM unbind, need to wait for all jobs to be complete /
- * trigger preempts before moving forward
- */
- if (first_munmap_rebind) {
- err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- if (err)
- goto err_job;
- }
-
- err = xe_sched_job_last_fence_add_dep(job, vm);
- for (i = 0; !err && i < num_syncs; i++)
- err = xe_sync_entry_add_deps(&syncs[i], job);
-
- if (err)
- goto err_job;
-
if (ops->pre_commit) {
pt_update->job = job;
err = ops->pre_commit(pt_update);
if (err)
goto err_job;
}
- if (!q)
+ if (is_migrate)
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- if (!q)
+ if (is_migrate)
mutex_unlock(&m->job_mutex);
xe_bb_free(bb, fence);
@@ -1466,6 +1495,40 @@ err_bb:
}
/**
+ * xe_migrate_update_pgtables() - Pipelined page-table update
+ * @m: The migrate context.
+ * @pt_update: PT update arguments
+ *
+ * Perform a pipelined page-table update. The update descriptors are typically
+ * built under the same lock critical section as a call to this function. If
+ * using the default engine for the updates, they will be performed in the
+ * order they grab the job_mutex. If different engines are used, external
+ * synchronization is needed for overlapping updates to maintain page-table
+ * consistency. Note that the meaning of "overlapping" is that the updates
+ * touch the same page-table, which might be a higher-level page-directory.
+ * If no pipelining is needed, then updates may be performed by the cpu.
+ *
+ * Return: A dma_fence that, when signaled, indicates the update completion.
+ */
+struct dma_fence *
+xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &pt_update->vops->pt_update_ops[pt_update->tile_id];
+ struct dma_fence *fence;
+
+ fence = xe_migrate_update_pgtables_cpu(m, pt_update);
+
+	/* -ETIME indicates a job is needed; anything else is a legitimate error */
+ if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
+ return fence;
+
+ return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
+}
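As a hedged caller-side sketch (names assumed, not part of this patch): the
helper now tries the CPU path first and only falls back to building a GPU job
when the CPU path reports -ETIME.

	struct dma_fence *fence;

	fence = xe_migrate_update_pgtables(tile->migrate, &pt_update);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	/* Stub fence (CPU path) or job fence (GPU path) */
	dma_fence_put(fence);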
+
+/**
* xe_migrate_wait() - Complete all operations using the xe_migrate context
* @m: Migrate context to wait for.
*
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 951f19318ea4..0109866e398a 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -6,7 +6,7 @@
#ifndef _XE_MIGRATE_
#define _XE_MIGRATE_
-#include <drm/drm_mm.h>
+#include <linux/types.h>
struct dma_fence;
struct iosys_map;
@@ -47,6 +47,24 @@ struct xe_migrate_pt_update_ops {
struct xe_tile *tile, struct iosys_map *map,
void *pos, u32 ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update);
+ /**
+ * @clear: Clear a command buffer or page-table with ptes.
+ * @pt_update: Embeddable callback argument.
+ * @tile: The tile for the current operation.
+ * @map: struct iosys_map into the memory to be populated.
+ * @pos: If @map is NULL, map into the memory to be populated.
+ * @ofs: qword offset into @map, unused if @map is NULL.
+ * @num_qwords: Number of qwords to write.
+ * @update: Information about the PTEs to be inserted.
+ *
+ * This interface is intended to be used as a callback into the
+ * page-table system to clear command buffers or shared
+ * page-tables with PTEs.
+ */
+ void (*clear)(struct xe_migrate_pt_update *pt_update,
+ struct xe_tile *tile, struct iosys_map *map,
+ void *pos, u32 ofs, u32 num_qwords,
+ const struct xe_vm_pgtable_update *update);
/**
* @pre_commit: Callback to be called just before arming the
@@ -67,14 +85,10 @@ struct xe_migrate_pt_update_ops {
struct xe_migrate_pt_update {
/** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */
const struct xe_migrate_pt_update_ops *ops;
- /** @vma: The vma we're updating the pagetable for. */
- struct xe_vma *vma;
+ /** @vops: VMA operations */
+ struct xe_vma_ops *vops;
/** @job: The job if a GPU page-table update. NULL otherwise */
struct xe_sched_job *job;
- /** @start: Start of update for the range fence */
- u64 start;
- /** @last: Last of update for the range fence */
- u64 last;
/** @tile_id: Tile ID of the update */
u8 tile_id;
};
@@ -88,23 +102,22 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0)
+#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1)
+#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
+ XE_MIGRATE_CLEAR_FLAG_CCS_DATA)
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_bo *bo,
- struct ttm_resource *dst);
+ struct ttm_resource *dst,
+ u32 clear_flags);
struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);
struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
- struct xe_vm *vm,
- struct xe_bo *bo,
- struct xe_exec_queue *q,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates,
- struct xe_sync_entry *syncs, u32 num_syncs,
struct xe_migrate_pt_update *pt_update);
void xe_migrate_wait(struct xe_migrate *m);
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile);
#endif
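As a usage sketch of the new clear flags (assuming a held bo with a valid TTM
resource and a migrate context m; illustrative only):

	/* Clear both BO data and compression (CCS) metadata */
	fence = xe_migrate_clear(m, bo, bo->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);

	/* Clear CCS metadata only */
	fence = xe_migrate_clear(m, bo, bo->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_CCS_DATA);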
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index aa68cac9fdf8..3fd462fda625 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -29,34 +29,60 @@ static void tiles_fini(void *arg)
struct xe_tile *tile;
int id;
- for_each_tile(tile, xe, id)
- if (tile != xe_device_get_root_tile(xe))
- tile->mmio.regs = NULL;
+ for_each_remote_tile(tile, xe, id)
+ tile->mmio.regs = NULL;
}
-int xe_mmio_probe_tiles(struct xe_device *xe)
+/*
+ * On multi-tile devices, partition the BAR space for MMIO on each tile,
+ * possibly accounting for register override on the number of tiles available.
+ * Resulting memory layout is like below:
+ *
+ * .----------------------. <- tile_count * tile_mmio_size
+ * | .... |
+ * |----------------------| <- 2 * tile_mmio_size
+ * | tile1->mmio.regs |
+ * |----------------------| <- 1 * tile_mmio_size
+ * | tile0->mmio.regs |
+ * '----------------------' <- 0MB
+ */
+static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
- size_t tile_mmio_size = SZ_16M, tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
- u8 id, tile_count = xe->info.tile_count;
- struct xe_gt *gt = xe_root_mmio_gt(xe);
struct xe_tile *tile;
void __iomem *regs;
- u32 mtcfg;
+ u8 id;
- if (tile_count == 1)
- goto add_mmio_ext;
+ /*
+	 * Nothing to be done as tile 0 has already been set up earlier with the
+ * entire BAR mapped - see xe_mmio_init()
+ */
+ if (xe->info.tile_count == 1)
+ return;
+	/* Possibly override the number of tiles based on the configuration register */
if (!xe->info.skip_mtcfg) {
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ u8 tile_count;
+ u32 mtcfg;
+
+ /*
+		 * Although the per-tile mmio regs are not yet initialized, this
+		 * is fine as the read goes to the root gt, which is guaranteed to
+		 * have been initialized earlier in xe_mmio_init()
+ */
mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+
if (tile_count < xe->info.tile_count) {
drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
xe->info.tile_count, tile_count);
xe->info.tile_count = tile_count;
/*
- * FIXME: Needs some work for standalone media, but should be impossible
- * with multi-tile for now.
+ * FIXME: Needs some work for standalone media, but
+ * should be impossible with multi-tile for now:
+ * multi-tile platform with standalone media doesn't
+ * exist
*/
xe->info.gt_count = xe->info.tile_count;
}
@@ -68,23 +94,51 @@ int xe_mmio_probe_tiles(struct xe_device *xe)
tile->mmio.regs = regs;
regs += tile_mmio_size;
}
+}
-add_mmio_ext:
- /*
- * By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile).
- * When supported, there could be an additional contiguous multi-tile MMIO extension
- * space ON TOP of it, and hence the necessity for distinguished MMIO spaces.
- */
- if (xe->info.has_mmio_ext) {
- regs = xe->mmio.regs + tile_mmio_size * tile_count;
+/*
+ * On top of all the multi-tile MMIO space there can be a platform-dependent
+ * extension for each tile, resulting in a layout like below:
+ *
+ * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size
+ * | .... |
+ * |----------------------| <- ext_base + 2 * tile_mmio_ext_size
+ * | tile1->mmio_ext.regs |
+ * |----------------------| <- ext_base + 1 * tile_mmio_ext_size
+ * | tile0->mmio_ext.regs |
+ * |======================| <- ext_base = tile_count * tile_mmio_size
+ * | |
+ * | mmio.regs |
+ * | |
+ * '----------------------' <- 0MB
+ *
+ * Set up the tile[]->mmio_ext pointers/sizes.
+ */
+static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
+ size_t tile_mmio_ext_size)
+{
+ struct xe_tile *tile;
+ void __iomem *regs;
+ u8 id;
- for_each_tile(tile, xe, id) {
- tile->mmio_ext.size = tile_mmio_ext_size;
- tile->mmio_ext.regs = regs;
+ if (!xe->info.has_mmio_ext)
+ return;
- regs += tile_mmio_ext_size;
- }
+ regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count;
+ for_each_tile(tile, xe, id) {
+ tile->mmio_ext.size = tile_mmio_ext_size;
+ tile->mmio_ext.regs = regs;
+ regs += tile_mmio_ext_size;
}
+}
+
+int xe_mmio_probe_tiles(struct xe_device *xe)
+{
+ size_t tile_mmio_size = SZ_16M;
+ size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
+
+ mmio_multi_tile_setup(xe, tile_mmio_size);
+ mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size);
return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
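The two layout comments above reduce to simple pointer arithmetic; a minimal
sketch of the invariant for tile id N (illustrative only, using the device's
tiles array):

	xe->tiles[N].mmio.regs     = xe->mmio.regs + N * tile_mmio_size;
	xe->tiles[N].mmio_ext.regs = xe->mmio.regs +
				     xe->info.tile_count * tile_mmio_size +
				     N * tile_mmio_ext_size;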
@@ -174,7 +228,11 @@ void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
- writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+
+ if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
+ xe_gt_sriov_vf_write32(gt, reg, val);
+ else
+ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}
u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
@@ -277,37 +335,24 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
return (u64)udw << 32 | ldw;
}
-/**
- * xe_mmio_wait32() - Wait for a register to match the desired masked value
- * @gt: MMIO target GT
- * @reg: register to read value from
- * @mask: mask to be applied to the value read from the register
- * @val: desired value after applying the mask
- * @timeout_us: time out after this period of time. Wait logic tries to be
- * smart, applying an exponential backoff until @timeout_us is reached.
- * @out_val: if not NULL, points where to store the last unmasked value
- * @atomic: needs to be true if calling from an atomic context
- *
- * This function polls for the desired masked value and returns zero on success
- * or -ETIMEDOUT if timed out.
- *
- * Note that @timeout_us represents the minimum amount of time to wait before
- * giving up. The actual time taken by this function can be a little more than
- * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
- * it is possible that this function succeeds even after @timeout_us has passed.
- */
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic)
+static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+ u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us);
int ret = -ETIMEDOUT;
s64 wait = 10;
u32 read;
+ bool check;
for (;;) {
read = xe_mmio_read32(gt, reg);
- if ((read & mask) == val) {
+
+ check = (read & mask) == val;
+ if (!expect_match)
+ check = !check;
+
+ if (check) {
ret = 0;
break;
}
@@ -328,7 +373,12 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
if (ret != 0) {
read = xe_mmio_read32(gt, reg);
- if ((read & mask) == val)
+
+ check = (read & mask) == val;
+ if (!expect_match)
+ check = !check;
+
+ if (check)
ret = 0;
}
@@ -339,62 +389,45 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
}
/**
- * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+ * xe_mmio_wait32() - Wait for a register to match the desired masked value
* @gt: MMIO target GT
* @reg: register to read value from
* @mask: mask to be applied to the value read from the register
- * @val: value to match after applying the mask
+ * @val: desired value after applying the mask
* @timeout_us: time out after this period of time. Wait logic tries to be
* smart, applying an exponential backoff until @timeout_us is reached.
* @out_val: if not NULL, points where to store the last unmasked value
* @atomic: needs to be true if calling from an atomic context
*
- * This function polls for a masked value to change from a given value and
- * returns zero on success or -ETIMEDOUT if timed out.
+ * This function polls for the desired masked value and returns zero on success
+ * or -ETIMEDOUT if timed out.
*
* Note that @timeout_us represents the minimum amount of time to wait before
* giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
* it is possible that this function succeeds even after @timeout_us has passed.
*/
+int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+ u32 *out_val, bool atomic)
+{
+ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+}
+
+/**
+ * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: value not to be matched after applying the mask
+ * @timeout_us: time out after this period of time
+ * @out_val: if not NULL, points where to store the last unmasked value
+ * @atomic: needs to be true if calling from an atomic context
+ *
+ * This function works exactly like xe_mmio_wait32() with the exception that
+ * @val is expected not to be matched.
+ */
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic)
{
- ktime_t cur = ktime_get_raw();
- const ktime_t end = ktime_add_us(cur, timeout_us);
- int ret = -ETIMEDOUT;
- s64 wait = 10;
- u32 read;
-
- for (;;) {
- read = xe_mmio_read32(gt, reg);
- if ((read & mask) != val) {
- ret = 0;
- break;
- }
-
- cur = ktime_get_raw();
- if (!ktime_before(cur, end))
- break;
-
- if (ktime_after(ktime_add_us(cur, wait), end))
- wait = ktime_us_delta(end, cur);
-
- if (atomic)
- udelay(wait);
- else
- usleep_range(wait, wait << 1);
- wait <<= 1;
- }
-
- if (ret != 0) {
- read = xe_mmio_read32(gt, reg);
- if ((read & mask) != val)
- ret = 0;
- }
-
- if (out_val)
- *out_val = read;
-
- return ret;
+ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
}
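A hedged usage sketch of the polling helpers (the register, mask, and value
names below are hypothetical):

	u32 last;
	int err;

	/* Poll up to 10ms for the masked status to read back as ready */
	err = xe_mmio_wait32(gt, HYP_STATUS_REG, HYP_STATUS_MASK,
			     HYP_STATUS_READY, 10000, &last, false);
	if (err == -ETIMEDOUT)
		drm_err(&gt_to_xe(gt)->drm,
			"status wait timed out, last read %#x\n", last);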
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 6ae0cc32c651..26551410ecc8 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -22,7 +22,6 @@ u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
-int xe_mmio_probe_vram(struct xe_device *xe);
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic);
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 499540add465..bfc3deebdaa2 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -8,14 +8,17 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <drm/drm_module.h>
+
#include "xe_drv.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
+#include "xe_pm.h"
#include "xe_observation.h"
#include "xe_sched_job.h"
struct xe_modparam xe_modparam = {
- .enable_display = true,
+ .probe_display = true,
.guc_log_level = 5,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
@@ -25,8 +28,8 @@ struct xe_modparam xe_modparam = {
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
-module_param_named(enable_display, xe_modparam.enable_display, bool, 0444);
-MODULE_PARM_DESC(enable_display, "Enable display");
+module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
+MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
@@ -61,13 +64,28 @@ module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
MODULE_PARM_DESC(wedged_mode,
"Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang");
+static int xe_check_nomodeset(void)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return 0;
+}
+
struct init_funcs {
int (*init)(void);
void (*exit)(void);
};
+static void xe_dummy_exit(void)
+{
+}
+
static const struct init_funcs init_funcs[] = {
{
+ .init = xe_check_nomodeset,
+ },
+ {
.init = xe_hw_fence_module_init,
.exit = xe_hw_fence_module_exit,
},
@@ -83,17 +101,41 @@ static const struct init_funcs init_funcs[] = {
.init = xe_observation_sysctl_register,
.exit = xe_observation_sysctl_unregister,
},
+ {
+ .init = xe_pm_module_init,
+ .exit = xe_dummy_exit,
+ },
};
+static int __init xe_call_init_func(unsigned int i)
+{
+ if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
+ return 0;
+ if (!init_funcs[i].init)
+ return 0;
+
+ return init_funcs[i].init();
+}
+
+static void xe_call_exit_func(unsigned int i)
+{
+ if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
+ return;
+ if (!init_funcs[i].exit)
+ return;
+
+ init_funcs[i].exit();
+}
+
static int __init xe_init(void)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
- err = init_funcs[i].init();
+ err = xe_call_init_func(i);
if (err) {
while (i--)
- init_funcs[i].exit();
+ xe_call_exit_func(i);
return err;
}
}
@@ -106,7 +148,7 @@ static void __exit xe_exit(void)
int i;
for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
- init_funcs[i].exit();
+ xe_call_exit_func(i);
}
module_init(xe_init);
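Extending the table is now a one-entry change, since NULL callbacks are
skipped by the wrappers; a sketch with a hypothetical subsystem:

	static const struct init_funcs init_funcs[] = {
		/* ... existing entries ... */
		{
			.init = xe_foo_module_init,	/* hypothetical */
			.exit = xe_foo_module_exit,	/* hypothetical */
		},
	};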
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 61a0d28a28c8..161a5e6f717f 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -11,7 +11,7 @@
/* Module modprobe variables */
struct xe_modparam {
bool force_execlist;
- bool enable_display;
+ bool probe_display;
u32 force_vram_bar_size;
int guc_log_level;
char *guc_firmware_path;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 22f14eba2c63..eae38a49ee8e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -10,7 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "abi/guc_actions_slpc_abi.h"
#include "instructions/xe_mi_commands.h"
@@ -645,7 +645,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
u32 offset = xe_bo_ggtt_addr(lrc->bo);
do {
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | BIT(22) /* GGTT */ | 2;
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
bb->cs[bb->len++] = 0;
bb->cs[bb->len++] = flex->value;
@@ -1248,8 +1248,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
vm_flags_mod(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY,
VM_MAYWRITE | VM_MAYEXEC);
- xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages ==
- (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+ xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages == vma_pages(vma));
for (i = 0; i < bo->ttm.ttm->num_pages; i++) {
ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]),
PAGE_SIZE, vma->vm_page_prot);
@@ -1264,7 +1263,6 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
static const struct file_operations xe_oa_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.release = xe_oa_release,
.poll = xe_oa_poll,
.read = xe_oa_read,
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 540c3ec53a6d..8862eca73fbe 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -11,7 +11,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index a78c92a44ec2..8ec1b84cbb9e 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -6,7 +6,7 @@
#include <linux/errno.h>
#include <linux/sysctl.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_oa.h"
#include "xe_observation.h"
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 722278cc23fc..f291a1730024 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -5,7 +5,7 @@
#include "xe_pat.h"
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 732ee0d02124..937c3e064f0d 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -59,6 +59,7 @@ struct xe_device_desc {
u8 has_display:1;
u8 has_heci_gscfi:1;
+ u8 has_heci_cscfi:1;
u8 has_llc:1;
u8 has_mmio_ext:1;
u8 has_sriov:1;
@@ -337,14 +338,13 @@ static const struct xe_device_desc mtl_desc = {
static const struct xe_device_desc lnl_desc = {
PLATFORM(LUNARLAKE),
.has_display = true,
- .require_force_probe = true,
};
static const struct xe_device_desc bmg_desc = {
DGFX_FEATURES,
PLATFORM(BATTLEMAGE),
.has_display = true,
- .require_force_probe = true,
+ .has_heci_cscfi = 1,
};
#undef PLATFORM
@@ -606,6 +606,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
+ xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
xe->info.has_mmio_ext = desc->has_mmio_ext;
xe->info.has_sriov = desc->has_sriov;
@@ -613,9 +614,9 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
- xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
- xe_modparam.enable_display &&
- desc->has_display;
+ xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
+ xe_modparam.probe_display &&
+ desc->has_display;
err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
if (err)
@@ -744,7 +745,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
{
struct xe_device *xe;
- xe = pci_get_drvdata(pdev);
+ xe = pdev_to_xe_device(pdev);
if (!xe) /* driver load aborted, nothing to cleanup */
return;
@@ -792,7 +793,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(xe))
return PTR_ERR(xe);
- pci_set_drvdata(pdev, xe);
+ pci_set_drvdata(pdev, &xe->drm);
xe_pm_assert_unbounded_bridge(xe);
subplatform_desc = find_subplatform(xe, desc);
@@ -815,7 +816,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
+ drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
desc->platform_name,
subplatform_desc ? subplatform_desc->name : "",
xe->info.devid, xe->info.revid,
@@ -826,14 +827,13 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
xe->info.media_name,
xe->info.media_verx100 / 100,
xe->info.media_verx100 % 100,
- str_yes_no(xe->info.enable_display),
+ str_yes_no(xe->info.probe_display),
xe->info.dma_mask_size, xe->info.tile_count,
- xe->info.has_heci_gscfi);
+ xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);
- drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n",
+ drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
xe_step_name(xe->info.step.graphics),
xe_step_name(xe->info.step.media),
- xe_step_name(xe->info.step.display),
xe_step_name(xe->info.step.basedie));
drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 9a3f618d22dc..7cf2160fe040 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -20,6 +20,7 @@
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
+#include "xe_trace.h"
#include "xe_wa.h"
/**
@@ -69,12 +70,42 @@
*/
#ifdef CONFIG_LOCKDEP
-static struct lockdep_map xe_pm_runtime_lockdep_map = {
- .name = "xe_pm_runtime_lockdep_map"
+static struct lockdep_map xe_pm_runtime_d3cold_map = {
+ .name = "xe_rpm_d3cold_map"
+};
+
+static struct lockdep_map xe_pm_runtime_nod3cold_map = {
+ .name = "xe_rpm_nod3cold_map"
};
#endif
/**
+ * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
+ * @xe: The xe device.
+ *
+ * Return: true if it is safe to runtime resume from reclaim context.
+ * false otherwise.
+ */
+bool xe_rpm_reclaim_safe(const struct xe_device *xe)
+{
+ return !xe->d3cold.capable && !xe->info.has_sriov;
+}
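A hedged sketch of what the predicate guards: only when it holds may a
reclaim-context path (such as a shrinker) wake the device, since the d3cold
resume path takes dma_resv locks to restore bos, which is not reclaim-safe:

	/* Illustrative only: reclaim-context code would check first */
	if (xe_rpm_reclaim_safe(xe))
		xe_pm_runtime_get(xe);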
+
+static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
+{
+ lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
+ &xe_pm_runtime_nod3cold_map :
+ &xe_pm_runtime_d3cold_map);
+}
+
+static void xe_rpm_lockmap_release(const struct xe_device *xe)
+{
+ lock_map_release(xe_rpm_reclaim_safe(xe) ?
+ &xe_pm_runtime_nod3cold_map :
+ &xe_pm_runtime_d3cold_map);
+}
+
+/**
* xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
* @xe: xe device instance
*
@@ -87,6 +118,7 @@ int xe_pm_suspend(struct xe_device *xe)
int err;
drm_dbg(&xe->drm, "Suspending device\n");
+ trace_xe_pm_suspend(xe, __builtin_return_address(0));
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
@@ -131,6 +163,7 @@ int xe_pm_resume(struct xe_device *xe)
int err;
drm_dbg(&xe->drm, "Resuming device\n");
+ trace_xe_pm_resume(xe, __builtin_return_address(0));
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
@@ -326,6 +359,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
u8 id;
int err = 0;
+ trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
@@ -350,7 +384,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
* annotation here and in xe_pm_runtime_get() lockdep will see
* the potential lock inversion and give us a nice splat.
*/
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
@@ -362,9 +396,9 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_bo_runtime_pm_release_mmap_offset(bo);
mutex_unlock(&xe->mem_access.vram_userfault.lock);
- if (xe->d3cold.allowed) {
- xe_display_pm_suspend(xe, true);
+ xe_display_pm_runtime_suspend(xe);
+ if (xe->d3cold.allowed) {
err = xe_bo_evict_all(xe);
if (err)
goto out;
@@ -382,8 +416,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_display_pm_suspend_late(xe);
out:
if (err)
- xe_display_pm_resume(xe, true);
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_display_pm_runtime_resume(xe);
+ xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
}
@@ -400,10 +434,11 @@ int xe_pm_runtime_resume(struct xe_device *xe)
u8 id;
int err = 0;
+ trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
if (xe->d3cold.allowed) {
err = xe_pcode_ready(xe, true);
@@ -426,14 +461,16 @@ int xe_pm_runtime_resume(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_resume(gt);
+ xe_display_pm_runtime_resume(xe);
+
if (xe->d3cold.allowed) {
- xe_display_pm_resume(xe, true);
err = xe_bo_restore_user(xe);
if (err)
goto out;
}
+
out:
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
}
@@ -447,15 +484,37 @@ out:
* stuff that can happen inside the runtime_resume callback by acquiring
* a dummy lock (it doesn't protect anything and gets compiled out on
* non-debug builds). Lockdep then only needs to see the
- * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
- * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map.
+ * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
* For example if the (callers_locks) are ever grabbed in the
* runtime_resume callback, lockdep should give us a nice splat.
*/
-static void pm_runtime_lockdep_prime(void)
+static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
+ xe_rpm_lockmap_release(xe);
+}
+
+/*
+ * Prime the lockdep maps for known locking orders that need to
+ * be supported but that may not always occur on all systems.
+ */
+static void xe_pm_runtime_lockdep_prime(void)
+{
+ struct dma_resv lockdep_resv;
+
+ dma_resv_init(&lockdep_resv);
+ lock_map_acquire(&xe_pm_runtime_d3cold_map);
+ /* D3Cold takes the dma_resv locks to evict bos */
+ dma_resv_lock(&lockdep_resv, NULL);
+ dma_resv_unlock(&lockdep_resv);
+ lock_map_release(&xe_pm_runtime_d3cold_map);
+
+ /* Shrinkers might like to wake up the device under reclaim. */
+ fs_reclaim_acquire(GFP_KERNEL);
+ lock_map_acquire(&xe_pm_runtime_nod3cold_map);
+ lock_map_release(&xe_pm_runtime_nod3cold_map);
+ fs_reclaim_release(GFP_KERNEL);
}
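The priming idiom generalizes to any lock the runtime-resume path may take; a
sketch with a hypothetical lock:

	/* Teach lockdep that the d3cold resume path may take hyp_lock */
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	mutex_lock(&hyp_lock);		/* hypothetical */
	mutex_unlock(&hyp_lock);
	lock_map_release(&xe_pm_runtime_d3cold_map);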
/**
@@ -464,12 +523,13 @@ static void pm_runtime_lockdep_prime(void)
*/
void xe_pm_runtime_get(struct xe_device *xe)
{
+ trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
pm_runtime_get_noresume(xe->drm.dev);
if (xe_pm_read_callback_task(xe) == current)
return;
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
pm_runtime_resume(xe->drm.dev);
}
@@ -479,6 +539,7 @@ void xe_pm_runtime_get(struct xe_device *xe)
*/
void xe_pm_runtime_put(struct xe_device *xe)
{
+ trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
if (xe_pm_read_callback_task(xe) == current) {
pm_runtime_put_noidle(xe->drm.dev);
} else {
@@ -496,10 +557,11 @@ void xe_pm_runtime_put(struct xe_device *xe)
*/
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
+ trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
if (WARN_ON(xe_pm_read_callback_task(xe) == current))
return -ELOOP;
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
return pm_runtime_get_sync(xe->drm.dev);
}
@@ -533,6 +595,22 @@ bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}
+/*
+ * Very unreliable! Should only be used to suppress the false positive case
+ * in the missing outer rpm protection warning.
+ */
+static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
+{
+#ifdef CONFIG_PM
+ struct device *dev = xe->drm.dev;
+
+ return dev->power.runtime_status == RPM_SUSPENDING ||
+ dev->power.runtime_status == RPM_RESUMING;
+#else
+ return false;
+#endif
+}
+
/**
* xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
* @xe: xe device instance
@@ -549,8 +627,11 @@ void xe_pm_runtime_get_noresume(struct xe_device *xe)
ref = xe_pm_runtime_get_if_in_use(xe);
- if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n"))
+ if (!ref) {
pm_runtime_get_noresume(xe->drm.dev);
+ drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
+ "Missing outer runtime PM protection\n");
+ }
}
/**
@@ -567,7 +648,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
return true;
}
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
@@ -659,3 +740,14 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
drm_dbg(&xe->drm,
"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
+
+/**
+ * xe_pm_module_init() - Perform xe_pm specific module initialization.
+ *
+ * Return: 0 on success. Currently doesn't fail.
+ */
+int __init xe_pm_module_init(void)
+{
+ xe_pm_runtime_lockdep_prime();
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 104a21ae6dfd..998d1ed64556 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -31,6 +31,8 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
+int xe_pm_module_init(void);
#endif
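A minimal usage sketch of the runtime PM helpers declared above (assumed
calling contexts, error handling elided):

	/* Inner driver path: may resume the device synchronously */
	xe_pm_runtime_get(xe);
	/* ... touch hardware ... */
	xe_pm_runtime_put(xe);

	/* Opportunistic path: only proceed if already awake and in use */
	if (xe_pm_runtime_get_if_in_use(xe)) {
		/* ... fast path ... */
		xe_pm_runtime_put(xe);
	}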
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index c453f45328b1..83fbeea5aa20 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -17,10 +17,16 @@ static void preempt_fence_work_func(struct work_struct *w)
container_of(w, typeof(*pfence), preempt_work);
struct xe_exec_queue *q = pfence->q;
- if (pfence->error)
+ if (pfence->error) {
dma_fence_set_error(&pfence->base, pfence->error);
- else
- q->ops->suspend_wait(q);
+ } else if (!q->ops->reset_status(q)) {
+ int err = q->ops->suspend_wait(q);
+
+ if (err)
+ dma_fence_set_error(&pfence->base, err);
+ } else {
+ dma_fence_set_error(&pfence->base, -ENOENT);
+ }
dma_fence_signal(&pfence->base);
/*
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 31a751a5de3f..d6353e8969f0 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -3,18 +3,23 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/dma-fence-array.h>
+
#include "xe_pt.h"
#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
+#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
+#include "xe_sched_job.h"
+#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -325,6 +330,7 @@ xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
entry->pt = parent;
entry->flags = 0;
entry->qwords = 0;
+ entry->pt_bo->update_index = -1;
if (alloc_entries) {
entry->pt_entries = kmalloc_array(XE_PDES,
@@ -842,19 +848,27 @@ xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *t
}
}
-static void xe_pt_abort_bind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries)
+static void xe_pt_cancel_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
{
u32 i, j;
for (i = 0; i < num_entries; i++) {
- if (!entries[i].pt_entries)
+ struct xe_pt *pt = entries[i].pt;
+
+ if (!pt)
continue;
- for (j = 0; j < entries[i].qwords; j++)
- xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
+ if (pt->level) {
+ for (j = 0; j < entries[i].qwords; j++)
+ xe_pt_destroy(entries[i].pt_entries[j].pt,
+ xe_vma_vm(vma)->flags, NULL);
+ }
+
kfree(entries[i].pt_entries);
+ entries[i].pt_entries = NULL;
+ entries[i].qwords = 0;
}
}
@@ -864,18 +878,15 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
lockdep_assert_held(&vm->lock);
- if (xe_vma_is_userptr(vma))
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
- else if (!xe_vma_is_null(vma))
+ if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
xe_vm_assert_held(vm);
}
-static void xe_pt_commit_bind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries, bool rebind,
- struct llist_head *deferred)
+static void xe_pt_commit(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, struct llist_head *deferred)
{
u32 i, j;
@@ -883,31 +894,90 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+
+ if (!pt->level)
+ continue;
+
+ for (j = 0; j < entries[i].qwords; j++) {
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+ xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ }
+ }
+}
+
+static void xe_pt_abort_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind)
+{
+ int i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_pt *pt = entries[i].pt;
struct xe_pt_dir *pt_dir;
if (!rebind)
- pt->num_live += entries[i].qwords;
+ pt->num_live -= entries[i].qwords;
- if (!pt->level) {
- kfree(entries[i].pt_entries);
+ if (!pt->level)
continue;
+
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = 0; j < entries[i].qwords; j++) {
+ u32 j_ = j + entries[i].ofs;
+ struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+ pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
+ }
+}
+
+static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind)
+{
+ u32 i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = 0; i < num_entries; i++) {
+ struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
+
+ if (!rebind)
+ pt->num_live += entries[i].qwords;
+
+ if (!pt->level)
+ continue;
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
+ struct xe_pt *oldpte = NULL;
if (xe_pt_entry(pt_dir, j_))
- xe_pt_destroy(xe_pt_entry(pt_dir, j_),
- xe_vma_vm(vma)->flags, deferred);
+ oldpte = xe_pt_entry(pt_dir, j_);
pt_dir->children[j_] = &newpte->base;
+ entries[i].pt_entries[j].pt = oldpte;
}
- kfree(entries[i].pt_entries);
}
}
+static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ u32 i;
+
+ for (i = 0; i < num_entries; i++)
+ kfree(entries[i].pt_entries);
+}
+
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
@@ -918,20 +988,19 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
err = xe_pt_stage_bind(tile, vma, entries, num_entries);
if (!err)
xe_tile_assert(tile, *num_entries);
- else /* abort! */
- xe_pt_abort_bind(vma, entries, *num_entries);
return err;
}
static void xe_vm_dbg_print_entries(struct xe_device *xe,
const struct xe_vm_pgtable_update *entries,
- unsigned int num_entries)
+ unsigned int num_entries, bool bind)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{
unsigned int i;
- vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
+ vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
+ num_entries);
for (i = 0; i < num_entries; i++) {
const struct xe_vm_pgtable_update *entry = &entries[i];
struct xe_pt *xe_pt = entry->pt;
@@ -952,66 +1021,108 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
{}
#endif
-#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
-
-static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
{
- u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
- static u32 count;
+ int i;
- if (count++ % divisor == divisor - 1) {
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+ for (i = 0; i < num_syncs; i++) {
+ struct dma_fence *fence = syncs[i].fence;
- uvma->userptr.divisor = divisor << 1;
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
- return true;
+ if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags))
+ return false;
}
- return false;
+ return true;
}
-#else
+static int job_test_add_deps(struct xe_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ if (!job) {
+ if (!dma_resv_test_signaled(resv, usage))
+ return -ETIME;
-static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+ return 0;
+ }
+
+ return xe_sched_job_add_deps(job, resv, usage);
+}
+
+static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
{
- return false;
+ struct xe_bo *bo = xe_vma_bo(vma);
+
+ xe_bo_assert_held(bo);
+
+ if (bo && !bo->vm)
+ return job_test_add_deps(job, bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL);
+
+ return 0;
}
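job_test_add_deps() folds the CPU and GPU flavors into one call; a sketch of
the two behaviors:

	/* CPU path (job == NULL): just test, fail with -ETIME if unsignaled */
	err = job_test_add_deps(NULL, resv, DMA_RESV_USAGE_KERNEL);

	/* GPU path: record the fence as a job dependency instead */
	err = job_test_add_deps(job, resv, DMA_RESV_USAGE_KERNEL);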
-#endif
+static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_sched_job *job)
+{
+ int err = 0;
-/**
- * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks
- * @base: Base we derive from.
- * @bind: Whether this is a bind or an unbind operation. A bind operation
- * makes the pre-commit callback error with -EAGAIN if it detects a
- * pending invalidation.
- * @locked: Whether the pre-commit callback locked the userptr notifier lock
- * and it needs unlocking.
- */
-struct xe_pt_migrate_pt_update {
- struct xe_migrate_pt_update base;
- bool bind;
- bool locked;
-};
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = vma_add_deps(op->map.vma, job);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ err = vma_add_deps(op->remap.prev, job);
+ if (!err && op->remap.next)
+ err = vma_add_deps(op->remap.next, job);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
-/*
- * This function adds the needed dependencies to a page-table update job
- * to make sure racing jobs for separate bind engines don't race writing
- * to the same page-table range, wreaking havoc. Initially use a single
- * fence for the entire VM. An optimization would use smaller granularity.
- */
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
- struct xe_range_fence_tree *rftree,
- u64 start, u64 last)
+ struct xe_vm *vm,
+ struct xe_vma_ops *vops,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_range_fence_tree *rftree)
{
struct xe_range_fence *rtfence;
struct dma_fence *fence;
- int err;
+ struct xe_vma_op *op;
+ int err = 0, i;
+
+ xe_vm_assert_held(vm);
+
+ if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
+ return -ETIME;
- rtfence = xe_range_fence_tree_first(rftree, start, last);
+ if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
+ return -ETIME;
+
+ if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
+ err = job_test_add_deps(job, xe_vm_resv(vm),
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_BOOKKEEP :
+ DMA_RESV_USAGE_KERNEL);
+ if (err)
+ return err;
+ }
+
+ rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
+ pt_update_ops->last);
while (rtfence) {
fence = rtfence->fence;
@@ -1029,80 +1140,175 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
return err;
}
- rtfence = xe_range_fence_tree_next(rtfence, start, last);
+ rtfence = xe_range_fence_tree_next(rtfence,
+ pt_update_ops->start,
+ pt_update_ops->last);
}
- return 0;
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_add_deps(vm, op, job);
+ if (err)
+ return err;
+ }
+
+ if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
+ if (job)
+ err = xe_sched_job_last_fence_add_dep(job, vm);
+ else
+ err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
+ }
+
+ for (i = 0; job && !err && i < vops->num_syncs; i++)
+ err = xe_sync_entry_add_deps(&vops->syncs[i], job);
+
+ return err;
}
static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
{
- struct xe_range_fence_tree *rftree =
- &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id];
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm *vm = vops->vm;
+ struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+
+ return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
+ pt_update_ops, rftree);
+}
+
+#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
- return xe_pt_vm_dependencies(pt_update->job, rftree,
- pt_update->start, pt_update->last);
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+{
+ u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
+ static u32 count;
+
+ if (count++ % divisor == divisor - 1) {
+ uvma->userptr.divisor = divisor << 1;
+ return true;
+ }
+
+ return false;
}
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+#else
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
- struct xe_pt_migrate_pt_update *userptr_update =
- container_of(pt_update, typeof(*userptr_update), base);
- struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
- unsigned long notifier_seq = uvma->userptr.notifier_seq;
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
- int err = xe_pt_vm_dependencies(pt_update->job,
- &vm->rftree[pt_update->tile_id],
- pt_update->start,
- pt_update->last);
+ return false;
+}
- if (err)
- return err;
+#endif
- userptr_update->locked = false;
+static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vm_pgtable_update_ops *pt_update)
+{
+ struct xe_userptr_vma *uvma;
+ unsigned long notifier_seq;
- /*
- * Wait until nobody is running the invalidation notifier, and
- * since we're exiting the loop holding the notifier lock,
- * nobody can proceed invalidating either.
- *
- * Note that we don't update the vma->userptr.notifier_seq since
- * we don't update the userptr pages.
- */
- do {
- down_read(&vm->userptr.notifier_lock);
- if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq))
- break;
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
- up_read(&vm->userptr.notifier_lock);
+ if (!xe_vma_is_userptr(vma))
+ return 0;
- if (userptr_update->bind)
- return -EAGAIN;
+ uvma = to_userptr_vma(vma);
+ notifier_seq = uvma->userptr.notifier_seq;
- notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
- } while (true);
+ if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
+ return 0;
- /* Inject errors to test_whether they are handled correctly */
- if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
- up_read(&vm->userptr.notifier_lock);
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ notifier_seq) &&
+ !xe_pt_userptr_inject_eagain(uvma))
+ return 0;
+
+ if (xe_vm_in_fault_mode(vm)) {
return -EAGAIN;
- }
+ } else {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
- userptr_update->locked = true;
+ if (xe_vm_in_preempt_fence_mode(vm)) {
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ long err;
+
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+ }
+ }
return 0;
}
-static const struct xe_migrate_pt_update_ops bind_ops = {
- .populate = xe_vm_populate_pgtable,
- .pre_commit = xe_pt_pre_commit,
-};
+static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_vm_pgtable_update_ops *pt_update)
+{
+ int err = 0;
-static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
- .populate = xe_vm_populate_pgtable,
- .pre_commit = xe_pt_userptr_pre_commit,
-};
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = vma_check_userptr(vm, op->map.vma, pt_update);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ err = vma_check_userptr(vm, op->remap.prev, pt_update);
+ if (!err && op->remap.next)
+ err = vma_check_userptr(vm, op->remap.next, pt_update);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
+ pt_update);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
+
+static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+ struct xe_vma_op *op;
+ int err;
+
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
+
+ down_read(&vm->userptr.notifier_lock);
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_check_userptr(vm, op, pt_update_ops);
+ if (err) {
+ up_read(&vm->userptr.notifier_lock);
+ break;
+ }
+ }
+
+ return err;
+}
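Note the asymmetry above: the notifier lock is dropped only on error, so a
successful pre-commit returns with it read-held. A hedged sketch of the
expected caller shape (illustrative, assuming the commit path releases it):

	err = ops->pre_commit(pt_update);	/* read-locks notifier on success */
	if (err)
		return ERR_PTR(err);
	/* ... commit the page-table updates ... */
	up_read(&vm->userptr.notifier_lock);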
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
@@ -1144,10 +1350,10 @@ static void invalidation_fence_work_func(struct work_struct *w)
ifence->end, ifence->asid);
}
-static int invalidation_fence_init(struct xe_gt *gt,
- struct invalidation_fence *ifence,
- struct dma_fence *fence,
- u64 start, u64 end, u32 asid)
+static void invalidation_fence_init(struct xe_gt *gt,
+ struct invalidation_fence *ifence,
+ struct dma_fence *fence,
+ u64 start, u64 end, u32 asid)
{
int ret;
@@ -1172,192 +1378,6 @@ static int invalidation_fence_init(struct xe_gt *gt,
}
xe_gt_assert(gt, !ret || ret == -ENOENT);
-
- return ret && ret != -ENOENT ? ret : 0;
-}
-
-static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
- struct xe_pt_migrate_pt_update *update,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries)
-{
- int i, level = 0;
-
- for (i = 0; i < num_entries; i++) {
- const struct xe_vm_pgtable_update *entry = &entries[i];
-
- if (entry->pt->level > level)
- level = entry->pt->level;
- }
-
- /* Greedy (non-optimal) calculation but simple */
- update->base.start = ALIGN_DOWN(xe_vma_start(vma),
- 0x1ull << xe_pt_shift(level));
- update->base.last = ALIGN(xe_vma_end(vma),
- 0x1ull << xe_pt_shift(level)) - 1;
-}
-
-/**
- * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma
- * address range.
- * @tile: The tile to bind for.
- * @vma: The vma to bind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before binding the built tree to the live vm tree.
- * @num_syncs: Number of @sync entries.
- * @rebind: Whether we're rebinding this vma to the same address range without
- * an unbind in-between.
- *
- * This function builds a page-table tree (see xe_pt_stage_bind() for more
- * information on page-table building), and the xe_vm_pgtable_update entries
- * abstracting the operations needed to attach it to the main vm tree. It
- * then takes the relevant locks and updates the metadata side of the main
- * vm tree and submits the operations for pipelined attachment of the
- * gpu page-table to the vm main tree, (which can be done either by the
- * cpu and the GPU).
- *
- * Return: A valid dma-fence representing the pipelined attachment operation
- * on success, an error pointer on error.
- */
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool rebind)
-{
- struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
- struct xe_pt_migrate_pt_update bind_pt_update = {
- .base = {
- .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops,
- .vma = vma,
- .tile_id = tile->id,
- },
- .bind = true,
- };
- struct xe_vm *vm = xe_vma_vm(vma);
- u32 num_entries;
- struct dma_fence *fence;
- struct invalidation_fence *ifence = NULL;
- struct xe_range_fence *rfence;
- int err;
-
- bind_pt_update.locked = false;
- xe_bo_assert_held(xe_vma_bo(vma));
- xe_vm_assert_held(vm);
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "Preparing bind, with range [%llx...%llx) engine %p.\n",
- xe_vma_start(vma), xe_vma_end(vma), q);
-
- err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
- if (err)
- goto err;
-
- err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
- if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
- if (err)
- goto err;
-
- xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
- xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
- xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
- num_entries);
-
- /*
- * If rebind, we have to invalidate TLB on !LR vms to invalidate
- * cached PTEs point to freed memory. on LR vms this is done
- * automatically when the context is re-enabled by the rebind worker,
- * or in fault mode it was invalidated on PTE zapping.
- *
- * If !rebind, and scratch enabled VMs, there is a chance the scratch
- * PTE is already cached in the TLB so it needs to be invalidated.
- * on !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
- */
- if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence)
- return ERR_PTR(-ENOMEM);
- } else if (rebind && !xe_vm_in_lr_mode(vm)) {
- /* We bump also if batch_invalidate_tlb is true */
- vm->tlb_flush_seqno++;
- }
-
- rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
- if (!rfence) {
- kfree(ifence);
- return ERR_PTR(-ENOMEM);
- }
-
- fence = xe_migrate_update_pgtables(tile->migrate,
- vm, xe_vma_bo(vma), q,
- entries, num_entries,
- syncs, num_syncs,
- &bind_pt_update.base);
- if (!IS_ERR(fence)) {
- bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND;
- LLIST_HEAD(deferred);
- int err;
-
- err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
- &xe_range_fence_kfree_ops,
- bind_pt_update.base.start,
- bind_pt_update.base.last, fence);
- if (err)
- dma_fence_wait(fence, false);
-
- /* TLB invalidation must be done before signaling rebind */
- if (ifence) {
- int err = invalidation_fence_init(tile->primary_gt,
- ifence, fence,
- xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
- if (err) {
- dma_fence_put(fence);
- kfree(ifence);
- return ERR_PTR(err);
- }
- fence = &ifence->base.base;
- }
-
- /* add shared fence now for pagetable delayed destroy */
- dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
- last_munmap_rebind ?
- DMA_RESV_USAGE_KERNEL :
- DMA_RESV_USAGE_BOOKKEEP);
-
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
- DMA_RESV_USAGE_BOOKKEEP);
- xe_pt_commit_bind(vma, entries, num_entries, rebind,
- bind_pt_update.locked ? &deferred : NULL);
-
- /* This vma is live (again?) now */
- vma->tile_present |= BIT(tile->id);
-
- if (bind_pt_update.locked) {
- to_userptr_vma(vma)->userptr.initial_bind = true;
- up_read(&vm->userptr.notifier_lock);
- xe_bo_put_commit(&deferred);
- }
- if (!rebind && last_munmap_rebind &&
- xe_vm_in_preempt_fence_mode(vm))
- xe_vm_queue_rebind_worker(vm);
- } else {
- kfree(rfence);
- kfree(ifence);
- if (bind_pt_update.locked)
- up_read(&vm->userptr.notifier_lock);
- xe_pt_abort_bind(vma, entries, num_entries);
- }
-
- return fence;
-
-err:
- return ERR_PTR(err);
}
struct xe_pt_stage_unbind_walk {
@@ -1442,6 +1462,7 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
pgoff_t end_offset;
u64 size = 1ull << walk->shifts[--level];
+ int err;
if (!IS_ALIGNED(addr, size))
addr = xe_walk->modified_start;
@@ -1457,7 +1478,10 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
&end_offset))
return 0;
- (void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false);
+ err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
+ if (err)
+ return err;
+
xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
return 0;
@@ -1510,8 +1534,8 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
void *ptr, u32 qword_ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update)
{
- struct xe_vma *vma = pt_update->vma;
- u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
+ struct xe_vm *vm = pt_update->vops->vm;
+ u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
int i;
if (map && map->is_iomem)
@@ -1525,181 +1549,644 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
memset64(ptr, empty, num_qwords);
}
+static void xe_pt_abort_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ int i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
+ struct xe_pt *pt = entry->pt;
+ struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+
+ pt->num_live += entry->qwords;
+
+ if (!pt->level)
+ continue;
+
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
+ pt_dir->children[j] =
+ entries[i].pt_entries[j - entry->ofs].pt ?
+ &entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
+ }
+}
+
static void
-xe_pt_commit_unbind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries, u32 num_entries,
- struct llist_head *deferred)
+xe_pt_commit_prepare_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
{
- u32 j;
+ int i, j;
xe_pt_commit_locks_assert(vma);
- for (j = 0; j < num_entries; ++j) {
- struct xe_vm_pgtable_update *entry = &entries[j];
+ for (i = 0; i < num_entries; ++i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
struct xe_pt *pt = entry->pt;
+ struct xe_pt_dir *pt_dir;
pt->num_live -= entry->qwords;
- if (pt->level) {
- struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
- u32 i;
+ if (!pt->level)
+ continue;
- for (i = entry->ofs; i < entry->ofs + entry->qwords;
- i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i),
- xe_vma_vm(vma)->flags, deferred);
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
+ entry->pt_entries[j - entry->ofs].pt =
+ xe_pt_entry(pt_dir, j);
+ pt_dir->children[j] = NULL;
+ }
+ }
+}
- pt_dir->children[i] = NULL;
- }
+static void
+xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int i, level = 0;
+ u64 start, last;
+
+ for (i = 0; i < pt_op->num_entries; i++) {
+ const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
+
+ if (entry->pt->level > level)
+ level = entry->pt->level;
+ }
+
+ /* Greedy (non-optimal) calculation but simple */
+ start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
+ last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
+
+ if (start < pt_update_ops->start)
+ pt_update_ops->start = start;
+ if (last > pt_update_ops->last)
+ pt_update_ops->last = last;
+}
+
+static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
+{
+ int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
+
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
+ xe->info.tile_count << shift);
+
+ return 0;
+}
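
Editor's note, to make the reservation arithmetic above concrete (hedged, since GT topology varies by platform): with a media GT present, shift = 1, so a device with info.tile_count = 2 reserves 2 << 1 = 4 fence slots per external BO — one potential bind/unbind fence per primary GT plus one per media GT on each tile.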
+
+static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing bind, with range [%llx...%llx)\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+ pt_op->vma = NULL;
+ pt_op->bind = true;
+ pt_op->rebind = BIT(tile->id) & vma->tile_present;
+
+ err = vma_reserve_fences(tile_to_xe(tile), vma);
+ if (err)
+ return err;
+
+ err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
+ &pt_op->num_entries);
+ if (!err) {
+ xe_tile_assert(tile, pt_op->num_entries <=
+ ARRAY_SIZE(pt_op->entries));
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, true);
+
+ xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+
+ /*
+		 * If rebind, we have to invalidate TLB on !LR vms to invalidate
+		 * cached PTEs pointing to freed memory. On LR vms this is done
+		 * automatically when the context is re-enabled by the rebind worker,
+		 * or in fault mode it was invalidated on PTE zapping.
+		 *
+		 * If !rebind, and on scratch-enabled VMs, there is a chance the scratch
+		 * PTE is already cached in the TLB so it needs to be invalidated.
+		 * On !LR VMs this is done in the ring ops preceding a batch, but on
+		 * non-faulting LR, in particular on user-space batch buffer chaining,
+		 * it needs to be done here.
+ */
+ if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
+ xe_vm_in_preempt_fence_mode(vm)))
+ pt_update_ops->needs_invalidation = true;
+ else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
+ /* We bump also if batch_invalidate_tlb is true */
+ vm->tlb_flush_seqno++;
+
+ vma->tile_staged |= BIT(tile->id);
+ pt_op->vma = vma;
+ xe_pt_commit_prepare_bind(vma, pt_op->entries,
+ pt_op->num_entries, pt_op->rebind);
+ } else {
+ xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
+ }
+
+ return err;
+}
+
+static int unbind_op_prepare(struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
+ return 0;
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing unbind, with range [%llx...%llx)\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+ /*
+	 * Wait for invalidation to complete. Internal page-table state can be
+	 * corrupted if an invalidation is running while an unbind is prepared.
+ */
+ if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
+ mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
+
+ pt_op->vma = vma;
+ pt_op->bind = false;
+ pt_op->rebind = false;
+
+ err = vma_reserve_fences(tile_to_xe(tile), vma);
+ if (err)
+ return err;
+
+ pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, false);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_invalidation = true;
+
+ xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
+
+ return 0;
+}
+
+static int op_prepare(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma_op *op)
+{
+ int err = 0;
+
+ xe_vm_assert_held(vm);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ pt_update_ops->wait_vm_kernel = true;
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va));
+
+ if (!err && op->remap.prev) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.prev);
+ pt_update_ops->wait_vm_bookkeep = true;
}
+ if (!err && op->remap.next) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.next);
+ pt_update_ops->wait_vm_bookkeep = true;
+ }
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.unmap.va));
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.prefetch.va));
+ pt_update_ops->wait_vm_kernel = true;
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
+
+ return err;
}
-static const struct xe_migrate_pt_update_ops unbind_ops = {
- .populate = xe_migrate_clear_pgtable_callback,
+static void
+xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
+{
+ init_llist_head(&pt_update_ops->deferred);
+ pt_update_ops->start = ~0x0ull;
+ pt_update_ops->last = 0x0ull;
+}
+
+/**
+ * xe_pt_update_ops_prepare() - Prepare PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Prepare PT update operations, which includes updating internal PT state,
+ * allocating memory for page tables, populating the new page tables, and
+ * creating PT update operations for leaf insertion / removal.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ struct xe_vma_op *op;
+ int shift = tile->media_gt ? 1 : 0;
+ int err;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ xe_pt_update_ops_init(pt_update_ops);
+
+ err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
+ tile_to_xe(tile)->info.tile_count << shift);
+ if (err)
+ return err;
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_prepare(vops->vm, tile, pt_update_ops, op);
+
+ if (err)
+ return err;
+ }
+
+ xe_tile_assert(tile, pt_update_ops->current_op <=
+ pt_update_ops->num_ops);
+
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
+ return -ENOSPC;
+#endif
+
+ return 0;
+}
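
Editor's note: the TEST_VM_OPS_ERROR block is compile-time error injection. When the VMA ops are flagged with inject_error and vm_inject_error_position selects this stage (FORCE_OP_ERROR_PREPARE here, FORCE_OP_ERROR_RUN in xe_pt_update_ops_run() below), the function fails deterministically so the abort/unwind paths can be exercised by tests.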
+
+static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (fence2)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ vma->tile_present |= BIT(tile->id);
+ vma->tile_staged &= ~BIT(tile->id);
+ if (xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ to_userptr_vma(vma)->userptr.initial_bind = true;
+ }
+
+ /*
+	 * Kick the rebind worker if this bind triggers preempt fences and we
+	 * are not already in the rebind worker.
+ */
+ if (pt_update_ops->wait_vm_bookkeep &&
+ xe_vm_in_preempt_fence_mode(vm) &&
+ !current->mm)
+ xe_vm_queue_rebind_worker(vm);
+}
+
+static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (fence2)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ vma->tile_present &= ~BIT(tile->id);
+ if (!vma->tile_present) {
+ list_del_init(&vma->combined_links.rebind);
+ if (xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+ }
+}
+
+static void op_commit(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma_op *op, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ xe_vm_assert_held(vm);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
+ fence2);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ unbind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va), fence,
+ fence2);
+
+ if (op->remap.prev)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
+ fence, fence2);
+ if (op->remap.next)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
+ fence, fence2);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ unbind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.unmap.va), fence, fence2);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ bind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.prefetch.va), fence, fence2);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+}
+
+static const struct xe_migrate_pt_update_ops migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_pre_commit,
};
-static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
- .populate = xe_migrate_clear_pgtable_callback,
+static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_userptr_pre_commit,
};
/**
- * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma
- * address range.
- * @tile: The tile to unbind for.
- * @vma: The vma to unbind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
- * @num_syncs: Number of @sync entries.
+ * xe_pt_update_ops_run() - Run PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
*
- * This function builds a the xe_vm_pgtable_update entries abstracting the
- * operations needed to detach the page-table tree to be destroyed from the
- * man vm tree.
- * It then takes the relevant locks and submits the operations for
- * pipelined detachment of the gpu page-table from the vm main tree,
- * (which can be done either by the cpu and the GPU), Finally it frees the
- * detached page-table tree.
+ * Run PT update operations, which includes committing internal PT state
+ * changes, creating a job for the PT update operations for leaf insertion /
+ * removal, and installing the job fence in the various dma-resv slots.
*
- * Return: A valid dma-fence representing the pipelined detachment operation
- * on success, an error pointer on error.
+ * Return: fence on success, error pointer on error.
*/
struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs)
+xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
{
- struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
- struct xe_pt_migrate_pt_update unbind_pt_update = {
- .base = {
- .ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops :
- &unbind_ops,
- .vma = vma,
- .tile_id = tile->id,
- },
- };
- struct xe_vm *vm = xe_vma_vm(vma);
- u32 num_entries;
- struct dma_fence *fence = NULL;
- struct invalidation_fence *ifence;
+ struct xe_vm *vm = vops->vm;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ struct dma_fence *fence;
+ struct invalidation_fence *ifence = NULL, *mfence = NULL;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
struct xe_range_fence *rfence;
- int err;
-
- LLIST_HEAD(deferred);
+ struct xe_vma_op *op;
+ int err = 0, i;
+ struct xe_migrate_pt_update update = {
+ .ops = pt_update_ops->needs_userptr_lock ?
+ &userptr_migrate_ops :
+ &migrate_ops,
+ .vops = vops,
+ .tile_id = tile->id,
+ };
- xe_bo_assert_held(xe_vma_bo(vma));
+ lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "Preparing unbind, with range [%llx...%llx) engine %p.\n",
- xe_vma_start(vma), xe_vma_end(vma), q);
+ if (!pt_update_ops->current_op) {
+ xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
- num_entries = xe_pt_stage_unbind(tile, vma, entries);
- xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
- xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
- xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
- num_entries);
+ return dma_fence_get_stub();
+ }
- err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
- if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
- if (err)
- return ERR_PTR(err);
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
+ return ERR_PTR(-ENOSPC);
+#endif
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence)
- return ERR_PTR(-ENOMEM);
+ if (pt_update_ops->needs_invalidation) {
+ ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence) {
+ err = -ENOMEM;
+ goto kill_vm_tile1;
+ }
+ if (tile->media_gt) {
+ mfence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!mfence) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ cf = dma_fence_array_alloc(2);
+ if (!cf) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ }
+ }
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
if (!rfence) {
- kfree(ifence);
- return ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
+ goto free_ifence;
}
- /*
- * Even if we were already evicted and unbind to destroy, we need to
- * clear again here. The eviction may have updated pagetables at a
- * lower level, because it needs to be more conservative.
- */
- fence = xe_migrate_update_pgtables(tile->migrate,
- vm, NULL, q ? q :
- vm->q[tile->id],
- entries, num_entries,
- syncs, num_syncs,
- &unbind_pt_update.base);
- if (!IS_ERR(fence)) {
- int err;
-
- err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
- &xe_range_fence_kfree_ops,
- unbind_pt_update.base.start,
- unbind_pt_update.base.last, fence);
- if (err)
- dma_fence_wait(fence, false);
+ fence = xe_migrate_update_pgtables(tile->migrate, &update);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto free_rfence;
+ }
- /* TLB invalidation must be done before signaling unbind */
- err = invalidation_fence_init(tile->primary_gt, ifence, fence,
- xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
- if (err) {
- dma_fence_put(fence);
- kfree(ifence);
- return ERR_PTR(err);
+	/* Point of no return: the VM is killed on any failure after this */
+ for (i = 0; i < pt_update_ops->current_op; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+ xe_pt_commit(pt_op->vma, pt_op->entries,
+ pt_op->num_entries, &pt_update_ops->deferred);
+ pt_op->vma = NULL; /* skip in xe_pt_update_ops_abort */
+ }
+
+ if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+ &xe_range_fence_kfree_ops,
+ pt_update_ops->start,
+ pt_update_ops->last, fence))
+ dma_fence_wait(fence, false);
+
+	/* TLB invalidation must be done before signaling rebind */
+ if (ifence) {
+ if (mfence)
+ dma_fence_get(fence);
+ invalidation_fence_init(tile->primary_gt, ifence, fence,
+ pt_update_ops->start,
+ pt_update_ops->last, vm->usm.asid);
+ if (mfence) {
+ invalidation_fence_init(tile->media_gt, mfence, fence,
+ pt_update_ops->start,
+ pt_update_ops->last, vm->usm.asid);
+ fences[0] = &ifence->base.base;
+ fences[1] = &mfence->base.base;
+ dma_fence_array_init(cf, 2, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ fence = &cf->base;
+ } else {
+ fence = &ifence->base.base;
}
- fence = &ifence->base.base;
+ }
- /* add shared fence now for pagetable delayed destroy */
+ if (!mfence) {
dma_resv_add_fence(xe_vm_resv(vm), fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
- /* This fence will be installed by caller when doing eviction */
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
- DMA_RESV_USAGE_BOOKKEEP);
- xe_pt_commit_unbind(vma, entries, num_entries,
- unbind_pt_update.locked ? &deferred : NULL);
- vma->tile_present &= ~BIT(tile->id);
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
} else {
- kfree(rfence);
- kfree(ifence);
- }
+ dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
- if (!vma->tile_present)
- list_del_init(&vma->combined_links.rebind);
+ dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
- if (unbind_pt_update.locked) {
- xe_tile_assert(tile, xe_vma_is_userptr(vma));
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op,
+ &ifence->base.base, &mfence->base.base);
+ }
- if (!vma->tile_present) {
- spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
- spin_unlock(&vm->userptr.invalidated_lock);
- }
+ if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
- xe_bo_put_commit(&deferred);
- }
return fence;
+
+free_rfence:
+ kfree(rfence);
+free_ifence:
+ kfree(cf);
+ kfree(fences);
+ kfree(mfence);
+ kfree(ifence);
+kill_vm_tile1:
+ if (err != -EAGAIN && tile->id)
+ xe_vm_kill(vops->vm, false);
+
+ return ERR_PTR(err);
+}
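
Editor's note: when both a primary-GT and a media-GT invalidation fence are needed, the function above merges them with the split dma_fence_array_alloc()/dma_fence_array_init() pair. The same result can be sketched with the long-standing one-shot helper — a hedged, self-contained illustration of the composite-fence idiom rather than the exact code above:

	#include <linux/dma-fence-array.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	/* Combine two fences; consumers wait on both (signal_on_any = false). */
	static struct dma_fence *combine_two(struct dma_fence *a, struct dma_fence *b,
					     u64 context, unsigned int seqno)
	{
		struct dma_fence **fences;
		struct dma_fence_array *cf;

		fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
		if (!fences)
			return ERR_PTR(-ENOMEM);

		fences[0] = a;	/* the array takes over both references */
		fences[1] = b;
		cf = dma_fence_array_create(2, fences, context, seqno, false);
		if (!cf) {
			kfree(fences);
			return ERR_PTR(-ENOMEM);
		}

		return &cf->base;
	}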
+
+/**
+ * xe_pt_update_ops_fini() - Finish PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Finish PT update operations by committing the destruction of page-table
+ * memory.
+ */
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ int i;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ for (i = 0; i < pt_update_ops->current_op; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+ xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
+ }
+ xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+}
+
+/**
+ * xe_pt_update_ops_abort() - Abort PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Abort PT update operations by unwinding internal PT state.
+ */
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ int i;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+
+ if (!pt_op->vma || i >= pt_update_ops->current_op)
+ continue;
+
+ if (pt_op->bind)
+ xe_pt_abort_bind(pt_op->vma, pt_op->entries,
+ pt_op->num_entries,
+ pt_op->rebind);
+ else
+ xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
+ pt_op->num_entries);
+ }
+
+ xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
}
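
Editor's note: taken together, prepare/run/fini/abort form a two-phase-commit style API. A hypothetical per-tile caller, sketched under the assumption that the real sequencing (including sync-entry handling) lives in xe_vm.c:

	#include "xe_pt.h"

	static int bind_one_tile(struct xe_tile *tile, struct xe_vma_ops *vops)
	{
		struct dma_fence *fence;
		int err;

		err = xe_pt_update_ops_prepare(tile, vops);
		if (err)
			goto abort;

		fence = xe_pt_update_ops_run(tile, vops);
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			goto abort;
		}
		dma_fence_put(fence);

		xe_pt_update_ops_fini(tile, vops);	/* commit PT memory destroy */
		return 0;

	abort:
		xe_pt_update_ops_abort(tile, vops);	/* unwind staged PT state */
		return err;
	}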
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 71a4fbfcff43..9ab386431cad 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -17,6 +17,7 @@ struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
struct xe_vma;
+struct xe_vma_ops;
/* Largest huge pte is currently 1GiB. May become device dependent. */
#define MAX_HUGEPTE_LEVEL 2
@@ -34,14 +35,11 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool rebind);
-
-struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs);
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
+struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
+ struct xe_vma_ops *vops);
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index cee70cb0f014..384cc04de719 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -74,4 +74,52 @@ struct xe_vm_pgtable_update {
u32 flags;
};
+/** struct xe_vm_pgtable_update_op - Page table update operation */
+struct xe_vm_pgtable_update_op {
+ /** @entries: entries to update for this operation */
+ struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+ /** @vma: VMA for operation, operation not valid if NULL */
+ struct xe_vma *vma;
+ /** @num_entries: number of entries for this update operation */
+ u32 num_entries;
+ /** @bind: is a bind */
+ bool bind;
+ /** @rebind: is a rebind */
+ bool rebind;
+};
+
+/** struct xe_vm_pgtable_update_ops: page table update operations */
+struct xe_vm_pgtable_update_ops {
+ /** @ops: operations */
+ struct xe_vm_pgtable_update_op *ops;
+ /** @deferred: deferred list to destroy PT entries */
+ struct llist_head deferred;
+ /** @q: exec queue for PT operations */
+ struct xe_exec_queue *q;
+ /** @start: start address of ops */
+ u64 start;
+ /** @last: last address of ops */
+ u64 last;
+ /** @num_ops: number of operations */
+ u32 num_ops;
+ /** @current_op: current operations */
+	/** @current_op: index of the current operation */
+ /** @needs_userptr_lock: Needs userptr lock */
+ bool needs_userptr_lock;
+ /** @needs_invalidation: Needs invalidation */
+ bool needs_invalidation;
+ /**
+ * @wait_vm_bookkeep: PT operations need to wait until VM is idle
+ * (bookkeep dma-resv slots are idle) and stage all future VM activity
+ * behind these operations (install PT operations into VM kernel
+ * dma-resv slot).
+ */
+ bool wait_vm_bookkeep;
+ /**
+ * @wait_vm_kernel: PT operations need to wait until VM kernel dma-resv
+ * slots are idle.
+ */
+ bool wait_vm_kernel;
+};
+
#endif
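
Editor's note, reading the two wait_vm_* flags together with bind_op_commit()/op_commit() above: wait_vm_bookkeep selects the stricter ordering — the PT job waits for all bookkeep fences (i.e. a fully idle VM) and its own fence is then installed in the KERNEL dma-resv slot so all future VM activity is staged behind it — while wait_vm_kernel only makes the job wait on the kernel slot and installs the result as a BOOKKEEP fence.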
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 4e01df6b1b7a..28d9bb3b825d 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -9,7 +9,7 @@
#include <linux/sched/clock.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
@@ -518,7 +518,9 @@ static int query_gt_topology(struct xe_device *xe,
if (err)
return err;
- topo.type = DRM_XE_TOPO_EU_PER_DSS;
+ topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
+ DRM_XE_TOPO_SIMD16_EU_PER_DSS :
+ DRM_XE_TOPO_EU_PER_DSS;
err = copy_mask(&query_ptr, &topo,
gt->fuse_topo.eu_mask_per_dss,
sizeof(gt->fuse_topo.eu_mask_per_dss));
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index 655af89b31a9..dca374b6521c 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -26,7 +26,6 @@
#include <linux/scatterlist.h>
-#include <drm/drm_mm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 5efe83cc82ab..86c705d18c0d 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -7,7 +7,7 @@
#include <kunit/visibility.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_gt.h"
#include "xe_gt_topology.h"
@@ -217,21 +217,19 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
ctx->active_entries = active_entries;
ctx->n_entries = n_entries;
}
+EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking);
static void rtp_mark_active(struct xe_device *xe,
struct xe_rtp_process_ctx *ctx,
- unsigned int first, unsigned int last)
+ unsigned int idx)
{
if (!ctx->active_entries)
return;
- if (drm_WARN_ON(&xe->drm, last > ctx->n_entries))
+ if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries))
return;
- if (first == last)
- bitmap_set(ctx->active_entries, first, 1);
- else
- bitmap_set(ctx->active_entries, first, last - first + 1);
+ bitmap_set(ctx->active_entries, idx, 1);
}
/**
@@ -276,8 +274,7 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
}
if (match)
- rtp_mark_active(xe, ctx, entry - entries,
- entry - entries);
+ rtp_mark_active(xe, ctx, entry - entries);
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
@@ -288,44 +285,29 @@ EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
* @entries: Table with RTP definitions
*
 * Walk the table pointed to by @entries (with an empty sentinel), executing the
- * rules. A few differences from xe_rtp_process_to_sr():
- *
- * 1. There is no action associated with each entry since this uses
- * struct xe_rtp_entry. Its main use is for marking active workarounds via
- * xe_rtp_process_ctx_enable_active_tracking().
- * 2. There is support for OR operations by having entries with no name.
+ * rules. One difference from xe_rtp_process_to_sr(): there is no action
+ * associated with each entry since this uses struct xe_rtp_entry. Its main use
+ * is for marking active workarounds via
+ * xe_rtp_process_ctx_enable_active_tracking().
*/
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry *entries)
{
- const struct xe_rtp_entry *entry, *first_entry;
+ const struct xe_rtp_entry *entry;
struct xe_hw_engine *hwe;
struct xe_gt *gt;
struct xe_device *xe;
rtp_get_context(ctx, &hwe, &gt, &xe);
- first_entry = entries;
- if (drm_WARN_ON(&xe->drm, !first_entry->name))
- return;
-
for (entry = entries; entry && entry->rules; entry++) {
- if (entry->name)
- first_entry = entry;
-
if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
continue;
- /* Fast-forward entry, eliminating the OR'ed entries */
- for (entry++; entry && entry->rules; entry++)
- if (entry->name)
- break;
- entry--;
-
- rtp_mark_active(xe, ctx, first_entry - entries,
- entry - entries);
+ rtp_mark_active(xe, ctx, entry - entries);
}
}
+EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);
bool xe_rtp_match_even_instance(const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index ad446731192c..827d932b6908 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -374,7 +374,7 @@ struct xe_reg_sr;
* XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry_sr entry
* @...: Rules
*
- * At least one rule is needed and up to 6 are supported. Multiple rules are
+ * At least one rule is needed and up to 12 are supported. Multiple rules are
* AND'ed together, i.e. all the rules must evaluate to true for the entry to
* be processed. See XE_RTP_MATCH_* for the possible match rules. Example:
*
@@ -399,7 +399,7 @@ struct xe_reg_sr;
* XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry_sr
* @...: Actions to be taken
*
- * At least one action is needed and up to 6 are supported. See XE_RTP_ACTION_*
+ * At least one action is needed and up to 12 are supported. See XE_RTP_ACTION_*
* for the possible actions. Example:
*
* .. code-block:: c
diff --git a/drivers/gpu/drm/xe/xe_rtp_helpers.h b/drivers/gpu/drm/xe/xe_rtp_helpers.h
index c59e40fd7fff..a33b0ae98bbc 100644
--- a/drivers/gpu/drm/xe/xe_rtp_helpers.h
+++ b/drivers/gpu/drm/xe/xe_rtp_helpers.h
@@ -60,6 +60,12 @@
#define XE_RTP_PASTE_4(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_3(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_5(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_4(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_6(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_5(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_7(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_6(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_8(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_7(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_9(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_8(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_10(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_9(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_11(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_10(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_12(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_11(prefix_, sep_, _XE_TUPLE_TAIL args_)
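
Editor's note: each added level peels one element off the argument tuple and pastes the prefix onto it, so, illustratively, XE_RTP_PASTE_7(XE_RTP_MATCH_, COMMA, (r1, ..., r7)) expands to XE_RTP_MATCH_r1 , XE_RTP_MATCH_r2 , ... , XE_RTP_MATCH_r7 — which is what lets XE_RTP_RULES()/XE_RTP_ACTIONS() accept up to 12 entries instead of 6.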
/*
 * XE_RTP_DROP_CAST - Drop cast to convert a compound statement to an initializer
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 8941522b7705..fe2cb2a96f78 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -25,10 +25,9 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
drm_suballoc_manager_fini(&sa_manager->base);
- if (bo->vmap.is_iomem)
+ if (sa_manager->is_iomem)
kvfree(sa_manager->cpu_ptr);
- xe_bo_unpin_map_no_vm(bo);
sa_manager->bo = NULL;
}
@@ -47,16 +46,17 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
sa_manager->bo = NULL;
- bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_managed_bo_create_pin_map(xe, tile, size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
PTR_ERR(bo));
return (struct xe_sa_manager *)bo;
}
sa_manager->bo = bo;
+ sa_manager->is_iomem = bo->vmap.is_iomem;
drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
@@ -64,7 +64,6 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
if (bo->vmap.is_iomem) {
sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
if (!sa_manager->cpu_ptr) {
- xe_bo_unpin_map_no_vm(sa_manager->bo);
sa_manager->bo = NULL;
return ERR_PTR(-ENOMEM);
}
@@ -84,6 +83,13 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
unsigned int size)
{
+ /*
+	 * BB too large; return -ENOBUFS, indicating the user should split
+	 * the array of binds into smaller chunks.
+ */
+ if (size > sa_manager->base.size)
+ return ERR_PTR(-ENOBUFS);
+
return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
}
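
Editor's note, a hedged usage sketch of the new contract — the caller name and the splitting policy are hypothetical, only the -ENOBUFS return is from this patch:

	#include "xe_sa.h"

	static int alloc_bb_space(struct xe_sa_manager *sa_manager, unsigned int size)
	{
		struct drm_suballoc *sa = xe_sa_bo_new(sa_manager, size);

		if (IS_ERR(sa)) {
			/*
			 * -ENOBUFS: the BB exceeds the whole pool; split the
			 * array of binds into smaller chunks and retry rather
			 * than treating the error as fatal.
			 */
			return PTR_ERR(sa);
		}

		/* ... write the batch buffer into the suballocation ... */
		return 0;
	}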
diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h
index 2ef896aeca1d..2b070ff1292e 100644
--- a/drivers/gpu/drm/xe/xe_sa_types.h
+++ b/drivers/gpu/drm/xe/xe_sa_types.h
@@ -14,6 +14,7 @@ struct xe_sa_manager {
struct xe_bo *bo;
u64 gpu_addr;
void *cpu_ptr;
+ bool is_iomem;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 9628f9deb3c0..eeccc1c318ae 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -5,7 +5,7 @@
#include "xe_sched_job.h"
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>
@@ -89,8 +89,7 @@ static void xe_sched_job_free_fences(struct xe_sched_job *job)
if (ptrs->lrc_fence)
xe_lrc_free_seqno_fence(ptrs->lrc_fence);
- if (ptrs->chain_fence)
- dma_fence_chain_free(ptrs->chain_fence);
+ dma_fence_chain_free(ptrs->chain_fence);
}
}
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index a274a5fb1401..5a1d65e4f19f 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -5,7 +5,7 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_device.h"
diff --git a/drivers/gpu/drm/xe/xe_step.c b/drivers/gpu/drm/xe/xe_step.c
index eaf1b718f26c..c77b5c317fa0 100644
--- a/drivers/gpu/drm/xe/xe_step.c
+++ b/drivers/gpu/drm/xe/xe_step.c
@@ -28,23 +28,17 @@
* use a macro to define these to make it easier to identify the platforms
* where the two steppings can deviate.
*/
-#define COMMON_GT_MEDIA_STEP(x_) \
- .graphics = STEP_##x_, \
- .media = STEP_##x_
-
#define COMMON_STEP(x_) \
- COMMON_GT_MEDIA_STEP(x_), \
.graphics = STEP_##x_, \
- .media = STEP_##x_, \
- .display = STEP_##x_
+ .media = STEP_##x_
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
/* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */
static const struct xe_step_info tgl_revids[] = {
- [0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_B0 },
- [1] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_D0 },
+ [0] = { COMMON_STEP(A0) },
+ [1] = { COMMON_STEP(B0) },
};
static const struct xe_step_info dg1_revids[] = {
@@ -53,49 +47,49 @@ static const struct xe_step_info dg1_revids[] = {
};
static const struct xe_step_info adls_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A2 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_B0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
+ [0xC] = { COMMON_STEP(D0) },
};
static const struct xe_step_info adls_rpls_revids[] = {
- [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_D0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_C0 },
+ [0x4] = { COMMON_STEP(D0) },
+ [0xC] = { COMMON_STEP(D0) },
};
static const struct xe_step_info adlp_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_C0 },
- [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_D0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
+ [0xC] = { COMMON_STEP(C0) },
};
static const struct xe_step_info adlp_rpl_revids[] = {
- [0x4] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_E0 },
+ [0x4] = { COMMON_STEP(C0) },
};
static const struct xe_step_info adln_revids[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_D0 },
+ [0x0] = { COMMON_STEP(A0) },
};
static const struct xe_step_info dg2_g10_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display = STEP_A0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
- [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A1) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x8] = { COMMON_STEP(C0) },
};
static const struct xe_step_info dg2_g11_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_B0 },
- [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_C0 },
- [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x4] = { COMMON_STEP(B0) },
+ [0x5] = { COMMON_STEP(B1) },
};
static const struct xe_step_info dg2_g12_revid_step_tbl[] = {
- [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_C0 },
- [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display = STEP_C0 },
+ [0x0] = { COMMON_STEP(A0) },
+ [0x1] = { COMMON_STEP(A1) },
};
static const struct xe_step_info pvc_revid_step_tbl[] = {
@@ -195,7 +189,6 @@ struct xe_step_info xe_step_pre_gmdid_get(struct xe_device *xe)
} else {
drm_dbg(&xe->drm, "Using future steppings\n");
step.graphics = STEP_FUTURE;
- step.display = STEP_FUTURE;
}
}
diff --git a/drivers/gpu/drm/xe/xe_step_types.h b/drivers/gpu/drm/xe/xe_step_types.h
index ccc9b4795e95..d978cc2512f2 100644
--- a/drivers/gpu/drm/xe/xe_step_types.h
+++ b/drivers/gpu/drm/xe/xe_step_types.h
@@ -11,12 +11,15 @@
struct xe_step_info {
u8 graphics;
u8 media;
- u8 display;
u8 basedie;
};
#define STEP_ENUM_VAL(name) STEP_##name,
+/*
+ * Always define four minor steppings 0-3 for each stepping to match GMD ID
+ * spacing of values. See xe_step_gmdid_get().
+ */
#define STEP_NAME_LIST(func) \
func(A0) \
func(A1) \
@@ -34,7 +37,30 @@ struct xe_step_info {
func(D1) \
func(D2) \
func(D3) \
- func(E0)
+ func(E0) \
+ func(E1) \
+ func(E2) \
+ func(E3) \
+ func(F0) \
+ func(F1) \
+ func(F2) \
+ func(F3) \
+ func(G0) \
+ func(G1) \
+ func(G2) \
+ func(G3) \
+ func(H0) \
+ func(H1) \
+ func(H2) \
+ func(H3) \
+ func(I0) \
+ func(I1) \
+ func(I2) \
+ func(I3) \
+ func(J0) \
+ func(J1) \
+ func(J2) \
+ func(J3)
/*
* Symbolic steppings that do not match the hardware. These are valid both as gt
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 80499681bd58..bb3c2a830362 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -12,7 +12,7 @@
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device_types.h"
#include "xe_exec_queue.h"
@@ -204,26 +204,11 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
return 0;
}
-int xe_sync_entry_wait(struct xe_sync_entry *sync)
-{
- if (sync->fence)
- dma_fence_wait(sync->fence, true);
-
- return 0;
-}
-
int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
- int err;
-
- if (sync->fence) {
- err = drm_sched_job_add_dependency(&job->drm,
- dma_fence_get(sync->fence));
- if (err) {
- dma_fence_put(sync->fence);
- return err;
- }
- }
+ if (sync->fence)
+ return drm_sched_job_add_dependency(&job->drm,
+ dma_fence_get(sync->fence));
return 0;
}
@@ -264,10 +249,8 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
{
if (sync->syncobj)
drm_syncobj_put(sync->syncobj);
- if (sync->fence)
- dma_fence_put(sync->fence);
- if (sync->chain_fence)
- dma_fence_chain_free(sync->chain_fence);
+ dma_fence_put(sync->fence);
+ dma_fence_chain_free(sync->chain_fence);
if (sync->ufence)
user_fence_put(sync->ufence);
}
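
Editor's note: the dropped NULL checks here and in xe_sched_job_free_fences() above rely on the callees tolerating a NULL argument — dma_fence_put() is documented as NULL-safe, and this patch assumes the same of dma_fence_chain_free() — so the explicit guards were redundant.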
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 006dbf780793..256ffc1e54dc 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -22,7 +22,6 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
unsigned int flags);
-int xe_sync_entry_wait(struct xe_sync_entry *sync);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
void xe_sync_entry_signal(struct xe_sync_entry *sync,
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 01837f6f609f..8573d7a87d84 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -369,6 +369,58 @@ TRACE_EVENT(xe_reg_rw,
(u32)(__entry->val >> 32))
);
+DECLARE_EVENT_CLASS(xe_pm_runtime,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_xe(xe))
+ __field(void *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __entry->caller = caller;
+ ),
+
+ TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index f39f09ed3495..9b1a1d4304ae 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -117,11 +117,6 @@ DEFINE_EVENT(xe_vma, xe_vma_acc,
TP_ARGS(vma)
);
-DEFINE_EVENT(xe_vma, xe_vma_fail,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
DEFINE_EVENT(xe_vma, xe_vma_bind,
TP_PROTO(struct xe_vma *vma),
TP_ARGS(vma)
@@ -237,6 +232,11 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
TP_ARGS(vm)
);
+DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index f46fd2df84de..f7113cf6109d 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -5,7 +5,6 @@
*/
#include <drm/drm_managed.h>
-#include <drm/drm_mm.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index d4e6fa918942..faa1bf42e50e 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -39,12 +39,23 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
},
{ XE_RTP_NAME("Tuning: Compression Overfetch"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
- XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX)),
+ XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX),
+ SET(CCCHKNREG1, L3CMPCTRL))
},
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
},
+ { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(SET(L3SQCREG2,
+ COMPMEMRD256BOVRFETCHEN))
+ },
+ { XE_RTP_NAME("Tuning: Stateless compression control"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
+ REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
+ },
{}
};
@@ -93,6 +104,14 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("Tuning: vs hit max value"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
+ REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
+ },
+
{}
};
diff --git a/drivers/gpu/drm/xe/xe_uc_debugfs.c b/drivers/gpu/drm/xe/xe_uc_debugfs.c
index 78eb8db73791..24a4209051ee 100644
--- a/drivers/gpu/drm/xe/xe_uc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_uc_debugfs.c
@@ -8,6 +8,7 @@
#include <drm/drm_debugfs.h>
#include "xe_gt.h"
+#include "xe_gsc_debugfs.h"
#include "xe_guc_debugfs.h"
#include "xe_huc_debugfs.h"
#include "xe_macros.h"
@@ -23,6 +24,7 @@ void xe_uc_debugfs_register(struct xe_uc *uc, struct dentry *parent)
return;
}
+ xe_gsc_debugfs_register(&uc->gsc, root);
xe_guc_debugfs_register(&uc->guc, root);
xe_huc_debugfs_register(&uc->huc, root);
}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 5f23ecd98376..d431d0031185 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -15,6 +15,7 @@
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
+#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_module.h"
@@ -105,17 +106,20 @@ struct fw_blobs_by_type {
};
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 19, 2)) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 19, 2)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 19, 2)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 19, 2)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 19, 2)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 19, 2)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 19, 2)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 19, 2)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2))
+ fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \
+ fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \
+ fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \
+ fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \
+ fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \
+ fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \
+ fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \
+ fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \
+ fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \
+ fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
+ fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
+ fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
fw_def(DG1, no_ver(i915, huc, dg1)) \
fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
@@ -125,7 +129,8 @@ struct fw_blobs_by_type {
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0))
+ fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
+ fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
@@ -136,6 +141,8 @@ struct fw_blobs_by_type {
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a))
#define fw_filename_no_ver(dir_, uc_, shortname_) \
MAKE_FW_PATH(dir_, uc_, shortname_, "")
+#define fw_filename_gsc(dir_, uc_, shortname_, a, b, c) \
+ MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(b))
#define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \
{ fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \
@@ -146,6 +153,9 @@ struct fw_blobs_by_type {
#define uc_fw_entry_no_ver(dir_, uc_, shortname_) \
{ fw_filename_no_ver(dir_, uc_, shortname_), \
0, 0 }
+#define uc_fw_entry_gsc(dir_, uc_, shortname_, a, b, c) \
+ { fw_filename_gsc(dir_, uc_, shortname_, a, b, c), \
+ a, b, c }
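
Since the comment above about matching the GSC compatibility version is easy to miss, here is what the new macros actually expand to. This is a stand-alone userspace sketch (with `__stringify` open-coded to mirror include/linux/stringify.h), not the kernel's own headers:

```c
/*
 * Hedged sketch: reproduces the filename macros above in userspace to
 * show their expansion. Only the 'b' slot (the compatibility major)
 * ends up in a GSC filename; 'a' and 'c' ride along for the version
 * fields of uc_fw_entry_gsc.
 */
#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
	__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"

#define fw_filename_gsc(dir_, uc_, shortname_, a, b, c) \
	MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(b))

int main(void)
{
	/* The LUNARLAKE GSC entry above resolves to "xe/lnl_gsc_1.bin" */
	puts(fw_filename_gsc(xe, gsc, lnl, 104, 1, 0));
	return 0;
}
```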
/* All blobs need to be declared via MODULE_FIRMWARE() */
#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
@@ -161,7 +171,7 @@ XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_major_ver)
XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_no_ver)
-XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_major_ver)
+XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_gsc)
static struct xe_gt *
__uc_fw_to_gt(struct xe_uc_fw *uc_fw, enum xe_uc_fw_type type)
@@ -204,7 +214,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
uc_fw_entry_no_ver)
};
static const struct uc_fw_entry entries_gsc[] = {
- XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_major_ver)
+ XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_gsc)
};
static const struct fw_blobs_by_type blobs_all[XE_UC_FW_NUM_TYPES] = {
[XE_UC_FW_TYPE_GUC] = { entries_guc, ARRAY_SIZE(entries_guc) },
@@ -306,10 +316,10 @@ static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
xe_gt_assert(gt, uc_fw->type == XE_UC_FW_TYPE_GUC);
- /* We don't support GuC releases older than 70.19 */
- if (release->major < 70 || (release->major == 70 && release->minor < 19)) {
- xe_gt_err(gt, "Unsupported GuC v%u.%u! v70.19 or newer is required\n",
- release->major, release->minor);
+ /* We don't support GuC releases older than 70.29.2 */
+ if (MAKE_GUC_VER_STRUCT(*release) < MAKE_GUC_VER(70, 29, 2)) {
+ xe_gt_err(gt, "Unsupported GuC v%u.%u.%u! v70.29.2 or newer is required\n",
+ release->major, release->minor, release->patch);
return -EINVAL;
}
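
The rewritten minimum-version check works because the MAKE_GUC_VER helpers (pulled in via the new xe_guc.h include above) pack major/minor/patch into a single integer, so "older than 70.29.2" becomes one ordered comparison instead of a field-by-field test. A sketch of the assumed shape of those helpers:

```c
/*
 * Assumed shape of the helpers from xe_guc.h; 8 bits per version
 * component is an assumption of this sketch.
 */
#define MAKE_GUC_VER(maj, min, pat)  (((maj) << 16) | ((min) << 8) | (pat))
#define MAKE_GUC_VER_STRUCT(ver)     MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)

/*
 * With that packing, e.g. v70.19.2 compares below the new floor:
 *   MAKE_GUC_VER(70, 19, 2) = 0x461302 < 0x461d02 = MAKE_GUC_VER(70, 29, 2)
 */
```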
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 50e8fc49ba6c..7acd5fc9d032 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -12,7 +12,7 @@
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/kthread.h>
@@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
if (q->lr.pfence) {
long timeout = dma_fence_wait(q->lr.pfence, false);
- if (timeout < 0)
+ /* Only -ETIME on the fence indicates the VM needs to be killed */
+ if (timeout < 0 || q->lr.pfence->error == -ETIME)
return -ETIME;
+
dma_fence_put(q->lr.pfence);
q->lr.pfence = NULL;
}
@@ -273,6 +275,8 @@ out_up_write:
* xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
* @vm: The VM.
* @q: The exec_queue
+ *
+ * Note that this function might be called multiple times on the same queue.
*/
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
@@ -280,8 +284,10 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
return;
down_write(&vm->lock);
- list_del(&q->lr.link);
- --vm->preempt.num_exec_queues;
+ if (!list_empty(&q->lr.link)) {
+ list_del_init(&q->lr.link);
+ --vm->preempt.num_exec_queues;
+ }
if (q->lr.pfence) {
dma_fence_enable_sw_signaling(q->lr.pfence);
dma_fence_put(q->lr.pfence);
@@ -311,7 +317,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
-static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
+/**
+ * xe_vm_kill() - VM Kill
+ * @vm: The VM.
+ * @unlocked: Flag indicating the VM's dma-resv is not held
+ *
+ * Kill the VM by setting the banned flag, indicating the VM is no longer
+ * available for use. If in preempt fence mode, also kill all exec queues
+ * attached to the VM.
+ */
+void xe_vm_kill(struct xe_vm *vm, bool unlocked)
{
struct xe_exec_queue *q;
@@ -708,6 +722,42 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}
+static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
+ if (!vops->pt_update_ops[i].num_ops)
+ continue;
+
+ vops->pt_update_ops[i].ops =
+ kmalloc_array(vops->pt_update_ops[i].num_ops,
+ sizeof(*vops->pt_update_ops[i].ops),
+ GFP_KERNEL);
+ if (!vops->pt_update_ops[i].ops)
+ return array_of_binds ? -ENOBUFS : -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void xe_vma_ops_fini(struct xe_vma_ops *vops)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ kfree(vops->pt_update_ops[i].ops);
+}
+
+static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ if (BIT(i) & tile_mask)
+ ++vops->pt_update_ops[i].num_ops;
+}
+
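
Stitched together from the rest of this patch, these helpers slot into a fixed sequence: parse counts the ops, alloc sizes the arrays, execute consumes them, fini frees them. A rough sketch of the flow as it appears in the bind ioctl path later in this patch:

```c
/*
 * Rough call flow (assembled from this patch, simplified):
 *
 *   xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
 *   vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
 *       // each op calls xe_vma_ops_incr_pt_update_ops() per tile
 *   xe_vma_ops_alloc(&vops, args->num_binds > 1);
 *       // exact-size pt_update_ops arrays; -ENOBUFS hints "split binds"
 *   vm_bind_ioctl_ops_execute(vm, &vops);
 *       // ops_execute(): prepare/run per tile, composite fence
 *   xe_vma_ops_fini(&vops);
 *       // kfree the per-tile arrays
 */
```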
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -735,6 +785,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
xe_vm_populate_rebind(op, vma, tile_mask);
list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
return 0;
}
@@ -751,7 +802,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
struct xe_vma *vma, *next;
struct xe_vma_ops vops;
struct xe_vma_op *op, *next_op;
- int err;
+ int err, i;
lockdep_assert_held(&vm->lock);
if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
@@ -759,6 +810,8 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
return 0;
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ vops.pt_update_ops[i].wait_vm_bookkeep = true;
xe_vm_assert_held(vm);
list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
@@ -775,6 +828,10 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
goto free_ops;
}
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err)
+ goto free_ops;
+
fence = ops_execute(vm, &vops);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
@@ -789,6 +846,7 @@ free_ops:
list_del(&op->link);
kfree(op);
}
+ xe_vma_ops_fini(&vops);
return err;
}
@@ -798,6 +856,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
struct dma_fence *fence = NULL;
struct xe_vma_ops vops;
struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
int err;
lockdep_assert_held(&vm->lock);
@@ -805,17 +865,30 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
if (err)
return ERR_PTR(err);
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
fence = ops_execute(vm, &vops);
+free_ops:
list_for_each_entry_safe(op, next_op, &vops.list, link) {
list_del(&op->link);
kfree(op);
}
+ xe_vma_ops_fini(&vops);
return fence;
}
@@ -1122,7 +1195,7 @@ static const struct drm_gpuvm_ops gpuvm_ops = {
.vm_free = xe_vm_free,
};
-static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
+static u64 pde_encode_pat_index(u16 pat_index)
{
u64 pte = 0;
@@ -1135,8 +1208,7 @@ static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
return pte;
}
-static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
- u32 pt_level)
+static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level)
{
u64 pte = 0;
@@ -1177,12 +1249,11 @@ static u64 pte_encode_ps(u32 pt_level)
static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
const u16 pat_index)
{
- struct xe_device *xe = xe_bo_device(bo);
u64 pde;
pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_pat_index(xe, pat_index);
+ pde |= pde_encode_pat_index(pat_index);
return pde;
}
@@ -1190,12 +1261,11 @@ static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
u16 pat_index, u32 pt_level)
{
- struct xe_device *xe = xe_bo_device(bo);
u64 pte;
pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_pat_index(pat_index, pt_level);
pte |= pte_encode_ps(pt_level);
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
@@ -1207,14 +1277,12 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
u16 pat_index, u32 pt_level)
{
- struct xe_device *xe = xe_vma_vm(vma)->xe;
-
pte |= XE_PAGE_PRESENT;
if (likely(!xe_vma_read_only(vma)))
pte |= XE_PAGE_RW;
- pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_pat_index(pat_index, pt_level);
pte |= pte_encode_ps(pt_level);
if (unlikely(xe_vma_is_null(vma)))
@@ -1234,7 +1302,7 @@ static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
pte = addr;
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_pat_index(pat_index, pt_level);
pte |= pte_encode_ps(pt_level);
if (devmem)
@@ -1333,6 +1401,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
+ ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
INIT_LIST_HEAD(&vm->preempt.exec_queues);
@@ -1412,19 +1482,13 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
/* Kernel migration VM shouldn't have a circular loop.. */
if (!(flags & XE_VM_FLAG_MIGRATION)) {
for_each_tile(tile, xe, id) {
- struct xe_gt *gt = tile->primary_gt;
- struct xe_vm *migrate_vm;
struct xe_exec_queue *q;
u32 create_flags = EXEC_QUEUE_FLAG_VM;
if (!vm->pt_root[id])
continue;
- migrate_vm = xe_migrate_get_vm(tile->migrate);
- q = xe_exec_queue_create_class(xe, gt, migrate_vm,
- XE_ENGINE_CLASS_COPY,
- create_flags);
- xe_vm_put(migrate_vm);
+ q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto err_close;
@@ -1437,13 +1501,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
- mutex_lock(&xe->usm.lock);
- if (flags & XE_VM_FLAG_FAULT_MODE)
- xe->usm.num_vm_in_fault_mode++;
- else if (!(flags & XE_VM_FLAG_MIGRATION))
- xe->usm.num_vm_in_non_fault_mode++;
- mutex_unlock(&xe->usm.lock);
-
trace_xe_vm_create(vm);
return vm;
@@ -1458,6 +1515,7 @@ err_no_resv:
mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
+ ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
kfree(vm);
if (flags & XE_VM_FLAG_LR_MODE)
xe_pm_runtime_put(xe);
@@ -1556,11 +1614,6 @@ void xe_vm_close_and_put(struct xe_vm *vm)
up_write(&vm->lock);
mutex_lock(&xe->usm.lock);
- if (vm->flags & XE_VM_FLAG_FAULT_MODE)
- xe->usm.num_vm_in_fault_mode--;
- else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
- xe->usm.num_vm_in_non_fault_mode--;
-
if (vm->usm.asid) {
void *lookup;
@@ -1602,6 +1655,8 @@ static void vm_destroy_work_func(struct work_struct *w)
trace_xe_vm_free(vm);
+ ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
+
if (vm->xef)
xe_file_put(vm->xef);
@@ -1641,147 +1696,6 @@ to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
return q ? q : vm->q[0];
}
-static struct dma_fence *
-xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool first_op, bool last_op)
-{
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
- struct xe_tile *tile;
- struct dma_fence *fence = NULL;
- struct dma_fence **fences = NULL;
- struct dma_fence_array *cf = NULL;
- int cur_fence = 0;
- int number_tiles = hweight8(vma->tile_present);
- int err;
- u8 id;
-
- trace_xe_vma_unbind(vma);
-
- if (number_tiles > 1) {
- fences = kmalloc_array(number_tiles, sizeof(*fences),
- GFP_KERNEL);
- if (!fences)
- return ERR_PTR(-ENOMEM);
- }
-
- for_each_tile(tile, vm->xe, id) {
- if (!(vma->tile_present & BIT(id)))
- goto next;
-
- fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
- first_op ? syncs : NULL,
- first_op ? num_syncs : 0);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_fences;
- }
-
- if (fences)
- fences[cur_fence++] = fence;
-
-next:
- if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
- q = list_next_entry(q, multi_gt_list);
- }
-
- if (fences) {
- cf = dma_fence_array_create(number_tiles, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- if (!cf) {
- --vm->composite_fence_seqno;
- err = -ENOMEM;
- goto err_fences;
- }
- }
-
- fence = cf ? &cf->base : !fence ?
- xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
-
- return fence;
-
-err_fences:
- if (fences) {
- while (cur_fence)
- dma_fence_put(fences[--cur_fence]);
- kfree(fences);
- }
-
- return ERR_PTR(err);
-}
-
-static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- u8 tile_mask, bool first_op, bool last_op)
-{
- struct xe_tile *tile;
- struct dma_fence *fence;
- struct dma_fence **fences = NULL;
- struct dma_fence_array *cf = NULL;
- struct xe_vm *vm = xe_vma_vm(vma);
- int cur_fence = 0;
- int number_tiles = hweight8(tile_mask);
- int err;
- u8 id;
-
- trace_xe_vma_bind(vma);
-
- if (number_tiles > 1) {
- fences = kmalloc_array(number_tiles, sizeof(*fences),
- GFP_KERNEL);
- if (!fences)
- return ERR_PTR(-ENOMEM);
- }
-
- for_each_tile(tile, vm->xe, id) {
- if (!(tile_mask & BIT(id)))
- goto next;
-
- fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
- first_op ? syncs : NULL,
- first_op ? num_syncs : 0,
- vma->tile_present & BIT(id));
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_fences;
- }
-
- if (fences)
- fences[cur_fence++] = fence;
-
-next:
- if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
- q = list_next_entry(q, multi_gt_list);
- }
-
- if (fences) {
- cf = dma_fence_array_create(number_tiles, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- if (!cf) {
- --vm->composite_fence_seqno;
- err = -ENOMEM;
- goto err_fences;
- }
- }
-
- return cf ? &cf->base : fence;
-
-err_fences:
- if (fences) {
- while (cur_fence)
- dma_fence_put(fences[--cur_fence]);
- kfree(fences);
- }
-
- return ERR_PTR(err);
-}
-
static struct xe_user_fence *
find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
{
@@ -1797,48 +1711,6 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
return NULL;
}
-static struct dma_fence *
-xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
- u8 tile_mask, bool immediate, bool first_op, bool last_op)
-{
- struct dma_fence *fence;
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(bo);
-
- if (immediate) {
- fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
- first_op, last_op);
- if (IS_ERR(fence))
- return fence;
- } else {
- xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
-
- fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- }
-
- return fence;
-}
-
-static struct dma_fence *
-xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, struct xe_sync_entry *syncs,
- u32 num_syncs, bool first_op, bool last_op)
-{
- struct dma_fence *fence;
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(xe_vma_bo(vma));
-
- fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
- if (IS_ERR(fence))
- return fence;
-
- return fence;
-}
-
#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
DRM_XE_VM_CREATE_FLAG_LR_MODE | \
DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
@@ -1879,14 +1751,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
- xe_device_in_non_fault_mode(xe)))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
- xe_device_in_fault_mode(xe)))
- return -EINVAL;
-
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
@@ -1979,21 +1843,6 @@ static const u32 region_to_mem_type[] = {
XE_PL_VRAM1,
};
-static struct dma_fence *
-xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, struct xe_sync_entry *syncs,
- u32 num_syncs, bool first_op, bool last_op)
-{
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-
- if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
- return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
- vma->tile_mask, true, first_op, last_op);
- } else {
- return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- }
-}
-
static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
bool post_commit)
{
@@ -2281,14 +2130,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}
-
-static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
- struct drm_gpuva_ops *ops,
- struct xe_sync_entry *syncs, u32 num_syncs,
- struct xe_vma_ops *vops, bool last)
+static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
+ struct xe_vma_ops *vops)
{
struct xe_device *xe = vm->xe;
- struct xe_vma_op *last_op = NULL;
struct drm_gpuva_op *__op;
struct xe_tile *tile;
u8 id, tile_mask = 0;
@@ -2302,19 +2147,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
struct xe_vma *vma;
- bool first = list_empty(&vops->list);
unsigned int flags = 0;
INIT_LIST_HEAD(&op->link);
list_add_tail(&op->link, &vops->list);
-
- if (first) {
- op->flags |= XE_VMA_OP_FIRST;
- op->num_syncs = num_syncs;
- op->syncs = syncs;
- }
-
- op->q = q;
op->tile_mask = tile_mask;
switch (op->base.op) {
@@ -2333,6 +2169,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
return PTR_ERR(vma);
op->map.vma = vma;
+ if (op->map.immediate || !xe_vm_in_fault_mode(vm))
+ xe_vma_ops_incr_pt_update_ops(vops,
+ op->tile_mask);
break;
}
case DRM_GPUVA_OP_REMAP:
@@ -2377,6 +2216,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
(ULL)op->remap.start,
(ULL)op->remap.range);
+ } else {
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
@@ -2413,203 +2254,30 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
(ULL)op->remap.start,
(ULL)op->remap.range);
+ } else {
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
}
case DRM_GPUVA_OP_UNMAP:
case DRM_GPUVA_OP_PREFETCH:
- /* Nothing to do */
+ /* FIXME: Need to skip some prefetch ops */
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- last_op = op;
-
err = xe_vma_op_commit(vm, op);
if (err)
return err;
}
- /* FIXME: Unhandled corner case */
- XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
-
- if (!last_op)
- return 0;
-
- if (last) {
- last_op->flags |= XE_VMA_OP_LAST;
- last_op->num_syncs = num_syncs;
- last_op->syncs = syncs;
- }
-
return 0;
}
-static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
-{
- struct dma_fence *fence = NULL;
-
- lockdep_assert_held(&vm->lock);
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(xe_vma_bo(vma));
-
- switch (op->base.op) {
- case DRM_GPUVA_OP_MAP:
- fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
- op->syncs, op->num_syncs,
- op->tile_mask,
- op->map.immediate || !xe_vm_in_fault_mode(vm),
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- case DRM_GPUVA_OP_REMAP:
- {
- bool prev = !!op->remap.prev;
- bool next = !!op->remap.next;
-
- if (!op->remap.unmap_done) {
- if (prev || next)
- vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
- fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
- op->num_syncs,
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST &&
- !prev && !next);
- if (IS_ERR(fence))
- break;
- op->remap.unmap_done = true;
- }
-
- if (prev) {
- op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
- dma_fence_put(fence);
- fence = xe_vm_bind(vm, op->remap.prev, op->q,
- xe_vma_bo(op->remap.prev), op->syncs,
- op->num_syncs,
- op->remap.prev->tile_mask, true,
- false,
- op->flags & XE_VMA_OP_LAST && !next);
- op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
- if (IS_ERR(fence))
- break;
- op->remap.prev = NULL;
- }
-
- if (next) {
- op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
- dma_fence_put(fence);
- fence = xe_vm_bind(vm, op->remap.next, op->q,
- xe_vma_bo(op->remap.next),
- op->syncs, op->num_syncs,
- op->remap.next->tile_mask, true,
- false, op->flags & XE_VMA_OP_LAST);
- op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
- if (IS_ERR(fence))
- break;
- op->remap.next = NULL;
- }
-
- break;
- }
- case DRM_GPUVA_OP_UNMAP:
- fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
- op->num_syncs, op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- case DRM_GPUVA_OP_PREFETCH:
- fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- if (IS_ERR(fence))
- trace_xe_vma_fail(vma);
-
- return fence;
-}
-
-static struct dma_fence *
-__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
-{
- struct dma_fence *fence;
- int err;
-
-retry_userptr:
- fence = op_execute(vm, vma, op);
- if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
- lockdep_assert_held_write(&vm->lock);
-
- if (op->base.op == DRM_GPUVA_OP_REMAP) {
- if (!op->remap.unmap_done)
- vma = gpuva_to_vma(op->base.remap.unmap->va);
- else if (op->remap.prev)
- vma = op->remap.prev;
- else
- vma = op->remap.next;
- }
-
- if (xe_vma_is_userptr(vma)) {
- err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
- if (!err)
- goto retry_userptr;
-
- fence = ERR_PTR(err);
- trace_xe_vma_fail(vma);
- }
- }
-
- return fence;
-}
-
-static struct dma_fence *
-xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
-{
- struct dma_fence *fence = ERR_PTR(-ENOMEM);
-
- lockdep_assert_held(&vm->lock);
-
- switch (op->base.op) {
- case DRM_GPUVA_OP_MAP:
- fence = __xe_vma_op_execute(vm, op->map.vma, op);
- break;
- case DRM_GPUVA_OP_REMAP:
- {
- struct xe_vma *vma;
-
- if (!op->remap.unmap_done)
- vma = gpuva_to_vma(op->base.remap.unmap->va);
- else if (op->remap.prev)
- vma = op->remap.prev;
- else
- vma = op->remap.next;
-
- fence = __xe_vma_op_execute(vm, vma, op);
- break;
- }
- case DRM_GPUVA_OP_UNMAP:
- fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
- op);
- break;
- case DRM_GPUVA_OP_PREFETCH:
- fence = __xe_vma_op_execute(vm,
- gpuva_to_vma(op->base.prefetch.va),
- op);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- return fence;
-}
-
static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
bool post_commit, bool prev_post_commit,
bool next_post_commit)
@@ -2792,26 +2460,157 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
return err;
}
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
+ return -ENOSPC;
+#endif
+
return 0;
}
+static void op_trace(struct xe_vma_op *op)
+{
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ trace_xe_vma_bind(op->map.vma);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
+ if (op->remap.prev)
+ trace_xe_vma_bind(op->remap.prev);
+ if (op->remap.next)
+ trace_xe_vma_bind(op->remap.next);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
+ break;
+ default:
+ XE_WARN_ON("NOT POSSIBLE");
+ }
+}
+
+static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+
+ list_for_each_entry(op, &vops->list, link)
+ op_trace(op);
+}
+
+static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
+{
+ struct xe_exec_queue *q = vops->q;
+ struct xe_tile *tile;
+ int number_tiles = 0;
+ u8 id;
+
+ for_each_tile(tile, vm->xe, id) {
+ if (vops->pt_update_ops[id].num_ops)
+ ++number_tiles;
+
+ if (vops->pt_update_ops[id].q)
+ continue;
+
+ if (q) {
+ vops->pt_update_ops[id].q = q;
+ if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+ q = list_next_entry(q, multi_gt_list);
+ } else {
+ vops->pt_update_ops[id].q = vm->q[id];
+ }
+ }
+
+ return number_tiles;
+}
+
static struct dma_fence *ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{
- struct xe_vma_op *op, *next;
+ struct xe_tile *tile;
struct dma_fence *fence = NULL;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
+ int number_tiles = 0, current_fence = 0, err;
+ u8 id;
- list_for_each_entry_safe(op, next, &vops->list, link) {
- dma_fence_put(fence);
- fence = xe_vma_op_execute(vm, op);
- if (IS_ERR(fence)) {
- drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
- op->base.op, PTR_ERR(fence));
- fence = ERR_PTR(-ENOSPC);
- break;
+ number_tiles = vm_ops_setup_tile_args(vm, vops);
+ if (number_tiles == 0)
+ return ERR_PTR(-ENODATA);
+
+ if (number_tiles > 1) {
+ fences = kmalloc_array(number_tiles, sizeof(*fences),
+ GFP_KERNEL);
+ if (!fences) {
+ fence = ERR_PTR(-ENOMEM);
+ goto err_trace;
+ }
+ }
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ err = xe_pt_update_ops_prepare(tile, vops);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto err_out;
}
}
+ trace_xe_vm_ops_execute(vops);
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ fence = xe_pt_update_ops_run(tile, vops);
+ if (IS_ERR(fence))
+ goto err_out;
+
+ if (fences)
+ fences[current_fence++] = fence;
+ }
+
+ if (fences) {
+ cf = dma_fence_array_create(number_tiles, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ if (!cf) {
+ --vm->composite_fence_seqno;
+ fence = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+ fence = &cf->base;
+ }
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ xe_pt_update_ops_fini(tile, vops);
+ }
+
+ return fence;
+
+err_out:
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ xe_pt_update_ops_abort(tile, vops);
+ }
+ while (current_fence)
+ dma_fence_put(fences[--current_fence]);
+ kfree(fences);
+ kfree(cf);
+
+err_trace:
+ trace_xe_vm_ops_fail(vm);
return fence;
}
@@ -2892,12 +2691,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
fence = ops_execute(vm, vops);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
- /* FIXME: Killing VM rather than proper error handling */
- xe_vm_kill(vm, false);
goto unlock;
- } else {
- vm_bind_ioctl_ops_fini(vm, vops, fence);
}
+
+ vm_bind_ioctl_ops_fini(vm, vops, fence);
}
unlock:
@@ -2905,11 +2702,18 @@ unlock:
return err;
}
-#define SUPPORTED_FLAGS \
+#define SUPPORTED_FLAGS_STUB \
(DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE)
+
+#ifdef TEST_VM_OPS_ERROR
+#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
+#else
+#define SUPPORTED_FLAGS SUPPORTED_FLAGS_STUB
+#endif
+
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
@@ -2935,7 +2739,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
sizeof(struct drm_xe_vm_bind_op),
GFP_KERNEL | __GFP_ACCOUNT);
if (!*bind_ops)
- return -ENOMEM;
+ return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
err = __copy_from_user(*bind_ops, bind_user,
sizeof(struct drm_xe_vm_bind_op) *
@@ -3074,7 +2878,16 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
return -EINVAL;
}
- if (bo->flags & XE_BO_FLAG_INTERNAL_64K) {
+ /*
+ * Some platforms require 64k VM_BIND alignment,
+ * specifically those with XE_VRAM_FLAGS_NEED64K.
+ *
+ * Other platforms may have BOs set to 64k physical placement,
+ * but can be mapped at 4k offsets anyway. This check is only
+ * there for the former case.
+ */
+ if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) &&
+ (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) {
if (XE_IOCTL_DBG(xe, obj_offset &
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
@@ -3254,10 +3067,18 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}
- err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
- &vops, i == args->num_binds - 1);
+ err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
if (err)
goto unwind_ops;
+
+#ifdef TEST_VM_OPS_ERROR
+ if (flags & FORCE_OP_ERROR) {
+ vops.inject_error = true;
+ vm->xe->vm_inject_error_position =
+ (vm->xe->vm_inject_error_position + 1) %
+ FORCE_OP_ERROR_COUNT;
+ }
+#endif
}
/* Nothing to do */
@@ -3266,11 +3087,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}
+ err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
+ if (err)
+ goto unwind_ops;
+
err = vm_bind_ioctl_ops_execute(vm, &vops);
unwind_ops:
if (err && err != -ENODATA)
vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+ xe_vma_ops_fini(&vops);
for (i = args->num_binds - 1; i >= 0; --i)
if (ops[i])
drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index b481608b12f1..c864dba35e1d 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
return drm_gpuvm_resv(&vm->gpuvm);
}
+void xe_vm_kill(struct xe_vm *vm, bool unlocked);
+
/**
* xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
* @vm: The vm
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ce1a63a5e3e7..7f9a303e51d8 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -21,18 +21,27 @@ struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
+struct xe_vm_pgtable_update_op;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#define FORCE_OP_ERROR BIT(31)
+
+#define FORCE_OP_ERROR_LOCK 0
+#define FORCE_OP_ERROR_PREPARE 1
+#define FORCE_OP_ERROR_RUN 2
+#define FORCE_OP_ERROR_COUNT 3
+#endif
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
-#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3)
-#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4)
-#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
-#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
-#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
-#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
-#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
-#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10)
+#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 3)
+#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -99,6 +108,9 @@ struct xe_vma {
*/
u8 tile_present;
+ /** @tile_staged: bind is staged for this VMA */
+ u8 tile_staged;
+
/**
* @pat_index: The pat index to use when encoding the PTEs for this vma.
*/
@@ -314,31 +326,18 @@ struct xe_vma_op_prefetch {
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
- /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
- XE_VMA_OP_FIRST = BIT(0),
- /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
- XE_VMA_OP_LAST = BIT(1),
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
- XE_VMA_OP_COMMITTED = BIT(2),
+ XE_VMA_OP_COMMITTED = BIT(0),
/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
- XE_VMA_OP_PREV_COMMITTED = BIT(3),
+ XE_VMA_OP_PREV_COMMITTED = BIT(1),
/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
- XE_VMA_OP_NEXT_COMMITTED = BIT(4),
+ XE_VMA_OP_NEXT_COMMITTED = BIT(2),
};
/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
/** @base: GPUVA base operation */
struct drm_gpuva_op base;
- /** @q: exec queue for this operation */
- struct xe_exec_queue *q;
- /**
- * @syncs: syncs for this operation, only used on first and last
- * operation
- */
- struct xe_sync_entry *syncs;
- /** @num_syncs: number of syncs */
- u32 num_syncs;
/** @link: async operation link */
struct list_head link;
/** @flags: operation flags */
@@ -362,12 +361,18 @@ struct xe_vma_ops {
struct list_head list;
/** @vm: VM */
struct xe_vm *vm;
- /** @q: exec queue these operations */
+ /** @q: exec queue for VMA operations */
struct xe_exec_queue *q;
/** @syncs: syncs these operation */
struct xe_sync_entry *syncs;
/** @num_syncs: number of syncs */
u32 num_syncs;
+ /** @pt_update_ops: page table update operations */
+ struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+#ifdef TEST_VM_OPS_ERROR
+ /** @inject_error: inject error to test error handling */
+ bool inject_error;
+#endif
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 5bcd59190353..80ba2fc78837 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -182,6 +182,7 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
offset = offset_hi << 32; /* HW view bits 39:32 */
offset |= offset_lo << 6; /* HW view bits 31:6 */
offset *= num_enabled; /* convert to SW view */
+ offset = round_up(offset, SZ_128K); /* SW must round up to nearest 128K */
/* We don't expect any holes */
xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size),
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index e648265d081b..d424992514a4 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -733,6 +733,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
+ { XE_RTP_NAME("15016589081"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ },
{}
};
@@ -759,6 +763,7 @@ void xe_wa_process_oob(struct xe_gt *gt)
xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.oob,
ARRAY_SIZE(oob_was));
+ gt->wa_active.oob_initialized = true;
xe_rtp_process(&ctx, oob_was);
}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index db9ddeaf69bf..52337405b5bc 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -6,6 +6,8 @@
#ifndef _XE_WA_
#define _XE_WA_
+#include "xe_assert.h"
+
struct drm_printer;
struct xe_gt;
struct xe_hw_engine;
@@ -25,6 +27,9 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
-#define XE_WA(gt__, id__) test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob)
+#define XE_WA(gt__, id__) ({ \
+ xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \
+ test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
+})
#endif
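
Because XE_WA() now asserts oob_initialized, it must only be consulted after xe_wa_process_oob() has run (which sets the flag, per the xe_wa.c hunk above). A hypothetical call site, using one of the OOB IDs added in the rules file below:

```c
/*
 * Hedged usage sketch; tune_engine() is hypothetical, the ID is one of
 * the xe_wa_oob.rules entries added below, and the XE_WA_OOB_* constants
 * are generated at build time from that file.
 */
static void tune_engine(struct xe_gt *gt)
{
	/* Fires the new assert if xe_wa_process_oob() has not run yet */
	if (XE_WA(gt, 14019789679)) {
		/* apply the workaround-specific programming here */
	}
}
```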
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 08f7336881e3..920ca5060146 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -27,6 +27,13 @@
16022287689 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
+14022293748 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2004)
+22019794406 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION(2004)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
+22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001)
+14019789679 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION_RANGE(1270, 2004)
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index f69721339201..d46fa8374980 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -8,7 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_gt.h"
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f006bc931324..b62e4f0e8130 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -404,9 +404,10 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
if (err < 0)
goto put_group;
- host->domain = iommu_domain_alloc(&platform_bus_type);
- if (!host->domain) {
- err = -ENOMEM;
+ host->domain = iommu_paging_domain_alloc(host->dev);
+ if (IS_ERR(host->domain)) {
+ err = PTR_ERR(host->domain);
+ host->domain = NULL;
goto put_cache;
}
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 925a118db23f..92031b240a17 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/iova.h>
+#include <linux/irqreturn.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -81,6 +82,7 @@ struct host1x_intr_ops {
void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
void (*disable_all_syncpt_intrs)(struct host1x *host);
int (*free_syncpt_irq)(struct host1x *host);
+ irqreturn_t (*isr)(int irq, void *dev_id);
};
struct host1x_sid_entry {
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 9880e0c47235..415f8d7e4202 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -6,18 +6,11 @@
* Copyright (c) 2010-2013, NVIDIA Corporation.
*/
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/io.h>
#include "../intr.h"
#include "../dev.h"
-struct host1x_intr_irq_data {
- struct host1x *host;
- u32 offset;
-};
-
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
struct host1x_intr_irq_data *irq_data = dev_id;
@@ -54,7 +47,8 @@ static void host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
}
}
-static void intr_hw_init(struct host1x *host, u32 cpm)
+static int
+host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
{
#if HOST1X_HW < 6
/* disable the ip_busy_timeout. this prevents write drops */
@@ -85,32 +79,6 @@ static void intr_hw_init(struct host1x *host, u32 cpm)
host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id));
}
#endif
-}
-
-static int
-host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
-{
- int err, i;
- struct host1x_intr_irq_data *irq_data;
-
- irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
- if (!irq_data)
- return -ENOMEM;
-
- host1x_hw_intr_disable_all_syncpt_intrs(host);
-
- for (i = 0; i < host->num_syncpt_irqs; i++) {
- irq_data[i].host = host;
- irq_data[i].offset = i;
-
- err = devm_request_irq(host->dev, host->syncpt_irqs[i],
- syncpt_thresh_isr, IRQF_SHARED,
- "host1x_syncpt", &irq_data[i]);
- if (err < 0)
- return err;
- }
-
- intr_hw_init(host, cpm);
return 0;
}
@@ -144,4 +112,5 @@ static const struct host1x_intr_ops host1x_intr_ops = {
.enable_syncpt_intr = host1x_intr_enable_syncpt_intr,
.disable_syncpt_intr = host1x_intr_disable_syncpt_intr,
.disable_all_syncpt_intrs = host1x_intr_disable_all_syncpt_intrs,
+ .isr = syncpt_thresh_isr,
};
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 995bfa980837..b3285dd10180 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -6,7 +6,7 @@
*/
#include <linux/clk.h>
-
+#include <linux/interrupt.h>
#include "dev.h"
#include "fence.h"
#include "intr.h"
@@ -100,7 +100,9 @@ void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
int host1x_intr_init(struct host1x *host)
{
+ struct host1x_intr_irq_data *irq_data;
unsigned int id;
+ int i, err;
mutex_init(&host->intr_mutex);
@@ -111,6 +113,23 @@ int host1x_intr_init(struct host1x *host)
INIT_LIST_HEAD(&syncpt->fences.list);
}
+ irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+ for (i = 0; i < host->num_syncpt_irqs; i++) {
+ irq_data[i].host = host;
+ irq_data[i].offset = i;
+
+ err = devm_request_irq(host->dev, host->syncpt_irqs[i],
+ host->intr_op->isr, IRQF_SHARED,
+ "host1x_syncpt", &irq_data[i]);
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index 3b5610b525e5..11cdf13e32fe 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -11,6 +11,11 @@
struct host1x;
struct host1x_syncpt_fence;
+struct host1x_intr_irq_data {
+ struct host1x *host;
+ u32 offset;
+};
+
/* Initialize host1x sync point interrupt */
int host1x_intr_init(struct host1x *host);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 365e6ddbe90f..18f2c92beff8 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -926,8 +926,7 @@ static void vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
/**
* vga_switcheroo_process_delayed_switch() - helper for delayed switching
*
- * Process a delayed switch if one is pending. DRM drivers should call this
- * from their ->lastclose callback.
+ * Process a delayed switch if one is pending.
*
* Return: 0 on success. -EINVAL if no delayed switch is pending, if the client
* has unregistered in the meantime or if there are other clients blocking the
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
index ab81ceceb337..c3f056d28b01 100644
--- a/drivers/greybus/Kconfig
+++ b/drivers/greybus/Kconfig
@@ -21,6 +21,8 @@ config GREYBUS_BEAGLEPLAY
tristate "Greybus BeaglePlay driver"
depends on SERIAL_DEV_BUS
select CRC_CCITT
+ select FW_LOADER
+ select FW_UPLOAD
help
Select this option if you have a BeaglePlay where CC1352
co-processor acts as Greybus SVC.
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index 33f8fad70260..3a1ade84737c 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -6,21 +6,19 @@
* Copyright (c) 2023 BeagleBoard.org Foundation
*/
-#include <linux/gfp.h>
+#include <asm-generic/unaligned.h>
+#include <linux/crc32.h>
+#include <linux/gpio/consumer.h>
+#include <linux/firmware.h>
#include <linux/greybus.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/printk.h>
#include <linux/serdev.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/greybus/hd.h>
-#include <linux/init.h>
-#include <linux/device.h>
#include <linux/crc-ccitt.h>
#include <linux/circ_buf.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
+
+#define CC1352_FIRMWARE_SIZE (704 * 1024)
+#define CC1352_BOOTLOADER_TIMEOUT 2000
+#define CC1352_BOOTLOADER_ACK 0xcc
+#define CC1352_BOOTLOADER_NACK 0x33
#define RX_HDLC_PAYLOAD 256
#define CRC_LEN 2
@@ -57,6 +55,17 @@
* @rx_buffer_len: length of receive buffer filled.
* @rx_buffer: hdlc frame receive buffer
* @rx_in_esc: hdlc rx flag to indicate ESC frame
+ *
+ * @fwl: underlying firmware upload device
+ * @bootloader_backdoor_gpio: cc1352p7 boot gpio
+ * @rst_gpio: cc1352p7 reset gpio
+ * @flashing_mode: flag to indicate that flashing is currently in progress
+ * @fwl_ack_com: completion to signal an Ack/Nack
+ * @fwl_ack: Ack/Nack byte received
+ * @fwl_cmd_response_com: completion to signal a bootloader command response
+ * @fwl_cmd_response: bootloader command response data
+ * @fwl_crc32: crc32 of firmware to flash
+ * @fwl_reset_addr: flag to indicate if we need to send COMMAND_DOWNLOAD again
*/
struct gb_beagleplay {
struct serdev_device *sd;
@@ -72,6 +81,17 @@ struct gb_beagleplay {
u16 rx_buffer_len;
bool rx_in_esc;
u8 rx_buffer[MAX_RX_HDLC];
+
+ struct fw_upload *fwl;
+ struct gpio_desc *bootloader_backdoor_gpio;
+ struct gpio_desc *rst_gpio;
+ bool flashing_mode;
+ struct completion fwl_ack_com;
+ u8 fwl_ack;
+ struct completion fwl_cmd_response_com;
+ u32 fwl_cmd_response;
+ u32 fwl_crc32;
+ bool fwl_reset_addr;
};
/**
@@ -100,6 +120,87 @@ struct hdlc_greybus_frame {
u8 payload[];
} __packed;
+/**
+ * enum cc1352_bootloader_cmd: CC1352 Bootloader Commands
+ *
+ * @COMMAND_DOWNLOAD: Prepares flash programming
+ * @COMMAND_GET_STATUS: Returns the status of the last command that was issued
+ * @COMMAND_SEND_DATA: Transfers data and programs flash
+ * @COMMAND_RESET: Performs a system reset
+ * @COMMAND_CRC32: Calculates CRC32 over a specified memory area
+ * @COMMAND_BANK_ERASE: Performs an erase of all of the customer-accessible
+ * flash sectors not protected by FCFG1 and CCFG
+ * writeprotect bits.
+ *
+ * CC1352 Bootloader serial bus commands
+ */
+enum cc1352_bootloader_cmd {
+ COMMAND_DOWNLOAD = 0x21,
+ COMMAND_GET_STATUS = 0x23,
+ COMMAND_SEND_DATA = 0x24,
+ COMMAND_RESET = 0x25,
+ COMMAND_CRC32 = 0x27,
+ COMMAND_BANK_ERASE = 0x2c,
+};
+
+/**
+ * enum cc1352_bootloader_status: CC1352 Bootloader COMMAND_GET_STATUS response
+ *
+ * @COMMAND_RET_SUCCESS: Status for successful command
+ * @COMMAND_RET_UNKNOWN_CMD: Status for unknown command
+ * @COMMAND_RET_INVALID_CMD: Status for invalid command (in other words,
+ * incorrect packet size)
+ * @COMMAND_RET_INVALID_ADR: Status for invalid input address
+ * @COMMAND_RET_FLASH_FAIL: Status for failing flash erase or program operation
+ */
+enum cc1352_bootloader_status {
+ COMMAND_RET_SUCCESS = 0x40,
+ COMMAND_RET_UNKNOWN_CMD = 0x41,
+ COMMAND_RET_INVALID_CMD = 0x42,
+ COMMAND_RET_INVALID_ADR = 0x43,
+ COMMAND_RET_FLASH_FAIL = 0x44,
+};
+
+/**
+ * struct cc1352_bootloader_packet: CC1352 Bootloader Request Packet
+ *
+ * @len: length of packet + optional request data
+ * @checksum: 8-bit checksum excluding len
+ * @cmd: bootloader command
+ */
+struct cc1352_bootloader_packet {
+ u8 len;
+ u8 checksum;
+ u8 cmd;
+} __packed;
+
+#define CC1352_BOOTLOADER_PKT_MAX_SIZE \
+ (U8_MAX - sizeof(struct cc1352_bootloader_packet))
+
+/**
+ * struct cc1352_bootloader_download_cmd_data: CC1352 Bootloader COMMAND_DOWNLOAD request data
+ *
+ * @addr: address to start programming data into
+ * @size: size of data that will be sent
+ */
+struct cc1352_bootloader_download_cmd_data {
+ __be32 addr;
+ __be32 size;
+} __packed;
+
+/**
+ * struct cc1352_bootloader_crc32_cmd_data: CC1352 Bootloader COMMAND_CRC32 request data
+ *
+ * @addr: address where crc32 calculation starts
+ * @size: number of bytes comprised by crc32 calculation
+ * @read_repeat: number of read repeats for each data location
+ */
+struct cc1352_bootloader_crc32_cmd_data {
+ __be32 addr;
+ __be32 size;
+ __be32 read_repeat;
+} __packed;
+
static void hdlc_rx_greybus_frame(struct gb_beagleplay *bg, u8 *buf, u16 len)
{
struct hdlc_greybus_frame *gb_frame = (struct hdlc_greybus_frame *)buf;
@@ -331,11 +432,135 @@ static void hdlc_deinit(struct gb_beagleplay *bg)
flush_work(&bg->tx_work);
}
+/**
+ * csum8: Calculate 8-bit checksum on data
+ *
+ * @data: bytes to calculate 8-bit checksum of
+ * @size: number of bytes
+ * @base: starting value for checksum
+ */
+static u8 csum8(const u8 *data, size_t size, u8 base)
+{
+ size_t i;
+ u8 sum = base;
+
+ for (i = 0; i < size; ++i)
+ sum += data[i];
+
+ return sum;
+}
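
Paired with the packet layout above, csum8() pins down the checksum rule: it covers the command byte plus payload but never the length byte. A self-contained check of two packets built later in this patch (byte values derived from the code here, assumed to match the TI serial bootloader convention):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t csum8(const uint8_t *data, size_t size, uint8_t base)
{
	uint8_t sum = base;
	size_t i;

	for (i = 0; i < size; ++i)
		sum += data[i];
	return sum;
}

int main(void)
{
	/* COMMAND_GET_STATUS (0x23) has no payload, so checksum == cmd
	 * and the wire packet is { 0x03, 0x23, 0x23 }. */
	printf("GET_STATUS checksum: 0x%02x\n", csum8(NULL, 0, 0x23));

	/* COMMAND_DOWNLOAD (0x21) with addr = 0, size = 704 KiB: the
	 * payload is two big-endian u32s, so checksum = 0x21 + 0x0b. */
	const uint8_t payload[8] = { 0x00, 0x00, 0x00, 0x00,
				     0x00, 0x0b, 0x00, 0x00 };
	printf("DOWNLOAD checksum: 0x%02x\n",
	       csum8(payload, sizeof(payload), 0x21));
	return 0;
}
```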
+
+static void cc1352_bootloader_send_ack(struct gb_beagleplay *bg)
+{
+ static const u8 ack[] = { 0x00, CC1352_BOOTLOADER_ACK };
+
+ serdev_device_write_buf(bg->sd, ack, sizeof(ack));
+}
+
+static void cc1352_bootloader_send_nack(struct gb_beagleplay *bg)
+{
+ static const u8 nack[] = { 0x00, CC1352_BOOTLOADER_NACK };
+
+ serdev_device_write_buf(bg->sd, nack, sizeof(nack));
+}
+
+/**
+ * cc1352_bootloader_pkt_rx: Process a CC1352 Bootloader Packet
+ *
+ * @bg: beagleplay greybus driver
+ * @data: packet buffer
+ * @count: packet buffer size
+ *
+ * @return: number of bytes processed
+ *
+ * Here are the steps to successfully receive a packet from the cc1352
+ * bootloader, according to the docs:
+ * 1. Wait for nonzero data to be returned from the device. This is important
+ * as the device may send zero bytes between a sent and a received data
+ * packet. The first nonzero byte received is the size of the packet that is
+ * being received.
+ * 2. Read the next byte, which is the checksum for the packet.
+ * 3. Read the data bytes from the device. During the data phase, packet size
+ * minus 2 bytes is sent.
+ * 4. Calculate the checksum of the data bytes and verify it matches the
+ * checksum received in the packet.
+ * 5. Send an acknowledge byte or a not-acknowledge byte to the device to
+ * indicate the successful or unsuccessful reception of the packet.
+ */
+static int cc1352_bootloader_pkt_rx(struct gb_beagleplay *bg, const u8 *data,
+ size_t count)
+{
+ bool is_valid = false;
+
+ switch (data[0]) {
+ /* Skip 0x00 bytes. */
+ case 0x00:
+ return 1;
+ case CC1352_BOOTLOADER_ACK:
+ case CC1352_BOOTLOADER_NACK:
+ WRITE_ONCE(bg->fwl_ack, data[0]);
+ complete(&bg->fwl_ack_com);
+ return 1;
+ case 3:
+ if (count < 3)
+ return 0;
+ is_valid = data[1] == data[2];
+ WRITE_ONCE(bg->fwl_cmd_response, (u32)data[2]);
+ break;
+ case 6:
+ if (count < 6)
+ return 0;
+ is_valid = csum8(&data[2], sizeof(__be32), 0) == data[1];
+ WRITE_ONCE(bg->fwl_cmd_response, get_unaligned_be32(&data[2]));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (is_valid) {
+ cc1352_bootloader_send_ack(bg);
+ complete(&bg->fwl_cmd_response_com);
+ } else {
+ dev_warn(&bg->sd->dev,
+ "Dropping bootloader packet with invalid checksum");
+ cc1352_bootloader_send_nack(bg);
+ }
+
+ return data[0];
+}
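
To make the dispatch above concrete, a few illustrative input streams (byte values invented for the example, not captured from hardware):

```c
/*
 * Illustrative inputs to cc1352_bootloader_pkt_rx():
 *
 *   { 0x00 }                    -> filler, consumes 1 byte
 *   { 0xcc }                    -> ACK, completes fwl_ack_com
 *   { 0x03, 0x40, 0x40 }        -> 3-byte status response; valid since
 *                                  checksum == data byte,
 *                                  fwl_cmd_response = 0x40 (SUCCESS)
 *   { 0x06, 0x14, 0x12, 0x34, 0x56, 0x78 }
 *                               -> 6-byte CRC32 response; valid since
 *                                  csum8 of the four data bytes is 0x14,
 *                                  fwl_cmd_response = 0x12345678
 *
 * A short read (count below the size byte) returns 0 so the caller
 * keeps buffering; any other leading byte returns -EINVAL.
 */
```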
+
+static size_t cc1352_bootloader_rx(struct gb_beagleplay *bg, const u8 *data,
+ size_t count)
+{
+ int ret;
+ size_t off = 0;
+
+ memcpy(bg->rx_buffer + bg->rx_buffer_len, data, count);
+ bg->rx_buffer_len += count;
+
+ do {
+ ret = cc1352_bootloader_pkt_rx(bg, bg->rx_buffer + off,
+ bg->rx_buffer_len - off);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, ret,
+ "Invalid Packet");
+ off += ret;
+ } while (ret > 0 && off < count);
+
+ bg->rx_buffer_len -= off;
+ memmove(bg->rx_buffer, bg->rx_buffer + off, bg->rx_buffer_len);
+
+ return count;
+}
+
static size_t gb_tty_receive(struct serdev_device *sd, const u8 *data,
size_t count)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(sd);
+ if (READ_ONCE(bg->flashing_mode))
+ return cc1352_bootloader_rx(bg, data, count);
+
return hdlc_rx(bg, data, count);
}
@@ -343,7 +568,8 @@ static void gb_tty_wakeup(struct serdev_device *serdev)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(serdev);
- schedule_work(&bg->tx_work);
+ if (!READ_ONCE(bg->flashing_mode))
+ schedule_work(&bg->tx_work);
}
static struct serdev_device_ops gb_beagleplay_ops = {
@@ -412,6 +638,195 @@ static void gb_beagleplay_stop_svc(struct gb_beagleplay *bg)
hdlc_tx_frames(bg, ADDRESS_CONTROL, 0x03, &payload, 1);
}
+static int cc1352_bootloader_wait_for_ack(struct gb_beagleplay *bg)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(
+ &bg->fwl_ack_com, msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, ret,
+ "Failed to acquire ack semaphore");
+
+ switch (READ_ONCE(bg->fwl_ack)) {
+ case CC1352_BOOTLOADER_ACK:
+ return 0;
+ case CC1352_BOOTLOADER_NACK:
+ return -EAGAIN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cc1352_bootloader_sync(struct gb_beagleplay *bg)
+{
+ static const u8 sync_bytes[] = { 0x55, 0x55 };
+
+ serdev_device_write_buf(bg->sd, sync_bytes, sizeof(sync_bytes));
+ return cc1352_bootloader_wait_for_ack(bg);
+}
+
+static int cc1352_bootloader_get_status(struct gb_beagleplay *bg)
+{
+ int ret;
+ static const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt),
+ .checksum = COMMAND_GET_STATUS,
+ .cmd = COMMAND_GET_STATUS
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(
+ &bg->fwl_cmd_response_com,
+ msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, ret,
+ "Failed to acquire last status semaphore");
+
+ switch (READ_ONCE(bg->fwl_cmd_response)) {
+ case COMMAND_RET_SUCCESS:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cc1352_bootloader_erase(struct gb_beagleplay *bg)
+{
+ int ret;
+ static const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt),
+ .checksum = COMMAND_BANK_ERASE,
+ .cmd = COMMAND_BANK_ERASE
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ return cc1352_bootloader_get_status(bg);
+}
+
+static int cc1352_bootloader_reset(struct gb_beagleplay *bg)
+{
+ static const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt),
+ .checksum = COMMAND_RESET,
+ .cmd = COMMAND_RESET
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+
+ return cc1352_bootloader_wait_for_ack(bg);
+}
+
+/**
+ * cc1352_bootloader_empty_pkt: Calculate the number of empty bytes in the current packet
+ *
+ * @data: packet bytes array to check
+ * @size: number of bytes in array
+ */
+static size_t cc1352_bootloader_empty_pkt(const u8 *data, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size && data[i] == 0xff; ++i)
+ continue;
+
+ return i;
+}
+
+static int cc1352_bootloader_crc32(struct gb_beagleplay *bg, u32 *crc32)
+{
+ int ret;
+ static const struct cc1352_bootloader_crc32_cmd_data cmd_data = {
+ .addr = 0, .size = cpu_to_be32(704 * 1024), .read_repeat = 0
+ };
+ const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt) + sizeof(cmd_data),
+ .checksum = csum8((const void *)&cmd_data, sizeof(cmd_data),
+ COMMAND_CRC32),
+ .cmd = COMMAND_CRC32
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+ serdev_device_write_buf(bg->sd, (const u8 *)&cmd_data,
+ sizeof(cmd_data));
+
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(
+ &bg->fwl_cmd_response_com,
+ msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, ret,
+ "Failed to acquire last status semaphore");
+
+ *crc32 = READ_ONCE(bg->fwl_cmd_response);
+
+ return 0;
+}
+
+static int cc1352_bootloader_download(struct gb_beagleplay *bg, u32 size,
+ u32 addr)
+{
+ int ret;
+ const struct cc1352_bootloader_download_cmd_data cmd_data = {
+ .addr = cpu_to_be32(addr),
+ .size = cpu_to_be32(size),
+ };
+ const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt) + sizeof(cmd_data),
+ .checksum = csum8((const void *)&cmd_data, sizeof(cmd_data),
+ COMMAND_DOWNLOAD),
+ .cmd = COMMAND_DOWNLOAD
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+ serdev_device_write_buf(bg->sd, (const u8 *)&cmd_data,
+ sizeof(cmd_data));
+
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ return cc1352_bootloader_get_status(bg);
+}
+
+static int cc1352_bootloader_send_data(struct gb_beagleplay *bg, const u8 *data,
+ size_t size)
+{
+ int ret, rem = min(size, CC1352_BOOTLOADER_PKT_MAX_SIZE);
+ const struct cc1352_bootloader_packet pkt = {
+ .len = sizeof(pkt) + rem,
+ .checksum = csum8(data, rem, COMMAND_SEND_DATA),
+ .cmd = COMMAND_SEND_DATA
+ };
+
+ serdev_device_write_buf(bg->sd, (const u8 *)&pkt, sizeof(pkt));
+ serdev_device_write_buf(bg->sd, data, rem);
+
+ ret = cc1352_bootloader_wait_for_ack(bg);
+ if (ret < 0)
+ return ret;
+
+ ret = cc1352_bootloader_get_status(bg);
+ if (ret < 0)
+ return ret;
+
+ return rem;
+}
+
static void gb_greybus_deinit(struct gb_beagleplay *bg)
{
gb_hd_del(bg->gb_hd);
@@ -442,6 +857,157 @@ free_gb_hd:
return ret;
}
+static enum fw_upload_err cc1352_prepare(struct fw_upload *fw_upload,
+ const u8 *data, u32 size)
+{
+ int ret;
+ u32 curr_crc32;
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ dev_info(&bg->sd->dev, "CC1352 Start Flashing...");
+
+ if (size != CC1352_FIRMWARE_SIZE)
+ return FW_UPLOAD_ERR_INVALID_SIZE;
+
+ /* Might involve network calls */
+ gb_greybus_deinit(bg);
+ msleep(5 * MSEC_PER_SEC);
+
+ gb_beagleplay_stop_svc(bg);
+ msleep(200);
+ flush_work(&bg->tx_work);
+
+ serdev_device_wait_until_sent(bg->sd, CC1352_BOOTLOADER_TIMEOUT);
+
+ WRITE_ONCE(bg->flashing_mode, true);
+
+ gpiod_direction_output(bg->bootloader_backdoor_gpio, 0);
+ gpiod_direction_output(bg->rst_gpio, 0);
+ msleep(200);
+
+ gpiod_set_value(bg->rst_gpio, 1);
+ msleep(200);
+
+ gpiod_set_value(bg->bootloader_backdoor_gpio, 1);
+ msleep(200);
+
+ gpiod_direction_input(bg->bootloader_backdoor_gpio);
+ gpiod_direction_input(bg->rst_gpio);
+
+ ret = cc1352_bootloader_sync(bg);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to sync");
+
+ ret = cc1352_bootloader_crc32(bg, &curr_crc32);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to fetch crc32");
+
+ bg->fwl_crc32 = crc32(0xffffffff, data, size) ^ 0xffffffff;
+
+ /* Check if attempting to reflash the same firmware */
+ if (bg->fwl_crc32 == curr_crc32) {
+ dev_warn(&bg->sd->dev, "Skipping reflashing same image\n");
+ cc1352_bootloader_reset(bg);
+ WRITE_ONCE(bg->flashing_mode, false);
+ msleep(200);
+ gb_greybus_init(bg);
+ gb_beagleplay_start_svc(bg);
+ return FW_UPLOAD_ERR_FW_INVALID;
+ }
+
+ ret = cc1352_bootloader_erase(bg);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to erase");
+
+ bg->fwl_reset_addr = true;
+
+ return FW_UPLOAD_ERR_NONE;
+}
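cc1352_prepare() compares crc32(0xffffffff, data, size) ^ 0xffffffff, i.e. the common reflected CRC-32 with an all-ones seed and final inversion, against the value the bootloader computes over flash. A bit-at-a-time sketch of the same computation (the kernel's crc32() is table-driven, but the result is identical):

/* Sketch: bitwise equivalent of crc32(0xffffffff, buf, len) ^ 0xffffffff */
static u32 example_crc32(const u8 *buf, size_t len)
{
	u32 crc = 0xffffffff;
	size_t i;
	unsigned int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			/* 0xedb88320 is the reflected CRC-32 polynomial */
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}

	return crc ^ 0xffffffff;
}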
+
+static void cc1352_cleanup(struct fw_upload *fw_upload)
+{
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ WRITE_ONCE(bg->flashing_mode, false);
+}
+
+static enum fw_upload_err cc1352_write(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset, u32 size,
+ u32 *written)
+{
+ int ret;
+ size_t empty_bytes;
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ /* Skip packets that are all 0xff (erased flash); a significant performance improvement */
+ empty_bytes = cc1352_bootloader_empty_pkt(data + offset, size);
+ if (empty_bytes >= CC1352_BOOTLOADER_PKT_MAX_SIZE) {
+ bg->fwl_reset_addr = true;
+ *written = empty_bytes;
+ return FW_UPLOAD_ERR_NONE;
+ }
+
+ if (bg->fwl_reset_addr) {
+ ret = cc1352_bootloader_download(bg, size, offset);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev,
+ FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to send download cmd");
+
+ bg->fwl_reset_addr = false;
+ }
+
+ ret = cc1352_bootloader_send_data(bg, data + offset, size);
+ if (ret < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to flash firmware");
+ *written = ret;
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err cc1352_poll_complete(struct fw_upload *fw_upload)
+{
+ u32 curr_crc32;
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ if (cc1352_bootloader_crc32(bg, &curr_crc32) < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to fetch crc32");
+
+ if (bg->fwl_crc32 != curr_crc32)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_FW_INVALID,
+ "Invalid CRC32");
+
+ if (cc1352_bootloader_reset(bg) < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_HW_ERROR,
+ "Failed to reset");
+
+ dev_info(&bg->sd->dev, "CC1352 Flashing Successful");
+ WRITE_ONCE(bg->flashing_mode, false);
+ msleep(200);
+
+ if (gb_greybus_init(bg) < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_RW_ERROR,
+ "Failed to initialize greybus");
+
+ gb_beagleplay_start_svc(bg);
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static void cc1352_cancel(struct fw_upload *fw_upload)
+{
+ struct gb_beagleplay *bg = fw_upload->dd_handle;
+
+ dev_info(&bg->sd->dev, "CC1352 Bootloader Cancel");
+
+ cc1352_bootloader_reset(bg);
+}
+
static void gb_serdev_deinit(struct gb_beagleplay *bg)
{
serdev_device_close(bg->sd);
@@ -463,6 +1029,65 @@ static int gb_serdev_init(struct gb_beagleplay *bg)
return 0;
}
+static const struct fw_upload_ops cc1352_bootloader_ops = {
+ .prepare = cc1352_prepare,
+ .write = cc1352_write,
+ .poll_complete = cc1352_poll_complete,
+ .cancel = cc1352_cancel,
+ .cleanup = cc1352_cleanup
+};
+
+static int gb_fw_init(struct gb_beagleplay *bg)
+{
+ int ret;
+ struct fw_upload *fwl;
+ struct gpio_desc *desc;
+
+ bg->fwl = NULL;
+ bg->bootloader_backdoor_gpio = NULL;
+ bg->rst_gpio = NULL;
+ bg->flashing_mode = false;
+ bg->fwl_cmd_response = 0;
+ bg->fwl_ack = 0;
+ init_completion(&bg->fwl_ack_com);
+ init_completion(&bg->fwl_cmd_response_com);
+
+ desc = devm_gpiod_get(&bg->sd->dev, "bootloader-backdoor", GPIOD_IN);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+ bg->bootloader_backdoor_gpio = desc;
+
+ desc = devm_gpiod_get(&bg->sd->dev, "reset", GPIOD_IN);
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
+ goto free_boot;
+ }
+ bg->rst_gpio = desc;
+
+ fwl = firmware_upload_register(THIS_MODULE, &bg->sd->dev, "cc1352p7",
+ &cc1352_bootloader_ops, bg);
+ if (IS_ERR(fwl)) {
+ ret = PTR_ERR(fwl);
+ goto free_reset;
+ }
+ bg->fwl = fwl;
+
+ return 0;
+
+free_reset:
+ devm_gpiod_put(&bg->sd->dev, bg->rst_gpio);
+ bg->rst_gpio = NULL;
+free_boot:
+ devm_gpiod_put(&bg->sd->dev, bg->bootloader_backdoor_gpio);
+ bg->bootloader_backdoor_gpio = NULL;
+ return ret;
+}
+
+static void gb_fw_deinit(struct gb_beagleplay *bg)
+{
+ firmware_upload_unregister(bg->fwl);
+}
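With gb_fw_init() registered against the firmware upload class, flashing is driven entirely from user space via sysfs. A hedged sketch of that flow (the /sys/class/firmware/cc1352p7/ file names follow the fw_upload documentation, and the firmware file name is hypothetical; verify both on the target):

/* Sketch: user-space side of the fw_upload flow (paths assumed) */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fw = open("cc1352p7-firmware.bin", O_RDONLY);
	int loading = open("/sys/class/firmware/cc1352p7/loading", O_WRONLY);
	int data = open("/sys/class/firmware/cc1352p7/data", O_WRONLY);

	if (fw < 0 || loading < 0 || data < 0)
		return 1;

	write(loading, "1", 1);			/* triggers cc1352_prepare() */
	while ((n = read(fw, buf, sizeof(buf))) > 0)
		write(data, buf, n);		/* streamed into cc1352_write() */
	write(loading, "0", 1);			/* cc1352_poll_complete() runs */

	return 0;
}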
+
static int gb_beagleplay_probe(struct serdev_device *serdev)
{
int ret = 0;
@@ -481,14 +1106,20 @@ static int gb_beagleplay_probe(struct serdev_device *serdev)
if (ret)
goto free_serdev;
- ret = gb_greybus_init(bg);
+ ret = gb_fw_init(bg);
if (ret)
goto free_hdlc;
+ ret = gb_greybus_init(bg);
+ if (ret)
+ goto free_fw;
+
gb_beagleplay_start_svc(bg);
return 0;
+free_fw:
+ gb_fw_deinit(bg);
free_hdlc:
hdlc_deinit(bg);
free_serdev:
@@ -500,6 +1131,7 @@ static void gb_beagleplay_remove(struct serdev_device *serdev)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(serdev);
+ gb_fw_deinit(bg);
gb_greybus_deinit(bg);
gb_beagleplay_stop_svc(bg);
hdlc_deinit(bg);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index a54c7995b9be..21a70420151e 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -803,7 +803,6 @@ static const struct file_operations uhid_fops = {
.read = uhid_char_read,
.write = uhid_char_write,
.poll = uhid_char_poll,
- .llseek = no_llseek,
};
static struct miscdevice uhid_misc = {
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 9c452bfbd571..7a35c82976e0 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -207,13 +207,13 @@ static int hv_die_panic_notify_crash(struct notifier_block *self,
* buffer and call into Hyper-V to transfer the data.
*/
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct kmsg_dump_iter iter;
size_t bytes_written;
/* We are only interested in panics. */
- if (reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
+ if (detail->reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
return;
/*
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 3751c1e3eddd..1dc7e24fe4c5 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -783,7 +783,6 @@ static const struct file_operations atk_debugfs_ggrp_fops = {
.read = atk_debugfs_ggrp_read,
.open = atk_debugfs_ggrp_open,
.release = atk_debugfs_ggrp_release,
- .llseek = no_llseek,
};
static void atk_debugfs_init(struct atk_data *data)
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 1811f84d835e..a303959879ef 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -948,7 +948,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = watchdog_open,
.release = watchdog_release,
.write = watchdog_write,
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 0acf6bd0227f..67728f60333f 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1451,7 +1451,6 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = watchdog_open,
.release = watchdog_close,
.write = watchdog_write,
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 9fc6f6b863e0..ea38ecf26fcb 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -487,23 +487,25 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
+u32 coresight_get_sink_id(struct coresight_device *csdev)
+{
+ if (!csdev->ea)
+ return 0;
+
+ /*
+ * See function etm_perf_add_symlink_sink() to know where
+ * this comes from.
+ */
+ return (u32) (unsigned long) csdev->ea->var;
+}
+
static int coresight_sink_by_id(struct device *dev, const void *data)
{
struct coresight_device *csdev = to_coresight_device(dev);
- unsigned long hash;
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
-
- if (!csdev->ea)
- return 0;
- /*
- * See function etm_perf_add_symlink_sink() to know where
- * this comes from.
- */
- hash = (unsigned long)csdev->ea->var;
-
- if ((u32)hash == *(u32 *)data)
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ if (coresight_get_sink_id(csdev) == *(u32 *)data)
return 1;
}
@@ -902,6 +904,7 @@ static void coresight_device_release(struct device *dev)
struct coresight_device *csdev = to_coresight_device(dev);
fwnode_handle_put(csdev->dev.fwnode);
+ free_percpu(csdev->perf_sink_id_map.cpu_map);
kfree(csdev);
}
@@ -1159,6 +1162,16 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
dev_set_name(&csdev->dev, "%s", desc->name);
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ spin_lock_init(&csdev->perf_sink_id_map.lock);
+ csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t);
+ if (!csdev->perf_sink_id_map.cpu_map) {
+ kfree(csdev);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ }
/*
* Make sure the device registration and the connection fixup
* are synchronised, so that we don't see uninitialised devices
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
index ccef04f27f12..d0ae10bf6128 100644
--- a/drivers/hwtracing/coresight/coresight-cti-platform.c
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -416,20 +416,16 @@ static int cti_plat_create_impdef_connections(struct device *dev,
struct cti_drvdata *drvdata)
{
int rc = 0;
- struct fwnode_handle *fwnode = dev_fwnode(dev);
- struct fwnode_handle *child = NULL;
- if (IS_ERR_OR_NULL(fwnode))
+ if (IS_ERR_OR_NULL(dev_fwnode(dev)))
return -EINVAL;
- fwnode_for_each_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (cti_plat_node_name_eq(child, CTI_DT_CONNS))
- rc = cti_plat_create_connection(dev, drvdata,
- child);
+ rc = cti_plat_create_connection(dev, drvdata, child);
if (rc != 0)
break;
}
- fwnode_handle_put(child);
return rc;
}
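The conversion above relies on device_for_each_child_node_scoped() declaring the child handle itself and dropping its reference on every way out of the loop, which is what let the trailing fwnode_handle_put() go away. A minimal sketch of the pattern (the property name is hypothetical):

/* Sketch: 'child' is declared by the macro and auto-released on all exits */
static int example_count_children(struct device *dev)
{
	int n = 0;

	device_for_each_child_node_scoped(dev, child) {
		if (fwnode_property_present(child, "example-prop"))
			n++;
		/* a break or early return here would still put 'child' */
	}

	return n;
}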
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index ac70c0b491be..bb85fa663ffc 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -21,8 +21,12 @@ DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
static int dummy_source_enable(struct coresight_device *csdev,
- struct perf_event *event, enum cs_mode mode)
+ struct perf_event *event, enum cs_mode mode,
+ __maybe_unused struct coresight_trace_id_map *id_map)
{
+ if (!coresight_take_mode(csdev, mode))
+ return -EBUSY;
+
dev_dbg(csdev->dev.parent, "Dummy source enabled\n");
return 0;
@@ -31,6 +35,7 @@ static int dummy_source_enable(struct coresight_device *csdev,
static void dummy_source_disable(struct coresight_device *csdev,
struct perf_event *event)
{
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
dev_dbg(csdev->dev.parent, "Dummy source disabled\n");
}
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 7edd3f1d0d46..aea9ac9c4bd0 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -652,7 +652,6 @@ static const struct file_operations etb_fops = {
.open = etb_open,
.read = etb_read,
.release = etb_release,
- .llseek = no_llseek,
};
static struct attribute *coresight_etb_mgmt_attrs[] = {
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index c0c60e6a1703..ad6a8f4b70b6 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -229,15 +229,24 @@ static void free_event_data(struct work_struct *work)
struct list_head **ppath;
ppath = etm_event_cpu_path_ptr(event_data, cpu);
- if (!(IS_ERR_OR_NULL(*ppath)))
+ if (!(IS_ERR_OR_NULL(*ppath))) {
+ struct coresight_device *sink = coresight_get_sink(*ppath);
+
+ /*
+ * Mark perf event as done for trace id allocator, but don't call
+ * coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions
+ * never free trace IDs, to ensure that the ID associated with a CPU
+ * cannot change during their own or other concurrent sessions. Instead,
+ * a refcount is used so that the last event to call
+ * coresight_trace_id_perf_stop() frees all IDs.
+ */
+ coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
+
coresight_release_path(*ppath);
+ }
*ppath = NULL;
- coresight_trace_id_put_cpu_id(cpu);
}
- /* mark perf event as done for trace id allocator */
- coresight_trace_id_perf_stop();
-
free_percpu(event_data->path);
kfree(event_data);
}
@@ -325,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = user_sink = coresight_get_sink_by_id(id);
}
- /* tell the trace ID allocator that a perf event is starting up */
- coresight_trace_id_perf_start();
-
/* check if user wants a coresight configuration selected */
cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
if (cfg_hash) {
@@ -401,13 +407,14 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/* ensure we can allocate a trace ID for this CPU */
- trace_id = coresight_trace_id_get_cpu_id(cpu);
+ trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
cpumask_clear_cpu(cpu, mask);
coresight_release_path(path);
continue;
}
+ coresight_trace_id_perf_start(&sink->perf_sink_id_map);
*etm_event_cpu_path_ptr(event_data, cpu) = path;
}
@@ -453,6 +460,7 @@ static void etm_event_start(struct perf_event *event, int flags)
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct list_head *path;
u64 hw_id;
+ u8 trace_id;
if (!csdev)
goto fail;
@@ -495,7 +503,8 @@ static void etm_event_start(struct perf_event *event, int flags)
goto fail_end_stop;
/* Finally enable the tracer */
- if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
+ if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF,
+ &sink->perf_sink_id_map))
goto fail_disable_path;
/*
@@ -504,10 +513,16 @@ static void etm_event_start(struct perf_event *event, int flags)
*/
if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
- hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK,
- CS_AUX_HW_ID_CURR_VERSION);
- hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK,
- coresight_trace_id_read_cpu_id(cpu));
+
+ trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map);
+
+ hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK,
+ CS_AUX_HW_ID_MAJOR_VERSION);
+ hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK,
+ CS_AUX_HW_ID_MINOR_VERSION);
+ hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);
+ hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink));
+
perf_report_aux_output_id(event, hw_id);
}
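The hw_id packed above travels to user space through the perf AUX output hardware ID record; consumers reverse the same FIELD_PREP() layout. A sketch of the decode, assuming the CS_AUX_HW_ID_* masks partition the value exactly as their names suggest:

/* Sketch: unpacking the fields composed in etm_event_start() above */
static void example_decode_hw_id(u64 hw_id)
{
	u32 major = FIELD_GET(CS_AUX_HW_ID_MAJOR_VERSION_MASK, hw_id);
	u32 minor = FIELD_GET(CS_AUX_HW_ID_MINOR_VERSION_MASK, hw_id);
	u32 trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
	u32 sink_id = FIELD_GET(CS_AUX_HW_ID_SINK_ID_MASK, hw_id);

	pr_debug("hw_id v%u.%u: trace_id=%u sink=%#x\n",
		 major, minor, trace_id, sink_id);
}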
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index bebbadee2ceb..744531158d6b 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -62,7 +62,6 @@ struct etm_event_data {
struct list_head * __percpu *path;
};
-#if IS_ENABLED(CONFIG_CORESIGHT)
int etm_perf_symlink(struct coresight_device *csdev, bool link);
int etm_perf_add_symlink_sink(struct coresight_device *csdev);
void etm_perf_del_symlink_sink(struct coresight_device *csdev);
@@ -77,23 +76,6 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
int etm_perf_add_symlink_cscfg(struct device *dev,
struct cscfg_config_desc *config_desc);
void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc);
-#else
-static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
-{ return -EINVAL; }
-int etm_perf_add_symlink_sink(struct coresight_device *csdev)
-{ return -EINVAL; }
-void etm_perf_del_symlink_sink(struct coresight_device *csdev) {}
-static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
-{
- return NULL;
-}
-int etm_perf_add_symlink_cscfg(struct device *dev,
- struct cscfg_config_desc *config_desc)
-{ return -EINVAL; }
-void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) {}
-
-#endif /* CONFIG_CORESIGHT */
-
int __init etm_perf_init(void);
void etm_perf_exit(void);
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 8b362605d242..c103f4c70f5d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -481,7 +481,8 @@ void etm_release_trace_id(struct etm_drvdata *drvdata)
}
static int etm_enable_perf(struct coresight_device *csdev,
- struct perf_event *event)
+ struct perf_event *event,
+ struct coresight_trace_id_map *id_map)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int trace_id;
@@ -500,7 +501,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
* with perf locks - we know the ID cannot change until perf shuts down
* the session
*/
- trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
+ trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu);
@@ -553,7 +554,7 @@ unlock_enable_sysfs:
}
static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode)
+ enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
int ret;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -568,7 +569,7 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
ret = etm_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = etm_enable_perf(csdev, event);
+ ret = etm_enable_perf(csdev, event, id_map);
break;
default:
ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index bf01f01964cf..66d44a404ad0 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -752,7 +752,8 @@ out:
}
static int etm4_enable_perf(struct coresight_device *csdev,
- struct perf_event *event)
+ struct perf_event *event,
+ struct coresight_trace_id_map *id_map)
{
int ret = 0, trace_id;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -775,7 +776,7 @@ static int etm4_enable_perf(struct coresight_device *csdev,
* with perf locks - we know the ID cannot change until perf shuts down
* the session
*/
- trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
+ trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu);
@@ -837,7 +838,7 @@ unlock_sysfs_enable:
}
static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode)
+ enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
int ret;
@@ -851,7 +852,7 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
ret = etm4_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = etm4_enable_perf(csdev, event);
+ ret = etm4_enable_perf(csdev, event, id_map);
break;
default:
ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 61a46d3bdcc8..05f891ca6b5c 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -148,6 +148,7 @@ int coresight_make_links(struct coresight_device *orig,
struct coresight_device *target);
void coresight_remove_links(struct coresight_device *orig,
struct coresight_connection *conn);
+u32 coresight_get_sink_id(struct coresight_device *csdev);
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X)
extern int etm_readl_cp14(u32 off, unsigned int *val);
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 117dbb484543..cb3e04755c99 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -194,7 +194,8 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
}
static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode)
+ enum cs_mode mode,
+ __maybe_unused struct coresight_trace_id_map *trace_id)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index 1e67cc7758d7..a01c9e54e2ed 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include "coresight-priv.h"
+#include "coresight-trace-id.h"
/*
* Use IDR to map the hash of the source's device name
@@ -63,7 +64,7 @@ static int coresight_enable_source_sysfs(struct coresight_device *csdev,
*/
lockdep_assert_held(&coresight_mutex);
if (coresight_get_mode(csdev) != CS_MODE_SYSFS) {
- ret = source_ops(csdev)->enable(csdev, data, mode);
+ ret = source_ops(csdev)->enable(csdev, data, mode, NULL);
if (ret)
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index b54562f392f3..3a482fd2cb22 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -220,7 +220,6 @@ static const struct file_operations tmc_fops = {
.open = tmc_open,
.read = tmc_read,
.release = tmc_release,
- .llseek = no_llseek,
};
static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index e75428fa1592..a48bb85d0e7f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -36,7 +36,8 @@ struct etr_buf_hw {
* etr_perf_buffer - Perf buffer used for ETR
 * @drvdata - The ETR drvdata this buffer has been allocated for.
* @etr_buf - Actual buffer used by the ETR
- * @pid - The PID this etr_perf_buffer belongs to.
+ * @pid - The PID of the owner of the session that this
+ * etr_perf_buffer belongs to.
 * @snapshot - Perf session mode
* @nr_pages - Number of pages in the ring buffer.
* @pages - Array of Pages in the ring buffer.
@@ -261,6 +262,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
tmc_free_table_pages(sg_table);
tmc_free_data_pages(sg_table);
+ kfree(sg_table);
}
EXPORT_SYMBOL_GPL(tmc_free_sg_table);
@@ -342,7 +344,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
rc = tmc_alloc_table_pages(sg_table);
if (rc) {
tmc_free_sg_table(sg_table);
- kfree(sg_table);
return ERR_PTR(rc);
}
@@ -1662,7 +1663,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
goto unlock_out;
}
- /* Get a handle on the pid of the process to monitor */
+ /* Get a handle on the pid of the session owner */
pid = etr_perf->pid;
/* Do not proceed if this device is associated with another session */
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index c77763b49de0..2671926be62a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -171,8 +171,9 @@ struct etr_buf {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
- * @pid: Process ID of the process being monitored by the session
- * that is using this component.
+ * @pid: Process ID of the process that owns the session that is using
+ * this component. For example, this would be the PID of the perf
+ * process.
* @buf: Snapshot of the trace data for ETF/ETB.
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 0726f8842552..b7d99e91ab84 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -439,7 +439,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata)
}
static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode)
+ enum cs_mode mode,
+ __maybe_unused struct coresight_trace_id_map *id_map)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -449,6 +450,11 @@ static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
return -EBUSY;
}
+ if (!coresight_take_mode(csdev, mode)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EBUSY;
+ }
+
__tpdm_enable(drvdata);
drvdata->enable = true;
spin_unlock(&drvdata->spinlock);
@@ -506,6 +512,7 @@ static void tpdm_disable(struct coresight_device *csdev,
}
__tpdm_disable(drvdata);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index af5b4ef59cea..d98e12cb30ec 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -3,6 +3,7 @@
* Copyright (c) 2022, Linaro Limited, All rights reserved.
* Author: Mike Leach <mike.leach@linaro.org>
*/
+#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
@@ -11,18 +12,12 @@
#include "coresight-trace-id.h"
-/* Default trace ID map. Used on systems that don't require per sink mappings */
-static struct coresight_trace_id_map id_map_default;
-
-/* maintain a record of the mapping of IDs and pending releases per cpu */
-static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0);
-static cpumask_t cpu_id_release_pending;
-
-/* perf session active counter */
-static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);
-
-/* lock to protect id_map and cpu data */
-static DEFINE_SPINLOCK(id_map_lock);
+/* Default trace ID map. Used in sysfs mode and for system sources */
+static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
+static struct coresight_trace_id_map id_map_default = {
+ .cpu_map = &id_map_default_cpu_ids,
+ .lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
+};
/* #define TRACE_ID_DEBUG 1 */
#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)
@@ -32,7 +27,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
{
pr_debug("%s id_map::\n", func_name);
pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
- pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
}
#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
@@ -46,9 +40,9 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
#endif
/* unlocked read of current trace ID value for given CPU */
-static int _coresight_trace_id_read_cpu_id(int cpu)
+static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
- return atomic_read(&per_cpu(cpu_id, cpu));
+ return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
}
/* look for next available odd ID, return 0 if none found */
@@ -119,49 +113,33 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma
clear_bit(id, id_map->used_ids);
}
-static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
-{
- if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
- return;
- set_bit(id, id_map->pend_rel_ids);
-}
-
/*
- * release all pending IDs for all current maps & clear CPU associations
- *
- * This currently operates on the default id map, but may be extended to
- * operate on all registered id maps if per sink id maps are used.
+ * Release all IDs and clear CPU associations.
*/
-static void coresight_trace_id_release_all_pending(void)
+static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
{
- struct coresight_trace_id_map *id_map = &id_map_default;
unsigned long flags;
- int cpu, bit;
+ int cpu;
- spin_lock_irqsave(&id_map_lock, flags);
- for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
- clear_bit(bit, id_map->used_ids);
- clear_bit(bit, id_map->pend_rel_ids);
- }
- for_each_cpu(cpu, &cpu_id_release_pending) {
- atomic_set(&per_cpu(cpu_id, cpu), 0);
- cpumask_clear_cpu(cpu, &cpu_id_release_pending);
- }
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
+ bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
+ for_each_possible_cpu(cpu)
+ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_MAP(id_map);
}
-static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
+static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
unsigned long flags;
int id;
- spin_lock_irqsave(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
/* check for existing allocation for this CPU */
- id = _coresight_trace_id_read_cpu_id(cpu);
+ id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (id)
- goto get_cpu_id_clr_pend;
+ goto get_cpu_id_out_unlock;
/*
* Find a new ID.
@@ -180,44 +158,32 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_
goto get_cpu_id_out_unlock;
/* allocate the new id to the cpu */
- atomic_set(&per_cpu(cpu_id, cpu), id);
-
-get_cpu_id_clr_pend:
- /* we are (re)using this ID - so ensure it is not marked for release */
- cpumask_clear_cpu(cpu, &cpu_id_release_pending);
- clear_bit(id, id_map->pend_rel_ids);
+ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
get_cpu_id_out_unlock:
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
return id;
}
-static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
+static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
unsigned long flags;
int id;
/* check for existing allocation for this CPU */
- id = _coresight_trace_id_read_cpu_id(cpu);
+ id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (!id)
return;
- spin_lock_irqsave(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
- if (atomic_read(&perf_cs_etm_session_active)) {
- /* set release at pending if perf still active */
- coresight_trace_id_set_pend_rel(id, id_map);
- cpumask_set_cpu(cpu, &cpu_id_release_pending);
- } else {
- /* otherwise clear id */
- coresight_trace_id_free(id, id_map);
- atomic_set(&per_cpu(cpu_id, cpu), 0);
- }
+ coresight_trace_id_free(id, id_map);
+ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
}
@@ -227,10 +193,10 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i
unsigned long flags;
int id;
- spin_lock_irqsave(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
 /* prefer odd IDs for system components to avoid legacy CPU IDs */
id = coresight_trace_id_alloc_new_id(id_map, 0, true);
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
@@ -241,9 +207,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
{
unsigned long flags;
- spin_lock_irqsave(&id_map_lock, flags);
+ spin_lock_irqsave(&id_map->lock, flags);
coresight_trace_id_free(id, id_map);
- spin_unlock_irqrestore(&id_map_lock, flags);
+ spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
@@ -253,22 +219,40 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
int coresight_trace_id_get_cpu_id(int cpu)
{
- return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);
+ return _coresight_trace_id_get_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);
+int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ return _coresight_trace_id_get_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map);
+
void coresight_trace_id_put_cpu_id(int cpu)
{
- coresight_trace_id_map_put_cpu_id(cpu, &id_map_default);
+ _coresight_trace_id_put_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);
+void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ _coresight_trace_id_put_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map);
+
int coresight_trace_id_read_cpu_id(int cpu)
{
- return _coresight_trace_id_read_cpu_id(cpu);
+ return _coresight_trace_id_read_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);
+int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
+{
+ return _coresight_trace_id_read_cpu_id(cpu, id_map);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);
+
int coresight_trace_id_get_system_id(void)
{
return coresight_trace_id_map_get_system_id(&id_map_default);
@@ -281,17 +265,17 @@ void coresight_trace_id_put_system_id(int id)
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
-void coresight_trace_id_perf_start(void)
+void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
{
- atomic_inc(&perf_cs_etm_session_active);
- PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+ atomic_inc(&id_map->perf_cs_etm_session_active);
+ PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
-void coresight_trace_id_perf_stop(void)
+void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
{
- if (!atomic_dec_return(&perf_cs_etm_session_active))
- coresight_trace_id_release_all_pending();
- PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+ if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
+ coresight_trace_id_release_all(id_map);
+ PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
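Taken together, the *_map variants above let each sink own its allocator: IDs are handed out per CPU against the sink's map, the session refcount pins them, and the final coresight_trace_id_perf_stop() releases everything at once. A condensed sketch of that lifetime, assuming a sink whose perf_sink_id_map has been initialised as in coresight_register():

/* Sketch: per-sink trace ID lifetime as driven by the perf path */
static int example_event_setup(struct coresight_device *sink, int cpu)
{
	int trace_id;

	trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
	if (!IS_VALID_CS_TRACE_ID(trace_id))
		return -EBUSY;

	/* pin the ID for the duration of the perf session */
	coresight_trace_id_perf_start(&sink->perf_sink_id_map);

	return trace_id;
}

static void example_event_teardown(struct coresight_device *sink)
{
	/* the last stop on this sink frees every ID in its map */
	coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
}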
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h
index 3797777d367e..9aae50a553ca 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.h
+++ b/drivers/hwtracing/coresight/coresight-trace-id.h
@@ -17,9 +17,10 @@
* released when done.
*
* In order to ensure that a consistent cpu / ID matching is maintained
- * throughout a perf cs_etm event session - a session in progress flag will
- * be maintained, and released IDs not cleared until the perf session is
- * complete. This allows the same CPU to be re-allocated its prior ID.
+ * throughout a perf cs_etm event session - a session in progress flag will be
+ * maintained for each sink, and IDs are cleared when all the perf sessions
+ * complete. This allows the same CPU to be re-allocated its prior ID when
+ * events are scheduled in and out.
*
*
* Trace ID maps will be created and initialised to prevent architecturally
@@ -32,10 +33,6 @@
#include <linux/bitops.h>
#include <linux/types.h>
-
-/* architecturally we have 128 IDs some of which are reserved */
-#define CORESIGHT_TRACE_IDS_MAX 128
-
/* ID 0 is reserved */
#define CORESIGHT_TRACE_ID_RES_0 0
@@ -47,23 +44,6 @@
((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP))
/**
- * Trace ID map.
- *
- * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
- * Initialised so that the reserved IDs are permanently marked as
- * in use.
- * @pend_rel_ids: CPU IDs that have been released by the trace source but not
- * yet marked as available, to allow re-allocation to the same
- * CPU during a perf session.
- */
-struct coresight_trace_id_map {
- DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
- DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
-};
-
-/* Allocate and release IDs for a single default trace ID map */
-
-/**
* Read and optionally allocate a CoreSight trace ID and associate with a CPU.
*
* Function will read the current trace ID for the associated CPU,
@@ -79,19 +59,27 @@ struct coresight_trace_id_map {
int coresight_trace_id_get_cpu_id(int cpu);
/**
+ * Version of coresight_trace_id_get_cpu_id() that takes an explicit ID map
+ * to operate on.
+ */
+int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
+/**
* Release an allocated trace ID associated with the CPU.
*
- * This will release the CoreSight trace ID associated with the CPU,
- * unless a perf session is in operation.
- *
- * If a perf session is in operation then the ID will be marked as pending
- * release.
+ * This will release the CoreSight trace ID associated with the CPU.
*
* @cpu: The CPU index to release the associated trace ID.
*/
void coresight_trace_id_put_cpu_id(int cpu);
/**
+ * Version of coresight_trace_id_put_cpu_id() that takes an explicit ID map
+ * to operate on.
+ */
+void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
+/**
* Read the current allocated CoreSight Trace ID value for the CPU.
*
* Fast read of the current value that does not allocate if no ID allocated
@@ -112,6 +100,12 @@ void coresight_trace_id_put_cpu_id(int cpu);
int coresight_trace_id_read_cpu_id(int cpu);
/**
+ * Version of coresight_trace_id_read_cpu_id() that takes an explicit ID map
+ * to operate on.
+ */
+int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
+
+/**
* Allocate a CoreSight trace ID for a system component.
*
* Unconditionally allocates a Trace ID, without associating the ID with a CPU.
@@ -136,21 +130,21 @@ void coresight_trace_id_put_system_id(int id);
/**
* Notify the Trace ID allocator that a perf session is starting.
*
- * Increase the perf session reference count - called by perf when setting up
- * a trace event.
+ * Increase the perf session reference count - called by perf when setting up a
+ * trace event.
*
- * This reference count is used by the ID allocator to ensure that trace IDs
- * associated with a CPU cannot change or be released during a perf session.
+ * Perf sessions never free trace IDs to ensure that the ID associated with a
+ * CPU cannot change during their own or other concurrent sessions. Instead,
+ * this refcount is used so that the last event to finish always frees all IDs.
*/
-void coresight_trace_id_perf_start(void);
+void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map);
/**
* Notify the ID allocator that a perf session is stopping.
*
- * Decrease the perf session reference count.
- * if this causes the count to go to zero, then all Trace IDs marked as pending
- * release, will be released.
+ * Decrease the perf session reference count. If this causes the count to go to
+ * zero, then all Trace IDs will be released.
*/
-void coresight_trace_id_perf_stop(void);
+void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map);
#endif /* _CORESIGHT_TRACE_ID_H */
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index f9ebf20c91e6..ef7f560f0ffa 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -163,7 +163,6 @@ static const struct file_operations smb_fops = {
.open = smb_open,
.read = smb_read,
.release = smb_release,
- .llseek = no_llseek,
};
static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index be63d5b8f193..66123d684ac9 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1677,7 +1677,6 @@ static const struct file_operations intel_th_msc_fops = {
.release = intel_th_msc_release,
.read = intel_th_msc_read,
.mmap = intel_th_msc_mmap,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index ccf39a80dc4f..cdba4e875b28 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -839,7 +839,6 @@ static const struct file_operations stm_fops = {
.mmap = stm_char_mmap,
.unlocked_ioctl = stm_char_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
static void stm_device_release(struct device *dev)
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 44710267d669..c232054fddd6 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -40,14 +40,6 @@ config I2C_BOARDINFO
bool
default y
-config I2C_COMPAT
- bool "Enable compatibility bits for old user-space"
- default y
- help
- Say Y here if you intend to run lm-sensors 3.1.1 or older, or any
- other user-space package which expects i2c adapters to be class
- devices. If you don't know, say Y.
-
config I2C_CHARDEV
tristate "I2C device interface"
help
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a22f9125322a..6b3ba7e5723a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -559,28 +559,33 @@ config I2C_DAVINCI
For details please see http://www.ti.com/davinci
config I2C_DESIGNWARE_CORE
- tristate
+ tristate "Synopsys DesignWare I2C adapter"
select REGMAP
+ help
+ This option enables core support for the Synopsys DesignWare I2C
+ adapter, which is shared by the platform and PCI bus drivers below.
+
+ To compile the driver as a module, choose M here: the module will be
+ called i2c-designware-core.
+
+if I2C_DESIGNWARE_CORE
config I2C_DESIGNWARE_SLAVE
bool "Synopsys DesignWare Slave"
- depends on I2C_DESIGNWARE_CORE
select I2C_SLAVE
help
If you say yes to this option, support will be included for the
Synopsys DesignWare I2C slave adapter.
- This is not a standalone module, this module compiles together with
- i2c-designware-core.
-
config I2C_DESIGNWARE_PLATFORM
- tristate "Synopsys DesignWare Platform"
+ tristate "Synopsys DesignWare Platform driver"
depends on (ACPI && COMMON_CLK) || !ACPI
- select I2C_DESIGNWARE_CORE
select MFD_SYSCON if MIPS_BAIKAL_T1
+ default I2C_DESIGNWARE_CORE
help
If you say yes to this option, support will be included for the
- Synopsys DesignWare I2C adapter.
+ Synopsys DesignWare I2C adapters on the platform bus.
This driver can also be built as a module. If so, the module
will be called i2c-designware-platform.
@@ -613,17 +618,19 @@ config I2C_DESIGNWARE_BAYTRAIL
a BayTrail system using the AXP288.
config I2C_DESIGNWARE_PCI
- tristate "Synopsys DesignWare PCI"
+ tristate "Synopsys DesignWare PCI driver"
depends on PCI
- select I2C_DESIGNWARE_CORE
select I2C_CCGX_UCSI
help
If you say yes to this option, support will be included for the
- Synopsys DesignWare I2C adapter. Only master mode is supported.
+ Synopsys DesignWare I2C adapters on the PCI bus. Only master mode is
+ supported.
This driver can also be built as a module. If so, the module
will be called i2c-designware-pci.
+endif
+
config I2C_DIGICOLOR
tristate "Conexant Digicolor I2C driver"
depends on ARCH_DIGICOLOR || COMPILE_TEST
@@ -772,6 +779,18 @@ config I2C_JZ4780
If you don't know what to do here, say N.
+config I2C_KEBA
+ tristate "KEBA I2C controller support"
+ depends on HAS_IOMEM
+ depends on KEBA_CP500 || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+ This driver supports the I2C controller found in KEBA system FPGA
+ devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-keba.
+
config I2C_KEMPLD
tristate "Kontron COM I2C Controller"
depends on MFD_KEMPLD
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 78d0561339e5..ecc07c50f2a0 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_IMX_LPI2C) += i2c-imx-lpi2c.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
+obj-$(CONFIG_I2C_KEBA) += i2c-keba.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
obj-$(CONFIG_I2C_LPC2K) += i2c-lpc2k.o
obj-$(CONFIG_I2C_LS2X) += i2c-ls2x.o
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 9d7b4efe26ad..544c94e86b89 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -479,9 +479,8 @@ static struct i2c_adapter ali1535_adapter = {
static const struct pci_device_id ali1535_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
- { },
+ { }
};
-
MODULE_DEVICE_TABLE(pci, ali1535_ids);
static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index d3ac1c77a509..6f0ef587e76d 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -340,7 +340,7 @@ static void i2c_amd_remove(struct platform_device *pdev)
static const struct acpi_device_id i2c_amd_acpi_match[] = {
{ "AMDI0011" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match);
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index ce8c4846b7fa..cc5a26637fd5 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -170,6 +170,13 @@ struct aspeed_i2c_bus {
static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
+/* precondition: bus.lock has been acquired. */
+static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
+{
+ bus->master_state = ASPEED_I2C_MASTER_STOP;
+ writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+}
+
static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
unsigned long time_left, flags;
@@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
command);
reinit_completion(&bus->cmd_complete);
- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+ aspeed_i2c_do_stop(bus);
spin_unlock_irqrestore(&bus->lock, flags);
time_left = wait_for_completion_timeout(
@@ -391,13 +398,6 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
}
/* precondition: bus.lock has been acquired. */
-static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
-{
- bus->master_state = ASPEED_I2C_MASTER_STOP;
- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
-}
-
-/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
if (bus->msgs_index + 1 < bus->msgs_count) {
@@ -991,7 +991,7 @@ static const struct of_device_id aspeed_i2c_bus_of_table[] = {
.compatible = "aspeed,ast2600-i2c-bus",
.data = aspeed_i2c_25xx_get_clk_reg_val,
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index e8a688d04aee..f31d352d98b5 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -20,12 +20,17 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/swab.h>
#include <linux/types.h>
#include <linux/units.h>
+#define DEFAULT_SYMBOL_NAMESPACE I2C_DW_COMMON
+
#include "i2c-designware-core.h"
static char *abort_sources[] = {
@@ -188,7 +193,7 @@ static const u32 supported_speeds[] = {
I2C_MAX_STANDARD_MODE_FREQ,
};
-int i2c_dw_validate_speed(struct dw_i2c_dev *dev)
+static int i2c_dw_validate_speed(struct dw_i2c_dev *dev)
{
struct i2c_timings *t = &dev->timings;
unsigned int i;
@@ -208,7 +213,44 @@ int i2c_dw_validate_speed(struct dw_i2c_dev *dev)
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(i2c_dw_validate_speed);
+
+#ifdef CONFIG_OF
+
+#include <linux/platform_device.h>
+
+#define MSCC_ICPU_CFG_TWI_DELAY 0x0
+#define MSCC_ICPU_CFG_TWI_DELAY_ENABLE BIT(0)
+#define MSCC_ICPU_CFG_TWI_SPIKE_FILTER 0x4
+
+static int mscc_twi_set_sda_hold_time(struct dw_i2c_dev *dev)
+{
+ writel((dev->sda_hold_time << 1) | MSCC_ICPU_CFG_TWI_DELAY_ENABLE,
+ dev->ext + MSCC_ICPU_CFG_TWI_DELAY);
+
+ return 0;
+}
+
+static void i2c_dw_of_configure(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ switch (dev->flags & MODEL_MASK) {
+ case MODEL_MSCC_OCELOT:
+ dev->ext = devm_platform_ioremap_resource(pdev, 1);
+ if (!IS_ERR(dev->ext))
+ dev->set_sda_hold_time = mscc_twi_set_sda_hold_time;
+ break;
+ default:
+ break;
+ }
+}
+
+#else /* CONFIG_OF */
+
+static inline void i2c_dw_of_configure(struct device *device) { }
+
+#endif /* CONFIG_OF */
#ifdef CONFIG_ACPI
@@ -255,7 +297,7 @@ static void i2c_dw_acpi_params(struct device *device, char method[],
kfree(buf.pointer);
}
-int i2c_dw_acpi_configure(struct device *device)
+static void i2c_dw_acpi_configure(struct device *device)
{
struct dw_i2c_dev *dev = dev_get_drvdata(device);
struct i2c_timings *t = &dev->timings;
@@ -285,10 +327,7 @@ int i2c_dw_acpi_configure(struct device *device)
dev->sda_hold_time = fs_ht;
break;
}
-
- return 0;
}
-EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure);
static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
{
@@ -310,11 +349,13 @@ static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
#else /* CONFIG_ACPI */
+static inline void i2c_dw_acpi_configure(struct device *device) { }
+
static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; }
#endif /* CONFIG_ACPI */
-void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
+static void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
{
u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev);
struct i2c_timings *t = &dev->timings;
@@ -330,10 +371,47 @@ void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
else
t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
}
-EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed);
-u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+int i2c_dw_fw_parse_and_configure(struct dw_i2c_dev *dev)
+{
+ struct i2c_timings *t = &dev->timings;
+ struct device *device = dev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(device);
+
+ i2c_parse_fw_timings(device, t, false);
+
+ i2c_dw_adjust_bus_speed(dev);
+
+ if (is_of_node(fwnode))
+ i2c_dw_of_configure(device);
+ else if (is_acpi_node(fwnode))
+ i2c_dw_acpi_configure(device);
+
+ return i2c_dw_validate_speed(dev);
+}
+EXPORT_SYMBOL_GPL(i2c_dw_fw_parse_and_configure);
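Since i2c_dw_fw_parse_and_configure() now folds timing parsing, bus speed adjustment, the OF/ACPI quirks and speed validation into one call, a bus glue driver's probe reduces to roughly the following sketch (real probes also map registers, clocks and the IRQ, all elided here):

/* Sketch: minimal glue-driver probe built on the consolidated helper */
static int example_dw_i2c_probe(struct platform_device *pdev)
{
	struct dw_i2c_dev *dev;
	int ret;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	ret = i2c_dw_fw_parse_and_configure(dev);	/* timings + quirks */
	if (ret)
		return ret;

	i2c_dw_configure(dev);		/* selects master or slave mode */
	return i2c_dw_probe(dev);	/* registers the adapter */
}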
+
+static u32 i2c_dw_read_scl_reg(struct dw_i2c_dev *dev, u32 reg)
{
+ u32 val;
+ int ret;
+
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return 0;
+
+ ret = regmap_read(dev->map, reg, &val);
+ i2c_dw_release_lock(dev);
+
+ return ret ? 0 : val;
+}
+
+u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
+ u32 tSYMBOL, u32 tf, int cond, int offset)
+{
+ if (!ic_clk)
+ return i2c_dw_read_scl_reg(dev, reg);
+
/*
 * DesignWare I2C core doesn't seem to have a solid strategy to meet
* the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec
@@ -372,8 +450,12 @@ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
3 + offset;
}
-u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
+ u32 tLOW, u32 tf, int offset)
{
+ if (!ic_clk)
+ return i2c_dw_read_scl_reg(dev, reg);
+
/*
* Conditional expression:
*
@@ -441,6 +523,7 @@ err_release_lock:
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
+ struct i2c_timings *t = &dev->timings;
unsigned int raw_intr_stats;
unsigned int enable;
int timeout = 100;
@@ -453,6 +536,19 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
if (abort_needed) {
+ if (!(enable & DW_IC_ENABLE_ENABLE)) {
+ regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE);
+ /*
+ * Wait 10 times the signaling period of the highest I2C
+ * transfer supported by the driver (for 400KHz this is
+ * 25us) to ensure the I2C ENABLE bit is already set
+ * as described in the DesignWare I2C databook.
+ */
+ fsleep(DIV_ROUND_CLOSEST_ULL(10 * MICRO, t->bus_freq_hz));
+ /* Set ENABLE bit before setting ABORT */
+ enable |= DW_IC_ENABLE_ENABLE;
+ }
+
regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT);
ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable,
!(enable & DW_IC_ENABLE_ABORT), 10,
@@ -653,6 +749,84 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
i2c_dw_release_lock(dev);
}
+EXPORT_SYMBOL_GPL(i2c_dw_disable);
+
+int i2c_dw_probe(struct dw_i2c_dev *dev)
+{
+ device_set_node(&dev->adapter.dev, dev_fwnode(dev->dev));
+
+ switch (dev->mode) {
+ case DW_IC_SLAVE:
+ return i2c_dw_probe_slave(dev);
+ case DW_IC_MASTER:
+ return i2c_dw_probe_master(dev);
+ default:
+ dev_err(dev->dev, "Wrong operation mode: %d\n", dev->mode);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(i2c_dw_probe);
+
+static int i2c_dw_prepare(struct device *device)
+{
+ /*
+ * If the ACPI companion device object is present for this device,
+ * it may be accessed during suspend and resume of other devices via
+ * I2C operation regions, so tell the PM core and middle layers to
+ * avoid skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(device);
+}
+
+static int i2c_dw_runtime_suspend(struct device *device)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ if (dev->shared_with_punit)
+ return 0;
+
+ i2c_dw_disable(dev);
+ i2c_dw_prepare_clk(dev, false);
+
+ return 0;
+}
+
+static int i2c_dw_suspend(struct device *device)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ i2c_mark_adapter_suspended(&dev->adapter);
+
+ return i2c_dw_runtime_suspend(device);
+}
+
+static int i2c_dw_runtime_resume(struct device *device)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ if (!dev->shared_with_punit)
+ i2c_dw_prepare_clk(dev, true);
+
+ dev->init(dev);
+
+ return 0;
+}
+
+static int i2c_dw_resume(struct device *device)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(device);
+
+ i2c_dw_runtime_resume(device);
+ i2c_mark_adapter_resumed(&dev->adapter);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(i2c_dw_dev_pm_ops) = {
+ .prepare = pm_sleep_ptr(i2c_dw_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(i2c_dw_suspend, i2c_dw_resume)
+ RUNTIME_PM_OPS(i2c_dw_runtime_suspend, i2c_dw_runtime_resume, NULL)
+};
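The exported dev_pm_ops above are intended to be shared by the glue drivers rather than each carrying its own copy; wiring them up is a one-liner, as in this sketch (driver and probe names hypothetical, reusing the probe sketched earlier):

/* Sketch: referencing the shared PM ops from a platform glue driver */
static struct platform_driver example_dw_i2c_driver = {
	.probe = example_dw_i2c_probe,
	.driver = {
		.name = "example-i2c-designware",
		.pm = pm_ptr(&i2c_dw_dev_pm_ops),
	},
};
module_platform_driver(example_dw_i2c_driver);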
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index e9606c00b8d1..8e8854ec9882 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -10,11 +10,10 @@
*/
#include <linux/bits.h>
-#include <linux/compiler_types.h>
#include <linux/completion.h>
-#include <linux/dev_printk.h>
#include <linux/errno.h>
#include <linux/i2c.h>
+#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/types.h>
@@ -109,6 +108,7 @@
DW_IC_INTR_RX_UNDER | \
DW_IC_INTR_RD_REQ)
+#define DW_IC_ENABLE_ENABLE BIT(0)
#define DW_IC_ENABLE_ABORT BIT(1)
#define DW_IC_STATUS_ACTIVITY BIT(0)
@@ -237,7 +237,6 @@ struct reset_control;
* @semaphore_idx: Index of table with semaphore type attached to the bus. It's
* -1 if there is no semaphore.
* @shared_with_punit: true if this bus is shared with the SoCs PUNIT
- * @disable: function to disable the controller
* @init: function to initialize the I2C hardware
* @set_sda_hold_time: callback to retrieve IP specific SDA hold timing
* @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE
@@ -295,7 +294,6 @@ struct dw_i2c_dev {
void (*release_lock)(void);
int semaphore_idx;
bool shared_with_punit;
- void (*disable)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
int (*set_sda_hold_time)(struct dw_i2c_dev *dev);
int mode;
@@ -329,8 +327,10 @@ struct i2c_dw_semaphore_callbacks {
};
int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
-u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
-u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
+u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
+ u32 tSYMBOL, u32 tf, int cond, int offset);
+u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
+ u32 tLOW, u32 tf, int offset);
int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev);
u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev);
int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare);
@@ -340,7 +340,8 @@ int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev);
int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev);
int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev);
u32 i2c_dw_func(struct i2c_adapter *adap);
-void i2c_dw_disable(struct dw_i2c_dev *dev);
+
+extern const struct dev_pm_ops i2c_dw_dev_pm_ops;
static inline void __i2c_dw_enable(struct dw_i2c_dev *dev)
{
@@ -373,6 +374,7 @@ static inline void __i2c_dw_read_intr_mask(struct dw_i2c_dev *dev,
}
void __i2c_dw_disable(struct dw_i2c_dev *dev);
+void i2c_dw_disable(struct dw_i2c_dev *dev);
extern void i2c_dw_configure_master(struct dw_i2c_dev *dev);
extern int i2c_dw_probe_master(struct dw_i2c_dev *dev);
@@ -385,19 +387,6 @@ static inline void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { }
static inline int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { return -EINVAL; }
#endif
-static inline int i2c_dw_probe(struct dw_i2c_dev *dev)
-{
- switch (dev->mode) {
- case DW_IC_SLAVE:
- return i2c_dw_probe_slave(dev);
- case DW_IC_MASTER:
- return i2c_dw_probe_master(dev);
- default:
- dev_err(dev->dev, "Wrong operation mode: %d\n", dev->mode);
- return -EINVAL;
- }
-}
-
static inline void i2c_dw_configure(struct dw_i2c_dev *dev)
{
if (i2c_detect_slave_mode(dev->dev))
@@ -406,6 +395,8 @@ static inline void i2c_dw_configure(struct dw_i2c_dev *dev)
i2c_dw_configure_master(dev);
}
+int i2c_dw_probe(struct dw_i2c_dev *dev);
+
#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev);
#endif
@@ -414,11 +405,4 @@ int i2c_dw_baytrail_probe_lock_support(struct dw_i2c_dev *dev);
int i2c_dw_amdpsp_probe_lock_support(struct dw_i2c_dev *dev);
#endif
-int i2c_dw_validate_speed(struct dw_i2c_dev *dev);
-void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev);
-
-#if IS_ENABLED(CONFIG_ACPI)
-int i2c_dw_acpi_configure(struct device *device);
-#else
-static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; }
-#endif
+int i2c_dw_fw_parse_and_configure(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index c7e56002809a..e8ac9a7bf0b3 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -22,6 +22,8 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#define DEFAULT_SYMBOL_NAMESPACE I2C_DW
+
#include "i2c-designware-core.h"
#define AMD_TIMEOUT_MIN_US 25
@@ -64,13 +66,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
if (!dev->ss_hcnt || !dev->ss_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->ss_hcnt =
- i2c_dw_scl_hcnt(ic_clk,
+ i2c_dw_scl_hcnt(dev,
+ DW_IC_SS_SCL_HCNT,
+ ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
dev->ss_lcnt =
- i2c_dw_scl_lcnt(ic_clk,
+ i2c_dw_scl_lcnt(dev,
+ DW_IC_SS_SCL_LCNT,
+ ic_clk,
4700, /* tLOW = 4.7 us */
scl_falling_time,
0); /* No offset */
@@ -94,13 +100,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
} else {
ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt =
- i2c_dw_scl_hcnt(ic_clk,
+ i2c_dw_scl_hcnt(dev,
+ DW_IC_FS_SCL_HCNT,
+ ic_clk,
260, /* tHIGH = 260 ns */
sda_falling_time,
0, /* DW default */
0); /* No offset */
dev->fs_lcnt =
- i2c_dw_scl_lcnt(ic_clk,
+ i2c_dw_scl_lcnt(dev,
+ DW_IC_FS_SCL_LCNT,
+ ic_clk,
500, /* tLOW = 500 ns */
scl_falling_time,
0); /* No offset */
@@ -114,13 +124,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
if (!dev->fs_hcnt || !dev->fs_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt =
- i2c_dw_scl_hcnt(ic_clk,
+ i2c_dw_scl_hcnt(dev,
+ DW_IC_FS_SCL_HCNT,
+ ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
dev->fs_lcnt =
- i2c_dw_scl_lcnt(ic_clk,
+ i2c_dw_scl_lcnt(dev,
+ DW_IC_FS_SCL_LCNT,
+ ic_clk,
1300, /* tLOW = 1.3 us */
scl_falling_time,
0); /* No offset */
@@ -142,13 +156,17 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->hs_hcnt =
- i2c_dw_scl_hcnt(ic_clk,
+ i2c_dw_scl_hcnt(dev,
+ DW_IC_HS_SCL_HCNT,
+ ic_clk,
160, /* tHIGH = 160 ns */
sda_falling_time,
0, /* DW default */
0); /* No offset */
dev->hs_lcnt =
- i2c_dw_scl_lcnt(ic_clk,
+ i2c_dw_scl_lcnt(dev,
+ DW_IC_HS_SCL_LCNT,
+ ic_clk,
320, /* tLOW = 320 ns */
scl_falling_time,
0); /* No offset */
@@ -253,6 +271,34 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
__i2c_dw_write_intr_mask(dev, DW_IC_INTR_MASTER_MASK);
}
+/*
+ * This function waits for the controller to be idle before disabling I2C.
+ * When the controller is not in the IDLE state, the MST_ACTIVITY bit
+ * (IC_STATUS[5]) is set.
+ *
+ * Values:
+ * 0x1 (ACTIVE): Controller not idle
+ * 0x0 (IDLE): Controller is idle
+ *
+ * The function is called after completing the current transfer.
+ *
+ * Returns:
+ * False when the controller is in the IDLE state.
+ * True when the controller is in the ACTIVE state.
+ */
+static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
+{
+ u32 status;
+
+ regmap_read(dev->map, DW_IC_STATUS, &status);
+ if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
+ return false;
+
+ return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
+ !(status & DW_IC_STATUS_MASTER_ACTIVITY),
+ 1100, 20000) != 0;
+}
+
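
As background on the helper above: regmap_read_poll_timeout() re-reads the register, sleeping between reads, until the condition holds, and returns 0 on success or -ETIMEDOUT otherwise; the plain regmap_read() first is a fast path that avoids sleeping when the controller is already idle. The general shape of the idiom, with placeholder mask and timings:

static int example_wait_clear(struct regmap *map, unsigned int reg, u32 mask)
{
	u32 val;

	regmap_read(map, reg, &val);
	if (!(val & mask))
		return 0;	/* already clear, no sleeping */

	/* sleep ~1.1 ms between reads, give up after 20 ms */
	return regmap_read_poll_timeout(map, reg, val, !(val & mask),
					1100, 20000);
}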
static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
u32 val;
@@ -789,6 +835,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
/*
+ * This happens rarely (~1:500) and is hard to reproduce. A debug trace
+ * showed that IC_STATUS had a value of 0x23 when STOP_DET occurred;
+ * clearing IC_ENABLE.ENABLE immediately at that point can result in
+ * IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check whether the
+ * controller is still ACTIVE before disabling I2C.
+ */
+ if (i2c_dw_is_controller_active(dev))
+ dev_err(dev->dev, "controller active\n");
+
+ /*
* We must disable the adapter before returning and signaling the end
* of the current transfer. Otherwise the hardware might continue
* generating interrupts which in turn causes a race condition with
@@ -931,7 +987,6 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
init_completion(&dev->cmd_complete);
dev->init = i2c_dw_init_master;
- dev->disable = i2c_dw_disable;
ret = i2c_dw_init_regmap(dev);
if (ret)
@@ -1021,3 +1076,4 @@ EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(I2C_DW_COMMON);
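
For context on the namespace machinery: defining DEFAULT_SYMBOL_NAMESPACE before the export macros are used places every EXPORT_SYMBOL_GPL() in the file into that namespace, and modpost then insists that consumers declare MODULE_IMPORT_NS(), as the glue drivers below do. A minimal sketch:

/* exporter.c -- the define must precede the EXPORT_* uses */
#define DEFAULT_SYMBOL_NAMESPACE I2C_DW
#include <linux/export.h>
#include <linux/module.h>

int i2c_dw_example(void) { return 0; }
EXPORT_SYMBOL_GPL(i2c_dw_example);	/* exported into "I2C_DW" */

/* consumer.c -- without this, modpost warns about a missing import */
MODULE_IMPORT_NS(I2C_DW);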
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index a1b379a1e904..7b2c5d71a7fc 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -9,7 +9,6 @@
* Copyright (C) 2009 Provigent Ltd.
* Copyright (C) 2011, 2015, 2016 Intel Corporation.
*/
-#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -19,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/power_supply.h>
#include <linux/sched.h>
@@ -102,7 +102,7 @@ static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev)
static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
- struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
+ struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
switch (pdev->device) {
case 0x0817:
@@ -152,7 +152,7 @@ static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev)
static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
- struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
+ struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
dev->flags |= MODEL_AMD_NAVI_GPU | ACCESS_POLLING;
dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
@@ -194,47 +194,6 @@ static struct dw_pci_controller dw_pci_controllers[] = {
},
};
-static int __maybe_unused i2c_dw_pci_runtime_suspend(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- i_dev->disable(i_dev);
- return 0;
-}
-
-static int __maybe_unused i2c_dw_pci_suspend(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- i2c_mark_adapter_suspended(&i_dev->adapter);
-
- return i2c_dw_pci_runtime_suspend(dev);
-}
-
-static int __maybe_unused i2c_dw_pci_runtime_resume(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- return i_dev->init(i_dev);
-}
-
-static int __maybe_unused i2c_dw_pci_resume(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- int ret;
-
- ret = i2c_dw_pci_runtime_resume(dev);
-
- i2c_mark_adapter_resumed(&i_dev->adapter);
-
- return ret;
-}
-
-static const struct dev_pm_ops i2c_dw_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(i2c_dw_pci_suspend, i2c_dw_pci_resume)
- SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend, i2c_dw_pci_runtime_resume, NULL)
-};
-
static const struct property_entry dgpu_properties[] = {
/* USB-C doesn't power the system */
PROPERTY_ENTRY_U8("scope", POWER_SUPPLY_SCOPE_DEVICE),
@@ -253,7 +212,6 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
int r;
struct dw_pci_controller *controller;
struct dw_scl_sda_cfg *cfg;
- struct i2c_timings *t;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers))
return dev_err_probe(&pdev->dev, -EINVAL,
@@ -288,29 +246,17 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->irq = pci_irq_vector(pdev, 0);
dev->flags |= controller->flags;
- t = &dev->timings;
- i2c_parse_fw_timings(&pdev->dev, t, false);
-
pci_set_drvdata(pdev, dev);
if (controller->setup) {
r = controller->setup(pdev, controller);
- if (r) {
- pci_free_irq_vectors(pdev);
+ if (r)
return r;
- }
}
- i2c_dw_adjust_bus_speed(dev);
-
- if (has_acpi_companion(&pdev->dev))
- i2c_dw_acpi_configure(&pdev->dev);
-
- r = i2c_dw_validate_speed(dev);
- if (r) {
- pci_free_irq_vectors(pdev);
+ r = i2c_dw_fw_parse_and_configure(dev);
+ if (r)
return r;
- }
i2c_dw_configure(dev);
@@ -326,14 +272,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
adap = &dev->adapter;
adap->owner = THIS_MODULE;
adap->class = 0;
- ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->nr = controller->bus_num;
r = i2c_dw_probe(dev);
- if (r) {
- pci_free_irq_vectors(pdev);
+ if (r)
return r;
- }
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
@@ -354,16 +297,15 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
{
struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
- dev->disable(dev);
+ i2c_dw_disable(dev);
+
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
i2c_del_adapter(&dev->adapter);
- devm_free_irq(&pdev->dev, dev->irq, dev);
- pci_free_irq_vectors(pdev);
}
-static const struct pci_device_id i2_designware_pci_ids[] = {
+static const struct pci_device_id i2c_designware_pci_ids[] = {
/* Medfield */
{ PCI_VDEVICE(INTEL, 0x0817), medfield },
{ PCI_VDEVICE(INTEL, 0x0818), medfield },
@@ -409,21 +351,23 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(ATI, 0x73c4), navi_amd },
{ PCI_VDEVICE(ATI, 0x7444), navi_amd },
{ PCI_VDEVICE(ATI, 0x7464), navi_amd },
- { 0,}
+ {}
};
-MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
+MODULE_DEVICE_TABLE(pci, i2c_designware_pci_ids);
static struct pci_driver dw_i2c_driver = {
.name = DRIVER_NAME,
- .id_table = i2_designware_pci_ids,
.probe = i2c_dw_pci_probe,
.remove = i2c_dw_pci_remove,
.driver = {
- .pm = &i2c_dw_pm_ops,
+ .pm = pm_ptr(&i2c_dw_dev_pm_ops),
},
+ .id_table = i2c_designware_pci_ids,
};
module_pci_driver(dw_i2c_driver);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(I2C_DW);
+MODULE_IMPORT_NS(I2C_DW_COMMON);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index df3dc1e8093e..2d0c7348e491 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -8,7 +8,6 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
-#include <linux/acpi.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -21,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -30,7 +28,6 @@
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/suspend.h>
#include <linux/units.h>
#include "i2c-designware-core.h"
@@ -40,29 +37,6 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return clk_get_rate(dev->clk) / KILO;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id dw_i2c_acpi_match[] = {
- { "INT33C2", 0 },
- { "INT33C3", 0 },
- { "INT3432", 0 },
- { "INT3433", 0 },
- { "INTC10EF", 0 },
- { "80860F41", ACCESS_NO_IRQ_SUSPEND },
- { "808622C1", ACCESS_NO_IRQ_SUSPEND },
- { "AMD0010", ACCESS_INTR_MASK },
- { "AMDI0010", ACCESS_INTR_MASK },
- { "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE },
- { "AMDI0510", 0 },
- { "APMC0D0F", 0 },
- { "HISI02A1", 0 },
- { "HISI02A2", 0 },
- { "HISI02A3", 0 },
- { "HYGO0010", ACCESS_INTR_MASK },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
-#endif
-
#ifdef CONFIG_OF
#define BT1_I2C_CTL 0x100
#define BT1_I2C_CTL_ADDR_MASK GENMASK(7, 0)
@@ -120,53 +94,11 @@ static int bt1_i2c_request_regs(struct dw_i2c_dev *dev)
dev->map = devm_regmap_init(dev->dev, NULL, dev, &bt1_i2c_cfg);
return PTR_ERR_OR_ZERO(dev->map);
}
-
-#define MSCC_ICPU_CFG_TWI_DELAY 0x0
-#define MSCC_ICPU_CFG_TWI_DELAY_ENABLE BIT(0)
-#define MSCC_ICPU_CFG_TWI_SPIKE_FILTER 0x4
-
-static int mscc_twi_set_sda_hold_time(struct dw_i2c_dev *dev)
-{
- writel((dev->sda_hold_time << 1) | MSCC_ICPU_CFG_TWI_DELAY_ENABLE,
- dev->ext + MSCC_ICPU_CFG_TWI_DELAY);
-
- return 0;
-}
-
-static int dw_i2c_of_configure(struct platform_device *pdev)
-{
- struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
-
- switch (dev->flags & MODEL_MASK) {
- case MODEL_MSCC_OCELOT:
- dev->ext = devm_platform_ioremap_resource(pdev, 1);
- if (!IS_ERR(dev->ext))
- dev->set_sda_hold_time = mscc_twi_set_sda_hold_time;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static const struct of_device_id dw_i2c_of_match[] = {
- { .compatible = "snps,designware-i2c", },
- { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT },
- { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 },
- {},
-};
-MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#else
static int bt1_i2c_request_regs(struct dw_i2c_dev *dev)
{
return -ENODEV;
}
-
-static inline int dw_i2c_of_configure(struct platform_device *pdev)
-{
- return -ENODEV;
-}
#endif
static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev)
@@ -238,11 +170,9 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
int i = 0;
int ret;
- ptr = i2c_dw_semaphore_cb_table;
-
dev->semaphore_idx = -1;
- while (ptr->probe) {
+ for (ptr = i2c_dw_semaphore_cb_table; ptr->probe; ptr++) {
ret = ptr->probe(dev);
if (ret) {
/*
@@ -254,7 +184,6 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
return ret;
i++;
- ptr++;
continue;
}
@@ -278,7 +207,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct i2c_adapter *adap;
struct dw_i2c_dev *dev;
- struct i2c_timings *t;
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -307,18 +235,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
reset_control_deassert(dev->rst);
- t = &dev->timings;
- i2c_parse_fw_timings(&pdev->dev, t, false);
-
- i2c_dw_adjust_bus_speed(dev);
-
- if (pdev->dev.of_node)
- dw_i2c_of_configure(pdev);
-
- if (has_acpi_companion(&pdev->dev))
- i2c_dw_acpi_configure(&pdev->dev);
-
- ret = i2c_dw_validate_speed(dev);
+ ret = i2c_dw_fw_parse_and_configure(dev);
if (ret)
goto exit_reset;
@@ -346,6 +263,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
goto exit_reset;
if (dev->clk) {
+ struct i2c_timings *t = &dev->timings;
u64 clk_khz;
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
@@ -360,8 +278,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
- ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
- adap->dev.of_node = pdev->dev.of_node;
adap->nr = -1;
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
@@ -408,7 +324,7 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
- dev->disable(dev);
+ i2c_dw_disable(dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
@@ -419,66 +335,34 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
reset_control_assert(dev->rst);
}
-static int dw_i2c_plat_prepare(struct device *dev)
-{
- /*
- * If the ACPI companion device object is present for this device, it
- * may be accessed during suspend and resume of other devices via I2C
- * operation regions, so tell the PM core and middle layers to avoid
- * skipping system suspend/resume callbacks for it in that case.
- */
- return !has_acpi_companion(dev);
-}
-
-static int dw_i2c_plat_runtime_suspend(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- if (i_dev->shared_with_punit)
- return 0;
-
- i_dev->disable(i_dev);
- i2c_dw_prepare_clk(i_dev, false);
-
- return 0;
-}
-
-static int dw_i2c_plat_suspend(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- i2c_mark_adapter_suspended(&i_dev->adapter);
-
- return dw_i2c_plat_runtime_suspend(dev);
-}
-
-static int dw_i2c_plat_runtime_resume(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- if (!i_dev->shared_with_punit)
- i2c_dw_prepare_clk(i_dev, true);
-
- i_dev->init(i_dev);
-
- return 0;
-}
-
-static int dw_i2c_plat_resume(struct device *dev)
-{
- struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
-
- dw_i2c_plat_runtime_resume(dev);
- i2c_mark_adapter_resumed(&i_dev->adapter);
-
- return 0;
-}
+static const struct of_device_id dw_i2c_of_match[] = {
+ { .compatible = "snps,designware-i2c", },
+ { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT },
+ { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
-static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
- .prepare = pm_sleep_ptr(dw_i2c_plat_prepare),
- LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
+static const struct acpi_device_id dw_i2c_acpi_match[] = {
+ { "80860F41", ACCESS_NO_IRQ_SUSPEND },
+ { "808622C1", ACCESS_NO_IRQ_SUSPEND },
+ { "AMD0010", ACCESS_INTR_MASK },
+ { "AMDI0010", ACCESS_INTR_MASK },
+ { "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE },
+ { "AMDI0510", 0 },
+ { "APMC0D0F", 0 },
+ { "HISI02A1", 0 },
+ { "HISI02A2", 0 },
+ { "HISI02A3", 0 },
+ { "HYGO0010", ACCESS_INTR_MASK },
+ { "INT33C2", 0 },
+ { "INT33C3", 0 },
+ { "INT3432", 0 },
+ { "INT3433", 0 },
+ { "INTC10EF", 0 },
+ {}
};
+MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
static const struct platform_device_id dw_i2c_platform_ids[] = {
{ "i2c_designware" },
@@ -491,9 +375,9 @@ static struct platform_driver dw_i2c_driver = {
.remove_new = dw_i2c_plat_remove,
.driver = {
.name = "i2c_designware",
- .of_match_table = of_match_ptr(dw_i2c_of_match),
- .acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
- .pm = pm_ptr(&dw_i2c_dev_pm_ops),
+ .of_match_table = dw_i2c_of_match,
+ .acpi_match_table = dw_i2c_acpi_match,
+ .pm = pm_ptr(&i2c_dw_dev_pm_ops),
},
.id_table = dw_i2c_platform_ids,
};
@@ -513,3 +397,5 @@ module_exit(dw_i2c_exit_driver);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(I2C_DW);
+MODULE_IMPORT_NS(I2C_DW_COMMON);
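
Dropping of_match_ptr()/ACPI_PTR() goes hand in hand with building both match tables unconditionally: referencing the tables directly avoids "defined but not used" warnings when CONFIG_OF or CONFIG_ACPI is disabled, with no #ifdef needed around the definitions. The resulting pattern, sketched with hypothetical names:

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{}
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.of_match_table = foo_of_match,	/* no of_match_ptr() */
	},
};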
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 78e2c47e3d7d..7035296aa24c 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -16,6 +16,8 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#define DEFAULT_SYMBOL_NAMESPACE I2C_DW
+
#include "i2c-designware-core.h"
static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
@@ -88,7 +90,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter);
regmap_write(dev->map, DW_IC_INTR_MASK, 0);
- dev->disable(dev);
+ i2c_dw_disable(dev);
synchronize_irq(dev->irq);
dev->slave = NULL;
pm_runtime_put(dev->dev);
@@ -235,7 +237,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
int ret;
dev->init = i2c_dw_init_slave;
- dev->disable = i2c_dw_disable;
ret = i2c_dw_init_regmap(dev);
if (ret)
@@ -279,3 +280,4 @@ EXPORT_SYMBOL_GPL(i2c_dw_probe_slave);
MODULE_AUTHOR("Luis Oliveira <lolivei@synopsys.com>");
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus slave adapter");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(I2C_DW_COMMON);
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 3e6b80e59b90..3dc5a46698fc 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -357,7 +357,7 @@ static void dc_i2c_remove(struct platform_device *pdev)
static const struct of_device_id dc_i2c_match[] = {
{ .compatible = "cnxt,cx92755-i2c" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, dc_i2c_match);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 557409410445..d08be3f3cede 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -67,7 +67,6 @@ struct em_i2c_device {
void __iomem *base;
struct i2c_adapter adap;
struct completion msg_done;
- struct clk *sclk;
struct i2c_client *slave;
int irq;
};
@@ -361,6 +360,7 @@ static const struct i2c_algorithm em_i2c_algo = {
static int em_i2c_probe(struct platform_device *pdev)
{
struct em_i2c_device *priv;
+ struct clk *sclk;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -373,13 +373,9 @@ static int em_i2c_probe(struct platform_device *pdev)
strscpy(priv->adap.name, "EMEV2 I2C", sizeof(priv->adap.name));
- priv->sclk = devm_clk_get(&pdev->dev, "sclk");
- if (IS_ERR(priv->sclk))
- return PTR_ERR(priv->sclk);
-
- ret = clk_prepare_enable(priv->sclk);
- if (ret)
- return ret;
+ sclk = devm_clk_get_enabled(&pdev->dev, "sclk");
+ if (IS_ERR(sclk))
+ return PTR_ERR(sclk);
priv->adap.timeout = msecs_to_jiffies(100);
priv->adap.retries = 5;
@@ -397,26 +393,22 @@ static int em_i2c_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto err_clk;
+ return ret;
priv->irq = ret;
+
ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
- goto err_clk;
+ return ret;
ret = i2c_add_adapter(&priv->adap);
-
if (ret)
- goto err_clk;
+ return ret;
dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
priv->irq);
return 0;
-
-err_clk:
- clk_disable_unprepare(priv->sclk);
- return ret;
}
static void em_i2c_remove(struct platform_device *dev)
@@ -424,7 +416,6 @@ static void em_i2c_remove(struct platform_device *dev)
struct em_i2c_device *priv = platform_get_drvdata(dev);
i2c_del_adapter(&priv->adap);
- clk_disable_unprepare(priv->sclk);
}
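
The conversion above relies on devm_clk_get_enabled(), which bundles devm_clk_get() and clk_prepare_enable() and registers the matching disable/unprepare as a device-managed action, so neither the probe error paths nor the remove path need explicit clock cleanup anymore. A minimal probe sketch (names hypothetical):

static int foo_probe(struct platform_device *pdev)
{
	struct clk *sclk;

	/* enabled now; disabled and put automatically on unbind */
	sclk = devm_clk_get_enabled(&pdev->dev, "sclk");
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	return 0;
}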
static const struct of_device_id em_i2c_ids[] = {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 328c0dab6b14..299fe9d3afab 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1763,8 +1763,15 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
i801_add_tco(priv);
+ /*
+ * adapter.name is used by platform code to find the main I801 adapter
+ * to instantiate i2c_clients; do not change it.
+ */
snprintf(priv->adapter.name, sizeof(priv->adapter.name),
- "SMBus I801 adapter at %04lx", priv->smba);
+ "SMBus %s adapter at %04lx",
+ (priv->features & FEATURE_IDF) ? "I801 IDF" : "I801",
+ priv->smba);
+
err = i2c_add_adapter(&priv->adapter);
if (err) {
platform_device_unregister(priv->tco_pdev);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 0197786892a2..976d43f73f38 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -559,7 +559,7 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
static const struct of_device_id lpi2c_imx_of_match[] = {
{ .compatible = "fsl,imx7ulp-lpi2c" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 3842e527116b..98539313cbc9 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -687,7 +687,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic)
i2c_imx_bus_busy(i2c_imx, 0, atomic);
/* Disable I2C controller */
- temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
+ temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
}
@@ -1549,7 +1549,7 @@ static void i2c_imx_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int __maybe_unused i2c_imx_runtime_suspend(struct device *dev)
+static int i2c_imx_runtime_suspend(struct device *dev)
{
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
@@ -1558,7 +1558,7 @@ static int __maybe_unused i2c_imx_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused i2c_imx_runtime_resume(struct device *dev)
+static int i2c_imx_runtime_resume(struct device *dev)
{
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
int ret;
@@ -1571,8 +1571,7 @@ static int __maybe_unused i2c_imx_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops i2c_imx_pm_ops = {
- SET_RUNTIME_PM_OPS(i2c_imx_runtime_suspend,
- i2c_imx_runtime_resume, NULL)
+ RUNTIME_PM_OPS(i2c_imx_runtime_suspend, i2c_imx_runtime_resume, NULL)
};
static struct platform_driver i2c_imx_driver = {
@@ -1580,7 +1579,7 @@ static struct platform_driver i2c_imx_driver = {
.remove_new = i2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
- .pm = &i2c_imx_pm_ops,
+ .pm = pm_ptr(&i2c_imx_pm_ops),
.of_match_table = i2c_imx_dt_ids,
.acpi_match_table = i2c_imx_acpi_ids,
},
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 33dbc19d3848..f59158489ad9 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -99,8 +99,7 @@ static int sch_transaction(void)
if (retries > MAX_RETRIES) {
dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
- }
- if (temp & 0x04) {
+ } else if (temp & 0x04) {
result = -EIO;
dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
"locked until next hard reset. (sorry!)\n");
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 655b5d851c48..c93c02aa6ac8 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -382,6 +382,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
}
/**
+ * ismt_kill_transaction() - kill current transaction
+ * @priv: iSMT private data
+ */
+static void ismt_kill_transaction(struct ismt_priv *priv)
+{
+ writel(ISMT_GCTRL_KILL, priv->smba + ISMT_GR_GCTRL);
+}
+
+/**
* ismt_access() - process an SMBus command
* @adap: the i2c host adapter
* @addr: address of the i2c/SMBus target
@@ -623,6 +632,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
if (unlikely(!time_left)) {
+ ismt_kill_transaction(priv);
ret = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 4aafdfab6305..92cc5b091137 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -792,26 +792,22 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
- i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ i2c->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(i2c->clk))
return PTR_ERR(i2c->clk);
- ret = clk_prepare_enable(i2c->clk);
- if (ret)
- return ret;
-
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&clk_freq);
if (ret) {
dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
- goto err;
+ return ret;
}
i2c->speed = clk_freq / 1000;
if (i2c->speed == 0) {
ret = -EINVAL;
dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
- goto err;
+ return ret;
}
jz4780_i2c_set_speed(i2c);
@@ -827,29 +823,25 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto err;
+ return ret;
i2c->irq = ret;
+
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
if (ret)
- goto err;
+ return ret;
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-
-err:
- clk_disable_unprepare(i2c->clk);
- return ret;
}
static void jz4780_i2c_remove(struct platform_device *pdev)
{
struct jz4780_i2c *i2c = platform_get_drvdata(pdev);
- clk_disable_unprepare(i2c->clk);
i2c_del_adapter(&i2c->adap);
}
diff --git a/drivers/i2c/busses/i2c-keba.c b/drivers/i2c/busses/i2c-keba.c
new file mode 100644
index 000000000000..759732a07ef0
--- /dev/null
+++ b/drivers/i2c/busses/i2c-keba.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) KEBA Industrial Automation Gmbh 2024
+ *
+ * Driver for KEBA I2C controller FPGA IP core
+ */
+
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/misc/keba.h>
+
+#define KI2C "i2c-keba"
+
+#define KI2C_CAPABILITY_REG 0x02
+#define KI2C_CAPABILITY_CRYPTO 0x01
+#define KI2C_CAPABILITY_DC 0x02
+
+#define KI2C_CONTROL_REG 0x04
+#define KI2C_CONTROL_MEN 0x01
+#define KI2C_CONTROL_MSTA 0x02
+#define KI2C_CONTROL_RSTA 0x04
+#define KI2C_CONTROL_MTX 0x08
+#define KI2C_CONTROL_TXAK 0x10
+#define KI2C_CONTROL_DISABLE 0x00
+
+#define KI2C_CONTROL_DC_REG 0x05
+#define KI2C_CONTROL_DC_SDA 0x01
+#define KI2C_CONTROL_DC_SCL 0x02
+
+#define KI2C_STATUS_REG 0x08
+#define KI2C_STATUS_IN_USE 0x01
+#define KI2C_STATUS_ACK_CYC 0x02
+#define KI2C_STATUS_RXAK 0x04
+#define KI2C_STATUS_MCF 0x08
+
+#define KI2C_STATUS_DC_REG 0x09
+#define KI2C_STATUS_DC_SDA 0x01
+#define KI2C_STATUS_DC_SCL 0x02
+
+#define KI2C_DATA_REG 0x0c
+
+#define KI2C_INUSE_SLEEP_US (2 * USEC_PER_MSEC)
+#define KI2C_INUSE_TIMEOUT_US (10 * USEC_PER_SEC)
+
+#define KI2C_POLL_DELAY_US 5
+
+struct ki2c {
+ struct keba_i2c_auxdev *auxdev;
+ void __iomem *base;
+ struct i2c_adapter adapter;
+
+ struct i2c_client **client;
+ int client_size;
+};
+
+static int ki2c_inuse_lock(struct ki2c *ki2c)
+{
+ u8 sts;
+ int ret;
+
+ /*
+ * The I2C controller has an IN_USE bit for locking access to the
+ * controller. This enables use of the I2C controller by other,
+ * non-Linux processors.
+ *
+ * If the I2C controller is free, then the first read returns
+ * IN_USE == 0. After that the I2C controller is locked and further
+ * reads of IN_USE return 1.
+ *
+ * The I2C controller is unlocked by writing 1 into IN_USE.
+ *
+ * The IN_USE bit acts as a hardware semaphore for the I2C controller.
+ * Poll for semaphore, but sleep while polling to free the CPU.
+ */
+ ret = readb_poll_timeout(ki2c->base + KI2C_STATUS_REG,
+ sts, (sts & KI2C_STATUS_IN_USE) == 0,
+ KI2C_INUSE_SLEEP_US, KI2C_INUSE_TIMEOUT_US);
+ if (ret)
+ dev_err(&ki2c->auxdev->auxdev.dev, "%s err!\n", __func__);
+
+ return ret;
+}
+
+static void ki2c_inuse_unlock(struct ki2c *ki2c)
+{
+ /* unlock the controller by writing 1 into IN_USE */
+ iowrite8(KI2C_STATUS_IN_USE, ki2c->base + KI2C_STATUS_REG);
+}
+
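
A hedged usage sketch of the semaphore pair above (the transfer body is a placeholder): every register access sequence is bracketed by lock/unlock so that non-Linux users of the controller never observe it mid-operation.

static int ki2c_example_access(struct ki2c *ki2c)
{
	int ret;

	ret = ki2c_inuse_lock(ki2c);	/* -ETIMEDOUT if never freed */
	if (ret)
		return ret;

	/* ... program and run the transfer here ... */

	ki2c_inuse_unlock(ki2c);
	return 0;
}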
+static int ki2c_wait_for_bit(void __iomem *addr, u8 mask, unsigned long timeout)
+{
+ u8 val;
+
+ return readb_poll_timeout(addr, val, (val & mask), KI2C_POLL_DELAY_US,
+ jiffies_to_usecs(timeout));
+}
+
+static int ki2c_wait_for_mcf(struct ki2c *ki2c)
+{
+ return ki2c_wait_for_bit(ki2c->base + KI2C_STATUS_REG, KI2C_STATUS_MCF,
+ ki2c->adapter.timeout);
+}
+
+static int ki2c_wait_for_data(struct ki2c *ki2c)
+{
+ int ret;
+
+ ret = ki2c_wait_for_mcf(ki2c);
+ if (ret < 0)
+ return ret;
+
+ return ki2c_wait_for_bit(ki2c->base + KI2C_STATUS_REG,
+ KI2C_STATUS_ACK_CYC,
+ ki2c->adapter.timeout);
+}
+
+static int ki2c_wait_for_data_ack(struct ki2c *ki2c)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = ki2c_wait_for_data(ki2c);
+ if (ret < 0)
+ return ret;
+
+ /* RXAK == 0 means ACK received */
+ reg = ioread8(ki2c->base + KI2C_STATUS_REG);
+ if (reg & KI2C_STATUS_RXAK)
+ return -EIO;
+
+ return 0;
+}
+
+static int ki2c_has_capability(struct ki2c *ki2c, unsigned int cap)
+{
+ unsigned int reg = ioread8(ki2c->base + KI2C_CAPABILITY_REG);
+
+ return (reg & cap) != 0;
+}
+
+static int ki2c_get_scl(struct ki2c *ki2c)
+{
+ unsigned int reg = ioread8(ki2c->base + KI2C_STATUS_DC_REG);
+
+ /* capability KI2C_CAPABILITY_DC required */
+ return (reg & KI2C_STATUS_DC_SCL) != 0;
+}
+
+static int ki2c_get_sda(struct ki2c *ki2c)
+{
+ unsigned int reg = ioread8(ki2c->base + KI2C_STATUS_DC_REG);
+
+ /* capability KI2C_CAPABILITY_DC required */
+ return (reg & KI2C_STATUS_DC_SDA) != 0;
+}
+
+static void ki2c_set_scl(struct ki2c *ki2c, int val)
+{
+ u8 control_dc;
+
+ /* capability KI2C_CAPABILITY_DC and KI2C_CONTROL_MEN = 0 required */
+ control_dc = ioread8(ki2c->base + KI2C_CONTROL_DC_REG);
+ if (val)
+ control_dc |= KI2C_CONTROL_DC_SCL;
+ else
+ control_dc &= ~KI2C_CONTROL_DC_SCL;
+ iowrite8(control_dc, ki2c->base + KI2C_CONTROL_DC_REG);
+}
+
+/*
+ * Resetting bus bitwise is done by checking SDA and applying clock cycles as
+ * long as SDA is low. 9 clock cycles are applied at most.
+ *
+ * Clock cycles are generated with udelay(), which determines the duration
+ * of each clock level. The generated clock rate is 100 kHz, so each clock
+ * level lasts: delay in ns = (10^9 / 100000) / 2 = 5000
+ */
+#define KI2C_RECOVERY_CLK_CNT (9 * 2)
+#define KI2C_RECOVERY_UDELAY 5
+static int ki2c_reset_bus_bitwise(struct ki2c *ki2c)
+{
+ int val = 1;
+ int ret = 0;
+ int i;
+
+ /* disable I2C controller (MEN = 0) to get direct access to SCL/SDA */
+ iowrite8(0, ki2c->base + KI2C_CONTROL_REG);
+
+ /* generate clock cycles */
+ ki2c_set_scl(ki2c, val);
+ udelay(KI2C_RECOVERY_UDELAY);
+ for (i = 0; i < KI2C_RECOVERY_CLK_CNT; i++) {
+ if (val) {
+ /* SCL shouldn't be low here */
+ if (!ki2c_get_scl(ki2c)) {
+ dev_err(&ki2c->auxdev->auxdev.dev,
+ "SCL is stuck low!\n");
+ ret = -EBUSY;
+ break;
+ }
+
+ /* break if SDA is high */
+ if (ki2c_get_sda(ki2c))
+ break;
+ }
+
+ val = !val;
+ ki2c_set_scl(ki2c, val);
+ udelay(KI2C_RECOVERY_UDELAY);
+ }
+
+ if (!ki2c_get_sda(ki2c)) {
+ dev_err(&ki2c->auxdev->auxdev.dev, "SDA is still low!\n");
+ ret = -EBUSY;
+ }
+
+ /* reenable controller */
+ iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG);
+
+ return ret;
+}
+
+/*
+ * Resetting bus bytewise is done by writing start bit, 9 data bits and stop
+ * bit.
+ *
+ * This is not 100% safe. If the target is an EEPROM and a write access was
+ * interrupted during the ACK cycle, this approach might not be able to
+ * recover the bus. The reason is that after the 9 clock cycles the EEPROM
+ * will be in the ACK cycle again and will hold SDA low just as it did
+ * before the start of the routine. Furthermore, the EEPROM might get one
+ * additional byte of 0xff written into it. Thus, use the bitwise approach
+ * whenever possible, especially when
+ * EEPROMs are on the bus.
+ */
+static int ki2c_reset_bus_bytewise(struct ki2c *ki2c)
+{
+ int ret;
+
+ /* hold data line high for 9 clock cycles */
+ iowrite8(0xFF, ki2c->base + KI2C_DATA_REG);
+
+ /* create start condition */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_MSTA | KI2C_CONTROL_TXAK,
+ ki2c->base + KI2C_CONTROL_REG);
+ ret = ki2c_wait_for_mcf(ki2c);
+ if (ret < 0) {
+ dev_err(&ki2c->auxdev->auxdev.dev, "Start condition failed\n");
+
+ return ret;
+ }
+
+ /* create stop condition */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_TXAK,
+ ki2c->base + KI2C_CONTROL_REG);
+ ret = ki2c_wait_for_mcf(ki2c);
+ if (ret < 0)
+ dev_err(&ki2c->auxdev->auxdev.dev, "Stop condition failed\n");
+
+ return ret;
+}
+
+static int ki2c_reset_bus(struct ki2c *ki2c)
+{
+ int ret;
+
+ ret = ki2c_inuse_lock(ki2c);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If the I2C controller is capable of direct control of SCL/SDA, then a
+ * bitwise reset is used. Otherwise fall back to bytewise reset.
+ */
+ if (ki2c_has_capability(ki2c, KI2C_CAPABILITY_DC))
+ ret = ki2c_reset_bus_bitwise(ki2c);
+ else
+ ret = ki2c_reset_bus_bytewise(ki2c);
+
+ ki2c_inuse_unlock(ki2c);
+
+ return ret;
+}
+
+static void ki2c_write_target_addr(struct ki2c *ki2c, struct i2c_msg *m)
+{
+ u8 addr;
+
+ addr = m->addr << 1;
+ /* Bit 0 signals RD/WR */
+ if (m->flags & I2C_M_RD)
+ addr |= 0x01;
+
+ iowrite8(addr, ki2c->base + KI2C_DATA_REG);
+}
+
+static int ki2c_start_addr(struct ki2c *ki2c, struct i2c_msg *m)
+{
+ int ret;
+
+ /*
+ * Store the target address byte in the controller. This has to be done
+ * before sending the START condition.
+ */
+ ki2c_write_target_addr(ki2c, m);
+
+ /* enable controller for TX */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX,
+ ki2c->base + KI2C_CONTROL_REG);
+
+ /* send START condition and target address byte */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MTX | KI2C_CONTROL_MSTA,
+ ki2c->base + KI2C_CONTROL_REG);
+
+ ret = ki2c_wait_for_data_ack(ki2c);
+ if (ret < 0)
+ /*
+ * For EEPROMs this is normal behavior during an internal write
+ * operation.
+ */
+ dev_dbg(&ki2c->auxdev->auxdev.dev,
+ "%s wait for ACK err at 0x%02x!\n", __func__, m->addr);
+
+ return ret;
+}
+
+static int ki2c_repstart_addr(struct ki2c *ki2c, struct i2c_msg *m)
+{
+ int ret;
+
+ /* repeated start and write is not supported */
+ if ((m->flags & I2C_M_RD) == 0) {
+ dev_err(&ki2c->auxdev->auxdev.dev,
+ "Repeated start not supported for writes\n");
+ return -EINVAL;
+ }
+
+ /* send repeated start */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_RSTA,
+ ki2c->base + KI2C_CONTROL_REG);
+
+ ret = ki2c_wait_for_mcf(ki2c);
+ if (ret < 0) {
+ dev_err(&ki2c->auxdev->auxdev.dev,
+ "%s wait for MCF err at 0x%02x!\n", __func__, m->addr);
+ return ret;
+ }
+
+ /* write target-address byte */
+ ki2c_write_target_addr(ki2c, m);
+
+ ret = ki2c_wait_for_data_ack(ki2c);
+ if (ret < 0)
+ dev_err(&ki2c->auxdev->auxdev.dev,
+ "%s wait for ACK err at 0x%02x!\n", __func__, m->addr);
+
+ return ret;
+}
+
+static void ki2c_stop(struct ki2c *ki2c)
+{
+ iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG);
+ ki2c_wait_for_mcf(ki2c);
+}
+
+static int ki2c_write(struct ki2c *ki2c, const u8 *data, int len)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ /* write data byte */
+ iowrite8(data[i], ki2c->base + KI2C_DATA_REG);
+
+ ret = ki2c_wait_for_data_ack(ki2c);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ki2c_read(struct ki2c *ki2c, u8 *data, int len)
+{
+ u8 control;
+ int ret;
+ int i;
+
+ if (len == 0)
+ return 0; /* nothing to do */
+
+ control = KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA;
+
+ /* if just one byte => send tx-nack after transfer */
+ if (len == 1)
+ control |= KI2C_CONTROL_TXAK;
+
+ iowrite8(control, ki2c->base + KI2C_CONTROL_REG);
+
+ /* dummy read to start transfer on bus */
+ ioread8(ki2c->base + KI2C_DATA_REG);
+
+ for (i = 0; i < len; i++) {
+ ret = ki2c_wait_for_data(ki2c);
+ if (ret < 0)
+ return ret;
+
+ if (i == len - 2)
+ /* send tx-nack after transfer of last byte */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_TXAK,
+ ki2c->base + KI2C_CONTROL_REG);
+ else if (i == len - 1)
+ /*
+ * switch to TX on last byte, so that reading DATA
+ * register does not trigger another read transfer
+ */
+ iowrite8(KI2C_CONTROL_MEN | KI2C_CONTROL_MSTA | KI2C_CONTROL_MTX,
+ ki2c->base + KI2C_CONTROL_REG);
+
+ /* read byte and start next transfer (if not last byte) */
+ data[i] = ioread8(ki2c->base + KI2C_DATA_REG);
+ }
+
+ return len;
+}
+
+static int ki2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct ki2c *ki2c = i2c_get_adapdata(adap);
+ int ret;
+ int i;
+
+ ret = ki2c_inuse_lock(ki2c);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *m = &msgs[i];
+
+ if (i == 0)
+ ret = ki2c_start_addr(ki2c, m);
+ else
+ ret = ki2c_repstart_addr(ki2c, m);
+ if (ret < 0)
+ break;
+
+ if (m->flags & I2C_M_RD)
+ ret = ki2c_read(ki2c, m->buf, m->len);
+ else
+ ret = ki2c_write(ki2c, m->buf, m->len);
+ if (ret < 0)
+ break;
+ }
+
+ ki2c_stop(ki2c);
+
+ ki2c_inuse_unlock(ki2c);
+
+ return ret < 0 ? ret : num;
+}
+
+static void ki2c_unregister_devices(struct ki2c *ki2c)
+{
+ int i;
+
+ for (i = 0; i < ki2c->client_size; i++) {
+ struct i2c_client *client = ki2c->client[i];
+
+ if (client)
+ i2c_unregister_device(client);
+ }
+}
+
+static int ki2c_register_devices(struct ki2c *ki2c)
+{
+ struct i2c_board_info *info = ki2c->auxdev->info;
+ int i;
+
+ /* register all known I2C devices */
+ for (i = 0; i < ki2c->client_size; i++) {
+ struct i2c_client *client;
+ unsigned short const addr_list[2] = { info[i].addr,
+ I2C_CLIENT_END };
+
+ client = i2c_new_scanned_device(&ki2c->adapter, &info[i],
+ addr_list, NULL);
+ if (!IS_ERR(client)) {
+ ki2c->client[i] = client;
+ } else if (PTR_ERR(client) != -ENODEV) {
+ ki2c->client_size = i;
+ ki2c_unregister_devices(ki2c);
+
+ return PTR_ERR(client);
+ }
+ }
+
+ return 0;
+}
+
+static u32 ki2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm ki2c_algo = {
+ .master_xfer = ki2c_xfer,
+ .functionality = ki2c_func,
+};
+
+static int ki2c_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &auxdev->dev;
+ struct i2c_adapter *adap;
+ struct ki2c *ki2c;
+ int ret;
+
+ ki2c = devm_kzalloc(dev, sizeof(*ki2c), GFP_KERNEL);
+ if (!ki2c)
+ return -ENOMEM;
+ ki2c->auxdev = container_of(auxdev, struct keba_i2c_auxdev, auxdev);
+ ki2c->client = devm_kcalloc(dev, ki2c->auxdev->info_size,
+ sizeof(*ki2c->client), GFP_KERNEL);
+ if (!ki2c->client)
+ return -ENOMEM;
+ ki2c->client_size = ki2c->auxdev->info_size;
+ auxiliary_set_drvdata(auxdev, ki2c);
+
+ ki2c->base = devm_ioremap_resource(dev, &ki2c->auxdev->io);
+ if (IS_ERR(ki2c->base))
+ return PTR_ERR(ki2c->base);
+
+ adap = &ki2c->adapter;
+ strscpy(adap->name, "KEBA I2C adapter", sizeof(adap->name));
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ adap->algo = &ki2c_algo;
+ adap->dev.parent = dev;
+
+ i2c_set_adapdata(adap, ki2c);
+
+ /* enable controller */
+ iowrite8(KI2C_CONTROL_MEN, ki2c->base + KI2C_CONTROL_REG);
+
+ /* reset bus before probing I2C devices */
+ ret = ki2c_reset_bus(ki2c);
+ if (ret)
+ goto out;
+
+ ret = devm_i2c_add_adapter(dev, adap);
+ if (ret) {
+ dev_err(dev, "Failed to add adapter (%d)!\n", ret);
+ goto out;
+ }
+
+ ret = ki2c_register_devices(ki2c);
+ if (ret) {
+ dev_err(dev, "Failed to register devices (%d)!\n", ret);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ iowrite8(KI2C_CONTROL_DISABLE, ki2c->base + KI2C_CONTROL_REG);
+ return ret;
+}
+
+static void ki2c_remove(struct auxiliary_device *auxdev)
+{
+ struct ki2c *ki2c = auxiliary_get_drvdata(auxdev);
+
+ ki2c_unregister_devices(ki2c);
+
+ /* disable controller */
+ iowrite8(KI2C_CONTROL_DISABLE, ki2c->base + KI2C_CONTROL_REG);
+
+ auxiliary_set_drvdata(auxdev, NULL);
+}
+
+static const struct auxiliary_device_id ki2c_devtype_aux[] = {
+ { .name = "keba.i2c" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, ki2c_devtype_aux);
+
+static struct auxiliary_driver ki2c_driver_aux = {
+ .name = KI2C,
+ .id_table = ki2c_devtype_aux,
+ .probe = ki2c_probe,
+ .remove = ki2c_remove,
+};
+module_auxiliary_driver(ki2c_driver_aux);
+
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA I2C bus controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-ljca.c b/drivers/i2c/busses/i2c-ljca.c
index 0b70621cf9d3..1dc516ef0fdd 100644
--- a/drivers/i2c/busses/i2c-ljca.c
+++ b/drivers/i2c/busses/i2c-ljca.c
@@ -107,7 +107,7 @@ static int ljca_i2c_start(struct ljca_i2c_dev *ljca_i2c, u8 target_addr,
return 0;
}
-static void ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c, u8 target_addr)
+static void ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c)
{
struct ljca_i2c_rw_packet *w_packet =
(struct ljca_i2c_rw_packet *)ljca_i2c->obuf;
@@ -178,7 +178,7 @@ static int ljca_i2c_read(struct ljca_i2c_dev *ljca_i2c, u8 target_addr, u8 *data
if (!ret)
ret = ljca_i2c_pure_read(ljca_i2c, data, len);
- ljca_i2c_stop(ljca_i2c, target_addr);
+ ljca_i2c_stop(ljca_i2c);
return ret;
}
@@ -222,7 +222,7 @@ static int ljca_i2c_write(struct ljca_i2c_dev *ljca_i2c, u8 target_addr,
if (!ret)
ret = ljca_i2c_pure_write(ljca_i2c, data, len);
- ljca_i2c_stop(ljca_i2c, target_addr);
+ ljca_i2c_stop(ljca_i2c);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 41d6c8ed163a..236d6b8ba867 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -88,7 +88,6 @@ struct mpc_i2c {
int irq;
u32 real_clk;
u8 fdr, dfsrr;
- struct clk *clk_per;
u32 cntl_bits;
enum mpc_i2c_action action;
struct i2c_msg *msgs;
@@ -779,7 +778,6 @@ static int fsl_i2c_probe(struct platform_device *op)
struct clk *clk;
int result;
u32 clock;
- int err;
i2c = devm_kzalloc(&op->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -809,18 +807,12 @@ static int fsl_i2c_probe(struct platform_device *op)
* enable clock for the I2C peripheral (non fatal),
* keep a reference upon successful allocation
*/
- clk = devm_clk_get_optional(&op->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- err = clk_prepare_enable(clk);
- if (err) {
+ clk = devm_clk_get_optional_enabled(&op->dev, NULL);
+ if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to enable clock\n");
- return err;
+ return PTR_ERR(clk);
}
- i2c->clk_per = clk;
-
if (of_property_read_bool(op->dev.of_node, "fsl,preserve-clocking")) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
@@ -876,14 +868,9 @@ static int fsl_i2c_probe(struct platform_device *op)
result = i2c_add_numbered_adapter(&i2c->adap);
if (result)
- goto fail_add;
+ return result;
return 0;
-
- fail_add:
- clk_disable_unprepare(i2c->clk_per);
-
- return result;
};
static void fsl_i2c_remove(struct platform_device *op)
@@ -891,8 +878,6 @@ static void fsl_i2c_remove(struct platform_device *op)
struct mpc_i2c *i2c = platform_get_drvdata(op);
i2c_del_adapter(&i2c->adap);
-
- clk_disable_unprepare(i2c->clk_per);
};
static int __maybe_unused mpc_i2c_suspend(struct device *dev)
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index a8b5719c3372..e0ba653dec2d 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1306,12 +1306,9 @@ err_exit:
static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
{
struct mtk_i2c *i2c = dev_id;
- u16 restart_flag = 0;
+ u16 restart_flag = i2c->auto_restart ? I2C_RS_TRANSFER : 0;
u16 intr_stat;
- if (i2c->auto_restart)
- restart_flag = I2C_RS_TRANSFER;
-
intr_stat = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
mtk_i2c_writew(i2c, intr_stat, OFFSET_INTR_STAT);
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 2fe68615942e..bbcb4d6668ce 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -136,11 +136,13 @@ enum i2c_addr {
* Since the addr regs are sprinkled all over the address space,
* use this array to get the address or each register.
*/
-#define I2C_NUM_OWN_ADDR 2
+#define I2C_NUM_OWN_ADDR 10
#define I2C_NUM_OWN_ADDR_SUPPORTED 2
static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
- NPCM_I2CADDR1, NPCM_I2CADDR2,
+ NPCM_I2CADDR1, NPCM_I2CADDR2, NPCM_I2CADDR3, NPCM_I2CADDR4,
+ NPCM_I2CADDR5, NPCM_I2CADDR6, NPCM_I2CADDR7, NPCM_I2CADDR8,
+ NPCM_I2CADDR9, NPCM_I2CADDR10,
};
#endif
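
Presumably the table is grown because callers index npcm_i2caddr[] by own-address slot number, which ranges over all ten hardware slots even though only two are claimed as supported, so a two-entry table could be read out of bounds. A hypothetical sketch of the indexing pattern the resize makes safe:

static int example_own_addr_reg(unsigned int slot)
{
	if (slot >= I2C_NUM_OWN_ADDR)
		return -EINVAL;
	return npcm_i2caddr[slot];	/* valid for slots 0..9 now */
}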
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 35a3f0a64986..1d9ad25c89ae 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1261,7 +1261,7 @@ static const struct of_device_id omap_i2c_of_match[] = {
.compatible = "ti,omap2420-i2c",
.data = &omap2420_pdata,
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
#endif
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 4e32d57ae0bf..febbd9950d8f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -146,7 +146,7 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
.ident = "IBM",
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
},
- { },
+ { }
};
/*
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index f448505d5468..1dafadda73af 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -721,7 +721,7 @@ static void i2c_pnx_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id i2c_pnx_of_match[] = {
{ .compatible = "nxp,pnx-i2c" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, i2c_pnx_of_match);
#endif
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6b3c6a733368..af2094720a4d 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -135,7 +135,7 @@ err_dev_add:
static const struct pci_device_id ce4100_i2c_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
- { },
+ { }
};
static struct pci_driver ce4100_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 031175113dd4..4d76e71cdd4b 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -218,7 +218,7 @@ static const struct platform_device_id i2c_pxa_id_table[] = {
{ "ce4100-i2c", REGS_CE4100 },
{ "pxa910-i2c", REGS_PXA910 },
{ "armada-3700-i2c", REGS_A3700 },
- { },
+ { }
};
MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 06e836e3e877..212336f724a6 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -721,7 +721,7 @@ static const struct i2c_algorithm geni_i2c_algo = {
static const struct acpi_device_id geni_i2c_acpi_match[] = {
{ "QCOM0220"},
{ "QCOM0411" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match);
#endif
@@ -818,15 +818,13 @@ static int geni_i2c_probe(struct platform_device *pdev)
init_completion(&gi2c->done);
spin_lock_init(&gi2c->lock);
platform_set_drvdata(pdev, gi2c);
- ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0,
+ ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN,
dev_name(dev), gi2c);
if (ret) {
dev_err(dev, "Request_irq failed:%d: err:%d\n",
gi2c->irq, ret);
return ret;
}
- /* Disable the interrupt so that the system can enter low-power mode */
- disable_irq(gi2c->irq);
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = dev;
gi2c->adap.dev.of_node = dev->of_node;
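
The IRQF_NO_AUTOEN change closes the window in which the interrupt could fire between devm_request_irq() and the disable_irq() that used to follow; with the flag, the line stays masked from the start and is enabled only once the device is ready. The idiom, sketched with a hypothetical handler:

static int foo_setup_irq(struct device *dev, int irq, void *priv)
{
	int ret;

	/* line stays disabled until enable_irq() is called */
	ret = devm_request_irq(dev, irq, foo_irq_handler,
			       IRQF_NO_AUTOEN, dev_name(dev), priv);
	if (ret)
		return ret;

	enable_irq(irq);	/* when the device is actually ready */
	return 0;
}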
@@ -986,21 +984,24 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
return ret;
ret = clk_prepare_enable(gi2c->core_clk);
- if (ret) {
- geni_icc_disable(&gi2c->se);
- return ret;
- }
+ if (ret)
+ goto out_icc_disable;
ret = geni_se_resources_on(&gi2c->se);
- if (ret) {
- clk_disable_unprepare(gi2c->core_clk);
- geni_icc_disable(&gi2c->se);
- return ret;
- }
+ if (ret)
+ goto out_clk_disable;
enable_irq(gi2c->irq);
gi2c->suspended = 0;
+
return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(gi2c->core_clk);
+out_icc_disable:
+ geni_icc_disable(&gi2c->se);
+
+ return ret;
}
static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 4a2c745751a2..d480162a4d39 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1648,7 +1648,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
static const struct acpi_device_id qup_i2c_acpi_match[] = {
{ "QCOM8010"},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index da4b07c0ed4c..9267df38c2d0 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1164,11 +1164,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
rcar_i2c_init(priv);
rcar_i2c_reset_slave(priv);
- if (priv->devtype < I2C_RCAR_GEN3) {
- irqflags |= IRQF_NO_THREAD;
- irqhandler = rcar_i2c_gen2_irq;
- }
-
/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;
@@ -1178,8 +1173,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (of_property_read_bool(dev->of_node, "smbus"))
priv->flags |= ID_P_HOST_NOTIFY;
- /* R-Car Gen3+ needs a reset before every transfer */
- if (priv->devtype >= I2C_RCAR_GEN3) {
+ if (priv->devtype < I2C_RCAR_GEN3) {
+ irqflags |= IRQF_NO_THREAD;
+ irqhandler = rcar_i2c_gen2_irq;
+ } else {
+ /* R-Car Gen3+ needs a reset before every transfer */
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc)) {
ret = PTR_ERR(priv->rstc);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d6f585cdb7e5..c7f3a4c02470 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -63,6 +63,8 @@
#define ICMR3_ACKWP 0x10
#define ICMR3_ACKBT 0x08
+#define ICFER_FMPE 0x80
+
#define ICIER_TIE 0x80
#define ICIER_TEIE 0x40
#define ICIER_RIE 0x20
@@ -80,6 +82,7 @@ enum riic_reg_list {
RIIC_ICCR2,
RIIC_ICMR1,
RIIC_ICMR3,
+ RIIC_ICFER,
RIIC_ICSER,
RIIC_ICIER,
RIIC_ICSR2,
@@ -91,7 +94,8 @@ enum riic_reg_list {
};
struct riic_of_data {
- u8 regs[RIIC_REG_END];
+ const u8 *regs;
+ bool fast_mode_plus;
};
struct riic_dev {
@@ -105,6 +109,8 @@ struct riic_dev {
struct completion msg_done;
struct i2c_adapter adapter;
struct clk *clk;
+ struct reset_control *rstc;
+ struct i2c_timings i2c_t;
};
struct riic_irq_desc {
@@ -131,11 +137,14 @@ static inline void riic_clear_set_bit(struct riic_dev *riic, u8 clear, u8 set, u
static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct riic_dev *riic = i2c_get_adapdata(adap);
+ struct device *dev = adap->dev.parent;
unsigned long time_left;
- int i;
+ int i, ret;
u8 start_bit;
- pm_runtime_get_sync(adap->dev.parent);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
if (riic_readb(riic, RIIC_ICCR2) & ICCR2_BBSY) {
riic->err = -EBUSY;
@@ -168,7 +177,8 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
out:
- pm_runtime_put(adap->dev.parent);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return riic->err ?: num;
}
@@ -298,21 +308,21 @@ static const struct i2c_algorithm riic_algo = {
.functionality = riic_func,
};
-static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
+static int riic_init_hw(struct riic_dev *riic)
{
- int ret = 0;
+ int ret;
unsigned long rate;
int total_ticks, cks, brl, brh;
+ struct i2c_timings *t = &riic->i2c_t;
+ struct device *dev = riic->adapter.dev.parent;
+ bool fast_mode_plus = riic->info->fast_mode_plus;
+ u32 max_freq = fast_mode_plus ? I2C_MAX_FAST_MODE_PLUS_FREQ
+ : I2C_MAX_FAST_MODE_FREQ;
- pm_runtime_get_sync(riic->adapter.dev.parent);
-
- if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(&riic->adapter.dev,
- "unsupported bus speed (%dHz). %d max\n",
- t->bus_freq_hz, I2C_MAX_FAST_MODE_FREQ);
- ret = -EINVAL;
- goto out;
- }
+ if (t->bus_freq_hz > max_freq)
+ return dev_err_probe(&riic->adapter.dev, -EINVAL,
+ "unsupported bus speed %uHz (%u max)\n",
+ t->bus_freq_hz, max_freq);
rate = clk_get_rate(riic->clk);
@@ -349,8 +359,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
if (brl > (0x1F + 3)) {
dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
(unsigned long)t->bus_freq_hz);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
brh = total_ticks - brl;
@@ -382,6 +391,10 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
t->scl_fall_ns / (1000000000 / rate),
t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
/* Changing the order of accessing IICRST and ICE may break things! */
riic_writeb(riic, ICCR1_IICRST | ICCR1_SOWP, RIIC_ICCR1);
riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1);
@@ -393,11 +406,14 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
riic_writeb(riic, 0, RIIC_ICSER);
riic_writeb(riic, ICMR3_ACKWP | ICMR3_RDRFS, RIIC_ICMR3);
+ if (fast_mode_plus && t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ)
+ riic_clear_set_bit(riic, 0, ICFER_FMPE, RIIC_ICFER);
+
riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
-out:
- pm_runtime_put(riic->adapter.dev.parent);
- return ret;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
}
static struct riic_irq_desc riic_irqs[] = {
@@ -415,13 +431,12 @@ static void riic_reset_control_assert(void *data)
static int riic_i2c_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct riic_dev *riic;
struct i2c_adapter *adap;
- struct i2c_timings i2c_t;
- struct reset_control *rstc;
int i, ret;
- riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL);
+ riic = devm_kzalloc(dev, sizeof(*riic), GFP_KERNEL);
if (!riic)
return -ENOMEM;
@@ -429,22 +444,22 @@ static int riic_i2c_probe(struct platform_device *pdev)
if (IS_ERR(riic->base))
return PTR_ERR(riic->base);
- riic->clk = devm_clk_get(&pdev->dev, NULL);
+ riic->clk = devm_clk_get(dev, NULL);
if (IS_ERR(riic->clk)) {
- dev_err(&pdev->dev, "missing controller clock");
+ dev_err(dev, "missing controller clock");
return PTR_ERR(riic->clk);
}
- rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc))
- return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ riic->rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(riic->rstc))
+ return dev_err_probe(dev, PTR_ERR(riic->rstc),
"Error: missing reset ctrl\n");
- ret = reset_control_deassert(rstc);
+ ret = reset_control_deassert(riic->rstc);
if (ret)
return ret;
- ret = devm_add_action_or_reset(&pdev->dev, riic_reset_control_assert, rstc);
+ ret = devm_add_action_or_reset(dev, riic_reset_control_assert, riic->rstc);
if (ret)
return ret;
@@ -453,31 +468,34 @@ static int riic_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = devm_request_irq(&pdev->dev, ret, riic_irqs[i].isr,
+ ret = devm_request_irq(dev, ret, riic_irqs[i].isr,
0, riic_irqs[i].name, riic);
if (ret) {
- dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name);
+ dev_err(dev, "failed to request irq %s\n", riic_irqs[i].name);
return ret;
}
}
- riic->info = of_device_get_match_data(&pdev->dev);
+ riic->info = of_device_get_match_data(dev);
adap = &riic->adapter;
i2c_set_adapdata(adap, riic);
strscpy(adap->name, "Renesas RIIC adapter", sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->algo = &riic_algo;
- adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
+ adap->dev.parent = dev;
+ adap->dev.of_node = dev->of_node;
init_completion(&riic->msg_done);
- i2c_parse_fw_timings(&pdev->dev, &i2c_t, true);
+ i2c_parse_fw_timings(dev, &riic->i2c_t, true);
- pm_runtime_enable(&pdev->dev);
+ /* Default 0 to save power. Can be overridden via sysfs for lower latency. */
+ pm_runtime_set_autosuspend_delay(dev, 0);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
- ret = riic_init_hw(riic, &i2c_t);
+ ret = riic_init_hw(riic);
if (ret)
goto out;
@@ -487,60 +505,127 @@ static int riic_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, riic);
- dev_info(&pdev->dev, "registered with %dHz bus speed\n",
- i2c_t.bus_freq_hz);
+ dev_info(dev, "registered with %dHz bus speed\n", riic->i2c_t.bus_freq_hz);
return 0;
out:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
return ret;
}
static void riic_i2c_remove(struct platform_device *pdev)
{
struct riic_dev *riic = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
- pm_runtime_get_sync(&pdev->dev);
- riic_writeb(riic, 0, RIIC_ICIER);
- pm_runtime_put(&pdev->dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (!ret) {
+ riic_writeb(riic, 0, RIIC_ICIER);
+ pm_runtime_put(dev);
+ }
i2c_del_adapter(&riic->adapter);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
}
+static const u8 riic_rz_a_regs[RIIC_REG_END] = {
+ [RIIC_ICCR1] = 0x00,
+ [RIIC_ICCR2] = 0x04,
+ [RIIC_ICMR1] = 0x08,
+ [RIIC_ICMR3] = 0x10,
+ [RIIC_ICFER] = 0x14,
+ [RIIC_ICSER] = 0x18,
+ [RIIC_ICIER] = 0x1c,
+ [RIIC_ICSR2] = 0x24,
+ [RIIC_ICBRL] = 0x34,
+ [RIIC_ICBRH] = 0x38,
+ [RIIC_ICDRT] = 0x3c,
+ [RIIC_ICDRR] = 0x40,
+};
+
static const struct riic_of_data riic_rz_a_info = {
- .regs = {
- [RIIC_ICCR1] = 0x00,
- [RIIC_ICCR2] = 0x04,
- [RIIC_ICMR1] = 0x08,
- [RIIC_ICMR3] = 0x10,
- [RIIC_ICSER] = 0x18,
- [RIIC_ICIER] = 0x1c,
- [RIIC_ICSR2] = 0x24,
- [RIIC_ICBRL] = 0x34,
- [RIIC_ICBRH] = 0x38,
- [RIIC_ICDRT] = 0x3c,
- [RIIC_ICDRR] = 0x40,
- },
+ .regs = riic_rz_a_regs,
+ .fast_mode_plus = true,
+};
+
+static const struct riic_of_data riic_rz_a1h_info = {
+ .regs = riic_rz_a_regs,
+};
+
+static const u8 riic_rz_v2h_regs[RIIC_REG_END] = {
+ [RIIC_ICCR1] = 0x00,
+ [RIIC_ICCR2] = 0x01,
+ [RIIC_ICMR1] = 0x02,
+ [RIIC_ICMR3] = 0x04,
+ [RIIC_ICFER] = 0x05,
+ [RIIC_ICSER] = 0x06,
+ [RIIC_ICIER] = 0x07,
+ [RIIC_ICSR2] = 0x09,
+ [RIIC_ICBRL] = 0x10,
+ [RIIC_ICBRH] = 0x11,
+ [RIIC_ICDRT] = 0x12,
+ [RIIC_ICDRR] = 0x13,
};
static const struct riic_of_data riic_rz_v2h_info = {
- .regs = {
- [RIIC_ICCR1] = 0x00,
- [RIIC_ICCR2] = 0x01,
- [RIIC_ICMR1] = 0x02,
- [RIIC_ICMR3] = 0x04,
- [RIIC_ICSER] = 0x06,
- [RIIC_ICIER] = 0x07,
- [RIIC_ICSR2] = 0x09,
- [RIIC_ICBRL] = 0x10,
- [RIIC_ICBRH] = 0x11,
- [RIIC_ICDRT] = 0x12,
- [RIIC_ICDRR] = 0x13,
- },
+ .regs = riic_rz_v2h_regs,
+ .fast_mode_plus = true,
+};
+
+static int riic_i2c_suspend(struct device *dev)
+{
+ struct riic_dev *riic = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ i2c_mark_adapter_suspended(&riic->adapter);
+
+ /* Disable output on SDA, SCL pins. */
+ riic_clear_set_bit(riic, ICCR1_ICE, 0, RIIC_ICCR1);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync(dev);
+
+ return reset_control_assert(riic->rstc);
+}
+
+static int riic_i2c_resume(struct device *dev)
+{
+ struct riic_dev *riic = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(riic->rstc);
+ if (ret)
+ return ret;
+
+ ret = riic_init_hw(riic);
+ if (ret) {
+ /*
+ * If this happens, there is no way to recover; the driver
+ * will remain loaded. Avoid keeping the reset line
+ * de-asserted for no reason.
+ */
+ reset_control_assert(riic->rstc);
+ return ret;
+ }
+
+ i2c_mark_adapter_resumed(&riic->adapter);
+
+ return 0;
+}
+
+static const struct dev_pm_ops riic_i2c_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(riic_i2c_suspend, riic_i2c_resume)
};
static const struct of_device_id riic_i2c_dt_ids[] = {
{ .compatible = "renesas,riic-rz", .data = &riic_rz_a_info },
+ { .compatible = "renesas,riic-r7s72100", .data = &riic_rz_a1h_info, },
{ .compatible = "renesas,riic-r9a09g057", .data = &riic_rz_v2h_info },
{ /* Sentinel */ },
};
@@ -551,6 +636,7 @@ static struct platform_driver riic_i2c_driver = {
.driver = {
.name = "i2c-riic",
.of_match_table = riic_i2c_dt_ids,
+ .pm = pm_ptr(&riic_i2c_pm_ops),
},
};
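The riic conversion above is the standard runtime-PM autosuspend pattern: a zero-millisecond delay set at probe, then resume_and_get / mark_last_busy / put_autosuspend bracketing each transfer. A condensed sketch, with do_transfer() as a stand-in for the driver's real transfer work:

#include <linux/pm_runtime.h>

static int do_transfer(struct device *dev) { return 0; }	/* stand-in */

static int xfer_with_autosuspend(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* power up, or fail cleanly */
	if (ret)
		return ret;

	ret = do_transfer(dev);

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* suspends once the delay expires */
	return ret;
}

With the delay set to 0, the device suspends as soon as the last reference is dropped; raising the delay via sysfs trades power for lower transfer-start latency, as the probe comment notes.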
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 01419c738cfc..7698d9d59744 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -130,7 +130,7 @@ static const struct platform_device_id s3c24xx_driver_ids[] = {
}, {
.name = "s3c2440-hdmiphy-i2c",
.driver_data = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO,
- }, { },
+ }, { }
};
MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 4eccbcd0fbfc..bbb9062669e4 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -550,12 +550,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
&i2c->pclkrate);
- pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
if (IS_ERR(pclk))
return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
"failed to get and enable clock\n");
- i2c->pclkrate = clk_get_rate(pclk);
+ if (pclk)
+ i2c->pclkrate = clk_get_rate(pclk);
if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
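The synquacer change makes the clock optional: if firmware describes no "pclk", the rate read from the "socionext,pclk-rate" property is kept instead. A sketch of that fallback, with error handling trimmed:

#include <linux/clk.h>
#include <linux/property.h>

static int get_pclk_rate(struct device *dev, u32 *rate)
{
	struct clk *pclk;

	device_property_read_u32(dev, "socionext,pclk-rate", rate);

	pclk = devm_clk_get_optional_enabled(dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	if (pclk)	/* NULL means "no clock described": keep the property value */
		*rate = clk_get_rate(pclk);

	return 0;
}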
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 52ba1e0845ca..2a351f961b89 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -182,7 +182,7 @@ static u32 virtio_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm virtio_algorithm = {
+static const struct i2c_algorithm virtio_algorithm = {
.xfer = virtio_i2c_xfer,
.functionality = virtio_i2c_func,
};
@@ -237,7 +237,7 @@ static void virtio_i2c_remove(struct virtio_device *vdev)
virtio_i2c_del_vqs(vdev);
}
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_I2C_ADAPTER, VIRTIO_DEV_ANY_ID },
{}
};
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 19468565120e..1d68177241a6 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -772,14 +772,17 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
goto out;
}
- xiic_fill_tx_fifo(i2c);
-
- /* current message sent and there is space in the fifo */
- if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
+ if (xiic_tx_space(i2c)) {
+ xiic_fill_tx_fifo(i2c);
+ } else {
+ /* current message fully written */
dev_dbg(i2c->adap.dev.parent,
"%s end of message sent, nmsgs: %d\n",
__func__, i2c->nmsgs);
- if (i2c->nmsgs > 1) {
+ /* Don't move on to the next message until the TX FIFO empties,
+ * so that a NAK is not missed.
+ */
+ if (i2c->nmsgs > 1 && (pend & XIIC_INTR_TX_EMPTY_MASK)) {
i2c->nmsgs--;
i2c->tx_msg++;
xfer_more = 1;
@@ -790,11 +793,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
"%s Got TX IRQ but no more to do...\n",
__func__);
}
- } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
- /* current frame is sent and is last,
- * make sure to disable tx half
- */
- xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
+ }
}
if (pend & XIIC_INTR_BNB_MASK) {
@@ -844,23 +843,11 @@ static int xiic_bus_busy(struct xiic_i2c *i2c)
return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
}
-static int xiic_busy(struct xiic_i2c *i2c)
+static int xiic_wait_not_busy(struct xiic_i2c *i2c)
{
int tries = 3;
int err;
- if (i2c->tx_msg || i2c->rx_msg)
- return -EBUSY;
-
- /* In single master mode bus can only be busy, when in use by this
- * driver. If the register indicates bus being busy for some reason we
- * should ignore it, since bus will never be released and i2c will be
- * stuck forever.
- */
- if (i2c->singlemaster) {
- return 0;
- }
-
/* for instance if the previous transfer was terminated due to a TX
* error, it might be that the bus is on its way to becoming available;
* give it at most 3 ms to wake
@@ -1104,13 +1091,36 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
mutex_lock(&i2c->lock);
- ret = xiic_busy(i2c);
- if (ret) {
+ if (i2c->tx_msg || i2c->rx_msg) {
dev_err(i2c->adap.dev.parent,
"cannot start a transfer while busy\n");
+ ret = -EBUSY;
goto out;
}
+ /* In single-master mode the bus can only be busy when in use by this
+ * driver. If the register indicates the bus is busy for some other
+ * reason, ignore it: the bus would never be released and I2C would be
+ * stuck forever.
+ */
+ if (!i2c->singlemaster) {
+ ret = xiic_wait_not_busy(i2c);
+ if (ret) {
+ /* If the bus is stuck in a busy state, such as due to spurious low
+ * pulses on the bus causing a false start condition to be detected,
+ * then try to recover by re-initializing the controller and checking
+ * again whether the bus is still busy.
+ */
+ dev_warn(i2c->adap.dev.parent, "I2C bus busy timeout, reinitializing\n");
+ ret = xiic_reinit(i2c);
+ if (ret)
+ goto out;
+ ret = xiic_wait_not_busy(i2c);
+ if (ret)
+ goto out;
+ }
+ }
+
i2c->tx_msg = msgs;
i2c->rx_msg = NULL;
i2c->nmsgs = num;
@@ -1327,8 +1337,8 @@ static int xiic_i2c_probe(struct platform_device *pdev)
return 0;
err_pm_disable:
- pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return ret;
}
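The xiic recovery flow added above reads, in isolation: wait briefly for the bus to free up, reinitialize the controller on timeout, then check once more. A sketch using the driver's own struct and helpers (xiic_wait_not_busy(), xiic_reinit()):

static int claim_bus(struct xiic_i2c *i2c)
{
	int ret;

	if (i2c->singlemaster)
		return 0;	/* a "busy" flag could only be our own stale state */

	ret = xiic_wait_not_busy(i2c);
	if (!ret)
		return 0;

	ret = xiic_reinit(i2c);	/* may clear a falsely-detected start condition */
	if (ret)
		return ret;

	return xiic_wait_not_busy(i2c);
}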
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index b63f75e44296..7c810893bfa3 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -915,6 +915,27 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
return 0;
}
+/*
+ * Serialize device instantiation in case a device can be instantiated both
+ * explicitly and by auto-detection
+ */
+static int i2c_lock_addr(struct i2c_adapter *adap, unsigned short addr,
+ unsigned short flags)
+{
+ if (!(flags & I2C_CLIENT_TEN) &&
+ test_and_set_bit(addr, adap->addrs_in_instantiation))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr,
+ unsigned short flags)
+{
+ if (!(flags & I2C_CLIENT_TEN))
+ clear_bit(addr, adap->addrs_in_instantiation);
+}
+
/**
* i2c_new_client_device - instantiate an i2c device
* @adap: the adapter managing the device
@@ -962,6 +983,10 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
goto out_err_silent;
}
+ status = i2c_lock_addr(adap, client->addr, client->flags);
+ if (status)
+ goto out_err_silent;
+
/* Check for address business */
status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client));
if (status)
@@ -993,6 +1018,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
client->name, dev_name(&client->dev));
+ i2c_unlock_addr(adap, client->addr, client->flags);
+
return client;
out_remove_swnode:
@@ -1004,6 +1031,7 @@ out_err:
dev_err(&adap->dev,
"Failed to register i2c client %s at 0x%02x (%d)\n",
client->name, client->addr, status);
+ i2c_unlock_addr(adap, client->addr, client->flags);
out_err_silent:
if (need_put)
put_device(&client->dev);
@@ -1068,7 +1096,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode);
static const struct i2c_device_id dummy_id[] = {
{ "dummy", },
{ "smbus_host_notify", },
- { },
+ { }
};
static int dummy_probe(struct i2c_client *client)
@@ -1367,10 +1395,6 @@ struct i2c_adapter *i2c_verify_adapter(struct device *dev)
}
EXPORT_SYMBOL(i2c_verify_adapter);
-#ifdef CONFIG_I2C_COMPAT
-static struct class_compat *i2c_adapter_compat_class;
-#endif
-
static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
{
struct i2c_devinfo *devinfo;
@@ -1524,7 +1548,18 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
dev_set_name(&adap->dev, "i2c-%d", adap->nr);
adap->dev.bus = &i2c_bus_type;
adap->dev.type = &i2c_adapter_type;
- res = device_register(&adap->dev);
+ device_initialize(&adap->dev);
+
+ /*
+ * This adapter can be used as a parent immediately after device_add(),
+ * so set up runtime PM (especially ignore-children) beforehand.
+ */
+ device_enable_async_suspend(&adap->dev);
+ pm_runtime_no_callbacks(&adap->dev);
+ pm_suspend_ignore_children(&adap->dev, true);
+ pm_runtime_enable(&adap->dev);
+
+ res = device_add(&adap->dev);
if (res) {
pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
goto out_list;
@@ -1536,25 +1571,12 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
if (res)
goto out_reg;
- device_enable_async_suspend(&adap->dev);
- pm_runtime_no_callbacks(&adap->dev);
- pm_suspend_ignore_children(&adap->dev, true);
- pm_runtime_enable(&adap->dev);
-
res = i2c_init_recovery(adap);
if (res == -EPROBE_DEFER)
goto out_reg;
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
-#ifdef CONFIG_I2C_COMPAT
- res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
- adap->dev.parent);
- if (res)
- dev_warn(&adap->dev,
- "Failed to create compatibility class link\n");
-#endif
-
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
i2c_acpi_install_space_handler(adap);
@@ -1761,11 +1783,6 @@ void i2c_del_adapter(struct i2c_adapter *adap)
device_for_each_child(&adap->dev, NULL, __unregister_client);
device_for_each_child(&adap->dev, NULL, __unregister_dummy);
-#ifdef CONFIG_I2C_COMPAT
- class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
- adap->dev.parent);
-#endif
-
/* device name is gone after device_unregister */
dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
@@ -2074,13 +2091,6 @@ static int __init i2c_init(void)
i2c_debugfs_root = debugfs_create_dir("i2c", NULL);
-#ifdef CONFIG_I2C_COMPAT
- i2c_adapter_compat_class = class_compat_register("i2c-adapter");
- if (!i2c_adapter_compat_class) {
- retval = -ENOMEM;
- goto bus_err;
- }
-#endif
retval = i2c_add_driver(&dummy_driver);
if (retval)
goto class_err;
@@ -2093,10 +2103,6 @@ static int __init i2c_init(void)
return 0;
class_err:
-#ifdef CONFIG_I2C_COMPAT
- class_compat_unregister(i2c_adapter_compat_class);
-bus_err:
-#endif
is_registered = false;
bus_unregister(&i2c_bus_type);
return retval;
@@ -2109,9 +2115,6 @@ static void __exit i2c_exit(void)
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier));
i2c_del_driver(&dummy_driver);
-#ifdef CONFIG_I2C_COMPAT
- class_compat_unregister(i2c_adapter_compat_class);
-#endif
debugfs_remove_recursive(i2c_debugfs_root);
bus_unregister(&i2c_bus_type);
tracepoint_synchronize_unregister();
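The new per-adapter instantiation lock is one bit per 7-bit address, taken atomically so a concurrent explicit instantiation and auto-detection of the same address cannot race. A minimal standalone model (the bitmap declaration mirrors what the patch is assumed to add to struct i2c_adapter):

#include <linux/bitmap.h>
#include <linux/bitops.h>

DECLARE_BITMAP(addrs_in_instantiation, 1 << 7);	/* one bit per 7-bit address */

static bool try_lock_addr(unsigned long *map, unsigned short addr)
{
	return !test_and_set_bit(addr, map);	/* false if another path holds it */
}

static void unlock_addr(unsigned long *map, unsigned short addr)
{
	clear_bit(addr, map);
}

Ten-bit addresses (I2C_CLIENT_TEN) skip the lock entirely, since auto-detection never probes them.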
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index e3765e12f93b..faefe1dfa8e5 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -109,15 +109,12 @@ EXPORT_SYMBOL_GPL(i2c_slave_event);
bool i2c_detect_slave_mode(struct device *dev)
{
if (IS_BUILTIN(CONFIG_OF) && dev->of_node) {
- struct device_node *child;
u32 reg;
- for_each_child_of_node(dev->of_node, child) {
+ for_each_child_of_node_scoped(dev->of_node, child) {
of_property_read_u32(child, "reg", &reg);
- if (reg & I2C_OWN_SLAVE_ADDRESS) {
- of_node_put(child);
+ if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
- }
}
} else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) {
dev_dbg(dev, "ACPI slave is not supported yet\n");
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index f4fb212b7f39..61f7c4003d2f 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -637,7 +637,6 @@ static int i2cdev_release(struct inode *inode, struct file *file)
static const struct file_operations i2cdev_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = i2cdev_read,
.write = i2cdev_write,
.unlocked_ioctl = i2cdev_ioctl,
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 4c550306f3ec..9fe3150378e8 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -6,7 +6,10 @@
* Copyright (C) 2020 by Renesas Electronics Corporation
*/
+#include <generated/utsrelease.h>
#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -14,12 +17,14 @@
#include <linux/slab.h>
#include <linux/workqueue.h> /* FIXME: is system_long_wq the best choice? */
-#define TU_CUR_VERSION 0x01
+#define TU_VERSION_MAX_LENGTH 128
enum testunit_cmds {
TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */
TU_CMD_SMBUS_HOST_NOTIFY,
TU_CMD_SMBUS_BLOCK_PROC_CALL,
+ TU_CMD_GET_VERSION_WITH_REP_START,
+ TU_CMD_SMBUS_ALERT_REQUEST,
TU_NUM_CMDS
};
@@ -39,50 +44,38 @@ struct testunit_data {
unsigned long flags;
u8 regs[TU_NUM_REGS];
u8 reg_idx;
+ u8 read_idx;
struct i2c_client *client;
struct delayed_work worker;
+ struct gpio_desc *gpio;
+ struct completion alert_done;
};
-static void i2c_slave_testunit_work(struct work_struct *work)
-{
- struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);
- struct i2c_msg msg;
- u8 msgbuf[256];
- int ret = 0;
-
- msg.addr = I2C_CLIENT_END;
- msg.buf = msgbuf;
+static char tu_version_info[] = "v" UTS_RELEASE "\n\0";
- switch (tu->regs[TU_REG_CMD]) {
- case TU_CMD_READ_BYTES:
- msg.addr = tu->regs[TU_REG_DATAL];
- msg.flags = I2C_M_RD;
- msg.len = tu->regs[TU_REG_DATAH];
- break;
+static int i2c_slave_testunit_smbalert_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct testunit_data *tu = i2c_get_clientdata(client);
- case TU_CMD_SMBUS_HOST_NOTIFY:
- msg.addr = 0x08;
- msg.flags = 0;
- msg.len = 3;
- msgbuf[0] = tu->client->addr;
- msgbuf[1] = tu->regs[TU_REG_DATAL];
- msgbuf[2] = tu->regs[TU_REG_DATAH];
+ switch (event) {
+ case I2C_SLAVE_READ_PROCESSED:
+ gpiod_set_value(tu->gpio, 0);
+ fallthrough;
+ case I2C_SLAVE_READ_REQUESTED:
+ *val = tu->regs[TU_REG_DATAL];
break;
- default:
+ case I2C_SLAVE_STOP:
+ complete(&tu->alert_done);
break;
- }
- if (msg.addr != I2C_CLIENT_END) {
- ret = i2c_transfer(tu->client->adapter, &msg, 1);
- /* convert '0 msgs transferred' to errno */
- ret = (ret == 0) ? -EIO : ret;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ case I2C_SLAVE_WRITE_RECEIVED:
+ return -EOPNOTSUPP;
}
- if (ret < 0)
- dev_err(&tu->client->dev, "CMD%02X failed (%d)\n", tu->regs[TU_REG_CMD], ret);
-
- clear_bit(TU_FLAG_IN_PROCESS, &tu->flags);
+ return 0;
}
static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
@@ -91,9 +84,20 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
struct testunit_data *tu = i2c_get_clientdata(client);
bool is_proc_call = tu->reg_idx == 3 && tu->regs[TU_REG_DATAL] == 1 &&
tu->regs[TU_REG_CMD] == TU_CMD_SMBUS_BLOCK_PROC_CALL;
+ bool is_get_version = tu->reg_idx == 3 &&
+ tu->regs[TU_REG_CMD] == TU_CMD_GET_VERSION_WITH_REP_START;
int ret = 0;
switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
+ return -EBUSY;
+
+ memset(tu->regs, 0, TU_NUM_REGS);
+ tu->reg_idx = 0;
+ tu->read_idx = 0;
+ break;
+
case I2C_SLAVE_WRITE_RECEIVED:
if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
return -EBUSY;
@@ -127,27 +131,93 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
tu->reg_idx = 0;
break;
- case I2C_SLAVE_WRITE_REQUESTED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
-
- memset(tu->regs, 0, TU_NUM_REGS);
- tu->reg_idx = 0;
- break;
-
case I2C_SLAVE_READ_PROCESSED:
- if (is_proc_call && tu->regs[TU_REG_DATAH])
+ /* Advance until we reach the NUL character */
+ if (is_get_version && tu_version_info[tu->read_idx] != 0)
+ tu->read_idx++;
+ else if (is_proc_call && tu->regs[TU_REG_DATAH])
tu->regs[TU_REG_DATAH]--;
+
fallthrough;
case I2C_SLAVE_READ_REQUESTED:
- *val = is_proc_call ? tu->regs[TU_REG_DATAH] : TU_CUR_VERSION;
+ if (is_get_version)
+ *val = tu_version_info[tu->read_idx];
+ else if (is_proc_call)
+ *val = tu->regs[TU_REG_DATAH];
+ else
+ *val = test_bit(TU_FLAG_IN_PROCESS, &tu->flags) ?
+ tu->regs[TU_REG_CMD] : 0;
break;
}
return ret;
}
+static void i2c_slave_testunit_work(struct work_struct *work)
+{
+ struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);
+ unsigned long time_left;
+ struct i2c_msg msg;
+ u8 msgbuf[256];
+ u16 orig_addr;
+ int ret = 0;
+
+ msg.addr = I2C_CLIENT_END;
+ msg.buf = msgbuf;
+
+ switch (tu->regs[TU_REG_CMD]) {
+ case TU_CMD_READ_BYTES:
+ msg.addr = tu->regs[TU_REG_DATAL];
+ msg.flags = I2C_M_RD;
+ msg.len = tu->regs[TU_REG_DATAH];
+ break;
+
+ case TU_CMD_SMBUS_HOST_NOTIFY:
+ msg.addr = 0x08;
+ msg.flags = 0;
+ msg.len = 3;
+ msgbuf[0] = tu->client->addr;
+ msgbuf[1] = tu->regs[TU_REG_DATAL];
+ msgbuf[2] = tu->regs[TU_REG_DATAH];
+ break;
+
+ case TU_CMD_SMBUS_ALERT_REQUEST:
+ i2c_slave_unregister(tu->client);
+ orig_addr = tu->client->addr;
+ tu->client->addr = 0x0c;
+ ret = i2c_slave_register(tu->client, i2c_slave_testunit_smbalert_cb);
+ if (ret)
+ goto out_smbalert;
+
+ reinit_completion(&tu->alert_done);
+ gpiod_set_value(tu->gpio, 1);
+ time_left = wait_for_completion_timeout(&tu->alert_done, HZ);
+ if (!time_left)
+ ret = -ETIMEDOUT;
+
+ i2c_slave_unregister(tu->client);
+out_smbalert:
+ tu->client->addr = orig_addr;
+ i2c_slave_register(tu->client, i2c_slave_testunit_slave_cb);
+ break;
+
+ default:
+ break;
+ }
+
+ if (msg.addr != I2C_CLIENT_END) {
+ ret = i2c_transfer(tu->client->adapter, &msg, 1);
+ /* convert '0 msgs transferred' to errno */
+ ret = (ret == 0) ? -EIO : ret;
+ }
+
+ if (ret < 0)
+ dev_err(&tu->client->dev, "CMD%02X failed (%d)\n", tu->regs[TU_REG_CMD], ret);
+
+ clear_bit(TU_FLAG_IN_PROCESS, &tu->flags);
+}
+
static int i2c_slave_testunit_probe(struct i2c_client *client)
{
struct testunit_data *tu;
@@ -158,8 +228,18 @@ static int i2c_slave_testunit_probe(struct i2c_client *client)
tu->client = client;
i2c_set_clientdata(client, tu);
+ init_completion(&tu->alert_done);
INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work);
+ tu->gpio = devm_gpiod_get_index_optional(&client->dev, NULL, 0, GPIOD_OUT_LOW);
+ if (IS_ERR(tu->gpio))
+ return PTR_ERR(tu->gpio);
+
+ if (gpiod_cansleep(tu->gpio)) {
+ dev_err(&client->dev, "GPIO access which may sleep is not allowed\n");
+ return -EDEADLK;
+ }
+
+ if (sizeof(tu_version_info) > TU_VERSION_MAX_LENGTH)
+ tu_version_info[TU_VERSION_MAX_LENGTH - 1] = 0;
+
return i2c_slave_register(client, i2c_slave_testunit_slave_cb);
};
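The SMBus Alert sequence implemented in the worker above, condensed into one place for readability (the testunit's struct, callbacks and completion are assumed): move the slave to the Alert Response Address, raise SMBALERT#, and wait for the host to read the address back.

static int run_smbalert(struct testunit_data *tu)
{
	u16 orig_addr = tu->client->addr;
	int ret;

	i2c_slave_unregister(tu->client);
	tu->client->addr = 0x0c;		/* SMBus Alert Response Address */
	ret = i2c_slave_register(tu->client, i2c_slave_testunit_smbalert_cb);
	if (ret)
		goto restore;

	reinit_completion(&tu->alert_done);
	gpiod_set_value(tu->gpio, 1);		/* assert SMBALERT# */
	if (!wait_for_completion_timeout(&tu->alert_done, HZ))
		ret = -ETIMEDOUT;

	i2c_slave_unregister(tu->client);
restore:
	tu->client->addr = orig_addr;
	i2c_slave_register(tu->client, i2c_slave_testunit_slave_cb);
	return ret;
}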
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index db1b9057612a..6d2f66810cdc 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -119,4 +119,20 @@ config I2C_MUX_MLXCPLD
This driver can also be built as a module. If so, the module
will be called i2c-mux-mlxcpld.
+config I2C_MUX_MULE
+ tristate "Theobroma Systems Mule I2C device multiplexer"
+ depends on OF && SENSORS_AMC6821
+ help
+ Mule is an MCU that emulates a set of I2C devices, some of which
+ are reachable through an I2C mux. The devices on the mux are
+ selected by writing the appropriate device number to an I2C
+ configuration register.
+
+ If you say yes to this option, support will be included for a
+ Theobroma Systems Mule I2C multiplexer. This driver provides access to
+ I2C devices connected on this mux.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mux-mule.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 6d9d865e8518..4b24f49515a7 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
obj-$(CONFIG_I2C_MUX_GPMUX) += i2c-mux-gpmux.o
obj-$(CONFIG_I2C_MUX_LTC4306) += i2c-mux-ltc4306.o
obj-$(CONFIG_I2C_MUX_MLXCPLD) += i2c-mux-mlxcpld.o
+obj-$(CONFIG_I2C_MUX_MULE) += i2c-mux-mule.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c
new file mode 100644
index 000000000000..8e942470b35f
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-mule.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Theobroma Systems Mule I2C device multiplexer
+ *
+ * Copyright (C) 2024 Theobroma Systems Design und Consulting GmbH
+ */
+
+#include <linux/i2c-mux.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define MULE_I2C_MUX_CONFIG_REG 0xff
+#define MULE_I2C_MUX_DEFAULT_DEV 0x0
+
+struct mule_i2c_reg_mux {
+ struct regmap *regmap;
+};
+
+static int mule_i2c_mux_select(struct i2c_mux_core *muxc, u32 dev)
+{
+ struct mule_i2c_reg_mux *mux = muxc->priv;
+
+ return regmap_write(mux->regmap, MULE_I2C_MUX_CONFIG_REG, dev);
+}
+
+static int mule_i2c_mux_deselect(struct i2c_mux_core *muxc, u32 dev)
+{
+ return mule_i2c_mux_select(muxc, MULE_I2C_MUX_DEFAULT_DEV);
+}
+
+static void mule_i2c_mux_remove(void *data)
+{
+ struct i2c_mux_core *muxc = data;
+
+ i2c_mux_del_adapters(muxc);
+
+ mule_i2c_mux_deselect(muxc, MULE_I2C_MUX_DEFAULT_DEV);
+}
+
+static int mule_i2c_mux_probe(struct platform_device *pdev)
+{
+ struct device *mux_dev = &pdev->dev;
+ struct mule_i2c_reg_mux *priv;
+ struct i2c_client *client;
+ struct i2c_mux_core *muxc;
+ struct device_node *dev;
+ unsigned int readback;
+ int ndev, ret;
+ bool old_fw;
+
+ /* Count devices on the mux */
+ ndev = of_get_child_count(mux_dev->of_node);
+ dev_dbg(mux_dev, "%d devices on the mux\n", ndev);
+
+ client = to_i2c_client(mux_dev->parent);
+
+ muxc = i2c_mux_alloc(client->adapter, mux_dev, ndev, sizeof(*priv),
+ I2C_MUX_LOCKED, mule_i2c_mux_select, mule_i2c_mux_deselect);
+ if (!muxc)
+ return -ENOMEM;
+
+ priv = i2c_mux_priv(muxc);
+
+ priv->regmap = dev_get_regmap(mux_dev->parent, NULL);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(mux_dev, PTR_ERR(priv->regmap),
+ "No parent i2c register map\n");
+
+ platform_set_drvdata(pdev, muxc);
+
+ /*
+ * MULE_I2C_MUX_DEFAULT_DEV is guaranteed to exist on all old and new
+ * mule fw. Mule fw without mux support will accept write ops to the
+ * config register, but readback returns 0xff (register not updated).
+ */
+ ret = mule_i2c_mux_select(muxc, MULE_I2C_MUX_DEFAULT_DEV);
+ if (ret)
+ return dev_err_probe(mux_dev, ret,
+ "Failed to write config register\n");
+
+ ret = regmap_read(priv->regmap, MULE_I2C_MUX_CONFIG_REG, &readback);
+ if (ret)
+ return dev_err_probe(mux_dev, ret,
+ "Failed to read config register\n");
+
+ old_fw = (readback != MULE_I2C_MUX_DEFAULT_DEV);
+
+ ret = devm_add_action_or_reset(mux_dev, mule_i2c_mux_remove, muxc);
+ if (ret)
+ return dev_err_probe(mux_dev, ret,
+ "Failed to register mux remove\n");
+
+ /* Create device adapters */
+ for_each_child_of_node(mux_dev->of_node, dev) {
+ u32 reg;
+
+ ret = of_property_read_u32(dev, "reg", &reg);
+ if (ret)
+ return dev_err_probe(mux_dev, ret,
+ "No reg property found for %s\n",
+ of_node_full_name(dev));
+
+ if (old_fw && reg != 0) {
+ dev_warn(mux_dev,
+ "Mux is not supported, please update Mule FW\n");
+ continue;
+ }
+
+ ret = mule_i2c_mux_select(muxc, reg);
+ if (ret) {
+ dev_warn(mux_dev,
+ "Device %d not supported, please update Mule FW\n", reg);
+ continue;
+ }
+
+ ret = i2c_mux_add_adapter(muxc, 0, reg);
+ if (ret)
+ return ret;
+ }
+
+ mule_i2c_mux_deselect(muxc, MULE_I2C_MUX_DEFAULT_DEV);
+
+ return 0;
+}
+
+static const struct of_device_id mule_i2c_mux_of_match[] = {
+ { .compatible = "tsd,mule-i2c-mux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mule_i2c_mux_of_match);
+
+static struct platform_driver mule_i2c_mux_driver = {
+ .driver = {
+ .name = "mule-i2c-mux",
+ .of_match_table = mule_i2c_mux_of_match,
+ },
+ .probe = mule_i2c_mux_probe,
+};
+
+module_platform_driver(mule_i2c_mux_driver);
+
+MODULE_AUTHOR("Farouk Bouabid <farouk.bouabid@cherry.de>");
+MODULE_DESCRIPTION("I2C mux driver for Theobroma Systems Mule");
+MODULE_LICENSE("GPL");
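The old-firmware probe in mule_i2c_mux_probe() boils down to a write/readback check: firmware without mux support ACKs the write but leaves the register at 0xff. A sketch of that detection step on its own:

#include <linux/regmap.h>

static int mule_fw_has_mux(struct regmap *regmap, bool *has_mux)
{
	unsigned int readback;
	int ret;

	ret = regmap_write(regmap, MULE_I2C_MUX_CONFIG_REG,
			   MULE_I2C_MUX_DEFAULT_DEV);
	if (ret)
		return ret;

	ret = regmap_read(regmap, MULE_I2C_MUX_CONFIG_REG, &readback);
	if (ret)
		return ret;

	*has_mux = (readback == MULE_I2C_MUX_DEFAULT_DEV);
	return 0;
}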
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 7028f03c2c42..6f3eb710a75d 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1868,6 +1868,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_bus_cleanup;
}
+ if (master->ops->set_speed) {
+ ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_SLOW_SPEED);
+ if (ret)
+ goto err_bus_cleanup;
+ }
+
/*
* Reset all dynamic address that may have been assigned before
* (assigned by the bootloader for example).
@@ -1876,6 +1882,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
if (ret && ret != I3C_ERROR_M2)
goto err_bus_cleanup;
+ if (master->ops->set_speed) {
+ ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
+ if (ret)
+ goto err_bus_cleanup;
+ }
+
/* Disable all slave events before starting DAA. */
ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
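Putting the two hunks together (with set_speed's return value checked in both places), the bus-init sequence becomes: slow open-drain, broadcast RSTDAA so legacy devices can see it, then back to normal open-drain before DAA. Condensed shape, assuming a controller that implements the optional hook:

	if (master->ops->set_speed) {
		ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_SLOW_SPEED);
		if (ret)
			goto err_bus_cleanup;
	}

	ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
	if (ret && ret != I3C_ERROR_M2)
		goto err_bus_cleanup;

	if (master->ops->set_speed) {
		ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
		if (ret)
			goto err_bus_cleanup;
	}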
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index c1627f3552ce..fe4d59833ad5 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1562,6 +1562,7 @@ static const struct of_device_id cdns_i3c_master_of_ids[] = {
{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
@@ -1666,6 +1667,7 @@ static void cdns_i3c_master_remove(struct platform_device *pdev)
{
struct cdns_i3c_master *master = platform_get_drvdata(pdev);
+ cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
clk_disable_unprepare(master->sysclk);
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
index a658e7b8262c..1f8cd5c48fde 100644
--- a/drivers/i3c/master/mipi-i3c-hci/Makefile
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -3,4 +3,5 @@
obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci.o
mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
cmd_v1.o cmd_v2.o \
- dat_v1.o dct_v1.o
+ dat_v1.o dct_v1.o \
+ hci_quirks.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index 638b054d6c92..dd636094b07f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -123,17 +123,15 @@ static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
{
struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
- if (bus->scl_rate.i3c >= 12500000)
- return MODE_I3C_SDR0;
if (bus->scl_rate.i3c > 8000000)
- return MODE_I3C_SDR1;
+ return MODE_I3C_SDR0;
if (bus->scl_rate.i3c > 6000000)
- return MODE_I3C_SDR2;
+ return MODE_I3C_SDR1;
if (bus->scl_rate.i3c > 4000000)
- return MODE_I3C_SDR3;
+ return MODE_I3C_SDR2;
if (bus->scl_rate.i3c > 2000000)
- return MODE_I3C_SDR4;
- return MODE_I3C_Fm_FmP;
+ return MODE_I3C_SDR3;
+ return MODE_I3C_SDR4;
}
static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
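The re-binning above shifts every threshold up one mode: rates are now matched against each SDR mode's upper bound instead of falling through to Fm/Fm+ below 2 MHz. A standalone illustration of the new mapping:

#include <stdio.h>

static const char *i3c_mode(unsigned long scl_hz)
{
	if (scl_hz > 8000000)
		return "SDR0";	/* up to 12.5 MHz */
	if (scl_hz > 6000000)
		return "SDR1";	/* up to 8 MHz */
	if (scl_hz > 4000000)
		return "SDR2";	/* up to 6 MHz */
	if (scl_hz > 2000000)
		return "SDR3";	/* up to 4 MHz */
	return "SDR4";		/* 2 MHz and below */
}

int main(void)
{
	unsigned long rates[] = { 12500000, 8000000, 4000000, 1000000 };

	for (int i = 0; i < 4; i++)
		printf("%lu Hz -> %s\n", rates[i], i3c_mode(rates[i]));
	return 0;	/* prints SDR0, SDR1, SDR3, SDR4 */
}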
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 4e7d6a43ee9b..a82c47c9986d 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -27,11 +26,6 @@
* Host Controller Capabilities and Operation Registers
*/
-#define reg_read(r) readl(hci->base_regs + (r))
-#define reg_write(r, v) writel(v, hci->base_regs + (r))
-#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
-#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
-
#define HCI_VERSION 0x00 /* HCI Version (in BCD) */
#define HC_CONTROL 0x04
@@ -152,6 +146,10 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
if (ret)
return ret;
+ /* Set RESP_BUF_THLD to 0 (the field holds n) so one (n+1) response triggers */
+ if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
+ amd_set_resp_buf_thld(hci);
+
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
@@ -630,8 +628,8 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
static int i3c_hci_init(struct i3c_hci *hci)
{
+ bool size_in_dwords, mode_selector;
u32 regval, offset;
- bool size_in_dwords;
int ret;
/* Validate HCI hardware version */
@@ -753,10 +751,17 @@ static int i3c_hci_init(struct i3c_hci *hci)
return -EINVAL;
}
+ mode_selector = hci->version_major > 1 ||
+ (hci->version_major == 1 && hci->version_minor > 0);
+
+ /* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */
+ if (hci->quirks & HCI_QUIRK_PIO_MODE)
+ hci->RHS_regs = NULL;
+
/* Try activating DMA operations first */
if (hci->RHS_regs) {
reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
- if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
+ if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
dev_err(&hci->master.dev, "PIO mode is stuck\n");
ret = -EIO;
} else {
@@ -768,7 +773,7 @@ static int i3c_hci_init(struct i3c_hci *hci)
/* If no DMA, try PIO */
if (!hci->io && hci->PIO_regs) {
reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
- if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
+ if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
dev_err(&hci->master.dev, "DMA mode is stuck\n");
ret = -EIO;
} else {
@@ -784,6 +789,10 @@ static int i3c_hci_init(struct i3c_hci *hci)
return ret;
}
+ /* Configure OD and PP timings for AMD platforms */
+ if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
+ amd_set_od_pp_timing(hci);
+
return 0;
}
@@ -803,6 +812,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
/* temporary for dev_printk's, to be replaced in i3c_master_register */
hci->master.dev.init_name = dev_name(&pdev->dev);
+ hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);
+
ret = i3c_hci_init(hci);
if (ret)
return ret;
@@ -834,12 +845,19 @@ static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
+static const struct acpi_device_id i3c_hci_acpi_match[] = {
+ { "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);
+
static struct platform_driver i3c_hci_driver = {
.probe = i3c_hci_probe,
.remove_new = i3c_hci_remove,
.driver = {
.name = "mipi-i3c-hci",
.of_match_table = of_match_ptr(i3c_hci_of_match),
+ .acpi_match_table = i3c_hci_acpi_match,
},
};
module_platform_driver(i3c_hci_driver);
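The AMD quirks are carried in the ACPI match data as a bitmask, fetched once in probe and tested at the affected steps. A sketch of that plumbing (struct i3c_hci and the quirk macros come from the driver):

#include <linux/property.h>

static void i3c_hci_apply_quirks(struct platform_device *pdev,
				 struct i3c_hci *hci)
{
	hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);

	if (hci->quirks & HCI_QUIRK_PIO_MODE)
		hci->RHS_regs = NULL;	/* force PIO by hiding the DMA regs */
}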
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index f94d95e024be..aaa47ac47381 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -10,6 +10,7 @@
#ifndef HCI_H
#define HCI_H
+#include <linux/io.h>
/* Handy logging macro to save on line length */
#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
@@ -26,6 +27,10 @@
#define W2_BIT_(x) BIT((x) - 64)
#define W3_BIT_(x) BIT((x) - 96)
+#define reg_read(r) readl(hci->base_regs + (r))
+#define reg_write(r, v) writel(v, hci->base_regs + (r))
+#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
+#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
struct hci_cmd_ops;
@@ -135,11 +140,16 @@ struct i3c_hci_dev_data {
/* list of quirks */
#define HCI_QUIRK_RAW_CCC BIT(1) /* CCC framing must be explicit */
+#define HCI_QUIRK_PIO_MODE BIT(2) /* Set PIO mode for AMD platforms */
+#define HCI_QUIRK_OD_PP_TIMING BIT(3) /* Set OD and PP timings for AMD platforms */
+#define HCI_QUIRK_RESP_BUF_THLD BIT(4) /* Set resp buf thld to 0 for AMD platforms */
/* global functions */
void mipi_i3c_hci_resume(struct i3c_hci *hci);
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
+void amd_set_od_pp_timing(struct i3c_hci *hci);
+void amd_set_resp_buf_thld(struct i3c_hci *hci);
#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c b/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c
new file mode 100644
index 000000000000..3b9c6e76c536
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/hci_quirks.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * I3C HCI Quirks
+ *
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+ */
+
+#include <linux/i3c/master.h>
+#include "hci.h"
+
+/* Timing registers */
+#define HCI_SCL_I3C_OD_TIMING 0x214
+#define HCI_SCL_I3C_PP_TIMING 0x218
+#define HCI_SDA_HOLD_SWITCH_DLY_TIMING 0x230
+
+/* Timing values to configure 9MHz frequency */
+#define AMD_SCL_I3C_OD_TIMING 0x00cf00cf
+#define AMD_SCL_I3C_PP_TIMING 0x00160016
+
+#define QUEUE_THLD_CTRL 0xD0
+
+void amd_set_od_pp_timing(struct i3c_hci *hci)
+{
+ u32 data;
+
+ reg_write(HCI_SCL_I3C_OD_TIMING, AMD_SCL_I3C_OD_TIMING);
+ reg_write(HCI_SCL_I3C_PP_TIMING, AMD_SCL_I3C_PP_TIMING);
+ data = reg_read(HCI_SDA_HOLD_SWITCH_DLY_TIMING);
+ /* Configure maximum TX hold time */
+ data |= W0_MASK(18, 16);
+ reg_write(HCI_SDA_HOLD_SWITCH_DLY_TIMING, data);
+}
+
+void amd_set_resp_buf_thld(struct i3c_hci *hci)
+{
+ u32 data;
+
+ data = reg_read(QUEUE_THLD_CTRL);
+ data = data & ~W0_MASK(15, 8);
+ reg_write(QUEUE_THLD_CTRL, data);
+}
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 0a68fd1b81d4..a7bfc678153e 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -127,6 +127,8 @@
/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16
+#define SVC_I3C_PPBAUD_MAX 15
+#define SVC_I3C_QUICK_I2C_CLK 4170000
#define SVC_I3C_EVENT_IBI BIT(0)
#define SVC_I3C_EVENT_HOTJOIN BIT(1)
@@ -182,6 +184,7 @@ struct svc_i3c_regs_save {
* @ibi.lock: IBI lock
* @lock: Transfer lock, protect between IBI work thread and callbacks from master
* @enabled_events: Bit masks for enable events (IBI, HotJoin).
+ * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
*/
struct svc_i3c_master {
struct i3c_master_controller base;
@@ -212,6 +215,7 @@ struct svc_i3c_master {
} ibi;
struct mutex lock;
int enabled_events;
+ u32 mctrl_config;
};
/**
@@ -529,12 +533,61 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
+ enum i3c_open_drain_speed speed)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(&master->base);
+ u32 ppbaud, odbaud, odhpp, mconfig;
+ unsigned long fclk_rate;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ switch (speed) {
+ case I3C_OPEN_DRAIN_SLOW_SPEED:
+ fclk_rate = clk_get_rate(master->fclk);
+ if (!fclk_rate) {
+ ret = -EINVAL;
+ goto rpm_out;
+ }
+ /*
+ * Use a 50% duty-cycle I2C speed for I3C open-drain mode, so the first
+ * broadcast address is visible to all I2C/I3C devices on the I3C bus.
+ * An I3C device operating as an I2C device will then turn off its 50 ns
+ * spike filter and switch to I3C mode.
+ */
+ mconfig = master->mctrl_config;
+ ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
+ odhpp = 0;
+ odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
+ mconfig &= ~GENMASK(24, 16);
+ mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
+ writel(mconfig, master->regs + SVC_I3C_MCONFIG);
+ break;
+ case I3C_OPEN_DRAIN_NORMAL_SPEED:
+ writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
+ break;
+ }
+
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
+ return ret;
+}
+
static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
struct i3c_bus *bus = i3c_master_get_bus(m);
struct i3c_device_info info = {};
unsigned long fclk_rate, fclk_period_ns;
+ unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
unsigned int high_period_ns, od_low_period_ns;
u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
int ret;
@@ -555,12 +608,15 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
}
fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
+ i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
+ i2c_scl_rate = bus->scl_rate.i2c;
+ i3c_scl_rate = bus->scl_rate.i3c;
/*
* Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
* Simplest configuration is using a 50% duty-cycle of 40ns.
*/
- ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
+ ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
pplow = 0;
/*
@@ -570,7 +626,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
*/
odhpp = 1;
high_period_ns = (ppbaud + 1) * fclk_period_ns;
- odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
+ odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
od_low_period_ns = (odbaud + 1) * high_period_ns;
switch (bus->mode) {
@@ -579,20 +635,27 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
odstop = 0;
break;
case I3C_BUS_MODE_MIXED_FAST:
- case I3C_BUS_MODE_MIXED_LIMITED:
/*
* Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
* between the high and low period does not really matter.
*/
- i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
+ i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
odstop = 1;
break;
+ case I3C_BUS_MODE_MIXED_LIMITED:
case I3C_BUS_MODE_MIXED_SLOW:
- /*
- * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
- * constraints as the FM+ mode.
- */
- i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
+ /* I3C PP, I3C OD and I2C OD all use the i2c clk rate */
+ if (ppbaud > SVC_I3C_PPBAUD_MAX) {
+ ppbaud = SVC_I3C_PPBAUD_MAX;
+ pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
+ }
+
+ high_period_ns = (ppbaud + 1) * fclk_period_ns;
+ odhpp = 0;
+ odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
+
+ od_low_period_ns = (odbaud + 1) * high_period_ns;
+ i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
odstop = 1;
break;
default:
@@ -611,6 +674,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
writel(reg, master->regs + SVC_I3C_MCONFIG);
+ master->mctrl_config = reg;
/* Master core's registration */
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
@@ -1645,6 +1709,7 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.disable_ibi = svc_i3c_master_disable_ibi,
.enable_hotjoin = svc_i3c_master_enable_hotjoin,
.disable_hotjoin = svc_i3c_master_disable_hotjoin,
+ .set_speed = svc_i3c_master_set_speed,
};
static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
@@ -1775,6 +1840,7 @@ static void svc_i3c_master_remove(struct platform_device *pdev)
{
struct svc_i3c_master *master = platform_get_drvdata(pdev);
+ cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
pm_runtime_dont_use_autosuspend(&pdev->dev);
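The new svc-i3c baud formulas replace hard-coded 40 ns/240 ns targets with rate-derived values. A standalone worked example of the slow open-drain math from svc_i3c_master_set_speed(), with illustrative numbers (fclk = 100 MHz, 12.5 MHz I3C push-pull, 100 kHz I2C open-drain, odhpp = 0 so high and low open-drain periods are equal):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long fclk = 100000000, i3c = 12500000, i2c = 100000;

	unsigned long ppbaud = DIV_ROUND_UP(fclk / 2, i3c) - 1;		/* 3 */
	unsigned long odbaud =
		DIV_ROUND_UP(fclk, i2c * (2 + 2 * ppbaud)) - 1;		/* 124 */

	/* One OD SCL period is 2*(odbaud+1)*(ppbaud+1) fclk cycles */
	unsigned long od_scl = fclk / (2 * (odbaud + 1) * (ppbaud + 1));

	printf("ppbaud=%lu odbaud=%lu od SCL=%lu Hz\n", ppbaud, odbaud, od_scl);
	return 0;	/* prints ppbaud=3 odbaud=124 od SCL=100000 Hz */
}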
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 9457e34b9e32..67aebfe0fed6 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -121,6 +121,12 @@ static unsigned int mwait_substates __initdata;
#define CPUIDLE_FLAG_INIT_XSTATE BIT(17)
/*
+ * Ignore the sub-state when matching mwait hints between the ACPI _CST and
+ * custom tables.
+ */
+#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH BIT(18)
+
+/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -1043,7 +1049,8 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.name = "C6",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
- CPUIDLE_FLAG_INIT_XSTATE,
+ CPUIDLE_FLAG_INIT_XSTATE |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 170,
.target_residency = 650,
.enter = &intel_idle,
@@ -1052,7 +1059,8 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.name = "C6P",
.desc = "MWAIT 0x21",
.flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
- CPUIDLE_FLAG_INIT_XSTATE,
+ CPUIDLE_FLAG_INIT_XSTATE |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 210,
.target_residency = 1000,
.enter = &intel_idle,
@@ -1354,7 +1362,8 @@ static struct cpuidle_state srf_cstates[] __initdata = {
{
.name = "C6S",
.desc = "MWAIT 0x22",
- .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 270,
.target_residency = 700,
.enter = &intel_idle,
@@ -1362,7 +1371,8 @@ static struct cpuidle_state srf_cstates[] __initdata = {
{
.name = "C6SP",
.desc = "MWAIT 0x23",
- .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED |
+ CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 310,
.target_residency = 900,
.enter = &intel_idle,
@@ -1744,7 +1754,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
}
}
-static bool __init intel_idle_off_by_default(u32 mwait_hint)
+static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
int cstate, limit;
@@ -1761,7 +1771,15 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
* the interesting states are ACPI_CSTATE_FFH.
*/
for (cstate = 1; cstate < limit; cstate++) {
- if (acpi_state_table.states[cstate].address == mwait_hint)
+ u32 acpi_hint = acpi_state_table.states[cstate].address;
+ u32 table_hint = mwait_hint;
+
+ if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) {
+ acpi_hint &= ~MWAIT_SUBSTATE_MASK;
+ table_hint &= ~MWAIT_SUBSTATE_MASK;
+ }
+
+ if (acpi_hint == table_hint)
return false;
}
return true;
@@ -1771,7 +1789,10 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
static inline bool intel_idle_acpi_cst_extract(void) { return false; }
static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
-static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
+static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
+{
+ return false;
+}
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
/**
@@ -2098,7 +2119,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
if ((disabled_states_mask & BIT(drv->state_count)) ||
((icpu->use_acpi || force_use_acpi) &&
- intel_idle_off_by_default(mwait_hint) &&
+ intel_idle_off_by_default(state->flags, mwait_hint) &&
!(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
state->flags |= CPUIDLE_FLAG_OFF;
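CPUIDLE_FLAG_PARTIAL_HINT_MATCH works by masking off the sub-state nibble of the 8-bit MWAIT hint before comparing the custom table against ACPI _CST. A standalone illustration: an ACPI entry of 0x20 then matches a table entry of 0x22 (e.g. the SRF C6S state above).

#include <stdio.h>
#include <stdint.h>

#define MWAIT_SUBSTATE_MASK	0xf	/* bottom nibble: sub-state */

static int hints_match(uint32_t acpi_hint, uint32_t table_hint, int partial)
{
	if (partial) {
		acpi_hint &= ~MWAIT_SUBSTATE_MASK;
		table_hint &= ~MWAIT_SUBSTATE_MASK;
	}
	return acpi_hint == table_hint;
}

int main(void)
{
	printf("%d %d\n", hints_match(0x20, 0x22, 0),	/* 0: exact compare fails */
			  hints_match(0x20, 0x22, 1));	/* 1: partial compare matches */
	return 0;
}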
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 80b57d3ee3a7..516c1a8e4d56 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -177,6 +177,33 @@ config ADXL372_I2C
To compile this driver as a module, choose M here: the
module will be called adxl372_i2c.
+config ADXL380
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config ADXL380_SPI
+ tristate "Analog Devices ADXL380 3-Axis Accelerometer SPI Driver"
+ depends on SPI
+ select ADXL380
+ select REGMAP_SPI
+ help
+ Say yes here to add support for the Analog Devices ADXL380 triaxial
+ acceleration sensor.
+ To compile this driver as a module, choose M here: the
+ module will be called adxl380_spi.
+
+config ADXL380_I2C
+ tristate "Analog Devices ADXL380 3-Axis Accelerometer I2C Driver"
+ depends on I2C
+ select ADXL380
+ select REGMAP_I2C
+ help
+ Say yes here to add support for the Analog Devices ADXL380 triaxial
+ acceleration sensor.
+ To compile this driver as a module, choose M here: the
+ module will be called adxl380_i2c.
+
config BMA180
tristate "Bosch BMA023/BMA1x0/BMA250 3-Axis Accelerometer Driver"
depends on I2C && INPUT_BMA150=n
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index db90532ba24a..ca8569e25aba 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -21,6 +21,9 @@ obj-$(CONFIG_ADXL367_SPI) += adxl367_spi.o
obj-$(CONFIG_ADXL372) += adxl372.o
obj-$(CONFIG_ADXL372_I2C) += adxl372_i2c.o
obj-$(CONFIG_ADXL372_SPI) += adxl372_spi.o
+obj-$(CONFIG_ADXL380) += adxl380.o
+obj-$(CONFIG_ADXL380_I2C) += adxl380_i2c.o
+obj-$(CONFIG_ADXL380_SPI) += adxl380_spi.o
obj-$(CONFIG_BMA180) += bma180.o
obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMA400) += bma400_core.o
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 5cf4828a5eb5..1c046e96aef9 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -1220,7 +1220,7 @@ static int adxl367_update_scan_mode(struct iio_dev *indio_dev,
return ret;
st->fifo_set_size = bitmap_weight(active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
return 0;
}
diff --git a/drivers/iio/accel/adxl367_spi.c b/drivers/iio/accel/adxl367_spi.c
index 118c894015a5..b70117265791 100644
--- a/drivers/iio/accel/adxl367_spi.c
+++ b/drivers/iio/accel/adxl367_spi.c
@@ -72,7 +72,7 @@ static int adxl367_write(void *context, const void *val_buf, size_t val_size)
return spi_sync(st->spi, &st->reg_write_msg);
}
-static struct regmap_bus adxl367_spi_regmap_bus = {
+static const struct regmap_bus adxl367_spi_regmap_bus = {
.read = adxl367_read,
.write = adxl367_write,
};
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index c4193286eb05..ef8dd557877b 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -1050,7 +1050,7 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev)
st->fifo_format = adxl372_axis_lookup_table[i].fifo_format;
st->fifo_axis_mask = adxl372_axis_lookup_table[i].bits;
st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
/* Configure the FIFO to store sets of impact event peak. */
if (st->peak_fifo_mode_en) {
diff --git a/drivers/iio/accel/adxl380.c b/drivers/iio/accel/adxl380.c
new file mode 100644
index 000000000000..98863e22bb6b
--- /dev/null
+++ b/drivers/iio/accel/adxl380.c
@@ -0,0 +1,1905 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADXL380 3-Axis Digital Accelerometer core driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/regulator/consumer.h>
+
+#include "adxl380.h"
+
+#define ADXL380_ID_VAL 380
+#define ADXL382_ID_VAL 382
+
+#define ADXL380_DEVID_AD_REG 0x00
+#define ADLX380_PART_ID_REG 0x02
+
+#define ADXL380_X_DATA_H_REG 0x15
+#define ADXL380_Y_DATA_H_REG 0x17
+#define ADXL380_Z_DATA_H_REG 0x19
+#define ADXL380_T_DATA_H_REG 0x1B
+
+#define ADXL380_MISC_0_REG 0x20
+#define ADXL380_XL382_MSK BIT(7)
+
+#define ADXL380_MISC_1_REG 0x21
+
+#define ADXL380_X_DSM_OFFSET_REG 0x4D
+
+#define ADXL380_ACT_INACT_CTL_REG 0x37
+#define ADXL380_INACT_EN_MSK BIT(2)
+#define ADXL380_ACT_EN_MSK BIT(0)
+
+#define ADXL380_SNSR_AXIS_EN_REG 0x38
+#define ADXL380_ACT_INACT_AXIS_EN_MSK GENMASK(2, 0)
+
+#define ADXL380_THRESH_ACT_H_REG 0x39
+#define ADXL380_TIME_ACT_H_REG 0x3B
+#define ADXL380_THRESH_INACT_H_REG 0x3E
+#define ADXL380_TIME_INACT_H_REG 0x40
+#define ADXL380_THRESH_MAX GENMASK(12, 0)
+#define ADXL380_TIME_MAX GENMASK(24, 0)
+
+#define ADXL380_FIFO_CONFIG_0_REG 0x30
+#define ADXL380_FIFO_SAMPLES_8_MSK BIT(0)
+#define ADXL380_FIFO_MODE_MSK GENMASK(5, 4)
+
+#define ADXL380_FIFO_DISABLED 0
+#define ADXL380_FIFO_NORMAL 1
+#define ADXL380_FIFO_STREAMED 2
+#define ADXL380_FIFO_TRIGGERED 3
+
+#define ADXL380_FIFO_CONFIG_1_REG 0x31
+#define ADXL380_FIFO_STATUS_0_REG 0x1E
+
+#define ADXL380_TAP_THRESH_REG 0x43
+#define ADXL380_TAP_DUR_REG 0x44
+#define ADXL380_TAP_LATENT_REG 0x45
+#define ADXL380_TAP_WINDOW_REG 0x46
+#define ADXL380_TAP_TIME_MAX GENMASK(7, 0)
+
+#define ADXL380_TAP_CFG_REG 0x47
+#define ADXL380_TAP_AXIS_MSK GENMASK(1, 0)
+
+#define ADXL380_TRIG_CFG_REG 0x49
+#define ADXL380_TRIG_CFG_DEC_2X_MSK BIT(7)
+#define ADXL380_TRIG_CFG_SINC_RATE_MSK BIT(6)
+
+#define ADXL380_FILTER_REG 0x50
+#define ADXL380_FILTER_EQ_FILT_MSK BIT(6)
+#define ADXL380_FILTER_LPF_MODE_MSK GENMASK(5, 4)
+#define ADXL380_FILTER_HPF_PATH_MSK BIT(3)
+#define ADXL380_FILTER_HPF_CORNER_MSK GENMASK(2, 0)
+
+#define ADXL380_OP_MODE_REG 0x26
+#define ADXL380_OP_MODE_RANGE_MSK GENMASK(7, 6)
+#define ADXL380_OP_MODE_MSK GENMASK(3, 0)
+#define ADXL380_OP_MODE_STANDBY 0
+#define ADXL380_OP_MODE_HEART_SOUND 1
+#define ADXL380_OP_MODE_ULP 2
+#define ADXL380_OP_MODE_VLP 3
+#define ADXL380_OP_MODE_LP 4
+#define ADXL380_OP_MODE_LP_ULP 6
+#define ADXL380_OP_MODE_LP_VLP 7
+#define ADXL380_OP_MODE_RBW 8
+#define ADXL380_OP_MODE_RBW_ULP 10
+#define ADXL380_OP_MODE_RBW_VLP 11
+#define ADXL380_OP_MODE_HP 12
+#define ADXL380_OP_MODE_HP_ULP 14
+#define ADXL380_OP_MODE_HP_VLP 15
+
+#define ADXL380_OP_MODE_4G_RANGE 0
+#define ADXL382_OP_MODE_15G_RANGE 0
+#define ADXL380_OP_MODE_8G_RANGE 1
+#define ADXL382_OP_MODE_30G_RANGE 1
+#define ADXL380_OP_MODE_16G_RANGE 2
+#define ADXL382_OP_MODE_60G_RANGE 2
+
+#define ADXL380_DIG_EN_REG 0x27
+#define ADXL380_CHAN_EN_MSK(chan) BIT(4 + (chan))
+#define ADXL380_FIFO_EN_MSK BIT(3)
+
+#define ADXL380_INT0_MAP0_REG 0x2B
+#define ADXL380_INT1_MAP0_REG 0x2D
+#define ADXL380_INT_MAP0_INACT_INT0_MSK BIT(6)
+#define ADXL380_INT_MAP0_ACT_INT0_MSK BIT(5)
+#define ADXL380_INT_MAP0_FIFO_WM_INT0_MSK BIT(3)
+
+#define ADXL380_INT0_MAP1_REG 0x2C
+#define ADXL380_INT1_MAP1_REG 0x2E
+#define ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK BIT(1)
+#define ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK BIT(0)
+
+#define ADXL380_INT0_REG 0x5D
+#define ADXL380_INT0_POL_MSK BIT(7)
+
+#define ADXL380_RESET_REG 0x2A
+#define ADXL380_FIFO_DATA 0x1D
+
+#define ADXL380_DEVID_AD_VAL 0xAD
+#define ADXL380_RESET_CODE 0x52
+
+#define ADXL380_STATUS_0_REG 0x11
+#define ADXL380_STATUS_0_FIFO_FULL_MSK BIT(1)
+#define ADXL380_STATUS_0_FIFO_WM_MSK BIT(3)
+
+#define ADXL380_STATUS_1_INACT_MSK BIT(6)
+#define ADXL380_STATUS_1_ACT_MSK BIT(5)
+#define ADXL380_STATUS_1_DOUBLE_TAP_MSK BIT(1)
+#define ADXL380_STATUS_1_SINGLE_TAP_MSK BIT(0)
+
+#define ADXL380_FIFO_SAMPLES 315UL
+
+enum adxl380_channels {
+ ADXL380_ACCEL_X,
+ ADXL380_ACCEL_Y,
+ ADXL380_ACCEL_Z,
+ ADXL380_TEMP,
+ ADXL380_CH_NUM
+};
+
+enum adxl380_axis {
+ ADXL380_X_AXIS,
+ ADXL380_Y_AXIS,
+ ADXL380_Z_AXIS,
+};
+
+enum adxl380_activity_type {
+ ADXL380_ACTIVITY,
+ ADXL380_INACTIVITY,
+};
+
+enum adxl380_tap_type {
+ ADXL380_SINGLE_TAP,
+ ADXL380_DOUBLE_TAP,
+};
+
+enum adxl380_tap_time_type {
+ ADXL380_TAP_TIME_LATENT,
+ ADXL380_TAP_TIME_WINDOW,
+};
+
+static const int adxl380_range_scale_factor_tbl[] = { 1, 2, 4 };
+
+const struct adxl380_chip_info adxl380_chip_info = {
+ .name = "adxl380",
+ .chip_id = ADXL380_ID_VAL,
+ .scale_tbl = {
+ [ADXL380_OP_MODE_4G_RANGE] = { 0, 1307226 },
+ [ADXL380_OP_MODE_8G_RANGE] = { 0, 2615434 },
+ [ADXL380_OP_MODE_16G_RANGE] = { 0, 5229886 },
+ },
+ .samp_freq_tbl = { 8000, 16000, 32000 },
+ /*
+ * The datasheet defines an intercept of 470 LSB at 25 degC
+	 * and a sensitivity of 10.2 LSB/degC.
+ */
+ .temp_offset = 25 * 102 / 10 - 470,
+};
+EXPORT_SYMBOL_NS_GPL(adxl380_chip_info, IIO_ADXL380);
+
+const struct adxl380_chip_info adxl382_chip_info = {
+ .name = "adxl382",
+ .chip_id = ADXL382_ID_VAL,
+ .scale_tbl = {
+ [ADXL382_OP_MODE_15G_RANGE] = { 0, 4903325 },
+ [ADXL382_OP_MODE_30G_RANGE] = { 0, 9806650 },
+ [ADXL382_OP_MODE_60G_RANGE] = { 0, 19613300 },
+ },
+ .samp_freq_tbl = { 16000, 32000, 64000 },
+ /*
+ * The datasheet defines an intercept of 570 LSB at 25 degC
+	 * and a sensitivity of 10.2 LSB/degC.
+ */
+ .temp_offset = 25 * 102 / 10 - 570,
+};
+EXPORT_SYMBOL_NS_GPL(adxl382_chip_info, IIO_ADXL380);
+
+static const unsigned int adxl380_th_reg_high_addr[2] = {
+ [ADXL380_ACTIVITY] = ADXL380_THRESH_ACT_H_REG,
+ [ADXL380_INACTIVITY] = ADXL380_THRESH_INACT_H_REG,
+};
+
+static const unsigned int adxl380_time_reg_high_addr[2] = {
+ [ADXL380_ACTIVITY] = ADXL380_TIME_ACT_H_REG,
+ [ADXL380_INACTIVITY] = ADXL380_TIME_INACT_H_REG,
+};
+
+static const unsigned int adxl380_tap_time_reg[2] = {
+ [ADXL380_TAP_TIME_LATENT] = ADXL380_TAP_LATENT_REG,
+ [ADXL380_TAP_TIME_WINDOW] = ADXL380_TAP_WINDOW_REG,
+};
+
+struct adxl380_state {
+ struct regmap *regmap;
+ struct device *dev;
+ const struct adxl380_chip_info *chip_info;
+ /*
+ * Synchronize access to members of driver state, and ensure atomicity
+ * of consecutive regmap operations.
+ */
+ struct mutex lock;
+ enum adxl380_axis tap_axis_en;
+ u8 range;
+ u8 odr;
+ u8 fifo_set_size;
+ u8 transf_buf[3];
+ u16 watermark;
+ u32 act_time_ms;
+ u32 act_threshold;
+ u32 inact_time_ms;
+ u32 inact_threshold;
+ u32 tap_latent_us;
+ u32 tap_window_us;
+ u32 tap_duration_us;
+ u32 tap_threshold;
+ int irq;
+ int int_map[2];
+ int lpf_tbl[4];
+ int hpf_tbl[7][2];
+
+ __be16 fifo_buf[ADXL380_FIFO_SAMPLES] __aligned(IIO_DMA_MINALIGN);
+};
+
+bool adxl380_readable_noinc_reg(struct device *dev, unsigned int reg)
+{
+ return reg == ADXL380_FIFO_DATA;
+}
+EXPORT_SYMBOL_NS_GPL(adxl380_readable_noinc_reg, IIO_ADXL380);
+
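+/*
+ * The device is placed in standby while configuration registers are updated;
+ * the configuration helpers below therefore bracket their register writes
+ * with adxl380_set_measure_en(st, false) / adxl380_set_measure_en(st, true).
+ */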
+static int adxl380_set_measure_en(struct adxl380_state *st, bool en)
+{
+ int ret;
+ unsigned int act_inact_ctl;
+ u8 op_mode = ADXL380_OP_MODE_STANDBY;
+
+ if (en) {
+ ret = regmap_read(st->regmap, ADXL380_ACT_INACT_CTL_REG, &act_inact_ctl);
+ if (ret)
+ return ret;
+
+		/* Activity/inactivity detection is available only in VLP/ULP mode */
+ if (FIELD_GET(ADXL380_ACT_EN_MSK, act_inact_ctl) ||
+ FIELD_GET(ADXL380_INACT_EN_MSK, act_inact_ctl))
+ op_mode = ADXL380_OP_MODE_VLP;
+ else
+ op_mode = ADXL380_OP_MODE_HP;
+ }
+
+ return regmap_update_bits(st->regmap, ADXL380_OP_MODE_REG,
+ ADXL380_OP_MODE_MSK,
+ FIELD_PREP(ADXL380_OP_MODE_MSK, op_mode));
+}
+
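+/*
+ * Activity/inactivity thresholds are cached as raw register codes, so a
+ * range change rescales them by the ratio of the old and new range scale
+ * factors (1, 2 or 4) to preserve the configured g-level.
+ */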
+static void adxl380_scale_act_inact_thresholds(struct adxl380_state *st,
+ u8 old_range,
+ u8 new_range)
+{
+ st->act_threshold = mult_frac(st->act_threshold,
+ adxl380_range_scale_factor_tbl[old_range],
+ adxl380_range_scale_factor_tbl[new_range]);
+ st->inact_threshold = mult_frac(st->inact_threshold,
+ adxl380_range_scale_factor_tbl[old_range],
+ adxl380_range_scale_factor_tbl[new_range]);
+}
+
+static int adxl380_write_act_inact_threshold(struct adxl380_state *st,
+ enum adxl380_activity_type act,
+ unsigned int th)
+{
+ int ret;
+ u8 reg = adxl380_th_reg_high_addr[act];
+
+ if (th > ADXL380_THRESH_MAX)
+ return -EINVAL;
+
+ ret = regmap_write(st->regmap, reg + 1, th & GENMASK(7, 0));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, reg, GENMASK(2, 0), th >> 8);
+ if (ret)
+ return ret;
+
+ if (act == ADXL380_ACTIVITY)
+ st->act_threshold = th;
+ else
+ st->inact_threshold = th;
+
+ return 0;
+}
+
+static int adxl380_set_act_inact_threshold(struct iio_dev *indio_dev,
+ enum adxl380_activity_type act,
+ u16 th)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_act_inact_threshold(st, act, th);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_set_tap_threshold_value(struct iio_dev *indio_dev, u8 th)
+{
+ int ret;
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL380_TAP_THRESH_REG, th);
+ if (ret)
+ return ret;
+
+ st->tap_threshold = th;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int _adxl380_write_tap_time_us(struct adxl380_state *st,
+ enum adxl380_tap_time_type tap_time_type,
+ u32 us)
+{
+ u8 reg = adxl380_tap_time_reg[tap_time_type];
+ unsigned int reg_val;
+ int ret;
+
+	/* The tap window and latent registers have a scale factor of 1250 us/LSB */
+ reg_val = DIV_ROUND_CLOSEST(us, 1250);
+ if (reg_val > ADXL380_TAP_TIME_MAX)
+ reg_val = ADXL380_TAP_TIME_MAX;
+
+ ret = regmap_write(st->regmap, reg, reg_val);
+ if (ret)
+ return ret;
+
+ if (tap_time_type == ADXL380_TAP_TIME_WINDOW)
+ st->tap_window_us = us;
+ else
+ st->tap_latent_us = us;
+
+ return 0;
+}
+
+static int adxl380_write_tap_time_us(struct adxl380_state *st,
+ enum adxl380_tap_time_type tap_time_type, u32 us)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = _adxl380_write_tap_time_us(st, tap_time_type, us);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_write_tap_dur_us(struct iio_dev *indio_dev, u32 us)
+{
+ int ret;
+ unsigned int reg_val;
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+	/* The TAP_DUR register has a scale factor of 625 us/LSB */
+ reg_val = DIV_ROUND_CLOSEST(us, 625);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL380_TAP_DUR_REG, reg_val);
+ if (ret)
+ return ret;
+
+	st->tap_duration_us = us;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_read_chn(struct adxl380_state *st, u8 addr)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_bulk_read(st->regmap, addr, &st->transf_buf, 2);
+ if (ret)
+ return ret;
+
+ return get_unaligned_be16(st->transf_buf);
+}
+
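+/*
+ * The ODR is encoded as a two-bit index, SINC_RATE as the high bit and
+ * DEC_2X as the low bit of TRIG_CFG, selecting an entry of the per-chip
+ * samp_freq_tbl.
+ */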
+static int adxl380_get_odr(struct adxl380_state *st, int *odr)
+{
+ int ret;
+ unsigned int trig_cfg, odr_idx;
+
+ ret = regmap_read(st->regmap, ADXL380_TRIG_CFG_REG, &trig_cfg);
+ if (ret)
+ return ret;
+
+ odr_idx = (FIELD_GET(ADXL380_TRIG_CFG_SINC_RATE_MSK, trig_cfg) << 1) |
+ (FIELD_GET(ADXL380_TRIG_CFG_DEC_2X_MSK, trig_cfg) & 1);
+
+ *odr = st->chip_info->samp_freq_tbl[odr_idx];
+
+ return 0;
+}
+
+static const int adxl380_lpf_div[] = {
+ 1, 4, 8, 16,
+};
+
+static int adxl380_fill_lpf_tbl(struct adxl380_state *st)
+{
+ int ret, i;
+ int odr;
+
+ ret = adxl380_get_odr(st, &odr);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(st->lpf_tbl); i++)
+ st->lpf_tbl[i] = DIV_ROUND_CLOSEST(odr, adxl380_lpf_div[i]);
+
+ return 0;
+}
+
+static const int adxl380_hpf_mul[] = {
+ 0, 247000, 62084, 15545, 3862, 954, 238,
+};
+
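+/*
+ * Each HPF corner frequency is a fixed fraction of the ODR:
+ * corner = odr * multiplier / 10^8. The math is done on the ODR expressed
+ * in microhertz with 64-bit helpers so the result can be split into an
+ * integer Hz part and a fractional microhertz part.
+ */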
+static int adxl380_fill_hpf_tbl(struct adxl380_state *st)
+{
+ int i, ret, odr_hz;
+ u32 multiplier;
+ u64 div, rem, odr;
+
+ ret = adxl380_get_odr(st, &odr_hz);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(adxl380_hpf_mul); i++) {
+ odr = mul_u64_u32_shr(odr_hz, MEGA, 0);
+ multiplier = adxl380_hpf_mul[i];
+ div = div64_u64_rem(mul_u64_u32_shr(odr, multiplier, 0),
+ TERA * 100, &rem);
+
+ st->hpf_tbl[i][0] = div;
+ st->hpf_tbl[i][1] = div_u64(rem, MEGA * 100);
+ }
+
+ return 0;
+}
+
+static int adxl380_set_odr(struct adxl380_state *st, u8 odr)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_TRIG_CFG_REG,
+ ADXL380_TRIG_CFG_DEC_2X_MSK,
+ FIELD_PREP(ADXL380_TRIG_CFG_DEC_2X_MSK, odr & 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_TRIG_CFG_REG,
+ ADXL380_TRIG_CFG_SINC_RATE_MSK,
+ FIELD_PREP(ADXL380_TRIG_CFG_SINC_RATE_MSK, odr >> 1));
+ if (ret)
+ return ret;
+
+ ret = adxl380_set_measure_en(st, true);
+ if (ret)
+ return ret;
+
+ ret = adxl380_fill_lpf_tbl(st);
+ if (ret)
+ return ret;
+
+ return adxl380_fill_hpf_tbl(st);
+}
+
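+/*
+ * Returns the index of the first exact match, or the index of the last
+ * table entry when there is no exact match.
+ */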
+static int adxl380_find_match_1d_tbl(const int *array, unsigned int size,
+ int val)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (val == array[i])
+ return i;
+ }
+
+ return size - 1;
+}
+
+static int adxl380_find_match_2d_tbl(const int (*freq_tbl)[2], int n, int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (freq_tbl[i][0] == val && freq_tbl[i][1] == val2)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int adxl380_get_lpf(struct adxl380_state *st, int *lpf)
+{
+ int ret;
+ unsigned int trig_cfg, lpf_idx;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, ADXL380_FILTER_REG, &trig_cfg);
+ if (ret)
+ return ret;
+
+ lpf_idx = FIELD_GET(ADXL380_FILTER_LPF_MODE_MSK, trig_cfg);
+
+ *lpf = st->lpf_tbl[lpf_idx];
+
+ return 0;
+}
+
+static int adxl380_set_lpf(struct adxl380_state *st, u8 lpf)
+{
+ int ret;
+ u8 eq_bypass = 0;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ if (lpf)
+ eq_bypass = 1;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG,
+ ADXL380_FILTER_EQ_FILT_MSK,
+ FIELD_PREP(ADXL380_FILTER_EQ_FILT_MSK, eq_bypass));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG,
+ ADXL380_FILTER_LPF_MODE_MSK,
+ FIELD_PREP(ADXL380_FILTER_LPF_MODE_MSK, lpf));
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_get_hpf(struct adxl380_state *st, int *hpf_int, int *hpf_frac)
+{
+ int ret;
+ unsigned int trig_cfg, hpf_idx;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, ADXL380_FILTER_REG, &trig_cfg);
+ if (ret)
+ return ret;
+
+ hpf_idx = FIELD_GET(ADXL380_FILTER_HPF_CORNER_MSK, trig_cfg);
+
+ *hpf_int = st->hpf_tbl[hpf_idx][0];
+ *hpf_frac = st->hpf_tbl[hpf_idx][1];
+
+ return 0;
+}
+
+static int adxl380_set_hpf(struct adxl380_state *st, u8 hpf)
+{
+ int ret;
+ u8 hpf_path = 0;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ if (hpf)
+ hpf_path = 1;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG,
+ ADXL380_FILTER_HPF_PATH_MSK,
+ FIELD_PREP(ADXL380_FILTER_HPF_PATH_MSK, hpf_path));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FILTER_REG,
+ ADXL380_FILTER_HPF_CORNER_MSK,
+ FIELD_PREP(ADXL380_FILTER_HPF_CORNER_MSK, hpf));
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int _adxl380_set_act_inact_time_ms(struct adxl380_state *st,
+ enum adxl380_activity_type act,
+ u32 ms)
+{
+ u8 reg = adxl380_time_reg_high_addr[act];
+ unsigned int reg_val;
+ int ret;
+
+	/* The TIME_ACT / TIME_INACT registers have a scale factor of 500 us/LSB */
+ reg_val = min(DIV_ROUND_CLOSEST(ms * 1000, 500), ADXL380_TIME_MAX);
+
+ put_unaligned_be24(reg_val, &st->transf_buf[0]);
+
+ ret = regmap_bulk_write(st->regmap, reg, st->transf_buf, sizeof(st->transf_buf));
+ if (ret)
+ return ret;
+
+ if (act == ADXL380_ACTIVITY)
+ st->act_time_ms = ms;
+ else
+ st->inact_time_ms = ms;
+
+ return 0;
+}
+
+static int adxl380_set_act_inact_time_ms(struct adxl380_state *st,
+ enum adxl380_activity_type act,
+ u32 ms)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = _adxl380_set_act_inact_time_ms(st, act, ms);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_set_range(struct adxl380_state *st, u8 range)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_OP_MODE_REG,
+ ADXL380_OP_MODE_RANGE_MSK,
+ FIELD_PREP(ADXL380_OP_MODE_RANGE_MSK, range));
+
+ if (ret)
+ return ret;
+
+ adxl380_scale_act_inact_thresholds(st, st->range, range);
+
+	/* Activity/inactivity thresholds depend on the selected range */
+ ret = adxl380_write_act_inact_threshold(st, ADXL380_ACTIVITY,
+ st->act_threshold);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_act_inact_threshold(st, ADXL380_INACTIVITY,
+ st->inact_threshold);
+ if (ret)
+ return ret;
+
+ st->range = range;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_write_act_inact_en(struct adxl380_state *st,
+ enum adxl380_activity_type type,
+ bool en)
+{
+ if (type == ADXL380_ACTIVITY)
+ return regmap_update_bits(st->regmap, ADXL380_ACT_INACT_CTL_REG,
+ ADXL380_ACT_EN_MSK,
+ FIELD_PREP(ADXL380_ACT_EN_MSK, en));
+
+ return regmap_update_bits(st->regmap, ADXL380_ACT_INACT_CTL_REG,
+ ADXL380_INACT_EN_MSK,
+ FIELD_PREP(ADXL380_INACT_EN_MSK, en));
+}
+
+static int adxl380_read_act_inact_int(struct adxl380_state *st,
+ enum adxl380_activity_type type,
+ bool *en)
+{
+ int ret;
+ unsigned int reg_val;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, st->int_map[0], &reg_val);
+ if (ret)
+ return ret;
+
+ if (type == ADXL380_ACTIVITY)
+ *en = FIELD_GET(ADXL380_INT_MAP0_ACT_INT0_MSK, reg_val);
+ else
+ *en = FIELD_GET(ADXL380_INT_MAP0_INACT_INT0_MSK, reg_val);
+
+ return 0;
+}
+
+static int adxl380_write_act_inact_int(struct adxl380_state *st,
+ enum adxl380_activity_type act,
+ bool en)
+{
+ if (act == ADXL380_ACTIVITY)
+ return regmap_update_bits(st->regmap, st->int_map[0],
+ ADXL380_INT_MAP0_ACT_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP0_ACT_INT0_MSK, en));
+
+ return regmap_update_bits(st->regmap, st->int_map[0],
+ ADXL380_INT_MAP0_INACT_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP0_INACT_INT0_MSK, en));
+}
+
+static int adxl380_act_inact_config(struct adxl380_state *st,
+ enum adxl380_activity_type type,
+ bool en)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_act_inact_en(st, type, en);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_act_inact_int(st, type, en);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_write_tap_axis(struct adxl380_state *st,
+ enum adxl380_axis axis)
+{
+ int ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_TAP_CFG_REG,
+ ADXL380_TAP_AXIS_MSK,
+ FIELD_PREP(ADXL380_TAP_AXIS_MSK, axis));
+
+ if (ret)
+ return ret;
+
+ st->tap_axis_en = axis;
+
+ return 0;
+}
+
+static int adxl380_read_tap_int(struct adxl380_state *st, enum adxl380_tap_type type, bool *en)
+{
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_read(st->regmap, st->int_map[1], &reg_val);
+ if (ret)
+ return ret;
+
+ if (type == ADXL380_SINGLE_TAP)
+ *en = FIELD_GET(ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK, reg_val);
+ else
+ *en = FIELD_GET(ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK, reg_val);
+
+ return 0;
+}
+
+static int adxl380_write_tap_int(struct adxl380_state *st, enum adxl380_tap_type type, bool en)
+{
+ if (type == ADXL380_SINGLE_TAP)
+ return regmap_update_bits(st->regmap, st->int_map[1],
+ ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP1_SINGLE_TAP_INT0_MSK, en));
+
+ return regmap_update_bits(st->regmap, st->int_map[1],
+ ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP1_DOUBLE_TAP_INT0_MSK, en));
+}
+
+static int adxl380_tap_config(struct adxl380_state *st,
+ enum adxl380_axis axis,
+ enum adxl380_tap_type type,
+ bool en)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_tap_axis(st, axis);
+ if (ret)
+ return ret;
+
+ ret = adxl380_write_tap_int(st, type, en);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
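+/*
+ * The watermark is programmed in samples (watermark scans times the number
+ * of enabled channels) as a 9-bit value split across two registers: bit 8
+ * in FIFO_CONFIG_0 and bits 7:0 in FIFO_CONFIG_1.
+ */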
+static int adxl380_set_fifo_samples(struct adxl380_state *st)
+{
+ int ret;
+ u16 fifo_samples = st->watermark * st->fifo_set_size;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FIFO_CONFIG_0_REG,
+ ADXL380_FIFO_SAMPLES_8_MSK,
+ FIELD_PREP(ADXL380_FIFO_SAMPLES_8_MSK,
+ (fifo_samples & BIT(8))));
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, ADXL380_FIFO_CONFIG_1_REG,
+ fifo_samples & 0xFF);
+}
+
+static int adxl380_get_status(struct adxl380_state *st, u8 *status0, u8 *status1)
+{
+ int ret;
+
+ /* STATUS0, STATUS1 are adjacent regs */
+ ret = regmap_bulk_read(st->regmap, ADXL380_STATUS_0_REG,
+ &st->transf_buf, 2);
+ if (ret)
+ return ret;
+
+ *status0 = st->transf_buf[0];
+ *status1 = st->transf_buf[1];
+
+ return 0;
+}
+
+static int adxl380_get_fifo_entries(struct adxl380_state *st, u16 *fifo_entries)
+{
+ int ret;
+
+ ret = regmap_bulk_read(st->regmap, ADXL380_FIFO_STATUS_0_REG,
+ &st->transf_buf, 2);
+ if (ret)
+ return ret;
+
+ *fifo_entries = st->transf_buf[0] | ((BIT(0) & st->transf_buf[1]) << 8);
+
+ return 0;
+}
+
+static void adxl380_push_event(struct iio_dev *indio_dev, s64 timestamp,
+ u8 status1)
+{
+ if (FIELD_GET(ADXL380_STATUS_1_ACT_MSK, status1))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+
+ if (FIELD_GET(ADXL380_STATUS_1_INACT_MSK, status1))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ timestamp);
+ if (FIELD_GET(ADXL380_STATUS_1_SINGLE_TAP_MSK, status1))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE, IIO_EV_DIR_SINGLETAP),
+ timestamp);
+
+ if (FIELD_GET(ADXL380_STATUS_1_DOUBLE_TAP_MSK, status1))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE, IIO_EV_DIR_DOUBLETAP),
+ timestamp);
+}
+
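+/*
+ * The threaded handler first pushes any pending activity/inactivity/tap
+ * events, then drains the FIFO one scan at a time: each enabled channel
+ * contributes one big-endian 16-bit sample, read through the no-increment
+ * FIFO_DATA register.
+ */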
+static irqreturn_t adxl380_irq_handler(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct adxl380_state *st = iio_priv(indio_dev);
+ u8 status0, status1;
+ u16 fifo_entries;
+ int i;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_get_status(st, &status0, &status1);
+ if (ret)
+ return IRQ_HANDLED;
+
+ adxl380_push_event(indio_dev, iio_get_time_ns(indio_dev), status1);
+
+ if (!FIELD_GET(ADXL380_STATUS_0_FIFO_WM_MSK, status0))
+ return IRQ_HANDLED;
+
+ ret = adxl380_get_fifo_entries(st, &fifo_entries);
+ if (ret)
+ return IRQ_HANDLED;
+
+ for (i = 0; i < fifo_entries; i += st->fifo_set_size) {
+ ret = regmap_noinc_read(st->regmap, ADXL380_FIFO_DATA,
+ &st->fifo_buf[i],
+ 2 * st->fifo_set_size);
+ if (ret)
+ return IRQ_HANDLED;
+ iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int adxl380_write_calibbias_value(struct adxl380_state *st,
+ unsigned long chan_addr,
+ s8 calibbias)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL380_X_DSM_OFFSET_REG + chan_addr, calibbias);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_read_calibbias_value(struct adxl380_state *st,
+ unsigned long chan_addr,
+ int *calibbias)
+{
+ int ret;
+ unsigned int reg_val;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, ADXL380_X_DSM_OFFSET_REG + chan_addr, &reg_val);
+ if (ret)
+ return ret;
+
+ *calibbias = sign_extend32(reg_val, 7);
+
+ return 0;
+}
+
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "1\n");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ADXL380_FIFO_SAMPLES);
+}
+
+static ssize_t adxl380_get_fifo_watermark(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%d\n", st->watermark);
+}
+
+static ssize_t adxl380_get_fifo_enabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_read(st->regmap, ADXL380_DIG_EN_REG, &reg_val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n",
+ FIELD_GET(ADXL380_FIFO_EN_MSK, reg_val));
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
+static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
+ adxl380_get_fifo_watermark, NULL, 0);
+static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
+ adxl380_get_fifo_enabled, NULL, 0);
+
+static const struct iio_dev_attr *adxl380_fifo_attributes[] = {
+ &iio_dev_attr_hwfifo_watermark_min,
+ &iio_dev_attr_hwfifo_watermark_max,
+ &iio_dev_attr_hwfifo_watermark,
+ &iio_dev_attr_hwfifo_enabled,
+ NULL
+};
+
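+/*
+ * On buffer enable: route the FIFO watermark interrupt to the chosen pin,
+ * disable channels outside the active scan mask, clamp the watermark so
+ * complete scans fit in the 315-sample FIFO, then re-enable measurement.
+ */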
+static int adxl380_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int i;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap,
+ st->int_map[0],
+ ADXL380_INT_MAP0_FIFO_WM_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP0_FIFO_WM_INT0_MSK, 1));
+ if (ret)
+ return ret;
+
+ for_each_clear_bit(i, indio_dev->active_scan_mask, ADXL380_CH_NUM) {
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG,
+ ADXL380_CHAN_EN_MSK(i),
+					 0);
+ if (ret)
+ return ret;
+ }
+
+ st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask,
+ iio_get_masklength(indio_dev));
+
+ if ((st->watermark * st->fifo_set_size) > ADXL380_FIFO_SAMPLES)
+ st->watermark = (ADXL380_FIFO_SAMPLES / st->fifo_set_size);
+
+ ret = adxl380_set_fifo_samples(st);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, ADXL380_FIFO_EN_MSK,
+ FIELD_PREP(ADXL380_FIFO_EN_MSK, 1));
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static int adxl380_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret, i;
+
+ guard(mutex)(&st->lock);
+
+ ret = adxl380_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap,
+ st->int_map[0],
+ ADXL380_INT_MAP0_FIFO_WM_INT0_MSK,
+ FIELD_PREP(ADXL380_INT_MAP0_FIFO_WM_INT0_MSK, 0));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG,
+ ADXL380_CHAN_EN_MSK(i),
+					 ADXL380_CHAN_EN_MSK(i));
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG, ADXL380_FIFO_EN_MSK,
+ FIELD_PREP(ADXL380_FIFO_EN_MSK, 0));
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+static const struct iio_buffer_setup_ops adxl380_buffer_ops = {
+ .postenable = adxl380_buffer_postenable,
+ .predisable = adxl380_buffer_predisable,
+};
+
+static int adxl380_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = adxl380_read_chn(st, chan->address);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ scoped_guard(mutex, &st->lock) {
+ *val = st->chip_info->scale_tbl[st->range][0];
+ *val2 = st->chip_info->scale_tbl[st->range][1];
+ }
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ /* 10.2 LSB / Degree Celsius */
+ *val = 10000;
+ *val2 = 102;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = st->chip_info->temp_offset;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ ret = adxl380_read_calibbias_value(st, chan->scan_index, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = adxl380_get_odr(st, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = adxl380_get_lpf(st, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ ret = adxl380_get_hpf(st, val, val2);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+static int adxl380_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (const int *)st->chip_info->scale_tbl;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *length = ARRAY_SIZE(st->chip_info->scale_tbl) * 2;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (const int *)st->chip_info->samp_freq_tbl;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(st->chip_info->samp_freq_tbl);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (const int *)st->lpf_tbl;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(st->lpf_tbl);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (const int *)st->hpf_tbl;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(st->hpf_tbl) * 2;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int odr_index, lpf_index, hpf_index, range_index;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ odr_index = adxl380_find_match_1d_tbl(st->chip_info->samp_freq_tbl,
+ ARRAY_SIZE(st->chip_info->samp_freq_tbl),
+ val);
+ return adxl380_set_odr(st, odr_index);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adxl380_write_calibbias_value(st, chan->scan_index, val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ lpf_index = adxl380_find_match_1d_tbl(st->lpf_tbl,
+ ARRAY_SIZE(st->lpf_tbl),
+ val);
+ return adxl380_set_lpf(st, lpf_index);
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ hpf_index = adxl380_find_match_2d_tbl(st->hpf_tbl,
+ ARRAY_SIZE(st->hpf_tbl),
+ val, val2);
+ if (hpf_index < 0)
+ return hpf_index;
+ return adxl380_set_hpf(st, hpf_index);
+ case IIO_CHAN_INFO_SCALE:
+ range_index = adxl380_find_match_2d_tbl(st->chip_info->scale_tbl,
+ ARRAY_SIZE(st->chip_info->scale_tbl),
+ val, val2);
+ if (range_index < 0)
+ return range_index;
+ return adxl380_set_range(st, range_index);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int adxl380_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret;
+ bool int_en;
+ bool tap_axis_en = false;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ tap_axis_en = st->tap_axis_en == ADXL380_X_AXIS;
+ break;
+ case IIO_MOD_Y:
+ tap_axis_en = st->tap_axis_en == ADXL380_Y_AXIS;
+ break;
+ case IIO_MOD_Z:
+ tap_axis_en = st->tap_axis_en == ADXL380_Z_AXIS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = adxl380_read_act_inact_int(st, ADXL380_ACTIVITY, &int_en);
+ if (ret)
+ return ret;
+ return int_en;
+ case IIO_EV_DIR_FALLING:
+ ret = adxl380_read_act_inact_int(st, ADXL380_INACTIVITY, &int_en);
+ if (ret)
+ return ret;
+ return int_en;
+ case IIO_EV_DIR_SINGLETAP:
+ ret = adxl380_read_tap_int(st, ADXL380_SINGLE_TAP, &int_en);
+ if (ret)
+ return ret;
+ return int_en && tap_axis_en;
+ case IIO_EV_DIR_DOUBLETAP:
+ ret = adxl380_read_tap_int(st, ADXL380_DOUBLE_TAP, &int_en);
+ if (ret)
+ return ret;
+ return int_en && tap_axis_en;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ enum adxl380_axis axis;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ axis = ADXL380_X_AXIS;
+ break;
+ case IIO_MOD_Y:
+ axis = ADXL380_Y_AXIS;
+ break;
+ case IIO_MOD_Z:
+ axis = ADXL380_Z_AXIS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl380_act_inact_config(st, ADXL380_ACTIVITY, state);
+ case IIO_EV_DIR_FALLING:
+ return adxl380_act_inact_config(st, ADXL380_INACTIVITY, state);
+ case IIO_EV_DIR_SINGLETAP:
+ return adxl380_tap_config(st, axis, ADXL380_SINGLE_TAP, state);
+ case IIO_EV_DIR_DOUBLETAP:
+ return adxl380_tap_config(st, axis, ADXL380_DOUBLE_TAP, state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ switch (type) {
+ case IIO_EV_TYPE_THRESH:
+ switch (info) {
+ case IIO_EV_INFO_VALUE: {
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = st->act_threshold;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ *val = st->inact_threshold;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = st->act_time_ms;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_DIR_FALLING:
+ *val = st->inact_time_ms;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = st->tap_threshold;
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ *val = st->tap_window_us;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ *val = st->tap_latent_us;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl380_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type, enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ u32 val_ms, val_us;
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (type) {
+ case IIO_EV_TYPE_THRESH:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl380_set_act_inact_threshold(indio_dev,
+ ADXL380_ACTIVITY, val);
+ case IIO_EV_DIR_FALLING:
+ return adxl380_set_act_inact_threshold(indio_dev,
+ ADXL380_INACTIVITY, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ val_ms = val * 1000 + DIV_ROUND_UP(val2, 1000);
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl380_set_act_inact_time_ms(st,
+ ADXL380_ACTIVITY, val_ms);
+ case IIO_EV_DIR_FALLING:
+ return adxl380_set_act_inact_time_ms(st,
+ ADXL380_INACTIVITY, val_ms);
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return adxl380_set_tap_threshold_value(indio_dev, val);
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ val_us = val * 1000000 + val2;
+ return adxl380_write_tap_time_us(st,
+ ADXL380_TAP_TIME_WINDOW,
+ val_us);
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ val_us = val * 1000000 + val2;
+ return adxl380_write_tap_time_us(st,
+ ADXL380_TAP_TIME_LATENT,
+ val_us);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t in_accel_gesture_tap_maxtomin_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int vals[2];
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ vals[0] = st->tap_duration_us;
+ vals[1] = MICRO;
+
+ return iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, vals);
+}
+
+static ssize_t in_accel_gesture_tap_maxtomin_time_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adxl380_state *st = iio_priv(indio_dev);
+ int ret, val_int, val_fract_us;
+
+ guard(mutex)(&st->lock);
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val_int, &val_fract_us);
+ if (ret)
+ return ret;
+
+ /* maximum value is 255 * 625 us = 0.159375 seconds */
+ if (val_int || val_fract_us > 159375 || val_fract_us < 0)
+ return -EINVAL;
+
+ ret = adxl380_write_tap_dur_us(indio_dev, val_fract_us);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_maxtomin_time, 0);
+
+static struct attribute *adxl380_event_attributes[] = {
+ &iio_dev_attr_in_accel_gesture_tap_maxtomin_time.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adxl380_event_attribute_group = {
+ .attrs = adxl380_event_attributes,
+};
+
+static int adxl380_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static int adxl380_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ st->watermark = min(val, ADXL380_FIFO_SAMPLES);
+
+ return 0;
+}
+
+static const struct iio_info adxl380_info = {
+ .read_raw = adxl380_read_raw,
+ .read_avail = &adxl380_read_avail,
+ .write_raw = adxl380_write_raw,
+ .write_raw_get_fmt = adxl380_write_raw_get_fmt,
+ .read_event_config = adxl380_read_event_config,
+ .write_event_config = adxl380_write_event_config,
+ .read_event_value = adxl380_read_event_value,
+ .write_event_value = adxl380_write_event_value,
+ .event_attrs = &adxl380_event_attribute_group,
+ .debugfs_reg_access = &adxl380_reg_access,
+ .hwfifo_set_watermark = adxl380_set_watermark,
+};
+
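+/*
+ * Rising/falling threshold events map to the activity/inactivity engines,
+ * while gesture events map to the single/double tap engines.
+ */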
+static const struct iio_event_spec adxl380_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_SINGLETAP,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT),
+ },
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_DOUBLETAP,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT) |
+ BIT(IIO_EV_INFO_TAP2_MIN_DELAY),
+ },
+};
+
+#define ADXL380_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ .event_spec = adxl380_events, \
+ .num_event_specs = ARRAY_SIZE(adxl380_events) \
+}
+
+static const struct iio_chan_spec adxl380_channels[] = {
+ ADXL380_ACCEL_CHANNEL(0, ADXL380_X_DATA_H_REG, X),
+ ADXL380_ACCEL_CHANNEL(1, ADXL380_Y_DATA_H_REG, Y),
+ ADXL380_ACCEL_CHANNEL(2, ADXL380_Z_DATA_H_REG, Z),
+ {
+ .type = IIO_TEMP,
+ .address = ADXL380_T_DATA_H_REG,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 4,
+ .endianness = IIO_BE,
+ },
+ },
+};
+
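+/*
+ * Either interrupt pin may be wired up; the "INT0"/"INT1" interrupt name
+ * selects which set of INTx_MAP registers events are routed through. Only
+ * level interrupts are supported, and the pin polarity is programmed to
+ * match the requested trigger type.
+ */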
+static int adxl380_config_irq(struct iio_dev *indio_dev)
+{
+ struct adxl380_state *st = iio_priv(indio_dev);
+ unsigned long irq_flag;
+ struct irq_data *desc;
+ u32 irq_type;
+ u8 polarity;
+ int ret;
+
+ st->irq = fwnode_irq_get_byname(dev_fwnode(st->dev), "INT0");
+ if (st->irq > 0) {
+ st->int_map[0] = ADXL380_INT0_MAP0_REG;
+ st->int_map[1] = ADXL380_INT0_MAP1_REG;
+ } else {
+ st->irq = fwnode_irq_get_byname(dev_fwnode(st->dev), "INT1");
+		if (st->irq <= 0)
+			return dev_err_probe(st->dev, -ENODEV,
+					     "no INT0 or INT1 interrupt name specified\n");
+ st->int_map[0] = ADXL380_INT1_MAP0_REG;
+ st->int_map[1] = ADXL380_INT1_MAP1_REG;
+ }
+
+ desc = irq_get_irq_data(st->irq);
+ if (!desc)
+ return dev_err_probe(st->dev, -EINVAL, "Could not find IRQ %d\n", st->irq);
+
+ irq_type = irqd_get_trigger_type(desc);
+ if (irq_type == IRQ_TYPE_LEVEL_HIGH) {
+ polarity = 0;
+ irq_flag = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ } else if (irq_type == IRQ_TYPE_LEVEL_LOW) {
+ polarity = 1;
+ irq_flag = IRQF_TRIGGER_LOW | IRQF_ONESHOT;
+ } else {
+ return dev_err_probe(st->dev, -EINVAL,
+ "Invalid interrupt 0x%x. Only level interrupts supported\n",
+ irq_type);
+ }
+
+ ret = regmap_update_bits(st->regmap, ADXL380_INT0_REG,
+ ADXL380_INT0_POL_MSK,
+ FIELD_PREP(ADXL380_INT0_POL_MSK, polarity));
+ if (ret)
+ return ret;
+
+ return devm_request_threaded_irq(st->dev, st->irq, NULL,
+ adxl380_irq_handler, irq_flag,
+ indio_dev->name, indio_dev);
+}
+
+static int adxl380_setup(struct iio_dev *indio_dev)
+{
+ unsigned int reg_val;
+ u16 part_id, chip_id;
+ int ret, i;
+ struct adxl380_state *st = iio_priv(indio_dev);
+
+ ret = regmap_read(st->regmap, ADXL380_DEVID_AD_REG, &reg_val);
+ if (ret)
+ return ret;
+
+ if (reg_val != ADXL380_DEVID_AD_VAL)
+		dev_warn(st->dev, "Unknown manufacturer ID 0x%x\n", reg_val);
+
+	ret = regmap_bulk_read(st->regmap, ADXL380_PART_ID_REG,
+ &st->transf_buf, 2);
+ if (ret)
+ return ret;
+
+ part_id = get_unaligned_be16(st->transf_buf);
+ part_id >>= 4;
+
+ if (part_id != ADXL380_ID_VAL)
+ dev_warn(st->dev, "Unknown part id %x\n", part_id);
+
+ ret = regmap_read(st->regmap, ADXL380_MISC_0_REG, &reg_val);
+ if (ret)
+ return ret;
+
+ /* Bit to differentiate between ADXL380/382. */
+ if (reg_val & ADXL380_XL382_MSK)
+ chip_id = ADXL382_ID_VAL;
+ else
+ chip_id = ADXL380_ID_VAL;
+
+ if (chip_id != st->chip_info->chip_id)
+ dev_warn(st->dev, "Unknown chip id %x\n", chip_id);
+
+ ret = regmap_write(st->regmap, ADXL380_RESET_REG, ADXL380_RESET_CODE);
+ if (ret)
+ return ret;
+
+	/*
+	 * The REG_RESET register description states that a latency of
+	 * approximately 0.5 ms is required after a soft reset.
+	 */
+ fsleep(500);
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = regmap_update_bits(st->regmap, ADXL380_DIG_EN_REG,
+ ADXL380_CHAN_EN_MSK(i),
+					 ADXL380_CHAN_EN_MSK(i));
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits(st->regmap, ADXL380_FIFO_CONFIG_0_REG,
+ ADXL380_FIFO_MODE_MSK,
+ FIELD_PREP(ADXL380_FIFO_MODE_MSK, ADXL380_FIFO_STREAMED));
+ if (ret)
+ return ret;
+
+ /* Select all 3 axis for act/inact detection. */
+ ret = regmap_update_bits(st->regmap, ADXL380_SNSR_AXIS_EN_REG,
+ ADXL380_ACT_INACT_AXIS_EN_MSK,
+ FIELD_PREP(ADXL380_ACT_INACT_AXIS_EN_MSK,
+ ADXL380_ACT_INACT_AXIS_EN_MSK));
+ if (ret)
+ return ret;
+
+ ret = adxl380_config_irq(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = adxl380_fill_lpf_tbl(st);
+ if (ret)
+ return ret;
+
+ ret = adxl380_fill_hpf_tbl(st);
+ if (ret)
+ return ret;
+
+ return adxl380_set_measure_en(st, true);
+}
+
+int adxl380_probe(struct device *dev, struct regmap *regmap,
+ const struct adxl380_chip_info *chip_info)
+{
+ struct iio_dev *indio_dev;
+ struct adxl380_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->dev = dev;
+ st->regmap = regmap;
+ st->chip_info = chip_info;
+
+ mutex_init(&st->lock);
+
+ indio_dev->channels = adxl380_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adxl380_channels);
+ indio_dev->name = chip_info->name;
+ indio_dev->info = &adxl380_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_regulator_get_enable(dev, "vddio");
+ if (ret)
+ return dev_err_probe(st->dev, ret,
+ "Failed to get vddio regulator\n");
+
+ ret = devm_regulator_get_enable(st->dev, "vsupply");
+ if (ret)
+ return dev_err_probe(st->dev, ret,
+ "Failed to get vsupply regulator\n");
+
+ ret = adxl380_setup(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_kfifo_buffer_setup_ext(st->dev, indio_dev,
+ &adxl380_buffer_ops,
+ adxl380_fifo_attributes);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_NS_GPL(adxl380_probe, IIO_ADXL380);
+
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/adxl380.h b/drivers/iio/accel/adxl380.h
new file mode 100644
index 000000000000..a683625d897a
--- /dev/null
+++ b/drivers/iio/accel/adxl380.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ADXL380 3-Axis Digital Accelerometer
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#ifndef _ADXL380_H_
+#define _ADXL380_H_
+
+struct adxl380_chip_info {
+ const char *name;
+ const int scale_tbl[3][2];
+ const int samp_freq_tbl[3];
+ const int temp_offset;
+ const u16 chip_id;
+};
+
+extern const struct adxl380_chip_info adxl380_chip_info;
+extern const struct adxl380_chip_info adxl382_chip_info;
+
+int adxl380_probe(struct device *dev, struct regmap *regmap,
+ const struct adxl380_chip_info *chip_info);
+bool adxl380_readable_noinc_reg(struct device *dev, unsigned int reg);
+
+#endif /* _ADXL380_H_ */
diff --git a/drivers/iio/accel/adxl380_i2c.c b/drivers/iio/accel/adxl380_i2c.c
new file mode 100644
index 000000000000..1dc1e77be815
--- /dev/null
+++ b/drivers/iio/accel/adxl380_i2c.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADXL380 3-Axis Digital Accelerometer I2C driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "adxl380.h"
+
+static const struct regmap_config adxl380_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .readable_noinc_reg = adxl380_readable_noinc_reg,
+};
+
+static int adxl380_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+ const struct adxl380_chip_info *chip_data;
+
+ chip_data = i2c_get_match_data(client);
+
+ regmap = devm_regmap_init_i2c(client, &adxl380_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return adxl380_probe(&client->dev, regmap, chip_data);
+}
+
+static const struct i2c_device_id adxl380_i2c_id[] = {
+ { "adxl380", (kernel_ulong_t)&adxl380_chip_info },
+ { "adxl382", (kernel_ulong_t)&adxl382_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adxl380_i2c_id);
+
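+/*
+ * A minimal (hypothetical) devicetree node for this driver could look like
+ * the following; the I2C address, GPIO controller and line number are
+ * placeholders:
+ *
+ *	accelerometer@1d {
+ *		compatible = "adi,adxl380";
+ *		reg = <0x1d>;
+ *		vddio-supply = <&vddio>;
+ *		vsupply-supply = <&vsup>;
+ *		interrupt-parent = <&gpio0>;
+ *		interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ *		interrupt-names = "INT0";
+ *	};
+ */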
+static const struct of_device_id adxl380_of_match[] = {
+ { .compatible = "adi,adxl380", .data = &adxl380_chip_info },
+ { .compatible = "adi,adxl382", .data = &adxl382_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adxl380_of_match);
+
+static struct i2c_driver adxl380_i2c_driver = {
+ .driver = {
+ .name = "adxl380_i2c",
+ .of_match_table = adxl380_of_match,
+ },
+ .probe = adxl380_i2c_probe,
+ .id_table = adxl380_i2c_id,
+};
+
+module_i2c_driver(adxl380_i2c_driver);
+
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer I2C driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_ADXL380);
diff --git a/drivers/iio/accel/adxl380_spi.c b/drivers/iio/accel/adxl380_spi.c
new file mode 100644
index 000000000000..e7b5778cb6cf
--- /dev/null
+++ b/drivers/iio/accel/adxl380_spi.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADXL380 3-Axis Digital Accelerometer SPI driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "adxl380.h"
+
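+/*
+ * SPI transfers carry a 7-bit register address in the upper bits of the
+ * first byte with the R/W flag in the LSB (set for reads), hence the
+ * 7 + 1 reg/pad bit layout and the BIT(0) read_flag_mask below.
+ */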
+static const struct regmap_config adxl380_spi_regmap_config = {
+ .reg_bits = 7,
+ .pad_bits = 1,
+ .val_bits = 8,
+ .read_flag_mask = BIT(0),
+ .readable_noinc_reg = adxl380_readable_noinc_reg,
+};
+
+static int adxl380_spi_probe(struct spi_device *spi)
+{
+ const struct adxl380_chip_info *chip_data;
+ struct regmap *regmap;
+
+ chip_data = spi_get_device_match_data(spi);
+
+ regmap = devm_regmap_init_spi(spi, &adxl380_spi_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return adxl380_probe(&spi->dev, regmap, chip_data);
+}
+
+static const struct spi_device_id adxl380_spi_id[] = {
+ { "adxl380", (kernel_ulong_t)&adxl380_chip_info },
+ { "adxl382", (kernel_ulong_t)&adxl382_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adxl380_spi_id);
+
+static const struct of_device_id adxl380_of_match[] = {
+ { .compatible = "adi,adxl380", .data = &adxl380_chip_info },
+ { .compatible = "adi,adxl382", .data = &adxl382_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adxl380_of_match);
+
+static struct spi_driver adxl380_spi_driver = {
+ .driver = {
+ .name = "adxl380_spi",
+ .of_match_table = adxl380_of_match,
+ },
+ .probe = adxl380_spi_probe,
+ .id_table = adxl380_spi_id,
+};
+
+module_spi_driver(adxl380_spi_driver);
+
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADXL380 3-axis accelerometer SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_ADXL380);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 6581772cb0c4..2445a0f7bc2b 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -876,8 +876,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = bma180_get_data_reg(data, bit);
if (ret < 0) {
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index e90e2f01550a..89db242f06e0 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -795,21 +796,19 @@ static int bma400_enable_steps(struct bma400_data *data, int val)
static int bma400_get_steps_reg(struct bma400_data *data, int *val)
{
- u8 *steps_raw;
int ret;
- steps_raw = kmalloc(BMA400_STEP_RAW_LEN, GFP_KERNEL);
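+	/* steps_raw is kfree()d automatically via scope-based cleanup on return */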
+ u8 *steps_raw __free(kfree) = kmalloc(BMA400_STEP_RAW_LEN, GFP_KERNEL);
if (!steps_raw)
return -ENOMEM;
ret = regmap_bulk_read(data->regmap, BMA400_STEP_CNT0_REG,
steps_raw, BMA400_STEP_RAW_LEN);
- if (ret) {
- kfree(steps_raw);
+ if (ret)
return ret;
- }
+
*val = get_unaligned_le24(steps_raw);
- kfree(steps_raw);
+
return IIO_VAL_INT;
}
diff --git a/drivers/iio/accel/bma400_spi.c b/drivers/iio/accel/bma400_spi.c
index ec13c044b304..765d8c4a4c4d 100644
--- a/drivers/iio/accel/bma400_spi.c
+++ b/drivers/iio/accel/bma400_spi.c
@@ -53,7 +53,7 @@ static int bma400_regmap_spi_write(void *context, const void *data,
return spi_write(spi, data, count);
}
-static struct regmap_bus bma400_regmap_bus = {
+static const struct regmap_bus bma400_regmap_bus = {
.read = bma400_regmap_spi_read,
.write = bma400_regmap_spi_write,
.read_flag_mask = BIT(7),
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index ae0cd48a3e29..0f32c1e92b4d 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -10,9 +10,9 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
@@ -387,7 +387,7 @@ static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
struct iio_mount_matrix *orientation)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct acpi_device *adev = ACPI_COMPANION(dev);
+ acpi_handle handle = ACPI_HANDLE(dev);
char *name, *alt_name, *label;
if (strcmp(dev_name(dev), "i2c-BOSC0200:base") == 0) {
@@ -398,9 +398,9 @@ static bool bmc150_apply_bosc0200_acpi_orientation(struct device *dev,
label = "accel-display";
}
- if (acpi_has_method(adev->handle, "ROTM")) {
+ if (acpi_has_method(handle, "ROTM")) {
name = "ROTM";
- } else if (acpi_has_method(adev->handle, alt_name)) {
+ } else if (acpi_has_method(handle, alt_name)) {
name = alt_name;
indio_dev->label = label;
} else {
@@ -514,7 +514,7 @@ static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
*/
irq_info = bmc150_accel_interrupts_int1;
if (data->type == BOSCH_BMC156 ||
- irq == of_irq_get_byname(dev->of_node, "INT2"))
+ irq == fwnode_irq_get_byname(dev_fwnode(dev), "INT2"))
irq_info = bmc150_accel_interrupts_int2;
for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++)
@@ -1007,8 +1007,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
int j, bit;
j = 0;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength)
+ iio_for_each_active_channel(indio_dev, bit)
memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
sizeof(data->scan.channels[0]));
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index 7b419a7b2478..df1adc059aa9 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -36,7 +36,7 @@ static int bmi088_regmap_spi_read(void *context, const void *reg,
return spi_write_then_read(spi, addr, sizeof(addr), val, val_size);
}
-static struct regmap_bus bmi088_regmap_bus = {
+static const struct regmap_bus bmi088_regmap_bus = {
.write = bmi088_regmap_spi_write,
.read = bmi088_regmap_spi_read,
};
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 0f403342b1fc..f7e4dc02b34d 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -62,7 +62,7 @@ static int cros_ec_accel_legacy_read_cmd(struct iio_dev *indio_dev,
return ret;
}
- for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
*data = st->resp->dump.sensor[sensor_num].data[i] *
st->sign[i];
data++;
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index d25e31613413..acadabec4df7 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -966,8 +966,7 @@ static int fxls8962af_fifo_flush(struct iio_dev *indio_dev)
int j, bit;
j = 0;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
sizeof(data->scan.channels[0]));
}
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 8280d2bef0a3..b76df8816323 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -173,6 +173,7 @@ enum kx_chipset {
KXCJ91008,
KXTJ21009,
KXTF9,
+ KX0221020,
KX0231025,
KX_MAX_CHIPS /* this must be last */
};
@@ -580,8 +581,8 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
return ret;
}
- /* On KX023, route all used interrupts to INT1 for now */
- if (data->chipset == KX0231025 && data->client->irq > 0) {
+ /* On KX023 and KX022, route all used interrupts to INT1 for now */
+ if ((data->chipset == KX0231025 || data->chipset == KX0221020) && data->client->irq > 0) {
ret = i2c_smbus_write_byte_data(data->client, KX023_REG_INC4,
KX023_REG_INC4_DRDY1 |
KX023_REG_INC4_WUFI1);
@@ -1507,6 +1508,7 @@ static int kxcjk1013_probe(struct i2c_client *client)
case KXTF9:
data->regs = &kxtf9_regs;
break;
+ case KX0221020:
case KX0231025:
data->regs = &kx0231025_regs;
break;
@@ -1712,6 +1714,7 @@ static const struct i2c_device_id kxcjk1013_id[] = {
{"kxcj91008", KXCJ91008},
{"kxtj21009", KXTJ21009},
{"kxtf9", KXTF9},
+ {"kx022-1020", KX0221020},
{"kx023-1025", KX0231025},
{"SMO8500", KXCJ91008},
{}
@@ -1724,6 +1727,7 @@ static const struct of_device_id kxcjk1013_of_match[] = {
{ .compatible = "kionix,kxcj91008", },
{ .compatible = "kionix,kxtj21009", },
{ .compatible = "kionix,kxtf9", },
+ { .compatible = "kionix,kx022-1020", },
{ .compatible = "kionix,kx023-1025", },
{ }
};
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index 4cdbf5424a53..57025354c7cd 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -900,8 +900,7 @@ static irqreturn_t msa311_buffer_thread(int irq, void *p)
mutex_lock(&msa311->lock);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
chan = &msa311_channels[bit];
err = msa311_get_axis(msa311, chan, &axis);
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c
index 306482b70fad..fca77d660625 100644
--- a/drivers/iio/accel/sca3300.c
+++ b/drivers/iio/accel/sca3300.c
@@ -494,8 +494,7 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p)
int bit, ret, val, i = 0;
s16 *channels = (s16 *)data->buffer;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val);
if (ret) {
dev_err_ratelimited(&data->spi->dev,
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index b3534d5751b9..abead190254b 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -448,8 +448,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
goto err;
}
} else {
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = stk8312_read_accel(data, bit);
if (ret < 0) {
mutex_unlock(&data->lock);
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 6d3c7f444d21..a32a77324e92 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -330,8 +330,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
goto err;
}
} else {
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = stk8ba50_read_accel(data,
stk8ba50_channel_table[bit]);
if (ret < 0)
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index f60fe85a30d5..97ece1a4b7e3 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -21,6 +21,18 @@ config AD_SIGMA_DELTA
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+config AD4000
+ tristate "Analog Devices AD4000 ADC Driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD4000 high speed
+ SPI analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad4000.
+
config AD4130
tristate "Analog Device AD4130 ADC Driver"
depends on SPI
@@ -36,6 +48,17 @@ config AD4130
To compile this driver as a module, choose M here: the module will be
called ad4130.
+config AD4695
+ tristate "Analog Device AD4695 ADC Driver"
+ depends on SPI
+ select REGMAP_SPI
+ help
+ Say yes here to build support for Analog Devices AD4695 and similar
+ analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad4695.
+
config AD7091R
tristate
@@ -991,6 +1014,19 @@ config NPCM_ADC
This driver can also be built as a module. If so, the module
will be called npcm_adc.
+config PAC1921
+ tristate "Microchip Technology PAC1921 driver"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Microchip Technology's PAC1921
+ High-Side Power/Current Monitor with Analog Output.
+
+ This driver can also be built as a module. If so, the module
+ will be called pac1921.
+
config PAC1934
tristate "Microchip Technology PAC1934 driver"
depends on I2C
@@ -1156,6 +1192,16 @@ config SC27XX_ADC
This driver can also be built as a module. If so, the module
will be called sc27xx_adc.
+config SOPHGO_CV1800B_ADC
+ tristate "Sophgo CV1800B SARADC"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Say yes here to build support for the SARADC integrated inside
+ the Sophgo CV1800B SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called sophgo_cv1800b_adc.
+
config SPEAR_ADC
tristate "ST SPEAr ADC"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -1171,6 +1217,7 @@ config SD_ADC_MODULATOR
tristate "Generic sigma delta modulator"
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select IIO_BACKEND
help
Select this option to enable the sigma delta modulator. This driver can
support generic sigma delta modulators.
@@ -1225,6 +1272,7 @@ config STM32_DFSDM_ADC
select IIO_BUFFER
select IIO_BUFFER_HW_CONSUMER
select IIO_TRIGGERED_BUFFER
+ select IIO_BACKEND
help
Select this option to support the ADC sigma delta modulator for the
STMicroelectronics STM32 digital filter for sigma delta converter.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index d370e066544e..7b91cd98c0e0 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -6,7 +6,9 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
+obj-$(CONFIG_AD4000) += ad4000.o
obj-$(CONFIG_AD4130) += ad4130.o
+obj-$(CONFIG_AD4695) += ad4695.o
obj-$(CONFIG_AD7091R) += ad7091r-base.o
obj-$(CONFIG_AD7091R5) += ad7091r5.o
obj-$(CONFIG_AD7091R8) += ad7091r8.o
@@ -90,6 +92,7 @@ obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
+obj-$(CONFIG_PAC1921) += pac1921.o
obj-$(CONFIG_PAC1934) += pac1934.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
@@ -105,6 +108,7 @@ obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
+obj-$(CONFIG_SOPHGO_CV1800B_ADC) += sophgo-cv1800b-adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c
new file mode 100644
index 000000000000..6ea491245084
--- /dev/null
+++ b/drivers/iio/adc/ad4000.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AD4000 SPI ADC driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+#include <linux/util_macros.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define AD4000_READ_COMMAND 0x54
+#define AD4000_WRITE_COMMAND 0x14
+
+#define AD4000_CONFIG_REG_DEFAULT 0xE1
+
+/* AD4000 Configuration Register programmable bits */
+#define AD4000_CFG_SPAN_COMP BIT(3) /* Input span compression */
+#define AD4000_CFG_HIGHZ BIT(2) /* High impedance mode */
+
+#define AD4000_SCALE_OPTIONS 2
+
+#define AD4000_TQUIET1_NS 190
+#define AD4000_TQUIET2_NS 60
+#define AD4000_TCONV_NS 320
+
+#define __AD4000_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .differential = 1, \
+ .channel = 0, \
+ .channel2 = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
+ .scan_type = { \
+ .sign = _sign, \
+ .realbits = _real_bits, \
+ .storagebits = _storage_bits, \
+ .shift = _storage_bits - _real_bits, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access) \
+ __AD4000_DIFF_CHANNEL((_sign), (_real_bits), \
+ ((_real_bits) > 16 ? 32 : 16), (_reg_access))
+
+#define __AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access)\
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
+ .scan_type = { \
+ .sign = _sign, \
+ .realbits = _real_bits, \
+ .storagebits = _storage_bits, \
+ .shift = _storage_bits - _real_bits, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access) \
+ __AD4000_PSEUDO_DIFF_CHANNEL((_sign), (_real_bits), \
+ ((_real_bits) > 16 ? 32 : 16), (_reg_access))
+
+static const char * const ad4000_power_supplies[] = {
+ "vdd", "vio"
+};
+
+enum ad4000_sdi {
+ AD4000_SDI_MOSI,
+ AD4000_SDI_VIO,
+ AD4000_SDI_CS,
+ AD4000_SDI_GND,
+};
+
+/* maps adi,sdi-pin property value to enum */
+static const char * const ad4000_sdi_pin[] = {
+ [AD4000_SDI_MOSI] = "sdi",
+ [AD4000_SDI_VIO] = "high",
+ [AD4000_SDI_CS] = "cs",
+ [AD4000_SDI_GND] = "low",
+};
+
+/* Gains stored as fractions of 1000 so they can be expressed by integers. */
+static const int ad4000_gains[] = {
+ 454, 909, 1000, 1900,
+};
+
+struct ad4000_chip_info {
+ const char *dev_name;
+ struct iio_chan_spec chan_spec;
+ struct iio_chan_spec reg_access_chan_spec;
+ bool has_hardware_gain;
+};
+
+static const struct ad4000_chip_info ad4000_chip_info = {
+ .dev_name = "ad4000",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4001_chip_info = {
+ .dev_name = "ad4001",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4002_chip_info = {
+ .dev_name = "ad4002",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4003_chip_info = {
+ .dev_name = "ad4003",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4004_chip_info = {
+ .dev_name = "ad4004",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4005_chip_info = {
+ .dev_name = "ad4005",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4006_chip_info = {
+ .dev_name = "ad4006",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4007_chip_info = {
+ .dev_name = "ad4007",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4008_chip_info = {
+ .dev_name = "ad4008",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1),
+};
+
+static const struct ad4000_chip_info ad4010_chip_info = {
+ .dev_name = "ad4010",
+ .chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0),
+ .reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4011_chip_info = {
+ .dev_name = "ad4011",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+};
+
+static const struct ad4000_chip_info ad4020_chip_info = {
+ .dev_name = "ad4020",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+};
+
+static const struct ad4000_chip_info ad4021_chip_info = {
+ .dev_name = "ad4021",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+};
+
+static const struct ad4000_chip_info ad4022_chip_info = {
+ .dev_name = "ad4022",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1),
+};
+
+static const struct ad4000_chip_info adaq4001_chip_info = {
+ .dev_name = "adaq4001",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1),
+ .has_hardware_gain = true,
+};
+
+static const struct ad4000_chip_info adaq4003_chip_info = {
+ .dev_name = "adaq4003",
+ .chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0),
+ .reg_access_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1),
+ .has_hardware_gain = true,
+};
+
+struct ad4000_state {
+ struct spi_device *spi;
+ struct gpio_desc *cnv_gpio;
+ struct spi_transfer xfers[2];
+ struct spi_message msg;
+ struct mutex lock; /* Protect read modify write cycle */
+ int vref_mv;
+ enum ad4000_sdi sdi_pin;
+ bool span_comp;
+ u16 gain_milli;
+ int scale_tbl[AD4000_SCALE_OPTIONS][2];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the transfer buffers
+ * to live in their own cache lines.
+ */
+ struct {
+ union {
+ __be16 sample_buf16;
+ __be32 sample_buf32;
+ } data;
+ s64 timestamp __aligned(8);
+ } scan __aligned(IIO_DMA_MINALIGN);
+ u8 tx_buf[2];
+ u8 rx_buf[2];
+};
+
+static void ad4000_fill_scale_tbl(struct ad4000_state *st,
+ struct iio_chan_spec const *chan)
+{
+ int val, tmp0, tmp1;
+ int scale_bits;
+ u64 tmp2;
+
+ /*
+ * ADCs that output two's complement code have one less bit to express
+ * voltage magnitude.
+ */
+ if (chan->scan_type.sign == 's')
+ scale_bits = chan->scan_type.realbits - 1;
+ else
+ scale_bits = chan->scan_type.realbits;
+
+ /*
+ * The gain is stored as a fraction of 1000 and, as we need to
+ * divide vref_mv by the gain, we invert the gain/1000 fraction.
+ * We also multiply by an extra MILLI to preserve precision.
+ * Thus, the fraction numerator is MILLI * MILLI, which equals MICRO.
+ */
+ val = mult_frac(st->vref_mv, MICRO, st->gain_milli);
+
+ /* Would multiply by NANO here but we multiplied by extra MILLI */
+ tmp2 = shift_right((u64)val * MICRO, scale_bits);
+ tmp0 = div_s64_rem(tmp2, NANO, &tmp1);
+
+ /* Store scale for when span compression is disabled */
+ st->scale_tbl[0][0] = tmp0; /* Integer part */
+ st->scale_tbl[0][1] = abs(tmp1); /* Fractional part */
+
+ /* Store scale for when span compression is enabled */
+ st->scale_tbl[1][0] = tmp0;
+
+ /* The integer part is always zero so don't bother to divide it. */
+ if (chan->differential)
+ st->scale_tbl[1][1] = DIV_ROUND_CLOSEST(abs(tmp1) * 4, 5);
+ else
+ st->scale_tbl[1][1] = DIV_ROUND_CLOSEST(abs(tmp1) * 9, 10);
+}
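The arithmetic in ad4000_fill_scale_tbl() is easiest to check with concrete numbers. Below is a minimal userspace sketch of the same computation, assuming an 18-bit signed channel, a 5 V reference, and unity gain (values chosen purely for illustration; kernel helpers are replaced by plain-C equivalents):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int vref_mv = 5000, gain_milli = 1000, realbits = 18;
	int scale_bits = realbits - 1;	/* signed code: one bit is the sign */
	/* mult_frac(st->vref_mv, MICRO, st->gain_milli) */
	int64_t val = (int64_t)vref_mv * 1000000 / gain_milli;
	/* shift_right((u64)val * MICRO, scale_bits) */
	uint64_t tmp2 = (uint64_t)val * 1000000 >> scale_bits;
	long long tmp0 = tmp2 / 1000000000;	/* integer part */
	long long tmp1 = tmp2 % 1000000000;	/* fractional part, in nano */

	/* Expect 2 * 5000 mV / 2^18 = 0.038146972 mV per LSB */
	printf("scale      = %lld.%09lld mV\n", tmp0, tmp1);
	/* Differential span compression scales the fraction by 4/5 */
	printf("compressed = %lld.%09lld mV\n", tmp0, (tmp1 * 4 + 2) / 5);
	return 0;
}

The two printed pairs correspond to the IIO_VAL_INT_PLUS_NANO entries stored in st->scale_tbl[0] and st->scale_tbl[1].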
+
+static int ad4000_write_reg(struct ad4000_state *st, uint8_t val)
+{
+ st->tx_buf[0] = AD4000_WRITE_COMMAND;
+ st->tx_buf[1] = val;
+ return spi_write(st->spi, st->tx_buf, ARRAY_SIZE(st->tx_buf));
+}
+
+static int ad4000_read_reg(struct ad4000_state *st, unsigned int *val)
+{
+ struct spi_transfer t = {
+ .tx_buf = st->tx_buf,
+ .rx_buf = st->rx_buf,
+ .len = 2,
+ };
+ int ret;
+
+ st->tx_buf[0] = AD4000_READ_COMMAND;
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret < 0)
+ return ret;
+
+ *val = st->rx_buf[1];
+ return ret;
+}
+
+static int ad4000_convert_and_acquire(struct ad4000_state *st)
+{
+ int ret;
+
+ /*
+ * In 4-wire mode, the CNV line is held high for the entire conversion
+ * and acquisition process. In other modes, the CNV GPIO is optional
+ * and, if provided, replaces the controller CS. If the CNV GPIO is not
+ * defined, gpiod_set_value_cansleep() has no effect.
+ */
+ gpiod_set_value_cansleep(st->cnv_gpio, 1);
+ ret = spi_sync(st->spi, &st->msg);
+ gpiod_set_value_cansleep(st->cnv_gpio, 0);
+
+ return ret;
+}
+
+static int ad4000_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+ u32 sample;
+ int ret;
+
+ ret = ad4000_convert_and_acquire(st);
+ if (ret < 0)
+ return ret;
+
+ if (chan->scan_type.storagebits > 16)
+ sample = be32_to_cpu(st->scan.data.sample_buf32);
+ else
+ sample = be16_to_cpu(st->scan.data.sample_buf16);
+
+ sample >>= chan->scan_type.shift;
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(sample, chan->scan_type.realbits - 1);
+ else
+ *val = sample;
+
+ return IIO_VAL_INT;
+}
+
+static int ad4000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long info)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return ad4000_single_conversion(indio_dev, chan, val);
+ unreachable();
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->scale_tbl[st->span_comp][0];
+ *val2 = st->scale_tbl[st->span_comp][1];
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 0;
+ if (st->span_comp)
+ *val = mult_frac(st->vref_mv, 1, 10);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4000_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)st->scale_tbl;
+ *length = AD4000_SCALE_OPTIONS * 2;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4000_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int ad4000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+ unsigned int reg_val;
+ bool span_comp_en;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ guard(mutex)(&st->lock);
+
+ ret = ad4000_read_reg(st, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ span_comp_en = val2 == st->scale_tbl[1][1];
+ reg_val &= ~AD4000_CFG_SPAN_COMP;
+ reg_val |= FIELD_PREP(AD4000_CFG_SPAN_COMP, span_comp_en);
+
+ ret = ad4000_write_reg(st, reg_val);
+ if (ret < 0)
+ return ret;
+
+ st->span_comp = span_comp_en;
+ return 0;
+ }
+ unreachable();
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t ad4000_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad4000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad4000_convert_and_acquire(st);
+ if (ret < 0)
+ goto err_out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, pf->timestamp);
+
+err_out:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info ad4000_reg_access_info = {
+ .read_raw = &ad4000_read_raw,
+ .read_avail = &ad4000_read_avail,
+ .write_raw = &ad4000_write_raw,
+ .write_raw_get_fmt = &ad4000_write_raw_get_fmt,
+};
+
+static const struct iio_info ad4000_info = {
+ .read_raw = &ad4000_read_raw,
+};
+
+/*
+ * This executes a data sample transfer for when the device connections are
+ * in "3-wire" mode, selected when the adi,sdi-pin device tree property is
+ * absent or set to "high". In this connection mode, the ADC SDI pin is
+ * connected to MOSI or to VIO and ADC CNV pin is connected either to a SPI
+ * controller CS or to a GPIO.
+ * AD4000 series of devices initiate conversions on the rising edge of CNV pin.
+ *
+ * If the CNV pin is connected to an SPI controller CS line (which is by default
+ * active low), the ADC readings would have a latency (delay) of one read.
+ * Moreover, since we also do ADC sampling for filling the buffer on triggered
+ * buffer mode, the timestamps of buffer readings would be disarranged.
+ * To prevent the read latency and reduce the time discrepancy between the
+ * sample read request and the time of actual sampling by the ADC, do a
+ * preparatory transfer to pulse the CS/CNV line.
+ */
+static int ad4000_prepare_3wire_mode_message(struct ad4000_state *st,
+ const struct iio_chan_spec *chan)
+{
+ unsigned int cnv_pulse_time = AD4000_TCONV_NS;
+ struct spi_transfer *xfers = st->xfers;
+
+ xfers[0].cs_change = 1;
+ xfers[0].cs_change_delay.value = cnv_pulse_time;
+ xfers[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ xfers[1].rx_buf = &st->scan.data;
+ xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[1].delay.value = AD4000_TQUIET2_NS;
+ xfers[1].delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ spi_message_init_with_transfers(&st->msg, st->xfers, 2);
+
+ return devm_spi_optimize_message(&st->spi->dev, st->spi, &st->msg);
+}
+
+/*
+ * This executes a data sample transfer for when the device connections are
+ * in "4-wire" mode, selected when the adi,sdi-pin device tree property is
+ * set to "cs". In this connection mode, the controller CS pin is connected to
+ * ADC SDI pin and a GPIO is connected to ADC CNV pin.
+ * The GPIO connected to ADC CNV pin is set outside of the SPI transfer.
+ */
+static int ad4000_prepare_4wire_mode_message(struct ad4000_state *st,
+ const struct iio_chan_spec *chan)
+{
+ unsigned int cnv_to_sdi_time = AD4000_TCONV_NS;
+ struct spi_transfer *xfers = st->xfers;
+
+ /*
+ * Dummy transfer to cause enough delay between CNV going high and SDI
+ * going low.
+ */
+ xfers[0].cs_off = 1;
+ xfers[0].delay.value = cnv_to_sdi_time;
+ xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ xfers[1].rx_buf = &st->scan.data;
+ xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+
+ spi_message_init_with_transfers(&st->msg, st->xfers, 2);
+
+ return devm_spi_optimize_message(&st->spi->dev, st->spi, &st->msg);
+}
+
+static int ad4000_config(struct ad4000_state *st)
+{
+ unsigned int reg_val = AD4000_CONFIG_REG_DEFAULT;
+
+ if (device_property_present(&st->spi->dev, "adi,high-z-input"))
+ reg_val |= FIELD_PREP(AD4000_CFG_HIGHZ, 1);
+
+ return ad4000_write_reg(st, reg_val);
+}
+
+static int ad4000_probe(struct spi_device *spi)
+{
+ const struct ad4000_chip_info *chip;
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ad4000_state *st;
+ int gain_idx, ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = spi_get_device_match_data(spi);
+ if (!chip)
+ return -EINVAL;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(ad4000_power_supplies),
+ ad4000_power_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable power supplies\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to get ref regulator reference\n");
+ st->vref_mv = ret / 1000;
+
+ st->cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_HIGH);
+ if (IS_ERR(st->cnv_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->cnv_gpio),
+ "Failed to get CNV GPIO");
+
+ ret = device_property_match_property_string(dev, "adi,sdi-pin",
+ ad4000_sdi_pin,
+ ARRAY_SIZE(ad4000_sdi_pin));
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "getting adi,sdi-pin property failed\n");
+
+ /* Default to usual SPI connections if pin properties are not present */
+ st->sdi_pin = ret == -EINVAL ? AD4000_SDI_MOSI : ret;
+ switch (st->sdi_pin) {
+ case AD4000_SDI_MOSI:
+ indio_dev->info = &ad4000_reg_access_info;
+ indio_dev->channels = &chip->reg_access_chan_spec;
+
+ /*
+ * In "3-wire mode", the ADC SDI line must be kept high when
+ * data is not being clocked out of the controller.
+ * Request the SPI controller to make MOSI idle high.
+ */
+ spi->mode |= SPI_MOSI_IDLE_HIGH;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels);
+ if (ret)
+ return ret;
+
+ ret = ad4000_config(st);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to config device\n");
+
+ break;
+ case AD4000_SDI_VIO:
+ indio_dev->info = &ad4000_info;
+ indio_dev->channels = &chip->chan_spec;
+ ret = ad4000_prepare_3wire_mode_message(st, indio_dev->channels);
+ if (ret)
+ return ret;
+
+ break;
+ case AD4000_SDI_CS:
+ indio_dev->info = &ad4000_info;
+ indio_dev->channels = &chip->chan_spec;
+ ret = ad4000_prepare_4wire_mode_message(st, indio_dev->channels);
+ if (ret)
+ return ret;
+
+ break;
+ case AD4000_SDI_GND:
+ return dev_err_probe(dev, -EPROTONOSUPPORT,
+ "Unsupported connection mode\n");
+
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unrecognized connection mode\n");
+ }
+
+ indio_dev->name = chip->dev_name;
+ indio_dev->num_channels = 1;
+
+ devm_mutex_init(dev, &st->lock);
+
+ st->gain_milli = 1000;
+ if (chip->has_hardware_gain) {
+ ret = device_property_read_u16(dev, "adi,gain-milli",
+ &st->gain_milli);
+ if (!ret) {
+ /* Match gain value from dt to one of supported gains */
+ gain_idx = find_closest(st->gain_milli, ad4000_gains,
+ ARRAY_SIZE(ad4000_gains));
+ st->gain_milli = ad4000_gains[gain_idx];
+ } else {
+ return dev_err_probe(dev, ret,
+ "Failed to read gain property\n");
+ }
+ }
+
+ ad4000_fill_scale_tbl(st, indio_dev->channels);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad4000_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ad4000_id[] = {
+ { "ad4000", (kernel_ulong_t)&ad4000_chip_info },
+ { "ad4001", (kernel_ulong_t)&ad4001_chip_info },
+ { "ad4002", (kernel_ulong_t)&ad4002_chip_info },
+ { "ad4003", (kernel_ulong_t)&ad4003_chip_info },
+ { "ad4004", (kernel_ulong_t)&ad4004_chip_info },
+ { "ad4005", (kernel_ulong_t)&ad4005_chip_info },
+ { "ad4006", (kernel_ulong_t)&ad4006_chip_info },
+ { "ad4007", (kernel_ulong_t)&ad4007_chip_info },
+ { "ad4008", (kernel_ulong_t)&ad4008_chip_info },
+ { "ad4010", (kernel_ulong_t)&ad4010_chip_info },
+ { "ad4011", (kernel_ulong_t)&ad4011_chip_info },
+ { "ad4020", (kernel_ulong_t)&ad4020_chip_info },
+ { "ad4021", (kernel_ulong_t)&ad4021_chip_info },
+ { "ad4022", (kernel_ulong_t)&ad4022_chip_info },
+ { "adaq4001", (kernel_ulong_t)&adaq4001_chip_info },
+ { "adaq4003", (kernel_ulong_t)&adaq4003_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad4000_id);
+
+static const struct of_device_id ad4000_of_match[] = {
+ { .compatible = "adi,ad4000", .data = &ad4000_chip_info },
+ { .compatible = "adi,ad4001", .data = &ad4001_chip_info },
+ { .compatible = "adi,ad4002", .data = &ad4002_chip_info },
+ { .compatible = "adi,ad4003", .data = &ad4003_chip_info },
+ { .compatible = "adi,ad4004", .data = &ad4004_chip_info },
+ { .compatible = "adi,ad4005", .data = &ad4005_chip_info },
+ { .compatible = "adi,ad4006", .data = &ad4006_chip_info },
+ { .compatible = "adi,ad4007", .data = &ad4007_chip_info },
+ { .compatible = "adi,ad4008", .data = &ad4008_chip_info },
+ { .compatible = "adi,ad4010", .data = &ad4010_chip_info },
+ { .compatible = "adi,ad4011", .data = &ad4011_chip_info },
+ { .compatible = "adi,ad4020", .data = &ad4020_chip_info },
+ { .compatible = "adi,ad4021", .data = &ad4021_chip_info },
+ { .compatible = "adi,ad4022", .data = &ad4022_chip_info },
+ { .compatible = "adi,adaq4001", .data = &adaq4001_chip_info },
+ { .compatible = "adi,adaq4003", .data = &adaq4003_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad4000_of_match);
+
+static struct spi_driver ad4000_driver = {
+ .driver = {
+ .name = "ad4000",
+ .of_match_table = ad4000_of_match,
+ },
+ .probe = ad4000_probe,
+ .id_table = ad4000_id,
+};
+module_spi_driver(ad4000_driver);
+
+MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD4000 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
new file mode 100644
index 000000000000..595ec4158e73
--- /dev/null
+++ b/drivers/iio/adc/ad4695.c
@@ -0,0 +1,1185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI ADC driver for Analog Devices Inc. AD4695 and similar chips
+ *
+ * https://www.analog.com/en/products/ad4695.html
+ * https://www.analog.com/en/products/ad4696.html
+ * https://www.analog.com/en/products/ad4697.html
+ * https://www.analog.com/en/products/ad4698.html
+ *
+ * Copyright 2024 Analog Devices Inc.
+ * Copyright 2024 BayLibre, SAS
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/minmax.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <dt-bindings/iio/adi,ad4695.h>
+
+/* AD4695 registers */
+#define AD4695_REG_SPI_CONFIG_A 0x0000
+#define AD4695_REG_SPI_CONFIG_A_SW_RST (BIT(7) | BIT(0))
+#define AD4695_REG_SPI_CONFIG_A_ADDR_DIR BIT(5)
+#define AD4695_REG_SPI_CONFIG_B 0x0001
+#define AD4695_REG_SPI_CONFIG_B_INST_MODE BIT(7)
+#define AD4695_REG_DEVICE_TYPE 0x0003
+#define AD4695_REG_SCRATCH_PAD 0x000A
+#define AD4695_REG_VENDOR_L 0x000C
+#define AD4695_REG_VENDOR_H 0x000D
+#define AD4695_REG_LOOP_MODE 0x000E
+#define AD4695_REG_SPI_CONFIG_C 0x0010
+#define AD4695_REG_SPI_CONFIG_C_MB_STRICT BIT(7)
+#define AD4695_REG_SPI_STATUS 0x0011
+#define AD4695_REG_STATUS 0x0014
+#define AD4695_REG_ALERT_STATUS1 0x0015
+#define AD4695_REG_ALERT_STATUS2 0x0016
+#define AD4695_REG_CLAMP_STATUS 0x001A
+#define AD4695_REG_SETUP 0x0020
+#define AD4695_REG_SETUP_LDO_EN BIT(4)
+#define AD4695_REG_SETUP_SPI_MODE BIT(2)
+#define AD4695_REG_SETUP_SPI_CYC_CTRL BIT(1)
+#define AD4695_REG_REF_CTRL 0x0021
+#define AD4695_REG_REF_CTRL_OV_MODE BIT(7)
+#define AD4695_REG_REF_CTRL_VREF_SET GENMASK(4, 2)
+#define AD4695_REG_REF_CTRL_REFHIZ_EN BIT(1)
+#define AD4695_REG_REF_CTRL_REFBUF_EN BIT(0)
+#define AD4695_REG_SEQ_CTRL 0x0022
+#define AD4695_REG_SEQ_CTRL_STD_SEQ_EN BIT(7)
+#define AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS GENMASK(6, 0)
+#define AD4695_REG_AC_CTRL 0x0023
+#define AD4695_REG_STD_SEQ_CONFIG 0x0024
+#define AD4695_REG_GPIO_CTRL 0x0026
+#define AD4695_REG_GP_MODE 0x0027
+#define AD4695_REG_TEMP_CTRL 0x0029
+#define AD4695_REG_TEMP_CTRL_TEMP_EN BIT(0)
+#define AD4695_REG_CONFIG_IN(n) (0x0030 | (n))
+#define AD4695_REG_CONFIG_IN_MODE BIT(6)
+#define AD4695_REG_CONFIG_IN_PAIR GENMASK(5, 4)
+#define AD4695_REG_CONFIG_IN_AINHIGHZ_EN BIT(3)
+#define AD4695_REG_UPPER_IN(n) (0x0040 | (2 * (n)))
+#define AD4695_REG_LOWER_IN(n) (0x0060 | (2 * (n)))
+#define AD4695_REG_HYST_IN(n) (0x0080 | (2 * (n)))
+#define AD4695_REG_OFFSET_IN(n) (0x00A0 | (2 * (n)))
+#define AD4695_REG_GAIN_IN(n) (0x00C0 | (2 * (n)))
+#define AD4695_REG_AS_SLOT(n) (0x0100 | (n))
+#define AD4695_REG_AS_SLOT_INX GENMASK(3, 0)
+
+/* Conversion mode commands */
+#define AD4695_CMD_EXIT_CNV_MODE 0x0A
+#define AD4695_CMD_TEMP_CHAN 0x0F
+#define AD4695_CMD_VOLTAGE_CHAN(n) (0x10 | (n))
+
+/* timing specs */
+#define AD4695_T_CONVERT_NS 415
+#define AD4695_T_WAKEUP_HW_MS 3
+#define AD4695_T_WAKEUP_SW_MS 3
+#define AD4695_T_REFBUF_MS 100
+#define AD4695_T_REGCONFIG_NS 20
+#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
+
+/* Max number of voltage input channels. */
+#define AD4695_MAX_CHANNELS 16
+/* Max size of 1 raw sample in bytes. */
+#define AD4695_MAX_CHANNEL_SIZE 2
+
+enum ad4695_in_pair {
+ AD4695_IN_PAIR_REFGND,
+ AD4695_IN_PAIR_COM,
+ AD4695_IN_PAIR_EVEN_ODD,
+};
+
+struct ad4695_chip_info {
+ const char *name;
+ int max_sample_rate;
+ u32 t_acq_ns;
+ u8 num_voltage_inputs;
+};
+
+struct ad4695_channel_config {
+ unsigned int channel;
+ bool highz_en;
+ bool bipolar;
+ enum ad4695_in_pair pin_pairing;
+ unsigned int common_mode_mv;
+};
+
+struct ad4695_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct regmap *regmap16;
+ struct gpio_desc *reset_gpio;
+ /* voltages channels plus temperature and timestamp */
+ struct iio_chan_spec iio_chan[AD4695_MAX_CHANNELS + 2];
+ struct ad4695_channel_config channels_cfg[AD4695_MAX_CHANNELS];
+ const struct ad4695_chip_info *chip_info;
+ /* Reference voltage. */
+ unsigned int vref_mv;
+ /* Common mode input pin voltage. */
+ unsigned int com_mv;
+ /* 1 per voltage and temperature chan plus 1 xfer to trigger 1st CNV */
+ struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS + 2];
+ struct spi_message buf_read_msg;
+ /* Raw conversion data received. */
+ u8 buf[ALIGN((AD4695_MAX_CHANNELS + 2) * AD4695_MAX_CHANNEL_SIZE,
+ sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
+ u16 raw_data;
+ /* Commands to send for single conversion. */
+ u16 cnv_cmd;
+ u8 cnv_cmd2;
+};
+
+static const struct regmap_range ad4695_regmap_rd_ranges[] = {
+ regmap_reg_range(AD4695_REG_SPI_CONFIG_A, AD4695_REG_SPI_CONFIG_B),
+ regmap_reg_range(AD4695_REG_DEVICE_TYPE, AD4695_REG_DEVICE_TYPE),
+ regmap_reg_range(AD4695_REG_SCRATCH_PAD, AD4695_REG_SCRATCH_PAD),
+ regmap_reg_range(AD4695_REG_VENDOR_L, AD4695_REG_LOOP_MODE),
+ regmap_reg_range(AD4695_REG_SPI_CONFIG_C, AD4695_REG_SPI_STATUS),
+ regmap_reg_range(AD4695_REG_STATUS, AD4695_REG_ALERT_STATUS2),
+ regmap_reg_range(AD4695_REG_CLAMP_STATUS, AD4695_REG_CLAMP_STATUS),
+ regmap_reg_range(AD4695_REG_SETUP, AD4695_REG_AC_CTRL),
+ regmap_reg_range(AD4695_REG_GPIO_CTRL, AD4695_REG_TEMP_CTRL),
+ regmap_reg_range(AD4695_REG_CONFIG_IN(0), AD4695_REG_CONFIG_IN(15)),
+ regmap_reg_range(AD4695_REG_AS_SLOT(0), AD4695_REG_AS_SLOT(127)),
+};
+
+static const struct regmap_access_table ad4695_regmap_rd_table = {
+ .yes_ranges = ad4695_regmap_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad4695_regmap_rd_ranges),
+};
+
+static const struct regmap_range ad4695_regmap_wr_ranges[] = {
+ regmap_reg_range(AD4695_REG_SPI_CONFIG_A, AD4695_REG_SPI_CONFIG_B),
+ regmap_reg_range(AD4695_REG_SCRATCH_PAD, AD4695_REG_SCRATCH_PAD),
+ regmap_reg_range(AD4695_REG_LOOP_MODE, AD4695_REG_LOOP_MODE),
+ regmap_reg_range(AD4695_REG_SPI_CONFIG_C, AD4695_REG_SPI_STATUS),
+ regmap_reg_range(AD4695_REG_SETUP, AD4695_REG_AC_CTRL),
+ regmap_reg_range(AD4695_REG_GPIO_CTRL, AD4695_REG_TEMP_CTRL),
+ regmap_reg_range(AD4695_REG_CONFIG_IN(0), AD4695_REG_CONFIG_IN(15)),
+ regmap_reg_range(AD4695_REG_AS_SLOT(0), AD4695_REG_AS_SLOT(127)),
+};
+
+static const struct regmap_access_table ad4695_regmap_wr_table = {
+ .yes_ranges = ad4695_regmap_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad4695_regmap_wr_ranges),
+};
+
+static const struct regmap_config ad4695_regmap_config = {
+ .name = "ad4695-8",
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = AD4695_REG_AS_SLOT(127),
+ .rd_table = &ad4695_regmap_rd_table,
+ .wr_table = &ad4695_regmap_wr_table,
+ .can_multi_write = true,
+};
+
+static const struct regmap_range ad4695_regmap16_rd_ranges[] = {
+ regmap_reg_range(AD4695_REG_STD_SEQ_CONFIG, AD4695_REG_STD_SEQ_CONFIG),
+ regmap_reg_range(AD4695_REG_UPPER_IN(0), AD4695_REG_GAIN_IN(15)),
+};
+
+static const struct regmap_access_table ad4695_regmap16_rd_table = {
+ .yes_ranges = ad4695_regmap16_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad4695_regmap16_rd_ranges),
+};
+
+static const struct regmap_range ad4695_regmap16_wr_ranges[] = {
+ regmap_reg_range(AD4695_REG_STD_SEQ_CONFIG, AD4695_REG_STD_SEQ_CONFIG),
+ regmap_reg_range(AD4695_REG_UPPER_IN(0), AD4695_REG_GAIN_IN(15)),
+};
+
+static const struct regmap_access_table ad4695_regmap16_wr_table = {
+ .yes_ranges = ad4695_regmap16_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad4695_regmap16_wr_ranges),
+};
+
+static const struct regmap_config ad4695_regmap16_config = {
+ .name = "ad4695-16",
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .max_register = AD4695_REG_GAIN_IN(15),
+ .rd_table = &ad4695_regmap16_rd_table,
+ .wr_table = &ad4695_regmap16_wr_table,
+ .can_multi_write = true,
+};
+
+static const struct iio_chan_spec ad4695_channel_template = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ },
+};
+
+static const struct iio_chan_spec ad4695_temp_channel_template = {
+ .address = AD4695_CMD_TEMP_CHAN,
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ },
+};
+
+static const struct iio_chan_spec ad4695_soft_timestamp_channel_template =
+ IIO_CHAN_SOFT_TIMESTAMP(0);
+
+static const char * const ad4695_power_supplies[] = {
+ "avdd", "vio"
+};
+
+static const struct ad4695_chip_info ad4695_chip_info = {
+ .name = "ad4695",
+ .max_sample_rate = 500 * KILO,
+ .t_acq_ns = 1715,
+ .num_voltage_inputs = 16,
+};
+
+static const struct ad4695_chip_info ad4696_chip_info = {
+ .name = "ad4696",
+ .max_sample_rate = 1 * MEGA,
+ .t_acq_ns = 715,
+ .num_voltage_inputs = 16,
+};
+
+static const struct ad4695_chip_info ad4697_chip_info = {
+ .name = "ad4697",
+ .max_sample_rate = 500 * KILO,
+ .t_acq_ns = 1715,
+ .num_voltage_inputs = 8,
+};
+
+static const struct ad4695_chip_info ad4698_chip_info = {
+ .name = "ad4698",
+ .max_sample_rate = 1 * MEGA,
+ .t_acq_ns = 715,
+ .num_voltage_inputs = 8,
+};
+
+/**
+ * ad4695_set_single_cycle_mode - Set the device in single cycle mode
+ * @st: The AD4695 state
+ * @channel: The first channel to read
+ *
+ * As per the datasheet, to enable single cycle mode, we need to set
+ * STD_SEQ_EN=0, NUM_SLOTS_AS=0 and CYC_CTRL=1 (Table 15). Setting SPI_MODE=1
+ * triggers the first conversion using the channel in AS_SLOT0.
+ *
+ * Context: can sleep, must be called with iio_device_claim_direct held
+ * Return: 0 on success, a negative error code on failure
+ */
+static int ad4695_set_single_cycle_mode(struct ad4695_state *st,
+ unsigned int channel)
+{
+ int ret;
+
+ ret = regmap_clear_bits(st->regmap, AD4695_REG_SEQ_CTRL,
+ AD4695_REG_SEQ_CTRL_STD_SEQ_EN |
+ AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4695_REG_AS_SLOT(0),
+ FIELD_PREP(AD4695_REG_AS_SLOT_INX, channel));
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(st->regmap, AD4695_REG_SETUP,
+ AD4695_REG_SETUP_SPI_MODE |
+ AD4695_REG_SETUP_SPI_CYC_CTRL);
+}
+
+/**
+ * ad4695_enter_advanced_sequencer_mode - Put the ADC in advanced sequencer mode
+ * @st: The driver state
+ * @n: The number of slots to use - must be >= 2, <= 128
+ *
+ * As per the datasheet, to enable advanced sequencer, we need to set
+ * STD_SEQ_EN=0, NUM_SLOTS_AS=n-1 and CYC_CTRL=0 (Table 15). Setting SPI_MODE=1
+ * triggers the first conversion using the channel in AS_SLOT0.
+ *
+ * Return: 0 on success, a negative error code on failure
+ */
+static int ad4695_enter_advanced_sequencer_mode(struct ad4695_state *st, u32 n)
+{
+ int ret;
+
+ ret = regmap_update_bits(st->regmap, AD4695_REG_SEQ_CTRL,
+ AD4695_REG_SEQ_CTRL_STD_SEQ_EN |
+ AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS,
+ FIELD_PREP(AD4695_REG_SEQ_CTRL_STD_SEQ_EN, 0) |
+ FIELD_PREP(AD4695_REG_SEQ_CTRL_NUM_SLOTS_AS, n - 1));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(st->regmap, AD4695_REG_SETUP,
+ AD4695_REG_SETUP_SPI_MODE | AD4695_REG_SETUP_SPI_CYC_CTRL,
+ FIELD_PREP(AD4695_REG_SETUP_SPI_MODE, 1) |
+ FIELD_PREP(AD4695_REG_SETUP_SPI_CYC_CTRL, 0));
+}
+
+/**
+ * ad4695_exit_conversion_mode - Exit conversion mode
+ * @st: The AD4695 state
+ *
+ * Sends SPI command to exit conversion mode.
+ *
+ * Return: 0 on success, a negative error code on failure
+ */
+static int ad4695_exit_conversion_mode(struct ad4695_state *st)
+{
+ struct spi_transfer xfer = {
+ .tx_buf = &st->cnv_cmd2,
+ .len = 1,
+ .delay.value = AD4695_T_REGCONFIG_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ };
+
+ /*
+ * Technically, this could be a 5-bit transfer, but the command is shifted
+ * to the start of an 8-bit word instead for better SPI controller support.
+ */
+ st->cnv_cmd2 = AD4695_CMD_EXIT_CNV_MODE << 3;
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
+static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv)
+{
+ u8 val;
+
+ if (vref_mv >= 2400 && vref_mv <= 2750)
+ val = 0;
+ else if (vref_mv > 2750 && vref_mv <= 3250)
+ val = 1;
+ else if (vref_mv > 3250 && vref_mv <= 3750)
+ val = 2;
+ else if (vref_mv > 3750 && vref_mv <= 4500)
+ val = 3;
+ else if (vref_mv > 4500 && vref_mv <= 5100)
+ val = 4;
+ else
+ return -EINVAL;
+
+ return regmap_update_bits(st->regmap, AD4695_REG_REF_CTRL,
+ AD4695_REG_REF_CTRL_VREF_SET,
+ FIELD_PREP(AD4695_REG_REF_CTRL_VREF_SET, val));
+}
+
+static int ad4695_write_chn_cfg(struct ad4695_state *st,
+ struct ad4695_channel_config *cfg)
+{
+ u32 mask, val;
+
+ mask = AD4695_REG_CONFIG_IN_MODE;
+ val = FIELD_PREP(AD4695_REG_CONFIG_IN_MODE, cfg->bipolar ? 1 : 0);
+
+ mask |= AD4695_REG_CONFIG_IN_PAIR;
+ val |= FIELD_PREP(AD4695_REG_CONFIG_IN_PAIR, cfg->pin_pairing);
+
+ mask |= AD4695_REG_CONFIG_IN_AINHIGHZ_EN;
+ val |= FIELD_PREP(AD4695_REG_CONFIG_IN_AINHIGHZ_EN,
+ cfg->highz_en ? 1 : 0);
+
+ return regmap_update_bits(st->regmap,
+ AD4695_REG_CONFIG_IN(cfg->channel),
+ mask, val);
+}
+
+static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ struct spi_transfer *xfer;
+ u8 temp_chan_bit = st->chip_info->num_voltage_inputs;
+ u32 bit, num_xfer, num_slots;
+ u32 temp_en = 0;
+ int ret;
+
+ /*
+ * We are using the advanced sequencer since it is the only way to read
+ * multiple channels that allows individual configuration of each
+ * voltage input channel. Slot 0 in the advanced sequencer is used to
+ * account for the gap between trigger polls - we don't read data from
+ * this slot. Each enabled voltage channel is assigned a slot starting
+ * with slot 1.
+ */
+ num_slots = 1;
+
+ memset(st->buf_read_xfer, 0, sizeof(st->buf_read_xfer));
+
+ /* First xfer is only to trigger conversion of slot 1, so no rx. */
+ xfer = &st->buf_read_xfer[0];
+ xfer->cs_change = 1;
+ xfer->delay.value = st->chip_info->t_acq_ns;
+ xfer->delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ num_xfer = 1;
+
+ iio_for_each_active_channel(indio_dev, bit) {
+ xfer = &st->buf_read_xfer[num_xfer];
+ xfer->bits_per_word = 16;
+ xfer->rx_buf = &st->buf[(num_xfer - 1) * 2];
+ xfer->len = 2;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ if (bit == temp_chan_bit) {
+ temp_en = 1;
+ } else {
+ ret = regmap_write(st->regmap,
+ AD4695_REG_AS_SLOT(num_slots),
+ FIELD_PREP(AD4695_REG_AS_SLOT_INX, bit));
+ if (ret)
+ return ret;
+
+ num_slots++;
+ }
+
+ num_xfer++;
+ }
+
+ /*
+ * The advanced sequencer requires that at least 2 slots are enabled.
+ * Since slot 0 is always used for other purposes, we need only 1
+ * enabled voltage channel to meet this requirement. If the temperature
+ * channel is the only enabled channel, we need to add one more slot
+ * in the sequence but not read from it.
+ */
+ if (num_slots < 2) {
+ /* move last xfer so we can insert one more xfer before it */
+ st->buf_read_xfer[num_xfer] = *xfer;
+ num_xfer++;
+
+ /* modify 2nd to last xfer for extra slot */
+ memset(xfer, 0, sizeof(*xfer));
+ xfer->cs_change = 1;
+ xfer->delay.value = st->chip_info->t_acq_ns;
+ xfer->delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer->cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer++;
+
+ /* and add the extra slot in the sequencer */
+ ret = regmap_write(st->regmap,
+ AD4695_REG_AS_SLOT(num_slots),
+ FIELD_PREP(AD4695_REG_AS_SLOT_INX, 0));
+ if (ret)
+ return ret;
+
+ num_slots++;
+ }
+
+ /*
+ * Don't keep CS asserted after last xfer. Also triggers conversion of
+ * slot 0.
+ */
+ xfer->cs_change = 0;
+
+ /*
+ * Temperature channel isn't included in the sequence, but rather
+ * controlled by setting a bit in the TEMP_CTRL register.
+ */
+
+ ret = regmap_update_bits(st->regmap, AD4695_REG_TEMP_CTRL,
+ AD4695_REG_TEMP_CTRL_TEMP_EN,
+ FIELD_PREP(AD4695_REG_TEMP_CTRL_TEMP_EN, temp_en));
+ if (ret)
+ return ret;
+
+ spi_message_init_with_transfers(&st->buf_read_msg, st->buf_read_xfer,
+ num_xfer);
+
+ ret = spi_optimize_message(st->spi, &st->buf_read_msg);
+ if (ret)
+ return ret;
+
+ /* This triggers conversion of slot 0. */
+ ret = ad4695_enter_advanced_sequencer_mode(st, num_slots);
+ if (ret)
+ spi_unoptimize_message(&st->buf_read_msg);
+
+ return ret;
+}
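The slot and transfer bookkeeping above is easier to follow with a concrete mask. Here is a toy sketch (plain C, not driver code) that mimics the assignment loop for a hypothetical scan mask with voltage inputs 2 and 5 plus the temperature channel enabled on a 16-input part, so temp_chan_bit is 16:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 1UL << 2 | 1UL << 5 | 1UL << 16;
	int temp_chan_bit = 16, num_xfer = 1, num_slots = 1, temp_en = 0;

	/* xfer 0 only triggers the conversion of slot 1, so no rx */
	for (int bit = 0; bit <= 17; bit++) {
		if (!(mask & (1UL << bit)))
			continue;
		if (bit == temp_chan_bit)
			temp_en = 1;	/* read via TEMP_CTRL, takes no slot */
		else
			printf("AS_SLOT(%d) = IN%d\n", num_slots++, bit);
		num_xfer++;
	}
	printf("num_xfer=%d num_slots=%d temp_en=%d\n",
	       num_xfer, num_slots, temp_en);
	return 0;
}

This prints AS_SLOT(1) = IN2 and AS_SLOT(2) = IN5 with num_xfer = 4; num_slots ends at 3, so the fewer-than-two-slots fallback above is not taken.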
+
+static int ad4695_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad4695_exit_conversion_mode(st);
+ if (ret)
+ return ret;
+
+ spi_unoptimize_message(&st->buf_read_msg);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad4695_buffer_setup_ops = {
+ .preenable = ad4695_buffer_preenable,
+ .postdisable = ad4695_buffer_postdisable,
+};
+
+static irqreturn_t ad4695_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad4695_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = spi_sync(st->spi, &st->buf_read_msg);
+ if (ret)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->buf, pf->timestamp);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ad4695_read_one_sample - Read a single sample using single-cycle mode
+ * @st: The AD4695 state
+ * @address: The address of the channel to read
+ *
+ * Upon successful return, the sample will be stored in `st->raw_data`.
+ *
+ * Context: can sleep, must be called with iio_device_claim_direct held
+ * Return: 0 on success, a negative error code on failure
+ */
+static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
+{
+ struct spi_transfer xfer[2] = { };
+ int ret, i = 0;
+
+ ret = ad4695_set_single_cycle_mode(st, address);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting the first channel to the temperature channel isn't supported
+ * in single-cycle mode, so we have to do an extra xfer to read the
+ * temperature.
+ */
+ if (address == AD4695_CMD_TEMP_CHAN) {
+ /* We aren't reading, so we can make this a short xfer. */
+ st->cnv_cmd2 = AD4695_CMD_TEMP_CHAN << 3;
+ xfer[0].tx_buf = &st->cnv_cmd2;
+ xfer[0].len = 1;
+ xfer[0].cs_change = 1;
+ xfer[0].cs_change_delay.value = AD4695_T_CONVERT_NS;
+ xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ i = 1;
+ }
+
+ /* Then read the result and exit conversion mode. */
+ st->cnv_cmd = AD4695_CMD_EXIT_CNV_MODE << 11;
+ xfer[i].bits_per_word = 16;
+ xfer[i].tx_buf = &st->cnv_cmd;
+ xfer[i].rx_buf = &st->raw_data;
+ xfer[i].len = 2;
+
+ return spi_sync_transfer(st->spi, xfer, i + 1);
+}
+
+static int ad4695_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ struct ad4695_channel_config *cfg = &st->channels_cfg[chan->scan_index];
+ u8 realbits = chan->scan_type.realbits;
+ unsigned int reg_val;
+ int ret, tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = ad4695_read_one_sample(st, chan->address);
+ if (ret)
+ return ret;
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(st->raw_data, realbits - 1);
+ else
+ *val = st->raw_data;
+
+ return IIO_VAL_INT;
+ }
+ unreachable();
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ /* T_scale (°C) = raw * V_REF (mV) / (-1.8 mV/°C * 2^16) */
+ *val = st->vref_mv * -556;
+ *val2 = 16;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (cfg->pin_pairing == AD4695_IN_PAIR_COM)
+ *val = st->com_mv * (1 << realbits) / st->vref_mv;
+ else if (cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD)
+ *val = cfg->common_mode_mv * (1 << realbits) / st->vref_mv;
+ else
+ *val = 0;
+
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ /* T_offset (°C) = -725 mV / (-1.8 mV/°C) */
+ /* T_offset (raw) = T_offset (°C) * (-1.8 mV/°C) * 2^16 / V_REF (mV) */
+ *val = -47513600;
+ *val2 = st->vref_mv;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBSCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = regmap_read(st->regmap16,
+ AD4695_REG_GAIN_IN(chan->scan_index),
+ &reg_val);
+ if (ret)
+ return ret;
+
+ *val = reg_val;
+ *val2 = 15;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ unreachable();
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ ret = regmap_read(st->regmap16,
+ AD4695_REG_OFFSET_IN(chan->scan_index),
+ &reg_val);
+ if (ret)
+ return ret;
+
+ tmp = sign_extend32(reg_val, 15);
+
+ *val = tmp / 4;
+ *val2 = abs(tmp) % 4 * MICRO / 4;
+
+ if (tmp < 0 && *val2) {
+ *val *= -1;
+ *val2 *= -1;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ unreachable();
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
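The temperature scale and offset comments above pack a fair amount of arithmetic into two lines. A small plain-C sketch evaluating them for an illustrative 5 V reference (the raw code 8192 is an arbitrary example):

#include <stdio.h>

int main(void)
{
	const int vref_mv = 5000;	/* illustrative reference voltage */

	/* IIO_VAL_FRACTIONAL_LOG2: scale = vref_mv * -556 / 2^16, where */
	/* -556 ~= 1000 / -1.8 mV/degC, so the unit is milli-degC per LSB */
	double scale = (double)(vref_mv * -556) / 65536;	/* ~ -42.42 */

	/* IIO_VAL_FRACTIONAL: offset = -47513600 / vref_mv, where */
	/* -47513600 = -725 mV * 2^16, the zero point in raw LSBs */
	double offset = -47513600.0 / vref_mv;	/* ~ -9502.72 */

	/* Userspace computes temperature as (raw + offset) * scale */
	printf("T(raw=8192) = %.1f mdegC\n", (8192 + offset) * scale);
	return 0;
}

The example comes out near 55600 mdegC, i.e. about 55.6 degrees C, a plausible die temperature.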
+
+static int ad4695_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ unsigned int reg_val;
+
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (val < 0 || val2 < 0)
+ reg_val = 0;
+ else if (val > 1)
+ reg_val = U16_MAX;
+ else
+ reg_val = (val * (1 << 16) +
+ mul_u64_u32_div(val2, 1 << 16,
+ MICRO)) / 2;
+
+ return regmap_write(st->regmap16,
+ AD4695_REG_GAIN_IN(chan->scan_index),
+ reg_val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (val2 >= 0 && val > S16_MAX / 4)
+ reg_val = S16_MAX;
+ else if ((val2 < 0 ? -val : val) < S16_MIN / 4)
+ reg_val = S16_MIN;
+ else if (val2 < 0)
+ reg_val = clamp_t(int,
+ -(val * 4 + -val2 * 4 / MICRO),
+ S16_MIN, S16_MAX);
+ else if (val < 0)
+ reg_val = clamp_t(int,
+ val * 4 - val2 * 4 / MICRO,
+ S16_MIN, S16_MAX);
+ else
+ reg_val = clamp_t(int,
+ val * 4 + val2 * 4 / MICRO,
+ S16_MIN, S16_MAX);
+
+ return regmap_write(st->regmap16,
+ AD4695_REG_OFFSET_IN(chan->scan_index),
+ reg_val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ }
+ unreachable();
+}
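The CALIBBIAS branch above encodes the offset in quarter-LSB steps: the signed 16-bit register holds 4 counts per LSB of userspace offset. A toy round trip with an arbitrary value, mirroring the positive write path and the read path from ad4695_read_raw():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Userspace writes "12.750000", i.e. val = 12, val2 = 750000 */
	int val = 12, val2 = 750000;
	/* Write path (positive case): val * 4 + val2 * 4 / MICRO */
	short reg_val = val * 4 + val2 * 4 / 1000000;	/* = 51 */

	printf("reg_val = %d\n", reg_val);

	/* Read path: tmp / 4 integer part, abs(tmp) % 4 * MICRO / 4 fraction */
	int tmp = reg_val;	/* a real read would sign_extend32(..., 15) */
	printf("readback = %d.%06d\n", tmp / 4, abs(tmp) % 4 * 1000000 / 4);
	return 0;
}

The clamp_t() calls in the driver additionally bound the encoded value to the S16 range, matching the FSR/8 limits advertised by ad4695_read_avail() below.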
+
+static int ad4695_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ static const int ad4695_calibscale_available[6] = {
+ /* Range of 0 (inclusive) to 2 (exclusive) */
+ 0, 15, 1, 15, U16_MAX, 15
+ };
+ static const int ad4695_calibbias_available[6] = {
+ /*
+ * Datasheet says FSR/8 which translates to signed/4. The step
+ * depends on oversampling ratio which is always 1 for now.
+ */
+ S16_MIN / 4, 0, 0, MICRO / 4, S16_MAX / 4, S16_MAX % 4 * MICRO / 4
+ };
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = ad4695_calibscale_available;
+ *type = IIO_VAL_FRACTIONAL_LOG2;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = ad4695_calibbias_available;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4695_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ if (readval) {
+ if (regmap_check_range_table(st->regmap, reg,
+ &ad4695_regmap_rd_table))
+ return regmap_read(st->regmap, reg, readval);
+ if (regmap_check_range_table(st->regmap16, reg,
+ &ad4695_regmap16_rd_table))
+ return regmap_read(st->regmap16, reg, readval);
+ } else {
+ if (regmap_check_range_table(st->regmap, reg,
+ &ad4695_regmap_wr_table))
+ return regmap_write(st->regmap, reg, writeval);
+ if (regmap_check_range_table(st->regmap16, reg,
+ &ad4695_regmap16_wr_table))
+ return regmap_write(st->regmap16, reg, writeval);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad4695_info = {
+ .read_raw = &ad4695_read_raw,
+ .write_raw = &ad4695_write_raw,
+ .read_avail = &ad4695_read_avail,
+ .debugfs_reg_access = &ad4695_debugfs_reg_access,
+};
+
+static int ad4695_parse_channel_cfg(struct ad4695_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct ad4695_channel_config *chan_cfg;
+ struct iio_chan_spec *iio_chan;
+ int ret, i;
+
+ /* populate defaults */
+ for (i = 0; i < st->chip_info->num_voltage_inputs; i++) {
+ chan_cfg = &st->channels_cfg[i];
+ iio_chan = &st->iio_chan[i];
+
+ chan_cfg->highz_en = true;
+ chan_cfg->channel = i;
+
+ *iio_chan = ad4695_channel_template;
+ iio_chan->channel = i;
+ iio_chan->scan_index = i;
+ iio_chan->address = AD4695_CMD_VOLTAGE_CHAN(i);
+ }
+
+ /* modify based on firmware description */
+ device_for_each_child_node_scoped(dev, child) {
+ u32 reg, val;
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to read reg property (%s)\n",
+ fwnode_get_name(child));
+
+ if (reg >= st->chip_info->num_voltage_inputs)
+ return dev_err_probe(dev, -EINVAL,
+ "reg out of range (%s)\n",
+ fwnode_get_name(child));
+
+ iio_chan = &st->iio_chan[reg];
+ chan_cfg = &st->channels_cfg[reg];
+
+ chan_cfg->highz_en =
+ !fwnode_property_read_bool(child, "adi,no-high-z");
+ chan_cfg->bipolar = fwnode_property_read_bool(child, "bipolar");
+
+ ret = fwnode_property_read_u32(child, "common-mode-channel",
+ &val);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "failed to read common-mode-channel (%s)\n",
+ fwnode_get_name(child));
+
+ if (ret == -EINVAL || val == AD4695_COMMON_MODE_REFGND)
+ chan_cfg->pin_pairing = AD4695_IN_PAIR_REFGND;
+ else if (val == AD4695_COMMON_MODE_COM)
+ chan_cfg->pin_pairing = AD4695_IN_PAIR_COM;
+ else
+ chan_cfg->pin_pairing = AD4695_IN_PAIR_EVEN_ODD;
+
+ if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD &&
+ val % 2 == 0)
+ return dev_err_probe(dev, -EINVAL,
+ "common-mode-channel must be odd number (%s)\n",
+ fwnode_get_name(child));
+
+ if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD &&
+ val != reg + 1)
+ return dev_err_probe(dev, -EINVAL,
+ "common-mode-channel must be next consecutive channel (%s)\n",
+ fwnode_get_name(child));
+
+ if (chan_cfg->pin_pairing == AD4695_IN_PAIR_EVEN_ODD) {
+ char name[5];
+
+ snprintf(name, sizeof(name), "in%d", reg + 1);
+
+ ret = devm_regulator_get_enable_read_voltage(dev, name);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to get %s voltage (%s)\n",
+ name, fwnode_get_name(child));
+
+ chan_cfg->common_mode_mv = ret / 1000;
+ }
+
+ if (chan_cfg->bipolar &&
+ chan_cfg->pin_pairing == AD4695_IN_PAIR_REFGND)
+ return dev_err_probe(dev, -EINVAL,
+ "bipolar mode is not available for inputs paired with REFGND (%s).\n",
+ fwnode_get_name(child));
+
+ if (chan_cfg->bipolar)
+ iio_chan->scan_type.sign = 's';
+
+ ret = ad4695_write_chn_cfg(st, chan_cfg);
+ if (ret)
+ return ret;
+ }
+
+ /* Temperature channel must be the next scan index after voltage channels. */
+ st->iio_chan[i] = ad4695_temp_channel_template;
+ st->iio_chan[i].scan_index = i;
+ i++;
+
+ st->iio_chan[i] = ad4695_soft_timestamp_channel_template;
+ st->iio_chan[i].scan_index = i;
+
+ return 0;
+}
+
+static int ad4695_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ad4695_state *st;
+ struct iio_dev *indio_dev;
+ struct gpio_desc *cnv_gpio;
+ bool use_internal_ldo_supply;
+ bool use_internal_ref_buffer;
+ int ret;
+
+ cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_LOW);
+ if (IS_ERR(cnv_gpio))
+ return dev_err_probe(dev, PTR_ERR(cnv_gpio),
+ "Failed to get CNV GPIO\n");
+
+ /* Driver currently requires CNV pin to be connected to SPI CS */
+ if (cnv_gpio)
+ return dev_err_probe(dev, -ENODEV,
+ "CNV GPIO is not supported\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return -EINVAL;
+
+ /* Registers cannot be read at the max allowable speed */
+ spi->max_speed_hz = AD4695_REG_ACCESS_SCLK_HZ;
+
+ st->regmap = devm_regmap_init_spi(spi, &ad4695_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap),
+ "Failed to initialize regmap\n");
+
+ st->regmap16 = devm_regmap_init_spi(spi, &ad4695_regmap16_config);
+ if (IS_ERR(st->regmap16))
+ return dev_err_probe(dev, PTR_ERR(st->regmap16),
+ "Failed to initialize regmap16\n");
+
+ ret = devm_regulator_bulk_get_enable(dev,
+ ARRAY_SIZE(ad4695_power_supplies),
+ ad4695_power_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable power supplies\n");
+
+ /* If LDO_IN supply is present, then we are using internal LDO. */
+ ret = devm_regulator_get_enable_optional(dev, "ldo-in");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "Failed to enable LDO_IN supply\n");
+
+ use_internal_ldo_supply = ret == 0;
+
+ if (!use_internal_ldo_supply) {
+ /* Otherwise we need an external VDD supply. */
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to enable VDD supply\n");
+ }
+
+ /* If REFIN supply is given, then we are using internal buffer */
+ ret = devm_regulator_get_enable_read_voltage(dev, "refin");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get REFIN voltage\n");
+
+ if (ret != -ENODEV) {
+ st->vref_mv = ret / 1000;
+ use_internal_ref_buffer = true;
+ } else {
+ /* Otherwise, we need an external reference. */
+ ret = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to get REF voltage\n");
+
+ st->vref_mv = ret / 1000;
+ use_internal_ref_buffer = false;
+ }
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "com");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get COM voltage\n");
+
+ st->com_mv = ret == -ENODEV ? 0 : ret / 1000;
+
+ /*
+ * Reset the device using hardware reset if available or fall back to
+ * software reset.
+ */
+
+ st->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(st->reset_gpio))
+ return PTR_ERR(st->reset_gpio);
+
+ if (st->reset_gpio) {
+ gpiod_set_value(st->reset_gpio, 0);
+ msleep(AD4695_T_WAKEUP_HW_MS);
+ } else {
+ ret = regmap_write(st->regmap, AD4695_REG_SPI_CONFIG_A,
+ AD4695_REG_SPI_CONFIG_A_SW_RST);
+ if (ret)
+ return ret;
+
+ msleep(AD4695_T_WAKEUP_SW_MS);
+ }
+
+ /* Needed for regmap16 to work correctly. */
+ ret = regmap_set_bits(st->regmap, AD4695_REG_SPI_CONFIG_A,
+ AD4695_REG_SPI_CONFIG_A_ADDR_DIR);
+ if (ret)
+ return ret;
+
+ /* Disable internal LDO if it isn't needed. */
+ ret = regmap_update_bits(st->regmap, AD4695_REG_SETUP,
+ AD4695_REG_SETUP_LDO_EN,
+ FIELD_PREP(AD4695_REG_SETUP_LDO_EN,
+ use_internal_ldo_supply ? 1 : 0));
+ if (ret)
+ return ret;
+
+ /* configure reference supply */
+
+ if (device_property_present(dev, "adi,no-ref-current-limit")) {
+ ret = regmap_set_bits(st->regmap, AD4695_REG_REF_CTRL,
+ AD4695_REG_REF_CTRL_OV_MODE);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_present(dev, "adi,no-ref-high-z")) {
+ if (use_internal_ref_buffer)
+ return dev_err_probe(dev, -EINVAL,
+ "Cannot disable high-Z mode for internal reference buffer\n");
+
+ ret = regmap_clear_bits(st->regmap, AD4695_REG_REF_CTRL,
+ AD4695_REG_REF_CTRL_REFHIZ_EN);
+ if (ret)
+ return ret;
+ }
+
+ ret = ad4695_set_ref_voltage(st, st->vref_mv);
+ if (ret)
+ return ret;
+
+ if (use_internal_ref_buffer) {
+ ret = regmap_set_bits(st->regmap, AD4695_REG_REF_CTRL,
+ AD4695_REG_REF_CTRL_REFBUF_EN);
+ if (ret)
+ return ret;
+
+ /* Give the capacitor some time to charge up. */
+ msleep(AD4695_T_REFBUF_MS);
+ }
+
+ ret = ad4695_parse_channel_cfg(st);
+ if (ret)
+ return ret;
+
+ indio_dev->name = st->chip_info->name;
+ indio_dev->info = &ad4695_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->iio_chan;
+ indio_dev->num_channels = st->chip_info->num_voltage_inputs + 2;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad4695_trigger_handler,
+ &ad4695_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ad4695_spi_id_table[] = {
+ { .name = "ad4695", .driver_data = (kernel_ulong_t)&ad4695_chip_info },
+ { .name = "ad4696", .driver_data = (kernel_ulong_t)&ad4696_chip_info },
+ { .name = "ad4697", .driver_data = (kernel_ulong_t)&ad4697_chip_info },
+ { .name = "ad4698", .driver_data = (kernel_ulong_t)&ad4698_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad4695_spi_id_table);
+
+static const struct of_device_id ad4695_of_match_table[] = {
+ { .compatible = "adi,ad4695", .data = &ad4695_chip_info, },
+ { .compatible = "adi,ad4696", .data = &ad4696_chip_info, },
+ { .compatible = "adi,ad4697", .data = &ad4697_chip_info, },
+ { .compatible = "adi,ad4698", .data = &ad4698_chip_info, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad4695_of_match_table);
+
+static struct spi_driver ad4695_driver = {
+ .driver = {
+ .name = "ad4695",
+ .of_match_table = ad4695_of_match_table,
+ },
+ .probe = ad4695_probe,
+ .id_table = ad4695_spi_id_table,
+};
+module_spi_driver(ad4695_driver);
+
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
+MODULE_DESCRIPTION("Analog Devices AD4695 ADC driver");
+MODULE_LICENSE("GPL");
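
For reference, the LDO_IN/VDD handling in ad4695_probe() above is the usual optional-supply fallback: probe the optional regulator, treat -ENODEV as "not wired up", and require the alternative supply instead. A minimal sketch of that decision, with the error messages trimmed (the function name is illustrative, not part of the driver):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int example_get_power_supplies(struct device *dev,
				      bool *use_internal_ldo)
{
	int ret;

	/* -ENODEV just means the optional LDO_IN supply is not described. */
	ret = devm_regulator_get_enable_optional(dev, "ldo-in");
	if (ret < 0 && ret != -ENODEV)
		return ret;

	*use_internal_ldo = (ret == 0);
	if (*use_internal_ldo)
		return 0;

	/* Without LDO_IN, an external VDD supply is mandatory. */
	return devm_regulator_get_enable(dev, "vdd");
}
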
diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
index a75837334157..1b59708abf30 100644
--- a/drivers/iio/adc/ad7091r5.c
+++ b/drivers/iio/adc/ad7091r5.c
@@ -112,13 +112,13 @@ static int ad7091r5_i2c_probe(struct i2c_client *i2c)
static const struct of_device_id ad7091r5_dt_ids[] = {
{ .compatible = "adi,ad7091r5", .data = &ad7091r5_init_info },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids);
static const struct i2c_device_id ad7091r5_i2c_ids[] = {
- {"ad7091r5", (kernel_ulong_t)&ad7091r5_init_info },
- {}
+ { "ad7091r5", (kernel_ulong_t)&ad7091r5_init_info },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids);
diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c
index 700564305057..c9e014d6a77c 100644
--- a/drivers/iio/adc/ad7091r8.c
+++ b/drivers/iio/adc/ad7091r8.c
@@ -159,7 +159,7 @@ static int ad7091r_regmap_bus_reg_write(void *context, unsigned int reg,
return spi_write(spi, &st->tx_buf, 2);
}
-static struct regmap_bus ad7091r8_regmap_bus = {
+static const struct regmap_bus ad7091r8_regmap_bus = {
.reg_read = ad7091r_regmap_bus_reg_read,
.reg_write = ad7091r_regmap_bus_reg_write,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 108e9ccab1ef..a5d91933f505 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -382,8 +382,7 @@ static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channe
cfg->vref_mv = 2500;
st->adc_control &= ~AD7124_ADC_CTRL_REF_EN_MSK;
st->adc_control |= AD7124_ADC_CTRL_REF_EN(1);
- return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL,
- 2, st->adc_control);
+ return 0;
default:
dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel);
return -EINVAL;
@@ -401,24 +400,17 @@ static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_co
tmp = (cfg->buf_positive << 1) + cfg->buf_negative;
val = AD7124_CONFIG_BIPOLAR(cfg->bipolar) | AD7124_CONFIG_REF_SEL(cfg->refsel) |
- AD7124_CONFIG_IN_BUFF(tmp);
- ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg->cfg_slot), 2, val);
- if (ret < 0)
- return ret;
+ AD7124_CONFIG_IN_BUFF(tmp) | AD7124_CONFIG_PGA(cfg->pga_bits);
- tmp = AD7124_FILTER_TYPE_SEL(cfg->filter_type);
- ret = ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot), AD7124_FILTER_TYPE_MSK,
- tmp, 3);
- if (ret < 0)
- return ret;
-
- ret = ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot), AD7124_FILTER_FS_MSK,
- AD7124_FILTER_FS(cfg->odr_sel_bits), 3);
+ ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg->cfg_slot), 2, val);
if (ret < 0)
return ret;
- return ad7124_spi_write_mask(st, AD7124_CONFIG(cfg->cfg_slot), AD7124_CONFIG_PGA_MSK,
- AD7124_CONFIG_PGA(cfg->pga_bits), 2);
+ tmp = AD7124_FILTER_TYPE_SEL(cfg->filter_type) |
+ AD7124_FILTER_FS(cfg->odr_sel_bits);
+ return ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot),
+ AD7124_FILTER_TYPE_MSK | AD7124_FILTER_FS_MSK,
+ tmp, 3);
}
static struct ad7124_channel_config *ad7124_pop_config(struct ad7124_state *st)
@@ -907,9 +899,9 @@ static int ad7124_setup(struct ad7124_state *st)
/* Set the power mode */
st->adc_control &= ~AD7124_ADC_CTRL_PWR_MSK;
st->adc_control |= AD7124_ADC_CTRL_PWR(power_mode);
- ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
- if (ret < 0)
- return ret;
+
+ st->adc_control &= ~AD7124_ADC_CTRL_MODE_MSK;
+ st->adc_control |= AD7124_ADC_CTRL_MODE(AD_SD_MODE_IDLE);
mutex_init(&st->cfgs_lock);
INIT_KFIFO(st->live_cfgs_fifo);
@@ -927,6 +919,10 @@ static int ad7124_setup(struct ad7124_state *st)
ad7124_set_channel_odr(st, i, 10);
}
+ ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
+ if (ret < 0)
+ return ret;
+
return ret;
}
@@ -1016,14 +1012,14 @@ static const struct of_device_id ad7124_of_match[] = {
.data = &ad7124_chip_info_tbl[ID_AD7124_4], },
{ .compatible = "adi,ad7124-8",
.data = &ad7124_chip_info_tbl[ID_AD7124_8], },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7124_of_match);
static const struct spi_device_id ad71124_ids[] = {
{ "ad7124-4", (kernel_ulong_t)&ad7124_chip_info_tbl[ID_AD7124_4] },
{ "ad7124-8", (kernel_ulong_t)&ad7124_chip_info_tbl[ID_AD7124_8] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad71124_ids);
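
The filter-register consolidation above relies on a generally useful trick: when two fields share a register, OR the field masks together and issue one masked write instead of two. A hedged, regmap-based sketch (the register layout here is made up for illustration):

#include <linux/bitfield.h>
#include <linux/regmap.h>

#define EX_FILTER_TYPE	GENMASK(23, 21)	/* hypothetical field */
#define EX_FILTER_FS	GENMASK(10, 0)	/* hypothetical field */

/* Update both fields with a single read-modify-write transaction. */
static int example_write_filter(struct regmap *map, unsigned int reg,
				unsigned int type, unsigned int fs)
{
	return regmap_update_bits(map, reg, EX_FILTER_TYPE | EX_FILTER_FS,
				  FIELD_PREP(EX_FILTER_TYPE, type) |
				  FIELD_PREP(EX_FILTER_FS, fs));
}
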
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 334ab90991d4..7042ddfdfc03 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -201,6 +202,7 @@ struct ad7192_chip_info {
struct ad7192_state {
const struct ad7192_chip_info *chip_info;
struct clk *mclk;
+ struct clk_hw int_clk_hw;
u16 int_vref_mv;
u32 aincom_mv;
u32 fclk;
@@ -284,7 +286,7 @@ static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = {
&ad7192_syscalib_mode_enum),
IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
&ad7192_syscalib_mode_enum),
- {}
+ { }
};
static struct ad7192_state *ad_sigma_delta_to_ad7192(struct ad_sigma_delta *sd)
@@ -396,25 +398,162 @@ static inline bool ad7192_valid_external_frequency(u32 freq)
freq <= AD7192_EXT_FREQ_MHZ_MAX);
}
-static int ad7192_clock_select(struct ad7192_state *st)
+/*
+ * Position 0 of ad7192_clock_names, xtal, corresponds to clock source
+ * configuration AD7192_CLK_EXT_MCLK1_2, and position 1, mclk, corresponds to
+ * AD7192_CLK_EXT_MCLK2.
+ */
+static const char *const ad7192_clock_names[] = {
+ "xtal",
+ "mclk"
+};
+
+static struct ad7192_state *clk_hw_to_ad7192(struct clk_hw *hw)
+{
+ return container_of(hw, struct ad7192_state, int_clk_hw);
+}
+
+static unsigned long ad7192_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return AD7192_INT_FREQ_MHZ;
+}
+
+static int ad7192_clk_output_is_enabled(struct clk_hw *hw)
+{
+ struct ad7192_state *st = clk_hw_to_ad7192(hw);
+
+ return st->clock_sel == AD7192_CLK_INT_CO;
+}
+
+static int ad7192_clk_prepare(struct clk_hw *hw)
+{
+ struct ad7192_state *st = clk_hw_to_ad7192(hw);
+ int ret;
+
+ st->mode &= ~AD7192_MODE_CLKSRC_MASK;
+ st->mode |= AD7192_CLK_INT_CO;
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ if (ret)
+ return ret;
+
+ st->clock_sel = AD7192_CLK_INT_CO;
+
+ return 0;
+}
+
+static void ad7192_clk_unprepare(struct clk_hw *hw)
+{
+ struct ad7192_state *st = clk_hw_to_ad7192(hw);
+ int ret;
+
+ st->mode &= ~AD7192_MODE_CLKSRC_MASK;
+ st->mode |= AD7192_CLK_INT;
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ if (ret)
+ return;
+
+ st->clock_sel = AD7192_CLK_INT;
+}
+
+static const struct clk_ops ad7192_int_clk_ops = {
+ .recalc_rate = ad7192_clk_recalc_rate,
+ .is_enabled = ad7192_clk_output_is_enabled,
+ .prepare = ad7192_clk_prepare,
+ .unprepare = ad7192_clk_unprepare,
+};
+
+static int ad7192_register_clk_provider(struct ad7192_state *st)
{
struct device *dev = &st->sd.spi->dev;
- unsigned int clock_sel;
+ struct clk_init_data init = {};
+ int ret;
- clock_sel = AD7192_CLK_INT;
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
- /* use internal clock */
- if (!st->mclk) {
- if (device_property_read_bool(dev, "adi,int-clock-output-enable"))
- clock_sel = AD7192_CLK_INT_CO;
- } else {
- if (device_property_read_bool(dev, "adi,clock-xtal"))
- clock_sel = AD7192_CLK_EXT_MCLK1_2;
- else
- clock_sel = AD7192_CLK_EXT_MCLK2;
+ if (!device_property_present(dev, "#clock-cells"))
+ return 0;
+
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-clk",
+ fwnode_get_name(dev_fwnode(dev)));
+ if (!init.name)
+ return -ENOMEM;
+
+ init.ops = &ad7192_int_clk_ops;
+
+ st->int_clk_hw.init = &init;
+ ret = devm_clk_hw_register(dev, &st->int_clk_hw);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &st->int_clk_hw);
+}
+
+static int ad7192_clock_setup(struct ad7192_state *st)
+{
+ struct device *dev = &st->sd.spi->dev;
+ int ret;
+
+ /*
+ * The following two if branches are kept for backward compatibility but
+ * the use of the two devicetree properties is highly discouraged. Clock
+ * configuration should be done according to the bindings.
+ */
+
+ if (device_property_read_bool(dev, "adi,int-clock-output-enable")) {
+ st->clock_sel = AD7192_CLK_INT_CO;
+ st->fclk = AD7192_INT_FREQ_MHZ;
+ dev_warn(dev, "Property adi,int-clock-output-enable is deprecated! Check bindings!\n");
+ return 0;
+ }
+
+ if (device_property_read_bool(dev, "adi,clock-xtal")) {
+ st->clock_sel = AD7192_CLK_EXT_MCLK1_2;
+ st->mclk = devm_clk_get_enabled(dev, "mclk");
+ if (IS_ERR(st->mclk))
+ return dev_err_probe(dev, PTR_ERR(st->mclk),
+ "Failed to get mclk\n");
+
+ st->fclk = clk_get_rate(st->mclk);
+ if (!ad7192_valid_external_frequency(st->fclk))
+ return dev_err_probe(dev, -EINVAL,
+ "External clock frequency out of bounds\n");
+
+ dev_warn(dev, "Property adi,clock-xtal is deprecated! Check bindings!\n");
+ return 0;
+ }
+
+ ret = device_property_match_property_string(dev, "clock-names",
+ ad7192_clock_names,
+ ARRAY_SIZE(ad7192_clock_names));
+ if (ret < 0) {
+ st->clock_sel = AD7192_CLK_INT;
+ st->fclk = AD7192_INT_FREQ_MHZ;
+
+ ret = ad7192_register_clk_provider(st);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clock provider\n");
+ return 0;
}
- return clock_sel;
+ st->clock_sel = AD7192_CLK_EXT_MCLK1_2 + ret;
+
+ st->mclk = devm_clk_get_enabled(dev, ad7192_clock_names[ret]);
+ if (IS_ERR(st->mclk))
+ return dev_err_probe(dev, PTR_ERR(st->mclk),
+ "Failed to get clock source\n");
+
+ st->fclk = clk_get_rate(st->mclk);
+ if (!ad7192_valid_external_frequency(st->fclk))
+ return dev_err_probe(dev, -EINVAL,
+ "External clock frequency out of bounds\n");
+
+ return 0;
}
static int ad7192_setup(struct iio_dev *indio_dev, struct device *dev)
@@ -1275,21 +1414,9 @@ static int ad7192_probe(struct spi_device *spi)
if (ret)
return ret;
- st->fclk = AD7192_INT_FREQ_MHZ;
-
- st->mclk = devm_clk_get_optional_enabled(dev, "mclk");
- if (IS_ERR(st->mclk))
- return PTR_ERR(st->mclk);
-
- st->clock_sel = ad7192_clock_select(st);
-
- if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
- st->clock_sel == AD7192_CLK_EXT_MCLK2) {
- st->fclk = clk_get_rate(st->mclk);
- if (!ad7192_valid_external_frequency(st->fclk))
- return dev_err_probe(dev, -EINVAL,
- "External clock frequency out of bounds\n");
- }
+ ret = ad7192_clock_setup(st);
+ if (ret)
+ return ret;
ret = ad7192_setup(indio_dev, dev);
if (ret)
@@ -1304,7 +1431,7 @@ static const struct of_device_id ad7192_of_match[] = {
{ .compatible = "adi,ad7193", .data = &ad7192_chip_info_tbl[ID_AD7193] },
{ .compatible = "adi,ad7194", .data = &ad7192_chip_info_tbl[ID_AD7194] },
{ .compatible = "adi,ad7195", .data = &ad7192_chip_info_tbl[ID_AD7195] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad7192_of_match);
@@ -1314,7 +1441,7 @@ static const struct spi_device_id ad7192_ids[] = {
{ "ad7193", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7193] },
{ "ad7194", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7194] },
{ "ad7195", (kernel_ulong_t)&ad7192_chip_info_tbl[ID_AD7195] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7192_ids);
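
The internal-clock-output support added above follows the standard clk_hw provider recipe: embed a struct clk_hw in the driver state, describe it with clk_init_data, register it, then expose it to the devicetree. A stripped-down sketch under assumed names (the ops and the fixed rate are placeholders, not the ad7192's real configuration):

#include <linux/clk-provider.h>
#include <linux/device.h>

struct example_state {
	struct clk_hw hw;	/* embedded so ops can use container_of() */
};

static unsigned long example_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return 4915200;		/* hypothetical fixed internal oscillator */
}

static const struct clk_ops example_clk_ops = {
	.recalc_rate = example_recalc_rate,
};

static int example_register_clk(struct device *dev, struct example_state *st)
{
	struct clk_init_data init = { };
	int ret;

	init.name = "example-int-clk";
	init.ops = &example_clk_ops;
	st->hw.init = &init;

	ret = devm_clk_hw_register(dev, &st->hw);
	if (ret)
		return ret;

	/* Let DT consumers reference the clock via #clock-cells = <0>. */
	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &st->hw);
}
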
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index bdac020045b4..7949b076fb87 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -123,7 +123,8 @@ static int ad7266_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct ad7266_state *st = iio_priv(indio_dev);
- unsigned int nr = find_first_bit(scan_mask, indio_dev->masklength);
+ unsigned int nr = find_first_bit(scan_mask,
+ iio_get_masklength(indio_dev));
ad7266_select_input(st, nr);
@@ -456,8 +457,8 @@ static int ad7266_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7266_id[] = {
- {"ad7265", 0},
- {"ad7266", 0},
+ { "ad7265", 0 },
+ { "ad7266", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7266_id);
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index d4a4e15c8244..35aa39fe4bde 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -803,16 +804,16 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct ad7280_state *st = iio_priv(indio_dev);
- unsigned int *channels;
int i, ret;
- channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
+ unsigned int *channels __free(kfree) = kcalloc(st->scan_cnt, sizeof(*channels),
+ GFP_KERNEL);
if (!channels)
return IRQ_HANDLED;
ret = ad7280_read_all_channels(st, st->scan_cnt, channels);
if (ret < 0)
- goto out;
+ return IRQ_HANDLED;
for (i = 0; i < st->scan_cnt; i++) {
unsigned int val;
@@ -852,9 +853,6 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
}
}
-out:
- kfree(channels);
-
return IRQ_HANDLED;
}
@@ -1092,8 +1090,8 @@ static int ad7280_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7280_id[] = {
- {"ad7280a", 0},
- {}
+ { "ad7280a", 0 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7280_id);
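
The ad7280a change above is the linux/cleanup.h scoped-allocation idiom: tagging a pointer with __free(kfree) runs kfree() automatically when it goes out of scope, which is what lets the out: label disappear. Minimal sketch:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_scoped_alloc(size_t n)
{
	/* Freed automatically on every return path, no goto needed. */
	u32 *buf __free(kfree) = kcalloc(n, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (n < 4)
		return -EINVAL;

	/* ... use buf ... */
	return 0;
}
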
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index b59b2a51623c..4c7f887adbbf 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -537,14 +537,14 @@ static int ad7291_probe(struct i2c_client *client)
static const struct i2c_device_id ad7291_id[] = {
{ "ad7291" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad7291_id);
static const struct of_device_id ad7291_of_match[] = {
{ .compatible = "adi,ad7291" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad7291_of_match);
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index ede80f380911..a398973f313d 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -301,13 +301,13 @@ static int ad7292_probe(struct spi_device *spi)
static const struct spi_device_id ad7292_id_table[] = {
{ "ad7292", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7292_id_table);
static const struct of_device_id ad7292_of_match[] = {
{ .compatible = "adi,ad7292" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7292_of_match);
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index c0430f71f592..b35bd4d9ef81 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -109,7 +109,8 @@ static int ad7298_update_scan_mode(struct iio_dev *indio_dev,
int scan_count;
/* Now compute overall size */
- scan_count = bitmap_weight(active_scan_mask, indio_dev->masklength);
+ scan_count = bitmap_weight(active_scan_mask,
+ iio_get_masklength(indio_dev));
command = AD7298_WRITE | st->ext_ref;
@@ -354,8 +355,8 @@ static const struct acpi_device_id ad7298_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, ad7298_acpi_ids);
static const struct spi_device_id ad7298_id[] = {
- {"ad7298", 0},
- {}
+ { "ad7298", 0 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7298_id);
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index 7568cd0a2b32..e8bddfb0d07d 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -8,9 +8,11 @@
* Datasheets of supported parts:
* ad7380/1 : https://www.analog.com/media/en/technical-documentation/data-sheets/AD7380-7381.pdf
* ad7383/4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-7384.pdf
+ * ad7386/7/8 : https://www.analog.com/media/en/technical-documentation/data-sheets/AD7386-7387-7388.pdf
* ad7380-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7380-4.pdf
* ad7381-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7381-4.pdf
* ad7383/4-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-4-ad7384-4.pdf
+ * ad7386/7/8-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7386-4-7387-4-7388-4.pdf
*/
#include <linux/align.h>
@@ -31,7 +33,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#define MAX_NUM_CHANNELS 4
+#define MAX_NUM_CHANNELS 8
/* 2.5V internal reference voltage */
#define AD7380_INTERNAL_REF_MV 2500
@@ -49,6 +51,8 @@
#define AD7380_REG_ADDR_ALERT_LOW_TH 0x4
#define AD7380_REG_ADDR_ALERT_HIGH_TH 0x5
+#define AD7380_CONFIG1_CH BIT(11)
+#define AD7380_CONFIG1_SEQ BIT(10)
#define AD7380_CONFIG1_OS_MODE BIT(9)
#define AD7380_CONFIG1_OSR GENMASK(8, 6)
#define AD7380_CONFIG1_CRC_W BIT(5)
@@ -80,6 +84,8 @@ struct ad7380_chip_info {
const char *name;
const struct iio_chan_spec *channels;
unsigned int num_channels;
+ unsigned int num_simult_channels;
+ bool has_mux;
const char * const *vcm_supplies;
unsigned int num_vcm_supplies;
const unsigned long *available_scan_masks;
@@ -91,82 +97,151 @@ enum {
AD7380_SCAN_TYPE_RESOLUTION_BOOST,
};
-/* Extended scan types for 14-bit chips. */
-static const struct iio_scan_type ad7380_scan_type_14[] = {
+/* Extended scan types for 12-bit unsigned chips. */
+static const struct iio_scan_type ad7380_scan_type_12_u[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 14-bit signed chips. */
+static const struct iio_scan_type ad7380_scan_type_14_s[] = {
[AD7380_SCAN_TYPE_NORMAL] = {
.sign = 's',
.realbits = 14,
.storagebits = 16,
- .endianness = IIO_CPU
+ .endianness = IIO_CPU,
},
[AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
.sign = 's',
.realbits = 16,
.storagebits = 16,
- .endianness = IIO_CPU
+ .endianness = IIO_CPU,
},
};
-/* Extended scan types for 16-bit chips. */
-static const struct iio_scan_type ad7380_scan_type_16[] = {
+/* Extended scan types for 14-bit unsigned chips. */
+static const struct iio_scan_type ad7380_scan_type_14_u[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 16-bit signed chips. */
+static const struct iio_scan_type ad7380_scan_type_16_s[] = {
[AD7380_SCAN_TYPE_NORMAL] = {
.sign = 's',
.realbits = 16,
.storagebits = 16,
- .endianness = IIO_CPU
+ .endianness = IIO_CPU,
},
[AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
.sign = 's',
.realbits = 18,
.storagebits = 32,
- .endianness = IIO_CPU
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 16-bit unsigned chips. */
+static const struct iio_scan_type ad7380_scan_type_16_u[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
},
};
-#define AD7380_CHANNEL(index, bits, diff) { \
- .type = IIO_VOLTAGE, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- .info_mask_shared_by_type_available = \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- .indexed = 1, \
- .differential = (diff), \
- .channel = (diff) ? (2 * (index)) : (index), \
- .channel2 = (diff) ? (2 * (index) + 1) : 0, \
- .scan_index = (index), \
- .has_ext_scan_type = 1, \
- .ext_scan_type = ad7380_scan_type_##bits, \
- .num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits),\
+#define AD7380_CHANNEL(index, bits, diff, sign) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .indexed = 1, \
+ .differential = (diff), \
+ .channel = (diff) ? (2 * (index)) : (index), \
+ .channel2 = (diff) ? (2 * (index) + 1) : 0, \
+ .scan_index = (index), \
+ .has_ext_scan_type = 1, \
+ .ext_scan_type = ad7380_scan_type_##bits##_##sign, \
+ .num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits##_##sign), \
}
-#define DEFINE_AD7380_2_CHANNEL(name, bits, diff) \
+#define DEFINE_AD7380_2_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff), \
- AD7380_CHANNEL(1, bits, diff), \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
IIO_CHAN_SOFT_TIMESTAMP(2), \
}
-#define DEFINE_AD7380_4_CHANNEL(name, bits, diff) \
+#define DEFINE_AD7380_4_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff), \
- AD7380_CHANNEL(1, bits, diff), \
- AD7380_CHANNEL(2, bits, diff), \
- AD7380_CHANNEL(3, bits, diff), \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ AD7380_CHANNEL(2, bits, diff, sign), \
+ AD7380_CHANNEL(3, bits, diff, sign), \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
+#define DEFINE_AD7380_8_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ AD7380_CHANNEL(2, bits, diff, sign), \
+ AD7380_CHANNEL(3, bits, diff, sign), \
+ AD7380_CHANNEL(4, bits, diff, sign), \
+ AD7380_CHANNEL(5, bits, diff, sign), \
+ AD7380_CHANNEL(6, bits, diff, sign), \
+ AD7380_CHANNEL(7, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(8), \
+}
+
/* fully differential */
-DEFINE_AD7380_2_CHANNEL(ad7380_channels, 16, 1);
-DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1);
-DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1);
-DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1);
+DEFINE_AD7380_2_CHANNEL(ad7380_channels, 16, 1, s);
+DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1, s);
+DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1, s);
+DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1, s);
/* pseudo differential */
-DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0);
-DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0);
-DEFINE_AD7380_4_CHANNEL(ad7383_4_channels, 16, 0);
-DEFINE_AD7380_4_CHANNEL(ad7384_4_channels, 14, 0);
+DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0, s);
+DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0, s);
+DEFINE_AD7380_4_CHANNEL(ad7383_4_channels, 16, 0, s);
+DEFINE_AD7380_4_CHANNEL(ad7384_4_channels, 14, 0, s);
+
+/* Single ended */
+DEFINE_AD7380_4_CHANNEL(ad7386_channels, 16, 0, u);
+DEFINE_AD7380_4_CHANNEL(ad7387_channels, 14, 0, u);
+DEFINE_AD7380_4_CHANNEL(ad7388_channels, 12, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7386_4_channels, 16, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7387_4_channels, 14, 0, u);
+DEFINE_AD7380_8_CHANNEL(ad7388_4_channels, 12, 0, u);
static const char * const ad7380_2_channel_vcm_supplies[] = {
"aina", "ainb",
@@ -187,6 +262,60 @@ static const unsigned long ad7380_4_channel_scan_masks[] = {
0
};
+/*
+ * Single-ended parts have a 2:1 multiplexer in front of each ADC.
+ *
+ * From an IIO point of view, all inputs are exported, i.e. ad7386/7/8
+ * export 4 channels and ad7386-4/7-4/8-4 export 8 channels.
+ *
+ * Inputs AinX0 of the multiplexers correspond to the first half of the IIO
+ * channels (i.e. 0-1 or 0-3) and inputs AinX1 correspond to the second half
+ * (i.e. 2-3 or 4-7). Example for the AD7386/7/8 (2-channel parts):
+ *
+ * IIO | AD7386/7/8
+ * | +----------------------------
+ * | | _____ ______
+ * | | | | | |
+ * voltage0 | AinA0 --|--->| | | |
+ * | | | mux |----->| ADCA |---
+ * voltage2 | AinA1 --|--->| | | |
+ * | | |_____| |______|
+ * | | _____ ______
+ * | | | | | |
+ * voltage1 | AinB0 --|--->| | | |
+ * | | | mux |----->| ADCB |---
+ * voltage3 | AinB1 --|--->| | | |
+ * | | |_____| |______|
+ * | |
+ * | +----------------------------
+ *
+ * Since this is simultaneous sampling of either the AinX0 or the AinX1
+ * inputs, we have two separate scan masks.
+ * When sequencer mode is enabled, the chip automatically cycles through
+ * the AinX0 and AinX1 channels. From an IIO point of view, we can then
+ * enable all channels, at the cost of an extra read, thus dividing the
+ * maximum sample rate by two.
+ */
+enum {
+ AD7380_SCAN_MASK_CH_0,
+ AD7380_SCAN_MASK_CH_1,
+ AD7380_SCAN_MASK_SEQ,
+};
+
+static const unsigned long ad7380_2x2_channel_scan_masks[] = {
+ [AD7380_SCAN_MASK_CH_0] = GENMASK(1, 0),
+ [AD7380_SCAN_MASK_CH_1] = GENMASK(3, 2),
+ [AD7380_SCAN_MASK_SEQ] = GENMASK(3, 0),
+ 0
+};
+
+static const unsigned long ad7380_2x4_channel_scan_masks[] = {
+ [AD7380_SCAN_MASK_CH_0] = GENMASK(3, 0),
+ [AD7380_SCAN_MASK_CH_1] = GENMASK(7, 4),
+ [AD7380_SCAN_MASK_SEQ] = GENMASK(7, 0),
+ 0
+};
+
static const struct ad7380_timing_specs ad7380_timing = {
.t_csh_ns = 10,
};
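
To make the scheme above concrete: a buffered capture on the mux'd parts resolves to one of three configurations, keyed by which of the available scan masks the user enabled. An illustrative dispatch (using the ad7380_set_ch() helper introduced later in this patch; this is a sketch, not code from the driver itself):

/* Illustrative only: map the active scan-mask index to the mux setup. */
static int example_configure_capture(struct ad7380_state *st, int mask_index)
{
	switch (mask_index) {
	case AD7380_SCAN_MASK_CH_0:
		return ad7380_set_ch(st, 0);	/* sample the AinX0 inputs */
	case AD7380_SCAN_MASK_CH_1:
		return ad7380_set_ch(st, 1);	/* sample the AinX1 inputs */
	case AD7380_SCAN_MASK_SEQ:
		/* all inputs: the sequencer cycles AinX0/AinX1, halving the rate */
		return regmap_set_bits(st->regmap, AD7380_REG_ADDR_CONFIG1,
				       AD7380_CONFIG1_SEQ);
	default:
		return -EINVAL;
	}
}
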
@@ -208,6 +337,7 @@ static const struct ad7380_chip_info ad7380_chip_info = {
.name = "ad7380",
.channels = ad7380_channels,
.num_channels = ARRAY_SIZE(ad7380_channels),
+ .num_simult_channels = 2,
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
@@ -216,6 +346,7 @@ static const struct ad7380_chip_info ad7381_chip_info = {
.name = "ad7381",
.channels = ad7381_channels,
.num_channels = ARRAY_SIZE(ad7381_channels),
+ .num_simult_channels = 2,
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
@@ -224,6 +355,7 @@ static const struct ad7380_chip_info ad7383_chip_info = {
.name = "ad7383",
.channels = ad7383_channels,
.num_channels = ARRAY_SIZE(ad7383_channels),
+ .num_simult_channels = 2,
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
@@ -234,16 +366,48 @@ static const struct ad7380_chip_info ad7384_chip_info = {
.name = "ad7384",
.channels = ad7384_channels,
.num_channels = ARRAY_SIZE(ad7384_channels),
+ .num_simult_channels = 2,
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
+static const struct ad7380_chip_info ad7386_chip_info = {
+ .name = "ad7386",
+ .channels = ad7386_channels,
+ .num_channels = ARRAY_SIZE(ad7386_channels),
+ .num_simult_channels = 2,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
+static const struct ad7380_chip_info ad7387_chip_info = {
+ .name = "ad7387",
+ .channels = ad7387_channels,
+ .num_channels = ARRAY_SIZE(ad7387_channels),
+ .num_simult_channels = 2,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
+static const struct ad7380_chip_info ad7388_chip_info = {
+ .name = "ad7388",
+ .channels = ad7388_channels,
+ .num_channels = ARRAY_SIZE(ad7388_channels),
+ .num_simult_channels = 2,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x2_channel_scan_masks,
+ .timing_specs = &ad7380_timing,
+};
+
static const struct ad7380_chip_info ad7380_4_chip_info = {
.name = "ad7380-4",
.channels = ad7380_4_channels,
.num_channels = ARRAY_SIZE(ad7380_4_channels),
+ .num_simult_channels = 4,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
@@ -252,6 +416,7 @@ static const struct ad7380_chip_info ad7381_4_chip_info = {
.name = "ad7381-4",
.channels = ad7381_4_channels,
.num_channels = ARRAY_SIZE(ad7381_4_channels),
+ .num_simult_channels = 4,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
@@ -260,6 +425,7 @@ static const struct ad7380_chip_info ad7383_4_chip_info = {
.name = "ad7383-4",
.channels = ad7383_4_channels,
.num_channels = ARRAY_SIZE(ad7383_4_channels),
+ .num_simult_channels = 4,
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
@@ -270,23 +436,58 @@ static const struct ad7380_chip_info ad7384_4_chip_info = {
.name = "ad7384-4",
.channels = ad7384_4_channels,
.num_channels = ARRAY_SIZE(ad7384_4_channels),
+ .num_simult_channels = 4,
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
+static const struct ad7380_chip_info ad7386_4_chip_info = {
+ .name = "ad7386-4",
+ .channels = ad7386_4_channels,
+ .num_channels = ARRAY_SIZE(ad7386_4_channels),
+ .num_simult_channels = 4,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7387_4_chip_info = {
+ .name = "ad7387-4",
+ .channels = ad7387_4_channels,
+ .num_channels = ARRAY_SIZE(ad7387_4_channels),
+ .num_simult_channels = 4,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+static const struct ad7380_chip_info ad7388_4_chip_info = {
+ .name = "ad7388-4",
+ .channels = ad7388_4_channels,
+ .num_channels = ARRAY_SIZE(ad7388_4_channels),
+ .num_simult_channels = 4,
+ .has_mux = true,
+ .available_scan_masks = ad7380_2x4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
struct ad7380_state {
const struct ad7380_chip_info *chip_info;
struct spi_device *spi;
struct regmap *regmap;
unsigned int oversampling_ratio;
bool resolution_boost_enabled;
+ unsigned int ch;
+ bool seq;
unsigned int vref_mv;
unsigned int vcm_mv[MAX_NUM_CHANNELS];
/* xfers, message and buffer for reading sample data */
- struct spi_transfer xfer[2];
- struct spi_message msg;
+ struct spi_transfer normal_xfer[2];
+ struct spi_message normal_msg;
+ struct spi_transfer seq_xfer[4];
+ struct spi_message seq_msg;
/*
* DMA (thus cache coherency maintenance) requires the transfer buffers
* to live in their own cache lines.
@@ -379,6 +580,43 @@ static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg,
unreachable();
}
+/*
+ * When switching channels, the ADC requires an additional settling time.
+ * According to the datasheet, data is valid on the third CS low. We already
+ * have an extra toggle before each read (both direct and buffered reads)
+ * to sample correct data, so we just add a single CS toggle at the end of the
+ * register write.
+ */
+static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch)
+{
+ struct spi_transfer xfer = {
+ .delay = {
+ .value = T_CONVERT_NS,
+ .unit = SPI_DELAY_UNIT_NSECS,
+ }
+ };
+ int ret;
+
+ if (st->ch == ch)
+ return 0;
+
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_CH,
+ FIELD_PREP(AD7380_CONFIG1_CH, ch));
+
+ if (ret)
+ return ret;
+
+ st->ch = ch;
+
+ if (st->oversampling_ratio > 1)
+ xfer.delay.value = T_CONVERT_0_NS +
+ T_CONVERT_X_NS * (st->oversampling_ratio - 1);
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
/**
* ad7380_update_xfers - update the SPI transfers based on the current scan type
* @st: device instance specific state
@@ -387,33 +625,47 @@ static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg,
static void ad7380_update_xfers(struct ad7380_state *st,
const struct iio_scan_type *scan_type)
{
- /*
- * First xfer only triggers conversion and has to be long enough for
- * all conversions to complete, which can be multiple conversion in the
- * case of oversampling. Technically T_CONVERT_X_NS is lower for some
- * chips, but we use the maximum value for simplicity for now.
- */
- if (st->oversampling_ratio > 1)
- st->xfer[0].delay.value = T_CONVERT_0_NS + T_CONVERT_X_NS *
- (st->oversampling_ratio - 1);
- else
- st->xfer[0].delay.value = T_CONVERT_NS;
-
- st->xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+ struct spi_transfer *xfer = st->seq ? st->seq_xfer : st->normal_xfer;
+ unsigned int t_convert = T_CONVERT_NS;
/*
- * Second xfer reads all channels. Data size depends on if resolution
- * boost is enabled or not.
+ * In the case of oversampling, the conversion time is longer than in normal
+ * mode. Technically T_CONVERT_X_NS is lower for some chips, but we use
+ * the maximum value for simplicity for now.
*/
- st->xfer[1].bits_per_word = scan_type->realbits;
- st->xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) *
- (st->chip_info->num_channels - 1);
+ if (st->oversampling_ratio > 1)
+ t_convert = T_CONVERT_0_NS + T_CONVERT_X_NS *
+ (st->oversampling_ratio - 1);
+
+ if (st->seq) {
+ xfer[0].delay.value = xfer[1].delay.value = t_convert;
+ xfer[0].delay.unit = xfer[1].delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer[2].bits_per_word = xfer[3].bits_per_word =
+ scan_type->realbits;
+ xfer[2].len = xfer[3].len =
+ BITS_TO_BYTES(scan_type->storagebits) *
+ st->chip_info->num_simult_channels;
+ xfer[3].rx_buf = xfer[2].rx_buf + xfer[2].len;
+ /* Additional delay required here when oversampling is enabled */
+ if (st->oversampling_ratio > 1)
+ xfer[2].delay.value = t_convert;
+ else
+ xfer[2].delay.value = 0;
+ xfer[2].delay.unit = SPI_DELAY_UNIT_NSECS;
+ } else {
+ xfer[0].delay.value = t_convert;
+ xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer[1].bits_per_word = scan_type->realbits;
+ xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) *
+ st->chip_info->num_simult_channels;
+ }
}
static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
{
struct ad7380_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
+ struct spi_message *msg = &st->normal_msg;
/*
* Currently, we always read all channels at the same time. The scan_type
@@ -423,16 +675,63 @@ static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
if (IS_ERR(scan_type))
return PTR_ERR(scan_type);
+ if (st->chip_info->has_mux) {
+ unsigned int index;
+ int ret;
+
+ /*
+ * Depending on the requested scan_mask and current state,
+ * we need to either change CH bit, or enable sequencer mode
+ * to sample correct data.
+ * Sequencer mode is enabled if active mask corresponds to all
+ * IIO channels enabled. Otherwise, CH bit is set.
+ */
+ ret = iio_active_scan_mask_index(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ index = ret;
+ if (index == AD7380_SCAN_MASK_SEQ) {
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_SEQ,
+ FIELD_PREP(AD7380_CONFIG1_SEQ, 1));
+ if (ret)
+ return ret;
+ msg = &st->seq_msg;
+ st->seq = true;
+ } else {
+ ret = ad7380_set_ch(st, index);
+ if (ret)
+ return ret;
+ }
+
+ }
+
ad7380_update_xfers(st, scan_type);
- return spi_optimize_message(st->spi, &st->msg);
+ return spi_optimize_message(st->spi, msg);
}
static int ad7380_triggered_buffer_postdisable(struct iio_dev *indio_dev)
{
struct ad7380_state *st = iio_priv(indio_dev);
+ struct spi_message *msg = &st->normal_msg;
+ int ret;
+
+ if (st->seq) {
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_SEQ,
+ FIELD_PREP(AD7380_CONFIG1_SEQ, 0));
+ if (ret)
+ return ret;
+
+ msg = &st->seq_msg;
+ st->seq = false;
+ }
- spi_unoptimize_message(&st->msg);
+ spi_unoptimize_message(msg);
return 0;
}
@@ -447,9 +746,10 @@ static irqreturn_t ad7380_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ad7380_state *st = iio_priv(indio_dev);
+ struct spi_message *msg = st->seq ? &st->seq_msg : &st->normal_msg;
int ret;
- ret = spi_sync(st->spi, &st->msg);
+ ret = spi_sync(st->spi, msg);
if (ret)
goto out;
@@ -465,20 +765,43 @@ out:
static int ad7380_read_direct(struct ad7380_state *st, unsigned int scan_index,
const struct iio_scan_type *scan_type, int *val)
{
+ unsigned int index = scan_index;
int ret;
+ if (st->chip_info->has_mux) {
+ unsigned int ch = 0;
+
+ if (index >= st->chip_info->num_simult_channels) {
+ index -= st->chip_info->num_simult_channels;
+ ch = 1;
+ }
+
+ ret = ad7380_set_ch(st, ch);
+ if (ret)
+ return ret;
+ }
+
ad7380_update_xfers(st, scan_type);
- ret = spi_sync(st->spi, &st->msg);
+ ret = spi_sync(st->spi, &st->normal_msg);
if (ret < 0)
return ret;
- if (scan_type->storagebits > 16)
- *val = sign_extend32(*(u32 *)(st->scan_data + 4 * scan_index),
- scan_type->realbits - 1);
- else
- *val = sign_extend32(*(u16 *)(st->scan_data + 2 * scan_index),
- scan_type->realbits - 1);
+ if (scan_type->storagebits > 16) {
+ if (scan_type->sign == 's')
+ *val = sign_extend32(*(u32 *)(st->scan_data + 4 * index),
+ scan_type->realbits - 1);
+ else
+ *val = *(u32 *)(st->scan_data + 4 * index) &
+ GENMASK(scan_type->realbits - 1, 0);
+ } else {
+ if (scan_type->sign == 's')
+ *val = sign_extend32(*(u16 *)(st->scan_data + 2 * index),
+ scan_type->realbits - 1);
+ else
+ *val = *(u16 *)(st->scan_data + 2 * index) &
+ GENMASK(scan_type->realbits - 1, 0);
+ }
return IIO_VAL_INT;
}
@@ -655,6 +978,8 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
/* This is the default value after reset. */
st->oversampling_ratio = 1;
+ st->ch = 0;
+ st->seq = false;
/* SPI 1-wire mode */
return regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG2,
@@ -756,21 +1081,45 @@ static int ad7380_probe(struct spi_device *spi)
"failed to allocate register map\n");
/*
- * Setting up a low latency read for getting sample data. Used for both
- * direct read an triggered buffer. Additional fields will be set up in
- * ad7380_update_xfers() based on the current state of the driver at the
- * time of the read.
+ * Setting up xfer structures for both normal and sequencer mode. These
+ * structs are used for both direct reads and the triggered buffer. Additional
+ * fields will be set up in ad7380_update_xfers() based on the current
+ * state of the driver at the time of the read.
+ */
+
+ /*
+ * In normal mode a read is composed of two steps:
+ * - first, toggle CS (no data xfer) to trigger a conversion
+ * - then, read data
*/
+ st->normal_xfer[0].cs_change = 1;
+ st->normal_xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->normal_xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ st->normal_xfer[1].rx_buf = st->scan_data;
- /* toggle CS (no data xfer) to trigger a conversion */
- st->xfer[0].cs_change = 1;
- st->xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
- st->xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ spi_message_init_with_transfers(&st->normal_msg, st->normal_xfer,
+ ARRAY_SIZE(st->normal_xfer));
+ /*
+ * In sequencer mode a read is composed of four steps:
+ * - CS toggle (no data xfer) to get to the right point in the sequence
+ * - CS toggle (no data xfer) to trigger a conversion of AinX0 and
+ * acquisition of AinX1
+ * - 2 data reads, to read AinX0 and AinX1
+ */
+ st->seq_xfer[0].cs_change = 1;
+ st->seq_xfer[0].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->seq_xfer[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ st->seq_xfer[1].cs_change = 1;
+ st->seq_xfer[1].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->seq_xfer[1].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- /* then do a second xfer to read the data */
- st->xfer[1].rx_buf = st->scan_data;
+ st->seq_xfer[2].rx_buf = st->scan_data;
+ st->seq_xfer[2].cs_change = 1;
+ st->seq_xfer[2].cs_change_delay.value = st->chip_info->timing_specs->t_csh_ns;
+ st->seq_xfer[2].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- spi_message_init_with_transfers(&st->msg, st->xfer, ARRAY_SIZE(st->xfer));
+ spi_message_init_with_transfers(&st->seq_msg, st->seq_xfer,
+ ARRAY_SIZE(st->seq_xfer));
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
@@ -798,10 +1147,16 @@ static const struct of_device_id ad7380_of_match_table[] = {
{ .compatible = "adi,ad7381", .data = &ad7381_chip_info },
{ .compatible = "adi,ad7383", .data = &ad7383_chip_info },
{ .compatible = "adi,ad7384", .data = &ad7384_chip_info },
+ { .compatible = "adi,ad7386", .data = &ad7386_chip_info },
+ { .compatible = "adi,ad7387", .data = &ad7387_chip_info },
+ { .compatible = "adi,ad7388", .data = &ad7388_chip_info },
{ .compatible = "adi,ad7380-4", .data = &ad7380_4_chip_info },
{ .compatible = "adi,ad7381-4", .data = &ad7381_4_chip_info },
{ .compatible = "adi,ad7383-4", .data = &ad7383_4_chip_info },
{ .compatible = "adi,ad7384-4", .data = &ad7384_4_chip_info },
+ { .compatible = "adi,ad7386-4", .data = &ad7386_4_chip_info },
+ { .compatible = "adi,ad7387-4", .data = &ad7387_4_chip_info },
+ { .compatible = "adi,ad7388-4", .data = &ad7388_4_chip_info },
{ }
};
@@ -810,10 +1165,16 @@ static const struct spi_device_id ad7380_id_table[] = {
{ "ad7381", (kernel_ulong_t)&ad7381_chip_info },
{ "ad7383", (kernel_ulong_t)&ad7383_chip_info },
{ "ad7384", (kernel_ulong_t)&ad7384_chip_info },
+ { "ad7386", (kernel_ulong_t)&ad7386_chip_info },
+ { "ad7387", (kernel_ulong_t)&ad7387_chip_info },
+ { "ad7388", (kernel_ulong_t)&ad7388_chip_info },
{ "ad7380-4", (kernel_ulong_t)&ad7380_4_chip_info },
{ "ad7381-4", (kernel_ulong_t)&ad7381_4_chip_info },
{ "ad7383-4", (kernel_ulong_t)&ad7383_4_chip_info },
{ "ad7384-4", (kernel_ulong_t)&ad7384_4_chip_info },
+ { "ad7386-4", (kernel_ulong_t)&ad7386_4_chip_info },
+ { "ad7387-4", (kernel_ulong_t)&ad7387_4_chip_info },
+ { "ad7388-4", (kernel_ulong_t)&ad7388_4_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7380_id_table);
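
The ad7380_read_direct() rework above also encodes the general recipe for decoding raw samples once unsigned chips are in the mix: sign-extend from realbits for signed scan types, mask with GENMASK() otherwise. A compact sketch for 16-bit storage words:

#include <linux/bitops.h>
#include <linux/bits.h>

/* Decode one 16-bit storage word according to the scan type. */
static int example_decode_sample(u16 raw, unsigned int realbits, char sign)
{
	if (sign == 's')
		return sign_extend32(raw, realbits - 1);

	return raw & GENMASK(realbits - 1, 0);
}
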
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 80aebed47d1f..aeb8e383fe71 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -409,35 +409,35 @@ static int ad7476_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7476_id[] = {
- {"ad7091", ID_AD7091},
- {"ad7091r", ID_AD7091R},
- {"ad7273", ID_AD7273},
- {"ad7274", ID_AD7274},
- {"ad7276", ID_AD7276},
- {"ad7277", ID_AD7277},
- {"ad7278", ID_AD7278},
- {"ad7466", ID_AD7466},
- {"ad7467", ID_AD7467},
- {"ad7468", ID_AD7468},
- {"ad7475", ID_AD7475},
- {"ad7476", ID_AD7466},
- {"ad7476a", ID_AD7466},
- {"ad7477", ID_AD7467},
- {"ad7477a", ID_AD7467},
- {"ad7478", ID_AD7468},
- {"ad7478a", ID_AD7468},
- {"ad7495", ID_AD7495},
- {"ad7910", ID_AD7467},
- {"ad7920", ID_AD7466},
- {"ad7940", ID_AD7940},
- {"adc081s", ID_ADC081S},
- {"adc101s", ID_ADC101S},
- {"adc121s", ID_ADC121S},
- {"ads7866", ID_ADS7866},
- {"ads7867", ID_ADS7867},
- {"ads7868", ID_ADS7868},
- {"ltc2314-14", ID_LTC2314_14},
- {}
+ { "ad7091", ID_AD7091 },
+ { "ad7091r", ID_AD7091R },
+ { "ad7273", ID_AD7273 },
+ { "ad7274", ID_AD7274 },
+ { "ad7276", ID_AD7276},
+ { "ad7277", ID_AD7277 },
+ { "ad7278", ID_AD7278 },
+ { "ad7466", ID_AD7466 },
+ { "ad7467", ID_AD7467 },
+ { "ad7468", ID_AD7468 },
+ { "ad7475", ID_AD7475 },
+ { "ad7476", ID_AD7466 },
+ { "ad7476a", ID_AD7466 },
+ { "ad7477", ID_AD7467 },
+ { "ad7477a", ID_AD7467 },
+ { "ad7478", ID_AD7468 },
+ { "ad7478a", ID_AD7468 },
+ { "ad7495", ID_AD7495 },
+ { "ad7910", ID_AD7467 },
+ { "ad7920", ID_AD7466 },
+ { "ad7940", ID_AD7940 },
+ { "adc081s", ID_ADC081S },
+ { "adc101s", ID_ADC101S },
+ { "adc121s", ID_ADC121S },
+ { "ads7866", ID_ADS7866 },
+ { "ads7867", ID_ADS7867 },
+ { "ads7868", ID_ADS7868 },
+ { "ltc2314-14", ID_LTC2314_14 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7476_id);
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index c321c6ef48df..9b457472d49c 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -70,19 +70,17 @@ static int ad7606_reg_access(struct iio_dev *indio_dev,
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
+
if (readval) {
ret = st->bops->reg_read(st, reg);
if (ret < 0)
- goto err_unlock;
+ return ret;
*readval = ret;
- ret = 0;
+ return 0;
} else {
- ret = st->bops->reg_write(st, reg, writeval);
+ return st->bops->reg_write(st, reg, writeval);
}
-err_unlock:
- mutex_unlock(&st->lock);
- return ret;
}
static int ad7606_read_samples(struct ad7606_state *st)
@@ -100,19 +98,19 @@ static irqreturn_t ad7606_trigger_handler(int irq, void *p)
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = ad7606_read_samples(st);
- if (ret == 0)
- iio_push_to_buffers_with_timestamp(indio_dev, st->data,
- iio_get_time_ns(indio_dev));
+ if (ret)
+ goto error_ret;
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_get_time_ns(indio_dev));
+error_ret:
iio_trigger_notify_done(indio_dev->trig);
/* The rising edge of the CONVST signal starts a new conversion. */
gpiod_set_value(st->gpio_convst, 1);
- mutex_unlock(&st->lock);
-
return IRQ_HANDLED;
}
@@ -212,9 +210,9 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val)
struct ad7606_state *st = iio_priv(indio_dev);
DECLARE_BITMAP(values, 3);
- values[0] = val;
+ values[0] = val & GENMASK(2, 0);
- gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
+ gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc,
st->gpio_os->info, values);
/* AD7616 requires a reset to update value */
@@ -233,19 +231,17 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
struct ad7606_state *st = iio_priv(indio_dev);
int i, ret, ch = 0;
+ guard(mutex)(&st->lock);
+
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- mutex_lock(&st->lock);
i = find_closest(val2, st->scale_avail, st->num_scales);
if (st->sw_mode_en)
ch = chan->address;
ret = st->write_scale(indio_dev, ch, i);
- if (ret < 0) {
- mutex_unlock(&st->lock);
+ if (ret < 0)
return ret;
- }
st->range[ch] = i;
- mutex_unlock(&st->lock);
return 0;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
@@ -253,14 +249,9 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
i = find_closest(val, st->oversampling_avail,
st->num_os_ratios);
- mutex_lock(&st->lock);
ret = st->write_os(indio_dev, i);
- if (ret < 0) {
- mutex_unlock(&st->lock);
+ if (ret < 0)
return ret;
- }
- st->oversampling = st->oversampling_avail[i];
- mutex_unlock(&st->lock);
return 0;
default:
@@ -419,7 +410,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
return PTR_ERR(st->gpio_range);
st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(st->gpio_standby))
return PTR_ERR(st->gpio_standby);
@@ -662,7 +653,7 @@ static int ad7606_suspend(struct device *dev)
if (st->gpio_standby) {
gpiod_set_value(st->gpio_range, 1);
- gpiod_set_value(st->gpio_standby, 0);
+ gpiod_set_value(st->gpio_standby, 1);
}
return 0;
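
The ad7606 conversions above use guard(mutex)() from linux/cleanup.h: the lock is released automatically at every return, so the err_unlock label and the paired mutex_unlock() calls can be deleted. Minimal sketch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int example_locked_update(struct mutex *lock, int *shared, int val)
{
	guard(mutex)(lock);	/* unlocked automatically on any return */

	if (val < 0)
		return -EINVAL;

	*shared = val;
	return 0;
}
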
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index 6bc587b20f05..02d8c309304e 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -125,7 +125,7 @@ static const struct of_device_id ad7606_of_match[] = {
{ .compatible = "adi,ad7606-4" },
{ .compatible = "adi,ad7606-6" },
{ .compatible = "adi,ad7606-8" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7606_of_match);
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index 263a778bcf25..62ec12195307 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -249,8 +249,9 @@ static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
- unsigned long os[3] = {1};
+ DECLARE_BITMAP(os, 3);
+ bitmap_fill(os, 3);
/*
* Software mode is enabled when all three oversampling
* pins are set to high. If oversampling gpios are defined
@@ -258,7 +259,7 @@ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
* otherwise, they must be hardwired to VDD
*/
if (st->gpio_os) {
- gpiod_set_array_value(ARRAY_SIZE(os),
+ gpiod_set_array_value(st->gpio_os->ndescs,
st->gpio_os->desc, st->gpio_os->info, os);
}
/* OS of 128 and 256 are available only in software mode */
@@ -333,7 +334,7 @@ static const struct spi_device_id ad7606_id_table[] = {
{ "ad7606-8", ID_AD7606_8 },
{ "ad7606b", ID_AD7606B },
{ "ad7616", ID_AD7616 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7606_id_table);
@@ -344,7 +345,7 @@ static const struct of_device_id ad7606_of_match[] = {
{ .compatible = "adi,ad7606-8" },
{ .compatible = "adi,ad7606b" },
{ .compatible = "adi,ad7616" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7606_of_match);
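
Both gpiod_set_array_value() fixes above address the same pitfall: the value argument is a bitmap, and the line count must come from the descriptor array's ndescs, not from a local ARRAY_SIZE(). A hedged sketch of the corrected pattern (assuming at most three lines, as for the oversampling pins):

#include <linux/bitmap.h>
#include <linux/gpio/consumer.h>

/* Drive every line of a gpiod descriptor array high. */
static int example_set_all_high(struct gpio_descs *gpios)
{
	DECLARE_BITMAP(values, 3);	/* one bit per line */

	bitmap_fill(values, gpios->ndescs);
	return gpiod_set_array_value(gpios->ndescs, gpios->desc,
				     gpios->info, values);
}
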
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
index 3079a0872947..4d570383ef02 100644
--- a/drivers/iio/adc/ad7766.c
+++ b/drivers/iio/adc/ad7766.c
@@ -291,13 +291,13 @@ static int ad7766_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7766_id[] = {
- {"ad7766", ID_AD7766},
- {"ad7766-1", ID_AD7766_1},
- {"ad7766-2", ID_AD7766_2},
- {"ad7767", ID_AD7766},
- {"ad7767-1", ID_AD7766_1},
- {"ad7767-2", ID_AD7766_2},
- {}
+ { "ad7766", ID_AD7766 },
+ { "ad7766-1", ID_AD7766_1 },
+ { "ad7766-2", ID_AD7766_2 },
+ { "ad7767", ID_AD7766 },
+ { "ad7767-1", ID_AD7766_1 },
+ { "ad7767-2", ID_AD7766_2 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7766_id);
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 70a25949142c..113703fb7245 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -544,13 +544,10 @@ static int ad7768_set_channel_label(struct iio_dev *indio_dev,
{
struct ad7768_state *st = iio_priv(indio_dev);
struct device *device = indio_dev->dev.parent;
- struct fwnode_handle *fwnode;
- struct fwnode_handle *child;
const char *label;
int crt_ch = 0;
- fwnode = dev_fwnode(device);
- fwnode_for_each_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(device, child) {
if (fwnode_property_read_u32(child, "reg", &crt_ch))
continue;
@@ -658,7 +655,7 @@ MODULE_DEVICE_TABLE(spi, ad7768_id_table);
static const struct of_device_id ad7768_of_match[] = {
{ .compatible = "adi,ad7768-1" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7768_of_match);
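
The ad7768-1 change swaps manual fwnode iteration for device_for_each_child_node_scoped(), which drops each child reference automatically, including on continue and early return. Sketch:

#include <linux/property.h>

static int example_count_addressed_children(struct device *dev)
{
	int count = 0;
	u32 reg;

	/* 'child' is declared by the macro and put automatically. */
	device_for_each_child_node_scoped(dev, child) {
		if (fwnode_property_read_u32(child, "reg", &reg))
			continue;
		count++;
	}

	return count;
}
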
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index a813fe04787c..e9b0c577c9cc 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -355,11 +355,11 @@ static int ad7780_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7780_id[] = {
- {"ad7170", ID_AD7170},
- {"ad7171", ID_AD7171},
- {"ad7780", ID_AD7780},
- {"ad7781", ID_AD7781},
- {}
+ { "ad7170", ID_AD7170 },
+ { "ad7171", ID_AD7171 },
+ { "ad7780", ID_AD7780 },
+ { "ad7781", ID_AD7781 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7780_id);
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index d4ad7e0b515a..abebd519cafa 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -824,16 +824,16 @@ static int ad7793_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7793_id[] = {
- {"ad7785", ID_AD7785},
- {"ad7792", ID_AD7792},
- {"ad7793", ID_AD7793},
- {"ad7794", ID_AD7794},
- {"ad7795", ID_AD7795},
- {"ad7796", ID_AD7796},
- {"ad7797", ID_AD7797},
- {"ad7798", ID_AD7798},
- {"ad7799", ID_AD7799},
- {}
+ { "ad7785", ID_AD7785 },
+ { "ad7792", ID_AD7792 },
+ { "ad7793", ID_AD7793 },
+ { "ad7794", ID_AD7794 },
+ { "ad7795", ID_AD7795 },
+ { "ad7796", ID_AD7796 },
+ { "ad7797", ID_AD7797 },
+ { "ad7798", ID_AD7798 },
+ { "ad7799", ID_AD7799 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7793_id);
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 965bdc8aa696..6265ce7df703 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -329,8 +329,8 @@ static int ad7887_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7887_id[] = {
- {"ad7887", ID_AD7887},
- {}
+ { "ad7887", ID_AD7887 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7887_id);
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 9d6bf6d0927a..09680015a7ab 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -361,14 +361,14 @@ static int ad7923_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7923_id[] = {
- {"ad7904", AD7904},
- {"ad7914", AD7914},
- {"ad7923", AD7924},
- {"ad7924", AD7924},
- {"ad7908", AD7908},
- {"ad7918", AD7918},
- {"ad7928", AD7928},
- {}
+ { "ad7904", AD7904 },
+ { "ad7914", AD7914 },
+ { "ad7923", AD7924 },
+ { "ad7924", AD7924 },
+ { "ad7908", AD7908 },
+ { "ad7918", AD7918 },
+ { "ad7928", AD7928 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7923_id);
@@ -380,7 +380,7 @@ static const struct of_device_id ad7923_of_match[] = {
{ .compatible = "adi,ad7908", },
{ .compatible = "adi,ad7918", },
{ .compatible = "adi,ad7928", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7923_of_match);
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 0f0dcd9ca6b6..0f107e3fc2c8 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -237,7 +237,8 @@ static int ad799x_update_scan_mode(struct iio_dev *indio_dev,
if (!st->rx_buf)
return -ENOMEM;
- st->transfer_size = bitmap_weight(scan_mask, indio_dev->masklength) * 2;
+ st->transfer_size = bitmap_weight(scan_mask,
+ iio_get_masklength(indio_dev)) * 2;
switch (st->id) {
case ad7992:
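
Several hunks in this series replace direct indio_dev->masklength reads with the iio_get_masklength() accessor; the surrounding arithmetic stays the same. A typical consumer sizes a transfer from the enabled scan bits, e.g. two bytes per channel as in ad799x:

#include <linux/bitmap.h>
#include <linux/iio/iio.h>

/* Bytes needed for one scan with 16-bit samples. */
static size_t example_scan_bytes(struct iio_dev *indio_dev,
				 const unsigned long *scan_mask)
{
	return bitmap_weight(scan_mask, iio_get_masklength(indio_dev)) * 2;
}
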
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 41c1b519c573..05fb7a75531f 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -104,7 +105,30 @@
#define AD9467_DEF_OUTPUT_MODE 0x08
#define AD9467_REG_VREF_MASK 0x0F
+/*
+ * Analog Devices AD9643 14-Bit, 170/210/250 MSPS ADC
+ */
+
+#define CHIPID_AD9643 0x82
+#define AD9643_REG_VREF_MASK 0x1F
+
+/*
+ * Analog Devices AD9652 16-bit 310 MSPS ADC
+ */
+
+#define CHIPID_AD9652 0xC1
+#define AD9652_REG_VREF_MASK 0xC0
+
+/*
+ * Analog Devices AD9649 14-bit 20/40/65/80 MSPS ADC
+ */
+
+#define CHIPID_AD9649 0x6F
+#define AD9649_TEST_POINTS 8
+
#define AD9647_MAX_TEST_POINTS 32
+#define AD9467_CAN_INVERT(st) \
+ (!(st)->info->has_dco || (st)->info->has_dco_invert)
struct ad9467_chip_info {
const char *name;
@@ -113,12 +137,23 @@ struct ad9467_chip_info {
unsigned int num_channels;
const unsigned int (*scale_table)[2];
int num_scales;
+ unsigned long test_mask;
+ unsigned int test_mask_len;
unsigned long max_rate;
unsigned int default_output_mode;
unsigned int vref_mask;
unsigned int num_lanes;
+ unsigned int dco_en;
+ unsigned int test_points;
/* data clock output */
bool has_dco;
+ bool has_dco_invert;
+};
+
+struct ad9467_chan_test_mode {
+ struct ad9467_state *st;
+ unsigned int idx;
+ u8 mode;
};
struct ad9467_state {
@@ -126,6 +161,8 @@ struct ad9467_state {
struct iio_backend *back;
struct spi_device *spi;
struct clk *clk;
+ /* used for debugfs */
+ struct ad9467_chan_test_mode *chan_test;
unsigned int output_mode;
unsigned int (*scales)[2];
/*
@@ -138,6 +175,8 @@ struct ad9467_state {
* at the io delay control section.
*/
DECLARE_BITMAP(calib_map, AD9647_MAX_TEST_POINTS * 2);
+ /* number of bits of the map */
+ unsigned int calib_map_size;
struct gpio_desc *pwrdown_gpio;
/* ensure consistent state obtained on multiple related accesses */
struct mutex lock;
@@ -211,6 +250,24 @@ static const unsigned int ad9467_scale_table[][2] = {
{2300, 8}, {2400, 9}, {2500, 10},
};
+static const unsigned int ad9643_scale_table[][2] = {
+ {2087, 0x0F}, {2065, 0x0E}, {2042, 0x0D}, {2020, 0x0C}, {1997, 0x0B},
+ {1975, 0x0A}, {1952, 0x09}, {1930, 0x08}, {1907, 0x07}, {1885, 0x06},
+ {1862, 0x05}, {1840, 0x04}, {1817, 0x03}, {1795, 0x02}, {1772, 0x01},
+ {1750, 0x00}, {1727, 0x1F}, {1704, 0x1E}, {1681, 0x1D}, {1658, 0x1C},
+ {1635, 0x1B}, {1612, 0x1A}, {1589, 0x19}, {1567, 0x18}, {1544, 0x17},
+ {1521, 0x16}, {1498, 0x15}, {1475, 0x14}, {1452, 0x13}, {1429, 0x12},
+ {1406, 0x11}, {1383, 0x10},
+};
+
+static const unsigned int ad9649_scale_table[][2] = {
+ {2000, 0},
+};
+
+static const unsigned int ad9652_scale_table[][2] = {
+ {1250, 0}, {1125, 1}, {1200, 2}, {1250, 3}, {1000, 5},
+};
+
static void __ad9467_get_scale(struct ad9467_state *st, int index,
unsigned int *val, unsigned int *val2)
{
@@ -224,14 +281,14 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
*val2 = tmp % 1000000;
}
-#define AD9467_CHAN(_chan, _si, _bits, _sign) \
+#define AD9467_CHAN(_chan, avai_mask, _si, _bits, _sign) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = _chan, \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = avai_mask, \
.scan_index = _si, \
.scan_type = { \
.sign = _sign, \
@@ -241,11 +298,42 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
}
static const struct iio_chan_spec ad9434_channels[] = {
- AD9467_CHAN(0, 0, 12, 's'),
+ AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 12, 's'),
};
static const struct iio_chan_spec ad9467_channels[] = {
- AD9467_CHAN(0, 0, 16, 's'),
+ AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 16, 's'),
+};
+
+static const struct iio_chan_spec ad9643_channels[] = {
+ AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 14, 's'),
+ AD9467_CHAN(1, BIT(IIO_CHAN_INFO_SCALE), 1, 14, 's'),
+};
+
+static const struct iio_chan_spec ad9649_channels[] = {
+ AD9467_CHAN(0, 0, 0, 14, 's'),
+};
+
+static const struct iio_chan_spec ad9652_channels[] = {
+ AD9467_CHAN(0, BIT(IIO_CHAN_INFO_SCALE), 0, 16, 's'),
+ AD9467_CHAN(1, BIT(IIO_CHAN_INFO_SCALE), 1, 16, 's'),
+};
+
+static const char * const ad9467_test_modes[] = {
+ [AN877_ADC_TESTMODE_OFF] = "off",
+ [AN877_ADC_TESTMODE_MIDSCALE_SHORT] = "midscale_short",
+ [AN877_ADC_TESTMODE_POS_FULLSCALE] = "pos_fullscale",
+ [AN877_ADC_TESTMODE_NEG_FULLSCALE] = "neg_fullscale",
+ [AN877_ADC_TESTMODE_ALT_CHECKERBOARD] = "checkerboard",
+ [AN877_ADC_TESTMODE_PN23_SEQ] = "prbs23",
+ [AN877_ADC_TESTMODE_PN9_SEQ] = "prbs9",
+ [AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE] = "one_zero_toggle",
+ [AN877_ADC_TESTMODE_USER] = "user",
+ [AN877_ADC_TESTMODE_BIT_TOGGLE] = "bit_toggle",
+ [AN877_ADC_TESTMODE_SYNC] = "sync",
+ [AN877_ADC_TESTMODE_ONE_BIT_HIGH] = "one_bit_high",
+ [AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY] = "mixed_bit_frequency",
+ [AN877_ADC_TESTMODE_RAMP] = "ramp",
};
static const struct ad9467_chip_info ad9467_chip_tbl = {
@@ -256,6 +344,10 @@ static const struct ad9467_chip_info ad9467_chip_tbl = {
.num_scales = ARRAY_SIZE(ad9467_scale_table),
.channels = ad9467_channels,
.num_channels = ARRAY_SIZE(ad9467_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE,
+ AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1,
.default_output_mode = AD9467_DEF_OUTPUT_MODE,
.vref_mask = AD9467_REG_VREF_MASK,
.num_lanes = 8,
@@ -269,6 +361,9 @@ static const struct ad9467_chip_info ad9434_chip_tbl = {
.num_scales = ARRAY_SIZE(ad9434_scale_table),
.channels = ad9434_channels,
.num_channels = ARRAY_SIZE(ad9434_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_USER, AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_USER + 1,
.default_output_mode = AD9434_DEF_OUTPUT_MODE,
.vref_mask = AD9434_REG_VREF_MASK,
.num_lanes = 6,
@@ -282,17 +377,78 @@ static const struct ad9467_chip_info ad9265_chip_tbl = {
.num_scales = ARRAY_SIZE(ad9265_scale_table),
.channels = ad9467_channels,
.num_channels = ARRAY_SIZE(ad9467_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE,
+ AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1,
.default_output_mode = AD9265_DEF_OUTPUT_MODE,
.vref_mask = AD9265_REG_VREF_MASK,
.has_dco = true,
+ .has_dco_invert = true,
+};
+
+static const struct ad9467_chip_info ad9643_chip_tbl = {
+ .name = "ad9643",
+ .id = CHIPID_AD9643,
+ .max_rate = 250000000UL,
+ .scale_table = ad9643_scale_table,
+ .num_scales = ARRAY_SIZE(ad9643_scale_table),
+ .channels = ad9643_channels,
+ .num_channels = ARRAY_SIZE(ad9643_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = BIT(AN877_ADC_TESTMODE_RAMP) |
+ GENMASK(AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY, AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_RAMP + 1,
+ .vref_mask = AD9643_REG_VREF_MASK,
+ .has_dco = true,
+ .has_dco_invert = true,
+ .dco_en = AN877_ADC_DCO_DELAY_ENABLE,
+};
+
+static const struct ad9467_chip_info ad9649_chip_tbl = {
+ .name = "ad9649",
+ .id = CHIPID_AD9649,
+ .max_rate = 80000000UL,
+ .scale_table = ad9649_scale_table,
+ .num_scales = ARRAY_SIZE(ad9649_scale_table),
+ .channels = ad9649_channels,
+ .num_channels = ARRAY_SIZE(ad9649_channels),
+ .test_points = AD9649_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY,
+ AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY + 1,
+ .has_dco = true,
+ .has_dco_invert = true,
+ .dco_en = AN877_ADC_DCO_DELAY_ENABLE,
+};
+
+static const struct ad9467_chip_info ad9652_chip_tbl = {
+ .name = "ad9652",
+ .id = CHIPID_AD9652,
+ .max_rate = 310000000UL,
+ .scale_table = ad9652_scale_table,
+ .num_scales = ARRAY_SIZE(ad9652_scale_table),
+ .channels = ad9652_channels,
+ .num_channels = ARRAY_SIZE(ad9652_channels),
+ .test_points = AD9647_MAX_TEST_POINTS,
+ .test_mask = GENMASK(AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE,
+ AN877_ADC_TESTMODE_OFF),
+ .test_mask_len = AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE + 1,
+ .vref_mask = AD9652_REG_VREF_MASK,
+ .has_dco = true,
};
static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2)
{
const struct ad9467_chip_info *info = st->info;
- unsigned int i, vref_val;
+ unsigned int vref_val;
+ unsigned int i = 0;
int ret;
+ /* nothing to read if we only have one possible scale */
+ if (info->num_scales == 1)
+ goto out_get_scale;
+
ret = ad9467_spi_read(st, AN877_ADC_REG_VREF);
if (ret < 0)
return ret;
@@ -307,6 +463,7 @@ static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2)
if (i == info->num_scales)
return -ERANGE;
+out_get_scale:
__ad9467_get_scale(st, i, val, val2);
return IIO_VAL_INT_PLUS_MICRO;
@@ -321,6 +478,8 @@ static int ad9467_set_scale(struct ad9467_state *st, int val, int val2)
if (val != 0)
return -EINVAL;
+ if (info->num_scales == 1)
+ return -EOPNOTSUPP;
for (i = 0; i < info->num_scales; i++) {
__ad9467_get_scale(st, i, &scale_val[0], &scale_val[1]);
@@ -352,40 +511,96 @@ static int ad9467_outputmode_set(struct ad9467_state *st, unsigned int mode)
AN877_ADC_TRANSFER_SYNC);
}
-static int ad9647_calibrate_prepare(struct ad9467_state *st)
+static int ad9467_testmode_set(struct ad9467_state *st, unsigned int chan,
+ unsigned int test_mode)
+{
+ int ret;
+
+ if (st->info->num_channels > 1) {
+ /* so that the test mode is only applied to one channel */
+ ret = ad9467_spi_write(st, AN877_ADC_REG_CHAN_INDEX, BIT(chan));
+ if (ret)
+ return ret;
+ }
+
+ ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO, test_mode);
+ if (ret)
+ return ret;
+
+ if (st->info->num_channels > 1) {
+ /* go to default state where all channels get write commands */
+ ret = ad9467_spi_write(st, AN877_ADC_REG_CHAN_INDEX,
+ GENMASK(st->info->num_channels - 1, 0));
+ if (ret)
+ return ret;
+ }
+
+ return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+}
+
+static int ad9467_backend_testmode_on(struct ad9467_state *st,
+ unsigned int chan,
+ enum iio_backend_test_pattern pattern)
{
struct iio_backend_data_fmt data = {
.enable = false,
};
- unsigned int c;
int ret;
- ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO,
- AN877_ADC_TESTMODE_PN9_SEQ);
+ ret = iio_backend_data_format_set(st->back, chan, &data);
+ if (ret)
+ return ret;
+
+ ret = iio_backend_test_pattern_set(st->back, chan, pattern);
+ if (ret)
+ return ret;
+
+ return iio_backend_chan_enable(st->back, chan);
+}
+
+static int ad9467_backend_testmode_off(struct ad9467_state *st,
+ unsigned int chan)
+{
+ struct iio_backend_data_fmt data = {
+ .enable = true,
+ .sign_extend = true,
+ };
+ int ret;
+
+ ret = iio_backend_chan_disable(st->back, chan);
if (ret)
return ret;
- ret = ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
- AN877_ADC_TRANSFER_SYNC);
+ ret = iio_backend_test_pattern_set(st->back, chan,
+ IIO_BACKEND_NO_TEST_PATTERN);
if (ret)
return ret;
+ return iio_backend_data_format_set(st->back, chan, &data);
+}
+
+static int ad9647_calibrate_prepare(struct ad9467_state *st)
+{
+ unsigned int c;
+ int ret;
+
ret = ad9467_outputmode_set(st, st->info->default_output_mode);
if (ret)
return ret;
for (c = 0; c < st->info->num_channels; c++) {
- ret = iio_backend_data_format_set(st->back, c, &data);
+ ret = ad9467_testmode_set(st, c, AN877_ADC_TESTMODE_PN9_SEQ);
if (ret)
return ret;
- }
- ret = iio_backend_test_pattern_set(st->back, 0,
- IIO_BACKEND_ADI_PRBS_9A);
- if (ret)
- return ret;
+ ret = ad9467_backend_testmode_on(st, c,
+ IIO_BACKEND_ADI_PRBS_9A);
+ if (ret)
+ return ret;
+ }
- return iio_backend_chan_enable(st->back, 0);
+ return 0;
}
static int ad9647_calibrate_polarity_set(struct ad9467_state *st,
@@ -442,7 +657,7 @@ static int ad9467_calibrate_apply(struct ad9467_state *st, unsigned int val)
if (st->info->has_dco) {
ret = ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_DELAY,
- val);
+ val | st->info->dco_en);
if (ret)
return ret;
@@ -461,57 +676,38 @@ static int ad9467_calibrate_apply(struct ad9467_state *st, unsigned int val)
static int ad9647_calibrate_stop(struct ad9467_state *st)
{
- struct iio_backend_data_fmt data = {
- .sign_extend = true,
- .enable = true,
- };
unsigned int c, mode;
int ret;
- ret = iio_backend_chan_disable(st->back, 0);
- if (ret)
- return ret;
-
- ret = iio_backend_test_pattern_set(st->back, 0,
- IIO_BACKEND_NO_TEST_PATTERN);
- if (ret)
- return ret;
-
for (c = 0; c < st->info->num_channels; c++) {
- ret = iio_backend_data_format_set(st->back, c, &data);
+ ret = ad9467_backend_testmode_off(st, c);
+ if (ret)
+ return ret;
+
+ ret = ad9467_testmode_set(st, c, AN877_ADC_TESTMODE_OFF);
if (ret)
return ret;
}
mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
- ret = ad9467_outputmode_set(st, mode);
- if (ret)
- return ret;
-
- ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO,
- AN877_ADC_TESTMODE_OFF);
- if (ret)
- return ret;
-
- return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER,
- AN877_ADC_TRANSFER_SYNC);
+ return ad9467_outputmode_set(st, mode);
}
static int ad9467_calibrate(struct ad9467_state *st)
{
- unsigned int point, val, inv_val, cnt, inv_cnt = 0;
+ unsigned int point, val, inv_val, cnt, inv_cnt = 0, c;
/*
* Half of the bitmap is for the inverted signal. The number of test
* points is the same though...
*/
- unsigned int test_points = AD9647_MAX_TEST_POINTS;
+ unsigned int test_points = st->info->test_points;
unsigned long sample_rate = clk_get_rate(st->clk);
struct device *dev = &st->spi->dev;
bool invert = false, stat;
int ret;
/* all points invalid */
- bitmap_fill(st->calib_map, BITS_PER_TYPE(st->calib_map));
+ bitmap_fill(st->calib_map, st->calib_map_size);
ret = ad9647_calibrate_prepare(st);
if (ret)
@@ -521,16 +717,31 @@ retune:
if (ret)
return ret;
- for (point = 0; point < test_points; point++) {
+ for (point = 0; point < st->info->test_points; point++) {
ret = ad9467_calibrate_apply(st, point);
if (ret)
return ret;
- ret = iio_backend_chan_status(st->back, 0, &stat);
- if (ret)
- return ret;
+ for (c = 0; c < st->info->num_channels; c++) {
+ ret = iio_backend_chan_status(st->back, c, &stat);
+ if (ret)
+ return ret;
- __assign_bit(point + invert * test_points, st->calib_map, stat);
+ /*
+ * A point is considered valid if all channels report no
+ * error. If one reports an error, then we consider the
+ * point as invalid and we can break the loop right away.
+ */
+ if (stat) {
+ dev_dbg(dev, "Invalid point(%u, inv:%u) for CH:%u\n",
+ point, invert, c);
+ break;
+ }
+
+ if (c == st->info->num_channels - 1)
+ __clear_bit(point + invert * test_points,
+ st->calib_map);
+ }
}
if (!invert) {
@@ -541,8 +752,13 @@ retune:
* a row.
*/
if (cnt < 3) {
- invert = true;
- goto retune;
+ if (AD9467_CAN_INVERT(st)) {
+ invert = true;
+ goto retune;
+ }
+
+ if (!cnt)
+ return -EIO;
}
} else {
inv_cnt = ad9467_find_optimal_point(st->calib_map, test_points,
@@ -679,7 +895,7 @@ static int ad9467_update_scan_mode(struct iio_dev *indio_dev,
return 0;
}
-static const struct iio_info ad9467_info = {
+static struct iio_info ad9467_info = {
.read_raw = ad9467_read_raw,
.write_raw = ad9467_write_raw,
.update_scan_mode = ad9467_update_scan_mode,
@@ -762,12 +978,134 @@ static int ad9467_iio_backend_get(struct ad9467_state *st)
return -ENODEV;
}
+static int ad9467_test_mode_available_show(struct seq_file *s, void *ignored)
+{
+ struct ad9467_state *st = s->private;
+ unsigned int bit;
+
+ for_each_set_bit(bit, &st->info->test_mask, st->info->test_mask_len)
+ seq_printf(s, "%s\n", ad9467_test_modes[bit]);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ad9467_test_mode_available);
+
+static ssize_t ad9467_chan_test_mode_read(struct file *file,
+ char __user *userbuf, size_t count,
+ loff_t *ppos)
+{
+ struct ad9467_chan_test_mode *chan = file->private_data;
+ struct ad9467_state *st = chan->st;
+ char buf[128] = {0};
+ size_t len;
+ int ret;
+
+ if (chan->mode == AN877_ADC_TESTMODE_PN9_SEQ ||
+ chan->mode == AN877_ADC_TESTMODE_PN23_SEQ) {
+ len = scnprintf(buf, sizeof(buf), "Running \"%s\" Test:\n\t",
+ ad9467_test_modes[chan->mode]);
+
+ ret = iio_backend_debugfs_print_chan_status(st->back, chan->idx,
+ buf + len,
+ sizeof(buf) - len);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ } else if (chan->mode == AN877_ADC_TESTMODE_OFF) {
+ len = scnprintf(buf, sizeof(buf), "No test Running...\n");
+ } else {
+ len = scnprintf(buf, sizeof(buf), "Running \"%s\" Test on CH:%u\n",
+ ad9467_test_modes[chan->mode], chan->idx);
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static ssize_t ad9467_chan_test_mode_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ad9467_chan_test_mode *chan = file->private_data;
+ struct ad9467_state *st = chan->st;
+ char test_mode[32] = {0};
+ unsigned int mode;
+ int ret;
+
+ ret = simple_write_to_buffer(test_mode, sizeof(test_mode) - 1, ppos,
+ userbuf, count);
+ if (ret < 0)
+ return ret;
+
+ for_each_set_bit(mode, &st->info->test_mask, st->info->test_mask_len) {
+ if (sysfs_streq(test_mode, ad9467_test_modes[mode]))
+ break;
+ }
+
+ if (mode == st->info->test_mask_len)
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+
+ if (mode == AN877_ADC_TESTMODE_OFF) {
+ unsigned int out_mode;
+
+ if (chan->mode == AN877_ADC_TESTMODE_PN9_SEQ ||
+ chan->mode == AN877_ADC_TESTMODE_PN23_SEQ) {
+ ret = ad9467_backend_testmode_off(st, chan->idx);
+ if (ret)
+ return ret;
+ }
+
+ ret = ad9467_testmode_set(st, chan->idx, mode);
+ if (ret)
+ return ret;
+
+ out_mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ ret = ad9467_outputmode_set(st, out_mode);
+ if (ret)
+ return ret;
+ } else {
+ ret = ad9467_outputmode_set(st, st->info->default_output_mode);
+ if (ret)
+ return ret;
+
+ ret = ad9467_testmode_set(st, chan->idx, mode);
+ if (ret)
+ return ret;
+
+ /* some patterns have a matching monitoring block in the backend */
+ if (mode == AN877_ADC_TESTMODE_PN9_SEQ) {
+ ret = ad9467_backend_testmode_on(st, chan->idx,
+ IIO_BACKEND_ADI_PRBS_9A);
+ if (ret)
+ return ret;
+ } else if (mode == AN877_ADC_TESTMODE_PN23_SEQ) {
+ ret = ad9467_backend_testmode_on(st, chan->idx,
+ IIO_BACKEND_ADI_PRBS_23A);
+ if (ret)
+ return ret;
+ }
+ }
+
+ chan->mode = mode;
+
+ return count;
+}
+
+static const struct file_operations ad9467_chan_test_mode_fops = {
+ .open = simple_open,
+ .read = ad9467_chan_test_mode_read,
+ .write = ad9467_chan_test_mode_write,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
static ssize_t ad9467_dump_calib_table(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ad9467_state *st = file->private_data;
- unsigned int bit, size = BITS_PER_TYPE(st->calib_map);
+ unsigned int bit;
/* +2 for the newline and +1 for the string termination */
unsigned char map[AD9647_MAX_TEST_POINTS * 2 + 3];
ssize_t len = 0;
@@ -776,8 +1114,8 @@ static ssize_t ad9467_dump_calib_table(struct file *file,
if (*ppos)
goto out_read;
- for (bit = 0; bit < size; bit++) {
- if (bit == size / 2)
+ for (bit = 0; bit < st->calib_map_size; bit++) {
+ if (AD9467_CAN_INVERT(st) && bit == st->calib_map_size / 2)
len += scnprintf(map + len, sizeof(map) - len, "\n");
len += scnprintf(map + len, sizeof(map) - len, "%c",
@@ -800,12 +1138,33 @@ static void ad9467_debugfs_init(struct iio_dev *indio_dev)
{
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
struct ad9467_state *st = iio_priv(indio_dev);
+ char attr_name[32];
+ unsigned int chan;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return;
+ st->chan_test = devm_kcalloc(&st->spi->dev, st->info->num_channels,
+ sizeof(*st->chan_test), GFP_KERNEL);
+ if (!st->chan_test)
+ return;
+
debugfs_create_file("calibration_table_dump", 0400, d, st,
&ad9467_calib_table_fops);
+
+ for (chan = 0; chan < st->info->num_channels; chan++) {
+ snprintf(attr_name, sizeof(attr_name), "in_voltage%u_test_mode",
+ chan);
+ st->chan_test[chan].idx = chan;
+ st->chan_test[chan].st = st;
+ debugfs_create_file(attr_name, 0600, d, &st->chan_test[chan],
+ &ad9467_chan_test_mode_fops);
+ }
+
+ debugfs_create_file("in_voltage_test_mode_available", 0400, d, st,
+ &ad9467_test_mode_available_fops);
+
+ iio_backend_debugfs_add(st->back, indio_dev);
}
static int ad9467_probe(struct spi_device *spi)
@@ -826,6 +1185,10 @@ static int ad9467_probe(struct spi_device *spi)
if (!st->info)
return -ENODEV;
+ st->calib_map_size = st->info->test_points;
+ if (AD9467_CAN_INVERT(st))
+ st->calib_map_size *= 2;
+
st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
if (IS_ERR(st->clk))
return PTR_ERR(st->clk);
@@ -850,6 +1213,8 @@ static int ad9467_probe(struct spi_device *spi)
return -ENODEV;
}
+ if (st->info->num_scales > 1)
+ ad9467_info.read_avail = ad9467_read_avail;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
@@ -884,7 +1249,10 @@ static const struct of_device_id ad9467_of_match[] = {
{ .compatible = "adi,ad9265", .data = &ad9265_chip_tbl, },
{ .compatible = "adi,ad9434", .data = &ad9434_chip_tbl, },
{ .compatible = "adi,ad9467", .data = &ad9467_chip_tbl, },
- {}
+ { .compatible = "adi,ad9643", .data = &ad9643_chip_tbl, },
+ { .compatible = "adi,ad9649", .data = &ad9649_chip_tbl, },
+ { .compatible = "adi,ad9652", .data = &ad9652_chip_tbl, },
+ { }
};
MODULE_DEVICE_TABLE(of, ad9467_of_match);
@@ -892,7 +1260,10 @@ static const struct spi_device_id ad9467_ids[] = {
{ "ad9265", (kernel_ulong_t)&ad9265_chip_tbl },
{ "ad9434", (kernel_ulong_t)&ad9434_chip_tbl },
{ "ad9467", (kernel_ulong_t)&ad9467_chip_tbl },
- {}
+ { "ad9643", (kernel_ulong_t)&ad9643_chip_tbl },
+ { "ad9649", (kernel_ulong_t)&ad9649_chip_tbl, },
+ { "ad9652", (kernel_ulong_t)&ad9652_chip_tbl, },
+ { }
};
MODULE_DEVICE_TABLE(spi, ad9467_ids);
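The ad9467 changes above make the calibration bitmap size chip-dependent: st->calib_map_size starts at the per-chip test_points count and is doubled only when AD9467_CAN_INVERT() says the clock polarity can be flipped. A minimal standalone sketch of that sizing rule, with plain C standing in for the kernel bitmap API; the struct and values are taken from the hunks above:

    #include <stdbool.h>
    #include <stdio.h>

    struct chip {
        bool has_dco;
        bool has_dco_invert;
        unsigned int test_points;
    };

    static unsigned int calib_map_size(const struct chip *c)
    {
        /* mirrors AD9467_CAN_INVERT() from the diff */
        bool can_invert = !c->has_dco || c->has_dco_invert;

        /* half of the map holds results for the inverted clock */
        return can_invert ? c->test_points * 2 : c->test_points;
    }

    int main(void)
    {
        /* ad9652 per its chip table: has_dco set, no has_dco_invert */
        struct chip ad9652 = { .has_dco = true, .has_dco_invert = false,
                               .test_points = 32 };

        printf("ad9652 map bits: %u\n", calib_map_size(&ad9652)); /* 32 */
        return 0;
    }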
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index dcd557e93586..e2bed2d648f2 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -351,7 +351,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
if (sigma_delta->num_slots == 1) {
channel = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ad_sigma_delta_set_channel(sigma_delta,
indio_dev->channels[channel].address);
if (ret)
@@ -364,7 +364,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
* implementation is mandatory.
*/
slot = 0;
- for_each_set_bit(i, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
sigma_delta->slots[slot] = indio_dev->channels[i].address;
slot++;
}
@@ -526,7 +526,7 @@ static bool ad_sd_validate_scan_mask(struct iio_dev *indio_dev, const unsigned l
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
- return bitmap_weight(mask, indio_dev->masklength) <= sigma_delta->num_slots;
+ return bitmap_weight(mask, iio_get_masklength(indio_dev)) <= sigma_delta->num_slots;
}
static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
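Several hunks in this series replace open-coded for_each_set_bit() walks over active_scan_mask and direct masklength reads with iio_get_masklength() and iio_for_each_active_channel(). A toy userspace model of the pattern, assuming the helper simply iterates the set bits of the mask up to the mask length; the struct and function bodies here are invented for illustration:

    #include <stdio.h>

    struct iio_dev_toy {
        unsigned long active_scan_mask;
        unsigned int masklength;
    };

    /* stand-in for the accessor that replaces direct masklength reads */
    static unsigned int iio_get_masklength(const struct iio_dev_toy *d)
    {
        return d->masklength;
    }

    /* stand-in for iio_for_each_active_channel(): visit each set bit */
    static void for_each_active_channel(const struct iio_dev_toy *d,
                                        void (*fn)(unsigned int chan))
    {
        for (unsigned int i = 0; i < iio_get_masklength(d); i++)
            if (d->active_scan_mask & (1UL << i))
                fn(i);
    }

    static void show(unsigned int chan)
    {
        printf("channel %u active\n", chan);
    }

    int main(void)
    {
        struct iio_dev_toy dev = { .active_scan_mask = 0x0b, .masklength = 4 };

        for_each_active_channel(&dev, show);    /* prints 0, 1, 3 */
        return 0;
    }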
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 21ce7564e83d..5c8c87eb36d1 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -61,6 +61,10 @@
#define ADI_AXI_ADC_REG_CHAN_STATUS(c) (0x0404 + (c) * 0x40)
#define ADI_AXI_ADC_CHAN_STAT_PN_MASK GENMASK(2, 1)
+/* out of sync */
+#define ADI_AXI_ADC_CHAN_STAT_PN_OOS BIT(1)
+/* spurious out of sync */
+#define ADI_AXI_ADC_CHAN_STAT_PN_ERR BIT(2)
#define ADI_AXI_ADC_REG_CHAN_CTRL_3(c) (0x0418 + (c) * 0x40)
#define ADI_AXI_ADC_CHAN_PN_SEL_MASK GENMASK(19, 16)
@@ -199,17 +203,19 @@ static int axi_adc_test_pattern_set(struct iio_backend *back,
return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan),
ADI_AXI_ADC_CHAN_PN_SEL_MASK,
FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 0));
+ case IIO_BACKEND_ADI_PRBS_23A:
+ return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan),
+ ADI_AXI_ADC_CHAN_PN_SEL_MASK,
+ FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 1));
default:
return -EINVAL;
}
}
-static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
- bool *error)
+static int axi_adc_read_chan_status(struct adi_axi_adc_state *st, unsigned int chan,
+ unsigned int *status)
{
- struct adi_axi_adc_state *st = iio_backend_get_priv(back);
int ret;
- u32 val;
guard(mutex)(&st->lock);
/* reset test bits by setting them */
@@ -221,7 +227,18 @@ static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
/* let's give it enough time to validate the incoming pattern or flag an error */
fsleep(1000);
- ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan), &val);
+ return regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan),
+ status);
+}
+
+static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
+ bool *error)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ u32 val;
+ int ret;
+
+ ret = axi_adc_read_chan_status(st, chan, &val);
if (ret)
return ret;
@@ -233,6 +250,30 @@ static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
return 0;
}
+static int axi_adc_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ u32 val;
+ int ret;
+
+ ret = axi_adc_read_chan_status(st, chan, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * PN_ERR is cleared whenever out of sync is set, so there is no point
+ * in checking both bits.
+ */
+ if (val & ADI_AXI_ADC_CHAN_STAT_PN_OOS)
+ return scnprintf(buf, len, "CH%u: Out of Sync.\n", chan);
+ if (val & ADI_AXI_ADC_CHAN_STAT_PN_ERR)
+ return scnprintf(buf, len, "CH%u: Spurious Out of Sync.\n", chan);
+
+ return scnprintf(buf, len, "CH%u: OK.\n", chan);
+}
+
static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
@@ -267,13 +308,24 @@ static void axi_adc_free_buffer(struct iio_backend *back,
iio_dmaengine_buffer_free(buffer);
}
+static int axi_adc_reg_access(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
static const struct regmap_config axi_adc_regmap_config = {
.val_bits = 32,
.reg_bits = 32,
.reg_stride = 4,
};
-static const struct iio_backend_ops adi_axi_adc_generic = {
+static const struct iio_backend_ops adi_axi_adc_ops = {
.enable = axi_adc_enable,
.disable = axi_adc_disable,
.data_format_set = axi_adc_data_format_set,
@@ -285,6 +337,13 @@ static const struct iio_backend_ops adi_axi_adc_generic = {
.iodelay_set = axi_adc_iodelays_set,
.test_pattern_set = axi_adc_test_pattern_set,
.chan_status = axi_adc_chan_status,
+ .debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
+ .debugfs_print_chan_status = iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
+};
+
+static const struct iio_backend_info adi_axi_adc_generic = {
+ .name = "axi-adc",
+ .ops = &adi_axi_adc_ops,
};
static int adi_axi_adc_probe(struct platform_device *pdev)
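The new debugfs_print_chan_status callback decodes the sticky PN status bits defined near the top of this file, and OOS takes precedence because the hardware clears PN_ERR whenever out of sync is set. A standalone sketch of the decode, with the bit positions copied from the diff:

    #include <stdio.h>
    #include <stdint.h>

    /* from ADI_AXI_ADC_REG_CHAN_STATUS in the hunk above */
    #define PN_OOS (1u << 1)    /* out of sync */
    #define PN_ERR (1u << 2)    /* spurious out of sync */

    static const char *pn_status(uint32_t val)
    {
        if (val & PN_OOS)
            return "Out of Sync";
        if (val & PN_ERR)
            return "Spurious Out of Sync";
        return "OK";
    }

    int main(void)
    {
        printf("0x2 -> %s\n", pn_status(0x2)); /* Out of Sync */
        printf("0x4 -> %s\n", pn_status(0x4)); /* Spurious Out of Sync */
        printf("0x0 -> %s\n", pn_status(0x0)); /* OK */
        return 0;
    }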
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 090416c0d622..1d5fd5f534b8 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -555,8 +555,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_find_property(data->dev->of_node, "aspeed,battery-sensing",
- NULL)) {
+ if (of_property_present(data->dev->of_node, "aspeed,battery-sensing")) {
if (data->model_data->bat_sense_sup) {
data->battery_sensing = 1;
if (readl(data->base + ASPEED_REG_ENGINE_CONTROL) &
@@ -695,7 +694,7 @@ static const struct of_device_id aspeed_adc_matches[] = {
{ .compatible = "aspeed,ast2500-adc", .data = &ast2500_model_data },
{ .compatible = "aspeed,ast2600-adc0", .data = &ast2600_adc0_model_data },
{ .compatible = "aspeed,ast2600-adc1", .data = &ast2600_adc1_model_data },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, aspeed_adc_matches);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index eb501e3c86a5..9c39acff17e6 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -7,6 +7,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -268,9 +269,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
struct iio_chan_spec const *chan;
int i, j = 0;
- for (i = 0; i < idev->masklength; i++) {
- if (!test_bit(i, idev->active_scan_mask))
- continue;
+ iio_for_each_active_channel(idev, i) {
chan = idev->channels + i;
st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel));
j++;
@@ -543,22 +542,18 @@ static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
int i;
for (i = 0; i < st->caps->trigger_number; i++) {
- char *name = kasprintf(GFP_KERNEL,
- "%s-dev%d-%s",
- idev->name,
- iio_device_id(idev),
- triggers[i].name);
+ char *name __free(kfree) = kasprintf(GFP_KERNEL, "%s-dev%d-%s",
+ idev->name,
+ iio_device_id(idev),
+ triggers[i].name);
if (!name)
return -ENOMEM;
if (strcmp(trigger_name, name) == 0) {
- kfree(name);
if (triggers[i].value == 0)
return -EINVAL;
return triggers[i].value;
}
-
- kfree(name);
}
return -EINVAL;
@@ -1340,7 +1335,7 @@ static const struct of_device_id at91_adc_dt_ids[] = {
{ .compatible = "atmel,at91sam9g45-adc", .data = &at91sam9g45_caps },
{ .compatible = "atmel,at91sam9x5-adc", .data = &at91sam9x5_caps },
{ .compatible = "atmel,sama5d3-adc", .data = &sama5d3_caps },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
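The at91_adc hunk above drops both kfree() calls by marking the kasprintf() result with __free(kfree), the scope-based cleanup helper from linux/cleanup.h. A userspace approximation of the mechanism, using the same compiler cleanup attribute with free()/asprintf() standing in for kfree()/kasprintf(); everything below is a sketch, not kernel code:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* runs when the annotated variable goes out of scope */
    static void autofree(char **p)
    {
        free(*p);
    }
    #define __free_str __attribute__((__cleanup__(autofree)))

    static int find_trigger(const char *wanted)
    {
        for (int i = 0; i < 3; i++) {
            char *name __free_str = NULL;

            if (asprintf(&name, "trigger-%d", i) < 0) {
                name = NULL;    /* contents undefined on failure */
                return -1;
            }
            if (strcmp(name, wanted) == 0)
                return i;       /* freed automatically on return */
        }
        return -1;              /* ...and on each loop iteration */
    }

    int main(void)
    {
        printf("%d\n", find_trigger("trigger-1"));  /* 1 */
        return 0;
    }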
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index b487e577befb..d43c8d124a0c 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -5,6 +5,7 @@
* Quentin Schulz <quentin.schulz@free-electrons.com>
*/
+#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
@@ -30,6 +31,8 @@
#define AXP22X_ADC_EN1_MASK (GENMASK(7, 5) | BIT(0))
+#define AXP717_ADC_EN1_MASK GENMASK(7, 0)
+
#define AXP192_GPIO30_IN_RANGE_GPIO0 BIT(0)
#define AXP192_GPIO30_IN_RANGE_GPIO1 BIT(1)
#define AXP192_GPIO30_IN_RANGE_GPIO2 BIT(2)
@@ -43,6 +46,13 @@
#define AXP22X_ADC_RATE_HZ(x) ((ilog2((x) / 100) << 6) & AXP20X_ADC_RATE_MASK)
+#define AXP717_ADC_DATA_TS 0x00
+#define AXP717_ADC_DATA_TEMP 0x01
+#define AXP717_ADC_DATA_VMID 0x02
+#define AXP717_ADC_DATA_BKUP_BATT 0x03
+
+#define AXP717_ADC_DATA_MASK GENMASK(13, 0)
+
#define AXP813_V_I_ADC_RATE_MASK GENMASK(5, 4)
#define AXP813_ADC_RATE_MASK (AXP20X_ADC_RATE_MASK | AXP813_V_I_ADC_RATE_MASK)
#define AXP813_TS_GPIO0_ADC_RATE_HZ(x) AXP20X_ADC_RATE_HZ(x)
@@ -125,6 +135,20 @@ enum axp22x_adc_channel_i {
AXP22X_BATT_DISCHRG_I,
};
+enum axp717_adc_channel_v {
+ AXP717_BATT_V = 0,
+ AXP717_TS_IN,
+ AXP717_VBUS_V,
+ AXP717_VSYS_V,
+ AXP717_DIE_TEMP_V,
+ AXP717_VMID_V = 6,
+ AXP717_BKUP_BATT_V,
+};
+
+enum axp717_adc_channel_i {
+ AXP717_BATT_CHRG_I = 5,
+};
+
enum axp813_adc_channel_v {
AXP813_TS_IN = 0,
AXP813_GPIO0_V,
@@ -179,6 +203,22 @@ static struct iio_map axp22x_maps[] = {
}, { /* sentinel */ }
};
+static struct iio_map axp717_maps[] = {
+ {
+ .consumer_dev_name = "axp20x-usb-power-supply",
+ .consumer_channel = "vbus_v",
+ .adc_channel_label = "vbus_v",
+ }, {
+ .consumer_dev_name = "axp20x-battery-power-supply",
+ .consumer_channel = "batt_v",
+ .adc_channel_label = "batt_v",
+ }, {
+ .consumer_dev_name = "axp20x-battery-power-supply",
+ .consumer_channel = "batt_chrg_i",
+ .adc_channel_label = "batt_chrg_i",
+ },
+};
+
/*
* Channels are mapped by physical system; related channels share the same index,
* e.g. acin_i is in_current0_raw and acin_v is in_voltage0_raw.
@@ -274,6 +314,29 @@ static const struct iio_chan_spec axp22x_adc_channels[] = {
AXP22X_TS_ADC_H),
};
+/*
+ * Scale and offset are unknown for temp, ts, batt_chrg_i, vmid_v, and
+ * bkup_batt_v channels. Leaving scale and offset undefined for now.
+ */
+static const struct iio_chan_spec axp717_adc_channels[] = {
+ AXP20X_ADC_CHANNEL(AXP717_BATT_V, "batt_v", IIO_VOLTAGE,
+ AXP717_BATT_V_H),
+ AXP20X_ADC_CHANNEL(AXP717_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP717_ADC_DATA_H),
+ AXP20X_ADC_CHANNEL(AXP717_VBUS_V, "vbus_v", IIO_VOLTAGE,
+ AXP717_VBUS_V_H),
+ AXP20X_ADC_CHANNEL(AXP717_VSYS_V, "vsys_v", IIO_VOLTAGE,
+ AXP717_VSYS_V_H),
+ AXP20X_ADC_CHANNEL(AXP717_DIE_TEMP_V, "pmic_temp", IIO_TEMP,
+ AXP717_ADC_DATA_H),
+ AXP20X_ADC_CHANNEL(AXP717_BATT_CHRG_I, "batt_chrg_i", IIO_CURRENT,
+ AXP717_BATT_CHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP717_VMID_V, "vmid_v", IIO_VOLTAGE,
+ AXP717_ADC_DATA_H),
+ AXP20X_ADC_CHANNEL(AXP717_BKUP_BATT_V, "bkup_batt_v", IIO_VOLTAGE,
+ AXP717_ADC_DATA_H),
+};
+
static const struct iio_chan_spec axp813_adc_channels[] = {
{
.type = IIO_TEMP,
@@ -354,6 +417,51 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
+static int axp717_adc_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct axp20x_adc_iio *info = iio_priv(indio_dev);
+ u8 bulk_reg[2];
+ int ret;
+
+ /*
+ * A generic "ADC data" channel is used for TS, tdie, vmid,
+ * and vbackup. This channel must first be enabled and then
+ * selected before it can be read.
+ */
+ switch (chan->channel) {
+ case AXP717_TS_IN:
+ regmap_write(info->regmap, AXP717_ADC_DATA_SEL,
+ AXP717_ADC_DATA_TS);
+ break;
+ case AXP717_DIE_TEMP_V:
+ regmap_write(info->regmap, AXP717_ADC_DATA_SEL,
+ AXP717_ADC_DATA_TEMP);
+ break;
+ case AXP717_VMID_V:
+ regmap_write(info->regmap, AXP717_ADC_DATA_SEL,
+ AXP717_ADC_DATA_VMID);
+ break;
+ case AXP717_BKUP_BATT_V:
+ regmap_write(info->regmap, AXP717_ADC_DATA_SEL,
+ AXP717_ADC_DATA_BKUP_BATT);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * All channels are 14 bits wide, with the top 2 bits of the high
+ * register reserved and the remaining bits holding the ADC value.
+ */
+ ret = regmap_bulk_read(info->regmap, chan->address, bulk_reg, 2);
+ if (ret < 0)
+ return ret;
+
+ *val = FIELD_GET(AXP717_ADC_DATA_MASK, get_unaligned_be16(bulk_reg));
+ return IIO_VAL_INT;
+}
+
static int axp813_adc_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
@@ -571,6 +679,27 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
}
}
+static int axp717_adc_scale(struct iio_chan_spec const *chan, int *val,
+ int *val2)
+{
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = 1;
+ return IIO_VAL_INT;
+
+ case IIO_CURRENT:
+ *val = 1;
+ return IIO_VAL_INT;
+
+ case IIO_TEMP:
+ *val = 100;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp813_adc_scale(struct iio_chan_spec const *chan, int *val,
int *val2)
{
@@ -746,6 +875,22 @@ static int axp22x_read_raw(struct iio_dev *indio_dev,
}
}
+static int axp717_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return axp717_adc_scale(chan, val, val2);
+
+ case IIO_CHAN_INFO_RAW:
+ return axp717_adc_raw(indio_dev, chan, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int axp813_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -860,6 +1005,10 @@ static const struct iio_info axp22x_adc_iio_info = {
.read_raw = axp22x_read_raw,
};
+static const struct iio_info axp717_adc_iio_info = {
+ .read_raw = axp717_read_raw,
+};
+
static const struct iio_info axp813_adc_iio_info = {
.read_raw = axp813_read_raw,
};
@@ -889,7 +1038,9 @@ struct axp_data {
const struct iio_info *iio_info;
int num_channels;
struct iio_chan_spec const *channels;
+ unsigned long adc_en1;
unsigned long adc_en1_mask;
+ unsigned long adc_en2;
unsigned long adc_en2_mask;
int (*adc_rate)(struct axp20x_adc_iio *info,
int rate);
@@ -910,7 +1061,9 @@ static const struct axp_data axp20x_data = {
.iio_info = &axp20x_adc_iio_info,
.num_channels = ARRAY_SIZE(axp20x_adc_channels),
.channels = axp20x_adc_channels,
+ .adc_en1 = AXP20X_ADC_EN1,
.adc_en1_mask = AXP20X_ADC_EN1_MASK,
+ .adc_en2 = AXP20X_ADC_EN2,
.adc_en2_mask = AXP20X_ADC_EN2_MASK,
.adc_rate = axp20x_adc_rate,
.maps = axp20x_maps,
@@ -920,15 +1073,26 @@ static const struct axp_data axp22x_data = {
.iio_info = &axp22x_adc_iio_info,
.num_channels = ARRAY_SIZE(axp22x_adc_channels),
.channels = axp22x_adc_channels,
+ .adc_en1 = AXP20X_ADC_EN1,
.adc_en1_mask = AXP22X_ADC_EN1_MASK,
.adc_rate = axp22x_adc_rate,
.maps = axp22x_maps,
};
+static const struct axp_data axp717_data = {
+ .iio_info = &axp717_adc_iio_info,
+ .num_channels = ARRAY_SIZE(axp717_adc_channels),
+ .channels = axp717_adc_channels,
+ .adc_en1 = AXP717_ADC_CH_EN_CONTROL,
+ .adc_en1_mask = AXP717_ADC_EN1_MASK,
+ .maps = axp717_maps,
+};
+
static const struct axp_data axp813_data = {
.iio_info = &axp813_adc_iio_info,
.num_channels = ARRAY_SIZE(axp813_adc_channels),
.channels = axp813_adc_channels,
+ .adc_en1 = AXP20X_ADC_EN1,
.adc_en1_mask = AXP22X_ADC_EN1_MASK,
.adc_rate = axp813_adc_rate,
.maps = axp22x_maps,
@@ -938,6 +1102,7 @@ static const struct of_device_id axp20x_adc_of_match[] = {
{ .compatible = "x-powers,axp192-adc", .data = (void *)&axp192_data, },
{ .compatible = "x-powers,axp209-adc", .data = (void *)&axp20x_data, },
{ .compatible = "x-powers,axp221-adc", .data = (void *)&axp22x_data, },
+ { .compatible = "x-powers,axp717-adc", .data = (void *)&axp717_data, },
{ .compatible = "x-powers,axp813-adc", .data = (void *)&axp813_data, },
{ /* sentinel */ }
};
@@ -947,6 +1112,7 @@ static const struct platform_device_id axp20x_adc_id_match[] = {
{ .name = "axp192-adc", .driver_data = (kernel_ulong_t)&axp192_data, },
{ .name = "axp20x-adc", .driver_data = (kernel_ulong_t)&axp20x_data, },
{ .name = "axp22x-adc", .driver_data = (kernel_ulong_t)&axp22x_data, },
+ { .name = "axp717-adc", .driver_data = (kernel_ulong_t)&axp717_data, },
{ .name = "axp813-adc", .driver_data = (kernel_ulong_t)&axp813_data, },
{ /* sentinel */ },
};
@@ -988,14 +1154,16 @@ static int axp20x_probe(struct platform_device *pdev)
indio_dev->channels = info->data->channels;
/* Enable the ADCs on IP */
- regmap_write(info->regmap, AXP20X_ADC_EN1, info->data->adc_en1_mask);
+ regmap_write(info->regmap, info->data->adc_en1,
+ info->data->adc_en1_mask);
if (info->data->adc_en2_mask)
- regmap_set_bits(info->regmap, AXP20X_ADC_EN2,
+ regmap_set_bits(info->regmap, info->data->adc_en2,
info->data->adc_en2_mask);
/* Configure ADCs rate */
- info->data->adc_rate(info, 100);
+ if (info->data->adc_rate)
+ info->data->adc_rate(info, 100);
ret = iio_map_array_register(indio_dev, info->data->maps);
if (ret < 0) {
@@ -1015,10 +1183,10 @@ fail_register:
iio_map_array_unregister(indio_dev);
fail_map:
- regmap_write(info->regmap, AXP20X_ADC_EN1, 0);
+ regmap_write(info->regmap, info->data->adc_en1, 0);
if (info->data->adc_en2_mask)
- regmap_write(info->regmap, AXP20X_ADC_EN2, 0);
+ regmap_write(info->regmap, info->data->adc_en2, 0);
return ret;
}
@@ -1031,10 +1199,10 @@ static void axp20x_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
iio_map_array_unregister(indio_dev);
- regmap_write(info->regmap, AXP20X_ADC_EN1, 0);
+ regmap_write(info->regmap, info->data->adc_en1, 0);
if (info->data->adc_en2_mask)
- regmap_write(info->regmap, AXP20X_ADC_EN2, 0);
+ regmap_write(info->regmap, info->data->adc_en2, 0);
}
static struct platform_driver axp20x_adc_driver = {
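For the AXP717, every channel delivers a 14-bit sample as two big-endian bytes with the top two bits of the high byte reserved; the driver masks with AXP717_ADC_DATA_MASK (GENMASK(13, 0)) after get_unaligned_be16(). A standalone sketch of that extraction, with the mask written out as 0x3fff:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int axp717_sample(const uint8_t raw[2])
    {
        /* big-endian read, then drop the two reserved high bits */
        return (unsigned int)((raw[0] << 8) | raw[1]) & 0x3fff;
    }

    int main(void)
    {
        uint8_t bulk_reg[2] = { 0xff, 0xa5 };   /* reserved bits set */

        printf("0x%04x\n", axp717_sample(bulk_reg)); /* 0x3fa5 */
        return 0;
    }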
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index f135cf2362df..8c3acc0cd7e9 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -299,7 +299,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
static const struct platform_device_id axp288_adc_id_table[] = {
{ .name = "axp288_adc" },
- {},
+ { }
};
static struct platform_driver axp288_adc_driver = {
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 6bc149c51414..cdfe304eaa20 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -606,7 +606,7 @@ static void iproc_adc_remove(struct platform_device *pdev)
static const struct of_device_id iproc_adc_of_match[] = {
{.compatible = "brcm,iproc-static-adc", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, iproc_adc_of_match);
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 4cdddc6e36e9..fa04e0a5f645 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -351,7 +351,7 @@ static int berlin2_adc_probe(struct platform_device *pdev)
static const struct of_device_id berlin2_adc_match[] = {
{ .compatible = "marvell,berlin2-adc", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, berlin2_adc_match);
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index a432342348ab..2c51b90b7101 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -157,9 +157,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
i = 0;
sample_invalid = false;
- for_each_set_bit(scan_idx, indio_dev->active_scan_mask,
- indio_dev->masklength) {
-
+ iio_for_each_active_channel(indio_dev, scan_idx) {
channel = indio_dev->channels[scan_idx].channel;
cc10001_adc_start(adc_dev, channel);
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 06cfbbabaf8d..de7252a10047 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -108,7 +108,7 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2)
dln2->demux_count = 0;
/* Optimize all 8-channels case */
- if (indio_dev->masklength &&
+ if (iio_get_masklength(indio_dev) &&
(*indio_dev->active_scan_mask & 0xff) == 0xff) {
dln2_adc_add_demux(dln2, 0, 0, 16);
dln2->ts_pad_offset = 0;
@@ -117,9 +117,7 @@ static void dln2_adc_update_demux(struct dln2_adc *dln2)
}
/* Build demux table from fixed 8-channels to active_scan_mask */
- for_each_set_bit(out_ind,
- indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, out_ind) {
/* Handle timestamp separately */
if (out_ind == DLN2_ADC_MAX_CHANNELS)
break;
@@ -541,7 +539,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
/* Assign trigger channel based on first enabled channel */
trigger_chan = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
if (trigger_chan < DLN2_ADC_MAX_CHANNELS) {
dln2->trigger_chan = trigger_chan;
ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan,
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
index 971942ce4c66..cc38d5e0608e 100644
--- a/drivers/iio/adc/ep93xx_adc.c
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -228,7 +228,7 @@ static void ep93xx_adc_remove(struct platform_device *pdev)
static const struct of_device_id ep93xx_adc_of_ids[] = {
{ .compatible = "cirrus,ep9301-adc" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ep93xx_adc_of_ids);
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 78fada4b7b1c..4d00ee8dd14d 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -519,7 +519,7 @@ static const struct of_device_id exynos_adc_match[] = {
.compatible = "samsung,exynos7-adc",
.data = &exynos7_adc_data,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, exynos_adc_match);
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 771fa12bdc02..fb635a756440 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -524,7 +524,7 @@ static int hi8435_probe(struct spi_device *spi)
static const struct of_device_id hi8435_dt_ids[] = {
{ .compatible = "holt,hi8435" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, hi8435_dt_ids);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index b3372ccff7d5..8da0419ecfa3 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -363,10 +363,7 @@ static irqreturn_t hx711_trigger(int irq, void *p)
memset(hx711_data->buffer, 0, sizeof(hx711_data->buffer));
- for (i = 0; i < indio_dev->masklength; i++) {
- if (!test_bit(i, indio_dev->active_scan_mask))
- continue;
-
+ iio_for_each_active_channel(indio_dev, i) {
hx711_data->buffer[j] = hx711_reset_read(hx711_data,
indio_dev->channels[i].channel);
j++;
@@ -555,7 +552,7 @@ static int hx711_probe(struct platform_device *pdev)
static const struct of_device_id of_hx711_match[] = {
{ .compatible = "avia,hx711", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_hx711_match);
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 727e390bd979..48c95e12e791 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -755,8 +755,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
* Single register reads: bulk_read will not work with ina226/219
* as there is no auto-increment of the register pointer.
*/
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
unsigned int val;
ret = regmap_read(chip->regmap,
@@ -1053,12 +1052,12 @@ static void ina2xx_remove(struct i2c_client *client)
}
static const struct i2c_device_id ina2xx_id[] = {
- {"ina219", ina219},
- {"ina220", ina219},
- {"ina226", ina226},
- {"ina230", ina226},
- {"ina231", ina226},
- {}
+ { "ina219", ina219 },
+ { "ina220", ina219 },
+ { "ina226", ina226 },
+ { "ina230", ina226 },
+ { "ina231", ina226 },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
@@ -1083,7 +1082,7 @@ static const struct of_device_id ina2xx_of_match[] = {
.compatible = "ti,ina231",
.data = (void *)ina226
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ina2xx_of_match);
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index af70ca760797..1e802c8779a4 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -908,7 +908,7 @@ static const struct of_device_id ingenic_adc_of_match[] = {
{ .compatible = "ingenic,jz4760-adc", .data = &jz4760_adc_soc_data, },
{ .compatible = "ingenic,jz4760b-adc", .data = &jz4760_adc_soc_data, },
{ .compatible = "ingenic,jz4770-adc", .data = &jz4770_adc_soc_data, },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ingenic_adc_of_match);
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index e34ed7dacd89..43a7bc8158b5 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -217,7 +217,7 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
static const struct of_device_id lpc32xx_adc_match[] = {
{ .compatible = "nxp,lpc3220-adc" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, lpc32xx_adc_match);
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
index 2593fa4322eb..f06dd0b9a858 100644
--- a/drivers/iio/adc/ltc2496.c
+++ b/drivers/iio/adc/ltc2496.c
@@ -94,7 +94,7 @@ static const struct ltc2497_chip_info ltc2496_info = {
static const struct of_device_id ltc2496_of_match[] = {
{ .compatible = "lltc,ltc2496", .data = &ltc2496_info, },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ltc2496_of_match);
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 6401a7727c31..f010b2fd1202 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -151,7 +151,7 @@ MODULE_DEVICE_TABLE(i2c, ltc2497_id);
static const struct of_device_id ltc2497_of_match[] = {
{ .compatible = "lltc,ltc2497", .data = &ltc2497_info[TYPE_LTC2497] },
{ .compatible = "lltc,ltc2499", .data = &ltc2497_info[TYPE_LTC2499] },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ltc2497_of_match);
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 136fcf753837..f5ba4a1b5a7d 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -73,13 +73,13 @@ enum max1027_id {
};
static const struct spi_device_id max1027_id[] = {
- {"max1027", max1027},
- {"max1029", max1029},
- {"max1031", max1031},
- {"max1227", max1227},
- {"max1229", max1229},
- {"max1231", max1231},
- {}
+ { "max1027", max1027 },
+ { "max1029", max1029 },
+ { "max1031", max1031 },
+ { "max1227", max1227 },
+ { "max1229", max1229 },
+ { "max1231", max1231 },
+ { }
};
MODULE_DEVICE_TABLE(spi, max1027_id);
@@ -90,7 +90,7 @@ static const struct of_device_id max1027_adc_dt_ids[] = {
{ .compatible = "maxim,max1227" },
{ .compatible = "maxim,max1229" },
{ .compatible = "maxim,max1231" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
diff --git a/drivers/iio/adc/max11100.c b/drivers/iio/adc/max11100.c
index 49e38dca8fe2..2f07437caec3 100644
--- a/drivers/iio/adc/max11100.c
+++ b/drivers/iio/adc/max11100.c
@@ -143,8 +143,8 @@ static int max11100_probe(struct spi_device *spi)
}
static const struct of_device_id max11100_ids[] = {
- {.compatible = "maxim,max11100"},
- { },
+ { .compatible = "maxim,max11100" },
+ { }
};
MODULE_DEVICE_TABLE(of, max11100_ids);
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 75ab57d9aef7..3d0a7d0eb7ee 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -174,8 +174,7 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
mutex_lock(&adc->lock);
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
int ret = max1118_read(indio_dev, scan_chan->channel);
@@ -261,7 +260,7 @@ static const struct spi_device_id max1118_id[] = {
{ "max1117", max1117 },
{ "max1118", max1118 },
{ "max1119", max1119 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, max1118_id);
@@ -269,7 +268,7 @@ static const struct of_device_id max1118_dt_ids[] = {
{ .compatible = "maxim,max1117" },
{ .compatible = "maxim,max1118" },
{ .compatible = "maxim,max1119" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, max1118_dt_ids);
diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c
index 500bb09ab19b..d62c1a011659 100644
--- a/drivers/iio/adc/max1241.c
+++ b/drivers/iio/adc/max1241.c
@@ -177,12 +177,12 @@ static int max1241_probe(struct spi_device *spi)
static const struct spi_device_id max1241_id[] = {
{ "max1241", max1241 },
- {}
+ { }
};
static const struct of_device_id max1241_dt_ids[] = {
{ .compatible = "maxim,max1241" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, max1241_dt_ids);
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index bf4b6dc53fd2..d0c6e94f7204 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -13,6 +13,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
@@ -818,7 +819,6 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
{
- u8 *tx_buf;
int ret, i = 3, j;
unsigned long numelements;
int len;
@@ -850,11 +850,10 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
}
numelements = bitmap_weight(modemask, MAX1363_MAX_CHANNELS);
len = 3 * numelements + 3;
- tx_buf = kmalloc(len, GFP_KERNEL);
- if (!tx_buf) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ u8 *tx_buf __free(kfree) = kmalloc(len, GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
+
tx_buf[0] = st->configbyte;
tx_buf[1] = st->setupbyte;
tx_buf[2] = (st->monitor_speed << 1);
@@ -893,11 +892,9 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
ret = st->send(st->client, tx_buf, len);
if (ret < 0)
- goto error_ret;
- if (ret != len) {
- ret = -EIO;
- goto error_ret;
- }
+ return ret;
+ if (ret != len)
+ return -EIO;
/*
* Now that we hopefully have sensible thresholds in place it is
@@ -910,18 +907,13 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
tx_buf[1] = MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0;
ret = st->send(st->client, tx_buf, 2);
if (ret < 0)
- goto error_ret;
- if (ret != 2) {
- ret = -EIO;
- goto error_ret;
- }
- ret = 0;
- st->monitor_on = true;
-error_ret:
+ return ret;
+ if (ret != 2)
+ return -EIO;
- kfree(tx_buf);
+ st->monitor_on = true;
- return ret;
+ return 0;
}
/*
diff --git a/drivers/iio/adc/max34408.c b/drivers/iio/adc/max34408.c
index 6c2ea2bc52c6..ffec22be2d59 100644
--- a/drivers/iio/adc/max34408.c
+++ b/drivers/iio/adc/max34408.c
@@ -250,14 +250,14 @@ static const struct of_device_id max34408_of_match[] = {
.compatible = "maxim,max34409",
.data = &max34409_model_data,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, max34408_of_match);
static const struct i2c_device_id max34408_id[] = {
{ "max34408", (kernel_ulong_t)&max34408_model_data },
{ "max34409", (kernel_ulong_t)&max34409_model_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, max34408_id);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 76e517b7b1e4..14fe42fc4b7d 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -504,9 +504,9 @@ static int max9611_init(struct max9611_dev *max9611)
}
static const struct of_device_id max9611_of_table[] = {
- {.compatible = "maxim,max9611", .data = "max9611"},
- {.compatible = "maxim,max9612", .data = "max9612"},
- { },
+ { .compatible = "maxim,max9611", .data = "max9611" },
+ { .compatible = "maxim,max9612", .data = "max9612" },
+ { }
};
MODULE_DEVICE_TABLE(of, max9611_of_table);
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index da1421bd7b62..57cff3772ebe 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -459,16 +459,6 @@ static int mcp320x_probe(struct spi_device *spi)
}
static const struct of_device_id mcp320x_dt_ids[] = {
- /* NOTE: The use of compatibles with no vendor prefix is deprecated. */
- { .compatible = "mcp3001" },
- { .compatible = "mcp3002" },
- { .compatible = "mcp3004" },
- { .compatible = "mcp3008" },
- { .compatible = "mcp3201" },
- { .compatible = "mcp3202" },
- { .compatible = "mcp3204" },
- { .compatible = "mcp3208" },
- { .compatible = "mcp3301" },
{ .compatible = "microchip,mcp3001" },
{ .compatible = "microchip,mcp3002" },
{ .compatible = "microchip,mcp3004" },
diff --git a/drivers/iio/adc/mcp3564.c b/drivers/iio/adc/mcp3564.c
index d83bed0e63d2..a68f1cd6883e 100644
--- a/drivers/iio/adc/mcp3564.c
+++ b/drivers/iio/adc/mcp3564.c
@@ -349,8 +349,6 @@ struct mcp3564_chip_info {
* struct mcp3564_state - working data for an ADC device
* @chip_info: chip specific data
* @spi: SPI device structure
- * @vref: the regulator device used as a voltage reference in case
- * external voltage reference is used
* @vref_mv: voltage reference value in millivolts
* @lock: synchronize access to driver's state members
* @dev_addr: hardware device address
@@ -369,7 +367,6 @@ struct mcp3564_chip_info {
struct mcp3564_state {
const struct mcp3564_chip_info *chip_info;
struct spi_device *spi;
- struct regulator *vref;
unsigned short vref_mv;
struct mutex lock; /* Synchronize access to driver's state members */
u8 dev_addr;
@@ -1085,11 +1082,6 @@ static int mcp3564_parse_fw_children(struct iio_dev *indio_dev)
return 0;
}
-static void mcp3564_disable_reg(void *reg)
-{
- regulator_disable(reg);
-}
-
static void mcp3564_fill_scale_tbls(struct mcp3564_state *adc)
{
unsigned int pow = adc->chip_info->resolution - 1;
@@ -1110,7 +1102,7 @@ static void mcp3564_fill_scale_tbls(struct mcp3564_state *adc)
}
}
-static int mcp3564_config(struct iio_dev *indio_dev)
+static int mcp3564_config(struct iio_dev *indio_dev, bool *use_internal_vref_attr)
{
struct mcp3564_state *adc = iio_priv(indio_dev);
struct device *dev = &adc->spi->dev;
@@ -1119,6 +1111,7 @@ static int mcp3564_config(struct iio_dev *indio_dev)
enum mcp3564_ids ids;
int ret = 0;
unsigned int tmp = 0x01;
+ bool internal_vref;
bool err = false;
/*
@@ -1218,36 +1211,22 @@ static int mcp3564_config(struct iio_dev *indio_dev)
dev_dbg(dev, "Found %s chip\n", adc->chip_info->name);
- adc->vref = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(adc->vref)) {
- if (PTR_ERR(adc->vref) != -ENODEV)
- return dev_err_probe(dev, PTR_ERR(adc->vref),
- "failed to get regulator\n");
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get vref voltage\n");
+
+ internal_vref = ret == -ENODEV;
+ adc->vref_mv = internal_vref ? MCP3564R_INT_VREF_MV : ret / MILLI;
+ *use_internal_vref_attr = internal_vref;
+ if (internal_vref) {
/* Check if chip has internal vref */
if (!adc->have_vref)
- return dev_err_probe(dev, PTR_ERR(adc->vref),
- "Unknown Vref\n");
- adc->vref = NULL;
+ return dev_err_probe(dev, -ENODEV, "Unknown Vref\n");
+
dev_dbg(dev, "%s: Using internal Vref\n", __func__);
} else {
- ret = regulator_enable(adc->vref);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, mcp3564_disable_reg,
- adc->vref);
- if (ret)
- return ret;
-
dev_dbg(dev, "%s: Using External Vref\n", __func__);
-
- ret = regulator_get_voltage(adc->vref);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "Failed to read vref regulator\n");
-
- adc->vref_mv = ret / MILLI;
}
ret = mcp3564_parse_fw_children(indio_dev);
@@ -1350,10 +1329,8 @@ static int mcp3564_config(struct iio_dev *indio_dev)
tmp_reg |= FIELD_PREP(MCP3564_CONFIG0_CLK_SEL_MASK, MCP3564_CONFIG0_USE_INT_CLK);
tmp_reg |= MCP3456_CONFIG0_BIT6_DEFAULT;
- if (!adc->vref) {
+ if (internal_vref)
tmp_reg |= FIELD_PREP(MCP3456_CONFIG0_VREF_MASK, 1);
- adc->vref_mv = MCP3564R_INT_VREF_MV;
- }
ret = mcp3564_write_8bits(adc, MCP3564_CONFIG0_REG, tmp_reg);
@@ -1412,6 +1389,7 @@ static int mcp3564_probe(struct spi_device *spi)
int ret;
struct iio_dev *indio_dev;
struct mcp3564_state *adc;
+ bool use_internal_vref_attr;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
if (!indio_dev)
@@ -1428,7 +1406,7 @@ static int mcp3564_probe(struct spi_device *spi)
* enable/disable certain channels
* change the sampling rate to the requested value
*/
- ret = mcp3564_config(indio_dev);
+ ret = mcp3564_config(indio_dev, &use_internal_vref_attr);
if (ret)
return dev_err_probe(&spi->dev, ret,
"Can't configure MCP356X device\n");
@@ -1440,7 +1418,7 @@ static int mcp3564_probe(struct spi_device *spi)
indio_dev->name = adc->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
- if (!adc->vref)
+ if (use_internal_vref_attr)
indio_dev->info = &mcp3564r_info;
else
indio_dev->info = &mcp3564_info;
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 7a32e7a1be9d..d0e77971c5d3 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -103,7 +103,7 @@ struct mcp3911_chip_info {
const struct iio_chan_spec *channels;
unsigned int num_channels;
- int (*config)(struct mcp3911 *adc);
+ int (*config)(struct mcp3911 *adc, bool external_vref);
int (*get_osr)(struct mcp3911 *adc, u32 *val);
int (*set_osr)(struct mcp3911 *adc, u32 val);
int (*enable_offset)(struct mcp3911 *adc, bool enable);
@@ -115,7 +115,6 @@ struct mcp3911_chip_info {
struct mcp3911 {
struct spi_device *spi;
struct mutex lock;
- struct regulator *vref;
struct clk *clki;
u32 dev_addr;
struct iio_trigger *trig;
@@ -385,23 +384,11 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
}
}
-static int mcp3911_calc_scale_table(struct mcp3911 *adc)
+static int mcp3911_calc_scale_table(u32 vref_mv)
{
- struct device *dev = &adc->spi->dev;
- u32 ref = MCP3911_INT_VREF_MV;
u32 div;
- int ret;
u64 tmp;
- if (adc->vref) {
- ret = regulator_get_voltage(adc->vref);
- if (ret < 0) {
- return dev_err_probe(dev, ret, "failed to get vref voltage\n");
- }
-
- ref = ret / 1000;
- }
-
/*
* For 24-bit Conversion
* Raw = (Voltage / Vref) * 2^23 * Gain * 1.5
@@ -412,7 +399,7 @@ static int mcp3911_calc_scale_table(struct mcp3911 *adc)
*/
for (int i = 0; i < MCP3911_NUM_SCALES; i++) {
div = 12582912 * BIT(i);
- tmp = div_s64((s64)ref * 1000000000LL, div);
+ tmp = div_s64((s64)vref_mv * 1000000000LL, div);
mcp3911_scale_table[i][0] = 0;
mcp3911_scale_table[i][1] = tmp;
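As a sanity check on the constant (assuming the chip's internal reference, MCP3911_INT_VREF_MV, is 1200 mV), the arithmetic works out as follows:

/*
 * div = 2^23 * 1.5 * gain = 12582912 << i
 * gain x1: 1200 * 10^9 / 12582912 ~= 95367
 *   -> mcp3911_scale_table[0] = { 0, 95367 }, i.e. ~95.4 nV/LSB
 *      expressed as IIO_VAL_INT_PLUS_NANO millivolts
 */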
@@ -523,7 +510,7 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
goto out;
}
- for_each_set_bit(scan_index, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index];
adc->scan.channels[i] = get_unaligned_be24(&adc->rx_buf[scan_chan->channel * 3]);
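For reference, iio_for_each_active_channel() wraps the open-coded for_each_set_bit() walk behind the opaque-struct accessor; its definition is roughly:

#define iio_for_each_active_channel(indio_dev, chan) \
	for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
			 iio_get_masklength(indio_dev))

so these hunks are behaviour-neutral and simply stop touching masklength directly.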
@@ -544,7 +531,7 @@ static const struct iio_info mcp3911_info = {
.write_raw_get_fmt = mcp3911_write_raw_get_fmt,
};
-static int mcp3911_config(struct mcp3911 *adc)
+static int mcp3911_config(struct mcp3911 *adc, bool external_vref)
{
struct device *dev = &adc->spi->dev;
u32 regval;
@@ -555,7 +542,7 @@ static int mcp3911_config(struct mcp3911 *adc)
return ret;
regval &= ~MCP3911_CONFIG_VREFEXT;
- if (adc->vref) {
+ if (external_vref) {
dev_dbg(dev, "use external voltage reference\n");
regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 1);
} else {
@@ -610,7 +597,7 @@ static int mcp3911_config(struct mcp3911 *adc)
return mcp3911_write(adc, MCP3911_REG_GAIN, regval, 1);
}
-static int mcp3910_config(struct mcp3911 *adc)
+static int mcp3910_config(struct mcp3911 *adc, bool external_vref)
{
struct device *dev = &adc->spi->dev;
u32 regval;
@@ -621,7 +608,7 @@ static int mcp3910_config(struct mcp3911 *adc)
return ret;
regval &= ~MCP3910_CONFIG1_VREFEXT;
- if (adc->vref) {
+ if (external_vref) {
dev_dbg(dev, "use external voltage reference\n");
regval |= FIELD_PREP(MCP3910_CONFIG1_VREFEXT, 1);
} else {
@@ -677,11 +664,6 @@ static int mcp3910_config(struct mcp3911 *adc)
return adc->chip->enable_offset(adc, 0);
}
-static void mcp3911_cleanup_regulator(void *vref)
-{
- regulator_disable(vref);
-}
-
static int mcp3911_set_trigger_state(struct iio_trigger *trig, bool enable)
{
struct mcp3911 *adc = iio_trigger_get_drvdata(trig);
@@ -704,6 +686,8 @@ static int mcp3911_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct mcp3911 *adc;
+ bool external_vref;
+ u32 vref_mv;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
@@ -714,23 +698,12 @@ static int mcp3911_probe(struct spi_device *spi)
adc->spi = spi;
adc->chip = spi_get_device_match_data(spi);
- adc->vref = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(adc->vref)) {
- if (PTR_ERR(adc->vref) == -ENODEV) {
- adc->vref = NULL;
- } else {
- return dev_err_probe(dev, PTR_ERR(adc->vref), "failed to get regulator\n");
- }
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get vref voltage\n");
- } else {
- ret = regulator_enable(adc->vref);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, mcp3911_cleanup_regulator, adc->vref);
- if (ret)
- return ret;
- }
+ external_vref = ret != -ENODEV;
+ vref_mv = external_vref ? ret / 1000 : MCP3911_INT_VREF_MV;
adc->clki = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(adc->clki)) {
@@ -755,11 +728,11 @@ static int mcp3911_probe(struct spi_device *spi)
}
dev_dbg(dev, "use device address %i\n", adc->dev_addr);
- ret = adc->chip->config(adc);
+ ret = adc->chip->config(adc, external_vref);
if (ret)
return ret;
- ret = mcp3911_calc_scale_table(adc);
+ ret = mcp3911_calc_scale_table(vref_mv);
if (ret)
return ret;
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index 5f672765d4a2..5fbf9b6abd9c 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -184,8 +184,8 @@ static void mp2629_adc_remove(struct platform_device *pdev)
}
static const struct of_device_id mp2629_adc_of_match[] = {
- { .compatible = "mps,mp2629_adc"},
- {}
+ { .compatible = "mps,mp2629_adc" },
+ { }
};
MODULE_DEVICE_TABLE(of, mp2629_adc_of_match);
diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c
index 3710473e526f..e2ec805e834f 100644
--- a/drivers/iio/adc/mt6360-adc.c
+++ b/drivers/iio/adc/mt6360-adc.c
@@ -268,7 +268,7 @@ static irqreturn_t mt6360_adc_trigger_handler(int irq, void *p)
int i = 0, bit, val, ret;
memset(&data, 0, sizeof(data));
- for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = mt6360_adc_read_channel(mad, bit, &val);
if (ret < 0) {
dev_warn(&indio_dev->dev, "Failed to get channel %d conversion val\n", bit);
@@ -355,7 +355,7 @@ static int mt6360_adc_probe(struct platform_device *pdev)
static const struct of_device_id mt6360_adc_of_id[] = {
{ .compatible = "mediatek,mt6360-adc", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mt6360_adc_of_id);
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index 600151a62f1f..458544cb8ee4 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -539,7 +539,7 @@ MODULE_DEVICE_TABLE(i2c, nau7802_i2c_id);
static const struct of_device_id nau7802_dt_ids[] = {
{ .compatible = "nuvoton,nau7802" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, nau7802_dt_ids);
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
new file mode 100644
index 000000000000..4c2a1c07bc39
--- /dev/null
+++ b/drivers/iio/adc/pac1921.c
@@ -0,0 +1,1261 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IIO driver for PAC1921 High-Side Power/Current Monitor
+ *
+ * Copyright (C) 2024 Matteo Martelli <matteomartelli3@gmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+/* pac1921 registers */
+#define PAC1921_REG_GAIN_CFG 0x00
+#define PAC1921_REG_INT_CFG 0x01
+#define PAC1921_REG_CONTROL 0x02
+#define PAC1921_REG_VBUS 0x10
+#define PAC1921_REG_VSENSE 0x12
+#define PAC1921_REG_OVERFLOW_STS 0x1C
+#define PAC1921_REG_VPOWER 0x1D
+
+/* pac1921 gain configuration bits */
+#define PAC1921_GAIN_DI_GAIN_MASK GENMASK(5, 3)
+#define PAC1921_GAIN_DV_GAIN_MASK GENMASK(2, 0)
+
+/* pac1921 integration configuration bits */
+#define PAC1921_INT_CFG_SMPL_MASK GENMASK(7, 4)
+#define PAC1921_INT_CFG_VSFEN BIT(3)
+#define PAC1921_INT_CFG_VBFEN BIT(2)
+#define PAC1921_INT_CFG_RIOV BIT(1)
+#define PAC1921_INT_CFG_INTEN BIT(0)
+
+/* pac1921 control bits */
+#define PAC1921_CONTROL_MXSL_MASK GENMASK(7, 6)
+enum pac1921_mxsl {
+ PAC1921_MXSL_VPOWER_PIN = 0,
+ PAC1921_MXSL_VSENSE_FREE_RUN = 1,
+ PAC1921_MXSL_VBUS_FREE_RUN = 2,
+ PAC1921_MXSL_VPOWER_FREE_RUN = 3,
+};
+#define PAC1921_CONTROL_SLEEP BIT(2)
+
+/* pac1921 result registers mask and resolution */
+#define PAC1921_RES_MASK GENMASK(15, 6)
+#define PAC1921_RES_RESOLUTION 1023
+
+/* pac1921 overflow status bits */
+#define PAC1921_OVERFLOW_VSOV BIT(2)
+#define PAC1921_OVERFLOW_VBOV BIT(1)
+#define PAC1921_OVERFLOW_VPOV BIT(0)
+
+/* pac1921 constants */
+#define PAC1921_MAX_VSENSE_MV 100
+#define PAC1921_MAX_VBUS_V 32
+/* Time to first communication after power up (tINT_T) */
+#define PAC1921_POWERUP_TIME_MS 20
+/* Time from Sleep State to Start of Integration Period (tSLEEP_TO_INT) */
+#define PAC1921_SLEEP_TO_INT_TIME_US 86
+
+/* pac1921 defaults */
+#define PAC1921_DEFAULT_DV_GAIN 0 /* 2^(value): 1x gain (HW default) */
+#define PAC1921_DEFAULT_DI_GAIN 0 /* 2^(value): 1x gain (HW default) */
+#define PAC1921_DEFAULT_NUM_SAMPLES 0 /* 2^(value): 1 sample (HW default) */
+
+/*
+ * Pre-computed scale factors for BUS voltage
+ * format: IIO_VAL_INT_PLUS_NANO
+ * unit: mV
+ *
+ * Vbus scale (mV) = max_vbus (mV) / dv_gain / resolution
+ */
+static const int pac1921_vbus_scales[][2] = {
+ { 31, 280547409 }, /* dv_gain x1 */
+ { 15, 640273704 }, /* dv_gain x2 */
+ { 7, 820136852 }, /* dv_gain x4 */
+ { 3, 910068426 }, /* dv_gain x8 */
+ { 1, 955034213 }, /* dv_gain x16 */
+ { 0, 977517106 }, /* dv_gain x32 */
+};
+
+/*
+ * Pre-computed scales for SENSE voltage
+ * format: IIO_VAL_INT_PLUS_NANO
+ * unit: mV
+ *
+ * Vsense scale (mV) = max_vsense (mV) / di_gain / resolution
+ */
+static const int pac1921_vsense_scales[][2] = {
+ { 0, 97751710 }, /* di_gain x1 */
+ { 0, 48875855 }, /* di_gain x2 */
+ { 0, 24437927 }, /* di_gain x4 */
+ { 0, 12218963 }, /* di_gain x8 */
+ { 0, 6109481 }, /* di_gain x16 */
+ { 0, 3054740 }, /* di_gain x32 */
+ { 0, 1527370 }, /* di_gain x64 */
+ { 0, 763685 }, /* di_gain x128 */
+};
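Spot-checking the first entry of each table against the formulas in the comments above (resolution 1023, max_vbus 32 V, max_vsense 100 mV):

/*
 * Vbus,   dv_gain x1: 32000 mV / 1 / 1023 = 31.280547409... -> { 31, 280547409 }
 * Vsense, di_gain x1:   100 mV / 1 / 1023 =  0.097751710... -> { 0, 97751710 }
 */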
+
+/*
+ * Numbers of samples used to integrate measurements at the end of an
+ * integration period.
+ *
+ * Changing the number of samples affects the integration period: the higher
+ * the number of samples, the longer the integration period.
+ *
+ * These correspond to the oversampling ratios exposed to userspace.
+ */
+static const int pac1921_int_num_samples[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048
+};
+
+/*
+ * The integration period depends on the configured number of integration
+ * samples, the measurement resolution and the post filters. The following
+ * array contains integration periods, in microseconds, based on table 4-5 of
+ * the datasheet, assuming power integration mode, 14-bit resolution and post
+ * filters enabled. Each index corresponds to a specific number of samples,
+ * from 1 to 2048.
+ */
+static const unsigned int pac1921_int_periods_usecs[] = {
+ 2720, /* 1 sample */
+ 4050, /* 2 samples */
+ 6790, /* 4 samples */
+ 12200, /* 8 samples */
+ 23000, /* 16 samples */
+ 46000, /* 32 samples */
+ 92000, /* 64 samples */
+ 184000, /* 128 samples */
+ 368000, /* 256 samples */
+ 736000, /* 512 samples */
+ 1471000, /* 1024 samples */
+ 2941000 /* 2048 samples */
+};
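These periods feed directly into the read-only sampling frequency reported further down (samp_freq = MICRO / integr_period_usecs); a quick sketch of the two endpoints:

/*
 * 1 sample:     1000000 / 2720    ~= 367.6 Hz
 * 2048 samples: 1000000 / 2941000 ~=   0.34 Hz
 */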
+
+/* pac1921 regmap configuration */
+static const struct regmap_range pac1921_regmap_wr_ranges[] = {
+ regmap_reg_range(PAC1921_REG_GAIN_CFG, PAC1921_REG_CONTROL),
+};
+
+static const struct regmap_access_table pac1921_regmap_wr_table = {
+ .yes_ranges = pac1921_regmap_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pac1921_regmap_wr_ranges),
+};
+
+static const struct regmap_range pac1921_regmap_rd_ranges[] = {
+ regmap_reg_range(PAC1921_REG_GAIN_CFG, PAC1921_REG_CONTROL),
+ regmap_reg_range(PAC1921_REG_VBUS, PAC1921_REG_VPOWER + 1),
+};
+
+static const struct regmap_access_table pac1921_regmap_rd_table = {
+ .yes_ranges = pac1921_regmap_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pac1921_regmap_rd_ranges),
+};
+
+static const struct regmap_config pac1921_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &pac1921_regmap_rd_table,
+ .wr_table = &pac1921_regmap_wr_table,
+};
+
+enum pac1921_channels {
+ PAC1921_CHAN_VBUS = 0,
+ PAC1921_CHAN_VSENSE = 1,
+ PAC1921_CHAN_CURRENT = 2,
+ PAC1921_CHAN_POWER = 3,
+};
+#define PAC1921_NUM_MEAS_CHANS 4
+
+struct pac1921_priv {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct regulator *vdd;
+ struct iio_info iio_info;
+
+ /*
+ * Synchronize access to private members, and ensure atomicity of
+ * consecutive regmap operations.
+ */
+ struct mutex lock;
+
+ u32 rshunt_uohm; /* uOhm */
+ u8 dv_gain;
+ u8 di_gain;
+ u8 n_samples;
+ u8 prev_ovf_flags;
+ u8 ovf_enabled_events;
+
+ bool first_integr_started;
+ bool first_integr_done;
+ unsigned long integr_started_time_jiffies;
+ unsigned int integr_period_usecs;
+
+ int current_scales[ARRAY_SIZE(pac1921_vsense_scales)][2];
+
+ struct {
+ u16 chan[PAC1921_NUM_MEAS_CHANS];
+ s64 timestamp __aligned(8);
+ } scan;
+};
+
+/*
+ * Check if first integration after configuration update has completed.
+ *
+ * Must be called with lock held.
+ */
+static bool pac1921_data_ready(struct pac1921_priv *priv)
+{
+ if (!priv->first_integr_started)
+ return false;
+
+ if (!priv->first_integr_done) {
+ unsigned long t_ready;
+
+ /*
+		 * Data is valid once the device has entered the integration
+		 * state (worst case: transitioning out of sleep) and has
+		 * completed the first integration period.
+ */
+ t_ready = priv->integr_started_time_jiffies +
+ usecs_to_jiffies(PAC1921_SLEEP_TO_INT_TIME_US) +
+ usecs_to_jiffies(priv->integr_period_usecs);
+
+ if (time_before(jiffies, t_ready))
+ return false;
+
+ priv->first_integr_done = true;
+ }
+
+ return true;
+}
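To make the window concrete, a sketch with the 1-sample default (values taken from the constants above):

/*
 * t_ready = t_start + 86 us (tSLEEP_TO_INT) + 2720 us (integration)
 *        ~= t_start + 2.8 ms, rounded up to whole jiffies
 */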
+
+static inline void pac1921_calc_scale(int dividend, int divisor, int *val,
+ int *val2)
+{
+ s64 tmp;
+
+ tmp = div_s64(dividend * (s64)NANO, divisor);
+ *val = (int)div_s64_rem(tmp, NANO, val2);
+}
+
+/*
+ * Fill the table of scale factors for current
+ * format: IIO_VAL_INT_PLUS_NANO
+ * unit: mA
+ *
+ * Vsense LSB (nV) = max_vsense (nV) * di_gain / resolution
+ * Current scale (mA) = Vsense LSB (nV) / shunt (uOhm)
+ *
+ * Must be called with the lock held when updating after the first
+ * initialization.
+ */
+static void pac1921_calc_current_scales(struct pac1921_priv *priv)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(priv->current_scales); i++) {
+ int max = (PAC1921_MAX_VSENSE_MV * MICRO) >> i;
+ int vsense_lsb = DIV_ROUND_CLOSEST(max, PAC1921_RES_RESOLUTION);
+
+ pac1921_calc_scale(vsense_lsb, (int)priv->rshunt_uohm,
+ &priv->current_scales[i][0],
+ &priv->current_scales[i][1]);
+ }
+}
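A worked example at di_gain x1 with a hypothetical 15 mOhm shunt (the shunt value is illustrative only, not from the patch):

/*
 * vsense_lsb = DIV_ROUND_CLOSEST(100 * 10^6 nV, 1023) = 97752 nV
 * scale      = 97752 nV / 15000 uOhm ~= 6.5168 mA/LSB
 *            -> current_scales[0] = { 6, 516800000 }
 */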
+
+/*
+ * Check if overflow occurred and if so, push the corresponding events.
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_check_push_overflow(struct iio_dev *indio_dev, s64 timestamp)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ unsigned int flags;
+ int ret;
+
+ ret = regmap_read(priv->regmap, PAC1921_REG_OVERFLOW_STS, &flags);
+ if (ret)
+ return ret;
+
+ if (flags & PAC1921_OVERFLOW_VBOV &&
+ !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VBOV) &&
+ priv->ovf_enabled_events & PAC1921_OVERFLOW_VBOV) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_VOLTAGE, PAC1921_CHAN_VBUS,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+ }
+ if (flags & PAC1921_OVERFLOW_VSOV &&
+ !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VSOV) &&
+ priv->ovf_enabled_events & PAC1921_OVERFLOW_VSOV) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_VOLTAGE, PAC1921_CHAN_VSENSE,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_CURRENT, PAC1921_CHAN_CURRENT,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+ }
+ if (flags & PAC1921_OVERFLOW_VPOV &&
+ !(priv->prev_ovf_flags & PAC1921_OVERFLOW_VPOV) &&
+ priv->ovf_enabled_events & PAC1921_OVERFLOW_VPOV) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_POWER, PAC1921_CHAN_POWER,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ timestamp);
+ }
+
+ priv->prev_ovf_flags = (u8)flags;
+
+ return 0;
+}
+
+/*
+ * Read the value from a result register
+ *
+ * Result registers contain the most recent averaged values of Vbus, Vsense and
+ * Vpower. Each value is 10 bits wide and spread across two consecutive 8 bit
+ * registers, with 6 bit LSB zero padding.
+ */
+static int pac1921_read_res(struct pac1921_priv *priv, unsigned long reg,
+ u16 *val)
+{
+ int ret = regmap_bulk_read(priv->regmap, (unsigned int)reg, val,
+ sizeof(*val));
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(PAC1921_RES_MASK, get_unaligned_be16(val));
+
+ return 0;
+}
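An illustrative decode (bytes chosen arbitrarily):

/*
 * Bytes { 0x80, 0x40 } -> be16 0x8040
 * FIELD_GET(GENMASK(15, 6), 0x8040) = 0x201 = 513
 * (the low 6 bits are the zero padding and are dropped)
 */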
+
+static int pac1921_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+
+ guard(mutex)(&priv->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ s64 ts;
+ u16 res_val;
+ int ret;
+
+ if (!pac1921_data_ready(priv))
+ return -EBUSY;
+
+ ts = iio_get_time_ns(indio_dev);
+
+ ret = pac1921_check_push_overflow(indio_dev, ts);
+ if (ret)
+ return ret;
+
+ ret = pac1921_read_res(priv, chan->address, &res_val);
+ if (ret)
+ return ret;
+
+ *val = (int)res_val;
+
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ *val = pac1921_vbus_scales[priv->dv_gain][0];
+ *val2 = pac1921_vbus_scales[priv->dv_gain][1];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case PAC1921_CHAN_VSENSE:
+ *val = pac1921_vsense_scales[priv->di_gain][0];
+ *val2 = pac1921_vsense_scales[priv->di_gain][1];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case PAC1921_CHAN_CURRENT:
+ *val = priv->current_scales[priv->di_gain][0];
+ *val2 = priv->current_scales[priv->di_gain][1];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case PAC1921_CHAN_POWER: {
+ /*
+ * Power scale factor in mW:
+ * Current scale (mA) * max_vbus (V) / dv_gain
+ */
+
+ /* Get current scale based on di_gain */
+ int *curr_scale = priv->current_scales[priv->di_gain];
+
+ /* Convert current_scale from INT_PLUS_NANO to INT */
+ s64 tmp = curr_scale[0] * (s64)NANO + curr_scale[1];
+
+ /* Multiply by max_vbus (V) / dv_gain */
+ tmp *= PAC1921_MAX_VBUS_V >> (int)priv->dv_gain;
+
+ /* Convert back to INT_PLUS_NANO */
+ *val = (int)div_s64_rem(tmp, NANO, val2);
+
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = pac1921_int_num_samples[priv->n_samples];
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /*
+ * The sampling frequency (Hz) is read-only and corresponds to
+ * how often the device provides integrated measurements into
+ * the result registers, thus it's 1/integration_period.
+ * The integration period depends on the number of integration
+ * samples, measurement resolution and post filters.
+ *
+ * 1/(integr_period_usecs/MICRO) = MICRO/integr_period_usecs
+ */
+ *val = MICRO;
+ *val2 = (int)priv->integr_period_usecs;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *type = IIO_VAL_INT;
+ *vals = pac1921_int_num_samples;
+ *length = ARRAY_SIZE(pac1921_int_num_samples);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Perform configuration update sequence: set the device into read state, then
+ * write the config register and set the device back into integration state.
+ * Also reset the integration start time and mark the first integration as not
+ * yet completed.
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_update_cfg_reg(struct pac1921_priv *priv, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ /* Enter READ state before configuration */
+ int ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, 0);
+ if (ret)
+ return ret;
+
+ /* Update configuration value */
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret)
+ return ret;
+
+ /* Re-enable integration */
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, PAC1921_INT_CFG_INTEN);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset integration started time and mark this integration period as
+ * the first one so that new measurements will be considered as valid
+ * only at the end of this integration period.
+ */
+ priv->integr_started_time_jiffies = jiffies;
+ priv->first_integr_done = false;
+
+ return 0;
+}
+
+/*
+ * Retrieve the index of the given scale (represented by scale_val and
+ * scale_val2) from scales_tbl. The returned index (if found) is the log2 of
+ * the gain corresponding to the given scale.
+ *
+ * Must be called with the lock held if the scales_tbl can change at runtime
+ * (e.g. for the current scales table).
+ */
+static int pac1921_lookup_scale(const int (*const scales_tbl)[2], size_t size,
+ int scale_val, int scale_val2)
+{
+ for (unsigned int i = 0; i < size; i++)
+ if (scales_tbl[i][0] == scale_val &&
+ scales_tbl[i][1] == scale_val2)
+ return (int)i;
+
+ return -EINVAL;
+}
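For instance, looking up the vbus scale entry from the table above:

/*
 * pac1921_lookup_scale(pac1921_vbus_scales, 6, 3, 910068426) returns 3,
 * which is later written as dv_gain, i.e. a gain of 2^3 = 8.
 */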
+
+/*
+ * Configure device with the given gain (only if changed)
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_update_gain(struct pac1921_priv *priv, u8 *priv_val, u8 gain,
+ unsigned int mask)
+{
+ unsigned int reg_val;
+ int ret;
+
+ if (*priv_val == gain)
+ return 0;
+
+ reg_val = (gain << __ffs(mask)) & mask;
+ ret = pac1921_update_cfg_reg(priv, PAC1921_REG_GAIN_CFG, mask, reg_val);
+ if (ret)
+ return ret;
+
+ *priv_val = gain;
+
+ return 0;
+}
+
+/*
+ * Given a scale factor represented by scale_val and scale_val2 with format
+ * IIO_VAL_INT_PLUS_NANO, find the corresponding gain value and write it to the
+ * device.
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_update_gain_from_scale(struct pac1921_priv *priv,
+ struct iio_chan_spec const *chan,
+ int scale_val, int scale_val2)
+{
+ int ret;
+
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ ret = pac1921_lookup_scale(pac1921_vbus_scales,
+ ARRAY_SIZE(pac1921_vbus_scales),
+ scale_val, scale_val2);
+ if (ret < 0)
+ return ret;
+
+ return pac1921_update_gain(priv, &priv->dv_gain, (u8)ret,
+ PAC1921_GAIN_DV_GAIN_MASK);
+ case PAC1921_CHAN_VSENSE:
+ ret = pac1921_lookup_scale(pac1921_vsense_scales,
+ ARRAY_SIZE(pac1921_vsense_scales),
+ scale_val, scale_val2);
+ if (ret < 0)
+ return ret;
+
+ return pac1921_update_gain(priv, &priv->di_gain, (u8)ret,
+ PAC1921_GAIN_DI_GAIN_MASK);
+ case PAC1921_CHAN_CURRENT:
+ ret = pac1921_lookup_scale(priv->current_scales,
+ ARRAY_SIZE(priv->current_scales),
+ scale_val, scale_val2);
+ if (ret < 0)
+ return ret;
+
+ return pac1921_update_gain(priv, &priv->di_gain, (u8)ret,
+ PAC1921_GAIN_DI_GAIN_MASK);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Retrieve the index of the given number of samples from the constant table.
+ * The returned index (if found) is the log2 of the given num_samples.
+ */
+static int pac1921_lookup_int_num_samples(int num_samples)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(pac1921_int_num_samples); i++)
+ if (pac1921_int_num_samples[i] == num_samples)
+ return (int)i;
+
+ return -EINVAL;
+}
+
+/*
+ * Update the device with the given number of integration samples.
+ *
+ * Must be called with lock held.
+ */
+static int pac1921_update_int_num_samples(struct pac1921_priv *priv,
+ int num_samples)
+{
+ unsigned int reg_val;
+ u8 n_samples;
+ int ret;
+
+ ret = pac1921_lookup_int_num_samples(num_samples);
+ if (ret < 0)
+ return ret;
+
+ n_samples = (u8)ret;
+
+ if (priv->n_samples == n_samples)
+ return 0;
+
+ reg_val = FIELD_PREP(PAC1921_INT_CFG_SMPL_MASK, n_samples);
+
+ ret = pac1921_update_cfg_reg(priv, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_SMPL_MASK, reg_val);
+ if (ret)
+ return ret;
+
+ priv->n_samples = n_samples;
+
+ priv->integr_period_usecs = pac1921_int_periods_usecs[priv->n_samples];
+
+ return 0;
+}
+
+static int pac1921_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+
+ guard(mutex)(&priv->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return pac1921_update_gain_from_scale(priv, chan, val, val2);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return pac1921_update_int_num_samples(priv, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ return sprintf(label, "vbus\n");
+ case PAC1921_CHAN_VSENSE:
+ return sprintf(label, "vsense\n");
+ case PAC1921_CHAN_CURRENT:
+ return sprintf(label, "current\n");
+ case PAC1921_CHAN_POWER:
+ return sprintf(label, "power\n");
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+
+ guard(mutex)(&priv->lock);
+
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VBOV);
+ case PAC1921_CHAN_VSENSE:
+ case PAC1921_CHAN_CURRENT:
+ return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VSOV);
+ case PAC1921_CHAN_POWER:
+ return !!(priv->ovf_enabled_events & PAC1921_OVERFLOW_VPOV);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1921_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ u8 ovf_bit;
+
+ guard(mutex)(&priv->lock);
+
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ ovf_bit = PAC1921_OVERFLOW_VBOV;
+ break;
+ case PAC1921_CHAN_VSENSE:
+ case PAC1921_CHAN_CURRENT:
+ ovf_bit = PAC1921_OVERFLOW_VSOV;
+ break;
+ case PAC1921_CHAN_POWER:
+ ovf_bit = PAC1921_OVERFLOW_VPOV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (state)
+ priv->ovf_enabled_events |= ovf_bit;
+ else
+ priv->ovf_enabled_events &= ~ovf_bit;
+
+ return 0;
+}
+
+static int pac1921_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val,
+ int *val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = PAC1921_RES_RESOLUTION;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info pac1921_iio = {
+ .read_raw = pac1921_read_raw,
+ .read_avail = pac1921_read_avail,
+ .write_raw = pac1921_write_raw,
+ .write_raw_get_fmt = pac1921_write_raw_get_fmt,
+ .read_label = pac1921_read_label,
+ .read_event_config = pac1921_read_event_config,
+ .write_event_config = pac1921_write_event_config,
+ .read_event_value = pac1921_read_event_value,
+};
+
+static ssize_t pac1921_read_shunt_resistor(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ int vals[2];
+
+ if (chan->channel != PAC1921_CHAN_CURRENT)
+ return -EINVAL;
+
+ guard(mutex)(&priv->lock);
+
+ vals[0] = (int)priv->rshunt_uohm;
+ vals[1] = MICRO;
+
+ return iio_format_value(buf, IIO_VAL_FRACTIONAL, 1, vals);
+}
+
+static ssize_t pac1921_write_shunt_resistor(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ u64 rshunt_uohm;
+ int val, val_fract;
+ int ret;
+
+ if (chan->channel != PAC1921_CHAN_CURRENT)
+ return -EINVAL;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
+ if (ret)
+ return ret;
+
+ rshunt_uohm = (u32)val * MICRO + (u32)val_fract;
+ if (rshunt_uohm == 0 || rshunt_uohm > INT_MAX)
+ return -EINVAL;
+
+ guard(mutex)(&priv->lock);
+
+ priv->rshunt_uohm = (u32)rshunt_uohm;
+
+ pac1921_calc_current_scales(priv);
+
+ return len;
+}
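To illustrate the fixed-point parsing (the input string is chosen arbitrarily):

/*
 * Writing "0.015" -> iio_str_to_fixpoint(..., 100000, ...) yields
 * val = 0, val_fract = 15000 (micro precision), so
 * rshunt_uohm = 0 * MICRO + 15000 = 15000 uOhm (15 mOhm)
 */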
+
+/*
+ * Emit on sysfs the list of available scales contained in scales_tbl
+ *
+ * TODO: this function can be replaced with iio_format_avail_list() if the
+ * latter is ever exported.
+ *
+ * Must be called with the lock held if the scales_tbl can change at runtime
+ * (e.g. for the current scales table).
+ */
+static ssize_t pac1921_format_scale_avail(const int (*const scales_tbl)[2],
+ size_t size, char *buf)
+{
+ ssize_t len = 0;
+
+ for (unsigned int i = 0; i < size; i++) {
+ if (i != 0) {
+ len += sysfs_emit_at(buf, len, " ");
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+ len += sysfs_emit_at(buf, len, "%d.%09d", scales_tbl[i][0],
+ scales_tbl[i][1]);
+ if (len >= PAGE_SIZE)
+ return -EFBIG;
+ }
+
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
+}
+
+/*
+ * Read available scales for a specific channel
+ *
+ * NOTE: using extended info instead of iio.read_avail() because access to the
+ * current scales must be locked, as they depend on the shunt resistor which
+ * may change at runtime. A caller of iio.read_avail() would access the table
+ * unlocked instead.
+ */
+static ssize_t pac1921_read_scale_avail(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ const int (*scales_tbl)[2];
+ size_t size;
+
+ switch (chan->channel) {
+ case PAC1921_CHAN_VBUS:
+ scales_tbl = pac1921_vbus_scales;
+ size = ARRAY_SIZE(pac1921_vbus_scales);
+ return pac1921_format_scale_avail(scales_tbl, size, buf);
+
+ case PAC1921_CHAN_VSENSE:
+ scales_tbl = pac1921_vsense_scales;
+ size = ARRAY_SIZE(pac1921_vsense_scales);
+ return pac1921_format_scale_avail(scales_tbl, size, buf);
+
+ case PAC1921_CHAN_CURRENT: {
+ guard(mutex)(&priv->lock);
+ scales_tbl = priv->current_scales;
+ size = ARRAY_SIZE(priv->current_scales);
+ return pac1921_format_scale_avail(scales_tbl, size, buf);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+#define PAC1921_EXT_INFO_SCALE_AVAIL { \
+ .name = "scale_available", \
+ .read = pac1921_read_scale_avail, \
+ .shared = IIO_SEPARATE, \
+}
+
+static const struct iio_chan_spec_ext_info pac1921_ext_info_voltage[] = {
+ PAC1921_EXT_INFO_SCALE_AVAIL,
+ {}
+};
+
+static const struct iio_chan_spec_ext_info pac1921_ext_info_current[] = {
+ PAC1921_EXT_INFO_SCALE_AVAIL,
+ {
+ .name = "shunt_resistor",
+ .read = pac1921_read_shunt_resistor,
+ .write = pac1921_write_shunt_resistor,
+ .shared = IIO_SEPARATE,
+ },
+ {}
+};
+
+static const struct iio_event_spec pac1921_overflow_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_all = BIT(IIO_EV_INFO_VALUE),
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec pac1921_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .channel = PAC1921_CHAN_VBUS,
+ .address = PAC1921_REG_VBUS,
+ .scan_index = PAC1921_CHAN_VBUS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 10,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ .indexed = 1,
+ .event_spec = pac1921_overflow_event,
+ .num_event_specs = ARRAY_SIZE(pac1921_overflow_event),
+ .ext_info = pac1921_ext_info_voltage,
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .channel = PAC1921_CHAN_VSENSE,
+ .address = PAC1921_REG_VSENSE,
+ .scan_index = PAC1921_CHAN_VSENSE,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 10,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ .indexed = 1,
+ .event_spec = pac1921_overflow_event,
+ .num_event_specs = ARRAY_SIZE(pac1921_overflow_event),
+ .ext_info = pac1921_ext_info_voltage,
+ },
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .channel = PAC1921_CHAN_CURRENT,
+ .address = PAC1921_REG_VSENSE,
+ .scan_index = PAC1921_CHAN_CURRENT,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 10,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ .event_spec = pac1921_overflow_event,
+ .num_event_specs = ARRAY_SIZE(pac1921_overflow_event),
+ .ext_info = pac1921_ext_info_current,
+ },
+ {
+ .type = IIO_POWER,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .channel = PAC1921_CHAN_POWER,
+ .address = PAC1921_REG_VPOWER,
+ .scan_index = PAC1921_CHAN_POWER,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 10,
+ .storagebits = 16,
+ .endianness = IIO_CPU
+ },
+ .event_spec = pac1921_overflow_event,
+ .num_event_specs = ARRAY_SIZE(pac1921_overflow_event),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(PAC1921_NUM_MEAS_CHANS),
+};
+
+static irqreturn_t pac1921_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *idev = pf->indio_dev;
+ struct pac1921_priv *priv = iio_priv(idev);
+ int ret;
+ int bit;
+ int ch = 0;
+
+ guard(mutex)(&priv->lock);
+
+ if (!pac1921_data_ready(priv))
+ goto done;
+
+ ret = pac1921_check_push_overflow(idev, pf->timestamp);
+ if (ret)
+ goto done;
+
+ iio_for_each_active_channel(idev, bit) {
+ u16 val;
+
+ ret = pac1921_read_res(priv, idev->channels[ch].address, &val);
+ if (ret)
+ goto done;
+
+ priv->scan.chan[ch++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(idev, &priv->scan, pf->timestamp);
+
+done:
+ iio_trigger_notify_done(idev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Initialize device by writing initial configuration and putting it into
+ * integration state.
+ *
+ * Must be called with the lock held when invoked after the first
+ * initialization (e.g. from PM resume).
+ */
+static int pac1921_init(struct pac1921_priv *priv)
+{
+ unsigned int val;
+ int ret;
+
+ /* Enter READ state before configuration */
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, 0);
+ if (ret)
+ return ret;
+
+ /* Configure gains, use 14-bits measurement resolution (HW default) */
+ val = FIELD_PREP(PAC1921_GAIN_DI_GAIN_MASK, priv->di_gain) |
+ FIELD_PREP(PAC1921_GAIN_DV_GAIN_MASK, priv->dv_gain);
+ ret = regmap_write(priv->regmap, PAC1921_REG_GAIN_CFG, val);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure integration:
+ * - num of integration samples
+ * - filters enabled (HW default)
+ * - set READ/INT pin override (RIOV) to control operation mode via
+ * register instead of pin
+ */
+ val = FIELD_PREP(PAC1921_INT_CFG_SMPL_MASK, priv->n_samples) |
+ PAC1921_INT_CFG_VSFEN | PAC1921_INT_CFG_VBFEN |
+ PAC1921_INT_CFG_RIOV;
+ ret = regmap_write(priv->regmap, PAC1921_REG_INT_CFG, val);
+ if (ret)
+ return ret;
+
+ /*
+ * Init control register:
+ * - VPower free run integration mode
+ * - OUT pin full scale range: 3V (HW default)
+ * - no timeout, no sleep, no sleep override, no recalc (HW defaults)
+ */
+ val = FIELD_PREP(PAC1921_CONTROL_MXSL_MASK,
+ PAC1921_MXSL_VPOWER_FREE_RUN);
+ ret = regmap_write(priv->regmap, PAC1921_REG_CONTROL, val);
+ if (ret)
+ return ret;
+
+ /* Enable integration */
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, PAC1921_INT_CFG_INTEN);
+ if (ret)
+ return ret;
+
+ priv->first_integr_started = true;
+ priv->integr_started_time_jiffies = jiffies;
+ priv->integr_period_usecs = pac1921_int_periods_usecs[priv->n_samples];
+
+ return 0;
+}
+
+static int pac1921_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&priv->lock);
+
+ priv->first_integr_started = false;
+ priv->first_integr_done = false;
+
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_INT_CFG,
+ PAC1921_INT_CFG_INTEN, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(priv->regmap, PAC1921_REG_CONTROL,
+ PAC1921_CONTROL_SLEEP, PAC1921_CONTROL_SLEEP);
+ if (ret)
+ return ret;
+
+ return regulator_disable(priv->vdd);
+}
+
+static int pac1921_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct pac1921_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&priv->lock);
+
+ ret = regulator_enable(priv->vdd);
+ if (ret)
+ return ret;
+
+ msleep(PAC1921_POWERUP_TIME_MS);
+
+ return pac1921_init(priv);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pac1921_pm_ops, pac1921_suspend,
+ pac1921_resume);
+
+static void pac1921_regulator_disable(void *data)
+{
+ struct regulator *regulator = data;
+
+ regulator_disable(regulator);
+}
+
+static int pac1921_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pac1921_priv *priv;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ priv->regmap = devm_regmap_init_i2c(client, &pac1921_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, (int)PTR_ERR(priv->regmap),
+ "Cannot initialize register map\n");
+
+ devm_mutex_init(dev, &priv->lock);
+
+ priv->dv_gain = PAC1921_DEFAULT_DV_GAIN;
+ priv->di_gain = PAC1921_DEFAULT_DI_GAIN;
+ priv->n_samples = PAC1921_DEFAULT_NUM_SAMPLES;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &priv->rshunt_uohm);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot read shunt resistor property\n");
+ if (priv->rshunt_uohm == 0 || priv->rshunt_uohm > INT_MAX)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid shunt resistor: %u\n",
+ priv->rshunt_uohm);
+
+ pac1921_calc_current_scales(priv);
+
+ priv->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(priv->vdd))
+ return dev_err_probe(dev, (int)PTR_ERR(priv->vdd),
+ "Cannot get vdd regulator\n");
+
+ ret = regulator_enable(priv->vdd);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable vdd regulator\n");
+
+ ret = devm_add_action_or_reset(dev, pac1921_regulator_disable,
+ priv->vdd);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot add action for vdd regulator disposal\n");
+
+ msleep(PAC1921_POWERUP_TIME_MS);
+
+ ret = pac1921_init(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot initialize device\n");
+
+ priv->iio_info = pac1921_iio;
+
+ indio_dev->name = "pac1921";
+ indio_dev->info = &priv->iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = pac1921_channels;
+ indio_dev->num_channels = ARRAY_SIZE(pac1921_channels);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &pac1921_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot setup IIO triggered buffer\n");
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register IIO device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id pac1921_id[] = {
+ { .name = "pac1921", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pac1921_id);
+
+static const struct of_device_id pac1921_of_match[] = {
+ { .compatible = "microchip,pac1921" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pac1921_of_match);
+
+static struct i2c_driver pac1921_driver = {
+ .driver = {
+ .name = "pac1921",
+ .pm = pm_sleep_ptr(&pac1921_pm_ops),
+ .of_match_table = pac1921_of_match,
+ },
+ .probe = pac1921_probe,
+ .id_table = pac1921_id,
+};
+
+module_i2c_driver(pac1921_driver);
+
+MODULE_AUTHOR("Matteo Martelli <matteomartelli3@gmail.com>");
+MODULE_DESCRIPTION("IIO driver for PAC1921 High-Side Power/Current Monitor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index ae24a27805ab..8210728034d0 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -1571,7 +1571,7 @@ static const struct i2c_device_id pac1934_id[] = {
{ .name = "pac1932", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1932] },
{ .name = "pac1933", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1933] },
{ .name = "pac1934", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, pac1934_id);
@@ -1592,7 +1592,7 @@ static const struct of_device_id pac1934_of_match[] = {
.compatible = "microchip,pac1934",
.data = &pac1934_chip_config[PAC1934]
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, pac1934_of_match);
@@ -1602,7 +1602,7 @@ MODULE_DEVICE_TABLE(of, pac1934_of_match);
*/
static const struct acpi_device_id pac1934_acpi_match[] = {
{ "MCHP1930", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, pac1934_acpi_match);
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index c9d2c66434e4..9e1112f5acc6 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -1006,7 +1006,7 @@ static const struct of_device_id pm8xxx_xoadc_id_table[] = {
.compatible = "qcom,pm8921-adc",
.data = &pm8921_variant,
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, pm8xxx_xoadc_id_table);
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
index 1402df68dd52..6aa70b4629a7 100644
--- a/drivers/iio/adc/qcom-spmi-rradc.c
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -1002,7 +1002,7 @@ static int rradc_probe(struct platform_device *pdev)
static const struct of_device_id rradc_match_table[] = {
{ .compatible = "qcom,pm660-rradc" },
{ .compatible = "qcom,pmi8998-rradc" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, rradc_match_table);
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index bbe954a738c7..240cfa391674 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -331,7 +331,7 @@ static const struct of_device_id rockchip_saradc_match[] = {
.compatible = "rockchip,rk3588-saradc",
.data = &rk3588_saradc_data,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
@@ -370,7 +370,7 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
mutex_lock(&info->lock);
- for_each_set_bit(i, i_dev->active_scan_mask, i_dev->masklength) {
+ iio_for_each_active_channel(i_dev, i) {
const struct iio_chan_spec *chan = &i_dev->channels[i];
ret = rockchip_saradc_conversion(info, chan);
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index bcb129840908..56ed948a8ae1 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -643,7 +643,7 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
pm_runtime_get_sync(dev);
- for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
unsigned int addr = rtq6056_channels[bit].address;
ret = regmap_read(priv->regmap, addr, &raw);
@@ -865,7 +865,7 @@ static const struct richtek_dev_data rtq6059_devdata = {
static const struct of_device_id rtq6056_device_match[] = {
{ .compatible = "richtek,rtq6056", .data = &rtq6056_devdata },
{ .compatible = "richtek,rtq6059", .data = &rtq6059_devdata },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, rtq6056_device_match);
diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c
index 327cc2097f6c..654b6a38b650 100644
--- a/drivers/iio/adc/sd_adc_modulator.c
+++ b/drivers/iio/adc/sd_adc_modulator.c
@@ -6,11 +6,14 @@
* Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
*/
+#include <linux/iio/backend.h>
#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
static const struct iio_info iio_sd_mod_iio_info;
@@ -24,7 +27,59 @@ static const struct iio_chan_spec iio_sd_mod_ch = {
},
};
-static int iio_sd_mod_probe(struct platform_device *pdev)
+struct iio_sd_backend_priv {
+ struct regulator *vref;
+ int vref_mv;
+};
+
+static int iio_sd_mod_enable(struct iio_backend *backend)
+{
+ struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend);
+
+ if (priv->vref)
+ return regulator_enable(priv->vref);
+
+ return 0;
+}
+
+static void iio_sd_mod_disable(struct iio_backend *backend)
+{
+ struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend);
+
+ if (priv->vref)
+ regulator_disable(priv->vref);
+}
+
+static int iio_sd_mod_read(struct iio_backend *backend, struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct iio_sd_backend_priv *priv = iio_backend_get_priv(backend);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *val = priv->vref_mv;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 0;
+ return IIO_VAL_INT;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct iio_backend_ops sd_backend_ops = {
+ .enable = iio_sd_mod_enable,
+ .disable = iio_sd_mod_disable,
+ .read_raw = iio_sd_mod_read,
+};
+
+static const struct iio_backend_info sd_backend_info = {
+ .name = "sd-modulator",
+ .ops = &sd_backend_ops,
+};
+
+static int iio_sd_mod_register(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct iio_dev *iio;
@@ -45,6 +100,45 @@ static int iio_sd_mod_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, iio);
}
+static int iio_sd_mod_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regulator *vref;
+ struct iio_sd_backend_priv *priv;
+ int ret;
+
+	/* If sd modulator is not defined as an IIO backend device, fall back to legacy mode */
+ if (!device_property_present(dev, "#io-backend-cells"))
+ return iio_sd_mod_register(pdev);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * Get regulator reference if any, but don't enable regulator right now.
+ * Rely on enable and disable callbacks to manage regulator power.
+ */
+ vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(vref)) {
+ if (PTR_ERR(vref) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(vref), "Failed to get vref\n");
+ } else {
+ /*
+		 * Retrieve the voltage right now, as regulator_get_voltage() provides it
+		 * regardless of the regulator state.
+ */
+ ret = regulator_get_voltage(vref);
+ if (ret < 0)
+ return ret;
+
+ priv->vref = vref;
+ priv->vref_mv = ret / 1000;
+ }
+
+ return devm_iio_backend_register(&pdev->dev, &sd_backend_info, priv);
+}
+
static const struct of_device_id sd_adc_of_match[] = {
{ .compatible = "sd-modulator" },
{ .compatible = "ads1201" },
@@ -65,3 +159,4 @@ module_platform_driver(iio_sd_mod_adc);
MODULE_DESCRIPTION("Basic sigma delta modulator");
MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_BACKEND);
diff --git a/drivers/iio/adc/sophgo-cv1800b-adc.c b/drivers/iio/adc/sophgo-cv1800b-adc.c
new file mode 100644
index 000000000000..0951deb7b111
--- /dev/null
+++ b/drivers/iio/adc/sophgo-cv1800b-adc.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Sophgo CV1800B SARADC Driver
+ *
+ * Copyright (C) Bootlin 2024
+ * Author: Thomas Bonnefille <thomas.bonnefille@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#define CV1800B_ADC_CTRL_REG 0x04
+#define CV1800B_ADC_EN BIT(0)
+#define CV1800B_ADC_SEL(x) BIT((x) + 5)
+#define CV1800B_ADC_STATUS_REG 0x08
+#define CV1800B_ADC_BUSY BIT(0)
+#define CV1800B_ADC_CYC_SET_REG 0x0C
+#define CV1800B_MASK_STARTUP_CYCLE GENMASK(4, 0)
+#define CV1800B_MASK_SAMPLE_WINDOW GENMASK(11, 8)
+#define CV1800B_MASK_CLKDIV GENMASK(15, 12)
+#define CV1800B_MASK_COMPARE_CYCLE GENMASK(19, 16)
+#define CV1800B_ADC_CH_RESULT_REG(x) (0x14 + 4 * (x))
+#define CV1800B_ADC_CH_RESULT GENMASK(11, 0)
+#define CV1800B_ADC_CH_VALID BIT(15)
+#define CV1800B_ADC_INTR_EN_REG 0x20
+#define CV1800B_ADC_INTR_CLR_REG 0x24
+#define CV1800B_ADC_INTR_CLR_BIT BIT(0)
+#define CV1800B_ADC_INTR_STA_REG 0x28
+#define CV1800B_ADC_INTR_STA_BIT BIT(0)
+#define CV1800B_READ_TIMEOUT_MS 1000
+#define CV1800B_READ_TIMEOUT_US (CV1800B_READ_TIMEOUT_MS * 1000)
+
+#define CV1800B_ADC_CHANNEL(index) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .scan_index = index, \
+ }
+
+struct cv1800b_adc {
+ struct completion completion;
+ void __iomem *regs;
+ struct mutex lock; /* ADC Control and Result register */
+ struct clk *clk;
+ int irq;
+};
+
+static const struct iio_chan_spec sophgo_channels[] = {
+ CV1800B_ADC_CHANNEL(0),
+ CV1800B_ADC_CHANNEL(1),
+ CV1800B_ADC_CHANNEL(2),
+};
+
+static void cv1800b_adc_start_measurement(struct cv1800b_adc *saradc,
+ int channel)
+{
+ writel(0, saradc->regs + CV1800B_ADC_CTRL_REG);
+ writel(CV1800B_ADC_SEL(channel) | CV1800B_ADC_EN,
+ saradc->regs + CV1800B_ADC_CTRL_REG);
+}
+
+static int cv1800b_adc_wait(struct cv1800b_adc *saradc)
+{
+ if (saradc->irq < 0) {
+ u32 reg;
+
+ return readl_poll_timeout(saradc->regs + CV1800B_ADC_STATUS_REG,
+ reg, !(reg & CV1800B_ADC_BUSY),
+ 500, CV1800B_READ_TIMEOUT_US);
+ }
+
+ return wait_for_completion_timeout(&saradc->completion,
+ msecs_to_jiffies(CV1800B_READ_TIMEOUT_MS)) > 0 ?
+ 0 : -ETIMEDOUT;
+}
+
+static int cv1800b_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cv1800b_adc *saradc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ u32 sample;
+
+ scoped_guard(mutex, &saradc->lock) {
+ int ret;
+
+ cv1800b_adc_start_measurement(saradc, chan->scan_index);
+ ret = cv1800b_adc_wait(saradc);
+ if (ret < 0)
+ return ret;
+
+ sample = readl(saradc->regs + CV1800B_ADC_CH_RESULT_REG(chan->scan_index));
+ }
+ if (!(sample & CV1800B_ADC_CH_VALID))
+ return -ENODATA;
+
+ *val = sample & CV1800B_ADC_CH_RESULT;
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 3300;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ u32 status_reg = readl(saradc->regs + CV1800B_ADC_CYC_SET_REG);
+ unsigned int clk_div = (1 + FIELD_GET(CV1800B_MASK_CLKDIV, status_reg));
+ unsigned int freq = clk_get_rate(saradc->clk) / clk_div;
+ unsigned int nb_startup_cycle = 1 + FIELD_GET(CV1800B_MASK_STARTUP_CYCLE, status_reg);
+ unsigned int nb_sample_cycle = 1 + FIELD_GET(CV1800B_MASK_SAMPLE_WINDOW, status_reg);
+ unsigned int nb_compare_cycle = 1 + FIELD_GET(CV1800B_MASK_COMPARE_CYCLE, status_reg);
+
+ *val = freq / (nb_startup_cycle + nb_sample_cycle + nb_compare_cycle);
+ return IIO_VAL_INT;
+ }
+ default:
+ return -EINVAL;
+ }
+}
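Putting numbers to the SAMP_FREQ arithmetic, using the CYC_SET value written in probe (CLKDIV 1, STARTUP/SAMPLE/COMPARE all 15) and a hypothetical 3.2 MHz input clock (the clock rate is an assumption for illustration):

/*
 * clk_div = 1 + 1 = 2          -> freq = 3200000 / 2 = 1600000 Hz
 * cycles  = 16 + 16 + 16 = 48  -> rate = 1600000 / 48 ~= 33333 Hz
 */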
+
+static irqreturn_t cv1800b_adc_interrupt_handler(int irq, void *private)
+{
+ struct cv1800b_adc *saradc = private;
+ u32 reg = readl(saradc->regs + CV1800B_ADC_INTR_STA_REG);
+
+ if (!(FIELD_GET(CV1800B_ADC_INTR_STA_BIT, reg)))
+ return IRQ_NONE;
+
+ writel(CV1800B_ADC_INTR_CLR_BIT, saradc->regs + CV1800B_ADC_INTR_CLR_REG);
+ complete(&saradc->completion);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info cv1800b_adc_info = {
+ .read_raw = &cv1800b_adc_read_raw,
+};
+
+static int cv1800b_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800b_adc *saradc;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*saradc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ saradc = iio_priv(indio_dev);
+ indio_dev->name = "sophgo-cv1800b-adc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &cv1800b_adc_info;
+ indio_dev->num_channels = ARRAY_SIZE(sophgo_channels);
+ indio_dev->channels = sophgo_channels;
+
+ saradc->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(saradc->clk))
+ return PTR_ERR(saradc->clk);
+
+ saradc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(saradc->regs))
+ return PTR_ERR(saradc->regs);
+
+ saradc->irq = platform_get_irq_optional(pdev, 0);
+ if (saradc->irq > 0) {
+ init_completion(&saradc->completion);
+ ret = devm_request_irq(dev, saradc->irq,
+ cv1800b_adc_interrupt_handler, 0,
+ dev_name(dev), saradc);
+ if (ret)
+ return ret;
+
+ writel(1, saradc->regs + CV1800B_ADC_INTR_EN_REG);
+ }
+
+ ret = devm_mutex_init(dev, &saradc->lock);
+ if (ret)
+ return ret;
+
+ writel(FIELD_PREP(CV1800B_MASK_STARTUP_CYCLE, 15) |
+ FIELD_PREP(CV1800B_MASK_SAMPLE_WINDOW, 15) |
+ FIELD_PREP(CV1800B_MASK_CLKDIV, 1) |
+ FIELD_PREP(CV1800B_MASK_COMPARE_CYCLE, 15),
+ saradc->regs + CV1800B_ADC_CYC_SET_REG);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id cv1800b_adc_match[] = {
+ { .compatible = "sophgo,cv1800b-saradc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800b_adc_match);
+
+static struct platform_driver cv1800b_adc_driver = {
+ .driver = {
+ .name = "sophgo-cv1800b-saradc",
+ .of_match_table = cv1800b_adc_match,
+ },
+ .probe = cv1800b_adc_probe,
+};
+module_platform_driver(cv1800b_adc_driver);
+
+MODULE_AUTHOR("Thomas Bonnefille <thomas.bonnefille@bootlin.com>");
+MODULE_DESCRIPTION("Sophgo CV1800B SARADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 375aa7720f80..32ca26ed59f7 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1261,7 +1261,7 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
stm32_adc_writel(adc, adc->cfg->regs->smpr[0], adc->smpr_val[0]);
stm32_adc_writel(adc, adc->cfg->regs->smpr[1], adc->smpr_val[1]);
- for_each_set_bit(bit, scan_mask, indio_dev->masklength) {
+ for_each_set_bit(bit, scan_mask, iio_get_masklength(indio_dev)) {
chan = indio_dev->channels + bit;
/*
* Assign one channel per SQ entry in regular
@@ -1619,7 +1619,7 @@ static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- adc->num_conv = bitmap_weight(scan_mask, indio_dev->masklength);
+ adc->num_conv = bitmap_weight(scan_mask, iio_get_masklength(indio_dev));
ret = stm32_adc_conf_scan_seq(indio_dev, scan_mask);
pm_runtime_mark_last_busy(dev);
@@ -2638,7 +2638,7 @@ static const struct of_device_id stm32_adc_of_match[] = {
{ .compatible = "st,stm32h7-adc", .data = (void *)&stm32h7_adc_cfg },
{ .compatible = "st,stm32mp1-adc", .data = (void *)&stm32mp1_adc_cfg },
{ .compatible = "st,stm32mp13-adc", .data = (void *)&stm32mp13_adc_cfg },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index fabd654245f5..2037f73426d4 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -9,6 +9,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iio/adc/stm32-dfsdm-adc.h>
+#include <linux/iio/backend.h>
#include <linux/iio/buffer.h>
#include <linux/iio/hw-consumer.h>
#include <linux/iio/sysfs.h>
@@ -78,6 +79,7 @@ struct stm32_dfsdm_adc {
/* ADC specific */
unsigned int oversamp;
struct iio_hw_consumer *hwc;
+ struct iio_backend **backend;
struct completion completion;
u32 *buffer;
@@ -666,6 +668,74 @@ static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
return 0;
}
+static int stm32_dfsdm_generic_channel_parse_of(struct stm32_dfsdm *dfsdm,
+ struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch,
+ struct fwnode_handle *node)
+{
+ struct stm32_dfsdm_channel *df_ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct iio_backend *backend;
+ const char *of_str;
+ int ret, val;
+
+ ret = fwnode_property_read_u32(node, "reg", &ch->channel);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Missing channel index %d\n", ret);
+ return ret;
+ }
+
+ if (ch->channel >= dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, " Error bad channel number %d (max = %d)\n",
+ ch->channel, dfsdm->num_chs);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_string(node, "label", &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'label' for idx %d\n", ch->channel);
+ return ret;
+ }
+
+ df_ch = &dfsdm->ch_list[ch->channel];
+ df_ch->id = ch->channel;
+
+ ret = fwnode_property_read_string(node, "st,adc-channel-type", &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->type = val;
+
+ ret = fwnode_property_read_string(node, "st,adc-channel-clk-src", &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->src = val;
+
+ ret = fwnode_property_read_u32(node, "st,adc-alt-channel", &df_ch->alt_si);
+ if (ret != -EINVAL)
+ df_ch->alt_si = 0;
+
+ if (adc->dev_data->type == DFSDM_IIO) {
+ backend = devm_iio_backend_fwnode_get(&indio_dev->dev, NULL, node);
+ if (IS_ERR(backend))
+ return dev_err_probe(&indio_dev->dev, PTR_ERR(backend),
+ "Failed to get backend\n");
+ adc->backend[ch->scan_index] = backend;
+ }
+
+ return 0;
+}
+
static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
uintptr_t priv,
const struct iio_chan_spec *chan,
@@ -987,7 +1057,7 @@ static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev,
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- adc->nconv = bitmap_weight(scan_mask, indio_dev->masklength);
+ adc->nconv = bitmap_weight(scan_mask, iio_get_masklength(indio_dev));
adc->smask = *scan_mask;
dev_dbg(&indio_dev->dev, "nconv=%d mask=%lx\n", adc->nconv, *scan_mask);
@@ -998,6 +1068,7 @@ static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev,
static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int i = 0;
int ret;
/* Reset adc buffer index */
@@ -1009,6 +1080,15 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
return ret;
}
+ if (adc->backend) {
+ while (adc->backend[i]) {
+ ret = iio_backend_enable(adc->backend[i]);
+ if (ret < 0)
+ return ret;
+ i++;
+ }
+ }
+
ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
if (ret < 0)
goto err_stop_hwc;
@@ -1041,6 +1121,7 @@ err_stop_hwc:
static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int i = 0;
stm32_dfsdm_stop_conv(indio_dev);
@@ -1048,6 +1129,13 @@ static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+ if (adc->backend) {
+ while (adc->backend[i]) {
+ iio_backend_disable(adc->backend[i]);
+ i++;
+ }
+ }
+
if (adc->hwc)
iio_hw_consumer_disable(adc->hwc);
@@ -1220,14 +1308,25 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_filter_osr *flo = &fl->flo[fl->fast];
+ u32 max = flo->max << (flo->lshift - chan->scan_type.shift);
+ int idx = chan->scan_index;
int ret;
+ if (flo->lshift < chan->scan_type.shift)
+ max = flo->max >> (chan->scan_type.shift - flo->lshift);
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
- ret = iio_hw_consumer_enable(adc->hwc);
+ if (adc->hwc)
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (adc->backend)
+ ret = iio_backend_enable(adc->backend[idx]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"%s: IIO enable failed (channel %d)\n",
@@ -1236,7 +1335,10 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
return ret;
}
ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
- iio_hw_consumer_disable(adc->hwc);
+ if (adc->hwc)
+ iio_hw_consumer_disable(adc->hwc);
+ if (adc->backend)
+ iio_backend_disable(adc->backend[idx]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"%s: Conversion failed (channel %d)\n",
@@ -1256,6 +1358,50 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
*val = adc->sample_freq;
return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * Scale is expressed in mV.
+ * When fast mode is disabled, actual resolution may be lower
+ * than 2^n, where n = realbits - 1.
+ * This leads to underestimating the input voltage.
+ * To compensate for this deviation, the voltage reference can be
+ * corrected with a factor = realbits resolution / actual max.
+ */
+ if (adc->backend) {
+ ret = iio_backend_read_scale(adc->backend[idx], chan, val, NULL);
+ if (ret < 0)
+ return ret;
+
+ *val = div_u64((u64)*val * (u64)BIT(DFSDM_DATA_RES - 1), max);
+ *val2 = chan->scan_type.realbits;
+ if (chan->differential)
+ *val *= 2;
+ }
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * DFSDM output data are in the range [-2^n, 2^n],
+ * with n = realbits - 1.
+ * - Differential modulator:
+ * Offset corresponds to the SD modulator offset.
+ * - Single ended modulator:
+ * Input is in [0V, Vref] range,
+ * where 0V corresponds to -2^n, and Vref to 2^n.
+ * Add 2^n to the offset (i.e. the middle of the input range).
+ * offset = offset(sd) * vref / res(sd) * max / vref.
+ */
+ if (adc->backend) {
+ ret = iio_backend_read_offset(adc->backend[idx], chan, val, NULL);
+ if (ret < 0)
+ return ret;
+
+ *val = div_u64((u64)max * *val, BIT(*val2 - 1));
+ if (!chan->differential)
+ *val += max;
+ }
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -1320,7 +1466,7 @@ static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = {
.read = dfsdm_adc_audio_get_spiclk,
.write = dfsdm_adc_audio_set_spiclk,
},
- {},
+ { }
};
static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
@@ -1362,15 +1508,18 @@ static int stm32_dfsdm_dma_request(struct device *dev,
return 0;
}
-static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
- struct iio_chan_spec *ch)
+static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, struct iio_chan_spec *ch,
+ struct fwnode_handle *child)
{
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
int ret;
- ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
+ if (child)
+ ret = stm32_dfsdm_generic_channel_parse_of(adc->dfsdm, indio_dev, ch, child);
+ else /* Legacy binding */
+ ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
if (ret < 0)
- return ret;
+ return dev_err_probe(&indio_dev->dev, ret, "Failed to parse channel\n");
ch->type = IIO_VOLTAGE;
ch->indexed = 1;
@@ -1379,12 +1528,21 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
* IIO_CHAN_INFO_RAW: used to compute regular conversion
* IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
*/
- ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ if (child) {
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET);
+ } else {
+ /* Legacy. Scaling not supported */
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ }
+
ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
BIT(IIO_CHAN_INFO_SAMP_FREQ);
if (adc->dev_data->type == DFSDM_AUDIO) {
ch->ext_info = dfsdm_adc_audio_ext_info;
+ ch->scan_index = 0;
} else {
ch->scan_type.shift = 8;
}
@@ -1396,20 +1554,67 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
&adc->dfsdm->ch_list[ch->channel]);
}
+static int stm32_dfsdm_chan_init(struct iio_dev *indio_dev, struct iio_chan_spec *channels)
+{
+ int num_ch = indio_dev->num_channels;
+ int chan_idx = 0;
+ int ret;
+
+ for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
+ channels[chan_idx].scan_index = chan_idx;
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &channels[chan_idx], NULL);
+ if (ret < 0)
+ return dev_err_probe(&indio_dev->dev, ret, "Channels init failed\n");
+ }
+
+ return 0;
+}
+
+static int stm32_dfsdm_generic_chan_init(struct iio_dev *indio_dev, struct iio_chan_spec *channels)
+{
+ int chan_idx = 0, ret;
+
+ device_for_each_child_node_scoped(&indio_dev->dev, child) {
+ /* Skip DAI node in DFSDM audio nodes */
+ if (fwnode_property_present(child, "compatible"))
+ continue;
+
+ channels[chan_idx].scan_index = chan_idx;
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &channels[chan_idx], child);
+ if (ret < 0)
+ return dev_err_probe(&indio_dev->dev, ret, "Channels init failed\n");
+
+ chan_idx++;
+ }
+
+ return chan_idx;
+}
+
static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
struct stm32_dfsdm_channel *d_ch;
- int ret;
+ bool legacy = false;
+ int num_ch, ret;
+
+ /* If st,adc-channels is defined, the legacy binding is used. Otherwise assume the generic binding. */
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node, "st,adc-channels");
+ if (num_ch == 1)
+ legacy = true;
ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
if (!ch)
return -ENOMEM;
- ch->scan_index = 0;
+ indio_dev->num_channels = 1;
+ indio_dev->channels = ch;
+
+ if (legacy)
+ ret = stm32_dfsdm_chan_init(indio_dev, ch);
+ else
+ ret = stm32_dfsdm_generic_chan_init(indio_dev, ch);
- ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
if (ret < 0) {
dev_err(&indio_dev->dev, "Channels init failed\n");
return ret;
@@ -1420,9 +1625,6 @@ static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
adc->spi_freq = adc->dfsdm->spi_master_freq;
- indio_dev->num_channels = 1;
- indio_dev->channels = ch;
-
return stm32_dfsdm_dma_request(dev, indio_dev);
}
@@ -1430,43 +1632,61 @@ static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev)
{
struct iio_chan_spec *ch;
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
- int num_ch;
- int ret, chan_idx;
+ int num_ch, ret;
+ bool legacy = false;
adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
ret = stm32_dfsdm_compute_all_osrs(indio_dev, adc->oversamp);
if (ret < 0)
return ret;
- num_ch = of_property_count_u32_elems(indio_dev->dev.of_node,
- "st,adc-channels");
- if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) {
- dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
- return num_ch < 0 ? num_ch : -EINVAL;
+ num_ch = device_get_child_node_count(&indio_dev->dev);
+ if (!num_ch) {
+ /* No channel nodes found. Assume legacy binding. */
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node, "st,adc-channels");
+ if (num_ch < 0) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
+ return num_ch;
+ }
+
+ legacy = true;
}
- /* Bind to SD modulator IIO device */
- adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
- if (IS_ERR(adc->hwc))
- return -EPROBE_DEFER;
+ if (num_ch > adc->dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, "Number of channel [%d] exceeds [%d]\n",
+ num_ch, adc->dfsdm->num_chs);
+ return -EINVAL;
+ }
+ indio_dev->num_channels = num_ch;
- ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch),
- GFP_KERNEL);
- if (!ch)
- return -ENOMEM;
+ if (legacy) {
+ /* Bind to SD modulator IIO device. */
+ adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
+ if (IS_ERR(adc->hwc))
+ return dev_err_probe(&indio_dev->dev, -EPROBE_DEFER,
+ "waiting for SD modulator\n");
+ } else {
+ /* Generic binding: the SD modulator is used as an IIO backend, not as an IIO device. */
+ adc->hwc = NULL;
- for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
- ch[chan_idx].scan_index = chan_idx;
- ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);
- if (ret < 0) {
- dev_err(&indio_dev->dev, "Channels init failed\n");
- return ret;
- }
+ adc->backend = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*adc->backend),
+ GFP_KERNEL);
+ if (!adc->backend)
+ return -ENOMEM;
}
- indio_dev->num_channels = num_ch;
+ ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
indio_dev->channels = ch;
+ if (legacy)
+ ret = stm32_dfsdm_chan_init(indio_dev, ch);
+ else
+ ret = stm32_dfsdm_generic_chan_init(indio_dev, ch);
+ if (ret < 0)
+ return ret;
+
init_completion(&adc->completion);
/* Optionally request DMA */
@@ -1677,3 +1897,4 @@ module_platform_driver(stm32_dfsdm_adc_driver);
MODULE_DESCRIPTION("STM32 sigma delta ADC");
MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_BACKEND);
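
stm32-dfsdm-adc now drives the upstream sigma-delta modulator through the IIO backend framework rather than only through the hw-consumer interface, hence the trailing MODULE_IMPORT_NS(IIO_BACKEND). The consumer half of that API is small; a sketch under the same assumptions as the diff (per-channel fwnode lookup, enable/disable around a conversion):

    #include <linux/iio/backend.h>
    #include <linux/iio/iio.h>

    static int foo_bind_backend(struct iio_dev *indio_dev,
                                struct fwnode_handle *node,
                                struct iio_backend **slot)
    {
            /* Resolves the channel node's "io-backends" phandle. */
            struct iio_backend *backend =
                    devm_iio_backend_fwnode_get(&indio_dev->dev, NULL, node);

            if (IS_ERR(backend))
                    return PTR_ERR(backend);
            *slot = backend;
            return 0;
    }

    static int foo_single_conv(struct iio_backend *backend)
    {
            int ret = iio_backend_enable(backend);

            if (ret)
                    return ret;
            /* ... perform the conversion ... */
            iio_backend_disable(backend);
            return 0;
    }
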
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index a05d978b8cb8..bef59fcc0d80 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -299,7 +299,7 @@ static const struct of_device_id stm32_dfsdm_of_match[] = {
.compatible = "st,stm32mp1-dfsdm",
.data = &stm32mp1_dfsdm_data,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index 8e56def1c9e5..b0add5a2eab5 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -347,7 +347,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stmpe_adc_pm_ops, NULL, stmpe_adc_resume);
static const struct of_device_id stmpe_adc_ids[] = {
{ .compatible = "st,stmpe-adc", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, stmpe_adc_ids);
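
The remaining hunks in this run of drivers are one-character style fixes: the preferred spelling of an ID table terminator in IIO is now `{ }`, with a space and without a trailing comma, since nothing may ever follow the sentinel. For instance:

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo" },
            { }     /* sentinel, always last, hence no trailing comma */
    };
    MODULE_DEVICE_TABLE(of, foo_of_match);
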
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index b11ce555ba3b..e2dbd070c7c4 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -211,8 +211,7 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
mutex_lock(&adc->lock);
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
int ret = adc0832_adc_conversion(adc, scan_chan->channel,
@@ -310,7 +309,7 @@ static const struct of_device_id adc0832_dt_ids[] = {
{ .compatible = "ti,adc0832", },
{ .compatible = "ti,adc0834", },
{ .compatible = "ti,adc0838", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, adc0832_dt_ids);
@@ -319,7 +318,7 @@ static const struct spi_device_id adc0832_id[] = {
{ "adc0832", adc0832 },
{ "adc0834", adc0834 },
{ "adc0838", adc0838 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adc0832_id);
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 1f6e53832e06..bf98f9bf942a 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -166,8 +166,7 @@ static int adc084s021_buffer_preenable(struct iio_dev *indio_dev)
int scan_index;
int i = 0;
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *channel =
&indio_dev->channels[scan_index];
adc->tx_buf[i++] = channel->channel << 3;
@@ -243,13 +242,13 @@ static int adc084s021_probe(struct spi_device *spi)
static const struct of_device_id adc084s021_of_match[] = {
{ .compatible = "ti,adc084s021", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, adc084s021_of_match);
static const struct spi_device_id adc084s021_id[] = {
{ ADC084S021_DRIVER_NAME, 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adc084s021_id);
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index c0a72d72f3a9..7f065f457b36 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -344,8 +344,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
mutex_lock(&adc->lock);
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
@@ -520,7 +519,7 @@ static const struct of_device_id adc12138_dt_ids[] = {
{ .compatible = "ti,adc12130", },
{ .compatible = "ti,adc12132", },
{ .compatible = "ti,adc12138", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, adc12138_dt_ids);
@@ -528,7 +527,7 @@ static const struct spi_device_id adc12138_id[] = {
{ "adc12130", adc12130 },
{ "adc12132", adc12132 },
{ "adc12138", adc12138 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adc12138_id);
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index f7c78d0dd449..474e733fb8e0 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -226,14 +226,14 @@ static int ti_adc_probe(struct spi_device *spi)
static const struct of_device_id ti_adc_dt_ids[] = {
{ .compatible = "ti,adc141s626", },
{ .compatible = "ti,adc161s626", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);
static const struct spi_device_id ti_adc_id[] = {
- {"adc141s626", TI_ADC141S626},
- {"adc161s626", TI_ADC161S626},
- {},
+ { "adc141s626", TI_ADC141S626 },
+ { "adc161s626", TI_ADC161S626 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ti_adc_id);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index d3363d02f292..6d1bc9659946 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -456,7 +456,7 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
mutex_lock(&data->lock);
chan = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ads1015_get_adc_result(data, chan, &res);
if (ret < 0) {
mutex_unlock(&data->lock);
@@ -1173,7 +1173,7 @@ static const struct i2c_device_id ads1015_id[] = {
{ "ads1015", (kernel_ulong_t)&ads1015_data },
{ "ads1115", (kernel_ulong_t)&ads1115_data },
{ "tla2024", (kernel_ulong_t)&tla2024_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
@@ -1181,7 +1181,7 @@ static const struct of_device_id ads1015_of_match[] = {
{ .compatible = "ti,ads1015", .data = &ads1015_data },
{ .compatible = "ti,ads1115", .data = &ads1115_data },
{ .compatible = "ti,tla2024", .data = &tla2024_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ads1015_of_match);
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index d649980479e4..1c7606375149 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -435,7 +435,7 @@ static int ads1119_triggered_buffer_preenable(struct iio_dev *indio_dev)
int ret;
index = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ads1119_set_conv_mode(st, true);
if (ret)
@@ -508,7 +508,7 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private)
if (!iio_trigger_using_own(indio_dev)) {
index = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ads1119_poll_data_ready(st, &indio_dev->channels[index]);
if (ret) {
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 4ca62121f0d1..14941f384dad 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -279,8 +279,7 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p)
int scan_index, j = 0;
int ret;
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
ret = ads124s_write_reg(indio_dev, ADS124S08_INPUT_MUX,
scan_index);
if (ret)
@@ -358,7 +357,7 @@ MODULE_DEVICE_TABLE(spi, ads124s_id);
static const struct of_device_id ads124s_of_table[] = {
{ .compatible = "ti,ads124s06" },
{ .compatible = "ti,ads124s08" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ads124s_of_table);
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
index 1d1eaba3d6d1..13cb32125eef 100644
--- a/drivers/iio/adc/ti-ads1298.c
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -502,8 +502,7 @@ static void ads1298_rdata_complete(void *context)
}
/* Demux the channel data into our bounce buffer */
- for_each_set_bit(scan_index, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, scan_index) {
const struct iio_chan_spec *scan_chan =
&indio_dev->channels[scan_index];
const u8 *data = priv->rx_buffer + scan_chan->address;
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 96dd9366f8ff..91a427eb0882 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -637,7 +637,7 @@ static irqreturn_t ads131e08_trigger_handler(int irq, void *private)
if (ret)
goto out;
- for_each_set_bit(chn, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, chn) {
src = st->rx_buf + ADS131E08_NUM_STATUS_BYTES + chn * num_bytes;
dest = st->tmp_buf.data + i * ADS131E08_NUM_STORAGE_BYTES;
@@ -918,7 +918,7 @@ static const struct of_device_id ads131e08_of_match[] = {
.data = &ads131e08_info_tbl[ads131e06], },
{ .compatible = "ti,ads131e08",
.data = &ads131e08_info_tbl[ads131e08], },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ads131e08_of_match);
@@ -926,7 +926,7 @@ static const struct spi_device_id ads131e08_ids[] = {
{ "ads131e04", (kernel_ulong_t)&ads131e08_info_tbl[ads131e04] },
{ "ads131e06", (kernel_ulong_t)&ads131e08_info_tbl[ads131e06] },
{ "ads131e08", (kernel_ulong_t)&ads131e08_info_tbl[ads131e08] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ads131e08_ids);
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
index 4da78302359b..66b54c0d75aa 100644
--- a/drivers/iio/adc/ti-ads7924.c
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -448,13 +448,13 @@ static int ads7924_probe(struct i2c_client *client)
static const struct i2c_device_id ads7924_id[] = {
{ "ads7924" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ads7924_id);
static const struct of_device_id ads7924_of_match[] = {
{ .compatible = "ti,ads7924", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ads7924_of_match);
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 263fc3a1b87e..af28672aa803 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -705,7 +705,7 @@ static const struct of_device_id ads7950_of_table[] = {
{ .compatible = "ti,ads7959", .data = &ti_ads7950_chip_info[TI_ADS7959] },
{ .compatible = "ti,ads7960", .data = &ti_ads7950_chip_info[TI_ADS7960] },
{ .compatible = "ti,ads7961", .data = &ti_ads7950_chip_info[TI_ADS7961] },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ads7950_of_table);
diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
index bbd85cb47f81..3bec8a2e61ab 100644
--- a/drivers/iio/adc/ti-ads8344.c
+++ b/drivers/iio/adc/ti-ads8344.c
@@ -175,7 +175,7 @@ static int ads8344_probe(struct spi_device *spi)
static const struct of_device_id ads8344_of_match[] = {
{ .compatible = "ti,ads8344", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ads8344_of_match);
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 7a79f0cebfbf..9b1814f1965a 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -384,9 +384,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8);
int i, j = 0;
- for (i = 0; i < indio_dev->masklength; i++) {
- if (!test_bit(i, indio_dev->active_scan_mask))
- continue;
+ iio_for_each_active_channel(indio_dev, i) {
buffer[j] = ads8688_read(indio_dev, i);
j++;
}
@@ -454,9 +452,9 @@ static int ads8688_probe(struct spi_device *spi)
}
static const struct spi_device_id ads8688_id[] = {
- {"ads8684", ID_ADS8684},
- {"ads8688", ID_ADS8688},
- {}
+ { "ads8684", ID_ADS8684 },
+ { "ads8688", ID_ADS8688 },
+ { }
};
MODULE_DEVICE_TABLE(spi, ads8688_id);
diff --git a/drivers/iio/adc/ti-lmp92064.c b/drivers/iio/adc/ti-lmp92064.c
index 84ba5c4a0eea..169e3591320b 100644
--- a/drivers/iio/adc/ti-lmp92064.c
+++ b/drivers/iio/adc/ti-lmp92064.c
@@ -360,7 +360,7 @@ static int lmp92064_adc_probe(struct spi_device *spi)
static const struct spi_device_id lmp92064_id_table[] = {
{ "lmp92064" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, lmp92064_id_table);
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 30f629a553a1..08de997584fd 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -237,14 +237,14 @@ static void tlc4541_remove(struct spi_device *spi)
static const struct of_device_id tlc4541_dt_ids[] = {
{ .compatible = "ti,tlc3541", },
{ .compatible = "ti,tlc4541", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, tlc4541_dt_ids);
static const struct spi_device_id tlc4541_id[] = {
- {"tlc3541", TLC3541},
- {"tlc4541", TLC4541},
- {}
+ { "tlc3541", TLC3541 },
+ { "tlc4541", TLC4541 },
+ { }
};
MODULE_DEVICE_TABLE(spi, tlc4541_id);
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index edcef8f11522..311d97001249 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
@@ -141,7 +142,7 @@ enum tsc2046_state {
struct tsc2046_adc_priv {
struct spi_device *spi;
const struct tsc2046_adc_dcfg *dcfg;
- struct regulator *vref_reg;
+ bool internal_vref;
struct iio_trigger *trig;
struct hrtimer trig_timer;
@@ -257,7 +258,7 @@ static u8 tsc2046_adc_get_cmd(struct tsc2046_adc_priv *priv, int ch_idx,
case TI_TSC2046_ADDR_VBAT:
case TI_TSC2046_ADDR_TEMP0:
pd |= TI_TSC2046_SER;
- if (!priv->vref_reg)
+ if (priv->internal_vref)
pd |= TI_TSC2046_PD1_VREF_ON;
}
@@ -273,7 +274,6 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
u32 *effective_speed_hz)
{
struct tsc2046_adc_ch_cfg *ch = &priv->ch_cfg[ch_idx];
- struct tsc2046_adc_atom *rx_buf, *tx_buf;
unsigned int val, val_normalized = 0;
int ret, i, count_skip = 0, max_count;
struct spi_transfer xfer;
@@ -287,18 +287,20 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
max_count = 1;
}
- if (sizeof(*tx_buf) * max_count > PAGE_SIZE)
+ if (sizeof(struct tsc2046_adc_atom) * max_count > PAGE_SIZE)
return -ENOSPC;
- tx_buf = kcalloc(max_count, sizeof(*tx_buf), GFP_KERNEL);
+ struct tsc2046_adc_atom *tx_buf __free(kfree) = kcalloc(max_count,
+ sizeof(*tx_buf),
+ GFP_KERNEL);
if (!tx_buf)
return -ENOMEM;
- rx_buf = kcalloc(max_count, sizeof(*rx_buf), GFP_KERNEL);
- if (!rx_buf) {
- ret = -ENOMEM;
- goto free_tx;
- }
+ struct tsc2046_adc_atom *rx_buf __free(kfree) = kcalloc(max_count,
+ sizeof(*rx_buf),
+ GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
/*
* Do not enable automatic power down on working samples. Otherwise the
@@ -326,7 +328,7 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
if (ret) {
dev_err_ratelimited(&priv->spi->dev, "SPI transfer failed %pe\n",
ERR_PTR(ret));
- goto free_bufs;
+ return ret;
}
if (effective_speed_hz)
@@ -337,14 +339,7 @@ static int tsc2046_adc_read_one(struct tsc2046_adc_priv *priv, int ch_idx,
val_normalized += val;
}
- ret = DIV_ROUND_UP(val_normalized, max_count - count_skip);
-
-free_bufs:
- kfree(rx_buf);
-free_tx:
- kfree(tx_buf);
-
- return ret;
+ return DIV_ROUND_UP(val_normalized, max_count - count_skip);
}
static size_t tsc2046_adc_group_set_layout(struct tsc2046_adc_priv *priv,
@@ -746,49 +741,6 @@ static void tsc2046_adc_parse_fwnode(struct tsc2046_adc_priv *priv)
}
}
-static void tsc2046_adc_regulator_disable(void *data)
-{
- struct tsc2046_adc_priv *priv = data;
-
- regulator_disable(priv->vref_reg);
-}
-
-static int tsc2046_adc_configure_regulator(struct tsc2046_adc_priv *priv)
-{
- struct device *dev = &priv->spi->dev;
- int ret;
-
- priv->vref_reg = devm_regulator_get_optional(dev, "vref");
- if (IS_ERR(priv->vref_reg)) {
- /* If regulator exists but can't be get, return an error */
- if (PTR_ERR(priv->vref_reg) != -ENODEV)
- return PTR_ERR(priv->vref_reg);
- priv->vref_reg = NULL;
- }
- if (!priv->vref_reg) {
- /* Use internal reference */
- priv->vref_mv = TI_TSC2046_INT_VREF;
- return 0;
- }
-
- ret = regulator_enable(priv->vref_reg);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, tsc2046_adc_regulator_disable,
- priv);
- if (ret)
- return ret;
-
- ret = regulator_get_voltage(priv->vref_reg);
- if (ret < 0)
- return ret;
-
- priv->vref_mv = ret / MILLI;
-
- return 0;
-}
-
static int tsc2046_adc_probe(struct spi_device *spi)
{
const struct tsc2046_adc_dcfg *dcfg;
@@ -830,10 +782,13 @@ static int tsc2046_adc_probe(struct spi_device *spi)
indio_dev->num_channels = dcfg->num_channels;
indio_dev->info = &tsc2046_adc_info;
- ret = tsc2046_adc_configure_regulator(priv);
- if (ret)
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0 && ret != -ENODEV)
return ret;
+ priv->internal_vref = ret == -ENODEV;
+ priv->vref_mv = priv->internal_vref ? TI_TSC2046_INT_VREF : ret / MILLI;
+
tsc2046_adc_parse_fwnode(priv);
ret = tsc2046_adc_setup_spi_msg(priv);
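
Two independent modernizations land in ti-tsc2046: the read path switches to scope-based freeing from <linux/cleanup.h>, and the vref plumbing collapses into devm_regulator_get_enable_read_voltage(). A condensed sketch of both idioms; the -ENODEV fallback mirrors the diff, while the internal reference value here is hypothetical:

    #include <linux/cleanup.h>
    #include <linux/device.h>
    #include <linux/regulator/consumer.h>
    #include <linux/slab.h>
    #include <linux/units.h>

    static int foo_read(size_t count)
    {
            /* kfree(buf) runs at scope exit, on every return path. */
            u8 *buf __free(kfree) = kcalloc(count, sizeof(*buf), GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            /* ... use buf; no goto ladder, no explicit kfree() ... */
            return 0;
    }

    static int foo_get_vref_mv(struct device *dev, u32 *vref_mv)
    {
            /* Get + enable + read "vref" in one devm call (microvolts). */
            int ret = devm_regulator_get_enable_read_voltage(dev, "vref");

            if (ret == -ENODEV)
                    *vref_mv = 2500;        /* hypothetical internal vref */
            else if (ret < 0)
                    return ret;
            else
                    *vref_mv = ret / MILLI;
            return 0;
    }
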
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index e4548df3f8fb..5afd2feb8c3d 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -752,7 +752,7 @@ static int vf610_adc_buffer_postenable(struct iio_dev *indio_dev)
writel(val, info->regs + VF610_REG_ADC_GC);
channel = find_first_bit(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
val = VF610_ADC_ADCHC(channel);
val |= VF610_ADC_AIEN;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index f051358d6b50..ebc583b07e0c 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1275,7 +1275,6 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
struct ams *ams = iio_priv(indio_dev);
struct iio_chan_spec *ams_channels, *dev_channels;
struct device *dev = indio_dev->dev.parent;
- struct fwnode_handle *child = NULL;
struct fwnode_handle *fwnode = dev_fwnode(dev);
size_t ams_size;
int ret, ch_cnt = 0, i, rising_off, falling_off;
@@ -1297,16 +1296,12 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
num_channels += ret;
}
- fwnode_for_each_child_node(fwnode, child) {
- if (fwnode_device_is_available(child)) {
- ret = ams_init_module(indio_dev, child, ams_channels + num_channels);
- if (ret < 0) {
- fwnode_handle_put(child);
- return ret;
- }
+ device_for_each_child_node_scoped(dev, child) {
+ ret = ams_init_module(indio_dev, child, ams_channels + num_channels);
+ if (ret < 0)
+ return ret;
- num_channels += ret;
- }
+ num_channels += ret;
}
for (i = 0; i < num_channels; i++) {
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 564c0cad0fc7..cfbfcaefec0f 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -628,7 +628,7 @@ static int xadc_update_scan_mode(struct iio_dev *indio_dev,
size_t n;
void *data;
- n = bitmap_weight(mask, indio_dev->masklength);
+ n = bitmap_weight(mask, iio_get_masklength(indio_dev));
data = devm_krealloc_array(indio_dev->dev.parent, xadc->data,
n, sizeof(*xadc->data), GFP_KERNEL);
@@ -681,8 +681,7 @@ static irqreturn_t xadc_trigger_handler(int irq, void *p)
goto out;
j = 0;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
chan = xadc_scan_index_to_channel(i);
xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
j++;
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4c12b7a94af5..4befc9f55201 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -77,7 +77,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
}
cb_buff->indio_dev = cb_buff->channels[0].indio_dev;
- cb_buff->buffer.scan_mask = bitmap_zalloc(cb_buff->indio_dev->masklength,
+ cb_buff->buffer.scan_mask = bitmap_zalloc(iio_get_masklength(cb_buff->indio_dev),
GFP_KERNEL);
if (cb_buff->buffer.scan_mask == NULL) {
ret = -ENOMEM;
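
Same accessor substitution on the consumer side: the callback buffer sizes its scan mask from the device's mask length. Allocating such a mask reduces to one call (a sketch):

    #include <linux/bitmap.h>
    #include <linux/gfp.h>
    #include <linux/iio/iio.h>

    static unsigned long *foo_alloc_scan_mask(struct iio_dev *indio_dev)
    {
            /* One bit per scan element; rounded up to whole longs. */
            return bitmap_zalloc(iio_get_masklength(indio_dev), GFP_KERNEL);
    }
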
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 647f417a045e..dbde1443d6ed 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -248,7 +248,7 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
iio_dma_buffer_queue_wake(queue);
dma_fence_end_signalling(cookie);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_done, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_block_list_abort() - Indicate that a list block has been
@@ -287,7 +287,7 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
iio_dma_buffer_queue_wake(queue);
dma_fence_end_signalling(cookie);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_list_abort, IIO_DMA_BUFFER);
static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
@@ -420,7 +420,7 @@ out_unlock:
return ret;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_request_update, IIO_DMA_BUFFER);
static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
{
@@ -506,7 +506,7 @@ int iio_dma_buffer_enable(struct iio_buffer *buffer,
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enable, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_disable() - Disable DMA buffer
@@ -530,7 +530,7 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer,
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_disable, IIO_DMA_BUFFER);
static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
struct iio_dma_buffer_block *block)
@@ -636,7 +636,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
{
return iio_dma_buffer_io(buffer, n, user_buffer, false);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_read, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_write() - DMA buffer write callback
@@ -653,7 +653,7 @@ int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
return iio_dma_buffer_io(buffer, n,
(__force __user char *)user_buffer, true);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_write);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_write, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_usage() - DMA buffer data_available and
@@ -696,7 +696,7 @@ size_t iio_dma_buffer_usage(struct iio_buffer *buf)
return data_available;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_usage);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_usage, IIO_DMA_BUFFER);
struct iio_dma_buffer_block *
iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
@@ -723,7 +723,7 @@ iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
return block;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_attach_dmabuf);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_attach_dmabuf, IIO_DMA_BUFFER);
void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
struct iio_dma_buffer_block *block)
@@ -731,7 +731,7 @@ void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
block->state = IIO_BLOCK_STATE_DEAD;
iio_buffer_block_put_atomic(block);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_detach_dmabuf);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_detach_dmabuf, IIO_DMA_BUFFER);
static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
{
@@ -784,7 +784,7 @@ out_end_signalling:
return ret;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enqueue_dmabuf, IIO_DMA_BUFFER);
void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
{
@@ -792,7 +792,7 @@ void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
mutex_lock(&queue->lock);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_lock_queue);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_lock_queue, IIO_DMA_BUFFER);
void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
{
@@ -800,7 +800,7 @@ void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
mutex_unlock(&queue->lock);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_unlock_queue);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_unlock_queue, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
@@ -816,7 +816,7 @@ int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_bytes_per_datum, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_set_length - DMA buffer set_length callback
@@ -836,7 +836,7 @@ int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_length, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_init() - Initialize DMA buffer queue
@@ -864,7 +864,7 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
return 0;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_init, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_exit() - Cleanup DMA buffer queue
@@ -882,7 +882,7 @@ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
mutex_unlock(&queue->lock);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_exit, IIO_DMA_BUFFER);
/**
* iio_dma_buffer_release() - Release final buffer resources
@@ -896,7 +896,7 @@ void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
mutex_destroy(&queue->lock);
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_release);
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_release, IIO_DMA_BUFFER);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 426cc614587a..19af1caf14cd 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -350,3 +350,4 @@ EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_DMA_BUFFER);
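
Every iio_dma_buffer_* export above moves into the IIO_DMA_BUFFER symbol namespace, and the dmaengine buffer, as the in-tree consumer, gains the matching MODULE_IMPORT_NS() line. The mechanism in miniature (namespace and symbol names hypothetical):

    /* provider module */
    #include <linux/module.h>

    int foo_do_work(void)
    {
            return 0;
    }
    /* Only resolvable by modules that import the FOO namespace. */
    EXPORT_SYMBOL_NS_GPL(foo_do_work, FOO);

    /* consumer module: without this line, modpost warns and the
     * module loader refuses to bind the namespaced symbol. */
    MODULE_IMPORT_NS(FOO);
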
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
index fb58f599a80b..526b2a8d725d 100644
--- a/drivers/iio/buffer/industrialio-hw-consumer.c
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -52,6 +52,7 @@ static const struct iio_buffer_access_funcs iio_hw_buf_access = {
static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
{
+ unsigned int mask_longs = BITS_TO_LONGS(iio_get_masklength(indio_dev));
struct hw_consumer_buffer *buf;
list_for_each_entry(buf, &hwc->buffers, head) {
@@ -59,8 +60,7 @@ static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
return buf;
}
- buf = kzalloc(struct_size(buf, scan_mask, BITS_TO_LONGS(indio_dev->masklength)),
- GFP_KERNEL);
+ buf = kzalloc(struct_size(buf, scan_mask, mask_longs), GFP_KERNEL);
if (!buf)
return NULL;
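
iio_hw_consumer_get_buffer() keeps the consumer's scan mask as a flexible array member at the tail of the structure; hoisting BITS_TO_LONGS(iio_get_masklength(...)) into mask_longs is only there to keep the struct_size() line readable. The allocation idiom itself, as a minimal sketch:

    #include <linux/bitmap.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct foo_buffer {
            unsigned int id;
            unsigned long scan_mask[];      /* flexible array member */
    };

    static struct foo_buffer *foo_buffer_alloc(unsigned int masklength)
    {
            struct foo_buffer *buf;

            /* struct_size() = sizeof(*buf) + n * sizeof(long), with
             * overflow checking on the multiply and add. */
            buf = kzalloc(struct_size(buf, scan_mask,
                                      BITS_TO_LONGS(masklength)),
                          GFP_KERNEL);
            return buf;
    }
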
diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
index f959252a4fe6..b2c547ac8d34 100644
--- a/drivers/iio/chemical/bme680.h
+++ b/drivers/iio/chemical/bme680.h
@@ -12,7 +12,7 @@
#define BME680_REG_TEMP_MSB 0x22
#define BME680_REG_PRESS_MSB 0x1F
-#define BM6880_REG_HUMIDITY_MSB 0x25
+#define BME680_REG_HUMIDITY_MSB 0x25
#define BME680_REG_GAS_MSB 0x2A
#define BME680_REG_GAS_R_LSB 0x2B
#define BME680_GAS_STAB_BIT BIT(4)
@@ -39,14 +39,12 @@
#define BME680_HUM_REG_SHIFT_VAL 4
#define BME680_BIT_H1_DATA_MASK GENMASK(3, 0)
-#define BME680_REG_RES_HEAT_RANGE 0x02
#define BME680_RHRANGE_MASK GENMASK(5, 4)
#define BME680_REG_RES_HEAT_VAL 0x00
-#define BME680_REG_RANGE_SW_ERR 0x04
#define BME680_RSERROR_MASK GENMASK(7, 4)
#define BME680_REG_RES_HEAT_0 0x5A
#define BME680_REG_GAS_WAIT_0 0x64
-#define BME680_ADC_GAS_RES_SHIFT 6
+#define BME680_ADC_GAS_RES GENMASK(15, 6)
#define BME680_AMB_TEMP 25
#define BME680_REG_CTRL_GAS_1 0x71
@@ -58,33 +56,24 @@
#define BME680_GAS_MEAS_BIT BIT(6)
#define BME680_MEAS_BIT BIT(5)
+#define BME680_TEMP_NUM_BYTES 3
+#define BME680_PRESS_NUM_BYTES 3
+#define BME680_HUMID_NUM_BYTES 2
+#define BME680_GAS_NUM_BYTES 2
+
+#define BME680_MEAS_TRIM_MASK GENMASK(24, 4)
+
+#define BME680_STARTUP_TIME_US 5000
+
/* Calibration Parameters */
#define BME680_T2_LSB_REG 0x8A
-#define BME680_T3_REG 0x8C
-#define BME680_P1_LSB_REG 0x8E
-#define BME680_P2_LSB_REG 0x90
-#define BME680_P3_REG 0x92
-#define BME680_P4_LSB_REG 0x94
-#define BME680_P5_LSB_REG 0x96
-#define BME680_P7_REG 0x98
-#define BME680_P6_REG 0x99
-#define BME680_P8_LSB_REG 0x9C
-#define BME680_P9_LSB_REG 0x9E
-#define BME680_P10_REG 0xA0
-#define BME680_H2_LSB_REG 0xE2
#define BME680_H2_MSB_REG 0xE1
-#define BME680_H1_MSB_REG 0xE3
-#define BME680_H1_LSB_REG 0xE2
-#define BME680_H3_REG 0xE4
-#define BME680_H4_REG 0xE5
-#define BME680_H5_REG 0xE6
-#define BME680_H6_REG 0xE7
-#define BME680_H7_REG 0xE8
-#define BME680_T1_LSB_REG 0xE9
-#define BME680_GH2_LSB_REG 0xEB
-#define BME680_GH1_REG 0xED
#define BME680_GH3_REG 0xEE
+#define BME680_CALIB_RANGE_1_LEN 23
+#define BME680_CALIB_RANGE_2_LEN 14
+#define BME680_CALIB_RANGE_3_LEN 5
+
extern const struct regmap_config bme680_regmap_config;
int bme680_core_probe(struct device *dev, struct regmap *regmap,
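
Most of the deleted bme680.h defines are per-coefficient register addresses the reworked core driver (next diff) no longer needs: it pulls each calibration range with a single regmap_bulk_read() and decodes fields by buffer index, using get_unaligned_le16() for 16-bit words at odd offsets. A sketch of that decode step, with hypothetical register and length values:

    #include <linux/regmap.h>
    #include <asm/unaligned.h>

    #define FOO_CALIB_START 0x8A    /* first register of the block */
    #define FOO_CALIB_LEN   23

    static int foo_read_calib(struct regmap *map, s16 *par_t2, s8 *par_t3)
    {
            u8 buf[FOO_CALIB_LEN];
            int ret;

            /* One burst instead of a dozen single-register reads. */
            ret = regmap_bulk_read(map, FOO_CALIB_START, buf, sizeof(buf));
            if (ret)
                    return ret;

            *par_t2 = get_unaligned_le16(&buf[0]);  /* unaligned LE16 */
            *par_t3 = buf[2];
            return 0;
    }
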
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 500f56834b01..5d2e750ca2b9 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -8,18 +8,64 @@
* Datasheet:
* https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME680-DS001-00.pdf
*/
-#include <linux/acpi.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/log2.h>
+#include <linux/module.h>
#include <linux/regmap.h>
+
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#include "bme680.h"
+/* 1st set of calibration data */
+enum {
+ /* Temperature calib indexes */
+ T2_LSB = 0,
+ T3 = 2,
+ /* Pressure calib indexes */
+ P1_LSB = 4,
+ P2_LSB = 6,
+ P3 = 8,
+ P4_LSB = 10,
+ P5_LSB = 12,
+ P7 = 14,
+ P6 = 15,
+ P8_LSB = 18,
+ P9_LSB = 20,
+ P10 = 22,
+};
+
+/* 2nd set of calibration data */
+enum {
+ /* Humidity calib indexes */
+ H2_MSB = 0,
+ H1_LSB = 1,
+ H3 = 3,
+ H4 = 4,
+ H5 = 5,
+ H6 = 6,
+ H7 = 7,
+ /* Stray T1 calib index */
+ T1_LSB = 8,
+ /* Gas heater calib indexes */
+ GH2_LSB = 10,
+ GH1 = 12,
+ GH3 = 13,
+};
+
+/* 3rd set of calibration data */
+enum {
+ RES_HEAT_VAL = 0,
+ RES_HEAT_RANGE = 2,
+ RANGE_SW_ERR = 4,
+};
+
struct bme680_calib {
u16 par_t1;
s16 par_t2;
@@ -52,16 +98,21 @@ struct bme680_calib {
struct bme680_data {
struct regmap *regmap;
struct bme680_calib bme680;
+ struct mutex lock; /* Protect multiple serial R/W ops to device. */
u8 oversampling_temp;
u8 oversampling_press;
u8 oversampling_humid;
u16 heater_dur;
u16 heater_temp;
- /*
- * Carryover value from temperature conversion, used in pressure
- * and humidity compensation calculations.
- */
- s32 t_fine;
+
+ union {
+ u8 buf[3];
+ unsigned int check;
+ __be16 be16;
+ u8 bme680_cal_buf_1[BME680_CALIB_RANGE_1_LEN];
+ u8 bme680_cal_buf_2[BME680_CALIB_RANGE_2_LEN];
+ u8 bme680_cal_buf_3[BME680_CALIB_RANGE_3_LEN];
+ };
};
static const struct regmap_range bme680_volatile_ranges[] = {
@@ -110,217 +161,98 @@ static int bme680_read_calib(struct bme680_data *data,
struct bme680_calib *calib)
{
struct device *dev = regmap_get_device(data->regmap);
- unsigned int tmp, tmp_msb, tmp_lsb;
+ unsigned int tmp_msb, tmp_lsb;
int ret;
- __le16 buf;
-
- /* Temperature related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_T1_LSB_REG\n");
- return ret;
- }
- calib->par_t1 = le16_to_cpu(buf);
ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG,
- &buf, sizeof(buf));
+ data->bme680_cal_buf_1,
+ sizeof(data->bme680_cal_buf_1));
if (ret < 0) {
- dev_err(dev, "failed to read BME680_T2_LSB_REG\n");
+ dev_err(dev, "failed to read 1st set of calib data;\n");
return ret;
}
- calib->par_t2 = le16_to_cpu(buf);
- ret = regmap_read(data->regmap, BME680_T3_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_T3_REG\n");
- return ret;
- }
- calib->par_t3 = tmp;
-
- /* Pressure related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P1_LSB_REG\n");
- return ret;
- }
- calib->par_p1 = le16_to_cpu(buf);
-
- ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P2_LSB_REG\n");
- return ret;
- }
- calib->par_p2 = le16_to_cpu(buf);
-
- ret = regmap_read(data->regmap, BME680_P3_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P3_REG\n");
- return ret;
- }
- calib->par_p3 = tmp;
-
- ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P4_LSB_REG\n");
- return ret;
- }
- calib->par_p4 = le16_to_cpu(buf);
+ calib->par_t2 = get_unaligned_le16(&data->bme680_cal_buf_1[T2_LSB]);
+ calib->par_t3 = data->bme680_cal_buf_1[T3];
+ calib->par_p1 = get_unaligned_le16(&data->bme680_cal_buf_1[P1_LSB]);
+ calib->par_p2 = get_unaligned_le16(&data->bme680_cal_buf_1[P2_LSB]);
+ calib->par_p3 = data->bme680_cal_buf_1[P3];
+ calib->par_p4 = get_unaligned_le16(&data->bme680_cal_buf_1[P4_LSB]);
+ calib->par_p5 = get_unaligned_le16(&data->bme680_cal_buf_1[P5_LSB]);
+ calib->par_p7 = data->bme680_cal_buf_1[P7];
+ calib->par_p6 = data->bme680_cal_buf_1[P6];
+ calib->par_p8 = get_unaligned_le16(&data->bme680_cal_buf_1[P8_LSB]);
+ calib->par_p9 = get_unaligned_le16(&data->bme680_cal_buf_1[P9_LSB]);
+ calib->par_p10 = data->bme680_cal_buf_1[P10];
- ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG,
- &buf, sizeof(buf));
+ ret = regmap_bulk_read(data->regmap, BME680_H2_MSB_REG,
+ data->bme680_cal_buf_2,
+ sizeof(data->bme680_cal_buf_2));
if (ret < 0) {
- dev_err(dev, "failed to read BME680_P5_LSB_REG\n");
+ dev_err(dev, "failed to read 2nd set of calib data;\n");
return ret;
}
- calib->par_p5 = le16_to_cpu(buf);
- ret = regmap_read(data->regmap, BME680_P6_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P6_REG\n");
- return ret;
- }
- calib->par_p6 = tmp;
-
- ret = regmap_read(data->regmap, BME680_P7_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P7_REG\n");
- return ret;
- }
- calib->par_p7 = tmp;
-
- ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P8_LSB_REG\n");
- return ret;
- }
- calib->par_p8 = le16_to_cpu(buf);
-
- ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P9_LSB_REG\n");
- return ret;
- }
- calib->par_p9 = le16_to_cpu(buf);
-
- ret = regmap_read(data->regmap, BME680_P10_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_P10_REG\n");
- return ret;
- }
- calib->par_p10 = tmp;
-
- /* Humidity related coefficients */
- ret = regmap_read(data->regmap, BME680_H1_MSB_REG, &tmp_msb);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H1_MSB_REG\n");
- return ret;
- }
- ret = regmap_read(data->regmap, BME680_H1_LSB_REG, &tmp_lsb);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H1_LSB_REG\n");
- return ret;
- }
+ tmp_lsb = data->bme680_cal_buf_2[H1_LSB];
+ tmp_msb = data->bme680_cal_buf_2[H1_LSB + 1];
calib->par_h1 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) |
(tmp_lsb & BME680_BIT_H1_DATA_MASK);
- ret = regmap_read(data->regmap, BME680_H2_MSB_REG, &tmp_msb);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H2_MSB_REG\n");
- return ret;
- }
- ret = regmap_read(data->regmap, BME680_H2_LSB_REG, &tmp_lsb);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H2_LSB_REG\n");
- return ret;
- }
+ tmp_msb = data->bme680_cal_buf_2[H2_MSB];
+ tmp_lsb = data->bme680_cal_buf_2[H2_MSB + 1];
calib->par_h2 = (tmp_msb << BME680_HUM_REG_SHIFT_VAL) |
(tmp_lsb >> BME680_HUM_REG_SHIFT_VAL);
- ret = regmap_read(data->regmap, BME680_H3_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H3_REG\n");
- return ret;
- }
- calib->par_h3 = tmp;
-
- ret = regmap_read(data->regmap, BME680_H4_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H4_REG\n");
- return ret;
- }
- calib->par_h4 = tmp;
+ calib->par_h3 = data->bme680_cal_buf_2[H3];
+ calib->par_h4 = data->bme680_cal_buf_2[H4];
+ calib->par_h5 = data->bme680_cal_buf_2[H5];
+ calib->par_h6 = data->bme680_cal_buf_2[H6];
+ calib->par_h7 = data->bme680_cal_buf_2[H7];
+ calib->par_t1 = get_unaligned_le16(&data->bme680_cal_buf_2[T1_LSB]);
+ calib->par_gh2 = get_unaligned_le16(&data->bme680_cal_buf_2[GH2_LSB]);
+ calib->par_gh1 = data->bme680_cal_buf_2[GH1];
+ calib->par_gh3 = data->bme680_cal_buf_2[GH3];
- ret = regmap_read(data->regmap, BME680_H5_REG, &tmp);
+ ret = regmap_bulk_read(data->regmap, BME680_REG_RES_HEAT_VAL,
+ data->bme680_cal_buf_3,
+ sizeof(data->bme680_cal_buf_3));
if (ret < 0) {
- dev_err(dev, "failed to read BME680_H5_REG\n");
+ dev_err(dev, "failed to read 3rd set of calib data;\n");
return ret;
}
- calib->par_h5 = tmp;
- ret = regmap_read(data->regmap, BME680_H6_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H6_REG\n");
- return ret;
- }
- calib->par_h6 = tmp;
+ calib->res_heat_val = data->bme680_cal_buf_3[RES_HEAT_VAL];
- ret = regmap_read(data->regmap, BME680_H7_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_H7_REG\n");
- return ret;
- }
- calib->par_h7 = tmp;
+ calib->res_heat_range = FIELD_GET(BME680_RHRANGE_MASK,
+ data->bme680_cal_buf_3[RES_HEAT_RANGE]);
- /* Gas heater related coefficients */
- ret = regmap_read(data->regmap, BME680_GH1_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_GH1_REG\n");
- return ret;
- }
- calib->par_gh1 = tmp;
+ calib->range_sw_err = FIELD_GET(BME680_RSERROR_MASK,
+ data->bme680_cal_buf_3[RANGE_SW_ERR]);
- ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG,
- &buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_GH2_LSB_REG\n");
- return ret;
- }
- calib->par_gh2 = le16_to_cpu(buf);
-
- ret = regmap_read(data->regmap, BME680_GH3_REG, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read BME680_GH3_REG\n");
- return ret;
- }
- calib->par_gh3 = tmp;
+ return 0;
+}
- /* Other coefficients */
- ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_RANGE, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read resistance heat range\n");
- return ret;
- }
- calib->res_heat_range = FIELD_GET(BME680_RHRANGE_MASK, tmp);
+static int bme680_read_temp_adc(struct bme680_data *data, u32 *adc_temp)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ u32 value_temp;
+ int ret;
- ret = regmap_read(data->regmap, BME680_REG_RES_HEAT_VAL, &tmp);
+ ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
+ data->buf, BME680_TEMP_NUM_BYTES);
if (ret < 0) {
- dev_err(dev, "failed to read resistance heat value\n");
+ dev_err(dev, "failed to read temperature\n");
return ret;
}
- calib->res_heat_val = tmp;
- ret = regmap_read(data->regmap, BME680_REG_RANGE_SW_ERR, &tmp);
- if (ret < 0) {
- dev_err(dev, "failed to read range software error\n");
- return ret;
+ value_temp = FIELD_GET(BME680_MEAS_TRIM_MASK,
+ get_unaligned_be24(data->buf));
+ if (value_temp == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading temperature skipped\n");
+ return -EINVAL;
}
- calib->range_sw_err = FIELD_GET(BME680_RSERROR_MASK, tmp);
+ *adc_temp = value_temp;
return 0;
}
@@ -332,25 +264,65 @@ static int bme680_read_calib(struct bme680_data *data,
* Returns temperature measurement in DegC, resolutions is 0.01 DegC. Therefore,
* output value of "3233" represents 32.33 DegC.
*/
-static s16 bme680_compensate_temp(struct bme680_data *data,
- s32 adc_temp)
+static s32 bme680_calc_t_fine(struct bme680_data *data, u32 adc_temp)
{
struct bme680_calib *calib = &data->bme680;
s64 var1, var2, var3;
- s16 calc_temp;
/* If the calibration is invalid, attempt to reload it */
if (!calib->par_t2)
bme680_read_calib(data, calib);
- var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
+ var1 = ((s32)adc_temp >> 3) - ((s32)calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
- data->t_fine = var2 + var3;
- calc_temp = (data->t_fine * 5 + 128) >> 8;
+ return var2 + var3; /* t_fine = var2 + var3 */
+}
- return calc_temp;
+static int bme680_get_t_fine(struct bme680_data *data, s32 *t_fine)
+{
+ u32 adc_temp;
+ int ret;
+
+ ret = bme680_read_temp_adc(data, &adc_temp);
+ if (ret)
+ return ret;
+
+ *t_fine = bme680_calc_t_fine(data, adc_temp);
+
+ return 0;
+}
+
+static s16 bme680_compensate_temp(struct bme680_data *data,
+ u32 adc_temp)
+{
+ return (bme680_calc_t_fine(data, adc_temp) * 5 + 128) / 256;
+}
+
+static int bme680_read_press_adc(struct bme680_data *data, u32 *adc_press)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ u32 value_press;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
+ data->buf, BME680_PRESS_NUM_BYTES);
+ if (ret < 0) {
+ dev_err(dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ value_press = FIELD_GET(BME680_MEAS_TRIM_MASK,
+ get_unaligned_be24(data->buf));
+ if (value_press == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading pressure skipped\n");
+ return -EINVAL;
+ }
+ *adc_press = value_press;
+
+ return 0;
}
/*
@@ -361,12 +333,12 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
* 97356 Pa = 973.56 hPa.
*/
static u32 bme680_compensate_press(struct bme680_data *data,
- u32 adc_press)
+ u32 adc_press, s32 t_fine)
{
struct bme680_calib *calib = &data->bme680;
s32 var1, var2, var3, press_comp;
- var1 = (data->t_fine >> 1) - 64000;
+ var1 = (t_fine >> 1) - 64000;
var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
var2 = var2 + (var1 * calib->par_p5 << 1);
var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
@@ -394,6 +366,30 @@ static u32 bme680_compensate_press(struct bme680_data *data,
return press_comp;
}
+static int bme680_read_humid_adc(struct bme680_data *data, u32 *adc_humidity)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ u32 value_humidity;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BME680_REG_HUMIDITY_MSB,
+ &data->be16, BME680_HUMID_NUM_BYTES);
+ if (ret < 0) {
+ dev_err(dev, "failed to read humidity\n");
+ return ret;
+ }
+
+ value_humidity = be16_to_cpu(data->be16);
+ if (value_humidity == BME680_MEAS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(dev, "reading humidity skipped\n");
+ return -EINVAL;
+ }
+ *adc_humidity = value_humidity;
+
+ return 0;
+}
+
/*
* Taken from Bosch BME680 API:
* https://github.com/BoschSensortec/BME680_driver/blob/63bb5336/bme680.c#L937
@@ -402,15 +398,15 @@ static u32 bme680_compensate_press(struct bme680_data *data,
* value of "43215" represents 43.215 %rH.
*/
static u32 bme680_compensate_humid(struct bme680_data *data,
- u16 adc_humid)
+ u16 adc_humid, s32 t_fine)
{
struct bme680_calib *calib = &data->bme680;
s32 var1, var2, var3, var4, var5, var6, temp_scaled, calc_hum;
- temp_scaled = (data->t_fine * 5 + 128) >> 8;
- var1 = (adc_humid - ((s32) ((s32) calib->par_h1 * 16))) -
- (((temp_scaled * (s32) calib->par_h3) / 100) >> 1);
- var2 = ((s32) calib->par_h2 *
+ temp_scaled = (t_fine * 5 + 128) >> 8;
+ var1 = (adc_humid - (((s32)calib->par_h1 * 16))) -
+ (((temp_scaled * calib->par_h3) / 100) >> 1);
+ var2 = (calib->par_h2 *
(((temp_scaled * calib->par_h4) / 100) +
(((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
>> 6) / 100) + (1 << 14))) >> 10;
@@ -442,7 +438,7 @@ static u32 bme680_compensate_gas(struct bme680_data *data, u16 gas_res_adc,
u32 calc_gas_res;
/* Look up table for the possible gas range values */
- const u32 lookupTable[16] = {2147483647u, 2147483647u,
+ static const u32 lookupTable[16] = {2147483647u, 2147483647u,
2147483647u, 2147483647u, 2147483647u,
2126008810u, 2147483647u, 2130303777u,
2147483647u, 2147483647u, 2143188679u,
@@ -540,7 +536,6 @@ static u8 bme680_oversampling_to_reg(u8 val)
static int bme680_wait_for_eoc(struct bme680_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
- unsigned int check;
int ret;
/*
* (Sum of oversampling ratios * time per oversampling) +
@@ -553,16 +548,16 @@ static int bme680_wait_for_eoc(struct bme680_data *data)
usleep_range(wait_eoc_us, wait_eoc_us + 100);
- ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &data->check);
if (ret) {
dev_err(dev, "failed to read measurement status register.\n");
return ret;
}
- if (check & BME680_MEAS_BIT) {
+ if (data->check & BME680_MEAS_BIT) {
dev_err(dev, "Device measurement cycle incomplete.\n");
return -EBUSY;
}
- if (!(check & BME680_NEW_DATA_BIT)) {
+ if (!(data->check & BME680_NEW_DATA_BIT)) {
dev_err(dev, "No new data available from the device.\n");
return -ENODATA;
}
@@ -606,10 +601,12 @@ static int bme680_chip_config(struct bme680_data *data)
ret = regmap_write_bits(data->regmap, BME680_REG_CTRL_MEAS,
BME680_OSRS_TEMP_MASK | BME680_OSRS_PRESS_MASK,
osrs);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "failed to write ctrl_meas register\n");
+ return ret;
+ }
- return ret;
+ return 0;
}
static int bme680_gas_config(struct bme680_data *data)
@@ -618,6 +615,11 @@ static int bme680_gas_config(struct bme680_data *data)
int ret;
u8 heatr_res, heatr_dur;
+ /* Go to sleep */
+ ret = bme680_set_mode(data, false);
+ if (ret < 0)
+ return ret;
+
heatr_res = bme680_calc_heater_res(data, data->heater_temp);
/* set target heater temperature */
@@ -649,77 +651,35 @@ static int bme680_gas_config(struct bme680_data *data)
static int bme680_read_temp(struct bme680_data *data, int *val)
{
- struct device *dev = regmap_get_device(data->regmap);
int ret;
- __be32 tmp = 0;
- s32 adc_temp;
+ u32 adc_temp;
s16 comp_temp;
- /* set forced mode to trigger measurement */
- ret = bme680_set_mode(data, true);
- if (ret < 0)
- return ret;
-
- ret = bme680_wait_for_eoc(data);
+ ret = bme680_read_temp_adc(data, &adc_temp);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
- &tmp, 3);
- if (ret < 0) {
- dev_err(dev, "failed to read temperature\n");
- return ret;
- }
-
- adc_temp = be32_to_cpu(tmp) >> 12;
- if (adc_temp == BME680_MEAS_SKIPPED) {
- /* reading was skipped */
- dev_err(dev, "reading temperature skipped\n");
- return -EINVAL;
- }
comp_temp = bme680_compensate_temp(data, adc_temp);
- /*
- * val might be NULL if we're called by the read_press/read_humid
- * routine which is called to get t_fine value used in
- * compensate_press/compensate_humid to get compensated
- * pressure/humidity readings.
- */
- if (val) {
- *val = comp_temp * 10; /* Centidegrees to millidegrees */
- return IIO_VAL_INT;
- }
-
- return ret;
+ *val = comp_temp * 10; /* Centidegrees to millidegrees */
+ return IIO_VAL_INT;
}
static int bme680_read_press(struct bme680_data *data,
int *val, int *val2)
{
- struct device *dev = regmap_get_device(data->regmap);
int ret;
- __be32 tmp = 0;
- s32 adc_press;
+ u32 adc_press;
+ s32 t_fine;
- /* Read and compensate temperature to get a reading of t_fine */
- ret = bme680_read_temp(data, NULL);
- if (ret < 0)
+ ret = bme680_get_t_fine(data, &t_fine);
+ if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
- &tmp, 3);
- if (ret < 0) {
- dev_err(dev, "failed to read pressure\n");
+ ret = bme680_read_press_adc(data, &adc_press);
+ if (ret)
return ret;
- }
-
- adc_press = be32_to_cpu(tmp) >> 12;
- if (adc_press == BME680_MEAS_SKIPPED) {
- /* reading was skipped */
- dev_err(dev, "reading pressure skipped\n");
- return -EINVAL;
- }
- *val = bme680_compensate_press(data, adc_press);
+ *val = bme680_compensate_press(data, adc_press, t_fine);
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
}
@@ -727,31 +687,19 @@ static int bme680_read_press(struct bme680_data *data,
static int bme680_read_humid(struct bme680_data *data,
int *val, int *val2)
{
- struct device *dev = regmap_get_device(data->regmap);
int ret;
- __be16 tmp = 0;
- s32 adc_humidity;
- u32 comp_humidity;
+ u32 adc_humidity, comp_humidity;
+ s32 t_fine;
- /* Read and compensate temperature to get a reading of t_fine */
- ret = bme680_read_temp(data, NULL);
- if (ret < 0)
+ ret = bme680_get_t_fine(data, &t_fine);
+ if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BM6880_REG_HUMIDITY_MSB,
- &tmp, sizeof(tmp));
- if (ret < 0) {
- dev_err(dev, "failed to read humidity\n");
+ ret = bme680_read_humid_adc(data, &adc_humidity);
+ if (ret)
return ret;
- }
- adc_humidity = be16_to_cpu(tmp);
- if (adc_humidity == BME680_MEAS_SKIPPED) {
- /* reading was skipped */
- dev_err(dev, "reading humidity skipped\n");
- return -EINVAL;
- }
- comp_humidity = bme680_compensate_humid(data, adc_humidity);
+ comp_humidity = bme680_compensate_humid(data, adc_humidity, t_fine);
*val = comp_humidity;
*val2 = 1000;
@@ -763,59 +711,37 @@ static int bme680_read_gas(struct bme680_data *data,
{
struct device *dev = regmap_get_device(data->regmap);
int ret;
- __be16 tmp = 0;
- unsigned int check;
- u16 adc_gas_res;
+ u16 adc_gas_res, gas_regs_val;
u8 gas_range;
- /* Set heater settings */
- ret = bme680_gas_config(data);
- if (ret < 0) {
- dev_err(dev, "failed to set gas config\n");
- return ret;
- }
-
- /* set forced mode to trigger measurement */
- ret = bme680_set_mode(data, true);
- if (ret < 0)
- return ret;
-
- ret = bme680_wait_for_eoc(data);
- if (ret)
- return ret;
-
- ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
- if (check & BME680_GAS_MEAS_BIT) {
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &data->check);
+ if (data->check & BME680_GAS_MEAS_BIT) {
dev_err(dev, "gas measurement incomplete\n");
return -EBUSY;
}
- ret = regmap_read(data->regmap, BME680_REG_GAS_R_LSB, &check);
+ ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
+ &data->be16, BME680_GAS_NUM_BYTES);
if (ret < 0) {
- dev_err(dev, "failed to read gas_r_lsb register\n");
+ dev_err(dev, "failed to read gas resistance\n");
return ret;
}
+ gas_regs_val = be16_to_cpu(data->be16);
+ adc_gas_res = FIELD_GET(BME680_ADC_GAS_RES, gas_regs_val);
+
/*
 * occurs if either the gas heating duration was insufficient
* to reach the target heater temperature or the target
* heater temperature was too high for the heater sink to
* reach.
*/
- if ((check & BME680_GAS_STAB_BIT) == 0) {
+ if ((gas_regs_val & BME680_GAS_STAB_BIT) == 0) {
dev_err(dev, "heater failed to reach the target temperature\n");
return -EINVAL;
}
- ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
- &tmp, sizeof(tmp));
- if (ret < 0) {
- dev_err(dev, "failed to read gas resistance\n");
- return ret;
- }
-
- gas_range = check & BME680_GAS_RANGE_MASK;
- adc_gas_res = be16_to_cpu(tmp) >> BME680_ADC_GAS_RES_SHIFT;
+ gas_range = FIELD_GET(BME680_GAS_RANGE_MASK, gas_regs_val);
*val = bme680_compensate_gas(data, adc_gas_res, gas_range);
return IIO_VAL_INT;
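FIELD_GET() from <linux/bitfield.h> masks a register value and shifts the field down to bit 0 in one step, which is what lets the separate shift (BME680_ADC_GAS_RES_SHIFT) and mask (BME680_GAS_RANGE_MASK) operations above collapse into two calls on the same 16-bit read. A minimal illustration; the GENMASK() values here are hypothetical stand-ins, not necessarily the driver's definitions:

        #include <linux/bitfield.h>

        u16 regs = 0xabcd;
        u16 res = FIELD_GET(GENMASK(15, 6), regs);  /* 0xabcd >> 6 = 0x2af */
        u8 rng = FIELD_GET(GENMASK(3, 0), regs);    /* low nibble  = 0xd   */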
@@ -826,6 +752,18 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct bme680_data *data = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* set forced mode to trigger measurement */
+ ret = bme680_set_mode(data, true);
+ if (ret < 0)
+ return ret;
+
+ ret = bme680_wait_for_eoc(data);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
@@ -871,6 +809,8 @@ static int bme680_write_raw(struct iio_dev *indio_dev,
{
struct bme680_data *data = iio_priv(indio_dev);
+ guard(mutex)(&data->lock);
+
if (val2 != 0)
return -EINVAL;
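guard(mutex)(&data->lock), from <linux/cleanup.h>, locks the mutex immediately and schedules the unlock for whenever the enclosing scope exits, so every early return in read_raw()/write_raw() above drops the lock without an explicit mutex_unlock(). A minimal sketch of the pattern (example_op() and its body are placeholders):

        #include <linux/cleanup.h>
        #include <linux/mutex.h>

        static int example_op(struct bme680_data *data)
        {
                guard(mutex)(&data->lock);      /* mutex_lock() happens here */

                if (!data->regmap)
                        return -EINVAL;         /* unlocked automatically */

                return 0;                       /* unlocked automatically */
        }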
@@ -921,52 +861,19 @@ static const struct iio_info bme680_info = {
.attrs = &bme680_attribute_group,
};
-static const char *bme680_match_acpi_device(struct device *dev)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return NULL;
-
- return dev_name(dev);
-}
-
int bme680_core_probe(struct device *dev, struct regmap *regmap,
const char *name)
{
struct iio_dev *indio_dev;
struct bme680_data *data;
- unsigned int val;
int ret;
- ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
- BME680_CMD_SOFTRESET);
- if (ret < 0) {
- dev_err(dev, "Failed to reset chip\n");
- return ret;
- }
-
- ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
- if (ret < 0) {
- dev_err(dev, "Error reading chip ID\n");
- return ret;
- }
-
- if (val != BME680_CHIP_ID_VAL) {
- dev_err(dev, "Wrong chip ID, got %x expected %x\n",
- val, BME680_CHIP_ID_VAL);
- return -ENODEV;
- }
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
- if (!name && ACPI_HANDLE(dev))
- name = bme680_match_acpi_device(dev);
-
data = iio_priv(indio_dev);
+ mutex_init(&data->lock);
dev_set_drvdata(dev, indio_dev);
data->regmap = regmap;
indio_dev->name = name;
@@ -982,25 +889,39 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
data->heater_temp = 320; /* degree Celsius */
data->heater_dur = 150; /* milliseconds */
- ret = bme680_chip_config(data);
- if (ret < 0) {
- dev_err(dev, "failed to set chip_config data\n");
- return ret;
- }
+ ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
+ BME680_CMD_SOFTRESET);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to reset chip\n");
- ret = bme680_gas_config(data);
- if (ret < 0) {
- dev_err(dev, "failed to set gas config data\n");
- return ret;
+ usleep_range(BME680_STARTUP_TIME_US, BME680_STARTUP_TIME_US + 1000);
+
+ ret = regmap_read(regmap, BME680_REG_CHIP_ID, &data->check);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Error reading chip ID\n");
+
+ if (data->check != BME680_CHIP_ID_VAL) {
+ dev_err(dev, "Wrong chip ID, got %x expected %x\n",
+ data->check, BME680_CHIP_ID_VAL);
+ return -ENODEV;
}
ret = bme680_read_calib(data, &data->bme680);
if (ret < 0) {
- dev_err(dev,
+ return dev_err_probe(dev, ret,
"failed to read calibration coefficients at probe\n");
- return ret;
}
+ ret = bme680_chip_config(data);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to set chip_config data\n");
+
+ ret = bme680_gas_config(data);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to set gas config data\n");
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(bme680_core_probe, IIO_BME680);
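The reworked probe above leans on dev_err_probe(), which logs the message, returns the error code it was given, and stays quiet (apart from a debug-level note recorded for devices_deferred) when that code is -EPROBE_DEFER. The net effect is that the usual log-then-return pair shrinks to a single statement:

        /* before */
        if (ret < 0) {
                dev_err(dev, "Error reading chip ID\n");
                return ret;
        }

        /* after */
        if (ret < 0)
                return dev_err_probe(dev, ret, "Error reading chip ID\n");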
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index 4404d42ae5ec..7c54bd17d4b0 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -100,7 +100,7 @@ static int bme680_regmap_spi_read(void *context, const void *reg,
return spi_write_then_read(spi, &addr, 1, val, val_size);
}
-static struct regmap_bus bme680_regmap_bus = {
+static const struct regmap_bus bme680_regmap_bus = {
.write = bme680_regmap_spi_write,
.read = bme680_regmap_spi_read,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
diff --git a/drivers/iio/chemical/sgp40.c b/drivers/iio/chemical/sgp40.c
index 7f0de14a1956..07d8ab830211 100644
--- a/drivers/iio/chemical/sgp40.c
+++ b/drivers/iio/chemical/sgp40.c
@@ -14,11 +14,16 @@
* 1) read raw logarithmic resistance value from sensor
* --> useful to pass it to the algorithm of the sensor vendor for
* measuring deteriorations and improvements of air quality.
+ * It can be read from the attribute in_resistance_raw.
*
- * 2) calculate an estimated absolute voc index (0 - 500 index points) for
- * measuring the air quality.
+ * 2) calculate an estimated absolute voc index (in_concentration_input,
+ *    0 - 500 index points) for measuring the air quality.
* For this purpose the value of the resistance for which the voc index
- * will be 250 can be set up using calibbias.
+ * will be 250 can be set up using in_resistance_calibbias (default 30000).
+ *
+ * The voc index is calculated as:
+ * x = (in_resistance_raw - in_resistance_calibbias) * 0.65
+ * in_concentration_input = 500 / (1 + e^x)
*
* Compensation values of relative humidity and temperature can be set up
* by writing to the out values of temp and humidityrelative.
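The logistic mapping spelled out in the updated comment also pins down the calibbias semantics: a raw reading equal to the bias lands exactly at mid-scale, and the index saturates toward either end as the difference grows. In C-comment form:

        /*
         * x = (in_resistance_raw - in_resistance_calibbias) * 0.65
         * in_concentration_input = 500 / (1 + e^x)
         *
         * raw == calibbias  ->  x = 0      ->  index = 500 / 2 = 250
         * raw << calibbias  ->  x -> -inf  ->  index -> 500
         * raw >> calibbias  ->  x -> +inf  ->  index -> 0
         */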
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 6bfe5d6847e7..9fc71a73caa1 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -198,9 +198,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
return 0;
out = (s16 *)st->samples;
- for_each_set_bit(i,
- indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
*out = data[i];
out++;
}
@@ -587,7 +585,7 @@ static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
int ret;
/* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
- for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
ret = cros_ec_sensors_cmd_read_u16(ec,
cros_ec_sensors_idx_to_reg(st, i),
data);
@@ -683,7 +681,7 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
return ret;
}
- for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
*data = st->resp->data.data[i];
data++;
}
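iio_for_each_active_channel() is a thin wrapper for the open-coded loop it replaces across this series: it walks the set bits of the active scan mask without the driver dereferencing masklength itself. The two forms iterate identically (process() is a placeholder for the per-channel work):

        int i;

        /* old, open-coded form */
        for_each_set_bit(i, indio_dev->active_scan_mask,
                         iio_get_masklength(indio_dev))
                process(i);

        /* new helper, same iteration */
        iio_for_each_active_channel(indio_dev, i)
                process(i);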
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 7190eaede7fb..ed15dcbf4cf6 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -158,7 +158,7 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
 * To calculate the multiplier, we convert the sf into a char string and
* count the number of characters
*/
- sf = (u64)uHz * 0xFFFF;
+ sf = uHz * 0xFFFF;
do_div(sf, MICROHZ_PER_HZ);
mult = scnprintf(buf, sizeof(buf), "%llu", sf) - 1;
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index a2596c2d3de3..1cfd7e2a622f 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -371,6 +371,17 @@ config LTC2632
To compile this driver as a module, choose M here: the
module will be called ltc2632.
+config LTC2664
+ tristate "Analog Devices LTC2664 and LTC2672 DAC SPI driver"
+ depends on SPI
+ select REGMAP
+ help
+	  Say yes here to build support for the Analog Devices
+	  LTC2664 and LTC2672 digital-to-analog converters (DAC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ltc2664.
+
config M62332
tristate "Mitsubishi M62332 DAC driver"
depends on I2C
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 8432a81a19dc..2cf148f16306 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DS4424) += ds4424.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_LTC1660) += ltc1660.o
obj-$(CONFIG_LTC2632) += ltc2632.o
+obj-$(CONFIG_LTC2664) += ltc2664.o
obj-$(CONFIG_LTC2688) += ltc2688.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 4572d6f49275..953fcfa2110b 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -20,8 +20,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/platform_data/ad5449.h>
-
#define AD5449_MAX_CHANNELS 2
#define AD5449_MAX_VREFS 2
@@ -268,7 +266,6 @@ static const char *ad5449_vref_name(struct ad5449 *st, int n)
static int ad5449_spi_probe(struct spi_device *spi)
{
- struct ad5449_platform_data *pdata = spi->dev.platform_data;
const struct spi_device_id *id = spi_get_device_id(spi);
struct iio_dev *indio_dev;
struct ad5449 *st;
@@ -306,16 +303,8 @@ static int ad5449_spi_probe(struct spi_device *spi)
mutex_init(&st->lock);
if (st->chip_info->has_ctrl) {
- unsigned int ctrl = 0x00;
- if (pdata) {
- if (pdata->hardware_clear_to_midscale)
- ctrl |= AD5449_CTRL_HCLR_TO_MIDSCALE;
- ctrl |= pdata->sdo_mode << AD5449_CTRL_SDO_OFFSET;
- st->has_sdo = pdata->sdo_mode != AD5449_SDO_DISABLED;
- } else {
- st->has_sdo = true;
- }
- ad5449_write(indio_dev, AD5449_CMD_CTRL, ctrl);
+ st->has_sdo = true;
+ ad5449_write(indio_dev, AD5449_CMD_CTRL, 0x0);
}
ret = iio_device_register(indio_dev);
diff --git a/drivers/iio/dac/ad9739a.c b/drivers/iio/dac/ad9739a.c
index f56eabe53723..615d1a196db3 100644
--- a/drivers/iio/dac/ad9739a.c
+++ b/drivers/iio/dac/ad9739a.c
@@ -145,7 +145,7 @@ static int ad9739a_buffer_postdisable(struct iio_dev *indio_dev)
struct ad9739a_state *st = iio_priv(indio_dev);
return iio_backend_data_source_set(st->back, 0,
- IIO_BACKEND_INTERNAL_CONTINUOS_WAVE);
+ IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE);
}
static bool ad9739a_reg_accessible(struct device *dev, unsigned int reg)
@@ -413,8 +413,7 @@ static int ad9739a_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = iio_backend_extend_chan_spec(indio_dev, st->back,
- &ad9739a_channels[0]);
+ ret = iio_backend_extend_chan_spec(st->back, &ad9739a_channels[0]);
if (ret)
return ret;
@@ -432,7 +431,13 @@ static int ad9739a_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(ad9739a_channels);
indio_dev->setup_ops = &ad9739a_buffer_setup_ops;
- return devm_iio_device_register(&spi->dev, indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ iio_backend_debugfs_add(st->back, indio_dev);
+
+ return 0;
}
static const struct of_device_id ad9739a_of_match[] = {
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
index 6d56428e623d..0cb00f3bec04 100644
--- a/drivers/iio/dac/adi-axi-dac.c
+++ b/drivers/iio/dac/adi-axi-dac.c
@@ -452,7 +452,7 @@ static int axi_dac_data_source_set(struct iio_backend *back, unsigned int chan,
struct axi_dac_state *st = iio_backend_get_priv(back);
switch (data) {
- case IIO_BACKEND_INTERNAL_CONTINUOS_WAVE:
+ case IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE:
return regmap_update_bits(st->regmap,
AXI_DAC_REG_CHAN_CNTRL_7(chan),
AXI_DAC_DATA_SEL,
@@ -507,7 +507,18 @@ static int axi_dac_set_sample_rate(struct iio_backend *back, unsigned int chan,
return 0;
}
-static const struct iio_backend_ops axi_dac_generic = {
+static int axi_dac_reg_access(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct axi_dac_state *st = iio_backend_get_priv(back);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static const struct iio_backend_ops axi_dac_generic_ops = {
.enable = axi_dac_enable,
.disable = axi_dac_disable,
.request_buffer = axi_dac_request_buffer,
@@ -517,6 +528,12 @@ static const struct iio_backend_ops axi_dac_generic = {
.ext_info_get = axi_dac_ext_info_get,
.data_source_set = axi_dac_data_source_set,
.set_sample_rate = axi_dac_set_sample_rate,
+ .debugfs_reg_access = iio_backend_debugfs_ptr(axi_dac_reg_access),
+};
+
+static const struct iio_backend_info axi_dac_generic = {
+ .name = "axi-dac",
+ .ops = &axi_dac_generic_ops,
};
static const struct regmap_config axi_dac_regmap_config = {
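Backend registration in this cycle takes a struct iio_backend_info, which pairs a name with the ops table, instead of the bare ops; the new debugfs hook is additionally wrapped in iio_backend_debugfs_ptr() so it compiles out when CONFIG_DEBUG_FS is disabled. The shape of the change, with hypothetical example_* symbols:

        static const struct iio_backend_ops example_ops = {
                .enable = example_enable,
                .debugfs_reg_access = iio_backend_debugfs_ptr(example_reg_access),
        };

        static const struct iio_backend_info example_info = {
                .name = "example-backend",
                .ops = &example_ops,
        };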
diff --git a/drivers/iio/dac/ltc2664.c b/drivers/iio/dac/ltc2664.c
new file mode 100644
index 000000000000..5be5345ac5c8
--- /dev/null
+++ b/drivers/iio/dac/ltc2664.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LTC2664 4 channel, 12-/16-Bit Voltage Output SoftSpan DAC driver
+ * LTC2672 5 channel, 12-/16-Bit Current Output SoftSpan DAC driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#define LTC2664_CMD_WRITE_N(n) (0x00 + (n))
+#define LTC2664_CMD_UPDATE_N(n) (0x10 + (n))
+#define LTC2664_CMD_WRITE_N_UPDATE_ALL 0x20
+#define LTC2664_CMD_WRITE_N_UPDATE_N(n) (0x30 + (n))
+#define LTC2664_CMD_POWER_DOWN_N(n) (0x40 + (n))
+#define LTC2664_CMD_POWER_DOWN_ALL 0x50
+#define LTC2664_CMD_SPAN_N(n) (0x60 + (n))
+#define LTC2664_CMD_CONFIG 0x70
+#define LTC2664_CMD_MUX 0xB0
+#define LTC2664_CMD_TOGGLE_SEL 0xC0
+#define LTC2664_CMD_GLOBAL_TOGGLE 0xD0
+#define LTC2664_CMD_NO_OPERATION 0xF0
+#define LTC2664_REF_DISABLE 0x0001
+#define LTC2664_MSPAN_SOFTSPAN 7
+
+#define LTC2672_MAX_CHANNEL 5
+#define LTC2672_MAX_SPAN 7
+#define LTC2672_SCALE_MULTIPLIER(n) (50 * BIT(n))
+
+enum {
+ LTC2664_SPAN_RANGE_0V_5V,
+ LTC2664_SPAN_RANGE_0V_10V,
+ LTC2664_SPAN_RANGE_M5V_5V,
+ LTC2664_SPAN_RANGE_M10V_10V,
+ LTC2664_SPAN_RANGE_M2V5_2V5,
+};
+
+enum {
+ LTC2664_INPUT_A,
+ LTC2664_INPUT_B,
+ LTC2664_INPUT_B_AVAIL,
+ LTC2664_POWERDOWN,
+ LTC2664_POWERDOWN_MODE,
+ LTC2664_TOGGLE_EN,
+ LTC2664_GLOBAL_TOGGLE,
+};
+
+static const u16 ltc2664_mspan_lut[8][2] = {
+	{ LTC2664_SPAN_RANGE_M10V_10V, 32768 }, /* MSP2=0, MSP1=0, MSP0=0 (0) */
+	{ LTC2664_SPAN_RANGE_M5V_5V, 32768 }, /* MSP2=0, MSP1=0, MSP0=1 (1) */
+	{ LTC2664_SPAN_RANGE_M2V5_2V5, 32768 }, /* MSP2=0, MSP1=1, MSP0=0 (2) */
+	{ LTC2664_SPAN_RANGE_0V_10V, 0 }, /* MSP2=0, MSP1=1, MSP0=1 (3) */
+	{ LTC2664_SPAN_RANGE_0V_10V, 32768 }, /* MSP2=1, MSP1=0, MSP0=0 (4) */
+	{ LTC2664_SPAN_RANGE_0V_5V, 0 }, /* MSP2=1, MSP1=0, MSP0=1 (5) */
+	{ LTC2664_SPAN_RANGE_0V_5V, 32768 }, /* MSP2=1, MSP1=1, MSP0=0 (6) */
+	{ LTC2664_SPAN_RANGE_0V_5V, 0 } /* MSP2=1, MSP1=1, MSP0=1 (7) */
+};
+
+struct ltc2664_state;
+
+struct ltc2664_chip_info {
+ const char *name;
+ int (*scale_get)(const struct ltc2664_state *st, int c);
+ int (*offset_get)(const struct ltc2664_state *st, int c);
+ int measurement_type;
+ unsigned int num_channels;
+ const int (*span_helper)[2];
+ unsigned int num_span;
+ unsigned int internal_vref_mv;
+ bool manual_span_support;
+ bool rfsadj_support;
+};
+
+struct ltc2664_chan {
+ /* indicates if the channel should be toggled */
+ bool toggle_chan;
+ /* indicates if the channel is in powered down state */
+ bool powerdown;
+ /* span code of the channel */
+ u8 span;
+ /* raw data of the current state of the chip registers (A/B) */
+ u16 raw[2];
+};
+
+struct ltc2664_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct ltc2664_chan channels[LTC2672_MAX_CHANNEL];
+ /* lock to protect against multiple access to the device and shared data */
+ struct mutex lock;
+ const struct ltc2664_chip_info *chip_info;
+ struct iio_chan_spec *iio_channels;
+ int vref_mv;
+ u32 rfsadj_ohms;
+ u32 toggle_sel;
+ bool global_toggle;
+};
+
+static const int ltc2664_span_helper[][2] = {
+ { 0, 5000 },
+ { 0, 10000 },
+ { -5000, 5000 },
+ { -10000, 10000 },
+ { -2500, 2500 },
+};
+
+static const int ltc2672_span_helper[][2] = {
+ { 0, 0 },
+ { 0, 3125 },
+ { 0, 6250 },
+ { 0, 12500 },
+ { 0, 25000 },
+ { 0, 50000 },
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 300000 },
+};
+
+static int ltc2664_scale_get(const struct ltc2664_state *st, int c)
+{
+ const struct ltc2664_chan *chan = &st->channels[c];
+ const int (*span_helper)[2] = st->chip_info->span_helper;
+ int span, fs;
+
+ span = chan->span;
+ if (span < 0)
+ return span;
+
+ fs = span_helper[span][1] - span_helper[span][0];
+
+ return fs * st->vref_mv / 2500;
+}
+
+static int ltc2672_scale_get(const struct ltc2664_state *st, int c)
+{
+ const struct ltc2664_chan *chan = &st->channels[c];
+ int span, fs;
+
+ span = chan->span - 1;
+ if (span < 0)
+ return span;
+
+ fs = 1000 * st->vref_mv;
+
+ if (span == LTC2672_MAX_SPAN)
+ return mul_u64_u32_div(4800, fs, st->rfsadj_ohms);
+
+ return mul_u64_u32_div(LTC2672_SCALE_MULTIPLIER(span), fs, st->rfsadj_ohms);
+}
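A worked pass through the arithmetic above, using the defaults defined in this file (1250 mV internal reference, 20 kohm RFSADJ) for a channel whose span code is 1:

        /*
         * span = 1 - 1 = 0
         * fs   = 1000 * 1250 = 1250000
         * LTC2672_SCALE_MULTIPLIER(0) = 50 * BIT(0) = 50
         *
         * scale = 50 * 1250000 / 20000 = 3125
         */

which matches the 3125 uA upper bound of ltc2672_span_helper[1]; read_raw() below then reports it as 3125 / 2^16 per code via IIO_VAL_FRACTIONAL_LOG2.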
+
+static int ltc2664_offset_get(const struct ltc2664_state *st, int c)
+{
+ const struct ltc2664_chan *chan = &st->channels[c];
+ int span;
+
+ span = chan->span;
+ if (span < 0)
+ return span;
+
+ if (st->chip_info->span_helper[span][0] < 0)
+ return -32768;
+
+ return 0;
+}
+
+static int ltc2664_dac_code_write(struct ltc2664_state *st, u32 chan, u32 input,
+ u16 code)
+{
+ struct ltc2664_chan *c = &st->channels[chan];
+ int ret, reg;
+
+ guard(mutex)(&st->lock);
+ /* select the correct input register to write to */
+ if (c->toggle_chan) {
+ ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL,
+ input << chan);
+ if (ret)
+ return ret;
+ }
+ /*
+	 * If in toggle mode, the DAC should be updated by an
+	 * external signal (or sw toggle) rather than here.
+ */
+ if (st->toggle_sel & BIT(chan))
+ reg = LTC2664_CMD_WRITE_N(chan);
+ else
+ reg = LTC2664_CMD_WRITE_N_UPDATE_N(chan);
+
+ ret = regmap_write(st->regmap, reg, code);
+ if (ret)
+ return ret;
+
+ c->raw[input] = code;
+
+ if (c->toggle_chan) {
+ ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL,
+ st->toggle_sel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltc2664_dac_code_read(struct ltc2664_state *st, u32 chan, u32 input,
+ u32 *code)
+{
+ guard(mutex)(&st->lock);
+ *code = st->channels[chan].raw[input];
+}
+
+static const int ltc2664_raw_range[] = { 0, 1, U16_MAX };
+
+static int ltc2664_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ *vals = ltc2664_raw_range;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc2664_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long info)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ltc2664_dac_code_read(st, chan->channel, LTC2664_INPUT_A, val);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = st->chip_info->offset_get(st, chan->channel);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->chip_info->scale_get(st, chan->channel);
+
+ *val2 = 16;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltc2664_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long info)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (val > U16_MAX || val < 0)
+ return -EINVAL;
+
+ return ltc2664_dac_code_write(st, chan->channel,
+ LTC2664_INPUT_A, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ltc2664_reg_bool_get(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+ u32 val;
+
+ guard(mutex)(&st->lock);
+ switch (private) {
+ case LTC2664_POWERDOWN:
+ val = st->channels[chan->channel].powerdown;
+
+ return sysfs_emit(buf, "%u\n", val);
+ case LTC2664_POWERDOWN_MODE:
+ return sysfs_emit(buf, "42kohm_to_gnd\n");
+ case LTC2664_TOGGLE_EN:
+ val = !!(st->toggle_sel & BIT(chan->channel));
+
+ return sysfs_emit(buf, "%u\n", val);
+ case LTC2664_GLOBAL_TOGGLE:
+ val = st->global_toggle;
+
+ return sysfs_emit(buf, "%u\n", val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ltc2664_reg_bool_set(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+ int ret;
+ bool en;
+
+ ret = kstrtobool(buf, &en);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&st->lock);
+ switch (private) {
+ case LTC2664_POWERDOWN:
+ ret = regmap_write(st->regmap,
+ en ? LTC2664_CMD_POWER_DOWN_N(chan->channel) :
+ LTC2664_CMD_UPDATE_N(chan->channel), en);
+ if (ret)
+ return ret;
+
+ st->channels[chan->channel].powerdown = en;
+
+ return len;
+ case LTC2664_TOGGLE_EN:
+ if (en)
+ st->toggle_sel |= BIT(chan->channel);
+ else
+ st->toggle_sel &= ~BIT(chan->channel);
+
+ ret = regmap_write(st->regmap, LTC2664_CMD_TOGGLE_SEL,
+ st->toggle_sel);
+ if (ret)
+ return ret;
+
+ return len;
+ case LTC2664_GLOBAL_TOGGLE:
+ ret = regmap_write(st->regmap, LTC2664_CMD_GLOBAL_TOGGLE, en);
+ if (ret)
+ return ret;
+
+ st->global_toggle = en;
+
+ return len;
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ltc2664_dac_input_read(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+ u32 val;
+
+ if (private == LTC2664_INPUT_B_AVAIL)
+ return sysfs_emit(buf, "[%u %u %u]\n", ltc2664_raw_range[0],
+ ltc2664_raw_range[1],
+ ltc2664_raw_range[2] / 4);
+
+ ltc2664_dac_code_read(st, chan->channel, private, &val);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t ltc2664_dac_input_write(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+ int ret;
+ u16 val;
+
+ if (private == LTC2664_INPUT_B_AVAIL)
+ return -EINVAL;
+
+ ret = kstrtou16(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = ltc2664_dac_code_write(st, chan->channel, private, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static int ltc2664_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ltc2664_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return -EOPNOTSUPP;
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+#define LTC2664_CHAN_EXT_INFO(_name, _what, _shared, _read, _write) { \
+ .name = _name, \
+ .read = (_read), \
+ .write = (_write), \
+ .private = (_what), \
+ .shared = (_shared), \
+}
+
+/*
+ * For toggle mode we only expose the symbol attr (sw_toggle) in case a TGPx
+ * pin is not provided in the dts.
+ */
+static const struct iio_chan_spec_ext_info ltc2664_toggle_sym_ext_info[] = {
+ LTC2664_CHAN_EXT_INFO("raw0", LTC2664_INPUT_A, IIO_SEPARATE,
+ ltc2664_dac_input_read, ltc2664_dac_input_write),
+ LTC2664_CHAN_EXT_INFO("raw1", LTC2664_INPUT_B, IIO_SEPARATE,
+ ltc2664_dac_input_read, ltc2664_dac_input_write),
+ LTC2664_CHAN_EXT_INFO("powerdown", LTC2664_POWERDOWN, IIO_SEPARATE,
+ ltc2664_reg_bool_get, ltc2664_reg_bool_set),
+ LTC2664_CHAN_EXT_INFO("powerdown_mode", LTC2664_POWERDOWN_MODE,
+ IIO_SEPARATE, ltc2664_reg_bool_get, NULL),
+ LTC2664_CHAN_EXT_INFO("symbol", LTC2664_GLOBAL_TOGGLE, IIO_SEPARATE,
+ ltc2664_reg_bool_get, ltc2664_reg_bool_set),
+ LTC2664_CHAN_EXT_INFO("toggle_en", LTC2664_TOGGLE_EN,
+ IIO_SEPARATE, ltc2664_reg_bool_get,
+ ltc2664_reg_bool_set),
+ { }
+};
+
+static const struct iio_chan_spec_ext_info ltc2664_ext_info[] = {
+ LTC2664_CHAN_EXT_INFO("powerdown", LTC2664_POWERDOWN, IIO_SEPARATE,
+ ltc2664_reg_bool_get, ltc2664_reg_bool_set),
+ LTC2664_CHAN_EXT_INFO("powerdown_mode", LTC2664_POWERDOWN_MODE,
+ IIO_SEPARATE, ltc2664_reg_bool_get, NULL),
+ { }
+};
+
+static const struct iio_chan_spec ltc2664_channel_template = {
+ .indexed = 1,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = ltc2664_ext_info,
+};
+
+static const struct ltc2664_chip_info ltc2664_chip = {
+ .name = "ltc2664",
+ .scale_get = ltc2664_scale_get,
+ .offset_get = ltc2664_offset_get,
+ .measurement_type = IIO_VOLTAGE,
+ .num_channels = 4,
+ .span_helper = ltc2664_span_helper,
+ .num_span = ARRAY_SIZE(ltc2664_span_helper),
+ .internal_vref_mv = 2500,
+ .manual_span_support = true,
+ .rfsadj_support = false,
+};
+
+static const struct ltc2664_chip_info ltc2672_chip = {
+ .name = "ltc2672",
+ .scale_get = ltc2672_scale_get,
+ .offset_get = ltc2664_offset_get,
+ .measurement_type = IIO_CURRENT,
+ .num_channels = 5,
+ .span_helper = ltc2672_span_helper,
+ .num_span = ARRAY_SIZE(ltc2672_span_helper),
+ .internal_vref_mv = 1250,
+ .manual_span_support = false,
+ .rfsadj_support = true,
+};
+
+static int ltc2664_set_span(const struct ltc2664_state *st, int min, int max,
+ int chan)
+{
+ const struct ltc2664_chip_info *chip_info = st->chip_info;
+ const int (*span_helper)[2] = chip_info->span_helper;
+ int span, ret;
+
+ for (span = 0; span < chip_info->num_span; span++) {
+ if (min == span_helper[span][0] && max == span_helper[span][1])
+ break;
+ }
+
+ if (span == chip_info->num_span)
+ return -EINVAL;
+
+ ret = regmap_write(st->regmap, LTC2664_CMD_SPAN_N(chan), span);
+ if (ret)
+ return ret;
+
+ return span;
+}
+
+static int ltc2664_channel_config(struct ltc2664_state *st)
+{
+ const struct ltc2664_chip_info *chip_info = st->chip_info;
+ struct device *dev = &st->spi->dev;
+ u32 reg, tmp[2], mspan;
+ int ret, span = 0;
+
+ mspan = LTC2664_MSPAN_SOFTSPAN;
+ ret = device_property_read_u32(dev, "adi,manual-span-operation-config",
+ &mspan);
+ if (!ret) {
+ if (!chip_info->manual_span_support)
+ return dev_err_probe(dev, -EINVAL,
+ "adi,manual-span-operation-config not supported\n");
+
+ if (mspan >= ARRAY_SIZE(ltc2664_mspan_lut))
+ return dev_err_probe(dev, -EINVAL,
+ "adi,manual-span-operation-config not in range\n");
+ }
+
+ st->rfsadj_ohms = 20000;
+ ret = device_property_read_u32(dev, "adi,rfsadj-ohms", &st->rfsadj_ohms);
+ if (!ret) {
+ if (!chip_info->rfsadj_support)
+ return dev_err_probe(dev, -EINVAL,
+ "adi,rfsadj-ohms not supported\n");
+
+ if (st->rfsadj_ohms < 19000 || st->rfsadj_ohms > 41000)
+ return dev_err_probe(dev, -EINVAL,
+ "adi,rfsadj-ohms not in range\n");
+ }
+
+ device_for_each_child_node_scoped(dev, child) {
+ struct ltc2664_chan *chan;
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get reg property\n");
+
+ if (reg >= chip_info->num_channels)
+ return dev_err_probe(dev, -EINVAL,
+ "reg bigger than: %d\n",
+ chip_info->num_channels);
+
+ chan = &st->channels[reg];
+
+ if (fwnode_property_read_bool(child, "adi,toggle-mode")) {
+ chan->toggle_chan = true;
+ /* assume sw toggle ABI */
+ st->iio_channels[reg].ext_info = ltc2664_toggle_sym_ext_info;
+
+ /*
+ * Clear IIO_CHAN_INFO_RAW bit as toggle channels expose
+ * out_voltage/current_raw{0|1} files.
+ */
+ __clear_bit(IIO_CHAN_INFO_RAW,
+ &st->iio_channels[reg].info_mask_separate);
+ }
+
+ chan->raw[0] = ltc2664_mspan_lut[mspan][1];
+ chan->raw[1] = ltc2664_mspan_lut[mspan][1];
+
+ chan->span = ltc2664_mspan_lut[mspan][0];
+
+ ret = fwnode_property_read_u32_array(child, "output-range-microvolt",
+ tmp, ARRAY_SIZE(tmp));
+ if (!ret && mspan == LTC2664_MSPAN_SOFTSPAN) {
+ chan->span = ltc2664_set_span(st, tmp[0] / 1000,
+ tmp[1] / 1000, reg);
+ if (span < 0)
+ return dev_err_probe(dev, span,
+ "Failed to set span\n");
+ }
+
+ ret = fwnode_property_read_u32_array(child, "output-range-microamp",
+ tmp, ARRAY_SIZE(tmp));
+ if (!ret) {
+ chan->span = ltc2664_set_span(st, 0, tmp[1] / 1000, reg);
+ if (span < 0)
+ return dev_err_probe(dev, span,
+ "Failed to set span\n");
+ }
+ }
+
+ return 0;
+}
+
+static int ltc2664_setup(struct ltc2664_state *st)
+{
+ const struct ltc2664_chip_info *chip_info = st->chip_info;
+ struct gpio_desc *gpio;
+ int ret, i;
+
+ /* If we have a clr/reset pin, use that to reset the chip. */
+ gpio = devm_gpiod_get_optional(&st->spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return dev_err_probe(&st->spi->dev, PTR_ERR(gpio),
+ "Failed to get reset gpio");
+ if (gpio) {
+ fsleep(1000);
+ gpiod_set_value_cansleep(gpio, 0);
+ }
+
+ /*
+ * Duplicate the default channel configuration as it can change during
+ * @ltc2664_channel_config()
+ */
+ st->iio_channels = devm_kcalloc(&st->spi->dev,
+ chip_info->num_channels,
+ sizeof(struct iio_chan_spec),
+ GFP_KERNEL);
+ if (!st->iio_channels)
+ return -ENOMEM;
+
+ for (i = 0; i < chip_info->num_channels; i++) {
+ st->iio_channels[i] = ltc2664_channel_template;
+ st->iio_channels[i].type = chip_info->measurement_type;
+ st->iio_channels[i].channel = i;
+ }
+
+ ret = ltc2664_channel_config(st);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(st->regmap, LTC2664_CMD_CONFIG, LTC2664_REF_DISABLE);
+}
+
+static const struct regmap_config ltc2664_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = LTC2664_CMD_NO_OPERATION,
+};
+
+static const struct iio_info ltc2664_info = {
+ .write_raw = ltc2664_write_raw,
+ .read_raw = ltc2664_read_raw,
+ .read_avail = ltc2664_read_avail,
+ .debugfs_reg_access = ltc2664_reg_access,
+};
+
+static int ltc2664_probe(struct spi_device *spi)
+{
+ static const char * const regulators[] = { "vcc", "iovcc", "v-neg" };
+ const struct ltc2664_chip_info *chip_info;
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ltc2664_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ chip_info = spi_get_device_match_data(spi);
+ if (!chip_info)
+ return -ENODEV;
+
+ st->chip_info = chip_info;
+
+ mutex_init(&st->lock);
+
+ st->regmap = devm_regmap_init_spi(spi, &ltc2664_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap),
+ "Failed to init regmap");
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators),
+ regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulators\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (ret < 0 && ret != -ENODEV)
+ return ret;
+
+ st->vref_mv = ret > 0 ? ret / 1000 : chip_info->internal_vref_mv;
+
+ ret = ltc2664_setup(st);
+ if (ret)
+ return ret;
+
+ indio_dev->name = chip_info->name;
+ indio_dev->info = &ltc2664_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->iio_channels;
+ indio_dev->num_channels = chip_info->num_channels;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ltc2664_id[] = {
+ { "ltc2664", (kernel_ulong_t)&ltc2664_chip },
+ { "ltc2672", (kernel_ulong_t)&ltc2672_chip },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ltc2664_id);
+
+static const struct of_device_id ltc2664_of_id[] = {
+ { .compatible = "adi,ltc2664", .data = &ltc2664_chip },
+ { .compatible = "adi,ltc2672", .data = &ltc2672_chip },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ltc2664_of_id);
+
+static struct spi_driver ltc2664_driver = {
+ .driver = {
+ .name = "ltc2664",
+ .of_match_table = ltc2664_of_id,
+ },
+ .probe = ltc2664_probe,
+ .id_table = ltc2664_id,
+};
+module_spi_driver(ltc2664_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_AUTHOR("Kim Seer Paller <kimseer.paller@analog.com>");
+MODULE_DESCRIPTION("Analog Devices LTC2664 and LTC2672 DAC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index af50d2a95898..376dca163c91 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -918,7 +918,7 @@ static bool ltc2688_reg_writable(struct device *dev, unsigned int reg)
return false;
}
-static struct regmap_bus ltc2688_regmap_bus = {
+static const struct regmap_bus ltc2688_regmap_bus = {
.read = ltc2688_spi_read,
.write = ltc2688_spi_write,
.read_flag_mask = LTC2688_READ_OPERATION,
diff --git a/drivers/iio/dac/mcp4728.c b/drivers/iio/dac/mcp4728.c
index c449ca949465..192175dc6419 100644
--- a/drivers/iio/dac/mcp4728.c
+++ b/drivers/iio/dac/mcp4728.c
@@ -84,7 +84,6 @@ enum mcp4728_scale {
struct mcp4728_data {
struct i2c_client *client;
- struct regulator *vdd_reg;
bool powerdown;
int scales_avail[MCP4728_N_SCALES * 2];
struct mcp4728_channel_data chdata[MCP4728_N_CHANNELS];
@@ -415,15 +414,9 @@ static void mcp4728_init_scale_avail(enum mcp4728_scale scale, int vref_mv,
data->scales_avail[scale * 2 + 1] = value_micro;
}
-static int mcp4728_init_scales_avail(struct mcp4728_data *data)
+static int mcp4728_init_scales_avail(struct mcp4728_data *data, int vdd_mv)
{
- int ret;
-
- ret = regulator_get_voltage(data->vdd_reg);
- if (ret < 0)
- return ret;
-
- mcp4728_init_scale_avail(MCP4728_SCALE_VDD, ret / 1000, data);
+ mcp4728_init_scale_avail(MCP4728_SCALE_VDD, vdd_mv, data);
mcp4728_init_scale_avail(MCP4728_SCALE_VINT_NO_GAIN, 2048, data);
mcp4728_init_scale_avail(MCP4728_SCALE_VINT_GAIN_X2, 4096, data);
@@ -530,17 +523,12 @@ static int mcp4728_init_channels_data(struct mcp4728_data *data)
return 0;
}
-static void mcp4728_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int mcp4728_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct mcp4728_data *data;
struct iio_dev *indio_dev;
- int err;
+ int ret, vdd_mv;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -550,18 +538,11 @@ static int mcp4728_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(data->vdd_reg))
- return PTR_ERR(data->vdd_reg);
-
- err = regulator_enable(data->vdd_reg);
- if (err)
- return err;
+ ret = devm_regulator_get_enable_read_voltage(&client->dev, "vdd");
+ if (ret < 0)
+ return ret;
- err = devm_add_action_or_reset(&client->dev, mcp4728_reg_disable,
- data->vdd_reg);
- if (err)
- return err;
+ vdd_mv = ret / 1000;
/*
* MCP4728 has internal EEPROM that save each channel boot
@@ -569,15 +550,15 @@ static int mcp4728_probe(struct i2c_client *client)
* driver at kernel boot. mcp4728_init_channels_data() reads back DAC
* settings and stores them in data structure.
*/
- err = mcp4728_init_channels_data(data);
- if (err) {
- return dev_err_probe(&client->dev, err,
+ ret = mcp4728_init_channels_data(data);
+ if (ret) {
+ return dev_err_probe(&client->dev, ret,
"failed to read mcp4728 current configuration\n");
}
- err = mcp4728_init_scales_avail(data);
- if (err) {
- return dev_err_probe(&client->dev, err,
+ ret = mcp4728_init_scales_avail(data, vdd_mv);
+ if (ret) {
+ return dev_err_probe(&client->dev, ret,
"failed to init scales\n");
}
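devm_regulator_get_enable_read_voltage() folds the old get/enable/read-voltage/devm-teardown sequence into one call that returns the supply voltage in microvolts, or a negative error code. Both usage patterns from this series, sketched with internal_vref_mv as a placeholder fallback:

        /* mandatory supply, as here: any failure aborts probe */
        ret = devm_regulator_get_enable_read_voltage(dev, "vdd");
        if (ret < 0)
                return ret;
        vdd_mv = ret / 1000;

        /* optional supply, as in ltc2664 above: -ENODEV means not wired */
        ret = devm_regulator_get_enable_read_voltage(dev, "ref");
        if (ret < 0 && ret != -ENODEV)
                return ret;
        vref_mv = ret > 0 ? ret / 1000 : internal_vref_mv;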
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index da4327624d45..26aa99059813 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -30,7 +30,6 @@ struct mcp4922_state {
struct spi_device *spi;
unsigned int value[MCP4922_NUM_CHANNELS];
unsigned int vref_mv;
- struct regulator *vref_reg;
u8 mosi[2] __aligned(IIO_DMA_MINALIGN);
};
@@ -132,27 +131,13 @@ static int mcp4922_probe(struct spi_device *spi)
state = iio_priv(indio_dev);
state->spi = spi;
- state->vref_reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(state->vref_reg))
- return dev_err_probe(&spi->dev, PTR_ERR(state->vref_reg),
- "Vref regulator not specified\n");
-
- ret = regulator_enable(state->vref_reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable vref regulator: %d\n",
- ret);
- return ret;
- }
- ret = regulator_get_voltage(state->vref_reg);
- if (ret < 0) {
- dev_err(&spi->dev, "Failed to read vref regulator: %d\n",
- ret);
- goto error_disable_reg;
- }
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret, "Failed to get vref voltage\n");
+
state->vref_mv = ret / 1000;
- spi_set_drvdata(spi, indio_dev);
id = spi_get_device_id(spi);
indio_dev->info = &mcp4922_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -163,30 +148,13 @@ static int mcp4922_probe(struct spi_device *spi)
indio_dev->num_channels = MCP4922_NUM_CHANNELS;
indio_dev->name = id->name;
- ret = iio_device_register(indio_dev);
- if (ret) {
- dev_err(&spi->dev, "Failed to register iio device: %d\n",
- ret);
- goto error_disable_reg;
- }
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to register iio device\n");
return 0;
-
-error_disable_reg:
- regulator_disable(state->vref_reg);
-
- return ret;
}
-static void mcp4922_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct mcp4922_state *state;
-
- iio_device_unregister(indio_dev);
- state = iio_priv(indio_dev);
- regulator_disable(state->vref_reg);
-}
static const struct spi_device_id mcp4922_id[] = {
{"mcp4902", ID_MCP4902},
@@ -202,7 +170,6 @@ static struct spi_driver mcp4922_driver = {
.name = "mcp4922",
},
.probe = mcp4922_probe,
- .remove = mcp4922_remove,
.id_table = mcp4922_id,
};
module_spi_driver(mcp4922_driver);
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 7f89d2a52f49..6f4aa4794a0c 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -249,7 +249,9 @@ static int ti_dac_probe(struct spi_device *spi)
spi->mode = SPI_MODE_1;
spi->bits_per_word = 16;
- spi_setup(spi);
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "spi_setup failed\n");
indio_dev->info = &ti_dac_info;
indio_dev->name = spi_get_device_id(spi)->name;
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index 9b2f99449a82..4ca3f1aaff99 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -68,7 +68,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
* Here let's pretend we have random access. And the values are in the
* constant table fakedata.
*/
- for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength)
+ iio_for_each_active_channel(indio_dev, j)
data[i++] = fakedata[j];
iio_push_to_buffers_with_timestamp(indio_dev, data,
diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c
index 9284c13f1abb..25fbc2cef1f7 100644
--- a/drivers/iio/frequency/adf4377.c
+++ b/drivers/iio/frequency/adf4377.c
@@ -400,7 +400,13 @@ enum muxout_select_mode {
ADF4377_MUXOUT_HIGH = 0x8,
};
+struct adf4377_chip_info {
+ const char *name;
+ bool has_gpio_enclk2;
+};
+
struct adf4377_state {
+ const struct adf4377_chip_info *chip_info;
struct spi_device *spi;
struct regmap *regmap;
struct clk *clkin;
@@ -889,11 +895,13 @@ static int adf4377_properties_parse(struct adf4377_state *st)
return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk1),
"failed to get the CE GPIO\n");
- st->gpio_enclk2 = devm_gpiod_get_optional(&st->spi->dev, "clk2-enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(st->gpio_enclk2))
- return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2),
- "failed to get the CE GPIO\n");
+ if (st->chip_info->has_gpio_enclk2) {
+ st->gpio_enclk2 = devm_gpiod_get_optional(&st->spi->dev, "clk2-enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_enclk2))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2),
+ "failed to get the CE GPIO\n");
+ }
ret = device_property_match_property_string(&spi->dev, "adi,muxout-select",
adf4377_muxout_modes,
@@ -921,6 +929,16 @@ static int adf4377_freq_change(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
+static const struct adf4377_chip_info adf4377_chip_info = {
+ .name = "adf4377",
+ .has_gpio_enclk2 = true,
+};
+
+static const struct adf4377_chip_info adf4378_chip_info = {
+ .name = "adf4378",
+ .has_gpio_enclk2 = false,
+};
+
static int adf4377_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -945,6 +963,7 @@ static int adf4377_probe(struct spi_device *spi)
st->regmap = regmap;
st->spi = spi;
+ st->chip_info = spi_get_device_match_data(spi);
mutex_init(&st->lock);
ret = adf4377_properties_parse(st);
@@ -964,13 +983,15 @@ static int adf4377_probe(struct spi_device *spi)
}
static const struct spi_device_id adf4377_id[] = {
- { "adf4377", 0 },
+ { "adf4377", (kernel_ulong_t)&adf4377_chip_info },
+ { "adf4378", (kernel_ulong_t)&adf4378_chip_info },
{}
};
MODULE_DEVICE_TABLE(spi, adf4377_id);
static const struct of_device_id adf4377_of_match[] = {
- { .compatible = "adi,adf4377" },
+ { .compatible = "adi,adf4377", .data = &adf4377_chip_info },
+ { .compatible = "adi,adf4378", .data = &adf4378_chip_info },
{}
};
MODULE_DEVICE_TABLE(of, adf4377_of_match);
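Since the spi_device_id and of_device_id tables carry the same chip_info pointers, spi_get_device_match_data() resolves the per-variant data whether the device was enumerated from the devicetree or from a plain SPI board id. A defensive sketch (the NULL check is illustrative, not part of this patch):

        st->chip_info = spi_get_device_match_data(spi);
        if (!st->chip_info)
                return -ENODEV;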
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 52326dc521ac..85637e8ac45f 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -321,8 +321,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
if (ret)
goto err;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = spi_write_then_read(afe->spi,
&afe4403_channel_values[bit], 1,
rx, sizeof(rx));
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 7f69baa1ed53..d49e1572a439 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -333,8 +333,7 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private)
struct afe4404_data *afe = iio_priv(indio_dev);
int ret, bit, i = 0;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = regmap_read(afe->regmap, afe4404_channel_values[bit],
&afe->buffer[i++]);
if (ret)
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 07a343e35a81..1d074eb6a8c5 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -293,7 +293,7 @@ static irqreturn_t max30102_interrupt_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct max30102_data *data = iio_priv(indio_dev);
unsigned int measurements = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
int ret, cnt = 0;
mutex_lock(&data->lock);
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index b15b7a3b66d5..54f11f000b6f 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -25,6 +25,17 @@ config DHT11
Other sensors should work as well as long as they speak the
same protocol.
+config ENS210
+ tristate "ENS210 temperature and humidity sensor"
+ depends on I2C
+ select CRC7
+ help
+ Say yes here to get support for the ScioSense ENS210 family of
+ humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module will be
+ called ens210.
+
config HDC100X
tristate "TI HDC100x relative humidity and temperature sensor"
depends on I2C
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index 5fbeef299f61..34b3dc749466 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
+obj-$(CONFIG_ENS210) += ens210.o
obj-$(CONFIG_HDC100X) += hdc100x.o
obj-$(CONFIG_HDC2010) += hdc2010.o
obj-$(CONFIG_HDC3020) += hdc3020.o
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index a56474be5dd2..6b0aa3a3f025 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -174,8 +174,7 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
data->scan.chans[1] = sensor_data.temp_data;
} else {
i = 0;
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
data->scan.chans[i] = (bit ? sensor_data.temp_data :
sensor_data.hum_data);
i++;
diff --git a/drivers/iio/humidity/ens210.c b/drivers/iio/humidity/ens210.c
new file mode 100644
index 000000000000..e9167574203a
--- /dev/null
+++ b/drivers/iio/humidity/ens210.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ens210.c - Support for ScioSense ens210 temperature & humidity sensor family
+ *
+ * (7-bit I2C slave address 0x43 ENS210)
+ * (7-bit I2C slave address 0x43 ENS210A)
+ * (7-bit I2C slave address 0x44 ENS211)
+ * (7-bit I2C slave address 0x45 ENS212)
+ * (7-bit I2C slave address 0x46 ENS213A)
+ * (7-bit I2C slave address 0x47 ENS215)
+ *
+ * Datasheet:
+ * https://www.sciosense.com/wp-content/uploads/2024/04/ENS21x-Datasheet.pdf
+ * https://www.sciosense.com/wp-content/uploads/2023/12/ENS210-Datasheet.pdf
+ */
+
+#include <linux/crc7.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+/* register definitions */
+#define ENS210_REG_PART_ID 0x00
+#define ENS210_REG_DIE_REV 0x02
+#define ENS210_REG_UID 0x04
+#define ENS210_REG_SYS_CTRL 0x10
+#define ENS210_REG_SYS_STAT 0x11
+#define ENS210_REG_SENS_RUN 0x21
+#define ENS210_REG_SENS_START 0x22
+#define ENS210_REG_SENS_STOP 0x23
+#define ENS210_REG_SENS_STAT 0x24
+#define ENS210_REG_T_VAL 0x30
+#define ENS210_REG_H_VAL 0x33
+
+/* value definitions */
+#define ENS210_SENS_START_T_START BIT(0)
+#define ENS210_SENS_START_H_START BIT(1)
+
+#define ENS210_SENS_STAT_T_ACTIVE BIT(0)
+#define ENS210_SENS_STAT_H_ACTIVE BIT(1)
+
+#define ENS210_SYS_CTRL_LOW_POWER_ENABLE BIT(0)
+#define ENS210_SYS_CTRL_SYS_RESET BIT(7)
+
+#define ENS210_SYS_STAT_SYS_ACTIVE BIT(0)
+
+enum ens210_partnumber {
+ ENS210 = 0x0210,
+ ENS210A = 0xa210,
+ ENS211 = 0x0211,
+ ENS212 = 0x0212,
+ ENS213A = 0xa213,
+ ENS215 = 0x0215,
+};
+
+/**
+ * struct ens210_chip_info - Humidity/Temperature chip specific information
+ * @name: name of device
+ * @part_id: chip identifier
+ * @conv_time_msec: time needed for the conversion, in milliseconds
+ */
+struct ens210_chip_info {
+ const char *name;
+ enum ens210_partnumber part_id;
+ unsigned int conv_time_msec;
+};
+
+/**
+ * struct ens210_data - Humidity/Temperature sensor device structure
+ * @client: i2c client
+ * @chip_info: chip specific information
+ * @lock: lock protecting against simultaneous callers of get_measurement
+ * since multiple uninterrupted transactions are required
+ */
+struct ens210_data {
+ struct i2c_client *client;
+ const struct ens210_chip_info *chip_info;
+ struct mutex lock;
+};
+
+/* calculate 17-bit crc7 */
+static u8 ens210_crc7(u32 val)
+{
+ unsigned int val_be = (val & 0x1ffff) >> 0x8;
+
+ return crc7_be(0xde, (u8 *)&val_be, 3) >> 1;
+}
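For reference, ens210_get_measurement() below unpacks the 24-bit T_VAL/H_VAL word that this CRC protects; the layout implied by its shifts is:

        /*
         * bits 23..17  crc7 over the low 17 bits
         * bit  16      data-valid flag
         * bits 15..0   measurement value
         */
        value = regval & GENMASK(15, 0);   /* raw reading */
        valid = (regval >> 16) & 0x1;      /* must be 1 */
        crc   = (regval >> 17) & 0x7f;     /* must equal ens210_crc7(regval) */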
+
+static int ens210_get_measurement(struct iio_dev *indio_dev, bool temp, int *val)
+{
+ struct ens210_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ u32 regval;
+ u8 regval_le[3];
+ int ret;
+
+ /* assert read */
+ ret = i2c_smbus_write_byte_data(data->client, ENS210_REG_SENS_START,
+ temp ? ENS210_SENS_START_T_START :
+ ENS210_SENS_START_H_START);
+ if (ret)
+ return ret;
+
+ /* wait for conversion to be ready */
+ msleep(data->chip_info->conv_time_msec);
+
+ ret = i2c_smbus_read_byte_data(data->client, ENS210_REG_SENS_STAT);
+ if (ret < 0)
+ return ret;
+
+ /* perform read */
+ ret = i2c_smbus_read_i2c_block_data(
+ data->client, temp ? ENS210_REG_T_VAL : ENS210_REG_H_VAL, 3,
+ regval_le);
+ if (ret < 0) {
+ dev_err(dev, "failed to read register");
+ return -EIO;
+ }
+ if (ret != 3) {
+ dev_err(dev, "expected 3 bytes, received %d\n", ret);
+ return -EIO;
+ }
+
+ regval = get_unaligned_le24(regval_le);
+ if (ens210_crc7(regval) != ((regval >> 17) & 0x7f)) {
+ dev_err(dev, "invalid crc\n");
+ return -EIO;
+ }
+
+ if (!((regval >> 16) & 0x1)) {
+ dev_err(dev, "data is not valid");
+ return -EIO;
+ }
+
+ *val = regval & GENMASK(15, 0);
+ return IIO_VAL_INT;
+}
+
+static int ens210_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long mask)
+{
+ struct ens210_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ scoped_guard(mutex, &data->lock) {
+ ret = ens210_get_measurement(
+ indio_dev, channel->type == IIO_TEMP, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL; /* compiler warning workaround */
+ case IIO_CHAN_INFO_SCALE:
+ if (channel->type == IIO_TEMP) {
+ *val = 15;
+ *val2 = 625000;
+ } else {
+ *val = 1;
+ *val2 = 953125;
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -17481;
+ *val2 = 600000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec ens210_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }
+};
+
+static const struct iio_info ens210_info = {
+ .read_raw = ens210_read_raw,
+};
+
+static int ens210_probe(struct i2c_client *client)
+{
+ struct ens210_data *data;
+ struct iio_dev *indio_dev;
+ uint16_t part_id;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ return dev_err_probe(&client->dev, -EOPNOTSUPP,
+ "adapter does not support some i2c transactions\n");
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+ data->chip_info = i2c_get_match_data(client);
+
+ ret = devm_regulator_get_enable(&client->dev, "vdd");
+ if (ret)
+ return ret;
+
+ /* reset device */
+ ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL,
+ ENS210_SYS_CTRL_SYS_RESET);
+ if (ret)
+ return ret;
+
+ /* wait for device to become active */
+ usleep_range(4000, 5000);
+
+ /* disable low power mode */
+ ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL, 0x00);
+ if (ret)
+ return ret;
+
+ /* wait for device to finish */
+ usleep_range(4000, 5000);
+
+ /* get part_id */
+ ret = i2c_smbus_read_word_data(client, ENS210_REG_PART_ID);
+ if (ret < 0)
+ return ret;
+ part_id = ret;
+
+ if (part_id != data->chip_info->part_id) {
+ dev_info(&client->dev,
+ "Part ID does not match (0x%04x != 0x%04x)\n", part_id,
+ data->chip_info->part_id);
+ }
+
+ /* reenable low power */
+ ret = i2c_smbus_write_byte_data(client, ENS210_REG_SYS_CTRL,
+ ENS210_SYS_CTRL_LOW_POWER_ENABLE);
+ if (ret)
+ return ret;
+
+ indio_dev->name = data->chip_info->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ens210_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ens210_channels);
+ indio_dev->info = &ens210_info;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct ens210_chip_info ens210_chip_info_data = {
+ .name = "ens210",
+ .part_id = ENS210,
+ .conv_time_msec = 130,
+};
+
+static const struct ens210_chip_info ens210a_chip_info_data = {
+ .name = "ens210a",
+ .part_id = ENS210A,
+ .conv_time_msec = 130,
+};
+
+static const struct ens210_chip_info ens211_chip_info_data = {
+ .name = "ens211",
+ .part_id = ENS211,
+ .conv_time_msec = 32,
+};
+
+static const struct ens210_chip_info ens212_chip_info_data = {
+ .name = "ens212",
+ .part_id = ENS212,
+ .conv_time_msec = 32,
+};
+
+static const struct ens210_chip_info ens213a_chip_info_data = {
+ .name = "ens213a",
+ .part_id = ENS213A,
+ .conv_time_msec = 130,
+};
+
+static const struct ens210_chip_info ens215_chip_info_data = {
+ .name = "ens215",
+ .part_id = ENS215,
+ .conv_time_msec = 130,
+};
+
+static const struct of_device_id ens210_of_match[] = {
+ { .compatible = "sciosense,ens210", .data = &ens210_chip_info_data },
+ { .compatible = "sciosense,ens210a", .data = &ens210a_chip_info_data },
+ { .compatible = "sciosense,ens211", .data = &ens211_chip_info_data },
+ { .compatible = "sciosense,ens212", .data = &ens212_chip_info_data },
+ { .compatible = "sciosense,ens213a", .data = &ens213a_chip_info_data },
+ { .compatible = "sciosense,ens215", .data = &ens215_chip_info_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ens210_of_match);
+
+static const struct i2c_device_id ens210_id_table[] = {
+ { "ens210", (kernel_ulong_t)&ens210_chip_info_data },
+ { "ens210a", (kernel_ulong_t)&ens210a_chip_info_data },
+ { "ens211", (kernel_ulong_t)&ens211_chip_info_data },
+ { "ens212", (kernel_ulong_t)&ens212_chip_info_data },
+ { "ens213a", (kernel_ulong_t)&ens213a_chip_info_data },
+ { "ens215", (kernel_ulong_t)&ens215_chip_info_data },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ens210_id_table);
+
+static struct i2c_driver ens210_driver = {
+ .probe = ens210_probe,
+ .id_table = ens210_id_table,
+ .driver = {
+ .name = "ens210",
+ .of_match_table = ens210_of_match,
+ },
+};
+module_i2c_driver(ens210_driver);
+
+MODULE_DESCRIPTION("ScioSense ENS210 temperature and humidity sensor driver");
+MODULE_AUTHOR("Joshua Felmeden <jfelmeden@thegoodpenguin.co.uk>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 0bfd6205f5f6..6484ab8aff55 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -202,8 +202,6 @@ enum {
ADIS16400_SCAN_TIMESTAMP,
};
-#ifdef CONFIG_DEBUG_FS
-
static ssize_t adis16400_show_serial_number(struct file *file,
char __user *userbuf, size_t count, loff_t *ppos)
{
@@ -273,11 +271,14 @@ static int adis16400_show_flash_count(void *arg, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(adis16400_flash_count_fops,
adis16400_show_flash_count, NULL, "%lld\n");
-static int adis16400_debugfs_init(struct iio_dev *indio_dev)
+static void adis16400_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16400_state *st = iio_priv(indio_dev);
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER)
debugfs_create_file_unsafe("serial_number", 0400,
d, st, &adis16400_serial_number_fops);
@@ -286,19 +287,8 @@ static int adis16400_debugfs_init(struct iio_dev *indio_dev)
d, st, &adis16400_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
d, st, &adis16400_flash_count_fops);
-
- return 0;
}
-#else
-
-static int adis16400_debugfs_init(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-#endif
-
enum adis16400_chip_variant {
ADIS16300,
ADIS16334,
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 69facd72bd7d..eaa38dd6201f 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -69,8 +69,6 @@ struct adis16460 {
struct adis adis;
};
-#ifdef CONFIG_DEBUG_FS
-
static int adis16460_show_serial_number(void *arg, u64 *val)
{
struct adis16460 *adis16460 = arg;
@@ -125,30 +123,22 @@ static int adis16460_show_flash_count(void *arg, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(adis16460_flash_count_fops,
adis16460_show_flash_count, NULL, "%lld\n");
-static int adis16460_debugfs_init(struct iio_dev *indio_dev)
+static void adis16460_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16460 *adis16460 = iio_priv(indio_dev);
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
debugfs_create_file_unsafe("serial_number", 0400,
d, adis16460, &adis16460_serial_number_fops);
debugfs_create_file_unsafe("product_id", 0400,
d, adis16460, &adis16460_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
d, adis16460, &adis16460_flash_count_fops);
-
- return 0;
-}
-
-#else
-
-static int adis16460_debugfs_init(struct iio_dev *indio_dev)
-{
- return 0;
}
-#endif
-
static int adis16460_set_freq(struct iio_dev *indio_dev, int val, int val2)
{
struct adis16460 *st = iio_priv(indio_dev);
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 482258ed5a3c..88efe728b61b 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -164,7 +164,6 @@ module_param(low_rate_allow, bool, 0444);
MODULE_PARM_DESC(low_rate_allow,
"Allow IMU rates below the minimum advisable when external clk is used in SCALED mode (default: N)");
-#ifdef CONFIG_DEBUG_FS
static ssize_t adis16475_show_firmware_revision(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
@@ -279,6 +278,9 @@ static void adis16475_debugfs_init(struct iio_dev *indio_dev)
struct adis16475 *st = iio_priv(indio_dev);
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
debugfs_create_file_unsafe("serial_number", 0400,
d, st, &adis16475_serial_number_fops);
debugfs_create_file_unsafe("product_id", 0400,
@@ -290,11 +292,6 @@ static void adis16475_debugfs_init(struct iio_dev *indio_dev)
debugfs_create_file("firmware_date", 0400, d,
st, &adis16475_firmware_date_fops);
}
-#else
-static void adis16475_debugfs_init(struct iio_dev *indio_dev)
-{
-}
-#endif
static int adis16475_get_freq(struct adis16475 *st, u32 *freq)
{
@@ -1593,8 +1590,7 @@ static int adis16475_push_single_sample(struct iio_poll_func *pf)
return -EINVAL;
}
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
/*
* When burst mode is used, system flags is the first data
* channel in the sequence, but the scan index is 7.
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index c59ef6f7cfd4..294181f2fcb3 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -193,8 +193,6 @@ module_param(low_rate_allow, bool, 0444);
MODULE_PARM_DESC(low_rate_allow,
"Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)");
-#ifdef CONFIG_DEBUG_FS
-
static ssize_t adis16480_show_firmware_revision(struct file *file,
char __user *userbuf, size_t count, loff_t *ppos)
{
@@ -304,11 +302,14 @@ static int adis16480_show_flash_count(void *arg, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(adis16480_flash_count_fops,
adis16480_show_flash_count, NULL, "%lld\n");
-static int adis16480_debugfs_init(struct iio_dev *indio_dev)
+static void adis16480_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16480 *adis16480 = iio_priv(indio_dev);
struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
debugfs_create_file_unsafe("firmware_revision", 0400,
d, adis16480, &adis16480_firmware_revision_fops);
debugfs_create_file_unsafe("firmware_date", 0400,
@@ -319,19 +320,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
d, adis16480, &adis16480_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
d, adis16480, &adis16480_flash_count_fops);
-
- return 0;
-}
-
-#else
-
-static int adis16480_debugfs_init(struct iio_dev *indio_dev)
-{
- return 0;
}
-#endif
-
static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
{
struct adis16480 *st = iio_priv(indio_dev);
@@ -1395,7 +1385,7 @@ static irqreturn_t adis16480_trigger_handler(int irq, void *p)
goto irq_done;
}
- for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
/*
* When burst mode is used, temperature is the first data
* channel in the sequence, but the temperature scan index
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 90aa04d94da5..495e8a74ac67 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -435,8 +435,7 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
__le16 sample;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
ret = regmap_bulk_read(data->regmap, base + i * sizeof(sample),
&sample, sizeof(sample));
if (ret)
diff --git a/drivers/iio/imu/bmi323/bmi323.h b/drivers/iio/imu/bmi323/bmi323.h
index dff126d41658..209bccb1f335 100644
--- a/drivers/iio/imu/bmi323/bmi323.h
+++ b/drivers/iio/imu/bmi323/bmi323.h
@@ -205,5 +205,6 @@
struct device;
int bmi323_core_probe(struct device *dev);
extern const struct regmap_config bmi323_regmap_config;
+extern const struct dev_pm_ops bmi323_core_pm_ops;
#endif
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index d708d1fe3e42..671401ce80dc 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -118,6 +118,38 @@ static const struct bmi323_hw bmi323_hw[2] = {
},
};
+static const unsigned int bmi323_reg_savestate[] = {
+ BMI323_INT_MAP1_REG,
+ BMI323_INT_MAP2_REG,
+ BMI323_IO_INT_CTR_REG,
+ BMI323_IO_INT_CONF_REG,
+ BMI323_ACC_CONF_REG,
+ BMI323_GYRO_CONF_REG,
+ BMI323_FEAT_IO0_REG,
+ BMI323_FIFO_WTRMRK_REG,
+ BMI323_FIFO_CONF_REG
+};
+
+static const unsigned int bmi323_ext_reg_savestate[] = {
+ BMI323_GEN_SET1_REG,
+ BMI323_TAP1_REG,
+ BMI323_TAP2_REG,
+ BMI323_TAP3_REG,
+ BMI323_FEAT_IO0_S_TAP_MSK,
+ BMI323_STEP_SC1_REG,
+ BMI323_ANYMO1_REG,
+ BMI323_NOMO1_REG,
+ BMI323_ANYMO1_REG + BMI323_MO2_OFFSET,
+ BMI323_NOMO1_REG + BMI323_MO2_OFFSET,
+ BMI323_ANYMO1_REG + BMI323_MO3_OFFSET,
+ BMI323_NOMO1_REG + BMI323_MO3_OFFSET
+};
+
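+/*
+ * Snapshot of the registers listed above, taken in the runtime-suspend
+ * callback and written back on resume after the soft reset wipes the
+ * device configuration.
+ */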
+struct bmi323_regs_runtime_pm {
+ unsigned int reg_settings[ARRAY_SIZE(bmi323_reg_savestate)];
+ unsigned int ext_reg_settings[ARRAY_SIZE(bmi323_ext_reg_savestate)];
+};
+
struct bmi323_data {
struct device *dev;
struct regmap *regmap;
@@ -130,6 +162,7 @@ struct bmi323_data {
u32 odrns[BMI323_SENSORS_CNT];
u32 odrhz[BMI323_SENSORS_CNT];
unsigned int feature_events;
+ struct bmi323_regs_runtime_pm runtime_pm_status;
/*
* Lock to protect the members of device's private data from concurrent
@@ -1972,6 +2005,11 @@ static void bmi323_disable(void *data_ptr)
bmi323_set_mode(data, BMI323_ACCEL, ACC_GYRO_MODE_DISABLE);
bmi323_set_mode(data, BMI323_GYRO, ACC_GYRO_MODE_DISABLE);
+
+ /* Place the peripheral in its lowest power-consuming state. */
+ regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL);
}
static int bmi323_set_bw(struct bmi323_data *data,
@@ -2030,6 +2068,13 @@ static int bmi323_init(struct bmi323_data *data)
return dev_err_probe(data->dev, -EINVAL,
"Sensor power error = 0x%x\n", val);
+ return 0;
+}
+
+static int bmi323_init_reset(struct bmi323_data *data)
+{
+ int ret;
+
/*
* Set the Bandwidth coefficient which defines the 3 dB cutoff
* frequency in relation to the ODR.
@@ -2078,12 +2123,18 @@ int bmi323_core_probe(struct device *dev)
data = iio_priv(indio_dev);
data->dev = dev;
data->regmap = regmap;
+ data->irq_pin = BMI323_IRQ_DISABLED;
+ data->state = BMI323_IDLE;
mutex_init(&data->mutex);
ret = bmi323_init(data);
if (ret)
return -EINVAL;
+ ret = bmi323_init_reset(data);
+ if (ret)
+ return -EINVAL;
+
if (!iio_read_acpi_mount_matrix(dev, &data->orientation, "ROTM")) {
ret = iio_read_mount_matrix(dev, &data->orientation);
if (ret)
@@ -2117,10 +2168,139 @@ int bmi323_core_probe(struct device *dev)
return dev_err_probe(data->dev, ret,
"Unable to register iio device\n");
- return 0;
+ return bmi323_fifo_disable(data);
}
EXPORT_SYMBOL_NS_GPL(bmi323_core_probe, IIO_BMI323);
+#if defined(CONFIG_PM)
+static int bmi323_core_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ struct bmi323_regs_runtime_pm *savestate = &data->runtime_pm_status;
+ int ret;
+
+ guard(mutex)(&data->mutex);
+
+ ret = iio_device_suspend_triggering(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Save registers meant to be restored by resume pm callback. */
+ for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_reg_savestate); i++) {
+ ret = regmap_read(data->regmap, bmi323_reg_savestate[i],
+ &savestate->reg_settings[i]);
+ if (ret) {
+ dev_err(data->dev,
+ "Error reading bmi323 reg 0x%x: %d\n",
+ bmi323_reg_savestate[i], ret);
+ return ret;
+ }
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_ext_reg_savestate); i++) {
+ ret = bmi323_read_ext_reg(data, bmi323_ext_reg_savestate[i],
+ &savestate->ext_reg_settings[i]);
+ if (ret) {
+ dev_err(data->dev,
+ "Error reading bmi323 external reg 0x%x: %d\n",
+ bmi323_ext_reg_savestate[i], ret);
+ return ret;
+ }
+ }
+
+ /* Perform soft reset to place the device in its lowest power state. */
+ ret = regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int bmi323_core_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ struct bmi323_regs_runtime_pm *savestate = &data->runtime_pm_status;
+ unsigned int val;
+ int ret;
+
+ guard(mutex)(&data->mutex);
+
+ /*
+ * Perform the device power-on and initial setup once again after
+ * the soft reset issued by the runtime-suspend callback.
+ */
+ ret = bmi323_init(data);
+ if (ret)
+ return ret;
+
+ /* Register must be cleared before changing an active config */
+ ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, 0);
+ if (ret) {
+ dev_err(data->dev, "Error stopping feature engine\n");
+ return ret;
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_ext_reg_savestate); i++) {
+ ret = bmi323_write_ext_reg(data, bmi323_ext_reg_savestate[i],
+ savestate->ext_reg_settings[i]);
+ if (ret) {
+ dev_err(data->dev,
+ "Error writing bmi323 external reg 0x%x: %d\n",
+ bmi323_ext_reg_savestate[i], ret);
+ return ret;
+ }
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(bmi323_reg_savestate); i++) {
+ ret = regmap_write(data->regmap, bmi323_reg_savestate[i],
+ savestate->reg_settings[i]);
+ if (ret) {
+ dev_err(data->dev,
+ "Error writing bmi323 reg 0x%x: %d\n",
+ bmi323_reg_savestate[i], ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Discard stale FIFO samples that may have been generated before
+ * suspend, or while the peripheral state did not yet match the
+ * restored one.
+ */
+ if (data->state == BMI323_BUFFER_FIFO) {
+ ret = regmap_write(data->regmap, BMI323_FIFO_CTRL_REG,
+ BMI323_FIFO_FLUSH_MSK);
+ if (ret) {
+ dev_err(data->dev, "Error flushing FIFO buffer: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regmap_read(data->regmap, BMI323_ERR_REG, &val);
+ if (ret) {
+ dev_err(data->dev,
+ "Error reading bmi323 error register: %d\n", ret);
+ return ret;
+ }
+
+ if (val) {
+ dev_err(data->dev,
+ "Sensor power error in PM = 0x%x\n", val);
+ return -EINVAL;
+ }
+
+ return iio_device_resume_triggering(indio_dev);
+}
+
+#endif
+
+const struct dev_pm_ops bmi323_core_pm_ops = {
+ SET_RUNTIME_PM_OPS(bmi323_core_runtime_suspend,
+ bmi323_core_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_NS_GPL(bmi323_core_pm_ops, IIO_BMI323);
+
MODULE_DESCRIPTION("Bosch BMI323 IMU driver");
MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bmi323/bmi323_i2c.c b/drivers/iio/imu/bmi323/bmi323_i2c.c
index 52140bf05765..0ba5d69d8329 100644
--- a/drivers/iio/imu/bmi323/bmi323_i2c.c
+++ b/drivers/iio/imu/bmi323/bmi323_i2c.c
@@ -61,7 +61,7 @@ static int bmi323_regmap_i2c_write(void *context, const void *data,
data + sizeof(u8));
}
-static struct regmap_bus bmi323_regmap_bus = {
+static const struct regmap_bus bmi323_regmap_bus = {
.read = bmi323_regmap_i2c_read,
.write = bmi323_regmap_i2c_write,
};
@@ -128,6 +128,7 @@ MODULE_DEVICE_TABLE(of, bmi323_of_i2c_match);
static struct i2c_driver bmi323_i2c_driver = {
.driver = {
.name = "bmi323",
+ .pm = pm_ptr(&bmi323_core_pm_ops),
.of_match_table = bmi323_of_i2c_match,
.acpi_match_table = bmi323_acpi_match,
},
diff --git a/drivers/iio/imu/bmi323/bmi323_spi.c b/drivers/iio/imu/bmi323/bmi323_spi.c
index 7b1e8127d0dd..9de3ade78d71 100644
--- a/drivers/iio/imu/bmi323/bmi323_spi.c
+++ b/drivers/iio/imu/bmi323/bmi323_spi.c
@@ -36,7 +36,7 @@ static int bmi323_regmap_spi_write(void *context, const void *data,
return spi_write(spi, data_buff + 1, count - 1);
}
-static struct regmap_bus bmi323_regmap_bus = {
+static const struct regmap_bus bmi323_regmap_bus = {
.read = bmi323_regmap_spi_read,
.write = bmi323_regmap_spi_write,
};
@@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(of, bmi323_of_spi_match);
static struct spi_driver bmi323_spi_driver = {
.driver = {
.name = "bmi323",
+ .pm = pm_ptr(&bmi323_core_pm_ops),
.of_match_table = bmi323_of_spi_match,
},
.probe = bmi323_spi_probe,
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
index 52744dd98e65..ea6519b22b2f 100644
--- a/drivers/iio/imu/bno055/bno055.c
+++ b/drivers/iio/imu/bno055/bno055.c
@@ -1458,7 +1458,7 @@ static irqreturn_t bno055_trigger_handler(int irq, void *p)
* then we split the transfer, skipping the gap.
*/
for_each_set_bitrange(start, end, iio_dev->active_scan_mask,
- iio_dev->masklength) {
+ iio_get_masklength(iio_dev)) {
/*
* First transfer will start from the beginning of the first
* ones-field in the bitmap
diff --git a/drivers/iio/imu/bno055/bno055_ser_core.c b/drivers/iio/imu/bno055/bno055_ser_core.c
index 694ff14a3aa2..da7873bfd348 100644
--- a/drivers/iio/imu/bno055/bno055_ser_core.c
+++ b/drivers/iio/imu/bno055/bno055_ser_core.c
@@ -492,7 +492,7 @@ static const struct serdev_device_ops bno055_ser_serdev_ops = {
.write_wakeup = serdev_device_write_wakeup,
};
-static struct regmap_bus bno055_ser_regmap_bus = {
+static const struct regmap_bus bno055_ser_regmap_bus = {
.write = bno055_ser_write_reg,
.read = bno055_ser_read_reg,
};
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index d37eca5ef761..c61c012e25bb 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1200,8 +1200,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
base = KMX61_MAG_XOUT_L;
mutex_lock(&data->lock);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = kmx61_read_measurement(data, base, bit);
if (ret < 0) {
mutex_unlock(&data->lock);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 937ff9c5a74c..ed0267929725 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2127,25 +2127,15 @@ static const struct iio_info st_lsm6dsx_gyro_info = {
.write_raw_get_fmt = st_lsm6dsx_write_raw_get_fmt,
};
-static int st_lsm6dsx_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
-{
- struct device *dev = hw->dev;
-
- if (!dev_fwnode(dev))
- return -EINVAL;
-
- return device_property_read_u32(dev, "st,drdy-int-pin", drdy_pin);
-}
-
static int
st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
const struct st_lsm6dsx_reg **drdy_reg)
{
+ struct device *dev = hw->dev;
int err = 0, drdy_pin;
- if (st_lsm6dsx_get_drdy_pin(hw, &drdy_pin) < 0) {
+ if (device_property_read_u32(dev, "st,drdy-int-pin", &drdy_pin) < 0) {
struct st_sensors_platform_data *pdata;
- struct device *dev = hw->dev;
pdata = (struct st_sensors_platform_data *)dev->platform_data;
drdy_pin = pdata ? pdata->drdy_int_pin : 1;
@@ -2180,7 +2170,7 @@ static int st_lsm6dsx_init_shub(struct st_lsm6dsx_hw *hw)
hub_settings = &hw->settings->shub_settings;
pdata = (struct st_sensors_platform_data *)dev->platform_data;
- if ((dev_fwnode(dev) && device_property_read_bool(dev, "st,pullups")) ||
+ if (device_property_read_bool(dev, "st,pullups") ||
(pdata && pdata->pullups)) {
if (hub_settings->pullup_en.sec_page) {
err = st_lsm6dsx_set_page(hw, true);
@@ -2565,7 +2555,7 @@ static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
return err;
pdata = (struct st_sensors_platform_data *)dev->platform_data;
- if ((dev_fwnode(dev) && device_property_read_bool(dev, "drive-open-drain")) ||
+ if (device_property_read_bool(dev, "drive-open-drain") ||
(pdata && pdata->open_drain)) {
reg = &hw->settings->irq_config.od;
err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
@@ -2646,73 +2636,6 @@ static int st_lsm6dsx_init_regulators(struct device *dev)
return 0;
}
-#ifdef CONFIG_ACPI
-
-static int lsm6dsx_get_acpi_mount_matrix(struct device *dev,
- struct iio_mount_matrix *orientation)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_device *adev = ACPI_COMPANION(dev);
- union acpi_object *obj, *elements;
- acpi_status status;
- int i, j, val[3];
- char *str;
-
- if (!has_acpi_companion(dev))
- return -EINVAL;
-
- if (!acpi_has_method(adev->handle, "ROTM"))
- return -EINVAL;
-
- status = acpi_evaluate_object(adev->handle, "ROTM", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- dev_warn(dev, "Failed to get ACPI mount matrix: %d\n", status);
- return -EINVAL;
- }
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3)
- goto unknown_format;
-
- elements = obj->package.elements;
- for (i = 0; i < 3; i++) {
- if (elements[i].type != ACPI_TYPE_STRING)
- goto unknown_format;
-
- str = elements[i].string.pointer;
- if (sscanf(str, "%d %d %d", &val[0], &val[1], &val[2]) != 3)
- goto unknown_format;
-
- for (j = 0; j < 3; j++) {
- switch (val[j]) {
- case -1: str = "-1"; break;
- case 0: str = "0"; break;
- case 1: str = "1"; break;
- default: goto unknown_format;
- }
- orientation->rotation[i * 3 + j] = str;
- }
- }
-
- kfree(buffer.pointer);
- return 0;
-
-unknown_format:
- dev_warn(dev, "Unknown ACPI mount matrix format, ignoring\n");
- kfree(buffer.pointer);
- return -EINVAL;
-}
-
-#else
-
-static int lsm6dsx_get_acpi_mount_matrix(struct device *dev,
- struct iio_mount_matrix *orientation)
-{
- return -EOPNOTSUPP;
-}
-
-#endif
-
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
struct regmap *regmap)
{
@@ -2760,8 +2683,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
hub_settings = &hw->settings->shub_settings;
if (hub_settings->master_en.addr &&
- (!dev_fwnode(dev) ||
- !device_property_read_bool(dev, "st,disable-sensor-hub"))) {
+ !device_property_read_bool(dev, "st,disable-sensor-hub")) {
err = st_lsm6dsx_shub_probe(hw, name);
if (err < 0)
return err;
@@ -2787,8 +2709,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
- err = lsm6dsx_get_acpi_mount_matrix(hw->dev, &hw->orientation);
- if (err) {
+ if (!iio_read_acpi_mount_matrix(hw->dev, &hw->orientation, "ROTM")) {
err = iio_read_mount_matrix(hw->dev, &hw->orientation);
if (err)
return err;
@@ -2803,7 +2724,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
- if ((dev_fwnode(dev) && device_property_read_bool(dev, "wakeup-source")) ||
+ if (device_property_read_bool(dev, "wakeup-source") ||
(pdata && pdata->wakeup_source))
device_init_wakeup(dev, true);
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index efe05be284b6..20b3b5212da7 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -32,6 +32,7 @@
#define dev_fmt(fmt) "iio-backend: " fmt
#include <linux/cleanup.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -40,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/iio/backend.h>
@@ -52,6 +54,14 @@ struct iio_backend {
struct device *dev;
struct module *owner;
void *priv;
+ const char *name;
+ unsigned int cached_reg_addr;
+ /*
+ * This index is relative to the frontend: for frontends with
+ * multiple backends, it is the index of this backend. Used for
+ * the debugfs directory name.
+ */
+ u8 idx;
};
/*
@@ -111,7 +121,142 @@ static DEFINE_MUTEX(iio_back_lock);
__ret = iio_backend_check_op(__back, op); \
if (!__ret) \
__back->ops->op(__back, ##args); \
+ else \
+ dev_dbg(__back->dev, "Op(%s) not implemented\n",\
+ __stringify(op)); \
+}
+
+static ssize_t iio_backend_debugfs_read_reg(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct iio_backend *back = file->private_data;
+ char read_buf[20];
+ unsigned int val;
+ int ret, len;
+
+ ret = iio_backend_op_call(back, debugfs_reg_access,
+ back->cached_reg_addr, 0, &val);
+ if (ret)
+ return ret;
+
+ len = scnprintf(read_buf, sizeof(read_buf), "0x%X\n", val);
+
+ return simple_read_from_buffer(userbuf, count, ppos, read_buf, len);
+}
+
+static ssize_t iio_backend_debugfs_write_reg(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct iio_backend *back = file->private_data;
+ unsigned int val;
+ char buf[80];
+ ssize_t rc;
+ int ret;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
+ if (rc < 0)
+ return rc;
+
+ /* simple_write_to_buffer() does not NUL-terminate for us */
+ buf[rc] = '\0';
+
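+ /*
+ * Accepts either "<reg>", which only latches the address used by
+ * subsequent reads, or "<reg> <val>", which also writes the
+ * register, e.g. "echo 0x40 0x01 > direct_reg_access".
+ */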
+ ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
+
+ switch (ret) {
+ case 1:
+ return count;
+ case 2:
+ ret = iio_backend_op_call(back, debugfs_reg_access,
+ back->cached_reg_addr, val, NULL);
+ if (ret)
+ return ret;
+ return count;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct file_operations iio_backend_debugfs_reg_fops = {
+ .open = simple_open,
+ .read = iio_backend_debugfs_read_reg,
+ .write = iio_backend_debugfs_write_reg,
+};
+
+static ssize_t iio_backend_debugfs_read_name(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct iio_backend *back = file->private_data;
+ char name[128];
+ int len;
+
+ len = scnprintf(name, sizeof(name), "%s\n", back->name);
+
+ return simple_read_from_buffer(userbuf, count, ppos, name, len);
+}
+
+static const struct file_operations iio_backend_debugfs_name_fops = {
+ .open = simple_open,
+ .read = iio_backend_debugfs_read_name,
+};
+
+/**
+ * iio_backend_debugfs_add - Add debugfs interfaces for Backends
+ * @back: Backend device
+ * @indio_dev: IIO device
+ */
+void iio_backend_debugfs_add(struct iio_backend *back,
+ struct iio_dev *indio_dev)
+{
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+ struct dentry *back_d;
+ char name[128];
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS) || !d)
+ return;
+ if (!back->ops->debugfs_reg_access && !back->name)
+ return;
+
+ snprintf(name, sizeof(name), "backend%d", back->idx);
+
+ back_d = debugfs_create_dir(name, d);
+ if (IS_ERR(back_d))
+ return;
+
+ if (back->ops->debugfs_reg_access)
+ debugfs_create_file("direct_reg_access", 0600, back_d, back,
+ &iio_backend_debugfs_reg_fops);
+
+ if (back->name)
+ debugfs_create_file("name", 0400, back_d, back,
+ &iio_backend_debugfs_name_fops);
}
+EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_add, IIO_BACKEND);
+
+/**
+ * iio_backend_debugfs_print_chan_status - Print channel status
+ * @back: Backend device
+ * @chan: Channel number
+ * @buf: Buffer where to print the status
+ * @len: Available space
+ *
+ * One use case is testing test tones in a digital interface, where the
+ * backend can be asked to dump more details on why a test tone has
+ * errors.
+ *
+ * RETURNS:
+ * Number of copied bytes on success, negative error code on failure.
+ */
+ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return -ENODEV;
+
+ return iio_backend_op_call(back, debugfs_print_chan_status, chan, buf,
+ len);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_print_chan_status, IIO_BACKEND);
/**
* iio_backend_chan_enable - Enable a backend channel
@@ -147,6 +292,29 @@ static void __iio_backend_disable(void *back)
}
/**
+ * iio_backend_disable - Backend disable
+ * @back: Backend device
+ */
+void iio_backend_disable(struct iio_backend *back)
+{
+ __iio_backend_disable(back);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_disable, IIO_BACKEND);
+
+/**
+ * iio_backend_enable - Backend enable
+ * @back: Backend device
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_enable(struct iio_backend *back)
+{
+ return iio_backend_op_call(back, enable);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_enable, IIO_BACKEND);
+
+/**
* devm_iio_backend_enable - Device managed backend enable
* @dev: Consumer device for the backend
* @back: Backend device
@@ -158,7 +326,7 @@ int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
int ret;
- ret = iio_backend_op_call(back, enable);
+ ret = iio_backend_enable(back);
if (ret)
return ret;
@@ -357,6 +525,25 @@ int devm_iio_backend_request_buffer(struct device *dev,
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);
+/**
+ * iio_backend_read_raw - Read a channel attribute from a backend device.
+ * @back: Backend device
+ * @chan: IIO channel reference
+ * @val: First returned value
+ * @val2: Second returned value
+ * @mask: Specify the attribute to return
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_read_raw(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ return iio_backend_op_call(back, read_raw, chan, val, val2, mask);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_read_raw, IIO_BACKEND);
+
static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev)
{
struct iio_backend *back = ERR_PTR(-ENODEV), *iter;
@@ -451,7 +638,6 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND);
/**
* iio_backend_extend_chan_spec - Extend an IIO channel
- * @indio_dev: IIO device
* @back: Backend device
* @chan: IIO channel
*
@@ -461,8 +647,7 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND);
* RETURNS:
* 0 on success, negative error number on failure.
*/
-int iio_backend_extend_chan_spec(struct iio_dev *indio_dev,
- struct iio_backend *back,
+int iio_backend_extend_chan_spec(struct iio_backend *back,
struct iio_chan_spec *chan)
{
const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info;
@@ -533,19 +718,10 @@ static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
return 0;
}
-/**
- * devm_iio_backend_get - Device managed backend device get
- * @dev: Consumer device for the backend
- * @name: Backend name
- *
- * Get's the backend associated with @dev.
- *
- * RETURNS:
- * A backend pointer, negative error pointer otherwise.
- */
-struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
+static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, const char *name,
+ struct fwnode_handle *fwnode)
{
- struct fwnode_handle *fwnode;
+ struct fwnode_handle *fwnode_back;
struct iio_backend *back;
unsigned int index;
int ret;
@@ -560,30 +736,67 @@ struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
index = 0;
}
- fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index);
+ fwnode_back = fwnode_find_reference(fwnode, "io-backends", index);
 	if (IS_ERR(fwnode_back))
 		return dev_err_cast_probe(dev, fwnode_back,
"Cannot get Firmware reference\n");
guard(mutex)(&iio_back_lock);
list_for_each_entry(back, &iio_back_list, entry) {
- if (!device_match_fwnode(back->dev, fwnode))
+ if (!device_match_fwnode(back->dev, fwnode_back))
continue;
- fwnode_handle_put(fwnode);
+ fwnode_handle_put(fwnode_back);
ret = __devm_iio_backend_get(dev, back);
if (ret)
return ERR_PTR(ret);
+ if (name)
+ back->idx = index;
+
return back;
}
- fwnode_handle_put(fwnode);
+ fwnode_handle_put(fwnode_back);
return ERR_PTR(-EPROBE_DEFER);
}
+
+/**
+ * devm_iio_backend_get - Device managed backend device get
+ * @dev: Consumer device for the backend
+ * @name: Backend name
+ *
+ * Gets the backend associated with @dev.
+ *
+ * RETURNS:
+ * A backend pointer, negative error pointer otherwise.
+ */
+struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
+{
+ return __devm_iio_backend_fwnode_get(dev, name, dev_fwnode(dev));
+}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);
/**
+ * devm_iio_backend_fwnode_get - Device managed backend firmware node get
+ * @dev: Consumer device for the backend
+ * @name: Backend name
+ * @fwnode: Firmware node of the backend consumer
+ *
+ * Gets the backend associated with a firmware node.
+ *
+ * RETURNS:
+ * A backend pointer, negative error pointer otherwise.
+ */
+struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
+ const char *name,
+ struct fwnode_handle *fwnode)
+{
+ return __devm_iio_backend_fwnode_get(dev, name, fwnode);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_fwnode_get, IIO_BACKEND);
+
+/**
* __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
* @dev: Consumer device for the backend
* @fwnode: Firmware node of the backend device
@@ -639,20 +852,20 @@ static void iio_backend_unregister(void *arg)
/**
* devm_iio_backend_register - Device managed backend device register
* @dev: Backend device being registered
- * @ops: Backend ops
+ * @info: Backend info
* @priv: Device private data
*
- * @ops is mandatory. Not providing it results in -EINVAL.
+ * @info is mandatory. Not providing it results in -EINVAL.
*
* RETURNS:
* 0 on success, negative error number on failure.
*/
int devm_iio_backend_register(struct device *dev,
- const struct iio_backend_ops *ops, void *priv)
+ const struct iio_backend_info *info, void *priv)
{
struct iio_backend *back;
- if (!ops)
+ if (!info || !info->ops)
return dev_err_probe(dev, -EINVAL, "No backend ops given\n");
/*
@@ -665,7 +878,8 @@ int devm_iio_backend_register(struct device *dev,
if (!back)
return -ENOMEM;
- back->ops = ops;
+ back->ops = info->ops;
+ back->name = info->name;
back->owner = dev->driver->owner;
back->dev = dev;
back->priv = priv;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d6fe105d2f40..8104696cd475 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -508,18 +508,19 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
static int iio_scan_mask_set(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
+ unsigned int masklength = iio_get_masklength(indio_dev);
const unsigned long *mask;
unsigned long *trialmask;
- if (!indio_dev->masklength) {
+ if (!masklength) {
WARN(1, "Trying to set scanmask prior to registering buffer\n");
return -EINVAL;
}
- trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
+ trialmask = bitmap_alloc(masklength, GFP_KERNEL);
if (!trialmask)
return -ENOMEM;
- bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+ bitmap_copy(trialmask, buffer->scan_mask, masklength);
set_bit(bit, trialmask);
if (!iio_validate_scan_mask(indio_dev, trialmask))
@@ -527,12 +528,11 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
if (indio_dev->available_scan_masks) {
mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- trialmask, false);
+ masklength, trialmask, false);
if (!mask)
goto err_invalid_mask;
}
- bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
+ bitmap_copy(buffer->scan_mask, trialmask, masklength);
bitmap_free(trialmask);
@@ -552,7 +552,7 @@ static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
static int iio_scan_mask_query(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
- if (bit > indio_dev->masklength)
+ if (bit > iio_get_masklength(indio_dev))
return -EINVAL;
if (!buffer->scan_mask)
@@ -768,8 +768,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
int length, i, largest = 0;
/* How much space will the demuxed element take? */
- for_each_set_bit(i, mask,
- indio_dev->masklength) {
+ for_each_set_bit(i, mask, iio_get_masklength(indio_dev)) {
length = iio_storage_bytes_for_si(indio_dev, i);
if (length < 0)
return length;
@@ -890,6 +889,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ unsigned int masklength = iio_get_masklength(indio_dev);
unsigned long *compound_mask;
const unsigned long *scan_mask;
bool strict_scanmask = false;
@@ -898,7 +898,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
unsigned int modes;
if (insert_buffer &&
- bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
+ bitmap_empty(insert_buffer->scan_mask, masklength)) {
dev_dbg(&indio_dev->dev,
"At least one scan element must be enabled first\n");
return -EINVAL;
@@ -952,7 +952,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
}
/* What scan mask do we actually have? */
- compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
+ compound_mask = bitmap_zalloc(masklength, GFP_KERNEL);
if (!compound_mask)
return -ENOMEM;
@@ -962,20 +962,19 @@ static int iio_verify_update(struct iio_dev *indio_dev,
if (buffer == remove_buffer)
continue;
bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
- indio_dev->masklength);
+ masklength);
scan_timestamp |= buffer->scan_timestamp;
}
if (insert_buffer) {
bitmap_or(compound_mask, compound_mask,
- insert_buffer->scan_mask, indio_dev->masklength);
+ insert_buffer->scan_mask, masklength);
scan_timestamp |= insert_buffer->scan_timestamp;
}
if (indio_dev->available_scan_masks) {
scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- compound_mask,
+ masklength, compound_mask,
strict_scanmask);
bitmap_free(compound_mask);
if (!scan_mask)
@@ -1040,6 +1039,7 @@ static int iio_buffer_add_demux(struct iio_buffer *buffer,
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
+ unsigned int masklength = iio_get_masklength(indio_dev);
int ret, in_ind = -1, out_ind, length;
unsigned int in_loc = 0, out_loc = 0;
struct iio_demux_table *p = NULL;
@@ -1051,17 +1051,13 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
/* First work out which scan mode we will actually have */
if (bitmap_equal(indio_dev->active_scan_mask,
- buffer->scan_mask,
- indio_dev->masklength))
+ buffer->scan_mask, masklength))
return 0;
/* Now we have the two masks, work from least sig and build up sizes */
- for_each_set_bit(out_ind,
- buffer->scan_mask,
- indio_dev->masklength) {
+ for_each_set_bit(out_ind, buffer->scan_mask, masklength) {
in_ind = find_next_bit(indio_dev->active_scan_mask,
- indio_dev->masklength,
- in_ind + 1);
+ masklength, in_ind + 1);
while (in_ind != out_ind) {
ret = iio_storage_bytes_for_si(indio_dev, in_ind);
if (ret < 0)
@@ -1071,8 +1067,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
/* Make sure we are aligned */
in_loc = roundup(in_loc, length) + length;
in_ind = find_next_bit(indio_dev->active_scan_mask,
- indio_dev->masklength,
- in_ind + 1);
+ masklength, in_ind + 1);
}
ret = iio_storage_bytes_for_si(indio_dev, in_ind);
if (ret < 0)
@@ -2104,6 +2099,7 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
int index)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ unsigned int masklength = iio_get_masklength(indio_dev);
struct iio_dev_attr *p;
const struct iio_dev_attr *id_attr;
struct attribute **attr;
@@ -2166,8 +2162,8 @@ static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
iio_dev_opaque->scan_index_timestamp =
channels[i].scan_index;
}
- if (indio_dev->masklength && !buffer->scan_mask) {
- buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
+ if (masklength && !buffer->scan_mask) {
+ buffer->scan_mask = bitmap_zalloc(masklength,
GFP_KERNEL);
if (!buffer->scan_mask) {
ret = -ENOMEM;
@@ -2273,7 +2269,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
for (i = 0; i < indio_dev->num_channels; i++)
ml = max(ml, channels[i].scan_index + 1);
- indio_dev->masklength = ml;
+ ACCESS_PRIVATE(indio_dev, masklength) = ml;
}
if (!iio_dev_opaque->attached_buffers_cnt)
@@ -2337,7 +2333,7 @@ void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask)
{
- return bitmap_weight(mask, indio_dev->masklength) == 1;
+ return bitmap_weight(mask, iio_get_masklength(indio_dev)) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 0f6cda7ffe45..6a6568d4a2cb 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -667,7 +667,6 @@ static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
vals[1]);
case IIO_VAL_FRACTIONAL:
tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
- tmp1 = vals[1];
tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
if ((tmp2 < 0) && (tmp0 == 0))
return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
@@ -1912,7 +1911,7 @@ static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
int i;
av_masks = indio_dev->available_scan_masks;
- masklength = indio_dev->masklength;
+ masklength = iio_get_masklength(indio_dev);
longs_per_mask = BITS_TO_LONGS(masklength);
/*
@@ -1965,6 +1964,49 @@ static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
}
}
+/**
+ * iio_active_scan_mask_index - Get index of the active scan mask inside the
+ * available scan masks array
+ * @indio_dev: the IIO device containing the active and available scan masks
+ *
+ * Returns: the index, or -EINVAL if active_scan_mask is not set or is
+ * not found in the available scan masks
+ */
+int iio_active_scan_mask_index(struct iio_dev *indio_dev)
+{
+ const unsigned long *av_masks;
+ unsigned int masklength = iio_get_masklength(indio_dev);
+ int i = 0;
+
+ if (!indio_dev->active_scan_mask)
+ return -EINVAL;
+
+ /*
+ * As in iio_scan_mask_match and iio_sanity_check_avail_scan_masks,
+ * the condition here does not handle multi-long masks correctly.
+ * It only checks that the first long is zero, and will use such a
+ * mask as a terminator even if bits are set after the first long.
+ *
+ * This should be fine since the available_scan_mask has already been
+ * sanity tested using iio_sanity_check_avail_scan_masks.
+ *
+ * See iio_scan_mask_match and iio_sanity_check_avail_scan_masks for
+ * more details
+ */
+ av_masks = indio_dev->available_scan_masks;
+ while (*av_masks) {
+ if (indio_dev->active_scan_mask == av_masks)
+ return i;
+ av_masks += BITS_TO_LONGS(masklength);
+ i++;
+ }
+
+ dev_warn(indio_dev->dev.parent,
+ "active scan mask is not part of the available scan masks\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iio_active_scan_mask_index);
+
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 2e84776f4fbd..54416a384232 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -347,6 +347,7 @@ int iio_trigger_detach_poll_func(struct iio_trigger *trig,
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
module_put(iio_dev_opaque->driver_module);
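+	/*
+	 * Clear the IRQ so helpers that check pollfunc->irq > 0, such as
+	 * iio_device_suspend_triggering(), skip a detached poll function.
+	 */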
+ pf->irq = 0;
return ret;
}
@@ -770,3 +771,29 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
if (indio_dev->trig)
iio_trigger_put(indio_dev->trig);
}
+
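+/*
+ * Helpers for driver PM callbacks: disable the poll-function IRQ so no
+ * samples are captured while the device is suspended, and re-enable it
+ * on resume (see the bmi323 runtime-pm callbacks for an example user).
+ */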
+int iio_device_suspend_triggering(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ guard(mutex)(&iio_dev_opaque->mlock);
+
+ if (indio_dev->pollfunc && indio_dev->pollfunc->irq > 0)
+ disable_irq(indio_dev->pollfunc->irq);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_device_suspend_triggering);
+
+int iio_device_resume_triggering(struct iio_dev *indio_dev)
+{
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
+ guard(mutex)(&iio_dev_opaque->mlock);
+
+ if (indio_dev->pollfunc && indio_dev->pollfunc->irq > 0)
+ enable_irq(indio_dev->pollfunc->irq);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_device_resume_triggering);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index b68dcc1fbaca..515ff46b5b82 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -114,6 +114,19 @@ config AS73211
This driver can also be built as a module. If so, the module
will be called as73211.
+config BH1745
+ tristate "ROHM BH1745 colour sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select IIO_GTS_HELPER
+ help
+ Say Y here to build support for the ROHM bh1745 colour sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called bh1745.
+
config BH1750
tristate "ROHM BH1750 ambient light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 1a071a8e9f8e..321010fc0b93 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9306) += apds9306.o
obj-$(CONFIG_APDS9960) += apds9960.o
obj-$(CONFIG_AS73211) += as73211.o
+obj-$(CONFIG_BH1745) += bh1745.o
obj-$(CONFIG_BH1750) += bh1750.o
obj-$(CONFIG_BH1780) += bh1780.o
obj-$(CONFIG_CM32181) += cm32181.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5169f12c3eba..c1b43053fbc7 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -125,8 +125,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
ret = i2c_smbus_read_word_data(data->client,
ADJD_S311_DATA_REG(i));
if (ret < 0)
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index e9e65130b6f9..3c14e4c30805 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -146,6 +146,25 @@ struct apds9960_data {
/* gesture buffer */
u8 buffer[4]; /* 4 8-bit channels */
+
+ /* calibration value buffer */
+ int calibbias[5];
+};
+
+enum {
+ APDS9960_CHAN_PROXIMITY,
+ APDS9960_CHAN_GESTURE_UP,
+ APDS9960_CHAN_GESTURE_DOWN,
+ APDS9960_CHAN_GESTURE_LEFT,
+ APDS9960_CHAN_GESTURE_RIGHT,
+};
+
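+/*
+ * Proximity offset is split across two registers (the UR and DL
+ * photodiode pairs), while each gesture direction has a single offset
+ * register; 0 marks the unused second slot.
+ */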
+static const unsigned int apds9960_offset_regs[][2] = {
+ [APDS9960_CHAN_PROXIMITY] = {APDS9960_REG_POFFSET_UR, APDS9960_REG_POFFSET_DL},
+ [APDS9960_CHAN_GESTURE_UP] = {APDS9960_REG_GOFFSET_U, 0},
+ [APDS9960_CHAN_GESTURE_DOWN] = {APDS9960_REG_GOFFSET_D, 0},
+ [APDS9960_CHAN_GESTURE_LEFT] = {APDS9960_REG_GOFFSET_L, 0},
+ [APDS9960_CHAN_GESTURE_RIGHT] = {APDS9960_REG_GOFFSET_R, 0},
};
static const struct reg_default apds9960_reg_defaults[] = {
@@ -255,6 +274,7 @@ static const struct iio_event_spec apds9960_als_event_spec[] = {
#define APDS9960_GESTURE_CHANNEL(_dir, _si) { \
.type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_CALIBBIAS), \
.channel = _si + 1, \
.scan_index = _si, \
.indexed = 1, \
@@ -282,7 +302,8 @@ static const struct iio_chan_spec apds9960_channels[] = {
{
.type = IIO_PROXIMITY,
.address = APDS9960_REG_PDATA,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.channel = 0,
.indexed = 0,
@@ -316,6 +337,28 @@ static const struct iio_chan_spec apds9960_channels[] = {
APDS9960_INTENSITY_CHANNEL(BLUE),
};
+static int apds9960_set_calibbias(struct apds9960_data *data,
+ struct iio_chan_spec const *chan, int calibbias)
+{
+ int ret, i;
+
+ if (calibbias < S8_MIN || calibbias > S8_MAX)
+ return -EINVAL;
+
+ guard(mutex)(&data->lock);
+ for (i = 0; i < 2; i++) {
+ if (apds9960_offset_regs[chan->channel][i] == 0)
+ break;
+
+ ret = regmap_write(data->regmap, apds9960_offset_regs[chan->channel][i], calibbias);
+ if (ret < 0)
+ return ret;
+ }
+ data->calibbias[chan->channel] = calibbias;
+
+ return 0;
+}
+
/* integration time in us */
static const int apds9960_int_time[][2] = {
{ 28000, 246},
@@ -531,6 +574,12 @@ static int apds9960_read_raw(struct iio_dev *indio_dev,
}
mutex_unlock(&data->lock);
break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&data->lock);
+ *val = data->calibbias[chan->channel];
+ ret = IIO_VAL_INT;
+ mutex_unlock(&data->lock);
+ break;
}
return ret;
@@ -564,6 +613,10 @@ static int apds9960_write_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val2 != 0)
+ return -EINVAL;
+ return apds9960_set_calibbias(data, chan, val);
default:
return -EINVAL;
}
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
new file mode 100644
index 000000000000..2e458e9d5d85
--- /dev/null
+++ b/drivers/iio/light/bh1745.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ROHM BH1745 digital colour sensor driver
+ *
+ * Copyright (C) Mudit Sharma <muditsharma.info@gmail.com>
+ *
+ * 7-bit I2C slave addresses:
+ * 0x38 (ADDR pin low)
+ * 0x39 (ADDR pin high)
+ */
+
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/util_macros.h>
+#include <linux/iio/events.h>
+#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/iio-gts-helper.h>
+
+/* BH1745 configuration registers */
+
+/* System control */
+#define BH1745_SYS_CTRL 0x40
+#define BH1745_SYS_CTRL_SW_RESET BIT(7)
+#define BH1745_SYS_CTRL_INTR_RESET BIT(6)
+#define BH1745_SYS_CTRL_PART_ID_MASK GENMASK(5, 0)
+#define BH1745_PART_ID 0x0B
+
+/* Mode control 1 */
+#define BH1745_MODE_CTRL1 0x41
+#define BH1745_CTRL1_MEASUREMENT_TIME_MASK GENMASK(2, 0)
+
+/* Mode control 2 */
+#define BH1745_MODE_CTRL2 0x42
+#define BH1745_CTRL2_RGBC_EN BIT(4)
+#define BH1745_CTRL2_ADC_GAIN_MASK GENMASK(1, 0)
+
+/* Interrupt */
+#define BH1745_INTR 0x60
+#define BH1745_INTR_STATUS BIT(7)
+#define BH1745_INTR_SOURCE_MASK GENMASK(3, 2)
+#define BH1745_INTR_ENABLE BIT(0)
+
+#define BH1745_PERSISTENCE 0x61
+
+/* Threshold high */
+#define BH1745_TH_LSB 0x62
+#define BH1745_TH_MSB 0x63
+
+/* Threshold low */
+#define BH1745_TL_LSB 0x64
+#define BH1745_TL_MSB 0x65
+
+/* BH1745 data output regs */
+#define BH1745_RED_LSB 0x50
+#define BH1745_RED_MSB 0x51
+#define BH1745_GREEN_LSB 0x52
+#define BH1745_GREEN_MSB 0x53
+#define BH1745_BLUE_LSB 0x54
+#define BH1745_BLUE_MSB 0x55
+#define BH1745_CLEAR_LSB 0x56
+#define BH1745_CLEAR_MSB 0x57
+
+#define BH1745_MANU_ID_REG 0x92
+
+/* From 16x max HW gain and 32x max integration time */
+#define BH1745_MAX_GAIN 512
+
+enum bh1745_int_source {
+ BH1745_INTR_SOURCE_RED,
+ BH1745_INTR_SOURCE_GREEN,
+ BH1745_INTR_SOURCE_BLUE,
+ BH1745_INTR_SOURCE_CLEAR,
+};
+
+enum bh1745_gain {
+ BH1745_ADC_GAIN_1X,
+ BH1745_ADC_GAIN_2X,
+ BH1745_ADC_GAIN_16X,
+};
+
+enum bh1745_measurement_time {
+ BH1745_MEASUREMENT_TIME_160MS,
+ BH1745_MEASUREMENT_TIME_320MS,
+ BH1745_MEASUREMENT_TIME_640MS,
+ BH1745_MEASUREMENT_TIME_1280MS,
+ BH1745_MEASUREMENT_TIME_2560MS,
+ BH1745_MEASUREMENT_TIME_5120MS,
+};
+
+enum bh1745_presistence_value {
+ BH1745_PRESISTENCE_UPDATE_TOGGLE,
+ BH1745_PRESISTENCE_UPDATE_EACH_MEASUREMENT,
+ BH1745_PRESISTENCE_UPDATE_FOUR_MEASUREMENT,
+ BH1745_PRESISTENCE_UPDATE_EIGHT_MEASUREMENT,
+};
+
+static const struct iio_gain_sel_pair bh1745_gain[] = {
+ GAIN_SCALE_GAIN(1, BH1745_ADC_GAIN_1X),
+ GAIN_SCALE_GAIN(2, BH1745_ADC_GAIN_2X),
+ GAIN_SCALE_GAIN(16, BH1745_ADC_GAIN_16X),
+};
+
+static const struct iio_itime_sel_mul bh1745_itimes[] = {
+ GAIN_SCALE_ITIME_US(5120000, BH1745_MEASUREMENT_TIME_5120MS, 32),
+ GAIN_SCALE_ITIME_US(2560000, BH1745_MEASUREMENT_TIME_2560MS, 16),
+ GAIN_SCALE_ITIME_US(1280000, BH1745_MEASUREMENT_TIME_1280MS, 8),
+ GAIN_SCALE_ITIME_US(640000, BH1745_MEASUREMENT_TIME_640MS, 4),
+ GAIN_SCALE_ITIME_US(320000, BH1745_MEASUREMENT_TIME_320MS, 2),
+ GAIN_SCALE_ITIME_US(160000, BH1745_MEASUREMENT_TIME_160MS, 1),
+};
+
+struct bh1745_data {
+ /*
+ * Lock to prevent device setting update or read before
+ * related calculations are completed
+ */
+ struct mutex lock;
+ struct regmap *regmap;
+ struct device *dev;
+ struct iio_trigger *trig;
+ struct iio_gts gts;
+};
+
+static const struct regmap_range bh1745_volatile_ranges[] = {
+ regmap_reg_range(BH1745_MODE_CTRL2, BH1745_MODE_CTRL2), /* VALID */
+ regmap_reg_range(BH1745_RED_LSB, BH1745_CLEAR_MSB), /* Data */
+ regmap_reg_range(BH1745_INTR, BH1745_INTR), /* Interrupt */
+};
+
+static const struct regmap_access_table bh1745_volatile_regs = {
+ .yes_ranges = bh1745_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bh1745_volatile_ranges),
+};
+
+static const struct regmap_range bh1745_readable_ranges[] = {
+ regmap_reg_range(BH1745_SYS_CTRL, BH1745_MODE_CTRL2),
+ regmap_reg_range(BH1745_RED_LSB, BH1745_CLEAR_MSB),
+ regmap_reg_range(BH1745_INTR, BH1745_INTR),
+ regmap_reg_range(BH1745_PERSISTENCE, BH1745_TL_MSB),
+ regmap_reg_range(BH1745_MANU_ID_REG, BH1745_MANU_ID_REG),
+};
+
+static const struct regmap_access_table bh1745_readable_regs = {
+ .yes_ranges = bh1745_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bh1745_readable_ranges),
+};
+
+static const struct regmap_range bh1745_writable_ranges[] = {
+ regmap_reg_range(BH1745_SYS_CTRL, BH1745_MODE_CTRL2),
+ regmap_reg_range(BH1745_INTR, BH1745_INTR),
+ regmap_reg_range(BH1745_PERSISTENCE, BH1745_TL_MSB),
+};
+
+static const struct regmap_access_table bh1745_writable_regs = {
+ .yes_ranges = bh1745_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bh1745_writable_ranges),
+};
+
+static const struct regmap_config bh1745_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = BH1745_MANU_ID_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &bh1745_volatile_regs,
+ .wr_table = &bh1745_writable_regs,
+ .rd_table = &bh1745_readable_regs,
+};
+
+static const struct iio_event_spec bh1745_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_PERIOD),
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define BH1745_CHANNEL(_colour, _si, _addr) \
+ { \
+ .type = IIO_INTENSITY, .modified = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .event_spec = bh1745_event_spec, \
+ .num_event_specs = ARRAY_SIZE(bh1745_event_spec), \
+ .channel2 = IIO_MOD_LIGHT_##_colour, .address = _addr, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ }
+
+static const struct iio_chan_spec bh1745_channels[] = {
+ BH1745_CHANNEL(RED, 0, BH1745_RED_LSB),
+ BH1745_CHANNEL(GREEN, 1, BH1745_GREEN_LSB),
+ BH1745_CHANNEL(BLUE, 2, BH1745_BLUE_LSB),
+ BH1745_CHANNEL(CLEAR, 3, BH1745_CLEAR_LSB),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static int bh1745_reset(struct bh1745_data *data)
+{
+ return regmap_set_bits(data->regmap, BH1745_SYS_CTRL,
+ BH1745_SYS_CTRL_SW_RESET |
+ BH1745_SYS_CTRL_INTR_RESET);
+}
+
+static int bh1745_power_on(struct bh1745_data *data)
+{
+ return regmap_set_bits(data->regmap, BH1745_MODE_CTRL2,
+ BH1745_CTRL2_RGBC_EN);
+}
+
+static void bh1745_power_off(void *data_ptr)
+{
+ struct bh1745_data *data = data_ptr;
+ struct device *dev = data->dev;
+ int ret;
+
+ ret = regmap_clear_bits(data->regmap, BH1745_MODE_CTRL2,
+ BH1745_CTRL2_RGBC_EN);
+ if (ret)
+ dev_err(dev, "Failed to turn off device\n");
+}
+
+static int bh1745_get_scale(struct bh1745_data *data, int *val, int *val2)
+{
+ int ret;
+ int value;
+ int gain_sel, int_time_sel;
+ int gain;
+ const struct iio_itime_sel_mul *int_time;
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL2, &value);
+ if (ret)
+ return ret;
+
+ gain_sel = FIELD_GET(BH1745_CTRL2_ADC_GAIN_MASK, value);
+ gain = iio_gts_find_gain_by_sel(&data->gts, gain_sel);
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value);
+ if (ret)
+ return ret;
+
+ int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK, value);
+ int_time = iio_gts_find_itime_by_sel(&data->gts, int_time_sel);
+
+ return iio_gts_get_scale(&data->gts, gain, int_time->time_us, val,
+ val2);
+}
+
+static int bh1745_set_scale(struct bh1745_data *data, int val)
+{
+ struct device *dev = data->dev;
+ int ret;
+ int value;
+ int hw_gain_sel, current_int_time_sel, new_int_time_sel;
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value);
+ if (ret)
+ return ret;
+
+ current_int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK,
+ value);
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts,
+ current_int_time_sel,
+ val, 0, &hw_gain_sel);
+ if (ret) {
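+ /*
+ * No gain matches the requested scale at the current
+ * integration time; try the other integration times.
+ */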
+ for (int i = 0; i < data->gts.num_itime; i++) {
+ new_int_time_sel = data->gts.itime_table[i].sel;
+
+ if (new_int_time_sel == current_int_time_sel)
+ continue;
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&data->gts,
+ new_int_time_sel,
+ val, 0,
+ &hw_gain_sel);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ dev_dbg(dev, "Unsupported scale value requested: %d\n",
+ val);
+ return -EINVAL;
+ }
+
+ ret = regmap_write_bits(data->regmap, BH1745_MODE_CTRL1,
+ BH1745_CTRL1_MEASUREMENT_TIME_MASK,
+ new_int_time_sel);
+ if (ret)
+ return ret;
+ }
+
+ return regmap_write_bits(data->regmap, BH1745_MODE_CTRL2,
+ BH1745_CTRL2_ADC_GAIN_MASK, hw_gain_sel);
+}
+
+static int bh1745_get_int_time(struct bh1745_data *data, int *val)
+{
+ int ret;
+ int value;
+ int int_time, int_time_sel;
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL1, &value);
+ if (ret)
+ return ret;
+
+ int_time_sel = FIELD_GET(BH1745_CTRL1_MEASUREMENT_TIME_MASK, value);
+ int_time = iio_gts_find_int_time_by_sel(&data->gts, int_time_sel);
+ if (int_time < 0)
+ return int_time;
+
+ *val = int_time;
+
+ return 0;
+}
+
+static int bh1745_set_int_time(struct bh1745_data *data, int val, int val2)
+{
+ struct device *dev = data->dev;
+ int ret;
+ int value;
+ int current_int_time, current_hwgain_sel, current_hwgain;
+ int new_hwgain = -1, new_hwgain_sel, new_int_time_sel;
+ int req_int_time = (1000000 * val) + val2;
+
+ if (!iio_gts_valid_time(&data->gts, req_int_time)) {
+ dev_dbg(dev, "Unsupported integration time requested: %d\n",
+ req_int_time);
+ return -EINVAL;
+ }
+
+ ret = bh1745_get_int_time(data, &current_int_time);
+ if (ret)
+ return ret;
+
+ if (current_int_time == req_int_time)
+ return 0;
+
+ ret = regmap_read(data->regmap, BH1745_MODE_CTRL2, &value);
+ if (ret)
+ return ret;
+
+ current_hwgain_sel = FIELD_GET(BH1745_CTRL2_ADC_GAIN_MASK, value);
+ current_hwgain = iio_gts_find_gain_by_sel(&data->gts,
+ current_hwgain_sel);
+ ret = iio_gts_find_new_gain_by_old_gain_time(&data->gts, current_hwgain,
+ current_int_time,
+ req_int_time,
+ &new_hwgain);
+ if (new_hwgain < 0) {
+ dev_dbg(dev, "No corresponding gain for requested integration time\n");
+ return ret;
+ }
+
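+ /*
+ * No exact gain preserves the scale with the new integration
+ * time; fall back to the closest smaller gain.
+ */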
+ if (ret) {
+ bool in_range;
+
+ new_hwgain = iio_find_closest_gain_low(&data->gts, new_hwgain,
+ &in_range);
+ if (new_hwgain < 0) {
+ new_hwgain = iio_gts_get_min_gain(&data->gts);
+ if (new_hwgain < 0)
+ return ret;
+ }
+
+ if (!in_range)
+ dev_dbg(dev, "Optimal gain out of range\n");
+
+ dev_dbg(dev, "Scale changed, new hw_gain %d\n", new_hwgain);
+ }
+
+ new_hwgain_sel = iio_gts_find_sel_by_gain(&data->gts, new_hwgain);
+ if (new_hwgain_sel < 0)
+ return new_hwgain_sel;
+
+ ret = regmap_write_bits(data->regmap, BH1745_MODE_CTRL2,
+ BH1745_CTRL2_ADC_GAIN_MASK,
+ new_hwgain_sel);
+ if (ret)
+ return ret;
+
+ new_int_time_sel = iio_gts_find_sel_by_int_time(&data->gts,
+ req_int_time);
+ if (new_int_time_sel < 0)
+ return new_int_time_sel;
+
+ return regmap_write_bits(data->regmap, BH1745_MODE_CTRL1,
+ BH1745_CTRL1_MEASUREMENT_TIME_MASK,
+ new_int_time_sel);
+}
+
+static int bh1745_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int ret;
+ __le16 value;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
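+ /* Each colour channel is a 16-bit LSB/MSB register pair */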
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &value, 2);
+ if (ret)
+ return ret;
+ *val = le16_to_cpu(value);
+
+ return IIO_VAL_INT;
+ }
+ unreachable();
+
+ case IIO_CHAN_INFO_SCALE: {
+ guard(mutex)(&data->lock);
+ ret = bh1745_get_scale(data, val, val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ }
+
+ case IIO_CHAN_INFO_INT_TIME: {
+ guard(mutex)(&data->lock);
+ *val = 0;
+ ret = bh1745_get_int_time(data, val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return bh1745_set_scale(data, val);
+
+ case IIO_CHAN_INFO_INT_TIME:
+ return bh1745_set_int_time(data, val, val2);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ __le16 thresh;
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = regmap_bulk_read(data->regmap, BH1745_TH_LSB,
+ &thresh, 2);
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(thresh);
+ return IIO_VAL_INT;
+
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_bulk_read(data->regmap, BH1745_TL_LSB,
+ &thresh, 2);
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(thresh);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_EV_INFO_PERIOD:
+ ret = regmap_read(data->regmap, BH1745_PERSISTENCE, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ __le16 thresh;
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (val < 0x0 || val > 0xFFFF)
+ return -EINVAL;
+
+ thresh = cpu_to_le16(val);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = regmap_bulk_write(data->regmap, BH1745_TH_LSB,
+ &thresh, 2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_bulk_write(data->regmap, BH1745_TL_LSB,
+ &thresh, 2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_EV_INFO_PERIOD:
+ if (val < BH1745_PRESISTENCE_UPDATE_TOGGLE ||
+ val > BH1745_PRESISTENCE_UPDATE_EIGHT_MEASUREMENT)
+ return -EINVAL;
+ ret = regmap_write(data->regmap, BH1745_PERSISTENCE, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int ret;
+ int value;
+ int int_src;
+
+ ret = regmap_read(data->regmap, BH1745_INTR, &value);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(BH1745_INTR_ENABLE, value))
+ return 0;
+
+ int_src = FIELD_GET(BH1745_INTR_SOURCE_MASK, value);
+
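+ /* Only one colour channel can be the interrupt source at a time */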
+ switch (chan->channel2) {
+ case IIO_MOD_LIGHT_RED:
+ if (int_src == BH1745_INTR_SOURCE_RED)
+ return 1;
+ return 0;
+
+ case IIO_MOD_LIGHT_GREEN:
+ if (int_src == BH1745_INTR_SOURCE_GREEN)
+ return 1;
+ return 0;
+
+ case IIO_MOD_LIGHT_BLUE:
+ if (int_src == BH1745_INTR_SOURCE_BLUE)
+ return 1;
+ return 0;
+
+ case IIO_MOD_LIGHT_CLEAR:
+ if (int_src == BH1745_INTR_SOURCE_CLEAR)
+ return 1;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bh1745_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int value;
+
+ if (state == 0)
+ return regmap_clear_bits(data->regmap,
+ BH1745_INTR, BH1745_INTR_ENABLE);
+
+ if (state == 1) {
+ /* The latch is always enabled when enabling the interrupt */
+ value = BH1745_INTR_ENABLE;
+
+ switch (chan->channel2) {
+ case IIO_MOD_LIGHT_RED:
+ return regmap_write(data->regmap, BH1745_INTR,
+ value | FIELD_PREP(BH1745_INTR_SOURCE_MASK,
+ BH1745_INTR_SOURCE_RED));
+
+ case IIO_MOD_LIGHT_GREEN:
+ return regmap_write(data->regmap, BH1745_INTR,
+ value | FIELD_PREP(BH1745_INTR_SOURCE_MASK,
+ BH1745_INTR_SOURCE_GREEN));
+
+ case IIO_MOD_LIGHT_BLUE:
+ return regmap_write(data->regmap, BH1745_INTR,
+ value | FIELD_PREP(BH1745_INTR_SOURCE_MASK,
+ BH1745_INTR_SOURCE_BLUE));
+
+ case IIO_MOD_LIGHT_CLEAR:
+ return regmap_write(data->regmap, BH1745_INTR,
+ value | FIELD_PREP(BH1745_INTR_SOURCE_MASK,
+ BH1745_INTR_SOURCE_CLEAR));
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bh1745_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, const int **vals,
+ int *type, int *length, long mask)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return iio_gts_avail_times(&data->gts, vals, type, length);
+
+ case IIO_CHAN_INFO_SCALE:
+ return iio_gts_all_avail_scales(&data->gts, vals, type, length);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bh1745_info = {
+ .read_raw = bh1745_read_raw,
+ .write_raw = bh1745_write_raw,
+ .write_raw_get_fmt = bh1745_write_raw_get_fmt,
+ .read_event_value = bh1745_read_thresh,
+ .write_event_value = bh1745_write_thresh,
+ .read_event_config = bh1745_read_event_config,
+ .write_event_config = bh1745_write_event_config,
+ .read_avail = bh1745_read_avail,
+};
+
+static irqreturn_t bh1745_interrupt_handler(int interrupt, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct bh1745_data *data = iio_priv(indio_dev);
+ int ret;
+ int value;
+ int int_src;
+
+ ret = regmap_read(data->regmap, BH1745_INTR, &value);
+ if (ret)
+ return IRQ_NONE;
+
+ int_src = FIELD_GET(BH1745_INTR_SOURCE_MASK, value);
+
+ if (value & BH1745_INTR_STATUS) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, int_src,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bh1745_data *data = iio_priv(indio_dev);
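+ /* Zero-init so unused channel slots never carry stale stack data */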
+ struct {
+ u16 chans[4];
+ s64 timestamp __aligned(8);
+ } scan = { };
+ __le16 value;
+ int ret;
+ int i;
+ int j = 0;
+
+ iio_for_each_active_channel(indio_dev, i) {
+ ret = regmap_bulk_read(data->regmap, BH1745_RED_LSB + 2 * i,
+ &value, 2);
+ if (ret)
+ goto err;
+
+ scan.chans[j++] = le16_to_cpu(value);
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
+ iio_get_time_ns(indio_dev));
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bh1745_setup_triggered_buffer(struct iio_dev *indio_dev,
+ struct device *parent,
+ int irq)
+{
+ struct bh1745_data *data = iio_priv(indio_dev);
+ struct device *dev = data->dev;
+ int ret;
+
+ ret = devm_iio_triggered_buffer_setup(parent, indio_dev, NULL,
+ bh1745_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Triggered buffer setup failed\n");
+
+ if (irq) {
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ bh1745_interrupt_handler,
+ IRQF_ONESHOT,
+ "bh1745_interrupt", indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Request for IRQ failed\n");
+ }
+
+ return 0;
+}
+
+static int bh1745_init(struct bh1745_data *data)
+{
+ int ret;
+ struct device *dev = data->dev;
+
+ mutex_init(&data->lock);
+
+ ret = devm_iio_init_iio_gts(dev, BH1745_MAX_GAIN, 0, bh1745_gain,
+ ARRAY_SIZE(bh1745_gain), bh1745_itimes,
+ ARRAY_SIZE(bh1745_itimes), &data->gts);
+ if (ret)
+ return ret;
+
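+ /* Clear stale measurement and interrupt state before enabling */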
+ ret = bh1745_reset(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to reset sensor\n");
+
+ ret = bh1745_power_on(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to turn on sensor\n");
+
+ ret = devm_add_action_or_reset(dev, bh1745_power_off, data);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to add action or reset\n");
+
+ return 0;
+}
+
+static int bh1745_probe(struct i2c_client *client)
+{
+ int ret;
+ int value;
+ int part_id;
+ struct bh1745_data *data;
+ struct iio_dev *indio_dev;
+ struct device *dev = &client->dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &bh1745_info;
+ indio_dev->name = "bh1745";
+ indio_dev->channels = bh1745_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(bh1745_channels);
+ data = iio_priv(indio_dev);
+ data->dev = &client->dev;
+ data->regmap = devm_regmap_init_i2c(client, &bh1745_regmap);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "Failed to initialize Regmap\n");
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable regulator\n");
+
+ ret = regmap_read(data->regmap, BH1745_SYS_CTRL, &value);
+ if (ret)
+ return ret;
+
+ part_id = FIELD_GET(BH1745_SYS_CTRL_PART_ID_MASK, value);
+ if (part_id != BH1745_PART_ID)
+ dev_warn(dev, "Unknown part ID 0x%x\n", part_id);
+
+ ret = bh1745_init(data);
+ if (ret)
+ return ret;
+
+ ret = bh1745_setup_triggered_buffer(indio_dev, indio_dev->dev.parent,
+ client->irq);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id bh1745_idtable[] = {
+ { "bh1745" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bh1745_idtable);
+
+static const struct of_device_id bh1745_of_match[] = {
+ { .compatible = "rohm,bh1745" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bh1745_of_match);
+
+static struct i2c_driver bh1745_driver = {
+ .driver = {
+ .name = "bh1745",
+ .of_match_table = bh1745_of_match,
+ },
+ .probe = bh1745_probe,
+ .id_table = bh1745_idtable,
+};
+module_i2c_driver(bh1745_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mudit Sharma <muditsharma.info@gmail.com>");
+MODULE_DESCRIPTION("BH1745 colour sensor driver");
+MODULE_IMPORT_NS(IIO_GTS_HELPER);
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index 7125e011a38a..f8b1d7dd6f5f 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -420,7 +420,7 @@ static int gp2ap002_regmap_i2c_write(void *context, unsigned int reg,
return i2c_smbus_write_byte_data(i2c, reg, val);
}
-static struct regmap_bus gp2ap002_regmap_bus = {
+static const struct regmap_bus gp2ap002_regmap_bus = {
.reg_read = gp2ap002_regmap_i2c_read,
.reg_write = gp2ap002_regmap_i2c_write,
};
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 757383456da6..b3f87dded040 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -965,8 +965,7 @@ static irqreturn_t gp2ap020a00f_trigger_handler(int irq, void *data)
size_t d_size = 0;
int i, out_val, ret;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
ret = regmap_bulk_read(priv->regmap,
GP2AP020A00F_DATA_REG(i),
&priv->buffer[d_size], 2);
@@ -1397,8 +1396,7 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev)
* two separate IIO channels they are treated in the driver logic
* as if they were controlled independently.
*/
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
switch (i) {
case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
err = gp2ap020a00f_exec_cmd(data,
@@ -1435,8 +1433,7 @@ static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev)
mutex_lock(&data->lock);
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
switch (i) {
case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
err = gp2ap020a00f_exec_cmd(data,
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index 59329546df58..b176bf4c884b 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -181,8 +181,7 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
struct isl29125_data *data = iio_priv(indio_dev);
int i, j = 0;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
int ret = i2c_smbus_read_word_data(data->client,
isl29125_regs[i].data);
if (ret < 0)
diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c
index fff1e899097d..7e58b50f3660 100644
--- a/drivers/iio/light/ltr390.c
+++ b/drivers/iio/light/ltr390.c
@@ -23,20 +23,30 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/bitfield.h>
#include <linux/iio/iio.h>
#include <asm/unaligned.h>
-#define LTR390_MAIN_CTRL 0x00
-#define LTR390_PART_ID 0x06
-#define LTR390_UVS_DATA 0x10
+#define LTR390_MAIN_CTRL 0x00
+#define LTR390_ALS_UVS_MEAS_RATE 0x04
+#define LTR390_ALS_UVS_GAIN 0x05
+#define LTR390_PART_ID 0x06
+#define LTR390_ALS_DATA 0x0D
+#define LTR390_UVS_DATA 0x10
+#define LTR390_INT_CFG 0x19
+
+#define LTR390_PART_NUMBER_ID 0xb
+#define LTR390_ALS_UVS_GAIN_MASK 0x07
+#define LTR390_ALS_UVS_INT_TIME_MASK 0x70
+#define LTR390_ALS_UVS_INT_TIME(x) FIELD_PREP(LTR390_ALS_UVS_INT_TIME_MASK, (x))
#define LTR390_SW_RESET BIT(4)
#define LTR390_UVS_MODE BIT(3)
#define LTR390_SENSOR_ENABLE BIT(1)
-#define LTR390_PART_NUMBER_ID 0xb
+#define LTR390_FRACTIONAL_PRECISION 100
/*
* At 20-bit resolution (integration time: 400ms) and 18x gain, 2300 counts of
@@ -55,11 +65,19 @@
*/
#define LTR390_WINDOW_FACTOR 1
+enum ltr390_mode {
+ LTR390_SET_ALS_MODE,
+ LTR390_SET_UVS_MODE,
+};
+
struct ltr390_data {
struct regmap *regmap;
struct i2c_client *client;
/* Protects device from simultaneous reads */
struct mutex lock;
+ enum ltr390_mode mode;
+ int gain;
+ int int_time_us;
};
static const struct regmap_config ltr390_regmap_config = {
@@ -75,8 +93,6 @@ static int ltr390_register_read(struct ltr390_data *data, u8 register_address)
int ret;
u8 recieve_buffer[3];
- guard(mutex)(&data->lock);
-
ret = regmap_bulk_read(data->regmap, register_address, recieve_buffer,
sizeof(recieve_buffer));
if (ret) {
@@ -87,6 +103,38 @@ static int ltr390_register_read(struct ltr390_data *data, u8 register_address)
return get_unaligned_le24(recieve_buffer);
}
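+/*
+ * The UVS mode bit selects whether the UVS or ALS data register is updated;
+ * the cached mode avoids redundant register writes.
+ */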
+static int ltr390_set_mode(struct ltr390_data *data, enum ltr390_mode mode)
+{
+ int ret;
+
+ if (data->mode == mode)
+ return 0;
+
+ switch (mode) {
+ case LTR390_SET_ALS_MODE:
+ ret = regmap_clear_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_UVS_MODE);
+ break;
+
+ case LTR390_SET_UVS_MODE:
+ ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_UVS_MODE);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ data->mode = mode;
+ return 0;
+}
+
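+/*
+ * Counts per UVI scale linearly with gain and integration time, relative to
+ * the datasheet reference of 2300 counts at 18x gain and 400 ms.
+ */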
+static int ltr390_counts_per_uvi(struct ltr390_data *data)
+{
+ const int orig_gain = 18;
+ const int orig_int_time = 400;
+
+ return DIV_ROUND_CLOSEST(23 * data->gain * data->int_time_us, 10 * orig_gain * orig_int_time);
+}
+
static int ltr390_read_raw(struct iio_dev *iio_device,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -94,29 +142,174 @@ static int ltr390_read_raw(struct iio_dev *iio_device,
int ret;
struct ltr390_data *data = iio_priv(iio_device);
+ guard(mutex)(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = ltr390_register_read(data, LTR390_UVS_DATA);
- if (ret < 0)
- return ret;
+ switch (chan->type) {
+ case IIO_UVINDEX:
+ ret = ltr390_set_mode(data, LTR390_SET_UVS_MODE);
+ if (ret < 0)
+ return ret;
+
+ ret = ltr390_register_read(data, LTR390_UVS_DATA);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case IIO_LIGHT:
+ ret = ltr390_set_mode(data, LTR390_SET_ALS_MODE);
+ if (ret < 0)
+ return ret;
+
+ ret = ltr390_register_read(data, LTR390_ALS_DATA);
+ if (ret < 0)
+ return ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = LTR390_WINDOW_FACTOR;
- *val2 = LTR390_COUNTS_PER_UVI;
- return IIO_VAL_FRACTIONAL;
+ switch (chan->type) {
+ case IIO_UVINDEX:
+ *val = LTR390_WINDOW_FACTOR * LTR390_FRACTIONAL_PRECISION;
+ *val2 = ltr390_counts_per_uvi(data);
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_LIGHT:
+ *val = LTR390_WINDOW_FACTOR * 6 * 100;
+ *val2 = data->gain * data->int_time_us;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = data->int_time_us;
+ return IIO_VAL_INT;
+
default:
return -EINVAL;
}
}
-static const struct iio_info ltr390_info = {
- .read_raw = ltr390_read_raw,
+/* integration time in us */
+static const int ltr390_int_time_map_us[] = { 400000, 200000, 100000, 50000, 25000, 12500 };
+static const int ltr390_gain_map[] = { 1, 3, 6, 9, 18 };
+
+static const struct iio_chan_spec ltr390_channels[] = {
+ /* UV sensor */
+ {
+ .type = IIO_UVINDEX,
+ .scan_index = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) | BIT(IIO_CHAN_INFO_SCALE)
+ },
+ /* ALS sensor */
+ {
+ .type = IIO_LIGHT,
+ .scan_index = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) | BIT(IIO_CHAN_INFO_SCALE)
+ },
};
-static const struct iio_chan_spec ltr390_channel = {
- .type = IIO_UVINDEX,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)
+static int ltr390_set_gain(struct ltr390_data *data, int val)
+{
+ int ret, idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ltr390_gain_map); idx++) {
+ if (ltr390_gain_map[idx] != val)
+ continue;
+
+ guard(mutex)(&data->lock);
+ ret = regmap_update_bits(data->regmap,
+ LTR390_ALS_UVS_GAIN,
+ LTR390_ALS_UVS_GAIN_MASK, idx);
+ if (ret)
+ return ret;
+
+ data->gain = ltr390_gain_map[idx];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ltr390_set_int_time(struct ltr390_data *data, int val)
+{
+ int ret, idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ltr390_int_time_map_us); idx++) {
+ if (ltr390_int_time_map_us[idx] != val)
+ continue;
+
+ guard(mutex)(&data->lock);
+ ret = regmap_update_bits(data->regmap,
+ LTR390_ALS_UVS_MEAS_RATE,
+ LTR390_ALS_UVS_INT_TIME_MASK,
+ LTR390_ALS_UVS_INT_TIME(idx));
+ if (ret)
+ return ret;
+
+ data->int_time_us = ltr390_int_time_map_us[idx];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ltr390_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *length = ARRAY_SIZE(ltr390_gain_map);
+ *type = IIO_VAL_INT;
+ *vals = ltr390_gain_map;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(ltr390_int_time_map_us);
+ *type = IIO_VAL_INT;
+ *vals = ltr390_int_time_map_us;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltr390_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ltr390_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val2 != 0)
+ return -EINVAL;
+
+ return ltr390_set_gain(data, val);
+
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val2 != 0)
+ return -EINVAL;
+
+ return ltr390_set_int_time(data, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ltr390_info = {
+ .read_raw = ltr390_read_raw,
+ .write_raw = ltr390_write_raw,
+ .read_avail = ltr390_read_avail,
};
static int ltr390_probe(struct i2c_client *client)
@@ -139,11 +332,18 @@ static int ltr390_probe(struct i2c_client *client)
"regmap initialization failed\n");
data->client = client;
+ /* Default integration time, from page 15 of the datasheet */
+ data->int_time_us = 100000;
+ /* Default gain, from page 16 of the datasheet */
+ data->gain = 3;
+ /* The default mode for the LTR390 is ALS mode */
+ data->mode = LTR390_SET_ALS_MODE;
+
mutex_init(&data->lock);
indio_dev->info = &ltr390_info;
- indio_dev->channels = &ltr390_channel;
- indio_dev->num_channels = 1;
+ indio_dev->channels = ltr390_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ltr390_channels);
indio_dev->name = "ltr390";
ret = regmap_read(data->regmap, LTR390_PART_ID, &part_number);
@@ -161,8 +361,7 @@ static int ltr390_probe(struct i2c_client *client)
/* Wait for the registers to reset before proceeding */
usleep_range(1000, 2000);
- ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL,
- LTR390_SENSOR_ENABLE | LTR390_UVS_MODE);
+ ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE);
if (ret)
return dev_err_probe(dev, ret, "failed to enable the sensor\n");
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
index 68dc48420a88..bc8444516689 100644
--- a/drivers/iio/light/ltrf216a.c
+++ b/drivers/iio/light/ltrf216a.c
@@ -68,6 +68,13 @@ static const int ltrf216a_int_time_reg[][2] = {
{ 25, 0x40 },
};
+struct ltr_chip_info {
+ /* Chip contains CLEAR_DATA_0/1/2 registers at offset 0xa..0xc */
+ bool has_clear_data;
+ /* Lux calculation multiplier for ALS data */
+ int lux_multiplier;
+};
+
/*
* Window Factor is needed when the device is under Window glass
* with coated tinted ink. This is to compensate for the light loss
@@ -79,6 +86,7 @@ static const int ltrf216a_int_time_reg[][2] = {
struct ltrf216a_data {
struct regmap *regmap;
struct i2c_client *client;
+ const struct ltr_chip_info *info;
u32 int_time;
u16 int_time_fac;
u8 als_gain_fac;
@@ -246,7 +254,7 @@ static int ltrf216a_get_lux(struct ltrf216a_data *data)
ltrf216a_set_power_state(data, false);
- lux = greendata * 45 * LTRF216A_WIN_FAC;
+ lux = greendata * data->info->lux_multiplier * LTRF216A_WIN_FAC;
return lux;
}
@@ -334,15 +342,15 @@ static const struct iio_info ltrf216a_info = {
static bool ltrf216a_readable_reg(struct device *dev, unsigned int reg)
{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+
switch (reg) {
case LTRF216A_MAIN_CTRL:
case LTRF216A_ALS_MEAS_RES:
case LTRF216A_ALS_GAIN:
case LTRF216A_PART_ID:
case LTRF216A_MAIN_STATUS:
- case LTRF216A_ALS_CLEAR_DATA_0:
- case LTRF216A_ALS_CLEAR_DATA_1:
- case LTRF216A_ALS_CLEAR_DATA_2:
case LTRF216A_ALS_DATA_0:
case LTRF216A_ALS_DATA_1:
case LTRF216A_ALS_DATA_2:
@@ -355,6 +363,10 @@ static bool ltrf216a_readable_reg(struct device *dev, unsigned int reg)
case LTRF216A_ALS_THRES_LOW_1:
case LTRF216A_ALS_THRES_LOW_2:
return true;
+ case LTRF216A_ALS_CLEAR_DATA_0:
+ case LTRF216A_ALS_CLEAR_DATA_1:
+ case LTRF216A_ALS_CLEAR_DATA_2:
+ return data->info->has_clear_data;
default:
return false;
}
@@ -382,15 +394,23 @@ static bool ltrf216a_writable_reg(struct device *dev, unsigned int reg)
static bool ltrf216a_volatile_reg(struct device *dev, unsigned int reg)
{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+
switch (reg) {
case LTRF216A_MAIN_STATUS:
- case LTRF216A_ALS_CLEAR_DATA_0:
- case LTRF216A_ALS_CLEAR_DATA_1:
- case LTRF216A_ALS_CLEAR_DATA_2:
case LTRF216A_ALS_DATA_0:
case LTRF216A_ALS_DATA_1:
case LTRF216A_ALS_DATA_2:
return true;
+ /*
+ * If these registers are not present on a chip (like LTR-308),
+ * the missing registers are not considered volatile.
+ */
+ case LTRF216A_ALS_CLEAR_DATA_0:
+ case LTRF216A_ALS_CLEAR_DATA_1:
+ case LTRF216A_ALS_CLEAR_DATA_2:
+ return data->info->has_clear_data;
default:
return false;
}
@@ -433,6 +453,7 @@ static int ltrf216a_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->info = i2c_get_match_data(client);
mutex_init(&data->lock);
@@ -520,15 +541,27 @@ cache_only:
static DEFINE_RUNTIME_DEV_PM_OPS(ltrf216a_pm_ops, ltrf216a_runtime_suspend,
ltrf216a_runtime_resume, NULL);
+static const struct ltr_chip_info ltr308_chip_info = {
+ .has_clear_data = false,
+ .lux_multiplier = 60,
+};
+
+static const struct ltr_chip_info ltrf216a_chip_info = {
+ .has_clear_data = true,
+ .lux_multiplier = 45,
+};
+
static const struct i2c_device_id ltrf216a_id[] = {
- { "ltrf216a" },
+ { "ltr308", .driver_data = (kernel_ulong_t)&ltr308_chip_info },
+ { "ltrf216a", .driver_data = (kernel_ulong_t)&ltrf216a_chip_info },
{}
};
MODULE_DEVICE_TABLE(i2c, ltrf216a_id);
static const struct of_device_id ltrf216a_of_match[] = {
- { .compatible = "liteon,ltrf216a" },
- { .compatible = "ltr,ltrf216a" },
+ { .compatible = "liteon,ltr308", .data = &ltr308_chip_info },
+ { .compatible = "liteon,ltrf216a", .data = &ltrf216a_chip_info },
+ { .compatible = "ltr,ltrf216a", .data = &ltrf216a_chip_info },
{}
};
MODULE_DEVICE_TABLE(of, ltrf216a_of_match);
diff --git a/drivers/iio/light/noa1305.c b/drivers/iio/light/noa1305.c
index 596cc48c4c34..25f63da70297 100644
--- a/drivers/iio/light/noa1305.c
+++ b/drivers/iio/light/noa1305.c
@@ -29,6 +29,7 @@
#define NOA1305_INTEGR_TIME_25MS 0x05
#define NOA1305_INTEGR_TIME_12_5MS 0x06
#define NOA1305_INTEGR_TIME_6_25MS 0x07
+#define NOA1305_INTEGR_TIME_MASK 0x07
#define NOA1305_REG_INT_SELECT 0x3
#define NOA1305_INT_SEL_ACTIVE_HIGH 0x01
#define NOA1305_INT_SEL_ACTIVE_LOW 0x02
@@ -43,12 +44,34 @@
#define NOA1305_DEVICE_ID 0x0519
#define NOA1305_DRIVER_NAME "noa1305"
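+/*
+ * Lux scale as val/val2 fractions: 1 / (7.7 integration constant *
+ * integration time in seconds), one entry per integration time setting.
+ */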
+static int noa1305_scale_available[] = {
+ 100, 8 * 77, /* 800 ms */
+ 100, 4 * 77, /* 400 ms */
+ 100, 2 * 77, /* 200 ms */
+ 100, 1 * 77, /* 100 ms */
+ 1000, 5 * 77, /* 50 ms */
+ 10000, 25 * 77, /* 25 ms */
+ 100000, 125 * 77, /* 12.5 ms */
+ 1000000, 625 * 77, /* 6.25 ms */
+};
+
+static int noa1305_int_time_available[] = {
+ 0, 800000, /* 800 ms */
+ 0, 400000, /* 400 ms */
+ 0, 200000, /* 200 ms */
+ 0, 100000, /* 100 ms */
+ 0, 50000, /* 50 ms */
+ 0, 25000, /* 25 ms */
+ 0, 12500, /* 12.5 ms */
+ 0, 6250, /* 6.25 ms */
+};
+
struct noa1305_priv {
struct i2c_client *client;
struct regmap *regmap;
};
-static int noa1305_measure(struct noa1305_priv *priv)
+static int noa1305_measure(struct noa1305_priv *priv, int *val)
{
__le16 data;
int ret;
@@ -58,7 +81,9 @@ static int noa1305_measure(struct noa1305_priv *priv)
if (ret < 0)
return ret;
- return le16_to_cpu(data);
+ *val = le16_to_cpu(data);
+
+ return IIO_VAL_INT;
}
static int noa1305_scale(struct noa1305_priv *priv, int *val, int *val2)
@@ -76,91 +101,113 @@ static int noa1305_scale(struct noa1305_priv *priv, int *val, int *val2)
* Integration Constant = 7.7
* Integration Time in Seconds
*/
- switch (data) {
- case NOA1305_INTEGR_TIME_800MS:
- *val = 100;
- *val2 = 77 * 8;
- break;
- case NOA1305_INTEGR_TIME_400MS:
- *val = 100;
- *val2 = 77 * 4;
- break;
- case NOA1305_INTEGR_TIME_200MS:
- *val = 100;
- *val2 = 77 * 2;
- break;
- case NOA1305_INTEGR_TIME_100MS:
- *val = 100;
- *val2 = 77;
- break;
- case NOA1305_INTEGR_TIME_50MS:
- *val = 1000;
- *val2 = 77 * 5;
- break;
- case NOA1305_INTEGR_TIME_25MS:
- *val = 10000;
- *val2 = 77 * 25;
- break;
- case NOA1305_INTEGR_TIME_12_5MS:
- *val = 100000;
- *val2 = 77 * 125;
- break;
- case NOA1305_INTEGR_TIME_6_25MS:
- *val = 1000000;
- *val2 = 77 * 625;
- break;
- default:
- return -EINVAL;
- }
+ data &= NOA1305_INTEGR_TIME_MASK;
+ *val = noa1305_scale_available[2 * data + 0];
+ *val2 = noa1305_scale_available[2 * data + 1];
return IIO_VAL_FRACTIONAL;
}
+static int noa1305_int_time(struct noa1305_priv *priv, int *val, int *val2)
+{
+ int data;
+ int ret;
+
+ ret = regmap_read(priv->regmap, NOA1305_REG_INTEGRATION_TIME, &data);
+ if (ret < 0)
+ return ret;
+
+ data &= NOA1305_INTEGR_TIME_MASK;
+ *val = noa1305_int_time_available[2 * data + 0];
+ *val2 = noa1305_int_time_available[2 * data + 1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
static const struct iio_chan_spec noa1305_channels[] = {
{
.type = IIO_LIGHT,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
}
};
+static int noa1305_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = noa1305_scale_available;
+ *length = ARRAY_SIZE(noa1305_scale_available);
+ *type = IIO_VAL_FRACTIONAL;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_INT_TIME:
+ *vals = noa1305_int_time_available;
+ *length = ARRAY_SIZE(noa1305_int_time_available);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int noa1305_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
{
- int ret = -EINVAL;
struct noa1305_priv *priv = iio_priv(indio_dev);
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
- switch (chan->type) {
- case IIO_LIGHT:
- ret = noa1305_measure(priv);
- if (ret < 0)
- return ret;
- *val = ret;
- return IIO_VAL_INT;
- default:
- break;
- }
- break;
+ return noa1305_measure(priv, val);
case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_LIGHT:
- return noa1305_scale(priv, val, val2);
- default:
- break;
- }
- break;
+ return noa1305_scale(priv, val, val2);
+ case IIO_CHAN_INFO_INT_TIME:
+ return noa1305_int_time(priv, val, val2);
default:
- break;
+ return -EINVAL;
}
+}
- return ret;
+static int noa1305_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct noa1305_priv *priv = iio_priv(indio_dev);
+ int i;
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_INT_TIME)
+ return -EINVAL;
+
+ if (val) /* >= 1s integration time not supported */
+ return -EINVAL;
+
+ /* Look up the integration time register setting and write it if found. */
+ for (i = 0; i < ARRAY_SIZE(noa1305_int_time_available) / 2; i++)
+ if (noa1305_int_time_available[2 * i + 1] == val2)
+ return regmap_write(priv->regmap, NOA1305_REG_INTEGRATION_TIME, i);
+
+ return -EINVAL;
}
static const struct iio_info noa1305_info = {
+ .read_avail = noa1305_read_avail,
.read_raw = noa1305_read_raw,
+ .write_raw = noa1305_write_raw,
};
static bool noa1305_writable_reg(struct device *dev, unsigned int reg)
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index 4937bf6fa046..76711c3cdf7c 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -1,9 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * BU27034 ROHM Ambient Light Sensor
+ * BU27034ANUC ROHM Ambient Light Sensor
*
* Copyright (c) 2023, ROHM Semiconductor.
- * https://fscdn.rohm.com/en/products/databook/datasheet/ic/sensor/light/bu27034nuc-e.pdf
*/
#include <linux/bitfield.h>
@@ -30,17 +29,15 @@
#define BU27034_REG_MODE_CONTROL2 0x42
#define BU27034_MASK_D01_GAIN GENMASK(7, 3)
-#define BU27034_MASK_D2_GAIN_HI GENMASK(7, 6)
-#define BU27034_MASK_D2_GAIN_LO GENMASK(2, 0)
#define BU27034_REG_MODE_CONTROL3 0x43
#define BU27034_REG_MODE_CONTROL4 0x44
#define BU27034_MASK_MEAS_EN BIT(0)
#define BU27034_MASK_VALID BIT(7)
+#define BU27034_NUM_HW_DATA_CHANS 2
#define BU27034_REG_DATA0_LO 0x50
#define BU27034_REG_DATA1_LO 0x52
-#define BU27034_REG_DATA2_LO 0x54
-#define BU27034_REG_DATA2_HI 0x55
+#define BU27034_REG_DATA1_HI 0x53
#define BU27034_REG_MANUFACTURER_ID 0x92
#define BU27034_REG_MAX BU27034_REG_MANUFACTURER_ID
@@ -88,58 +85,48 @@ enum {
BU27034_CHAN_ALS,
BU27034_CHAN_DATA0,
BU27034_CHAN_DATA1,
- BU27034_CHAN_DATA2,
BU27034_NUM_CHANS
};
static const unsigned long bu27034_scan_masks[] = {
- GENMASK(BU27034_CHAN_DATA2, BU27034_CHAN_ALS), 0
+ GENMASK(BU27034_CHAN_DATA1, BU27034_CHAN_DATA0),
+ GENMASK(BU27034_CHAN_DATA1, BU27034_CHAN_ALS), 0
};
/*
- * Available scales with gain 1x - 4096x, timings 55, 100, 200, 400 mS
+ * Available scales with gain 1x - 1024x, timings 55, 100, 200, 400 mS
* Time impacts to gain: 1x, 2x, 4x, 8x.
*
- * => Max total gain is HWGAIN * gain by integration time (8 * 4096) = 32768
+ * => Max total gain is HWGAIN * gain by integration time (8 * 1024) = 8192
+ * if 1x gain is scale 1, scale for 2x gain is 0.5, 4x => 0.25,
+ * ... 8192x => 0.0001220703125 => 122070.3125 nanos
*
- * Using NANO precision for scale we must use scale 64x corresponding gain 1x
- * to avoid precision loss. (32x would result scale 976 562.5(nanos).
+ * Using NANO precision for scale, we must use scale 16x corresponding to
+ * gain 1x to avoid precision loss (8x would result in scale 976562.5 nanos).
*/
-#define BU27034_SCALE_1X 64
+#define BU27034_SCALE_1X 16
/* See the data sheet for the "Gain Setting" table */
#define BU27034_GSEL_1X 0x00 /* 00000 */
#define BU27034_GSEL_4X 0x08 /* 01000 */
-#define BU27034_GSEL_16X 0x0a /* 01010 */
#define BU27034_GSEL_32X 0x0b /* 01011 */
-#define BU27034_GSEL_64X 0x0c /* 01100 */
#define BU27034_GSEL_256X 0x18 /* 11000 */
#define BU27034_GSEL_512X 0x19 /* 11001 */
#define BU27034_GSEL_1024X 0x1a /* 11010 */
-#define BU27034_GSEL_2048X 0x1b /* 11011 */
-#define BU27034_GSEL_4096X 0x1c /* 11100 */
/* Available gain settings */
static const struct iio_gain_sel_pair bu27034_gains[] = {
GAIN_SCALE_GAIN(1, BU27034_GSEL_1X),
GAIN_SCALE_GAIN(4, BU27034_GSEL_4X),
- GAIN_SCALE_GAIN(16, BU27034_GSEL_16X),
GAIN_SCALE_GAIN(32, BU27034_GSEL_32X),
- GAIN_SCALE_GAIN(64, BU27034_GSEL_64X),
GAIN_SCALE_GAIN(256, BU27034_GSEL_256X),
GAIN_SCALE_GAIN(512, BU27034_GSEL_512X),
GAIN_SCALE_GAIN(1024, BU27034_GSEL_1024X),
- GAIN_SCALE_GAIN(2048, BU27034_GSEL_2048X),
- GAIN_SCALE_GAIN(4096, BU27034_GSEL_4096X),
};
/*
- * The IC has 5 modes for sampling time. 5 mS mode is exceptional as it limits
- * the data collection to data0-channel only and cuts the supported range to
- * 10 bit. It is not supported by the driver.
- *
- * "normal" modes are 55, 100, 200 and 400 mS modes - which do have direct
- * multiplying impact to the register values (similar to gain).
+ * Measurement modes are 55, 100, 200 and 400 ms - which have a direct
+ * multiplying impact on the data register values (similar to gain).
*
* This means that if meas-mode is changed for example from 400 => 200,
* the scale is doubled. Eg, time impact to total gain is x1, x2, x4, x8.
@@ -156,13 +143,13 @@ static const struct iio_itime_sel_mul bu27034_itimes[] = {
GAIN_SCALE_ITIME_US(55000, BU27034_MEAS_MODE_55MS, 1),
};
-#define BU27034_CHAN_DATA(_name, _ch2) \
+#define BU27034_CHAN_DATA(_name) \
{ \
.type = IIO_INTENSITY, \
.channel = BU27034_CHAN_##_name, \
- .channel2 = (_ch2), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
.info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), \
.info_mask_shared_by_all_available = \
@@ -195,13 +182,12 @@ static const struct iio_chan_spec bu27034_channels[] = {
/*
* The BU27034 DATA0 and DATA1 channels are both on the visible light
* area (mostly). The data0 sensitivity peaks at 500nm, DATA1 at 600nm.
- * These wave lengths are pretty much on the border of colours making
- * these a poor candidates for R/G/B standardization. Hence they're both
- * marked as clear channels
+ * These wavelengths are cyan(ish) and orange(ish), making them
+ * sub-optimal candidates for R/G/B standardization. Hence the
+ * colour modifier is omitted.
*/
- BU27034_CHAN_DATA(DATA0, IIO_MOD_LIGHT_CLEAR),
- BU27034_CHAN_DATA(DATA1, IIO_MOD_LIGHT_CLEAR),
- BU27034_CHAN_DATA(DATA2, IIO_MOD_LIGHT_IR),
+ BU27034_CHAN_DATA(DATA0),
+ BU27034_CHAN_DATA(DATA1),
IIO_CHAN_SOFT_TIMESTAMP(4),
};
@@ -215,10 +201,10 @@ struct bu27034_data {
struct mutex mutex;
struct iio_gts gts;
struct task_struct *task;
- __le16 raw[3];
+ __le16 raw[BU27034_NUM_HW_DATA_CHANS];
struct {
u32 mlux;
- __le16 channels[3];
+ __le16 channels[BU27034_NUM_HW_DATA_CHANS];
s64 ts __aligned(8);
} scan;
};
@@ -232,7 +218,7 @@ static const struct regmap_range bu27034_volatile_ranges[] = {
.range_max = BU27034_REG_MODE_CONTROL4,
}, {
.range_min = BU27034_REG_DATA0_LO,
- .range_max = BU27034_REG_DATA2_HI,
+ .range_max = BU27034_REG_DATA1_HI,
},
};
@@ -244,7 +230,7 @@ static const struct regmap_access_table bu27034_volatile_regs = {
static const struct regmap_range bu27034_read_only_ranges[] = {
{
.range_min = BU27034_REG_DATA0_LO,
- .range_max = BU27034_REG_DATA2_HI,
+ .range_max = BU27034_REG_DATA1_HI,
}, {
.range_min = BU27034_REG_MANUFACTURER_ID,
.range_max = BU27034_REG_MANUFACTURER_ID,
@@ -273,41 +259,17 @@ struct bu27034_gain_check {
static int bu27034_get_gain_sel(struct bu27034_data *data, int chan)
{
+ int reg[] = {
+ [BU27034_CHAN_DATA0] = BU27034_REG_MODE_CONTROL2,
+ [BU27034_CHAN_DATA1] = BU27034_REG_MODE_CONTROL3,
+ };
int ret, val;
- switch (chan) {
- case BU27034_CHAN_DATA0:
- case BU27034_CHAN_DATA1:
- {
- int reg[] = {
- [BU27034_CHAN_DATA0] = BU27034_REG_MODE_CONTROL2,
- [BU27034_CHAN_DATA1] = BU27034_REG_MODE_CONTROL3,
- };
- ret = regmap_read(data->regmap, reg[chan], &val);
- if (ret)
- return ret;
-
- return FIELD_GET(BU27034_MASK_D01_GAIN, val);
- }
- case BU27034_CHAN_DATA2:
- {
- int d2_lo_bits = fls(BU27034_MASK_D2_GAIN_LO);
-
- ret = regmap_read(data->regmap, BU27034_REG_MODE_CONTROL2, &val);
- if (ret)
- return ret;
+ ret = regmap_read(data->regmap, reg[chan], &val);
+ if (ret)
+ return ret;
- /*
- * The data2 channel gain is composed by 5 non continuous bits
- * [7:6], [2:0]. Thus when we combine the 5-bit 'selector'
- * from register value we must right shift the high bits by 3.
- */
- return FIELD_GET(BU27034_MASK_D2_GAIN_HI, val) << d2_lo_bits |
- FIELD_GET(BU27034_MASK_D2_GAIN_LO, val);
- }
- default:
- return -EINVAL;
- }
+ return FIELD_GET(BU27034_MASK_D01_GAIN, val);
}
static int bu27034_get_gain(struct bu27034_data *data, int chan, int *gain)
@@ -390,44 +352,9 @@ static int bu27034_write_gain_sel(struct bu27034_data *data, int chan, int sel)
};
int mask, val;
- if (chan != BU27034_CHAN_DATA0 && chan != BU27034_CHAN_DATA1)
- return -EINVAL;
-
val = FIELD_PREP(BU27034_MASK_D01_GAIN, sel);
-
mask = BU27034_MASK_D01_GAIN;
- if (chan == BU27034_CHAN_DATA0) {
- /*
- * We keep the same gain for channel 2 as we set for channel 0
- * We can't allow them to be individually controlled because
- * setting one will impact also the other. Also, if we don't
- * always update both gains we may result unsupported bit
- * combinations.
- *
- * This is not nice but this is yet another place where the
- * user space must be prepared to surprizes. Namely, see chan 2
- * gain changed when chan 0 gain is changed.
- *
- * This is not fatal for most users though. I don't expect the
- * channel 2 to be used in any generic cases - the intensity
- * values provided by the sensor for IR area are not openly
- * documented. Also, channel 2 is not used for visible light.
- *
- * So, if there is application which is written to utilize the
- * channel 2 - then it is probably specifically targeted to this
- * sensor and knows how to utilize those values. It is safe to
- * hope such user can also cope with the gain changes.
- */
- mask |= BU27034_MASK_D2_GAIN_LO;
-
- /*
- * The D2 gain bits are directly the lowest bits of selector.
- * Just do add those bits to the value
- */
- val |= sel & BU27034_MASK_D2_GAIN_LO;
- }
-
return regmap_update_bits(data->regmap, reg[chan], mask, val);
}
@@ -435,13 +362,6 @@ static int bu27034_set_gain(struct bu27034_data *data, int chan, int gain)
{
int ret;
- /*
- * We don't allow setting channel 2 gain as it messes up the
- * gain for channel 0 - which shares the high bits
- */
- if (chan != BU27034_CHAN_DATA0 && chan != BU27034_CHAN_DATA1)
- return -EINVAL;
-
ret = iio_gts_find_sel_by_gain(&data->gts, gain);
if (ret < 0)
return ret;
@@ -565,9 +485,6 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
int ret, time_sel, gain_sel, i;
bool found = false;
- if (chan == BU27034_CHAN_DATA2)
- return -EINVAL;
-
if (chan == BU27034_CHAN_ALS) {
if (val == 0 && val2 == 1000000)
return 0;
@@ -592,9 +509,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
/*
* Populate information for the other channel which should also
- * maintain the scale. (Due to the HW limitations the chan2
- * gets the same gain as chan0, so we only need to explicitly
- * set the chan 0 and 1).
+ * maintain the scale.
*/
if (chan == BU27034_CHAN_DATA0)
gain.chan = BU27034_CHAN_DATA1;
@@ -608,7 +523,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
/*
* Iterate through all the times to see if we find one which
* can support requested scale for requested channel, while
- * maintaining the scale for other channels
+ * maintaining the scale for the other channel
*/
for (i = 0; i < data->gts.num_itime; i++) {
new_time_sel = data->gts.itime_table[i].sel;
@@ -623,7 +538,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
if (ret)
continue;
- /* Can the other channel(s) maintain scale? */
+ /* Can the other channel maintain scale? */
ret = iio_gts_find_new_gain_sel_by_old_gain_time(
&data->gts, gain.old_gain, time_sel,
new_time_sel, &gain.new_gain);
@@ -635,7 +550,7 @@ static int bu27034_set_scale(struct bu27034_data *data, int chan,
}
if (!found) {
dev_dbg(data->dev,
- "Can't set scale maintaining other channels\n");
+ "Can't set scale maintaining other channel\n");
ret = -EINVAL;
goto unlock_out;
@@ -659,102 +574,21 @@ unlock_out:
}
/*
- * for (D1/D0 < 0.87):
- * lx = 0.004521097 * D1 - 0.002663996 * D0 +
- * 0.00012213 * D1 * D1 / D0
- *
- * => 115.7400832 * ch1 / gain1 / mt -
- * 68.1982976 * ch0 / gain0 / mt +
- * 0.00012213 * 25600 * (ch1 / gain1 / mt) * 25600 *
- * (ch1 /gain1 / mt) / (25600 * ch0 / gain0 / mt)
- *
- * A = 0.00012213 * 25600 * (ch1 /gain1 / mt) * 25600 *
- * (ch1 /gain1 / mt) / (25600 * ch0 / gain0 / mt)
- * => 0.00012213 * 25600 * (ch1 /gain1 / mt) *
- * (ch1 /gain1 / mt) / (ch0 / gain0 / mt)
- * => 0.00012213 * 25600 * (ch1 / gain1) * (ch1 /gain1 / mt) /
- * (ch0 / gain0)
- * => 0.00012213 * 25600 * (ch1 / gain1) * (ch1 /gain1 / mt) *
- * gain0 / ch0
- * => 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / mt /ch0
- *
- * lx = (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0) /
- * mt + A
- * => (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0) /
- * mt + 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / mt /
- * ch0
- *
- * => (115.7400832 * ch1 / gain1 - 68.1982976 * ch0 / gain0 +
- * 3.126528 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0) /
- * mt
- *
- * For (0.87 <= D1/D0 < 1.00)
- * lx = (0.001331* D0 + 0.0000354 * D1) * ((D1/D0 – 0.87) * (0.385) + 1)
- * => (0.001331 * 256 * 100 * ch0 / gain0 / mt + 0.0000354 * 256 *
- * 100 * ch1 / gain1 / mt) * ((D1/D0 - 0.87) * (0.385) + 1)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * ((D1/D0 - 0.87) * (0.385) + 1)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (0.385 * D1/D0 - 0.66505)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (0.385 * 256 * 100 * ch1 / gain1 / mt / (256 * 100 * ch0 / gain0 / mt) - 0.66505)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (9856 * ch1 / gain1 / mt / (25600 * ch0 / gain0 / mt) + 0.66505)
- * => 13.118336 * ch1 / (gain1 * mt)
- * + 22.66064768 * ch0 / (gain0 * mt)
- * + 8931.90144 * ch1 * ch1 * gain0 /
- * (25600 * ch0 * gain1 * gain1 * mt)
- * + 0.602694912 * ch1 / (gain1 * mt)
- *
- * => [0.3489024 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1)
- * + 22.66064768 * ch0 / gain0
- * + 13.721030912 * ch1 / gain1
- * ] / mt
- *
- * For (D1/D0 >= 1.00)
+ * for (D1/D0 < 1.5):
+ * lx = (0.001193 * D0 + (-0.0000747) * D1) * ((D1/D0 - 1.5) * (0.25) + 1)
*
- * lx = (0.001331* D0 + 0.0000354 * D1) * ((D1/D0 – 2.0) * (-0.05) + 1)
- * => (0.001331* D0 + 0.0000354 * D1) * (-0.05D1/D0 + 1.1)
- * => (0.001331 * 256 * 100 * ch0 / gain0 / mt + 0.0000354 * 256 *
- * 100 * ch1 / gain1 / mt) * (-0.05D1/D0 + 1.1)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (-0.05 * 256 * 100 * ch1 / gain1 / mt / (256 * 100 * ch0 / gain0 / mt) + 1.1)
- * => (34.0736 * ch0 / gain0 / mt + 0.90624 * ch1 / gain1 / mt) *
- * (-1280 * ch1 / (gain1 * mt * 25600 * ch0 / gain0 / mt) + 1.1)
- * => (34.0736 * ch0 * -1280 * ch1 * gain0 * mt /( gain0 * mt * gain1 * mt * 25600 * ch0)
- * + 34.0736 * 1.1 * ch0 / (gain0 * mt)
- * + 0.90624 * ch1 * -1280 * ch1 *gain0 * mt / (gain1 * mt *gain1 * mt * 25600 * ch0)
- * + 1.1 * 0.90624 * ch1 / (gain1 * mt)
- * => -43614.208 * ch1 / (gain1 * mt * 25600)
- * + 37.48096 ch0 / (gain0 * mt)
- * - 1159.9872 * ch1 * ch1 * gain0 / (gain1 * gain1 * mt * 25600 * ch0)
- * + 0.996864 ch1 / (gain1 * mt)
- * => [
- * - 0.045312 * ch1 * ch1 * gain0 / (gain1 * gain1 * ch0)
- * - 0.706816 * ch1 / gain1
- * + 37.48096 ch0 /gain0
- * ] * mt
+ * => 0.000745625 * D0 + 0.0002515625 * D1 - 0.000018675 * D1 * D1 / D0
*
+ * => (6.44 * ch1 / gain1 + 19.088 * ch0 / gain0 -
+ * 0.47808 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0) /
+ * mt
*
- * So, the first case (D1/D0 < 0.87) can be computed to a form:
+ * Else
+ * lx = 0.001193 * D0 - 0.0000747 * D1
*
- * lx = (3.126528 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) +
- * 115.7400832 * ch1 / gain1 +
- * -68.1982976 * ch0 / gain0
- * / mt
- *
- * Second case (0.87 <= D1/D0 < 1.00) goes to form:
- *
- * => [0.3489024 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) +
- * 13.721030912 * ch1 / gain1 +
- * 22.66064768 * ch0 / gain0
- * ] / mt
- *
- * Third case (D1/D0 >= 1.00) goes to form:
- * => [-0.045312 * ch1 * ch1 * gain0 / (ch0 * gain1 * gain1) +
- * -0.706816 * ch1 / gain1 +
- * 37.48096 ch0 /(gain0
- * ] / mt
+ * => (1.91232 * ch1 / gain1 + 30.5408 * ch0 / gain0 +
+ * [0 * ch1 * ch1 * gain0 / gain1 / gain1 / ch0] ) /
+ * mt
*
* This can be unified to format:
* lx = [
@@ -764,19 +598,14 @@ unlock_out:
* ] / mt
*
* For case 1:
- * A = 3.126528,
- * B = 115.7400832
- * C = -68.1982976
+ * A = -0.47808,
+ * B = 6.44,
+ * C = 19.088
*
* For case 2:
- * A = 0.3489024
- * B = 13.721030912
- * C = 22.66064768
- *
- * For case 3:
- * A = -0.045312
- * B = -0.706816
- * C = 37.48096
+ * A = 0
+ * B = 1.91232
+ * C = 30.5408
*/
struct bu27034_lx_coeff {
@@ -881,21 +710,16 @@ static int bu27034_fixp_calc_lx(unsigned int ch0, unsigned int ch1,
{
static const struct bu27034_lx_coeff coeff[] = {
{
- .A = 31265280, /* 3.126528 */
- .B = 1157400832, /*115.7400832 */
- .C = 681982976, /* -68.1982976 */
- .is_neg = {false, false, true},
+ .A = 4780800, /* -0.47808 */
+ .B = 64400000, /* 6.44 */
+ .C = 190880000, /* 19.088 */
+ .is_neg = { true, false, false },
}, {
- .A = 3489024, /* 0.3489024 */
- .B = 137210309, /* 13.721030912 */
- .C = 226606476, /* 22.66064768 */
+ .A = 0, /* 0 */
+ .B = 19123200, /* 1.91232 */
+ .C = 305408000, /* 30.5408 */
/* All terms positive */
- }, {
- .A = 453120, /* -0.045312 */
- .B = 7068160, /* -0.706816 */
- .C = 374809600, /* 37.48096 */
- .is_neg = {true, true, false},
- }
+ },
};
const struct bu27034_lx_coeff *c = &coeff[coeff_idx];
u64 res = 0, terms[3];
@@ -967,7 +791,6 @@ static int bu27034_read_result(struct bu27034_data *data, int chan, int *res)
int reg[] = {
[BU27034_CHAN_DATA0] = BU27034_REG_DATA0_LO,
[BU27034_CHAN_DATA1] = BU27034_REG_DATA1_LO,
- [BU27034_CHAN_DATA2] = BU27034_REG_DATA2_LO,
};
int valid, ret;
__le16 val;
@@ -1034,7 +857,7 @@ static int bu27034_get_single_result(struct bu27034_data *data, int chan,
{
int ret;
- if (chan < BU27034_CHAN_DATA0 || chan > BU27034_CHAN_DATA2)
+ if (chan < BU27034_CHAN_DATA0 || chan > BU27034_CHAN_DATA1)
return -EINVAL;
ret = bu27034_meas_set(data, true);
@@ -1059,12 +882,10 @@ static int bu27034_get_single_result(struct bu27034_data *data, int chan,
* D1 = data1/ch1_gain/meas_time_ms * 25600
*
* Then:
- * if (D1/D0 < 0.87)
- * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 0.87) * 3.45 + 1)
- * else if (D1/D0 < 1)
- * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 0.87) * 0.385 + 1)
- * else
- * lx = (0.001331 * D0 + 0.0000354 * D1) * ((D1 / D0 - 2) * -0.05 + 1)
+ * If (D1/D0 < 1.5)
+ * lx = (0.001193 * D0 + (-0.0000747) * D1) * ((D1 / D0 - 1.5) * 0.25 + 1)
+ * Else
+ * lx = (0.001193 * D0 + (-0.0000747) * D1)
*
* We use it here. Users who have for example some colored lens
* need to modify the calculation but I hope this gives a starting point for
@@ -1115,12 +936,10 @@ static int bu27034_calc_mlux(struct bu27034_data *data, __le16 *res, int *val)
d1_d0_ratio_scaled /= ch0 * gain1;
}
- if (d1_d0_ratio_scaled < 87)
+ if (d1_d0_ratio_scaled < 150)
ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 0);
- else if (d1_d0_ratio_scaled < 100)
- ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 1);
else
- ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 2);
+ ret = bu27034_fixp_calc_lx(ch0, ch1, gain0, gain1, meastime, 1);
if (ret < 0)
return ret;
@@ -1133,7 +952,7 @@ static int bu27034_calc_mlux(struct bu27034_data *data, __le16 *res, int *val)
static int bu27034_get_mlux(struct bu27034_data *data, int chan, int *val)
{
- __le16 res[3];
+ __le16 res[BU27034_NUM_HW_DATA_CHANS];
int ret;
ret = bu27034_meas_set(data, true);
@@ -1171,6 +990,13 @@ static int bu27034_read_raw(struct iio_dev *idev,
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ ret = bu27034_get_gain(data, chan->channel, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
case IIO_CHAN_INFO_SCALE:
return bu27034_get_scale(data, chan->channel, val, val2);
@@ -1215,12 +1041,17 @@ static int bu27034_write_raw_get_fmt(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask)
{
+ struct bu27034_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_INT_TIME:
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ dev_dbg(data->dev,
+ "HARDWAREGAIN is read-only, use scale to set\n");
+ return -EINVAL;
default:
return -EINVAL;
}
@@ -1501,7 +1332,7 @@ static int bu27034_probe(struct i2c_client *i2c)
}
static const struct of_device_id bu27034_of_match[] = {
- { .compatible = "rohm,bu27034" },
+ { .compatible = "rohm,bu27034anuc" },
{ }
};
MODULE_DEVICE_TABLE(of, bu27034_of_match);
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 77666b780a5c..66abda021696 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -465,11 +465,10 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
goto done;
}
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
int run = 1;
- while (i + run < indio_dev->masklength) {
+ while (i + run < iio_get_masklength(indio_dev)) {
if (!test_bit(i + run, indio_dev->active_scan_mask))
break;
if (indio_dev->channels[i + run].address !=
@@ -514,7 +513,7 @@ static int si1145_set_chlist(struct iio_dev *indio_dev, unsigned long scan_mask)
if (data->scan_mask == scan_mask)
return 0;
- for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ for_each_set_bit(i, &scan_mask, iio_get_masklength(indio_dev)) {
switch (indio_dev->channels[i].address) {
case SI1145_REG_ALSVIS_DATA:
reg |= SI1145_CHLIST_EN_ALSVIS;
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index e3470d6743ef..ed20b6714546 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -35,6 +35,7 @@
#define STK3310_STATE_EN_ALS BIT(1)
#define STK3310_STATE_STANDBY 0x00
+#define STK3013_CHIP_ID_VAL 0x31
#define STK3310_CHIP_ID_VAL 0x13
#define STK3311_CHIP_ID_VAL 0x1D
#define STK3311A_CHIP_ID_VAL 0x15
@@ -84,6 +85,7 @@ static const struct reg_field stk3310_reg_field_flag_nf =
REG_FIELD(STK3310_REG_FLAG, 0, 0);
static const u8 stk3310_chip_ids[] = {
+ STK3013_CHIP_ID_VAL,
STK3310_CHIP_ID_VAL,
STK3311A_CHIP_ID_VAL,
STK3311S34_CHIP_ID_VAL,
@@ -496,7 +498,7 @@ static int stk3310_init(struct iio_dev *indio_dev)
ret = stk3310_check_chip_id(chipid);
if (ret < 0)
- dev_warn(&client->dev, "unknown chip id: 0x%x\n", chipid);
+ dev_info(&client->dev, "new unknown chip id: 0x%x\n", chipid);
state = STK3310_STATE_EN_ALS | STK3310_STATE_EN_PS;
ret = stk3310_set_state(data, state);
@@ -700,6 +702,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stk3310_pm_ops, stk3310_suspend,
stk3310_resume);
static const struct i2c_device_id stk3310_i2c_id[] = {
+ { "STK3013" },
{ "STK3310" },
{ "STK3311" },
{ "STK3335" },
@@ -708,6 +711,7 @@ static const struct i2c_device_id stk3310_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, stk3310_i2c_id);
static const struct acpi_device_id stk3310_acpi_id[] = {
+ {"STK3013", 0},
{"STK3310", 0},
{"STK3311", 0},
{}
@@ -716,6 +720,7 @@ static const struct acpi_device_id stk3310_acpi_id[] = {
MODULE_DEVICE_TABLE(acpi, stk3310_acpi_id);
static const struct of_device_id stk3310_of_match[] = {
+ { .compatible = "sensortek,stk3013", },
{ .compatible = "sensortek,stk3310", },
{ .compatible = "sensortek,stk3311", },
{ .compatible = "sensortek,stk3335", },
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index c9566615b964..4fecdf10aeb1 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -206,8 +206,7 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
struct tcs3414_data *data = iio_priv(indio_dev);
int i, j = 0;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
int ret = i2c_smbus_read_word_data(data->client,
TCS3414_DATA_GREEN + 2*i);
if (ret < 0)
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 89384dba83dd..04452b4664f3 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -383,8 +383,7 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- for_each_set_bit(i, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, i) {
ret = i2c_smbus_read_word_data(data->client,
TCS3472_CDATA + 2*i);
if (ret < 0)
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index cd2917d71904..8eb718f5e50f 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -39,7 +39,7 @@ config AK8975
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Asahi Kasei AK8975, AK8963,
- AK09911, AK09912 or AK09916 3-Axis Magnetometer.
+ AK09911, AK09912, AK09916 or AK09918 3-Axis Magnetometer.
To compile this driver as a module, choose M here: the module
will be called ak8975.
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index dd466c5fa621..18077fb463a9 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -78,6 +78,7 @@
*/
#define AK09912_REG_WIA1 0x00
#define AK09912_REG_WIA2 0x01
+#define AK09918_DEVICE_ID 0x0C
#define AK09916_DEVICE_ID 0x09
#define AK09912_DEVICE_ID 0x04
#define AK09911_DEVICE_ID 0x05
@@ -209,6 +210,7 @@ enum asahi_compass_chipset {
AK09911,
AK09912,
AK09916,
+ AK09918,
};
enum ak_ctrl_reg_addr {
@@ -371,6 +373,34 @@ static const struct ak_def ak_def_array[] = {
AK09912_REG_HXL,
AK09912_REG_HYL,
AK09912_REG_HZL},
+ },
+ [AK09918] = {
+ /* The AK09918 is register compatible with the AK09912; this entry
+ * avoids unknown-ID messages.
+ */
+ .type = AK09918,
+ .raw_to_gauss = ak09912_raw_to_gauss,
+ .range = 32752,
+ .ctrl_regs = {
+ AK09912_REG_ST1,
+ AK09912_REG_ST2,
+ AK09912_REG_CNTL2,
+ AK09912_REG_ASAX,
+ AK09912_MAX_REGS},
+ .ctrl_masks = {
+ AK09912_REG_ST1_DRDY_MASK,
+ AK09912_REG_ST2_HOFL_MASK,
+ 0,
+ AK09912_REG_CNTL2_MODE_MASK},
+ .ctrl_modes = {
+ AK09912_REG_CNTL_MODE_POWER_DOWN,
+ AK09912_REG_CNTL_MODE_ONCE,
+ AK09912_REG_CNTL_MODE_SELF_TEST,
+ AK09912_REG_CNTL_MODE_FUSE_ROM},
+ .data_regs = {
+ AK09912_REG_HXL,
+ AK09912_REG_HYL,
+ AK09912_REG_HZL},
}
};
@@ -452,6 +482,7 @@ static int ak8975_who_i_am(struct i2c_client *client,
/*
* Signature for each device:
* Device | WIA1 | WIA2
+ * AK09918 | DEVICE_ID_| AK09918_DEVICE_ID
* AK09916 | DEVICE_ID_| AK09916_DEVICE_ID
* AK09912 | DEVICE_ID | AK09912_DEVICE_ID
* AK09911 | DEVICE_ID | AK09911_DEVICE_ID
@@ -484,10 +515,18 @@ static int ak8975_who_i_am(struct i2c_client *client,
if (wia_val[1] == AK09916_DEVICE_ID)
return 0;
break;
- default:
- dev_err(&client->dev, "Type %d unknown\n", type);
+ case AK09918:
+ if (wia_val[1] == AK09918_DEVICE_ID)
+ return 0;
+ break;
}
- return -ENODEV;
+
+ dev_info(&client->dev, "Device ID %x is unknown.\n", wia_val[1]);
+ /*
+ * Let the driver probe on unknown IDs, so that more register-compatible
+ * variants are supported.
+ */
+ return 0;
}
/*
@@ -692,22 +731,8 @@ static int ak8975_start_read_axis(struct ak8975_data *data,
if (ret < 0)
return ret;
- /* This will be executed only for non-interrupt based waiting case */
- if (ret & data->def->ctrl_masks[ST1_DRDY]) {
- ret = i2c_smbus_read_byte_data(client,
- data->def->ctrl_regs[ST2]);
- if (ret < 0) {
- dev_err(&client->dev, "Error in reading ST2\n");
- return ret;
- }
- if (ret & (data->def->ctrl_masks[ST2_DERR] |
- data->def->ctrl_masks[ST2_HOFL])) {
- dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
- return -EINVAL;
- }
- }
-
- return 0;
+ /* Return with zero if the data is ready. */
+ return !data->def->ctrl_regs[ST1_DRDY];
}
/* Retrieve raw flux value for one of the x, y, or z axis. */
@@ -734,6 +759,20 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
if (ret < 0)
goto exit;
+ /* Read out ST2 to release the lock on the measurement data. */
+ ret = i2c_smbus_read_byte_data(client, data->def->ctrl_regs[ST2]);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in reading ST2\n");
+ goto exit;
+ }
+
+ if (ret & (data->def->ctrl_masks[ST2_DERR] |
+ data->def->ctrl_masks[ST2_HOFL])) {
+ dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
mutex_unlock(&data->lock);
pm_runtime_mark_last_busy(&data->client->dev);
@@ -1067,6 +1106,7 @@ static const struct i2c_device_id ak8975_id[] = {
{"ak09911", (kernel_ulong_t)&ak_def_array[AK09911] },
{"ak09912", (kernel_ulong_t)&ak_def_array[AK09912] },
{"ak09916", (kernel_ulong_t)&ak_def_array[AK09916] },
+ {"ak09918", (kernel_ulong_t)&ak_def_array[AK09918] },
{}
};
MODULE_DEVICE_TABLE(i2c, ak8975_id);
@@ -1081,7 +1121,7 @@ static const struct of_device_id ak8975_of_match[] = {
{ .compatible = "asahi-kasei,ak09912", .data = &ak_def_array[AK09912] },
{ .compatible = "ak09912", .data = &ak_def_array[AK09912] },
{ .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] },
- { .compatible = "ak09916", .data = &ak_def_array[AK09916] },
+ { .compatible = "asahi-kasei,ak09918", .data = &ak_def_array[AK09918] },
{}
};
MODULE_DEVICE_TABLE(of, ak8975_of_match);
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 42b70cd42b39..0e03a772fa43 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -464,7 +464,7 @@ static irqreturn_t rm3100_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
unsigned long scan_mask = *indio_dev->active_scan_mask;
- unsigned int mask_len = indio_dev->masklength;
+ unsigned int mask_len = iio_get_masklength(indio_dev);
struct rm3100_data *data = iio_priv(indio_dev);
struct regmap *regmap = data->regmap;
int ret, i, bit;
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 3ad38506028e..ce369dbb17fc 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -31,6 +31,8 @@ config BMP280
select REGMAP
select BMP280_I2C if (I2C)
select BMP280_SPI if (SPI_MASTER)
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Bosch Sensortec BMP180, BMP280, BMP380
and BMP580 pressure and temperature sensors. Also supports the BME280 with
@@ -248,6 +250,15 @@ config MS5637
This driver can also be built as a module. If so, the module will
be called ms5637.
+config SDP500
+ tristate "Sensirion SDP500 differential pressure sensor I2C driver"
+ depends on I2C
+ help
+ Say Y here to build support for the Sensirion SDP500 differential
+ pressure sensor.
+ To compile this driver as a module, choose M here: the module
+ will be called sdp500.
+
config IIO_ST_PRESS
tristate "STMicroelectronics pressure sensor Driver"
depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index a93709e35760..6482288e07ee 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_MS5611) += ms5611_core.o
obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o
obj-$(CONFIG_MS5611_SPI) += ms5611_spi.o
obj-$(CONFIG_MS5637) += ms5637.o
+obj-$(CONFIG_SDP500) += sdp500.o
obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 49081b729618..da379230c837 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -41,7 +41,10 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <asm/unaligned.h>
@@ -134,46 +137,169 @@ enum {
BMP380_P11 = 20,
};
+enum bmp280_scan {
+ BMP280_PRESS,
+ BMP280_TEMP,
+ BME280_HUMID,
+};
+
static const struct iio_chan_spec bmp280_channels[] = {
{
.type = IIO_PRESSURE,
+ /* PROCESSED maintained for ABI backwards compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ /* PROCESSED maintained for ABI backwards compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const struct iio_chan_spec bme280_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_TEMP,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_HUMIDITYRELATIVE,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static const struct iio_chan_spec bmp380_channels[] = {
{
.type = IIO_PRESSURE,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_TEMP,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const struct iio_chan_spec bmp580_channels[] = {
{
- .type = IIO_HUMIDITYRELATIVE,
+ .type = IIO_PRESSURE,
+ /* PROCESSED maintained for ABI backwards compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ /* PROCESSED maintained for ABI backwards compatibility */
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
static int bmp280_read_calib(struct bmp280_data *data)
@@ -289,7 +415,7 @@ static int bme280_read_humid_adc(struct bmp280_data *data, u16 *adc_humidity)
int ret;
ret = regmap_bulk_read(data->regmap, BME280_REG_HUMIDITY_MSB,
- &data->be16, sizeof(data->be16));
+ &data->be16, BME280_NUM_HUMIDITY_BYTES);
if (ret) {
dev_err(data->dev, "failed to read humidity\n");
return ret;
@@ -335,7 +461,7 @@ static int bmp280_read_temp_adc(struct bmp280_data *data, u32 *adc_temp)
int ret;
ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_TEMP_BYTES);
if (ret) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
@@ -396,7 +522,7 @@ static int bmp280_read_press_adc(struct bmp280_data *data, u32 *adc_press)
int ret;
ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_PRESS_BYTES);
if (ret) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -445,10 +571,8 @@ static u32 bmp280_compensate_press(struct bmp280_data *data,
return (u32)p;
}
-static int bmp280_read_temp(struct bmp280_data *data,
- int *val, int *val2)
+static int bmp280_read_temp(struct bmp280_data *data, s32 *comp_temp)
{
- s32 comp_temp;
u32 adc_temp;
int ret;
@@ -456,16 +580,15 @@ static int bmp280_read_temp(struct bmp280_data *data,
if (ret)
return ret;
- comp_temp = bmp280_compensate_temp(data, adc_temp);
+ *comp_temp = bmp280_compensate_temp(data, adc_temp);
- *val = comp_temp * 10;
- return IIO_VAL_INT;
+ return 0;
}
-static int bmp280_read_press(struct bmp280_data *data,
- int *val, int *val2)
+static int bmp280_read_press(struct bmp280_data *data, u32 *comp_press)
{
- u32 comp_press, adc_press, t_fine;
+ u32 adc_press;
+ s32 t_fine;
int ret;
ret = bmp280_get_t_fine(data, &t_fine);
@@ -476,17 +599,13 @@ static int bmp280_read_press(struct bmp280_data *data,
if (ret)
return ret;
- comp_press = bmp280_compensate_press(data, adc_press, t_fine);
+ *comp_press = bmp280_compensate_press(data, adc_press, t_fine);
- *val = comp_press;
- *val2 = 256000;
-
- return IIO_VAL_FRACTIONAL;
+ return 0;
}
-static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2)
+static int bme280_read_humid(struct bmp280_data *data, u32 *comp_humidity)
{
- u32 comp_humidity;
u16 adc_humidity;
s32 t_fine;
int ret;
@@ -499,11 +618,9 @@ static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_humidity = bme280_compensate_humidity(data, adc_humidity, t_fine);
+ *comp_humidity = bme280_compensate_humidity(data, adc_humidity, t_fine);
- *val = comp_humidity * 1000 / 1024;
-
- return IIO_VAL_INT;
+ return 0;
}
static int bmp280_read_raw_impl(struct iio_dev *indio_dev,
@@ -511,6 +628,8 @@ static int bmp280_read_raw_impl(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct bmp280_data *data = iio_priv(indio_dev);
+ int chan_value;
+ int ret;
guard(mutex)(&data->lock);
@@ -518,11 +637,72 @@ static int bmp280_read_raw_impl(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_HUMIDITYRELATIVE:
- return data->chip_info->read_humid(data, val, val2);
+ ret = data->chip_info->read_humid(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = data->chip_info->humid_coeffs[0] * chan_value;
+ *val2 = data->chip_info->humid_coeffs[1];
+ return data->chip_info->humid_coeffs_type;
+ case IIO_PRESSURE:
+ ret = data->chip_info->read_press(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = data->chip_info->press_coeffs[0] * chan_value;
+ *val2 = data->chip_info->press_coeffs[1];
+ return data->chip_info->press_coeffs_type;
+ case IIO_TEMP:
+ ret = data->chip_info->read_temp(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = data->chip_info->temp_coeffs[0] * chan_value;
+ *val2 = data->chip_info->temp_coeffs[1];
+ return data->chip_info->temp_coeffs_type;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = data->chip_info->read_humid(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = chan_value;
+ return IIO_VAL_INT;
+ case IIO_PRESSURE:
+ ret = data->chip_info->read_press(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = chan_value;
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ ret = data->chip_info->read_temp(data, &chan_value);
+ if (ret)
+ return ret;
+
+ *val = chan_value;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ *val = data->chip_info->humid_coeffs[0];
+ *val2 = data->chip_info->humid_coeffs[1];
+ return data->chip_info->humid_coeffs_type;
case IIO_PRESSURE:
- return data->chip_info->read_press(data, val, val2);
+ *val = data->chip_info->press_coeffs[0];
+ *val2 = data->chip_info->press_coeffs[1];
+ return data->chip_info->press_coeffs_type;
case IIO_TEMP:
- return data->chip_info->read_temp(data, val, val2);
+ *val = data->chip_info->temp_coeffs[0];
+ *val2 = data->chip_info->temp_coeffs[1];
+ return data->chip_info->temp_coeffs_type;
default:
return -EINVAL;
}
@@ -793,6 +973,16 @@ static const struct iio_info bmp280_info = {
.write_raw = &bmp280_write_raw,
};
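+/*
+ * The trigger handlers always read every channel in one burst, so only
+ * the all-channels scan mask is advertised; the IIO core demuxes to the
+ * subset the user actually enabled.
+ */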
+static const unsigned long bmp280_avail_scan_masks[] = {
+ BIT(BMP280_TEMP) | BIT(BMP280_PRESS),
+ 0
+};
+
+static const unsigned long bme280_avail_scan_masks[] = {
+ BIT(BME280_HUMID) | BIT(BMP280_TEMP) | BIT(BMP280_PRESS),
+ 0
+};
+
static int bmp280_chip_config(struct bmp280_data *data)
{
u8 osrs = FIELD_PREP(BMP280_OSRS_TEMP_MASK, data->oversampling_temp + 1) |
@@ -820,8 +1010,57 @@ static int bmp280_chip_config(struct bmp280_data *data)
return ret;
}
+static irqreturn_t bmp280_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ s32 adc_temp, adc_press, t_fine;
+ int ret;
+
+ guard(mutex)(&data->lock);
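+ /* Scoped lock: guard() releases data->lock on every return path */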
+
+ /* Burst read data registers */
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ data->buf, BMP280_BURST_READ_BYTES);
+ if (ret) {
+ dev_err(data->dev, "failed to burst read sensor data\n");
+ goto out;
+ }
+
+ /* Temperature calculations */
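+ /*
+ * Each sample is transferred as a 24-bit big-endian word whose top
+ * 20 bits hold the measurement; BMP280_MEAS_TRIM_MASK drops the
+ * unused low bits.
+ */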
+ adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[3]));
+ if (adc_temp == BMP280_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ goto out;
+ }
+
+ data->sensor_data[1] = bmp280_compensate_temp(data, adc_temp);
+
+ /* Pressure calculations */
+ adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[0]));
+ if (adc_press == BMP280_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ goto out;
+ }
+
+ t_fine = bmp280_calc_t_fine(data, adc_temp);
+
+ data->sensor_data[0] = bmp280_compensate_press(data, adc_press, t_fine);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
static const u8 bmp280_chip_ids[] = { BMP280_CHIP_ID };
+static const int bmp280_temp_coeffs[] = { 10, 1 };
+static const int bmp280_press_coeffs[] = { 1, 256000 };
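+/*
+ * Scale coefficients as { numerator, denominator } pairs: with
+ * IIO_VAL_FRACTIONAL the compensated value is reported as
+ * value * coeffs[0] / coeffs[1].
+ */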
const struct bmp280_chip_info bmp280_chip_info = {
.id_reg = BMP280_REG_ID,
@@ -830,7 +1069,8 @@ const struct bmp280_chip_info bmp280_chip_info = {
.regmap_config = &bmp280_regmap_config,
.start_up_time = 2000,
.channels = bmp280_channels,
- .num_channels = 2,
+ .num_channels = ARRAY_SIZE(bmp280_channels),
+ .avail_scan_masks = bmp280_avail_scan_masks,
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
@@ -850,10 +1090,17 @@ const struct bmp280_chip_info bmp280_chip_info = {
.num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
.oversampling_press_default = BMP280_OSRS_PRESS_16X - 1,
+ .temp_coeffs = bmp280_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL,
+ .press_coeffs = bmp280_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bmp280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
.read_calib = bmp280_read_calib,
+
+ .trigger_handler = bmp280_trigger_handler,
};
EXPORT_SYMBOL_NS(bmp280_chip_info, IIO_BMP280);
@@ -876,16 +1123,74 @@ static int bme280_chip_config(struct bmp280_data *data)
return bmp280_chip_config(data);
}
+static irqreturn_t bme280_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ s32 adc_temp, adc_press, adc_humidity, t_fine;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Burst read data registers */
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ data->buf, BME280_BURST_READ_BYTES);
+ if (ret) {
+ dev_err(data->dev, "failed to burst read sensor data\n");
+ goto out;
+ }
+
+ /* Temperature calculations */
+ adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[3]));
+ if (adc_temp == BMP280_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ goto out;
+ }
+
+ data->sensor_data[1] = bmp280_compensate_temp(data, adc_temp);
+
+ /* Pressure calculations */
+ adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[0]));
+ if (adc_press == BMP280_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ goto out;
+ }
+
+ t_fine = bmp280_calc_t_fine(data, adc_temp);
+
+ data->sensor_data[0] = bmp280_compensate_press(data, adc_press, t_fine);
+
+ /* Humidity calculations */
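+ /* Humidity follows as a plain 16-bit big-endian word at offset 6 */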
+ adc_humidity = get_unaligned_be16(&data->buf[6]);
+
+ if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
+ dev_err(data->dev, "reading humidity skipped\n");
+ goto out;
+ }
+ data->sensor_data[2] = bme280_compensate_humidity(data, adc_humidity, t_fine);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const u8 bme280_chip_ids[] = { BME280_CHIP_ID };
+static const int bme280_humid_coeffs[] = { 1000, 1024 };
const struct bmp280_chip_info bme280_chip_info = {
.id_reg = BMP280_REG_ID,
.chip_id = bme280_chip_ids,
.num_chip_id = ARRAY_SIZE(bme280_chip_ids),
- .regmap_config = &bmp280_regmap_config,
+ .regmap_config = &bme280_regmap_config,
.start_up_time = 2000,
- .channels = bmp280_channels,
- .num_channels = 3,
+ .channels = bme280_channels,
+ .num_channels = ARRAY_SIZE(bme280_channels),
+ .avail_scan_masks = bme280_avail_scan_masks,
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
@@ -899,11 +1204,20 @@ const struct bmp280_chip_info bme280_chip_info = {
.num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
.oversampling_humid_default = BME280_OSRS_HUMIDITY_16X - 1,
+ .temp_coeffs = bmp280_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL,
+ .press_coeffs = bmp280_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+ .humid_coeffs = bme280_humid_coeffs,
+ .humid_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bme280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
.read_humid = bme280_read_humid,
.read_calib = bme280_read_calib,
+
+ .trigger_handler = bme280_trigger_handler,
};
EXPORT_SYMBOL_NS(bme280_chip_info, IIO_BMP280);
@@ -958,7 +1272,7 @@ static int bmp380_read_temp_adc(struct bmp280_data *data, u32 *adc_temp)
int ret;
ret = regmap_bulk_read(data->regmap, BMP380_REG_TEMP_XLSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_TEMP_BYTES);
if (ret) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
@@ -1027,7 +1341,7 @@ static int bmp380_read_press_adc(struct bmp280_data *data, u32 *adc_press)
int ret;
ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_PRESS_BYTES);
if (ret) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -1091,9 +1405,8 @@ static u32 bmp380_compensate_press(struct bmp280_data *data,
return comp_press;
}
-static int bmp380_read_temp(struct bmp280_data *data, int *val, int *val2)
+static int bmp380_read_temp(struct bmp280_data *data, s32 *comp_temp)
{
- s32 comp_temp;
u32 adc_temp;
int ret;
@@ -1101,15 +1414,14 @@ static int bmp380_read_temp(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_temp = bmp380_compensate_temp(data, adc_temp);
+ *comp_temp = bmp380_compensate_temp(data, adc_temp);
- *val = comp_temp * 10;
- return IIO_VAL_INT;
+ return 0;
}
-static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2)
+static int bmp380_read_press(struct bmp280_data *data, u32 *comp_press)
{
- u32 adc_press, comp_press, t_fine;
+ u32 adc_press, t_fine;
int ret;
ret = bmp380_get_t_fine(data, &t_fine);
@@ -1120,12 +1432,9 @@ static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_press = bmp380_compensate_press(data, adc_press, t_fine);
-
- *val = comp_press;
- *val2 = 100000;
+ *comp_press = bmp380_compensate_press(data, adc_press, t_fine);
- return IIO_VAL_FRACTIONAL;
+ return 0;
}
static int bmp380_read_calib(struct bmp280_data *data)
@@ -1272,10 +1581,11 @@ static int bmp380_chip_config(struct bmp280_data *data)
}
/*
* Waits for measurement before checking configuration error
- * flag. Selected longest measure time indicated in
- * section 3.9.1 in the datasheet.
+ * flag. Use the longest measurement time, calculated from the
+ * formula in datasheet section 3.9.2, with an offset of ~+15%
+ * as also seen in table 3.9.1.
*/
- msleep(80);
+ msleep(150);
/* Check config error flag */
ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
@@ -1293,9 +1603,58 @@ static int bmp380_chip_config(struct bmp280_data *data)
return 0;
}
+static irqreturn_t bmp380_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ s32 adc_temp, adc_press, t_fine;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Burst read data registers */
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB,
+ data->buf, BMP280_BURST_READ_BYTES);
+ if (ret) {
+ dev_err(data->dev, "failed to burst read sensor data\n");
+ goto out;
+ }
+
+ /* Temperature calculations */
+ adc_temp = get_unaligned_le24(&data->buf[3]);
+ if (adc_temp == BMP380_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ goto out;
+ }
+
+ data->sensor_data[1] = bmp380_compensate_temp(data, adc_temp);
+
+ /* Pressure calculations */
+ adc_press = get_unaligned_le24(&data->buf[0]);
+ if (adc_press == BMP380_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ goto out;
+ }
+
+ t_fine = bmp380_calc_t_fine(data, adc_temp);
+
+ data->sensor_data[0] = bmp380_compensate_press(data, adc_press, t_fine);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 };
static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128};
static const u8 bmp380_chip_ids[] = { BMP380_CHIP_ID, BMP390_CHIP_ID };
+static const int bmp380_temp_coeffs[] = { 10, 1 };
+static const int bmp380_press_coeffs[] = { 1, 100000 };
const struct bmp280_chip_info bmp380_chip_info = {
.id_reg = BMP380_REG_ID,
@@ -1305,7 +1664,8 @@ const struct bmp280_chip_info bmp380_chip_info = {
.spi_read_extra_byte = true,
.start_up_time = 2000,
.channels = bmp380_channels,
- .num_channels = 2,
+ .num_channels = ARRAY_SIZE(bmp380_channels),
+ .avail_scan_masks = bmp280_avail_scan_masks,
.oversampling_temp_avail = bmp380_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp380_oversampling_avail),
@@ -1323,11 +1683,18 @@ const struct bmp280_chip_info bmp380_chip_info = {
.num_iir_filter_coeffs_avail = ARRAY_SIZE(bmp380_iir_filter_coeffs_avail),
.iir_filter_coeff_default = 2,
+ .temp_coeffs = bmp380_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL,
+ .press_coeffs = bmp380_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bmp380_chip_config,
.read_temp = bmp380_read_temp,
.read_press = bmp380_read_press,
.read_calib = bmp380_read_calib,
.preinit = bmp380_preinit,
+
+ .trigger_handler = bmp380_trigger_handler,
};
EXPORT_SYMBOL_NS(bmp380_chip_info, IIO_BMP280);
@@ -1443,58 +1810,48 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
* for what is expected on IIO ABI.
*/
-static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
+static int bmp580_read_temp(struct bmp280_data *data, s32 *raw_temp)
{
- s32 raw_temp;
+ s32 value_temp;
int ret;
- ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB, data->buf,
- sizeof(data->buf));
+ ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB,
+ data->buf, BMP280_NUM_TEMP_BYTES);
if (ret) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
}
- raw_temp = get_unaligned_le24(data->buf);
- if (raw_temp == BMP580_TEMP_SKIPPED) {
+ value_temp = get_unaligned_le24(data->buf);
+ if (value_temp == BMP580_TEMP_SKIPPED) {
dev_err(data->dev, "reading temperature skipped\n");
return -EIO;
}
+ *raw_temp = sign_extend32(value_temp, 23);
- /*
- * Temperature is returned in Celsius degrees in fractional
- * form down 2^16. We rescale by x1000 to return millidegrees
- * Celsius to respect IIO ABI.
- */
- raw_temp = sign_extend32(raw_temp, 23);
- *val = ((s64)raw_temp * 1000) / (1 << 16);
- return IIO_VAL_INT;
+ return 0;
}
-static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
+static int bmp580_read_press(struct bmp280_data *data, u32 *raw_press)
{
- u32 raw_press;
+ u32 value_press;
int ret;
- ret = regmap_bulk_read(data->regmap, BMP580_REG_PRESS_XLSB, data->buf,
- sizeof(data->buf));
+ ret = regmap_bulk_read(data->regmap, BMP580_REG_PRESS_XLSB,
+ data->buf, BMP280_NUM_PRESS_BYTES);
if (ret) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
}
- raw_press = get_unaligned_le24(data->buf);
- if (raw_press == BMP580_PRESS_SKIPPED) {
+ value_press = get_unaligned_le24(data->buf);
+ if (value_press == BMP580_PRESS_SKIPPED) {
dev_err(data->dev, "reading pressure skipped\n");
return -EIO;
}
- /*
- * Pressure is returned in Pascals in fractional form down 2^16.
- * We rescale /1000 to convert to kilopascal to respect IIO ABI.
- */
- *val = raw_press;
- *val2 = 64000; /* 2^6 * 1000 */
- return IIO_VAL_FRACTIONAL;
+ *raw_press = value_press;
+
+ return 0;
}
static const int bmp580_odr_table[][2] = {
@@ -1828,8 +2185,43 @@ static int bmp580_chip_config(struct bmp280_data *data)
return 0;
}
+static irqreturn_t bmp580_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Burst read data registers */
+ ret = regmap_bulk_read(data->regmap, BMP580_REG_TEMP_XLSB,
+ data->buf, BMP280_BURST_READ_BYTES);
+ if (ret) {
+ dev_err(data->dev, "failed to burst read sensor data\n");
+ goto out;
+ }
+
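+ /*
+ * The BMP580 output is already compensated: copy the 24-bit
+ * little-endian words into the first three bytes of each 32-bit
+ * scan element, matching the IIO_LE scan_type declared above.
+ */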
+ /* Temperature calculations */
+ memcpy(&data->sensor_data[1], &data->buf[0], 3);
+
+ /* Pressure calculations */
+ memcpy(&data->sensor_data[0], &data->buf[3], 3);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const int bmp580_oversampling_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128 };
static const u8 bmp580_chip_ids[] = { BMP580_CHIP_ID, BMP580_CHIP_ID_ALT };
+/* Equivalent to { 1000, 16 }, reduced to avoid 32-bit overflow */
+static const int bmp580_temp_coeffs[] = { 125, 13 };
+static const int bmp580_press_coeffs[] = { 1, 64000 };
const struct bmp280_chip_info bmp580_chip_info = {
.id_reg = BMP580_REG_CHIP_ID,
@@ -1837,8 +2229,9 @@ const struct bmp280_chip_info bmp580_chip_info = {
.num_chip_id = ARRAY_SIZE(bmp580_chip_ids),
.regmap_config = &bmp580_regmap_config,
.start_up_time = 2000,
- .channels = bmp380_channels,
- .num_channels = 2,
+ .channels = bmp580_channels,
+ .num_channels = ARRAY_SIZE(bmp580_channels),
+ .avail_scan_masks = bmp280_avail_scan_masks,
.oversampling_temp_avail = bmp580_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp580_oversampling_avail),
@@ -1856,16 +2249,23 @@ const struct bmp280_chip_info bmp580_chip_info = {
.num_iir_filter_coeffs_avail = ARRAY_SIZE(bmp380_iir_filter_coeffs_avail),
.iir_filter_coeff_default = 2,
+ .temp_coeffs = bmp580_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL_LOG2,
+ .press_coeffs = bmp580_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bmp580_chip_config,
.read_temp = bmp580_read_temp,
.read_press = bmp580_read_press,
.preinit = bmp580_preinit,
+
+ .trigger_handler = bmp580_trigger_handler,
};
EXPORT_SYMBOL_NS(bmp580_chip_info, IIO_BMP280);
static int bmp180_wait_for_eoc(struct bmp280_data *data, u8 ctrl_meas)
{
- const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
+ static const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
unsigned int delay_us;
unsigned int ctrl;
int ret;
@@ -2011,9 +2411,8 @@ static s32 bmp180_compensate_temp(struct bmp280_data *data, u32 adc_temp)
return (bmp180_calc_t_fine(data, adc_temp) + 8) / 16;
}
-static int bmp180_read_temp(struct bmp280_data *data, int *val, int *val2)
+static int bmp180_read_temp(struct bmp280_data *data, s32 *comp_temp)
{
- s32 comp_temp;
u32 adc_temp;
int ret;
@@ -2021,10 +2420,9 @@ static int bmp180_read_temp(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_temp = bmp180_compensate_temp(data, adc_temp);
+ *comp_temp = bmp180_compensate_temp(data, adc_temp);
- *val = comp_temp * 100;
- return IIO_VAL_INT;
+ return 0;
}
static int bmp180_read_press_adc(struct bmp280_data *data, u32 *adc_press)
@@ -2040,7 +2438,7 @@ static int bmp180_read_press_adc(struct bmp280_data *data, u32 *adc_press)
return ret;
ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB,
- data->buf, sizeof(data->buf));
+ data->buf, BMP280_NUM_PRESS_BYTES);
if (ret) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -2087,9 +2485,9 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, u32 adc_press,
return p + ((x1 + x2 + 3791) >> 4);
}
-static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2)
+static int bmp180_read_press(struct bmp280_data *data, u32 *comp_press)
{
- u32 comp_press, adc_press;
+ u32 adc_press;
s32 t_fine;
int ret;
@@ -2101,12 +2499,9 @@ static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2)
if (ret)
return ret;
- comp_press = bmp180_compensate_press(data, adc_press, t_fine);
-
- *val = comp_press;
- *val2 = 1000;
+ *comp_press = bmp180_compensate_press(data, adc_press, t_fine);
- return IIO_VAL_FRACTIONAL;
+ return 0;
}
static int bmp180_chip_config(struct bmp280_data *data)
@@ -2114,9 +2509,41 @@ static int bmp180_chip_config(struct bmp280_data *data)
return 0;
}
+static irqreturn_t bmp180_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmp280_data *data = iio_priv(indio_dev);
+ int ret, chan_value;
+
+ guard(mutex)(&data->lock);
+
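+ /*
+ * No burst mode on the BMP180: temperature and pressure conversions
+ * are triggered and read out one after the other.
+ */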
+ ret = bmp180_read_temp(data, &chan_value);
+ if (ret)
+ goto out;
+
+ data->sensor_data[1] = chan_value;
+
+ ret = bmp180_read_press(data, &chan_value);
+ if (ret)
+ goto out;
+
+ data->sensor_data[0] = chan_value;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->sensor_data,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static const int bmp180_oversampling_temp_avail[] = { 1 };
static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
static const u8 bmp180_chip_ids[] = { BMP180_CHIP_ID };
+static const int bmp180_temp_coeffs[] = { 100, 1 };
+static const int bmp180_press_coeffs[] = { 1, 1000 };
const struct bmp280_chip_info bmp180_chip_info = {
.id_reg = BMP280_REG_ID,
@@ -2125,7 +2552,8 @@ const struct bmp280_chip_info bmp180_chip_info = {
.regmap_config = &bmp180_regmap_config,
.start_up_time = 2000,
.channels = bmp280_channels,
- .num_channels = 2,
+ .num_channels = ARRAY_SIZE(bmp280_channels),
+ .avail_scan_masks = bmp280_avail_scan_masks,
.oversampling_temp_avail = bmp180_oversampling_temp_avail,
.num_oversampling_temp_avail =
@@ -2137,10 +2565,17 @@ const struct bmp280_chip_info bmp180_chip_info = {
ARRAY_SIZE(bmp180_oversampling_press_avail),
.oversampling_press_default = BMP180_MEAS_PRESS_8X,
+ .temp_coeffs = bmp180_temp_coeffs,
+ .temp_coeffs_type = IIO_VAL_FRACTIONAL,
+ .press_coeffs = bmp180_press_coeffs,
+ .press_coeffs_type = IIO_VAL_FRACTIONAL,
+
.chip_config = bmp180_chip_config,
.read_temp = bmp180_read_temp,
.read_press = bmp180_read_press,
.read_calib = bmp180_read_calib,
+
+ .trigger_handler = bmp180_trigger_handler,
};
EXPORT_SYMBOL_NS(bmp180_chip_info, IIO_BMP280);
@@ -2186,6 +2621,30 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
return 0;
}
+static int bmp280_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct bmp280_data *data = iio_priv(indio_dev);
+
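+ /* Keep the device powered while the buffer is enabled */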
+ pm_runtime_get_sync(data->dev);
+
+ return 0;
+}
+
+static int bmp280_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct bmp280_data *data = iio_priv(indio_dev);
+
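+ /* Drop the preenable reference and allow autosuspend */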
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops bmp280_buffer_setup_ops = {
+ .preenable = bmp280_buffer_preenable,
+ .postdisable = bmp280_buffer_postdisable,
+};
+
static void bmp280_pm_disable(void *data)
{
struct device *dev = data;
@@ -2232,6 +2691,7 @@ int bmp280_common_probe(struct device *dev,
/* Apply initial values from chip info structure */
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
+ indio_dev->available_scan_masks = chip_info->avail_scan_masks;
data->oversampling_press = chip_info->oversampling_press_default;
data->oversampling_humid = chip_info->oversampling_humid_default;
data->oversampling_temp = chip_info->oversampling_temp_default;
@@ -2317,6 +2777,14 @@ int bmp280_common_probe(struct device *dev,
"failed to read calibration coefficients\n");
}
+ ret = devm_iio_triggered_buffer_setup(data->dev, indio_dev,
+ iio_pollfunc_store_time,
+ data->chip_info->trigger_handler,
+ &bmp280_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "iio triggered buffer setup failed\n");
+
/*
* Attempt to grab an optional EOC IRQ - only the BMP085 has this
* however as it happens, the BMP085 shares the chip ID of BMP180
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 34e3bc758493..5c3a63b4327c 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include "bmp280.h"
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
index fa52839474b1..d27d68edd906 100644
--- a/drivers/iio/pressure/bmp280-regmap.c
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -41,7 +41,7 @@ const struct regmap_config bmp180_regmap_config = {
};
EXPORT_SYMBOL_NS(bmp180_regmap_config, IIO_BMP280);
-static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+static bool bme280_is_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case BMP280_REG_CONFIG:
@@ -54,9 +54,37 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
}
}
+static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case BMP280_REG_TEMP_XLSB:
+ case BMP280_REG_TEMP_LSB:
+ case BMP280_REG_TEMP_MSB:
+ case BMP280_REG_PRESS_XLSB:
+ case BMP280_REG_PRESS_LSB:
+ case BMP280_REG_PRESS_MSB:
+ case BMP280_REG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bme280_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
case BME280_REG_HUMIDITY_LSB:
case BME280_REG_HUMIDITY_MSB:
case BMP280_REG_TEMP_XLSB:
@@ -71,7 +99,6 @@ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
}
-
static bool bmp380_is_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -167,7 +194,7 @@ const struct regmap_config bmp280_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = BME280_REG_HUMIDITY_LSB,
+ .max_register = BMP280_REG_TEMP_XLSB,
.cache_type = REGCACHE_RBTREE,
.writeable_reg = bmp280_is_writeable_reg,
@@ -175,6 +202,18 @@ const struct regmap_config bmp280_regmap_config = {
};
EXPORT_SYMBOL_NS(bmp280_regmap_config, IIO_BMP280);
+const struct regmap_config bme280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BME280_REG_HUMIDITY_LSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bme280_is_writeable_reg,
+ .volatile_reg = bme280_is_volatile_reg,
+};
+EXPORT_SYMBOL_NS(bme280_regmap_config, IIO_BMP280);
+
const struct regmap_config bmp380_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 62b4e58104cf..d18549d9bb64 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -5,10 +5,10 @@
* Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
*/
#include <linux/bits.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/spi/spi.h>
#include "bmp280.h"
@@ -40,14 +40,10 @@ static int bmp380_regmap_spi_read(void *context, const void *reg,
size_t reg_size, void *val, size_t val_size)
{
struct spi_device *spi = to_spi_device(context);
- u8 rx_buf[4];
+ u8 rx_buf[BME280_BURST_READ_BYTES + 1];
ssize_t status;
- /*
- * Maximum number of consecutive bytes read for a temperature or
- * pressure measurement is 3.
- */
- if (val_size > 3)
+ if (val_size > BME280_BURST_READ_BYTES)
return -EINVAL;
/*
@@ -64,14 +60,14 @@ static int bmp380_regmap_spi_read(void *context, const void *reg,
return 0;
}
-static struct regmap_bus bmp280_regmap_bus = {
+static const struct regmap_bus bmp280_regmap_bus = {
.write = bmp280_regmap_spi_write,
.read = bmp280_regmap_spi_read,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
-static struct regmap_bus bmp380_regmap_bus = {
+static const struct regmap_bus bmp380_regmap_bus = {
.write = bmp280_regmap_spi_write,
.read = bmp380_regmap_spi_read,
.read_flag_mask = BIT(7),
@@ -83,7 +79,7 @@ static int bmp280_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct bmp280_chip_info *chip_info;
- struct regmap_bus *bmp_regmap_bus;
+ const struct regmap_bus *bmp_regmap_bus;
struct regmap *regmap;
int ret;
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 7c30e4d523be..ccacc67c1473 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -1,10 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bitops.h>
#include <linux/device.h>
-#include <linux/iio/iio.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
/* BMP580 specific registers */
#define BMP580_REG_CMD 0x7E
@@ -304,6 +304,16 @@
#define BMP280_PRESS_SKIPPED 0x80000
#define BMP280_HUMIDITY_SKIPPED 0x8000
+/* Number of bytes for each value */
+#define BMP280_NUM_PRESS_BYTES 3
+#define BMP280_NUM_TEMP_BYTES 3
+#define BME280_NUM_HUMIDITY_BYTES 2
+#define BMP280_BURST_READ_BYTES (BMP280_NUM_PRESS_BYTES + \
+ BMP280_NUM_TEMP_BYTES)
+#define BME280_BURST_READ_BYTES (BMP280_NUM_PRESS_BYTES + \
+ BMP280_NUM_TEMP_BYTES + \
+ BME280_NUM_HUMIDITY_BYTES)
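+/*
+ * Burst reads pull pressure and temperature (plus humidity on the
+ * BME280) in one transfer, so the samples belong to the same
+ * measurement cycle.
+ */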
+
/* Core exported structs */
static const char *const bmp280_supply_names[] = {
@@ -398,12 +408,18 @@ struct bmp280_data {
int sampling_freq;
/*
+ * Data to push to userspace triggered buffer. Up to 3 channels and
+ * s64 timestamp, aligned.
+ */
+ s32 sensor_data[6] __aligned(8);
+
+ /*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
union {
/* Sensor data buffer */
- u8 buf[3];
+ u8 buf[BME280_BURST_READ_BYTES];
/* Calibration data buffers */
__le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
__be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
@@ -425,6 +441,7 @@ struct bmp280_chip_info {
const struct iio_chan_spec *channels;
int num_channels;
unsigned int start_up_time;
+ const unsigned long *avail_scan_masks;
const int *oversampling_temp_avail;
int num_oversampling_temp_avail;
@@ -446,12 +463,21 @@ struct bmp280_chip_info {
int num_sampling_freq_avail;
int sampling_freq_default;
+ const int *temp_coeffs;
+ const int temp_coeffs_type;
+ const int *press_coeffs;
+ const int press_coeffs_type;
+ const int *humid_coeffs;
+ const int humid_coeffs_type;
+
int (*chip_config)(struct bmp280_data *data);
- int (*read_temp)(struct bmp280_data *data, int *val, int *val2);
- int (*read_press)(struct bmp280_data *data, int *val, int *val2);
- int (*read_humid)(struct bmp280_data *data, int *val, int *val2);
+ int (*read_temp)(struct bmp280_data *data, s32 *adc_temp);
+ int (*read_press)(struct bmp280_data *data, u32 *adc_press);
+ int (*read_humid)(struct bmp280_data *data, u32 *adc_humidity);
int (*read_calib)(struct bmp280_data *data);
int (*preinit)(struct bmp280_data *data);
+
+ irqreturn_t (*trigger_handler)(int irq, void *p);
};
/* Chip infos for each variant */
@@ -464,6 +490,7 @@ extern const struct bmp280_chip_info bmp580_chip_info;
/* Regmap configurations */
extern const struct regmap_config bmp180_regmap_config;
extern const struct regmap_config bmp280_regmap_config;
+extern const struct regmap_config bme280_regmap_config;
extern const struct regmap_config bmp380_regmap_config;
extern const struct regmap_config bmp580_regmap_config;
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 0bba4c5a8d40..c1cea9d40424 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -256,8 +256,7 @@ static irqreturn_t dlh_trigger_handler(int irq, void *private)
if (ret)
goto out;
- for_each_set_bit(chn, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, chn) {
memcpy(&tmp_buf[i++],
&st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
DLH_NUM_DATA_BYTES);
diff --git a/drivers/iio/pressure/sdp500.c b/drivers/iio/pressure/sdp500.c
new file mode 100644
index 000000000000..6ff32e3fa637
--- /dev/null
+++ b/drivers/iio/pressure/sdp500.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Sensirion sdp500 and sdp510 pressure sensors
+ *
+ * Datasheet: https://sensirion.com/resource/datasheet/sdp600
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc8.h>
+#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#define SDP500_CRC8_POLYNOMIAL 0x31 /* x8+x5+x4+1 (normalized to 0x31) */
+#define SDP500_READ_SIZE 3
+
+#define SDP500_I2C_START_MEAS 0xF1
+
+struct sdp500_data {
+ struct device *dev;
+};
+
+DECLARE_CRC8_TABLE(sdp500_crc8_table);
+
+static int sdp500_start_measurement(struct sdp500_data *data)
+{
+ struct i2c_client *client = to_i2c_client(data->dev);
+
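+ /* A single command byte puts the sensor into measurement mode;
+ * subsequent reads return the latest sample.
+ */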
+ return i2c_smbus_write_byte(client, SDP500_I2C_START_MEAS);
+}
+
+static const struct iio_chan_spec sdp500_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int sdp500_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ u8 rxbuf[SDP500_READ_SIZE];
+ u8 received_crc, calculated_crc;
+ struct sdp500_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = to_i2c_client(data->dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_master_recv(client, rxbuf, SDP500_READ_SIZE);
+ if (ret < 0) {
+ dev_err(data->dev, "Failed to receive data");
+ return ret;
+ }
+ if (ret != SDP500_READ_SIZE) {
+ dev_err(data->dev, "Data is received wrongly");
+ return -EIO;
+ }
+
+ received_crc = rxbuf[2];
+ calculated_crc = crc8(sdp500_crc8_table, rxbuf,
+ sizeof(rxbuf) - 1, 0x00);
+ if (received_crc != calculated_crc) {
+ dev_err(data->dev,
+ "calculated crc = 0x%.2X, received 0x%.2X",
+ calculated_crc, received_crc);
+ return -EIO;
+ }
+
+ *val = get_unaligned_be16(rxbuf);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
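+ /*
+ * Scale factor from the datasheet linked above: the raw reading
+ * divided by 60 gives the pressure value.
+ */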
+ *val = 1;
+ *val2 = 60;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info sdp500_info = {
+ .read_raw = &sdp500_read_raw,
+};
+
+static int sdp500_probe(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev;
+ struct sdp500_data *data;
+ struct device *dev = &client->dev;
+ int ret;
+ u8 rxbuf[SDP500_READ_SIZE];
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable regulator\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ /* Has to be done before the first I2C communication */
+ crc8_populate_msb(sdp500_crc8_table, SDP500_CRC8_POLYNOMIAL);
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+
+ indio_dev->name = "sdp500";
+ indio_dev->channels = sdp500_channels;
+ indio_dev->info = &sdp500_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(sdp500_channels);
+
+ ret = sdp500_start_measurement(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to start measurement");
+
+ /* The first measurement is invalid; read it out to discard it */
+ i2c_master_recv(client, rxbuf, SDP500_READ_SIZE);
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register indio_dev");
+
+ return 0;
+}
+
+static const struct i2c_device_id sdp500_id[] = {
+ { "sdp500" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sdp500_id);
+
+static const struct of_device_id sdp500_of_match[] = {
+ { .compatible = "sensirion,sdp500" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdp500_of_match);
+
+static struct i2c_driver sdp500_driver = {
+ .driver = {
+ .name = "sensirion,sdp500",
+ .of_match_table = sdp500_of_match,
+ },
+ .probe = sdp500_probe,
+ .id_table = sdp500_id,
+};
+module_i2c_driver(sdp500_driver);
+
+MODULE_AUTHOR("Thomas Sioutas <thomas.sioutas@prodrive-technologies.com>");
+MODULE_DESCRIPTION("Driver for Sensirion SDP500 differential pressure sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 2ca3b0bc5eba..31c679074b25 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -32,6 +32,20 @@ config CROS_EC_MKBP_PROXIMITY
To compile this driver as a module, choose M here: the
module will be called cros_ec_mkbp_proximity.
+config HX9023S
+ tristate "TYHX HX9023S SAR sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for the TYHX HX9023S capacitive SAR
+ sensor. This sensor is used for proximity detection applications.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hx9023s.
+
config IRSD200
tristate "Murata IRS-D200 PIR sensor"
select IIO_BUFFER
@@ -219,4 +233,15 @@ config VL53L0X_I2C
To compile this driver as a module, choose M here: the
module will be called vl53l0x-i2c.
+config AW96103
+ tristate "AW96103/AW96105 Awinic proximity sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for Awinic's AW96103/AW96105 capacitive
+ proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aw96103.
+
endmenu
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index f36598380446..c5e76995764a 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_CROS_EC_MKBP_PROXIMITY) += cros_ec_mkbp_proximity.o
+obj-$(CONFIG_HX9023S) += hx9023s.o
obj-$(CONFIG_IRSD200) += irsd200.o
obj-$(CONFIG_ISL29501) += isl29501.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
@@ -21,4 +22,5 @@ obj-$(CONFIG_SX_COMMON) += sx_common.o
obj-$(CONFIG_SX9500) += sx9500.o
obj-$(CONFIG_VCNL3020) += vcnl3020.o
obj-$(CONFIG_VL53L0X_I2C) += vl53l0x-i2c.o
+obj-$(CONFIG_AW96103) += aw96103.o
diff --git a/drivers/iio/proximity/aw96103.c b/drivers/iio/proximity/aw96103.c
new file mode 100644
index 000000000000..db9d78e961fd
--- /dev/null
+++ b/drivers/iio/proximity/aw96103.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AWINIC aw96103 proximity sensor driver
+ *
+ * Author: Wang Shuaijie <wangshuaijie@awinic.com>
+ *
+ * Copyright (c) 2024 awinic Technology CO., LTD
+ */
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define AW_DATA_PROCESS_FACTOR 1024
+#define AW96103_CHIP_ID 0xa961
+#define AW96103_BIN_VALID_DATA_OFFSET 64
+#define AW96103_BIN_DATA_LEN_OFFSET 16
+#define AW96103_BIN_DATA_REG_NUM_SIZE 4
+#define AW96103_BIN_CHIP_TYPE_SIZE 8
+#define AW96103_BIN_CHIP_TYPE_OFFSET 24
+
+#define AW96103_REG_SCANCTRL0 0x0000
+#define AW96103_REG_STAT0 0x0090
+#define AW96103_REG_BLFILT_CH0 0x00A8
+#define AW96103_REG_BLRSTRNG_CH0 0x00B4
+#define AW96103_REG_DIFF_CH0 0x0240
+#define AW96103_REG_FWVER2 0x0410
+#define AW96103_REG_CMD 0xF008
+#define AW96103_REG_IRQSRC 0xF080
+#define AW96103_REG_IRQEN 0xF084
+#define AW96103_REG_RESET 0xFF0C
+#define AW96103_REG_CHIPID 0xFF10
+#define AW96103_REG_EEDA0 0x0408
+#define AW96103_REG_EEDA1 0x040C
+#define AW96103_REG_PROXCTRL_CH0 0x00B0
+#define AW96103_REG_PROXTH0_CH0 0x00B8
+#define AW96103_PROXTH_CH_STEP 0x3C
+#define AW96103_THHYST_MASK GENMASK(13, 12)
+#define AW96103_INDEB_MASK GENMASK(11, 10)
+#define AW96103_OUTDEB_MASK GENMASK(9, 8)
+#define AW96103_INITOVERIRQ_MASK BIT(0)
+#define AW96103_BLFILT_CH_STEP 0x3C
+#define AW96103_BLRSTRNG_MASK GENMASK(5, 0)
+#define AW96103_CHIPID_MASK GENMASK(31, 16)
+#define AW96103_BLERRTRIG_MASK BIT(25)
+#define AW96103_CHAN_EN_MASK GENMASK(5, 0)
+#define AW96103_REG_PROXCTRL_CH(x) \
+ (AW96103_REG_PROXCTRL_CH0 + (x) * AW96103_PROXTH_CH_STEP)
+
+#define AW96103_REG_PROXTH0_CH(x) \
+ (AW96103_REG_PROXTH0_CH0 + (x) * AW96103_PROXTH_CH_STEP)
+
+/**
+ * struct aw_bin - Store the data obtained from parsing the configuration file.
+ * @chip_type: Frame header information-chip type
+ * @valid_data_len: Length of valid data obtained after parsing
+ * @valid_data_addr: The offset address of the valid data obtained
+ * after parsing relative to info
+ * @len: The size of the bin file obtained from the firmware
+ * @data: Store the bin file obtained from the firmware
+ */
+struct aw_bin {
+ unsigned char chip_type[8];
+ unsigned int valid_data_len;
+ unsigned int valid_data_addr;
+ unsigned int len;
+ unsigned char data[] __counted_by(len);
+};
+
+enum aw96103_sar_vers {
+ AW96103 = 2,
+ AW96103A = 6,
+ AW96103B = 0xa,
+};
+
+enum aw96103_operation_mode {
+ AW96103_ACTIVE_MODE = 1,
+ AW96103_SLEEP_MODE = 2,
+ AW96103_DEEPSLEEP_MODE = 3,
+ AW96103B_DEEPSLEEP_MODE = 4,
+};
+
+enum aw96103_sensor_type {
+ AW96103_VAL,
+ AW96105_VAL,
+};
+
+struct aw_channels_info {
+ bool used;
+ unsigned int old_irq_status;
+};
+
+struct aw_chip_info {
+ const char *name;
+ struct iio_chan_spec const *channels;
+ int num_channels;
+};
+
+struct aw96103 {
+ unsigned int hostirqen;
+ struct regmap *regmap;
+ struct device *dev;
+ /*
+ * There is one more logical channel than there are physical
+ * channels; the extra one is used for temperature detection
+ * rather than proximity status detection. Which channel serves
+ * temperature detection is determined by the register
+ * configuration.
+ */
+ struct aw_channels_info channels_arr[6];
+ unsigned int max_channels;
+ unsigned int chan_en;
+};
+
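+/*
+ * Fallback register configuration, used when no firmware file can be
+ * loaded: a flat array of { address, value } pairs, consumed two
+ * entries at a time by aw96103_para_loaded().
+ */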
+static const unsigned int aw96103_reg_default[] = {
+ 0x0000, 0x00003f3f, 0x0004, 0x00000064, 0x0008, 0x0017c11e,
+ 0x000c, 0x05000000, 0x0010, 0x00093ffd, 0x0014, 0x19240009,
+ 0x0018, 0xd81c0207, 0x001c, 0xff000000, 0x0020, 0x00241900,
+ 0x0024, 0x00093ff7, 0x0028, 0x58020009, 0x002c, 0xd81c0207,
+ 0x0030, 0xff000000, 0x0034, 0x00025800, 0x0038, 0x00093fdf,
+ 0x003c, 0x7d3b0009, 0x0040, 0xd81c0207, 0x0044, 0xff000000,
+ 0x0048, 0x003b7d00, 0x004c, 0x00093f7f, 0x0050, 0xe9310009,
+ 0x0054, 0xd81c0207, 0x0058, 0xff000000, 0x005c, 0x0031e900,
+ 0x0060, 0x00093dff, 0x0064, 0x1a0c0009, 0x0068, 0xd81c0207,
+ 0x006c, 0xff000000, 0x0070, 0x000c1a00, 0x0074, 0x80093fff,
+ 0x0078, 0x043d0009, 0x007c, 0xd81c0207, 0x0080, 0xff000000,
+ 0x0084, 0x003d0400, 0x00a0, 0xe6400000, 0x00a4, 0x00000000,
+ 0x00a8, 0x010408d2, 0x00ac, 0x00000000, 0x00b0, 0x00000000,
+ 0x00b8, 0x00005fff, 0x00bc, 0x00000000, 0x00c0, 0x00000000,
+ 0x00c4, 0x00000000, 0x00c8, 0x00000000, 0x00cc, 0x00000000,
+ 0x00d0, 0x00000000, 0x00d4, 0x00000000, 0x00d8, 0x00000000,
+ 0x00dc, 0xe6447800, 0x00e0, 0x78000000, 0x00e4, 0x010408d2,
+ 0x00e8, 0x00000000, 0x00ec, 0x00000000, 0x00f4, 0x00005fff,
+ 0x00f8, 0x00000000, 0x00fc, 0x00000000, 0x0100, 0x00000000,
+ 0x0104, 0x00000000, 0x0108, 0x00000000, 0x010c, 0x02000000,
+ 0x0110, 0x00000000, 0x0114, 0x00000000, 0x0118, 0xe6447800,
+ 0x011c, 0x78000000, 0x0120, 0x010408d2, 0x0124, 0x00000000,
+ 0x0128, 0x00000000, 0x0130, 0x00005fff, 0x0134, 0x00000000,
+ 0x0138, 0x00000000, 0x013c, 0x00000000, 0x0140, 0x00000000,
+ 0x0144, 0x00000000, 0x0148, 0x02000000, 0x014c, 0x00000000,
+ 0x0150, 0x00000000, 0x0154, 0xe6447800, 0x0158, 0x78000000,
+ 0x015c, 0x010408d2, 0x0160, 0x00000000, 0x0164, 0x00000000,
+ 0x016c, 0x00005fff, 0x0170, 0x00000000, 0x0174, 0x00000000,
+ 0x0178, 0x00000000, 0x017c, 0x00000000, 0x0180, 0x00000000,
+ 0x0184, 0x02000000, 0x0188, 0x00000000, 0x018c, 0x00000000,
+ 0x0190, 0xe6447800, 0x0194, 0x78000000, 0x0198, 0x010408d2,
+ 0x019c, 0x00000000, 0x01a0, 0x00000000, 0x01a8, 0x00005fff,
+ 0x01ac, 0x00000000, 0x01b0, 0x00000000, 0x01b4, 0x00000000,
+ 0x01b8, 0x00000000, 0x01bc, 0x00000000, 0x01c0, 0x02000000,
+ 0x01c4, 0x00000000, 0x01c8, 0x00000000, 0x01cc, 0xe6407800,
+ 0x01d0, 0x78000000, 0x01d4, 0x010408d2, 0x01d8, 0x00000000,
+ 0x01dc, 0x00000000, 0x01e4, 0x00005fff, 0x01e8, 0x00000000,
+ 0x01ec, 0x00000000, 0x01f0, 0x00000000, 0x01f4, 0x00000000,
+ 0x01f8, 0x00000000, 0x01fc, 0x02000000, 0x0200, 0x00000000,
+ 0x0204, 0x00000000, 0x0208, 0x00000008, 0x020c, 0x0000000d,
+ 0x41fc, 0x00000000, 0x4400, 0x00000000, 0x4410, 0x00000000,
+ 0x4420, 0x00000000, 0x4430, 0x00000000, 0x4440, 0x00000000,
+ 0x4450, 0x00000000, 0x4460, 0x00000000, 0x4470, 0x00000000,
+ 0xf080, 0x00003018, 0xf084, 0x00000fff, 0xf800, 0x00000000,
+ 0xf804, 0x00002e00, 0xf8d0, 0x00000001, 0xf8d4, 0x00000000,
+ 0xff00, 0x00000301, 0xff0c, 0x01000000, 0xffe0, 0x00000000,
+ 0xfff4, 0x00004011, 0x0090, 0x00000000, 0x0094, 0x00000000,
+ 0x0098, 0x00000000, 0x009c, 0x3f3f3f3f,
+};
+
+static const struct iio_event_spec aw_common_events[3] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_HYSTERESIS) |
+ BIT(IIO_EV_INFO_VALUE),
+ }
+};
+
+#define AW_IIO_CHANNEL(idx) \
+{ \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .indexed = 1, \
+ .channel = idx, \
+ .event_spec = aw_common_events, \
+ .num_event_specs = ARRAY_SIZE(aw_common_events), \
+}
+
+static const struct iio_chan_spec aw96103_channels[] = {
+ AW_IIO_CHANNEL(0),
+ AW_IIO_CHANNEL(1),
+ AW_IIO_CHANNEL(2),
+ AW_IIO_CHANNEL(3),
+};
+
+static const struct iio_chan_spec aw96105_channels[] = {
+ AW_IIO_CHANNEL(0),
+ AW_IIO_CHANNEL(1),
+ AW_IIO_CHANNEL(2),
+ AW_IIO_CHANNEL(3),
+ AW_IIO_CHANNEL(4),
+ AW_IIO_CHANNEL(5),
+};
+
+static const struct aw_chip_info aw_chip_info_tbl[] = {
+ [AW96103_VAL] = {
+ .name = "aw96103_sensor",
+ .channels = aw96103_channels,
+ .num_channels = ARRAY_SIZE(aw96103_channels),
+ },
+ [AW96105_VAL] = {
+ .name = "aw96105_sensor",
+ .channels = aw96105_channels,
+ .num_channels = ARRAY_SIZE(aw96105_channels),
+ },
+};
+
+static void aw96103_parsing_bin_file(struct aw_bin *bin)
+{
+ bin->valid_data_addr = AW96103_BIN_VALID_DATA_OFFSET;
+ bin->valid_data_len =
+ *(unsigned int *)(bin->data + AW96103_BIN_DATA_LEN_OFFSET) -
+ AW96103_BIN_DATA_REG_NUM_SIZE;
+ memcpy(bin->chip_type, bin->data + AW96103_BIN_CHIP_TYPE_OFFSET,
+ AW96103_BIN_CHIP_TYPE_SIZE);
+}
+
+static const struct regmap_config aw96103_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+};
+
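+/*
+ * Per-channel DIFF registers are laid out 4 bytes apart; raw readings
+ * are scaled down by AW_DATA_PROCESS_FACTOR (1024) before being
+ * reported to user space.
+ */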
+static int aw96103_get_diff_raw(struct aw96103 *aw96103, unsigned int chan,
+ int *buf)
+{
+ u32 data;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_DIFF_CH0 + chan * 4, &data);
+ if (ret)
+ return ret;
+ *buf = (int)(data / AW_DATA_PROCESS_FACTOR);
+
+ return 0;
+}
+
+static int aw96103_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = aw96103_get_diff_raw(aw96103, chan->channel, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aw96103_read_thresh(struct aw96103 *aw96103,
+ const struct iio_chan_spec *chan, int *val)
+{
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_PROXTH0_CH(chan->channel), val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+static int aw96103_read_out_debounce(struct aw96103 *aw96103,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel), &reg_val);
+ if (ret)
+ return ret;
+ *val = FIELD_GET(AW96103_OUTDEB_MASK, reg_val);
+
+ return IIO_VAL_INT;
+}
+
+static int aw96103_read_in_debounce(struct aw96103 *aw96103,
+ const struct iio_chan_spec *chan, int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel), &reg_val);
+ if (ret)
+ return ret;
+ *val = FIELD_GET(AW96103_INDEB_MASK, reg_val);
+
+ return IIO_VAL_INT;
+}
+
+static int aw96103_read_hysteresis(struct aw96103 *aw96103,
+ const struct iio_chan_spec *chan, int *val)
+{
+ unsigned int reg_val;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel), &reg_val);
+ if (ret)
+ return ret;
+ *val = FIELD_GET(AW96103_THHYST_MASK, reg_val);
+
+ return IIO_VAL_INT;
+}
+
+static int aw96103_read_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return aw96103_read_thresh(aw96103, chan, val);
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return aw96103_read_out_debounce(aw96103, chan, val);
+ case IIO_EV_DIR_FALLING:
+ return aw96103_read_in_debounce(aw96103, chan, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_HYSTERESIS:
+ return aw96103_read_hysteresis(aw96103, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aw96103_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return regmap_write(aw96103->regmap,
+ AW96103_REG_PROXTH0_CH(chan->channel), val);
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return regmap_update_bits(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel),
+ AW96103_OUTDEB_MASK,
+ FIELD_PREP(AW96103_OUTDEB_MASK, val));
+
+ case IIO_EV_DIR_FALLING:
+ return regmap_update_bits(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel),
+ AW96103_INDEB_MASK,
+ FIELD_PREP(AW96103_INDEB_MASK, val));
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_HYSTERESIS:
+ return regmap_update_bits(aw96103->regmap,
+ AW96103_REG_PROXCTRL_CH(chan->channel),
+ AW96103_THHYST_MASK,
+ FIELD_PREP(AW96103_THHYST_MASK, val));
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aw96103_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+
+ return aw96103->channels_arr[chan->channel].used;
+}
+
+static int aw96103_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+
+ aw96103->channels_arr[chan->channel].used = !!state;
+
+ return regmap_update_bits(aw96103->regmap, AW96103_REG_SCANCTRL0,
+ BIT(chan->channel),
+ state ? BIT(chan->channel) : 0);
+}
+
+static const struct iio_info iio_info = {
+ .read_raw = aw96103_read_raw,
+ .read_event_value = aw96103_read_event_val,
+ .write_event_value = aw96103_write_event_val,
+ .read_event_config = aw96103_read_event_config,
+ .write_event_config = aw96103_write_event_config,
+};
+
+static int aw96103_channel_scan_start(struct aw96103 *aw96103)
+{
+ int ret;
+
+ ret = regmap_write(aw96103->regmap, AW96103_REG_CMD,
+ AW96103_ACTIVE_MODE);
+ if (ret)
+ return ret;
+
+ return regmap_write(aw96103->regmap, AW96103_REG_IRQEN,
+ aw96103->hostirqen);
+}
+
+static int aw96103_reg_version_comp(struct aw96103 *aw96103,
+ struct aw_bin *aw_bin)
+{
+ u32 blfilt1_data, fw_ver;
+ unsigned char i;
+ int ret;
+
+ ret = regmap_read(aw96103->regmap, AW96103_REG_FWVER2, &fw_ver);
+ if (ret)
+ return ret;
+ /*
+ * If the chip version is AW96103A and the loaded register
+ * configuration file is for AW96103, special handling of the
+ * AW96103_REG_BLRSTRNG_CH0 register is required.
+ */
+ if ((fw_ver != AW96103A) || (aw_bin->chip_type[7] != '\0'))
+ return 0;
+
+ for (i = 0; i < aw96103->max_channels; i++) {
+ ret = regmap_read(aw96103->regmap,
+ AW96103_REG_BLFILT_CH0 + (AW96103_BLFILT_CH_STEP * i),
+ &blfilt1_data);
+ if (ret)
+ return ret;
+ if (FIELD_GET(AW96103_BLERRTRIG_MASK, blfilt1_data) != 1)
+ return 0;
+
+ ret = regmap_update_bits(aw96103->regmap,
+ AW96103_REG_BLRSTRNG_CH0 + (AW96103_BLFILT_CH_STEP * i),
+ AW96103_BLRSTRNG_MASK, 1 << i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
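+/*
+ * The valid data section of the bin file is a sequence of 6-byte
+ * records: a little-endian 16-bit register address followed by a
+ * little-endian 32-bit value, as decoded in the loop below.
+ */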
+static int aw96103_bin_valid_loaded(struct aw96103 *aw96103,
+ struct aw_bin *aw_bin_data_s)
+{
+ unsigned int start_addr = aw_bin_data_s->valid_data_addr;
+ u32 i, reg_data;
+ u16 reg_addr;
+ int ret;
+
+ for (i = 0; i < aw_bin_data_s->valid_data_len;
+ i += 6, start_addr += 6) {
+ reg_addr = get_unaligned_le16(aw_bin_data_s->data + start_addr);
+ reg_data = get_unaligned_le32(aw_bin_data_s->data +
+ start_addr + 2);
+ if ((reg_addr == AW96103_REG_EEDA0) ||
+ (reg_addr == AW96103_REG_EEDA1))
+ continue;
+ if (reg_addr == AW96103_REG_IRQEN) {
+ aw96103->hostirqen = reg_data;
+ continue;
+ }
+ if (reg_addr == AW96103_REG_SCANCTRL0)
+ aw96103->chan_en = FIELD_GET(AW96103_CHAN_EN_MASK,
+ reg_data);
+
+ ret = regmap_write(aw96103->regmap, reg_addr, reg_data);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = aw96103_reg_version_comp(aw96103, aw_bin_data_s);
+ if (ret)
+ return ret;
+
+ return aw96103_channel_scan_start(aw96103);
+}
+
+static int aw96103_para_loaded(struct aw96103 *aw96103)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(aw96103_reg_default); i += 2) {
+ ret = regmap_write(aw96103->regmap,
+ (u16)aw96103_reg_default[i],
+ (u32)aw96103_reg_default[i + 1]);
+ if (ret)
+ return ret;
+ if (aw96103_reg_default[i] == AW96103_REG_IRQEN)
+ aw96103->hostirqen = aw96103_reg_default[i + 1];
+ else if (aw96103_reg_default[i] == AW96103_REG_SCANCTRL0)
+ aw96103->chan_en = FIELD_GET(AW96103_CHAN_EN_MASK,
+ aw96103_reg_default[i + 1]);
+ }
+
+ return aw96103_channel_scan_start(aw96103);
+}
+
+static int aw96103_cfg_all_loaded(const struct firmware *cont,
+ struct aw96103 *aw96103)
+{
+ if (!cont)
+ return -EINVAL;
+
+ struct aw_bin *aw_bin __free(kfree) =
+ kzalloc(cont->size + sizeof(*aw_bin), GFP_KERNEL);
+ if (!aw_bin) {
+ release_firmware(cont);
+ return -ENOMEM;
+ }
+
+ aw_bin->len = cont->size;
+ memcpy(aw_bin->data, cont->data, cont->size);
+ release_firmware(cont);
+ aw96103_parsing_bin_file(aw_bin);
+
+ return aw96103_bin_valid_loaded(aw96103, aw_bin);
+}
+
+static void aw96103_cfg_update(const struct firmware *fw, void *data)
+{
+ struct aw96103 *aw96103 = data;
+ int ret, i;
+
+ if (!fw || !fw->data) {
+ dev_err(aw96103->dev, "No firmware.\n");
+ return;
+ }
+
+ ret = aw96103_cfg_all_loaded(fw, aw96103);
+ /*
+ * If loading the register configuration file fails,
+ * load the default register configuration in the driver to
+ * ensure the basic functionality of the device.
+ */
+ if (ret) {
+ ret = aw96103_para_loaded(aw96103);
+ if (ret) {
+ dev_err(aw96103->dev, "load param error.\n");
+ return;
+ }
+ }
+
+ for (i = 0; i < aw96103->max_channels; i++) {
+ if ((aw96103->chan_en >> i) & 0x01)
+ aw96103->channels_arr[i].used = true;
+ else
+ aw96103->channels_arr[i].used = false;
+ }
+}
+
+static int aw96103_sw_reset(struct aw96103 *aw96103)
+{
+ int ret;
+
+ ret = regmap_write(aw96103->regmap, AW96103_REG_RESET, 0);
+ /*
+ * After reset, the chip runs its initialization sequence, which
+ * takes about 20 ms to complete.
+ */
+ msleep(20);
+
+ return ret;
+}
+
+enum aw96103_irq_trigger_position {
+ FAR = 0,
+ TRIGGER_TH0 = 0x01,
+ TRIGGER_TH1 = 0x03,
+ TRIGGER_TH2 = 0x07,
+ TRIGGER_TH3 = 0x0f,
+};
+
+static irqreturn_t aw96103_irq(int irq, void *data)
+{
+ unsigned int irq_status, curr_status_val, curr_status;
+ struct iio_dev *indio_dev = data;
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+ int ret, i;
+
+ ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC, &irq_status);
+ if (ret)
+ return IRQ_HANDLED;
+
+ ret = regmap_read(aw96103->regmap, AW96103_REG_STAT0, &curr_status_val);
+ if (ret)
+ return IRQ_HANDLED;
+
+ /*
+ * Walk the per-channel interrupt status: each channel owns one bit
+ * in each of the four status bytes of STAT0, which are packed here
+ * into a 4-bit state value.
+ */
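+ /*
+ * As an illustration (derived from the packing below, not from the
+ * datasheet): for channel 0, STAT0 bits 24, 16, 8 and 0 land in
+ * curr_status bits 0-3, so a register value of 0x01010101 yields
+ * curr_status == 0xf, i.e. TRIGGER_TH3.
+ */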
+ for (i = 0; i < aw96103->max_channels; i++) {
+ if (!aw96103->channels_arr[i].used)
+ continue;
+
+ curr_status = (((curr_status_val >> (24 + i)) & 0x1)) |
+ (((curr_status_val >> (16 + i)) & 0x1) << 1) |
+ (((curr_status_val >> (8 + i)) & 0x1) << 2) |
+ (((curr_status_val >> i) & 0x1) << 3);
+ if (aw96103->channels_arr[i].old_irq_status == curr_status)
+ continue;
+
+ switch (curr_status) {
+ case FAR:
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ break;
+ case TRIGGER_TH0:
+ case TRIGGER_TH1:
+ case TRIGGER_TH2:
+ case TRIGGER_TH3:
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ break;
+ default:
+ return IRQ_HANDLED;
+ }
+ aw96103->channels_arr[i].old_irq_status = curr_status;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int aw96103_interrupt_init(struct iio_dev *indio_dev,
+ struct i2c_client *i2c)
+{
+ struct aw96103 *aw96103 = iio_priv(indio_dev);
+ unsigned int irq_status;
+ int ret;
+
+ ret = regmap_write(aw96103->regmap, AW96103_REG_IRQEN, 0);
+ if (ret)
+ return ret;
+ ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC, &irq_status);
+ if (ret)
+ return ret;
+ ret = devm_request_threaded_irq(aw96103->dev, i2c->irq, NULL,
+ aw96103_irq, IRQF_ONESHOT,
+ "aw96103_irq", indio_dev);
+ if (ret)
+ return ret;
+
+ return regmap_write(aw96103->regmap, AW96103_REG_IRQEN,
+ aw96103->hostirqen);
+}
+
+static int aw96103_wait_chip_init(struct aw96103 *aw96103)
+{
+ unsigned int cnt = 20;
+ u32 reg_data;
+ int ret;
+
+ while (cnt--) {
+ /*
+ * The device should generate an initialization completion
+ * interrupt within 20ms.
+ */
+ ret = regmap_read(aw96103->regmap, AW96103_REG_IRQSRC,
+ &reg_data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(AW96103_INITOVERIRQ_MASK, reg_data))
+ return 0;
+ fsleep(1000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int aw96103_read_chipid(struct aw96103 *aw96103)
+{
+ unsigned char cnt = 0;
+ u32 reg_val = 0;
+ int ret;
+
+ while (cnt < 3) {
+ /*
+ * Retry the chip ID read a few times, with a short delay
+ * between attempts, so that an occasional communication
+ * failure does not make the read fail outright.
+ */
+ ret = regmap_read(aw96103->regmap, AW96103_REG_CHIPID,
+ &reg_val);
+ if (ret < 0) {
+ cnt++;
+ fsleep(2000);
+ continue;
+ }
+ break;
+ }
+ if (cnt == 3)
+ return -ETIMEDOUT;
+
+ if (FIELD_GET(AW96103_CHIPID_MASK, reg_val) != AW96103_CHIP_ID)
+ dev_info(aw96103->dev,
+ "unexpected chipid, id=0x%08X\n", reg_val);
+
+ return 0;
+}
+
+static int aw96103_i2c_probe(struct i2c_client *i2c)
+{
+ const struct aw_chip_info *chip_info;
+ struct iio_dev *indio_dev;
+ struct aw96103 *aw96103;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&i2c->dev, sizeof(*aw96103));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ aw96103 = iio_priv(indio_dev);
+ aw96103->dev = &i2c->dev;
+ chip_info = i2c_get_match_data(i2c);
+ aw96103->max_channels = chip_info->num_channels;
+
+ aw96103->regmap = devm_regmap_init_i2c(i2c, &aw96103_regmap_config);
+ if (IS_ERR(aw96103->regmap))
+ return PTR_ERR(aw96103->regmap);
+
+ ret = devm_regulator_get_enable(aw96103->dev, "vcc");
+ if (ret < 0)
+ return ret;
+
+ ret = aw96103_read_chipid(aw96103);
+ if (ret)
+ return ret;
+
+ ret = aw96103_sw_reset(aw96103);
+ if (ret)
+ return ret;
+
+ ret = aw96103_wait_chip_init(aw96103);
+ if (ret)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, true, "aw96103_0.bin",
+ aw96103->dev, GFP_KERNEL, aw96103,
+ aw96103_cfg_update);
+ if (ret)
+ return ret;
+
+ ret = aw96103_interrupt_init(indio_dev, i2c);
+ if (ret)
+ return ret;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = chip_info->num_channels;
+ indio_dev->channels = chip_info->channels;
+ indio_dev->info = &iio_info;
+ indio_dev->name = chip_info->name;
+
+ return devm_iio_device_register(aw96103->dev, indio_dev);
+}
+
+static const struct of_device_id aw96103_dt_match[] = {
+ {
+ .compatible = "awinic,aw96103",
+ .data = &aw_chip_info_tbl[AW96103_VAL]
+ },
+ {
+ .compatible = "awinic,aw96105",
+ .data = &aw_chip_info_tbl[AW96105_VAL]
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aw96103_dt_match);
+
+static const struct i2c_device_id aw96103_i2c_id[] = {
+ { "aw96103", (kernel_ulong_t)&aw_chip_info_tbl[AW96103_VAL] },
+ { "aw96105", (kernel_ulong_t)&aw_chip_info_tbl[AW96105_VAL] },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aw96103_i2c_id);
+
+static struct i2c_driver aw96103_i2c_driver = {
+ .driver = {
+ .name = "aw96103_sensor",
+ .of_match_table = aw96103_dt_match,
+ },
+ .probe = aw96103_i2c_probe,
+ .id_table = aw96103_i2c_id,
+};
+module_i2c_driver(aw96103_i2c_driver);
+
+MODULE_AUTHOR("Wang Shuaijie <wangshuaijie@awinic.com>");
+MODULE_DESCRIPTION("Driver for Awinic AW96103 proximity sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/proximity/cros_ec_mkbp_proximity.c b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
index 4df506bb8b38..cff57d851762 100644
--- a/drivers/iio/proximity/cros_ec_mkbp_proximity.c
+++ b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
@@ -6,10 +6,10 @@
*/
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
new file mode 100644
index 000000000000..8b9f84400e00
--- /dev/null
+++ b/drivers/iio/proximity/hx9023s.c
@@ -0,0 +1,1144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 NanjingTianyihexin Electronics Ltd.
+ * http://www.tianyihexin.com
+ *
+ * Driver for NanjingTianyihexin HX9023S Cap Sensor.
+ * Datasheet available at:
+ * http://www.tianyihexin.com/ueditor/php/upload/file/20240614/1718336303992081.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/types.h>
+
+#define HX9023S_CHIP_ID 0x1D
+#define HX9023S_CH_NUM 5
+#define HX9023S_POS 0x03
+#define HX9023S_NEG 0x02
+#define HX9023S_NOT_CONNECTED 16
+
+#define HX9023S_GLOBAL_CTRL0 0x00
+#define HX9023S_PRF_CFG 0x02
+#define HX9023S_CH0_CFG_7_0 0x03
+#define HX9023S_CH4_CFG_9_8 0x0C
+#define HX9023S_RANGE_7_0 0x0D
+#define HX9023S_RANGE_9_8 0x0E
+#define HX9023S_RANGE_18_16 0x0F
+#define HX9023S_AVG0_NOSR0_CFG 0x10
+#define HX9023S_NOSR12_CFG 0x11
+#define HX9023S_NOSR34_CFG 0x12
+#define HX9023S_AVG12_CFG 0x13
+#define HX9023S_AVG34_CFG 0x14
+#define HX9023S_OFFSET_DAC0_7_0 0x15
+#define HX9023S_OFFSET_DAC4_9_8 0x1E
+#define HX9023S_SAMPLE_NUM_7_0 0x1F
+#define HX9023S_INTEGRATION_NUM_7_0 0x21
+#define HX9023S_CH_NUM_CFG 0x24
+#define HX9023S_LP_ALP_4_CFG 0x29
+#define HX9023S_LP_ALP_1_0_CFG 0x2A
+#define HX9023S_LP_ALP_3_2_CFG 0x2B
+#define HX9023S_UP_ALP_1_0_CFG 0x2C
+#define HX9023S_UP_ALP_3_2_CFG 0x2D
+#define HX9023S_DN_UP_ALP_0_4_CFG 0x2E
+#define HX9023S_DN_ALP_2_1_CFG 0x2F
+#define HX9023S_DN_ALP_4_3_CFG 0x30
+#define HX9023S_RAW_BL_RD_CFG 0x38
+#define HX9023S_INTERRUPT_CFG 0x39
+#define HX9023S_INTERRUPT_CFG1 0x3A
+#define HX9023S_CALI_DIFF_CFG 0x3B
+#define HX9023S_DITHER_CFG 0x3C
+#define HX9023S_DEVICE_ID 0x60
+#define HX9023S_PROX_STATUS 0x6B
+#define HX9023S_PROX_INT_HIGH_CFG 0x6C
+#define HX9023S_PROX_INT_LOW_CFG 0x6D
+#define HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 0x80
+#define HX9023S_PROX_LOW_DIFF_CFG_CH0_0 0x88
+#define HX9023S_PROX_LOW_DIFF_CFG_CH3_1 0x8F
+#define HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 0x9E
+#define HX9023S_PROX_HIGH_DIFF_CFG_CH4_1 0x9F
+#define HX9023S_PROX_LOW_DIFF_CFG_CH4_0 0xA2
+#define HX9023S_PROX_LOW_DIFF_CFG_CH4_1 0xA3
+#define HX9023S_CAP_INI_CH4_0 0xB3
+#define HX9023S_LP_DIFF_CH4_2 0xBA
+#define HX9023S_RAW_BL_CH4_0 0xB5
+#define HX9023S_LP_DIFF_CH4_0 0xB8
+#define HX9023S_DSP_CONFIG_CTRL1 0xC8
+#define HX9023S_CAP_INI_CH0_0 0xE0
+#define HX9023S_RAW_BL_CH0_0 0xE8
+#define HX9023S_LP_DIFF_CH0_0 0xF4
+#define HX9023S_LP_DIFF_CH3_2 0xFF
+
+#define HX9023S_DATA_LOCK_MASK BIT(4)
+#define HX9023S_INTERRUPT_MASK GENMASK(9, 0)
+#define HX9023S_PROX_DEBOUNCE_MASK GENMASK(3, 0)
+
+struct hx9023s_ch_data {
+ s16 raw; /* Raw Data */
+ s16 lp; /* Low Pass Filter Data */
+ s16 bl; /* Base Line Data */
+ s16 diff; /* Difference of Low Pass Data and Base Line Data */
+
+ struct {
+ unsigned int near;
+ unsigned int far;
+ } thres;
+
+ u16 dac;
+ u8 channel_positive;
+ u8 channel_negative;
+ bool sel_bl;
+ bool sel_raw;
+ bool sel_diff;
+ bool sel_lp;
+ bool enable;
+};
+
+struct hx9023s_data {
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ unsigned long chan_prox_stat;
+ unsigned long chan_read;
+ unsigned long chan_event;
+ unsigned long ch_en_stat;
+ unsigned long chan_in_use;
+ unsigned int prox_state_reg;
+ bool trigger_enabled;
+
+ struct {
+ __le16 channels[HX9023S_CH_NUM];
+ s64 ts __aligned(8);
+ } buffer;
+
+ /*
+ * Serialize access to the HX9023S_PROX_INT_LOW_CFG,
+ * HX9023S_PROX_INT_HIGH_CFG, HX9023S_INTERRUPT_CFG and
+ * HX9023S_CH_NUM_CFG registers, as well as to the channel
+ * configuration used by hx9023s_push_events() and
+ * hx9023s_trigger_handler().
+ */
+ struct mutex mutex;
+ struct hx9023s_ch_data ch_data[HX9023S_CH_NUM];
+};
+
+static const struct reg_sequence hx9023s_reg_init_list[] = {
+ /* scan period */
+ REG_SEQ0(HX9023S_PRF_CFG, 0x17),
+
+ /* full scale of conversion phase of each channel */
+ REG_SEQ0(HX9023S_RANGE_7_0, 0x11),
+ REG_SEQ0(HX9023S_RANGE_9_8, 0x02),
+ REG_SEQ0(HX9023S_RANGE_18_16, 0x00),
+
+ /* ADC average number and OSR number of each channel */
+ REG_SEQ0(HX9023S_AVG0_NOSR0_CFG, 0x71),
+ REG_SEQ0(HX9023S_NOSR12_CFG, 0x44),
+ REG_SEQ0(HX9023S_NOSR34_CFG, 0x00),
+ REG_SEQ0(HX9023S_AVG12_CFG, 0x33),
+ REG_SEQ0(HX9023S_AVG34_CFG, 0x00),
+
+ /* sample & integration frequency of the ADC */
+ REG_SEQ0(HX9023S_SAMPLE_NUM_7_0, 0x65),
+ REG_SEQ0(HX9023S_INTEGRATION_NUM_7_0, 0x65),
+
+ /* coefficient of the first-order low pass filter for each channel */
+ REG_SEQ0(HX9023S_LP_ALP_1_0_CFG, 0x22),
+ REG_SEQ0(HX9023S_LP_ALP_3_2_CFG, 0x22),
+ REG_SEQ0(HX9023S_LP_ALP_4_CFG, 0x02),
+
+ /* up coefficient of the first-order low pass filter for each channel */
+ REG_SEQ0(HX9023S_UP_ALP_1_0_CFG, 0x88),
+ REG_SEQ0(HX9023S_UP_ALP_3_2_CFG, 0x88),
+ REG_SEQ0(HX9023S_DN_UP_ALP_0_4_CFG, 0x18),
+
+ /* down coefficient of the first-order low pass filter for each channel */
+ REG_SEQ0(HX9023S_DN_ALP_2_1_CFG, 0x11),
+ REG_SEQ0(HX9023S_DN_ALP_4_3_CFG, 0x11),
+
+ /* select which data the Data Mux Register outputs */
+ REG_SEQ0(HX9023S_RAW_BL_RD_CFG, 0xF0),
+
+ /* enable the interrupt function */
+ REG_SEQ0(HX9023S_INTERRUPT_CFG, 0xFF),
+ REG_SEQ0(HX9023S_INTERRUPT_CFG1, 0x3B),
+ REG_SEQ0(HX9023S_DITHER_CFG, 0x21),
+
+ /* threshold of the offset compensation */
+ REG_SEQ0(HX9023S_CALI_DIFF_CFG, 0x07),
+
+ /* proximity persistence number (near & far) */
+ REG_SEQ0(HX9023S_PROX_INT_HIGH_CFG, 0x01),
+ REG_SEQ0(HX9023S_PROX_INT_LOW_CFG, 0x01),
+
+ /* disable the data lock */
+ REG_SEQ0(HX9023S_DSP_CONFIG_CTRL1, 0x00),
+};
+
+static const struct iio_event_spec hx9023s_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define HX9023S_CHANNEL(idx) \
+{ \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .indexed = 1, \
+ .channel = idx, \
+ .address = 0, \
+ .event_spec = hx9023s_events, \
+ .num_event_specs = ARRAY_SIZE(hx9023s_events), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+static const struct iio_chan_spec hx9023s_channels[] = {
+ HX9023S_CHANNEL(0),
+ HX9023S_CHANNEL(1),
+ HX9023S_CHANNEL(2),
+ HX9023S_CHANNEL(3),
+ HX9023S_CHANNEL(4),
+ IIO_CHAN_SOFT_TIMESTAMP(5),
+};
+
+static const unsigned int hx9023s_samp_freq_table[] = {
+ 2, 2, 4, 6, 8, 10, 14, 18, 22, 26,
+ 30, 34, 38, 42, 46, 50, 56, 62, 68, 74,
+ 80, 90, 100, 200, 300, 400, 600, 800, 1000, 2000,
+ 3000, 4000,
+};
+
+static const struct regmap_range hx9023s_rd_reg_ranges[] = {
+ regmap_reg_range(HX9023S_GLOBAL_CTRL0, HX9023S_LP_DIFF_CH3_2),
+};
+
+static const struct regmap_range hx9023s_wr_reg_ranges[] = {
+ regmap_reg_range(HX9023S_GLOBAL_CTRL0, HX9023S_LP_DIFF_CH3_2),
+};
+
+static const struct regmap_range hx9023s_volatile_reg_ranges[] = {
+ regmap_reg_range(HX9023S_CAP_INI_CH4_0, HX9023S_LP_DIFF_CH4_2),
+ regmap_reg_range(HX9023S_CAP_INI_CH0_0, HX9023S_LP_DIFF_CH3_2),
+ regmap_reg_range(HX9023S_PROX_STATUS, HX9023S_PROX_STATUS),
+};
+
+static const struct regmap_access_table hx9023s_rd_regs = {
+ .yes_ranges = hx9023s_rd_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hx9023s_rd_reg_ranges),
+};
+
+static const struct regmap_access_table hx9023s_wr_regs = {
+ .yes_ranges = hx9023s_wr_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hx9023s_wr_reg_ranges),
+};
+
+static const struct regmap_access_table hx9023s_volatile_regs = {
+ .yes_ranges = hx9023s_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hx9023s_volatile_reg_ranges),
+};
+
+static const struct regmap_config hx9023s_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_MAPLE,
+ .rd_table = &hx9023s_rd_regs,
+ .wr_table = &hx9023s_wr_regs,
+ .volatile_table = &hx9023s_volatile_regs,
+};
+
+static int hx9023s_interrupt_enable(struct hx9023s_data *data)
+{
+ return regmap_update_bits(data->regmap, HX9023S_INTERRUPT_CFG,
+ HX9023S_INTERRUPT_MASK, HX9023S_INTERRUPT_MASK);
+}
+
+static int hx9023s_interrupt_disable(struct hx9023s_data *data)
+{
+ return regmap_update_bits(data->regmap, HX9023S_INTERRUPT_CFG,
+ HX9023S_INTERRUPT_MASK, 0x00);
+}
+
+static int hx9023s_data_lock(struct hx9023s_data *data, bool locked)
+{
+ if (locked)
+ return regmap_update_bits(data->regmap,
+ HX9023S_DSP_CONFIG_CTRL1,
+ HX9023S_DATA_LOCK_MASK,
+ HX9023S_DATA_LOCK_MASK);
+ else
+ return regmap_update_bits(data->regmap,
+ HX9023S_DSP_CONFIG_CTRL1,
+ HX9023S_DATA_LOCK_MASK, 0);
+}
+
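+/*
+ * Channel configuration sketch (derived from the code below): each
+ * channel is described by a 16-bit word in which a 2-bit code (0x3
+ * for the positive input, 0x2 for the negative input) is shifted to
+ * the bit position of the connected CS pin. For example, a channel
+ * with CS1 as its single positive input encodes as 0x3 << 2 = 0x000c;
+ * a code shifted by HX9023S_NOT_CONNECTED (16) falls outside the
+ * 16-bit word, leaving that input unconnected.
+ */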
+static int hx9023s_ch_cfg(struct hx9023s_data *data)
+{
+ __le16 reg_list[HX9023S_CH_NUM];
+ u8 ch_pos[HX9023S_CH_NUM];
+ u8 ch_neg[HX9023S_CH_NUM];
+ /* Bit positions corresponding to input pin connections */
+ u8 conn_cs[HX9023S_CH_NUM] = { 0, 2, 4, 6, 8 };
+ unsigned int i;
+ u16 reg;
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ ch_pos[i] = data->ch_data[i].channel_positive == HX9023S_NOT_CONNECTED ?
+ HX9023S_NOT_CONNECTED : conn_cs[data->ch_data[i].channel_positive];
+ ch_neg[i] = data->ch_data[i].channel_negative == HX9023S_NOT_CONNECTED ?
+ HX9023S_NOT_CONNECTED : conn_cs[data->ch_data[i].channel_negative];
+
+ reg = (HX9023S_POS << ch_pos[i]) | (HX9023S_NEG << ch_neg[i]);
+ reg_list[i] = cpu_to_le16(reg);
+ }
+
+ return regmap_bulk_write(data->regmap, HX9023S_CH0_CFG_7_0, reg_list,
+ sizeof(reg_list));
+}
+
+static int hx9023s_write_far_debounce(struct hx9023s_data *data, int val)
+{
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, HX9023S_PROX_INT_LOW_CFG,
+ HX9023S_PROX_DEBOUNCE_MASK,
+ FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, val));
+}
+
+static int hx9023s_write_near_debounce(struct hx9023s_data *data, int val)
+{
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, HX9023S_PROX_INT_HIGH_CFG,
+ HX9023S_PROX_DEBOUNCE_MASK,
+ FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, val));
+}
+
+static int hx9023s_read_far_debounce(struct hx9023s_data *data, int *val)
+{
+ int ret;
+
+ ret = regmap_read(data->regmap, HX9023S_PROX_INT_LOW_CFG, val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, *val);
+
+ return IIO_VAL_INT;
+}
+
+static int hx9023s_read_near_debounce(struct hx9023s_data *data, int *val)
+{
+ int ret;
+
+ ret = regmap_read(data->regmap, HX9023S_PROX_INT_HIGH_CFG, val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(HX9023S_PROX_DEBOUNCE_MASK, *val);
+
+ return IIO_VAL_INT;
+}
+
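+/*
+ * Near/far thresholds live in a 10-bit register field with a
+ * granularity of 32 counts: writes are rounded down to a multiple of
+ * 32 and reads are scaled back up, so e.g. writing 1000 stores 31 and
+ * reads back as 992.
+ */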
+static int hx9023s_get_thres_near(struct hx9023s_data *data, u8 ch, int *val)
+{
+ int ret;
+ __le16 buf;
+ unsigned int reg, tmp;
+
+ reg = (ch == 4) ? HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 :
+ HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 + (ch * 2);
+
+ ret = regmap_bulk_read(data->regmap, reg, &buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ tmp = (le16_to_cpu(buf) & GENMASK(9, 0)) * 32;
+ data->ch_data[ch].thres.near = tmp;
+ *val = tmp;
+
+ return IIO_VAL_INT;
+}
+
+static int hx9023s_get_thres_far(struct hx9023s_data *data, u8 ch, int *val)
+{
+ int ret;
+ __le16 buf;
+ unsigned int reg, tmp;
+
+ reg = (ch == 4) ? HX9023S_PROX_LOW_DIFF_CFG_CH4_0 :
+ HX9023S_PROX_LOW_DIFF_CFG_CH0_0 + (ch * 2);
+
+ ret = regmap_bulk_read(data->regmap, reg, &buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ tmp = (le16_to_cpu(buf) & GENMASK(9, 0)) * 32;
+ data->ch_data[ch].thres.far = tmp;
+ *val = tmp;
+
+ return IIO_VAL_INT;
+}
+
+static int hx9023s_set_thres_near(struct hx9023s_data *data, u8 ch, int val)
+{
+ __le16 val_le16 = cpu_to_le16((val / 32) & GENMASK(9, 0));
+ unsigned int reg;
+
+ data->ch_data[ch].thres.near = ((val / 32) & GENMASK(9, 0)) * 32;
+ reg = (ch == 4) ? HX9023S_PROX_HIGH_DIFF_CFG_CH4_0 :
+ HX9023S_PROX_HIGH_DIFF_CFG_CH0_0 + (ch * 2);
+
+ return regmap_bulk_write(data->regmap, reg, &val_le16, sizeof(val_le16));
+}
+
+static int hx9023s_set_thres_far(struct hx9023s_data *data, u8 ch, int val)
+{
+ __le16 val_le16 = cpu_to_le16((val / 32) & GENMASK(9, 0));
+ unsigned int reg;
+
+ data->ch_data[ch].thres.far = ((val / 32) & GENMASK(9, 0)) * 32;
+ reg = (ch == 4) ? HX9023S_PROX_LOW_DIFF_CFG_CH4_0 :
+ HX9023S_PROX_LOW_DIFF_CFG_CH0_0 + (ch * 2);
+
+ return regmap_bulk_write(data->regmap, reg, &val_le16, sizeof(val_le16));
+}
+
+static int hx9023s_get_prox_state(struct hx9023s_data *data)
+{
+ return regmap_read(data->regmap, HX9023S_PROX_STATUS, &data->prox_state_reg);
+}
+
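+/*
+ * Per the register reads below, RAW_BL_RD_CFG bits 0-3 select DIFF
+ * (vs. LP) data and bits 4-7 select BL (vs. RAW) data for channels
+ * 0-3; channel 4 is controlled by bits 2 and 3 of INTERRUPT_CFG1.
+ */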
+static int hx9023s_data_select(struct hx9023s_data *data)
+{
+ int ret;
+ unsigned int i, buf;
+ unsigned long tmp;
+
+ ret = regmap_read(data->regmap, HX9023S_RAW_BL_RD_CFG, &buf);
+ if (ret)
+ return ret;
+
+ tmp = buf;
+ for (i = 0; i < 4; i++) {
+ data->ch_data[i].sel_diff = test_bit(i, &tmp);
+ data->ch_data[i].sel_lp = !data->ch_data[i].sel_diff;
+ data->ch_data[i].sel_bl = test_bit(i + 4, &tmp);
+ data->ch_data[i].sel_raw = !data->ch_data[i].sel_bl;
+ }
+
+ ret = regmap_read(data->regmap, HX9023S_INTERRUPT_CFG1, &buf);
+ if (ret)
+ return ret;
+
+ tmp = buf;
+ data->ch_data[4].sel_diff = test_bit(2, &tmp);
+ data->ch_data[4].sel_lp = !data->ch_data[4].sel_diff;
+ data->ch_data[4].sel_bl = test_bit(3, &tmp);
+ data->ch_data[4].sel_raw = !data->ch_data[4].sel_bl;
+
+ return 0;
+}
+
+static int hx9023s_sample(struct hx9023s_data *data)
+{
+ int ret;
+ unsigned int i;
+ u8 buf[HX9023S_CH_NUM * 3];
+ u16 value;
+
+ ret = hx9023s_data_lock(data, true);
+ if (ret)
+ return ret;
+
+ ret = hx9023s_data_select(data);
+ if (ret)
+ goto err;
+
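+ /*
+ * Each sample is transferred as 3 bytes; the decode below takes
+ * the upper two bytes (get_unaligned_le16(&buf[i * 3 + 1])),
+ * which suggests 24-bit little-endian data of which only the
+ * high 16 bits are consumed.
+ */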
+ /* 3 bytes for each of channels 0 to 3 which have contiguous registers */
+ ret = regmap_bulk_read(data->regmap, HX9023S_RAW_BL_CH0_0, buf, 12);
+ if (ret)
+ goto err;
+
+ /* 3 bytes for channel 4 */
+ ret = regmap_bulk_read(data->regmap, HX9023S_RAW_BL_CH4_0, buf + 12, 3);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ value = get_unaligned_le16(&buf[i * 3 + 1]);
+ data->ch_data[i].raw = 0;
+ data->ch_data[i].bl = 0;
+ if (data->ch_data[i].sel_raw)
+ data->ch_data[i].raw = value;
+ if (data->ch_data[i].sel_bl)
+ data->ch_data[i].bl = value;
+ }
+
+ /* 3 bytes for each of channels 0 to 3 which have contiguous registers */
+ ret = regmap_bulk_read(data->regmap, HX9023S_LP_DIFF_CH0_0, buf, 12);
+ if (ret)
+ goto err;
+
+ /* 3 bytes for channel 4 */
+ ret = regmap_bulk_read(data->regmap, HX9023S_LP_DIFF_CH4_0, buf + 12, 3);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ value = get_unaligned_le16(&buf[i * 3 + 1]);
+ data->ch_data[i].lp = 0;
+ data->ch_data[i].diff = 0;
+ if (data->ch_data[i].sel_lp)
+ data->ch_data[i].lp = value;
+ if (data->ch_data[i].sel_diff)
+ data->ch_data[i].diff = value;
+ }
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ if (data->ch_data[i].sel_lp && data->ch_data[i].sel_bl)
+ data->ch_data[i].diff = data->ch_data[i].lp - data->ch_data[i].bl;
+ }
+
+ /* 2 bytes for each of channels 0 to 4 which have contiguous registers */
+ ret = regmap_bulk_read(data->regmap, HX9023S_OFFSET_DAC0_7_0, buf, 10);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ value = get_unaligned_le16(&buf[i * 2]);
+ value = FIELD_GET(GENMASK(11, 0), value);
+ data->ch_data[i].dac = value;
+ }
+
+err:
+ return hx9023s_data_lock(data, false);
+}
+
+static int hx9023s_ch_en(struct hx9023s_data *data, u8 ch_id, bool en)
+{
+ int ret;
+ unsigned int buf;
+
+ ret = regmap_read(data->regmap, HX9023S_CH_NUM_CFG, &buf);
+ if (ret)
+ return ret;
+
+ data->ch_en_stat = buf;
+ if (en && data->ch_en_stat == 0)
+ data->prox_state_reg = 0;
+
+ data->ch_data[ch_id].enable = en;
+ __assign_bit(ch_id, &data->ch_en_stat, en);
+
+ return regmap_write(data->regmap, HX9023S_CH_NUM_CFG, data->ch_en_stat);
+}
+
+static int hx9023s_property_get(struct hx9023s_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ u32 array[2];
+ u32 i, reg, temp;
+ int ret;
+
+ data->chan_in_use = 0;
+ for (i = 0; i < HX9023S_CH_NUM; i++) {
+ data->ch_data[i].channel_positive = HX9023S_NOT_CONNECTED;
+ data->ch_data[i].channel_negative = HX9023S_NOT_CONNECTED;
+ }
+
+ device_for_each_child_node_scoped(dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg >= HX9023S_CH_NUM)
+ return dev_err_probe(dev, ret < 0 ? ret : -EINVAL,
+ "Failed to read reg\n");
+ __set_bit(reg, &data->chan_in_use);
+
+ ret = fwnode_property_read_u32(child, "single-channel", &temp);
+ if (ret == 0) {
+ data->ch_data[reg].channel_positive = temp;
+ data->ch_data[reg].channel_negative = HX9023S_NOT_CONNECTED;
+ } else {
+ ret = fwnode_property_read_u32_array(child, "diff-channels",
+ array, ARRAY_SIZE(array));
+ if (ret == 0) {
+ data->ch_data[reg].channel_positive = array[0];
+ data->ch_data[reg].channel_negative = array[1];
+ } else {
+ return dev_err_probe(dev, ret,
+ "Property read failed: %d\n",
+ reg);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int hx9023s_update_chan_en(struct hx9023s_data *data,
+ unsigned long chan_read,
+ unsigned long chan_event)
+{
+ unsigned int i;
+ unsigned long channels = chan_read | chan_event;
+
+ if ((data->chan_read | data->chan_event) != channels) {
+ for_each_set_bit(i, &channels, HX9023S_CH_NUM)
+ hx9023s_ch_en(data, i, test_bit(i, &data->chan_in_use));
+ for_each_clear_bit(i, &channels, HX9023S_CH_NUM)
+ hx9023s_ch_en(data, i, false);
+ }
+
+ data->chan_read = chan_read;
+ data->chan_event = chan_event;
+
+ return 0;
+}
+
+static int hx9023s_get_proximity(struct hx9023s_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+
+ ret = hx9023s_sample(data);
+ if (ret)
+ return ret;
+
+ ret = hx9023s_get_prox_state(data);
+ if (ret)
+ return ret;
+
+ *val = data->ch_data[chan->channel].diff;
+ return IIO_VAL_INT;
+}
+
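+/*
+ * hx9023s_samp_freq_table holds scan periods in milliseconds, so the
+ * reported rate is KILO / period. Worked example: a 400 ms period
+ * reads back as 2.5 Hz (*val = 2, *val2 = 500000).
+ */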
+static int hx9023s_get_samp_freq(struct hx9023s_data *data, int *val, int *val2)
+{
+ int ret;
+ unsigned int odr, index;
+
+ ret = regmap_read(data->regmap, HX9023S_PRF_CFG, &index);
+ if (ret)
+ return ret;
+
+ odr = hx9023s_samp_freq_table[index];
+ *val = KILO / odr;
+ *val2 = div_u64((KILO % odr) * MICRO, odr);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int hx9023s_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = hx9023s_get_proximity(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hx9023s_get_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hx9023s_set_samp_freq(struct hx9023s_data *data, int val, int val2)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int i, period_ms;
+
+ period_ms = div_u64(NANO, (val * MEGA + val2));
+
+ for (i = 0; i < ARRAY_SIZE(hx9023s_samp_freq_table); i++) {
+ if (period_ms == hx9023s_samp_freq_table[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(hx9023s_samp_freq_table)) {
+ dev_err(dev, "Period %ums not supported\n", period_ms);
+ return -EINVAL;
+ }
+
+ return regmap_write(data->regmap, HX9023S_PRF_CFG, i);
+}
+
+static int hx9023s_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ return hx9023s_set_samp_freq(data, val, val2);
+}
+
+static irqreturn_t hx9023s_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (data->trigger_enabled)
+ iio_trigger_poll(data->trig);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static void hx9023s_push_events(struct iio_dev *indio_dev)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ unsigned long prox_changed;
+ unsigned int chan;
+ int ret;
+
+ ret = hx9023s_sample(data);
+ if (ret)
+ return;
+
+ ret = hx9023s_get_prox_state(data);
+ if (ret)
+ return;
+
+ prox_changed = (data->chan_prox_stat ^ data->prox_state_reg) & data->chan_event;
+ for_each_set_bit(chan, &prox_changed, HX9023S_CH_NUM) {
+ unsigned int dir;
+
+ dir = (data->prox_state_reg & BIT(chan)) ?
+ IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
+
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
+ IIO_EV_TYPE_THRESH, dir),
+ timestamp);
+ }
+ data->chan_prox_stat = data->prox_state_reg;
+}
+
+static irqreturn_t hx9023s_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->mutex);
+ hx9023s_push_events(indio_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int hx9023s_read_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return hx9023s_get_thres_far(data, chan->channel, val);
+ case IIO_EV_DIR_FALLING:
+ return hx9023s_get_thres_near(data, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return hx9023s_read_far_debounce(data, val);
+ case IIO_EV_DIR_FALLING:
+ return hx9023s_read_near_debounce(data, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hx9023s_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return hx9023s_set_thres_far(data, chan->channel, val);
+ case IIO_EV_DIR_FALLING:
+ return hx9023s_set_thres_near(data, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return hx9023s_write_far_debounce(data, val);
+ case IIO_EV_DIR_FALLING:
+ return hx9023s_write_near_debounce(data, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hx9023s_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ return test_bit(chan->channel, &data->chan_event);
+}
+
+static int hx9023s_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ if (test_bit(chan->channel, &data->chan_in_use)) {
+ hx9023s_ch_en(data, chan->channel, !!state);
+ __assign_bit(chan->channel, &data->chan_event,
+ data->ch_data[chan->channel].enable);
+ }
+
+ return 0;
+}
+
+static const struct iio_info hx9023s_info = {
+ .read_raw = hx9023s_read_raw,
+ .write_raw = hx9023s_write_raw,
+ .read_event_value = hx9023s_read_event_val,
+ .write_event_value = hx9023s_write_event_val,
+ .read_event_config = hx9023s_read_event_config,
+ .write_event_config = hx9023s_write_event_config,
+};
+
+static int hx9023s_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->mutex);
+ if (state)
+ hx9023s_interrupt_enable(data);
+ else if (!data->chan_read)
+ hx9023s_interrupt_disable(data);
+ data->trigger_enabled = state;
+
+ return 0;
+}
+
+static const struct iio_trigger_ops hx9023s_trigger_ops = {
+ .set_trigger_state = hx9023s_set_trigger_state,
+};
+
+static irqreturn_t hx9023s_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int bit, index, i = 0;
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ ret = hx9023s_sample(data);
+ if (ret) {
+ dev_warn(dev, "sampling failed\n");
+ goto out;
+ }
+
+ ret = hx9023s_get_prox_state(data);
+ if (ret) {
+ dev_warn(dev, "get prox failed\n");
+ goto out;
+ }
+
+ iio_for_each_active_channel(indio_dev, bit) {
+ index = indio_dev->channels[bit].channel;
+ data->buffer.channels[i++] = cpu_to_le16(data->ch_data[index].diff);
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+ pf->timestamp);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int hx9023s_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ unsigned long channels = 0;
+ unsigned int bit;
+
+ guard(mutex)(&data->mutex);
+ iio_for_each_active_channel(indio_dev, bit)
+ __set_bit(indio_dev->channels[bit].channel, &channels);
+
+ hx9023s_update_chan_en(data, channels, data->chan_event);
+
+ return 0;
+}
+
+static int hx9023s_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->mutex);
+ hx9023s_update_chan_en(data, 0, data->chan_event);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops hx9023s_buffer_setup_ops = {
+ .preenable = hx9023s_buffer_preenable,
+ .postdisable = hx9023s_buffer_postdisable,
+};
+
+static int hx9023s_id_check(struct iio_dev *indio_dev)
+{
+ struct hx9023s_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int id;
+ int ret;
+
+ ret = regmap_read(data->regmap, HX9023S_DEVICE_ID, &id);
+ if (ret)
+ return ret;
+
+ if (id != HX9023S_CHIP_ID)
+ dev_warn(dev, "Unexpected chip ID, assuming compatible\n");
+
+ return 0;
+}
+
+static int hx9023s_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct hx9023s_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ mutex_init(&data->mutex);
+
+ data->regmap = devm_regmap_init_i2c(client, &hx9023s_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "regmap init failed\n");
+
+ ret = hx9023s_property_get(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "dts phase failed\n");
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "regulator get failed\n");
+
+ ret = hx9023s_id_check(indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "id check failed\n");
+
+ indio_dev->name = "hx9023s";
+ indio_dev->channels = hx9023s_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hx9023s_channels);
+ indio_dev->info = &hx9023s_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = regmap_multi_reg_write(data->regmap, hx9023s_reg_init_list,
+ ARRAY_SIZE(hx9023s_reg_init_list));
+ if (ret)
+ return dev_err_probe(dev, ret, "device init failed\n");
+
+ ret = hx9023s_ch_cfg(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "channel config failed\n");
+
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "regcache sync failed\n");
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(dev, client->irq,
+ hx9023s_irq_handler,
+ hx9023s_irq_thread_handler,
+ IRQF_ONESHOT,
+ "hx9023s_event", indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "irq request failed\n");
+
+ data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!data->trig)
+ return dev_err_probe(dev, -ENOMEM,
+ "iio trigger alloc failed\n");
+
+ data->trig->ops = &hx9023s_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = devm_iio_trigger_register(dev, data->trig);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "iio trigger register failed\n");
+ }
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ hx9023s_trigger_handler,
+ &hx9023s_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "iio triggered buffer setup failed\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int hx9023s_suspend(struct device *dev)
+{
+ struct hx9023s_data *data = iio_priv(dev_get_drvdata(dev));
+
+ guard(mutex)(&data->mutex);
+ hx9023s_interrupt_disable(data);
+
+ return 0;
+}
+
+static int hx9023s_resume(struct device *dev)
+{
+ struct hx9023s_data *data = iio_priv(dev_get_drvdata(dev));
+
+ guard(mutex)(&data->mutex);
+ if (data->trigger_enabled)
+ hx9023s_interrupt_enable(data);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(hx9023s_pm_ops, hx9023s_suspend,
+ hx9023s_resume);
+
+static const struct of_device_id hx9023s_of_match[] = {
+ { .compatible = "tyhx,hx9023s" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hx9023s_of_match);
+
+static const struct i2c_device_id hx9023s_id[] = {
+ { "hx9023s" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, hx9023s_id);
+
+static struct i2c_driver hx9023s_driver = {
+ .driver = {
+ .name = "hx9023s",
+ .of_match_table = hx9023s_of_match,
+ .pm = &hx9023s_pm_ops,
+
+ /*
+ * The I2C writes for the register init sequence and in
+ * hx9023s_ch_cfg() are time-consuming. Prefer async so we
+ * don't delay boot if we're builtin to the kernel.
+ */
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = hx9023s_probe,
+ .id_table = hx9023s_id,
+};
+module_i2c_driver(hx9023s_driver);
+
+MODULE_AUTHOR("Yasin Lee <yasin.lee.x@gmail.com>");
+MODULE_DESCRIPTION("Driver for TYHX HX9023S SAR sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 92630812ece2..3f4eace05cfc 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -654,8 +654,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = sx9500_read_prox_data(data, &indio_dev->channels[bit],
&val);
if (ret < 0)
diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c
index a95e9814aaf2..71aa6dced7d3 100644
--- a/drivers/iio/proximity/sx_common.c
+++ b/drivers/iio/proximity/sx_common.c
@@ -369,8 +369,7 @@ static irqreturn_t sx_common_trigger_handler(int irq, void *private)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
+ iio_for_each_active_channel(indio_dev, bit) {
ret = data->chip_info->ops.read_prox_data(data,
&indio_dev->channels[bit],
&val);
@@ -398,8 +397,7 @@ static int sx_common_buffer_preenable(struct iio_dev *indio_dev)
int bit, ret;
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength)
+ iio_for_each_active_channel(indio_dev, bit)
__set_bit(indio_dev->channels[bit].channel, &channels);
ret = sx_common_update_chan_en(data, channels, data->chan_event);
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 6791df64a5fe..b7c078b7f7cf 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1640,8 +1640,10 @@ int ib_cache_setup_one(struct ib_device *device)
rdma_for_each_port (device, p) {
err = ib_cache_update(device, p, true, true, true);
- if (err)
+ if (err) {
+ gid_table_cleanup_one(device);
return err;
+ }
}
return 0;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index dd7715ba9fd1..05102769a918 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -325,9 +325,6 @@ void ib_qp_usecnt_inc(struct ib_qp *qp);
void ib_qp_usecnt_dec(struct ib_qp *qp);
struct rdma_dev_addr;
-int rdma_resolve_ip_route(struct sockaddr *src_addr,
- const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 0290aca18d26..e029401b5680 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1351,6 +1351,29 @@ static void prevent_dealloc_device(struct ib_device *ib_dev)
{
}
+static void ib_device_notify_register(struct ib_device *device)
+{
+ struct net_device *netdev;
+ u32 port;
+ int ret;
+
+ ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
+ if (ret)
+ return;
+
+ rdma_for_each_port(device, port) {
+ netdev = ib_device_get_netdev(device, port);
+ if (!netdev)
+ continue;
+
+ ret = rdma_nl_notify_event(device, port,
+ RDMA_NETDEV_ATTACH_EVENT);
+ dev_put(netdev);
+ if (ret)
+ return;
+ }
+}
+
/**
* ib_register_device - Register an IB device with IB core
* @device: Device to register
@@ -1449,6 +1472,8 @@ int ib_register_device(struct ib_device *device, const char *name,
dev_set_uevent_suppress(&device->dev, false);
/* Mark for userspace that device is ready */
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
+ ib_device_notify_register(device);
ib_device_put(device);
return 0;
@@ -1491,6 +1516,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
goto out;
disable_device(ib_dev);
+ rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT);
/* Expedite removing unregistered pointers from the hash table */
free_netdevs(ib_dev);
@@ -2159,6 +2185,7 @@ static void add_ndev_hash(struct ib_port_data *pdata)
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
u32 port)
{
+ enum rdma_nl_notify_event_type etype;
struct net_device *old_ndev;
struct ib_port_data *pdata;
unsigned long flags;
@@ -2190,6 +2217,14 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
spin_unlock_irqrestore(&pdata->netdev_lock, flags);
add_ndev_hash(pdata);
+
+ /* Make sure that the device is registered before we send events */
+ if (xa_load(&devices, ib_dev->index) != ib_dev)
+ return 0;
+
+ etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT;
+ rdma_nl_notify_event(ib_dev, port, etype);
+
return 0;
}
EXPORT_SYMBOL(ib_device_set_netdev);
@@ -2236,6 +2271,9 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
if (!rdma_is_port_valid(ib_dev, port))
return NULL;
+ if (!ib_dev->port_data)
+ return NULL;
+
pdata = &ib_dev->port_data[port];
/*
@@ -2252,17 +2290,9 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
spin_unlock(&pdata->netdev_lock);
}
- /*
- * If we are starting to unregister expedite things by preventing
- * propagation of an unregistering netdev.
- */
- if (res && res->reg_state != NETREG_REGISTERED) {
- dev_put(res);
- return NULL;
- }
-
return res;
}
+EXPORT_SYMBOL(ib_device_get_netdev);
/**
* ib_device_get_by_netdev - Find an IB device associated with a netdev
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 1a6339f3a63f..7e3a55349e10 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -1182,7 +1182,7 @@ static int __init iw_cm_init(void)
if (ret)
return ret;
- iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
+ iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
if (!iwcm_wq)
goto err_alloc;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 7439e47ff951..1fd54d5c4dd8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2616,14 +2616,16 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
static void timeout_sends(struct work_struct *work)
{
+ struct ib_mad_send_wr_private *mad_send_wr, *n;
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
+ struct list_head local_list;
unsigned long flags, delay;
mad_agent_priv = container_of(work, struct ib_mad_agent_private,
timed_work.work);
mad_send_wc.vendor_err = 0;
+ INIT_LIST_HEAD(&local_list);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->wait_list)) {
@@ -2641,13 +2643,16 @@ static void timeout_sends(struct work_struct *work)
break;
}
- list_del(&mad_send_wr->agent_list);
+ list_del_init(&mad_send_wr->agent_list);
if (mad_send_wr->status == IB_WC_SUCCESS &&
!retry_send(mad_send_wr))
continue;
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ list_add_tail(&mad_send_wr->agent_list, &local_list);
+ }
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ list_for_each_entry_safe(mad_send_wr, n, &local_list, agent_list) {
if (mad_send_wr->status == IB_WC_SUCCESS)
mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
else
@@ -2655,11 +2660,8 @@ static void timeout_sends(struct work_struct *work)
mad_send_wc.send_buf = &mad_send_wr->send_buf;
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
&mad_send_wc);
-
deref_mad_agent(mad_agent_priv);
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
}
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
/*
@@ -2937,7 +2939,6 @@ static int ib_mad_port_open(struct ib_device *device,
int ret, cq_size;
struct ib_mad_port_private *port_priv;
unsigned long flags;
- char name[sizeof "ib_mad123"];
int has_smi;
if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
@@ -2990,8 +2991,8 @@ static int ib_mad_port_open(struct ib_device *device,
goto error7;
}
- snprintf(name, sizeof(name), "ib_mad%u", port_num);
- port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ port_priv->wq = alloc_ordered_workqueue("ib_mad%u", WQ_MEM_RECLAIM,
+ port_num);
if (!port_priv->wq) {
ret = -ENOMEM;
goto error8;
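The timeout_sends() rework above is an instance of a standard locking pattern: detach expired entries onto a local list under the spinlock, then invoke the completion handlers with the lock dropped, instead of unlocking and relocking once per entry. A minimal sketch with hypothetical types:

	static void reap_expired(struct my_agent *a)
	{
		struct my_wr *wr, *n;
		unsigned long flags;
		LIST_HEAD(local);

		spin_lock_irqsave(&a->lock, flags);
		list_for_each_entry_safe(wr, n, &a->wait_list, node)
			if (time_after_eq(jiffies, wr->timeout))
				list_move_tail(&wr->node, &local);
		spin_unlock_irqrestore(&a->lock, flags);

		list_for_each_entry_safe(wr, n, &local, node) {
			list_del_init(&wr->node);
			wr->done(wr);	/* may sleep or take a->lock */
		}
	}

The actual patch is slightly more involved (it stops at the first unexpired entry of the sorted wait list and lets retry_send() keep retryable entries), but the lock/splice/unlock structure is the same.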
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index ae2db0c70788..def14c54b648 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -311,6 +311,7 @@ int rdma_nl_net_init(struct rdma_dev_net *rnet)
struct net *net = read_pnet(&rnet->net);
struct netlink_kernel_cfg cfg = {
.input = rdma_nl_rcv,
+ .flags = NL_CFG_F_NONROOT_RECV,
};
struct sock *nls;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index a6b80cdc96f7..39f89a4b8649 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -170,6 +170,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_DEV_TYPE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_PARENT_NAME] = { .type = NLA_NUL_STRING },
[RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -1074,8 +1075,8 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1123,8 +1124,8 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1215,8 +1216,8 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 port;
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (err ||
!tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
!tb[RDMA_NLDEV_ATTR_PORT_INDEX])
@@ -1275,8 +1276,8 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
int err;
unsigned int p;
- err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1331,8 +1332,8 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int ret;
- ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1481,8 +1482,8 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int ret;
- ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
return -EINVAL;
@@ -1569,8 +1570,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
u32 index, port = 0;
bool filled = false;
- err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
/*
* Right now, we are expecting the device index to get res information,
* but it is possible to extend this code to return all devices in
@@ -1762,8 +1763,8 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
char type[IFNAMSIZ];
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
!tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
return -EINVAL;
@@ -1806,8 +1807,8 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -1836,8 +1837,8 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
- extack);
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ NL_VALIDATE_LIBERAL, extack);
if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
return -EINVAL;
@@ -1920,8 +1921,8 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (err)
return err;
@@ -1951,6 +1952,12 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_free(msg);
return err;
}
+
+ err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, 1);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
/*
* Copy-on-fork is supported.
* See commits:
@@ -2420,8 +2427,8 @@ static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (ret)
return -EINVAL;
@@ -2450,8 +2457,8 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb,
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
int ret;
- ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ ret = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, NULL);
if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
return -EINVAL;
@@ -2482,8 +2489,8 @@ static int nldev_stat_get_counter_status_doit(struct sk_buff *skb,
u32 devid, port;
int ret, i;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NL_VALIDATE_LIBERAL, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
!tb[RDMA_NLDEV_ATTR_PORT_INDEX])
return -EINVAL;
@@ -2722,6 +2729,130 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
};
+static int fill_mon_netdev_association(struct sk_buff *msg,
+ struct ib_device *device, u32 port,
+ const struct net *net)
+{
+ struct net_device *netdev = ib_device_get_netdev(device, port);
+ int ret = 0;
+
+ if (netdev && !net_eq(dev_net(netdev), net))
+ goto out;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index);
+ if (ret)
+ goto out;
+
+ ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
+ dev_name(&device->dev));
+ if (ret)
+ goto out;
+
+ ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port);
+ if (ret)
+ goto out;
+
+ if (netdev) {
+ ret = nla_put_u32(msg,
+ RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
+ if (ret)
+ goto out;
+
+ ret = nla_put_string(msg,
+ RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
+ }
+
+out:
+ dev_put(netdev);
+ return ret;
+}
+
+static void rdma_nl_notify_err_msg(struct ib_device *device, u32 port_num,
+ enum rdma_nl_notify_event_type type)
+{
+ struct net_device *netdev;
+
+ switch (type) {
+ case RDMA_REGISTER_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor register device event\n");
+ break;
+ case RDMA_UNREGISTER_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor unregister device event\n");
+ break;
+ case RDMA_NETDEV_ATTACH_EVENT:
+ netdev = ib_device_get_netdev(device, port_num);
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor netdev attach event: port %d netdev %d\n",
+ port_num, netdev->ifindex);
+ dev_put(netdev);
+ break;
+ case RDMA_NETDEV_DETACH_EVENT:
+ dev_warn_ratelimited(&device->dev,
+ "Failed to send RDMA monitor netdev detach event: port %d\n",
+ port_num);
+ break;
+ default:
+ break;
+ }
+}
+
+int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
+ enum rdma_nl_notify_event_type type)
+{
+ struct sk_buff *skb;
+ struct net *net;
+ int ret = 0;
+ void *nlh;
+
+ net = read_pnet(&device->coredev.rdma_net);
+ if (!net)
+ return -EINVAL;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ nlh = nlmsg_put(skb, 0, 0,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_MONITOR),
+ 0, 0);
+
+ switch (type) {
+ case RDMA_REGISTER_EVENT:
+ case RDMA_UNREGISTER_EVENT:
+ ret = fill_nldev_handle(skb, device);
+ if (ret)
+ goto err_free;
+ break;
+ case RDMA_NETDEV_ATTACH_EVENT:
+ case RDMA_NETDEV_DETACH_EVENT:
+ ret = fill_mon_netdev_association(skb, device,
+ port_num, net);
+ if (ret)
+ goto err_free;
+ break;
+ default:
+ break;
+ }
+
+ ret = nla_put_u8(skb, RDMA_NLDEV_ATTR_EVENT_TYPE, type);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(skb, nlh);
+ ret = rdma_nl_multicast(net, skb, RDMA_NL_GROUP_NOTIFY, GFP_KERNEL);
+ if (ret && ret != -ESRCH) {
+ skb = NULL; /* skb is freed in the netlink send-op handling */
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ rdma_nl_notify_err_msg(device, port_num, type);
+ nlmsg_free(skb);
+ return ret;
+}
+
void __init nldev_init(void)
{
rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
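With NL_CFG_F_NONROOT_RECV set on the NETLINK_RDMA socket (netlink.c above), an unprivileged process can join RDMA_NL_GROUP_NOTIFY and receive the RDMA_NLDEV_CMD_MONITOR messages built by rdma_nl_notify_event(). A minimal userspace sketch, assuming RDMA_NL_GROUP_NOTIFY is available from the uapi <rdma/rdma_netlink.h> that this series extends:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <rdma/rdma_netlink.h>	/* RDMA_NL_GROUP_NOTIFY (uapi) */

	#ifndef SOL_NETLINK
	#define SOL_NETLINK 270
	#endif

	int main(void)
	{
		struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
		int grp = RDMA_NL_GROUP_NOTIFY;
		char buf[4096];
		int fd;

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
		if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) ||
		    setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
			       &grp, sizeof(grp)))
			return 1;

		for (;;) {
			ssize_t len = recv(fd, buf, sizeof(buf), 0);

			if (len <= 0)
				break;
			/* walk nlmsghdr/nlattr here: RDMA_NLDEV_ATTR_EVENT_TYPE,
			 * _DEV_INDEX, _PORT_INDEX, _NDEV_INDEX, ... */
			printf("monitor event, %zd bytes\n", len);
		}
		return 0;
	}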
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8175dde60b0a..53571e6b3162 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1420,7 +1420,7 @@ enum opa_pr_supported {
/*
* opa_pr_query_possible - Check if current PR query can be an OPA query.
*
- * Retuns PR_NOT_SUPPORTED if a path record query is not
+ * Returns PR_NOT_SUPPORTED if a path record query is not
* possible, PR_OPA_SUPPORTED if an OPA path record query
* is possible and PR_IB_SUPPORTED if an IB path record
* query is possible.
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5f5ad8faf86e..5dbb248e9625 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1624,13 +1624,13 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
/* Get current fd to protect against it being closed */
f = fdget(cmd.fd);
- if (!f.file)
+ if (!fd_file(f))
return -ENOENT;
- if (f.file->f_op != &ucma_fops) {
+ if (fd_file(f)->f_op != &ucma_fops) {
ret = -EINVAL;
goto file_put;
}
- cur_file = f.file->private_data;
+ cur_file = fd_file(f)->private_data;
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(cur_file, cmd.id);
@@ -1817,7 +1817,6 @@ static const struct file_operations ucma_fops = {
.release = ucma_close,
.write = ucma_write,
.poll = ucma_poll,
- .llseek = no_llseek,
};
static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 39357dc2d229..9fcd37761264 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -23,6 +23,9 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+ if (umem_dmabuf->revoked)
+ return -EINVAL;
+
if (umem_dmabuf->sgt)
goto wait_fence;
@@ -110,10 +113,12 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);
-struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
- unsigned long offset, size_t size,
- int fd, int access,
- const struct dma_buf_attach_ops *ops)
+static struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops)
{
struct dma_buf *dmabuf;
struct ib_umem_dmabuf *umem_dmabuf;
@@ -152,7 +157,7 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
umem_dmabuf->attach = dma_buf_dynamic_attach(
dmabuf,
- device->dma_device,
+ dma_device,
ops,
umem_dmabuf);
if (IS_ERR(umem_dmabuf->attach)) {
@@ -168,6 +173,15 @@ out_release_dmabuf:
dma_buf_put(dmabuf);
return ret;
}
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
+ unsigned long offset, size_t size,
+ int fd, int access,
+ const struct dma_buf_attach_ops *ops)
+{
+ return ib_umem_dmabuf_get_with_dma_device(device, device->dma_device,
+ offset, size, fd, access, ops);
+}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
static void
@@ -184,16 +198,18 @@ static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};
-struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
- unsigned long offset,
- size_t size, int fd,
- int access)
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access)
{
struct ib_umem_dmabuf *umem_dmabuf;
int err;
- umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
- &ib_umem_dmabuf_attach_pinned_ops);
+ umem_dmabuf = ib_umem_dmabuf_get_with_dma_device(device, dma_device, offset,
+ size, fd, access,
+ &ib_umem_dmabuf_attach_pinned_ops);
if (IS_ERR(umem_dmabuf))
return umem_dmabuf;
@@ -217,17 +233,41 @@ err_release:
ib_umem_release(&umem_dmabuf->umem);
return ERR_PTR(err);
}
+EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_with_dma_device);
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access)
+{
+ return ib_umem_dmabuf_get_pinned_with_dma_device(device, device->dma_device,
+ offset, size, fd, access);
+}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
-void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf)
{
struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
+ if (umem_dmabuf->revoked)
+ goto end;
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
- if (umem_dmabuf->pinned)
+ if (umem_dmabuf->pinned) {
dma_buf_unpin(umem_dmabuf->attach);
+ umem_dmabuf->pinned = 0;
+ }
+ umem_dmabuf->revoked = 1;
+end:
dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_revoke);
+
+void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+{
+ struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+
+ ib_umem_dmabuf_revoke(umem_dmabuf);
dma_buf_detach(dmabuf, umem_dmabuf->attach);
dma_buf_put(dmabuf);
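The new revoke entry point separates "cut off DMA access" from "destroy the umem": ib_umem_dmabuf_revoke() unmaps and unpins under the reservation lock and sets the revoked flag, after which ib_umem_dmabuf_map_pages() fails with -EINVAL and a second revoke (including the one now inside ib_umem_dmabuf_release()) is a no-op. A minimal sketch of the intended driver use, with a hypothetical MR wrapper:

	/* Device reset/hot-unplug path: stop DMA immediately but keep the
	 * MR object alive until userspace deregisters it normally. */
	static void mydrv_kill_dmabuf_mr(struct mydrv_mr *mr)
	{
		ib_umem_dmabuf_revoke(mr->umem_dmabuf);
		/* later ib_umem_dmabuf_map_pages() calls now return -EINVAL */
	}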
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index f760dfffa188..fd67fc9fe85a 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1082,7 +1082,6 @@ static const struct file_operations umad_fops = {
#endif
.open = ib_umad_open,
.release = ib_umad_close,
- .llseek = no_llseek,
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -1150,7 +1149,6 @@ static const struct file_operations umad_sm_fops = {
.owner = THIS_MODULE,
.open = ib_umad_sm_open,
.release = ib_umad_sm_close,
- .llseek = no_llseek,
};
static struct ib_umad_port *get_port(struct ib_device *ibdev,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 1b3ea71f2c33..a4cce360df21 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -572,7 +572,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
struct inode *inode = NULL;
int new_xrcd = 0;
struct ib_device *ib_dev;
- struct fd f = {};
+ struct fd f = EMPTY_FD;
int ret;
ret = uverbs_request(attrs, &cmd, sizeof(cmd));
@@ -584,12 +584,12 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
if (cmd.fd != -1) {
/* search for file descriptor */
f = fdget(cmd.fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto err_tree_mutex_unlock;
}
- inode = file_inode(f.file);
+ inode = file_inode(fd_file(f));
xrcd = find_xrcd(ibudev, inode);
if (!xrcd && !(cmd.oflags & O_CREAT)) {
/* no file descriptor. Need CREATE flag */
@@ -632,7 +632,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
atomic_inc(&xrcd->usecnt);
}
- if (f.file)
+ if (fd_file(f))
fdput(f);
mutex_unlock(&ibudev->xrcd_tree_mutex);
@@ -648,7 +648,7 @@ err:
uobj_alloc_abort(&obj->uobject, attrs);
err_tree_mutex_unlock:
- if (f.file)
+ if (fd_file(f))
fdput(f);
mutex_unlock(&ibudev->xrcd_tree_mutex);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index bc099287de9a..94454186ed81 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -353,7 +353,6 @@ const struct file_operations uverbs_event_fops = {
.poll = ib_uverbs_comp_event_poll,
.release = uverbs_uobject_fd_release,
.fasync = ib_uverbs_comp_event_fasync,
- .llseek = no_llseek,
};
const struct file_operations uverbs_async_event_fops = {
@@ -362,7 +361,6 @@ const struct file_operations uverbs_async_event_fops = {
.poll = ib_uverbs_async_event_poll,
.release = uverbs_async_event_release,
.fasync = ib_uverbs_async_event_fasync,
- .llseek = no_llseek,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -991,7 +989,6 @@ static const struct file_operations uverbs_fops = {
.write = ib_uverbs_write,
.open = ib_uverbs_open,
.release = ib_uverbs_close,
- .llseek = no_llseek,
.unlocked_ioctl = ib_uverbs_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
@@ -1002,7 +999,6 @@ static const struct file_operations uverbs_mmap_fops = {
.mmap = ib_uverbs_mmap,
.open = ib_uverbs_open,
.release = ib_uverbs_close,
- .llseek = no_llseek,
.unlocked_ioctl = ib_uverbs_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 03e1db5d1e8c..7ebc7bd3caae 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -239,7 +239,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd,
access_flags,
- &attrs->driver_udata);
+ attrs);
if (IS_ERR(mr))
return PTR_ERR(mr);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 0912d2fa9634..e94518b12f86 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -91,6 +91,15 @@ struct bnxt_re_ring_attr {
u8 mode;
};
+/*

+ * Data structures and defines to handle
+ * recovery
+ */
+#define BNXT_RE_PRE_RECOVERY_REMOVE 0x1
+#define BNXT_RE_COMPLETE_REMOVE 0x2
+#define BNXT_RE_POST_RECOVERY_INIT 0x4
+#define BNXT_RE_COMPLETE_INIT 0x8
+
struct bnxt_re_sqp_entries {
struct bnxt_qplib_sge sge;
u64 wrid;
@@ -107,6 +116,11 @@ struct bnxt_re_gsi_context {
struct bnxt_re_sqp_entries *sqp_tbl;
};
+struct bnxt_re_en_dev_info {
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+};
+
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
@@ -141,6 +155,7 @@ struct bnxt_re_pacing {
#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
#define MAX_CQ_HASH_BITS (16)
+#define MAX_SRQ_HASH_BITS (16)
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
@@ -154,6 +169,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
+ struct auxiliary_device *adev;
struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
@@ -196,6 +212,7 @@ struct bnxt_re_dev {
struct work_struct dbq_fifo_check_work;
struct delayed_work dbq_pacing_work;
DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
+ DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
};
#define to_bnxt_re_dev(ptr, member) \
@@ -216,4 +233,10 @@ static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
}
extern const struct uapi_definition bnxt_re_uapi_defs[];
+
+static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
+{
+ rdev->qplib_res.pacing_data->dev_err_state =
+ test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+}
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 7c757351a016..460f33914825 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -115,6 +115,14 @@ static enum ib_access_flags __to_ib_access_flags(int qflags)
return iflags;
};
+static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
+ struct bnxt_qplib_mrw *qplib_mr)
+{
+ if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
+ pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
+ qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
+}
+
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
struct bnxt_qplib_sge *sg_list, int num)
{
@@ -517,15 +525,19 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
- goto fail;
- }
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+ if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
+ goto fail;
+ }
- /* Register MR */
- mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ /* Register MR */
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ } else {
+ mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
+ }
mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
@@ -994,43 +1006,37 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
- sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
- if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
- return -EINVAL;
- /* For gen p4 and gen p5 backward compatibility mode
- * wqe size is fixed to 128 bytes
+ /* For gen p4 and gen p5 fixed wqe compatibility mode
+ * wqe size is fixed to 128 bytes, i.e. 6 SGEs
*/
- if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
- qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
- sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
+ sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
+ sq->max_sge = BNXT_STATIC_MAX_SGE;
+ } else {
+ sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
+ if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
+ return -EINVAL;
+ }
if (init_attr->cap.max_inline_data) {
qplqp->max_inline_data = sq->wqe_size -
sizeof(struct sq_send_hdr);
init_attr->cap.max_inline_data = qplqp->max_inline_data;
- if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
- sq->max_sge = qplqp->max_inline_data /
- sizeof(struct sq_sge);
}
return 0;
}
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
- struct bnxt_re_qp *qp, struct ib_udata *udata)
+ struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_qp *qplib_qp;
- struct bnxt_re_ucontext *cntx;
- struct bnxt_re_qp_req ureq;
int bytes = 0, psn_sz;
struct ib_umem *umem;
int psn_nume;
qplib_qp = &qp->qplib_qp;
- cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
- ib_uctx);
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return -EFAULT;
bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
/* Consider mapping PSN search memory only for RC QPs. */
@@ -1038,15 +1044,20 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
- psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
- qplib_qp->sq.max_wqe :
- ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
- sizeof(struct bnxt_qplib_sge));
+ if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
+ psn_nume = ureq->sq_slots;
+ } else {
+ psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
+ sizeof(struct bnxt_qplib_sge));
+ }
+ if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
+ psn_nume = roundup_pow_of_two(psn_nume);
bytes += (psn_nume * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -1055,12 +1066,12 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->sq.sg_info.umem = umem;
qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
- qplib_qp->qp_handle = ureq.qp_handle;
+ qplib_qp->qp_handle = ureq->qp_handle;
if (!qp->qplib_qp.srq) {
bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
@@ -1156,6 +1167,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
/* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;
@@ -1167,6 +1179,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.rq.q_full_delta = 1;
@@ -1228,6 +1241,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
*/
entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->max_sw_wqe = rq->max_wqe;
rq->q_full_delta = 0;
rq->sg_info.pgsize = PAGE_SIZE;
rq->sg_info.pgshft = PAGE_SHIFT;
@@ -1256,14 +1270,15 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct bnxt_re_ucontext *uctx)
+ struct bnxt_re_ucontext *uctx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
struct bnxt_qplib_q *sq;
+ int diff = 0;
int entries;
- int diff;
int rc;
rdev = qp->rdev;
@@ -1272,21 +1287,28 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
dev_attr = &rdev->dev_attr;
sq->max_sge = init_attr->cap.max_send_sge;
- if (sq->max_sge > dev_attr->max_qp_sges) {
- sq->max_sge = dev_attr->max_qp_sges;
- init_attr->cap.max_send_sge = sq->max_sge;
- }
+ entries = init_attr->cap.max_send_wr;
+ if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
+ sq->max_wqe = ureq->sq_slots;
+ sq->max_sw_wqe = ureq->sq_slots;
+ sq->wqe_size = sizeof(struct sq_sge);
+ } else {
+ if (sq->max_sge > dev_attr->max_qp_sges) {
+ sq->max_sge = dev_attr->max_qp_sges;
+ init_attr->cap.max_send_sge = sq->max_sge;
+ }
- rc = bnxt_re_setup_swqe_size(qp, init_attr);
- if (rc)
- return rc;
+ rc = bnxt_re_setup_swqe_size(qp, init_attr);
+ if (rc)
+ return rc;
- entries = init_attr->cap.max_send_wr;
- /* Allocate 128 + 1 more than what's provided */
- diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
- 0 : BNXT_QPLIB_RESERVED_QP_WRS;
- entries = bnxt_re_init_depth(entries + diff + 1, uctx);
- sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ /* Allocate 128 + 1 more than what's provided */
+ diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
+ 0 : BNXT_QPLIB_RESERVED_QP_WRS;
+ entries = bnxt_re_init_depth(entries + diff + 1, uctx);
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
+ }
sq->q_full_delta = diff + 1;
/*
* Reserving one slot for Phantom WQE. Application can
@@ -1349,10 +1371,10 @@ out:
static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+ struct bnxt_re_ucontext *uctx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_dev_attr *dev_attr;
- struct bnxt_re_ucontext *uctx;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
struct bnxt_re_cq *cq;
@@ -1362,7 +1384,6 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
- uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
qplqp->pd = &pd->qplib_pd;
@@ -1375,8 +1396,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
goto out;
}
qplqp->type = (u8)qptype;
- qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
-
+ qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx);
if (init_attr->qp_type == IB_QPT_RC) {
qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
@@ -1411,14 +1431,14 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
bnxt_re_adjust_gsi_rq_attr(qp);
/* Setup SQ */
- rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
+ rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq);
if (rc)
goto out;
if (init_attr->qp_type == IB_QPT_GSI)
bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
- if (udata) /* This will update DPI and qp_handle */
- rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+ if (uctx) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq);
out:
return rc;
}
@@ -1519,14 +1539,27 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata)
{
- struct ib_pd *ib_pd = ib_qp->pd;
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_qp_req ureq;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_pd *pd;
+ struct bnxt_re_qp *qp;
+ struct ib_pd *ib_pd;
u32 active_qps;
int rc;
+ ib_pd = ib_qp->pd;
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ rdev = pd->rdev;
+ dev_attr = &rdev->dev_attr;
+ qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ if (udata)
+ if (ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq))))
+ return -EFAULT;
+
rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
if (!rc) {
rc = -EINVAL;
@@ -1534,7 +1567,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
}
qp->rdev = rdev;
- rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq);
if (rc)
goto fail;
@@ -1685,6 +1718,10 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
if (qplib_srq->cq)
nq = qplib_srq->cq->nq;
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
+ free_page((unsigned long)srq->uctx_srq_page);
+ hash_del(&srq->hash_entry);
+ }
bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
ib_umem_release(srq->umem);
atomic_dec(&rdev->stats.res.srq_count);
@@ -1789,9 +1826,18 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
}
if (udata) {
- struct bnxt_re_srq_resp resp;
+ struct bnxt_re_srq_resp resp = {};
resp.srqid = srq->qplib_srq.id;
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
+ hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
+ srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!srq->uctx_srq_page) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT;
+ }
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
@@ -2155,6 +2201,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
@@ -3845,9 +3892,12 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+ bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
/* Allocate and register 0 as the address */
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc)
@@ -3945,7 +3995,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
+ mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
@@ -4062,21 +4112,28 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
- rc = -EIO;
- goto free_mr;
+ if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+ rc = -EIO;
+ goto free_mr;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+ } else {
+ mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
}
- /* The fixed portion of the rkey is the same as the lkey */
- mr->ib_mr.rkey = mr->qplib_mr.rkey;
mr->ib_umem = umem;
mr->qplib_mr.va = virt_addr;
mr->qplib_mr.total_size = length;
+ if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+ bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
umem_pgs, page_size);
@@ -4122,7 +4179,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr, int fd,
- int mr_access_flags, struct ib_udata *udata)
+ int mr_access_flags,
+ struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
@@ -4187,9 +4245,6 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
resp.cqe_sz = sizeof(struct cq_base);
resp.max_cqd = dev_attr->max_cq_wqes;
- resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
- resp.mode = rdev->chip_ctx->modes.wqe_mode;
-
if (rdev->chip_ctx->modes.db_push)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
@@ -4211,7 +4266,13 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
goto cfail;
if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
- uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
+ uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED;
+ }
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
+ resp.mode = rdev->chip_ctx->modes.wqe_mode;
+ if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
}
}
@@ -4265,6 +4326,19 @@ static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq
return cq;
}
+static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id)
+{
+ struct bnxt_re_srq *srq = NULL, *tmp_srq;
+
+ hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) {
+ if (tmp_srq->qplib_srq.id == srq_id) {
+ srq = tmp_srq;
+ break;
+ }
+ }
+ return srq;
+}
+
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
@@ -4493,12 +4567,13 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
struct bnxt_re_ucontext *uctx;
struct ib_ucontext *ib_uctx;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ u32 length = PAGE_SIZE;
struct bnxt_re_cq *cq;
u64 mem_offset;
+ u32 offset = 0;
u64 addr = 0;
- u32 length;
- u32 offset;
- u32 cq_id;
+ u32 res_id;
int err;
ib_uctx = ib_uverbs_get_ucontext(attrs);
@@ -4511,23 +4586,24 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
rdev = uctx->rdev;
+ err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
+ if (err)
+ return err;
switch (res_type) {
case BNXT_RE_CQ_TOGGLE_MEM:
- err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
- if (err)
- return err;
-
- cq = bnxt_re_search_for_cq(rdev, cq_id);
+ cq = bnxt_re_search_for_cq(rdev, res_id);
if (!cq)
return -EINVAL;
- length = PAGE_SIZE;
addr = (u64)cq->uctx_cq_page;
- mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
- offset = 0;
break;
case BNXT_RE_SRQ_TOGGLE_MEM:
+ srq = bnxt_re_search_for_srq(rdev, res_id);
+ if (!srq)
+ return -EINVAL;
+
+ addr = (u64)srq->uctx_srq_page;
break;
default:
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index e98cb1717338..b789e47ec97a 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -77,6 +77,8 @@ struct bnxt_re_srq {
struct bnxt_qplib_srq qplib_srq;
struct ib_umem *umem;
spinlock_t lock; /* protect srq */
+ void *uctx_srq_page;
+ struct hlist_node hash_entry;
};
struct bnxt_re_qp {
@@ -171,12 +173,26 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
}
+enum {
+ BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL,
+ BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL,
+};
+
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
- return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
+ return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED) ?
ent : roundup_pow_of_two(ent) : ent;
}
+static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ucontext *uctx)
+{
+ if (uctx)
+ return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
+ else
+ return rdev->chip_ctx->modes.wqe_mode;
+}
+
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
@@ -242,7 +258,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr,
int fd, int mr_access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
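Variable-size WQE support is now negotiated per ucontext instead of being implied by the chip mode: userspace requests it via comp_mask, the kernel echoes the mode back, and bnxt_re_is_var_size_supported() keys QP creation off the resulting capability bit (kernel consumers fall back to the chip-wide wqe_mode). A hedged sketch of the user-provider handshake, assuming the bnxt_re-abi.h request/response structures:

	struct bnxt_re_uctx_req ureq = {};
	struct bnxt_re_uctx_resp resp = {};

	ureq.comp_mask |= BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT;
	/* ... exchange ureq/resp through the alloc_ucontext udata ... */
	if ((resp.comp_mask & BNXT_RE_UCNTX_CMASK_HAVE_MODE) &&
	    resp.mode == 1 /* BNXT_QPLIB_WQE_MODE_VARIABLE */) {
		/* build variable-size SQEs; ureq.sq_slots is then honored
		 * by bnxt_re_init_sq_attr() at create-QP time */
	}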
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 9714b9ab7524..777068de4bbc 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -83,11 +83,12 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
static int bnxt_re_netdev_event(struct notifier_block *notifier,
unsigned long event, void *ptr);
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
+static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
@@ -129,18 +130,20 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
}
}
-static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
+static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
cctx = rdev->chip_ctx;
- cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
- mode : BNXT_QPLIB_WQE_MODE_STATIC;
+ cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
+ BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC;
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
"Failed to query hwrm qcaps\n");
- if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx))
+ if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) {
cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
+ cctx->modes.toggle_bits |= BNXT_QPLIB_SRQ_TOGGLE_BIT;
+ }
}
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
@@ -158,7 +161,7 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
kfree(chip_ctx);
}
-static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
+static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
@@ -166,6 +169,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
en_dev = rdev->en_dev;
+ rdev->qplib_res.pdev = en_dev->pdev;
chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
if (!chip_ctx)
return -ENOMEM;
@@ -180,7 +184,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->qplib_res.dattr = &rdev->dev_attr;
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
- bnxt_re_set_drv_mode(rdev, wqe_mode);
+ bnxt_re_set_drv_mode(rdev);
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
@@ -290,21 +294,31 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
static void bnxt_re_shutdown(struct auxiliary_device *adev)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
- if (!rdev)
+ if (!en_info)
return;
+
+ rdev = en_info->rdev;
ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
}
static void bnxt_re_stop_irq(void *handle)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_re_dev *rdev;
struct bnxt_qplib_nq *nq;
int indx;
+ if (!en_info)
+ return;
+
+ rdev = en_info->rdev;
+ rcfw = &rdev->rcfw;
+
for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
nq = &rdev->nq[indx - 1];
bnxt_qplib_nq_stop_irq(nq, false);
@@ -315,12 +329,19 @@ static void bnxt_re_stop_irq(void *handle)
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
- struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_msix_entry *msix_ent;
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_re_dev *rdev;
struct bnxt_qplib_nq *nq;
int indx, rc;
+ if (!en_info)
+ return;
+
+ rdev = en_info->rdev;
+ msix_ent = rdev->en_dev->msix_entries;
+ rcfw = &rdev->rcfw;
if (!ent) {
/* Not setting the f/w timeout bit in rcfw.
* During the driver unload the first command
@@ -365,14 +386,9 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
- int rc;
en_dev = rdev->en_dev;
-
- rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev);
- if (!rc)
- rdev->qplib_res.pdev = rdev->en_dev->pdev;
- return rc;
+ return bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev->adev);
}
static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
@@ -1573,7 +1589,7 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
return rc;
}
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
u8 type;
int rc;
@@ -1606,8 +1622,10 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
bnxt_re_deinitialize_dbr_pacing(rdev);
bnxt_re_destroy_chip_ctx(rdev);
- if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
- bnxt_unregister_dev(rdev->en_dev);
+ if (op_type == BNXT_RE_COMPLETE_REMOVE) {
+ if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+ bnxt_unregister_dev(rdev->en_dev);
+ }
}
/* worker thread for polling periodic events. Now used for QoS programming*/
@@ -1620,7 +1638,7 @@ static void bnxt_re_worker(struct work_struct *work)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
-static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
{
struct bnxt_re_ring_attr rattr = {};
struct bnxt_qplib_creq_ctx *creq;
@@ -1629,16 +1647,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
u8 type;
int rc;
- /* Registered a new RoCE device instance to netdev */
- rc = bnxt_re_register_netdev(rdev);
- if (rc) {
- ibdev_err(&rdev->ibdev,
- "Failed to register with netedev: %#x\n", rc);
- return -EINVAL;
+ if (op_type == BNXT_RE_COMPLETE_INIT) {
+ /* Registered a new RoCE device instance to netdev */
+ rc = bnxt_re_register_netdev(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to register with netedev: %#x\n", rc);
+ return -EINVAL;
+ }
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
- rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
+ rc = bnxt_re_setup_chip_ctx(rdev);
if (rc) {
bnxt_unregister_dev(rdev->en_dev);
clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -1771,6 +1791,8 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
bnxt_re_vf_res_config(rdev);
}
hash_init(rdev->cq_hash);
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT)
+ hash_init(rdev->srq_hash);
return 0;
free_sctx:
@@ -1785,21 +1807,38 @@ free_ring:
free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
return rc;
}
-static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
+static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
+ struct bnxt_re_en_dev_info *en_info,
+ struct auxiliary_device *adev)
+{
+ /* Before updating the rdev pointer in bnxt_re_en_dev_info structure,
+ * take the rtnl lock to avoid accessing invalid rdev pointer from
+ * L2 ULP callbacks. This is applicable in all the places where rdev
+ * pointer is updated in bnxt_re_en_dev_info.
+ */
+ rtnl_lock();
+ en_info->rdev = rdev;
+ rdev->adev = adev;
+ rtnl_unlock();
+}
+
+static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
{
struct bnxt_aux_priv *aux_priv =
container_of(adev, struct bnxt_aux_priv, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
struct bnxt_en_dev *en_dev;
struct bnxt_re_dev *rdev;
int rc;
- /* en_dev should never be NULL as long as adev and aux_dev are valid. */
- en_dev = aux_priv->edev;
+ en_info = auxiliary_get_drvdata(adev);
+ en_dev = en_info->en_dev;
+
rdev = bnxt_re_dev_add(aux_priv, en_dev);
if (!rdev || !rdev_to_dev(rdev)) {
@@ -1807,7 +1846,9 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
goto exit;
}
- rc = bnxt_re_dev_init(rdev, wqe_mode);
+ bnxt_re_update_en_info_rdev(rdev, en_info, adev);
+
+ rc = bnxt_re_dev_init(rdev, op_type);
if (rc)
goto re_dev_dealloc;
@@ -1817,12 +1858,22 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
aux_priv->aux_dev.name);
goto re_dev_uninit;
}
- auxiliary_set_drvdata(adev, rdev);
+
+ rdev->nb.notifier_call = bnxt_re_netdev_event;
+ rc = register_netdevice_notifier(&rdev->nb);
+ if (rc) {
+ rdev->nb.notifier_call = NULL;
+ pr_err("%s: Cannot register to netdevice_notifier",
+ ROCE_DRV_MODULE_NAME);
+ return rc;
+ }
+ bnxt_re_setup_cc(rdev, true);
return 0;
re_dev_uninit:
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
re_dev_dealloc:
ib_dealloc_device(&rdev->ibdev);
exit:
@@ -1905,14 +1956,9 @@ exit:
#define BNXT_ADEV_NAME "bnxt_en"
-static void bnxt_re_remove(struct auxiliary_device *adev)
+static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
+ struct auxiliary_device *aux_dev)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
-
- if (!rdev)
- return;
-
- mutex_lock(&bnxt_re_mutex);
if (rdev->nb.notifier_call) {
unregister_netdevice_notifier(&rdev->nb);
rdev->nb.notifier_call = NULL;
@@ -1920,41 +1966,56 @@ static void bnxt_re_remove(struct auxiliary_device *adev)
/* If notifier is null, we should have already done a
* clean up before coming here.
*/
- goto skip_remove;
+ return;
}
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_dev_uninit(rdev, op_type);
ib_dealloc_device(&rdev->ibdev);
-skip_remove:
+}
+
+static void bnxt_re_remove(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ mutex_lock(&bnxt_re_mutex);
+ if (!en_info) {
+ mutex_unlock(&bnxt_re_mutex);
+ return;
+ }
+ rdev = en_info->rdev;
+
+ if (rdev)
+ bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev);
+ kfree(en_info);
mutex_unlock(&bnxt_re_mutex);
}
static int bnxt_re_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_en_dev *en_dev;
int rc;
+ en_dev = aux_priv->edev;
+
mutex_lock(&bnxt_re_mutex);
- rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
- if (rc) {
+ en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
+ if (!en_info) {
mutex_unlock(&bnxt_re_mutex);
- return rc;
+ return -ENOMEM;
}
+ en_info->en_dev = en_dev;
- rdev = auxiliary_get_drvdata(adev);
+ auxiliary_set_drvdata(adev, en_info);
- rdev->nb.notifier_call = bnxt_re_netdev_event;
- rc = register_netdevice_notifier(&rdev->nb);
- if (rc) {
- rdev->nb.notifier_call = NULL;
- pr_err("%s: Cannot register to netdevice_notifier",
- ROCE_DRV_MODULE_NAME);
+ rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
+ if (rc)
goto err;
- }
-
- bnxt_re_setup_cc(rdev, true);
mutex_unlock(&bnxt_re_mutex);
return 0;
@@ -1967,11 +2028,15 @@ err:
static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
- if (!rdev)
+ if (!en_info)
return 0;
+ rdev = en_info->rdev;
+ en_dev = en_info->en_dev;
mutex_lock(&bnxt_re_mutex);
/* L2 driver may invoke this callback during device error/crash or device
* reset. Current RoCE driver doesn't recover the device in case of
@@ -1990,13 +2055,20 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
bnxt_re_dev_stop(rdev);
- bnxt_re_stop_irq(rdev);
+ bnxt_re_stop_irq(adev);
/* Move the device states to detached and avoid sending any more
* commands to HW
*/
set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
wake_up_all(&rdev->rcfw.cmdq.waitq);
+
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_set_pacing_dev_state(rdev);
+
+ ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
+ __func__, en_dev->en_state);
+ bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
mutex_unlock(&bnxt_re_mutex);
return 0;
@@ -2004,9 +2076,10 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
static int bnxt_re_resume(struct auxiliary_device *adev)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
- if (!rdev)
+ if (!en_info)
return 0;
mutex_lock(&bnxt_re_mutex);
@@ -2017,7 +2090,9 @@ static int bnxt_re_resume(struct auxiliary_device *adev)
* L2 driver want to modify the MSIx table.
*/
- ibdev_info(&rdev->ibdev, "Handle device resume call");
+ bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT);
+ rdev = en_info->rdev;
+ ibdev_info(&rdev->ibdev, "Device resume completed");
mutex_unlock(&bnxt_re_mutex);
return 0;
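Taken together, the main.c changes turn error recovery into a remove/re-add cycle keyed by the new op_type flags, with the auxiliary drvdata (bnxt_re_en_dev_info) outliving the rdev itself. A summary of the resulting flow, using the function names from the hunks above:

	/*
	 * L2 fatal error : bnxt_re_suspend()
	 *   -> bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev)
	 * L2 recovered   : bnxt_re_resume()
	 *   -> bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT)
	 * driver unload  : bnxt_re_remove()
	 *   -> bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev)
	 *
	 * Only the COMPLETE_{INIT,REMOVE} variants register with or
	 * unregister from the L2 driver; the recovery variants keep the
	 * bnxt_en binding and the en_info drvdata intact across the cycle.
	 */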
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 49e4a4a50bfa..42e98e5f94cb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -54,6 +54,10 @@
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
+#include <rdma/ib_addr.h>
+#include "bnxt_ulp.h"
+#include "bnxt_re.h"
+#include "ib_verbs.h"
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
@@ -347,6 +351,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
case NQ_BASE_TYPE_SRQ_EVENT:
{
struct bnxt_qplib_srq *srq;
+ struct bnxt_re_srq *srq_p;
struct nq_srq_event *nqsrqe =
(struct nq_srq_event *)nqe;
@@ -354,6 +359,12 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
<< 32;
srq = (struct bnxt_qplib_srq *)q_handle;
+ srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
+ >> NQ_CN_TOGGLE_SFT;
+ srq->dbinfo.toggle = srq->toggle;
+ srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
+ if (srq_p->uctx_srq_page)
+ *((u32 *)srq_p->uctx_srq_page) = srq->toggle;
bnxt_qplib_armen_db(&srq->dbinfo,
DBC_DBC_TYPE_SRQ_ARMENA);
if (nq->srqn_handler(nq,
@@ -809,13 +820,13 @@ static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
int indx;
- que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
+ que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
if (!que->swq)
return -ENOMEM;
que->swq_start = 0;
- que->swq_last = que->max_wqe - 1;
- for (indx = 0; indx < que->max_wqe; indx++)
+ que->swq_last = que->max_sw_wqe - 1;
+ for (indx = 0; indx < que->max_sw_wqe; indx++)
que->swq[indx].next_idx = indx + 1;
que->swq[que->swq_last].next_idx = 0; /* Make it circular */
que->swq_last = 0;
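For reference, a worked pass through the initialization above, under an assumed queue size:

/*
 * Illustration (assumed max_sw_wqe == 4): the loop sets next_idx to
 * 1, 2, 3, 4, the fixup rewrites swq[3].next_idx to 0, and swq_last is
 * reset to 0, leaving the ring 0 -> 1 -> 2 -> 3 -> 0 that swq_start and
 * swq_last later walk as head and tail.
 */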
@@ -851,7 +862,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -879,7 +890,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
@@ -1011,7 +1022,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
: 0;
@@ -1052,7 +1063,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
hwq_attr.type = HWQ_TYPE_QUEUE;
@@ -2471,6 +2482,32 @@ out:
return rc;
}
+static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ struct bnxt_qplib_swq *swq;
+ int cqe_sq_cons = -1;
+ u32 start, last;
+
+ sq_hwq = &sq->hwq;
+
+ start = sq->swq_start;
+ last = sq->swq_last;
+
+ while (last != start) {
+ swq = &sq->swq[last];
+ if (swq->slot_idx == cqe_slot) {
+ cqe_sq_cons = swq->next_idx;
+ dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
+ __func__, cqe_sq_cons, cqe_slot);
+ break;
+ }
+
+ last = swq->next_idx;
+ }
+ return cqe_sq_cons;
+}
+
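+ /*
+ * Illustration (assumed values): in-flight SWQEs sit at ring positions
+ * 2 -> 3 -> 0 with slot_idx 8, 12, 16, and swq_last == 2, swq_start == 1.
+ * For cqe_slot == 12 the walk above visits swq[2] (slot 8, no match),
+ * then swq[3] (slot 12, match) and returns swq[3].next_idx == 0: the
+ * consumer index immediately after the erroneous variable-size WQE.
+ */
+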
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe,
struct bnxt_qplib_cqe **pcqe, int *budget,
@@ -2478,9 +2515,10 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_cqe *cqe;
+ u32 cqe_sq_cons, slot_num;
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq;
- u32 cqe_sq_cons;
+ int cqe_cons;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -2492,12 +2530,26 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
sq = &qp->sq;
- cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
+ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
+
+ if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
+ slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
+ cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
+ if (cqe_cons < 0) {
+ dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
+ __func__, slot_num);
+ goto done;
+ }
+ cqe_sq_cons = cqe_cons;
+ dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
+ __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
+ }
+
/* We need to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
@@ -2882,7 +2934,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
if (cqe_cons == 0xFFFF)
goto do_rq;
- cqe_cons %= sq->max_wqe;
+ cqe_cons %= sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 56538b90d6c5..b62df8701950 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -105,6 +105,7 @@ struct bnxt_qplib_srq {
struct bnxt_qplib_sg_info sg_info;
u16 eventq_hw_ring_id;
spinlock_t lock; /* protect SRQE link list */
+ u8 toggle;
};
struct bnxt_qplib_sge {
@@ -251,6 +252,7 @@ struct bnxt_qplib_q {
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
+ u32 max_sw_wqe;
u16 wqe_size;
u16 q_full_delta;
u16 max_sge;
@@ -586,15 +588,22 @@ static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
que->swq_start = que->swq[idx].next_idx;
}
-static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que)
+static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
{
- return (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+ u32 slots;
+
+ /* Queue depth is the number of slots. */
+ slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+ /* For variable WQE mode, the slot count needs to be aligned to 256 */
+ if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
+ slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
+ return slots;
}
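+ /*
+ * Quick arithmetic check (assuming sizeof(struct sq_sge) == 16):
+ * wqe_size == 128 and max_wqe == 100 give slots = (128 * 100) / 16 = 800.
+ * A variable-mode SQ rounds this up to ALIGN(800, 256) == 1024 slots;
+ * RQs and static-mode queues keep the raw 800.
+ */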
static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
- que->max_wqe : bnxt_qplib_get_depth(que);
+ que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}
static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
@@ -641,4 +650,14 @@ static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
(((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
SQ_MSN_SEARCH_START_PSN_MASK));
}
+
+static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
+{
+ return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
+}
+
+static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
+{
+ return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
+}
#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index a0f78cde314f..c2f710364e0f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -82,6 +82,7 @@ struct bnxt_qplib_db_pacing_data {
u32 fifo_room_mask;
u32 fifo_room_shift;
u32 grc_reg_offset;
+ u32 dev_err_state;
};
#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
@@ -565,4 +566,14 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
return cctx->modes.dbr_pacing;
}
+static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
+{
+ return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
+}
+
+static inline bool _is_relaxed_ordering_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED;
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9328db92fa6d..4f75e7e5bcf7 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -95,11 +95,13 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct bnxt_qplib_chip_ctx *cctx;
struct cmdq_query_func req = {};
u8 *tqm_alloc;
int i, rc;
u32 temp;
+ cctx = rcfw->res->cctx;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_FUNC,
sizeof(req));
@@ -133,8 +135,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
* reporting the max number
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
- attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
- 6 : sb->max_sge;
+
+ attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
+ min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
attr->max_cq_sges = attr->max_qp_sges;
@@ -541,7 +544,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
req.pd_id = cpu_to_le32(mrw->pd->id);
req.mrw_flags = mrw->type;
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
- mrw->flags & BNXT_QPLIB_FR_PMR) ||
+ mrw->access_flags & BNXT_QPLIB_FR_PMR) ||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
@@ -653,9 +656,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
- req.access = (mr->flags & 0xFFFF);
+ req.access = (mr->access_flags & 0xFFFF);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
+ if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
+ req.key = cpu_to_le32(mr->pd->id);
+ req.flags = cpu_to_le16(mr->flags);
req.mr_size = cpu_to_le64(mr->total_size);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
@@ -664,6 +670,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
if (rc)
goto fail;
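+ /* Unified alloc+register: the firmware allocated the MR within this
+ * command (the request carried the PD id in the key field above), so
+ * the returned xid is adopted as the new lkey/rkey.
+ */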
+ if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
+ mr->lkey = le32_to_cpu(resp.xid);
+ mr->rkey = mr->lkey;
+ }
+
return 0;
fail:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 16a67d70a6fc..acd9c14a31c4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,7 @@
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__
+#include <rdma/bnxt_re-abi.h>
#define BNXT_QPLIB_RESERVED_QP_WRS 128
struct bnxt_qplib_dev_attr {
@@ -108,7 +109,7 @@ struct bnxt_qplib_ah {
struct bnxt_qplib_mrw {
struct bnxt_qplib_pd *pd;
int type;
- u32 flags;
+ u32 access_flags;
#define BNXT_QPLIB_FR_PMR 0x80000000
u32 lkey;
u32 rkey;
@@ -116,6 +117,7 @@ struct bnxt_qplib_mrw {
u64 va;
u64 total_size;
u32 npages;
+ u16 flags;
u64 mr_handle;
struct bnxt_qplib_hwq hwq;
};
@@ -351,4 +353,11 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
+#define BNXT_VAR_MAX_WQE 4352
+#define BNXT_VAR_MAX_SLOT_ALIGN 256
+#define BNXT_VAR_MAX_SGE 13
+#define BNXT_RE_MAX_RQ_WQES 65536
+
+#define BNXT_STATIC_MAX_SGE 6
+
#endif /* __BNXT_QPLIB_SP_H__*/
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 042530969505..3ec895284e49 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -409,7 +409,7 @@ struct creq_deinitialize_fw_resp {
u8 reserved48[6];
};
-/* cmdq_create_qp (size:768b/96B) */
+/* cmdq_create_qp (size:832b/104B) */
struct cmdq_create_qp {
u8 opcode;
#define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL
@@ -430,8 +430,11 @@ struct cmdq_create_qp {
#define CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL
#define CMDQ_CREATE_QP_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL
#define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXPRESS_MODE_ENABLED 0x100UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID 0x200UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED 0x400UL
#define CMDQ_CREATE_QP_QP_FLAGS_LAST \
- CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED
+ CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED
u8 type;
#define CMDQ_CREATE_QP_TYPE_RC 0x2UL
#define CMDQ_CREATE_QP_TYPE_UD 0x4UL
@@ -492,6 +495,9 @@ struct cmdq_create_qp {
__le64 rq_pbl;
__le64 irrq_addr;
__le64 orrq_addr;
+ __le32 request_xid;
+ __le16 steering_tag;
+ __le16 reserved16;
};
/* creq_create_qp_resp (size:128b/16B) */
@@ -972,13 +978,14 @@ struct creq_query_qp_extend_resp_sb_tlv {
__le16 reserved_16;
};
-/* cmdq_create_srq (size:384b/48B) */
+/* cmdq_create_srq (size:448b/56B) */
struct cmdq_create_srq {
u8 opcode;
#define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL
#define CMDQ_CREATE_SRQ_OPCODE_LAST CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ
u8 cmd_size;
__le16 flags;
+ #define CMDQ_CREATE_SRQ_FLAGS_STEERING_TAG_VALID 0x1UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1012,6 +1019,8 @@ struct cmdq_create_srq {
__le32 dpi;
__le32 pd_id;
__le64 pbl;
+ __le16 steering_tag;
+ u8 reserved48[6];
};
/* creq_create_srq_resp (size:128b/16B) */
@@ -1118,7 +1127,7 @@ struct creq_query_srq_resp_sb {
__le32 data[4];
};
-/* cmdq_create_cq (size:384b/48B) */
+/* cmdq_create_cq (size:448b/56B) */
struct cmdq_create_cq {
u8 opcode;
#define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL
@@ -1126,6 +1135,8 @@ struct cmdq_create_cq {
u8 cmd_size;
__le16 flags;
#define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL
+ #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE 0x4UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1157,6 +1168,8 @@ struct cmdq_create_cq {
__le32 dpi;
__le32 cq_size;
__le64 pbl;
+ __le16 steering_tag;
+ u8 reserved48[6];
};
/* creq_create_cq_resp (size:128b/16B) */
@@ -1288,11 +1301,12 @@ struct cmdq_allocate_mrw {
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B
- #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xf0UL
- #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 4
+ #define CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID 0x10UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xe0UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 5
u8 access;
#define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL
- __le16 unused16;
+ __le16 steering_tag;
__le32 pd_id;
};
@@ -1359,14 +1373,16 @@ struct creq_deallocate_key_resp {
__le32 bound_window_info;
};
-/* cmdq_register_mr (size:384b/48B) */
+/* cmdq_register_mr (size:448b/56B) */
struct cmdq_register_mr {
u8 opcode;
#define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL
#define CMDQ_REGISTER_MR_OPCODE_LAST CMDQ_REGISTER_MR_OPCODE_REGISTER_MR
u8 cmd_size;
__le16 flags;
- #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL
+ #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL
+ #define CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_REGISTER_MR_FLAGS_ENABLE_RO 0x4UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1415,6 +1431,8 @@ struct cmdq_register_mr {
__le64 pbl;
__le64 va;
__le64 mr_size;
+ __le16 steering_tag;
+ u8 reserved48[6];
};
/* creq_register_mr_resp (size:128b/16B) */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 040ba2224f9f..b3757c6a0457 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1222,6 +1222,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
int ret;
ep = lookup_atid(t, atid);
+ if (!ep)
+ return -EINVAL;
pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
@@ -2279,6 +2281,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
int ret = 0;
ep = lookup_atid(t, atid);
+ if (!ep)
+ return -EINVAL;
+
la = (struct sockaddr_in *)&ep->com.local_addr;
ra = (struct sockaddr_in *)&ep->com.remote_addr;
la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 5111421f9473..14ced7b667fa 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -1126,13 +1126,19 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_free_mm2;
mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
+ mm->addr = 0;
+ mm->vaddr = chp->cq.queue;
+ mm->dma_addr = chp->cq.dma_addr;
mm->len = chp->cq.memsize;
+ insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
insert_mmap(ucontext, mm);
mm2->key = uresp.gts_key;
mm2->addr = chp->cq.bar2_pa;
mm2->len = PAGE_SIZE;
+ mm2->vaddr = NULL;
+ mm2->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, mm2, mm2->addr);
insert_mmap(ucontext, mm2);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index f838bb6718af..5b3007acaa1f 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -532,11 +532,21 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext);
}
+enum {
+ CXGB4_MMAP_BAR,
+ CXGB4_MMAP_BAR_WC,
+ CXGB4_MMAP_CONTIG,
+ CXGB4_MMAP_NON_CONTIG,
+};
+
struct c4iw_mm_entry {
struct list_head entry;
u64 addr;
u32 key;
+ void *vaddr;
+ dma_addr_t dma_addr;
unsigned len;
+ u8 mmap_flag;
};
static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
@@ -561,6 +571,32 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
return NULL;
}
+static inline void insert_flag_to_mmap(struct c4iw_rdev *rdev,
+ struct c4iw_mm_entry *mm, u64 addr)
+{
+ if (addr >= pci_resource_start(rdev->lldi.pdev, 0) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
+ pci_resource_len(rdev->lldi.pdev, 0))))
+ mm->mmap_flag = CXGB4_MMAP_BAR;
+ else if (addr >= pci_resource_start(rdev->lldi.pdev, 2) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+ pci_resource_len(rdev->lldi.pdev, 2)))) {
+ if (addr >= rdev->oc_mw_pa) {
+ mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+ } else {
+ if (is_t4(rdev->lldi.adapter_type))
+ mm->mmap_flag = CXGB4_MMAP_BAR;
+ else
+ mm->mmap_flag = CXGB4_MMAP_BAR_WC;
+ }
+ } else {
+ if (addr)
+ mm->mmap_flag = CXGB4_MMAP_CONTIG;
+ else
+ mm->mmap_flag = CXGB4_MMAP_NON_CONTIG;
+ }
+}
+
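+ /* Classification at a glance (illustrative): an address inside PCI BAR 0
+ * maps as CXGB4_MMAP_BAR; an address in BAR 2 maps as CXGB4_MMAP_BAR_WC
+ * when at or above oc_mw_pa (or on non-T4 parts); any other nonzero
+ * address is CXGB4_MMAP_CONTIG; and addr == 0, used for the
+ * dma_alloc_coherent()-backed queues in this patch, selects
+ * CXGB4_MMAP_NON_CONTIG.
+ */
+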
static inline void insert_mmap(struct c4iw_ucontext *ucontext,
struct c4iw_mm_entry *mm)
{
@@ -936,7 +972,6 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
u32 nr_pdid, u32 nr_srqt);
-int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
@@ -944,7 +979,6 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
-int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
void c4iw_register_device(struct work_struct *work);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
@@ -1006,8 +1040,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
-u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
struct c4iw_dev_ucontext *uctx);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 246b739ddb2b..10a4c738b59f 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -113,6 +113,9 @@ static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
mm->key = uresp.status_page_key;
mm->addr = virt_to_phys(rhp->rdev.status_page);
mm->len = PAGE_SIZE;
+ mm->vaddr = NULL;
+ mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
insert_mmap(context, mm);
}
return 0;
@@ -131,6 +134,11 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct c4iw_mm_entry *mm;
struct c4iw_ucontext *ucontext;
u64 addr;
+ u8 mmap_flag;
+ size_t size;
+ void *vaddr;
+ unsigned long vm_pgoff;
+ dma_addr_t dma_addr;
pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
key, len);
@@ -145,47 +153,38 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (!mm)
return -EINVAL;
addr = mm->addr;
+ vaddr = mm->vaddr;
+ dma_addr = mm->dma_addr;
+ size = mm->len;
+ mmap_flag = mm->mmap_flag;
kfree(mm);
- if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
- (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
- pci_resource_len(rdev->lldi.pdev, 0)))) {
-
- /*
- * MA_SYNC register...
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ switch (mmap_flag) {
+ case CXGB4_MMAP_BAR:
+ ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+ len,
+ pgprot_noncached(vma->vm_page_prot));
+ break;
+ case CXGB4_MMAP_BAR_WC:
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
- (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
- pci_resource_len(rdev->lldi.pdev, 2)))) {
-
- /*
- * Map user DB or OCQP memory...
- */
- if (addr >= rdev->oc_mw_pa)
- vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
- else {
- if (!is_t4(rdev->lldi.adapter_type))
- vma->vm_page_prot =
- t4_pgprot_wc(vma->vm_page_prot);
- else
- vma->vm_page_prot =
- pgprot_noncached(vma->vm_page_prot);
- }
+ len, t4_pgprot_wc(vma->vm_page_prot));
+ break;
+ case CXGB4_MMAP_CONTIG:
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
- } else {
-
- /*
- * Map WQ or CQ contig dma memory...
- */
- ret = remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
+ break;
+ case CXGB4_MMAP_NON_CONTIG:
+ vm_pgoff = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
+ ret = dma_mmap_coherent(&rdev->lldi.pdev->dev, vma,
+ vaddr, dma_addr, size);
+ vma->vm_pgoff = vm_pgoff;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
return ret;
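The CXGB4_MMAP_NON_CONTIG arm clears vm_pgoff before calling dma_mmap_coherent() because that helper interprets vm_pgoff as an offset into the coherent buffer, while cxgb4 encodes a lookup key there. A minimal sketch of the pattern in isolation:

#include <linux/dma-mapping.h>

/* Sketch: map a dma_alloc_coherent() buffer when vm_pgoff carries a key. */
static int map_coherent_buf(struct device *dev, struct vm_area_struct *vma,
			    void *vaddr, dma_addr_t dma_addr, size_t size)
{
	unsigned long saved_pgoff = vma->vm_pgoff;
	int ret;

	vma->vm_pgoff = 0;	/* dma_mmap_coherent() treats vm_pgoff as a buffer offset */
	ret = dma_mmap_coherent(dev, vma, vaddr, dma_addr, size);
	vma->vm_pgoff = saved_pgoff;
	return ret;
}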
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d16d8eaa1415..7b5c4522b426 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2281,24 +2281,39 @@ int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
if (ret)
goto err_free_ma_sync_key;
sq_key_mm->key = uresp.sq_key;
- sq_key_mm->addr = qhp->wq.sq.phys_addr;
+ sq_key_mm->addr = 0;
+ sq_key_mm->vaddr = qhp->wq.sq.queue;
+ sq_key_mm->dma_addr = qhp->wq.sq.dma_addr;
sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+ insert_flag_to_mmap(&rhp->rdev, sq_key_mm, sq_key_mm->addr);
insert_mmap(ucontext, sq_key_mm);
if (!attrs->srq) {
rq_key_mm->key = uresp.rq_key;
- rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
+ rq_key_mm->addr = 0;
+ rq_key_mm->vaddr = qhp->wq.rq.queue;
+ rq_key_mm->dma_addr = qhp->wq.rq.dma_addr;
rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_flag_to_mmap(&rhp->rdev, rq_key_mm,
+ rq_key_mm->addr);
insert_mmap(ucontext, rq_key_mm);
}
sq_db_key_mm->key = uresp.sq_db_gts_key;
sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
+ sq_db_key_mm->vaddr = NULL;
+ sq_db_key_mm->dma_addr = 0;
sq_db_key_mm->len = PAGE_SIZE;
+ insert_flag_to_mmap(&rhp->rdev, sq_db_key_mm,
+ sq_db_key_mm->addr);
insert_mmap(ucontext, sq_db_key_mm);
if (!attrs->srq) {
rq_db_key_mm->key = uresp.rq_db_gts_key;
rq_db_key_mm->addr =
(u64)(unsigned long)qhp->wq.rq.bar2_pa;
rq_db_key_mm->len = PAGE_SIZE;
+ rq_db_key_mm->vaddr = NULL;
+ rq_db_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, rq_db_key_mm,
+ rq_db_key_mm->addr);
insert_mmap(ucontext, rq_db_key_mm);
}
if (ma_sync_key_mm) {
@@ -2307,6 +2322,10 @@ int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
(pci_resource_start(rhp->rdev.lldi.pdev, 0) +
PCIE_MA_SYNC_A) & PAGE_MASK;
ma_sync_key_mm->len = PAGE_SIZE;
+ ma_sync_key_mm->vaddr = NULL;
+ ma_sync_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, ma_sync_key_mm,
+ ma_sync_key_mm->addr);
insert_mmap(ucontext, ma_sync_key_mm);
}
@@ -2761,12 +2780,19 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
if (ret)
goto err_free_srq_db_key_mm;
srq_key_mm->key = uresp.srq_key;
- srq_key_mm->addr = virt_to_phys(srq->wq.queue);
+ srq_key_mm->addr = 0;
srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
+ srq_key_mm->vaddr = srq->wq.queue;
+ srq_key_mm->dma_addr = srq->wq.dma_addr;
+ insert_flag_to_mmap(&rhp->rdev, srq_key_mm, srq_key_mm->addr);
insert_mmap(ucontext, srq_key_mm);
srq_db_key_mm->key = uresp.srq_db_gts_key;
srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
srq_db_key_mm->len = PAGE_SIZE;
+ srq_db_key_mm->vaddr = NULL;
+ srq_db_key_mm->dma_addr = 0;
+ insert_flag_to_mmap(&rhp->rdev, srq_db_key_mm,
+ srq_db_key_mm->addr);
insert_mmap(ucontext, srq_db_key_mm);
}
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index e580e087e9da..d7fc9d5eeefd 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -168,7 +168,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 4296662e59c3..cd03a5429beb 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -674,6 +674,9 @@ struct efa_admin_feature_device_attr_desc {
/* Max RDMA transfer size in bytes */
u32 max_rdma_size;
+
+ /* Unique global ID for an EFA device */
+ u64 guid;
};
struct efa_admin_feature_queue_attr_desc {
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 5b9c2b16df0e..5a774925cdea 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -465,6 +465,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->db_bar = resp.u.device_attr.db_bar;
result->max_rdma_size = resp.u.device_attr.max_rdma_size;
result->device_caps = resp.u.device_attr.device_caps;
+ result->guid = resp.u.device_attr.guid;
if (result->admin_api_version < 1) {
ibdev_err_ratelimited(
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 9714105fcf7e..668d033f7477 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -112,6 +112,7 @@ struct efa_com_get_device_attr_result {
u8 addr[EFA_GID_SIZE];
u64 page_size_cap;
u64 max_mr_pages;
+ u64 guid;
u32 mtu;
u32 fw_version;
u32 admin_api_version;
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 1a777791bea3..ad225823e6f2 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -441,6 +441,7 @@ static int efa_ib_device_add(struct efa_dev *dev)
efa_set_host_info(dev);
dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
+ dev->ibdev.node_guid = dev->dev_attr.guid;
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
dev->ibdev.dev.parent = &pdev->dev;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index b1e0a1b7c59d..cc13415ff7e7 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1684,14 +1684,14 @@ static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
- struct ib_udata *udata)
+ struct uverbs_attr_bundle *attrs)
{
struct efa_dev *dev = to_edev(ibpd->device);
struct ib_umem_dmabuf *umem_dmabuf;
struct efa_mr *mr;
int err;
- mr = efa_alloc_mr(ibpd, access_flags, udata);
+ mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto err_out;
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index c8bd698e21b0..3c166359448d 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -274,7 +274,8 @@ void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);
int erdma_aeq_init(struct erdma_dev *dev);
-void erdma_aeq_destroy(struct erdma_dev *dev);
+int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth);
+void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq);
void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index 43ff40b5a09d..a3d8922d1ad1 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -158,20 +158,13 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_eq *eq = &cmdq->eq;
+ int ret;
- eq->depth = cmdq->max_outstandings;
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- &eq->qbuf_dma_addr, GFP_KERNEL);
- if (!eq->qbuf)
- return -ENOMEM;
-
- spin_lock_init(&eq->lock);
- atomic64_set(&eq->event_num, 0);
+ ret = erdma_eq_common_init(dev, eq, cmdq->max_outstandings);
+ if (ret)
+ return ret;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
- eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
- if (!eq->dbrec)
- goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr));
@@ -181,12 +174,6 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
return 0;
-
-err_out:
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
- eq->qbuf_dma_addr);
-
- return -ENOMEM;
}
int erdma_cmdq_init(struct erdma_dev *dev)
@@ -247,10 +234,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev)
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
- dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
- cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
-
- dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);
+ erdma_eq_destroy(dev, &cmdq->eq);
dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index 84ccdd8144c9..9a72fec6d5cc 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -80,50 +80,60 @@ void erdma_aeq_event_handler(struct erdma_dev *dev)
notify_eq(&dev->aeq);
}
-int erdma_aeq_init(struct erdma_dev *dev)
+int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
{
- struct erdma_eq *eq = &dev->aeq;
+ u32 buf_size = depth << EQE_SHIFT;
- eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
-
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
&eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf)
return -ENOMEM;
- spin_lock_init(&eq->lock);
- atomic64_set(&eq->event_num, 0);
- atomic64_set(&eq->notify_num, 0);
-
- eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
if (!eq->dbrec)
- goto err_out;
+ goto err_free_qbuf;
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
- upper_32_bits(eq->qbuf_dma_addr));
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
- lower_32_bits(eq->qbuf_dma_addr));
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
- erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
+ spin_lock_init(&eq->lock);
+ atomic64_set(&eq->event_num, 0);
+ atomic64_set(&eq->notify_num, 0);
+ eq->ci = 0;
+ eq->depth = depth;
return 0;
-err_out:
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
+err_free_qbuf:
+ dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
eq->qbuf_dma_addr);
return -ENOMEM;
}
-void erdma_aeq_destroy(struct erdma_dev *dev)
+void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
{
- struct erdma_eq *eq = &dev->aeq;
-
+ dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
+}
- dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
+int erdma_aeq_init(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+ int ret;
+
+ ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
+ if (ret)
+ return ret;
+
+ eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
+
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
+
+ return 0;
}
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
@@ -234,32 +244,21 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
int ret;
- eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- &eq->qbuf_dma_addr, GFP_KERNEL);
- if (!eq->qbuf)
- return -ENOMEM;
-
- spin_lock_init(&eq->lock);
- atomic64_set(&eq->event_num, 0);
- atomic64_set(&eq->notify_num, 0);
+ ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
+ if (ret)
+ return ret;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
(ceqn + 1) * ERDMA_DB_SIZE;
-
- eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
- if (!eq->dbrec) {
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- eq->qbuf, eq->qbuf_dma_addr);
- return -ENOMEM;
- }
-
- eq->ci = 0;
dev->ceqs[ceqn].dev = dev;
+ dev->ceqs[ceqn].ready = true;
/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
ret = create_eq_cmd(dev, ceqn + 1, eq);
- dev->ceqs[ceqn].ready = ret ? false : true;
+ if (ret) {
+ erdma_eq_destroy(dev, eq);
+ dev->ceqs[ceqn].ready = false;
+ }
return ret;
}
@@ -283,9 +282,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
if (err)
return;
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
- eq->qbuf_dma_addr);
- dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
+ erdma_eq_destroy(dev, eq);
}
int erdma_ceqs_init(struct erdma_dev *dev)
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 7080f8a71ec4..62f497a71004 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -333,7 +333,7 @@ err_uninit_cmdq:
erdma_cmdq_destroy(dev);
err_uninit_aeq:
- erdma_aeq_destroy(dev);
+ erdma_eq_destroy(dev, &dev->aeq);
err_uninit_comm_irq:
erdma_comm_irq_uninit(dev);
@@ -366,7 +366,7 @@ static void erdma_remove_dev(struct pci_dev *pdev)
erdma_ceqs_uninit(dev);
erdma_hw_reset(dev);
erdma_cmdq_destroy(dev);
- erdma_aeq_destroy(dev);
+ erdma_eq_destroy(dev, &dev->aeq);
erdma_comm_irq_uninit(dev);
pci_free_irq_vectors(dev->pdev);
erdma_device_uninit(dev);
@@ -490,6 +490,7 @@ static const struct ib_device_ops erdma_device_ops = {
.dereg_mr = erdma_dereg_mr,
.destroy_cq = erdma_destroy_cq,
.destroy_qp = erdma_destroy_qp,
+ .disassociate_ucontext = erdma_disassociate_ucontext,
.get_dma_mr = erdma_get_dma_mr,
.get_hw_stats = erdma_get_hw_stats,
.get_port_immutable = erdma_get_port_immutable,
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index d7e1cbf9f5c2..51d619edb6c5 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -1544,11 +1544,31 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
return ret;
}
+static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
+{
+ switch (qp->attrs.state) {
+ case ERDMA_QP_STATE_IDLE:
+ return IB_QPS_INIT;
+ case ERDMA_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case ERDMA_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case ERDMA_QP_STATE_CLOSING:
+ return IB_QPS_ERR;
+ case ERDMA_QP_STATE_TERMINATE:
+ return IB_QPS_ERR;
+ case ERDMA_QP_STATE_ERROR:
+ return IB_QPS_ERR;
+ default:
+ return IB_QPS_ERR;
+ }
+}
+
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
- struct erdma_qp *qp;
struct erdma_dev *dev;
+ struct erdma_qp *qp;
if (ibqp && qp_attr && qp_init_attr) {
qp = to_eqp(ibqp);
@@ -1575,6 +1595,9 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
+ qp_attr->qp_state = query_qp_state(qp);
+ qp_attr->cur_qp_state = query_qp_state(qp);
+
return 0;
}
@@ -1701,6 +1724,10 @@ err_out_xa:
return ret;
}
+void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+}
+
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
{
struct erdma_cmdq_config_mtu_req req;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index 6afdc02f5869..c998acd39a78 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -344,6 +344,7 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
u64 virt, int access, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 35d2382ee618..ec9ee59fcf0c 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -203,7 +203,6 @@ static const struct file_operations __fault_opcodes_fops = {
.open = fault_opcodes_open,
.read = fault_opcodes_read,
.write = fault_opcodes_write,
- .llseek = no_llseek
};
void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 3e02c474f59f..4fc5b9d5fea8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -64,8 +64,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
u8 tc_mode = 0;
int ret;
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
- return -EOPNOTSUPP;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
ah->av.port = rdma_ah_get_port_num(ah_attr);
ah->av.gid_index = grh->sgid_index;
@@ -83,7 +85,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
ret = 0;
if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
- return ret;
+ goto err_out;
if (tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@ -91,8 +93,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
else
ah->av.sl = rdma_ah_get_sl(ah_attr);
- if (!check_sl_valid(hr_dev, ah->av.sl))
- return -EINVAL;
+ if (!check_sl_valid(hr_dev, ah->av.sl)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 02baa853a76c..c7c167e2a045 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1041,9 +1041,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
* @bt_level: base address table level
* @unit: ba entries per bt page
*/
-static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
+static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
{
- u32 step;
+ u64 step;
int max;
int i;
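A note on why the type widening matters here:

/* step is (roughly) unit raised to the number of hops below the current
 * level, i.e. a product of per-hop BA counts; with large region extents
 * that product can exceed 32 bits, and the previous u32 silently
 * truncated it.
 */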
@@ -1079,7 +1079,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
{
struct hns_roce_buf_region *r;
int total = 0;
- int step;
+ u64 step;
int i;
for (i = 0; i < region_cnt; i++) {
@@ -1110,7 +1110,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
int ret = 0;
int max_ofs;
int level;
- u32 step;
+ u64 step;
int end;
if (hopnum <= 1)
@@ -1134,10 +1134,12 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
/* config L1 bt to last bt and link them to corresponding parent */
for (level = 1; level < hopnum; level++) {
- cur = hem_list_search_item(&mid_bt[level], offset);
- if (cur) {
- hem_ptrs[level] = cur;
- continue;
+ if (!hem_list_is_bottom_bt(hopnum, level)) {
+ cur = hem_list_search_item(&mid_bt[level], offset);
+ if (cur) {
+ hem_ptrs[level] = cur;
+ continue;
+ }
}
step = hem_list_calc_ba_range(hopnum, level, unit);
@@ -1147,7 +1149,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
}
start_aligned = (distance / step) * step + r->offset;
- end = min_t(int, start_aligned + step - 1, max_ofs);
+ end = min_t(u64, start_aligned + step - 1, max_ofs);
cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
true);
if (!cur) {
@@ -1235,7 +1237,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
struct hns_roce_hem_item *hem, *temp_hem;
int total = 0;
int offset;
- int step;
+ u64 step;
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step < 1)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 621b057fb9da..24e906b9d3ae 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1681,8 +1681,8 @@ static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
bd_idx = i / CNT_PER_DESC;
- if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) &&
- bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC)
+ if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC &&
+ !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT)))
break;
cnt_data = (__le64 *)&desc[bd_idx].data[0];
@@ -2972,6 +2972,9 @@ err_llm_init_failed:
static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_exit(hr_dev);
+
hns_roce_function_clear(hr_dev);
if (!hr_dev->is_vf)
@@ -4423,12 +4426,14 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
upper_32_bits(to_hr_hw_page_addr(mtts[0])));
hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
- context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
- qpc_mask->rq_nxt_blk_addr = 0;
-
- hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
- upper_32_bits(to_hr_hw_page_addr(mtts[1])));
- hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ context->rq_nxt_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
+ qpc_mask->rq_nxt_blk_addr = 0;
+ hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
+ hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
+ }
return 0;
}
@@ -6193,6 +6198,7 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
struct pci_dev *pdev = hr_dev->pci_dev;
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
const struct hnae3_ae_ops *ops = ae_dev->ops;
+ enum hnae3_reset_type reset_type;
irqreturn_t int_work = IRQ_NONE;
u32 int_en;
@@ -6204,10 +6210,12 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
+ reset_type = hr_dev->is_vf ?
+ HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET;
+
/* Set reset level for reset_event() */
if (ops->set_default_reset_request)
- ops->set_default_reset_request(ae_dev,
- HNAE3_FUNC_RESET);
+ ops->set_default_reset_request(ae_dev, reset_type);
if (ops->reset_event)
ops->reset_event(pdev, NULL);
@@ -6277,7 +6285,7 @@ static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
res_type == ECC_RESOURCE_SCCC)
return le64_to_cpu(*data);
- return le64_to_cpu(*data) << PAGE_SHIFT;
+ return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT;
}
static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
@@ -6949,9 +6957,6 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
hns_roce_handle_device_err(hr_dev);
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
- free_mr_exit(hr_dev);
-
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 1de384ce4d0e..6b03ba671ff8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1460,19 +1460,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
__acquire(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock);
} else if (send_cq == recv_cq) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
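Both helpers rely on a fixed ordering rule; a note on why it is safe:

/* The two CQ locks are always taken in ascending CQN order and released
 * in the reverse order, so two QPs sharing the same pair of CQs cannot
 * ABBA-deadlock; the __acquire()/__release() annotations keep sparse's
 * lock-balance checking consistent on the degenerate single-CQ branches.
 */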
@@ -1492,13 +1492,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
spin_unlock(&recv_cq->lock);
} else if (send_cq == recv_cq) {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else if (send_cq->cqn < recv_cq->cqn) {
spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
}
}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index fc0ce35da14e..eeb932e58730 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -1347,7 +1347,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
ibdev_err(&iwdev->ibdev,
"rd_atomic = %d, above max_hw_ird=%d\n",
- attr->max_rd_atomic,
+ attr->max_dest_rd_atomic,
dev->hw_attrs.max_hw_ird);
return -EINVAL;
}
@@ -3085,7 +3085,7 @@ error:
static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 len, u64 virt,
int fd, int access,
- struct ib_udata *udata)
+ struct uverbs_attr_bundle *attrs)
{
struct irdma_device *iwdev = to_iwdev(pd->device);
struct ib_umem_dmabuf *umem_dmabuf;
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index d13abc954d2a..67c2d43135a8 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -383,7 +383,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem
create_req->length = umem->length;
create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
- create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
+ create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
create_req->page_count = num_pages_total;
ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
@@ -511,13 +511,13 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
PAGE_SHIFT;
prot = pgprot_writecombine(vma->vm_page_prot);
- ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
+ ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
NULL);
if (ret)
ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
else
- ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
- pfn, gc->db_page_size, ret);
+ ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
+ pfn, PAGE_SIZE, ret);
return ret;
}
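Both mana_ib changes concern the distinction between the CPU page size and the device's fixed hardware page size; the rationale below is inferred, not stated in the hunks:

/* Likely rationale: gdma_page_type is defined relative to MANA's hardware
 * page size (MANA_PAGE_SHIFT), not the CPU's PAGE_SHIFT, and the doorbell
 * mapping must cover one CPU page, the smallest mmap granularity, rather
 * than gc->db_page_size; the two differ on 64K-page kernels such as some
 * arm64 configurations.
 */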
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 9a439569ffcf..d7327735b8d0 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -829,7 +829,6 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
- char alias_wq_name[22];
int ret = 0;
int i, j;
union ib_gid gid;
@@ -875,9 +874,8 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
dev->sriov.alias_guid.ports_guid[i].port = i;
- snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
dev->sriov.alias_guid.ports_guid[i].wq =
- alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
+ alloc_ordered_workqueue("alias_guid%d", WQ_MEM_RECLAIM, i);
if (!dev->sriov.alias_guid.ports_guid[i].wq) {
ret = -ENOMEM;
goto err_thread;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index dc9cf45d2d32..e6e132f10625 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -2158,7 +2158,6 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
struct mlx4_ib_demux_ctx *ctx,
int port)
{
- char name[21];
int ret = 0;
int i;
@@ -2194,24 +2193,21 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_mcg;
}
- snprintf(name, sizeof(name), "mlx4_ibt%d", port);
- ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ ctx->wq = alloc_ordered_workqueue("mlx4_ibt%d", WQ_MEM_RECLAIM, port);
if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port);
ret = -ENOMEM;
goto err_wq;
}
- snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
- ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ ctx->wi_wq = alloc_ordered_workqueue("mlx4_ibwi%d", WQ_MEM_RECLAIM, port);
if (!ctx->wi_wq) {
pr_err("Failed to create wire WQ for port %d\n", port);
ret = -ENOMEM;
goto err_wiwq;
}
- snprintf(name, sizeof(name), "mlx4_ibud%d", port);
- ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ ctx->ud_wq = alloc_ordered_workqueue("mlx4_ibud%d", WQ_MEM_RECLAIM, port);
if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port);
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 72a526236c2e..b38961f5058e 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -6,6 +6,7 @@ mlx5_ib-y := ah.o \
cong.o \
counters.o \
cq.o \
+ data_direct.o \
dm.o \
doorbell.o \
gsi.o \
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 895b62cc528d..7c08e3008927 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -245,3 +245,24 @@ int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
MLX5_SET(dealloc_uar_in, in, uid, uid);
return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
+
+int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct,
+ char *out_vuid)
+{
+ u8 out[MLX5_ST_SZ_BYTES(query_vuid_out) +
+ MLX5_ST_SZ_BYTES(array1024_auto)] = {};
+ u8 in[MLX5_ST_SZ_BYTES(query_vuid_in)] = {};
+ char *vuid;
+ int err;
+
+ MLX5_SET(query_vuid_in, in, opcode, MLX5_CMD_OPCODE_QUERY_VUID);
+ MLX5_SET(query_vuid_in, in, vhca_id, MLX5_CAP_GEN(dev, vhca_id));
+ MLX5_SET(query_vuid_in, in, data_direct, data_direct);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ vuid = MLX5_ADDR_OF(query_vuid_out, out, vuid);
+ memcpy(out_vuid, vuid, MLX5_ST_SZ_BYTES(array1024_auto));
+ return 0;
+}
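A hedged usage sketch for the new wrapper (the buffer sizing mirrors how the registration struct later in this patch stores the VUID; 'mdev' is assumed in scope):

	char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1] = {};	/* +1 keeps a NUL */
	int err;

	err = mlx5_cmd_query_vuid(mdev, true, vuid);	/* true: data direct VUID */
	if (!err)
		mlx5_core_dbg(mdev, "data direct vuid %s\n", vuid);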
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index e5cd31270443..e6c88b6ebd0d 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -58,4 +58,6 @@ int mlx5_cmd_mad_ifc(struct mlx5_ib_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid);
int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid);
+int mlx5_cmd_query_vuid(struct mlx5_core_dev *dev, bool data_direct,
+ char *out_vuid);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/data_direct.c b/drivers/infiniband/hw/mlx5/data_direct.c
new file mode 100644
index 000000000000..b9ba84afaae2
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/data_direct.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include "mlx5_ib.h"
+#include "data_direct.h"
+
+static LIST_HEAD(mlx5_data_direct_dev_list);
+static LIST_HEAD(mlx5_data_direct_reg_list);
+
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_data_direct_mutex);
+
+struct mlx5_data_direct_registration {
+ struct mlx5_ib_dev *ibdev;
+ char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1];
+ struct list_head list;
+};
+
+static const struct pci_device_id mlx5_data_direct_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x2100) }, /* ConnectX-8 Data Direct */
+ { 0, }
+};
+
+static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
+ int ret;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data));
+ return PTR_ERR(vpd_data);
+ }
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "VU", &kw_len);
+ if (start < 0) {
+ ret = start;
+ pci_err(pdev, "VU keyword not found, err=%d\n", ret);
+ goto end;
+ }
+
+ dev->vuid = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
+ ret = dev->vuid ? 0 : -ENOMEM;
+
+end:
+ kfree(vpd_data);
+ return ret;
+}
+
+static void mlx5_data_direct_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+}
+
+static int mlx5_data_direct_set_dma_caps(struct pci_dev *pdev)
+{
+ int err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_warn(&pdev->dev,
+ "Warning: couldn't set 64-bit PCI DMA mask, err=%d\n", err);
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, err=%d\n", err);
+ return err;
+ }
+ }
+
+ dma_set_max_seg_size(&pdev->dev, SZ_2G);
+ return 0;
+}
+
+int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid)
+{
+ struct mlx5_data_direct_registration *reg;
+ struct mlx5_data_direct_dev *dev;
+
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ reg->ibdev = ibdev;
+ strcpy(reg->vuid, vuid);
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(dev, &mlx5_data_direct_dev_list, list) {
+ if (strcmp(dev->vuid, vuid) == 0) {
+ mlx5_ib_data_direct_bind(ibdev, dev);
+ break;
+ }
+ }
+
+ /* Add the registration to its global list, to be used upon bind/unbind
+ * of its affiliated data direct device
+ */
+ list_add_tail(&reg->list, &mlx5_data_direct_reg_list);
+ mutex_unlock(&mlx5_data_direct_mutex);
+ return 0;
+}
+
+void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (reg->ibdev == ibdev) {
+ list_del(&reg->list);
+ kfree(reg);
+ goto end;
+ }
+ }
+
+ WARN_ON(true);
+end:
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
+static void mlx5_data_direct_dev_reg(struct mlx5_data_direct_dev *dev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (strcmp(dev->vuid, reg->vuid) == 0)
+ mlx5_ib_data_direct_bind(reg->ibdev, dev);
+ }
+
+ /* Add the data direct device to the global list so that further IB
+ * devices may use it later as well
+ */
+ list_add_tail(&dev->list, &mlx5_data_direct_dev_list);
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
+static void mlx5_data_direct_dev_unreg(struct mlx5_data_direct_dev *dev)
+{
+ struct mlx5_data_direct_registration *reg;
+
+ mutex_lock(&mlx5_data_direct_mutex);
+ /* Prevent any further affiliations */
+ list_del(&dev->list);
+ list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
+ if (strcmp(dev->vuid, reg->vuid) == 0)
+ mlx5_ib_data_direct_unbind(reg->ibdev);
+ }
+ mutex_unlock(&mlx5_data_direct_mutex);
+}
+
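+/*
+ * Enable the PCI device, set its DMA caps, read the VUID from VPD and
+ * register the data direct device for binding with IB devices.
+ */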
+static int mlx5_data_direct_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlx5_data_direct_dev *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->device = &pdev->dev;
+ dev->pdev = pdev;
+
+ pci_set_drvdata(dev->pdev, dev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev->device, "Cannot enable PCI device, err=%d\n", err);
+ goto err;
+ }
+
+ pci_set_master(pdev);
+ err = mlx5_data_direct_set_dma_caps(pdev);
+ if (err)
+ goto err_disable;
+
+ if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
+ pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
+ pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
+ dev_dbg(dev->device, "Enabling pci atomics failed\n");
+
+ err = mlx5_data_direct_vpd_get_vuid(dev);
+ if (err)
+ goto err_disable;
+
+ mlx5_data_direct_dev_reg(dev);
+ return 0;
+
+err_disable:
+ pci_disable_device(pdev);
+err:
+ kfree(dev);
+ return err;
+}
+
+static void mlx5_data_direct_remove(struct pci_dev *pdev)
+{
+ struct mlx5_data_direct_dev *dev = pci_get_drvdata(pdev);
+
+ mlx5_data_direct_dev_unreg(dev);
+ pci_disable_device(pdev);
+ kfree(dev->vuid);
+ kfree(dev);
+}
+
+static struct pci_driver mlx5_data_direct_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mlx5_data_direct_pci_table,
+ .probe = mlx5_data_direct_probe,
+ .remove = mlx5_data_direct_remove,
+ .shutdown = mlx5_data_direct_shutdown,
+};
+
+int mlx5_data_direct_driver_register(void)
+{
+ return pci_register_driver(&mlx5_data_direct_driver);
+}
+
+void mlx5_data_direct_driver_unregister(void)
+{
+ pci_unregister_driver(&mlx5_data_direct_driver);
+}
diff --git a/drivers/infiniband/hw/mlx5/data_direct.h b/drivers/infiniband/hw/mlx5/data_direct.h
new file mode 100644
index 000000000000..2fd2bdbe8f69
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/data_direct.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef _MLX5_IB_DATA_DIRECT_H
+#define _MLX5_IB_DATA_DIRECT_H
+
+struct mlx5_ib_dev;
+
+struct mlx5_data_direct_dev {
+ struct device *device;
+ struct pci_dev *pdev;
+ char *vuid;
+ struct list_head list;
+};
+
+int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid);
+void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev);
+int mlx5_data_direct_driver_register(void);
+void mlx5_data_direct_driver_unregister(void);
+
+#endif
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 253fea374a72..69999d8d24f3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2673,7 +2673,6 @@ static const struct file_operations devx_async_cmd_event_fops = {
.read = devx_async_cmd_event_read,
.poll = devx_async_cmd_event_poll,
.release = uverbs_uobject_fd_release,
- .llseek = no_llseek,
};
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
@@ -2788,7 +2787,6 @@ static const struct file_operations devx_async_event_fops = {
.read = devx_async_event_read,
.poll = devx_async_event_poll,
.release = uverbs_uobject_fd_release,
- .llseek = no_llseek,
};
static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index c7a4ee896121..49af1cfbe6d1 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -13,6 +13,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
int vport_index)
{
struct mlx5_ib_dev *ibdev;
+ struct net_device *ndev;
ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
if (!ibdev)
@@ -20,12 +21,9 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
ibdev->port[vport_index].rep = rep;
rep->rep_data[REP_IB].priv = ibdev;
- write_lock(&ibdev->port[vport_index].roce.netdev_lock);
- ibdev->port[vport_index].roce.netdev =
- mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
- write_unlock(&ibdev->port[vport_index].roce.netdev_lock);
+ ndev = mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
- return 0;
+ return ib_device_set_netdev(&ibdev->ib_dev, ndev, vport_index + 1);
}
static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
@@ -104,10 +102,15 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
ibdev->is_rep = true;
vport_index = rep->vport_index;
ibdev->port[vport_index].rep = rep;
- ibdev->port[vport_index].roce.netdev =
- mlx5_ib_get_rep_netdev(lag_master->priv.eswitch, rep->vport);
ibdev->mdev = lag_master;
ibdev->num_ports = num_ports;
+ ibdev->ib_dev.phys_port_cnt = num_ports;
+ ret = ib_device_set_netdev(&ibdev->ib_dev,
+ mlx5_ib_get_rep_netdev(lag_master->priv.eswitch,
+ rep->vport),
+ vport_index + 1);
+ if (ret)
+ goto fail_add;
ret = __mlx5_ib_add(ibdev, profile);
if (ret)
@@ -160,9 +163,8 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
}
port = &dev->port[vport_index];
- write_lock(&port->roce.netdev_lock);
- port->roce.netdev = NULL;
- write_unlock(&port->roce.netdev_lock);
+
+ ib_device_set_netdev(&dev->ib_dev, NULL, vport_index + 1);
rep->rep_data[REP_IB].priv = NULL;
port->rep = NULL;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6048b9ad13bb..4999239c8f41 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -48,6 +48,7 @@
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include "macsec.h"
+#include "data_direct.h"
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -146,16 +147,52 @@ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
continue;
-
- read_lock(&port->roce.netdev_lock);
- rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
- port->rep->vport);
- if (rep_ndev == ndev) {
- read_unlock(&port->roce.netdev_lock);
+ rep_ndev = ib_device_get_netdev(&dev->ib_dev, i + 1);
+ if (rep_ndev && rep_ndev == ndev) {
+ dev_put(rep_ndev);
*port_num = i + 1;
return &port->roce;
}
- read_unlock(&port->roce.netdev_lock);
+
+ dev_put(rep_ndev);
+ }
+
+ return NULL;
+}
+
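+/*
+ * Decide whether a netdev event should be dispatched as an IB port event:
+ * only for an active IB device whose tracked netdev (or upper device)
+ * matches the event's netdev.
+ */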
+static bool mlx5_netdev_send_event(struct mlx5_ib_dev *dev,
+ struct net_device *ndev,
+ struct net_device *upper,
+ struct net_device *ib_ndev)
+{
+ if (!dev->ib_active)
+ return false;
+
+ /* Event is about our upper device */
+ if (upper == ndev)
+ return true;
+
+ /* RDMA device is not in lag and not in switchdev */
+ if (!dev->is_rep && !upper && ndev == ib_ndev)
+ return true;
+
+ /* RDMA device is in switchdev */
+ if (dev->is_rep && ndev == ib_ndev)
+ return true;
+
+ return false;
+}
+
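+/* Return the netdev of the uplink vport's port, with a reference held. */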
+static struct net_device *mlx5_ib_get_rep_uplink_netdev(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_ib_port *port;
+ int i;
+
+ for (i = 0; i < ibdev->num_ports; i++) {
+ port = &ibdev->port[i];
+ if (port->rep && port->rep->vport == MLX5_VPORT_UPLINK)
+ return ib_device_get_netdev(&ibdev->ib_dev, i + 1);
}
return NULL;
@@ -167,6 +204,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
u32 port_num = roce->native_port_num;
+ struct net_device *ib_ndev = NULL;
struct mlx5_core_dev *mdev;
struct mlx5_ib_dev *ibdev;
@@ -180,47 +218,63 @@ static int mlx5_netdev_event(struct notifier_block *this,
/* Should already be registered during the load */
if (ibdev->is_rep)
break;
- write_lock(&roce->netdev_lock);
+
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+ /* Exit if already registered */
+ if (ib_ndev)
+ goto put_ndev;
+
if (ndev->dev.parent == mdev->device)
- roce->netdev = ndev;
- write_unlock(&roce->netdev_lock);
+ ib_device_set_netdev(&ibdev->ib_dev, ndev, port_num);
break;
case NETDEV_UNREGISTER:
/* In case of reps, ib device goes away before the netdevs */
- write_lock(&roce->netdev_lock);
- if (roce->netdev == ndev)
- roce->netdev = NULL;
- write_unlock(&roce->netdev_lock);
- break;
+ if (ibdev->is_rep)
+ break;
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+ if (ib_ndev == ndev)
+ ib_device_set_netdev(&ibdev->ib_dev, NULL, port_num);
+ goto put_ndev;
case NETDEV_CHANGE:
case NETDEV_UP:
case NETDEV_DOWN: {
- struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
struct net_device *upper = NULL;
- if (lag_ndev) {
- upper = netdev_master_upper_dev_get(lag_ndev);
- dev_put(lag_ndev);
+ if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
+ struct net_device *lag_ndev;
+
+ if (mlx5_lag_is_roce(mdev))
+ lag_ndev = ib_device_get_netdev(&ibdev->ib_dev, 1);
+ else /* sriov lag */
+ lag_ndev = mlx5_ib_get_rep_uplink_netdev(ibdev);
+
+ if (lag_ndev) {
+ upper = netdev_master_upper_dev_get(lag_ndev);
+ dev_put(lag_ndev);
+ } else {
+ goto done;
+ }
}
if (ibdev->is_rep)
roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
if (!roce)
return NOTIFY_DONE;
- if ((upper == ndev ||
- ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
- ibdev->ib_active) {
+
+ ib_ndev = ib_device_get_netdev(&ibdev->ib_dev, port_num);
+
+ if (mlx5_netdev_send_event(ibdev, ndev, upper, ib_ndev)) {
struct ib_event ibev = { };
enum ib_port_state port_state;
if (get_port_state(&ibdev->ib_dev, port_num,
&port_state))
- goto done;
+ goto put_ndev;
if (roce->last_port_state == port_state)
- goto done;
+ goto put_ndev;
roce->last_port_state = port_state;
ibev.device = &ibdev->ib_dev;
@@ -229,7 +283,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
else if (port_state == IB_PORT_ACTIVE)
ibev.event = IB_EVENT_PORT_ACTIVE;
else
- goto done;
+ goto put_ndev;
ibev.element.port_num = port_num;
ib_dispatch_event(&ibev);
@@ -240,38 +294,13 @@ static int mlx5_netdev_event(struct notifier_block *this,
default:
break;
}
+put_ndev:
+ dev_put(ib_ndev);
done:
mlx5_ib_put_native_port_mdev(ibdev, port_num);
return NOTIFY_DONE;
}
-static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
- u32 port_num)
-{
- struct mlx5_ib_dev *ibdev = to_mdev(device);
- struct net_device *ndev;
- struct mlx5_core_dev *mdev;
-
- mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
- if (!mdev)
- return NULL;
-
- ndev = mlx5_lag_get_roce_netdev(mdev);
- if (ndev)
- goto out;
-
- /* Ensure ndev does not disappear before we invoke dev_hold()
- */
- read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
- ndev = ibdev->port[port_num - 1].roce.netdev;
- dev_hold(ndev);
- read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
-
-out:
- mlx5_ib_put_native_port_mdev(ibdev, port_num);
- return ndev;
-}
-
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
u32 ib_port_num,
u32 *native_port_num)
@@ -546,11 +575,11 @@ static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
if (!put_mdev)
goto out;
- ndev = mlx5_ib_get_netdev(device, port_num);
+ ndev = ib_device_get_netdev(device, port_num);
if (!ndev)
goto out;
- if (dev->lag_active) {
+ if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
rcu_read_lock();
upper = netdev_master_upper_dev_get_rcu(ndev);
if (upper) {
@@ -3024,6 +3053,59 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
mutex_destroy(&devr->srq_lock);
}
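+/*
+ * Allocate a kernel PD and a privileged mkey (PA access mode, length64) on
+ * behalf of the data direct flow; both are stored in dev->ddr.
+ */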
+static int
+mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ void *mkc;
+ u32 mkey;
+ u32 pdn;
+ u32 *in;
+ int err;
+
+ err = mlx5_core_alloc_pd(mdev, &pdn);
+ if (err)
+ return err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ MLX5_SET(create_mkey_in, in, data_direct, 1);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, a, 1);
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
+ kvfree(in);
+ if (err)
+ goto err;
+
+ dev->ddr.mkey = mkey;
+ dev->ddr.pdn = pdn;
+ return 0;
+
+err:
+ mlx5_core_dealloc_pd(mdev, pdn);
+ return err;
+}
+
+static void
+mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
+{
+ mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
+ mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
+}
+
static u32 get_core_cap_flags(struct ib_device *ibdev,
struct mlx5_hca_vport_context *rep)
{
@@ -3124,6 +3206,60 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str)
fw_rev_sub(dev->mdev));
}
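+/*
+ * On an active-backup LAG lower-state change, repoint the IB port at the
+ * newly active netdev and rescan the GID table.
+ */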
+static int lag_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_ib_dev *dev = container_of(nb, struct mlx5_ib_dev,
+ lag_events);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_port *port;
+ struct net_device *ndev;
+ int i, err;
+ int portnum;
+
+ portnum = 0;
+ switch (event) {
+ case MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE:
+ ndev = data;
+ if (ndev) {
+ if (!mlx5_lag_is_roce(mdev)) {
+ /* SR-IOV lag */
+ for (i = 0; i < dev->num_ports; i++) {
+ port = &dev->port[i];
+ if (port->rep && port->rep->vport ==
+ MLX5_VPORT_UPLINK) {
+ portnum = i;
+ break;
+ }
+ }
+ }
+ err = ib_device_set_netdev(&dev->ib_dev, ndev,
+ portnum + 1);
+ dev_put(ndev);
+ if (err)
+ return err;
+ /* Rescan gids after new netdev assignment */
+ rdma_roce_rescan_device(&dev->ib_dev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static void mlx5e_lag_event_register(struct mlx5_ib_dev *dev)
+{
+ dev->lag_events.notifier_call = lag_event;
+ blocking_notifier_chain_register(&dev->mdev->priv.lag_nh,
+ &dev->lag_events);
+}
+
+static void mlx5e_lag_event_unregister(struct mlx5_ib_dev *dev)
+{
+ blocking_notifier_chain_unregister(&dev->mdev->priv.lag_nh,
+ &dev->lag_events);
+}
+
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -3145,6 +3281,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
goto err_destroy_vport_lag;
}
+ mlx5e_lag_event_register(dev);
dev->flow_db->lag_demux_ft = ft;
dev->lag_ports = mlx5_lag_get_num_ports(mdev);
dev->lag_active = true;
@@ -3162,6 +3299,7 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
if (dev->lag_active) {
dev->lag_active = false;
+ mlx5e_lag_event_unregister(dev);
mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
dev->flow_db->lag_demux_ft = NULL;
@@ -3420,6 +3558,41 @@ unbind:
return false;
}
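+/*
+ * Query the VUID and create the data direct resources; a no-op when the
+ * device lacks the data_direct/query_vuid capabilities.
+ */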
+static int mlx5_ib_data_direct_init(struct mlx5_ib_dev *dev)
+{
+ char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1] = {};
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
+ !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
+ return 0;
+
+ ret = mlx5_cmd_query_vuid(dev->mdev, true, vuid);
+ if (ret)
+ return ret;
+
+ ret = mlx5_ib_create_data_direct_resources(dev);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&dev->data_direct_mr_list);
+ ret = mlx5_data_direct_ib_reg(dev, vuid);
+ if (ret)
+ mlx5_ib_free_data_direct_resources(dev);
+
+ return ret;
+}
+
+static void mlx5_ib_data_direct_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, data_direct) ||
+ !MLX5_CAP_GEN_2(dev->mdev, query_vuid))
+ return;
+
+ mlx5_data_direct_ib_unreg(dev);
+ mlx5_ib_free_data_direct_resources(dev);
+}
+
static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
@@ -3796,6 +3969,14 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
dump_fill_mkey),
UA_MANDATORY));
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_reg_dmabuf_mr,
+ UVERBS_OBJECT_MR,
+ UVERBS_METHOD_REG_DMABUF_MR,
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ enum mlx5_ib_uapi_reg_dmabuf_flags,
+ UA_OPTIONAL));
+
static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
@@ -3805,6 +3986,7 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR, &mlx5_ib_reg_dmabuf_mr),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
@@ -3813,6 +3995,7 @@ static const struct uapi_definition mlx5_ib_defs[] = {
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
+ mlx5_ib_data_direct_cleanup(dev);
mlx5_ib_cleanup_multiport_master(dev);
WARN_ON(!xa_empty(&dev->odp_mkeys));
mutex_destroy(&dev->cap_mask_mutex);
@@ -3828,13 +4011,11 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
- dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.dev.parent = mdev->device;
dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
for (i = 0; i < dev->num_ports; i++) {
spin_lock_init(&dev->port[i].mp.mpi_lock);
- rwlock_init(&dev->port[i].roce.netdev_lock);
dev->port[i].roce.dev = dev;
dev->port[i].roce.native_port_num = i + 1;
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
@@ -3866,6 +4047,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);
mutex_init(&dev->cap_mask_mutex);
+ mutex_init(&dev->data_direct_lock);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
xa_init(&dev->odp_mkeys);
@@ -3874,6 +4056,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
+ err = mlx5_ib_data_direct_init(dev);
+ if (err)
+ goto err_mp;
+
return 0;
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
@@ -4094,7 +4280,6 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
.create_wq = mlx5_ib_create_wq,
.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
.destroy_wq = mlx5_ib_destroy_wq,
- .get_netdev = mlx5_ib_get_netdev,
.modify_wq = mlx5_ib_modify_wq,
INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
@@ -4293,6 +4478,22 @@ static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
}
+void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
+ struct mlx5_data_direct_dev *dev)
+{
+ mutex_lock(&ibdev->data_direct_lock);
+ ibdev->data_direct_dev = dev;
+ mutex_unlock(&ibdev->data_direct_lock);
+}
+
+void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev)
+{
+ mutex_lock(&ibdev->data_direct_lock);
+ mlx5_ib_revoke_data_direct_mrs(ibdev);
+ ibdev->data_direct_dev = NULL;
+ mutex_unlock(&ibdev->data_direct_lock);
+}
+
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage)
@@ -4522,6 +4723,7 @@ static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
mplane->mdev = mparent->mdev;
mplane->num_ports = mparent->num_plane;
mplane->sub_dev_name = name;
+ mplane->ib_dev.phys_port_cnt = mplane->num_ports;
ret = __mlx5_ib_add(mplane, &plane_profile);
if (ret)
@@ -4638,6 +4840,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
+ dev->ib_dev.phys_port_cnt = num_ports;
if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
profile = &raw_eth_profile;
@@ -4715,17 +4918,23 @@ static int __init mlx5_ib_init(void)
ret = mlx5r_rep_init();
if (ret)
goto rep_err;
+ ret = mlx5_data_direct_driver_register();
+ if (ret)
+ goto dd_err;
ret = auxiliary_driver_register(&mlx5r_mp_driver);
if (ret)
goto mp_err;
ret = auxiliary_driver_register(&mlx5r_driver);
if (ret)
goto drv_err;
+
return 0;
drv_err:
auxiliary_driver_unregister(&mlx5r_mp_driver);
mp_err:
+ mlx5_data_direct_driver_unregister();
+dd_err:
mlx5r_rep_cleanup();
rep_err:
mlx5_ib_qp_event_cleanup();
@@ -4737,6 +4946,7 @@ qp_event_err:
static void __exit mlx5_ib_cleanup(void)
{
+ mlx5_data_direct_driver_unregister();
auxiliary_driver_unregister(&mlx5r_driver);
auxiliary_driver_unregister(&mlx5r_mp_driver);
mlx5r_rep_cleanup();
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d5eb1b726675..23fd72f7f63d 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -63,17 +63,6 @@ __mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
return GENMASK(largest_pg_shift, pgsz_shift);
}
-/*
- * For mkc users, instead of a page_offset the command has a start_iova which
- * specifies both the page_offset and the on-the-wire IOVA
- */
-#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \
- ib_umem_find_best_pgsz(umem, \
- __mlx5_log_page_size_to_bitmap( \
- __mlx5_bit_sz(typ, log_pgsz_fld), \
- pgsz_shift), \
- iova)
-
static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
unsigned int offset_shift)
@@ -640,6 +629,8 @@ enum mlx5_mkey_type {
MLX5_MKEY_MR = 1,
MLX5_MKEY_MW,
MLX5_MKEY_INDIRECT_DEVX,
+ MLX5_MKEY_NULL,
+ MLX5_MKEY_IMPLICIT_CHILD,
};
struct mlx5r_cache_rb_key {
@@ -682,6 +673,8 @@ struct mlx5_ib_mr {
struct mlx5_ib_mkey mmkey;
struct ib_umem *umem;
+ /* The mr is data direct related */
+ u8 data_direct :1;
union {
/* Used only by kernel MRs (umem == NULL) */
@@ -719,6 +712,11 @@ struct mlx5_ib_mr {
} odp_destroy;
struct ib_odp_counters odp_stats;
bool is_odp_implicit;
+ /* The affiliated data direct crossed mr */
+ struct mlx5_ib_mr *dd_crossed_mr;
+ struct list_head dd_node;
+ u8 revoked :1;
+ struct mlx5_ib_mkey null_mmkey;
};
};
};
@@ -796,6 +794,7 @@ struct mlx5_cache_ent {
u8 is_tmp:1;
u8 disabled:1;
u8 fill_to_high_water:1;
+ u8 tmp_cleanup_scheduled:1;
/*
* - limit is the low water mark for stored mkeys, 2* limit is the
@@ -827,7 +826,6 @@ struct mlx5_mkey_cache {
struct mutex rb_lock;
struct dentry *fs_root;
unsigned long last_add;
- struct delayed_work remove_ent_dwork;
};
struct mlx5_ib_port_resources {
@@ -835,6 +833,11 @@ struct mlx5_ib_port_resources {
struct work_struct pkey_change_work;
};
+struct mlx5_data_direct_resources {
+ u32 pdn;
+ u32 mkey;
+};
+
struct mlx5_ib_resources {
struct ib_cq *c0;
struct mutex cq_lock;
@@ -885,8 +888,6 @@ struct mlx5_roce {
- /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
- * netdev pointer
- */
- rwlock_t netdev_lock;
- struct net_device *netdev;
struct notifier_block nb;
struct netdev_net_notifier nn;
struct notifier_block mdev_nb;
@@ -1131,7 +1132,11 @@ struct mlx5_macsec {
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
+ struct mlx5_data_direct_dev *data_direct_dev;
+ /* protect accessing data_direct_dev */
+ struct mutex data_direct_lock;
struct notifier_block mdev_events;
+ struct notifier_block lag_events;
int num_ports;
/* serialize update of capability mask
*/
@@ -1161,6 +1166,7 @@ struct mlx5_ib_dev {
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
+ struct list_head data_direct_mr_list;
/* Array with num_ports elements */
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
@@ -1185,6 +1191,7 @@ struct mlx5_ib_dev {
u16 pkey_table_len;
u8 lag_ports;
struct mlx5_special_mkeys mkeys;
+ struct mlx5_data_direct_resources ddr;
#ifdef CONFIG_MLX5_MACSEC
struct mlx5_macsec macsec;
@@ -1345,7 +1352,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
int mlx5_ib_advise_mr(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags,
@@ -1356,7 +1363,6 @@ int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
int access_flags);
-void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
@@ -1425,6 +1431,10 @@ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
+void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
+ struct mlx5_data_direct_dev *dev);
+void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
@@ -1633,8 +1643,6 @@ static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}
-int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
-
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
/*
@@ -1707,4 +1715,20 @@ static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
return (port - 1) / dev->num_ports + 1;
}
+/*
+ * For mkc users, instead of a page_offset the command has a start_iova which
+ * specifies both the page_offset and the on-the-wire IOVA
+ */
+static __always_inline unsigned long
+mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ u64 iova)
+{
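+ /* The mkc log_page_size field is 6 bits wide when umr_log_entity_size_5
+ * is set, 5 bits otherwise.
+ */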
+ int page_size_bits =
+ MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 6 : 5;
+ unsigned long bitmap =
+ __mlx5_log_page_size_to_bitmap(page_size_bits, 0);
+
+ return ib_umem_find_best_pgsz(umem, bitmap, iova);
+}
+
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 98bd8eaa393e..45d9dc9c6c8f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -43,18 +43,22 @@
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"
+#include "data_direct.h"
enum {
MAX_PENDING_REG_MR = 8,
};
+#define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4
#define MLX5_UMR_ALIGN 2048
static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
- unsigned int page_size, bool populate);
+ unsigned int page_size, bool populate,
+ int access_mode);
+static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
struct ib_pd *pd)
@@ -211,9 +215,9 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
push_mkey_locked(ent, mkey_out->mkey);
+ ent->pending--;
/* If we are doing fill_to_high_water then keep going. */
queue_adjust_cache_locked(ent);
- ent->pending--;
spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
kfree(mkey_out);
}
@@ -527,6 +531,21 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
}
}
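+/*
+ * Destroy all mkeys stored in the entry, dropping the queue lock around each
+ * destroy command, and clear the scheduled-cleanup flag.
+ */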
+static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
+{
+ u32 mkey;
+
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ while (ent->mkeys_queue.ci) {
+ mkey = pop_mkey_locked(ent);
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ mlx5_core_destroy_mkey(dev->mdev, mkey);
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ }
+ ent->tmp_cleanup_scheduled = false;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+}
+
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
@@ -598,7 +617,11 @@ static void delayed_cache_work_func(struct work_struct *work)
struct mlx5_cache_ent *ent;
ent = container_of(work, struct mlx5_cache_ent, dwork.work);
- __cache_work_func(ent);
+ /* temp entries are never filled, only cleaned */
+ if (ent->is_tmp)
+ clean_keys(ent->dev, ent);
+ else
+ __cache_work_func(ent);
}
static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
@@ -659,6 +682,7 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
{
struct rb_node *node = dev->cache.rb_root.rb_node;
struct mlx5_cache_ent *cur, *smallest = NULL;
+ u64 ndescs_limit;
int cmp;
/*
@@ -677,10 +701,18 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
return cur;
}
+ /*
+ * Limit the use of mkeys larger than twice the required size, while
+ * still allowing small MRs to use the smallest cache entry.
+ */
+ ndescs_limit = max_t(u64, rb_key.ndescs * 2,
+ MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS);
+
return (smallest &&
smallest->rb_key.access_mode == rb_key.access_mode &&
smallest->rb_key.access_flags == rb_key.access_flags &&
- smallest->rb_key.ats == rb_key.ats) ?
+ smallest->rb_key.ats == rb_key.ats &&
+ smallest->rb_key.ndescs <= ndescs_limit) ?
smallest :
NULL;
}
@@ -765,21 +797,6 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
return _mlx5_mr_cache_alloc(dev, ent, access_flags);
}
-static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
-{
- u32 mkey;
-
- cancel_delayed_work(&ent->dwork);
- spin_lock_irq(&ent->mkeys_queue.lock);
- while (ent->mkeys_queue.ci) {
- mkey = pop_mkey_locked(ent);
- spin_unlock_irq(&ent->mkeys_queue.lock);
- mlx5_core_destroy_mkey(dev->mdev, mkey);
- spin_lock_irq(&ent->mkeys_queue.lock);
- }
- spin_unlock_irq(&ent->mkeys_queue.lock);
-}
-
static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
if (!mlx5_debugfs_root || dev->is_rep)
@@ -892,10 +909,6 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
ent->limit = 0;
mlx5_mkey_cache_debugfs_add_ent(dev, ent);
- } else {
- mod_delayed_work(ent->dev->cache.wq,
- &ent->dev->cache.remove_ent_dwork,
- msecs_to_jiffies(30 * 1000));
}
return ent;
@@ -906,35 +919,6 @@ mkeys_err:
return ERR_PTR(ret);
}
-static void remove_ent_work_func(struct work_struct *work)
-{
- struct mlx5_mkey_cache *cache;
- struct mlx5_cache_ent *ent;
- struct rb_node *cur;
-
- cache = container_of(work, struct mlx5_mkey_cache,
- remove_ent_dwork.work);
- mutex_lock(&cache->rb_lock);
- cur = rb_last(&cache->rb_root);
- while (cur) {
- ent = rb_entry(cur, struct mlx5_cache_ent, node);
- cur = rb_prev(cur);
- mutex_unlock(&cache->rb_lock);
-
- spin_lock_irq(&ent->mkeys_queue.lock);
- if (!ent->is_tmp) {
- spin_unlock_irq(&ent->mkeys_queue.lock);
- mutex_lock(&cache->rb_lock);
- continue;
- }
- spin_unlock_irq(&ent->mkeys_queue.lock);
-
- clean_keys(ent->dev, ent);
- mutex_lock(&cache->rb_lock);
- }
- mutex_unlock(&cache->rb_lock);
-}
-
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mkey_cache *cache = &dev->cache;
@@ -950,7 +934,6 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->slow_path_mutex);
mutex_init(&dev->cache.rb_lock);
dev->cache.rb_root = RB_ROOT;
- INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func);
cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
if (!cache->wq) {
mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -962,7 +945,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
mlx5_mkey_cache_debugfs_init(dev);
mutex_lock(&cache->rb_lock);
for (i = 0; i <= mkey_cache_max_order(dev); i++) {
- rb_key.ndescs = 1 << (i + 2);
+ rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i;
ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
if (IS_ERR(ent)) {
ret = PTR_ERR(ent);
@@ -1001,7 +984,6 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
return;
mutex_lock(&dev->cache.rb_lock);
- cancel_delayed_work(&dev->cache.remove_ent_dwork);
for (node = rb_first(root); node; node = rb_next(node)) {
ent = rb_entry(node, struct mlx5_cache_ent, node);
spin_lock_irq(&ent->mkeys_queue.lock);
@@ -1062,6 +1044,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
MLX5_SET(mkc, mkc, length64, 1);
set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
pd);
+ MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats));
err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
@@ -1126,12 +1109,10 @@ static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct ib_umem *umem, u64 iova,
- int access_flags)
+ int access_flags, int access_mode)
{
- struct mlx5r_cache_rb_key rb_key = {
- .access_mode = MLX5_MKC_ACCESS_MODE_MTT,
- };
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5r_cache_rb_key rb_key = {};
struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
unsigned int page_size;
@@ -1139,11 +1120,11 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
if (umem->is_dmabuf)
page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
else
- page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
- 0, iova);
+ page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
+ rb_key.access_mode = access_mode;
rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
@@ -1154,7 +1135,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
*/
if (!ent) {
mutex_lock(&dev->slow_path_mutex);
- mr = reg_create(pd, umem, iova, access_flags, page_size, false);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode);
mutex_unlock(&dev->slow_path_mutex);
if (IS_ERR(mr))
return mr;
@@ -1175,13 +1156,71 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
return mr;
}
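+/*
+ * Create a crossing mkey that references the crossed (data direct) MR
+ * through its lkey and the target VHCA id.
+ */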
+static struct ib_mr *
+reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_flags,
+ u32 crossed_lkey)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int access_mode = MLX5_MKC_ACCESS_MODE_CROSSING;
+ struct mlx5_ib_mr *mr;
+ void *mkc;
+ int inlen;
+ u32 *in;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev->mdev, crossing_vhca_mkey))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_1;
+ }
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, crossing_target_vhca_id,
+ MLX5_CAP_GEN(dev->mdev, vhca_id));
+ MLX5_SET(mkc, mkc, translations_octword_size, crossed_lkey);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
+
+ /* For this crossing mkey, the IOVA is set to 0 and the length to iova + length */
+ set_mkc_access_pd_addr_fields(mkc, access_flags, 0, pd);
+ MLX5_SET64(mkc, mkc, len, iova + length);
+
+ MLX5_SET(mkc, mkc, free, 0);
+ MLX5_SET(mkc, mkc, umr_en, 0);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_2;
+
+ mr->mmkey.type = MLX5_MKEY_MR;
+ set_mr_fields(dev, mr, length, access_flags, iova);
+ mr->ibmr.pd = pd;
+ kvfree(in);
+ mlx5_ib_dbg(dev, "crossing mkey = 0x%x\n", mr->mmkey.key);
+
+ return &mr->ibmr;
+err_2:
+ kvfree(in);
+err_1:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
/*
* If ibmr is NULL it will be allocated by reg_create.
* Else, the given ibmr will be used.
*/
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
- unsigned int page_size, bool populate)
+ unsigned int page_size, bool populate,
+ int access_mode)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr;
@@ -1190,7 +1229,9 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
int inlen;
u32 *in;
int err;
- bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
+ bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)) &&
+ (access_mode == MLX5_MKC_ACCESS_MODE_MTT);
+ bool ksm_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
if (!page_size)
return ERR_PTR(-EINVAL);
@@ -1213,7 +1254,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
}
pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
if (populate) {
- if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
+ if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND || ksm_mode)) {
err = -EINVAL;
goto err_2;
}
@@ -1229,14 +1270,22 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
populate ? pd : dev->umrc.pd);
+ /* In a data direct flow, overwrite the pdn field with the internal kernel PD */
+ if (umem->is_dmabuf && ksm_mode)
+ MLX5_SET(mkc, mkc, pd, dev->ddr.pdn);
+
MLX5_SET(mkc, mkc, free, !populate);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET64(mkc, mkc, len, umem->length);
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
- MLX5_SET(mkc, mkc, translations_octword_size,
- get_octo_len(iova, umem->length, mr->page_shift));
+ if (ksm_mode)
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(iova, umem->length, mr->page_shift) * 2);
+ else
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(iova, umem->length, mr->page_shift));
MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
if (mlx5_umem_needs_ats(dev, umem, access_flags))
MLX5_SET(mkc, mkc, ma_translation_mode, 1);
@@ -1373,13 +1422,15 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
if (xlt_with_umr) {
- mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
+ mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
+ MLX5_MKC_ACCESS_MODE_MTT);
} else {
- unsigned int page_size = mlx5_umem_find_best_pgsz(
- umem, mkc, log_page_size, 0, iova);
+ unsigned int page_size =
+ mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
mutex_lock(&dev->slow_path_mutex);
- mr = reg_create(pd, umem, iova, access_flags, page_size, true);
+ mr = reg_create(pd, umem, iova, access_flags, page_size,
+ true, MLX5_MKC_ACCESS_MODE_MTT);
mutex_unlock(&dev->slow_path_mutex);
}
if (IS_ERR(mr)) {
@@ -1442,7 +1493,8 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ERR(odp))
return ERR_CAST(odp);
- mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
+ mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags,
+ MLX5_MKC_ACCESS_MODE_MTT);
if (IS_ERR(mr)) {
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
@@ -1510,35 +1562,31 @@ static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
.move_notify = mlx5_ib_dmabuf_invalidate_cb,
};
-struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
- u64 length, u64 virt_addr,
- int fd, int access_flags,
- struct ib_udata *udata)
+static struct ib_mr *
+reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
+ u64 offset, u64 length, u64 virt_addr,
+ int fd, int access_flags, int access_mode)
{
+ bool pinned_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
struct ib_umem_dmabuf *umem_dmabuf;
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
- !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- return ERR_PTR(-EOPNOTSUPP);
-
- mlx5_ib_dbg(dev,
- "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
- offset, virt_addr, length, fd, access_flags);
-
err = mlx5r_umr_resource_init(dev);
if (err)
return ERR_PTR(err);
- /* dmabuf requires xlt update via umr to work. */
- if (!mlx5r_umr_can_load_pas(dev, length))
- return ERR_PTR(-EINVAL);
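+ /* Pinned mode maps the dmabuf against the given DMA device (the data
+ * direct flow); otherwise use dynamic attachment with move_notify.
+ */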
+ if (!pinned_mode)
+ umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev,
+ offset, length, fd,
+ access_flags,
+ &mlx5_ib_dmabuf_attach_ops);
+ else
+ umem_dmabuf = ib_umem_dmabuf_get_pinned_with_dma_device(&dev->ib_dev,
+ dma_device, offset, length,
+ fd, access_flags);
- umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
- access_flags,
- &mlx5_ib_dmabuf_attach_ops);
if (IS_ERR(umem_dmabuf)) {
mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
PTR_ERR(umem_dmabuf));
@@ -1546,7 +1594,7 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
}
mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
- access_flags);
+ access_flags, access_mode);
if (IS_ERR(mr)) {
ib_umem_release(&umem_dmabuf->umem);
return ERR_CAST(mr);
@@ -1556,9 +1604,13 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
umem_dmabuf->private = mr;
- err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
- if (err)
- goto err_dereg_mr;
+ if (!pinned_mode) {
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+ if (err)
+ goto err_dereg_mr;
+ } else {
+ mr->data_direct = true;
+ }
err = mlx5_ib_init_dmabuf_mr(mr);
if (err)
@@ -1566,10 +1618,101 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
return &mr->ibmr;
err_dereg_mr:
- mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+ __mlx5_ib_dereg_mr(&mr->ibmr);
return ERR_PTR(err);
}
+static struct ib_mr *
+reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_data_direct_dev *data_direct_dev;
+ struct ib_mr *crossing_mr;
+ struct ib_mr *crossed_mr;
+ int ret = 0;
+
+ /* Per HW behaviour, the IOVA must be page aligned in KSM mode */
+ if (!PAGE_ALIGNED(virt_addr) || (access_flags & IB_ACCESS_ON_DEMAND))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&dev->data_direct_lock);
+ data_direct_dev = dev->data_direct_dev;
+ if (!data_direct_dev) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* The device's 'data direct mkey' was created without relaxed ordering
+ * (RO) flags, to simplify things and allow for a single mkey per device.
+ * Since RO is not a must, mask it out accordingly.
+ */
+ access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
+ crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
+ offset, length, virt_addr, fd,
+ access_flags, MLX5_MKC_ACCESS_MODE_KSM);
+ if (IS_ERR(crossed_mr)) {
+ ret = PTR_ERR(crossed_mr);
+ goto end;
+ }
+
+ mutex_lock(&dev->slow_path_mutex);
+ crossing_mr = reg_create_crossing_vhca_mr(pd, virt_addr, length, access_flags,
+ crossed_mr->lkey);
+ mutex_unlock(&dev->slow_path_mutex);
+ if (IS_ERR(crossing_mr)) {
+ __mlx5_ib_dereg_mr(crossed_mr);
+ ret = PTR_ERR(crossing_mr);
+ goto end;
+ }
+
+ list_add_tail(&to_mmr(crossed_mr)->dd_node, &dev->data_direct_mr_list);
+ to_mmr(crossing_mr)->dd_crossed_mr = to_mmr(crossed_mr);
+ to_mmr(crossing_mr)->data_direct = true;
+end:
+ mutex_unlock(&dev->data_direct_lock);
+ return ret ? ERR_PTR(ret) : crossing_mr;
+}
+
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int mlx5_access_flags = 0;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS)) {
+ err = uverbs_get_flags32(&mlx5_access_flags, attrs,
+ MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
+ MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ mlx5_ib_dbg(dev,
+ "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x, mlx5_access_flags 0x%x\n",
+ offset, virt_addr, length, fd, access_flags, mlx5_access_flags);
+
+ /* dmabuf requires xlt update via umr to work. */
+ if (!mlx5r_umr_can_load_pas(dev, length))
+ return ERR_PTR(-EINVAL);
+
+ if (mlx5_access_flags & MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT)
+ return reg_user_mr_dmabuf_by_data_direct(pd, offset, length, virt_addr,
+ fd, access_flags);
+
+ return reg_user_mr_dmabuf(pd, pd->device->dma_device,
+ offset, length, virt_addr,
+ fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT);
+}
+
/*
* True if the change in access flags can be done via UMR, only some access
* flags can be updated.
@@ -1601,8 +1744,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
return false;
- *page_size =
- mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
+ *page_size = mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova);
if (WARN_ON(!*page_size))
return false;
return (mr->mmkey.cache_ent->rb_key.ndescs) >=
@@ -1665,7 +1807,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
struct mlx5_ib_mr *mr = to_mmr(ib_mr);
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct)
return ERR_PTR(-EOPNOTSUPP);
mlx5_ib_dbg(
@@ -1793,7 +1935,7 @@ err:
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (!mr->umem && mr->descs) {
+ if (!mr->umem && !mr->data_direct && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -1847,13 +1989,51 @@ end:
return ret;
}
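+/*
+ * Revoke a data direct MR: mark it revoked, invalidate its mkey via UMR and
+ * revoke the pinned dmabuf mapping.
+ */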
+static int mlx5_ib_revoke_data_direct_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+ int err;
+
+ lockdep_assert_held(&dev->data_direct_lock);
+ mr->revoked = true;
+ err = mlx5r_umr_revoke_mr(mr);
+ if (WARN_ON(err))
+ return err;
+
+ ib_umem_dmabuf_revoke(umem_dmabuf);
+ return 0;
+}
+
+void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_mr *mr, *next;
+
+ lockdep_assert_held(&dev->data_direct_lock);
+
+ list_for_each_entry_safe(mr, next, &dev->data_direct_mr_list, dd_node) {
+ list_del(&mr->dd_node);
+ mlx5_ib_revoke_data_direct_mr(mr);
+ }
+}
+
static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
- if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+ if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
+ ent = mr->mmkey.cache_ent;
+ /* upon storing to a clean temp entry - schedule its cleanup */
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
+ msecs_to_jiffies(30 * 1000));
+ ent->tmp_cleanup_scheduled = true;
+ }
+ spin_unlock_irq(&ent->mkeys_queue.lock);
return 0;
+ }
if (ent) {
spin_lock_irq(&ent->mkeys_queue.lock);
@@ -1864,7 +2044,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
return destroy_mkey(dev, mr);
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
@@ -1931,9 +2111,40 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
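+/*
+ * Deregister a crossing MR and its crossed data direct MR; the crossed MR is
+ * only unlinked from the list if it was not already revoked.
+ */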
+static int dereg_crossing_data_direct_mr(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_mr *dd_crossed_mr = mr->dd_crossed_mr;
+ int ret;
+
+ ret = __mlx5_ib_dereg_mr(&mr->ibmr);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->data_direct_lock);
+ if (!dd_crossed_mr->revoked)
+ list_del(&dd_crossed_mr->dd_node);
+
+ ret = __mlx5_ib_dereg_mr(&dd_crossed_mr->ibmr);
+ mutex_unlock(&dev->data_direct_lock);
+ return ret;
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+
+ if (mr->data_direct)
+ return dereg_crossing_data_direct_mr(dev, mr);
+
+ return __mlx5_ib_dereg_mr(ibmr);
+}
+
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
int access_mode, int page_shift)
{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
void *mkc;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -1946,6 +2157,9 @@ static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ if (access_mode == MLX5_MKC_ACCESS_MODE_PA ||
+ access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ MLX5_SET(mkc, mkc, ma_translation_mode, MLX5_CAP_GEN(dev->mdev, ats));
}
static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index a524181f34df..4b37446758fd 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -45,7 +45,7 @@
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
u32 bytes_committed;
- u32 token;
+ u64 token;
u8 event_subtype;
u8 type;
union {
@@ -74,6 +74,14 @@ struct mlx5_pagefault {
u32 rdma_op_len;
u64 rdma_va;
} rdma;
+ struct {
+ u64 va;
+ u32 mkey;
+ u32 fault_byte_count;
+ u32 prefetch_before_byte_count;
+ u32 prefetch_after_byte_count;
+ u8 flags;
+ } memory;
};
struct mlx5_ib_pf_eq *eq;
@@ -99,13 +107,20 @@ static u64 mlx5_imr_ksm_entries;
static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
struct mlx5_ib_mr *imr, int flags)
{
+ struct mlx5_core_dev *dev = mr_to_mdev(imr)->mdev;
struct mlx5_klm *end = pklm + nentries;
+ int step = MLX5_CAP_ODP(dev, mem_page_fault) ? MLX5_IMR_MTT_SIZE : 0;
+ __be32 key = MLX5_CAP_ODP(dev, mem_page_fault) ?
+ cpu_to_be32(imr->null_mmkey.key) :
+ mr_to_mdev(imr)->mkeys.null_mkey;
+ u64 va =
+ MLX5_CAP_ODP(dev, mem_page_fault) ? idx * MLX5_IMR_MTT_SIZE : 0;
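+ /* With memory scheme page faults, zapped entries point at the per-MR
+ * null mkey at their actual VA rather than the global null mkey at 0.
+ */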
if (flags & MLX5_IB_UPD_XLT_ZAP) {
- for (; pklm != end; pklm++, idx++) {
+ for (; pklm != end; pklm++, idx++, va += step) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
- pklm->va = 0;
+ pklm->key = key;
+ pklm->va = cpu_to_be64(va);
}
return;
}
@@ -129,7 +144,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
*/
lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
- for (; pklm != end; pklm++, idx++) {
+ for (; pklm != end; pklm++, idx++, va += step) {
struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
@@ -137,8 +152,8 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
} else {
- pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
- pklm->va = 0;
+ pklm->key = key;
+ pklm->va = cpu_to_be64(va);
}
}
}
@@ -217,6 +232,9 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
return;
xa_erase(&imr->implicit_children, idx);
+ if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key));
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
@@ -332,46 +350,46 @@ static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
else
dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
- if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.send))
caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
- if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.srq_receive))
caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.send))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.receive))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.write))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.read))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.atomic))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
- if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.srq_receive))
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.send))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.receive))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.write))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.read))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.atomic))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
- if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
+ if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.srq_receive))
caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
@@ -388,13 +406,29 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
pfault->wqe.wq_num : pfault->token;
u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
+ void *info;
int err;
MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
- MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
- MLX5_SET(page_fault_resume_in, in, token, pfault->token);
- MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
- MLX5_SET(page_fault_resume_in, in, error, !!error);
+
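+ /* Memory page faults carry a 48-bit token split across two fields;
+ * translation faults keep the 32-bit token and wq_number layout.
+ */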
+ if (pfault->event_subtype == MLX5_PFAULT_SUBTYPE_MEMORY) {
+ info = MLX5_ADDR_OF(page_fault_resume_in, in,
+ page_fault_info.mem_page_fault_info);
+ MLX5_SET(mem_page_fault_info, info, fault_token_31_0,
+ pfault->token & 0xffffffff);
+ MLX5_SET(mem_page_fault_info, info, fault_token_47_32,
+ (pfault->token >> 32) & 0xffff);
+ MLX5_SET(mem_page_fault_info, info, error, !!error);
+ } else {
+ info = MLX5_ADDR_OF(page_fault_resume_in, in,
+ page_fault_info.trans_page_fault_info);
+ MLX5_SET(trans_page_fault_info, info, page_fault_type,
+ pfault->type);
+ MLX5_SET(trans_page_fault_info, info, fault_token,
+ pfault->token);
+ MLX5_SET(trans_page_fault_info, info, wq_number, wq_num);
+ MLX5_SET(trans_page_fault_info, info, error, !!error);
+ }
err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
if (err)
@@ -468,6 +502,16 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
}
xa_unlock(&imr->implicit_children);
+ if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+ ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL);
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ xa_erase(&imr->implicit_children, idx);
+ goto out_mr;
+ }
+ mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
+ }
mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
return mr;
@@ -478,6 +522,57 @@ out_mr:
return ret;
}
+/*
+ * When using memory scheme ODP, implicit MRs can't use the reserved null
+ * mkey; instead, each implicit MR needs a private null mkey of its own to
+ * take page faults on.
+ * The null mkey is created with properties that raise a page fault on every
+ * access and carry all relevant access flags.
+ */
+static int alloc_implicit_mr_null_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mr *imr,
+ struct mlx5_ib_pd *pd)
+{
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + 64;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 4);
+ MLX5_SET(create_mkey_in, in, pg_access, 1);
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, a, 1);
+ MLX5_SET(mkc, mkc, rw, 1);
+ MLX5_SET(mkc, mkc, rr, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, free, 0);
+ MLX5_SET(mkc, mkc, umr_en, 0);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+
+ MLX5_SET(mkc, mkc, translations_octword_size, 4);
+ MLX5_SET(mkc, mkc, log_page_size, 61);
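+	/* length64 with a zero start address covers the whole VA space */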
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, pd, pd->pdn);
+ MLX5_SET64(mkc, mkc, start_addr, 0);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(dev->mdev, &imr->null_mmkey.key, in, inlen);
+ if (err)
+ goto free_in;
+
+ imr->null_mmkey.type = MLX5_MKEY_NULL;
+
+free_in:
+ kfree(in);
+ return err;
+}
+
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
int access_flags)
{
@@ -510,6 +605,16 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr->is_odp_implicit = true;
xa_init(&imr->implicit_children);
+ if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+ err = alloc_implicit_mr_null_mkey(dev, imr, pd);
+ if (err)
+ goto out_mr;
+
+ err = mlx5r_store_odp_mkey(dev, &imr->null_mmkey);
+ if (err)
+ goto out_mr;
+ }
+
err = mlx5r_umr_update_xlt(imr, 0,
mlx5_imr_ksm_entries,
MLX5_KSM_PAGE_SHIFT,
@@ -544,6 +649,14 @@ void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
xa_erase(&mr->implicit_children, idx);
mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
}
+
+ if (mr->null_mmkey.key) {
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->null_mmkey.key));
+
+ mlx5_core_destroy_mkey(mr_to_mdev(mr)->mdev,
+ mr->null_mmkey.key);
+ }
}
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
@@ -693,7 +806,7 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
u32 xlt_flags = 0;
int err;
- unsigned int page_size;
+ unsigned long page_size;
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
@@ -710,7 +823,10 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
err = -EINVAL;
} else {
- err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
+ if (mr->data_direct)
+ err = mlx5r_umr_update_data_direct_ksm_pas(mr, xlt_flags);
+ else
+ err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
}
dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
@@ -733,24 +849,31 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
* >0: Number of pages mapped
*/
static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
- u32 *bytes_mapped, u32 flags)
+ u32 *bytes_mapped, u32 flags, bool permissive_fault)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->ibmr.iova))
+ if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault)
return -EFAULT;
if (mr->umem->is_dmabuf)
return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
if (!odp->is_implicit_odp) {
+ u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova;
u64 user_va;
- if (check_add_overflow(io_virt - mr->ibmr.iova,
- (u64)odp->umem.address, &user_va))
+ if (check_add_overflow(offset, (u64)odp->umem.address,
+ &user_va))
return -EFAULT;
- if (unlikely(user_va >= ib_umem_end(odp) ||
- ib_umem_end(odp) - user_va < bcnt))
+
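+		/* In permissive mode, clamp the range to the umem bounds instead of failing */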
+ if (permissive_fault) {
+ if (user_va < ib_umem_start(odp))
+ user_va = ib_umem_start(odp);
+ if ((user_va + bcnt) > ib_umem_end(odp))
+ bcnt = ib_umem_end(odp) - user_va;
+ } else if (unlikely(user_va >= ib_umem_end(odp) ||
+ ib_umem_end(odp) - user_va < bcnt))
return -EFAULT;
return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
flags);
@@ -797,6 +920,27 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
return mmkey->key == key;
}
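+/*
+ * Look up an ODP mkey and take a reference on it. Returns -ENOENT if the
+ * key is not tracked as ODP, or -EFAULT if it does not match.
+ */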
+static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key)
+{
+ struct mlx5_ib_mkey *mmkey;
+
+ xa_lock(&dev->odp_mkeys);
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
+ if (!mmkey) {
+ mmkey = ERR_PTR(-ENOENT);
+ goto out;
+ }
+ if (!mkey_is_eq(mmkey, key)) {
+ mmkey = ERR_PTR(-EFAULT);
+ goto out;
+ }
+ refcount_inc(&mmkey->usecount);
+out:
+ xa_unlock(&dev->odp_mkeys);
+
+ return mmkey;
+}
+
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
@@ -824,32 +968,24 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
-
next_mr:
- xa_lock(&dev->odp_mkeys);
- mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
- if (!mmkey) {
- xa_unlock(&dev->odp_mkeys);
- mlx5_ib_dbg(
- dev,
- "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
- key);
- if (bytes_mapped)
- *bytes_mapped += bcnt;
- /*
- * The user could specify a SGL with multiple lkeys and only
- * some of them are ODP. Treat the non-ODP ones as fully
- * faulted.
- */
- ret = 0;
- goto end;
- }
- refcount_inc(&mmkey->usecount);
- xa_unlock(&dev->odp_mkeys);
-
- if (!mkey_is_eq(mmkey, key)) {
- mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
- ret = -EFAULT;
+ mmkey = find_odp_mkey(dev, key);
+ if (IS_ERR(mmkey)) {
+ ret = PTR_ERR(mmkey);
+ if (ret == -ENOENT) {
+ mlx5_ib_dbg(
+ dev,
+ "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+ /*
+ * The user could specify a SGL with multiple lkeys and
+ * only some of them are ODP. Treat the non-ODP ones as
+ * fully faulted.
+ */
+ ret = 0;
+ }
goto end;
}
@@ -857,7 +993,7 @@ next_mr:
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
+ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
if (ret < 0)
goto end;
@@ -944,7 +1080,7 @@ next_mr:
}
end:
- if (mmkey)
+ if (!IS_ERR(mmkey))
mlx5r_deref_odp_mkey(mmkey);
while (head) {
frame = head;
@@ -1266,7 +1402,7 @@ read_user:
if (ret)
mlx5_ib_err(
dev,
- "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
+ "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %llx\n",
ret, wqe_index, pfault->token);
resolve_page_fault:
@@ -1325,13 +1461,13 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
} else if (ret < 0 || pages_in_range(address, length) > ret) {
mlx5_ib_page_fault_resume(dev, pfault, 1);
if (ret != -ENOENT)
- mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
+ mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
ret, pfault->token, pfault->type);
return;
}
mlx5_ib_page_fault_resume(dev, pfault, 0);
- mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%llx, type: 0x%x, prefetch_activated: %d\n",
pfault->token, pfault->type,
prefetch_activated);
@@ -1347,12 +1483,80 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
prefetch_len,
&bytes_committed, NULL);
if (ret < 0 && ret != -EAGAIN) {
- mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
+ mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%llx, address: 0x%.16llx, length = 0x%.16x\n",
ret, pfault->token, address, prefetch_len);
}
}
}
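+/* Set on the last fault of a group; resume is issued to HW only then */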
+#define MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST BIT(7)
+static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
+{
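+	/* Expand the faulted range by the HW-requested prefetch bytes */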
+ u64 prefetch_va =
+ pfault->memory.va - pfault->memory.prefetch_before_byte_count;
+ size_t prefetch_size = pfault->memory.prefetch_before_byte_count +
+ pfault->memory.fault_byte_count +
+ pfault->memory.prefetch_after_byte_count;
+ struct mlx5_ib_mkey *mmkey;
+ struct mlx5_ib_mr *mr, *child_mr;
+ int ret = 0;
+
+ mmkey = find_odp_mkey(dev, pfault->memory.mkey);
+ if (IS_ERR(mmkey))
+ goto err;
+
+ switch (mmkey->type) {
+ case MLX5_MKEY_IMPLICIT_CHILD:
+ child_mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ mr = child_mr->parent;
+ break;
+ case MLX5_MKEY_NULL:
+ mr = container_of(mmkey, struct mlx5_ib_mr, null_mmkey);
+ break;
+ default:
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ break;
+ }
+
+	/* If the prefetch fails, retry with only the demanded fault range */
+ ret = pagefault_mr(mr, prefetch_va, prefetch_size, NULL, 0, true);
+ if (ret < 0) {
+ ret = pagefault_mr(mr, pfault->memory.va,
+ pfault->memory.fault_byte_count, NULL, 0,
+ true);
+ if (ret < 0)
+ goto err;
+ }
+
+ mlx5_update_odp_stats(mr, faults, ret);
+ mlx5r_deref_odp_mkey(mmkey);
+
+ if (pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST)
+ mlx5_ib_page_fault_resume(dev, pfault, 0);
+
+ mlx5_ib_dbg(
+ dev,
+ "PAGE FAULT completed %s. token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x\n",
+ pfault->memory.flags & MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST ?
+ "" :
+ "without resume cmd",
+ pfault->token, pfault->memory.mkey, pfault->memory.va,
+ pfault->memory.fault_byte_count);
+
+ return;
+
+err:
+ if (!IS_ERR(mmkey))
+ mlx5r_deref_odp_mkey(mmkey);
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
+ mlx5_ib_dbg(
+ dev,
+ "PAGE FAULT error. token 0x%llx, mkey: 0x%x, va: 0x%llx, byte_count: 0x%x, err: %d\n",
+ pfault->token, pfault->memory.mkey, pfault->memory.va,
+ pfault->memory.fault_byte_count, ret);
+}
+
static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
{
u8 event_subtype = pfault->event_subtype;
@@ -1364,6 +1568,9 @@ static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfaul
case MLX5_PFAULT_SUBTYPE_RDMA:
mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
break;
+ case MLX5_PFAULT_SUBTYPE_MEMORY:
+ mlx5_ib_mr_memory_pfault_handler(dev, pfault);
+ break;
default:
mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
event_subtype);
@@ -1382,6 +1589,7 @@ static void mlx5_ib_eqe_pf_action(struct work_struct *work)
mempool_free(pfault, eq->pool);
}
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{
struct mlx5_eqe_page_fault *pf_eqe;
@@ -1398,15 +1606,12 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
pf_eqe = &eqe->data.page_fault;
pfault->event_subtype = eqe->sub_type;
- pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
-
- mlx5_ib_dbg(eq->dev,
- "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
- eqe->sub_type, pfault->bytes_committed);
switch (eqe->sub_type) {
case MLX5_PFAULT_SUBTYPE_RDMA:
/* RDMA based event */
+ pfault->bytes_committed =
+ be32_to_cpu(pf_eqe->rdma.bytes_committed);
pfault->type =
be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
pfault->token =
@@ -1420,10 +1625,12 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
be32_to_cpu(pf_eqe->rdma.rdma_op_len);
pfault->rdma.rdma_va =
be64_to_cpu(pf_eqe->rdma.rdma_va);
- mlx5_ib_dbg(eq->dev,
- "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
- pfault->type, pfault->token,
- pfault->rdma.r_key);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, r_key: 0x%08x\n",
+ eqe->sub_type, pfault->bytes_committed,
+ pfault->type, pfault->token,
+ pfault->rdma.r_key);
mlx5_ib_dbg(eq->dev,
"PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
pfault->rdma.rdma_op_len,
@@ -1432,6 +1639,8 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
case MLX5_PFAULT_SUBTYPE_WQE:
/* WQE based event */
+ pfault->bytes_committed =
+ be32_to_cpu(pf_eqe->wqe.bytes_committed);
pfault->type =
(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
pfault->token =
@@ -1443,11 +1652,47 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
be16_to_cpu(pf_eqe->wqe.wqe_index);
pfault->wqe.packet_size =
be16_to_cpu(pf_eqe->wqe.packet_length);
- mlx5_ib_dbg(eq->dev,
- "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
- pfault->type, pfault->token,
- pfault->wqe.wq_num,
- pfault->wqe.wqe_index);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x, type:0x%x, token: 0x%06llx, wq_num: 0x%06x, wqe_index: 0x%04x\n",
+ eqe->sub_type, pfault->bytes_committed,
+ pfault->type, pfault->token, pfault->wqe.wq_num,
+ pfault->wqe.wqe_index);
+ break;
+
+ case MLX5_PFAULT_SUBTYPE_MEMORY:
+ /* Memory based event */
+ pfault->bytes_committed = 0;
+ pfault->token =
+ be32_to_cpu(pf_eqe->memory.token31_0) |
+ ((u64)be16_to_cpu(pf_eqe->memory.token47_32)
+ << 32);
+ pfault->memory.va = be64_to_cpu(pf_eqe->memory.va);
+ pfault->memory.mkey = be32_to_cpu(pf_eqe->memory.mkey);
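+			/* The demanded page count sits in the upper 20 bits */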
+ pfault->memory.fault_byte_count = (be32_to_cpu(
+ pf_eqe->memory.demand_fault_pages) >> 12) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.prefetch_before_byte_count =
+ be16_to_cpu(
+ pf_eqe->memory.pre_demand_fault_pages) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.prefetch_after_byte_count =
+ be16_to_cpu(
+ pf_eqe->memory.post_demand_fault_pages) *
+ MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
+ pfault->memory.flags = pf_eqe->memory.flags;
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, token: 0x%06llx, mkey: 0x%06x, fault_byte_count: 0x%06x, va: 0x%016llx, flags: 0x%02x\n",
+ eqe->sub_type, pfault->token,
+ pfault->memory.mkey,
+ pfault->memory.fault_byte_count,
+ pfault->memory.va, pfault->memory.flags);
+ mlx5_ib_dbg(
+ eq->dev,
+ "PAGE_FAULT: prefetch size: before: 0x%06x, after 0x%06x\n",
+ pfault->memory.prefetch_before_byte_count,
+ pfault->memory.prefetch_after_byte_count);
break;
default:
@@ -1710,7 +1955,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
for (i = 0; i < work->num_sge; ++i) {
ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
work->frags[i].length, &bytes_mapped,
- work->pf_flags);
+ work->pf_flags, false);
if (ret <= 0)
continue;
mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
@@ -1761,7 +2006,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
if (IS_ERR(mr))
return PTR_ERR(mr);
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
- &bytes_mapped, pf_flags);
+ &bytes_mapped, pf_flags, false);
if (ret < 0) {
mlx5r_deref_odp_mkey(&mr->mmkey);
return ret;
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index bbfcce3bdc84..bdb568411091 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -10,6 +10,7 @@
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
+#include "data_direct.h"
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -111,6 +112,23 @@ out:
return err;
}
+static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
+ if (!mdev)
+ return -EINVAL;
+
+ info->vport_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+
+ return 0;
+}
+
static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
struct mlx5_ib_uapi_query_port *info)
{
@@ -177,12 +195,60 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
ret = fill_switchdev_info(dev, port_num, &info);
if (ret)
return ret;
+ } else if (mlx5_core_mp_enabled(dev->mdev)) {
+ ret = fill_multiport_info(dev, port_num, &info);
+ if (ret)
+ return ret;
}
return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info,
sizeof(info));
}
+static int UVERBS_HANDLER(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_data_direct_dev *data_direct_dev;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ int out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH);
+ u32 dev_path_len;
+ char *dev_path;
+ int ret;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+ mutex_lock(&dev->data_direct_lock);
+ data_direct_dev = dev->data_direct_dev;
+ if (!data_direct_dev) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ dev_path = kobject_get_path(&data_direct_dev->device->kobj, GFP_KERNEL);
+ if (!dev_path) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
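+	/* Include the terminating NUL when checking the user buffer size */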
+ dev_path_len = strlen(dev_path) + 1;
+ if (dev_path_len > out_len) {
+ ret = -ENOSPC;
+ goto end;
+ }
+
+ ret = uverbs_copy_to(attrs, MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH, dev_path,
+ dev_path_len);
+ kfree(dev_path);
+
+end:
+ mutex_unlock(&dev->data_direct_lock);
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_QUERY_PORT,
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
@@ -193,9 +259,17 @@ DECLARE_UVERBS_NAMED_METHOD(
reg_c0),
UA_MANDATORY));
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH,
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH,
+ UVERBS_ATTR_MIN_SIZE(0),
+ UA_MANDATORY));
+
ADD_UVERBS_METHODS(mlx5_ib_device,
UVERBS_OBJECT_DEVICE,
- &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT));
+ &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT),
+ &UVERBS_METHOD(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH));
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_PD_QUERY,
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index ffc31b01f690..887fd6fa3ba9 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -224,6 +224,9 @@ int mlx5r_umr_init(struct mlx5_ib_dev *dev)
void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
{
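+	/* Nothing to clean up if UMR resources were never initialized */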
+ if (!dev->umrc.pd)
+ return;
+
mutex_destroy(&dev->umrc.init_lock);
ib_dealloc_pd(dev->umrc.pd);
}
@@ -632,44 +635,47 @@ static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
wqe->data_seg.byte_count = cpu_to_be32(sg->length);
}
-/*
- * Send the DMA list to the HW for a normal MR using UMR.
- * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
- * flag may be used.
- */
-int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
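+/*
+ * Common XLT update path: with @dd set, the translation table is built from
+ * KSM entries pointing at the device's data-direct mkey; otherwise it is
+ * built from MTT entries holding the DMA addresses directly.
+ */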
+static int
+_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
{
+ size_t ent_size = dd ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
struct mlx5_ib_dev *dev = mr_to_mdev(mr);
struct device *ddev = &dev->mdev->pdev->dev;
struct mlx5r_umr_wqe wqe = {};
struct ib_block_iter biter;
+ struct mlx5_ksm *cur_ksm;
struct mlx5_mtt *cur_mtt;
size_t orig_sg_length;
- struct mlx5_mtt *mtt;
size_t final_size;
+ void *curr_entry;
struct ib_sge sg;
+ void *entry;
u64 offset = 0;
int err = 0;
- if (WARN_ON(mr->umem->is_odp))
- return -EINVAL;
-
- mtt = mlx5r_umr_create_xlt(
- dev, &sg, ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
- sizeof(*mtt), flags);
- if (!mtt)
+ entry = mlx5r_umr_create_xlt(dev, &sg,
+ ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
+ ent_size, flags);
+ if (!entry)
return -ENOMEM;
orig_sg_length = sg.length;
-
mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
mr->page_shift);
+ if (dd) {
+ /* Use the data direct internal kernel PD */
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, dev->ddr.pdn);
+ cur_ksm = entry;
+ } else {
+ cur_mtt = entry;
+ }
+
mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);
- cur_mtt = mtt;
+ curr_entry = entry;
rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
- if (cur_mtt == (void *)mtt + sg.length) {
+ if (curr_entry == entry + sg.length) {
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
@@ -681,23 +687,31 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
DMA_TO_DEVICE);
offset += sg.length;
mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);
-
- cur_mtt = mtt;
+ if (dd)
+ cur_ksm = entry;
+ else
+ cur_mtt = entry;
}
- cur_mtt->ptag =
- cpu_to_be64(rdma_block_iter_dma_address(&biter) |
- MLX5_IB_MTT_PRESENT);
-
- if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
- cur_mtt->ptag = 0;
-
- cur_mtt++;
+ if (dd) {
+ cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
+ cur_ksm++;
+ curr_entry = cur_ksm;
+ } else {
+ cur_mtt->ptag =
+ cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+ MLX5_IB_MTT_PRESENT);
+ if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
+ cur_mtt->ptag = 0;
+ cur_mtt++;
+ curr_entry = cur_mtt;
+ }
}
- final_size = (void *)cur_mtt - (void *)mtt;
+ final_size = curr_entry - entry;
sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT);
- memset(cur_mtt, 0, sg.length - final_size);
+ memset(curr_entry, 0, sg.length - final_size);
mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
@@ -705,10 +719,32 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
err:
sg.length = orig_sg_length;
- mlx5r_umr_unmap_free_xlt(dev, mtt, &sg);
+ mlx5r_umr_unmap_free_xlt(dev, entry, &sg);
return err;
}
+int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+{
+ /* No invalidation flow is expected */
+ if (WARN_ON(!mr->umem->is_dmabuf) || (flags & MLX5_IB_UPD_XLT_ZAP))
+ return -EINVAL;
+
+ return _mlx5r_umr_update_mr_pas(mr, flags, true);
+}
+
+/*
+ * Send the DMA list to the HW for a normal MR using UMR.
+ * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
+ * flag may be used.
+ */
+int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+{
+ if (WARN_ON(mr->umem->is_odp))
+ return -EINVAL;
+
+ return _mlx5r_umr_update_mr_pas(mr, flags, false);
+}
+
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
index 5f734dc72bef..4a02c9b5aad8 100644
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -95,6 +95,7 @@ int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
int access_flags);
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index db3b25c8433a..4100656fe9a3 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -581,12 +581,9 @@ static int qib_create_workqueues(struct qib_devdata *dd)
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
if (!ppd->qib_wq) {
- char wq_name[23];
-
- snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
- dd->unit, pidx);
- ppd->qib_wq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM);
+ ppd->qib_wq = alloc_ordered_workqueue("qib%d_%d",
+ WQ_MEM_RECLAIM,
+ dd->unit, pidx);
if (!ppd->qib_wq)
goto wq_error;
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 07548fac1d8e..408fe1ba74b9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -303,8 +303,6 @@ int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
-void qib_rc_rnr_retry(unsigned long arg);
-
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
@@ -312,8 +310,6 @@ int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-void mr_rcu_callback(struct rcu_head *list);
-
void qib_migrate_qp(struct rvt_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 7a9afd5231d5..5ed5cfc2b280 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -348,13 +348,13 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
umem = ib_umem_get(pd->device, start, length, mr_access_flags);
if (IS_ERR(umem))
- return (void *)umem;
+ return ERR_CAST(umem);
n = ib_umem_num_pages(umem);
mr = __rvt_alloc_mr(n, pd);
if (IS_ERR(mr)) {
- ret = (struct ib_mr *)mr;
+ ret = ERR_CAST(mr);
goto bail_umem;
}
@@ -542,7 +542,7 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
mr = __rvt_alloc_mr(max_num_sg, pd);
if (IS_ERR(mr))
- return (struct ib_mr *)mr;
+ return ERR_CAST(mr);
return &mr->ibmr;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index 46f82b27fcd2..1f0322491d8c 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -234,7 +234,7 @@ static inline void __bth_set_resv6a(void *arg)
{
struct rxe_bth *bth = arg;
- bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK);
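+	/* Clear only the reserved bits instead of overwriting the QPN */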
+ bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK);
}
static inline int __bth_ack(void *arg)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 6596a85723c9..c11ab280551a 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -341,7 +341,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
/*
* See IBA C9-92
* For UD QPs we only check if the packet will fit in the
- * receive buffer later. For rmda operations additional
+ * receive buffer later. For RDMA operations additional
* length checks are performed in check_rkey.
*/
if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
@@ -351,7 +351,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
- if (payload + 40 > recv_buffer_len) {
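+		/* The buffer must also hold the 40-byte GRH preceding the payload */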
+ if (payload + sizeof(union rdma_network_hdr) > recv_buffer_len) {
rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
return RESPST_ERR_LENGTH;
}
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 75253f2b3e3d..86d4d6a2170e 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -94,8 +94,6 @@ struct siw_device {
atomic_t num_mr;
atomic_t num_srq;
atomic_t num_ctx;
-
- struct work_struct netdev_down;
};
struct siw_ucontext {
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index b2b54242aa69..17abef48abcd 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -364,39 +364,6 @@ error:
return NULL;
}
-/*
- * Network link becomes unavailable. Mark all
- * affected QP's accordingly.
- */
-static void siw_netdev_down(struct work_struct *work)
-{
- struct siw_device *sdev =
- container_of(work, struct siw_device, netdev_down);
-
- struct siw_qp_attrs qp_attrs;
- struct list_head *pos, *tmp;
-
- memset(&qp_attrs, 0, sizeof(qp_attrs));
- qp_attrs.state = SIW_QP_STATE_ERROR;
-
- list_for_each_safe(pos, tmp, &sdev->qp_list) {
- struct siw_qp *qp = list_entry(pos, struct siw_qp, devq);
-
- down_write(&qp->state_lock);
- WARN_ON(siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE));
- up_write(&qp->state_lock);
- }
- ib_device_put(&sdev->base_dev);
-}
-
-static void siw_device_goes_down(struct siw_device *sdev)
-{
- if (ib_device_try_get(&sdev->base_dev)) {
- INIT_WORK(&sdev->netdev_down, siw_netdev_down);
- schedule_work(&sdev->netdev_down);
- }
-}
-
static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
void *arg)
{
@@ -418,10 +385,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
break;
- case NETDEV_GOING_DOWN:
- siw_device_goes_down(sdev);
- break;
-
case NETDEV_DOWN:
sdev->state = IB_PORT_DOWN;
siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 963e936da5e3..abe0522b7df4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -509,12 +509,10 @@ struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
const char *format);
int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
struct net_device *dev);
-void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_ib_tx_timeout_work(struct work_struct *work);
-void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open_default(struct net_device *dev);
@@ -533,7 +531,6 @@ void ipoib_mcast_restart_task(struct work_struct *work);
void ipoib_mcast_start_thread(struct net_device *dev);
void ipoib_mcast_stop_thread(struct net_device *dev);
-void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
@@ -610,7 +607,6 @@ int ipoib_set_mode(struct net_device *dev, const char *buf);
void ipoib_setup_common(struct net_device *dev);
-void ipoib_pkey_open(struct ipoib_dev_priv *priv);
void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 68429a5f796d..1d7ac24c4c00 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -507,10 +507,6 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *iser_conn);
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- enum iser_data_dir cmd_dir);
-
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir dir,
bool all_imm);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 88106cf5ce55..71387811b281 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -331,7 +331,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
+ rtrs_err_rl(con->c.path, "Failed IB_WR_REG_MR: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -351,11 +351,11 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
+ rtrs_err_rl(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
- req->need_inv = false;
+ req->mr->need_inval = false;
if (req->need_inv_comp)
complete(&req->inv_comp);
else
@@ -391,12 +391,13 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
clt_path = to_clt_path(con->c.path);
if (req->sg_cnt) {
- if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
+ if (req->mr->need_inval) {
/*
-			 * We are here to invalidate read requests
-			 * ourselves. In normal scenario server should
-			 * send INV for all read requests, but
-			 * we are here, thus two things could happen:
+			 * We are here to invalidate read/write requests
+			 * ourselves. In the normal scenario the server sends
+			 * INV for all read requests and we locally invalidate
+			 * write requests ourselves; since we got here, one of
+			 * three things happened:
*
* 1. this is failover, when errno != 0
* and can_wait == 1,
@@ -404,6 +405,9 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
* 2. something totally bad happened and
* server forgot to send INV, so we
* should do that ourselves.
+ *
+			 * 3. a write request finished and we need to do the
+			 * local invalidation ourselves.
*/
if (can_wait) {
@@ -418,18 +422,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (err) {
- rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
+ rtrs_err_rl(con->c.path, "Send INV WR key=%#x: %d\n",
req->mr->rkey, err);
} else if (can_wait) {
wait_for_completion(&req->inv_comp);
- } else {
- /*
- * Something went wrong, so request will be
- * completed from INV callback.
- */
- WARN_ON_ONCE(1);
-
- return;
}
if (!refcount_dec_and_test(&req->ref))
return;
@@ -446,8 +442,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->con = NULL;
if (errno) {
- rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
- errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ rtrs_err_rl(con->c.path,
+ "IO %s request failed: error=%d path=%s [%s:%u] notify=%d\n",
+ req->dir == DMA_TO_DEVICE ? "write" : "read", errno,
+ kobject_name(&clt_path->kobj), clt_path->hca_name,
clt_path->hca_port, notify);
}
@@ -501,7 +499,7 @@ static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
req = &clt_path->reqs[msg_id];
/* Drop need_inv if server responded with send with invalidation */
- req->need_inv &= !w_inval;
+ req->mr->need_inval &= !w_inval;
complete_rdma_req(req, errno, true, false);
}
@@ -626,6 +624,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
*/
if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
return;
+ clt_path->s.hb_missed_cnt = 0;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
&imm_type, &imm_payload);
if (imm_type == RTRS_IO_RSP_IMM ||
@@ -643,7 +642,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- clt_path->s.hb_missed_cnt = 0;
clt_path->s.hb_cur_latency =
ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
@@ -670,6 +668,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
/*
* Key invalidations from server side
*/
+ clt_path->s.hb_missed_cnt = 0;
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
@@ -967,7 +966,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->dir = dir;
req->con = rtrs_permit_to_clt_con(clt_path, permit);
req->conf = conf;
- req->need_inv = false;
+ req->mr->need_inval = false;
req->need_inv_comp = false;
req->inv_errno = 0;
refcount_set(&req->ref, 1);
@@ -1089,7 +1088,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
int ret, count = 0;
u32 imm, buf_id;
struct ib_reg_wr rwr;
- struct ib_send_wr inv_wr;
struct ib_send_wr *wr = NULL;
bool fr_en = false;
@@ -1130,13 +1128,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
req->sg_cnt, req->dir);
return ret;
}
- inv_wr = (struct ib_send_wr) {
- .opcode = IB_WR_LOCAL_INV,
- .wr_cqe = &req->inv_cqe,
- .send_flags = IB_SEND_SIGNALED,
- .ex.invalidate_rkey = req->mr->rkey,
- };
- req->inv_cqe.done = rtrs_clt_inv_rkey_done;
rwr = (struct ib_reg_wr) {
.wr.opcode = IB_WR_REG_MR,
.wr.wr_cqe = &fast_reg_cqe,
@@ -1146,7 +1137,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
};
wr = &rwr.wr;
fr_en = true;
- refcount_inc(&req->ref);
+ req->mr->need_inval = true;
}
/*
* Update stats now, after request is successfully sent it is not
@@ -1156,7 +1147,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
req->usr_len + sizeof(*msg),
- imm, wr, &inv_wr);
+ imm, wr, NULL);
if (ret) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
@@ -1164,6 +1155,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
+ if (req->mr->need_inval) {
+ req->mr->need_inval = false;
+ refcount_dec(&req->ref);
+ }
if (req->sg_cnt)
ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
@@ -1213,7 +1208,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
ret = rtrs_map_sg_fr(req, count);
if (ret < 0) {
rtrs_err_rl(s,
- "Read request failed, failed to map fast reg. data, err: %d\n",
+ "Read request failed, failed to map fast reg. data, err: %d\n",
ret);
ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
req->dir);
@@ -1237,7 +1232,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
msg->desc[0].len = cpu_to_le32(req->mr->length);
/* Further invalidation is required */
- req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
+ req->mr->need_inval = !!RTRS_MSG_NEED_INVAL_F;
} else {
msg->sg_cnt = 0;
@@ -1270,7 +1265,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
- req->need_inv = false;
+ req->mr->need_inval = false;
if (req->sg_cnt)
ib_dma_unmap_sg(dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
@@ -1494,7 +1489,9 @@ static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
{
struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&clt_path->kobj));
rtrs_rdma_error_recovery(con);
}
@@ -2346,6 +2343,12 @@ static int init_conns(struct rtrs_clt_path *clt_path)
if (err)
goto destroy;
}
+
+ /*
+ * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds.
+ */
+ cid = clt_path->s.con_num - 1;
+
err = alloc_path_reqs(clt_path);
if (err)
goto destroy;
@@ -3140,8 +3143,20 @@ close_path:
return err;
}
+void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent)
+{
+ pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event),
+ ibevent->event);
+}
+
static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
{
+ INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
+ rtrs_clt_ib_event_handler);
+ ib_register_event_handler(&dev->event_handler);
+
if (!(dev->ib_dev->attrs.device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS)) {
pr_err("Memory registrations not supported.\n");
@@ -3151,8 +3166,15 @@ static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
return 0;
}
+static void rtrs_clt_ib_dev_deinit(struct rtrs_ib_dev *dev)
+{
+ ib_unregister_event_handler(&dev->event_handler);
+}
+
static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
- .init = rtrs_clt_ib_dev_init
+ .init = rtrs_clt_ib_dev_init,
+ .deinit = rtrs_clt_ib_dev_deinit
};
static int __init rtrs_client_init(void)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index f848c0392d98..0f57759b3080 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -115,7 +115,6 @@ struct rtrs_clt_io_req {
struct completion inv_comp;
int inv_errno;
bool need_inv_comp;
- bool need_inv;
refcount_t ref;
};
@@ -213,6 +212,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
void free_path(struct rtrs_clt_path *clt_path);
+void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent);
/* rtrs-clt-stats.c */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index ab25619261d2..ef29bd483b5a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -69,6 +69,7 @@ struct rtrs_ib_dev;
struct rtrs_rdma_dev_pd_ops {
int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
};
struct rtrs_rdma_dev_pd {
@@ -84,6 +85,7 @@ struct rtrs_ib_dev {
struct kref ref;
struct list_head entry;
struct rtrs_rdma_dev_pd *pool;
+ struct ib_event_handler event_handler;
};
struct rtrs_con {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 1d33efb8fb03..e83d95647852 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -26,7 +26,10 @@ MODULE_LICENSE("GPL");
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE
-static struct rtrs_rdma_dev_pd dev_pd;
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
const struct class rtrs_dev_class = {
.name = "rtrs-server",
};
@@ -672,6 +675,10 @@ err:
static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
{
+ struct rtrs_srv_con *con = container_of(c, typeof(*con), c);
+ struct rtrs_srv_path *srv_path = to_srv_path(con->c.path);
+
+ rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&srv_path->kobj));
close_path(to_srv_path(c->path));
}
@@ -931,12 +938,11 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
if (err)
goto close;
-out:
rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
return;
close:
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
close_path(srv_path);
- goto out;
}
static int post_recv_info_req(struct rtrs_srv_con *con)
@@ -987,6 +993,16 @@ static int post_recv_path(struct rtrs_srv_path *srv_path)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
q_size = srv->queue_depth;
+ if (srv_path->state != RTRS_SRV_CONNECTING) {
+ rtrs_err(s, "Path state invalid. state %s\n",
+ rtrs_srv_state_str(srv_path->state));
+ return -EIO;
+ }
+
+ if (!srv_path->s.con[cid]) {
+ rtrs_err(s, "Conn not set for %d\n", cid);
+ return -EIO;
+ }
err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
if (err) {
@@ -1229,6 +1245,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
*/
if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
return;
+ srv_path->s.hb_missed_cnt = 0;
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
@@ -2255,6 +2272,34 @@ static int check_module_params(void)
return 0;
}
+void rtrs_srv_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent)
+{
+ pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event),
+ ibevent->event);
+}
+
+static int rtrs_srv_ib_dev_init(struct rtrs_ib_dev *dev)
+{
+ INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
+ rtrs_srv_ib_event_handler);
+ ib_register_event_handler(&dev->event_handler);
+
+ return 0;
+}
+
+static void rtrs_srv_ib_dev_deinit(struct rtrs_ib_dev *dev)
+{
+ ib_unregister_event_handler(&dev->event_handler);
+}
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_srv_ib_dev_init,
+ .deinit = rtrs_srv_ib_dev_deinit
+};
+
static int __init rtrs_server_init(void)
{
int err;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 5e325b82ff33..014f85681f37 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -132,6 +132,8 @@ struct rtrs_srv_ib_ctx {
extern const struct class rtrs_dev_class;
void close_path(struct rtrs_srv_path *srv_path);
+void rtrs_srv_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a8ce3d140722..b5cbb57ee5f6 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -498,6 +498,13 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
struct input_event event;
int retval = 0;
+ /*
+ * Limit amount of data we inject into the input subsystem so that
+ * we do not hold evdev->mutex for too long. 4096 bytes corresponds
+ * to 170 input events.
+ */
+ count = min(count, 4096);
+
if (count != 0 && count < input_event_size())
return -EINVAL;
@@ -1292,7 +1299,6 @@ static const struct file_operations evdev_fops = {
.compat_ioctl = evdev_ioctl_compat,
#endif
.fasync = evdev_fasync,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 19ea1888da9f..47fac29cf7c3 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2221,7 +2221,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
mt_slots = dev->mt->num_slots;
} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
- dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
+ dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
mt_slots = clamp(mt_slots, 2, 32);
} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
mt_slots = 2;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 5824bca02e5a..ba2b17288bcd 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -718,7 +718,6 @@ static const struct file_operations joydev_fops = {
.compat_ioctl = joydev_compat_ioctl,
#endif
.fasync = joydev_fasync,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
index de1fa4cf291b..02713e624df1 100644
--- a/drivers/input/joystick/adc-joystick.c
+++ b/drivers/input/joystick/adc-joystick.c
@@ -132,7 +132,6 @@ static void adc_joystick_cleanup(void *data)
static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy)
{
struct adc_joystick_axis *axes = joy->axes;
- struct fwnode_handle *child;
s32 range[2], fuzz, flat;
unsigned int num_axes;
int error, i;
@@ -149,31 +148,30 @@ static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy)
return -EINVAL;
}
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
error = fwnode_property_read_u32(child, "reg", &i);
if (error) {
dev_err(dev, "reg invalid or missing\n");
- goto err_fwnode_put;
+ return error;
}
if (i >= num_axes) {
- error = -EINVAL;
dev_err(dev, "No matching axis for reg %d\n", i);
- goto err_fwnode_put;
+ return -EINVAL;
}
error = fwnode_property_read_u32(child, "linux,code",
&axes[i].code);
if (error) {
dev_err(dev, "linux,code invalid or missing\n");
- goto err_fwnode_put;
+ return error;
}
error = fwnode_property_read_u32_array(child, "abs-range",
range, 2);
if (error) {
dev_err(dev, "abs-range invalid or missing\n");
- goto err_fwnode_put;
+ return error;
}
if (range[0] > range[1]) {
@@ -193,10 +191,6 @@ static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy)
}
return 0;
-
-err_fwnode_put:
- fwnode_handle_put(child);
- return error;
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 1c3fef7d34af..721ab69e84ac 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -421,18 +421,6 @@ config KEYBOARD_MAX7359
To compile this driver as a module, choose M here: the
module will be called max7359_keypad.
-config KEYBOARD_MCS
- tristate "MELFAS MCS Touchkey"
- depends on I2C
- help
- Say Y here if you have the MELFAS MCS5000/5080 touchkey controller
- chip in your system.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called mcs_touchkey.
-
config KEYBOARD_MPR121
tristate "Freescale MPR121 Touchkey"
depends on I2C
@@ -496,17 +484,6 @@ config KEYBOARD_NEWTON
To compile this driver as a module, choose M here: the
module will be called newtonkbd.
-config KEYBOARD_NOMADIK
- tristate "ST-Ericsson Nomadik SKE keyboard"
- depends on (ARCH_NOMADIK || ARCH_U8500 || COMPILE_TEST)
- select INPUT_MATRIXKMAP
- help
- Say Y here if you want to use a keypad provided on the SKE controller
- used on the Ux500 and Nomadik platforms
-
- To compile this driver as a module, choose M here: the
- module will be called nmk-ske-keypad.
-
config KEYBOARD_NSPIRE
tristate "TI-NSPIRE built-in keyboard"
depends on ARCH_NSPIRE && OF
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 624c90adde89..1e0721c30709 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -42,12 +42,10 @@ obj-$(CONFIG_KEYBOARD_LPC32XX) += lpc32xx-keys.o
obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
-obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o
obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
obj-$(CONFIG_KEYBOARD_MT6779) += mt6779-keypad.o
obj-$(CONFIG_KEYBOARD_MTK_PMIC) += mtk-pmic-keys.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
-obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
obj-$(CONFIG_KEYBOARD_NSPIRE) += nspire-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c
index bf72ab8df817..f1753207429d 100644
--- a/drivers/input/keyboard/adc-keys.c
+++ b/drivers/input/keyboard/adc-keys.c
@@ -66,7 +66,6 @@ static void adc_keys_poll(struct input_dev *input)
static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st)
{
struct adc_keys_button *map;
- struct fwnode_handle *child;
int i;
st->num_keys = device_get_child_node_count(dev);
@@ -80,11 +79,10 @@ static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st)
return -ENOMEM;
i = 0;
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (fwnode_property_read_u32(child, "press-threshold-microvolt",
&map[i].voltage)) {
dev_err(dev, "Key with invalid or missing voltage\n");
- fwnode_handle_put(child);
return -EINVAL;
}
map[i].voltage /= 1000;
@@ -92,7 +90,6 @@ static int adc_keys_load_keymap(struct device *dev, struct adc_keys_state *st)
if (fwnode_property_read_u32(child, "linux,code",
&map[i].keycode)) {
dev_err(dev, "Key with invalid or missing linux,code\n");
- fwnode_handle_put(child);
return -EINVAL;
}
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 1b0279393df4..d25d63a807f2 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -188,6 +188,7 @@ struct adp5588_kpad {
u32 cols;
u32 unlock_keys[2];
int nkeys_unlock;
+ bool gpio_only;
unsigned short keycode[ADP5588_KEYMAPSIZE];
unsigned char gpiomap[ADP5588_MAXGPIO];
struct gpio_chip gc;
@@ -221,15 +222,13 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned int off)
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
int val;
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
if (kpad->dir[bank] & bit)
val = kpad->dat_out[bank];
else
val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
- mutex_unlock(&kpad->gpio_lock);
-
return !!(val & bit);
}
@@ -240,7 +239,7 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
if (val)
kpad->dat_out[bank] |= bit;
@@ -248,8 +247,6 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
kpad->dat_out[bank] &= ~bit;
adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, kpad->dat_out[bank]);
-
- mutex_unlock(&kpad->gpio_lock);
}
static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
@@ -259,7 +256,6 @@ static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
bool pull_disable;
- int ret;
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_BIAS_PULL_UP:
@@ -272,19 +268,15 @@ static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
return -ENOTSUPP;
}
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
if (pull_disable)
kpad->pull_dis[bank] |= bit;
else
kpad->pull_dis[bank] &= bit;
- ret = adp5588_write(kpad->client, GPIO_PULL1 + bank,
- kpad->pull_dis[bank]);
-
- mutex_unlock(&kpad->gpio_lock);
-
- return ret;
+ return adp5588_write(kpad->client, GPIO_PULL1 + bank,
+ kpad->pull_dis[bank]);
}
static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned int off)
@@ -292,16 +284,11 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned int off
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
- int ret;
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
kpad->dir[bank] &= ~bit;
- ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
-
- mutex_unlock(&kpad->gpio_lock);
-
- return ret;
+ return adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
}
static int adp5588_gpio_direction_output(struct gpio_chip *chip,
@@ -310,9 +297,9 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
- int ret;
+ int error;
- mutex_lock(&kpad->gpio_lock);
+ guard(mutex)(&kpad->gpio_lock);
kpad->dir[bank] |= bit;
@@ -321,17 +308,16 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
else
kpad->dat_out[bank] &= ~bit;
- ret = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
- kpad->dat_out[bank]);
- if (ret)
- goto out_unlock;
-
- ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
+ error = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
+ kpad->dat_out[bank]);
+ if (error)
+ return error;
-out_unlock:
- mutex_unlock(&kpad->gpio_lock);
+ error = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]);
+ if (error)
+ return error;
- return ret;
+ return 0;
}
static int adp5588_build_gpiomap(struct adp5588_kpad *kpad)
@@ -446,10 +432,17 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.label = kpad->client->name;
kpad->gc.owner = THIS_MODULE;
- girq = &kpad->gc.irq;
- gpio_irq_chip_set_chip(girq, &adp5588_irq_chip);
- girq->handler = handle_bad_irq;
- girq->threaded = true;
+ if (device_property_present(dev, "interrupt-controller")) {
+ if (!kpad->client->irq) {
+ dev_err(dev, "Unable to serve as interrupt controller without interrupt");
+ return -EINVAL;
+ }
+
+ girq = &kpad->gc.irq;
+ gpio_irq_chip_set_chip(girq, &adp5588_irq_chip);
+ girq->handler = handle_bad_irq;
+ girq->threaded = true;
+ }
mutex_init(&kpad->gpio_lock);
@@ -627,7 +620,7 @@ static int adp5588_setup(struct adp5588_kpad *kpad)
for (i = 0; i < KEYP_MAX_EVENT; i++) {
ret = adp5588_read(client, KEY_EVENTA);
- if (ret)
+ if (ret < 0)
return ret;
}
@@ -647,6 +640,18 @@ static int adp5588_fw_parse(struct adp5588_kpad *kpad)
struct i2c_client *client = kpad->client;
int ret, i;
+ /*
+ * Check if the device is to be operated purely in GPIO mode. To do
+ * so, check that no keypad rows or columns have been specified,
+	 * since in that case all pins should be configured as GPIOs.
+ */
+ if (!device_property_present(&client->dev, "keypad,num-rows") &&
+ !device_property_present(&client->dev, "keypad,num-columns")) {
+ /* If purely GPIO, skip keypad setup */
+ kpad->gpio_only = true;
+ return 0;
+ }
+
ret = matrix_keypad_parse_properties(&client->dev, &kpad->rows,
&kpad->cols);
if (ret)
@@ -790,17 +795,19 @@ static int adp5588_probe(struct i2c_client *client)
if (error)
return error;
- error = devm_request_threaded_irq(&client->dev, client->irq,
- adp5588_hard_irq, adp5588_thread_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->dev.driver->name, kpad);
- if (error) {
- dev_err(&client->dev, "failed to request irq %d: %d\n",
- client->irq, error);
- return error;
+ if (client->irq) {
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ adp5588_hard_irq, adp5588_thread_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, kpad);
+ if (error) {
+ dev_err(&client->dev, "failed to request irq %d: %d\n",
+ client->irq, error);
+ return error;
+ }
}
- dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
+ dev_info(&client->dev, "Rev.%d controller\n", revid);
return 0;
}
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 8996e00cd63a..922d3ab998f3 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -391,10 +391,17 @@ static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
struct adp5589_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
+ int val;
- return !!(adp5589_read(kpad->client,
- kpad->var->reg(ADP5589_GPI_STATUS_A) + bank) &
- bit);
+ mutex_lock(&kpad->gpio_lock);
+ if (kpad->dir[bank] & bit)
+ val = kpad->dat_out[bank];
+ else
+ val = adp5589_read(kpad->client,
+ kpad->var->reg(ADP5589_GPI_STATUS_A) + bank);
+ mutex_unlock(&kpad->gpio_lock);
+
+ return !!(val & bit);
}
static void adp5589_gpio_set_value(struct gpio_chip *chip,
@@ -936,10 +943,9 @@ static int adp5589_keypad_add(struct adp5589_kpad *kpad, unsigned int revid)
static void adp5589_clear_config(void *data)
{
- struct i2c_client *client = data;
- struct adp5589_kpad *kpad = i2c_get_clientdata(client);
+ struct adp5589_kpad *kpad = data;
- adp5589_write(client, kpad->var->reg(ADP5589_GENERAL_CFG), 0);
+ adp5589_write(kpad->client, kpad->var->reg(ADP5589_GENERAL_CFG), 0);
}
static int adp5589_probe(struct i2c_client *client)
@@ -983,7 +989,7 @@ static int adp5589_probe(struct i2c_client *client)
}
error = devm_add_action_or_reset(&client->dev, adp5589_clear_config,
- client);
+ kpad);
if (error)
return error;
@@ -1010,8 +1016,6 @@ static int adp5589_probe(struct i2c_client *client)
if (error)
return error;
- i2c_set_clientdata(client, kpad);
-
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
}
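
The adp5589 change passes the driver-private pointer straight to the devm cleanup action instead of round-tripping through i2c_set_clientdata()/i2c_get_clientdata(), which is why the clientdata call can be deleted. A hedged sketch of the pattern; struct example_priv, example_clear_config() and EXAMPLE_CFG_REG are illustrative, not from the patch:

struct example_priv {
	struct i2c_client *client;
};

static void example_clear_config(void *data)
{
	struct example_priv *priv = data;

	/* undo hardware configuration via priv->client */
	i2c_smbus_write_byte_data(priv->client, EXAMPLE_CFG_REG, 0);
}

static int example_probe(struct i2c_client *client)
{
	struct example_priv *priv;
	int error;

	priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->client = client;

	/* cleanup runs on probe failure and on unbind; no clientdata needed */
	return devm_add_action_or_reset(&client->dev,
					example_clear_config, priv);
}
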
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index cf25177b4830..707c5a8ae736 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -1007,7 +1007,6 @@ static const struct file_operations applespi_tp_dim_fops = {
.owner = THIS_MODULE,
.open = applespi_tp_dim_open,
.read = applespi_tp_dim_read,
- .llseek = no_llseek,
};
static void report_finger_data(struct input_dev *input, int slot,
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index f4f2078cf501..5855d4fc6e6a 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -639,7 +639,7 @@ static void atkbd_event_work(struct work_struct *work)
{
struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work);
- mutex_lock(&atkbd->mutex);
+ guard(mutex)(&atkbd->mutex);
if (!atkbd->enabled) {
/*
@@ -657,8 +657,6 @@ static void atkbd_event_work(struct work_struct *work)
if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
atkbd_set_repeat_rate(atkbd);
}
-
- mutex_unlock(&atkbd->mutex);
}
/*
@@ -1361,7 +1359,7 @@ static int atkbd_reconnect(struct serio *serio)
{
struct atkbd *atkbd = atkbd_from_serio(serio);
struct serio_driver *drv = serio->drv;
- int retval = -1;
+ int error;
if (!atkbd || !drv) {
dev_dbg(&serio->dev,
@@ -1369,16 +1367,17 @@ static int atkbd_reconnect(struct serio *serio)
return -1;
}
- mutex_lock(&atkbd->mutex);
+ guard(mutex)(&atkbd->mutex);
atkbd_disable(atkbd);
if (atkbd->write) {
- if (atkbd_probe(atkbd))
- goto out;
+ error = atkbd_probe(atkbd);
+ if (error)
+ return error;
if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
- goto out;
+ return -EIO;
/*
* Restore LED state and repeat rate. While input core
@@ -1404,11 +1403,7 @@ static int atkbd_reconnect(struct serio *serio)
if (atkbd->write)
atkbd_activate(atkbd);
- retval = 0;
-
- out:
- mutex_unlock(&atkbd->mutex);
- return retval;
+ return 0;
}
static const struct serio_device_id atkbd_serio_ids[] = {
@@ -1465,17 +1460,15 @@ static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t
struct atkbd *atkbd = atkbd_from_serio(serio);
int retval;
- retval = mutex_lock_interruptible(&atkbd->mutex);
- if (retval)
- return retval;
+ scoped_guard(mutex_intr, &atkbd->mutex) {
+ atkbd_disable(atkbd);
+ retval = handler(atkbd, buf, count);
+ atkbd_enable(atkbd);
- atkbd_disable(atkbd);
- retval = handler(atkbd, buf, count);
- atkbd_enable(atkbd);
-
- mutex_unlock(&atkbd->mutex);
+ return retval;
+ }
- return retval;
+ return -EINTR;
}
static ssize_t atkbd_show_extra(struct atkbd *atkbd, char *buf)
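
The atkbd conversion above uses the scope-based locking guards from <linux/cleanup.h>: guard(mutex) releases the mutex automatically when the enclosing scope exits, and scoped_guard(mutex_intr, ...) runs its body only if mutex_lock_interruptible() succeeded, so falling out of the scoped block means the lock was never taken. A compact sketch, assuming a recent kernel where these guard classes exist; example_* names are hypothetical:

static DEFINE_MUTEX(example_lock);
static int example_state;

static int example_set(int v)
{
	guard(mutex)(&example_lock);	/* dropped automatically on return */

	example_state = v;
	return 0;
}

static int example_set_interruptible(int v)
{
	scoped_guard(mutex_intr, &example_lock) {
		/* body runs only if mutex_lock_interruptible() succeeded */
		example_state = v;
		return 0;
	}

	return -EINTR;	/* reached only when taking the lock failed */
}
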
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 6b811d6bf625..dcbc50304a5a 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -6,20 +6,13 @@
*
* Based on the pxa27x matrix keypad controller by Rodolfo Giometti.
*
- * NOTE:
- *
- * The 3-key reset is triggered by pressing the 3 keys in
- * Row 0, Columns 2, 4, and 7 at the same time. This action can
- * be disabled by setting the EP93XX_KEYPAD_DISABLE_3_KEY flag.
- *
- * Normal operation for the matrix does not autorepeat the key press.
- * This action can be enabled by setting the EP93XX_KEYPAD_AUTOREPEAT
- * flag.
*/
#include <linux/bits.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -27,7 +20,6 @@
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/soc/cirrus/ep93xx.h>
-#include <linux/platform_data/keypad-ep93xx.h>
#include <linux/pm_wakeirq.h>
/*
@@ -61,12 +53,16 @@
#define KEY_REG_KEY1_MASK GENMASK(5, 0)
#define KEY_REG_KEY1_SHIFT 0
+#define EP93XX_MATRIX_ROWS (8)
+#define EP93XX_MATRIX_COLS (8)
+
#define EP93XX_MATRIX_SIZE (EP93XX_MATRIX_ROWS * EP93XX_MATRIX_COLS)
struct ep93xx_keypad {
- struct ep93xx_keypad_platform_data *pdata;
struct input_dev *input_dev;
struct clk *clk;
+ unsigned int debounce;
+ u16 prescale;
void __iomem *mmio_base;
@@ -133,23 +129,11 @@ static irqreturn_t ep93xx_keypad_irq_handler(int irq, void *dev_id)
static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
{
- struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
unsigned int val = 0;
- clk_set_rate(keypad->clk, pdata->clk_rate);
-
- if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
- val |= KEY_INIT_DIS3KY;
- if (pdata->flags & EP93XX_KEYPAD_DIAG_MODE)
- val |= KEY_INIT_DIAG;
- if (pdata->flags & EP93XX_KEYPAD_BACK_DRIVE)
- val |= KEY_INIT_BACK;
- if (pdata->flags & EP93XX_KEYPAD_TEST_MODE)
- val |= KEY_INIT_T2;
-
- val |= ((pdata->debounce << KEY_INIT_DBNC_SHIFT) & KEY_INIT_DBNC_MASK);
+ val |= (keypad->debounce << KEY_INIT_DBNC_SHIFT) & KEY_INIT_DBNC_MASK;
- val |= ((pdata->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK);
+ val |= (keypad->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK;
__raw_writel(val, keypad->mmio_base + KEY_INIT);
}
@@ -220,17 +204,10 @@ static int ep93xx_keypad_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops,
ep93xx_keypad_suspend, ep93xx_keypad_resume);
-static void ep93xx_keypad_release_gpio_action(void *_pdev)
-{
- struct platform_device *pdev = _pdev;
-
- ep93xx_keypad_release_gpio(pdev);
-}
-
static int ep93xx_keypad_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct ep93xx_keypad *keypad;
- const struct matrix_keymap_data *keymap_data;
struct input_dev *input_dev;
int err;
@@ -238,14 +215,6 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
- keypad->pdata = dev_get_platdata(&pdev->dev);
- if (!keypad->pdata)
- return -EINVAL;
-
- keymap_data = keypad->pdata->keymap_data;
- if (!keymap_data)
- return -EINVAL;
-
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0)
return keypad->irq;
@@ -254,19 +223,13 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
if (IS_ERR(keypad->mmio_base))
return PTR_ERR(keypad->mmio_base);
- err = ep93xx_keypad_acquire_gpio(pdev);
- if (err)
- return err;
-
- err = devm_add_action_or_reset(&pdev->dev,
- ep93xx_keypad_release_gpio_action, pdev);
- if (err)
- return err;
-
keypad->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk))
return PTR_ERR(keypad->clk);
+ device_property_read_u32(dev, "debounce-delay-ms", &keypad->debounce);
+ device_property_read_u16(dev, "cirrus,prescale", &keypad->prescale);
+
input_dev = devm_input_allocate_device(&pdev->dev);
if (!input_dev)
return -ENOMEM;
@@ -278,13 +241,13 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
input_dev->open = ep93xx_keypad_open;
input_dev->close = ep93xx_keypad_close;
- err = matrix_keypad_build_keymap(keymap_data, NULL,
+ err = matrix_keypad_build_keymap(NULL, NULL,
EP93XX_MATRIX_ROWS, EP93XX_MATRIX_COLS,
keypad->keycodes, input_dev);
if (err)
return err;
- if (keypad->pdata->flags & EP93XX_KEYPAD_AUTOREPEAT)
+ if (device_property_read_bool(&pdev->dev, "autorepeat"))
__set_bit(EV_REP, input_dev->evbit);
input_set_drvdata(input_dev, keypad);
@@ -313,10 +276,17 @@ static void ep93xx_keypad_remove(struct platform_device *pdev)
dev_pm_clear_wake_irq(&pdev->dev);
}
+static const struct of_device_id ep93xx_keypad_of_ids[] = {
+ { .compatible = "cirrus,ep9307-keypad" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_keypad_of_ids);
+
static struct platform_driver ep93xx_keypad_driver = {
.driver = {
.name = "ep93xx-keypad",
.pm = pm_sleep_ptr(&ep93xx_keypad_pm_ops),
+ .of_match_table = ep93xx_keypad_of_ids,
},
.probe = ep93xx_keypad_probe,
.remove_new = ep93xx_keypad_remove,
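
The ep93xx conversion replaces board-file platform data with generic device properties plus an OF match table, so the same probe path serves DT and any other firmware node. A sketch of the property reads, assuming the bindings shown in the diff; struct example_keypad is a hypothetical stand-in for the driver's private data:

static void example_parse_props(struct device *dev,
				struct example_keypad *keypad,
				struct input_dev *input_dev)
{
	/* optional properties: the fields keep their defaults if absent */
	device_property_read_u32(dev, "debounce-delay-ms", &keypad->debounce);
	device_property_read_u16(dev, "cirrus,prescale", &keypad->prescale);

	if (device_property_read_bool(dev, "autorepeat"))
		__set_bit(EV_REP, input_dev->evbit);
}
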
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 9f3bcd41cf67..380fe8dab3b0 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -245,23 +245,20 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
{
int n_events = get_n_events_by_type(type);
const unsigned long *bitmap = get_bm_events_by_type(ddata->input, type);
- unsigned long *bits;
ssize_t error;
int i;
- bits = bitmap_alloc(n_events, GFP_KERNEL);
+ unsigned long *bits __free(bitmap) = bitmap_alloc(n_events, GFP_KERNEL);
if (!bits)
return -ENOMEM;
error = bitmap_parselist(buf, bits, n_events);
if (error)
- goto out;
+ return error;
/* First validate */
- if (!bitmap_subset(bits, bitmap, n_events)) {
- error = -EINVAL;
- goto out;
- }
+ if (!bitmap_subset(bits, bitmap, n_events))
+ return -EINVAL;
for (i = 0; i < ddata->pdata->nbuttons; i++) {
struct gpio_button_data *bdata = &ddata->data[i];
@@ -271,12 +268,11 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
if (test_bit(*bdata->code, bits) &&
!bdata->button->can_disable) {
- error = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
- mutex_lock(&ddata->disable_lock);
+ guard(mutex)(&ddata->disable_lock);
for (i = 0; i < ddata->pdata->nbuttons; i++) {
struct gpio_button_data *bdata = &ddata->data[i];
@@ -290,11 +286,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
gpio_keys_enable_button(bdata);
}
- mutex_unlock(&ddata->disable_lock);
-
-out:
- bitmap_free(bits);
- return error;
+ return 0;
}
#define ATTR_SHOW_FN(name, type, only_disabled) \
@@ -470,11 +462,10 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
struct input_dev *input = bdata->input;
- unsigned long flags;
BUG_ON(irq != bdata->irq);
- spin_lock_irqsave(&bdata->lock, flags);
+ guard(spinlock_irqsave)(&bdata->lock);
if (!bdata->key_pressed) {
if (bdata->button->wakeup)
@@ -497,7 +488,6 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
ms_to_ktime(bdata->release_delay),
HRTIMER_MODE_REL_HARD);
out:
- spin_unlock_irqrestore(&bdata->lock, flags);
return IRQ_HANDLED;
}
@@ -768,7 +758,6 @@ gpio_keys_get_devtree_pdata(struct device *dev)
{
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
- struct fwnode_handle *child;
int nbuttons, irq;
nbuttons = device_get_child_node_count(dev);
@@ -790,7 +779,7 @@ gpio_keys_get_devtree_pdata(struct device *dev)
device_property_read_string(dev, "label", &pdata->name);
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (is_of_node(child)) {
irq = of_irq_get_byname(to_of_node(child), "irq");
if (irq > 0)
@@ -808,7 +797,6 @@ gpio_keys_get_devtree_pdata(struct device *dev)
if (fwnode_property_read_u32(child, "linux,code",
&button->code)) {
dev_err(dev, "Button without keycode\n");
- fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
@@ -1064,10 +1052,10 @@ static int gpio_keys_suspend(struct device *dev)
if (error)
return error;
} else {
- mutex_lock(&input->mutex);
+ guard(mutex)(&input->mutex);
+
if (input_device_enabled(input))
gpio_keys_close(input);
- mutex_unlock(&input->mutex);
}
return 0;
@@ -1077,20 +1065,20 @@ static int gpio_keys_resume(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
struct input_dev *input = ddata->input;
- int error = 0;
+ int error;
if (device_may_wakeup(dev)) {
gpio_keys_disable_wakeup(ddata);
} else {
- mutex_lock(&input->mutex);
- if (input_device_enabled(input))
+ guard(mutex)(&input->mutex);
+
+ if (input_device_enabled(input)) {
error = gpio_keys_open(input);
- mutex_unlock(&input->mutex);
+ if (error)
+ return error;
+ }
}
- if (error)
- return error;
-
gpio_keys_report_state(ddata);
return 0;
}
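
gpio_keys_attr_store_helper() now leans on scope-based cleanup twice: __free(bitmap) ties bitmap_free() to the variable's scope, and guard(mutex) replaces the unlock-on-every-path dance, which is what lets the `out:` label disappear. A minimal sketch, assuming <linux/cleanup.h> and the bitmap DEFINE_FREE() class from <linux/bitmap.h>; example_parse() is illustrative:

static int example_parse(const char *buf, unsigned int nbits)
{
	unsigned long *bits __free(bitmap) = bitmap_alloc(nbits, GFP_KERNEL);

	if (!bits)
		return -ENOMEM;

	/* every return below frees the bitmap automatically */
	return bitmap_parselist(buf, bits, nbits);
}
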
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index b41fd1240f43..41ca0d3c9098 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -144,7 +144,6 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
{
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
- struct fwnode_handle *child;
int nbuttons;
nbuttons = device_get_child_node_count(dev);
@@ -166,11 +165,10 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
device_property_read_string(dev, "label", &pdata->name);
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
if (fwnode_property_read_u32(child, "linux,code",
&button->code)) {
dev_err(dev, "button without keycode\n");
- fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
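
device_for_each_child_node_scoped() drops the child fwnode reference automatically when the loop scope exits, which is why both gpio_keys and gpio_keys_polled can delete their fwnode_handle_put() calls on the error paths. A sketch of the idiom; example_parse_buttons() is hypothetical:

static int example_parse_buttons(struct device *dev)
{
	device_for_each_child_node_scoped(dev, child) {
		u32 code;

		/* an early return no longer leaks the child reference */
		if (fwnode_property_read_u32(child, "linux,code", &code))
			return -EINVAL;
	}

	return 0;
}
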
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
index 688d61244b5f..1315b0f0862f 100644
--- a/drivers/input/keyboard/iqs62x-keys.c
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -45,7 +45,6 @@ struct iqs62x_keys_private {
static int iqs62x_keys_parse_prop(struct platform_device *pdev,
struct iqs62x_keys_private *iqs62x_keys)
{
- struct fwnode_handle *child;
unsigned int val;
int ret, i;
@@ -68,7 +67,8 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
}
for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) {
- child = device_get_named_child_node(&pdev->dev,
+ struct fwnode_handle *child __free(fwnode_handle) =
+ device_get_named_child_node(&pdev->dev,
iqs62x_switch_names[i]);
if (!child)
continue;
@@ -77,7 +77,6 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to read switch code: %d\n",
ret);
- fwnode_handle_put(child);
return ret;
}
iqs62x_keys->switches[i].code = val;
@@ -91,8 +90,6 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
IQS62X_EVENT_HALL_N_T :
IQS62X_EVENT_HALL_S_T);
-
- fwnode_handle_put(child);
}
return 0;
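
For a single named child node, as in iqs62x above, the same automatic-put idea applies via __free(fwnode_handle), which pairs device_get_named_child_node() with fwnode_handle_put() at end of scope. A sketch under that assumption; the "example-switch" node name is illustrative:

static int example_parse_switch(struct device *dev)
{
	struct fwnode_handle *child __free(fwnode_handle) =
			device_get_named_child_node(dev, "example-switch");
	u32 code;

	if (!child)
		return 0;	/* the node is optional */

	/* the reference is put automatically on every return path */
	return fwnode_property_read_u32(child, "linux,code", &code);
}
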
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 7a56f3d3aacd..3c38bae576ed 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -17,19 +17,27 @@
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
struct matrix_keypad {
- const struct matrix_keypad_platform_data *pdata;
struct input_dev *input_dev;
unsigned int row_shift;
+ unsigned int col_scan_delay_us;
+ /* key debounce interval in milliseconds */
+ unsigned int debounce_ms;
+ bool drive_inactive_cols;
+
+ struct gpio_desc *row_gpios[MATRIX_MAX_ROWS];
+ unsigned int num_row_gpios;
+
+ struct gpio_desc *col_gpios[MATRIX_MAX_ROWS];
+ unsigned int num_col_gpios;
+
unsigned int row_irqs[MATRIX_MAX_ROWS];
- unsigned int num_row_irqs;
DECLARE_BITMAP(wakeup_enabled_irqs, MATRIX_MAX_ROWS);
uint32_t last_key_state[MATRIX_MAX_COLS];
@@ -45,50 +53,43 @@ struct matrix_keypad {
* columns. In that case it is configured here to be input, otherwise it is
* driven with the inactive value.
*/
-static void __activate_col(const struct matrix_keypad_platform_data *pdata,
- int col, bool on)
+static void __activate_col(struct matrix_keypad *keypad, int col, bool on)
{
- bool level_on = !pdata->active_low;
-
if (on) {
- gpio_direction_output(pdata->col_gpios[col], level_on);
+ gpiod_direction_output(keypad->col_gpios[col], 1);
} else {
- gpio_set_value_cansleep(pdata->col_gpios[col], !level_on);
- if (!pdata->drive_inactive_cols)
- gpio_direction_input(pdata->col_gpios[col]);
+ gpiod_set_value_cansleep(keypad->col_gpios[col], 0);
+ if (!keypad->drive_inactive_cols)
+ gpiod_direction_input(keypad->col_gpios[col]);
}
}
-static void activate_col(const struct matrix_keypad_platform_data *pdata,
- int col, bool on)
+static void activate_col(struct matrix_keypad *keypad, int col, bool on)
{
- __activate_col(pdata, col, on);
+ __activate_col(keypad, col, on);
- if (on && pdata->col_scan_delay_us)
- udelay(pdata->col_scan_delay_us);
+ if (on && keypad->col_scan_delay_us)
+ udelay(keypad->col_scan_delay_us);
}
-static void activate_all_cols(const struct matrix_keypad_platform_data *pdata,
- bool on)
+static void activate_all_cols(struct matrix_keypad *keypad, bool on)
{
int col;
- for (col = 0; col < pdata->num_col_gpios; col++)
- __activate_col(pdata, col, on);
+ for (col = 0; col < keypad->num_col_gpios; col++)
+ __activate_col(keypad, col, on);
}
-static bool row_asserted(const struct matrix_keypad_platform_data *pdata,
- int row)
+static bool row_asserted(struct matrix_keypad *keypad, int row)
{
- return gpio_get_value_cansleep(pdata->row_gpios[row]) ?
- !pdata->active_low : pdata->active_low;
+ return gpiod_get_value_cansleep(keypad->row_gpios[row]);
}
static void enable_row_irqs(struct matrix_keypad *keypad)
{
int i;
- for (i = 0; i < keypad->num_row_irqs; i++)
+ for (i = 0; i < keypad->num_row_gpios; i++)
enable_irq(keypad->row_irqs[i]);
}
@@ -96,7 +97,7 @@ static void disable_row_irqs(struct matrix_keypad *keypad)
{
int i;
- for (i = 0; i < keypad->num_row_irqs; i++)
+ for (i = 0; i < keypad->num_row_gpios; i++)
disable_irq_nosync(keypad->row_irqs[i]);
}
@@ -109,39 +110,38 @@ static void matrix_keypad_scan(struct work_struct *work)
container_of(work, struct matrix_keypad, work.work);
struct input_dev *input_dev = keypad->input_dev;
const unsigned short *keycodes = input_dev->keycode;
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
/* de-activate all columns for scanning */
- activate_all_cols(pdata, false);
+ activate_all_cols(keypad, false);
memset(new_state, 0, sizeof(new_state));
- for (row = 0; row < pdata->num_row_gpios; row++)
- gpio_direction_input(pdata->row_gpios[row]);
+ for (row = 0; row < keypad->num_row_gpios; row++)
+ gpiod_direction_input(keypad->row_gpios[row]);
/* assert each column and read the row status out */
- for (col = 0; col < pdata->num_col_gpios; col++) {
+ for (col = 0; col < keypad->num_col_gpios; col++) {
- activate_col(pdata, col, true);
+ activate_col(keypad, col, true);
- for (row = 0; row < pdata->num_row_gpios; row++)
+ for (row = 0; row < keypad->num_row_gpios; row++)
new_state[col] |=
- row_asserted(pdata, row) ? (1 << row) : 0;
+ row_asserted(keypad, row) ? BIT(row) : 0;
- activate_col(pdata, col, false);
+ activate_col(keypad, col, false);
}
- for (col = 0; col < pdata->num_col_gpios; col++) {
+ for (col = 0; col < keypad->num_col_gpios; col++) {
uint32_t bits_changed;
bits_changed = keypad->last_key_state[col] ^ new_state[col];
if (bits_changed == 0)
continue;
- for (row = 0; row < pdata->num_row_gpios; row++) {
- if ((bits_changed & (1 << row)) == 0)
+ for (row = 0; row < keypad->num_row_gpios; row++) {
+ if (!(bits_changed & BIT(row)))
continue;
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
@@ -155,7 +155,7 @@ static void matrix_keypad_scan(struct work_struct *work)
memcpy(keypad->last_key_state, new_state, sizeof(new_state));
- activate_all_cols(pdata, true);
+ activate_all_cols(keypad, true);
/* Enable IRQs again */
spin_lock_irq(&keypad->lock);
@@ -182,7 +182,7 @@ static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
disable_row_irqs(keypad);
keypad->scan_pending = true;
schedule_delayed_work(&keypad->work,
- msecs_to_jiffies(keypad->pdata->debounce_ms));
+ msecs_to_jiffies(keypad->debounce_ms));
out:
spin_unlock_irqrestore(&keypad->lock, flags);
@@ -225,7 +225,8 @@ static void matrix_keypad_enable_wakeup(struct matrix_keypad *keypad)
{
int i;
- for_each_clear_bit(i, keypad->wakeup_enabled_irqs, keypad->num_row_irqs)
+ for_each_clear_bit(i, keypad->wakeup_enabled_irqs,
+ keypad->num_row_gpios)
if (enable_irq_wake(keypad->row_irqs[i]) == 0)
__set_bit(i, keypad->wakeup_enabled_irqs);
}
@@ -234,7 +235,8 @@ static void matrix_keypad_disable_wakeup(struct matrix_keypad *keypad)
{
int i;
- for_each_set_bit(i, keypad->wakeup_enabled_irqs, keypad->num_row_irqs) {
+ for_each_set_bit(i, keypad->wakeup_enabled_irqs,
+ keypad->num_row_gpios) {
disable_irq_wake(keypad->row_irqs[i]);
__clear_bit(i, keypad->wakeup_enabled_irqs);
}
@@ -272,182 +274,108 @@ static DEFINE_SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
static int matrix_keypad_init_gpio(struct platform_device *pdev,
struct matrix_keypad *keypad)
{
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- int i, irq, err;
-
- /* initialized strobe lines as outputs, activated */
- for (i = 0; i < pdata->num_col_gpios; i++) {
- err = devm_gpio_request(&pdev->dev,
- pdata->col_gpios[i], "matrix_kbd_col");
- if (err) {
- dev_err(&pdev->dev,
- "failed to request GPIO%d for COL%d\n",
- pdata->col_gpios[i], i);
- return err;
- }
+ bool active_low;
+ int nrow, ncol;
+ int err;
+ int i;
- gpio_direction_output(pdata->col_gpios[i], !pdata->active_low);
+ nrow = gpiod_count(&pdev->dev, "row");
+ ncol = gpiod_count(&pdev->dev, "col");
+ if (nrow < 0 || ncol < 0) {
+ dev_err(&pdev->dev, "missing row or column GPIOs\n");
+ return -EINVAL;
}
- for (i = 0; i < pdata->num_row_gpios; i++) {
- err = devm_gpio_request(&pdev->dev,
- pdata->row_gpios[i], "matrix_kbd_row");
+ keypad->num_row_gpios = nrow;
+ keypad->num_col_gpios = ncol;
+
+ active_low = device_property_read_bool(&pdev->dev, "gpio-activelow");
+
+ /* initialize strobe lines as outputs, activated */
+ for (i = 0; i < keypad->num_col_gpios; i++) {
+ keypad->col_gpios[i] = devm_gpiod_get_index(&pdev->dev, "col",
+ i, GPIOD_ASIS);
+ err = PTR_ERR_OR_ZERO(keypad->col_gpios[i]);
if (err) {
dev_err(&pdev->dev,
- "failed to request GPIO%d for ROW%d\n",
- pdata->row_gpios[i], i);
+ "failed to request GPIO for COL%d: %d\n",
+ i, err);
return err;
}
- gpio_direction_input(pdata->row_gpios[i]);
+ gpiod_set_consumer_name(keypad->col_gpios[i], "matrix_kbd_col");
+
+ if (active_low ^ gpiod_is_active_low(keypad->col_gpios[i]))
+ gpiod_toggle_active_low(keypad->col_gpios[i]);
+
+ gpiod_direction_output(keypad->col_gpios[i], 1);
}
- if (pdata->clustered_irq > 0) {
- err = devm_request_any_context_irq(&pdev->dev,
- pdata->clustered_irq,
- matrix_keypad_interrupt,
- pdata->clustered_irq_flags,
- "matrix-keypad", keypad);
- if (err < 0) {
+ for (i = 0; i < keypad->num_row_gpios; i++) {
+ keypad->row_gpios[i] = devm_gpiod_get_index(&pdev->dev, "row",
+ i, GPIOD_IN);
+ err = PTR_ERR_OR_ZERO(keypad->row_gpios[i]);
+ if (err) {
dev_err(&pdev->dev,
- "Unable to acquire clustered interrupt\n");
+ "failed to request GPIO for ROW%d: %d\n",
+ i, err);
return err;
}
- keypad->row_irqs[0] = pdata->clustered_irq;
- keypad->num_row_irqs = 1;
- } else {
- for (i = 0; i < pdata->num_row_gpios; i++) {
- irq = gpio_to_irq(pdata->row_gpios[i]);
- if (irq < 0) {
- err = irq;
- dev_err(&pdev->dev,
- "Unable to convert GPIO line %i to irq: %d\n",
- pdata->row_gpios[i], err);
- return err;
- }
-
- err = devm_request_any_context_irq(&pdev->dev,
- irq,
- matrix_keypad_interrupt,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
- "matrix-keypad", keypad);
- if (err < 0) {
- dev_err(&pdev->dev,
- "Unable to acquire interrupt for GPIO line %i\n",
- pdata->row_gpios[i]);
- return err;
- }
-
- keypad->row_irqs[i] = irq;
- }
+ gpiod_set_consumer_name(keypad->row_gpios[i], "matrix_kbd_row");
- keypad->num_row_irqs = pdata->num_row_gpios;
+ if (active_low ^ gpiod_is_active_low(keypad->row_gpios[i]))
+ gpiod_toggle_active_low(keypad->row_gpios[i]);
}
- /* initialized as disabled - enabled by input->open */
- disable_row_irqs(keypad);
-
return 0;
}
-#ifdef CONFIG_OF
-static struct matrix_keypad_platform_data *
-matrix_keypad_parse_dt(struct device *dev)
+static int matrix_keypad_setup_interrupts(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
{
- struct matrix_keypad_platform_data *pdata;
- struct device_node *np = dev->of_node;
- unsigned int *gpios;
- int ret, i, nrow, ncol;
-
- if (!np) {
- dev_err(dev, "device lacks DT data\n");
- return ERR_PTR(-ENODEV);
- }
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "could not allocate memory for platform data\n");
- return ERR_PTR(-ENOMEM);
- }
-
- pdata->num_row_gpios = nrow = gpiod_count(dev, "row");
- pdata->num_col_gpios = ncol = gpiod_count(dev, "col");
- if (nrow < 0 || ncol < 0) {
- dev_err(dev, "number of keypad rows/columns not specified\n");
- return ERR_PTR(-EINVAL);
- }
-
- pdata->no_autorepeat = of_property_read_bool(np, "linux,no-autorepeat");
-
- pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
- of_property_read_bool(np, "linux,wakeup"); /* legacy */
-
- pdata->active_low = of_property_read_bool(np, "gpio-activelow");
-
- pdata->drive_inactive_cols =
- of_property_read_bool(np, "drive-inactive-cols");
-
- of_property_read_u32(np, "debounce-delay-ms", &pdata->debounce_ms);
- of_property_read_u32(np, "col-scan-delay-us",
- &pdata->col_scan_delay_us);
+ int err;
+ int irq;
+ int i;
- gpios = devm_kcalloc(dev,
- pdata->num_row_gpios + pdata->num_col_gpios,
- sizeof(unsigned int),
- GFP_KERNEL);
- if (!gpios) {
- dev_err(dev, "could not allocate memory for gpios\n");
- return ERR_PTR(-ENOMEM);
- }
+ for (i = 0; i < keypad->num_row_gpios; i++) {
+ irq = gpiod_to_irq(keypad->row_gpios[i]);
+ if (irq < 0) {
+ err = irq;
+ dev_err(&pdev->dev,
+ "Unable to convert GPIO line %i to irq: %d\n",
+ i, err);
+ return err;
+ }
- for (i = 0; i < nrow; i++) {
- ret = of_get_named_gpio(np, "row-gpios", i);
- if (ret < 0)
- return ERR_PTR(ret);
- gpios[i] = ret;
- }
+ err = devm_request_any_context_irq(&pdev->dev, irq,
+ matrix_keypad_interrupt,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "matrix-keypad", keypad);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Unable to acquire interrupt for row %i: %d\n",
+ i, err);
+ return err;
+ }
- for (i = 0; i < ncol; i++) {
- ret = of_get_named_gpio(np, "col-gpios", i);
- if (ret < 0)
- return ERR_PTR(ret);
- gpios[nrow + i] = ret;
+ keypad->row_irqs[i] = irq;
}
- pdata->row_gpios = gpios;
- pdata->col_gpios = &gpios[pdata->num_row_gpios];
-
- return pdata;
-}
-#else
-static inline struct matrix_keypad_platform_data *
-matrix_keypad_parse_dt(struct device *dev)
-{
- dev_err(dev, "no platform data defined\n");
+ /* initialized as disabled - enabled by input->open */
+ disable_row_irqs(keypad);
- return ERR_PTR(-EINVAL);
+ return 0;
}
-#endif
static int matrix_keypad_probe(struct platform_device *pdev)
{
- const struct matrix_keypad_platform_data *pdata;
struct matrix_keypad *keypad;
struct input_dev *input_dev;
+ bool wakeup;
int err;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- pdata = matrix_keypad_parse_dt(&pdev->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else if (!pdata->keymap_data) {
- dev_err(&pdev->dev, "no keymap data defined\n");
- return -EINVAL;
- }
-
keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
if (!keypad)
return -ENOMEM;
@@ -457,40 +385,56 @@ static int matrix_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
keypad->input_dev = input_dev;
- keypad->pdata = pdata;
- keypad->row_shift = get_count_order(pdata->num_col_gpios);
keypad->stopped = true;
INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
spin_lock_init(&keypad->lock);
+ keypad->drive_inactive_cols =
+ device_property_read_bool(&pdev->dev, "drive-inactive-cols");
+ device_property_read_u32(&pdev->dev, "debounce-delay-ms",
+ &keypad->debounce_ms);
+ device_property_read_u32(&pdev->dev, "col-scan-delay-us",
+ &keypad->col_scan_delay_us);
+
+ err = matrix_keypad_init_gpio(pdev, keypad);
+ if (err)
+ return err;
+
+ keypad->row_shift = get_count_order(keypad->num_col_gpios);
+
+ err = matrix_keypad_setup_interrupts(pdev, keypad);
+ if (err)
+ return err;
+
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
- err = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
- pdata->num_row_gpios,
- pdata->num_col_gpios,
+ err = matrix_keypad_build_keymap(NULL, NULL,
+ keypad->num_row_gpios,
+ keypad->num_col_gpios,
NULL, input_dev);
if (err) {
dev_err(&pdev->dev, "failed to build keymap\n");
return -ENOMEM;
}
- if (!pdata->no_autorepeat)
+ if (!device_property_read_bool(&pdev->dev, "linux,no-autorepeat"))
__set_bit(EV_REP, input_dev->evbit);
+
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad);
- err = matrix_keypad_init_gpio(pdev, keypad);
- if (err)
- return err;
-
err = input_register_device(keypad->input_dev);
if (err)
return err;
- device_init_wakeup(&pdev->dev, pdata->wakeup);
+ wakeup = device_property_read_bool(&pdev->dev, "wakeup-source") ||
+ /* legacy */
+ device_property_read_bool(&pdev->dev, "linux,wakeup");
+ device_init_wakeup(&pdev->dev, wakeup);
+
platform_set_drvdata(pdev, keypad);
return 0;
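
The matrix_keypad rewrite moves from legacy GPIO numbers to GPIO descriptors: gpiod_count() sizes the "row"/"col" arrays, devm_gpiod_get_index() requests each line, and the gpiod_* accessors deal in logical levels, so signal polarity comes from the firmware description rather than a driver-local active_low flag (the gpio-activelow override survives only for legacy DTs). A trimmed sketch, with example_request_rows() as a hypothetical helper:

static int example_request_rows(struct device *dev)
{
	int i, nrow = gpiod_count(dev, "row");

	if (nrow < 0)
		return nrow;

	for (i = 0; i < nrow; i++) {
		struct gpio_desc *row =
			devm_gpiod_get_index(dev, "row", i, GPIOD_IN);

		if (IS_ERR(row))
			return PTR_ERR(row);

		/* logical value: 1 means asserted regardless of wiring */
		if (gpiod_get_value_cansleep(row))
			dev_dbg(dev, "row %d asserted at probe\n", i);
	}

	return 0;
}
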
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
deleted file mode 100644
index 2410f676c7f9..000000000000
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ /dev/null
@@ -1,268 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Touchkey driver for MELFAS MCS5000/5080 controller
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: HeungJun Kim <riverful.kim@samsung.com>
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/platform_data/mcs.h>
-#include <linux/pm.h>
-
-/* MCS5000 Touchkey */
-#define MCS5000_TOUCHKEY_STATUS 0x04
-#define MCS5000_TOUCHKEY_STATUS_PRESS 7
-#define MCS5000_TOUCHKEY_FW 0x0a
-#define MCS5000_TOUCHKEY_BASE_VAL 0x61
-
-/* MCS5080 Touchkey */
-#define MCS5080_TOUCHKEY_STATUS 0x00
-#define MCS5080_TOUCHKEY_STATUS_PRESS 3
-#define MCS5080_TOUCHKEY_FW 0x01
-#define MCS5080_TOUCHKEY_BASE_VAL 0x1
-
-enum mcs_touchkey_type {
- MCS5000_TOUCHKEY,
- MCS5080_TOUCHKEY,
-};
-
-struct mcs_touchkey_chip {
- unsigned int status_reg;
- unsigned int pressbit;
- unsigned int press_invert;
- unsigned int baseval;
-};
-
-struct mcs_touchkey_data {
- void (*poweron)(bool);
-
- struct i2c_client *client;
- struct input_dev *input_dev;
- struct mcs_touchkey_chip chip;
- unsigned int key_code;
- unsigned int key_val;
- unsigned short keycodes[];
-};
-
-static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id)
-{
- struct mcs_touchkey_data *data = dev_id;
- struct mcs_touchkey_chip *chip = &data->chip;
- struct i2c_client *client = data->client;
- struct input_dev *input = data->input_dev;
- unsigned int key_val;
- unsigned int pressed;
- int val;
-
- val = i2c_smbus_read_byte_data(client, chip->status_reg);
- if (val < 0) {
- dev_err(&client->dev, "i2c read error [%d]\n", val);
- goto out;
- }
-
- pressed = (val & (1 << chip->pressbit)) >> chip->pressbit;
- if (chip->press_invert)
- pressed ^= chip->press_invert;
-
- /* key_val is 0 when released, so we should use key_val of press. */
- if (pressed) {
- key_val = val & (0xff >> (8 - chip->pressbit));
- if (!key_val)
- goto out;
- key_val -= chip->baseval;
- data->key_code = data->keycodes[key_val];
- data->key_val = key_val;
- }
-
- input_event(input, EV_MSC, MSC_SCAN, data->key_val);
- input_report_key(input, data->key_code, pressed);
- input_sync(input);
-
- dev_dbg(&client->dev, "key %d %d %s\n", data->key_val, data->key_code,
- pressed ? "pressed" : "released");
-
- out:
- return IRQ_HANDLED;
-}
-
-static void mcs_touchkey_poweroff(void *data)
-{
- struct mcs_touchkey_data *touchkey = data;
-
- touchkey->poweron(false);
-}
-
-static int mcs_touchkey_probe(struct i2c_client *client)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- const struct mcs_platform_data *pdata;
- struct mcs_touchkey_data *data;
- struct input_dev *input_dev;
- unsigned int fw_reg;
- int fw_ver;
- int error;
- int i;
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- dev_err(&client->dev, "no platform data defined\n");
- return -EINVAL;
- }
-
- data = devm_kzalloc(&client->dev,
- struct_size(data, keycodes, pdata->key_maxval + 1),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- input_dev = devm_input_allocate_device(&client->dev);
- if (!input_dev) {
- dev_err(&client->dev, "Failed to allocate input device\n");
- return -ENOMEM;
- }
-
- data->client = client;
- data->input_dev = input_dev;
-
- if (id->driver_data == MCS5000_TOUCHKEY) {
- data->chip.status_reg = MCS5000_TOUCHKEY_STATUS;
- data->chip.pressbit = MCS5000_TOUCHKEY_STATUS_PRESS;
- data->chip.baseval = MCS5000_TOUCHKEY_BASE_VAL;
- fw_reg = MCS5000_TOUCHKEY_FW;
- } else {
- data->chip.status_reg = MCS5080_TOUCHKEY_STATUS;
- data->chip.pressbit = MCS5080_TOUCHKEY_STATUS_PRESS;
- data->chip.press_invert = 1;
- data->chip.baseval = MCS5080_TOUCHKEY_BASE_VAL;
- fw_reg = MCS5080_TOUCHKEY_FW;
- }
-
- fw_ver = i2c_smbus_read_byte_data(client, fw_reg);
- if (fw_ver < 0) {
- dev_err(&client->dev, "i2c read error[%d]\n", fw_ver);
- return fw_ver;
- }
- dev_info(&client->dev, "Firmware version: %d\n", fw_ver);
-
- input_dev->name = "MELFAS MCS Touchkey";
- input_dev->id.bustype = BUS_I2C;
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
- if (!pdata->no_autorepeat)
- input_dev->evbit[0] |= BIT_MASK(EV_REP);
- input_dev->keycode = data->keycodes;
- input_dev->keycodesize = sizeof(data->keycodes[0]);
- input_dev->keycodemax = pdata->key_maxval + 1;
-
- for (i = 0; i < pdata->keymap_size; i++) {
- unsigned int val = MCS_KEY_VAL(pdata->keymap[i]);
- unsigned int code = MCS_KEY_CODE(pdata->keymap[i]);
-
- data->keycodes[val] = code;
- __set_bit(code, input_dev->keybit);
- }
-
- input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- input_set_drvdata(input_dev, data);
-
- if (pdata->cfg_pin)
- pdata->cfg_pin();
-
- if (pdata->poweron) {
- data->poweron = pdata->poweron;
- data->poweron(true);
-
- error = devm_add_action_or_reset(&client->dev,
- mcs_touchkey_poweroff, data);
- if (error)
- return error;
- }
-
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, mcs_touchkey_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->dev.driver->name, data);
- if (error) {
- dev_err(&client->dev, "Failed to register interrupt\n");
- return error;
- }
-
- error = input_register_device(input_dev);
- if (error)
- return error;
-
- i2c_set_clientdata(client, data);
- return 0;
-}
-
-static void mcs_touchkey_shutdown(struct i2c_client *client)
-{
- struct mcs_touchkey_data *data = i2c_get_clientdata(client);
-
- if (data->poweron)
- data->poweron(false);
-}
-
-static int mcs_touchkey_suspend(struct device *dev)
-{
- struct mcs_touchkey_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
-
- /* Disable the work */
- disable_irq(client->irq);
-
- /* Finally turn off the power */
- if (data->poweron)
- data->poweron(false);
-
- return 0;
-}
-
-static int mcs_touchkey_resume(struct device *dev)
-{
- struct mcs_touchkey_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
-
- /* Enable the device first */
- if (data->poweron)
- data->poweron(true);
-
- /* Enable irq again */
- enable_irq(client->irq);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(mcs_touchkey_pm_ops,
- mcs_touchkey_suspend, mcs_touchkey_resume);
-
-static const struct i2c_device_id mcs_touchkey_id[] = {
- { "mcs5000_touchkey", MCS5000_TOUCHKEY },
- { "mcs5080_touchkey", MCS5080_TOUCHKEY },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mcs_touchkey_id);
-
-static struct i2c_driver mcs_touchkey_driver = {
- .driver = {
- .name = "mcs_touchkey",
- .pm = pm_sleep_ptr(&mcs_touchkey_pm_ops),
- },
- .probe = mcs_touchkey_probe,
- .shutdown = mcs_touchkey_shutdown,
- .id_table = mcs_touchkey_id,
-};
-
-module_i2c_driver(mcs_touchkey_driver);
-
-/* Module information */
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>");
-MODULE_DESCRIPTION("Touchkey driver for MELFAS MCS5000/5080 controller");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c
index 19f69d167fbd..e5eb025c5e99 100644
--- a/drivers/input/keyboard/mt6779-keypad.c
+++ b/drivers/input/keyboard/mt6779-keypad.c
@@ -92,11 +92,6 @@ static irqreturn_t mt6779_keypad_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mt6779_keypad_clk_disable(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static void mt6779_keypad_calc_row_col_single(unsigned int key,
unsigned int *row,
unsigned int *col)
@@ -213,21 +208,10 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev)
regmap_update_bits(keypad->regmap, MTK_KPD_SEL, MTK_KPD_SEL_COL,
MTK_KPD_SEL_COLMASK(keypad->n_cols));
- keypad->clk = devm_clk_get(&pdev->dev, "kpd");
+ keypad->clk = devm_clk_get_enabled(&pdev->dev, "kpd");
if (IS_ERR(keypad->clk))
return PTR_ERR(keypad->clk);
- error = clk_prepare_enable(keypad->clk);
- if (error) {
- dev_err(&pdev->dev, "cannot prepare/enable keypad clock\n");
- return error;
- }
-
- error = devm_add_action_or_reset(&pdev->dev, mt6779_keypad_clk_disable,
- keypad->clk);
- if (error)
- return error;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -260,6 +244,7 @@ static const struct of_device_id mt6779_keypad_of_match[] = {
{ .compatible = "mediatek,mt6873-keypad" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mt6779_keypad_of_match);
static struct platform_driver mt6779_keypad_pdrv = {
.probe = mt6779_keypad_pdrv_probe,
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
deleted file mode 100644
index b3ccc97f61e1..000000000000
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ /dev/null
@@ -1,378 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
- *
- * Keypad controller driver for the SKE (Scroll Key Encoder) module used in
- * the Nomadik 8815 and Ux500 platforms.
- */
-
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-
-#include <linux/platform_data/keypad-nomadik-ske.h>
-
-/* SKE_CR bits */
-#define SKE_KPMLT (0x1 << 6)
-#define SKE_KPCN (0x7 << 3)
-#define SKE_KPASEN (0x1 << 2)
-#define SKE_KPASON (0x1 << 7)
-
-/* SKE_IMSC bits */
-#define SKE_KPIMA (0x1 << 2)
-
-/* SKE_ICR bits */
-#define SKE_KPICS (0x1 << 3)
-#define SKE_KPICA (0x1 << 2)
-
-/* SKE_RIS bits */
-#define SKE_KPRISA (0x1 << 2)
-
-#define SKE_KEYPAD_ROW_SHIFT 3
-#define SKE_KPD_NUM_ROWS 8
-#define SKE_KPD_NUM_COLS 8
-
-/* keypad auto scan registers */
-#define SKE_ASR0 0x20
-#define SKE_ASR1 0x24
-#define SKE_ASR2 0x28
-#define SKE_ASR3 0x2C
-
-#define SKE_NUM_ASRX_REGISTERS (4)
-#define KEY_PRESSED_DELAY 10
-
-/**
- * struct ske_keypad - data structure used by keypad driver
- * @irq: irq no
- * @reg_base: ske registers base address
- * @input: pointer to input device object
- * @board: keypad platform device
- * @keymap: matrix scan code table for keycodes
- * @clk: clock structure pointer
- * @pclk: clock structure pointer
- * @ske_keypad_lock: spinlock protecting the keypad read/writes
- */
-struct ske_keypad {
- int irq;
- void __iomem *reg_base;
- struct input_dev *input;
- const struct ske_keypad_platform_data *board;
- unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS];
- struct clk *clk;
- struct clk *pclk;
- spinlock_t ske_keypad_lock;
-};
-
-static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
- u8 mask, u8 data)
-{
- u32 ret;
-
- spin_lock(&keypad->ske_keypad_lock);
-
- ret = readl(keypad->reg_base + addr);
- ret &= ~mask;
- ret |= data;
- writel(ret, keypad->reg_base + addr);
-
- spin_unlock(&keypad->ske_keypad_lock);
-}
-
-/*
- * ske_keypad_chip_init: init keypad controller configuration
- *
- * Enable Multi key press detection, auto scan mode
- */
-static int __init ske_keypad_chip_init(struct ske_keypad *keypad)
-{
- u32 value;
- int timeout = keypad->board->debounce_ms;
-
- /* check SKE_RIS to be 0 */
- while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--)
- cpu_relax();
-
- if (timeout == -1)
- return -EINVAL;
-
- /*
- * set debounce value
- * keypad dbounce is configured in DBCR[15:8]
- * dbounce value in steps of 32/32.768 ms
- */
- spin_lock(&keypad->ske_keypad_lock);
- value = readl(keypad->reg_base + SKE_DBCR);
- value = value & 0xff;
- value |= ((keypad->board->debounce_ms * 32000)/32768) << 8;
- writel(value, keypad->reg_base + SKE_DBCR);
- spin_unlock(&keypad->ske_keypad_lock);
-
- /* enable multi key detection */
- ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT);
-
- /*
- * set up the number of columns
- * KPCN[5:3] defines no. of keypad columns to be auto scanned
- */
- value = (keypad->board->kcol - 1) << 3;
- ske_keypad_set_bits(keypad, SKE_CR, SKE_KPCN, value);
-
- /* clear keypad interrupt for auto(and pending SW) scans */
- ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA | SKE_KPICS);
-
- /* un-mask keypad interrupts */
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
-
- /* enable automatic scan */
- ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPASEN);
-
- return 0;
-}
-
-static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col)
-{
- int row = 0, code, pos;
- struct input_dev *input = keypad->input;
- u32 ske_ris;
- int key_pressed;
- int num_of_rows;
-
- /* find out the row */
- num_of_rows = hweight8(status);
- do {
- pos = __ffs(status);
- row = pos;
- status &= ~(1 << pos);
-
- code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
- ske_ris = readl(keypad->reg_base + SKE_RIS);
- key_pressed = ske_ris & SKE_KPRISA;
-
- input_event(input, EV_MSC, MSC_SCAN, code);
- input_report_key(input, keypad->keymap[code], key_pressed);
- input_sync(input);
- num_of_rows--;
- } while (num_of_rows);
-}
-
-static void ske_keypad_read_data(struct ske_keypad *keypad)
-{
- u8 status;
- int col = 0;
- int ske_asr, i;
-
- /*
- * Read the auto scan registers
- *
- * Each SKE_ASRx (x=0 to x=3) contains two row values.
- * lower byte contains row value for column 2*x,
- * upper byte contains row value for column 2*x + 1
- */
- for (i = 0; i < SKE_NUM_ASRX_REGISTERS; i++) {
- ske_asr = readl(keypad->reg_base + SKE_ASR0 + (4 * i));
- if (!ske_asr)
- continue;
-
- /* now that ASRx is zero, find out the coloumn x and row y */
- status = ske_asr & 0xff;
- if (status) {
- col = i * 2;
- ske_keypad_report(keypad, status, col);
- }
- status = (ske_asr & 0xff00) >> 8;
- if (status) {
- col = (i * 2) + 1;
- ske_keypad_report(keypad, status, col);
- }
- }
-}
-
-static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
-{
- struct ske_keypad *keypad = dev_id;
- int timeout = keypad->board->debounce_ms;
-
- /* disable auto scan interrupt; mask the interrupt generated */
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
- ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
-
- while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --timeout)
- cpu_relax();
-
- /* SKEx registers are stable and can be read */
- ske_keypad_read_data(keypad);
-
- /* wait until raw interrupt is clear */
- while ((readl(keypad->reg_base + SKE_RIS)) && --timeout)
- msleep(KEY_PRESSED_DELAY);
-
- /* enable auto scan interrupts */
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
-
- return IRQ_HANDLED;
-}
-
-static void ske_keypad_board_exit(void *data)
-{
- struct ske_keypad *keypad = data;
-
- keypad->board->exit();
-}
-
-static int __init ske_keypad_probe(struct platform_device *pdev)
-{
- const struct ske_keypad_platform_data *plat =
- dev_get_platdata(&pdev->dev);
- struct device *dev = &pdev->dev;
- struct ske_keypad *keypad;
- struct input_dev *input;
- int irq;
- int error;
-
- if (!plat) {
- dev_err(&pdev->dev, "invalid keypad platform data\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- keypad = devm_kzalloc(dev, sizeof(struct ske_keypad),
- GFP_KERNEL);
- input = devm_input_allocate_device(dev);
- if (!keypad || !input) {
- dev_err(&pdev->dev, "failed to allocate keypad memory\n");
- return -ENOMEM;
- }
-
- keypad->irq = irq;
- keypad->board = plat;
- keypad->input = input;
- spin_lock_init(&keypad->ske_keypad_lock);
-
- keypad->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(keypad->reg_base))
- return PTR_ERR(keypad->reg_base);
-
- keypad->pclk = devm_clk_get_enabled(dev, "apb_pclk");
- if (IS_ERR(keypad->pclk)) {
- dev_err(&pdev->dev, "failed to get pclk\n");
- return PTR_ERR(keypad->pclk);
- }
-
- keypad->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(keypad->clk)) {
- dev_err(&pdev->dev, "failed to get clk\n");
- return PTR_ERR(keypad->clk);
- }
-
- input->id.bustype = BUS_HOST;
- input->name = "ux500-ske-keypad";
- input->dev.parent = &pdev->dev;
-
- error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- SKE_KPD_NUM_ROWS, SKE_KPD_NUM_COLS,
- keypad->keymap, input);
- if (error) {
- dev_err(&pdev->dev, "Failed to build keymap\n");
- return error;
- }
-
- input_set_capability(input, EV_MSC, MSC_SCAN);
- if (!plat->no_autorepeat)
- __set_bit(EV_REP, input->evbit);
-
- /* go through board initialization helpers */
- if (keypad->board->init)
- keypad->board->init();
-
- if (keypad->board->exit) {
- error = devm_add_action_or_reset(dev, ske_keypad_board_exit,
- keypad);
- if (error)
- return error;
- }
-
- error = ske_keypad_chip_init(keypad);
- if (error) {
- dev_err(&pdev->dev, "unable to init keypad hardware\n");
- return error;
- }
-
- error = devm_request_threaded_irq(dev, keypad->irq,
- NULL, ske_keypad_irq,
- IRQF_ONESHOT, "ske-keypad", keypad);
- if (error) {
- dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
- return error;
- }
-
- error = input_register_device(input);
- if (error) {
- dev_err(&pdev->dev,
- "unable to register input device: %d\n", error);
- return error;
- }
-
- if (plat->wakeup_enable)
- device_init_wakeup(&pdev->dev, true);
-
- platform_set_drvdata(pdev, keypad);
-
- return 0;
-}
-
-static int ske_keypad_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ske_keypad *keypad = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
-
- return 0;
-}
-
-static int ske_keypad_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ske_keypad *keypad = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(ske_keypad_dev_pm_ops,
- ske_keypad_suspend, ske_keypad_resume);
-
-static struct platform_driver ske_keypad_driver = {
- .driver = {
- .name = "nmk-ske-keypad",
- .pm = pm_sleep_ptr(&ske_keypad_dev_pm_ops),
- },
-};
-
-module_platform_driver_probe(ske_keypad_driver, ske_keypad_probe);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
-MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver");
-MODULE_ALIAS("platform:nomadik-ske-keypad");
diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c
index 5a2592e6293d..bce8157d1871 100644
--- a/drivers/input/keyboard/qt1050.c
+++ b/drivers/input/keyboard/qt1050.c
@@ -346,35 +346,34 @@ static int qt1050_apply_fw_data(struct qt1050_priv *ts)
static int qt1050_parse_fw(struct qt1050_priv *ts)
{
struct device *dev = &ts->client->dev;
- struct fwnode_handle *child;
int nbuttons;
nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0 || nbuttons > QT1050_MAX_KEYS)
return -ENODEV;
- device_for_each_child_node(dev, child) {
+ device_for_each_child_node_scoped(dev, child) {
struct qt1050_key button;
/* Required properties */
if (fwnode_property_read_u32(child, "linux,code",
&button.keycode)) {
dev_err(dev, "Button without keycode\n");
- goto err;
+ return -EINVAL;
}
if (button.keycode >= KEY_MAX) {
dev_err(dev, "Invalid keycode 0x%x\n",
button.keycode);
- goto err;
+ return -EINVAL;
}
if (fwnode_property_read_u32(child, "reg",
&button.num)) {
dev_err(dev, "Button without pad number\n");
- goto err;
+ return -EINVAL;
}
if (button.num < 0 || button.num > QT1050_MAX_KEYS - 1)
- goto err;
+ return -EINVAL;
ts->reg_keys |= BIT(button.num);
@@ -424,10 +423,6 @@ static int qt1050_parse_fw(struct qt1050_priv *ts)
}
return 0;
-
-err:
- fwnode_handle_put(child);
- return -EINVAL;
}
static int qt1050_probe(struct i2c_client *client)
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index ad8660be0127..f7b5f1e25c80 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -100,11 +100,6 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void imx_snvs_pwrkey_disable_clk(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static void imx_snvs_pwrkey_act(void *pdata)
{
struct pwrkey_drv_data *pd = pdata;
@@ -141,28 +136,12 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
}
- clk = devm_clk_get_optional(&pdev->dev, NULL);
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
return PTR_ERR(clk);
}
- error = clk_prepare_enable(clk);
- if (error) {
- dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
- ERR_PTR(error));
- return error;
- }
-
- error = devm_add_action_or_reset(&pdev->dev,
- imx_snvs_pwrkey_disable_clk, clk);
- if (error) {
- dev_err(&pdev->dev,
- "Failed to register clock cleanup handler (%pe)\n",
- ERR_PTR(error));
- return error;
- }
-
pdata->wakeup = of_property_read_bool(np, "wakeup-source");
pdata->irq = platform_get_irq(pdev, 0);
@@ -204,7 +183,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
error = devm_request_irq(&pdev->dev, pdata->irq,
imx_snvs_pwrkey_interrupt,
0, pdev->name, pdev);
-
if (error) {
dev_err(&pdev->dev, "interrupt not available.\n");
return error;
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 557d00a667ce..1df4feb8ba01 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -222,7 +222,7 @@ static int spear_kbd_probe(struct platform_device *pdev)
if (IS_ERR(kbd->io_base))
return PTR_ERR(kbd->io_base);
- kbd->clk = devm_clk_get(&pdev->dev, NULL);
+ kbd->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(kbd->clk))
return PTR_ERR(kbd->clk);
@@ -255,14 +255,9 @@ static int spear_kbd_probe(struct platform_device *pdev)
return error;
}
- error = clk_prepare(kbd->clk);
- if (error)
- return error;
-
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "Unable to register keyboard device\n");
- clk_unprepare(kbd->clk);
return error;
}
@@ -272,14 +267,6 @@ static int spear_kbd_probe(struct platform_device *pdev)
return 0;
}
-static void spear_kbd_remove(struct platform_device *pdev)
-{
- struct spear_kbd *kbd = platform_get_drvdata(pdev);
-
- input_unregister_device(kbd->input);
- clk_unprepare(kbd->clk);
-}
-
static int spear_kbd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -373,7 +360,6 @@ MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
static struct platform_driver spear_kbd_driver = {
.probe = spear_kbd_probe,
- .remove_new = spear_kbd_remove,
.driver = {
.name = "keyboard",
.pm = pm_sleep_ptr(&spear_kbd_pm_ops),
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index b0d86621c60a..11988cffdfae 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -325,7 +325,6 @@ tc3589x_keypad_of_probe(struct device *dev)
struct tc3589x_keypad_platform_data *plat;
u32 cols, rows;
u32 debounce_ms;
- int proplen;
if (!np)
return ERR_PTR(-ENODEV);
@@ -346,7 +345,7 @@ tc3589x_keypad_of_probe(struct device *dev)
return ERR_PTR(-EINVAL);
}
- if (!of_get_property(np, "linux,keymap", &proplen)) {
+ if (!of_property_present(np, "linux,keymap")) {
dev_err(dev, "property linux,keymap not found\n");
return ERR_PTR(-ENOENT);
}
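
of_property_present() is the modern spelling of "does this property exist", replacing of_get_property() calls whose returned value and length were ignored. A one-line sketch of the replacement pattern used above:

	/* presence check only; the property's value and length are unused */
	if (!of_property_present(np, "linux,keymap"))
		return -ENOENT;
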
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index a1765ed8c825..6776dd94ce76 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -241,11 +241,10 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
static void tegra_kbc_keypress_timer(struct timer_list *t)
{
struct tegra_kbc *kbc = from_timer(kbc, t, timer);
- unsigned long flags;
u32 val;
unsigned int i;
- spin_lock_irqsave(&kbc->lock, flags);
+ guard(spinlock_irqsave)(&kbc->lock);
val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf;
if (val) {
@@ -270,17 +269,14 @@ static void tegra_kbc_keypress_timer(struct timer_list *t)
/* All keys are released so enable the keypress interrupt */
tegra_kbc_set_fifo_interrupt(kbc, true);
}
-
- spin_unlock_irqrestore(&kbc->lock, flags);
}
static irqreturn_t tegra_kbc_isr(int irq, void *args)
{
struct tegra_kbc *kbc = args;
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&kbc->lock, flags);
+ guard(spinlock_irqsave)(&kbc->lock);
/*
* Quickly bail out & reenable interrupts if the fifo threshold
@@ -301,8 +297,6 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
kbc->keypress_caused_wake = true;
}
- spin_unlock_irqrestore(&kbc->lock, flags);
-
return IRQ_HANDLED;
}
@@ -413,14 +407,13 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
static void tegra_kbc_stop(struct tegra_kbc *kbc)
{
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&kbc->lock, flags);
- val = readl(kbc->mmio + KBC_CONTROL_0);
- val &= ~1;
- writel(val, kbc->mmio + KBC_CONTROL_0);
- spin_unlock_irqrestore(&kbc->lock, flags);
+ scoped_guard(spinlock_irqsave, &kbc->lock) {
+ val = readl(kbc->mmio + KBC_CONTROL_0);
+ val &= ~1;
+ writel(val, kbc->mmio + KBC_CONTROL_0);
+ }
disable_irq(kbc->irq);
del_timer_sync(&kbc->timer);
@@ -491,12 +484,10 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
struct device_node *np = kbc->dev->of_node;
u32 prop;
int i;
- u32 num_rows = 0;
- u32 num_cols = 0;
+ int num_rows;
+ int num_cols;
u32 cols_cfg[KBC_MAX_GPIO];
u32 rows_cfg[KBC_MAX_GPIO];
- int proplen;
- int ret;
if (!of_property_read_u32(np, "nvidia,debounce-delay-ms", &prop))
kbc->debounce_cnt = prop;
@@ -510,56 +501,23 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
of_property_read_bool(np, "nvidia,wakeup-source")) /* legacy */
kbc->wakeup = true;
- if (!of_get_property(np, "nvidia,kbc-row-pins", &proplen)) {
- dev_err(kbc->dev, "property nvidia,kbc-row-pins not found\n");
- return -ENOENT;
- }
- num_rows = proplen / sizeof(u32);
-
- if (!of_get_property(np, "nvidia,kbc-col-pins", &proplen)) {
- dev_err(kbc->dev, "property nvidia,kbc-col-pins not found\n");
- return -ENOENT;
- }
- num_cols = proplen / sizeof(u32);
-
- if (num_rows > kbc->hw_support->max_rows) {
- dev_err(kbc->dev,
- "Number of rows is more than supported by hardware\n");
- return -EINVAL;
- }
-
- if (num_cols > kbc->hw_support->max_columns) {
- dev_err(kbc->dev,
- "Number of cols is more than supported by hardware\n");
- return -EINVAL;
- }
-
- if (!of_get_property(np, "linux,keymap", &proplen)) {
+ if (!of_property_present(np, "linux,keymap")) {
dev_err(kbc->dev, "property linux,keymap not found\n");
return -ENOENT;
}
- if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
- dev_err(kbc->dev,
- "keypad rows/columns not properly specified\n");
- return -EINVAL;
- }
-
/* Set all pins as non-configured */
for (i = 0; i < kbc->num_rows_and_columns; i++)
kbc->pin_cfg[i].type = PIN_CFG_IGNORE;
- ret = of_property_read_u32_array(np, "nvidia,kbc-row-pins",
- rows_cfg, num_rows);
- if (ret < 0) {
+ num_rows = of_property_read_variable_u32_array(np, "nvidia,kbc-row-pins",
+ rows_cfg, 1, KBC_MAX_GPIO);
+ if (num_rows < 0) {
dev_err(kbc->dev, "Rows configurations are not proper\n");
- return -EINVAL;
- }
-
- ret = of_property_read_u32_array(np, "nvidia,kbc-col-pins",
- cols_cfg, num_cols);
- if (ret < 0) {
- dev_err(kbc->dev, "Cols configurations are not proper\n");
+ return num_rows;
+ } else if (num_rows > kbc->hw_support->max_rows) {
+ dev_err(kbc->dev,
+ "Number of rows is more than supported by hardware\n");
return -EINVAL;
}
@@ -568,11 +526,28 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
kbc->pin_cfg[rows_cfg[i]].num = i;
}
+ num_cols = of_property_read_variable_u32_array(np, "nvidia,kbc-col-pins",
+ cols_cfg, 1, KBC_MAX_GPIO);
+ if (num_cols < 0) {
+ dev_err(kbc->dev, "Cols configurations are not proper\n");
+ return num_cols;
+ } else if (num_cols > kbc->hw_support->max_columns) {
+ dev_err(kbc->dev,
+ "Number of cols is more than supported by hardware\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < num_cols; i++) {
kbc->pin_cfg[cols_cfg[i]].type = PIN_CFG_COL;
kbc->pin_cfg[cols_cfg[i]].num = i;
}
+ if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
+ dev_err(kbc->dev,
+ "keypad rows/columns not properly specified\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -724,7 +699,8 @@ static int tegra_kbc_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
- mutex_lock(&kbc->idev->mutex);
+ guard(mutex)(&kbc->idev->mutex);
+
if (device_may_wakeup(&pdev->dev)) {
disable_irq(kbc->irq);
del_timer_sync(&kbc->timer);
@@ -747,11 +723,9 @@ static int tegra_kbc_suspend(struct device *dev)
tegra_kbc_set_keypress_interrupt(kbc, true);
enable_irq(kbc->irq);
enable_irq_wake(kbc->irq);
- } else {
- if (input_device_enabled(kbc->idev))
- tegra_kbc_stop(kbc);
+ } else if (input_device_enabled(kbc->idev)) {
+ tegra_kbc_stop(kbc);
}
- mutex_unlock(&kbc->idev->mutex);
return 0;
}
@@ -760,9 +734,10 @@ static int tegra_kbc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
- int err = 0;
+ int err;
+
+ guard(mutex)(&kbc->idev->mutex);
- mutex_lock(&kbc->idev->mutex);
if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(kbc->irq);
tegra_kbc_setup_wakekeys(kbc, false);
@@ -787,13 +762,13 @@ static int tegra_kbc_resume(struct device *dev)
input_report_key(kbc->idev, kbc->wakeup_key, 0);
input_sync(kbc->idev);
}
- } else {
- if (input_device_enabled(kbc->idev))
- err = tegra_kbc_start(kbc);
+ } else if (input_device_enabled(kbc->idev)) {
+ err = tegra_kbc_start(kbc);
+ if (err)
+ return err;
}
- mutex_unlock(&kbc->idev->mutex);
- return err;
+ return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops,
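The tegra-kbc conversions above rely on the kernel's scope-based cleanup helpers from linux/cleanup.h. A minimal sketch of the two locking forms used here, with hypothetical names (struct demo, demo_inc(), demo_snapshot() are illustrative, not from the patch):

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo {
	spinlock_t lock;
	u32 count;
};

/* guard() holds the lock until the enclosing scope ends, so every
 * return path unlocks and restores the saved irq flags automatically. */
static void demo_inc(struct demo *d)
{
	guard(spinlock_irqsave)(&d->lock);
	d->count++;
}

/* scoped_guard() confines the critical section to the braced block,
 * matching the tegra_kbc_stop() conversion above. */
static u32 demo_snapshot(struct demo *d)
{
	u32 val;

	scoped_guard(spinlock_irqsave, &d->lock) {
		val = d->count;
	}

	return val;	/* lock already dropped here */
}

The parse_dt hunk also switches to of_property_read_variable_u32_array(), which reads between a minimum and maximum number of elements and returns the count actually read (or a negative errno) — which is why num_rows/num_cols became signed ints that double as count and error code.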
diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
index 5d93043bad8e..3bea3575a0a9 100644
--- a/drivers/input/matrix-keymap.c
+++ b/drivers/input/matrix-keymap.c
@@ -73,10 +73,9 @@ static int matrix_keypad_parse_keymap(const char *propname,
struct device *dev = input_dev->dev.parent;
unsigned int row_shift = get_count_order(cols);
unsigned int max_keys = rows << row_shift;
- u32 *keys;
int i;
int size;
- int retval;
+ int error;
if (!propname)
propname = "linux,keymap";
@@ -94,30 +93,24 @@ static int matrix_keypad_parse_keymap(const char *propname,
return -EINVAL;
}
- keys = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
+ u32 *keys __free(kfree) = kmalloc_array(size, sizeof(*keys), GFP_KERNEL);
if (!keys)
return -ENOMEM;
- retval = device_property_read_u32_array(dev, propname, keys, size);
- if (retval) {
+ error = device_property_read_u32_array(dev, propname, keys, size);
+ if (error) {
dev_err(dev, "failed to read %s property: %d\n",
- propname, retval);
- goto out;
+ propname, error);
+ return error;
}
for (i = 0; i < size; i++) {
if (!matrix_keypad_map_key(input_dev, rows, cols,
- row_shift, keys[i])) {
- retval = -EINVAL;
- goto out;
- }
+ row_shift, keys[i]))
+ return -EINVAL;
}
- retval = 0;
-
-out:
- kfree(keys);
- return retval;
+ return 0;
}
/**
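The __free(kfree) annotation used in the matrix-keymap hunk ties the buffer's lifetime to its scope, so the "goto out; kfree();" tail becomes unnecessary. A minimal sketch of the same pattern (demo_sum() and its parameters are illustrative):

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* The buffer is freed on every exit path, so error handling can
 * return directly instead of jumping to a common kfree() label. */
static int demo_sum(const u32 *src, size_t n, u64 *result)
{
	u32 *buf __free(kfree) = kmalloc_array(n, sizeof(*buf), GFP_KERNEL);
	size_t i;

	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, n * sizeof(*buf));

	*result = 0;
	for (i = 0; i < n; i++)
		*result += buf[i];

	return 0;	/* buf freed here as well */
}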
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index c086dadb45e3..058f3470b7ae 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1067,7 +1067,7 @@ static ssize_t ims_pcu_attribute_store(struct device *dev,
if (data_len > attr->field_length)
return -EINVAL;
- scoped_cond_guard(mutex, return -EINTR, &pcu->cmd_mutex) {
+ scoped_cond_guard(mutex_intr, return -EINTR, &pcu->cmd_mutex) {
memset(field, 0, attr->field_length);
memcpy(field, buf, data_len);
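The one-word ims-pcu fix matters: the plain mutex guard acquires with mutex_lock(), which never fails, so the "return -EINTR" arm was dead code; mutex_intr acquires with mutex_lock_interruptible() and actually takes the failure path when a signal arrives. A sketch (demo names hypothetical):

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/string.h>

static DEFINE_MUTEX(demo_mutex);

static int demo_store(char *field, const char *buf, size_t len)
{
	/* the second argument runs if mutex_lock_interruptible() fails */
	scoped_cond_guard(mutex_intr, return -EINTR, &demo_mutex) {
		memcpy(field, buf, len);
	}

	return 0;
}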
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
index cd14ff9f57cf..843f8a3f3410 100644
--- a/drivers/input/misc/iqs269a.c
+++ b/drivers/input/misc/iqs269a.c
@@ -811,7 +811,6 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
{
struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
struct i2c_client *client = iqs269->client;
- struct fwnode_handle *ch_node;
u16 general, misc_a, misc_b;
unsigned int val;
int error;
@@ -1049,12 +1048,10 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS);
- device_for_each_child_node(&client->dev, ch_node) {
+ device_for_each_child_node_scoped(&client->dev, ch_node) {
error = iqs269_parse_chan(iqs269, ch_node);
- if (error) {
- fwnode_handle_put(ch_node);
+ if (error)
return error;
- }
}
/*
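device_for_each_child_node_scoped() owns the child node reference for the duration of each iteration, so an early return no longer leaks it — which is why the explicit fwnode_handle_put() above could be dropped. A sketch (the "reg" property and demo function are illustrative):

#include <linux/property.h>
#include <linux/types.h>

static int demo_parse_children(struct device *dev)
{
	/* the macro declares 'child' and puts the reference itself */
	device_for_each_child_node_scoped(dev, child) {
		u32 reg;
		int error;

		error = fwnode_property_read_u32(child, "reg", &reg);
		if (error)
			return error;	/* no fwnode_handle_put() needed */
	}

	return 0;
}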
diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c
index 1d99206dd3a8..eb4173f9c820 100644
--- a/drivers/input/misc/nxp-bbnsm-pwrkey.c
+++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c
@@ -38,6 +38,7 @@ struct bbnsm_pwrkey {
int irq;
int keycode;
int keystate; /* 1:pressed */
+ bool suspended;
struct timer_list check_timer;
struct input_dev *input;
};
@@ -70,6 +71,7 @@ static irqreturn_t bbnsm_pwrkey_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev);
+ struct input_dev *input = bbnsm->input;
u32 event;
regmap_read(bbnsm->regmap, BBNSM_EVENTS, &event);
@@ -78,6 +80,18 @@ static irqreturn_t bbnsm_pwrkey_interrupt(int irq, void *dev_id)
pm_wakeup_event(bbnsm->input->dev.parent, 0);
+ /*
+ * Directly report key event after resume to make sure key press
+ * event is never missed.
+ */
+ if (bbnsm->suspended) {
+ bbnsm->keystate = 1;
+ input_event(input, EV_KEY, bbnsm->keycode, 1);
+ input_sync(input);
+ /* Fire at most once per suspend/resume cycle */
+ bbnsm->suspended = false;
+ }
+
mod_timer(&bbnsm->check_timer,
jiffies + msecs_to_jiffies(DEBOUNCE_TIME));
@@ -173,6 +187,29 @@ static int bbnsm_pwrkey_probe(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused bbnsm_pwrkey_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev);
+
+ bbnsm->suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused bbnsm_pwrkey_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct bbnsm_pwrkey *bbnsm = platform_get_drvdata(pdev);
+
+ bbnsm->suspended = false;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(bbnsm_pwrkey_pm_ops, bbnsm_pwrkey_suspend,
+ bbnsm_pwrkey_resume);
+
static const struct of_device_id bbnsm_pwrkey_ids[] = {
{ .compatible = "nxp,imx93-bbnsm-pwrkey" },
{ /* sentinel */ }
@@ -182,6 +219,7 @@ MODULE_DEVICE_TABLE(of, bbnsm_pwrkey_ids);
static struct platform_driver bbnsm_pwrkey_driver = {
.driver = {
.name = "bbnsm_pwrkey",
+ .pm = &bbnsm_pwrkey_pm_ops,
.of_match_table = bbnsm_pwrkey_ids,
},
.probe = bbnsm_pwrkey_probe,
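The bbnsm change addresses a wakeup race: by the time userspace resumes, the hardware may already have latched and cleared the press that woke the system, so the first interrupt after suspend synthesizes the key event. The general shape, with hypothetical demo names (a sketch, not the driver's full logic):

#include <linux/input.h>
#include <linux/interrupt.h>

struct demo_key {
	struct input_dev *input;
	unsigned int keycode;
	bool suspended;	/* set in suspend(), cleared in resume() */
};

static irqreturn_t demo_wake_isr(int irq, void *dev_id)
{
	struct demo_key *dk = dev_id;

	if (dk->suspended) {
		/* report the press that triggered the wakeup */
		input_report_key(dk->input, dk->keycode, 1);
		input_sync(dk->input);
		dk->suspended = false;	/* at most once per cycle */
	}

	return IRQ_HANDLED;
}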
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 445856c9127a..2c51ea9d01d7 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -1132,7 +1132,6 @@ static const struct file_operations uinput_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = uinput_compat_ioctl,
#endif
- .llseek = no_llseek,
};
static struct miscdevice uinput_misc = {
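The dropped .llseek line is part of the tree-wide no_llseek removal: since the VFS began deriving FMODE_LSEEK from a non-NULL .llseek, leaving the field unset already makes lseek() fail with -ESPIPE, which is exactly what no_llseek used to do. A sketch (demo_read is a hypothetical placeholder):

#include <linux/fs.h>
#include <linux/module.h>

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;	/* placeholder body */
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.read = demo_read,
	/* no .llseek: seeking is rejected with -ESPIPE */
};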
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 5c4956678cd0..5a64557920fa 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -990,8 +990,8 @@ static int __init copy_keymap(void)
for (key = keymap; key->type != KE_END; key++)
length++;
- new_keymap = kmemdup(keymap, length * sizeof(struct key_entry),
- GFP_KERNEL);
+ new_keymap = kmemdup_array(keymap, length, sizeof(struct key_entry),
+ GFP_KERNEL);
if (!new_keymap)
return -ENOMEM;
@@ -1075,7 +1075,7 @@ static void wistron_led_init(struct device *parent)
}
if (leds_present & FE_MAIL_LED) {
- /* bios_get_default_setting(MAIL) always retuns 0, so just turn the led off */
+ /* bios_get_default_setting(MAIL) always returns 0, so just turn the led off */
wistron_mail_led.brightness = LED_OFF;
if (led_classdev_register(parent, &wistron_mail_led))
leds_present &= ~FE_MAIL_LED;
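kmemdup_array() used above is kmemdup() with the count * element_size multiplication checked for overflow, mirroring kmalloc_array(). A sketch with an illustrative element type:

#include <linux/slab.h>

struct demo_entry {
	int type;
	int code;
};

static struct demo_entry *demo_copy_map(const struct demo_entry *src,
					size_t n)
{
	/* NULL on allocation failure or if n * sizeof(*src) overflows */
	return kmemdup_array(src, n, sizeof(*src), GFP_KERNEL);
}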
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d5ef5a112d6f..4e37fc3f1a9e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1396,24 +1396,16 @@ static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
static DEFINE_MUTEX(alps_mutex);
-static void alps_register_bare_ps2_mouse(struct work_struct *work)
+static int alps_do_register_bare_ps2_mouse(struct alps_data *priv)
{
- struct alps_data *priv =
- container_of(work, struct alps_data, dev3_register_work.work);
struct psmouse *psmouse = priv->psmouse;
struct input_dev *dev3;
- int error = 0;
-
- mutex_lock(&alps_mutex);
-
- if (priv->dev3)
- goto out;
+ int error;
dev3 = input_allocate_device();
if (!dev3) {
psmouse_err(psmouse, "failed to allocate secondary device\n");
- error = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
@@ -1446,21 +1438,35 @@ static void alps_register_bare_ps2_mouse(struct work_struct *work)
psmouse_err(psmouse,
"failed to register secondary device: %d\n",
error);
- input_free_device(dev3);
- goto out;
+ goto err_free_input;
}
priv->dev3 = dev3;
+ return 0;
-out:
- /*
- * Save the error code so that we can detect that we
- * already tried to create the device.
- */
- if (error)
- priv->dev3 = ERR_PTR(error);
+err_free_input:
+ input_free_device(dev3);
+ return error;
+}
- mutex_unlock(&alps_mutex);
+static void alps_register_bare_ps2_mouse(struct work_struct *work)
+{
+ struct alps_data *priv = container_of(work, struct alps_data,
+ dev3_register_work.work);
+ int error;
+
+ guard(mutex)(&alps_mutex);
+
+ if (!priv->dev3) {
+ error = alps_do_register_bare_ps2_mouse(priv);
+ if (error) {
+ /*
+ * Save the error code so that we can detect that we
+ * already tried to create the device.
+ */
+ priv->dev3 = ERR_PTR(error);
+ }
+ }
}
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 10a03a566905..dfdfb59cc8b5 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -834,13 +834,11 @@ static int bcm5974_open(struct input_dev *input)
if (error)
return error;
- mutex_lock(&dev->pm_mutex);
-
- error = bcm5974_start_traffic(dev);
- if (!error)
- dev->opened = 1;
-
- mutex_unlock(&dev->pm_mutex);
+ scoped_guard(mutex, &dev->pm_mutex) {
+ error = bcm5974_start_traffic(dev);
+ if (!error)
+ dev->opened = 1;
+ }
if (error)
usb_autopm_put_interface(dev->intf);
@@ -852,12 +850,10 @@ static void bcm5974_close(struct input_dev *input)
{
struct bcm5974 *dev = input_get_drvdata(input);
- mutex_lock(&dev->pm_mutex);
-
- bcm5974_pause_traffic(dev);
- dev->opened = 0;
-
- mutex_unlock(&dev->pm_mutex);
+ scoped_guard(mutex, &dev->pm_mutex) {
+ bcm5974_pause_traffic(dev);
+ dev->opened = 0;
+ }
usb_autopm_put_interface(dev->intf);
}
@@ -866,29 +862,24 @@ static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message)
{
struct bcm5974 *dev = usb_get_intfdata(iface);
- mutex_lock(&dev->pm_mutex);
+ guard(mutex)(&dev->pm_mutex);
if (dev->opened)
bcm5974_pause_traffic(dev);
- mutex_unlock(&dev->pm_mutex);
-
return 0;
}
static int bcm5974_resume(struct usb_interface *iface)
{
struct bcm5974 *dev = usb_get_intfdata(iface);
- int error = 0;
- mutex_lock(&dev->pm_mutex);
+ guard(mutex)(&dev->pm_mutex);
if (dev->opened)
- error = bcm5974_start_traffic(dev);
+ return bcm5974_start_traffic(dev);
- mutex_unlock(&dev->pm_mutex);
-
- return error;
+ return 0;
}
static int bcm5974_probe(struct usb_interface *iface,
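Same cleanup.h idiom as in the tegra-kbc hunks; note how guard() lets the resume path return the start result directly, since the unlock no longer needs a single exit point. A sketch (demo names hypothetical):

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct demo {
	struct mutex pm_mutex;
	bool opened;
};

static int demo_start(struct demo *d)
{
	return 0;	/* stand-in for restarting I/O */
}

static int demo_resume(struct demo *d)
{
	guard(mutex)(&d->pm_mutex);

	if (d->opened)
		return demo_start(d);	/* mutex released here too */

	return 0;
}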
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 7e97944f7616..8246fe77114b 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -24,6 +24,7 @@ enum rmi_f12_object_type {
};
#define F12_DATA1_BYTES_PER_OBJ 8
+#define RMI_F12_QUERY_RESOLUTION 29
struct f12_data {
struct rmi_2d_sensor sensor;
@@ -73,6 +74,8 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
int pitch_y = 0;
int rx_receivers = 0;
int tx_receivers = 0;
+ u16 query_dpm_addr = 0;
+ int dpm_resolution = 0;
item = rmi_get_register_desc_item(&f12->control_reg_desc, 8);
if (!item) {
@@ -122,18 +125,38 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
offset += 4;
}
- if (rmi_register_desc_has_subpacket(item, 3)) {
- rx_receivers = buf[offset];
- tx_receivers = buf[offset + 1];
- offset += 2;
- }
+ /*
+ * Use the Query DPM feature when the resolution query register
+ * exists.
+ */
+ if (rmi_get_register_desc_item(&f12->query_reg_desc,
+ RMI_F12_QUERY_RESOLUTION)) {
+ offset = rmi_register_desc_calc_reg_offset(&f12->query_reg_desc,
+ RMI_F12_QUERY_RESOLUTION);
+ query_dpm_addr = fn->fd.query_base_addr + offset;
+ ret = rmi_read(fn->rmi_dev, query_dpm_addr, buf);
+ if (ret < 0) {
+ dev_err(&fn->dev, "Failed to read DPM value: %d\n", ret);
+ return -ENODEV;
+ }
+ dpm_resolution = buf[0];
- /* Skip over sensor flags */
- if (rmi_register_desc_has_subpacket(item, 4))
- offset += 1;
+ sensor->x_mm = sensor->max_x / dpm_resolution;
+ sensor->y_mm = sensor->max_y / dpm_resolution;
+ } else {
+ if (rmi_register_desc_has_subpacket(item, 3)) {
+ rx_receivers = buf[offset];
+ tx_receivers = buf[offset + 1];
+ offset += 2;
+ }
- sensor->x_mm = (pitch_x * rx_receivers) >> 12;
- sensor->y_mm = (pitch_y * tx_receivers) >> 12;
+ /* Skip over sensor flags */
+ if (rmi_register_desc_has_subpacket(item, 4))
+ offset += 1;
+
+ sensor->x_mm = (pitch_x * rx_receivers) >> 12;
+ sensor->y_mm = (pitch_y * tx_receivers) >> 12;
+ }
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x_mm: %d y_mm: %d\n", __func__,
sensor->x_mm, sensor->y_mm);
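The DPM value read above is dots per millimeter, so physical size is simply the coordinate maximum divided by it — e.g. max_x = 1920 at 20 dots/mm gives a 96 mm wide sensor. The legacy branch instead derives size from pitch and electrode counts, apparently in fixed-point with 12 fractional bits (hence the >> 12). A sketch of the DPM arithmetic (demo names illustrative):

struct demo_sensor {
	unsigned int max_x, max_y;	/* coordinate range */
	unsigned int x_mm, y_mm;	/* physical size */
};

static void demo_set_size_mm(struct demo_sensor *s, unsigned int dpm)
{
	/* dpm: dots per millimeter from the resolution query */
	s->x_mm = s->max_x / dpm;	/* e.g. 1920 / 20 = 96 mm */
	s->y_mm = s->max_y / dpm;
}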
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index bad238f69a7a..34d1f07ea4c3 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1121,6 +1121,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOLOOP)
},
/*
+ * Some TongFang barebones have touchpad and/or keyboard issues after
+ * suspend fixable with nomux + reset + noloop + nopnp. Luckily, none of
+ * them have an external PS/2 port so this can safely be set for all of
+ * them.
+ * TongFang barebones come with board_vendor and/or system_vendor set to
+ * a different value for each individual reseller. The only somewhat
+ * universal way to identify them is by board_name.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ /*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
* after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
* none of them have an external PS/2 port so this can safely be set for
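For context, quirk tables like this are matched against the firmware's DMI strings at init time; matching only DMI_BOARD_NAME, as the comment explains, catches every reseller of the same barebone. A minimal sketch of consulting such a table (demo names hypothetical; the board string is taken from the patch):

#include <linux/dmi.h>
#include <linux/init.h>

static const struct dmi_system_id demo_quirk_table[] __initconst = {
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
		},
		.driver_data = (void *)1UL,	/* illustrative quirk bit */
	},
	{ }	/* sentinel */
};

static unsigned long __init demo_get_quirks(void)
{
	const struct dmi_system_id *id = dmi_first_match(demo_quirk_table);

	return id ? (unsigned long)id->driver_data : 0;
}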
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index 0c8b390b8b4f..3a431395c464 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -429,16 +429,14 @@ static int ps2_gpio_probe(struct platform_device *pdev)
}
error = devm_request_irq(dev, drvdata->irq, ps2_gpio_irq,
- IRQF_NO_THREAD, DRIVER_NAME, drvdata);
+ IRQF_NO_THREAD | IRQF_NO_AUTOEN, DRIVER_NAME,
+ drvdata);
if (error) {
dev_err(dev, "failed to request irq %d: %d\n",
drvdata->irq, error);
goto err_free_serio;
}
- /* Keep irq disabled until serio->open is called. */
- disable_irq(drvdata->irq);
-
serio->id.type = SERIO_8042;
serio->open = ps2_gpio_open;
serio->close = ps2_gpio_close;
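IRQF_NO_AUTOEN asks the IRQ core to leave the line disabled at request time, closing the window between devm_request_irq() and the old disable_irq() call where a stray edge could have run the handler before the driver was ready. A sketch (demo names hypothetical):

#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct device *dev, int irq, void *priv)
{
	/* requested disabled; no race before the first enable_irq() */
	return devm_request_irq(dev, irq, demo_isr,
				IRQF_NO_THREAD | IRQF_NO_AUTOEN,
				"demo", priv);
}

/* later, typically in the serio open() callback: enable_irq(irq); */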
diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c
index a88e2eee55c3..1ab12b247f98 100644
--- a/drivers/input/serio/userio.c
+++ b/drivers/input/serio/userio.c
@@ -267,7 +267,6 @@ static const struct file_operations userio_fops = {
.read = userio_char_read,
.write = userio_char_write,
.poll = userio_char_poll,
- .llseek = no_llseek,
};
static struct miscdevice userio_misc = {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index c821fe3ee794..1ac26fc2e3eb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -254,36 +254,6 @@ config TOUCHSCREEN_CYTTSP_SPI
To compile this driver as a module, choose M here: the
module will be called cyttsp_spi.
-config TOUCHSCREEN_CYTTSP4_CORE
- tristate "Cypress TrueTouch Gen4 Touchscreen Driver"
- help
- Core driver for Cypress TrueTouch(tm) Standard Product
- Generation4 touchscreen controllers.
-
- Say Y here if you have a Cypress Gen4 touchscreen.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here.
-
-config TOUCHSCREEN_CYTTSP4_I2C
- tristate "support I2C bus connection"
- depends on TOUCHSCREEN_CYTTSP4_CORE && I2C
- help
- Say Y here if the touchscreen is connected via I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called cyttsp4_i2c.
-
-config TOUCHSCREEN_CYTTSP4_SPI
- tristate "support SPI bus connection"
- depends on TOUCHSCREEN_CYTTSP4_CORE && SPI_MASTER
- help
- Say Y here if the touchscreen is connected via SPI bus.
-
- To compile this driver as a module, choose M here: the
- module will be called cyttsp4_spi.
-
config TOUCHSCREEN_CYTTSP5
tristate "Cypress TrueTouch Gen5 Touchscreen Driver"
depends on I2C
@@ -626,18 +596,6 @@ config TOUCHSCREEN_MAX11801
To compile this driver as a module, choose M here: the
module will be called max11801_ts.
-config TOUCHSCREEN_MCS5000
- tristate "MELFAS MCS-5000 touchscreen"
- depends on I2C
- help
- Say Y here if you have the MELFAS MCS-5000 touchscreen controller
- chip in your system.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called mcs5000_ts.
-
config TOUCHSCREEN_MMS114
tristate "MELFAS MMS114 touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index a81cb5aa21a5..82bc837ca01e 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -25,11 +25,8 @@ obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMA140) += cy8ctma140.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
-obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o cyttsp_i2c_common.o
+obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o
-obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_CORE) += cyttsp4_core.o
-obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_I2C) += cyttsp4_i2c.o cyttsp_i2c_common.o
-obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_SPI) += cyttsp4_spi.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP5) += cyttsp5.o
obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
obj-$(CONFIG_TOUCHSCREEN_DA9052) += da9052_tsi.o
@@ -63,7 +60,6 @@ obj-$(CONFIG_TOUCHSCREEN_MAX11801) += max11801_ts.o
obj-$(CONFIG_TOUCHSCREEN_MXS_LRADC) += mxs-lradc-ts.o
obj-$(CONFIG_TOUCHSCREEN_MX25) += fsl-imx25-tcq.o
obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
-obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
obj-$(CONFIG_TOUCHSCREEN_MELFAS_MIP4) += melfas_mip4.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
obj-$(CONFIG_TOUCHSCREEN_MMS114) += mms114.o
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index aa829725ded7..98d5b2ba63fb 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -239,14 +239,10 @@ static void vf50_ts_close(struct input_dev *dev_input)
static int vf50_ts_get_gpiod(struct device *dev, struct gpio_desc **gpio_d,
const char *con_id, enum gpiod_flags flags)
{
- int error;
-
*gpio_d = devm_gpiod_get(dev, con_id, flags);
- if (IS_ERR(*gpio_d)) {
- error = PTR_ERR(*gpio_d);
- dev_err(dev, "Could not get gpio_%s %d\n", con_id, error);
- return error;
- }
+ if (IS_ERR(*gpio_d))
+ return dev_err_probe(dev, PTR_ERR(*gpio_d),
+ "Could not get gpio_%s\n", con_id);
return 0;
}
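dev_err_probe() both logs and returns the error in one expression, and it stays quiet for -EPROBE_DEFER (recording the reason under devices_deferred instead), which is why the explicit error printing above could collapse to a single return. A sketch (the "reset" con_id is illustrative):

#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_get_gpio(struct device *dev, struct gpio_desc **gpiod)
{
	*gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(*gpiod))
		return dev_err_probe(dev, PTR_ERR(*gpiod),
				     "Could not get reset GPIO\n");

	return 0;
}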
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
deleted file mode 100644
index 9dc25eb2be44..000000000000
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ /dev/null
@@ -1,2174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cyttsp4_core.c
- * Cypress TrueTouch(TM) Standard Product V4 Core driver module.
- * For use with Cypress Txx4xx parts.
- * Supported parts include:
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2012 Cypress Semiconductor
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#include "cyttsp4_core.h"
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/input/mt.h>
-#include <linux/interrupt.h>
-#include <linux/pm_runtime.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-/* Timeout in ms. */
-#define CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT 500
-#define CY_CORE_SLEEP_REQUEST_EXCLUSIVE_TIMEOUT 5000
-#define CY_CORE_MODE_CHANGE_TIMEOUT 1000
-#define CY_CORE_RESET_AND_WAIT_TIMEOUT 500
-#define CY_CORE_WAKEUP_TIMEOUT 500
-
-#define CY_CORE_STARTUP_RETRY_COUNT 3
-
-static const char * const cyttsp4_tch_abs_string[] = {
- [CY_TCH_X] = "X",
- [CY_TCH_Y] = "Y",
- [CY_TCH_P] = "P",
- [CY_TCH_T] = "T",
- [CY_TCH_E] = "E",
- [CY_TCH_O] = "O",
- [CY_TCH_W] = "W",
- [CY_TCH_MAJ] = "MAJ",
- [CY_TCH_MIN] = "MIN",
- [CY_TCH_OR] = "OR",
- [CY_TCH_NUM_ABS] = "INVALID"
-};
-
-static const u8 ldr_exit[] = {
- 0xFF, 0x01, 0x3B, 0x00, 0x00, 0x4F, 0x6D, 0x17
-};
-
-static const u8 ldr_err_app[] = {
- 0x01, 0x02, 0x00, 0x00, 0x55, 0xDD, 0x17
-};
-
-static inline size_t merge_bytes(u8 high, u8 low)
-{
- return (high << 8) + low;
-}
-
-#ifdef VERBOSE_DEBUG
-static void cyttsp4_pr_buf(struct device *dev, u8 *pr_buf, u8 *dptr, int size,
- const char *data_name)
-{
- int i, k;
- const char fmt[] = "%02X ";
- int max;
-
- if (!size)
- return;
-
- max = (CY_MAX_PRBUF_SIZE - 1) - sizeof(CY_PR_TRUNCATED);
-
- pr_buf[0] = 0;
- for (i = k = 0; i < size && k < max; i++, k += 3)
- scnprintf(pr_buf + k, CY_MAX_PRBUF_SIZE, fmt, dptr[i]);
-
- dev_vdbg(dev, "%s: %s[0..%d]=%s%s\n", __func__, data_name, size - 1,
- pr_buf, size <= max ? "" : CY_PR_TRUNCATED);
-}
-#else
-#define cyttsp4_pr_buf(dev, pr_buf, dptr, size, data_name) do { } while (0)
-#endif
-
-static int cyttsp4_load_status_regs(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- struct device *dev = cd->dev;
- int rc;
-
- rc = cyttsp4_adap_read(cd, CY_REG_BASE, si->si_ofs.mode_size,
- si->xy_mode);
- if (rc < 0)
- dev_err(dev, "%s: fail read mode regs r=%d\n",
- __func__, rc);
- else
- cyttsp4_pr_buf(dev, cd->pr_buf, si->xy_mode,
- si->si_ofs.mode_size, "xy_mode");
-
- return rc;
-}
-
-static int cyttsp4_handshake(struct cyttsp4 *cd, u8 mode)
-{
- u8 cmd = mode ^ CY_HST_TOGGLE;
- int rc;
-
- /*
- * Mode change issued, handshaking now will cause endless mode change
- * requests, for sync mode modechange will do same with handshake
- * */
- if (mode & CY_HST_MODE_CHANGE)
- return 0;
-
- rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(cmd), &cmd);
- if (rc < 0)
- dev_err(cd->dev, "%s: bus write fail on handshake (ret=%d)\n",
- __func__, rc);
-
- return rc;
-}
-
-static int cyttsp4_hw_soft_reset(struct cyttsp4 *cd)
-{
- u8 cmd = CY_HST_RESET;
- int rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(cmd), &cmd);
- if (rc < 0) {
- dev_err(cd->dev, "%s: FAILED to execute SOFT reset\n",
- __func__);
- return rc;
- }
- return 0;
-}
-
-static int cyttsp4_hw_hard_reset(struct cyttsp4 *cd)
-{
- if (cd->cpdata->xres) {
- cd->cpdata->xres(cd->cpdata, cd->dev);
- dev_dbg(cd->dev, "%s: execute HARD reset\n", __func__);
- return 0;
- }
- dev_err(cd->dev, "%s: FAILED to execute HARD reset\n", __func__);
- return -ENOSYS;
-}
-
-static int cyttsp4_hw_reset(struct cyttsp4 *cd)
-{
- int rc = cyttsp4_hw_hard_reset(cd);
- if (rc == -ENOSYS)
- rc = cyttsp4_hw_soft_reset(cd);
- return rc;
-}
-
-/*
- * Gets number of bits for a touch filed as parameter,
- * sets maximum value for field which is used as bit mask
- * and returns number of bytes required for that field
- */
-static int cyttsp4_bits_2_bytes(unsigned int nbits, size_t *max)
-{
- *max = 1UL << nbits;
- return (nbits + 7) / 8;
-}
-
-static int cyttsp4_si_data_offsets(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- int rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(si->si_data),
- &si->si_data);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read sysinfo data offsets r=%d\n",
- __func__, rc);
- return rc;
- }
-
- /* Print sysinfo data offsets */
- cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)&si->si_data,
- sizeof(si->si_data), "sysinfo_data_offsets");
-
- /* convert sysinfo data offset bytes into integers */
-
- si->si_ofs.map_sz = merge_bytes(si->si_data.map_szh,
- si->si_data.map_szl);
- si->si_ofs.map_sz = merge_bytes(si->si_data.map_szh,
- si->si_data.map_szl);
- si->si_ofs.cydata_ofs = merge_bytes(si->si_data.cydata_ofsh,
- si->si_data.cydata_ofsl);
- si->si_ofs.test_ofs = merge_bytes(si->si_data.test_ofsh,
- si->si_data.test_ofsl);
- si->si_ofs.pcfg_ofs = merge_bytes(si->si_data.pcfg_ofsh,
- si->si_data.pcfg_ofsl);
- si->si_ofs.opcfg_ofs = merge_bytes(si->si_data.opcfg_ofsh,
- si->si_data.opcfg_ofsl);
- si->si_ofs.ddata_ofs = merge_bytes(si->si_data.ddata_ofsh,
- si->si_data.ddata_ofsl);
- si->si_ofs.mdata_ofs = merge_bytes(si->si_data.mdata_ofsh,
- si->si_data.mdata_ofsl);
- return rc;
-}
-
-static int cyttsp4_si_get_cydata(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- int read_offset;
- int mfgid_sz, calc_mfgid_sz;
- void *p;
- int rc;
-
- if (si->si_ofs.test_ofs <= si->si_ofs.cydata_ofs) {
- dev_err(cd->dev,
- "%s: invalid offset test_ofs: %zu, cydata_ofs: %zu\n",
- __func__, si->si_ofs.test_ofs, si->si_ofs.cydata_ofs);
- return -EINVAL;
- }
-
- si->si_ofs.cydata_size = si->si_ofs.test_ofs - si->si_ofs.cydata_ofs;
- dev_dbg(cd->dev, "%s: cydata size: %zd\n", __func__,
- si->si_ofs.cydata_size);
-
- p = krealloc(si->si_ptrs.cydata, si->si_ofs.cydata_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: failed to allocate cydata memory\n",
- __func__);
- return -ENOMEM;
- }
- si->si_ptrs.cydata = p;
-
- read_offset = si->si_ofs.cydata_ofs;
-
- /* Read the CYDA registers up to MFGID field */
- rc = cyttsp4_adap_read(cd, read_offset,
- offsetof(struct cyttsp4_cydata, mfgid_sz)
- + sizeof(si->si_ptrs.cydata->mfgid_sz),
- si->si_ptrs.cydata);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read cydata r=%d\n",
- __func__, rc);
- return rc;
- }
-
- /* Check MFGID size */
- mfgid_sz = si->si_ptrs.cydata->mfgid_sz;
- calc_mfgid_sz = si->si_ofs.cydata_size - sizeof(struct cyttsp4_cydata);
- if (mfgid_sz != calc_mfgid_sz) {
- dev_err(cd->dev, "%s: mismatch in MFGID size, reported:%d calculated:%d\n",
- __func__, mfgid_sz, calc_mfgid_sz);
- return -EINVAL;
- }
-
- read_offset += offsetof(struct cyttsp4_cydata, mfgid_sz)
- + sizeof(si->si_ptrs.cydata->mfgid_sz);
-
- /* Read the CYDA registers for MFGID field */
- rc = cyttsp4_adap_read(cd, read_offset, si->si_ptrs.cydata->mfgid_sz,
- si->si_ptrs.cydata->mfg_id);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read cydata r=%d\n",
- __func__, rc);
- return rc;
- }
-
- read_offset += si->si_ptrs.cydata->mfgid_sz;
-
- /* Read the rest of the CYDA registers */
- rc = cyttsp4_adap_read(cd, read_offset,
- sizeof(struct cyttsp4_cydata)
- - offsetof(struct cyttsp4_cydata, cyito_idh),
- &si->si_ptrs.cydata->cyito_idh);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read cydata r=%d\n",
- __func__, rc);
- return rc;
- }
-
- cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)si->si_ptrs.cydata,
- si->si_ofs.cydata_size, "sysinfo_cydata");
- return rc;
-}
-
-static int cyttsp4_si_get_test_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
- int rc;
-
- if (si->si_ofs.pcfg_ofs <= si->si_ofs.test_ofs) {
- dev_err(cd->dev,
- "%s: invalid offset pcfg_ofs: %zu, test_ofs: %zu\n",
- __func__, si->si_ofs.pcfg_ofs, si->si_ofs.test_ofs);
- return -EINVAL;
- }
-
- si->si_ofs.test_size = si->si_ofs.pcfg_ofs - si->si_ofs.test_ofs;
-
- p = krealloc(si->si_ptrs.test, si->si_ofs.test_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: failed to allocate test memory\n",
- __func__);
- return -ENOMEM;
- }
- si->si_ptrs.test = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.test_ofs, si->si_ofs.test_size,
- si->si_ptrs.test);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read test data r=%d\n",
- __func__, rc);
- return rc;
- }
-
- cyttsp4_pr_buf(cd->dev, cd->pr_buf,
- (u8 *)si->si_ptrs.test, si->si_ofs.test_size,
- "sysinfo_test_data");
- if (si->si_ptrs.test->post_codel &
- CY_POST_CODEL_WDG_RST)
- dev_info(cd->dev, "%s: %s codel=%02X\n",
- __func__, "Reset was a WATCHDOG RESET",
- si->si_ptrs.test->post_codel);
-
- if (!(si->si_ptrs.test->post_codel &
- CY_POST_CODEL_CFG_DATA_CRC_FAIL))
- dev_info(cd->dev, "%s: %s codel=%02X\n", __func__,
- "Config Data CRC FAIL",
- si->si_ptrs.test->post_codel);
-
- if (!(si->si_ptrs.test->post_codel &
- CY_POST_CODEL_PANEL_TEST_FAIL))
- dev_info(cd->dev, "%s: %s codel=%02X\n",
- __func__, "PANEL TEST FAIL",
- si->si_ptrs.test->post_codel);
-
- dev_info(cd->dev, "%s: SCANNING is %s codel=%02X\n",
- __func__, si->si_ptrs.test->post_codel & 0x08 ?
- "ENABLED" : "DISABLED",
- si->si_ptrs.test->post_codel);
- return rc;
-}
-
-static int cyttsp4_si_get_pcfg_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
- int rc;
-
- if (si->si_ofs.opcfg_ofs <= si->si_ofs.pcfg_ofs) {
- dev_err(cd->dev,
- "%s: invalid offset opcfg_ofs: %zu, pcfg_ofs: %zu\n",
- __func__, si->si_ofs.opcfg_ofs, si->si_ofs.pcfg_ofs);
- return -EINVAL;
- }
-
- si->si_ofs.pcfg_size = si->si_ofs.opcfg_ofs - si->si_ofs.pcfg_ofs;
-
- p = krealloc(si->si_ptrs.pcfg, si->si_ofs.pcfg_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: failed to allocate pcfg memory\n",
- __func__);
- return -ENOMEM;
- }
- si->si_ptrs.pcfg = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.pcfg_ofs, si->si_ofs.pcfg_size,
- si->si_ptrs.pcfg);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read pcfg data r=%d\n",
- __func__, rc);
- return rc;
- }
-
- si->si_ofs.max_x = merge_bytes((si->si_ptrs.pcfg->res_xh
- & CY_PCFG_RESOLUTION_X_MASK), si->si_ptrs.pcfg->res_xl);
- si->si_ofs.x_origin = !!(si->si_ptrs.pcfg->res_xh
- & CY_PCFG_ORIGIN_X_MASK);
- si->si_ofs.max_y = merge_bytes((si->si_ptrs.pcfg->res_yh
- & CY_PCFG_RESOLUTION_Y_MASK), si->si_ptrs.pcfg->res_yl);
- si->si_ofs.y_origin = !!(si->si_ptrs.pcfg->res_yh
- & CY_PCFG_ORIGIN_Y_MASK);
- si->si_ofs.max_p = merge_bytes(si->si_ptrs.pcfg->max_zh,
- si->si_ptrs.pcfg->max_zl);
-
- cyttsp4_pr_buf(cd->dev, cd->pr_buf,
- (u8 *)si->si_ptrs.pcfg,
- si->si_ofs.pcfg_size, "sysinfo_pcfg_data");
- return rc;
-}
-
-static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- struct cyttsp4_tch_abs_params *tch;
- struct cyttsp4_tch_rec_params *tch_old, *tch_new;
- enum cyttsp4_tch_abs abs;
- int i;
- void *p;
- int rc;
-
- if (si->si_ofs.ddata_ofs <= si->si_ofs.opcfg_ofs) {
- dev_err(cd->dev,
- "%s: invalid offset ddata_ofs: %zu, opcfg_ofs: %zu\n",
- __func__, si->si_ofs.ddata_ofs, si->si_ofs.opcfg_ofs);
- return -EINVAL;
- }
-
- si->si_ofs.opcfg_size = si->si_ofs.ddata_ofs - si->si_ofs.opcfg_ofs;
-
- p = krealloc(si->si_ptrs.opcfg, si->si_ofs.opcfg_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: failed to allocate opcfg memory\n",
- __func__);
- return -ENOMEM;
- }
- si->si_ptrs.opcfg = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.opcfg_ofs, si->si_ofs.opcfg_size,
- si->si_ptrs.opcfg);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail read opcfg data r=%d\n",
- __func__, rc);
- return rc;
- }
- si->si_ofs.cmd_ofs = si->si_ptrs.opcfg->cmd_ofs;
- si->si_ofs.rep_ofs = si->si_ptrs.opcfg->rep_ofs;
- si->si_ofs.rep_sz = (si->si_ptrs.opcfg->rep_szh * 256) +
- si->si_ptrs.opcfg->rep_szl;
- si->si_ofs.num_btns = si->si_ptrs.opcfg->num_btns;
- si->si_ofs.num_btn_regs = (si->si_ofs.num_btns +
- CY_NUM_BTN_PER_REG - 1) / CY_NUM_BTN_PER_REG;
- si->si_ofs.tt_stat_ofs = si->si_ptrs.opcfg->tt_stat_ofs;
- si->si_ofs.obj_cfg0 = si->si_ptrs.opcfg->obj_cfg0;
- si->si_ofs.max_tchs = si->si_ptrs.opcfg->max_tchs &
- CY_BYTE_OFS_MASK;
- si->si_ofs.tch_rec_size = si->si_ptrs.opcfg->tch_rec_size &
- CY_BYTE_OFS_MASK;
-
- /* Get the old touch fields */
- for (abs = CY_TCH_X; abs < CY_NUM_TCH_FIELDS; abs++) {
- tch = &si->si_ofs.tch_abs[abs];
- tch_old = &si->si_ptrs.opcfg->tch_rec_old[abs];
-
- tch->ofs = tch_old->loc & CY_BYTE_OFS_MASK;
- tch->size = cyttsp4_bits_2_bytes(tch_old->size,
- &tch->max);
- tch->bofs = (tch_old->loc & CY_BOFS_MASK) >> CY_BOFS_SHIFT;
- }
-
- /* button fields */
- si->si_ofs.btn_rec_size = si->si_ptrs.opcfg->btn_rec_size;
- si->si_ofs.btn_diff_ofs = si->si_ptrs.opcfg->btn_diff_ofs;
- si->si_ofs.btn_diff_size = si->si_ptrs.opcfg->btn_diff_size;
-
- if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE) {
- /* Get the extended touch fields */
- for (i = 0; i < CY_NUM_EXT_TCH_FIELDS; abs++, i++) {
- tch = &si->si_ofs.tch_abs[abs];
- tch_new = &si->si_ptrs.opcfg->tch_rec_new[i];
-
- tch->ofs = tch_new->loc & CY_BYTE_OFS_MASK;
- tch->size = cyttsp4_bits_2_bytes(tch_new->size,
- &tch->max);
- tch->bofs = (tch_new->loc & CY_BOFS_MASK) >> CY_BOFS_SHIFT;
- }
- }
-
- for (abs = 0; abs < CY_TCH_NUM_ABS; abs++) {
- dev_dbg(cd->dev, "%s: tch_rec_%s\n", __func__,
- cyttsp4_tch_abs_string[abs]);
- dev_dbg(cd->dev, "%s: ofs =%2zd\n", __func__,
- si->si_ofs.tch_abs[abs].ofs);
- dev_dbg(cd->dev, "%s: siz =%2zd\n", __func__,
- si->si_ofs.tch_abs[abs].size);
- dev_dbg(cd->dev, "%s: max =%2zd\n", __func__,
- si->si_ofs.tch_abs[abs].max);
- dev_dbg(cd->dev, "%s: bofs=%2zd\n", __func__,
- si->si_ofs.tch_abs[abs].bofs);
- }
-
- si->si_ofs.mode_size = si->si_ofs.tt_stat_ofs + 1;
- si->si_ofs.data_size = si->si_ofs.max_tchs *
- si->si_ptrs.opcfg->tch_rec_size;
-
- cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)si->si_ptrs.opcfg,
- si->si_ofs.opcfg_size, "sysinfo_opcfg_data");
-
- return 0;
-}
-
-static int cyttsp4_si_get_ddata(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
- int rc;
-
- si->si_ofs.ddata_size = si->si_ofs.mdata_ofs - si->si_ofs.ddata_ofs;
-
- p = krealloc(si->si_ptrs.ddata, si->si_ofs.ddata_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc ddata memory\n", __func__);
- return -ENOMEM;
- }
- si->si_ptrs.ddata = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.ddata_ofs, si->si_ofs.ddata_size,
- si->si_ptrs.ddata);
- if (rc < 0)
- dev_err(cd->dev, "%s: fail read ddata data r=%d\n",
- __func__, rc);
- else
- cyttsp4_pr_buf(cd->dev, cd->pr_buf,
- (u8 *)si->si_ptrs.ddata,
- si->si_ofs.ddata_size, "sysinfo_ddata");
- return rc;
-}
-
-static int cyttsp4_si_get_mdata(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
- int rc;
-
- si->si_ofs.mdata_size = si->si_ofs.map_sz - si->si_ofs.mdata_ofs;
-
- p = krealloc(si->si_ptrs.mdata, si->si_ofs.mdata_size, GFP_KERNEL);
- if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc mdata memory\n", __func__);
- return -ENOMEM;
- }
- si->si_ptrs.mdata = p;
-
- rc = cyttsp4_adap_read(cd, si->si_ofs.mdata_ofs, si->si_ofs.mdata_size,
- si->si_ptrs.mdata);
- if (rc < 0)
- dev_err(cd->dev, "%s: fail read mdata data r=%d\n",
- __func__, rc);
- else
- cyttsp4_pr_buf(cd->dev, cd->pr_buf,
- (u8 *)si->si_ptrs.mdata,
- si->si_ofs.mdata_size, "sysinfo_mdata");
- return rc;
-}
-
-static int cyttsp4_si_get_btn_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- int btn;
- int num_defined_keys;
- u16 *key_table;
- void *p;
- int rc = 0;
-
- if (si->si_ofs.num_btns) {
- si->si_ofs.btn_keys_size = si->si_ofs.num_btns *
- sizeof(struct cyttsp4_btn);
-
- p = krealloc(si->btn, si->si_ofs.btn_keys_size,
- GFP_KERNEL|__GFP_ZERO);
- if (p == NULL) {
- dev_err(cd->dev, "%s: %s\n", __func__,
- "fail alloc btn_keys memory");
- return -ENOMEM;
- }
- si->btn = p;
-
- if (cd->cpdata->sett[CY_IC_GRPNUM_BTN_KEYS] == NULL)
- num_defined_keys = 0;
- else if (cd->cpdata->sett[CY_IC_GRPNUM_BTN_KEYS]->data == NULL)
- num_defined_keys = 0;
- else
- num_defined_keys = cd->cpdata->sett
- [CY_IC_GRPNUM_BTN_KEYS]->size;
-
- for (btn = 0; btn < si->si_ofs.num_btns &&
- btn < num_defined_keys; btn++) {
- key_table = (u16 *)cd->cpdata->sett
- [CY_IC_GRPNUM_BTN_KEYS]->data;
- si->btn[btn].key_code = key_table[btn];
- si->btn[btn].state = CY_BTN_RELEASED;
- si->btn[btn].enabled = true;
- }
- for (; btn < si->si_ofs.num_btns; btn++) {
- si->btn[btn].key_code = KEY_RESERVED;
- si->btn[btn].state = CY_BTN_RELEASED;
- si->btn[btn].enabled = true;
- }
-
- return rc;
- }
-
- si->si_ofs.btn_keys_size = 0;
- kfree(si->btn);
- si->btn = NULL;
- return rc;
-}
-
-static int cyttsp4_si_get_op_data_ptrs(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- void *p;
-
- p = krealloc(si->xy_mode, si->si_ofs.mode_size, GFP_KERNEL|__GFP_ZERO);
- if (p == NULL)
- return -ENOMEM;
- si->xy_mode = p;
-
- p = krealloc(si->xy_data, si->si_ofs.data_size, GFP_KERNEL|__GFP_ZERO);
- if (p == NULL)
- return -ENOMEM;
- si->xy_data = p;
-
- p = krealloc(si->btn_rec_data,
- si->si_ofs.btn_rec_size * si->si_ofs.num_btns,
- GFP_KERNEL|__GFP_ZERO);
- if (p == NULL)
- return -ENOMEM;
- si->btn_rec_data = p;
-
- return 0;
-}
-
-static void cyttsp4_si_put_log_data(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- dev_dbg(cd->dev, "%s: cydata_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.cydata_ofs, si->si_ofs.cydata_size);
- dev_dbg(cd->dev, "%s: test_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.test_ofs, si->si_ofs.test_size);
- dev_dbg(cd->dev, "%s: pcfg_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.pcfg_ofs, si->si_ofs.pcfg_size);
- dev_dbg(cd->dev, "%s: opcfg_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.opcfg_ofs, si->si_ofs.opcfg_size);
- dev_dbg(cd->dev, "%s: ddata_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.ddata_ofs, si->si_ofs.ddata_size);
- dev_dbg(cd->dev, "%s: mdata_ofs =%4zd siz=%4zd\n", __func__,
- si->si_ofs.mdata_ofs, si->si_ofs.mdata_size);
-
- dev_dbg(cd->dev, "%s: cmd_ofs =%4zd\n", __func__,
- si->si_ofs.cmd_ofs);
- dev_dbg(cd->dev, "%s: rep_ofs =%4zd\n", __func__,
- si->si_ofs.rep_ofs);
- dev_dbg(cd->dev, "%s: rep_sz =%4zd\n", __func__,
- si->si_ofs.rep_sz);
- dev_dbg(cd->dev, "%s: num_btns =%4zd\n", __func__,
- si->si_ofs.num_btns);
- dev_dbg(cd->dev, "%s: num_btn_regs =%4zd\n", __func__,
- si->si_ofs.num_btn_regs);
- dev_dbg(cd->dev, "%s: tt_stat_ofs =%4zd\n", __func__,
- si->si_ofs.tt_stat_ofs);
- dev_dbg(cd->dev, "%s: tch_rec_size =%4zd\n", __func__,
- si->si_ofs.tch_rec_size);
- dev_dbg(cd->dev, "%s: max_tchs =%4zd\n", __func__,
- si->si_ofs.max_tchs);
- dev_dbg(cd->dev, "%s: mode_size =%4zd\n", __func__,
- si->si_ofs.mode_size);
- dev_dbg(cd->dev, "%s: data_size =%4zd\n", __func__,
- si->si_ofs.data_size);
- dev_dbg(cd->dev, "%s: map_sz =%4zd\n", __func__,
- si->si_ofs.map_sz);
-
- dev_dbg(cd->dev, "%s: btn_rec_size =%2zd\n", __func__,
- si->si_ofs.btn_rec_size);
- dev_dbg(cd->dev, "%s: btn_diff_ofs =%2zd\n", __func__,
- si->si_ofs.btn_diff_ofs);
- dev_dbg(cd->dev, "%s: btn_diff_size =%2zd\n", __func__,
- si->si_ofs.btn_diff_size);
-
- dev_dbg(cd->dev, "%s: max_x = 0x%04zX (%zd)\n", __func__,
- si->si_ofs.max_x, si->si_ofs.max_x);
- dev_dbg(cd->dev, "%s: x_origin = %zd (%s)\n", __func__,
- si->si_ofs.x_origin,
- si->si_ofs.x_origin == CY_NORMAL_ORIGIN ?
- "left corner" : "right corner");
- dev_dbg(cd->dev, "%s: max_y = 0x%04zX (%zd)\n", __func__,
- si->si_ofs.max_y, si->si_ofs.max_y);
- dev_dbg(cd->dev, "%s: y_origin = %zd (%s)\n", __func__,
- si->si_ofs.y_origin,
- si->si_ofs.y_origin == CY_NORMAL_ORIGIN ?
- "upper corner" : "lower corner");
- dev_dbg(cd->dev, "%s: max_p = 0x%04zX (%zd)\n", __func__,
- si->si_ofs.max_p, si->si_ofs.max_p);
-
- dev_dbg(cd->dev, "%s: xy_mode=%p xy_data=%p\n", __func__,
- si->xy_mode, si->xy_data);
-}
-
-static int cyttsp4_get_sysinfo_regs(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
- int rc;
-
- rc = cyttsp4_si_data_offsets(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_cydata(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_test_data(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_pcfg_data(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_opcfg_data(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_ddata(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_mdata(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_btn_data(cd);
- if (rc < 0)
- return rc;
-
- rc = cyttsp4_si_get_op_data_ptrs(cd);
- if (rc < 0) {
- dev_err(cd->dev, "%s: failed to get_op_data\n",
- __func__);
- return rc;
- }
-
- cyttsp4_si_put_log_data(cd);
-
- /* provide flow control handshake */
- rc = cyttsp4_handshake(cd, si->si_data.hst_mode);
- if (rc < 0)
- dev_err(cd->dev, "%s: handshake fail on sysinfo reg\n",
- __func__);
-
- si->ready = true;
- return rc;
-}
-
-static void cyttsp4_queue_startup_(struct cyttsp4 *cd)
-{
- if (cd->startup_state == STARTUP_NONE) {
- cd->startup_state = STARTUP_QUEUED;
- schedule_work(&cd->startup_work);
- dev_dbg(cd->dev, "%s: cyttsp4_startup queued\n", __func__);
- } else {
- dev_dbg(cd->dev, "%s: startup_state = %d\n", __func__,
- cd->startup_state);
- }
-}
-
-static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md,
- int max_slots)
-{
- int t;
-
- if (md->num_prv_tch == 0)
- return;
-
- for (t = 0; t < max_slots; t++) {
- input_mt_slot(md->input, t);
- input_mt_report_slot_inactive(md->input);
- }
-}
-
-static void cyttsp4_lift_all(struct cyttsp4_mt_data *md)
-{
- if (!md->si)
- return;
-
- if (md->num_prv_tch != 0) {
- cyttsp4_report_slot_liftoff(md,
- md->si->si_ofs.tch_abs[CY_TCH_T].max);
- input_sync(md->input);
- md->num_prv_tch = 0;
- }
-}
-
-static void cyttsp4_get_touch_axis(struct cyttsp4_mt_data *md,
- int *axis, int size, int max, u8 *xy_data, int bofs)
-{
- int nbyte;
- int next;
-
- for (nbyte = 0, *axis = 0, next = 0; nbyte < size; nbyte++) {
- dev_vdbg(&md->input->dev,
- "%s: *axis=%02X(%d) size=%d max=%08X xy_data=%p"
- " xy_data[%d]=%02X(%d) bofs=%d\n",
- __func__, *axis, *axis, size, max, xy_data, next,
- xy_data[next], xy_data[next], bofs);
- *axis = (*axis * 256) + (xy_data[next] >> bofs);
- next++;
- }
-
- *axis &= max - 1;
-
- dev_vdbg(&md->input->dev,
- "%s: *axis=%02X(%d) size=%d max=%08X xy_data=%p"
- " xy_data[%d]=%02X(%d)\n",
- __func__, *axis, *axis, size, max, xy_data, next,
- xy_data[next], xy_data[next]);
-}
-
-static void cyttsp4_get_touch(struct cyttsp4_mt_data *md,
- struct cyttsp4_touch *touch, u8 *xy_data)
-{
- struct device *dev = &md->input->dev;
- struct cyttsp4_sysinfo *si = md->si;
- enum cyttsp4_tch_abs abs;
- bool flipped;
-
- for (abs = CY_TCH_X; abs < CY_TCH_NUM_ABS; abs++) {
- cyttsp4_get_touch_axis(md, &touch->abs[abs],
- si->si_ofs.tch_abs[abs].size,
- si->si_ofs.tch_abs[abs].max,
- xy_data + si->si_ofs.tch_abs[abs].ofs,
- si->si_ofs.tch_abs[abs].bofs);
- dev_vdbg(dev, "%s: get %s=%04X(%d)\n", __func__,
- cyttsp4_tch_abs_string[abs],
- touch->abs[abs], touch->abs[abs]);
- }
-
- if (md->pdata->flags & CY_FLAG_FLIP) {
- swap(touch->abs[CY_TCH_X], touch->abs[CY_TCH_Y]);
- flipped = true;
- } else
- flipped = false;
-
- if (md->pdata->flags & CY_FLAG_INV_X) {
- if (flipped)
- touch->abs[CY_TCH_X] = md->si->si_ofs.max_y -
- touch->abs[CY_TCH_X];
- else
- touch->abs[CY_TCH_X] = md->si->si_ofs.max_x -
- touch->abs[CY_TCH_X];
- }
- if (md->pdata->flags & CY_FLAG_INV_Y) {
- if (flipped)
- touch->abs[CY_TCH_Y] = md->si->si_ofs.max_x -
- touch->abs[CY_TCH_Y];
- else
- touch->abs[CY_TCH_Y] = md->si->si_ofs.max_y -
- touch->abs[CY_TCH_Y];
- }
-
- dev_vdbg(dev, "%s: flip=%s inv-x=%s inv-y=%s x=%04X(%d) y=%04X(%d)\n",
- __func__, flipped ? "true" : "false",
- md->pdata->flags & CY_FLAG_INV_X ? "true" : "false",
- md->pdata->flags & CY_FLAG_INV_Y ? "true" : "false",
- touch->abs[CY_TCH_X], touch->abs[CY_TCH_X],
- touch->abs[CY_TCH_Y], touch->abs[CY_TCH_Y]);
-}
-
-static void cyttsp4_final_sync(struct input_dev *input, int max_slots, int *ids)
-{
- int t;
-
- for (t = 0; t < max_slots; t++) {
- if (ids[t])
- continue;
- input_mt_slot(input, t);
- input_mt_report_slot_inactive(input);
- }
-
- input_sync(input);
-}
-
-static void cyttsp4_get_mt_touches(struct cyttsp4_mt_data *md, int num_cur_tch)
-{
- struct device *dev = &md->input->dev;
- struct cyttsp4_sysinfo *si = md->si;
- struct cyttsp4_touch tch;
- int sig;
- int i, j, t = 0;
- int ids[MAX(CY_TMA1036_MAX_TCH, CY_TMA4XX_MAX_TCH)];
-
- memset(ids, 0, si->si_ofs.tch_abs[CY_TCH_T].max * sizeof(int));
- for (i = 0; i < num_cur_tch; i++) {
- cyttsp4_get_touch(md, &tch, si->xy_data +
- (i * si->si_ofs.tch_rec_size));
- if ((tch.abs[CY_TCH_T] < md->pdata->frmwrk->abs
- [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MIN_OST]) ||
- (tch.abs[CY_TCH_T] > md->pdata->frmwrk->abs
- [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MAX_OST])) {
- dev_err(dev, "%s: tch=%d -> bad trk_id=%d max_id=%d\n",
- __func__, i, tch.abs[CY_TCH_T],
- md->pdata->frmwrk->abs[(CY_ABS_ID_OST *
- CY_NUM_ABS_SET) + CY_MAX_OST]);
- continue;
- }
-
- /* use 0 based track id's */
- sig = md->pdata->frmwrk->abs
- [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + 0];
- if (sig != CY_IGNORE_VALUE) {
- t = tch.abs[CY_TCH_T] - md->pdata->frmwrk->abs
- [(CY_ABS_ID_OST * CY_NUM_ABS_SET) + CY_MIN_OST];
- if (tch.abs[CY_TCH_E] == CY_EV_LIFTOFF) {
- dev_dbg(dev, "%s: t=%d e=%d lift-off\n",
- __func__, t, tch.abs[CY_TCH_E]);
- goto cyttsp4_get_mt_touches_pr_tch;
- }
- input_mt_slot(md->input, t);
- input_mt_report_slot_state(md->input, MT_TOOL_FINGER,
- true);
- ids[t] = true;
- }
-
- /* all devices: position and pressure fields */
- for (j = 0; j <= CY_ABS_W_OST; j++) {
- sig = md->pdata->frmwrk->abs[((CY_ABS_X_OST + j) *
- CY_NUM_ABS_SET) + 0];
- if (sig != CY_IGNORE_VALUE)
- input_report_abs(md->input, sig,
- tch.abs[CY_TCH_X + j]);
- }
- if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE) {
- /*
- * TMA400 size and orientation fields:
- * if pressure is non-zero and major touch
- * signal is zero, then set major and minor touch
- * signals to minimum non-zero value
- */
- if (tch.abs[CY_TCH_P] > 0 && tch.abs[CY_TCH_MAJ] == 0)
- tch.abs[CY_TCH_MAJ] = tch.abs[CY_TCH_MIN] = 1;
-
- /* Get the extended touch fields */
- for (j = 0; j < CY_NUM_EXT_TCH_FIELDS; j++) {
- sig = md->pdata->frmwrk->abs
- [((CY_ABS_MAJ_OST + j) *
- CY_NUM_ABS_SET) + 0];
- if (sig != CY_IGNORE_VALUE)
- input_report_abs(md->input, sig,
- tch.abs[CY_TCH_MAJ + j]);
- }
- }
-
-cyttsp4_get_mt_touches_pr_tch:
- if (si->si_ofs.tch_rec_size > CY_TMA1036_TCH_REC_SIZE)
- dev_dbg(dev,
- "%s: t=%d x=%d y=%d z=%d M=%d m=%d o=%d e=%d\n",
- __func__, t,
- tch.abs[CY_TCH_X],
- tch.abs[CY_TCH_Y],
- tch.abs[CY_TCH_P],
- tch.abs[CY_TCH_MAJ],
- tch.abs[CY_TCH_MIN],
- tch.abs[CY_TCH_OR],
- tch.abs[CY_TCH_E]);
- else
- dev_dbg(dev,
- "%s: t=%d x=%d y=%d z=%d e=%d\n", __func__,
- t,
- tch.abs[CY_TCH_X],
- tch.abs[CY_TCH_Y],
- tch.abs[CY_TCH_P],
- tch.abs[CY_TCH_E]);
- }
-
- cyttsp4_final_sync(md->input, si->si_ofs.tch_abs[CY_TCH_T].max, ids);
-
- md->num_prv_tch = num_cur_tch;
-
- return;
-}
-
-/* read xy_data for all current touches */
-static int cyttsp4_xy_worker(struct cyttsp4 *cd)
-{
- struct cyttsp4_mt_data *md = &cd->md;
- struct device *dev = &md->input->dev;
- struct cyttsp4_sysinfo *si = md->si;
- u8 num_cur_tch;
- u8 hst_mode;
- u8 rep_len;
- u8 rep_stat;
- u8 tt_stat;
- int rc = 0;
-
- /*
- * Get event data from cyttsp4 device.
- * The event data includes all data
- * for all active touches.
- * Event data also includes button data
- */
- /*
- * Use 2 reads:
- * 1st read to get mode + button bytes + touch count (core)
- * 2nd read (optional) to get touch 1 - touch n data
- */
- hst_mode = si->xy_mode[CY_REG_BASE];
- rep_len = si->xy_mode[si->si_ofs.rep_ofs];
- rep_stat = si->xy_mode[si->si_ofs.rep_ofs + 1];
- tt_stat = si->xy_mode[si->si_ofs.tt_stat_ofs];
- dev_vdbg(dev, "%s: %s%02X %s%d %s%02X %s%02X\n", __func__,
- "hst_mode=", hst_mode, "rep_len=", rep_len,
- "rep_stat=", rep_stat, "tt_stat=", tt_stat);
-
- num_cur_tch = GET_NUM_TOUCHES(tt_stat);
- dev_vdbg(dev, "%s: num_cur_tch=%d\n", __func__, num_cur_tch);
-
- if (rep_len == 0 && num_cur_tch > 0) {
- dev_err(dev, "%s: report length error rep_len=%d num_tch=%d\n",
- __func__, rep_len, num_cur_tch);
- goto cyttsp4_xy_worker_exit;
- }
-
- /* read touches */
- if (num_cur_tch > 0) {
- rc = cyttsp4_adap_read(cd, si->si_ofs.tt_stat_ofs + 1,
- num_cur_tch * si->si_ofs.tch_rec_size,
- si->xy_data);
- if (rc < 0) {
- dev_err(dev, "%s: read fail on touch regs r=%d\n",
- __func__, rc);
- goto cyttsp4_xy_worker_exit;
- }
- }
-
- /* print xy data */
- cyttsp4_pr_buf(dev, cd->pr_buf, si->xy_data, num_cur_tch *
- si->si_ofs.tch_rec_size, "xy_data");
-
- /* check any error conditions */
- if (IS_BAD_PKT(rep_stat)) {
- dev_dbg(dev, "%s: Invalid buffer detected\n", __func__);
- rc = 0;
- goto cyttsp4_xy_worker_exit;
- }
-
- if (IS_LARGE_AREA(tt_stat))
- dev_dbg(dev, "%s: Large area detected\n", __func__);
-
- if (num_cur_tch > si->si_ofs.max_tchs) {
- dev_err(dev, "%s: too many tch; set to max tch (n=%d c=%zd)\n",
- __func__, num_cur_tch, si->si_ofs.max_tchs);
- num_cur_tch = si->si_ofs.max_tchs;
- }
-
- /* extract xy_data for all currently reported touches */
- dev_vdbg(dev, "%s: extract data num_cur_tch=%d\n", __func__,
- num_cur_tch);
- if (num_cur_tch)
- cyttsp4_get_mt_touches(md, num_cur_tch);
- else
- cyttsp4_lift_all(md);
-
- rc = 0;
-
-cyttsp4_xy_worker_exit:
- return rc;
-}
-
-static int cyttsp4_mt_attention(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
- struct cyttsp4_mt_data *md = &cd->md;
- int rc = 0;
-
- if (!md->si)
- return 0;
-
- mutex_lock(&md->report_lock);
- if (!md->is_suspended) {
- /* core handles handshake */
- rc = cyttsp4_xy_worker(cd);
- } else {
- dev_vdbg(dev, "%s: Ignoring report while suspended\n",
- __func__);
- }
- mutex_unlock(&md->report_lock);
- if (rc < 0)
- dev_err(dev, "%s: xy_worker error r=%d\n", __func__, rc);
-
- return rc;
-}
-
-static irqreturn_t cyttsp4_irq(int irq, void *handle)
-{
- struct cyttsp4 *cd = handle;
- struct device *dev = cd->dev;
- enum cyttsp4_mode cur_mode;
- u8 cmd_ofs = cd->sysinfo.si_ofs.cmd_ofs;
- u8 mode[3];
- int rc;
-
- /*
- * Check whether this IRQ should be ignored (external)
- * This should be the very first thing to check since
- * ignore_irq may be set for a very short period of time
- */
- if (atomic_read(&cd->ignore_irq)) {
- dev_vdbg(dev, "%s: Ignoring IRQ\n", __func__);
- return IRQ_HANDLED;
- }
-
- dev_dbg(dev, "%s int:0x%x\n", __func__, cd->int_status);
-
- mutex_lock(&cd->system_lock);
-
- /* Just to debug */
- if (cd->sleep_state == SS_SLEEP_ON || cd->sleep_state == SS_SLEEPING)
- dev_vdbg(dev, "%s: Received IRQ while in sleep\n", __func__);
-
- rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), mode);
- if (rc) {
- dev_err(cd->dev, "%s: Fail read adapter r=%d\n", __func__, rc);
- goto cyttsp4_irq_exit;
- }
- dev_vdbg(dev, "%s mode[0-2]:0x%X 0x%X 0x%X\n", __func__,
- mode[0], mode[1], mode[2]);
-
- if (IS_BOOTLOADER(mode[0], mode[1])) {
- cur_mode = CY_MODE_BOOTLOADER;
- dev_vdbg(dev, "%s: bl running\n", __func__);
- if (cd->mode == CY_MODE_BOOTLOADER) {
- /* Signal bootloader heartbeat heard */
- wake_up(&cd->wait_q);
- goto cyttsp4_irq_exit;
- }
-
- /* switch to bootloader */
- dev_dbg(dev, "%s: restart switch to bl m=%d -> m=%d\n",
- __func__, cd->mode, cur_mode);
-
- /* catch operation->bl glitch */
- if (cd->mode != CY_MODE_UNKNOWN) {
- /* Incase startup_state do not let startup_() */
- cd->mode = CY_MODE_UNKNOWN;
- cyttsp4_queue_startup_(cd);
- goto cyttsp4_irq_exit;
- }
-
- /*
- * do not wake thread on this switch since
- * it is possible to get an early heartbeat
- * prior to performing the reset
- */
- cd->mode = cur_mode;
-
- goto cyttsp4_irq_exit;
- }
-
- switch (mode[0] & CY_HST_MODE) {
- case CY_HST_OPERATE:
- cur_mode = CY_MODE_OPERATIONAL;
- dev_vdbg(dev, "%s: operational\n", __func__);
- break;
- case CY_HST_CAT:
- cur_mode = CY_MODE_CAT;
- dev_vdbg(dev, "%s: CaT\n", __func__);
- break;
- case CY_HST_SYSINFO:
- cur_mode = CY_MODE_SYSINFO;
- dev_vdbg(dev, "%s: sysinfo\n", __func__);
- break;
- default:
- cur_mode = CY_MODE_UNKNOWN;
- dev_err(dev, "%s: unknown HST mode 0x%02X\n", __func__,
- mode[0]);
- break;
- }
-
- /* Check whether this IRQ should be ignored (internal) */
- if (cd->int_status & CY_INT_IGNORE) {
- dev_vdbg(dev, "%s: Ignoring IRQ\n", __func__);
- goto cyttsp4_irq_exit;
- }
-
- /* Check for wake up interrupt */
- if (cd->int_status & CY_INT_AWAKE) {
- cd->int_status &= ~CY_INT_AWAKE;
- wake_up(&cd->wait_q);
- dev_vdbg(dev, "%s: Received wake up interrupt\n", __func__);
- goto cyttsp4_irq_handshake;
- }
-
- /* Expecting mode change interrupt */
- if ((cd->int_status & CY_INT_MODE_CHANGE)
- && (mode[0] & CY_HST_MODE_CHANGE) == 0) {
- cd->int_status &= ~CY_INT_MODE_CHANGE;
- dev_dbg(dev, "%s: finish mode switch m=%d -> m=%d\n",
- __func__, cd->mode, cur_mode);
- cd->mode = cur_mode;
- wake_up(&cd->wait_q);
- goto cyttsp4_irq_handshake;
- }
-
- /* compare current core mode to current device mode */
- dev_vdbg(dev, "%s: cd->mode=%d cur_mode=%d\n",
- __func__, cd->mode, cur_mode);
- if ((mode[0] & CY_HST_MODE_CHANGE) == 0 && cd->mode != cur_mode) {
- /* Unexpected mode change occurred */
- dev_err(dev, "%s %d->%d 0x%x\n", __func__, cd->mode,
- cur_mode, cd->int_status);
- dev_dbg(dev, "%s: Unexpected mode change, startup\n",
- __func__);
- cyttsp4_queue_startup_(cd);
- goto cyttsp4_irq_exit;
- }
-
- /* Expecting command complete interrupt */
- dev_vdbg(dev, "%s: command byte:0x%x\n", __func__, mode[cmd_ofs]);
- if ((cd->int_status & CY_INT_EXEC_CMD)
- && mode[cmd_ofs] & CY_CMD_COMPLETE) {
- cd->int_status &= ~CY_INT_EXEC_CMD;
- dev_vdbg(dev, "%s: Received command complete interrupt\n",
- __func__);
- wake_up(&cd->wait_q);
- /*
- * It is possible to receive a single interrupt for
- * command complete and touch/button status report.
- * Continue processing for a possible status report.
- */
- }
-
- /* This should be status report, read status regs */
- if (cd->mode == CY_MODE_OPERATIONAL) {
- dev_vdbg(dev, "%s: Read status registers\n", __func__);
- rc = cyttsp4_load_status_regs(cd);
- if (rc < 0)
- dev_err(dev, "%s: fail read mode regs r=%d\n",
- __func__, rc);
- }
-
- cyttsp4_mt_attention(cd);
-
-cyttsp4_irq_handshake:
- /* handshake the event */
- dev_vdbg(dev, "%s: Handshake mode=0x%02X r=%d\n",
- __func__, mode[0], rc);
- rc = cyttsp4_handshake(cd, mode[0]);
- if (rc < 0)
- dev_err(dev, "%s: Fail handshake mode=0x%02X r=%d\n",
- __func__, mode[0], rc);
-
- /*
- * a non-zero udelay period is required for using
- * IRQF_TRIGGER_LOW in order to delay until the
- * device completes isr deassert
- */
- udelay(cd->cpdata->level_irq_udelay);
-
-cyttsp4_irq_exit:
- mutex_unlock(&cd->system_lock);
- return IRQ_HANDLED;
-}
-
-static void cyttsp4_start_wd_timer(struct cyttsp4 *cd)
-{
- if (!CY_WATCHDOG_TIMEOUT)
- return;
-
- mod_timer(&cd->watchdog_timer, jiffies +
- msecs_to_jiffies(CY_WATCHDOG_TIMEOUT));
-}
-
-static void cyttsp4_stop_wd_timer(struct cyttsp4 *cd)
-{
- if (!CY_WATCHDOG_TIMEOUT)
- return;
-
- /*
- * Ensure we wait until the watchdog timer
- * running on a different CPU finishes
- */
- timer_shutdown_sync(&cd->watchdog_timer);
- cancel_work_sync(&cd->watchdog_work);
-}
-
-static void cyttsp4_watchdog_timer(struct timer_list *t)
-{
- struct cyttsp4 *cd = from_timer(cd, t, watchdog_timer);
-
- dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__);
-
- schedule_work(&cd->watchdog_work);
-}
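
The watchdog just deleted splits its duty between a timer and a workqueue because timer callbacks run in atomic context and must not sleep or take mutexes such as system_lock. A minimal sketch of the same defer-to-process-context pattern, assuming initialization with timer_setup() and INIT_WORK() as in cyttsp4_probe() further down (all wd_* names are illustrative, not from this driver):

        #include <linux/timer.h>
        #include <linux/workqueue.h>
        #include <linux/jiffies.h>

        struct wd_ctx {
                struct timer_list timer;
                struct work_struct work;
        };

        static void wd_work_fn(struct work_struct *work)
        {
                struct wd_ctx *ctx = container_of(work, struct wd_ctx, work);

                /* Process context: may sleep, take mutexes, do bus I/O. */
                mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(1000));
        }

        static void wd_timer_fn(struct timer_list *t)
        {
                struct wd_ctx *ctx = from_timer(ctx, t, timer);

                /* Atomic context: only hand off to the workqueue. */
                schedule_work(&ctx->work);
        }

Teardown mirrors cyttsp4_stop_wd_timer() above: timer_shutdown_sync() waits out a callback running on another CPU, then cancel_work_sync() reaps any work it had already queued.
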
-
-static int cyttsp4_request_exclusive(struct cyttsp4 *cd, void *ownptr,
- int timeout_ms)
-{
- int t = msecs_to_jiffies(timeout_ms);
- bool with_timeout = (timeout_ms != 0);
-
- mutex_lock(&cd->system_lock);
- if (!cd->exclusive_dev && cd->exclusive_waits == 0) {
- cd->exclusive_dev = ownptr;
- goto exit;
- }
-
- cd->exclusive_waits++;
-wait:
- mutex_unlock(&cd->system_lock);
- if (with_timeout) {
- t = wait_event_timeout(cd->wait_q, !cd->exclusive_dev, t);
- if (IS_TMO(t)) {
- dev_err(cd->dev, "%s: tmo waiting exclusive access\n",
- __func__);
- mutex_lock(&cd->system_lock);
- cd->exclusive_waits--;
- mutex_unlock(&cd->system_lock);
- return -ETIME;
- }
- } else {
- wait_event(cd->wait_q, !cd->exclusive_dev);
- }
- mutex_lock(&cd->system_lock);
- if (cd->exclusive_dev)
- goto wait;
- cd->exclusive_dev = ownptr;
- cd->exclusive_waits--;
-exit:
- mutex_unlock(&cd->system_lock);
-
- return 0;
-}
-
-/*
- * Returns an error if the device was not owned by the caller.
- */
-static int cyttsp4_release_exclusive(struct cyttsp4 *cd, void *ownptr)
-{
- mutex_lock(&cd->system_lock);
- if (cd->exclusive_dev != ownptr) {
- mutex_unlock(&cd->system_lock);
- return -EINVAL;
- }
-
- dev_vdbg(cd->dev, "%s: exclusive_dev %p freed\n",
- __func__, cd->exclusive_dev);
- cd->exclusive_dev = NULL;
- wake_up(&cd->wait_q);
- mutex_unlock(&cd->system_lock);
- return 0;
-}
-
-static int cyttsp4_wait_bl_heartbeat(struct cyttsp4 *cd)
-{
- long t;
- int rc = 0;
-
- /* wait heartbeat */
- dev_vdbg(cd->dev, "%s: wait heartbeat...\n", __func__);
- t = wait_event_timeout(cd->wait_q, cd->mode == CY_MODE_BOOTLOADER,
- msecs_to_jiffies(CY_CORE_RESET_AND_WAIT_TIMEOUT));
- if (IS_TMO(t)) {
- dev_err(cd->dev, "%s: tmo waiting bl heartbeat cd->mode=%d\n",
- __func__, cd->mode);
- rc = -ETIME;
- }
-
- return rc;
-}
-
-static int cyttsp4_wait_sysinfo_mode(struct cyttsp4 *cd)
-{
- long t;
-
- dev_vdbg(cd->dev, "%s: wait sysinfo...\n", __func__);
-
- t = wait_event_timeout(cd->wait_q, cd->mode == CY_MODE_SYSINFO,
- msecs_to_jiffies(CY_CORE_MODE_CHANGE_TIMEOUT));
- if (IS_TMO(t)) {
- dev_err(cd->dev, "%s: tmo waiting exit bl cd->mode=%d\n",
- __func__, cd->mode);
- mutex_lock(&cd->system_lock);
- cd->int_status &= ~CY_INT_MODE_CHANGE;
- mutex_unlock(&cd->system_lock);
- return -ETIME;
- }
-
- return 0;
-}
-
-static int cyttsp4_reset_and_wait(struct cyttsp4 *cd)
-{
- int rc;
-
- /* reset hardware */
- mutex_lock(&cd->system_lock);
- dev_dbg(cd->dev, "%s: reset hw...\n", __func__);
- rc = cyttsp4_hw_reset(cd);
- cd->mode = CY_MODE_UNKNOWN;
- mutex_unlock(&cd->system_lock);
- if (rc < 0) {
- dev_err(cd->dev, "%s:Fail hw reset r=%d\n", __func__, rc);
- return rc;
- }
-
- return cyttsp4_wait_bl_heartbeat(cd);
-}
-
-/*
- * Returns an error if refused or timed out; blocks until the mode
- * change complete bit is set (mode change interrupt).
- */
-static int cyttsp4_set_mode(struct cyttsp4 *cd, int new_mode)
-{
- u8 new_dev_mode;
- u8 mode;
- long t;
- int rc;
-
- switch (new_mode) {
- case CY_MODE_OPERATIONAL:
- new_dev_mode = CY_HST_OPERATE;
- break;
- case CY_MODE_SYSINFO:
- new_dev_mode = CY_HST_SYSINFO;
- break;
- case CY_MODE_CAT:
- new_dev_mode = CY_HST_CAT;
- break;
- default:
- dev_err(cd->dev, "%s: invalid mode: %02X(%d)\n",
- __func__, new_mode, new_mode);
- return -EINVAL;
- }
-
- /* change mode */
- dev_dbg(cd->dev, "%s: %s=%p new_dev_mode=%02X new_mode=%d\n",
- __func__, "have exclusive", cd->exclusive_dev,
- new_dev_mode, new_mode);
-
- mutex_lock(&cd->system_lock);
- rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode);
- if (rc < 0) {
- mutex_unlock(&cd->system_lock);
- dev_err(cd->dev, "%s: Fail read mode r=%d\n",
- __func__, rc);
- goto exit;
- }
-
- /* Clear device mode bits and set to new mode */
- mode &= ~CY_HST_MODE;
- mode |= new_dev_mode | CY_HST_MODE_CHANGE;
-
- cd->int_status |= CY_INT_MODE_CHANGE;
- rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(mode), &mode);
- mutex_unlock(&cd->system_lock);
- if (rc < 0) {
- dev_err(cd->dev, "%s: Fail write mode change r=%d\n",
- __func__, rc);
- goto exit;
- }
-
- /* wait for mode change done interrupt */
- t = wait_event_timeout(cd->wait_q,
- (cd->int_status & CY_INT_MODE_CHANGE) == 0,
- msecs_to_jiffies(CY_CORE_MODE_CHANGE_TIMEOUT));
- dev_dbg(cd->dev, "%s: back from wait t=%ld cd->mode=%d\n",
- __func__, t, cd->mode);
-
- if (IS_TMO(t)) {
- dev_err(cd->dev, "%s: %s\n", __func__,
- "tmo waiting mode change");
- mutex_lock(&cd->system_lock);
- cd->int_status &= ~CY_INT_MODE_CHANGE;
- mutex_unlock(&cd->system_lock);
- rc = -EINVAL;
- }
-
-exit:
- return rc;
-}
-
-static void cyttsp4_watchdog_work(struct work_struct *work)
-{
- struct cyttsp4 *cd =
- container_of(work, struct cyttsp4, watchdog_work);
- u8 *mode;
- int retval;
-
- mutex_lock(&cd->system_lock);
- retval = cyttsp4_load_status_regs(cd);
- if (retval < 0) {
- dev_err(cd->dev,
- "%s: failed to access device in watchdog timer r=%d\n",
- __func__, retval);
- cyttsp4_queue_startup_(cd);
- goto cyttsp4_timer_watchdog_exit_error;
- }
- mode = &cd->sysinfo.xy_mode[CY_REG_BASE];
- if (IS_BOOTLOADER(mode[0], mode[1])) {
- dev_err(cd->dev,
- "%s: device found in bootloader mode when operational mode\n",
- __func__);
- cyttsp4_queue_startup_(cd);
- goto cyttsp4_timer_watchdog_exit_error;
- }
-
- cyttsp4_start_wd_timer(cd);
-cyttsp4_timer_watchdog_exit_error:
- mutex_unlock(&cd->system_lock);
-}
-
-static int cyttsp4_core_sleep_(struct cyttsp4 *cd)
-{
- enum cyttsp4_sleep_state ss = SS_SLEEP_ON;
- enum cyttsp4_int_state int_status = CY_INT_IGNORE;
- int rc = 0;
- u8 mode[2];
-
- /* Already in sleep mode? */
- mutex_lock(&cd->system_lock);
- if (cd->sleep_state == SS_SLEEP_ON) {
- mutex_unlock(&cd->system_lock);
- return 0;
- }
- cd->sleep_state = SS_SLEEPING;
- mutex_unlock(&cd->system_lock);
-
- cyttsp4_stop_wd_timer(cd);
-
- /* Wait until currently running IRQ handler exits and disable IRQ */
- disable_irq(cd->irq);
-
- dev_vdbg(cd->dev, "%s: write DEEP SLEEP...\n", __func__);
- mutex_lock(&cd->system_lock);
- rc = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode);
- if (rc) {
- mutex_unlock(&cd->system_lock);
- dev_err(cd->dev, "%s: Fail read adapter r=%d\n", __func__, rc);
- goto error;
- }
-
- if (IS_BOOTLOADER(mode[0], mode[1])) {
- mutex_unlock(&cd->system_lock);
- dev_err(cd->dev, "%s: Device in BOOTLOADER mode.\n", __func__);
- rc = -EINVAL;
- goto error;
- }
-
- mode[0] |= CY_HST_SLEEP;
- rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(mode[0]), &mode[0]);
- mutex_unlock(&cd->system_lock);
- if (rc) {
- dev_err(cd->dev, "%s: Fail write adapter r=%d\n", __func__, rc);
- goto error;
- }
- dev_vdbg(cd->dev, "%s: write DEEP SLEEP succeeded\n", __func__);
-
- if (cd->cpdata->power) {
- dev_dbg(cd->dev, "%s: Power down HW\n", __func__);
- rc = cd->cpdata->power(cd->cpdata, 0, cd->dev, &cd->ignore_irq);
- } else {
- dev_dbg(cd->dev, "%s: No power function\n", __func__);
- rc = 0;
- }
- if (rc < 0) {
- dev_err(cd->dev, "%s: HW Power down fails r=%d\n",
- __func__, rc);
- goto error;
- }
-
- /* Give time to FW to sleep */
- msleep(50);
-
- goto exit;
-
-error:
- ss = SS_SLEEP_OFF;
- int_status = CY_INT_NONE;
- cyttsp4_start_wd_timer(cd);
-
-exit:
- mutex_lock(&cd->system_lock);
- cd->sleep_state = ss;
- cd->int_status |= int_status;
- mutex_unlock(&cd->system_lock);
- enable_irq(cd->irq);
- return rc;
-}
-
-static int cyttsp4_startup_(struct cyttsp4 *cd)
-{
- int retry = CY_CORE_STARTUP_RETRY_COUNT;
- int rc;
-
- cyttsp4_stop_wd_timer(cd);
-
-reset:
- if (retry != CY_CORE_STARTUP_RETRY_COUNT)
- dev_dbg(cd->dev, "%s: Retry %d\n", __func__,
- CY_CORE_STARTUP_RETRY_COUNT - retry);
-
- /* reset hardware and wait for heartbeat */
- rc = cyttsp4_reset_and_wait(cd);
- if (rc < 0) {
- dev_err(cd->dev, "%s: Error on h/w reset r=%d\n", __func__, rc);
- if (retry--)
- goto reset;
- goto exit;
- }
-
- /* exit bl into sysinfo mode */
- dev_vdbg(cd->dev, "%s: write exit ldr...\n", __func__);
- mutex_lock(&cd->system_lock);
- cd->int_status &= ~CY_INT_IGNORE;
- cd->int_status |= CY_INT_MODE_CHANGE;
-
- rc = cyttsp4_adap_write(cd, CY_REG_BASE, sizeof(ldr_exit),
- (u8 *)ldr_exit);
- mutex_unlock(&cd->system_lock);
- if (rc < 0) {
- dev_err(cd->dev, "%s: Fail write r=%d\n", __func__, rc);
- if (retry--)
- goto reset;
- goto exit;
- }
-
- rc = cyttsp4_wait_sysinfo_mode(cd);
- if (rc < 0) {
- u8 buf[sizeof(ldr_err_app)];
- int rc1;
-
- /* Check for invalid/corrupted touch application */
- rc1 = cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(ldr_err_app),
- buf);
- if (rc1) {
- dev_err(cd->dev, "%s: Fail read r=%d\n", __func__, rc1);
- } else if (!memcmp(buf, ldr_err_app, sizeof(ldr_err_app))) {
- dev_err(cd->dev, "%s: Error launching touch application\n",
- __func__);
- mutex_lock(&cd->system_lock);
- cd->invalid_touch_app = true;
- mutex_unlock(&cd->system_lock);
- goto exit_no_wd;
- }
-
- if (retry--)
- goto reset;
- goto exit;
- }
-
- mutex_lock(&cd->system_lock);
- cd->invalid_touch_app = false;
- mutex_unlock(&cd->system_lock);
-
- /* read sysinfo data */
- dev_vdbg(cd->dev, "%s: get sysinfo regs..\n", __func__);
- rc = cyttsp4_get_sysinfo_regs(cd);
- if (rc < 0) {
- dev_err(cd->dev, "%s: failed to get sysinfo regs rc=%d\n",
- __func__, rc);
- if (retry--)
- goto reset;
- goto exit;
- }
-
- rc = cyttsp4_set_mode(cd, CY_MODE_OPERATIONAL);
- if (rc < 0) {
- dev_err(cd->dev, "%s: failed to set mode to operational rc=%d\n",
- __func__, rc);
- if (retry--)
- goto reset;
- goto exit;
- }
-
- cyttsp4_lift_all(&cd->md);
-
- /* restore to sleep if was suspended */
- mutex_lock(&cd->system_lock);
- if (cd->sleep_state == SS_SLEEP_ON) {
- cd->sleep_state = SS_SLEEP_OFF;
- mutex_unlock(&cd->system_lock);
- cyttsp4_core_sleep_(cd);
- goto exit_no_wd;
- }
- mutex_unlock(&cd->system_lock);
-
-exit:
- cyttsp4_start_wd_timer(cd);
-exit_no_wd:
- return rc;
-}
-
-static int cyttsp4_startup(struct cyttsp4 *cd)
-{
- int rc;
-
- mutex_lock(&cd->system_lock);
- cd->startup_state = STARTUP_RUNNING;
- mutex_unlock(&cd->system_lock);
-
- rc = cyttsp4_request_exclusive(cd, cd->dev,
- CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n",
- __func__, cd->exclusive_dev, cd->dev);
- goto exit;
- }
-
- rc = cyttsp4_startup_(cd);
-
- if (cyttsp4_release_exclusive(cd, cd->dev) < 0)
- /* Don't return fail code, mode is already changed. */
- dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
- else
- dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__);
-
-exit:
- mutex_lock(&cd->system_lock);
- cd->startup_state = STARTUP_NONE;
- mutex_unlock(&cd->system_lock);
-
- /* Wake the waiters for end of startup */
- wake_up(&cd->wait_q);
-
- return rc;
-}
-
-static void cyttsp4_startup_work_function(struct work_struct *work)
-{
- struct cyttsp4 *cd = container_of(work, struct cyttsp4, startup_work);
- int rc;
-
- rc = cyttsp4_startup(cd);
- if (rc < 0)
- dev_err(cd->dev, "%s: Fail queued startup r=%d\n",
- __func__, rc);
-}
-
-static void cyttsp4_free_si_ptrs(struct cyttsp4 *cd)
-{
- struct cyttsp4_sysinfo *si = &cd->sysinfo;
-
- if (!si)
- return;
-
- kfree(si->si_ptrs.cydata);
- kfree(si->si_ptrs.test);
- kfree(si->si_ptrs.pcfg);
- kfree(si->si_ptrs.opcfg);
- kfree(si->si_ptrs.ddata);
- kfree(si->si_ptrs.mdata);
- kfree(si->btn);
- kfree(si->xy_mode);
- kfree(si->xy_data);
- kfree(si->btn_rec_data);
-}
-
-static int cyttsp4_core_sleep(struct cyttsp4 *cd)
-{
- int rc;
-
- rc = cyttsp4_request_exclusive(cd, cd->dev,
- CY_CORE_SLEEP_REQUEST_EXCLUSIVE_TIMEOUT);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n",
- __func__, cd->exclusive_dev, cd->dev);
- return 0;
- }
-
- rc = cyttsp4_core_sleep_(cd);
-
- if (cyttsp4_release_exclusive(cd, cd->dev) < 0)
- dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
- else
- dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__);
-
- return rc;
-}
-
-static int cyttsp4_core_wake_(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
- int rc;
- u8 mode;
- int t;
-
- /* Already woken? */
- mutex_lock(&cd->system_lock);
- if (cd->sleep_state == SS_SLEEP_OFF) {
- mutex_unlock(&cd->system_lock);
- return 0;
- }
- cd->int_status &= ~CY_INT_IGNORE;
- cd->int_status |= CY_INT_AWAKE;
- cd->sleep_state = SS_WAKING;
-
- if (cd->cpdata->power) {
- dev_dbg(dev, "%s: Power up HW\n", __func__);
- rc = cd->cpdata->power(cd->cpdata, 1, dev, &cd->ignore_irq);
- } else {
- dev_dbg(dev, "%s: No power function\n", __func__);
- rc = -ENOSYS;
- }
- if (rc < 0) {
- dev_err(dev, "%s: HW Power up fails r=%d\n",
- __func__, rc);
-
- /* Initiate a read transaction to wake up */
- cyttsp4_adap_read(cd, CY_REG_BASE, sizeof(mode), &mode);
- } else
- dev_vdbg(cd->dev, "%s: HW power up succeeds\n",
- __func__);
- mutex_unlock(&cd->system_lock);
-
- t = wait_event_timeout(cd->wait_q,
- (cd->int_status & CY_INT_AWAKE) == 0,
- msecs_to_jiffies(CY_CORE_WAKEUP_TIMEOUT));
- if (IS_TMO(t)) {
- dev_err(dev, "%s: TMO waiting for wakeup\n", __func__);
- mutex_lock(&cd->system_lock);
- cd->int_status &= ~CY_INT_AWAKE;
- /* Try starting up */
- cyttsp4_queue_startup_(cd);
- mutex_unlock(&cd->system_lock);
- }
-
- mutex_lock(&cd->system_lock);
- cd->sleep_state = SS_SLEEP_OFF;
- mutex_unlock(&cd->system_lock);
-
- cyttsp4_start_wd_timer(cd);
-
- return 0;
-}
-
-static int cyttsp4_core_wake(struct cyttsp4 *cd)
-{
- int rc;
-
- rc = cyttsp4_request_exclusive(cd, cd->dev,
- CY_CORE_REQUEST_EXCLUSIVE_TIMEOUT);
- if (rc < 0) {
- dev_err(cd->dev, "%s: fail get exclusive ex=%p own=%p\n",
- __func__, cd->exclusive_dev, cd->dev);
- return 0;
- }
-
- rc = cyttsp4_core_wake_(cd);
-
- if (cyttsp4_release_exclusive(cd, cd->dev) < 0)
- dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
- else
- dev_vdbg(cd->dev, "%s: pass release exclusive\n", __func__);
-
- return rc;
-}
-
-static int cyttsp4_core_suspend(struct device *dev)
-{
- struct cyttsp4 *cd = dev_get_drvdata(dev);
- struct cyttsp4_mt_data *md = &cd->md;
- int rc;
-
- md->is_suspended = true;
-
- rc = cyttsp4_core_sleep(cd);
- if (rc < 0) {
- dev_err(dev, "%s: Error on sleep\n", __func__);
- return -EAGAIN;
- }
- return 0;
-}
-
-static int cyttsp4_core_resume(struct device *dev)
-{
- struct cyttsp4 *cd = dev_get_drvdata(dev);
- struct cyttsp4_mt_data *md = &cd->md;
- int rc;
-
- md->is_suspended = false;
-
- rc = cyttsp4_core_wake(cd);
- if (rc < 0) {
- dev_err(dev, "%s: Error on wake\n", __func__);
- return -EAGAIN;
- }
-
- return 0;
-}
-
-EXPORT_GPL_RUNTIME_DEV_PM_OPS(cyttsp4_pm_ops,
- cyttsp4_core_suspend, cyttsp4_core_resume, NULL);
-
-static int cyttsp4_mt_open(struct input_dev *input)
-{
- pm_runtime_get(input->dev.parent);
- return 0;
-}
-
-static void cyttsp4_mt_close(struct input_dev *input)
-{
- struct cyttsp4_mt_data *md = input_get_drvdata(input);
- mutex_lock(&md->report_lock);
- if (!md->is_suspended)
- pm_runtime_put(input->dev.parent);
- mutex_unlock(&md->report_lock);
-}
-
-static int cyttsp4_setup_input_device(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
- struct cyttsp4_mt_data *md = &cd->md;
- int signal = CY_IGNORE_VALUE;
- int max_x, max_y, max_p, min, max;
- int max_x_tmp, max_y_tmp;
- int i;
- int rc;
-
- dev_vdbg(dev, "%s: Initialize event signals\n", __func__);
- __set_bit(EV_ABS, md->input->evbit);
- __set_bit(EV_REL, md->input->evbit);
- __set_bit(EV_KEY, md->input->evbit);
-
- max_x_tmp = md->si->si_ofs.max_x;
- max_y_tmp = md->si->si_ofs.max_y;
-
- /* get maximum values from the sysinfo data */
- if (md->pdata->flags & CY_FLAG_FLIP) {
- max_x = max_y_tmp - 1;
- max_y = max_x_tmp - 1;
- } else {
- max_x = max_x_tmp - 1;
- max_y = max_y_tmp - 1;
- }
- max_p = md->si->si_ofs.max_p;
-
- /* set event signal capabilities */
- for (i = 0; i < (md->pdata->frmwrk->size / CY_NUM_ABS_SET); i++) {
- signal = md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_SIGNAL_OST];
- if (signal != CY_IGNORE_VALUE) {
- __set_bit(signal, md->input->absbit);
- min = md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_MIN_OST];
- max = md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_MAX_OST];
- if (i == CY_ABS_ID_OST) {
- /* shift track ids down to start at 0 */
- max = max - min;
- min = min - min;
- } else if (i == CY_ABS_X_OST)
- max = max_x;
- else if (i == CY_ABS_Y_OST)
- max = max_y;
- else if (i == CY_ABS_P_OST)
- max = max_p;
- input_set_abs_params(md->input, signal, min, max,
- md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_FUZZ_OST],
- md->pdata->frmwrk->abs
- [(i * CY_NUM_ABS_SET) + CY_FLAT_OST]);
- dev_dbg(dev, "%s: register signal=%02X min=%d max=%d\n",
- __func__, signal, min, max);
- if ((i == CY_ABS_ID_OST) &&
- (md->si->si_ofs.tch_rec_size <
- CY_TMA4XX_TCH_REC_SIZE))
- break;
- }
- }
-
- input_mt_init_slots(md->input, md->si->si_ofs.tch_abs[CY_TCH_T].max,
- INPUT_MT_DIRECT);
- rc = input_register_device(md->input);
- if (rc < 0)
- dev_err(dev, "%s: Error, failed register input device r=%d\n",
- __func__, rc);
- return rc;
-}
-
-static int cyttsp4_mt_probe(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
- struct cyttsp4_mt_data *md = &cd->md;
- struct cyttsp4_mt_platform_data *pdata = cd->pdata->mt_pdata;
- int rc = 0;
-
- mutex_init(&md->report_lock);
- md->pdata = pdata;
- /* Create the input device and register it. */
- dev_vdbg(dev, "%s: Create the input device and register it\n",
- __func__);
- md->input = input_allocate_device();
- if (md->input == NULL) {
- dev_err(dev, "%s: Error, failed to allocate input device\n",
- __func__);
- rc = -ENOMEM;
- goto error_alloc_failed;
- }
-
- md->input->name = pdata->inp_dev_name;
- scnprintf(md->phys, sizeof(md->phys)-1, "%s", dev_name(dev));
- md->input->phys = md->phys;
- md->input->id.bustype = cd->bus_ops->bustype;
- md->input->dev.parent = dev;
- md->input->open = cyttsp4_mt_open;
- md->input->close = cyttsp4_mt_close;
- input_set_drvdata(md->input, md);
-
- /* get sysinfo */
- md->si = &cd->sysinfo;
-
- rc = cyttsp4_setup_input_device(cd);
- if (rc)
- goto error_init_input;
-
- return 0;
-
-error_init_input:
- input_free_device(md->input);
-error_alloc_failed:
- dev_err(dev, "%s failed.\n", __func__);
- return rc;
-}
-
-struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops,
- struct device *dev, u16 irq, size_t xfer_buf_size)
-{
- struct cyttsp4 *cd;
- struct cyttsp4_platform_data *pdata = dev_get_platdata(dev);
- unsigned long irq_flags;
- int rc = 0;
-
- if (!pdata || !pdata->core_pdata || !pdata->mt_pdata) {
- dev_err(dev, "%s: Missing platform data\n", __func__);
- rc = -ENODEV;
- goto error_no_pdata;
- }
-
- cd = kzalloc(sizeof(*cd), GFP_KERNEL);
- if (!cd) {
- dev_err(dev, "%s: Error, kzalloc\n", __func__);
- rc = -ENOMEM;
- goto error_alloc_data;
- }
-
- cd->xfer_buf = kzalloc(xfer_buf_size, GFP_KERNEL);
- if (!cd->xfer_buf) {
- dev_err(dev, "%s: Error, kzalloc\n", __func__);
- rc = -ENOMEM;
- goto error_free_cd;
- }
-
- /* Initialize device info */
- cd->dev = dev;
- cd->pdata = pdata;
- cd->cpdata = pdata->core_pdata;
- cd->bus_ops = ops;
-
- /* Initialize mutexes and spinlocks */
- mutex_init(&cd->system_lock);
- mutex_init(&cd->adap_lock);
-
- /* Initialize wait queue */
- init_waitqueue_head(&cd->wait_q);
-
- /* Initialize works */
- INIT_WORK(&cd->startup_work, cyttsp4_startup_work_function);
- INIT_WORK(&cd->watchdog_work, cyttsp4_watchdog_work);
-
- /* Initialize IRQ */
- cd->irq = gpio_to_irq(cd->cpdata->irq_gpio);
- if (cd->irq < 0) {
- rc = -EINVAL;
- goto error_free_xfer;
- }
-
- dev_set_drvdata(dev, cd);
-
- /* Call platform init function */
- if (cd->cpdata->init) {
- dev_dbg(cd->dev, "%s: Init HW\n", __func__);
- rc = cd->cpdata->init(cd->cpdata, 1, cd->dev);
- } else {
- dev_dbg(cd->dev, "%s: No HW INIT function\n", __func__);
- rc = 0;
- }
- if (rc < 0)
- dev_err(cd->dev, "%s: HW Init fail r=%d\n", __func__, rc);
-
- dev_dbg(dev, "%s: initialize threaded irq=%d\n", __func__, cd->irq);
- if (cd->cpdata->level_irq_udelay > 0)
- /* use level triggered interrupts */
- irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT;
- else
- /* use edge triggered interrupts */
- irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
-
- rc = request_threaded_irq(cd->irq, NULL, cyttsp4_irq, irq_flags,
- dev_name(dev), cd);
- if (rc < 0) {
- dev_err(dev, "%s: Error, could not request irq\n", __func__);
- goto error_request_irq;
- }
-
- /* Setup watchdog timer */
- timer_setup(&cd->watchdog_timer, cyttsp4_watchdog_timer, 0);
-
- /*
- * call startup directly to ensure that the device
- * is tested before leaving the probe
- */
- rc = cyttsp4_startup(cd);
-
- /* Do not fail probe if startup fails but the device is detected */
- if (rc < 0 && cd->mode == CY_MODE_UNKNOWN) {
- dev_err(cd->dev, "%s: Fail initial startup r=%d\n",
- __func__, rc);
- goto error_startup;
- }
-
- rc = cyttsp4_mt_probe(cd);
- if (rc < 0) {
- dev_err(dev, "%s: Error, fail mt probe\n", __func__);
- goto error_startup;
- }
-
- pm_runtime_enable(dev);
-
- return cd;
-
-error_startup:
- cancel_work_sync(&cd->startup_work);
- cyttsp4_stop_wd_timer(cd);
- pm_runtime_disable(dev);
- cyttsp4_free_si_ptrs(cd);
- free_irq(cd->irq, cd);
-error_request_irq:
- if (cd->cpdata->init)
- cd->cpdata->init(cd->cpdata, 0, dev);
-error_free_xfer:
- kfree(cd->xfer_buf);
-error_free_cd:
- kfree(cd);
-error_alloc_data:
-error_no_pdata:
- dev_err(dev, "%s failed.\n", __func__);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(cyttsp4_probe);
-
-static void cyttsp4_mt_release(struct cyttsp4_mt_data *md)
-{
- input_unregister_device(md->input);
- input_set_drvdata(md->input, NULL);
-}
-
-int cyttsp4_remove(struct cyttsp4 *cd)
-{
- struct device *dev = cd->dev;
-
- cyttsp4_mt_release(&cd->md);
-
- /*
- * Suspend the device before freeing the startup_work and stopping
- * the watchdog, since the sleep function restarts the watchdog on
- * failure.
- */
- pm_runtime_suspend(dev);
- pm_runtime_disable(dev);
-
- cancel_work_sync(&cd->startup_work);
-
- cyttsp4_stop_wd_timer(cd);
-
- free_irq(cd->irq, cd);
- if (cd->cpdata->init)
- cd->cpdata->init(cd->cpdata, 0, dev);
- cyttsp4_free_si_ptrs(cd);
- kfree(cd);
- return 0;
-}
-EXPORT_SYMBOL_GPL(cyttsp4_remove);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen core driver");
-MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/cyttsp4_core.h b/drivers/input/touchscreen/cyttsp4_core.h
deleted file mode 100644
index 6262f6e45075..000000000000
--- a/drivers/input/touchscreen/cyttsp4_core.h
+++ /dev/null
@@ -1,448 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * cyttsp4_core.h
- * Cypress TrueTouch(TM) Standard Product V4 Core driver module.
- * For use with Cypress Txx4xx parts.
- * Supported parts include:
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2012 Cypress Semiconductor
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#ifndef _LINUX_CYTTSP4_CORE_H
-#define _LINUX_CYTTSP4_CORE_H
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/input.h>
-#include <linux/kernel.h>
-#include <linux/limits.h>
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <linux/platform_data/cyttsp4.h>
-
-#define CY_REG_BASE 0x00
-
-#define CY_POST_CODEL_WDG_RST 0x01
-#define CY_POST_CODEL_CFG_DATA_CRC_FAIL 0x02
-#define CY_POST_CODEL_PANEL_TEST_FAIL 0x04
-
-#define CY_NUM_BTN_PER_REG 4
-
-/* touch record system information offset masks and shifts */
-#define CY_BYTE_OFS_MASK 0x1F
-#define CY_BOFS_MASK 0xE0
-#define CY_BOFS_SHIFT 5
-
-#define CY_TMA1036_TCH_REC_SIZE 6
-#define CY_TMA4XX_TCH_REC_SIZE 9
-#define CY_TMA1036_MAX_TCH 0x0E
-#define CY_TMA4XX_MAX_TCH 0x1E
-
-#define CY_NORMAL_ORIGIN 0 /* upper, left corner */
-#define CY_INVERT_ORIGIN 1 /* lower, right corner */
-
-/* helpers */
-#define GET_NUM_TOUCHES(x) ((x) & 0x1F)
-#define IS_LARGE_AREA(x) ((x) & 0x20)
-#define IS_BAD_PKT(x) ((x) & 0x20)
-#define IS_BOOTLOADER(hst_mode, reset_detect) \
- ((hst_mode) & 0x01 || (reset_detect) != 0)
-#define IS_TMO(t) ((t) == 0)
-
-enum cyttsp_cmd_bits {
- CY_CMD_COMPLETE = (1 << 6),
-};
-
-/* Timeout in ms. */
-#define CY_WATCHDOG_TIMEOUT 1000
-
-#define CY_MAX_PRINT_SIZE 512
-#ifdef VERBOSE_DEBUG
-#define CY_MAX_PRBUF_SIZE PIPE_BUF
-#define CY_PR_TRUNCATED " truncated..."
-#endif
-
-enum cyttsp4_ic_grpnum {
- CY_IC_GRPNUM_RESERVED,
- CY_IC_GRPNUM_CMD_REGS,
- CY_IC_GRPNUM_TCH_REP,
- CY_IC_GRPNUM_DATA_REC,
- CY_IC_GRPNUM_TEST_REC,
- CY_IC_GRPNUM_PCFG_REC,
- CY_IC_GRPNUM_TCH_PARM_VAL,
- CY_IC_GRPNUM_TCH_PARM_SIZE,
- CY_IC_GRPNUM_RESERVED1,
- CY_IC_GRPNUM_RESERVED2,
- CY_IC_GRPNUM_OPCFG_REC,
- CY_IC_GRPNUM_DDATA_REC,
- CY_IC_GRPNUM_MDATA_REC,
- CY_IC_GRPNUM_TEST_REGS,
- CY_IC_GRPNUM_BTN_KEYS,
- CY_IC_GRPNUM_TTHE_REGS,
- CY_IC_GRPNUM_NUM
-};
-
-enum cyttsp4_int_state {
- CY_INT_NONE,
- CY_INT_IGNORE = (1 << 0),
- CY_INT_MODE_CHANGE = (1 << 1),
- CY_INT_EXEC_CMD = (1 << 2),
- CY_INT_AWAKE = (1 << 3),
-};
-
-enum cyttsp4_mode {
- CY_MODE_UNKNOWN,
- CY_MODE_BOOTLOADER = (1 << 1),
- CY_MODE_OPERATIONAL = (1 << 2),
- CY_MODE_SYSINFO = (1 << 3),
- CY_MODE_CAT = (1 << 4),
- CY_MODE_STARTUP = (1 << 5),
- CY_MODE_LOADER = (1 << 6),
- CY_MODE_CHANGE_MODE = (1 << 7),
- CY_MODE_CHANGED = (1 << 8),
- CY_MODE_CMD_COMPLETE = (1 << 9),
-};
-
-enum cyttsp4_sleep_state {
- SS_SLEEP_OFF,
- SS_SLEEP_ON,
- SS_SLEEPING,
- SS_WAKING,
-};
-
-enum cyttsp4_startup_state {
- STARTUP_NONE,
- STARTUP_QUEUED,
- STARTUP_RUNNING,
-};
-
-#define CY_NUM_REVCTRL 8
-struct cyttsp4_cydata {
- u8 ttpidh;
- u8 ttpidl;
- u8 fw_ver_major;
- u8 fw_ver_minor;
- u8 revctrl[CY_NUM_REVCTRL];
- u8 blver_major;
- u8 blver_minor;
- u8 jtag_si_id3;
- u8 jtag_si_id2;
- u8 jtag_si_id1;
- u8 jtag_si_id0;
- u8 mfgid_sz;
- u8 cyito_idh;
- u8 cyito_idl;
- u8 cyito_verh;
- u8 cyito_verl;
- u8 ttsp_ver_major;
- u8 ttsp_ver_minor;
- u8 device_info;
- u8 mfg_id[];
-} __packed;
-
-struct cyttsp4_test {
- u8 post_codeh;
- u8 post_codel;
-} __packed;
-
-struct cyttsp4_pcfg {
- u8 electrodes_x;
- u8 electrodes_y;
- u8 len_xh;
- u8 len_xl;
- u8 len_yh;
- u8 len_yl;
- u8 res_xh;
- u8 res_xl;
- u8 res_yh;
- u8 res_yl;
- u8 max_zh;
- u8 max_zl;
- u8 panel_info0;
-} __packed;
-
-struct cyttsp4_tch_rec_params {
- u8 loc;
- u8 size;
-} __packed;
-
-#define CY_NUM_TCH_FIELDS 7
-#define CY_NUM_EXT_TCH_FIELDS 3
-struct cyttsp4_opcfg {
- u8 cmd_ofs;
- u8 rep_ofs;
- u8 rep_szh;
- u8 rep_szl;
- u8 num_btns;
- u8 tt_stat_ofs;
- u8 obj_cfg0;
- u8 max_tchs;
- u8 tch_rec_size;
- struct cyttsp4_tch_rec_params tch_rec_old[CY_NUM_TCH_FIELDS];
- u8 btn_rec_size; /* btn record size (in bytes) */
- u8 btn_diff_ofs; /* btn data loc, diff counts */
- u8 btn_diff_size; /* btn size of diff counts (in bits) */
- struct cyttsp4_tch_rec_params tch_rec_new[CY_NUM_EXT_TCH_FIELDS];
-} __packed;
-
-struct cyttsp4_sysinfo_ptr {
- struct cyttsp4_cydata *cydata;
- struct cyttsp4_test *test;
- struct cyttsp4_pcfg *pcfg;
- struct cyttsp4_opcfg *opcfg;
- struct cyttsp4_ddata *ddata;
- struct cyttsp4_mdata *mdata;
-} __packed;
-
-struct cyttsp4_sysinfo_data {
- u8 hst_mode;
- u8 reserved;
- u8 map_szh;
- u8 map_szl;
- u8 cydata_ofsh;
- u8 cydata_ofsl;
- u8 test_ofsh;
- u8 test_ofsl;
- u8 pcfg_ofsh;
- u8 pcfg_ofsl;
- u8 opcfg_ofsh;
- u8 opcfg_ofsl;
- u8 ddata_ofsh;
- u8 ddata_ofsl;
- u8 mdata_ofsh;
- u8 mdata_ofsl;
-} __packed;
-
-enum cyttsp4_tch_abs { /* for ordering within the extracted touch data array */
- CY_TCH_X, /* X */
- CY_TCH_Y, /* Y */
- CY_TCH_P, /* P (Z) */
- CY_TCH_T, /* TOUCH ID */
- CY_TCH_E, /* EVENT ID */
- CY_TCH_O, /* OBJECT ID */
- CY_TCH_W, /* SIZE */
- CY_TCH_MAJ, /* TOUCH_MAJOR */
- CY_TCH_MIN, /* TOUCH_MINOR */
- CY_TCH_OR, /* ORIENTATION */
- CY_TCH_NUM_ABS
-};
-
-struct cyttsp4_touch {
- int abs[CY_TCH_NUM_ABS];
-};
-
-struct cyttsp4_tch_abs_params {
- size_t ofs; /* abs byte offset */
- size_t size; /* size in bits */
- size_t max; /* max value */
- size_t bofs; /* bit offset */
-};
-
-struct cyttsp4_sysinfo_ofs {
- size_t chip_type;
- size_t cmd_ofs;
- size_t rep_ofs;
- size_t rep_sz;
- size_t num_btns;
- size_t num_btn_regs; /* ceil(num_btns/4) */
- size_t tt_stat_ofs;
- size_t tch_rec_size;
- size_t obj_cfg0;
- size_t max_tchs;
- size_t mode_size;
- size_t data_size;
- size_t map_sz;
- size_t max_x;
- size_t x_origin; /* left or right corner */
- size_t max_y;
- size_t y_origin; /* upper or lower corner */
- size_t max_p;
- size_t cydata_ofs;
- size_t test_ofs;
- size_t pcfg_ofs;
- size_t opcfg_ofs;
- size_t ddata_ofs;
- size_t mdata_ofs;
- size_t cydata_size;
- size_t test_size;
- size_t pcfg_size;
- size_t opcfg_size;
- size_t ddata_size;
- size_t mdata_size;
- size_t btn_keys_size;
- struct cyttsp4_tch_abs_params tch_abs[CY_TCH_NUM_ABS];
- size_t btn_rec_size; /* btn record size (in bytes) */
- size_t btn_diff_ofs; /* btn data loc, diff counts (Op-Mode byte ofs) */
- size_t btn_diff_size;/* btn size of diff counts (in bits) */
-};
-
-enum cyttsp4_btn_state {
- CY_BTN_RELEASED,
- CY_BTN_PRESSED,
- CY_BTN_NUM_STATE
-};
-
-struct cyttsp4_btn {
- bool enabled;
- int state; /* CY_BTN_PRESSED, CY_BTN_RELEASED */
- int key_code;
-};
-
-struct cyttsp4_sysinfo {
- bool ready;
- struct cyttsp4_sysinfo_data si_data;
- struct cyttsp4_sysinfo_ptr si_ptrs;
- struct cyttsp4_sysinfo_ofs si_ofs;
- struct cyttsp4_btn *btn; /* button states */
- u8 *btn_rec_data; /* button diff count data */
- u8 *xy_mode; /* operational mode and status regs */
- u8 *xy_data; /* operational touch regs */
-};
-
-struct cyttsp4_mt_data {
- struct cyttsp4_mt_platform_data *pdata;
- struct cyttsp4_sysinfo *si;
- struct input_dev *input;
- struct mutex report_lock;
- bool is_suspended;
- char phys[NAME_MAX];
- int num_prv_tch;
-};
-
-struct cyttsp4 {
- struct device *dev;
- struct mutex system_lock;
- struct mutex adap_lock;
- enum cyttsp4_mode mode;
- enum cyttsp4_sleep_state sleep_state;
- enum cyttsp4_startup_state startup_state;
- int int_status;
- wait_queue_head_t wait_q;
- int irq;
- struct work_struct startup_work;
- struct work_struct watchdog_work;
- struct timer_list watchdog_timer;
- struct cyttsp4_sysinfo sysinfo;
- void *exclusive_dev;
- int exclusive_waits;
- atomic_t ignore_irq;
- bool invalid_touch_app;
- struct cyttsp4_mt_data md;
- struct cyttsp4_platform_data *pdata;
- struct cyttsp4_core_platform_data *cpdata;
- const struct cyttsp4_bus_ops *bus_ops;
- u8 *xfer_buf;
-#ifdef VERBOSE_DEBUG
- u8 pr_buf[CY_MAX_PRBUF_SIZE];
-#endif
-};
-
-struct cyttsp4_bus_ops {
- u16 bustype;
- int (*write)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length,
- const void *values);
- int (*read)(struct device *dev, u8 *xfer_buf, u16 addr, u8 length,
- void *values);
-};
-
-enum cyttsp4_hst_mode_bits {
- CY_HST_TOGGLE = (1 << 7),
- CY_HST_MODE_CHANGE = (1 << 3),
- CY_HST_MODE = (7 << 4),
- CY_HST_OPERATE = (0 << 4),
- CY_HST_SYSINFO = (1 << 4),
- CY_HST_CAT = (2 << 4),
- CY_HST_LOWPOW = (1 << 2),
- CY_HST_SLEEP = (1 << 1),
- CY_HST_RESET = (1 << 0),
-};
-
-/* abs settings */
-#define CY_IGNORE_VALUE 0xFFFF
-
-/* abs signal capabilities offsets in the frameworks array */
-enum cyttsp4_sig_caps {
- CY_SIGNAL_OST,
- CY_MIN_OST,
- CY_MAX_OST,
- CY_FUZZ_OST,
- CY_FLAT_OST,
- CY_NUM_ABS_SET /* number of signal capability fields */
-};
-
-/* abs axis signal offsets in the frameworks array */
-enum cyttsp4_sig_ost {
- CY_ABS_X_OST,
- CY_ABS_Y_OST,
- CY_ABS_P_OST,
- CY_ABS_W_OST,
- CY_ABS_ID_OST,
- CY_ABS_MAJ_OST,
- CY_ABS_MIN_OST,
- CY_ABS_OR_OST,
- CY_NUM_ABS_OST /* number of abs signals */
-};
-
-enum cyttsp4_flags {
- CY_FLAG_NONE = 0x00,
- CY_FLAG_HOVER = 0x04,
- CY_FLAG_FLIP = 0x08,
- CY_FLAG_INV_X = 0x10,
- CY_FLAG_INV_Y = 0x20,
- CY_FLAG_VKEYS = 0x40,
-};
-
-enum cyttsp4_object_id {
- CY_OBJ_STANDARD_FINGER,
- CY_OBJ_LARGE_OBJECT,
- CY_OBJ_STYLUS,
- CY_OBJ_HOVER,
-};
-
-enum cyttsp4_event_id {
- CY_EV_NO_EVENT,
- CY_EV_TOUCHDOWN,
- CY_EV_MOVE, /* significant displacement (> act dist) */
- CY_EV_LIFTOFF, /* record reports last position */
-};
-
-/* x-axis resolution of panel in pixels */
-#define CY_PCFG_RESOLUTION_X_MASK 0x7F
-
-/* y-axis resolution of panel in pixels */
-#define CY_PCFG_RESOLUTION_Y_MASK 0x7F
-
-/* x-axis, 0:origin is on left side of panel, 1: right */
-#define CY_PCFG_ORIGIN_X_MASK 0x80
-
-/* y-axis, 0:origin is on top side of panel, 1: bottom */
-#define CY_PCFG_ORIGIN_Y_MASK 0x80
-
-static inline int cyttsp4_adap_read(struct cyttsp4 *ts, u16 addr, int size,
- void *buf)
-{
- return ts->bus_ops->read(ts->dev, ts->xfer_buf, addr, size, buf);
-}
-
-static inline int cyttsp4_adap_write(struct cyttsp4 *ts, u16 addr, int size,
- const void *buf)
-{
- return ts->bus_ops->write(ts->dev, ts->xfer_buf, addr, size, buf);
-}
-
-extern struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops,
- struct device *dev, u16 irq, size_t xfer_buf_size);
-extern int cyttsp4_remove(struct cyttsp4 *ts);
-int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
- u8 length, const void *values);
-int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
- u8 length, void *values);
-extern const struct dev_pm_ops cyttsp4_pm_ops;
-
-#endif /* _LINUX_CYTTSP4_CORE_H */
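
The header above hides the transport behind struct cyttsp4_bus_ops, which is how the same core served both I2C and SPI. A hypothetical adapter (the my_* names are illustrative, not a real driver) would supply block read/write callbacks and hand the table to cyttsp4_probe(), roughly:

        #include <linux/types.h>
        #include "cyttsp4_core.h"

        static int my_read(struct device *dev, u8 *xfer_buf, u16 addr,
                           u8 length, void *values)
        {
                /* Bus-specific read of 'length' bytes starting at 'addr'. */
                return 0;
        }

        static int my_write(struct device *dev, u8 *xfer_buf, u16 addr,
                            u8 length, const void *values)
        {
                /* Bus-specific write; xfer_buf may stage addr + payload. */
                return 0;
        }

        static const struct cyttsp4_bus_ops my_bus_ops = {
                .bustype = BUS_HOST,
                .write = my_write,
                .read = my_read,
        };

        /* In the adapter's probe: */
        /* ts = cyttsp4_probe(&my_bus_ops, dev, irq, xfer_buf_size); */
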
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
deleted file mode 100644
index da32c151def5..000000000000
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cyttsp4_i2c.c
- * Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver.
- * For use with Cypress Txx4xx parts.
- * Supported parts include:
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- * Copyright (C) 2013 Cypress Semiconductor
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#include "cyttsp4_core.h"
-
-#include <linux/i2c.h>
-#include <linux/input.h>
-
-#define CYTTSP4_I2C_DATA_SIZE (3 * 256)
-
-static const struct cyttsp4_bus_ops cyttsp4_i2c_bus_ops = {
- .bustype = BUS_I2C,
- .write = cyttsp_i2c_write_block_data,
- .read = cyttsp_i2c_read_block_data,
-};
-
-static int cyttsp4_i2c_probe(struct i2c_client *client)
-{
- struct cyttsp4 *ts;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "I2C functionality not Supported\n");
- return -EIO;
- }
-
- ts = cyttsp4_probe(&cyttsp4_i2c_bus_ops, &client->dev, client->irq,
- CYTTSP4_I2C_DATA_SIZE);
-
- return PTR_ERR_OR_ZERO(ts);
-}
-
-static void cyttsp4_i2c_remove(struct i2c_client *client)
-{
- struct cyttsp4 *ts = i2c_get_clientdata(client);
-
- cyttsp4_remove(ts);
-}
-
-static const struct i2c_device_id cyttsp4_i2c_id[] = {
- { CYTTSP4_I2C_NAME },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, cyttsp4_i2c_id);
-
-static struct i2c_driver cyttsp4_i2c_driver = {
- .driver = {
- .name = CYTTSP4_I2C_NAME,
- .pm = pm_ptr(&cyttsp4_pm_ops),
- },
- .probe = cyttsp4_i2c_probe,
- .remove = cyttsp4_i2c_remove,
- .id_table = cyttsp4_i2c_id,
-};
-
-module_i2c_driver(cyttsp4_i2c_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) I2C driver");
-MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/cyttsp4_spi.c b/drivers/input/touchscreen/cyttsp4_spi.c
deleted file mode 100644
index 944fbbe9113e..000000000000
--- a/drivers/input/touchscreen/cyttsp4_spi.c
+++ /dev/null
@@ -1,187 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Source for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) SPI touchscreen driver.
- * For use with Cypress Txx4xx parts.
- * Supported parts include:
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- * Copyright (C) 2013 Cypress Semiconductor
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#include "cyttsp4_core.h"
-
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/spi/spi.h>
-
-#define CY_SPI_WR_OP 0x00 /* r/~w */
-#define CY_SPI_RD_OP 0x01
-#define CY_SPI_BITS_PER_WORD 8
-#define CY_SPI_A8_BIT 0x02
-#define CY_SPI_WR_HEADER_BYTES 2
-#define CY_SPI_RD_HEADER_BYTES 1
-#define CY_SPI_CMD_BYTES 2
-#define CY_SPI_SYNC_BYTE 0
-#define CY_SPI_SYNC_ACK 0x62 /* from TRM *A protocol */
-#define CY_SPI_DATA_SIZE (2 * 256)
-
-#define CY_SPI_DATA_BUF_SIZE (CY_SPI_CMD_BYTES + CY_SPI_DATA_SIZE)
-
-static int cyttsp_spi_xfer(struct device *dev, u8 *xfer_buf,
- u8 op, u16 reg, u8 *buf, int length)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct spi_message msg;
- struct spi_transfer xfer[2];
- u8 *wr_buf = &xfer_buf[0];
- u8 rd_buf[CY_SPI_CMD_BYTES];
- int retval;
- int i;
-
- if (length > CY_SPI_DATA_SIZE) {
- dev_err(dev, "%s: length %d is too big.\n",
- __func__, length);
- return -EINVAL;
- }
-
- memset(wr_buf, 0, CY_SPI_DATA_BUF_SIZE);
- memset(rd_buf, 0, CY_SPI_CMD_BYTES);
-
- wr_buf[0] = op + (((reg >> 8) & 0x1) ? CY_SPI_A8_BIT : 0);
- if (op == CY_SPI_WR_OP) {
- wr_buf[1] = reg & 0xFF;
- if (length > 0)
- memcpy(wr_buf + CY_SPI_CMD_BYTES, buf, length);
- }
-
- memset(xfer, 0, sizeof(xfer));
- spi_message_init(&msg);
-
- /*
- * We set both TX and RX buffers because Cypress TTSP
- * requires full duplex operation.
- */
- xfer[0].tx_buf = wr_buf;
- xfer[0].rx_buf = rd_buf;
- switch (op) {
- case CY_SPI_WR_OP:
- xfer[0].len = length + CY_SPI_CMD_BYTES;
- spi_message_add_tail(&xfer[0], &msg);
- break;
-
- case CY_SPI_RD_OP:
- xfer[0].len = CY_SPI_RD_HEADER_BYTES;
- spi_message_add_tail(&xfer[0], &msg);
-
- xfer[1].rx_buf = buf;
- xfer[1].len = length;
- spi_message_add_tail(&xfer[1], &msg);
- break;
-
- default:
- dev_err(dev, "%s: bad operation code=%d\n", __func__, op);
- return -EINVAL;
- }
-
- retval = spi_sync(spi, &msg);
- if (retval < 0) {
- dev_dbg(dev, "%s: spi_sync() error %d, len=%d, op=%d\n",
- __func__, retval, xfer[1].len, op);
-
- /*
- * Do not return here; this may have been a bad ACK
- * sequence, so let the following ACK check handle any
- * errors and allow silent retries.
- */
- }
-
- if (rd_buf[CY_SPI_SYNC_BYTE] != CY_SPI_SYNC_ACK) {
- dev_dbg(dev, "%s: operation %d failed\n", __func__, op);
-
- for (i = 0; i < CY_SPI_CMD_BYTES; i++)
- dev_dbg(dev, "%s: test rd_buf[%d]:0x%02x\n",
- __func__, i, rd_buf[i]);
- for (i = 0; i < length; i++)
- dev_dbg(dev, "%s: test buf[%d]:0x%02x\n",
- __func__, i, buf[i]);
-
- return -EIO;
- }
-
- return 0;
-}
-
-static int cyttsp_spi_read_block_data(struct device *dev, u8 *xfer_buf,
- u16 addr, u8 length, void *data)
-{
- int rc;
-
- rc = cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_WR_OP, addr, NULL, 0);
- if (rc)
- return rc;
- else
- return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_RD_OP, addr, data,
- length);
-}
-
-static int cyttsp_spi_write_block_data(struct device *dev, u8 *xfer_buf,
- u16 addr, u8 length, const void *data)
-{
- return cyttsp_spi_xfer(dev, xfer_buf, CY_SPI_WR_OP, addr, (void *)data,
- length);
-}
-
-static const struct cyttsp4_bus_ops cyttsp_spi_bus_ops = {
- .bustype = BUS_SPI,
- .write = cyttsp_spi_write_block_data,
- .read = cyttsp_spi_read_block_data,
-};
-
-static int cyttsp4_spi_probe(struct spi_device *spi)
-{
- struct cyttsp4 *ts;
- int error;
-
- /* Set up SPI */
- spi->bits_per_word = CY_SPI_BITS_PER_WORD;
- spi->mode = SPI_MODE_0;
- error = spi_setup(spi);
- if (error < 0) {
- dev_err(&spi->dev, "%s: SPI setup error %d\n",
- __func__, error);
- return error;
- }
-
- ts = cyttsp4_probe(&cyttsp_spi_bus_ops, &spi->dev, spi->irq,
- CY_SPI_DATA_BUF_SIZE);
-
- return PTR_ERR_OR_ZERO(ts);
-}
-
-static void cyttsp4_spi_remove(struct spi_device *spi)
-{
- struct cyttsp4 *ts = spi_get_drvdata(spi);
- cyttsp4_remove(ts);
-}
-
-static struct spi_driver cyttsp4_spi_driver = {
- .driver = {
- .name = CYTTSP4_SPI_NAME,
- .pm = pm_ptr(&cyttsp4_pm_ops),
- },
- .probe = cyttsp4_spi_probe,
- .remove = cyttsp4_spi_remove,
-};
-
-module_spi_driver(cyttsp4_spi_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) SPI driver");
-MODULE_AUTHOR("Cypress");
-MODULE_ALIAS("spi:cyttsp4");
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 132ed5786e84..b8ce6012364c 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -17,7 +17,6 @@
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/property.h>
@@ -615,17 +614,14 @@ static int cyttsp_parse_properties(struct cyttsp *ts)
return 0;
}
-static void cyttsp_disable_regulators(void *_ts)
-{
- struct cyttsp *ts = _ts;
-
- regulator_bulk_disable(ARRAY_SIZE(ts->regulators),
- ts->regulators);
-}
-
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size)
{
+ /*
+ * VCPIN is the analog voltage supply
+ * VDD is the digital voltage supply
+ */
+ static const char * const supplies[] = { "vcpin", "vdd" };
struct cyttsp *ts;
struct input_dev *input_dev;
int error;
@@ -643,29 +639,10 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
ts->bus_ops = bus_ops;
ts->irq = irq;
- /*
- * VCPIN is the analog voltage supply
- * VDD is the digital voltage supply
- */
- ts->regulators[0].supply = "vcpin";
- ts->regulators[1].supply = "vdd";
- error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators),
- ts->regulators);
- if (error) {
- dev_err(dev, "Failed to get regulators: %d\n", error);
- return ERR_PTR(error);
- }
-
- error = regulator_bulk_enable(ARRAY_SIZE(ts->regulators),
- ts->regulators);
- if (error) {
- dev_err(dev, "Cannot enable regulators: %d\n", error);
- return ERR_PTR(error);
- }
-
- error = devm_add_action_or_reset(dev, cyttsp_disable_regulators, ts);
+ error = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies),
+ supplies);
if (error) {
- dev_err(dev, "failed to install chip disable handler\n");
+ dev_err(dev, "Failed to enable regulators: %d\n", error);
return ERR_PTR(error);
}
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 075509e695a2..40a605d20285 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -122,7 +122,6 @@ struct cyttsp {
enum cyttsp_state state;
bool suspended;
- struct regulator_bulk_data regulators[2];
struct gpio_desc *reset_gpio;
bool use_hndshk;
u8 act_dist;
@@ -137,10 +136,6 @@ struct cyttsp {
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size);
-int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
- u8 length, const void *values);
-int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
- u8 length, void *values);
extern const struct dev_pm_ops cyttsp_pm_ops;
#endif /* __CYTTSP_CORE_H__ */
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index bf13b3448a6b..cb15600549cd 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -22,6 +22,61 @@
#define CY_I2C_DATA_SIZE 128
+static int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf,
+ u16 addr, u8 length, void *values)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 client_addr = client->addr | ((addr >> 8) & 0x1);
+ u8 addr_lo = addr & 0xFF;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr_lo,
+ },
+ {
+ .addr = client_addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = values,
+ },
+ };
+ int retval;
+
+ retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (retval < 0)
+ return retval;
+
+ return retval != ARRAY_SIZE(msgs) ? -EIO : 0;
+}
+
+static int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf,
+ u16 addr, u8 length, const void *values)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 client_addr = client->addr | ((addr >> 8) & 0x1);
+ u8 addr_lo = addr & 0xFF;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client_addr,
+ .flags = 0,
+ .len = length + 1,
+ .buf = xfer_buf,
+ },
+ };
+ int retval;
+
+ xfer_buf[0] = addr_lo;
+ memcpy(&xfer_buf[1], values, length);
+
+ retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (retval < 0)
+ return retval;
+
+ return retval != ARRAY_SIZE(msgs) ? -EIO : 0;
+}
+
static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = {
.bustype = BUS_I2C,
.write = cyttsp_i2c_write_block_data,
diff --git a/drivers/input/touchscreen/cyttsp_i2c_common.c b/drivers/input/touchscreen/cyttsp_i2c_common.c
deleted file mode 100644
index 7e752fb9fad7..000000000000
--- a/drivers/input/touchscreen/cyttsp_i2c_common.c
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cyttsp_i2c_common.c
- * Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver.
- * For use with Cypress Txx3xx and Txx4xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- * TMA4XX
- * TMA1036
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com <ttdrivers@cypress.com>
- */
-
-#include <linux/device.h>
-#include <linux/export.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/types.h>
-
-#include "cyttsp4_core.h"
-
-int cyttsp_i2c_read_block_data(struct device *dev, u8 *xfer_buf,
- u16 addr, u8 length, void *values)
-{
- struct i2c_client *client = to_i2c_client(dev);
- u8 client_addr = client->addr | ((addr >> 8) & 0x1);
- u8 addr_lo = addr & 0xFF;
- struct i2c_msg msgs[] = {
- {
- .addr = client_addr,
- .flags = 0,
- .len = 1,
- .buf = &addr_lo,
- },
- {
- .addr = client_addr,
- .flags = I2C_M_RD,
- .len = length,
- .buf = values,
- },
- };
- int retval;
-
- retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (retval < 0)
- return retval;
-
- return retval != ARRAY_SIZE(msgs) ? -EIO : 0;
-}
-EXPORT_SYMBOL_GPL(cyttsp_i2c_read_block_data);
-
-int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf,
- u16 addr, u8 length, const void *values)
-{
- struct i2c_client *client = to_i2c_client(dev);
- u8 client_addr = client->addr | ((addr >> 8) & 0x1);
- u8 addr_lo = addr & 0xFF;
- struct i2c_msg msgs[] = {
- {
- .addr = client_addr,
- .flags = 0,
- .len = length + 1,
- .buf = xfer_buf,
- },
- };
- int retval;
-
- xfer_buf[0] = addr_lo;
- memcpy(&xfer_buf[1], values, length);
-
- retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (retval < 0)
- return retval;
-
- return retval != ARRAY_SIZE(msgs) ? -EIO : 0;
-}
-EXPORT_SYMBOL_GPL(cyttsp_i2c_write_block_data);
-
-MODULE_DESCRIPTION("Cypress TrueTouch(TM) Standard Product (TTSP) I2C touchscreen driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/goodix_berlin.h b/drivers/input/touchscreen/goodix_berlin.h
index 1fd77eb69c9a..38b6f9ddbdef 100644
--- a/drivers/input/touchscreen/goodix_berlin.h
+++ b/drivers/input/touchscreen/goodix_berlin.h
@@ -20,5 +20,6 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
struct regmap *regmap);
extern const struct dev_pm_ops goodix_berlin_pm_ops;
+extern const struct attribute_group *goodix_berlin_groups[];
#endif
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index e7b41a926ef8..0bfca897ce5a 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -672,6 +672,49 @@ static void goodix_berlin_power_off_act(void *data)
goodix_berlin_power_off(cd);
}
+static ssize_t registers_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct goodix_berlin_core *cd = dev_get_drvdata(dev);
+ int error;
+
+ error = regmap_raw_read(cd->regmap, off, buf, count);
+
+ return error ? error : count;
+}
+
+static ssize_t registers_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct goodix_berlin_core *cd = dev_get_drvdata(dev);
+ int error;
+
+ error = regmap_raw_write(cd->regmap, off, buf, count);
+
+ return error ? error : count;
+}
+
+static BIN_ATTR_ADMIN_RW(registers, 0);
+
+static struct bin_attribute *goodix_berlin_bin_attrs[] = {
+ &bin_attr_registers,
+ NULL,
+};
+
+static const struct attribute_group goodix_berlin_attr_group = {
+ .bin_attrs = goodix_berlin_bin_attrs,
+};
+
+const struct attribute_group *goodix_berlin_groups[] = {
+ &goodix_berlin_attr_group,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(goodix_berlin_groups);
+
int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
struct regmap *regmap)
{
diff --git a/drivers/input/touchscreen/goodix_berlin_i2c.c b/drivers/input/touchscreen/goodix_berlin_i2c.c
index 2e7098078838..ad7a60d94338 100644
--- a/drivers/input/touchscreen/goodix_berlin_i2c.c
+++ b/drivers/input/touchscreen/goodix_berlin_i2c.c
@@ -64,6 +64,7 @@ static struct i2c_driver goodix_berlin_i2c_driver = {
.name = "goodix-berlin-i2c",
.of_match_table = goodix_berlin_i2c_of_match,
.pm = pm_sleep_ptr(&goodix_berlin_pm_ops),
+ .dev_groups = goodix_berlin_groups,
},
.probe = goodix_berlin_i2c_probe,
.id_table = goodix_berlin_i2c_id,
diff --git a/drivers/input/touchscreen/goodix_berlin_spi.c b/drivers/input/touchscreen/goodix_berlin_spi.c
index 82774a412956..a2d80e84391b 100644
--- a/drivers/input/touchscreen/goodix_berlin_spi.c
+++ b/drivers/input/touchscreen/goodix_berlin_spi.c
@@ -169,6 +169,7 @@ static struct spi_driver goodix_berlin_spi_driver = {
.name = "goodix-berlin-spi",
.of_match_table = goodix_berlin_spi_of_match,
.pm = pm_sleep_ptr(&goodix_berlin_pm_ops),
+ .dev_groups = goodix_berlin_groups,
},
.probe = goodix_berlin_spi_probe,
.id_table = goodix_berlin_spi_ids,
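
The goodix_berlin hunks above export a root-only binary "registers" attribute through dev_groups, backed directly by regmap_raw_read()/regmap_raw_write(). From userspace it behaves like a seekable file whose offset is the register offset; a hedged sketch of reading a few register bytes (the sysfs path shown is an example and depends on how the device enumerates):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                unsigned char buf[4];
                /* Example path only; adjust to the actual bus/device. */
                int fd = open("/sys/bus/i2c/devices/1-005d/registers",
                              O_RDONLY);

                if (fd < 0)
                        return 1;

                /* The file offset selects the register offset. */
                if (pread(fd, buf, sizeof(buf), 0x10) == sizeof(buf))
                        printf("%02x %02x %02x %02x\n",
                               buf[0], buf[1], buf[2], buf[3]);

                close(fd);
                return 0;
        }
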
diff --git a/drivers/input/touchscreen/hynitron_cstxxx.c b/drivers/input/touchscreen/hynitron_cstxxx.c
index 05946fee4fd4..f72834859282 100644
--- a/drivers/input/touchscreen/hynitron_cstxxx.c
+++ b/drivers/input/touchscreen/hynitron_cstxxx.c
@@ -470,7 +470,7 @@ static const struct hynitron_ts_chip_data cst3xx_data = {
};
static const struct i2c_device_id hyn_tpd_id[] = {
- { .name = "hynitron_ts", 0 },
+ { .name = "hynitron_ts" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(i2c, hyn_tpd_id);
diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c
index 3eb762896345..5569641f05f6 100644
--- a/drivers/input/touchscreen/ilitek_ts_i2c.c
+++ b/drivers/input/touchscreen/ilitek_ts_i2c.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/errno.h>
#include <linux/acpi.h>
@@ -37,6 +36,8 @@
#define ILITEK_TP_CMD_GET_MCU_VER 0x61
#define ILITEK_TP_CMD_GET_IC_MODE 0xC0
+#define ILITEK_TP_I2C_REPORT_ID 0x48
+
#define REPORT_COUNT_ADDRESS 61
#define ILITEK_SUPPORT_MAX_POINT 40
@@ -160,15 +161,19 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
error = ilitek_i2c_write_and_read(ts, NULL, 0, 0, buf, 64);
if (error) {
dev_err(dev, "get touch info failed, err:%d\n", error);
- goto err_sync_frame;
+ return error;
+ }
+
+ if (buf[0] != ILITEK_TP_I2C_REPORT_ID) {
+ dev_err(dev, "get touch info failed. Wrong id: 0x%02X\n", buf[0]);
+ return -EINVAL;
}
report_max_point = buf[REPORT_COUNT_ADDRESS];
if (report_max_point > ts->max_tp) {
dev_err(dev, "FW report max point:%d > panel info. max:%d\n",
report_max_point, ts->max_tp);
- error = -EINVAL;
- goto err_sync_frame;
+ return -EINVAL;
}
count = DIV_ROUND_UP(report_max_point, packet_max_point);
@@ -178,7 +183,7 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
if (error) {
dev_err(dev, "get touch info. failed, cnt:%d, err:%d\n",
count, error);
- goto err_sync_frame;
+ return error;
}
}
@@ -203,10 +208,10 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
ilitek_touch_down(ts, id, x, y);
}
-err_sync_frame:
input_mt_sync_frame(input);
input_sync(input);
- return error;
+
+ return 0;
}
/* APIs of cmds for ILITEK Touch IC */
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
deleted file mode 100644
index 5aff8dcda0dc..000000000000
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ /dev/null
@@ -1,288 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * mcs5000_ts.c - Touchscreen driver for MELFAS MCS-5000 controller
- *
- * Copyright (C) 2009 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * Based on wm97xx-core.c
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/irq.h>
-#include <linux/platform_data/mcs.h>
-#include <linux/slab.h>
-
-/* Registers */
-#define MCS5000_TS_STATUS 0x00
-#define STATUS_OFFSET 0
-#define STATUS_NO (0 << STATUS_OFFSET)
-#define STATUS_INIT (1 << STATUS_OFFSET)
-#define STATUS_SENSING (2 << STATUS_OFFSET)
-#define STATUS_COORD (3 << STATUS_OFFSET)
-#define STATUS_GESTURE (4 << STATUS_OFFSET)
-#define ERROR_OFFSET 4
-#define ERROR_NO (0 << ERROR_OFFSET)
-#define ERROR_POWER_ON_RESET (1 << ERROR_OFFSET)
-#define ERROR_INT_RESET (2 << ERROR_OFFSET)
-#define ERROR_EXT_RESET (3 << ERROR_OFFSET)
-#define ERROR_INVALID_REG_ADDRESS (8 << ERROR_OFFSET)
-#define ERROR_INVALID_REG_VALUE (9 << ERROR_OFFSET)
-
-#define MCS5000_TS_OP_MODE 0x01
-#define RESET_OFFSET 0
-#define RESET_NO (0 << RESET_OFFSET)
-#define RESET_EXT_SOFT (1 << RESET_OFFSET)
-#define OP_MODE_OFFSET 1
-#define OP_MODE_SLEEP (0 << OP_MODE_OFFSET)
-#define OP_MODE_ACTIVE (1 << OP_MODE_OFFSET)
-#define GESTURE_OFFSET 4
-#define GESTURE_DISABLE (0 << GESTURE_OFFSET)
-#define GESTURE_ENABLE (1 << GESTURE_OFFSET)
-#define PROXIMITY_OFFSET 5
-#define PROXIMITY_DISABLE (0 << PROXIMITY_OFFSET)
-#define PROXIMITY_ENABLE (1 << PROXIMITY_OFFSET)
-#define SCAN_MODE_OFFSET 6
-#define SCAN_MODE_INTERRUPT (0 << SCAN_MODE_OFFSET)
-#define SCAN_MODE_POLLING (1 << SCAN_MODE_OFFSET)
-#define REPORT_RATE_OFFSET 7
-#define REPORT_RATE_40 (0 << REPORT_RATE_OFFSET)
-#define REPORT_RATE_80 (1 << REPORT_RATE_OFFSET)
-
-#define MCS5000_TS_SENS_CTL 0x02
-#define MCS5000_TS_FILTER_CTL 0x03
-#define PRI_FILTER_OFFSET 0
-#define SEC_FILTER_OFFSET 4
-
-#define MCS5000_TS_X_SIZE_UPPER 0x08
-#define MCS5000_TS_X_SIZE_LOWER 0x09
-#define MCS5000_TS_Y_SIZE_UPPER 0x0A
-#define MCS5000_TS_Y_SIZE_LOWER 0x0B
-
-#define MCS5000_TS_INPUT_INFO 0x10
-#define INPUT_TYPE_OFFSET 0
-#define INPUT_TYPE_NONTOUCH (0 << INPUT_TYPE_OFFSET)
-#define INPUT_TYPE_SINGLE (1 << INPUT_TYPE_OFFSET)
-#define INPUT_TYPE_DUAL (2 << INPUT_TYPE_OFFSET)
-#define INPUT_TYPE_PALM (3 << INPUT_TYPE_OFFSET)
-#define INPUT_TYPE_PROXIMITY (7 << INPUT_TYPE_OFFSET)
-#define GESTURE_CODE_OFFSET 3
-#define GESTURE_CODE_NO (0 << GESTURE_CODE_OFFSET)
-
-#define MCS5000_TS_X_POS_UPPER 0x11
-#define MCS5000_TS_X_POS_LOWER 0x12
-#define MCS5000_TS_Y_POS_UPPER 0x13
-#define MCS5000_TS_Y_POS_LOWER 0x14
-#define MCS5000_TS_Z_POS 0x15
-#define MCS5000_TS_WIDTH 0x16
-#define MCS5000_TS_GESTURE_VAL 0x17
-#define MCS5000_TS_MODULE_REV 0x20
-#define MCS5000_TS_FIRMWARE_VER 0x21
-
-/* Touchscreen absolute values */
-#define MCS5000_MAX_XC 0x3ff
-#define MCS5000_MAX_YC 0x3ff
-
-enum mcs5000_ts_read_offset {
- READ_INPUT_INFO,
- READ_X_POS_UPPER,
- READ_X_POS_LOWER,
- READ_Y_POS_UPPER,
- READ_Y_POS_LOWER,
- READ_BLOCK_SIZE,
-};
-
-/* Each client has this additional data */
-struct mcs5000_ts_data {
- struct i2c_client *client;
- struct input_dev *input_dev;
- const struct mcs_platform_data *platform_data;
-};
-
-static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id)
-{
- struct mcs5000_ts_data *data = dev_id;
- struct i2c_client *client = data->client;
- u8 buffer[READ_BLOCK_SIZE];
- int err;
- int x;
- int y;
-
- err = i2c_smbus_read_i2c_block_data(client, MCS5000_TS_INPUT_INFO,
- READ_BLOCK_SIZE, buffer);
- if (err < 0) {
- dev_err(&client->dev, "%s, err[%d]\n", __func__, err);
- goto out;
- }
-
- switch (buffer[READ_INPUT_INFO]) {
- case INPUT_TYPE_NONTOUCH:
- input_report_key(data->input_dev, BTN_TOUCH, 0);
- input_sync(data->input_dev);
- break;
-
- case INPUT_TYPE_SINGLE:
- x = (buffer[READ_X_POS_UPPER] << 8) | buffer[READ_X_POS_LOWER];
- y = (buffer[READ_Y_POS_UPPER] << 8) | buffer[READ_Y_POS_LOWER];
-
- input_report_key(data->input_dev, BTN_TOUCH, 1);
- input_report_abs(data->input_dev, ABS_X, x);
- input_report_abs(data->input_dev, ABS_Y, y);
- input_sync(data->input_dev);
- break;
-
- case INPUT_TYPE_DUAL:
- /* TODO */
- break;
-
- case INPUT_TYPE_PALM:
- /* TODO */
- break;
-
- case INPUT_TYPE_PROXIMITY:
- /* TODO */
- break;
-
- default:
- dev_err(&client->dev, "Unknown ts input type %d\n",
- buffer[READ_INPUT_INFO]);
- break;
- }
-
- out:
- return IRQ_HANDLED;
-}
-
-static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data,
- const struct mcs_platform_data *platform_data)
-{
- struct i2c_client *client = data->client;
-
- /* Touch reset & sleep mode */
- i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE,
- RESET_EXT_SOFT | OP_MODE_SLEEP);
-
- /* Touch size */
- i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_UPPER,
- platform_data->x_size >> 8);
- i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_LOWER,
- platform_data->x_size & 0xff);
- i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_UPPER,
- platform_data->y_size >> 8);
- i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_LOWER,
- platform_data->y_size & 0xff);
-
- /* Touch active mode & 80 report rate */
- i2c_smbus_write_byte_data(data->client, MCS5000_TS_OP_MODE,
- OP_MODE_ACTIVE | REPORT_RATE_80);
-}
-
-static int mcs5000_ts_probe(struct i2c_client *client)
-{
- const struct mcs_platform_data *pdata;
- struct mcs5000_ts_data *data;
- struct input_dev *input_dev;
- int error;
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata)
- return -EINVAL;
-
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- return -ENOMEM;
- }
-
- data->client = client;
-
- input_dev = devm_input_allocate_device(&client->dev);
- if (!input_dev) {
- dev_err(&client->dev, "Failed to allocate input device\n");
- return -ENOMEM;
- }
-
- input_dev->name = "MELFAS MCS-5000 Touchscreen";
- input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
-
- __set_bit(EV_ABS, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(BTN_TOUCH, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_X, 0, MCS5000_MAX_XC, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, MCS5000_MAX_YC, 0, 0);
-
- data->input_dev = input_dev;
-
- if (pdata->cfg_pin)
- pdata->cfg_pin();
-
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, mcs5000_ts_interrupt,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "mcs5000_ts", data);
- if (error) {
- dev_err(&client->dev, "Failed to register interrupt\n");
- return error;
- }
-
- error = input_register_device(data->input_dev);
- if (error) {
- dev_err(&client->dev, "Failed to register input device\n");
- return error;
- }
-
- mcs5000_ts_phys_init(data, pdata);
- i2c_set_clientdata(client, data);
-
- return 0;
-}
-
-static int mcs5000_ts_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- /* Touch sleep mode */
- i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP);
-
- return 0;
-}
-
-static int mcs5000_ts_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct mcs5000_ts_data *data = i2c_get_clientdata(client);
- const struct mcs_platform_data *pdata = dev_get_platdata(dev);
-
- mcs5000_ts_phys_init(data, pdata);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(mcs5000_ts_pm,
- mcs5000_ts_suspend, mcs5000_ts_resume);
-
-static const struct i2c_device_id mcs5000_ts_id[] = {
- { "mcs5000_ts" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id);
-
-static struct i2c_driver mcs5000_ts_driver = {
- .probe = mcs5000_ts_probe,
- .driver = {
- .name = "mcs5000_ts",
- .pm = pm_sleep_ptr(&mcs5000_ts_pm),
- },
- .id_table = mcs5000_ts_id,
-};
-
-module_i2c_driver(mcs5000_ts_driver);
-
-/* Module information */
-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
-MODULE_DESCRIPTION("Touchscreen driver for MELFAS MCS-5000 controller");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index b673098535ad..787f2caf4f73 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -42,11 +42,6 @@ static int tsc2004_probe(struct i2c_client *i2c)
tsc2004_cmd);
}
-static void tsc2004_remove(struct i2c_client *i2c)
-{
- tsc200x_remove(&i2c->dev);
-}
-
static const struct i2c_device_id tsc2004_idtable[] = {
{ "tsc2004" },
{ }
@@ -70,7 +65,6 @@ static struct i2c_driver tsc2004_driver = {
},
.id_table = tsc2004_idtable,
.probe = tsc2004_probe,
- .remove = tsc2004_remove,
};
module_i2c_driver(tsc2004_driver);
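This hunk and the matching tsc2005 hunk below drop now-empty remove() callbacks: the core (see the tsc200x-core.c diff further down) switches from a manually managed regulator to devm_regulator_get_enable(), so there is nothing left to undo at unbind time. A minimal sketch of that devres pattern, with a hypothetical driver name:

	/* sketch: supply handling that needs no .remove callback */
	static int foo_probe(struct i2c_client *client)
	{
		int error;

		/* enabled now; devres disables and puts it on unbind */
		error = devm_regulator_get_enable(&client->dev, "vio");
		if (error)
			return error;

		return 0;
	}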
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 1b40ce0ca1b9..6fe8b41b3ecc 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -64,11 +64,6 @@ static int tsc2005_probe(struct spi_device *spi)
tsc2005_cmd);
}
-static void tsc2005_remove(struct spi_device *spi)
-{
- tsc200x_remove(&spi->dev);
-}
-
#ifdef CONFIG_OF
static const struct of_device_id tsc2005_of_match[] = {
{ .compatible = "ti,tsc2005" },
@@ -85,7 +80,6 @@ static struct spi_driver tsc2005_driver = {
.pm = pm_sleep_ptr(&tsc200x_pm_ops),
},
.probe = tsc2005_probe,
- .remove = tsc2005_remove,
};
module_spi_driver(tsc2005_driver);
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index a4c0e9db9bb9..df39dee13e1c 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -104,11 +104,11 @@ struct tsc200x {
bool pen_down;
- struct regulator *vio;
-
struct gpio_desc *reset_gpio;
int (*tsc200x_cmd)(struct device *dev, u8 cmd);
+
int irq;
+ bool wake_irq_enabled;
};
static void tsc200x_update_pen_state(struct tsc200x *ts,
@@ -136,7 +136,6 @@ static void tsc200x_update_pen_state(struct tsc200x *ts,
static irqreturn_t tsc200x_irq_thread(int irq, void *_ts)
{
struct tsc200x *ts = _ts;
- unsigned long flags;
unsigned int pressure;
struct tsc200x_data tsdata;
int error;
@@ -182,13 +181,11 @@ static irqreturn_t tsc200x_irq_thread(int irq, void *_ts)
if (unlikely(pressure > MAX_12BIT))
goto out;
- spin_lock_irqsave(&ts->lock, flags);
-
- tsc200x_update_pen_state(ts, tsdata.x, tsdata.y, pressure);
- mod_timer(&ts->penup_timer,
- jiffies + msecs_to_jiffies(TSC200X_PENUP_TIME_MS));
-
- spin_unlock_irqrestore(&ts->lock, flags);
+ scoped_guard(spinlock_irqsave, &ts->lock) {
+ tsc200x_update_pen_state(ts, tsdata.x, tsdata.y, pressure);
+ mod_timer(&ts->penup_timer,
+ jiffies + msecs_to_jiffies(TSC200X_PENUP_TIME_MS));
+ }
ts->last_valid_interrupt = jiffies;
out:
@@ -198,11 +195,9 @@ out:
static void tsc200x_penup_timer(struct timer_list *t)
{
struct tsc200x *ts = from_timer(ts, t, penup_timer);
- unsigned long flags;
- spin_lock_irqsave(&ts->lock, flags);
+ guard(spinlock_irqsave)(&ts->lock);
tsc200x_update_pen_state(ts, 0, 0, 0);
- spin_unlock_irqrestore(&ts->lock, flags);
}
static void tsc200x_start_scan(struct tsc200x *ts)
@@ -232,12 +227,10 @@ static void __tsc200x_disable(struct tsc200x *ts)
{
tsc200x_stop_scan(ts);
- disable_irq(ts->irq);
- del_timer_sync(&ts->penup_timer);
+ guard(disable_irq)(&ts->irq);
+ del_timer_sync(&ts->penup_timer);
cancel_delayed_work_sync(&ts->esd_work);
-
- enable_irq(ts->irq);
}
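guard(disable_irq)(&ts->irq) above keeps the interrupt masked for the rest of the function and re-enables it automatically on every exit path, which is why the trailing enable_irq() call disappears. A sketch of the idiom (assuming the disable_irq guard class from <linux/interrupt.h>; the helper name is hypothetical):

	static void foo_quiesce(struct foo *foo)
	{
		guard(disable_irq)(&foo->irq);	/* disable_irq(foo->irq) here */

		foo_sync_state(foo);	/* hypothetical work done while masked */
	}				/* enable_irq(foo->irq) on scope exit */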
/* must be called with ts->mutex held */
@@ -253,80 +246,79 @@ static void __tsc200x_enable(struct tsc200x *ts)
}
}
-static ssize_t tsc200x_selftest_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/*
+ * Test TSC200X communications via temp high register.
+ */
+static int tsc200x_do_selftest(struct tsc200x *ts)
{
- struct tsc200x *ts = dev_get_drvdata(dev);
- unsigned int temp_high;
unsigned int temp_high_orig;
unsigned int temp_high_test;
- bool success = true;
+ unsigned int temp_high;
int error;
- mutex_lock(&ts->mutex);
-
- /*
- * Test TSC200X communications via temp high register.
- */
- __tsc200x_disable(ts);
-
error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high_orig);
if (error) {
- dev_warn(dev, "selftest failed: read error %d\n", error);
- success = false;
- goto out;
+ dev_warn(ts->dev, "selftest failed: read error %d\n", error);
+ return error;
}
temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
error = regmap_write(ts->regmap, TSC200X_REG_TEMP_HIGH, temp_high_test);
if (error) {
- dev_warn(dev, "selftest failed: write error %d\n", error);
- success = false;
- goto out;
+ dev_warn(ts->dev, "selftest failed: write error %d\n", error);
+ return error;
}
error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high);
if (error) {
- dev_warn(dev, "selftest failed: read error %d after write\n",
- error);
- success = false;
- goto out;
- }
-
- if (temp_high != temp_high_test) {
- dev_warn(dev, "selftest failed: %d != %d\n",
- temp_high, temp_high_test);
- success = false;
+ dev_warn(ts->dev,
+ "selftest failed: read error %d after write\n", error);
+ return error;
}
/* hardware reset */
tsc200x_reset(ts);
- if (!success)
- goto out;
+ if (temp_high != temp_high_test) {
+ dev_warn(ts->dev, "selftest failed: %d != %d\n",
+ temp_high, temp_high_test);
+ return -EINVAL;
+ }
/* test that the reset really happened */
error = regmap_read(ts->regmap, TSC200X_REG_TEMP_HIGH, &temp_high);
if (error) {
- dev_warn(dev, "selftest failed: read error %d after reset\n",
- error);
- success = false;
- goto out;
+ dev_warn(ts->dev,
+ "selftest failed: read error %d after reset\n", error);
+ return error;
}
if (temp_high != temp_high_orig) {
- dev_warn(dev, "selftest failed after reset: %d != %d\n",
+ dev_warn(ts->dev, "selftest failed after reset: %d != %d\n",
temp_high, temp_high_orig);
- success = false;
+ return -EINVAL;
}
-out:
- __tsc200x_enable(ts);
- mutex_unlock(&ts->mutex);
+ return 0;
+}
- return sprintf(buf, "%d\n", success);
+static ssize_t tsc200x_selftest_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tsc200x *ts = dev_get_drvdata(dev);
+ int error;
+
+ scoped_guard(mutex, &ts->mutex) {
+ __tsc200x_disable(ts);
+
+ error = tsc200x_do_selftest(ts);
+
+ __tsc200x_enable(ts);
+ }
+
+ return sprintf(buf, "%d\n", !error);
}
static DEVICE_ATTR(selftest, S_IRUGO, tsc200x_selftest_show, NULL);
@@ -368,46 +360,42 @@ static void tsc200x_esd_work(struct work_struct *work)
int error;
unsigned int r;
- if (!mutex_trylock(&ts->mutex)) {
- /*
- * If the mutex is taken, it means that disable or enable is in
- * progress. In that case just reschedule the work. If the work
- * is not needed, it will be canceled by disable.
- */
- goto reschedule;
- }
-
- if (time_is_after_jiffies(ts->last_valid_interrupt +
- msecs_to_jiffies(ts->esd_timeout)))
- goto out;
-
- /* We should be able to read register without disabling interrupts. */
- error = regmap_read(ts->regmap, TSC200X_REG_CFR0, &r);
- if (!error &&
- !((r ^ TSC200X_CFR0_INITVALUE) & TSC200X_CFR0_RW_MASK)) {
- goto out;
- }
-
/*
- * If we could not read our known value from configuration register 0
- * then we should reset the controller as if from power-up and start
- * scanning again.
+ * If the mutex is taken, it means that disable or enable is in
+ * progress. In that case just reschedule the work. If the work
+ * is not needed, it will be canceled by disable.
*/
- dev_info(ts->dev, "TSC200X not responding - resetting\n");
+ scoped_guard(mutex_try, &ts->mutex) {
+ if (time_is_after_jiffies(ts->last_valid_interrupt +
+ msecs_to_jiffies(ts->esd_timeout)))
+ break;
- disable_irq(ts->irq);
- del_timer_sync(&ts->penup_timer);
+ /*
+ * We should be able to read register without disabling
+ * interrupts.
+ */
+ error = regmap_read(ts->regmap, TSC200X_REG_CFR0, &r);
+ if (!error &&
+ !((r ^ TSC200X_CFR0_INITVALUE) & TSC200X_CFR0_RW_MASK)) {
+ break;
+ }
- tsc200x_update_pen_state(ts, 0, 0, 0);
+ /*
+ * If we could not read our known value from configuration
+ * register 0 then we should reset the controller as if from
+ * power-up and start scanning again.
+ */
+ dev_info(ts->dev, "TSC200X not responding - resetting\n");
- tsc200x_reset(ts);
+ scoped_guard(disable_irq, &ts->irq) {
+ del_timer_sync(&ts->penup_timer);
+ tsc200x_update_pen_state(ts, 0, 0, 0);
+ tsc200x_reset(ts);
+ }
- enable_irq(ts->irq);
- tsc200x_start_scan(ts);
+ tsc200x_start_scan(ts);
+ }
-out:
- mutex_unlock(&ts->mutex);
-reschedule:
/* re-arm the watchdog */
schedule_delayed_work(&ts->esd_work,
round_jiffies_relative(
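scoped_guard(mutex_try, ...) above runs its body only when the trylock succeeds and unlocks when the scope is left, so a contended mutex now falls straight through to the watchdog re-arm; break replaces the old goto-based early exits. A sketch of that control flow, with hypothetical helpers:

	static void foo_watchdog(struct work_struct *work)
	{
		struct foo *foo = container_of(work, struct foo, wdog.work);

		scoped_guard(mutex_try, &foo->lock) {
			if (foo_device_healthy(foo))	/* hypothetical check */
				break;			/* leaves scope, unlocks */

			foo_reset(foo);			/* hypothetical recovery */
		}

		/* runs whether or not the lock was taken */
		schedule_delayed_work(&foo->wdog, HZ);
	}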
@@ -418,15 +406,13 @@ static int tsc200x_open(struct input_dev *input)
{
struct tsc200x *ts = input_get_drvdata(input);
- mutex_lock(&ts->mutex);
+ guard(mutex)(&ts->mutex);
if (!ts->suspended)
__tsc200x_enable(ts);
ts->opened = true;
- mutex_unlock(&ts->mutex);
-
return 0;
}
@@ -434,14 +420,12 @@ static void tsc200x_close(struct input_dev *input)
{
struct tsc200x *ts = input_get_drvdata(input);
- mutex_lock(&ts->mutex);
+ guard(mutex)(&ts->mutex);
if (!ts->suspended)
__tsc200x_disable(ts);
ts->opened = false;
-
- mutex_unlock(&ts->mutex);
}
int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
@@ -488,20 +472,6 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
&esd_timeout);
ts->esd_timeout = error ? 0 : esd_timeout;
- ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(ts->reset_gpio)) {
- error = PTR_ERR(ts->reset_gpio);
- dev_err(dev, "error acquiring reset gpio: %d\n", error);
- return error;
- }
-
- ts->vio = devm_regulator_get(dev, "vio");
- if (IS_ERR(ts->vio)) {
- error = PTR_ERR(ts->vio);
- dev_err(dev, "error acquiring vio regulator: %d", error);
- return error;
- }
-
mutex_init(&ts->mutex);
spin_lock_init(&ts->lock);
@@ -542,60 +512,60 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
touchscreen_parse_properties(input_dev, false, &ts->prop);
+ ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ error = PTR_ERR_OR_ZERO(ts->reset_gpio);
+ if (error) {
+ dev_err(dev, "error acquiring reset gpio: %d\n", error);
+ return error;
+ }
+
+ error = devm_regulator_get_enable(dev, "vio");
+ if (error) {
+ dev_err(dev, "error acquiring vio regulator: %d\n", error);
+ return error;
+ }
+
+ tsc200x_reset(ts);
+
/* Ensure the touchscreen is off */
tsc200x_stop_scan(ts);
- error = devm_request_threaded_irq(dev, irq, NULL,
- tsc200x_irq_thread,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "tsc200x", ts);
+ error = devm_request_threaded_irq(dev, irq, NULL, tsc200x_irq_thread,
+ IRQF_ONESHOT, "tsc200x", ts);
if (error) {
dev_err(dev, "Failed to request irq, err: %d\n", error);
return error;
}
- error = regulator_enable(ts->vio);
- if (error)
- return error;
-
dev_set_drvdata(dev, ts);
error = input_register_device(ts->idev);
if (error) {
dev_err(dev,
"Failed to register input device, err: %d\n", error);
- goto disable_regulator;
+ return error;
}
- irq_set_irq_wake(irq, 1);
- return 0;
+ device_init_wakeup(dev,
+ device_property_read_bool(dev, "wakeup-source"));
-disable_regulator:
- regulator_disable(ts->vio);
- return error;
+ return 0;
}
EXPORT_SYMBOL_GPL(tsc200x_probe);
-void tsc200x_remove(struct device *dev)
-{
- struct tsc200x *ts = dev_get_drvdata(dev);
-
- regulator_disable(ts->vio);
-}
-EXPORT_SYMBOL_GPL(tsc200x_remove);
-
static int tsc200x_suspend(struct device *dev)
{
struct tsc200x *ts = dev_get_drvdata(dev);
- mutex_lock(&ts->mutex);
+ guard(mutex)(&ts->mutex);
if (!ts->suspended && ts->opened)
__tsc200x_disable(ts);
ts->suspended = true;
- mutex_unlock(&ts->mutex);
+ if (device_may_wakeup(dev))
+ ts->wake_irq_enabled = enable_irq_wake(ts->irq) == 0;
return 0;
}
@@ -604,15 +574,18 @@ static int tsc200x_resume(struct device *dev)
{
struct tsc200x *ts = dev_get_drvdata(dev);
- mutex_lock(&ts->mutex);
+ guard(mutex)(&ts->mutex);
+
+ if (ts->wake_irq_enabled) {
+ disable_irq_wake(ts->irq);
+ ts->wake_irq_enabled = false;
+ }
if (ts->suspended && ts->opened)
__tsc200x_enable(ts);
ts->suspended = false;
- mutex_unlock(&ts->mutex);
-
return 0;
}
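The suspend path above arms the interrupt for system wakeup only when the device may wake the system, and records whether enable_irq_wake() actually succeeded so that resume disarms it exactly once. The pairing, reduced to a sketch (names illustrative):

	static int foo_suspend(struct device *dev)
	{
		struct foo *foo = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			foo->wake_irq_enabled = enable_irq_wake(foo->irq) == 0;

		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo *foo = dev_get_drvdata(dev);

		if (foo->wake_irq_enabled) {
			disable_irq_wake(foo->irq);
			foo->wake_irq_enabled = false;
		}

		return 0;
	}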
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 37de91efd78e..e76ba7a889dd 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -75,6 +75,5 @@ extern const struct attribute_group *tsc200x_groups[];
int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
-void tsc200x_remove(struct device *dev);
#endif
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index dd6b12c6dc58..7567efabe014 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -68,8 +68,6 @@ struct usbtouch_device_info {
*/
bool irq_always;
- void (*process_pkt) (struct usbtouch_usb *usbtouch, unsigned char *pkt, int len);
-
/*
* used to get the packet len. possible return values:
* > 0: packet len
@@ -94,7 +92,7 @@ struct usbtouch_usb {
struct urb *irq;
struct usb_interface *interface;
struct input_dev *input;
- struct usbtouch_device_info *type;
+ const struct usbtouch_device_info *type;
struct mutex pm_mutex; /* serialize access to open/suspend */
bool is_open;
char name[128];
@@ -103,141 +101,8 @@ struct usbtouch_usb {
int x, y;
int touch, press;
-};
-
-
-/* device types */
-enum {
- DEVTYPE_IGNORE = -1,
- DEVTYPE_EGALAX,
- DEVTYPE_PANJIT,
- DEVTYPE_3M,
- DEVTYPE_ITM,
- DEVTYPE_ETURBO,
- DEVTYPE_GUNZE,
- DEVTYPE_DMC_TSC10,
- DEVTYPE_IRTOUCH,
- DEVTYPE_IRTOUCH_HIRES,
- DEVTYPE_IDEALTEK,
- DEVTYPE_GENERAL_TOUCH,
- DEVTYPE_GOTOP,
- DEVTYPE_JASTEC,
- DEVTYPE_E2I,
- DEVTYPE_ZYTRONIC,
- DEVTYPE_TC45USB,
- DEVTYPE_NEXIO,
- DEVTYPE_ELO,
- DEVTYPE_ETOUCH,
-};
-
-#define USB_DEVICE_HID_CLASS(vend, prod) \
- .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \
- | USB_DEVICE_ID_MATCH_DEVICE, \
- .idVendor = (vend), \
- .idProduct = (prod), \
- .bInterfaceClass = USB_INTERFACE_CLASS_HID
-
-static const struct usb_device_id usbtouch_devices[] = {
-#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
- /* ignore the HID capable devices, handled by usbhid */
- {USB_DEVICE_HID_CLASS(0x0eef, 0x0001), .driver_info = DEVTYPE_IGNORE},
- {USB_DEVICE_HID_CLASS(0x0eef, 0x0002), .driver_info = DEVTYPE_IGNORE},
-
- /* normal device IDs */
- {USB_DEVICE(0x3823, 0x0001), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x3823, 0x0002), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x0123, 0x0001), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x0eef, 0x0001), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x0eef, 0x0002), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x1234, 0x0001), .driver_info = DEVTYPE_EGALAX},
- {USB_DEVICE(0x1234, 0x0002), .driver_info = DEVTYPE_EGALAX},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT
- {USB_DEVICE(0x134c, 0x0001), .driver_info = DEVTYPE_PANJIT},
- {USB_DEVICE(0x134c, 0x0002), .driver_info = DEVTYPE_PANJIT},
- {USB_DEVICE(0x134c, 0x0003), .driver_info = DEVTYPE_PANJIT},
- {USB_DEVICE(0x134c, 0x0004), .driver_info = DEVTYPE_PANJIT},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_3M
- {USB_DEVICE(0x0596, 0x0001), .driver_info = DEVTYPE_3M},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ITM
- {USB_DEVICE(0x0403, 0xf9e9), .driver_info = DEVTYPE_ITM},
- {USB_DEVICE(0x16e3, 0xf9e9), .driver_info = DEVTYPE_ITM},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
- {USB_DEVICE(0x1234, 0x5678), .driver_info = DEVTYPE_ETURBO},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE
- {USB_DEVICE(0x0637, 0x0001), .driver_info = DEVTYPE_GUNZE},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10
- {USB_DEVICE(0x0afa, 0x03e8), .driver_info = DEVTYPE_DMC_TSC10},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
- {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
- {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
- {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
- {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
- {USB_DEVICE(0x1391, 0x1000), .driver_info = DEVTYPE_IDEALTEK},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
- {USB_DEVICE(0x0dfc, 0x0001), .driver_info = DEVTYPE_GENERAL_TOUCH},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
- {USB_DEVICE(0x08f2, 0x007f), .driver_info = DEVTYPE_GOTOP},
- {USB_DEVICE(0x08f2, 0x00ce), .driver_info = DEVTYPE_GOTOP},
- {USB_DEVICE(0x08f2, 0x00f4), .driver_info = DEVTYPE_GOTOP},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC
- {USB_DEVICE(0x0f92, 0x0001), .driver_info = DEVTYPE_JASTEC},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_E2I
- {USB_DEVICE(0x1ac7, 0x0001), .driver_info = DEVTYPE_E2I},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC
- {USB_DEVICE(0x14c8, 0x0003), .driver_info = DEVTYPE_ZYTRONIC},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB
- /* TC5UH */
- {USB_DEVICE(0x0664, 0x0309), .driver_info = DEVTYPE_TC45USB},
- /* TC4UM */
- {USB_DEVICE(0x0664, 0x0306), .driver_info = DEVTYPE_TC45USB},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
- /* data interface only */
- {USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00),
- .driver_info = DEVTYPE_NEXIO},
- {USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00),
- .driver_info = DEVTYPE_NEXIO},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ELO
- {USB_DEVICE(0x04e7, 0x0020), .driver_info = DEVTYPE_ELO},
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH
- {USB_DEVICE(0x7374, 0x0001), .driver_info = DEVTYPE_ETOUCH},
-#endif
- {}
+ void (*process_pkt)(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len);
};
@@ -273,6 +138,16 @@ static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info e2i_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x7fff,
+ .min_yc = 0x0,
+ .max_yc = 0x7fff,
+ .rept_size = 6,
+ .init = e2i_init,
+ .read_data = e2i_read_data,
+};
#endif
@@ -292,9 +167,8 @@ static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
static int egalax_init(struct usbtouch_usb *usbtouch)
{
- int ret, i;
- unsigned char *buf;
struct usb_device *udev = interface_to_usbdev(usbtouch->interface);
+ int ret, i;
/*
* An eGalax diagnostic packet kicks the device into using the right
@@ -302,7 +176,7 @@ static int egalax_init(struct usbtouch_usb *usbtouch)
* read later and ignored.
*/
- buf = kmalloc(3, GFP_KERNEL);
+ u8 *buf __free(kfree) = kmalloc(3, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -316,17 +190,11 @@ static int egalax_init(struct usbtouch_usb *usbtouch)
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, 0, buf, 3,
USB_CTRL_SET_TIMEOUT);
- if (ret >= 0) {
- ret = 0;
- break;
- }
if (ret != -EPIPE)
break;
}
- kfree(buf);
-
- return ret;
+ return ret < 0 ? ret : 0;
}
static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
@@ -356,6 +224,17 @@ static int egalax_get_pkt_len(unsigned char *buf, int len)
return 0;
}
+
+static const struct usbtouch_device_info egalax_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x07ff,
+ .min_yc = 0x0,
+ .max_yc = 0x07ff,
+ .rept_size = 16,
+ .get_pkt_len = egalax_get_pkt_len,
+ .read_data = egalax_read_data,
+ .init = egalax_init,
+};
#endif
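The __free(kfree) annotation in egalax_init() above ties kfree() to the variable's scope, so every early return frees the buffer and the kfree()-before-return boilerplate goes away. A minimal sketch of the idiom from <linux/cleanup.h> (the transfer helper is hypothetical):

	static int foo_send_magic(struct usb_device *udev)
	{
		u8 *buf __free(kfree) = kmalloc(3, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		buf[0] = 0x0a;	/* freed on every return path below */

		return foo_transfer(udev, buf, 3);	/* hypothetical */
	}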
/*****************************************************************************
@@ -402,6 +281,16 @@ static int etouch_get_pkt_len(unsigned char *buf, int len)
return 0;
}
+
+static const struct usbtouch_device_info etouch_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x07ff,
+ .min_yc = 0x0,
+ .max_yc = 0x07ff,
+ .rept_size = 16,
+ .get_pkt_len = etouch_get_pkt_len,
+ .read_data = etouch_read_data,
+};
#endif
/*****************************************************************************
@@ -416,6 +305,15 @@ static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info panjit_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 8,
+ .read_data = panjit_read_data,
+};
#endif
@@ -449,35 +347,13 @@ struct mtouch_priv {
u8 fw_rev_minor;
};
-static ssize_t mtouch_firmware_rev_show(struct device *dev,
- struct device_attribute *attr, char *output)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
- struct mtouch_priv *priv = usbtouch->priv;
-
- return sysfs_emit(output, "%1x.%1x\n",
- priv->fw_rev_major, priv->fw_rev_minor);
-}
-static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL);
-
-static struct attribute *mtouch_attrs[] = {
- &dev_attr_firmware_rev.attr,
- NULL
-};
-
-static const struct attribute_group mtouch_attr_group = {
- .attrs = mtouch_attrs,
-};
-
static int mtouch_get_fw_revision(struct usbtouch_usb *usbtouch)
{
struct usb_device *udev = interface_to_usbdev(usbtouch->interface);
struct mtouch_priv *priv = usbtouch->priv;
- u8 *buf;
int ret;
- buf = kzalloc(MTOUCHUSB_REQ_CTRLLR_ID_LEN, GFP_NOIO);
+ u8 *buf __free(kfree) = kzalloc(MTOUCHUSB_REQ_CTRLLR_ID_LEN, GFP_NOIO);
if (!buf)
return -ENOMEM;
@@ -489,38 +365,24 @@ static int mtouch_get_fw_revision(struct usbtouch_usb *usbtouch)
if (ret != MTOUCHUSB_REQ_CTRLLR_ID_LEN) {
dev_warn(&usbtouch->interface->dev,
"Failed to read FW rev: %d\n", ret);
- ret = ret < 0 ? ret : -EIO;
- goto free;
+ return ret < 0 ? ret : -EIO;
}
priv->fw_rev_major = buf[3];
priv->fw_rev_minor = buf[4];
- ret = 0;
-
-free:
- kfree(buf);
- return ret;
+ return 0;
}
static int mtouch_alloc(struct usbtouch_usb *usbtouch)
{
struct mtouch_priv *priv;
- int ret;
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
usbtouch->priv = priv;
- ret = sysfs_create_group(&usbtouch->interface->dev.kobj,
- &mtouch_attr_group);
- if (ret) {
- kfree(usbtouch->priv);
- usbtouch->priv = NULL;
- return ret;
- }
-
return 0;
}
@@ -571,9 +433,53 @@ static void mtouch_exit(struct usbtouch_usb *usbtouch)
{
struct mtouch_priv *priv = usbtouch->priv;
- sysfs_remove_group(&usbtouch->interface->dev.kobj, &mtouch_attr_group);
kfree(priv);
}
+
+static struct usbtouch_device_info mtouch_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x4000,
+ .min_yc = 0x0,
+ .max_yc = 0x4000,
+ .rept_size = 11,
+ .read_data = mtouch_read_data,
+ .alloc = mtouch_alloc,
+ .init = mtouch_init,
+ .exit = mtouch_exit,
+};
+
+static ssize_t mtouch_firmware_rev_show(struct device *dev,
+ struct device_attribute *attr, char *output)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
+ struct mtouch_priv *priv = usbtouch->priv;
+
+ return sysfs_emit(output, "%1x.%1x\n",
+ priv->fw_rev_major, priv->fw_rev_minor);
+}
+static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL);
+
+static struct attribute *mtouch_attrs[] = {
+ &dev_attr_firmware_rev.attr,
+ NULL
+};
+
+static bool mtouch_group_visible(struct kobject *kobj)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
+
+ return usbtouch->type == &mtouch_dev_info;
+}
+
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(mtouch);
+
+static const struct attribute_group mtouch_attr_group = {
+ .is_visible = SYSFS_GROUP_VISIBLE(mtouch),
+ .attrs = mtouch_attrs,
+};
#endif
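Because the firmware_rev attribute group is now registered unconditionally through the driver's .dev_groups (see usbtouch_groups near the end of this file's diff), the is_visible hook hides it on every interface that is not bound as a 3M device. DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(mtouch) consumes the mtouch_group_visible() predicate above; its expansion is roughly the following (a sketch, not the exact macro body):

	static umode_t sysfs_group_visible_mtouch(struct kobject *kobj,
						  struct attribute *attr, int n)
	{
		/* hide the whole group when the predicate rejects the device */
		if (n == 0 && !mtouch_group_visible(kobj))
			return SYSFS_GROUP_INVISIBLE;

		return attr->mode;
	}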
@@ -608,6 +514,16 @@ static int itm_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info itm_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .max_press = 0xff,
+ .rept_size = 8,
+ .read_data = itm_read_data,
+};
#endif
@@ -642,6 +558,16 @@ static int eturbo_get_pkt_len(unsigned char *buf, int len)
return 3;
return 0;
}
+
+static const struct usbtouch_device_info eturbo_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x07ff,
+ .min_yc = 0x0,
+ .max_yc = 0x07ff,
+ .rept_size = 8,
+ .get_pkt_len = eturbo_get_pkt_len,
+ .read_data = eturbo_read_data,
+};
#endif
@@ -660,6 +586,15 @@ static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info gunze_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 4,
+ .read_data = gunze_read_data,
+};
#endif
/*****************************************************************************
@@ -688,24 +623,23 @@ static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
{
struct usb_device *dev = interface_to_usbdev(usbtouch->interface);
- int ret = -ENOMEM;
- unsigned char *buf;
+ int ret;
- buf = kmalloc(2, GFP_NOIO);
+ u8 *buf __free(kfree) = kmalloc(2, GFP_NOIO);
if (!buf)
- goto err_nobuf;
+ return -ENOMEM;
+
/* reset */
buf[0] = buf[1] = 0xFF;
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
- TSC10_CMD_RESET,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
+ TSC10_CMD_RESET,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
if (ret < 0)
- goto err_out;
- if (buf[0] != 0x06) {
- ret = -ENODEV;
- goto err_out;
- }
+ return ret;
+
+ if (buf[0] != 0x06)
+ return -ENODEV;
/* TSC-25 data sheet specifies a delay after the RESET command */
msleep(150);
@@ -713,28 +647,22 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
/* set coordinate output rate */
buf[0] = buf[1] = 0xFF;
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
- TSC10_CMD_RATE,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
+ TSC10_CMD_RATE,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
if (ret < 0)
- goto err_out;
- if ((buf[0] != 0x06) && (buf[0] != 0x15 || buf[1] != 0x01)) {
- ret = -ENODEV;
- goto err_out;
- }
+ return ret;
+
+ if (buf[0] != 0x06 && (buf[0] != 0x15 || buf[1] != 0x01))
+ return -ENODEV;
/* start sending data */
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- TSC10_CMD_DATA1,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
-err_out:
- kfree(buf);
-err_nobuf:
- return ret;
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ TSC10_CMD_DATA1,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
-
static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
dev->x = ((pkt[2] & 0x03) << 8) | pkt[1];
@@ -743,6 +671,16 @@ static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info dmc_tsc10_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x03ff,
+ .min_yc = 0x0,
+ .max_yc = 0x03ff,
+ .rept_size = 5,
+ .init = dmc_tsc10_init,
+ .read_data = dmc_tsc10_read_data,
+};
#endif
@@ -758,6 +696,24 @@ static int irtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info irtouch_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 8,
+ .read_data = irtouch_read_data,
+};
+
+static const struct usbtouch_device_info irtouch_hires_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x7fff,
+ .min_yc = 0x0,
+ .max_yc = 0x7fff,
+ .rept_size = 8,
+ .read_data = irtouch_read_data,
+};
#endif
/*****************************************************************************
@@ -772,6 +728,15 @@ static int tc45usb_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info tc45usb_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 5,
+ .read_data = tc45usb_read_data,
+};
#endif
/*****************************************************************************
@@ -811,6 +776,16 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 0;
}
}
+
+static const struct usbtouch_device_info idealtek_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 8,
+ .get_pkt_len = idealtek_get_pkt_len,
+ .read_data = idealtek_read_data,
+};
#endif
/*****************************************************************************
@@ -826,6 +801,15 @@ static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info general_touch_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x7fff,
+ .min_yc = 0x0,
+ .max_yc = 0x7fff,
+ .rept_size = 7,
+ .read_data = general_touch_read_data,
+};
#endif
/*****************************************************************************
@@ -840,6 +824,15 @@ static int gotop_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info gotop_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x03ff,
+ .min_yc = 0x0,
+ .max_yc = 0x03ff,
+ .rept_size = 4,
+ .read_data = gotop_read_data,
+};
#endif
/*****************************************************************************
@@ -854,6 +847,15 @@ static int jastec_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
+
+static const struct usbtouch_device_info jastec_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .rept_size = 4,
+ .read_data = jastec_read_data,
+};
#endif
/*****************************************************************************
@@ -890,6 +892,16 @@ static int zytronic_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 0;
}
+
+static const struct usbtouch_device_info zytronic_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x03ff,
+ .min_yc = 0x0,
+ .max_yc = 0x03ff,
+ .rept_size = 5,
+ .read_data = zytronic_read_data,
+ .irq_always = true,
+};
#endif
/*****************************************************************************
@@ -960,7 +972,6 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
struct nexio_priv *priv = usbtouch->priv;
int ret = -ENOMEM;
int actual_len, i;
- unsigned char *buf;
char *firmware_ver = NULL, *device_name = NULL;
int input_ep = 0, output_ep = 0;
@@ -976,9 +987,9 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
if (!input_ep || !output_ep)
return -ENXIO;
- buf = kmalloc(NEXIO_BUFSIZE, GFP_NOIO);
+ u8 *buf __free(kfree) = kmalloc(NEXIO_BUFSIZE, GFP_NOIO);
if (!buf)
- goto out_buf;
+ return -ENOMEM;
/* two empty reads */
for (i = 0; i < 2; i++) {
@@ -986,7 +997,7 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
buf, NEXIO_BUFSIZE, &actual_len,
NEXIO_TIMEOUT);
if (ret < 0)
- goto out_buf;
+ return ret;
}
/* send init command */
@@ -995,7 +1006,7 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
buf, sizeof(nexio_init_pkt), &actual_len,
NEXIO_TIMEOUT);
if (ret < 0)
- goto out_buf;
+ return ret;
/* read replies */
for (i = 0; i < 3; i++) {
@@ -1026,11 +1037,8 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
usb_fill_bulk_urb(priv->ack, dev, usb_sndbulkpipe(dev, output_ep),
priv->ack_buf, sizeof(nexio_ack_pkt),
nexio_ack_complete, usbtouch);
- ret = 0;
-out_buf:
- kfree(buf);
- return ret;
+ return 0;
}
static void nexio_exit(struct usbtouch_usb *usbtouch)
@@ -1067,13 +1075,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
if (ret)
dev_warn(dev, "Failed to submit ACK URB: %d\n", ret);
- if (!usbtouch->type->max_xc) {
- usbtouch->type->max_xc = 2 * x_len;
+ if (!input_abs_get_max(usbtouch->input, ABS_X)) {
input_set_abs_params(usbtouch->input, ABS_X,
- 0, usbtouch->type->max_xc, 0, 0);
- usbtouch->type->max_yc = 2 * y_len;
+ 0, 2 * x_len, 0, 0);
input_set_abs_params(usbtouch->input, ABS_Y,
- 0, usbtouch->type->max_yc, 0, 0);
+ 0, 2 * y_len, 0, 0);
}
/*
* The device reports state of IR sensors on X and Y axes.
@@ -1128,6 +1134,15 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
}
return 0;
}
+
+static const struct usbtouch_device_info nexio_dev_info = {
+ .rept_size = 1024,
+ .irq_always = true,
+ .read_data = nexio_read_data,
+ .alloc = nexio_alloc,
+ .init = nexio_init,
+ .exit = nexio_exit,
+};
#endif
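With the device descriptors now const and shared, nexio_read_data() can no longer write its probed axis ranges back into usbtouch->type; the hunk above instead keys off the input device itself, where an unset ABS_X maximum doubles as the "not yet calibrated" flag:

	/* sketch: set ranges once, directly on the per-device input dev */
	if (!input_abs_get_max(usbtouch->input, ABS_X)) {
		input_set_abs_params(usbtouch->input, ABS_X, 0, 2 * x_len, 0, 0);
		input_set_abs_params(usbtouch->input, ABS_Y, 0, 2 * y_len, 0, 0);
	}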
@@ -1146,241 +1161,17 @@ static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
return 1;
}
-#endif
-
-
-/*****************************************************************************
- * the different device descriptors
- */
-#ifdef MULTI_PACKET
-static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
- unsigned char *pkt, int len);
-#endif
-
-static struct usbtouch_device_info usbtouch_dev_info[] = {
-#ifdef CONFIG_TOUCHSCREEN_USB_ELO
- [DEVTYPE_ELO] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .max_press = 0xff,
- .rept_size = 8,
- .read_data = elo_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
- [DEVTYPE_EGALAX] = {
- .min_xc = 0x0,
- .max_xc = 0x07ff,
- .min_yc = 0x0,
- .max_yc = 0x07ff,
- .rept_size = 16,
- .process_pkt = usbtouch_process_multi,
- .get_pkt_len = egalax_get_pkt_len,
- .read_data = egalax_read_data,
- .init = egalax_init,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT
- [DEVTYPE_PANJIT] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 8,
- .read_data = panjit_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_3M
- [DEVTYPE_3M] = {
- .min_xc = 0x0,
- .max_xc = 0x4000,
- .min_yc = 0x0,
- .max_yc = 0x4000,
- .rept_size = 11,
- .read_data = mtouch_read_data,
- .alloc = mtouch_alloc,
- .init = mtouch_init,
- .exit = mtouch_exit,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ITM
- [DEVTYPE_ITM] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .max_press = 0xff,
- .rept_size = 8,
- .read_data = itm_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
- [DEVTYPE_ETURBO] = {
- .min_xc = 0x0,
- .max_xc = 0x07ff,
- .min_yc = 0x0,
- .max_yc = 0x07ff,
- .rept_size = 8,
- .process_pkt = usbtouch_process_multi,
- .get_pkt_len = eturbo_get_pkt_len,
- .read_data = eturbo_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE
- [DEVTYPE_GUNZE] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 4,
- .read_data = gunze_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10
- [DEVTYPE_DMC_TSC10] = {
- .min_xc = 0x0,
- .max_xc = 0x03ff,
- .min_yc = 0x0,
- .max_yc = 0x03ff,
- .rept_size = 5,
- .init = dmc_tsc10_init,
- .read_data = dmc_tsc10_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
- [DEVTYPE_IRTOUCH] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 8,
- .read_data = irtouch_read_data,
- },
-
- [DEVTYPE_IRTOUCH_HIRES] = {
- .min_xc = 0x0,
- .max_xc = 0x7fff,
- .min_yc = 0x0,
- .max_yc = 0x7fff,
- .rept_size = 8,
- .read_data = irtouch_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
- [DEVTYPE_IDEALTEK] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 8,
- .process_pkt = usbtouch_process_multi,
- .get_pkt_len = idealtek_get_pkt_len,
- .read_data = idealtek_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
- [DEVTYPE_GENERAL_TOUCH] = {
- .min_xc = 0x0,
- .max_xc = 0x7fff,
- .min_yc = 0x0,
- .max_yc = 0x7fff,
- .rept_size = 7,
- .read_data = general_touch_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
- [DEVTYPE_GOTOP] = {
- .min_xc = 0x0,
- .max_xc = 0x03ff,
- .min_yc = 0x0,
- .max_yc = 0x03ff,
- .rept_size = 4,
- .read_data = gotop_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC
- [DEVTYPE_JASTEC] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 4,
- .read_data = jastec_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_E2I
- [DEVTYPE_E2I] = {
- .min_xc = 0x0,
- .max_xc = 0x7fff,
- .min_yc = 0x0,
- .max_yc = 0x7fff,
- .rept_size = 6,
- .init = e2i_init,
- .read_data = e2i_read_data,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC
- [DEVTYPE_ZYTRONIC] = {
- .min_xc = 0x0,
- .max_xc = 0x03ff,
- .min_yc = 0x0,
- .max_yc = 0x03ff,
- .rept_size = 5,
- .read_data = zytronic_read_data,
- .irq_always = true,
- },
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB
- [DEVTYPE_TC45USB] = {
- .min_xc = 0x0,
- .max_xc = 0x0fff,
- .min_yc = 0x0,
- .max_yc = 0x0fff,
- .rept_size = 5,
- .read_data = tc45usb_read_data,
- },
-#endif
-#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
- [DEVTYPE_NEXIO] = {
- .rept_size = 1024,
- .irq_always = true,
- .read_data = nexio_read_data,
- .alloc = nexio_alloc,
- .init = nexio_init,
- .exit = nexio_exit,
- },
-#endif
-#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH
- [DEVTYPE_ETOUCH] = {
- .min_xc = 0x0,
- .max_xc = 0x07ff,
- .min_yc = 0x0,
- .max_yc = 0x07ff,
- .rept_size = 16,
- .process_pkt = usbtouch_process_multi,
- .get_pkt_len = etouch_get_pkt_len,
- .read_data = etouch_read_data,
- },
-#endif
+static const struct usbtouch_device_info elo_dev_info = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .max_press = 0xff,
+ .rept_size = 8,
+ .read_data = elo_read_data,
};
+#endif
/*****************************************************************************
@@ -1389,10 +1180,10 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
static void usbtouch_process_pkt(struct usbtouch_usb *usbtouch,
unsigned char *pkt, int len)
{
- struct usbtouch_device_info *type = usbtouch->type;
+ const struct usbtouch_device_info *type = usbtouch->type;
if (!type->read_data(usbtouch, pkt))
- return;
+ return;
input_report_key(usbtouch->input, BTN_TOUCH, usbtouch->touch);
@@ -1485,9 +1276,15 @@ out_flush_buf:
usbtouch->buf_len = 0;
return;
}
+#else
+static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
+ unsigned char *pkt, int len)
+{
+ dev_WARN_ONCE(&usbtouch->interface->dev, 1,
+ "Protocol has ->get_pkt_len() without #define MULTI_PACKET");
+}
#endif
-
static void usbtouch_irq(struct urb *urb)
{
struct usbtouch_usb *usbtouch = urb->context;
@@ -1518,7 +1315,7 @@ static void usbtouch_irq(struct urb *urb)
goto exit;
}
- usbtouch->type->process_pkt(usbtouch, usbtouch->data, urb->actual_length);
+ usbtouch->process_pkt(usbtouch, usbtouch->data, urb->actual_length);
exit:
usb_mark_last_busy(interface_to_usbdev(usbtouch->interface));
@@ -1528,6 +1325,20 @@ exit:
__func__, retval);
}
+static int usbtouch_start_io(struct usbtouch_usb *usbtouch)
+{
+ guard(mutex)(&usbtouch->pm_mutex);
+
+ if (!usbtouch->type->irq_always)
+ if (usb_submit_urb(usbtouch->irq, GFP_KERNEL))
+ return -EIO;
+
+ usbtouch->interface->needs_remote_wakeup = 1;
+ usbtouch->is_open = true;
+
+ return 0;
+}
+
static int usbtouch_open(struct input_dev *input)
{
struct usbtouch_usb *usbtouch = input_get_drvdata(input);
@@ -1536,23 +1347,12 @@ static int usbtouch_open(struct input_dev *input)
usbtouch->irq->dev = interface_to_usbdev(usbtouch->interface);
r = usb_autopm_get_interface(usbtouch->interface) ? -EIO : 0;
- if (r < 0)
- goto out;
-
- mutex_lock(&usbtouch->pm_mutex);
- if (!usbtouch->type->irq_always) {
- if (usb_submit_urb(usbtouch->irq, GFP_KERNEL)) {
- r = -EIO;
- goto out_put;
- }
- }
+ if (r)
+ return r;
+
+ r = usbtouch_start_io(usbtouch);
- usbtouch->interface->needs_remote_wakeup = 1;
- usbtouch->is_open = true;
-out_put:
- mutex_unlock(&usbtouch->pm_mutex);
usb_autopm_put_interface(usbtouch->interface);
-out:
return r;
}
@@ -1561,11 +1361,11 @@ static void usbtouch_close(struct input_dev *input)
struct usbtouch_usb *usbtouch = input_get_drvdata(input);
int r;
- mutex_lock(&usbtouch->pm_mutex);
- if (!usbtouch->type->irq_always)
- usb_kill_urb(usbtouch->irq);
- usbtouch->is_open = false;
- mutex_unlock(&usbtouch->pm_mutex);
+ scoped_guard(mutex, &usbtouch->pm_mutex) {
+ if (!usbtouch->type->irq_always)
+ usb_kill_urb(usbtouch->irq);
+ usbtouch->is_open = false;
+ }
r = usb_autopm_get_interface(usbtouch->interface);
usbtouch->interface->needs_remote_wakeup = 0;
@@ -1573,8 +1373,7 @@ static void usbtouch_close(struct input_dev *input)
usb_autopm_put_interface(usbtouch->interface);
}
-static int usbtouch_suspend
-(struct usb_interface *intf, pm_message_t message)
+static int usbtouch_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
@@ -1586,20 +1385,19 @@ static int usbtouch_suspend
static int usbtouch_resume(struct usb_interface *intf)
{
struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
- int result = 0;
- mutex_lock(&usbtouch->pm_mutex);
+ guard(mutex)(&usbtouch->pm_mutex);
+
if (usbtouch->is_open || usbtouch->type->irq_always)
- result = usb_submit_urb(usbtouch->irq, GFP_NOIO);
- mutex_unlock(&usbtouch->pm_mutex);
+ return usb_submit_urb(usbtouch->irq, GFP_NOIO);
- return result;
+ return 0;
}
static int usbtouch_reset_resume(struct usb_interface *intf)
{
struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
- int err = 0;
+ int err;
/* reinit the device */
if (usbtouch->type->init) {
@@ -1613,12 +1411,12 @@ static int usbtouch_reset_resume(struct usb_interface *intf)
}
/* restart IO if needed */
- mutex_lock(&usbtouch->pm_mutex);
+ guard(mutex)(&usbtouch->pm_mutex);
+
if (usbtouch->is_open)
- err = usb_submit_urb(usbtouch->irq, GFP_NOIO);
- mutex_unlock(&usbtouch->pm_mutex);
+ return usb_submit_urb(usbtouch->irq, GFP_NOIO);
- return err;
+ return 0;
}
static void usbtouch_free_buffers(struct usb_device *udev,
@@ -1648,14 +1446,12 @@ static int usbtouch_probe(struct usb_interface *intf,
struct input_dev *input_dev;
struct usb_endpoint_descriptor *endpoint;
struct usb_device *udev = interface_to_usbdev(intf);
- struct usbtouch_device_info *type;
+ const struct usbtouch_device_info *type;
int err = -ENOMEM;
/* some devices are ignored */
- if (id->driver_info == DEVTYPE_IGNORE)
- return -ENODEV;
-
- if (id->driver_info >= ARRAY_SIZE(usbtouch_dev_info))
+ type = (const struct usbtouch_device_info *)id->driver_info;
+ if (!type)
return -ENODEV;
endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting);
@@ -1668,11 +1464,7 @@ static int usbtouch_probe(struct usb_interface *intf,
goto out_free;
mutex_init(&usbtouch->pm_mutex);
-
- type = &usbtouch_dev_info[id->driver_info];
usbtouch->type = type;
- if (!type->process_pkt)
- type->process_pkt = usbtouch_process_pkt;
usbtouch->data_size = type->rept_size;
if (type->get_pkt_len) {
@@ -1696,6 +1488,9 @@ static int usbtouch_probe(struct usb_interface *intf,
usbtouch->buffer = kmalloc(type->rept_size, GFP_KERNEL);
if (!usbtouch->buffer)
goto out_free_buffers;
+ usbtouch->process_pkt = usbtouch_process_multi;
+ } else {
+ usbtouch->process_pkt = usbtouch_process_pkt;
}
usbtouch->irq = usb_alloc_urb(0, GFP_KERNEL);
@@ -1842,6 +1637,150 @@ static void usbtouch_disconnect(struct usb_interface *intf)
kfree(usbtouch);
}
+static const struct attribute_group *usbtouch_groups[] = {
+#ifdef CONFIG_TOUCHSCREEN_USB_3M
+ &mtouch_attr_group,
+#endif
+ NULL
+};
+
+static const struct usb_device_id usbtouch_devices[] = {
+#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
+ /* ignore the HID capable devices, handled by usbhid */
+ { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0001, USB_INTERFACE_CLASS_HID),
+ .driver_info = 0 },
+ { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0002, USB_INTERFACE_CLASS_HID),
+ .driver_info = 0 },
+
+ /* normal device IDs */
+ { USB_DEVICE(0x3823, 0x0001),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x3823, 0x0002),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x0123, 0x0001),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x0eef, 0x0001),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x0eef, 0x0002),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x1234, 0x0001),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+ { USB_DEVICE(0x1234, 0x0002),
+ .driver_info = (kernel_ulong_t)&egalax_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_PANJIT
+ { USB_DEVICE(0x134c, 0x0001),
+ .driver_info = (kernel_ulong_t)&panjit_dev_info },
+ { USB_DEVICE(0x134c, 0x0002),
+ .driver_info = (kernel_ulong_t)&panjit_dev_info },
+ { USB_DEVICE(0x134c, 0x0003),
+ .driver_info = (kernel_ulong_t)&panjit_dev_info },
+ { USB_DEVICE(0x134c, 0x0004),
+ .driver_info = (kernel_ulong_t)&panjit_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_3M
+ { USB_DEVICE(0x0596, 0x0001),
+ .driver_info = (kernel_ulong_t)&mtouch_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ITM
+ { USB_DEVICE(0x0403, 0xf9e9),
+ .driver_info = (kernel_ulong_t)&itm_dev_info },
+ { USB_DEVICE(0x16e3, 0xf9e9),
+ .driver_info = (kernel_ulong_t)&itm_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
+ { USB_DEVICE(0x1234, 0x5678),
+ .driver_info = (kernel_ulong_t)&eturbo_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_GUNZE
+ { USB_DEVICE(0x0637, 0x0001),
+ .driver_info = (kernel_ulong_t)&gunze_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10
+ { USB_DEVICE(0x0afa, 0x03e8),
+ .driver_info = (kernel_ulong_t)&dmc_tsc10_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
+ { USB_DEVICE(0x255e, 0x0001),
+ .driver_info = (kernel_ulong_t)&irtouch_dev_info },
+ { USB_DEVICE(0x595a, 0x0001),
+ .driver_info = (kernel_ulong_t)&irtouch_dev_info },
+ { USB_DEVICE(0x6615, 0x0001),
+ .driver_info = (kernel_ulong_t)&irtouch_dev_info },
+ { USB_DEVICE(0x6615, 0x0012),
+ .driver_info = (kernel_ulong_t)&irtouch_hires_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK
+ { USB_DEVICE(0x1391, 0x1000),
+ .driver_info = (kernel_ulong_t)&idealtek_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
+ { USB_DEVICE(0x0dfc, 0x0001),
+ .driver_info = (kernel_ulong_t)&general_touch_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_GOTOP
+ { USB_DEVICE(0x08f2, 0x007f),
+ .driver_info = (kernel_ulong_t)&gotop_dev_info },
+ { USB_DEVICE(0x08f2, 0x00ce),
+ .driver_info = (kernel_ulong_t)&gotop_dev_info },
+ { USB_DEVICE(0x08f2, 0x00f4),
+ .driver_info = (kernel_ulong_t)&gotop_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_JASTEC
+ { USB_DEVICE(0x0f92, 0x0001),
+ .driver_info = (kernel_ulong_t)&jastec_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_E2I
+ { USB_DEVICE(0x1ac7, 0x0001),
+ .driver_info = (kernel_ulong_t)&e2i_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC
+ { USB_DEVICE(0x14c8, 0x0003),
+ .driver_info = (kernel_ulong_t)&zytronic_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB
+ /* TC5UH */
+ { USB_DEVICE(0x0664, 0x0309),
+ .driver_info = (kernel_ulong_t)&tc45usb_dev_info },
+ /* TC4UM */
+ { USB_DEVICE(0x0664, 0x0306),
+ .driver_info = (kernel_ulong_t)&tc45usb_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
+ /* data interface only */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00),
+ .driver_info = (kernel_ulong_t)&nexio_dev_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00),
+ .driver_info = (kernel_ulong_t)&nexio_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+ { USB_DEVICE(0x04e7, 0x0020),
+ .driver_info = (kernel_ulong_t)&elo_dev_info },
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH
+ { USB_DEVICE(0x7374, 0x0001),
+ .driver_info = (kernel_ulong_t)&etouch_dev_info },
+#endif
+
+ { }
+};
MODULE_DEVICE_TABLE(usb, usbtouch_devices);
static struct usb_driver usbtouch_driver = {
@@ -1852,6 +1791,7 @@ static struct usb_driver usbtouch_driver = {
.resume = usbtouch_resume,
.reset_resume = usbtouch_reset_resume,
.id_table = usbtouch_devices,
+ .dev_groups = usbtouch_groups,
.supports_autosuspend = 1,
};
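The old usbtouch_dev_info[] array and DEVTYPE_* enum are replaced by per-protocol const structures whose addresses travel in the id table's driver_info field; probe() casts the value back, and a zero driver_info now marks the ignored HID-class eGalax entries. The round trip, sketched with table entries from this diff and a hypothetical probe name:

	/* table side: stash the pointer */
	{ USB_DEVICE(0x0596, 0x0001),
	  .driver_info = (kernel_ulong_t)&mtouch_dev_info },

	/* probe side: recover it; NULL means "ignore this device" */
	static int foo_probe(struct usb_interface *intf,
			     const struct usb_device_id *id)
	{
		const struct usbtouch_device_info *type =
			(const struct usbtouch_device_info *)id->driver_info;

		if (!type)
			return -ENODEV;

		/* bind using type->rept_size, type->read_data, ... */
		return 0;
	}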
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index fdf2d1e770c8..4b8c4ebfff96 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -9,21 +9,20 @@
* Author: Pieter Truter<ptruter@intrinsyc.com>
*/
-#include <linux/module.h>
-#include <linux/hrtimer.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
#include <linux/device.h>
-#include <linux/sysfs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/platform_data/zforce_ts.h>
-#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
#define WAIT_TIMEOUT msecs_to_jiffies(1000)
@@ -97,9 +96,7 @@ struct zforce_point {
* @suspending in the process of going to suspend (don't emit wakeup
* events for commands executed to suspend the device)
* @suspended device suspended
- * @access_mutex serialize i2c-access, to keep multipart reads together
* @command_done completion to wait for the command result
- * @command_mutex serialize commands sent to the ic
* @command_waiting the id of the command that is currently waiting
* for a result
* @command_result returned result of the command
@@ -108,11 +105,8 @@ struct zforce_ts {
struct i2c_client *client;
struct input_dev *input;
struct touchscreen_properties prop;
- const struct zforce_ts_platdata *pdata;
char phys[32];
- struct regulator *reg_vdd;
-
struct gpio_desc *gpio_int;
struct gpio_desc *gpio_rst;
@@ -126,10 +120,7 @@ struct zforce_ts {
u16 version_build;
u16 version_rev;
- struct mutex access_mutex;
-
struct completion command_done;
- struct mutex command_mutex;
int command_waiting;
int command_result;
};
@@ -146,9 +137,7 @@ static int zforce_command(struct zforce_ts *ts, u8 cmd)
buf[1] = 1; /* data size, command only */
buf[2] = cmd;
- mutex_lock(&ts->access_mutex);
ret = i2c_master_send(client, &buf[0], ARRAY_SIZE(buf));
- mutex_unlock(&ts->access_mutex);
if (ret < 0) {
dev_err(&client->dev, "i2c send data request error: %d\n", ret);
return ret;
@@ -157,59 +146,36 @@ static int zforce_command(struct zforce_ts *ts, u8 cmd)
return 0;
}
-static void zforce_reset_assert(struct zforce_ts *ts)
-{
- gpiod_set_value_cansleep(ts->gpio_rst, 1);
-}
-
-static void zforce_reset_deassert(struct zforce_ts *ts)
-{
- gpiod_set_value_cansleep(ts->gpio_rst, 0);
-}
-
static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
{
struct i2c_client *client = ts->client;
int ret;
- ret = mutex_trylock(&ts->command_mutex);
- if (!ret) {
- dev_err(&client->dev, "already waiting for a command\n");
- return -EBUSY;
- }
-
dev_dbg(&client->dev, "sending %d bytes for command 0x%x\n",
buf[1], buf[2]);
ts->command_waiting = buf[2];
- mutex_lock(&ts->access_mutex);
ret = i2c_master_send(client, buf, len);
- mutex_unlock(&ts->access_mutex);
if (ret < 0) {
dev_err(&client->dev, "i2c send data request error: %d\n", ret);
- goto unlock;
+ return ret;
}
dev_dbg(&client->dev, "waiting for result for command 0x%x\n", buf[2]);
- if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0) {
- ret = -ETIME;
- goto unlock;
- }
+ if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
+ return -ETIME;
ret = ts->command_result;
-
-unlock:
- mutex_unlock(&ts->command_mutex);
- return ret;
+ return ret;
}
static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
{
struct i2c_client *client = ts->client;
char buf[3];
- int ret;
+ int error;
dev_dbg(&client->dev, "%s: 0x%x\n", __func__, cmd);
@@ -217,10 +183,11 @@ static int zforce_command_wait(struct zforce_ts *ts, u8 cmd)
buf[1] = 1; /* data size, command only */
buf[2] = cmd;
- ret = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
- if (ret < 0) {
- dev_err(&client->dev, "i2c send data request error: %d\n", ret);
- return ret;
+ error = zforce_send_wait(ts, &buf[0], ARRAY_SIZE(buf));
+ if (error) {
+ dev_err(&client->dev, "i2c send data request error: %d\n",
+ error);
+ return error;
}
return 0;
@@ -268,40 +235,40 @@ static int zforce_setconfig(struct zforce_ts *ts, char b1)
static int zforce_start(struct zforce_ts *ts)
{
struct i2c_client *client = ts->client;
- int ret;
+ int error;
dev_dbg(&client->dev, "starting device\n");
- ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
- if (ret) {
- dev_err(&client->dev, "Unable to initialize, %d\n", ret);
- return ret;
+ error = zforce_command_wait(ts, COMMAND_INITIALIZE);
+ if (error) {
+ dev_err(&client->dev, "Unable to initialize, %d\n", error);
+ return error;
}
- ret = zforce_resolution(ts, ts->prop.max_x, ts->prop.max_y);
- if (ret) {
- dev_err(&client->dev, "Unable to set resolution, %d\n", ret);
- goto error;
+ error = zforce_resolution(ts, ts->prop.max_x, ts->prop.max_y);
+ if (error) {
+ dev_err(&client->dev, "Unable to set resolution, %d\n", error);
+ goto err_deactivate;
}
- ret = zforce_scan_frequency(ts, 10, 50, 50);
- if (ret) {
+ error = zforce_scan_frequency(ts, 10, 50, 50);
+ if (error) {
dev_err(&client->dev, "Unable to set scan frequency, %d\n",
- ret);
- goto error;
+ error);
+ goto err_deactivate;
}
- ret = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
- if (ret) {
+ error = zforce_setconfig(ts, SETCONFIG_DUALTOUCH);
+ if (error) {
dev_err(&client->dev, "Unable to set config\n");
- goto error;
+ goto err_deactivate;
}
/* start sending touch events */
- ret = zforce_command(ts, COMMAND_DATAREQUEST);
- if (ret) {
+ error = zforce_command(ts, COMMAND_DATAREQUEST);
+ if (error) {
dev_err(&client->dev, "Unable to request data\n");
- goto error;
+ goto err_deactivate;
}
/*
@@ -312,24 +279,24 @@ static int zforce_start(struct zforce_ts *ts)
return 0;
-error:
+err_deactivate:
zforce_command_wait(ts, COMMAND_DEACTIVATE);
- return ret;
+ return error;
}
static int zforce_stop(struct zforce_ts *ts)
{
struct i2c_client *client = ts->client;
- int ret;
+ int error;
dev_dbg(&client->dev, "stopping device\n");
/* Deactivates touch sensing and puts the device into sleep. */
- ret = zforce_command_wait(ts, COMMAND_DEACTIVATE);
- if (ret != 0) {
+ error = zforce_command_wait(ts, COMMAND_DEACTIVATE);
+ if (error) {
dev_err(&client->dev, "could not deactivate device, %d\n",
- ret);
- return ret;
+ error);
+ return error;
}
return 0;
@@ -340,6 +307,7 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
struct i2c_client *client = ts->client;
struct zforce_point point;
int count, i, num = 0;
+ u8 *p;
count = payload[0];
if (count > ZFORCE_REPORT_POINTS) {
@@ -350,10 +318,10 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
}
for (i = 0; i < count; i++) {
- point.coord_x =
- payload[9 * i + 2] << 8 | payload[9 * i + 1];
- point.coord_y =
- payload[9 * i + 4] << 8 | payload[9 * i + 3];
+ p = &payload[i * 9 + 1];
+
+ point.coord_x = get_unaligned_le16(&p[0]);
+ point.coord_y = get_unaligned_le16(&p[2]);
if (point.coord_x > ts->prop.max_x ||
point.coord_y > ts->prop.max_y) {
@@ -362,18 +330,16 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
point.coord_x = point.coord_y = 0;
}
- point.state = payload[9 * i + 5] & 0x0f;
- point.id = (payload[9 * i + 5] & 0xf0) >> 4;
+ point.state = p[4] & 0x0f;
+ point.id = (p[4] & 0xf0) >> 4;
/* determine touch major, minor and orientation */
- point.area_major = max(payload[9 * i + 6],
- payload[9 * i + 7]);
- point.area_minor = min(payload[9 * i + 6],
- payload[9 * i + 7]);
- point.orientation = payload[9 * i + 6] > payload[9 * i + 7];
+ point.area_major = max(p[5], p[6]);
+ point.area_minor = min(p[5], p[6]);
+ point.orientation = p[5] > p[6];
- point.pressure = payload[9 * i + 8];
- point.prblty = payload[9 * i + 9];
+ point.pressure = p[7];
+ point.prblty = p[8];
dev_dbg(&client->dev,
"point %d/%d: state %d, id %d, pressure %d, prblty %d, x %d, y %d, amajor %d, aminor %d, ori %d\n",
@@ -386,10 +352,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
/* the zforce id starts with "1", so needs to be decreased */
input_mt_slot(ts->input, point.id - 1);
- input_mt_report_slot_state(ts->input, MT_TOOL_FINGER,
- point.state != STATE_UP);
-
- if (point.state != STATE_UP) {
+ if (input_mt_report_slot_state(ts->input, MT_TOOL_FINGER,
+ point.state != STATE_UP)) {
touchscreen_report_pos(ts->input, &ts->prop,
point.coord_x, point.coord_y,
true);
@@ -417,41 +381,35 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf)
struct i2c_client *client = ts->client;
int ret;
- mutex_lock(&ts->access_mutex);
-
/* read 2 byte message header */
ret = i2c_master_recv(client, buf, 2);
if (ret < 0) {
dev_err(&client->dev, "error reading header: %d\n", ret);
- goto unlock;
+ return ret;
}
if (buf[PAYLOAD_HEADER] != FRAME_START) {
dev_err(&client->dev, "invalid frame start: %d\n", buf[0]);
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
if (buf[PAYLOAD_LENGTH] == 0) {
dev_err(&client->dev, "invalid payload length: %d\n",
buf[PAYLOAD_LENGTH]);
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
/* read the message */
ret = i2c_master_recv(client, &buf[PAYLOAD_BODY], buf[PAYLOAD_LENGTH]);
if (ret < 0) {
dev_err(&client->dev, "error reading payload: %d\n", ret);
- goto unlock;
+ return ret;
}
dev_dbg(&client->dev, "read %d bytes for response command 0x%x\n",
buf[PAYLOAD_LENGTH], buf[PAYLOAD_BODY]);
-unlock:
- mutex_unlock(&ts->access_mutex);
- return ret;
+ return 0;
}
static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
@@ -482,9 +440,10 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
- int ret;
+ int error;
u8 payload_buffer[FRAME_MAXSIZE];
u8 *payload;
+ bool suspending;
/*
* When still suspended, return.
@@ -498,7 +457,8 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
dev_dbg(&client->dev, "handling interrupt\n");
/* Don't emit wakeup events from commands run by zforce_suspend */
- if (!ts->suspending && device_may_wakeup(&client->dev))
+ suspending = READ_ONCE(ts->suspending);
+ if (!suspending && device_may_wakeup(&client->dev))
pm_stay_awake(&client->dev);
/*
@@ -511,10 +471,10 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
* no IRQ any more)
*/
do {
- ret = zforce_read_packet(ts, payload_buffer);
- if (ret < 0) {
+ error = zforce_read_packet(ts, payload_buffer);
+ if (error) {
dev_err(&client->dev,
- "could not read packet, ret: %d\n", ret);
+ "could not read packet, ret: %d\n", error);
break;
}
@@ -526,7 +486,7 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
* Always report touch-events received while
* suspending, when being a wakeup source
*/
- if (ts->suspending && device_may_wakeup(&client->dev))
+ if (suspending && device_may_wakeup(&client->dev))
pm_wakeup_event(&client->dev, 500);
zforce_touch_event(ts, &payload[RESPONSE_DATA]);
break;
@@ -550,14 +510,15 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
* Version Payload Results
* [2:major] [2:minor] [2:build] [2:rev]
*/
- ts->version_major = (payload[RESPONSE_DATA + 1] << 8) |
- payload[RESPONSE_DATA];
- ts->version_minor = (payload[RESPONSE_DATA + 3] << 8) |
- payload[RESPONSE_DATA + 2];
- ts->version_build = (payload[RESPONSE_DATA + 5] << 8) |
- payload[RESPONSE_DATA + 4];
- ts->version_rev = (payload[RESPONSE_DATA + 7] << 8) |
- payload[RESPONSE_DATA + 6];
+ ts->version_major =
+ get_unaligned_le16(&payload[RESPONSE_DATA]);
+ ts->version_minor =
+ get_unaligned_le16(&payload[RESPONSE_DATA + 2]);
+ ts->version_build =
+ get_unaligned_le16(&payload[RESPONSE_DATA + 4]);
+ ts->version_rev =
+ get_unaligned_le16(&payload[RESPONSE_DATA + 6]);
+
dev_dbg(&ts->client->dev,
"Firmware Version %04x:%04x %04x:%04x\n",
ts->version_major, ts->version_minor,
@@ -579,7 +540,7 @@ static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
}
} while (gpiod_get_value_cansleep(ts->gpio_int));
- if (!ts->suspending && device_may_wakeup(&client->dev))
+ if (!suspending && device_may_wakeup(&client->dev))
pm_relax(&client->dev);
dev_dbg(&client->dev, "finished interrupt\n");
@@ -598,24 +559,20 @@ static void zforce_input_close(struct input_dev *dev)
{
struct zforce_ts *ts = input_get_drvdata(dev);
struct i2c_client *client = ts->client;
- int ret;
+ int error;
- ret = zforce_stop(ts);
- if (ret)
+ error = zforce_stop(ts);
+ if (error)
dev_warn(&client->dev, "stopping zforce failed\n");
-
- return;
}
-static int zforce_suspend(struct device *dev)
+static int __zforce_suspend(struct zforce_ts *ts)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct zforce_ts *ts = i2c_get_clientdata(client);
+ struct i2c_client *client = ts->client;
struct input_dev *input = ts->input;
- int ret = 0;
+ int error;
- mutex_lock(&input->mutex);
- ts->suspending = true;
+ guard(mutex)(&input->mutex);
/*
* When configured as a wakeup source device should always wake
@@ -626,9 +583,9 @@ static int zforce_suspend(struct device *dev)
/* Need to start device, if not open, to be a wakeup source. */
if (!input_device_enabled(input)) {
- ret = zforce_start(ts);
- if (ret)
- goto unlock;
+ error = zforce_start(ts);
+ if (error)
+ return error;
}
enable_irq_wake(client->irq);
@@ -636,18 +593,30 @@ static int zforce_suspend(struct device *dev)
dev_dbg(&client->dev,
"suspend without being a wakeup source\n");
- ret = zforce_stop(ts);
- if (ret)
- goto unlock;
+ error = zforce_stop(ts);
+ if (error)
+ return error;
disable_irq(client->irq);
}
ts->suspended = true;
+ return 0;
+}
+
+static int zforce_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct zforce_ts *ts = i2c_get_clientdata(client);
+ int ret;
+
+ WRITE_ONCE(ts->suspending, true);
+ smp_mb();
-unlock:
- ts->suspending = false;
- mutex_unlock(&input->mutex);
+ ret = __zforce_suspend(ts);
+
+ smp_mb();
+ WRITE_ONCE(ts->suspending, false);
return ret;
}
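
The suspend path above brackets __zforce_suspend() with WRITE_ONCE()/smp_mb() on ts->suspending, and the IRQ thread samples the flag once with READ_ONCE() so every check in the handler agrees. A minimal sketch of that pairing, with illustrative demo_* names (not part of the patch):

    #include <linux/compiler.h>
    #include <asm/barrier.h>

    static bool demo_suspending;

    static void demo_suspend_writer(void)
    {
        WRITE_ONCE(demo_suspending, true);
        smp_mb();                  /* publish the flag before the work */
        /* ... run suspend-time commands that may raise interrupts ... */
        smp_mb();                  /* finish the work before clearing */
        WRITE_ONCE(demo_suspending, false);
    }

    static bool demo_irq_sample(void)
    {
        /* read once so the wakeup checks cannot see two different values */
        return READ_ONCE(demo_suspending);
    }
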
@@ -657,9 +626,9 @@ static int zforce_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct zforce_ts *ts = i2c_get_clientdata(client);
struct input_dev *input = ts->input;
- int ret = 0;
+ int error;
- mutex_lock(&input->mutex);
+ guard(mutex)(&input->mutex);
ts->suspended = false;
@@ -670,24 +639,21 @@ static int zforce_resume(struct device *dev)
/* need to stop device if it was not open on suspend */
if (!input_device_enabled(input)) {
- ret = zforce_stop(ts);
- if (ret)
- goto unlock;
+ error = zforce_stop(ts);
+ if (error)
+ return error;
}
} else if (input_device_enabled(input)) {
dev_dbg(&client->dev, "resume without being a wakeup source\n");
enable_irq(client->irq);
- ret = zforce_start(ts);
- if (ret < 0)
- goto unlock;
+ error = zforce_start(ts);
+ if (error)
+ return error;
}
-unlock:
- mutex_unlock(&input->mutex);
-
- return ret;
+ return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(zforce_pm_ops, zforce_suspend, zforce_resume);
@@ -696,46 +662,27 @@ static void zforce_reset(void *data)
{
struct zforce_ts *ts = data;
- zforce_reset_assert(ts);
-
+ gpiod_set_value_cansleep(ts->gpio_rst, 1);
udelay(10);
-
- if (!IS_ERR(ts->reg_vdd))
- regulator_disable(ts->reg_vdd);
}
-static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev)
+static void zforce_ts_parse_legacy_properties(struct zforce_ts *ts)
{
- struct zforce_ts_platdata *pdata;
- struct device_node *np = dev->of_node;
+ u32 x_max = 0;
+ u32 y_max = 0;
- if (!np)
- return ERR_PTR(-ENOENT);
+ device_property_read_u32(&ts->client->dev, "x-size", &x_max);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, x_max, 0, 0);
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "failed to allocate platform data\n");
- return ERR_PTR(-ENOMEM);
- }
-
- of_property_read_u32(np, "x-size", &pdata->x_max);
- of_property_read_u32(np, "y-size", &pdata->y_max);
-
- return pdata;
+ device_property_read_u32(&ts->client->dev, "y-size", &y_max);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, y_max, 0, 0);
}
static int zforce_probe(struct i2c_client *client)
{
- const struct zforce_ts_platdata *pdata = dev_get_platdata(&client->dev);
struct zforce_ts *ts;
struct input_dev *input_dev;
- int ret;
-
- if (!pdata) {
- pdata = zforce_parse_dt(&client->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- }
+ int error;
ts = devm_kzalloc(&client->dev, sizeof(struct zforce_ts), GFP_KERNEL);
if (!ts)
@@ -743,22 +690,18 @@ static int zforce_probe(struct i2c_client *client)
ts->gpio_rst = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_HIGH);
- if (IS_ERR(ts->gpio_rst)) {
- ret = PTR_ERR(ts->gpio_rst);
- dev_err(&client->dev,
- "failed to request reset GPIO: %d\n", ret);
- return ret;
- }
+ error = PTR_ERR_OR_ZERO(ts->gpio_rst);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request reset GPIO\n");
if (ts->gpio_rst) {
ts->gpio_int = devm_gpiod_get_optional(&client->dev, "irq",
GPIOD_IN);
- if (IS_ERR(ts->gpio_int)) {
- ret = PTR_ERR(ts->gpio_int);
- dev_err(&client->dev,
- "failed to request interrupt GPIO: %d\n", ret);
- return ret;
- }
+ error = PTR_ERR_OR_ZERO(ts->gpio_int);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request interrupt GPIO\n");
} else {
/*
* Deprecated GPIO handling for compatibility
@@ -768,66 +711,45 @@ static int zforce_probe(struct i2c_client *client)
/* INT GPIO */
ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0,
GPIOD_IN);
- if (IS_ERR(ts->gpio_int)) {
- ret = PTR_ERR(ts->gpio_int);
- dev_err(&client->dev,
- "failed to request interrupt GPIO: %d\n", ret);
- return ret;
- }
+
+ error = PTR_ERR_OR_ZERO(ts->gpio_int);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request interrupt GPIO\n");
/* RST GPIO */
ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1,
GPIOD_OUT_HIGH);
- if (IS_ERR(ts->gpio_rst)) {
- ret = PTR_ERR(ts->gpio_rst);
- dev_err(&client->dev,
- "failed to request reset GPIO: %d\n", ret);
- return ret;
- }
- }
-
- ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd");
- if (IS_ERR(ts->reg_vdd)) {
- ret = PTR_ERR(ts->reg_vdd);
- if (ret == -EPROBE_DEFER)
- return ret;
- } else {
- ret = regulator_enable(ts->reg_vdd);
- if (ret)
- return ret;
-
- /*
- * according to datasheet add 100us grace time after regular
- * regulator enable delay.
- */
- udelay(100);
+ error = PTR_ERR_OR_ZERO(ts->gpio_rst);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request reset GPIO\n");
}
- ret = devm_add_action(&client->dev, zforce_reset, ts);
- if (ret) {
- dev_err(&client->dev, "failed to register reset action, %d\n",
- ret);
+ error = devm_regulator_get_enable(&client->dev, "vdd");
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to request vdd supply\n");
- /* hereafter the regulator will be disabled by the action */
- if (!IS_ERR(ts->reg_vdd))
- regulator_disable(ts->reg_vdd);
+ /*
+	 * According to the datasheet, add 100us of grace time on top of
+	 * the regulator's regular enable delay.
+ */
+ usleep_range(100, 200);
- return ret;
- }
+ error = devm_add_action_or_reset(&client->dev, zforce_reset, ts);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "failed to register reset action\n");
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
input_dev = devm_input_allocate_device(&client->dev);
- if (!input_dev) {
- dev_err(&client->dev, "could not allocate input device\n");
- return -ENOMEM;
- }
-
- mutex_init(&ts->access_mutex);
- mutex_init(&ts->command_mutex);
+ if (!input_dev)
+ return dev_err_probe(&client->dev, -ENOMEM,
+ "could not allocate input device\n");
- ts->pdata = pdata;
ts->client = client;
ts->input = input_dev;
@@ -838,28 +760,21 @@ static int zforce_probe(struct i2c_client *client)
input_dev->open = zforce_input_open;
input_dev->close = zforce_input_close;
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(EV_SYN, input_dev->evbit);
- __set_bit(EV_ABS, input_dev->evbit);
-
- /* For multi touch */
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
- pdata->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
- pdata->y_max, 0, 0);
-
+ zforce_ts_parse_legacy_properties(ts);
touchscreen_parse_properties(input_dev, true, &ts->prop);
- if (ts->prop.max_x == 0 || ts->prop.max_y == 0) {
- dev_err(&client->dev, "no size specified\n");
- return -EINVAL;
- }
+ if (ts->prop.max_x == 0 || ts->prop.max_y == 0)
+		return dev_err_probe(&client->dev, -EINVAL, "no size specified\n");
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
ZFORCE_MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
ZFORCE_MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS, INPUT_MT_DIRECT);
+
+ error = input_mt_init_slots(input_dev, ZFORCE_REPORT_POINTS,
+ INPUT_MT_DIRECT);
+ if (error)
+ return error;
input_set_drvdata(ts->input, ts);
@@ -872,57 +787,51 @@ static int zforce_probe(struct i2c_client *client)
* Therefore we can trigger the interrupt anytime it is low and do
* not need to limit it to the interrupt edge.
*/
- ret = devm_request_threaded_irq(&client->dev, client->irq,
- zforce_irq, zforce_irq_thread,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- input_dev->name, ts);
- if (ret) {
- dev_err(&client->dev, "irq %d request failed\n", client->irq);
- return ret;
- }
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ zforce_irq, zforce_irq_thread,
+ IRQF_ONESHOT, input_dev->name, ts);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "irq %d request failed\n", client->irq);
i2c_set_clientdata(client, ts);
/* let the controller boot */
- zforce_reset_deassert(ts);
+ gpiod_set_value_cansleep(ts->gpio_rst, 0);
ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
dev_warn(&client->dev, "bootcomplete timed out\n");
/* need to start device to get version information */
- ret = zforce_command_wait(ts, COMMAND_INITIALIZE);
- if (ret) {
- dev_err(&client->dev, "unable to initialize, %d\n", ret);
- return ret;
- }
+ error = zforce_command_wait(ts, COMMAND_INITIALIZE);
+ if (error)
+ return dev_err_probe(&client->dev, error, "unable to initialize\n");
/* this gets the firmware version among other information */
- ret = zforce_command_wait(ts, COMMAND_STATUS);
- if (ret < 0) {
- dev_err(&client->dev, "couldn't get status, %d\n", ret);
+ error = zforce_command_wait(ts, COMMAND_STATUS);
+ if (error) {
+ dev_err_probe(&client->dev, error, "couldn't get status\n");
zforce_stop(ts);
- return ret;
+ return error;
}
/* stop device and put it into sleep until it is opened */
- ret = zforce_stop(ts);
- if (ret < 0)
- return ret;
+ error = zforce_stop(ts);
+ if (error)
+ return error;
device_set_wakeup_capable(&client->dev, true);
- ret = input_register_device(input_dev);
- if (ret) {
- dev_err(&client->dev, "could not register input device, %d\n",
- ret);
- return ret;
- }
+ error = input_register_device(input_dev);
+ if (error)
+ return dev_err_probe(&client->dev, error,
+ "could not register input device\n");
return 0;
}
-static struct i2c_device_id zforce_idtable[] = {
+static const struct i2c_device_id zforce_idtable[] = {
{ "zforce-ts" },
{ }
};
@@ -941,6 +850,7 @@ static struct i2c_driver zforce_driver = {
.name = "zforce-ts",
.pm = pm_sleep_ptr(&zforce_pm_ops),
.of_match_table = of_match_ptr(zforce_dt_idtable),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = zforce_probe,
.id_table = zforce_idtable,
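
Two idioms carry most of the zforce cleanup above: scope-based locking with guard() from <linux/cleanup.h>, which drops the mutex on every return path (hence the removed "unlock:" labels), and dev_err_probe(), which logs and returns the error code in one step. A minimal standalone sketch of the guard() pattern, assuming illustrative demo_* names:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);

    static int demo_update(int val, int *state)
    {
        guard(mutex)(&demo_lock);   /* released automatically at scope exit */

        if (val < 0)
            return -EINVAL;         /* early return needs no unlock label */

        *state = val;
        return 0;
    }
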
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 1b4807ba4624..52b3950460e2 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -34,7 +35,13 @@
#define ZINITIX_DEBUG_REG 0x0115 /* 0~7 */
#define ZINITIX_TOUCH_MODE 0x0010
+
#define ZINITIX_CHIP_REVISION 0x0011
+#define ZINITIX_CHIP_BTX0X_MASK 0xF0F0
+#define ZINITIX_CHIP_BT4X2 0x4020
+#define ZINITIX_CHIP_BT4X3 0x4030
+#define ZINITIX_CHIP_BT4X4 0x4040
+
#define ZINITIX_FIRMWARE_VERSION 0x0012
#define ZINITIX_USB_DETECT 0x116
@@ -62,7 +69,11 @@
#define ZINITIX_Y_RESOLUTION 0x00C1
#define ZINITIX_POINT_STATUS_REG 0x0080
-#define ZINITIX_ICON_STATUS_REG 0x00AA
+
+#define ZINITIX_BT4X2_ICON_STATUS_REG 0x009A
+#define ZINITIX_BT4X3_ICON_STATUS_REG 0x00A0
+#define ZINITIX_BT4X4_ICON_STATUS_REG 0x00A0
+#define ZINITIX_BT5XX_ICON_STATUS_REG 0x00AA
#define ZINITIX_POINT_COORD_REG (ZINITIX_POINT_STATUS_REG + 2)
@@ -119,6 +130,7 @@
#define DEFAULT_TOUCH_POINT_MODE 2
#define MAX_SUPPORTED_FINGER_NUM 5
+#define MAX_SUPPORTED_BUTTON_NUM 8
#define CHIP_ON_DELAY 15 // ms
#define FIRMWARE_ON_DELAY 40 // ms
@@ -146,6 +158,13 @@ struct bt541_ts_data {
struct touchscreen_properties prop;
struct regulator_bulk_data supplies[2];
u32 zinitix_mode;
+ u32 keycodes[MAX_SUPPORTED_BUTTON_NUM];
+ int num_keycodes;
+ bool have_versioninfo;
+ u16 chip_revision;
+ u16 firmware_version;
+ u16 regdata_version;
+ u16 icon_status_reg;
};
static int zinitix_read_data(struct i2c_client *client,
@@ -190,11 +209,25 @@ static int zinitix_write_cmd(struct i2c_client *client, u16 reg)
return 0;
}
+static u16 zinitix_get_u16_reg(struct bt541_ts_data *bt541, u16 vreg)
+{
+ struct i2c_client *client = bt541->client;
+ int error;
+ __le16 val;
+
+ error = zinitix_read_data(client, vreg, (void *)&val, 2);
+ if (error)
+ return U8_MAX;
+
+ return le16_to_cpu(val);
+}
+
static int zinitix_init_touch(struct bt541_ts_data *bt541)
{
struct i2c_client *client = bt541->client;
int i;
int error;
+ u16 int_flags;
error = zinitix_write_cmd(client, ZINITIX_SWRESET_CMD);
if (error) {
@@ -202,6 +235,47 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
return error;
}
+ /*
+	 * Read and cache the chip revision and the firmware and regdata
+	 * versions the first time we get here.
+ */
+ if (!bt541->have_versioninfo) {
+ bt541->chip_revision = zinitix_get_u16_reg(bt541,
+ ZINITIX_CHIP_REVISION);
+ bt541->firmware_version = zinitix_get_u16_reg(bt541,
+ ZINITIX_FIRMWARE_VERSION);
+ bt541->regdata_version = zinitix_get_u16_reg(bt541,
+ ZINITIX_DATA_VERSION_REG);
+ bt541->have_versioninfo = true;
+
+ dev_dbg(&client->dev,
+ "chip revision %04x firmware version %04x regdata version %04x\n",
+ bt541->chip_revision, bt541->firmware_version,
+ bt541->regdata_version);
+
+ /*
+ * Determine the "icon" status register which varies by the
+ * chip.
+ */
+ switch (bt541->chip_revision & ZINITIX_CHIP_BTX0X_MASK) {
+ case ZINITIX_CHIP_BT4X2:
+ bt541->icon_status_reg = ZINITIX_BT4X2_ICON_STATUS_REG;
+ break;
+
+ case ZINITIX_CHIP_BT4X3:
+ bt541->icon_status_reg = ZINITIX_BT4X3_ICON_STATUS_REG;
+ break;
+
+ case ZINITIX_CHIP_BT4X4:
+ bt541->icon_status_reg = ZINITIX_BT4X4_ICON_STATUS_REG;
+ break;
+
+ default:
+ bt541->icon_status_reg = ZINITIX_BT5XX_ICON_STATUS_REG;
+ break;
+ }
+ }
+
error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, 0x0);
if (error) {
dev_err(&client->dev,
@@ -225,6 +299,11 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
if (error)
return error;
+ error = zinitix_write_u16(client, ZINITIX_BUTTON_SUPPORTED_NUM,
+ bt541->num_keycodes);
+ if (error)
+ return error;
+
error = zinitix_write_u16(client, ZINITIX_INITIAL_TOUCH_MODE,
bt541->zinitix_mode);
if (error)
@@ -235,9 +314,11 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
if (error)
return error;
- error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG,
- BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE |
- BIT_UP);
+ int_flags = BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE | BIT_UP;
+ if (bt541->num_keycodes)
+ int_flags |= BIT_ICON_EVENT;
+
+ error = zinitix_write_u16(client, ZINITIX_INT_ENABLE_FLAG, int_flags);
if (error)
return error;
@@ -350,12 +431,22 @@ static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot,
}
}
+static void zinitix_report_keys(struct bt541_ts_data *bt541, u16 icon_events)
+{
+ int i;
+
+ for (i = 0; i < bt541->num_keycodes; i++)
+ input_report_key(bt541->input_dev,
+ bt541->keycodes[i], icon_events & BIT(i));
+}
+
static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
{
struct bt541_ts_data *bt541 = bt541_handler;
struct i2c_client *client = bt541->client;
struct touch_event touch_event;
unsigned long finger_mask;
+ __le16 icon_events;
int error;
int i;
@@ -368,6 +459,17 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
goto out;
}
+ if (le16_to_cpu(touch_event.status) & BIT_ICON_EVENT) {
+ error = zinitix_read_data(bt541->client, bt541->icon_status_reg,
+ &icon_events, sizeof(icon_events));
+ if (error) {
+ dev_err(&client->dev, "Failed to read icon events\n");
+ goto out;
+ }
+
+ zinitix_report_keys(bt541, le16_to_cpu(icon_events));
+ }
+
finger_mask = touch_event.finger_mask;
for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) {
const struct point_coord *p = &touch_event.point_coord[i];
@@ -453,6 +555,7 @@ static int zinitix_init_input_dev(struct bt541_ts_data *bt541)
{
struct input_dev *input_dev;
int error;
+ int i;
input_dev = devm_input_allocate_device(&bt541->client->dev);
if (!input_dev) {
@@ -470,6 +573,14 @@ static int zinitix_init_input_dev(struct bt541_ts_data *bt541)
input_dev->open = zinitix_input_open;
input_dev->close = zinitix_input_close;
+ if (bt541->num_keycodes) {
+ input_dev->keycode = bt541->keycodes;
+ input_dev->keycodemax = bt541->num_keycodes;
+ input_dev->keycodesize = sizeof(bt541->keycodes[0]);
+ for (i = 0; i < bt541->num_keycodes; i++)
+ input_set_capability(input_dev, EV_KEY, bt541->keycodes[i]);
+ }
+
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
@@ -534,6 +645,21 @@ static int zinitix_ts_probe(struct i2c_client *client)
return error;
}
+ bt541->num_keycodes = device_property_count_u32(&client->dev, "linux,keycodes");
+ if (bt541->num_keycodes > ARRAY_SIZE(bt541->keycodes)) {
+ dev_err(&client->dev, "too many keys defined (%d)\n", bt541->num_keycodes);
+ return -EINVAL;
+ }
+
+ error = device_property_read_u32_array(&client->dev, "linux,keycodes",
+ bt541->keycodes,
+ bt541->num_keycodes);
+ if (error) {
+ dev_err(&client->dev,
+ "Unable to parse \"linux,keycodes\" property: %d\n", error);
+ return error;
+ }
+
error = zinitix_init_input_dev(bt541);
if (error) {
dev_err(&client->dev,
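
The zinitix probe additions parse the optional "linux,keycodes" property via device_property_count_u32() and device_property_read_u32_array(). One hedge worth noting: the count helper returns a negative errno when the property is absent, so a parser that must tolerate keyless panels would treat that case as zero keys, roughly as in this sketch (DEMO_MAX_KEYS and demo_parse_keycodes() are illustrative):

    #include <linux/property.h>

    #define DEMO_MAX_KEYS 8

    static int demo_parse_keycodes(struct device *dev, u32 *keycodes)
    {
        int n = device_property_count_u32(dev, "linux,keycodes");

        if (n <= 0)
            return 0;               /* property absent: no touchkeys */
        if (n > DEMO_MAX_KEYS)
            return -EINVAL;         /* more keys than we can report */

        /* returns a negative errno on failure, else the key count */
        return device_property_read_u32_array(dev, "linux,keycodes",
                                              keycodes, n) ?: n;
    }
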
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
index f788db15cd76..b956e4050f38 100644
--- a/drivers/interconnect/icc-clk.c
+++ b/drivers/interconnect/icc-clk.c
@@ -87,6 +87,7 @@ struct icc_provider *icc_clk_register(struct device *dev,
onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL);
if (!onecell)
return ERR_PTR(-ENOMEM);
+ onecell->num_nodes = 2 * num_clocks;
qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL);
if (!qp)
@@ -133,8 +134,6 @@ struct icc_provider *icc_clk_register(struct device *dev,
onecell->nodes[j++] = node;
}
- onecell->num_nodes = j;
-
ret = icc_provider_register(provider);
if (ret)
goto err;
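
The icc-clk fix above sets onecell->num_nodes immediately after the struct_size() allocation instead of after the fill loop, so the count matches the allocation on every exit path. For context, struct_size() is the overflow-checked way to size a struct ending in a flexible array; a sketch with illustrative demo_* names:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct icc_node;                    /* only pointers are stored */

    struct demo_onecell {
        unsigned int num_nodes;
        struct icc_node *nodes[];       /* flexible array member */
    };

    static struct demo_onecell *demo_alloc(unsigned int n)
    {
        /* struct_size() = sizeof(*oc) + n * sizeof(oc->nodes[0]),
         * saturating instead of wrapping on overflow */
        struct demo_onecell *oc = kzalloc(struct_size(oc, nodes, n),
                                          GFP_KERNEL);

        if (oc)
            oc->num_nodes = n;          /* fixed before anything can fail */
        return oc;
    }
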
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 9b84cd8becef..de96d4661340 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -26,6 +26,15 @@ config INTERCONNECT_QCOM_MSM8916
This is a driver for the Qualcomm Network-on-Chip on msm8916-based
platforms.
+config INTERCONNECT_QCOM_MSM8937
+ tristate "Qualcomm MSM8937 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8937-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8939
tristate "Qualcomm MSM8939 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -53,6 +62,15 @@ config INTERCONNECT_QCOM_MSM8974
This is a driver for the Qualcomm Network-on-Chip on msm8974-based
platforms.
+config INTERCONNECT_QCOM_MSM8976
+ tristate "Qualcomm MSM8976 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8976-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8996
tristate "Qualcomm MSM8996 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 7a7b6a71876f..bfeea8416fcf 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -6,9 +6,11 @@ interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8909-objs := msm8909.o
qnoc-msm8916-objs := msm8916.o
+qnoc-msm8937-objs := msm8937.o
qnoc-msm8939-objs := msm8939.o
qnoc-msm8953-objs := msm8953.o
qnoc-msm8974-objs := msm8974.o
+qnoc-msm8976-objs := msm8976.o
qnoc-msm8996-objs := msm8996.o
icc-osm-l3-objs := osm-l3.o
qnoc-qcm2290-objs := qcm2290.o
@@ -41,9 +43,11 @@ icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8909) += qnoc-msm8909.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8937) += qnoc-msm8937.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8953) += qnoc-msm8953.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8976) += qnoc-msm8976.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8996) += qnoc-msm8996.o
obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCM2290) += qnoc-qcm2290.o
diff --git a/drivers/interconnect/qcom/msm8937.c b/drivers/interconnect/qcom/msm8937.c
new file mode 100644
index 000000000000..052b14c28ef8
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8937.c
@@ -0,0 +1,1350 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on data from msm8937-bus.dtsi in Qualcomm's msm-3.18 release:
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/interconnect/qcom,msm8937.h>
+
+#include "icc-rpm.h"
+
+enum {
+ QNOC_MASTER_AMPSS_M0 = 1,
+ QNOC_MASTER_GRAPHICS_3D,
+ QNOC_SNOC_BIMC_0_MAS,
+ QNOC_SNOC_BIMC_2_MAS,
+ QNOC_SNOC_BIMC_1_MAS,
+ QNOC_MASTER_TCU_0,
+ QNOC_MASTER_SPDM,
+ QNOC_MASTER_BLSP_1,
+ QNOC_MASTER_BLSP_2,
+ QNOC_MASTER_USB_HS,
+ QNOC_MASTER_XM_USB_HS1,
+ QNOC_MASTER_CRYPTO_CORE0,
+ QNOC_MASTER_SDCC_1,
+ QNOC_MASTER_SDCC_2,
+ QNOC_SNOC_PNOC_MAS,
+ QNOC_MASTER_QDSS_BAM,
+ QNOC_BIMC_SNOC_MAS,
+ QNOC_MASTER_JPEG,
+ QNOC_MASTER_MDP_PORT0,
+ QNOC_PNOC_SNOC_MAS,
+ QNOC_MASTER_VIDEO_P0,
+ QNOC_MASTER_VFE,
+ QNOC_MASTER_VFE1,
+ QNOC_MASTER_CPP,
+ QNOC_MASTER_QDSS_ETR,
+ QNOC_PNOC_M_0,
+ QNOC_PNOC_M_1,
+ QNOC_PNOC_INT_0,
+ QNOC_PNOC_INT_1,
+ QNOC_PNOC_INT_2,
+ QNOC_PNOC_INT_3,
+ QNOC_PNOC_SLV_0,
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_4,
+ QNOC_PNOC_SLV_6,
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_SLV_8,
+ QNOC_SNOC_QDSS_INT,
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_2,
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_SPDM_WRAPPER,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_TCSR,
+ QNOC_SLAVE_SNOC_CFG,
+ QNOC_SLAVE_MESSAGE_RAM,
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG,
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_TLMM,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_BLSP_2,
+ QNOC_SLAVE_PMIC_ARB,
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_TCU,
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_SLAVE_APPSS,
+ QNOC_SLAVE_WCSS,
+ QNOC_SNOC_BIMC_0_SLV,
+ QNOC_SNOC_BIMC_1_SLV,
+ QNOC_SNOC_BIMC_2_SLV,
+ QNOC_SLAVE_OCIMEM,
+ QNOC_SNOC_PNOC_SLV,
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64,
+ QNOC_SLAVE_LPASS,
+};
+
+static const u16 mas_apps_proc_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = QNOC_MASTER_AMPSS_M0,
+ .buswidth = 8,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links,
+};
+
+static const u16 mas_oxili_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = QNOC_MASTER_GRAPHICS_3D,
+ .buswidth = 8,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_oxili_links),
+ .links = mas_oxili_links,
+};
+
+static const u16 mas_snoc_bimc_0_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_snoc_bimc_0 = {
+ .name = "mas_snoc_bimc_0",
+ .id = QNOC_SNOC_BIMC_0_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_0_links),
+ .links = mas_snoc_bimc_0_links,
+};
+
+static const u16 mas_snoc_bimc_2_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_snoc_bimc_2 = {
+ .name = "mas_snoc_bimc_2",
+ .id = QNOC_SNOC_BIMC_2_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 108,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_2_links),
+ .links = mas_snoc_bimc_2_links,
+};
+
+static const u16 mas_snoc_bimc_1_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc_1 = {
+ .name = "mas_snoc_bimc_1",
+ .id = QNOC_SNOC_BIMC_1_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 76,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_1_links),
+ .links = mas_snoc_bimc_1_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .name = "mas_tcu_0",
+ .id = QNOC_MASTER_TCU_0,
+ .buswidth = 8,
+ .mas_rpm_id = 102,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = QNOC_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = 50,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = QNOC_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_blsp_2_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+ .name = "mas_blsp_2",
+ .id = QNOC_MASTER_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = 39,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_2_links),
+ .links = mas_blsp_2_links,
+};
+
+static const u16 mas_usb_hs1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_usb_hs1 = {
+ .name = "mas_usb_hs1",
+ .id = QNOC_MASTER_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_usb_hs1_links),
+ .links = mas_usb_hs1_links,
+};
+
+static const u16 mas_xi_usb_hs1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_xi_usb_hs1 = {
+ .name = "mas_xi_usb_hs1",
+ .id = QNOC_MASTER_XM_USB_HS1,
+ .buswidth = 8,
+ .mas_rpm_id = 138,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_xi_usb_hs1_links),
+ .links = mas_xi_usb_hs1_links,
+};
+
+static const u16 mas_crypto_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_crypto = {
+ .name = "mas_crypto",
+ .id = QNOC_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_crypto_links),
+ .links = mas_crypto_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = QNOC_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = QNOC_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_snoc_pcnoc_links[] = {
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_INT_2,
+ QNOC_PNOC_INT_3
+};
+
+static struct qcom_icc_node mas_snoc_pcnoc = {
+ .name = "mas_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 77,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links),
+ .links = mas_snoc_pcnoc_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = QNOC_MASTER_QDSS_BAM,
+ .buswidth = 4,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_jpeg_links[] = {
+ QNOC_SNOC_BIMC_2_SLV
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = QNOC_MASTER_JPEG,
+ .buswidth = 16,
+ .mas_rpm_id = 7,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_jpeg_links),
+ .links = mas_jpeg_links,
+};
+
+static const u16 mas_mdp_links[] = {
+ QNOC_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node mas_mdp = {
+ .name = "mas_mdp",
+ .id = QNOC_MASTER_MDP_PORT0,
+ .buswidth = 16,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mdp_links),
+ .links = mas_mdp_links,
+};
+
+static const u16 mas_pcnoc_snoc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_BIMC_1_SLV
+};
+
+static struct qcom_icc_node mas_pcnoc_snoc = {
+ .name = "mas_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links),
+ .links = mas_pcnoc_snoc_links,
+};
+
+static const u16 mas_venus_links[] = {
+ QNOC_SNOC_BIMC_2_SLV
+};
+
+static struct qcom_icc_node mas_venus = {
+ .name = "mas_venus",
+ .id = QNOC_MASTER_VIDEO_P0,
+ .buswidth = 16,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_venus_links),
+ .links = mas_venus_links,
+};
+
+static const u16 mas_vfe0_links[] = {
+ QNOC_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node mas_vfe0 = {
+ .name = "mas_vfe0",
+ .id = QNOC_MASTER_VFE,
+ .buswidth = 16,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_vfe0_links),
+ .links = mas_vfe0_links,
+};
+
+static const u16 mas_vfe1_links[] = {
+ QNOC_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node mas_vfe1 = {
+ .name = "mas_vfe1",
+ .id = QNOC_MASTER_VFE1,
+ .buswidth = 16,
+ .mas_rpm_id = 133,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 13,
+ .num_links = ARRAY_SIZE(mas_vfe1_links),
+ .links = mas_vfe1_links,
+};
+
+static const u16 mas_cpp_links[] = {
+ QNOC_SNOC_BIMC_2_SLV
+};
+
+static struct qcom_icc_node mas_cpp = {
+ .name = "mas_cpp",
+ .id = QNOC_MASTER_CPP,
+ .buswidth = 16,
+ .mas_rpm_id = 115,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_cpp_links),
+ .links = mas_cpp_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = QNOC_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 pcnoc_m_0_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node pcnoc_m_0 = {
+ .name = "pcnoc_m_0",
+ .id = QNOC_PNOC_M_0,
+ .buswidth = 4,
+ .mas_rpm_id = 87,
+ .slv_rpm_id = 116,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(pcnoc_m_0_links),
+ .links = pcnoc_m_0_links,
+};
+
+static const u16 pcnoc_m_1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node pcnoc_m_1 = {
+ .name = "pcnoc_m_1",
+ .id = QNOC_PNOC_M_1,
+ .buswidth = 4,
+ .mas_rpm_id = 88,
+ .slv_rpm_id = 117,
+ .num_links = ARRAY_SIZE(pcnoc_m_1_links),
+ .links = pcnoc_m_1_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_INT_3,
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = QNOC_PNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 85,
+ .slv_rpm_id = 114,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_1_links[] = {
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_INT_3,
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_1 = {
+ .name = "pcnoc_int_1",
+ .id = QNOC_PNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_int_1_links),
+ .links = pcnoc_int_1_links,
+};
+
+static const u16 pcnoc_int_2_links[] = {
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_6,
+ QNOC_PNOC_SLV_8
+};
+
+static struct qcom_icc_node pcnoc_int_2 = {
+ .name = "pcnoc_int_2",
+ .id = QNOC_PNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 124,
+ .slv_rpm_id = 184,
+ .num_links = ARRAY_SIZE(pcnoc_int_2_links),
+ .links = pcnoc_int_2_links,
+};
+
+static const u16 pcnoc_int_3_links[] = {
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_0,
+ QNOC_PNOC_SLV_4,
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_TCU
+};
+
+static struct qcom_icc_node pcnoc_int_3 = {
+ .name = "pcnoc_int_3",
+ .id = QNOC_PNOC_INT_3,
+ .buswidth = 8,
+ .mas_rpm_id = 125,
+ .slv_rpm_id = 185,
+ .num_links = ARRAY_SIZE(pcnoc_int_3_links),
+ .links = pcnoc_int_3_links,
+};
+
+static const u16 pcnoc_s_0_links[] = {
+ QNOC_SLAVE_SPDM_WRAPPER,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_SDCC_2
+};
+
+static struct qcom_icc_node pcnoc_s_0 = {
+ .name = "pcnoc_s_0",
+ .id = QNOC_PNOC_SLV_0,
+ .buswidth = 4,
+ .mas_rpm_id = 89,
+ .slv_rpm_id = 118,
+ .num_links = ARRAY_SIZE(pcnoc_s_0_links),
+ .links = pcnoc_s_0_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ QNOC_SLAVE_TCSR
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = QNOC_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = 90,
+ .slv_rpm_id = 119,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ QNOC_SLAVE_SNOC_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = QNOC_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = 91,
+ .slv_rpm_id = 120,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ QNOC_SLAVE_MESSAGE_RAM
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = QNOC_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = 92,
+ .slv_rpm_id = 121,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = QNOC_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = 93,
+ .slv_rpm_id = 122,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_6_links[] = {
+ QNOC_SLAVE_TLMM,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_BLSP_2
+};
+
+static struct qcom_icc_node pcnoc_s_6 = {
+ .name = "pcnoc_s_6",
+ .id = QNOC_PNOC_SLV_6,
+ .buswidth = 4,
+ .mas_rpm_id = 94,
+ .slv_rpm_id = 123,
+ .num_links = ARRAY_SIZE(pcnoc_s_6_links),
+ .links = pcnoc_s_6_links,
+};
+
+static const u16 pcnoc_s_7_links[] = {
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_7 = {
+ .name = "pcnoc_s_7",
+ .id = QNOC_PNOC_SLV_7,
+ .buswidth = 4,
+ .mas_rpm_id = 95,
+ .slv_rpm_id = 124,
+ .num_links = ARRAY_SIZE(pcnoc_s_7_links),
+ .links = pcnoc_s_7_links,
+};
+
+static const u16 pcnoc_s_8_links[] = {
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_CRYPTO_0_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_8 = {
+ .name = "pcnoc_s_8",
+ .id = QNOC_PNOC_SLV_8,
+ .buswidth = 4,
+ .mas_rpm_id = 96,
+ .slv_rpm_id = 125,
+ .num_links = ARRAY_SIZE(pcnoc_s_8_links),
+ .links = pcnoc_s_8_links,
+};
+
+static const u16 qdss_int_links[] = {
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_BIMC_1_SLV
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = QNOC_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = 98,
+ .slv_rpm_id = 128,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ QNOC_SLAVE_LPASS,
+ QNOC_SLAVE_WCSS,
+ QNOC_SLAVE_APPSS
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = QNOC_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_OCIMEM,
+ QNOC_SNOC_PNOC_SLV
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = QNOC_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 100,
+ .slv_rpm_id = 131,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_2_links[] = {
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64
+};
+
+static struct qcom_icc_node snoc_int_2 = {
+ .name = "snoc_int_2",
+ .id = QNOC_SNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 134,
+ .slv_rpm_id = 197,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_int_2_links),
+ .links = snoc_int_2_links,
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = QNOC_SLAVE_EBI_CH0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QNOC_BIMC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QNOC_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static struct qcom_icc_node slv_spdm = {
+ .name = "slv_spdm",
+ .id = QNOC_SLAVE_SPDM_WRAPPER,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 60,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QNOC_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QNOC_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QNOC_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QNOC_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QNOC_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_camera_ss_cfg = {
+ .name = "slv_camera_ss_cfg",
+ .id = QNOC_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_disp_ss_cfg = {
+ .name = "slv_disp_ss_cfg",
+ .id = QNOC_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = QNOC_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QNOC_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = QNOC_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 51,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = QNOC_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39,
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+ .name = "slv_blsp_2",
+ .id = QNOC_SLAVE_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 37,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QNOC_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QNOC_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QNOC_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = QNOC_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40,
+};
+
+static struct qcom_icc_node slv_tcu = {
+ .name = "slv_tcu",
+ .id = QNOC_SLAVE_TCU,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 133,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_pcnoc_snoc_links[] = {
+ QNOC_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_pcnoc_snoc = {
+ .name = "slv_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links),
+ .links = slv_pcnoc_snoc_links,
+};
+
+static struct qcom_icc_node slv_kpss_ahb = {
+ .name = "slv_kpss_ahb",
+ .id = QNOC_SLAVE_APPSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_wcss = {
+ .name = "slv_wcss",
+ .id = QNOC_SLAVE_WCSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 23,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_snoc_bimc_0_links[] = {
+ QNOC_SNOC_BIMC_0_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_0 = {
+ .name = "slv_snoc_bimc_0",
+ .id = QNOC_SNOC_BIMC_0_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_0_links),
+ .links = slv_snoc_bimc_0_links,
+};
+
+static const u16 slv_snoc_bimc_1_links[] = {
+ QNOC_SNOC_BIMC_1_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_1 = {
+ .name = "slv_snoc_bimc_1",
+ .id = QNOC_SNOC_BIMC_1_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 104,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_1_links),
+ .links = slv_snoc_bimc_1_links,
+};
+
+static const u16 slv_snoc_bimc_2_links[] = {
+ QNOC_SNOC_BIMC_2_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_2 = {
+ .name = "slv_snoc_bimc_2",
+ .id = QNOC_SNOC_BIMC_2_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 137,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_2_links),
+ .links = slv_snoc_bimc_2_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QNOC_SLAVE_OCIMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static const u16 slv_snoc_pcnoc_links[] = {
+ QNOC_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_pcnoc = {
+ .name = "slv_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links),
+ .links = slv_snoc_pcnoc_links,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QNOC_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = QNOC_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 106,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = QNOC_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 107,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = QNOC_SLAVE_LPASS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node *msm8937_bimc_nodes[] = {
+ [MAS_APPS_PROC] = &mas_apps_proc,
+ [MAS_OXILI] = &mas_oxili,
+ [MAS_SNOC_BIMC_0] = &mas_snoc_bimc_0,
+ [MAS_SNOC_BIMC_2] = &mas_snoc_bimc_2,
+ [MAS_SNOC_BIMC_1] = &mas_snoc_bimc_1,
+ [MAS_TCU_0] = &mas_tcu_0,
+ [SLV_EBI] = &slv_ebi,
+ [SLV_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config msm8937_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5A000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8937_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = msm8937_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8937_bimc_nodes),
+ .bus_clk_desc = &bimc_clk,
+ .regmap_cfg = &msm8937_bimc_regmap_config,
+ .qos_offset = 0x8000,
+ .ab_coeff = 154,
+};
+
+static struct qcom_icc_node *msm8937_pcnoc_nodes[] = {
+ [MAS_SPDM] = &mas_spdm,
+ [MAS_BLSP_1] = &mas_blsp_1,
+ [MAS_BLSP_2] = &mas_blsp_2,
+ [MAS_USB_HS1] = &mas_usb_hs1,
+ [MAS_XI_USB_HS1] = &mas_xi_usb_hs1,
+ [MAS_CRYPTO] = &mas_crypto,
+ [MAS_SDCC_1] = &mas_sdcc_1,
+ [MAS_SDCC_2] = &mas_sdcc_2,
+ [MAS_SNOC_PCNOC] = &mas_snoc_pcnoc,
+ [PCNOC_M_0] = &pcnoc_m_0,
+ [PCNOC_M_1] = &pcnoc_m_1,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_INT_2] = &pcnoc_int_2,
+ [PCNOC_INT_3] = &pcnoc_int_3,
+ [PCNOC_S_0] = &pcnoc_s_0,
+ [PCNOC_S_1] = &pcnoc_s_1,
+ [PCNOC_S_2] = &pcnoc_s_2,
+ [PCNOC_S_3] = &pcnoc_s_3,
+ [PCNOC_S_4] = &pcnoc_s_4,
+ [PCNOC_S_6] = &pcnoc_s_6,
+ [PCNOC_S_7] = &pcnoc_s_7,
+ [PCNOC_S_8] = &pcnoc_s_8,
+ [SLV_SDCC_2] = &slv_sdcc_2,
+ [SLV_SPDM] = &slv_spdm,
+ [SLV_PDM] = &slv_pdm,
+ [SLV_PRNG] = &slv_prng,
+ [SLV_TCSR] = &slv_tcsr,
+ [SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [SLV_MESSAGE_RAM] = &slv_message_ram,
+ [SLV_CAMERA_SS_CFG] = &slv_camera_ss_cfg,
+ [SLV_DISP_SS_CFG] = &slv_disp_ss_cfg,
+ [SLV_VENUS_CFG] = &slv_venus_cfg,
+ [SLV_GPU_CFG] = &slv_gpu_cfg,
+ [SLV_TLMM] = &slv_tlmm,
+ [SLV_BLSP_1] = &slv_blsp_1,
+ [SLV_BLSP_2] = &slv_blsp_2,
+ [SLV_PMIC_ARB] = &slv_pmic_arb,
+ [SLV_SDCC_1] = &slv_sdcc_1,
+ [SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLV_USB_HS] = &slv_usb_hs,
+ [SLV_TCU] = &slv_tcu,
+ [SLV_PCNOC_SNOC] = &slv_pcnoc_snoc,
+};
+
+static const struct regmap_config msm8937_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x13080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8937_pcnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8937_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8937_pcnoc_nodes),
+ .bus_clk_desc = &bus_0_clk,
+ .qos_offset = 0x7000,
+ .keep_alive = true,
+ .regmap_cfg = &msm8937_pcnoc_regmap_config,
+};
+
+static struct qcom_icc_node *msm8937_snoc_nodes[] = {
+ [MAS_QDSS_BAM] = &mas_qdss_bam,
+ [MAS_BIMC_SNOC] = &mas_bimc_snoc,
+ [MAS_PCNOC_SNOC] = &mas_pcnoc_snoc,
+ [MAS_QDSS_ETR] = &mas_qdss_etr,
+ [QDSS_INT] = &qdss_int,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_2] = &snoc_int_2,
+ [SLV_KPSS_AHB] = &slv_kpss_ahb,
+ [SLV_WCSS] = &slv_wcss,
+ [SLV_SNOC_BIMC_1] = &slv_snoc_bimc_1,
+ [SLV_IMEM] = &slv_imem,
+ [SLV_SNOC_PCNOC] = &slv_snoc_pcnoc,
+ [SLV_QDSS_STM] = &slv_qdss_stm,
+ [SLV_CATS_1] = &slv_cats_1,
+ [SLV_LPASS] = &slv_lpass,
+};
+
+static const struct regmap_config msm8937_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8937_snoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8937_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8937_snoc_nodes),
+ .bus_clk_desc = &bus_1_clk,
+ .regmap_cfg = &msm8937_snoc_regmap_config,
+ .qos_offset = 0x7000,
+};
+
+static struct qcom_icc_node *msm8937_snoc_mm_nodes[] = {
+ [MAS_JPEG] = &mas_jpeg,
+ [MAS_MDP] = &mas_mdp,
+ [MAS_VENUS] = &mas_venus,
+ [MAS_VFE0] = &mas_vfe0,
+ [MAS_VFE1] = &mas_vfe1,
+ [MAS_CPP] = &mas_cpp,
+ [SLV_SNOC_BIMC_0] = &slv_snoc_bimc_0,
+ [SLV_SNOC_BIMC_2] = &slv_snoc_bimc_2,
+ [SLV_CATS_0] = &slv_cats_0,
+};
+
+static const struct qcom_icc_desc msm8937_snoc_mm = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8937_snoc_mm_nodes,
+ .num_nodes = ARRAY_SIZE(msm8937_snoc_mm_nodes),
+ .bus_clk_desc = &bus_2_clk,
+ .regmap_cfg = &msm8937_snoc_regmap_config,
+ .qos_offset = 0x7000,
+ .ab_coeff = 154,
+};
+
+static const struct of_device_id msm8937_noc_of_match[] = {
+ { .compatible = "qcom,msm8937-bimc", .data = &msm8937_bimc },
+ { .compatible = "qcom,msm8937-pcnoc", .data = &msm8937_pcnoc },
+ { .compatible = "qcom,msm8937-snoc", .data = &msm8937_snoc },
+ { .compatible = "qcom,msm8937-snoc-mm", .data = &msm8937_snoc_mm },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8937_noc_of_match);
+
+static struct platform_driver msm8937_noc_driver = {
+ .probe = qnoc_probe,
+ .remove_new = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8937",
+ .of_match_table = msm8937_noc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(msm8937_noc_driver);
+
+MODULE_DESCRIPTION("Qualcomm MSM8937 NoC driver");
+MODULE_LICENSE("GPL");
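For context: a minimal consumer-side sketch of how a driver would vote for bandwidth on paths exposed by a provider like qnoc-msm8937 above. Everything here is hypothetical (the driver, the "memory" path name, the numbers); only the interconnect API calls are real.

#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct icc_path *path;

	/* Look up a path named "memory" in the consumer's DT node. */
	path = devm_of_icc_get(&pdev->dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Vote 100 MB/s average and 200 MB/s peak bandwidth. */
	return icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
}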
diff --git a/drivers/interconnect/qcom/msm8953.c b/drivers/interconnect/qcom/msm8953.c
index 9e8867c07692..62f8c0774b3e 100644
--- a/drivers/interconnect/qcom/msm8953.c
+++ b/drivers/interconnect/qcom/msm8953.c
@@ -1169,6 +1169,7 @@ static const struct qcom_icc_desc msm8953_bimc = {
.nodes = msm8953_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8953_bimc_nodes),
.qos_offset = 0x8000,
+ .ab_coeff = 153,
.regmap_cfg = &msm8953_bimc_regmap_config
};
@@ -1295,6 +1296,7 @@ static const struct qcom_icc_desc msm8953_snoc_mm = {
.nodes = msm8953_snoc_mm_nodes,
.num_nodes = ARRAY_SIZE(msm8953_snoc_mm_nodes),
.qos_offset = 0x7000,
+ .ab_coeff = 153,
.regmap_cfg = &msm8953_snoc_regmap_config,
};
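The ab_coeff values added in these hunks feed the shared icc-rpm bandwidth aggregation. A sketch of the percentage interpretation assumed here (153 meaning the aggregated AB vote is scaled by roughly 1.53x, consistent with the 154 used by the other drivers in this series; the actual scaling lives in the shared icc-rpm code, not in this file):

#include <linux/math.h>
#include <linux/types.h>

/* Assumed model: ab_coeff is a percentage multiplier for the AB vote. */
static u64 example_scale_ab(u64 agg_ab_kbps, u16 ab_coeff)
{
	return mult_frac(agg_ab_kbps, (u64)ab_coeff, 100);
}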
diff --git a/drivers/interconnect/qcom/msm8976.c b/drivers/interconnect/qcom/msm8976.c
new file mode 100644
index 000000000000..ab963def77c3
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8976.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on data from msm8976-bus.dtsi in Qualcomm's msm-3.10 release:
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/interconnect/qcom,msm8976.h>
+
+#include "icc-rpm.h"
+
+enum {
+ QNOC_MASTER_AMPSS_M0 = 1,
+ QNOC_MNOC_BIMC_MAS,
+ QNOC_SNOC_BIMC_MAS,
+ QNOC_MASTER_TCU_0,
+ QNOC_MASTER_USB_HS2,
+ QNOC_MASTER_BLSP_1,
+ QNOC_MASTER_USB_HS,
+ QNOC_MASTER_BLSP_2,
+ QNOC_MASTER_CRYPTO_CORE0,
+ QNOC_MASTER_SDCC_1,
+ QNOC_MASTER_SDCC_2,
+ QNOC_MASTER_SDCC_3,
+ QNOC_SNOC_PNOC_MAS,
+ QNOC_MASTER_LPASS_AHB,
+ QNOC_MASTER_SPDM,
+ QNOC_MASTER_DEHR,
+ QNOC_MASTER_XM_USB_HS1,
+ QNOC_MASTER_QDSS_BAM,
+ QNOC_BIMC_SNOC_MAS,
+ QNOC_MASTER_JPEG,
+ QNOC_MASTER_GRAPHICS_3D,
+ QNOC_MASTER_MDP_PORT0,
+ QNOC_MASTER_MDP_PORT1,
+ QNOC_PNOC_SNOC_MAS,
+ QNOC_MASTER_VIDEO_P0,
+ QNOC_MASTER_VIDEO_P1,
+ QNOC_MASTER_VFE0,
+ QNOC_MASTER_VFE1,
+ QNOC_MASTER_CPP,
+ QNOC_MASTER_QDSS_ETR,
+ QNOC_MASTER_LPASS_PROC,
+ QNOC_MASTER_IPA,
+ QNOC_PNOC_M_0,
+ QNOC_PNOC_M_1,
+ QNOC_PNOC_INT_0,
+ QNOC_PNOC_INT_1,
+ QNOC_PNOC_INT_2,
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_4,
+ QNOC_PNOC_SLV_8,
+ QNOC_PNOC_SLV_9,
+ QNOC_SNOC_MM_INT_0,
+ QNOC_SNOC_QDSS_INT,
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_2,
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_TCSR,
+ QNOC_SLAVE_TLMM,
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_MESSAGE_RAM,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_PMIC_ARB,
+ QNOC_SLAVE_SNOC_CFG,
+ QNOC_SLAVE_DCC_CFG,
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG,
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_SDCC_3,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_USB_HS2,
+ QNOC_SLAVE_BLSP_2,
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_SLAVE_APPSS,
+ QNOC_MNOC_BIMC_SLV,
+ QNOC_SNOC_BIMC_SLV,
+ QNOC_SLAVE_SYSTEM_IMEM,
+ QNOC_SNOC_PNOC_SLV,
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64,
+ QNOC_SLAVE_LPASS,
+};
+
+static const u16 mas_apps_proc_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = QNOC_MASTER_AMPSS_M0,
+ .buswidth = 16,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links,
+};
+
+static const u16 mas_smmnoc_bimc_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_smmnoc_bimc = {
+ .name = "mas_smmnoc_bimc",
+ .id = QNOC_MNOC_BIMC_MAS,
+ .channels = 2,
+ .buswidth = 16,
+ .mas_rpm_id = 135,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_smmnoc_bimc_links),
+ .links = mas_smmnoc_bimc_links,
+};
+
+static const u16 mas_snoc_bimc_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+ .name = "mas_snoc_bimc",
+ .id = QNOC_SNOC_BIMC_MAS,
+ .channels = 2,
+ .buswidth = 16,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ .links = mas_snoc_bimc_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .name = "mas_tcu_0",
+ .id = QNOC_MASTER_TCU_0,
+ .buswidth = 16,
+ .mas_rpm_id = 102,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_usb_hs2_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_usb_hs2 = {
+ .name = "mas_usb_hs2",
+ .id = QNOC_MASTER_USB_HS2,
+ .buswidth = 4,
+ .mas_rpm_id = 57,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs2_links),
+ .links = mas_usb_hs2_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = QNOC_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_usb_hs1_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_usb_hs1 = {
+ .name = "mas_usb_hs1",
+ .id = QNOC_MASTER_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs1_links),
+ .links = mas_usb_hs1_links,
+};
+
+static const u16 mas_blsp_2_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+ .name = "mas_blsp_2",
+ .id = QNOC_MASTER_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = 39,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_2_links),
+ .links = mas_blsp_2_links,
+};
+
+static const u16 mas_crypto_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_crypto = {
+ .name = "mas_crypto",
+ .id = QNOC_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_crypto_links),
+ .links = mas_crypto_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = QNOC_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = QNOC_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_sdcc_3_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_3 = {
+ .name = "mas_sdcc_3",
+ .id = QNOC_MASTER_SDCC_3,
+ .buswidth = 8,
+ .mas_rpm_id = 34,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_sdcc_3_links),
+ .links = mas_sdcc_3_links,
+};
+
+static const u16 mas_snoc_pcnoc_links[] = {
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node mas_snoc_pcnoc = {
+ .name = "mas_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 77,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links),
+ .links = mas_snoc_pcnoc_links,
+};
+
+static const u16 mas_lpass_ahb_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node mas_lpass_ahb = {
+ .name = "mas_lpass_ahb",
+ .id = QNOC_MASTER_LPASS_AHB,
+ .buswidth = 8,
+ .mas_rpm_id = 18,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_lpass_ahb_links),
+ .links = mas_lpass_ahb_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = QNOC_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_dehr_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_dehr = {
+ .name = "mas_dehr",
+ .id = QNOC_MASTER_DEHR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_dehr_links),
+ .links = mas_dehr_links,
+};
+
+static const u16 mas_xm_usb_hs1_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_xm_usb_hs1 = {
+ .name = "mas_xm_usb_hs1",
+ .id = QNOC_MASTER_XM_USB_HS1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_xm_usb_hs1_links),
+ .links = mas_xm_usb_hs1_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = QNOC_MASTER_QDSS_BAM,
+ .buswidth = 4,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_jpeg_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = QNOC_MASTER_JPEG,
+ .buswidth = 16,
+ .mas_rpm_id = 7,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_jpeg_links),
+ .links = mas_jpeg_links,
+};
+
+static const u16 mas_oxili_links[] = {
+ QNOC_MNOC_BIMC_SLV,
+ QNOC_SNOC_MM_INT_0
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = QNOC_MASTER_GRAPHICS_3D,
+ .channels = 2,
+ .buswidth = 16,
+ .ib_coeff = 200,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 16, /* [16, 17] */
+ .num_links = ARRAY_SIZE(mas_oxili_links),
+ .links = mas_oxili_links,
+};
+
+static const u16 mas_mdp0_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_mdp0 = {
+ .name = "mas_mdp0",
+ .id = QNOC_MASTER_MDP_PORT0,
+ .buswidth = 16,
+ .ib_coeff = 50,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mdp0_links),
+ .links = mas_mdp0_links,
+};
+
+static const u16 mas_mdp1_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_mdp1 = {
+ .name = "mas_mdp1",
+ .id = QNOC_MASTER_MDP_PORT1,
+ .buswidth = 16,
+ .ib_coeff = 50,
+ .mas_rpm_id = 61,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 13,
+ .num_links = ARRAY_SIZE(mas_mdp1_links),
+ .links = mas_mdp1_links,
+};
+
+static const u16 mas_pcnoc_snoc_links[] = {
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node mas_pcnoc_snoc = {
+ .name = "mas_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links),
+ .links = mas_pcnoc_snoc_links,
+};
+
+static const u16 mas_venus_0_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_venus_0 = {
+ .name = "mas_venus_0",
+ .id = QNOC_MASTER_VIDEO_P0,
+ .buswidth = 16,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_venus_0_links),
+ .links = mas_venus_0_links,
+};
+
+static const u16 mas_venus_1_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_venus_1 = {
+ .name = "mas_venus_1",
+ .id = QNOC_MASTER_VIDEO_P1,
+ .buswidth = 16,
+ .mas_rpm_id = 10,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 14,
+ .num_links = ARRAY_SIZE(mas_venus_1_links),
+ .links = mas_venus_1_links,
+};
+
+static const u16 mas_vfe_0_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_vfe_0 = {
+ .name = "mas_vfe_0",
+ .id = QNOC_MASTER_VFE0,
+ .buswidth = 16,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_vfe_0_links),
+ .links = mas_vfe_0_links,
+};
+
+static const u16 mas_vfe_1_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_vfe_1 = {
+ .name = "mas_vfe_1",
+ .id = QNOC_MASTER_VFE1,
+ .buswidth = 16,
+ .mas_rpm_id = 133,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 15,
+ .num_links = ARRAY_SIZE(mas_vfe_1_links),
+ .links = mas_vfe_1_links,
+};
+
+static const u16 mas_cpp_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_MNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_cpp = {
+ .name = "mas_cpp",
+ .id = QNOC_MASTER_CPP,
+ .buswidth = 16,
+ .mas_rpm_id = 115,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 12,
+ .num_links = ARRAY_SIZE(mas_cpp_links),
+ .links = mas_cpp_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = QNOC_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 mas_lpass_proc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node mas_lpass_proc = {
+ .name = "mas_lpass_proc",
+ .id = QNOC_MASTER_LPASS_PROC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 19,
+ .num_links = ARRAY_SIZE(mas_lpass_proc_links),
+ .links = mas_lpass_proc_links,
+};
+
+static const u16 mas_ipa_links[] = {
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node mas_ipa = {
+ .name = "mas_ipa",
+ .id = QNOC_MASTER_IPA,
+ .buswidth = 8,
+ .mas_rpm_id = 59,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 18,
+ .num_links = ARRAY_SIZE(mas_ipa_links),
+ .links = mas_ipa_links,
+};
+
+static const u16 pcnoc_m_0_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_m_0 = {
+ .name = "pcnoc_m_0",
+ .id = QNOC_PNOC_M_0,
+ .buswidth = 4,
+ .mas_rpm_id = 87,
+ .slv_rpm_id = 116,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(pcnoc_m_0_links),
+ .links = pcnoc_m_0_links,
+};
+
+static const u16 pcnoc_m_1_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_m_1 = {
+ .name = "pcnoc_m_1",
+ .id = QNOC_PNOC_M_1,
+ .buswidth = 4,
+ .mas_rpm_id = 88,
+ .slv_rpm_id = 117,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(pcnoc_m_1_links),
+ .links = pcnoc_m_1_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = QNOC_PNOC_INT_0,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_1_links[] = {
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_PNOC_INT_2
+};
+
+static struct qcom_icc_node pcnoc_int_1 = {
+ .name = "pcnoc_int_1",
+ .id = QNOC_PNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 86,
+ .slv_rpm_id = 115,
+ .num_links = ARRAY_SIZE(pcnoc_int_1_links),
+ .links = pcnoc_int_1_links,
+};
+
+static const u16 pcnoc_int_2_links[] = {
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_4,
+ QNOC_PNOC_SLV_8,
+ QNOC_PNOC_SLV_9,
+ QNOC_PNOC_SLV_3
+};
+
+static struct qcom_icc_node pcnoc_int_2 = {
+ .name = "pcnoc_int_2",
+ .id = QNOC_PNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 124,
+ .slv_rpm_id = 184,
+ .num_links = ARRAY_SIZE(pcnoc_int_2_links),
+ .links = pcnoc_int_2_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_MESSAGE_RAM
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = QNOC_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = 90,
+ .slv_rpm_id = 119,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ QNOC_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = QNOC_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = 91,
+ .slv_rpm_id = 120,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ QNOC_SLAVE_SNOC_CFG,
+ QNOC_SLAVE_DCC_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = QNOC_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = 92,
+ .slv_rpm_id = 121,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = QNOC_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = 93,
+ .slv_rpm_id = 122,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_8_links[] = {
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_SDCC_3,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_SDCC_1
+};
+
+static struct qcom_icc_node pcnoc_s_8 = {
+ .name = "pcnoc_s_8",
+ .id = QNOC_PNOC_SLV_8,
+ .buswidth = 4,
+ .mas_rpm_id = 96,
+ .slv_rpm_id = 125,
+ .num_links = ARRAY_SIZE(pcnoc_s_8_links),
+ .links = pcnoc_s_8_links,
+};
+
+static const u16 pcnoc_s_9_links[] = {
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_USB_HS2,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_BLSP_2
+};
+
+static struct qcom_icc_node pcnoc_s_9 = {
+ .name = "pcnoc_s_9",
+ .id = QNOC_PNOC_SLV_9,
+ .buswidth = 4,
+ .mas_rpm_id = 97,
+ .slv_rpm_id = 126,
+ .num_links = ARRAY_SIZE(pcnoc_s_9_links),
+ .links = pcnoc_s_9_links,
+};
+
+static const u16 mm_int_0_links[] = {
+ QNOC_SNOC_INT_0
+};
+
+static struct qcom_icc_node mm_int_0 = {
+ .name = "mm_int_0",
+ .id = QNOC_SNOC_MM_INT_0,
+ .buswidth = 16,
+ .ib_coeff = 200,
+ .mas_rpm_id = 79,
+ .slv_rpm_id = 108,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_0_links),
+ .links = mm_int_0_links,
+};
+
+static const u16 qdss_int_links[] = {
+ QNOC_SNOC_INT_2
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = QNOC_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = 98,
+ .slv_rpm_id = 128,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_SYSTEM_IMEM,
+ QNOC_SNOC_PNOC_SLV
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = QNOC_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ QNOC_SLAVE_LPASS,
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64,
+ QNOC_SLAVE_APPSS
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = QNOC_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 100,
+ .slv_rpm_id = 131,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_2_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_BIMC_SLV
+};
+
+static struct qcom_icc_node snoc_int_2 = {
+ .name = "snoc_int_2",
+ .id = QNOC_SNOC_INT_2,
+ .buswidth = 8,
+ .mas_rpm_id = 134,
+ .slv_rpm_id = 197,
+ .num_links = ARRAY_SIZE(snoc_int_2_links),
+ .links = snoc_int_2_links,
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = QNOC_SLAVE_EBI_CH0,
+ .channels = 2,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QNOC_BIMC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QNOC_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = QNOC_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 51,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QNOC_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QNOC_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QNOC_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QNOC_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QNOC_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QNOC_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+};
+
+static struct qcom_icc_node slv_dcc_cfg = {
+ .name = "slv_dcc_cfg",
+ .id = QNOC_SLAVE_DCC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 155,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_camera_ss_cfg = {
+ .name = "slv_camera_ss_cfg",
+ .id = QNOC_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_disp_ss_cfg = {
+ .name = "slv_disp_ss_cfg",
+ .id = QNOC_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = QNOC_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QNOC_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = QNOC_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = QNOC_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40,
+};
+
+static struct qcom_icc_node slv_sdcc_3 = {
+ .name = "slv_sdcc_3",
+ .id = QNOC_SLAVE_SDCC_3,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 32,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QNOC_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QNOC_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_usb_hs2 = {
+ .name = "slv_usb_hs2",
+ .id = QNOC_SLAVE_USB_HS2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 79,
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+ .name = "slv_blsp_2",
+ .id = QNOC_SLAVE_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 37,
+};
+
+static const u16 slv_pcnoc_snoc_links[] = {
+ QNOC_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_pcnoc_snoc = {
+ .name = "slv_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links),
+ .links = slv_pcnoc_snoc_links,
+};
+
+static struct qcom_icc_node slv_kpss_ahb = {
+ .name = "slv_kpss_ahb",
+ .id = QNOC_SLAVE_APPSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_smmnoc_bimc_links[] = {
+ QNOC_MNOC_BIMC_MAS
+};
+
+static struct qcom_icc_node slv_smmnoc_bimc = {
+ .name = "slv_smmnoc_bimc",
+ .id = QNOC_MNOC_BIMC_SLV,
+ .channels = 2,
+ .buswidth = 16,
+ .ib_coeff = 200,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 198,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_smmnoc_bimc_links),
+ .links = slv_smmnoc_bimc_links,
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+ QNOC_SNOC_BIMC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+ .name = "slv_snoc_bimc",
+ .id = QNOC_SNOC_BIMC_SLV,
+ .channels = 2,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+ .links = slv_snoc_bimc_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QNOC_SLAVE_SYSTEM_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static const u16 slv_snoc_pcnoc_links[] = {
+ QNOC_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_pcnoc = {
+ .name = "slv_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links),
+ .links = slv_snoc_pcnoc_links,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QNOC_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = QNOC_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 106,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = QNOC_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 107,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = QNOC_SLAVE_LPASS,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node * const msm8976_bimc_nodes[] = {
+ [MAS_APPS_PROC] = &mas_apps_proc,
+ [MAS_SMMNOC_BIMC] = &mas_smmnoc_bimc,
+ [MAS_SNOC_BIMC] = &mas_snoc_bimc,
+ [MAS_TCU_0] = &mas_tcu_0,
+ [SLV_EBI] = &slv_ebi,
+ [SLV_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config msm8976_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x62000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8976_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = msm8976_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8976_bimc_nodes),
+ .bus_clk_desc = &bimc_clk,
+ .regmap_cfg = &msm8976_bimc_regmap_config,
+ .qos_offset = 0x8000,
+ .ab_coeff = 154,
+};
+
+static struct qcom_icc_node * const msm8976_pcnoc_nodes[] = {
+ [MAS_USB_HS2] = &mas_usb_hs2,
+ [MAS_BLSP_1] = &mas_blsp_1,
+ [MAS_USB_HS1] = &mas_usb_hs1,
+ [MAS_BLSP_2] = &mas_blsp_2,
+ [MAS_CRYPTO] = &mas_crypto,
+ [MAS_SDCC_1] = &mas_sdcc_1,
+ [MAS_SDCC_2] = &mas_sdcc_2,
+ [MAS_SDCC_3] = &mas_sdcc_3,
+ [MAS_SNOC_PCNOC] = &mas_snoc_pcnoc,
+ [MAS_LPASS_AHB] = &mas_lpass_ahb,
+ [MAS_SPDM] = &mas_spdm,
+ [MAS_DEHR] = &mas_dehr,
+ [MAS_XM_USB_HS1] = &mas_xm_usb_hs1,
+ [PCNOC_M_0] = &pcnoc_m_0,
+ [PCNOC_M_1] = &pcnoc_m_1,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_INT_2] = &pcnoc_int_2,
+ [PCNOC_S_1] = &pcnoc_s_1,
+ [PCNOC_S_2] = &pcnoc_s_2,
+ [PCNOC_S_3] = &pcnoc_s_3,
+ [PCNOC_S_4] = &pcnoc_s_4,
+ [PCNOC_S_8] = &pcnoc_s_8,
+ [PCNOC_S_9] = &pcnoc_s_9,
+ [SLV_TCSR] = &slv_tcsr,
+ [SLV_TLMM] = &slv_tlmm,
+ [SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLV_MESSAGE_RAM] = &slv_message_ram,
+ [SLV_PDM] = &slv_pdm,
+ [SLV_PRNG] = &slv_prng,
+ [SLV_PMIC_ARB] = &slv_pmic_arb,
+ [SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [SLV_DCC_CFG] = &slv_dcc_cfg,
+ [SLV_CAMERA_SS_CFG] = &slv_camera_ss_cfg,
+ [SLV_DISP_SS_CFG] = &slv_disp_ss_cfg,
+ [SLV_VENUS_CFG] = &slv_venus_cfg,
+ [SLV_SDCC_1] = &slv_sdcc_1,
+ [SLV_BLSP_1] = &slv_blsp_1,
+ [SLV_USB_HS] = &slv_usb_hs,
+ [SLV_SDCC_3] = &slv_sdcc_3,
+ [SLV_SDCC_2] = &slv_sdcc_2,
+ [SLV_GPU_CFG] = &slv_gpu_cfg,
+ [SLV_USB_HS2] = &slv_usb_hs2,
+ [SLV_BLSP_2] = &slv_blsp_2,
+ [SLV_PCNOC_SNOC] = &slv_pcnoc_snoc,
+};
+
+static const struct regmap_config msm8976_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8976_pcnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8976_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8976_pcnoc_nodes),
+ .bus_clk_desc = &bus_0_clk,
+ .qos_offset = 0x7000,
+ .keep_alive = true,
+ .regmap_cfg = &msm8976_pcnoc_regmap_config,
+};
+
+static struct qcom_icc_node * const msm8976_snoc_nodes[] = {
+ [MAS_QDSS_BAM] = &mas_qdss_bam,
+ [MAS_BIMC_SNOC] = &mas_bimc_snoc,
+ [MAS_PCNOC_SNOC] = &mas_pcnoc_snoc,
+ [MAS_QDSS_ETR] = &mas_qdss_etr,
+ [MAS_LPASS_PROC] = &mas_lpass_proc,
+ [MAS_IPA] = &mas_ipa,
+ [QDSS_INT] = &qdss_int,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_2] = &snoc_int_2,
+ [SLV_KPSS_AHB] = &slv_kpss_ahb,
+ [SLV_SNOC_BIMC] = &slv_snoc_bimc,
+ [SLV_IMEM] = &slv_imem,
+ [SLV_SNOC_PCNOC] = &slv_snoc_pcnoc,
+ [SLV_QDSS_STM] = &slv_qdss_stm,
+ [SLV_CATS_0] = &slv_cats_0,
+ [SLV_CATS_1] = &slv_cats_1,
+ [SLV_LPASS] = &slv_lpass,
+};
+
+static const struct regmap_config msm8976_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1a000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8976_snoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8976_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8976_snoc_nodes),
+ .bus_clk_desc = &bus_1_clk,
+ .regmap_cfg = &msm8976_snoc_regmap_config,
+ .qos_offset = 0x7000,
+};
+
+static struct qcom_icc_node * const msm8976_snoc_mm_nodes[] = {
+ [MAS_JPEG] = &mas_jpeg,
+ [MAS_OXILI] = &mas_oxili,
+ [MAS_MDP0] = &mas_mdp0,
+ [MAS_MDP1] = &mas_mdp1,
+ [MAS_VENUS_0] = &mas_venus_0,
+ [MAS_VENUS_1] = &mas_venus_1,
+ [MAS_VFE_0] = &mas_vfe_0,
+ [MAS_VFE_1] = &mas_vfe_1,
+ [MAS_CPP] = &mas_cpp,
+ [MM_INT_0] = &mm_int_0,
+ [SLV_SMMNOC_BIMC] = &slv_smmnoc_bimc,
+};
+
+static const struct qcom_icc_desc msm8976_snoc_mm = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8976_snoc_mm_nodes,
+ .num_nodes = ARRAY_SIZE(msm8976_snoc_mm_nodes),
+ .bus_clk_desc = &bus_2_clk,
+ .regmap_cfg = &msm8976_snoc_regmap_config,
+ .qos_offset = 0x7000,
+ .ab_coeff = 154,
+};
+
+static const struct of_device_id msm8976_noc_of_match[] = {
+ { .compatible = "qcom,msm8976-bimc", .data = &msm8976_bimc },
+ { .compatible = "qcom,msm8976-pcnoc", .data = &msm8976_pcnoc },
+ { .compatible = "qcom,msm8976-snoc", .data = &msm8976_snoc },
+ { .compatible = "qcom,msm8976-snoc-mm", .data = &msm8976_snoc_mm },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8976_noc_of_match);
+
+static struct platform_driver msm8976_noc_driver = {
+ .probe = qnoc_probe,
+ .remove_new = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8976",
+ .of_match_table = msm8976_noc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(msm8976_noc_driver);
+
+MODULE_DESCRIPTION("Qualcomm MSM8976 NoC driver");
+MODULE_LICENSE("GPL");
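For background on how node tables such as msm8976_snoc_nodes above become an interconnect graph, here is a simplified sketch of the registration pattern the shared qnoc_probe() follows (error handling and provider setup omitted; this mirrors, but is not, the icc-rpm implementation):

#include <linux/err.h>
#include <linux/interconnect-provider.h>

#include "icc-rpm.h"

static int example_register_nodes(struct icc_provider *provider,
				  struct qcom_icc_node * const *qnodes,
				  size_t num_nodes)
{
	size_t i;
	int j;

	for (i = 0; i < num_nodes; i++) {
		/* Each table entry becomes one node in the graph... */
		struct icc_node *node = icc_node_create(qnodes[i]->id);

		if (IS_ERR(node))
			return PTR_ERR(node);

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		/* ...and each entry in links[] becomes a directed edge. */
		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);
	}

	return 0;
}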
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 11b49a89c03d..63e9ff223ac4 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include "icc-rpm.h"
@@ -101,6 +102,11 @@ static struct qcom_icc_node mas_apps_proc = {
.buswidth = 8,
.mas_rpm_id = 0,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_apps_proc_links),
.links = mas_apps_proc_links,
};
@@ -116,6 +122,11 @@ static struct qcom_icc_node mas_oxili = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_oxili_links),
.links = mas_oxili_links,
};
@@ -131,6 +142,11 @@ static struct qcom_icc_node mas_mdp = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 1,
+ .qos.qos_port = 4,
.num_links = ARRAY_SIZE(mas_mdp_links),
.links = mas_mdp_links,
};
@@ -145,6 +161,10 @@ static struct qcom_icc_node mas_snoc_bimc_1 = {
.buswidth = 8,
.mas_rpm_id = 76,
.slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
.num_links = ARRAY_SIZE(mas_snoc_bimc_1_links),
.links = mas_snoc_bimc_1_links,
};
@@ -160,6 +180,11 @@ static struct qcom_icc_node mas_tcu_0 = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 6,
.num_links = ARRAY_SIZE(mas_tcu_0_links),
.links = mas_tcu_0_links,
};
@@ -174,6 +199,8 @@ static struct qcom_icc_node mas_spdm = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_spdm_links),
.links = mas_spdm_links,
};
@@ -231,6 +258,11 @@ static struct qcom_icc_node mas_crypto = {
.buswidth = 8,
.mas_rpm_id = 23,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_crypto_links),
.links = mas_crypto_links,
};
@@ -287,6 +319,11 @@ static struct qcom_icc_node mas_qpic = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 14,
.num_links = ARRAY_SIZE(mas_qpic_links),
.links = mas_qpic_links,
};
@@ -301,6 +338,11 @@ static struct qcom_icc_node mas_qdss_bam = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_qdss_bam_links),
.links = mas_qdss_bam_links,
};
@@ -348,6 +390,11 @@ static struct qcom_icc_node mas_qdss_etr = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_qdss_etr_links),
.links = mas_qdss_etr_links,
};
@@ -363,6 +410,11 @@ static struct qcom_icc_node mas_emac = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 17,
.num_links = ARRAY_SIZE(mas_emac_links),
.links = mas_emac_links,
};
@@ -378,6 +430,11 @@ static struct qcom_icc_node mas_pcie = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 8,
.num_links = ARRAY_SIZE(mas_pcie_links),
.links = mas_pcie_links,
};
@@ -393,6 +450,11 @@ static struct qcom_icc_node mas_usb3 = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 16,
.num_links = ARRAY_SIZE(mas_usb3_links),
.links = mas_usb3_links,
};
@@ -491,6 +553,8 @@ static struct qcom_icc_node pcnoc_s_2 = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(pcnoc_s_2_links),
.links = pcnoc_s_2_links,
};
@@ -626,6 +690,8 @@ static struct qcom_icc_node qdss_int = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(qdss_int_links),
.links = qdss_int_links,
};
@@ -704,6 +770,8 @@ static struct qcom_icc_node slv_spdm = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_pdm = {
@@ -752,6 +820,8 @@ static struct qcom_icc_node slv_disp_ss_cfg = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_gpu_cfg = {
@@ -760,6 +830,8 @@ static struct qcom_icc_node slv_gpu_cfg = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_blsp_1 = {
@@ -784,6 +856,8 @@ static struct qcom_icc_node slv_pcie = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_ethernet = {
@@ -792,6 +866,8 @@ static struct qcom_icc_node slv_ethernet = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_blsp_2 = {
@@ -816,6 +892,8 @@ static struct qcom_icc_node slv_tcu = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_pmic_arb = {
@@ -894,6 +972,8 @@ static struct qcom_icc_node slv_kpss_ahb = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_wcss = {
@@ -954,6 +1034,8 @@ static struct qcom_icc_node slv_cats_0 = {
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_cats_1 = {
@@ -962,6 +1044,8 @@ static struct qcom_icc_node slv_cats_1 = {
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_lpass = {
@@ -970,6 +1054,8 @@ static struct qcom_icc_node slv_lpass = {
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node * const qcs404_bimc_nodes[] = {
@@ -982,10 +1068,22 @@ static struct qcom_icc_node * const qcs404_bimc_nodes[] = {
[SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
};
+static const struct regmap_config qcs404_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc qcs404_bimc = {
- .bus_clk_desc = &bimc_clk,
+ .type = QCOM_ICC_BIMC,
.nodes = qcs404_bimc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
+ .bus_clk_desc = &bimc_clk,
+ .regmap_cfg = &qcs404_bimc_regmap_config,
+ .qos_offset = 0x8000,
+ .ab_coeff = 153,
};
static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = {
@@ -1037,10 +1135,22 @@ static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = {
[SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
};
+static const struct regmap_config qcs404_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x15080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc qcs404_pcnoc = {
- .bus_clk_desc = &bus_0_clk,
+ .type = QCOM_ICC_NOC,
.nodes = qcs404_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
+ .bus_clk_desc = &bus_0_clk,
+ .qos_offset = 0x7000,
+ .keep_alive = true,
+ .regmap_cfg = &qcs404_pcnoc_regmap_config,
};
static struct qcom_icc_node * const qcs404_snoc_nodes[] = {
@@ -1066,10 +1176,21 @@ static struct qcom_icc_node * const qcs404_snoc_nodes[] = {
[SLAVE_LPASS] = &slv_lpass,
};
+static const struct regmap_config qcs404_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x23080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc qcs404_snoc = {
- .bus_clk_desc = &bus_1_clk,
+ .type = QCOM_ICC_NOC,
.nodes = qcs404_snoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
+ .bus_clk_desc = &bus_1_clk,
+ .qos_offset = 0x11000,
+ .regmap_cfg = &qcs404_snoc_regmap_config,
};
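The regmap_config and qos_offset additions above are what let the qcs404 provider program per-port QoS registers. As a sketch of how such a bus-level offset is typically combined with a node's qos_port — the stride and register offset below are illustrative placeholders, not values taken from this patch:

#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_QOS_PORT_STRIDE	0x1000	/* hypothetical stride */
#define EXAMPLE_QOS_PRIO_REG	0x8	/* hypothetical register */

static int example_set_prio(struct regmap *rmap, u32 qos_offset,
			    u32 qos_port, u32 prio)
{
	u32 reg = qos_offset + qos_port * EXAMPLE_QOS_PORT_STRIDE +
		  EXAMPLE_QOS_PRIO_REG;

	return regmap_update_bits(rmap, reg, GENMASK(2, 0), prio);
}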
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index b321c3009acb..4236a43dc256 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -628,60 +628,6 @@ static struct qcom_icc_node xm_gic = {
.links = { SM8350_SLAVE_SNOC_GEM_NOC_GC },
};
-static struct qcom_icc_node qnm_mnoc_hf_disp = {
- .name = "qnm_mnoc_hf_disp",
- .id = SM8350_MASTER_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node qnm_mnoc_sf_disp = {
- .name = "qnm_mnoc_sf_disp",
- .id = SM8350_MASTER_MNOC_SF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node llcc_mc_disp = {
- .name = "llcc_mc_disp",
- .id = SM8350_MASTER_LLCC_DISP,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8350_SLAVE_EBI1_DISP },
-};
-
-static struct qcom_icc_node qxm_mdp0_disp = {
- .name = "qxm_mdp0_disp",
- .id = SM8350_MASTER_MDP0_DISP,
- .channels = 1,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qxm_mdp1_disp = {
- .name = "qxm_mdp1_disp",
- .id = SM8350_MASTER_MDP1_DISP,
- .channels = 1,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qxm_rot_disp = {
- .name = "qxm_rot_disp",
- .id = SM8350_MASTER_ROTATOR_DISP,
- .channels = 1,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP },
-};
-
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8350_SLAVE_A1NOC_SNOC,
@@ -1320,40 +1266,6 @@ static struct qcom_icc_node srvc_snoc = {
.buswidth = 4,
};
-static struct qcom_icc_node qns_llcc_disp = {
- .name = "qns_llcc_disp",
- .id = SM8350_SLAVE_LLCC_DISP,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8350_MASTER_LLCC_DISP },
-};
-
-static struct qcom_icc_node ebi_disp = {
- .name = "ebi_disp",
- .id = SM8350_SLAVE_EBI1_DISP,
- .channels = 4,
- .buswidth = 4,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_disp = {
- .name = "qns_mem_noc_hf_disp",
- .id = SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_MASTER_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qns_mem_noc_sf_disp = {
- .name = "qns_mem_noc_sf_disp",
- .id = SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8350_MASTER_MNOC_SF_MEM_NOC_DISP },
-};
-
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = BIT(3),
@@ -1583,55 +1495,6 @@ static struct qcom_icc_bcm bcm_sn14 = {
.nodes = { &qns_pcie_mem_noc },
};
-static struct qcom_icc_bcm bcm_acv_disp = {
- .name = "ACV",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mc0_disp = {
- .name = "MC0",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm0_disp = {
- .name = "MM0",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm1_disp = {
- .name = "MM1",
- .keepalive = false,
- .num_nodes = 2,
- .nodes = { &qxm_mdp0_disp, &qxm_mdp1_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm4_disp = {
- .name = "MM4",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_sf_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm5_disp = {
- .name = "MM5",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &qxm_rot_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh0_disp = {
- .name = "SH0",
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &qns_llcc_disp },
-};
-
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
@@ -1785,7 +1648,6 @@ static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
- &bcm_sh0_disp,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
@@ -1808,9 +1670,6 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
[SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
- [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
- [MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp,
- [SLAVE_LLCC_DISP] = &qns_llcc_disp,
};
static const struct qcom_icc_desc sm8350_gem_noc = {
@@ -1843,15 +1702,11 @@ static const struct qcom_icc_desc sm8350_lpass_ag_noc = {
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
- &bcm_acv_disp,
- &bcm_mc0_disp,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
- [MASTER_LLCC_DISP] = &llcc_mc_disp,
- [SLAVE_EBI1_DISP] = &ebi_disp,
};
static const struct qcom_icc_desc sm8350_mc_virt = {
@@ -1866,10 +1721,6 @@ static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm1,
&bcm_mm4,
&bcm_mm5,
- &bcm_mm0_disp,
- &bcm_mm1_disp,
- &bcm_mm4_disp,
- &bcm_mm5_disp,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
@@ -1886,11 +1737,6 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
- [MASTER_MDP0_DISP] = &qxm_mdp0_disp,
- [MASTER_MDP1_DISP] = &qxm_mdp1_disp,
- [MASTER_ROTATOR_DISP] = &qxm_rot_disp,
- [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
- [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
};
static const struct qcom_icc_desc sm8350_mmss_noc = {
@@ -1965,6 +1811,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8350",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
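Wiring .sync_state = icc_sync_state into the sm8350 driver (last hunk above) lets the interconnect core hold bandwidth at safe boot-time levels until every consumer listed in DT has probed and voted, and only then drop to the aggregated requests. The callback comes from the core, so the provider-side wiring really is this small (sketch, hypothetical driver name):

#include <linux/interconnect-provider.h>
#include <linux/platform_device.h>

static struct platform_driver example_provider_driver = {
	.driver = {
		.name = "example-icc-provider",
		/* Core-provided hook; runs once all consumers have probed. */
		.sync_state = icc_sync_state,
	},
};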
diff --git a/drivers/interconnect/qcom/sm8350.h b/drivers/interconnect/qcom/sm8350.h
index 328d15238a0d..074c6131ab36 100644
--- a/drivers/interconnect/qcom/sm8350.h
+++ b/drivers/interconnect/qcom/sm8350.h
@@ -154,15 +154,5 @@
#define SM8350_SLAVE_PCIE_1 143
#define SM8350_SLAVE_QDSS_STM 144
#define SM8350_SLAVE_TCU 145
-#define SM8350_MASTER_LLCC_DISP 146
-#define SM8350_MASTER_MNOC_HF_MEM_NOC_DISP 147
-#define SM8350_MASTER_MNOC_SF_MEM_NOC_DISP 148
-#define SM8350_MASTER_MDP0_DISP 149
-#define SM8350_MASTER_MDP1_DISP 150
-#define SM8350_MASTER_ROTATOR_DISP 151
-#define SM8350_SLAVE_EBI1_DISP 152
-#define SM8350_SLAVE_LLCC_DISP 153
-#define SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP 154
-#define SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP 155
#endif
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 22addaedf64d..b3aa1f5d5321 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -151,7 +151,7 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
def_bool ARM64 || X86 || S390
- select DMA_OPS
+ select DMA_OPS_HELPERS
select IOMMU_API
select IOMMU_IOVA
select IRQ_MSI_IOMMU
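The dma-iommu.c changes below un-static the IOMMU DMA helpers so they can be called directly rather than through a dma_map_ops table. For orientation, a hedged sketch of the consumer-facing noncontiguous allocation flow these helpers back (the device, size, and flow are made up for illustration; the dma_*_noncontiguous() calls are the real API):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static int example_noncontig_alloc(struct device *dev)
{
	struct sg_table *sgt;
	void *vaddr;

	/* Allocate 1 MiB that may be physically scattered. */
	sgt = dma_alloc_noncontiguous(dev, SZ_1M, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	/* Optional contiguous kernel mapping of the backing pages. */
	vaddr = dma_vmap_noncontiguous(dev, SZ_1M, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, SZ_1M, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	/* ... use vaddr and/or sgt ... */

	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, SZ_1M, sgt, DMA_BIDIRECTIONAL);
	return 0;
}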
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7b1dfa0665df..2a9fa0c8cc00 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -17,6 +17,7 @@
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
+#include <linux/iommu-dma.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
@@ -1037,9 +1038,23 @@ out_unmap:
return NULL;
}
-static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
- size_t size, enum dma_data_direction dir, gfp_t gfp,
- unsigned long attrs)
+/*
+ * This is the actual return value from iommu_dma_alloc_noncontiguous().
+ *
+ * Users of the DMA API should only care about the sg_table, but to make
+ * the DMA-API-internal vmapping and freeing easier we stash away the page
+ * array as well (except for the fallback case). This can go away any time,
+ * e.g. when a vmap variant that takes a scatterlist comes along.
+ */
+struct dma_sgt_handle {
+ struct sg_table sgt;
+ struct page **pages;
+};
+#define sgt_handle(sgtable) \
+	container_of((sgtable), struct dma_sgt_handle, sgt)
+
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
struct dma_sgt_handle *sh;
@@ -1055,7 +1070,7 @@ static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
return &sh->sgt;
}
-static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
struct dma_sgt_handle *sh = sgt_handle(sgt);
@@ -1066,8 +1081,26 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
kfree(sh);
}
-static void iommu_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt)
+{
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+}
+
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt)
+{
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
+ return -ENXIO;
+ return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
+}
+
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
{
phys_addr_t phys;
@@ -1081,8 +1114,8 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
-static void iommu_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
{
phys_addr_t phys;
@@ -1096,9 +1129,8 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
arch_sync_dma_for_device(phys, size, dir);
}
-static void iommu_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
@@ -1112,9 +1144,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
-static void iommu_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
@@ -1129,9 +1160,9 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
-static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
@@ -1189,7 +1220,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
return iova;
}
-static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -1342,8 +1373,8 @@ out_unmap:
* impedance-matching, to be able to hand off a suitably-aligned list,
* but still preserve the original offsets and sizes for the caller.
*/
-static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1462,8 +1493,8 @@ out:
return ret;
}
-static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs)
{
dma_addr_t end = 0, start;
struct scatterlist *tmp;
@@ -1512,7 +1543,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
__iommu_dma_unmap(dev, start, end - start);
}
-static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
@@ -1520,7 +1551,7 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
dma_get_mask(dev));
}
-static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
__iommu_dma_unmap(dev, handle, size);
@@ -1557,7 +1588,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
dma_free_contiguous(dev, page, alloc_size);
}
-static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs)
{
__iommu_dma_unmap(dev, handle, size);
@@ -1601,8 +1632,8 @@ out_free_pages:
return NULL;
}
-static void *iommu_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, unsigned long attrs)
{
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
@@ -1635,7 +1666,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
return cpu_addr;
}
-static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
@@ -1666,7 +1697,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot);
}
-static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
@@ -1693,19 +1724,19 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
return ret;
}
-static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
+unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
-static size_t iommu_dma_opt_mapping_size(void)
+size_t iommu_dma_opt_mapping_size(void)
{
return iova_rcache_range();
}
-static size_t iommu_dma_max_mapping_size(struct device *dev)
+size_t iommu_dma_max_mapping_size(struct device *dev)
{
if (dev_is_untrusted(dev))
return swiotlb_max_mapping_size(dev);
@@ -1713,32 +1744,6 @@ static size_t iommu_dma_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
-static const struct dma_map_ops iommu_dma_ops = {
- .flags = DMA_F_PCI_P2PDMA_SUPPORTED |
- DMA_F_CAN_SKIP_SYNC,
- .alloc = iommu_dma_alloc,
- .free = iommu_dma_free,
- .alloc_pages_op = dma_common_alloc_pages,
- .free_pages = dma_common_free_pages,
- .alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
- .free_noncontiguous = iommu_dma_free_noncontiguous,
- .mmap = iommu_dma_mmap,
- .get_sgtable = iommu_dma_get_sgtable,
- .map_page = iommu_dma_map_page,
- .unmap_page = iommu_dma_unmap_page,
- .map_sg = iommu_dma_map_sg,
- .unmap_sg = iommu_dma_unmap_sg,
- .sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
- .sync_single_for_device = iommu_dma_sync_single_for_device,
- .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
- .sync_sg_for_device = iommu_dma_sync_sg_for_device,
- .map_resource = iommu_dma_map_resource,
- .unmap_resource = iommu_dma_unmap_resource,
- .get_merge_boundary = iommu_dma_get_merge_boundary,
- .opt_mapping_size = iommu_dma_opt_mapping_size,
- .max_mapping_size = iommu_dma_max_mapping_size,
-};
-
void iommu_setup_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -1746,19 +1751,15 @@ void iommu_setup_dma_ops(struct device *dev)
if (dev_is_pci(dev))
dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
- if (iommu_is_dma_domain(domain)) {
- if (iommu_dma_init_domain(domain, dev))
- goto out_err;
- dev->dma_ops = &iommu_dma_ops;
- } else if (dev->dma_ops == &iommu_dma_ops) {
- /* Clean up if we've switched *from* a DMA domain */
- dev->dma_ops = NULL;
- }
+ dev->dma_iommu = iommu_is_dma_domain(domain);
+ if (dev->dma_iommu && iommu_dma_init_domain(domain, dev))
+ goto out_err;
return;
out_err:
- pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
- dev_name(dev));
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
+ dev->dma_iommu = false;
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
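This hunk deletes the iommu_dma_ops table outright: with every implementation exported above, the DMA core can branch on a single dev->dma_iommu flag instead of chasing a per-device function-pointer table. A minimal standalone sketch of that trade, with all names illustrative rather than taken from the kernel:

/* Userspace sketch, not kernel code: flag dispatch vs. an ops table. */
#include <stdbool.h>
#include <stdio.h>

struct device { bool dma_iommu; };

static void iommu_map(void)  { puts("iommu-dma path"); }
static void direct_map(void) { puts("dma-direct path"); }

static void dma_map(struct device *dev)
{
	/* One well-predicted branch in place of an indirect call. */
	if (dev->dma_iommu)
		iommu_map();
	else
		direct_map();
}

int main(void)
{
	struct device d = { .dma_iommu = true };

	dma_map(&d);
	d.dma_iommu = false;
	dma_map(&d);
	return 0;
}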
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index f52fb39c968e..88fd32a9323c 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -12,7 +12,6 @@ config DMAR_DEBUG
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && X86
- select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
select IOMMUFD_DRIVER if IOMMUFD
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ed6c5cb60c5a..83c8e617a2c5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3578,6 +3578,7 @@ int iommu_replace_group_handle(struct iommu_group *group,
ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
if (ret)
goto err_unlock;
+ handle->domain = new_domain;
}
ret = __iommu_group_set_domain(group, new_domain);
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 3214a4c17c6b..5fd3dd420290 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
*/
+#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/slab.h>
-#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>
-#include "../iommu-priv.h"
+#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
@@ -327,8 +327,9 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup,
return 0;
}
-static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging,
- struct iommufd_device *idev)
+static int
+iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
+ struct iommufd_hwpt_paging *hwpt_paging)
{
int rc;
@@ -354,6 +355,7 @@ static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging,
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
+ struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
int rc;
mutex_lock(&idev->igroup->lock);
@@ -363,8 +365,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
goto err_unlock;
}
- if (hwpt_is_paging(hwpt)) {
- rc = iommufd_hwpt_paging_attach(to_hwpt_paging(hwpt), idev);
+ if (hwpt_paging) {
+ rc = iommufd_device_attach_reserved_iova(idev, hwpt_paging);
if (rc)
goto err_unlock;
}
@@ -387,9 +389,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
mutex_unlock(&idev->igroup->lock);
return 0;
err_unresv:
- if (hwpt_is_paging(hwpt))
- iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt,
- idev->dev);
+ if (hwpt_paging)
+ iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
err_unlock:
mutex_unlock(&idev->igroup->lock);
return rc;
@@ -399,6 +400,7 @@ struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev)
{
struct iommufd_hw_pagetable *hwpt = idev->igroup->hwpt;
+ struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
mutex_lock(&idev->igroup->lock);
list_del(&idev->group_item);
@@ -406,9 +408,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev)
iommufd_hwpt_detach_device(hwpt, idev);
idev->igroup->hwpt = NULL;
}
- if (hwpt_is_paging(hwpt))
- iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt,
- idev->dev);
+ if (hwpt_paging)
+ iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
mutex_unlock(&idev->igroup->lock);
/* Caller must destroy hwpt */
@@ -440,17 +441,17 @@ iommufd_group_remove_reserved_iova(struct iommufd_group *igroup,
}
static int
-iommufd_group_do_replace_paging(struct iommufd_group *igroup,
- struct iommufd_hwpt_paging *hwpt_paging)
+iommufd_group_do_replace_reserved_iova(struct iommufd_group *igroup,
+ struct iommufd_hwpt_paging *hwpt_paging)
{
- struct iommufd_hw_pagetable *old_hwpt = igroup->hwpt;
+ struct iommufd_hwpt_paging *old_hwpt_paging;
struct iommufd_device *cur;
int rc;
lockdep_assert_held(&igroup->lock);
- if (!hwpt_is_paging(old_hwpt) ||
- hwpt_paging->ioas != to_hwpt_paging(old_hwpt)->ioas) {
+ old_hwpt_paging = find_hwpt_paging(igroup->hwpt);
+ if (!old_hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas) {
list_for_each_entry(cur, &igroup->device_list, group_item) {
rc = iopt_table_enforce_dev_resv_regions(
&hwpt_paging->ioas->iopt, cur->dev, NULL);
@@ -473,6 +474,8 @@ static struct iommufd_hw_pagetable *
iommufd_device_do_replace(struct iommufd_device *idev,
struct iommufd_hw_pagetable *hwpt)
{
+ struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
+ struct iommufd_hwpt_paging *old_hwpt_paging;
struct iommufd_group *igroup = idev->igroup;
struct iommufd_hw_pagetable *old_hwpt;
unsigned int num_devices;
@@ -491,9 +494,8 @@ iommufd_device_do_replace(struct iommufd_device *idev,
}
old_hwpt = igroup->hwpt;
- if (hwpt_is_paging(hwpt)) {
- rc = iommufd_group_do_replace_paging(igroup,
- to_hwpt_paging(hwpt));
+ if (hwpt_paging) {
+ rc = iommufd_group_do_replace_reserved_iova(igroup, hwpt_paging);
if (rc)
goto err_unlock;
}
@@ -502,11 +504,10 @@ iommufd_device_do_replace(struct iommufd_device *idev,
if (rc)
goto err_unresv;
- if (hwpt_is_paging(old_hwpt) &&
- (!hwpt_is_paging(hwpt) ||
- to_hwpt_paging(hwpt)->ioas != to_hwpt_paging(old_hwpt)->ioas))
- iommufd_group_remove_reserved_iova(igroup,
- to_hwpt_paging(old_hwpt));
+ old_hwpt_paging = find_hwpt_paging(old_hwpt);
+ if (old_hwpt_paging &&
+ (!hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas))
+ iommufd_group_remove_reserved_iova(igroup, old_hwpt_paging);
igroup->hwpt = hwpt;
@@ -524,9 +525,8 @@ iommufd_device_do_replace(struct iommufd_device *idev,
/* Caller must destroy old_hwpt */
return old_hwpt;
err_unresv:
- if (hwpt_is_paging(hwpt))
- iommufd_group_remove_reserved_iova(igroup,
- to_hwpt_paging(hwpt));
+ if (hwpt_paging)
+ iommufd_group_remove_reserved_iova(igroup, hwpt_paging);
err_unlock:
mutex_unlock(&idev->igroup->lock);
return ERR_PTR(rc);
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index a643d5c7c535..e590973ce5cf 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -3,14 +3,14 @@
*/
#define pr_fmt(fmt) "iommufd: " fmt
+#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/iommufd.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/iommufd.h>
#include <linux/pci.h>
#include <linux/poll.h>
-#include <linux/anon_inodes.h>
#include <uapi/linux/iommufd.h>
#include "../iommu-priv.h"
@@ -161,7 +161,6 @@ static int __fault_domain_replace_dev(struct iommufd_device *idev,
if (!handle)
return -ENOMEM;
- handle->handle.domain = hwpt->domain;
handle->idev = idev;
ret = iommu_replace_group_handle(idev->igroup->group,
hwpt->domain, &handle->handle);
@@ -361,7 +360,6 @@ static const struct file_operations iommufd_fault_fops = {
.write = iommufd_fault_fops_write,
.poll = iommufd_fault_fops_poll,
.release = iommufd_fault_fops_release,
- .llseek = no_llseek,
};
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index aefde4443671..d06bf6e6c19f 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -225,7 +225,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
!user_data->len || !ops->domain_alloc_user)
return ERR_PTR(-EOPNOTSUPP);
- if (parent->auto_domain || !parent->nest_parent)
+ if (parent->auto_domain || !parent->nest_parent ||
+ parent->common.domain->owner != ops)
return ERR_PTR(-EINVAL);
hwpt_nested = __iommufd_object_alloc(
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 05fd9d3abf1b..4bf7ccd39d46 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -8,17 +8,17 @@
* The datastructure uses the iopt_pages to optimize the storage of the PFNs
* between the domains and xarray.
*/
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/lockdep.h>
-#include <linux/iommu.h>
#include <linux/sched/mm.h>
-#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/errno.h>
#include <uapi/linux/iommufd.h>
-#include "io_pagetable.h"
#include "double_span.h"
+#include "io_pagetable.h"
struct iopt_pages_list {
struct iopt_pages *pages;
@@ -112,6 +112,7 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
unsigned long page_offset = uptr % PAGE_SIZE;
struct interval_tree_double_span_iter used_span;
struct interval_tree_span_iter allowed_span;
+ unsigned long max_alignment = PAGE_SIZE;
unsigned long iova_alignment;
lockdep_assert_held(&iopt->iova_rwsem);
@@ -131,6 +132,13 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
roundup_pow_of_two(length),
1UL << __ffs64(uptr));
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ max_alignment = HPAGE_SIZE;
+#endif
+ /* Protect against ALIGN() overflow */
+ if (iova_alignment >= max_alignment)
+ iova_alignment = max_alignment;
+
if (iova_alignment < iopt->iova_alignment)
return -EINVAL;
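The cap matters because ALIGN() is an overflowing add-and-mask. A short standalone demo (userspace C, illustrative values) of the wraparound the new comment refers to:

#include <stdio.h>

/* Same definition the kernel uses for power-of-two alignment. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long iova = (1UL << 63) + 0x1000;	/* candidate address */
	unsigned long huge = 1UL << 63;			/* oversized alignment */

	/* iova + huge - 1 exceeds ULONG_MAX and wraps, so the "aligned"
	 * result is 0, below the input; capping iova_alignment (at
	 * HPAGE_SIZE in the patch) keeps the addition in range. */
	printf("ALIGN(%#lx, %#lx) = %#lx\n", iova, huge, ALIGN(iova, huge));
	return 0;
}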
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index 0ec3509b7e33..c61d74471684 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -6,8 +6,8 @@
#define __IO_PAGETABLE_H
#include <linux/interval_tree.h>
-#include <linux/mutex.h>
#include <linux/kref.h>
+#include <linux/mutex.h>
#include <linux/xarray.h>
#include "iommufd_private.h"
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index 157a89b993e4..2c4b2bb11e78 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -3,8 +3,8 @@
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/interval_tree.h>
-#include <linux/iommufd.h>
#include <linux/iommu.h>
+#include <linux/iommufd.h>
#include <uapi/linux/iommufd.h>
#include "io_pagetable.h"
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 92efe30a8f0d..f1d865e6fab6 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -4,13 +4,14 @@
#ifndef __IOMMUFD_PRIVATE_H
#define __IOMMUFD_PRIVATE_H
-#include <linux/rwsem.h>
-#include <linux/xarray.h>
-#include <linux/refcount.h>
-#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/iova_bitmap.h>
+#include <linux/refcount.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>
+
#include "../iommu-priv.h"
struct iommu_domain;
@@ -324,6 +325,25 @@ to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
return container_of(hwpt, struct iommufd_hwpt_paging, common);
}
+static inline struct iommufd_hwpt_nested *
+to_hwpt_nested(struct iommufd_hw_pagetable *hwpt)
+{
+ return container_of(hwpt, struct iommufd_hwpt_nested, common);
+}
+
+static inline struct iommufd_hwpt_paging *
+find_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
+{
+ switch (hwpt->obj.type) {
+ case IOMMUFD_OBJ_HWPT_PAGING:
+ return to_hwpt_paging(hwpt);
+ case IOMMUFD_OBJ_HWPT_NESTED:
+ return to_hwpt_nested(hwpt)->parent;
+ default:
+ return NULL;
+ }
+}
+
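find_hwpt_paging() folds the repeated hwpt_is_paging()/to_hwpt_paging() dance at the call sites earlier in this series into one helper built on container_of(). A standalone illustration of the pattern (plain C, simplified types, not the kernel structs):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum obj_type { OBJ_PAGING, OBJ_NESTED };

struct hw_pagetable { enum obj_type type; };
struct hwpt_paging { struct hw_pagetable common; int ioas_id; };
struct hwpt_nested { struct hw_pagetable common; struct hwpt_paging *parent; };

/* Mirrors find_hwpt_paging(): normalize either object to its paging hwpt. */
static struct hwpt_paging *find_paging(struct hw_pagetable *hwpt)
{
	switch (hwpt->type) {
	case OBJ_PAGING:
		return container_of(hwpt, struct hwpt_paging, common);
	case OBJ_NESTED:
		return container_of(hwpt, struct hwpt_nested, common)->parent;
	}
	return NULL;
}

int main(void)
{
	struct hwpt_paging p = { .common = { OBJ_PAGING }, .ioas_id = 7 };
	struct hwpt_nested n = { .common = { OBJ_NESTED }, .parent = &p };

	printf("paging: %d nested->parent: %d\n",
	       find_paging(&p.common)->ioas_id,
	       find_paging(&n.common)->ioas_id);
	return 0;
}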
static inline struct iommufd_hwpt_paging *
iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
{
@@ -490,8 +510,10 @@ static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
- if (hwpt->fault)
+ if (hwpt->fault) {
iommufd_fault_domain_detach_dev(hwpt, idev);
+ return;
+ }
iommu_detach_group(hwpt->domain, idev->igroup->group);
}
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index acbbba1c6671..f4bc23a92f9a 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -4,8 +4,8 @@
#ifndef _UAPI_IOMMUFD_TEST_H
#define _UAPI_IOMMUFD_TEST_H
-#include <linux/types.h>
#include <linux/iommufd.h>
+#include <linux/types.h>
enum {
IOMMU_TEST_OP_ADD_RESERVED = 1,
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index b9e964b1ad5c..d90b9e253412 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -3,10 +3,10 @@
* Copyright (c) 2022, Oracle and/or its affiliates.
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
*/
+#include <linux/highmem.h>
#include <linux/iova_bitmap.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/highmem.h>
#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 83bbd7c5d160..b5f5d27ee963 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -8,15 +8,15 @@
*/
#define pr_fmt(fmt) "iommufd: " fmt
+#include <linux/bug.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/iommufd.h>
#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/bug.h>
+#include <linux/slab.h>
#include <uapi/linux/iommufd.h>
-#include <linux/iommufd.h>
#include "io_pagetable.h"
#include "iommufd_private.h"
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index 117f644a0c5b..93d806c9c073 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -45,16 +45,16 @@
* last_iova + 1 can overflow. An iopt_pages index will always be much less than
* ULONG_MAX so last_index + 1 cannot overflow.
*/
+#include <linux/highmem.h>
+#include <linux/iommu.h>
+#include <linux/iommufd.h>
+#include <linux/kthread.h>
#include <linux/overflow.h>
#include <linux/slab.h>
-#include <linux/iommu.h>
#include <linux/sched/mm.h>
-#include <linux/highmem.h>
-#include <linux/kthread.h>
-#include <linux/iommufd.h>
-#include "io_pagetable.h"
#include "double_span.h"
+#include "io_pagetable.h"
#ifndef CONFIG_IOMMUFD_TEST
#define TEMP_MEMORY_LIMIT 65536
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 222cfc11ebfd..540437be168a 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -3,13 +3,14 @@
*
* Kernel side components to support tools/testing/selftests/iommu
*/
-#include <linux/slab.h>
-#include <linux/iommu.h>
-#include <linux/xarray.h>
-#include <linux/file.h>
#include <linux/anon_inodes.h>
+#include <linux/debugfs.h>
#include <linux/fault-inject.h>
+#include <linux/file.h>
+#include <linux/iommu.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>
#include "../iommu-priv.h"
@@ -1342,7 +1343,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
unsigned long page_size, void __user *uptr,
u32 flags)
{
- unsigned long bitmap_size, i, max;
+ unsigned long i, max;
struct iommu_test_cmd *cmd = ucmd->cmd;
struct iommufd_hw_pagetable *hwpt;
struct mock_iommu_domain *mock;
@@ -1363,15 +1364,14 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
}
max = length / page_size;
- bitmap_size = DIV_ROUND_UP(max, BITS_PER_BYTE);
-
- tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT);
+ tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
+ GFP_KERNEL_ACCOUNT);
if (!tmp) {
rc = -ENOMEM;
goto out_put;
}
- if (copy_from_user(tmp, uptr, bitmap_size)) {
+	if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
rc = -EFAULT;
goto out_free;
}
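The selftest fix above separates two sizes the old code conflated: the bitmap must be allocated in whole unsigned longs, because word-wide bitmap operations may touch the final partial word, while only the byte-rounded amount is copied from userspace. A standalone check of the arithmetic (illustrative value for max):

#include <stdio.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_PER_LONG (sizeof(unsigned long) * BITS_PER_BYTE)

int main(void)
{
	unsigned long nbits = 100;	/* e.g. max = length / page_size */
	size_t copy_bytes  = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	size_t alloc_bytes = DIV_ROUND_UP(nbits, BITS_PER_LONG) *
			     sizeof(unsigned long);

	/* 100 bits: copy 13 bytes from the user, but allocate 16 so that
	 * word-wide bitmap ops never read past the buffer. */
	printf("copy %zu bytes, allocate %zu bytes\n", copy_bytes, alloc_bytes);
	return 0;
}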
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 3ed257334562..70dee9ad4bae 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1024,7 +1024,6 @@ static int capi_release(struct inode *inode, struct file *file)
static const struct file_operations capi_fops =
{
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = capi_read,
.write = capi_write,
.poll = capi_poll,
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 83d6b484d3c6..7cfa8c61dba0 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -266,7 +266,6 @@ static const struct file_operations mISDN_fops = {
.unlocked_ioctl = mISDN_ioctl,
.open = mISDN_open,
.release = mISDN_close,
- .llseek = no_llseek,
};
static struct miscdevice mISDNtimer = {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 8d9d8da376e4..b784bb74a837 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -825,6 +825,14 @@ config LEDS_BLINKM
This option enables support for the BlinkM RGB LED connected
through I2C. Say Y to enable support for the BlinkM LED.
+config LEDS_BLINKM_MULTICOLOR
+ bool "Enable multicolor support for BlinkM I2C RGB LED"
+ depends on LEDS_BLINKM
+ depends on LEDS_CLASS_MULTICOLOR=y || LEDS_CLASS_MULTICOLOR=LEDS_BLINKM
+ help
+	  This option enables multicolor sysfs class support for the BlinkM
+	  LED and disables the older, separate per-color sysfs interface.
+
config LEDS_POWERNV
tristate "LED support for PowerNV Platform"
depends on LEDS_CLASS
diff --git a/drivers/leds/flash/leds-aat1290.c b/drivers/leds/flash/leds-aat1290.c
index e8f9dd293592..c7b6a1f01288 100644
--- a/drivers/leds/flash/leds-aat1290.c
+++ b/drivers/leds/flash/leds-aat1290.c
@@ -7,6 +7,7 @@
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/led-class-flash.h>
@@ -215,7 +216,6 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
struct device_node **sub_node)
{
struct device *dev = &led->pdev->dev;
- struct device_node *child_node;
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
struct pinctrl *pinctrl;
#endif
@@ -246,7 +246,8 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
}
#endif
- child_node = of_get_next_available_child(dev_of_node(dev), NULL);
+ struct device_node *child_node __free(device_node) =
+ of_get_next_available_child(dev_of_node(dev), NULL);
if (!child_node) {
dev_err(dev, "No DT child node found for connected LED.\n");
return -EINVAL;
@@ -267,7 +268,7 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
if (ret < 0) {
dev_err(dev,
"flash-max-microamp DT property missing\n");
- goto err_parse_dt;
+ return ret;
}
ret = of_property_read_u32(child_node, "flash-max-timeout-us",
@@ -275,15 +276,12 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
if (ret < 0) {
dev_err(dev,
"flash-max-timeout-us DT property missing\n");
- goto err_parse_dt;
+ return ret;
}
*sub_node = child_node;
-err_parse_dt:
- of_node_put(child_node);
-
- return ret;
+ return 0;
}
static void aat1290_led_validate_mm_current(struct aat1290_led *led,
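The aat1290 conversion leans on the scope-based cleanup support from <linux/cleanup.h>: of.h ties __free(device_node) to of_node_put(), so every return path drops the child reference automatically and the err_parse_dt unwind label can go. A compressed sketch of the pattern, assuming kernel context and using a made-up helper name:

static int parse_one_child(struct device *dev, u32 *out)
{
	struct device_node *child __free(device_node) =
		of_get_next_available_child(dev_of_node(dev), NULL);

	if (!child)
		return -EINVAL;	/* nothing to put: child is NULL */

	/* of_node_put(child) runs here and on every later return */
	return of_property_read_u32(child, "flash-max-microamp", out);
}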
diff --git a/drivers/leds/flash/leds-as3645a.c b/drivers/leds/flash/leds-as3645a.c
index 2c6ef321b7c8..2f2d783c62c3 100644
--- a/drivers/leds/flash/leds-as3645a.c
+++ b/drivers/leds/flash/leds-as3645a.c
@@ -478,14 +478,12 @@ static int as3645a_detect(struct as3645a *flash)
return as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE);
}
-static int as3645a_parse_node(struct as3645a *flash,
- struct fwnode_handle *fwnode)
+static int as3645a_parse_node(struct device *dev, struct as3645a *flash)
{
struct as3645a_config *cfg = &flash->cfg;
- struct fwnode_handle *child;
int rval;
- fwnode_for_each_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(dev, child) {
u32 id = 0;
fwnode_property_read_u32(child, "reg", &id);
@@ -686,7 +684,7 @@ static int as3645a_probe(struct i2c_client *client)
flash->client = client;
- rval = as3645a_parse_node(flash, dev_fwnode(&client->dev));
+ rval = as3645a_parse_node(&client->dev, flash);
if (rval < 0)
return rval;
diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c
index 7bb0aa2753e3..16a01a200c0b 100644
--- a/drivers/leds/flash/leds-ktd2692.c
+++ b/drivers/leds/flash/leds-ktd2692.c
@@ -6,6 +6,7 @@
* Ingi Kim <ingi2.kim@samsung.com>
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/leds-expresswire.h>
@@ -208,7 +209,6 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
struct ktd2692_led_config_data *cfg)
{
struct device_node *np = dev_of_node(dev);
- struct device_node *child_node;
int ret;
if (!np)
@@ -239,7 +239,8 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
}
}
- child_node = of_get_next_available_child(np, NULL);
+ struct device_node *child_node __free(device_node) =
+ of_get_next_available_child(np, NULL);
if (!child_node) {
dev_err(dev, "No DT child node found for connected LED.\n");
return -EINVAL;
@@ -252,26 +253,24 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
&cfg->movie_max_microamp);
if (ret) {
dev_err(dev, "failed to parse led-max-microamp\n");
- goto err_parse_dt;
+ return ret;
}
ret = of_property_read_u32(child_node, "flash-max-microamp",
&cfg->flash_max_microamp);
if (ret) {
dev_err(dev, "failed to parse flash-max-microamp\n");
- goto err_parse_dt;
+ return ret;
}
ret = of_property_read_u32(child_node, "flash-max-timeout-us",
&cfg->flash_max_timeout);
if (ret) {
dev_err(dev, "failed to parse flash-max-timeout-us\n");
- goto err_parse_dt;
+ return ret;
}
-err_parse_dt:
- of_node_put(child_node);
- return ret;
+ return 0;
}
static const struct led_flash_ops flash_ops = {
diff --git a/drivers/leds/flash/leds-lm3601x.c b/drivers/leds/flash/leds-lm3601x.c
index 7e93c447fec5..abf6b96ade3d 100644
--- a/drivers/leds/flash/leds-lm3601x.c
+++ b/drivers/leds/flash/leds-lm3601x.c
@@ -190,7 +190,7 @@ static int lm3601x_brightness_set(struct led_classdev *cdev,
goto out;
}
- ret = regmap_write(led->regmap, LM3601X_LED_TORCH_REG, brightness);
+ ret = regmap_write(led->regmap, LM3601X_LED_TORCH_REG, brightness - 1);
if (ret < 0)
goto out;
@@ -341,8 +341,9 @@ static int lm3601x_register_leds(struct lm3601x_led *led,
led_cdev = &led->fled_cdev.led_cdev;
led_cdev->brightness_set_blocking = lm3601x_brightness_set;
- led_cdev->max_brightness = DIV_ROUND_UP(led->torch_current_max,
- LM3601X_TORCH_REG_DIV);
+ led_cdev->max_brightness =
+ DIV_ROUND_UP(led->torch_current_max - LM3601X_MIN_TORCH_I_UA + 1,
+ LM3601X_TORCH_REG_DIV);
led_cdev->flags |= LED_DEV_CAP_FLASH;
init_data.fwnode = fwnode;
@@ -386,6 +387,14 @@ static int lm3601x_parse_node(struct lm3601x_led *led,
goto out_err;
}
+ if (led->torch_current_max > LM3601X_MAX_TORCH_I_UA) {
+ dev_warn(&led->client->dev,
+ "Max torch current set too high (%d vs %d)\n",
+ led->torch_current_max,
+ LM3601X_MAX_TORCH_I_UA);
+ led->torch_current_max = LM3601X_MAX_TORCH_I_UA;
+ }
+
ret = fwnode_property_read_u32(child, "flash-max-microamp",
&led->flash_current_max);
if (ret) {
@@ -434,6 +443,10 @@ static int lm3601x_probe(struct i2c_client *client)
return ret;
}
+ ret = regmap_write(led->regmap, LM3601X_DEV_ID_REG, LM3601X_SW_RESET);
+ if (ret)
+ dev_warn(&client->dev, "Failed to reset the LED controller\n");
+
mutex_init(&led->lock);
return lm3601x_register_leds(led, fwnode);
diff --git a/drivers/leds/flash/leds-max77693.c b/drivers/leds/flash/leds-max77693.c
index 9f016b851193..90d78b3d22f8 100644
--- a/drivers/leds/flash/leds-max77693.c
+++ b/drivers/leds/flash/leds-max77693.c
@@ -599,7 +599,7 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
{
struct device *dev = &led->pdev->dev;
struct max77693_sub_led *sub_leds = led->sub_leds;
- struct device_node *node = dev_of_node(dev), *child_node;
+ struct device_node *node = dev_of_node(dev);
struct property *prop;
u32 led_sources[2];
int i, ret, fled_id;
@@ -608,7 +608,7 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
of_property_read_u32(node, "maxim,boost-mvout", &cfg->boost_vout);
of_property_read_u32(node, "maxim,mvsys-min", &cfg->low_vsys);
- for_each_available_child_of_node(node, child_node) {
+ for_each_available_child_of_node_scoped(node, child_node) {
prop = of_find_property(child_node, "led-sources", NULL);
if (prop) {
const __be32 *srcs = NULL;
@@ -622,7 +622,6 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
} else {
dev_err(dev,
"led-sources DT property missing\n");
- of_node_put(child_node);
return -EINVAL;
}
@@ -638,18 +637,16 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
} else {
dev_err(dev,
"Wrong led-sources DT property value.\n");
- of_node_put(child_node);
return -EINVAL;
}
if (sub_nodes[fled_id]) {
dev_err(dev,
"Conflicting \"led-sources\" DT properties\n");
- of_node_put(child_node);
return -EINVAL;
}
- sub_nodes[fled_id] = child_node;
+ sub_nodes[fled_id] = of_node_get(child_node);
sub_leds[fled_id].fled_id = fled_id;
cfg->label[fled_id] =
@@ -681,10 +678,8 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
if (++cfg->num_leds == 2 ||
(max77693_fled_used(led, FLED1) &&
- max77693_fled_used(led, FLED2))) {
- of_node_put(child_node);
+ max77693_fled_used(led, FLED2)))
break;
- }
}
if (cfg->num_leds == 0) {
@@ -968,7 +963,7 @@ static int max77693_led_probe(struct platform_device *pdev)
ret = max77693_setup(led, &led_cfg);
if (ret < 0)
- return ret;
+ goto err_setup;
mutex_init(&led->lock);
@@ -1000,6 +995,8 @@ static int max77693_led_probe(struct platform_device *pdev)
else
goto err_register_led1;
}
+ of_node_put(sub_nodes[i]);
+ sub_nodes[i] = NULL;
}
return 0;
@@ -1013,6 +1010,9 @@ err_register_led2:
err_register_led1:
mutex_destroy(&led->lock);
+err_setup:
+ for (i = FLED1; i <= FLED2; i++)
+ of_node_put(sub_nodes[i]);
return ret;
}
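The of_node_get() added above is the subtle part of this _scoped conversion: the iterator now puts the child when the loop body is left, so a pointer stored in sub_nodes[] must carry its own reference, which the probe path and the new err_setup label then balance with of_node_put(). A short sketch of the rule, assuming kernel context and an illustrative property check:

static struct device_node *pick_led_child(struct device_node *parent)
{
	for_each_available_child_of_node_scoped(parent, child) {
		if (!of_property_present(child, "led-sources"))
			continue;
		/* The scoped iterator drops its reference on scope exit,
		 * so take one of our own before handing the node out. */
		return of_node_get(child);	/* caller must of_node_put() */
	}
	return NULL;
}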
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index bf70bf6fb0d5..41ce034f700e 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
@@ -14,6 +14,9 @@
#include <media/v4l2-flash-led-class.h>
/* registers definitions */
+#define FLASH_REVISION_REG 0x00
+#define FLASH_4CH_REVISION_V0P1 0x01
+
#define FLASH_TYPE_REG 0x04
#define FLASH_TYPE_VAL 0x18
@@ -73,6 +76,16 @@
#define UA_PER_MA 1000
+/* thermal threshold constants */
+#define OTST_3CH_MIN_VAL 3
+#define OTST1_4CH_MIN_VAL 0
+#define OTST1_4CH_V0P1_MIN_VAL 3
+#define OTST2_4CH_MIN_VAL 0
+
+#define OTST1_MAX_CURRENT_MA 1000
+#define OTST2_MAX_CURRENT_MA 500
+#define OTST3_MAX_CURRENT_MA 200
+
enum hw_type {
QCOM_MVFLASH_3CH,
QCOM_MVFLASH_4CH,
@@ -98,6 +111,9 @@ enum {
REG_IRESOLUTION,
REG_CHAN_STROBE,
REG_CHAN_EN,
+ REG_THERM_THRSH1,
+ REG_THERM_THRSH2,
+ REG_THERM_THRSH3,
REG_MAX_COUNT,
};
@@ -111,6 +127,9 @@ static struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
REG_FIELD(0x47, 0, 5), /* iresolution */
REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */
REG_FIELD(0x4c, 0, 2), /* chan_en */
+ REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */
+ REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */
+ REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */
};
static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
@@ -123,6 +142,8 @@ static struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
REG_FIELD(0x49, 0, 3), /* iresolution */
REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */
REG_FIELD(0x4e, 0, 3), /* chan_en */
+ REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */
+ REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */
};
struct qcom_flash_data {
@@ -130,9 +151,11 @@ struct qcom_flash_data {
struct regmap_field *r_fields[REG_MAX_COUNT];
struct mutex lock;
enum hw_type hw_type;
+ u32 total_ma;
u8 leds_count;
u8 max_channels;
u8 chan_en_bits;
+ u8 revision;
};
struct qcom_flash_led {
@@ -143,6 +166,7 @@ struct qcom_flash_led {
u32 max_timeout_ms;
u32 flash_current_ma;
u32 flash_timeout_ms;
+ u32 current_in_use_ma;
u8 *chan_id;
u8 chan_count;
bool enabled;
@@ -172,6 +196,127 @@ static int set_flash_module_en(struct qcom_flash_led *led, bool en)
return rc;
}
+static int update_allowed_flash_current(struct qcom_flash_led *led, u32 *current_ma, bool strobe)
+{
+ struct qcom_flash_data *flash_data = led->flash_data;
+ u32 therm_ma, avail_ma, thrsh[3], min_thrsh, sts;
+ int rc = 0;
+
+ mutex_lock(&flash_data->lock);
+ /*
+	 * Return the previously allocated current to the allowed budget in
+	 * either of these two cases:
+	 * 1) the LED is being disabled;
+	 * 2) the LED is enabled again without an intervening disable
+ */
+ if (!strobe || led->current_in_use_ma != 0) {
+ if (flash_data->total_ma >= led->current_in_use_ma)
+ flash_data->total_ma -= led->current_in_use_ma;
+ else
+ flash_data->total_ma = 0;
+
+ led->current_in_use_ma = 0;
+ if (!strobe)
+ goto unlock;
+ }
+
+ /*
+ * Cache the default thermal threshold settings, and set them to the lowest levels before
+ * reading over-temp real time status. If over-temp has been triggered at the lowest
+ * threshold, it's very likely that it would be triggered at a higher (default) threshold
+ * when more flash current is requested. Prevent device from triggering over-temp condition
+ * by limiting the flash current for the new request.
+ */
+ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH1], &thrsh[0]);
+ if (rc < 0)
+ goto unlock;
+
+ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH2], &thrsh[1]);
+ if (rc < 0)
+ goto unlock;
+
+ if (flash_data->hw_type == QCOM_MVFLASH_3CH) {
+ rc = regmap_field_read(flash_data->r_fields[REG_THERM_THRSH3], &thrsh[2]);
+ if (rc < 0)
+ goto unlock;
+ }
+
+ min_thrsh = OTST_3CH_MIN_VAL;
+ if (flash_data->hw_type == QCOM_MVFLASH_4CH)
+ min_thrsh = (flash_data->revision == FLASH_4CH_REVISION_V0P1) ?
+ OTST1_4CH_V0P1_MIN_VAL : OTST1_4CH_MIN_VAL;
+
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], min_thrsh);
+ if (rc < 0)
+ goto unlock;
+
+ if (flash_data->hw_type == QCOM_MVFLASH_4CH)
+ min_thrsh = OTST2_4CH_MIN_VAL;
+
+ /*
+ * The default thermal threshold settings have been updated hence
+ * restore them if any fault happens starting from here.
+ */
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], min_thrsh);
+ if (rc < 0)
+ goto restore;
+
+ if (flash_data->hw_type == QCOM_MVFLASH_3CH) {
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], min_thrsh);
+ if (rc < 0)
+ goto restore;
+ }
+
+ /* Read thermal level status to get corresponding derating flash current */
+ rc = regmap_field_read(flash_data->r_fields[REG_STATUS2], &sts);
+ if (rc)
+ goto restore;
+
+ therm_ma = FLASH_TOTAL_CURRENT_MAX_UA / 1000;
+ if (flash_data->hw_type == QCOM_MVFLASH_3CH) {
+ if (sts & FLASH_STS_3CH_OTST3)
+ therm_ma = OTST3_MAX_CURRENT_MA;
+ else if (sts & FLASH_STS_3CH_OTST2)
+ therm_ma = OTST2_MAX_CURRENT_MA;
+ else if (sts & FLASH_STS_3CH_OTST1)
+ therm_ma = OTST1_MAX_CURRENT_MA;
+ } else {
+ if (sts & FLASH_STS_4CH_OTST2)
+ therm_ma = OTST2_MAX_CURRENT_MA;
+ else if (sts & FLASH_STS_4CH_OTST1)
+ therm_ma = OTST1_MAX_CURRENT_MA;
+ }
+
+ /* Calculate the allowed flash current for the request */
+ if (therm_ma <= flash_data->total_ma)
+ avail_ma = 0;
+ else
+ avail_ma = therm_ma - flash_data->total_ma;
+
+ *current_ma = min_t(u32, *current_ma, avail_ma);
+ led->current_in_use_ma = *current_ma;
+ flash_data->total_ma += led->current_in_use_ma;
+
+ dev_dbg(led->flash.led_cdev.dev, "allowed flash current: %dmA, total current: %dmA\n",
+ led->current_in_use_ma, flash_data->total_ma);
+
+restore:
+ /* Restore to default thermal threshold settings */
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH1], thrsh[0]);
+ if (rc < 0)
+ goto unlock;
+
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH2], thrsh[1]);
+ if (rc < 0)
+ goto unlock;
+
+ if (flash_data->hw_type == QCOM_MVFLASH_3CH)
+ rc = regmap_field_write(flash_data->r_fields[REG_THERM_THRSH3], thrsh[2]);
+
+unlock:
+ mutex_unlock(&flash_data->lock);
+ return rc;
+}
+
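update_allowed_flash_current() is easiest to read as a budget allocator: under the lock, a request first returns whatever it already holds, then is granted at most the headroom left under the thermal limit. A standalone model of just that accounting (plain C with pthreads; the register reads and threshold juggling are omitted):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int total_ma;		/* sum of all granted currents */

static unsigned int request_ma(unsigned int *in_use_ma,
			       unsigned int want_ma, unsigned int therm_ma)
{
	unsigned int avail;

	pthread_mutex_lock(&lock);
	total_ma -= *in_use_ma;		/* give back the previous grant */
	avail = therm_ma > total_ma ? therm_ma - total_ma : 0;
	*in_use_ma = want_ma < avail ? want_ma : avail;
	total_ma += *in_use_ma;
	pthread_mutex_unlock(&lock);
	return *in_use_ma;
}

int main(void)
{
	unsigned int led0 = 0, led1 = 0;

	printf("led0 gets %u mA\n", request_ma(&led0, 800, 1000));
	printf("led1 gets %u mA\n", request_ma(&led1, 500, 1000)); /* clamped to 200 */
	return 0;
}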
static int set_flash_current(struct qcom_flash_led *led, u32 current_ma, enum led_mode mode)
{
struct qcom_flash_data *flash_data = led->flash_data;
@@ -313,6 +458,10 @@ static int qcom_flash_strobe_set(struct led_classdev_flash *fled_cdev, bool stat
if (rc)
return rc;
+ rc = update_allowed_flash_current(led, &led->flash_current_ma, state);
+ if (rc < 0)
+ return rc;
+
rc = set_flash_current(led, led->flash_current_ma, FLASH_MODE);
if (rc)
return rc;
@@ -429,6 +578,10 @@ static int qcom_flash_led_brightness_set(struct led_classdev *led_cdev,
if (rc)
return rc;
+ rc = update_allowed_flash_current(led, &current_ma, enable);
+ if (rc < 0)
+ return rc;
+
rc = set_flash_current(led, current_ma, TORCH_MODE);
if (rc)
return rc;
@@ -707,6 +860,14 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
flash_data->hw_type = QCOM_MVFLASH_4CH;
flash_data->max_channels = 4;
regs = mvflash_4ch_regs;
+
+ rc = regmap_read(regmap, reg_base + FLASH_REVISION_REG, &val);
+ if (rc < 0) {
+ dev_err(dev, "Failed to read flash LED module revision, rc=%d\n", rc);
+ return rc;
+ }
+
+ flash_data->revision = val;
} else {
dev_err(dev, "flash LED subtype %#x is not yet supported\n", val);
return -ENODEV;
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 033ab5fed38a..81238376484b 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -115,7 +115,7 @@ static int pm860x_led_set(struct led_classdev *cdev,
static int pm860x_led_dt_init(struct platform_device *pdev,
struct pm860x_led *data)
{
- struct device_node *nproot, *np;
+ struct device_node *nproot;
int iset = 0;
if (!dev_of_node(pdev->dev.parent))
@@ -125,12 +125,11 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
dev_err(&pdev->dev, "failed to find leds node\n");
return -ENODEV;
}
- for_each_available_child_of_node(nproot, np) {
+ for_each_available_child_of_node_scoped(nproot, np) {
if (of_node_name_eq(np, data->name)) {
of_property_read_u32(np, "marvell,88pm860x-iset",
&iset);
data->iset = PM8606_LED_CURRENT(iset);
- of_node_put(np);
break;
}
}
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 6475eadcb0df..216755d6010f 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -263,7 +263,7 @@ out:
static int aw2013_probe_dt(struct aw2013 *chip)
{
- struct device_node *np = dev_of_node(&chip->client->dev), *child;
+ struct device_node *np = dev_of_node(&chip->client->dev);
int count, ret = 0, i = 0;
struct aw2013_led *led;
@@ -273,7 +273,7 @@ static int aw2013_probe_dt(struct aw2013 *chip)
regmap_write(chip->regmap, AW2013_RSTR, AW2013_RSTR_RESET);
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
struct led_init_data init_data = {};
u32 source;
u32 imax;
@@ -304,10 +304,8 @@ static int aw2013_probe_dt(struct aw2013 *chip)
ret = devm_led_classdev_register_ext(&chip->client->dev,
&led->cdev, &init_data);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
i++;
}
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 246f1296ab09..29f5bad61796 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -392,7 +392,6 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(&pdev->dev);
- struct device_node *child;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
unsigned long val, *blink_leds, *blink_delay;
@@ -435,7 +434,7 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
val |= BCM6328_SERIAL_LED_SHIFT_DIR;
bcm6328_led_write(mem + BCM6328_REG_INIT, val);
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
int rc;
u32 reg;
@@ -454,10 +453,8 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
rc = bcm6328_led(dev, child, reg, mem, lock,
blink_leds, blink_delay);
- if (rc < 0) {
- of_node_put(child);
+ if (rc < 0)
return rc;
- }
}
return 0;
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index 86e51d44a5a7..51fcff2a64fd 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -147,7 +147,6 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(&pdev->dev);
- struct device_node *child;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
unsigned long val;
@@ -184,7 +183,7 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
}
bcm6358_led_write(mem + BCM6358_REG_CTRL, val);
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
int rc;
u32 reg;
@@ -198,10 +197,8 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
}
rc = bcm6358_led(dev, child, reg, mem, lock);
- if (rc < 0) {
- of_node_put(child);
+ if (rc < 0)
return rc;
- }
}
return 0;
diff --git a/drivers/leds/leds-bd2606mvv.c b/drivers/leds/leds-bd2606mvv.c
index 3fda712d2f80..c1181a35d0f7 100644
--- a/drivers/leds/leds-bd2606mvv.c
+++ b/drivers/leds/leds-bd2606mvv.c
@@ -69,16 +69,14 @@ static const struct regmap_config bd2606mvv_regmap = {
static int bd2606mvv_probe(struct i2c_client *client)
{
- struct fwnode_handle *np, *child;
struct device *dev = &client->dev;
struct bd2606mvv_priv *priv;
struct fwnode_handle *led_fwnodes[BD2606_MAX_LEDS] = { 0 };
int active_pairs[BD2606_MAX_LEDS / 2] = { 0 };
int err, reg;
- int i;
+ int i, j;
- np = dev_fwnode(dev);
- if (!np)
+ if (!dev_fwnode(dev))
return -ENODEV;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -94,20 +92,18 @@ static int bd2606mvv_probe(struct i2c_client *client)
i2c_set_clientdata(client, priv);
- fwnode_for_each_available_child_node(np, child) {
+ device_for_each_child_node_scoped(dev, child) {
struct bd2606mvv_led *led;
err = fwnode_property_read_u32(child, "reg", &reg);
- if (err) {
- fwnode_handle_put(child);
+ if (err)
return err;
- }
- if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) {
- fwnode_handle_put(child);
+
+ if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg])
return -EINVAL;
- }
+
led = &priv->leds[reg];
- led_fwnodes[reg] = child;
+ led_fwnodes[reg] = fwnode_handle_get(child);
active_pairs[reg / 2]++;
led->priv = priv;
led->led_no = reg;
@@ -130,7 +126,8 @@ static int bd2606mvv_probe(struct i2c_client *client)
&priv->leds[i].ldev,
&init_data);
if (err < 0) {
- fwnode_handle_put(child);
+ for (j = i; j < BD2606_MAX_LEDS; j++)
+ fwnode_handle_put(led_fwnodes[j]);
return dev_err_probe(dev, err,
"couldn't register LED %s\n",
priv->leds[i].ldev.name);
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index e40b87aead2d..577497b9d426 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -2,6 +2,7 @@
/*
* leds-blinkm.c
* (c) Jan-Simon Möller (dl9pf@gmx.de)
+ * (c) Joseph Strauss (jstrauss@mailbox.org)
*/
#include <linux/module.h>
@@ -15,6 +16,10 @@
#include <linux/pm_runtime.h>
#include <linux/leds.h>
#include <linux/delay.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/kconfig.h>
+
+#define NUM_LEDS 3
/* Addresses to scan - BlinkM is on 0x09 by default */
static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END };
@@ -22,19 +27,25 @@ static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END };
static int blinkm_transfer_hw(struct i2c_client *client, int cmd);
static int blinkm_test_run(struct i2c_client *client);
+/* Contains structs for both the color-separated sysfs classes and the new multicolor class */
struct blinkm_led {
struct i2c_client *i2c_client;
- struct led_classdev led_cdev;
+ union {
+ /* used when multicolor support is disabled */
+ struct led_classdev led_cdev;
+ struct led_classdev_mc mcled_cdev;
+ } cdev;
int id;
};
-#define cdev_to_blmled(c) container_of(c, struct blinkm_led, led_cdev)
+#define led_cdev_to_blmled(c) container_of(c, struct blinkm_led, cdev.led_cdev)
+#define mcled_cdev_to_led(c) container_of(c, struct blinkm_led, cdev.mcled_cdev)
struct blinkm_data {
struct i2c_client *i2c_client;
struct mutex update_lock;
/* used for led class interface */
- struct blinkm_led blinkm_leds[3];
+ struct blinkm_led blinkm_leds[NUM_LEDS];
/* used for "blinkm" sysfs interface */
u8 red; /* color red */
u8 green; /* color green */
@@ -419,11 +430,29 @@ static int blinkm_transfer_hw(struct i2c_client *client, int cmd)
return 0;
}
+static int blinkm_set_mc_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct led_classdev_mc *mcled_cdev = lcdev_to_mccdev(led_cdev);
+ struct blinkm_led *led = mcled_cdev_to_led(mcled_cdev);
+ struct blinkm_data *data = i2c_get_clientdata(led->i2c_client);
+
+ led_mc_calc_color_components(mcled_cdev, value);
+
+ data->next_red = (u8) mcled_cdev->subled_info[RED].brightness;
+ data->next_green = (u8) mcled_cdev->subled_info[GREEN].brightness;
+ data->next_blue = (u8) mcled_cdev->subled_info[BLUE].brightness;
+
+ blinkm_transfer_hw(led->i2c_client, BLM_GO_RGB);
+
+ return 0;
+}
+
static int blinkm_led_common_set(struct led_classdev *led_cdev,
enum led_brightness value, int color)
{
/* led_brightness is 0, 127 or 255 - we just use it here as-is */
- struct blinkm_led *led = cdev_to_blmled(led_cdev);
+ struct blinkm_led *led = led_cdev_to_blmled(led_cdev);
struct blinkm_data *data = i2c_get_clientdata(led->i2c_client);
switch (color) {
@@ -565,117 +594,175 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static int blinkm_probe(struct i2c_client *client)
+static int register_separate_colors(struct i2c_client *client, struct blinkm_data *data)
{
- struct blinkm_data *data;
- struct blinkm_led *led[3];
- int err, i;
+ /* 3 separate classes for red, green, and blue respectively */
+ struct blinkm_led *leds[NUM_LEDS];
+ int err;
char blinkm_led_name[28];
-
- data = devm_kzalloc(&client->dev,
- sizeof(struct blinkm_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
-
- data->i2c_addr = 0x08;
- /* i2c addr - use fake addr of 0x08 initially (real is 0x09) */
- data->fw_ver = 0xfe;
- /* firmware version - use fake until we read real value
- * (currently broken - BlinkM confused!) */
- data->script_id = 0x01;
- data->i2c_client = client;
-
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &blinkm_group);
- if (err < 0) {
- dev_err(&client->dev, "couldn't register sysfs group\n");
- goto exit;
- }
-
- for (i = 0; i < 3; i++) {
+ /* Register red, green, and blue sysfs classes */
+ for (int i = 0; i < NUM_LEDS; i++) {
/* RED = 0, GREEN = 1, BLUE = 2 */
- led[i] = &data->blinkm_leds[i];
- led[i]->i2c_client = client;
- led[i]->id = i;
- led[i]->led_cdev.max_brightness = 255;
- led[i]->led_cdev.flags = LED_CORE_SUSPENDRESUME;
+ leds[i] = &data->blinkm_leds[i];
+ leds[i]->i2c_client = client;
+ leds[i]->id = i;
+ leds[i]->cdev.led_cdev.max_brightness = 255;
+ leds[i]->cdev.led_cdev.flags = LED_CORE_SUSPENDRESUME;
switch (i) {
case RED:
- snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ scnprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-red",
client->adapter->nr,
client->addr);
- led[i]->led_cdev.name = blinkm_led_name;
- led[i]->led_cdev.brightness_set_blocking =
+ leds[i]->cdev.led_cdev.name = blinkm_led_name;
+ leds[i]->cdev.led_cdev.brightness_set_blocking =
blinkm_led_red_set;
err = led_classdev_register(&client->dev,
- &led[i]->led_cdev);
+ &leds[i]->cdev.led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
- led[i]->led_cdev.name);
+ leds[i]->cdev.led_cdev.name);
goto failred;
}
break;
case GREEN:
- snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ scnprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-green",
client->adapter->nr,
client->addr);
- led[i]->led_cdev.name = blinkm_led_name;
- led[i]->led_cdev.brightness_set_blocking =
+ leds[i]->cdev.led_cdev.name = blinkm_led_name;
+ leds[i]->cdev.led_cdev.brightness_set_blocking =
blinkm_led_green_set;
err = led_classdev_register(&client->dev,
- &led[i]->led_cdev);
+ &leds[i]->cdev.led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
- led[i]->led_cdev.name);
+ leds[i]->cdev.led_cdev.name);
goto failgreen;
}
break;
case BLUE:
- snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ scnprintf(blinkm_led_name, sizeof(blinkm_led_name),
"blinkm-%d-%d-blue",
client->adapter->nr,
client->addr);
- led[i]->led_cdev.name = blinkm_led_name;
- led[i]->led_cdev.brightness_set_blocking =
+ leds[i]->cdev.led_cdev.name = blinkm_led_name;
+ leds[i]->cdev.led_cdev.brightness_set_blocking =
blinkm_led_blue_set;
err = led_classdev_register(&client->dev,
- &led[i]->led_cdev);
+ &leds[i]->cdev.led_cdev);
if (err < 0) {
dev_err(&client->dev,
"couldn't register LED %s\n",
- led[i]->led_cdev.name);
+ leds[i]->cdev.led_cdev.name);
goto failblue;
}
break;
+ default:
+ break;
} /* end switch */
} /* end for */
-
- /* Initialize the blinkm */
- blinkm_init_hw(client);
-
return 0;
failblue:
- led_classdev_unregister(&led[GREEN]->led_cdev);
-
+ led_classdev_unregister(&leds[GREEN]->cdev.led_cdev);
failgreen:
- led_classdev_unregister(&led[RED]->led_cdev);
-
+ led_classdev_unregister(&leds[RED]->cdev.led_cdev);
failred:
sysfs_remove_group(&client->dev.kobj, &blinkm_group);
-exit:
+
return err;
}
+static int register_multicolor(struct i2c_client *client, struct blinkm_data *data)
+{
+ struct blinkm_led *mc_led;
+ struct mc_subled *mc_led_info;
+ char blinkm_led_name[28];
+ int err;
+
+ /* Register multicolor sysfs class */
+ /* The first element of leds is used for multicolor facilities */
+ mc_led = &data->blinkm_leds[RED];
+ mc_led->i2c_client = client;
+
+ mc_led_info = devm_kcalloc(&client->dev, NUM_LEDS, sizeof(*mc_led_info),
+ GFP_KERNEL);
+ if (!mc_led_info)
+ return -ENOMEM;
+
+ mc_led_info[RED].color_index = LED_COLOR_ID_RED;
+ mc_led_info[GREEN].color_index = LED_COLOR_ID_GREEN;
+ mc_led_info[BLUE].color_index = LED_COLOR_ID_BLUE;
+
+ mc_led->cdev.mcled_cdev.subled_info = mc_led_info;
+ mc_led->cdev.mcled_cdev.num_colors = NUM_LEDS;
+ mc_led->cdev.mcled_cdev.led_cdev.brightness = 255;
+ mc_led->cdev.mcled_cdev.led_cdev.max_brightness = 255;
+ mc_led->cdev.mcled_cdev.led_cdev.flags = LED_CORE_SUSPENDRESUME;
+
+ scnprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ "blinkm-%d-%d:rgb:indicator",
+ client->adapter->nr,
+ client->addr);
+ mc_led->cdev.mcled_cdev.led_cdev.name = blinkm_led_name;
+ mc_led->cdev.mcled_cdev.led_cdev.brightness_set_blocking = blinkm_set_mc_brightness;
+
+ err = led_classdev_multicolor_register(&client->dev, &mc_led->cdev.mcled_cdev);
+ if (err < 0) {
+ dev_err(&client->dev, "couldn't register LED %s\n",
+ mc_led->cdev.led_cdev.name);
+ sysfs_remove_group(&client->dev.kobj, &blinkm_group);
+		return err;
+	}
+
+	return 0;
+}
+
+static int blinkm_probe(struct i2c_client *client)
+{
+ struct blinkm_data *data;
+ int err;
+
+ data = devm_kzalloc(&client->dev,
+ sizeof(struct blinkm_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->i2c_addr = 0x08;
+ /* i2c addr - use fake addr of 0x08 initially (real is 0x09) */
+ data->fw_ver = 0xfe;
+ /* firmware version - use fake until we read real value
+ * (currently broken - BlinkM confused!)
+ */
+ data->script_id = 0x01;
+ data->i2c_client = client;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &blinkm_group);
+ if (err < 0) {
+ dev_err(&client->dev, "couldn't register sysfs group\n");
+ return err;
+ }
+
+ if (!IS_ENABLED(CONFIG_LEDS_BLINKM_MULTICOLOR)) {
+ err = register_separate_colors(client, data);
+ if (err < 0)
+ return err;
+ } else {
+ err = register_multicolor(client, data);
+ if (err < 0)
+ return err;
+ }
+
+ blinkm_init_hw(client);
+
+ return 0;
+}
+
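blinkm_probe() picks exactly one of the two registration paths with IS_ENABLED(), so both stay compiled and type-checked while the unused one is discarded as dead code. A standalone sketch of the idiom; the real kernel macro also handles =m options, and the stub below only models the =y case:

#include <stdio.h>

#define CONFIG_LEDS_BLINKM_MULTICOLOR 1
#define IS_ENABLED(option) (option)	/* kernel's version is subtler */

static int register_separate(void)   { puts("rgb as 3 classdevs"); return 0; }
static int register_multi(void)      { puts("one multicolor classdev"); return 0; }

int main(void)
{
	if (!IS_ENABLED(CONFIG_LEDS_BLINKM_MULTICOLOR))
		return register_separate();
	return register_multi();
}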
static void blinkm_remove(struct i2c_client *client)
{
struct blinkm_data *data = i2c_get_clientdata(client);
@@ -683,8 +770,8 @@ static void blinkm_remove(struct i2c_client *client)
int i;
/* make sure no workqueue entries are pending */
- for (i = 0; i < 3; i++)
- led_classdev_unregister(&data->blinkm_leds[i].led_cdev);
+ for (i = 0; i < NUM_LEDS; i++)
+ led_classdev_unregister(&data->blinkm_leds[i].cdev.led_cdev);
/* reset rgb */
data->next_red = 0x00;
@@ -740,6 +827,7 @@ static struct i2c_driver blinkm_driver = {
module_i2c_driver(blinkm_driver);
MODULE_AUTHOR("Jan-Simon Moeller <dl9pf@gmx.de>");
+MODULE_AUTHOR("Joseph Strauss <jstrauss@mailbox.org>");
MODULE_DESCRIPTION("BlinkM RGB LED driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 83fcd7b6afff..4d1612d557c8 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -150,7 +150,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev)
{
struct fwnode_handle *child;
struct gpio_leds_priv *priv;
- int count, ret;
+ int count, used, ret;
count = device_get_child_node_count(dev);
if (!count)
@@ -159,9 +159,11 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev)
priv = devm_kzalloc(dev, struct_size(priv, leds, count), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
+ priv->num_leds = count;
+ used = 0;
device_for_each_child_node(dev, child) {
- struct gpio_led_data *led_dat = &priv->leds[priv->num_leds];
+ struct gpio_led_data *led_dat = &priv->leds[used];
struct gpio_led led = {};
/*
@@ -197,8 +199,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev)
/* Set gpiod label to match the corresponding LED name. */
gpiod_set_consumer_name(led_dat->gpiod,
led_dat->cdev.dev->kobj.name);
- priv->num_leds++;
+ used++;
}
+ priv->num_leds = used;
return priv;
}
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 5e1a4d39a107..27bfab3da479 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -392,7 +392,7 @@ static int is31fl319x_parse_child_fw(const struct device *dev,
static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
{
- struct fwnode_handle *fwnode = dev_fwnode(dev), *child;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
int count;
int ret;
@@ -404,7 +404,7 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
is31->cdef = device_get_match_data(dev);
count = 0;
- fwnode_for_each_available_child_node(fwnode, child)
+ device_for_each_child_node_scoped(dev, child)
count++;
dev_dbg(dev, "probing with %d leds defined in DT\n", count);
@@ -414,33 +414,25 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
"Number of leds defined must be between 1 and %u\n",
is31->cdef->num_leds);
- fwnode_for_each_available_child_node(fwnode, child) {
+ device_for_each_child_node_scoped(dev, child) {
struct is31fl319x_led *led;
u32 reg;
ret = fwnode_property_read_u32(child, "reg", &reg);
- if (ret) {
- ret = dev_err_probe(dev, ret, "Failed to read led 'reg' property\n");
- goto put_child_node;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read led 'reg' property\n");
- if (reg < 1 || reg > is31->cdef->num_leds) {
- ret = dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg);
- goto put_child_node;
- }
+ if (reg < 1 || reg > is31->cdef->num_leds)
+ return dev_err_probe(dev, -EINVAL, "invalid led reg %u\n", reg);
led = &is31->leds[reg - 1];
- if (led->configured) {
- ret = dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg);
- goto put_child_node;
- }
+ if (led->configured)
+ return dev_err_probe(dev, -EINVAL, "led %u is already configured\n", reg);
ret = is31fl319x_parse_child_fw(dev, child, led, is31);
- if (ret) {
- ret = dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg);
- goto put_child_node;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "led %u DT parsing failed\n", reg);
led->configured = true;
}
@@ -454,10 +446,6 @@ static int is31fl319x_parse_fw(struct device *dev, struct is31fl319x_chip *is31)
}
return 0;
-
-put_child_node:
- fwnode_handle_put(child);
- return ret;
}
static inline int is31fl3190_microamp_to_cs(struct device *dev, u32 microamp)
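The conversion above hinges on the scoped iterator from <linux/property.h>: the loop variable owns the fwnode reference and drops it automatically on every exit path, which is why the put_child_node unwind label can go away. A minimal sketch of the pattern, assuming a hypothetical driver whose children carry a "reg" property:

#include <linux/property.h>

/* Sketch: 'child' is declared by the macro itself and is released via
 * fwnode_handle_put() when it goes out of scope, including on the early
 * return below; no manual put is needed. */
static int example_count_children(struct device *dev)
{
	int count = 0;
	u32 reg;

	device_for_each_child_node_scoped(dev, child) {
		if (fwnode_property_read_u32(child, "reg", &reg))
			return -EINVAL;
		count++;
	}

	return count;
}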
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index b0a0be77bb33..8793330dd414 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -363,10 +363,9 @@ static struct is31fl32xx_led_data *is31fl32xx_find_led_data(
static int is31fl32xx_parse_dt(struct device *dev,
struct is31fl32xx_priv *priv)
{
- struct device_node *child;
int ret = 0;
- for_each_available_child_of_node(dev_of_node(dev), child) {
+ for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
struct led_init_data init_data = {};
struct is31fl32xx_led_data *led_data =
&priv->leds[priv->num_leds];
@@ -376,7 +375,7 @@ static int is31fl32xx_parse_dt(struct device *dev,
ret = is31fl32xx_parse_child_dt(dev, child, led_data);
if (ret)
- goto err;
+ return ret;
/* Detect if channel is already in use by another child */
other_led_data = is31fl32xx_find_led_data(priv,
@@ -385,8 +384,7 @@ static int is31fl32xx_parse_dt(struct device *dev,
dev_err(dev,
"Node %pOF 'reg' conflicts with another LED\n",
child);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
init_data.fwnode = of_fwnode_handle(child);
@@ -396,17 +394,13 @@ static int is31fl32xx_parse_dt(struct device *dev,
if (ret) {
dev_err(dev, "Failed to register LED for %pOF: %d\n",
child, ret);
- goto err;
+ return ret;
}
priv->num_leds++;
}
return 0;
-
-err:
- of_node_put(child);
- return ret;
}
static const struct of_device_id of_is31fl32xx_match[] = {
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 29e7142dca72..5a2e259679cf 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -965,24 +965,16 @@ EXPORT_SYMBOL_GPL(lp55xx_update_bits);
bool lp55xx_is_extclk_used(struct lp55xx_chip *chip)
{
struct clk *clk;
- int err;
- clk = devm_clk_get(&chip->cl->dev, "32k_clk");
+ clk = devm_clk_get_enabled(&chip->cl->dev, "32k_clk");
if (IS_ERR(clk))
goto use_internal_clk;
- err = clk_prepare_enable(clk);
- if (err)
+ if (clk_get_rate(clk) != LP55XX_CLK_32K)
goto use_internal_clk;
- if (clk_get_rate(clk) != LP55XX_CLK_32K) {
- clk_disable_unprepare(clk);
- goto use_internal_clk;
- }
-
dev_info(&chip->cl->dev, "%dHz external clock used\n", LP55XX_CLK_32K);
- chip->clk = clk;
return true;
use_internal_clk:
@@ -995,9 +987,6 @@ static void lp55xx_deinit_device(struct lp55xx_chip *chip)
{
struct lp55xx_platform_data *pdata = chip->pdata;
- if (chip->clk)
- clk_disable_unprepare(chip->clk);
-
if (pdata->enable_gpiod)
gpiod_set_value(pdata->enable_gpiod, 0);
}
@@ -1173,16 +1162,13 @@ static int lp55xx_parse_multi_led(struct device_node *np,
struct lp55xx_led_config *cfg,
int child_number)
{
- struct device_node *child;
int num_colors = 0, ret;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
ret = lp55xx_parse_multi_led_child(child, cfg, child_number,
num_colors);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
num_colors++;
}
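The clock rework above uses devm_clk_get_enabled(), which folds get, prepare_enable and the disable/unprepare-on-unbind cleanup into one managed call; that is what lets the chip->clk field and the deinit path disappear. A minimal sketch under the same assumptions (clock name from the patch, function hypothetical):

#include <linux/clk.h>

/* Sketch: the clock is acquired and enabled here; the devm core disables
 * and releases it automatically when the device is unbound, so no error
 * path needs clk_disable_unprepare(). */
static int example_use_32k_clock(struct device *dev)
{
	struct clk *clk = devm_clk_get_enabled(dev, "32k_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (clk_get_rate(clk) != 32768)
		return -EINVAL;

	return 0;
}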
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
index 1bb7c559662c..8fd64ec40919 100644
--- a/drivers/leds/leds-lp55xx-common.h
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -193,7 +193,6 @@ struct lp55xx_engine {
*/
struct lp55xx_chip {
struct i2c_client *cl;
- struct clk *clk;
struct lp55xx_platform_data *pdata;
struct mutex lock; /* lock for user-space interface */
int num_leds;
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index bbd1d359bba4..da99d114bfb2 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -12,6 +12,7 @@
* Eric Miao <eric.miao@marvell.com>
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -113,7 +114,7 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
{
struct mc13xxx_leds *leds = platform_get_drvdata(pdev);
struct mc13xxx_leds_platform_data *pdata;
- struct device_node *parent, *child;
+ struct device_node *child;
struct device *dev = &pdev->dev;
int i = 0, ret = -ENODATA;
@@ -121,24 +122,23 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
if (!pdata)
return ERR_PTR(-ENOMEM);
- parent = of_get_child_by_name(dev_of_node(dev->parent), "leds");
+ struct device_node *parent __free(device_node) =
+ of_get_child_by_name(dev_of_node(dev->parent), "leds");
if (!parent)
- goto out_node_put;
+ return ERR_PTR(-ENODATA);
ret = of_property_read_u32_array(parent, "led-control",
pdata->led_control,
leds->devtype->num_regs);
if (ret)
- goto out_node_put;
+ return ERR_PTR(ret);
pdata->num_leds = of_get_available_child_count(parent);
pdata->led = devm_kcalloc(dev, pdata->num_leds, sizeof(*pdata->led),
GFP_KERNEL);
- if (!pdata->led) {
- ret = -ENOMEM;
- goto out_node_put;
- }
+ if (!pdata->led)
+ return ERR_PTR(-ENOMEM);
for_each_available_child_of_node(parent, child) {
const char *str;
@@ -158,12 +158,10 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
}
pdata->num_leds = i;
- ret = i > 0 ? 0 : -ENODATA;
-
-out_node_put:
- of_node_put(parent);
+ if (i <= 0)
+ return ERR_PTR(-ENODATA);
- return ret ? ERR_PTR(ret) : pdata;
+ return pdata;
}
#else
static inline struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
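The mc13783 change builds on the __free() attribute from <linux/cleanup.h>: annotating the local pointer makes of_node_put() run whenever the variable leaves scope, so each early return no longer needs an out_node_put label. A minimal sketch (node and property names hypothetical):

#include <linux/cleanup.h>
#include <linux/of.h>

/* Sketch: 'np' is released through of_node_put() on every return path,
 * courtesy of the __free(device_node) annotation. */
static int example_read_child_prop(struct device *dev, u32 *val)
{
	struct device_node *np __free(device_node) =
		of_get_child_by_name(dev_of_node(dev), "leds");

	if (!np)
		return -ENODEV;

	return of_property_read_u32(np, "reg", val);
}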
diff --git a/drivers/leds/leds-mt6323.c b/drivers/leds/leds-mt6323.c
index 40d508510823..a19e8e0b6d1b 100644
--- a/drivers/leds/leds-mt6323.c
+++ b/drivers/leds/leds-mt6323.c
@@ -527,7 +527,6 @@ static int mt6323_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(dev);
- struct device_node *child;
struct mt6397_chip *hw = dev_get_drvdata(dev->parent);
struct mt6323_leds *leds;
struct mt6323_led *led;
@@ -565,28 +564,25 @@ static int mt6323_led_probe(struct platform_device *pdev)
return ret;
}
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
struct led_init_data init_data = {};
bool is_wled;
ret = of_property_read_u32(child, "reg", &reg);
if (ret) {
dev_err(dev, "Failed to read led 'reg' property\n");
- goto put_child_node;
+ return ret;
}
if (reg >= max_leds || reg >= MAX_SUPPORTED_LEDS ||
leds->led[reg]) {
dev_err(dev, "Invalid led reg %u\n", reg);
- ret = -EINVAL;
- goto put_child_node;
+ return -EINVAL;
}
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
- if (!led) {
- ret = -ENOMEM;
- goto put_child_node;
- }
+ if (!led)
+ return -ENOMEM;
is_wled = of_property_read_bool(child, "mediatek,is-wled");
@@ -612,7 +608,7 @@ static int mt6323_led_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(leds->dev,
"Failed to LED set default from devicetree\n");
- goto put_child_node;
+ return ret;
}
init_data.fwnode = of_fwnode_handle(child);
@@ -621,15 +617,11 @@ static int mt6323_led_probe(struct platform_device *pdev)
&init_data);
if (ret) {
dev_err(dev, "Failed to register LED: %d\n", ret);
- goto put_child_node;
+ return ret;
}
}
return 0;
-
-put_child_node:
- of_node_put(child);
- return ret;
}
static void mt6323_led_remove(struct platform_device *pdev)
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 77213b79f84d..af5a908b8d9e 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -423,7 +423,6 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
struct device_node *gpio_ext_np;
struct platform_device *gpio_ext_pdev;
struct device *gpio_ext_dev;
- struct device_node *child;
struct netxbig_gpio_ext *gpio_ext;
struct netxbig_led_timer *timers;
struct netxbig_led *leds, *led;
@@ -507,7 +506,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
}
led = leds;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
const char *string;
int *mode_val;
int num_modes;
@@ -515,17 +514,17 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
ret = of_property_read_u32(child, "mode-addr",
&led->mode_addr);
if (ret)
- goto err_node_put;
+ goto put_device;
ret = of_property_read_u32(child, "bright-addr",
&led->bright_addr);
if (ret)
- goto err_node_put;
+ goto put_device;
ret = of_property_read_u32(child, "max-brightness",
&led->bright_max);
if (ret)
- goto err_node_put;
+ goto put_device;
mode_val =
devm_kcalloc(dev,
@@ -533,7 +532,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
GFP_KERNEL);
if (!mode_val) {
ret = -ENOMEM;
- goto err_node_put;
+ goto put_device;
}
for (i = 0; i < NETXBIG_LED_MODE_NUM; i++)
@@ -542,12 +541,12 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
ret = of_property_count_u32_elems(child, "mode-val");
if (ret < 0 || ret % 2) {
ret = -EINVAL;
- goto err_node_put;
+ goto put_device;
}
num_modes = ret / 2;
if (num_modes > NETXBIG_LED_MODE_NUM) {
ret = -EINVAL;
- goto err_node_put;
+ goto put_device;
}
for (i = 0; i < num_modes; i++) {
@@ -560,7 +559,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
"mode-val", 2 * i + 1, &val);
if (mode >= NETXBIG_LED_MODE_NUM) {
ret = -EINVAL;
- goto err_node_put;
+ goto put_device;
}
mode_val[mode] = val;
}
@@ -583,8 +582,6 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
return 0;
-err_node_put:
- of_node_put(child);
put_device:
put_device(gpio_ext_dev);
return ret;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 9f3fac66a11c..1b47acf54720 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -215,8 +215,7 @@ static int pca9532_update_hw_blink(struct pca9532_led *led,
if (other->state == PCA9532_PWM1) {
if (other->ldev.blink_delay_on != delay_on ||
other->ldev.blink_delay_off != delay_off) {
- dev_err(&led->client->dev,
- "HW can handle only one blink configuration at a time\n");
+ /* HW can handle only one blink configuration at a time */
return -EINVAL;
}
}
@@ -224,7 +223,7 @@ static int pca9532_update_hw_blink(struct pca9532_led *led,
psc = ((delay_on + delay_off) * PCA9532_PWM_PERIOD_DIV - 1) / 1000;
if (psc > U8_MAX) {
- dev_err(&led->client->dev, "Blink period too long to be handled by hardware\n");
+ /* Blink period too long to be handled by hardware */
return -EINVAL;
}
@@ -506,7 +505,6 @@ static struct pca9532_platform_data *
pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
{
struct pca9532_platform_data *pdata;
- struct device_node *child;
int devid, maxleds;
int i = 0;
const char *state;
@@ -525,7 +523,7 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
of_property_read_u8_array(np, "nxp,psc", &pdata->psc[PCA9532_PWM_ID_0],
ARRAY_SIZE(pdata->psc));
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
if (of_property_read_string(child, "label",
&pdata->leds[i].name))
pdata->leds[i].name = child->name;
@@ -538,10 +536,8 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
else if (!strcmp(state, "keep"))
pdata->leds[i].state = PCA9532_KEEP;
}
- if (++i >= maxleds) {
- of_node_put(child);
+ if (++i >= maxleds)
break;
- }
}
return pdata;
diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
index 78215dff1499..11c7bb69573e 100644
--- a/drivers/leds/leds-pca995x.c
+++ b/drivers/leds/leds-pca995x.c
@@ -19,10 +19,6 @@
#define PCA995X_MODE1 0x00
#define PCA995X_MODE2 0x01
#define PCA995X_LEDOUT0 0x02
-#define PCA9955B_PWM0 0x08
-#define PCA9952_PWM0 0x0A
-#define PCA9952_IREFALL 0x43
-#define PCA9955B_IREFALL 0x45
/* Auto-increment disabled. Normal mode */
#define PCA995X_MODE1_CFG 0x00
@@ -34,17 +30,38 @@
#define PCA995X_LDRX_MASK 0x3
#define PCA995X_LDRX_BITS 2
-#define PCA995X_MAX_OUTPUTS 16
+#define PCA995X_MAX_OUTPUTS 24
#define PCA995X_OUTPUTS_PER_REG 4
#define PCA995X_IREFALL_FULL_CFG 0xFF
#define PCA995X_IREFALL_HALF_CFG (PCA995X_IREFALL_FULL_CFG / 2)
-#define PCA995X_TYPE_NON_B 0
-#define PCA995X_TYPE_B 1
-
#define ldev_to_led(c) container_of(c, struct pca995x_led, ldev)
+struct pca995x_chipdef {
+ unsigned int num_leds;
+ u8 pwm_base;
+ u8 irefall;
+};
+
+static const struct pca995x_chipdef pca9952_chipdef = {
+ .num_leds = 16,
+ .pwm_base = 0x0a,
+ .irefall = 0x43,
+};
+
+static const struct pca995x_chipdef pca9955b_chipdef = {
+ .num_leds = 16,
+ .pwm_base = 0x08,
+ .irefall = 0x45,
+};
+
+static const struct pca995x_chipdef pca9956b_chipdef = {
+ .num_leds = 24,
+ .pwm_base = 0x0a,
+ .irefall = 0x40,
+};
+
struct pca995x_led {
unsigned int led_no;
struct led_classdev ldev;
@@ -54,7 +71,7 @@ struct pca995x_led {
struct pca995x_chip {
struct regmap *regmap;
struct pca995x_led leds[PCA995X_MAX_OUTPUTS];
- int btype;
+ const struct pca995x_chipdef *chipdef;
};
static int pca995x_brightness_set(struct led_classdev *led_cdev,
@@ -62,10 +79,11 @@ static int pca995x_brightness_set(struct led_classdev *led_cdev,
{
struct pca995x_led *led = ldev_to_led(led_cdev);
struct pca995x_chip *chip = led->chip;
+ const struct pca995x_chipdef *chipdef = chip->chipdef;
u8 ledout_addr, pwmout_addr;
int shift, ret;
- pwmout_addr = (chip->btype ? PCA9955B_PWM0 : PCA9952_PWM0) + led->led_no;
+ pwmout_addr = chipdef->pwm_base + led->led_no;
ledout_addr = PCA995X_LEDOUT0 + (led->led_no / PCA995X_OUTPUTS_PER_REG);
shift = PCA995X_LDRX_BITS * (led->led_no % PCA995X_OUTPUTS_PER_REG);
@@ -102,43 +120,38 @@ static const struct regmap_config pca995x_regmap = {
static int pca995x_probe(struct i2c_client *client)
{
struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 };
- struct fwnode_handle *np, *child;
struct device *dev = &client->dev;
+ const struct pca995x_chipdef *chipdef;
struct pca995x_chip *chip;
struct pca995x_led *led;
- int i, btype, reg, ret;
+ int i, j, reg, ret;
- btype = (unsigned long)device_get_match_data(&client->dev);
+ chipdef = device_get_match_data(&client->dev);
- np = dev_fwnode(dev);
- if (!np)
+ if (!dev_fwnode(dev))
return -ENODEV;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->btype = btype;
+ chip->chipdef = chipdef;
chip->regmap = devm_regmap_init_i2c(client, &pca995x_regmap);
if (IS_ERR(chip->regmap))
return PTR_ERR(chip->regmap);
i2c_set_clientdata(client, chip);
- fwnode_for_each_available_child_node(np, child) {
+ device_for_each_child_node_scoped(dev, child) {
ret = fwnode_property_read_u32(child, "reg", &reg);
- if (ret) {
- fwnode_handle_put(child);
+ if (ret)
return ret;
- }
- if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) {
- fwnode_handle_put(child);
+ if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg])
return -EINVAL;
- }
led = &chip->leds[reg];
- led_fwnodes[reg] = child;
+ led_fwnodes[reg] = fwnode_handle_get(child);
led->chip = chip;
led->led_no = reg;
led->ldev.brightness_set_blocking = pca995x_brightness_set;
@@ -157,7 +170,8 @@ static int pca995x_probe(struct i2c_client *client)
&chip->leds[i].ldev,
&init_data);
if (ret < 0) {
- fwnode_handle_put(child);
+ for (j = i; j < PCA995X_MAX_OUTPUTS; j++)
+ fwnode_handle_put(led_fwnodes[j]);
return dev_err_probe(dev, ret,
"Could not register LED %s\n",
chip->leds[i].ldev.name);
@@ -170,21 +184,21 @@ static int pca995x_probe(struct i2c_client *client)
return ret;
/* IREF Output current value for all LEDn outputs */
- return regmap_write(chip->regmap,
- btype ? PCA9955B_IREFALL : PCA9952_IREFALL,
- PCA995X_IREFALL_HALF_CFG);
+ return regmap_write(chip->regmap, chipdef->irefall, PCA995X_IREFALL_HALF_CFG);
}
static const struct i2c_device_id pca995x_id[] = {
- { "pca9952", .driver_data = (kernel_ulong_t)PCA995X_TYPE_NON_B },
- { "pca9955b", .driver_data = (kernel_ulong_t)PCA995X_TYPE_B },
+ { "pca9952", .driver_data = (kernel_ulong_t)&pca9952_chipdef },
+ { "pca9955b", .driver_data = (kernel_ulong_t)&pca9955b_chipdef },
+ { "pca9956b", .driver_data = (kernel_ulong_t)&pca9956b_chipdef },
{}
};
MODULE_DEVICE_TABLE(i2c, pca995x_id);
static const struct of_device_id pca995x_of_match[] = {
- { .compatible = "nxp,pca9952", .data = (void *)PCA995X_TYPE_NON_B },
- { .compatible = "nxp,pca9955b", .data = (void *)PCA995X_TYPE_B },
+ { .compatible = "nxp,pca9952", .data = &pca9952_chipdef },
+ { .compatible = "nxp,pca9955b", . data = &pca9955b_chipdef },
+ { .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef },
{},
};
MODULE_DEVICE_TABLE(of, pca995x_of_match);
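The pca995x rework is the usual match-data pattern: both the I2C and OF tables point at per-variant descriptors, and probe() fetches the right one with device_get_match_data() instead of branching on a btype flag. A minimal sketch (struct layout reduced from the patch, function hypothetical):

#include <linux/property.h>

struct example_chipdef {
	unsigned int num_leds;
	u8 pwm_base;
};

/* Sketch: with the descriptor in hand, register addresses become simple
 * arithmetic (pwm_base + led_no) rather than ternary selections. */
static int example_probe(struct device *dev)
{
	const struct example_chipdef *cd = device_get_match_data(dev);

	if (!cd)
		return -ENODEV;

	return 0;
}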
diff --git a/drivers/leds/leds-sc27xx-bltc.c b/drivers/leds/leds-sc27xx-bltc.c
index f04db793e8d6..cca98c644aa6 100644
--- a/drivers/leds/leds-sc27xx-bltc.c
+++ b/drivers/leds/leds-sc27xx-bltc.c
@@ -276,7 +276,7 @@ static int sc27xx_led_register(struct device *dev, struct sc27xx_led_priv *priv)
static int sc27xx_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev_of_node(dev), *child;
+ struct device_node *np = dev_of_node(dev);
struct sc27xx_led_priv *priv;
u32 base, count, reg;
int err;
@@ -304,17 +304,13 @@ static int sc27xx_led_probe(struct platform_device *pdev)
return err;
}
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
err = of_property_read_u32(child, "reg", &reg);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
- if (reg >= SC27XX_LEDS_MAX || priv->leds[reg].active) {
- of_node_put(child);
+ if (reg >= SC27XX_LEDS_MAX || priv->leds[reg].active)
return -EINVAL;
- }
priv->leds[reg].fwnode = of_fwnode_handle(child);
priv->leds[reg].active = true;
diff --git a/drivers/leds/leds-sun50i-a100.c b/drivers/leds/leds-sun50i-a100.c
index 119eff9471f0..4c468d487486 100644
--- a/drivers/leds/leds-sun50i-a100.c
+++ b/drivers/leds/leds-sun50i-a100.c
@@ -368,7 +368,7 @@ static int sun50i_a100_ledc_suspend(struct device *dev)
if (!xfer_active)
break;
- msleep(1);
+ usleep_range(1000, 1100);
}
clk_disable_unprepare(priv->mod_clk);
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 39f740be058f..4cff8c4b020c 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -452,7 +452,7 @@ static int omnia_mcu_get_features(const struct i2c_client *client)
static int omnia_leds_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct device_node *np = dev_of_node(dev), *child;
+ struct device_node *np = dev_of_node(dev);
struct omnia_leds *leds;
struct omnia_led *led;
int ret, count;
@@ -497,12 +497,10 @@ static int omnia_leds_probe(struct i2c_client *client)
}
led = &leds->leds[0];
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
ret = omnia_led_register(client, led, child);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
led += ret;
}
@@ -532,6 +530,7 @@ static const struct of_device_id of_omnia_leds_match[] = {
{ .compatible = "cznic,turris-omnia-leds", },
{},
};
+MODULE_DEVICE_TABLE(of, of_omnia_leds_match);
static const struct i2c_device_id omnia_id[] = {
{ "omnia" },
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index e74b2ceed1c2..f3c9ef2bfa57 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -1368,7 +1368,6 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
{
struct led_init_data init_data = {};
struct led_classdev *cdev;
- struct device_node *child;
struct mc_subled *info;
struct lpg_led *led;
const char *state;
@@ -1399,12 +1398,10 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
if (!info)
return -ENOMEM;
i = 0;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
ret = lpg_parse_channel(lpg, child, &led->channels[i]);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
info[i].color_index = led->channels[i]->color;
info[i].intensity = 0;
@@ -1600,7 +1597,6 @@ static int lpg_init_sdam(struct lpg *lpg)
static int lpg_probe(struct platform_device *pdev)
{
- struct device_node *np;
struct lpg *lpg;
int ret;
int i;
@@ -1640,12 +1636,10 @@ static int lpg_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- for_each_available_child_of_node(pdev->dev.of_node, np) {
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, np) {
ret = lpg_add_led(lpg, np);
- if (ret) {
- of_node_put(np);
+ if (ret)
return ret;
- }
}
for (i = 0; i < lpg->num_channels; i++)
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 22bba8e97642..4b0863db901a 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -39,6 +39,8 @@
* (has carrier) or not
* tx - LED blinks on transmitted data
* rx - LED blinks on receive data
+ * tx_err - LED blinks on transmit error
+ * rx_err - LED blinks on receive error
*
* Note: If the user selects a mode that is not supported by hw, default
* behavior is to fall back to software control of the LED. However not every
@@ -144,7 +146,9 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
* checking stats
*/
if (test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) ||
- test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode))
+ test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) ||
+ test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) ||
+ test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode))
schedule_delayed_work(&trigger_data->work, 0);
}
}
@@ -337,6 +341,8 @@ static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
case TRIGGER_NETDEV_FULL_DUPLEX:
case TRIGGER_NETDEV_TX:
case TRIGGER_NETDEV_RX:
+ case TRIGGER_NETDEV_TX_ERR:
+ case TRIGGER_NETDEV_RX_ERR:
bit = attr;
break;
default:
@@ -371,6 +377,8 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
case TRIGGER_NETDEV_FULL_DUPLEX:
case TRIGGER_NETDEV_TX:
case TRIGGER_NETDEV_RX:
+ case TRIGGER_NETDEV_TX_ERR:
+ case TRIGGER_NETDEV_RX_ERR:
bit = attr;
break;
default:
@@ -429,6 +437,8 @@ DEFINE_NETDEV_TRIGGER(half_duplex, TRIGGER_NETDEV_HALF_DUPLEX);
DEFINE_NETDEV_TRIGGER(full_duplex, TRIGGER_NETDEV_FULL_DUPLEX);
DEFINE_NETDEV_TRIGGER(tx, TRIGGER_NETDEV_TX);
DEFINE_NETDEV_TRIGGER(rx, TRIGGER_NETDEV_RX);
+DEFINE_NETDEV_TRIGGER(tx_err, TRIGGER_NETDEV_TX_ERR);
+DEFINE_NETDEV_TRIGGER(rx_err, TRIGGER_NETDEV_RX_ERR);
static ssize_t interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -538,6 +548,8 @@ static struct attribute *netdev_trig_attrs[] = {
&dev_attr_half_duplex.attr,
&dev_attr_rx.attr,
&dev_attr_tx.attr,
+ &dev_attr_rx_err.attr,
+ &dev_attr_tx_err.attr,
&dev_attr_interval.attr,
&dev_attr_offloaded.attr,
NULL
@@ -628,7 +640,9 @@ static void netdev_trig_work(struct work_struct *work)
/* If we are not looking for RX/TX then return */
if (!test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) &&
- !test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode))
+ !test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) &&
+ !test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) &&
+ !test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode))
return;
dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
@@ -636,7 +650,11 @@ static void netdev_trig_work(struct work_struct *work)
(test_bit(TRIGGER_NETDEV_TX, &trigger_data->mode) ?
dev_stats->tx_packets : 0) +
(test_bit(TRIGGER_NETDEV_RX, &trigger_data->mode) ?
- dev_stats->rx_packets : 0);
+ dev_stats->rx_packets : 0) +
+ (test_bit(TRIGGER_NETDEV_TX_ERR, &trigger_data->mode) ?
+ dev_stats->tx_errors : 0) +
+ (test_bit(TRIGGER_NETDEV_RX_ERR, &trigger_data->mode) ?
+ dev_stats->rx_errors : 0);
if (trigger_data->last_activity != new_activity) {
led_stop_software_blink(trigger_data->led_cdev);
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 3d361c920030..374a841f18c3 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -200,7 +200,6 @@ static const struct file_operations uleds_fops = {
.read = uleds_read,
.write = uleds_write,
.poll = uleds_poll,
- .llseek = no_llseek,
};
static struct miscdevice uleds_misc = {
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index b0407c5fadb2..88adee42ba82 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -842,7 +842,6 @@ out:
static const struct file_operations adb_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = adb_read,
.write = adb_write,
.open = adb_open,
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 13626205530d..bede200e32e8 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,7 +387,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dma_set_max_seg_size(&dev->ofdev.dev, 65536);
dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);
-#if defined(CONFIG_PCI) && defined(CONFIG_DMA_OPS)
+#if defined(CONFIG_PCI) && defined(CONFIG_ARCH_HAS_DMA_OPS)
/* Set the DMA ops to the ones from the PCI device, this could be
* fishy if we didn't know that on PowerMac it's always direct ops
* or iommu ops that will work fine
@@ -396,7 +396,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
*/
dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
-#endif /* CONFIG_PCI && CONFIG_DMA_OPS */
+#endif /* CONFIG_PCI && CONFIG_ARCH_HAS_DMA_OPS */
#ifdef DEBUG
printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index b2b78a53e532..a01bc5090cdf 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1314,7 +1314,6 @@ static int smu_release(struct inode *inode, struct file *file)
static const struct file_operations smu_device_fops = {
- .llseek = no_llseek,
.read = smu_read,
.write = smu_write,
.poll = smu_fpoll,
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 4eed97295927..6fb995778636 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -25,6 +25,7 @@ config ARM_MHU_V2
config ARM_MHU_V3
tristate "ARM MHUv3 Mailbox"
+ depends on ARM64 || COMPILE_TEST
depends on HAS_IOMEM || COMPILE_TEST
depends on OF
help
@@ -73,7 +74,7 @@ config ARMADA_37XX_RWTM_MBOX
config OMAP2PLUS_MBOX
tristate "OMAP2+ Mailbox framework support"
- depends on ARCH_OMAP2PLUS || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
help
Mailbox implementation for OMAP family chips with hardware for
interprocessor communication involving DSP, IVA1.0 and IVA2 in
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index fbfd0202047c..ea12fb8d2401 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -145,7 +145,8 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
spin_lock_init(&mbox->lock);
ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
- bcm2835_mbox_irq, 0, dev_name(dev), mbox);
+ bcm2835_mbox_irq, IRQF_NO_SUSPEND, dev_name(dev),
+ mbox);
if (ret) {
dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
ret);
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index d17efb1dd0cb..f815dab3be50 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -30,7 +30,7 @@
#define IMX_MU_SCU_CHANS 6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS 2
-#define IMX_MU_CHAN_NAME_SIZE 20
+#define IMX_MU_CHAN_NAME_SIZE 32
#define IMX_MU_V2_PAR_OFF 0x4
#define IMX_MU_V2_TR_MASK GENMASK(7, 0)
@@ -782,7 +782,7 @@ static int imx_mu_init_generic(struct imx_mu_priv *priv)
cp->chan = &priv->mbox_chans[i];
priv->mbox_chans[i].con_priv = cp;
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx);
}
priv->mbox.num_chans = IMX_MU_CHANS;
@@ -819,7 +819,7 @@ static int imx_mu_init_specific(struct imx_mu_priv *priv)
cp->chan = &priv->mbox_chans[i];
priv->mbox_chans[i].con_priv = cp;
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx);
}
priv->mbox.num_chans = num_chans;
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index ebff3baf3045..d3d26a2c9895 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -450,30 +450,20 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
struct device_node *np = cl->dev->of_node;
- struct property *prop;
- const char *mbox_name;
- int index = 0;
+ int index;
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
return ERR_PTR(-EINVAL);
}
- if (!of_get_property(np, "mbox-names", NULL)) {
- dev_err(cl->dev,
- "%s() requires an \"mbox-names\" property\n", __func__);
+ index = of_property_match_string(np, "mbox-names", name);
+ if (index < 0) {
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
return ERR_PTR(-EINVAL);
}
-
- of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
- if (!strncmp(name, mbox_name, strlen(name)))
- return mbox_request_channel(cl, index);
- index++;
- }
-
- dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
- __func__, name);
- return ERR_PTR(-EINVAL);
+ return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
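The rewrite above delegates the "mbox-names" lookup to of_property_match_string(), which returns the index of the first exact match or a negative errno. Besides being shorter, this fixes a subtle defect: the old strncmp(name, mbox_name, strlen(name)) comparison was a prefix match, so a requested name would also match any longer channel name sharing that prefix. A minimal sketch (property name from the binding, function hypothetical):

#include <linux/of.h>

/* Sketch: resolve a channel name to its index in "mboxes" via the
 * parallel "mbox-names" property; exact string comparison only. */
static int example_mbox_index(struct device_node *np, const char *name)
{
	int index = of_property_match_string(np, "mbox-names", name);

	return index < 0 ? -ENODEV : index;
}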
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 7a87424657a1..6797770474a5 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -603,7 +603,7 @@ static struct platform_driver omap_mbox_driver = {
.driver = {
.name = "omap-mailbox",
.pm = &omap_mbox_pm_ops,
- .of_match_table = of_match_ptr(omap_mailbox_of_match),
+ .of_match_table = omap_mailbox_of_match,
},
};
module_platform_driver(omap_mbox_driver);
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
index 8ffad059e898..4d966cb2ed03 100644
--- a/drivers/mailbox/rockchip-mailbox.c
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -159,7 +159,7 @@ static const struct of_device_id rockchip_mbox_of_match[] = {
{ .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
{ },
};
-MODULE_DEVICE_TABLE(of, rockchp_mbox_of_match);
+MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
static int rockchip_mbox_probe(struct platform_device *pdev)
{
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
index 9ae57de77d4d..ee8539dfcef5 100644
--- a/drivers/mailbox/sprd-mailbox.c
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -62,7 +62,6 @@ struct sprd_mbox_priv {
void __iomem *outbox_base;
/* Base register address for supplementary outbox */
void __iomem *supp_base;
- struct clk *clk;
u32 outbox_fifo_depth;
struct mutex lock;
@@ -291,19 +290,13 @@ static const struct mbox_chan_ops sprd_mbox_ops = {
.shutdown = sprd_mbox_shutdown,
};
-static void sprd_mbox_disable(void *data)
-{
- struct sprd_mbox_priv *priv = data;
-
- clk_disable_unprepare(priv->clk);
-}
-
static int sprd_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sprd_mbox_priv *priv;
int ret, inbox_irq, outbox_irq, supp_irq;
unsigned long id, supp;
+ struct clk *clk;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -331,20 +324,10 @@ static int sprd_mbox_probe(struct platform_device *pdev)
if (IS_ERR(priv->outbox_base))
return PTR_ERR(priv->outbox_base);
- priv->clk = devm_clk_get(dev, "enable");
- if (IS_ERR(priv->clk)) {
+ clk = devm_clk_get_enabled(dev, "enable");
+ if (IS_ERR(clk)) {
dev_err(dev, "failed to get mailbox clock\n");
- return PTR_ERR(priv->clk);
- }
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
- if (ret) {
- dev_err(dev, "failed to add mailbox disable action\n");
- return ret;
+ return PTR_ERR(clk);
}
inbox_irq = platform_get_irq_byname(pdev, "inbox");
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 098bf526136c..d478aafa02c9 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -529,9 +529,6 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
{
struct lru_entry *le = list_entry(l, struct lru_entry, list);
- if (!le)
- return NULL;
-
return le_to_buffer(le);
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 17f0fab1e254..aaeeabfab09b 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1368,7 +1368,7 @@ static void mg_copy(struct work_struct *ws)
*/
bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
- BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */
+ BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
mg->overwrite_bio = NULL;
inc_io_migrations(mg->cache);
mg_full_copy(ws);
@@ -3200,8 +3200,6 @@ static int parse_cblock_range(struct cache *cache, const char *str,
* Try and parse form (ii) first.
*/
r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
- if (r < 0)
- return r;
if (r == 2) {
result->begin = to_cblock(b);
@@ -3213,8 +3211,6 @@ static int parse_cblock_range(struct cache *cache, const char *str,
* That didn't work, try form (i).
*/
r = sscanf(str, "%llu%c", &b, &dummy);
- if (r < 0)
- return r;
if (r == 1) {
result->begin = to_cblock(b);
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index 2db84cd2202b..14c5c28d938b 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -530,10 +530,7 @@ static int __load_bitset_in_core(struct dm_clone_metadata *cmd)
return r;
for (i = 0; ; i++) {
- if (dm_bitset_cursor_get_value(&c))
- __set_bit(i, cmd->region_map);
- else
- __clear_bit(i, cmd->region_map);
+ __assign_bit(i, cmd->region_map, dm_bitset_cursor_get_value(&c));
if (i >= (cmd->nr_regions - 1))
break;
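__assign_bit() is the non-atomic helper the dm-clone change swaps in: __assign_bit(nr, addr, value) sets the bit when value is true and clears it otherwise, collapsing the open-coded if/else into one line. A one-function sketch (names hypothetical):

#include <linux/bitops.h>

/* Sketch: identical semantics to "if (value) __set_bit(nr, map);
 * else __clear_bit(nr, map);", expressed as a single call. */
static void example_mirror_flag(unsigned long *map, unsigned int nr, bool value)
{
	__assign_bit(nr, map, value);
}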
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 348b4b26c272..5228b03b6fe0 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -147,6 +147,7 @@ enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */
CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */
+ CRYPT_KEY_MAC_SIZE_SET, /* The integrity_key_size option was used */
};
/*
@@ -2613,35 +2614,31 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
key = request_key(type, key_desc + 1, NULL);
if (IS_ERR(key)) {
- kfree_sensitive(new_key_string);
- return PTR_ERR(key);
+ ret = PTR_ERR(key);
+ goto free_new_key_string;
}
down_read(&key->sem);
-
ret = set_key(cc, key);
- if (ret < 0) {
- up_read(&key->sem);
- key_put(key);
- kfree_sensitive(new_key_string);
- return ret;
- }
-
up_read(&key->sem);
key_put(key);
+ if (ret < 0)
+ goto free_new_key_string;
/* clear the flag since following operations may invalidate previously valid key */
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
ret = crypt_setkey(cc);
+ if (ret)
+ goto free_new_key_string;
- if (!ret) {
- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- kfree_sensitive(cc->key_string);
- cc->key_string = new_key_string;
- } else
- kfree_sensitive(new_key_string);
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ kfree_sensitive(cc->key_string);
+ cc->key_string = new_key_string;
+ return 0;
+free_new_key_string:
+ kfree_sensitive(new_key_string);
return ret;
}
@@ -2937,7 +2934,8 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
if (IS_ERR(mac))
return PTR_ERR(mac);
- cc->key_mac_size = crypto_ahash_digestsize(mac);
+ if (!test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags))
+ cc->key_mac_size = crypto_ahash_digestsize(mac);
crypto_free_ahash(mac);
cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
@@ -3219,6 +3217,13 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
if (!cc->cipher_auth)
return -ENOMEM;
+ } else if (sscanf(opt_string, "integrity_key_size:%u%c", &val, &dummy) == 1) {
+ if (!val) {
+ ti->error = "Invalid integrity_key_size argument";
+ return -EINVAL;
+ }
+ cc->key_mac_size = val;
+ set_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags);
} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
if (cc->sector_size < (1 << SECTOR_SHIFT) ||
cc->sector_size > 4096 ||
@@ -3607,10 +3612,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
+ num_feature_args += !!cc->used_tag_size;
num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
- if (cc->used_tag_size)
- num_feature_args++;
+ num_feature_args += test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags);
if (num_feature_args) {
DMEMIT(" %d", num_feature_args);
if (ti->num_discard_bios)
@@ -3631,6 +3636,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" sector_size:%d", cc->sector_size);
if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
DMEMIT(" iv_large_sectors");
+ if (test_bit(CRYPT_KEY_MAC_SIZE_SET, &cc->cipher_flags))
+ DMEMIT(" integrity_key_size:%u", cc->key_mac_size);
}
break;
@@ -3758,7 +3765,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 27, 0},
+ .version = {1, 28, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
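The new integrity_key_size option is parsed with the same sscanf idiom the target uses for its other key:value arguments: the trailing %c matches only if garbage follows the number, so a clean parse returns exactly 1. A minimal sketch (option name from the patch, function hypothetical):

#include <linux/kernel.h>

/* Sketch: returns 0 and stores the value only for a well-formed,
 * non-zero "integrity_key_size:<n>" argument. */
static int example_parse_key_size(const char *opt, unsigned int *out)
{
	unsigned int val;
	char dummy;

	if (sscanf(opt, "integrity_key_size:%u%c", &val, &dummy) != 1 || !val)
		return -EINVAL;

	*out = val;
	return 0;
}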
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index acff2f64f251..ee9f7cecd78e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -284,6 +284,7 @@ struct dm_integrity_c {
mempool_t recheck_pool;
struct bio_set recheck_bios;
+ struct bio_set recalc_bios;
struct notifier_block reboot_notifier;
};
@@ -321,7 +322,9 @@ struct dm_integrity_io {
struct dm_bio_details bio_details;
char *integrity_payload;
+ unsigned payload_len;
bool integrity_payload_from_mempool;
+ bool integrity_range_locked;
};
struct journal_completion {
@@ -359,7 +362,7 @@ static struct kmem_cache *journal_io_cache;
#endif
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
-static int dm_integrity_map_inline(struct dm_integrity_io *dio);
+static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
@@ -491,7 +494,8 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
__u8 *sb = (__u8 *)ic->sb;
__u8 *mac = sb + (1 << SECTOR_SHIFT) - mac_size;
- if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT) {
+ if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT ||
+ mac_size > HASH_MAX_DIGESTSIZE) {
dm_integrity_io_error(ic, "digest is too long", -EINVAL);
return -EINVAL;
}
@@ -1500,15 +1504,15 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
if (!ic->meta_dev)
flush_data = false;
if (flush_data) {
- fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
- fr.io_req.mem.type = DM_IO_KMEM,
- fr.io_req.mem.ptr.addr = NULL,
- fr.io_req.notify.fn = flush_notify,
+ fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ fr.io_req.mem.type = DM_IO_KMEM;
+ fr.io_req.mem.ptr.addr = NULL;
+ fr.io_req.notify.fn = flush_notify;
fr.io_req.notify.context = &fr;
- fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
- fr.io_reg.bdev = ic->dev->bdev,
- fr.io_reg.sector = 0,
- fr.io_reg.count = 0,
+ fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
+ fr.io_reg.bdev = ic->dev->bdev;
+ fr.io_reg.sector = 0;
+ fr.io_reg.count = 0;
fr.ic = ic;
init_completion(&fr.comp);
r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
@@ -1946,8 +1950,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
dio->bi_status = 0;
dio->op = bio_op(bio);
- if (ic->mode == 'I')
- return dm_integrity_map_inline(dio);
+ if (ic->mode == 'I') {
+ bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
+ dio->integrity_payload = NULL;
+ dio->integrity_payload_from_mempool = false;
+ dio->integrity_range_locked = false;
+ return dm_integrity_map_inline(dio, true);
+ }
if (unlikely(dio->op == REQ_OP_DISCARD)) {
if (ti->max_io_len) {
@@ -2397,15 +2406,13 @@ journal_read_write:
do_endio_flush(ic, dio);
}
-static int dm_integrity_map_inline(struct dm_integrity_io *dio)
+static int dm_integrity_map_inline(struct dm_integrity_io *dio, bool from_map)
{
struct dm_integrity_c *ic = dio->ic;
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
struct bio_integrity_payload *bip;
- unsigned payload_len, digest_size, extra_size, ret;
-
- dio->integrity_payload = NULL;
- dio->integrity_payload_from_mempool = false;
+ unsigned ret;
+ sector_t recalc_sector;
if (unlikely(bio_integrity(bio))) {
bio->bi_status = BLK_STS_NOTSUPP;
@@ -2418,28 +2425,67 @@ static int dm_integrity_map_inline(struct dm_integrity_io *dio)
return DM_MAPIO_REMAPPED;
retry:
- payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
- digest_size = crypto_shash_digestsize(ic->internal_hash);
- extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- payload_len += extra_size;
- dio->integrity_payload = kmalloc(payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (unlikely(!dio->integrity_payload)) {
- const unsigned x_size = PAGE_SIZE << 1;
- if (payload_len > x_size) {
- unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block;
- if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) {
- bio->bi_status = BLK_STS_NOTSUPP;
- bio_endio(bio);
- return DM_MAPIO_SUBMITTED;
+ if (!dio->integrity_payload) {
+ unsigned digest_size, extra_size;
+ dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
+ digest_size = crypto_shash_digestsize(ic->internal_hash);
+ extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
+ dio->payload_len += extra_size;
+ dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (unlikely(!dio->integrity_payload)) {
+ const unsigned x_size = PAGE_SIZE << 1;
+ if (dio->payload_len > x_size) {
+ unsigned sectors = ((x_size - extra_size) / ic->tuple_size) << ic->sb->log2_sectors_per_block;
+ if (WARN_ON(!sectors || sectors >= bio_sectors(bio))) {
+ bio->bi_status = BLK_STS_NOTSUPP;
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+ dm_accept_partial_bio(bio, sectors);
+ goto retry;
}
- dm_accept_partial_bio(bio, sectors);
- goto retry;
}
+ }
+
+ dio->range.logical_sector = bio->bi_iter.bi_sector;
+ dio->range.n_sectors = bio_sectors(bio);
+
+ if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)))
+ goto skip_spinlock;
+#ifdef CONFIG_64BIT
+ /*
+ * On 64-bit CPUs we can optimize the lock away (so that it won't cause
+ * cache line bouncing) and use acquire/release barriers instead.
+ *
+ * Paired with smp_store_release in integrity_recalc_inline.
+ */
+ recalc_sector = le64_to_cpu(smp_load_acquire(&ic->sb->recalc_sector));
+ if (likely(dio->range.logical_sector + dio->range.n_sectors <= recalc_sector))
+ goto skip_spinlock;
+#endif
+ spin_lock_irq(&ic->endio_wait.lock);
+ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
+ if (dio->range.logical_sector + dio->range.n_sectors <= recalc_sector)
+ goto skip_unlock;
+ if (unlikely(!add_new_range(ic, &dio->range, true))) {
+ if (from_map) {
+ spin_unlock_irq(&ic->endio_wait.lock);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ return DM_MAPIO_SUBMITTED;
+ }
+ wait_and_add_new_range(ic, &dio->range);
+ }
+ dio->integrity_range_locked = true;
+skip_unlock:
+ spin_unlock_irq(&ic->endio_wait.lock);
+skip_spinlock:
+
+ if (unlikely(!dio->integrity_payload)) {
dio->integrity_payload = page_to_virt((struct page *)mempool_alloc(&ic->recheck_pool, GFP_NOIO));
dio->integrity_payload_from_mempool = true;
}
- bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
dio->bio_details.bi_iter = bio->bi_iter;
if (unlikely(!dm_integrity_check_limits(ic, bio->bi_iter.bi_sector, bio))) {
@@ -2449,7 +2495,7 @@ retry:
bio->bi_iter.bi_sector += ic->start + SB_SECTORS;
bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
- if (unlikely(IS_ERR(bip))) {
+ if (IS_ERR(bip)) {
bio->bi_status = errno_to_blk_status(PTR_ERR(bip));
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
@@ -2470,8 +2516,8 @@ retry:
}
ret = bio_integrity_add_page(bio, virt_to_page(dio->integrity_payload),
- payload_len, offset_in_page(dio->integrity_payload));
- if (unlikely(ret != payload_len)) {
+ dio->payload_len, offset_in_page(dio->integrity_payload));
+ if (unlikely(ret != dio->payload_len)) {
bio->bi_status = BLK_STS_RESOURCE;
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
@@ -2522,7 +2568,7 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
}
bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1);
- if (unlikely(IS_ERR(bip))) {
+ if (IS_ERR(bip)) {
bio_put(outgoing_bio);
bio->bi_status = errno_to_blk_status(PTR_ERR(bip));
bio_endio(bio);
@@ -2579,6 +2625,9 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) {
unsigned pos = 0;
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ unlikely(dio->integrity_range_locked))
+ goto skip_check;
while (dio->bio_details.bi_iter.bi_size) {
char digest[HASH_MAX_DIGESTSIZE];
struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
@@ -2598,9 +2647,10 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
}
}
- if (likely(dio->op == REQ_OP_READ) || likely(dio->op == REQ_OP_WRITE)) {
- dm_integrity_free_payload(dio);
- }
+skip_check:
+ dm_integrity_free_payload(dio);
+ if (unlikely(dio->integrity_range_locked))
+ remove_range(ic, &dio->range);
}
return DM_ENDIO_DONE;
}
@@ -2608,8 +2658,26 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
static void integrity_bio_wait(struct work_struct *w)
{
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+ struct dm_integrity_c *ic = dio->ic;
- dm_integrity_map_continue(dio, false);
+ if (ic->mode == 'I') {
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ int r = dm_integrity_map_inline(dio, false);
+ switch (r) {
+ case DM_MAPIO_KILL:
+ bio->bi_status = BLK_STS_IOERR;
+ fallthrough;
+ case DM_MAPIO_REMAPPED:
+ submit_bio_noacct(bio);
+ fallthrough;
+ case DM_MAPIO_SUBMITTED:
+ return;
+ default:
+ BUG();
+ }
+ } else {
+ dm_integrity_map_continue(dio, false);
+ }
}
static void pad_uncommitted(struct dm_integrity_c *ic)
@@ -3081,6 +3149,133 @@ free_ret:
kvfree(recalc_tags);
}
+static void integrity_recalc_inline(struct work_struct *w)
+{
+ struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
+ size_t recalc_tags_size;
+ u8 *recalc_buffer = NULL;
+ u8 *recalc_tags = NULL;
+ struct dm_integrity_range range;
+ struct bio *bio;
+ struct bio_integrity_payload *bip;
+ __u8 *t;
+ unsigned int i;
+ int r;
+ unsigned ret;
+ unsigned int super_counter = 0;
+ unsigned recalc_sectors = RECALC_SECTORS;
+
+retry:
+ recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN);
+ if (!recalc_buffer) {
+oom:
+ recalc_sectors >>= 1;
+ if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
+ goto retry;
+ DMCRIT("out of memory for recalculate buffer - recalculation disabled");
+ goto free_ret;
+ }
+
+ recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size;
+ if (crypto_shash_digestsize(ic->internal_hash) > ic->tuple_size)
+ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tuple_size;
+ recalc_tags = kmalloc(recalc_tags_size, GFP_NOIO | __GFP_NOWARN);
+ if (!recalc_tags) {
+ kfree(recalc_buffer);
+ recalc_buffer = NULL;
+ goto oom;
+ }
+
+ spin_lock_irq(&ic->endio_wait.lock);
+
+next_chunk:
+ if (unlikely(dm_post_suspending(ic->ti)))
+ goto unlock_ret;
+
+ range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
+ if (unlikely(range.logical_sector >= ic->provided_data_sectors))
+ goto unlock_ret;
+ range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
+
+ add_new_range_and_wait(ic, &range);
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
+ recalc_write_super(ic);
+ super_counter = 0;
+ }
+
+ if (unlikely(dm_integrity_failed(ic)))
+ goto err;
+
+ DEBUG_print("recalculating: %llx - %llx\n", range.logical_sector, range.n_sectors);
+
+ bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios);
+ bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
+ __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+ r = submit_bio_wait(bio);
+ bio_put(bio);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
+
+ t = recalc_tags;
+ for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
+ memset(t, 0, ic->tuple_size);
+ integrity_sector_checksum(ic, range.logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tuple_size;
+ }
+
+ bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios);
+ bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
+ __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
+ if (unlikely(IS_ERR(bip))) {
+ bio_put(bio);
+ DMCRIT("out of memory for bio integrity payload - recalculation disabled");
+ goto err;
+ }
+ ret = bio_integrity_add_page(bio, virt_to_page(recalc_tags), t - recalc_tags, offset_in_page(recalc_tags));
+ if (unlikely(ret != t - recalc_tags)) {
+ bio_put(bio);
+ dm_integrity_io_error(ic, "attaching integrity tags", -ENOMEM);
+ goto err;
+ }
+
+ r = submit_bio_wait(bio);
+ bio_put(bio);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "writing data", r);
+ goto err;
+ }
+
+ cond_resched();
+ spin_lock_irq(&ic->endio_wait.lock);
+ remove_range_unlocked(ic, &range);
+#ifdef CONFIG_64BIT
+ /* Paired with smp_load_acquire in dm_integrity_map_inline. */
+ smp_store_release(&ic->sb->recalc_sector, cpu_to_le64(range.logical_sector + range.n_sectors));
+#else
+ ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
+#endif
+ goto next_chunk;
+
+err:
+ remove_range(ic, &range);
+ goto free_ret;
+
+unlock_ret:
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ recalc_write_super(ic);
+
+free_ret:
+ kfree(recalc_buffer);
+ kfree(recalc_tags);
+}
+
static void bitmap_block_work(struct work_struct *w)
{
struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
@@ -4619,6 +4814,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
r = -ENOMEM;
goto bad;
}
+ r = bioset_init(&ic->recalc_bios, 1, 0, BIOSET_NEED_BVECS);
+ if (r) {
+ ti->error = "Cannot allocate bio set";
+ goto bad;
+ }
+ r = bioset_integrity_create(&ic->recalc_bios, 1);
+ if (r) {
+ ti->error = "Cannot allocate bio integrity set";
+ r = -ENOMEM;
+ goto bad;
+ }
}
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
@@ -4717,13 +4923,18 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
ti->error = "Block size doesn't match the information in superblock";
goto bad;
}
- if (!le32_to_cpu(ic->sb->journal_sections) != (ic->mode == 'I')) {
- r = -EINVAL;
- if (ic->mode != 'I')
+ if (ic->mode != 'I') {
+ if (!le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
ti->error = "Corrupted superblock, journal_sections is 0";
- else
+ goto bad;
+ }
+ } else {
+ if (le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
ti->error = "Corrupted superblock, journal_sections is not 0";
- goto bad;
+ goto bad;
+ }
}
/* make sure that ti->max_io_len doesn't overflow */
if (!ic->meta_dev) {
@@ -4830,7 +5041,7 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- INIT_WORK(&ic->recalc_work, integrity_recalc);
+ INIT_WORK(&ic->recalc_work, ic->mode == 'I' ? integrity_recalc_inline : integrity_recalc);
} else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash";
@@ -4847,17 +5058,15 @@ try_smaller_buffer:
goto bad;
}
- if (ic->mode != 'I') {
- ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
- 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
- if (IS_ERR(ic->bufio)) {
- r = PTR_ERR(ic->bufio);
- ti->error = "Cannot initialize dm-bufio";
- ic->bufio = NULL;
- goto bad;
- }
- dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
+ ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
+ 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
+ if (IS_ERR(ic->bufio)) {
+ r = PTR_ERR(ic->bufio);
+ ti->error = "Cannot initialize dm-bufio";
+ ic->bufio = NULL;
+ goto bad;
}
+ dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
if (ic->mode != 'R' && ic->mode != 'I') {
r = create_journal(ic, &ti->error);
@@ -4979,6 +5188,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ bioset_exit(&ic->recalc_bios);
bioset_exit(&ic->recheck_bios);
mempool_exit(&ic->recheck_pool);
mempool_exit(&ic->journal_io_mempool);
@@ -5033,7 +5243,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 12, 0},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
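The CONFIG_64BIT fast path in dm_integrity_map_inline() replaces the spinlock with an acquire/release pairing: integrity_recalc_inline() publishes the new recalc position with smp_store_release() only after the recalculated tags have been written, and a reader that observes that position via smp_load_acquire() is guaranteed to see those writes too. A minimal sketch of the idiom (field type from the patch, function names hypothetical):

#include <linux/types.h>
#include <asm/barrier.h>

/* Sketch: reader side; a sector range entirely below the loaded
 * position may be accessed without taking endio_wait.lock. */
static u64 example_load_recalc_pos(__le64 *recalc_sector)
{
	return le64_to_cpu(smp_load_acquire(recalc_sector));
}

/* Sketch: writer side; all tag writes issued before this call are
 * visible to any reader that observes the new position. */
static void example_publish_recalc_pos(__le64 *recalc_sector, u64 pos)
{
	smp_store_release(recalc_sector, cpu_to_le64(pos));
}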
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 63682d27fc8d..1e0d3b9b75d6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2519,7 +2519,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
rdev->saved_raid_disk = rdev->raid_disk;
}
- /* Reshape support -> restore repective data offsets */
+ /* Reshape support -> restore respective data offsets */
rdev->data_offset = le64_to_cpu(sb->data_offset);
rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index f7e9a3632eb3..499f8cc8a39f 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -496,8 +496,10 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
map = dm_get_live_table(md, &srcu_idx);
if (unlikely(!map)) {
+ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
+ dm_device_name(md));
dm_put_live_table(md, srcu_idx);
- return BLK_STS_RESOURCE;
+ return BLK_STS_IOERR;
}
ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index a0c1620e90c8..89632ce97760 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2948,7 +2948,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
if (IS_ERR(pmd)) {
*error = "Error creating metadata object";
- return (struct pool *)pmd;
+ return ERR_CAST(pmd);
}
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index ab3ea8337809..0d502f6a86ad 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -501,6 +501,7 @@ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lb
memset(&data_vio->record_name, 0, sizeof(data_vio->record_name));
memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate));
+ vdo_reset_completion(&data_vio->decrement_completion);
vdo_reset_completion(completion);
completion->error_handler = handle_data_vio_error;
set_data_vio_logical_callback(data_vio, attempt_logical_block_lock);
@@ -1273,12 +1274,14 @@ static void clean_hash_lock(struct vdo_completion *completion)
static void finish_cleanup(struct data_vio *data_vio)
{
struct vdo_completion *completion = &data_vio->vio.completion;
+ u32 discard_size = min_t(u32, data_vio->remaining_discard,
+ VDO_BLOCK_SIZE - data_vio->offset);
VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
"complete data_vio has no allocation lock");
VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
"complete data_vio has no hash lock");
- if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
+ if ((data_vio->remaining_discard <= discard_size) ||
(completion->result != VDO_SUCCESS)) {
struct data_vio_pool *pool = completion->vdo->data_vio_pool;
@@ -1287,12 +1290,12 @@ static void finish_cleanup(struct data_vio *data_vio)
return;
}
- data_vio->remaining_discard -= min_t(u32, data_vio->remaining_discard,
- VDO_BLOCK_SIZE - data_vio->offset);
+ data_vio->remaining_discard -= discard_size;
data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE);
data_vio->read = data_vio->is_partial;
data_vio->offset = 0;
completion->requeue = true;
+ data_vio->first_reference_operation_complete = false;
launch_data_vio(data_vio, data_vio->logical.lbn + 1);
}
@@ -1965,7 +1968,8 @@ static void allocate_block(struct vdo_completion *completion)
.state = VDO_MAPPING_STATE_UNCOMPRESSED,
};
- if (data_vio->fua) {
+ if (data_vio->fua ||
+ data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
prepare_for_dedupe(data_vio);
return;
}
@@ -2042,7 +2046,6 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
return;
}
-
/*
* We don't need to write any data, so skip allocation and just update the block map and
* reference counts (via the journal).
@@ -2051,7 +2054,7 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
if (data_vio->is_zero)
data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED;
- if (data_vio->remaining_discard > VDO_BLOCK_SIZE) {
+ if (data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
/* This is not the final block of a discard so we can't acknowledge it yet. */
update_metadata_for_data_vio_write(data_vio, NULL);
return;
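
A worked example of why the discard accounting must respect the in-block offset (numbers are illustrative; VDO_BLOCK_SIZE is 4096 bytes): a discard with remaining_discard = 4000 that starts at offset = 1024 only covers 4096 - 1024 = 3072 bytes of its first block.

	discard_size = min(4000, 4096 - 1024) = 3072
	old test: 4000 <= 4096           -> cleanup finished, the last 928 bytes were never processed
	new test: 4000 <= 3072 is false  -> relaunch at lbn + 1 with remaining_discard = 928, offset = 0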
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index 39ac68614419..80628ae93fba 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -729,6 +729,7 @@ static void process_update_result(struct data_vio *agent)
!change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE))
return;
+ agent->dedupe_context = NULL;
release_context(context);
}
@@ -1648,6 +1649,7 @@ static void process_query_result(struct data_vio *agent)
if (change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE)) {
agent->is_duplicate = decode_uds_advice(context);
+ agent->dedupe_context = NULL;
release_context(context);
}
}
@@ -2321,6 +2323,7 @@ static void timeout_index_operations_callback(struct vdo_completion *completion)
* send its requestor on its way.
*/
list_del_init(&context->list_entry);
+ context->requestor->dedupe_context = NULL;
continue_data_vio(context->requestor);
timed_out++;
}
diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c
index dd05691e4097..0e04c2021682 100644
--- a/drivers/md/dm-vdo/dm-vdo-target.c
+++ b/drivers/md/dm-vdo/dm-vdo-target.c
@@ -1105,6 +1105,9 @@ static int vdo_message(struct dm_target *ti, unsigned int argc, char **argv,
if ((argc == 1) && (strcasecmp(argv[0], "stats") == 0)) {
vdo_write_stats(vdo, result_buffer, maxlen);
result = 1;
+ } else if ((argc == 1) && (strcasecmp(argv[0], "config") == 0)) {
+ vdo_write_config(vdo, &result_buffer, &maxlen);
+ result = 1;
} else {
result = vdo_status_to_errno(process_vdo_message(vdo, argc, argv));
}
@@ -2293,6 +2296,14 @@ static void handle_load_error(struct vdo_completion *completion)
return;
}
+ if ((completion->result == VDO_UNSUPPORTED_VERSION) &&
+ (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) {
+ vdo_log_error("Aborting load due to unsupported version");
+ vdo->admin.phase = LOAD_PHASE_FINISHED;
+ load_callback(completion);
+ return;
+ }
+
vdo_log_error_strerror(completion->result,
"Entering read-only mode due to load error");
vdo->admin.phase = LOAD_PHASE_WAIT_FOR_READ_ONLY;
@@ -2737,6 +2748,19 @@ static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo)
vdo_log_info("starting device '%s'", device_name);
result = perform_admin_operation(vdo, LOAD_PHASE_START, load_callback,
handle_load_error, "load");
+ if (result == VDO_UNSUPPORTED_VERSION) {
+ /*
+ * A component version is not supported. This can happen when the
+ * recovery journal metadata is in an old version format. Abort the
+ * load without saving the state.
+ */
+ vdo->suspend_type = VDO_ADMIN_STATE_SUSPENDING;
+ perform_admin_operation(vdo, SUSPEND_PHASE_START,
+ suspend_callback, suspend_callback,
+ "suspend");
+ return result;
+ }
+
if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) {
/*
* Something has gone very wrong. Make sure everything has drained and
@@ -2808,7 +2832,8 @@ static int vdo_preresume(struct dm_target *ti)
vdo_register_thread_device_id(&instance_thread, &vdo->instance);
result = vdo_preresume_registered(ti, vdo);
- if ((result == VDO_PARAMETER_MISMATCH) || (result == VDO_INVALID_ADMIN_STATE))
+ if ((result == VDO_PARAMETER_MISMATCH) || (result == VDO_INVALID_ADMIN_STATE) ||
+ (result == VDO_UNSUPPORTED_VERSION))
result = -EINVAL;
vdo_unregister_thread_device_id();
return vdo_status_to_errno(result);
@@ -2832,7 +2857,7 @@ static void vdo_resume(struct dm_target *ti)
static struct target_type vdo_target_bio = {
.features = DM_TARGET_SINGLETON,
.name = "vdo",
- .version = { 9, 0, 0 },
+ .version = { 9, 1, 0 },
.module = THIS_MODULE,
.ctr = vdo_ctr,
.dtr = vdo_dtr,
diff --git a/drivers/md/dm-vdo/indexer/chapter-index.c b/drivers/md/dm-vdo/indexer/chapter-index.c
index 7e32a25d3f2f..fb1db41c794b 100644
--- a/drivers/md/dm-vdo/indexer/chapter-index.c
+++ b/drivers/md/dm-vdo/indexer/chapter-index.c
@@ -177,7 +177,7 @@ int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index,
if (list_number < 0)
return UDS_OVERFLOW;
- next_list = first_list + list_number--,
+ next_list = first_list + list_number--;
result = uds_start_delta_index_search(delta_index, next_list, 0,
&entry);
if (result != UDS_SUCCESS)
diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c
index 9a3716bb3c05..ab62abe18827 100644
--- a/drivers/md/dm-vdo/io-submitter.c
+++ b/drivers/md/dm-vdo/io-submitter.c
@@ -346,7 +346,6 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
- VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
vdo_reset_completion(completion);
completion->error_handler = error_handler;
diff --git a/drivers/md/dm-vdo/message-stats.c b/drivers/md/dm-vdo/message-stats.c
index 2802cf92922b..75dfcd7c5f63 100644
--- a/drivers/md/dm-vdo/message-stats.c
+++ b/drivers/md/dm-vdo/message-stats.c
@@ -4,6 +4,7 @@
*/
#include "dedupe.h"
+#include "indexer.h"
#include "logger.h"
#include "memory-alloc.h"
#include "message-stats.h"
@@ -430,3 +431,50 @@ int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen)
vdo_free(stats);
return VDO_SUCCESS;
}
+
+static void write_index_memory(u32 mem, char **buf, unsigned int *maxlen)
+{
+ char *prefix = "memorySize : ";
+
+ /* Convert index memory to fractional value */
+ if (mem == (u32)UDS_MEMORY_CONFIG_256MB)
+ write_string(prefix, "0.25, ", NULL, buf, maxlen);
+ else if (mem == (u32)UDS_MEMORY_CONFIG_512MB)
+ write_string(prefix, "0.50, ", NULL, buf, maxlen);
+ else if (mem == (u32)UDS_MEMORY_CONFIG_768MB)
+ write_string(prefix, "0.75, ", NULL, buf, maxlen);
+ else
+ write_u32(prefix, mem, ", ", buf, maxlen);
+}
+
+static void write_index_config(struct index_config *config, char **buf,
+ unsigned int *maxlen)
+{
+ write_string("index : ", "{ ", NULL, buf, maxlen);
+ /* index mem size */
+ write_index_memory(config->mem, buf, maxlen);
+ /* whether the index is sparse or not */
+ write_bool("isSparse : ", config->sparse, ", ", buf, maxlen);
+ write_string(NULL, "}", ", ", buf, maxlen);
+}
+
+int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen)
+{
+ struct vdo_config *config = &vdo->states.vdo.config;
+
+ write_string(NULL, "{ ", NULL, buf, maxlen);
+ /* version */
+ write_u32("version : ", 1, ", ", buf, maxlen);
+ /* physical size */
+ write_block_count_t("physicalSize : ", config->physical_blocks * VDO_BLOCK_SIZE, ", ",
+ buf, maxlen);
+ /* logical size */
+ write_block_count_t("logicalSize : ", config->logical_blocks * VDO_BLOCK_SIZE, ", ",
+ buf, maxlen);
+ /* slab size */
+ write_block_count_t("slabSize : ", config->slab_size, ", ", buf, maxlen);
+ /* index config */
+ write_index_config(&vdo->geometry.index_config, buf, maxlen);
+ write_string(NULL, "}", NULL, buf, maxlen);
+ return VDO_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/message-stats.h b/drivers/md/dm-vdo/message-stats.h
index f7fceca9acab..f9c95eff569d 100644
--- a/drivers/md/dm-vdo/message-stats.h
+++ b/drivers/md/dm-vdo/message-stats.h
@@ -8,6 +8,7 @@
#include "types.h"
+int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen);
int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen);
#endif /* VDO_MESSAGE_STATS_H */
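
Note the units: physicalSize and logicalSize are emitted in bytes (block counts multiplied by VDO_BLOCK_SIZE), while slabSize stays in blocks. The new message is reachable through the normal device-mapper message interface; a hypothetical session (device name made up, values elided) would have roughly this shape, following vdo_write_config() above:

	# dmsetup message vdo0 0 config
	{ version : 1, physicalSize : <bytes>, logicalSize : <bytes>, slabSize : <blocks>, index : { memorySize : 0.25, isSparse : <bool>, }, }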
diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c
index 7e0009d2f67d..ffff2c999518 100644
--- a/drivers/md/dm-vdo/repair.c
+++ b/drivers/md/dm-vdo/repair.c
@@ -1202,17 +1202,14 @@ static bool __must_check is_valid_recovery_journal_block(const struct recovery_j
* @journal: The journal to use.
* @header: The unpacked block header to check.
* @sequence: The expected sequence number.
- * @type: The expected metadata type.
*
* Return: True if the block matches.
*/
static bool __must_check is_exact_recovery_journal_block(const struct recovery_journal *journal,
const struct recovery_block_header *header,
- sequence_number_t sequence,
- enum vdo_metadata_type type)
+ sequence_number_t sequence)
{
- return ((header->metadata_type == type) &&
- (header->sequence_number == sequence) &&
+ return ((header->sequence_number == sequence) &&
(is_valid_recovery_journal_block(journal, header, true)));
}
@@ -1371,7 +1368,8 @@ static void extract_entries_from_block(struct repair_completion *repair,
get_recovery_journal_block_header(journal, repair->journal_data,
sequence);
- if (!is_exact_recovery_journal_block(journal, &header, sequence, format)) {
+ if (!is_exact_recovery_journal_block(journal, &header, sequence) ||
+ (header.metadata_type != format)) {
/* This block is invalid, so skip it. */
return;
}
@@ -1557,10 +1555,13 @@ static int parse_journal_for_recovery(struct repair_completion *repair)
sequence_number_t i, head;
bool found_entries = false;
struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
+ struct recovery_block_header header;
+ enum vdo_metadata_type expected_format;
head = min(repair->block_map_head, repair->slab_journal_head);
+ header = get_recovery_journal_block_header(journal, repair->journal_data, head);
+ expected_format = header.metadata_type;
for (i = head; i <= repair->highest_tail; i++) {
- struct recovery_block_header header;
journal_entry_count_t block_entries;
u8 j;
@@ -1572,19 +1573,15 @@ static int parse_journal_for_recovery(struct repair_completion *repair)
};
header = get_recovery_journal_block_header(journal, repair->journal_data, i);
- if (header.metadata_type == VDO_METADATA_RECOVERY_JOURNAL) {
- /* This is an old format block, so we need to upgrade */
- vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
- "Recovery journal is in the old format, a read-only rebuild is required.");
- vdo_enter_read_only_mode(repair->completion.vdo,
- VDO_UNSUPPORTED_VERSION);
- return VDO_UNSUPPORTED_VERSION;
- }
-
- if (!is_exact_recovery_journal_block(journal, &header, i,
- VDO_METADATA_RECOVERY_JOURNAL_2)) {
+ if (!is_exact_recovery_journal_block(journal, &header, i)) {
/* A bad block header was found so this must be the end of the journal. */
break;
+ } else if (header.metadata_type != expected_format) {
+ /* There is a mix of old and new format blocks, so we need to rebuild. */
+ vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "Recovery journal is in an invalid format, a read-only rebuild is required.");
+ vdo_enter_read_only_mode(repair->completion.vdo, VDO_CORRUPT_JOURNAL);
+ return VDO_CORRUPT_JOURNAL;
}
block_entries = header.entry_count;
@@ -1620,8 +1617,14 @@ static int parse_journal_for_recovery(struct repair_completion *repair)
break;
}
- if (!found_entries)
+ if (!found_entries) {
return validate_heads(repair);
+ } else if (expected_format == VDO_METADATA_RECOVERY_JOURNAL) {
+ /* All journal blocks have the old format, so we need to upgrade. */
+ vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "Recovery journal is in the old format. Downgrade and complete recovery, then upgrade with a clean volume");
+ return VDO_UNSUPPORTED_VERSION;
+ }
/* Set the tail to the last valid tail block, if there is one. */
if (repair->tail_recovery_point.sector_count == 0)
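
Together with the handle_load_error() and vdo_preresume_registered() changes in dm-vdo-target.c above, the journal is now classified by the metadata type of its head block. A summary of the outcomes, as read from the code:

	all valid blocks in the new format -> recovery proceeds as before
	old and new format blocks mixed    -> VDO_CORRUPT_JOURNAL, read-only rebuild required
	all valid blocks in the old format -> VDO_UNSUPPORTED_VERSION, load aborted without dirtying state
	no journal entries found           -> validate_heads()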
diff --git a/drivers/md/dm-vdo/status-codes.c b/drivers/md/dm-vdo/status-codes.c
index d3493450b169..dd252d660b6d 100644
--- a/drivers/md/dm-vdo/status-codes.c
+++ b/drivers/md/dm-vdo/status-codes.c
@@ -28,7 +28,7 @@ const struct error_info vdo_status_list[] = {
{ "VDO_LOCK_ERROR", "A lock is held incorrectly" },
{ "VDO_READ_ONLY", "The device is in read-only mode" },
{ "VDO_SHUTTING_DOWN", "The device is shutting down" },
- { "VDO_CORRUPT_JOURNAL", "Recovery journal entries corrupted" },
+ { "VDO_CORRUPT_JOURNAL", "Recovery journal corrupted" },
{ "VDO_TOO_MANY_SLABS", "Exceeds maximum number of slabs supported" },
{ "VDO_INVALID_FRAGMENT", "Compressed block fragment is invalid" },
{ "VDO_RETRY_AFTER_REBUILD", "Retry operation after rebuilding finishes" },
diff --git a/drivers/md/dm-vdo/status-codes.h b/drivers/md/dm-vdo/status-codes.h
index 72da04159f88..426dc8e2ca5d 100644
--- a/drivers/md/dm-vdo/status-codes.h
+++ b/drivers/md/dm-vdo/status-codes.h
@@ -52,7 +52,7 @@ enum vdo_status_codes {
VDO_READ_ONLY,
/* the VDO is shutting down */
VDO_SHUTTING_DOWN,
- /* the recovery journal has corrupt entries */
+ /* the recovery journal has corrupt entries or corrupt metadata */
VDO_CORRUPT_JOURNAL,
/* exceeds maximum number of slabs supported */
VDO_TOO_MANY_SLABS,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 24ba9a10444c..7d4d90b4395a 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -36,11 +36,13 @@
#define DM_VERITY_OPT_LOGGING "ignore_corruption"
#define DM_VERITY_OPT_RESTART "restart_on_corruption"
#define DM_VERITY_OPT_PANIC "panic_on_corruption"
+#define DM_VERITY_OPT_ERROR_RESTART "restart_on_error"
+#define DM_VERITY_OPT_ERROR_PANIC "panic_on_error"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
#define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
-#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX (5 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
@@ -583,6 +585,11 @@ static inline bool verity_is_system_shutting_down(void)
|| system_state == SYSTEM_RESTART;
}
+static void restart_io_error(struct work_struct *w)
+{
+ kernel_restart("dm-verity device has I/O error");
+}
+
/*
* End one "io" structure with a given error.
*/
@@ -597,6 +604,23 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
if (!static_branch_unlikely(&use_bh_wq_enabled) || !io->in_bh)
verity_fec_finish_io(io);
+ if (unlikely(status != BLK_STS_OK) &&
+ unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
+ !verity_is_system_shutting_down()) {
+ if (v->error_mode == DM_VERITY_MODE_PANIC) {
+ panic("dm-verity device has I/O error");
+ }
+ if (v->error_mode == DM_VERITY_MODE_RESTART) {
+ static DECLARE_WORK(restart_work, restart_io_error);
+ queue_work(v->verify_wq, &restart_work);
+ /*
+ * We deliberately don't call bio_endio here, because
+ * the machine will be restarted anyway.
+ */
+ return;
+ }
+ }
+
bio_endio(bio);
}
@@ -805,6 +829,8 @@ static void verity_status(struct dm_target *ti, status_type_t type,
DMEMIT("%02x", v->salt[x]);
if (v->mode != DM_VERITY_MODE_EIO)
args++;
+ if (v->error_mode != DM_VERITY_MODE_EIO)
+ args++;
if (verity_fec_is_enabled(v))
args += DM_VERITY_OPTS_FEC;
if (v->zero_digest)
@@ -834,6 +860,19 @@ static void verity_status(struct dm_target *ti, status_type_t type,
BUG();
}
}
+ if (v->error_mode != DM_VERITY_MODE_EIO) {
+ DMEMIT(" ");
+ switch (v->error_mode) {
+ case DM_VERITY_MODE_RESTART:
+ DMEMIT(DM_VERITY_OPT_ERROR_RESTART);
+ break;
+ case DM_VERITY_MODE_PANIC:
+ DMEMIT(DM_VERITY_OPT_ERROR_PANIC);
+ break;
+ default:
+ BUG();
+ }
+ }
if (v->zero_digest)
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
@@ -886,6 +925,19 @@ static void verity_status(struct dm_target *ti, status_type_t type,
DMEMIT("invalid");
}
}
+ if (v->error_mode != DM_VERITY_MODE_EIO) {
+ DMEMIT(",verity_error_mode=");
+ switch (v->error_mode) {
+ case DM_VERITY_MODE_RESTART:
+ DMEMIT(DM_VERITY_OPT_ERROR_RESTART);
+ break;
+ case DM_VERITY_MODE_PANIC:
+ DMEMIT(DM_VERITY_OPT_ERROR_PANIC);
+ break;
+ default:
+ DMEMIT("invalid");
+ }
+ }
DMEMIT(";");
break;
}
@@ -1088,6 +1140,25 @@ static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
return 0;
}
+static inline bool verity_is_verity_error_mode(const char *arg_name)
+{
+ return (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_RESTART) ||
+ !strcasecmp(arg_name, DM_VERITY_OPT_ERROR_PANIC));
+}
+
+static int verity_parse_verity_error_mode(struct dm_verity *v, const char *arg_name)
+{
+ if (v->error_mode)
+ return -EINVAL;
+
+ if (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_RESTART))
+ v->error_mode = DM_VERITY_MODE_RESTART;
+ else if (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_PANIC))
+ v->error_mode = DM_VERITY_MODE_PANIC;
+
+ return 0;
+}
+
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
struct dm_verity_sig_opts *verify_args,
bool only_modifier_opts)
@@ -1122,6 +1193,16 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
}
continue;
+ } else if (verity_is_verity_error_mode(arg_name)) {
+ if (only_modifier_opts)
+ continue;
+ r = verity_parse_verity_error_mode(v, arg_name);
+ if (r) {
+ ti->error = "Conflicting error handling parameters";
+ return r;
+ }
+ continue;
+
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
if (only_modifier_opts)
continue;
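
The two new optional arguments select the I/O-error policy independently of the existing corruption policy (v->error_mode versus v->mode), so both can appear in one table. An illustrative table line for a 4 MiB data device with 4096-byte blocks; the device paths, root hash and salt are placeholders:

	dmsetup create vroot --table "0 8192 verity 1 /dev/sdX1 /dev/sdX2 \
	        4096 4096 1024 1 sha256 <root_hash> <salt> \
	        2 restart_on_corruption restart_on_error"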
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index d351d7d39c60..a9e2c6c0a33c 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -127,7 +127,7 @@ int verity_verify_root_hash(const void *root_hash, size_t root_hash_len,
#endif
VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL);
#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_PLATFORM_KEYRING
- if (ret == -ENOKEY)
+ if (ret == -ENOKEY || ret == -EKEYREJECTED)
ret = verify_pkcs7_signature(root_hash, root_hash_len, sig_data,
sig_len,
VERIFY_USE_PLATFORM_KEYRING,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 754e70bb5fe0..6b75159bf835 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -64,6 +64,7 @@ struct dm_verity {
unsigned int digest_size; /* digest size for the current hash algorithm */
unsigned int hash_reqsize; /* the size of temporary space for crypto */
enum verity_mode mode; /* mode for handling verification errors */
+ enum verity_mode error_mode;/* mode for handling I/O errors */
unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
struct workqueue_struct *verify_wq;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 87bb90303435..ff4a6b570b76 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2030,10 +2030,15 @@ static void dm_submit_bio(struct bio *bio)
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
+ dm_device_name(md));
+ bio_io_error(bio);
+ goto out;
+ }
- /* If suspended, or map not yet available, queue this IO for later */
- if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
- unlikely(!map)) {
+ /* If suspended, queue this IO for later */
+ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else if (bio->bi_opf & REQ_RAHEAD)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index cc466ad5cb1d..8ad782249af8 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -109,7 +109,6 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone);
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
-int dm_zone_map_bio(struct dm_target_io *io);
int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
sector_t sector, unsigned int nr_zones,
unsigned long *need_reset);
@@ -119,10 +118,6 @@ static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
return false;
}
-static inline int dm_zone_map_bio(struct dm_target_io *tio)
-{
- return DM_MAPIO_KILL;
-}
#endif
/*
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index da09834990b8..c7d36010c890 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -673,8 +673,9 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
/* Retry this message */
data->attempts -= attempts_made;
if (msg->timeout)
- dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n",
- msg->len, msg->msg, data->attempts, msg->reply);
+ dprintk(2, "retransmit: %*ph (attempts: %d, wait for %*ph)\n",
+ msg->len, msg->msg, data->attempts,
+ data->match_len, data->match_reply);
else
dprintk(2, "retransmit: %*ph (attempts: %d)\n",
msg->len, msg->msg, data->attempts);
@@ -780,6 +781,8 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
{
struct cec_data *data;
bool is_raw = msg_is_raw(msg);
+ bool reply_vendor_id = (msg->flags & CEC_MSG_FL_REPLY_VENDOR_ID) &&
+ msg->len > 1 && msg->msg[1] == CEC_MSG_VENDOR_COMMAND_WITH_ID;
int err;
if (adap->devnode.unregistered)
@@ -794,12 +797,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
msg->tx_low_drive_cnt = 0;
msg->tx_error_cnt = 0;
msg->sequence = 0;
+ msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW |
+ (reply_vendor_id ? CEC_MSG_FL_REPLY_VENDOR_ID : 0);
- if (msg->reply && msg->timeout == 0) {
+ if ((reply_vendor_id || msg->reply) && msg->timeout == 0) {
/* Make sure the timeout isn't 0. */
msg->timeout = 1000;
}
- msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW;
if (!msg->timeout)
msg->flags &= ~CEC_MSG_FL_REPLY_TO_FOLLOWERS;
@@ -809,6 +813,11 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
dprintk(1, "%s: invalid length %d\n", __func__, msg->len);
return -EINVAL;
}
+ if (reply_vendor_id && msg->len < 6) {
+ dprintk(1, "%s: <Vendor Command With ID> message too short\n",
+ __func__);
+ return -EINVAL;
+ }
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
@@ -900,8 +909,9 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
__func__);
return -ENONET;
}
- if (msg->reply) {
- dprintk(1, "%s: invalid msg->reply\n", __func__);
+ if (reply_vendor_id || msg->reply) {
+ dprintk(1, "%s: adapter is unconfigured so reply is not supported\n",
+ __func__);
return -EINVAL;
}
}
@@ -923,6 +933,14 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
data->fh = fh;
data->adap = adap;
data->blocking = block;
+ if (reply_vendor_id) {
+ memcpy(data->match_reply, msg->msg + 1, 4);
+ data->match_reply[4] = msg->reply;
+ data->match_len = 5;
+ } else if (msg->timeout) {
+ data->match_reply[0] = msg->reply;
+ data->match_len = 1;
+ }
init_completion(&data->c);
INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
@@ -1211,13 +1229,15 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
(cmd == CEC_MSG_REPORT_ARC_INITIATED ||
cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
- (dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
- dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
+ (data->match_reply[0] == CEC_MSG_REPORT_ARC_INITIATED ||
+ data->match_reply[0] == CEC_MSG_REPORT_ARC_TERMINATED)) {
dst->reply = cmd;
+ data->match_reply[0] = cmd;
+ }
/* Does the command match? */
if ((abort && cmd != dst->msg[1]) ||
- (!abort && cmd != dst->reply))
+ (!abort && memcmp(data->match_reply, msg->msg + 1, data->match_len)))
continue;
/* Does the addressing match? */
@@ -2318,18 +2338,21 @@ int cec_adap_status(struct seq_file *file, void *priv)
}
data = adap->transmitting;
if (data)
- seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
- data->msg.len, data->msg.msg, data->msg.reply,
+ seq_printf(file, "transmitting message: %*ph (reply: %*ph, timeout: %ums)\n",
+ data->msg.len, data->msg.msg,
+ data->match_len, data->match_reply,
data->msg.timeout);
seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
list_for_each_entry(data, &adap->transmit_queue, list) {
- seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
- data->msg.len, data->msg.msg, data->msg.reply,
+ seq_printf(file, "queued tx message: %*ph (reply: %*ph, timeout: %ums)\n",
+ data->msg.len, data->msg.msg,
+ data->match_len, data->match_reply,
data->msg.timeout);
}
list_for_each_entry(data, &adap->wait_queue, list) {
- seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
- data->msg.len, data->msg.msg, data->msg.reply,
+ seq_printf(file, "message waiting for reply: %*ph (reply: %*ph, timeout: %ums)\n",
+ data->msg.len, data->msg.msg,
+ data->match_len, data->match_reply,
data->msg.timeout);
}
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index 3ef915344304..c50299246fc4 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -580,7 +580,7 @@ static int cec_open(struct inode *inode, struct file *filp)
fh->mode_initiator = CEC_MODE_INITIATOR;
fh->adap = adap;
- err = cec_get_device(devnode);
+ err = cec_get_device(adap);
if (err) {
kfree(fh);
return err;
@@ -686,7 +686,7 @@ static int cec_release(struct inode *inode, struct file *filp)
mutex_unlock(&fh->lock);
kfree(fh);
- cec_put_device(devnode);
+ cec_put_device(adap);
filp->private_data = NULL;
return 0;
}
@@ -698,5 +698,4 @@ const struct file_operations cec_devnode_fops = {
.compat_ioctl = cec_ioctl,
.release = cec_release,
.poll = cec_poll,
- .llseek = no_llseek,
};
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index 6f940df0230c..48282d272fe6 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -51,35 +51,6 @@ static struct dentry *top_cec_dir;
/* dev to cec_devnode */
#define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev)
-int cec_get_device(struct cec_devnode *devnode)
-{
- /*
- * Check if the cec device is available. This needs to be done with
- * the devnode->lock held to prevent an open/unregister race:
- * without the lock, the device could be unregistered and freed between
- * the devnode->registered check and get_device() calls, leading to
- * a crash.
- */
- mutex_lock(&devnode->lock);
- /*
- * return ENODEV if the cec device has been removed
- * already or if it is not registered anymore.
- */
- if (!devnode->registered) {
- mutex_unlock(&devnode->lock);
- return -ENODEV;
- }
- /* and increase the device refcount */
- get_device(&devnode->dev);
- mutex_unlock(&devnode->lock);
- return 0;
-}
-
-void cec_put_device(struct cec_devnode *devnode)
-{
- put_device(&devnode->dev);
-}
-
/* Called when the last user of the cec device exits. */
static void cec_devnode_release(struct device *cd)
{
@@ -273,7 +244,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->cec_pin_is_high = true;
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
- adap->capabilities = caps;
+ adap->capabilities = caps | CEC_CAP_REPLY_VENDOR_ID;
if (debug_phys_addr)
adap->capabilities |= CEC_CAP_PHYS_ADDR;
adap->needs_hpd = caps & CEC_CAP_NEEDS_HPD;
diff --git a/drivers/media/cec/core/cec-priv.h b/drivers/media/cec/core/cec-priv.h
index ed1f8c67626b..ce42a37c4ac0 100644
--- a/drivers/media/cec/core/cec-priv.h
+++ b/drivers/media/cec/core/cec-priv.h
@@ -37,8 +37,6 @@ static inline bool msg_is_raw(const struct cec_msg *msg)
/* cec-core.c */
extern int cec_debug;
-int cec_get_device(struct cec_devnode *devnode);
-void cec_put_device(struct cec_devnode *devnode);
/* cec-adap.c */
int cec_monitor_all_cnt_inc(struct cec_adapter *adap);
diff --git a/drivers/media/cec/usb/Kconfig b/drivers/media/cec/usb/Kconfig
index 3f3a5c75287a..6faf4742981d 100644
--- a/drivers/media/cec/usb/Kconfig
+++ b/drivers/media/cec/usb/Kconfig
@@ -3,6 +3,7 @@
# USB drivers
if USB_SUPPORT && TTY
+source "drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig"
source "drivers/media/cec/usb/pulse8/Kconfig"
source "drivers/media/cec/usb/rainshadow/Kconfig"
endif
diff --git a/drivers/media/cec/usb/Makefile b/drivers/media/cec/usb/Makefile
index e4183d1bfa9a..c082679f5318 100644
--- a/drivers/media/cec/usb/Makefile
+++ b/drivers/media/cec/usb/Makefile
@@ -2,5 +2,6 @@
#
# Makefile for the CEC USB device drivers.
#
+obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) += extron-da-hd-4k-plus/
obj-$(CONFIG_USB_PULSE8_CEC) += pulse8/
obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow/
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig b/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig
new file mode 100644
index 000000000000..5354f0eebe5c
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config USB_EXTRON_DA_HD_4K_PLUS_CEC
+ tristate "Extron DA HD 4K Plus CEC driver"
+ depends on VIDEO_DEV
+ depends on USB
+ depends on USB_ACM
+ select CEC_CORE
+ select SERIO
+ select SERIO_SERPORT
+ help
+ This is a CEC driver for the Extron DA HD 4K Plus HDMI Splitter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called extron-da-hd-4k-plus-cec.
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
new file mode 100644
index 000000000000..2e8f7f60263f
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
@@ -0,0 +1,8 @@
+extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o
+obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o
+
+all:
+ $(MAKE) -C $(KDIR) M=$(shell pwd) modules
+
+install:
+ $(MAKE) -C $(KDIR) M=$(shell pwd) modules_install
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c
new file mode 100644
index 000000000000..73fdec4b791d
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <media/cec.h>
+
+#include "cec-splitter.h"
+
+/*
+ * Helper function to reply to a received message with a Feature Abort
+ * message.
+ */
+static int cec_feature_abort_reason(struct cec_adapter *adap,
+ struct cec_msg *msg, u8 reason)
+{
+ struct cec_msg tx_msg = { };
+
+ /*
+ * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
+ * message!
+ */
+ if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
+ return 0;
+	/* Don't send a Feature Abort in reply to messages from 'Unregistered' */
+ if (cec_msg_initiator(msg) == CEC_LOG_ADDR_UNREGISTERED)
+ return 0;
+ cec_msg_set_reply_to(&tx_msg, msg);
+ cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
+ return cec_transmit_msg(adap, &tx_msg, false);
+}
+
+/* Transmit an Active Source message from this output port to a sink */
+static void cec_port_out_active_source(struct cec_splitter_port *p)
+{
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ return;
+ p->is_active_source = true;
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ cec_msg_active_source(&msg, adap->phys_addr);
+ cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit Active Source messages from all output ports to the sinks */
+static void cec_out_active_source(struct cec_splitter *splitter)
+{
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++)
+ cec_port_out_active_source(splitter->ports[i]);
+}
+
+/* Transmit a Standby message from this output port to a sink */
+static void cec_port_out_standby(struct cec_splitter_port *p)
+{
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ return;
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ cec_msg_standby(&msg);
+ cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit Standby messages from all output ports to the sinks */
+static void cec_out_standby(struct cec_splitter *splitter)
+{
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++)
+ cec_port_out_standby(splitter->ports[i]);
+}
+
+/* Transmit an Image/Text View On message from this output port to a sink */
+static void cec_port_out_wakeup(struct cec_splitter_port *p, u8 opcode)
+{
+ struct cec_adapter *adap = p->adap;
+ u8 la = adap->log_addrs.log_addr[0];
+ struct cec_msg msg;
+
+ if (la == CEC_LOG_ADDR_INVALID)
+ la = CEC_LOG_ADDR_UNREGISTERED;
+ cec_msg_init(&msg, la, 0);
+ msg.len = 2;
+ msg.msg[1] = opcode;
+ cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit Image/Text View On messages from all output ports to the sinks */
+static void cec_out_wakeup(struct cec_splitter *splitter, u8 opcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++)
+ cec_port_out_wakeup(splitter->ports[i], opcode);
+}
+
+/*
+ * Update the power state of the unconfigured CEC device to either
+ * Off or On depending on the current state of the splitter.
+ * This keeps the outputs in a consistent state.
+ */
+void cec_splitter_unconfigured_output(struct cec_splitter_port *p)
+{
+ p->video_latency = 1;
+ p->power_status = p->splitter->is_standby ?
+ CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON;
+
+ /* The adapter was unconfigured, so clear the sequence and ts values */
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+}
+
+/*
+ * Update the power state of the newly configured CEC device to either
+ * Off or On depending on the current state of the splitter.
+ * This keeps the outputs in a consistent state.
+ */
+void cec_splitter_configured_output(struct cec_splitter_port *p)
+{
+ p->video_latency = 1;
+ p->power_status = p->splitter->is_standby ?
+ CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON;
+
+ if (p->splitter->is_standby) {
+ /*
+ * Some sinks only obey Standby if it comes from the
+ * active source.
+ */
+ cec_port_out_active_source(p);
+ cec_port_out_standby(p);
+ } else {
+ cec_port_out_wakeup(p, CEC_MSG_IMAGE_VIEW_ON);
+ }
+}
+
+/* Pass the in_msg on to all output ports */
+static void cec_out_passthrough(struct cec_splitter *splitter,
+ const struct cec_msg *in_msg)
+{
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ continue;
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ msg.len = in_msg->len;
+ memcpy(msg.msg + 1, in_msg->msg + 1, msg.len - 1);
+ cec_transmit_msg(adap, &msg, false);
+ }
+}
+
+/*
+ * See if all output ports received the Report Current Latency message,
+ * and if so, transmit the result from the input port to the video source.
+ */
+static void cec_out_report_current_latency(struct cec_splitter *splitter,
+ struct cec_adapter *input_adap)
+{
+ struct cec_msg reply = {};
+ unsigned int reply_lat = 0;
+ unsigned int cnt = 0;
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+
+ /* Skip unconfigured ports */
+ if (!adap->is_configured)
+ continue;
+ /* Return if a port is still waiting for a reply */
+ if (p->out_request_current_latency_seq)
+ return;
+ reply_lat += p->video_latency - 1;
+ cnt++;
+ }
+
+ /*
+ * All ports that can reply, replied, so clear the sequence
+ * and timestamp values.
+ */
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ }
+
+ /*
+ * Return if there were no replies or the input port is no longer
+ * configured.
+ */
+ if (!cnt || !input_adap->is_configured)
+ return;
+
+ /* Reply with the average latency */
+ reply_lat = 1 + reply_lat / cnt;
+ cec_msg_init(&reply, input_adap->log_addrs.log_addr[0],
+ splitter->request_current_latency_dest);
+ cec_msg_report_current_latency(&reply, input_adap->phys_addr,
+ reply_lat, 1, 1, 1);
+ cec_transmit_msg(input_adap, &reply, false);
+}
+
+/* Transmit Request Current Latency to all output ports */
+static int cec_out_request_current_latency(struct cec_splitter *splitter)
+{
+ ktime_t now = ktime_get();
+ bool error = true;
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+
+ if (!adap->is_configured) {
+ /* Clear if not configured */
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ } else if (!p->out_request_current_latency_seq) {
+ /*
+ * Keep the old ts if an earlier request is still
+ * pending. This ensures that the request will
+ * eventually time out based on the timestamp of
+ * the first request if the sink is unresponsive.
+ */
+ p->out_request_current_latency_ts = now;
+ }
+ }
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ continue;
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ cec_msg_request_current_latency(&msg, true, adap->phys_addr);
+ if (cec_transmit_msg(adap, &msg, false))
+ continue;
+ p->out_request_current_latency_seq = msg.sequence | (1U << 31);
+ error = false;
+ }
+ return error ? -ENODEV : 0;
+}
+
+/*
+ * See if all output ports received the Report Power Status message,
+ * and if so, transmit the result from the input port to the video source.
+ */
+static void cec_out_report_power_status(struct cec_splitter *splitter,
+ struct cec_adapter *input_adap)
+{
+ struct cec_msg reply = {};
+ /* The target power status of the splitter itself */
+ u8 splitter_pwr = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_STANDBY : CEC_OP_POWER_STATUS_ON;
+ /*
+ * The transient power status of the splitter, used if not all
+	 * outputs report the target power status.
+ */
+ u8 splitter_transient_pwr = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_TO_STANDBY : CEC_OP_POWER_STATUS_TO_ON;
+ u8 reply_pwr = splitter_pwr;
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+
+ /* Skip if no sink was found (HPD was low for more than 5s) */
+ if (!p->found_sink)
+ continue;
+
+ /* Return if a port is still waiting for a reply */
+ if (p->out_give_device_power_status_seq)
+ return;
+ if (p->power_status != splitter_pwr)
+ reply_pwr = splitter_transient_pwr;
+ }
+
+ /*
+ * All ports that can reply, replied, so clear the sequence
+ * and timestamp values.
+ */
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ }
+
+ /* Return if the input port is no longer configured. */
+ if (!input_adap->is_configured)
+ return;
+
+ /* Reply with the new power status */
+ cec_msg_init(&reply, input_adap->log_addrs.log_addr[0],
+ splitter->give_device_power_status_dest);
+ cec_msg_report_power_status(&reply, reply_pwr);
+ cec_transmit_msg(input_adap, &reply, false);
+}
+
+/* Transmit Give Device Power Status to all output ports */
+static int cec_out_give_device_power_status(struct cec_splitter *splitter)
+{
+ ktime_t now = ktime_get();
+ bool error = true;
+ unsigned int i;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+
+ /*
+ * Keep the old ts if an earlier request is still
+ * pending. This ensures that the request will
+ * eventually time out based on the timestamp of
+ * the first request if the sink is unresponsive.
+ */
+ if (adap->is_configured && !p->out_give_device_power_status_seq)
+ p->out_give_device_power_status_ts = now;
+ }
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ struct cec_adapter *adap = p->adap;
+ struct cec_msg msg;
+
+ if (!adap->is_configured)
+ continue;
+
+ cec_msg_init(&msg, adap->log_addrs.log_addr[0], 0);
+ cec_msg_give_device_power_status(&msg, true);
+ if (cec_transmit_msg(adap, &msg, false))
+ continue;
+ p->out_give_device_power_status_seq = msg.sequence | (1U << 31);
+ error = false;
+ }
+ return error ? -ENODEV : 0;
+}
+
+/*
+ * CEC messages received on the HDMI input of the splitter are
+ * forwarded (if relevant) to the HDMI outputs of the splitter.
+ */
+int cec_splitter_received_input(struct cec_splitter_port *p, struct cec_msg *msg)
+{
+ if (!cec_msg_status_is_ok(msg))
+ return 0;
+
+ if (msg->len < 2)
+ return -ENOMSG;
+
+ switch (msg->msg[1]) {
+ case CEC_MSG_DEVICE_VENDOR_ID:
+ case CEC_MSG_REPORT_POWER_STATUS:
+ case CEC_MSG_SET_STREAM_PATH:
+ case CEC_MSG_ROUTING_CHANGE:
+ case CEC_MSG_REQUEST_ACTIVE_SOURCE:
+ case CEC_MSG_SYSTEM_AUDIO_MODE_STATUS:
+ return 0;
+
+ case CEC_MSG_STANDBY:
+ p->splitter->is_standby = true;
+ cec_out_standby(p->splitter);
+ return 0;
+
+ case CEC_MSG_IMAGE_VIEW_ON:
+ case CEC_MSG_TEXT_VIEW_ON:
+ p->splitter->is_standby = false;
+ cec_out_wakeup(p->splitter, msg->msg[1]);
+ return 0;
+
+ case CEC_MSG_ACTIVE_SOURCE:
+ cec_out_active_source(p->splitter);
+ return 0;
+
+ case CEC_MSG_SET_SYSTEM_AUDIO_MODE:
+ cec_out_passthrough(p->splitter, msg);
+ return 0;
+
+ case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+ p->splitter->give_device_power_status_dest =
+ cec_msg_initiator(msg);
+ if (cec_out_give_device_power_status(p->splitter))
+ cec_feature_abort_reason(p->adap, msg,
+ CEC_OP_ABORT_INCORRECT_MODE);
+ return 0;
+
+ case CEC_MSG_REQUEST_CURRENT_LATENCY: {
+ u16 pa;
+
+ p->splitter->request_current_latency_dest =
+ cec_msg_initiator(msg);
+ cec_ops_request_current_latency(msg, &pa);
+ if (pa == p->adap->phys_addr &&
+ cec_out_request_current_latency(p->splitter))
+ cec_feature_abort_reason(p->adap, msg,
+ CEC_OP_ABORT_INCORRECT_MODE);
+ return 0;
+ }
+
+ default:
+ return -ENOMSG;
+ }
+ return -ENOMSG;
+}
+
+void cec_splitter_nb_transmit_canceled_output(struct cec_splitter_port *p,
+ const struct cec_msg *msg,
+ struct cec_adapter *input_adap)
+{
+ struct cec_splitter *splitter = p->splitter;
+ u32 seq = msg->sequence | (1U << 31);
+
+ /*
+ * If this is the result of a failed non-blocking transmit, or it is
+ * the result of the failed reply to a non-blocking transmit, then
+ * check if the original transmit was to get the current power status
+	 * or latency and, if so, assume that the remote device is, for one
+	 * reason or another, unavailable and that it is in the same
+ * power status as the splitter, or has no video latency.
+ */
+ if ((cec_msg_recv_is_tx_result(msg) && !(msg->tx_status & CEC_TX_STATUS_OK)) ||
+ (cec_msg_recv_is_rx_result(msg) && !(msg->rx_status & CEC_RX_STATUS_OK))) {
+ u8 tx_op = msg->msg[1];
+
+ if (msg->len < 2)
+ return;
+ if (cec_msg_recv_is_rx_result(msg) &&
+ (msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT))
+ tx_op = msg->msg[2];
+ switch (tx_op) {
+ case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+ if (p->out_give_device_power_status_seq != seq)
+ break;
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ p->power_status = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_STANDBY :
+ CEC_OP_POWER_STATUS_ON;
+ cec_out_report_power_status(splitter, input_adap);
+ break;
+ case CEC_MSG_REQUEST_CURRENT_LATENCY:
+ if (p->out_request_current_latency_seq != seq)
+ break;
+ p->video_latency = 1;
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ cec_out_report_current_latency(splitter, input_adap);
+ break;
+ }
+ return;
+ }
+
+ if (cec_msg_recv_is_tx_result(msg)) {
+ if (p->out_request_current_latency_seq != seq)
+ return;
+ p->out_request_current_latency_ts = ns_to_ktime(msg->tx_ts);
+ return;
+ }
+}
+
+/*
+ * CEC messages received on an HDMI output of the splitter
+ * are processed here.
+ */
+int cec_splitter_received_output(struct cec_splitter_port *p, struct cec_msg *msg,
+ struct cec_adapter *input_adap)
+{
+ struct cec_adapter *adap = p->adap;
+ struct cec_splitter *splitter = p->splitter;
+ u32 seq = msg->sequence | (1U << 31);
+ struct cec_msg reply = {};
+ u16 pa;
+
+ if (!adap->is_configured || msg->len < 2)
+ return -ENOMSG;
+
+ switch (msg->msg[1]) {
+ case CEC_MSG_REPORT_POWER_STATUS: {
+ u8 pwr;
+
+ cec_ops_report_power_status(msg, &pwr);
+ if (pwr > CEC_OP_POWER_STATUS_TO_STANDBY)
+ pwr = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_TO_STANDBY :
+ CEC_OP_POWER_STATUS_TO_ON;
+ p->power_status = pwr;
+ if (p->out_give_device_power_status_seq == seq) {
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ }
+ cec_out_report_power_status(splitter, input_adap);
+ return 0;
+ }
+
+ case CEC_MSG_REPORT_CURRENT_LATENCY: {
+ u8 video_lat;
+ u8 low_lat_mode;
+ u8 audio_out_comp;
+ u8 audio_out_delay;
+
+ cec_ops_report_current_latency(msg, &pa,
+ &video_lat, &low_lat_mode,
+ &audio_out_comp, &audio_out_delay);
+ if (!video_lat || video_lat >= 252)
+ video_lat = 1;
+ p->video_latency = video_lat;
+ if (p->out_request_current_latency_seq == seq) {
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ }
+ cec_out_report_current_latency(splitter, input_adap);
+ return 0;
+ }
+
+ case CEC_MSG_STANDBY:
+ case CEC_MSG_ROUTING_CHANGE:
+ case CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS:
+ return 0;
+
+ case CEC_MSG_ACTIVE_SOURCE:
+ cec_ops_active_source(msg, &pa);
+ if (pa == 0)
+ p->is_active_source = false;
+ return 0;
+
+ case CEC_MSG_REQUEST_ACTIVE_SOURCE:
+ if (!p->is_active_source)
+ return 0;
+ cec_msg_set_reply_to(&reply, msg);
+ cec_msg_active_source(&reply, adap->phys_addr);
+ cec_transmit_msg(adap, &reply, false);
+ return 0;
+
+ case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+ cec_msg_set_reply_to(&reply, msg);
+ cec_msg_report_power_status(&reply, splitter->is_standby ?
+ CEC_OP_POWER_STATUS_STANDBY :
+ CEC_OP_POWER_STATUS_ON);
+ cec_transmit_msg(adap, &reply, false);
+ return 0;
+
+ case CEC_MSG_SET_STREAM_PATH:
+ cec_ops_set_stream_path(msg, &pa);
+ if (pa == adap->phys_addr) {
+ cec_msg_set_reply_to(&reply, msg);
+ cec_msg_active_source(&reply, pa);
+ cec_transmit_msg(adap, &reply, false);
+ }
+ return 0;
+
+ default:
+ return -ENOMSG;
+ }
+ return -ENOMSG;
+}
+
+/*
+ * Called every second to check for timed out messages and whether there
+ * still is a video sink connected or not.
+ *
+ * Returns true if sinks were lost.
+ */
+bool cec_splitter_poll(struct cec_splitter *splitter,
+ struct cec_adapter *input_adap, bool debug)
+{
+ ktime_t now = ktime_get();
+ u8 pwr = splitter->is_standby ?
+ CEC_OP_POWER_STATUS_STANDBY : CEC_OP_POWER_STATUS_ON;
+ unsigned int max_delay_ms = input_adap->xfer_timeout_ms + 2000;
+ unsigned int i;
+ bool res = false;
+
+ for (i = 0; i < splitter->num_out_ports; i++) {
+ struct cec_splitter_port *p = splitter->ports[i];
+ s64 pwr_delta, lat_delta;
+ bool pwr_timeout, lat_timeout;
+
+ if (!p)
+ continue;
+
+ pwr_delta = ktime_ms_delta(now, p->out_give_device_power_status_ts);
+ pwr_timeout = p->out_give_device_power_status_seq &&
+ pwr_delta >= max_delay_ms;
+ lat_delta = ktime_ms_delta(now, p->out_request_current_latency_ts);
+ lat_timeout = p->out_request_current_latency_seq &&
+ lat_delta >= max_delay_ms;
+
+ /*
+ * If the HPD is low for more than 5 seconds, then assume no display
+ * is connected.
+ */
+ if (p->found_sink && ktime_to_ns(p->lost_sink_ts) &&
+ ktime_ms_delta(now, p->lost_sink_ts) > 5000) {
+ if (debug)
+ dev_info(splitter->dev,
+ "port %u: HPD low for more than 5s, assume no sink is connected.\n",
+ p->port);
+ p->found_sink = false;
+ p->lost_sink_ts = ktime_set(0, 0);
+ res = true;
+ }
+
+ /*
+ * If the power status request timed out, then set the port's
+ * power status to that of the splitter, ensuring a consistent
+ * power state.
+ */
+ if (pwr_timeout) {
+ mutex_lock(&p->adap->lock);
+ if (debug)
+ dev_info(splitter->dev,
+ "port %u: give up on power status for seq %u\n",
+ p->port,
+ p->out_give_device_power_status_seq & ~(1 << 31));
+ p->power_status = pwr;
+ p->out_give_device_power_status_seq = 0;
+ p->out_give_device_power_status_ts = ktime_set(0, 0);
+ mutex_unlock(&p->adap->lock);
+ cec_out_report_power_status(splitter, input_adap);
+ }
+
+ /*
+ * If the current latency request timed out, then set the port's
+ * latency to 1.
+ */
+ if (lat_timeout) {
+ mutex_lock(&p->adap->lock);
+ if (debug)
+ dev_info(splitter->dev,
+ "port %u: give up on latency for seq %u\n",
+ p->port,
+ p->out_request_current_latency_seq & ~(1 << 31));
+ p->video_latency = 1;
+ p->out_request_current_latency_seq = 0;
+ p->out_request_current_latency_ts = ktime_set(0, 0);
+ mutex_unlock(&p->adap->lock);
+ cec_out_report_current_latency(splitter, input_adap);
+ }
+ }
+ return res;
+}
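
cec_out_report_current_latency() above replies with the average over all responding outputs, kept in the CEC encoding where a reported value v corresponds to (v - 1) * 2 ms of video latency. A worked example with illustrative values, three sinks reporting 1, 3 and 5:

	reply_lat = 1 + ((1 - 1) + (3 - 1) + (5 - 1)) / 3 = 1 + 6 / 3 = 3

which a source decodes as (3 - 1) * 2 = 4 ms, the mean of 0 ms, 4 ms and 8 ms.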
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h
new file mode 100644
index 000000000000..7422f7c5719e
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/cec-splitter.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _CEC_SPLITTER_H_
+#define _CEC_SPLITTER_H_
+
+struct cec_splitter;
+
+#define STATE_CHANGE_MAX_REPEATS 2
+
+struct cec_splitter_port {
+ struct cec_splitter *splitter;
+ struct cec_adapter *adap;
+ unsigned int port;
+ bool is_active_source;
+ bool found_sink;
+ ktime_t lost_sink_ts;
+ u32 out_request_current_latency_seq;
+ ktime_t out_request_current_latency_ts;
+ u8 video_latency;
+ u32 out_give_device_power_status_seq;
+ ktime_t out_give_device_power_status_ts;
+ u8 power_status;
+};
+
+struct cec_splitter {
+ struct device *dev;
+ unsigned int num_out_ports;
+ struct cec_splitter_port **ports;
+
+ /* High-level splitter state */
+ u8 request_current_latency_dest;
+ u8 give_device_power_status_dest;
+ bool is_standby;
+};
+
+void cec_splitter_unconfigured_output(struct cec_splitter_port *port);
+void cec_splitter_configured_output(struct cec_splitter_port *port);
+int cec_splitter_received_input(struct cec_splitter_port *port, struct cec_msg *msg);
+int cec_splitter_received_output(struct cec_splitter_port *port, struct cec_msg *msg,
+ struct cec_adapter *input_adap);
+void cec_splitter_nb_transmit_canceled_output(struct cec_splitter_port *port,
+ const struct cec_msg *msg,
+ struct cec_adapter *input_adap);
+bool cec_splitter_poll(struct cec_splitter *splitter,
+ struct cec_adapter *input_adap, bool debug);
+
+#endif
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
new file mode 100644
index 000000000000..8526f613a40e
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
@@ -0,0 +1,1836 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Currently this driver does not fully support the serial port of the
+ * Extron, only the USB port is fully supported.
+ *
+ * Issues specific to using the serial port instead of the USB since the
+ * serial port doesn't detect if the device is powered off:
+ *
+ * - Some periodic ping mechanism is needed to detect when the Extron is
+ * powered off and when it is powered on again.
+ * - What to do when it is powered off and the driver is modprobed? Keep
+ * trying to contact the Extron indefinitely?
+ */
+
+#include <linux/completion.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+
+#include "extron-da-hd-4k-plus.h"
+
+MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_DESCRIPTION("Extron DA HD 4K PLUS HDMI CEC driver");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-1)");
+
+static unsigned int vendor_id;
+module_param(vendor_id, uint, 0444);
+MODULE_PARM_DESC(vendor_id, "CEC Vendor ID");
+
+static char manufacturer_name[4];
+module_param_string(manufacturer_name, manufacturer_name,
+ sizeof(manufacturer_name), 0644);
+MODULE_PARM_DESC(manufacturer_name,
+ "EDID Vendor String (3 uppercase characters)");
+
+static bool hpd_never_low;
+module_param(hpd_never_low, bool, 0644);
+MODULE_PARM_DESC(hpd_never_low, "Input HPD will never go low (1), or go low if all output HPDs are low (0, default)");
+
+#define EXTRON_TIMEOUT_SECS 6
+
+static const u8 hdmi_edid[256] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68,
+ 0x64, 0x6d, 0x69, 0x2d, 0x31, 0x30, 0x38, 0x30,
+ 0x70, 0x36, 0x30, 0x0a, 0x00, 0x00, 0x00, 0xfe,
+ 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x95,
+
+ 0x02, 0x03, 0x1b, 0xf1, 0x42, 0x10, 0x01, 0x23,
+ 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x68,
+ 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00, 0x21, 0x01,
+ 0xe2, 0x00, 0xca, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89,
+};
+
+static const u8 hdmi_edid_4k_300[256] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68,
+ 0x64, 0x6d, 0x69, 0x2d, 0x34, 0x6b, 0x2d, 0x36,
+ 0x30, 0x30, 0x0a, 0x20, 0x00, 0x00, 0x00, 0xfe,
+ 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x87,
+
+ 0x02, 0x03, 0x1f, 0xf1, 0x43, 0x10, 0x5f, 0x01,
+ 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00, 0x00,
+ 0x6b, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c,
+ 0x21, 0x00, 0x20, 0x01, 0xe2, 0x00, 0xca, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6,
+};
+
+static const u8 hdmi_edid_4k_600[256] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x20, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x20, 0x00, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08, 0xe8,
+ 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58,
+ 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68,
+ 0x64, 0x6d, 0x69, 0x2d, 0x34, 0x6b, 0x2d, 0x36,
+ 0x30, 0x30, 0x0a, 0x20, 0x00, 0x00, 0x00, 0xfe,
+ 0x00, 0x73, 0x65, 0x72, 0x69, 0x6f, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0x4c,
+
+ 0x02, 0x03, 0x28, 0xf1, 0x44, 0x61, 0x5f, 0x10,
+ 0x01, 0x23, 0x09, 0x07, 0x07, 0x83, 0x01, 0x00,
+ 0x00, 0x6b, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x00,
+ 0x3c, 0x21, 0x00, 0x20, 0x01, 0x67, 0xd8, 0x5d,
+ 0xc4, 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
+};
+
+static int extron_send_byte(struct serio *serio, char byte)
+{
+ int err, i;
+
+ for (i = 0; i < 100; i++) {
+ err = serio_write(serio, byte);
+ if (!err)
+ break;
+ usleep_range(80, 120);
+ }
+ if (err)
+ dev_warn(&serio->dev, "unable to write byte after 100 attempts\n");
+ return err ? -EIO : 0;
+}
+
+static int extron_send_len(struct serio *serio, const char *command,
+ const unsigned char *bin, unsigned int len)
+{
+ int err = 0;
+
+ for (; !err && *command; command++)
+ err = extron_send_byte(serio, *command);
+ if (!err)
+ err = extron_send_byte(serio, '\r');
+ if (bin)
+ for (; !err && len; len--)
+ err = extron_send_byte(serio, *bin++);
+ return err;
+}
+
+static int extron_send_and_wait_len(struct extron *extron, struct extron_port *port,
+ const char *cmd, const unsigned char *bin,
+ unsigned int len, const char *response)
+{
+ int timeout = EXTRON_TIMEOUT_SECS * HZ;
+ int err;
+
+ if (debug) {
+ if (response)
+ dev_info(extron->dev, "transmit %s (response: %s)\n",
+ cmd, response);
+ else
+ dev_info(extron->dev, "transmit %s\n", cmd);
+ }
+
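+ /*
+ * Commands directed at a specific port complete on that port's
+ * completion, device-global commands on the extron one.
+ */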
+ mutex_lock(&extron->serio_lock);
+ if (port) {
+ init_completion(&port->cmd_done);
+ port->cmd_error = 0;
+ port->response = response;
+ } else {
+ init_completion(&extron->cmd_done);
+ extron->cmd_error = 0;
+ extron->response = response;
+ }
+ err = extron_send_len(extron->serio, cmd, bin, len);
+
+ if (!err && response &&
+ !wait_for_completion_timeout(port ? &port->cmd_done : &extron->cmd_done, timeout)) {
+ dev_info(extron->dev, "transmit %s failed with %s (expected: %s)\n",
+ cmd, extron->reply, response);
+ err = -ETIMEDOUT;
+ }
+
+ if (!err && response && (port ? port->cmd_error : extron->cmd_error)) {
+ dev_info(extron->dev, "transmit %s failed with E%02u (expected: %s)\n",
+ cmd, port ? port->cmd_error : extron->cmd_error, response);
+ if (port)
+ port->cmd_error = 0;
+ else
+ extron->cmd_error = 0;
+ err = -EPROTO;
+ }
+ if (port)
+ port->response = NULL;
+ else
+ extron->response = NULL;
+ mutex_unlock(&extron->serio_lock);
+ return err;
+}
+
+static int extron_send_and_wait(struct extron *extron, struct extron_port *port,
+ const char *cmd, const char *response)
+{
+ return extron_send_and_wait_len(extron, port, cmd, NULL, 0, response);
+}
+
+static void extron_parse_edid(struct extron_port *port)
+{
+ const u8 *edid = port->edid;
+ unsigned int i, end;
+ u8 d;
+
+ port->has_4kp30 = false;
+ port->has_4kp60 = false;
+ port->has_qy = false;
+ port->has_qs = false;
+ /* Store Established Timings 1 and 2 */
+ port->est_i = edid[0x23];
+ port->est_ii = edid[0x24];
+
+ /* Check the DTDs in the base EDID block */
+ for (i = 0; i < 4; i++) {
+ const u8 *dtd = edid + 0x36 + i * 18;
+ unsigned int w, h;
+ unsigned int mhz;
+ u64 pclk;
+
+ if (!dtd[0] && !dtd[1])
+ continue;
+ w = dtd[2] + ((dtd[4] & 0xf0) << 4);
+ h = dtd[5] + ((dtd[7] & 0xf0) << 4);
+ if (w != 3840 || h != 2160)
+ continue;
+
+ w += dtd[3] + ((dtd[4] & 0x0f) << 8);
+ h += dtd[6] + ((dtd[7] & 0x0f) << 8);
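+ /*
+ * The DTD pixel clock is stored in units of 10 kHz, so dividing
+ * pclk * 100000 by the total pixel count yields ten times the
+ * frame rate: the thresholds 297 and 594 below correspond to
+ * 29.7 and 59.4 Hz, which also accepts the 1000/1001-reduced
+ * 30 and 60 Hz rates.
+ */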
+ pclk = dtd[0] + (dtd[1] << 8);
+ pclk *= 100000;
+ mhz = div_u64(pclk, w * h);
+ if (mhz >= 297)
+ port->has_4kp30 = true;
+ if (mhz >= 594)
+ port->has_4kp60 = true;
+ }
+
+ if (port->edid_blocks == 1)
+ return;
+
+ edid += 128;
+
+ /* Return if not a CTA-861 extension block */
+ if (edid[0] != 0x02 || edid[1] != 0x03)
+ return;
+
+ /* search Video Data Block (tag 2) */
+ d = edid[2] & 0x7f;
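+ /* d is the offset of the first DTD; data blocks occupy bytes 4..d-1 */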
+ /* Check if there are Data Blocks */
+ if (d <= 4)
+ return;
+
+ i = 4;
+ end = d;
+
+ do {
+ u8 tag = edid[i] >> 5;
+ u8 len = edid[i] & 0x1f;
+
+ /* Avoid buffer overrun in case the EDID is malformed */
+ if (i + len + 1 > 0x7f)
+ return;
+
+ switch (tag) {
+ case 2: /* Video Data Block */
+ /* Search for VIC 97 */
+ if (memchr(edid + i + 1, 97, len))
+ port->has_4kp60 = true;
+ /* Search for VIC 95 */
+ if (memchr(edid + i + 1, 95, len))
+ port->has_4kp30 = true;
+ break;
+
+ case 7: /* Use Extended Tag */
+ switch (edid[i + 1]) {
+ case 0: /* Video Capability Data Block */
+ if (edid[i + 2] & 0x80)
+ port->has_qy = true;
+ if (edid[i + 2] & 0x40)
+ port->has_qs = true;
+ break;
+ }
+ break;
+ }
+ i += len + 1;
+ } while (i < end);
+}
+
+static int get_edid_tag_location(const u8 *edid, unsigned int size,
+ u8 want_tag, u8 ext_tag)
+{
+ unsigned int offset = 128;
+ int i, end;
+ u8 d;
+
+ edid += offset;
+
+ /* Return if not a CTA-861 extension block */
+ if (size < 256 || edid[0] != 0x02 || edid[1] != 0x03)
+ return -1;
+
+ /* search tag */
+ d = edid[0x02] & 0x7f;
+ if (d <= 4)
+ return -1;
+
+ i = 0x04;
+ end = d;
+
+ do {
+ unsigned char tag = edid[i] >> 5;
+ unsigned char len = edid[i] & 0x1f;
+
+ if (tag != want_tag || i + len > end) {
+ i += len + 1;
+ continue;
+ }
+
+ if (tag < 7 || (len >= 1 && edid[i + 1] == ext_tag))
+ return offset + i;
+ i += len + 1;
+ } while (i < end);
+ return -1;
+}
+
+static void extron_edid_crc(u8 *edid)
+{
+ u8 sum = 0;
+ int offset;
+
+ /* Update the checksum: all 128 bytes must sum to 0 (mod 256) */
+ for (offset = 0; offset < 127; offset++)
+ sum += edid[offset];
+ edid[127] = 256 - sum;
+}
+
+/*
+ * Fill in an EDID string. As per VESA EDID-1.3, strings are at most 13
+ * characters long. If the string is shorter, terminate it with a 0x0a
+ * character and pad the remainder with spaces.
+ */
+static void extron_set_edid_string(u8 *start, const char *s)
+{
+ const unsigned int max_len = 13;
+ int len = strlen(s);
+
+ memset(start, ' ', max_len);
+ if (len > max_len)
+ len = max_len;
+ memcpy(start, s, len);
+ if (len < max_len)
+ start[len] = 0x0a;
+}
+
+static void extron_update_edid(struct extron_port *port, unsigned int blocks)
+{
+ int offset;
+ u8 c1, c2;
+
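+ /*
+ * Pack the 3-letter manufacturer name into the 2-byte EDID
+ * manufacturer ID: 5 bits per letter, with 'A' encoded as 1.
+ */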
+ c1 = ((manufacturer_name[0] - '@') << 2) |
+ (((manufacturer_name[1] - '@') >> 3) & 0x03);
+ c2 = (((manufacturer_name[1] - '@') & 0x07) << 5) |
+ ((manufacturer_name[2] - '@') & 0x1f);
+
+ port->edid_tmp[8] = c1;
+ port->edid_tmp[9] = c2;
+
+ /* Set Established Timings, but always enable VGA */
+ port->edid_tmp[0x23] = port->est_i | 0x20;
+ port->edid_tmp[0x24] = port->est_ii;
+
+ /* Set the Monitor Name to the unit name */
+ extron_set_edid_string(port->edid_tmp + 0x5f, port->extron->unit_name);
+ /* Set the ASCII String to the CEC adapter name */
+ extron_set_edid_string(port->edid_tmp + 0x71, port->adap->name);
+
+ extron_edid_crc(port->edid_tmp);
+
+ /* Find Video Capability Data Block */
+ offset = get_edid_tag_location(port->edid_tmp, blocks * 128, 7, 0);
+ if (offset > 0) {
+ port->edid_tmp[offset + 2] &= ~0xc0;
+ if (port->has_qy)
+ port->edid_tmp[offset + 2] |= 0x80;
+ if (port->has_qs)
+ port->edid_tmp[offset + 2] |= 0x40;
+ }
+
+ extron_edid_crc(port->edid_tmp + 128);
+}
+
+static int extron_write_edid(struct extron_port *port,
+ const u8 *edid, unsigned int blocks)
+{
+ struct extron *extron = port->extron;
+ u16 phys_addr = CEC_PHYS_ADDR_INVALID;
+ int ret;
+
+ if (cec_get_edid_spa_location(edid, blocks * 128))
+ phys_addr = 0;
+
+ if (mutex_lock_interruptible(&extron->edid_lock))
+ return -EINTR;
+
+ memcpy(port->edid_tmp, edid, blocks * 128);
+
+ if (manufacturer_name[0])
+ extron_update_edid(port, blocks);
+
+ ret = extron_send_and_wait_len(port->extron, port, "W+UF256,in.bin",
+ port->edid_tmp, sizeof(port->edid_tmp),
+ "Upl");
+ if (ret)
+ goto unlock;
+ ret = extron_send_and_wait(port->extron, port, "WI1,in.binEDID",
+ "EdidI01");
+ if (ret)
+ goto unlock;
+
+ port->edid_blocks = blocks;
+ memcpy(port->edid, port->edid_tmp, blocks * 128);
+ port->read_edid = true;
+ mutex_unlock(&extron->edid_lock);
+
+ cec_s_phys_addr(port->adap, phys_addr, false);
+ return 0;
+
+unlock:
+ mutex_unlock(&extron->edid_lock);
+ return ret;
+}
+
+static void update_edid_work(struct work_struct *w)
+{
+ struct extron *extron = container_of(w, struct extron,
+ work_update_edid.work);
+ struct extron_port *in = extron->ports[extron->num_out_ports];
+ struct extron_port *p;
+ bool has_edid = false;
+ bool has_4kp30 = true;
+ bool has_4kp60 = true;
+ bool has_qy = true;
+ bool has_qs = true;
+ u8 est_i = 0xff;
+ u8 est_ii = 0xff;
+ unsigned int out;
+
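+ /*
+ * Combine the EDIDs of all connected outputs: the input port
+ * only advertises capabilities that every sink supports.
+ */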
+ for (out = 0; has_4kp60 && out < extron->num_out_ports; out++) {
+ p = extron->ports[out];
+ if (p->read_edid) {
+ has_4kp60 = p->has_4kp60;
+ est_i &= p->est_i;
+ est_ii &= p->est_ii;
+ has_edid = true;
+ }
+ }
+ for (out = 0; has_4kp30 && out < extron->num_out_ports; out++)
+ if (extron->ports[out]->read_edid)
+ has_4kp30 = extron->ports[out]->has_4kp30;
+
+ for (out = 0; has_qy && out < extron->num_out_ports; out++)
+ if (extron->ports[out]->read_edid)
+ has_qy = extron->ports[out]->has_qy;
+
+ for (out = 0; has_qs && out < extron->num_out_ports; out++)
+ if (extron->ports[out]->read_edid)
+ has_qs = extron->ports[out]->has_qs;
+
+ /* exit if no output port had an EDID */
+ if (!has_edid)
+ return;
+
+ /* exit if the input EDID properties remained unchanged */
+ if (has_4kp60 == in->has_4kp60 && has_4kp30 == in->has_4kp30 &&
+ has_qy == in->has_qy && has_qs == in->has_qs &&
+ est_i == in->est_i && est_ii == in->est_ii)
+ return;
+
+ in->has_4kp60 = has_4kp60;
+ in->has_4kp30 = has_4kp30;
+ in->has_qy = has_qy;
+ in->has_qs = has_qs;
+ in->est_i = est_i;
+ in->est_ii = est_ii;
+ extron_write_edid(extron->ports[extron->num_out_ports],
+ has_4kp60 ? hdmi_edid_4k_600 :
+ (has_4kp30 ? hdmi_edid_4k_300 : hdmi_edid), 2);
+}
+
+static void extron_read_edid(struct extron_port *port)
+{
+ struct extron *extron = port->extron;
+ char cmd[10], reply[10];
+ unsigned int idx;
+
+ idx = port->port.port + (port->is_input ? 0 : extron->num_in_ports);
+ snprintf(cmd, sizeof(cmd), "WR%uEDID", idx);
+ snprintf(reply, sizeof(reply), "EdidR%u", idx);
+ if (mutex_lock_interruptible(&extron->edid_lock))
+ return;
+ if (port->read_edid)
+ goto unlock;
+ extron->edid_bytes_read = 0;
+ extron->edid_port = port;
+ port->edid_blocks = 0;
+ if (!port->has_edid)
+ goto no_edid;
+
+ extron->edid_reading = true;
+
+ if (!extron_send_and_wait(extron, port, cmd, reply))
+ wait_for_completion_killable_timeout(&extron->edid_completion,
+ msecs_to_jiffies(1000));
+ if (port->edid_blocks) {
+ extron_parse_edid(port);
+ port->read_edid = true;
+ if (!port->is_input)
+ v4l2_ctrl_s_ctrl(port->ctrl_tx_edid_present, 1);
+ }
+no_edid:
+ extron->edid_reading = false;
+unlock:
+ mutex_unlock(&extron->edid_lock);
+ cancel_delayed_work_sync(&extron->work_update_edid);
+ if (manufacturer_name[0])
+ schedule_delayed_work(&extron->work_update_edid,
+ msecs_to_jiffies(1000));
+}
+
+static void extron_irq_work_handler(struct work_struct *work)
+{
+ struct extron_port *port =
+ container_of(work, struct extron_port, irq_work);
+ struct extron *extron = port->extron;
+ unsigned long flags;
+ bool update_pa;
+ u16 pa;
+ bool update_has_signal;
+ bool has_signal;
+ bool update_has_edid;
+ bool has_edid;
+ u32 status;
+
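+ /* Deliver queued CEC messages, dropping the lock around each delivery */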
+ spin_lock_irqsave(&port->msg_lock, flags);
+ while (port->rx_msg_num) {
+ spin_unlock_irqrestore(&port->msg_lock, flags);
+ cec_received_msg(port->adap,
+ &port->rx_msg[port->rx_msg_cur_idx]);
+ spin_lock_irqsave(&port->msg_lock, flags);
+ if (port->rx_msg_num)
+ port->rx_msg_num--;
+ port->rx_msg_cur_idx =
+ (port->rx_msg_cur_idx + 1) % NUM_MSGS;
+ }
+ update_pa = port->update_phys_addr;
+ pa = port->phys_addr;
+ port->update_phys_addr = false;
+ update_has_signal = port->update_has_signal;
+ has_signal = port->has_signal;
+ port->update_has_signal = false;
+ update_has_edid = port->update_has_edid;
+ has_edid = port->has_edid;
+ port->update_has_edid = false;
+ status = port->tx_done_status;
+ port->tx_done_status = 0;
+ spin_unlock_irqrestore(&port->msg_lock, flags);
+
+ if (status)
+ cec_transmit_done(port->adap, status, 0, 0, 0, 0);
+
+ if (update_has_signal && port->is_input)
+ v4l2_ctrl_s_ctrl(port->ctrl_rx_power_present, has_signal);
+
+ if (update_has_edid && !port->is_input) {
+ v4l2_ctrl_s_ctrl(port->ctrl_tx_hotplug,
+ port->has_edid);
+ if (port->has_edid) {
+ port->port.found_sink = true;
+ port->port.lost_sink_ts = ktime_set(0, 0);
+ } else {
+ port->port.lost_sink_ts = ktime_get();
+ }
+ if (!has_edid) {
+ port->edid_blocks = 0;
+ port->read_edid = false;
+ if (extron->edid_reading && !has_edid &&
+ extron->edid_port == port)
+ extron->edid_reading = false;
+ v4l2_ctrl_s_ctrl(port->ctrl_tx_edid_present, 0);
+ } else if (!extron->edid_reading || extron->edid_port != port) {
+ extron_read_edid(port);
+ }
+ }
+ if (update_pa)
+ cec_s_phys_addr(port->adap, pa, false);
+}
+
+static void extron_process_received(struct extron_port *port, const char *data)
+{
+ struct cec_msg msg = {};
+ unsigned int len = strlen(data);
+ unsigned long irq_flags;
+ unsigned int idx;
+
+ if (!port || port->disconnected)
+ return;
+
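+ /* Expect one or more %XY hex triplets, then '*' and one trailing char */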
+ if (len < 5 || (len - 2) % 3 || data[len - 2] != '*')
+ goto malformed;
+
+ while (*data != '*') {
+ int v = hex2bin(&msg.msg[msg.len], data + 1, 1);
+
+ if (*data != '%' || v)
+ goto malformed;
+ msg.len++;
+ data += 3;
+ }
+
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ idx = (port->rx_msg_cur_idx + port->rx_msg_num) %
+ NUM_MSGS;
+ if (port->rx_msg_num == NUM_MSGS) {
+ dev_warn(port->dev,
+ "message queue is full, dropping %*ph\n",
+ msg.len, msg.msg);
+ spin_unlock_irqrestore(&port->msg_lock,
+ irq_flags);
+ return;
+ }
+ port->rx_msg_num++;
+ port->rx_msg[idx] = msg;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (!port->disconnected)
+ schedule_work(&port->irq_work);
+ return;
+
+malformed:
+ dev_info(port->extron->dev, "malformed msg received: '%s'\n", data);
+}
+
+static void extron_port_signal_change(struct extron_port *port, bool has_sig)
+{
+ unsigned long irq_flags;
+ bool update = false;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ if (!port->update_has_signal && port->has_signal != has_sig) {
+ port->update_has_signal = true;
+ update = true;
+ }
+ port->has_signal = has_sig;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (update && !port->disconnected)
+ schedule_work(&port->irq_work);
+}
+
+static void extron_process_signal_change(struct extron *extron, const char *data)
+{
+ unsigned int i;
+
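+ /* data[0] holds the input signal state, data[2 + 2 * i] that of output i */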
+ extron_port_signal_change(extron->ports[extron->num_out_ports],
+ data[0] == '1');
+ for (i = 0; i < extron->num_out_ports; i++)
+ extron_port_signal_change(extron->ports[i],
+ data[2 + 2 * i] != '0');
+}
+
+static void extron_port_edid_change(struct extron_port *port, bool has_edid)
+{
+ unsigned long irq_flags;
+ bool update = false;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ if (!port->update_has_edid && port->has_edid != has_edid) {
+ port->update_has_edid = true;
+ update = true;
+ }
+ port->has_edid = has_edid;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (update && !port->disconnected)
+ schedule_work(&port->irq_work);
+}
+
+static void extron_process_edid_change(struct extron *extron, const char *data)
+{
+ unsigned int i;
+
+ /*
+ * Do nothing if the Extron isn't ready yet. Trying to do this
+ * while the Extron firmware is still settling will fail.
+ */
+ if (!extron->is_ready)
+ return;
+
+ for (i = 0; i < extron->num_out_ports; i++)
+ extron_port_edid_change(extron->ports[i],
+ data[2 + 2 * i] != '0');
+}
+
+static void extron_phys_addr_change(struct extron_port *port, u16 pa)
+{
+ unsigned long irq_flags;
+ bool update = false;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ if (!port->update_phys_addr && port->phys_addr != pa) {
+ update = true;
+ port->update_phys_addr = true;
+ }
+ port->phys_addr = pa;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (update && !port->disconnected)
+ schedule_work(&port->irq_work);
+}
+
+static void extron_process_tx_done(struct extron_port *port, char status)
+{
+ unsigned long irq_flags;
+ unsigned int tx_status;
+
+ if (!port)
+ return;
+
+ switch (status) {
+ case '0':
+ tx_status = CEC_TX_STATUS_NACK | CEC_TX_STATUS_MAX_RETRIES;
+ break;
+ case '1':
+ tx_status = CEC_TX_STATUS_OK;
+ break;
+ default:
+ tx_status = CEC_TX_STATUS_ERROR;
+ break;
+ }
+ spin_lock_irqsave(&port->msg_lock, irq_flags);
+ port->tx_done_status = tx_status;
+ spin_unlock_irqrestore(&port->msg_lock, irq_flags);
+ if (!port->disconnected)
+ schedule_work(&port->irq_work);
+}
+
+static void extron_add_edid(struct extron_port *port, const char *hex)
+{
+ struct extron *extron = port ? port->extron : NULL;
+
+ if (!port || port != extron->edid_port)
+ return;
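+ /* The EDID arrives as lines of hex digits; accumulate them in port->edid */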
+ while (extron->edid_bytes_read < sizeof(port->edid) && *hex) {
+ int err = hex2bin(&port->edid[extron->edid_bytes_read], hex, 1);
+
+ if (err) {
+ extron->edid_reading = false;
+ complete(&extron->edid_completion);
+ break;
+ }
+ extron->edid_bytes_read++;
+ hex += 2;
+ }
+ if (extron->edid_bytes_read == 128 &&
+ port->edid[126] == 0) {
+ /* There are no extension blocks, we're done */
+ port->edid_blocks = 1;
+ extron->edid_reading = false;
+ complete(&extron->edid_completion);
+ }
+ if (extron->edid_bytes_read < sizeof(port->edid))
+ return;
+ if (!*hex)
+ port->edid_blocks = 2;
+ extron->edid_reading = false;
+ complete(&extron->edid_completion);
+}
+
+static irqreturn_t extron_interrupt(struct serio *serio, unsigned char data,
+ unsigned int flags)
+{
+ struct extron *extron = serio_get_drvdata(serio);
+ struct extron_port *port = NULL;
+ bool found_response;
+ unsigned int p;
+
+ if (data == '\r' || data == '\n') {
+ if (extron->idx == 0)
+ return IRQ_HANDLED;
+ memcpy(extron->data, extron->buf, extron->idx);
+ extron->len = extron->idx;
+ extron->data[extron->len] = 0;
+ if (debug)
+ dev_info(extron->dev, "received %s\n", extron->data);
+ extron->idx = 0;
+ if (!memcmp(extron->data, "Sig", 3) &&
+ extron->data[4] == '*') {
+ extron_process_signal_change(extron, extron->data + 3);
+ } else if (!memcmp(extron->data, "Hdcp", 4) &&
+ extron->data[5] == '*') {
+ extron_process_edid_change(extron, extron->data + 4);
+ } else if (!memcmp(extron->data, "DcecI", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_in_ports) {
+ unsigned int p = extron->data[5] - '1';
+
+ p += extron->num_out_ports;
+ extron_process_tx_done(extron->ports[p],
+ extron->data[extron->len - 1]);
+ } else if (!memcmp(extron->data, "Ceci", 4) &&
+ extron->data[4] >= '1' &&
+ extron->data[4] < '1' + extron->num_in_ports &&
+ extron->data[5] == '*') {
+ unsigned int p = extron->data[4] - '1';
+
+ p += extron->num_out_ports;
+ extron_process_received(extron->ports[p],
+ extron->data + 6);
+ } else if (!memcmp(extron->data, "DcecO", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_out_ports) {
+ unsigned int p = extron->data[5] - '1';
+
+ extron_process_tx_done(extron->ports[p],
+ extron->data[extron->len - 1]);
+ } else if (!memcmp(extron->data, "Ceco", 4) &&
+ extron->data[4] >= '1' &&
+ extron->data[4] < '1' + extron->num_out_ports &&
+ extron->data[5] == '*') {
+ unsigned int p = extron->data[4] - '1';
+
+ extron_process_received(extron->ports[p],
+ extron->data + 6);
+ } else if (!memcmp(extron->data, "Pceco", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_out_ports) {
+ unsigned int p = extron->data[5] - '1';
+ unsigned int tmp_pa[2] = { 0xff, 0xff };
+
+ if (sscanf(extron->data + 7, "%%%02x%%%02x",
+ &tmp_pa[0], &tmp_pa[1]) == 2)
+ extron_phys_addr_change(extron->ports[p],
+ tmp_pa[0] << 8 | tmp_pa[1]);
+ } else if (!memcmp(extron->data, "Pceci", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_in_ports) {
+ unsigned int p = extron->data[5] - '1';
+ unsigned int tmp_pa[2] = { 0xff, 0xff };
+
+ p += extron->num_out_ports;
+ if (sscanf(extron->data + 7, "%%%02x%%%02x",
+ &tmp_pa[0], &tmp_pa[1]) == 2)
+ extron_phys_addr_change(extron->ports[p],
+ tmp_pa[0] << 8 | tmp_pa[1]);
+ } else if (!memcmp(extron->data, "EdidR", 5) &&
+ extron->data[5] >= '1' &&
+ extron->data[5] < '1' + extron->num_ports &&
+ extron->data[6] == '*') {
+ unsigned int p = extron->data[5] - '1';
+
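+ /* EDID index 1 is the input port, indices 2..N map to outputs 1..N-1 */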
+ if (p)
+ p--;
+ else
+ p = extron->num_out_ports;
+ extron_add_edid(extron->ports[p], extron->data + 7);
+ } else if (extron->edid_reading && extron->len == 32 &&
+ extron->edid_port) {
+ extron_add_edid(extron->edid_port, extron->data);
+ }
+
+ found_response = false;
+ if (extron->response &&
+ !strncmp(extron->response, extron->data,
+ strlen(extron->response)))
+ found_response = true;
+
+ for (p = 0; !found_response && p < extron->num_ports; p++) {
+ port = extron->ports[p];
+ if (port && port->response &&
+ !strncmp(port->response, extron->data,
+ strlen(port->response)))
+ found_response = true;
+ }
+
+ if (!found_response && extron->response &&
+ extron->data[0] == 'E' &&
+ isdigit(extron->data[1]) &&
+ isdigit(extron->data[2]) &&
+ !extron->data[3]) {
+ extron->cmd_error = (extron->data[1] - '0') * 10 +
+ extron->data[2] - '0';
+ extron->response = NULL;
+ complete(&extron->cmd_done);
+ }
+
+ if (!found_response)
+ return IRQ_HANDLED;
+
+ memcpy(extron->reply, extron->data, extron->len);
+ extron->reply[extron->len] = 0;
+ if (!port) {
+ extron->response = NULL;
+ complete(&extron->cmd_done);
+ } else {
+ port->response = NULL;
+ complete(&port->cmd_done);
+ }
+ return IRQ_HANDLED;
+ }
+
+ if (extron->idx >= DATA_SIZE - 1) {
+ dev_info(extron->dev,
+ "throwing away %d bytes of garbage\n", extron->idx);
+ extron->idx = 0;
+ }
+ extron->buf[extron->idx++] = (char)data;
+ return IRQ_HANDLED;
+}
+
+static int extron_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+
+ return (port->disconnected && enable) ? -ENODEV : 0;
+}
+
+static int extron_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+ char cmd[26];
+ char resp[25];
+ u8 la = log_addr == CEC_LOG_ADDR_INVALID ? 15 : log_addr;
+ int err;
+
+ if (port->disconnected)
+ return -ENODEV;
+ snprintf(cmd, sizeof(cmd), "W%c%u*%uLCEC",
+ port->direction, port->port.port, la);
+ snprintf(resp, sizeof(resp), "Lcec%c%u*%u",
+ port->direction, port->port.port, la);
+ err = extron_send_and_wait(port->extron, port, cmd, resp);
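+ /* Errors are ignored when unconfiguring (log_addr is invalid) */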
+ return log_addr != CEC_LOG_ADDR_INVALID && err ? err : 0;
+}
+
+static int extron_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+ char buf[CEC_MAX_MSG_SIZE * 3 + 1];
+ char cmd[CEC_MAX_MSG_SIZE * 3 + 13];
+ unsigned int i;
+
+ if (port->disconnected)
+ return -ENODEV;
+ buf[0] = 0;
+ for (i = 0; i < msg->len - 1; i++)
+ sprintf(buf + i * 3, "%%%02X", msg->msg[i + 1]);
+ snprintf(cmd, sizeof(cmd), "W%c%u*%u*%u*%sDCEC",
+ port->direction, port->port.port,
+ cec_msg_initiator(msg), cec_msg_destination(msg), buf);
+ return extron_send_and_wait(port->extron, port, cmd, NULL);
+}
+
+static void extron_cec_adap_unconfigured(struct cec_adapter *adap)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+
+ if (port->disconnected)
+ return;
+ if (debug)
+ dev_info(port->extron->dev, "unconfigured port %d (%s)\n",
+ port->port.port,
+ port->extron->splitter.is_standby ? "Off" : "On");
+ if (!port->is_input)
+ cec_splitter_unconfigured_output(&port->port);
+}
+
+static void extron_cec_configured(struct cec_adapter *adap)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+
+ if (port->disconnected)
+ return;
+ if (debug)
+ dev_info(port->extron->dev, "configured port %d (%s)\n",
+ port->port.port,
+ port->extron->splitter.is_standby ? "Off" : "On");
+ if (!port->is_input)
+ cec_splitter_configured_output(&port->port);
+}
+
+static void extron_cec_adap_nb_transmit_canceled(struct cec_adapter *adap,
+ const struct cec_msg *msg)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+ struct cec_adapter *input_adap;
+
+ if (!vendor_id)
+ return;
+ if (port->disconnected || port->is_input)
+ return;
+ input_adap = port->extron->ports[port->extron->num_out_ports]->adap;
+ cec_splitter_nb_transmit_canceled_output(&port->port, msg, input_adap);
+}
+
+static int extron_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+
+ if (!vendor_id)
+ return -ENOMSG;
+ if (port->disconnected)
+ return -ENOMSG;
+ if (port->is_input)
+ return cec_splitter_received_input(&port->port, msg);
+ return cec_splitter_received_output(&port->port, msg,
+ port->extron->ports[port->extron->num_out_ports]->adap);
+}
+
+#define log_printf(adap, file, fmt, arg...) \
+ do { \
+ if (file) \
+ seq_printf((file), fmt, ## arg); \
+ else \
+ pr_info("cec-%s: " fmt, (adap)->name, ## arg); \
+ } while (0)
+
+static const char * const pwr_state[] = {
+ "on",
+ "standby",
+ "to on",
+ "to standby",
+};
+
+static void extron_adap_status_port(struct extron_port *port, struct seq_file *file)
+{
+ struct cec_adapter *adap = port->adap;
+
+ if (port->disconnected) {
+ log_printf(adap, file,
+ "\tport %u: disconnected\n", port->port.port);
+ return;
+ }
+ if (port->is_input)
+ log_printf(adap, file,
+ "\tport %u: %s signal, %s edid, %s 4kp30, %s 4kp60, %sQS/%sQY, is %s\n",
+ port->port.port,
+ port->has_signal ? "has" : "no",
+ port->has_edid ? "has" : "no",
+ port->has_4kp30 ? "has" : "no",
+ port->has_4kp60 ? "has" : "no",
+ port->has_qs ? "" : "no ",
+ port->has_qy ? "" : "no ",
+ !port->port.adap->is_configured ? "not configured" :
+ pwr_state[port->extron->splitter.is_standby]);
+ else
+ log_printf(adap, file,
+ "\tport %u: %s sink, %s signal, %s edid, %s 4kp30, %s 4kp60, %sQS/%sQY, is %sactive source, is %s\n",
+ port->port.port,
+ port->port.found_sink ? "found" : "no",
+ port->has_signal ? "has" : "no",
+ port->has_edid ? "has" : "no",
+ port->has_4kp30 ? "has" : "no",
+ port->has_4kp60 ? "has" : "no",
+ port->has_qs ? "" : "no ",
+ port->has_qy ? "" : "no ",
+ port->port.is_active_source ? "" : "not ",
+ !port->port.adap->is_configured ? "not configured" :
+ pwr_state[port->port.power_status & 3]);
+ if (port->port.out_give_device_power_status_seq)
+ log_printf(adap, file,
+ "\tport %u: querying power status (%u, %lldms)\n",
+ port->port.port,
+ port->port.out_give_device_power_status_seq & ~(1 << 31),
+ ktime_ms_delta(ktime_get(),
+ port->port.out_give_device_power_status_ts));
+ if (port->port.out_request_current_latency_seq)
+ log_printf(adap, file,
+ "\tport %u: querying latency (%u, %lldms)\n",
+ port->port.port,
+ port->port.out_request_current_latency_seq & ~(1 << 31),
+ ktime_ms_delta(ktime_get(),
+ port->port.out_request_current_latency_ts));
+}
+
+static void extron_adap_status(struct cec_adapter *adap, struct seq_file *file)
+{
+ struct extron_port *port = cec_get_drvdata(adap);
+ struct extron *extron = port->extron;
+ unsigned int i;
+
+ log_printf(adap, file, "name: %s type: %s\n",
+ extron->unit_name, extron->unit_type);
+ log_printf(adap, file, "model: 60-160%c-01 (1 input, %u outputs)\n",
+ '6' + extron->num_out_ports / 2, extron->num_out_ports);
+ log_printf(adap, file, "firmware version: %s CEC engine version: %s\n",
+ extron->unit_fw_version, extron->unit_cec_engine_version);
+ if (extron->hpd_never_low)
+ log_printf(adap, file, "always keep input HPD high\n");
+ else
+ log_printf(adap, file,
+ "pull input HPD low if all output HPDs are low\n");
+ if (vendor_id)
+ log_printf(adap, file,
+ "splitter vendor ID: 0x%06x\n", vendor_id);
+ if (manufacturer_name[0])
+ log_printf(adap, file, "splitter manufacturer name: %s\n",
+ manufacturer_name);
+ log_printf(adap, file, "splitter power status: %s\n",
+ pwr_state[extron->splitter.is_standby]);
+ log_printf(adap, file, "%s port: %d (%s)\n",
+ port->is_input ? "input" : "output",
+ port->port.port, port->name);
+ log_printf(adap, file, "splitter input port:\n");
+ extron_adap_status_port(extron->ports[extron->num_out_ports], file);
+
+ log_printf(adap, file, "splitter output ports:\n");
+ for (i = 0; i < extron->num_out_ports; i++)
+ extron_adap_status_port(extron->ports[i], file);
+
+ if (!port->has_edid || !port->read_edid)
+ return;
+
+ for (i = 0; i < port->edid_blocks * 128; i += 16) {
+ if (i % 128 == 0)
+ log_printf(adap, file, "\n");
+ log_printf(adap, file, "EDID: %*ph\n", 16, port->edid + i);
+ }
+}
+
+static const struct cec_adap_ops extron_cec_adap_ops = {
+ .adap_enable = extron_cec_adap_enable,
+ .adap_log_addr = extron_cec_adap_log_addr,
+ .adap_transmit = extron_cec_adap_transmit,
+ .adap_nb_transmit_canceled = extron_cec_adap_nb_transmit_canceled,
+ .adap_unconfigured = extron_cec_adap_unconfigured,
+ .adap_status = extron_adap_status,
+ .configured = extron_cec_configured,
+ .received = extron_received,
+};
+
+static int extron_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ strscpy(cap->driver, "extron-da-hd-4k-plus-cec", sizeof(cap->driver));
+ strscpy(cap->card, cap->driver, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "serio:%s", port->name);
+ return 0;
+}
+
+static int extron_enum_input(struct file *file, void *priv, struct v4l2_input *inp)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ if (inp->index)
+ return -EINVAL;
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ snprintf(inp->name, sizeof(inp->name), "HDMI IN %u", port->port.port);
+ inp->status = v4l2_ctrl_g_ctrl(port->ctrl_rx_power_present) ?
+ 0 : V4L2_IN_ST_NO_SIGNAL;
+ return 0;
+}
+
+static int extron_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int extron_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return i ? -EINVAL : 0;
+}
+
+static int extron_enum_output(struct file *file, void *priv, struct v4l2_output *out)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ if (out->index)
+ return -EINVAL;
+ out->type = V4L2_OUTPUT_TYPE_ANALOG;
+ snprintf(out->name, sizeof(out->name), "HDMI OUT %u", port->port.port);
+ return 0;
+}
+
+static int extron_g_output(struct file *file, void *priv, unsigned int *o)
+{
+ *o = 0;
+ return 0;
+}
+
+static int extron_s_output(struct file *file, void *priv, unsigned int o)
+{
+ return o ? -EINVAL : 0;
+}
+
+static int extron_g_edid(struct file *file, void *_fh,
+ struct v4l2_edid *edid)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ if (port->disconnected)
+ return -ENODEV;
+ if (edid->pad)
+ return -EINVAL;
+ if (!port->has_edid)
+ return -ENODATA;
+ if (!port->read_edid)
+ extron_read_edid(port);
+ if (!port->read_edid)
+ return -ENODATA;
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = port->edid_blocks;
+ return 0;
+ }
+ if (edid->start_block >= port->edid_blocks)
+ return -EINVAL;
+ if (edid->blocks > port->edid_blocks - edid->start_block)
+ edid->blocks = port->edid_blocks - edid->start_block;
+ memcpy(edid->edid, port->edid + edid->start_block * 128, edid->blocks * 128);
+ return 0;
+}
+
+static int extron_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ if (port->disconnected)
+ return -ENODEV;
+ if (edid->pad)
+ return -EINVAL;
+
+ /* Unfortunately it is not possible to clear the EDID */
+ if (edid->blocks == 0)
+ return -EINVAL;
+
+ if (edid->blocks > MAX_EDID_BLOCKS) {
+ edid->blocks = MAX_EDID_BLOCKS;
+ return -E2BIG;
+ }
+
+ if (cec_get_edid_spa_location(edid->edid, edid->blocks * 128))
+ v4l2_set_edid_phys_addr(edid->edid, edid->blocks * 128, 0);
+ extron_parse_edid(port);
+ return extron_write_edid(port, edid->edid, edid->blocks);
+}
+
+static int extron_log_status(struct file *file, void *priv)
+{
+ struct extron_port *port = video_drvdata(file);
+
+ extron_adap_status(port->adap, NULL);
+ return v4l2_ctrl_log_status(file, priv);
+}
+
+static const struct v4l2_ioctl_ops extron_ioctl_ops = {
+ .vidioc_querycap = extron_querycap,
+ .vidioc_enum_input = extron_enum_input,
+ .vidioc_g_input = extron_g_input,
+ .vidioc_s_input = extron_s_input,
+ .vidioc_enum_output = extron_enum_output,
+ .vidioc_g_output = extron_g_output,
+ .vidioc_s_output = extron_s_output,
+ .vidioc_g_edid = extron_g_edid,
+ .vidioc_s_edid = extron_s_edid,
+ .vidioc_log_status = extron_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations extron_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct video_device extron_videodev = {
+ .name = "extron-da-hd-4k-plus-cec",
+ .vfl_dir = VFL_DIR_RX,
+ .fops = &extron_fops,
+ .ioctl_ops = &extron_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static void extron_disconnect(struct serio *serio)
+{
+ struct extron *extron = serio_get_drvdata(serio);
+ unsigned int p;
+
+ kthread_stop(extron->kthread_setup);
+
+ for (p = 0; p < extron->num_ports; p++) {
+ struct extron_port *port = extron->ports[p];
+
+ if (!port)
+ continue;
+ port->disconnected = true;
+ cancel_work_sync(&port->irq_work);
+ }
+ cancel_delayed_work_sync(&extron->work_update_edid);
+ for (p = 0; p < extron->num_ports; p++) {
+ struct extron_port *port = extron->ports[p];
+
+ if (!port)
+ continue;
+
+ if (port->cec_was_registered) {
+ if (cec_is_registered(port->adap))
+ cec_unregister_adapter(port->adap);
+ /*
+ * After registering the adapter, the
+ * extron_setup_thread() function took an extra
+ * reference to the device. We call the corresponding
+ * put here.
+ */
+ cec_put_device(port->adap);
+ } else {
+ cec_delete_adapter(port->adap);
+ }
+ video_unregister_device(&port->vdev);
+ }
+
+ complete(&extron->edid_completion);
+
+ for (p = 0; p < extron->num_ports; p++) {
+ struct extron_port *port = extron->ports[p];
+
+ if (!port)
+ continue;
+ v4l2_ctrl_handler_free(&port->hdl);
+ mutex_destroy(&port->video_lock);
+ kfree(port);
+ }
+ mutex_destroy(&extron->edid_lock);
+ mutex_destroy(&extron->serio_lock);
+ extron->serio = NULL;
+ serio_set_drvdata(serio, NULL);
+ serio_close(serio);
+}
+
+static int extron_setup(struct extron *extron)
+{
+ struct serio *serio = extron->serio;
+ struct extron_port *port;
+ u8 *reply = extron->reply;
+ unsigned int p;
+ unsigned int major, minor;
+ int err;
+
+ /*
+ * Attempt to disable CEC: this prevents received CEC messages
+ * from interfering with other traffic on the serial port.
+ */
+ extron_send_and_wait(extron, NULL, "WI1*0CCEC", NULL);
+ extron_send_and_wait(extron, NULL, "WO0*CCEC", NULL);
+
+ /* Obtain unit part number */
+ err = extron_send_and_wait(extron, NULL, "N", "Pno");
+ if (err)
+ return err;
+ dev_info(extron->dev, "Unit part number: %s\n", reply + 3);
+ if (strcmp(reply + 3, "60-1607-01") &&
+ strcmp(reply + 3, "60-1608-01") &&
+ strcmp(reply + 3, "60-1609-01")) {
+ dev_err(extron->dev, "Unsupported model\n");
+ return -ENODEV;
+ }
+ /* Up to 6 output ports and one input port */
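+ /* Part numbers 60-1607/-1608/-1609-01 have 2/4/6 outputs respectively */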
+ extron->num_out_ports = 2 * (reply[9] - '6');
+ extron->splitter.num_out_ports = extron->num_out_ports;
+ extron->splitter.ports = extron->splitter_ports;
+ extron->splitter.dev = extron->dev;
+ extron->num_in_ports = 1;
+ extron->num_ports = extron->num_out_ports + extron->num_in_ports;
+ dev_info(extron->dev, "Unit output ports: %d\n", extron->num_out_ports);
+ dev_info(extron->dev, "Unit input ports: %d\n", extron->num_in_ports);
+
+ err = extron_send_and_wait(extron, NULL, "W CN", "Ipn ");
+ if (err)
+ return err;
+ dev_info(extron->dev, "Unit name: %s\n", reply + 4);
+ strscpy(extron->unit_name, reply + 4, sizeof(extron->unit_name));
+
+ err = extron_send_and_wait(extron, NULL, "*Q", "Bld");
+ if (err)
+ return err;
+ dev_info(extron->dev, "Unit FW Version: %s\n", reply + 3);
+ strscpy(extron->unit_fw_version, reply + 3,
+ sizeof(extron->unit_fw_version));
+ if (sscanf(reply + 3, "%u.%u.", &major, &minor) < 2 ||
+ major < 1 || (major == 1 && minor < 2)) {
+ dev_err(extron->dev,
+ "Unsupported FW version (only 1.02 or up is supported)\n");
+ return -ENODEV;
+ }
+
+ err = extron_send_and_wait(extron, NULL, "2i", "Inf02*");
+ if (err)
+ return err;
+ dev_info(extron->dev, "Unit Type: %s\n", reply + 6);
+ strscpy(extron->unit_type, reply + 6, sizeof(extron->unit_type));
+
+ err = extron_send_and_wait(extron, NULL, "39Q", "Ver39*");
+ if (err)
+ return err;
+ dev_info(extron->dev, "CEC Engine Version: %s\n", reply + 6);
+ strscpy(extron->unit_cec_engine_version, reply + 6,
+ sizeof(extron->unit_cec_engine_version));
+
+ /* Disable CEC */
+ err = extron_send_and_wait(extron, NULL, "WI1*0CCEC", "CcecI1*");
+ if (err)
+ return err;
+ err = extron_send_and_wait(extron, NULL, "WO0*CCEC", "CcecO0");
+ if (err)
+ return err;
+
+ extron->hpd_never_low = hpd_never_low;
+
+ /* Pull input port HPD low if all output ports also have a low HPD */
+ if (hpd_never_low) {
+ dev_info(extron->dev, "Always keep input HPD high\n");
+ } else {
+ dev_info(extron->dev, "Pull input HPD low if all output HPDs are low\n");
+ extron_send_and_wait(extron, NULL, "W1ihpd", "Ihpd1");
+ }
+
+ for (p = 0; p < extron->num_ports; p++) {
+ u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL;
+
+ if (vendor_id)
+ caps &= ~CEC_CAP_LOG_ADDRS;
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ INIT_WORK(&port->irq_work, extron_irq_work_handler);
+ spin_lock_init(&port->msg_lock);
+ mutex_init(&port->video_lock);
+ port->extron = extron;
+ port->is_input = p >= extron->num_out_ports;
+ port->direction = port->is_input ? 'I' : 'O';
+ port->port.port = 1 + (port->is_input ? p - extron->num_out_ports : p);
+ port->port.splitter = &extron->splitter;
+ port->phys_addr = CEC_PHYS_ADDR_INVALID;
+ snprintf(port->name, sizeof(port->name), "%s-%s-%u",
+ dev_name(&serio->dev), port->is_input ? "in" : "out",
+ port->port.port);
+
+ port->dev = extron->dev;
+ port->adap = cec_allocate_adapter(&extron_cec_adap_ops, port,
+ port->name, caps, 1);
+ err = PTR_ERR_OR_ZERO(port->adap);
+ if (err < 0) {
+ kfree(port);
+ return err;
+ }
+
+ port->adap->xfer_timeout_ms = EXTRON_TIMEOUT_SECS * 1000;
+ port->port.adap = port->adap;
+ port->vdev = extron_videodev;
+ port->vdev.lock = &port->video_lock;
+ port->vdev.v4l2_dev = &extron->v4l2_dev;
+ port->vdev.ctrl_handler = &port->hdl;
+ port->vdev.device_caps = V4L2_CAP_EDID;
+ video_set_drvdata(&port->vdev, port);
+
+ v4l2_ctrl_handler_init(&port->hdl, 2);
+
+ if (port->is_input) {
+ port->vdev.vfl_dir = VFL_DIR_RX;
+ port->ctrl_rx_power_present =
+ v4l2_ctrl_new_std(&port->hdl, NULL,
+ V4L2_CID_DV_RX_POWER_PRESENT,
+ 0, 1, 0, 0);
+ port->has_edid = true;
+ } else {
+ port->vdev.vfl_dir = VFL_DIR_TX;
+ port->ctrl_tx_hotplug =
+ v4l2_ctrl_new_std(&port->hdl, NULL,
+ V4L2_CID_DV_TX_HOTPLUG,
+ 0, 1, 0, 0);
+ port->ctrl_tx_edid_present =
+ v4l2_ctrl_new_std(&port->hdl, NULL,
+ V4L2_CID_DV_TX_EDID_PRESENT,
+ 0, 1, 0, 0);
+ }
+
+ err = port->hdl.error;
+ if (err < 0) {
+ cec_delete_adapter(port->adap);
+ kfree(port);
+ return err;
+ }
+ extron->ports[p] = port;
+ extron->splitter_ports[p] = &port->port;
+ if (port->is_input && manufacturer_name[0])
+ extron_write_edid(port, hdmi_edid, 2);
+ }
+
+ /* Enable CEC (manual mode, i.e. controlled by the driver) */
+ err = extron_send_and_wait(extron, NULL, "WI1*20CCEC", "CcecI1*");
+ if (err)
+ return err;
+
+ err = extron_send_and_wait(extron, NULL, "WO20*CCEC", "CcecO20");
+ if (err)
+ return err;
+
+ /* Set logical addresses to 15 (Unregistered) */
+ err = extron_send_and_wait(extron, NULL, "WI1*15LCEC", "LcecI1*15");
+ if (err)
+ return err;
+
+ for (p = 0; p < extron->num_out_ports; p++) {
+ char cmd[20];
+ char resp[20];
+
+ snprintf(cmd, sizeof(cmd), "WO%u*15LCEC", p + 1);
+ snprintf(resp, sizeof(resp), "LcecO%u*15", p + 1);
+ err = extron_send_and_wait(extron, extron->ports[p], cmd, resp);
+ if (err)
+ return err;
+ }
+
+ /*
+ * The Extron is now ready for operation. Specifically it is now
+ * possible to retrieve EDIDs.
+ */
+ extron->is_ready = true;
+
+ /* Query HDCP and Signal states, used to update the initial state */
+ err = extron_send_and_wait(extron, NULL, "WHDCP", "Hdcp");
+ if (err)
+ return err;
+
+ return extron_send_and_wait(extron, NULL, "WLS", "Sig");
+}
+
+static int extron_setup_thread(void *_extron)
+{
+ struct extron *extron = _extron;
+ struct extron_port *port;
+ unsigned int p;
+ bool poll_splitter = false;
+ bool was_connected = true;
+ int err;
+
+ while (1) {
+ if (kthread_should_stop())
+ return 0;
+ err = extron_send_and_wait(extron, NULL, "W3CV", "Vrb3");
+ /*
+ * The kthread_should_stop() check above makes it possible to
+ * detect a serio disconnect here: the disconnect handler stops
+ * this thread.
+ */
+ if (err >= 0)
+ break;
+ was_connected = false;
+ ssleep(1);
+ }
+
+ /*
+ * If the Extron was not connected at probe() time, i.e. it was just
+ * powered up and, although the serial port is working, the firmware
+ * is still booting, then wait 10 seconds for the firmware to settle.
+ *
+ * Trying to continue too soon means that some commands will not
+ * work yet.
+ */
+ if (!was_connected)
+ ssleep(10);
+
+ err = extron_setup(extron);
+ if (err)
+ goto disable_ports;
+
+ for (p = 0; p < extron->num_ports; p++) {
+ struct cec_log_addrs log_addrs = {};
+
+ port = extron->ports[p];
+ if (port->is_input && manufacturer_name[0])
+ v4l2_disable_ioctl(&port->vdev, VIDIOC_S_EDID);
+ err = video_register_device(&port->vdev, VFL_TYPE_VIDEO, -1);
+ if (err) {
+ v4l2_err(&extron->v4l2_dev, "Failed to register video device\n");
+ goto disable_ports;
+ }
+
+ err = cec_register_adapter(port->adap, extron->dev);
+ if (err < 0)
+ goto disable_ports;
+ port->dev = &port->adap->devnode.dev;
+ port->cec_was_registered = true;
+ /*
+ * This driver is unusual in that the whole setup takes place
+ * in a thread, since it can take a long time for the Extron
+ * Splitter to boot up, and the probe function of this driver
+ * should not block for that long. In addition, as soon as
+ * CEC adapters come online, they can be used, and you cannot
+ * just unregister them again if an error occurs, since that
+ * can delete the underlying CEC adapter, which might already
+ * be in use.
+ *
+ * So we take an additional reference to the adapter. This
+ * allows us to unregister the device node if needed, without
+ * deleting the actual adapter.
+ *
+ * In the disconnect function we will do the corresponding
+ * put call to ensure the adapter is deleted.
+ */
+ cec_get_device(port->adap);
+
+ /*
+ * If vendor_id wasn't set, then userspace configures the
+ * CEC devices. Otherwise the driver configures the CEC
+ * devices as TV (input) and Playback (outputs) devices
+ * and the driver processes all CEC messages.
+ */
+ if (!vendor_id)
+ continue;
+
+ log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
+ log_addrs.num_log_addrs = 1;
+ log_addrs.vendor_id = vendor_id;
+ if (port->is_input) {
+ strscpy(log_addrs.osd_name, "Splitter In",
+ sizeof(log_addrs.osd_name));
+ log_addrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
+ log_addrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_TV;
+ log_addrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_TV;
+ } else {
+ snprintf(log_addrs.osd_name, sizeof(log_addrs.osd_name),
+ "Splitter Out%u", port->port.port);
+ log_addrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ log_addrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
+ log_addrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
+ }
+ err = cec_s_log_addrs(port->adap, &log_addrs, false);
+ if (err < 0)
+ goto disable_ports;
+ }
+ poll_splitter = true;
+
+ port = extron->ports[extron->num_out_ports];
+ while (!kthread_should_stop()) {
+ ssleep(1);
+ if (hpd_never_low != extron->hpd_never_low) {
+ /*
+ * Keep input port HPD high at all times, or pull it low
+ * if all output ports also have a low HPD
+ */
+ if (hpd_never_low) {
+ dev_info(extron->dev, "Always keep input HPD high\n");
+ extron_send_and_wait(extron, NULL, "W0ihpd", "Ihpd0");
+ } else {
+ dev_info(extron->dev, "Pull input HPD low if all output HPDs are low\n");
+ extron_send_and_wait(extron, NULL, "W1ihpd", "Ihpd1");
+ }
+ extron->hpd_never_low = hpd_never_low;
+ }
+ if (poll_splitter &&
+ cec_splitter_poll(&extron->splitter, port->adap, debug) &&
+ manufacturer_name[0]) {
+ /*
+ * Sinks were lost, so see if the input edid needs to
+ * be updated.
+ */
+ cancel_delayed_work_sync(&extron->work_update_edid);
+ schedule_delayed_work(&extron->work_update_edid,
+ msecs_to_jiffies(1000));
+ }
+ }
+ return 0;
+
+disable_ports:
+ extron->is_ready = false;
+ for (p = 0; p < extron->num_ports; p++) {
+ struct extron_port *port = extron->ports[p];
+
+ if (!port)
+ continue;
+ port->disconnected = true;
+ cancel_work_sync(&port->irq_work);
+ video_unregister_device(&port->vdev);
+ if (port->cec_was_registered)
+ cec_unregister_adapter(port->adap);
+ }
+ cancel_delayed_work_sync(&extron->work_update_edid);
+ complete(&extron->edid_completion);
+ dev_err(extron->dev, "Setup failed with error %d\n", err);
+ while (!kthread_should_stop())
+ ssleep(1);
+ return err;
+}
+
+static int extron_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct extron *extron;
+ int err = -ENOMEM;
+
+ if (manufacturer_name[0] &&
+ (!isupper(manufacturer_name[0]) ||
+ !isupper(manufacturer_name[1]) ||
+ !isupper(manufacturer_name[2]))) {
+ dev_warn(&serio->dev, "ignoring invalid manufacturer name\n");
+ manufacturer_name[0] = 0;
+ }
+
+ extron = kzalloc(sizeof(*extron), GFP_KERNEL);
+
+ if (!extron)
+ return -ENOMEM;
+
+ extron->serio = serio;
+ extron->dev = &serio->dev;
+ mutex_init(&extron->serio_lock);
+ mutex_init(&extron->edid_lock);
+ INIT_DELAYED_WORK(&extron->work_update_edid, update_edid_work);
+
+ err = v4l2_device_register(extron->dev, &extron->v4l2_dev);
+ if (err)
+ goto free_device;
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto unreg_v4l2_dev;
+
+ serio_set_drvdata(serio, extron);
+ init_completion(&extron->edid_completion);
+
+ extron->kthread_setup = kthread_run(extron_setup_thread, extron,
+ "extron-da-hd-4k-plus-cec-%s", dev_name(&serio->dev));
+ if (!IS_ERR(extron->kthread_setup))
+ return 0;
+
+ dev_err(extron->dev, "kthread_run() failed\n");
+ err = PTR_ERR(extron->kthread_setup);
+
+ extron->serio = NULL;
+ serio_set_drvdata(serio, NULL);
+ serio_close(serio);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&extron->v4l2_dev);
+free_device:
+ mutex_destroy(&extron->edid_lock);
+ mutex_destroy(&extron->serio_lock);
+ kfree(extron);
+ return err;
+}
+
+static const struct serio_device_id extron_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_EXTRON_DA_HD_4K_PLUS,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, extron_serio_ids);
+
+static struct serio_driver extron_drv = {
+ .driver = {
+ .name = "extron-da-hd-4k-plus-cec",
+ },
+ .description = "Extron DA HD 4K PLUS HDMI CEC driver",
+ .id_table = extron_serio_ids,
+ .interrupt = extron_interrupt,
+ .connect = extron_connect,
+ .disconnect = extron_disconnect,
+};
+
+module_serio_driver(extron_drv);
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h
new file mode 100644
index 000000000000..b79f1253ab5d
--- /dev/null
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Copyright 2021-2024 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _EXTRON_DA_HD_4K_PLUS_H_
+#define _EXTRON_DA_HD_4K_PLUS_H_
+
+#include <linux/kthread.h>
+#include <linux/serio.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+
+#include "cec-splitter.h"
+
+#define DATA_SIZE 256
+
+#define PING_PERIOD (15 * HZ)
+
+#define NUM_MSGS CEC_MAX_MSG_RX_QUEUE_SZ
+
+#define MAX_PORTS (1 + 6)
+
+#define MAX_EDID_BLOCKS 2
+
+struct extron;
+
+struct extron_port {
+ struct cec_splitter_port port;
+ struct device *dev;
+ struct cec_adapter *adap;
+ struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *ctrl_rx_power_present;
+ struct v4l2_ctrl *ctrl_tx_hotplug;
+ struct v4l2_ctrl *ctrl_tx_edid_present;
+ bool is_input;
+ char direction;
+ char name[26];
+ unsigned char edid[MAX_EDID_BLOCKS * 128];
+ unsigned char edid_tmp[MAX_EDID_BLOCKS * 128];
+ unsigned int edid_blocks;
+ bool read_edid;
+ struct extron *extron;
+ struct work_struct irq_work;
+ struct completion cmd_done;
+ const char *response;
+ unsigned int cmd_error;
+ struct cec_msg rx_msg[NUM_MSGS];
+ unsigned int rx_msg_cur_idx, rx_msg_num;
+ /* protect rx_msg_cur_idx and rx_msg_num */
+ spinlock_t msg_lock;
+ u32 tx_done_status;
+ bool update_phys_addr;
+ u16 phys_addr;
+ bool cec_was_registered;
+ bool disconnected;
+ bool update_has_signal;
+ bool has_signal;
+ bool update_has_edid;
+ bool has_edid;
+ bool has_4kp30;
+ bool has_4kp60;
+ bool has_qy;
+ bool has_qs;
+ u8 est_i, est_ii;
+
+ /* locks access to the video_device */
+ struct mutex video_lock;
+};
+
+struct extron {
+ struct cec_splitter splitter;
+ struct device *dev;
+ struct serio *serio;
+ /* locks access to serio */
+ struct mutex serio_lock;
+ unsigned int num_ports;
+ unsigned int num_in_ports;
+ unsigned int num_out_ports;
+ char unit_name[32];
+ char unit_type[64];
+ char unit_fw_version[32];
+ char unit_cec_engine_version[32];
+ struct extron_port *ports[MAX_PORTS];
+ struct cec_splitter_port *splitter_ports[MAX_PORTS];
+ struct v4l2_device v4l2_dev;
+ bool hpd_never_low;
+ struct task_struct *kthread_setup;
+ struct delayed_work work_update_edid;
+
+ /* serializes EDID reading */
+ struct mutex edid_lock;
+ unsigned int edid_bytes_read;
+ struct extron_port *edid_port;
+ struct completion edid_completion;
+ bool edid_reading;
+ bool is_ready;
+
+ struct completion cmd_done;
+ const char *response;
+ unsigned int cmd_error;
+ char data[DATA_SIZE];
+ unsigned int len;
+ char reply[DATA_SIZE];
+ char buf[DATA_SIZE];
+ unsigned int idx;
+};
+
+#endif
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index b6f1eb5dbbdf..3732367e0c62 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -1132,8 +1132,7 @@ static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
* return: 0 on success, <0 on error.
*/
static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
- int mode,
- loadfirmware_t loadfirmware_handler)
+ int mode)
{
int rc = -ENOENT;
u8 *fw_buf;
@@ -1147,8 +1146,7 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
}
pr_debug("Firmware name: %s\n", fw_filename);
- if (!loadfirmware_handler &&
- !(coredev->device_flags & SMS_DEVICE_FAMILY2))
+ if (!(coredev->device_flags & SMS_DEVICE_FAMILY2))
return -EINVAL;
rc = request_firmware(&fw, fw_filename, coredev->device);
@@ -1166,10 +1164,8 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
memcpy(fw_buf, fw->data, fw->size);
fw_buf_size = fw->size;
- rc = (coredev->device_flags & SMS_DEVICE_FAMILY2) ?
- smscore_load_firmware_family2(coredev, fw_buf, fw_buf_size)
- : loadfirmware_handler(coredev->context, fw_buf,
- fw_buf_size);
+ rc = smscore_load_firmware_family2(coredev, fw_buf,
+ fw_buf_size);
}
kfree(fw_buf);
@@ -1353,8 +1349,7 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
}
if (!(coredev->modes_supported & (1 << mode))) {
- rc = smscore_load_firmware_from_file(coredev,
- mode, NULL);
+ rc = smscore_load_firmware_from_file(coredev, mode);
if (rc >= 0)
pr_debug("firmware download success\n");
} else {
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index 82d9f8a64d99..d945a2d6d624 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -97,7 +97,6 @@ typedef int (*hotplug_t)(struct smscore_device_t *coredev,
typedef int (*setmode_t)(void *context, int mode);
typedef void (*detectmode_t)(void *context, int *mode);
typedef int (*sendrequest_t)(void *context, void *buffer, size_t size);
-typedef int (*loadfirmware_t)(void *context, void *buffer, size_t size);
typedef int (*preload_t)(void *context);
typedef int (*postload_t)(void *context);
@@ -1102,9 +1101,6 @@ extern int smscore_register_device(struct smsdevice_params_t *params,
extern void smscore_unregister_device(struct smscore_device_t *coredev);
extern int smscore_start_device(struct smscore_device_t *coredev);
-extern int smscore_load_firmware(struct smscore_device_t *coredev,
- char *filename,
- loadfirmware_t loadfirmware_handler);
extern int smscore_set_device_mode(struct smscore_device_t *coredev, int mode);
extern int smscore_get_device_mode(struct smscore_device_t *coredev);
@@ -1119,12 +1115,6 @@ extern int smsclient_sendrequest(struct smscore_client_t *client,
extern void smscore_onresponse(struct smscore_device_t *coredev,
struct smscore_buffer_t *cb);
-extern int smscore_get_common_buffer_size(struct smscore_device_t *coredev);
-extern int smscore_map_common_buffer(struct smscore_device_t *coredev,
- struct vm_area_struct *vma);
-extern int smscore_send_fw_file(struct smscore_device_t *coredev,
- u8 *ufwbuf, int size);
-
extern
struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev);
extern void smscore_putbuffer(struct smscore_device_t *coredev,
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 0217392fcc0d..29a8d876e6c2 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -303,14 +303,22 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
if (!p->mem_priv)
return;
- if (p->dbuf_mapped)
- call_void_memop(vb, unmap_dmabuf, p->mem_priv);
+ if (!p->dbuf_duplicated) {
+ if (p->dbuf_mapped)
+ call_void_memop(vb, unmap_dmabuf, p->mem_priv);
+
+ call_void_memop(vb, detach_dmabuf, p->mem_priv);
+ }
- call_void_memop(vb, detach_dmabuf, p->mem_priv);
dma_buf_put(p->dbuf);
p->mem_priv = NULL;
p->dbuf = NULL;
p->dbuf_mapped = 0;
+ p->bytesused = 0;
+ p->length = 0;
+ p->m.fd = 0;
+ p->data_offset = 0;
+ p->dbuf_duplicated = false;
}
/*
@@ -319,9 +327,15 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
*/
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
- unsigned int plane;
+ int plane;
- for (plane = 0; plane < vb->num_planes; ++plane)
+ /*
+ * When multiple planes share the same DMA buffer attachment, the plane
+ * with the lowest index owns the mem_priv.
+ * Put planes in the reversed order so that we don't leave invalid
+ * mem_priv behind.
+ */
+ for (plane = vb->num_planes - 1; plane >= 0; --plane)
__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}
@@ -1369,7 +1383,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
void *mem_priv;
- unsigned int plane;
+ unsigned int plane, i;
int ret = 0;
bool reacquired = vb->planes[0].mem_priv == NULL;
@@ -1383,11 +1397,13 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
for (plane = 0; plane < vb->num_planes; ++plane) {
struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+ planes[plane].dbuf = dbuf;
+
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
plane);
ret = -EINVAL;
- goto err;
+ goto err_put_planes;
}
/* use DMABUF size if length is not provided */
@@ -1398,80 +1414,86 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
planes[plane].length, plane,
vb->planes[plane].min_length);
- dma_buf_put(dbuf);
ret = -EINVAL;
- goto err;
+ goto err_put_planes;
}
/* Skip the plane if already verified */
if (dbuf == vb->planes[plane].dbuf &&
- vb->planes[plane].length == planes[plane].length) {
- dma_buf_put(dbuf);
+ vb->planes[plane].length == planes[plane].length)
continue;
- }
dprintk(q, 3, "buffer for plane %d changed\n", plane);
- if (!reacquired) {
- reacquired = true;
+ reacquired = true;
+ }
+
+ if (reacquired) {
+ if (vb->planes[0].mem_priv) {
vb->copied_timestamp = 0;
call_void_vb_qop(vb, buf_cleanup, vb);
+ __vb2_buf_dmabuf_put(vb);
}
- /* Release previously acquired memory if present */
- __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
- vb->planes[plane].bytesused = 0;
- vb->planes[plane].length = 0;
- vb->planes[plane].m.fd = 0;
- vb->planes[plane].data_offset = 0;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ /*
+ * This is an optimization to reduce dma_buf attachment/mapping.
+ * When the same dma_buf is used for multiple planes, there is no need
+ * to create duplicated attachments.
+ */
+ for (i = 0; i < plane; ++i) {
+ if (planes[plane].dbuf == vb->planes[i].dbuf &&
+ q->alloc_devs[plane] == q->alloc_devs[i]) {
+ vb->planes[plane].dbuf_duplicated = true;
+ vb->planes[plane].dbuf = vb->planes[i].dbuf;
+ vb->planes[plane].mem_priv = vb->planes[i].mem_priv;
+ break;
+ }
+ }
- /* Acquire each plane's memory */
- mem_priv = call_ptr_memop(attach_dmabuf,
- vb,
- q->alloc_devs[plane] ? : q->dev,
- dbuf,
- planes[plane].length);
- if (IS_ERR(mem_priv)) {
- dprintk(q, 1, "failed to attach dmabuf\n");
- ret = PTR_ERR(mem_priv);
- dma_buf_put(dbuf);
- goto err;
- }
+ if (vb->planes[plane].dbuf_duplicated)
+ continue;
- vb->planes[plane].dbuf = dbuf;
- vb->planes[plane].mem_priv = mem_priv;
- }
+ /* Acquire each plane's memory */
+ mem_priv = call_ptr_memop(attach_dmabuf,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ planes[plane].dbuf,
+ planes[plane].length);
+ if (IS_ERR(mem_priv)) {
+ dprintk(q, 1, "failed to attach dmabuf\n");
+ ret = PTR_ERR(mem_priv);
+ goto err_put_vb2_buf;
+ }
- /*
- * This pins the buffer(s) with dma_buf_map_attachment()). It's done
- * here instead just before the DMA, while queueing the buffer(s) so
- * userspace knows sooner rather than later if the dma-buf map fails.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- if (vb->planes[plane].dbuf_mapped)
- continue;
+ vb->planes[plane].dbuf = planes[plane].dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
- ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
- if (ret) {
- dprintk(q, 1, "failed to map dmabuf for plane %d\n",
- plane);
- goto err;
+ /*
+	 * This pins the buffer(s) with dma_buf_map_attachment(). It's done
+	 * here instead of just before the DMA, while queueing the buffer(s),
+	 * so userspace knows sooner rather than later if the dma-buf map fails.
+ */
+ ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(q, 1, "failed to map dmabuf for plane %d\n",
+ plane);
+ goto err_put_vb2_buf;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
}
- vb->planes[plane].dbuf_mapped = 1;
- }
- /*
- * Now that everything is in order, copy relevant information
- * provided by userspace.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->planes[plane].bytesused = planes[plane].bytesused;
- vb->planes[plane].length = planes[plane].length;
- vb->planes[plane].m.fd = planes[plane].m.fd;
- vb->planes[plane].data_offset = planes[plane].data_offset;
- }
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].bytesused = planes[plane].bytesused;
+ vb->planes[plane].length = planes[plane].length;
+ vb->planes[plane].m.fd = planes[plane].m.fd;
+ vb->planes[plane].data_offset = planes[plane].data_offset;
+ }
- if (reacquired) {
/*
* Call driver-specific initialization on the newly acquired buffer,
* if provided.
@@ -1479,19 +1501,28 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
ret = call_vb_qop(vb, buf_init, vb);
if (ret) {
dprintk(q, 1, "buffer initialization failed\n");
- goto err;
+ goto err_put_vb2_buf;
}
+ } else {
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ dma_buf_put(planes[plane].dbuf);
}
ret = call_vb_qop(vb, buf_prepare, vb);
if (ret) {
dprintk(q, 1, "buffer preparation failed\n");
call_void_vb_qop(vb, buf_cleanup, vb);
- goto err;
+ goto err_put_vb2_buf;
}
return 0;
-err:
+
+err_put_planes:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (!IS_ERR_OR_NULL(planes[plane].dbuf))
+ dma_buf_put(planes[plane].dbuf);
+ }
+err_put_vb2_buf:
/* In case of errors, release planes that were already acquired */
__vb2_buf_dmabuf_put(vb);
@@ -2602,13 +2633,6 @@ int vb2_core_queue_init(struct vb2_queue *q)
return -EINVAL;
/*
- * The minimum requirement is 2: one buffer is used
- * by the hardware while the other is being processed by userspace.
- */
- if (q->min_reqbufs_allocation < 2)
- q->min_reqbufs_allocation = 2;
-
- /*
* If the driver needs 'min_queued_buffers' in the queue before
* calling start_streaming() then the minimum requirement is
* 'min_queued_buffers + 1' to keep at least one buffer available
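The two vb2 changes above work together: planes that share one dma-buf and allocator device now reuse a single attachment owned by the lowest-index plane, and teardown walks the planes in reverse so the owner's mem_priv is released last. A minimal sketch of that pairing, with illustrative types rather than vb2's real structures:

#include <stdbool.h>
#include <stddef.h>

struct plane {
	void *dbuf;             /* shared dma-buf handle */
	void *alloc_dev;        /* allocator device for this plane */
	void *mem_priv;         /* attachment; owned by lowest-index user */
	bool  dbuf_duplicated;  /* true when borrowing another plane's */
};

/* Reuse an earlier plane's attachment when buffer and device match. */
void dedup_attachments(struct plane *p, size_t num_planes)
{
	for (size_t i = 0; i < num_planes; i++) {
		for (size_t j = 0; j < i; j++) {
			if (p[i].dbuf == p[j].dbuf &&
			    p[i].alloc_dev == p[j].alloc_dev) {
				p[i].mem_priv = p[j].mem_priv;
				p[i].dbuf_duplicated = true;
				break;
			}
		}
	}
}

/* Tear down in reverse: borrowers drop their reference first, and the
 * owning plane detaches the real attachment last. */
void put_all_planes(struct plane *p, size_t num_planes)
{
	for (size_t i = num_planes; i-- > 0; ) {
		if (!p[i].dbuf_duplicated && p[i].mem_priv) {
			/* unmap/detach the real attachment here */
		}
		p[i].mem_priv = NULL;
		p[i].dbuf_duplicated = false;
	}
}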
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 3d4fd4ef5310..bb0b7fa67b53 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -854,8 +854,7 @@ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
return -ENODEV;
}
if (dma_get_max_seg_size(dev) < size)
- return dma_set_max_seg_size(dev, size);
-
+ dma_set_max_seg_size(dev, size);
return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index f39887c04978..bf2773c5b97a 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -256,7 +256,7 @@ static void a8293_remove(struct i2c_client *client)
}
static const struct i2c_device_id a8293_id_table[] = {
- {"a8293", 0},
+ { "a8293" },
{}
};
MODULE_DEVICE_TABLE(i2c, a8293_id_table);
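This is the first of many identical one-liners in this series: in C, members omitted from an initializer are zero-initialized (C11 6.7.9p21), so { "a8293" } already leaves .driver_data at 0 and the explicit trailing 0 is redundant. A standalone illustration, using a simplified stand-in for struct i2c_device_id:

#include <assert.h>

struct id_demo {                 /* simplified stand-in, not the real struct */
	const char   *name;
	unsigned long driver_data;
};

int main(void)
{
	struct id_demo id = { "a8293" };  /* driver_data implicitly 0 */

	assert(id.driver_data == 0);
	return 0;
}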
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 5afdbe244596..befd6a4eafd9 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1553,7 +1553,7 @@ static void af9013_remove(struct i2c_client *client)
}
static const struct i2c_device_id af9013_id_table[] = {
- {"af9013", 0},
+ { "af9013" },
{}
};
MODULE_DEVICE_TABLE(i2c, af9013_id_table);
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 49b7b04a7899..eed2ea4da8fa 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1173,7 +1173,7 @@ static void af9033_remove(struct i2c_client *client)
}
static const struct i2c_device_id af9033_id_table[] = {
- {"af9033", 0},
+ { "af9033" },
{}
};
MODULE_DEVICE_TABLE(i2c, af9033_id_table);
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index d02a92a81c60..58c4c489bf97 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -767,7 +767,7 @@ static void au8522_remove(struct i2c_client *client)
}
static const struct i2c_device_id au8522_id[] = {
- {"au8522", 0},
+ { "au8522" },
{}
};
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index 3f3b85743666..5e6e18819a0d 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -672,7 +672,7 @@ static void cxd2099_remove(struct i2c_client *client)
}
static const struct i2c_device_id cxd2099_id[] = {
- {"cxd2099", 0},
+ { "cxd2099" },
{}
};
MODULE_DEVICE_TABLE(i2c, cxd2099_id);
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 7feb08dccfa1..c3d8ced6c3ba 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -723,7 +723,7 @@ static void cxd2820r_remove(struct i2c_client *client)
}
static const struct i2c_device_id cxd2820r_id_table[] = {
- {"cxd2820r", 0},
+ { "cxd2820r" },
{}
};
MODULE_DEVICE_TABLE(i2c, cxd2820r_id_table);
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index b25d11be8611..6ab9d4de65ce 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2244,7 +2244,7 @@ static void lgdt3306a_remove(struct i2c_client *client)
}
static const struct i2c_device_id lgdt3306a_id_table[] = {
- {"lgdt3306a", 0},
+ { "lgdt3306a" },
{}
};
MODULE_DEVICE_TABLE(i2c, lgdt3306a_id_table);
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 081d6ad3ce72..cab442a350a5 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -983,7 +983,7 @@ static void lgdt330x_remove(struct i2c_client *client)
}
static const struct i2c_device_id lgdt330x_id_table[] = {
- {"lgdt330x", 0},
+ { "lgdt330x" },
{}
};
MODULE_DEVICE_TABLE(i2c, lgdt330x_id_table);
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 73d1e52de569..729751671c3d 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -708,7 +708,7 @@ static void mn88472_remove(struct i2c_client *client)
}
static const struct i2c_device_id mn88472_id_table[] = {
- {"mn88472", 0},
+ { "mn88472" },
{}
};
MODULE_DEVICE_TABLE(i2c, mn88472_id_table);
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index eb50591c0e7a..fefc640d8afb 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -743,7 +743,7 @@ static void mn88473_remove(struct i2c_client *client)
}
static const struct i2c_device_id mn88473_id_table[] = {
- {"mn88473", 0},
+ { "mn88473" },
{}
};
MODULE_DEVICE_TABLE(i2c, mn88473_id_table);
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index 2a31bde2630f..bbc2bc778225 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -1346,7 +1346,7 @@ static void mxl692_remove(struct i2c_client *client)
}
static const struct i2c_device_id mxl692_id_table[] = {
- {"mxl692", 0},
+ { "mxl692" },
{}
};
MODULE_DEVICE_TABLE(i2c, mxl692_id_table);
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 30d10fe4b33e..aa4ef9aedf17 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -609,7 +609,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on
index, pid, onoff);
/* skip invalid PIDs (0x2000) */
- if (pid > 0x1fff || index > 32)
+ if (pid > 0x1fff || index >= 32)
return 0;
if (onoff)
@@ -876,7 +876,7 @@ static void rtl2830_remove(struct i2c_client *client)
}
static const struct i2c_device_id rtl2830_id_table[] = {
- {"rtl2830", 0},
+ { "rtl2830" },
{}
};
MODULE_DEVICE_TABLE(i2c, rtl2830_id_table);
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 5142820b1b3d..3b4e46dac1bf 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -983,7 +983,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
index, pid, onoff, dev->slave_ts);
/* skip invalid PIDs (0x2000) */
- if (pid > 0x1fff || index > 32)
+ if (pid > 0x1fff || index >= 32)
return 0;
if (onoff)
@@ -1125,7 +1125,7 @@ static void rtl2832_remove(struct i2c_client *client)
}
static const struct i2c_device_id rtl2832_id_table[] = {
- {"rtl2832", 0},
+ { "rtl2832" },
{}
};
MODULE_DEVICE_TABLE(i2c, rtl2832_id_table);
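Both rtl2830 and rtl2832 carry the same off-by-one fix: the hardware PID filter has 32 slots, so valid indices are 0..31, and "index > 32" wrongly accepted index 32. A quick standalone check of the corrected bounds:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FILTER_SLOTS 32  /* hardware provides slots 0..31 */

/* Valid: PID fits in 13 bits and index is inside the table. */
bool pid_filter_args_ok(uint16_t pid, uint8_t index)
{
	return pid <= 0x1fff && index < FILTER_SLOTS;
}

int main(void)
{
	assert(pid_filter_args_ok(0x1fff, 31));
	assert(!pid_filter_args_ok(0x2000, 0));  /* 0x2000 means "all PIDs" */
	assert(!pid_filter_args_ok(0x100, 32));  /* rejected after the fix */
	return 0;
}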
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 013d423d3263..f87c9357cee3 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -1281,7 +1281,7 @@ static void si2165_remove(struct i2c_client *client)
}
static const struct i2c_device_id si2165_id_table[] = {
- {"si2165", 0},
+ { "si2165" },
{}
};
MODULE_DEVICE_TABLE(i2c, si2165_id_table);
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 26828fd41e68..d6b6b8bc7d4e 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -788,7 +788,7 @@ static void si2168_remove(struct i2c_client *client)
}
static const struct i2c_device_id si2168_id_table[] = {
- {"si2168", 0},
+ { "si2168" },
{}
};
MODULE_DEVICE_TABLE(i2c, si2168_id_table);
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 4d7d0b8b51b4..75adf2a4589f 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -407,7 +407,7 @@ static void sp2_remove(struct i2c_client *client)
}
static const struct i2c_device_id sp2_id[] = {
- {"sp2", 0},
+ { "sp2" },
{}
};
MODULE_DEVICE_TABLE(i2c, sp2_id);
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 3b02d504941f..f273efa330cf 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -5079,7 +5079,7 @@ error:
EXPORT_SYMBOL_GPL(stv090x_attach);
static const struct i2c_device_id stv090x_id_table[] = {
- {"stv090x", 0},
+ { "stv090x" },
{}
};
MODULE_DEVICE_TABLE(i2c, stv090x_id_table);
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index c678f47d2449..33c8105da1c3 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -470,7 +470,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
EXPORT_SYMBOL_GPL(stv6110x_attach);
static const struct i2c_device_id stv6110x_id_table[] = {
- {"stv6110x", 0},
+ { "stv6110x" },
{}
};
MODULE_DEVICE_TABLE(i2c, stv6110x_id_table);
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 6640851d8bbc..e23794b821cd 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -1230,7 +1230,7 @@ static void tda10071_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda10071_id_table[] = {
- {"tda10071_cx24118", 0},
+ { "tda10071_cx24118" },
{}
};
MODULE_DEVICE_TABLE(i2c, tda10071_id_table);
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index a5ebce57f35e..a5baca2449c7 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -710,8 +710,8 @@ static void ts2020_remove(struct i2c_client *client)
}
static const struct i2c_device_id ts2020_id_table[] = {
- {"ts2020", 0},
- {"ts2022", 0},
+ { "ts2020" },
+ { "ts2022" },
{}
};
MODULE_DEVICE_TABLE(i2c, ts2020_id_table);
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 1543d24f522c..f60271082fb5 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -347,8 +347,8 @@ static void ad5820_remove(struct i2c_client *client)
}
static const struct i2c_device_id ad5820_id_table[] = {
- { "ad5820", 0 },
- { "ad5821", 0 },
+ { "ad5820" },
+ { "ad5821" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 5ace7b5804d4..391bc75bfcd0 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -522,7 +522,7 @@ static void adp1653_remove(struct i2c_client *client)
}
static const struct i2c_device_id adp1653_id_table[] = {
- { ADP1653_NAME, 0 },
+ { ADP1653_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, adp1653_id_table);
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index 4a2b9fd9e2da..ef8682b980b4 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -377,8 +377,8 @@ static void adv7170_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7170_id[] = {
- { "adv7170", 0 },
- { "adv7171", 0 },
+ { "adv7170" },
+ { "adv7171" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7170_id);
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index e454cba4b026..384da1ec5bf9 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -432,8 +432,8 @@ static void adv7175_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7175_id[] = {
- { "adv7175", 0 },
- { "adv7176", 0 },
+ { "adv7175" },
+ { "adv7176" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7175_id);
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 2a2cace4a153..25a31a6dd456 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -619,8 +619,8 @@ static void adv7183_remove(struct i2c_client *client)
}
static const struct i2c_device_id adv7183_id[] = {
- {"adv7183", 0},
- {},
+ { "adv7183" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, adv7183_id);
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 4fbe4e18570e..b96443404a26 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -502,8 +502,8 @@ static void adv7343_remove(struct i2c_client *client)
}
static const struct i2c_device_id adv7343_id[] = {
- {"adv7343", 0},
- {},
+ { "adv7343" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, adv7343_id);
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index 7638af455cef..c7994bd0bbd4 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -446,8 +446,8 @@ static void adv7393_remove(struct i2c_client *client)
}
static const struct i2c_device_id adv7393_id[] = {
- {"adv7393", 0},
- {},
+ { "adv7393" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, adv7393_id);
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 261871be833f..e9406d552699 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1949,7 +1949,7 @@ static void adv7511_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7511_id[] = {
- { "adv7511-v4l2", 0 },
+ { "adv7511-v4l2" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_id);
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index f2d4217310e7..014fc913225c 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3617,7 +3617,7 @@ static void adv7842_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7842_id[] = {
- { "adv7842", 0 },
+ { "adv7842" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7842_id);
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index ce840adc2aa7..ee575d01a676 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -304,8 +304,8 @@ static void ak881x_remove(struct i2c_client *client)
}
static const struct i2c_device_id ak881x_id[] = {
- { "ak8813", 0 },
- { "ak8814", 0 },
+ { "ak8813" },
+ { "ak8814" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ak881x_id);
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index 09331cf95c62..fc27238dd4d3 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -835,21 +835,30 @@ static const struct initial_reg {
be(0x0707)), /* 3F44: couple k factor 2 */
};
-static int ar0521_power_off(struct device *dev)
+static void __ar0521_power_off(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ar0521_dev *sensor = to_ar0521_dev(sd);
int i;
- clk_disable_unprepare(sensor->extclk);
-
if (sensor->reset_gpio)
- gpiod_set_value(sensor->reset_gpio, 1); /* assert RESET signal */
+ /* assert RESET signal */
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
for (i = ARRAY_SIZE(ar0521_supply_names) - 1; i >= 0; i--) {
if (sensor->supplies[i])
regulator_disable(sensor->supplies[i]);
}
+}
+
+static int ar0521_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ar0521_dev *sensor = to_ar0521_dev(sd);
+
+ clk_disable_unprepare(sensor->extclk);
+ __ar0521_power_off(dev);
+
return 0;
}
@@ -878,7 +887,7 @@ static int ar0521_power_on(struct device *dev)
if (sensor->reset_gpio)
/* deassert RESET signal */
- gpiod_set_value(sensor->reset_gpio, 0);
+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
usleep_range(4500, 5000); /* min 45000 clocks */
for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) {
@@ -908,7 +917,8 @@ static int ar0521_power_on(struct device *dev)
return 0;
off:
- ar0521_power_off(dev);
+ clk_disable_unprepare(sensor->extclk);
+ __ar0521_power_off(dev);
return ret;
}
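Two things happen in the ar0521 hunks: the reset line is now driven with gpiod_set_value_cansleep(), the correct call when a GPIO may sit behind a sleeping controller such as an I2C expander, and the regulator/reset teardown is factored into __ar0521_power_off() so each caller balances the clock itself. A sketch of the _cansleep idiom (illustrative helper and hold time, not from the patch):

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

/* Pulse an active-high RESET line; may sleep, so only call from
 * process context (probe, runtime-PM callbacks, ...). */
static void sensor_reset_pulse(struct gpio_desc *reset)
{
	gpiod_set_value_cansleep(reset, 1);  /* assert RESET */
	usleep_range(1000, 2000);            /* hold time: illustrative */
	gpiod_set_value_cansleep(reset, 0);  /* release RESET */
}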
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index b4a25cc996dc..f97245f91f88 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -457,9 +457,9 @@ static void bt819_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id bt819_id[] = {
- { "bt819a", 0 },
- { "bt817a", 0 },
- { "bt815a", 0 },
+ { "bt819a" },
+ { "bt817a" },
+ { "bt815a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bt819_id);
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index 814acbd6a5a8..6852aa47cafb 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -230,7 +230,7 @@ static void bt856_remove(struct i2c_client *client)
}
static const struct i2c_device_id bt856_id[] = {
- { "bt856", 0 },
+ { "bt856" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bt856_id);
diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c
index dada059cbce4..a2cc34d35ed2 100644
--- a/drivers/media/i2c/bt866.c
+++ b/drivers/media/i2c/bt866.c
@@ -197,7 +197,7 @@ static void bt866_remove(struct i2c_client *client)
}
static const struct i2c_device_id bt866_id[] = {
- { "bt866", 0 },
+ { "bt866" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bt866_id);
diff --git a/drivers/media/i2c/ccs/ccs-reg-access.h b/drivers/media/i2c/ccs/ccs-reg-access.h
index 78c43f92d99a..4b56b21a26b5 100644
--- a/drivers/media/i2c/ccs/ccs-reg-access.h
+++ b/drivers/media/i2c/ccs/ccs-reg-access.h
@@ -21,16 +21,13 @@
struct ccs_sensor;
-int ccs_read_addr_no_quirk(struct ccs_sensor *sensor, u32 reg, u32 *val);
int ccs_read_addr(struct ccs_sensor *sensor, u32 reg, u32 *val);
int ccs_read_addr_8only(struct ccs_sensor *sensor, u32 reg, u32 *val);
int ccs_read_addr_noconv(struct ccs_sensor *sensor, u32 reg, u32 *val);
-int ccs_write_addr_no_quirk(struct ccs_sensor *sensor, u32 reg, u32 val);
int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val);
int ccs_write_data_regs(struct ccs_sensor *sensor, struct ccs_reg *regs,
size_t num_regs);
-unsigned int ccs_reg_width(u32 reg);
u32 ccs_reg_conv(struct ccs_sensor *sensor, u32 reg, u32 val);
#define ccs_read(sensor, reg_name, val) \
diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c
index 61afa3d799d2..078e0066ce4b 100644
--- a/drivers/media/i2c/cs3308.c
+++ b/drivers/media/i2c/cs3308.c
@@ -109,7 +109,7 @@ static void cs3308_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id cs3308_id[] = {
- { "cs3308", 0 },
+ { "cs3308" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cs3308_id);
diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c
index 3019a132e079..3a9797a50e82 100644
--- a/drivers/media/i2c/cs5345.c
+++ b/drivers/media/i2c/cs5345.c
@@ -189,7 +189,7 @@ static void cs5345_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id cs5345_id[] = {
- { "cs5345", 0 },
+ { "cs5345" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cs5345_id);
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index 82881b79e730..c4cad3293905 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -200,7 +200,7 @@ static void cs53l32a_remove(struct i2c_client *client)
}
static const struct i2c_device_id cs53l32a_id[] = {
- { "cs53l32a", 0 },
+ { "cs53l32a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cs53l32a_id);
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 04461c893d90..a90a9e5705a0 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -3964,7 +3964,7 @@ static void cx25840_remove(struct i2c_client *client)
}
static const struct i2c_device_id cx25840_id[] = {
- { "cx25840", 0 },
+ { "cx25840" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cx25840_id);
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index ca9bb29dab89..8eed4a200fd8 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -877,7 +877,10 @@ static void ub913_remove(struct i2c_client *client)
ub913_gpiochip_remove(priv);
}
-static const struct i2c_device_id ub913_id[] = { { "ds90ub913a-q1", 0 }, {} };
+static const struct i2c_device_id ub913_id[] = {
+ { "ds90ub913a-q1" },
+ {}
+};
MODULE_DEVICE_TABLE(i2c, ub913_id);
static const struct of_device_id ub913_dt_ids[] = {
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 0e88ce0ef8d7..2ddd7daa79e2 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -279,8 +279,8 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev)
}
static const struct i2c_device_id dw9714_id_table[] = {
- { DW9714_NAME, 0 },
- { { 0 } }
+ { DW9714_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, dw9714_id_table);
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index e932d25ca7b3..7519863d77b1 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1501,7 +1501,7 @@ static const struct of_device_id et8ek8_of_table[] = {
MODULE_DEVICE_TABLE(of, et8ek8_of_table);
static const struct i2c_device_id et8ek8_id_table[] = {
- { ET8EK8_NAME, 0 },
+ { ET8EK8_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, et8ek8_id_table);
diff --git a/drivers/media/i2c/gc05a2.c b/drivers/media/i2c/gc05a2.c
index dcba29ee725c..0413c557e594 100644
--- a/drivers/media/i2c/gc05a2.c
+++ b/drivers/media/i2c/gc05a2.c
@@ -65,7 +65,7 @@
static const char *const gc05a2_test_pattern_menu[] = {
"No Pattern", "Fade_to_gray_Color Bar", "Color Bar",
- "PN9", "Horizental_gradient", "Checkboard Pattern",
+ "PN9", "Horizontal_gradient", "Checkboard Pattern",
"Slant", "Resolution", "Solid Black",
"Solid White",
};
diff --git a/drivers/media/i2c/gc08a3.c b/drivers/media/i2c/gc08a3.c
index 7680d807e7a5..84de5cff958d 100644
--- a/drivers/media/i2c/gc08a3.c
+++ b/drivers/media/i2c/gc08a3.c
@@ -948,7 +948,7 @@ static int gc08a3_start_streaming(struct gc08a3 *gc08a3)
ret = cci_write(gc08a3->regmap, GC08A3_STREAMING_REG, 1, NULL);
if (ret < 0) {
- dev_err(gc08a3->dev, "write STRAEMING_REG failed: %d\n", ret);
+ dev_err(gc08a3->dev, "write STREAMING_REG failed: %d\n", ret);
goto err_rpm_put;
}
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 3800de974e8a..a2b824986027 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -1949,7 +1949,7 @@ static const struct of_device_id imx274_of_id_table[] = {
MODULE_DEVICE_TABLE(of, imx274_of_id_table);
static const struct i2c_device_id imx274_id[] = {
- { "IMX274", 0 },
+ { "IMX274" },
{ }
};
MODULE_DEVICE_TABLE(i2c, imx274_id);
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index 8490618c5071..94276f4f2d83 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -472,6 +472,39 @@ static const struct imx283_mode supported_modes_12bit[] = {
.height = 3648,
},
},
+ {
+ /*
+ * Readout mode 3 : 3/3 binned mode (1824x1216)
+ */
+ .mode = IMX283_MODE_3,
+ .bpp = 12,
+ .width = 1824,
+ .height = 1216,
+ .min_hmax = 1894, /* Pixels (284 * 480MHz/72MHz + padding) */
+ .min_vmax = 4200, /* Lines */
+
+ /* 60.00 fps */
+		.default_hmax = 1900, /* 285 @ 480MHz/72MHz */
+ .default_vmax = 4200,
+
+ .veff = 1234,
+ .vst = 0,
+ .vct = 0,
+
+ .hbin_ratio = 3,
+ .vbin_ratio = 3,
+
+ .min_shr = 16,
+ .horizontal_ob = 32,
+ .vertical_ob = 4,
+
+ .crop = {
+ .top = 40,
+ .left = 108,
+ .width = 5472,
+ .height = 3648,
+ },
+ },
};
static const struct imx283_mode supported_modes_10bit[] = {
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 990d74214cc2..54a1de53d497 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -997,7 +997,7 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
/* Request optional reset pin */
imx335->reset_gpio = devm_gpiod_get_optional(imx335->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(imx335->reset_gpio)) {
dev_err(imx335->dev, "failed to get reset gpio %ld\n",
PTR_ERR(imx335->reset_gpio));
@@ -1110,8 +1110,7 @@ static int imx335_power_on(struct device *dev)
usleep_range(500, 550); /* Tlow */
- /* Set XCLR */
- gpiod_set_value_cansleep(imx335->reset_gpio, 1);
+ gpiod_set_value_cansleep(imx335->reset_gpio, 0);
ret = clk_prepare_enable(imx335->inclk);
if (ret) {
@@ -1124,7 +1123,7 @@ static int imx335_power_on(struct device *dev)
return 0;
error_reset:
- gpiod_set_value_cansleep(imx335->reset_gpio, 0);
+ gpiod_set_value_cansleep(imx335->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies);
return ret;
@@ -1141,7 +1140,7 @@ static int imx335_power_off(struct device *dev)
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx335 *imx335 = to_imx335(sd);
- gpiod_set_value_cansleep(imx335->reset_gpio, 0);
+ gpiod_set_value_cansleep(imx335->reset_gpio, 1);
clk_disable_unprepare(imx335->inclk);
regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies);
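The imx335 change is about gpiod's logical polarity: the value passed to gpiod_set_value_cansleep() means "asserted"/"deasserted", not high/low on the wire. Assuming the DT marks the XCLR line GPIO_ACTIVE_LOW, requesting it with GPIOD_OUT_HIGH holds the sensor in reset from probe, power-on writes 0 to release reset, and power-off writes 1 to assert it again. A sketch of the convention:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Logical values: 1 = assert (sensor held in reset), 0 = deassert.
 * The active-low wire level comes from the DT flags, not the driver. */
static int sensor_reset_init(struct device *dev, struct gpio_desc **reset)
{
	/* Start asserted so the sensor powers up in a known state. */
	*reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(*reset))
		return PTR_ERR(*reset);

	return 0;
}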
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 7e9c2f65fa08..0dd25eeea60b 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1520,6 +1520,7 @@ static const struct v4l2_subdev_internal_ops imx355_internal_ops = {
static int imx355_init_controls(struct imx355 *imx355)
{
struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
s64 vblank_def;
@@ -1531,7 +1532,7 @@ static int imx355_init_controls(struct imx355 *imx355)
int ret;
ctrl_hdlr = &imx355->ctrl_handler;
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
if (ret)
return ret;
@@ -1603,6 +1604,15 @@ static int imx355_init_controls(struct imx355 *imx355)
goto error;
}
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx355_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
imx355->sd.ctrl_handler = ctrl_hdlr;
return 0;
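imx355 gains the standard fwnode-property controls (camera orientation and sensor rotation), and the handler size hint grows from 10 to 12 to account for the two extra controls. The pattern, reduced to a helper — a hedged sketch reusing the V4L2 core API named in the hunk:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>

static int register_fwnode_ctrls(struct device *dev,
				 struct v4l2_ctrl_handler *hdl,
				 const struct v4l2_ctrl_ops *ops)
{
	struct v4l2_fwnode_device_properties props;
	int ret;

	ret = v4l2_fwnode_device_parse(dev, &props);
	if (ret)
		return ret;

	/* Adds V4L2_CID_CAMERA_ORIENTATION and _SENSOR_ROTATION if set. */
	return v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
}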
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
index c7089035bbc1..5ffd53e005ee 100644
--- a/drivers/media/i2c/isl7998x.c
+++ b/drivers/media/i2c/isl7998x.c
@@ -1561,8 +1561,8 @@ static const struct of_device_id isl7998x_of_match[] = {
MODULE_DEVICE_TABLE(of, isl7998x_of_match);
static const struct i2c_device_id isl7998x_id[] = {
- { "isl79987", 0 },
- { /* sentinel */ },
+ { "isl79987" },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, isl7998x_id);
diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c
index 9d0a763cd503..f3fba9179684 100644
--- a/drivers/media/i2c/ks0127.c
+++ b/drivers/media/i2c/ks0127.c
@@ -677,9 +677,9 @@ static void ks0127_remove(struct i2c_client *client)
}
static const struct i2c_device_id ks0127_id[] = {
- { "ks0127", 0 },
- { "ks0127b", 0 },
- { "ks0122s", 0 },
+ { "ks0127" },
+ { "ks0127b" },
+ { "ks0122s" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ks0127_id);
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 05283ac68f2d..f4cc844f4e3c 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -455,8 +455,8 @@ static void lm3560_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3560_id_table[] = {
- {LM3559_NAME, 0},
- {LM3560_NAME, 0},
+ { LM3559_NAME },
+ { LM3560_NAME },
{}
};
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index fab3a7e05f92..2d16e42ec224 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -386,7 +386,7 @@ static void lm3646_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm3646_id_table[] = {
- {LM3646_NAME, 0},
+ { LM3646_NAME },
{}
};
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index f8a69142aae9..9e1ecfd01e2a 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -163,7 +163,7 @@ static void m52790_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id m52790_id[] = {
- { "m52790", 0 },
+ { "m52790" },
{ }
};
MODULE_DEVICE_TABLE(i2c, m52790_id);
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index cd73d2096ae4..bf02ca23a284 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1413,8 +1413,8 @@ static void max2175_remove(struct i2c_client *client)
}
static const struct i2c_device_id max2175_id[] = {
- { DRIVER_NAME, 0},
- {},
+ { DRIVER_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, max2175_id);
diff --git a/drivers/media/i2c/max96714.c b/drivers/media/i2c/max96714.c
index c97de66631e0..159753b13777 100644
--- a/drivers/media/i2c/max96714.c
+++ b/drivers/media/i2c/max96714.c
@@ -25,6 +25,7 @@
#define MAX96714_NPORTS 2
#define MAX96714_PAD_SINK 0
#define MAX96714_PAD_SOURCE 1
+#define MAX96714_CSI_NLANES 4
/* DEV */
#define MAX96714_REG13 CCI_REG8(0x0d)
@@ -52,9 +53,9 @@
#define MAX96714_PATGEN_V2D CCI_REG24(0x254)
#define MAX96714_PATGEN_DE_HIGH CCI_REG16(0x257)
#define MAX96714_PATGEN_DE_LOW CCI_REG16(0x259)
-#define MAX96714_PATGEN_DE_CNT CCI_REG16(0x25B)
+#define MAX96714_PATGEN_DE_CNT CCI_REG16(0x25b)
#define MAX96714_PATGEN_GRAD_INC CCI_REG8(0x25d)
-#define MAX96714_PATGEN_CHKB_COLOR_A CCI_REG24(0x25E)
+#define MAX96714_PATGEN_CHKB_COLOR_A CCI_REG24(0x25e)
#define MAX96714_PATGEN_CHKB_COLOR_B CCI_REG24(0x261)
#define MAX96714_PATGEN_CHKB_RPT_CNT_A CCI_REG8(0x264)
#define MAX96714_PATGEN_CHKB_RPT_CNT_B CCI_REG8(0x265)
@@ -724,8 +725,9 @@ static int max96714_init_tx_port(struct max96714_priv *priv)
* Unused lanes need to be mapped as well to not have
* the same lanes mapped twice.
*/
- for (; lane < 4; lane++) {
- unsigned int idx = find_first_zero_bit(&lanes_used, 4);
+ for (; lane < MAX96714_CSI_NLANES; lane++) {
+ unsigned int idx = find_first_zero_bit(&lanes_used,
+ MAX96714_CSI_NLANES);
val |= idx << (lane * 2);
lanes_used |= BIT(idx);
@@ -757,9 +759,7 @@ static int max96714_rxport_disable_poc(struct max96714_priv *priv)
static int max96714_parse_dt_txport(struct max96714_priv *priv)
{
struct device *dev = &priv->client->dev;
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY
- };
+ struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
struct fwnode_handle *ep_fwnode;
u32 num_data_lanes;
int ret;
@@ -791,14 +791,14 @@ static int max96714_parse_dt_txport(struct max96714_priv *priv)
}
num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;
- if (num_data_lanes < 1 || num_data_lanes > 4) {
+ if (num_data_lanes < 1 || num_data_lanes > MAX96714_CSI_NLANES) {
dev_err(dev,
"tx: invalid number of data lanes must be 1 to 4\n");
ret = -EINVAL;
goto err_free_vep;
}
- memcpy(&priv->mipi_csi2, &vep.bus.mipi_csi2, sizeof(priv->mipi_csi2));
+ priv->mipi_csi2 = vep.bus.mipi_csi2;
err_free_vep:
v4l2_fwnode_endpoint_free(&vep);
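Among the max96714 cleanups, memcpy() of the endpoint bus config becomes a plain struct assignment; both copy the same bytes, but assignment lets the compiler verify that source and destination types match. A standalone contrast:

#include <string.h>

struct csi2_cfg {                /* illustrative, not the v4l2 struct */
	unsigned char num_data_lanes;
	unsigned char data_lanes[4];
};

void copy_cfg(struct csi2_cfg *dst, const struct csi2_cfg *src)
{
	/* Preferred: type-checked by the compiler. */
	*dst = *src;

	/* Equivalent at runtime, but silently accepts mismatched types: */
	memcpy(dst, src, sizeof(*dst));
}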
diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
index 949306485873..4e85b8eb1e77 100644
--- a/drivers/media/i2c/max96717.c
+++ b/drivers/media/i2c/max96717.c
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
@@ -24,6 +25,7 @@
#define MAX96717_PORTS 2
#define MAX96717_PAD_SINK 0
#define MAX96717_PAD_SOURCE 1
+#define MAX96717_CSI_NLANES 4
#define MAX96717_DEFAULT_CLKOUT_RATE 24000000UL
@@ -38,9 +40,35 @@
#define MAX96717_DEV_REV_MASK GENMASK(3, 0)
/* VID_TX Z */
+#define MAX96717_VIDEO_TX0 CCI_REG8(0x110)
+#define MAX96717_VIDEO_AUTO_BPP BIT(3)
#define MAX96717_VIDEO_TX2 CCI_REG8(0x112)
#define MAX96717_VIDEO_PCLKDET BIT(7)
+/* VTX_Z */
+#define MAX96717_VTX0 CCI_REG8(0x24e)
+#define MAX96717_VTX1 CCI_REG8(0x24f)
+#define MAX96717_PATTERN_CLK_FREQ GENMASK(3, 1)
+#define MAX96717_VTX_VS_DLY CCI_REG24(0x250)
+#define MAX96717_VTX_VS_HIGH CCI_REG24(0x253)
+#define MAX96717_VTX_VS_LOW CCI_REG24(0x256)
+#define MAX96717_VTX_V2H CCI_REG24(0x259)
+#define MAX96717_VTX_HS_HIGH CCI_REG16(0x25c)
+#define MAX96717_VTX_HS_LOW CCI_REG16(0x25e)
+#define MAX96717_VTX_HS_CNT CCI_REG16(0x260)
+#define MAX96717_VTX_V2D CCI_REG24(0x262)
+#define MAX96717_VTX_DE_HIGH CCI_REG16(0x265)
+#define MAX96717_VTX_DE_LOW CCI_REG16(0x267)
+#define MAX96717_VTX_DE_CNT CCI_REG16(0x269)
+#define MAX96717_VTX29 CCI_REG8(0x26b)
+#define MAX96717_VTX_MODE GENMASK(1, 0)
+#define MAX96717_VTX_GRAD_INC CCI_REG8(0x26c)
+#define MAX96717_VTX_CHKB_COLOR_A CCI_REG24(0x26d)
+#define MAX96717_VTX_CHKB_COLOR_B CCI_REG24(0x270)
+#define MAX96717_VTX_CHKB_RPT_CNT_A CCI_REG8(0x273)
+#define MAX96717_VTX_CHKB_RPT_CNT_B CCI_REG8(0x274)
+#define MAX96717_VTX_CHKB_ALT CCI_REG8(0x275)
+
/* GPIO */
#define MAX96717_NUM_GPIO 11
#define MAX96717_GPIO_REG_A(gpio) CCI_REG8(0x2be + (gpio) * 3)
@@ -82,6 +110,12 @@
/* MISC */
#define PIO_SLEW_1 CCI_REG8(0x570)
+enum max96717_vpg_mode {
+ MAX96717_VPG_DISABLED = 0,
+ MAX96717_VPG_CHECKERBOARD = 1,
+ MAX96717_VPG_GRADIENT = 2,
+};
+
struct max96717_priv {
struct i2c_client *client;
struct regmap *regmap;
@@ -89,6 +123,7 @@ struct max96717_priv {
struct v4l2_mbus_config_mipi_csi2 mipi_csi2;
struct v4l2_subdev sd;
struct media_pad pads[MAX96717_PORTS];
+ struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
struct v4l2_subdev *source_sd;
u16 source_sd_pad;
@@ -96,6 +131,7 @@ struct max96717_priv {
u8 pll_predef_index;
struct clk_hw clk_hw;
struct gpio_chip gpio_chip;
+ enum max96717_vpg_mode pattern;
};
static inline struct max96717_priv *sd_to_max96717(struct v4l2_subdev *sd)
@@ -131,6 +167,118 @@ static inline int max96717_start_csi(struct max96717_priv *priv, bool start)
start ? MAX96717_START_PORT_B : 0, NULL);
}
+static int max96717_apply_patgen_timing(struct max96717_priv *priv,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_state_get_format(state, MAX96717_PAD_SOURCE);
+ const u32 h_active = fmt->width;
+ const u32 h_fp = 88;
+ const u32 h_sw = 44;
+ const u32 h_bp = 148;
+ u32 h_tot;
+ const u32 v_active = fmt->height;
+ const u32 v_fp = 4;
+ const u32 v_sw = 5;
+ const u32 v_bp = 36;
+ u32 v_tot;
+ int ret = 0;
+
+ h_tot = h_active + h_fp + h_sw + h_bp;
+ v_tot = v_active + v_fp + v_sw + v_bp;
+
+	/* 75 MHz pixel clock */
+ cci_update_bits(priv->regmap, MAX96717_VTX1,
+ MAX96717_PATTERN_CLK_FREQ, 0xa, &ret);
+
+ dev_info(&priv->client->dev, "height: %d width: %d\n", fmt->height,
+ fmt->width);
+
+ cci_write(priv->regmap, MAX96717_VTX_VS_DLY, 0, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_VS_HIGH, v_sw * h_tot, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_VS_LOW,
+ (v_active + v_fp + v_bp) * h_tot, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_HS_HIGH, h_sw, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_HS_LOW, h_active + h_fp + h_bp,
+ &ret);
+ cci_write(priv->regmap, MAX96717_VTX_V2D,
+ h_tot * (v_sw + v_bp) + (h_sw + h_bp), &ret);
+ cci_write(priv->regmap, MAX96717_VTX_HS_CNT, v_tot, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_DE_HIGH, h_active, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_DE_LOW, h_fp + h_sw + h_bp,
+ &ret);
+ cci_write(priv->regmap, MAX96717_VTX_DE_CNT, v_active, &ret);
+ /* B G R */
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_COLOR_A, 0xfecc00, &ret);
+ /* B G R */
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_COLOR_B, 0x006aa7, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_RPT_CNT_A, 0x3c, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_RPT_CNT_B, 0x3c, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_CHKB_ALT, 0x3c, &ret);
+ cci_write(priv->regmap, MAX96717_VTX_GRAD_INC, 0x10, &ret);
+
+ return ret;
+}
+
+static int max96717_apply_patgen(struct max96717_priv *priv,
+ struct v4l2_subdev_state *state)
+{
+ unsigned int val;
+ int ret = 0;
+
+ if (priv->pattern)
+ ret = max96717_apply_patgen_timing(priv, state);
+
+ cci_write(priv->regmap, MAX96717_VTX0, priv->pattern ? 0xfb : 0,
+ &ret);
+
+ val = FIELD_PREP(MAX96717_VTX_MODE, priv->pattern);
+ cci_update_bits(priv->regmap, MAX96717_VTX29, MAX96717_VTX_MODE,
+ val, &ret);
+ return ret;
+}
+
+static int max96717_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct max96717_priv *priv =
+ container_of(ctrl->handler, struct max96717_priv, ctrl_handler);
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ if (priv->enabled_source_streams)
+ return -EBUSY;
+ priv->pattern = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Use bpp from bpp register */
+ ret = cci_update_bits(priv->regmap, MAX96717_VIDEO_TX0,
+ MAX96717_VIDEO_AUTO_BPP,
+ priv->pattern ? 0 : MAX96717_VIDEO_AUTO_BPP,
+ NULL);
+
+ /*
+	 * The pattern generator doesn't work in tunnel mode: it needs an RGB
+	 * color format, and the deserializer's tunnel mode must be disabled.
+ */
+ return cci_update_bits(priv->regmap, MAX96717_MIPI_RX_EXT11,
+ MAX96717_TUN_MODE,
+ priv->pattern ? 0 : MAX96717_TUN_MODE, &ret);
+}
+
+static const char * const max96717_test_pattern[] = {
+ "Disabled",
+ "Checkerboard",
+ "Gradient"
+};
+
+static const struct v4l2_ctrl_ops max96717_ctrl_ops = {
+ .s_ctrl = max96717_s_ctrl,
+};
+
static int max96717_gpiochip_get(struct gpio_chip *gpiochip,
unsigned int offset)
{
@@ -348,24 +496,28 @@ static int max96717_enable_streams(struct v4l2_subdev *sd,
u64 streams_mask)
{
struct max96717_priv *priv = sd_to_max96717(sd);
- struct device *dev = &priv->client->dev;
u64 sink_streams;
int ret;
- sink_streams = v4l2_subdev_state_xlate_streams(state,
- MAX96717_PAD_SOURCE,
- MAX96717_PAD_SINK,
- &streams_mask);
-
if (!priv->enabled_source_streams)
max96717_start_csi(priv, true);
- ret = v4l2_subdev_enable_streams(priv->source_sd, priv->source_sd_pad,
- sink_streams);
- if (ret) {
- dev_err(dev, "Fail to start streams:%llu on remote subdev\n",
- sink_streams);
+ ret = max96717_apply_patgen(priv, state);
+ if (ret)
goto stop_csi;
+
+ if (!priv->pattern) {
+ sink_streams =
+ v4l2_subdev_state_xlate_streams(state,
+ MAX96717_PAD_SOURCE,
+ MAX96717_PAD_SINK,
+ &streams_mask);
+
+ ret = v4l2_subdev_enable_streams(priv->source_sd,
+ priv->source_sd_pad,
+ sink_streams);
+ if (ret)
+ goto stop_csi;
}
priv->enabled_source_streams |= streams_mask;
@@ -375,6 +527,7 @@ static int max96717_enable_streams(struct v4l2_subdev *sd,
stop_csi:
if (!priv->enabled_source_streams)
max96717_start_csi(priv, false);
+
return ret;
}
@@ -394,13 +547,23 @@ static int max96717_disable_streams(struct v4l2_subdev *sd,
if (!priv->enabled_source_streams)
max96717_start_csi(priv, false);
- sink_streams = v4l2_subdev_state_xlate_streams(state,
- MAX96717_PAD_SOURCE,
- MAX96717_PAD_SINK,
- &streams_mask);
+ if (!priv->pattern) {
+ int ret;
+
+ sink_streams =
+ v4l2_subdev_state_xlate_streams(state,
+ MAX96717_PAD_SOURCE,
+ MAX96717_PAD_SINK,
+ &streams_mask);
+
+ ret = v4l2_subdev_disable_streams(priv->source_sd,
+ priv->source_sd_pad,
+ sink_streams);
+ if (ret)
+ return ret;
+ }
- return v4l2_subdev_disable_streams(priv->source_sd, priv->source_sd_pad,
- sink_streams);
+ return 0;
}
static const struct v4l2_subdev_pad_ops max96717_pad_ops = {
@@ -513,6 +676,19 @@ static int max96717_subdev_init(struct max96717_priv *priv)
v4l2_i2c_subdev_init(&priv->sd, priv->client, &max96717_subdev_ops);
priv->sd.internal_ops = &max96717_internal_ops;
+ v4l2_ctrl_handler_init(&priv->ctrl_handler, 1);
+ priv->sd.ctrl_handler = &priv->ctrl_handler;
+
+ v4l2_ctrl_new_std_menu_items(&priv->ctrl_handler,
+ &max96717_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(max96717_test_pattern) - 1,
+ 0, 0, max96717_test_pattern);
+ if (priv->ctrl_handler.error) {
+ ret = priv->ctrl_handler.error;
+ goto err_free_ctrl;
+ }
+
priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
priv->sd.entity.ops = &max96717_entity_ops;
@@ -552,6 +728,8 @@ err_free_state:
v4l2_subdev_cleanup(&priv->sd);
err_entity_cleanup:
media_entity_cleanup(&priv->sd.entity);
+err_free_ctrl:
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
return ret;
}
@@ -563,6 +741,7 @@ static void max96717_subdev_uninit(struct max96717_priv *priv)
v4l2_async_nf_cleanup(&priv->notifier);
v4l2_subdev_cleanup(&priv->sd);
media_entity_cleanup(&priv->sd.entity);
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
}
struct max96717_pll_predef_freq {
@@ -588,11 +767,8 @@ max96717_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
static unsigned int max96717_clk_find_best_index(struct max96717_priv *priv,
unsigned long rate)
{
- unsigned int i, idx;
- unsigned long diff_new, diff_old;
-
- diff_old = U32_MAX;
- idx = 0;
+ unsigned int i, idx = 0;
+ unsigned long diff_new, diff_old = U32_MAX;
for (i = 0; i < ARRAY_SIZE(max96717_predef_freqs); i++) {
diff_new = abs(rate - max96717_predef_freqs[i].freq);
@@ -679,8 +855,7 @@ static int max96717_register_clkout(struct max96717_priv *priv)
struct clk_init_data init = { .ops = &max96717_clk_ops };
int ret;
- init.name = kasprintf(GFP_KERNEL, "max96717.%s.clk_out",
- dev_name(dev));
+ init.name = kasprintf(GFP_KERNEL, "max96717.%s.clk_out", dev_name(dev));
if (!init.name)
return -ENOMEM;
@@ -763,8 +938,9 @@ static int max96717_init_csi_lanes(struct max96717_priv *priv)
* Unused lanes need to be mapped as well to not have
* the same lanes mapped twice.
*/
- for (; lane < 4; lane++) {
- unsigned int idx = find_first_zero_bit(&lanes_used, 4);
+ for (; lane < MAX96717_CSI_NLANES; lane++) {
+ unsigned int idx = find_first_zero_bit(&lanes_used,
+ MAX96717_CSI_NLANES);
val |= idx << (lane * 2);
lanes_used |= BIT(idx);
@@ -818,9 +994,7 @@ static int max96717_hw_init(struct max96717_priv *priv)
static int max96717_parse_dt(struct max96717_priv *priv)
{
struct device *dev = &priv->client->dev;
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY
- };
+ struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
struct fwnode_handle *ep_fwnode;
unsigned char num_data_lanes;
int ret;
@@ -838,11 +1012,11 @@ static int max96717_parse_dt(struct max96717_priv *priv)
return dev_err_probe(dev, ret, "Failed to parse sink endpoint");
num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;
- if (num_data_lanes < 1 || num_data_lanes > 4)
+ if (num_data_lanes < 1 || num_data_lanes > MAX96717_CSI_NLANES)
return dev_err_probe(dev, -EINVAL,
"Invalid data lanes must be 1 to 4\n");
- memcpy(&priv->mipi_csi2, &vep.bus.mipi_csi2, sizeof(priv->mipi_csi2));
+ priv->mipi_csi2 = vep.bus.mipi_csi2;
return 0;
}
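The max96717 pattern-generator setup above is ordinary raster arithmetic: the totals are active size plus front porch, sync width and back porch, and the VS/HS/DE registers are programmed in pixels of the pattern clock. With the porch constants from the hunk and an illustrative 1920x1080 active area (the porches happen to match CEA-861 1080p timing), the numbers work out as follows:

#include <stdio.h>

int main(void)
{
	const unsigned w = 1920, h = 1080;               /* illustrative */
	const unsigned h_fp = 88, h_sw = 44, h_bp = 148; /* from the hunk */
	const unsigned v_fp = 4,  v_sw = 5,  v_bp = 36;
	const unsigned h_tot = w + h_fp + h_sw + h_bp;   /* 2200 */
	const unsigned v_tot = h + v_fp + v_sw + v_bp;   /* 1125 */

	printf("VS high: %u px\n", v_sw * h_tot);               /* 11000 */
	printf("VS low:  %u px\n", (h + v_fp + v_bp) * h_tot);  /* 2464000 */
	printf("V2D:     %u px\n", h_tot * (v_sw + v_bp) + h_sw + h_bp);
	printf("HS count per frame: %u\n", v_tot);
	/* 2200 * 1125 px/frame at a 75 MHz pixel clock is about 30.3 fps. */
	return 0;
}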
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index 5b72d4434224..57ba3693649a 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -424,8 +424,8 @@ static void ml86v7667_remove(struct i2c_client *client)
}
static const struct i2c_device_id ml86v7667_id[] = {
- {DRV_NAME, 0},
- {},
+ { DRV_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ml86v7667_id);
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 599a5bc7cbb3..4c0b0ad68c08 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -874,7 +874,7 @@ static const struct dev_pm_ops msp3400_pm_ops = {
};
static const struct i2c_device_id msp_id[] = {
- { "msp3400", 0 },
+ { "msp3400" },
{ }
};
MODULE_DEVICE_TABLE(i2c, msp_id);
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index ad1a3ab77411..12d3e86bdc0f 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -854,7 +854,7 @@ static void mt9m001_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9m001_id[] = {
- { "mt9m001", 0 },
+ { "mt9m001" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9m001_id);
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index ceeeb94c38d5..9aa5dcda3805 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1383,7 +1383,7 @@ static const struct of_device_id mt9m111_of_match[] = {
MODULE_DEVICE_TABLE(of, mt9m111_of_match);
static const struct i2c_device_id mt9m111_id[] = {
- { "mt9m111", 0 },
+ { "mt9m111" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9m111_id);
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index f4b481212356..d8735c246e52 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -15,6 +15,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -112,11 +113,6 @@
#define MT9P031_TEST_PATTERN_RED 0xa2
#define MT9P031_TEST_PATTERN_BLUE 0xa3
-enum mt9p031_model {
- MT9P031_MODEL_COLOR,
- MT9P031_MODEL_MONOCHROME,
-};
-
struct mt9p031 {
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -129,7 +125,7 @@ struct mt9p031 {
struct clk *clk;
struct regulator_bulk_data regulators[3];
- enum mt9p031_model model;
+ u32 code;
struct aptina_pll pll;
unsigned int clk_div;
bool use_pll;
@@ -712,12 +708,7 @@ static int mt9p031_init_state(struct v4l2_subdev *subdev,
crop->height = MT9P031_WINDOW_HEIGHT_DEF;
format = __mt9p031_get_pad_format(mt9p031, sd_state, 0, which);
-
- if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
- format->code = MEDIA_BUS_FMT_Y12_1X12;
- else
- format->code = MEDIA_BUS_FMT_SGRBG12_1X12;
-
+ format->code = mt9p031->code;
format->width = MT9P031_WINDOW_WIDTH_DEF;
format->height = MT9P031_WINDOW_HEIGHT_DEF;
format->field = V4L2_FIELD_NONE;
@@ -1102,7 +1093,6 @@ done:
static int mt9p031_probe(struct i2c_client *client)
{
- const struct i2c_device_id *did = i2c_client_get_device_id(client);
struct mt9p031_platform_data *pdata = mt9p031_get_pdata(client);
struct i2c_adapter *adapter = client->adapter;
struct mt9p031 *mt9p031;
@@ -1127,7 +1117,7 @@ static int mt9p031_probe(struct i2c_client *client)
mt9p031->pdata = pdata;
mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
- mt9p031->model = did->driver_data;
+ mt9p031->code = (uintptr_t)i2c_get_match_data(client);
mt9p031->regulators[0].supply = "vdd";
mt9p031->regulators[1].supply = "vdd_io";
@@ -1224,26 +1214,24 @@ static void mt9p031_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9p031_id[] = {
- { "mt9p006", MT9P031_MODEL_COLOR },
- { "mt9p031", MT9P031_MODEL_COLOR },
- { "mt9p031m", MT9P031_MODEL_MONOCHROME },
- { }
+ { "mt9p006", MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { "mt9p031", MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { "mt9p031m", MEDIA_BUS_FMT_Y12_1X12 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, mt9p031_id);
-#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id mt9p031_of_match[] = {
- { .compatible = "aptina,mt9p006", },
- { .compatible = "aptina,mt9p031", },
- { .compatible = "aptina,mt9p031m", },
- { /* sentinel */ },
+ { .compatible = "aptina,mt9p006", .data = (void *)MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { .compatible = "aptina,mt9p031", .data = (void *)MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { .compatible = "aptina,mt9p031m", .data = (void *)MEDIA_BUS_FMT_Y12_1X12 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mt9p031_of_match);
-#endif
static struct i2c_driver mt9p031_i2c_driver = {
.driver = {
- .of_match_table = of_match_ptr(mt9p031_of_match),
+ .of_match_table = mt9p031_of_match,
.name = "mt9p031",
},
.probe = mt9p031_probe,
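The mt9p031 rework stores the media bus format code directly in the match data, and i2c_get_match_data() retrieves it from whichever table matched (OF, ACPI or the I2C id table); that is also what lets the #if IS_ENABLED(CONFIG_OF) / of_match_ptr() dance go away. The retrieval idiom, sketched with a hypothetical probe:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

static int demo_probe(struct i2c_client *client)
{
	/* Pointer-sized payload stashed in .data / .driver_data;
	 * here it encodes a MEDIA_BUS_FMT_* code. */
	u32 code = (uintptr_t)i2c_get_match_data(client);

	if (!code)
		return -ENODEV;

	return 0;
}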
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index fb1588c57cc8..878dff9b7577 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -1109,7 +1109,7 @@ static void mt9t112_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt9t112_id[] = {
- { "mt9t112", 0 },
+ { "mt9t112" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9t112_id);
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 8834ff8786e5..055b7915260a 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -582,7 +582,7 @@ static void mt9v011_remove(struct i2c_client *c)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id mt9v011_id[] = {
- { "mt9v011", 0 },
+ { "mt9v011" },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9v011_id);
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index b0b98ed3c150..723fe138e7bc 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1263,8 +1263,9 @@ static void mt9v111_remove(struct i2c_client *client)
static const struct of_device_id mt9v111_of_match[] = {
{ .compatible = "aptina,mt9v111", },
- { /* sentinel */ },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mt9v111_of_match);
static struct i2c_driver mt9v111_driver = {
.driver = {
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index bac9597faf68..e906435fc49a 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -3,10 +3,13 @@
#include <asm/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -418,6 +421,12 @@ static const struct og01a1b_mode supported_modes[] = {
};
struct og01a1b {
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator *avdd;
+ struct regulator *dovdd;
+ struct regulator *dvdd;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -898,8 +907,10 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b)
return 0;
}
-static int og01a1b_check_hwcfg(struct device *dev)
+static int og01a1b_check_hwcfg(struct og01a1b *og01a1b)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
+ struct device *dev = &client->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -913,10 +924,13 @@ static int og01a1b_check_hwcfg(struct device *dev)
return -ENXIO;
ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
-
if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
+ if (!og01a1b->xvclk) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ mclk = clk_get_rate(og01a1b->xvclk);
}
if (mclk != OG01A1B_MCLK) {
@@ -967,6 +981,83 @@ check_hwcfg_error:
return ret;
}
+/* Power/clock management functions */
+static int og01a1b_power_on(struct device *dev)
+{
+ unsigned long delay = DIV_ROUND_UP(8192UL * USEC_PER_SEC, OG01A1B_MCLK);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+ int ret;
+
+ if (og01a1b->avdd) {
+ ret = regulator_enable(og01a1b->avdd);
+ if (ret)
+ return ret;
+ }
+
+ if (og01a1b->dovdd) {
+ ret = regulator_enable(og01a1b->dovdd);
+ if (ret)
+ goto avdd_disable;
+ }
+
+ if (og01a1b->dvdd) {
+ ret = regulator_enable(og01a1b->dvdd);
+ if (ret)
+ goto dovdd_disable;
+ }
+
+ ret = clk_prepare_enable(og01a1b->xvclk);
+ if (ret)
+ goto dvdd_disable;
+
+ gpiod_set_value_cansleep(og01a1b->reset_gpio, 0);
+
+ if (og01a1b->reset_gpio)
+ usleep_range(5 * USEC_PER_MSEC, 6 * USEC_PER_MSEC);
+ else if (og01a1b->xvclk)
+ usleep_range(delay, 2 * delay);
+
+ return 0;
+
+dvdd_disable:
+ if (og01a1b->dvdd)
+ regulator_disable(og01a1b->dvdd);
+dovdd_disable:
+ if (og01a1b->dovdd)
+ regulator_disable(og01a1b->dovdd);
+avdd_disable:
+ if (og01a1b->avdd)
+ regulator_disable(og01a1b->avdd);
+
+ return ret;
+}
+
+static int og01a1b_power_off(struct device *dev)
+{
+ unsigned long delay = DIV_ROUND_UP(512 * USEC_PER_SEC, OG01A1B_MCLK);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og01a1b *og01a1b = to_og01a1b(sd);
+
+ if (og01a1b->xvclk)
+ usleep_range(delay, 2 * delay);
+
+ clk_disable_unprepare(og01a1b->xvclk);
+
+ gpiod_set_value_cansleep(og01a1b->reset_gpio, 1);
+
+ if (og01a1b->dvdd)
+ regulator_disable(og01a1b->dvdd);
+
+ if (og01a1b->dovdd)
+ regulator_disable(og01a1b->dovdd);
+
+ if (og01a1b->avdd)
+ regulator_disable(og01a1b->avdd);
+
+ return 0;
+}
+
static void og01a1b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -984,22 +1075,78 @@ static int og01a1b_probe(struct i2c_client *client)
struct og01a1b *og01a1b;
int ret;
- ret = og01a1b_check_hwcfg(&client->dev);
+ og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL);
+ if (!og01a1b)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops);
+
+ og01a1b->xvclk = devm_clk_get_optional(&client->dev, NULL);
+ if (IS_ERR(og01a1b->xvclk)) {
+ ret = PTR_ERR(og01a1b->xvclk);
+ dev_err(&client->dev, "failed to get xvclk clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = og01a1b_check_hwcfg(og01a1b);
if (ret) {
dev_err(&client->dev, "failed to check HW configuration: %d",
ret);
return ret;
}
- og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL);
- if (!og01a1b)
- return -ENOMEM;
+ og01a1b->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(og01a1b->reset_gpio)) {
+ dev_err(&client->dev, "cannot get reset GPIO\n");
+ return PTR_ERR(og01a1b->reset_gpio);
+ }
+
+ og01a1b->avdd = devm_regulator_get_optional(&client->dev, "avdd");
+ if (IS_ERR(og01a1b->avdd)) {
+ ret = PTR_ERR(og01a1b->avdd);
+ if (ret != -ENODEV) {
+ dev_err_probe(&client->dev, ret,
+ "Failed to get 'avdd' regulator\n");
+ return ret;
+ }
+
+ og01a1b->avdd = NULL;
+ }
+
+ og01a1b->dovdd = devm_regulator_get_optional(&client->dev, "dovdd");
+ if (IS_ERR(og01a1b->dovdd)) {
+ ret = PTR_ERR(og01a1b->dovdd);
+ if (ret != -ENODEV) {
+ dev_err_probe(&client->dev, ret,
+ "Failed to get 'dovdd' regulator\n");
+ return ret;
+ }
+
+ og01a1b->dovdd = NULL;
+ }
+
+ og01a1b->dvdd = devm_regulator_get_optional(&client->dev, "dvdd");
+ if (IS_ERR(og01a1b->dvdd)) {
+ ret = PTR_ERR(og01a1b->dvdd);
+ if (ret != -ENODEV) {
+ dev_err_probe(&client->dev, ret,
+ "Failed to get 'dvdd' regulator\n");
+ return ret;
+ }
+
+ og01a1b->dvdd = NULL;
+ }
+
+ /* The sensor must be powered on to read the CHIP_ID register */
+ ret = og01a1b_power_on(&client->dev);
+ if (ret)
+ return ret;
- v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops);
ret = og01a1b_identify_module(og01a1b);
if (ret) {
dev_err(&client->dev, "failed to find sensor: %d", ret);
- return ret;
+ goto power_off;
}
mutex_init(&og01a1b->mutex);
@@ -1028,10 +1175,7 @@ static int og01a1b_probe(struct i2c_client *client)
goto probe_error_media_entity_cleanup;
}
- /*
- * Device is already turned on by i2c-core with ACPI domain PM.
- * Enable runtime PM and turn off the device.
- */
+ /* Enable runtime PM and turn off the device */
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -1045,9 +1189,16 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(og01a1b->sd.ctrl_handler);
mutex_destroy(&og01a1b->mutex);
+power_off:
+ og01a1b_power_off(&client->dev);
+
return ret;
}
+static const struct dev_pm_ops og01a1b_pm_ops = {
+ SET_RUNTIME_PM_OPS(og01a1b_power_off, og01a1b_power_on, NULL)
+};
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id og01a1b_acpi_ids[] = {
{"OVTI01AC"},
@@ -1057,10 +1208,18 @@ static const struct acpi_device_id og01a1b_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, og01a1b_acpi_ids);
#endif
+static const struct of_device_id og01a1b_of_match[] = {
+ { .compatible = "ovti,og01a1b" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, og01a1b_of_match);
+
static struct i2c_driver og01a1b_i2c_driver = {
.driver = {
.name = "og01a1b",
+ .pm = &og01a1b_pm_ops,
.acpi_match_table = ACPI_PTR(og01a1b_acpi_ids),
+ .of_match_table = og01a1b_of_match,
},
.probe = og01a1b_probe,
.remove = og01a1b_remove,
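
The probe flow above powers the sensor by hand (to read CHIP_ID) before handing control to runtime PM. A hedged sketch of that handover, with a hypothetical foo driver whose runtime callbacks mirror og01a1b_power_off()/og01a1b_power_on():

#include <linux/i2c.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_power_on(struct device *dev)  { return 0; } /* hypothetical */
static int foo_power_off(struct device *dev) { return 0; } /* hypothetical */

static int foo_probe_pm(struct i2c_client *client)
{
	int ret;

	/* Power on manually so probe can talk to the chip. */
	ret = foo_power_on(&client->dev);
	if (ret)
		return ret;

	/*
	 * Tell the PM core the device is already active, then let it
	 * idle; the idle transition invokes the runtime-suspend handler
	 * (foo_power_off) once the device is unused.
	 */
	pm_runtime_set_active(&client->dev);
	pm_runtime_enable(&client->dev);
	pm_runtime_idle(&client->dev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_power_off, foo_power_on, NULL)
};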
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 09387e335d80..7a3fc1d28514 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1740,8 +1740,8 @@ static void ov13858_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov13858_id_table[] = {
- {"ov13858", 0},
- {},
+ { "ov13858" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ov13858_id_table);
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 67c4bd2916e8..d27fc2df64e6 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -1271,7 +1271,7 @@ static void ov2640_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov2640_id[] = {
- { "ov2640", 0 },
+ { "ov2640" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov2640_id);
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index d1653d7431d0..06b7896c3eaf 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1551,8 +1551,8 @@ static const struct dev_pm_ops ov2659_pm_ops = {
};
static const struct i2c_device_id ov2659_id[] = {
- { "ov2659", 0 },
- { /* sentinel */ },
+ { "ov2659" },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov2659_id);
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 5162d45fe73b..c1d3fce4a7d3 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -4003,8 +4003,8 @@ static const struct dev_pm_ops ov5640_pm_ops = {
};
static const struct i2c_device_id ov5640_id[] = {
- {"ov5640", 0},
- {},
+ { "ov5640" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ov5640_id);
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 3b22b9e12787..0c32bd2940ec 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -635,7 +635,7 @@ static int ov5645_set_register_array(struct ov5645 *ov5645,
return 0;
}
-static int ov5645_set_power_off(struct device *dev)
+static void __ov5645_set_power_off(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov5645 *ov5645 = to_ov5645(sd);
@@ -643,8 +643,16 @@ static int ov5645_set_power_off(struct device *dev)
ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
gpiod_set_value_cansleep(ov5645->rst_gpio, 1);
gpiod_set_value_cansleep(ov5645->enable_gpio, 0);
- clk_disable_unprepare(ov5645->xclk);
regulator_bulk_disable(OV5645_NUM_SUPPLIES, ov5645->supplies);
+}
+
+static int ov5645_set_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov5645 *ov5645 = to_ov5645(sd);
+
+ __ov5645_set_power_off(dev);
+ clk_disable_unprepare(ov5645->xclk);
return 0;
}
@@ -686,7 +694,8 @@ static int ov5645_set_power_on(struct device *dev)
return 0;
exit:
- ov5645_set_power_off(dev);
+ __ov5645_set_power_off(dev);
+ clk_disable_unprepare(ov5645->xclk);
return ret;
}
@@ -1272,7 +1281,7 @@ static void ov5645_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov5645_id[] = {
- { "ov5645", 0 },
+ { "ov5645" },
{}
};
MODULE_DEVICE_TABLE(i2c, ov5645_id);
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 0fb4d7bff9d1..a727beb9d57e 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1487,7 +1487,7 @@ static const struct dev_pm_ops ov5647_pm_ops = {
};
static const struct i2c_device_id ov5647_id[] = {
- { "ov5647", 0 },
+ { "ov5647" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov5647_id);
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 3641911bc73f..5b5127f8953f 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -972,12 +972,10 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
static int ov5675_power_off(struct device *dev)
{
- /* 512 xvclk cycles after the last SCCB transation or MIPI frame end */
- u32 delay_us = DIV_ROUND_UP(512, OV5675_XVCLK_19_2 / 1000 / 1000);
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov5675 *ov5675 = to_ov5675(sd);
- usleep_range(delay_us, delay_us * 2);
+ usleep_range(90, 100);
clk_disable_unprepare(ov5675->xvclk);
gpiod_set_value_cansleep(ov5675->reset_gpio, 1);
@@ -988,7 +986,6 @@ static int ov5675_power_off(struct device *dev)
static int ov5675_power_on(struct device *dev)
{
- u32 delay_us = DIV_ROUND_UP(8192, OV5675_XVCLK_19_2 / 1000 / 1000);
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov5675 *ov5675 = to_ov5675(sd);
int ret;
@@ -1014,8 +1011,11 @@ static int ov5675_power_on(struct device *dev)
gpiod_set_value_cansleep(ov5675->reset_gpio, 0);
- /* 8192 xvclk cycles prior to the first SCCB transation */
- usleep_range(delay_us, delay_us * 2);
+ /* Worst case quiescence gap is 1.365 milliseconds @ 6 MHz XVCLK
+ * Add an additional threshold grace period to ensure reset
+ * completion before initiating our first I2C transaction.
+ */
+ usleep_range(1500, 1600);
return 0;
}
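
The fixed sleep ranges above can be reproduced from the cycle counts in the removed code; a hedged sketch of the worst-case arithmetic, taking the 6 MHz XVCLK named in the new comment as the slowest supported clock:

#include <linux/math.h>
#include <linux/time64.h>
#include <linux/types.h>

#define XVCLK_MIN_HZ	6000000UL	/* assumption from the comment above */

static u32 cycles_to_us(u32 cycles)
{
	return DIV_ROUND_UP_ULL((u64)cycles * USEC_PER_SEC, XVCLK_MIN_HZ);
}

/*
 * cycles_to_us(512)  ->   86 us, inside usleep_range(90, 100)
 * cycles_to_us(8192) -> 1366 us, inside usleep_range(1500, 1600)
 */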
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index b65befb22a79..9c7627161142 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -1128,7 +1128,7 @@ static void ov6650_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov6650_id[] = {
- { "ov6650", 0 },
+ { "ov6650" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov6650_id);
diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c
index 293f5f404358..9f68d89936eb 100644
--- a/drivers/media/i2c/ov7640.c
+++ b/drivers/media/i2c/ov7640.c
@@ -77,7 +77,7 @@ static void ov7640_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov7640_id[] = {
- { "ov7640", 0 },
+ { "ov7640" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov7640_id);
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 3e36a55274ef..3b0fdb3c70c0 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1546,7 +1546,7 @@ static void ov772x_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov772x_id[] = {
- { "ov772x", 0 },
+ { "ov772x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov772x_id);
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 47b1b14d8796..0830676e5d5a 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1152,7 +1152,7 @@ static int __maybe_unused ov7740_runtime_resume(struct device *dev)
}
static const struct i2c_device_id ov7740_id[] = {
- { "ov7740", 0 },
+ { "ov7740" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov7740_id);
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index e9a52a8a9dc0..01dbc0ba89c8 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -751,7 +751,7 @@ static void ov9640_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov9640_id[] = {
- { "ov9640", 0 },
+ { "ov9640" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ov9640_id);
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 66cd0e9ddc9a..56df97c9886b 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1566,8 +1566,8 @@ static void ov965x_remove(struct i2c_client *client)
}
static const struct i2c_device_id ov965x_id[] = {
- { "OV9650", 0 },
- { "OV9652", 0 },
+ { "OV9650" },
+ { "OV9652" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, ov965x_id);
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index a59db10153cd..b7ca39f63dba 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1410,7 +1410,7 @@ static void rj54n1_remove(struct i2c_client *client)
}
static const struct i2c_device_id rj54n1_id[] = {
- { "rj54n1cb0c", 0 },
+ { "rj54n1cb0c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, rj54n1_id);
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index cf6be509af33..7716dfe2b8c9 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1392,6 +1392,16 @@ err_reg_dis:
return ret;
}
+/*
+ * This function has been created just to avoid a smatch warning,
+ * please do not merge it into __s5c73m3_power_off() until you have
+ * confirmed that it does not introduce a new warning.
+ */
+static void s5c73m3_enable_clk(struct s5c73m3 *state)
+{
+ clk_prepare_enable(state->clock);
+}
+
static int __s5c73m3_power_off(struct s5c73m3 *state)
{
int i, ret;
@@ -1421,7 +1431,8 @@ err:
state->supplies[i].supply, r);
}
- clk_prepare_enable(state->clock);
+ s5c73m3_enable_clk(state);
+
return ret;
}
@@ -1724,7 +1735,7 @@ static void s5c73m3_remove(struct i2c_client *client)
}
static const struct i2c_device_id s5c73m3_id[] = {
- { DRIVER_NAME, 0 },
+ { DRIVER_NAME },
{ }
};
MODULE_DEVICE_TABLE(i2c, s5c73m3_id);
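
The helper above exists only to make the ignored clk_prepare_enable() return value explicit to static checkers; the same idiom reappears in tc358746.c further down. A generic sketch:

#include <linux/clk.h>

/*
 * The surrounding error path is already failing, so a secondary
 * failure to re-enable the clock is not actionable; the void wrapper
 * documents that the result is discarded on purpose.
 */
static void foo_clk_reenable(struct clk *clk)
{
	clk_prepare_enable(clk);
}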
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 6b11039c3579..24f399cd2124 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -2018,8 +2018,8 @@ static void s5k5baf_remove(struct i2c_client *c)
}
static const struct i2c_device_id s5k5baf_id[] = {
- { S5K5BAF_DRIVER_NAME, 0 },
- { },
+ { S5K5BAF_DRIVER_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, s5k5baf_id);
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index dea9fc09356f..fb09e4560d8a 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -496,7 +496,7 @@ static void saa6588_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id saa6588_id[] = {
- { "saa6588", 0 },
+ { "saa6588" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa6588_id);
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 897eaa669b86..1ed8b5edb3fb 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -770,7 +770,7 @@ static void saa6752hs_remove(struct i2c_client *client)
}
static const struct i2c_device_id saa6752hs_id[] = {
- { "saa6752hs", 0 },
+ { "saa6752hs" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa6752hs_id);
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index 1520790338ce..942aeeb40c52 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -439,7 +439,7 @@ static void saa7110_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id saa7110_id[] = {
- { "saa7110", 0 },
+ { "saa7110" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa7110_id);
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index 933ec0171430..b0793bb0c02a 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -1334,7 +1334,7 @@ static void saa717x_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id saa717x_id[] = {
- { "saa717x", 0 },
+ { "saa717x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa717x_id);
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index 5535d71f4860..c04e452a332b 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -334,7 +334,7 @@ static void saa7185_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id saa7185_id[] = {
- { "saa7185", 0 },
+ { "saa7185" },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa7185_id);
diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c
index 0f53834f3ae4..16072a9f8247 100644
--- a/drivers/media/i2c/sony-btf-mpx.c
+++ b/drivers/media/i2c/sony-btf-mpx.c
@@ -366,7 +366,7 @@ static void sony_btf_mpx_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id sony_btf_mpx_id[] = {
- { "sony-btf-mpx", 0 },
+ { "sony-btf-mpx" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sony_btf_mpx_id);
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 0307fee3cce9..65d58ddf0287 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2197,7 +2197,7 @@ static void tc358743_remove(struct i2c_client *client)
}
static const struct i2c_device_id tc358743_id[] = {
- {"tc358743", 0},
+ { "tc358743" },
{}
};
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index edf79107adc5..389582420ba7 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -1616,6 +1616,16 @@ static void tc358746_remove(struct i2c_client *client)
pm_runtime_dont_use_autosuspend(sd->dev);
}
+/*
+ * This function has been created just to avoid a smatch warning,
+ * please do not merge it into tc358746_suspend until you have
+ * confirmed that it does not introduce a new warning.
+ */
+static void tc358746_clk_enable(struct tc358746 *tc358746)
+{
+ clk_prepare_enable(tc358746->refclk);
+}
+
static int tc358746_suspend(struct device *dev)
{
struct tc358746 *tc358746 = dev_get_drvdata(dev);
@@ -1626,7 +1636,7 @@ static int tc358746_suspend(struct device *dev)
err = regulator_bulk_disable(ARRAY_SIZE(tc358746_supplies),
tc358746->supplies);
if (err)
- clk_prepare_enable(tc358746->refclk);
+ tc358746_clk_enable(tc358746);
return err;
}
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 58ce8fec3041..3b7e5ff5b010 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2514,7 +2514,7 @@ static void tda1997x_codec_remove(struct snd_soc_component *component)
{
}
-static struct snd_soc_component_driver tda1997x_codec_driver = {
+static const struct snd_soc_component_driver tda1997x_codec_driver = {
.probe = tda1997x_codec_probe,
.remove = tda1997x_codec_remove,
.idle_bias_on = 1,
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index 6ecdc8e2e0c6..76ef0fdddf76 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -400,7 +400,7 @@ static void tda7432_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda7432_id[] = {
- { "tda7432", 0 },
+ { "tda7432" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda7432_id);
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index 1911ef2126be..d61da811c9da 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -182,7 +182,7 @@ static void tda9840_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda9840_id[] = {
- { "tda9840", 0 },
+ { "tda9840" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda9840_id);
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index 3ed6e441d515..4aaf66353610 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -141,7 +141,7 @@ static void tea6415c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tea6415c_id[] = {
- { "tea6415c", 0 },
+ { "tea6415c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tea6415c_id);
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 63f23784bb41..5c5ea3973251 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -123,7 +123,7 @@ static void tea6420_remove(struct i2c_client *client)
}
static const struct i2c_device_id tea6420_id[] = {
- { "tea6420", 0 },
+ { "tea6420" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tea6420_id);
diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c
index 19bd923a7315..75225ff5eff6 100644
--- a/drivers/media/i2c/thp7312.c
+++ b/drivers/media/i2c/thp7312.c
@@ -1503,7 +1503,7 @@ static int __thp7312_flash_reg_read(struct thp7312_device *thp7312,
msgs[0].addr = client->addr;
msgs[0].flags = 0;
- msgs[0].len = sizeof(thp7312_cmd_read_reg),
+ msgs[0].len = sizeof(thp7312_cmd_read_reg);
msgs[0].buf = (u8 *)thp7312_cmd_read_reg;
msgs[1].addr = client->addr;
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 49ed83a0ac94..7526fabc7ee4 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -369,9 +369,9 @@ static void ths7303_remove(struct i2c_client *client)
}
static const struct i2c_device_id ths7303_id[] = {
- {"ths7303", 0},
- {"ths7353", 0},
- {},
+ { "ths7303" },
+ { "ths7353" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ths7303_id);
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index ce0a7f809f19..686f10641c7a 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -487,8 +487,8 @@ static void ths8200_remove(struct i2c_client *client)
}
static const struct i2c_device_id ths8200_id[] = {
- { "ths8200", 0 },
- {},
+ { "ths8200" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ths8200_id);
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index d800ff8af1ff..b7b31b6192af 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -188,7 +188,7 @@ static void tlv320aic23b_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id tlv320aic23b_id[] = {
- { "tlv320aic23b", 0 },
+ { "tlv320aic23b" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tlv320aic23b_id);
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index ba20f35cafd5..654725dfafac 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -2086,7 +2086,7 @@ static void tvaudio_remove(struct i2c_client *client)
detect which device is present. So rather than listing all supported
devices here, we pretend to support a single, fake device type. */
static const struct i2c_device_id tvaudio_id[] = {
- { "tvaudio", 0 },
+ { "tvaudio" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tvaudio_id);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 64b91aa3c82a..e3675c744d9e 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -514,7 +514,7 @@ struct i2c_vbi_ram_value {
* and so on. There are 16 possible locations from 0 to 15.
*/
-static struct i2c_vbi_ram_value vbi_ram_default[] = {
+static const struct i2c_vbi_ram_value vbi_ram_default[] = {
/*
* FIXME: Current api doesn't handle all VBI types, those not
@@ -1812,7 +1812,7 @@ static const struct regmap_access_table tvp5150_readable_table = {
.n_yes_ranges = ARRAY_SIZE(tvp5150_readable_ranges),
};
-static struct regmap_config tvp5150_config = {
+static const struct regmap_config tvp5150_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
@@ -2265,7 +2265,7 @@ static const struct dev_pm_ops tvp5150_pm_ops = {
};
static const struct i2c_device_id tvp5150_id[] = {
- { "tvp5150", 0 },
+ { "tvp5150" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tvp5150_id);
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index ea01bd86450e..c09a5bd71fd0 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1070,7 +1070,7 @@ static void tvp7002_remove(struct i2c_client *c)
/* I2C Device ID table */
static const struct i2c_device_id tvp7002_id[] = {
- { "tvp7002", 0 },
+ { "tvp7002" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tvp7002_id);
diff --git a/drivers/media/i2c/tw2804.c b/drivers/media/i2c/tw2804.c
index 6a2521e3a25c..3d154f4fb5f9 100644
--- a/drivers/media/i2c/tw2804.c
+++ b/drivers/media/i2c/tw2804.c
@@ -414,7 +414,7 @@ static void tw2804_remove(struct i2c_client *client)
}
static const struct i2c_device_id tw2804_id[] = {
- { "tw2804", 0 },
+ { "tw2804" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw2804_id);
diff --git a/drivers/media/i2c/tw9900.c b/drivers/media/i2c/tw9900.c
index bc7623ec46e5..53efdeaed1db 100644
--- a/drivers/media/i2c/tw9900.c
+++ b/drivers/media/i2c/tw9900.c
@@ -753,7 +753,7 @@ static const struct dev_pm_ops tw9900_pm_ops = {
};
static const struct i2c_device_id tw9900_id[] = {
- { "tw9900", 0 },
+ { "tw9900" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw9900_id);
diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c
index 996be3960af3..b996a05e56f2 100644
--- a/drivers/media/i2c/tw9903.c
+++ b/drivers/media/i2c/tw9903.c
@@ -245,7 +245,7 @@ static void tw9903_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id tw9903_id[] = {
- { "tw9903", 0 },
+ { "tw9903" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw9903_id);
diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c
index 25c625f6d6e4..6220f4fddbab 100644
--- a/drivers/media/i2c/tw9906.c
+++ b/drivers/media/i2c/tw9906.c
@@ -213,7 +213,7 @@ static void tw9906_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id tw9906_id[] = {
- { "tw9906", 0 },
+ { "tw9906" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw9906_id);
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index 6dffaaa9ed56..f3e400304e04 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -996,7 +996,7 @@ static void tw9910_remove(struct i2c_client *client)
}
static const struct i2c_device_id tw9910_id[] = {
- { "tw9910", 0 },
+ { "tw9910" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tw9910_id);
diff --git a/drivers/media/i2c/uda1342.c b/drivers/media/i2c/uda1342.c
index abd052a44bd7..2e4540ee2df2 100644
--- a/drivers/media/i2c/uda1342.c
+++ b/drivers/media/i2c/uda1342.c
@@ -79,7 +79,7 @@ static void uda1342_remove(struct i2c_client *client)
}
static const struct i2c_device_id uda1342_id[] = {
- { "uda1342", 0 },
+ { "uda1342" },
{ }
};
MODULE_DEVICE_TABLE(i2c, uda1342_id);
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index 54c2ba0ba375..9d0b72a213be 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -219,7 +219,7 @@ static void upd64031a_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id upd64031a_id[] = {
- { "upd64031a", 0 },
+ { "upd64031a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, upd64031a_id);
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 2a820589a4cb..2e99ed5da42c 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -190,7 +190,7 @@ static void upd64083_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id upd64083_id[] = {
- { "upd64083", 0 },
+ { "upd64083" },
{ }
};
MODULE_DEVICE_TABLE(i2c, upd64083_id);
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 0ba3c2b68037..06fd46a63c72 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -172,7 +172,7 @@ static void vp27smpx_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id vp27smpx_id[] = {
- { "vp27smpx", 0 },
+ { "vp27smpx" },
{ }
};
MODULE_DEVICE_TABLE(i2c, vp27smpx_id);
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 1eaae886f217..5f1a22284168 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -535,9 +535,9 @@ static void vpx3220_remove(struct i2c_client *client)
}
static const struct i2c_device_id vpx3220_id[] = {
- { "vpx3220a", 0 },
- { "vpx3216b", 0 },
- { "vpx3214c", 0 },
+ { "vpx3220a" },
+ { "vpx3216b" },
+ { "vpx3214c" },
{ }
};
MODULE_DEVICE_TABLE(i2c, vpx3220_id);
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index 19bf7a00dff9..c091b78a5b41 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -243,7 +243,7 @@ static void wm8739_remove(struct i2c_client *client)
}
static const struct i2c_device_id wm8739_id[] = {
- { "wm8739", 0 },
+ { "wm8739" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8739_id);
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index d1b716fd6f11..619b2988577c 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -289,7 +289,7 @@ static void wm8775_remove(struct i2c_client *client)
}
static const struct i2c_device_id wm8775_id[] = {
- { "wm8775", 0 },
+ { "wm8775" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8775_id);
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index 318e267e798e..56444edaf136 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -204,7 +204,6 @@ static const struct file_operations media_devnode_fops = {
#endif /* CONFIG_COMPAT */
.release = media_release,
.poll = media_poll,
- .llseek = no_llseek,
};
int __must_check media_devnode_register(struct media_device *mdev,
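
Dropping the .llseek initialiser relies on VFS behaviour after the no_llseek removal: FMODE_LSEEK is cleared when f_op->llseek is NULL, so an unset field keeps the node non-seekable. A sketch under that assumption:

#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	/* no .llseek: FMODE_LSEEK stays clear, lseek() fails with -ESPIPE */
};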
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
index addb8f2d8939..e064914c476e 100644
--- a/drivers/media/mc/mc-request.c
+++ b/drivers/media/mc/mc-request.c
@@ -254,12 +254,12 @@ media_request_get_by_fd(struct media_device *mdev, int request_fd)
return ERR_PTR(-EBADR);
f = fdget(request_fd);
- if (!f.file)
+ if (!fd_file(f))
goto err_no_req_fd;
- if (f.file->f_op != &request_fops)
+ if (fd_file(f)->f_op != &request_fops)
goto err_fput;
- req = f.file->private_data;
+ req = fd_file(f)->private_data;
if (req->mdev != mdev)
goto err_fput;
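
Here fdget() now returns an opaque struct fd whose file pointer is reached through the fd_file() accessor instead of the old f.file member. A hedged sketch of the lookup pattern:

#include <linux/errno.h>
#include <linux/file.h>

static int foo_with_fd(int ufd)
{
	struct fd f = fdget(ufd);

	if (!fd_file(f))	/* no file behind this descriptor */
		return -EBADF;

	/* ... inspect fd_file(f)->f_op, private_data, etc. ... */

	fdput(f);		/* releases the reference as before */
	return 0;
}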
diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig
index 40e20f0aa5ae..49e4fb696573 100644
--- a/drivers/media/pci/intel/ipu6/Kconfig
+++ b/drivers/media/pci/intel/ipu6/Kconfig
@@ -4,8 +4,13 @@ config VIDEO_INTEL_IPU6
depends on VIDEO_DEV
depends on X86 && X86_64 && HAS_DMA
depends on IPU_BRIDGE || !IPU_BRIDGE
+ #
+ # This driver incorrectly tries to override the dma_ops. It should
+ # never have done that, but for now keep it working on architectures
+ # that use dma ops
+ #
+ depends on ARCH_HAS_DMA_OPS
select AUXILIARY_BUS
- select DMA_OPS
select IOMMU_IOVA
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
index bbd646378ab3..7fb707d35309 100644
--- a/drivers/media/pci/intel/ipu6/ipu6.c
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -390,20 +390,18 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
IPU6_ISYS_NAME);
if (IS_ERR(isys_adev)) {
- dev_err_probe(dev, PTR_ERR(isys_adev),
- "ipu6_bus_initialize_device isys failed\n");
kfree(pdata);
- return ERR_CAST(isys_adev);
+ return dev_err_cast_probe(dev, isys_adev,
+ "ipu6_bus_initialize_device isys failed\n");
}
isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
&ipdata->hw_variant);
if (IS_ERR(isys_adev->mmu)) {
- dev_err_probe(dev, PTR_ERR(isys_adev->mmu),
- "ipu6_mmu_init(isys_adev->mmu) failed\n");
put_device(&isys_adev->auxdev.dev);
kfree(pdata);
- return ERR_CAST(isys_adev->mmu);
+ return dev_err_cast_probe(dev, isys_adev->mmu,
+ "ipu6_mmu_init(isys_adev->mmu) failed\n");
}
isys_adev->mmu->dev = &isys_adev->auxdev.dev;
@@ -436,20 +434,18 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
IPU6_PSYS_NAME);
if (IS_ERR(psys_adev)) {
- dev_err_probe(&pdev->dev, PTR_ERR(psys_adev),
- "ipu6_bus_initialize_device psys failed\n");
kfree(pdata);
- return ERR_CAST(psys_adev);
+ return dev_err_cast_probe(&pdev->dev, psys_adev,
+ "ipu6_bus_initialize_device psys failed\n");
}
psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
&ipdata->hw_variant);
if (IS_ERR(psys_adev->mmu)) {
- dev_err_probe(&pdev->dev, PTR_ERR(psys_adev->mmu),
- "ipu6_mmu_init(psys_adev->mmu) failed\n");
put_device(&psys_adev->auxdev.dev);
kfree(pdata);
- return ERR_CAST(psys_adev->mmu);
+ return dev_err_cast_probe(&pdev->dev, psys_adev->mmu,
+ "ipu6_mmu_init(psys_adev->mmu) failed\n");
}
psys_adev->mmu->dev = &psys_adev->auxdev.dev;
@@ -576,9 +572,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
- ret = dma_set_max_seg_size(dev, UINT_MAX);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");
+ dma_set_max_seg_size(dev, UINT_MAX);
ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
if (ret)
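
dev_err_cast_probe() folds the old two-step "log PTR_ERR(), then return ERR_CAST()" error exit into a single expression, as the hunks above show. A hedged sketch with a hypothetical constructor:

#include <linux/dev_printk.h>
#include <linux/err.h>

struct foo;
struct foo *foo_create(struct device *dev);	/* hypothetical */

static struct foo *foo_init(struct device *dev)
{
	struct foo *obj = foo_create(dev);

	if (IS_ERR(obj))
		/* logs PTR_ERR(obj) and returns ERR_CAST(obj) in one go */
		return dev_err_cast_probe(dev, obj, "foo_create failed\n");

	return obj;
}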
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index ab4f07e2e560..2819bbdab484 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -302,7 +302,7 @@ static int init_i2c(struct mgb4_dev *mgbdev)
/* create dummy clock required by the xiic-i2c adapter */
snprintf(clk_name, sizeof(clk_name), "xiic-i2c.%d", id);
mgbdev->i2c_clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL,
- 0, 125000000);
+ 0, MGB4_HW_FREQ);
if (IS_ERR(mgbdev->i2c_clk)) {
dev_err(dev, "failed to register I2C clock\n");
return PTR_ERR(mgbdev->i2c_clk);
diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h
index 2a946e46aec1..b52cd67270b5 100644
--- a/drivers/media/pci/mgb4/mgb4_core.h
+++ b/drivers/media/pci/mgb4/mgb4_core.h
@@ -13,6 +13,8 @@
#include <linux/dmaengine.h>
#include "mgb4_regs.h"
+#define MGB4_HW_FREQ 125000000
+
#define MGB4_VIN_DEVICES 2
#define MGB4_VOUT_DEVICES 2
diff --git a/drivers/media/pci/mgb4/mgb4_io.h b/drivers/media/pci/mgb4/mgb4_io.h
index 8698db1be4a9..dd8696d7df31 100644
--- a/drivers/media/pci/mgb4/mgb4_io.h
+++ b/drivers/media/pci/mgb4/mgb4_io.h
@@ -7,11 +7,9 @@
#ifndef __MGB4_IO_H__
#define __MGB4_IO_H__
+#include <linux/math64.h>
#include <media/v4l2-dev.h>
-
-#define MGB4_DEFAULT_WIDTH 1280
-#define MGB4_DEFAULT_HEIGHT 640
-#define MGB4_DEFAULT_PERIOD (125000000 / 60)
+#include "mgb4_core.h"
/* Register access error indication */
#define MGB4_ERR_NO_REG 0xFFFFFFFE
@@ -20,6 +18,9 @@
#define MGB4_ERR_QUEUE_EMPTY 0xFFFFFFFC
#define MGB4_ERR_QUEUE_FULL 0xFFFFFFFB
+#define MGB4_PERIOD(numerator, denominator) \
+ ((u32)div_u64((MGB4_HW_FREQ * (u64)(numerator)), (denominator)))
+
struct mgb4_frame_buffer {
struct vb2_v4l2_buffer vb;
struct list_head list;
@@ -30,4 +31,24 @@ static inline struct mgb4_frame_buffer *to_frame_buffer(struct vb2_v4l2_buffer *
return container_of(vbuf, struct mgb4_frame_buffer, vb);
}
+static inline bool has_yuv_and_timeperframe(struct mgb4_regs *video)
+{
+ u32 status = mgb4_read_reg(video, 0xD0);
+
+ return (status & (1U << 8));
+}
+
+#define has_yuv(video) has_yuv_and_timeperframe(video)
+#define has_timeperframe(video) has_yuv_and_timeperframe(video)
+
+static inline u32 pixel_size(struct v4l2_dv_timings *timings)
+{
+ struct v4l2_bt_timings *bt = &timings->bt;
+
+ u32 height = bt->height + bt->vfrontporch + bt->vsync + bt->vbackporch;
+ u32 width = bt->width + bt->hfrontporch + bt->hsync + bt->hbackporch;
+
+ return width * height;
+}
+
#endif
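
For reference, a usage sketch of the new period macro: at the fixed 125 MHz hardware clock, a v4l2_fract time-per-frame maps to timer ticks as MGB4_HW_FREQ * numerator / denominator.

#include <linux/types.h>
#include "mgb4_io.h"	/* MGB4_PERIOD(), MGB4_HW_FREQ */

static u32 foo_period_examples(void)
{
	u32 p60 = MGB4_PERIOD(1, 60);	      /* 1/60 s    -> 2083333 ticks */
	u32 p2997 = MGB4_PERIOD(1001, 30000); /* 29.97 fps -> 4170833 ticks */

	return p60 + p2997;	/* sketch only */
}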
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_out.c b/drivers/media/pci/mgb4/mgb4_sysfs_out.c
index 9f6e81c57726..573aa61c69d4 100644
--- a/drivers/media/pci/mgb4/mgb4_sysfs_out.c
+++ b/drivers/media/pci/mgb4/mgb4_sysfs_out.c
@@ -229,9 +229,9 @@ static ssize_t frame_rate_show(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
u32 period = mgb4_read_reg(&voutdev->mgbdev->video,
- voutdev->config->regs.frame_period);
+ voutdev->config->regs.frame_limit);
- return sprintf(buf, "%u\n", 125000000 / period);
+ return sprintf(buf, "%u\n", period ? MGB4_HW_FREQ / period : 0);
}
/*
@@ -245,14 +245,15 @@ static ssize_t frame_rate_store(struct device *dev,
struct video_device *vdev = to_video_device(dev);
struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
unsigned long val;
- int ret;
+ int limit, ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
+ limit = val ? MGB4_HW_FREQ / val : 0;
mgb4_write_reg(&voutdev->mgbdev->video,
- voutdev->config->regs.frame_period, 125000000 / val);
+ voutdev->config->regs.frame_limit, limit);
return count;
}
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 2cd78c539889..e9332abb3172 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/align.h>
#include <linux/dma/amd_xdma.h>
+#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>
@@ -34,8 +35,8 @@ ATTRIBUTE_GROUPS(mgb4_fpdl3_in);
ATTRIBUTE_GROUPS(mgb4_gmsl_in);
static const struct mgb4_vin_config vin_cfg[] = {
- {0, 0, 0, 6, {0x10, 0x00, 0x04, 0x08, 0x1C, 0x14, 0x18, 0x20, 0x24, 0x28}},
- {1, 1, 1, 7, {0x40, 0x30, 0x34, 0x38, 0x4C, 0x44, 0x48, 0x50, 0x54, 0x58}}
+ {0, 0, 0, 6, {0x10, 0x00, 0x04, 0x08, 0x1C, 0x14, 0x18, 0x20, 0x24, 0x28, 0xE8}},
+ {1, 1, 1, 7, {0x40, 0x30, 0x34, 0x38, 0x4C, 0x44, 0x48, 0x50, 0x54, 0x58, 0xEC}}
};
static const struct i2c_board_info fpdl3_deser_info[] = {
@@ -76,6 +77,9 @@ static const struct v4l2_dv_timings_cap video_timings_cap = {
},
};
+/* Dummy timings when no signal present */
+static const struct v4l2_dv_timings cea1080p60 = V4L2_DV_BT_CEA_1920X1080P60;
+
/*
* Returns the video output connected with the given video input if the input
* is in loopback mode.
@@ -186,8 +190,11 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
struct device *alloc_devs[])
{
struct mgb4_vin_dev *vindev = vb2_get_drv_priv(q);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, vindev->config->regs.config);
+ u32 pixelsize = (config & (1U << 16)) ? 2 : 4;
unsigned int size = (vindev->timings.bt.width + vindev->padding)
- * vindev->timings.bt.height * 4;
+ * vindev->timings.bt.height * pixelsize;
/*
* If I/O reconfiguration is in process, do not allow to start
@@ -220,9 +227,12 @@ static int buffer_init(struct vb2_buffer *vb)
static int buffer_prepare(struct vb2_buffer *vb)
{
struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vb->vb2_queue);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
struct device *dev = &vindev->mgbdev->pdev->dev;
+ u32 config = mgb4_read_reg(video, vindev->config->regs.config);
+ u32 pixelsize = (config & (1U << 16)) ? 2 : 4;
unsigned int size = (vindev->timings.bt.width + vindev->padding)
- * vindev->timings.bt.height * 4;
+ * vindev->timings.bt.height * pixelsize;
if (vb2_plane_size(vb, 0) < size) {
dev_err(dev, "buffer too small (%lu < %u)\n",
@@ -312,7 +322,8 @@ static int fh_open(struct file *file)
if (!v4l2_fh_is_singular_file(file))
goto out;
- get_timings(vindev, &vindev->timings);
+ if (get_timings(vindev, &vindev->timings) < 0)
+ vindev->timings = cea1080p60;
set_loopback_padding(vindev, vindev->padding);
out:
@@ -359,33 +370,42 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index != 0)
- return -EINVAL;
-
- f->pixelformat = V4L2_PIX_FMT_ABGR32;
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
- return 0;
+ if (f->index == 0) {
+ f->pixelformat = V4L2_PIX_FMT_ABGR32;
+ return 0;
+ } else if (f->index == 1 && has_yuv(video)) {
+ f->pixelformat = V4L2_PIX_FMT_YUYV;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *ival)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
if (ival->index != 0)
return -EINVAL;
- if (ival->pixel_format != V4L2_PIX_FMT_ABGR32)
+ if (!(ival->pixel_format == V4L2_PIX_FMT_ABGR32 ||
+ (has_yuv(video) && ival->pixel_format == V4L2_PIX_FMT_YUYV)))
return -EINVAL;
if (ival->width != vindev->timings.bt.width ||
ival->height != vindev->timings.bt.height)
return -EINVAL;
- ival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
- ival->stepwise.min.denominator = 60;
- ival->stepwise.min.numerator = 1;
- ival->stepwise.max.denominator = 1;
- ival->stepwise.max.numerator = 1;
- ival->stepwise.step = ival->stepwise.max;
+ ival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+ ival->stepwise.max.denominator = MGB4_HW_FREQ;
+ ival->stepwise.max.numerator = 0xFFFFFFFF;
+ ival->stepwise.min.denominator = vindev->timings.bt.pixelclock;
+ ival->stepwise.min.numerator = pixel_size(&vindev->timings);
+ ival->stepwise.step.denominator = MGB4_HW_FREQ;
+ ival->stepwise.step.numerator = 1;
return 0;
}
@@ -393,13 +413,29 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, vindev->config->regs.config);
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
f->fmt.pix.width = vindev->timings.bt.width;
f->fmt.pix.height = vindev->timings.bt.height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
- f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 4;
+
+ if (config & (1U << 16)) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+ if (config & (1U << 20)) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ } else {
+ if (config & (1U << 19))
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ }
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 2;
+ } else {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 4;
+ }
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
return 0;
@@ -408,14 +444,30 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 pixelsize;
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
f->fmt.pix.width = vindev->timings.bt.width;
f->fmt.pix.height = vindev->timings.bt.height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
- f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4,
- ALIGN_DOWN(f->fmt.pix.bytesperline, 4));
+
+ if (has_yuv(video) && f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ pixelsize = 2;
+ if (!(f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709 ||
+ f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M))
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ } else {
+ pixelsize = 4;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ }
+
+ if (f->fmt.pix.bytesperline > f->fmt.pix.width * pixelsize &&
+ f->fmt.pix.bytesperline < f->fmt.pix.width * pixelsize * 2)
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline,
+ pixelsize);
+ else
+ f->fmt.pix.bytesperline = f->fmt.pix.width * pixelsize;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
return 0;
@@ -425,13 +477,36 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
struct mgb4_regs *video = &vindev->mgbdev->video;
+ u32 config, pixelsize;
if (vb2_is_busy(&vindev->queue))
return -EBUSY;
vidioc_try_fmt(file, priv, f);
- vindev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4;
+ config = mgb4_read_reg(video, vindev->config->regs.config);
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ pixelsize = 2;
+ config |= 1U << 16;
+
+ if (f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709) {
+ config |= 1U << 20;
+ config |= 1U << 19;
+ } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M) {
+ config &= ~(1U << 20);
+ config |= 1U << 19;
+ } else {
+ config &= ~(1U << 20);
+ config &= ~(1U << 19);
+ }
+ } else {
+ pixelsize = 4;
+ config &= ~(1U << 16);
+ }
+ mgb4_write_reg(video, vindev->config->regs.config, config);
+
+ vindev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width
+ * pixelsize)) / pixelsize;
mgb4_write_reg(video, vindev->config->regs.padding, vindev->padding);
set_loopback_padding(vindev, vindev->padding);
@@ -467,7 +542,8 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
- if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_ABGR32)
+ if (fsize->index != 0 || !(fsize->pixel_format == V4L2_PIX_FMT_ABGR32 ||
+ fsize->pixel_format == V4L2_PIX_FMT_YUYV))
return -EINVAL;
fsize->discrete.width = vindev->timings.bt.width;
@@ -488,27 +564,56 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int vidioc_parm(struct file *file, void *priv,
- struct v4l2_streamparm *parm)
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
{
struct mgb4_vin_dev *vindev = video_drvdata(file);
struct mgb4_regs *video = &vindev->mgbdev->video;
- const struct mgb4_vin_regs *regs = &vindev->config->regs;
- struct v4l2_fract timeperframe = {
- .numerator = mgb4_read_reg(video, regs->frame_period),
- .denominator = 125000000,
- };
-
- if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
+ struct v4l2_fract *tpf = &parm->parm.capture.timeperframe;
+ u32 timer;
parm->parm.capture.readbuffers = 2;
- parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- parm->parm.capture.timeperframe = timeperframe;
+
+ if (has_timeperframe(video)) {
+ timer = mgb4_read_reg(video, vindev->config->regs.timer);
+ if (timer < 0xFFFF) {
+ tpf->numerator = pixel_size(&vindev->timings);
+ tpf->denominator = vindev->timings.bt.pixelclock;
+ } else {
+ tpf->numerator = timer;
+ tpf->denominator = MGB4_HW_FREQ;
+ }
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ }
return 0;
}
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+ struct mgb4_regs *video = &vindev->mgbdev->video;
+ struct v4l2_fract *tpf = &parm->parm.capture.timeperframe;
+ u32 period, timer;
+
+ if (has_timeperframe(video)) {
+ timer = tpf->denominator ?
+ MGB4_PERIOD(tpf->numerator, tpf->denominator) : 0;
+ if (timer) {
+ period = MGB4_PERIOD(pixel_size(&vindev->timings),
+ vindev->timings.bt.pixelclock);
+ if (timer < period)
+ timer = 0;
+ }
+
+ mgb4_write_reg(video, vindev->config->regs.timer, timer);
+ }
+
+ return vidioc_g_parm(file, priv, parm);
+}
+
static int vidioc_s_dv_timings(struct file *file, void *fh,
struct v4l2_dv_timings *timings)
{
@@ -592,8 +697,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_g_parm = vidioc_parm,
- .vidioc_s_parm = vidioc_parm,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
.vidioc_dv_timings_cap = vidioc_dv_timings_cap,
.vidioc_enum_dv_timings = vidioc_enum_dv_timings,
.vidioc_g_dv_timings = vidioc_g_dv_timings,
@@ -776,10 +881,16 @@ static void debugfs_init(struct mgb4_vin_dev *vindev)
vindev->regs[7].offset = vindev->config->regs.signal2;
vindev->regs[8].name = "PADDING_PIXELS";
vindev->regs[8].offset = vindev->config->regs.padding;
+ if (has_timeperframe(video)) {
+ vindev->regs[9].name = "TIMER";
+ vindev->regs[9].offset = vindev->config->regs.timer;
+ vindev->regset.nregs = 10;
+ } else {
+ vindev->regset.nregs = 9;
+ }
vindev->regset.base = video->membase;
vindev->regset.regs = vindev->regs;
- vindev->regset.nregs = ARRAY_SIZE(vindev->regs);
debugfs_create_regset32("registers", 0444, vindev->debugfs,
&vindev->regset);
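
The STEPWISE bounds reported by vidioc_enum_frameintervals() above follow from the hardware model: the fastest interval is total frame pixels over the pixel clock, while the slowest interval and the step both come from the 32-bit timer ticking at MGB4_HW_FREQ. A hedged worked example for CEA 1080p60 timings:

#include <linux/videodev2.h>

/* CEA 1080p60: 2200 x 1125 total pixels, 148.5 MHz pixel clock. */
static void foo_1080p60_bounds(struct v4l2_frmivalenum *ival)
{
	ival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
	ival->stepwise.min.numerator = 2200 * 1125;	/* 2475000 pixels */
	ival->stepwise.min.denominator = 148500000;	/* -> exactly 1/60 s */
	ival->stepwise.max.numerator = 0xFFFFFFFF;	/* 32-bit timer max */
	ival->stepwise.max.denominator = 125000000;	/* ~34.4 s ceiling */
	ival->stepwise.step.numerator = 1;
	ival->stepwise.step.denominator = 125000000;	/* one 8 ns tick */
}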
diff --git a/drivers/media/pci/mgb4/mgb4_vin.h b/drivers/media/pci/mgb4/mgb4_vin.h
index 0249b400ad4d..9693bd0ce180 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.h
+++ b/drivers/media/pci/mgb4/mgb4_vin.h
@@ -25,6 +25,7 @@ struct mgb4_vin_regs {
u32 signal;
u32 signal2;
u32 padding;
+ u32 timer;
};
struct mgb4_vin_config {
@@ -59,7 +60,7 @@ struct mgb4_vin_dev {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
struct debugfs_regset32 regset;
- struct debugfs_reg32 regs[9];
+ struct debugfs_reg32 regs[sizeof(struct mgb4_vin_regs) / 4];
#endif
};
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
index 241353ee77a5..998edcbd9723 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.c
+++ b/drivers/media/pci/mgb4/mgb4_vout.c
@@ -16,6 +16,7 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-dv-timings.h>
#include "mgb4_core.h"
#include "mgb4_dma.h"
#include "mgb4_sysfs.h"
@@ -23,12 +24,16 @@
#include "mgb4_cmt.h"
#include "mgb4_vout.h"
+#define DEFAULT_WIDTH 1280
+#define DEFAULT_HEIGHT 640
+#define DEFAULT_PERIOD (MGB4_HW_FREQ / 60)
+
ATTRIBUTE_GROUPS(mgb4_fpdl3_out);
ATTRIBUTE_GROUPS(mgb4_gmsl_out);
static const struct mgb4_vout_config vout_cfg[] = {
- {0, 0, 8, {0x78, 0x60, 0x64, 0x68, 0x74, 0x6C, 0x70, 0x7c}},
- {1, 1, 9, {0x98, 0x80, 0x84, 0x88, 0x94, 0x8c, 0x90, 0x9c}}
+ {0, 0, 8, {0x78, 0x60, 0x64, 0x68, 0x74, 0x6C, 0x70, 0x7C, 0xE0}},
+ {1, 1, 9, {0x98, 0x80, 0x84, 0x88, 0x94, 0x8C, 0x90, 0x9C, 0xE4}}
};
static const struct i2c_board_info fpdl3_ser_info[] = {
@@ -40,6 +45,49 @@ static const struct mgb4_i2c_kv fpdl3_i2c[] = {
{0x05, 0xFF, 0x04}, {0x06, 0xFF, 0x01}, {0xC2, 0xFF, 0x80}
};
+static const struct v4l2_dv_timings_cap video_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .min_width = 320,
+ .max_width = 4096,
+ .min_height = 240,
+ .max_height = 2160,
+ .min_pixelclock = 1843200, /* 320 x 240 x 24Hz */
+ .max_pixelclock = 530841600, /* 4096 x 2160 x 60Hz */
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_CUSTOM,
+ },
+};
+
+static void get_timings(struct mgb4_vout_dev *voutdev,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ const struct mgb4_vout_regs *regs = &voutdev->config->regs;
+
+ u32 hsync = mgb4_read_reg(video, regs->hsync);
+ u32 vsync = mgb4_read_reg(video, regs->vsync);
+ u32 resolution = mgb4_read_reg(video, regs->resolution);
+
+ memset(timings, 0, sizeof(*timings));
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt.width = resolution >> 16;
+ timings->bt.height = resolution & 0xFFFF;
+ if (hsync & (1U << 31))
+ timings->bt.polarities |= V4L2_DV_HSYNC_POS_POL;
+ if (vsync & (1U << 31))
+ timings->bt.polarities |= V4L2_DV_VSYNC_POS_POL;
+ timings->bt.pixelclock = voutdev->freq * 1000;
+ timings->bt.hsync = (hsync & 0x00FF0000) >> 16;
+ timings->bt.vsync = (vsync & 0x00FF0000) >> 16;
+ timings->bt.hbackporch = (hsync & 0x0000FF00) >> 8;
+ timings->bt.hfrontporch = hsync & 0x000000FF;
+ timings->bt.vbackporch = (vsync & 0x0000FF00) >> 8;
+ timings->bt.vfrontporch = vsync & 0x000000FF;
+}
+
static void return_all_buffers(struct mgb4_vout_dev *voutdev,
enum vb2_buffer_state state)
{
@@ -59,7 +107,11 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
struct device *alloc_devs[])
{
struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(q);
- unsigned int size;
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, voutdev->config->regs.config);
+ u32 pixelsize = (config & (1U << 16)) ? 2 : 4;
+ unsigned int size = (voutdev->width + voutdev->padding) * voutdev->height
+ * pixelsize;
/*
* If I/O reconfiguration is in process, do not allow to start
@@ -69,8 +121,6 @@ static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
if (test_bit(0, &voutdev->mgbdev->io_reconfig))
return -EBUSY;
- size = (voutdev->width + voutdev->padding) * voutdev->height * 4;
-
if (*nplanes)
return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
@@ -93,9 +143,11 @@ static int buffer_prepare(struct vb2_buffer *vb)
{
struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vb->vb2_queue);
struct device *dev = &voutdev->mgbdev->pdev->dev;
- unsigned int size;
-
- size = (voutdev->width + voutdev->padding) * voutdev->height * 4;
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, voutdev->config->regs.config);
+ u32 pixelsize = (config & (1U << 16)) ? 2 : 4;
+ unsigned int size = (voutdev->width + voutdev->padding) * voutdev->height
+ * pixelsize;
if (vb2_plane_size(vb, 0) < size) {
dev_err(dev, "buffer too small (%lu < %u)\n",
@@ -194,24 +246,47 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index != 0)
- return -EINVAL;
-
- f->pixelformat = V4L2_PIX_FMT_ABGR32;
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
- return 0;
+ if (f->index == 0) {
+ f->pixelformat = V4L2_PIX_FMT_ABGR32;
+ return 0;
+ } else if (f->index == 1 && has_yuv(video)) {
+ f->pixelformat = V4L2_PIX_FMT_YUYV;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 config = mgb4_read_reg(video, voutdev->config->regs.config);
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
f->fmt.pix.width = voutdev->width;
f->fmt.pix.height = voutdev->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
- f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 4;
+
+ if (config & (1U << 16)) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+ if (config & (1U << 20)) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ } else {
+ if (config & (1U << 19))
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ }
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 2;
+ } else {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 4;
+ }
+
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
return 0;
@@ -220,14 +295,30 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 pixelsize;
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
f->fmt.pix.width = voutdev->width;
f->fmt.pix.height = voutdev->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
- f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4,
- ALIGN_DOWN(f->fmt.pix.bytesperline, 4));
+
+ if (has_yuv(video) && f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ pixelsize = 2;
+ if (!(f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709 ||
+ f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M))
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ } else {
+ pixelsize = 4;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+ }
+
+ if (f->fmt.pix.bytesperline > f->fmt.pix.width * pixelsize &&
+ f->fmt.pix.bytesperline < f->fmt.pix.width * pixelsize * 2)
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline,
+ pixelsize);
+ else
+ f->fmt.pix.bytesperline = f->fmt.pix.width * pixelsize;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
return 0;
@@ -237,13 +328,39 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct mgb4_vout_dev *voutdev = video_drvdata(file);
struct mgb4_regs *video = &voutdev->mgbdev->video;
+ u32 config, pixelsize;
+ int ret;
if (vb2_is_busy(&voutdev->queue))
return -EBUSY;
- vidioc_try_fmt(file, priv, f);
+ ret = vidioc_try_fmt(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ config = mgb4_read_reg(video, voutdev->config->regs.config);
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
+ pixelsize = 2;
+ config |= 1U << 16;
+
+ if (f->fmt.pix.colorspace == V4L2_COLORSPACE_REC709) {
+ config |= 1U << 20;
+ config |= 1U << 19;
+ } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_SMPTE170M) {
+ config &= ~(1U << 20);
+ config |= 1U << 19;
+ } else {
+ config &= ~(1U << 20);
+ config &= ~(1U << 19);
+ }
+ } else {
+ pixelsize = 4;
+ config &= ~(1U << 16);
+ }
+ mgb4_write_reg(video, voutdev->config->regs.config, config);
- voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4;
+ voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width
+ * pixelsize)) / pixelsize;
mgb4_write_reg(video, voutdev->config->regs.padding, voutdev->padding);
return 0;
@@ -267,11 +384,128 @@ static int vidioc_enum_output(struct file *file, void *priv,
return -EINVAL;
out->type = V4L2_OUTPUT_TYPE_ANALOG;
+ out->capabilities = V4L2_OUT_CAP_DV_TIMINGS;
strscpy(out->name, "MGB4", sizeof(out->name));
return 0;
}
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *ival)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ struct v4l2_dv_timings timings;
+
+ if (ival->index != 0)
+ return -EINVAL;
+ if (!(ival->pixel_format == V4L2_PIX_FMT_ABGR32 ||
+ ((has_yuv(video) && ival->pixel_format == V4L2_PIX_FMT_YUYV))))
+ return -EINVAL;
+ if (ival->width != voutdev->width || ival->height != voutdev->height)
+ return -EINVAL;
+
+ get_timings(voutdev, &timings);
+
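+ /*
+ * The shortest interval is one frame at the current DV timings (total
+ * pixels per frame over the pixel clock); the longest is bounded by
+ * the 32-bit hardware timer running at MGB4_HW_FREQ.
+ */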
+ ival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+ ival->stepwise.max.denominator = MGB4_HW_FREQ;
+ ival->stepwise.max.numerator = 0xFFFFFFFF;
+ ival->stepwise.min.denominator = timings.bt.pixelclock;
+ ival->stepwise.min.numerator = pixel_size(&timings);
+ ival->stepwise.step.denominator = MGB4_HW_FREQ;
+ ival->stepwise.step.numerator = 1;
+
+ return 0;
+}
+
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ struct v4l2_fract *tpf = &parm->parm.output.timeperframe;
+ struct v4l2_dv_timings timings;
+ u32 timer;
+
+ parm->parm.output.writebuffers = 2;
+
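+ /*
+ * Timer values below 0xFFFF mean the frame rate limiter is off, so
+ * report the free-running rate implied by the DV timings instead.
+ */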
+ if (has_timeperframe(video)) {
+ timer = mgb4_read_reg(video, voutdev->config->regs.timer);
+ if (timer < 0xFFFF) {
+ get_timings(voutdev, &timings);
+ tpf->numerator = pixel_size(&timings);
+ tpf->denominator = timings.bt.pixelclock;
+ } else {
+ tpf->numerator = timer;
+ tpf->denominator = MGB4_HW_FREQ;
+ }
+
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+ struct mgb4_regs *video = &voutdev->mgbdev->video;
+ struct v4l2_fract *tpf = &parm->parm.output.timeperframe;
+ struct v4l2_dv_timings timings;
+ u32 timer, period;
+
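+ /*
+ * A requested period shorter than one frame at the current DV
+ * timings cannot be met, so disable the limiter in that case.
+ */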
+ if (has_timeperframe(video)) {
+ timer = tpf->denominator ?
+ MGB4_PERIOD(tpf->numerator, tpf->denominator) : 0;
+ if (timer) {
+ get_timings(voutdev, &timings);
+ period = MGB4_PERIOD(pixel_size(&timings),
+ timings.bt.pixelclock);
+ if (timer < period)
+ timer = 0;
+ }
+
+ mgb4_write_reg(video, voutdev->config->regs.timer, timer);
+ }
+
+ return vidioc_g_parm(file, priv, parm);
+}
+
+static int vidioc_g_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
+ get_timings(voutdev, timings);
+
+ return 0;
+}
+
+static int vidioc_s_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
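+ /*
+ * The requested timings are ignored; the currently configured
+ * timings are reported back instead.
+ */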
+ get_timings(voutdev, timings);
+
+ return 0;
+}
+
+static int vidioc_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &video_timings_cap, NULL, NULL);
+}
+
+static int vidioc_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = video_timings_cap;
+
+ return 0;
+}
+
static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
@@ -279,8 +513,15 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_s_fmt_vid_out = vidioc_s_fmt,
.vidioc_g_fmt_vid_out = vidioc_g_fmt,
.vidioc_enum_output = vidioc_enum_output,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
.vidioc_g_output = vidioc_g_output,
.vidioc_s_output = vidioc_s_output,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
+ .vidioc_dv_timings_cap = vidioc_dv_timings_cap,
+ .vidioc_enum_dv_timings = vidioc_enum_dv_timings,
+ .vidioc_g_dv_timings = vidioc_g_dv_timings,
+ .vidioc_s_dv_timings = vidioc_s_dv_timings,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
@@ -423,13 +664,13 @@ static void fpga_init(struct mgb4_vout_dev *voutdev)
mgb4_write_reg(video, regs->config, 0x00000011);
mgb4_write_reg(video, regs->resolution,
- (MGB4_DEFAULT_WIDTH << 16) | MGB4_DEFAULT_HEIGHT);
- mgb4_write_reg(video, regs->hsync, 0x00102020);
- mgb4_write_reg(video, regs->vsync, 0x40020202);
- mgb4_write_reg(video, regs->frame_period, MGB4_DEFAULT_PERIOD);
+ (DEFAULT_WIDTH << 16) | DEFAULT_HEIGHT);
+ mgb4_write_reg(video, regs->hsync, 0x00283232);
+ mgb4_write_reg(video, regs->vsync, 0x40141F1E);
+ mgb4_write_reg(video, regs->frame_limit, DEFAULT_PERIOD);
mgb4_write_reg(video, regs->padding, 0x00000000);
- voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 70000 >> 1) << 1;
+ voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 61150 >> 1) << 1;
mgb4_write_reg(video, regs->config,
(voutdev->config->id + MGB4_VIN_DEVICES) << 2 | 1 << 4);
@@ -455,14 +696,20 @@ static void debugfs_init(struct mgb4_vout_dev *voutdev)
voutdev->regs[3].offset = voutdev->config->regs.hsync;
voutdev->regs[4].name = "VIDEO_PARAMS_2";
voutdev->regs[4].offset = voutdev->config->regs.vsync;
- voutdev->regs[5].name = "FRAME_PERIOD";
- voutdev->regs[5].offset = voutdev->config->regs.frame_period;
- voutdev->regs[6].name = "PADDING";
+ voutdev->regs[5].name = "FRAME_LIMIT";
+ voutdev->regs[5].offset = voutdev->config->regs.frame_limit;
+ voutdev->regs[6].name = "PADDING_PIXELS";
voutdev->regs[6].offset = voutdev->config->regs.padding;
+ if (has_timeperframe(video)) {
+ voutdev->regs[7].name = "TIMER";
+ voutdev->regs[7].offset = voutdev->config->regs.timer;
+ voutdev->regset.nregs = 8;
+ } else {
+ voutdev->regset.nregs = 7;
+ }
voutdev->regset.base = video->membase;
voutdev->regset.regs = voutdev->regs;
- voutdev->regset.nregs = ARRAY_SIZE(voutdev->regs);
debugfs_create_regset32("registers", 0444, voutdev->debugfs,
&voutdev->regset);
diff --git a/drivers/media/pci/mgb4/mgb4_vout.h b/drivers/media/pci/mgb4/mgb4_vout.h
index b163dee711fd..adc8fe1e7ae6 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.h
+++ b/drivers/media/pci/mgb4/mgb4_vout.h
@@ -19,10 +19,11 @@ struct mgb4_vout_regs {
u32 config;
u32 status;
u32 resolution;
- u32 frame_period;
+ u32 frame_limit;
u32 hsync;
u32 vsync;
u32 padding;
+ u32 timer;
};
struct mgb4_vout_config {
@@ -55,7 +56,7 @@ struct mgb4_vout_dev {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
struct debugfs_regset32 regset;
- struct debugfs_reg32 regs[7];
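+ /* one debugfs entry per 32-bit register in struct mgb4_vout_regs */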
+ struct debugfs_reg32 regs[sizeof(struct mgb4_vout_regs) / 4];
#endif
};
diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
index ca70a864a3ef..5f100e5e03d9 100644
--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
@@ -57,7 +57,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
int desc_cnt)
{
struct solo_p2m_dev *p2m_dev;
- unsigned int timeout;
+ unsigned long time_left;
unsigned int config = 0;
int ret = 0;
unsigned int p2m_id = 0;
@@ -99,12 +99,12 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
desc[1].ctrl);
}
- timeout = wait_for_completion_timeout(&p2m_dev->completion,
- solo_dev->p2m_jiffies);
+ time_left = wait_for_completion_timeout(&p2m_dev->completion,
+ solo_dev->p2m_jiffies);
if (WARN_ON_ONCE(p2m_dev->error))
ret = -EIO;
- else if (timeout == 0) {
+ else if (time_left == 0) {
solo_dev->p2m_timeouts++;
ret = -EAGAIN;
}
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index da61f9beb6b4..73606cee586e 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -179,7 +179,7 @@ struct allegro_dev {
struct list_head channels;
};
-static struct regmap_config allegro_regmap_config = {
+static const struct regmap_config allegro_regmap_config = {
.name = "regmap",
.reg_bits = 32,
.val_bits = 32,
@@ -188,7 +188,7 @@ static struct regmap_config allegro_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static struct regmap_config allegro_sram_config = {
+static const struct regmap_config allegro_sram_config = {
.name = "sram",
.reg_bits = 32,
.val_bits = 32,
@@ -1415,11 +1415,11 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
static int allegro_mcu_wait_for_init_timeout(struct allegro_dev *dev,
unsigned long timeout_ms)
{
- unsigned long tmo;
+ unsigned long time_left;
- tmo = wait_for_completion_timeout(&dev->init_complete,
- msecs_to_jiffies(timeout_ms));
- if (tmo == 0)
+ time_left = wait_for_completion_timeout(&dev->init_complete,
+ msecs_to_jiffies(timeout_ms));
+ if (time_left == 0)
return -ETIMEDOUT;
reinit_completion(&dev->init_complete);
@@ -2481,14 +2481,14 @@ static void allegro_mcu_interrupt(struct allegro_dev *dev)
static void allegro_destroy_channel(struct allegro_channel *channel)
{
struct allegro_dev *dev = channel->dev;
- unsigned long timeout;
+ unsigned long time_left;
if (channel_exists(channel)) {
reinit_completion(&channel->completion);
allegro_mcu_send_destroy_channel(dev, channel);
- timeout = wait_for_completion_timeout(&channel->completion,
- msecs_to_jiffies(5000));
- if (timeout == 0)
+ time_left = wait_for_completion_timeout(&channel->completion,
+ msecs_to_jiffies(5000));
+ if (time_left == 0)
v4l2_warn(&dev->v4l2_dev,
"channel %d: timeout while destroying\n",
channel->mcu_channel_id);
@@ -2544,7 +2544,7 @@ static void allegro_destroy_channel(struct allegro_channel *channel)
static int allegro_create_channel(struct allegro_channel *channel)
{
struct allegro_dev *dev = channel->dev;
- unsigned long timeout;
+ unsigned long time_left;
if (channel_exists(channel)) {
v4l2_warn(&dev->v4l2_dev,
@@ -2595,9 +2595,9 @@ static int allegro_create_channel(struct allegro_channel *channel)
reinit_completion(&channel->completion);
allegro_mcu_send_create_channel(dev, channel);
- timeout = wait_for_completion_timeout(&channel->completion,
- msecs_to_jiffies(5000));
- if (timeout == 0)
+ time_left = wait_for_completion_timeout(&channel->completion,
+ msecs_to_jiffies(5000));
+ if (time_left == 0)
channel->error = -ETIMEDOUT;
if (channel->error)
goto err;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index c1108df72dd5..5c823d3f9cc0 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -242,7 +242,7 @@ static irqreturn_t isi_interrupt(int irq, void *dev_id)
#define WAIT_ISI_DISABLE 0
static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
{
- unsigned long timeout;
+ unsigned long time_left;
/*
* The reset or disable will only succeed if we have a
* pixel clock from the camera.
@@ -257,9 +257,9 @@ static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
}
- timeout = wait_for_completion_timeout(&isi->complete,
- msecs_to_jiffies(500));
- if (timeout == 0)
+ time_left = wait_for_completion_timeout(&isi->complete,
+ msecs_to_jiffies(500));
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
diff --git a/drivers/media/platform/chips-media/coda/coda-bit.c b/drivers/media/platform/chips-media/coda/coda-bit.c
index ed47d5bd8d61..84ded154adfe 100644
--- a/drivers/media/platform/chips-media/coda/coda-bit.c
+++ b/drivers/media/platform/chips-media/coda/coda-bit.c
@@ -585,7 +585,7 @@ static int coda_alloc_context_buffers(struct coda_ctx *ctx,
if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
/* worst case slice size */
- size = (DIV_ROUND_UP(q_data->rect.width, 16) *
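+ /* the cast avoids a possible 32-bit overflow in the worst-case computation */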
+ size = (unsigned long)(DIV_ROUND_UP(q_data->rect.width, 16) *
DIV_ROUND_UP(q_data->rect.height, 16)) * 3200 / 8 + 512;
ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
"slicebuf");
diff --git a/drivers/media/platform/imagination/Kconfig b/drivers/media/platform/imagination/Kconfig
index 7139ae22219b..a302c955483d 100644
--- a/drivers/media/platform/imagination/Kconfig
+++ b/drivers/media/platform/imagination/Kconfig
@@ -2,6 +2,7 @@
config VIDEO_E5010_JPEG_ENC
tristate "Imagination E5010 JPEG Encoder Driver"
depends on VIDEO_DEV
+ depends on ARCH_K3 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
index 11ca2c2fbaad..e62c1c18758b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
@@ -595,7 +595,7 @@ static void mtk_init_vdec_params(struct mtk_vcodec_dec_ctx *ctx)
}
}
-static struct vb2_ops mtk_vdec_frame_vb2_ops = {
+static const struct vb2_ops mtk_vdec_frame_vb2_ops = {
.queue_setup = vb2ops_vdec_queue_setup,
.buf_prepare = vb2ops_vdec_buf_prepare,
.wait_prepare = vb2_ops_wait_prepare,
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
index b903e39fee89..3307dc15fc1d 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
@@ -854,7 +854,7 @@ static int vb2ops_vdec_out_buf_validate(struct vb2_buffer *vb)
return 0;
}
-static struct vb2_ops mtk_vdec_request_vb2_ops = {
+static const struct vb2_ops mtk_vdec_request_vb2_ops = {
.queue_setup = vb2ops_vdec_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
index 73d5cef33b2a..1e1b32faac77 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
@@ -347,11 +347,16 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+ if (!fb) {
+ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
+ return -ENOMEM;
+ }
+
src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+ y_fb_dma = fb->base_y.dma_addr;
+ c_fb_dma = fb->base_c.dma_addr;
mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
inst->num_nalu, y_fb_dma, c_fb_dma, fb);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
index 2d4611e7fa0b..1ed0ccec5665 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
@@ -724,11 +724,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+ if (!fb) {
+ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
+ return -ENOMEM;
+ }
+
src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+ y_fb_dma = fb->base_y.dma_addr;
+ c_fb_dma = fb->base_c.dma_addr;
mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
index e27e728f392e..232ef3bd246a 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
@@ -334,14 +334,18 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
- dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+ if (!fb) {
+ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
+ return -ENOMEM;
+ }
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+ y_fb_dma = fb->base_y.dma_addr;
if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
c_fb_dma = y_fb_dma +
inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
else
- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+ c_fb_dma = fb->base_c.dma_addr;
inst->vsi->dec.bs_dma = (u64)bs->dma_addr;
inst->vsi->dec.bs_sz = bs->size;
diff --git a/drivers/media/platform/microchip/microchip-isc-base.c b/drivers/media/platform/microchip/microchip-isc-base.c
index f3a5cbacadbe..28e56f6a695d 100644
--- a/drivers/media/platform/microchip/microchip-isc-base.c
+++ b/drivers/media/platform/microchip/microchip-isc-base.c
@@ -902,8 +902,11 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
return 0;
}
-static int isc_validate(struct isc_device *isc)
+static int isc_link_validate(struct media_link *link)
{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct isc_device *isc = video_get_drvdata(vdev);
int ret;
int i;
struct isc_format *sd_fmt = NULL;
@@ -1906,20 +1909,6 @@ int microchip_isc_pipeline_init(struct isc_device *isc)
}
EXPORT_SYMBOL_GPL(microchip_isc_pipeline_init);
-static int isc_link_validate(struct media_link *link)
-{
- struct video_device *vdev =
- media_entity_to_video_device(link->sink->entity);
- struct isc_device *isc = video_get_drvdata(vdev);
- int ret;
-
- ret = v4l2_subdev_link_validate(link);
- if (ret)
- return ret;
-
- return isc_validate(isc);
-}
-
static const struct media_entity_operations isc_entity_operations = {
.link_validate = isc_link_validate,
};
diff --git a/drivers/media/platform/microchip/microchip-sama5d2-isc.c b/drivers/media/platform/microchip/microchip-sama5d2-isc.c
index 5ac149cf3647..60b6d922d764 100644
--- a/drivers/media/platform/microchip/microchip-sama5d2-isc.c
+++ b/drivers/media/platform/microchip/microchip-sama5d2-isc.c
@@ -353,33 +353,29 @@ static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = {
static int isc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL;
+ struct device_node *epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
- int ret;
INIT_LIST_HEAD(&isc->subdev_entities);
- while (1) {
+ for_each_endpoint_of_node(np, epn) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
-
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
+ int ret;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
- ret = -EINVAL;
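+ /* drop the endpoint reference taken by for_each_endpoint_of_node() */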
+ of_node_put(epn);
dev_err(dev, "Could not parse the endpoint\n");
- break;
+ return -EINVAL;
}
subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
GFP_KERNEL);
if (!subdev_entity) {
- ret = -ENOMEM;
- break;
+ of_node_put(epn);
+ return -ENOMEM;
}
subdev_entity->epn = epn;
@@ -400,9 +396,8 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
list_add_tail(&subdev_entity->list, &isc->subdev_entities);
}
- of_node_put(epn);
- return ret;
+ return 0;
}
static int microchip_isc_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/microchip/microchip-sama7g5-isc.c b/drivers/media/platform/microchip/microchip-sama7g5-isc.c
index 73445f33d26b..e97abe3e35af 100644
--- a/drivers/media/platform/microchip/microchip-sama7g5-isc.c
+++ b/drivers/media/platform/microchip/microchip-sama7g5-isc.c
@@ -336,36 +336,32 @@ static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = {
static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL;
+ struct device_node *epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
- int ret;
bool mipi_mode;
INIT_LIST_HEAD(&isc->subdev_entities);
mipi_mode = of_property_read_bool(np, "microchip,mipi-mode");
- while (1) {
+ for_each_endpoint_of_node(np, epn) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
-
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
+ int ret;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
- ret = -EINVAL;
+ of_node_put(epn);
dev_err(dev, "Could not parse the endpoint\n");
- break;
+ return -EINVAL;
}
subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
GFP_KERNEL);
if (!subdev_entity) {
- ret = -ENOMEM;
- break;
+ of_node_put(epn);
+ return -ENOMEM;
}
subdev_entity->epn = epn;
@@ -389,9 +385,8 @@ static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
list_add_tail(&subdev_entity->list, &isc->subdev_entities);
}
- of_node_put(epn);
- return ret;
+ return 0;
}
static int microchip_xisc_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c
index d8812fc06c67..0e56a4331b0d 100644
--- a/drivers/media/platform/nvidia/tegra-vde/h264.c
+++ b/drivers/media/platform/nvidia/tegra-vde/h264.c
@@ -623,14 +623,14 @@ static int tegra_vde_decode_end(struct tegra_vde *vde)
unsigned int read_bytes, macroblocks_nb;
struct device *dev = vde->dev;
dma_addr_t bsev_ptr;
- long timeout;
+ long time_left;
int ret;
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&vde->decode_completion, msecs_to_jiffies(1000));
- if (timeout < 0) {
- ret = timeout;
- } else if (timeout == 0) {
+ if (time_left < 0) {
+ ret = time_left;
+ } else if (time_left == 0) {
bsev_ptr = tegra_vde_readl(vde, vde->bsev, 0x10);
macroblocks_nb = tegra_vde_readl(vde, vde->sxe, 0xC8) & 0x1FFF;
read_bytes = bsev_ptr ? bsev_ptr - vde->bitstream_data_addr : 0;
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index b9729a8883d6..44e1402e8be1 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -861,18 +861,21 @@ static void mipi_csis_log_counters(struct mipi_csis_device *csis, bool non_error
{
unsigned int num_events = non_errors ? MIPI_CSIS_NUM_EVENTS
: MIPI_CSIS_NUM_EVENTS - 8;
+ unsigned int counters[MIPI_CSIS_NUM_EVENTS];
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&csis->slock, flags);
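+ /* Snapshot the counters so dev_info() is not called with the lock held. */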
+ for (i = 0; i < num_events; ++i)
+ counters[i] = csis->events[i].counter;
+ spin_unlock_irqrestore(&csis->slock, flags);
for (i = 0; i < num_events; ++i) {
- if (csis->events[i].counter > 0 || csis->debug.enable)
+ if (counters[i] > 0 || csis->debug.enable)
dev_info(csis->dev, "%s events: %d\n",
csis->events[i].name,
- csis->events[i].counter);
+ counters[i]);
}
- spin_unlock_irqrestore(&csis->slock, flags);
}
static int mipi_csis_dump_regs(struct mipi_csis_device *csis)
@@ -1344,7 +1347,7 @@ err_parse:
* Suspend/resume
*/
-static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev)
+static int mipi_csis_runtime_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct mipi_csis_device *csis = sd_to_mipi_csis_device(sd);
@@ -1359,7 +1362,7 @@ static int __maybe_unused mipi_csis_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mipi_csis_runtime_resume(struct device *dev)
+static int mipi_csis_runtime_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct mipi_csis_device *csis = sd_to_mipi_csis_device(sd);
@@ -1379,8 +1382,8 @@ static int __maybe_unused mipi_csis_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops mipi_csis_pm_ops = {
- SET_RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume,
+ NULL)
};
/* -----------------------------------------------------------------------------
@@ -1571,7 +1574,7 @@ static struct platform_driver mipi_csis_driver = {
.driver = {
.of_match_table = mipi_csis_of_match,
.name = CSIS_DRIVER_NAME,
- .pm = &mipi_csis_pm_ops,
+ .pm = pm_ptr(&mipi_csis_pm_ops),
},
};
diff --git a/drivers/media/platform/nxp/imx-pxp.h b/drivers/media/platform/nxp/imx-pxp.h
index 44f95c749d2e..476f2042fa6f 100644
--- a/drivers/media/platform/nxp/imx-pxp.h
+++ b/drivers/media/platform/nxp/imx-pxp.h
@@ -594,12 +594,17 @@
(((v) << 18) & BM_PXP_CSC1_COEF0_C0)
#define BP_PXP_CSC1_COEF0_UV_OFFSET 9
#define BM_PXP_CSC1_COEF0_UV_OFFSET 0x0003FE00
+
+/*
+ * Use v * (1 << 9) instead of v << 9 to work around a gcc5 bug:
+ * the compiler otherwise fails to see that the expression is constant.
+ */
#define BF_PXP_CSC1_COEF0_UV_OFFSET(v) \
- (((v) << 9) & BM_PXP_CSC1_COEF0_UV_OFFSET)
+ (((v) * (1 << 9)) & BM_PXP_CSC1_COEF0_UV_OFFSET)
#define BP_PXP_CSC1_COEF0_Y_OFFSET 0
#define BM_PXP_CSC1_COEF0_Y_OFFSET 0x000001FF
#define BF_PXP_CSC1_COEF0_Y_OFFSET(v) \
- (((v) << 0) & BM_PXP_CSC1_COEF0_Y_OFFSET)
+ ((v) & BM_PXP_CSC1_COEF0_Y_OFFSET)
#define HW_PXP_CSC1_COEF1 (0x000001b0)
diff --git a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
index ba2e81f24965..d4a6c5532969 100644
--- a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
+++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
@@ -693,7 +693,7 @@ unlock:
return ret ? -EAGAIN : 0;
}
-static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev)
+static int imx8mq_mipi_csi_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
@@ -705,7 +705,7 @@ static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev)
+static int imx8mq_mipi_csi_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
@@ -716,7 +716,7 @@ static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev)
return imx8mq_mipi_csi_pm_resume(dev);
}
-static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev)
+static int imx8mq_mipi_csi_runtime_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
@@ -731,7 +731,7 @@ static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev)
+static int imx8mq_mipi_csi_runtime_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
@@ -747,10 +747,9 @@ static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8mq_mipi_csi_pm_ops = {
- SET_RUNTIME_PM_OPS(imx8mq_mipi_csi_runtime_suspend,
- imx8mq_mipi_csi_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(imx8mq_mipi_csi_suspend, imx8mq_mipi_csi_resume)
+ RUNTIME_PM_OPS(imx8mq_mipi_csi_runtime_suspend,
+ imx8mq_mipi_csi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(imx8mq_mipi_csi_suspend, imx8mq_mipi_csi_resume)
};
/* -----------------------------------------------------------------------------
@@ -958,7 +957,7 @@ static struct platform_driver imx8mq_mipi_csi_driver = {
.driver = {
.of_match_table = imx8mq_mipi_csi_of_match,
.name = MIPI_CSI2_DRIVER_NAME,
- .pm = &imx8mq_mipi_csi_pm_ops,
+ .pm = pm_ptr(&imx8mq_mipi_csi_pm_ops),
},
};
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index cd72feca618c..3b8fc31d957c 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -297,12 +297,6 @@ static void video_stop_streaming(struct vb2_queue *q)
ret = v4l2_subdev_call(subdev, video, s_stream, 0);
- if (entity->use_count > 1) {
- /* Don't stop if other instances of the pipeline are still running */
- dev_dbg(video->camss->dev, "Video pipeline still used, don't stop streaming.\n");
- return;
- }
-
if (ret) {
dev_err(video->camss->dev, "Video pipeline stop failed: %d\n", ret);
return;
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 51b1d3550421..d64985ca6e88 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -2283,6 +2283,8 @@ static int camss_probe(struct platform_device *pdev)
v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
+ pm_runtime_enable(dev);
+
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0) {
ret = num_subdevs;
@@ -2323,8 +2325,6 @@ static int camss_probe(struct platform_device *pdev)
}
}
- pm_runtime_enable(dev);
-
return 0;
err_register_subdevs:
@@ -2332,6 +2332,7 @@ err_register_subdevs:
err_v4l2_device_unregister:
v4l2_device_unregister(&camss->v4l2_dev);
v4l2_async_nf_cleanup(&camss->notifier);
+ pm_runtime_disable(dev);
err_genpd_cleanup:
camss_genpd_cleanup(camss);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 165c947a6703..84e95a46dfc9 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -430,6 +430,7 @@ static void venus_remove(struct platform_device *pdev)
struct device *dev = core->dev;
int ret;
+ cancel_delayed_work_sync(&core->work);
ret = pm_runtime_get_sync(dev);
WARN_ON(ret < 0);
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index fe7da2b30482..66a18830e66d 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -316,10 +316,10 @@ int venus_firmware_init(struct venus_core *core)
core->fw.dev = &pdev->dev;
- iommu_dom = iommu_domain_alloc(&platform_bus_type);
- if (!iommu_dom) {
+ iommu_dom = iommu_paging_domain_alloc(core->fw.dev);
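+ /* iommu_paging_domain_alloc() returns ERR_PTR() on failure, not NULL */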
+ if (IS_ERR(iommu_dom)) {
dev_err(core->fw.dev, "Failed to allocate iommu domain\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(iommu_dom);
goto err_unregister;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 3418d2dd9371..3ae063094e3e 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -156,7 +156,7 @@ void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt)
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
pkt->num_properties = 1;
- pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+ pkt->data = HFI_PROPERTY_SYS_IMAGE_VERSION;
}
int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
@@ -331,7 +331,7 @@ int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie,
pkt->alloc_len = out_frame->alloc_len;
pkt->filled_len = out_frame->filled_len;
pkt->offset = out_frame->offset;
- pkt->data[0] = out_frame->extradata_size;
+ pkt->data = out_frame->extradata_size;
return 0;
}
@@ -402,7 +402,7 @@ static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt,
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->num_properties = 1;
- pkt->data[0] = ptype;
+ pkt->data = ptype;
return 0;
}
@@ -1110,7 +1110,7 @@ pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt,
switch (ptype) {
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
- pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
+ pkt->data = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
break;
default:
ret = pkt_session_get_property_1x(pkt, cookie, ptype);
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
index 1adf2d2ae5f2..a83125bc17aa 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.h
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -74,7 +74,7 @@ struct hfi_sys_set_property_pkt {
struct hfi_sys_get_property_pkt {
struct hfi_pkt_hdr hdr;
u32 num_properties;
- u32 data[1];
+ u32 data;
};
struct hfi_sys_set_buffers_pkt {
@@ -82,7 +82,7 @@ struct hfi_sys_set_buffers_pkt {
u32 buffer_type;
u32 buffer_size;
u32 num_buffers;
- u32 buffer_addr[1];
+ u32 buffer_addr[];
};
struct hfi_sys_ping_pkt {
@@ -151,7 +151,7 @@ struct hfi_session_empty_buffer_compressed_pkt {
u32 input_tag;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[1];
+ u32 data;
};
struct hfi_session_empty_buffer_uncompressed_plane0_pkt {
@@ -168,7 +168,7 @@ struct hfi_session_empty_buffer_uncompressed_plane0_pkt {
u32 input_tag;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[1];
+ u32 data;
};
struct hfi_session_empty_buffer_uncompressed_plane1_pkt {
@@ -177,7 +177,7 @@ struct hfi_session_empty_buffer_uncompressed_plane1_pkt {
u32 filled_len;
u32 offset;
u32 packet_buffer2;
- u32 data[1];
+ u32 data;
};
struct hfi_session_empty_buffer_uncompressed_plane2_pkt {
@@ -186,7 +186,7 @@ struct hfi_session_empty_buffer_uncompressed_plane2_pkt {
u32 filled_len;
u32 offset;
u32 packet_buffer3;
- u32 data[1];
+ u32 data;
};
struct hfi_session_fill_buffer_pkt {
@@ -198,7 +198,7 @@ struct hfi_session_fill_buffer_pkt {
u32 output_tag;
u32 packet_buffer;
u32 extradata_buffer;
- u32 data[1];
+ u32 data;
};
struct hfi_session_flush_pkt {
@@ -217,7 +217,7 @@ struct hfi_session_resume_pkt {
struct hfi_session_get_property_pkt {
struct hfi_session_hdr_pkt shdr;
u32 num_properties;
- u32 data[1];
+ u32 data;
};
struct hfi_session_release_buffer_pkt {
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index e4c05d62cfc7..f44059f19505 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -761,7 +761,7 @@ struct hfi_multi_stream_3x {
struct hfi_multi_view_format {
u32 views;
- u32 view_order[1];
+ u32 view_order[];
};
#define HFI_MULTI_SLICE_OFF 0x1
@@ -1005,13 +1005,13 @@ struct hfi_uncompressed_plane_constraints {
struct hfi_uncompressed_plane_info {
u32 format;
u32 num_planes;
- struct hfi_uncompressed_plane_constraints plane_constraints[1];
+ struct hfi_uncompressed_plane_constraints plane_constraints;
};
struct hfi_uncompressed_format_supported {
u32 buffer_type;
u32 format_entries;
- struct hfi_uncompressed_plane_info plane_info[1];
+ struct hfi_uncompressed_plane_info plane_info;
};
struct hfi_uncompressed_plane_actual {
@@ -1038,7 +1038,7 @@ struct hfi_codec_supported {
struct hfi_properties_supported {
u32 num_properties;
- u32 properties[1];
+ u32 properties[];
};
struct hfi_max_sessions_supported {
@@ -1085,12 +1085,12 @@ struct hfi_resource_ocmem_requirement {
struct hfi_resource_ocmem_requirement_info {
u32 num_entries;
- struct hfi_resource_ocmem_requirement requirements[1];
+ struct hfi_resource_ocmem_requirement requirements[];
};
struct hfi_property_sys_image_version_info_type {
u32 string_size;
- u8 str_image_version[1];
+ u8 str_image_version[];
};
struct hfi_codec_mask_supported {
@@ -1141,7 +1141,7 @@ struct hfi_extradata_header {
u32 port_index;
u32 type;
u32 data_size;
- u8 data[1];
+ u8 data[];
};
struct hfi_batch_info {
@@ -1236,7 +1236,7 @@ static inline void hfi_bufreq_set_count_min_host(struct hfi_buffer_requirements
struct hfi_data_payload {
u32 size;
- u8 data[1];
+ u8 data[];
};
struct hfi_enable_picture {
@@ -1264,12 +1264,12 @@ struct hfi_interlace_format_supported {
struct hfi_buffer_alloc_mode_supported {
u32 buffer_type;
u32 num_entries;
- u32 data[1];
+ u32 data[];
};
struct hfi_mb_error_map {
u32 error_map_size;
- u8 error_map[1];
+ u8 error_map[];
};
struct hfi_metadata_pass_through {
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index c43839539d4d..3df241dc3a11 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -157,7 +157,7 @@ static void
parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_uncompressed_format_supported *fmt = data;
- struct hfi_uncompressed_plane_info *pinfo = fmt->plane_info;
+ struct hfi_uncompressed_plane_info *pinfo = &fmt->plane_info;
struct hfi_uncompressed_plane_constraints *constr;
struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
u32 entries = fmt->format_entries;
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
index f5a655973c08..6289166786ec 100644
--- a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
@@ -1063,51 +1063,51 @@ struct enc_bufsize_ops {
u32 (*persist)(void);
};
-static struct dec_bufsize_ops dec_h264_ops = {
+static const struct dec_bufsize_ops dec_h264_ops = {
.scratch = h264d_scratch_size,
.scratch1 = h264d_scratch1_size,
.persist1 = h264d_persist1_size,
};
-static struct dec_bufsize_ops dec_h265_ops = {
+static const struct dec_bufsize_ops dec_h265_ops = {
.scratch = h265d_scratch_size,
.scratch1 = h265d_scratch1_size,
.persist1 = h265d_persist1_size,
};
-static struct dec_bufsize_ops dec_vp8_ops = {
+static const struct dec_bufsize_ops dec_vp8_ops = {
.scratch = vpxd_scratch_size,
.scratch1 = vp8d_scratch1_size,
.persist1 = vp8d_persist1_size,
};
-static struct dec_bufsize_ops dec_vp9_ops = {
+static const struct dec_bufsize_ops dec_vp9_ops = {
.scratch = vpxd_scratch_size,
.scratch1 = vp9d_scratch1_size,
.persist1 = vp9d_persist1_size,
};
-static struct dec_bufsize_ops dec_mpeg2_ops = {
+static const struct dec_bufsize_ops dec_mpeg2_ops = {
.scratch = mpeg2d_scratch_size,
.scratch1 = mpeg2d_scratch1_size,
.persist1 = mpeg2d_persist1_size,
};
-static struct enc_bufsize_ops enc_h264_ops = {
+static const struct enc_bufsize_ops enc_h264_ops = {
.scratch = h264e_scratch_size,
.scratch1 = h264e_scratch1_size,
.scratch2 = enc_scratch2_size,
.persist = enc_persist_size,
};
-static struct enc_bufsize_ops enc_h265_ops = {
+static const struct enc_bufsize_ops enc_h265_ops = {
.scratch = h265e_scratch_size,
.scratch1 = h265e_scratch1_size,
.scratch2 = enc_scratch2_size,
.persist = enc_persist_size,
};
-static struct enc_bufsize_ops enc_vp8_ops = {
+static const struct enc_bufsize_ops enc_vp8_ops = {
.scratch = vp8e_scratch_size,
.scratch1 = vp8e_scratch1_size,
.scratch2 = enc_scratch2_size,
@@ -1186,7 +1186,7 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
u32 codec = params->codec;
u32 width = params->width, height = params->height, out_min_count;
u32 out_width = params->out_width, out_height = params->out_height;
- struct dec_bufsize_ops *dec_ops;
+ const struct dec_bufsize_ops *dec_ops;
bool is_secondary_output = params->dec.is_secondary_output;
bool is_interlaced = params->dec.is_interlaced;
u32 max_mbs_per_frame = params->dec.max_mbs_per_frame;
@@ -1260,7 +1260,7 @@ static int bufreq_enc(struct hfi_plat_buffers_params *params, u32 buftype,
struct hfi_buffer_requirements *bufreq)
{
enum hfi_version version = params->version;
- struct enc_bufsize_ops *enc_ops;
+ const struct enc_bufsize_ops *enc_ops;
u32 width = params->width;
u32 height = params->height;
bool is_tenbit = params->enc.is_tenbit;
diff --git a/drivers/media/platform/raspberrypi/pisp_be/Kconfig b/drivers/media/platform/raspberrypi/pisp_be/Kconfig
index 38c0f8305d62..46765a2e4c4d 100644
--- a/drivers/media/platform/raspberrypi/pisp_be/Kconfig
+++ b/drivers/media/platform/raspberrypi/pisp_be/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_RASPBERRYPI_PISP_BE
tristate "Raspberry Pi PiSP Backend (BE) ISP driver"
depends on V4L_PLATFORM_DRIVERS
depends on VIDEO_DEV
+ depends on ARCH_BCM2835 || COMPILE_TEST
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index 809c3a38cc4a..695d884a22d1 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -1274,16 +1274,7 @@ static const struct rvin_info rcar_info_r8a77995 = {
.scaler = rvin_scaler_gen3,
};
-static const struct rvin_info rcar_info_r8a779a0 = {
- .model = RCAR_GEN3,
- .use_mc = true,
- .use_isp = true,
- .nv12 = true,
- .max_width = 4096,
- .max_height = 4096,
-};
-
-static const struct rvin_info rcar_info_r8a779g0 = {
+static const struct rvin_info rcar_info_gen4 = {
.model = RCAR_GEN3,
.use_mc = true,
.use_isp = true,
@@ -1354,12 +1345,18 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a77995,
},
{
+ /* Kept for compatibility with old DTS files. */
.compatible = "renesas,vin-r8a779a0",
- .data = &rcar_info_r8a779a0,
+ .data = &rcar_info_gen4,
},
{
+ /* Kept for compatibility with old DTS files. */
.compatible = "renesas,vin-r8a779g0",
- .data = &rcar_info_r8a779g0,
+ .data = &rcar_info_gen4,
+ },
+ {
+ .compatible = "renesas,rcar-gen4-vin",
+ .data = &rcar_info_gen4,
},
{ /* Sentinel */ },
};
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index e68fcdaea207..c7fdee347ac8 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -865,6 +865,7 @@ static const struct of_device_id rzg2l_csi2_of_table[] = {
{ .compatible = "renesas,rzg2l-csi2", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table);
static struct platform_driver rzg2l_csi2_pdrv = {
.remove_new = rzg2l_csi2_remove,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index fdb46ec0c872..e728f9f5160e 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -1082,6 +1082,27 @@ static const struct v4l2_file_operations vsp1_video_fops = {
};
/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+static int vsp1_video_link_validate(struct media_link *link)
+{
+ /*
+ * Ideally, link validation should be implemented here instead of
+ * calling vsp1_video_verify_format() in vsp1_video_streamon()
+ * manually. That would however break userspace that start one video
+ * device before configures formats on other video devices in the
+ * pipeline. This operation is just a no-op to silence the warnings
+ * from v4l2_subdev_link_validate().
+ */
+ return 0;
+}
+
+static const struct media_entity_operations vsp1_video_media_ops = {
+ .link_validate = vsp1_video_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
* Suspend and Resume
*/
@@ -1215,6 +1236,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
/* ... and the video node... */
video->video.v4l2_dev = &video->vsp1->v4l2_dev;
+ video->video.entity.ops = &vsp1_video_media_ops;
video->video.fops = &vsp1_video_fops;
snprintf(video->video.name, sizeof(video->video.name), "%s %s",
rwpf->entity.subdev.name, direction);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
index f956b90a407a..60c97bb7b18b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.c
@@ -178,3 +178,17 @@ void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
rkisp1_sd_adjust_crop_rect(crop, &crop_bounds);
}
+
+void rkisp1_bls_swap_regs(enum rkisp1_fmt_raw_pat_type pattern,
+ const u32 input[4], u32 output[4])
+{
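+ /*
+ * Permute the A..D register addresses so that indices 0..3 always
+ * correspond to the R, Gr, Gb and B components for the given bayer
+ * pattern.
+ */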
+ static const unsigned int swap[4][4] = {
+ [RKISP1_RAW_RGGB] = { 0, 1, 2, 3 },
+ [RKISP1_RAW_GRBG] = { 1, 0, 3, 2 },
+ [RKISP1_RAW_GBRG] = { 2, 3, 0, 1 },
+ [RKISP1_RAW_BGGR] = { 3, 2, 1, 0 },
+ };
+
+ for (unsigned int i = 0; i < 4; ++i)
+ output[i] = input[swap[pattern][i]];
+}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index 26573f6ae575..ca952fd0829b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -33,9 +33,10 @@ struct regmap;
#define RKISP1_ISP_SD_SRC BIT(0)
#define RKISP1_ISP_SD_SINK BIT(1)
-/* min and max values for the widths and heights of the entities */
-#define RKISP1_ISP_MAX_WIDTH 4032
-#define RKISP1_ISP_MAX_HEIGHT 3024
+/*
+ * Minimum values for the width and height of entities. The maximum values are
+ * model-specific and stored in the rkisp1_info structure.
+ */
#define RKISP1_ISP_MIN_WIDTH 32
#define RKISP1_ISP_MIN_HEIGHT 32
@@ -115,6 +116,8 @@ enum rkisp1_isp_pad {
* @RKISP1_FEATURE_SELF_PATH: The ISP has a self path
* @RKISP1_FEATURE_DUAL_CROP: The ISP has the dual crop block at the resizer input
* @RKISP1_FEATURE_DMA_34BIT: The ISP uses 34-bit DMA addresses
+ * @RKISP1_FEATURE_BLS: The ISP has a dedicated BLS block
+ * @RKISP1_FEATURE_COMPAND: The ISP has a companding block
*
* The ISP features are stored in a bitmask in &rkisp1_info.features and allow
* the driver to implement support for features present in some ISP versions
@@ -126,6 +129,8 @@ enum rkisp1_feature {
RKISP1_FEATURE_SELF_PATH = BIT(2),
RKISP1_FEATURE_DUAL_CROP = BIT(3),
RKISP1_FEATURE_DMA_34BIT = BIT(4),
+ RKISP1_FEATURE_BLS = BIT(5),
+ RKISP1_FEATURE_COMPAND = BIT(6),
};
#define rkisp1_has_feature(rkisp1, feature) \
@@ -140,6 +145,8 @@ enum rkisp1_feature {
* @isr_size: number of entries in the @isrs array
* @isp_ver: ISP version
* @features: bitmask of rkisp1_feature features implemented by the ISP
+ * @max_width: maximum input frame width
+ * @max_height: maximum input frame height
*
* This structure contains information about the ISP specific to a particular
* ISP model, version, or integration in a particular SoC.
@@ -151,6 +158,8 @@ struct rkisp1_info {
unsigned int isr_size;
enum rkisp1_cif_isp_version isp_ver;
unsigned int features;
+ unsigned int max_width;
+ unsigned int max_height;
};
/*
@@ -232,7 +241,7 @@ struct rkisp1_vdev_node {
/*
* struct rkisp1_buffer - A container for the vb2 buffers used by the video devices:
- * params, stats, mainpath, selfpath
+ * stats, mainpath, selfpath
*
* @vb: vb2 buffer
* @queue: entry of the buffer in the queue
@@ -245,6 +254,26 @@ struct rkisp1_buffer {
};
/*
+ * struct rkisp1_params_buffer - A container for the vb2 buffers used by the
+ * params video device
+ *
+ * @vb: vb2 buffer
+ * @queue: entry of the buffer in the queue
+ * @cfg: scratch buffer used for caching the ISP configuration parameters
+ */
+struct rkisp1_params_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head queue;
+ void *cfg;
+};
+
+static inline struct rkisp1_params_buffer *
+to_rkisp1_params_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct rkisp1_params_buffer, vb);
+}
+
+/*
* struct rkisp1_dummy_buffer - A buffer to write the next frame to in case
* there are no vb2 buffers available.
*
@@ -372,9 +401,11 @@ struct rkisp1_params_ops {
* @ops: pointer to the variant-specific operations
* @config_lock: locks the buffer list 'params'
* @params: queue of rkisp1_buffer
- * @vdev_fmt: v4l2_format of the metadata format
+ * @metafmt: the currently enabled metadata format
* @quantization: the quantization configured on the isp's src pad
+ * @ycbcr_encoding: the YCbCr encoding
* @raw_type: the bayer pattern on the isp video sink pad
+ * @enabled_blocks: bitmask of enabled ISP blocks
*/
struct rkisp1_params {
struct rkisp1_vdev_node vnode;
@@ -383,11 +414,14 @@ struct rkisp1_params {
spinlock_t config_lock; /* locks the buffers list 'params' */
struct list_head params;
- struct v4l2_format vdev_fmt;
+
+ const struct v4l2_meta_format *metafmt;
enum v4l2_quantization quantization;
enum v4l2_ycbcr_encoding ycbcr_encoding;
enum rkisp1_fmt_raw_pat_type raw_type;
+
+ u32 enabled_blocks;
};
/*
@@ -573,6 +607,9 @@ void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop,
void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
const struct v4l2_mbus_framefmt *bounds);
+void rkisp1_bls_swap_regs(enum rkisp1_fmt_raw_pat_type pattern,
+ const u32 input[4], u32 output[4]);
+
/*
* rkisp1_mbus_info_get_by_code - get the isp info of the media bus code
*
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index 4202642e0523..841e58c20f7f 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -307,6 +307,7 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
+ struct rkisp1_csi *csi = to_rkisp1_csi(sd);
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
@@ -326,10 +327,10 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
sink_fmt->width = clamp_t(u32, fmt->format.width,
RKISP1_ISP_MIN_WIDTH,
- RKISP1_ISP_MAX_WIDTH);
+ csi->rkisp1->info->max_width);
sink_fmt->height = clamp_t(u32, fmt->format.height,
RKISP1_ISP_MIN_HEIGHT,
- RKISP1_ISP_MAX_HEIGHT);
+ csi->rkisp1->info->max_height);
fmt->format = *sink_fmt;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index bb0202386c70..dd114ab77800 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -509,7 +509,10 @@ static const struct rkisp1_info px30_isp_info = {
.isp_ver = RKISP1_V12,
.features = RKISP1_FEATURE_MIPI_CSI2
| RKISP1_FEATURE_SELF_PATH
- | RKISP1_FEATURE_DUAL_CROP,
+ | RKISP1_FEATURE_DUAL_CROP
+ | RKISP1_FEATURE_BLS,
+ .max_width = 3264,
+ .max_height = 2448,
};
static const char * const rk3399_isp_clks[] = {
@@ -530,7 +533,10 @@ static const struct rkisp1_info rk3399_isp_info = {
.isp_ver = RKISP1_V10,
.features = RKISP1_FEATURE_MIPI_CSI2
| RKISP1_FEATURE_SELF_PATH
- | RKISP1_FEATURE_DUAL_CROP,
+ | RKISP1_FEATURE_DUAL_CROP
+ | RKISP1_FEATURE_BLS,
+ .max_width = 4416,
+ .max_height = 3312,
};
static const char * const imx8mp_isp_clks[] = {
@@ -550,7 +556,10 @@ static const struct rkisp1_info imx8mp_isp_info = {
.isr_size = ARRAY_SIZE(imx8mp_isp_isrs),
.isp_ver = RKISP1_V_IMX8MP,
.features = RKISP1_FEATURE_MAIN_STRIDE
- | RKISP1_FEATURE_DMA_34BIT,
+ | RKISP1_FEATURE_DMA_34BIT
+ | RKISP1_FEATURE_COMPAND,
+ .max_width = 4096,
+ .max_height = 3072,
};
static const struct of_device_id rkisp1_of_match[] = {
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 91301d17d356..d94917211828 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -517,6 +517,7 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct rkisp1_isp *isp = to_rkisp1_isp(sd);
const struct rkisp1_mbus_info *mbus_info;
if (fse->pad == RKISP1_ISP_PAD_SINK_PARAMS ||
@@ -539,9 +540,9 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
return -EINVAL;
fse->min_width = RKISP1_ISP_MIN_WIDTH;
- fse->max_width = RKISP1_ISP_MAX_WIDTH;
+ fse->max_width = isp->rkisp1->info->max_width;
fse->min_height = RKISP1_ISP_MIN_HEIGHT;
- fse->max_height = RKISP1_ISP_MAX_HEIGHT;
+ fse->max_height = isp->rkisp1->info->max_height;
return 0;
}
@@ -772,10 +773,10 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
sink_fmt->width = clamp_t(u32, format->width,
RKISP1_ISP_MIN_WIDTH,
- RKISP1_ISP_MAX_WIDTH);
+ isp->rkisp1->info->max_width);
sink_fmt->height = clamp_t(u32, format->height,
RKISP1_ISP_MIN_HEIGHT,
- RKISP1_ISP_MAX_HEIGHT);
+ isp->rkisp1->info->max_height);
/*
* Adjust the color space fields. Accept any color primaries and
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 173d1ea41874..320581a9f866 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -5,6 +5,9 @@
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
+#include <linux/math.h>
+#include <linux/string.h>
+
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
@@ -33,6 +36,59 @@
#define RKISP1_ISP_CC_COEFF(n) \
(RKISP1_CIF_ISP_CC_COEFF_0 + (n) * 4)
+#define RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS BIT(0)
+#define RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC BIT(1)
+
+union rkisp1_ext_params_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_ext_params_bls_config bls;
+ struct rkisp1_ext_params_dpcc_config dpcc;
+ struct rkisp1_ext_params_sdg_config sdg;
+ struct rkisp1_ext_params_lsc_config lsc;
+ struct rkisp1_ext_params_awb_gain_config awbg;
+ struct rkisp1_ext_params_flt_config flt;
+ struct rkisp1_ext_params_bdm_config bdm;
+ struct rkisp1_ext_params_ctk_config ctk;
+ struct rkisp1_ext_params_goc_config goc;
+ struct rkisp1_ext_params_dpf_config dpf;
+ struct rkisp1_ext_params_dpf_strength_config dpfs;
+ struct rkisp1_ext_params_cproc_config cproc;
+ struct rkisp1_ext_params_ie_config ie;
+ struct rkisp1_ext_params_awb_meas_config awbm;
+ struct rkisp1_ext_params_hst_config hst;
+ struct rkisp1_ext_params_aec_config aec;
+ struct rkisp1_ext_params_afc_config afc;
+ struct rkisp1_ext_params_compand_bls_config compand_bls;
+ struct rkisp1_ext_params_compand_curve_config compand_curve;
+};
+
+enum rkisp1_params_formats {
+ RKISP1_PARAMS_FIXED,
+ RKISP1_PARAMS_EXTENSIBLE,
+};
+
+static const struct v4l2_meta_format rkisp1_params_formats[] = {
+ [RKISP1_PARAMS_FIXED] = {
+ .dataformat = V4L2_META_FMT_RK_ISP1_PARAMS,
+ .buffersize = sizeof(struct rkisp1_params_cfg),
+ },
+ [RKISP1_PARAMS_EXTENSIBLE] = {
+ .dataformat = V4L2_META_FMT_RK_ISP1_EXT_PARAMS,
+ .buffersize = sizeof(struct rkisp1_ext_params_cfg),
+ },
+};
+
+static const struct v4l2_meta_format *
+rkisp1_params_get_format_info(u32 dataformat)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(rkisp1_params_formats); i++) {
+ if (rkisp1_params_formats[i].dataformat == dataformat)
+ return &rkisp1_params_formats[i];
+ }
+
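+ /* default to the fixed-layout parameters format for unknown fourccs */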
+ return &rkisp1_params_formats[RKISP1_PARAMS_FIXED];
+}
+
static inline void
rkisp1_param_set_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
{
@@ -112,54 +168,20 @@ static void rkisp1_bls_config(struct rkisp1_params *params,
new_control &= RKISP1_CIF_ISP_BLS_ENA;
/* fixed subtraction values */
if (!arg->enable_auto) {
- const struct rkisp1_cif_isp_bls_fixed_val *pval =
- &arg->fixed_val;
-
- switch (params->raw_type) {
- case RKISP1_RAW_BGGR:
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
- pval->r);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
- pval->gr);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
- pval->gb);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
- pval->b);
- break;
- case RKISP1_RAW_GBRG:
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
- pval->r);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
- pval->gr);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
- pval->gb);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
- pval->b);
- break;
- case RKISP1_RAW_GRBG:
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
- pval->r);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
- pval->gr);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
- pval->gb);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
- pval->b);
- break;
- case RKISP1_RAW_RGGB:
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
- pval->r);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
- pval->gr);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
- pval->gb);
- rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
- pval->b);
- break;
- default:
- break;
- }
-
+ static const u32 regs[] = {
+ RKISP1_CIF_ISP_BLS_A_FIXED,
+ RKISP1_CIF_ISP_BLS_B_FIXED,
+ RKISP1_CIF_ISP_BLS_C_FIXED,
+ RKISP1_CIF_ISP_BLS_D_FIXED,
+ };
+ u32 swapped[4];
+
+ rkisp1_bls_swap_regs(params->raw_type, regs, swapped);
+
+ rkisp1_write(params->rkisp1, swapped[0], arg->fixed_val.r);
+ rkisp1_write(params->rkisp1, swapped[1], arg->fixed_val.gr);
+ rkisp1_write(params->rkisp1, swapped[2], arg->fixed_val.gb);
+ rkisp1_write(params->rkisp1, swapped[3], arg->fixed_val.b);
} else {
if (arg->en_windows & BIT(1)) {
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H2_START,
@@ -1239,6 +1261,93 @@ rkisp1_dpf_strength_config(struct rkisp1_params *params,
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_R, arg->r);
}
+static void rkisp1_compand_write_px_curve(struct rkisp1_params *params,
+ unsigned int addr, const u8 *curve)
+{
+ const unsigned int points_per_reg = 6;
+ const unsigned int num_regs =
+ DIV_ROUND_UP(RKISP1_CIF_ISP_COMPAND_NUM_POINTS,
+ points_per_reg);
+
+ /*
+ * The compand curve is specified as a piecewise linear function with
+ * 64 points. X coordinates are stored as a log2 of the displacement
+ * from the previous point, in 5 bits, with 6 values per register. The
+ * last register stores 4 values.
+ */
+ for (unsigned int reg = 0; reg < num_regs; ++reg) {
+ unsigned int num_points =
+ min(RKISP1_CIF_ISP_COMPAND_NUM_POINTS -
+ reg * points_per_reg, points_per_reg);
+ u32 val = 0;
+
+ for (unsigned int i = 0; i < num_points; i++)
+ val |= (*curve++ & 0x1f) << (i * 5);
+
+ rkisp1_write(params->rkisp1, addr, val);
+ addr += 4;
+ }
+}
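As an illustration of the packing performed above: six 5-bit points occupy bits 0-29 of each register, leaving bits 30-31 unused. A standalone model (hypothetical helper, not part of the driver):

#include <stdint.h>

/* Pack up to six 5-bit px-curve points into one register value,
 * mirroring the inner loop of rkisp1_compand_write_px_curve(). */
static uint32_t pack_px_points(const uint8_t *points, unsigned int num)
{
	uint32_t val = 0;

	for (unsigned int i = 0; i < num; i++)
		val |= (uint32_t)(points[i] & 0x1f) << (i * 5);

	return val;
}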
+
+static void
+rkisp1_compand_write_curve_mem(struct rkisp1_params *params,
+ unsigned int reg_addr, unsigned int reg_data,
+ const u32 curve[RKISP1_CIF_ISP_COMPAND_NUM_POINTS])
+{
+ for (unsigned int i = 0; i < RKISP1_CIF_ISP_COMPAND_NUM_POINTS; i++) {
+ rkisp1_write(params->rkisp1, reg_addr, i);
+ rkisp1_write(params->rkisp1, reg_data, curve[i]);
+ }
+}
+
+static void
+rkisp1_compand_bls_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_compand_bls_config *arg)
+{
+ static const u32 regs[] = {
+ RKISP1_CIF_ISP_COMPAND_BLS_A_FIXED,
+ RKISP1_CIF_ISP_COMPAND_BLS_B_FIXED,
+ RKISP1_CIF_ISP_COMPAND_BLS_C_FIXED,
+ RKISP1_CIF_ISP_COMPAND_BLS_D_FIXED,
+ };
+ u32 swapped[4];
+
+ rkisp1_bls_swap_regs(params->raw_type, regs, swapped);
+
+ rkisp1_write(params->rkisp1, swapped[0], arg->r);
+ rkisp1_write(params->rkisp1, swapped[1], arg->gr);
+ rkisp1_write(params->rkisp1, swapped[2], arg->gb);
+ rkisp1_write(params->rkisp1, swapped[3], arg->b);
+}
+
+static void
+rkisp1_compand_expand_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_compand_curve_config *arg)
+{
+ rkisp1_compand_write_px_curve(params, RKISP1_CIF_ISP_COMPAND_EXPAND_PX_N(0),
+ arg->px);
+ rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_EXPAND_Y_ADDR,
+ RKISP1_CIF_ISP_COMPAND_EXPAND_Y_WRITE_DATA,
+ arg->y);
+ rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_EXPAND_X_ADDR,
+ RKISP1_CIF_ISP_COMPAND_EXPAND_X_WRITE_DATA,
+ arg->x);
+}
+
+static void
+rkisp1_compand_compress_config(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_compand_curve_config *arg)
+{
+ rkisp1_compand_write_px_curve(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_PX_N(0),
+ arg->px);
+ rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_ADDR,
+ RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_WRITE_DATA,
+ arg->y);
+ rkisp1_compand_write_curve_mem(params, RKISP1_CIF_ISP_COMPAND_COMPRESS_X_ADDR,
+ RKISP1_CIF_ISP_COMPAND_COMPRESS_X_WRITE_DATA,
+ arg->x);
+}
+
static void
rkisp1_isp_isr_other_config(struct rkisp1_params *params,
const struct rkisp1_params_cfg *new_params)
@@ -1249,6 +1358,12 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
module_cfg_update = new_params->module_cfg_update;
module_ens = new_params->module_ens;
+ if (!rkisp1_has_feature(params->rkisp1, BLS)) {
+ module_en_update &= ~RKISP1_CIF_ISP_MODULE_BLS;
+ module_cfg_update &= ~RKISP1_CIF_ISP_MODULE_BLS;
+ module_ens &= ~RKISP1_CIF_ISP_MODULE_BLS;
+ }
+
/* update dpc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPCC)
rkisp1_dpcc_config(params,
@@ -1501,21 +1616,551 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
}
}
-static bool rkisp1_params_get_buffer(struct rkisp1_params *params,
- struct rkisp1_buffer **buf,
- struct rkisp1_params_cfg **cfg)
+/*------------------------------------------------------------------------------
+ * Extensible parameters format handling
+ */
+
+static void
+rkisp1_ext_params_bls(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_bls_config *bls = &block->bls;
+
+ if (bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL,
+ RKISP1_CIF_ISP_BLS_ENA);
+ return;
+ }
+
+ rkisp1_bls_config(params, &bls->config);
+
+ if ((bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(bls->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_BLS_CTRL,
+ RKISP1_CIF_ISP_BLS_ENA);
+}
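The handlers that follow all share the shape of rkisp1_ext_params_bls() above; a schematic template, for reading convenience only:

static void
rkisp1_ext_params_template(struct rkisp1_params *params,
			   const union rkisp1_ext_params_config *block)
{
	/*
	 * 1. On RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE, switch the ISP block
	 *    off and return without touching its configuration.
	 * 2. Otherwise apply the block configuration from the payload.
	 * 3. On RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE, switch the block on,
	 *    but only if params->enabled_blocks does not already record
	 *    it as enabled (the caller maintains that bitmask).
	 */
}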
+
+static void
+rkisp1_ext_params_dpcc(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
{
- if (list_empty(&params->params))
- return false;
+ const struct rkisp1_ext_params_dpcc_config *dpcc = &block->dpcc;
+
+ if (dpcc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
+ RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
+ return;
+ }
- *buf = list_first_entry(&params->params, struct rkisp1_buffer, queue);
- *cfg = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ rkisp1_dpcc_config(params, &dpcc->config);
- return true;
+ if ((dpcc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(dpcc->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
+ RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
+}
+
+static void
+rkisp1_ext_params_sdg(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_sdg_config *sdg = &block->sdg;
+
+ if (sdg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+ return;
+ }
+
+ rkisp1_sdg_config(params, &sdg->config);
+
+ if ((sdg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(sdg->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
+}
+
+static void
+rkisp1_ext_params_lsc(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_lsc_config *lsc = &block->lsc;
+
+ if (lsc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+ return;
+ }
+
+ rkisp1_lsc_config(params, &lsc->config);
+
+ if ((lsc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(lsc->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
+ RKISP1_CIF_ISP_LSC_CTRL_ENA);
+}
+
+static void
+rkisp1_ext_params_awbg(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_awb_gain_config *awbg = &block->awbg;
+
+ if (awbg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ return;
+ }
+
+ params->ops->awb_gain_config(params, &awbg->config);
+
+ if ((awbg->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(awbg->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+}
+
+static void
+rkisp1_ext_params_flt(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_flt_config *flt = &block->flt;
+
+ if (flt->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE,
+ RKISP1_CIF_ISP_FLT_ENA);
+ return;
+ }
+
+ rkisp1_flt_config(params, &flt->config);
+
+ if ((flt->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(flt->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_FILT_MODE,
+ RKISP1_CIF_ISP_FLT_ENA);
+}
+
+static void
+rkisp1_ext_params_bdm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_bdm_config *bdm = &block->bdm;
+
+ if (bdm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DEMOSAIC,
+ RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+ return;
+ }
+
+ rkisp1_bdm_config(params, &bdm->config);
+
+ if ((bdm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(bdm->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DEMOSAIC,
+ RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
+}
+
+static void
+rkisp1_ext_params_ctk(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_ctk_config *ctk = &block->ctk;
+
+ if (ctk->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_ctk_enable(params, false);
+ return;
+ }
+
+ rkisp1_ctk_config(params, &ctk->config);
+
+ if ((ctk->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(ctk->header.type)))
+ rkisp1_ctk_enable(params, true);
+}
+
+static void
+rkisp1_ext_params_goc(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_goc_config *goc = &block->goc;
+
+ if (goc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ return;
+ }
+
+ params->ops->goc_config(params, &goc->config);
+
+ /*
+ * Unconditionally re-enable the GOC module, which gets disabled by
+ * goc_config().
+ */
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+}
+
+static void
+rkisp1_ext_params_dpf(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_dpf_config *dpf = &block->dpf;
+
+ if (dpf->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPF_MODE,
+ RKISP1_CIF_ISP_DPF_MODE_EN);
+ return;
+ }
+
+ rkisp1_dpf_config(params, &dpf->config);
+
+ if ((dpf->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(dpf->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPF_MODE,
+ RKISP1_CIF_ISP_DPF_MODE_EN);
+}
+
+static void
+rkisp1_ext_params_dpfs(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_dpf_strength_config *dpfs = &block->dpfs;
+
+ rkisp1_dpf_strength_config(params, &dpfs->config);
+}
+
+static void
+rkisp1_ext_params_cproc(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_cproc_config *cproc = &block->cproc;
+
+ if (cproc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_CTR_ENABLE);
+ return;
+ }
+
+ rkisp1_cproc_config(params, &cproc->config);
+
+ if ((cproc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(cproc->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_C_PROC_CTRL,
+ RKISP1_CIF_C_PROC_CTR_ENABLE);
+}
+
+static void
+rkisp1_ext_params_ie(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_ie_config *ie = &block->ie;
+
+ if (ie->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_ie_enable(params, false);
+ return;
+ }
+
+ rkisp1_ie_config(params, &ie->config);
+
+ if ((ie->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(ie->header.type)))
+ rkisp1_ie_enable(params, true);
+}
+
+static void
+rkisp1_ext_params_awbm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_awb_meas_config *awbm = &block->awbm;
+
+ if (awbm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ params->ops->awb_meas_enable(params, &awbm->config,
+ false);
+ return;
+ }
+
+ params->ops->awb_meas_config(params, &awbm->config);
+
+ if ((awbm->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(awbm->header.type)))
+ params->ops->awb_meas_enable(params, &awbm->config,
+ true);
+}
+
+static void
+rkisp1_ext_params_hstm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_hst_config *hst = &block->hst;
+
+ if (hst->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ params->ops->hst_enable(params, &hst->config, false);
+ return;
+ }
+
+ params->ops->hst_config(params, &hst->config);
+
+ if ((hst->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(hst->header.type)))
+ params->ops->hst_enable(params, &hst->config, true);
+}
+
+static void
+rkisp1_ext_params_aecm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_aec_config *aec = &block->aec;
+
+ if (aec->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
+ RKISP1_CIF_ISP_EXP_ENA);
+ return;
+ }
+
+ params->ops->aec_config(params, &aec->config);
+
+ if ((aec->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(aec->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
+ RKISP1_CIF_ISP_EXP_ENA);
+}
+
+static void
+rkisp1_ext_params_afcm(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_afc_config *afc = &block->afc;
+
+ if (afc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+ return;
+ }
+
+ params->ops->afm_config(params, &afc->config);
+
+ if ((afc->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(afc->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+}
+
+static void rkisp1_ext_params_compand_bls(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_compand_bls_config *bls =
+ &block->compand_bls;
+
+ if (bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE);
+ return;
+ }
+
+ rkisp1_compand_bls_config(params, &bls->config);
+
+ if ((bls->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(bls->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE);
+}
+
+static void rkisp1_ext_params_compand_expand(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_compand_curve_config *curve =
+ &block->compand_curve;
+
+ if (curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE);
+ return;
+ }
+
+ rkisp1_compand_expand_config(params, &curve->config);
+
+ if ((curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(curve->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE);
+}
+
+static void rkisp1_ext_params_compand_compress(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *block)
+{
+ const struct rkisp1_ext_params_compand_curve_config *curve =
+ &block->compand_curve;
+
+ if (curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE) {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE);
+ return;
+ }
+
+ rkisp1_compand_compress_config(params, &curve->config);
+
+ if ((curve->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE) &&
+ !(params->enabled_blocks & BIT(curve->header.type)))
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_COMPAND_CTRL,
+ RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE);
+}
+
+typedef void (*rkisp1_block_handler)(struct rkisp1_params *params,
+ const union rkisp1_ext_params_config *config);
+
+static const struct rkisp1_ext_params_handler {
+ size_t size;
+ rkisp1_block_handler handler;
+ unsigned int group;
+ unsigned int features;
+} rkisp1_ext_params_handlers[] = {
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS] = {
+ .size = sizeof(struct rkisp1_ext_params_bls_config),
+ .handler = rkisp1_ext_params_bls,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ .features = RKISP1_FEATURE_BLS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC] = {
+ .size = sizeof(struct rkisp1_ext_params_dpcc_config),
+ .handler = rkisp1_ext_params_dpcc,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG] = {
+ .size = sizeof(struct rkisp1_ext_params_sdg_config),
+ .handler = rkisp1_ext_params_sdg,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN] = {
+ .size = sizeof(struct rkisp1_ext_params_awb_gain_config),
+ .handler = rkisp1_ext_params_awbg,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT] = {
+ .size = sizeof(struct rkisp1_ext_params_flt_config),
+ .handler = rkisp1_ext_params_flt,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM] = {
+ .size = sizeof(struct rkisp1_ext_params_bdm_config),
+ .handler = rkisp1_ext_params_bdm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK] = {
+ .size = sizeof(struct rkisp1_ext_params_ctk_config),
+ .handler = rkisp1_ext_params_ctk,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC] = {
+ .size = sizeof(struct rkisp1_ext_params_goc_config),
+ .handler = rkisp1_ext_params_goc,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF] = {
+ .size = sizeof(struct rkisp1_ext_params_dpf_config),
+ .handler = rkisp1_ext_params_dpf,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH] = {
+ .size = sizeof(struct rkisp1_ext_params_dpf_strength_config),
+ .handler = rkisp1_ext_params_dpfs,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC] = {
+ .size = sizeof(struct rkisp1_ext_params_cproc_config),
+ .handler = rkisp1_ext_params_cproc,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_IE] = {
+ .size = sizeof(struct rkisp1_ext_params_ie_config),
+ .handler = rkisp1_ext_params_ie,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC] = {
+ .size = sizeof(struct rkisp1_ext_params_lsc_config),
+ .handler = rkisp1_ext_params_lsc,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS] = {
+ .size = sizeof(struct rkisp1_ext_params_awb_meas_config),
+ .handler = rkisp1_ext_params_awbm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS] = {
+ .size = sizeof(struct rkisp1_ext_params_hst_config),
+ .handler = rkisp1_ext_params_hstm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS] = {
+ .size = sizeof(struct rkisp1_ext_params_aec_config),
+ .handler = rkisp1_ext_params_aecm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS] = {
+ .size = sizeof(struct rkisp1_ext_params_afc_config),
+ .handler = rkisp1_ext_params_afcm,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS] = {
+ .size = sizeof(struct rkisp1_ext_params_compand_bls_config),
+ .handler = rkisp1_ext_params_compand_bls,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ .features = RKISP1_FEATURE_COMPAND,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND] = {
+ .size = sizeof(struct rkisp1_ext_params_compand_curve_config),
+ .handler = rkisp1_ext_params_compand_expand,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ .features = RKISP1_FEATURE_COMPAND,
+ },
+ [RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS] = {
+ .size = sizeof(struct rkisp1_ext_params_compand_curve_config),
+ .handler = rkisp1_ext_params_compand_compress,
+ .group = RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS,
+ .features = RKISP1_FEATURE_COMPAND,
+ },
+};
+
+static void rkisp1_ext_params_config(struct rkisp1_params *params,
+ struct rkisp1_ext_params_cfg *cfg,
+ u32 block_group_mask)
+{
+ size_t block_offset = 0;
+
+ if (WARN_ON(!cfg))
+ return;
+
+ /* Walk the list of parameter blocks and process them. */
+ while (block_offset < cfg->data_size) {
+ const struct rkisp1_ext_params_handler *block_handler;
+ const union rkisp1_ext_params_config *block;
+
+ block = (const union rkisp1_ext_params_config *)
+ &cfg->data[block_offset];
+ block_offset += block->header.size;
+
+ /*
+ * Make sure the block is supported by the platform and in the
+ * list of groups to configure.
+ */
+ block_handler = &rkisp1_ext_params_handlers[block->header.type];
+ if (!(block_handler->group & block_group_mask))
+ continue;
+
+ if ((params->rkisp1->info->features & block_handler->features) !=
+ block_handler->features)
+ continue;
+
+ block_handler->handler(params, block);
+
+ if (block->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE)
+ params->enabled_blocks &= ~BIT(block->header.type);
+ else if (block->header.flags & RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE)
+ params->enabled_blocks |= BIT(block->header.type);
+ }
}
static void rkisp1_params_complete_buffer(struct rkisp1_params *params,
- struct rkisp1_buffer *buf,
+ struct rkisp1_params_buffer *buf,
unsigned int frame_sequence)
{
list_del(&buf->queue);
@@ -1527,17 +2172,24 @@ static void rkisp1_params_complete_buffer(struct rkisp1_params *params,
void rkisp1_params_isr(struct rkisp1_device *rkisp1)
{
struct rkisp1_params *params = &rkisp1->params;
- struct rkisp1_params_cfg *new_params;
- struct rkisp1_buffer *cur_buf;
+ struct rkisp1_params_buffer *cur_buf;
spin_lock(&params->config_lock);
- if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+ cur_buf = list_first_entry_or_null(&params->params,
+ struct rkisp1_params_buffer, queue);
+ if (!cur_buf)
goto unlock;
- rkisp1_isp_isr_other_config(params, new_params);
- rkisp1_isp_isr_lsc_config(params, new_params);
- rkisp1_isp_isr_meas_config(params, new_params);
+ if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS) {
+ rkisp1_isp_isr_other_config(params, cur_buf->cfg);
+ rkisp1_isp_isr_lsc_config(params, cur_buf->cfg);
+ rkisp1_isp_isr_meas_config(params, cur_buf->cfg);
+ } else {
+ rkisp1_ext_params_config(params, cur_buf->cfg,
+ RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS |
+ RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC);
+ }
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
@@ -1603,8 +2255,7 @@ void rkisp1_params_pre_configure(struct rkisp1_params *params,
enum v4l2_ycbcr_encoding ycbcr_encoding)
{
struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
- struct rkisp1_params_cfg *new_params;
- struct rkisp1_buffer *cur_buf;
+ struct rkisp1_params_buffer *cur_buf;
params->quantization = quantization;
params->ycbcr_encoding = ycbcr_encoding;
@@ -1633,11 +2284,18 @@ void rkisp1_params_pre_configure(struct rkisp1_params *params,
/* apply the first buffer if there is one already */
- if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+ cur_buf = list_first_entry_or_null(&params->params,
+ struct rkisp1_params_buffer, queue);
+ if (!cur_buf)
goto unlock;
- rkisp1_isp_isr_other_config(params, new_params);
- rkisp1_isp_isr_meas_config(params, new_params);
+ if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS) {
+ rkisp1_isp_isr_other_config(params, cur_buf->cfg);
+ rkisp1_isp_isr_meas_config(params, cur_buf->cfg);
+ } else {
+ rkisp1_ext_params_config(params, cur_buf->cfg,
+ RKISP1_EXT_PARAMS_BLOCK_GROUP_OTHERS);
+ }
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
@@ -1649,8 +2307,7 @@ unlock:
void rkisp1_params_post_configure(struct rkisp1_params *params)
{
- struct rkisp1_params_cfg *new_params;
- struct rkisp1_buffer *cur_buf;
+ struct rkisp1_params_buffer *cur_buf;
spin_lock_irq(&params->config_lock);
@@ -1662,11 +2319,16 @@ void rkisp1_params_post_configure(struct rkisp1_params *params)
* ordering doesn't affect other ISP versions negatively, do so
* unconditionally.
*/
-
- if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
+ cur_buf = list_first_entry_or_null(&params->params,
+ struct rkisp1_params_buffer, queue);
+ if (!cur_buf)
goto unlock;
- rkisp1_isp_isr_lsc_config(params, new_params);
+ if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_PARAMS)
+ rkisp1_isp_isr_lsc_config(params, cur_buf->cfg);
+ else
+ rkisp1_ext_params_config(params, cur_buf->cfg,
+ RKISP1_EXT_PARAMS_BLOCK_GROUP_LSC);
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
@@ -1742,12 +2404,12 @@ static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct video_device *video = video_devdata(file);
- struct rkisp1_params *params = video_get_drvdata(video);
- if (f->index > 0 || f->type != video->queue->type)
+ if (f->index >= ARRAY_SIZE(rkisp1_params_formats) ||
+ f->type != video->queue->type)
return -EINVAL;
- f->pixelformat = params->vdev_fmt.fmt.meta.dataformat;
+ f->pixelformat = rkisp1_params_formats[f->index].dataformat;
return 0;
}
@@ -1762,9 +2424,40 @@ static int rkisp1_params_g_fmt_meta_out(struct file *file, void *fh,
if (f->type != video->queue->type)
return -EINVAL;
- memset(meta, 0, sizeof(*meta));
- meta->dataformat = params->vdev_fmt.fmt.meta.dataformat;
- meta->buffersize = params->vdev_fmt.fmt.meta.buffersize;
+ *meta = *params->metafmt;
+
+ return 0;
+}
+
+static int rkisp1_params_try_fmt_meta_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *video = video_devdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (f->type != video->queue->type)
+ return -EINVAL;
+
+ *meta = *rkisp1_params_get_format_info(meta->dataformat);
+
+ return 0;
+}
+
+static int rkisp1_params_s_fmt_meta_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *video = video_devdata(file);
+ struct rkisp1_params *params = video_get_drvdata(video);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (f->type != video->queue->type)
+ return -EINVAL;
+
+ if (vb2_is_busy(video->queue))
+ return -EBUSY;
+
+ params->metafmt = rkisp1_params_get_format_info(meta->dataformat);
+ *meta = *params->metafmt;
return 0;
}
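A usage sketch from the user-space side of the S_FMT handler above (illustrative only; the device node path and error handling are assumptions): selecting the extensible format before allocating buffers.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int select_ext_params(const char *devnode)
{
	struct v4l2_format fmt;
	int fd, ret;

	fd = open(devnode, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_META_OUTPUT;
	fmt.fmt.meta.dataformat = V4L2_META_FMT_RK_ISP1_EXT_PARAMS;

	/* The driver fills in the matching buffersize. */
	ret = ioctl(fd, VIDIOC_S_FMT, &fmt);
	close(fd);
	return ret;
}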
@@ -1794,8 +2487,8 @@ static const struct v4l2_ioctl_ops rkisp1_params_ioctl = {
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_fmt_meta_out = rkisp1_params_enum_fmt_meta_out,
.vidioc_g_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
- .vidioc_s_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
- .vidioc_try_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
+ .vidioc_s_fmt_meta_out = rkisp1_params_s_fmt_meta_out,
+ .vidioc_try_fmt_meta_out = rkisp1_params_try_fmt_meta_out,
.vidioc_querycap = rkisp1_params_querycap,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -1807,22 +2500,46 @@ static int rkisp1_params_vb2_queue_setup(struct vb2_queue *vq,
unsigned int sizes[],
struct device *alloc_devs[])
{
+ struct rkisp1_params *params = vq->drv_priv;
+
*num_buffers = clamp_t(u32, *num_buffers,
RKISP1_ISP_PARAMS_REQ_BUFS_MIN,
RKISP1_ISP_PARAMS_REQ_BUFS_MAX);
*num_planes = 1;
- sizes[0] = sizeof(struct rkisp1_params_cfg);
+ sizes[0] = params->metafmt->buffersize;
return 0;
}
+static int rkisp1_params_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
+ struct rkisp1_params *params = vb->vb2_queue->drv_priv;
+
+ params_buf->cfg = kvmalloc(params->metafmt->buffersize,
+ GFP_KERNEL);
+ if (!params_buf->cfg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void rkisp1_params_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
+
+ kvfree(params_buf->cfg);
+ params_buf->cfg = NULL;
+}
+
static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct rkisp1_buffer *params_buf =
- container_of(vbuf, struct rkisp1_buffer, vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
struct vb2_queue *vq = vb->vb2_queue;
struct rkisp1_params *params = vq->drv_priv;
@@ -1831,12 +2548,133 @@ static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb)
spin_unlock_irq(&params->config_lock);
}
+static int rkisp1_params_prepare_ext_params(struct rkisp1_params *params,
+ struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
+ size_t header_size = offsetof(struct rkisp1_ext_params_cfg, data);
+ struct rkisp1_ext_params_cfg *cfg = params_buf->cfg;
+ size_t payload_size = vb2_get_plane_payload(vb, 0);
+ struct rkisp1_ext_params_cfg *usr_cfg =
+ vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ size_t block_offset = 0;
+ size_t cfg_size;
+
+ /*
+ * Validate the buffer payload size before copying the parameters: the
+ * payload must not exceed the destination buffer size and must be at
+ * least as large as the header.
+ */
+ if (payload_size > params->metafmt->buffersize) {
+ dev_dbg(params->rkisp1->dev,
+ "Too large buffer payload size %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ if (payload_size < header_size) {
+ dev_dbg(params->rkisp1->dev,
+ "Buffer payload %zu smaller than header size %zu\n",
+ payload_size, header_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Copy the parameters buffer to the internal scratch buffer to avoid
+ * userspace modifying the buffer content while the driver processes it.
+ */
+ memcpy(cfg, usr_cfg, payload_size);
+
+ /* Only v1 is supported at the moment. */
+ if (cfg->version != RKISP1_EXT_PARAM_BUFFER_V1) {
+ dev_dbg(params->rkisp1->dev,
+ "Unsupported extensible format version: %u\n",
+ cfg->version);
+ return -EINVAL;
+ }
+
+ /* Validate the size reported in the parameters buffer header. */
+ cfg_size = header_size + cfg->data_size;
+ if (cfg_size != payload_size) {
+ dev_dbg(params->rkisp1->dev,
+ "Data size %zu different than buffer payload size %zu\n",
+ cfg_size, payload_size);
+ return -EINVAL;
+ }
+
+ /* Walk the list of parameter blocks and validate them. */
+ cfg_size = cfg->data_size;
+ while (cfg_size >= sizeof(struct rkisp1_ext_params_block_header)) {
+ const struct rkisp1_ext_params_block_header *block;
+ const struct rkisp1_ext_params_handler *handler;
+
+ block = (const struct rkisp1_ext_params_block_header *)
+ &cfg->data[block_offset];
+
+ if (block->type >= ARRAY_SIZE(rkisp1_ext_params_handlers)) {
+ dev_dbg(params->rkisp1->dev,
+ "Invalid parameters block type\n");
+ return -EINVAL;
+ }
+
+ if (block->size > cfg_size) {
+ dev_dbg(params->rkisp1->dev,
+ "Premature end of parameters data\n");
+ return -EINVAL;
+ }
+
+ if ((block->flags & (RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE |
+ RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE)) ==
+ (RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE |
+ RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE)) {
+ dev_dbg(params->rkisp1->dev,
+ "Invalid parameters block flags\n");
+ return -EINVAL;
+ }
+
+ handler = &rkisp1_ext_params_handlers[block->type];
+ if (block->size != handler->size) {
+ dev_dbg(params->rkisp1->dev,
+ "Invalid parameters block size\n");
+ return -EINVAL;
+ }
+
+ block_offset += block->size;
+ cfg_size -= block->size;
+ }
+
+ if (cfg_size) {
+ dev_dbg(params->rkisp1->dev,
+ "Unexpected data after the parameters buffer end\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
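For reference, a hedged sketch of user-space code that builds a buffer passing the checks above, carrying a single BLS block (the black-level values are placeholders; struct and macro names follow the rkisp1-config.h UAPI introduced by this series):

#include <stddef.h>
#include <string.h>
#include <linux/rkisp1-config.h>

/* Returns the payload size to set as bytesused when queueing. */
static size_t fill_ext_params(struct rkisp1_ext_params_cfg *cfg)
{
	struct rkisp1_ext_params_bls_config *bls =
		(struct rkisp1_ext_params_bls_config *)cfg->data;

	cfg->version = RKISP1_EXT_PARAM_BUFFER_V1;

	bls->header.type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS;
	bls->header.flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE;
	bls->header.size = sizeof(*bls);
	bls->config.enable_auto = 0;
	bls->config.fixed_val.r = 64;	/* placeholder black levels */
	bls->config.fixed_val.gr = 64;
	bls->config.fixed_val.gb = 64;
	bls->config.fixed_val.b = 64;

	cfg->data_size = bls->header.size;

	/* The driver requires payload == header size + data_size. */
	return offsetof(struct rkisp1_ext_params_cfg, data) + cfg->data_size;
}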
+
static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb)
{
- if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_params_cfg))
+ struct rkisp1_params *params = vb->vb2_queue->drv_priv;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rkisp1_params_buffer *params_buf = to_rkisp1_params_buffer(vbuf);
+ struct rkisp1_params_cfg *cfg = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ size_t payload = vb2_get_plane_payload(vb, 0);
+
+ if (params->metafmt->dataformat == V4L2_META_FMT_RK_ISP1_EXT_PARAMS)
+ return rkisp1_params_prepare_ext_params(params, vb);
+
+ /*
+ * For the fixed parameters format, the payload size must be exactly the
+ * size of the parameters structure.
+ */
+ if (payload != sizeof(*cfg))
return -EINVAL;
- vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_params_cfg));
+ /*
+ * Copy the parameters buffer to the internal scratch buffer to avoid
+ * userspace modifying the buffer content while the driver processes it.
+ */
+ memcpy(params_buf->cfg, cfg, payload);
return 0;
}
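Correspondingly, with the fixed format user space must set bytesused to exactly sizeof(struct rkisp1_params_cfg) when queueing. A minimal sketch (buffer setup via VIDIOC_REQBUFS is assumed to have happened already):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/rkisp1-config.h>

static int queue_fixed_params(int fd, unsigned int index)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_META_OUTPUT;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	buf.bytesused = sizeof(struct rkisp1_params_cfg);

	return ioctl(fd, VIDIOC_QBUF, &buf);
}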
@@ -1844,7 +2682,7 @@ static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb)
static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq)
{
struct rkisp1_params *params = vq->drv_priv;
- struct rkisp1_buffer *buf;
+ struct rkisp1_params_buffer *buf;
LIST_HEAD(tmp_list);
/*
@@ -1858,16 +2696,19 @@ static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq)
list_for_each_entry(buf, &tmp_list, queue)
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+
+ params->enabled_blocks = 0;
}
static const struct vb2_ops rkisp1_params_vb2_ops = {
.queue_setup = rkisp1_params_vb2_queue_setup,
+ .buf_init = rkisp1_params_vb2_buf_init,
+ .buf_cleanup = rkisp1_params_vb2_buf_cleanup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.buf_queue = rkisp1_params_vb2_buf_queue,
.buf_prepare = rkisp1_params_vb2_buf_prepare,
.stop_streaming = rkisp1_params_vb2_stop_streaming,
-
};
static const struct v4l2_file_operations rkisp1_params_fops = {
@@ -1890,26 +2731,13 @@ static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
q->drv_priv = params;
q->ops = &rkisp1_params_vb2_ops;
q->mem_ops = &vb2_vmalloc_memops;
- q->buf_struct_size = sizeof(struct rkisp1_buffer);
+ q->buf_struct_size = sizeof(struct rkisp1_params_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->vlock;
return vb2_queue_init(q);
}
-static void rkisp1_init_params(struct rkisp1_params *params)
-{
- params->vdev_fmt.fmt.meta.dataformat =
- V4L2_META_FMT_RK_ISP1_PARAMS;
- params->vdev_fmt.fmt.meta.buffersize =
- sizeof(struct rkisp1_params_cfg);
-
- if (params->rkisp1->info->isp_ver == RKISP1_V12)
- params->ops = &rkisp1_v12_params_ops;
- else
- params->ops = &rkisp1_v10_params_ops;
-}
-
int rkisp1_params_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_params *params = &rkisp1->params;
@@ -1938,7 +2766,14 @@ int rkisp1_params_register(struct rkisp1_device *rkisp1)
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_META_OUTPUT;
vdev->vfl_dir = VFL_DIR_TX;
rkisp1_params_init_vb2_queue(vdev->queue, params);
- rkisp1_init_params(params);
+
+ params->metafmt = &rkisp1_params_formats[RKISP1_PARAMS_FIXED];
+
+ if (params->rkisp1->info->isp_ver == RKISP1_V12)
+ params->ops = &rkisp1_v12_params_ops;
+ else
+ params->ops = &rkisp1_v10_params_ops;
+
video_set_drvdata(vdev, params);
node->pad.flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index fccf4c17ee8d..bf0260600a19 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -704,6 +704,12 @@
#define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX 0x1f
#define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX 0x3ff
+/* COMPAND */
+#define RKISP1_CIF_ISP_COMPAND_CTRL_EXPAND_ENABLE BIT(0)
+#define RKISP1_CIF_ISP_COMPAND_CTRL_COMPRESS_ENABLE BIT(1)
+#define RKISP1_CIF_ISP_COMPAND_CTRL_SOFT_RESET_FLAG BIT(2)
+#define RKISP1_CIF_ISP_COMPAND_CTRL_BLS_ENABLE BIT(3)
+
/* =================================================================== */
/* CIF Registers */
/* =================================================================== */
@@ -1394,6 +1400,23 @@
#define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001c)
#define RKISP1_CIF_ISP_VSM_DELTA_V (RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_COMPAND_BASE 0x00003200
+#define RKISP1_CIF_ISP_COMPAND_CTRL (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000000)
+#define RKISP1_CIF_ISP_COMPAND_BLS_A_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_COMPAND_BLS_B_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_COMPAND_BLS_C_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000000c)
+#define RKISP1_CIF_ISP_COMPAND_BLS_D_FIXED (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_PX_N(n) (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000014 + (n) * 4)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_PX_N(n) (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000040 + (n) * 4)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_Y_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000006c)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_Y_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_Y_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000078)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_X_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x0000007c)
+#define RKISP1_CIF_ISP_COMPAND_EXPAND_X_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000080)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_X_ADDR (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000084)
+#define RKISP1_CIF_ISP_COMPAND_COMPRESS_X_WRITE_DATA (RKISP1_CIF_ISP_COMPAND_BASE + 0x00000088)
+
#define RKISP1_CIF_ISP_CSI0_BASE 0x00007000
#define RKISP1_CIF_ISP_CSI0_CTRL0 (RKISP1_CIF_ISP_CSI0_BASE + 0x00000000)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index 1fa991227fa9..f073e72a0d37 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -494,10 +494,10 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
sink_fmt->width = clamp_t(u32, format->width,
RKISP1_ISP_MIN_WIDTH,
- RKISP1_ISP_MAX_WIDTH);
+ rsz->rkisp1->info->max_width);
sink_fmt->height = clamp_t(u32, format->height,
RKISP1_ISP_MIN_HEIGHT,
- RKISP1_ISP_MAX_HEIGHT);
+ rsz->rkisp1->info->max_height);
/*
* Adjust the color space fields. Accept any color primaries and
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
index 2795eef91bdd..a502719e916a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
@@ -304,48 +304,25 @@ static void rkisp1_stats_get_hst_meas_v12(struct rkisp1_stats *stats,
static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
+ static const u32 regs[] = {
+ RKISP1_CIF_ISP_BLS_A_MEASURED,
+ RKISP1_CIF_ISP_BLS_B_MEASURED,
+ RKISP1_CIF_ISP_BLS_C_MEASURED,
+ RKISP1_CIF_ISP_BLS_D_MEASURED,
+ };
struct rkisp1_device *rkisp1 = stats->rkisp1;
const struct rkisp1_mbus_info *in_fmt = rkisp1->isp.sink_fmt;
struct rkisp1_cif_isp_bls_meas_val *bls_val;
+ u32 swapped[4];
+
+ rkisp1_bls_swap_regs(in_fmt->bayer_pat, regs, swapped);
bls_val = &pbuf->params.ae.bls_val;
- if (in_fmt->bayer_pat == RKISP1_RAW_BGGR) {
- bls_val->meas_b =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
- bls_val->meas_gb =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
- bls_val->meas_gr =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
- bls_val->meas_r =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
- } else if (in_fmt->bayer_pat == RKISP1_RAW_GBRG) {
- bls_val->meas_gb =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
- bls_val->meas_b =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
- bls_val->meas_r =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
- bls_val->meas_gr =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
- } else if (in_fmt->bayer_pat == RKISP1_RAW_GRBG) {
- bls_val->meas_gr =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
- bls_val->meas_r =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
- bls_val->meas_b =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
- bls_val->meas_gb =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
- } else if (in_fmt->bayer_pat == RKISP1_RAW_RGGB) {
- bls_val->meas_r =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
- bls_val->meas_gr =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
- bls_val->meas_gb =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
- bls_val->meas_b =
- rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
- }
+
+ bls_val->meas_r = rkisp1_read(rkisp1, swapped[0]);
+ bls_val->meas_gr = rkisp1_read(rkisp1, swapped[1]);
+ bls_val->meas_gb = rkisp1_read(rkisp1, swapped[2]);
+ bls_val->meas_b = rkisp1_read(rkisp1, swapped[3]);
}
static const struct rkisp1_stats_ops rkisp1_v10_stats_ops = {
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
index 618ae55fe396..f45f5c8612a6 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
@@ -1225,7 +1225,7 @@ static void gsc_remove(struct platform_device *pdev)
static int gsc_m2m_suspend(struct gsc_dev *gsc)
{
unsigned long flags;
- int timeout;
+ long time_left;
spin_lock_irqsave(&gsc->slock, flags);
if (!gsc_m2m_pending(gsc)) {
@@ -1236,12 +1236,12 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
set_bit(ST_M2M_SUSPENDING, &gsc->state);
spin_unlock_irqrestore(&gsc->slock, flags);
- timeout = wait_event_timeout(gsc->irq_queue,
- test_bit(ST_M2M_SUSPENDED, &gsc->state),
- GSC_SHUTDOWN_TIMEOUT);
+ time_left = wait_event_timeout(gsc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &gsc->state),
+ GSC_SHUTDOWN_TIMEOUT);
clear_bit(ST_M2M_SUSPENDING, &gsc->state);
- return timeout == 0 ? -EAGAIN : 0;
+ return time_left == 0 ? -EAGAIN : 0;
}
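This and the analogous hunks below (fimc, bdisp, cal) share one rationale: wait_event_timeout() returns the remaining time in jiffies as a long (0 on timeout), so storing the result in an int risks truncation with large timeouts on 64-bit kernels. The resulting pattern, for illustration:

long time_left;

time_left = wait_event_timeout(wq, condition, msecs_to_jiffies(500));
if (!time_left)
	return -EAGAIN;		/* timed out before the condition held */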
static void gsc_m2m_resume(struct gsc_dev *gsc)
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
index aae74b501a42..adfc2d73d04b 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
@@ -822,7 +822,7 @@ err:
static int fimc_m2m_suspend(struct fimc_dev *fimc)
{
unsigned long flags;
- int timeout;
+ long time_left;
spin_lock_irqsave(&fimc->slock, flags);
if (!fimc_m2m_pending(fimc)) {
@@ -833,12 +833,12 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc)
set_bit(ST_M2M_SUSPENDING, &fimc->state);
spin_unlock_irqrestore(&fimc->slock, flags);
- timeout = wait_event_timeout(fimc->irq_queue,
- test_bit(ST_M2M_SUSPENDED, &fimc->state),
- FIMC_SHUTDOWN_TIMEOUT);
+ time_left = wait_event_timeout(fimc->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &fimc->state),
+ FIMC_SHUTDOWN_TIMEOUT);
clear_bit(ST_M2M_SUSPENDING, &fimc->state);
- return timeout == 0 ? -EAGAIN : 0;
+ return time_left == 0 ? -EAGAIN : 0;
}
static int fimc_m2m_resume(struct fimc_dev *fimc)
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
index 1328b4eb6b9f..c7ee6e1a4451 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -1160,7 +1160,7 @@ static void bdisp_irq_timeout(struct work_struct *ptr)
static int bdisp_m2m_suspend(struct bdisp_dev *bdisp)
{
unsigned long flags;
- int timeout;
+ long time_left;
spin_lock_irqsave(&bdisp->slock, flags);
if (!test_bit(ST_M2M_RUNNING, &bdisp->state)) {
@@ -1171,13 +1171,13 @@ static int bdisp_m2m_suspend(struct bdisp_dev *bdisp)
set_bit(ST_M2M_SUSPENDING, &bdisp->state);
spin_unlock_irqrestore(&bdisp->slock, flags);
- timeout = wait_event_timeout(bdisp->irq_queue,
- test_bit(ST_M2M_SUSPENDED, &bdisp->state),
- BDISP_WORK_TIMEOUT);
+ time_left = wait_event_timeout(bdisp->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &bdisp->state),
+ BDISP_WORK_TIMEOUT);
clear_bit(ST_M2M_SUSPENDING, &bdisp->state);
- if (!timeout) {
+ if (!time_left) {
dev_err(bdisp->dev, "%s IRQ timeout\n", __func__);
return -EAGAIN;
}
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index 097a3a08ef7d..d07e980aba61 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -35,7 +35,18 @@ struct sun4i_csi_traits {
bool has_isp;
};
+static int sun4i_csi_video_link_validate(struct media_link *link)
+{
+ dev_warn_once(link->graph_obj.mdev->dev,
+ "Driver bug: link validation not implemented\n");
+ return 0;
+}
+
static const struct media_entity_operations sun4i_csi_video_entity_ops = {
+ .link_validate = sun4i_csi_video_link_validate,
+};
+
+static const struct media_entity_operations sun4i_csi_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -214,6 +225,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
subdev->internal_ops = &sun4i_csi_subdev_internal_ops;
subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ subdev->entity.ops = &sun4i_csi_subdev_entity_ops;
subdev->owner = THIS_MODULE;
snprintf(subdev->name, sizeof(subdev->name), "sun4i-csi-0");
v4l2_set_subdevdata(subdev, csi);
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c
index 77e12457d149..009ff68a2b43 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c
@@ -2287,7 +2287,7 @@ static const struct v4l2_async_notifier_operations vpfe_async_ops = {
static struct vpfe_config *
vpfe_get_pdata(struct vpfe_device *vpfe)
{
- struct device_node *endpoint = NULL;
+ struct device_node *endpoint;
struct device *dev = vpfe->pdev;
struct vpfe_subdev_info *sdinfo;
struct vpfe_config *pdata;
@@ -2306,14 +2306,11 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
if (!pdata)
return NULL;
- for (i = 0; ; i++) {
+ i = 0;
+ for_each_endpoint_of_node(dev->of_node, endpoint) {
struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
struct device_node *rem;
- endpoint = of_graph_get_next_endpoint(dev->of_node, endpoint);
- if (!endpoint)
- break;
-
sdinfo = &pdata->sub_devs[i];
sdinfo->grp_id = 0;
@@ -2371,9 +2368,10 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
of_node_put(rem);
if (IS_ERR(pdata->asd[i]))
goto cleanup;
+
+ i++;
}
- of_node_put(endpoint);
return pdata;
cleanup:
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 4afc2ad00330..42dfe08b765f 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -798,7 +798,7 @@ static const struct v4l2_subdev_internal_ops cal_camerarx_internal_ops = {
.init_state = cal_camerarx_sd_init_state,
};
-static struct media_entity_operations cal_camerarx_media_ops = {
+static const struct media_entity_operations cal_camerarx_media_ops = {
.link_validate = v4l2_subdev_link_validate,
};
diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
index 528909ae4bd6..5c2c04142aee 100644
--- a/drivers/media/platform/ti/cal/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -549,7 +549,7 @@ void cal_ctx_start(struct cal_ctx *ctx)
void cal_ctx_stop(struct cal_ctx *ctx)
{
struct cal_camerarx *phy = ctx->phy;
- long timeout;
+ long time_left;
WARN_ON(phy->vc_enable_count[ctx->vc] == 0);
@@ -565,9 +565,9 @@ void cal_ctx_stop(struct cal_ctx *ctx)
ctx->dma.state = CAL_DMA_STOP_REQUESTED;
spin_unlock_irq(&ctx->dma.lock);
- timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
- msecs_to_jiffies(500));
- if (!timeout) {
+ time_left = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
+ msecs_to_jiffies(500));
+ if (!time_left) {
ctx_err(ctx, "failed to disable dma cleanly\n");
cal_ctx_wr_dma_disable(ctx);
}
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c
index c28794b6677b..16326437767f 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.c
+++ b/drivers/media/platform/ti/davinci/vpif_capture.c
@@ -1487,7 +1487,7 @@ static struct vpif_capture_config *
vpif_capture_get_pdata(struct platform_device *pdev,
struct v4l2_device *v4l2_dev)
{
- struct device_node *endpoint = NULL;
+ struct device_node *endpoint;
struct device_node *rem = NULL;
struct vpif_capture_config *pdata;
struct vpif_subdev_info *sdinfo;
@@ -1517,16 +1517,12 @@ vpif_capture_get_pdata(struct platform_device *pdev,
if (!pdata->subdev_info)
return NULL;
- for (i = 0; i < VPIF_CAPTURE_NUM_CHANNELS; i++) {
+ i = 0;
+ for_each_endpoint_of_node(pdev->dev.of_node, endpoint) {
struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
unsigned int flags;
int err;
- endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
- endpoint);
- if (!endpoint)
- break;
-
rem = of_graph_get_remote_port_parent(endpoint);
if (!rem) {
dev_dbg(&pdev->dev, "Remote device at %pOF not found\n",
@@ -1577,6 +1573,10 @@ vpif_capture_get_pdata(struct platform_device *pdev,
goto err_cleanup;
of_node_put(rem);
+
+ i++;
+ if (i >= VPIF_CAPTURE_NUM_CHANNELS)
+ break;
}
done:
diff --git a/drivers/media/platform/verisilicon/Kconfig b/drivers/media/platform/verisilicon/Kconfig
index 149d0b32c324..3272a24db71d 100644
--- a/drivers/media/platform/verisilicon/Kconfig
+++ b/drivers/media/platform/verisilicon/Kconfig
@@ -21,6 +21,14 @@ config VIDEO_HANTRO
To compile this driver as a module, choose M here: the module
will be called hantro-vpu.
+config VIDEO_HANTRO_HEVC_RFC
+ bool "Use reference frame compression for HEVC"
+ depends on VIDEO_HANTRO
+ default n
+ help
+ Enable the reference frame compression feature for the HEVC codec.
+ It will use more memory but save bandwidth on the memory bus.
+
config VIDEO_HANTRO_IMX8M
bool "Hantro VPU i.MX8M support"
depends on VIDEO_HANTRO
diff --git a/drivers/media/platform/verisilicon/Makefile b/drivers/media/platform/verisilicon/Makefile
index eb38a1833b02..f6f019d04ff0 100644
--- a/drivers/media/platform/verisilicon/Makefile
+++ b/drivers/media/platform/verisilicon/Makefile
@@ -14,13 +14,6 @@ hantro-vpu-y += \
hantro_g2.o \
hantro_g2_hevc_dec.o \
hantro_g2_vp9_dec.o \
- rockchip_vpu2_hw_jpeg_enc.o \
- rockchip_vpu2_hw_h264_dec.o \
- rockchip_vpu2_hw_mpeg2_dec.o \
- rockchip_vpu2_hw_vp8_dec.o \
- rockchip_vpu981_hw_av1_dec.o \
- rockchip_av1_filmgrain.o \
- rockchip_av1_entropymode.o \
hantro_jpeg.o \
hantro_h264.o \
hantro_hevc.o \
@@ -35,6 +28,13 @@ hantro-vpu-$(CONFIG_VIDEO_HANTRO_SAMA5D4) += \
sama5d4_vdec_hw.o
hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
+ rockchip_vpu2_hw_jpeg_enc.o \
+ rockchip_vpu2_hw_h264_dec.o \
+ rockchip_vpu2_hw_mpeg2_dec.o \
+ rockchip_vpu2_hw_vp8_dec.o \
+ rockchip_vpu981_hw_av1_dec.o \
+ rockchip_av1_filmgrain.o \
+ rockchip_av1_entropymode.o \
rockchip_vpu_hw.o
hantro-vpu-$(CONFIG_VIDEO_HANTRO_SUNXI) += \
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 34b123dafd89..05bbac853c4f 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -722,6 +722,7 @@ static const struct of_device_id of_hantro_match[] = {
{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
{ .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
+ { .compatible = "rockchip,rk3588-vepu121", .data = &rk3568_vepu_variant, },
{ .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
@@ -992,6 +993,49 @@ static const struct media_device_ops hantro_m2m_media_ops = {
.req_queue = v4l2_m2m_request_queue,
};
+/*
+ * Some SoCs, like the RK3588, have multiple identical Hantro cores, but the
+ * kernel is currently missing support for multi-core handling. Exposing
+ * separate devices for each core to userspace is bad, since that does
+ * not allow scheduling tasks properly (and creates de facto ABI). With this workaround
+ * the driver will only probe for the first core and early exit for the other
+ * cores. Once the driver gains multi-core support, the same technique
+ * for detecting the main core can be used to cluster all cores together.
+ */
+static int hantro_disable_multicore(struct hantro_dev *vpu)
+{
+ struct device_node *node = NULL;
+ const char *compatible;
+ bool is_main_core;
+ int ret;
+
+ /* Intentionally ignores the fallback strings */
+ ret = of_property_read_string(vpu->dev->of_node, "compatible", &compatible);
+ if (ret)
+ return ret;
+
+ /* The first compatible and available node found is considered the main core */
+ do {
+ node = of_find_compatible_node(node, NULL, compatible);
+ if (of_device_is_available(node))
+ break;
+ } while (node);
+
+ if (!node)
+ return -EINVAL;
+
+ is_main_core = (vpu->dev->of_node == node);
+
+ of_node_put(node);
+
+ if (!is_main_core) {
+ dev_info(vpu->dev, "missing multi-core support, ignoring this instance\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int hantro_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -1011,6 +1055,10 @@ static int hantro_probe(struct platform_device *pdev)
match = of_match_node(of_hantro_match, pdev->dev.of_node);
vpu->variant = match->data;
+ ret = hantro_disable_multicore(vpu);
+ if (ret)
+ return ret;
+
/*
* Support for nxp,imx8mq-vpu is kept for backwards compatibility
* but it's deprecated. Please update your DTS file to use
diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c
index b880a6849d58..5c1d799d8618 100644
--- a/drivers/media/platform/verisilicon/hantro_g2.c
+++ b/drivers/media/platform/verisilicon/hantro_g2.c
@@ -56,3 +56,32 @@ size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx)
return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
}
+
+static size_t hantro_g2_mv_size(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ unsigned int pic_width_in_ctbs, pic_height_in_ctbs;
+ unsigned int max_log2_ctb_size;
+
+ max_log2_ctb_size = sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ pic_width_in_ctbs = (sps->pic_width_in_luma_samples +
+ (1 << max_log2_ctb_size) - 1) >> max_log2_ctb_size;
+ pic_height_in_ctbs = (sps->pic_height_in_luma_samples + (1 << max_log2_ctb_size) - 1)
+ >> max_log2_ctb_size;
+
+ return pic_width_in_ctbs * pic_height_in_ctbs * (1 << (2 * (max_log2_ctb_size - 4))) * 16;
+}
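As a worked example: a 1920x1088 stream with 64x64 CTBs (max_log2_ctb_size = 6) gives 30 x 17 = 510 CTBs; each CTB contains 1 << (2 * (6 - 4)) = 16 units of 16x16 samples, at 16 bytes each, so the motion-vector area is 510 * 16 * 16 = 130560 bytes.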
+
+size_t hantro_g2_luma_compress_offset(struct hantro_ctx *ctx)
+{
+ return hantro_g2_motion_vectors_offset(ctx) +
+ hantro_g2_mv_size(ctx);
+}
+
+size_t hantro_g2_chroma_compress_offset(struct hantro_ctx *ctx)
+{
+ return hantro_g2_luma_compress_offset(ctx) +
+ hantro_hevc_luma_compressed_size(ctx->dst_fmt.width, ctx->dst_fmt.height);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
index d3f8c33eb16c..85a44143b378 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
@@ -367,11 +367,14 @@ static int set_ref(struct hantro_ctx *ctx)
const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ dma_addr_t compress_luma_addr, compress_chroma_addr = 0;
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
struct hantro_decoded_buffer *dst;
size_t cr_offset = hantro_g2_chroma_offset(ctx);
size_t mv_offset = hantro_g2_motion_vectors_offset(ctx);
+ size_t compress_luma_offset = hantro_g2_luma_compress_offset(ctx);
+ size_t compress_chroma_offset = hantro_g2_chroma_compress_offset(ctx);
u32 max_ref_frames;
u16 dpb_longterm_e;
static const struct hantro_reg cur_poc[] = {
@@ -445,6 +448,8 @@ static int set_ref(struct hantro_ctx *ctx)
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
+ compress_luma_addr = luma_addr + compress_luma_offset;
+ compress_chroma_addr = luma_addr + compress_chroma_offset;
if (dpb[i].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i);
@@ -452,6 +457,8 @@ static int set_ref(struct hantro_ctx *ctx)
hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr);
+ hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), compress_luma_addr);
+ hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i), compress_chroma_addr);
}
vb2_dst = hantro_get_dst_buf(ctx);
@@ -465,19 +472,27 @@ static int set_ref(struct hantro_ctx *ctx)
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
+ compress_luma_addr = luma_addr + compress_luma_offset;
+ compress_chroma_addr = luma_addr + compress_chroma_offset;
hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
- hantro_write_addr(vpu, G2_REF_MV_ADDR(i++), mv_addr);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr);
+ hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), compress_luma_addr);
+ hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i++), compress_chroma_addr);
hantro_write_addr(vpu, G2_OUT_LUMA_ADDR, luma_addr);
hantro_write_addr(vpu, G2_OUT_CHROMA_ADDR, chroma_addr);
hantro_write_addr(vpu, G2_OUT_MV_ADDR, mv_addr);
+ hantro_write_addr(vpu, G2_OUT_COMP_LUMA_ADDR, compress_luma_addr);
+ hantro_write_addr(vpu, G2_OUT_COMP_CHROMA_ADDR, compress_chroma_addr);
for (; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), 0);
hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), 0);
hantro_write_addr(vpu, G2_REF_MV_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_COMP_LUMA_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_COMP_CHROMA_ADDR(i), 0);
}
hantro_reg_write(vpu, &g2_refer_lterm_e, dpb_longterm_e);
@@ -594,8 +609,7 @@ int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
/* Don't disable output */
hantro_reg_write(vpu, &g2_out_dis, 0);
- /* Don't compress buffers */
- hantro_reg_write(vpu, &g2_ref_compress_bypass, 1);
+ hantro_reg_write(vpu, &g2_ref_compress_bypass, !ctx->hevc_dec.use_compression);
/* Bus width and max burst */
hantro_reg_write(vpu, &g2_buswidth, BUS_WIDTH_128);
diff --git a/drivers/media/platform/verisilicon/hantro_g2_regs.h b/drivers/media/platform/verisilicon/hantro_g2_regs.h
index 82606783591a..b943b1816db7 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_regs.h
+++ b/drivers/media/platform/verisilicon/hantro_g2_regs.h
@@ -318,6 +318,10 @@
#define G2_TILE_BSD_ADDR (G2_SWREG(183))
#define G2_DS_DST (G2_SWREG(186))
#define G2_DS_DST_CHR (G2_SWREG(188))
+#define G2_OUT_COMP_LUMA_ADDR (G2_SWREG(190))
+#define G2_REF_COMP_LUMA_ADDR(i) (G2_SWREG(192) + ((i) * 0x8))
+#define G2_OUT_COMP_CHROMA_ADDR (G2_SWREG(224))
+#define G2_REF_COMP_CHROMA_ADDR(i) (G2_SWREG(226) + ((i) * 0x8))
#define g2_strm_buffer_len G2_DEC_REG(258, 0, 0xffffffff)
#define g2_strm_start_offset G2_DEC_REG(259, 0, 0xffffffff)
diff --git a/drivers/media/platform/verisilicon/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c
index 2c14330bc562..83cd12b0ddd6 100644
--- a/drivers/media/platform/verisilicon/hantro_hevc.c
+++ b/drivers/media/platform/verisilicon/hantro_hevc.c
@@ -25,6 +25,11 @@
#define MAX_TILE_COLS 20
#define MAX_TILE_ROWS 22
+static bool hevc_use_compression = IS_ENABLED(CONFIG_VIDEO_HANTRO_HEVC_RFC);
+module_param_named(hevc_use_compression, hevc_use_compression, bool, 0644);
+MODULE_PARM_DESC(hevc_use_compression,
+ "Use reference frame compression for HEVC");
+
void hantro_hevc_ref_init(struct hantro_ctx *ctx)
{
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
@@ -275,5 +280,8 @@ int hantro_hevc_dec_init(struct hantro_ctx *ctx)
hantro_hevc_ref_init(ctx);
+ hevc_dec->use_compression =
+ hevc_use_compression && hantro_needs_postproc(ctx, ctx->vpu_dst_fmt);
+
return 0;
}
diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h
index 7737320cc8cc..c9b6556f8b2b 100644
--- a/drivers/media/platform/verisilicon/hantro_hw.h
+++ b/drivers/media/platform/verisilicon/hantro_hw.h
@@ -42,6 +42,13 @@
#define MAX_POSTPROC_BUFFERS 64
+#define CBS_SIZE 16 /* compression table size in bytes */
+#define CBS_LUMA 8 /* luminance CBS is composed of 1 8x8 coded block */
+#define CBS_CHROMA_W (8 * 2) /* chrominance CBS is composed of two 8x4 coded
+ * blocks, the Cb CB first, followed by the Cr CB
+ */
+#define CBS_CHROMA_H 4
+
struct hantro_dev;
struct hantro_ctx;
struct hantro_buf;
@@ -144,6 +151,7 @@ struct hantro_hevc_dec_ctrls {
* @ref_bufs_used: Bitfield of used reference buffers
* @ctrls: V4L2 controls attached to a run
* @num_tile_cols_allocated: number of allocated tiles
+ * @use_compression: use reference buffer compression
*/
struct hantro_hevc_dec_hw_ctx {
struct hantro_aux_buf tile_sizes;
@@ -156,6 +164,7 @@ struct hantro_hevc_dec_hw_ctx {
u32 ref_bufs_used;
struct hantro_hevc_dec_ctrls ctrls;
unsigned int num_tile_cols_allocated;
+ bool use_compression;
};
/**
@@ -510,6 +519,33 @@ hantro_hevc_mv_size(unsigned int width, unsigned int height)
return width * height / 16;
}
+static inline size_t
+hantro_hevc_luma_compressed_size(unsigned int width, unsigned int height)
+{
+ u32 pic_width_in_cbsy =
+ round_up((width + CBS_LUMA - 1) / CBS_LUMA, CBS_SIZE);
+ u32 pic_height_in_cbsy = (height + CBS_LUMA - 1) / CBS_LUMA;
+
+ return round_up(pic_width_in_cbsy * pic_height_in_cbsy, CBS_SIZE);
+}
+
+static inline size_t
+hantro_hevc_chroma_compressed_size(unsigned int width, unsigned int height)
+{
+ u32 pic_width_in_cbsc =
+ round_up((width + CBS_CHROMA_W - 1) / CBS_CHROMA_W, CBS_SIZE);
+ u32 pic_height_in_cbsc = (height / 2 + CBS_CHROMA_H - 1) / CBS_CHROMA_H;
+
+ return round_up(pic_width_in_cbsc * pic_height_in_cbsc, CBS_SIZE);
+}
+
+static inline size_t
+hantro_hevc_compressed_size(unsigned int width, unsigned int height)
+{
+ return hantro_hevc_luma_compressed_size(width, height) +
+ hantro_hevc_chroma_compressed_size(width, height);
+}
+
static inline unsigned short hantro_av1_num_sbs(unsigned short dimension)
{
return DIV_ROUND_UP(dimension, 64);
@@ -525,6 +561,8 @@ hantro_av1_mv_size(unsigned int width, unsigned int height)
size_t hantro_g2_chroma_offset(struct hantro_ctx *ctx);
size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx);
+size_t hantro_g2_luma_compress_offset(struct hantro_ctx *ctx);
+size_t hantro_g2_chroma_compress_offset(struct hantro_ctx *ctx);
int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx);
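
A worked example of the new size helpers, assuming a 1920x1088 frame and the CBS_* constants above (comment sketch only):

/*
 * Luma:   pic_width_in_cbsy  = round_up(1920 / 8, 16)  = 240
 *         pic_height_in_cbsy = 1088 / 8                = 136
 *         luma size   = round_up(240 * 136, 16)        = 32640 bytes
 *
 * Chroma: pic_width_in_cbsc  = round_up(1920 / 16, 16) = 128
 *         pic_height_in_cbsc = (1088 / 2) / 4          = 136
 *         chroma size = round_up(128 * 136, 16)        = 17408 bytes
 *
 * hantro_hevc_compressed_size(1920, 1088) = 50048 bytes (~49 KiB)
 * of extra space per reference buffer.
 */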
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index 41e93176300b..232c93eea7ee 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -213,9 +213,13 @@ static unsigned int hantro_postproc_buffer_size(struct hantro_ctx *ctx)
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
buf_size += hantro_vp9_mv_size(pix_mp.width,
pix_mp.height);
- else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE) {
buf_size += hantro_hevc_mv_size(pix_mp.width,
pix_mp.height);
+ if (ctx->hevc_dec.use_compression)
+ buf_size += hantro_hevc_compressed_size(pix_mp.width,
+ pix_mp.height);
+ }
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME)
buf_size += hantro_av1_mv_size(pix_mp.width,
pix_mp.height);
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index df6f2536263b..62d3962c18d9 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -303,11 +303,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
coded = capture == ctx->is_encoder;
- vpu_debug(4, "trying format %c%c%c%c\n",
- (pix_mp->pixelformat & 0x7f),
- (pix_mp->pixelformat >> 8) & 0x7f,
- (pix_mp->pixelformat >> 16) & 0x7f,
- (pix_mp->pixelformat >> 24) & 0x7f);
+ vpu_debug(4, "trying format %p4cc\n", &pix_mp->pixelformat);
fmt = hantro_find_format(ctx, pix_mp->pixelformat);
if (!fmt) {
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
index cc4483857489..65e8f2d07400 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
@@ -257,7 +257,8 @@ static int rockchip_vpu981_av1_dec_tiles_reallocate(struct hantro_ctx *ctx)
struct hantro_dev *vpu = ctx->dev;
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
- unsigned int num_tile_cols = 1 << ctrls->tile_group_entry->tile_col;
+ const struct v4l2_av1_tile_info *tile_info = &ctrls->frame->tile_info;
+ unsigned int num_tile_cols = tile_info->tile_cols;
unsigned int height = ALIGN(ctrls->frame->frame_height_minus_1 + 1, 64);
unsigned int height_in_sb = height / 64;
unsigned int stripe_num = ((height + 8) + 63) / 64;
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
index 850ff0f84424..e4008da64f19 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
@@ -327,7 +327,7 @@
#define av1_apf_threshold AV1_DEC_REG(55, 0, 0xffff)
#define av1_apf_single_pu_mode AV1_DEC_REG(55, 30, 0x1)
-#define av1_apf_disable AV1_DEC_REG(55, 30, 0x1)
+#define av1_apf_disable AV1_DEC_REG(55, 31, 0x1)
#define av1_dec_max_burst AV1_DEC_REG(58, 0, 0xff)
#define av1_dec_buswidth AV1_DEC_REG(58, 8, 0x7)
@@ -337,10 +337,10 @@
#define av1_dec_mc_polltime AV1_DEC_REG(58, 17, 0x3ff)
#define av1_dec_mc_pollmode AV1_DEC_REG(58, 27, 0x3)
-#define av1_filt_ref_adj_3 AV1_DEC_REG(59, 0, 0x3f)
-#define av1_filt_ref_adj_2 AV1_DEC_REG(59, 7, 0x3f)
-#define av1_filt_ref_adj_1 AV1_DEC_REG(59, 14, 0x3f)
-#define av1_filt_ref_adj_0 AV1_DEC_REG(59, 21, 0x3f)
+#define av1_filt_ref_adj_3 AV1_DEC_REG(59, 0, 0x7f)
+#define av1_filt_ref_adj_2 AV1_DEC_REG(59, 7, 0x7f)
+#define av1_filt_ref_adj_1 AV1_DEC_REG(59, 14, 0x7f)
+#define av1_filt_ref_adj_0 AV1_DEC_REG(59, 21, 0x7f)
#define av1_ref0_sign_bias AV1_DEC_REG(59, 28, 0x1)
#define av1_ref1_sign_bias AV1_DEC_REG(59, 29, 0x1)
#define av1_ref2_sign_bias AV1_DEC_REG(59, 30, 0x1)
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
index f97527670783..964122e7c355 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
@@ -82,7 +82,6 @@ static const struct hantro_fmt rockchip_vpu981_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
- .match_depth = true,
.postprocessed = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 996684a73038..bfe48cc0ab52 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -199,18 +199,13 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
struct media_pad *sink_pad;
struct xvip_graph_entity *ent;
struct v4l2_fwnode_link link;
- struct device_node *ep = NULL;
+ struct device_node *ep;
struct xvip_dma *dma;
int ret = 0;
dev_dbg(xdev->dev, "creating links for DMA engines\n");
- while (1) {
- /* Get the next endpoint and parse its link. */
- ep = of_graph_get_next_endpoint(node, ep);
- if (ep == NULL)
- break;
-
+ for_each_endpoint_of_node(node, ep) {
dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 14e7dd3889ff..dd85b0b1bcd9 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -502,7 +502,7 @@ static void tea5764_i2c_remove(struct i2c_client *client)
/* I2C subsystem interface */
static const struct i2c_device_id tea5764_id[] = {
- { "radio-tea5764", 0 },
+ { "radio-tea5764" },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(i2c, tea5764_id);
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index 91345198bbf1..d9eecddffd91 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -395,8 +395,8 @@ static void saa7706h_remove(struct i2c_client *client)
}
static const struct i2c_device_id saa7706h_id[] = {
- {DRIVER_NAME, 0},
- {},
+ { DRIVER_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, saa7706h_id);
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index fd449e42c191..cdd2ac198f2c 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -28,7 +28,7 @@
/* I2C Device ID List */
static const struct i2c_device_id si470x_i2c_id[] = {
/* Generic Entry */
- { "si470x", 0 },
+ { "si470x" },
/* Terminating entry */
{ }
};
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index ddaf7a60b7d0..e71272c6de37 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1639,8 +1639,8 @@ static void si4713_remove(struct i2c_client *client)
/* si4713_i2c_driver - i2c driver interface */
static const struct i2c_device_id si4713_id[] = {
- { "si4713" , 0 },
- { },
+ { "si4713" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, si4713_id);
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 215168aa1588..b00ccf651922 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -173,8 +173,8 @@ static void tef6862_remove(struct i2c_client *client)
}
static const struct i2c_device_id tef6862_id[] = {
- {DRIVER_NAME, 0},
- {},
+ { DRIVER_NAME },
+ {}
};
MODULE_DEVICE_TABLE(i2c, tef6862_id);
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 11ee21a7db8f..67722e2e47ff 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -451,9 +451,6 @@ select_timeout:
dev->rdev->max_timeout = 200000;
}
- if (dev->hw_learning_and_tx_capable)
- dev->rdev->tx_resolution = sample_period;
-
if (dev->rdev->timeout > dev->rdev->max_timeout)
dev->rdev->timeout = dev->rdev->max_timeout;
if (dev->rdev->timeout < dev->rdev->min_timeout)
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index fcfadd7ea31c..2bacecb02262 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1380,7 +1380,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR * sample_period / 1000;
- rdev->tx_resolution = ITE_BAUDRATE_DIVISOR * sample_period / 1000;
/* set up transmitter related values */
rdev->tx_ir = ite_tx_ir;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 717c441b4a86..f042f3f14afa 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -706,7 +706,6 @@ static const struct file_operations lirc_fops = {
.poll = lirc_poll,
.open = lirc_open,
.release = lirc_close,
- .llseek = no_llseek,
};
static void lirc_release_device(struct device *ld)
@@ -820,20 +819,20 @@ struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
struct lirc_fh *fh;
struct rc_dev *dev;
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- if (f.file->f_op != &lirc_fops) {
+ if (fd_file(f)->f_op != &lirc_fops) {
fdput(f);
return ERR_PTR(-EINVAL);
}
- if (write && !(f.file->f_mode & FMODE_WRITE)) {
+ if (write && !(fd_file(f)->f_mode & FMODE_WRITE)) {
fdput(f);
return ERR_PTR(-EPERM);
}
- fh = f.file->private_data;
+ fh = fd_file(f)->private_data;
dev = fh->rc;
get_device(&dev->dev);
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 5303e6da5809..9cdb45821ecc 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -567,6 +567,32 @@ static void meson_ir_shutdown(struct platform_device *pdev)
spin_unlock_irqrestore(&ir->lock, flags);
}
+static __maybe_unused int meson_ir_resume(struct device *dev)
+{
+ struct meson_ir *ir = dev_get_drvdata(dev);
+
+ if (ir->param->support_hw_decoder)
+ meson_ir_hw_decoder_init(ir->rc, &ir->rc->enabled_protocols);
+ else
+ meson_ir_sw_decoder_init(ir->rc);
+
+ return 0;
+}
+
+static __maybe_unused int meson_ir_suspend(struct device *dev)
+{
+ struct meson_ir *ir = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ir->lock, flags);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE, 0);
+ spin_unlock_irqrestore(&ir->lock, flags);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(meson_ir_pm_ops, meson_ir_suspend, meson_ir_resume);
+
static const struct meson_ir_param meson6_ir_param = {
.support_hw_decoder = false,
.max_register = IR_DEC_REG1,
@@ -607,6 +633,7 @@ static struct platform_driver meson_ir_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_ir_match,
+ .pm = pm_ptr(&meson_ir_pm_ops),
},
};
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index b356041c5c00..8288366f891f 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -230,7 +230,6 @@ static int __init loop_init(void)
rc->min_timeout = 1;
rc->max_timeout = IR_MAX_TIMEOUT;
rc->rx_resolution = 1;
- rc->tx_resolution = 1;
rc->s_tx_mask = loop_set_tx_mask;
rc->s_tx_carrier = loop_set_tx_carrier;
rc->s_tx_duty_cycle = loop_set_tx_duty_cycle;
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index 3e011fe62ae1..846e90c06291 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -1215,8 +1215,7 @@ static int vicodec_encoder_cmd(struct file *file, void *fh,
if (ret < 0)
return ret;
- if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) ||
- !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
ret = v4l2_m2m_ioctl_encoder_cmd(file, fh, ec);
@@ -1250,8 +1249,7 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
if (ret < 0)
return ret;
- if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) ||
- !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
ret = v4l2_m2m_ioctl_decoder_cmd(file, fh, dc);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c
index 7a0cd9601917..505f96fccbf3 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c
@@ -407,7 +407,7 @@ static const struct dvb_frontend_ops vidtv_demod_ops = {
};
static const struct i2c_device_id vidtv_demod_i2c_id_table[] = {
- {"dvb_vidtv_demod", 0},
+ { "dvb_vidtv_demod" },
{}
};
MODULE_DEVICE_TABLE(i2c, vidtv_demod_i2c_id_table);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.c b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
index a748737d47f3..4ba302d569d6 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_tuner.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
@@ -385,7 +385,7 @@ static const struct dvb_tuner_ops vidtv_tuner_ops = {
};
static const struct i2c_device_id vidtv_tuner_i2c_id_table[] = {
- {"dvb_vidtv_tuner", 0},
+ { "dvb_vidtv_tuner" },
{}
};
MODULE_DEVICE_TABLE(i2c, vidtv_tuner_i2c_id_table);
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
index 941ef4263214..356a988dd6a1 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.c
+++ b/drivers/media/test-drivers/vivid/vivid-cec.c
@@ -316,15 +316,16 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
struct vivid_dev *dev = cec_get_drvdata(adap);
struct cec_msg reply;
u8 dest = cec_msg_destination(msg);
- u8 disp_ctl;
- char osd[14];
if (cec_msg_is_broadcast(msg))
dest = adap->log_addrs.log_addr[0];
cec_msg_init(&reply, dest, cec_msg_initiator(msg));
switch (cec_msg_opcode(msg)) {
- case CEC_MSG_SET_OSD_STRING:
+ case CEC_MSG_SET_OSD_STRING: {
+ u8 disp_ctl;
+ char osd[14];
+
if (!cec_is_sink(adap))
return -ENOMSG;
cec_ops_set_osd_string(msg, &disp_ctl, osd);
@@ -348,6 +349,47 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
break;
}
break;
+ }
+ case CEC_MSG_VENDOR_COMMAND_WITH_ID: {
+ u32 vendor_id;
+ u8 size;
+ const u8 *vendor_cmd;
+
+ /*
+ * If we receive <Vendor Command With ID> with our vendor ID
+ * and with a payload of size 1, and the payload value is odd,
+ * then we reply with the same message, but with the payload
+ * byte incremented by 1.
+ *
+ * If the size is 1 and the payload value is even, then we
+ * ignore the message.
+ *
+ * The reason we reply to odd instead of even payload values
+ * is that it allows for testing of the corner case where the
+ * reply value is 0 ((0xff + 1) % 256).
+ *
+ * For other sizes we reply with <Feature Abort>.
+ *
+ * This is added for the specific purpose of testing the
+ * CEC_MSG_FL_REPLY_VENDOR_ID flag using vivid.
+ */
+ cec_ops_vendor_command_with_id(msg, &vendor_id, &size, &vendor_cmd);
+ if (vendor_id != adap->log_addrs.vendor_id)
+ break;
+ if (size == 1) {
+ // Ignore even op values
+ if (!(vendor_cmd[0] & 1))
+ break;
+ reply.len = msg->len;
+ memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1);
+ reply.msg[msg->len - 1]++;
+ } else {
+ cec_msg_feature_abort(&reply, cec_msg_opcode(msg),
+ CEC_OP_ABORT_INVALID_OP);
+ }
+ cec_transmit_msg(adap, &reply, false);
+ break;
+ }
default:
return -ENOMSG;
}
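
For context, a minimal userspace sketch of exercising this path; the fd setup, logical addresses, and reply-wait details are assumptions, while cec_msg_vendor_command_with_id() and CEC_MSG_FL_REPLY_VENDOR_ID come from the CEC UAPI headers:

#include <sys/ioctl.h>
#include <linux/cec.h>
#include <linux/cec-funcs.h>

/* Send <Vendor Command With ID> with a 1-byte odd payload; vivid
 * answers with the payload incremented by one (0xff wraps to 0x00). */
static int test_vendor_reply(int fd, __u32 vendor_id)
{
	struct cec_msg msg;
	__u8 payload = 0xff;	/* odd, so a reply is expected */

	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
	cec_msg_vendor_command_with_id(&msg, vendor_id, 1, &payload);
	msg.reply = CEC_MSG_VENDOR_COMMAND_WITH_ID;	/* wait for the echo */
	msg.flags = CEC_MSG_FL_REPLY_VENDOR_ID;
	return ioctl(fd, CEC_TRANSMIT, &msg);
}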
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 3893a00c18ce..549b2009f974 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -719,7 +719,7 @@ static void e4000_remove(struct i2c_client *client)
}
static const struct i2c_device_id e4000_id_table[] = {
- {"e4000", 0},
+ { "e4000" },
{}
};
MODULE_DEVICE_TABLE(i2c, e4000_id_table);
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index f6613dcf40a3..046389896dc5 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -600,7 +600,7 @@ static void fc2580_remove(struct i2c_client *client)
}
static const struct i2c_device_id fc2580_id_table[] = {
- {"fc2580", 0},
+ { "fc2580" },
{}
};
MODULE_DEVICE_TABLE(i2c, fc2580_id_table);
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 2cd7f0e0c70d..cc57980ed417 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -709,7 +709,7 @@ static void m88rs6000t_remove(struct i2c_client *client)
}
static const struct i2c_device_id m88rs6000t_id[] = {
- {"m88rs6000t", 0},
+ { "m88rs6000t" },
{}
};
MODULE_DEVICE_TABLE(i2c, m88rs6000t_id);
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 4205ed4cf467..4b9dca2f17cc 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -514,7 +514,7 @@ static void mt2060_remove(struct i2c_client *client)
}
static const struct i2c_device_id mt2060_id_table[] = {
- {"mt2060", 0},
+ { "mt2060" },
{}
};
MODULE_DEVICE_TABLE(i2c, mt2060_id_table);
diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c
index 9b2b237745ae..7c03d4132763 100644
--- a/drivers/media/tuners/mxl301rf.c
+++ b/drivers/media/tuners/mxl301rf.c
@@ -317,7 +317,7 @@ static void mxl301rf_remove(struct i2c_client *client)
static const struct i2c_device_id mxl301rf_id[] = {
- {"mxl301rf", 0},
+ { "mxl301rf" },
{}
};
MODULE_DEVICE_TABLE(i2c, mxl301rf_id);
diff --git a/drivers/media/tuners/qm1d1b0004.c b/drivers/media/tuners/qm1d1b0004.c
index af2d3618b9d5..c53aeb558413 100644
--- a/drivers/media/tuners/qm1d1b0004.c
+++ b/drivers/media/tuners/qm1d1b0004.c
@@ -243,7 +243,7 @@ static void qm1d1b0004_remove(struct i2c_client *client)
static const struct i2c_device_id qm1d1b0004_id[] = {
- {"qm1d1b0004", 0},
+ { "qm1d1b0004" },
{}
};
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index ce7223315b0c..c58f5b6526f1 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -434,7 +434,7 @@ static void qm1d1c0042_remove(struct i2c_client *client)
static const struct i2c_device_id qm1d1c0042_id[] = {
- {"qm1d1c0042", 0},
+ { "qm1d1c0042" },
{}
};
MODULE_DEVICE_TABLE(i2c, qm1d1c0042_id);
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index 8d742bd61df0..39f2dc9c2845 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -254,7 +254,7 @@ static void tda18212_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda18212_id[] = {
- {"tda18212", 0},
+ { "tda18212" },
{}
};
MODULE_DEVICE_TABLE(i2c, tda18212_id);
diff --git a/drivers/media/tuners/tda18250.c b/drivers/media/tuners/tda18250.c
index 32ea473f3f49..68d0275f29e1 100644
--- a/drivers/media/tuners/tda18250.c
+++ b/drivers/media/tuners/tda18250.c
@@ -868,7 +868,7 @@ static void tda18250_remove(struct i2c_client *client)
}
static const struct i2c_device_id tda18250_id_table[] = {
- {"tda18250", 0},
+ { "tda18250" },
{}
};
MODULE_DEVICE_TABLE(i2c, tda18250_id_table);
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index 03a3a022b0a8..562a7a5c26f5 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -245,7 +245,7 @@ static void tua9001_remove(struct i2c_client *client)
}
static const struct i2c_device_id tua9001_id_table[] = {
- {"tua9001", 0},
+ { "tua9001" },
{}
};
MODULE_DEVICE_TABLE(i2c, tua9001_id_table);
diff --git a/drivers/media/tuners/tuner-i2c.h b/drivers/media/tuners/tuner-i2c.h
index 07aeead0644a..724952e001cd 100644
--- a/drivers/media/tuners/tuner-i2c.h
+++ b/drivers/media/tuners/tuner-i2c.h
@@ -133,10 +133,8 @@ static inline int tuner_i2c_xfer_send_recv(struct tuner_i2c_props *props,
} \
if (0 == __ret) { \
state = kzalloc(sizeof(type), GFP_KERNEL); \
- if (!state) { \
- __ret = -ENOMEM; \
+ if (NULL == state) \
goto __fail; \
- } \
state->i2c_props.addr = i2caddr; \
state->i2c_props.adap = i2cadap; \
state->i2c_props.name = devname; \
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index db1fab96d529..a155b987282f 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -611,7 +611,7 @@ static void s2250_remove(struct i2c_client *client)
}
static const struct i2c_device_id s2250_id[] = {
- { "s2250", 0 },
+ { "s2250" },
{ }
};
MODULE_DEVICE_TABLE(i2c, s2250_id);
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index 1a1258d4ffca..14fa41cb8148 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -59,7 +59,6 @@ static int uvc_debugfs_stats_release(struct inode *inode, struct file *file)
static const struct file_operations uvc_debugfs_stats_fops = {
.owner = THIS_MODULE,
.open = uvc_debugfs_stats_open,
- .llseek = no_llseek,
.read = uvc_debugfs_stats_read,
.release = uvc_debugfs_stats_release,
};
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index be2ba7ca5de2..3d7711cc42bc 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -483,7 +483,6 @@ static const struct file_operations v4l2_fops = {
#endif
.release = v4l2_release,
.poll = v4l2_poll,
- .llseek = no_llseek,
};
/**
@@ -557,6 +556,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC;
bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING;
+ bool is_edid = vdev->device_caps & V4L2_CAP_EDID;
bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
@@ -784,6 +784,20 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
}
+ if (is_edid) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
+ if (is_tx) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ }
+ if (is_rx) {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
+ }
+ }
bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
BASE_VIDIOC_PRIVATE);
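
A minimal sketch of a driver opting in to the new gating, assuming a receive-only EDID device (the my_* names are invented; V4L2_CAP_EDID is what the hunk above keys on):

/* hypothetical fragment from a probe()/registration path */
static void my_setup_edid_vdev(struct video_device *vdev,
			       const struct v4l2_ioctl_ops *my_ioctl_ops)
{
	vdev->vfl_dir = VFL_DIR_RX;
	vdev->device_caps = V4L2_CAP_EDID;
	vdev->ioctl_ops = my_ioctl_ops;	/* provides vidioc_g_edid, vidioc_s_edid,
					 * vidioc_enum_input/g_input/s_input */
}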
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 5eb4d797d259..e14db67be97c 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -484,7 +484,7 @@ static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
- pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u",
+ pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u, ",
p->index, p->count, prt_names(p->memory, v4l2_memory_names),
p->capabilities, p->max_num_buffers);
v4l_print_format(&p->format, write_only);
@@ -1458,6 +1458,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break;
+ case V4L2_META_FMT_RK_ISP1_EXT_PARAMS: descr = "Rockchip ISP1 Ext 3A Params"; break;
case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 7c5812d55315..3a4ba08810d2 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -1443,16 +1443,53 @@ int v4l2_subdev_link_validate(struct media_link *link)
bool states_locked;
int ret;
- if (!is_media_entity_v4l2_subdev(link->sink->entity) ||
- !is_media_entity_v4l2_subdev(link->source->entity)) {
- pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n",
- !is_media_entity_v4l2_subdev(link->sink->entity) ?
- "sink" : "source",
- link->source->entity->name, link->source->index,
- link->sink->entity->name, link->sink->index);
- return 0;
+ /*
+ * Links are validated in the context of the sink entity. Usage of this
+ * helper on a sink that is not a subdev is a clear driver bug.
+ */
+ if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity)))
+ return -EINVAL;
+
+ /*
+ * If the source is a video device, delegate link validation to it. This
+ * allows usage of this helper for a subdev connected to a video output
+ * device, provided that the driver implements the video output device's
+ * .link_validate() operation.
+ */
+ if (is_media_entity_v4l2_video_device(link->source->entity)) {
+ struct media_entity *source = link->source->entity;
+
+ if (!source->ops || !source->ops->link_validate) {
+ /*
+ * Many existing drivers do not implement the required
+ * .link_validate() operation for their video devices.
+ * Print a warning to get the drivers fixed, and return
+ * 0 to avoid breaking userspace. This should
+ * eventually be turned into a WARN_ON() once all
+ * drivers have been fixed.
+ */
+ pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n",
+ source->name);
+ return 0;
+ }
+
+ /*
+ * Avoid infinite loops in case a video device incorrectly uses
+ * this helper function as its .link_validate() handler.
+ */
+ if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate))
+ return -EINVAL;
+
+ return source->ops->link_validate(link);
}
+ /*
+ * If the source is still not a subdev, usage of this helper is a clear
+ * driver bug.
+ */
+ if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity)))
+ return -EINVAL;
+
sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
source_sd = media_entity_to_v4l2_subdev(link->source->entity);
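
For context, a minimal sketch of the video-output side this delegation enables, assuming a hypothetical driver (all my_* names invented):

static int my_vdev_link_validate(struct media_link *link)
{
	/* compare the queued format against the connected subdev's
	 * sink pad format; return 0 on match, -EPIPE otherwise */
	return 0;
}

static const struct media_entity_operations my_vdev_entity_ops = {
	.link_validate = my_vdev_link_validate,
};

/* at registration time: */
vdev->entity.ops = &my_vdev_entity_ops;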
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 3770cb1cff7d..1167a16d8fb4 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1018,14 +1018,6 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
#define MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE (0x08)
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX
-#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_IOC_2
{
CONFIG_PAGE_HEADER Header; /* 00h */
@@ -1034,7 +1026,7 @@ typedef struct _CONFIG_PAGE_IOC_2
U8 MaxVolumes; /* 09h */
U8 NumActivePhysDisks; /* 0Ah */
U8 MaxPhysDisks; /* 0Bh */
- CONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */
+ CONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[] __counted_by(NumActiveVolumes); /* 0Ch */
} CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
IOCPage2_t, MPI_POINTER pIOCPage2_t;
@@ -1064,21 +1056,13 @@ typedef struct _IOC_3_PHYS_DISK
} IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK,
Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t;
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX
-#define MPI_IOC_PAGE_3_PHYSDISK_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_IOC_3
{
CONFIG_PAGE_HEADER Header; /* 00h */
U8 NumPhysDisks; /* 04h */
U8 Reserved1; /* 05h */
U16 Reserved2; /* 06h */
- IOC_3_PHYS_DISK PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */
+ IOC_3_PHYS_DISK PhysDisk[] __counted_by(NumPhysDisks); /* 08h */
} CONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3,
IOCPage3_t, MPI_POINTER pIOCPage3_t;
@@ -1093,21 +1077,13 @@ typedef struct _IOC_4_SEP
} IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP,
Ioc4Sep_t, MPI_POINTER pIoc4Sep_t;
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_IOC_PAGE_4_SEP_MAX
-#define MPI_IOC_PAGE_4_SEP_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_IOC_4
{
CONFIG_PAGE_HEADER Header; /* 00h */
U8 ActiveSEP; /* 04h */
U8 MaxSEP; /* 05h */
U16 Reserved1; /* 06h */
- IOC_4_SEP SEP[MPI_IOC_PAGE_4_SEP_MAX]; /* 08h */
+ IOC_4_SEP SEP[] __counted_by(ActiveSEP); /* 08h */
} CONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4,
IOCPage4_t, MPI_POINTER pIOCPage4_t;
@@ -2295,14 +2271,6 @@ typedef struct _RAID_VOL0_SETTINGS
#define MPI_RAID_HOT_SPARE_POOL_6 (0x40)
#define MPI_RAID_HOT_SPARE_POOL_7 (0x80)
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX
-#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_RAID_VOL_0
{
CONFIG_PAGE_HEADER Header; /* 00h */
@@ -2321,7 +2289,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
U8 DataScrubRate; /* 25h */
U8 ResyncRate; /* 26h */
U8 InactiveStatus; /* 27h */
- RAID_VOL0_PHYS_DISK PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */
+ RAID_VOL0_PHYS_DISK PhysDisk[] __counted_by(NumPhysDisks); /* 28h */
} CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
@@ -2455,14 +2423,6 @@ typedef struct _RAID_PHYS_DISK1_PATH
#define MPI_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength or NumPhysDiskPaths at runtime.
- */
-#ifndef MPI_RAID_PHYS_DISK1_PATH_MAX
-#define MPI_RAID_PHYS_DISK1_PATH_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
{
CONFIG_PAGE_HEADER Header; /* 00h */
@@ -2470,7 +2430,7 @@ typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
U8 PhysDiskNum; /* 05h */
U16 Reserved2; /* 06h */
U32 Reserved1; /* 08h */
- RAID_PHYS_DISK1_PATH Path[MPI_RAID_PHYS_DISK1_PATH_MAX];/* 0Ch */
+ RAID_PHYS_DISK1_PATH Path[] __counted_by(NumPhysDiskPaths);/* 0Ch */
} CONFIG_PAGE_RAID_PHYS_DISK_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_1,
RaidPhysDiskPage1_t, MPI_POINTER pRaidPhysDiskPage1_t;
@@ -2555,14 +2515,6 @@ typedef struct _MPI_SAS_IO_UNIT0_PHY_DATA
} MPI_SAS_IO_UNIT0_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT0_PHY_DATA,
SasIOUnit0PhyData, MPI_POINTER pSasIOUnit0PhyData;
-/*
- * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- * one and check Header.PageLength at runtime.
- */
-#ifndef MPI_SAS_IOUNIT0_PHY_MAX
-#define MPI_SAS_IOUNIT0_PHY_MAX (1)
-#endif
-
typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
{
CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
@@ -2571,7 +2523,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
U8 NumPhys; /* 0Ch */
U8 Reserved2; /* 0Dh */
U16 Reserved3; /* 0Eh */
- MPI_SAS_IO_UNIT0_PHY_DATA PhyData[MPI_SAS_IOUNIT0_PHY_MAX]; /* 10h */
+ MPI_SAS_IO_UNIT0_PHY_DATA PhyData[] __counted_by(NumPhys); /* 10h */
} CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
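
For context, a minimal allocation sketch under the new flexible-array layout (helper name invented; struct_size() comes from <linux/overflow.h>):

#include <linux/overflow.h>
#include <linux/slab.h>

static CONFIG_PAGE_IOC_3 *ioc_page3_alloc(u8 num_disks)
{
	CONFIG_PAGE_IOC_3 *page;

	/* in practice the element count comes from Header.PageLength
	 * as returned by the firmware */
	page = kzalloc(struct_size(page, PhysDisk, num_disks), GFP_KERNEL);
	if (page)
		page->NumPhysDisks = num_disks;	/* keep __counted_by() coherent */
	return page;
}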
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 4bf669c55649..738bc4e60a18 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1856,10 +1856,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* Initialize workqueue */
INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
- snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
- "mpt_poll_%d", ioc->id);
- ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
- WQ_MEM_RECLAIM, 0);
+ ioc->reset_work_q =
+ alloc_workqueue("mpt_poll_%d", WQ_MEM_RECLAIM, 0, ioc->id);
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
@@ -1986,9 +1984,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->fw_event_list);
spin_lock_init(&ioc->fw_event_lock);
- snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
- ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
- WQ_MEM_RECLAIM, 0);
+ ioc->fw_event_q = alloc_workqueue("mpt/%d", WQ_MEM_RECLAIM, 0, ioc->id);
if (!ioc->fw_event_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 8031173c3655..b406fd676da0 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -729,7 +729,6 @@ typedef struct _MPT_ADAPTER
struct list_head fw_event_list;
spinlock_t fw_event_lock;
u8 fw_events_off; /* if '1', then ignore events */
- char fw_event_q_name[MPT_KOBJ_NAME_LEN];
struct mutex sas_discovery_mutex;
u8 sas_discovery_runtime;
@@ -764,7 +763,6 @@ typedef struct _MPT_ADAPTER
u8 fc_link_speed[2];
spinlock_t fc_rescan_work_lock;
struct work_struct fc_rescan_work;
- char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
struct workqueue_struct *fc_rescan_work_q;
/* driver forced bus resets count */
@@ -778,7 +776,6 @@ typedef struct _MPT_ADAPTER
spinlock_t scsi_lookup_lock;
u64 dma_mask;
u32 broadcast_aen_busy;
- char reset_work_q_name[MPT_KOBJ_NAME_LEN];
struct workqueue_struct *reset_work_q;
struct delayed_work fault_reset_work;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 9f3999750c23..77fa55df70d0 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1609,7 +1609,7 @@ mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
- max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;
+ max = min(maxEvents, MPTCTL_EVENT_LOG_SIZE);
/* If fewer than 1 event is requested, there must have
* been some type of error.
@@ -2691,7 +2691,6 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
static const struct file_operations mptctl_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index a3c17c4fe69c..91242f26defb 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1349,11 +1349,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* initialize workqueue */
- snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
- "mptfc_wq_%d", sh->host_no);
- ioc->fc_rescan_work_q =
- alloc_ordered_workqueue(ioc->fc_rescan_work_q_name,
- WQ_MEM_RECLAIM);
+ ioc->fc_rescan_work_q = alloc_ordered_workqueue(
+ "mptfc_wq_%d", WQ_MEM_RECLAIM, sh->host_no);
if (!ioc->fc_rescan_work_q) {
error = -ENOMEM;
goto out_mptfc_host;
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 384ecf5301d2..e9941da58b18 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -391,7 +391,7 @@ static void device_irq_exit_800(struct pm80x_chip *chip)
regmap_del_irq_chip(chip->irq, chip->irq_data);
}
-static struct regmap_irq_chip pm800_irq_chip = {
+static const struct regmap_irq_chip pm800_irq_chip = {
.name = "88pm800",
.irqs = pm800_irqs,
.num_irqs = ARRAY_SIZE(pm800_irqs),
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index 205f0762a928..f5d6663172ee 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -73,7 +73,7 @@ static const struct mfd_cell codec_devs[] = {
},
};
-static struct regmap_irq pm805_irqs[] = {
+static const struct regmap_irq pm805_irqs[] = {
/* INT0 */
[PM805_IRQ_LDO_OFF] = {
.mask = PM805_INT1_HP1_SHRT,
@@ -163,7 +163,7 @@ static void device_irq_exit_805(struct pm80x_chip *chip)
regmap_del_irq_chip(chip->irq, chip->irq_data);
}
-static struct regmap_irq_chip pm805_irq_chip = {
+static const struct regmap_irq_chip pm805_irq_chip = {
.name = "88pm805",
.irqs = pm805_irqs,
.num_irqs = ARRAY_SIZE(pm805_irqs),
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 7f003f71e1af..8e68b64bd7f8 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -916,7 +916,7 @@ static void device_power_init(struct pm860x_chip *chip,
power_devs[0].platform_data = pdata->power;
power_devs[0].pdata_size = sizeof(struct pm860x_power_pdata);
power_devs[0].num_resources = ARRAY_SIZE(battery_resources);
- power_devs[0].resources = &battery_resources[0],
+ power_devs[0].resources = &battery_resources[0];
ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1,
&battery_resources[0], chip->irq_base, NULL);
if (ret < 0)
@@ -925,7 +925,7 @@ static void device_power_init(struct pm860x_chip *chip,
power_devs[1].platform_data = pdata->power;
power_devs[1].pdata_size = sizeof(struct pm860x_power_pdata);
power_devs[1].num_resources = ARRAY_SIZE(charger_resources);
- power_devs[1].resources = &charger_resources[0],
+ power_devs[1].resources = &charger_resources[0];
ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1,
&charger_resources[0], chip->irq_base, NULL);
if (ret < 0)
@@ -942,7 +942,7 @@ static void device_power_init(struct pm860x_chip *chip,
pdata->chg_desc->charger_regulators =
&chg_desc_regulator_data[0];
pdata->chg_desc->num_charger_regulators =
- ARRAY_SIZE(chg_desc_regulator_data),
+ ARRAY_SIZE(chg_desc_regulator_data);
power_devs[3].platform_data = pdata->chg_desc;
power_devs[3].pdata_size = sizeof(*pdata->chg_desc);
ret = mfd_add_devices(chip->dev, 0, &power_devs[3], 1,
@@ -958,7 +958,7 @@ static void device_onkey_init(struct pm860x_chip *chip,
int ret;
onkey_devs[0].num_resources = ARRAY_SIZE(onkey_resources);
- onkey_devs[0].resources = &onkey_resources[0],
+ onkey_devs[0].resources = &onkey_resources[0];
ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
ARRAY_SIZE(onkey_devs), &onkey_resources[0],
chip->irq_base, NULL);
@@ -972,7 +972,7 @@ static void device_codec_init(struct pm860x_chip *chip,
int ret;
codec_devs[0].num_resources = ARRAY_SIZE(codec_resources);
- codec_devs[0].resources = &codec_resources[0],
+ codec_devs[0].resources = &codec_resources[0];
ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
ARRAY_SIZE(codec_devs), &codec_resources[0], 0,
NULL);
diff --git a/drivers/mfd/atc260x-core.c b/drivers/mfd/atc260x-core.c
index 67473b58b03d..6b6d5f1b9d76 100644
--- a/drivers/mfd/atc260x-core.c
+++ b/drivers/mfd/atc260x-core.c
@@ -235,8 +235,8 @@ int atc260x_match_device(struct atc260x *atc260x, struct regmap_config *regmap_c
mutex_init(atc260x->regmap_mutex);
- regmap_cfg->lock = regmap_lock_mutex,
- regmap_cfg->unlock = regmap_unlock_mutex,
+ regmap_cfg->lock = regmap_lock_mutex;
+ regmap_cfg->unlock = regmap_unlock_mutex;
regmap_cfg->lock_arg = atc260x->regmap_mutex;
return 0;
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index e7c2ac74d998..db8c2963fb48 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -93,7 +93,7 @@ static const struct regmap_irq bd9571mwv_irqs[] = {
BD9571MWV_INT_INTREQ_BKUP_TRG_INT),
};
-static struct regmap_irq_chip bd9571mwv_irq_chip = {
+static const struct regmap_irq_chip bd9571mwv_irq_chip = {
.name = "bd9571mwv",
.status_base = BD9571MWV_INT_INTREQ,
.mask_base = BD9571MWV_INT_INTMASK,
@@ -159,7 +159,7 @@ static const struct regmap_config bd9574mwf_regmap_config = {
.max_register = 0xff,
};
-static struct regmap_irq_chip bd9574mwf_irq_chip = {
+static const struct regmap_irq_chip bd9574mwf_irq_chip = {
.name = "bd9574mwf",
.status_base = BD9571MWV_INT_INTREQ,
.mask_base = BD9571MWV_INT_INTMASK,
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index e2aae8918679..f3dc812b359f 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space
+ * ChromeOS Embedded Controller
*
* Copyright (C) 2014 Google, Inc.
*/
@@ -353,22 +353,17 @@ static int __init cros_ec_dev_init(void)
{
int ret;
- ret = class_register(&cros_class);
+ ret = class_register(&cros_class);
if (ret) {
pr_err(CROS_EC_DEV_NAME ": failed to register device class\n");
return ret;
}
- /* Register the driver */
ret = platform_driver_register(&cros_ec_dev_driver);
- if (ret < 0) {
+ if (ret) {
pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret);
- goto failed_devreg;
+ class_unregister(&cros_class);
}
- return 0;
-
-failed_devreg:
- class_unregister(&cros_class);
return ret;
}
@@ -382,6 +377,6 @@ module_init(cros_ec_dev_init);
module_exit(cros_ec_dev_exit);
MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
-MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
+MODULE_DESCRIPTION("ChromeOS Embedded Controller");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index dbbc4779170a..637c5f47a4b0 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -25,7 +25,7 @@
#define DA9062_IRQ_LOW 0
#define DA9062_IRQ_HIGH 1
-static struct regmap_irq da9061_irqs[] = {
+static const struct regmap_irq da9061_irqs[] = {
/* EVENT A */
[DA9061_IRQ_ONKEY] = {
.reg_offset = DA9062_REG_EVENT_A_OFFSET,
@@ -79,7 +79,7 @@ static struct regmap_irq da9061_irqs[] = {
},
};
-static struct regmap_irq_chip da9061_irq_chip = {
+static const struct regmap_irq_chip da9061_irq_chip = {
.name = "da9061-irq",
.irqs = da9061_irqs,
.num_irqs = DA9061_NUM_IRQ,
@@ -89,7 +89,7 @@ static struct regmap_irq_chip da9061_irq_chip = {
.ack_base = DA9062AA_EVENT_A,
};
-static struct regmap_irq da9062_irqs[] = {
+static const struct regmap_irq da9062_irqs[] = {
/* EVENT A */
[DA9062_IRQ_ONKEY] = {
.reg_offset = DA9062_REG_EVENT_A_OFFSET,
@@ -151,7 +151,7 @@ static struct regmap_irq da9062_irqs[] = {
},
};
-static struct regmap_irq_chip da9062_irq_chip = {
+static const struct regmap_irq_chip da9062_irq_chip = {
.name = "da9062-irq",
.irqs = da9062_irqs,
.num_irqs = DA9062_NUM_IRQ,
@@ -470,7 +470,7 @@ static const struct regmap_range_cfg da9061_range_cfg[] = {
}
};
-static struct regmap_config da9061_regmap_config = {
+static const struct regmap_config da9061_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.ranges = da9061_range_cfg,
@@ -576,7 +576,7 @@ static const struct regmap_range_cfg da9062_range_cfg[] = {
}
};
-static struct regmap_config da9062_regmap_config = {
+static const struct regmap_config da9062_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.ranges = da9062_range_cfg,
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 74f38bf3778f..2e4ab2404154 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -16,7 +16,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-static struct regmap_config mx25_tsadc_regmap_config = {
+static const struct regmap_config mx25_tsadc_regmap_config = {
.fast_io = true,
.max_register = 8,
.reg_bits = 32,
diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
index b02bfdc871e9..6ca867b8f5f1 100644
--- a/drivers/mfd/gateworks-gsc.c
+++ b/drivers/mfd/gateworks-gsc.c
@@ -160,7 +160,7 @@ static const struct of_device_id gsc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, gsc_of_match);
-static struct regmap_bus gsc_regmap_bus = {
+static const struct regmap_bus gsc_regmap_bus = {
.reg_read = gsc_read,
.reg_write = gsc_write,
};
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 042109304db4..5f61909c85e9 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -41,7 +41,7 @@ static const struct regmap_irq_chip hi655x_irq_chip = {
.mask_base = HI655X_IRQ_MASK_BASE,
};
-static struct regmap_config hi655x_regmap_config = {
+static const struct regmap_config hi655x_regmap_config = {
.reg_bits = 32,
.reg_stride = HI655X_STRIDE,
.val_bits = 8,
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 1362b3f64ade..1d8cdc4d5819 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -424,6 +424,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_spi_info },
{ PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info },
+ /* ARL-H */
+ { PCI_VDEVICE(INTEL, 0x7725), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7726), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7727), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7730), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7746), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x7750), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7751), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7752), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x7778), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x7779), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x777a), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x777b), (kernel_ulong_t)&bxt_i2c_info },
/* RPL-S */
{ PCI_VDEVICE(INTEL, 0x7a28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7a29), (kernel_ulong_t)&bxt_uart_info },
@@ -594,6 +607,32 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info },
+ /* PTL-H */
+ { PCI_VDEVICE(INTEL, 0xe325), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe326), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe327), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe330), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe346), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe350), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe351), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe352), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe378), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe379), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe37a), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe37b), (kernel_ulong_t)&ehl_i2c_info },
+ /* PTL-P */
+ { PCI_VDEVICE(INTEL, 0xe425), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe426), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe427), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe430), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe446), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0xe450), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe451), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe452), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xe478), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe479), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe47a), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xe47b), (kernel_ulong_t)&ehl_i2c_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/intel-m10-bmc-pmci.c b/drivers/mfd/intel-m10-bmc-pmci.c
index 698c5933938b..4fa9d380c62b 100644
--- a/drivers/mfd/intel-m10-bmc-pmci.c
+++ b/drivers/mfd/intel-m10-bmc-pmci.c
@@ -336,7 +336,7 @@ static const struct regmap_access_table m10bmc_pmci_access_table = {
.n_yes_ranges = ARRAY_SIZE(m10bmc_pmci_regmap_range),
};
-static struct regmap_config m10bmc_pmci_regmap_config = {
+static const struct regmap_config m10bmc_pmci_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
diff --git a/drivers/mfd/intel-m10-bmc-spi.c b/drivers/mfd/intel-m10-bmc-spi.c
index d64d28199df6..36f631ef7a67 100644
--- a/drivers/mfd/intel-m10-bmc-spi.c
+++ b/drivers/mfd/intel-m10-bmc-spi.c
@@ -24,7 +24,7 @@ static const struct regmap_access_table m10bmc_access_table = {
.n_yes_ranges = ARRAY_SIZE(m10bmc_regmap_range),
};
-static struct regmap_config intel_m10bmc_regmap_config = {
+static const struct regmap_config intel_m10bmc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index ab3c94224dd1..ccd76800d8e4 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -137,7 +137,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_crit[] = {
REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, GENMASK(1, 0)),
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.name = "bxtwc_irq_chip",
.status_base = BXTWC_IRQLVL1,
.mask_base = BXTWC_MIRQLVL1,
@@ -146,7 +146,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
.name = "bxtwc_irq_chip_pwrbtn",
.status_base = BXTWC_PWRBTNIRQ,
.mask_base = BXTWC_MPWRBTNIRQ,
@@ -155,7 +155,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
.name = "bxtwc_irq_chip_tmu",
.status_base = BXTWC_TMUIRQ,
.mask_base = BXTWC_MTMUIRQ,
@@ -164,7 +164,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
.name = "bxtwc_irq_chip_bcu",
.status_base = BXTWC_BCUIRQ,
.mask_base = BXTWC_MBCUIRQ,
@@ -173,7 +173,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
.name = "bxtwc_irq_chip_adc",
.status_base = BXTWC_ADCIRQ,
.mask_base = BXTWC_MADCIRQ,
@@ -182,7 +182,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
.num_regs = 1,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
.name = "bxtwc_irq_chip_chgr",
.status_base = BXTWC_CHGR0IRQ,
.mask_base = BXTWC_MCHGR0IRQ,
@@ -191,7 +191,7 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
.num_regs = 2,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = {
+static const struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = {
.name = "bxtwc_irq_chip_crit",
.status_base = BXTWC_CRITIRQ,
.mask_base = BXTWC_MCRITIRQ,
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 7fce3ef7ab45..2a83f540d4c9 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -178,7 +178,6 @@ static const struct dmi_system_id cht_wc_model_dmi_ids[] = {
.driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YT3_X90,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
},
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 67bf4de4c0c1..6fce79ec2dc6 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -143,6 +143,7 @@ static const struct of_device_id max14577_dt_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, max14577_dt_match);
static bool max14577_muic_volatile_reg(struct device *dev, unsigned int reg)
{
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 74ef3f6d576c..89b30ef91f4f 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -400,7 +400,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
static int max77620_initialise_fps(struct max77620_chip *chip)
{
struct device *dev = chip->dev;
- struct device_node *fps_np, *fps_child;
+ struct device_node *fps_np;
u8 config;
int fps_id;
int ret;
@@ -414,10 +414,9 @@ static int max77620_initialise_fps(struct max77620_chip *chip)
if (!fps_np)
goto skip_fps;
- for_each_child_of_node(fps_np, fps_child) {
+ for_each_child_of_node_scoped(fps_np, fps_child) {
ret = max77620_config_fps(chip, fps_child);
if (ret < 0) {
- of_node_put(fps_child);
of_node_put(fps_np);
return ret;
}
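Note: for_each_child_of_node_scoped() declares the child iterator itself and drops its reference automatically on every exit from the loop, including an early return, which is why the explicit of_node_put(fps_child) on the error path can go away. A minimal sketch, with a hypothetical helper:

	for_each_child_of_node_scoped(parent, child) {
		ret = do_config(child);	/* illustrative callee */
		if (ret)
			return ret;	/* child reference dropped automatically */
	}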
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index c973e2579bdf..9f438d5d4326 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -116,7 +116,7 @@ static int mc13xxx_spi_write(void *context, const void *data, size_t count)
* single transfer.
*/
-static struct regmap_bus regmap_mc13xxx_bus = {
+static const struct regmap_bus regmap_mc13xxx_bus = {
.write = mc13xxx_spi_write,
.read = mc13xxx_spi_read,
};
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
index 2685efa5c9e2..b9b1036c8ff4 100644
--- a/drivers/mfd/mt6360-core.c
+++ b/drivers/mfd/mt6360-core.c
@@ -5,6 +5,7 @@
* Author: Gene Chen <gene_chen@richtek.com>
*/
+#include <linux/cleanup.h>
#include <linux/crc8.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -404,7 +405,6 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
u8 reg_addr = *(u8 *)(reg + 1);
struct i2c_client *i2c;
bool crc_needed = false;
- u8 *buf;
int buf_len = MT6360_ALLOC_READ_SIZE(val_size);
int read_size = val_size;
u8 crc;
@@ -423,7 +423,7 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
read_size += MT6360_CRC_CRC8_SIZE;
}
- buf = kzalloc(buf_len, GFP_KERNEL);
+ u8 *buf __free(kfree) = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -433,24 +433,19 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
ret = i2c_smbus_read_i2c_block_data(i2c, reg_addr, read_size,
buf + MT6360_CRC_PREDATA_OFFSET);
if (ret < 0)
- goto out;
- else if (ret != read_size) {
- ret = -EIO;
- goto out;
- }
+ return ret;
+ else if (ret != read_size)
+ return -EIO;
if (crc_needed) {
crc = crc8(ddata->crc8_tbl, buf, val_size + MT6360_CRC_PREDATA_OFFSET, 0);
- if (crc != buf[val_size + MT6360_CRC_PREDATA_OFFSET]) {
- ret = -EIO;
- goto out;
- }
+ if (crc != buf[val_size + MT6360_CRC_PREDATA_OFFSET])
+ return -EIO;
}
memcpy(val, buf + MT6360_CRC_PREDATA_OFFSET, val_size);
-out:
- kfree(buf);
- return (ret < 0) ? ret : 0;
+
+ return 0;
}
static int mt6360_regmap_write(void *context, const void *val, size_t val_size)
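Note: the __free(kfree) annotation from <linux/cleanup.h> attaches a scope-based cleanup to the pointer, so kfree() runs automatically when the variable goes out of scope; that is what turns every goto-out error path above into a plain return. A minimal sketch:

	static int example_read(size_t len)
	{
		u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		return 0;	/* buf is freed automatically on any return */
	}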
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index eab5bf6cff10..b4b178caf754 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -84,7 +84,6 @@ static const struct of_device_id pmic_spmi_id_table[] = {
static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
{
struct device_node *spmi_bus;
- struct device_node *child;
int function_parent_usid, ret;
u32 pmic_addr;
@@ -108,10 +107,9 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, str
*/
spmi_bus = of_get_parent(sdev->dev.of_node);
sdev = ERR_PTR(-ENODATA);
- for_each_child_of_node(spmi_bus, child) {
+ for_each_child_of_node_scoped(spmi_bus, child) {
ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr);
if (ret) {
- of_node_put(child);
sdev = ERR_PTR(ret);
break;
}
@@ -125,7 +123,6 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, str
*/
sdev = ERR_PTR(-EPROBE_DEFER);
}
- of_node_put(child);
break;
}
}
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 9184e553fafd..1d43458b4938 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -65,13 +65,13 @@ static const struct mfd_cell retu_devs[] = {
}
};
-static struct regmap_irq retu_irqs[] = {
+static const struct regmap_irq retu_irqs[] = {
[RETU_INT_PWR] = {
.mask = 1 << RETU_INT_PWR,
}
};
-static struct regmap_irq_chip retu_irq_chip = {
+static const struct regmap_irq_chip retu_irq_chip = {
.name = "RETU",
.irqs = retu_irqs,
.num_irqs = ARRAY_SIZE(retu_irqs),
@@ -101,13 +101,13 @@ static const struct mfd_cell tahvo_devs[] = {
},
};
-static struct regmap_irq tahvo_irqs[] = {
+static const struct regmap_irq tahvo_irqs[] = {
[TAHVO_INT_VBUS] = {
.mask = 1 << TAHVO_INT_VBUS,
}
};
-static struct regmap_irq_chip tahvo_irq_chip = {
+static const struct regmap_irq_chip tahvo_irq_chip = {
.name = "TAHVO",
.irqs = tahvo_irqs,
.num_irqs = ARRAY_SIZE(tahvo_irqs),
@@ -120,7 +120,7 @@ static struct regmap_irq_chip tahvo_irq_chip = {
static const struct retu_data {
char *chip_name;
char *companion_name;
- struct regmap_irq_chip *irq_chip;
+ const struct regmap_irq_chip *irq_chip;
const struct mfd_cell *children;
int nchildren;
} retu_data[] = {
@@ -216,7 +216,7 @@ static int retu_regmap_write(void *context, const void *data, size_t count)
return i2c_smbus_write_word_data(i2c, reg, val);
}
-static struct regmap_bus retu_bus = {
+static const struct regmap_bus retu_bus = {
.read = retu_regmap_read,
.write = retu_regmap_write,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index 5eda3c0dbbdf..39ab114ea669 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -531,7 +531,7 @@ static const struct regmap_irq rk817_irqs[RK817_IRQ_END] = {
REGMAP_IRQ_REG_LINE(23, 8)
};
-static struct regmap_irq_chip rk805_irq_chip = {
+static const struct regmap_irq_chip rk805_irq_chip = {
.name = "rk805",
.irqs = rk805_irqs,
.num_irqs = ARRAY_SIZE(rk805_irqs),
@@ -542,7 +542,7 @@ static struct regmap_irq_chip rk805_irq_chip = {
.init_ack_masked = true,
};
-static struct regmap_irq_chip rk806_irq_chip = {
+static const struct regmap_irq_chip rk806_irq_chip = {
.name = "rk806",
.irqs = rk806_irqs,
.num_irqs = ARRAY_SIZE(rk806_irqs),
@@ -578,7 +578,7 @@ static const struct regmap_irq_chip rk816_irq_chip = {
.init_ack_masked = true,
};
-static struct regmap_irq_chip rk817_irq_chip = {
+static const struct regmap_irq_chip rk817_irq_chip = {
.name = "rk817",
.irqs = rk817_irqs,
.num_irqs = ARRAY_SIZE(rk817_irqs),
diff --git a/drivers/mfd/rk8xx-i2c.c b/drivers/mfd/rk8xx-i2c.c
index 69a6b297d723..37287b06dab0 100644
--- a/drivers/mfd/rk8xx-i2c.c
+++ b/drivers/mfd/rk8xx-i2c.c
@@ -21,6 +21,17 @@ struct rk8xx_i2c_platform_data {
int variant;
};
+static bool rk806_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RK806_POWER_EN0 ... RK806_POWER_EN5:
+ case RK806_DVS_START_CTRL ... RK806_INT_MSK1:
+ return true;
+ }
+
+ return false;
+}
+
static bool rk808_is_volatile_reg(struct device *dev, unsigned int reg)
{
/*
@@ -121,6 +132,14 @@ static const struct regmap_config rk805_regmap_config = {
.volatile_reg = rk808_is_volatile_reg,
};
+static const struct regmap_config rk806_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RK806_BUCK_RSERVE_REG5,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = rk806_is_volatile_reg,
+};
+
static const struct regmap_config rk808_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -150,6 +169,11 @@ static const struct rk8xx_i2c_platform_data rk805_data = {
.variant = RK805_ID,
};
+static const struct rk8xx_i2c_platform_data rk806_data = {
+ .regmap_cfg = &rk806_regmap_config,
+ .variant = RK806_ID,
+};
+
static const struct rk8xx_i2c_platform_data rk808_data = {
.regmap_cfg = &rk808_regmap_config,
.variant = RK808_ID,
@@ -201,6 +225,7 @@ static SIMPLE_DEV_PM_OPS(rk8xx_i2c_pm_ops, rk8xx_suspend, rk8xx_resume);
static const struct of_device_id rk8xx_i2c_of_match[] = {
{ .compatible = "rockchip,rk805", .data = &rk805_data },
+ { .compatible = "rockchip,rk806", .data = &rk806_data },
{ .compatible = "rockchip,rk808", .data = &rk808_data },
{ .compatible = "rockchip,rk809", .data = &rk809_data },
{ .compatible = "rockchip,rk816", .data = &rk816_data },
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 5b4290f116fc..39f7514aa3d8 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -316,7 +316,7 @@ static const struct regmap_irq bd71815_irqs[] = {
REGMAP_IRQ_REG(BD71815_INT_RTC2, 11, BD71815_INT_RTC2_MASK),
};
-static struct regmap_irq bd71828_irqs[] = {
+static const struct regmap_irq bd71828_irqs[] = {
REGMAP_IRQ_REG(BD71828_INT_BUCK1_OCP, 0, BD71828_INT_BUCK1_OCP_MASK),
REGMAP_IRQ_REG(BD71828_INT_BUCK2_OCP, 0, BD71828_INT_BUCK2_OCP_MASK),
REGMAP_IRQ_REG(BD71828_INT_BUCK3_OCP, 0, BD71828_INT_BUCK3_OCP_MASK),
@@ -407,7 +407,7 @@ static struct regmap_irq bd71828_irqs[] = {
REGMAP_IRQ_REG(BD71828_INT_RTC2, 11, BD71828_INT_RTC2_MASK),
};
-static struct regmap_irq_chip bd71828_irq_chip = {
+static const struct regmap_irq_chip bd71828_irq_chip = {
.name = "bd71828_irq",
.main_status = BD71828_REG_INT_MAIN,
.irqs = &bd71828_irqs[0],
@@ -423,7 +423,7 @@ static struct regmap_irq_chip bd71828_irq_chip = {
.irq_reg_stride = 1,
};
-static struct regmap_irq_chip bd71815_irq_chip = {
+static const struct regmap_irq_chip bd71815_irq_chip = {
.name = "bd71815_irq",
.main_status = BD71815_REG_INT_STAT,
.irqs = &bd71815_irqs[0],
@@ -491,7 +491,7 @@ static int bd71828_i2c_probe(struct i2c_client *i2c)
int ret;
struct regmap *regmap;
const struct regmap_config *regmap_config;
- struct regmap_irq_chip *irqchip;
+ const struct regmap_irq_chip *irqchip;
unsigned int chip_type;
struct mfd_cell *mfd;
int cells;
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 7755a4c073bf..25e494a93d48 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -60,7 +60,7 @@ static const struct regmap_irq bd718xx_irqs[] = {
REGMAP_IRQ_REG(BD718XX_INT_STBY_REQ, 0, BD718XX_INT_STBY_REQ_MASK),
};
-static struct regmap_irq_chip bd718xx_irq_chip = {
+static const struct regmap_irq_chip bd718xx_irq_chip = {
.name = "bd718xx-irq",
.irqs = bd718xx_irqs,
.num_irqs = ARRAY_SIZE(bd718xx_irqs),
diff --git a/drivers/mfd/rohm-bd9576.c b/drivers/mfd/rohm-bd9576.c
index 3a9f61961721..17323ae39803 100644
--- a/drivers/mfd/rohm-bd9576.c
+++ b/drivers/mfd/rohm-bd9576.c
@@ -57,7 +57,7 @@ static const struct regmap_access_table volatile_regs = {
.n_yes_ranges = ARRAY_SIZE(volatile_ranges),
};
-static struct regmap_config bd957x_regmap = {
+static const struct regmap_config bd957x_regmap = {
.reg_bits = 8,
.val_bits = 8,
.volatile_table = &volatile_regs,
@@ -65,7 +65,7 @@ static struct regmap_config bd957x_regmap = {
.cache_type = REGCACHE_MAPLE,
};
-static struct regmap_irq bd9576_irqs[] = {
+static const struct regmap_irq bd9576_irqs[] = {
REGMAP_IRQ_REG(BD9576_INT_THERM, 0, BD957X_MASK_INT_MAIN_THERM),
REGMAP_IRQ_REG(BD9576_INT_OVP, 0, BD957X_MASK_INT_MAIN_OVP),
REGMAP_IRQ_REG(BD9576_INT_SCP, 0, BD957X_MASK_INT_MAIN_SCP),
@@ -76,7 +76,7 @@ static struct regmap_irq bd9576_irqs[] = {
REGMAP_IRQ_REG(BD9576_INT_SYS, 0, BD957X_MASK_INT_MAIN_SYS),
};
-static struct regmap_irq_chip bd9576_irq_chip = {
+static const struct regmap_irq_chip bd9576_irq_chip = {
.name = "bd9576_irq",
.irqs = &bd9576_irqs[0],
.num_irqs = ARRAY_SIZE(bd9576_irqs),
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 81e517cdfb27..7186e2108108 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -135,7 +135,7 @@ static int sprd_pmic_spi_read(void *context,
return 0;
}
-static struct regmap_bus sprd_pmic_regmap = {
+static const struct regmap_bus sprd_pmic_regmap = {
.write = sprd_pmic_spi_write,
.read = sprd_pmic_spi_read,
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 33f1e07ab24d..2ce15f60eb10 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -8,6 +8,7 @@
* Author: Dong Aisheng <dong.aisheng@linaro.org>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hwspinlock.h>
@@ -45,7 +46,6 @@ static const struct regmap_config syscon_regmap_config = {
static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
{
struct clk *clk;
- struct syscon *syscon;
struct regmap *regmap;
void __iomem *base;
u32 reg_io_width;
@@ -54,20 +54,16 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
struct resource res;
struct reset_control *reset;
- syscon = kzalloc(sizeof(*syscon), GFP_KERNEL);
+ struct syscon *syscon __free(kfree) = kzalloc(sizeof(*syscon), GFP_KERNEL);
if (!syscon)
return ERR_PTR(-ENOMEM);
- if (of_address_to_resource(np, 0, &res)) {
- ret = -ENOMEM;
- goto err_map;
- }
+ if (of_address_to_resource(np, 0, &res))
+ return ERR_PTR(-ENOMEM);
base = of_iomap(np, 0);
- if (!base) {
- ret = -ENOMEM;
- goto err_map;
- }
+ if (!base)
+ return ERR_PTR(-ENOMEM);
/* Parse the device's DT node for an endianness specification */
if (of_property_read_bool(np, "big-endian"))
@@ -152,7 +148,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
list_add_tail(&syscon->list, &syscon_list);
spin_unlock(&syscon_list_slock);
- return syscon;
+ return_ptr(syscon);
err_reset:
reset_control_put(reset);
@@ -163,8 +159,6 @@ err_clk:
regmap_exit(regmap);
err_regmap:
iounmap(base);
-err_map:
- kfree(syscon);
return ERR_PTR(ret);
}
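Note: return_ptr() is the companion to __free(): it disarms the pending cleanup (via no_free_ptr()) and returns the pointer, so ownership transfers to the caller only on the success path while all earlier returns still free the allocation. Roughly, with an illustrative type:

	static struct foo *foo_create(void)
	{
		struct foo *p __free(kfree) = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return ERR_PTR(-ENOMEM);

		return_ptr(p);	/* disarm cleanup, hand ownership to caller */
	}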
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index db28eb0c8995..ef953ee73145 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -312,8 +312,6 @@ static int tc3589x_device_init(struct tc3589x *tc3589x)
}
static const struct of_device_id tc3589x_match[] = {
- /* Legacy compatible string */
- { .compatible = "tc3589x", .data = (void *) TC3589X_UNKNOWN },
{ .compatible = "toshiba,tc35890", .data = (void *) TC3589X_TC35890 },
{ .compatible = "toshiba,tc35892", .data = (void *) TC3589X_TC35892 },
{ .compatible = "toshiba,tc35893", .data = (void *) TC3589X_TC35893 },
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 0da1cecb5af6..e2f6858d101e 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -23,7 +23,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps6105x.h>
-static struct regmap_config tps6105x_regmap_config = {
+static const struct regmap_config tps6105x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = TPS6105X_REG_3,
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index 5ef0a7e0d61d..54832e9321b9 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -45,7 +45,7 @@ static const struct regmap_irq tps65086_irqs[] = {
REGMAP_IRQ_REG(TPS65086_IRQ_FAULT, 0, TPS65086_IRQ_FAULT_MASK),
};
-static struct regmap_irq_chip tps65086_irq_chip = {
+static const struct regmap_irq_chip tps65086_irq_chip = {
.name = "tps65086",
.status_base = TPS65086_IRQ,
.mask_base = TPS65086_IRQ_MASK,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index b82cd484ac85..24f42175a9b4 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -120,7 +120,7 @@ static const struct regmap_irq tps65090_irqs[] = {
},
};
-static struct regmap_irq_chip tps65090_irq_chip = {
+static const struct regmap_irq_chip tps65090_irq_chip = {
.name = "tps65090",
.irqs = tps65090_irqs,
.num_irqs = ARRAY_SIZE(tps65090_irqs),
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 427a2b97f117..4f3e632f726f 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -186,7 +186,7 @@ static const struct regmap_irq tps65218_irqs[] = {
},
};
-static struct regmap_irq_chip tps65218_irq_chip = {
+static const struct regmap_irq_chip tps65218_irq_chip = {
.name = "tps65218",
.irqs = tps65218_irqs,
.num_irqs = ARRAY_SIZE(tps65218_irqs),
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
index 0e0c42e4fdfc..57ff5cb294a6 100644
--- a/drivers/mfd/tps65219.c
+++ b/drivers/mfd/tps65219.c
@@ -159,7 +159,7 @@ static struct regmap_irq_sub_irq_map tps65219_sub_irq_offsets[] = {
#define TPS65219_REGMAP_IRQ_REG(int_name, register_position) \
REGMAP_IRQ_REG(int_name, register_position, int_name##_MASK)
-static struct regmap_irq tps65219_irqs[] = {
+static const struct regmap_irq tps65219_irqs[] = {
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_SCG, TPS65219_REG_INT_LDO_3_4_POS),
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_OC, TPS65219_REG_INT_LDO_3_4_POS),
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_LDO3_UV, TPS65219_REG_INT_LDO_3_4_POS),
@@ -211,7 +211,7 @@ static struct regmap_irq tps65219_irqs[] = {
TPS65219_REGMAP_IRQ_REG(TPS65219_INT_PB_RISING_EDGE_DETECT, TPS65219_REG_INT_PB_POS),
};
-static struct regmap_irq_chip tps65219_irq_chip = {
+static const struct regmap_irq_chip tps65219_irq_chip = {
.name = "tps65219_irq",
.main_status = TPS65219_REG_INT_SOURCE,
.num_main_regs = 1,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 8fb0384d5a8e..6a7b7a697fb7 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -197,7 +197,7 @@ static const struct regmap_irq tps65910_irqs[] = {
},
};
-static struct regmap_irq_chip tps65911_irq_chip = {
+static const struct regmap_irq_chip tps65911_irq_chip = {
.name = "tps65910",
.irqs = tps65911_irqs,
.num_irqs = ARRAY_SIZE(tps65911_irqs),
@@ -208,7 +208,7 @@ static struct regmap_irq_chip tps65911_irq_chip = {
.ack_base = TPS65910_INT_STS,
};
-static struct regmap_irq_chip tps65910_irq_chip = {
+static const struct regmap_irq_chip tps65910_irq_chip = {
.name = "tps65910",
.irqs = tps65910_irqs,
.num_irqs = ARRAY_SIZE(tps65910_irqs),
@@ -223,7 +223,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
struct tps65910_platform_data *pdata)
{
int ret;
- static struct regmap_irq_chip *tps6591x_irqs_chip;
+ static const struct regmap_irq_chip *tps6591x_irqs_chip;
if (!irq) {
dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 87ee6aac3763..a9dcd7f0d9e3 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -57,7 +57,7 @@ static const struct regmap_irq tps65912_irqs[] = {
REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO10, 3, TPS65912_INT_STS4_PGOOD_LDO10),
};
-static struct regmap_irq_chip tps65912_irq_chip = {
+static const struct regmap_irq_chip tps65912_irq_chip = {
.name = "tps65912",
.irqs = tps65912_irqs,
.num_irqs = ARRAY_SIZE(tps65912_irqs),
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index c184e8bfab7c..218d6195fad2 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -620,7 +620,7 @@ static const struct regmap_irq twl6040_irqs[] = {
{ .reg_offset = 0, .mask = TWL6040_READYINT, },
};
-static struct regmap_irq_chip twl6040_irq_chip = {
+static const struct regmap_irq_chip twl6040_irq_chip = {
.name = "twl6040",
.irqs = twl6040_irqs,
.num_irqs = ARRAY_SIZE(twl6040_irqs),
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
index 7b9873b72c37..fcd182d51981 100644
--- a/drivers/mfd/wcd934x.c
+++ b/drivers/mfd/wcd934x.c
@@ -109,7 +109,7 @@ static const struct regmap_range_cfg wcd934x_ranges[] = {
},
};
-static struct regmap_config wcd934x_regmap_config = {
+static const struct regmap_config wcd934x_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
.cache_type = REGCACHE_MAPLE,
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index bcc005dff1c0..03633cccd043 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -7,65 +7,12 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include "cxl.h"
-
-static const __be32 *read_prop_string(const struct device_node *np,
- const char *prop_name)
-{
- const __be32 *prop;
-
- prop = of_get_property(np, prop_name, NULL);
- if (cxl_verbose && prop)
- pr_info("%s: %s\n", prop_name, (char *) prop);
- return prop;
-}
-
-static const __be32 *read_prop_dword(const struct device_node *np,
- const char *prop_name, u32 *val)
-{
- const __be32 *prop;
-
- prop = of_get_property(np, prop_name, NULL);
- if (prop)
- *val = be32_to_cpu(prop[0]);
- if (cxl_verbose && prop)
- pr_info("%s: %#x (%u)\n", prop_name, *val, *val);
- return prop;
-}
-
-static const __be64 *read_prop64_dword(const struct device_node *np,
- const char *prop_name, u64 *val)
-{
- const __be64 *prop;
-
- prop = of_get_property(np, prop_name, NULL);
- if (prop)
- *val = be64_to_cpu(prop[0]);
- if (cxl_verbose && prop)
- pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val);
- return prop;
-}
-
-
-static int read_handle(struct device_node *np, u64 *handle)
-{
- const __be32 *prop;
- u64 size;
-
- /* Get address and size of the node */
- prop = of_get_address(np, 0, &size, NULL);
- if (size)
- return -EINVAL;
-
- /* Helper to read a big number; size is in cells (not bytes) */
- *handle = of_read_number(prop, of_n_addr_cells(np));
- return 0;
-}
-
static int read_phys_addr(struct device_node *np, char *prop_name,
struct cxl_afu *afu)
{
@@ -100,9 +47,6 @@ static int read_phys_addr(struct device_node *np, char *prop_name,
type, prop_name);
return -EINVAL;
}
- if (cxl_verbose)
- pr_info("%s: %#x %#llx (size %#llx)\n",
- prop_name, type, addr, size);
}
}
return 0;
@@ -130,36 +74,17 @@ static int read_vpd(struct cxl *adapter, struct cxl_afu *afu)
int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np)
{
- if (read_handle(afu_np, &afu->guest->handle))
- return -EINVAL;
- pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle);
-
- return 0;
+ return of_property_read_reg(afu_np, 0, &afu->guest->handle, NULL);
}
int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
{
- int i, len, rc;
- char *p;
- const __be32 *prop;
+ int i, rc;
u16 device_id, vendor_id;
u32 val = 0, class_code;
/* Properties are read in the same order as listed in PAPR */
- if (cxl_verbose) {
- pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n");
-
- prop = of_get_property(np, "compatible", &len);
- i = 0;
- while (i < len) {
- p = (char *) prop + i;
- pr_info("compatible: %s\n", p);
- i += strlen(p) + 1;
- }
- read_prop_string(np, "name");
- }
-
rc = read_phys_addr(np, "reg", afu);
if (rc)
return rc;
@@ -173,25 +98,15 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
else
afu->psa = true;
- if (cxl_verbose) {
- read_prop_string(np, "ibm,loc-code");
- read_prop_string(np, "device_type");
- }
+ of_property_read_u32(np, "ibm,#processes", &afu->max_procs_virtualised);
- read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised);
-
- if (cxl_verbose) {
- read_prop_dword(np, "ibm,scratchpad-size", &val);
- read_prop_dword(np, "ibm,programmable", &val);
- read_prop_string(np, "ibm,phandle");
+ if (cxl_verbose)
read_vpd(NULL, afu);
- }
- read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
+ of_property_read_u32(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
afu->irqs_max = afu->guest->max_ints;
- prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs);
- if (prop) {
+ if (!of_property_read_u32(np, "ibm,min-ints-per-process", &afu->pp_irqs)) {
/* One extra interrupt for the PSL interrupt is already
* included. Remove it now to keep only AFU interrupts and
* match the native case.
@@ -199,21 +114,13 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
afu->pp_irqs--;
}
- if (cxl_verbose) {
- read_prop_dword(np, "ibm,max-ints", &val);
- read_prop_dword(np, "ibm,vpd-size", &val);
- }
-
- read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len);
+ of_property_read_u64(np, "ibm,error-buffer-size", &afu->eb_len);
afu->eb_offset = 0;
- if (cxl_verbose)
- read_prop_dword(np, "ibm,config-record-type", &val);
-
- read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len);
+ of_property_read_u64(np, "ibm,config-record-size", &afu->crs_len);
afu->crs_offset = 0;
- read_prop_dword(np, "ibm,#config-records", &afu->crs_num);
+ of_property_read_u32(np, "ibm,#config-records", &afu->crs_num);
if (cxl_verbose) {
for (i = 0; i < afu->crs_num; i++) {
@@ -235,35 +142,18 @@ int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
i, class_code);
}
}
-
- read_prop_dword(np, "ibm,function-number", &val);
- read_prop_dword(np, "ibm,privileged-function", &val);
- read_prop_dword(np, "vendor-id", &val);
- read_prop_dword(np, "device-id", &val);
- read_prop_dword(np, "revision-id", &val);
- read_prop_dword(np, "class-code", &val);
- read_prop_dword(np, "subsystem-vendor-id", &val);
- read_prop_dword(np, "subsystem-id", &val);
}
/*
* if "ibm,process-mmio" doesn't exist then per-process mmio is
* not supported
*/
val = 0;
- prop = read_prop_dword(np, "ibm,process-mmio", &val);
- if (prop && val == 1)
+ if (!of_property_read_u32(np, "ibm,process-mmio", &val) && val == 1)
afu->pp_psa = true;
else
afu->pp_psa = false;
- if (cxl_verbose) {
- read_prop_dword(np, "ibm,supports-aur", &val);
- read_prop_dword(np, "ibm,supports-csrp", &val);
- read_prop_dword(np, "ibm,supports-prr", &val);
- }
-
- prop = read_prop_dword(np, "ibm,function-error-interrupt", &val);
- if (prop)
+ if (!of_property_read_u32(np, "ibm,function-error-interrupt", &val))
afu->serr_hwirq = val;
pr_devel("AFU handle: %#llx\n", afu->guest->handle);
@@ -334,95 +224,44 @@ err:
int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np)
{
- if (read_handle(np, &adapter->guest->handle))
- return -EINVAL;
- pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle);
-
- return 0;
+ return of_property_read_reg(np, 0, &adapter->guest->handle, NULL);
}
int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
{
- int rc, len, naddr, i;
- char *p;
- const __be32 *prop;
+ int rc;
+ const char *p;
u32 val = 0;
/* Properties are read in the same order as listed in PAPR */
- naddr = of_n_addr_cells(np);
-
- if (cxl_verbose) {
- pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n");
-
- read_prop_dword(np, "#address-cells", &val);
- read_prop_dword(np, "#size-cells", &val);
-
- prop = of_get_property(np, "compatible", &len);
- i = 0;
- while (i < len) {
- p = (char *) prop + i;
- pr_info("compatible: %s\n", p);
- i += strlen(p) + 1;
- }
- read_prop_string(np, "name");
- read_prop_string(np, "model");
-
- prop = of_get_property(np, "reg", NULL);
- if (prop) {
- pr_info("reg: addr:%#llx size:%#x\n",
- of_read_number(prop, naddr),
- be32_to_cpu(prop[naddr]));
- }
-
- read_prop_string(np, "ibm,loc-code");
- }
-
if ((rc = read_adapter_irq_config(adapter, np)))
return rc;
- if (cxl_verbose) {
- read_prop_string(np, "device_type");
- read_prop_string(np, "ibm,phandle");
- }
-
- prop = read_prop_dword(np, "ibm,caia-version", &val);
- if (prop) {
+ if (!of_property_read_u32(np, "ibm,caia-version", &val)) {
adapter->caia_major = (val & 0xFF00) >> 8;
adapter->caia_minor = val & 0xFF;
}
- prop = read_prop_dword(np, "ibm,psl-revision", &val);
- if (prop)
+ if (!of_property_read_u32(np, "ibm,psl-revision", &val))
adapter->psl_rev = val;
- prop = read_prop_string(np, "status");
- if (prop) {
- adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop);
+ if (!of_property_read_string(np, "status", &p)) {
+ adapter->guest->status = kasprintf(GFP_KERNEL, "%s", p);
if (adapter->guest->status == NULL)
return -ENOMEM;
}
- prop = read_prop_dword(np, "vendor-id", &val);
- if (prop)
+ if (!of_property_read_u32(np, "vendor-id", &val))
adapter->guest->vendor = val;
- prop = read_prop_dword(np, "device-id", &val);
- if (prop)
+ if (!of_property_read_u32(np, "device-id", &val))
adapter->guest->device = val;
- if (cxl_verbose) {
- read_prop_dword(np, "ibm,privileged-facility", &val);
- read_prop_dword(np, "revision-id", &val);
- read_prop_dword(np, "class-code", &val);
- }
-
- prop = read_prop_dword(np, "subsystem-vendor-id", &val);
- if (prop)
+ if (!of_property_read_u32(np, "subsystem-vendor-id", &val))
adapter->guest->subsystem_vendor = val;
- prop = read_prop_dword(np, "subsystem-id", &val);
- if (prop)
+ if (!of_property_read_u32(np, "subsystem-id", &val))
adapter->guest->subsystem = val;
if (cxl_verbose)
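Note: this conversion replaces open-coded of_get_property() plus be32_to_cpu()/be64_to_cpu() with the typed of_property_read_u32()/of_property_read_u64()/of_property_read_string() helpers, which validate the property length, handle endianness, and return 0 on success; the pci.c hunks below likewise use of_property_present() for a bare existence test. Typical usage, property names illustrative:

	u32 val;
	bool has_flag;

	if (!of_property_read_u32(np, "example-property", &val))
		pr_info("value %u\n", val);

	has_flag = of_property_present(np, "example-flag");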
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 4cf9e7c42a24..3d52f9b92d0d 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -363,17 +363,17 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
{
int rc;
struct device_node *np;
- const __be32 *prop;
+ u32 id;
if (!(np = pnv_pci_get_phb_node(dev)))
return -ENODEV;
- while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
+ while (np && of_property_read_u32(np, "ibm,chip-id", &id))
np = of_get_next_parent(np);
if (!np)
return -ENODEV;
- *chipid = be32_to_cpup(prop);
+ *chipid = id;
rc = get_phb_index(np, phb_index);
if (rc) {
@@ -398,32 +398,26 @@ static DEFINE_MUTEX(indications_mutex);
static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind,
u64 *nbwind)
{
- static u64 nbw, asn, capi = 0;
+ static u32 val[3];
struct device_node *np;
- const __be32 *prop;
mutex_lock(&indications_mutex);
- if (!capi) {
+ if (!val[0]) {
if (!(np = pnv_pci_get_phb_node(dev))) {
mutex_unlock(&indications_mutex);
return -ENODEV;
}
- prop = of_get_property(np, "ibm,phb-indications", NULL);
- if (!prop) {
- nbw = 0x0300UL; /* legacy values */
- asn = 0x0400UL;
- capi = 0x0200UL;
- } else {
- nbw = (u64)be32_to_cpu(prop[2]);
- asn = (u64)be32_to_cpu(prop[1]);
- capi = (u64)be32_to_cpu(prop[0]);
+ if (of_property_read_u32_array(np, "ibm,phb-indications", val, 3)) {
+ val[2] = 0x0300UL; /* legacy values */
+ val[1] = 0x0400UL;
+ val[0] = 0x0200UL;
}
of_node_put(np);
}
- *capiind = capi;
- *asnind = asn;
- *nbwind = nbw;
+ *capiind = val[0];
+ *asnind = val[1];
+ *nbwind = val[2];
mutex_unlock(&indications_mutex);
return 0;
}
@@ -605,7 +599,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
/* Do not fail when CAPP timebase sync is not supported by OPAL */
of_node_get(np);
- if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
+ if (!of_property_present(np, "ibm,capp-timebase-sync")) {
of_node_put(np);
dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
return;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 315c43f17dd3..409bd1c39663 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -579,7 +579,7 @@ static void release_afu_config_record(struct kobject *kobj)
kfree(cr);
}
-static struct kobj_type afu_config_record_type = {
+static const struct kobj_type afu_config_record_type = {
.sysfs_ops = &kobj_sysfs_ops,
.release = release_afu_config_record,
.default_groups = afu_cr_groups,
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index da87abe93daf..74181b8c386b 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -27,7 +27,8 @@
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
-#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
+#define CDSP1_DOMAIN_ID (4)
+#define FASTRPC_DEV_MAX 5 /* adsp, mdsp, slpi, cdsp, cdsp1 */
#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
@@ -106,7 +107,7 @@
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
- "sdsp", "cdsp"};
+ "sdsp", "cdsp", "cdsp1" };
struct fastrpc_phy_page {
u64 addr; /* physical address */
u64 size; /* size of contiguous region */
@@ -2269,7 +2270,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return err;
}
- for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
+ for (i = 0; i < FASTRPC_DEV_MAX; i++) {
if (!strcmp(domains[i], domain)) {
domain_id = i;
break;
@@ -2327,13 +2328,14 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
case ADSP_DOMAIN_ID:
case MDSP_DOMAIN_ID:
case SDSP_DOMAIN_ID:
- /* Unsigned PD offloading is only supported on CDSP*/
+ /* Unsigned PD offloading is only supported on CDSP and CDSP1 */
data->unsigned_support = false;
err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
if (err)
goto fdev_error;
break;
case CDSP_DOMAIN_ID:
+ case CDSP1_DOMAIN_ID:
data->unsigned_support = true;
/* Create both device nodes so that we can allow both Signed and Unsigned PD */
err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 88b91ad8e541..0cf31164b470 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -95,6 +95,7 @@
#include <linux/kallsyms.h>
#include <asm/sections.h>
+#include <asm/rwonce.h>
#define v1printk(a...) do { \
if (verbose) \
@@ -126,7 +127,6 @@ static int final_ack;
static int force_hwbrks;
static int hwbreaks_ok;
static int hw_break_val;
-static int hw_break_val2;
static int cont_instead_of_sstep;
static unsigned long cont_thread_id;
static unsigned long sstep_thread_id;
@@ -284,7 +284,7 @@ static void hw_rem_access_break(char *arg)
static void hw_break_val_access(void)
{
- hw_break_val2 = hw_break_val;
+ READ_ONCE(hw_break_val);
}
static void hw_break_val_write(void)
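Note: the dummy hw_break_val2 store existed only to force a read of hw_break_val so the access watchpoint under test fires. READ_ONCE() does the same job with a volatile load the compiler cannot elide, without needing a second variable:

	READ_ONCE(hw_break_val);	/* forced load; result intentionally unused */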
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 49868a45c0ad..4233dc4cc7d6 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -669,7 +669,6 @@ static int lis3lv02d_misc_fasync(int fd, struct file *file, int on)
static const struct file_operations lis3lv02d_misc_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = lis3lv02d_misc_read,
.open = lis3lv02d_misc_open,
.release = lis3lv02d_misc_release,
@@ -1038,7 +1037,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO;
if (of_property_read_bool(np, "st,wakeup-z-hi"))
pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI;
- if (of_get_property(np, "st,wakeup-threshold", &val))
+ if (!of_property_read_u32(np, "st,wakeup-threshold", &val))
pdata->wakeup_thresh = val;
if (of_property_read_bool(np, "st,wakeup2-x-lo"))
@@ -1053,7 +1052,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO;
if (of_property_read_bool(np, "st,wakeup2-z-hi"))
pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI;
- if (of_get_property(np, "st,wakeup2-threshold", &val))
+ if (!of_property_read_u32(np, "st,wakeup2-threshold", &val))
pdata->wakeup_thresh2 = val;
if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) {
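Note: here and in the mei, ntsync, phantom, mmc, and ubi hunks below, the .llseek = no_llseek initializers are dropped as no-ops: with seekability tracked via FMODE_LSEEK, a NULL ->llseek already makes lseek() fail with -ESPIPE, and no_llseek itself has been removed from the tree. A non-seekable fops now simply omits the field:

	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,
		.read	= example_read,	/* no .llseek: lseek() returns -ESPIPE */
	};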
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 40c3fe26f76d..1f5aaf16e300 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1176,7 +1176,6 @@ static const struct file_operations mei_fops = {
.poll = mei_poll,
.fsync = mei_fsync,
.fasync = mei_fasync,
- .llseek = no_llseek
};
/**
diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index 3c2f743c58b0..4954553b7baa 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -126,7 +126,6 @@ static const struct file_operations ntsync_obj_fops = {
.release = ntsync_obj_release,
.unlocked_ioctl = ntsync_obj_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
@@ -233,7 +232,6 @@ static const struct file_operations ntsync_fops = {
.release = ntsync_char_release,
.unlocked_ioctl = ntsync_char_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice ntsync_misc = {
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index 10125a22d5a5..d2028d6c6f08 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -97,8 +97,6 @@ struct ocxl_process_element {
__be32 software_state;
};
-int ocxl_create_cdev(struct ocxl_afu *afu);
-void ocxl_destroy_cdev(struct ocxl_afu *afu);
int ocxl_file_register_afu(struct ocxl_afu *afu);
void ocxl_file_unregister_afu(struct ocxl_afu *afu);
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 30bd7c39c261..701db2c5859b 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -279,7 +279,6 @@ static const struct file_operations phantom_file_ops = {
.unlocked_ioctl = phantom_ioctl,
.compat_ioctl = phantom_compat_ioctl,
.poll = phantom_poll,
- .llseek = no_llseek,
};
static irqreturn_t phantom_isr(int irq, void *data)
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 2ad4387c9837..1a7796ab3fad 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -185,10 +185,10 @@ static ssize_t tsl2550_store_power_state(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct tsl2550_data *data = i2c_get_clientdata(client);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
int ret;
- if (val > 1)
+ if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -217,10 +217,10 @@ static ssize_t tsl2550_store_operating_mode(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct tsl2550_data *data = i2c_get_clientdata(client);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
int ret;
- if (val > 1)
+ if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
if (data->power_state == 0)
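Note: simple_strtoul() cannot report errors; it silently stops at the first non-digit and ignores overflow, so the old code range-checked a value that could be garbage. kstrtoul() parses the entire string and returns -EINVAL or -ERANGE on failure, letting the store handler reject malformed input outright:

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		unsigned long val;

		if (kstrtoul(buf, 10, &val) || val > 1)
			return -EINVAL;

		return count;	/* value accepted */
	}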
diff --git a/drivers/misc/xilinx_tmr_inject.c b/drivers/misc/xilinx_tmr_inject.c
index 73c6da7d0963..734fdfac19ef 100644
--- a/drivers/misc/xilinx_tmr_inject.c
+++ b/drivers/misc/xilinx_tmr_inject.c
@@ -12,6 +12,7 @@
#include <asm/xilinx_mb_manager.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/fault-inject.h>
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f58bea534004..ef06a4d5d65b 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2734,7 +2734,6 @@ static const struct file_operations mmc_rpmb_fileops = {
.release = mmc_rpmb_chrdev_release,
.open = mmc_rpmb_chrdev_open,
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = mmc_rpmb_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mmc_rpmb_ioctl_compat,
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index f5da7f9baa52..9dc51859c2e5 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -213,7 +213,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
host->mmc->max_seg_size = host->mmc->max_req_size;
}
- return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+ dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+ return 0;
}
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
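Note: dma_set_max_seg_size() now only records the limit in dev->dma_parms and cannot fail, so its return value was dropped; callers such as this one invoke it for its side effect and return 0 themselves (the mana hunk near the end of this series drops its error check for the same reason):

	dma_set_max_seg_size(dev, SZ_64K);	/* void: just records the limit */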
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 2f11585b5613..7bf3777e1f13 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -298,14 +298,14 @@ static void find_next_position(struct mtdoops_context *cxt)
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
struct kmsg_dump_iter iter;
/* Only dump oopses if dump_oops is set */
- if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ if (detail->reason == KMSG_DUMP_OOPS && !dump_oops)
return;
kmsg_dump_rewind(&iter);
@@ -317,7 +317,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
record_size - sizeof(struct mtdoops_hdr), NULL);
clear_bit(0, &cxt->oops_buf_busy);
- if (reason != KMSG_DUMP_OOPS) {
+ if (detail->reason != KMSG_DUMP_OOPS) {
/* Panics must be written immediately */
mtdoops_write(cxt, 1);
} else {
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 614257308516..d0aaccf72d78 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -448,6 +448,12 @@ config MTD_NAND_RENESAS
Enables support for the NAND controller found on Renesas R-Car
Gen3 and RZ/N1 SoC families.
+config MTD_NAND_TS72XX
+ tristate "ts72xx NAND controller"
+ depends on ARCH_EP93XX && HAS_IOMEM
+ help
+ Enables support for NAND controller on ts72xx SBCs.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 25120a4afada..d0b0e6b83568 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_MTD_NAND_MLC_LPC32XX) += lpc32xx_mlc.o
obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
+obj-$(CONFIG_MTD_NAND_TS72XX) += technologic-nand-controller.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 179b28459b4b..df48b7d01d16 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -1381,7 +1381,7 @@ static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
}
/*
- * Retuns a pointer to the current byte, within the current page.
+ * Returns a pointer to the current byte, within the current page.
*/
static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
{
diff --git a/drivers/mtd/nand/raw/technologic-nand-controller.c b/drivers/mtd/nand/raw/technologic-nand-controller.c
new file mode 100644
index 000000000000..0e45a6fd91dd
--- /dev/null
+++ b/drivers/mtd/nand/raw/technologic-nand-controller.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Technologic Systems TS72xx NAND controller driver
+ *
+ * Copyright (C) 2023 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Derived from: plat_nand.c
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/platnand.h>
+
+#define TS72XX_NAND_CONTROL_ADDR_LINE BIT(22) /* 0xN0400000 */
+#define TS72XX_NAND_BUSY_ADDR_LINE BIT(23) /* 0xN0800000 */
+
+#define TS72XX_NAND_ALE BIT(0)
+#define TS72XX_NAND_CLE BIT(1)
+#define TS72XX_NAND_NCE BIT(2)
+
+#define TS72XX_NAND_CTRL_CLE (TS72XX_NAND_NCE | TS72XX_NAND_CLE)
+#define TS72XX_NAND_CTRL_ALE (TS72XX_NAND_NCE | TS72XX_NAND_ALE)
+
+struct ts72xx_nand_data {
+ struct nand_controller controller;
+ struct nand_chip chip;
+ void __iomem *base;
+ void __iomem *ctrl;
+ void __iomem *busy;
+};
+
+static inline struct ts72xx_nand_data *chip_to_ts72xx(struct nand_chip *chip)
+{
+ return container_of(chip, struct ts72xx_nand_data, chip);
+}
+
+static int ts72xx_nand_attach_chip(struct nand_chip *chip)
+{
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ return -EINVAL;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ fallthrough;
+ default:
+ return 0;
+ }
+}
+
+static void ts72xx_nand_ctrl(struct nand_chip *chip, u8 value)
+{
+ struct ts72xx_nand_data *data = chip_to_ts72xx(chip);
+ unsigned char bits = ioread8(data->ctrl) & ~GENMASK(2, 0);
+
+ iowrite8(bits | value, data->ctrl);
+}
+
+static int ts72xx_nand_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct ts72xx_nand_data *data = chip_to_ts72xx(chip);
+ unsigned int timeout_us;
+ u32 status;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_CTRL_CLE);
+ iowrite8(instr->ctx.cmd.opcode, data->base);
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_NCE);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_CTRL_ALE);
+ iowrite8_rep(data->base, instr->ctx.addr.addrs, instr->ctx.addr.naddrs);
+ ts72xx_nand_ctrl(chip, TS72XX_NAND_NCE);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ ioread8_rep(data->base, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ iowrite8_rep(data->base, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
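+ /* poll the busy latch until the ready bit (bit 5) asserts */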
+ ret = readb_poll_timeout(data->busy, status, status & BIT(5), 0, timeout_us);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int ts72xx_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ unsigned int i;
+ int ret;
+
+ if (check_only)
+ return 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ts72xx_nand_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops ts72xx_nand_ops = {
+ .attach_chip = ts72xx_nand_attach_chip,
+ .exec_op = ts72xx_nand_exec_op,
+};
+
+static int ts72xx_nand_probe(struct platform_device *pdev)
+{
+ struct ts72xx_nand_data *data;
+ struct fwnode_handle *child;
+ struct mtd_info *mtd;
+ int err;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ nand_controller_init(&data->controller);
+ data->controller.ops = &ts72xx_nand_ops;
+ data->chip.controller = &data->controller;
+
+ data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+ data->ctrl = data->base + TS72XX_NAND_CONTROL_ADDR_LINE;
+ data->busy = data->base + TS72XX_NAND_BUSY_ADDR_LINE;
+
+ child = fwnode_get_next_child_node(dev_fwnode(&pdev->dev), NULL);
+ if (!child)
+ return dev_err_probe(&pdev->dev, -ENXIO,
+ "ts72xx controller node should have exactly one child\n");
+
+ nand_set_flash_node(&data->chip, to_of_node(child));
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+ platform_set_drvdata(pdev, data);
+
+ /*
+ * This driver assumes that the default ECC engine should be TYPE_SOFT.
+ * Set ->engine_type before registering the NAND devices in order to
+ * provide a driver specific default value.
+ */
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+
+ /* Scan to find existence of the device */
+ err = nand_scan(&data->chip, 1);
+ if (err)
+ goto err_handle_put;
+
+ err = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ if (err)
+ goto err_clean_nand;
+
+ return 0;
+
+err_clean_nand:
+ nand_cleanup(&data->chip);
+err_handle_put:
+ fwnode_handle_put(child);
+ return err;
+}
+
+static void ts72xx_nand_remove(struct platform_device *pdev)
+{
+ struct ts72xx_nand_data *data = platform_get_drvdata(pdev);
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ fwnode_handle_put(fwnode);
+}
+
+static const struct of_device_id ts72xx_id_table[] = {
+ { .compatible = "technologic,ts7200-nand" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ts72xx_id_table);
+
+static struct platform_driver ts72xx_nand_driver = {
+ .driver = {
+ .name = "ts72xx-nand",
+ .of_match_table = ts72xx_id_table,
+ },
+ .probe = ts72xx_nand_probe,
+ .remove_new = ts72xx_nand_remove,
+};
+module_platform_driver(ts72xx_nand_driver);
+
+MODULE_AUTHOR("Nikita Shubin <nikita.shubin@maquefel.me>");
+MODULE_DESCRIPTION("Technologic Systems TS72xx NAND controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 0d8f04cf03c5..6bb80d7714bc 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1095,7 +1095,6 @@ const struct file_operations ubi_vol_cdev_operations = {
/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = ubi_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
@@ -1105,5 +1104,4 @@ const struct file_operations ubi_ctrl_cdev_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = ctrl_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 9ec3b8b6a0aa..d2a53961d8e2 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -470,7 +470,6 @@ static const struct file_operations dfs_fops = {
.read = dfs_file_read,
.write = dfs_file_write,
.open = simple_open,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b560644ee1b1..b1bffd8e9a95 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5610,9 +5610,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
break;
default:
- /* Should never happen. Mode guarded by bond_xdp_check() */
- netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
- WARN_ON_ONCE(1);
+ if (net_ratelimit())
+ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n",
+ BOND_MODE(bond));
return NULL;
}
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 1f495cfd7959..c2007cd86416 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -16,13 +16,12 @@
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/platform_data/eth-ep93xx.h>
-
#define DRV_MODULE_NAME "ep93xx-eth"
#define RX_QUEUE_ENTRIES 64
@@ -738,25 +737,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
-{
- struct net_device *dev;
-
- dev = alloc_etherdev(sizeof(struct ep93xx_priv));
- if (dev == NULL)
- return NULL;
-
- eth_hw_addr_set(dev, data->dev_addr);
-
- dev->ethtool_ops = &ep93xx_ethtool_ops;
- dev->netdev_ops = &ep93xx_netdev_ops;
-
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
-
- return dev;
-}
-
-
static void ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
@@ -786,27 +766,49 @@ static void ep93xx_eth_remove(struct platform_device *pdev)
static int ep93xx_eth_probe(struct platform_device *pdev)
{
- struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
struct resource *mem;
+ void __iomem *base_addr;
+ struct device_node *np;
+ u8 addr[ETH_ALEN];
+ u32 phy_id;
int irq;
int err;
if (pdev == NULL)
return -ENODEV;
- data = dev_get_platdata(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!mem || irq < 0)
return -ENXIO;
- dev = ep93xx_dev_alloc(data);
+ base_addr = ioremap(mem->start, resource_size(mem));
+ if (!base_addr)
+ return dev_err_probe(&pdev->dev, -EIO, "Failed to ioremap ethernet registers\n");
+
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!np)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Please provide \"phy-handle\"\n");
+
+ err = of_property_read_u32(np, "reg", &phy_id);
+ of_node_put(np);
+ if (err)
+ return dev_err_probe(&pdev->dev, -ENOENT, "Failed to locate \"phy_id\"\n");
+
+ dev = alloc_etherdev(sizeof(struct ep93xx_priv));
if (dev == NULL) {
err = -ENOMEM;
goto err_out;
}
+
+ memcpy_fromio(addr, base_addr + 0x50, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ dev->ethtool_ops = &ep93xx_ethtool_ops;
+ dev->netdev_ops = &ep93xx_netdev_ops;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -822,15 +824,10 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- ep->base_addr = ioremap(mem->start, resource_size(mem));
- if (ep->base_addr == NULL) {
- dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
- err = -EIO;
- goto err_out;
- }
+ ep->base_addr = base_addr;
ep->irq = irq;
- ep->mii.phy_id = data->phy_id;
+ ep->mii.phy_id = phy_id;
ep->mii.phy_id_mask = 0x1f;
ep->mii.reg_num_mask = 0x1f;
ep->mii.dev = dev;
@@ -857,12 +854,18 @@ err_out:
return err;
}
+static const struct of_device_id ep93xx_eth_of_ids[] = {
+ { .compatible = "cirrus,ep9301-eth" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids);
static struct platform_driver ep93xx_eth_driver = {
.probe = ep93xx_eth_probe,
.remove_new = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
+ .of_match_table = ep93xx_eth_of_ids,
},
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 9af8ddb4a78f..a64d96effb9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1887,10 +1887,12 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
if (throttle_op) {
- /* atomic context may not sleep */
- if (callback)
- return -EINVAL;
- down(&dev->cmd.vars.throttle_sem);
+ if (callback) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem))
+ return -EBUSY;
+ } else {
+ down(&dev->cmd.vars.throttle_sem);
+ }
}
pages_queue = is_manage_pages(in);
@@ -2096,10 +2098,19 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
+ struct mlx5_core_dev *dev;
+ u16 opcode;
ctx = work->ctx;
- status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
+ dev = ctx->dev;
+ opcode = work->opcode;
+ status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
+ /* Can't access "work" from this point on. It could have been freed in
+ * the callback.
+ */
+ if (mlx5_cmd_is_throttle_opcode(opcode))
+ up(&dev->cmd.vars.throttle_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
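Note: the throttle semaphore handling distinguishes submission contexts: an asynchronous caller (callback != NULL) may be in atomic context and must not sleep, so down_trylock() backs off with -EBUSY rather than blocking, while synchronous callers still sleep in down(). The semaphore is released in the completion handler, which caches dev and opcode up front because the user callback may free the work item. The pairing, roughly:

	/* submit */
	if (callback) {
		if (down_trylock(&sem))	/* atomic context: never sleep */
			return -EBUSY;
	} else {
		down(&sem);
	}

	/* complete: cache fields, run callback, then release */
	up(&sem);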
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index cf8045b92689..8577db3308cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -445,6 +445,34 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
+static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
+{
+ struct net_device *ndev = NULL;
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+
+ if (!ldev)
+ goto unlock;
+
+ for (i = 0; i < ldev->ports; i++)
+ if (ldev->tracker.netdev_state[i].tx_enabled)
+ ndev = ldev->pf[i].netdev;
+ if (!ndev)
+ ndev = ldev->pf[ldev->ports - 1].netdev;
+
+ if (ndev)
+ dev_hold(ndev);
+
+unlock:
+ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return ndev;
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@@ -477,9 +505,18 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
- !(ldev->mode == MLX5_LAG_MODE_ROCE))
- mlx5_lag_drop_rule_setup(ldev, tracker);
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ struct net_device *ndev = mlx5_lag_active_backup_get_netdev(dev0);
+
+ if (!(ldev->mode == MLX5_LAG_MODE_ROCE))
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+ /* Only sriov and roce lag should have tracker->tx_type set so
+ * no need to check the mode
+ */
+ blocking_notifier_call_chain(&dev0->priv.lag_nh,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
+ ndev);
+ }
}
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
@@ -613,6 +650,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
mlx5_core_err(dev0,
"Failed to deactivate RoCE LAG; driver restart required\n");
}
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev0->priv.lag_nh);
return err;
}
@@ -1492,38 +1530,6 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
mlx5_queue_bond_work(ldev, 0);
}
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
-{
- struct net_device *ndev = NULL;
- struct mlx5_lag *ldev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&lag_lock, flags);
- ldev = mlx5_lag_dev(dev);
-
- if (!(ldev && __mlx5_lag_is_roce(ldev)))
- goto unlock;
-
- if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- for (i = 0; i < ldev->ports; i++)
- if (ldev->tracker.netdev_state[i].tx_enabled)
- ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
- } else {
- ndev = ldev->pf[MLX5_LAG_P1].netdev;
- }
- if (ndev)
- dev_hold(ndev);
-
-unlock:
- spin_unlock_irqrestore(&lag_lock, flags);
-
- return ndev;
-}
-EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
-
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8b0abd61eca6..220a9ac75c8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -454,8 +454,8 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
+ bool do_set = false, mem_page_fault = false;
void *set_hca_cap;
- bool do_set = false;
int err;
if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
@@ -470,6 +470,17 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
MLX5_ST_SZ_BYTES(odp_cap));
+ /* For best performance, enable memory-scheme ODP only when the
+ * device also supports page prefetch.
+ */
+ if (MLX5_CAP_ODP_MAX(dev, mem_page_fault) &&
+ MLX5_CAP_ODP_MAX(dev, memory_page_fault_scheme_cap.page_prefetch)) {
+ mem_page_fault = true;
+ do_set = true;
+ MLX5_SET(odp_cap, set_hca_cap, mem_page_fault, mem_page_fault);
+ goto set;
+ }
+
#define ODP_CAP_SET_MAX(dev, field) \
do { \
u32 _res = MLX5_CAP_ODP_MAX(dev, field); \
@@ -479,25 +490,28 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
} \
} while (0)
- ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
- ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
-
- if (!do_set)
- return 0;
-
- return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.ud_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.rc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.atomic);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic);
+
+set:
+ if (do_set)
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
+
+ mlx5_core_dbg(dev, "Using ODP %s scheme\n",
+ mem_page_fault ? "memory" : "transport");
+ return err;
}
static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
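
Editor's note: ODP_CAP_SET_MAX copies each capability from the device's reported maximum into the working configuration and records whether anything changed, so the firmware set command can be skipped when nothing did. A distilled, standalone version of that pattern:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the ODP_CAP_SET_MAX idea: adopt the device's "max" value when
 * it differs from the current one and remember that a set command is due.
 */
static void cap_set_max(bool max, bool *cur, bool *do_set)
{
	if (max != *cur) {
		*cur = max;
		*do_set = true;
	}
}

int main(void)
{
	bool srq_receive = false, do_set = false;

	cap_set_max(true, &srq_receive, &do_set);	/* device supports it */
	printf("srq_receive=%d do_set=%d\n", srq_receive, do_set);
	return 0;
}
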
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index ddb8f68d80a2..ca4ed58f1206 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto release_region;
- err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
- if (err) {
- dev_err(&pdev->dev, "Failed to set dma device segment size\n");
- goto release_region;
- }
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
gc = vzalloc(sizeof(*gc));
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 45ac8befba29..305ec19ccef1 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -579,6 +579,33 @@ struct rtl8169_counters {
__le32 rx_multicast;
__le16 tx_aborted;
__le16 tx_underrun;
+ /* new since RTL8125 */
+ __le64 tx_octets;
+ __le64 rx_octets;
+ __le64 rx_multicast64;
+ __le64 tx_unicast64;
+ __le64 tx_broadcast64;
+ __le64 tx_multicast64;
+ __le32 tx_pause_on;
+ __le32 tx_pause_off;
+ __le32 tx_pause_all;
+ __le32 tx_deferred;
+ __le32 tx_late_collision;
+ __le32 tx_all_collision;
+ __le32 tx_aborted32;
+ __le32 align_errors32;
+ __le32 rx_frame_too_long;
+ __le32 rx_runt;
+ __le32 rx_pause_on;
+ __le32 rx_pause_off;
+ __le32 rx_pause_all;
+ __le32 rx_unknown_opcode;
+ __le32 rx_mac_error;
+ __le32 tx_underrun32;
+ __le32 rx_mac_missed;
+ __le32 rx_tcam_dropped;
+ __le32 tdu;
+ __le32 rdu;
};
struct rtl8169_tc_offsets {
@@ -681,6 +708,7 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_3);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9893c91af105..a7de5cf6b317 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1052,6 +1052,7 @@ struct ravb_hw_info {
netdev_features_t net_features;
int stats_len;
u32 tccr_mask;
+ u32 tx_max_frame_size;
u32 rx_max_frame_size;
u32 rx_buffer_size;
u32 rx_desc_size;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c7ec23688d56..d2a6518532f3 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -555,8 +555,16 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
static void ravb_emac_init_rcar(struct net_device *ndev)
{
- /* Receive frame limit set register */
- ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Set receive frame length
+ *
+ * The length set here describes the frame from the destination address
+ * up to and including the CRC data. However, only the frame data,
+ * excluding the CRC, is transferred to memory. To allow for the
+ * largest frames, add the CRC length to the maximum Rx descriptor size.
+ */
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
ravb_write(ndev, ECMR_ZPF | ECMR_DM |
@@ -2674,6 +2682,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2696,6 +2705,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2721,6 +2731,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
@@ -2770,6 +2781,7 @@ static const struct ravb_hw_info gbeth_hw_info = {
.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
+ .tx_max_frame_size = 1522,
.rx_max_frame_size = SZ_8K,
.rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
@@ -2981,7 +2993,7 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- ndev->max_mtu = info->rx_max_frame_size -
+ ndev->max_mtu = info->tx_max_frame_size -
(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
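
Editor's note: with max_mtu now derived from the Tx limit, the arithmetic is tx_max_frame_size minus the Ethernet header (14), one VLAN tag (4) and the FCS (4). A worked check of the resulting MTUs:

#include <stdio.h>

/* ETH_HLEN=14, VLAN_HLEN=4, ETH_FCS_LEN=4 per the uapi headers. */
int main(void)
{
	int overhead = 14 + 4 + 4;

	printf("R-Car (SZ_2K): %d\n", 2048 - overhead);	/* 2026 */
	printf("GBETH (1522):  %d\n", 1522 - overhead);	/* 1500 */
	return 0;
}
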
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c672f92d65e9..9319a2675e7b 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
+ del_timer_sync(&priv(dev)->timer);
free_netdev(dev);
ecard_release_resources(ec);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d3895d7eecfc..e2140482270a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2035,7 +2035,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
rx_q->queue_index = queue;
rx_q->priv_data = priv;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? PP_FLAG_DMA_SYNC_DEV : 0);
pp_params.pool_size = dma_conf->dma_rx_size;
num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 832998bc020b..75ad2da1a37f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -386,6 +386,7 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return ret;
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ return 0;
}
/* Final adjustments for HW */
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 85cdbdd44fec..e46ccebcfd22 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -41,10 +41,9 @@ config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
+ depends on I2C_DESIGNWARE_PLATFORM
select MARVELL_10G_PHY
select REGMAP
- select I2C
- select I2C_DESIGNWARE_PLATFORM
select PHYLINK
select HWMON if TXGBE=y
select SFP
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ea7d7c03f48e..fc35fcb22d94 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -736,15 +736,15 @@ static int axienet_device_reset(struct net_device *ndev)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
- * Returns the number of descriptors handled.
+ * Returns the number of packets handled.
*/
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
int nr_bds, bool force, u32 *sizep, int budget)
{
struct axidma_bd *cur_p;
unsigned int status;
+ int i, packets = 0;
dma_addr_t phys;
- int i;
for (i = 0; i < nr_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
@@ -763,8 +763,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
napi_consume_skb(cur_p->skb, budget);
+ packets++;
+ }
cur_p->app0 = 0;
cur_p->app1 = 0;
@@ -780,7 +782,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
- return i;
+ if (!force) {
+ lp->tx_bd_ci += i;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci %= lp->tx_bd_num;
+ }
+
+ return packets;
}
/**
@@ -953,13 +961,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
u32 size = 0;
int packets;
- packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
+ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
+ &size, budget);
if (packets) {
- lp->tx_bd_ci += packets;
- if (lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci %= lp->tx_bd_num;
-
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -1282,9 +1287,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
u32 cr = lp->tx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- napi_schedule(&lp->napi_tx);
+ if (napi_schedule_prep(&lp->napi_tx)) {
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ __napi_schedule(&lp->napi_tx);
+ }
}
return IRQ_HANDLED;
@@ -1326,9 +1332,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
u32 cr = lp->rx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- napi_schedule(&lp->napi_rx);
+ if (napi_schedule_prep(&lp->napi_rx)) {
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ __napi_schedule(&lp->napi_rx);
+ }
}
return IRQ_HANDLED;
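
Editor's note: both interrupt handlers now mask the DMA interrupt only when napi_schedule_prep() reports that this call actually scheduled the poller; otherwise a second IRQ could mask the source after the running poll had already unmasked it. The idiom in isolation, with hypothetical my_* helpers:

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_priv *lp = data;	/* hypothetical driver state */

	if (napi_schedule_prep(&lp->napi)) {
		my_mask_irq(lp);	/* the poller unmasks when done */
		__napi_schedule(&lp->napi);
	}
	return IRQ_HANDLED;
}
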
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 6ed38a3cdd73..3bf6785f9057 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -37,8 +37,6 @@
#include <linux/semaphore.h>
#include <linux/refcount.h>
-#define SIXPACK_VERSION "Revision: 0.3.0"
-
/* sixpack priority commands */
#define SIXP_SEOF 0x40 /* start and end of a 6pack frame */
#define SIXP_TX_URUN 0x48 /* transmit overrun */
@@ -88,22 +86,18 @@ struct sixpack {
struct net_device *dev; /* easy for intr handling */
/* These are pointers to the malloc()ed frame buffers. */
- unsigned char *rbuff; /* receiver buffer */
int rcount; /* received chars counter */
unsigned char *xbuff; /* transmitter buffer */
unsigned char *xhead; /* next byte to XMIT */
int xleft; /* bytes left in XMIT queue */
- unsigned char raw_buf[4];
- unsigned char cooked_buf[400];
+ u8 raw_buf[4];
+ u8 cooked_buf[400];
unsigned int rx_count;
unsigned int rx_count_cooked;
spinlock_t rxlock;
- int mtu; /* Our mtu (to spot changes!) */
- int buffsize; /* Max buffers sizes */
-
unsigned long flags; /* Flag values/ mode etc */
unsigned char mode; /* 6pack mode */
@@ -113,8 +107,8 @@ struct sixpack {
unsigned char slottime;
unsigned char duplex;
unsigned char led_state;
- unsigned char status;
- unsigned char status1;
+ u8 status;
+ u8 status1;
unsigned char status2;
unsigned char tx_enable;
unsigned char tnc_state;
@@ -128,7 +122,7 @@ struct sixpack {
#define AX25_6PACK_HEADER_LEN 0
-static void sixpack_decode(struct sixpack *, const unsigned char[], int);
+static void sixpack_decode(struct sixpack *, const u8 *, size_t);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
/*
@@ -167,7 +161,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
unsigned char *msg, *p = icp;
int actual, count;
- if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */
+ if (len > AX25_MTU + 73) {
msg = "oversized transmit packet!";
goto out_drop;
}
@@ -333,7 +327,7 @@ static void sp_bump(struct sixpack *sp, char cmd)
{
struct sk_buff *skb;
int count;
- unsigned char *ptr;
+ u8 *ptr;
count = sp->rcount + 1;
@@ -431,7 +425,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct sixpack *sp;
- int count1;
+ size_t count1;
if (!count)
return;
@@ -544,7 +538,7 @@ static inline int tnc_init(struct sixpack *sp)
*/
static int sixpack_open(struct tty_struct *tty)
{
- char *rbuff = NULL, *xbuff = NULL;
+ char *xbuff = NULL;
struct net_device *dev;
struct sixpack *sp;
unsigned long len;
@@ -574,10 +568,8 @@ static int sixpack_open(struct tty_struct *tty)
len = dev->mtu * 2;
- rbuff = kmalloc(len + 4, GFP_KERNEL);
xbuff = kmalloc(len + 4, GFP_KERNEL);
-
- if (rbuff == NULL || xbuff == NULL) {
+ if (xbuff == NULL) {
err = -ENOBUFS;
goto out_free;
}
@@ -586,11 +578,8 @@ static int sixpack_open(struct tty_struct *tty)
sp->tty = tty;
- sp->rbuff = rbuff;
sp->xbuff = xbuff;
- sp->mtu = AX25_MTU + 73;
- sp->buffsize = len;
sp->rcount = 0;
sp->rx_count = 0;
sp->rx_count_cooked = 0;
@@ -631,7 +620,6 @@ static int sixpack_open(struct tty_struct *tty)
out_free:
kfree(xbuff);
- kfree(rbuff);
free_netdev(dev);
@@ -676,7 +664,6 @@ static void sixpack_close(struct tty_struct *tty)
del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers after unreg. */
- kfree(sp->rbuff);
kfree(sp->xbuff);
free_netdev(sp->dev);
@@ -756,21 +743,14 @@ static struct tty_ldisc_ops sp_ldisc = {
/* Initialize 6pack control device -- register 6pack line discipline */
-static const char msg_banner[] __initconst = KERN_INFO \
- "AX.25: 6pack driver, " SIXPACK_VERSION "\n";
-static const char msg_regfail[] __initconst = KERN_ERR \
- "6pack: can't register line discipline (err = %d)\n";
-
static int __init sixpack_init_driver(void)
{
int status;
- printk(msg_banner);
-
/* Register the provided line protocol discipline */
status = tty_register_ldisc(&sp_ldisc);
if (status)
- printk(msg_regfail, status);
+ pr_err("6pack: can't register line discipline (err = %d)\n", status);
return status;
}
@@ -820,9 +800,9 @@ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw,
/* decode 4 sixpack-encoded bytes into 3 data bytes */
-static void decode_data(struct sixpack *sp, unsigned char inbyte)
+static void decode_data(struct sixpack *sp, u8 inbyte)
{
- unsigned char *buf;
+ u8 *buf;
if (sp->rx_count != 3) {
sp->raw_buf[sp->rx_count++] = inbyte;
@@ -848,9 +828,9 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
/* identify and execute a 6pack priority command byte */
-static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
+static void decode_prio_command(struct sixpack *sp, u8 cmd)
{
- int actual;
+ ssize_t actual;
if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */
@@ -898,9 +878,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
/* identify and execute a standard 6pack command byte */
-static void decode_std_command(struct sixpack *sp, unsigned char cmd)
+static void decode_std_command(struct sixpack *sp, u8 cmd)
{
- unsigned char checksum = 0, rest = 0;
+ u8 checksum = 0, rest = 0;
short i;
switch (cmd & SIXP_CMD_MASK) { /* normal command */
@@ -948,10 +928,10 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
/* decode a 6pack packet */
static void
-sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
+sixpack_decode(struct sixpack *sp, const u8 *pre_rbuff, size_t count)
{
- unsigned char inbyte;
- int count1;
+ size_t count1;
+ u8 inbyte;
for (count1 = 0; count1 < count; count1++) {
inbyte = pre_rbuff[count1];
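
Editor's note: decode_data() collects four received symbols and expands them into three octets (six bits of payload per symbol). A standalone sketch of such a 4-to-3 unpacking, assuming the payload sits in the low six bits; the driver's exact bit placement may differ:

#include <stdint.h>
#include <stdio.h>

static void unpack4to3(const uint8_t in[4], uint8_t out[3])
{
	uint32_t v = (uint32_t)(in[0] & 0x3f) << 18 |
		     (uint32_t)(in[1] & 0x3f) << 12 |
		     (uint32_t)(in[2] & 0x3f) << 6 |
		     (in[3] & 0x3f);

	out[0] = v >> 16;
	out[1] = v >> 8;
	out[2] = v;
}

int main(void)
{
	uint8_t in[4] = { 0x12, 0x34, 0x15, 0x26 }, out[3];

	unpack4to3(in, out);
	printf("%02x %02x %02x\n", out[0], out[1], out[2]);
	return 0;
}
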
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index f39bbe255497..e63720ec3238 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -64,18 +64,18 @@ struct mctp_serial {
u16 txfcs, rxfcs, rxfcs_rcvd;
unsigned int txlen, rxlen;
unsigned int txpos, rxpos;
- unsigned char txbuf[BUFSIZE],
+ u8 txbuf[BUFSIZE],
rxbuf[BUFSIZE];
};
-static bool needs_escape(unsigned char c)
+static bool needs_escape(u8 c)
{
return c == BYTE_ESC || c == BYTE_FRAME;
}
-static int next_chunk_len(struct mctp_serial *dev)
+static unsigned int next_chunk_len(struct mctp_serial *dev)
{
- int i;
+ unsigned int i;
/* either we have no bytes to send ... */
if (dev->txpos == dev->txlen)
@@ -99,7 +99,7 @@ static int next_chunk_len(struct mctp_serial *dev)
return i;
}
-static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len)
+static ssize_t write_chunk(struct mctp_serial *dev, u8 *buf, size_t len)
{
return dev->tty->ops->write(dev->tty, buf, len);
}
@@ -108,9 +108,10 @@ static void mctp_serial_tx_work(struct work_struct *work)
{
struct mctp_serial *dev = container_of(work, struct mctp_serial,
tx_work);
- unsigned char c, buf[3];
unsigned long flags;
- int len, txlen;
+ ssize_t txlen;
+ unsigned int len;
+ u8 c, buf[3];
spin_lock_irqsave(&dev->lock, flags);
@@ -293,7 +294,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
dev->netdev->stats.rx_bytes += dev->rxlen;
}
-static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push_header(struct mctp_serial *dev, u8 c)
{
switch (dev->rxpos) {
case 0:
@@ -323,7 +324,7 @@ static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push_trailer(struct mctp_serial *dev, u8 c)
{
switch (dev->rxpos) {
case 0:
@@ -347,7 +348,7 @@ static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_push(struct mctp_serial *dev, unsigned char c)
+static void mctp_serial_push(struct mctp_serial *dev, u8 c)
{
switch (dev->rxstate) {
case STATE_IDLE:
@@ -394,7 +395,7 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c,
const u8 *f, size_t len)
{
struct mctp_serial *dev = tty->disc_data;
- int i;
+ size_t i;
if (!netif_running(dev->netdev))
return;
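
Editor's note: next_chunk_len() answers how many bytes from txpos onward can go to the tty verbatim; a byte matching BYTE_ESC or BYTE_FRAME becomes a chunk of its own so the caller can emit its escape sequence instead. A standalone sketch of that chunking, with the escape set taken from the MCTP serial binding (0x7d/0x7e):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool needs_escape(uint8_t c)
{
	return c == 0x7d || c == 0x7e;	/* BYTE_ESC, BYTE_FRAME */
}

/* Length of the next run writable as-is; 1 if the next byte must be
 * escaped (the caller then sends the two-byte escape sequence).
 */
static size_t next_chunk(const uint8_t *buf, size_t len, size_t pos)
{
	size_t i;

	if (pos >= len)
		return 0;
	if (needs_escape(buf[pos]))
		return 1;
	for (i = pos; i < len && !needs_escape(buf[i]); i++)
		;
	return i - pos;
}
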
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index a1f91ff8ec56..41e80f78b316 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -1414,7 +1414,6 @@ out:
static const struct file_operations nsim_nexthop_bucket_activity_fops = {
.open = simple_open,
.write = nsim_nexthop_bucket_activity_write,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index 524627a36c6f..dac6464b5fe2 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -353,26 +353,32 @@ int aqr_firmware_load(struct phy_device *phydev)
{
int ret;
- ret = aqr_wait_reset_complete(phydev);
- if (ret)
- return ret;
-
- /* Check if the firmware is not already loaded by pooling
- * the current version returned by the PHY. If 0 is returned,
- * no firmware is loaded.
+ /* Check if the firmware is not already loaded by polling
+ * the current version returned by the PHY.
*/
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
- if (ret > 0)
- goto exit;
-
- ret = aqr_firmware_load_nvmem(phydev);
- if (!ret)
- goto exit;
-
- ret = aqr_firmware_load_fs(phydev);
- if (ret)
+ ret = aqr_wait_reset_complete(phydev);
+ switch (ret) {
+ case 0:
+ /* Some firmware is loaded => do nothing */
+ return 0;
+ case -ETIMEDOUT:
+ /* VEND1_GLOBAL_FW_ID still reads 0 after 2 seconds of polling.
+ * We don't have full confidence that no firmware is loaded (in
+ * theory it might just not have loaded yet), but we will
+ * assume that, and load a new image.
+ */
+ ret = aqr_firmware_load_nvmem(phydev);
+ if (!ret)
+ return ret;
+
+ ret = aqr_firmware_load_fs(phydev);
+ if (ret)
+ return ret;
+ break;
+ default:
+ /* PHY read error, propagate it to the caller */
return ret;
+ }
-exit:
return 0;
}
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c
index 0516ac02c3f8..201c8df93fad 100644
--- a/drivers/net/phy/aquantia/aquantia_leds.c
+++ b/drivers/net/phy/aquantia/aquantia_leds.c
@@ -120,7 +120,8 @@ int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable)
{
return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index),
- VEND1_GLOBAL_LED_DRIVE_VDD, enable);
+ VEND1_GLOBAL_LED_DRIVE_VDD,
+ enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0);
}
int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes)
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index e982e9ce44a5..4d156d406bab 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -435,6 +435,9 @@ static int aqr107_set_tunable(struct phy_device *phydev,
}
}
+#define AQR_FW_WAIT_SLEEP_US 20000
+#define AQR_FW_WAIT_TIMEOUT_US 2000000
+
/* If we configure settings whilst firmware is still initializing the chip,
* then these settings may be overwritten. Therefore make sure chip
* initialization has completed. Use presence of the firmware ID as
@@ -444,11 +447,19 @@ static int aqr107_set_tunable(struct phy_device *phydev,
*/
int aqr_wait_reset_complete(struct phy_device *phydev)
{
- int val;
+ int ret, val;
+
+ ret = read_poll_timeout(phy_read_mmd, val, val != 0,
+ AQR_FW_WAIT_SLEEP_US, AQR_FW_WAIT_TIMEOUT_US,
+ false, phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_FW_ID);
+ if (val < 0) {
+ phydev_err(phydev, "Failed to read VEND1_GLOBAL_FW_ID: %pe\n",
+ ERR_PTR(val));
+ return val;
+ }
- return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_FW_ID, val, val != 0,
- 20000, 2000000, false);
+ return ret;
}
static void aqr107_chip_info(struct phy_device *phydev)
@@ -478,7 +489,7 @@ static int aqr107_config_init(struct phy_device *phydev)
{
struct aqr107_priv *priv = phydev->priv;
u32 led_active_low;
- int ret, index = 0;
+ int ret;
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
@@ -505,10 +516,9 @@ static int aqr107_config_init(struct phy_device *phydev)
/* Restore LED polarity state after reset */
for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) {
- ret = aqr_phy_led_active_low_set(phydev, index, led_active_low);
+ ret = aqr_phy_led_active_low_set(phydev, led_active_low, true);
if (ret)
return ret;
- index++;
}
return 0;
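
Editor's note: read_poll_timeout() (from <linux/iopoll.h>) folds the retry loop into one macro: it invokes the accessor with the trailing arguments, stores the result in val, and returns 0 once the condition holds, or -ETIMEDOUT when the budget runs out, leaving the last read in val. A generic usage sketch against a hypothetical status register:

u32 val;
int ret;

/* Poll every 1 ms, give up after 100 ms; "false" means no sleep before
 * the first read. base + MY_STATUS_REG is illustrative only.
 */
ret = read_poll_timeout(readl, val, val & BIT(0),
			1000, 100 * 1000, false, base + MY_STATUS_REG);
if (ret)
	return ret;	/* -ETIMEDOUT */
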
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 77574f7a3bd4..5aa41d5f7765 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1162,7 +1162,6 @@ static const struct file_operations tap_fops = {
.read_iter = tap_read_iter,
.write_iter = tap_write_iter,
.poll = tap_poll,
- .llseek = no_llseek,
.unlocked_ioctl = tap_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5f77faef0ff1..9a0f6eb32016 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -3543,7 +3543,6 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
static const struct file_operations tun_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read_iter = tun_chr_read_iter,
.write_iter = tun_chr_write_iter,
.poll = tun_chr_poll,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 18eb5ba436df..2506aa8c603e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -464,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent))
- netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
- else
- netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
+ if (!usbnet_going_away(dev)) {
+ if (!schedule_work(&dev->kevent))
+ netdev_dbg(dev->net,
+ "kevent %s may have been dropped\n",
+ usbnet_event_names[work]);
+ else
+ netdev_dbg(dev->net,
+ "kevent %s scheduled\n", usbnet_event_names[work]);
+ }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -535,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
tasklet_schedule (&dev->bh);
break;
case 0:
- __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ if (!usbnet_going_away(dev))
+ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
}
} else {
netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -843,9 +849,18 @@ int usbnet_stop (struct net_device *net)
/* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
- del_timer_sync (&dev->delay);
- tasklet_kill (&dev->bh);
+ del_timer_sync(&dev->delay);
+ tasklet_kill(&dev->bh);
cancel_work_sync(&dev->kevent);
+
+ /* We have cyclic dependencies, so these calls must be repeated
+ * to break the cycle. The going-away flag prevents new work from
+ * being scheduled in the gaps between them.
+ */
+ tasklet_kill(&dev->bh);
+ del_timer_sync(&dev->delay);
+ cancel_work_sync(&dev->kevent);
+
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1171,7 +1186,8 @@ fail_halt:
status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
- tasklet_schedule (&dev->bh);
+ if (!usbnet_going_away(dev))
+ tasklet_schedule(&dev->bh);
}
}
@@ -1196,7 +1212,8 @@ fail_halt:
usb_autopm_put_interface(dev->intf);
fail_lowmem:
if (resched)
- tasklet_schedule (&dev->bh);
+ if (!usbnet_going_away(dev))
+ tasklet_schedule(&dev->bh);
}
}
@@ -1559,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
+ !usbnet_going_away(dev) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_PAUSED, &dev->flags) &&
!test_bit(EVENT_RX_HALT, &dev->flags)) {
@@ -1606,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (!dev)
return;
+ usbnet_mark_going_away(dev);
xdev = interface_to_usbdev (intf);
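
Editor's note: the teardown above deliberately repeats the timer/tasklet/work cancellations because each of the three can re-arm one of the others, and the new going-away flag stops anything from being scheduled into the remaining window. The shape of the pattern, with a plain flag standing in for the usbnet helpers:

struct my_dev {
	unsigned long going_away;	/* set once at disconnect */
	struct timer_list delay;
	struct tasklet_struct bh;
	struct work_struct kevent;
};

static void my_defer_work(struct my_dev *dev)
{
	if (!READ_ONCE(dev->going_away))
		schedule_work(&dev->kevent);
}

static void my_teardown(struct my_dev *dev)
{
	WRITE_ONCE(dev->going_away, 1);
	del_timer_sync(&dev->delay);
	tasklet_kill(&dev->bh);
	cancel_work_sync(&dev->kevent);
	/* second pass catches anything re-armed during the first */
	tasklet_kill(&dev->bh);
	del_timer_sync(&dev->delay);
	cancel_work_sync(&dev->kevent);
}
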
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6f4781ec2b36..f8131f92a392 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1807,6 +1807,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct page *page = virt_to_head_page(buf);
struct sk_buff *skb;
+ /* We passed the address of virtnet header to virtio-core,
+ * so truncate the padding.
+ */
+ buf -= VIRTNET_RX_PAD + xdp_headroom;
+
len -= vi->hdr_len;
u64_stats_add(&stats->bytes, len);
@@ -2422,8 +2427,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(!buf))
return -ENOMEM;
- virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
- vi->hdr_len + GOOD_PACKET_LEN);
+ buf += VIRTNET_RX_PAD + xdp_headroom;
+
+ virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index df53dd1d7e74..da72fd2d541f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1184,7 +1184,6 @@ static ssize_t bus_reset_write(struct file *file, const char __user *user_buf,
static const struct file_operations bus_reset_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = bus_reset_write,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 99a541d442bb..49a6aff42376 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -3768,7 +3768,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
}
const struct file_operations iwl_dbgfs_d3_test_ops = {
- .llseek = no_llseek,
.open = iwl_mvm_d3_test_open,
.read = iwl_mvm_d3_test_read,
.release = iwl_mvm_d3_test_release,
diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
index d702bee78082..ed6f4adc6130 100644
--- a/drivers/ntb/core.c
+++ b/drivers/ntb/core.c
@@ -72,7 +72,7 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
-static struct bus_type ntb_bus;
+static const struct bus_type ntb_bus;
static void ntb_dev_release(struct device *dev);
int __ntb_register_client(struct ntb_client *client, struct module *mod,
@@ -298,7 +298,7 @@ static void ntb_dev_release(struct device *dev)
complete(&ntb->released);
}
-static struct bus_type ntb_bus = {
+static const struct bus_type ntb_bus = {
.name = "ntb",
.probe = ntb_probe,
.remove = ntb_remove,
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index b640aa0bf45e..00f0e78f685b 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Host side endpoint driver to implement Non-Transparent Bridge functionality
*
* Copyright (C) 2020 Texas Instruments
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 48dfb1a69a77..6fc9dfe82474 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2547,7 +2547,7 @@ static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev)
*/
/*
- * idt_check_setup() - Check whether the IDT PCIe-swtich is properly
+ * idt_check_setup() - Check whether the IDT PCIe-switch is properly
* pre-initialized
* @pdev: Pointer to the PCI device descriptor
*
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 9ab836d0d4f1..079b8cd79785 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -778,7 +778,7 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
ndev->debugfs_dir =
debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
- if (!ndev->debugfs_dir)
+ if (IS_ERR(ndev->debugfs_dir))
ndev->debugfs_info = NULL;
else
ndev->debugfs_info =
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 31946387badf..ad1786be2554 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -1554,6 +1554,7 @@ static void switchtec_ntb_remove(struct device *dev)
switchtec_ntb_deinit_db_msg_irq(sndev);
switchtec_ntb_deinit_shared_mw(sndev);
switchtec_ntb_deinit_crosslink(sndev);
+ cancel_work_sync(&sndev->check_link_status_work);
kfree(sndev);
dev_info(dev, "ntb device unregistered\n");
}
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 77e55debeed6..a22ea4a4b202 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -314,7 +314,7 @@ static void ntb_transport_bus_remove(struct device *dev)
put_device(dev);
}
-static struct bus_type ntb_transport_bus = {
+static const struct bus_type ntb_transport_bus = {
.name = "ntb_transport",
.match = ntb_transport_bus_match,
.probe = ntb_transport_bus_probe,
@@ -377,6 +377,8 @@ EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
* @device_name: Name of NTB client device
*
* Register an NTB client device with the NTB transport layer
+ *
+ * Returns: %0 on success or -errno code on error
*/
int ntb_transport_register_client_dev(char *device_name)
{
@@ -807,16 +809,29 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
}
static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
- struct device *dma_dev, size_t align)
+ struct device *ntb_dev, size_t align)
{
dma_addr_t dma_addr;
void *alloc_addr, *virt_addr;
int rc;
- alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
- &dma_addr, GFP_KERNEL);
+ /*
+ * The buffer here is allocated against the NTB device. The reason to
+ * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer
+ * backing the NTB BAR for the remote host to write to. During receive
+ * processing, the data is being copied out of the receive buffer to
+ * the kernel skbuff. When a DMA device is being used, dma_map_page()
+ * is called on the kvaddr of the receive buffer (from dma_alloc_*())
+ * and remapped against the DMA device. It appears to be a double
+ * DMA mapping of buffers, but first is mapped to the NTB device and
+ * second is to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary
+ * in order for the later dma_map_page() to not fail.
+ */
+ alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size,
+ &dma_addr, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
if (!alloc_addr) {
- dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
+ dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n",
mw->alloc_size);
return -ENOMEM;
}
@@ -845,7 +860,7 @@ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
return 0;
err:
- dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);
+ dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr);
return rc;
}
@@ -1966,9 +1981,9 @@ static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
/**
* ntb_transport_create_queue - Create a new NTB transport layer queue
- * @rx_handler: receive callback function
- * @tx_handler: transmit callback function
- * @event_handler: event callback function
+ * @data: pointer for callback data
+ * @client_dev: &struct device pointer
+ * @handlers: pointer to various ntb queue (callback) handlers
*
* Create a new NTB transport layer queue and provide the queue with a callback
* routine for both transmit and receive. The receive callback routine will be
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 553f1f46bc66..72bc1d017a46 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -1227,7 +1227,7 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
"\tOut buffer addr 0x%pK\n", peer->outbuf);
pos += scnprintf(buf + pos, buf_size - pos,
- "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
+ "\tOut buff phys addr %pap\n", &peer->out_phys_addr);
pos += scnprintf(buf + pos, buf_size - pos,
"\tOut buffer size %pa\n", &peer->outbuf_size);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index d6d558f94d6b..55cfbf1e0a95 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1612,9 +1612,6 @@ static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
{
int i;
- if (!pmem_id)
- return -ENODEV;
-
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -1790,9 +1787,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
case -EINVAL:
dev_dbg(&nd_region->dev, "invalid label(s)\n");
break;
- case -ENODEV:
- dev_dbg(&nd_region->dev, "label not found\n");
- break;
default:
dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
break;
@@ -1937,12 +1931,16 @@ static int cmp_dpa(const void *a, const void *b)
static struct device **scan_labels(struct nd_region *nd_region)
{
int i, count = 0;
- struct device *dev, **devs = NULL;
+ struct device *dev, **devs;
struct nd_label_ent *label_ent, *e;
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
+ devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
+ if (!devs)
+ return NULL;
+
/* "safe" because create_namespace_pmem() might list_move() label_ent */
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
@@ -1961,12 +1959,14 @@ static struct device **scan_labels(struct nd_region *nd_region)
goto err;
if (i < count)
continue;
- __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
- if (!__devs)
- goto err;
- memcpy(__devs, devs, sizeof(dev) * count);
- kfree(devs);
- devs = __devs;
+ if (count) {
+ __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
+ if (!__devs)
+ goto err;
+ memcpy(__devs, devs, sizeof(dev) * count);
+ kfree(devs);
+ devs = __devs;
+ }
dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
if (IS_ERR(dev)) {
@@ -1974,9 +1974,6 @@ static struct device **scan_labels(struct nd_region *nd_region)
case -EAGAIN:
/* skip invalid labels */
continue;
- case -ENODEV:
- /* fallthrough to seed creation */
- break;
default:
goto err;
}
@@ -1993,11 +1990,6 @@ static struct device **scan_labels(struct nd_region *nd_region)
/* Publish a zero-sized namespace for userspace to configure. */
nd_mapping_free_labels(nd_mapping);
-
- devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
- if (!devs)
- goto err;
-
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
if (!nspm)
goto err;
@@ -2036,11 +2028,10 @@ static struct device **scan_labels(struct nd_region *nd_region)
return devs;
err:
- if (devs) {
- for (i = 0; devs[i]; i++)
- namespace_pmem_release(devs[i]);
- kfree(devs);
- }
+ for (i = 0; devs[i]; i++)
+ namespace_pmem_release(devs[i]);
+ kfree(devs);
+
return NULL;
}
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 35c8fbbba10e..f55d60922b87 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -44,6 +44,15 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
unsigned long flags;
int err, err1;
+ /*
+ * Don't bother to submit the request to the device if the device is
+ * not activated.
+ */
+ if (vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_NEEDS_RESET) {
+ dev_info(&vdev->dev, "virtio pmem device needs a reset\n");
+ return -EIO;
+ }
+
might_sleep();
req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
if (!req_data)
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 403384f25ce3..b4a1cf70e8b7 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -47,7 +47,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, priv);
- is_volatile = !!of_find_property(np, "volatile", NULL);
+ is_volatile = of_property_read_bool(np, "volatile");
dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n",
is_volatile ? "volatile" : "non-volatile", np);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ca9959a8fb9e..ba6508455e18 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2468,11 +2468,6 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
if (ret)
return ret;
- /* Flush write to device (required if transport is PCI) */
- ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
- if (ret)
- return ret;
-
/* CAP value may change after initial CC write */
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
if (ret)
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
index 1d1b6441a339..105d6cb41c72 100644
--- a/drivers/nvme/host/fault_inject.c
+++ b/drivers/nvme/host/fault_inject.c
@@ -6,6 +6,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
#include "nvme.h"
static DECLARE_FAULT_ATTR(fail_default_attr);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 1d769c842fbf..b9b79ccfabf8 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -3,7 +3,6 @@
* Copyright (c) 2011-2014, Intel Corporation.
* Copyright (c) 2017-2021 Christoph Hellwig.
*/
-#include <linux/bio-integrity.h>
#include <linux/blk-integrity.h>
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
@@ -153,11 +152,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
bio_set_dev(bio, bdev);
if (has_metadata) {
- ret = bio_integrity_map_user(bio, meta_buffer, meta_len,
- meta_seed);
+ ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
+ meta_seed);
if (ret)
goto out_unmap;
- req->cmd_flags |= REQ_INTEGRITY;
}
return ret;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 518e22dd4f9b..48e7a8906d01 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -421,6 +421,9 @@ static bool nvme_available_path(struct nvme_ns_head *head)
{
struct nvme_ns *ns;
+ if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+ return false;
+
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
continue;
@@ -648,7 +651,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
rc = device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_attr_groups);
if (rc) {
- clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags);
+ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
return;
}
nvme_add_ns_head_cdev(head);
@@ -969,11 +972,16 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- kblockd_schedule_work(&head->requeue_work);
- if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
}
+ /*
+ * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+ * to allow multipath to fail all I/O.
+ */
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 15b5e06039a5..c8fd0e8f0237 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1496,7 +1496,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
req->metadata_sgl->sg_table.sgl =
(struct scatterlist *)(req->metadata_sgl + 1);
ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
- blk_rq_count_integrity_sg(rq->q, rq->bio),
+ rq->nr_integrity_segments,
req->metadata_sgl->sg_table.sgl,
NVME_INLINE_METADATA_SG_CNT);
if (unlikely(ret)) {
@@ -1504,8 +1504,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
goto out_unmap_sg;
}
- req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
- rq->bio, req->metadata_sgl->sg_table.sgl);
+ req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
+ req->metadata_sgl->sg_table.sgl);
*pi_count = ib_dma_map_sg(ibdev,
req->metadata_sgl->sg_table.sgl,
req->metadata_sgl->nents,
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index eb345551d6fe..b68a9e5f1ea3 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -767,6 +767,7 @@ static struct attribute *nvme_tls_attrs[] = {
&dev_attr_tls_key.attr,
&dev_attr_tls_configured_key.attr,
&dev_attr_tls_keyring.attr,
+ NULL,
};
static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
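
Editor's note: the fix above adds the missing sentinel. Sysfs walks attribute arrays until it reaches a NULL entry, so leaving it off makes the visibility callback read past the end of the array. The required shape, with hypothetical attributes:

static struct attribute *my_attrs[] = {
	&dev_attr_foo.attr,
	&dev_attr_bar.attr,
	NULL,	/* mandatory sentinel; iteration stops here */
};

static const struct attribute_group my_group = {
	.attrs	= my_attrs,
};
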
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 283134498fbc..d2c384f58028 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -363,8 +363,7 @@ config NVMEM_SUNXI_SID
config NVMEM_U_BOOT_ENV
tristate "U-Boot environment variables support"
depends on OF && MTD
- select CRC32
- select GENERIC_NET_UTILS
+ select NVMEM_LAYOUT_U_BOOT_ENV
help
U-Boot stores its setup as environment variables. This driver adds
support for verifying & exporting such data. It also exposes variables
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
index cf920542f939..1ba494497698 100644
--- a/drivers/nvmem/imx-ocotp-ele.c
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -14,8 +14,9 @@
#include <linux/slab.h>
enum fuse_type {
- FUSE_FSB = 1,
- FUSE_ELE = 2,
+ FUSE_FSB = BIT(0),
+ FUSE_ELE = BIT(1),
+ FUSE_ECC = BIT(2),
FUSE_INVALID = -1
};
@@ -93,7 +94,10 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
continue;
}
- *buf++ = readl_relaxed(reg + (i << 2));
+ if (type & FUSE_ECC)
+ *buf++ = readl_relaxed(reg + (i << 2)) & GENMASK(15, 0);
+ else
+ *buf++ = readl_relaxed(reg + (i << 2));
}
memcpy(val, (u8 *)p, bytes);
@@ -155,8 +159,30 @@ static const struct ocotp_devtype_data imx93_ocotp_data = {
},
};
+static const struct ocotp_devtype_data imx95_ocotp_data = {
+ .reg_off = 0x8000,
+ .reg_read = imx_ocotp_reg_read,
+ .size = 2048,
+ .num_entry = 12,
+ .entry = {
+ { 0, 1, FUSE_FSB | FUSE_ECC },
+ { 7, 1, FUSE_FSB | FUSE_ECC },
+ { 9, 3, FUSE_FSB | FUSE_ECC },
+ { 12, 24, FUSE_FSB },
+ { 36, 2, FUSE_FSB | FUSE_ECC },
+ { 38, 14, FUSE_FSB },
+ { 63, 1, FUSE_ELE },
+ { 128, 16, FUSE_ELE },
+ { 188, 1, FUSE_ELE },
+ { 317, 2, FUSE_FSB | FUSE_ECC },
+ { 320, 7, FUSE_FSB },
+ { 328, 184, FUSE_FSB }
+ },
+};
+
static const struct of_device_id imx_ele_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx93-ocotp", .data = &imx93_ocotp_data, },
+ { .compatible = "fsl,imx95-ocotp", .data = &imx95_ocotp_data, },
{},
};
MODULE_DEVICE_TABLE(of, imx_ele_ocotp_dt_ids);
diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
index 77a4119efea8..65d39e19f6ec 100644
--- a/drivers/nvmem/layouts.c
+++ b/drivers/nvmem/layouts.c
@@ -123,7 +123,7 @@ static int nvmem_layout_bus_populate(struct nvmem_device *nvmem,
int ret;
/* Make sure it has a compatible property */
- if (!of_get_property(layout_dn, "compatible", NULL)) {
+ if (!of_property_present(layout_dn, "compatible")) {
pr_debug("%s() - skipping %pOF, no compatible prop\n",
__func__, layout_dn);
return 0;
diff --git a/drivers/nvmem/layouts/Kconfig b/drivers/nvmem/layouts/Kconfig
index 9c6e672fc350..5e586dfebe47 100644
--- a/drivers/nvmem/layouts/Kconfig
+++ b/drivers/nvmem/layouts/Kconfig
@@ -26,6 +26,17 @@ config NVMEM_LAYOUT_ONIE_TLV
If unsure, say N.
+config NVMEM_LAYOUT_U_BOOT_ENV
+ tristate "U-Boot environment variables layout"
+ select CRC32
+ select GENERIC_NET_UTILS
+ help
+ U-Boot stores its setup as environment variables. This driver adds
+ support for verifying & exporting such data. It also exposes variables
+ as NVMEM cells so they can be referenced by other drivers.
+
+ If unsure, say N.
+
endmenu
endif
diff --git a/drivers/nvmem/layouts/Makefile b/drivers/nvmem/layouts/Makefile
index 2974bd7d33ed..4940c9db0665 100644
--- a/drivers/nvmem/layouts/Makefile
+++ b/drivers/nvmem/layouts/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_NVMEM_LAYOUT_SL28_VPD) += sl28vpd.o
obj-$(CONFIG_NVMEM_LAYOUT_ONIE_TLV) += onie-tlv.o
+obj-$(CONFIG_NVMEM_LAYOUT_U_BOOT_ENV) += u-boot-env.o
diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c
new file mode 100644
index 000000000000..731e6f4f12b2
--- /dev/null
+++ b/drivers/nvmem/layouts/u-boot-env.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 - 2023 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include <linux/export.h>
+#include <linux/if_ether.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "u-boot-env.h"
+
+struct u_boot_env_image_single {
+ __le32 crc32;
+ uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_redundant {
+ __le32 crc32;
+ u8 mark;
+ uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_broadcom {
+ __le32 magic;
+ __le32 len;
+ __le32 crc32;
+ DECLARE_FLEX_ARRAY(uint8_t, data);
+} __packed;
+
+static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index,
+ unsigned int offset, void *buf, size_t bytes)
+{
+ u8 mac[ETH_ALEN];
+
+ if (bytes != 3 * ETH_ALEN - 1)
+ return -EINVAL;
+
+ if (!mac_pton(buf, mac))
+ return -EINVAL;
+
+ if (index)
+ eth_addr_add(mac, index);
+
+ ether_addr_copy(buf, mac);
+
+ return 0;
+}
+
+static int u_boot_env_parse_cells(struct device *dev, struct nvmem_device *nvmem, uint8_t *buf,
+ size_t data_offset, size_t data_len)
+{
+ char *data = buf + data_offset;
+ char *var, *value, *eq;
+
+ for (var = data;
+ var < data + data_len && *var;
+ var = value + strlen(value) + 1) {
+ struct nvmem_cell_info info = {};
+
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
+ value = eq + 1;
+
+ info.name = devm_kstrdup(dev, var, GFP_KERNEL);
+ if (!info.name)
+ return -ENOMEM;
+ info.offset = data_offset + value - data;
+ info.bytes = strlen(value);
+ info.np = of_get_child_by_name(dev->of_node, info.name);
+ if (!strcmp(var, "ethaddr")) {
+ info.raw_len = strlen(value);
+ info.bytes = ETH_ALEN;
+ info.read_post_process = u_boot_env_read_post_process_ethaddr;
+ }
+
+ nvmem_add_one_cell(nvmem, &info);
+ }
+
+ return 0;
+}
+
+int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
+ enum u_boot_env_format format)
+{
+ size_t crc32_data_offset;
+ size_t crc32_data_len;
+ size_t crc32_offset;
+ __le32 *crc32_addr;
+ size_t data_offset;
+ size_t data_len;
+ size_t dev_size;
+ uint32_t crc32;
+ uint32_t calc;
+ uint8_t *buf;
+ int bytes;
+ int err;
+
+ dev_size = nvmem_dev_size(nvmem);
+
+ buf = kzalloc(dev_size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ bytes = nvmem_device_read(nvmem, 0, dev_size, buf);
+ if (bytes < 0) {
+ err = bytes;
+ goto err_kfree;
+ } else if (bytes != dev_size) {
+ err = -EIO;
+ goto err_kfree;
+ }
+
+ switch (format) {
+ case U_BOOT_FORMAT_SINGLE:
+ crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
+ data_offset = offsetof(struct u_boot_env_image_single, data);
+ break;
+ case U_BOOT_FORMAT_REDUNDANT:
+ crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
+ data_offset = offsetof(struct u_boot_env_image_redundant, data);
+ break;
+ case U_BOOT_FORMAT_BROADCOM:
+ crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+ data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+ break;
+ }
+
+ if (dev_size < data_offset) {
+ dev_err(dev, "Device too small for u-boot-env\n");
+ err = -EIO;
+ goto err_kfree;
+ }
+
+ crc32_addr = (__le32 *)(buf + crc32_offset);
+ crc32 = le32_to_cpu(*crc32_addr);
+ crc32_data_len = dev_size - crc32_data_offset;
+ data_len = dev_size - data_offset;
+
+ calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+ if (calc != crc32) {
+ dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
+ err = -EINVAL;
+ goto err_kfree;
+ }
+
+ buf[dev_size - 1] = '\0';
+ err = u_boot_env_parse_cells(dev, nvmem, buf, data_offset, data_len);
+
+err_kfree:
+ kfree(buf);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(u_boot_env_parse);
+
+static int u_boot_env_add_cells(struct nvmem_layout *layout)
+{
+ struct device *dev = &layout->dev;
+ enum u_boot_env_format format;
+
+ format = (uintptr_t)device_get_match_data(dev);
+
+ return u_boot_env_parse(dev, layout->nvmem, format);
+}
+
+static int u_boot_env_probe(struct nvmem_layout *layout)
+{
+ layout->add_cells = u_boot_env_add_cells;
+
+ return nvmem_layout_register(layout);
+}
+
+static void u_boot_env_remove(struct nvmem_layout *layout)
+{
+ nvmem_layout_unregister(layout);
+}
+
+static const struct of_device_id u_boot_env_of_match_table[] = {
+ { .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, },
+ { .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ { .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ { .compatible = "brcm,env", .data = (void *)U_BOOT_FORMAT_BROADCOM, },
+ {},
+};
+
+static struct nvmem_layout_driver u_boot_env_layout = {
+ .driver = {
+ .name = "u-boot-env-layout",
+ .of_match_table = u_boot_env_of_match_table,
+ },
+ .probe = u_boot_env_probe,
+ .remove = u_boot_env_remove,
+};
+module_nvmem_layout_driver(u_boot_env_layout);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table);
+MODULE_DESCRIPTION("NVMEM layout driver for U-Boot environment variables");
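
Editor's note: the CRC check computes crc32(~0, data, len) ^ ~0, which with the kernel's little-endian CRC-32 matches the standard CRC-32 that zlib produces, so an environment image can be verified from userspace. A sketch assuming zlib and the single-image layout (CRC stored little-endian in the first four bytes, data starting right after):

#include <stddef.h>
#include <stdint.h>
#include <zlib.h>	/* link with -lz */

static int check_env_crc(const uint8_t *img, size_t len)
{
	const size_t data_off = 4;	/* u_boot_env_image_single */
	uint32_t stored = img[0] | img[1] << 8 | img[2] << 16 |
			  (uint32_t)img[3] << 24;

	if (len <= data_off)
		return -1;
	return crc32(0L, img + data_off, len - data_off) == stored ? 0 : -1;
}
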
diff --git a/drivers/nvmem/layouts/u-boot-env.h b/drivers/nvmem/layouts/u-boot-env.h
new file mode 100644
index 000000000000..dd5f280ac047
--- /dev/null
+++ b/drivers/nvmem/layouts/u-boot-env.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H
+#define _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H
+
+enum u_boot_env_format {
+ U_BOOT_FORMAT_SINGLE,
+ U_BOOT_FORMAT_REDUNDANT,
+ U_BOOT_FORMAT_BROADCOM,
+};
+
+int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
+ enum u_boot_env_format format);
+
+#endif /* ifndef _LINUX_NVMEM_LAYOUTS_U_BOOT_ENV_H */
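
Editor's note: for reference, the data area that u_boot_env_parse_cells() walks is a series of NUL-terminated "name=value" strings ending in an empty string; each becomes an NVMEM cell, and "ethaddr" additionally gets the text-to-binary MAC post-processing. An illustrative blob and the cells it would produce:

/* Two variables followed by the terminating empty string (the C string
 * literal supplies the final '\0').
 */
static const char env_example[] =
	"bootcmd=run flash_boot\0"
	"ethaddr=00:11:22:33:44:55\0";

/* Resulting cells:
 *   "bootcmd": offset of "run flash_boot", 14 bytes
 *   "ethaddr": raw_len 17 ("00:11:22:33:44:55"), bytes = ETH_ALEN (6)
 *              after u_boot_env_read_post_process_ethaddr()
 */
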
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
index 38f5d9df39cd..30d55b111a64 100644
--- a/drivers/nvmem/sunplus-ocotp.c
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -159,7 +159,6 @@ static int sp_ocotp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct nvmem_device *nvmem;
struct sp_ocotp_priv *otp;
- struct resource *res;
int ret;
otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
@@ -168,13 +167,11 @@ static int sp_ocotp_probe(struct platform_device *pdev)
otp->dev = dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hb_gpio");
- otp->base[HB_GPIO] = devm_ioremap_resource(dev, res);
+ otp->base[HB_GPIO] = devm_platform_ioremap_resource_byname(pdev, "hb_gpio");
if (IS_ERR(otp->base[HB_GPIO]))
return PTR_ERR(otp->base[HB_GPIO]);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otprx");
- otp->base[OTPRX] = devm_ioremap_resource(dev, res);
+ otp->base[OTPRX] = devm_platform_ioremap_resource_byname(pdev, "otprx");
if (IS_ERR(otp->base[OTPRX]))
return PTR_ERR(otp->base[OTPRX]);
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
index 593f0bf4a395..ced414fc9e60 100644
--- a/drivers/nvmem/u-boot-env.c
+++ b/drivers/nvmem/u-boot-env.c
@@ -3,23 +3,15 @@
* Copyright (C) 2022 Rafał Miłecki <rafal@milecki.pl>
*/
-#include <linux/crc32.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
-#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-enum u_boot_env_format {
- U_BOOT_FORMAT_SINGLE,
- U_BOOT_FORMAT_REDUNDANT,
- U_BOOT_FORMAT_BROADCOM,
-};
+#include "layouts/u-boot-env.h"
struct u_boot_env {
struct device *dev;
@@ -29,24 +21,6 @@ struct u_boot_env {
struct mtd_info *mtd;
};
-struct u_boot_env_image_single {
- __le32 crc32;
- uint8_t data[];
-} __packed;
-
-struct u_boot_env_image_redundant {
- __le32 crc32;
- u8 mark;
- uint8_t data[];
-} __packed;
-
-struct u_boot_env_image_broadcom {
- __le32 magic;
- __le32 len;
- __le32 crc32;
- DECLARE_FLEX_ARRAY(uint8_t, data);
-} __packed;
-
static int u_boot_env_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
@@ -69,141 +43,6 @@ static int u_boot_env_read(void *context, unsigned int offset, void *val,
return 0;
}
-static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, int index,
- unsigned int offset, void *buf, size_t bytes)
-{
- u8 mac[ETH_ALEN];
-
- if (bytes != 3 * ETH_ALEN - 1)
- return -EINVAL;
-
- if (!mac_pton(buf, mac))
- return -EINVAL;
-
- if (index)
- eth_addr_add(mac, index);
-
- ether_addr_copy(buf, mac);
-
- return 0;
-}
-
-static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
- size_t data_offset, size_t data_len)
-{
- struct nvmem_device *nvmem = priv->nvmem;
- struct device *dev = priv->dev;
- char *data = buf + data_offset;
- char *var, *value, *eq;
-
- for (var = data;
- var < data + data_len && *var;
- var = value + strlen(value) + 1) {
- struct nvmem_cell_info info = {};
-
- eq = strchr(var, '=');
- if (!eq)
- break;
- *eq = '\0';
- value = eq + 1;
-
- info.name = devm_kstrdup(dev, var, GFP_KERNEL);
- if (!info.name)
- return -ENOMEM;
- info.offset = data_offset + value - data;
- info.bytes = strlen(value);
- info.np = of_get_child_by_name(dev->of_node, info.name);
- if (!strcmp(var, "ethaddr")) {
- info.raw_len = strlen(value);
- info.bytes = ETH_ALEN;
- info.read_post_process = u_boot_env_read_post_process_ethaddr;
- }
-
- nvmem_add_one_cell(nvmem, &info);
- }
-
- return 0;
-}
-
-static int u_boot_env_parse(struct u_boot_env *priv)
-{
- struct nvmem_device *nvmem = priv->nvmem;
- struct device *dev = priv->dev;
- size_t crc32_data_offset;
- size_t crc32_data_len;
- size_t crc32_offset;
- __le32 *crc32_addr;
- size_t data_offset;
- size_t data_len;
- size_t dev_size;
- uint32_t crc32;
- uint32_t calc;
- uint8_t *buf;
- int bytes;
- int err;
-
- dev_size = nvmem_dev_size(nvmem);
-
- buf = kzalloc(dev_size, GFP_KERNEL);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
-
- bytes = nvmem_device_read(nvmem, 0, dev_size, buf);
- if (bytes < 0) {
- err = bytes;
- goto err_kfree;
- } else if (bytes != dev_size) {
- err = -EIO;
- goto err_kfree;
- }
-
- switch (priv->format) {
- case U_BOOT_FORMAT_SINGLE:
- crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
- crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
- data_offset = offsetof(struct u_boot_env_image_single, data);
- break;
- case U_BOOT_FORMAT_REDUNDANT:
- crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
- crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
- data_offset = offsetof(struct u_boot_env_image_redundant, data);
- break;
- case U_BOOT_FORMAT_BROADCOM:
- crc32_offset = offsetof(struct u_boot_env_image_broadcom, crc32);
- crc32_data_offset = offsetof(struct u_boot_env_image_broadcom, data);
- data_offset = offsetof(struct u_boot_env_image_broadcom, data);
- break;
- }
-
- if (dev_size < data_offset) {
- dev_err(dev, "Device too small for u-boot-env\n");
- err = -EIO;
- goto err_kfree;
- }
-
- crc32_addr = (__le32 *)(buf + crc32_offset);
- crc32 = le32_to_cpu(*crc32_addr);
- crc32_data_len = dev_size - crc32_data_offset;
- data_len = dev_size - data_offset;
-
- calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
- if (calc != crc32) {
- dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
- err = -EINVAL;
- goto err_kfree;
- }
-
- buf[dev_size - 1] = '\0';
- err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
-
-err_kfree:
- kfree(buf);
-err_out:
- return err;
-}
-
static int u_boot_env_probe(struct platform_device *pdev)
{
struct nvmem_config config = {
@@ -235,7 +74,7 @@ static int u_boot_env_probe(struct platform_device *pdev)
if (IS_ERR(priv->nvmem))
return PTR_ERR(priv->nvmem);
- return u_boot_env_parse(priv);
+ return u_boot_env_parse(dev, priv->nvmem, priv->format);
}
static const struct of_device_id u_boot_env_of_match_table[] = {
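
[Editor's note] A hedged consumer sketch, not part of the patch: a hypothetical
driver reading the "ethaddr" cell that u_boot_env_add_cells() registers. With
the ethaddr post-process hook, the cell yields a binary ETH_ALEN-byte MAC; the
lookup assumes a matching nvmem-cells/nvmem-cell-names reference in the
consumer's DT node.

    #include <linux/etherdevice.h>
    #include <linux/nvmem-consumer.h>
    #include <linux/slab.h>

    static int example_get_mac(struct device *dev, u8 mac[ETH_ALEN])
    {
            struct nvmem_cell *cell;
            size_t len;
            u8 *buf;

            cell = nvmem_cell_get(dev, "ethaddr");
            if (IS_ERR(cell))
                    return PTR_ERR(cell);

            buf = nvmem_cell_read(cell, &len);
            nvmem_cell_put(cell);
            if (IS_ERR(buf))
                    return PTR_ERR(buf);

            if (len == ETH_ALEN)
                    ether_addr_copy(mac, buf);
            kfree(buf);

            return len == ETH_ALEN ? 0 : -EINVAL;
    }
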
diff --git a/drivers/of/.kunitconfig b/drivers/of/.kunitconfig
index 5a8fee11978c..4c53d2c7a275 100644
--- a/drivers/of/.kunitconfig
+++ b/drivers/of/.kunitconfig
@@ -1,3 +1,4 @@
CONFIG_KUNIT=y
CONFIG_OF=y
CONFIG_OF_KUNIT_TEST=y
+CONFIG_OF_OVERLAY_KUNIT_TEST=y
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index dd726c7056bf..0e2d608c3e20 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -107,6 +107,16 @@ config OF_OVERLAY
While this option is selected automatically when needed, you can
enable it manually to improve device tree unit test coverage.
+config OF_OVERLAY_KUNIT_TEST
+ tristate "Device Tree overlay KUnit tests" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select OF_OVERLAY
+ help
+ This option builds KUnit unit tests for the device tree overlay code.
+
+ If unsure, say N here, but this option is safe to enable.
+
config OF_NUMA
bool
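
[Editor's note] Usage note, not part of the patch: together with the
.kunitconfig change above, the new overlay suite can typically be run under UML
via ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/of.
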
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 251d33532148..379a0afcbdc0 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -19,6 +19,9 @@ obj-y += kexec.o
endif
endif
+obj-$(CONFIG_KUNIT) += of_kunit_helpers.o
obj-$(CONFIG_OF_KUNIT_TEST) += of_test.o
+obj-$(CONFIG_OF_OVERLAY_KUNIT_TEST) += overlay-test.o
+overlay-test-y := overlay_test.o kunit_overlay_test.dtbo.o
obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 68103ad230ee..4d528c10df3a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -34,7 +34,7 @@
/*
* __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
- * cmd_dt_S_dtb in scripts/Makefile.lib
+ * cmd_wrap_S_dtb in scripts/Makefile.dtbs
*/
extern uint8_t __dtb_empty_root_begin[];
extern uint8_t __dtb_empty_root_end[];
diff --git a/drivers/of/kunit_overlay_test.dtso b/drivers/of/kunit_overlay_test.dtso
new file mode 100644
index 000000000000..85f20b4b4c16
--- /dev/null
+++ b/drivers/of/kunit_overlay_test.dtso
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ kunit-test {
+ compatible = "test,empty";
+ };
+};
diff --git a/drivers/of/of_kunit_helpers.c b/drivers/of/of_kunit_helpers.c
new file mode 100644
index 000000000000..287d6c91bb37
--- /dev/null
+++ b/drivers/of/of_kunit_helpers.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test managed DeviceTree APIs
+ */
+
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#include <kunit/of.h>
+#include <kunit/test.h>
+#include <kunit/resource.h>
+
+#if defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE)
+
+static void of_overlay_fdt_apply_kunit_exit(void *ovcs_id)
+{
+ of_overlay_remove(ovcs_id);
+}
+
+/**
+ * of_overlay_fdt_apply_kunit() - Test managed of_overlay_fdt_apply()
+ * @test: test context
+ * @overlay_fdt: device tree overlay to apply
+ * @overlay_fdt_size: size in bytes of @overlay_fdt
+ * @ovcs_id: identifier of overlay, used to remove the overlay
+ *
+ * Just like of_overlay_fdt_apply(), except the overlay is managed by the test
+ * case and is automatically removed with of_overlay_remove() after the test
+ * case concludes.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id)
+{
+ int ret;
+ int *copy_id;
+
+ copy_id = kunit_kmalloc(test, sizeof(*copy_id), GFP_KERNEL);
+ if (!copy_id)
+ return -ENOMEM;
+
+ ret = of_overlay_fdt_apply(overlay_fdt, overlay_fdt_size,
+ ovcs_id, NULL);
+ if (ret)
+ return ret;
+
+ *copy_id = *ovcs_id;
+
+ return kunit_add_action_or_reset(test, of_overlay_fdt_apply_kunit_exit,
+ copy_id);
+}
+EXPORT_SYMBOL_GPL(of_overlay_fdt_apply_kunit);
+
+#endif
+
+KUNIT_DEFINE_ACTION_WRAPPER(of_node_put_wrapper, of_node_put, struct device_node *);
+
+/**
+ * of_node_put_kunit() - Test managed of_node_put()
+ * @test: test context
+ * @node: node to pass to `of_node_put()`
+ *
+ * Just like of_node_put(), except the node is managed by the test case and is
+ * automatically put with of_node_put() after the test case concludes.
+ */
+void of_node_put_kunit(struct kunit *test, struct device_node *node)
+{
+ if (kunit_add_action(test, of_node_put_wrapper, node)) {
+ KUNIT_FAIL(test,
+ "Can't allocate a kunit resource to put of_node\n");
+ }
+}
+EXPORT_SYMBOL_GPL(of_node_put_kunit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test managed DeviceTree APIs");
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 5949829a1b00..2ec20886d176 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -10,6 +10,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/nodemask.h>
+#include <linux/numa_memblks.h>
#include <asm/numa.h>
@@ -44,7 +45,7 @@ static int __init of_numa_parse_memory_nodes(void)
struct device_node *np = NULL;
struct resource rsrc;
u32 nid;
- int i, r;
+ int i, r = -EINVAL;
for_each_node_by_type(np, "memory") {
r = of_property_read_u32(np, "numa-node-id", &nid);
@@ -71,7 +72,7 @@ static int __init of_numa_parse_memory_nodes(void)
}
}
- return 0;
+ return r;
}
static int __init of_numa_parse_distance_map_v1(struct device_node *map)
diff --git a/drivers/of/overlay_test.c b/drivers/of/overlay_test.c
new file mode 100644
index 000000000000..19a292cdeee3
--- /dev/null
+++ b/drivers/of/overlay_test.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for device tree overlays
+ */
+#include <linux/device/bus.h>
+#include <linux/kconfig.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <kunit/of.h>
+#include <kunit/test.h>
+
+static const char * const kunit_node_name = "kunit-test";
+static const char * const kunit_compatible = "test,empty";
+
+/* Test that of_overlay_apply_kunit() adds a node to the live tree */
+static void of_overlay_apply_kunit_apply(struct kunit *test)
+{
+ struct device_node *np;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_overlay_test));
+
+ np = of_find_node_by_name(NULL, kunit_node_name);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np);
+ of_node_put(np);
+}
+
+/*
+ * Test that of_overlay_apply_kunit() creates platform devices with the
+ * expected device_node
+ */
+static void of_overlay_apply_kunit_platform_device(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(test, kunit_overlay_test));
+
+ np = of_find_node_by_name(NULL, kunit_node_name);
+ of_node_put_kunit(test, np);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
+
+ pdev = of_find_device_by_node(np);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, pdev);
+ if (pdev)
+ put_device(&pdev->dev);
+}
+
+static int of_overlay_bus_match_compatible(struct device *dev, const void *data)
+{
+ return of_device_is_compatible(dev->of_node, data);
+}
+
+/* Test that of_overlay_apply_kunit() cleans up after the test is finished */
+static void of_overlay_apply_kunit_cleanup(struct kunit *test)
+{
+ struct kunit fake;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct device_node *np;
+
+ if (!IS_ENABLED(CONFIG_OF_EARLY_FLATTREE))
+ kunit_skip(test, "requires CONFIG_OF_EARLY_FLATTREE for root node");
+
+ kunit_init_test(&fake, "fake test", NULL);
+ KUNIT_ASSERT_EQ(test, fake.status, KUNIT_SUCCESS);
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_overlay_apply_kunit(&fake, kunit_overlay_test));
+
+ np = of_find_node_by_name(NULL, kunit_node_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
+ of_node_put_kunit(test, np);
+
+ pdev = of_find_device_by_node(np);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ put_device(&pdev->dev); /* Not dereferencing 'pdev' after this */
+
+ /* Remove overlay */
+ kunit_cleanup(&fake);
+
+ /* The node and device should be removed */
+ np = of_find_node_by_name(NULL, kunit_node_name);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, np);
+ of_node_put(np);
+
+ dev = bus_find_device(&platform_bus_type, NULL, kunit_compatible,
+ of_overlay_bus_match_compatible);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, dev);
+ put_device(dev);
+}
+
+static struct kunit_case of_overlay_apply_kunit_test_cases[] = {
+ KUNIT_CASE(of_overlay_apply_kunit_apply),
+ KUNIT_CASE(of_overlay_apply_kunit_platform_device),
+ KUNIT_CASE(of_overlay_apply_kunit_cleanup),
+ {}
+};
+
+/*
+ * Test suite for test managed device tree overlays.
+ */
+static struct kunit_suite of_overlay_apply_kunit_suite = {
+ .name = "of_overlay_apply_kunit",
+ .test_cases = of_overlay_apply_kunit_test_cases,
+};
+
+kunit_test_suites(
+ &of_overlay_apply_kunit_suite,
+);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for device tree overlays");
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f416c008cec2..9bafcff3e628 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -726,11 +726,14 @@ static int of_platform_notify(struct notifier_block *nb,
struct of_reconfig_data *rd = arg;
struct platform_device *pdev_parent, *pdev;
bool children_left;
+ struct device_node *parent;
switch (of_reconfig_get_state_change(action, rd)) {
case OF_RECONFIG_CHANGE_ADD:
- /* verify that the parent is a bus */
- if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
+ parent = rd->dn->parent;
+ /* verify that the parent is a bus (or the root node) */
+ if (!of_node_is_root(parent) &&
+ !of_node_check_flag(parent, OF_POPULATED_BUS))
return NOTIFY_OK; /* not for us */
/* already populated? (driver using of_populate manually) */
@@ -743,7 +746,7 @@ static int of_platform_notify(struct notifier_block *nb,
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
/* pdev_parent may be NULL when no bus platform device */
- pdev_parent = of_find_device_by_node(rd->dn->parent);
+ pdev_parent = of_find_device_by_node(parent);
pdev = of_platform_device_create(rd->dn, NULL,
pdev_parent ? &pdev_parent->dev : NULL);
platform_device_put(pdev_parent);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index b60b4b7d7172..daf9a2dddd7e 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1861,7 +1861,7 @@ static int __init unittest_data_add(void)
struct device_node *unittest_data_node = NULL, *np;
/*
* __dtbo_testcases_begin[] and __dtbo_testcases_end[] are magically
- * created by cmd_dt_S_dtbo in scripts/Makefile.lib
+ * created by cmd_wrap_S_dtbo in scripts/Makefile.dtbs
*/
extern uint8_t __dtbo_testcases_begin[];
extern uint8_t __dtbo_testcases_end[];
@@ -3525,7 +3525,7 @@ out_skip_tests:
/*
* __dtbo_##overlay_name##_begin[] and __dtbo_##overlay_name##_end[] are
- * created by cmd_dt_S_dtbo in scripts/Makefile.lib
+ * created by cmd_wrap_S_dtbo in scripts/Makefile.dtbs
*/
#define OVERLAY_INFO_EXTERN(overlay_name) \
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index aa4d1833f442..0d94e4a967d8 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -143,6 +143,15 @@ config PCI_IOV
If unsure, say N.
+config PCI_NPEM
+ bool "Native PCIe Enclosure Management"
+ depends on LEDS_CLASS=y
+ help
+ Support for Native PCIe Enclosure Management. It allows managing LED
+ indications in storage enclosures. The enclosure must support the
+ following indications: OK, Locate, Fail, and Rebuild; other
+ indications are optional.
+
config PCI_PRI
bool "PCI PRI support"
select PCI_ATS
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 8ddad57934a6..374c5c06d92f 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
obj-$(CONFIG_PCI_DOE) += doe.o
obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
+obj-$(CONFIG_PCI_NPEM) += npem.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 87fa03540b8a..6afff1f1b143 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -488,8 +488,8 @@ void pci_restore_pasid_state(struct pci_dev *pdev)
* pci_pasid_features - Check which PASID features are supported
* @pdev: PCI device structure
*
- * Returns a negative value when no PASI capability is present.
- * Otherwise is returns a bitmask with supported features. Current
+ * Return a negative value when no PASID capability is present.
+ * Otherwise return a bitmask with supported features. Current
* features reported are:
* PCI_PASID_CAP_EXEC - Execute permission supported
* PCI_PASID_CAP_PRIV - Privileged mode supported
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 4d2c188f5835..9800b7681054 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -196,7 +196,7 @@ config PCIE_MEDIATEK
config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 1d5a70c9055e..8a0044bb3989 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -38,7 +38,7 @@ config PCIE_CADENCE_PLAT_EP
select PCIE_CADENCE_EP
select PCIE_CADENCE_PLAT
help
- Say Y here if you want to support the Cadence PCIe platform controller in
+ Say Y here if you want to support the Cadence PCIe platform controller in
endpoint mode. This PCIe controller may be embedded into many
 different vendors' SoCs.
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 85718246016b..284f2e0e4d26 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -7,6 +7,8 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
@@ -22,6 +24,8 @@
#include "../../pci.h"
#include "pcie-cadence.h"
+#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
+
#define ENABLE_REG_SYS_2 0x108
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
@@ -44,6 +48,7 @@ enum link_status {
#define J721E_MODE_RC BIT(7)
#define LANE_COUNT(n) ((n) << 8)
+#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
#define GENERATION_SEL_MASK GENMASK(1, 0)
struct j721e_pcie {
@@ -52,6 +57,7 @@ struct j721e_pcie {
u32 mode;
u32 num_lanes;
u32 max_lanes;
+ struct gpio_desc *reset_gpio;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -220,6 +226,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
return ret;
}
+static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *node = dev->of_node;
+ u32 mask = ACSPCIE_PAD_DISABLE_MASK;
+ struct of_phandle_args args;
+ u32 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node,
+ "ti,syscon-acspcie-proxy-ctrl",
+ 1, 0, &args);
+ if (ret) {
+ dev_err(dev,
+ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
+ return ret;
+ }
+
+ /* Clear PAD IO disable bits to enable refclk output */
+ val = ~(args.args[0]);
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->cdns_pcie->dev;
@@ -259,7 +295,13 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
- return 0;
+ /* Enable ACSPCIE refclk output if the optional property exists */
+ syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ "ti,syscon-acspcie-proxy-ctrl");
+ if (!syscon)
+ return 0;
+
+ return j721e_enable_acspcie_refclk(pcie, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
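
[Editor's note] An illustrative DT sketch, not from the patch: the property
takes exactly one argument cell (see the "1" passed to
of_parse_phandle_with_fixed_args() above), carrying the PAD IO disable bits the
driver clears. Node names and the mask value are assumptions.

    pcie0_rc: pcie@f102000 {
            /* ... usual host bridge properties ... */
            ti,syscon-acspcie-proxy-ctrl = <&acspcie0_proxy_ctrl 0x3>;
    };
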
@@ -482,20 +524,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
- dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq);
goto err_get_sync;
}
@@ -505,42 +547,40 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_RC:
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get reset GPIO\n");
+ ret = dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get reset GPIO\n");
goto err_get_sync;
}
+ pcie->reset_gpio = gpiod;
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
clk = devm_clk_get_optional(dev, "pcie_refclk");
if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to get pcie_refclk\n");
+ ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n");
goto err_pcie_setup;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(dev, "failed to enable pcie_refclk\n");
+ dev_err_probe(dev, ret, "failed to enable pcie_refclk\n");
goto err_pcie_setup;
}
pcie->refclk = clk;
/*
- * "Power Sequencing and Reset Signal Timings" table in
- * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
- * indicates PERST# should be deasserted after minimum of 100us
- * once REFCLK is stable. The REFCLK to the connector in RC
- * mode is selected while enabling the PHY. So deassert PERST#
- * after 100 us.
+ * The "Power Sequencing and Reset Signal Timings" table of the
+ * PCI Express Card Electromechanical Specification, Revision
+ * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST#
+ * should be deasserted after minimum of 100us once REFCLK is
+ * stable. The REFCLK to the connector in RC mode is selected
+ * while enabling the PHY. So deassert PERST# after 100 us.
*/
if (gpiod) {
- usleep_range(100, 200);
+ fsleep(PCIE_T_PERST_CLK_US);
gpiod_set_value_cansleep(gpiod, 1);
}
@@ -554,7 +594,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_EP:
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
@@ -589,6 +629,87 @@ static void j721e_pcie_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
}
+static int j721e_pcie_suspend_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+
+ if (pcie->mode == PCI_MODE_RC) {
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ clk_disable_unprepare(pcie->refclk);
+ }
+
+ cdns_pcie_disable_phy(pcie->cdns_pcie);
+
+ return 0;
+}
+
+static int j721e_pcie_resume_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ int ret;
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0)
+ return ret;
+
+ j721e_pcie_config_link_irq(pcie);
+
+ /*
+ * This is not called explicitly in the probe path; there it is
+ * called by cdns_pcie_init_phy().
+ */
+ ret = cdns_pcie_enable_phy(pcie->cdns_pcie);
+ if (ret < 0)
+ return ret;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie);
+
+ ret = clk_prepare_enable(pcie->refclk);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The "Power Sequencing and Reset Signal Timings" table of the
+ * PCI Express Card Electromechanical Specification, Revision
+ * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST#
+ * should be deasserted after minimum of 100us once REFCLK is
+ * stable. The REFCLK to the connector in RC mode is selected
+ * while enabling the PHY. So deassert PERST# after 100 us.
+ */
+ if (pcie->reset_gpio) {
+ fsleep(PCIE_T_PERST_CLK_US);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ }
+
+ ret = cdns_pcie_host_link_setup(rc);
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+
+ /*
+ * Reset internal status of BARs to force reinitialization in
+ * cdns_pcie_host_init().
+ */
+ for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_host_init(rc);
+ if (ret) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops,
+ j721e_pcie_suspend_noirq,
+ j721e_pcie_resume_noirq);
+
static struct platform_driver j721e_pcie_driver = {
.probe = j721e_pcie_probe,
.remove_new = j721e_pcie_remove,
@@ -596,6 +717,7 @@ static struct platform_driver j721e_pcie_driver = {
.name = "j721e-pcie",
.of_match_table = of_j721e_pcie_match,
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
builtin_platform_driver(j721e_pcie_driver);
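
[Editor's note] Gloss, not part of the patch: DEFINE_NOIRQ_DEV_PM_OPS() expands
to roughly the dev_pm_ops below, so the new callbacks run only in the noirq
phase of system sleep; pm_sleep_ptr() then compiles the ops away when
CONFIG_PM_SLEEP is disabled.

    static const struct dev_pm_ops j721e_pcie_pm_ops_sketch = {
            NOIRQ_SYSTEM_SLEEP_PM_OPS(j721e_pcie_suspend_noirq,
                                      j721e_pcie_resume_noirq)
    };
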
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 5b14f7ee3c79..8af95e9da7ce 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -485,8 +485,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
return cdns_pcie_host_map_dma_ranges(rc);
}
-static int cdns_pcie_host_init(struct device *dev,
- struct cdns_pcie_rc *rc)
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -497,6 +496,30 @@ static int cdns_pcie_host_init(struct device *dev,
return cdns_pcie_host_init_address_translation(rc);
}
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_start_link(rc);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return 0;
+}
+
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
struct device *dev = rc->pcie.dev;
@@ -533,25 +556,14 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
- if (rc->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
-
- cdns_pcie_host_enable_ptm_response(pcie);
-
- ret = cdns_pcie_start_link(pcie);
- if (ret) {
- dev_err(dev, "Failed to start link\n");
- return ret;
- }
-
- ret = cdns_pcie_host_start_link(rc);
+ ret = cdns_pcie_host_link_setup(rc);
if (ret)
- dev_dbg(dev, "PCIe link never came up\n");
+ return ret;
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
rc->avail_ib_bar[bar] = true;
- ret = cdns_pcie_host_init(dev, rc);
+ ret = cdns_pcie_host_init(rc);
if (ret)
return ret;
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 7a66a2f815dc..f5eeff834ec1 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -314,7 +314,6 @@ struct cdns_pcie {
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
* @cfg_base: IO mapped window to access the PCI configuration space of a
@@ -521,10 +520,22 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
}
#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
return 0;
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 4c38181acffa..b6d6778b0698 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -265,12 +265,16 @@ config PCIE_DW_PLAT_EP
order to enable device-specific features PCI_DW_PLAT_EP must be
selected.
+config PCIE_QCOM_COMMON
+ bool
+
config PCIE_QCOM
bool "Qualcomm PCIe controller (host mode)"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
select CRC8
+ select PCIE_QCOM_COMMON
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -281,6 +285,7 @@ config PCIE_QCOM_EP
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_ENDPOINT
select PCIE_DW_EP
+ select PCIE_QCOM_COMMON
help
Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
to work in endpoint mode. The PCIe controller uses the DesignWare core
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index ec215b3d6191..a8308d9ea986 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 4fe3b0cb72ec..5c62e1a3ba52 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -850,14 +850,21 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->mode = mode;
ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ IRQF_SHARED | IRQF_ONESHOT,
+ "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_gpio;
+ goto err_deinit;
}
return 0;
+err_deinit:
+ if (dra7xx->mode == DW_PCIE_RC_TYPE)
+ dw_pcie_host_deinit(&dra7xx->pci->pp);
+ else
+ dw_pcie_ep_deinit(&dra7xx->pci->ep);
+
err_gpio:
err_get_sync:
pm_runtime_put(dev);
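
[Editor's note] Why IRQF_ONESHOT is needed above, with a minimal sketch (not
part of the patch): when the primary handler is NULL, the IRQ core requires
IRQF_ONESHOT so the line stays masked until the threaded handler completes;
without it, request_threaded_irq() fails with -EINVAL.

    static int example_request(struct device *dev, int irq, void *priv,
                               irqreturn_t (*thread_fn)(int, void *))
    {
            /* NULL primary handler => IRQF_ONESHOT is mandatory. */
            return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
                                             IRQF_SHARED | IRQF_ONESHOT,
                                             "example-irq", priv);
    }
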
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 964d67756eb2..808d1f105417 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -54,9 +55,9 @@
#define IMX95_PE0_GEN_CTRL_3 0x1058
#define IMX95_PCIE_LTSSM_EN BIT(0)
-#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
-enum imx6_pcie_variants {
+enum imx_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
@@ -64,6 +65,7 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX8Q,
IMX95,
IMX8MQ_EP,
IMX8MM_EP,
@@ -71,25 +73,25 @@ enum imx6_pcie_variants {
IMX95_EP,
};
-#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
-#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
-#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
-#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3)
-#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4)
-#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5)
-#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6)
-#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
+#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1)
+#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8)
-#define imx6_check_flag(pci, val) (pci->drvdata->flags & val)
+#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
-#define IMX6_PCIE_MAX_CLKS 6
+#define IMX_PCIE_MAX_CLKS 6
+#define IMX_PCIE_MAX_INSTANCES 2
-#define IMX6_PCIE_MAX_INSTANCES 2
+struct imx_pcie;
-struct imx6_pcie;
-
-struct imx6_pcie_drvdata {
- enum imx6_pcie_variants variant;
+struct imx_pcie_drvdata {
+ enum imx_pcie_variants variant;
enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
@@ -98,17 +100,19 @@ struct imx6_pcie_drvdata {
const u32 clks_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
- const u32 mode_off[IMX6_PCIE_MAX_INSTANCES];
- const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES];
+ const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
const struct pci_epc_features *epc_features;
- int (*init_phy)(struct imx6_pcie *pcie);
+ int (*init_phy)(struct imx_pcie *pcie);
+ int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
+ int (*core_reset)(struct imx_pcie *pcie, bool assert);
};
-struct imx6_pcie {
+struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
bool link_is_up;
- struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
+ struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
@@ -129,7 +133,7 @@ struct imx6_pcie {
/* power domain for pcie phy */
struct device *pd_pcie_phy;
struct phy *phy;
- const struct imx6_pcie_drvdata *drvdata;
+ const struct imx_pcie_drvdata *drvdata;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -184,28 +188,28 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MQ_EP &&
- imx6_pcie->drvdata->variant != IMX8MM &&
- imx6_pcie->drvdata->variant != IMX8MM_EP &&
- imx6_pcie->drvdata->variant != IMX8MP &&
- imx6_pcie->drvdata->variant != IMX8MP_EP);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+ WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
+ imx_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx_pcie->drvdata->variant != IMX8MM &&
+ imx_pcie->drvdata->variant != IMX8MM_EP &&
+ imx_pcie->drvdata->variant != IMX8MP &&
+ imx_pcie->drvdata->variant != IMX8MP_EP);
+ return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
-static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_PHY_CR_PARA_SEL,
IMX95_PCIE_PHY_CR_PARA_SEL);
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_PHY_GEN_CTRL,
IMX95_PCIE_REF_USE_PAD, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_REF_CLKEN,
IMX95_PCIE_REF_CLKEN);
@@ -213,9 +217,9 @@ static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
return 0;
}
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
unsigned int mask, val, mode, id;
if (drvdata->mode == DW_PCIE_EP_TYPE)
@@ -223,7 +227,11 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
else
mode = PCI_EXP_TYPE_ROOT_PORT;
- id = imx6_pcie->controller_id;
+ id = imx_pcie->controller_id;
+
+ /* If mode_mask is 0, the generic PHY driver is used to set the mode */
+ if (!drvdata->mode_mask[0])
+ return;
 /* If mode_mask[id] is zero, each controller has its own GPR */
if (!drvdata->mode_mask[id])
@@ -232,12 +240,12 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
mask = drvdata->mode_mask[id];
val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
@@ -256,9 +264,9 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 val;
int ret;
@@ -268,24 +276,24 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -293,7 +301,7 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -302,18 +310,18 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -324,7 +332,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -333,7 +341,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -342,7 +350,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -351,7 +359,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -360,74 +368,74 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
/* TODO: Currently this code assumes external oscillator is being used */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
/*
 * According to the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is
 * supplied by 3.3V, the VREG_BYPASS should be cleared to zero.
*/
- if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
+ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_VREG_BYPASS,
0);
return 0;
}
-static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
return 0;
}
-static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
/* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
+ imx_pcie->tx_swing_low << 25);
return 0;
}
-static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
- return imx6_pcie_init_phy(imx6_pcie);
+ return imx_pcie_init_phy(imx_pcie);
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
u32 val;
- struct device *dev = imx6_pcie->pci->dev;
+ struct device *dev = imx_pcie->pci->dev;
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
IOMUXC_GPR22, val,
val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
PHY_PLL_LOCK_WAIT_USLEEP_MAX,
@@ -435,19 +443,19 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
unsigned long phy_rate = 0;
int mult, div;
u16 val;
int i;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return 0;
- for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
- if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
- phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
+ for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
+ if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(imx_pcie->clks[i].clk);
switch (phy_rate) {
case 125000000:
@@ -465,46 +473,46 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
div = 1;
break;
default:
- dev_err(imx6_pcie->pci->dev,
+ dev_err(imx_pcie->pci->dev,
"Unsupported PHY reference clock rate %lu\n", phy_rate);
return -EINVAL;
}
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);
return 0;
}
-static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
u16 tmp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return;
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
#ifdef CONFIG_ARM
@@ -543,22 +551,22 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
}
#endif
-static int imx6_pcie_attach_pd(struct device *dev)
+static int imx_pcie_attach_pd(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
struct device_link *link;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
return 0;
- imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
- if (IS_ERR(imx6_pcie->pd_pcie))
- return PTR_ERR(imx6_pcie->pd_pcie);
+ imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx_pcie->pd_pcie))
+ return PTR_ERR(imx_pcie->pd_pcie);
/* Do nothing when power domain missing */
- if (!imx6_pcie->pd_pcie)
+ if (!imx_pcie->pd_pcie)
return 0;
- link = device_link_add(dev, imx6_pcie->pd_pcie,
+ link = device_link_add(dev, imx_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
@@ -567,11 +575,11 @@ static int imx6_pcie_attach_pd(struct device *dev)
return -EINVAL;
}
- imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pd_pcie_phy))
- return PTR_ERR(imx6_pcie->pd_pcie_phy);
+ imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx_pcie->pd_pcie_phy))
+ return PTR_ERR(imx_pcie->pd_pcie_phy);
- link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ link = device_link_add(dev, imx_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
@@ -583,21 +591,20 @@ static int imx6_pcie_attach_pd(struct device *dev)
return 0;
}
-static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- unsigned int offset;
- int ret = 0;
+ if (enable)
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
- break;
- case IMX6QP:
- case IMX6Q:
+ return 0;
+}
+
+static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ if (enable) {
/* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
 * the async reset input needs the ref clock to sync internally,
 * when the ref clock comes after reset, the internally synced
@@ -605,71 +612,51 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
* add one ~10us delay here.
*/
usleep_range(10, 100);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
- break;
- case IMX7D:
- case IMX95:
- case IMX95_EP:
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- offset = imx6_pcie_grp_offset(imx6_pcie);
- /*
- * Set the over ride low and enabled
- * make sure that REF_CLK is turned on.
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
- 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- break;
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ } else {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
}
- return ret;
+ return 0;
}
-static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX6QP:
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD,
- IMX6Q_GPR1_PCIE_TEST_PD);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- default:
- break;
+ int offset = imx_pcie_grp_offset(imx_pcie);
+
+ if (enable) {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
}
+
+ return 0;
+}
+
+static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ if (!enable)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ return 0;
}
-static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
int ret;
- ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
if (ret)
return ret;
- ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
+ if (imx_pcie->drvdata->enable_ref_clk) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCIe REFCLK\n");
+ goto err_ref_clk;
+ }
}
/* allow the clocks to stabilize */
@@ -677,99 +664,120 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
return ret;
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
- imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (imx_pcie->drvdata->enable_ref_clk)
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- reset_control_assert(imx6_pcie->pciephy_reset);
- reset_control_assert(imx6_pcie->apps_reset);
+ if (assert)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- default:
- break;
- }
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
+ return 0;
+}
- /* Some boards don't have PCIe reset GPIO. */
- gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 1);
+static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
+ assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
+ if (!assert)
+ usleep_range(200, 500);
+
+ return 0;
}
-static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
+ if (!assert)
+ return 0;
- reset_control_deassert(imx6_pcie->pciephy_reset);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- /* Workaround for ERR010728, failure of PCI-e PLL VCO to
- * oscillate, especially when cold. This turns off "Duty-cycle
- * Corrector" and other mysterious undocumented things.
- */
- if (likely(imx6_pcie->phy_base)) {
- /* De-assert DCC_FB_EN */
- writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
- /* Assert RX_EQS and RX_EQS_SEL */
- writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
- | PCIE_PHY_CMN_REG24_RX_EQ,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
- /* Assert ATT_MODE */
- writel(PCIE_PHY_CMN_REG26_ATT_MODE,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
- } else {
- dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
- }
+ return 0;
+}
- imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST, 0);
+static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
- usleep_range(200, 500);
- break;
- default:
- break;
+ if (assert)
+ return 0;
+
+ /*
+ * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
+ *
+ * PCIe: PLL may fail to lock under corner conditions.
+ *
+ * Initial VCO oscillation may fail under corner conditions such as
+ * cold temperature which will cause the PCIe PLL fail to lock in the
+ * initialization phase.
+ *
+ * The Duty-cycle Corrector calibration must be disabled.
+ *
+ * 1. De-assert the G_RST signal by clearing
+ * SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
+ * 2. De-assert DCC_FB_EN by writing data "0x29" to the register
+ * address 0x306d0014 (PCIE_PHY_CMN_REG4).
+ * 3. Assert RX_EQS, RX_EQ_SEL by writing data "0x48" to the register
+ * address 0x306d0090 (PCIE_PHY_CMN_REG24).
+ * 4. Assert ATT_MODE by writing data "0xbc" to the register
+ * address 0x306d0098 (PCIE_PHY_CMN_REG26).
+ * 5. De-assert the CMN_RST signal by clearing register bit
+ * SRC_PCIEPHY_RCR[PCIEPHY_BTN]
+ */
+
+ if (likely(imx_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+ dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
}
+ imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
+ return 0;
+}
+
+static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_assert(imx_pcie->pciephy_reset);
+ reset_control_assert(imx_pcie->apps_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, true);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
+}
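The switch-on-variant blocks removed above become optional per-SoC callbacks in the drvdata, which the generic assert/deassert helpers invoke. A minimal, self-contained sketch of that dispatch pattern (plain C; all names and values here are hypothetical stand-ins, not the driver's own):

    #include <stdbool.h>
    #include <stdio.h>

    struct soc;                                /* opaque controller state */

    /* Per-SoC drvdata: optional hooks replace switch (variant) dispatch. */
    struct soc_drvdata {
            const char *name;
            int (*core_reset)(struct soc *s, bool assert); /* NULL if unused */
    };

    struct soc {
            const struct soc_drvdata *drvdata;
    };

    static int imx6qp_reset(struct soc *s, bool assert)
    {
            printf("imx6qp: %s core reset\n", assert ? "assert" : "deassert");
            return 0;
    }

    static const struct soc_drvdata imx6qp = { "imx6qp", imx6qp_reset };

    static void assert_core_reset(struct soc *s)
    {
            if (s->drvdata->core_reset)        /* the hook is optional */
                    s->drvdata->core_reset(s, true);
    }

    int main(void)
    {
            struct soc s = { &imx6qp };
            assert_core_reset(&s);
            return 0;
    }

Variants that need no register pokes simply leave the callback NULL, which is why the old empty default cases disappear.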
+
+static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_deassert(imx_pcie->pciephy_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, false);
/* Some boards don't have PCIe reset GPIO. */
- if (imx6_pcie->reset_gpiod) {
+ if (imx_pcie->reset_gpiod) {
msleep(100);
- gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 0);
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
}
@@ -777,9 +785,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
return 0;
}
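The two msleep(100) calls bracket PERST# release: the first gives power and the reference clock time to stabilize while PERST# is still asserted (my reading of the intent), and the second is the 100 ms wait after deassertion before configuration requests that PCIe r5.0, sec. 6.6.1 requires, as the comment says. A condensed restatement of the sequence using the same kernel calls as the code above (not standalone-compilable):

    gpiod_set_value_cansleep(reset_gpiod, 1);  /* assert PERST# */
    /* ...enable clocks, PHY, deassert core reset... */
    msleep(100);                               /* let power/refclk settle */
    gpiod_set_value_cansleep(reset_gpiod, 0);  /* release PERST# */
    msleep(100);                               /* PCIe r5.0, 6.6.1 wait */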
-static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
u32 tmp;
unsigned int retries;
@@ -796,33 +804,38 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -ETIMEDOUT;
}
-static void imx6_pcie_ltssm_enable(struct device *dev)
+static void imx_pcie_ltssm_enable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+ tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
+ phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
if (drvdata->ltssm_mask)
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
drvdata->ltssm_mask);
- reset_control_deassert(imx6_pcie->apps_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
}
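The new phy_set_speed() call programs the PHY with the controller's maximum supported rate, read from the low 4-bit Supported Link Speeds (SLS) field of the Link Capabilities register via FIELD_GET(). A self-contained user-space equivalent, with a made-up LNKCAP readout:

    #include <stdint.h>
    #include <stdio.h>

    /* Low 4 bits of Link Capabilities: Supported Link Speeds (SLS). */
    #define PCI_EXP_LNKCAP_SLS 0x0000000f

    int main(void)
    {
            uint32_t lnkcap = 0x00477d03;  /* hypothetical LNKCAP value */
            /* Since SLS occupies bits [3:0], FIELD_GET reduces to a mask. */
            unsigned int gen = lnkcap & PCI_EXP_LNKCAP_SLS;

            printf("controller supports up to Gen%u\n", gen); /* Gen3, 8 GT/s */
            return 0;
    }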
-static void imx6_pcie_ltssm_disable(struct device *dev)
+static void imx_pcie_ltssm_disable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ phy_set_speed(imx_pcie->phy, 0);
if (drvdata->ltssm_mask)
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
drvdata->ltssm_mask, 0);
- reset_control_assert(imx6_pcie->apps_reset);
+ reset_control_assert(imx_pcie->apps_reset);
}
-static int imx6_pcie_start_link(struct dw_pcie *pci)
+static int imx_pcie_start_link(struct dw_pcie *pci)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
struct device *dev = pci->dev;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
@@ -841,18 +854,18 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
ret = dw_pcie_wait_for_link(pci);
if (ret)
goto err_reset_phy;
- if (pci->link_gen > 1) {
+ if (pci->max_link_speed > 1) {
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= pci->link_gen;
+ tmp |= pci->max_link_speed;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -864,8 +877,8 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx6_pcie->drvdata->flags &
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
+ if (imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
/*
* On i.MX7, DIRECT_SPEED_CHANGE behaves differently
* from i.MX6 family when no link speed transition
@@ -875,7 +888,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
* failure.
*/
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
@@ -890,37 +903,37 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx6_pcie->link_is_up = true;
+ imx_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx6_pcie->link_is_up = false;
+ imx_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
- imx6_pcie_reset_phy(imx6_pcie);
+ imx_pcie_reset_phy(imx_pcie);
return 0;
}
-static void imx6_pcie_stop_link(struct dw_pcie *pci)
+static void imx_pcie_stop_link(struct dw_pcie *pci)
{
struct device *dev = pci->dev;
/* Turn off PCIe LTSSM */
- imx6_pcie_ltssm_disable(dev);
+ imx_pcie_ltssm_disable(dev);
}
-static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
int ret;
- if (imx6_pcie->vpcie) {
- ret = regulator_enable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie) {
+ ret = regulator_enable(imx_pcie->vpcie);
if (ret) {
dev_err(dev, "failed to enable vpcie regulator: %d\n",
ret);
@@ -928,83 +941,105 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
}
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
- if (imx6_pcie->drvdata->init_phy)
- imx6_pcie->drvdata->init_phy(imx6_pcie);
+ if (imx_pcie->drvdata->init_phy)
+ imx_pcie->drvdata->init_phy(imx_pcie);
- imx6_pcie_configure_type(imx6_pcie);
+ imx_pcie_configure_type(imx_pcie);
- ret = imx6_pcie_clk_enable(imx6_pcie);
+ ret = imx_pcie_clk_enable(imx_pcie);
if (ret) {
dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
goto err_reg_disable;
}
- if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ if (imx_pcie->phy) {
+ ret = phy_init(imx_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
}
- }
- if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret) {
+ dev_err(dev, "unable to set PCIe PHY mode\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(imx_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
- goto err_phy_off;
+ goto err_phy_exit;
}
}
- ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret < 0) {
dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
goto err_phy_off;
}
- imx6_setup_phy_mpll(imx6_pcie);
+ imx_setup_phy_mpll(imx_pcie);
return 0;
err_phy_off:
- if (imx6_pcie->phy)
- phy_exit(imx6_pcie->phy);
+ phy_power_off(imx_pcie->phy);
+err_phy_exit:
+ phy_exit(imx_pcie->phy);
err_clk_disable:
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
return ret;
}
-static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- if (imx6_pcie->phy) {
- if (phy_power_off(imx6_pcie->phy))
+ if (imx_pcie->phy) {
+ if (phy_power_off(imx_pcie->phy))
dev_err(pci->dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
+ phy_exit(imx_pcie->phy);
}
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
}
-static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .init = imx6_pcie_host_init,
- .deinit = imx6_pcie_host_exit,
+static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(pcie);
+ struct dw_pcie_rp *pp = &pcie->pp;
+ struct resource_entry *entry;
+
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP))
+ return cpu_addr;
+
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return cpu_addr;
+
+ return cpu_addr - entry->offset;
+}
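On parts flagged IMX_PCIE_FLAG_CPU_ADDR_FIXUP (i.MX8Q here), the CPU view and the PCI bus view of the outbound window differ. A resource_entry's offset is cpu_start minus bus_start, so subtracting it converts a CPU address into the bus address the ATU must emit. A tiny demonstration with hypothetical window addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical window: CPU 0x70000000 maps to bus 0x80000000. */
            uint64_t cpu_start = 0x70000000, bus_start = 0x80000000;
            int64_t  offset    = cpu_start - bus_start; /* entry->offset */

            uint64_t cpu_addr = 0x70100000;
            printf("bus addr = 0x%llx\n",
                   (unsigned long long)(cpu_addr - offset)); /* 0x80100000 */
            return 0;
    }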
+
+static const struct dw_pcie_host_ops imx_pcie_host_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
};
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = imx6_pcie_start_link,
- .stop_link = imx6_pcie_stop_link,
+ .start_link = imx_pcie_start_link,
+ .stop_link = imx_pcie_stop_link,
+ .cpu_addr_fixup = imx_pcie_cpu_addr_fixup,
};
-static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
+static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
enum pci_barno bar;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1013,7 +1048,7 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1060,35 +1095,35 @@ static const struct pci_epc_features imx95_pcie_epc_features = {
};
static const struct pci_epc_features*
-imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
+imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- return imx6_pcie->drvdata->epc_features;
+ return imx_pcie->drvdata->epc_features;
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .init = imx6_pcie_ep_init,
- .raise_irq = imx6_pcie_ep_raise_irq,
- .get_features = imx6_pcie_ep_get_features,
+ .init = imx_pcie_ep_init,
+ .raise_irq = imx_pcie_ep_raise_irq,
+ .get_features = imx_pcie_ep_get_features,
};
-static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
struct platform_device *pdev)
{
int ret;
unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- imx6_pcie_host_init(pp);
+ imx_pcie_host_init(pp);
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- switch (imx6_pcie->drvdata->variant) {
+ switch (imx_pcie->drvdata->variant) {
case IMX8MQ_EP:
case IMX8MM_EP:
case IMX8MP_EP:
@@ -1110,9 +1145,11 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
pci->dbi_base2 = NULL;
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT))
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ep->page_size = imx_pcie->drvdata->epc_features->align;
+
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
@@ -1129,30 +1166,30 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
pci_epc_init_notify(ep->epc);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
return 0;
}
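Setting ep->page_size from epc_features->align hands the endpoint framework the controller's address-translation granularity, so outbound window allocations land on boundaries the hardware can actually map. A self-contained sketch of the kernel-style power-of-two rounding involved (the 4 KiB granule is a hypothetical example):

    #include <stdint.h>
    #include <stdio.h>

    /* Kernel-style ALIGN for power-of-two alignment values. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
            uint64_t align = 1 << 12;   /* hypothetical 4 KiB ATU granule */
            uint64_t size  = 0x1234;    /* requested mapping size */

            printf("mapped size = 0x%llx\n",
                   (unsigned long long)ALIGN(size, align)); /* 0x2000 */
            return 0;
    }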
-static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie)
{
- struct device *dev = imx6_pcie->pci->dev;
+ struct device *dev = imx_pcie->pci->dev;
/* Some variants have a turnoff reset in DT */
- if (imx6_pcie->turnoff_reset) {
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
+ if (imx_pcie->turnoff_reset) {
+ reset_control_assert(imx_pcie->turnoff_reset);
+ reset_control_deassert(imx_pcie->turnoff_reset);
goto pm_turnoff_sleep;
}
/* Others poke directly at IOMUXC registers */
- switch (imx6_pcie->drvdata->variant) {
+ switch (imx_pcie->drvdata->variant) {
case IMX6SX:
case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF,
IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
break;
default:
@@ -1171,73 +1208,73 @@ pm_turnoff_sleep:
usleep_range(1000, 10000);
}
-static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
+static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
u8 offset;
u16 val;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
if (pci_msi_enabled()) {
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
if (save) {
val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
- imx6_pcie->msi_ctrl = val;
+ imx_pcie->msi_ctrl = val;
} else {
dw_pcie_dbi_ro_wr_en(pci);
- val = imx6_pcie->msi_ctrl;
+ val = imx_pcie->msi_ctrl;
dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
dw_pcie_dbi_ro_wr_dis(pci);
}
}
}
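The controller loses the MSI capability's enable bit across suspend, so one helper with a save/restore direction flag replaces what would otherwise be duplicated read and write paths. A minimal, self-contained model of the same shape (the fake register value is illustrative only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t msi_flags = 0x0081; /* pretend PCI_MSI_FLAGS register */
    static uint16_t saved;

    /* Same shape as imx_pcie_msi_save_restore(): one helper, two modes. */
    static void msi_save_restore(bool save)
    {
            if (save)
                    saved = msi_flags;  /* stash before suspend */
            else
                    msi_flags = saved;  /* replay after resume */
    }

    int main(void)
    {
            msi_save_restore(true);
            msi_flags = 0;              /* power loss clears the register */
            msi_save_restore(false);
            printf("restored: 0x%04x\n", msi_flags);
            return 0;
    }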
-static int imx6_pcie_suspend_noirq(struct device *dev)
+static int imx_pcie_suspend_noirq(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_msi_save_restore(imx6_pcie, true);
- imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_stop_link(imx6_pcie->pci);
- imx6_pcie_host_exit(pp);
+ imx_pcie_msi_save_restore(imx_pcie, true);
+ imx_pcie_pm_turnoff(imx_pcie);
+ imx_pcie_stop_link(imx_pcie->pci);
+ imx_pcie_host_exit(pp);
return 0;
}
-static int imx6_pcie_resume_noirq(struct device *dev)
+static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ struct dw_pcie_rp *pp = &imx_pcie->pci->pp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- ret = imx6_pcie_host_init(pp);
+ ret = imx_pcie_host_init(pp);
if (ret)
return ret;
- imx6_pcie_msi_save_restore(imx6_pcie, false);
+ imx_pcie_msi_save_restore(imx_pcie, false);
dw_pcie_setup_rc(pp);
- if (imx6_pcie->link_is_up)
- imx6_pcie_start_link(imx6_pcie->pci);
+ if (imx_pcie->link_is_up)
+ imx_pcie_start_link(imx_pcie->pci);
return 0;
}
-static const struct dev_pm_ops imx6_pcie_pm_ops = {
- NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+static const struct dev_pm_ops imx_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
+ imx_pcie_resume_noirq)
};
-static int imx6_pcie_probe(struct platform_device *pdev)
+static int imx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
- struct imx6_pcie *imx6_pcie;
+ struct imx_pcie *imx_pcie;
struct device_node *np;
struct resource *dbi_base;
struct device_node *node = dev->of_node;
@@ -1245,8 +1282,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
u16 val;
int i;
- imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
- if (!imx6_pcie)
+ imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
+ if (!imx_pcie)
return -ENOMEM;
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
@@ -1255,10 +1292,10 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx6_pcie_host_ops;
+ pci->pp.ops = &imx_pcie_host_ops;
- imx6_pcie->pci = pci;
- imx6_pcie->drvdata = of_device_get_match_data(dev);
+ imx_pcie->pci = pci;
+ imx_pcie->drvdata = of_device_get_match_data(dev);
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
@@ -1270,9 +1307,9 @@ static int imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Unable to map PCIe PHY\n");
return ret;
}
- imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imx6_pcie->phy_base))
- return PTR_ERR(imx6_pcie->phy_base);
+ imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx_pcie->phy_base))
+ return PTR_ERR(imx_pcie->phy_base);
}
pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
@@ -1280,72 +1317,72 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pci->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(imx6_pcie->reset_gpiod))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->reset_gpiod),
+ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(imx_pcie->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
"unable to get reset gpio\n");
- gpiod_set_consumer_name(imx6_pcie->reset_gpiod, "PCIe reset");
+ gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
- if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
+ if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS)
return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
- for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
- imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
+ for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++)
+ imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i];
/* Fetch clocks */
- ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, imx_pcie->clks);
if (ret)
return ret;
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) {
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
+ imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
"failed to get pcie phy\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) {
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
- if (IS_ERR(imx6_pcie->apps_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
+ imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
+ if (IS_ERR(imx_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
"failed to get pcie apps reset control\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) {
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
"Failed to get PCIEPHY reset control\n");
}
- switch (imx6_pcie->drvdata->variant) {
+ switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
+ imx_pcie->controller_id = 1;
break;
default:
break;
}
/* Grab turnoff reset */
- imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx6_pcie->turnoff_reset)) {
+ imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+ if (IS_ERR(imx_pcie->turnoff_reset)) {
dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx6_pcie->turnoff_reset);
+ return PTR_ERR(imx_pcie->turnoff_reset);
}
- if (imx6_pcie->drvdata->gpr) {
+ if (imx_pcie->drvdata->gpr) {
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ imx_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
"unable to find iomuxc registers\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) {
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
if (IS_ERR(off))
@@ -1358,59 +1395,59 @@ static int imx6_pcie_probe(struct platform_device *pdev)
.reg_stride = 4,
};
- imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
- if (IS_ERR(imx6_pcie->iomuxc_gpr))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
"unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
- &imx6_pcie->tx_deemph_gen1))
- imx6_pcie->tx_deemph_gen1 = 0;
+ &imx_pcie->tx_deemph_gen1))
+ imx_pcie->tx_deemph_gen1 = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
- &imx6_pcie->tx_deemph_gen2_3p5db))
- imx6_pcie->tx_deemph_gen2_3p5db = 0;
+ &imx_pcie->tx_deemph_gen2_3p5db))
+ imx_pcie->tx_deemph_gen2_3p5db = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
- &imx6_pcie->tx_deemph_gen2_6db))
- imx6_pcie->tx_deemph_gen2_6db = 20;
+ &imx_pcie->tx_deemph_gen2_6db))
+ imx_pcie->tx_deemph_gen2_6db = 20;
if (of_property_read_u32(node, "fsl,tx-swing-full",
- &imx6_pcie->tx_swing_full))
- imx6_pcie->tx_swing_full = 127;
+ &imx_pcie->tx_swing_full))
+ imx_pcie->tx_swing_full = 127;
if (of_property_read_u32(node, "fsl,tx-swing-low",
- &imx6_pcie->tx_swing_low))
- imx6_pcie->tx_swing_low = 127;
+ &imx_pcie->tx_swing_low))
+ imx_pcie->tx_swing_low = 127;
/* Limit link speed */
- pci->link_gen = 1;
- of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
-
- imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
- if (IS_ERR(imx6_pcie->vpcie)) {
- if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
- return PTR_ERR(imx6_pcie->vpcie);
- imx6_pcie->vpcie = NULL;
+ pci->max_link_speed = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+
+ imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx_pcie->vpcie)) {
+ if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx_pcie->vpcie);
+ imx_pcie->vpcie = NULL;
}
- imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
- if (IS_ERR(imx6_pcie->vph)) {
- if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
- return PTR_ERR(imx6_pcie->vph);
- imx6_pcie->vph = NULL;
+ imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx_pcie->vph)) {
+ if (PTR_ERR(imx_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx_pcie->vph);
+ imx_pcie->vph = NULL;
}
- platform_set_drvdata(pdev, imx6_pcie);
+ platform_set_drvdata(pdev, imx_pcie);
- ret = imx6_pcie_attach_pd(dev);
+ ret = imx_pcie_attach_pd(dev);
if (ret)
return ret;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
- ret = imx6_add_pcie_ep(imx6_pcie, pdev);
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx_add_pcie_ep(imx_pcie, pdev);
if (ret < 0)
return ret;
} else {
@@ -1430,24 +1467,25 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return 0;
}
-static void imx6_pcie_shutdown(struct platform_device *pdev)
+static void imx_pcie_shutdown(struct platform_device *pdev)
{
- struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+ struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
}
static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
+static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"};
-static const struct imx6_pcie_drvdata drvdata[] = {
+static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
.clk_names = imx6q_clks,
@@ -1456,13 +1494,15 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx6_pcie_init_phy,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6q_pcie_core_reset,
},
[IMX6SX] = {
.variant = IMX6SX,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
.clk_names = imx6sx_clks,
.clks_cnt = ARRAY_SIZE(imx6sx_clks),
@@ -1471,12 +1511,14 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.init_phy = imx6sx_pcie_init_phy,
+ .enable_ref_clk = imx6sx_pcie_enable_ref_clk,
+ .core_reset = imx6sx_pcie_core_reset,
},
[IMX6QP] = {
.variant = IMX6QP,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
.clk_names = imx6q_clks,
@@ -1485,24 +1527,28 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx6_pcie_init_phy,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6qp_pcie_core_reset,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
.clk_names = imx6q_clks,
.clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.init_phy = imx7d_pcie_init_phy,
+ .enable_ref_clk = imx7d_pcie_enable_ref_clk,
+ .core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
- .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx8mq-iomuxc-gpr",
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
@@ -1511,32 +1557,42 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_off[1] = IOMUXC_GPR12,
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
.init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_PHYDRV |
- IMX6_PCIE_FLAG_HAS_APP_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
.clk_names = imx8mm_clks,
.clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_PHYDRV |
- IMX6_PCIE_FLAG_HAS_APP_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
.clk_names = imx8mm_clks,
.clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q] = {
+ .variant = IMX8Q,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP,
+ .clk_names = imx8q_clks,
+ .clks_cnt = ARRAY_SIZE(imx8q_clks),
},
[IMX95] = {
.variant = IMX95,
- .flags = IMX6_PCIE_FLAG_HAS_SERDES,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES,
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
@@ -1547,8 +1603,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
- .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
.clk_names = imx8mq_clks,
@@ -1559,10 +1615,12 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
.init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
- .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
.clk_names = imx8mm_clks,
@@ -1570,10 +1628,12 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
- .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
.clk_names = imx8mm_clks,
@@ -1581,11 +1641,12 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX95_EP] = {
.variant = IMX95_EP,
- .flags = IMX6_PCIE_FLAG_HAS_SERDES |
- IMX6_PCIE_FLAG_SUPPORT_64BIT,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_SUPPORT_64BIT,
.clk_names = imx8mq_clks,
.clks_cnt = ARRAY_SIZE(imx8mq_clks),
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
@@ -1598,7 +1659,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
};
-static const struct of_device_id imx6_pcie_of_match[] = {
+static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
@@ -1606,6 +1667,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
@@ -1614,19 +1676,19 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{},
};
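Each compatible string binds a device tree node to one drvdata[] entry, and probe retrieves it with of_device_get_match_data(). A rough, self-contained stand-in for that table lookup (the lookup helper is a simplification of the OF core, not its real implementation):

    #include <stdio.h>
    #include <string.h>

    struct drvdata { const char *gpr; int clks_cnt; };

    static const struct drvdata imx6q_data  = { "fsl,imx6q-iomuxc-gpr", 3 };
    static const struct drvdata imx8mm_data = { "fsl,imx8mm-iomuxc-gpr", 3 };

    struct of_match { const char *compatible; const struct drvdata *data; };

    static const struct of_match table[] = {
            { "fsl,imx6q-pcie",  &imx6q_data  },
            { "fsl,imx8mm-pcie", &imx8mm_data },
            { NULL, NULL },                    /* sentinel, as in the kernel */
    };

    /* Rough stand-in for of_device_get_match_data(). */
    static const struct drvdata *match_data(const char *compat)
    {
            for (const struct of_match *m = table; m->compatible; m++)
                    if (!strcmp(m->compatible, compat))
                            return m->data;
            return NULL;
    }

    int main(void)
    {
            const struct drvdata *d = match_data("fsl,imx8mm-pcie");
            printf("gpr = %s\n", d ? d->gpr : "(none)");
            return 0;
    }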
-static struct platform_driver imx6_pcie_driver = {
+static struct platform_driver imx_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
- .of_match_table = imx6_pcie_of_match,
+ .of_match_table = imx_pcie_of_match,
.suppress_bind_attrs = true,
- .pm = &imx6_pcie_pm_ops,
+ .pm = &imx_pcie_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe = imx6_pcie_probe,
- .shutdown = imx6_pcie_shutdown,
+ .probe = imx_pcie_probe,
+ .shutdown = imx_pcie_shutdown,
};
-static void imx6_pcie_quirk(struct pci_dev *dev)
+static void imx_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
struct dw_pcie_rp *pp = bus->sysdata;
@@ -1636,33 +1698,33 @@ static void imx6_pcie_quirk(struct pci_dev *dev)
return;
/* Make sure we only quirk devices associated with this driver */
- if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+ if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
return;
if (pci_is_root_bus(bus)) {
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
/*
* Limit config length to avoid the kernel reading beyond
* the register set and causing an abort on i.MX 6Quad
*/
- if (imx6_pcie->drvdata->dbi_length) {
- dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+ if (imx_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx_pcie->drvdata->dbi_length;
dev_info(&dev->dev, "Limiting cfg_size to %d\n",
dev->cfg_size);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
- PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+ PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);
-static int __init imx6_pcie_init(void)
+static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
struct device_node *np;
- np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ np = of_find_matching_node(NULL, imx_pcie_of_match);
if (!np)
return -ENODEV;
of_node_put(np);
@@ -1678,6 +1740,6 @@ static int __init imx6_pcie_init(void)
"external abort on non-linefetch");
#endif
- return platform_driver_register(&imx6_pcie_driver);
+ return platform_driver_register(&imx_pcie_driver);
}
-device_initcall(imx6_pcie_init);
+device_initcall(imx_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 52c6420ae200..2219b1a866fa 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -189,12 +189,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void ks_pcie_msi_mask(struct irq_data *data)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
@@ -247,7 +241,6 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.name = "KEYSTONE-PCI-MSI",
.irq_ack = ks_pcie_msi_irq_ack,
.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
- .irq_set_affinity = ks_pcie_msi_set_affinity,
.irq_mask = ks_pcie_msi_mask,
.irq_unmask = ks_pcie_msi_unmask,
};
@@ -577,7 +570,7 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(am6_pci_devids, bridge)) {
bridge_dev = pci_get_host_bridge_device(dev);
- if (!bridge_dev && !bridge_dev->parent)
+ if (!bridge_dev || !bridge_dev->parent)
return;
ks_pcie = dev_get_drvdata(bridge_dev->parent);
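The one-character keystone fix above matters: with &&, a NULL bridge_dev makes !bridge_dev true and the right-hand side then dereferences NULL, while a non-NULL bridge_dev with a NULL parent makes the whole condition false and slips past the guard. || short-circuits correctly on both counts. A self-contained demonstration of the corrected check:

    #include <stddef.h>
    #include <stdio.h>

    struct device { struct device *parent; };

    /* With ||, a NULL dev short-circuits before ->parent is read; the
     * old && form dereferenced NULL and also let a parentless device
     * through.
     */
    static int valid(struct device *dev)
    {
            if (!dev || !dev->parent)
                    return 0;
            return 1;
    }

    int main(void)
    {
            struct device orphan = { NULL };
            printf("%d %d\n", valid(NULL), valid(&orphan)); /* 0 0 */
            return 0;
    }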
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index a0822d5371bc..3e41865c7290 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -48,8 +48,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = {
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI,
.chip = &dw_pcie_msi_irq_chip,
};
@@ -116,12 +117,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
(int)d->hwirq, msg->address_hi, msg->address_lo);
}
-static int dw_pci_msi_set_affinity(struct irq_data *d,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
@@ -177,7 +172,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
- .irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
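Both the keystone and designware hunks delete the same boilerplate: an .irq_set_affinity stub that only returned -EINVAL. With MSI_FLAG_NO_AFFINITY set in the domain info, the MSI core rejects affinity changes itself. A condensed before/after sketch, taken from the hunks above (kernel types, not standalone-compilable):

    /* Before: every driver carried its own stub. */
    static int stub_set_affinity(struct irq_data *d,
                                 const struct cpumask *mask, bool force)
    {
            return -EINVAL;
    }

    /* After: one domain flag, no per-chip callback needed. */
    static struct msi_domain_info info = {
            .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                     MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
                     MSI_FLAG_MULTI_PCI_MSI,
    };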
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 1b5aba1f0c92..6d6cbc8b5b2c 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -112,6 +112,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ pci->dbi_phys_addr = res->start;
}
/* DBI2 is mainly useful for the endpoint controller */
@@ -134,6 +135,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->atu_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
+ pci->atu_phys_addr = res->start;
} else {
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
@@ -166,8 +168,8 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
return ret;
}
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
+ if (pci->max_link_speed < 1)
+ pci->max_link_speed = of_pci_get_max_link_speed(np);
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
@@ -687,16 +689,27 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
-static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
{
u32 cap, ctrl2, link_speed;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ /*
+ * Even if the platform doesn't want to limit the maximum link speed,
+ * just cache the hardware default value so that the vendor drivers can
+ * use it to do any link specific configuration.
+ */
+ if (pci->max_link_speed < 1) {
+ pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ return;
+ }
+
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
- switch (pcie_link_speed[link_gen]) {
+ switch (pcie_link_speed[pci->max_link_speed]) {
case PCIE_SPEED_2_5GT:
link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
@@ -1058,8 +1071,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;
- if (pci->link_gen > 0)
- dw_pcie_link_set_max_speed(pci, pci->link_gen);
+ dw_pcie_link_set_max_speed(pci);
/* Configure Gen1 N_FTS */
if (pci->n_fts[0]) {
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 53c4c8f399c8..347ab74ac35a 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -125,6 +125,19 @@
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1
+
+#define GEN3_EQ_CONTROL_OFF 0x8A8
+#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
+#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
+
+#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
+#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
+#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
+#define GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA GENMASK(17, 14)
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -198,6 +211,24 @@
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
/*
+ * 16.0 GT/s (Gen 4) lane margining register definitions
+ */
+#define GEN4_LANE_MARGINING_1_OFF 0xB80
+#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
+#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
+#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
+#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
+
+#define GEN4_LANE_MARGINING_2_OFF 0xB84
+#define MARGINING_IND_ERROR_SAMPLER BIT(28)
+#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
+#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
+#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
+#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
+#define MARGINING_MAXLANES GENMASK(20, 16)
+#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
+#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
+/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
*/
@@ -407,8 +438,10 @@ struct dw_pcie_ops {
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
+ resource_size_t atu_phys_addr;
size_t atu_size;
u32 num_ib_windows;
u32 num_ob_windows;
@@ -421,7 +454,7 @@ struct dw_pcie {
u32 type;
unsigned long caps;
int num_lanes;
- int link_gen;
+ int max_link_speed;
u8 n_fts[2];
struct dw_edma_chip edma;
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index acbe4f6d3291..676d2aba4fbd 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -132,7 +132,7 @@ static void intel_pcie_link_setup(struct intel_pcie *pcie)
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
- switch (pci->link_gen) {
+ switch (pci->max_link_speed) {
case 3:
pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
break;
@@ -252,7 +252,7 @@ static int intel_pcie_wait_l2(struct intel_pcie *pcie)
int ret;
struct dw_pcie *pci = &pcie->pci;
- if (pci->link_gen < 3)
+ if (pci->max_link_speed < 3)
return 0;
/* Send PME_TURN_OFF message */
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 0a29136491b8..85a2c77b1835 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -420,11 +420,11 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
"unable to get a valid reset gpio\n");
}
- pcie->num_slots++;
- if (pcie->num_slots > MAX_PCI_SLOTS) {
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) {
dev_err(dev, "Too many PCI slots!\n");
return -EINVAL;
}
+ pcie->num_slots++;
ret = of_pci_get_devfn(child);
if (ret < 0) {
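The kirin hunk reorders the bound check to run before the increment, so num_slots can never grow past the point where it would index beyond the per-slot arrays. A self-contained sketch of the check-then-increment pattern, with a hypothetical slot table:

    #include <stdio.h>

    #define MAX_PCI_SLOTS 3

    static int gpios[MAX_PCI_SLOTS];
    static int num_slots;

    /* Reject before a table index is consumed, as the fix above does. */
    static int add_slot(int gpio)
    {
            if (num_slots + 1 >= MAX_PCI_SLOTS)
                    return -1;
            gpios[num_slots] = gpio;
            num_slots++;
            return 0;
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    printf("slot %d: %s\n", i,
                           add_slot(100 + i) ? "rejected" : "ok");
            return 0;
    }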
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
new file mode 100644
index 000000000000..3aad19b56da8
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+
+#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
+
+void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /*
+ * The GEN3_RELATED_OFF register is repurposed to apply equalization
+ * settings at various data transmission rates through the GEN3_EQ_*
+ * registers. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF
+ * determines the data rate for which these equalization settings are
+ * applied.
+ */
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization);
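The equalization setup is a series of read-modify-write operations on multi-bit fields: GENMASK() defines the field, and FIELD_PREP() shifts a value into it. A self-contained user-space equivalent with local stand-ins for the kernel macros (these behave like the real ones for the 32-bit cases used here; __builtin_ctz is a GCC/Clang builtin):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the kernel's GENMASK()/FIELD_PREP(). */
    #define GENMASK(h, l)    (((~0u) >> (31 - (h))) & ((~0u) << (l)))
    #define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

    #define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)

    int main(void)
    {
            uint32_t reg = 0xffffffff;          /* pretend register read */

            reg &= ~GEN3_EQ_FMDC_N_EVALS;       /* clear the field */
            reg |= FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd); /* N_EVALS = 13 */
            printf("reg = 0x%08x\n", reg);      /* prints 0xfffffdbf */
            return 0;
    }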
+
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF);
+ reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET |
+ MARGINING_NUM_VOLTAGE_STEPS |
+ MARGINING_MAX_TIMING_OFFSET |
+ MARGINING_NUM_TIMING_STEPS);
+ reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) |
+ FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) |
+ FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) |
+ FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF);
+ reg |= MARGINING_IND_ERROR_SAMPLER |
+ MARGINING_SAMPLE_REPORTING_METHOD |
+ MARGINING_IND_LEFT_RIGHT_TIMING |
+ MARGINING_VOLTAGE_SUPPORTED;
+ reg &= ~(MARGINING_IND_UP_DOWN_VOLTAGE |
+ MARGINING_MAXLANES |
+ MARGINING_SAMPLE_RATE_TIMING |
+ MARGINING_SAMPLE_RATE_VOLTAGE);
+ reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining);
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
new file mode 100644
index 000000000000..7d88d29e4766
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PCIE_QCOM_COMMON_H
+#define _PCIE_QCOM_COMMON_H
+
+struct dw_pcie;
+
+void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
+
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index a9b263f749b6..e588fcc54589 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -25,6 +25,7 @@
#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -498,6 +499,11 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
goto err_disable_resources;
}
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
+ qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+ }
+
/*
* The physical address of the MMIO region which is exposed as the BAR
* should be written to MHI BASE registers.
@@ -659,11 +665,9 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
struct dw_pcie *pci = &pcie_ep->pci;
struct device *dev = pci->dev;
u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
- u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
u32 dstate, val;
writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
- status &= mask;
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
@@ -693,7 +697,8 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
dw_pcie_ep_linkup(&pci->ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
} else {
- dev_err(dev, "Received unknown event: %d\n", status);
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
}
return IRQ_HANDLED;
@@ -724,8 +729,15 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
+ struct device *dev = pcie_ep->pci.dev;
+ char *name;
int ret;
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
if (pcie_ep->global_irq < 0)
return pcie_ep->global_irq;
@@ -733,18 +745,23 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
- "global_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request Global IRQ\n");
return ret;
}
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
qcom_pcie_ep_perst_irq_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "perst_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
disable_irq(pcie_ep->global_irq);
@@ -858,21 +875,15 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qcom_pcie_enable_resources(pcie_ep);
- if (ret) {
- dev_err(dev, "Failed to enable resources: %d\n", ret);
- return ret;
- }
-
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_disable_resources;
+ return ret;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_disable_resources;
+ goto err_ep_deinit;
name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
if (!name) {
@@ -889,8 +900,8 @@ err_disable_irqs:
disable_irq(pcie_ep->global_irq);
disable_irq(pcie_ep->perst_irq);
-err_disable_resources:
- qcom_pcie_disable_resources(pcie_ep);
+err_ep_deinit:
+ dw_pcie_ep_deinit(&pcie_ep->pci.ep);
return ret;
}
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 6f953e32d990..ef44a82be058 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -35,6 +35,7 @@
#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -45,15 +46,24 @@
#define PARF_PHY_REFCLK 0x4c
#define PARF_CONFIG_BITS 0x50
#define PARF_DBI_BASE_ADDR 0x168
+#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
#define PARF_SID_OFFSET 0x234
#define PARF_BDF_TRANSLATE_CFG 0x24c
-#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_DBI_BASE_ADDR_V2 0x350
+#define PARF_DBI_BASE_ADDR_V2_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
#define PARF_NO_SNOOP_OVERIDE 0x3d4
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
#define PARF_BDF_TO_SID_CFG 0x2c00
@@ -108,7 +118,7 @@
#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
/* PARF_SLV_ADDR_SPACE_SIZE register value */
-#define SLV_ADDR_SPACE_SZ 0x10000000
+#define SLV_ADDR_SPACE_SZ 0x80000000
/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN BIT(0)
@@ -121,6 +131,9 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_UP BIT(13)
+
/* PARF_NO_SNOOP_OVERIDE register fields */
#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
@@ -284,6 +297,11 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
+ qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+ }
+
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
pcie->cfg->ops->ltssm_enable(pcie);
@@ -325,6 +343,50 @@ static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
}
+static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * The PARF_DBI_BASE_ADDR register is in the CPU domain and needs
+ * to be programmed with the CPU physical address.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE);
+ }
+}
+
+static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
+ * in the CPU domain and need to be programmed with CPU physical
+ * addresses.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2);
+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2_HI);
+
+ if (pci->atu_phys_addr) {
+ writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR);
+ writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR_HI);
+ }
+
+ writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
+ }
+}
+
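The V2/HI register pairs take a 64-bit CPU physical address split into two 32-bit writes, which is what lower_32_bits()/upper_32_bits() do above. A self-contained illustration with a hypothetical DBI physical address:

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
            uint64_t dbi_phys = 0x1c00000000ULL; /* hypothetical CPU address */

            printf("lo = 0x%08x, hi = 0x%08x\n",
                   lower_32_bits(dbi_phys),      /* 0x00000000 */
                   upper_32_bits(dbi_phys));     /* 0x0000001c */
            return 0;
    }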
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -541,8 +603,7 @@ err_assert_reset:
static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
@@ -629,8 +690,7 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -812,13 +872,11 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
@@ -914,8 +972,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -1124,14 +1181,11 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
u32 val;
int i;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
@@ -1489,6 +1543,29 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
qcom_pcie_link_transition_count);
}
+static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie *pcie = data;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ struct device *dev = pcie->pci->dev;
+ u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
+
+ writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ } else {
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
+ }
+
+ return IRQ_HANDLED;
+}
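+
+/*
+ * Note: assuming PARF_INT_ALL_LINK_UP is a single-bit (BIT(n)) mask,
+ * FIELD_GET(PARF_INT_ALL_LINK_UP, status) above reduces to
+ * (status & mask) >> n, i.e. a plain test of the Link Up bit.
+ */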
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
@@ -1499,7 +1576,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
struct dw_pcie_rp *pp;
struct resource *res;
struct dw_pcie *pci;
- int ret;
+ int ret, irq;
+ char *name;
pcie_cfg = of_device_get_match_data(dev);
if (!pcie_cfg || !pcie_cfg->ops) {
@@ -1620,6 +1698,27 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_phy_exit;
}
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
+ pci_domain_nr(pp->bridge->bus));
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_host_deinit;
+ }
+
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+ IRQF_ONESHOT, name, pcie);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "Failed to request Global IRQ\n");
+ goto err_host_deinit;
+ }
+
+ writel_relaxed(PARF_INT_ALL_LINK_UP, pcie->parf + PARF_INT_ALL_MASK);
+ }
+
qcom_pcie_icc_opp_update(pcie);
if (pcie->mhi)
@@ -1627,6 +1726,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return 0;
+err_host_deinit:
+ dw_pcie_host_deinit(pp);
err_phy_exit:
phy_exit(pcie->phy);
err_pm_runtime_put:
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index f0f3ebd1a033..3a5511c3f7d9 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -141,10 +141,10 @@ static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
}
/*
- * Require direct speed change with retrying here if the link_gen is
- * PCIe Gen2 or higher.
+ * Require a direct speed change with retries here if max_link_speed
+ * is PCIe Gen2 or higher.
*/
- changes = min_not_zero(dw->link_gen, RCAR_MAX_LINK_SPEED) - 1;
+ changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1;
/*
* Since dw_pcie_setup_rc() sets it once, PCIe Gen2 will be trained.
@@ -606,7 +606,12 @@ static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
{
/* The check_addr values are magic numbers from the datasheet */
- const u32 check_addr[] = { 0x00101018, 0x00101118, 0x00101021, 0x00101121};
+ static const u32 check_addr[] = {
+ 0x00101018,
+ 0x00101118,
+ 0x00101021,
+ 0x00101121,
+ };
struct dw_pcie *dw = &rcar->dw;
const struct firmware *fw;
unsigned int i, timeout;
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 201dced209f0..ff986ced56b2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -233,7 +233,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- pci->link_gen = 1;
+ pci->max_link_speed = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 4bf7b433417a..c1394f2ab63f 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -177,17 +177,12 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define GEN3_EQ_CONTROL_OFF 0x8a8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
-#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
-#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
-#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
-#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
#define MSIX_ADDR_MATCH_LOW_OFF 0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
@@ -861,9 +856,9 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -872,10 +867,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (pcie->of_data->gen4_preset_vec <<
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
+ pcie->of_data->gen4_preset_vec);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
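The FIELD_PREP() conversions in the two hunks above are behavior-preserving;
a minimal sketch of the equivalence, with an illustrative GENMASK()-based
field definition:

	#define PSET_REQ_VEC	GENMASK(23, 8)	/* 16-bit field at bits [23:8] */

	val |= 0x3ff << 8;			/* old: explicit shift constant */
	val |= FIELD_PREP(PSET_REQ_VEC, 0x3ff);	/* new: shift derived from the mask */

Both forms produce 0x3ff00; FIELD_PREP() additionally checks at compile time
that a constant value fits the field.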
@@ -907,11 +902,11 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Enable as 0xFFFF0001 response for CRS */
+ /* Enable as 0xFFFF0001 response for RRS */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
- val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
- val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
- AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_RRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
/* Clear Slot Clock Configuration bit if SRNS configuration */
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 32951f7d6d6d..0e088e74155d 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -360,8 +360,8 @@ static struct irq_chip mobiveil_msi_irq_chip = {
};
static struct msi_domain_info mobiveil_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &mobiveil_msi_irq_chip,
};
@@ -378,16 +378,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
.name = "Mobiveil MSI",
.irq_compose_msi_msg = mobiveil_compose_msi_msg,
- .irq_set_affinity = mobiveil_msi_set_affinity,
};
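The same conversion recurs in the aardvark, tegra, altera, and mediatek hunks
below: instead of every driver carrying a stub of the form

	/* "foo" is a placeholder driver name, not a real symbol */
	static int foo_msi_set_affinity(struct irq_data *d,
					const struct cpumask *mask, bool force)
	{
		return -EINVAL;
	}

the domain advertises MSI_FLAG_NO_AFFINITY and the MSI core itself refuses
affinity changes.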
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 8b3e1a079cf3..a598a98247ce 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -50,7 +50,7 @@
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
-#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_RRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
@@ -262,7 +262,7 @@ enum {
#define MSI_IRQ_NUM 32
-#define CFG_RD_CRS_VAL 0xffff0001
+#define CFG_RD_RRS_VAL 0xffff0001
struct advk_pcie {
struct platform_device *pdev;
@@ -649,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_pcie_train_link(pcie);
}
-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -669,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
* 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
* means a PIO write error, and for PIO read it is successful with
* a read value of 0xFFFFFFFF.
- * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
+ * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
* only means a PIO write error, and for PIO read it is successful
* with a read value of 0xFFFF0001.
* 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
@@ -694,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
strcomp_status = "UR";
ret = -EOPNOTSUPP;
break;
- case PIO_COMPLETION_STATUS_CRS:
- if (allow_crs && val) {
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is enabled:
+ case PIO_COMPLETION_STATUS_RRS:
+ if (allow_rrs && val) {
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If Configuration RRS Software Visibility is enabled:
* For a Configuration Read Request that includes both
* bytes of the Vendor ID field of a device Function's
* Configuration Space Header, the Root Complex must
@@ -706,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
* all '1's for any additional bytes included in the
* request.
*
- * So CRS in this case is not an error status.
+ * So RRS in this case is not an error status.
*/
- *val = CFG_RD_CRS_VAL;
+ *val = CFG_RD_RRS_VAL;
strcomp_status = NULL;
ret = 0;
break;
}
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is not enabled, the Root Complex
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If RRS Software Visibility is not enabled, the Root Complex
* must re-issue the Configuration Request as a new Request.
- * If CRS Software Visibility is enabled: For a Configuration
+ * If RRS Software Visibility is enabled: For a Configuration
* Write Request or for any other Configuration Read Request,
* the Root Complex must re-issue the Configuration Request as
* a new Request.
* A Root Complex implementation may choose to limit the number
- * of Configuration Request/CRS Completion Status loops before
+ * of Configuration Request/RRS Completion Status loops before
* determining that something is wrong with the target of the
* Request and taking appropriate action, e.g., complete the
* Request to the host as a failed transaction.
@@ -729,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
* So return -EAGAIN and caller (pci-aardvark.c driver) will
* re-issue request again up to the PIO_RETRY_CNT retries.
*/
- strcomp_status = "CRS";
+ strcomp_status = "RRS";
ret = -EAGAIN;
break;
case PIO_COMPLETION_STATUS_CA:
@@ -920,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
- /* Only emulation of PMEIE and CRSSVE bits is provided */
- rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ /* Only emulation of PMEIE and RRS_SVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
break;
}
@@ -1075,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates support for Config Request Retry Status */
- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);
bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
@@ -1141,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
{
struct advk_pcie *pcie = bus->sysdata;
int retry_count;
- bool allow_crs;
+ bool allow_rrs;
u32 reg;
int ret;
@@ -1153,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
size, val);
/*
- * Completion Retry Status is possible to return only when reading all
- * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
- * CRSSVE flag on Root Bridge is enabled.
+ * A Configuration Request Retry Status (RRS) completion can be
+ * returned only when both bytes of PCI_VENDOR_ID are read at once
+ * and the RRS_SVE flag on the Root Port is enabled.
*/
- allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
if (advk_pcie_pio_is_running(pcie))
- goto try_crs;
+ goto try_rrs;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -1189,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
- goto try_crs;
+ goto try_rrs;
retry_count += ret;
/* Check PIO status and get the read result */
- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
if (ret < 0)
@@ -1207,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
-try_crs:
+try_rrs:
/*
- * If it is possible, return Completion Retry Status so that caller
- * tries to issue the request again instead of failing.
+ * If it is possible, return Configuration Request Retry Status so
+ * that caller tries to issue the request again instead of failing.
*/
- if (allow_crs) {
- *val = CFG_RD_CRS_VAL;
+ if (allow_rrs) {
+ *val = CFG_RD_RRS_VAL;
return PCIBIOS_SUCCESSFUL;
}
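For reference, the fabricated CFG_RD_RRS_VAL (0xffff0001) decodes as:

	bits [15:0]  = 0x0001	/* the special RRS Vendor ID value */
	bits [31:16] = 0xffff	/* additional bytes read as all 1's */

which is exactly the pattern required by the PCIe r6.0, sec 2.3.2 text quoted
earlier.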
@@ -1304,12 +1304,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
-static int advk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void advk_msi_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
@@ -1353,7 +1347,6 @@ static void advk_msi_top_irq_unmask(struct irq_data *d)
static struct irq_chip advk_msi_bottom_irq_chip = {
.name = "MSI",
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
- .irq_set_affinity = advk_msi_set_affinity,
.irq_mask = advk_msi_irq_mask,
.irq_unmask = advk_msi_irq_unmask,
};
@@ -1451,7 +1444,8 @@ static struct irq_chip advk_msi_irq_chip = {
static struct msi_domain_info advk_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
+ MSI_FLAG_PCI_MSIX,
.chip = &advk_msi_irq_chip,
};
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 038d974a318e..d7517c3976e7 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -1629,11 +1629,6 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
@@ -1648,7 +1643,6 @@ static struct irq_chip tegra_msi_bottom_chip = {
.irq_ack = tegra_msi_irq_ack,
.irq_mask = tegra_msi_irq_mask,
.irq_unmask = tegra_msi_irq_unmask,
- .irq_set_affinity = tegra_msi_set_affinity,
.irq_compose_msi_msg = tegra_compose_msi_msg,
};
@@ -1697,8 +1691,8 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
};
static struct msi_domain_info tegra_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &tegra_msi_top_chip,
};
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 8e457fa450a2..1e2ebbfa36d1 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -171,17 +171,17 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
/*
* The v1 controller has a bug in its Configuration Request Retry
- * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * Status (RRS) logic: when RRS Software Visibility is enabled and
* we read the Vendor and Device ID of a non-existent device, the
* controller fabricates return data of 0xFFFF0001 ("device exists
* but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
* ("device does not exist"). This causes the PCI core to retry
* the read until it times out. Avoid this by not claiming to
- * support CRS SV.
+ * support RRS SV.
*/
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
if (size <= 2)
*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 16336a525c16..e36a6e158d23 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -81,8 +81,8 @@ static struct irq_chip altera_msi_irq_chip = {
};
static struct msi_domain_info altera_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &altera_msi_irq_chip,
};
@@ -99,16 +99,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int altera_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip altera_msi_bottom_irq_chip = {
.name = "Altera MSI",
.irq_compose_msi_msg = altera_compose_msi_msg,
- .irq_set_affinity = altera_msi_set_affinity,
};
static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index ef73baefaeb9..650b2dd81c48 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -55,12 +55,11 @@
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define RP_DEVFN 0
-#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, cfg) \
(((cfg) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
- (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
+ (((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
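PCI_DEVID() is the generic helper from <linux/pci.h>; it packs bus and devfn
exactly as the removed TLP_REQ_ID() did:

	#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))

so TLP_CFG_DW1 is unchanged in value; the local macro was simply a duplicate.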
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index c08683febdd4..9321280f6edb 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -75,15 +75,19 @@
#define PCIE_MEM_WIN0_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
+/*
+ * NOTE: You may see the term "BAR" in a number of register names used by
+ * this driver. The term is an artifact of when the HW core was an
+ * endpoint device (EP). Now it is a root complex (RC) and anywhere a
+ * register has the term "BAR" it is related to an inbound window.
+ */
+
+#define PCIE_BRCM_MAX_INBOUND_WINS 16
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
-#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
+#define PCIE_MISC_RC_BAR4_CONFIG_LO 0x40d4
-#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
-#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
@@ -122,7 +126,6 @@
#define PCIE_MEM_WIN0_LIMIT_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
-#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
@@ -131,9 +134,13 @@
(PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
-#define PCIE_INTR2_CPU_BASE 0x4300
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP 0x40ac
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK BIT(0)
+#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP 0x410c
+
#define PCIE_MSI_INTR2_BASE 0x4500
-/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+
+/* Offsets from INTR2_CPU and MSI_INTR2 BASE offsets */
#define MSI_INT_STATUS 0x0
#define MSI_INT_CLR 0x8
#define MSI_INT_MASK_SET 0x10
@@ -184,9 +191,11 @@
#define SSC_STATUS_PLL_LOCK_MASK 0x800
#define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1])
+#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG])
+#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
@@ -205,27 +214,33 @@ enum {
RGR1_SW_INIT_1,
EXT_CFG_INDEX,
EXT_CFG_DATA,
+ PCIE_HARD_DEBUG,
+ PCIE_INTR2_CPU_BASE,
};
-enum {
- RGR1_SW_INIT_1_INIT_MASK,
- RGR1_SW_INIT_1_INIT_SHIFT,
-};
-
-enum pcie_type {
+enum pcie_soc_base {
GENERIC,
- BCM7425,
- BCM7435,
+ BCM2711,
BCM4908,
BCM7278,
- BCM2711,
+ BCM7425,
+ BCM7435,
+ BCM7712,
+};
+
+struct inbound_win {
+ u64 size;
+ u64 pci_offset;
+ u64 cpu_addr;
};
struct pcie_cfg_data {
const int *offsets;
- const enum pcie_type type;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ const enum pcie_soc_base soc_base;
+ const bool has_phy;
+ u8 num_inbound_wins;
+ int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
struct subdev_regulators {
@@ -262,21 +277,25 @@ struct brcm_pcie {
u64 msi_target_addr;
struct brcm_msi *msi;
const int *reg_offsets;
- enum pcie_type type;
+ enum pcie_soc_base soc_base;
struct reset_control *rescal;
struct reset_control *perst_reset;
+ struct reset_control *bridge_reset;
+ struct reset_control *swinit_reset;
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
struct subdev_regulators *sr;
bool ep_wakeup_capable;
+ bool has_phy;
+ u8 num_inbound_wins;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
{
- return pcie->type == BCM7435 || pcie->type == BCM7425;
+ return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425;
}
/*
@@ -394,7 +413,7 @@ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
}
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
- unsigned int win, u64 cpu_addr,
+ u8 win, u64 cpu_addr,
u64 pcie_addr, u64 size)
{
u32 cpu_addr_mb_high, limit_addr_mb_high;
@@ -445,8 +464,8 @@ static struct irq_chip brcm_msi_irq_chip = {
};
static struct msi_domain_info brcm_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &brcm_msi_irq_chip,
};
@@ -484,12 +503,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}
-static int brcm_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void brcm_msi_ack_irq(struct irq_data *data)
{
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
@@ -502,7 +515,6 @@ static void brcm_msi_ack_irq(struct irq_data *data)
static struct irq_chip brcm_msi_bottom_irq_chip = {
.name = "BRCM STB MSI",
.irq_compose_msi_msg = brcm_msi_compose_msi_msg,
- .irq_set_affinity = brcm_msi_set_affinity,
.irq_ack = brcm_msi_ack_irq,
};
@@ -649,7 +661,7 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
if (msi->legacy) {
- msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
msi->legacy_shift = 24;
} else {
@@ -730,17 +742,33 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
return base + DATA_ADDR(pcie);
}
-static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
- u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+ int ret = 0;
+
+ if (pcie->bridge_reset) {
+ if (val)
+ ret = reset_control_assert(pcie->bridge_reset);
+ else
+ ret = reset_control_deassert(pcie->bridge_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+
+ return ret;
+ }
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return ret;
}
-static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
@@ -748,20 +776,29 @@ static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
+ int ret;
+
if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
- return;
+ return -EINVAL;
if (val)
- reset_control_assert(pcie->perst_reset);
+ ret = reset_control_assert(pcie->perst_reset);
else
- reset_control_deassert(pcie->perst_reset);
+ ret = reset_control_deassert(pcie->perst_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+ return ret;
}
-static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -769,34 +806,77 @@ static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
+
+ return 0;
}
-static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
- u64 *rc_bar2_size,
- u64 *rc_bar2_offset)
+static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
+ u64 cpu_addr, u64 pci_offset)
+{
+ b->size = size;
+ b->cpu_addr = cpu_addr;
+ b->pci_offset = pci_offset;
+ (*count)++;
+}
+
+static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
+ struct inbound_win inbound_wins[])
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u64 pci_offset, cpu_addr, size = 0, tot_size = 0;
struct resource_entry *entry;
struct device *dev = pcie->dev;
u64 lowest_pcie_addr = ~(u64)0;
int ret, i = 0;
- u64 size = 0;
+ u8 n = 0;
+
+ /*
+ * The HW registers (and PCIe) use 1-based numbering for BARs. As such,
+ * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
+ */
+ struct inbound_win *b_begin = &inbound_wins[1];
+ struct inbound_win *b = b_begin;
+
+ /*
+ * On STB chips other than the 7712, the first inbound window must be
+ * disabled: rather than being mapped to system memory, it is mapped
+ * to the internal registers of the SoC. This feature is deprecated,
+ * has security considerations, and is not implemented in our modern
+ * SoCs.
+ */
+ if (pcie->soc_base != BCM7712)
+ add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- u64 pcie_beg = entry->res->start - entry->offset;
+ u64 pcie_start = entry->res->start - entry->offset;
+ u64 cpu_start = entry->res->start;
+
+ size = resource_size(entry->res);
+ tot_size += size;
+ if (pcie_start < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_start;
+ /*
+ * 7712 and newer chips may have many BARs, with each
+ * offering a non-overlapping viewport to system memory.
+ * That being said, each BAR's size must still be a power of
+ * two.
+ */
+ if (pcie->soc_base == BCM7712)
+ add_inbound_win(b++, &n, size, cpu_start, pcie_start);
- size += entry->res->end - entry->res->start + 1;
- if (pcie_beg < lowest_pcie_addr)
- lowest_pcie_addr = pcie_beg;
+ if (n > pcie->num_inbound_wins)
+ break;
}
if (lowest_pcie_addr == ~(u64)0) {
@@ -804,13 +884,20 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
return -EINVAL;
}
+ /*
+ * 7712 and newer chips do not have an internal memory mapping system
+ * that enables multiple memory controllers. As such, we can return
+ * now without doing any special configuration.
+ */
+ if (pcie->soc_base == BCM7712)
+ return n;
+
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
PCIE_BRCM_MAX_MEMC);
-
if (ret <= 0) {
/* Make an educated guess */
pcie->num_memc = 1;
- pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
} else {
pcie->num_memc = ret;
}
@@ -819,10 +906,15 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
for (i = 0, size = 0; i < pcie->num_memc; i++)
size += pcie->memc_size[i];
- /* System memory starts at this address in PCIe-space */
- *rc_bar2_offset = lowest_pcie_addr;
- /* The sum of all memc views must also be a power of 2 */
- *rc_bar2_size = 1ULL << fls64(size - 1);
+ /* Our HW mandates that the window size must be a power of 2 */
+ size = 1ULL << fls64(size - 1);
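+
+ /*
+ * Worked example: a 3 GB total gives fls64(3 GB - 1) == 32, so
+ * 1ULL << 32 rounds the window up to 4 GB, the next power of two.
+ */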
+
+ /*
+ * For STB chips, the BAR2 cpu_addr is hardwired to the start
+ * of system memory, so we set it to 0.
+ */
+ cpu_addr = 0;
+ pci_offset = lowest_pcie_addr;
/*
* We validate the inbound memory view even though we should trust
@@ -857,44 +949,119 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
* outbound memory @ 3GB). So instead it will start at the 1x
* multiple of its size
*/
- if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
- (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
- dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
- *rc_bar2_size, *rc_bar2_offset);
+ if (!size || (pci_offset & (size - 1)) ||
+ (pci_offset < SZ_4G && pci_offset > SZ_2G)) {
+ dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
+ size, pci_offset);
return -EINVAL;
}
- return 0;
+ /* Enable inbound window 2, the main inbound window for STB chips */
+ add_inbound_win(b++, &n, size, cpu_addr, pci_offset);
+
+ /*
+ * Disable inbound window 3. On some chips it presents the same
+ * window as #2, but the data appears in a settable endianness.
+ */
+ add_inbound_win(b++, &n, 0, 0, 0);
+
+ return n;
+}
+
+static u32 brcm_bar_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
+ else
+ return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
+}
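+
+/*
+ * Quick check against the removed defines: bar 1 -> 0x402c, bar 2 ->
+ * 0x4034 (was PCIE_MISC_RC_BAR2_CONFIG_LO), bar 3 -> 0x403c (was
+ * PCIE_MISC_RC_BAR3_CONFIG_LO), and bar 4 -> 0x40d4.
+ */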
+
+static u32 brcm_ubus_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
+ else
+ return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
+}
+
+static void set_inbound_win_registers(struct brcm_pcie *pcie,
+ const struct inbound_win *inbound_wins,
+ u8 num_inbound_wins)
+{
+ void __iomem *base = pcie->base;
+ int i;
+
+ for (i = 1; i <= num_inbound_wins; i++) {
+ u64 pci_offset = inbound_wins[i].pci_offset;
+ u64 cpu_addr = inbound_wins[i].cpu_addr;
+ u64 size = inbound_wins[i].size;
+ u32 reg_offset = brcm_bar_reg_offset(i);
+ u32 tmp = lower_32_bits(pci_offset);
+
+ u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
+ PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);
+
+ /* Write low */
+ writel_relaxed(tmp, base + reg_offset);
+ /* Write high */
+ writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);
+
+ /*
+ * Most STB chips:
+ * Do nothing.
+ * 7712:
+ * All of its BARs need to be set.
+ */
+ if (pcie->soc_base == BCM7712) {
+ /* BUS remap register settings */
+ reg_offset = brcm_ubus_reg_offset(i);
+ tmp = lower_32_bits(cpu_addr) & ~0xfff;
+ tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
+ writel_relaxed(tmp, base + reg_offset);
+ tmp = upper_32_bits(cpu_addr);
+ writel_relaxed(tmp, base + reg_offset + 4);
+ }
+ }
}
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- u64 rc_bar2_offset, rc_bar2_size;
+ struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
void __iomem *base = pcie->base;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
u32 tmp, burst, aspm_support;
- int num_out_wins = 0;
- int ret, memc;
+ u8 num_out_wins = 0;
+ int num_inbound_wins = 0;
+ int memc, ret;
/* Reset the bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ ret = pcie->bridge_sw_init_set(pcie, 1);
+ if (ret)
+ return ret;
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
- if (pcie->type == BCM2711)
- pcie->perst_set(pcie, 1);
+ if (pcie->soc_base == BCM2711) {
+ ret = pcie->perst_set(pcie, 1);
+ if (ret) {
+ pcie->bridge_sw_init_set(pcie, 0);
+ return ret;
+ }
+ }
usleep_range(100, 200);
/* Take the bridge out of reset */
- pcie->bridge_sw_init_set(pcie, 0);
+ ret = pcie->bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return ret;
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
if (is_bmips(pcie))
tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
else
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* Wait for SerDes to be stable */
usleep_range(100, 200);
@@ -905,9 +1072,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
*/
if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */
- else if (pcie->type == BCM2711)
+ else if (pcie->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */
- else if (pcie->type == BCM7278)
+ else if (pcie->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */
else
burst = 0x2; /* 512 bytes */
@@ -924,17 +1091,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
- ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
- &rc_bar2_offset);
- if (ret)
- return ret;
+ num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
+ if (num_inbound_wins < 0)
+ return num_inbound_wins;
- tmp = lower_32_bits(rc_bar2_offset);
- u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
- PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
- writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
- writel(upper_32_bits(rc_bar2_offset),
- base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+ set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);
+
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
tmp = readl(base + PCIE_MISC_MISC_CTRL);
for (memc = 0; memc < pcie->num_memc; memc++) {
@@ -956,25 +1122,12 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
* 4GB or when the inbound area is smaller than 4GB (taking into
* account the rounding-up we're forced to perform).
*/
- if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+ if (inbound_wins[2].pci_offset >= SZ_4G ||
+ (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
- return -EINVAL;
- }
-
- /* disable the PCIe->GISB memory window (RC_BAR1) */
- tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
-
- /* disable the PCIe->SCB memory window (RC_BAR3) */
- tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
/* Don't advertise L0s capability if 'aspm-no-l0s' */
aspm_support = PCIE_LINK_STATE_L1;
@@ -1025,7 +1178,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* PCIe->SCB endian mode for BAR */
+ /* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
@@ -1045,6 +1198,10 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+ /* 7712 does not have this (RGR1) timer */
+ if (pcie->soc_base == BCM7712)
+ return;
+
/* Each unit in timeout register is 1/216,000,000 seconds */
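/*
 * I.e. ticks = timeout_us * 216,000,000 / 1,000,000 = 216 * timeout_us;
 * the 4 s (4,000,000 us) setting above comes to 864,000,000 ticks.
 */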
writel(216 * timeout_us, pcie->base + REG_OFFSET);
}
@@ -1063,7 +1220,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
}
/* Start out assuming safe mode (both mode bits cleared) */
- clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
clkreq_cntl &= ~PCIE_CLKREQ_MASK;
if (strcmp(mode, "no-l1ss") == 0) {
@@ -1106,7 +1263,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
dev_err(pcie->dev, err_msg);
mode = "safe";
}
- writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));
dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
}
@@ -1120,7 +1277,9 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
int ret, i;
/* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ ret = pcie->perst_set(pcie, 0);
+ if (ret)
+ return ret;
/*
* Wait for 100ms after PERST# deassertion; see PCIe CEM specification
@@ -1304,23 +1463,25 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+ return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+ return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}
-static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
void __iomem *base = pcie->base;
- int tmp;
+ int tmp, ret;
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- pcie->perst_set(pcie, 1);
+ ret = pcie->perst_set(pcie, 1);
+ if (ret)
+ return ret;
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
@@ -1328,12 +1489,14 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
writel(tmp, base + PCIE_MISC_PCIE_CTRL);
/* Turn off SerDes */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* Shutdown PCIe bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ ret = pcie->bridge_sw_init_set(pcie, 1);
+
+ return ret;
}
static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
@@ -1351,9 +1514,12 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- int ret;
+ int ret, rret;
+
+ ret = brcm_pcie_turn_off(pcie);
+ if (ret)
+ return ret;
- brcm_pcie_turn_off(pcie);
/*
* If brcm_phy_stop() returns an error, just dev_err(). If we
* return the error it will cause the suspend to fail and this is a
@@ -1382,7 +1548,10 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
pcie->sr->supplies);
if (ret) {
dev_err(dev, "Could not turn off regulators\n");
- reset_control_reset(pcie->rescal);
+ rret = reset_control_reset(pcie->rescal);
+ if (rret)
+ dev_err(dev, "failed to reset 'rascal' controller ret=%d\n",
+ rret);
return ret;
}
}
@@ -1397,7 +1566,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
u32 tmp;
- int ret;
+ int ret, rret;
base = pcie->base;
ret = clk_prepare_enable(pcie->clk);
@@ -1416,9 +1585,9 @@ static int brcm_pcie_resume_noirq(struct device *dev)
pcie->bridge_sw_init_set(pcie, 0);
/* SERDES_IDDQ = 0 */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* wait for serdes to be stable */
udelay(100);
@@ -1459,7 +1628,9 @@ err_regulator:
if (pcie->sr)
regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
- reset_control_rearm(pcie->rescal);
+ rret = reset_control_rearm(pcie->rescal);
+ if (rret)
+ dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1487,74 +1658,111 @@ static void brcm_pcie_remove(struct platform_device *pdev)
}
static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
+static const int pcie_offsets_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const struct pcie_cfg_data generic_cfg = {
- .offsets = pcie_offsets,
- .type = GENERIC,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+static const int pcie_offsets_bcm7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
+static const int pcie_offsets_bcm7712[] = {
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+ [PCIE_HARD_DEBUG] = 0x4304,
+ [PCIE_INTR2_CPU_BASE] = 0x4400,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .soc_base = GENERIC,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7435_cfg = {
+static const struct pcie_cfg_data bcm2711_cfg = {
.offsets = pcie_offsets,
- .type = BCM7435,
+ .soc_base = BCM2711,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
- .type = BCM4908,
+ .soc_base = BCM4908,
.perst_set = brcm_pcie_perst_set_4908,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ .num_inbound_wins = 3,
};
static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
.perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm2711_cfg = {
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bcm7425,
+ .soc_base = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
.offsets = pcie_offsets,
- .type = BCM2711,
+ .soc_base = BCM7435,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7216_cfg = {
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .has_phy = true,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .soc_base = BCM7712,
+ .num_inbound_wins = 10,
};
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
- { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
{},
};
@@ -1596,9 +1804,11 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->dev = &pdev->dev;
pcie->np = np;
pcie->reg_offsets = data->offsets;
- pcie->type = data->type;
+ pcie->soc_base = data->soc_base;
pcie->perst_set = data->perst_set;
pcie->bridge_sw_init_set = data->bridge_sw_init_set;
+ pcie->has_phy = data->has_phy;
+ pcie->num_inbound_wins = data->num_inbound_wins;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1613,25 +1823,52 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
- ret = clk_prepare_enable(pcie->clk);
- if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
- return ret;
- }
pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
- if (IS_ERR(pcie->rescal)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->rescal))
return PTR_ERR(pcie->rescal);
- }
+
pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
- if (IS_ERR(pcie->perst_reset)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->perst_reset))
return PTR_ERR(pcie->perst_reset);
+
+ pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
+ if (IS_ERR(pcie->bridge_reset))
+ return PTR_ERR(pcie->bridge_reset);
+
+ pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
+ if (IS_ERR(pcie->swinit_reset))
+ return PTR_ERR(pcie->swinit_reset);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
+
+ pcie->bridge_sw_init_set(pcie, 0);
+
+ if (pcie->swinit_reset) {
+ ret = reset_control_assert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not assert reset 'swinit'\n");
+ }
+
+ /* HW team recommends 1us for proper sync and propagation of reset */
+ udelay(1);
+
+ ret = reset_control_deassert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not de-assert reset 'swinit'\n");
+ }
}
ret = reset_control_reset(pcie->rescal);
- if (ret)
- dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
+ }
ret = brcm_phy_start(pcie);
if (ret) {
@@ -1645,7 +1882,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
- if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV;
goto fail;
@@ -1660,7 +1897,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
@@ -1678,6 +1915,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
fail:
__brcm_pcie_remove(pcie);
+
return ret;
}
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 97f739a2c9f8..22134e95574b 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -54,7 +54,7 @@
#define CFG_RD_SUCCESS 0
#define CFG_RD_UR 1
-#define CFG_RD_CRS 2
+#define CFG_RD_RRS 2
#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -485,31 +485,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
u32 status;
/*
- * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+ * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only
* affects config reads of the Vendor ID. For config writes or any
* other config reads, the Root may automatically reissue the
* configuration request again as a new request.
*
* For config reads, this hardware returns CFG_RETRY_STATUS data
- * when it receives a CRS completion, regardless of the address of
- * the read or the CRS Software Visibility Enable bit. As a
+ * when it receives an RRS completion, regardless of the address of
+ * the read or the RRS Software Visibility Enable bit. As a
* partial workaround for this, we retry in software any read that
* returns CFG_RETRY_STATUS.
*
* Note that a non-Vendor ID config register may have a value of
* CFG_RETRY_STATUS. If we read that, we can't distinguish it from
- * a CRS completion, so we will incorrectly retry the read and
+ * an RRS completion, so we will incorrectly retry the read and
* eventually return the wrong data (0xffffffff).
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
/*
- * CRS state is set in CFG_RD status register
+ * RRS state is set in CFG_RD status register
* This will handle the case where CFG_RETRY_STATUS is
* valid config data.
*/
status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
- if (status != CFG_RD_CRS)
+ if (status != CFG_RD_RRS)
return data;
udelay(1);
@@ -556,8 +556,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
break;
case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
- /* Don't advertise CRS SV support */
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ /* Don't advertise RRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
break;
default:
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index b7e8e24f6a40..66ce4b5d309b 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -6,7 +6,9 @@
* Author: Jianjun Wang <jianjun.wang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
@@ -15,6 +17,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -29,6 +33,12 @@
#define PCI_CLASS(class) (class << 8)
#define PCIE_RC_MODE BIT(0)
+#define PCIE_EQ_PRESET_01_REG 0x100
+#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
+#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8)
+#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16)
+#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24)
+
#define PCIE_CFGNUM_REG 0x140
#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
@@ -68,6 +78,14 @@
#define PCIE_MSI_SET_ENABLE_REG 0x190
#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+#define PCIE_PIPE4_PIE8_REG 0x338
+#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
+#define PCIE_K_FINETUNE_ERR GENMASK(7, 6)
+#define PCIE_K_PRESET_TO_USE GENMASK(18, 8)
+#define PCIE_K_PHYPARAM_QUERY BIT(19)
+#define PCIE_K_QUERY_TIMEOUT BIT(20)
+#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21)
+
#define PCIE_MSI_SET_BASE_REG 0xc00
#define PCIE_MSI_SET_OFFSET 0x10
#define PCIE_MSI_SET_STATUS_OFFSET 0x04
@@ -100,6 +118,26 @@
#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+#define MAX_NUM_PHY_RESETS 3
+
+/* Time in ms needed to complete PCIe reset on EN7581 SoC */
+#define PCIE_EN7581_RESET_TIME_MS 100
+
+struct mtk_gen3_pcie;
+
+/**
+ * struct mtk_gen3_pcie_pdata - differentiate between host generations
+ * @power_up: PCIe power-up callback
+ * @phy_resets: SoC-specific PHY reset line data
+ */
+struct mtk_gen3_pcie_pdata {
+ int (*power_up)(struct mtk_gen3_pcie *pcie);
+ struct {
+ const char *id[MAX_NUM_PHY_RESETS];
+ int num_resets;
+ } phy_resets;
+};
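+
+/*
+ * A hypothetical per-SoC instance, to show how this table is meant to
+ * be filled in (the name and reset-line ids below are illustrative
+ * assumptions, not taken from the driver):
+ *
+ *	static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_example = {
+ *		.power_up = mtk_pcie_en7581_power_up,
+ *		.phy_resets = {
+ *			.id = { "phy-lane0", "phy-lane1", "phy-lane2" },
+ *			.num_resets = 3,
+ *		},
+ *	};
+ */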
+
/**
* struct mtk_msi_set - MSI information for each set
* @base: IO mapped register base
@@ -118,7 +156,7 @@ struct mtk_msi_set {
* @base: IO mapped register base
* @reg_base: physical register base
* @mac_reset: MAC reset control
- * @phy_reset: PHY reset control
+ * @phy_resets: PHY reset controllers
* @phy: PHY controller block
* @clks: PCIe clocks
* @num_clks: PCIe clocks count for this port
@@ -131,13 +169,14 @@ struct mtk_msi_set {
* @msi_sets: MSI sets information
* @lock: lock protecting IRQ bit map
* @msi_irq_in_use: bit map for assigned MSI IRQ
+ * @soc: pointer to SoC-dependent operations
*/
struct mtk_gen3_pcie {
struct device *dev;
void __iomem *base;
phys_addr_t reg_base;
struct reset_control *mac_reset;
- struct reset_control *phy_reset;
+ struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
struct phy *phy;
struct clk_bulk_data *clks;
int num_clks;
@@ -151,6 +190,8 @@ struct mtk_gen3_pcie {
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+
+ const struct mtk_gen3_pcie_pdata *soc;
};
/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
@@ -424,12 +465,6 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
return 0;
}
-static int mtk_pcie_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
@@ -450,8 +485,9 @@ static struct irq_chip mtk_msi_irq_chip = {
};
static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI,
.chip = &mtk_msi_irq_chip,
};
@@ -517,7 +553,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = {
.irq_mask = mtk_msi_bottom_irq_mask,
.irq_unmask = mtk_msi_bottom_irq_unmask,
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "MSI",
};
@@ -618,7 +653,6 @@ static struct irq_chip mtk_intx_irq_chip = {
.irq_mask = mtk_intx_mask,
.irq_unmask = mtk_intx_unmask,
.irq_eoi = mtk_intx_eoi,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "INTx",
};
@@ -775,10 +809,10 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
+ int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
- int ret;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
if (!regs)
@@ -791,12 +825,12 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
pcie->reg_base = regs->start;
- pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
- if (IS_ERR(pcie->phy_reset)) {
- ret = PTR_ERR(pcie->phy_reset);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get PHY reset\n");
+ for (i = 0; i < num_resets; i++)
+ pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
+ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
+ if (ret) {
+ dev_err(dev, "failed to get PHY bulk reset\n");
return ret;
}
@@ -827,13 +861,96 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
return 0;
}
+static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ int err;
+ u32 val;
+
+ /*
+ * Wait for the PHY reset bulk assert issued in mtk_pcie_setup to
+ * take effect; the settling time is specific to the EN7581 SoC.
+ */
+ mdelay(PCIE_EN7581_RESET_TIME_MS);
+
+ err = phy_init(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ return err;
+ }
+
+ err = phy_power_on(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ goto err_phy_deassert;
+ }
+
+ /*
+ * Wait for the time needed to complete the bulk de-assert above.
+ * This time is specific for EN7581 SoC.
+ */
+ mdelay(PCIE_EN7581_RESET_TIME_MS);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
+ if (err) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto err_clk_prepare;
+ }
+
+ val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
+ FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
+ writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
+
+ val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
+ FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
+ writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
+
+ err = clk_bulk_enable(pcie->num_clks, pcie->clks);
+ if (err) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto err_clk_enable;
+ }
+
+ return 0;
+
+err_clk_enable:
+ clk_bulk_unprepare(pcie->num_clks, pcie->clks);
+err_clk_prepare:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+err_phy_deassert:
+ phy_power_off(pcie->phy);
+err_phy_on:
+ phy_exit(pcie->phy);
+
+ return err;
+}
+
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
/* PHY power on and enable pipe clock */
- reset_control_deassert(pcie->phy_reset);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ return err;
+ }
err = phy_init(pcie->phy);
if (err) {
@@ -869,7 +986,7 @@ err_clk_init:
err_phy_on:
phy_exit(pcie->phy);
err_phy_init:
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
return err;
}
@@ -884,7 +1001,7 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
}
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
@@ -896,15 +1013,21 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
return err;
/*
+ * Deassert the lines to avoid an imbalance in the deassert_count
+ * counter, since the bulk reset control is shared.
+ */
+ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+ /*
* The controller may have been left out of reset by the bootloader
* so make sure that we get a clean start by asserting resets here.
*/
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
+
reset_control_assert(pcie->mac_reset);
usleep_range(10, 20);
/* Don't touch the hardware registers before power up */
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
@@ -939,6 +1062,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(host);
pcie->dev = dev;
+ pcie->soc = device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
err = mtk_pcie_setup(pcie);
@@ -1054,7 +1178,7 @@ static int mtk_pcie_resume_noirq(struct device *dev)
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
@@ -1074,8 +1198,27 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
mtk_pcie_resume_noirq)
};
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+};
+
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
+ .power_up = mtk_pcie_en7581_power_up,
+ .phy_resets = {
+ .id[0] = "phy-lane0",
+ .id[1] = "phy-lane1",
+ .id[2] = "phy-lane2",
+ .num_resets = 3,
+ },
+};
+
static const struct of_device_id mtk_pcie_of_match[] = {
- { .compatible = "mediatek,mt8192-pcie" },
+ { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
+ { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
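
The MediaTek Gen3 changes above follow the common match-data pattern: each
compatible string carries a pointer to SoC-specific data, and probe dispatches
through it instead of branching on the compatible string. A minimal sketch of
the pattern, assuming the usual <linux/platform_device.h> and
<linux/property.h> includes; the "demo_" names are illustrative, not part of
the driver:

    struct demo_pcie;

    struct demo_pdata {
            int (*power_up)(struct demo_pcie *pcie);
    };

    struct demo_pcie {
            const struct demo_pdata *soc;
    };

    static int demo_probe(struct platform_device *pdev)
    {
            struct demo_pcie *pcie;

            pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
            if (!pcie)
                    return -ENOMEM;

            /* Returns the .data of the matched of_device_id entry */
            pcie->soc = device_get_match_data(&pdev->dev);
            if (!pcie->soc || !pcie->soc->power_up)
                    return -EINVAL;

            return pcie->soc->power_up(pcie);
    }

This keeps per-SoC quirks (here: the EN7581 power-up sequence and its three
PHY reset lines) out of the shared code paths.
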
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 7fc0d7709b7f..7f7d04c2ea57 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -211,7 +211,6 @@ struct mtk_pcie_port {
* @base: IO mapped register base
* @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
- * @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
* @soc: pointer to SoC-dependent operations
*/
@@ -407,12 +406,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mtk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_msi_ack_irq(struct irq_data *data)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -424,7 +417,6 @@ static void mtk_msi_ack_irq(struct irq_data *data)
static struct irq_chip mtk_msi_bottom_irq_chip = {
.name = "MTK MSI",
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_msi_set_affinity,
.irq_ack = mtk_msi_ack_irq,
};
@@ -486,8 +478,8 @@ static struct irq_chip mtk_msi_irq_chip = {
};
static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &mtk_msi_irq_chip,
};
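
The same transformation recurs in every controller touched by this series,
including the MediaTek Gen3 hunk above: a per-driver .irq_set_affinity stub
that only returned -EINVAL is deleted, and the MSI domain instead advertises
MSI_FLAG_NO_AFFINITY so the MSI core rejects affinity changes uniformly.
Sketched once here with illustrative "demo_" names; the rcar, xilinx, nwl,
plda and vmd hunks below are the same change:

    /* Before: boilerplate repeated in each driver. */
    static int demo_msi_set_affinity(struct irq_data *d,
                                     const struct cpumask *mask, bool force)
    {
            return -EINVAL;
    }

    /* After: one domain flag, no callback. */
    static struct msi_domain_info demo_msi_domain_info = {
            .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                     MSI_FLAG_NO_AFFINITY,
            .chip  = &demo_msi_irq_chip,
    };
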
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index c01efc6ea64f..3dd653f3d784 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -658,11 +658,6 @@ static void rcar_msi_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
@@ -678,7 +673,6 @@ static struct irq_chip rcar_msi_bottom_chip = {
.irq_ack = rcar_msi_irq_ack,
.irq_mask = rcar_msi_irq_mask,
.irq_unmask = rcar_msi_irq_unmask,
- .irq_set_affinity = rcar_msi_set_affinity,
.irq_compose_msi_msg = rcar_compose_msi_msg,
};
@@ -725,8 +719,8 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
};
static struct msi_domain_info rcar_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &rcar_msi_top_chip,
};
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index 5be5dfd8398f..dd117f07fc95 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -71,10 +71,24 @@
/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11)
+#define QDMA_BRIDGE_BASE_OFF 0xcd8
/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS 64
+enum xilinx_pl_dma_version {
+ XDMA,
+ QDMA,
+};
+
+/**
+ * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
+ * @version: DMA version
+ */
+struct xilinx_pl_dma_variant {
+ enum xilinx_pl_dma_version version;
+};
+
struct xilinx_msi {
struct irq_domain *msi_domain;
unsigned long *bitmap;
@@ -88,6 +102,7 @@ struct xilinx_msi {
* struct pl_dma_pcie - PCIe port information
* @dev: Device pointer
* @reg_base: IO Mapped Register Base
+ * @cfg_base: IO Mapped Configuration Base
* @irq: Interrupt number
* @cfg: Holds mappings of config space window
* @phys_reg_base: Physical address of reg base
@@ -97,10 +112,12 @@ struct xilinx_msi {
* @msi: MSI information
* @intx_irq: INTx error interrupt number
* @lock: Lock protecting shared register access
+ * @variant: PL DMA PCIe variant information
*/
struct pl_dma_pcie {
struct device *dev;
void __iomem *reg_base;
+ void __iomem *cfg_base;
int irq;
struct pci_config_window *cfg;
phys_addr_t phys_reg_base;
@@ -110,16 +127,23 @@ struct pl_dma_pcie {
struct xilinx_msi msi;
int intx_irq;
raw_spinlock_t lock;
+ const struct xilinx_pl_dma_variant *variant;
};
static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
+ if (port->variant->version == QDMA)
+ return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+
return readl(port->reg_base + reg);
}
static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
- writel(val, port->reg_base + reg);
+ if (port->variant->version == QDMA)
+ writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+ else
+ writel(val, port->reg_base + reg);
}
static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
@@ -173,6 +197,9 @@ static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
return NULL;
+ if (port->variant->version == QDMA)
+ return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
@@ -355,8 +382,8 @@ static struct irq_chip xilinx_msi_irq_chip = {
};
static struct msi_domain_info xilinx_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &xilinx_msi_irq_chip,
};
@@ -370,16 +397,9 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int xilinx_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip xilinx_irq_chip = {
.name = "pl_dma:MSI",
.irq_compose_msi_msg = xilinx_compose_msi_msg,
- .irq_set_affinity = xilinx_msi_set_affinity,
};
static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -731,6 +751,15 @@ static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
port->reg_base = port->cfg->win;
+ if (port->variant->version == QDMA) {
+ port->cfg_base = port->cfg->win;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ port->phys_reg_base = res->start;
+ }
+
err = xilinx_request_msi_irq(port);
if (err) {
pci_ecam_free(port->cfg);
@@ -760,6 +789,8 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
if (!bus)
return -ENODEV;
+ port->variant = of_device_get_match_data(dev);
+
err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
@@ -791,9 +822,22 @@ err_irq_domain:
return err;
}
+static const struct xilinx_pl_dma_variant xdma_host = {
+ .version = XDMA,
+};
+
+static const struct xilinx_pl_dma_variant qdma_host = {
+ .version = QDMA,
+};
+
static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
{
.compatible = "xlnx,xdma-host-3.00",
+ .data = &xdma_host,
+ },
+ {
+ .compatible = "xlnx,qdma-host-3.00",
+ .data = &qdma_host,
},
{}
};
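
The QDMA support above hinges on one observation: the QDMA bridge registers
share the XDMA layout but live in a separate "breg" resource behind a fixed
0xcd8 offset, while the ECAM window keeps serving config space. The accessors
therefore just pick the effective base at run time. A condensed sketch of the
idiom, mirroring pcie_read()/pcie_write() above:

    static inline void __iomem *demo_reg(struct pl_dma_pcie *port, u32 reg)
    {
            u32 off = port->variant->version == QDMA ? QDMA_BRIDGE_BASE_OFF : 0;

            return port->reg_base + reg + off;
    }

Both accessors then reduce to readl()/writel() on demo_reg(port, reg), so the
rest of the driver stays variant-agnostic.
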
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 0408f4d612b5..a8ae14474dd0 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
@@ -80,8 +81,8 @@
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
-#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
-#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
+#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@@ -96,8 +97,8 @@
MSGF_MISC_SR_NON_FATAL_DEV | \
MSGF_MISC_SR_FATAL_DEV | \
MSGF_MISC_SR_LINK_DOWN | \
- MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
- MSGF_MSIC_SR_LINK_BWIDTH)
+ MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@@ -157,6 +158,7 @@ struct nwl_pcie {
void __iomem *breg_base;
void __iomem *pcireg_base;
void __iomem *ecam_base;
+ struct phy *phy[4];
phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
phys_addr_t phys_ecam_base; /* Physical Configuration Base */
@@ -267,42 +269,42 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
- dev_err(dev, "Received Message FIFO Overflow\n");
+ dev_err_ratelimited(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
- dev_err(dev, "Slave error\n");
+ dev_err_ratelimited(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
- dev_err(dev, "Master error\n");
+ dev_err_ratelimited(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
- dev_err(dev, "In Misc Ingress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
- dev_err(dev, "In Misc Egress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
- dev_err(dev, "Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
- dev_err(dev, "Non-Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
- dev_err(dev, "Correctable Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
- dev_err(dev, "Unsupported request Detected\n");
+ dev_err_ratelimited(dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
- dev_err(dev, "Non-Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
- dev_err(dev, "Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Fatal Error Detected\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
@@ -371,7 +373,7 @@ static void nwl_mask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
@@ -385,7 +387,7 @@ static void nwl_unmask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
@@ -425,8 +427,8 @@ static struct irq_chip nwl_msi_irq_chip = {
};
static struct msi_domain_info nwl_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &nwl_msi_irq_chip,
};
#endif
@@ -441,16 +443,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int nwl_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip nwl_irq_chip = {
.name = "Xilinx MSI",
.irq_compose_msi_msg = nwl_compose_msi_msg,
- .irq_set_affinity = nwl_msi_set_affinity,
};
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -521,6 +516,60 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
return 0;
}
+static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_power_off(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i,
+ err);
+}
+
+static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_exit(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err);
+}
+
+static int nwl_pcie_phy_enable(struct nwl_pcie *pcie)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ nwl_pcie_phy_exit(pcie, i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+
+ return ret;
+}
+
+static void nwl_pcie_phy_disable(struct nwl_pcie *pcie)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(pcie->phy); i--;) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+}
+
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -732,6 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
{
struct device *dev = pcie->dev;
struct resource *res;
+ int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
@@ -759,6 +809,18 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
irq_set_chained_handler_and_data(pcie->irq_intx,
nwl_pcie_leg_handler, pcie);
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
+ if (PTR_ERR(pcie->phy[i]) == -ENODEV) {
+ pcie->phy[i] = NULL;
+ break;
+ }
+
+ if (IS_ERR(pcie->phy[i]))
+ return PTR_ERR(pcie->phy[i]);
+ }
+
return 0;
}
@@ -779,6 +841,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pcie = pci_host_bridge_priv(bridge);
+ platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
@@ -798,16 +861,22 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ err = nwl_pcie_phy_enable(pcie);
+ if (err) {
+ dev_err(dev, "could not enable PHYs\n");
+ goto err_clk;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
- return err;
+ goto err_phy;
}
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- return err;
+ goto err_phy;
}
bridge->sysdata = pcie;
@@ -817,11 +886,27 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- return err;
+ goto err_phy;
}
}
- return pci_host_probe(bridge);
+ err = pci_host_probe(bridge);
+ if (!err)
+ return 0;
+
+err_phy:
+ nwl_pcie_phy_disable(pcie);
+err_clk:
+ clk_disable_unprepare(pcie->clk);
+ return err;
+}
+
+static void nwl_pcie_remove(struct platform_device *pdev)
+{
+ struct nwl_pcie *pcie = platform_get_drvdata(pdev);
+
+ nwl_pcie_phy_disable(pcie);
+ clk_disable_unprepare(pcie->clk);
}
static struct platform_driver nwl_pcie_driver = {
@@ -831,5 +916,6 @@ static struct platform_driver nwl_pcie_driver = {
.of_match_table = nwl_pcie_of_match,
},
.probe = nwl_pcie_probe,
+ .remove_new = nwl_pcie_remove,
};
builtin_platform_driver(nwl_pcie_driver);
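
Two idioms are worth noting in the nwl changes: devm_of_phy_get_by_index()
returning -ENODEV marks the end of the optional PHY list, and the bring-up
loop unwinds in reverse on failure, as probe now does through its err_phy and
err_clk labels. A generic sketch of the unwind shape (not driver code; phy[]
and n are placeholders):

    for (i = 0; i < n; i++) {
            ret = phy_init(phy[i]);
            if (ret)
                    goto err;

            ret = phy_power_on(phy[i]);
            if (ret) {
                    phy_exit(phy[i]);     /* undo this iteration's init */
                    goto err;
            }
    }
    return 0;

    err:
    while (i--) {                         /* tear down completed iterations */
            phy_power_off(phy[i]);
            phy_exit(phy[i]);
    }
    return ret;

Release order mirrors acquisition order in reverse, which is what makes the
new nwl_pcie_remove() callback safe to share with the probe error path.
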
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index cb6e9f7b0152..0b534f73a942 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -208,11 +208,6 @@ static struct irq_chip xilinx_msi_top_chip = {
.irq_ack = xilinx_msi_top_irq_ack,
};
-static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -225,7 +220,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
static struct irq_chip xilinx_msi_bottom_chip = {
.name = "Xilinx MSI",
- .irq_set_affinity = xilinx_msi_set_affinity,
.irq_compose_msi_msg = xilinx_compose_msi_msg,
};
@@ -271,7 +265,8 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
};
static struct msi_domain_info xilinx_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY,
.chip = &xilinx_msi_top_chip,
};
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index a18923d7cea6..8533dc618d45 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -76,17 +76,10 @@ static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int plda_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip plda_msi_bottom_irq_chip = {
.name = "PLDA MSI",
.irq_ack = plda_msi_bottom_irq_ack,
.irq_compose_msi_msg = plda_compose_msi_msg,
- .irq_set_affinity = plda_msi_set_affinity,
};
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
@@ -146,8 +139,8 @@ static struct irq_chip plda_msi_irq_chip = {
};
static struct msi_domain_info plda_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &plda_msi_irq_chip,
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a726de0af011..264a180403a0 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -204,22 +204,11 @@ static void vmd_irq_disable(struct irq_data *data)
raw_spin_unlock_irqrestore(&list_lock, flags);
}
-/*
- * XXX: Stubbed until we develop acceptable way to not create conflicts with
- * other devices sharing the same vector.
- */
-static int vmd_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
.irq_enable = vmd_irq_enable,
.irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
- .irq_set_affinity = vmd_irq_set_affinity,
};
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
@@ -326,7 +315,7 @@ static struct msi_domain_ops vmd_msi_domain_ops = {
static struct msi_domain_info vmd_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.ops = &vmd_msi_domain_ops,
.chip = &vmd_msi_controller,
};
@@ -1053,9 +1042,9 @@ static void vmd_remove(struct pci_dev *dev)
static void vmd_shutdown(struct pci_dev *dev)
{
- struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_remove_irq_domain(vmd);
+ vmd_remove_irq_domain(vmd);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index c7affbbf73ab..b133967faef8 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -730,7 +730,7 @@ EXPORT_SYMBOL(pcim_iounmap);
* Mapping and region will get automatically released on driver detach. If
* desired, release manually only with pcim_iounmap_region().
*/
-static void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
const char *name)
{
int ret;
@@ -763,6 +763,7 @@ err_region:
return IOMEM_ERR_PTR(ret);
}
+EXPORT_SYMBOL(pcim_iomap_region);
/**
* pcim_iounmap_region - Unmap and release a PCI BAR
@@ -785,7 +786,7 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
}
/**
- * pcim_iomap_regions - Request and iomap PCI BARs
+ * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
* @name: Name associated with the requests
@@ -793,6 +794,9 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
* Returns: 0 on success, negative error code on failure.
*
* Request and iomap regions specified by @mask.
+ *
+ * This function is DEPRECATED. Do not use it in new code.
+ * Use pcim_iomap_region() instead.
*/
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
@@ -865,6 +869,7 @@ int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
{
return _pcim_request_region(pdev, bar, name, 0);
}
+EXPORT_SYMBOL(pcim_request_region);
/**
* pcim_request_region_exclusive - Request a PCI BAR exclusively
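
With pcim_iomap_region() now exported, single-BAR drivers can replace the
deprecated mask-based pcim_iomap_regions() with one call. A hypothetical
probe, assuming BAR 0 and a driver name of "demo":

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            void __iomem *regs;
            int ret;

            ret = pcim_enable_device(pdev);
            if (ret)
                    return ret;

            regs = pcim_iomap_region(pdev, 0, "demo");
            if (IS_ERR(regs))
                    return PTR_ERR(regs);

            /* Mapping and region are released automatically on detach. */
            return 0;
    }
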
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 84309dfe0c68..17f007109255 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -838,6 +838,10 @@ void pci_epc_destroy(struct pci_epc *epc)
{
pci_ep_cfs_remove_epc_group(epc->group);
device_unregister(&epc->dev);
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ pci_bus_release_domain_nr(&epc->dev, epc->domain_nr);
+#endif
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);
@@ -900,6 +904,16 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
epc->dev.release = pci_epc_release;
epc->ops = ops;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
+#else
+ /*
+ * TODO: If the architecture doesn't support generic PCI
+ * domains, then a custom implementation has to be used.
+ */
+ WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
+#endif
+
ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
if (ret)
goto put_dev;
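
The endpoint-core hunks pair an allocation with a release: __pci_epc_create()
now claims a PCI domain number for the controller (generic-domain configs
only), and pci_epc_destroy() must hand it back, otherwise the underlying
allocator leaks entries across bind/unbind cycles. In sketch form, echoing
the calls added above:

    /* __pci_epc_create(): claim a domain number for this controller */
    epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);

    /* pci_epc_destroy(): return it once the device is unregistered */
    pci_bus_release_domain_nr(&epc->dev, epc->domain_nr);
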
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index 9d428b0ea524..92e6e20e8595 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -51,11 +51,6 @@ ibmphp:
shpchp:
-* There is only a single implementation of struct hpc_ops. Can the struct be
- removed and its functions invoked directly? This has already been done in
- pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify
- if there was a specific reason not to apply the same change to shpchp.
-
* The hardirq handler shpc_isr() queues events on a workqueue. It can be
simplified by converting it to threaded IRQ handling. Use pciehp as a
template.
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c94b40e64baf..47a3ed16159a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -328,7 +328,7 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
} else {
/* Did not get a match on the target PCI device. Check
* if the current IRQ table entry is a PCI-to-PCI
- * bridge device. If so, and it's secondary bus
+ * bridge device. If so, and its secondary bus
* matches the bus number for the target device, I need
* to save the bridge's slot number. If I can not find
* an entry for the target device, I will have to
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index e9f1fb333a71..718bc6cf12cb 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -138,7 +138,7 @@ static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 o
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1)
return -1;
- if (vendID == 0xffffffff)
+ if (PCI_POSSIBLE_ERROR(vendID))
return -1;
return pci_bus_read_config_dword(bus, devfn, offset, value);
}
@@ -253,7 +253,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff))
+ if (!nobridge || PCI_POSSIBLE_ERROR(work))
return 0;
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 7333b305f2a5..055518ee354d 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -112,7 +112,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- /* if the slot exits it always contains a function */
+ /* if the slot exists it always contains a function */
*value = 1;
return 0;
}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 3a97f455336e..f0e2d2d54d71 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -72,7 +72,6 @@ struct slot {
u8 latch_save;
u8 pwr_save;
struct controller *ctrl;
- const struct hpc_ops *hpc_ops;
struct hotplug_slot hotplug_slot;
struct list_head slot_list;
struct delayed_work work; /* work for button event */
@@ -94,7 +93,6 @@ struct controller {
int slot_num_inc; /* 1 or -1 */
struct pci_dev *pci_dev;
struct list_head slot_list;
- const struct hpc_ops *hpc_ops;
wait_queue_head_t queue; /* sleep & wake process */
u8 slot_device_offset;
u32 pcix_misc2_reg; /* for amd pogo errata */
@@ -300,24 +298,22 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp);
}
-struct hpc_ops {
- int (*power_on_slot)(struct slot *slot);
- int (*slot_enable)(struct slot *slot);
- int (*slot_disable)(struct slot *slot);
- int (*set_bus_speed_mode)(struct slot *slot, enum pci_bus_speed speed);
- int (*get_power_status)(struct slot *slot, u8 *status);
- int (*get_attention_status)(struct slot *slot, u8 *status);
- int (*set_attention_status)(struct slot *slot, u8 status);
- int (*get_latch_status)(struct slot *slot, u8 *status);
- int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_prog_int)(struct slot *slot, u8 *prog_int);
- int (*query_power_fault)(struct slot *slot);
- void (*green_led_on)(struct slot *slot);
- void (*green_led_off)(struct slot *slot);
- void (*green_led_blink)(struct slot *slot);
- void (*release_ctlr)(struct controller *ctrl);
- int (*check_cmd_status)(struct controller *ctrl);
-};
+int shpchp_power_on_slot(struct slot *slot);
+int shpchp_slot_enable(struct slot *slot);
+int shpchp_slot_disable(struct slot *slot);
+int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed speed);
+int shpchp_get_power_status(struct slot *slot, u8 *status);
+int shpchp_get_attention_status(struct slot *slot, u8 *status);
+int shpchp_set_attention_status(struct slot *slot, u8 status);
+int shpchp_get_latch_status(struct slot *slot, u8 *status);
+int shpchp_get_adapter_status(struct slot *slot, u8 *status);
+int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *speed);
+int shpchp_get_prog_int(struct slot *slot, u8 *prog_int);
+int shpchp_query_power_fault(struct slot *slot);
+void shpchp_green_led_on(struct slot *slot);
+void shpchp_green_led_off(struct slot *slot);
+void shpchp_green_led_blink(struct slot *slot);
+void shpchp_release_ctlr(struct controller *ctrl);
+int shpchp_check_cmd_status(struct controller *ctrl);
#endif /* _SHPCHP_H */
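
The shpchp rework below is a devirtualization: with exactly one
implementation of struct hpc_ops ever registered, the function-pointer table
adds indirection without polymorphism, so each hpc_* function becomes a
non-static shpchp_* function called directly, matching what pciehp did in
commit 82a9e79ef132. The shape of the change at a call site:

    /* before: indirect call through the single-implementation vtable */
    rc = p_slot->hpc_ops->get_power_status(p_slot, &status);

    /* after: direct call; the compiler can now inline or inspect it */
    rc = shpchp_get_power_status(p_slot, &status);
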
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 56c7795ed890..a92e28b72908 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -81,7 +81,6 @@ static int init_slots(struct controller *ctrl)
slot->ctrl = ctrl;
slot->bus = ctrl->pci_dev->subordinate->number;
slot->device = ctrl->slot_device_offset + i;
- slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
@@ -150,7 +149,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
__func__, slot_name(slot));
slot->attention_save = status;
- slot->hpc_ops->set_attention_status(slot, status);
+ shpchp_set_attention_status(slot, status);
return 0;
}
@@ -183,7 +182,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_power_status(slot, value);
+ retval = shpchp_get_power_status(slot, value);
if (retval < 0)
*value = slot->pwr_save;
@@ -198,7 +197,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_attention_status(slot, value);
+ retval = shpchp_get_attention_status(slot, value);
if (retval < 0)
*value = slot->attention_save;
@@ -213,7 +212,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_latch_status(slot, value);
+ retval = shpchp_get_latch_status(slot, value);
if (retval < 0)
*value = slot->latch_save;
@@ -228,7 +227,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_adapter_status(slot, value);
+ retval = shpchp_get_adapter_status(slot, value);
if (retval < 0)
*value = slot->presence_save;
@@ -293,7 +292,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_cleanup_slots:
cleanup_slots(ctrl);
err_out_release_ctlr:
- ctrl->hpc_ops->release_ctlr(ctrl);
+ shpchp_release_ctlr(ctrl);
err_out_free_ctrl:
kfree(ctrl);
err_out_none:
@@ -306,7 +305,7 @@ static void shpc_remove(struct pci_dev *dev)
dev->shpc_managed = 0;
shpchp_remove_ctrl_files(ctrl);
- ctrl->hpc_ops->release_ctlr(ctrl);
+ shpchp_release_ctlr(ctrl);
kfree(ctrl);
}
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 6a6705e0cf17..e6c6f23bae27 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -51,7 +51,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
ctrl_dbg(ctrl, "Attention button interrupt received\n");
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
/*
* Button pressed - See if need to TAKE ACTION!!!
@@ -75,8 +75,8 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
ctrl_dbg(ctrl, "Switch interrupt received\n");
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_latch_status(p_slot, &getstatus);
ctrl_dbg(ctrl, "Card present %x Power status %x\n",
p_slot->presence_save, p_slot->pwr_save);
@@ -116,7 +116,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
/*
* Save the presence state
*/
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
if (p_slot->presence_save) {
/*
* Card Present
@@ -148,7 +148,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- if (!(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ if (!(shpchp_query_power_fault(p_slot))) {
/*
* Power fault Cleared
*/
@@ -181,7 +181,7 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
int rc = 0;
ctrl_dbg(ctrl, "Change speed to %d\n", speed);
- rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
+ rc = shpchp_set_bus_speed_mode(p_slot, speed);
if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
@@ -241,14 +241,14 @@ static int board_added(struct slot *p_slot)
__func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
/* Power on slot without connecting to bus */
- rc = p_slot->hpc_ops->power_on_slot(p_slot);
+ rc = shpchp_power_on_slot(p_slot);
if (rc) {
ctrl_err(ctrl, "Failed to power on slot\n");
return -1;
}
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
- rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
+ rc = shpchp_set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
@@ -256,14 +256,14 @@ static int board_added(struct slot *p_slot)
}
/* turn on board, blink green LED, turn off Amber LED */
- rc = p_slot->hpc_ops->slot_enable(p_slot);
+ rc = shpchp_slot_enable(p_slot);
if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
}
}
- rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
+ rc = shpchp_get_adapter_speed(p_slot, &asp);
if (rc) {
ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n");
return WRONG_BUS_FREQUENCY;
@@ -285,7 +285,7 @@ static int board_added(struct slot *p_slot)
return rc;
/* turn on board, blink green LED, turn off Amber LED */
- rc = p_slot->hpc_ops->slot_enable(p_slot);
+ rc = shpchp_slot_enable(p_slot);
if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
@@ -313,13 +313,13 @@ static int board_added(struct slot *p_slot)
p_slot->is_a_board = 0x01;
p_slot->pwr_save = 1;
- p_slot->hpc_ops->green_led_on(p_slot);
+ shpchp_green_led_on(p_slot);
return 0;
err_exit:
/* turn off slot, turn on Amber LED, turn off Green LED */
- rc = p_slot->hpc_ops->slot_disable(p_slot);
+ rc = shpchp_slot_disable(p_slot);
if (rc) {
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
__func__);
@@ -352,14 +352,14 @@ static int remove_board(struct slot *p_slot)
p_slot->status = 0x01;
/* turn off slot, turn on Amber LED, turn off Green LED */
- rc = p_slot->hpc_ops->slot_disable(p_slot);
+ rc = shpchp_slot_disable(p_slot);
if (rc) {
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
__func__);
return rc;
}
- rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ rc = shpchp_set_attention_status(p_slot, 0);
if (rc) {
ctrl_err(ctrl, "Issue of Set Attention command failed\n");
return rc;
@@ -401,7 +401,7 @@ static void shpchp_pushbutton_thread(struct work_struct *work)
case POWERON_STATE:
mutex_unlock(&p_slot->lock);
if (shpchp_enable_slot(p_slot))
- p_slot->hpc_ops->green_led_off(p_slot);
+ shpchp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
@@ -446,10 +446,10 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
static void update_slot_info(struct slot *slot)
{
- slot->hpc_ops->get_power_status(slot, &slot->pwr_save);
- slot->hpc_ops->get_attention_status(slot, &slot->attention_save);
- slot->hpc_ops->get_latch_status(slot, &slot->latch_save);
- slot->hpc_ops->get_adapter_status(slot, &slot->presence_save);
+ shpchp_get_power_status(slot, &slot->pwr_save);
+ shpchp_get_attention_status(slot, &slot->attention_save);
+ shpchp_get_latch_status(slot, &slot->latch_save);
+ shpchp_get_adapter_status(slot, &slot->presence_save);
}
/*
@@ -462,7 +462,7 @@ static void handle_button_press_event(struct slot *p_slot)
switch (p_slot->state) {
case STATIC_STATE:
- p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ shpchp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
@@ -473,8 +473,8 @@ static void handle_button_press_event(struct slot *p_slot)
slot_name(p_slot));
}
/* blink green LED and turn off amber */
- p_slot->hpc_ops->green_led_blink(p_slot);
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ shpchp_green_led_blink(p_slot);
+ shpchp_set_attention_status(p_slot, 0);
queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
@@ -489,10 +489,10 @@ static void handle_button_press_event(struct slot *p_slot)
slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE)
- p_slot->hpc_ops->green_led_on(p_slot);
+ shpchp_green_led_on(p_slot);
else
- p_slot->hpc_ops->green_led_off(p_slot);
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ shpchp_green_led_off(p_slot);
+ shpchp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
slot_name(p_slot));
p_slot->state = STATIC_STATE;
@@ -526,8 +526,8 @@ static void interrupt_event_handler(struct work_struct *work)
break;
case INT_POWER_FAULT:
ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__);
- p_slot->hpc_ops->set_attention_status(p_slot, 1);
- p_slot->hpc_ops->green_led_off(p_slot);
+ shpchp_set_attention_status(p_slot, 1);
+ shpchp_green_led_off(p_slot);
break;
default:
update_slot_info(p_slot);
@@ -547,17 +547,17 @@ static int shpchp_enable_slot (struct slot *p_slot)
/* Check to see if (latch closed, card present, power off) */
mutex_lock(&p_slot->ctrl->crit_sect);
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = shpchp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = shpchp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = shpchp_get_power_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
slot_name(p_slot));
@@ -567,10 +567,10 @@ static int shpchp_enable_slot (struct slot *p_slot)
p_slot->is_a_board = 1;
/* We have to save the presence info for these slots */
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
- p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_power_status(p_slot, &p_slot->pwr_save);
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_latch_status(p_slot, &getstatus);
if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
@@ -584,9 +584,8 @@ static int shpchp_enable_slot (struct slot *p_slot)
retval = board_added(p_slot);
if (retval) {
- p_slot->hpc_ops->get_adapter_status(p_slot,
- &(p_slot->presence_save));
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_latch_status(p_slot, &getstatus);
}
update_slot_info(p_slot);
@@ -608,17 +607,17 @@ static int shpchp_disable_slot (struct slot *p_slot)
/* Check to see if (latch closed, card present, power on) */
mutex_lock(&p_slot->ctrl->crit_sect);
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = shpchp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = shpchp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = shpchp_get_power_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
slot_name(p_slot));
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 48e4daefc44a..012b9e3fe5b0 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -167,7 +167,6 @@
static irqreturn_t shpc_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
-static int hpc_check_cmd_status(struct controller *ctrl);
static inline u8 shpc_readb(struct controller *ctrl, int reg)
{
@@ -317,7 +316,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
if (retval)
goto out;
- cmd_status = hpc_check_cmd_status(slot->ctrl);
+ cmd_status = shpchp_check_cmd_status(slot->ctrl);
if (cmd_status) {
ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n",
cmd, cmd_status);
@@ -328,7 +327,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
return retval;
}
-static int hpc_check_cmd_status(struct controller *ctrl)
+int shpchp_check_cmd_status(struct controller *ctrl)
{
int retval = 0;
u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
@@ -357,7 +356,7 @@ static int hpc_check_cmd_status(struct controller *ctrl)
}
-static int hpc_get_attention_status(struct slot *slot, u8 *status)
+int shpchp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -381,7 +380,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_power_status(struct slot *slot, u8 *status)
+int shpchp_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -406,7 +405,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
}
-static int hpc_get_latch_status(struct slot *slot, u8 *status)
+int shpchp_get_latch_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -416,7 +415,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_adapter_status(struct slot *slot, u8 *status)
+int shpchp_get_adapter_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -427,7 +426,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
+int shpchp_get_prog_int(struct slot *slot, u8 *prog_int)
{
struct controller *ctrl = slot->ctrl;
@@ -436,7 +435,7 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
return 0;
}
-static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
+int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
{
int retval = 0;
struct controller *ctrl = slot->ctrl;
@@ -444,7 +443,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
u8 m66_cap = !!(slot_reg & MHZ66_CAP);
u8 pi, pcix_cap;
- retval = hpc_get_prog_int(slot, &pi);
+ retval = shpchp_get_prog_int(slot, &pi);
if (retval)
return retval;
@@ -489,7 +488,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
return retval;
}
-static int hpc_query_power_fault(struct slot *slot)
+int shpchp_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -498,7 +497,7 @@ static int hpc_query_power_fault(struct slot *slot)
return !(slot_reg & POWER_FAULT);
}
-static int hpc_set_attention_status(struct slot *slot, u8 value)
+int shpchp_set_attention_status(struct slot *slot, u8 value)
{
u8 slot_cmd = 0;
@@ -520,22 +519,22 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
}
-static void hpc_set_green_led_on(struct slot *slot)
+void shpchp_green_led_on(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON);
}
-static void hpc_set_green_led_off(struct slot *slot)
+void shpchp_green_led_off(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF);
}
-static void hpc_set_green_led_blink(struct slot *slot)
+void shpchp_green_led_blink(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK);
}
-static void hpc_release_ctlr(struct controller *ctrl)
+void shpchp_release_ctlr(struct controller *ctrl)
{
int i;
u32 slot_reg, serr_int;
@@ -575,7 +574,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
}
-static int hpc_power_on_slot(struct slot *slot)
+int shpchp_power_on_slot(struct slot *slot)
{
int retval;
@@ -586,7 +585,7 @@ static int hpc_power_on_slot(struct slot *slot)
return retval;
}
-static int hpc_slot_enable(struct slot *slot)
+int shpchp_slot_enable(struct slot *slot)
{
int retval;
@@ -599,7 +598,7 @@ static int hpc_slot_enable(struct slot *slot)
return retval;
}
-static int hpc_slot_disable(struct slot *slot)
+int shpchp_slot_disable(struct slot *slot)
{
int retval;
@@ -681,7 +680,7 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
}
-static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
+int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
{
int retval;
struct controller *ctrl = slot->ctrl;
@@ -871,28 +870,6 @@ static int shpc_get_max_bus_speed(struct controller *ctrl)
return retval;
}
-static const struct hpc_ops shpchp_hpc_ops = {
- .power_on_slot = hpc_power_on_slot,
- .slot_enable = hpc_slot_enable,
- .slot_disable = hpc_slot_disable,
- .set_bus_speed_mode = hpc_set_bus_speed_mode,
- .set_attention_status = hpc_set_attention_status,
- .get_power_status = hpc_get_power_status,
- .get_attention_status = hpc_get_attention_status,
- .get_latch_status = hpc_get_latch_status,
- .get_adapter_status = hpc_get_adapter_status,
-
- .get_adapter_speed = hpc_get_adapter_speed,
- .get_prog_int = hpc_get_prog_int,
-
- .query_power_fault = hpc_query_power_fault,
- .green_led_on = hpc_set_green_led_on,
- .green_led_off = hpc_set_green_led_off,
- .green_led_blink = hpc_set_green_led_blink,
-
- .release_ctlr = hpc_release_ctlr,
-};
-
int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
{
int rc = -1, num_slots = 0;
@@ -978,8 +955,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
/* Setup wait queue */
init_waitqueue_head(&ctrl->queue);
- ctrl->hpc_ops = &shpchp_hpc_ops;
-
/* Return PCI Controller Info */
slot_config = shpc_readl(ctrl, SLOT_CONFIG);
ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8;
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
index a715a4803c95..9fb7cacc15cd 100644
--- a/drivers/pci/iomap.c
+++ b/drivers/pci/iomap.c
@@ -156,7 +156,7 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc);
* the different IOMAP ranges.
*
* But if the architecture does not use the generic iomap code, and if
- * it has _not_ defined it's own private pci_iounmap function, we define
+ * it has _not_ defined its own private pci_iounmap function, we define
* it here.
*
* NOTE! This default implementation assumes that if the architecture
diff --git a/drivers/pci/npem.c b/drivers/pci/npem.c
new file mode 100644
index 000000000000..97507e0df769
--- /dev/null
+++ b/drivers/pci/npem.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe enclosure management driver for LED interfaces based on
+ * indications. It controls *which* indications blink but does not specify
+ * *how* they blink - that is hardware defined.
+ *
+ * The driver name refers to Native PCIe Enclosure Management, the first
+ * indication-oriented standard with a formal specification.
+ *
+ * Native PCIe Enclosure Management (NPEM)
+ * PCIe Base Specification r6.1 sec 6.28, 7.9.19
+ *
+ * _DSM Definitions for PCIe SSD Status LED
+ * PCI Firmware Specification, r3.3 sec 4.7
+ *
+ * Two backends are supported to manipulate indications: Direct NPEM register
+ * access (npem_ops) and indirect access through the ACPI _DSM (dsm_ops).
+ * _DSM is used if supported, else NPEM.
+ *
+ * Copyright (c) 2021-2022 Dell Inc.
+ * Copyright (c) 2023-2024 Intel Corporation
+ * Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+#include <linux/uleds.h>
+
+#include "pci.h"
+
+struct indication {
+ u32 bit;
+ const char *name;
+};
+
+static const struct indication npem_indications[] = {
+ {PCI_NPEM_IND_OK, "enclosure:ok"},
+ {PCI_NPEM_IND_LOCATE, "enclosure:locate"},
+ {PCI_NPEM_IND_FAIL, "enclosure:fail"},
+ {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
+ {PCI_NPEM_IND_PFA, "enclosure:pfa"},
+ {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
+ {PCI_NPEM_IND_ICA, "enclosure:ica"},
+ {PCI_NPEM_IND_IFA, "enclosure:ifa"},
+ {PCI_NPEM_IND_IDT, "enclosure:idt"},
+ {PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
+ {PCI_NPEM_IND_SPEC_0, "enclosure:specific_0"},
+ {PCI_NPEM_IND_SPEC_1, "enclosure:specific_1"},
+ {PCI_NPEM_IND_SPEC_2, "enclosure:specific_2"},
+ {PCI_NPEM_IND_SPEC_3, "enclosure:specific_3"},
+ {PCI_NPEM_IND_SPEC_4, "enclosure:specific_4"},
+ {PCI_NPEM_IND_SPEC_5, "enclosure:specific_5"},
+ {PCI_NPEM_IND_SPEC_6, "enclosure:specific_6"},
+ {PCI_NPEM_IND_SPEC_7, "enclosure:specific_7"},
+ {0, NULL}
+};
+
+/* _DSM PCIe SSD LED States correspond to NPEM register values */
+static const struct indication dsm_indications[] = {
+ {PCI_NPEM_IND_OK, "enclosure:ok"},
+ {PCI_NPEM_IND_LOCATE, "enclosure:locate"},
+ {PCI_NPEM_IND_FAIL, "enclosure:fail"},
+ {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
+ {PCI_NPEM_IND_PFA, "enclosure:pfa"},
+ {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
+ {PCI_NPEM_IND_ICA, "enclosure:ica"},
+ {PCI_NPEM_IND_IFA, "enclosure:ifa"},
+ {PCI_NPEM_IND_IDT, "enclosure:idt"},
+ {PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
+ {0, NULL}
+};
+
+#define for_each_indication(ind, inds) \
+ for (ind = inds; ind->bit; ind++)
+
+/*
+ * The driver has internal list of supported indications. Ideally, the driver
+ * should not touch bits that are not defined and for which LED devices are
+ * not exposed but in reality, it needs to turn them off.
+ *
+ * Otherwise, there will be no possibility to turn off indications turned on by
+ * other utilities or turned on by default and it leads to bad user experience.
+ *
+ * Additionally, it excludes NPEM commands like RESET or ENABLE.
+ */
+static u32 reg_to_indications(u32 caps, const struct indication *inds)
+{
+ const struct indication *ind;
+ u32 supported_indications = 0;
+
+ for_each_indication(ind, inds)
+ supported_indications |= ind->bit;
+
+ return caps & supported_indications;
+}
+
+/**
+ * struct npem_led - LED details
+ * @indication: indication details
+ * @npem: NPEM device
+ * @name: LED name
+ * @led: LED device
+ */
+struct npem_led {
+ const struct indication *indication;
+ struct npem *npem;
+ char name[LED_MAX_NAME_SIZE];
+ struct led_classdev led;
+};
+
+/**
+ * struct npem_ops - backend specific callbacks
+ * @get_active_indications: get active indications
+ * npem: NPEM device
+ * inds: response buffer
+ * @set_active_indications: set new indications
+ * npem: NPEM device
+ * inds: bit mask to set
+ * @inds: array of supported indications; the set is backend specific
+ * @name: backend name
+ */
+struct npem_ops {
+ int (*get_active_indications)(struct npem *npem, u32 *inds);
+ int (*set_active_indications)(struct npem *npem, u32 inds);
+ const struct indication *inds;
+ const char *name;
+};
+
+/**
+ * struct npem - NPEM device properties
+ * @dev: PCI device this driver is attached to
+ * @ops: backend specific callbacks
+ * @lock: serializes concurrent access to NPEM device by multiple LED devices
+ * @pos: cached offset of NPEM Capability Register in Configuration Space;
+ * only used if NPEM registers are accessed directly and not through _DSM
+ * @supported_indications: cached bit mask of supported indications;
+ * non-indication and reserved bits in the NPEM Capability Register are
+ * cleared in this bit mask
+ * @active_indications: cached bit mask of active indications;
+ * non-indication and reserved bits in the NPEM Control Register are
+ * cleared in this bit mask
+ * @active_inds_initialized: whether @active_indications has been initialized;
+ * On Dell platforms, it is required that IPMI drivers are loaded before
+ * the GET_STATE_DSM method is invoked: They use an IPMI OpRegion to
+ * get/set the active LEDs. By initializing @active_indications lazily
+ * (on first access to an LED), IPMI drivers are given a chance to load.
+ * If they are not loaded in time, users will see various errors on LED
+ * access in dmesg. Once they are loaded, the errors go away and LED
+ * access becomes possible.
+ * @led_cnt: size of @leds array
+ * @leds: array containing LED class devices of all supported LEDs
+ */
+struct npem {
+ struct pci_dev *dev;
+ const struct npem_ops *ops;
+ struct mutex lock;
+ u16 pos;
+ u32 supported_indications;
+ u32 active_indications;
+ unsigned int active_inds_initialized:1;
+ int led_cnt;
+ struct npem_led leds[];
+};
+
+static int npem_read_reg(struct npem *npem, u16 reg, u32 *val)
+{
+ int ret = pci_read_config_dword(npem->dev, npem->pos + reg, val);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int npem_write_ctrl(struct npem *npem, u32 reg)
+{
+ int pos = npem->pos + PCI_NPEM_CTRL;
+ int ret = pci_write_config_dword(npem->dev, pos, reg);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int npem_get_active_indications(struct npem *npem, u32 *inds)
+{
+ u32 ctrl;
+ int ret;
+
+ ret = npem_read_reg(npem, PCI_NPEM_CTRL, &ctrl);
+ if (ret)
+ return ret;
+
+ /* If PCI_NPEM_CTRL_ENABLE is not set then no indication should blink */
+ if (!(ctrl & PCI_NPEM_CTRL_ENABLE)) {
+ *inds = 0;
+ return 0;
+ }
+
+ *inds = ctrl & npem->supported_indications;
+
+ return 0;
+}
+
+static int npem_set_active_indications(struct npem *npem, u32 inds)
+{
+ int ctrl, ret, ret_val;
+ u32 cc_status;
+
+ lockdep_assert_held(&npem->lock);
+
+ /* This bit is always required */
+ ctrl = inds | PCI_NPEM_CTRL_ENABLE;
+
+ ret = npem_write_ctrl(npem, ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * For the case where a NPEM command has not completed immediately,
+ * it is recommended that software not continuously "spin" on polling
+ * the status register, but rather poll under interrupt at a reduced
+ * rate; for example at 10 ms intervals.
+ *
+ * PCIe r6.1 sec 6.28 "Implementation Note: Software Polling of NPEM
+ * Command Completed"
+ */
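+	/*
+	 * read_poll_timeout() below re-runs npem_read_reg() on
+	 * PCI_NPEM_STATUS every 10 ms until either the read itself fails
+	 * (ret_val) or the Command Completed bit is observed, giving up
+	 * after one second.
+	 */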
+ ret = read_poll_timeout(npem_read_reg, ret_val,
+ ret_val || (cc_status & PCI_NPEM_STATUS_CC),
+ 10 * USEC_PER_MSEC, USEC_PER_SEC, false, npem,
+ PCI_NPEM_STATUS, &cc_status);
+ if (ret)
+ return ret;
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * All writes to control register, including writes that do not change
+ * the register value, are NPEM commands and should eventually result
+ * in a command completion indication in the NPEM Status Register.
+ *
+ * PCIe Base Specification r6.1 sec 7.9.19.3
+ *
+ * The register may not be updated, or other conflicting bits may be
+ * cleared. The spec is not strict here. Read the NPEM Control register
+ * back after the write to keep the cache in sync.
+ */
+ return npem_get_active_indications(npem, &npem->active_indications);
+}
+
+static const struct npem_ops npem_ops = {
+ .get_active_indications = npem_get_active_indications,
+ .set_active_indications = npem_set_active_indications,
+ .name = "Native PCIe Enclosure Management",
+ .inds = npem_indications,
+};
+
+#define DSM_GUID GUID_INIT(0x5d524d9d, 0xfff9, 0x4d4b, 0x8c, 0xb7, 0x74, 0x7e,\
+ 0xd5, 0x1e, 0x19, 0x4d)
+#define GET_SUPPORTED_STATES_DSM 1
+#define GET_STATE_DSM 2
+#define SET_STATE_DSM 3
+
+static const guid_t dsm_guid = DSM_GUID;
+
+static bool npem_has_dsm(struct pci_dev *pdev)
+{
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return false;
+
+ return acpi_check_dsm(handle, &dsm_guid, 0x1,
+ BIT(GET_SUPPORTED_STATES_DSM) |
+ BIT(GET_STATE_DSM) | BIT(SET_STATE_DSM));
+}
+
+struct dsm_output {
+ u16 status;
+ u8 function_specific_err;
+ u8 vendor_specific_err;
+ u32 state;
+};
+
+/**
+ * dsm_evaluate() - send DSM PCIe SSD Status LED command
+ * @pdev: PCI device
+ * @dsm_func: DSM LED Function
+ * @output: buffer to copy DSM Response
+ * @value_to_set: value for SET_STATE_DSM function
+ *
+ * To avoid burdening the caller with ACPI context handling, the returned
+ * _DSM Output Buffer is copied out.
+ */
+static int dsm_evaluate(struct pci_dev *pdev, u64 dsm_func,
+ struct dsm_output *output, u32 value_to_set)
+{
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *out_obj, arg3[2];
+ union acpi_object *arg3_p = NULL;
+
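+	/*
+	 * SET_STATE_DSM takes Arg3 as a package holding a single 4-byte
+	 * buffer with the requested state bit mask; the other functions
+	 * take no Arg3 (NULL).
+	 */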
+ if (dsm_func == SET_STATE_DSM) {
+ arg3[0].type = ACPI_TYPE_PACKAGE;
+ arg3[0].package.count = 1;
+ arg3[0].package.elements = &arg3[1];
+
+ arg3[1].type = ACPI_TYPE_BUFFER;
+ arg3[1].buffer.length = 4;
+ arg3[1].buffer.pointer = (u8 *)&value_to_set;
+
+ arg3_p = arg3;
+ }
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &dsm_guid, 0x1, dsm_func,
+ arg3_p, ACPI_TYPE_BUFFER);
+ if (!out_obj)
+ return -EIO;
+
+ if (out_obj->buffer.length < sizeof(struct dsm_output)) {
+ ACPI_FREE(out_obj);
+ return -EIO;
+ }
+
+ memcpy(output, out_obj->buffer.pointer, sizeof(struct dsm_output));
+
+ ACPI_FREE(out_obj);
+ return 0;
+}
+
+static int dsm_get(struct pci_dev *pdev, u64 dsm_func, u32 *buf)
+{
+ struct dsm_output output;
+ int ret = dsm_evaluate(pdev, dsm_func, &output, 0);
+
+ if (ret)
+ return ret;
+
+ if (output.status != 0)
+ return -EIO;
+
+ *buf = output.state;
+ return 0;
+}
+
+static int dsm_get_active_indications(struct npem *npem, u32 *buf)
+{
+ int ret = dsm_get(npem->dev, GET_STATE_DSM, buf);
+
+ /* Filter out unsupported indications from the response */
+ *buf &= npem->supported_indications;
+ return ret;
+}
+
+static int dsm_set_active_indications(struct npem *npem, u32 value)
+{
+ struct dsm_output output;
+ int ret = dsm_evaluate(npem->dev, SET_STATE_DSM, &output, value);
+
+ if (ret)
+ return ret;
+
+ switch (output.status) {
+ case 4:
+ /*
+ * Not all bits are set: if this error value is reported, the
+ * platform disregarded some or all of the requested state changes.
+ * OSPM should check the resulting PCIe SSD Status LED States to
+ * see what, if anything, has changed.
+ *
+ * PCI Firmware Specification, r3.3 Table 4-19.
+ */
+ if (output.function_specific_err != 1)
+ return -EIO;
+ fallthrough;
+ case 0:
+ break;
+ default:
+ return -EIO;
+ }
+
+ npem->active_indications = output.state;
+
+ return 0;
+}
+
+static const struct npem_ops dsm_ops = {
+ .get_active_indications = dsm_get_active_indications,
+ .set_active_indications = dsm_set_active_indications,
+ .name = "_DSM PCIe SSD Status LED Management",
+ .inds = dsm_indications,
+};
+
+static int npem_initialize_active_indications(struct npem *npem)
+{
+ int ret;
+
+ lockdep_assert_held(&npem->lock);
+
+ if (npem->active_inds_initialized)
+ return 0;
+
+ ret = npem->ops->get_active_indications(npem,
+ &npem->active_indications);
+ if (ret)
+ return ret;
+
+ npem->active_inds_initialized = true;
+ return 0;
+}
+
+/*
+ * The status of each indicator is cached on the first brightness_get()/
+ * brightness_set() call and updated at write time. brightness_get() is
+ * only responsible for reflecting the last written/cached value.
+ */
+static enum led_brightness brightness_get(struct led_classdev *led)
+{
+ struct npem_led *nled = container_of(led, struct npem_led, led);
+ struct npem *npem = nled->npem;
+ int ret, val = 0;
+
+ ret = mutex_lock_interruptible(&npem->lock);
+ if (ret)
+ return ret;
+
+ ret = npem_initialize_active_indications(npem);
+ if (ret)
+ goto out;
+
+ if (npem->active_indications & nled->indication->bit)
+ val = 1;
+
+out:
+ mutex_unlock(&npem->lock);
+ return val;
+}
+
+static int brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct npem_led *nled = container_of(led, struct npem_led, led);
+ struct npem *npem = nled->npem;
+ u32 indications;
+ int ret;
+
+ ret = mutex_lock_interruptible(&npem->lock);
+ if (ret)
+ return ret;
+
+ ret = npem_initialize_active_indications(npem);
+ if (ret)
+ goto out;
+
+ if (brightness == 0)
+ indications = npem->active_indications & ~(nled->indication->bit);
+ else
+ indications = npem->active_indications | nled->indication->bit;
+
+ ret = npem->ops->set_active_indications(npem, indications);
+
+out:
+ mutex_unlock(&npem->lock);
+ return ret;
+}
+
+static void npem_free(struct npem *npem)
+{
+ struct npem_led *nled;
+ int cnt;
+
+ if (!npem)
+ return;
+
+ for (cnt = 0; cnt < npem->led_cnt; cnt++) {
+ nled = &npem->leds[cnt];
+
+ if (nled->name[0])
+ led_classdev_unregister(&nled->led);
+ }
+
+ mutex_destroy(&npem->lock);
+ kfree(npem);
+}
+
+static int pci_npem_set_led_classdev(struct npem *npem, struct npem_led *nled)
+{
+ struct led_classdev *led = &nled->led;
+ struct led_init_data init_data = {};
+ char *name = nled->name;
+ int ret;
+
+ init_data.devicename = pci_name(npem->dev);
+ init_data.default_label = nled->indication->name;
+
+ ret = led_compose_name(&npem->dev->dev, &init_data, name);
+ if (ret)
+ return ret;
+
+ led->name = name;
+ led->brightness_set_blocking = brightness_set;
+ led->brightness_get = brightness_get;
+ led->max_brightness = 1;
+ led->default_trigger = "none";
+ led->flags = 0;
+
+ ret = led_classdev_register(&npem->dev->dev, led);
+ if (ret)
+ /* Clear the name to indicate that it is not registered. */
+ name[0] = 0;
+ return ret;
+}
+
+static int pci_npem_init(struct pci_dev *dev, const struct npem_ops *ops,
+ int pos, u32 caps)
+{
+ u32 supported = reg_to_indications(caps, ops->inds);
+ int supported_cnt = hweight32(supported);
+ const struct indication *indication;
+ struct npem_led *nled;
+ struct npem *npem;
+ int led_idx = 0;
+ int ret;
+
+ npem = kzalloc(struct_size(npem, leds, supported_cnt), GFP_KERNEL);
+ if (!npem)
+ return -ENOMEM;
+
+ npem->supported_indications = supported;
+ npem->led_cnt = supported_cnt;
+ npem->pos = pos;
+ npem->dev = dev;
+ npem->ops = ops;
+
+ mutex_init(&npem->lock);
+
+ for_each_indication(indication, npem_indications) {
+ if (!(npem->supported_indications & indication->bit))
+ continue;
+
+ nled = &npem->leds[led_idx++];
+ nled->indication = indication;
+ nled->npem = npem;
+
+ ret = pci_npem_set_led_classdev(npem, nled);
+ if (ret) {
+ npem_free(npem);
+ return ret;
+ }
+ }
+
+ dev->npem = npem;
+ return 0;
+}
+
+void pci_npem_remove(struct pci_dev *dev)
+{
+ npem_free(dev->npem);
+}
+
+void pci_npem_create(struct pci_dev *dev)
+{
+ const struct npem_ops *ops = &npem_ops;
+ int pos = 0, ret;
+ u32 cap;
+
+ if (npem_has_dsm(dev)) {
+ /*
+ * The OS should use the _DSM for LED control if it is available,
+ * per PCI Firmware Spec r3.3 sec 4.7.
+ */
+ ret = dsm_get(dev, GET_SUPPORTED_STATES_DSM, &cap);
+ if (ret)
+ return;
+
+ ops = &dsm_ops;
+ } else {
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_NPEM);
+ if (pos == 0)
+ return;
+
+ if (pci_read_config_dword(dev, pos + PCI_NPEM_CAP, &cap) != 0 ||
+ (cap & PCI_NPEM_CAP_CAPABLE) == 0)
+ return;
+ }
+
+ pci_info(dev, "Configuring %s\n", ops->name);
+
+ ret = pci_npem_init(dev, ops, pos, cap);
+ if (ret)
+ pci_err(dev, "Failed to register %s, err: %d\n", ops->name,
+ ret);
+}
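The registered LEDs surface in sysfs under names composed by
led_compose_name() from the PCI device name and the indication, e.g.
"/sys/class/leds/0000:5e:00.0:enclosure:locate" (the address is
illustrative). A minimal userspace sketch, assuming such a path exists,
that ends up in brightness_set() above:

	#include <fcntl.h>
	#include <unistd.h>

	/* Write "1" to an NPEM LED brightness file; max_brightness is 1 */
	static int npem_indication_on(const char *brightness_path)
	{
		int fd = open(brightness_path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}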
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 9334b2dd4764..6658c1edd464 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -257,8 +257,8 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
*/
.rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
- PCI_EXP_RTCTL_CRSSVE),
- .ro = PCI_EXP_RTCAP_CRSVIS << 16,
+ PCI_EXP_RTCTL_RRS_SVE),
+ .ro = PCI_EXP_RTCAP_RRS_SV << 16,
},
[PCI_EXP_RTSTA / 4] = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f412ef73a6e4..35270172c833 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1670,7 +1670,7 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}
-struct bus_type pci_bus_type = {
+const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
.uevent = pci_uevent,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 40cfa716392f..5d0f4db1cab7 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -31,6 +31,10 @@
#include <linux/aperture.h>
#include "pci.h"
+#ifndef ARCH_PCI_DEV_GROUPS
+#define ARCH_PCI_DEV_GROUPS
+#endif
+
static int sysfs_initialized; /* = 0 */
/* show configuration fields */
@@ -1624,6 +1628,7 @@ const struct attribute_group *pci_dev_groups[] = {
&pci_dev_acpi_attr_group,
#endif
&pci_dev_resource_resize_group,
+ ARCH_PCI_DEV_GROUPS
NULL,
};
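ARCH_PCI_DEV_GROUPS expands inside the pci_dev_groups[] initializer, just
before the NULL terminator, so an architecture can append attribute groups
by defining the macro in its <asm/pci.h>. A hypothetical example (the group
name is made up):

	#define ARCH_PCI_DEV_GROUPS &foo_arch_pci_attr_group,

The trailing comma is required so the initializer list stays well formed.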
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ffaaca0978cb..7d85c04fbba2 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1283,7 +1283,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
int delay = 1;
bool retrain = false;
- struct pci_dev *bridge;
+ struct pci_dev *root, *bridge;
+
+ root = pcie_find_root_port(dev);
if (pci_is_pcie(dev)) {
bridge = pci_upstream_bridge(dev);
@@ -1292,16 +1294,23 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
}
/*
- * After reset, the device should not silently discard config
- * requests, but it may still indicate that it needs more time by
- * responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
- * the read (except when CRS SV is enabled and the read was for the
- * Vendor ID; in that case it synthesizes 0x0001 data).
+ * The caller has already waited long enough after a reset that the
+ * device should respond to config requests, but it may respond
+ * with Request Retry Status (RRS) if it needs more time to
+ * initialize.
*
- * Wait for the device to return a non-CRS completion. Read the
- * Command register instead of Vendor ID so we don't have to
- * contend with the CRS SV value.
+ * If the device is below a Root Port with Configuration RRS
+ * Software Visibility enabled, reading the Vendor ID returns a
+ * special data value if the device responded with RRS. Read the
+ * Vendor ID until we get non-RRS status.
+ *
+ * If there's no Root Port or Configuration RRS Software Visibility
+ * is not enabled, the device may still respond with RRS, but
+ * hardware may retry the config request. If no retries receive
+ * Successful Completion, hardware generally synthesizes ~0
+ * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor
+ * ID for VFs and non-existent devices also returns ~0, so read the
+ * Command register until it returns something other than ~0.
*/
for (;;) {
u32 id;
@@ -1311,9 +1320,15 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
return -ENOTTY;
}
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- if (!PCI_POSSIBLE_ERROR(id))
- break;
+ if (root && root->config_rrs_sv) {
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
+ if (!pci_bus_rrs_vendor_id(id))
+ break;
+ } else {
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ if (!PCI_POSSIBLE_ERROR(id))
+ break;
+ }
if (delay > timeout) {
pci_warn(dev, "not ready %dms after %s; giving up\n",
@@ -1324,7 +1339,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
if (delay > PCI_RESET_WAIT) {
if (retrain) {
retrain = false;
- if (pcie_failed_link_retrain(bridge)) {
+ if (pcie_failed_link_retrain(bridge) == 0) {
delay = 1;
continue;
}
@@ -4718,7 +4733,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
}
- return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+
+ /*
+ * Clear LBMS after a manual retrain so that the bit can be used
+ * to track link speed or width changes made by hardware itself
+ * in an attempt to correct unreliable link operation.
+ */
+ pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+ return rc;
}
/**
@@ -5672,8 +5695,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
pci_dev_restore(dev);
- if (dev->subordinate)
+ if (dev->subordinate) {
+ pci_bridge_wait_for_secondary_bus(dev, "bus reset");
pci_bus_restore_locked(dev->subordinate);
+ }
}
}
@@ -5707,8 +5732,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
if (!dev->slot || dev->slot != slot)
continue;
pci_dev_restore(dev);
- if (dev->subordinate)
+ if (dev->subordinate) {
+ pci_bridge_wait_for_secondary_bus(dev, "slot reset");
pci_bus_restore_locked(dev->subordinate);
+ }
}
}
@@ -6802,16 +6829,16 @@ static int of_pci_bus_find_domain_nr(struct device *parent)
return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
}
-static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
- if (bus->domain_nr < 0)
+ if (domain_nr < 0)
return;
/* Release domain from IDA where it was allocated. */
- if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
- ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
+ if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
+ ida_free(&pci_domain_nr_static_ida, domain_nr);
else
- ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
+ ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
}
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
@@ -6820,11 +6847,11 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
acpi_pci_bus_find_domain_nr(bus);
}
-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
if (!acpi_disabled)
return;
- of_pci_bus_release_domain_nr(bus, parent);
+ of_pci_bus_release_domain_nr(parent, domain_nr);
}
#endif
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 79c8398f3938..14d00ce45bfa 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -13,10 +13,25 @@
#define PCIE_LINK_RETRAIN_TIMEOUT_MS 1000
-/* Power stable to PERST# inactive from PCIe card Electromechanical Spec */
+/*
+ * Power stable to PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PVPERL".
+ */
#define PCIE_T_PVPERL_MS 100
/*
+ * REFCLK stable before PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PERST-CLK".
+ */
+#define PCIE_T_PERST_CLK_US 100
+
+/*
* End of conventional reset (PERST# de-asserted) to first configuration
* request (device able to respond with a "Request Retry Status" completion),
* from PCIe r6.0, sec 6.6.1.
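A host controller driver typically consumes these constants in its power-up
path. A minimal sketch, assuming hypothetical perst/refclk handles already
obtained via gpiod_get()/clk_get():

	static void example_deassert_perst(struct gpio_desc *perst,
					   struct clk *refclk)
	{
		/* Power rails stable: wait T_PVPERL before releasing PERST# */
		msleep(PCIE_T_PVPERL_MS);

		/* REFCLK must be stable T_PERST-CLK before PERST# inactive */
		if (clk_prepare_enable(refclk))
			return;	/* sketch: real code would propagate the error */
		fsleep(PCIE_T_PERST_CLK_US);

		gpiod_set_value_cansleep(perst, 0);	/* deassert PERST# */
	}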
@@ -124,7 +139,6 @@ void pcie_clear_device_status(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
-int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
bool pci_dev_need_resume(struct pci_dev *dev);
void pci_dev_adjust_pme(struct pci_dev *dev);
@@ -139,6 +153,11 @@ bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);
+static inline bool pci_bus_rrs_vendor_id(u32 l)
+{
+ return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
+}
+
static inline void pci_wakeup_event(struct pci_dev *dev)
{
/* Wait 100 ms before the system can be put into a sleep state. */
@@ -169,7 +188,6 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev)
}
void pci_vpd_init(struct pci_dev *dev);
-void pci_vpd_release(struct pci_dev *dev);
extern const struct attribute_group pci_dev_vpd_attr_group;
/* PCI Virtual Channel */
@@ -290,10 +308,10 @@ void pci_put_host_bridge_device(struct device *dev);
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int crs_timeout);
+ int rrs_timeout);
bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int crs_timeout);
-int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
+ int rrs_timeout);
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -398,6 +416,14 @@ static inline void pci_doe_destroy(struct pci_dev *pdev) { }
static inline void pci_doe_disconnected(struct pci_dev *pdev) { }
#endif
+#ifdef CONFIG_PCI_NPEM
+void pci_npem_create(struct pci_dev *dev);
+void pci_npem_remove(struct pci_dev *dev);
+#else
+static inline void pci_npem_create(struct pci_dev *dev) { }
+static inline void pci_npem_remove(struct pci_dev *dev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
@@ -606,7 +632,7 @@ void pci_acs_init(struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
int pci_dev_specific_enable_acs(struct pci_dev *dev);
int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
-bool pcie_failed_link_retrain(struct pci_dev *dev);
+int pcie_failed_link_retrain(struct pci_dev *dev);
#else
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
@@ -621,9 +647,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
{
return -ENOTTY;
}
-static inline bool pcie_failed_link_retrain(struct pci_dev *dev)
+static inline int pcie_failed_link_retrain(struct pci_dev *dev)
{
- return false;
+ return -ENOTTY;
}
#endif
@@ -887,8 +913,6 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
#endif
int pcim_intx(struct pci_dev *dev, int enable);
-
-int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
const char *name);
void pcim_release_region(struct pci_dev *pdev, int bar);
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index f81b2303bf6a..91acc7b17f68 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -430,7 +430,7 @@ static int aer_inject(struct aer_error_inj *einj)
else
rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
rperr->source_id &= 0xffff0000;
- rperr->source_id |= (einj->bus << 8) | devfn;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn);
}
if (einj->uncor_status) {
if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
@@ -443,7 +443,7 @@ static int aer_inject(struct aer_error_inj *einj)
rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
rperr->source_id &= 0x0000ffff;
- rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
}
spin_unlock_irqrestore(&inject_lock, flags);
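For reference, PCI_DEVID() (include/linux/pci.h) packs bus and devfn exactly
as the open-coded expressions it replaces:

	#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))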
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b14b9876c030..4f68414c3086 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1061,7 +1061,7 @@ unregister:
free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
- pci_bus_release_domain_nr(bus, parent);
+ pci_bus_release_domain_nr(parent, bus->domain_nr);
#endif
kfree(bus);
return err;
@@ -1203,15 +1203,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
}
EXPORT_SYMBOL(pci_add_new_bus);
-static void pci_enable_crs(struct pci_dev *pdev)
+static void pci_enable_rrs_sv(struct pci_dev *pdev)
{
u16 root_cap = 0;
- /* Enable CRS Software Visibility if supported */
+ /* Enable Configuration RRS Software Visibility if supported */
pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
- if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+ if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
+ pdev->config_rrs_sv = 1;
+ }
}
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
@@ -1326,7 +1328,7 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
- pci_enable_crs(dev);
+ pci_enable_rrs_sv(dev);
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
@@ -2343,28 +2345,23 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_alloc_dev);
-static bool pci_bus_crs_vendor_id(u32 l)
-{
- return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
-}
-
-static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
+static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
int timeout)
{
int delay = 1;
- if (!pci_bus_crs_vendor_id(*l))
- return true; /* not a CRS completion */
+ if (!pci_bus_rrs_vendor_id(*l))
+ return true; /* not a Configuration RRS completion */
if (!timeout)
- return false; /* CRS, but caller doesn't want to wait */
+ return false; /* RRS, but caller doesn't want to wait */
/*
* We got the reserved Vendor ID that indicates a completion with
- * Configuration Request Retry Status (CRS). Retry until we get a
+ * Configuration Request Retry Status (RRS). Retry until we get a
* valid Vendor ID or we time out.
*/
- while (pci_bus_crs_vendor_id(*l)) {
+ while (pci_bus_rrs_vendor_id(*l)) {
if (delay > timeout) {
pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
pci_domain_nr(bus), bus->number,
@@ -2403,8 +2400,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
*l == 0x0000ffff || *l == 0xffff0000)
return false;
- if (pci_bus_crs_vendor_id(*l))
- return pci_bus_wait_crs(bus, devfn, l, timeout);
+ if (pci_bus_rrs_vendor_id(*l))
+ return pci_bus_wait_rrs(bus, devfn, l, timeout);
return true;
}
@@ -2593,6 +2590,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->match_driver = false;
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
+
+ pci_npem_create(dev);
}
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
index f07758c9edad..a23a4312574b 100644
--- a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
+++ b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
@@ -67,6 +67,11 @@ static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = {
.data = "wlan",
},
{
+ /* ATH11K in WCN6855 package. */
+ .compatible = "pci17cb,1103",
+ .data = "wlan",
+ },
+ {
/* ATH12K in WCN7850 package. */
.compatible = "pci17cb,1107",
.data = "wlan",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a2ce4e08edf5..dccb60c1d9cc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -66,7 +66,7 @@
* apply this erratum workaround to any downstream ports as long as they
* support Link Active reporting and have the Link Control 2 register.
* Restrict the speed to 2.5GT/s then with the Target Link Speed field,
- * request a retrain and wait 200ms for the data link to go up.
+ * request a retrain and check the result.
*
* If this turns out successful and we know by the Vendor:Device ID it is
* safe to do so, then lift the restriction, letting the devices negotiate
@@ -74,33 +74,45 @@
* firmware may have already arranged and lift it with ports that already
* report their data link being up.
*
- * Return TRUE if the link has been successfully retrained, otherwise FALSE.
+ * Otherwise revert the speed to the original setting and request a retrain
+ * again to remove any residual state, ignoring the result as it's supposed
+ * to fail anyway.
+ *
+ * Return 0 if the link has been successfully retrained. Return an error
+ * if retraining was not needed or we attempted a retrain and it failed.
*/
-bool pcie_failed_link_retrain(struct pci_dev *dev)
+int pcie_failed_link_retrain(struct pci_dev *dev)
{
static const struct pci_device_id ids[] = {
{ PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */
{}
};
u16 lnksta, lnkctl2;
+ int ret = -ENOTTY;
if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) ||
!pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
- return false;
+ return ret;
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) ==
PCI_EXP_LNKSTA_LBMS) {
+ u16 oldlnkctl2 = lnkctl2;
+
pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT;
pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
- if (pcie_retrain_link(dev, false)) {
+ ret = pcie_retrain_link(dev, false);
+ if (ret) {
pci_info(dev, "retraining failed\n");
- return false;
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL2,
+ oldlnkctl2);
+ pcie_retrain_link(dev, true);
+ return ret;
}
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
@@ -117,13 +129,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev)
lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS;
pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
- if (pcie_retrain_link(dev, false)) {
+ ret = pcie_retrain_link(dev, false);
+ if (ret) {
pci_info(dev, "retraining failed\n");
- return false;
+ return ret;
}
}
- return true;
+ return ret;
}
static ktime_t fixup_debug_start(struct pci_dev *dev,
@@ -3608,6 +3621,8 @@ DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */
quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2,
+ quirk_broken_intx_masking);
/*
* Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
@@ -4246,6 +4261,10 @@ static void quirk_dma_func0_alias(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+/* Some Glenfly chips use function 0 as the PCIe Requester ID for DMA */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d40, quirk_dma_func0_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d41, quirk_dma_func0_alias);
+
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 1)
@@ -5070,6 +5089,8 @@ static const struct pci_dev_acs_enabled {
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
+ /* QCOM SA8775P root port */
+ { PCI_VENDOR_ID_QCOM, 0x0115, pci_quirk_qcom_rp_acs },
/* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */
{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 4770cb87e3f0..e4ce1145aa3e 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -50,6 +50,8 @@ static void pci_destroy_dev(struct pci_dev *dev)
if (!dev->dev.kobj.parent)
return;
+ pci_npem_remove(dev);
+
device_del(&dev->dev);
down_write(&pci_bus_sem);
@@ -179,7 +181,7 @@ void pci_remove_root_bus(struct pci_bus *bus)
#ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Release domain_nr if it was dynamically allocated */
if (host_bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
- pci_bus_release_domain_nr(bus, host_bridge->dev.parent);
+ pci_bus_release_domain_nr(host_bridge->dev.parent, bus->domain_nr);
#endif
pci_remove_bus(bus);
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 0a02e85a8951..7644147d50b4 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -39,7 +39,6 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_short = 0;
userpg->cap_user_rdpmc = riscv_perf_user_access(event);
-#ifdef CONFIG_RISCV_PMU
/*
* The counters are 64-bit but the priv spec doesn't mandate all the
bits to be implemented: that's why counter width can vary based on
@@ -47,7 +46,6 @@ void arch_perf_update_userpage(struct perf_event *event,
*/
if (userpg->cap_user_rdpmc)
userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
-#endif
do {
rd = sched_clock_read_begin(&seq);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 25b1b699b3e2..5c39fbd8ed04 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -60,7 +60,7 @@ asm volatile(ALTERNATIVE( \
#define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY)
PMU_FORMAT_ATTR(event, "config:0-47");
-PMU_FORMAT_ATTR(firmware, "config:63");
+PMU_FORMAT_ATTR(firmware, "config:62-63");
static bool sbi_v2_available;
static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
@@ -507,7 +507,6 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
u32 type = event->attr.type;
u64 config = event->attr.config;
- int bSoftware;
u64 raw_config_val;
int ret;
@@ -528,18 +527,32 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
break;
case PERF_TYPE_RAW:
/*
- * As per SBI specification, the upper 16 bits must be unused for
- * a raw event. Use the MSB (63b) to distinguish between hardware
- * raw event and firmware events.
+ * As per SBI specification, the upper 16 bits must be unused
+ * for a raw event.
+ * Bits 63:62 are used to distinguish between raw event types:
+ * 00 - Hardware raw event
+ * 10 - SBI firmware events
+ * 11 - RISC-V platform specific firmware event
*/
- bSoftware = config >> 63;
raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
- if (bSoftware) {
+ switch (config >> 62) {
+ case 0:
+ ret = RISCV_PMU_RAW_EVENT_IDX;
+ *econfig = raw_config_val;
+ break;
+ case 2:
ret = (raw_config_val & 0xFFFF) |
(SBI_PMU_EVENT_TYPE_FW << 16);
- } else {
- ret = RISCV_PMU_RAW_EVENT_IDX;
+ break;
+ case 3:
+ /*
+ * For RISC-V platform specific firmware events:
+ * Event code - 0xFFFF
+ * Event data - raw event encoding
+ */
+ ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
*econfig = raw_config_val;
+ break;
}
break;
default:
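Under this encoding, example event config values look as follows (the event
numbers are illustrative only):

	u64 hw_raw  = 0x12345;			/* bits 63:62 = 00, hardware raw */
	u64 sbi_fw  = (2ULL << 62) | 0x6;	/* bits 63:62 = 10, SBI firmware */
	u64 plat_fw = (3ULL << 62) | 0xabcd;	/* bits 63:62 = 11, platform firmware */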
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index dfab1c66b3e5..f73abff416be 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -95,6 +95,7 @@ source "drivers/phy/mediatek/Kconfig"
source "drivers/phy/microchip/Kconfig"
source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
+source "drivers/phy/nuvoton/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
source "drivers/phy/realtek/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 5fcbce5f9ab1..ebc399560da4 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -25,6 +25,7 @@ obj-y += allwinner/ \
microchip/ \
motorola/ \
mscc/ \
+ nuvoton/ \
qualcomm/ \
ralink/ \
realtek/ \
diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
index cc29b08e49eb..462c61a24ec5 100644
--- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
@@ -113,11 +113,10 @@ static const struct phy_ops cygnus_pcie_phy_ops = {
static int cygnus_pcie_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct cygnus_pcie_phy_core *core;
struct phy_provider *provider;
unsigned cnt = 0;
- int ret;
if (of_get_child_count(node) == 0) {
dev_err(dev, "PHY no child node\n");
@@ -136,35 +135,31 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
mutex_init(&core->lock);
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
unsigned int id;
struct cygnus_pcie_phy *p;
if (of_property_read_u32(child, "reg", &id)) {
dev_err(dev, "missing reg property for %pOFn\n",
child);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
if (id >= MAX_NUM_PHYS) {
dev_err(dev, "invalid PHY id: %u\n", id);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
if (core->phys[id].phy) {
dev_err(dev, "duplicated PHY id: %u\n", id);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
p = &core->phys[id];
p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops);
if (IS_ERR(p->phy)) {
dev_err(dev, "failed to create PHY\n");
- ret = PTR_ERR(p->phy);
- goto put_child;
+ return PTR_ERR(p->phy);
}
p->core = core;
@@ -184,9 +179,6 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt);
return 0;
-put_child:
- of_node_put(child);
- return ret;
}
static const struct of_device_id cygnus_pcie_phy_match_table[] = {
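The scoped iterator is what makes the put_child unwinding above unnecessary:
the loop variable carries the __free(device_node) cleanup attribute, so the
child reference is dropped automatically on every scope exit, including
early returns. Roughly, from include/linux/of.h:

	#define for_each_available_child_of_node_scoped(parent, child) \
		for (struct device_node *child __free(device_node) =	\
		     of_get_next_available_child(parent, NULL);		\
		     child != NULL;					\
		     child = of_get_next_available_child(parent, child))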
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index ed9e18791ec9..228100357054 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -751,11 +751,11 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
{
const char *rxaeq_mode;
struct device *dev = &pdev->dev;
- struct device_node *dn = dev->of_node, *child;
+ struct device_node *dn = dev->of_node;
const struct of_device_id *of_id;
struct brcm_sata_phy *priv;
struct phy_provider *provider;
- int ret, count = 0;
+ int count = 0;
if (of_get_child_count(dn) == 0)
return -ENODEV;
@@ -782,26 +782,23 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
return PTR_ERR(priv->ctrl_base);
}
- for_each_available_child_of_node(dn, child) {
+ for_each_available_child_of_node_scoped(dn, child) {
unsigned int id;
struct brcm_sata_port *port;
if (of_property_read_u32(child, "reg", &id)) {
dev_err(dev, "missing reg property in node %pOFn\n",
child);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
if (id >= MAX_PORTS) {
dev_err(dev, "invalid reg: %u\n", id);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
if (priv->phys[id].phy) {
dev_err(dev, "already registered port %u\n", id);
- ret = -EINVAL;
- goto put_child;
+ return -EINVAL;
}
port = &priv->phys[id];
@@ -822,8 +819,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
if (IS_ERR(port->phy)) {
dev_err(dev, "failed to create PHY\n");
- ret = PTR_ERR(port->phy);
- goto put_child;
+ return PTR_ERR(port->phy);
}
phy_set_drvdata(port->phy, port);
@@ -839,9 +835,6 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
dev_info(dev, "registered %d port(s)\n", count);
return 0;
-put_child:
- of_node_put(child);
- return ret;
}
static struct platform_driver brcm_sata_phy_driver = {
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index d4eb93ce8232..aeec6eb6be23 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -310,7 +310,7 @@ static const struct clk_parent_data pll_mux_parent_data[][SIERRA_NUM_CMN_PLLC_PA
},
};
-static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+static const u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
[CMN_PLLLC] = { 0, 1 },
[CMN_PLLLC1] = { 1, 0 },
};
@@ -362,14 +362,14 @@ struct cdns_sierra_data {
u32 id_value;
u8 block_offset_shift;
u8 reg_offset_shift;
- struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
+ const struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ const struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ const struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ const struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
};
struct cdns_regmap_cdb_context {
@@ -539,12 +539,12 @@ static int cdns_sierra_phy_init(struct phy *gphy)
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
const struct cdns_sierra_data *init_data = phy->init_data;
- struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ const struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
enum cdns_sierra_phy_type phy_type = ins->phy_type;
+ const struct cdns_sierra_vals *phy_pma_ln_vals;
enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
- struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_sierra_vals *pcs_cmn_vals;
const struct cdns_reg_pairs *reg_pairs;
- struct cdns_sierra_vals *pcs_cmn_vals;
struct regmap *regmap;
u32 num_regs;
int i, j;
@@ -1244,12 +1244,12 @@ static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
{
+ const struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
const struct cdns_sierra_data *init_data = sp->init_data;
- struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ const struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_sierra_vals *pcs_cmn_vals;
enum cdns_sierra_phy_type phy_t1, phy_t2;
- struct cdns_sierra_vals *phy_pma_ln_vals;
const struct cdns_reg_pairs *reg_pairs;
- struct cdns_sierra_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
enum cdns_sierra_ssc_mode ssc;
struct regmap *regmap;
@@ -1366,7 +1366,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
unsigned int id_value;
int ret, node = 0;
void __iomem *base;
- struct device_node *dn = dev->of_node, *child;
+ struct device_node *dn = dev->of_node;
if (of_get_child_count(dn) == 0)
return -ENODEV;
@@ -1438,7 +1438,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
sp->autoconf = of_property_read_bool(dn, "cdns,autoconf");
- for_each_available_child_of_node(dn, child) {
+ for_each_available_child_of_node_scoped(dn, child) {
struct phy *gphy;
if (!(of_node_name_eq(child, "phy") ||
@@ -1452,7 +1452,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to get reset %s\n",
child->full_name);
ret = PTR_ERR(sp->phys[node].lnk_rst);
- of_node_put(child);
goto put_control;
}
@@ -1461,7 +1460,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "missing property in node %s\n",
child->name);
- of_node_put(child);
reset_control_put(sp->phys[node].lnk_rst);
goto put_control;
}
@@ -1475,7 +1473,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
gphy = devm_phy_create(dev, child, &noop_ops);
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
- of_node_put(child);
reset_control_put(sp->phys[node].lnk_rst);
goto put_control;
}
@@ -1544,11 +1541,11 @@ static void cdns_sierra_phy_remove(struct platform_device *pdev)
}
/* SGMII PHY PMA lane configuration */
-static struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = {
{0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
};
-static struct cdns_sierra_vals sgmii_phy_pma_ln_vals = {
+static const struct cdns_sierra_vals sgmii_phy_pma_ln_vals = {
.reg_pairs = sgmii_phy_pma_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_phy_pma_ln_regs),
};
@@ -1598,22 +1595,22 @@ static const struct cdns_reg_pairs sgmii_100_no_ssc_plllc1_opt3_ln_regs[] = {
{0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
};
-static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = {
+static const struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = {
.reg_pairs = sgmii_100_no_ssc_plllc1_opt3_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_cmn_regs),
};
-static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = {
+static const struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_plllc1_opt3_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_ln_regs),
};
/* QSGMII PHY PMA lane configuration */
-static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
+static const struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
{0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
};
-static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
+static const struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
.reg_pairs = qsgmii_phy_pma_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
};
@@ -1664,22 +1661,22 @@ static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
{0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
};
-static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
+static const struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
.reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
};
-static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
+static const struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
};
/* PCIE PHY PCS common configuration */
-static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
{0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
};
-static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
+static const struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
.reg_pairs = pcie_phy_pcs_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
};
@@ -1745,12 +1742,12 @@ static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
};
-static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
+static const struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_no_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
};
@@ -1810,7 +1807,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
-static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
+static const struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs),
};
@@ -1886,12 +1883,12 @@ static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
};
-static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
+static const struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_int_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
};
@@ -1954,7 +1951,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
-static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
+static const struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs),
};
@@ -2024,12 +2021,12 @@ static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
.reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
};
-static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
+static const struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
.reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
};
@@ -2092,7 +2089,7 @@ static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
{0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
};
-static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
+static const struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
.reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs,
.num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs),
};
@@ -2152,12 +2149,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_no_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
};
-static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
+static const struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_no_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
};
@@ -2227,12 +2224,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_int_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
};
-static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
+static const struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_int_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
};
@@ -2296,12 +2293,12 @@ static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
{0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
+static const struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
.reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
};
-static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
+static const struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
.reg_pairs = cdns_pcie_ln_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
};
@@ -2413,12 +2410,12 @@ static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
-static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
+static const struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
.reg_pairs = cdns_usb_cmn_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
};
-static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
+static const struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
.reg_pairs = cdns_usb_ln_regs_ext_ssc,
.num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
};
@@ -2443,7 +2440,7 @@ static const struct cdns_reg_pairs sgmii_pma_cmn_vals[] = {
{0x0013, SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG},
};
-static struct cdns_sierra_vals sgmii_cmn_vals = {
+static const struct cdns_sierra_vals sgmii_cmn_vals = {
.reg_pairs = sgmii_pma_cmn_vals,
.num_regs = ARRAY_SIZE(sgmii_pma_cmn_vals),
};
@@ -2489,7 +2486,7 @@ static const struct cdns_reg_pairs sgmii_ln_regs[] = {
{0x321F, SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG},
};
-static struct cdns_sierra_vals sgmii_pma_ln_vals = {
+static const struct cdns_sierra_vals sgmii_pma_ln_vals = {
.reg_pairs = sgmii_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_ln_regs),
};
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 56ce82a47f88..8bbbbb87bb22 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -285,7 +285,7 @@ static const int refclk_driver_parent_index[] = {
CDNS_TORRENT_RECEIVED_REFCLK
};
-static u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 };
+static const u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 };
enum cdns_torrent_phy_type {
TYPE_NONE,
@@ -351,6 +351,7 @@ struct cdns_torrent_phy {
void __iomem *sd_base; /* SD0801 registers base */
u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
u32 dp_pll;
+ u32 protocol_bitmask;
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct device *dev;
@@ -422,17 +423,17 @@ struct cdns_reg_pairs {
};
struct cdns_torrent_vals {
- struct cdns_reg_pairs *reg_pairs;
+ const struct cdns_reg_pairs *reg_pairs;
u32 num_regs;
};
struct cdns_torrent_vals_entry {
u32 key;
- struct cdns_torrent_vals *vals;
+ const struct cdns_torrent_vals *vals;
};
struct cdns_torrent_vals_table {
- struct cdns_torrent_vals_entry *entries;
+ const struct cdns_torrent_vals_entry *entries;
u32 num_entries;
};
@@ -454,12 +455,12 @@ struct cdns_regmap_cdb_context {
u8 reg_offset_shift;
};
-static struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl,
- enum cdns_torrent_ref_clk refclk0,
- enum cdns_torrent_ref_clk refclk1,
- enum cdns_torrent_phy_type link0,
- enum cdns_torrent_phy_type link1,
- enum cdns_torrent_ssc_mode ssc)
+static const struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl,
+ enum cdns_torrent_ref_clk refclk0,
+ enum cdns_torrent_ref_clk refclk1,
+ enum cdns_torrent_phy_type link0,
+ enum cdns_torrent_phy_type link1,
+ enum cdns_torrent_ssc_mode ssc)
{
int i;
u32 key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc);
@@ -2306,16 +2307,16 @@ static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy)
static int cdns_torrent_phy_init(struct phy *phy)
{
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ const struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
- struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ const struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
- struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
enum cdns_torrent_phy_type phy_type = inst->phy_type;
+ const struct cdns_torrent_vals *phy_pma_cmn_vals;
enum cdns_torrent_ssc_mode ssc = inst->ssc_mode;
- struct cdns_torrent_vals *phy_pma_cmn_vals;
- struct cdns_torrent_vals *pcs_cmn_vals;
- struct cdns_reg_pairs *reg_pairs;
+ const struct cdns_torrent_vals *pcs_cmn_vals;
+ const struct cdns_reg_pairs *reg_pairs;
struct regmap *regmap;
u32 num_regs;
int i, j;
@@ -2463,166 +2464,216 @@ static const struct phy_ops cdns_torrent_phy_ops = {
static
int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
{
+ const struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
const struct cdns_torrent_data *init_data = cdns_phy->init_data;
- struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ const struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
enum cdns_torrent_ref_clk ref_clk1 = cdns_phy->ref_clk1_rate;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
- struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
+ const struct cdns_torrent_vals *phy_pma_cmn_vals;
+ const struct cdns_torrent_vals *pcs_cmn_vals;
enum cdns_torrent_phy_type phy_t1, phy_t2;
- struct cdns_torrent_vals *phy_pma_cmn_vals;
- struct cdns_torrent_vals *pcs_cmn_vals;
+ const struct cdns_reg_pairs *reg_pairs;
int i, j, node, mlane, num_lanes, ret;
- struct cdns_reg_pairs *reg_pairs;
+ struct device *dev = cdns_phy->dev;
enum cdns_torrent_ssc_mode ssc;
struct regmap *regmap;
- u32 num_regs;
+ u32 num_regs, num_protocols, protocol;
- /* Maximum 2 links (subnodes) are supported */
- if (cdns_phy->nsubnodes != 2)
+ num_protocols = hweight32(cdns_phy->protocol_bitmask);
+ /* Maximum 2 protocols are supported */
+ if (num_protocols > 2) {
+ dev_err(dev, "at most 2 protocols are supported\n");
return -EINVAL;
+ }
+
+ /**
+ * Get PHY types directly from subnodes if only 2 subnodes exist.
+ * It is possible for phy_t1 to be the same as phy_t2 for special
+ * configurations such as PCIe Multilink.
+ */
+ if (cdns_phy->nsubnodes == 2) {
+ phy_t1 = cdns_phy->phys[0].phy_type;
+ phy_t2 = cdns_phy->phys[1].phy_type;
+ } else {
+ /**
+ * Both PHY types / protocols should be unique.
+ * If they are the same, it should be expressed with either
+ * a) Single-Link (1 Sub-node) - handled via PHY APIs
+ * OR
+ * b) Double-Link (2 Sub-nodes) - handled above
+ */
+ if (num_protocols != 2) {
+ dev_err(dev, "incorrect representation of link\n");
+ return -EINVAL;
+ }
- phy_t1 = cdns_phy->phys[0].phy_type;
- phy_t2 = cdns_phy->phys[1].phy_type;
+ phy_t1 = fns(cdns_phy->protocol_bitmask, 0);
+ phy_t2 = fns(cdns_phy->protocol_bitmask, 1);
+ }
/**
- * First configure the PHY for first link with phy_t1. Get the array
- * values as [phy_t1][phy_t2][ssc].
+	 * Configure all links with the protocol phy_t1 first, followed by
+	 * configuring all links with the protocol phy_t2.
+ *
+ * When phy_t1 = phy_t2, it is a single protocol and configuration
+ * is performed with a single iteration of the protocol and multiple
+ * iterations over the sub-nodes (links).
+ *
+ * When phy_t1 != phy_t2, there are two protocols and configuration
+ * is performed by iterating over all sub-nodes matching the first
+ * protocol and configuring them first, followed by iterating over
+ * all sub-nodes matching the second protocol and configuring them
+ * next.
*/
- for (node = 0; node < cdns_phy->nsubnodes; node++) {
- if (node == 1) {
+ for (protocol = 0; protocol < num_protocols; protocol++) {
+ /**
+ * For the case where num_protocols is 1,
+ * phy_t1 = phy_t2 and the swap is unnecessary.
+ *
+ * Swapping phy_t1 and phy_t2 is only required when the
+ * number of protocols is 2 and there are 2 or more links.
+ */
+ if (protocol == 1) {
/**
- * If first link with phy_t1 is configured, then
- * configure the PHY for second link with phy_t2.
+			 * If the first protocol with phy_t1 is configured, then
+			 * configure the PHY for the second protocol with phy_t2.
* Get the array values as [phy_t2][phy_t1][ssc].
*/
swap(phy_t1, phy_t2);
swap(ref_clk, ref_clk1);
}
- mlane = cdns_phy->phys[node].mlane;
- ssc = cdns_phy->phys[node].ssc_mode;
- num_lanes = cdns_phy->phys[node].num_lanes;
+ for (node = 0; node < cdns_phy->nsubnodes; node++) {
+ if (cdns_phy->phys[node].phy_type != phy_t1)
+ continue;
- /**
- * PHY configuration specific registers:
- * link_cmn_vals depend on combination of PHY types being
- * configured and are common for both PHY types, so array
- * values should be same for [phy_t1][phy_t2][ssc] and
- * [phy_t2][phy_t1][ssc].
- * xcvr_diag_vals also depend on combination of PHY types
- * being configured, but these can be different for particular
- * PHY type and are per lane.
- */
- link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
- CLK_ANY, CLK_ANY,
- phy_t1, phy_t2, ANY_SSC);
- if (link_cmn_vals) {
- reg_pairs = link_cmn_vals->reg_pairs;
- num_regs = link_cmn_vals->num_regs;
- regmap = cdns_phy->regmap_common_cdb;
+ mlane = cdns_phy->phys[node].mlane;
+ ssc = cdns_phy->phys[node].ssc_mode;
+ num_lanes = cdns_phy->phys[node].num_lanes;
/**
- * First array value in link_cmn_vals must be of
- * PHY_PLL_CFG register
+ * PHY configuration specific registers:
+ * link_cmn_vals depend on combination of PHY types being
+ * configured and are common for both PHY types, so array
+ * values should be same for [phy_t1][phy_t2][ssc] and
+ * [phy_t2][phy_t1][ssc].
+ * xcvr_diag_vals also depend on combination of PHY types
+ * being configured, but these can be different for particular
+ * PHY type and are per lane.
*/
- regmap_field_write(cdns_phy->phy_pll_cfg,
- reg_pairs[0].val);
+ link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
+ if (link_cmn_vals) {
+ reg_pairs = link_cmn_vals->reg_pairs;
+ num_regs = link_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+
+ /**
+ * First array value in link_cmn_vals must be of
+ * PHY_PLL_CFG register
+ */
+ regmap_field_write(cdns_phy->phy_pll_cfg,
+ reg_pairs[0].val);
+
+ for (i = 1; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
- for (i = 1; i < num_regs; i++)
- regmap_write(regmap, reg_pairs[i].off,
- reg_pairs[i].val);
- }
+ xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
+ if (xcvr_diag_vals) {
+ reg_pairs = xcvr_diag_vals->reg_pairs;
+ num_regs = xcvr_diag_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
- xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
- CLK_ANY, CLK_ANY,
- phy_t1, phy_t2, ANY_SSC);
- if (xcvr_diag_vals) {
- reg_pairs = xcvr_diag_vals->reg_pairs;
- num_regs = xcvr_diag_vals->num_regs;
- for (i = 0; i < num_lanes; i++) {
- regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
- for (j = 0; j < num_regs; j++)
- regmap_write(regmap, reg_pairs[j].off,
- reg_pairs[j].val);
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
}
- }
- /* PHY PCS common registers configurations */
- pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
- CLK_ANY, CLK_ANY,
- phy_t1, phy_t2, ANY_SSC);
- if (pcs_cmn_vals) {
- reg_pairs = pcs_cmn_vals->reg_pairs;
- num_regs = pcs_cmn_vals->num_regs;
- regmap = cdns_phy->regmap_phy_pcs_common_cdb;
- for (i = 0; i < num_regs; i++)
- regmap_write(regmap, reg_pairs[i].off,
- reg_pairs[i].val);
- }
+ /* PHY PMA common registers configurations */
+ phy_pma_cmn_vals =
+ cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY, phy_t1, phy_t2,
+ ANY_SSC);
+ if (phy_pma_cmn_vals) {
+ reg_pairs = phy_pma_cmn_vals->reg_pairs;
+ num_regs = phy_pma_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
- /* PHY PMA common registers configurations */
- phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
- CLK_ANY, CLK_ANY,
- phy_t1, phy_t2, ANY_SSC);
- if (phy_pma_cmn_vals) {
- reg_pairs = phy_pma_cmn_vals->reg_pairs;
- num_regs = phy_pma_cmn_vals->num_regs;
- regmap = cdns_phy->regmap_phy_pma_common_cdb;
- for (i = 0; i < num_regs; i++)
- regmap_write(regmap, reg_pairs[i].off,
- reg_pairs[i].val);
- }
+ /* PMA common registers configurations */
+ cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
+ if (cmn_vals) {
+ reg_pairs = cmn_vals->reg_pairs;
+ num_regs = cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
- /* PMA common registers configurations */
- cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
- ref_clk, ref_clk1,
- phy_t1, phy_t2, ssc);
- if (cmn_vals) {
- reg_pairs = cmn_vals->reg_pairs;
- num_regs = cmn_vals->num_regs;
- regmap = cdns_phy->regmap_common_cdb;
- for (i = 0; i < num_regs; i++)
- regmap_write(regmap, reg_pairs[i].off,
- reg_pairs[i].val);
- }
+ /* PMA TX lane registers configurations */
+ tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
+ if (tx_ln_vals) {
+ reg_pairs = tx_ln_vals->reg_pairs;
+ num_regs = tx_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
- /* PMA TX lane registers configurations */
- tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
- ref_clk, ref_clk1,
- phy_t1, phy_t2, ssc);
- if (tx_ln_vals) {
- reg_pairs = tx_ln_vals->reg_pairs;
- num_regs = tx_ln_vals->num_regs;
- for (i = 0; i < num_lanes; i++) {
- regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
- for (j = 0; j < num_regs; j++)
- regmap_write(regmap, reg_pairs[j].off,
- reg_pairs[j].val);
+ /* PMA RX lane registers configurations */
+ rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
+ ref_clk, ref_clk1,
+ phy_t1, phy_t2, ssc);
+ if (rx_ln_vals) {
+ reg_pairs = rx_ln_vals->reg_pairs;
+ num_regs = rx_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
}
- }
- /* PMA RX lane registers configurations */
- rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
- ref_clk, ref_clk1,
- phy_t1, phy_t2, ssc);
- if (rx_ln_vals) {
- reg_pairs = rx_ln_vals->reg_pairs;
- num_regs = rx_ln_vals->num_regs;
- for (i = 0; i < num_lanes; i++) {
- regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane];
- for (j = 0; j < num_regs; j++)
- regmap_write(regmap, reg_pairs[j].off,
- reg_pairs[j].val);
+ if (phy_t1 == TYPE_DP) {
+ ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2);
+ if (ret)
+ return ret;
}
- }
- if (phy_t1 == TYPE_DP) {
- ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2);
- if (ret)
- return ret;
+ reset_control_deassert(cdns_phy->phys[node].lnk_rst);
}
-
- reset_control_deassert(cdns_phy->phys[node].lnk_rst);
}
/* Take the PHY out of reset */
@@ -2826,6 +2877,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, cdns_phy);
cdns_phy->dev = dev;
cdns_phy->init_data = data;
+ cdns_phy->protocol_bitmask = 0;
cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cdns_phy->sd_base))
@@ -3010,6 +3062,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
}
cdns_phy->phys[node].phy = gphy;
+ cdns_phy->protocol_bitmask |= BIT(cdns_phy->phys[node].phy_type);
phy_set_drvdata(gphy, &cdns_phy->phys[node]);
node++;
@@ -3079,21 +3132,21 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev)
}
/* SGMII and QSGMII link configuration */
-static struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG}
};
-static struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = {
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x0113, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = {
.reg_pairs = sgmii_qsgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_qsgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_qsgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_qsgmii_xcvr_diag_ln_regs),
};
@@ -3155,73 +3208,73 @@ static DEFINE_NOIRQ_DEV_PM_OPS(cdns_torrent_phy_pm_ops,
cdns_torrent_phy_resume_noirq);
/* USB and DP link configuration */
-static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
-static struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = {
{0x0001, XCVR_DIAG_HSCLK_SEL},
{0x0009, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals usb_dp_link_cmn_vals = {
+static const struct cdns_torrent_vals usb_dp_link_cmn_vals = {
.reg_pairs = usb_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(usb_dp_link_cmn_regs),
};
-static struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = {
.reg_pairs = usb_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_dp_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
.reg_pairs = dp_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
};
/* USXGMII and SGMII/QSGMII link configuration */
-static struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0001, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = {
{0x0111, XCVR_DIAG_HSCLK_SEL},
{0x0103, XCVR_DIAG_HSCLK_DIV},
{0x0A9B, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = {
.reg_pairs = usxgmii_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(usxgmii_sgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = usxgmii_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_sgmii_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_usxgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_usxgmii_xcvr_diag_ln_regs),
};
/* Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
{0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
{0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
{0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
@@ -3233,13 +3286,13 @@ static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
{0x0138, CMN_PLL0_LOCK_PLLCNT_START}
};
-static struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
.reg_pairs = ml_usxgmii_pll0_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(ml_usxgmii_pll0_156_25_no_ssc_cmn_regs),
};
/* Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -3248,13 +3301,13 @@ static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = {
.reg_pairs = ml_sgmii_pll1_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(ml_sgmii_pll1_100_no_ssc_cmn_regs),
};
/* TI J7200, Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
{0x0014, CMN_SSM_BIAS_TMR},
{0x0028, CMN_PLLSM0_PLLPRE_TMR},
{0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
@@ -3280,13 +3333,13 @@ static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = {
{0x0138, CMN_PLL0_LOCK_PLLCNT_START}
};
-static struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = {
.reg_pairs = j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs),
};
/* TI J7200, Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PLLSM1_PLLPRE_TMR},
{0x00A4, CMN_PLLSM1_PLLLOCK_TMR},
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
@@ -3297,42 +3350,42 @@ static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = {
.reg_pairs = j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs),
};
/* PCIe and USXGMII link configuration */
-static struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
{0x0400, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0089, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = {
.reg_pairs = pcie_usxgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_usxgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = {
.reg_pairs = pcie_usxgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_usxgmii_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = {
.reg_pairs = usxgmii_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_pcie_xcvr_diag_ln_regs),
};
@@ -3340,7 +3393,7 @@ static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = {
/*
* Multilink USXGMII, using PLL1, 156.25 MHz Ref clk, no SSC
*/
-static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x0014, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x0005, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -3355,7 +3408,7 @@ static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3 },
@@ -3363,7 +3416,7 @@ static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x0000, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
@@ -3381,55 +3434,55 @@ static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG}
};
-static struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = {
.reg_pairs = ml_usxgmii_pll1_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(ml_usxgmii_pll1_156_25_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = {
.reg_pairs = ml_usxgmii_156_25_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = {
.reg_pairs = ml_usxgmii_156_25_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_rx_ln_regs),
};
/* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */
-static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
+static const struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
{0x0040, PHY_PMA_CMN_CTRL1},
};
-static struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = {
+static const struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = {
.reg_pairs = ti_usxgmii_phy_pma_cmn_regs,
.num_regs = ARRAY_SIZE(ti_usxgmii_phy_pma_cmn_regs),
};
/* Single USXGMII link configuration */
-static struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
-static struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0001, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = {
.reg_pairs = sl_usxgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = {
.reg_pairs = sl_usxgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_xcvr_diag_ln_regs),
};
/* Single link USXGMII, 156.25 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
{0x0014, CMN_SSM_BIAS_TMR},
{0x0028, CMN_PLLSM0_PLLPRE_TMR},
{0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
@@ -3467,7 +3520,7 @@ static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
{0x0138, CMN_PLL1_LOCK_PLLCNT_START}
};
-static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x07A2, TX_RCVDET_ST_TMR},
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
@@ -3476,7 +3529,7 @@ static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
{0x0000, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x0014, RX_SDCAL0_INIT_TMR},
{0x0062, RX_SDCAL0_ITER_TMR},
{0x0014, RX_SDCAL1_INIT_TMR},
@@ -3498,68 +3551,68 @@ static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG}
};
-static struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = {
.reg_pairs = sl_usxgmii_156_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usxgmii_156_25_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = {
.reg_pairs = usxgmii_156_25_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = {
.reg_pairs = usxgmii_156_25_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_rx_ln_regs),
};
/* PCIe and DP link configuration */
-static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}
};
-static struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = {
{0x0001, XCVR_DIAG_HSCLK_SEL},
{0x0009, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals pcie_dp_link_cmn_vals = {
+static const struct cdns_torrent_vals pcie_dp_link_cmn_vals = {
.reg_pairs = pcie_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_dp_link_cmn_regs),
};
-static struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = {
.reg_pairs = pcie_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_dp_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = {
.reg_pairs = dp_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(dp_pcie_xcvr_diag_ln_regs),
};
/* DP Multilink, 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = {
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
{0x04AA, TX_PSC_A3},
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
-static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
@@ -3569,43 +3622,43 @@ static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_REE_PERGCSM_CTRL}
};
-static struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = {
.reg_pairs = dp_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = {
.reg_pairs = dp_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = {
.reg_pairs = dp_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(dp_100_no_ssc_rx_ln_regs),
};
/* Single DisplayPort(DP) link configuration */
-static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
};
-static struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sl_dp_link_cmn_vals = {
+static const struct cdns_torrent_vals sl_dp_link_cmn_vals = {
.reg_pairs = sl_dp_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_link_cmn_regs),
};
-static struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = {
.reg_pairs = sl_dp_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_xcvr_diag_ln_regs),
};
/* Single DP, 19.2 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
{0x0014, CMN_SSM_BIAS_TMR},
{0x0027, CMN_PLLSM0_PLLPRE_TMR},
{0x00A1, CMN_PLLSM0_PLLLOCK_TMR},
@@ -3642,7 +3695,7 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
{0x0780, TX_RCVDET_ST_TMR},
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
@@ -3650,7 +3703,7 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
-static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
@@ -3660,23 +3713,23 @@ static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
{0x0000, RX_REE_PERGCSM_CTRL}
};
-static struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_19_2_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_rx_ln_regs),
};
/* Single DP, 25 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
{0x0019, CMN_SSM_BIAS_TMR},
{0x0032, CMN_PLLSM0_PLLPRE_TMR},
{0x00D1, CMN_PLLSM0_PLLLOCK_TMR},
@@ -3713,7 +3766,7 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
{0x09C4, TX_RCVDET_ST_TMR},
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
@@ -3721,7 +3774,7 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
-static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
@@ -3731,35 +3784,35 @@ static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
{0x0000, RX_REE_PERGCSM_CTRL}
};
-static struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_25_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_25_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_25_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_rx_ln_regs),
};
/* Single DP, 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL0_VCOCAL_TCTRL},
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = {
{0x00FB, TX_PSC_A0},
{0x04AA, TX_PSC_A2},
{0x04AA, TX_PSC_A3},
{0x000F, XCVR_DIAG_BIDI_CTRL}
};
-static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_PSC_A0},
{0x0000, RX_PSC_A2},
{0x0000, RX_PSC_A3},
@@ -3769,92 +3822,92 @@ static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
{0x0000, RX_REE_PERGCSM_CTRL}
};
-static struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = {
.reg_pairs = sl_dp_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = {
.reg_pairs = sl_dp_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = {
.reg_pairs = sl_dp_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_rx_ln_regs),
};
/* USB and SGMII/QSGMII link configuration */
-static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x009B, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals usb_sgmii_link_cmn_vals = {
.reg_pairs = usb_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = usb_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs),
};
/* PCIe and USB Unique SSC link configuration */
-static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
{0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x00C9, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals pcie_usb_link_cmn_vals = {
+static const struct cdns_torrent_vals pcie_usb_link_cmn_vals = {
.reg_pairs = pcie_usb_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs),
};
-static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = {
.reg_pairs = pcie_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = {
.reg_pairs = usb_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs),
};
/* USB 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -3907,47 +3960,47 @@ static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
.reg_pairs = usb_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs),
};
/* Single USB link configuration */
-static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usb_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
-static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0041, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sl_usb_link_cmn_vals = {
+static const struct cdns_torrent_vals sl_usb_link_cmn_vals = {
.reg_pairs = sl_usb_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs),
};
-static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = {
.reg_pairs = sl_usb_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs),
};
/* USB PHY PCS common configuration */
-static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = {
{0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0},
{0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0},
{0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1}
};
-static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
+static const struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
.reg_pairs = usb_phy_pcs_cmn_regs,
.num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs),
};
/* USB 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -3957,19 +4010,19 @@ static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
};
-static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
.reg_pairs = sl_usb_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs),
};
-static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
{0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
{0x02FF, TX_PSC_A0},
{0x06AF, TX_PSC_A1},
{0x06AE, TX_PSC_A2},
@@ -3979,7 +4032,7 @@ static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
{0x0003, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
{0x0D1D, RX_PSC_A0},
{0x0D1D, RX_PSC_A1},
{0x0D00, RX_PSC_A2},
@@ -4002,23 +4055,23 @@ static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
{0x0003, RX_CDRLF_CNFG3}
};
-static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = {
.reg_pairs = usb_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = {
.reg_pairs = usb_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = {
.reg_pairs = usb_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs),
};
/* Single link USB, 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
{0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
@@ -4059,48 +4112,48 @@ static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
{0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
};
-static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = {
.reg_pairs = sl_usb_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs),
};
/* PCIe and SGMII/QSGMII Unique SSC link configuration */
-static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
{0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
{0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
};
-static struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0001, XCVR_DIAG_HSCLK_DIV},
{0x0012, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = {
{0x0011, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x009B, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = {
.reg_pairs = pcie_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = pcie_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs),
};
-static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
.reg_pairs = sgmii_pcie_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs),
};
/* SGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -4108,17 +4161,17 @@ static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sl_sgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs),
};
-static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
@@ -4127,7 +4180,7 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
{0x0002, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
@@ -4137,7 +4190,7 @@ static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x4000, XCVR_DIAG_RXCLK_CTRL}
};
-static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
@@ -4155,28 +4208,28 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG},
};
-static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = sgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
};
/* TI J7200, multilink SGMII */
-static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x07A2, TX_RCVDET_ST_TMR},
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
@@ -4187,12 +4240,12 @@ static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = {
{0x4000, XCVR_DIAG_RXCLK_CTRL}
};
-static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = j7200_sgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = {
{0x0014, RX_SDCAL0_INIT_TMR},
{0x0062, RX_SDCAL0_ITER_TMR},
{0x0014, RX_SDCAL1_INIT_TMR},
@@ -4214,13 +4267,13 @@ static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG}
};
-static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = j7200_sgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_rx_ln_regs),
};
/* SGMII 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -4271,13 +4324,13 @@ static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
.reg_pairs = sgmii_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs),
};
/* QSGMII 100 MHz Ref clk, no SSC */
-static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
@@ -4285,17 +4338,17 @@ static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
{0x0003, CMN_PLL1_VCOCAL_TCTRL}
};
-static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
.reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs),
};
-static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
{0x007F, CMN_TXPUCAL_TUNE},
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
@@ -4305,7 +4358,7 @@ static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x0002, XCVR_DIAG_PSC_OVRD}
};
-static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
{0x04A2, TX_PSC_A3},
@@ -4316,7 +4369,7 @@ static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x4000, XCVR_DIAG_RXCLK_CTRL}
};
-static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x091D, RX_PSC_A0},
{0x0900, RX_PSC_A2},
{0x0100, RX_PSC_A3},
@@ -4334,28 +4387,28 @@ static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG},
};
-static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = {
.reg_pairs = qsgmii_100_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = qsgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
};
/* TI J7200, multilink QSGMII */
-static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = {
+static const struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x07A2, TX_RCVDET_ST_TMR},
{0x00F3, TX_PSC_A0},
{0x04A2, TX_PSC_A2},
@@ -4367,12 +4420,12 @@ static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = {
{0x4000, XCVR_DIAG_RXCLK_CTRL}
};
-static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = {
+static const struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = {
.reg_pairs = j7200_qsgmii_100_no_ssc_tx_ln_regs,
.num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_tx_ln_regs),
};
-static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x0014, RX_SDCAL0_INIT_TMR},
{0x0062, RX_SDCAL0_ITER_TMR},
{0x0014, RX_SDCAL1_INIT_TMR},
@@ -4394,13 +4447,13 @@ static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = {
{0x018C, RX_CDRLF_CNFG}
};
-static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = {
.reg_pairs = j7200_qsgmii_100_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_rx_ln_regs),
};
/* QSGMII 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -4451,35 +4504,35 @@ static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
{0x007F, CMN_TXPDCAL_TUNE}
};
-static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
.reg_pairs = qsgmii_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs),
};
/* Single SGMII/QSGMII link configuration */
-static struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
{0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}
};
-static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = {
+static const struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = {
{0x0000, XCVR_DIAG_HSCLK_SEL},
{0x0003, XCVR_DIAG_HSCLK_DIV},
{0x0013, XCVR_DIAG_PLLDRC_CTRL}
};
-static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = {
+static const struct cdns_torrent_vals sl_sgmii_link_cmn_vals = {
.reg_pairs = sl_sgmii_link_cmn_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs),
};
-static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = {
+static const struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = {
.reg_pairs = sl_sgmii_xcvr_diag_ln_regs,
.num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs),
};
/* Multi link PCIe, 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -4528,13 +4581,13 @@ static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
};
-static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = {
.reg_pairs = pcie_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs),
};
/* Single link PCIe, 100 MHz Ref clk, internal SSC */
-static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
{0x0004, CMN_PLL1_DSM_DIAG_M0},
@@ -4583,35 +4636,35 @@ static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
{0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
};
-static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
+static const struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
.reg_pairs = sl_pcie_100_int_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs),
};
/* PCIe, 100 MHz Ref clk, no SSC & external SSC */
-static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
+static const struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
{0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
{0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}
};
-static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
+static const struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
{0x0019, RX_REE_TAP1_CLIP},
{0x0019, RX_REE_TAP2TON_CLIP},
{0x0001, RX_DIAG_ACYA}
};
-static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = {
+static const struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = {
.reg_pairs = pcie_100_ext_no_ssc_cmn_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs),
};
-static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
+static const struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
.reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs,
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs),
};
-static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &pcie_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals},
@@ -4647,7 +4700,7 @@ static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_link_cmn_vals},
};
-static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
+static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &dp_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals},
@@ -4683,7 +4736,7 @@ static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_xcvr_diag_ln_vals},
};
-static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals},
@@ -4691,7 +4744,7 @@ static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals},
};
-static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
@@ -4773,7 +4826,7 @@ static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
};
-static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
@@ -4855,7 +4908,7 @@ static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals},
};
-static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
@@ -4966,14 +5019,14 @@ static const struct cdns_torrent_data cdns_map_torrent = {
},
};
-static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &ti_usxgmii_phy_pma_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &ti_usxgmii_phy_pma_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &ti_usxgmii_phy_pma_cmn_vals},
};
-static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
@@ -5089,7 +5142,7 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
};
/* TI J7200 (Torrent SD0805) */
-static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
+static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
@@ -5171,7 +5224,7 @@ static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals},
};
-static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
@@ -5253,7 +5306,7 @@ static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
};
-static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
+static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index c138cd4807d6..c843923252aa 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -138,7 +138,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct hisi_inno_phy_priv *priv;
struct phy_provider *provider;
- struct device_node *child;
int i = 0;
int ret;
@@ -162,24 +161,20 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
priv->type = (uintptr_t) of_device_get_match_data(dev);
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
struct reset_control *rst;
struct phy *phy;
rst = of_reset_control_get_exclusive(child, NULL);
- if (IS_ERR(rst)) {
- of_node_put(child);
+ if (IS_ERR(rst))
return PTR_ERR(rst);
- }
priv->ports[i].utmi_rst = rst;
priv->ports[i].priv = priv;
phy = devm_phy_create(dev, child, &hisi_inno_phy_ops);
- if (IS_ERR(phy)) {
- of_node_put(child);
+ if (IS_ERR(phy))
return PTR_ERR(phy);
- }
phy_set_bus_width(phy, 8);
phy_set_drvdata(phy, &priv->ports[i]);
@@ -187,7 +182,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
if (i >= INNO_PHY_PORT_NUM) {
dev_warn(dev, "Support %d ports in maximum\n", i);
- of_node_put(child);
break;
}
}
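The hisi-inno conversion above is representative of the for_each_child_of_node_scoped() changes throughout this series. A minimal sketch of the idiom, assuming a hypothetical example_register_children() helper and example_phy_ops (only the iterator and the PHY calls are real kernel API):

/*
 * The scoped iterator attaches __free(device_node) cleanup to 'child',
 * so the reference is dropped on every exit path and the explicit
 * of_node_put() calls in the old error paths become unnecessary.
 */
static int example_register_children(struct device *dev,
				     struct device_node *np)
{
	for_each_child_of_node_scoped(np, child) {
		struct phy *phy = devm_phy_create(dev, child, &example_phy_ops);

		if (IS_ERR(phy))
			return PTR_ERR(phy);	/* child put automatically */
	}

	return 0;
}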
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index da5e8f405749..fefc02d921e6 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -244,8 +244,8 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
- ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_2500BASEX),
- ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, -1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, 0x1, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 25b86bbb9cec..3f7095ec5978 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -1577,12 +1577,11 @@ static int mtk_tphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *child_np;
struct phy_provider *provider;
struct resource *sif_res;
struct mtk_tphy *tphy;
struct resource res;
- int port, retval;
+ int port;
tphy = devm_kzalloc(dev, sizeof(*tphy), GFP_KERNEL);
if (!tphy)
@@ -1623,25 +1622,23 @@ static int mtk_tphy_probe(struct platform_device *pdev)
}
port = 0;
- for_each_child_of_node(np, child_np) {
+ for_each_child_of_node_scoped(np, child_np) {
struct mtk_phy_instance *instance;
struct clk_bulk_data *clks;
struct device *subdev;
struct phy *phy;
+ int retval;
instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
- if (!instance) {
- retval = -ENOMEM;
- goto put_child;
- }
+ if (!instance)
+ return -ENOMEM;
tphy->phys[port] = instance;
phy = devm_phy_create(dev, child_np, &mtk_tphy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
- retval = PTR_ERR(phy);
- goto put_child;
+ return PTR_ERR(phy);
}
subdev = &phy->dev;
@@ -1649,14 +1646,12 @@ static int mtk_tphy_probe(struct platform_device *pdev)
if (retval) {
dev_err(subdev, "failed to get address resource(id-%d)\n",
port);
- goto put_child;
+ return retval;
}
instance->port_base = devm_ioremap_resource(subdev, &res);
- if (IS_ERR(instance->port_base)) {
- retval = PTR_ERR(instance->port_base);
- goto put_child;
- }
+ if (IS_ERR(instance->port_base))
+ return PTR_ERR(instance->port_base);
instance->phy = phy;
instance->index = port;
@@ -1668,19 +1663,16 @@ static int mtk_tphy_probe(struct platform_device *pdev)
clks[1].id = "da_ref"; /* analog clock */
retval = devm_clk_bulk_get_optional(subdev, TPHY_CLKS_CNT, clks);
if (retval)
- goto put_child;
+ return retval;
retval = phy_type_syscon_get(instance, child_np);
if (retval)
- goto put_child;
+ return retval;
}
provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
return PTR_ERR_OR_ZERO(provider);
-put_child:
- of_node_put(child_np);
- return retval;
}
static struct platform_driver mtk_tphy_driver = {
diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c
index 064fd0941727..7c248f5cfca5 100644
--- a/drivers/phy/mediatek/phy-mtk-xsphy.c
+++ b/drivers/phy/mediatek/phy-mtk-xsphy.c
@@ -432,12 +432,11 @@ static int mtk_xsphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *child_np;
struct phy_provider *provider;
struct resource *glb_res;
struct mtk_xsphy *xsphy;
struct resource res;
- int port, retval;
+ int port;
xsphy = devm_kzalloc(dev, sizeof(*xsphy), GFP_KERNEL);
if (!xsphy)
@@ -471,37 +470,34 @@ static int mtk_xsphy_probe(struct platform_device *pdev)
device_property_read_u32(dev, "mediatek,src-coef", &xsphy->src_coef);
port = 0;
- for_each_child_of_node(np, child_np) {
+ for_each_child_of_node_scoped(np, child_np) {
struct xsphy_instance *inst;
struct phy *phy;
+ int retval;
inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
- if (!inst) {
- retval = -ENOMEM;
- goto put_child;
- }
+ if (!inst)
+ return -ENOMEM;
xsphy->phys[port] = inst;
phy = devm_phy_create(dev, child_np, &mtk_xsphy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
- retval = PTR_ERR(phy);
- goto put_child;
+ return PTR_ERR(phy);
}
retval = of_address_to_resource(child_np, 0, &res);
if (retval) {
dev_err(dev, "failed to get address resource(id-%d)\n",
port);
- goto put_child;
+ return retval;
}
inst->port_base = devm_ioremap_resource(&phy->dev, &res);
if (IS_ERR(inst->port_base)) {
dev_err(dev, "failed to remap phy regs\n");
- retval = PTR_ERR(inst->port_base);
- goto put_child;
+ return PTR_ERR(inst->port_base);
}
inst->phy = phy;
@@ -512,17 +508,12 @@ static int mtk_xsphy_probe(struct platform_device *pdev)
inst->ref_clk = devm_clk_get(&phy->dev, "ref");
if (IS_ERR(inst->ref_clk)) {
dev_err(dev, "failed to get ref_clk(id-%d)\n", port);
- retval = PTR_ERR(inst->ref_clk);
- goto put_child;
+ return PTR_ERR(inst->ref_clk);
}
}
provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
return PTR_ERR_OR_ZERO(provider);
-
-put_child:
- of_node_put(child_np);
- return retval;
}
static struct platform_driver mtk_xsphy_driver = {
diff --git a/drivers/phy/nuvoton/Kconfig b/drivers/phy/nuvoton/Kconfig
new file mode 100644
index 000000000000..d02cae2db315
--- /dev/null
+++ b/drivers/phy/nuvoton/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# PHY drivers for Nuvoton MA35 platforms
+#
+config PHY_MA35_USB
+ tristate "Nuvoton MA35 USB2.0 PHY driver"
+ depends on ARCH_MA35 || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+ Enable this to support the USB2.0 PHY on the Nuvoton MA35
+ series SoCs.
diff --git a/drivers/phy/nuvoton/Makefile b/drivers/phy/nuvoton/Makefile
new file mode 100644
index 000000000000..2937e3921898
--- /dev/null
+++ b/drivers/phy/nuvoton/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PHY_MA35_USB) += phy-ma35d1-usb2.o
diff --git a/drivers/phy/nuvoton/phy-ma35d1-usb2.c b/drivers/phy/nuvoton/phy-ma35d1-usb2.c
new file mode 100644
index 000000000000..9a459b700ed4
--- /dev/null
+++ b/drivers/phy/nuvoton/phy-ma35d1-usb2.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* USB PHY Miscellaneous Control Register */
+#define MA35_SYS_REG_USBPMISCR 0x60
+#define PHY0POR BIT(0) /* PHY Power-On Reset Control Bit */
+#define PHY0SUSPEND BIT(1) /* PHY Suspend; 0: suspend, 1: operation */
+#define PHY0COMN BIT(2) /* PHY Common Block Power-Down Control */
+#define PHY0DEVCKSTB BIT(10) /* PHY 60 MHz UTMI clock stable bit */
+
+struct ma35_usb_phy {
+ struct clk *clk;
+ struct device *dev;
+ struct regmap *sysreg;
+};
+
+static int ma35_usb_phy_power_on(struct phy *phy)
+{
+ struct ma35_usb_phy *p_phy = phy_get_drvdata(phy);
+ unsigned int val;
+ int ret;
+
+ ret = clk_prepare_enable(p_phy->clk);
+ if (ret < 0) {
+ dev_err(p_phy->dev, "Failed to enable PHY clock: %d\n", ret);
+ return ret;
+ }
+
+ regmap_read(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, &val);
+ if (val & PHY0SUSPEND) {
+ /*
+ * USB PHY0 is already in operation mode;
+ * make sure the USB PHY 60 MHz UTMI interface clock is ready.
+ */
+ ret = regmap_read_poll_timeout(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, val,
+ val & PHY0DEVCKSTB, 10, 1000);
+ if (ret == 0)
+ return 0;
+ }
+
+ /*
+ * Reset USB PHY0 and wait until its 60 MHz UTMI
+ * interface clock is ready.
+ */
+ regmap_update_bits(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, 0x7, (PHY0POR | PHY0SUSPEND));
+ udelay(20);
+
+ /* make USB PHY0 enter operation mode */
+ regmap_update_bits(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, 0x7, PHY0SUSPEND);
+
+ /* make sure USB PHY 60 MHz UTMI Interface Clock ready */
+ ret = regmap_read_poll_timeout(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, val,
+ val & PHY0DEVCKSTB, 10, 1000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(p_phy->dev, "PHY clock not ready, timeout: %d\n", ret);
+ clk_disable_unprepare(p_phy->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ma35_usb_phy_power_off(struct phy *phy)
+{
+ struct ma35_usb_phy *p_phy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(p_phy->clk);
+ return 0;
+}
+
+static const struct phy_ops ma35_usb_phy_ops = {
+ .power_on = ma35_usb_phy_power_on,
+ .power_off = ma35_usb_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int ma35_usb_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *provider;
+ struct ma35_usb_phy *p_phy;
+ struct phy *phy;
+
+ p_phy = devm_kzalloc(&pdev->dev, sizeof(*p_phy), GFP_KERNEL);
+ if (!p_phy)
+ return -ENOMEM;
+
+ p_phy->dev = &pdev->dev;
+ platform_set_drvdata(pdev, p_phy);
+
+ p_phy->sysreg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "nuvoton,sys");
+ if (IS_ERR(p_phy->sysreg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(p_phy->sysreg),
+ "Failed to get SYS registers\n");
+
+ p_phy->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(p_phy->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(p_phy->clk),
+ "failed to find usb_phy clock\n");
+
+ phy = devm_phy_create(&pdev->dev, NULL, &ma35_usb_phy_ops);
+ if (IS_ERR(phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(phy), "Failed to create PHY\n");
+
+ phy_set_drvdata(phy, p_phy);
+
+ provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+ if (IS_ERR(provider))
+ return dev_err_probe(&pdev->dev, PTR_ERR(provider),
+ "Failed to register PHY provider\n");
+ return 0;
+}
+
+static const struct of_device_id ma35_usb_phy_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-usb2-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ma35_usb_phy_of_match);
+
+static struct platform_driver ma35_usb_phy_driver = {
+ .probe = ma35_usb_phy_probe,
+ .driver = {
+ .name = "ma35d1-usb2-phy",
+ .of_match_table = ma35_usb_phy_of_match,
+ },
+};
+module_platform_driver(ma35_usb_phy_driver);
+
+MODULE_DESCRIPTION("Nuvoton MA35D1 USB2.0 PHY driver");
+MODULE_AUTHOR("Hui-Ping Chen <hpchen0nvt@gmail.com>");
+MODULE_LICENSE("GPL");
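For context, a hedged sketch of how a host-controller driver would consume the PHY this new driver registers, via the generic PHY framework; the "usb" phy-names entry and the surrounding probe variables are assumptions, not part of the patch:

/* Hypothetical consumer-side fragment; generic PHY API calls only. */
struct phy *usb2_phy;
int ret;

usb2_phy = devm_phy_get(dev, "usb");	/* "usb" is an assumed phy-name */
if (IS_ERR(usb2_phy))
	return PTR_ERR(usb2_phy);

ret = phy_power_on(usb2_phy);		/* ends up in ma35_usb_phy_power_on() */
if (ret)
	return ret;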
diff --git a/drivers/phy/phy-airoha-pcie.c b/drivers/phy/phy-airoha-pcie.c
index bd3edaa986c8..1e410eb41058 100644
--- a/drivers/phy/phy-airoha-pcie.c
+++ b/drivers/phy/phy-airoha-pcie.c
@@ -18,6 +18,9 @@
#define LEQ_LEN_CTRL_MAX_VAL 7
#define FREQ_LOCK_MAX_ATTEMPT 10
+/* Time in ms the hardware needs to complete PCIe PHY initialization */
+#define PHY_HW_INIT_TIME_MS 30
+
enum airoha_pcie_port_gen {
PCIE_PORT_GEN1 = 1,
PCIE_PORT_GEN2,
@@ -1181,7 +1184,8 @@ static int airoha_pcie_phy_init(struct phy *phy)
airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
PCIE_DA_XPON_CDR_PR_PWDB);
- usleep_range(100, 200);
+ /* Wait for the PCIe PHY to complete initialization before returning */
+ msleep(PHY_HW_INIT_TIME_MS);
return 0;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7b00945f7191..a8adc3214bfe 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -2190,24 +2190,25 @@ static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
void __iomem *serdes = qmp->dp_serdes;
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- qmp_configure(serdes, cfg->dp_serdes_tbl, cfg->dp_serdes_tbl_num);
+ qmp_configure(qmp->dev, serdes, cfg->dp_serdes_tbl,
+ cfg->dp_serdes_tbl_num);
switch (dp_opts->link_rate) {
case 1620:
- qmp_configure(serdes, cfg->serdes_tbl_rbr,
- cfg->serdes_tbl_rbr_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_rbr,
+ cfg->serdes_tbl_rbr_num);
break;
case 2700:
- qmp_configure(serdes, cfg->serdes_tbl_hbr,
- cfg->serdes_tbl_hbr_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr,
+ cfg->serdes_tbl_hbr_num);
break;
case 5400:
- qmp_configure(serdes, cfg->serdes_tbl_hbr2,
- cfg->serdes_tbl_hbr2_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr2,
+ cfg->serdes_tbl_hbr2_num);
break;
case 8100:
- qmp_configure(serdes, cfg->serdes_tbl_hbr3,
- cfg->serdes_tbl_hbr3_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr3,
+ cfg->serdes_tbl_hbr3_num);
break;
default:
/* Other link rates aren't supported */
@@ -2807,8 +2808,8 @@ static int qmp_combo_dp_power_on(struct phy *phy)
qmp_combo_dp_serdes_init(qmp);
- qmp_configure_lane(tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1);
- qmp_configure_lane(tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2);
/* Configure special DP tx tunings */
cfg->configure_dp_tx(qmp);
@@ -2850,7 +2851,7 @@ static int qmp_combo_usb_power_on(struct phy *phy)
unsigned int val;
int ret;
- qmp_configure(serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
ret = clk_prepare_enable(qmp->pipe_clk);
if (ret) {
@@ -2859,16 +2860,17 @@ static int qmp_combo_usb_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_configure_lane(tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_configure_lane(rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
- qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (pcs_usb)
- qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+ qmp_configure(qmp->dev, pcs_usb, cfg->pcs_usb_tbl,
+ cfg->pcs_usb_tbl_num);
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-common.h b/drivers/phy/qualcomm/phy-qcom-qmp-common.h
index 799384210509..b945fc14cece 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-common.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-common.h
@@ -9,6 +9,7 @@
struct qmp_phy_init_tbl {
unsigned int offset;
unsigned int val;
+ char *name;
/*
* mask of lanes for which this register is written
* for cases when second lane needs different values
@@ -20,6 +21,7 @@ struct qmp_phy_init_tbl {
{ \
.offset = o, \
.val = v, \
+ .name = #o, \
.lane_mask = 0xff, \
}
@@ -27,13 +29,13 @@ struct qmp_phy_init_tbl {
{ \
.offset = o, \
.val = v, \
+ .name = #o, \
.lane_mask = l, \
}
-static inline void qmp_configure_lane(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num,
- u8 lane_mask)
+static inline void qmp_configure_lane(struct device *dev, void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num, u8 lane_mask)
{
int i;
const struct qmp_phy_init_tbl *t = tbl;
@@ -45,15 +47,16 @@ static inline void qmp_configure_lane(void __iomem *base,
if (!(t->lane_mask & lane_mask))
continue;
+ dev_dbg(dev, "Writing Reg: %s Offset: 0x%04x Val: 0x%02x\n",
+ t->name, t->offset, t->val);
writel(t->val, base + t->offset);
}
}
-static inline void qmp_configure(void __iomem *base,
- const struct qmp_phy_init_tbl tbl[],
- int num)
+static inline void qmp_configure(struct device *dev, void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[], int num)
{
- qmp_configure_lane(base, tbl, num, 0xff);
+ qmp_configure_lane(dev, base, tbl, num, 0xff);
}
#endif
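The stringification added above is what makes the new dev_dbg() trace readable. As a sketch with a placeholder FOO_REG macro (not a real register):

/* QMP_PHY_INIT_CFG(FOO_REG, 0x42) now expands to: */
{ .offset = FOO_REG, .val = 0x42, .name = "FOO_REG", .lane_mask = 0xff, },

/*
 * With dynamic debug enabled, each table write is then logged roughly as
 * (illustrative output, not a captured trace):
 *   Writing Reg: FOO_REG Offset: 0x01ac Val: 0x42
 */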
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index 0442b3120563..a7c65cfe31df 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -288,7 +288,7 @@ static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
unsigned int val;
int ret;
- qmp_configure(serdes, serdes_tbl, serdes_tbl_num);
+ qmp_configure(qmp->dev, serdes, serdes_tbl, serdes_tbl_num);
qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
@@ -431,9 +431,9 @@ static int qmp_pcie_msm8996_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/*
* Pull out PHY from POWER DOWN state.
@@ -725,7 +725,6 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
- struct device_node *child;
struct phy_provider *phy_provider;
void __iomem *serdes;
const struct qmp_phy_cfg *cfg = NULL;
@@ -773,13 +772,13 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
return -ENOMEM;
id = 0;
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
/* Create per-lane phy */
ret = qmp_pcie_msm8996_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
- goto err_node_put;
+ return ret;
}
/*
@@ -790,7 +789,7 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
if (ret) {
dev_err(qmp->dev,
"failed to register pipe clock source\n");
- goto err_node_put;
+ return ret;
}
id++;
@@ -799,10 +798,6 @@ static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
-
-err_node_put:
- of_node_put(child);
- return ret;
}
static struct platform_driver qmp_pcie_msm8996_driver = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 06cd9787e700..f71787fb4d7e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -1242,6 +1242,10 @@ static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL, 0x0f),
};
+static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x1c),
+};
+
static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x88),
@@ -3654,6 +3658,41 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x2_pciephy_cfg = {
.ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl,
.ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl),
},
+
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = sm8550_qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+ .regs = pciephy_v6_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+ .has_nocsr_reset = true,
+};
+
+static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = {
+ .lanes = 4,
+
+ .offsets = &qmp_pcie_offsets_v6_20,
+
+ .tbls = {
+ .serdes = x1e80100_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_serdes_tbl),
+ .tx = x1e80100_qmp_gen4x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_tx_tbl),
+ .rx = x1e80100_qmp_gen4x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_rx_tbl),
+ .pcs = x1e80100_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc = x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl,
+ .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl),
+ },
+
+ .serdes_4ln_tbl = x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl,
+ .serdes_4ln_num = ARRAY_SIZE(x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl),
+
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = sm8550_qmp_phy_vreg_l,
@@ -3669,18 +3708,30 @@ static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
const struct qmp_pcie_offsets *offs = cfg->offsets;
- void __iomem *tx3, *rx3, *tx4, *rx4;
+ void __iomem *serdes, *tx3, *rx3, *tx4, *rx4, *pcs, *pcs_misc, *ln_shrd;
+ serdes = qmp->port_b + offs->serdes;
tx3 = qmp->port_b + offs->tx;
rx3 = qmp->port_b + offs->rx;
tx4 = qmp->port_b + offs->tx2;
rx4 = qmp->port_b + offs->rx2;
+ pcs = qmp->port_b + offs->pcs;
+ pcs_misc = qmp->port_b + offs->pcs_misc;
+ ln_shrd = qmp->port_b + offs->ln_shrd;
- qmp_configure_lane(tx3, tbls->tx, tbls->tx_num, 1);
- qmp_configure_lane(rx3, tbls->rx, tbls->rx_num, 1);
+ qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num);
- qmp_configure_lane(tx4, tbls->tx, tbls->tx_num, 2);
- qmp_configure_lane(rx4, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(qmp->dev, tx3, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(qmp->dev, rx3, tbls->rx, tbls->rx_num, 1);
+
+ qmp_configure_lane(qmp->dev, tx4, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(qmp->dev, rx4, tbls->rx, tbls->rx_num, 2);
+
+ qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num);
+ qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
+
+ qmp_configure(qmp->dev, ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
}
static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls)
@@ -3698,25 +3749,26 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
if (!tbls)
return;
- qmp_configure(serdes, tbls->serdes, tbls->serdes_num);
+ qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num);
- qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
- qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
+ qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1);
if (cfg->lanes >= 2) {
- qmp_configure_lane(tx2, tbls->tx, tbls->tx_num, 2);
- qmp_configure_lane(rx2, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(qmp->dev, tx2, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(qmp->dev, rx2, tbls->rx, tbls->rx_num, 2);
}
- qmp_configure(pcs, tbls->pcs, tbls->pcs_num);
- qmp_configure(pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
+ qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num);
+ qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num);
if (cfg->lanes >= 4 && qmp->tcsr_4ln_config) {
- qmp_configure(serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num);
+ qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl,
+ cfg->serdes_4ln_num);
qmp_pcie_init_port_b(qmp, tbls);
}
- qmp_configure(ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
+ qmp_configure(qmp->dev, ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
}
static int qmp_pcie_init(struct phy *phy)
@@ -4423,6 +4475,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
}, {
.compatible = "qcom,x1e80100-qmp-gen4x2-pcie-phy",
.data = &x1e80100_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,x1e80100-qmp-gen4x4-pcie-phy",
+ .data = &x1e80100_qmp_gen4x4_pciephy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index a57e8a4657f4..d964bdfe8700 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -1527,7 +1527,7 @@ static void qmp_ufs_serdes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tb
{
void __iomem *serdes = qmp->serdes;
- qmp_configure(serdes, tbls->serdes, tbls->serdes_num);
+ qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num);
}
static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
@@ -1536,12 +1536,12 @@ static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbl
void __iomem *tx = qmp->tx;
void __iomem *rx = qmp->rx;
- qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
- qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
+ qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1);
+ qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1);
if (cfg->lanes >= 2) {
- qmp_configure_lane(qmp->tx2, tbls->tx, tbls->tx_num, 2);
- qmp_configure_lane(qmp->rx2, tbls->rx, tbls->rx_num, 2);
+ qmp_configure_lane(qmp->dev, qmp->tx2, tbls->tx, tbls->tx_num, 2);
+ qmp_configure_lane(qmp->dev, qmp->rx2, tbls->rx, tbls->rx_num, 2);
}
}
@@ -1549,7 +1549,7 @@ static void qmp_ufs_pcs_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls
{
void __iomem *pcs = qmp->pcs;
- qmp_configure(pcs, tbls->pcs, tbls->pcs_num);
+ qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num);
}
static int qmp_ufs_get_gear_overlay(struct qmp_ufs *qmp, const struct qmp_phy_cfg *cfg)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 9b0eb87b1680..2fd49355aa37 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1649,7 +1649,7 @@ static int qmp_usb_serdes_init(struct qmp_usb *qmp)
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qmp_configure(serdes, serdes_tbl, serdes_tbl_num);
+ qmp_configure(qmp->dev, serdes, serdes_tbl, serdes_tbl_num);
return 0;
}
@@ -1730,13 +1730,13 @@ static int qmp_usb_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (pcs_usb)
- qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+ qmp_configure(qmp->dev, pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
index 5cbc5fd529eb..d4fa1063ea61 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
@@ -526,7 +526,8 @@ static int qmp_usbc_power_on(struct phy *phy)
unsigned int val;
int ret;
- qmp_configure(qmp->serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
+ qmp_configure(qmp->dev, qmp->serdes, cfg->serdes_tbl,
+ cfg->serdes_tbl_num);
ret = clk_prepare_enable(qmp->pipe_clk);
if (ret) {
@@ -535,13 +536,13 @@ static int qmp_usbc_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qmp_configure_lane(qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_configure_lane(qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_configure_lane(qmp->dev, qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- qmp_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qmp_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_configure_lane(qmp->dev, qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
- qmp_configure(qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_configure(qmp->dev, qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/* Pull PHY out of reset state */
qphy_clrbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 7594f64eb737..58e123305152 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -19,12 +19,14 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/string.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
/******* USB2.0 Host registers (original offset is +0x200) *******/
#define USB2_INT_ENABLE 0x000
+#define USB2_AHB_BUS_CTR 0x008
#define USB2_USBCTR 0x00c
#define USB2_SPD_RSM_TIMSET 0x10c
#define USB2_OC_TIMSET 0x110
@@ -40,6 +42,10 @@
#define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) /* For EHCI */
#define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) /* For OHCI */
+/* AHB_BUS_CTR */
+#define USB2_AHB_BUS_CTR_MBL_MASK GENMASK(1, 0)
+#define USB2_AHB_BUS_CTR_MBL_INCR4 2
+
/* USBCTR */
#define USB2_USBCTR_DIRPD BIT(2)
#define USB2_USBCTR_PLL_RST BIT(1)
@@ -111,6 +117,7 @@ struct rcar_gen3_chan {
struct extcon_dev *extcon;
struct rcar_gen3_phy rphys[NUM_OF_PHYS];
struct regulator *vbus;
+ struct reset_control *rstc;
struct work_struct work;
struct mutex lock; /* protects rphys[...].powered */
enum usb_dr_mode dr_mode;
@@ -125,6 +132,7 @@ struct rcar_gen3_chan {
struct rcar_gen3_phy_drv_data {
const struct phy_ops *phy_usb2_ops;
bool no_adp_ctrl;
+ bool init_bus;
};
/*
@@ -575,6 +583,12 @@ static const struct rcar_gen3_phy_drv_data rz_g2l_phy_usb2_data = {
.no_adp_ctrl = true,
};
+static const struct rcar_gen3_phy_drv_data rz_g3s_phy_usb2_data = {
+ .phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
+ .no_adp_ctrl = true,
+ .init_bus = true,
+};
+
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
{
.compatible = "renesas,usb2-phy-r8a77470",
@@ -597,6 +611,10 @@ static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
.data = &rz_g2l_phy_usb2_data,
},
{
+ .compatible = "renesas,usb2-phy-r9a08g045",
+ .data = &rz_g3s_phy_usb2_data,
+ },
+ {
.compatible = "renesas,rcar-gen3-usb2-phy",
.data = &rcar_gen3_phy_usb2_data,
},
@@ -650,6 +668,35 @@ static enum usb_dr_mode rcar_gen3_get_dr_mode(struct device_node *np)
return candidate;
}
+static int rcar_gen3_phy_usb2_init_bus(struct rcar_gen3_chan *channel)
+{
+ struct device *dev = channel->dev;
+ int ret;
+ u32 val;
+
+ channel->rstc = devm_reset_control_array_get_shared(dev);
+ if (IS_ERR(channel->rstc))
+ return PTR_ERR(channel->rstc);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(channel->rstc);
+ if (ret)
+ goto rpm_put;
+
+ val = readl(channel->base + USB2_AHB_BUS_CTR);
+ val &= ~USB2_AHB_BUS_CTR_MBL_MASK;
+ val |= USB2_AHB_BUS_CTR_MBL_INCR4;
+ writel(val, channel->base + USB2_AHB_BUS_CTR);
+
+rpm_put:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
{
const struct rcar_gen3_phy_drv_data *phy_data;
@@ -703,6 +750,15 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
goto error;
}
+ platform_set_drvdata(pdev, channel);
+ channel->dev = dev;
+
+ if (phy_data->init_bus) {
+ ret = rcar_gen3_phy_usb2_init_bus(channel);
+ if (ret)
+ goto error;
+ }
+
channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl;
if (phy_data->no_adp_ctrl)
channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
@@ -733,9 +789,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->vbus = NULL;
}
- platform_set_drvdata(pdev, channel);
- channel->dev = dev;
-
provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
@@ -762,6 +815,7 @@ static void rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
if (channel->is_otg_channel)
device_remove_file(&pdev->dev, &dev_attr_role);
+ reset_control_assert(channel->rstc);
pm_runtime_disable(&pdev->dev);
};
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
index 946c01210ac8..9f084697dd05 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -8,6 +8,7 @@
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -15,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -190,6 +192,8 @@
#define LN3_TX_SER_RATE_SEL_HBR2 BIT(3)
#define LN3_TX_SER_RATE_SEL_HBR3 BIT(2)
+#define HDMI20_MAX_RATE 600000000
+
struct lcpll_config {
u32 bit_rate;
u8 lcvco_mode_en;
@@ -272,6 +276,12 @@ struct rk_hdptx_phy {
struct clk_bulk_data *clks;
int nr_clks;
struct reset_control_bulk_data rsts[RST_MAX];
+
+ /* clk provider */
+ struct clk_hw hw;
+ unsigned long rate;
+
+ atomic_t usage_count;
};
static const struct ropll_config ropll_tmds_cfg[] = {
@@ -759,6 +769,8 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
struct ropll_config rc = {0};
int i;
+ hdptx->rate = rate * 100;
+
for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
if (rate == ropll_tmds_cfg[i].bit_rate) {
cfg = &ropll_tmds_cfg[i];
@@ -822,19 +834,6 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
unsigned int rate)
{
- u32 val;
- int ret;
-
- ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val);
- if (ret)
- return ret;
-
- if (!(val & HDPTX_O_PLL_LOCK_DONE)) {
- ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate);
- if (ret)
- return ret;
- }
-
rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_sb_init_seq);
regmap_write(hdptx->regmap, LNTOP_REG(0200), 0x06);
@@ -856,10 +855,68 @@ static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
return rk_hdptx_post_enable_lane(hdptx);
}
+static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx,
+ unsigned int rate)
+{
+ u32 status;
+ int ret;
+
+ if (atomic_inc_return(&hdptx->usage_count) > 1)
+ return 0;
+
+ ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status);
+ if (ret)
+ goto dec_usage;
+
+ if (status & HDPTX_O_PLL_LOCK_DONE)
+ dev_warn(hdptx->dev, "PLL locked by unknown consumer!\n");
+
+ if (rate) {
+ ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate);
+ if (ret)
+ goto dec_usage;
+ }
+
+ return 0;
+
+dec_usage:
+ atomic_dec(&hdptx->usage_count);
+ return ret;
+}
+
+static int rk_hdptx_phy_consumer_put(struct rk_hdptx_phy *hdptx, bool force)
+{
+ u32 status;
+ int ret;
+
+ ret = atomic_dec_return(&hdptx->usage_count);
+ if (ret > 0)
+ return 0;
+
+ if (ret < 0) {
+ dev_warn(hdptx->dev, "Usage count underflow!\n");
+ ret = -EINVAL;
+ } else {
+ ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status);
+ if (!ret) {
+ if (status & HDPTX_O_PLL_LOCK_DONE)
+ rk_hdptx_phy_disable(hdptx);
+ return 0;
+ } else if (force) {
+ return 0;
+ }
+ }
+
+ atomic_inc(&hdptx->usage_count);
+ return ret;
+}
+
static int rk_hdptx_phy_power_on(struct phy *phy)
{
struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
- int ret, bus_width = phy_get_bus_width(hdptx->phy);
+ int bus_width = phy_get_bus_width(hdptx->phy);
+ int ret;
+
/*
* FIXME: Temporary workaround to pass pixel_clk_rate
* from the HDMI bridge driver until phy_configure_opts_hdmi
@@ -870,15 +927,13 @@ static int rk_hdptx_phy_power_on(struct phy *phy)
dev_dbg(hdptx->dev, "%s bus_width=%x rate=%u\n",
__func__, bus_width, rate);
- ret = pm_runtime_resume_and_get(hdptx->dev);
- if (ret) {
- dev_err(hdptx->dev, "Failed to resume phy: %d\n", ret);
+ ret = rk_hdptx_phy_consumer_get(hdptx, rate);
+ if (ret)
return ret;
- }
ret = rk_hdptx_ropll_tmds_mode_config(hdptx, rate);
if (ret)
- pm_runtime_put(hdptx->dev);
+ rk_hdptx_phy_consumer_put(hdptx, true);
return ret;
}
@@ -886,16 +941,8 @@ static int rk_hdptx_phy_power_on(struct phy *phy)
static int rk_hdptx_phy_power_off(struct phy *phy)
{
struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
- u32 val;
- int ret;
- ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val);
- if (ret == 0 && (val & HDPTX_O_PLL_LOCK_DONE))
- rk_hdptx_phy_disable(hdptx);
-
- pm_runtime_put(hdptx->dev);
-
- return ret;
+ return rk_hdptx_phy_consumer_put(hdptx, false);
}
static const struct phy_ops rk_hdptx_phy_ops = {
@@ -904,6 +951,99 @@ static const struct phy_ops rk_hdptx_phy_ops = {
.owner = THIS_MODULE,
};
+static struct rk_hdptx_phy *to_rk_hdptx_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct rk_hdptx_phy, hw);
+}
+
+static int rk_hdptx_phy_clk_prepare(struct clk_hw *hw)
+{
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
+
+ return rk_hdptx_phy_consumer_get(hdptx, hdptx->rate / 100);
+}
+
+static void rk_hdptx_phy_clk_unprepare(struct clk_hw *hw)
+{
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
+
+ rk_hdptx_phy_consumer_put(hdptx, true);
+}
+
+static unsigned long rk_hdptx_phy_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
+
+ return hdptx->rate;
+}
+
+static long rk_hdptx_phy_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 bit_rate = rate / 100;
+ int i;
+
+ if (rate > HDMI20_MAX_RATE)
+ return rate;
+
+ for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
+ if (bit_rate == ropll_tmds_cfg[i].bit_rate)
+ break;
+
+ if (i == ARRAY_SIZE(ropll_tmds_cfg) &&
+ !rk_hdptx_phy_clk_pll_calc(bit_rate, NULL))
+ return -EINVAL;
+
+ return rate;
+}
+
+static int rk_hdptx_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
+
+ return rk_hdptx_ropll_tmds_cmn_config(hdptx, rate / 100);
+}
+
+static const struct clk_ops hdptx_phy_clk_ops = {
+ .prepare = rk_hdptx_phy_clk_prepare,
+ .unprepare = rk_hdptx_phy_clk_unprepare,
+ .recalc_rate = rk_hdptx_phy_clk_recalc_rate,
+ .round_rate = rk_hdptx_phy_clk_round_rate,
+ .set_rate = rk_hdptx_phy_clk_set_rate,
+};
+
+static int rk_hdptx_phy_clk_register(struct rk_hdptx_phy *hdptx)
+{
+ struct device *dev = hdptx->dev;
+ const char *name, *pname;
+ struct clk *refclk;
+ int ret, id;
+
+ refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(refclk))
+ return dev_err_probe(dev, PTR_ERR(refclk),
+ "Failed to get ref clock\n");
+
+ id = of_alias_get_id(dev->of_node, "hdptxphy");
+ name = id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0";
+ pname = __clk_get_name(refclk);
+
+ hdptx->hw.init = CLK_HW_INIT(name, pname, &hdptx_phy_clk_ops,
+ CLK_GET_RATE_NOCACHE);
+
+ ret = devm_clk_hw_register(dev, &hdptx->hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register clock\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &hdptx->hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clk provider\n");
+ return 0;
+}
+
static int rk_hdptx_phy_runtime_suspend(struct device *dev)
{
struct rk_hdptx_phy *hdptx = dev_get_drvdata(dev);
@@ -976,6 +1116,10 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(hdptx->grf),
"Could not get GRF syscon\n");
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
hdptx->phy = devm_phy_create(dev, NULL, &rk_hdptx_phy_ops);
if (IS_ERR(hdptx->phy))
return dev_err_probe(dev, PTR_ERR(hdptx->phy),
@@ -985,10 +1129,6 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
phy_set_drvdata(hdptx->phy, hdptx);
phy_set_bus_width(hdptx->phy, 8);
- ret = devm_pm_runtime_enable(dev);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
-
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider))
return dev_err_probe(dev, PTR_ERR(phy_provider),
@@ -998,7 +1138,7 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
reset_control_deassert(hdptx->rsts[RST_CMN].rstc);
reset_control_deassert(hdptx->rsts[RST_INIT].rstc);
- return 0;
+ return rk_hdptx_phy_clk_register(hdptx);
}
static const struct dev_pm_ops rk_hdptx_phy_pm_ops = {
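With the clock registered at the end of probe above, the HDMI bridge can drive the PHY PLL through the common clock API. A hedged consumer sketch; the "hdmiphy" clock name and the 594 MHz rate are illustrative assumptions:

/* Hypothetical consumer fragment; common clk API calls only. */
struct clk *phy_clk;
int ret;

phy_clk = devm_clk_get(dev, "hdmiphy");
if (IS_ERR(phy_clk))
	return PTR_ERR(phy_clk);

/* Routed to rk_hdptx_phy_clk_set_rate() -> rk_hdptx_ropll_tmds_cmn_config() */
ret = clk_set_rate(phy_clk, 594000000);
if (ret)
	return ret;

/* Routed to rk_hdptx_phy_clk_prepare(), which takes a PHY usage reference */
ret = clk_prepare_enable(phy_clk);
if (ret)
	return ret;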
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 9cbf90142950..c421b495eb0f 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -607,7 +607,7 @@ exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(struct exynos5_usbdrd_phy *phy_drd)
reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL);
reg &= ~SECPMACTL_PMA_REF_FREQ_SEL;
- reg |= FIELD_PREP_CONST(SECPMACTL_PMA_REF_FREQ_SEL, 1);
+ reg |= FIELD_PREP(SECPMACTL_PMA_REF_FREQ_SEL, 1);
/* SFR reset */
reg |= (SECPMACTL_PMA_LOW_PWR | SECPMACTL_PMA_APB_SW_RST);
reg &= ~(SECPMACTL_PMA_ROPLL_REF_CLK_SEL |
@@ -1123,19 +1123,19 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
reg &= ~SSPPLLCTL_FSEL;
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 7);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 7);
break;
case EXYNOS5_FSEL_26MHZ:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 6);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 6);
break;
case EXYNOS5_FSEL_24MHZ:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 2);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 2);
break;
case EXYNOS5_FSEL_20MHZ:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 1);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 1);
break;
case EXYNOS5_FSEL_19MHZ2:
- reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 0);
+ reg |= FIELD_PREP(SSPPLLCTL_FSEL, 0);
break;
default:
dev_warn(phy_drd->dev, "unsupported ref clk: %#.2x\n",
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 673449607c02..3bf3aff4b1c7 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -7,6 +7,7 @@
*/
#include <dt-bindings/phy/phy.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -644,7 +645,6 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
struct device_node *node = am654_phy->of_node;
struct device *dev = am654_phy->dev;
struct serdes_am654_clk_mux *mux;
- struct device_node *regmap_node;
const char **parent_names;
struct clk_init_data *init;
unsigned int num_parents;
@@ -652,7 +652,6 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
const __be32 *addr;
unsigned int reg;
struct clk *clk;
- int ret = 0;
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -660,41 +659,30 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
init = &mux->clk_data;
- regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0);
- if (!regmap_node) {
- dev_err(dev, "Fail to get serdes-clk node\n");
- ret = -ENODEV;
- goto out_put_node;
- }
+ struct device_node *regmap_node __free(device_node) =
+ of_parse_phandle(node, "ti,serdes-clk", 0);
+ if (!regmap_node)
+ return dev_err_probe(dev, -ENODEV, "Fail to get serdes-clk node\n");
regmap = syscon_node_to_regmap(regmap_node->parent);
- if (IS_ERR(regmap)) {
- dev_err(dev, "Fail to get Syscon regmap\n");
- ret = PTR_ERR(regmap);
- goto out_put_node;
- }
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Fail to get Syscon regmap\n");
num_parents = of_clk_get_parent_count(node);
- if (num_parents < 2) {
- dev_err(dev, "SERDES clock must have parents\n");
- ret = -EINVAL;
- goto out_put_node;
- }
+ if (num_parents < 2)
+ return dev_err_probe(dev, -EINVAL, "SERDES clock must have parents\n");
parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
GFP_KERNEL);
- if (!parent_names) {
- ret = -ENOMEM;
- goto out_put_node;
- }
+ if (!parent_names)
+ return -ENOMEM;
of_clk_parent_fill(node, parent_names, num_parents);
addr = of_get_address(regmap_node, 0, NULL, NULL);
- if (!addr) {
- ret = -EINVAL;
- goto out_put_node;
- }
+ if (!addr)
+ return -EINVAL;
reg = be32_to_cpu(*addr);
@@ -710,16 +698,12 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
mux->hw.init = init;
clk = devm_clk_register(dev, &mux->hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto out_put_node;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
am654_phy->clks[clock_num] = clk;
-out_put_node:
- of_node_put(regmap_node);
- return ret;
+ return 0;
}
static const struct of_device_id serdes_am654_id_table[] = {
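The serdes_am654 conversion above leans on the cleanup.h attribute syntax. A standalone sketch of the idiom, with a placeholder "vendor,example" phandle name:

#include <linux/cleanup.h>
#include <linux/of.h>

/* Hypothetical helper; the __free(device_node) annotation is the point. */
static int example_lookup(struct device_node *node)
{
	struct device_node *target __free(device_node) =
		of_parse_phandle(node, "vendor,example", 0);

	if (!target)
		return -ENODEV;

	/* ... use target ... */

	return 0;	/* of_node_put(target) runs automatically here */
}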
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index b30bf740e2e0..103b266fec77 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -468,11 +468,9 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
priv->regmap = device_node_to_regmap(node);
- if (IS_ERR(priv->regmap)) {
- ret = PTR_ERR(priv->regmap);
- dev_err(dev, "Failed to get syscon %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap),
+ "Failed to get syscon\n");
priv->no_offset = true;
}
@@ -485,11 +483,9 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->phy_provider =
devm_of_phy_provider_register(dev,
phy_gmii_sel_of_xlate);
- if (IS_ERR(priv->phy_provider)) {
- ret = PTR_ERR(priv->phy_provider);
- dev_err(dev, "Failed to create phy provider %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->phy_provider))
+ return dev_err_probe(dev, PTR_ERR(priv->phy_provider),
+ "Failed to create phy provider\n");
return 0;
}
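This conversion, like the madera one below, follows the dev_err_probe() pattern: log the failure (or quietly record an -EPROBE_DEFER) and return the error in one statement. A hedged sketch with a hypothetical clock lookup:

/* Hypothetical fragment; dev_err_probe() handles deferral logging itself. */
priv->clk = devm_clk_get(dev, "fck");	/* "fck" is an assumed clock name */
if (IS_ERR(priv->clk))
	return dev_err_probe(dev, PTR_ERR(priv->clk),
			     "Failed to get functional clock\n");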
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 7f626c597025..a6c0c5607ffd 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1179,14 +1179,13 @@ static int wiz_clock_probe(struct wiz *wiz, struct device_node *node)
ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i],
clk_mux_sel[i].table);
+ of_node_put(clk_node);
if (ret) {
dev_err_probe(dev, ret, "Failed to register %s clock\n",
node_name);
- of_node_put(clk_node);
goto err;
}
- of_node_put(clk_node);
}
for (i = 0; i < wiz->clk_div_sel_num; i++) {
@@ -1199,14 +1198,12 @@ static int wiz_clock_probe(struct wiz *wiz, struct device_node *node)
ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i],
clk_div_sel[i].table);
+ of_node_put(clk_node);
if (ret) {
dev_err_probe(dev, ret, "Failed to register %s clock\n",
node_name);
- of_node_put(clk_node);
goto err;
}
-
- of_node_put(clk_node);
}
return 0;
@@ -1407,7 +1404,7 @@ MODULE_DEVICE_TABLE(of, wiz_id_table);
static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
{
- struct device_node *serdes, *subnode;
+ struct device_node *serdes;
serdes = of_get_child_by_name(dev->of_node, "serdes");
if (!serdes) {
@@ -1415,7 +1412,7 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
return -EINVAL;
}
- for_each_child_of_node(serdes, subnode) {
+ for_each_child_of_node_scoped(serdes, subnode) {
u32 reg, num_lanes = 1, phy_type = PHY_NONE;
int ret, i;
@@ -1425,7 +1422,6 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
ret = of_property_read_u32(subnode, "reg", &reg);
if (ret) {
- of_node_put(subnode);
dev_err(dev,
"%s: Reading \"reg\" from \"%s\" failed: %d\n",
__func__, subnode->name, ret);
@@ -1578,8 +1574,8 @@ static int wiz_probe(struct platform_device *pdev)
phy_reset_dev = &wiz->wiz_phy_reset_dev;
phy_reset_dev->dev = dev;
- phy_reset_dev->ops = &wiz_phy_reset_ops,
- phy_reset_dev->owner = THIS_MODULE,
+ phy_reset_dev->ops = &wiz_phy_reset_ops;
+ phy_reset_dev->owner = THIS_MODULE;
phy_reset_dev->of_node = node;
/* Reset for each of the lane and one for the entire SERDES */
phy_reset_dev->nr_resets = num_lanes + 1;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 7e4f93a3bc7a..354536de564b 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -194,6 +194,13 @@ config PINCTRL_DIGICOLOR
select PINMUX
select GENERIC_PINCONF
+config PINCTRL_EP93XX
+ bool
+ depends on ARCH_EP93XX || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ select MFD_SYSCON
+
config PINCTRL_EQUILIBRIUM
tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
depends on OF && HAS_IOMEM
@@ -213,6 +220,21 @@ config PINCTRL_EQUILIBRIUM
desired pin functions, configure GPIO attributes for LGM SoC pins.
Pin muxing and pin config settings are retrieved from device tree.
+config PINCTRL_EYEQ5
+ bool "Mobileye EyeQ5 pinctrl driver"
+ depends on OF
+ depends on MACH_EYEQ5 || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ select AUXILIARY_BUS
+ default MACH_EYEQ5
+ help
+ Pin controller driver for the Mobileye EyeQ5 platform. It does both
+ pin config & pin muxing. It does not handle GPIO.
+
+ Pin muxing supports two functions for each pin: first is GPIO, second
+ is pin-dependent. Pin config is about bias & drive strength.
+
config PINCTRL_GEMINI
bool
depends on ARCH_GEMINI
@@ -583,6 +605,7 @@ source "drivers/pinctrl/qcom/Kconfig"
source "drivers/pinctrl/realtek/Kconfig"
source "drivers/pinctrl/renesas/Kconfig"
source "drivers/pinctrl/samsung/Kconfig"
+source "drivers/pinctrl/sophgo/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/sprd/Kconfig"
source "drivers/pinctrl/starfive/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index cc809669405a..97823f52b972 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -23,6 +23,8 @@ obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
+obj-$(CONFIG_PINCTRL_EP93XX) += pinctrl-ep93xx.o
+obj-$(CONFIG_PINCTRL_EYEQ5) += pinctrl-eyeq5.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o
@@ -73,6 +75,7 @@ obj-y += qcom/
obj-$(CONFIG_ARCH_REALTEK) += realtek/
obj-$(CONFIG_PINCTRL_RENESAS) += renesas/
obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
+obj-y += sophgo/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_SOC_STARFIVE) += starfive/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 184641e221d4..cc1fe0555e19 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -1280,6 +1280,7 @@ static const struct of_device_id bcm2835_pinctrl_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, bcm2835_pinctrl_match);
static int bcm2835_pinctrl_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index 898b197c3738..2932d7aba725 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1063,12 +1063,9 @@ static int madera_pin_probe(struct platform_device *pdev)
if (pdata->gpio_configs) {
ret = pinctrl_register_mappings(pdata->gpio_configs,
pdata->n_gpio_configs);
- if (ret) {
- dev_err(priv->dev,
- "Failed to register pdata mappings (%d)\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to register pdata mappings\n");
}
ret = pinctrl_enable(priv->pctl);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 314ab93d7691..4061890a1748 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1971,7 +1971,7 @@ static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
static void pinctrl_init_debugfs(void)
{
debugfs_root = debugfs_create_dir("pinctrl", NULL);
- if (IS_ERR(debugfs_root) || !debugfs_root) {
+ if (IS_ERR(debugfs_root)) {
pr_warn("failed to create debugfs directory\n");
debugfs_root = NULL;
return;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
index 2991047535bc..8f15c4c4dc44 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
@@ -130,7 +130,7 @@ static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev,
cfg[j++] = pinconf_to_config_packed(IMX_SCMI_PIN_DAISY_CFG, input_val);
}
- configs = kmemdup(cfg, ncfg * sizeof(unsigned long), GFP_KERNEL);
+ configs = kmemdup_array(cfg, ncfg, sizeof(unsigned long), GFP_KERNEL);
new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
new_map[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_id);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 9c2680df082c..d05c2c478e79 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -804,14 +804,14 @@ int imx_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(imx_pinctrl_probe);
-static int __maybe_unused imx_pinctrl_suspend(struct device *dev)
+static int imx_pinctrl_suspend(struct device *dev)
{
struct imx_pinctrl *ipctl = dev_get_drvdata(dev);
return pinctrl_force_sleep(ipctl->pctl);
}
-static int __maybe_unused imx_pinctrl_resume(struct device *dev)
+static int imx_pinctrl_resume(struct device *dev)
{
struct imx_pinctrl *ipctl = dev_get_drvdata(dev);
@@ -819,8 +819,7 @@ static int __maybe_unused imx_pinctrl_resume(struct device *dev)
}
const struct dev_pm_ops imx_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend,
- imx_pinctrl_resume)
+ LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend, imx_pinctrl_resume)
};
EXPORT_SYMBOL_GPL(imx_pinctrl_pm_ops);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
index 529eebe46298..e59e4fc80193 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8mq.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c
@@ -341,7 +341,7 @@ static struct platform_driver imx8mq_pinctrl_driver = {
.driver = {
.name = "imx8mq-pinctrl",
.of_match_table = imx8mq_pinctrl_of_match,
- .pm = &imx_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&imx_pinctrl_pm_ops),
.suppress_bind_attrs = true,
},
.probe = imx8mq_pinctrl_probe,
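
The two freescale hunks above form one pattern: LATE_SYSTEM_SLEEP_PM_OPS()
(unlike its SET_* predecessor) always references the callbacks, so they no
longer need __maybe_unused, and pm_sleep_ptr() turns the ops pointer into
NULL when CONFIG_PM_SLEEP=n, letting the linker discard the unreferenced
functions. A hedged sketch with a hypothetical driver:

	static int acme_suspend(struct device *dev)	/* no __maybe_unused */
	{
		/* save controller state here */
		return 0;
	}

	static const struct dev_pm_ops acme_pm_ops = {
		LATE_SYSTEM_SLEEP_PM_OPS(acme_suspend, NULL)
	};

	static struct platform_driver acme_driver = {
		.driver = {
			.name	= "acme-pinctrl",
			.pm	= pm_sleep_ptr(&acme_pm_ops),
		},
	};
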
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 4e87f5b875c0..4533c4d0a9e7 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -560,9 +560,10 @@ static DEFINE_RAW_SPINLOCK(byt_lock);
static void __iomem *byt_gpio_reg(struct intel_pinctrl *vg, unsigned int offset,
int reg)
{
- struct intel_community *comm = intel_get_community(vg, offset);
+ const struct intel_community *comm;
u32 reg_offset;
+ comm = intel_get_community(vg, offset);
if (!comm)
return NULL;
@@ -1541,10 +1542,8 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
}
ret = devm_gpiochip_add_data(vg->dev, gc, vg);
- if (ret) {
+ if (ret)
dev_err(vg->dev, "failed adding byt-gpio chip\n");
- return ret;
- }
return ret;
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 89bd7ce6711a..928607a21d36 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -70,6 +70,12 @@
#define PADCFG0_PMODE_SHIFT 10
#define PADCFG0_PMODE_MASK GENMASK(13, 10)
#define PADCFG0_PMODE_GPIO 0
+#define PADCFG0_GPIODIS_SHIFT 8
+#define PADCFG0_GPIODIS_MASK GENMASK(9, 8)
+#define PADCFG0_GPIODIS_NONE 0
+#define PADCFG0_GPIODIS_OUTPUT 1
+#define PADCFG0_GPIODIS_INPUT 2
+#define PADCFG0_GPIODIS_FULL 3
#define PADCFG0_GPIORXDIS BIT(9)
#define PADCFG0_GPIOTXDIS BIT(8)
#define PADCFG0_GPIORXSTATE BIT(1)
@@ -108,13 +114,30 @@ struct intel_community_context {
#define pin_to_padno(c, p) ((p) - (c)->pin_base)
#define padgroup_offset(g, p) ((p) - (g)->base)
-struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned int pin)
+#define for_each_intel_pin_community(pctrl, community) \
+ for (unsigned int __ci = 0; \
+ __ci < pctrl->ncommunities && (community = &pctrl->communities[__ci]); \
+ __ci++) \
+
+#define for_each_intel_community_pad_group(community, grp) \
+ for (unsigned int __gi = 0; \
+ __gi < community->ngpps && (grp = &community->gpps[__gi]); \
+ __gi++) \
+
+#define for_each_intel_pad_group(pctrl, community, grp) \
+ for_each_intel_pin_community(pctrl, community) \
+ for_each_intel_community_pad_group(community, grp)
+
+#define for_each_intel_gpio_group(pctrl, community, grp) \
+ for_each_intel_pad_group(pctrl, community, grp) \
+ if (grp->gpio_base == INTEL_GPIO_BASE_NOMAP) {} else
+
+const struct intel_community *intel_get_community(const struct intel_pinctrl *pctrl,
+ unsigned int pin)
{
- struct intel_community *community;
- int i;
+ const struct intel_community *community;
- for (i = 0; i < pctrl->ncommunities; i++) {
- community = &pctrl->communities[i];
+ for_each_intel_pin_community(pctrl, community) {
if (pin >= community->pin_base &&
pin < community->pin_base + community->npins)
return community;
@@ -129,11 +152,9 @@ static const struct intel_padgroup *
intel_community_get_padgroup(const struct intel_community *community,
unsigned int pin)
{
- int i;
-
- for (i = 0; i < community->ngpps; i++) {
- const struct intel_padgroup *padgrp = &community->gpps[i];
+ const struct intel_padgroup *padgrp;
+ for_each_intel_community_pad_group(community, padgrp) {
if (pin >= padgrp->base && pin < padgrp->base + padgrp->size)
return padgrp;
}
@@ -161,7 +182,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl,
return community->pad_regs + reg + padno * nregs * 4;
}
-static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned int pin)
+static bool intel_pad_owned_by_host(const struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
@@ -186,7 +207,7 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned int pi
return !(readl(padown) & PADOWN_MASK(gpp_offset));
}
-static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned int pin)
+static bool intel_pad_acpi_mode(const struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
@@ -212,7 +233,6 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned int pin)
/**
* enum - Locking variants of the pad configuration
- *
* @PAD_UNLOCKED: pad is fully controlled by the configuration registers
* @PAD_LOCKED: pad configuration registers, except TX state, are locked
* @PAD_LOCKED_TX: pad configuration TX state is locked
@@ -229,9 +249,9 @@ enum {
PAD_LOCKED_FULL = PAD_LOCKED | PAD_LOCKED_TX,
};
-static int intel_pad_locked(struct intel_pinctrl *pctrl, unsigned int pin)
+static int intel_pad_locked(const struct intel_pinctrl *pctrl, unsigned int pin)
{
- struct intel_community *community;
+ const struct intel_community *community;
const struct intel_padgroup *padgrp;
unsigned int offset, gpp_offset;
u32 value;
@@ -267,19 +287,19 @@ static int intel_pad_locked(struct intel_pinctrl *pctrl, unsigned int pin)
return ret;
}
-static bool intel_pad_is_unlocked(struct intel_pinctrl *pctrl, unsigned int pin)
+static bool intel_pad_is_unlocked(const struct intel_pinctrl *pctrl, unsigned int pin)
{
return (intel_pad_locked(pctrl, pin) & PAD_LOCKED) == PAD_UNLOCKED;
}
-static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned int pin)
+static bool intel_pad_usable(const struct intel_pinctrl *pctrl, unsigned int pin)
{
return intel_pad_owned_by_host(pctrl, pin) && intel_pad_is_unlocked(pctrl, pin);
}
int intel_get_groups_count(struct pinctrl_dev *pctldev)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->ngroups;
}
@@ -287,7 +307,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_groups_count, PINCTRL_INTEL);
const char *intel_get_group_name(struct pinctrl_dev *pctldev, unsigned int group)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->groups[group].grp.name;
}
@@ -296,7 +316,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_group_name, PINCTRL_INTEL);
int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
const unsigned int **pins, unsigned int *npins)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
*pins = pctrl->soc->groups[group].grp.pins;
*npins = pctrl->soc->groups[group].grp.npins;
@@ -364,7 +384,7 @@ static const struct pinctrl_ops intel_pinctrl_ops = {
int intel_get_functions_count(struct pinctrl_dev *pctldev)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->nfunctions;
}
@@ -372,7 +392,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_functions_count, PINCTRL_INTEL);
const char *intel_get_function_name(struct pinctrl_dev *pctldev, unsigned int function)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->functions[function].func.name;
}
@@ -381,7 +401,7 @@ EXPORT_SYMBOL_NS_GPL(intel_get_function_name, PINCTRL_INTEL);
int intel_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function,
const char * const **groups, unsigned int * const ngroups)
{
- struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
*groups = pctrl->soc->functions[function].func.groups;
*ngroups = pctrl->soc->functions[function].func.ngroups;
@@ -429,19 +449,49 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
-static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
-{
- u32 value;
+/**
+ * enum - Possible pad physical connections
+ * @PAD_CONNECT_NONE: pad is fully disconnected
+ * @PAD_CONNECT_INPUT: pad is in input only mode
+ * @PAD_CONNECT_OUTPUT: pad is in output only mode
+ * @PAD_CONNECT_FULL: pad is fully connected
+ */
+enum {
+ PAD_CONNECT_NONE = 0,
+ PAD_CONNECT_INPUT = 1,
+ PAD_CONNECT_OUTPUT = 2,
+ PAD_CONNECT_FULL = PAD_CONNECT_INPUT | PAD_CONNECT_OUTPUT,
+};
- value = readl(padcfg0);
- if (input) {
+static int __intel_gpio_get_direction(u32 value)
+{
+ switch ((value & PADCFG0_GPIODIS_MASK) >> PADCFG0_GPIODIS_SHIFT) {
+ case PADCFG0_GPIODIS_FULL:
+ return PAD_CONNECT_NONE;
+ case PADCFG0_GPIODIS_OUTPUT:
+ return PAD_CONNECT_INPUT;
+ case PADCFG0_GPIODIS_INPUT:
+ return PAD_CONNECT_OUTPUT;
+ case PADCFG0_GPIODIS_NONE:
+ return PAD_CONNECT_FULL;
+ default:
+ return -ENOTSUPP;
+	}
+}
+
+static u32 __intel_gpio_set_direction(u32 value, bool input, bool output)
+{
+ if (input)
value &= ~PADCFG0_GPIORXDIS;
- value |= PADCFG0_GPIOTXDIS;
- } else {
- value &= ~PADCFG0_GPIOTXDIS;
+ else
value |= PADCFG0_GPIORXDIS;
- }
- writel(value, padcfg0);
+
+ if (output)
+ value &= ~PADCFG0_GPIOTXDIS;
+ else
+ value |= PADCFG0_GPIOTXDIS;
+
+ return value;
}
static int __intel_gpio_get_gpio_mode(u32 value)
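
The GPIODIS field decoded above is a pair of *disable* bits (TXDIS is
bit 8, RXDIS is bit 9), so the connection state is its inverse. From the
definitions in this patch:

	/*
	 *  GPIODIS value   RXDIS TXDIS   decoded state
	 *  _NONE   (0b00)    0     0     PAD_CONNECT_FULL
	 *  _OUTPUT (0b01)    0     1     PAD_CONNECT_INPUT  (TX cut: input-only)
	 *  _INPUT  (0b10)    1     0     PAD_CONNECT_OUTPUT (RX cut: output-only)
	 *  _FULL   (0b11)    1     1     PAD_CONNECT_NONE   (high impedance)
	 */
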
@@ -465,8 +515,7 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
value |= PADCFG0_PMODE_GPIO;
/* Disable TX buffer and enable RX (this will be input) */
- value &= ~PADCFG0_GPIORXDIS;
- value |= PADCFG0_GPIOTXDIS;
+ value = __intel_gpio_set_direction(value, true, false);
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
@@ -512,12 +561,18 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
+ u32 value;
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
guard(raw_spinlock_irqsave)(&pctrl->lock);
- __intel_gpio_set_direction(padcfg0, input);
+ value = readl(padcfg0);
+ if (input)
+ value = __intel_gpio_set_direction(value, true, false);
+ else
+ value = __intel_gpio_set_direction(value, false, true);
+ writel(value, padcfg0);
return 0;
}
@@ -612,6 +667,23 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return 0;
}
+static int intel_config_get_high_impedance(struct intel_pinctrl *pctrl, unsigned int pin,
+ enum pin_config_param param, u32 *arg)
+{
+ void __iomem *padcfg0;
+ u32 value;
+
+ padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
+ scoped_guard(raw_spinlock_irqsave, &pctrl->lock)
+ value = readl(padcfg0);
+
+ if (__intel_gpio_get_direction(value) != PAD_CONNECT_NONE)
+ return -EINVAL;
+
+ return 0;
+}
+
static int intel_config_get_debounce(struct intel_pinctrl *pctrl, unsigned int pin,
enum pin_config_param param, u32 *arg)
{
@@ -655,6 +727,12 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ ret = intel_config_get_high_impedance(pctrl, pin, param, &arg);
+ if (ret)
+ return ret;
+ break;
+
case PIN_CONFIG_INPUT_DEBOUNCE:
ret = intel_config_get_debounce(pctrl, pin, param, &arg);
if (ret)
@@ -753,11 +831,34 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return 0;
}
+static void intel_gpio_set_high_impedance(struct intel_pinctrl *pctrl, unsigned int pin)
+{
+ void __iomem *padcfg0;
+ u32 value;
+
+ padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
+ guard(raw_spinlock_irqsave)(&pctrl->lock);
+
+ value = readl(padcfg0);
+ value = __intel_gpio_set_direction(value, false, false);
+ writel(value, padcfg0);
+}
+
static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
unsigned int pin, unsigned int debounce)
{
void __iomem *padcfg0, *padcfg2;
u32 value0, value2;
+ unsigned long v;
+
+ if (debounce) {
+ v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC);
+ if (v < 3 || v > 15)
+ return -EINVAL;
+ } else {
+ v = 0;
+ }
padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
if (!padcfg2)
@@ -770,21 +871,15 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
value0 = readl(padcfg0);
value2 = readl(padcfg2);
- /* Disable glitch filter and debouncer */
- value0 &= ~PADCFG0_PREGFRXSEL;
- value2 &= ~(PADCFG2_DEBEN | PADCFG2_DEBOUNCE_MASK);
-
- if (debounce) {
- unsigned long v;
-
- v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC);
- if (v < 3 || v > 15)
- return -EINVAL;
-
+ value2 = (value2 & ~PADCFG2_DEBOUNCE_MASK) | (v << PADCFG2_DEBOUNCE_SHIFT);
+ if (v) {
/* Enable glitch filter and debouncer */
value0 |= PADCFG0_PREGFRXSEL;
- value2 |= v << PADCFG2_DEBOUNCE_SHIFT;
value2 |= PADCFG2_DEBEN;
+ } else {
+ /* Disable glitch filter and debouncer */
+ value0 &= ~PADCFG0_PREGFRXSEL;
+ value2 &= ~PADCFG2_DEBEN;
}
writel(value0, padcfg0);
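
The hoisted validation makes the encoding easier to follow: v is the log2
of the debounce period measured in hardware ticks. A worked example,
assuming the driver's DEBOUNCE_PERIOD_NSEC of 31250 ns:

	/*
	 * Request 4000 us:
	 *   4000 * NSEC_PER_USEC / 31250 = 128
	 *   order_base_2(128) = 7           (valid window is 3..15)
	 * so the pad debounces for (1 << 7) * 31250 ns = 4 ms exactly.
	 * The supported range is thus 250 us (v = 3) up to ~1.02 s (v = 15),
	 * and order_base_2() rounds other requests up to a power of two.
	 */
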
@@ -812,6 +907,10 @@ static int intel_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ intel_gpio_set_high_impedance(pctrl, pin);
+ break;
+
case PIN_CONFIG_INPUT_DEBOUNCE:
ret = intel_config_set_debounce(pctrl, pin,
pinconf_to_config_argument(configs[i]));
@@ -854,34 +953,21 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
* Return: a pin number and pointers to the community and pad group, which
* the pin belongs to, or negative error code if translation can't be done.
*/
-static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
+static int intel_gpio_to_pin(const struct intel_pinctrl *pctrl, unsigned int offset,
const struct intel_community **community,
const struct intel_padgroup **padgrp)
{
- int i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *comm = &pctrl->communities[i];
- int j;
-
- for (j = 0; j < comm->ngpps; j++) {
- const struct intel_padgroup *pgrp = &comm->gpps[j];
+ const struct intel_community *comm;
+ const struct intel_padgroup *grp;
- if (pgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
- continue;
+ for_each_intel_gpio_group(pctrl, comm, grp) {
+ if (offset >= grp->gpio_base && offset < grp->gpio_base + grp->size) {
+ if (community)
+ *community = comm;
+ if (padgrp)
+ *padgrp = grp;
- if (offset >= pgrp->gpio_base &&
- offset < pgrp->gpio_base + pgrp->size) {
- int pin;
-
- pin = pgrp->base + offset - pgrp->gpio_base;
- if (community)
- *community = comm;
- if (padgrp)
- *padgrp = pgrp;
-
- return pin;
- }
+ return grp->base + offset - grp->gpio_base;
}
}
@@ -897,7 +983,7 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
*
* Return: a GPIO offset, or negative error code if translation can't be done.
*/
-static int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin)
+static int intel_pin_to_gpio(const struct intel_pinctrl *pctrl, int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
@@ -929,7 +1015,7 @@ static int intel_gpio_get(struct gpio_chip *chip, unsigned int offset)
return -EINVAL;
padcfg0 = readl(reg);
- if (!(padcfg0 & PADCFG0_GPIOTXDIS))
+ if (__intel_gpio_get_direction(padcfg0) & PAD_CONNECT_OUTPUT)
return !!(padcfg0 & PADCFG0_GPIOTXSTATE);
return !!(padcfg0 & PADCFG0_GPIORXSTATE);
@@ -982,10 +1068,10 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (padcfg0 & PADCFG0_PMODE_MASK)
return -EINVAL;
- if (padcfg0 & PADCFG0_GPIOTXDIS)
- return GPIO_LINE_DIRECTION_IN;
+ if (__intel_gpio_get_direction(padcfg0) & PAD_CONNECT_OUTPUT)
+ return GPIO_LINE_DIRECTION_OUT;
- return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
}
static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -1171,15 +1257,16 @@ static const struct irq_chip intel_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
- const struct intel_community *community)
+static irqreturn_t intel_gpio_irq(int irq, void *data)
{
- struct gpio_chip *gc = &pctrl->chip;
- unsigned int gpp;
+ const struct intel_community *community;
+ const struct intel_padgroup *padgrp;
+ struct intel_pinctrl *pctrl = data;
int ret = 0;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- const struct intel_padgroup *padgrp = &community->gpps[gpp];
+ /* Need to check all communities for pending interrupts */
+ for_each_intel_pad_group(pctrl, community, padgrp) {
+ struct gpio_chip *gc = &pctrl->chip;
unsigned long pending, enabled;
unsigned int gpp, gpp_offset;
void __iomem *reg, *is;
@@ -1203,36 +1290,17 @@ static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
ret += pending ? 1 : 0;
}
- return ret;
-}
-
-static irqreturn_t intel_gpio_irq(int irq, void *data)
-{
- const struct intel_community *community;
- struct intel_pinctrl *pctrl = data;
- unsigned int i;
- int ret = 0;
-
- /* Need to check all communities for pending interrupts */
- for (i = 0; i < pctrl->ncommunities; i++) {
- community = &pctrl->communities[i];
- ret += intel_gpio_community_irq_handler(pctrl, community);
- }
-
return IRQ_RETVAL(ret);
}
static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
{
- int i;
+ const struct intel_community *community;
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *community;
+ for_each_intel_pin_community(pctrl, community) {
void __iomem *reg, *is;
unsigned int gpp;
- community = &pctrl->communities[i];
-
for (gpp = 0; gpp < community->ngpps; gpp++) {
reg = community->regs + community->ie_offset + gpp * 4;
is = community->regs + community->is_offset + gpp * 4;
@@ -1257,36 +1325,17 @@ static int intel_gpio_irq_init_hw(struct gpio_chip *gc)
return 0;
}
-static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
- const struct intel_community *community)
-{
- int ret = 0, i;
-
- for (i = 0; i < community->ngpps; i++) {
- const struct intel_padgroup *gpp = &community->gpps[i];
-
- if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
- continue;
-
- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
- gpp->gpio_base, gpp->base,
- gpp->size);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- int ret, i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- struct intel_community *community = &pctrl->communities[i];
+ const struct intel_community *community;
+ const struct intel_padgroup *grp;
+ int ret;
- ret = intel_gpio_add_community_ranges(pctrl, community);
+ for_each_intel_gpio_group(pctrl, community, grp) {
+ ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
+ grp->gpio_base, grp->base,
+ grp->size);
if (ret) {
dev_err(pctrl->dev, "failed to add GPIO pin range\n");
return ret;
@@ -1299,20 +1348,12 @@ static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
+ const struct intel_padgroup *grp;
unsigned int ngpio = 0;
- int i, j;
- for (i = 0; i < pctrl->ncommunities; i++) {
- community = &pctrl->communities[i];
- for (j = 0; j < community->ngpps; j++) {
- const struct intel_padgroup *gpp = &community->gpps[j];
-
- if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
- continue;
-
- if (gpp->gpio_base + gpp->size > ngpio)
- ngpio = gpp->gpio_base + gpp->size;
- }
+ for_each_intel_gpio_group(pctrl, community, grp) {
+ if (grp->gpio_base + grp->size > ngpio)
+ ngpio = grp->gpio_base + grp->size;
}
return ngpio;
@@ -1682,7 +1723,8 @@ EXPORT_SYMBOL_NS_GPL(intel_pinctrl_get_soc_data, PINCTRL_INTEL);
static bool __intel_gpio_is_direct_irq(u32 value)
{
- return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+ return (value & PADCFG0_GPIROUTIOXAPIC) &&
+ (__intel_gpio_get_direction(value) == PAD_CONNECT_INPUT) &&
(__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
}
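
Taken together, the new for_each_intel_*() iterators replace the
index-based double loops throughout this file, and the _gpio_ variant
additionally filters out pad groups with no GPIO mapping. A sketch of how
a consumer composes them (illustrative only, not code from this patch):

	static unsigned int count_gpio_pads(const struct intel_pinctrl *pctrl)
	{
		const struct intel_community *community;
		const struct intel_padgroup *grp;
		unsigned int n = 0;

		/* visits every group whose gpio_base != INTEL_GPIO_BASE_NOMAP */
		for_each_intel_gpio_group(pctrl, community, grp)
			n += grp->size;

		return n;
	}
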
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 6981e2fab93f..4d4e1257afdf 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -264,7 +264,8 @@ int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
extern const struct dev_pm_ops intel_pinctrl_pm_ops;
-struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned int pin);
+const struct intel_community *intel_get_community(const struct intel_pinctrl *pctrl,
+ unsigned int pin);
int intel_get_groups_count(struct pinctrl_dev *pctldev);
const char *intel_get_group_name(struct pinctrl_dev *pctldev, unsigned int group);
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 1fb0bba8b386..bcce97f3b897 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -211,7 +211,7 @@ static void __iomem *lp_gpio_reg(struct gpio_chip *chip, unsigned int offset,
int reg)
{
struct intel_pinctrl *lg = gpiochip_get_data(chip);
- struct intel_community *comm;
+ const struct intel_community *comm;
int reg_offset;
comm = intel_get_community(lg, offset);
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index e12316c42698..87e958d827bf 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -1044,11 +1044,8 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev)
hw->nbase = hw->soc->nbase_names;
- if (of_find_property(hw->dev->of_node,
- "mediatek,rsel-resistance-in-si-unit", NULL))
- hw->rsel_si_unit = true;
- else
- hw->rsel_si_unit = false;
+ hw->rsel_si_unit = of_property_read_bool(hw->dev->of_node,
+ "mediatek,rsel-resistance-in-si-unit");
spin_lock_init(&hw->lock);
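
of_property_read_bool() returns true exactly when the property exists,
which is what the removed of_find_property() != NULL test expressed in
four lines. Sketch of the idiom (np stands in for hw->dev->of_node):

	/* presence test for a boolean DT property */
	hw->rsel_si_unit = of_property_read_bool(np,
				"mediatek,rsel-resistance-in-si-unit");
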
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-c3.c b/drivers/pinctrl/meson/pinctrl-amlogic-c3.c
index 04f1e87bae99..776d32465ab9 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-c3.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-c3.c
@@ -375,7 +375,7 @@ static const unsigned int spi_a_mosi_a_pins[] = { GPIOA_3 };
static const unsigned int gen_clk_a4_pins[] = { GPIOA_4 };
static const unsigned int clk12_24_a_pins[] = { GPIOA_5 };
-static struct meson_pmx_group c3_periphs_groups[] = {
+static const struct meson_pmx_group c3_periphs_groups[] = {
GPIO_GROUP(GPIOE_0),
GPIO_GROUP(GPIOE_1),
GPIO_GROUP(GPIOE_2),
@@ -987,7 +987,7 @@ static const char * const lcd_groups[] = {
"lcd_clk_a", "lcd_clk_x", "lcd_hs", "lcd_vs",
};
-static struct meson_pmx_func c3_periphs_functions[] = {
+static const struct meson_pmx_func c3_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(uart_a),
FUNCTION(uart_b),
@@ -1036,7 +1036,7 @@ static struct meson_pmx_func c3_periphs_functions[] = {
FUNCTION(lcd),
};
-static struct meson_bank c3_periphs_banks[] = {
+static const struct meson_bank c3_periphs_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("X", GPIOX_0, GPIOX_13, 40, 53,
0x03, 0, 0x04, 0, 0x02, 0, 0x01, 0, 0x00, 0, 0x07, 0),
@@ -1054,7 +1054,7 @@ static struct meson_bank c3_periphs_banks[] = {
0x73, 0, 0x74, 0, 0x72, 0, 0x71, 0, 0x70, 0, 0x77, 0),
};
-static struct meson_pmx_bank c3_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank c3_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("B", GPIOB_0, GPIOB_14, 0x00, 0),
BANK_PMX("X", GPIOX_0, GPIOX_13, 0x03, 0),
@@ -1065,12 +1065,12 @@ static struct meson_pmx_bank c3_periphs_pmx_banks[] = {
BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0x02, 0),
};
-static struct meson_axg_pmx_data c3_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data c3_periphs_pmx_banks_data = {
.pmx_banks = c3_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(c3_periphs_pmx_banks),
};
-static struct meson_pinctrl_data c3_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data c3_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = c3_periphs_pins,
.groups = c3_periphs_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-t7.c b/drivers/pinctrl/meson/pinctrl-amlogic-t7.c
index 0aed5de3f068..cfd98b9dcb68 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-t7.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-t7.c
@@ -535,7 +535,7 @@ static const unsigned int i2c0_sck_h_pins[] = { GPIOH_7 };
/* Bank H func3 */
static const unsigned int pcieck_reqn_h_pins[] = { GPIOH_2 };
-static struct meson_pmx_group t7_periphs_groups[] = {
+static const struct meson_pmx_group t7_periphs_groups[] = {
GPIO_GROUP(GPIOB_0),
GPIO_GROUP(GPIOB_1),
GPIO_GROUP(GPIOB_2),
@@ -1443,7 +1443,7 @@ static const char * const mic_mute_groups[] = {
"mic_mute_key", "mic_mute_led",
};
-static struct meson_pmx_func t7_periphs_functions[] = {
+static const struct meson_pmx_func t7_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -1524,7 +1524,7 @@ static struct meson_pmx_func t7_periphs_functions[] = {
FUNCTION(mic_mute),
};
-static struct meson_bank t7_periphs_banks[] = {
+static const struct meson_bank t7_periphs_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("D", GPIOD_0, GPIOD_12, 57, 69,
0x03, 0, 0x04, 0, 0x02, 0, 0x01, 0, 0x00, 0, 0x07, 0),
@@ -1552,7 +1552,7 @@ static struct meson_bank t7_periphs_banks[] = {
0x83, 0, 0x84, 0, 0x82, 0, 0x81, 0, 0x80, 0, 0x87, 0),
};
-static struct meson_pmx_bank t7_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank t7_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("D", GPIOD_0, GPIOD_12, 0x0a, 0),
BANK_PMX("E", GPIOE_0, GPIOE_6, 0x0c, 0),
@@ -1568,12 +1568,12 @@ static struct meson_pmx_bank t7_periphs_pmx_banks[] = {
BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0x09, 0),
};
-static struct meson_axg_pmx_data t7_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data t7_periphs_pmx_banks_data = {
.pmx_banks = t7_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(t7_periphs_pmx_banks),
};
-static struct meson_pinctrl_data t7_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data t7_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = t7_periphs_pins,
.groups = t7_periphs_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index d2ac9ca72a3e..20c4323d4223 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -339,7 +339,7 @@ static const unsigned int tst_out11_pins[] = { GPIOA_11 };
static const unsigned int mute_key_pins[] = { GPIOA_4 };
static const unsigned int mute_en_pins[] = { GPIOA_5 };
-static struct meson_pmx_group meson_a1_periphs_groups[] = {
+static const struct meson_pmx_group meson_a1_periphs_groups[] = {
GPIO_GROUP(GPIOP_0),
GPIO_GROUP(GPIOP_1),
GPIO_GROUP(GPIOP_2),
@@ -832,7 +832,7 @@ static const char * const mute_groups[] = {
"mute_key", "mute_en",
};
-static struct meson_pmx_func meson_a1_periphs_functions[] = {
+static const struct meson_pmx_func meson_a1_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(psram),
FUNCTION(pwm_a),
@@ -875,7 +875,7 @@ static struct meson_pmx_func meson_a1_periphs_functions[] = {
FUNCTION(mute),
};
-static struct meson_bank meson_a1_periphs_banks[] = {
+static const struct meson_bank meson_a1_periphs_banks[] = {
/* name first last irq pullen pull dir out in ds*/
BANK_DS("P", GPIOP_0, GPIOP_12, 0, 12, 0x3, 0, 0x4, 0,
0x2, 0, 0x1, 0, 0x0, 0, 0x5, 0),
@@ -889,7 +889,7 @@ static struct meson_bank meson_a1_periphs_banks[] = {
0x42, 0, 0x41, 0, 0x40, 0, 0x45, 0),
};
-static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("P", GPIOP_0, GPIOP_12, 0x0, 0),
BANK_PMX("B", GPIOB_0, GPIOB_6, 0x2, 0),
@@ -898,12 +898,12 @@ static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
BANK_PMX("A", GPIOA_0, GPIOA_11, 0x8, 0),
};
-static struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = {
.pmx_banks = meson_a1_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_a1_periphs_pmx_banks),
};
-static struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_a1_periphs_pins,
.groups = meson_a1_periphs_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
index cad411d90727..00c3829216d6 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
@@ -27,10 +27,10 @@
static int meson_axg_pmx_get_bank(struct meson_pinctrl *pc,
unsigned int pin,
- struct meson_pmx_bank **bank)
+ const struct meson_pmx_bank **bank)
{
int i;
- struct meson_axg_pmx_data *pmx = pc->data->pmx_data;
+ const struct meson_axg_pmx_data *pmx = pc->data->pmx_data;
for (i = 0; i < pmx->num_pmx_banks; i++)
if (pin >= pmx->pmx_banks[i].first &&
@@ -42,7 +42,7 @@ static int meson_axg_pmx_get_bank(struct meson_pinctrl *pc,
return -EINVAL;
}
-static int meson_pmx_calc_reg_and_offset(struct meson_pmx_bank *bank,
+static int meson_pmx_calc_reg_and_offset(const struct meson_pmx_bank *bank,
unsigned int pin, unsigned int *reg,
unsigned int *offset)
{
@@ -59,10 +59,10 @@ static int meson_pmx_calc_reg_and_offset(struct meson_pmx_bank *bank,
static int meson_axg_pmx_update_function(struct meson_pinctrl *pc,
unsigned int pin, unsigned int func)
{
+ const struct meson_pmx_bank *bank;
int ret;
int reg;
int offset;
- struct meson_pmx_bank *bank;
ret = meson_axg_pmx_get_bank(pc, pin, &bank);
if (ret)
@@ -82,8 +82,8 @@ static int meson_axg_pmx_set_mux(struct pinctrl_dev *pcdev,
int i;
int ret;
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_func *func = &pc->data->funcs[func_num];
- struct meson_pmx_group *group = &pc->data->groups[group_num];
+ const struct meson_pmx_func *func = &pc->data->funcs[func_num];
+ const struct meson_pmx_group *group = &pc->data->groups[group_num];
struct meson_pmx_axg_data *pmx_data =
(struct meson_pmx_axg_data *)group->data;
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
index 67147ebaef1b..63b9d471e980 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h
@@ -17,7 +17,7 @@ struct meson_pmx_bank {
};
struct meson_axg_pmx_data {
- struct meson_pmx_bank *pmx_banks;
+ const struct meson_pmx_bank *pmx_banks;
unsigned int num_pmx_banks;
};
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 8f4e7154b73f..fa2df4896390 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -352,7 +352,7 @@ static const unsigned int tdmb_dout2_pins[] = {GPIOA_12};
static const unsigned int tdmb_din3_pins[] = {GPIOA_13};
static const unsigned int tdmb_dout3_pins[] = {GPIOA_13};
-static struct meson_pmx_group meson_axg_periphs_groups[] = {
+static const struct meson_pmx_group meson_axg_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
GPIO_GROUP(GPIOZ_2),
@@ -675,7 +675,7 @@ static const unsigned int jtag_ao_tms_pins[] = {GPIOAO_7};
/* gen_clk */
static const unsigned int gen_clk_ee_pins[] = {GPIOAO_13};
-static struct meson_pmx_group meson_axg_aobus_groups[] = {
+static const struct meson_pmx_group meson_axg_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -955,7 +955,7 @@ static const char * const gen_clk_ee_groups[] = {
"gen_clk_ee",
};
-static struct meson_pmx_func meson_axg_periphs_functions[] = {
+static const struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -987,7 +987,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(tdmc),
};
-static struct meson_pmx_func meson_axg_aobus_functions[] = {
+static const struct meson_pmx_func meson_axg_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao_a),
FUNCTION(uart_ao_b),
@@ -1003,7 +1003,7 @@ static struct meson_pmx_func meson_axg_aobus_functions[] = {
FUNCTION(gen_clk_ee),
};
-static struct meson_bank meson_axg_periphs_banks[] = {
+static const struct meson_bank meson_axg_periphs_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("Z", GPIOZ_0, GPIOZ_10, 14, 24, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
BANK("BOOT", BOOT_0, BOOT_14, 25, 39, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
@@ -1012,12 +1012,12 @@ static struct meson_bank meson_axg_periphs_banks[] = {
BANK("Y", GPIOY_0, GPIOY_15, 84, 99, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
};
-static struct meson_bank meson_axg_aobus_banks[] = {
+static const struct meson_bank meson_axg_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("Z", GPIOZ_0, GPIOZ_10, 0x2, 0),
BANK_PMX("BOOT", BOOT_0, BOOT_14, 0x0, 0),
@@ -1026,21 +1026,21 @@ static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
BANK_PMX("Y", GPIOY_0, GPIOY_15, 0x8, 0),
};
-static struct meson_axg_pmx_data meson_axg_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_axg_periphs_pmx_banks_data = {
.pmx_banks = meson_axg_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_axg_periphs_pmx_banks),
};
-static struct meson_pmx_bank meson_axg_aobus_pmx_banks[] = {
+static const struct meson_pmx_bank meson_axg_aobus_pmx_banks[] = {
BANK_PMX("AO", GPIOAO_0, GPIOAO_13, 0x0, 0),
};
-static struct meson_axg_pmx_data meson_axg_aobus_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_axg_aobus_pmx_banks_data = {
.pmx_banks = meson_axg_aobus_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_axg_aobus_pmx_banks),
};
-static struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_axg_periphs_pins,
.groups = meson_axg_periphs_groups,
@@ -1054,7 +1054,7 @@ static struct meson_pinctrl_data meson_axg_periphs_pinctrl_data = {
.pmx_data = &meson_axg_periphs_pmx_banks_data,
};
-static struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson_axg_aobus_pins,
.groups = meson_axg_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 32830269a5b4..e2788bfc5874 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -436,7 +436,7 @@ static const unsigned int tdm_c_dout1_z_pins[] = { GPIOZ_3 };
static const unsigned int tdm_c_dout2_z_pins[] = { GPIOZ_4 };
static const unsigned int tdm_c_dout3_z_pins[] = { GPIOZ_5 };
-static struct meson_pmx_group meson_g12a_periphs_groups[] = {
+static const struct meson_pmx_group meson_g12a_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
GPIO_GROUP(GPIOZ_2),
@@ -860,7 +860,7 @@ static const unsigned int tdm_ao_b_dout2_pins[] = { GPIOAO_6 };
/* mclk0_ao */
static const unsigned int mclk0_ao_pins[] = { GPIOAO_9 };
-static struct meson_pmx_group meson_g12a_aobus_groups[] = {
+static const struct meson_pmx_group meson_g12a_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -1253,7 +1253,7 @@ static const char * const mclk0_ao_groups[] = {
"mclk0_ao",
};
-static struct meson_pmx_func meson_g12a_periphs_functions[] = {
+static const struct meson_pmx_func meson_g12a_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -1295,7 +1295,7 @@ static struct meson_pmx_func meson_g12a_periphs_functions[] = {
FUNCTION(tdm_c),
};
-static struct meson_pmx_func meson_g12a_aobus_functions[] = {
+static const struct meson_pmx_func meson_g12a_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao_a),
FUNCTION(uart_ao_b),
@@ -1317,7 +1317,7 @@ static struct meson_pmx_func meson_g12a_aobus_functions[] = {
FUNCTION(mclk0_ao),
};
-static struct meson_bank meson_g12a_periphs_banks[] = {
+static const struct meson_bank meson_g12a_periphs_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("Z", GPIOZ_0, GPIOZ_15, IRQID_GPIOZ_0, IRQID_GPIOZ_15,
4, 0, 4, 0, 12, 0, 13, 0, 14, 0, 5, 0),
@@ -1333,7 +1333,7 @@ static struct meson_bank meson_g12a_periphs_banks[] = {
2, 0, 2, 0, 6, 0, 7, 0, 8, 0, 2, 0),
};
-static struct meson_bank meson_g12a_aobus_banks[] = {
+static const struct meson_bank meson_g12a_aobus_banks[] = {
/* name first last irq pullen pull dir out in ds */
BANK_DS("AO", GPIOAO_0, GPIOAO_11, IRQID_GPIOAO_0, IRQID_GPIOAO_11,
3, 0, 2, 0, 0, 0, 4, 0, 1, 0, 0, 0),
@@ -1342,7 +1342,7 @@ static struct meson_bank meson_g12a_aobus_banks[] = {
3, 16, 2, 16, 0, 16, 4, 16, 1, 16, 1, 0),
};
-static struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("Z", GPIOZ_0, GPIOZ_15, 0x6, 0),
BANK_PMX("H", GPIOH_0, GPIOH_8, 0xb, 0),
@@ -1352,17 +1352,17 @@ static struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = {
BANK_PMX("X", GPIOX_0, GPIOX_19, 0x3, 0),
};
-static struct meson_axg_pmx_data meson_g12a_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_g12a_periphs_pmx_banks_data = {
.pmx_banks = meson_g12a_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_g12a_periphs_pmx_banks),
};
-static struct meson_pmx_bank meson_g12a_aobus_pmx_banks[] = {
+static const struct meson_pmx_bank meson_g12a_aobus_pmx_banks[] = {
BANK_PMX("AO", GPIOAO_0, GPIOAO_11, 0x0, 0),
BANK_PMX("E", GPIOE_0, GPIOE_2, 0x1, 16),
};
-static struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
.pmx_banks = meson_g12a_aobus_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_g12a_aobus_pmx_banks),
};
@@ -1375,7 +1375,7 @@ static int meson_g12a_aobus_parse_dt_extra(struct meson_pinctrl *pc)
return 0;
}
-static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_g12a_periphs_pins,
.groups = meson_g12a_periphs_groups,
@@ -1389,7 +1389,7 @@ static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
.pmx_data = &meson_g12a_periphs_pmx_banks_data,
};
-static struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson_g12a_aobus_pins,
.groups = meson_g12a_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 2867f397fec6..4e8b9d7c2e4b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -307,7 +307,7 @@ static const unsigned int spdif_out_ao_13_pins[] = { GPIOAO_13 };
static const unsigned int ao_cec_pins[] = { GPIOAO_12 };
static const unsigned int ee_cec_pins[] = { GPIOAO_12 };
-static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
+static const struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
GPIO_GROUP(GPIOZ_2),
@@ -541,7 +541,7 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(sdcard_clk, 2, 11),
};
-static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
+static const struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -798,7 +798,7 @@ static const char * const cec_ao_groups[] = {
"ao_cec", "ee_cec",
};
-static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
+static const struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -829,7 +829,7 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(tsin_b),
};
-static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
+static const struct meson_pmx_func meson_gxbb_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
@@ -845,7 +845,7 @@ static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
FUNCTION(cec_ao),
};
-static struct meson_bank meson_gxbb_periphs_banks[] = {
+static const struct meson_bank meson_gxbb_periphs_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("X", GPIOX_0, GPIOX_22, 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
BANK("Y", GPIOY_0, GPIOY_16, 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
@@ -857,12 +857,12 @@ static struct meson_bank meson_gxbb_periphs_banks[] = {
BANK("CLK", GPIOCLK_0, GPIOCLK_3, 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
-static struct meson_bank meson_gxbb_aobus_banks[] = {
+static const struct meson_bank meson_gxbb_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_gxbb_periphs_pins,
.groups = meson_gxbb_periphs_groups,
@@ -875,7 +875,7 @@ static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.pmx_ops = &meson8_pmx_ops,
};
-static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson_gxbb_aobus_pins,
.groups = meson_gxbb_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index a2f25fa02852..9171de657f97 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -301,7 +301,7 @@ static const unsigned int spdif_out_ao_9_pins[] = { GPIOAO_9 };
static const unsigned int ao_cec_pins[] = { GPIOAO_8 };
static const unsigned int ee_cec_pins[] = { GPIOAO_8 };
-static struct meson_pmx_group meson_gxl_periphs_groups[] = {
+static const struct meson_pmx_group meson_gxl_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
GPIO_GROUP(GPIOZ_2),
@@ -527,7 +527,7 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(pwm_f_clk, 8, 30),
};
-static struct meson_pmx_group meson_gxl_aobus_groups[] = {
+static const struct meson_pmx_group meson_gxl_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -763,7 +763,7 @@ static const char * const cec_ao_groups[] = {
"ao_cec", "ee_cec",
};
-static struct meson_pmx_func meson_gxl_periphs_functions[] = {
+static const struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
@@ -793,7 +793,7 @@ static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(tsin_b),
};
-static struct meson_pmx_func meson_gxl_aobus_functions[] = {
+static const struct meson_pmx_func meson_gxl_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
@@ -807,7 +807,7 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = {
FUNCTION(cec_ao),
};
-static struct meson_bank meson_gxl_periphs_banks[] = {
+static const struct meson_bank meson_gxl_periphs_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("X", GPIOX_0, GPIOX_18, 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
BANK("DV", GPIODV_0, GPIODV_29, 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
@@ -818,12 +818,12 @@ static struct meson_bank meson_gxl_periphs_banks[] = {
BANK("CLK", GPIOCLK_0, GPIOCLK_1, 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
-static struct meson_bank meson_gxl_aobus_banks[] = {
+static const struct meson_bank meson_gxl_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_gxl_periphs_pins,
.groups = meson_gxl_periphs_groups,
@@ -836,7 +836,7 @@ static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.pmx_ops = &meson8_pmx_ops,
};
-static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson_gxl_aobus_pins,
.groups = meson_gxl_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-s4.c b/drivers/pinctrl/meson/pinctrl-meson-s4.c
index 60c7d5003e8a..872948699e9f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-s4.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-s4.c
@@ -411,7 +411,7 @@ static const unsigned int s2_demod_gpio0_pins[] = { GPIOZ_12 };
static const unsigned int gen_clk_z9_pins[] = { GPIOZ_9 };
static const unsigned int gen_clk_z12_pins[] = { GPIOZ_12 };
-static struct meson_pmx_group meson_s4_periphs_groups[] = {
+static const struct meson_pmx_group meson_s4_periphs_groups[] = {
GPIO_GROUP(GPIOE_0),
GPIO_GROUP(GPIOE_1),
@@ -1100,7 +1100,7 @@ static const char * const s2_demod_groups[] = {
"s2_demod_gpio3", "s2_demod_gpio2", "s2_demod_gpio1", "s2_demod_gpio0",
};
-static struct meson_pmx_func meson_s4_periphs_functions[] = {
+static const struct meson_pmx_func meson_s4_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(i2c0),
FUNCTION(i2c1),
@@ -1160,7 +1160,7 @@ static struct meson_pmx_func meson_s4_periphs_functions[] = {
FUNCTION(s2_demod),
};
-static struct meson_bank meson_s4_periphs_banks[] = {
+static const struct meson_bank meson_s4_periphs_banks[] = {
/* name first last irq pullen pull dir out in */
BANK_DS("B", GPIOB_0, GPIOB_13, 0, 13,
0x63, 0, 0x64, 0, 0x62, 0, 0x61, 0, 0x60, 0, 0x67, 0),
@@ -1180,7 +1180,7 @@ static struct meson_bank meson_s4_periphs_banks[] = {
0x83, 0, 0x84, 0, 0x82, 0, 0x81, 0, 0x80, 0, 0x87, 0),
};
-static struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = {
+static const struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = {
/* name first last reg offset */
BANK_PMX("B", GPIOB_0, GPIOB_13, 0x00, 0),
BANK_PMX("C", GPIOC_0, GPIOC_7, 0x9, 0),
@@ -1192,12 +1192,12 @@ static struct meson_pmx_bank meson_s4_periphs_pmx_banks[] = {
BANK_PMX("TEST_N", GPIO_TEST_N, GPIO_TEST_N, 0xf, 0)
};
-static struct meson_axg_pmx_data meson_s4_periphs_pmx_banks_data = {
+static const struct meson_axg_pmx_data meson_s4_periphs_pmx_banks_data = {
.pmx_banks = meson_s4_periphs_pmx_banks,
.num_pmx_banks = ARRAY_SIZE(meson_s4_periphs_pmx_banks),
};
-static struct meson_pinctrl_data meson_s4_periphs_pinctrl_data = {
+static const struct meson_pinctrl_data meson_s4_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_s4_periphs_pins,
.groups = meson_s4_periphs_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index ef002b9dd464..253a0cc57e39 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -70,7 +70,7 @@ static const unsigned int meson_bit_strides[] = {
* Return: 0 on success, a negative value on error
*/
static int meson_get_bank(struct meson_pinctrl *pc, unsigned int pin,
- struct meson_bank **bank)
+ const struct meson_bank **bank)
{
int i;
@@ -94,11 +94,12 @@ static int meson_get_bank(struct meson_pinctrl *pc, unsigned int pin,
* @reg: the computed register offset
* @bit: the computed bit
*/
-static void meson_calc_reg_and_bit(struct meson_bank *bank, unsigned int pin,
+static void meson_calc_reg_and_bit(const struct meson_bank *bank,
+ unsigned int pin,
enum meson_reg_type reg_type,
unsigned int *reg, unsigned int *bit)
{
- struct meson_reg_desc *desc = &bank->regs[reg_type];
+ const struct meson_reg_desc *desc = &bank->regs[reg_type];
*bit = (desc->bit + pin - bank->first) * meson_bit_strides[reg_type];
*reg = (desc->reg + (*bit / 32)) * 4;
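
The two lines above turn a per-pin index into a byte offset and a bit
position via a per-register-type stride. A worked example, assuming a
2-bit-per-pin type such as drive strength (stride 2):

	/*
	 * pin - bank->first = 20, desc->bit = 0:
	 *   bit = 20 * 2 = 40
	 *   reg = (desc->reg + 40 / 32) * 4 = (desc->reg + 1) * 4
	 * i.e. the field spills into the next 32-bit word, and the register
	 * offset is in bytes (hence the * 4). A stride-1 type with the same
	 * inputs stays in the first word: bit = 20, reg = desc->reg * 4.
	 */
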
@@ -181,7 +182,7 @@ static int meson_pinconf_set_gpio_bit(struct meson_pinctrl *pc,
unsigned int reg_type,
bool arg)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit;
int ret;
@@ -198,7 +199,7 @@ static int meson_pinconf_get_gpio_bit(struct meson_pinctrl *pc,
unsigned int pin,
unsigned int reg_type)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit, val;
int ret;
@@ -261,7 +262,7 @@ static int meson_pinconf_set_output_drive(struct meson_pinctrl *pc,
static int meson_pinconf_disable_bias(struct meson_pinctrl *pc,
unsigned int pin)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit = 0;
int ret;
@@ -280,7 +281,7 @@ static int meson_pinconf_disable_bias(struct meson_pinctrl *pc,
static int meson_pinconf_enable_bias(struct meson_pinctrl *pc, unsigned int pin,
bool pull_up)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit, val = 0;
int ret;
@@ -308,7 +309,7 @@ static int meson_pinconf_set_drive_strength(struct meson_pinctrl *pc,
unsigned int pin,
u16 drive_strength_ua)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit, ds_val;
int ret;
@@ -399,7 +400,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
static int meson_pinconf_get_pull(struct meson_pinctrl *pc, unsigned int pin)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit, val;
int ret, conf;
@@ -435,7 +436,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc,
unsigned int pin,
u16 *drive_strength_ua)
{
- struct meson_bank *bank;
+ const struct meson_bank *bank;
unsigned int reg, bit;
unsigned int val;
int ret;
@@ -528,7 +529,7 @@ static int meson_pinconf_group_set(struct pinctrl_dev *pcdev,
unsigned long *configs, unsigned num_configs)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_group *group = &pc->data->groups[num_group];
+ const struct meson_pmx_group *group = &pc->data->groups[num_group];
int i;
dev_dbg(pc->dev, "set pinconf for group %s\n", group->name);
@@ -587,8 +588,8 @@ static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
+ const struct meson_bank *bank;
unsigned int reg, bit, val;
- struct meson_bank *bank;
int ret;
ret = meson_get_bank(pc, gpio, &bank);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 34fc4e8612e4..7883ea31a001 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -110,15 +110,15 @@ struct meson_bank {
struct meson_pinctrl_data {
const char *name;
const struct pinctrl_pin_desc *pins;
- struct meson_pmx_group *groups;
- struct meson_pmx_func *funcs;
+ const struct meson_pmx_group *groups;
+ const struct meson_pmx_func *funcs;
unsigned int num_pins;
unsigned int num_groups;
unsigned int num_funcs;
- struct meson_bank *banks;
+ const struct meson_bank *banks;
unsigned int num_banks;
const struct pinmux_ops *pmx_ops;
- void *pmx_data;
+ const void *pmx_data;
int (*parse_dt)(struct meson_pinctrl *pc);
};
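
With these header changes, every SoC-specific table in the meson files
above can live in .rodata and be wired up through const pointers end to
end. A hedged sketch of the resulting shape (the "acme" names are
hypothetical):

	static const struct meson_pmx_group acme_groups[] = { /* ... */ };
	static const struct meson_pmx_func acme_funcs[] = { /* ... */ };
	static const struct meson_bank acme_banks[] = { /* ... */ };

	static const struct meson_pinctrl_data acme_pinctrl_data = {
		.name	= "acme-banks",
		.groups	= acme_groups,
		.funcs	= acme_funcs,
		.banks	= acme_banks,
		/* stray writes are now compile errors, and the tables may be
		 * placed in read-only memory by the loader */
	};
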
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
index 7f22aa0f8e36..10adf52edda6 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
@@ -32,7 +32,7 @@
static void meson8_pmx_disable_other_groups(struct meson_pinctrl *pc,
unsigned int pin, int sel_group)
{
- struct meson_pmx_group *group;
+ const struct meson_pmx_group *group;
struct meson8_pmx_data *pmx_data;
int i, j;
@@ -57,8 +57,8 @@ static int meson8_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
unsigned group_num)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_func *func = &pc->data->funcs[func_num];
- struct meson_pmx_group *group = &pc->data->groups[group_num];
+ const struct meson_pmx_func *func = &pc->data->funcs[func_num];
+ const struct meson_pmx_group *group = &pc->data->groups[group_num];
struct meson8_pmx_data *pmx_data =
(struct meson8_pmx_data *)group->data;
int i, ret = 0;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index dd17100efdcf..3da7f3799c3f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -405,7 +405,7 @@ static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 };
static const unsigned int hdmi_cec_ao_pins[] = { GPIOAO_12 };
-static struct meson_pmx_group meson8_cbus_groups[] = {
+static const struct meson_pmx_group meson8_cbus_groups[] = {
GPIO_GROUP(GPIOX_0),
GPIO_GROUP(GPIOX_1),
GPIO_GROUP(GPIOX_2),
@@ -745,7 +745,7 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
GROUP(sdxc_cmd_b, 2, 4),
};
-static struct meson_pmx_group meson8_aobus_groups[] = {
+static const struct meson_pmx_group meson8_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -1015,7 +1015,7 @@ static const char * const hdmi_cec_ao_groups[] = {
"hdmi_cec_ao"
};
-static struct meson_pmx_func meson8_cbus_functions[] = {
+static const struct meson_pmx_func meson8_cbus_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(sd_a),
FUNCTION(sdxc_a),
@@ -1051,7 +1051,7 @@ static struct meson_pmx_func meson8_cbus_functions[] = {
FUNCTION(spdif),
};
-static struct meson_pmx_func meson8_aobus_functions[] = {
+static const struct meson_pmx_func meson8_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(remote),
@@ -1063,7 +1063,7 @@ static struct meson_pmx_func meson8_aobus_functions[] = {
FUNCTION(hdmi_cec_ao),
};
-static struct meson_bank meson8_cbus_banks[] = {
+static const struct meson_bank meson8_cbus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("X", GPIOX_0, GPIOX_21, 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
BANK("Y", GPIOY_0, GPIOY_16, 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
@@ -1074,12 +1074,12 @@ static struct meson_bank meson8_cbus_banks[] = {
BANK("BOOT", BOOT_0, BOOT_18, 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
};
-static struct meson_bank meson8_aobus_banks[] = {
+static const struct meson_bank meson8_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
+static const struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.name = "cbus-banks",
.pins = meson8_cbus_pins,
.groups = meson8_cbus_groups,
@@ -1092,7 +1092,7 @@ static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.pmx_ops = &meson8_pmx_ops,
};
-static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.name = "ao-bank",
.pins = meson8_aobus_pins,
.groups = meson8_aobus_groups,
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 6cd4b3ec1b40..a71e1f41358a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -349,7 +349,7 @@ static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
static const unsigned int eth_mdc_pins[] = { DIF_4_P };
static const unsigned int eth_mdio_en_pins[] = { DIF_4_N };
-static struct meson_pmx_group meson8b_cbus_groups[] = {
+static const struct meson_pmx_group meson8b_cbus_groups[] = {
GPIO_GROUP(GPIOX_0),
GPIO_GROUP(GPIOX_1),
GPIO_GROUP(GPIOX_2),
@@ -603,7 +603,7 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
GROUP(eth_rxd2, 7, 23),
};
-static struct meson_pmx_group meson8b_aobus_groups[] = {
+static const struct meson_pmx_group meson8b_aobus_groups[] = {
GPIO_GROUP(GPIOAO_0),
GPIO_GROUP(GPIOAO_1),
GPIO_GROUP(GPIOAO_2),
@@ -869,7 +869,7 @@ static const char * const tsin_b_groups[] = {
"tsin_d0_b", "tsin_clk_b", "tsin_sop_b", "tsin_d_valid_b"
};
-static struct meson_pmx_func meson8b_cbus_functions[] = {
+static const struct meson_pmx_func meson8b_cbus_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(sd_a),
FUNCTION(sdxc_a),
@@ -903,7 +903,7 @@ static struct meson_pmx_func meson8b_cbus_functions[] = {
FUNCTION(clk_24m),
};
-static struct meson_pmx_func meson8b_aobus_functions[] = {
+static const struct meson_pmx_func meson8b_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
@@ -917,7 +917,7 @@ static struct meson_pmx_func meson8b_aobus_functions[] = {
FUNCTION(hdmi_cec),
};
-static struct meson_bank meson8b_cbus_banks[] = {
+static const struct meson_bank meson8b_cbus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("X0..11", GPIOX_0, GPIOX_11, 97, 108, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
BANK("X16..21", GPIOX_16, GPIOX_21, 113, 118, 4, 16, 4, 16, 0, 16, 1, 16, 2, 16),
@@ -938,12 +938,12 @@ static struct meson_bank meson8b_cbus_banks[] = {
BANK("DIF", DIF_0_P, DIF_4_N, -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
};
-static struct meson_bank meson8b_aobus_banks[] = {
+static const struct meson_bank meson8b_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
-static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
+static const struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.name = "cbus-banks",
.pins = meson8b_cbus_pins,
.groups = meson8b_cbus_groups,
@@ -956,7 +956,7 @@ static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.pmx_ops = &meson8_pmx_ops,
};
-static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
+static const struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.name = "aobus-banks",
.pins = meson8b_aobus_pins,
.groups = meson8b_aobus_groups,
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 1947da73e512..dce601d99372 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -767,7 +767,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
struct resource fb_res;
struct mvebu_mpp_ctrl_data *mpp_data;
void __iomem *base;
- int i;
+ int i, ret;
pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
@@ -783,13 +783,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mpp_res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_probe;
+ }
mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols,
sizeof(*mpp_data), GFP_KERNEL);
- if (!mpp_data)
- return -ENOMEM;
+ if (!mpp_data) {
+ ret = -ENOMEM;
+ goto err_probe;
+ }
dove_pinctrl_info.control_data = mpp_data;
for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++)
@@ -808,8 +812,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
mpp4_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mpp4_base))
- return PTR_ERR(mpp4_base);
+ if (IS_ERR(mpp4_base)) {
+ ret = PTR_ERR(mpp4_base);
+ goto err_probe;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!res) {
@@ -820,8 +826,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
}
pmu_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pmu_base))
- return PTR_ERR(pmu_base);
+ if (IS_ERR(pmu_base)) {
+ ret = PTR_ERR(pmu_base);
+ goto err_probe;
+ }
gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config");
if (IS_ERR(gconfmap)) {
@@ -831,12 +839,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
adjust_resource(&fb_res,
(mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14);
gc_base = devm_ioremap_resource(&pdev->dev, &fb_res);
- if (IS_ERR(gc_base))
- return PTR_ERR(gc_base);
+ if (IS_ERR(gc_base)) {
+ ret = PTR_ERR(gc_base);
+ goto err_probe;
+ }
+
gconfmap = devm_regmap_init_mmio(&pdev->dev,
gc_base, &gc_regmap_config);
- if (IS_ERR(gconfmap))
- return PTR_ERR(gconfmap);
+ if (IS_ERR(gconfmap)) {
+ ret = PTR_ERR(gconfmap);
+ goto err_probe;
+ }
}
/* Warn on any missing DT resource */
@@ -844,6 +857,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
return mvebu_pinctrl_probe(pdev);
+err_probe:
+ clk_disable_unprepare(clk);
+ return ret;
}
static struct platform_driver dove_pinctrl_driver = {
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 47f62c89955a..68750b6f8e57 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -716,8 +716,7 @@ static int abx500_dt_add_map_configs(struct pinctrl_map **map,
if (*num_maps == *reserved_maps)
return -ENOSPC;
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
+ dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs), GFP_KERNEL);
if (!dup_configs)
return -ENOMEM;
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index fa78d5ecc685..f4f10c60c1d2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -601,8 +601,7 @@ static int nmk_dt_add_map_configs(struct pinctrl_map **map,
if (*num_maps == *reserved_maps)
return -ENOSPC;
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
+ dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs), GFP_KERNEL);
if (!dup_configs)
return -ENOMEM;
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index a377d36b0eb0..471f644c5eef 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -241,6 +241,7 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_BOTH:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio);
break;
case IRQ_TYPE_LEVEL_LOW:
@@ -315,8 +316,8 @@ static struct irq_chip npcmgpio_irqchip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static const int gpi36_pins[] = { 58 };
-static const int gpi35_pins[] = { 58 };
+static const int gpi36_pins[] = { 36 };
+static const int gpi35_pins[] = { 35 };
static const int tp_jtag3_pins[] = { 44, 62, 45, 46 };
static const int tp_uart_pins[] = { 50, 51 };
@@ -437,7 +438,6 @@ static const int smb4_pins[] = { 28, 29 };
static const int smb4b_pins[] = { 18, 19 };
static const int smb4c_pins[] = { 20, 21 };
static const int smb4d_pins[] = { 22, 23 };
-static const int smb4den_pins[] = { 17 };
static const int smb5_pins[] = { 26, 27 };
static const int smb5b_pins[] = { 13, 12 };
static const int smb5c_pins[] = { 15, 14 };
@@ -515,7 +515,7 @@ static const int rg2_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212,
static const int rg2mdio_pins[] = { 216, 217 };
static const int ddr_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212,
- 213, 214, 215, 216, 217 };
+ 213, 214, 215, 216, 217, 250 };
static const int iox1_pins[] = { 0, 1, 2, 3 };
static const int iox2_pins[] = { 4, 5, 6, 7 };
@@ -570,7 +570,6 @@ static const int spi3cs3_pins[] = { 189 };
static const int ddc_pins[] = { 204, 205, 206, 207 };
static const int lpc_pins[] = { 95, 161, 163, 164, 165, 166, 167 };
-static const int lpcclk_pins[] = { 168 };
static const int espi_pins[] = { 95, 161, 163, 164, 165, 166, 167, 168 };
static const int lkgpo0_pins[] = { 16 };
@@ -699,7 +698,6 @@ struct npcm8xx_pingroup {
NPCM8XX_GRP(smb4b), \
NPCM8XX_GRP(smb4c), \
NPCM8XX_GRP(smb4d), \
- NPCM8XX_GRP(smb4den), \
NPCM8XX_GRP(smb5), \
NPCM8XX_GRP(smb5b), \
NPCM8XX_GRP(smb5c), \
@@ -808,7 +806,6 @@ struct npcm8xx_pingroup {
NPCM8XX_GRP(spi3cs3), \
NPCM8XX_GRP(spi0cs1), \
NPCM8XX_GRP(lpc), \
- NPCM8XX_GRP(lpcclk), \
NPCM8XX_GRP(espi), \
NPCM8XX_GRP(lkgpo0), \
NPCM8XX_GRP(lkgpo1), \
@@ -948,7 +945,6 @@ NPCM8XX_SFUNC(smb4);
NPCM8XX_SFUNC(smb4b);
NPCM8XX_SFUNC(smb4c);
NPCM8XX_SFUNC(smb4d);
-NPCM8XX_SFUNC(smb4den);
NPCM8XX_SFUNC(smb5);
NPCM8XX_SFUNC(smb5b);
NPCM8XX_SFUNC(smb5c);
@@ -1056,7 +1052,6 @@ NPCM8XX_SFUNC(spi3cs2);
NPCM8XX_SFUNC(spi3cs3);
NPCM8XX_SFUNC(spi0cs1);
NPCM8XX_SFUNC(lpc);
-NPCM8XX_SFUNC(lpcclk);
NPCM8XX_SFUNC(espi);
NPCM8XX_SFUNC(lkgpo0);
NPCM8XX_SFUNC(lkgpo1);
@@ -1172,7 +1167,6 @@ static struct npcm8xx_func npcm8xx_funcs[] = {
NPCM8XX_MKFUNC(smb4b),
NPCM8XX_MKFUNC(smb4c),
NPCM8XX_MKFUNC(smb4d),
- NPCM8XX_MKFUNC(smb4den),
NPCM8XX_MKFUNC(smb5),
NPCM8XX_MKFUNC(smb5b),
NPCM8XX_MKFUNC(smb5c),
@@ -1280,7 +1274,6 @@ static struct npcm8xx_func npcm8xx_funcs[] = {
NPCM8XX_MKFUNC(spi3cs3),
NPCM8XX_MKFUNC(spi0cs1),
NPCM8XX_MKFUNC(lpc),
- NPCM8XX_MKFUNC(lpcclk),
NPCM8XX_MKFUNC(espi),
NPCM8XX_MKFUNC(lkgpo0),
NPCM8XX_MKFUNC(lkgpo1),
@@ -1347,7 +1340,7 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(16, lkgpo0, FLOCKR1, 0, smb7b, I2CSEGSEL, 27, tp_gpio2b, MFSEL7, 10, none, NONE, 0, none, NONE, 0, SLEW),
- NPCM8XX_PINCFG(17, pspi, MFSEL3, 13, cp1gpio5, MFSEL6, 7, smb4den, I2CSEGSEL, 23, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(17, pspi, MFSEL3, 13, cp1gpio5, MFSEL6, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(18, pspi, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(19, pspi, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(20, hgpio0, MFSEL2, 24, smb15, MFSEL3, 8, smb4c, I2CSEGSEL, 15, none, NONE, 0, none, NONE, 0, SLEW),
@@ -1365,6 +1358,8 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(32, spi0cs1, MFSEL1, 3, smb14b, MFSEL7, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(33, i3c4, MFSEL6, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(34, i3c4, MFSEL6, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(35, gpi35, MFSEL5, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(36, gpi36, MFSEL5, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(37, smb3c, I2CSEGSEL, 12, smb23, MFSEL5, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(38, smb3c, I2CSEGSEL, 12, smb23, MFSEL5, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(39, smb3b, I2CSEGSEL, 11, smb22, MFSEL5, 30, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
@@ -1438,10 +1433,10 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(107, i3c5, MFSEL3, 22, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(108, sg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(109, sg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
- NPCM8XX_PINCFG(110, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(111, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(112, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(113, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(110, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(111, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(112, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(113, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(114, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(115, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(116, smb1, MFSEL1, 7, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
@@ -1490,13 +1485,13 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(161, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(162, serirq, MFSEL1, 31, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
+ NPCM8XX_PINCFG(162, clkrun, MFSEL3, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM8XX_PINCFG(163, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(164, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(165, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(166, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(167, lpc, MFSEL1, 26, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(168, lpcclk, MFSEL1, 31, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(168, serirq, MFSEL1, 31, espi, MFSEL4, 8, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(169, scipme, MFSEL3, 0, smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(170, smi, MFSEL1, 22, smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(171, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
@@ -1515,22 +1510,22 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(184, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(185, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(186, gpio1836, MFSEL6, 19, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
- NPCM8XX_PINCFG(187, gpo187, MFSEL7, 24, smb14b, MFSEL7, 26, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, 0),
+ NPCM8XX_PINCFG(187, gpo187, MFSEL7, 24, smb14b, MFSEL7, 26, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(188, gpio1889, MFSEL7, 25, spi3cs2, MFSEL4, 18, spi3quad, MFSEL4, 20, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(189, gpio1889, MFSEL7, 25, spi3cs3, MFSEL4, 19, spi3quad, MFSEL4, 20, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(190, nprd_smi, FLOCKR1, 20, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(2, 4)),
- NPCM8XX_PINCFG(191, spi1d23, MFSEL5, 3, spi1cs2, MFSEL5, 4, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, DSTR(0, 2)), /* XX */
- NPCM8XX_PINCFG(192, spi1d23, MFSEL5, 3, spi1cs3, MFSEL5, 5, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, DSTR(0, 2)), /* XX */
+ NPCM8XX_PINCFG(191, spi1d23, MFSEL5, 3, spi1cs2, MFSEL5, 4, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, SLEW), /* XX */
+ NPCM8XX_PINCFG(192, spi1d23, MFSEL5, 3, spi1cs3, MFSEL5, 5, fm1, MFSEL6, 17, smb15, MFSEL7, 27, none, NONE, 0, SLEW), /* XX */
NPCM8XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
- NPCM8XX_PINCFG(194, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(195, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(196, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(197, smb0den, I2CSEGSEL, 22, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(198, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
- NPCM8XX_PINCFG(199, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(194, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(195, smb0b, I2CSEGSEL, 0, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(196, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(197, smb0den, I2CSEGSEL, 22, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(198, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(199, smb0d, I2CSEGSEL, 2, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
NPCM8XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO),
- NPCM8XX_PINCFG(202, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(0, 1)),
+ NPCM8XX_PINCFG(202, smb0c, I2CSEGSEL, 1, fm0, MFSEL6, 16, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(203, faninx, MFSEL3, 3, spi1cs0, MFSEL3, 4, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)),
NPCM8XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW), /* DSCNT */
NPCM8XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, rmii3, MFSEL5, 11, none, NONE, 0, none, NONE, 0, SLEW), /* DSCNT */
@@ -1553,10 +1548,10 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(226, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(227, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(228, spixcs1, MFSEL4, 28, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
- NPCM8XX_PINCFG(229, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
- NPCM8XX_PINCFG(230, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(229, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(230, spix, MFSEL4, 27, fm2, MFSEL6, 18, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPO | DSTR(8, 12) | SLEW),
NPCM8XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(4, 12) | SLEW),
- NPCM8XX_PINCFG(233, spi1cs1, MFSEL5, 0, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEWLPC), /* slewlpc ? */
+ NPCM8XX_PINCFG(233, spi1cs1, MFSEL5, 0, fm1, MFSEL6, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0), /* slewlpc ? */
NPCM8XX_PINCFG(234, pwm10, MFSEL6, 13, smb20, MFSEL5, 28, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(235, pwm11, MFSEL6, 14, smb20, MFSEL5, 28, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(240, i3c0, MFSEL5, 17, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
@@ -1567,7 +1562,8 @@ static const struct npcm8xx_pincfg pincfg[] = {
NPCM8XX_PINCFG(245, i3c2, MFSEL5, 21, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(246, i3c3, MFSEL5, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
NPCM8XX_PINCFG(247, i3c3, MFSEL5, 23, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
- NPCM8XX_PINCFG(251, jm2, MFSEL5, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM8XX_PINCFG(250, ddr, MFSEL3, 26, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW),
+ NPCM8XX_PINCFG(251, jm2, MFSEL5, 1, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */
NPCM8XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */
NPCM8XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */
@@ -1610,6 +1606,8 @@ static const struct pinctrl_pin_desc npcm8xx_pins[] = {
PINCTRL_PIN(32, "GPIO32/SMB14B_SCL/SPI0_nCS1"),
PINCTRL_PIN(33, "GPIO33/I3C4_SCL"),
PINCTRL_PIN(34, "GPIO34/I3C4_SDA"),
+ PINCTRL_PIN(35, "MCBPCK/GPI35_AHB2PCI_DIS"),
+ PINCTRL_PIN(36, "SYSBPCK/GPI36"),
PINCTRL_PIN(37, "GPIO37/SMB3C_SDA/SMB23_SDA"),
PINCTRL_PIN(38, "GPIO38/SMB3C_SCL/SMB23_SCL"),
PINCTRL_PIN(39, "GPIO39/SMB3B_SDA/SMB22_SDA"),
@@ -2044,7 +2042,7 @@ static int npcm8xx_gpio_request_enable(struct pinctrl_dev *pctldev,
const unsigned int *pin = &offset;
int mode = fn_gpio;
- if (pin[0] >= 183 && pin[0] <= 189)
+ if ((pin[0] >= 183 && pin[0] <= 189) || pin[0] == 35 || pin[0] == 36)
mode = pincfg[pin[0]].fn0;
npcm8xx_setfunc(npcm->gcr_regmap, &offset, 1, mode);
diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c
index f2609a35c312..501eb296c760 100644
--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c
+++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c
@@ -2,7 +2,7 @@
/*
* Core driver for the S32 CC (Common Chassis) pin controller
*
- * Copyright 2017-2022 NXP
+ * Copyright 2017-2022,2024 NXP
* Copyright (C) 2022 SUSE LLC
* Copyright 2015-2016 Freescale Semiconductor, Inc.
*/
@@ -39,6 +39,11 @@
#define S32_MSCR_ODE BIT(20)
#define S32_MSCR_OBE BIT(21)
+enum s32_write_type {
+ S32_PINCONF_UPDATE_ONLY,
+ S32_PINCONF_OVERWRITE,
+};
+
static struct regmap_config s32_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -431,16 +436,15 @@ static int s32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned int offset,
bool input)
{
- unsigned int config;
+ /* Always enable IBE for GPIOs. This allows us to read the
+ * actual line value and compare it with the one set.
+ */
+ unsigned int config = S32_MSCR_IBE;
unsigned int mask = S32_MSCR_IBE | S32_MSCR_OBE;
- if (input) {
- /* Disable output buffer and enable input buffer */
- config = S32_MSCR_IBE;
- } else {
- /* Disable input buffer and enable output buffer */
- config = S32_MSCR_OBE;
- }
+ /* Enable output buffer */
+ if (!input)
+ config |= S32_MSCR_OBE;
return s32_regmap_update(pctldev, offset, mask, config);
}
@@ -511,6 +515,10 @@ static int s32_parse_pincfg(unsigned long pincfg, unsigned int *mask,
*config |= S32_MSCR_ODE;
*mask |= S32_MSCR_ODE;
break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ *config &= ~S32_MSCR_ODE;
+ *mask |= S32_MSCR_ODE;
+ break;
case PIN_CONFIG_OUTPUT_ENABLE:
if (arg)
*config |= S32_MSCR_OBE;
@@ -549,10 +557,11 @@ static int s32_parse_pincfg(unsigned long pincfg, unsigned int *mask,
return 0;
}
-static int s32_pinconf_mscr_update(struct pinctrl_dev *pctldev,
+static int s32_pinconf_mscr_write(struct pinctrl_dev *pctldev,
unsigned int pin_id,
unsigned long *configs,
- unsigned int num_configs)
+ unsigned int num_configs,
+ enum s32_write_type write_type)
{
struct s32_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
unsigned int config = 0, mask = 0;
@@ -571,10 +580,20 @@ static int s32_pinconf_mscr_update(struct pinctrl_dev *pctldev,
return ret;
}
+	/* When the whole MSCR configuration is overwritten,
+	 * the SSS field should not be touched.
+	 */
+ if (write_type == S32_PINCONF_OVERWRITE)
+ mask = (unsigned int)~S32_MSCR_SSS_MASK;
+
if (!config && !mask)
return 0;
- dev_dbg(ipctl->dev, "update: pin %u cfg 0x%x\n", pin_id, config);
+ if (write_type == S32_PINCONF_OVERWRITE)
+ dev_dbg(ipctl->dev, "set: pin %u cfg 0x%x\n", pin_id, config);
+ else
+ dev_dbg(ipctl->dev, "update: pin %u cfg 0x%x\n", pin_id,
+ config);
return s32_regmap_update(pctldev, pin_id, mask, config);
}
@@ -590,8 +609,8 @@ static int s32_pinconf_set(struct pinctrl_dev *pctldev,
unsigned int pin_id, unsigned long *configs,
unsigned int num_configs)
{
- return s32_pinconf_mscr_update(pctldev, pin_id, configs,
- num_configs);
+ return s32_pinconf_mscr_write(pctldev, pin_id, configs,
+ num_configs, S32_PINCONF_UPDATE_ONLY);
}
static int s32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int selector,
@@ -604,8 +623,8 @@ static int s32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned int selecto
grp = &info->groups[selector];
for (i = 0; i < grp->data.npins; i++) {
- ret = s32_pinconf_mscr_update(pctldev, grp->data.pins[i],
- configs, num_configs);
+ ret = s32_pinconf_mscr_write(pctldev, grp->data.pins[i],
+ configs, num_configs, S32_PINCONF_OVERWRITE);
if (ret)
return ret;
}
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index a499b8af5c1f..0b13d7f17b32 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -44,6 +44,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "usec", true),
PCONFDUMP(PIN_CONFIG_INPUT_ENABLE, "input enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
+ PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_UV, "input schmitt threshold", "uV", true),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
@@ -177,6 +178,7 @@ static const struct pinconf_generic_params dt_params[] = {
{ "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 },
{ "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 },
{ "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
+ { "input-schmitt-microvolts", PIN_CONFIG_INPUT_SCHMITT_UV, 0 },
{ "low-power-disable", PIN_CONFIG_MODE_LOW_POWER, 0 },
{ "low-power-enable", PIN_CONFIG_MODE_LOW_POWER, 1 },
{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
diff --git a/drivers/pinctrl/pinctrl-ep93xx.c b/drivers/pinctrl/pinctrl-ep93xx.c
new file mode 100644
index 000000000000..abafbbb8fd6a
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ep93xx.c
@@ -0,0 +1,1434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the EP93xx pin controller
+ * based on linux/drivers/pinctrl/pinmux-gemini.c
+ *
+ * Copyright (C) 2022 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * This is a group-only pin controller.
+ */
+#include <linux/array_size.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-utils.h"
+
+#define DRIVER_NAME "pinctrl-ep93xx"
+
+enum ep93xx_pinctrl_model {
+ EP93XX_9301_PINCTRL,
+ EP93XX_9307_PINCTRL,
+ EP93XX_9312_PINCTRL,
+};
+
+struct ep93xx_pmx {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ struct ep93xx_regmap_adev *aux_dev;
+ struct regmap *map;
+ enum ep93xx_pinctrl_model model;
+};
+
+static void ep93xx_pinctrl_update_bits(struct ep93xx_pmx *pmx, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct ep93xx_regmap_adev *aux = pmx->aux_dev;
+
+ aux->update_bits(aux->map, aux->lock, reg, mask, val);
+}
+
+struct ep93xx_pin_group {
+ struct pingroup grp;
+ u32 mask;
+ u32 value;
+};
+
+#define PMX_GROUP(_name, _pins, _mask, _value) \
+ { \
+ .grp = PINCTRL_PINGROUP(_name, _pins, ARRAY_SIZE(_pins)), \
+ .mask = _mask, \
+ .value = _value, \
+ }
+
+#define EP93XX_SYSCON_DEVCFG 0x80
+
+/*
+ * There are several system configuration options selectable by the DeviceCfg and SysCfg
+ * registers. These registers provide the selection of several pin multiplexing options and also
+ * provide software access to the system reset configuration options. Please refer to the
+ * descriptions of the registers, “DeviceCfg” on page 5-25 and “SysCfg” on page 5-34, for a
+ * detailed explanation.
+ */
+#define EP93XX_SYSCON_DEVCFG_D1ONG BIT(30)
+#define EP93XX_SYSCON_DEVCFG_D0ONG BIT(29)
+#define EP93XX_SYSCON_DEVCFG_IONU2 BIT(28)
+#define EP93XX_SYSCON_DEVCFG_GONK BIT(27)
+#define EP93XX_SYSCON_DEVCFG_TONG BIT(26)
+#define EP93XX_SYSCON_DEVCFG_MONG BIT(25)
+#define EP93XX_SYSCON_DEVCFG_A2ONG BIT(22)
+#define EP93XX_SYSCON_DEVCFG_A1ONG BIT(21)
+#define EP93XX_SYSCON_DEVCFG_HONIDE BIT(11)
+#define EP93XX_SYSCON_DEVCFG_GONIDE BIT(10)
+#define EP93XX_SYSCON_DEVCFG_PONG BIT(9)
+#define EP93XX_SYSCON_DEVCFG_EONIDE BIT(8)
+#define EP93XX_SYSCON_DEVCFG_I2SONSSP BIT(7)
+#define EP93XX_SYSCON_DEVCFG_I2SONAC97 BIT(6)
+#define EP93XX_SYSCON_DEVCFG_RASONP3 BIT(4)
+
+#define PADS_MASK (GENMASK(30, 25) | BIT(22) | BIT(21) | GENMASK(11, 6) | BIT(4))
+#define PADS_MAXBIT 30
+
+/* Ordered by bit index */
+static const char * const ep93xx_padgroups[] = {
+ NULL, NULL, NULL, NULL,
+ "RasOnP3",
+ NULL,
+ "I2SonAC97",
+ "I2SonSSP",
+ "EonIDE",
+ "PonG",
+ "GonIDE",
+ "HonIDE",
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "A1onG",
+ "A2onG",
+ NULL, NULL,
+ "MonG",
+ "TonG",
+ "GonK",
+ "IonU2",
+ "D0onG",
+ "D1onG",
+};
+
+/* ep9301, ep9302 */
+static const struct pinctrl_pin_desc ep9301_pins[] = {
+ PINCTRL_PIN(1, "CSn[7]"),
+ PINCTRL_PIN(2, "CSn[6]"),
+ PINCTRL_PIN(3, "CSn[3]"),
+ PINCTRL_PIN(4, "CSn[2]"),
+ PINCTRL_PIN(5, "CSn[1]"),
+ PINCTRL_PIN(6, "AD[25]"),
+ PINCTRL_PIN(7, "vdd_ring"),
+ PINCTRL_PIN(8, "gnd_ring"),
+ PINCTRL_PIN(9, "AD[24]"),
+ PINCTRL_PIN(10, "SDCLK"),
+ PINCTRL_PIN(11, "AD[23]"),
+ PINCTRL_PIN(12, "vdd_core"),
+ PINCTRL_PIN(13, "gnd_core"),
+ PINCTRL_PIN(14, "SDWEn"),
+ PINCTRL_PIN(15, "SDCSn[3]"),
+ PINCTRL_PIN(16, "SDCSn[2]"),
+ PINCTRL_PIN(17, "SDCSn[1]"),
+ PINCTRL_PIN(18, "SDCSn[0]"),
+ PINCTRL_PIN(19, "vdd_ring"),
+ PINCTRL_PIN(20, "gnd_ring"),
+ PINCTRL_PIN(21, "RASn"),
+ PINCTRL_PIN(22, "CASn"),
+ PINCTRL_PIN(23, "DQMn[1]"),
+ PINCTRL_PIN(24, "DQMn[0]"),
+ PINCTRL_PIN(25, "AD[22]"),
+ PINCTRL_PIN(26, "AD[21]"),
+ PINCTRL_PIN(27, "vdd_ring"),
+ PINCTRL_PIN(28, "gnd_ring"),
+ PINCTRL_PIN(29, "DA[15]"),
+ PINCTRL_PIN(30, "AD[7]"),
+ PINCTRL_PIN(31, "DA[14]"),
+ PINCTRL_PIN(32, "AD[6]"),
+ PINCTRL_PIN(33, "DA[13]"),
+ PINCTRL_PIN(34, "vdd_core"),
+ PINCTRL_PIN(35, "gnd_core"),
+ PINCTRL_PIN(36, "AD[5]"),
+ PINCTRL_PIN(37, "DA[12]"),
+ PINCTRL_PIN(38, "AD[4]"),
+ PINCTRL_PIN(39, "DA[11]"),
+ PINCTRL_PIN(40, "AD[3]"),
+ PINCTRL_PIN(41, "vdd_ring"),
+ PINCTRL_PIN(42, "gnd_ring"),
+ PINCTRL_PIN(43, "DA[10]"),
+ PINCTRL_PIN(44, "AD[2]"),
+ PINCTRL_PIN(45, "DA[9]"),
+ PINCTRL_PIN(46, "AD[1]"),
+ PINCTRL_PIN(47, "DA[8]"),
+ PINCTRL_PIN(48, "AD[0]"),
+ PINCTRL_PIN(49, "vdd_ring"),
+ PINCTRL_PIN(50, "gnd_ring"),
+ PINCTRL_PIN(51, "NC"),
+ PINCTRL_PIN(52, "NC"),
+ PINCTRL_PIN(53, "vdd_ring"),
+ PINCTRL_PIN(54, "gnd_ring"),
+ PINCTRL_PIN(55, "AD[15]"),
+ PINCTRL_PIN(56, "DA[7]"),
+ PINCTRL_PIN(57, "vdd_core"),
+ PINCTRL_PIN(58, "gnd_core"),
+ PINCTRL_PIN(59, "AD[14]"),
+ PINCTRL_PIN(60, "DA[6]"),
+ PINCTRL_PIN(61, "AD[13]"),
+ PINCTRL_PIN(62, "DA[5]"),
+ PINCTRL_PIN(63, "AD[12]"),
+ PINCTRL_PIN(64, "DA[4]"),
+ PINCTRL_PIN(65, "AD[11]"),
+ PINCTRL_PIN(66, "vdd_ring"),
+ PINCTRL_PIN(67, "gnd_ring"),
+ PINCTRL_PIN(68, "DA[3]"),
+ PINCTRL_PIN(69, "AD[10]"),
+ PINCTRL_PIN(70, "DA[2]"),
+ PINCTRL_PIN(71, "AD[9]"),
+ PINCTRL_PIN(72, "DA[1]"),
+ PINCTRL_PIN(73, "AD[8]"),
+ PINCTRL_PIN(74, "DA[0]"),
+ PINCTRL_PIN(75, "DSRn"),
+ PINCTRL_PIN(76, "DTRn"),
+ PINCTRL_PIN(77, "TCK"),
+ PINCTRL_PIN(78, "TDI"),
+ PINCTRL_PIN(79, "TDO"),
+ PINCTRL_PIN(80, "TMS"),
+ PINCTRL_PIN(81, "vdd_ring"),
+ PINCTRL_PIN(82, "gnd_ring"),
+ PINCTRL_PIN(83, "BOOT[1]"),
+ PINCTRL_PIN(84, "BOOT[0]"),
+ PINCTRL_PIN(85, "gnd_ring"),
+ PINCTRL_PIN(86, "NC"),
+ PINCTRL_PIN(87, "EECLK"),
+ PINCTRL_PIN(88, "EEDAT"),
+ PINCTRL_PIN(89, "ASYNC"),
+ PINCTRL_PIN(90, "vdd_core"),
+ PINCTRL_PIN(91, "gnd_core"),
+ PINCTRL_PIN(92, "ASDO"),
+ PINCTRL_PIN(93, "SCLK1"),
+ PINCTRL_PIN(94, "SFRM1"),
+ PINCTRL_PIN(95, "SSPRX1"),
+ PINCTRL_PIN(96, "SSPTX1"),
+ PINCTRL_PIN(97, "GRLED"),
+ PINCTRL_PIN(98, "RDLED"),
+ PINCTRL_PIN(99, "vdd_ring"),
+ PINCTRL_PIN(100, "gnd_ring"),
+ PINCTRL_PIN(101, "INT[3]"),
+ PINCTRL_PIN(102, "INT[1]"),
+ PINCTRL_PIN(103, "INT[0]"),
+ PINCTRL_PIN(104, "RTSn"),
+ PINCTRL_PIN(105, "USBm[0]"),
+ PINCTRL_PIN(106, "USBp[0]"),
+ PINCTRL_PIN(107, "ABITCLK"),
+ PINCTRL_PIN(108, "CTSn"),
+ PINCTRL_PIN(109, "RXD[0]"),
+ PINCTRL_PIN(110, "RXD[1]"),
+ PINCTRL_PIN(111, "vdd_ring"),
+ PINCTRL_PIN(112, "gnd_ring"),
+ PINCTRL_PIN(113, "TXD[0]"),
+ PINCTRL_PIN(114, "TXD[1]"),
+ PINCTRL_PIN(115, "CGPIO[0]"),
+ PINCTRL_PIN(116, "gnd_core"),
+ PINCTRL_PIN(117, "PLL_GND"),
+ PINCTRL_PIN(118, "XTALI"),
+ PINCTRL_PIN(119, "XTALO"),
+ PINCTRL_PIN(120, "PLL_VDD"),
+ PINCTRL_PIN(121, "vdd_core"),
+ PINCTRL_PIN(122, "gnd_ring"),
+ PINCTRL_PIN(123, "vdd_ring"),
+ PINCTRL_PIN(124, "RSTOn"),
+ PINCTRL_PIN(125, "PRSTn"),
+ PINCTRL_PIN(126, "CSn[0]"),
+ PINCTRL_PIN(127, "gnd_core"),
+ PINCTRL_PIN(128, "vdd_core"),
+ PINCTRL_PIN(129, "gnd_ring"),
+ PINCTRL_PIN(130, "vdd_ring"),
+ PINCTRL_PIN(131, "ADC[4]"),
+ PINCTRL_PIN(132, "ADC[3]"),
+ PINCTRL_PIN(133, "ADC[2]"),
+ PINCTRL_PIN(134, "ADC[1]"),
+ PINCTRL_PIN(135, "ADC[0]"),
+ PINCTRL_PIN(136, "ADC_VDD"),
+ PINCTRL_PIN(137, "RTCXTALI"),
+ PINCTRL_PIN(138, "RTCXTALO"),
+ PINCTRL_PIN(139, "ADC_GND"),
+ PINCTRL_PIN(140, "EGPIO[11]"),
+ PINCTRL_PIN(141, "EGPIO[10]"),
+ PINCTRL_PIN(142, "EGPIO[9]"),
+ PINCTRL_PIN(143, "EGPIO[8]"),
+ PINCTRL_PIN(144, "EGPIO[7]"),
+ PINCTRL_PIN(145, "EGPIO[6]"),
+ PINCTRL_PIN(146, "EGPIO[5]"),
+ PINCTRL_PIN(147, "EGPIO[4]"),
+ PINCTRL_PIN(148, "EGPIO[3]"),
+ PINCTRL_PIN(149, "gnd_ring"),
+ PINCTRL_PIN(150, "vdd_ring"),
+ PINCTRL_PIN(151, "EGPIO[2]"),
+ PINCTRL_PIN(152, "EGPIO[1]"),
+ PINCTRL_PIN(153, "EGPIO[0]"),
+ PINCTRL_PIN(154, "ARSTn"),
+ PINCTRL_PIN(155, "TRSTn"),
+ PINCTRL_PIN(156, "ASDI"),
+ PINCTRL_PIN(157, "USBm[2]"),
+ PINCTRL_PIN(158, "USBp[2]"),
+ PINCTRL_PIN(159, "WAITn"),
+ PINCTRL_PIN(160, "EGPIO[15]"),
+ PINCTRL_PIN(161, "gnd_ring"),
+ PINCTRL_PIN(162, "vdd_ring"),
+ PINCTRL_PIN(163, "EGPIO[14]"),
+ PINCTRL_PIN(164, "EGPIO[13]"),
+ PINCTRL_PIN(165, "EGPIO[12]"),
+ PINCTRL_PIN(166, "gnd_core"),
+ PINCTRL_PIN(167, "vdd_core"),
+ PINCTRL_PIN(168, "FGPIO[3]"),
+ PINCTRL_PIN(169, "FGPIO[2]"),
+ PINCTRL_PIN(170, "FGPIO[1]"),
+ PINCTRL_PIN(171, "gnd_ring"),
+ PINCTRL_PIN(172, "vdd_ring"),
+ PINCTRL_PIN(173, "CLD"),
+ PINCTRL_PIN(174, "CRS"),
+ PINCTRL_PIN(175, "TXERR"),
+ PINCTRL_PIN(176, "TXEN"),
+ PINCTRL_PIN(177, "MIITXD[0]"),
+ PINCTRL_PIN(178, "MIITXD[1]"),
+ PINCTRL_PIN(179, "MIITXD[2]"),
+ PINCTRL_PIN(180, "MIITXD[3]"),
+ PINCTRL_PIN(181, "TXCLK"),
+ PINCTRL_PIN(182, "RXERR"),
+ PINCTRL_PIN(183, "RXDVAL"),
+ PINCTRL_PIN(184, "MIIRXD[0]"),
+ PINCTRL_PIN(185, "MIIRXD[1]"),
+ PINCTRL_PIN(186, "MIIRXD[2]"),
+ PINCTRL_PIN(187, "gnd_ring"),
+ PINCTRL_PIN(188, "vdd_ring"),
+ PINCTRL_PIN(189, "MIIRXD[3]"),
+ PINCTRL_PIN(190, "RXCLK"),
+ PINCTRL_PIN(191, "MDIO"),
+ PINCTRL_PIN(192, "MDC"),
+ PINCTRL_PIN(193, "RDn"),
+ PINCTRL_PIN(194, "WRn"),
+ PINCTRL_PIN(195, "AD[16]"),
+ PINCTRL_PIN(196, "AD[17]"),
+ PINCTRL_PIN(197, "gnd_core"),
+ PINCTRL_PIN(198, "vdd_core"),
+ PINCTRL_PIN(199, "HGPIO[2]"),
+ PINCTRL_PIN(200, "HGPIO[3]"),
+ PINCTRL_PIN(201, "HGPIO[4]"),
+ PINCTRL_PIN(202, "HGPIO[5]"),
+ PINCTRL_PIN(203, "gnd_ring"),
+ PINCTRL_PIN(204, "vdd_ring"),
+ PINCTRL_PIN(205, "AD[18]"),
+ PINCTRL_PIN(206, "AD[19]"),
+ PINCTRL_PIN(207, "AD[20]"),
+ PINCTRL_PIN(208, "SDCLKEN"),
+};
+
+static const unsigned int ssp_ep9301_pins[] = {
+ 93, 94, 95, 96,
+};
+
+static const unsigned int ac97_ep9301_pins[] = {
+ 89, 92, 107, 154, 156,
+};
+
+/*
+ * Note: The EP9307 processor has one PWM with one output, PWMOUT.
+ * Note: The EP9301, EP9302, EP9312, and EP9315 processors each have two PWMs with
+ * two outputs, PWMOUT and PWMO1. PWMO1 is an alternate function for EGPIO14.
+ */
+/* The GPIO14E (14) pin overlaps with pwm1 */
+static const unsigned int pwm_9301_pins[] = { 163 };
+
+static const unsigned int gpio1a_9301_pins[] = { 163 };
+
+/* ep9301/9302 expose only pin 0 of GPIO Port C */
+static const unsigned int gpio2a_9301_pins[] = { 115 };
+
+/* ep9301/9302 expose only pins 4 and 5 of GPIO Port E */
+static const unsigned int gpio4a_9301_pins[] = { 97, 98 };
+
+/* ep9301/9302 expose only pins 4 and 5 of GPIO Port G */
+static const unsigned int gpio6a_9301_pins[] = { 87, 88 };
+
+static const unsigned int gpio7a_9301_pins[] = { 199, 200, 201, 202 };
+
+/* Groups for the ep9301/ep9302 SoC/package */
+static const struct ep93xx_pin_group ep9301_pin_groups[] = {
+ PMX_GROUP("ssp", ssp_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0),
+ PMX_GROUP("i2s_on_ssp", ssp_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP,
+ EP93XX_SYSCON_DEVCFG_I2SONSSP),
+ PMX_GROUP("ac97", ac97_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0),
+ PMX_GROUP("i2s_on_ac97", ac97_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97,
+ EP93XX_SYSCON_DEVCFG_I2SONAC97),
+ PMX_GROUP("pwm1", pwm_9301_pins, EP93XX_SYSCON_DEVCFG_PONG, EP93XX_SYSCON_DEVCFG_PONG),
+ PMX_GROUP("gpio1agrp", gpio1a_9301_pins, EP93XX_SYSCON_DEVCFG_PONG, 0),
+ PMX_GROUP("gpio2agrp", gpio2a_9301_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("gpio4agrp", gpio4a_9301_pins, EP93XX_SYSCON_DEVCFG_EONIDE,
+ EP93XX_SYSCON_DEVCFG_EONIDE),
+ PMX_GROUP("gpio6agrp", gpio6a_9301_pins, EP93XX_SYSCON_DEVCFG_GONIDE,
+ EP93XX_SYSCON_DEVCFG_GONIDE),
+ PMX_GROUP("gpio7agrp", gpio7a_9301_pins, EP93XX_SYSCON_DEVCFG_HONIDE,
+ EP93XX_SYSCON_DEVCFG_HONIDE),
+};
+
+static const struct pinctrl_pin_desc ep9307_pins[] = {
+ /* Row A */
+ PINCTRL_PIN(0, "CSn[1]"), /* A1 */
+ PINCTRL_PIN(1, "CSn[7]"), /* A2 */
+ PINCTRL_PIN(2, "SDCLKEN"), /* A3 */
+ PINCTRL_PIN(3, "DA[31]"), /* A4 */
+ PINCTRL_PIN(4, "DA[29]"), /* A5 */
+ PINCTRL_PIN(5, "DA[27]"), /* A6 */
+ PINCTRL_PIN(6, "HGPIO[2]"), /* A7 */
+ PINCTRL_PIN(7, "RDn"), /* A8 */
+ PINCTRL_PIN(8, "MIIRXD[3]"), /* A9 */
+ PINCTRL_PIN(9, "RXDVAL"), /* A10 */
+ PINCTRL_PIN(10, "MIITXD[1]"), /* A11 */
+ PINCTRL_PIN(11, "CRS"), /* A12 */
+ PINCTRL_PIN(12, "FGPIO[7]"), /* A13 */
+ PINCTRL_PIN(13, "FGPIO[0]"), /* A14 */
+ PINCTRL_PIN(14, "WAITn"), /* A15 */
+ PINCTRL_PIN(15, "USBm[2]"), /* A16 */
+ PINCTRL_PIN(16, "ASDI"), /* A17 */
+ /* Row B */
+ PINCTRL_PIN(17, "AD[25]"), /* B1 */
+ PINCTRL_PIN(18, "CSn[2]"), /* B2 */
+ PINCTRL_PIN(19, "CSn[6]"), /* B3 */
+ PINCTRL_PIN(20, "AD[20]"), /* B4 */
+ PINCTRL_PIN(21, "DA[30]"), /* B5 */
+ PINCTRL_PIN(22, "AD[18]"), /* B6 */
+ PINCTRL_PIN(23, "HGPIO[3]"), /* B7 */
+ PINCTRL_PIN(24, "AD[17]"), /* B8 */
+ PINCTRL_PIN(25, "RXCLK"), /* B9 */
+ PINCTRL_PIN(26, "MIIRXD[1]"), /* B10 */
+ PINCTRL_PIN(27, "MIITXD[2]"), /* B11 */
+ PINCTRL_PIN(28, "TXEN"), /* B12 */
+ PINCTRL_PIN(29, "FGPIO[5]"), /* B13 */
+ PINCTRL_PIN(30, "EGPIO[15]"), /* B14 */
+ PINCTRL_PIN(31, "USBp[2]"), /* B15 */
+ PINCTRL_PIN(32, "ARSTn"), /* B16 */
+ PINCTRL_PIN(33, "ADC_VDD"), /* B17 */
+ /* Row C */
+ PINCTRL_PIN(34, "AD[23]"), /* C1 */
+ PINCTRL_PIN(35, "DA[26]"), /* C2 */
+ PINCTRL_PIN(36, "CSn[3]"), /* C3 */
+ PINCTRL_PIN(37, "DA[25]"), /* C4 */
+ PINCTRL_PIN(38, "AD[24]"), /* C5 */
+ PINCTRL_PIN(39, "AD[19]"), /* C6 */
+ PINCTRL_PIN(40, "HGPIO[5]"), /* C7 */
+ PINCTRL_PIN(41, "WRn"), /* C8 */
+ PINCTRL_PIN(42, "MDIO"), /* C9 */
+ PINCTRL_PIN(43, "MIIRXD[2]"), /* C10 */
+ PINCTRL_PIN(44, "TXCLK"), /* C11 */
+ PINCTRL_PIN(45, "MIITXD[0]"), /* C12 */
+ PINCTRL_PIN(46, "CLD"), /* C13 */
+ PINCTRL_PIN(47, "EGPIO[13]"), /* C14 */
+ PINCTRL_PIN(48, "TRSTn"), /* C15 */
+ PINCTRL_PIN(49, "Xp"), /* C16 */
+ PINCTRL_PIN(50, "Xm"), /* C17 */
+ /* Row D */
+ PINCTRL_PIN(51, "SDCSn[3]"), /* D1 */
+ PINCTRL_PIN(52, "DA[23]"), /* D2 */
+ PINCTRL_PIN(53, "SDCLK"), /* D3 */
+ PINCTRL_PIN(54, "DA[24]"), /* D4 */
+ PINCTRL_PIN(55, "HGPIO[7]"), /* D5 */
+ PINCTRL_PIN(56, "HGPIO[6]"), /* D6 */
+ PINCTRL_PIN(57, "A[28]"), /* D7 */
+ PINCTRL_PIN(58, "HGPIO[4]"), /* D8 */
+ PINCTRL_PIN(59, "AD[16]"), /* D9 */
+ PINCTRL_PIN(60, "MDC"), /* D10 */
+ PINCTRL_PIN(61, "RXERR"), /* D11 */
+ PINCTRL_PIN(62, "MIITXD[3]"), /* D12 */
+ PINCTRL_PIN(63, "EGPIO[12]"), /* D13 */
+ PINCTRL_PIN(64, "EGPIO[1]"), /* D14 */
+ PINCTRL_PIN(65, "EGPIO[0]"), /* D15 */
+ PINCTRL_PIN(66, "Ym"), /* D16 */
+ PINCTRL_PIN(67, "Yp"), /* D17 */
+ /* Row E */
+ PINCTRL_PIN(68, "SDCSn[2]"), /* E1 */
+ PINCTRL_PIN(69, "SDWEN"), /* E2 */
+ PINCTRL_PIN(70, "DA[22]"), /* E3 */
+ PINCTRL_PIN(71, "AD[3]"), /* E4 */
+ PINCTRL_PIN(72, "DA[15]"), /* E5 */
+ PINCTRL_PIN(73, "AD[21]"), /* E6 */
+ PINCTRL_PIN(74, "DA[17]"), /* E7 */
+ PINCTRL_PIN(75, "vddr"), /* E8 */
+ PINCTRL_PIN(76, "vddr"), /* E9 */
+ PINCTRL_PIN(77, "vddr"), /* E10 */
+ PINCTRL_PIN(78, "MIIRXD[0]"), /* E11 */
+ PINCTRL_PIN(79, "TXERR"), /* E12 */
+ PINCTRL_PIN(80, "EGPIO[2]"), /* E13 */
+ PINCTRL_PIN(81, "EGPIO[4]"), /* E14 */
+ PINCTRL_PIN(82, "EGPIO[3]"), /* E15 */
+ PINCTRL_PIN(83, "sXp"), /* E16 */
+ PINCTRL_PIN(84, "sXm"), /* E17 */
+ /* Row F */
+ PINCTRL_PIN(85, "RASn"), /* F1 */
+ PINCTRL_PIN(86, "SDCSn[1]"), /* F2 */
+ PINCTRL_PIN(87, "SDCSn[0]"), /* F3 */
+ PINCTRL_PIN(88, "DQMn[3]"), /* F4 */
+ PINCTRL_PIN(89, "AD[5]"), /* F5 */
+ PINCTRL_PIN(90, "gndr"), /* F6 */
+ PINCTRL_PIN(91, "gndr"), /* F7 */
+ PINCTRL_PIN(92, "gndr"), /* F8 */
+ PINCTRL_PIN(93, "vddc"), /* F9 */
+ PINCTRL_PIN(94, "vddc"), /* F10 */
+ PINCTRL_PIN(95, "gndr"), /* F11 */
+ PINCTRL_PIN(96, "EGPIO[7]"), /* F12 */
+ PINCTRL_PIN(97, "EGPIO[5]"), /* F13 */
+ PINCTRL_PIN(98, "ADC GND"), /* F14 */
+ PINCTRL_PIN(99, "EGPIO[6]"), /* F15 */
+ PINCTRL_PIN(100, "sYm"), /* F16 */
+ PINCTRL_PIN(101, "syp"), /* F17 */
+ /* Row G */
+ PINCTRL_PIN(102, "DQMn[0]"), /* G1 */
+ PINCTRL_PIN(103, "CASn"), /* G2 */
+ PINCTRL_PIN(104, "DA[21]"), /* G3 */
+ PINCTRL_PIN(105, "AD[22]"), /* G4 */
+ PINCTRL_PIN(106, "vddr"), /* G5 */
+ PINCTRL_PIN(107, "gndr"), /* G6 */
+ PINCTRL_PIN(108, "gndr"), /* G12 */
+ PINCTRL_PIN(109, "EGPIO[9]"), /* G13 */
+ PINCTRL_PIN(110, "EGPIO[10]"), /* G14 */
+ PINCTRL_PIN(111, "EGPIO[11]"), /* G15 */
+ PINCTRL_PIN(112, "RTCXTALO"), /* G16 */
+ PINCTRL_PIN(113, "RTCXTALI"), /* G17 */
+ /* Row H */
+ PINCTRL_PIN(114, "DA[18]"), /* H1 */
+ PINCTRL_PIN(115, "DA[20]"), /* H2 */
+ PINCTRL_PIN(116, "DA[19]"), /* H3 */
+ PINCTRL_PIN(117, "DA[16]"), /* H4 */
+ PINCTRL_PIN(118, "vddr"), /* H5 */
+ PINCTRL_PIN(119, "vddc"), /* H6 */
+ PINCTRL_PIN(120, "gndc"), /* H7 */
+ PINCTRL_PIN(121, "gndc"), /* H9 */
+ PINCTRL_PIN(122, "gndc"), /* H10 */
+ PINCTRL_PIN(123, "gndr"), /* H12 */
+ PINCTRL_PIN(124, "vddr"), /* H13 */
+ PINCTRL_PIN(125, "EGPIO[8]"), /* H14 */
+ PINCTRL_PIN(126, "PRSTN"), /* H15 */
+ PINCTRL_PIN(127, "COL[7]"), /* H16 */
+ PINCTRL_PIN(128, "RSTON"), /* H17 */
+ /* Row J */
+ PINCTRL_PIN(129, "AD[6]"), /* J1 */
+ PINCTRL_PIN(130, "DA[14]"), /* J2 */
+ PINCTRL_PIN(131, "AD[7]"), /* J3 */
+ PINCTRL_PIN(132, "DA[13]"), /* J4 */
+ PINCTRL_PIN(133, "vddr"), /* J5 */
+ PINCTRL_PIN(134, "vddc"), /* J6 */
+ PINCTRL_PIN(135, "gndc"), /* J8 */
+ PINCTRL_PIN(136, "gndc"), /* J10 */
+ PINCTRL_PIN(137, "vddc"), /* J12 */
+ PINCTRL_PIN(138, "vddr"), /* J13 */
+ PINCTRL_PIN(139, "COL[5]"), /* J14 */
+ PINCTRL_PIN(140, "COL[6]"), /* J15 */
+ PINCTRL_PIN(141, "CSn[0]"), /* J16 */
+ PINCTRL_PIN(142, "COL[3]"), /* J17 */
+ /* Row K */
+ PINCTRL_PIN(143, "AD[4]"), /* K1 */
+ PINCTRL_PIN(144, "DA[12]"), /* K2 */
+ PINCTRL_PIN(145, "DA[10]"), /* K3 */
+ PINCTRL_PIN(146, "DA[11]"), /* K4 */
+ PINCTRL_PIN(147, "vddr"), /* K5 */
+ PINCTRL_PIN(148, "gndr"), /* K6 */
+ PINCTRL_PIN(149, "gndc"), /* K8 */
+ PINCTRL_PIN(150, "gndc"), /* K9 */
+ PINCTRL_PIN(151, "gndc"), /* K10 */
+ PINCTRL_PIN(152, "vddc"), /* K12 */
+ PINCTRL_PIN(153, "COL[4]"), /* K13 */
+ PINCTRL_PIN(154, "PLL_VDD"), /* K14 */
+ PINCTRL_PIN(155, "COL[2]"), /* K15 */
+ PINCTRL_PIN(156, "COL[1]"), /* K16 */
+ PINCTRL_PIN(157, "COL[0]"), /* K17 */
+ /* Row L */
+ PINCTRL_PIN(158, "DA[9]"), /* L1 */
+ PINCTRL_PIN(159, "AD[2]"), /* L2 */
+ PINCTRL_PIN(160, "AD[1]"), /* L3 */
+ PINCTRL_PIN(161, "DA[8]"), /* L4 */
+ PINCTRL_PIN(162, "BLANK"), /* L5 */
+ PINCTRL_PIN(163, "gndr"), /* L6 */
+ PINCTRL_PIN(164, "gndr"), /* L7 */
+ PINCTRL_PIN(165, "ROW[7]"), /* L8 */
+ PINCTRL_PIN(166, "ROW[5]"), /* L9 */
+ PINCTRL_PIN(167, "PLL GND"), /* L10 */
+ PINCTRL_PIN(168, "XTALI"), /* L11 */
+ PINCTRL_PIN(169, "XTALO"), /* L12 */
+ /* Row M */
+ PINCTRL_PIN(170, "BRIGHT"), /* M1 */
+ PINCTRL_PIN(171, "AD[0]"), /* M2 */
+ PINCTRL_PIN(172, "DQMn[1]"), /* M3 */
+ PINCTRL_PIN(173, "DQMn[2]"), /* M4 */
+ PINCTRL_PIN(174, "P[17]"), /* M5 */
+ PINCTRL_PIN(175, "gndr"), /* M6 */
+ PINCTRL_PIN(176, "gndr"), /* M7 */
+ PINCTRL_PIN(177, "vddc"), /* M8 */
+ PINCTRL_PIN(178, "vddc"), /* M9 */
+ PINCTRL_PIN(179, "gndr"), /* M10 */
+ PINCTRL_PIN(180, "gndr"), /* M11 */
+ PINCTRL_PIN(181, "ROW[6]"), /* M12 */
+ PINCTRL_PIN(182, "ROW[4]"), /* M13 */
+ PINCTRL_PIN(183, "ROW[1]"), /* M14 */
+ PINCTRL_PIN(184, "ROW[0]"), /* M15 */
+ PINCTRL_PIN(185, "ROW[3]"), /* M16 */
+ PINCTRL_PIN(186, "ROW[2]"), /* M17 */
+ /* Row N */
+ PINCTRL_PIN(187, "P[14]"), /* N1 */
+ PINCTRL_PIN(188, "P[16]"), /* N2 */
+ PINCTRL_PIN(189, "P[15]"), /* N3 */
+ PINCTRL_PIN(190, "P[13]"), /* N4 */
+ PINCTRL_PIN(191, "P[12]"), /* N5 */
+ PINCTRL_PIN(192, "DA[5]"), /* N6 */
+ PINCTRL_PIN(193, "vddr"), /* N7 */
+ PINCTRL_PIN(194, "vddr"), /* N8 */
+ PINCTRL_PIN(195, "vddr"), /* N9 */
+ PINCTRL_PIN(196, "vddr"), /* N10 */
+ PINCTRL_PIN(197, "EECLK"), /* N11 */
+ PINCTRL_PIN(198, "ASDO"), /* N12 */
+ PINCTRL_PIN(199, "CTSn"), /* N13 */
+ PINCTRL_PIN(200, "RXD[0]"), /* N14 */
+ PINCTRL_PIN(201, "TXD[0]"), /* N15 */
+ PINCTRL_PIN(202, "TXD[1]"), /* N16 */
+ PINCTRL_PIN(203, "TXD[2]"), /* N17 */
+ /* Row P */
+ PINCTRL_PIN(204, "SPCLK"), /* P1 */
+ PINCTRL_PIN(205, "P[10]"), /* P2 */
+ PINCTRL_PIN(206, "P[11]"), /* P3 */
+ PINCTRL_PIN(207, "P[3]"), /* P4 */
+ PINCTRL_PIN(208, "AD[15]"), /* P5 */
+ PINCTRL_PIN(209, "AD[13]"), /* P6 */
+ PINCTRL_PIN(210, "AD[12]"), /* P7 */
+ PINCTRL_PIN(211, "DA[2]"), /* P8 */
+ PINCTRL_PIN(212, "AD[8]"), /* P9 */
+ PINCTRL_PIN(213, "TCK"), /* P10 */
+ PINCTRL_PIN(214, "BOOT[1]"), /* P11 */
+ PINCTRL_PIN(215, "EEDAT"), /* P12 */
+ PINCTRL_PIN(216, "GRLED"), /* P13 */
+ PINCTRL_PIN(217, "RDLED"), /* P14 */
+ PINCTRL_PIN(218, "GGPIO[2]"), /* P15 */
+ PINCTRL_PIN(219, "RXD[1]"), /* P16 */
+ PINCTRL_PIN(220, "RXD[2]"), /* P17 */
+ /* Row R */
+ PINCTRL_PIN(221, "P[9]"), /* R1 */
+ PINCTRL_PIN(222, "HSYNC"), /* R2 */
+ PINCTRL_PIN(223, "P[6]"), /* R3 */
+ PINCTRL_PIN(224, "P[5]"), /* R4 */
+ PINCTRL_PIN(225, "P[0]"), /* R5 */
+ PINCTRL_PIN(226, "AD[14]"), /* R6 */
+ PINCTRL_PIN(227, "DA[4]"), /* R7 */
+ PINCTRL_PIN(228, "DA[1]"), /* R8 */
+ PINCTRL_PIN(229, "DTRn"), /* R9 */
+ PINCTRL_PIN(230, "TDI"), /* R10 */
+ PINCTRL_PIN(231, "BOOT[0]"), /* R11 */
+ PINCTRL_PIN(232, "ASYNC"), /* R12 */
+ PINCTRL_PIN(233, "SSPTX[1]"), /* R13 */
+ PINCTRL_PIN(234, "PWMOUT"), /* R14 */
+ PINCTRL_PIN(235, "USBm[0]"), /* R15 */
+ PINCTRL_PIN(236, "ABITCLK"), /* R16 */
+ PINCTRL_PIN(237, "USBp[0]"), /* R17 */
+ /* Row T */
+ PINCTRL_PIN(238, "NC"), /* T1 */
+ PINCTRL_PIN(239, "NC"), /* T2 */
+ PINCTRL_PIN(240, "V_CSYNC"), /* T3 */
+ PINCTRL_PIN(241, "P[7]"), /* T4 */
+ PINCTRL_PIN(242, "P[2]"), /* T5 */
+ PINCTRL_PIN(243, "DA[7]"), /* T6 */
+ PINCTRL_PIN(244, "AD[11]"), /* T7 */
+ PINCTRL_PIN(245, "AD[9]"), /* T8 */
+ PINCTRL_PIN(246, "DSRn"), /* T9 */
+ PINCTRL_PIN(247, "TMS"), /* T10 */
+ PINCTRL_PIN(248, "gndr"), /* T11 */
+ PINCTRL_PIN(249, "SFRM[1]"), /* T12 */
+ PINCTRL_PIN(250, "INT[2]"), /* T13 */
+ PINCTRL_PIN(251, "INT[0]"), /* T14 */
+ PINCTRL_PIN(252, "USBp[1]"), /* T15 */
+ PINCTRL_PIN(253, "NC"), /* T16 */
+ PINCTRL_PIN(254, "NC"), /* T17 */
+ /* Row U */
+ PINCTRL_PIN(255, "NC"), /* U1 */
+ PINCTRL_PIN(256, "NC"), /* U2 */
+ PINCTRL_PIN(257, "P[8]"), /* U3 */
+ PINCTRL_PIN(258, "P[4]"), /* U4 */
+ PINCTRL_PIN(259, "P[1]"), /* U5 */
+ PINCTRL_PIN(260, "DA[6]"), /* U6 */
+ PINCTRL_PIN(261, "DA[3]"), /* U7 */
+ PINCTRL_PIN(262, "AD[10]"), /* U8 */
+ PINCTRL_PIN(263, "DA[0]"), /* U9 */
+ PINCTRL_PIN(264, "TDO"), /* U10 */
+ PINCTRL_PIN(265, "NC"), /* U11 */
+ PINCTRL_PIN(266, "SCLK[1]"), /* U12 */
+ PINCTRL_PIN(267, "SSPRX[1]"), /* U13 */
+ PINCTRL_PIN(268, "INT[1]"), /* U14 */
+ PINCTRL_PIN(269, "RTSn"), /* U15 */
+ PINCTRL_PIN(270, "USBm[1]"), /* U16 */
+ PINCTRL_PIN(271, "NC"), /* U17 */
+};
+
+static const unsigned int ssp_ep9307_pins[] = {
+ 233, 249, 266, 267,
+};
+
+static const unsigned int ac97_ep9307_pins[] = {
+ 16, 32, 198, 232, 236,
+};
+
+/* No info found on these - it's some internal state */
+static const unsigned int raster_on_sdram0_pins[] = {
+};
+
+static const unsigned int raster_on_sdram3_pins[] = {
+};
+
+/* ROW[N] */
+static const unsigned int gpio2a_9307_pins[] = {
+ 165, 166, 181, 182, 183, 184, 185, 186,
+};
+
+/* COL[N] */
+static const unsigned int gpio3a_9307_pins[] = {
+ 127, 139, 140, 142, 153, 155, 156, 157,
+};
+
+static const unsigned int keypad_9307_pins[] = {
+ 127, 139, 140, 142, 153, 155, 156, 157,
+ 165, 166, 181, 182, 183, 184, 185, 186,
+};
+
+/* ep9307 exposes only pins 4 and 5 of GPIO Port E */
+static const unsigned int gpio4a_9307_pins[] = { 216, 217 };
+
+/* ep9307 exposes only pin 2 of GPIO Port G */
+static const unsigned int gpio6a_9307_pins[] = { 219 };
+
+static const unsigned int gpio7a_9307_pins[] = { 7, 24, 41, 56, 57, 59 };
+
+static const struct ep93xx_pin_group ep9307_pin_groups[] = {
+ PMX_GROUP("ssp", ssp_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0),
+ PMX_GROUP("i2s_on_ssp", ssp_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP,
+ EP93XX_SYSCON_DEVCFG_I2SONSSP),
+ PMX_GROUP("ac97", ac97_ep9307_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0),
+ PMX_GROUP("i2s_on_ac97", ac97_ep9301_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97,
+ EP93XX_SYSCON_DEVCFG_I2SONAC97),
+ PMX_GROUP("rasteronsdram0grp", raster_on_sdram0_pins, EP93XX_SYSCON_DEVCFG_RASONP3, 0),
+ PMX_GROUP("rasteronsdram3grp", raster_on_sdram3_pins, EP93XX_SYSCON_DEVCFG_RASONP3,
+ EP93XX_SYSCON_DEVCFG_RASONP3),
+ PMX_GROUP("gpio2agrp", gpio2a_9307_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("gpio3agrp", gpio3a_9307_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("keypadgrp", keypad_9307_pins, EP93XX_SYSCON_DEVCFG_GONK, 0),
+ PMX_GROUP("gpio4agrp", gpio4a_9307_pins, EP93XX_SYSCON_DEVCFG_EONIDE,
+ EP93XX_SYSCON_DEVCFG_EONIDE),
+ PMX_GROUP("gpio6agrp", gpio6a_9307_pins, EP93XX_SYSCON_DEVCFG_GONIDE,
+ EP93XX_SYSCON_DEVCFG_GONIDE),
+ PMX_GROUP("gpio7agrp", gpio7a_9307_pins, EP93XX_SYSCON_DEVCFG_HONIDE,
+ EP93XX_SYSCON_DEVCFG_HONIDE),
+};
+
+/* ep9312, ep9315 */
+static const struct pinctrl_pin_desc ep9312_pins[] = {
+ /* Row A */
+ PINCTRL_PIN(0, "CSN[7]"), /* A1 */
+ PINCTRL_PIN(1, "DA[28]"), /* A2 */
+ PINCTRL_PIN(2, "AD[18]"), /* A3 */
+ PINCTRL_PIN(3, "DD[8]"), /* A4 */
+ PINCTRL_PIN(4, "DD[4]"), /* A5 */
+ PINCTRL_PIN(5, "AD[17]"), /* A6 */
+ PINCTRL_PIN(6, "RDN"), /* A7 */
+ PINCTRL_PIN(7, "RXCLK"), /* A8 */
+ PINCTRL_PIN(8, "MIIRXD[0]"), /* A9 */
+ PINCTRL_PIN(9, "RXDVAL"), /* A10 */
+ PINCTRL_PIN(10, "MIITXD[2]"), /* A11 */
+ PINCTRL_PIN(11, "TXERR"), /* A12 */
+ PINCTRL_PIN(12, "CLD"), /* A13 */
+ PINCTRL_PIN(13, "NC"), /* A14 */
+ PINCTRL_PIN(14, "NC"), /* A15 */
+ PINCTRL_PIN(15, "NC"), /* A16 */
+ PINCTRL_PIN(16, "EGPIO[12]"), /* A17 */
+ PINCTRL_PIN(17, "EGPIO[15]"), /* A18 */
+ PINCTRL_PIN(18, "NC"), /* A19 */
+ PINCTRL_PIN(19, "NC"), /* A20 */
+ /* Row B */
+ PINCTRL_PIN(20, "CSN[2]"), /* B1 */
+ PINCTRL_PIN(21, "DA[31]"), /* B2 */
+ PINCTRL_PIN(22, "DA[30]"), /* B3 */
+ PINCTRL_PIN(23, "DA[27]"), /* B4 */
+ PINCTRL_PIN(24, "DD[7]"), /* B5 */
+ PINCTRL_PIN(25, "DD[3]"), /* B6 */
+ PINCTRL_PIN(26, "WRN"), /* B7 */
+ PINCTRL_PIN(27, "MDIO"), /* B8 */
+ PINCTRL_PIN(28, "MIIRXD[1]"), /* B9 */
+ PINCTRL_PIN(29, "RXERR"), /* B10 */
+ PINCTRL_PIN(30, "MIITXD[1]"), /* B11 */
+ PINCTRL_PIN(31, "CRS"), /* B12 */
+ PINCTRL_PIN(32, "NC"), /* B13 */
+ PINCTRL_PIN(33, "NC"), /* B14 */
+ PINCTRL_PIN(34, "NC"), /* B15 */
+ PINCTRL_PIN(35, "NC"), /* B16 */
+ PINCTRL_PIN(36, "EGPIO[13]"), /* B17 */
+ PINCTRL_PIN(37, "NC"), /* B18 */
+ PINCTRL_PIN(38, "WAITN"), /* B19 */
+ PINCTRL_PIN(39, "TRSTN"), /* B20 */
+ /* Row C */
+ PINCTRL_PIN(40, "CSN[1]"), /* C1 */
+ PINCTRL_PIN(41, "CSN[3]"), /* C2 */
+ PINCTRL_PIN(42, "AD[20]"), /* C3 */
+ PINCTRL_PIN(43, "DA[29]"), /* C4 */
+ PINCTRL_PIN(44, "DD[10]"), /* C5 */
+ PINCTRL_PIN(45, "DD[6]"), /* C6 */
+ PINCTRL_PIN(46, "DD[2]"), /* C7 */
+ PINCTRL_PIN(47, "MDC"), /* C8 */
+ PINCTRL_PIN(48, "MIIRXD[3]"), /* C9 */
+ PINCTRL_PIN(49, "TXCLK"), /* C10 */
+ PINCTRL_PIN(50, "MIITXD[0]"), /* C11 */
+ PINCTRL_PIN(51, "NC"), /* C12 */
+ PINCTRL_PIN(52, "NC"), /* C13 */
+ PINCTRL_PIN(53, "NC"), /* C14 */
+ PINCTRL_PIN(54, "NC"), /* C15 */
+ PINCTRL_PIN(55, "NC"), /* C16 */
+ PINCTRL_PIN(56, "NC"), /* C17 */
+ PINCTRL_PIN(57, "USBP[2]"), /* C18 */
+ PINCTRL_PIN(58, "IORDY"), /* C19 */
+ PINCTRL_PIN(59, "DMACKN"), /* C20 */
+ /* Row D */
+ PINCTRL_PIN(60, "AD[24]"), /* D1 */
+ PINCTRL_PIN(61, "DA[25]"), /* D2 */
+ PINCTRL_PIN(62, "DD[11]"), /* D3 */
+ PINCTRL_PIN(63, "SDCLKEN"), /* D4 */
+ PINCTRL_PIN(64, "AD[19]"), /* D5 */
+ PINCTRL_PIN(65, "DD[9]"), /* D6 */
+ PINCTRL_PIN(66, "DD[5]"), /* D7 */
+ PINCTRL_PIN(67, "AD[16]"), /* D8 */
+ PINCTRL_PIN(68, "MIIRXD[2]"), /* D9 */
+ PINCTRL_PIN(69, "MIITXD[3]"), /* D10 */
+ PINCTRL_PIN(70, "TXEN"), /* D11 */
+ PINCTRL_PIN(71, "NC"), /* D12 */
+ PINCTRL_PIN(72, "NC"), /* D13 */
+ PINCTRL_PIN(73, "NC"), /* D14 */
+ PINCTRL_PIN(74, "EGPIO[14]"), /* D15 */
+ PINCTRL_PIN(75, "NC"), /* D16 */
+ PINCTRL_PIN(76, "USBM[2]"), /* D17 */
+ PINCTRL_PIN(77, "ARSTN"), /* D18 */
+ PINCTRL_PIN(78, "DIORN"), /* D19 */
+ PINCTRL_PIN(79, "EGPIO[1]"), /* D20 */
+ /* Row E */
+ PINCTRL_PIN(80, "AD[23]"), /* E1 */
+ PINCTRL_PIN(81, "DA[23]"), /* E2 */
+ PINCTRL_PIN(82, "DA[26]"), /* E3 */
+ PINCTRL_PIN(83, "CSN[6]"), /* E4 */
+ PINCTRL_PIN(84, "GND"), /* E5 */
+ PINCTRL_PIN(85, "GND"), /* E6 */
+ PINCTRL_PIN(86, "CVDD"), /* E7 */
+ PINCTRL_PIN(87, "CVDD"), /* E8 */
+ PINCTRL_PIN(88, "RVDD"), /* E9 */
+ PINCTRL_PIN(89, "GND"), /* E10 */
+ PINCTRL_PIN(90, "GND"), /* E11 */
+ PINCTRL_PIN(91, "RVDD"), /* E12 */
+ PINCTRL_PIN(92, "CVDD"), /* E13 */
+ PINCTRL_PIN(93, "CVDD"), /* E14 */
+ PINCTRL_PIN(94, "GND"), /* E15 */
+ PINCTRL_PIN(95, "ASDI"), /* E16 */
+ PINCTRL_PIN(96, "DIOWN"), /* E17 */
+ PINCTRL_PIN(97, "EGPIO[0]"), /* E18 */
+ PINCTRL_PIN(98, "EGPIO[3]"), /* E19 */
+ PINCTRL_PIN(99, "EGPIO[5]"), /* E20 */
+ /* Row F */
+ PINCTRL_PIN(100, "SDCSN[3]"), /* F1 */
+ PINCTRL_PIN(101, "DA[22]"), /* F2 */
+ PINCTRL_PIN(102, "DA[24]"), /* F3 */
+ PINCTRL_PIN(103, "AD[25]"), /* F4 */
+ PINCTRL_PIN(104, "RVDD"), /* F5 */
+ PINCTRL_PIN(105, "GND"), /* F6 */
+ PINCTRL_PIN(106, "CVDD"), /* F7 */
+ PINCTRL_PIN(107, "CVDD"), /* F14 */
+ PINCTRL_PIN(108, "GND"), /* F15 */
+ PINCTRL_PIN(109, "GND"), /* F16 */
+ PINCTRL_PIN(110, "EGPIO[2]"), /* F17 */
+ PINCTRL_PIN(111, "EGPIO[4]"), /* F18 */
+ PINCTRL_PIN(112, "EGPIO[6]"), /* F19 */
+ PINCTRL_PIN(113, "EGPIO[8]"), /* F20 */
+ /* Row G */
+ PINCTRL_PIN(114, "SDCSN[0]"), /* G1 */
+ PINCTRL_PIN(115, "SDCSN[1]"), /* G2 */
+ PINCTRL_PIN(116, "SDWEN"), /* G3 */
+ PINCTRL_PIN(117, "SDCLK"), /* G4 */
+ PINCTRL_PIN(118, "RVDD"), /* G5 */
+ PINCTRL_PIN(119, "RVDD"), /* G6 */
+ PINCTRL_PIN(120, "RVDD"), /* G15 */
+ PINCTRL_PIN(121, "RVDD"), /* G16 */
+ PINCTRL_PIN(122, "EGPIO[7]"), /* G17 */
+ PINCTRL_PIN(123, "EGPIO[9]"), /* G18 */
+ PINCTRL_PIN(124, "EGPIO[10]"), /* G19 */
+ PINCTRL_PIN(125, "EGPIO[11]"), /* G20 */
+ /* Row H */
+ PINCTRL_PIN(126, "DQMN[3]"), /* H1 */
+ PINCTRL_PIN(127, "CASN"), /* H2 */
+ PINCTRL_PIN(128, "RASN"), /* H3 */
+ PINCTRL_PIN(129, "SDCSN[2]"), /* H4 */
+ PINCTRL_PIN(130, "CVDD"), /* H5 */
+ PINCTRL_PIN(131, "GND"), /* H8 */
+ PINCTRL_PIN(132, "GND"), /* H9 */
+ PINCTRL_PIN(133, "GND"), /* H10 */
+ PINCTRL_PIN(134, "GND"), /* H11 */
+ PINCTRL_PIN(135, "GND"), /* H12 */
+ PINCTRL_PIN(136, "GND"), /* H13 */
+ PINCTRL_PIN(137, "RVDD"), /* H16 */
+ PINCTRL_PIN(138, "RTCXTALO"), /* H17 */
+ PINCTRL_PIN(139, "ADC_VDD"), /* H18 */
+ PINCTRL_PIN(140, "ADC_GND"), /* H19 */
+ PINCTRL_PIN(141, "XP"), /* H20 */
+ /* Row J */
+ PINCTRL_PIN(142, "DA[21]"), /* J1 */
+ PINCTRL_PIN(143, "DQMN[0]"), /* J2 */
+ PINCTRL_PIN(144, "DQMN[1]"), /* J3 */
+ PINCTRL_PIN(145, "DQMN[2]"), /* J4 */
+ PINCTRL_PIN(146, "GND"), /* J5 */
+ PINCTRL_PIN(147, "GND"), /* J8 */
+ PINCTRL_PIN(148, "GND"), /* J9 */
+ PINCTRL_PIN(149, "GND"), /* J10 */
+ PINCTRL_PIN(150, "GND"), /* J11 */
+ PINCTRL_PIN(151, "GND"), /* J12 */
+ PINCTRL_PIN(152, "GND"), /* J13 */
+ PINCTRL_PIN(153, "CVDD"), /* J16 */
+ PINCTRL_PIN(154, "RTCXTALI"), /* J17 */
+ PINCTRL_PIN(155, "XM"), /* J18 */
+ PINCTRL_PIN(156, "YP"), /* J19 */
+ PINCTRL_PIN(157, "YM"), /* J20 */
+ /* Row K */
+ PINCTRL_PIN(158, "AD[22]"), /* K1 */
+ PINCTRL_PIN(159, "DA[20]"), /* K2 */
+ PINCTRL_PIN(160, "AD[21]"), /* K3 */
+ PINCTRL_PIN(161, "DA[19]"), /* K4 */
+ PINCTRL_PIN(162, "RVDD"), /* K5 */
+ PINCTRL_PIN(163, "GND"), /* K8 */
+ PINCTRL_PIN(164, "GND"), /* K9 */
+ PINCTRL_PIN(165, "GND"), /* K10 */
+ PINCTRL_PIN(166, "GND"), /* K11 */
+ PINCTRL_PIN(167, "GND"), /* K12 */
+ PINCTRL_PIN(168, "GND"), /* K13 */
+ PINCTRL_PIN(169, "CVDD"), /* K16 */
+ PINCTRL_PIN(170, "SYM"), /* K17 */
+ PINCTRL_PIN(171, "SYP"), /* K18 */
+ PINCTRL_PIN(172, "SXM"), /* K19 */
+ PINCTRL_PIN(173, "SXP"), /* K20 */
+ /* Row L */
+ PINCTRL_PIN(174, "DA[18]"), /* L1 */
+ PINCTRL_PIN(175, "DA[17]"), /* L2 */
+ PINCTRL_PIN(176, "DA[16]"), /* L3 */
+ PINCTRL_PIN(177, "DA[15]"), /* L4 */
+ PINCTRL_PIN(178, "GND"), /* L5 */
+ PINCTRL_PIN(179, "GND"), /* L8 */
+ PINCTRL_PIN(180, "GND"), /* L9 */
+ PINCTRL_PIN(181, "GND"), /* L10 */
+ PINCTRL_PIN(182, "GND"), /* L11 */
+ PINCTRL_PIN(183, "GND"), /* L12 */
+ PINCTRL_PIN(184, "GND"), /* L13 */
+ PINCTRL_PIN(185, "CVDD"), /* L16 */
+ PINCTRL_PIN(186, "COL[5]"), /* L17 */
+ PINCTRL_PIN(187, "COL[7]"), /* L18 */
+ PINCTRL_PIN(188, "RSTON"), /* L19 */
+ PINCTRL_PIN(189, "PRSTN"), /* L20 */
+ /* Row M */
+ PINCTRL_PIN(190, "AD[7]"), /* M1 */
+ PINCTRL_PIN(191, "DA[14]"), /* M2 */
+ PINCTRL_PIN(192, "AD[6]"), /* M3 */
+ PINCTRL_PIN(193, "AD[5]"), /* M4 */
+ PINCTRL_PIN(194, "CVDD"), /* M5 */
+ PINCTRL_PIN(195, "GND"), /* M8 */
+ PINCTRL_PIN(196, "GND"), /* M9 */
+ PINCTRL_PIN(197, "GND"), /* M10 */
+ PINCTRL_PIN(198, "GND"), /* M11 */
+ PINCTRL_PIN(199, "GND"), /* M12 */
+ PINCTRL_PIN(200, "GND"), /* M13 */
+ PINCTRL_PIN(201, "GND"), /* M16 */
+ PINCTRL_PIN(202, "COL[4]"), /* M17 */
+ PINCTRL_PIN(203, "COL[3]"), /* M18 */
+ PINCTRL_PIN(204, "COL[6]"), /* M19 */
+ PINCTRL_PIN(205, "CSN[0]"), /* M20 */
+ /* Row N */
+ PINCTRL_PIN(206, "DA[13]"), /* N1 */
+ PINCTRL_PIN(207, "DA[12]"), /* N2 */
+ PINCTRL_PIN(208, "DA[11]"), /* N3 */
+ PINCTRL_PIN(209, "AD[3]"), /* N4 */
+ PINCTRL_PIN(210, "CVDD"), /* N5 */
+ PINCTRL_PIN(211, "CVDD"), /* N6 */
+ PINCTRL_PIN(212, "GND"), /* N8 */
+ PINCTRL_PIN(213, "GND"), /* N9 */
+ PINCTRL_PIN(214, "GND"), /* N10 */
+ PINCTRL_PIN(215, "GND"), /* N11 */
+ PINCTRL_PIN(216, "GND"), /* N12 */
+ PINCTRL_PIN(217, "GND"), /* N13 */
+ PINCTRL_PIN(218, "GND"), /* N15 */
+ PINCTRL_PIN(219, "GND"), /* N16 */
+ PINCTRL_PIN(220, "XTALO"), /* N17 */
+ PINCTRL_PIN(221, "COL[0]"), /* N18 */
+ PINCTRL_PIN(222, "COL[1]"), /* N19 */
+ PINCTRL_PIN(223, "COL[2]"), /* N20 */
+ /* Row P */
+ PINCTRL_PIN(224, "AD[4]"), /* P1 */
+ PINCTRL_PIN(225, "DA[10]"), /* P2 */
+ PINCTRL_PIN(226, "DA[9]"), /* P3 */
+ PINCTRL_PIN(227, "BRIGHT"), /* P4 */
+ PINCTRL_PIN(228, "RVDD"), /* P5 */
+ PINCTRL_PIN(229, "RVDD"), /* P6 */
+ PINCTRL_PIN(230, "RVDD"), /* P15 */
+ PINCTRL_PIN(231, "RVDD"), /* P16 */
+ PINCTRL_PIN(232, "XTALI"), /* P17 */
+ PINCTRL_PIN(233, "PLL_VDD"), /* P18 */
+ PINCTRL_PIN(234, "ROW[6]"), /* P19 */
+ PINCTRL_PIN(235, "ROW[7]"), /* P20 */
+ /* Row R */
+ PINCTRL_PIN(236, "AD[2]"), /* R1 */
+ PINCTRL_PIN(237, "AD[1]"), /* R2 */
+ PINCTRL_PIN(238, "P[17]"), /* R3 */
+ PINCTRL_PIN(239, "P[14]"), /* R4 */
+ PINCTRL_PIN(240, "RVDD"), /* R5 */
+ PINCTRL_PIN(241, "RVDD"), /* R6 */
+ PINCTRL_PIN(242, "GND"), /* R7 */
+ PINCTRL_PIN(243, "CVDD"), /* R8 */
+ PINCTRL_PIN(244, "CVDD"), /* R13 */
+ PINCTRL_PIN(245, "GND"), /* R14 */
+ PINCTRL_PIN(246, "RVDD"), /* R15 */
+ PINCTRL_PIN(247, "RVDD"), /* R16 */
+ PINCTRL_PIN(248, "ROW[0]"), /* R17 */
+ PINCTRL_PIN(249, "ROW[3]"), /* R18 */
+ PINCTRL_PIN(250, "PLL_GND"), /* R19 */
+ PINCTRL_PIN(251, "ROW[5]"), /* R20 */
+ /* Row T */
+ PINCTRL_PIN(252, "DA[8]"), /* T1 */
+ PINCTRL_PIN(253, "BLANK"), /* T2 */
+ PINCTRL_PIN(254, "P[13]"), /* T3 */
+ PINCTRL_PIN(255, "SPCLK"), /* T4 */
+ PINCTRL_PIN(256, "V_CSYNC"), /* T5 */
+ PINCTRL_PIN(257, "DD[14]"), /* T6 */
+ PINCTRL_PIN(258, "GND"), /* T7 */
+ PINCTRL_PIN(259, "CVDD"), /* T8 */
+ PINCTRL_PIN(260, "RVDD"), /* T9 */
+ PINCTRL_PIN(261, "GND"), /* T10 */
+ PINCTRL_PIN(262, "GND"), /* T11 */
+ PINCTRL_PIN(263, "RVDD"), /* T12 */
+ PINCTRL_PIN(264, "CVDD"), /* T13 */
+ PINCTRL_PIN(265, "GND"), /* T14 */
+ PINCTRL_PIN(266, "INT[0]"), /* T15 */
+ PINCTRL_PIN(267, "USBM[1]"), /* T16 */
+ PINCTRL_PIN(268, "RXD[0]"), /* T17 */
+ PINCTRL_PIN(269, "TXD[2]"), /* T18 */
+ PINCTRL_PIN(270, "ROW[2]"), /* T19 */
+ PINCTRL_PIN(271, "ROW[4]"), /* T20 */
+ /* Row U */
+ PINCTRL_PIN(272, "AD[0]"), /* U1 */
+ PINCTRL_PIN(273, "P[15]"), /* U2 */
+ PINCTRL_PIN(274, "P[10]"), /* U3 */
+ PINCTRL_PIN(275, "P[7]"), /* U4 */
+ PINCTRL_PIN(276, "P[6]"), /* U5 */
+ PINCTRL_PIN(277, "P[4]"), /* U6 */
+ PINCTRL_PIN(278, "P[0]"), /* U7 */
+ PINCTRL_PIN(279, "AD[13]"), /* U8 */
+ PINCTRL_PIN(280, "DA[3]"), /* U9 */
+ PINCTRL_PIN(281, "DA[0]"), /* U10 */
+ PINCTRL_PIN(282, "DSRN"), /* U11 */
+ PINCTRL_PIN(283, "BOOT[1]"), /* U12 */
+ PINCTRL_PIN(284, "NC"), /* U13 */
+ PINCTRL_PIN(285, "SSPRX1"), /* U14 */
+ PINCTRL_PIN(286, "INT[1]"), /* U15 */
+ PINCTRL_PIN(287, "PWMOUT"), /* U16 */
+ PINCTRL_PIN(288, "USBM[0]"), /* U17 */
+ PINCTRL_PIN(289, "RXD[1]"), /* U18 */
+ PINCTRL_PIN(290, "TXD[1]"), /* U19 */
+ PINCTRL_PIN(291, "ROW[1]"), /* U20 */
+ /* Row V */
+ PINCTRL_PIN(292, "P[16]"), /* V1 */
+ PINCTRL_PIN(293, "P[11]"), /* V2 */
+ PINCTRL_PIN(294, "P[8]"), /* V3 */
+ PINCTRL_PIN(295, "DD[15]"), /* V4 */
+ PINCTRL_PIN(296, "DD[13]"), /* V5 */
+ PINCTRL_PIN(297, "P[1]"), /* V6 */
+ PINCTRL_PIN(298, "AD[14]"), /* V7 */
+ PINCTRL_PIN(299, "AD[12]"), /* V8 */
+ PINCTRL_PIN(300, "DA[2]"), /* V9 */
+ PINCTRL_PIN(301, "IDECS0N"), /* V10 */
+ PINCTRL_PIN(302, "IDEDA[2]"), /* V11 */
+ PINCTRL_PIN(303, "TDI"), /* V12 */
+ PINCTRL_PIN(304, "GND"), /* V13 */
+ PINCTRL_PIN(305, "ASYNC"), /* V14 */
+ PINCTRL_PIN(306, "SSPTX1"), /* V15 */
+ PINCTRL_PIN(307, "INT[2]"), /* V16 */
+ PINCTRL_PIN(308, "RTSN"), /* V17 */
+ PINCTRL_PIN(309, "USBP[0]"), /* V18 */
+ PINCTRL_PIN(310, "CTSN"), /* V19 */
+ PINCTRL_PIN(311, "TXD[0]"), /* V20 */
+ /* Row W */
+ PINCTRL_PIN(312, "P[12]"), /* W1 */
+ PINCTRL_PIN(313, "P[9]"), /* W2 */
+ PINCTRL_PIN(314, "DD[0]"), /* W3 */
+ PINCTRL_PIN(315, "P[5]"), /* W4 */
+ PINCTRL_PIN(316, "P[3]"), /* W5 */
+ PINCTRL_PIN(317, "DA[7]"), /* W6 */
+ PINCTRL_PIN(318, "DA[5]"), /* W7 */
+ PINCTRL_PIN(319, "AD[11]"), /* W8 */
+ PINCTRL_PIN(320, "AD[9]"), /* W9 */
+ PINCTRL_PIN(321, "IDECS1N"), /* W10 */
+ PINCTRL_PIN(322, "IDEDA[1]"), /* W11 */
+ PINCTRL_PIN(323, "TCK"), /* W12 */
+ PINCTRL_PIN(324, "TMS"), /* W13 */
+ PINCTRL_PIN(325, "EECLK"), /* W14 */
+ PINCTRL_PIN(326, "SCLK1"), /* W15 */
+ PINCTRL_PIN(327, "GRLED"), /* W16 */
+ PINCTRL_PIN(328, "INT[3]"), /* W17 */
+ PINCTRL_PIN(329, "SLA[1]"), /* W18 */
+ PINCTRL_PIN(330, "SLA[0]"), /* W19 */
+ PINCTRL_PIN(331, "RXD[2]"), /* W20 */
+ /* Row Y */
+ PINCTRL_PIN(332, "HSYNC"), /* Y1 */
+ PINCTRL_PIN(333, "DD[1]"), /* Y2 */
+ PINCTRL_PIN(334, "DD[12]"), /* Y3 */
+ PINCTRL_PIN(335, "P[2]"), /* Y4 */
+ PINCTRL_PIN(336, "AD[15]"), /* Y5 */
+ PINCTRL_PIN(337, "DA[6]"), /* Y6 */
+ PINCTRL_PIN(338, "DA[4]"), /* Y7 */
+ PINCTRL_PIN(339, "AD[10]"), /* Y8 */
+ PINCTRL_PIN(340, "DA[1]"), /* Y9 */
+ PINCTRL_PIN(341, "AD[8]"), /* Y10 */
+ PINCTRL_PIN(342, "IDEDA[0]"), /* Y11 */
+ PINCTRL_PIN(343, "DTRN"), /* Y12 */
+ PINCTRL_PIN(344, "TDO"), /* Y13 */
+ PINCTRL_PIN(345, "BOOT[0]"), /* Y14 */
+ PINCTRL_PIN(346, "EEDAT"), /* Y15 */
+ PINCTRL_PIN(347, "ASDO"), /* Y16 */
+ PINCTRL_PIN(348, "SFRM1"), /* Y17 */
+ PINCTRL_PIN(349, "RDLED"), /* Y18 */
+ PINCTRL_PIN(350, "USBP[1]"), /* Y19 */
+ PINCTRL_PIN(351, "ABITCLK"), /* Y20 */
+};
+
+static const unsigned int ssp_ep9312_pins[] = {
+ 285, 306, 326, 348,
+};
+
+static const unsigned int ac97_ep9312_pins[] = {
+ 77, 95, 305, 347, 351,
+};
+
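+/* Pin 74 (EGPIO[14]) is muxed between PWM1 and GPIO by the PONG bit. */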
+static const unsigned int pwm_ep9312_pins[] = { 74 };
+
+static const unsigned int gpio1a_ep9312_pins[] = { 74 };
+
+static const unsigned int gpio2a_9312_pins[] = {
+ 234, 235, 248, 249, 251, 270, 271, 291,
+};
+
+static const unsigned int gpio3a_9312_pins[] = {
+ 186, 187, 202, 203, 204, 221, 222, 223,
+};
+
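+/*
+ * The keypad matrix shares its pins with the gpio2a and gpio3a groups;
+ * the GONK bit selects between the two uses.
+ */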
+static const unsigned int keypad_9312_pins[] = {
+ 186, 187, 202, 203, 204, 221, 222, 223,
+ 234, 235, 248, 249, 251, 270, 271, 291,
+};
+
+static const unsigned int gpio4a_9312_pins[] = {
+ 78, 301, 302, 321, 322, 342,
+};
+
+static const unsigned int gpio6a_9312_pins[] = {
+ 257, 295, 296, 334,
+};
+
+static const unsigned int gpio7a_9312_pins[] = {
+ 4, 24, 25, 45, 46, 66, 314, 333,
+};
+
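+/*
+ * The IDE interface spans the pins of the gpio4a, gpio6a and gpio7a
+ * groups; the EONIDE, GONIDE and HONIDE bits pick IDE or GPIO use.
+ */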
+static const unsigned int ide_9312_pins[] = {
+ 78, 301, 302, 321, 322, 342, 257, 295,
+ 296, 334, 4, 24, 25, 45, 46, 66,
+ 314, 333,
+};
+
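+/*
+ * Each group programs a mask/value pair into DEVCFG: paired groups share
+ * a mask, with value 0 selecting one function (e.g. "ssp") and the bit
+ * set selecting the other (e.g. "i2s_on_ssp").
+ */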
+static const struct ep93xx_pin_group ep9312_pin_groups[] = {
+ PMX_GROUP("ssp", ssp_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP, 0),
+ PMX_GROUP("i2s_on_ssp", ssp_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONSSP,
+ EP93XX_SYSCON_DEVCFG_I2SONSSP),
+ PMX_GROUP("pwm1", pwm_ep9312_pins, EP93XX_SYSCON_DEVCFG_PONG,
+ EP93XX_SYSCON_DEVCFG_PONG),
+ PMX_GROUP("gpio1agrp", gpio1a_ep9312_pins, EP93XX_SYSCON_DEVCFG_PONG, 0),
+ PMX_GROUP("ac97", ac97_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97, 0),
+ PMX_GROUP("i2s_on_ac97", ac97_ep9312_pins, EP93XX_SYSCON_DEVCFG_I2SONAC97,
+ EP93XX_SYSCON_DEVCFG_I2SONAC97),
+ PMX_GROUP("rasteronsdram0grp", raster_on_sdram0_pins, EP93XX_SYSCON_DEVCFG_RASONP3, 0),
+ PMX_GROUP("rasteronsdram3grp", raster_on_sdram3_pins, EP93XX_SYSCON_DEVCFG_RASONP3,
+ EP93XX_SYSCON_DEVCFG_RASONP3),
+ PMX_GROUP("gpio2agrp", gpio2a_9312_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("gpio3agrp", gpio3a_9312_pins, EP93XX_SYSCON_DEVCFG_GONK,
+ EP93XX_SYSCON_DEVCFG_GONK),
+ PMX_GROUP("keypadgrp", keypad_9312_pins, EP93XX_SYSCON_DEVCFG_GONK, 0),
+ PMX_GROUP("gpio4agrp", gpio4a_9312_pins, EP93XX_SYSCON_DEVCFG_EONIDE,
+ EP93XX_SYSCON_DEVCFG_EONIDE),
+ PMX_GROUP("gpio6agrp", gpio6a_9312_pins, EP93XX_SYSCON_DEVCFG_GONIDE,
+ EP93XX_SYSCON_DEVCFG_GONIDE),
+ PMX_GROUP("gpio7agrp", gpio7a_9312_pins, EP93XX_SYSCON_DEVCFG_HONIDE,
+ EP93XX_SYSCON_DEVCFG_HONIDE),
+ PMX_GROUP("idegrp", ide_9312_pins, EP93XX_SYSCON_DEVCFG_EONIDE |
+ EP93XX_SYSCON_DEVCFG_GONIDE | EP93XX_SYSCON_DEVCFG_HONIDE, 0),
+};
+
+static int ep93xx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ return ARRAY_SIZE(ep9301_pin_groups);
+ case EP93XX_9307_PINCTRL:
+ return ARRAY_SIZE(ep9307_pin_groups);
+ case EP93XX_9312_PINCTRL:
+ return ARRAY_SIZE(ep9312_pin_groups);
+ default:
+ return 0;
+ }
+}
+
+static const char *ep93xx_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ return ep9301_pin_groups[selector].grp.name;
+ case EP93XX_9307_PINCTRL:
+ return ep9307_pin_groups[selector].grp.name;
+ case EP93XX_9312_PINCTRL:
+ return ep9312_pin_groups[selector].grp.name;
+ default:
+ return NULL;
+ }
+}
+
+static int ep93xx_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct ep93xx_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ *pins = ep9301_pin_groups[selector].grp.pins;
+ *num_pins = ep9301_pin_groups[selector].grp.npins;
+ break;
+ case EP93XX_9307_PINCTRL:
+ *pins = ep9307_pin_groups[selector].grp.pins;
+ *num_pins = ep9307_pin_groups[selector].grp.npins;
+ break;
+ case EP93XX_9312_PINCTRL:
+ *pins = ep9312_pin_groups[selector].grp.pins;
+ *num_pins = ep9312_pin_groups[selector].grp.npins;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops ep93xx_pctrl_ops = {
+ .get_groups_count = ep93xx_get_groups_count,
+ .get_group_name = ep93xx_get_group_name,
+ .get_group_pins = ep93xx_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const char * const spigrps[] = { "ssp" };
+static const char * const ac97grps[] = { "ac97" };
+static const char * const i2sgrps[] = { "i2s_on_ssp", "i2s_on_ac97" };
+static const char * const pwm1grps[] = { "pwm1" };
+static const char * const gpiogrps[] = { "gpio1agrp", "gpio2agrp", "gpio3agrp",
+ "gpio4agrp", "gpio6agrp", "gpio7agrp" };
+static const char * const rastergrps[] = { "rasteronsdram0grp", "rasteronsdram3grp" };
+static const char * const keypadgrps[] = { "keypadgrp" };
+static const char * const idegrps[] = { "idegrp" };
+
+static const struct pinfunction ep93xx_pmx_functions[] = {
+ PINCTRL_PINFUNCTION("spi", spigrps, ARRAY_SIZE(spigrps)),
+ PINCTRL_PINFUNCTION("ac97", ac97grps, ARRAY_SIZE(ac97grps)),
+ PINCTRL_PINFUNCTION("i2s", i2sgrps, ARRAY_SIZE(i2sgrps)),
+ PINCTRL_PINFUNCTION("pwm", pwm1grps, ARRAY_SIZE(pwm1grps)),
+ PINCTRL_PINFUNCTION("keypad", keypadgrps, ARRAY_SIZE(keypadgrps)),
+ PINCTRL_PINFUNCTION("pata", idegrps, ARRAY_SIZE(idegrps)),
+ PINCTRL_PINFUNCTION("lcd", rastergrps, ARRAY_SIZE(rastergrps)),
+ PINCTRL_PINFUNCTION("gpio", gpiogrps, ARRAY_SIZE(gpiogrps)),
+};
+
+static int ep93xx_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned int group)
+{
+ struct ep93xx_pmx *pmx;
+ const struct pinfunction *func;
+ const struct ep93xx_pin_group *grp;
+ u32 before, after, expected;
+ unsigned long tmp;
+ int i;
+
+ pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ grp = &ep9301_pin_groups[group];
+ break;
+ case EP93XX_9307_PINCTRL:
+ grp = &ep9307_pin_groups[group];
+ break;
+ case EP93XX_9312_PINCTRL:
+ grp = &ep9312_pin_groups[group];
+ break;
+ default:
+ dev_err(pmx->dev, "invalid SoC type\n");
+ return -ENODEV;
+ }
+
+ func = &ep93xx_pmx_functions[selector];
+
+ dev_dbg(pmx->dev,
+ "ACTIVATE function \"%s\" with group \"%s\" (mask=0x%x, value=0x%x)\n",
+ func->name, grp->grp.name, grp->mask, grp->value);
+
+ regmap_read(pmx->map, EP93XX_SYSCON_DEVCFG, &before);
+ ep93xx_pinctrl_update_bits(pmx, EP93XX_SYSCON_DEVCFG,
+ grp->mask, grp->value);
+ regmap_read(pmx->map, EP93XX_SYSCON_DEVCFG, &after);
+
+ dev_dbg(pmx->dev, "before=0x%x, after=0x%x, mask=0x%lx\n",
+ before, after, PADS_MASK);
+
+ /* Compute the expected state of the pad groups */
+ before &= PADS_MASK;
+ after &= PADS_MASK;
+ expected = before & ~grp->mask;
+ expected |= grp->value;
+ expected &= PADS_MASK;
+
+ /* Report any pad groups that did not reach the expected state */
+ tmp = expected ^ after;
+ for_each_set_bit(i, &tmp, PADS_MAXBIT) {
+ bool enabled = expected & BIT(i);
+
+ dev_err(pmx->dev,
+ "pin group %s could not be %s: probably a hardware limitation\n",
+ ep93xx_padgroups[i], str_enabled_disabled(enabled));
+ dev_err(pmx->dev,
+ "DeviceCfg before: %08x, after %08x, expected %08x\n",
+ before, after, expected);
+ }
+
+ return tmp ? -EINVAL : 0;
+}
+
+static int ep93xx_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(ep93xx_pmx_functions);
+}
+
+static const char *ep93xx_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return ep93xx_pmx_functions[selector].name;
+}
+
+static int ep93xx_pmx_get_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ *groups = ep93xx_pmx_functions[selector].groups;
+ *num_groups = ep93xx_pmx_functions[selector].ngroups;
+ return 0;
+}
+
+static const struct pinmux_ops ep93xx_pmx_ops = {
+ .get_functions_count = ep93xx_pmx_get_funcs_count,
+ .get_function_name = ep93xx_pmx_get_func_name,
+ .get_function_groups = ep93xx_pmx_get_groups,
+ .set_mux = ep93xx_pmx_set_mux,
+};
+
+static struct pinctrl_desc ep93xx_pmx_desc = {
+ .name = DRIVER_NAME,
+ .pctlops = &ep93xx_pctrl_ops,
+ .pmxops = &ep93xx_pmx_ops,
+ .owner = THIS_MODULE,
+};
+
+static int ep93xx_pmx_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+ struct device *dev = &adev->dev;
+ struct ep93xx_pmx *pmx;
+
+ /* Create state holders etc. for this driver */
+ pmx = devm_kzalloc(dev, sizeof(*pmx), GFP_KERNEL);
+ if (!pmx)
+ return -ENOMEM;
+
+ pmx->dev = dev;
+ pmx->map = rdev->map;
+ pmx->aux_dev = rdev;
+ pmx->model = (enum ep93xx_pinctrl_model)(uintptr_t)id->driver_data;
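+ /*
+ * Note: the file-scope ep93xx_pmx_desc is patched with the per-variant
+ * pin table here, which assumes a single pin controller instance.
+ */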
+ switch (pmx->model) {
+ case EP93XX_9301_PINCTRL:
+ ep93xx_pmx_desc.pins = ep9301_pins;
+ ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9301_pins);
+ dev_info(dev, "detected 9301/9302 chip variant\n");
+ break;
+ case EP93XX_9307_PINCTRL:
+ ep93xx_pmx_desc.pins = ep9307_pins;
+ ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9307_pins);
+ dev_info(dev, "detected 9307 chip variant\n");
+ break;
+ case EP93XX_9312_PINCTRL:
+ ep93xx_pmx_desc.pins = ep9312_pins;
+ ep93xx_pmx_desc.npins = ARRAY_SIZE(ep9312_pins);
+ dev_info(dev, "detected 9312/9315 chip variant\n");
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "unknown pin control model: %u\n", pmx->model);
+ }
+
+ /* using parent of_node to match in get_pinctrl_dev_from_of_node() */
+ device_set_node(dev, dev_fwnode(adev->dev.parent));
+ pmx->pctl = devm_pinctrl_register(dev, &ep93xx_pmx_desc, pmx);
+ if (IS_ERR(pmx->pctl))
+ return dev_err_probe(dev, PTR_ERR(pmx->pctl), "could not register pinmux driver\n");
+
+ return 0;
+}
+
+static const struct auxiliary_device_id ep93xx_pinctrl_ids[] = {
+ {
+ .name = "soc_ep93xx.pinctrl-ep9301",
+ .driver_data = (kernel_ulong_t)EP93XX_9301_PINCTRL,
+ },
+ {
+ .name = "soc_ep93xx.pinctrl-ep9307",
+ .driver_data = (kernel_ulong_t)EP93XX_9307_PINCTRL,
+ },
+ {
+ .name = "soc_ep93xx.pinctrl-ep9312",
+ .driver_data = (kernel_ulong_t)EP93XX_9312_PINCTRL,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ep93xx_pinctrl_ids);
+
+static struct auxiliary_driver ep93xx_pmx_driver = {
+ .probe = ep93xx_pmx_probe,
+ .id_table = ep93xx_pinctrl_ids,
+};
+module_auxiliary_driver(ep93xx_pmx_driver);
diff --git a/drivers/pinctrl/pinctrl-eyeq5.c b/drivers/pinctrl/pinctrl-eyeq5.c
new file mode 100644
index 000000000000..5f6af934a516
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-eyeq5.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Pinctrl driver for the Mobileye EyeQ5 platform.
+ *
+ * The registers are located in a syscon region called OLB. There are two pin
+ * banks, each controlled by five registers (see enum eq5p_regs): pull-down,
+ * pull-up, two drive-strength registers and muxing.
+ *
+ * For each pin, muxing is between two functions: (0) GPIO or (1) another one
+ * that is pin-dependent. Functions are declared statically in this driver.
+ *
+ * We create pinctrl groups that are 1:1 equivalent to pins: each group has a
+ * single pin, and its index/selector is the pin number.
+ *
+ * We use the eq5p_ prefix, as in "EyeQ5 Pinctrl", but much shorter.
+ *
+ * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
+ */
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+
+struct eq5p_pinctrl {
+ struct pinctrl_desc desc;
+ void __iomem *base;
+};
+
+enum eq5p_bank {
+ EQ5P_BANK_A,
+ EQ5P_BANK_B,
+
+ EQ5P_BANK_COUNT,
+};
+
+enum eq5p_regs {
+ EQ5P_PD,
+ EQ5P_PU,
+ EQ5P_DS_LOW,
+ EQ5P_DS_HIGH,
+ EQ5P_IOCR,
+
+ EQ5P_REG_COUNT,
+};
+
+static const unsigned int eq5p_regs[EQ5P_BANK_COUNT][EQ5P_REG_COUNT] = {
+ [EQ5P_BANK_A] = {0x0C0, 0x0C4, 0x0D0, 0x0D4, 0x0B0},
+ [EQ5P_BANK_B] = {0x0C8, 0x0CC, 0x0D8, 0x0DC, 0x0B4},
+};
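+
+/*
+ * Example: pin PB11 lives in bank B at offset 11, so its pull-down bit is
+ * bit 11 of the register at 0x0C8, its pull-up bit is bit 11 of 0x0CC and,
+ * with two drive-strength bits per pin, its drive-strength field occupies
+ * bits 23:22 of the EQ5P_DS_LOW register at 0x0D8.
+ */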
+
+/*
+ * Drive strength; two bits per pin.
+ */
+#define EQ5P_DS_MASK GENMASK(1, 0)
+
+/*
+ * Comments to the right of each pin are the "signal name" in the datasheet.
+ */
+static const struct pinctrl_pin_desc eq5p_pins[] = {
+ /* Bank A */
+ PINCTRL_PIN(0, "PA0"), /* A0_TIMER0_CK */
+ PINCTRL_PIN(1, "PA1"), /* A1_TIMER0_EOC */
+ PINCTRL_PIN(2, "PA2"), /* A2_TIMER1_CK */
+ PINCTRL_PIN(3, "PA3"), /* A3_TIMER1_EOC */
+ PINCTRL_PIN(4, "PA4"), /* A4_TIMER2_CK */
+ PINCTRL_PIN(5, "PA5"), /* A5_TIMER2_EOC */
+ PINCTRL_PIN(6, "PA6"), /* A6_TIMER5_EXT_INCAP1 */
+ PINCTRL_PIN(7, "PA7"), /* A7_TIMER5_EXT_INCAP2 */
+ PINCTRL_PIN(8, "PA8"), /* A8_TIMER5_EXT_OUTCMP1 */
+ PINCTRL_PIN(9, "PA9"), /* A9_TIMER5_EXT_OUTCMP2 */
+ PINCTRL_PIN(10, "PA10"), /* A10_UART_0_TX */
+ PINCTRL_PIN(11, "PA11"), /* A11_UART_0_RX */
+ PINCTRL_PIN(12, "PA12"), /* A12_UART_1_TX */
+ PINCTRL_PIN(13, "PA13"), /* A13_UART_1_RX */
+ PINCTRL_PIN(14, "PA14"), /* A14_CAN_0_TX */
+ PINCTRL_PIN(15, "PA15"), /* A15_CAN_0_RX */
+ PINCTRL_PIN(16, "PA16"), /* A16_CAN_1_TX */
+ PINCTRL_PIN(17, "PA17"), /* A17_CAN_1_RX */
+ PINCTRL_PIN(18, "PA18"), /* A18_SPI_0_DO */
+ PINCTRL_PIN(19, "PA19"), /* A19_SPI_0_DI */
+ PINCTRL_PIN(20, "PA20"), /* A20_SPI_0_CK */
+ PINCTRL_PIN(21, "PA21"), /* A21_SPI_0_CS0 */
+ PINCTRL_PIN(22, "PA22"), /* A22_SPI_0_CS1 */
+ PINCTRL_PIN(23, "PA23"), /* A23_SPI_1_DO */
+ PINCTRL_PIN(24, "PA24"), /* A24_SPI_1_DI */
+ PINCTRL_PIN(25, "PA25"), /* A25_SPI_1_CK */
+ PINCTRL_PIN(26, "PA26"), /* A26_SPI_1_CS0 */
+ PINCTRL_PIN(27, "PA27"), /* A27_SPI_1_CS1 */
+ PINCTRL_PIN(28, "PA28"), /* A28_REF_CLK0 */
+
+#define EQ5P_PIN_OFFSET_BANK_B 29
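+/* Bank A has 29 pins (PA0..PA28); bank B pin numbers start past them. */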
+
+ /* Bank B */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 0, "PB0"), /* B0_TIMER3_CK */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 1, "PB1"), /* B1_TIMER3_EOC */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 2, "PB2"), /* B2_TIMER4_CK */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 3, "PB3"), /* B3_TIMER4_EOC */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 4, "PB4"), /* B4_TIMER6_EXT_INCAP1 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 5, "PB5"), /* B5_TIMER6_EXT_INCAP2 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 6, "PB6"), /* B6_TIMER6_EXT_OUTCMP1 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 7, "PB7"), /* B7_TIMER6_EXT_OUTCMP2 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 8, "PB8"), /* B8_UART_2_TX */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 9, "PB9"), /* B9_UART_2_RX */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 10, "PB10"), /* B10_CAN_2_TX */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 11, "PB11"), /* B11_CAN_2_RX */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 12, "PB12"), /* B12_SPI_2_DO */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 13, "PB13"), /* B13_SPI_2_DI */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 14, "PB14"), /* B14_SPI_2_CK */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 15, "PB15"), /* B15_SPI_2_CS0 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 16, "PB16"), /* B16_SPI_2_CS1 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 17, "PB17"), /* B17_SPI_3_DO */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 18, "PB18"), /* B18_SPI_3_DI */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 19, "PB19"), /* B19_SPI_3_CK */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 20, "PB20"), /* B20_SPI_3_CS0 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 21, "PB21"), /* B21_SPI_3_CS1 */
+ PINCTRL_PIN(EQ5P_PIN_OFFSET_BANK_B + 22, "PB22"), /* B22_MCLK0 */
+};
+
+static const char * const gpio_groups[] = {
+ /* Bank A */
+ "PA0", "PA1", "PA2", "PA3", "PA4", "PA5", "PA6", "PA7",
+ "PA8", "PA9", "PA10", "PA11", "PA12", "PA13", "PA14", "PA15",
+ "PA16", "PA17", "PA18", "PA19", "PA20", "PA21", "PA22", "PA23",
+ "PA24", "PA25", "PA26", "PA27", "PA28",
+
+ /* Bank B */
+ "PB0", "PB1", "PB2", "PB3", "PB4", "PB5", "PB6", "PB7",
+ "PB8", "PB9", "PB10", "PB11", "PB12", "PB13", "PB14", "PB15",
+ "PB16", "PB17", "PB18", "PB19", "PB20", "PB21", "PB22",
+};
+
+/* Groups of functions on bank A */
+static const char * const timer0_groups[] = { "PA0", "PA1" };
+static const char * const timer1_groups[] = { "PA2", "PA3" };
+static const char * const timer2_groups[] = { "PA4", "PA5" };
+static const char * const timer5_groups[] = { "PA6", "PA7", "PA8", "PA9" };
+static const char * const uart0_groups[] = { "PA10", "PA11" };
+static const char * const uart1_groups[] = { "PA12", "PA13" };
+static const char * const can0_groups[] = { "PA14", "PA15" };
+static const char * const can1_groups[] = { "PA16", "PA17" };
+static const char * const spi0_groups[] = { "PA18", "PA19", "PA20", "PA21", "PA22" };
+static const char * const spi1_groups[] = { "PA23", "PA24", "PA25", "PA26", "PA27" };
+static const char * const refclk0_groups[] = { "PA28" };
+
+/* Groups of functions on bank B */
+static const char * const timer3_groups[] = { "PB0", "PB1" };
+static const char * const timer4_groups[] = { "PB2", "PB3" };
+static const char * const timer6_groups[] = { "PB4", "PB5", "PB6", "PB7" };
+static const char * const uart2_groups[] = { "PB8", "PB9" };
+static const char * const can2_groups[] = { "PB10", "PB11" };
+static const char * const spi2_groups[] = { "PB12", "PB13", "PB14", "PB15", "PB16" };
+static const char * const spi3_groups[] = { "PB17", "PB18", "PB19", "PB20", "PB21" };
+static const char * const mclk0_groups[] = { "PB22" };
+
+static const struct pinfunction eq5p_functions[] = {
+ /* The GPIO function must keep index 0, as GPIO_FUNC_SELECTOR relies on it. */
+ PINCTRL_PINFUNCTION("gpio", gpio_groups, ARRAY_SIZE(gpio_groups)),
+#define GPIO_FUNC_SELECTOR 0
+
+ /* Bank A functions */
+ PINCTRL_PINFUNCTION("timer0", timer0_groups, ARRAY_SIZE(timer0_groups)),
+ PINCTRL_PINFUNCTION("timer1", timer1_groups, ARRAY_SIZE(timer1_groups)),
+ PINCTRL_PINFUNCTION("timer2", timer2_groups, ARRAY_SIZE(timer2_groups)),
+ PINCTRL_PINFUNCTION("timer5", timer5_groups, ARRAY_SIZE(timer5_groups)),
+ PINCTRL_PINFUNCTION("uart0", uart0_groups, ARRAY_SIZE(uart0_groups)),
+ PINCTRL_PINFUNCTION("uart1", uart1_groups, ARRAY_SIZE(uart1_groups)),
+ PINCTRL_PINFUNCTION("can0", can0_groups, ARRAY_SIZE(can0_groups)),
+ PINCTRL_PINFUNCTION("can1", can1_groups, ARRAY_SIZE(can1_groups)),
+ PINCTRL_PINFUNCTION("spi0", spi0_groups, ARRAY_SIZE(spi0_groups)),
+ PINCTRL_PINFUNCTION("spi1", spi1_groups, ARRAY_SIZE(spi1_groups)),
+ PINCTRL_PINFUNCTION("refclk0", refclk0_groups, ARRAY_SIZE(refclk0_groups)),
+
+ /* Bank B functions */
+ PINCTRL_PINFUNCTION("timer3", timer3_groups, ARRAY_SIZE(timer3_groups)),
+ PINCTRL_PINFUNCTION("timer4", timer4_groups, ARRAY_SIZE(timer4_groups)),
+ PINCTRL_PINFUNCTION("timer6", timer6_groups, ARRAY_SIZE(timer6_groups)),
+ PINCTRL_PINFUNCTION("uart2", uart2_groups, ARRAY_SIZE(uart2_groups)),
+ PINCTRL_PINFUNCTION("can2", can2_groups, ARRAY_SIZE(can2_groups)),
+ PINCTRL_PINFUNCTION("spi2", spi2_groups, ARRAY_SIZE(spi2_groups)),
+ PINCTRL_PINFUNCTION("spi3", spi3_groups, ARRAY_SIZE(spi3_groups)),
+ PINCTRL_PINFUNCTION("mclk0", mclk0_groups, ARRAY_SIZE(mclk0_groups)),
+};
+
+static void eq5p_update_bits(const struct eq5p_pinctrl *pctrl,
+ enum eq5p_bank bank, enum eq5p_regs reg,
+ u32 mask, u32 val)
+{
+ void __iomem *ptr = pctrl->base + eq5p_regs[bank][reg];
+
+ writel((readl(ptr) & ~mask) | (val & mask), ptr);
+}
+
+static bool eq5p_test_bit(const struct eq5p_pinctrl *pctrl,
+ enum eq5p_bank bank, enum eq5p_regs reg, int offset)
+{
+ u32 val = readl(pctrl->base + eq5p_regs[bank][reg]);
+
+ if (WARN_ON(offset > 31))
+ return false;
+
+ return (val & BIT(offset)) != 0;
+}
+
+static enum eq5p_bank eq5p_pin_to_bank(unsigned int pin)
+{
+ if (pin < EQ5P_PIN_OFFSET_BANK_B)
+ return EQ5P_BANK_A;
+ else
+ return EQ5P_BANK_B;
+}
+
+static unsigned int eq5p_pin_to_offset(unsigned int pin)
+{
+ if (pin < EQ5P_PIN_OFFSET_BANK_B)
+ return pin;
+ else
+ return pin - EQ5P_PIN_OFFSET_BANK_B;
+}
+
+static int eq5p_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(eq5p_pins);
+}
+
+static const char *eq5p_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return pctldev->desc->pins[selector].name;
+}
+
+static int eq5p_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &pctldev->desc->pins[selector].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static int eq5p_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ u32 val_ds, arg;
+ bool pd, pu;
+
+ pd = eq5p_test_bit(pctrl, bank, EQ5P_PD, offset);
+ pu = eq5p_test_bit(pctrl, bank, EQ5P_PU, offset);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = !(pd || pu);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = pd;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = pu;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ offset *= 2; /* two bits per pin */
+ if (offset >= 32) {
+ val_ds = readl(pctrl->base + eq5p_regs[bank][EQ5P_DS_HIGH]);
+ offset -= 32;
+ } else {
+ val_ds = readl(pctrl->base + eq5p_regs[bank][EQ5P_DS_LOW]);
+ }
+ arg = (val_ds >> offset) & EQ5P_DS_MASK;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static void eq5p_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int pin)
+{
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const char *pin_name = pctrl->desc.pins[pin].name;
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ const char *func_name, *bias;
+ unsigned long ds_config;
+ u32 drive_strength;
+ bool pd, pu;
+ int i, j;
+
+ /*
+ * First, let's get the function name. All pins have only two functions:
+ * GPIO (IOCR == 0) and something else (IOCR == 1).
+ */
+ if (eq5p_test_bit(pctrl, bank, EQ5P_IOCR, offset)) {
+ func_name = NULL;
+ for (i = 0; i < ARRAY_SIZE(eq5p_functions); i++) {
+ if (i == GPIO_FUNC_SELECTOR)
+ continue;
+
+ for (j = 0; j < eq5p_functions[i].ngroups; j++) {
+ /* Groups and pins are the same thing for us. */
+ const char *x = eq5p_functions[i].groups[j];
+
+ if (strcmp(x, pin_name) == 0) {
+ func_name = eq5p_functions[i].name;
+ break;
+ }
+ }
+
+ if (func_name)
+ break;
+ }
+
+ /*
+ * We have not found the function attached to this pin; this should
+ * never occur, as all pins have exactly two functions.
+ */
+ if (!func_name)
+ func_name = "unknown";
+ } else {
+ func_name = eq5p_functions[GPIO_FUNC_SELECTOR].name;
+ }
+
+ /* Second, we retrieve the bias. */
+ pd = eq5p_test_bit(pctrl, bank, EQ5P_PD, offset);
+ pu = eq5p_test_bit(pctrl, bank, EQ5P_PU, offset);
+ if (pd && pu)
+ bias = "both";
+ else if (pd && !pu)
+ bias = "pulldown";
+ else if (!pd && pu)
+ bias = "pullup";
+ else
+ bias = "none";
+
+ /* Third, we get the drive strength. */
+ ds_config = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, 0);
+ eq5p_pinconf_get(pctldev, pin, &ds_config);
+ drive_strength = pinconf_to_config_argument(ds_config);
+
+ seq_printf(s, "function=%s bias=%s drive_strength=%d",
+ func_name, bias, drive_strength);
+}
+
+static const struct pinctrl_ops eq5p_pinctrl_ops = {
+ .get_groups_count = eq5p_pinctrl_get_groups_count,
+ .get_group_name = eq5p_pinctrl_get_group_name,
+ .get_group_pins = eq5p_pinctrl_get_group_pins,
+ .pin_dbg_show = eq5p_pinctrl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int eq5p_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(eq5p_functions);
+}
+
+static const char *eq5p_pinmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return eq5p_functions[selector].name;
+}
+
+static int eq5p_pinmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ *groups = eq5p_functions[selector].groups;
+ *num_groups = eq5p_functions[selector].ngroups;
+ return 0;
+}
+
+static int eq5p_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector, unsigned int pin)
+{
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const char *func_name = eq5p_functions[func_selector].name;
+ const char *group_name = pctldev->desc->pins[pin].name;
+ bool is_gpio = func_selector == GPIO_FUNC_SELECTOR;
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ u32 mask, val;
+
+ dev_dbg(pctldev->dev, "func=%s group=%s\n", func_name, group_name);
+
+ mask = BIT(offset);
+ val = is_gpio ? 0 : mask;
+ eq5p_update_bits(pctrl, bank, EQ5P_IOCR, mask, val);
+ return 0;
+}
+
+static int eq5p_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ /* Pin numbers and group selectors are the same thing in our case. */
+ return eq5p_pinmux_set_mux(pctldev, GPIO_FUNC_SELECTOR, pin);
+}
+
+static const struct pinmux_ops eq5p_pinmux_ops = {
+ .get_functions_count = eq5p_pinmux_get_functions_count,
+ .get_function_name = eq5p_pinmux_get_function_name,
+ .get_function_groups = eq5p_pinmux_get_function_groups,
+ .set_mux = eq5p_pinmux_set_mux,
+ .gpio_request_enable = eq5p_pinmux_gpio_request_enable,
+ .strict = true,
+};
+
+static int eq5p_pinconf_set_drive_strength(struct pinctrl_dev *pctldev,
+ unsigned int pin, u32 arg)
+{
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ unsigned int reg;
+ u32 mask, val;
+
+ if (arg & ~EQ5P_DS_MASK) {
+ dev_err(pctldev->dev, "Unsupported drive strength: %u\n", arg);
+ return -EINVAL;
+ }
+
+ offset *= 2; /* two bits per pin */
+
+ if (offset >= 32) {
+ reg = EQ5P_DS_HIGH;
+ offset -= 32;
+ } else {
+ reg = EQ5P_DS_LOW;
+ }
+
+ mask = EQ5P_DS_MASK << offset;
+ val = arg << offset;
+ eq5p_update_bits(pctrl, bank, reg, mask, val);
+ return 0;
+}
+
+static int eq5p_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct eq5p_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const char *pin_name = pctldev->desc->pins[pin].name;
+ unsigned int offset = eq5p_pin_to_offset(pin);
+ enum eq5p_bank bank = eq5p_pin_to_bank(pin);
+ struct device *dev = pctldev->dev;
+ u32 val = BIT(offset);
+ unsigned int i;
+
+ for (i = 0; i < num_configs; i++) {
+ enum pin_config_param param = pinconf_to_config_param(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ dev_dbg(dev, "pin=%s bias_disable\n", pin_name);
+
+ eq5p_update_bits(pctrl, bank, EQ5P_PD, val, 0);
+ eq5p_update_bits(pctrl, bank, EQ5P_PU, val, 0);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ dev_dbg(dev, "pin=%s bias_pull_down arg=%u\n",
+ pin_name, arg);
+
+ if (arg == 0) /* cannot connect to GND */
+ return -ENOTSUPP;
+
+ eq5p_update_bits(pctrl, bank, EQ5P_PD, val, val);
+ eq5p_update_bits(pctrl, bank, EQ5P_PU, val, 0);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ dev_dbg(dev, "pin=%s bias_pull_up arg=%u\n",
+ pin_name, arg);
+
+ if (arg == 0) /* cannot connect to VDD */
+ return -ENOTSUPP;
+
+ eq5p_update_bits(pctrl, bank, EQ5P_PD, val, 0);
+ eq5p_update_bits(pctrl, bank, EQ5P_PU, val, val);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ dev_dbg(dev, "pin=%s drive_strength arg=%u\n",
+ pin_name, arg);
+
+ eq5p_pinconf_set_drive_strength(pctldev, pin, arg);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported pinconf %u\n", param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops eq5p_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = eq5p_pinconf_get,
+ .pin_config_set = eq5p_pinconf_set,
+ /* Pins and groups are equivalent in this driver. */
+ .pin_config_group_get = eq5p_pinconf_get,
+ .pin_config_group_set = eq5p_pinconf_set,
+};
+
+static int eq5p_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct pinctrl_dev *pctldev;
+ struct eq5p_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
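+ /*
+ * The parent auxiliary driver (clk_eyeq, per the id table below) is
+ * expected to hand over the mapped OLB register base as platform data.
+ */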
+ pctrl->base = (void __iomem *)dev_get_platdata(dev);
+ pctrl->desc.name = dev_name(dev);
+ pctrl->desc.pins = eq5p_pins;
+ pctrl->desc.npins = ARRAY_SIZE(eq5p_pins);
+ pctrl->desc.pctlops = &eq5p_pinctrl_ops;
+ pctrl->desc.pmxops = &eq5p_pinmux_ops;
+ pctrl->desc.confops = &eq5p_pinconf_ops;
+ pctrl->desc.owner = THIS_MODULE;
+
+ ret = devm_pinctrl_register_and_init(dev, &pctrl->desc, pctrl, &pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed registering pinctrl device\n");
+
+ ret = pinctrl_enable(pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed enabling pinctrl device\n");
+
+ return 0;
+}
+
+static const struct auxiliary_device_id eq5p_id_table[] = {
+ { .name = "clk_eyeq.pinctrl" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, eq5p_id_table);
+
+static struct auxiliary_driver eq5p_driver = {
+ .probe = eq5p_probe,
+ .id_table = eq5p_id_table,
+};
+module_auxiliary_driver(eq5p_driver);
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index a898e40451fe..0f6b55fec31d 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -925,7 +925,6 @@ static int k210_fpioa_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct k210_fpioa_data *pdata;
- int ret;
dev_info(dev, "K210 FPIOA pin controller\n");
@@ -940,46 +939,28 @@ static int k210_fpioa_probe(struct platform_device *pdev)
if (IS_ERR(pdata->fpioa))
return PTR_ERR(pdata->fpioa);
- pdata->clk = devm_clk_get(dev, "ref");
+ pdata->clk = devm_clk_get_enabled(dev, "ref");
if (IS_ERR(pdata->clk))
return PTR_ERR(pdata->clk);
- ret = clk_prepare_enable(pdata->clk);
- if (ret)
- return ret;
-
- pdata->pclk = devm_clk_get_optional(dev, "pclk");
- if (!IS_ERR(pdata->pclk)) {
- ret = clk_prepare_enable(pdata->pclk);
- if (ret)
- goto disable_clk;
- }
+ pdata->pclk = devm_clk_get_optional_enabled(dev, "pclk");
+ if (IS_ERR(pdata->pclk))
+ return PTR_ERR(pdata->pclk);
pdata->sysctl_map =
syscon_regmap_lookup_by_phandle_args(np,
"canaan,k210-sysctl-power",
1, &pdata->power_offset);
- if (IS_ERR(pdata->sysctl_map)) {
- ret = PTR_ERR(pdata->sysctl_map);
- goto disable_pclk;
- }
+ if (IS_ERR(pdata->sysctl_map))
+ return PTR_ERR(pdata->sysctl_map);
k210_fpioa_init_ties(pdata);
pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata);
- if (IS_ERR(pdata->pctl)) {
- ret = PTR_ERR(pdata->pctl);
- goto disable_pclk;
- }
+ if (IS_ERR(pdata->pctl))
+ return PTR_ERR(pdata->pctl);
return 0;
-
-disable_pclk:
- clk_disable_unprepare(pdata->pclk);
-disable_clk:
- clk_disable_unprepare(pdata->clk);
-
- return ret;
}
static const struct of_device_id k210_fpioa_dt_ids[] = {
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 6878bc86faa2..5c1bc4d5b662 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -84,6 +84,27 @@
}, \
}
+#define PIN_BANK_IOMUX_FLAGS_OFFSET_PULL_FLAGS(id, pins, label, iom0, \
+ iom1, iom2, iom3, \
+ offset0, offset1, \
+ offset2, offset3, pull0, \
+ pull1, pull2, pull3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .type = iom0, .offset = offset0 }, \
+ { .type = iom1, .offset = offset1 }, \
+ { .type = iom2, .offset = offset2 }, \
+ { .type = iom3, .offset = offset3 }, \
+ }, \
+ .pull_type[0] = pull0, \
+ .pull_type[1] = pull1, \
+ .pull_type[2] = pull2, \
+ .pull_type[3] = pull3, \
+ }
+
#define PIN_BANK_DRV_FLAGS(id, pins, label, type0, type1, type2, type3) \
{ \
.bank_num = id, \
@@ -1120,6 +1141,11 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
if (bank->recalced_mask & BIT(pin))
rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask);
+ if (ctrl->type == RK3576) {
+ if ((bank->bank_num == 0) && (pin >= RK_PB4) && (pin <= RK_PB7))
+ reg += 0x1ff4; /* GPIO0_IOC_GPIO0B_IOMUX_SEL_H */
+ }
+
if (ctrl->type == RK3588) {
if (bank->bank_num == 0) {
if ((pin >= RK_PB4) && (pin <= RK_PD7)) {
@@ -1234,6 +1260,11 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
if (bank->recalced_mask & BIT(pin))
rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask);
+ if (ctrl->type == RK3576) {
+ if ((bank->bank_num == 0) && (pin >= RK_PB4) && (pin <= RK_PB7))
+ reg += 0x1ff4; /* GPIO0_IOC_GPIO0B_IOMUX_SEL_H */
+ }
+
if (ctrl->type == RK3588) {
if (bank->bank_num == 0) {
if ((pin >= RK_PB4) && (pin <= RK_PD7)) {
@@ -2038,6 +2069,142 @@ static int rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3576_DRV_BITS_PER_PIN 4
+#define RK3576_DRV_PINS_PER_REG 4
+#define RK3576_DRV_GPIO0_AL_OFFSET 0x10
+#define RK3576_DRV_GPIO0_BH_OFFSET 0x2014
+#define RK3576_DRV_GPIO1_OFFSET 0x6020
+#define RK3576_DRV_GPIO2_OFFSET 0x6040
+#define RK3576_DRV_GPIO3_OFFSET 0x6060
+#define RK3576_DRV_GPIO4_AL_OFFSET 0x6080
+#define RK3576_DRV_GPIO4_CL_OFFSET 0xA090
+#define RK3576_DRV_GPIO4_DL_OFFSET 0xB098
+
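+/*
+ * Sub-ranges that do not start at pin 0 (bank 0 pins >= 12, bank 4 pins
+ * >= 16 and >= 24) get their base rebased below (e.g. "- 0xc") so that the
+ * generic (pin_num / PINS_PER_REG) * 4 arithmetic lands the sub-range's
+ * first pin on the register named by the macro. The pull and Schmitt
+ * helpers below follow the same scheme.
+ */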
+static int rk3576_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0 && pin_num < 12)
+ *reg = RK3576_DRV_GPIO0_AL_OFFSET;
+ else if (bank->bank_num == 0)
+ *reg = RK3576_DRV_GPIO0_BH_OFFSET - 0xc;
+ else if (bank->bank_num == 1)
+ *reg = RK3576_DRV_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3576_DRV_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3576_DRV_GPIO3_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 16)
+ *reg = RK3576_DRV_GPIO4_AL_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 24)
+ *reg = RK3576_DRV_GPIO4_CL_OFFSET - 0x10;
+ else if (bank->bank_num == 4)
+ *reg = RK3576_DRV_GPIO4_DL_OFFSET - 0x18;
+ else {
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3576_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3576_DRV_PINS_PER_REG;
+ *bit *= RK3576_DRV_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3576_PULL_BITS_PER_PIN 2
+#define RK3576_PULL_PINS_PER_REG 8
+#define RK3576_PULL_GPIO0_AL_OFFSET 0x20
+#define RK3576_PULL_GPIO0_BH_OFFSET 0x2028
+#define RK3576_PULL_GPIO1_OFFSET 0x6110
+#define RK3576_PULL_GPIO2_OFFSET 0x6120
+#define RK3576_PULL_GPIO3_OFFSET 0x6130
+#define RK3576_PULL_GPIO4_AL_OFFSET 0x6140
+#define RK3576_PULL_GPIO4_CL_OFFSET 0xA148
+#define RK3576_PULL_GPIO4_DL_OFFSET 0xB14C
+
+static int rk3576_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0 && pin_num < 12)
+ *reg = RK3576_PULL_GPIO0_AL_OFFSET;
+ else if (bank->bank_num == 0)
+ *reg = RK3576_PULL_GPIO0_BH_OFFSET - 0x4;
+ else if (bank->bank_num == 1)
+ *reg = RK3576_PULL_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3576_PULL_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3576_PULL_GPIO3_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 16)
+ *reg = RK3576_PULL_GPIO4_AL_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 24)
+ *reg = RK3576_PULL_GPIO4_CL_OFFSET - 0x8;
+ else if (bank->bank_num == 4)
+ *reg = RK3576_PULL_GPIO4_DL_OFFSET - 0xc;
+ else {
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3576_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3576_PULL_PINS_PER_REG;
+ *bit *= RK3576_PULL_BITS_PER_PIN;
+
+ return 0;
+}
+
+#define RK3576_SMT_BITS_PER_PIN 1
+#define RK3576_SMT_PINS_PER_REG 8
+#define RK3576_SMT_GPIO0_AL_OFFSET 0x30
+#define RK3576_SMT_GPIO0_BH_OFFSET 0x2040
+#define RK3576_SMT_GPIO1_OFFSET 0x6210
+#define RK3576_SMT_GPIO2_OFFSET 0x6220
+#define RK3576_SMT_GPIO3_OFFSET 0x6230
+#define RK3576_SMT_GPIO4_AL_OFFSET 0x6240
+#define RK3576_SMT_GPIO4_CL_OFFSET 0xA248
+#define RK3576_SMT_GPIO4_DL_OFFSET 0xB24C
+
+static int rk3576_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+
+ if (bank->bank_num == 0 && pin_num < 12)
+ *reg = RK3576_SMT_GPIO0_AL_OFFSET;
+ else if (bank->bank_num == 0)
+ *reg = RK3576_SMT_GPIO0_BH_OFFSET - 0x4;
+ else if (bank->bank_num == 1)
+ *reg = RK3576_SMT_GPIO1_OFFSET;
+ else if (bank->bank_num == 2)
+ *reg = RK3576_SMT_GPIO2_OFFSET;
+ else if (bank->bank_num == 3)
+ *reg = RK3576_SMT_GPIO3_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 16)
+ *reg = RK3576_SMT_GPIO4_AL_OFFSET;
+ else if (bank->bank_num == 4 && pin_num < 24)
+ *reg = RK3576_SMT_GPIO4_CL_OFFSET - 0x8;
+ else if (bank->bank_num == 4)
+ *reg = RK3576_SMT_GPIO4_DL_OFFSET - 0xc;
+ else {
+ dev_err(info->dev, "unsupported bank_num %d\n", bank->bank_num);
+ return -EINVAL;
+ }
+
+ *reg += ((pin_num / RK3576_SMT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3576_SMT_PINS_PER_REG;
+ *bit *= RK3576_SMT_BITS_PER_PIN;
+
+ return 0;
+}
+
#define RK3588_PMU1_IOC_REG (0x0000)
#define RK3588_PMU2_IOC_REG (0x4000)
#define RK3588_BUS_IOC_REG (0x8000)
@@ -2332,6 +2499,10 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3568_DRV_BITS_PER_PIN;
ret = (1 << (strength + 1)) - 1;
goto config;
+ } else if (ctrl->type == RK3576) {
+ rmask_bits = RK3576_DRV_BITS_PER_PIN;
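+ /* The register encodes the 3-bit strength with its bit order reversed. */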
+ ret = ((strength & BIT(2)) >> 2) | ((strength & BIT(0)) << 2) | (strength & BIT(1));
+ goto config;
}
if (ctrl->type == RV1126) {
@@ -2469,6 +2640,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3368:
case RK3399:
case RK3568:
+ case RK3576:
case RK3588:
pull_type = bank->pull_type[pin_num / 8];
data >>= bit;
@@ -2528,6 +2700,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3368:
case RK3399:
case RK3568:
+ case RK3576:
case RK3588:
pull_type = bank->pull_type[pin_num / 8];
ret = -EINVAL;
@@ -2793,6 +2966,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3368:
case RK3399:
case RK3568:
+ case RK3576:
case RK3588:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
}
@@ -3949,6 +4123,37 @@ static struct rockchip_pin_ctrl rk3568_pin_ctrl = {
.schmitt_calc_reg = rk3568_calc_schmitt_reg_and_bit,
};
+#define RK3576_PIN_BANK(ID, LABEL, OFFSET0, OFFSET1, OFFSET2, OFFSET3) \
+ PIN_BANK_IOMUX_FLAGS_OFFSET_PULL_FLAGS(ID, 32, LABEL, \
+ IOMUX_WIDTH_4BIT, \
+ IOMUX_WIDTH_4BIT, \
+ IOMUX_WIDTH_4BIT, \
+ IOMUX_WIDTH_4BIT, \
+ OFFSET0, OFFSET1, \
+ OFFSET2, OFFSET3, \
+ PULL_TYPE_IO_1V8_ONLY, \
+ PULL_TYPE_IO_1V8_ONLY, \
+ PULL_TYPE_IO_1V8_ONLY, \
+ PULL_TYPE_IO_1V8_ONLY)
+
+static struct rockchip_pin_bank rk3576_pin_banks[] = {
+ RK3576_PIN_BANK(0, "gpio0", 0, 0x8, 0x2004, 0x200C),
+ RK3576_PIN_BANK(1, "gpio1", 0x4020, 0x4028, 0x4030, 0x4038),
+ RK3576_PIN_BANK(2, "gpio2", 0x4040, 0x4048, 0x4050, 0x4058),
+ RK3576_PIN_BANK(3, "gpio3", 0x4060, 0x4068, 0x4070, 0x4078),
+ RK3576_PIN_BANK(4, "gpio4", 0x4080, 0x4088, 0xA390, 0xB398),
+};
+
+static struct rockchip_pin_ctrl rk3576_pin_ctrl __maybe_unused = {
+ .pin_banks = rk3576_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3576_pin_banks),
+ .label = "RK3576-GPIO",
+ .type = RK3576,
+ .pull_calc_reg = rk3576_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3576_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3576_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3588_pin_banks[] = {
RK3588_PIN_BANK_FLAGS(0, 32, "gpio0",
IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY),
@@ -4005,6 +4210,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3399_pin_ctrl },
{ .compatible = "rockchip,rk3568-pinctrl",
.data = &rk3568_pin_ctrl },
+ { .compatible = "rockchip,rk3576-pinctrl",
+ .data = &rk3576_pin_ctrl },
{ .compatible = "rockchip,rk3588-pinctrl",
.data = &rk3588_pin_ctrl },
{},
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 849266f8b191..6ebbb0a88ce7 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -197,6 +197,7 @@ enum rockchip_pinctrl_type {
RK3368,
RK3399,
RK3568,
+ RK3576,
RK3588,
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 4da3c3f422b6..2ec599e383e4 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1913,7 +1913,8 @@ static int pcs_probe(struct platform_device *pdev)
dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size);
- if (pinctrl_enable(pcs->pctl))
+ ret = pinctrl_enable(pcs->pctl);
+ if (ret)
goto free;
return 0;
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 6313be370eb7..d2c5321dd025 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
@@ -369,14 +370,14 @@ static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
return;
if (dir == GPIO_LINE_DIRECTION_OUT) {
- seq_printf(s, "output %s ", val ? "high" : "low");
+ seq_printf(s, "output %s ", str_high_low(val));
if (type)
seq_printf(s, "open drain %s internal pull-up ",
pupd ? "with" : "without");
else
seq_puts(s, "push pull no pull ");
} else {
- seq_printf(s, "input %s ", val ? "high" : "low");
+ seq_printf(s, "input %s ", str_high_low(val));
if (type)
seq_printf(s, "with internal pull-%s ",
pupd ? "up" : "down");
diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c
index d81d7b46116c..b880e44b8221 100644
--- a/drivers/pinctrl/pinctrl-utils.c
+++ b/drivers/pinctrl/pinctrl-utils.c
@@ -70,8 +70,8 @@ int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
if (WARN_ON(*num_maps == *reserved_maps))
return -ENOSPC;
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
+ dup_configs = kmemdup_array(configs, num_configs,
+ sizeof(*dup_configs), GFP_KERNEL);
if (!dup_configs)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 0e8de27d0de8..caa8a2ca3e68 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -1202,6 +1202,7 @@ static const struct of_device_id zynq_pinctrl_of_match[] = {
{ .compatible = "xlnx,pinctrl-zynq" },
{ }
};
+MODULE_DEVICE_TABLE(of, zynq_pinctrl_of_match);
static struct platform_driver zynq_pinctrl_driver = {
.driver = {
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index aae71a37219b..02033ea1c643 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -442,8 +442,7 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
gname = pctlops->get_group_name(pctldev,
setting->data.mux.group);
dev_err_probe(pctldev->dev, ret,
- "could not request pin %d (%s) from group %s "
- " on device %s\n",
+ "could not request pin %d (%s) from group %s on device %s\n",
pins[i], pname, gname,
pinctrl_dev_get_name(pctldev));
goto err_pin_request;
@@ -526,9 +525,7 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting)
gname = pctlops->get_group_name(pctldev,
setting->data.mux.group);
dev_warn(pctldev->dev,
- "not freeing pin %d (%s) as part of "
- "deactivating group %s - it is already "
- "used for some other setting",
+ "not freeing pin %d (%s) as part of deactivating group %s - it is already used for some other setting",
pins[i], desc->name, gname);
}
}
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd.c b/drivers/pinctrl/realtek/pinctrl-rtd.c
index 208896593b61..244060486332 100644
--- a/drivers/pinctrl/realtek/pinctrl-rtd.c
+++ b/drivers/pinctrl/realtek/pinctrl-rtd.c
@@ -533,7 +533,7 @@ static const struct pinconf_ops rtd_pinconf_ops = {
.pin_config_group_set = rtd_pin_config_group_set,
};
-static struct regmap_config rtd_pinctrl_regmap_config = {
+static const struct regmap_config rtd_pinctrl_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 632180570b70..5a403915fed2 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
@@ -51,17 +52,15 @@
#define PIN_CFG_IO_VMC_QSPI BIT(7)
#define PIN_CFG_IO_VMC_ETH0 BIT(8)
#define PIN_CFG_IO_VMC_ETH1 BIT(9)
-#define PIN_CFG_FILONOFF BIT(10)
-#define PIN_CFG_FILNUM BIT(11)
-#define PIN_CFG_FILCLKSEL BIT(12)
-#define PIN_CFG_IOLH_C BIT(13)
-#define PIN_CFG_SOFT_PS BIT(14)
-#define PIN_CFG_OEN BIT(15)
-#define PIN_CFG_NOGPIO_INT BIT(16)
-#define PIN_CFG_NOD BIT(17) /* N-ch Open Drain */
-#define PIN_CFG_SMT BIT(18) /* Schmitt-trigger input control */
-#define PIN_CFG_ELC BIT(19)
-#define PIN_CFG_IOLH_RZV2H BIT(20)
+#define PIN_CFG_NF BIT(10) /* Digital noise filter */
+#define PIN_CFG_IOLH_C BIT(11)
+#define PIN_CFG_SOFT_PS BIT(12)
+#define PIN_CFG_OEN BIT(13)
+#define PIN_CFG_NOGPIO_INT BIT(14)
+#define PIN_CFG_NOD BIT(15) /* N-ch Open Drain */
+#define PIN_CFG_SMT BIT(16) /* Schmitt-trigger input control */
+#define PIN_CFG_ELC BIT(17)
+#define PIN_CFG_IOLH_RZV2H BIT(18)
#define RZG2L_SINGLE_PIN BIT_ULL(63) /* Dedicated pin */
#define RZG2L_VARIABLE_CFG BIT_ULL(62) /* Variable cfg for port pins */
@@ -69,9 +68,7 @@
#define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \
(PIN_CFG_IOLH_##group | \
PIN_CFG_PUPD | \
- PIN_CFG_FILONOFF | \
- PIN_CFG_FILNUM | \
- PIN_CFG_FILCLKSEL)
+ PIN_CFG_NF)
#define RZG2L_MPXED_PIN_FUNCS (RZG2L_MPXED_COMMON_PIN_FUNCS(A) | \
PIN_CFG_SR)
@@ -84,10 +81,7 @@
PIN_CFG_SR | \
PIN_CFG_SMT)
-#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | \
- PIN_CFG_FILONOFF | \
- PIN_CFG_FILNUM | \
- PIN_CFG_FILCLKSEL)
+#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | PIN_CFG_NF)
#define PIN_CFG_PIN_MAP_MASK GENMASK_ULL(61, 54)
#define PIN_CFG_PIN_REG_MASK GENMASK_ULL(53, 46)
@@ -394,13 +388,13 @@ static const u64 r9a09g057_variable_pin_cfg[] = {
#ifdef CONFIG_RISCV
static const u64 r9a07g043f_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(20, 0, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NF |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
RZG2L_VARIABLE_PIN_CFG_PACK(20, 1, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NF |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
RZG2L_VARIABLE_PIN_CFG_PACK(20, 2, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NF |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
RZG2L_VARIABLE_PIN_CFG_PACK(20, 3, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
@@ -431,7 +425,7 @@ static const u64 r9a07g043f_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(24, 4, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_NOGPIO_INT),
RZG2L_VARIABLE_PIN_CFG_PACK(24, 5, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NF |
PIN_CFG_NOGPIO_INT),
};
#endif
@@ -528,8 +522,7 @@ static int rzg2l_map_add_config(struct pinctrl_map *map,
{
unsigned long *cfgs;
- cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
- GFP_KERNEL);
+ cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL);
if (!cfgs)
return -ENOMEM;
@@ -1261,7 +1254,9 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT_ENABLE:
- if (!pctrl->data->oen_read || !(cfg & PIN_CFG_OEN))
+ if (!(cfg & PIN_CFG_OEN))
+ return -EINVAL;
+ if (!pctrl->data->oen_read)
return -EOPNOTSUPP;
arg = pctrl->data->oen_read(pctrl, _pin);
if (!arg)
@@ -1390,9 +1385,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(_configs[i]);
+ arg = pinconf_to_config_argument(_configs[i]);
switch (param) {
case PIN_CONFIG_INPUT_ENABLE:
- arg = pinconf_to_config_argument(_configs[i]);
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
@@ -1401,8 +1396,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT_ENABLE:
- arg = pinconf_to_config_argument(_configs[i]);
- if (!pctrl->data->oen_write || !(cfg & PIN_CFG_OEN))
+ if (!(cfg & PIN_CFG_OEN))
+ return -EINVAL;
+ if (!pctrl->data->oen_write)
return -EOPNOTSUPP;
ret = pctrl->data->oen_write(pctrl, _pin, !!arg);
if (ret)
@@ -1410,12 +1406,10 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_POWER_SOURCE:
- settings.power_source = pinconf_to_config_argument(_configs[i]);
+ settings.power_source = arg;
break;
case PIN_CONFIG_SLEW_RATE:
- arg = pinconf_to_config_argument(_configs[i]);
-
if (!(cfg & PIN_CFG_SR) || arg > 1)
return -EINVAL;
@@ -1436,8 +1430,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- arg = pinconf_to_config_argument(_configs[i]);
-
if (!(cfg & PIN_CFG_IOLH_A) || hwcfg->drive_strength_ua)
return -EINVAL;
@@ -1457,12 +1449,10 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
!hwcfg->drive_strength_ua)
return -EINVAL;
- settings.drive_strength_ua = pinconf_to_config_argument(_configs[i]);
+ settings.drive_strength_ua = arg;
break;
case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS:
- arg = pinconf_to_config_argument(_configs[i]);
-
if (!(cfg & PIN_CFG_IOLH_B) || !hwcfg->iolh_groupb_oi[0])
return -EINVAL;
@@ -1480,7 +1470,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IOLH_RZV2H))
return -EINVAL;
- arg = pinconf_to_config_argument(_configs[i]);
if (arg > 3)
return -EINVAL;
rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, arg);
@@ -1883,8 +1872,7 @@ static const u64 r9a07g043_gpio_configs[] = {
#ifdef CONFIG_RISCV
/* The additional port pins below (P19 - P28) are available only on the RZ/Five SoC */
RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x06, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */
+ PIN_CFG_NF | PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */
RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x07), /* P20 */
RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x08, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P21 */
@@ -1892,8 +1880,7 @@ static const u64 r9a07g043_gpio_configs[] = {
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P22 */
RZG2L_GPIO_PORT_SPARSE_PACK_VARIABLE(0x3e, 0x0a), /* P23 */
RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x0b), /* P24 */
- RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_FILONOFF |
- PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NF |
PIN_CFG_NOGPIO_INT), /* P25 */
0x0, /* P26 */
0x0, /* P27 */
@@ -1971,8 +1958,7 @@ static const struct {
struct rzg2l_dedicated_configs rzg2l_pins[7];
} rzg2l_dedicated_pins = {
.common = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0,
- (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) },
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) },
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0,
(PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0,
@@ -2053,8 +2039,7 @@ static const struct {
};
static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x0, 0, (PIN_CFG_FILONOFF | PIN_CFG_FILNUM |
- PIN_CFG_FILCLKSEL)) },
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x0, 0, PIN_CFG_NF) },
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_IOLH_A | PIN_CFG_IEN |
PIN_CFG_SOFT_PS)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x1, 1, (PIN_CFG_IOLH_A | PIN_CFG_SOFT_PS)) },
@@ -2093,8 +2078,7 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
};
static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_FILONOFF | PIN_CFG_FILNUM |
- PIN_CFG_FILCLKSEL)) },
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) },
{ "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
PIN_CFG_IEN)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
@@ -2596,16 +2580,13 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
return -EPROBE_DEFER;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &of_args);
- if (ret) {
- dev_err(pctrl->dev, "Unable to parse gpio-ranges\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "Unable to parse gpio-ranges\n");
if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
- of_args.args[2] != pctrl->data->n_port_pins) {
- dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n");
- return -EINVAL;
- }
+ of_args.args[2] != pctrl->data->n_port_pins)
+ return dev_err_probe(pctrl->dev, -EINVAL,
+ "gpio-ranges does not match selected SOC\n");
chip->names = pctrl->data->port_pins;
chip->request = rzg2l_gpio_request;
@@ -2623,7 +2604,7 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &rzg2l_gpio_irqchip);
- girq->fwnode = of_node_to_fwnode(np);
+ girq->fwnode = dev_fwnode(pctrl->dev);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = rzg2l_gpio_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = rzg2l_gpio_populate_parent_fwspec;
@@ -2637,10 +2618,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
pctrl->gpio_range.name = chip->label;
pctrl->gpio_range.gc = chip;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
- if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO controller\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "failed to add GPIO controller\n");
dev_dbg(pctrl->dev, "Registered gpio controller\n");
@@ -2726,22 +2705,16 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
ret = devm_pinctrl_register_and_init(pctrl->dev, &pctrl->desc, pctrl,
&pctrl->pctl);
- if (ret) {
- dev_err(pctrl->dev, "pinctrl registration failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "pinctrl registration failed\n");
ret = pinctrl_enable(pctrl->pctl);
- if (ret) {
- dev_err(pctrl->dev, "pinctrl enable failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "pinctrl enable failed\n");
ret = rzg2l_gpio_register(pctrl);
- if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO chip: %i\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(pctrl->dev, ret, "failed to add GPIO chip\n");
return 0;
}
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 0cae5472ac67..4062c56619f5 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -196,8 +196,7 @@ static int rzv2m_map_add_config(struct pinctrl_map *map,
{
unsigned long *cfgs;
- cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
- GFP_KERNEL);
+ cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL);
if (!cfgs)
return -ENOMEM;
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 03e9bdbc82b9..29d16c9c1bd1 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -83,8 +83,7 @@ static int sh_pfc_map_add_config(struct pinctrl_map *map,
{
unsigned long *cfgs;
- cfgs = kmemdup(configs, num_configs * sizeof(*cfgs),
- GFP_KERNEL);
+ cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL);
if (cfgs == NULL)
return -ENOMEM;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index 85ddf49a5188..d3d8672f74dc 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -40,6 +40,19 @@ static const struct samsung_pin_bank_type bank_type_alive = {
#define S5P_OTHERS_RET_MMC (1 << 29)
#define S5P_OTHERS_RET_UART (1 << 28)
+#define S5P_PIN_PULL_DISABLE 0
+#define S5P_PIN_PULL_DOWN 1
+#define S5P_PIN_PULL_UP 2
+
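+/*
+ * S5Pv210 encodes pull-up as 2 rather than the Exynos default of 3,
+ * hence this per-variant ->pud_value_init() hook.
+ */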
+static void s5pv210_pud_value_init(struct samsung_pinctrl_drv_data *drvdata)
+{
+ unsigned int *pud_val = drvdata->pud_val;
+
+ pud_val[PUD_PULL_DISABLE] = S5P_PIN_PULL_DISABLE;
+ pud_val[PUD_PULL_DOWN] = S5P_PIN_PULL_DOWN;
+ pud_val[PUD_PULL_UP] = S5P_PIN_PULL_UP;
+}
+
static void s5pv210_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
{
void __iomem *clk_base = (void __iomem *)drvdata->retention_ctrl->priv;
@@ -133,6 +146,7 @@ static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(s5pv210_pin_bank),
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
+ .pud_value_init = s5pv210_pud_value_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
.retention_data = &s5pv210_retention_data,
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index ce5e6783b5b9..b79c211c0374 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -662,7 +662,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
__init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
{
struct device *dev = d->dev;
- struct device_node *wkup_np = NULL;
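+ /* the wkup_np reference is dropped automatically via __free(device_node) */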
+ struct device_node *wkup_np __free(device_node) = NULL;
struct device_node *np;
struct samsung_pin_bank *bank;
struct exynos_weint_data *weint_data;
@@ -692,17 +692,14 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
bank->irq_chip = devm_kmemdup(dev, irq_chip, sizeof(*irq_chip),
GFP_KERNEL);
- if (!bank->irq_chip) {
- of_node_put(wkup_np);
+ if (!bank->irq_chip)
return -ENOMEM;
- }
bank->irq_chip->chip.name = bank->name;
bank->irq_domain = irq_domain_create_linear(bank->fwnode,
bank->nr_pins, &exynos_eint_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "wkup irq domain add failed\n");
- of_node_put(wkup_np);
return -ENXIO;
}
@@ -715,10 +712,8 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
weint_data = devm_kcalloc(dev,
bank->nr_pins, sizeof(*weint_data),
GFP_KERNEL);
- if (!weint_data) {
- of_node_put(wkup_np);
+ if (!weint_data)
return -ENOMEM;
- }
for (idx = 0; idx < bank->nr_pins; ++idx) {
irq = irq_of_parse_and_map(to_of_node(bank->fwnode), idx);
@@ -735,13 +730,10 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
}
}
- if (!muxed_banks) {
- of_node_put(wkup_np);
+ if (!muxed_banks)
return 0;
- }
irq = irq_of_parse_and_map(wkup_np, 0);
- of_node_put(wkup_np);
if (!irq) {
dev_err(dev, "irq number for muxed EINTs not found\n");
return 0;
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index c5d92db4fdb1..68715c09baa9 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -63,6 +63,10 @@
#define EINT_CON_MASK 0xF
#define EINT_CON_LEN 4
+#define S3C_PIN_PULL_DISABLE 0
+#define S3C_PIN_PULL_DOWN 1
+#define S3C_PIN_PULL_UP 2
+
static const struct samsung_pin_bank_type bank_type_4bit_off = {
.fld_width = { 4, 1, 2, 0, 2, 2, },
.reg_offset = { 0x00, 0x04, 0x08, 0, 0x0c, 0x10, },
@@ -255,6 +259,15 @@ static int s3c64xx_irq_get_trigger(unsigned int type)
return trigger;
}
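+/*
+ * Like S5Pv210, S3C64xx encodes pull-up as 2 instead of the Exynos
+ * default of 3, so it overrides ->pud_value_init().
+ */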
+static void s3c64xx_pud_value_init(struct samsung_pinctrl_drv_data *drvdata)
+{
+ unsigned int *pud_val = drvdata->pud_val;
+
+ pud_val[PUD_PULL_DISABLE] = S3C_PIN_PULL_DISABLE;
+ pud_val[PUD_PULL_DOWN] = S3C_PIN_PULL_DOWN;
+ pud_val[PUD_PULL_UP] = S3C_PIN_PULL_UP;
+}
+
static void s3c64xx_irq_set_handler(struct irq_data *d, unsigned int type)
{
/* Edge- and level-triggered interrupts need different handlers */
@@ -797,6 +810,7 @@ static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {
.nr_banks = ARRAY_SIZE(s3c64xx_pin_banks0),
.eint_gpio_init = s3c64xx_eint_gpio_init,
.eint_wkup_init = s3c64xx_eint_eint0_init,
+ .pud_value_init = s3c64xx_pud_value_init,
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 623df65a5d6f..675efa5d86a9 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -122,8 +122,8 @@ static int add_map_configs(struct device *dev, struct pinctrl_map **map,
if (WARN_ON(*num_maps == *reserved_maps))
return -ENOSPC;
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
+ dup_configs = kmemdup_array(configs, num_configs, sizeof(*dup_configs),
+ GFP_KERNEL);
if (!dup_configs)
return -ENOMEM;
@@ -251,7 +251,6 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct samsung_pinctrl_drv_data *drvdata;
unsigned reserved_maps;
- struct device_node *np;
int ret;
drvdata = pinctrl_dev_get_drvdata(pctldev);
@@ -266,12 +265,11 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
&reserved_maps,
num_maps);
- for_each_child_of_node(np_config, np) {
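+ /* the scoped iterator releases the child node on every exit path */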
+ for_each_child_of_node_scoped(np_config, np) {
ret = samsung_dt_subnode_to_map(drvdata, pctldev->dev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
samsung_dt_free_map(pctldev, *map, *num_maps);
- of_node_put(np);
return ret;
}
}
@@ -823,16 +821,16 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
struct device_node *func_np;
if (!of_get_child_count(cfg_np)) {
- if (!of_find_property(cfg_np,
- "samsung,pin-function", NULL))
+ if (!of_property_present(cfg_np,
+ "samsung,pin-function"))
continue;
++func_cnt;
continue;
}
for_each_child_of_node(cfg_np, func_np) {
- if (!of_find_property(func_np,
- "samsung,pin-function", NULL))
+ if (!of_property_present(func_np,
+ "samsung,pin-function"))
continue;
++func_cnt;
}
@@ -849,16 +847,12 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
* and create pin groups and pin function lists.
*/
func_cnt = 0;
- for_each_child_of_node(dev_np, cfg_np) {
- struct device_node *func_np;
-
+ for_each_child_of_node_scoped(dev_np, cfg_np) {
if (!of_get_child_count(cfg_np)) {
ret = samsung_pinctrl_create_function(dev, drvdata,
cfg_np, func);
- if (ret < 0) {
- of_node_put(cfg_np);
+ if (ret < 0)
return ERR_PTR(ret);
- }
if (ret > 0) {
++func;
++func_cnt;
@@ -866,14 +860,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
continue;
}
- for_each_child_of_node(cfg_np, func_np) {
+ for_each_child_of_node_scoped(cfg_np, func_np) {
ret = samsung_pinctrl_create_function(dev, drvdata,
func_np, func);
- if (ret < 0) {
- of_node_put(func_np);
- of_node_put(cfg_np);
+ if (ret < 0)
return ERR_PTR(ret);
- }
if (ret > 0) {
++func;
++func_cnt;
@@ -997,6 +988,77 @@ static int samsung_pinctrl_unregister(struct platform_device *pdev,
return 0;
}
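+/*
+ * Default PUD register encoding (0 = disable, 1 = pull-down, 3 = pull-up),
+ * used when a SoC variant does not provide its own ->pud_value_init().
+ */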
+static void samsung_pud_value_init(struct samsung_pinctrl_drv_data *drvdata)
+{
+ unsigned int *pud_val = drvdata->pud_val;
+
+ pud_val[PUD_PULL_DISABLE] = EXYNOS_PIN_PUD_PULL_DISABLE;
+ pud_val[PUD_PULL_DOWN] = EXYNOS_PIN_PID_PULL_DOWN;
+ pud_val[PUD_PULL_UP] = EXYNOS_PIN_PID_PULL_UP;
+}
+
+/*
+ * Enable or disable the pull-down or pull-up for a GPIO pin via the
+ * bank's PUD register.
+ */
+static void samsung_gpio_set_pud(struct gpio_chip *gc, unsigned int offset,
+ unsigned int value)
+{
+ struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ const struct samsung_pin_bank_type *type = bank->type;
+ void __iomem *reg;
+ unsigned int data, mask;
+
+ reg = bank->pctl_base + bank->pctl_offset;
+ data = readl(reg + type->reg_offset[PINCFG_TYPE_PUD]);
+ mask = (1 << type->fld_width[PINCFG_TYPE_PUD]) - 1;
+ data &= ~(mask << (offset * type->fld_width[PINCFG_TYPE_PUD]));
+ data |= value << (offset * type->fld_width[PINCFG_TYPE_PUD]);
+ writel(data, reg + type->reg_offset[PINCFG_TYPE_PUD]);
+}
+
+/*
+ * Translate a gpiolib .set_config() request into the matching PUD
+ * register value and apply it.
+ */
+static int samsung_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ struct samsung_pinctrl_drv_data *drvdata = bank->drvdata;
+ unsigned int value;
+ int ret = 0;
+ unsigned long flags;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ value = drvdata->pud_val[PUD_PULL_DISABLE];
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value = drvdata->pud_val[PUD_PULL_DOWN];
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value = drvdata->pud_val[PUD_PULL_UP];
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(drvdata->dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ raw_spin_lock_irqsave(&bank->slock, flags);
+ samsung_gpio_set_pud(gc, offset, value);
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ clk_disable(drvdata->pclk);
+
+ return ret;
+}
+
static const struct gpio_chip samsung_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
@@ -1006,6 +1068,7 @@ static const struct gpio_chip samsung_gpiolib_chip = {
.direction_output = samsung_gpio_direction_output,
.to_irq = samsung_gpio_to_irq,
.add_pin_ranges = samsung_add_pin_ranges,
+ .set_config = samsung_gpio_set_config,
.owner = THIS_MODULE,
};
@@ -1237,6 +1300,11 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
if (ctrl->eint_wkup_init)
ctrl->eint_wkup_init(drvdata);
+ if (ctrl->pud_value_init)
+ ctrl->pud_value_init(drvdata);
+ else
+ samsung_pud_value_init(drvdata);
+
ret = samsung_gpiolib_register(pdev, drvdata);
if (ret)
goto err_unregister;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index d50ba6f07d5d..a1e7377bd890 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -61,6 +61,25 @@ enum pincfg_type {
#define PIN_CON_FUNC_INPUT 0x0
#define PIN_CON_FUNC_OUTPUT 0x1
+/* Values for the pin PUD register */
+#define EXYNOS_PIN_PUD_PULL_DISABLE 0x0
+#define EXYNOS_PIN_PID_PULL_DOWN 0x1
+#define EXYNOS_PIN_PID_PULL_UP 0x3
+
+/**
+ * enum pud_index - Possible index values to access the pud_val array.
+ * @PUD_PULL_DISABLE: Index for the pull disable value
+ * @PUD_PULL_DOWN: Index for the pull-down enable value
+ * @PUD_PULL_UP: Index for the pull-up enable value
+ * @PUD_MAX: Maximum value of the index
+ */
+enum pud_index {
+ PUD_PULL_DISABLE,
+ PUD_PULL_DOWN,
+ PUD_PULL_UP,
+ PUD_MAX,
+};
+
/**
* enum eint_type - possible external interrupt types.
* @EINT_TYPE_NONE: bank does not support external interrupts
@@ -261,6 +280,7 @@ struct samsung_pin_ctrl {
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
+ void (*pud_value_init)(struct samsung_pinctrl_drv_data *drvdata);
void (*suspend)(struct samsung_pinctrl_drv_data *);
void (*resume)(struct samsung_pinctrl_drv_data *);
};
@@ -307,6 +327,7 @@ struct samsung_pinctrl_drv_data {
struct samsung_pin_bank *pin_banks;
unsigned int nr_banks;
unsigned int nr_pins;
+ unsigned int pud_val[PUD_MAX];
struct samsung_retention_ctrl *retention_ctrl;
diff --git a/drivers/pinctrl/sophgo/Kconfig b/drivers/pinctrl/sophgo/Kconfig
new file mode 100644
index 000000000000..b14792ee46fc
--- /dev/null
+++ b/drivers/pinctrl/sophgo/Kconfig
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Sophgo SoC PINCTRL drivers
+#
+
+config PINCTRL_SOPHGO_CV18XX
+ bool
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+
+config PINCTRL_SOPHGO_CV1800B
+ tristate "Sophgo CV1800B SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_CV18XX
+ help
+ Say Y to select the pinctrl driver for CV1800B SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-cv1800b.
+
+config PINCTRL_SOPHGO_CV1812H
+ tristate "Sophgo CV1812H SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_CV18XX
+ help
+ Say Y to select the pinctrl driver for CV1812H SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-cv1812h.
+
+config PINCTRL_SOPHGO_SG2000
+ tristate "Sophgo SG2000 SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_CV18XX
+ help
+ Say Y to select the pinctrl driver for SG2000 SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-sg2000.
+
+config PINCTRL_SOPHGO_SG2002
+ tristate "Sophgo SG2000 SoC Pinctrl driver"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on OF
+ select PINCTRL_SOPHGO_CV18XX
+ help
+ Say Y to select the pinctrl driver for SG2002 SoC.
+ This pin controller allows selecting the mux function for
+ each pin. This driver can also be built as a module called
+ pinctrl-sg2002.
diff --git a/drivers/pinctrl/sophgo/Makefile b/drivers/pinctrl/sophgo/Makefile
new file mode 100644
index 000000000000..4113a5c9191b
--- /dev/null
+++ b/drivers/pinctrl/sophgo/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PINCTRL_SOPHGO_CV18XX) += pinctrl-cv18xx.o
+obj-$(CONFIG_PINCTRL_SOPHGO_CV1800B) += pinctrl-cv1800b.o
+obj-$(CONFIG_PINCTRL_SOPHGO_CV1812H) += pinctrl-cv1812h.o
+obj-$(CONFIG_PINCTRL_SOPHGO_SG2000) += pinctrl-sg2000.o
+obj-$(CONFIG_PINCTRL_SOPHGO_SG2002) += pinctrl-sg2002.o
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv1800b.c b/drivers/pinctrl/sophgo/pinctrl-cv1800b.c
new file mode 100644
index 000000000000..3322906689e7
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-cv1800b.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo CV1800B SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-cv1800b.h>
+
+#include "pinctrl-cv18xx.h"
+
+enum CV1800B_POWER_DOMAIN {
+ VDD18A_AUD = 0,
+ VDD18A_USB_PLL_ETH_CSI = 1,
+ VDD33A_ETH_USB_SD1 = 2,
+ VDDIO_RTC = 3,
+ VDDIO_SD0_SPI = 4
+};
+
+static const char *const cv1800b_power_domain_desc[] = {
+ [VDD18A_AUD] = "VDD18A_AUD",
+ [VDD18A_USB_PLL_ETH_CSI] = "VDD18A_USB_PLL_ETH_CSI",
+ [VDD33A_ETH_USB_SD1] = "VDD33A_ETH_USB_SD1",
+ [VDDIO_RTC] = "VDDIO_RTC",
+ [VDDIO_SD0_SPI] = "VDDIO_SD0_SPI",
+};
+
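+/*
+ * Typical bias resistances (in ohms); the value depends on the pin's
+ * IO type and, for 1.8/3.3 V capable pads, on the power-domain state.
+ */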
+static int cv1800b_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 79000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 60000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 60000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int cv1800b_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 87000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 61000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 62000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
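+/* Output drive (current) maps indexed by the drive-strength field value, one table per IO type/voltage combination. */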
+static const u32 cv1800b_1v8_oc_map[] = {
+ 12800,
+ 25300,
+ 37400,
+ 49000
+};
+
+static const u32 cv1800b_18od33_1v8_oc_map[] = {
+ 7800,
+ 11700,
+ 15500,
+ 19200,
+ 23000,
+ 26600,
+ 30200,
+ 33700
+};
+
+static const u32 cv1800b_18od33_3v3_oc_map[] = {
+ 5500,
+ 8200,
+ 10800,
+ 13400,
+ 16100,
+ 18700,
+ 21200,
+ 23700
+};
+
+static const u32 cv1800b_eth_oc_map[] = {
+ 15700,
+ 17800
+};
+
+static int cv1800b_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = cv1800b_1v8_oc_map;
+ return ARRAY_SIZE(cv1800b_1v8_oc_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = cv1800b_18od33_1v8_oc_map;
+ return ARRAY_SIZE(cv1800b_18od33_1v8_oc_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = cv1800b_18od33_3v3_oc_map;
+ return ARRAY_SIZE(cv1800b_18od33_3v3_oc_map);
+ }
+ }
+
+ if (type == IO_TYPE_ETH) {
+ *map = cv1800b_eth_oc_map;
+ return ARRAY_SIZE(cv1800b_eth_oc_map);
+ }
+
+ return -ENOTSUPP;
+}
+
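+/* Schmitt-trigger threshold maps, selected per IO type and power state like the drive maps above. */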
+static const u32 cv1800b_1v8_schmitt_map[] = {
+ 0,
+ 970000,
+ 1040000
+};
+
+static const u32 cv1800b_18od33_1v8_schmitt_map[] = {
+ 0,
+ 1070000
+};
+
+static const u32 cv1800b_18od33_3v3_schmitt_map[] = {
+ 0,
+ 1100000
+};
+
+static int cv1800b_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = cv1800b_1v8_schmitt_map;
+ return ARRAY_SIZE(cv1800b_1v8_schmitt_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = cv1800b_18od33_1v8_schmitt_map;
+ return ARRAY_SIZE(cv1800b_18od33_1v8_schmitt_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = cv1800b_18od33_3v3_schmitt_map;
+ return ARRAY_SIZE(cv1800b_18od33_3v3_schmitt_map);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct cv1800_vddio_cfg_ops cv1800b_vddio_cfg_ops = {
+ .get_pull_up = cv1800b_get_pull_up,
+ .get_pull_down = cv1800b_get_pull_down,
+ .get_oc_map = cv1800b_get_oc_map,
+ .get_schmitt_map = cv1800b_get_schmitt_map,
+};
+
+static const struct pinctrl_pin_desc cv1800b_pins[] = {
+ PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"),
+ PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"),
+ PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"),
+ PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"),
+ PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"),
+ PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"),
+ PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"),
+ PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"),
+ PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"),
+ PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"),
+ PINCTRL_PIN(PIN_SPINOR_HOLD_X, "SPINOR_HOLD_X"),
+ PINCTRL_PIN(PIN_SPINOR_SCK, "SPINOR_SCK"),
+ PINCTRL_PIN(PIN_SPINOR_MOSI, "SPINOR_MOSI"),
+ PINCTRL_PIN(PIN_SPINOR_WP_X, "SPINOR_WP_X"),
+ PINCTRL_PIN(PIN_SPINOR_MISO, "SPINOR_MISO"),
+ PINCTRL_PIN(PIN_SPINOR_CS_X, "SPINOR_CS_X"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"),
+ PINCTRL_PIN(PIN_AUX0, "AUX0"),
+ PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"),
+ PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"),
+ PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"),
+ PINCTRL_PIN(PIN_SD1_GPIO0, "SD1_GPIO0"),
+ PINCTRL_PIN(PIN_SD1_GPIO1, "SD1_GPIO1"),
+ PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"),
+ PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"),
+ PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"),
+ PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"),
+ PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"),
+ PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"),
+ PINCTRL_PIN(PIN_ADC1, "ADC1"),
+ PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"),
+ PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"),
+ PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"),
+ PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"),
+ PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"),
+ PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"),
+ PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"),
+ PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"),
+ PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"),
+ PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"),
+ PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"),
+ PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"),
+ PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"),
+ PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"),
+ PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"),
+ PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"),
+};
+
+static const struct cv1800_pin cv1800b_pin_data[ARRAY_SIZE(cv1800b_pins)] = {
+ CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_AUD,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x12c, 6),
+ CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x000, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa00),
+ CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x004, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa04),
+ CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x008, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa08),
+ CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x00c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa0c),
+ CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x010, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa10),
+ CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x014, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa14),
+ CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x018, 3,
+ CV1800_PINCONF_AREA_SYS, 0x900),
+ CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x01c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x904),
+ CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x020, 3,
+ CV1800_PINCONF_AREA_SYS, 0x908),
+ CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x024, 7,
+ CV1800_PINCONF_AREA_SYS, 0x90c),
+ CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x028, 7,
+ CV1800_PINCONF_AREA_SYS, 0x910),
+ CV1800_GENERAL_PIN(PIN_SPINOR_HOLD_X, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x02c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x914),
+ CV1800_GENERAL_PIN(PIN_SPINOR_SCK, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x030, 3,
+ CV1800_PINCONF_AREA_SYS, 0x918),
+ CV1800_GENERAL_PIN(PIN_SPINOR_MOSI, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x034, 3,
+ CV1800_PINCONF_AREA_SYS, 0x91c),
+ CV1800_GENERAL_PIN(PIN_SPINOR_WP_X, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x038, 3,
+ CV1800_PINCONF_AREA_SYS, 0x920),
+ CV1800_GENERAL_PIN(PIN_SPINOR_MISO, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x03c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x924),
+ CV1800_GENERAL_PIN(PIN_SPINOR_CS_X, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x040, 3,
+ CV1800_PINCONF_AREA_SYS, 0x928),
+ CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x04c, 7,
+ CV1800_PINCONF_AREA_SYS, 0x934),
+ CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x050, 7,
+ CV1800_PINCONF_AREA_SYS, 0x938),
+ CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_SD0_SPI,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x054, 7,
+ CV1800_PINCONF_AREA_SYS, 0x93c),
+ CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x05c, 0,
+ CV1800_PINCONF_AREA_RTC, 0x004),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x068, 3,
+ CV1800_PINCONF_AREA_RTC, 0x010),
+ CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x074, 0,
+ CV1800_PINCONF_AREA_RTC, 0x020),
+ CV1800_GENERAL_PIN(PIN_SD1_GPIO0, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x088, 7,
+ CV1800_PINCONF_AREA_RTC, 0x034),
+ CV1800_GENERAL_PIN(PIN_SD1_GPIO1, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x084, 7,
+ CV1800_PINCONF_AREA_RTC, 0x030),
+ CV1800_GENERAL_PIN(PIN_SD1_D3, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x08c, 7,
+ CV1800_PINCONF_AREA_RTC, 0x038),
+ CV1800_GENERAL_PIN(PIN_SD1_D2, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x090, 7,
+ CV1800_PINCONF_AREA_RTC, 0x03c),
+ CV1800_GENERAL_PIN(PIN_SD1_D1, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x094, 7,
+ CV1800_PINCONF_AREA_RTC, 0x040),
+ CV1800_GENERAL_PIN(PIN_SD1_D0, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x098, 7,
+ CV1800_PINCONF_AREA_RTC, 0x044),
+ CV1800_GENERAL_PIN(PIN_SD1_CMD, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x09c, 7,
+ CV1800_PINCONF_AREA_RTC, 0x048),
+ CV1800_GENERAL_PIN(PIN_SD1_CLK, VDD33A_ETH_USB_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0a0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x04c),
+ CV1800_GENERAL_PIN(PIN_ADC1, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a8, 6,
+ CV1800_PINCONF_AREA_SYS, 0x804),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ac, 6,
+ CV1800_PINCONF_AREA_SYS, 0x808),
+ CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x0c0, 7),
+ CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x0c4, 7),
+ CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x0c8, 7),
+ CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x0cc, 7),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0d4, 7,
+ CV1800_PINCONF_AREA_SYS, 0x0bc, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc04),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0d8, 7,
+ CV1800_PINCONF_AREA_SYS, 0x0b8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc08),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0dc, 7,
+ CV1800_PINCONF_AREA_SYS, 0x0b0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc0c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e0, 7,
+ CV1800_PINCONF_AREA_SYS, 0x0b4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc10),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc14),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc18),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ec, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc1c),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc20),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc24),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_USB_PLL_ETH_CSI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc28),
+ CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_AUD,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x120, 5),
+};
+
+static const struct cv1800_pinctrl_data cv1800b_pindata = {
+ .pins = cv1800b_pins,
+ .pindata = cv1800b_pin_data,
+ .pdnames = cv1800b_power_domain_desc,
+ .vddio_ops = &cv1800b_vddio_cfg_ops,
+ .npins = ARRAY_SIZE(cv1800b_pins),
+ .npd = ARRAY_SIZE(cv1800b_power_domain_desc),
+};
+
+static const struct of_device_id cv1800b_pinctrl_ids[] = {
+ { .compatible = "sophgo,cv1800b-pinctrl", .data = &cv1800b_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800b_pinctrl_ids);
+
+static struct platform_driver cv1800b_pinctrl_driver = {
+ .probe = cv1800_pinctrl_probe,
+ .driver = {
+ .name = "cv1800b-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = cv1800b_pinctrl_ids,
+ },
+};
+module_platform_driver(cv1800b_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the CV1800B series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv1812h.c b/drivers/pinctrl/sophgo/pinctrl-cv1812h.c
new file mode 100644
index 000000000000..5632290b46fa
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-cv1812h.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo CV1812H SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-cv1812h.h>
+
+#include "pinctrl-cv18xx.h"
+
+enum CV1812H_POWER_DOMAIN {
+ VDD18A_EPHY = 0,
+ VDD18A_MIPI = 1,
+ VDDIO18_1 = 2,
+ VDDIO_EMMC = 3,
+ VDDIO_RTC = 4,
+ VDDIO_SD0 = 5,
+ VDDIO_SD1 = 6,
+ VDDIO_VIVO = 7
+};
+
+static const char *const cv1812h_power_domain_desc[] = {
+ [VDD18A_EPHY] = "VDD18A_EPHY",
+ [VDD18A_MIPI] = "VDD18A_MIPI",
+ [VDDIO18_1] = "VDDIO18_1",
+ [VDDIO_EMMC] = "VDDIO_EMMC",
+ [VDDIO_RTC] = "VDDIO_RTC",
+ [VDDIO_SD0] = "VDDIO_SD0",
+ [VDDIO_SD1] = "VDDIO_SD1",
+ [VDDIO_VIVO] = "VDDIO_VIVO",
+};
+
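+/*
+ * The bias, drive-strength and Schmitt helpers below mirror the CV1800B
+ * driver; only the power-domain list differs.
+ */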
+static int cv1812h_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 79000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 60000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 60000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int cv1812h_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 87000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 61000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 62000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 cv1812h_1v8_oc_map[] = {
+ 12800,
+ 25300,
+ 37400,
+ 49000
+};
+
+static const u32 cv1812h_18od33_1v8_oc_map[] = {
+ 7800,
+ 11700,
+ 15500,
+ 19200,
+ 23000,
+ 26600,
+ 30200,
+ 33700
+};
+
+static const u32 cv1812h_18od33_3v3_oc_map[] = {
+ 5500,
+ 8200,
+ 10800,
+ 13400,
+ 16100,
+ 18700,
+ 21200,
+ 23700
+};
+
+static const u32 cv1812h_eth_oc_map[] = {
+ 15700,
+ 17800
+};
+
+static int cv1812h_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = cv1812h_1v8_oc_map;
+ return ARRAY_SIZE(cv1812h_1v8_oc_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = cv1812h_18od33_1v8_oc_map;
+ return ARRAY_SIZE(cv1812h_18od33_1v8_oc_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = cv1812h_18od33_3v3_oc_map;
+ return ARRAY_SIZE(cv1812h_18od33_3v3_oc_map);
+ }
+ }
+
+ if (type == IO_TYPE_ETH) {
+ *map = cv1812h_eth_oc_map;
+ return ARRAY_SIZE(cv1812h_eth_oc_map);
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 cv1812h_1v8_schmitt_map[] = {
+ 0,
+ 970000,
+ 1040000
+};
+
+static const u32 cv1812h_18od33_1v8_schmitt_map[] = {
+ 0,
+ 1070000
+};
+
+static const u32 cv1812h_18od33_3v3_schmitt_map[] = {
+ 0,
+ 1100000
+};
+
+static int cv1812h_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = cv1812h_1v8_schmitt_map;
+ return ARRAY_SIZE(cv1812h_1v8_schmitt_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = cv1812h_18od33_1v8_schmitt_map;
+ return ARRAY_SIZE(cv1812h_18od33_1v8_schmitt_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = cv1812h_18od33_3v3_schmitt_map;
+ return ARRAY_SIZE(cv1812h_18od33_3v3_schmitt_map);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct cv1800_vddio_cfg_ops cv1812h_vddio_cfg_ops = {
+ .get_pull_up = cv1812h_get_pull_up,
+ .get_pull_down = cv1812h_get_pull_down,
+ .get_oc_map = cv1812h_get_oc_map,
+ .get_schmitt_map = cv1812h_get_schmitt_map,
+};
+
+static const struct pinctrl_pin_desc cv1812h_pins[] = {
+ PINCTRL_PIN(PIN_MIPI_TXM4, "MIPI_TXM4"),
+ PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"),
+ PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"),
+ PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"),
+ PINCTRL_PIN(PIN_VIVO_D2, "VIVO_D2"),
+ PINCTRL_PIN(PIN_VIVO_D3, "VIVO_D3"),
+ PINCTRL_PIN(PIN_VIVO_D10, "VIVO_D10"),
+ PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"),
+ PINCTRL_PIN(PIN_MIPI_TXP3, "MIPI_TXP3"),
+ PINCTRL_PIN(PIN_MIPI_TXM3, "MIPI_TXM3"),
+ PINCTRL_PIN(PIN_MIPI_TXP4, "MIPI_TXP4"),
+ PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"),
+ PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"),
+ PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"),
+ PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"),
+ PINCTRL_PIN(PIN_MIPIRX5N, "MIPIRX5N"),
+ PINCTRL_PIN(PIN_VIVO_D1, "VIVO_D1"),
+ PINCTRL_PIN(PIN_VIVO_D5, "VIVO_D5"),
+ PINCTRL_PIN(PIN_VIVO_D7, "VIVO_D7"),
+ PINCTRL_PIN(PIN_VIVO_D9, "VIVO_D9"),
+ PINCTRL_PIN(PIN_USB_ID, "USB_ID"),
+ PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"),
+ PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"),
+ PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"),
+ PINCTRL_PIN(PIN_CAM_PD0, "CAM_PD0"),
+ PINCTRL_PIN(PIN_CAM_MCLK0, "CAM_MCLK0"),
+ PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"),
+ PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"),
+ PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"),
+ PINCTRL_PIN(PIN_MIPIRX5P, "MIPIRX5P"),
+ PINCTRL_PIN(PIN_VIVO_CLK, "VIVO_CLK"),
+ PINCTRL_PIN(PIN_VIVO_D6, "VIVO_D6"),
+ PINCTRL_PIN(PIN_VIVO_D8, "VIVO_D8"),
+ PINCTRL_PIN(PIN_USB_VBUS_EN, "USB_VBUS_EN"),
+ PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"),
+ PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"),
+ PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"),
+ PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"),
+ PINCTRL_PIN(PIN_CAM_MCLK1, "CAM_MCLK1"),
+ PINCTRL_PIN(PIN_IIC3_SCL, "IIC3_SCL"),
+ PINCTRL_PIN(PIN_VIVO_D4, "VIVO_D4"),
+ PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"),
+ PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"),
+ PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"),
+ PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"),
+ PINCTRL_PIN(PIN_CAM_PD1, "CAM_PD1"),
+ PINCTRL_PIN(PIN_CAM_RST0, "CAM_RST0"),
+ PINCTRL_PIN(PIN_VIVO_D0, "VIVO_D0"),
+ PINCTRL_PIN(PIN_ADC1, "ADC1"),
+ PINCTRL_PIN(PIN_ADC2, "ADC2"),
+ PINCTRL_PIN(PIN_ADC3, "ADC3"),
+ PINCTRL_PIN(PIN_AUD_AOUTL, "AUD_AOUTL"),
+ PINCTRL_PIN(PIN_IIC3_SDA, "IIC3_SDA"),
+ PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"),
+ PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"),
+ PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"),
+ PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"),
+ PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"),
+ PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"),
+ PINCTRL_PIN(PIN_RSTN, "RSTN"),
+ PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"),
+ PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"),
+ PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"),
+ PINCTRL_PIN(PIN_AUD_AINR_MIC, "AUD_AINR_MIC"),
+ PINCTRL_PIN(PIN_IIC2_SCL, "IIC2_SCL"),
+ PINCTRL_PIN(PIN_IIC2_SDA, "IIC2_SDA"),
+ PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"),
+ PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"),
+ PINCTRL_PIN(PIN_UART2_RX, "UART2_RX"),
+ PINCTRL_PIN(PIN_UART2_CTS, "UART2_CTS"),
+ PINCTRL_PIN(PIN_UART2_TX, "UART2_TX"),
+ PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"),
+ PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PIN_CLK32K, "CLK32K"),
+ PINCTRL_PIN(PIN_UART2_RTS, "UART2_RTS"),
+ PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"),
+ PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"),
+ PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"),
+ PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TRST, "JTAG_CPU_TRST"),
+ PINCTRL_PIN(PIN_PWR_ON, "PWR_ON"),
+ PINCTRL_PIN(PIN_PWR_GPIO2, "PWR_GPIO2"),
+ PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"),
+ PINCTRL_PIN(PIN_CLK25M, "CLK25M"),
+ PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"),
+ PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP1, "PWR_WAKEUP1"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"),
+ PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"),
+ PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"),
+ PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"),
+ PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"),
+ PINCTRL_PIN(PIN_EMMC_RSTN, "EMMC_RSTN"),
+ PINCTRL_PIN(PIN_AUX0, "AUX0"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"),
+ PINCTRL_PIN(PIN_PWR_SEQ3, "PWR_SEQ3"),
+ PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"),
+ PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"),
+ PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"),
+ PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"),
+ PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"),
+ PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"),
+ PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"),
+ PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"),
+ PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"),
+ PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"),
+};
+
+static const struct cv1800_pin cv1812h_pin_data[ARRAY_SIZE(cv1812h_pins)] = {
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM4, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x194, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc60),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x18c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc58),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x178, 7,
+ CV1800_PINCONF_AREA_SYS, 0x118, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc44),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x170, 7,
+ CV1800_PINCONF_AREA_SYS, 0x11c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc3c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D2, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x154, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc20),
+ CV1800_GENERAL_PIN(PIN_VIVO_D3, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x150, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc1c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D10, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x134, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc00),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x108, 5,
+ CV1800_PINCONF_AREA_SYS, 0x820),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP3, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc6c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM3, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x19c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc68),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP4, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x198, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc64),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x190, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc5c),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x184, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc50),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x17c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc48),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x16c, 7,
+ CV1800_PINCONF_AREA_SYS, 0x120, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc38),
+ CV1800_GENERAL_PIN(PIN_MIPIRX5N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x164, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc30),
+ CV1800_GENERAL_PIN(PIN_VIVO_D1, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x158, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc24),
+ CV1800_GENERAL_PIN(PIN_VIVO_D5, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x148, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc14),
+ CV1800_GENERAL_PIN(PIN_VIVO_D7, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x140, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc0c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D9, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x138, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc04),
+ CV1800_GENERAL_PIN(PIN_USB_ID, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0fc, 3,
+ CV1800_PINCONF_AREA_SYS, 0x814),
+ CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x130, 7),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc74),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc70),
+ CV1800_GENERAL_PIN(PIN_CAM_PD0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x004, 4,
+ CV1800_PINCONF_AREA_SYS, 0xb04),
+ CV1800_GENERAL_PIN(PIN_CAM_MCLK0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x000, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb00),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x188, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc54),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x180, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc4c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x174, 7,
+ CV1800_PINCONF_AREA_SYS, 0x114, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc40),
+ CV1800_GENERAL_PIN(PIN_MIPIRX5P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x168, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc34),
+ CV1800_GENERAL_PIN(PIN_VIVO_CLK, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x160, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc2c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D6, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x144, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc10),
+ CV1800_GENERAL_PIN(PIN_VIVO_D8, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x13c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc08),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_EN, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x100, 3,
+ CV1800_PINCONF_AREA_SYS, 0x818),
+ CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x12c, 7),
+ CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1cc, 5,
+ CV1800_PINCONF_AREA_SYS, 0xc8c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc7c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1ac, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc78),
+ CV1800_GENERAL_PIN(PIN_CAM_MCLK1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x00c, 4,
+ CV1800_PINCONF_AREA_SYS, 0xb0c),
+ CV1800_GENERAL_PIN(PIN_IIC3_SCL, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x014, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb14),
+ CV1800_GENERAL_PIN(PIN_VIVO_D4, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x14c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc18),
+ CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x128, 7),
+ CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x124, 7),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc84),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc80),
+ CV1800_GENERAL_PIN(PIN_CAM_PD1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x010, 6,
+ CV1800_PINCONF_AREA_SYS, 0xb10),
+ CV1800_GENERAL_PIN(PIN_CAM_RST0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x008, 6,
+ CV1800_PINCONF_AREA_SYS, 0xb08),
+ CV1800_GENERAL_PIN(PIN_VIVO_D0, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x15c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc28),
+ CV1800_GENERAL_PIN(PIN_ADC1, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f8, 4,
+ CV1800_PINCONF_AREA_SYS, 0x810),
+ CV1800_GENERAL_PIN(PIN_ADC2, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f4, 7,
+ CV1800_PINCONF_AREA_SYS, 0x80c),
+ CV1800_GENERAL_PIN(PIN_ADC3, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f0, 7,
+ CV1800_PINCONF_AREA_SYS, 0x808),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTL, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c4, 5),
+ CV1800_GENERAL_PIN(PIN_IIC3_SDA, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x018, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb18),
+ CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x05c),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c8, 6),
+ CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x058),
+ CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x06c),
+ CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x068),
+ CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1bc, 5),
+ CV1800_GENERAL_PIN(PIN_RSTN, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e8, 0,
+ CV1800_PINCONF_AREA_SYS, 0x800),
+ CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ec, 3,
+ CV1800_PINCONF_AREA_SYS, 0x804),
+ CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x060),
+ CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0dc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x064),
+ CV1800_FUNC_PIN(PIN_AUD_AINR_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c0, 6),
+ CV1800_GENERAL_PIN(PIN_IIC2_SCL, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x040),
+ CV1800_GENERAL_PIN(PIN_IIC2_SDA, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0bc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x044),
+ CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x034, 3,
+ CV1800_PINCONF_AREA_SYS, 0x900),
+ CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x028, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa0c),
+ CV1800_GENERAL_PIN(PIN_UART2_RX, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x050),
+ CV1800_GENERAL_PIN(PIN_UART2_CTS, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0cc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x054),
+ CV1800_GENERAL_PIN(PIN_UART2_TX, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x048),
+ CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x01c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa00),
+ CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x024, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa08),
+ CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x020, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa04),
+ CV1800_GENERAL_PIN(PIN_CLK32K, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x038),
+ CV1800_GENERAL_PIN(PIN_UART2_RTS, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x04c),
+ CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x030, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa14),
+ CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x02c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa10),
+ CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x044, 7,
+ CV1800_PINCONF_AREA_SYS, 0x910),
+ CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x040, 7,
+ CV1800_PINCONF_AREA_SYS, 0x90c),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TRST, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x06c, 6,
+ CV1800_PINCONF_AREA_SYS, 0x938),
+ CV1800_GENERAL_PIN(PIN_PWR_ON, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x09c, 7,
+ CV1800_PINCONF_AREA_RTC, 0x024),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ac, 7,
+ CV1800_PINCONF_AREA_RTC, 0x034),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a4, 4,
+ CV1800_PINCONF_AREA_RTC, 0x02c),
+ CV1800_GENERAL_PIN(PIN_CLK25M, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x03c),
+ CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x038, 3,
+ CV1800_PINCONF_AREA_SYS, 0x904),
+ CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x03c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x908),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x068, 7,
+ CV1800_PINCONF_AREA_SYS, 0x934),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x064, 7,
+ CV1800_PINCONF_AREA_SYS, 0x930),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x094, 7,
+ CV1800_PINCONF_AREA_RTC, 0x01c),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x090, 7,
+ CV1800_PINCONF_AREA_RTC, 0x018),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x030),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x058, 3,
+ CV1800_PINCONF_AREA_SYS, 0x924),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x054, 3,
+ CV1800_PINCONF_AREA_SYS, 0x920),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x04c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x918),
+ CV1800_GENERAL_PIN(PIN_EMMC_RSTN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x048, 4,
+ CV1800_PINCONF_AREA_SYS, 0x914),
+ CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x078, 7,
+ CV1800_PINCONF_AREA_SYS, 0x944),
+ CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x074, 7,
+ CV1800_PINCONF_AREA_SYS, 0x940),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ3, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x08c, 3,
+ CV1800_PINCONF_AREA_RTC, 0x010),
+ CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x07c, 0,
+ CV1800_PINCONF_AREA_RTC, 0x000),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x084, 3,
+ CV1800_PINCONF_AREA_RTC, 0x008),
+ CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x098, 7,
+ CV1800_PINCONF_AREA_RTC, 0x020),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x060, 3,
+ CV1800_PINCONF_AREA_SYS, 0x92c),
+ CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x05c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x928),
+ CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x050, 3,
+ CV1800_PINCONF_AREA_SYS, 0x91c),
+ CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x070, 7,
+ CV1800_PINCONF_AREA_SYS, 0x93c),
+ CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1d0, 4,
+ CV1800_PINCONF_AREA_RTC, 0x0e0),
+ CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x080, 0,
+ CV1800_PINCONF_AREA_RTC, 0x004),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x088, 3,
+ CV1800_PINCONF_AREA_RTC, 0x00c),
+ CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a0, 0,
+ CV1800_PINCONF_AREA_RTC, 0x028),
+};
+
+static const struct cv1800_pinctrl_data cv1812h_pindata = {
+ .pins = cv1812h_pins,
+ .pindata = cv1812h_pin_data,
+ .pdnames = cv1812h_power_domain_desc,
+ .vddio_ops = &cv1812h_vddio_cfg_ops,
+ .npins = ARRAY_SIZE(cv1812h_pins),
+ .npd = ARRAY_SIZE(cv1812h_power_domain_desc),
+};
+
+static const struct of_device_id cv1812h_pinctrl_ids[] = {
+ { .compatible = "sophgo,cv1812h-pinctrl", .data = &cv1812h_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1812h_pinctrl_ids);
+
+static struct platform_driver cv1812h_pinctrl_driver = {
+ .probe = cv1800_pinctrl_probe,
+ .driver = {
+ .name = "cv1812h-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = cv1812h_pinctrl_ids,
+ },
+};
+module_platform_driver(cv1812h_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the CV1812H series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
new file mode 100644
index 000000000000..d18fc5aa84f7
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
@@ -0,0 +1,765 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo CV18XX SoCs pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bsearch.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+#include "pinctrl-cv18xx.h"
+
+struct cv1800_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctl_dev;
+ const struct cv1800_pinctrl_data *data;
+ struct pinctrl_desc pdesc;
+ u32 *power_cfg;
+
+ struct mutex mutex;
+ raw_spinlock_t lock;
+
+ void __iomem *regs[2];
+};
+
+struct cv1800_pin_mux_config {
+ struct cv1800_pin *pin;
+ u32 config;
+};
+
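+/*
+ * A "pinmux" DT cell packs three fields into a single u32, decoded by
+ * the helpers below: bits [15:0] hold the pin id, bits [23:16] the
+ * primary mux function and bits [31:24] the secondary (mux2) function.
+ */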
+static unsigned int cv1800_dt_get_pin(u32 value)
+{
+ return value & GENMASK(15, 0);
+}
+
+static unsigned int cv1800_dt_get_pin_mux(u32 value)
+{
+ return (value >> 16) & GENMASK(7, 0);
+}
+
+static unsigned int cv1800_dt_get_pin_mux2(u32 value)
+{
+ return (value >> 24) & GENMASK(7, 0);
+}
+
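+/*
+ * Resolve the register address of a mux/conf component: the area index
+ * selects one of the two MMIO regions mapped in probe,
+ * regs[CV1800_PINCONF_AREA_SYS] ("sys") or
+ * regs[CV1800_PINCONF_AREA_RTC] ("rtc").
+ */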
+#define cv1800_pinctrl_get_component_addr(pctrl, _comp) \
+ ((pctrl)->regs[(_comp)->area] + (_comp)->offset)
+
+static int cv1800_cmp_pin(const void *key, const void *pivot)
+{
+ const struct cv1800_pin *pin = pivot;
+ int pin_id = (long)key;
+ int pivid = pin->pin;
+
+ return pin_id - pivid;
+}
+
+static int cv1800_set_power_cfg(struct cv1800_pinctrl *pctrl,
+ u8 domain, u32 cfg)
+{
+ if (domain >= pctrl->data->npd)
+ return -ENOTSUPP;
+
+ if (pctrl->power_cfg[domain] && pctrl->power_cfg[domain] != cfg)
+ return -EINVAL;
+
+ pctrl->power_cfg[domain] = cfg;
+
+ return 0;
+}
+
+static int cv1800_get_power_cfg(struct cv1800_pinctrl *pctrl,
+ u8 domain)
+{
+ return pctrl->power_cfg[domain];
+}
+
+static struct cv1800_pin *cv1800_get_pin(struct cv1800_pinctrl *pctrl,
+ unsigned long pin)
+{
+ return bsearch((void *)pin, pctrl->data->pindata, pctrl->data->npins,
+ sizeof(struct cv1800_pin), cv1800_cmp_pin);
+}
+
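+/*
+ * Pins of BGA packages encode their package position in the pin id:
+ * the row letter (1-based, starting at 'A') sits above
+ * PIN_BGA_ID_OFFSET, the column number in the low PIN_BGA_ID_MASK bits.
+ * Non-BGA pins keep a plain number; see cv1800_pctrl_dbg_show() below.
+ */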
+#define PIN_BGA_ID_OFFSET 8
+#define PIN_BGA_ID_MASK 0xff
+
+static const char *const io_type_desc[] = {
+ "1V8",
+ "18OD33",
+ "AUDIO",
+ "ETH"
+};
+
+static const char *cv1800_get_power_cfg_desc(struct cv1800_pinctrl *pctrl,
+ u8 domain)
+{
+ return pctrl->data->pdnames[domain];
+}
+
+static void cv1800_pctrl_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *seq, unsigned int pin_id)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 value;
+ void __iomem *reg;
+
+ if (pin->pin >> PIN_BGA_ID_OFFSET)
+ seq_printf(seq, "pos: %c%u ",
+ 'A' + (pin->pin >> PIN_BGA_ID_OFFSET) - 1,
+ pin->pin & PIN_BGA_ID_MASK);
+ else
+ seq_printf(seq, "pos: %u ", pin->pin);
+
+ seq_printf(seq, "power-domain: %s ",
+ cv1800_get_power_cfg_desc(pctrl, pin->power_domain));
+ seq_printf(seq, "type: %s ", io_type_desc[type]);
+
+ reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux);
+ value = readl(reg);
+ seq_printf(seq, "mux: 0x%08x ", value);
+
+ if (pin->flags & CV1800_PIN_HAVE_MUX2) {
+ reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux2);
+ value = readl(reg);
+ seq_printf(seq, "mux2: 0x%08x ", value);
+ }
+
+ if (type == IO_TYPE_1V8_ONLY || type == IO_TYPE_1V8_OR_3V3) {
+ reg = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
+ value = readl(reg);
+ seq_printf(seq, "conf: 0x%08x ", value);
+ }
+}
+
+static int cv1800_verify_pinmux_config(const struct cv1800_pin_mux_config *config)
+{
+ unsigned int mux = cv1800_dt_get_pin_mux(config->config);
+ unsigned int mux2 = cv1800_dt_get_pin_mux2(config->config);
+
+ if (mux > config->pin->mux.max)
+ return -EINVAL;
+
+ if (config->pin->flags & CV1800_PIN_HAVE_MUX2) {
+ if (mux != config->pin->mux2.pfunc)
+ return -EINVAL;
+
+ if (mux2 > config->pin->mux2.max)
+ return -EINVAL;
+ } else {
+ if (mux2 != PIN_MUX_INVALD)
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cv1800_verify_pin_group(const struct cv1800_pin_mux_config *mux,
+ unsigned long npins)
+{
+ enum cv1800_pin_io_type type;
+ u8 power_domain;
+ int i;
+
+ if (npins == 1)
+ return 0;
+
+ type = cv1800_pin_io_type(mux[0].pin);
+ power_domain = mux[0].pin->power_domain;
+
+ for (i = 0; i < npins; i++) {
+ if (type != cv1800_pin_io_type(mux[i].pin) ||
+ power_domain != mux[i].pin->power_domain)
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
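+/*
+ * Each child of a pinctrl state node describes one group: every entry
+ * of its "pinmux" property must name a known pin, all pins of a group
+ * must share the same IO type and power domain, and the mandatory
+ * "power-source" value is latched per power domain so that conflicting
+ * settings for the same domain are rejected.
+ */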
+static int cv1800_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **maps,
+ unsigned int *num_maps)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = pctrl->dev;
+ struct device_node *child;
+ struct pinctrl_map *map;
+ const char **grpnames;
+ const char *grpname;
+ int ngroups = 0;
+ int nmaps = 0;
+ int ret;
+
+ for_each_available_child_of_node(np, child)
+ ngroups += 1;
+
+ grpnames = devm_kcalloc(dev, ngroups, sizeof(*grpnames), GFP_KERNEL);
+ if (!grpnames)
+ return -ENOMEM;
+
+ map = devm_kcalloc(dev, ngroups * 2, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ ngroups = 0;
+ mutex_lock(&pctrl->mutex);
+ for_each_available_child_of_node(np, child) {
+ int npins = of_property_count_u32_elems(child, "pinmux");
+ unsigned int *pins;
+ struct cv1800_pin_mux_config *pinmuxs;
+ u32 config, power;
+ int i;
+
+ if (npins < 1) {
+ dev_err(dev, "invalid pinctrl group %pOFn.%pOFn\n",
+ np, child);
+ ret = -EINVAL;
+ goto dt_failed;
+ }
+
+ grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn",
+ np, child);
+ if (!grpname) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ grpnames[ngroups++] = grpname;
+
+ pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ pinmuxs = devm_kcalloc(dev, npins, sizeof(*pinmuxs), GFP_KERNEL);
+ if (!pinmuxs) {
+ ret = -ENOMEM;
+ goto dt_failed;
+ }
+
+ for (i = 0; i < npins; i++) {
+ ret = of_property_read_u32_index(child, "pinmux",
+ i, &config);
+ if (ret)
+ goto dt_failed;
+
+ pins[i] = cv1800_dt_get_pin(config);
+ pinmuxs[i].config = config;
+ pinmuxs[i].pin = cv1800_get_pin(pctrl, pins[i]);
+
+ if (!pinmuxs[i].pin) {
+ dev_err(dev, "failed to get pin %d\n", pins[i]);
+ ret = -ENODEV;
+ goto dt_failed;
+ }
+
+ ret = cv1800_verify_pinmux_config(&pinmuxs[i]);
+ if (ret) {
+ dev_err(dev, "group %s pin %d is invalid\n",
+ grpname, i);
+ goto dt_failed;
+ }
+ }
+
+ ret = cv1800_verify_pin_group(pinmuxs, npins);
+ if (ret) {
+ dev_err(dev, "group %s is invalid\n", grpname);
+ goto dt_failed;
+ }
+
+ ret = of_property_read_u32(child, "power-source", &power);
+ if (ret)
+ goto dt_failed;
+
+ if (!(power == PIN_POWER_STATE_3V3 || power == PIN_POWER_STATE_1V8)) {
+ dev_err(dev, "group %s have unsupported power: %u\n",
+ grpname, power);
+ ret = -ENOTSUPP;
+ goto dt_failed;
+ }
+
+ ret = cv1800_set_power_cfg(pctrl, pinmuxs[0].pin->power_domain,
+ power);
+ if (ret)
+ goto dt_failed;
+
+ map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
+ map[nmaps].data.mux.function = np->name;
+ map[nmaps].data.mux.group = grpname;
+ nmaps += 1;
+
+ ret = pinconf_generic_parse_dt_config(child, pctldev,
+ &map[nmaps].data.configs.configs,
+ &map[nmaps].data.configs.num_configs);
+ if (ret) {
+ dev_err(dev, "failed to parse pin config of group %s: %d\n",
+ grpname, ret);
+ goto dt_failed;
+ }
+
+ ret = pinctrl_generic_add_group(pctldev, grpname,
+ pins, npins, pinmuxs);
+ if (ret < 0) {
+ dev_err(dev, "failed to add group %s: %d\n", grpname, ret);
+ goto dt_failed;
+ }
+
+ /* don't create a map if there are no pinconf settings */
+ if (map[nmaps].data.configs.num_configs == 0)
+ continue;
+
+ map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ map[nmaps].data.configs.group_or_pin = grpname;
+ nmaps += 1;
+ }
+
+ ret = pinmux_generic_add_function(pctldev, np->name,
+ grpnames, ngroups, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error adding function %s: %d\n", np->name, ret);
+ goto function_failed;
+ }
+
+ *maps = map;
+ *num_maps = nmaps;
+ mutex_unlock(&pctrl->mutex);
+
+ return 0;
+
+dt_failed:
+ of_node_put(child);
+function_failed:
+ pinctrl_utils_free_map(pctldev, map, nmaps);
+ mutex_unlock(&pctrl->mutex);
+ return ret;
+}
+
+static const struct pinctrl_ops cv1800_pctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .pin_dbg_show = cv1800_pctrl_dbg_show,
+ .dt_node_to_map = cv1800_pctrl_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int cv1800_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int fsel, unsigned int gsel)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct group_desc *group;
+ const struct cv1800_pin_mux_config *configs;
+ unsigned int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ configs = group->data;
+
+ for (i = 0; i < group->grp.npins; i++) {
+ const struct cv1800_pin *pin = configs[i].pin;
+ u32 value = configs[i].config;
+ void __iomem *reg_mux;
+ void __iomem *reg_mux2;
+ unsigned long flags;
+ u32 mux;
+ u32 mux2;
+
+ reg_mux = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux);
+ reg_mux2 = cv1800_pinctrl_get_component_addr(pctrl, &pin->mux2);
+ mux = cv1800_dt_get_pin_mux(value);
+ mux2 = cv1800_dt_get_pin_mux2(value);
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ writel_relaxed(mux, reg_mux);
+ if (mux2 != PIN_MUX_INVALD)
+ writel_relaxed(mux2, reg_mux2);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops cv1800_pmx_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = cv1800_pmx_set_mux,
+ .strict = true,
+};
+
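+/* Bit layout of the per-pin configuration register */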
+#define PIN_IO_PULLUP BIT(2)
+#define PIN_IO_PULLDOWN BIT(3)
+#define PIN_IO_DRIVE GENMASK(7, 5)
+#define PIN_IO_SCHMITT GENMASK(9, 8)
+#define PIN_IO_BUS_HOLD BIT(10)
+#define PIN_IO_OUT_FAST_SLEW BIT(11)
+
+static u32 cv1800_pull_down_typical_resistor(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin)
+{
+ return pctrl->data->vddio_ops->get_pull_down(pin, pctrl->power_cfg);
+}
+
+static u32 cv1800_pull_up_typical_resistor(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin)
+{
+ return pctrl->data->vddio_ops->get_pull_up(pin, pctrl->power_cfg);
+}
+
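+/*
+ * Map a requested typical output current to a register value by
+ * picking the first table entry that meets or exceeds the target;
+ * cv1800_pinctrl_reg2oc() below is the reverse lookup.
+ */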
+static int cv1800_pinctrl_oc2reg(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin, u32 target)
+{
+ const u32 *map;
+ int i, len;
+
+ len = pctrl->data->vddio_ops->get_oc_map(pin, pctrl->power_cfg, &map);
+ if (len < 0)
+ return len;
+
+ for (i = 0; i < len; i++) {
+ if (map[i] >= target)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cv1800_pinctrl_reg2oc(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin, u32 reg)
+{
+ const u32 *map;
+ int len;
+
+ len = pctrl->data->vddio_ops->get_oc_map(pin, pctrl->power_cfg, &map);
+ if (len < 0)
+ return len;
+
+ if (reg >= len)
+ return -EINVAL;
+
+ return map[reg];
+}
+
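+/*
+ * Schmitt trigger thresholds must match a table entry exactly; a
+ * threshold of 0 (the first entry of every map) means the trigger is
+ * disabled.
+ */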
+static int cv1800_pinctrl_schmitt2reg(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin, u32 target)
+{
+ const u32 *map;
+ int i, len;
+
+ len = pctrl->data->vddio_ops->get_schmitt_map(pin, pctrl->power_cfg,
+ &map);
+ if (len < 0)
+ return len;
+
+ for (i = 0; i < len; i++) {
+ if (map[i] == target)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cv1800_pinctrl_reg2schmitt(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin, u32 reg)
+{
+ const u32 *map;
+ int len;
+
+ len = pctrl->data->vddio_ops->get_schmitt_map(pin, pctrl->power_cfg,
+ &map);
+ if (len < 0)
+ return len;
+
+ if (reg >= len)
+ return -EINVAL;
+
+ return map[reg];
+}
+
+static int cv1800_pconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin_id, unsigned long *config)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ int param = pinconf_to_config_param(*config);
+ struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ enum cv1800_pin_io_type type;
+ u32 value;
+ u32 arg;
+ bool enabled;
+ int ret;
+
+ if (!pin)
+ return -EINVAL;
+
+ type = cv1800_pin_io_type(pin);
+ if (type == IO_TYPE_ETH || type == IO_TYPE_AUDIO)
+ return -ENOTSUPP;
+
+ value = readl(cv1800_pinctrl_get_component_addr(pctrl, &pin->conf));
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ enabled = FIELD_GET(PIN_IO_PULLDOWN, value);
+ arg = cv1800_pull_down_typical_resistor(pctrl, pin);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ enabled = FIELD_GET(PIN_IO_PULLUP, value);
+ arg = cv1800_pull_up_typical_resistor(pctrl, pin);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ enabled = true;
+ arg = FIELD_GET(PIN_IO_DRIVE, value);
+ ret = cv1800_pinctrl_reg2oc(pctrl, pin, arg);
+ if (ret < 0)
+ return ret;
+ arg = ret;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_UV:
+ arg = FIELD_GET(PIN_IO_SCHMITT, value);
+ ret = cv1800_pinctrl_reg2schmitt(pctrl, pin, arg);
+ if (ret < 0)
+ return ret;
+ arg = ret;
+ enabled = arg != 0;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ enabled = true;
+ arg = cv1800_get_power_cfg(pctrl, pin->power_domain);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ enabled = true;
+ arg = FIELD_GET(PIN_IO_OUT_FAST_SLEW, value);
+ break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ arg = FIELD_GET(PIN_IO_BUS_HOLD, value);
+ enabled = arg != 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return enabled ? 0 : -EINVAL;
+}
+
+static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
+ struct cv1800_pin *pin,
+ unsigned long *configs,
+ unsigned int num_configs,
+ u32 *value)
+{
+ int i;
+ u32 v = 0;
+ enum cv1800_pin_io_type type;
+ int ret;
+
+ if (!pin)
+ return -EINVAL;
+
+ type = cv1800_pin_io_type(pin);
+ if (type == IO_TYPE_ETH || type == IO_TYPE_AUDIO)
+ return -ENOTSUPP;
+
+ for (i = 0; i < num_configs; i++) {
+ int param = pinconf_to_config_param(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ v &= ~PIN_IO_PULLDOWN;
+ v |= FIELD_PREP(PIN_IO_PULLDOWN, arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ v &= ~PIN_IO_PULLUP;
+ v |= FIELD_PREP(PIN_IO_PULLUP, arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ ret = cv1800_pinctrl_oc2reg(pctrl, pin, arg);
+ if (ret < 0)
+ return ret;
+ v &= ~PIN_IO_DRIVE;
+ v |= FIELD_PREP(PIN_IO_DRIVE, ret);
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_UV:
+ ret = cv1800_pinctrl_schmitt2reg(pctrl, pin, arg);
+ if (ret < 0)
+ return ret;
+ v &= ~PIN_IO_SCHMITT;
+ v |= FIELD_PREP(PIN_IO_SCHMITT, ret);
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ /* Ignore power source as it is always fixed */
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ v &= ~PIN_IO_OUT_FAST_SLEW;
+ v |= FIELD_PREP(PIN_IO_OUT_FAST_SLEW, arg);
+ break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ v &= ~PIN_IO_BUS_HOLD;
+ v |= FIELD_PREP(PIN_IO_BUS_HOLD, arg);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ *value = v;
+
+ return 0;
+}
+
+static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl,
+ unsigned int pin_id,
+ u32 value)
+{
+ struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ unsigned long flags;
+ void __iomem *addr;
+
+ if (!pin)
+ return -EINVAL;
+
+ addr = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ writel(value, addr);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int cv1800_pconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin_id, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
+ u32 value;
+
+ if (!pin)
+ return -ENODEV;
+
+ if (cv1800_pinconf_compute_config(pctrl, pin,
+ configs, num_configs, &value))
+ return -ENOTSUPP;
+
+ return cv1800_pin_set_config(pctrl, pin_id, value);
+}
+
+static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int gsel,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct group_desc *group;
+ const struct cv1800_pin_mux_config *pinmuxs;
+ u32 value;
+ int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ pinmuxs = group->data;
+
+ if (cv1800_pinconf_compute_config(pctrl, pinmuxs[0].pin,
+ configs, num_configs, &value))
+ return -ENOTSUPP;
+
+ for (i = 0; i < group->grp.npins; i++)
+ cv1800_pin_set_config(pctrl, group->grp.pins[i], value);
+
+ return 0;
+}
+
+static const struct pinconf_ops cv1800_pconf_ops = {
+ .pin_config_get = cv1800_pconf_get,
+ .pin_config_set = cv1800_pconf_set,
+ .pin_config_group_set = cv1800_pconf_group_set,
+ .is_generic = true,
+};
+
+int cv1800_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800_pinctrl *pctrl;
+ const struct cv1800_pinctrl_data *pctrl_data;
+ int ret;
+
+ pctrl_data = device_get_match_data(dev);
+ if (!pctrl_data)
+ return -ENODEV;
+
+ if (pctrl_data->npins == 0 || pctrl_data->npd == 0)
+ return dev_err_probe(dev, -EINVAL, "invalid pin data\n");
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->power_cfg = devm_kcalloc(dev, pctrl_data->npd,
+ sizeof(u32), GFP_KERNEL);
+ if (!pctrl->power_cfg)
+ return -ENOMEM;
+
+ pctrl->regs[0] = devm_platform_ioremap_resource_byname(pdev, "sys");
+ if (IS_ERR(pctrl->regs[0]))
+ return PTR_ERR(pctrl->regs[0]);
+
+ pctrl->regs[1] = devm_platform_ioremap_resource_byname(pdev, "rtc");
+ if (IS_ERR(pctrl->regs[1]))
+ return PTR_ERR(pctrl->regs[1]);
+
+ pctrl->pdesc.name = dev_name(dev);
+ pctrl->pdesc.pins = pctrl_data->pins;
+ pctrl->pdesc.npins = pctrl_data->npins;
+ pctrl->pdesc.pctlops = &cv1800_pctrl_ops;
+ pctrl->pdesc.pmxops = &cv1800_pmx_ops;
+ pctrl->pdesc.confops = &cv1800_pconf_ops;
+ pctrl->pdesc.owner = THIS_MODULE;
+
+ pctrl->data = pctrl_data;
+ pctrl->dev = dev;
+ raw_spin_lock_init(&pctrl->lock);
+ mutex_init(&pctrl->mutex);
+
+ platform_set_drvdata(pdev, pctrl);
+
+ ret = devm_pinctrl_register_and_init(dev, &pctrl->pdesc,
+ pctrl, &pctrl->pctl_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "fail to register pinctrl driver\n");
+
+ return pinctrl_enable(pctrl->pctl_dev);
+}
+EXPORT_SYMBOL_GPL(cv1800_pinctrl_probe);
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.h b/drivers/pinctrl/sophgo/pinctrl-cv18xx.h
new file mode 100644
index 000000000000..1a9998abb3b7
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _PINCTRL_SOPHGO_CV18XX_H
+#define _PINCTRL_SOPHGO_CV18XX_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+enum cv1800_pin_io_type {
+ IO_TYPE_1V8_ONLY = 0,
+ IO_TYPE_1V8_OR_3V3 = 1,
+ IO_TYPE_AUDIO = 2,
+ IO_TYPE_ETH = 3
+};
+
+#define CV1800_PINCONF_AREA_SYS 0
+#define CV1800_PINCONF_AREA_RTC 1
+
+struct cv1800_pinmux {
+ u16 offset;
+ u8 area;
+ u8 max;
+};
+
+struct cv1800_pinmux2 {
+ u16 offset;
+ u8 area;
+ u8 max;
+ u8 pfunc;
+};
+
+struct cv1800_pinconf {
+ u16 offset;
+ u8 area;
+};
+
+#define CV1800_PIN_HAVE_MUX2 BIT(0)
+#define CV1800_PIN_IO_TYPE GENMASK(2, 1)
+
+#define CV1800_PIN_FLAG_IO_TYPE(type) \
+ FIELD_PREP_CONST(CV1800_PIN_IO_TYPE, type)
+struct cv1800_pin {
+ u16 pin;
+ u16 flags;
+ u8 power_domain;
+ struct cv1800_pinmux mux;
+ struct cv1800_pinmux2 mux2;
+ struct cv1800_pinconf conf;
+};
+
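+/* Power states accepted for the "power-source" property, in millivolts */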
+#define PIN_POWER_STATE_1V8 1800
+#define PIN_POWER_STATE_3V3 3300
+
+/**
+ * struct cv1800_vddio_cfg_ops - pin VDDIO operations
+ *
+ * @get_pull_up: get the typical pull-up resistor value.
+ * @get_pull_down: get the typical pull-down resistor value.
+ * @get_oc_map: get the map from typical low-level output current values
+ * to register values.
+ * @get_schmitt_map: get the map from register values to typical Schmitt
+ * trigger thresholds.
+ */
+struct cv1800_vddio_cfg_ops {
+ int (*get_pull_up)(struct cv1800_pin *pin, const u32 *psmap);
+ int (*get_pull_down)(struct cv1800_pin *pin, const u32 *psmap);
+ int (*get_oc_map)(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map);
+ int (*get_schmitt_map)(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map);
+};
+
+struct cv1800_pinctrl_data {
+ const struct pinctrl_pin_desc *pins;
+ const struct cv1800_pin *pindata;
+ const char * const *pdnames;
+ const struct cv1800_vddio_cfg_ops *vddio_ops;
+ u16 npins;
+ u16 npd;
+};
+
+static inline enum cv1800_pin_io_type cv1800_pin_io_type(struct cv1800_pin *pin)
+{
+ return FIELD_GET(CV1800_PIN_IO_TYPE, pin->flags);
+}
+
+int cv1800_pinctrl_probe(struct platform_device *pdev);
+
+#define CV1800_FUNC_PIN(_id, _power_domain, _type, \
+ _mux_area, _mux_offset, _mux_func_max) \
+ { \
+ .pin = (_id), \
+ .power_domain = (_power_domain), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \
+ .mux = { \
+ .area = (_mux_area), \
+ .offset = (_mux_offset), \
+ .max = (_mux_func_max), \
+ }, \
+ }
+
+#define CV1800_GENERAL_PIN(_id, _power_domain, _type, \
+ _mux_area, _mux_offset, _mux_func_max, \
+ _conf_area, _conf_offset) \
+ { \
+ .pin = (_id), \
+ .power_domain = (_power_domain), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type), \
+ .mux = { \
+ .area = (_mux_area), \
+ .offset = (_mux_offset), \
+ .max = (_mux_func_max), \
+ }, \
+ .conf = { \
+ .area = (_conf_area), \
+ .offset = (_conf_offset), \
+ }, \
+ }
+
+#define CV1800_GENERATE_PIN_MUX2(_id, _power_domain, _type, \
+ _mux_area, _mux_offset, _mux_func_max, \
+ _mux2_area, _mux2_offset, \
+ _mux2_func_max, \
+ _conf_area, _conf_offset) \
+ { \
+ .pin = (_id), \
+ .power_domain = (_power_domain), \
+ .flags = CV1800_PIN_FLAG_IO_TYPE(_type) | \
+ CV1800_PIN_HAVE_MUX2, \
+ .mux = { \
+ .area = (_mux_area), \
+ .offset = (_mux_offset), \
+ .max = (_mux_func_max), \
+ }, \
+ .mux2 = { \
+ .area = (_mux2_area), \
+ .offset = (_mux2_offset), \
+ .max = (_mux2_func_max), \
+ }, \
+ .conf = { \
+ .area = (_conf_area), \
+ .offset = (_conf_offset), \
+ }, \
+ }
+
+#endif
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2000.c b/drivers/pinctrl/sophgo/pinctrl-sg2000.c
new file mode 100644
index 000000000000..63c05b4dd68f
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2000.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2000 SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-sg2000.h>
+
+#include "pinctrl-cv18xx.h"
+
+enum SG2000_POWER_DOMAIN {
+ VDD18A_EPHY = 0,
+ VDD18A_MIPI = 1,
+ VDDIO18_1 = 2,
+ VDDIO_EMMC = 3,
+ VDDIO_RTC = 4,
+ VDDIO_SD0 = 5,
+ VDDIO_SD1 = 6,
+ VDDIO_VIVO = 7
+};
+
+static const char *const sg2000_power_domain_desc[] = {
+ [VDD18A_EPHY] = "VDD18A_EPHY",
+ [VDD18A_MIPI] = "VDD18A_MIPI",
+ [VDDIO18_1] = "VDDIO18_1",
+ [VDDIO_EMMC] = "VDDIO_EMMC",
+ [VDDIO_RTC] = "VDDIO_RTC",
+ [VDDIO_SD0] = "VDDIO_SD0",
+ [VDDIO_SD1] = "VDDIO_SD1",
+ [VDDIO_VIVO] = "VDDIO_VIVO",
+};
+
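+/*
+ * Typical pull resistor values reported to the pinconf core; the
+ * figures below are assumed to be in ohms.
+ */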
+static int sg2000_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 79000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 60000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 60000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sg2000_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 87000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 61000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 62000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
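+/* Typical output currents for PIN_CONFIG_DRIVE_STRENGTH_UA, in microamps */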
+static const u32 sg2000_1v8_oc_map[] = {
+ 12800,
+ 25300,
+ 37400,
+ 49000
+};
+
+static const u32 sg2000_18od33_1v8_oc_map[] = {
+ 7800,
+ 11700,
+ 15500,
+ 19200,
+ 23000,
+ 26600,
+ 30200,
+ 33700
+};
+
+static const u32 sg2000_18od33_3v3_oc_map[] = {
+ 5500,
+ 8200,
+ 10800,
+ 13400,
+ 16100,
+ 18700,
+ 21200,
+ 23700
+};
+
+static const u32 sg2000_eth_oc_map[] = {
+ 15700,
+ 17800
+};
+
+static int sg2000_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = sg2000_1v8_oc_map;
+ return ARRAY_SIZE(sg2000_1v8_oc_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = sg2000_18od33_1v8_oc_map;
+ return ARRAY_SIZE(sg2000_18od33_1v8_oc_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = sg2000_18od33_3v3_oc_map;
+ return ARRAY_SIZE(sg2000_18od33_3v3_oc_map);
+ }
+ }
+
+ if (type == IO_TYPE_ETH) {
+ *map = sg2000_eth_oc_map;
+ return ARRAY_SIZE(sg2000_eth_oc_map);
+ }
+
+ return -ENOTSUPP;
+}
+
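+/*
+ * Typical thresholds for PIN_CONFIG_INPUT_SCHMITT_UV, in microvolts;
+ * 0 means the Schmitt trigger is disabled.
+ */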
+static const u32 sg2000_1v8_schmitt_map[] = {
+ 0,
+ 970000,
+ 1040000
+};
+
+static const u32 sg2000_18od33_1v8_schmitt_map[] = {
+ 0,
+ 1070000
+};
+
+static const u32 sg2000_18od33_3v3_schmitt_map[] = {
+ 0,
+ 1100000
+};
+
+static int sg2000_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = sg2000_1v8_schmitt_map;
+ return ARRAY_SIZE(sg2000_1v8_schmitt_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = sg2000_18od33_1v8_schmitt_map;
+ return ARRAY_SIZE(sg2000_18od33_1v8_schmitt_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = sg2000_18od33_3v3_schmitt_map;
+ return ARRAY_SIZE(sg2000_18od33_3v3_schmitt_map);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct cv1800_vddio_cfg_ops sg2000_vddio_cfg_ops = {
+ .get_pull_up = sg2000_get_pull_up,
+ .get_pull_down = sg2000_get_pull_down,
+ .get_oc_map = sg2000_get_oc_map,
+ .get_schmitt_map = sg2000_get_schmitt_map,
+};
+
+static const struct pinctrl_pin_desc sg2000_pins[] = {
+ PINCTRL_PIN(PIN_MIPI_TXM4, "MIPI_TXM4"),
+ PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"),
+ PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"),
+ PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"),
+ PINCTRL_PIN(PIN_VIVO_D2, "VIVO_D2"),
+ PINCTRL_PIN(PIN_VIVO_D3, "VIVO_D3"),
+ PINCTRL_PIN(PIN_VIVO_D10, "VIVO_D10"),
+ PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"),
+ PINCTRL_PIN(PIN_MIPI_TXP3, "MIPI_TXP3"),
+ PINCTRL_PIN(PIN_MIPI_TXM3, "MIPI_TXM3"),
+ PINCTRL_PIN(PIN_MIPI_TXP4, "MIPI_TXP4"),
+ PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"),
+ PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"),
+ PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"),
+ PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"),
+ PINCTRL_PIN(PIN_MIPIRX5N, "MIPIRX5N"),
+ PINCTRL_PIN(PIN_VIVO_D1, "VIVO_D1"),
+ PINCTRL_PIN(PIN_VIVO_D5, "VIVO_D5"),
+ PINCTRL_PIN(PIN_VIVO_D7, "VIVO_D7"),
+ PINCTRL_PIN(PIN_VIVO_D9, "VIVO_D9"),
+ PINCTRL_PIN(PIN_USB_ID, "USB_ID"),
+ PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"),
+ PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"),
+ PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"),
+ PINCTRL_PIN(PIN_CAM_PD0, "CAM_PD0"),
+ PINCTRL_PIN(PIN_CAM_MCLK0, "CAM_MCLK0"),
+ PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"),
+ PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"),
+ PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"),
+ PINCTRL_PIN(PIN_MIPIRX5P, "MIPIRX5P"),
+ PINCTRL_PIN(PIN_VIVO_CLK, "VIVO_CLK"),
+ PINCTRL_PIN(PIN_VIVO_D6, "VIVO_D6"),
+ PINCTRL_PIN(PIN_VIVO_D8, "VIVO_D8"),
+ PINCTRL_PIN(PIN_USB_VBUS_EN, "USB_VBUS_EN"),
+ PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"),
+ PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"),
+ PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"),
+ PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"),
+ PINCTRL_PIN(PIN_CAM_MCLK1, "CAM_MCLK1"),
+ PINCTRL_PIN(PIN_IIC3_SCL, "IIC3_SCL"),
+ PINCTRL_PIN(PIN_VIVO_D4, "VIVO_D4"),
+ PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"),
+ PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"),
+ PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"),
+ PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"),
+ PINCTRL_PIN(PIN_CAM_PD1, "CAM_PD1"),
+ PINCTRL_PIN(PIN_CAM_RST0, "CAM_RST0"),
+ PINCTRL_PIN(PIN_VIVO_D0, "VIVO_D0"),
+ PINCTRL_PIN(PIN_ADC1, "ADC1"),
+ PINCTRL_PIN(PIN_ADC2, "ADC2"),
+ PINCTRL_PIN(PIN_ADC3, "ADC3"),
+ PINCTRL_PIN(PIN_AUD_AOUTL, "AUD_AOUTL"),
+ PINCTRL_PIN(PIN_IIC3_SDA, "IIC3_SDA"),
+ PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"),
+ PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"),
+ PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"),
+ PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"),
+ PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"),
+ PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"),
+ PINCTRL_PIN(PIN_RSTN, "RSTN"),
+ PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"),
+ PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"),
+ PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"),
+ PINCTRL_PIN(PIN_AUD_AINR_MIC, "AUD_AINR_MIC"),
+ PINCTRL_PIN(PIN_IIC2_SCL, "IIC2_SCL"),
+ PINCTRL_PIN(PIN_IIC2_SDA, "IIC2_SDA"),
+ PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"),
+ PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"),
+ PINCTRL_PIN(PIN_UART2_RX, "UART2_RX"),
+ PINCTRL_PIN(PIN_UART2_CTS, "UART2_CTS"),
+ PINCTRL_PIN(PIN_UART2_TX, "UART2_TX"),
+ PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"),
+ PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PIN_CLK32K, "CLK32K"),
+ PINCTRL_PIN(PIN_UART2_RTS, "UART2_RTS"),
+ PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"),
+ PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"),
+ PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"),
+ PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TRST, "JTAG_CPU_TRST"),
+ PINCTRL_PIN(PIN_PWR_ON, "PWR_ON"),
+ PINCTRL_PIN(PIN_PWR_GPIO2, "PWR_GPIO2"),
+ PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"),
+ PINCTRL_PIN(PIN_CLK25M, "CLK25M"),
+ PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"),
+ PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP1, "PWR_WAKEUP1"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"),
+ PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"),
+ PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"),
+ PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"),
+ PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"),
+ PINCTRL_PIN(PIN_EMMC_RSTN, "EMMC_RSTN"),
+ PINCTRL_PIN(PIN_AUX0, "AUX0"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"),
+ PINCTRL_PIN(PIN_PWR_SEQ3, "PWR_SEQ3"),
+ PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"),
+ PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"),
+ PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"),
+ PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"),
+ PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"),
+ PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"),
+ PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"),
+ PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"),
+ PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"),
+ PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"),
+};
+
+static const struct cv1800_pin sg2000_pin_data[ARRAY_SIZE(sg2000_pins)] = {
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM4, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x194, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc60),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x18c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc58),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x178, 7,
+ CV1800_PINCONF_AREA_SYS, 0x118, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc44),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x170, 7,
+ CV1800_PINCONF_AREA_SYS, 0x11c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc3c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D2, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x154, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc20),
+ CV1800_GENERAL_PIN(PIN_VIVO_D3, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x150, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc1c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D10, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x134, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc00),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x108, 5,
+ CV1800_PINCONF_AREA_SYS, 0x820),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP3, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc6c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM3, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x19c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc68),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP4, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x198, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc64),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x190, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc5c),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x184, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc50),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x17c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc48),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x16c, 7,
+ CV1800_PINCONF_AREA_SYS, 0x120, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc38),
+ CV1800_GENERAL_PIN(PIN_MIPIRX5N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x164, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc30),
+ CV1800_GENERAL_PIN(PIN_VIVO_D1, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x158, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc24),
+ CV1800_GENERAL_PIN(PIN_VIVO_D5, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x148, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc14),
+ CV1800_GENERAL_PIN(PIN_VIVO_D7, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x140, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc0c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D9, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x138, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc04),
+ CV1800_GENERAL_PIN(PIN_USB_ID, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0fc, 3,
+ CV1800_PINCONF_AREA_SYS, 0x814),
+ CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x130, 7),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc74),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc70),
+ CV1800_GENERAL_PIN(PIN_CAM_PD0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x004, 4,
+ CV1800_PINCONF_AREA_SYS, 0xb04),
+ CV1800_GENERAL_PIN(PIN_CAM_MCLK0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x000, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb00),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x188, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc54),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x180, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc4c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x174, 7,
+ CV1800_PINCONF_AREA_SYS, 0x114, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc40),
+ CV1800_GENERAL_PIN(PIN_MIPIRX5P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x168, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc34),
+ CV1800_GENERAL_PIN(PIN_VIVO_CLK, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x160, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc2c),
+ CV1800_GENERAL_PIN(PIN_VIVO_D6, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x144, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc10),
+ CV1800_GENERAL_PIN(PIN_VIVO_D8, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x13c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc08),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_EN, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x100, 3,
+ CV1800_PINCONF_AREA_SYS, 0x818),
+ CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x12c, 7),
+ CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1cc, 5,
+ CV1800_PINCONF_AREA_SYS, 0xc8c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc7c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1ac, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc78),
+ CV1800_GENERAL_PIN(PIN_CAM_MCLK1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x00c, 4,
+ CV1800_PINCONF_AREA_SYS, 0xb0c),
+ CV1800_GENERAL_PIN(PIN_IIC3_SCL, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x014, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb14),
+ CV1800_GENERAL_PIN(PIN_VIVO_D4, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x14c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc18),
+ CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x128, 7),
+ CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_EPHY,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x124, 7),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc84),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc80),
+ CV1800_GENERAL_PIN(PIN_CAM_PD1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x010, 6,
+ CV1800_PINCONF_AREA_SYS, 0xb10),
+ CV1800_GENERAL_PIN(PIN_CAM_RST0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x008, 6,
+ CV1800_PINCONF_AREA_SYS, 0xb08),
+ CV1800_GENERAL_PIN(PIN_VIVO_D0, VDDIO_VIVO,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x15c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc28),
+ CV1800_GENERAL_PIN(PIN_ADC1, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f8, 4,
+ CV1800_PINCONF_AREA_SYS, 0x810),
+ CV1800_GENERAL_PIN(PIN_ADC2, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f4, 7,
+ CV1800_PINCONF_AREA_SYS, 0x80c),
+ CV1800_GENERAL_PIN(PIN_ADC3, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f0, 7,
+ CV1800_PINCONF_AREA_SYS, 0x808),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTL, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c4, 5),
+ CV1800_GENERAL_PIN(PIN_IIC3_SDA, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x018, 3,
+ CV1800_PINCONF_AREA_SYS, 0xb18),
+ CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x05c),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c8, 6),
+ CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x058),
+ CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x06c),
+ CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x068),
+ CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1bc, 5),
+ CV1800_GENERAL_PIN(PIN_RSTN, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0e8, 0,
+ CV1800_PINCONF_AREA_SYS, 0x800),
+ CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDDIO18_1,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ec, 3,
+ CV1800_PINCONF_AREA_SYS, 0x804),
+ CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x060),
+ CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0dc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x064),
+ CV1800_FUNC_PIN(PIN_AUD_AINR_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c0, 6),
+ CV1800_GENERAL_PIN(PIN_IIC2_SCL, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x040),
+ CV1800_GENERAL_PIN(PIN_IIC2_SDA, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0bc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x044),
+ CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x034, 3,
+ CV1800_PINCONF_AREA_SYS, 0x900),
+ CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x028, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa0c),
+ CV1800_GENERAL_PIN(PIN_UART2_RX, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x050),
+ CV1800_GENERAL_PIN(PIN_UART2_CTS, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0cc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x054),
+ CV1800_GENERAL_PIN(PIN_UART2_TX, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x048),
+ CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x01c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa00),
+ CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x024, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa08),
+ CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x020, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa04),
+ CV1800_GENERAL_PIN(PIN_CLK32K, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x038),
+ CV1800_GENERAL_PIN(PIN_UART2_RTS, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0c4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x04c),
+ CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x030, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa14),
+ CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x02c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa10),
+ CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x044, 7,
+ CV1800_PINCONF_AREA_SYS, 0x910),
+ CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x040, 7,
+ CV1800_PINCONF_AREA_SYS, 0x90c),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TRST, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x06c, 6,
+ CV1800_PINCONF_AREA_SYS, 0x938),
+ CV1800_GENERAL_PIN(PIN_PWR_ON, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x09c, 7,
+ CV1800_PINCONF_AREA_RTC, 0x024),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ac, 7,
+ CV1800_PINCONF_AREA_RTC, 0x034),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a4, 4,
+ CV1800_PINCONF_AREA_RTC, 0x02c),
+ CV1800_GENERAL_PIN(PIN_CLK25M, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0b4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x03c),
+ CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x038, 3,
+ CV1800_PINCONF_AREA_SYS, 0x904),
+ CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x03c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x908),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x068, 7,
+ CV1800_PINCONF_AREA_SYS, 0x934),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x064, 7,
+ CV1800_PINCONF_AREA_SYS, 0x930),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x094, 7,
+ CV1800_PINCONF_AREA_RTC, 0x01c),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x090, 7,
+ CV1800_PINCONF_AREA_RTC, 0x018),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x030),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x058, 3,
+ CV1800_PINCONF_AREA_SYS, 0x924),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x054, 3,
+ CV1800_PINCONF_AREA_SYS, 0x920),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x04c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x918),
+ CV1800_GENERAL_PIN(PIN_EMMC_RSTN, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x048, 4,
+ CV1800_PINCONF_AREA_SYS, 0x914),
+ CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x078, 7,
+ CV1800_PINCONF_AREA_SYS, 0x944),
+ CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x074, 7,
+ CV1800_PINCONF_AREA_SYS, 0x940),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ3, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x08c, 3,
+ CV1800_PINCONF_AREA_RTC, 0x010),
+ CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x07c, 0,
+ CV1800_PINCONF_AREA_RTC, 0x000),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x084, 3,
+ CV1800_PINCONF_AREA_RTC, 0x008),
+ CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x098, 7,
+ CV1800_PINCONF_AREA_RTC, 0x020),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x060, 3,
+ CV1800_PINCONF_AREA_SYS, 0x92c),
+ CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x05c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x928),
+ CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x050, 3,
+ CV1800_PINCONF_AREA_SYS, 0x91c),
+ CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x070, 7,
+ CV1800_PINCONF_AREA_SYS, 0x93c),
+ CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1d0, 4,
+ CV1800_PINCONF_AREA_RTC, 0x0e0),
+ CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x080, 0,
+ CV1800_PINCONF_AREA_RTC, 0x004),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x088, 3,
+ CV1800_PINCONF_AREA_RTC, 0x00c),
+ CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a0, 0,
+ CV1800_PINCONF_AREA_RTC, 0x028),
+};
+
+static const struct cv1800_pinctrl_data sg2000_pindata = {
+ .pins = sg2000_pins,
+ .pindata = sg2000_pin_data,
+ .pdnames = sg2000_power_domain_desc,
+ .vddio_ops = &sg2000_vddio_cfg_ops,
+ .npins = ARRAY_SIZE(sg2000_pins),
+ .npd = ARRAY_SIZE(sg2000_power_domain_desc),
+};
+
+static const struct of_device_id sg2000_pinctrl_ids[] = {
+ { .compatible = "sophgo,sg2000-pinctrl", .data = &sg2000_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sg2000_pinctrl_ids);
+
+static struct platform_driver sg2000_pinctrl_driver = {
+ .probe = cv1800_pinctrl_probe,
+ .driver = {
+ .name = "sg2000-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = sg2000_pinctrl_ids,
+ },
+};
+module_platform_driver(sg2000_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the SG2000 series SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sophgo/pinctrl-sg2002.c b/drivers/pinctrl/sophgo/pinctrl-sg2002.c
new file mode 100644
index 000000000000..5c49208dcb59
--- /dev/null
+++ b/drivers/pinctrl/sophgo/pinctrl-sg2002.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2002 SoC pinctrl driver.
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/pinctrl-sg2002.h>
+
+#include "pinctrl-cv18xx.h"
+
+enum SG2002_POWER_DOMAIN {
+ VDD18A_MIPI = 0,
+ VDD18A_USB_PLL_ETH = 1,
+ VDDIO_RTC = 2,
+ VDDIO_SD0_EMMC = 3,
+ VDDIO_SD1 = 4
+};
+
+static const char *const sg2002_power_domain_desc[] = {
+ [VDD18A_MIPI] = "VDD18A_MIPI",
+ [VDD18A_USB_PLL_ETH] = "VDD18A_USB_PLL_ETH",
+ [VDDIO_RTC] = "VDDIO_RTC",
+ [VDDIO_SD0_EMMC] = "VDDIO_SD0_EMMC",
+ [VDDIO_SD1] = "VDDIO_SD1",
+};
+
+static int sg2002_get_pull_up(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 79000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 60000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 60000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sg2002_get_pull_down(struct cv1800_pin *pin, const u32 *psmap)
+{
+ u32 pstate = psmap[pin->power_domain];
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+
+ if (type == IO_TYPE_1V8_ONLY)
+ return 87000;
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8)
+ return 61000;
+ if (pstate == PIN_POWER_STATE_3V3)
+ return 62000;
+
+ return -EINVAL;
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 sg2002_1v8_oc_map[] = {
+ 12800,
+ 25300,
+ 37400,
+ 49000
+};
+
+static const u32 sg2002_18od33_1v8_oc_map[] = {
+ 7800,
+ 11700,
+ 15500,
+ 19200,
+ 23000,
+ 26600,
+ 30200,
+ 33700
+};
+
+static const u32 sg2002_18od33_3v3_oc_map[] = {
+ 5500,
+ 8200,
+ 10800,
+ 13400,
+ 16100,
+ 18700,
+ 21200,
+ 23700
+};
+
+static const u32 sg2002_eth_oc_map[] = {
+ 15700,
+ 17800
+};
+
+static int sg2002_get_oc_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = sg2002_1v8_oc_map;
+ return ARRAY_SIZE(sg2002_1v8_oc_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = sg2002_18od33_1v8_oc_map;
+ return ARRAY_SIZE(sg2002_18od33_1v8_oc_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = sg2002_18od33_3v3_oc_map;
+ return ARRAY_SIZE(sg2002_18od33_3v3_oc_map);
+ }
+ }
+
+ if (type == IO_TYPE_ETH) {
+ *map = sg2002_eth_oc_map;
+ return ARRAY_SIZE(sg2002_eth_oc_map);
+ }
+
+ return -ENOTSUPP;
+}
+
+static const u32 sg2002_1v8_schmitt_map[] = {
+ 0,
+ 970000,
+ 1040000
+};
+
+static const u32 sg2002_18od33_1v8_schmitt_map[] = {
+ 0,
+ 1070000
+};
+
+static const u32 sg2002_18od33_3v3_schmitt_map[] = {
+ 0,
+ 1100000
+};
+
+static int sg2002_get_schmitt_map(struct cv1800_pin *pin, const u32 *psmap,
+ const u32 **map)
+{
+ enum cv1800_pin_io_type type = cv1800_pin_io_type(pin);
+ u32 pstate = psmap[pin->power_domain];
+
+ if (type == IO_TYPE_1V8_ONLY) {
+ *map = sg2002_1v8_schmitt_map;
+ return ARRAY_SIZE(sg2002_1v8_schmitt_map);
+ }
+
+ if (type == IO_TYPE_1V8_OR_3V3) {
+ if (pstate == PIN_POWER_STATE_1V8) {
+ *map = sg2002_18od33_1v8_schmitt_map;
+ return ARRAY_SIZE(sg2002_18od33_1v8_schmitt_map);
+ } else if (pstate == PIN_POWER_STATE_3V3) {
+ *map = sg2002_18od33_3v3_schmitt_map;
+ return ARRAY_SIZE(sg2002_18od33_3v3_schmitt_map);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct cv1800_vddio_cfg_ops sg2002_vddio_cfg_ops = {
+ .get_pull_up = sg2002_get_pull_up,
+ .get_pull_down = sg2002_get_pull_down,
+ .get_oc_map = sg2002_get_oc_map,
+ .get_schmitt_map = sg2002_get_schmitt_map,
+};
+
+static const struct pinctrl_pin_desc sg2002_pins[] = {
+ PINCTRL_PIN(PIN_AUD_AINL_MIC, "AUD_AINL_MIC"),
+ PINCTRL_PIN(PIN_AUD_AOUTR, "AUD_AOUTR"),
+ PINCTRL_PIN(PIN_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PIN_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PIN_SD0_D0, "SD0_D0"),
+ PINCTRL_PIN(PIN_SD0_D1, "SD0_D1"),
+ PINCTRL_PIN(PIN_SD0_D2, "SD0_D2"),
+ PINCTRL_PIN(PIN_SD0_D3, "SD0_D3"),
+ PINCTRL_PIN(PIN_SD0_CD, "SD0_CD"),
+ PINCTRL_PIN(PIN_SD0_PWR_EN, "SD0_PWR_EN"),
+ PINCTRL_PIN(PIN_SPK_EN, "SPK_EN"),
+ PINCTRL_PIN(PIN_UART0_TX, "UART0_TX"),
+ PINCTRL_PIN(PIN_UART0_RX, "UART0_RX"),
+ PINCTRL_PIN(PIN_EMMC_DAT2, "EMMC_DAT2"),
+ PINCTRL_PIN(PIN_EMMC_CLK, "EMMC_CLK"),
+ PINCTRL_PIN(PIN_EMMC_DAT0, "EMMC_DAT0"),
+ PINCTRL_PIN(PIN_EMMC_DAT3, "EMMC_DAT3"),
+ PINCTRL_PIN(PIN_EMMC_CMD, "EMMC_CMD"),
+ PINCTRL_PIN(PIN_EMMC_DAT1, "EMMC_DAT1"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TMS, "JTAG_CPU_TMS"),
+ PINCTRL_PIN(PIN_JTAG_CPU_TCK, "JTAG_CPU_TCK"),
+ PINCTRL_PIN(PIN_IIC0_SCL, "IIC0_SCL"),
+ PINCTRL_PIN(PIN_IIC0_SDA, "IIC0_SDA"),
+ PINCTRL_PIN(PIN_AUX0, "AUX0"),
+ PINCTRL_PIN(PIN_GPIO_ZQ, "GPIO_ZQ"),
+ PINCTRL_PIN(PIN_PWR_VBAT_DET, "PWR_VBAT_DET"),
+ PINCTRL_PIN(PIN_PWR_RSTN, "PWR_RSTN"),
+ PINCTRL_PIN(PIN_PWR_SEQ1, "PWR_SEQ1"),
+ PINCTRL_PIN(PIN_PWR_SEQ2, "PWR_SEQ2"),
+ PINCTRL_PIN(PIN_PWR_WAKEUP0, "PWR_WAKEUP0"),
+ PINCTRL_PIN(PIN_PWR_BUTTON1, "PWR_BUTTON1"),
+ PINCTRL_PIN(PIN_XTAL_XIN, "XTAL_XIN"),
+ PINCTRL_PIN(PIN_PWR_GPIO0, "PWR_GPIO0"),
+ PINCTRL_PIN(PIN_PWR_GPIO1, "PWR_GPIO1"),
+ PINCTRL_PIN(PIN_PWR_GPIO2, "PWR_GPIO2"),
+ PINCTRL_PIN(PIN_SD1_D3, "SD1_D3"),
+ PINCTRL_PIN(PIN_SD1_D2, "SD1_D2"),
+ PINCTRL_PIN(PIN_SD1_D1, "SD1_D1"),
+ PINCTRL_PIN(PIN_SD1_D0, "SD1_D0"),
+ PINCTRL_PIN(PIN_SD1_CMD, "SD1_CMD"),
+ PINCTRL_PIN(PIN_SD1_CLK, "SD1_CLK"),
+ PINCTRL_PIN(PIN_PWM0_BUCK, "PWM0_BUCK"),
+ PINCTRL_PIN(PIN_ADC1, "ADC1"),
+ PINCTRL_PIN(PIN_USB_VBUS_DET, "USB_VBUS_DET"),
+ PINCTRL_PIN(PIN_ETH_TXP, "ETH_TXP"),
+ PINCTRL_PIN(PIN_ETH_TXM, "ETH_TXM"),
+ PINCTRL_PIN(PIN_ETH_RXP, "ETH_RXP"),
+ PINCTRL_PIN(PIN_ETH_RXM, "ETH_RXM"),
+ PINCTRL_PIN(PIN_GPIO_RTX, "GPIO_RTX"),
+ PINCTRL_PIN(PIN_MIPIRX4N, "MIPIRX4N"),
+ PINCTRL_PIN(PIN_MIPIRX4P, "MIPIRX4P"),
+ PINCTRL_PIN(PIN_MIPIRX3N, "MIPIRX3N"),
+ PINCTRL_PIN(PIN_MIPIRX3P, "MIPIRX3P"),
+ PINCTRL_PIN(PIN_MIPIRX2N, "MIPIRX2N"),
+ PINCTRL_PIN(PIN_MIPIRX2P, "MIPIRX2P"),
+ PINCTRL_PIN(PIN_MIPIRX1N, "MIPIRX1N"),
+ PINCTRL_PIN(PIN_MIPIRX1P, "MIPIRX1P"),
+ PINCTRL_PIN(PIN_MIPIRX0N, "MIPIRX0N"),
+ PINCTRL_PIN(PIN_MIPIRX0P, "MIPIRX0P"),
+ PINCTRL_PIN(PIN_MIPI_TXM2, "MIPI_TXM2"),
+ PINCTRL_PIN(PIN_MIPI_TXP2, "MIPI_TXP2"),
+ PINCTRL_PIN(PIN_MIPI_TXM1, "MIPI_TXM1"),
+ PINCTRL_PIN(PIN_MIPI_TXP1, "MIPI_TXP1"),
+ PINCTRL_PIN(PIN_MIPI_TXM0, "MIPI_TXM0"),
+ PINCTRL_PIN(PIN_MIPI_TXP0, "MIPI_TXP0"),
+};
+
+static const struct cv1800_pin sg2002_pin_data[ARRAY_SIZE(sg2002_pins)] = {
+ CV1800_FUNC_PIN(PIN_AUD_AINL_MIC, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1bc, 5),
+ CV1800_FUNC_PIN(PIN_AUD_AOUTR, VDD18A_MIPI,
+ IO_TYPE_AUDIO,
+ CV1800_PINCONF_AREA_SYS, 0x1c8, 6),
+ CV1800_GENERAL_PIN(PIN_SD0_CLK, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x01c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa00),
+ CV1800_GENERAL_PIN(PIN_SD0_CMD, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x020, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa04),
+ CV1800_GENERAL_PIN(PIN_SD0_D0, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x024, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa08),
+ CV1800_GENERAL_PIN(PIN_SD0_D1, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x028, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa0c),
+ CV1800_GENERAL_PIN(PIN_SD0_D2, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x02c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa10),
+ CV1800_GENERAL_PIN(PIN_SD0_D3, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x030, 7,
+ CV1800_PINCONF_AREA_SYS, 0xa14),
+ CV1800_GENERAL_PIN(PIN_SD0_CD, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x034, 3,
+ CV1800_PINCONF_AREA_SYS, 0x900),
+ CV1800_GENERAL_PIN(PIN_SD0_PWR_EN, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x038, 3,
+ CV1800_PINCONF_AREA_SYS, 0x904),
+ CV1800_GENERAL_PIN(PIN_SPK_EN, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x03c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x908),
+ CV1800_GENERAL_PIN(PIN_UART0_TX, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x040, 7,
+ CV1800_PINCONF_AREA_SYS, 0x90c),
+ CV1800_GENERAL_PIN(PIN_UART0_RX, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x044, 7,
+ CV1800_PINCONF_AREA_SYS, 0x910),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT2, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x04c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x918),
+ CV1800_GENERAL_PIN(PIN_EMMC_CLK, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x050, 3,
+ CV1800_PINCONF_AREA_SYS, 0x91c),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT0, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x054, 3,
+ CV1800_PINCONF_AREA_SYS, 0x920),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT3, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x058, 3,
+ CV1800_PINCONF_AREA_SYS, 0x924),
+ CV1800_GENERAL_PIN(PIN_EMMC_CMD, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x05c, 3,
+ CV1800_PINCONF_AREA_SYS, 0x928),
+ CV1800_GENERAL_PIN(PIN_EMMC_DAT1, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x060, 3,
+ CV1800_PINCONF_AREA_SYS, 0x92c),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TMS, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x064, 7,
+ CV1800_PINCONF_AREA_SYS, 0x930),
+ CV1800_GENERAL_PIN(PIN_JTAG_CPU_TCK, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x068, 7,
+ CV1800_PINCONF_AREA_SYS, 0x934),
+ CV1800_GENERAL_PIN(PIN_IIC0_SCL, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x070, 7,
+ CV1800_PINCONF_AREA_SYS, 0x93c),
+ CV1800_GENERAL_PIN(PIN_IIC0_SDA, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x074, 7,
+ CV1800_PINCONF_AREA_SYS, 0x940),
+ CV1800_GENERAL_PIN(PIN_AUX0, VDDIO_SD0_EMMC,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x078, 7,
+ CV1800_PINCONF_AREA_SYS, 0x944),
+ CV1800_GENERAL_PIN(PIN_GPIO_ZQ, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1d0, 4,
+ CV1800_PINCONF_AREA_RTC, 0x0e0),
+ CV1800_GENERAL_PIN(PIN_PWR_VBAT_DET, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x07c, 0,
+ CV1800_PINCONF_AREA_RTC, 0x000),
+ CV1800_GENERAL_PIN(PIN_PWR_RSTN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x080, 0,
+ CV1800_PINCONF_AREA_RTC, 0x004),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x084, 3,
+ CV1800_PINCONF_AREA_RTC, 0x008),
+ CV1800_GENERAL_PIN(PIN_PWR_SEQ2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x088, 3,
+ CV1800_PINCONF_AREA_RTC, 0x00c),
+ CV1800_GENERAL_PIN(PIN_PWR_WAKEUP0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x090, 7,
+ CV1800_PINCONF_AREA_RTC, 0x018),
+ CV1800_GENERAL_PIN(PIN_PWR_BUTTON1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x098, 7,
+ CV1800_PINCONF_AREA_RTC, 0x020),
+ CV1800_GENERAL_PIN(PIN_XTAL_XIN, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a0, 0,
+ CV1800_PINCONF_AREA_RTC, 0x028),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO0, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a4, 4,
+ CV1800_PINCONF_AREA_RTC, 0x02c),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO1, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0a8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x030),
+ CV1800_GENERAL_PIN(PIN_PWR_GPIO2, VDDIO_RTC,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ac, 7,
+ CV1800_PINCONF_AREA_RTC, 0x034),
+ CV1800_GENERAL_PIN(PIN_SD1_D3, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x058),
+ CV1800_GENERAL_PIN(PIN_SD1_D2, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x05c),
+ CV1800_GENERAL_PIN(PIN_SD1_D1, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0d8, 7,
+ CV1800_PINCONF_AREA_RTC, 0x060),
+ CV1800_GENERAL_PIN(PIN_SD1_D0, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0dc, 7,
+ CV1800_PINCONF_AREA_RTC, 0x064),
+ CV1800_GENERAL_PIN(PIN_SD1_CMD, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e0, 7,
+ CV1800_PINCONF_AREA_RTC, 0x068),
+ CV1800_GENERAL_PIN(PIN_SD1_CLK, VDDIO_SD1,
+ IO_TYPE_1V8_OR_3V3,
+ CV1800_PINCONF_AREA_SYS, 0x0e4, 7,
+ CV1800_PINCONF_AREA_RTC, 0x06c),
+ CV1800_GENERAL_PIN(PIN_PWM0_BUCK, VDD18A_USB_PLL_ETH,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0ec, 3,
+ CV1800_PINCONF_AREA_SYS, 0x804),
+ CV1800_GENERAL_PIN(PIN_ADC1, VDD18A_USB_PLL_ETH,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x0f8, 4,
+ CV1800_PINCONF_AREA_SYS, 0x810),
+ CV1800_GENERAL_PIN(PIN_USB_VBUS_DET, VDD18A_USB_PLL_ETH,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x108, 5,
+ CV1800_PINCONF_AREA_SYS, 0x820),
+ CV1800_FUNC_PIN(PIN_ETH_TXP, VDD18A_USB_PLL_ETH,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x124, 7),
+ CV1800_FUNC_PIN(PIN_ETH_TXM, VDD18A_USB_PLL_ETH,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x128, 7),
+ CV1800_FUNC_PIN(PIN_ETH_RXP, VDD18A_USB_PLL_ETH,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x12c, 7),
+ CV1800_FUNC_PIN(PIN_ETH_RXM, VDD18A_USB_PLL_ETH,
+ IO_TYPE_ETH,
+ CV1800_PINCONF_AREA_SYS, 0x130, 7),
+ CV1800_GENERAL_PIN(PIN_GPIO_RTX, VDD18A_USB_PLL_ETH,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1cc, 5,
+ CV1800_PINCONF_AREA_SYS, 0xc8c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x16c, 7,
+ CV1800_PINCONF_AREA_SYS, 0x120, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc38),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX4P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x170, 7,
+ CV1800_PINCONF_AREA_SYS, 0x11c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc3c),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x174, 7,
+ CV1800_PINCONF_AREA_SYS, 0x114, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc40),
+ CV1800_GENERATE_PIN_MUX2(PIN_MIPIRX3P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x178, 7,
+ CV1800_PINCONF_AREA_SYS, 0x118, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc44),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x17c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc48),
+ CV1800_GENERAL_PIN(PIN_MIPIRX2P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x180, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc4c),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x184, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc50),
+ CV1800_GENERAL_PIN(PIN_MIPIRX1P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x188, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc54),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0N, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x18c, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc58),
+ CV1800_GENERAL_PIN(PIN_MIPIRX0P, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x190, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc5c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc70),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP2, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1a8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc74),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1ac, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc78),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP1, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b0, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc7c),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXM0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b4, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc80),
+ CV1800_GENERAL_PIN(PIN_MIPI_TXP0, VDD18A_MIPI,
+ IO_TYPE_1V8_ONLY,
+ CV1800_PINCONF_AREA_SYS, 0x1b8, 7,
+ CV1800_PINCONF_AREA_SYS, 0xc84),
+};
+
+static const struct cv1800_pinctrl_data sg2002_pindata = {
+ .pins = sg2002_pins,
+ .pindata = sg2002_pin_data,
+ .pdnames = sg2002_power_domain_desc,
+ .vddio_ops = &sg2002_vddio_cfg_ops,
+ .npins = ARRAY_SIZE(sg2002_pins),
+ .npd = ARRAY_SIZE(sg2002_power_domain_desc),
+};
+
+static const struct of_device_id sg2002_pinctrl_ids[] = {
+ { .compatible = "sophgo,sg2002-pinctrl", .data = &sg2002_pindata },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sg2002_pinctrl_ids);
+
+static struct platform_driver sg2002_pinctrl_driver = {
+ .probe = cv1800_pinctrl_probe,
+ .driver = {
+ .name = "sg2002-pinctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = sg2002_pinctrl_ids,
+ },
+};
+module_platform_driver(sg2002_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the SG2002 series SoC");
+MODULE_LICENSE("GPL");
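The sg2002_get_oc_map()/sg2002_get_schmitt_map() callbacks above return a table of supported values together with its length, selected by the pin's I/O type and the current power-domain state. A minimal sketch of how such a table could be consumed, assuming the register selector is simply the table index (illustrative name, not this driver's API):

static int cv1800_map_to_sel(const u32 *map, int len, u32 target)
{
	int i, sel = -EINVAL;

	/* pick the largest table entry not exceeding the request */
	for (i = 0; i < len; i++) {
		if (map[i] <= target)
			sel = i;	/* the selector is the table index */
	}

	return sel;
}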
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 73bcf806af0e..bde67ee31417 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1603,30 +1603,26 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
}
ret = of_clk_get_parent_count(node);
- clk = devm_clk_get(&pdev->dev, ret == 1 ? NULL : "apb");
+ clk = devm_clk_get_enabled(&pdev->dev, ret == 1 ? NULL : "apb");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto gpiochip_error;
}
- ret = clk_prepare_enable(clk);
- if (ret)
- goto gpiochip_error;
-
pctl->irq = devm_kcalloc(&pdev->dev,
pctl->desc->irq_banks,
sizeof(*pctl->irq),
GFP_KERNEL);
if (!pctl->irq) {
ret = -ENOMEM;
- goto clk_error;
+ goto gpiochip_error;
}
for (i = 0; i < pctl->desc->irq_banks; i++) {
pctl->irq[i] = platform_get_irq(pdev, i);
if (pctl->irq[i] < 0) {
ret = pctl->irq[i];
- goto clk_error;
+ goto gpiochip_error;
}
}
@@ -1637,7 +1633,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
if (!pctl->domain) {
dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
ret = -ENOMEM;
- goto clk_error;
+ goto gpiochip_error;
}
for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) {
@@ -1669,8 +1665,6 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
return 0;
-clk_error:
- clk_disable_unprepare(clk);
gpiochip_error:
gpiochip_remove(pctl->chip);
return ret;
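The sunxi hunk above folds devm_clk_get() plus clk_prepare_enable() into devm_clk_get_enabled(), whose devres action disables and unprepares the clock automatically; that is what lets the clk_error unwind label disappear. A minimal sketch of the pattern, with illustrative names:

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* prepared and enabled now, undone automatically on unbind */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* no manual unwind needed */

	return 0;
}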
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index f5e5a23d2226..019b302db2b0 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -82,7 +82,7 @@ struct ti_iodelay_reg_data {
u32 reg_start_offset;
u32 reg_nr_per_pin;
- struct regmap_config *regmap_config;
+ const struct regmap_config *regmap_config;
};
/**
@@ -274,6 +274,22 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod,
}
/**
+ * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device
+ * @data: IODelay device
+ *
+ * Deinitialize the IODelay device (basically just lock the region back up).
+ */
+static void ti_iodelay_pinconf_deinit_dev(void *data)
+{
+ struct ti_iodelay_device *iod = data;
+ const struct ti_iodelay_reg_data *reg = iod->reg_data;
+
+ /* lock the iodelay region back again */
+ regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
+ reg->global_lock_mask, reg->global_lock_val);
+}
+
+/**
* ti_iodelay_pinconf_init_dev() - Initialize IODelay device
* @iod: iodelay device
*
@@ -295,6 +311,11 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)
if (r)
return r;
+ r = devm_add_action_or_reset(iod->dev, ti_iodelay_pinconf_deinit_dev,
+ iod);
+ if (r)
+ return r;
+
/* Read up Recalibration sequence done by bootloader */
r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val);
if (r)
@@ -354,21 +375,6 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)
}
/**
- * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device
- * @iod: IODelay device
- *
- * Deinitialize the IODelay device (basically just lock the region back up.
- */
-static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod)
-{
- const struct ti_iodelay_reg_data *reg = iod->reg_data;
-
- /* lock the iodelay region back again */
- regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
- reg->global_lock_mask, reg->global_lock_val);
-}
-
-/**
* ti_iodelay_get_pingroup() - Find the group mapped by a group selector
* @iod: iodelay device
* @selector: Group Selector
@@ -770,14 +776,14 @@ static int ti_iodelay_alloc_pins(struct device *dev,
return 0;
}
-static struct regmap_config dra7_iodelay_regmap_config = {
+static const struct regmap_config dra7_iodelay_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0xd1c,
};
-static struct ti_iodelay_reg_data dra7_iodelay_data = {
+static const struct ti_iodelay_reg_data dra7_iodelay_data = {
.signature_mask = 0x0003f000,
.signature_value = 0x29,
.lock_mask = 0x00000400,
@@ -877,27 +883,11 @@ static int ti_iodelay_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, iod);
-
return pinctrl_enable(iod->pctl);
}
-/**
- * ti_iodelay_remove() - standard remove
- * @pdev: platform device
- */
-static void ti_iodelay_remove(struct platform_device *pdev)
-{
- struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
-
- ti_iodelay_pinconf_deinit_dev(iod);
-
- /* Expect other allocations to be freed by devm */
-}
-
static struct platform_driver ti_iodelay_driver = {
.probe = ti_iodelay_probe,
- .remove_new = ti_iodelay_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ti_iodelay_of_match,
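The ti-iodelay change moves the re-lock from an explicit .remove() callback into devm_add_action_or_reset(), registered right after the unlock succeeds, so the region is re-locked on both probe failure and driver unbind. A sketch of that devres idiom, with hypothetical foo_* names:

static void foo_relock(void *data)
{
	struct foo_priv *priv = data;

	/* the FOO_* constants are illustrative */
	regmap_update_bits(priv->regmap, FOO_LOCK_REG, FOO_LOCK_MASK,
			   FOO_LOCK_VAL);
}

static int foo_init(struct device *dev, struct foo_priv *priv)
{
	int ret;

	ret = foo_unlock(priv);		/* hypothetical unlock step */
	if (ret)
		return ret;

	/* runs foo_relock() on probe failure and on unbind */
	return devm_add_action_or_reset(dev, foo_relock, priv);
}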
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 4525ad1b59f4..839154c46e46 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -302,7 +302,6 @@ static const struct file_operations cros_ec_console_log_fops = {
.owner = THIS_MODULE,
.open = cros_ec_console_log_open,
.read = cros_ec_console_log_read,
- .llseek = no_llseek,
.poll = cros_ec_console_log_poll,
.release = cros_ec_console_log_release,
};
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index 983f2fa44ba5..99486086af6a 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -156,7 +156,6 @@ static const struct file_operations fops_raw = {
.owner = THIS_MODULE,
.read = raw_read,
.write = raw_write,
- .llseek = no_llseek,
};
#define CMD_KB_CHROME 0x88
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index bd1fb53ba028..196e46a1d489 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -403,7 +403,6 @@ static const struct file_operations event_fops = {
.poll = event_poll,
.read = event_read,
.release = event_release,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index 21d4cbbb009a..a87877e4300a 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -330,7 +330,6 @@ static const struct file_operations telem_fops = {
.write = telem_write,
.read = telem_read,
.release = telem_release,
- .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
index 07e065b9159f..165b1416230d 100644
--- a/drivers/platform/surface/surface_aggregator_cdev.c
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -670,7 +670,6 @@ static const struct file_operations ssam_controller_fops = {
.fasync = ssam_cdev_fasync,
.unlocked_ioctl = ssam_cdev_device_ioctl,
.compat_ioctl = ssam_cdev_device_ioctl,
- .llseek = no_llseek,
};
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 2de843b7ea70..89ca6b50e812 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -555,7 +555,6 @@ static const struct file_operations surface_dtx_fops = {
.fasync = surface_dtx_fasync,
.unlocked_ioctl = surface_dtx_ioctl,
.compat_ioctl = surface_dtx_ioctl,
- .llseek = no_llseek,
};
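The .llseek = no_llseek removals in the hunks above and below presumably follow the kernel-wide retirement of no_llseek: the VFS now withholds FMODE_LSEEK from files whose file_operations lack a .llseek hook, so lseek() fails with -ESPIPE exactly as no_llseek used to arrange, and the explicit initializer adds nothing. The resulting shape, with illustrative handlers:

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
	.read	= foo_read,	/* illustrative handlers */
	.poll	= foo_poll,
	/* no .llseek: the file is implicitly non-seekable */
};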
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index fece990af4a7..389d5a193e5d 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -75,6 +75,16 @@ config POWER_RESET_BRCMSTB
Say Y here if you have a Broadcom STB board and you wish
to have restart support.
+config POWER_RESET_EP93XX
+ bool "Cirrus EP93XX reset driver" if COMPILE_TEST
+ depends on MFD_SYSCON
+ default ARCH_EP93XX
+ help
+ This driver provides restart support for the Cirrus EP93XX SoC.
+
+ Say Y here if you have a Cirrus EP93XX SoC and you wish
+ to have restart support.
+
config POWER_RESET_GEMINI_POWEROFF
bool "Cortina Gemini power-off driver"
depends on ARCH_GEMINI || COMPILE_TEST
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index a95d1bd275d1..10782d32e1da 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_POWER_RESET_ATC260X) += atc260x-poweroff.o
obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
obj-$(CONFIG_POWER_RESET_BRCMKONA) += brcm-kona-reset.o
obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
+obj-$(CONFIG_POWER_RESET_EP93XX) += ep93xx-restart.o
obj-$(CONFIG_POWER_RESET_GEMINI_POWEROFF) += gemini-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
diff --git a/drivers/power/reset/ep93xx-restart.c b/drivers/power/reset/ep93xx-restart.c
new file mode 100644
index 000000000000..57cfb8620faf
--- /dev/null
+++ b/drivers/power/reset/ep93xx-restart.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Cirrus EP93xx SoC reset driver
+ *
+ * Copyright (C) 2021 Nikita Shubin <nikita.shubin@maquefel.me>
+ */
+
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+
+#define EP93XX_SYSCON_DEVCFG 0x80
+#define EP93XX_SYSCON_DEVCFG_SWRST BIT(31)
+
+struct ep93xx_restart {
+ struct ep93xx_regmap_adev *aux_dev;
+ struct notifier_block restart_handler;
+};
+
+static int ep93xx_restart_handle(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct ep93xx_restart *priv =
+ container_of(this, struct ep93xx_restart, restart_handler);
+ struct ep93xx_regmap_adev *aux = priv->aux_dev;
+
+ /* Issue the reboot */
+ aux->update_bits(aux->map, aux->lock, EP93XX_SYSCON_DEVCFG,
+ EP93XX_SYSCON_DEVCFG_SWRST, EP93XX_SYSCON_DEVCFG_SWRST);
+ aux->update_bits(aux->map, aux->lock, EP93XX_SYSCON_DEVCFG,
+ EP93XX_SYSCON_DEVCFG_SWRST, 0);
+
+ return NOTIFY_DONE;
+}
+
+static int ep93xx_reboot_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+ struct device *dev = &adev->dev;
+ struct ep93xx_restart *priv;
+ int err;
+
+ if (!rdev->update_bits)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->aux_dev = rdev;
+
+ priv->restart_handler.notifier_call = ep93xx_restart_handle;
+ priv->restart_handler.priority = 128;
+
+ err = register_restart_handler(&priv->restart_handler);
+ if (err)
+ return dev_err_probe(dev, err, "can't register restart notifier\n");
+
+ return 0;
+}
+
+static const struct auxiliary_device_id ep93xx_reboot_ids[] = {
+ {
+ .name = "soc_ep93xx.reset-ep93xx",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(auxiliary, ep93xx_reboot_ids);
+
+static struct auxiliary_driver ep93xx_reboot_driver = {
+ .probe = ep93xx_reboot_probe,
+ .id_table = ep93xx_reboot_ids,
+};
+module_auxiliary_driver(ep93xx_reboot_driver);
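ep93xx-restart.c toggles the SWRST bit through regmap helpers exported by its parent auxiliary device; the restart plumbing itself is the standard notifier shape. A generic sketch with illustrative names (priority 128 is the conventional default level for restart handlers):

static int foo_restart(struct notifier_block *nb, unsigned long mode,
		       void *cmd)
{
	/* poke the SoC reset register here */
	return NOTIFY_DONE;
}

static struct notifier_block foo_restart_nb = {
	.notifier_call	= foo_restart,
	.priority	= 128,	/* default; higher priorities run first */
};

/* in probe: err = register_restart_handler(&foo_restart_nb); */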
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 63d03a0df5cc..abaffb4e1c1c 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -149,6 +149,9 @@ static void parport_attach(struct parport *port)
}
index = ida_alloc(&pps_client_index, GFP_KERNEL);
+ if (index < 0)
+ goto err_free_device;
+
memset(&pps_client_cb, 0, sizeof(pps_client_cb));
pps_client_cb.private = device;
pps_client_cb.irq_func = parport_irq;
@@ -159,7 +162,7 @@ static void parport_attach(struct parport *port)
index);
if (!device->pardev) {
pr_err("couldn't register with %s\n", port->name);
- goto err_free;
+ goto err_free_ida;
}
if (parport_claim_or_block(device->pardev) < 0) {
@@ -187,8 +190,9 @@ err_release_dev:
parport_release(device->pardev);
err_unregister_dev:
parport_unregister_device(device->pardev);
-err_free:
+err_free_ida:
ida_free(&pps_client_index, index);
+err_free_device:
kfree(device);
}
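The pps_parport fix adds the missing ida_alloc() failure check and renames the error labels so that each exit frees exactly what was set up before the failure. The underlying idiom, with hypothetical names:

static DEFINE_IDA(foo_ida);

static int foo_attach(void)
{
	int id, ret;

	id = ida_alloc(&foo_ida, GFP_KERNEL);
	if (id < 0)
		return id;		/* negative errno on failure */

	ret = foo_register(id);		/* hypothetical next step */
	if (ret) {
		ida_free(&foo_ida, id);	/* unwind in reverse order */
		return ret;
	}

	return 0;
}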
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 5d19baae6a38..25d47907db17 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -319,7 +319,6 @@ static int pps_cdev_release(struct inode *inode, struct file *file)
static const struct file_operations pps_cdev_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.poll = pps_cdev_poll,
.fasync = pps_cdev_fasync,
.compat_ioctl = pps_cdev_compat_ioctl,
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 666f2954133c..994f89ac43b4 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -17,6 +17,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -26,8 +27,6 @@
#include <asm/div64.h>
-#include <linux/soc/cirrus/ep93xx.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
-
#define EP93XX_PWMx_TERM_COUNT 0x00
#define EP93XX_PWMx_DUTY_CYCLE 0x04
#define EP93XX_PWMx_ENABLE 0x08
@@ -43,20 +42,6 @@ static inline struct ep93xx_pwm *to_ep93xx_pwm(struct pwm_chip *chip)
return pwmchip_get_drvdata(chip);
}
-static int ep93xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct platform_device *pdev = to_platform_device(pwmchip_parent(chip));
-
- return ep93xx_pwm_acquire_gpio(pdev);
-}
-
-static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct platform_device *pdev = to_platform_device(pwmchip_parent(chip));
-
- ep93xx_pwm_release_gpio(pdev);
-}
-
static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
@@ -155,8 +140,6 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
static const struct pwm_ops ep93xx_pwm_ops = {
- .request = ep93xx_pwm_request,
- .free = ep93xx_pwm_free,
.apply = ep93xx_pwm_apply,
};
@@ -188,9 +171,16 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ep93xx_pwm_of_ids[] = {
+ { .compatible = "cirrus,ep9301-pwm" },
+ { /* sentinel */}
+};
+MODULE_DEVICE_TABLE(of, ep93xx_pwm_of_ids);
+
static struct platform_driver ep93xx_pwm_driver = {
.driver = {
.name = "ep93xx-pwm",
+ .of_match_table = ep93xx_pwm_of_ids,
},
.probe = ep93xx_pwm_probe,
};
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index dda2ada215b7..955e4e38477e 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -330,8 +330,7 @@ config STM32_RPROC
config TI_K3_DSP_REMOTEPROC
tristate "TI K3 DSP remoteproc support"
depends on ARCH_K3
- select MAILBOX
- select OMAP2PLUS_MBOX
+ depends on OMAP2PLUS_MBOX
help
Say m here to support TI's C66x and C71x DSP remote processor
subsystems on various TI K3 family of SoCs through the remote
@@ -340,11 +339,23 @@ config TI_K3_DSP_REMOTEPROC
It's safe to say N here if you're not interested in utilizing
the DSP slave processors.
+config TI_K3_M4_REMOTEPROC
+ tristate "TI K3 M4 remoteproc support"
+ depends on ARCH_OMAP2PLUS || ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's M4 remote processor subsystems
+ on various TI K3 family of SoCs through the remote processor
+ framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ a remote processor.
+
config TI_K3_R5_REMOTEPROC
tristate "TI K3 R5 remoteproc support"
depends on ARCH_K3
- select MAILBOX
- select OMAP2PLUS_MBOX
+ depends on OMAP2PLUS_MBOX
help
Say m here to support TI's R5F remote processor subsystems
on various TI K3 family of SoCs through the remote processor
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 91314a9b43ce..5ff4e2fee4ab 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -37,5 +37,6 @@ obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
+obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o
obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 9041a0e07fb2..8770d0cf1255 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -239,8 +239,6 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
struct da8xx_rproc *drproc;
struct rproc *rproc;
struct irq_data *irq_data;
- struct resource *bootreg_res;
- struct resource *chipsig_res;
struct clk *dsp_clk;
struct reset_control *dsp_reset;
void __iomem *chipsig;
@@ -258,15 +256,11 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
return -EINVAL;
}
- bootreg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "host1cfg");
- bootreg = devm_ioremap_resource(dev, bootreg_res);
+ bootreg = devm_platform_ioremap_resource_byname(pdev, "host1cfg");
if (IS_ERR(bootreg))
return PTR_ERR(bootreg);
- chipsig_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "chipsig");
- chipsig = devm_ioremap_resource(dev, chipsig_res);
+ chipsig = devm_platform_ioremap_resource_byname(pdev, "chipsig");
if (IS_ERR(chipsig))
return PTR_ERR(chipsig);
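devm_platform_ioremap_resource_byname(), used above and in the ingenic and st_slim hunks further down, merges the platform_get_resource_byname()/devm_ioremap_resource() pair into a single call with the same IS_ERR() error semantics:

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* "ctrl" is an illustrative resource name */
	base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}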
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 087506e21508..376187ad5754 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -509,7 +509,7 @@ static int imx_dsp_rproc_mbox_alloc(struct imx_dsp_rproc *priv)
struct mbox_client *cl;
int ret;
- if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ if (!of_property_present(dev->of_node, "mbox-names"))
return 0;
cl = &priv->cl;
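of_get_property(np, name, NULL) used purely as an existence test is better spelled of_property_present(), which returns a bool and makes the intent explicit; imx_rproc.c below receives the same conversion. The shape of the check:

/* bail out quietly when the optional property is absent */
if (!of_property_present(dev->of_node, "mbox-names"))
	return 0;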
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 144c8e9a642e..800015ff7ff9 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -18,6 +18,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/workqueue.h>
@@ -90,7 +91,7 @@ struct imx_rproc_mem {
#define ATT_CORE_MASK 0xffff
#define ATT_CORE(I) BIT((I))
-static int imx_rproc_xtr_mbox_init(struct rproc *rproc);
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block);
static void imx_rproc_free_mbox(struct rproc *rproc);
struct imx_rproc {
@@ -119,20 +120,16 @@ struct imx_rproc {
static const struct imx_rproc_att imx_rproc_att_imx93[] = {
/* dev addr , sys addr , size , flags */
/* TCM CODE NON-SECURE */
- { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x0FFC0000, 0x201C0000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM CODE SECURE */
- { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x1FFC0000, 0x201C0000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM SYS NON-SECURE*/
- { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x20000000, 0x20200000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM SYS SECURE*/
- { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x30000000, 0x20200000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* DDR */
{ 0x80000000, 0x80000000, 0x10000000, 0 },
@@ -210,11 +207,9 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
/* QSPI Code - alias */
{ 0x08000000, 0x08000000, 0x08000000, 0 },
/* DDR (Code) - alias */
- { 0x10000000, 0x80000000, 0x0FFE0000, 0 },
- /* TCML */
- { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM},
- /* TCMU */
- { 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM},
+ { 0x10000000, 0x40000000, 0x0FFE0000, 0 },
+ /* TCML/U */
+ { 0x1FFE0000, 0x007E0000, 0x00040000, ATT_OWN | ATT_IOMEM},
/* OCRAM_S */
{ 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
/* OCRAM */
@@ -339,6 +334,7 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
.att = imx_rproc_att_imx7ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
.method = IMX_RPROC_NONE,
+ .flags = IMX_RPROC_NEED_SYSTEM_OFF,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
@@ -375,7 +371,7 @@ static int imx_rproc_start(struct rproc *rproc)
struct arm_smccc_res res;
int ret;
- ret = imx_rproc_xtr_mbox_init(rproc);
+ ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
return ret;
@@ -635,7 +631,7 @@ static void imx_rproc_kick(struct rproc *rproc, int vqid)
static int imx_rproc_attach(struct rproc *rproc)
{
- return imx_rproc_xtr_mbox_init(rproc);
+ return imx_rproc_xtr_mbox_init(rproc, true);
}
static int imx_rproc_detach(struct rproc *rproc)
@@ -666,6 +662,17 @@ static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc
return (struct resource_table *)priv->rsc_table;
}
+static struct resource_table *
+imx_rproc_elf_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ if (priv->rsc_table)
+ return (struct resource_table *)priv->rsc_table;
+
+ return rproc_elf_find_loaded_rsc_table(rproc, fw);
+}
+
static const struct rproc_ops imx_rproc_ops = {
.prepare = imx_rproc_prepare,
.attach = imx_rproc_attach,
@@ -676,7 +683,7 @@ static const struct rproc_ops imx_rproc_ops = {
.da_to_va = imx_rproc_da_to_va,
.load = rproc_elf_load_segments,
.parse_fw = imx_rproc_parse_fw,
- .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .find_loaded_rsc_table = imx_rproc_elf_find_loaded_rsc_table,
.get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
@@ -789,7 +796,7 @@ static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
queue_work(priv->workqueue, &priv->rproc_work);
}
-static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block)
{
struct imx_rproc *priv = rproc->priv;
struct device *dev = priv->dev;
@@ -807,12 +814,12 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
if (priv->tx_ch && priv->rx_ch)
return 0;
- if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ if (!of_property_present(dev->of_node, "mbox-names"))
return 0;
cl = &priv->cl;
cl->dev = dev;
- cl->tx_block = true;
+ cl->tx_block = tx_block;
cl->tx_tout = 100;
cl->knows_txdone = false;
cl->rx_callback = imx_rproc_rx_callback;
@@ -1045,6 +1052,22 @@ static int imx_rproc_clk_enable(struct imx_rproc *priv)
return 0;
}
+static int imx_rproc_sys_off_handler(struct sys_off_data *data)
+{
+ struct rproc *rproc = data->cb_data;
+ int ret;
+
+ imx_rproc_free_mbox(rproc);
+
+ ret = imx_rproc_xtr_mbox_init(rproc, false);
+ if (ret) {
+ dev_err(&rproc->dev, "Failed to request non-blocking mbox\n");
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1076,7 +1099,9 @@ static int imx_rproc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ret = imx_rproc_xtr_mbox_init(rproc);
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+
+ ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
goto err_put_wkq;
@@ -1094,11 +1119,33 @@ static int imx_rproc_probe(struct platform_device *pdev)
if (ret)
goto err_put_scu;
- INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
-
if (rproc->state != RPROC_DETACHED)
rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
+ if (dcfg->flags & IMX_RPROC_NEED_SYSTEM_OFF) {
+ /*
+ * Set up the mailbox in non-blocking mode in the
+ * [SYS_OFF_MODE_POWER_OFF_PREPARE, SYS_OFF_MODE_RESTART_PREPARE]
+ * phase, before the [SYS_OFF_MODE_POWER_OFF, SYS_OFF_MODE_RESTART]
+ * atomic chain is invoked; see kernel/reboot.c.
+ */
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ imx_rproc_sys_off_handler, rproc);
+ if (ret) {
+ dev_err(dev, "register power off handler failure\n");
+ goto err_put_clk;
+ }
+
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ imx_rproc_sys_off_handler, rproc);
+ if (ret) {
+ dev_err(dev, "register restart handler failure\n");
+ goto err_put_clk;
+ }
+ }
+
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index 79a1b8956d14..17a7d051c531 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -26,6 +26,9 @@ enum imx_rproc_method {
IMX_RPROC_SCU_API,
};
+/* dcfg flags */
+#define IMX_RPROC_NEED_SYSTEM_OFF BIT(0)
+
struct imx_rproc_dcfg {
u32 src_reg;
u32 src_mask;
@@ -36,6 +39,7 @@ struct imx_rproc_dcfg {
const struct imx_rproc_att *att;
size_t att_size;
enum imx_rproc_method method;
+ u32 flags;
};
#endif /* _IMX_RPROC_H */
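The new IMX_RPROC_NEED_SYSTEM_OFF flag ties into the devm_register_sys_off_handler() calls added to imx_rproc.c above: PREPARE-phase handlers may still sleep, so the driver uses that window to swap its mailbox to non-blocking mode before the atomic power-off/restart chain runs. A sketch of the registration, with an illustrative callback and priv pointer:

static int foo_sys_off(struct sys_off_data *data)
{
	struct foo_priv *priv = data->cb_data;

	/* reconfigure anything that must not block in the atomic chain */
	return NOTIFY_DONE;
}

/* in probe: */
ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE,
				    SYS_OFF_PRIO_DEFAULT,
				    foo_sys_off, priv);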
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index 9902cce28692..1b78d8ddeacf 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -183,8 +183,7 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
vpu->dev = &pdev->dev;
platform_set_drvdata(pdev, vpu);
- mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux");
- vpu->aux_base = devm_ioremap_resource(dev, mem);
+ vpu->aux_base = devm_platform_ioremap_resource_byname(pdev, "aux");
if (IS_ERR(vpu->aux_base)) {
dev_err(dev, "Failed to ioremap\n");
return PTR_ERR(vpu->aux_base);
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 7e57b90bcaf8..8f0f7a4cfef2 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -366,8 +366,6 @@ static int keystone_rproc_probe(struct platform_device *pdev)
struct rproc *rproc;
int dsp_id;
char *fw_name = NULL;
- char *template = "keystone-dsp%d-fw";
- int name_len = 0;
int ret = 0;
if (!np) {
@@ -382,14 +380,12 @@ static int keystone_rproc_probe(struct platform_device *pdev)
}
/* construct a custom default fw name - subject to change in future */
- name_len = strlen(template); /* assuming a single digit alias */
- fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL);
+ fw_name = devm_kasprintf(dev, GFP_KERNEL, "keystone-dsp%d-fw", dsp_id);
if (!fw_name)
return -ENOMEM;
- snprintf(fw_name, name_len, template, dsp_id);
- rproc = rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, fw_name,
- sizeof(*ksproc));
+ rproc = devm_rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops,
+ fw_name, sizeof(*ksproc));
if (!rproc)
return -ENOMEM;
@@ -400,13 +396,11 @@ static int keystone_rproc_probe(struct platform_device *pdev)
ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc);
if (ret)
- goto free_rproc;
+ return ret;
ksproc->reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(ksproc->reset)) {
- ret = PTR_ERR(ksproc->reset);
- goto free_rproc;
- }
+ if (IS_ERR(ksproc->reset))
+ return PTR_ERR(ksproc->reset);
/* enable clock for accessing DSP internal memories */
pm_runtime_enable(dev);
@@ -471,8 +465,6 @@ disable_clk:
pm_runtime_put_sync(dev);
disable_rpm:
pm_runtime_disable(dev);
-free_rproc:
- rproc_free(rproc);
return ret;
}
@@ -484,7 +476,6 @@ static void keystone_rproc_remove(struct platform_device *pdev)
gpiod_put(ksproc->kick_gpio);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- rproc_free(ksproc->rproc);
of_reserved_mem_device_release(&pdev->dev);
}
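The keystone conversion drops a hand-rolled strlen()/devm_kzalloc()/snprintf() sequence, which by its own comment assumed a single-digit DSP id and would have truncated longer names, in favour of devm_kasprintf(), which sizes and allocates the buffer in one step:

fw_name = devm_kasprintf(dev, GFP_KERNEL, "keystone-dsp%d-fw", dsp_id);
if (!fw_name)
	return -ENOMEM;	/* buffer is sized to fit any dsp_id */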
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 88e7b84f223c..ef82835e98a4 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -829,6 +829,23 @@ static const struct adsp_data adsp_resource_init = {
.ssctl_id = 0x14,
};
+static const struct adsp_data sa8775p_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mbn",
+ .pas_id = 1,
+ .minidump_id = 5,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
static const struct adsp_data sdm845_adsp_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
@@ -942,6 +959,42 @@ static const struct adsp_data cdsp_resource_init = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sa8775p_cdsp0_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp0.mbn",
+ .pas_id = 18,
+ .minidump_id = 7,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sa8775p_cdsp1_resource = {
+ .crash_reason_smem = 633,
+ .firmware_name = "cdsp1.mbn",
+ .pas_id = 30,
+ .minidump_id = 20,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "nsp",
+ .ssr_name = "cdsp1",
+ .sysmon_name = "cdsp1",
+ .ssctl_id = 0x20,
+};
+
static const struct adsp_data sdm845_cdsp_resource_init = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
@@ -1083,6 +1136,40 @@ static const struct adsp_data sm8350_cdsp_resource = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sa8775p_gpdsp0_resource = {
+ .crash_reason_smem = 640,
+ .firmware_name = "gpdsp0.mbn",
+ .pas_id = 39,
+ .minidump_id = 21,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ NULL
+ },
+ .load_state = "gpdsp0",
+ .ssr_name = "gpdsp0",
+ .sysmon_name = "gpdsp0",
+ .ssctl_id = 0x21,
+};
+
+static const struct adsp_data sa8775p_gpdsp1_resource = {
+ .crash_reason_smem = 641,
+ .firmware_name = "gpdsp1.mbn",
+ .pas_id = 40,
+ .minidump_id = 22,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ NULL
+ },
+ .load_state = "gpdsp1",
+ .ssr_name = "gpdsp1",
+ .sysmon_name = "gpdsp1",
+ .ssctl_id = 0x22,
+};
+
static const struct adsp_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
@@ -1329,6 +1416,11 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
+ { .compatible = "qcom,sa8775p-adsp-pas", .data = &sa8775p_adsp_resource},
+ { .compatible = "qcom,sa8775p-cdsp0-pas", .data = &sa8775p_cdsp0_resource},
+ { .compatible = "qcom,sa8775p-cdsp1-pas", .data = &sa8775p_cdsp1_resource},
+ { .compatible = "qcom,sa8775p-gpdsp0-pas", .data = &sa8775p_gpdsp0_resource},
+ { .compatible = "qcom,sa8775p-gpdsp1-pas", .data = &sa8775p_gpdsp1_resource},
{ .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource},
@@ -1346,6 +1438,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
{ .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
+ { .compatible = "qcom,sdx75-mpss-pas", .data = &sm8650_mpss_resource},
{ .compatible = "qcom,sm6115-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sm6115-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sm6115-mpss-pas", .data = &sc8180x_mpss_resource},
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index d17719384c16..5412beb0a692 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -259,16 +259,14 @@ struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
slim_rproc->mem[i].size = resource_size(res);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore");
- slim_rproc->slimcore = devm_ioremap_resource(dev, res);
+ slim_rproc->slimcore = devm_platform_ioremap_resource_byname(pdev, "slimcore");
if (IS_ERR(slim_rproc->slimcore)) {
dev_err(&pdev->dev, "failed to ioremap slimcore IO\n");
err = PTR_ERR(slim_rproc->slimcore);
goto err;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals");
- slim_rproc->peri = devm_ioremap_resource(dev, res);
+ slim_rproc->peri = devm_platform_ioremap_resource_byname(pdev, "peripherals");
if (IS_ERR(slim_rproc->peri)) {
dev_err(&pdev->dev, "failed to ioremap peripherals IO\n");
err = PTR_ERR(slim_rproc->peri);
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index a22d41689a7d..8be3f631c192 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -115,6 +115,10 @@ static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
const char *name = kproc->rproc->name;
u32 msg = omap_mbox_message(data);
+ /* Do not forward messages from a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
@@ -155,6 +159,10 @@ static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
mbox_msg_t msg = (mbox_msg_t)vqid;
int ret;
+ /* Do not forward messages to a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
@@ -230,12 +238,9 @@ static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
client->knows_txdone = false;
kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
- return ret;
- }
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
/*
* Ping the remote processor, this is only for sanity-sake for now;
@@ -315,32 +320,23 @@ static int k3_dsp_rproc_start(struct rproc *rproc)
u32 boot_addr;
int ret;
- ret = k3_dsp_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
boot_addr = rproc->bootaddr;
if (boot_addr & (kproc->data->boot_align_addr - 1)) {
dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
boot_addr, kproc->data->boot_align_addr);
- ret = -EINVAL;
- goto put_mbox;
+ return -EINVAL;
}
dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
if (ret)
- goto put_mbox;
+ return ret;
ret = k3_dsp_rproc_release(kproc);
if (ret)
- goto put_mbox;
+ return ret;
return 0;
-
-put_mbox:
- mbox_free_channel(kproc->mbox);
- return ret;
}
/*
@@ -353,8 +349,6 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
- mbox_free_channel(kproc->mbox);
-
k3_dsp_rproc_reset(kproc);
return 0;
@@ -363,42 +357,22 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
/*
* Attach to a running DSP remote processor (IPC-only mode)
*
- * This rproc attach callback only needs to request the mailbox, the remote
- * processor is already booted, so there is no need to issue any TI-SCI
- * commands to boot the DSP core. This callback is invoked only in IPC-only
- * mode.
+ * This rproc attach callback is a NOP. The remote processor is already booted,
+ * and all required resources have been acquired during the probe routine, so there
+ * is no need to issue any TI-SCI commands to boot the DSP core. This callback
+ * is invoked only in IPC-only mode and exists because rproc_validate() checks
+ * for its existence.
*/
-static int k3_dsp_rproc_attach(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_dsp_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
- dev_info(dev, "DSP initialized in IPC-only mode\n");
- return 0;
-}
+static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; }
/*
* Detach from a running DSP remote processor (IPC-only mode)
*
- * This rproc detach callback performs the opposite operation to attach callback
- * and only needs to release the mailbox, the DSP core is not stopped and will
- * be left to continue to run its booted firmware. This callback is invoked only
- * in IPC-only mode.
+ * This rproc detach callback is a NOP. The DSP core is not stopped and will be
+ * left to continue to run its booted firmware. This callback is invoked only in
+ * IPC-only mode and exists for sanity's sake.
*/
-static int k3_dsp_rproc_detach(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- mbox_free_channel(kproc->mbox);
- dev_info(dev, "DSP deinitialized in IPC-only mode\n");
- return 0;
-}
+static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; }
/*
* This function implements the .get_loaded_rsc_table() callback and is used
@@ -636,32 +610,6 @@ static void k3_dsp_release_tsp(void *data)
ti_sci_proc_release(tsp);
}
-static
-struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
- const struct ti_sci_handle *sci)
-{
- struct ti_sci_proc *tsp;
- u32 temp[2];
- int ret;
-
- ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
- temp, 2);
- if (ret < 0)
- return ERR_PTR(ret);
-
- tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
- if (!tsp)
- return ERR_PTR(-ENOMEM);
-
- tsp->dev = dev;
- tsp->sci = sci;
- tsp->ops = &sci->ops.proc_ops;
- tsp->proc_id = temp[0];
- tsp->host_id = temp[1];
-
- return tsp;
-}
-
static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -697,6 +645,10 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
kproc->dev = dev;
kproc->data = data;
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
if (IS_ERR(kproc->ti_sci))
return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
@@ -711,7 +663,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(kproc->reset),
"failed to get reset\n");
- kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+ kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
if (IS_ERR(kproc->tsp))
return dev_err_probe(dev, PTR_ERR(kproc->tsp),
"failed to construct ti-sci proc control\n");
@@ -789,6 +741,8 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
if (ret)
dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
}
+
+ mbox_free_channel(kproc->mbox);
}
static const struct k3_dsp_mem_data c66_mems[] = {
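Several hunks above convert IS_ERR()/dev_err()/return sequences into dev_err_probe(), which logs the message (staying silent for -EPROBE_DEFER while recording the deferral reason) and returns the error in one expression. The general shape, with an illustrative resource:

clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk))
	return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");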
diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
new file mode 100644
index 000000000000..09f0484a90e1
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 Cortex-M4 Remote Processor(s) driver
+ *
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define K3_M4_IRAM_DEV_ADDR 0x00000
+#define K3_M4_DRAM_DEV_ADDR 0x30000
+
+/**
+ * struct k3_m4_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from remote processor view
+ * @size: Size of the memory region
+ */
+struct k3_m4_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_m4_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_m4_rproc - k3 remote processor driver structure
+ * @dev: cached device pointer
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_m4_rproc {
+ struct device *dev;
+ struct k3_m4_rproc_mem *mem;
+ int num_mems;
+ struct k3_m4_rproc_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+};
+
+/**
+ * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the K3 mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct device *dev = client->dev;
+ struct rproc *rproc = dev_get_drvdata(dev);
+ u32 msg = (u32)(uintptr_t)(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now.
+ */
+ dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", rproc->name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid argument is not used and is inconsequential, as the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register);
+ * the remote processor is expected to process both its Tx and Rx virtqueues.
+ */
+static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 msg = (u32)vqid;
+ int ret;
+
+ /*
+ * Send the index of the triggered virtqueue in the mailbox payload.
+ * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
+ * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /*
+ * Ping the remote processor. This is only for sanity's sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The M4 cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on remote cores to allow loading into the
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the remote cores run.
+ */
+static int k3_m4_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is already running, no need to deassert the module reset */
+ if (rproc->state == RPROC_DETACHED)
+ return 0;
+
+ /*
+ * Ensure the local reset is asserted so the core doesn't
+ * execute bogus code when the module reset is released.
+ */
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "could not assert local reset\n");
+ return ret;
+ }
+
+ ret = reset_control_status(kproc->reset);
+ if (ret <= 0) {
+ dev_err(dev, "local reset still not asserted\n");
+ return ret;
+ }
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable cores. This completes the second portion of
+ * powering down the remote core. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset.
+ */
+static int k3_m4_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is going to be detached, do not assert the module reset */
+ if (rproc->state == RPROC_ATTACHED)
+ return 0;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted remote processor in IPC-only
+ * mode. The remote processor firmwares follow a design-by-contract approach
+ * and are expected to have the resource table at the base of the DDR region
+ * reserved for firmware usage. This provides flexibility for the remote
+ * processor to be booted by different bootloaders that may or may not have the
+ * ability to publish the resource table address and size through a DT
+ * property.
+ */
+static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
+}
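For reference, the 256-byte budget easily covers the common K3 layout; a hypothetical firmware-side table with a single vdev entry, built from the types in <linux/remoteproc.h> (the struct name and field values are illustrative, not mandated by this driver, and the embedding relies on the GNU zero-length tails of those types, as real firmwares do):

	struct k3_fw_rsc_table {
		struct resource_table hdr;          /* .ver = 1, .num = 1 */
		u32 offset[1];                      /* byte offset of the vdev entry */
		struct fw_rsc_hdr vdev_hdr;         /* .type = RSC_VDEV */
		struct fw_rsc_vdev vdev;            /* virtio id/features, 2 vrings */
		struct fw_rsc_vdev_vring vring[2];  /* Tx and Rx vring declarations */
	};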
+
+/*
+ * Custom function to translate a remote processor device address (internal
+ * RAMs only) to a kernel virtual address. The remote processors can access
+ * their RAMs at either an internal address visible only from a remote
+ * processor, or at the SoC-level bus address. Both address views need to be
+ * checked during translation. The translated addresses can be used either by
+ * the remoteproc core for loading (when using the kernel remoteproc loader),
+ * or by any rpmsg bus drivers.
+ */
+static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ /* handle M4-view addresses */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses */
+ if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
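A worked example with assumed window values (the real ones come from the DT resources parsed below): if the IRAM window has dev_addr 0x0, bus_addr 0x74000000 and size 0x40000, then:

	da = 0x00001000, len = 0x100  ->  M4-view hit,  va = cpu_addr + 0x1000
	da = 0x74001000, len = 0x100  ->  SoC-view hit, va = cpu_addr + 0x1000
	da = 0x7403ff80, len = 0x100  ->  no hit: the range crosses the window end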
+
+static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_m4_rproc *kproc)
+{
+ static const char * const mem_names[] = { "iram", "dram" };
+ static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ num_mems = ARRAY_SIZE(mem_names);
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ mem_names[i]);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ mem_names[i]);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ mem_names[i]);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = mem_addrs[i];
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ mem_names[i], &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+
+static void k3_m4_rproc_dev_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
+static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems < 0) {
+ dev_err(dev, "device does not reserved memory regions (%d)\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
+ if (ret)
+ return ret;
+
+ num_rmems--;
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ return -EINVAL;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+}
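The indexing convention above (region 0 backs the vring DMA pool, regions 1..N-1 become static carveouts in rmem[]) implies a node shaped roughly like this hypothetical fragment (labels and unit address invented):

	/*
	 *	m4fss: m4fss@5000000 {
	 *		...
	 *		memory-region = <&m4f_dma_memory_region>,  idx 0: DMA pool
	 *				<&m4f_memory_region>;      idx 1: rmem[0]
	 *	};
	 * In IPC-only mode, rmem[0] is also where the loaded resource table is
	 * expected, at the base of the region (see k3_m4_get_loaded_rsc_table()).
	 */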
+
+static void k3_m4_release_tsp(void *data)
+{
+ struct ti_sci_proc *tsp = data;
+
+ ti_sci_proc_release(tsp);
+}
+
+/*
+ * Power up the M4 remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met. This callback is invoked only in remoteproc mode.
+ */
+static int k3_m4_rproc_start(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_m4_rproc_ping_mbox(kproc);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Stop the M4 remote processor.
+ *
+ * This function puts the M4 processor into reset, and finishes processing
+ * of any pending messages. This callback is invoked only in remoteproc mode.
+ */
+static int k3_m4_rproc_stop(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Attach to a running M4 remote processor (IPC-only mode)
+ *
+ * The remote processor is already booted, so there is no need to issue any
+ * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
+ * mode.
+ */
+static int k3_m4_rproc_attach(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ int ret;
+
+ ret = k3_m4_rproc_ping_mbox(kproc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Detach from a running M4 remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to the attach
+ * callback; the M4 core is not stopped and is left running its booted
+ * firmware. This callback is invoked only in IPC-only mode.
+ */
+static int k3_m4_rproc_detach(struct rproc *rproc)
+{
+ return 0;
+}
+
+static const struct rproc_ops k3_m4_rproc_ops = {
+ .prepare = k3_m4_rproc_prepare,
+ .unprepare = k3_m4_rproc_unprepare,
+ .start = k3_m4_rproc_start,
+ .stop = k3_m4_rproc_stop,
+ .attach = k3_m4_rproc_attach,
+ .detach = k3_m4_rproc_detach,
+ .kick = k3_m4_rproc_kick,
+ .da_to_va = k3_m4_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
+};
+
+static int k3_m4_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_m4_rproc *kproc;
+ struct rproc *rproc;
+ const char *fw_name;
+ bool r_state = false;
+ bool p_state = false;
+ int ret;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
+
+ rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_m4_rproc_ops, fw_name,
+ sizeof(*kproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ rproc->recovery_disabled = true;
+ kproc = rproc->priv;
+ kproc->dev = dev;
+ platform_set_drvdata(pdev, rproc);
+
+ kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(kproc->ti_sci))
+ return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
+
+ kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(kproc->reset))
+ return dev_err_probe(dev, PTR_ERR(kproc->reset), "failed to get reset\n");
+
+ kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp))
+ return dev_err_probe(dev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
+ ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
+ if (ret)
+ return ret;
+
+ ret = k3_m4_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ return ret;
+
+ ret = k3_m4_reserved_mem_init(kproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "reserved memory init failed\n");
+
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ &r_state, &p_state);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get initial state, mode cannot be determined\n");
+
+ /* configure devices for either remoteproc or IPC-only mode */
+ if (p_state) {
+ rproc->state = RPROC_DETACHED;
+ dev_info(dev, "configured M4F for IPC-only mode\n");
+ } else {
+ dev_info(dev, "configured M4F for remoteproc mode\n");
+ }
+
+ kproc->client.dev = dev;
+ kproc->client.tx_done = NULL;
+ kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
+ kproc->client.tx_block = false;
+ kproc->client.knows_txdone = false;
+ kproc->mbox = mbox_request_channel(&kproc->client, 0);
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register device with remoteproc core\n");
+
+ return 0;
+}
+
+static const struct of_device_id k3_m4_of_match[] = {
+ { .compatible = "ti,am64-m4fss", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_m4_of_match);
+
+static struct platform_driver k3_m4_rproc_driver = {
+ .probe = k3_m4_rproc_probe,
+ .driver = {
+ .name = "k3-m4-rproc",
+ .of_match_table = k3_m4_of_match,
+ },
+};
+module_platform_driver(k3_m4_rproc_driver);
+
+MODULE_AUTHOR("Hari Nagalla <hnagalla@ti.com>");
+MODULE_DESCRIPTION("TI K3 M4 Remoteproc driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 39a47540c590..747ee467da88 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -194,6 +194,10 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
const char *name = kproc->rproc->name;
u32 msg = omap_mbox_message(data);
+ /* Do not forward message from a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
@@ -229,6 +233,10 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
mbox_msg_t msg = (mbox_msg_t)vqid;
int ret;
+ /* Do not forward message to a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
@@ -399,12 +407,9 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
client->knows_txdone = false;
kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
- return ret;
- }
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
/*
* Ping the remote processor, this is only for sanity-sake for now;
@@ -464,8 +469,6 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
ret);
return ret;
}
- core->released_from_reset = true;
- wake_up_interruptible(&cluster->core_transition);
/*
* Newer IP revisions like on J7200 SoCs support h/w auto-initialization
@@ -552,10 +555,6 @@ static int k3_r5_rproc_start(struct rproc *rproc)
u32 boot_addr;
int ret;
- ret = k3_r5_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
boot_addr = rproc->bootaddr;
/* TODO: add boot_addr sanity checking */
dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
@@ -564,7 +563,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
core = kproc->core;
ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
if (ret)
- goto put_mbox;
+ return ret;
/* unhalt/run all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
@@ -580,13 +579,15 @@ static int k3_r5_rproc_start(struct rproc *rproc)
if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
dev_err(dev, "%s: can not start core 1 before core 0\n",
__func__);
- ret = -EPERM;
- goto put_mbox;
+ return -EPERM;
}
ret = k3_r5_core_run(core);
if (ret)
- goto put_mbox;
+ return ret;
+
+ core->released_from_reset = true;
+ wake_up_interruptible(&cluster->core_transition);
}
return 0;
@@ -596,8 +597,6 @@ unroll_core_run:
if (k3_r5_core_halt(core))
dev_warn(core->dev, "core halt back failed\n");
}
-put_mbox:
- mbox_free_channel(kproc->mbox);
return ret;
}
@@ -658,8 +657,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
goto out;
}
- mbox_free_channel(kproc->mbox);
-
return 0;
unroll_core_halt:
@@ -674,42 +671,22 @@ out:
/*
* Attach to a running R5F remote processor (IPC-only mode)
*
- * The R5F attach callback only needs to request the mailbox, the remote
- * processor is already booted, so there is no need to issue any TI-SCI
- * commands to boot the R5F cores in IPC-only mode. This callback is invoked
- * only in IPC-only mode.
+ * The R5F attach callback is a NOP. The remote processor is already booted,
+ * and all required resources have been acquired during the probe routine, so
+ * there is no need to issue any TI-SCI commands to boot the R5F cores in
+ * IPC-only mode. This callback is invoked only in IPC-only mode and exists
+ * because rproc_validate() checks for its existence.
*/
-static int k3_r5_rproc_attach(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_r5_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
- dev_info(dev, "R5F core initialized in IPC-only mode\n");
- return 0;
-}
+static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
/*
* Detach from a running R5F remote processor (IPC-only mode)
*
- * The R5F detach callback performs the opposite operation to attach callback
- * and only needs to release the mailbox, the R5F cores are not stopped and
- * will be left in booted state in IPC-only mode. This callback is invoked
- * only in IPC-only mode.
+ * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
+ * left in booted state in IPC-only mode. This callback is invoked only in
+ * IPC-only mode and exists because the remoteproc core checks for its
+ * existence before detaching.
*/
-static int k3_r5_rproc_detach(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- mbox_free_channel(kproc->mbox);
- dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
- return 0;
-}
+static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
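Both NOPs exist because the remoteproc core refuses to attach or detach without the corresponding op; a paraphrased (not verbatim) sketch of the core-side check:

	/* in rproc_validate(), simplified */
	case RPROC_DETACHED:
		if (!rproc->ops->attach)
			return -EINVAL;
		break;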
/*
* This function implements the .get_loaded_rsc_table() callback and is used
@@ -1259,8 +1236,8 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
goto out;
}
- rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
- fw_name, sizeof(*kproc));
+ rproc = devm_rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
+ fw_name, sizeof(*kproc));
if (!rproc) {
ret = -ENOMEM;
goto out;
@@ -1278,9 +1255,13 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
kproc->rproc = rproc;
core->rproc = rproc;
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
ret = k3_r5_rproc_configure_mode(kproc);
if (ret < 0)
- goto err_config;
+ goto out;
if (ret)
goto init_rmem;
@@ -1288,7 +1269,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
if (ret) {
dev_err(dev, "initial configure failed, ret = %d\n",
ret);
- goto err_config;
+ goto out;
}
init_rmem:
@@ -1298,7 +1279,7 @@ init_rmem:
if (ret) {
dev_err(dev, "reserved memory init failed, ret = %d\n",
ret);
- goto err_config;
+ goto out;
}
ret = rproc_add(rproc);
@@ -1332,7 +1313,7 @@ init_rmem:
dev_err(dev,
"Timed out waiting for %s core to power up!\n",
rproc->name);
- return ret;
+ goto err_powerup;
}
}
@@ -1348,12 +1329,10 @@ err_split:
}
}
+err_powerup:
rproc_del(rproc);
err_add:
k3_r5_reserved_mem_exit(kproc);
-err_config:
- rproc_free(rproc);
- core->rproc = NULL;
out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
@@ -1395,12 +1374,11 @@ static void k3_r5_cluster_rproc_exit(void *data)
}
}
+ mbox_free_channel(kproc->mbox);
+
rproc_del(rproc);
k3_r5_reserved_mem_exit(kproc);
-
- rproc_free(rproc);
- core->rproc = NULL;
}
}
@@ -1533,32 +1511,6 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
return 0;
}
-static
-struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
- const struct ti_sci_handle *sci)
-{
- struct ti_sci_proc *tsp;
- u32 temp[2];
- int ret;
-
- ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
- temp, 2);
- if (ret < 0)
- return ERR_PTR(ret);
-
- tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
- if (!tsp)
- return ERR_PTR(-ENOMEM);
-
- tsp->dev = dev;
- tsp->sci = sci;
- tsp->ops = &sci->ops.proc_ops;
- tsp->proc_id = temp[0];
- tsp->host_id = temp[1];
-
- return tsp;
-}
-
static int k3_r5_core_of_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1633,7 +1585,7 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
goto err;
}
- core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
+ core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci);
if (IS_ERR(core->tsp)) {
ret = PTR_ERR(core->tsp);
dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h
index 778558abcdcc..f3911ce75252 100644
--- a/drivers/remoteproc/ti_sci_proc.h
+++ b/drivers/remoteproc/ti_sci_proc.h
@@ -28,6 +28,32 @@ struct ti_sci_proc {
u8 host_id;
};
+static inline
+struct ti_sci_proc *ti_sci_proc_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
+
static inline int ti_sci_proc_request(struct ti_sci_proc *tsp)
{
int ret;
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 596f3ffb8935..5aeedeaf3c41 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -57,6 +57,17 @@ struct mem_bank_data {
};
/**
+ * struct zynqmp_sram_bank - sram bank description
+ *
+ * @sram_res: sram address region information
+ * @da: device address of sram
+ */
+struct zynqmp_sram_bank {
+ struct resource sram_res;
+ u32 da;
+};
+
+/**
* struct mbox_info
*
* @rx_mc_buf: to copy data from mailbox rx channel
@@ -120,6 +131,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
* struct zynqmp_r5_core
*
* @rsc_tbl_va: resource table virtual address
+ * @sram: array of sram banks assigned to this core
+ * @num_sram: number of sram banks for this core
* @dev: device of RPU instance
* @np: device node of RPU instance
* @tcm_bank_count: number TCM banks accessible to this RPU
@@ -131,6 +144,8 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
*/
struct zynqmp_r5_core {
void __iomem *rsc_tbl_va;
+ struct zynqmp_sram_bank *sram;
+ int num_sram;
struct device *dev;
struct device_node *np;
int tcm_bank_count;
@@ -494,6 +509,45 @@ static int add_mem_regions_carveout(struct rproc *rproc)
return 0;
}
+static int add_sram_carveouts(struct rproc *rproc)
+{
+ struct zynqmp_r5_core *r5_core = rproc->priv;
+ struct rproc_mem_entry *rproc_mem;
+ struct zynqmp_sram_bank *sram;
+ dma_addr_t dma_addr;
+ size_t len;
+ int da, i;
+
+ for (i = 0; i < r5_core->num_sram; i++) {
+ sram = &r5_core->sram[i];
+
+ dma_addr = (dma_addr_t)sram->sram_res.start;
+
+ len = resource_size(&sram->sram_res);
+ da = sram->da;
+
+ rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
+ dma_addr,
+ len, da,
+ zynqmp_r5_mem_region_map,
+ zynqmp_r5_mem_region_unmap,
+ sram->sram_res.name);
+ if (!rproc_mem) {
+ dev_err(&rproc->dev, "failed to add sram %s da=0x%x, size=0x%lx",
+ sram->sram_res.name, da, len);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, rproc_mem);
+ rproc_coredump_add_segment(rproc, da, len);
+
+ dev_dbg(&rproc->dev, "sram carveout %s addr=%llx, da=0x%x, size=0x%lx",
+ sram->sram_res.name, dma_addr, da, len);
+ }
+
+ return 0;
+}
+
/*
* tcm_mem_unmap()
* @rproc: single R5 core's corresponding rproc instance
@@ -669,6 +723,12 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
return ret;
}
+ ret = add_sram_carveouts(rproc);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to get sram carveout %d\n", ret);
+ return ret;
+ }
+
return 0;
}
@@ -881,6 +941,77 @@ free_rproc:
return ERR_PTR(ret);
}
+static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_core *r5_core)
+{
+ struct device_node *np = r5_core->np;
+ struct device *dev = r5_core->dev;
+ struct zynqmp_sram_bank *sram;
+ struct device_node *sram_np;
+ int num_sram, i, ret;
+ u64 abs_addr, size;
+
+ /* "sram" is optional property. Do not fail, if unavailable. */
+ if (!of_property_present(r5_core->np, "sram"))
+ return 0;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_err(dev, "Invalid sram property, ret = %d\n",
+ num_sram);
+ return -EINVAL;
+ }
+
+ sram = devm_kcalloc(dev, num_sram,
+ sizeof(struct zynqmp_sram_bank), GFP_KERNEL);
+ if (!sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np) {
+ dev_err(dev, "failed to get sram %d phandle\n", i);
+ return -EINVAL;
+ }
+
+ if (!of_device_is_available(sram_np)) {
+ dev_err(dev, "sram device not available\n");
+ ret = -EINVAL;
+ goto fail_sram_get;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &sram[i].sram_res);
+ if (ret) {
+ dev_err(dev, "addr to res failed\n");
+ goto fail_sram_get;
+ }
+
+ /* Get SRAM device address */
+ ret = of_property_read_reg(sram_np, i, &abs_addr, &size);
+ if (ret) {
+ dev_err(dev, "failed to get reg property\n");
+ goto fail_sram_get;
+ }
+
+ sram[i].da = (u32)abs_addr;
+
+ of_node_put(sram_np);
+
+ dev_dbg(dev, "sram %d: name=%s, addr=0x%llx, da=0x%x, size=0x%llx\n",
+ i, sram[i].sram_res.name, sram[i].sram_res.start,
+ sram[i].da, resource_size(&sram[i].sram_res));
+ }
+
+ r5_core->sram = sram;
+ r5_core->num_sram = num_sram;
+
+ return 0;
+
+fail_sram_get:
+ of_node_put(sram_np);
+
+ return ret;
+}
+
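The banks gathered here come from a standard "sram" phandle list whose target nodes supply, via their reg, both the CPU-visible address (sram_res) and the value reused as the device address (da); a hypothetical node shape, with invented labels:

	/*
	 *	r5f@ff9a0100 {
	 *		...
	 *		sram = <&sram_bank0>, <&sram_bank1>;
	 *	};
	 * Each referenced node must be available, or the parse fails.
	 */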
static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
{
int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
@@ -1059,7 +1190,7 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
r5_core = cluster->r5_cores[0];
/* Maintain backward compatibility for zynqmp by using hardcode TCM address. */
- if (of_find_property(r5_core->np, "reg", NULL))
+ if (of_property_present(r5_core->np, "reg"))
ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss"))
ret = zynqmp_r5_get_tcm_node(cluster);
@@ -1086,7 +1217,7 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
return ret;
}
- if (of_find_property(dev_of_node(dev), "xlnx,tcm-mode", NULL) ||
+ if (of_property_present(dev_of_node(dev), "xlnx,tcm-mode") ||
device_is_compatible(dev, "xlnx,zynqmp-r5fss")) {
ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id,
tcm_mode);
@@ -1095,6 +1226,10 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
return ret;
}
}
+
+ ret = zynqmp_r5_get_sram_banks(r5_core);
+ if (ret)
+ return ret;
}
return 0;
@@ -1147,7 +1282,7 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
return -EINVAL;
}
- if (of_find_property(dev_node, "xlnx,tcm-mode", NULL)) {
+ if (of_property_present(dev_node, "xlnx,tcm-mode")) {
ret = of_property_read_u32(dev_node, "xlnx,tcm-mode", (u32 *)&tcm_mode);
if (ret)
return ret;
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 58e3b382e316..1e02b58ff61f 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
obj-$(CONFIG_RPMSG_CTRL) += rpmsg_ctrl.o
obj-$(CONFIG_RPMSG_NS) += rpmsg_ns.o
obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
+CFLAGS_qcom_glink_native.o := -I$(src)
qcom_glink-objs := qcom_glink_native.o qcom_glink_ssr.o
obj-$(CONFIG_RPMSG_QCOM_GLINK) += qcom_glink.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 82d460ff4777..0b2f29006908 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -23,6 +23,9 @@
#include "rpmsg_internal.h"
#include "qcom_glink_native.h"
+#define CREATE_TRACE_POINTS
+#include "qcom_glink_trace.h"
+
#define GLINK_NAME_SIZE 32
#define GLINK_VERSION_1 1
@@ -30,11 +33,16 @@
#define RPM_GLINK_CID_MAX 65536
struct glink_msg {
- __le16 cmd;
- __le16 param1;
- __le32 param2;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(glink_msg_hdr, hdr, __packed,
+ __le16 cmd;
+ __le16 param1;
+ __le32 param2;
+ );
u8 data[];
} __packed;
+static_assert(offsetof(struct glink_msg, data) == sizeof(struct glink_msg_hdr),
+ "struct member likely outside of __struct_group()");
/**
* struct glink_defer_cmd - deferred incoming control message
@@ -48,7 +56,7 @@ struct glink_msg {
struct glink_defer_cmd {
struct list_head node;
- struct glink_msg msg;
+ struct glink_msg_hdr msg;
u8 data[];
};
@@ -78,6 +86,7 @@ struct glink_core_rx_intent {
/**
* struct qcom_glink - driver context, relates to one remote subsystem
* @dev: reference to the associated struct device
+ * @label: identifier of the glink edge
* @rx_pipe: pipe object for receive FIFO
* @tx_pipe: pipe object for transmit FIFO
* @rx_work: worker for handling received control messages
@@ -96,6 +105,8 @@ struct glink_core_rx_intent {
struct qcom_glink {
struct device *dev;
+ const char *label;
+
struct qcom_glink_pipe *rx_pipe;
struct qcom_glink_pipe *tx_pipe;
@@ -392,6 +403,8 @@ static int qcom_glink_send_version(struct qcom_glink *glink)
msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features);
+ trace_qcom_glink_cmd_version_tx(glink->label, GLINK_VERSION_1, glink->features);
+
return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@@ -403,6 +416,8 @@ static void qcom_glink_send_version_ack(struct qcom_glink *glink)
msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features);
+ trace_qcom_glink_cmd_version_ack_tx(glink->label, msg.param1, msg.param2);
+
qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@@ -415,6 +430,9 @@ static void qcom_glink_send_open_ack(struct qcom_glink *glink,
msg.param1 = cpu_to_le16(channel->rcid);
msg.param2 = cpu_to_le32(0);
+ trace_qcom_glink_cmd_open_ack_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid);
+
qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@@ -424,9 +442,16 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
struct glink_channel *channel;
unsigned long flags;
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_rx_intent_req_ack_rx(glink->label,
+ channel ? channel->name : NULL,
+ channel ? channel->lcid : 0,
+ cid, granted);
if (!channel) {
dev_err(glink->dev, "unable to find channel\n");
return;
@@ -455,12 +480,9 @@ static void qcom_glink_intent_req_abort(struct glink_channel *channel)
static int qcom_glink_send_open_req(struct qcom_glink *glink,
struct glink_channel *channel)
{
- struct {
- struct glink_msg msg;
- u8 name[GLINK_NAME_SIZE];
- } __packed req;
+ DEFINE_RAW_FLEX(struct glink_msg, req, data, GLINK_NAME_SIZE);
int name_len = strlen(channel->name) + 1;
- int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
+ int req_len = ALIGN(sizeof(*req) + name_len, 8);
int ret;
unsigned long flags;
@@ -476,12 +498,15 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink,
channel->lcid = ret;
- req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN);
- req.msg.param1 = cpu_to_le16(channel->lcid);
- req.msg.param2 = cpu_to_le32(name_len);
- strcpy(req.name, channel->name);
+ req->cmd = cpu_to_le16(GLINK_CMD_OPEN);
+ req->param1 = cpu_to_le16(channel->lcid);
+ req->param2 = cpu_to_le32(name_len);
+ strcpy(req->data, channel->name);
+
+ trace_qcom_glink_cmd_open_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid);
- ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
+ ret = qcom_glink_tx(glink, req, req_len, NULL, 0, true);
if (ret)
goto remove_idr;
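DEFINE_RAW_FLEX() (from <linux/overflow.h>) replaces the old anonymous struct-plus-name-buffer pattern: it declares an on-stack buffer sized for the struct plus the requested trailing flexible-array elements and yields a pointer to it, which is why the accesses become req->cmd and the send call passes req directly. A condensed sketch:

	DEFINE_RAW_FLEX(struct glink_msg, req, data, GLINK_NAME_SIZE);

	/* sizeof(*req) covers only the fixed header, not the name bytes */
	int req_len = ALIGN(sizeof(*req) + name_len, 8);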
@@ -505,18 +530,24 @@ static void qcom_glink_send_close_req(struct qcom_glink *glink,
req.param1 = cpu_to_le16(channel->lcid);
req.param2 = 0;
+ trace_qcom_glink_cmd_close_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid);
+
qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}
static void qcom_glink_send_close_ack(struct qcom_glink *glink,
- unsigned int rcid)
+ struct glink_channel *channel)
{
struct glink_msg req;
req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK);
- req.param1 = cpu_to_le16(rcid);
+ req.param1 = cpu_to_le16(channel->rcid);
req.param2 = 0;
+ trace_qcom_glink_cmd_close_ack_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid);
+
qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}
@@ -548,6 +579,9 @@ static void qcom_glink_rx_done_work(struct work_struct *work)
cmd.lcid = cid;
cmd.liid = iid;
+ trace_qcom_glink_cmd_rx_done_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid, cmd.liid, reuse);
+
qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (!reuse) {
kfree(intent->data);
@@ -598,6 +632,8 @@ static void qcom_glink_receive_version(struct qcom_glink *glink,
u32 version,
u32 features)
{
+ trace_qcom_glink_cmd_version_rx(glink->label, version, features);
+
switch (version) {
case 0:
break;
@@ -625,6 +661,8 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
u32 version,
u32 features)
{
+ trace_qcom_glink_cmd_version_ack_rx(glink->label, version, features);
+
switch (version) {
case 0:
/* Version negotiation failed */
@@ -656,6 +694,10 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
{
struct glink_msg msg;
+ trace_qcom_glink_cmd_rx_intent_req_ack_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid,
+ granted);
+
msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK);
msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(granted);
@@ -693,6 +735,10 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink,
cmd.size = cpu_to_le32(intent->size);
cmd.liid = cpu_to_le32(intent->id);
+ trace_qcom_glink_cmd_intent_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid,
+ cmd.count, cmd.size, cmd.liid);
+
qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
return 0;
@@ -745,9 +791,14 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
struct glink_channel *channel;
unsigned long flags;
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_rx_done_rx(glink->label, channel ? channel->name : NULL,
+ channel ? channel->lcid : 0, cid, iid, reuse);
if (!channel) {
dev_err(glink->dev, "invalid channel id received\n");
return;
@@ -797,6 +848,10 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+ trace_qcom_glink_cmd_rx_intent_req_rx(glink->label,
+ channel ? channel->name : NULL,
+ channel ? channel->lcid : 0,
+ cid, size);
if (!channel) {
pr_err("%s channel not found for cid %d\n", __func__, cid);
return;
@@ -826,7 +881,9 @@ static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
INIT_LIST_HEAD(&dcmd->node);
- qcom_glink_rx_peek(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);
+ qcom_glink_rx_peek(glink,
+ container_of(&dcmd->msg, struct glink_msg, hdr), 0,
+ sizeof(dcmd->msg) + extra);
spin_lock(&glink->rx_lock);
list_add_tail(&dcmd->node, &glink->rx_queue);
@@ -843,7 +900,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
struct glink_core_rx_intent *intent;
struct glink_channel *channel;
struct {
- struct glink_msg msg;
+ struct glink_msg_hdr msg;
__le32 chunk_size;
__le32 left_size;
} __packed hdr;
@@ -869,9 +926,15 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
}
rcid = le16_to_cpu(hdr.msg.param1);
+ liid = le32_to_cpu(hdr.msg.param2);
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_tx_data_rx(glink->label, channel ? channel->name : NULL,
+ channel ? channel->lcid : 0, rcid,
+ liid, chunk_size, left_size,
+ hdr.msg.cmd == GLINK_CMD_TX_DATA_CONT);
if (!channel) {
dev_dbg(glink->dev, "Data on non-existing channel\n");
@@ -902,8 +965,6 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
intent = channel->buf;
}
} else {
- liid = le32_to_cpu(hdr.msg.param2);
-
spin_lock_irqsave(&channel->intent_lock, flags);
intent = idr_find(&channel->liids, liid);
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -952,6 +1013,14 @@ advance_rx:
return ret;
}
+static void qcom_glink_rx_read_notif(struct qcom_glink *glink)
+{
+ trace_qcom_glink_cmd_read_notif_rx(glink->label);
+
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+ qcom_glink_tx_kick(glink);
+}
+
static void qcom_glink_handle_intent(struct qcom_glink *glink,
unsigned int cid,
unsigned int count,
@@ -965,7 +1034,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
};
struct {
- struct glink_msg msg;
+ struct glink_msg_hdr msg;
struct intent_pair intents[];
} __packed * msg;
@@ -983,6 +1052,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
if (!channel) {
+ trace_qcom_glink_cmd_intent_rx(glink->label, NULL, 0, cid, count, 0, 0);
dev_err(glink->dev, "intents for non-existing channel\n");
qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
return;
@@ -994,6 +1064,11 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
qcom_glink_rx_peek(glink, msg, 0, msglen);
+ trace_qcom_glink_cmd_intent_rx(glink->label, channel->name,
+ channel->lcid, cid, count,
+ count > 0 ? msg->intents[0].size : 0,
+ count > 0 ? msg->intents[0].iid : 0);
+
for (i = 0; i < count; ++i) {
intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
if (!intent)
@@ -1022,9 +1097,14 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
struct glink_channel *channel;
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+
spin_lock(&glink->idr_lock);
channel = idr_find(&glink->lcids, lcid);
spin_unlock(&glink->idr_lock);
+
+ trace_qcom_glink_cmd_open_ack_rx(glink->label, channel ? channel->name : NULL,
+ lcid, channel ? channel->rcid : 0);
if (!channel) {
dev_err(glink->dev, "Invalid open ack packet\n");
return -EINVAL;
@@ -1057,6 +1137,9 @@ static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u
msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(sigs);
+ trace_qcom_glink_cmd_signal_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid, sigs);
+
return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@@ -1067,9 +1150,14 @@ static void qcom_glink_handle_signals(struct qcom_glink *glink,
unsigned long flags;
bool enable;
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(struct glink_msg), 8));
+
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_signal_rx(glink->label, channel ? channel->name : NULL,
+ channel ? channel->lcid : 0, rcid, sigs);
if (!channel) {
dev_err(glink->dev, "signal for non-existing channel\n");
return;
@@ -1114,7 +1202,6 @@ void qcom_glink_native_rx(struct qcom_glink *glink)
break;
case GLINK_CMD_OPEN_ACK:
ret = qcom_glink_rx_open_ack(glink, param1);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case GLINK_CMD_OPEN:
ret = qcom_glink_rx_defer(glink, param2);
@@ -1124,27 +1211,22 @@ void qcom_glink_native_rx(struct qcom_glink *glink)
ret = qcom_glink_rx_data(glink, avail);
break;
case GLINK_CMD_READ_NOTIF:
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
- qcom_glink_tx_kick(glink);
+ qcom_glink_rx_read_notif(glink);
break;
case GLINK_CMD_INTENT:
qcom_glink_handle_intent(glink, param1, param2, avail);
break;
case GLINK_CMD_RX_DONE:
qcom_glink_handle_rx_done(glink, param1, param2, false);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case GLINK_CMD_RX_DONE_W_REUSE:
qcom_glink_handle_rx_done(glink, param1, param2, true);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case GLINK_CMD_RX_INTENT_REQ_ACK:
qcom_glink_handle_intent_req_ack(glink, param1, param2);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case GLINK_CMD_SIGNALS:
qcom_glink_handle_signals(glink, param1, param2);
- qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
default:
dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
@@ -1349,6 +1431,10 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
cmd.cid = channel->lcid;
cmd.size = size;
+ trace_qcom_glink_cmd_rx_intent_req_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid,
+ cmd.size);
+
ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (ret)
goto unlock;
@@ -1377,7 +1463,7 @@ static int __qcom_glink_send(struct glink_channel *channel,
struct glink_core_rx_intent *tmp;
int iid = 0;
struct {
- struct glink_msg msg;
+ struct glink_msg_hdr msg;
__le32 chunk_size;
__le32 left_size;
} __packed req;
@@ -1429,6 +1515,12 @@ static int __qcom_glink_send(struct glink_channel *channel,
req.chunk_size = cpu_to_le32(chunk_size);
req.left_size = cpu_to_le32(len - offset - chunk_size);
+ trace_qcom_glink_cmd_tx_data_tx(glink->label, channel->name,
+ channel->lcid, channel->rcid,
+ iid, chunk_size,
+ len - offset - chunk_size,
+ offset > 0);
+
ret = qcom_glink_tx(glink, &req, sizeof(req), data + offset, chunk_size, wait);
if (ret) {
/* Mark intent available if we failed */
@@ -1544,6 +1636,8 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
create_device = true;
}
+ trace_qcom_glink_cmd_open_rx(glink->label, name, channel->lcid, rcid);
+
spin_lock_irqsave(&glink->idr_lock, flags);
ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
if (ret < 0) {
@@ -1605,6 +1699,9 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
+
+ trace_qcom_glink_cmd_close_rx(glink->label, channel ? channel->name : NULL,
+ channel ? channel->lcid : 0, rcid);
if (WARN(!channel, "close request on unknown channel\n"))
return;
@@ -1620,7 +1717,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
}
channel->rpdev = NULL;
- qcom_glink_send_close_ack(glink, channel->rcid);
+ qcom_glink_send_close_ack(glink, channel);
spin_lock_irqsave(&glink->idr_lock, flags);
idr_remove(&glink->rcids, channel->rcid);
@@ -1641,6 +1738,9 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->lcids, lcid);
+
+ trace_qcom_glink_cmd_close_ack_rx(glink->label, channel ? channel->name : NULL,
+ lcid, channel ? channel->rcid : 0);
if (WARN(!channel, "close ack on unknown channel\n")) {
spin_unlock_irqrestore(&glink->idr_lock, flags);
return;
@@ -1685,7 +1785,7 @@ static void qcom_glink_work(struct work_struct *work)
list_del(&dcmd->node);
spin_unlock_irqrestore(&glink->rx_lock, flags);
- msg = &dcmd->msg;
+ msg = container_of(&dcmd->msg, struct glink_msg, hdr);
cmd = le16_to_cpu(msg->cmd);
param1 = le16_to_cpu(msg->param1);
param2 = le32_to_cpu(msg->param2);
@@ -1815,6 +1915,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
idr_init(&glink->lcids);
idr_init(&glink->rcids);
+ ret = of_property_read_string(dev->of_node, "label", &glink->label);
+ if (ret < 0)
+ glink->label = dev->of_node->name;
+
glink->dev->groups = qcom_glink_groups;
ret = device_add_groups(dev, qcom_glink_groups);
diff --git a/drivers/rpmsg/qcom_glink_trace.h b/drivers/rpmsg/qcom_glink_trace.h
new file mode 100644
index 000000000000..668bdea1d78f
--- /dev/null
+++ b/drivers/rpmsg/qcom_glink_trace.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qcom_glink
+
+#if !defined(__QCOM_GLINK_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __QCOM_GLINK_TRACE_H__
+
+#include <linux/tracepoint.h>
+#include "qcom_glink_native.h"
+
+TRACE_EVENT(qcom_glink_cmd_version,
+ TP_PROTO(const char *remote, unsigned int version, unsigned int features, bool tx),
+ TP_ARGS(remote, version, features, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __field(u32, version)
+ __field(u32, features)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __entry->version = version;
+ __entry->features = features;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s version: %u features: %#x",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __entry->version,
+ __entry->features
+ )
+);
+#define trace_qcom_glink_cmd_version_tx(...) trace_qcom_glink_cmd_version(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_version_rx(...) trace_qcom_glink_cmd_version(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_version_ack,
+ TP_PROTO(const char *remote, unsigned int version, unsigned int features, bool tx),
+ TP_ARGS(remote, version, features, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __field(u32, version)
+ __field(u32, features)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __entry->version = version;
+ __entry->features = features;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s version: %u features: %#x",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __entry->version,
+ __entry->features
+ )
+);
+#define trace_qcom_glink_cmd_version_ack_tx(...) trace_qcom_glink_cmd_version_ack(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_version_ack_rx(...) trace_qcom_glink_cmd_version_ack(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_open,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid
+ )
+);
+#define trace_qcom_glink_cmd_open_tx(...) trace_qcom_glink_cmd_open(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_open_rx(...) trace_qcom_glink_cmd_open(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_close,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid
+ )
+);
+#define trace_qcom_glink_cmd_close_tx(...) trace_qcom_glink_cmd_close(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_close_rx(...) trace_qcom_glink_cmd_close(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_open_ack,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid
+ )
+);
+#define trace_qcom_glink_cmd_open_ack_tx(...) trace_qcom_glink_cmd_open_ack(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_open_ack_rx(...) trace_qcom_glink_cmd_open_ack(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_intent,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, size_t count, size_t size, u32 liid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, count, size, liid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, count)
+ __field(u32, size)
+ __field(u32, liid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->count = count;
+ __entry->size = size;
+ __entry->liid = liid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] count: %d [size: %d liid: %d]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->count,
+ __entry->size,
+ __entry->liid
+ )
+);
+#define trace_qcom_glink_cmd_intent_tx(...) trace_qcom_glink_cmd_intent(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_intent_rx(...) trace_qcom_glink_cmd_intent(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_rx_done,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, u32 iid, bool reuse, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, iid, reuse, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, iid)
+ __field(bool, reuse)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->iid = iid;
+ __entry->reuse = reuse;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] iid: %d reuse: %d",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->iid,
+ __entry->reuse
+ )
+);
+#define trace_qcom_glink_cmd_rx_done_tx(...) trace_qcom_glink_cmd_rx_done(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_rx_done_rx(...) trace_qcom_glink_cmd_rx_done(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_rx_intent_req,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, size_t size, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, size, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, size)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->size = size;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] size: %d",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->size
+ )
+);
+#define trace_qcom_glink_cmd_rx_intent_req_tx(...) trace_qcom_glink_cmd_rx_intent_req(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_rx_intent_req_rx(...) trace_qcom_glink_cmd_rx_intent_req(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_rx_intent_req_ack,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool granted, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, granted, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, granted)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->granted = granted;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] granted: %d",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->granted
+ )
+);
+#define trace_qcom_glink_cmd_rx_intent_req_ack_tx(...) trace_qcom_glink_cmd_rx_intent_req_ack(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_rx_intent_req_ack_rx(...) trace_qcom_glink_cmd_rx_intent_req_ack(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_tx_data,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, u32 iid, u32 chunk_size, u32 left_size, bool cont, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, iid, chunk_size, left_size, cont, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, iid)
+ __field(u32, chunk_size)
+ __field(u32, left_size)
+ __field(bool, cont)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->iid = iid;
+ __entry->chunk_size = chunk_size;
+ __entry->left_size = left_size;
+ __entry->cont = cont;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] iid: %d chunk_size: %d left_size: %d cont: %d",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->iid,
+ __entry->chunk_size,
+ __entry->left_size,
+ __entry->cont
+ )
+);
+#define trace_qcom_glink_cmd_tx_data_tx(...) trace_qcom_glink_cmd_tx_data(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_tx_data_rx(...) trace_qcom_glink_cmd_tx_data(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_close_ack,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u]",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid
+ )
+);
+#define trace_qcom_glink_cmd_close_ack_tx(...) trace_qcom_glink_cmd_close_ack(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_close_ack_rx(...) trace_qcom_glink_cmd_close_ack(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_read_notif,
+ TP_PROTO(const char *remote, bool tx),
+ TP_ARGS(remote, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote)
+ )
+);
+#define trace_qcom_glink_cmd_read_notif_tx(...) trace_qcom_glink_cmd_read_notif(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_read_notif_rx(...) trace_qcom_glink_cmd_read_notif(__VA_ARGS__, false)
+
+TRACE_EVENT(qcom_glink_cmd_signal,
+ TP_PROTO(const char *remote, const char *channel, u16 lcid, u16 rcid, unsigned int signals, bool tx),
+ TP_ARGS(remote, channel, lcid, rcid, signals, tx),
+ TP_STRUCT__entry(
+ __string(remote, remote)
+ __string(channel, channel)
+ __field(u16, lcid)
+ __field(u16, rcid)
+ __field(u32, signals)
+ __field(bool, tx)
+ ),
+ TP_fast_assign(
+ __assign_str(remote);
+ __assign_str(channel);
+ __entry->lcid = lcid;
+ __entry->rcid = rcid;
+ __entry->signals = signals;
+ __entry->tx = tx;
+ ),
+ TP_printk("%s remote: %s channel: %s[%u/%u] signals: %#x",
+ __entry->tx ? "tx" : "rx",
+ __get_str(remote),
+ __get_str(channel),
+ __entry->lcid,
+ __entry->rcid,
+ __entry->signals
+ )
+);
+#define trace_qcom_glink_cmd_signal_tx(...) trace_qcom_glink_cmd_signal(__VA_ARGS__, true)
+#define trace_qcom_glink_cmd_signal_rx(...) trace_qcom_glink_cmd_signal(__VA_ARGS__, false)
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE qcom_glink_trace
+
+#include <trace/define_trace.h>
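This header follows the standard self-defining tracepoint pattern: TRACE_INCLUDE_PATH is set to "." (matched by the -I$(src) added to the rpmsg Makefile above) and exactly one translation unit expands the event definitions; every other includer only gets declarations. A usage sketch mirroring what qcom_glink_native.c does in this series:

	/* in exactly one .c file */
	#define CREATE_TRACE_POINTS
	#include "qcom_glink_trace.h"

	/* at a call site */
	trace_qcom_glink_cmd_version_tx(glink->label, GLINK_VERSION_1,
					glink->features);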
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e87c3d74565c..66eb1122248b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -743,6 +743,16 @@ config RTC_DRV_S5M
This driver can also be built as a module. If so, the module
will be called rtc-s5m.
+config RTC_DRV_SD2405AL
+ tristate "DFRobot SD2405AL"
+ select REGMAP_I2C
+ help
+ If you say yes here you will get support for the
+ DFRobot SD2405AL I2C RTC Module.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sd2405al.
+
config RTC_DRV_SD3078
tristate "ZXW Shenzhen whwave SD3078"
select REGMAP_I2C
@@ -1934,6 +1944,12 @@ config RTC_DRV_STM32
tristate "STM32 RTC"
select REGMAP_MMIO
depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on PINCTRL
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ depends on COMMON_CLK
help
If you say yes here you get support for the STM32 On-Chip
Real Time Clock.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 8ee79cb18322..f62340ecc534 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -163,6 +163,7 @@ obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o
+obj-$(CONFIG_RTC_DRV_SD2405AL) += rtc-sd2405al.o
obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 4aad9bb99868..c4a3ab53dcd4 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -523,7 +523,6 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
static const struct file_operations rtc_dev_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = rtc_dev_read,
.poll = rtc_dev_poll,
.unlocked_ioctl = rtc_dev_ioctl,
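
Editor's note: this hunk and the matching ones below (rtc-m41t80 and the s390 char/cio drivers) drop ".llseek = no_llseek" as part of the tree-wide no_llseek removal: seekability is keyed off FMODE_LSEEK, which is not set when ->llseek is NULL, so simply omitting the member has the same effect. A sketch of the resulting pattern (demo_ioctl is a stand-in):

	/* no .llseek member at all: the fd is simply not seekable */
	static const struct file_operations demo_fops = {
		.owner		= THIS_MODULE,
		.open		= nonseekable_open,
		.unlocked_ioctl	= demo_ioctl,
	};
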
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f93bee96e362..993c0878fb66 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -368,6 +368,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
return ret;
rtc->gpbr = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
rtc->gpbr_offset = args.args[0];
if (IS_ERR(rtc->gpbr)) {
dev_err(&pdev->dev, "failed to retrieve gpbr regmap, aborting.\n");
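
Editor's note: the added of_node_put() balances the reference taken on args.np by the phandle lookup, which otherwise leaks a device_node refcount on every probe. The general shape of the fixed pattern, with a placeholder property name:

	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, "vendor,some-syscon", 1, 0, &args);
	if (ret)
		return ret;
	regmap = syscon_node_to_regmap(args.np);
	of_node_put(args.np);	/* drop the lookup's reference */
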
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 0013bff0447d..1f58ae8b151e 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -850,7 +850,6 @@ static const struct file_operations wdt_fops = {
.write = wdt_write,
.open = wdt_open,
.release = wdt_release,
- .llseek = no_llseek,
};
static struct miscdevice wdt_dev = {
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index f0f6b9b6daec..5d30ce8e13ca 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -132,7 +132,7 @@ static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
M48T59_WRITE((bin2bcd(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
M48T59_WRITE(bin2bcd(year % 100), M48T59_YEAR);
- if (pdata->type == M48T59RTC_TYPE_M48T59 && (year / 100))
+ if (pdata->type == M48T59RTC_TYPE_M48T59 && (year >= 100))
val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
val |= (bin2bcd(tm->tm_wday) & 0x07);
M48T59_WRITE(val, M48T59_WDAY);
@@ -458,6 +458,8 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, m48t59);
m48t59->rtc->ops = &m48t59_rtc_ops;
+ m48t59->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ m48t59->rtc->range_max = RTC_TIMESTAMP_END_2099;
nvmem_cfg.size = pdata->offset;
ret = devm_rtc_nvmem_register(m48t59->rtc, &nvmem_cfg);
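
Editor's note: since year (= tm_year, years since 1900) is never negative here, "year >= 100" is exactly equivalent to the old "year / 100" truth test, just more readable: for 2024, tm_year is 124 and 124 >= 100 turns on the century bits (M48T59_WDAY_CEB | M48T59_WDAY_CB). The added range_min/range_max let the RTC core reject dates outside 1900..2099 before they ever reach the BCD conversion.
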
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index e73102a39f1b..711f62eecd79 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -429,14 +429,23 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
return devm_rtc_register_device(rtc->rtc);
}
+static const struct platform_device_id rc5t619_rtc_id[] = {
+ {
+ .name = "rc5t619-rtc",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rc5t619_rtc_id);
+
static struct platform_driver rc5t619_rtc_driver = {
.driver = {
.name = "rc5t619-rtc",
},
.probe = rc5t619_rtc_probe,
+ .id_table = rc5t619_rtc_id,
};
-
module_platform_driver(rc5t619_rtc_driver);
-MODULE_ALIAS("platform:rc5t619-rtc");
+
MODULE_DESCRIPTION("RICOH RC5T619 RTC driver");
MODULE_LICENSE("GPL");
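
Editor's note: with MODULE_DEVICE_TABLE(platform, ...) present, modpost derives the "platform:rc5t619-rtc" alias from the id table, so the hand-coded MODULE_ALIAS() becomes redundant rather than lost, and the .id_table also drives probe-time matching. The pattern extends naturally if more variants ever need the same driver (the second entry below is hypothetical):

	static const struct platform_device_id demo_rtc_id[] = {
		{ .name = "rc5t619-rtc" },
		{ .name = "rc5t620-rtc" },	/* hypothetical sibling */
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(platform, demo_rtc_id);
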
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 2d6b655a4b25..e3dc18882f41 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -56,7 +56,6 @@ static const struct i2c_device_id s35390a_id[] = {
MODULE_DEVICE_TABLE(i2c, s35390a_id);
static const __maybe_unused struct of_device_id s35390a_of_match[] = {
- { .compatible = "s35390a" },
{ .compatible = "sii,s35390a" },
{ }
};
diff --git a/drivers/rtc/rtc-sd2405al.c b/drivers/rtc/rtc-sd2405al.c
new file mode 100644
index 000000000000..d2568c3e3876
--- /dev/null
+++ b/drivers/rtc/rtc-sd2405al.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RTC driver for the SD2405AL Real-Time Clock
+ *
+ * Datasheet:
+ * https://image.dfrobot.com/image/data/TOY0021/SD2405AL%20datasheet%20(Angelo%20v0.1).pdf
+ *
+ * Copyright (C) 2024 Tóth János <gomba007@gmail.com>
+ */
+
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+/* Real time clock registers */
+#define SD2405AL_REG_T_SEC 0x00
+#define SD2405AL_REG_T_MIN 0x01
+#define SD2405AL_REG_T_HOUR 0x02
+# define SD2405AL_BIT_12H_PM BIT(5)
+# define SD2405AL_BIT_24H BIT(7)
+#define SD2405AL_REG_T_WEEK 0x03
+#define SD2405AL_REG_T_DAY 0x04
+#define SD2405AL_REG_T_MON 0x05
+#define SD2405AL_REG_T_YEAR 0x06
+
+#define SD2405AL_NUM_T_REGS (SD2405AL_REG_T_YEAR - SD2405AL_REG_T_SEC + 1)
+
+/* Control registers */
+#define SD2405AL_REG_CTR1 0x0F
+# define SD2405AL_BIT_WRTC2 BIT(2)
+# define SD2405AL_BIT_WRTC3 BIT(7)
+#define SD2405AL_REG_CTR2 0x10
+# define SD2405AL_BIT_WRTC1 BIT(7)
+#define SD2405AL_REG_CTR3 0x11
+#define SD2405AL_REG_TTF 0x12
+#define SD2405AL_REG_CNTDWN 0x13
+
+/* General RAM */
+#define SD2405AL_REG_M_START 0x14
+#define SD2405AL_REG_M_END 0x1F
+
+struct sd2405al {
+ struct device *dev;
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+};
+
+static int sd2405al_enable_reg_write(struct sd2405al *sd2405al)
+{
+ int ret;
+
+ /* order of writes is important */
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR2,
+ SD2405AL_BIT_WRTC1, SD2405AL_BIT_WRTC1);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR1,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sd2405al_disable_reg_write(struct sd2405al *sd2405al)
+{
+ int ret;
+
+ /* order of writes is important */
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR1,
+ SD2405AL_BIT_WRTC2 | SD2405AL_BIT_WRTC3, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(sd2405al->regmap, SD2405AL_REG_CTR2,
+ SD2405AL_BIT_WRTC1, 0x00);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int sd2405al_read_time(struct device *dev, struct rtc_time *time)
+{
+ u8 data[SD2405AL_NUM_T_REGS] = { 0 };
+ struct sd2405al *sd2405al = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_bulk_read(sd2405al->regmap, SD2405AL_REG_T_SEC, data,
+ SD2405AL_NUM_T_REGS);
+ if (ret < 0)
+ return ret;
+
+ time->tm_sec = bcd2bin(data[SD2405AL_REG_T_SEC] & 0x7F);
+ time->tm_min = bcd2bin(data[SD2405AL_REG_T_MIN] & 0x7F);
+
+ if (data[SD2405AL_REG_T_HOUR] & SD2405AL_BIT_24H)
+ time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] & 0x3F);
+	else if (data[SD2405AL_REG_T_HOUR] & SD2405AL_BIT_12H_PM)
+		time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] & 0x1F) + 12;
+	else /* 12 hour mode, AM */
+		time->tm_hour = bcd2bin(data[SD2405AL_REG_T_HOUR] & 0x1F);
+
+ time->tm_wday = bcd2bin(data[SD2405AL_REG_T_WEEK] & 0x07);
+ time->tm_mday = bcd2bin(data[SD2405AL_REG_T_DAY] & 0x3F);
+ time->tm_mon = bcd2bin(data[SD2405AL_REG_T_MON] & 0x1F) - 1;
+ time->tm_year = bcd2bin(data[SD2405AL_REG_T_YEAR]) + 100;
+
+ dev_dbg(sd2405al->dev, "read time: %ptR (%d)\n", time, time->tm_wday);
+
+ return 0;
+}
+
+static int sd2405al_set_time(struct device *dev, struct rtc_time *time)
+{
+ u8 data[SD2405AL_NUM_T_REGS];
+ struct sd2405al *sd2405al = dev_get_drvdata(dev);
+ int ret;
+
+ data[SD2405AL_REG_T_SEC] = bin2bcd(time->tm_sec);
+ data[SD2405AL_REG_T_MIN] = bin2bcd(time->tm_min);
+ data[SD2405AL_REG_T_HOUR] = bin2bcd(time->tm_hour) | SD2405AL_BIT_24H;
+ data[SD2405AL_REG_T_DAY] = bin2bcd(time->tm_mday);
+ data[SD2405AL_REG_T_WEEK] = bin2bcd(time->tm_wday);
+	data[SD2405AL_REG_T_MON] = bin2bcd(time->tm_mon + 1); /* tm_mon is 0-based */
+ data[SD2405AL_REG_T_YEAR] = bin2bcd(time->tm_year - 100);
+
+ ret = sd2405al_enable_reg_write(sd2405al);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_write(sd2405al->regmap, SD2405AL_REG_T_SEC, data,
+ SD2405AL_NUM_T_REGS);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(sd2405al->regmap, SD2405AL_REG_TTF, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = sd2405al_disable_reg_write(sd2405al);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(sd2405al->dev, "set time: %ptR (%d)\n", time, time->tm_wday);
+
+ return 0;
+}
+
+static const struct rtc_class_ops sd2405al_rtc_ops = {
+ .read_time = sd2405al_read_time,
+ .set_time = sd2405al_set_time,
+};
+
+static const struct regmap_config sd2405al_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SD2405AL_REG_M_END,
+};
+
+static int sd2405al_probe(struct i2c_client *client)
+{
+ struct sd2405al *sd2405al;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ sd2405al = devm_kzalloc(&client->dev, sizeof(*sd2405al), GFP_KERNEL);
+ if (!sd2405al)
+ return -ENOMEM;
+
+ sd2405al->dev = &client->dev;
+
+ sd2405al->regmap = devm_regmap_init_i2c(client, &sd2405al_regmap_conf);
+ if (IS_ERR(sd2405al->regmap))
+ return PTR_ERR(sd2405al->regmap);
+
+ sd2405al->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(sd2405al->rtc))
+ return PTR_ERR(sd2405al->rtc);
+
+ sd2405al->rtc->ops = &sd2405al_rtc_ops;
+ sd2405al->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ sd2405al->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ dev_set_drvdata(&client->dev, sd2405al);
+
+ ret = devm_rtc_register_device(sd2405al->rtc);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct i2c_device_id sd2405al_id[] = {
+ { "sd2405al" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, sd2405al_id);
+
+static const __maybe_unused struct of_device_id sd2405al_of_match[] = {
+ { .compatible = "dfrobot,sd2405al" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sd2405al_of_match);
+
+static struct i2c_driver sd2405al_driver = {
+ .driver = {
+ .name = "sd2405al",
+ .of_match_table = of_match_ptr(sd2405al_of_match),
+ },
+ .probe = sd2405al_probe,
+ .id_table = sd2405al_id,
+};
+
+module_i2c_driver(sd2405al_driver);
+
+MODULE_AUTHOR("Tóth János <gomba007@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SD2405AL RTC driver");
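
Editor's note: a board would describe the chip as an ordinary I2C child node; an illustrative snippet (the bus label and the 0x32 address are assumptions to be checked against the schematic and datasheet):

	&i2c1 {
		rtc@32 {
			compatible = "dfrobot,sd2405al";
			reg = <0x32>;
		};
	};
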
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 98b07969609d..3e4f2ee22b0b 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -7,12 +7,16 @@
#include <linux/bcd.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
@@ -42,6 +46,12 @@
#define STM32_RTC_CR_FMT BIT(6)
#define STM32_RTC_CR_ALRAE BIT(8)
#define STM32_RTC_CR_ALRAIE BIT(12)
+#define STM32_RTC_CR_OSEL GENMASK(22, 21)
+#define STM32_RTC_CR_OSEL_ALARM_A FIELD_PREP(STM32_RTC_CR_OSEL, 0x01)
+#define STM32_RTC_CR_COE BIT(23)
+#define STM32_RTC_CR_TAMPOE BIT(26)
+#define STM32_RTC_CR_TAMPALRM_TYPE BIT(30)
+#define STM32_RTC_CR_OUT2EN BIT(31)
/* STM32_RTC_ISR/STM32_RTC_ICSR bit fields */
#define STM32_RTC_ISR_ALRAWF BIT(0)
@@ -78,6 +88,12 @@
/* STM32_RTC_SR/_SCR bit fields */
#define STM32_RTC_SR_ALRA BIT(0)
+/* STM32_RTC_CFGR bit fields */
+#define STM32_RTC_CFGR_OUT2_RMP BIT(0)
+#define STM32_RTC_CFGR_LSCOEN GENMASK(2, 1)
+#define STM32_RTC_CFGR_LSCOEN_OUT1 1
+#define STM32_RTC_CFGR_LSCOEN_OUT2_RMP 2
+
/* STM32_RTC_VERR bit fields */
#define STM32_RTC_VERR_MINREV_SHIFT 0
#define STM32_RTC_VERR_MINREV GENMASK(3, 0)
@@ -107,6 +123,14 @@
/* STM32 RTC driver time helpers */
#define SEC_PER_DAY (24 * 60 * 60)
+/* STM32 RTC pinctrl helpers */
+#define STM32_RTC_PINMUX(_name, _action, ...) { \
+ .name = (_name), \
+ .action = (_action), \
+ .groups = ((const char *[]){ __VA_ARGS__ }), \
+ .num_groups = ARRAY_SIZE(((const char *[]){ __VA_ARGS__ })), \
+}
+
struct stm32_rtc;
struct stm32_rtc_registers {
@@ -119,6 +143,7 @@ struct stm32_rtc_registers {
u16 wpr;
u16 sr;
u16 scr;
+ u16 cfgr;
u16 verr;
};
@@ -134,6 +159,8 @@ struct stm32_rtc_data {
bool need_dbp;
bool need_accuracy;
bool rif_protected;
+ bool has_lsco;
+ bool has_alarm_out;
};
struct stm32_rtc {
@@ -146,6 +173,7 @@ struct stm32_rtc {
struct clk *rtc_ck;
const struct stm32_rtc_data *data;
int irq_alarm;
+ struct clk *clk_lsco;
};
struct stm32_rtc_rif_resource {
@@ -171,6 +199,209 @@ static void stm32_rtc_wpr_lock(struct stm32_rtc *rtc)
writel_relaxed(RTC_WPR_WRONG_KEY, rtc->base + regs->wpr);
}
+enum stm32_rtc_pin_name {
+ NONE,
+ OUT1,
+ OUT2,
+ OUT2_RMP
+};
+
+static const struct pinctrl_pin_desc stm32_rtc_pinctrl_pins[] = {
+ PINCTRL_PIN(OUT1, "out1"),
+ PINCTRL_PIN(OUT2, "out2"),
+ PINCTRL_PIN(OUT2_RMP, "out2_rmp"),
+};
+
+static int stm32_rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(stm32_rtc_pinctrl_pins);
+}
+
+static const char *stm32_rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return stm32_rtc_pinctrl_pins[selector].name;
+}
+
+static int stm32_rtc_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = &stm32_rtc_pinctrl_pins[selector].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static const struct pinctrl_ops stm32_rtc_pinctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .get_groups_count = stm32_rtc_pinctrl_get_groups_count,
+ .get_group_name = stm32_rtc_pinctrl_get_group_name,
+ .get_group_pins = stm32_rtc_pinctrl_get_group_pins,
+};
+
+struct stm32_rtc_pinmux_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned int num_groups;
+ int (*action)(struct pinctrl_dev *pctl_dev, unsigned int pin);
+};
+
+static int stm32_rtc_pinmux_action_alarm(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+
+ if (!rtc->data->has_alarm_out)
+ return -EPERM;
+
+ cr &= ~STM32_RTC_CR_OSEL;
+ cr |= STM32_RTC_CR_OSEL_ALARM_A;
+ cr &= ~STM32_RTC_CR_TAMPOE;
+ cr &= ~STM32_RTC_CR_COE;
+ cr &= ~STM32_RTC_CR_TAMPALRM_TYPE;
+
+ switch (pin) {
+ case OUT1:
+ cr &= ~STM32_RTC_CR_OUT2EN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ case OUT2:
+ cr |= STM32_RTC_CR_OUT2EN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ case OUT2_RMP:
+ cr |= STM32_RTC_CR_OUT2EN;
+ cfgr |= STM32_RTC_CFGR_OUT2_RMP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ stm32_rtc_wpr_unlock(rtc);
+ writel_relaxed(cr, rtc->base + regs.cr);
+ writel_relaxed(cfgr, rtc->base + regs.cfgr);
+ stm32_rtc_wpr_lock(rtc);
+
+ return 0;
+}
+
+static int stm32_rtc_pinmux_lsco_available(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+ unsigned int calib = STM32_RTC_CR_COE;
+ unsigned int tampalrm = STM32_RTC_CR_TAMPOE | STM32_RTC_CR_OSEL;
+
+ switch (pin) {
+ case OUT1:
+ if ((!(cr & STM32_RTC_CR_OUT2EN) &&
+ ((cr & calib) || cr & tampalrm)) ||
+ ((cr & calib) && (cr & tampalrm)))
+ return -EBUSY;
+ break;
+ case OUT2_RMP:
+ if ((cr & STM32_RTC_CR_OUT2EN) &&
+ (cfgr & STM32_RTC_CFGR_OUT2_RMP) &&
+ ((cr & calib) || (cr & tampalrm)))
+ return -EBUSY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (clk_get_rate(rtc->rtc_ck) != 32768)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int stm32_rtc_pinmux_action_lsco(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct stm32_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ struct device *dev = rtc->rtc_dev->dev.parent;
+ u8 lscoen;
+ int ret;
+
+ if (!rtc->data->has_lsco)
+ return -EPERM;
+
+ ret = stm32_rtc_pinmux_lsco_available(pctldev, pin);
+ if (ret)
+ return ret;
+
+ lscoen = (pin == OUT1) ? STM32_RTC_CFGR_LSCOEN_OUT1 : STM32_RTC_CFGR_LSCOEN_OUT2_RMP;
+
+ rtc->clk_lsco = clk_register_gate(dev, "rtc_lsco", __clk_get_name(rtc->rtc_ck),
+ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL,
+ rtc->base + regs.cfgr, lscoen, 0, NULL);
+ if (IS_ERR(rtc->clk_lsco))
+ return PTR_ERR(rtc->clk_lsco);
+
+ of_clk_add_provider(dev->of_node, of_clk_src_simple_get, rtc->clk_lsco);
+
+ return 0;
+}
+
+static const struct stm32_rtc_pinmux_func stm32_rtc_pinmux_functions[] = {
+ STM32_RTC_PINMUX("lsco", &stm32_rtc_pinmux_action_lsco, "out1", "out2_rmp"),
+ STM32_RTC_PINMUX("alarm-a", &stm32_rtc_pinmux_action_alarm, "out1", "out2", "out2_rmp"),
+};
+
+static int stm32_rtc_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(stm32_rtc_pinmux_functions);
+}
+
+static const char *stm32_rtc_pinmux_get_fname(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ return stm32_rtc_pinmux_functions[selector].name;
+}
+
+static int stm32_rtc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char * const **groups, unsigned int * const num_groups)
+{
+ *groups = stm32_rtc_pinmux_functions[selector].groups;
+ *num_groups = stm32_rtc_pinmux_functions[selector].num_groups;
+ return 0;
+}
+
+static int stm32_rtc_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct stm32_rtc_pinmux_func selected_func = stm32_rtc_pinmux_functions[selector];
+ struct pinctrl_pin_desc pin = stm32_rtc_pinctrl_pins[group];
+
+ /* Call action */
+ if (selected_func.action)
+ return selected_func.action(pctldev, pin.number);
+
+ return -EINVAL;
+}
+
+static const struct pinmux_ops stm32_rtc_pinmux_ops = {
+ .get_functions_count = stm32_rtc_pinmux_get_functions_count,
+ .get_function_name = stm32_rtc_pinmux_get_fname,
+ .get_function_groups = stm32_rtc_pinmux_get_groups,
+ .set_mux = stm32_rtc_pinmux_set_mux,
+ .strict = true,
+};
+
+static struct pinctrl_desc stm32_rtc_pdesc = {
+ .name = DRIVER_NAME,
+ .pins = stm32_rtc_pinctrl_pins,
+ .npins = ARRAY_SIZE(stm32_rtc_pinctrl_pins),
+ .owner = THIS_MODULE,
+ .pctlops = &stm32_rtc_pinctrl_ops,
+ .pmxops = &stm32_rtc_pinmux_ops,
+};
+
static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
{
const struct stm32_rtc_registers *regs = &rtc->data->regs;
@@ -576,6 +807,8 @@ static const struct stm32_rtc_data stm32_rtc_data = {
.need_dbp = true,
.need_accuracy = false,
.rif_protected = false,
+ .has_lsco = false,
+ .has_alarm_out = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -586,6 +819,7 @@ static const struct stm32_rtc_data stm32_rtc_data = {
.wpr = 0x24,
.sr = 0x0C, /* set to ISR offset to ease alarm management */
.scr = UNDEF_REG,
+ .cfgr = UNDEF_REG,
.verr = UNDEF_REG,
},
.events = {
@@ -599,6 +833,8 @@ static const struct stm32_rtc_data stm32h7_rtc_data = {
.need_dbp = true,
.need_accuracy = false,
.rif_protected = false,
+ .has_lsco = false,
+ .has_alarm_out = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -609,6 +845,7 @@ static const struct stm32_rtc_data stm32h7_rtc_data = {
.wpr = 0x24,
.sr = 0x0C, /* set to ISR offset to ease alarm management */
.scr = UNDEF_REG,
+ .cfgr = UNDEF_REG,
.verr = UNDEF_REG,
},
.events = {
@@ -631,6 +868,8 @@ static const struct stm32_rtc_data stm32mp1_data = {
.need_dbp = false,
.need_accuracy = true,
.rif_protected = false,
+ .has_lsco = true,
+ .has_alarm_out = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -641,6 +880,7 @@ static const struct stm32_rtc_data stm32mp1_data = {
.wpr = 0x24,
.sr = 0x50,
.scr = 0x5C,
+ .cfgr = 0x60,
.verr = 0x3F4,
},
.events = {
@@ -654,6 +894,8 @@ static const struct stm32_rtc_data stm32mp25_data = {
.need_dbp = false,
.need_accuracy = true,
.rif_protected = true,
+ .has_lsco = true,
+ .has_alarm_out = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -664,6 +906,7 @@ static const struct stm32_rtc_data stm32mp25_data = {
.wpr = 0x24,
.sr = 0x50,
.scr = 0x5C,
+ .cfgr = 0x60,
.verr = 0x3F4,
},
.events = {
@@ -681,6 +924,30 @@ static const struct of_device_id stm32_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, stm32_rtc_of_match);
+static void stm32_rtc_clean_outs(struct stm32_rtc *rtc)
+{
+ struct stm32_rtc_registers regs = rtc->data->regs;
+ unsigned int cr = readl_relaxed(rtc->base + regs.cr);
+
+ cr &= ~STM32_RTC_CR_OSEL;
+ cr &= ~STM32_RTC_CR_TAMPOE;
+ cr &= ~STM32_RTC_CR_COE;
+ cr &= ~STM32_RTC_CR_TAMPALRM_TYPE;
+ cr &= ~STM32_RTC_CR_OUT2EN;
+
+ stm32_rtc_wpr_unlock(rtc);
+ writel_relaxed(cr, rtc->base + regs.cr);
+ stm32_rtc_wpr_lock(rtc);
+
+ if (regs.cfgr != UNDEF_REG) {
+ unsigned int cfgr = readl_relaxed(rtc->base + regs.cfgr);
+
+ cfgr &= ~STM32_RTC_CFGR_LSCOEN;
+ cfgr &= ~STM32_RTC_CFGR_OUT2_RMP;
+ writel_relaxed(cfgr, rtc->base + regs.cfgr);
+ }
+}
+
static int stm32_rtc_check_rif(struct stm32_rtc *stm32_rtc,
struct stm32_rtc_rif_resource res)
{
@@ -791,6 +1058,7 @@ static int stm32_rtc_probe(struct platform_device *pdev)
{
struct stm32_rtc *rtc;
const struct stm32_rtc_registers *regs;
+ struct pinctrl_dev *pctl;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -912,6 +1180,16 @@ static int stm32_rtc_probe(struct platform_device *pdev)
goto err;
}
+ stm32_rtc_clean_outs(rtc);
+
+ ret = devm_pinctrl_register_and_init(&pdev->dev, &stm32_rtc_pdesc, rtc, &pctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "pinctrl register failed");
+
+ ret = pinctrl_enable(pctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "pinctrl enable failed");
+
/*
* If INITS flag is reset (calendar year field set to 0x00), calendar
* must be initialized
@@ -950,6 +1228,9 @@ static void stm32_rtc_remove(struct platform_device *pdev)
const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int cr;
+ if (!IS_ERR_OR_NULL(rtc->clk_lsco))
+ clk_unregister_gate(rtc->clk_lsco);
+
/* Disable interrupts */
stm32_rtc_wpr_unlock(rtc);
cr = readl_relaxed(rtc->base + regs->cr);
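
Editor's note: the new pinctrl provider exposes the functions "lsco" and "alarm-a" on the pins "out1", "out2" and "out2_rmp"; selecting a function/pin pair from the device tree invokes the matching action callback, which either programs OSEL/OUT2EN for the alarm output or registers the LSCO gate clock. An illustrative consumer fragment (node and label names are made up; the config node sits under the RTC node since the RTC is its own pinctrl provider):

	&rtc {
		pinctrl-names = "default";
		pinctrl-0 = <&rtc_lsco_pins>;

		rtc_lsco_pins: rtc-lsco-pins {
			function = "lsco";
			pins = "out2_rmp";
		};
	};
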
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 8e0c66906103..e681c1745866 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -402,6 +402,7 @@ CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
.rc_osc_rate = 32000,
.has_out_clk = 1,
+ .has_auto_swt = 1,
};
static void __init sun8i_v3_rtc_clk_init(struct device_node *node)
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 2cfacdd37e09..4e24c12004f1 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -591,8 +591,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
memset(&nvmem_cfg, 0, sizeof(nvmem_cfg));
nvmem_cfg.name = "twl-secured-";
nvmem_cfg.type = NVMEM_TYPE_BATTERY_BACKED;
- nvmem_cfg.reg_read = twl_nvram_read,
- nvmem_cfg.reg_write = twl_nvram_write,
+ nvmem_cfg.reg_read = twl_nvram_read;
+ nvmem_cfg.reg_write = twl_nvram_write;
nvmem_cfg.word_size = 1;
nvmem_cfg.stride = 1;
if (twl_class_is_4030()) {
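
Editor's note: the two replaced lines compiled even with trailing commas because each pair parsed as one expression statement joined by the C comma operator, so the assignments did happen; the fix removes the misleading punctuation before it hides a real bug someday. A minimal demonstration of why the old form was legal C:

	int a, b;

	a = 1,	/* comma operator: evaluate 'a = 1', then ... */
	b = 2;	/* ... 'b = 2'; one statement, same net effect */
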
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 61515781c5dd..cfe7efd5b5da 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -515,7 +515,6 @@ static const struct file_operations fs3270_fops = {
.compat_ioctl = fs3270_ioctl, /* ioctl */
.open = fs3270_open, /* open */
.release = fs3270_close, /* release */
- .llseek = no_llseek,
};
static void fs3270_create_cb(int minor)
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 248b5db3eaa8..dd6051602070 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -115,7 +115,6 @@ static const struct file_operations sclp_ctl_fops = {
.open = nonseekable_open,
.unlocked_ioctl = sclp_ctl_ioctl,
.compat_ioctl = sclp_ctl_ioctl,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 07df04af82f2..29156455970e 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -44,6 +44,7 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_ibs = !!(sccb->fac117 & 0x20);
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
+ sclp.has_wti = !!(sccb->fac119 & 0x40);
sclp.has_kss = !!(sccb->fac98 & 0x01);
sclp.has_aisii = !!(sccb->fac118 & 0x40);
sclp.has_aeni = !!(sccb->fac118 & 0x20);
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index cc8237afeffa..89778d922d9f 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -52,7 +52,6 @@ static const struct file_operations tape_fops =
#endif
.open = tapechar_open,
.release = tapechar_release,
- .llseek = no_llseek,
};
static int tapechar_major = TAPECHAR_MAJOR;
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index 42c9f77f8da0..f598edc5f251 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -448,7 +448,6 @@ static long uvio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static const struct file_operations uvio_dev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = uvio_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice uvio_dev_miscdev = {
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index eb0520a9d4af..c6d58335beb4 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -242,7 +242,6 @@ static const struct file_operations vmcp_fops = {
.write = vmcp_write,
.unlocked_ioctl = vmcp_ioctl,
.compat_ioctl = vmcp_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice vmcp_dev = {
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c09e1e09fb66..bd5cecc44123 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -96,7 +96,6 @@ static const struct file_operations vmlogrdr_fops = {
.open = vmlogrdr_open,
.release = vmlogrdr_release,
.read = vmlogrdr_read,
- .llseek = no_llseek,
};
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 0969fa01df58..33cebb91b933 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -165,7 +165,6 @@ static const struct file_operations zcore_reipl_fops = {
.write = zcore_reipl_write,
.open = zcore_reipl_open,
.release = zcore_reipl_release,
- .llseek = no_llseek,
};
static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
@@ -200,7 +199,6 @@ static const struct file_operations zcore_hsa_fops = {
.write = zcore_hsa_write,
.read = zcore_hsa_read,
.open = nonseekable_open,
- .llseek = no_llseek,
};
static int __init check_sdias(void)
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index e6c800653f98..1e58ee3cc87d 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -924,7 +924,6 @@ static const struct file_operations chsc_fops = {
.release = chsc_release,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice chsc_misc_device = {
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 53b68f8c32f3..7b59d20bf785 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1332,7 +1332,6 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
static const struct proc_ops cio_settle_proc_ops = {
.proc_open = nonseekable_open,
.proc_write = cio_settle_write,
- .proc_lseek = no_llseek,
};
static int __init cio_settle_init(void)
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index bd94811fd9f1..c88b6e071847 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -13,10 +13,22 @@ obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o
-# pkey kernel module
-pkey-objs := pkey_api.o
+# pkey base and api module
+pkey-objs := pkey_base.o pkey_api.o pkey_sysfs.o
obj-$(CONFIG_PKEY) += pkey.o
+# pkey cca handler module
+pkey-cca-objs := pkey_cca.o
+obj-$(CONFIG_PKEY_CCA) += pkey-cca.o
+
+# pkey ep11 handler module
+pkey-ep11-objs := pkey_ep11.o
+obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o
+
+# pkey pckmo handler module
+pkey-pckmo-objs := pkey_pckmo.o
+obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o
+
# adjunct processor matrix
vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
obj-$(CONFIG_VFIO_AP) += vfio_ap.o
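
Editor's note: pkey is now split into a base/API module plus three optional handler modules, each behind its own Kconfig symbol; a fully modular .config would therefore carry:

	CONFIG_PKEY=m
	CONFIG_PKEY_CCA=m
	CONFIG_PKEY_EP11=m
	CONFIG_PKEY_PCKMO=m
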
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f9f682f19415..60cea6c24349 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -107,6 +107,7 @@ debug_info_t *ap_dbf_info;
static bool ap_scan_bus(void);
static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
+static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */
static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
static int ap_scan_bus_time = AP_CONFIG_TIME;
static struct timer_list ap_scan_bus_timer;
@@ -733,7 +734,7 @@ static void ap_check_bindings_complete(void)
if (!completion_done(&ap_apqn_bindings_complete)) {
complete_all(&ap_apqn_bindings_complete);
ap_send_bindings_complete_uevent();
- pr_debug("%s all apqn bindings complete\n", __func__);
+ pr_debug("all apqn bindings complete\n");
}
}
}
@@ -768,7 +769,7 @@ int ap_wait_apqn_bindings_complete(unsigned long timeout)
else if (l == 0 && timeout)
rc = -ETIME;
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
@@ -795,8 +796,7 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres) {
- pr_debug("%s reprobing queue=%02x.%04x\n",
- __func__, card, queue);
+ pr_debug("reprobing queue=%02x.%04x\n", card, queue);
rc = device_reprobe(dev);
if (rc)
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
@@ -1000,17 +1000,31 @@ bool ap_bus_force_rescan(void)
unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
bool rc = false;
- pr_debug(">%s scan counter=%lu\n", __func__, scan_counter);
+ pr_debug("> scan counter=%lu\n", scan_counter);
/* Only trigger AP bus scans after the initial scan is done */
if (scan_counter <= 0)
goto out;
+ /*
+	 * There is one unlikely but nevertheless valid scenario where the
+	 * thread holding the mutex tries to send some crypto load while
+	 * all cards are offline: a rescan is then triggered, which causes
+	 * a recursive call of ap_bus_force_rescan(). A simple return when
+	 * the mutex is already locked by this thread resolves the recursion.
+ */
+ if (mutex_is_locked(&ap_scan_bus_mutex)) {
+ if (ap_scan_bus_task == current)
+ goto out;
+ }
+
/* Try to acquire the AP scan bus mutex */
if (mutex_trylock(&ap_scan_bus_mutex)) {
/* mutex acquired, run the AP bus scan */
+ ap_scan_bus_task = current;
ap_scan_bus_result = ap_scan_bus();
rc = ap_scan_bus_result;
+ ap_scan_bus_task = NULL;
mutex_unlock(&ap_scan_bus_mutex);
goto out;
}
@@ -1029,7 +1043,7 @@ bool ap_bus_force_rescan(void)
mutex_unlock(&ap_scan_bus_mutex);
out:
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
@@ -1043,7 +1057,7 @@ static int ap_bus_cfg_chg(struct notifier_block *nb,
if (action != CHSC_NOTIFY_AP_CFG)
return NOTIFY_DONE;
- pr_debug("%s config change, forcing bus rescan\n", __func__);
+ pr_debug("config change, forcing bus rescan\n");
ap_bus_force_rescan();
@@ -1900,8 +1914,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev checkstop on\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev checkstop on\n",
+ ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@@ -1911,8 +1925,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev checkstop off\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev checkstop off\n",
+ ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
@@ -1924,8 +1938,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev config off\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev config off\n",
+ ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
@@ -1936,8 +1950,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev config on\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev config on\n",
+ ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
@@ -2009,8 +2023,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- pr_debug("%s(%d) no type info (no APQN found), ignored\n",
- __func__, ap);
+ pr_debug("(%d) no type info (no APQN found), ignored\n",
+ ap);
}
return;
}
@@ -2022,8 +2036,7 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- pr_debug("%s(%d) no valid type (0) info, ignored\n",
- __func__, ap);
+ pr_debug("(%d) no valid type (0) info, ignored\n", ap);
}
return;
}
@@ -2202,7 +2215,7 @@ static bool ap_scan_bus(void)
bool config_changed;
int ap;
- pr_debug(">%s\n", __func__);
+ pr_debug(">\n");
/* (re-)fetch configuration via QCI */
config_changed = ap_get_configuration();
@@ -2243,7 +2256,7 @@ static bool ap_scan_bus(void)
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
- pr_debug("%s init scan complete\n", __func__);
+ pr_debug("init scan complete\n");
ap_send_init_scan_done_uevent();
}
@@ -2251,7 +2264,7 @@ static bool ap_scan_bus(void)
mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
- pr_debug("<%s config_changed=%d\n", __func__, config_changed);
+ pr_debug("< config_changed=%d\n", config_changed);
return config_changed;
}
@@ -2284,7 +2297,9 @@ static void ap_scan_bus_wq_callback(struct work_struct *unused)
* system_long_wq which invokes this function here again.
*/
if (mutex_trylock(&ap_scan_bus_mutex)) {
+ ap_scan_bus_task = current;
ap_scan_bus_result = ap_scan_bus();
+ ap_scan_bus_task = NULL;
mutex_unlock(&ap_scan_bus_mutex);
}
}
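
Editor's note: the ap_scan_bus_task bookkeeping breaks a potential self-recursion: if the thread that already holds ap_scan_bus_mutex re-enters ap_bus_force_rescan() (e.g. its own crypto traffic finds all cards offline mid-scan), it now returns instead of contending with itself. The distilled pattern, assuming the pointer is only ever set by the mutex holder:

	static DEFINE_MUTEX(scan_mutex);
	static struct task_struct *scan_task;	/* valid only while scan_mutex is held */

	static bool force_rescan(void)
	{
		/* re-entered from the scanning thread itself: just give up */
		if (mutex_is_locked(&scan_mutex) && scan_task == current)
			return false;

		if (mutex_trylock(&scan_mutex)) {
			scan_task = current;
			/* ... run the bus scan ... */
			scan_task = NULL;
			mutex_unlock(&scan_mutex);
			return true;
		}
		return false;
	}
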
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 1f647ffd6f4d..8c878c5aa31f 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -171,8 +171,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
- pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
- __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+ pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
aq->pendingq_count, aq->requestq_count);
aq->pendingq_count = 0;
break;
@@ -453,8 +453,8 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
case AP_BS_Q_USABLE:
/* association is through */
aq->sm_state = AP_SM_STATE_IDLE;
- pr_debug("%s queue 0x%02x.%04x associated with %u\n",
- __func__, AP_QID_CARD(aq->qid),
+ pr_debug("queue 0x%02x.%04x associated with %u\n",
+ AP_QID_CARD(aq->qid),
AP_QID_QUEUE(aq->qid), aq->assoc_idx);
return AP_SM_WAIT_NONE;
case AP_BS_Q_USABLE_NO_SECURE_KEY:
@@ -697,8 +697,8 @@ static ssize_t ap_functions_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -853,8 +853,8 @@ static ssize_t se_bind_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -981,8 +981,8 @@ static ssize_t se_associate_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
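
Editor's note: dropping __func__ from these pr_debug() calls loses nothing: with CONFIG_DYNAMIC_DEBUG the function name can be prepended on demand via the 'f' flag, e.g.:

	echo 'file drivers/s390/crypto/ap_bus.c +pf' > /sys/kernel/debug/dynamic_debug/control
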
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index ffc0b5db55c2..3a39e167bdbf 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -10,1338 +10,698 @@
#define KMSG_COMPONENT "pkey"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
-#include <linux/random.h>
-#include <linux/cpufeature.h>
-#include <asm/zcrypt.h>
-#include <asm/cpacf.h>
-#include <asm/pkey.h>
-#include <crypto/aes.h>
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
-#include "zcrypt_ep11misc.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("s390 protected key interface");
-
-#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
-#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header))
-#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */
-#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */
-#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */
+#include "pkey_base.h"
/*
- * debug feature data and functions
+ * Helper functions
*/
-
-static debug_info_t *pkey_dbf_info;
-
-#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
-#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
-#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
-
-static void __init pkey_debug_init(void)
+static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
- /* 5 arguments per dbf entry (including the format string ptr) */
- pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
- debug_register_view(pkey_dbf_info, &debug_sprintf_view);
- debug_set_level(pkey_dbf_info, 3);
-}
-
-static void __exit pkey_debug_exit(void)
-{
- debug_unregister(pkey_dbf_info);
-}
+ int rc;
-/* inside view of a protected key token (only type 0x00 version 0x01) */
-struct protaeskeytoken {
- u8 type; /* 0x00 for PAES specific key tokens */
- u8 res0[3];
- u8 version; /* should be 0x01 for protected AES key token */
- u8 res1[3];
- u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
- u32 len; /* bytes actually stored in protkey[] */
- u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
-} __packed;
-
-/* inside view of a clear key token (type 0x00 version 0x02) */
-struct clearkeytoken {
- u8 type; /* 0x00 for PAES specific key tokens */
- u8 res0[3];
- u8 version; /* 0x02 for clear key token */
- u8 res1[3];
- u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */
- u32 len; /* bytes actually stored in clearkey[] */
- u8 clearkey[]; /* clear key value */
-} __packed;
-
-/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */
-static inline u32 pkey_keytype_aes_to_size(u32 keytype)
-{
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- return 16;
- case PKEY_KEYTYPE_AES_192:
- return 24;
- case PKEY_KEYTYPE_AES_256:
- return 32;
- default:
- return 0;
+ /* try the direct way */
+ rc = pkey_handler_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype);
+
+ /* if this did not work, try the slowpath way */
+ if (rc == -ENODEV) {
+ rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype);
+ if (rc)
+ rc = -ENODEV;
}
+
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
/*
- * Create a protected key from a clear key value via PCKMO instruction.
+ * In-Kernel function: Transform a key blob (of any type) into a protected key
*/
-static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+int pkey_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
- /* mask of available pckmo subfunctions */
- static cpacf_mask_t pckmo_functions;
-
- u8 paramblock[112];
- u32 pkeytype;
- int keysize;
- long fc;
-
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- /* 16 byte key, 32 byte aes wkvp, total 48 bytes */
- keysize = 16;
- pkeytype = keytype;
- fc = CPACF_PCKMO_ENC_AES_128_KEY;
- break;
- case PKEY_KEYTYPE_AES_192:
- /* 24 byte key, 32 byte aes wkvp, total 56 bytes */
- keysize = 24;
- pkeytype = keytype;
- fc = CPACF_PCKMO_ENC_AES_192_KEY;
- break;
- case PKEY_KEYTYPE_AES_256:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
- pkeytype = keytype;
- fc = CPACF_PCKMO_ENC_AES_256_KEY;
- break;
- case PKEY_KEYTYPE_ECC_P256:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
- break;
- case PKEY_KEYTYPE_ECC_P384:
- /* 48 byte key, 32 byte aes wkvp, total 80 bytes */
- keysize = 48;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
- break;
- case PKEY_KEYTYPE_ECC_P521:
- /* 80 byte key, 32 byte aes wkvp, total 112 bytes */
- keysize = 80;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
- break;
- case PKEY_KEYTYPE_ECC_ED25519:
- /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
- keysize = 32;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
- break;
- case PKEY_KEYTYPE_ECC_ED448:
- /* 64 byte key, 32 byte aes wkvp, total 96 bytes */
- keysize = 64;
- pkeytype = PKEY_KEYTYPE_ECC;
- fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
- __func__, keytype);
- return -EINVAL;
- }
-
- if (*protkeylen < keysize + AES_WK_VP_SIZE) {
- PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
- __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
- return -EINVAL;
- }
+ int rc;
- /* Did we already check for PCKMO ? */
- if (!pckmo_functions.bytes[0]) {
- /* no, so check now */
- if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
- return -ENODEV;
- }
- /* check for the pckmo subfunction we need now */
- if (!cpacf_test_func(&pckmo_functions, fc)) {
- PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
- return -ENODEV;
+ rc = key2protkey(NULL, 0, key, keylen,
+ protkey, protkeylen, protkeytype);
+ if (rc == -ENODEV) {
+ pkey_handler_request_modules();
+ rc = key2protkey(NULL, 0, key, keylen,
+ protkey, protkeylen, protkeytype);
}
- /* prepare param block */
- memset(paramblock, 0, sizeof(paramblock));
- memcpy(paramblock, clrkey, keysize);
-
- /* call the pckmo instruction */
- cpacf_pckmo(fc, paramblock);
-
- /* copy created protected key to key buffer including the wkvp block */
- *protkeylen = keysize + AES_WK_VP_SIZE;
- memcpy(protkey, paramblock, *protkeylen);
- *protkeytype = pkeytype;
-
- return 0;
+ return rc;
}
+EXPORT_SYMBOL(pkey_key2protkey);
/*
- * Find card and transform secure key into protected key.
+ * Ioctl functions
*/
-static int pkey_skey2pkey(const u8 *key, u8 *protkey,
- u32 *protkeylen, u32 *protkeytype)
-{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- u16 cardnr, domain;
- int rc, verify;
-
- zcrypt_wait_api_operational();
-
- /*
- * The cca_xxx2protkey call may fail when a card has been
- * addressed where the master key was changed after last fetch
- * of the mkvp into the cache. Try 3 times: First without verify
- * then with verify and last round with verify and old master
- * key verification pattern match not ignored.
- */
- for (verify = 0; verify < 3; verify++) {
- rc = cca_findcard(key, &cardnr, &domain, verify);
- if (rc < 0)
- continue;
- if (rc > 0 && verify < 2)
- continue;
- switch (hdr->version) {
- case TOKVER_CCA_AES:
- rc = cca_sec2protkey(cardnr, domain, key,
- protkey, protkeylen, protkeytype);
- break;
- case TOKVER_CCA_VLSC:
- rc = cca_cipher2protkey(cardnr, domain, key,
- protkey, protkeylen,
- protkeytype);
- break;
- default:
- return -EINVAL;
- }
- if (rc == 0)
- break;
- }
- if (rc)
- pr_debug("%s failed rc=%d\n", __func__, rc);
+static void *_copy_key_from_user(void __user *ukey, size_t keylen)
+{
+ if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE)
+ return ERR_PTR(-EINVAL);
- return rc;
+ return memdup_user(ukey, keylen);
}
-/*
- * Construct EP11 key with given clear key value.
- */
-static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
- u8 *keybuf, size_t *keybuflen)
+static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
{
- u32 nr_apqns, *apqns = NULL;
- u16 card, dom;
- int i, rc;
-
- zcrypt_wait_api_operational();
-
- /* build a list of apqns suitable for ep11 keys with cpacf support */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- NULL);
- if (rc)
- goto out;
-
- /* go through the list of apqns and try to bild an ep11 key */
- for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
- 0, clrkey, keybuf, keybuflen,
- PKEY_TYPE_EP11);
- if (rc == 0)
- break;
- }
+ if (!uapqns || nr_apqns == 0)
+ return NULL;
-out:
- kfree(apqns);
- if (rc)
- pr_debug("%s failed rc=%d\n", __func__, rc);
- return rc;
+ return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
}
-/*
- * Find card and transform EP11 secure key into protected key.
- */
-static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs)
{
- u32 nr_apqns, *apqns = NULL;
- int i, j, rc = -ENODEV;
- u16 card, dom;
+ struct pkey_genseck kgs;
+ struct pkey_apqn apqn;
+ u32 keybuflen;
+ int rc;
- zcrypt_wait_api_operational();
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
- /* try two times in case of failure */
- for (i = 0; i < 2 && rc; i++) {
+ apqn.card = kgs.cardnr;
+ apqn.domain = kgs.domain;
+ keybuflen = sizeof(kgs.seckey.seckey);
+ rc = pkey_handler_gen_key(&apqn, 1,
+ kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ kgs.seckey.seckey, &keybuflen, NULL);
+ pr_debug("gen_key()=%d\n", rc);
+ if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ memzero_explicit(&kgs, sizeof(kgs));
- /* build a list of apqns suitable for this key */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- continue; /* retry findcard on failure */
-
- /* go through the list of apqns and try to derive an pkey */
- for (rc = -ENODEV, j = 0; j < nr_apqns && rc; j++) {
- card = apqns[j] >> 16;
- dom = apqns[j] & 0xFFFF;
- rc = ep11_kblob2protkey(card, dom, key, keylen,
- protkey, protkeylen, protkeytype);
- }
+ return rc;
+}
- kfree(apqns);
- }
+static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs)
+{
+ struct pkey_clr2seck kcs;
+ struct pkey_apqn apqn;
+ u32 keybuflen;
+ int rc;
- if (rc)
- pr_debug("%s failed rc=%d\n", __func__, rc);
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+
+ apqn.card = kcs.cardnr;
+ apqn.domain = kcs.domain;
+ keybuflen = sizeof(kcs.seckey.seckey);
+ rc = pkey_handler_clr_to_key(&apqn, 1,
+ kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ kcs.clrkey.clrkey,
+ pkey_keytype_aes_to_size(kcs.keytype),
+ kcs.seckey.seckey, &keybuflen, NULL);
+ pr_debug("clr_to_key()=%d\n", rc);
+ if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
return rc;
}
-/*
- * Verify key and give back some info about the key.
- */
-static int pkey_verifykey(const struct pkey_seckey *seckey,
- u16 *pcardnr, u16 *pdomain,
- u16 *pkeysize, u32 *pattributes)
+static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp)
{
- struct secaeskeytoken *t = (struct secaeskeytoken *)seckey;
- u16 cardnr, domain;
+ struct pkey_sec2protk ksp;
+ struct pkey_apqn apqn;
int rc;
- /* check the secure key for valid AES secure key */
- rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, (u8 *)seckey, 0);
- if (rc)
- goto out;
- if (pattributes)
- *pattributes = PKEY_VERIFY_ATTR_AES;
- if (pkeysize)
- *pkeysize = t->bitsize;
-
- /* try to find a card which can handle this key */
- rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1);
- if (rc < 0)
- goto out;
-
- if (rc > 0) {
- /* key mkvp matches to old master key mkvp */
- pr_debug("%s secure key has old mkvp\n", __func__);
- if (pattributes)
- *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
- rc = 0;
- }
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+
+ apqn.card = ksp.cardnr;
+ apqn.domain = ksp.domain;
+ ksp.protkey.len = sizeof(ksp.protkey.protkey);
+ rc = pkey_handler_key_to_protkey(&apqn, 1,
+ ksp.seckey.seckey,
+ sizeof(ksp.seckey.seckey),
+ ksp.protkey.protkey,
+ &ksp.protkey.len, &ksp.protkey.type);
+ pr_debug("key_to_protkey()=%d\n", rc);
+ if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
+ rc = -EFAULT;
+ memzero_explicit(&ksp, sizeof(ksp));
- if (pcardnr)
- *pcardnr = cardnr;
- if (pdomain)
- *pdomain = domain;
-
-out:
- pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
-/*
- * Generate a random protected key
- */
-static int pkey_genprotkey(u32 keytype, u8 *protkey,
- u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp)
{
- u8 clrkey[32];
- int keysize;
+ struct pkey_clr2protk kcp;
+ struct clearkeytoken *t;
+ u32 keylen;
+ u8 *tmpbuf;
int rc;
- keysize = pkey_keytype_aes_to_size(keytype);
- if (!keysize) {
- PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", __func__,
- keytype);
+ if (copy_from_user(&kcp, ucp, sizeof(kcp)))
+ return -EFAULT;
+
+ /* build a 'clear key token' from the clear key value */
+ keylen = pkey_keytype_aes_to_size(kcp.keytype);
+ if (!keylen) {
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, kcp.keytype);
+ memzero_explicit(&kcp, sizeof(kcp));
return -EINVAL;
}
+ tmpbuf = kzalloc(sizeof(*t) + keylen, GFP_KERNEL);
+ if (!tmpbuf) {
+ memzero_explicit(&kcp, sizeof(kcp));
+ return -ENOMEM;
+ }
+ t = (struct clearkeytoken *)tmpbuf;
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_CLEAR_KEY;
+ t->keytype = (keylen - 8) >> 3;
+ t->len = keylen;
+ memcpy(t->clearkey, kcp.clrkey.clrkey, keylen);
+ kcp.protkey.len = sizeof(kcp.protkey.protkey);
- /* generate a dummy random clear key */
- get_random_bytes(clrkey, keysize);
+ rc = key2protkey(NULL, 0,
+ tmpbuf, sizeof(*t) + keylen,
+ kcp.protkey.protkey,
+ &kcp.protkey.len, &kcp.protkey.type);
+ pr_debug("key2protkey()=%d\n", rc);
- /* convert it to a dummy protected key */
- rc = pkey_clr2protkey(keytype, clrkey,
- protkey, protkeylen, protkeytype);
- if (rc)
- return rc;
+ kfree_sensitive(tmpbuf);
- /* replace the key part of the protected key with random bytes */
- get_random_bytes(protkey, keysize);
+ if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp)))
+ rc = -EFAULT;
+ memzero_explicit(&kcp, sizeof(kcp));
- return 0;
+ return rc;
}
-/*
- * Verify if a protected key is still valid
- */
-static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
- u32 protkeytype)
+static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc)
{
- struct {
- u8 iv[AES_BLOCK_SIZE];
- u8 key[MAXPROTKEYSIZE];
- } param;
- u8 null_msg[AES_BLOCK_SIZE];
- u8 dest_buf[AES_BLOCK_SIZE];
- unsigned int k, pkeylen;
- unsigned long fc;
-
- switch (protkeytype) {
- case PKEY_KEYTYPE_AES_128:
- pkeylen = 16 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_128;
- break;
- case PKEY_KEYTYPE_AES_192:
- pkeylen = 24 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_192;
- break;
- case PKEY_KEYTYPE_AES_256:
- pkeylen = 32 + AES_WK_VP_SIZE;
- fc = CPACF_KMC_PAES_256;
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
- protkeytype);
- return -EINVAL;
- }
- if (protkeylen != pkeylen) {
- PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
- __func__, protkeylen, protkeytype);
- return -EINVAL;
- }
+ struct pkey_findcard kfc;
+ struct pkey_apqn *apqns;
+ size_t nr_apqns;
+ int rc;
- memset(null_msg, 0, sizeof(null_msg));
+ if (copy_from_user(&kfc, ufc, sizeof(kfc)))
+ return -EFAULT;
- memset(param.iv, 0, sizeof(param.iv));
- memcpy(param.key, protkey, protkeylen);
+ nr_apqns = MAXAPQNSINLIST;
+ apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
- k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
- sizeof(null_msg));
- if (k != sizeof(null_msg)) {
- PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
- return -EKEYREJECTED;
+ rc = pkey_handler_apqns_for_key(kfc.seckey.seckey,
+ sizeof(kfc.seckey.seckey),
+ PKEY_FLAGS_MATCH_CUR_MKVP,
+ apqns, &nr_apqns);
+ if (rc == -ENODEV)
+ rc = pkey_handler_apqns_for_key(kfc.seckey.seckey,
+ sizeof(kfc.seckey.seckey),
+ PKEY_FLAGS_MATCH_ALT_MKVP,
+ apqns, &nr_apqns);
+ pr_debug("apqns_for_key()=%d\n", rc);
+ if (rc) {
+ kfree(apqns);
+ return rc;
}
+ kfc.cardnr = apqns[0].card;
+ kfc.domain = apqns[0].domain;
+ kfree(apqns);
+ if (copy_to_user(ufc, &kfc, sizeof(kfc)))
+ return -EFAULT;
return 0;
}
-/* Helper for pkey_nonccatok2pkey, handles aes clear key token */
-static int nonccatokaes2pkey(const struct clearkeytoken *t,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp)
{
- size_t tmpbuflen = max_t(size_t, SECKEYBLOBSIZE, MAXEP11AESKEYBLOBSIZE);
- u8 *tmpbuf = NULL;
- u32 keysize;
+ struct pkey_skey2pkey ksp;
int rc;
- keysize = pkey_keytype_aes_to_size(t->keytype);
- if (!keysize) {
- PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
- return -EINVAL;
- }
- if (t->len != keysize) {
- PKEY_DBF_ERR("%s non clear key aes token: invalid key len %u\n",
- __func__, t->len);
- return -EINVAL;
- }
-
- /* try direct way with the PCKMO instruction */
- rc = pkey_clr2protkey(t->keytype, t->clearkey,
- protkey, protkeylen, protkeytype);
- if (!rc)
- goto out;
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+
+ ksp.protkey.len = sizeof(ksp.protkey.protkey);
+ rc = pkey_handler_key_to_protkey(NULL, 0,
+ ksp.seckey.seckey,
+ sizeof(ksp.seckey.seckey),
+ ksp.protkey.protkey,
+ &ksp.protkey.len,
+ &ksp.protkey.type);
+ pr_debug("key_to_protkey()=%d\n", rc);
+ if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
+ rc = -EFAULT;
+ memzero_explicit(&ksp, sizeof(ksp));
- /* PCKMO failed, so try the CCA secure key way */
- tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
- if (!tmpbuf)
- return -ENOMEM;
- zcrypt_wait_api_operational();
- rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype, t->clearkey, tmpbuf);
- if (rc)
- goto try_via_ep11;
- rc = pkey_skey2pkey(tmpbuf,
- protkey, protkeylen, protkeytype);
- if (!rc)
- goto out;
-
-try_via_ep11:
- /* if the CCA way also failed, let's try via EP11 */
- rc = pkey_clr2ep11key(t->clearkey, t->len,
- tmpbuf, &tmpbuflen);
- if (rc)
- goto failure;
- rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen,
- protkey, protkeylen, protkeytype);
- if (!rc)
- goto out;
-
-failure:
- PKEY_DBF_ERR("%s unable to build protected key from clear", __func__);
-
-out:
- kfree(tmpbuf);
return rc;
}
-/* Helper for pkey_nonccatok2pkey, handles ecc clear key token */
-static int nonccatokecc2pkey(const struct clearkeytoken *t,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk)
{
- u32 keylen;
+ u32 keytype, keybitsize, flags;
+ struct pkey_verifykey kvk;
int rc;
- switch (t->keytype) {
- case PKEY_KEYTYPE_ECC_P256:
- keylen = 32;
- break;
- case PKEY_KEYTYPE_ECC_P384:
- keylen = 48;
- break;
- case PKEY_KEYTYPE_ECC_P521:
- keylen = 80;
- break;
- case PKEY_KEYTYPE_ECC_ED25519:
- keylen = 32;
- break;
- case PKEY_KEYTYPE_ECC_ED448:
- keylen = 64;
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
- return -EINVAL;
- }
-
- if (t->len != keylen) {
- PKEY_DBF_ERR("%s non clear key ecc token: invalid key len %u\n",
- __func__, t->len);
- return -EINVAL;
- }
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
- /* only one path possible: via PCKMO instruction */
- rc = pkey_clr2protkey(t->keytype, t->clearkey,
- protkey, protkeylen, protkeytype);
- if (rc) {
- PKEY_DBF_ERR("%s unable to build protected key from clear",
- __func__);
- }
+ kvk.cardnr = 0xFFFF;
+ kvk.domain = 0xFFFF;
+ rc = pkey_handler_verify_key(kvk.seckey.seckey,
+ sizeof(kvk.seckey.seckey),
+ &kvk.cardnr, &kvk.domain,
+ &keytype, &keybitsize, &flags);
+ pr_debug("verify_key()=%d\n", rc);
+ if (!rc && keytype != PKEY_TYPE_CCA_DATA)
+ rc = -EINVAL;
+ kvk.attributes = PKEY_VERIFY_ATTR_AES;
+ kvk.keysize = (u16)keybitsize;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ kvk.attributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
+ if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
+ rc = -EFAULT;
+ memzero_explicit(&kvk, sizeof(kvk));
return rc;
}
-/*
- * Transform a non-CCA key token into a protected key
- */
-static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- int rc = -EINVAL;
+ struct pkey_genprotk kgp;
+ int rc;
- switch (hdr->version) {
- case TOKVER_PROTECTED_KEY: {
- struct protaeskeytoken *t;
+ if (copy_from_user(&kgp, ugp, sizeof(kgp)))
+ return -EFAULT;
- if (keylen != sizeof(struct protaeskeytoken))
- goto out;
- t = (struct protaeskeytoken *)key;
- rc = pkey_verifyprotkey(t->protkey, t->len, t->keytype);
- if (rc)
- goto out;
- memcpy(protkey, t->protkey, t->len);
- *protkeylen = t->len;
- *protkeytype = t->keytype;
- break;
- }
- case TOKVER_CLEAR_KEY: {
- struct clearkeytoken *t = (struct clearkeytoken *)key;
-
- if (keylen < sizeof(struct clearkeytoken) ||
- keylen != sizeof(*t) + t->len)
- goto out;
- switch (t->keytype) {
- case PKEY_KEYTYPE_AES_128:
- case PKEY_KEYTYPE_AES_192:
- case PKEY_KEYTYPE_AES_256:
- rc = nonccatokaes2pkey(t, protkey,
- protkeylen, protkeytype);
- break;
- case PKEY_KEYTYPE_ECC_P256:
- case PKEY_KEYTYPE_ECC_P384:
- case PKEY_KEYTYPE_ECC_P521:
- case PKEY_KEYTYPE_ECC_ED25519:
- case PKEY_KEYTYPE_ECC_ED448:
- rc = nonccatokecc2pkey(t, protkey,
- protkeylen, protkeytype);
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported non cca clear key type %u\n",
- __func__, t->keytype);
- return -EINVAL;
- }
- break;
- }
- case TOKVER_EP11_AES: {
- /* check ep11 key for exportable as protected key */
- rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
- if (rc)
- goto out;
- rc = pkey_ep11key2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- break;
- }
- case TOKVER_EP11_AES_WITH_HEADER:
- /* check ep11 key with header for exportable as protected key */
- rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1);
- if (rc)
- goto out;
- rc = pkey_ep11key2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported non-CCA token version %d\n",
- __func__, hdr->version);
- }
+ kgp.protkey.len = sizeof(kgp.protkey.protkey);
+ rc = pkey_handler_gen_key(NULL, 0, kgp.keytype,
+ PKEY_TYPE_PROTKEY, 0, 0,
+ kgp.protkey.protkey, &kgp.protkey.len,
+ &kgp.protkey.type);
+ pr_debug("gen_key()=%d\n", rc);
+ if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp)))
+ rc = -EFAULT;
+ memzero_explicit(&kgp, sizeof(kgp));
-out:
return rc;
}
-/*
- * Transform a CCA internal key token into a protected key
- */
-static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_verifyprotk kvp;
+ struct protaeskeytoken *t;
+ u32 keytype;
+ u8 *tmpbuf;
+ int rc;
- switch (hdr->version) {
- case TOKVER_CCA_AES:
- if (keylen != sizeof(struct secaeskeytoken))
- return -EINVAL;
- break;
- case TOKVER_CCA_VLSC:
- if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
- return -EINVAL;
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported CCA internal token version %d\n",
- __func__, hdr->version);
+ if (copy_from_user(&kvp, uvp, sizeof(kvp)))
+ return -EFAULT;
+
+ keytype = pkey_aes_bitsize_to_keytype(8 * kvp.protkey.len);
+ if (!keytype) {
+ PKEY_DBF_ERR("%s unknown/unsupported protkey length %u\n",
+ __func__, kvp.protkey.len);
+ memzero_explicit(&kvp, sizeof(kvp));
return -EINVAL;
}
- return pkey_skey2pkey(key, protkey, protkeylen, protkeytype);
+ /* build a 'protected key token' from the raw protected key */
+ tmpbuf = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!tmpbuf) {
+ memzero_explicit(&kvp, sizeof(kvp));
+ return -ENOMEM;
+ }
+ t = (struct protaeskeytoken *)tmpbuf;
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+ t->len = kvp.protkey.len;
+ memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len);
+
+ rc = pkey_handler_verify_key(tmpbuf, sizeof(*t),
+ NULL, NULL, NULL, NULL, NULL);
+ pr_debug("verify_key()=%d\n", rc);
+
+ kfree_sensitive(tmpbuf);
+ memzero_explicit(&kvp, sizeof(kvp));
+
+ return rc;
}
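/*
 * [Editorial aside, not part of the patch] pkey_ioctl_verifyprotk() cannot
 * hand a raw protected key to the generic verify path, so it first wraps it
 * into a TOKTYPE_NON_CCA / TOKVER_PROTECTED_KEY token. For orientation, the
 * token carries roughly this layout - abridged sketch only, inferred from
 * the assignments above plus the pkey UAPI header; the reserved padding is
 * an assumption, consult the header for the authoritative definition:
 *
 *	struct protaeskeytoken {
 *		u8  type;	// TOKTYPE_NON_CCA
 *		u8  res0[3];	// reserved (assumed padding)
 *		u8  version;	// TOKVER_PROTECTED_KEY
 *		u8  res1[3];	// reserved (assumed padding)
 *		u32 keytype;	// PKEY_KEYTYPE_AES_128/192/256
 *		u32 len;	// length of the protected key blob
 *		u8  protkey[MAXPROTKEYSIZE];	// key material + wrapping KVP
 *	};
 */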
-/*
- * Transform a key blob (of any type) into a protected key
- */
-int pkey_keyblob2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_kblob2pkey ktp;
+ u8 *kkey;
int rc;
- if (keylen < sizeof(struct keytoken_header)) {
- PKEY_DBF_ERR("%s invalid keylen %d\n", __func__, keylen);
- return -EINVAL;
- }
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ ktp.protkey.len = sizeof(ktp.protkey.protkey);
+ rc = key2protkey(NULL, 0, kkey, ktp.keylen,
+ ktp.protkey.protkey, &ktp.protkey.len,
+ &ktp.protkey.type);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
+ rc = -EFAULT;
+ memzero_explicit(&ktp, sizeof(ktp));
- switch (hdr->type) {
- case TOKTYPE_NON_CCA:
- rc = pkey_nonccatok2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- break;
- case TOKTYPE_CCA_INTERNAL:
- rc = pkey_ccainttok2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- break;
- default:
- PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
- return -EINVAL;
- }
-
- pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
-EXPORT_SYMBOL(pkey_keyblob2pkey);
-static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
- enum pkey_key_type ktype, enum pkey_key_size ksize,
- u32 kflags, u8 *keybuf, size_t *keybufsize)
+static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs)
{
- int i, card, dom, rc;
-
- /* check for at least one apqn given */
- if (!apqns || !nr_apqns)
- return -EINVAL;
+ u32 klen = KEYBLOBBUFSIZE;
+ struct pkey_genseck2 kgs;
+ struct pkey_apqn *apqns;
+ u8 *kkey;
+ int rc;
+ u32 u;
- /* check key type and size */
- switch (ktype) {
- case PKEY_TYPE_CCA_DATA:
- case PKEY_TYPE_CCA_CIPHER:
- if (*keybufsize < SECKEYBLOBSIZE)
- return -EINVAL;
- break;
- case PKEY_TYPE_EP11:
- if (*keybufsize < MINEP11AESKEYBLOBSIZE)
- return -EINVAL;
- break;
- case PKEY_TYPE_EP11_AES:
- if (*keybufsize < (sizeof(struct ep11kblob_header) +
- MINEP11AESKEYBLOBSIZE))
- return -EINVAL;
- break;
- default:
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+ u = pkey_aes_bitsize_to_keytype(kgs.size);
+ if (!u) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, kgs.size);
return -EINVAL;
}
- switch (ksize) {
- case PKEY_SIZE_AES_128:
- case PKEY_SIZE_AES_192:
- case PKEY_SIZE_AES_256:
- break;
- default:
- return -EINVAL;
+ apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kzalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ return -ENOMEM;
}
-
- /* simple try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i].card;
- dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_EP11 ||
- ktype == PKEY_TYPE_EP11_AES) {
- rc = ep11_genaeskey(card, dom, ksize, kflags,
- keybuf, keybufsize, ktype);
- } else if (ktype == PKEY_TYPE_CCA_DATA) {
- rc = cca_genseckey(card, dom, ksize, keybuf);
- *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
- } else {
- /* TOKVER_CCA_VLSC */
- rc = cca_gencipherkey(card, dom, ksize, kflags,
- keybuf, keybufsize);
+ rc = pkey_handler_gen_key(apqns, kgs.apqn_entries,
+ u, kgs.type, kgs.size, kgs.keygenflags,
+ kkey, &klen, NULL);
+ pr_debug("gen_key()=%d\n", rc);
+ kfree(apqns);
+ if (rc) {
+ kfree_sensitive(kkey);
+ return rc;
+ }
+ if (kgs.key) {
+ if (kgs.keylen < klen) {
+ kfree_sensitive(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kgs.key, kkey, klen)) {
+ kfree_sensitive(kkey);
+ return -EFAULT;
}
- if (rc == 0)
- break;
}
+ kgs.keylen = klen;
+ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ kfree_sensitive(kkey);
return rc;
}
-static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
- enum pkey_key_type ktype, enum pkey_key_size ksize,
- u32 kflags, const u8 *clrkey,
- u8 *keybuf, size_t *keybufsize)
+static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs)
{
- int i, card, dom, rc;
-
- /* check for at least one apqn given */
- if (!apqns || !nr_apqns)
- return -EINVAL;
-
- /* check key type and size */
- switch (ktype) {
- case PKEY_TYPE_CCA_DATA:
- case PKEY_TYPE_CCA_CIPHER:
- if (*keybufsize < SECKEYBLOBSIZE)
- return -EINVAL;
- break;
- case PKEY_TYPE_EP11:
- if (*keybufsize < MINEP11AESKEYBLOBSIZE)
- return -EINVAL;
- break;
- case PKEY_TYPE_EP11_AES:
- if (*keybufsize < (sizeof(struct ep11kblob_header) +
- MINEP11AESKEYBLOBSIZE))
- return -EINVAL;
- break;
- default:
+ u32 klen = KEYBLOBBUFSIZE;
+ struct pkey_clr2seck2 kcs;
+ struct pkey_apqn *apqns;
+ u8 *kkey;
+ int rc;
+ u32 u;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+ u = pkey_aes_bitsize_to_keytype(kcs.size);
+ if (!u) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, kcs.size);
+ memzero_explicit(&kcs, sizeof(kcs));
return -EINVAL;
}
- switch (ksize) {
- case PKEY_SIZE_AES_128:
- case PKEY_SIZE_AES_192:
- case PKEY_SIZE_AES_256:
- break;
- default:
- return -EINVAL;
+ apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+ if (IS_ERR(apqns)) {
+ memzero_explicit(&kcs, sizeof(kcs));
+ return PTR_ERR(apqns);
}
-
- zcrypt_wait_api_operational();
-
- /* simple try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i].card;
- dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_EP11 ||
- ktype == PKEY_TYPE_EP11_AES) {
- rc = ep11_clr2keyblob(card, dom, ksize, kflags,
- clrkey, keybuf, keybufsize,
- ktype);
- } else if (ktype == PKEY_TYPE_CCA_DATA) {
- rc = cca_clr2seckey(card, dom, ksize,
- clrkey, keybuf);
- *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
- } else {
- /* TOKVER_CCA_VLSC */
- rc = cca_clr2cipherkey(card, dom, ksize, kflags,
- clrkey, keybuf, keybufsize);
+ kkey = kzalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return -ENOMEM;
+ }
+ rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries,
+ u, kcs.type, kcs.size, kcs.keygenflags,
+ kcs.clrkey.clrkey, kcs.size / 8,
+ kkey, &klen, NULL);
+ pr_debug("clr_to_key()=%d\n", rc);
+ kfree(apqns);
+ if (rc) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return rc;
+ }
+ if (kcs.key) {
+ if (kcs.keylen < klen) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return -EINVAL;
+ }
+ if (copy_to_user(kcs.key, kkey, klen)) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return -EFAULT;
}
- if (rc == 0)
- break;
}
+ kcs.keylen = klen;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ kfree_sensitive(kkey);
return rc;
}
-static int pkey_verifykey2(const u8 *key, size_t keylen,
- u16 *cardnr, u16 *domain,
- enum pkey_key_type *ktype,
- enum pkey_key_size *ksize, u32 *flags)
+static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- u32 _nr_apqns, *_apqns = NULL;
+ struct pkey_verifykey2 kvk;
+ u8 *kkey;
int rc;
- if (keylen < sizeof(struct keytoken_header))
- return -EINVAL;
-
- if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_AES) {
- struct secaeskeytoken *t = (struct secaeskeytoken *)key;
-
- rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
- if (rc)
- goto out;
- if (ktype)
- *ktype = PKEY_TYPE_CCA_DATA;
- if (ksize)
- *ksize = (enum pkey_key_size)t->bitsize;
-
- rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1);
- if (rc == 0 && flags)
- *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
- if (rc == -ENODEV) {
- rc = cca_findcard2(&_apqns, &_nr_apqns,
- *cardnr, *domain,
- ZCRYPT_CEX3C, AES_MK_SET,
- 0, t->mkvp, 1);
- if (rc == 0 && flags)
- *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
- }
- if (rc)
- goto out;
-
- *cardnr = ((struct pkey_apqn *)_apqns)->card;
- *domain = ((struct pkey_apqn *)_apqns)->domain;
-
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_VLSC) {
- struct cipherkeytoken *t = (struct cipherkeytoken *)key;
-
- rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
- if (rc)
- goto out;
- if (ktype)
- *ktype = PKEY_TYPE_CCA_CIPHER;
- if (ksize) {
- *ksize = PKEY_SIZE_UNKNOWN;
- if (!t->plfver && t->wpllen == 512)
- *ksize = PKEY_SIZE_AES_128;
- else if (!t->plfver && t->wpllen == 576)
- *ksize = PKEY_SIZE_AES_192;
- else if (!t->plfver && t->wpllen == 640)
- *ksize = PKEY_SIZE_AES_256;
- }
-
- rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1);
- if (rc == 0 && flags)
- *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
- if (rc == -ENODEV) {
- rc = cca_findcard2(&_apqns, &_nr_apqns,
- *cardnr, *domain,
- ZCRYPT_CEX6, AES_MK_SET,
- 0, t->mkvp0, 1);
- if (rc == 0 && flags)
- *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
- }
- if (rc)
- goto out;
-
- *cardnr = ((struct pkey_apqn *)_apqns)->card;
- *domain = ((struct pkey_apqn *)_apqns)->domain;
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(kvk.key, kvk.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES) {
- struct ep11keyblob *kb = (struct ep11keyblob *)key;
- int api;
-
- rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
- if (rc)
- goto out;
- if (ktype)
- *ktype = PKEY_TYPE_EP11;
- if (ksize)
- *ksize = kb->head.bitlen;
-
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX7, api,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- goto out;
-
- if (flags)
- *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
-
- *cardnr = ((struct pkey_apqn *)_apqns)->card;
- *domain = ((struct pkey_apqn *)_apqns)->domain;
+ rc = pkey_handler_verify_key(kkey, kvk.keylen,
+ &kvk.cardnr, &kvk.domain,
+ &kvk.type, &kvk.size, &kvk.flags);
+ pr_debug("verify_key()=%d\n", rc);
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
- struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
- int api;
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
+ return -EFAULT;
- rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1);
- if (rc)
- goto out;
- if (ktype)
- *ktype = PKEY_TYPE_EP11_AES;
- if (ksize)
- *ksize = kh->bitlen;
-
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX7, api,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- goto out;
+ return rc;
+}
- if (flags)
- *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp)
+{
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_kblob2pkey2 ktp;
+ u8 *kkey;
+ int rc;
- *cardnr = ((struct pkey_apqn *)_apqns)->card;
- *domain = ((struct pkey_apqn *)_apqns)->domain;
- } else {
- rc = -EINVAL;
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
}
+ ktp.protkey.len = sizeof(ktp.protkey.protkey);
+ rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen,
+ ktp.protkey.protkey, &ktp.protkey.len,
+ &ktp.protkey.type);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
+ rc = -EFAULT;
+ memzero_explicit(&ktp, sizeof(ktp));
-out:
- kfree(_apqns);
return rc;
}
-static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
- const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- int i, card, dom, rc;
-
- /* check for at least one apqn given */
- if (!apqns || !nr_apqns)
- return -EINVAL;
-
- if (keylen < sizeof(struct keytoken_header))
- return -EINVAL;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_apqns4key kak;
+ size_t nr_apqns, len;
+ u8 *kkey;
+ int rc;
- if (hdr->type == TOKTYPE_CCA_INTERNAL) {
- if (hdr->version == TOKVER_CCA_AES) {
- if (keylen != sizeof(struct secaeskeytoken))
- return -EINVAL;
- if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
- return -EINVAL;
- } else if (hdr->version == TOKVER_CCA_VLSC) {
- if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
- return -EINVAL;
- if (cca_check_secaescipherkey(pkey_dbf_info,
- 3, key, 0, 1))
- return -EINVAL;
- } else {
- PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
+ if (copy_from_user(&kak, uak, sizeof(kak)))
+ return -EFAULT;
+ nr_apqns = kak.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ kkey = _copy_key_from_user(kak.key, kak.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags,
+ apqns, &nr_apqns);
+ pr_debug("apqns_for_key()=%d\n", rc);
+ kfree_sensitive(kkey);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ return rc;
+ }
+ if (!rc && kak.apqns) {
+ if (nr_apqns > kak.apqn_entries) {
+ kfree(apqns);
return -EINVAL;
}
- } else if (hdr->type == TOKTYPE_NON_CCA) {
- if (hdr->version == TOKVER_EP11_AES) {
- if (ep11_check_aes_key(pkey_dbf_info,
- 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
- if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1))
- return -EINVAL;
- } else {
- return pkey_nonccatok2pkey(key, keylen,
- protkey, protkeylen,
- protkeytype);
- }
- } else {
- PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
- return -EINVAL;
- }
-
- zcrypt_wait_api_operational();
-
- /* simple try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i].card;
- dom = apqns[i].domain;
- if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_AES) {
- rc = cca_sec2protkey(card, dom, key,
- protkey, protkeylen, protkeytype);
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_VLSC) {
- rc = cca_cipher2protkey(card, dom, key,
- protkey, protkeylen,
- protkeytype);
- } else {
- rc = ep11_kblob2protkey(card, dom, key, keylen,
- protkey, protkeylen,
- protkeytype);
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kak.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
}
- if (rc == 0)
- break;
}
+ kak.apqn_entries = nr_apqns;
+ if (copy_to_user(uak, &kak, sizeof(kak)))
+ rc = -EFAULT;
+ kfree(apqns);
return rc;
}
-static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+static int pkey_ioctl_apqns4kt(struct pkey_apqns4keytype __user *uat)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- u32 _nr_apqns, *_apqns = NULL;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_apqns4keytype kat;
+ size_t nr_apqns, len;
int rc;
- if (keylen < sizeof(struct keytoken_header) || flags == 0)
- return -EINVAL;
-
- zcrypt_wait_api_operational();
-
- if (hdr->type == TOKTYPE_NON_CCA &&
- (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
- hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
- is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
- struct ep11keyblob *kb = (struct ep11keyblob *)
- (key + sizeof(struct ep11kblob_header));
- int minhwtype = 0, api = 0;
-
- if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
- return -EINVAL;
- if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
- minhwtype = ZCRYPT_CEX7;
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- }
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, api, kb->wkvp);
- if (rc)
- goto out;
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES &&
- is_ep11_keyblob(key)) {
- struct ep11keyblob *kb = (struct ep11keyblob *)key;
- int minhwtype = 0, api = 0;
-
- if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
- return -EINVAL;
- if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
- minhwtype = ZCRYPT_CEX7;
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- }
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, api, kb->wkvp);
- if (rc)
- goto out;
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
- u64 cur_mkvp = 0, old_mkvp = 0;
- int minhwtype = ZCRYPT_CEX3C;
-
- if (hdr->version == TOKVER_CCA_AES) {
- struct secaeskeytoken *t = (struct secaeskeytoken *)key;
-
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = t->mkvp;
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = t->mkvp;
- } else if (hdr->version == TOKVER_CCA_VLSC) {
- struct cipherkeytoken *t = (struct cipherkeytoken *)key;
-
- minhwtype = ZCRYPT_CEX6;
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = t->mkvp0;
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = t->mkvp0;
- } else {
- /* unknown cca internal token type */
+ if (copy_from_user(&kat, uat, sizeof(kat)))
+ return -EFAULT;
+ nr_apqns = kat.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ rc = pkey_handler_apqns_for_keytype(kat.type,
+ kat.cur_mkvp, kat.alt_mkvp,
+ kat.flags, apqns, &nr_apqns);
+ pr_debug("apqns_for_keytype()=%d\n", rc);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ return rc;
+ }
+ if (!rc && kat.apqns) {
+ if (nr_apqns > kat.apqn_entries) {
+ kfree(apqns);
return -EINVAL;
}
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, AES_MK_SET,
- cur_mkvp, old_mkvp, 1);
- if (rc)
- goto out;
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
- struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
- u64 cur_mkvp = 0, old_mkvp = 0;
-
- if (t->secid == 0x20) {
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = t->mkvp;
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = t->mkvp;
- } else {
- /* unknown cca internal 2 token type */
- return -EINVAL;
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kat.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
}
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, APKA_MK_SET,
- cur_mkvp, old_mkvp, 1);
- if (rc)
- goto out;
- } else {
- return -EINVAL;
}
+ kat.apqn_entries = nr_apqns;
+ if (copy_to_user(uat, &kat, sizeof(kat)))
+ rc = -EFAULT;
+ kfree(apqns);
- if (apqns) {
- if (*nr_apqns < _nr_apqns)
- rc = -ENOSPC;
- else
- memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
- }
- *nr_apqns = _nr_apqns;
-
-out:
- kfree(_apqns);
return rc;
}
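/*
 * [Editorial aside, not part of the patch] The -ENOSPC handling above (and
 * the matching logic in pkey_ioctl_apqns4k()) enables a two-step enumeration
 * from userspace: probe with a NULL list to learn the required count, then
 * call again with a buffer of that size. A minimal userspace sketch,
 * assuming the ioctl name and struct fields from the pkey UAPI header
 * (error handling trimmed, fd opened on /dev/pkey by the caller):
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

static struct pkey_apqn *fetch_apqns(int fd, unsigned int *n)
{
	struct pkey_apqns4keytype kat;
	struct pkey_apqn *list;

	memset(&kat, 0, sizeof(kat));
	kat.type = PKEY_TYPE_CCA_DATA;
	/* 1st call: apqns == NULL, kernel reports the required entry count */
	if (ioctl(fd, PKEY_APQNS4KT, &kat) && errno != ENOSPC)
		return NULL;
	list = calloc(kat.apqn_entries, sizeof(*list));
	if (!list)
		return NULL;
	kat.apqns = list;
	/* 2nd call: kernel fills the list (ENOSPC if it grew meanwhile) */
	if (ioctl(fd, PKEY_APQNS4KT, &kat)) {
		free(list);
		return NULL;
	}
	*n = kat.apqn_entries;
	return list;
}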
-static int pkey_apqns4keytype(enum pkey_key_type ktype,
- u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp)
{
- u32 _nr_apqns, *_apqns = NULL;
+ u32 protkeylen = PROTKEYBLOBBUFSIZE;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_kblob2pkey3 ktp;
+ u8 *kkey, *protkey;
int rc;
- zcrypt_wait_api_operational();
-
- if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
- u64 cur_mkvp = 0, old_mkvp = 0;
- int minhwtype = ZCRYPT_CEX3C;
-
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = *((u64 *)cur_mkvp);
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = *((u64 *)alt_mkvp);
- if (ktype == PKEY_TYPE_CCA_CIPHER)
- minhwtype = ZCRYPT_CEX6;
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, AES_MK_SET,
- cur_mkvp, old_mkvp, 1);
- if (rc)
- goto out;
- } else if (ktype == PKEY_TYPE_CCA_ECC) {
- u64 cur_mkvp = 0, old_mkvp = 0;
-
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- cur_mkvp = *((u64 *)cur_mkvp);
- if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
- old_mkvp = *((u64 *)alt_mkvp);
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, APKA_MK_SET,
- cur_mkvp, old_mkvp, 1);
- if (rc)
- goto out;
-
- } else if (ktype == PKEY_TYPE_EP11 ||
- ktype == PKEY_TYPE_EP11_AES ||
- ktype == PKEY_TYPE_EP11_ECC) {
- u8 *wkvp = NULL;
- int api;
-
- if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
- wkvp = cur_mkvp;
- api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, api, wkvp);
- if (rc)
- goto out;
-
- } else {
- return -EINVAL;
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
}
-
- if (apqns) {
- if (*nr_apqns < _nr_apqns)
- rc = -ENOSPC;
- else
- memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ protkey = kmalloc(protkeylen, GFP_KERNEL);
+ if (!protkey) {
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ return -ENOMEM;
}
- *nr_apqns = _nr_apqns;
-
-out:
- kfree(_apqns);
- return rc;
-}
-
-static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
- const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
-{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- int i, card, dom, rc;
-
- /* check for at least one apqn given */
- if (!apqns || !nr_apqns)
- return -EINVAL;
-
- if (keylen < sizeof(struct keytoken_header))
- return -EINVAL;
-
- if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
- is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
- /* EP11 AES key blob with header */
- if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
- is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
- /* EP11 ECC key blob with header */
- if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
- 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES &&
- is_ep11_keyblob(key)) {
- /* EP11 AES key blob with header in session field */
- if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
- if (hdr->version == TOKVER_CCA_AES) {
- /* CCA AES data key */
- if (keylen != sizeof(struct secaeskeytoken))
- return -EINVAL;
- if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
- return -EINVAL;
- } else if (hdr->version == TOKVER_CCA_VLSC) {
- /* CCA AES cipher key */
- if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
- return -EINVAL;
- if (cca_check_secaescipherkey(pkey_dbf_info,
- 3, key, 0, 1))
- return -EINVAL;
- } else {
- PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
- return -EINVAL;
- }
- } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
- /* CCA ECC (private) key */
- if (keylen < sizeof(struct eccprivkeytoken))
- return -EINVAL;
- if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
- return -EINVAL;
- } else if (hdr->type == TOKTYPE_NON_CCA) {
- return pkey_nonccatok2pkey(key, keylen,
- protkey, protkeylen, protkeytype);
- } else {
- PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
- return -EINVAL;
+ rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen,
+ protkey, &protkeylen, &ktp.pkeytype);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ if (rc) {
+ kfree_sensitive(protkey);
+ return rc;
}
-
- /* simple try all apqns from the list */
- for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
- card = apqns[i].card;
- dom = apqns[i].domain;
- if (hdr->type == TOKTYPE_NON_CCA &&
- (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
- hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
- is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
- rc = ep11_kblob2protkey(card, dom, key, hdr->len,
- protkey, protkeylen,
- protkeytype);
- else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES &&
- is_ep11_keyblob(key))
- rc = ep11_kblob2protkey(card, dom, key, hdr->len,
- protkey, protkeylen,
- protkeytype);
- else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_AES)
- rc = cca_sec2protkey(card, dom, key, protkey,
- protkeylen, protkeytype);
- else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
- hdr->version == TOKVER_CCA_VLSC)
- rc = cca_cipher2protkey(card, dom, key, protkey,
- protkeylen, protkeytype);
- else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA)
- rc = cca_ecc2protkey(card, dom, key, protkey,
- protkeylen, protkeytype);
- else
+ if (ktp.pkey && ktp.pkeylen) {
+ if (protkeylen > ktp.pkeylen) {
+ kfree_sensitive(protkey);
return -EINVAL;
+ }
+ if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+ kfree_sensitive(protkey);
+ return -EFAULT;
+ }
}
+ kfree_sensitive(protkey);
+ ktp.pkeylen = protkeylen;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
- return rc;
-}
-
-/*
- * File io functions
- */
-
-static void *_copy_key_from_user(void __user *ukey, size_t keylen)
-{
- if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE)
- return ERR_PTR(-EINVAL);
-
- return memdup_user(ukey, keylen);
-}
-
-static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
-{
- if (!uapqns || nr_apqns == 0)
- return NULL;
-
- return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
+ return 0;
}
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1350,438 +710,57 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
int rc;
switch (cmd) {
- case PKEY_GENSECK: {
- struct pkey_genseck __user *ugs = (void __user *)arg;
- struct pkey_genseck kgs;
-
- if (copy_from_user(&kgs, ugs, sizeof(kgs)))
- return -EFAULT;
- rc = cca_genseckey(kgs.cardnr, kgs.domain,
- kgs.keytype, kgs.seckey.seckey);
- pr_debug("%s cca_genseckey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs)))
- rc = -EFAULT;
- memzero_explicit(&kgs, sizeof(kgs));
+ case PKEY_GENSECK:
+ rc = pkey_ioctl_genseck((struct pkey_genseck __user *)arg);
break;
- }
- case PKEY_CLR2SECK: {
- struct pkey_clr2seck __user *ucs = (void __user *)arg;
- struct pkey_clr2seck kcs;
-
- if (copy_from_user(&kcs, ucs, sizeof(kcs)))
- return -EFAULT;
- rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
- kcs.clrkey.clrkey, kcs.seckey.seckey);
- pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs)))
- rc = -EFAULT;
- memzero_explicit(&kcs, sizeof(kcs));
+ case PKEY_CLR2SECK:
+ rc = pkey_ioctl_clr2seck((struct pkey_clr2seck __user *)arg);
break;
- }
- case PKEY_SEC2PROTK: {
- struct pkey_sec2protk __user *usp = (void __user *)arg;
- struct pkey_sec2protk ksp;
-
- if (copy_from_user(&ksp, usp, sizeof(ksp)))
- return -EFAULT;
- ksp.protkey.len = sizeof(ksp.protkey.protkey);
- rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
- ksp.seckey.seckey, ksp.protkey.protkey,
- &ksp.protkey.len, &ksp.protkey.type);
- pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
- rc = -EFAULT;
- memzero_explicit(&ksp, sizeof(ksp));
+ case PKEY_SEC2PROTK:
+ rc = pkey_ioctl_sec2protk((struct pkey_sec2protk __user *)arg);
break;
- }
- case PKEY_CLR2PROTK: {
- struct pkey_clr2protk __user *ucp = (void __user *)arg;
- struct pkey_clr2protk kcp;
-
- if (copy_from_user(&kcp, ucp, sizeof(kcp)))
- return -EFAULT;
- kcp.protkey.len = sizeof(kcp.protkey.protkey);
- rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey,
- kcp.protkey.protkey,
- &kcp.protkey.len, &kcp.protkey.type);
- pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp)))
- rc = -EFAULT;
- memzero_explicit(&kcp, sizeof(kcp));
+ case PKEY_CLR2PROTK:
+ rc = pkey_ioctl_clr2protk((struct pkey_clr2protk __user *)arg);
break;
- }
- case PKEY_FINDCARD: {
- struct pkey_findcard __user *ufc = (void __user *)arg;
- struct pkey_findcard kfc;
-
- if (copy_from_user(&kfc, ufc, sizeof(kfc)))
- return -EFAULT;
- rc = cca_findcard(kfc.seckey.seckey,
- &kfc.cardnr, &kfc.domain, 1);
- pr_debug("%s cca_findcard()=%d\n", __func__, rc);
- if (rc < 0)
- break;
- if (copy_to_user(ufc, &kfc, sizeof(kfc)))
- return -EFAULT;
+ case PKEY_FINDCARD:
+ rc = pkey_ioctl_findcard((struct pkey_findcard __user *)arg);
break;
- }
- case PKEY_SKEY2PKEY: {
- struct pkey_skey2pkey __user *usp = (void __user *)arg;
- struct pkey_skey2pkey ksp;
-
- if (copy_from_user(&ksp, usp, sizeof(ksp)))
- return -EFAULT;
- ksp.protkey.len = sizeof(ksp.protkey.protkey);
- rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
- &ksp.protkey.len, &ksp.protkey.type);
- pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
- rc = -EFAULT;
- memzero_explicit(&ksp, sizeof(ksp));
+ case PKEY_SKEY2PKEY:
+ rc = pkey_ioctl_skey2pkey((struct pkey_skey2pkey __user *)arg);
break;
- }
- case PKEY_VERIFYKEY: {
- struct pkey_verifykey __user *uvk = (void __user *)arg;
- struct pkey_verifykey kvk;
-
- if (copy_from_user(&kvk, uvk, sizeof(kvk)))
- return -EFAULT;
- rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
- &kvk.keysize, &kvk.attributes);
- pr_debug("%s pkey_verifykey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
- rc = -EFAULT;
- memzero_explicit(&kvk, sizeof(kvk));
+ case PKEY_VERIFYKEY:
+ rc = pkey_ioctl_verifykey((struct pkey_verifykey __user *)arg);
break;
- }
- case PKEY_GENPROTK: {
- struct pkey_genprotk __user *ugp = (void __user *)arg;
- struct pkey_genprotk kgp;
-
- if (copy_from_user(&kgp, ugp, sizeof(kgp)))
- return -EFAULT;
- kgp.protkey.len = sizeof(kgp.protkey.protkey);
- rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
- &kgp.protkey.len, &kgp.protkey.type);
- pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc);
- if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp)))
- rc = -EFAULT;
- memzero_explicit(&kgp, sizeof(kgp));
+ case PKEY_GENPROTK:
+ rc = pkey_ioctl_genprotk((struct pkey_genprotk __user *)arg);
break;
- }
- case PKEY_VERIFYPROTK: {
- struct pkey_verifyprotk __user *uvp = (void __user *)arg;
- struct pkey_verifyprotk kvp;
-
- if (copy_from_user(&kvp, uvp, sizeof(kvp)))
- return -EFAULT;
- rc = pkey_verifyprotkey(kvp.protkey.protkey,
- kvp.protkey.len, kvp.protkey.type);
- pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc);
- memzero_explicit(&kvp, sizeof(kvp));
+ case PKEY_VERIFYPROTK:
+ rc = pkey_ioctl_verifyprotk((struct pkey_verifyprotk __user *)arg);
break;
- }
- case PKEY_KBLOB2PROTK: {
- struct pkey_kblob2pkey __user *utp = (void __user *)arg;
- struct pkey_kblob2pkey ktp;
- u8 *kkey;
-
- if (copy_from_user(&ktp, utp, sizeof(ktp)))
- return -EFAULT;
- kkey = _copy_key_from_user(ktp.key, ktp.keylen);
- if (IS_ERR(kkey))
- return PTR_ERR(kkey);
- ktp.protkey.len = sizeof(ktp.protkey.protkey);
- rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
- &ktp.protkey.len, &ktp.protkey.type);
- pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
- kfree_sensitive(kkey);
- if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
- rc = -EFAULT;
- memzero_explicit(&ktp, sizeof(ktp));
+ case PKEY_KBLOB2PROTK:
+ rc = pkey_ioctl_kblob2protk((struct pkey_kblob2pkey __user *)arg);
break;
- }
- case PKEY_GENSECK2: {
- struct pkey_genseck2 __user *ugs = (void __user *)arg;
- size_t klen = KEYBLOBBUFSIZE;
- struct pkey_genseck2 kgs;
- struct pkey_apqn *apqns;
- u8 *kkey;
-
- if (copy_from_user(&kgs, ugs, sizeof(kgs)))
- return -EFAULT;
- apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
- if (IS_ERR(apqns))
- return PTR_ERR(apqns);
- kkey = kzalloc(klen, GFP_KERNEL);
- if (!kkey) {
- kfree(apqns);
- return -ENOMEM;
- }
- rc = pkey_genseckey2(apqns, kgs.apqn_entries,
- kgs.type, kgs.size, kgs.keygenflags,
- kkey, &klen);
- pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc);
- kfree(apqns);
- if (rc) {
- kfree_sensitive(kkey);
- break;
- }
- if (kgs.key) {
- if (kgs.keylen < klen) {
- kfree_sensitive(kkey);
- return -EINVAL;
- }
- if (copy_to_user(kgs.key, kkey, klen)) {
- kfree_sensitive(kkey);
- return -EFAULT;
- }
- }
- kgs.keylen = klen;
- if (copy_to_user(ugs, &kgs, sizeof(kgs)))
- rc = -EFAULT;
- kfree_sensitive(kkey);
+ case PKEY_GENSECK2:
+ rc = pkey_ioctl_genseck2((struct pkey_genseck2 __user *)arg);
break;
- }
- case PKEY_CLR2SECK2: {
- struct pkey_clr2seck2 __user *ucs = (void __user *)arg;
- size_t klen = KEYBLOBBUFSIZE;
- struct pkey_clr2seck2 kcs;
- struct pkey_apqn *apqns;
- u8 *kkey;
-
- if (copy_from_user(&kcs, ucs, sizeof(kcs)))
- return -EFAULT;
- apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
- if (IS_ERR(apqns)) {
- memzero_explicit(&kcs, sizeof(kcs));
- return PTR_ERR(apqns);
- }
- kkey = kzalloc(klen, GFP_KERNEL);
- if (!kkey) {
- kfree(apqns);
- memzero_explicit(&kcs, sizeof(kcs));
- return -ENOMEM;
- }
- rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
- kcs.type, kcs.size, kcs.keygenflags,
- kcs.clrkey.clrkey, kkey, &klen);
- pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc);
- kfree(apqns);
- if (rc) {
- kfree_sensitive(kkey);
- memzero_explicit(&kcs, sizeof(kcs));
- break;
- }
- if (kcs.key) {
- if (kcs.keylen < klen) {
- kfree_sensitive(kkey);
- memzero_explicit(&kcs, sizeof(kcs));
- return -EINVAL;
- }
- if (copy_to_user(kcs.key, kkey, klen)) {
- kfree_sensitive(kkey);
- memzero_explicit(&kcs, sizeof(kcs));
- return -EFAULT;
- }
- }
- kcs.keylen = klen;
- if (copy_to_user(ucs, &kcs, sizeof(kcs)))
- rc = -EFAULT;
- memzero_explicit(&kcs, sizeof(kcs));
- kfree_sensitive(kkey);
+ case PKEY_CLR2SECK2:
+ rc = pkey_ioctl_clr2seck2((struct pkey_clr2seck2 __user *)arg);
break;
- }
- case PKEY_VERIFYKEY2: {
- struct pkey_verifykey2 __user *uvk = (void __user *)arg;
- struct pkey_verifykey2 kvk;
- u8 *kkey;
-
- if (copy_from_user(&kvk, uvk, sizeof(kvk)))
- return -EFAULT;
- kkey = _copy_key_from_user(kvk.key, kvk.keylen);
- if (IS_ERR(kkey))
- return PTR_ERR(kkey);
- rc = pkey_verifykey2(kkey, kvk.keylen,
- &kvk.cardnr, &kvk.domain,
- &kvk.type, &kvk.size, &kvk.flags);
- pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc);
- kfree_sensitive(kkey);
- if (rc)
- break;
- if (copy_to_user(uvk, &kvk, sizeof(kvk)))
- return -EFAULT;
+ case PKEY_VERIFYKEY2:
+ rc = pkey_ioctl_verifykey2((struct pkey_verifykey2 __user *)arg);
break;
- }
- case PKEY_KBLOB2PROTK2: {
- struct pkey_kblob2pkey2 __user *utp = (void __user *)arg;
- struct pkey_apqn *apqns = NULL;
- struct pkey_kblob2pkey2 ktp;
- u8 *kkey;
-
- if (copy_from_user(&ktp, utp, sizeof(ktp)))
- return -EFAULT;
- apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
- if (IS_ERR(apqns))
- return PTR_ERR(apqns);
- kkey = _copy_key_from_user(ktp.key, ktp.keylen);
- if (IS_ERR(kkey)) {
- kfree(apqns);
- return PTR_ERR(kkey);
- }
- ktp.protkey.len = sizeof(ktp.protkey.protkey);
- rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries,
- kkey, ktp.keylen,
- ktp.protkey.protkey, &ktp.protkey.len,
- &ktp.protkey.type);
- pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
- kfree(apqns);
- kfree_sensitive(kkey);
- if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
- rc = -EFAULT;
- memzero_explicit(&ktp, sizeof(ktp));
+ case PKEY_KBLOB2PROTK2:
+ rc = pkey_ioctl_kblob2protk2((struct pkey_kblob2pkey2 __user *)arg);
break;
- }
- case PKEY_APQNS4K: {
- struct pkey_apqns4key __user *uak = (void __user *)arg;
- struct pkey_apqn *apqns = NULL;
- struct pkey_apqns4key kak;
- size_t nr_apqns, len;
- u8 *kkey;
-
- if (copy_from_user(&kak, uak, sizeof(kak)))
- return -EFAULT;
- nr_apqns = kak.apqn_entries;
- if (nr_apqns) {
- apqns = kmalloc_array(nr_apqns,
- sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!apqns)
- return -ENOMEM;
- }
- kkey = _copy_key_from_user(kak.key, kak.keylen);
- if (IS_ERR(kkey)) {
- kfree(apqns);
- return PTR_ERR(kkey);
- }
- rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
- apqns, &nr_apqns);
- pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc);
- kfree_sensitive(kkey);
- if (rc && rc != -ENOSPC) {
- kfree(apqns);
- break;
- }
- if (!rc && kak.apqns) {
- if (nr_apqns > kak.apqn_entries) {
- kfree(apqns);
- return -EINVAL;
- }
- len = nr_apqns * sizeof(struct pkey_apqn);
- if (len) {
- if (copy_to_user(kak.apqns, apqns, len)) {
- kfree(apqns);
- return -EFAULT;
- }
- }
- }
- kak.apqn_entries = nr_apqns;
- if (copy_to_user(uak, &kak, sizeof(kak)))
- rc = -EFAULT;
- kfree(apqns);
+ case PKEY_APQNS4K:
+ rc = pkey_ioctl_apqns4k((struct pkey_apqns4key __user *)arg);
break;
- }
- case PKEY_APQNS4KT: {
- struct pkey_apqns4keytype __user *uat = (void __user *)arg;
- struct pkey_apqn *apqns = NULL;
- struct pkey_apqns4keytype kat;
- size_t nr_apqns, len;
-
- if (copy_from_user(&kat, uat, sizeof(kat)))
- return -EFAULT;
- nr_apqns = kat.apqn_entries;
- if (nr_apqns) {
- apqns = kmalloc_array(nr_apqns,
- sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!apqns)
- return -ENOMEM;
- }
- rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
- kat.flags, apqns, &nr_apqns);
- pr_debug("%s pkey_apqns4keytype()=%d\n", __func__, rc);
- if (rc && rc != -ENOSPC) {
- kfree(apqns);
- break;
- }
- if (!rc && kat.apqns) {
- if (nr_apqns > kat.apqn_entries) {
- kfree(apqns);
- return -EINVAL;
- }
- len = nr_apqns * sizeof(struct pkey_apqn);
- if (len) {
- if (copy_to_user(kat.apqns, apqns, len)) {
- kfree(apqns);
- return -EFAULT;
- }
- }
- }
- kat.apqn_entries = nr_apqns;
- if (copy_to_user(uat, &kat, sizeof(kat)))
- rc = -EFAULT;
- kfree(apqns);
+ case PKEY_APQNS4KT:
+ rc = pkey_ioctl_apqns4kt((struct pkey_apqns4keytype __user *)arg);
break;
- }
- case PKEY_KBLOB2PROTK3: {
- struct pkey_kblob2pkey3 __user *utp = (void __user *)arg;
- u32 protkeylen = PROTKEYBLOBBUFSIZE;
- struct pkey_apqn *apqns = NULL;
- struct pkey_kblob2pkey3 ktp;
- u8 *kkey, *protkey;
-
- if (copy_from_user(&ktp, utp, sizeof(ktp)))
- return -EFAULT;
- apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
- if (IS_ERR(apqns))
- return PTR_ERR(apqns);
- kkey = _copy_key_from_user(ktp.key, ktp.keylen);
- if (IS_ERR(kkey)) {
- kfree(apqns);
- return PTR_ERR(kkey);
- }
- protkey = kmalloc(protkeylen, GFP_KERNEL);
- if (!protkey) {
- kfree(apqns);
- kfree_sensitive(kkey);
- return -ENOMEM;
- }
- rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
- kkey, ktp.keylen,
- protkey, &protkeylen, &ktp.pkeytype);
- pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
- kfree(apqns);
- kfree_sensitive(kkey);
- if (rc) {
- kfree_sensitive(protkey);
- break;
- }
- if (ktp.pkey && ktp.pkeylen) {
- if (protkeylen > ktp.pkeylen) {
- kfree_sensitive(protkey);
- return -EINVAL;
- }
- if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
- kfree_sensitive(protkey);
- return -EFAULT;
- }
- }
- kfree_sensitive(protkey);
- ktp.pkeylen = protkeylen;
- if (copy_to_user(utp, &ktp, sizeof(ktp)))
- return -EFAULT;
+ case PKEY_KBLOB2PROTK3:
+ rc = pkey_ioctl_kblob2protk3((struct pkey_kblob2pkey3 __user *)arg);
break;
- }
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
@@ -1791,499 +770,12 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
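/*
 * [Editorial aside, not part of the patch] Note how every ioctl helper above
 * scrubs its stack copy with memzero_explicit() and frees key buffers with
 * kfree_sensitive() rather than plain memset()/kfree(): a memset() on an
 * object about to go out of scope is a dead store the compiler may elide,
 * while memzero_explicit() inserts a barrier so the wipe survives
 * optimization, and kfree_sensitive() gives heap buffers the same treatment
 * before freeing. A minimal sketch of the pattern (hypothetical helper, the
 * buffer bound is an assumption):
 */
static int example_copyout_and_scrub(void __user *uptr, const u8 *key, u32 len)
{
	u8 buf[MAXPROTKEYSIZE];
	int rc = 0;

	if (len > sizeof(buf))
		return -EINVAL;
	memcpy(buf, key, len);
	if (copy_to_user(uptr, buf, len))
		rc = -EFAULT;
	memzero_explicit(buf, sizeof(buf)); /* wipe survives dead-store elimination */
	return rc;
}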
/*
- * Sysfs and file io operations
- */
-
-/*
- * Sysfs attribute read function for all protected key binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * protected key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- */
-static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
- loff_t off, size_t count)
-{
- struct protaeskeytoken protkeytoken;
- struct pkey_protkey protkey;
- int rc;
-
- if (off != 0 || count < sizeof(protkeytoken))
- return -EINVAL;
- if (is_xts)
- if (count < 2 * sizeof(protkeytoken))
- return -EINVAL;
-
- memset(&protkeytoken, 0, sizeof(protkeytoken));
- protkeytoken.type = TOKTYPE_NON_CCA;
- protkeytoken.version = TOKVER_PROTECTED_KEY;
- protkeytoken.keytype = keytype;
-
- protkey.len = sizeof(protkey.protkey);
- rc = pkey_genprotkey(protkeytoken.keytype,
- protkey.protkey, &protkey.len, &protkey.type);
- if (rc)
- return rc;
-
- protkeytoken.len = protkey.len;
- memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
-
- memcpy(buf, &protkeytoken, sizeof(protkeytoken));
-
- if (is_xts) {
- /* xts needs a second protected key, reuse protkey struct */
- protkey.len = sizeof(protkey.protkey);
- rc = pkey_genprotkey(protkeytoken.keytype,
- protkey.protkey, &protkey.len, &protkey.type);
- if (rc)
- return rc;
-
- protkeytoken.len = protkey.len;
- memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
-
- memcpy(buf + sizeof(protkeytoken), &protkeytoken,
- sizeof(protkeytoken));
-
- return 2 * sizeof(protkeytoken);
- }
-
- return sizeof(protkeytoken);
-}
-
-static ssize_t protkey_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
-
-static struct bin_attribute *protkey_attrs[] = {
- &bin_attr_protkey_aes_128,
- &bin_attr_protkey_aes_192,
- &bin_attr_protkey_aes_256,
- &bin_attr_protkey_aes_128_xts,
- &bin_attr_protkey_aes_256_xts,
- NULL
-};
-
-static struct attribute_group protkey_attr_group = {
- .name = "protkey",
- .bin_attrs = protkey_attrs,
-};
-
-/*
- * Sysfs attribute read function for all secure key ccadata binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * protected key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- */
-static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
- loff_t off, size_t count)
-{
- struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
- int rc;
-
- if (off != 0 || count < sizeof(struct secaeskeytoken))
- return -EINVAL;
- if (is_xts)
- if (count < 2 * sizeof(struct secaeskeytoken))
- return -EINVAL;
-
- rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
- if (rc)
- return rc;
-
- if (is_xts) {
- seckey++;
- rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
- if (rc)
- return rc;
-
- return 2 * sizeof(struct secaeskeytoken);
- }
-
- return sizeof(struct secaeskeytoken);
-}
-
-static ssize_t ccadata_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
-
-static struct bin_attribute *ccadata_attrs[] = {
- &bin_attr_ccadata_aes_128,
- &bin_attr_ccadata_aes_192,
- &bin_attr_ccadata_aes_256,
- &bin_attr_ccadata_aes_128_xts,
- &bin_attr_ccadata_aes_256_xts,
- NULL
-};
-
-static struct attribute_group ccadata_attr_group = {
- .name = "ccadata",
- .bin_attrs = ccadata_attrs,
-};
-
-#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
-
-/*
- * Sysfs attribute read function for all secure key ccacipher binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * secure key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- */
-static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
- bool is_xts, char *buf, loff_t off,
- size_t count)
-{
- size_t keysize = CCACIPHERTOKENSIZE;
- u32 nr_apqns, *apqns = NULL;
- int i, rc, card, dom;
-
- if (off != 0 || count < CCACIPHERTOKENSIZE)
- return -EINVAL;
- if (is_xts)
- if (count < 2 * CCACIPHERTOKENSIZE)
- return -EINVAL;
-
- /* build a list of apqns able to generate a cipher key */
- rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX6, 0, 0, 0, 0);
- if (rc)
- return rc;
-
- memset(buf, 0, is_xts ? 2 * keysize : keysize);
-
- /* simple try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
- if (rc == 0)
- break;
- }
- if (rc)
- return rc;
-
- if (is_xts) {
- keysize = CCACIPHERTOKENSIZE;
- buf += CCACIPHERTOKENSIZE;
- rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
- if (rc == 0)
- return 2 * CCACIPHERTOKENSIZE;
- }
-
- return CCACIPHERTOKENSIZE;
-}
-
-static ssize_t ccacipher_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t ccacipher_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t ccacipher_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
-static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
-
-static struct bin_attribute *ccacipher_attrs[] = {
- &bin_attr_ccacipher_aes_128,
- &bin_attr_ccacipher_aes_192,
- &bin_attr_ccacipher_aes_256,
- &bin_attr_ccacipher_aes_128_xts,
- &bin_attr_ccacipher_aes_256_xts,
- NULL
-};
-
-static struct attribute_group ccacipher_attr_group = {
- .name = "ccacipher",
- .bin_attrs = ccacipher_attrs,
-};
-
-/*
- * Sysfs attribute read function for all ep11 aes key binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * secure key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- * This function and the sysfs attributes using it provide EP11 key blobs
- * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
- * 336 bytes.
+ * File io operations
*/
-static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
- bool is_xts, char *buf, loff_t off,
- size_t count)
-{
- size_t keysize = MAXEP11AESKEYBLOBSIZE;
- u32 nr_apqns, *apqns = NULL;
- int i, rc, card, dom;
-
- if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
- return -EINVAL;
- if (is_xts)
- if (count < 2 * MAXEP11AESKEYBLOBSIZE)
- return -EINVAL;
-
- /* build a list of apqns able to generate a cipher key */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- NULL);
- if (rc)
- return rc;
-
- memset(buf, 0, is_xts ? 2 * keysize : keysize);
-
- /* simple try all apqns from the list */
- for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
- PKEY_TYPE_EP11_AES);
- if (rc == 0)
- break;
- }
- if (rc)
- return rc;
-
- if (is_xts) {
- keysize = MAXEP11AESKEYBLOBSIZE;
- buf += MAXEP11AESKEYBLOBSIZE;
- rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
- PKEY_TYPE_EP11_AES);
- if (rc == 0)
- return 2 * MAXEP11AESKEYBLOBSIZE;
- }
-
- return MAXEP11AESKEYBLOBSIZE;
-}
-
-static ssize_t ep11_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t ep11_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t ep11_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t ep11_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t ep11_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
-
-static struct bin_attribute *ep11_attrs[] = {
- &bin_attr_ep11_aes_128,
- &bin_attr_ep11_aes_192,
- &bin_attr_ep11_aes_256,
- &bin_attr_ep11_aes_128_xts,
- &bin_attr_ep11_aes_256_xts,
- NULL
-};
-
-static struct attribute_group ep11_attr_group = {
- .name = "ep11",
- .bin_attrs = ep11_attrs,
-};
-
-static const struct attribute_group *pkey_attr_groups[] = {
- &protkey_attr_group,
- &ccadata_attr_group,
- &ccacipher_attr_group,
- &ep11_attr_group,
- NULL,
-};
static const struct file_operations pkey_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
- .llseek = no_llseek,
.unlocked_ioctl = pkey_unlocked_ioctl,
};
@@ -2295,43 +787,13 @@ static struct miscdevice pkey_dev = {
.groups = pkey_attr_groups,
};
-/*
- * Module init
- */
-static int __init pkey_init(void)
+int __init pkey_api_init(void)
{
- cpacf_mask_t func_mask;
-
- /*
- * The pckmo instruction should be available - even if we don't
- * actually invoke it. This instruction comes with MSA 3 which
- * is also the minimum level for the kmc instructions which
- * are able to work with protected keys.
- */
- if (!cpacf_query(CPACF_PCKMO, &func_mask))
- return -ENODEV;
-
- /* check for kmc instructions available */
- if (!cpacf_query(CPACF_KMC, &func_mask))
- return -ENODEV;
- if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) ||
- !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) ||
- !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256))
- return -ENODEV;
-
- pkey_debug_init();
-
+ /* register as a misc device */
return misc_register(&pkey_dev);
}
-/*
- * Module exit
- */
-static void __exit pkey_exit(void)
+void __exit pkey_api_exit(void)
{
misc_deregister(&pkey_dev);
- pkey_debug_exit();
}
-
-module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
-module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c
new file mode 100644
index 000000000000..fea243322838
--- /dev/null
+++ b/drivers/s390/crypto/pkey_base.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey base: debug feature, pkey handler registry
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/rculist.h>
+
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key base and api");
+
+/*
+ * pkey debug feature
+ */
+debug_info_t *pkey_dbf_info;
+EXPORT_SYMBOL(pkey_dbf_info);
+
+/*
+ * pkey handler registry
+ */
+
+static DEFINE_SPINLOCK(handler_list_write_lock);
+static LIST_HEAD(handler_list);
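+
+/*
+ * Note: additions to and removals from the handler list are
+ * serialized via handler_list_write_lock; lookups traverse the
+ * list under rcu_read_lock() and thus never block.
+ */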
+
+int pkey_handler_register(struct pkey_handler *handler)
+{
+ const struct pkey_handler *h;
+
+ if (!handler ||
+ !handler->is_supported_key ||
+ !handler->is_supported_keytype)
+ return -EINVAL;
+
+ if (!try_module_get(handler->module))
+ return -ENXIO;
+
+ spin_lock(&handler_list_write_lock);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h == handler) {
+ rcu_read_unlock();
+ spin_unlock(&handler_list_write_lock);
+ module_put(handler->module);
+ return -EEXIST;
+ }
+ }
+ rcu_read_unlock();
+
+ list_add_rcu(&handler->list, &handler_list);
+ spin_unlock(&handler_list_write_lock);
+ synchronize_rcu();
+
+ module_put(handler->module);
+
+ PKEY_DBF_INFO("%s pkey handler '%s' registered\n", __func__,
+ handler->name ?: "<no name>");
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_handler_register);
+
+int pkey_handler_unregister(struct pkey_handler *handler)
+{
+ spin_lock(&handler_list_write_lock);
+ list_del_rcu(&handler->list);
+ INIT_LIST_HEAD_RCU(&handler->list);
+ spin_unlock(&handler_list_write_lock);
+ synchronize_rcu();
+
+ PKEY_DBF_INFO("%s pkey handler '%s' unregistered\n", __func__,
+ handler->name ?: "<no name>");
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_handler_unregister);
+
+/*
+ * Handler invocation functions.
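+ *
+ * Note: the pkey_handler_get_*() functions below take a reference
+ * on the handler's module (via try_module_get()); the caller must
+ * release it with pkey_handler_put() when done with the handler.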
+ */
+
+const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen)
+{
+ const struct pkey_handler *h;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->is_supported_key(key, keylen)) {
+ rcu_read_unlock();
+ return h;
+ }
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+EXPORT_SYMBOL(pkey_handler_get_keybased);
+
+const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt)
+{
+ const struct pkey_handler *h;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->is_supported_keytype(kt)) {
+ rcu_read_unlock();
+ return h;
+ }
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+EXPORT_SYMBOL(pkey_handler_get_keytypebased);
+
+void pkey_handler_put(const struct pkey_handler *handler)
+{
+ const struct pkey_handler *h;
+
+ if (!handler)
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h == handler) {
+ module_put(h->module);
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(pkey_handler_put);
+
+int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->key_to_protkey) {
+ rc = h->key_to_protkey(apqns, nr_apqns, key, keylen,
+ protkey, protkeylen,
+ protkeytype);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_key_to_protkey);
+
+/*
+ * This handler invocation is special as there may be more than
+ * one handler providing support for the very same key (type).
+ * Also a handler may not respond true on is_supported_key(),
+ * so simply try each handler and check the return value here.
+ */
+int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype)
+{
+ const struct pkey_handler *h, *htmp[10];
+ int i, n = 0, rc = -ENODEV;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->slowpath_key_to_protkey && n < ARRAY_SIZE(htmp))
+ htmp[n++] = h;
+ else
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < n; i++) {
+ h = htmp[i];
+ if (rc)
+ rc = h->slowpath_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype);
+ module_put(h->module);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey);
+
+int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->gen_key) {
+ rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_gen_key);
+
+int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->clr_to_key) {
+ rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype,
+ keybitsize, flags, clrkey, clrkeylen,
+ keybuf, keybuflen, keyinfo);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_clr_to_key);
+
+int pkey_handler_verify_key(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->verify_key) {
+ rc = h->verify_key(key, keylen, card, dom,
+ keytype, keybitsize, flags);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_verify_key);
+
+int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->apqns_for_key)
+ rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns);
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_apqns_for_key);
+
+int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->apqns_for_keytype) {
+ rc = h->apqns_for_keytype(keysubtype,
+ cur_mkvp, alt_mkvp, flags,
+ apqns, nr_apqns);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_apqns_for_keytype);
+
+void pkey_handler_request_modules(void)
+{
+#ifdef CONFIG_MODULES
+ static const char * const pkey_handler_modules[] = {
+ "pkey_cca", "pkey_ep11", "pkey_pckmo" };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) {
+ const struct pkey_handler *h;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h->module &&
+ !strcmp(h->module->name, pkey_handler_modules[i])) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found) {
+ pr_debug("request_module(%s)\n", pkey_handler_modules[i]);
+ request_module(pkey_handler_modules[i]);
+ }
+ }
+#endif
+}
+EXPORT_SYMBOL(pkey_handler_request_modules);
+
+/*
+ * Module init
+ */
+static int __init pkey_init(void)
+{
+ int rc;
+
+ /* init debug feature */
+ pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
+ debug_register_view(pkey_dbf_info, &debug_sprintf_view);
+ debug_set_level(pkey_dbf_info, 4);
+
+ /* the handler registry does not need any init */
+
+ rc = pkey_api_init();
+ if (rc)
+ debug_unregister(pkey_dbf_info);
+
+ return rc;
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_exit(void)
+{
+ pkey_api_exit();
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
+module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h
new file mode 100644
index 000000000000..7a1a5ce192d8
--- /dev/null
+++ b/drivers/s390/crypto/pkey_base.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2024
+ *
+ * Pkey base: debug feature, defines and structs
+ * common to all pkey code.
+ */
+
+#ifndef _PKEY_BASE_H_
+#define _PKEY_BASE_H_
+
+#include <linux/types.h>
+#include <asm/debug.h>
+#include <asm/pkey.h>
+
+/*
+ * pkey debug feature
+ */
+
+extern debug_info_t *pkey_dbf_info;
+
+#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
+#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
+#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
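+
+/* note: with the default debug level of 4 set in pkey_init()
+ * PKEY_DBF_INFO events (level 5) are not recorded until the
+ * level is raised via the s390dbf debugfs level file
+ */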
+
+/*
+ * common defines and common structs
+ */
+
+#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
+#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header))
+#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internally */
+#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
+#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */
+
+/* inside view of a generic protected key token */
+struct protkeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* should be 0x01 for protected key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in protkey[] */
+ u8 protkey[]; /* the protected key blob */
+} __packed;
+
+/* inside view of a protected AES key token */
+struct protaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* should be 0x01 for protected key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in protkey[] */
+ u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
+} __packed;
+
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearkeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* 0x02 for clear key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */
+ u32 len; /* bytes actually stored in clearkey[] */
+ u8 clearkey[]; /* clear key value */
+} __packed;
+
+/* helper function which translates the PKEY_KEYTYPE_AES_* values to their key size in bytes */
+static inline u32 pkey_keytype_aes_to_size(u32 keytype)
+{
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ return 16;
+ case PKEY_KEYTYPE_AES_192:
+ return 24;
+ case PKEY_KEYTYPE_AES_256:
+ return 32;
+ default:
+ return 0;
+ }
+}
+
+/* helper function which translates AES key bit size into PKEY_KEYTYPE_AES_* */
+static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize)
+{
+ switch (keybitsize) {
+ case 128:
+ return PKEY_KEYTYPE_AES_128;
+ case 192:
+ return PKEY_KEYTYPE_AES_192;
+ case 256:
+ return PKEY_KEYTYPE_AES_256;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * pkey_api.c:
+ */
+int __init pkey_api_init(void);
+void __exit pkey_api_exit(void);
+
+/*
+ * pkey_sysfs.c:
+ */
+
+extern const struct attribute_group *pkey_attr_groups[];
+
+/*
+ * pkey handler registry
+ */
+
+struct pkey_handler {
+ struct module *module;
+ const char *name;
+ /*
+ * is_supported_key() and is_supported_keytype() are called
+ * within an rcu_read_lock() scope and thus must not sleep!
+ */
+ bool (*is_supported_key)(const u8 *key, u32 keylen);
+ bool (*is_supported_keytype)(enum pkey_key_type);
+ int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype);
+ int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+ int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+ int (*verify_key)(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags);
+ int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns);
+ int (*apqns_for_keytype)(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns);
+ /* used internally by pkey base */
+ struct list_head list;
+};
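+
+/*
+ * A handler module fills in such a struct and registers it with
+ * pkey base. Illustrative sketch only, all 'my_*' names here are
+ * made up:
+ *
+ *   static struct pkey_handler my_handler = {
+ *           .module = THIS_MODULE,
+ *           .name = "PKEY MY handler",
+ *           .is_supported_key = my_is_supported_key,
+ *           .is_supported_keytype = my_is_supported_keytype,
+ *           .key_to_protkey = my_key2protkey,
+ *   };
+ *
+ *   static int __init my_init(void)
+ *   {
+ *           return pkey_handler_register(&my_handler);
+ *   }
+ *
+ *   static void __exit my_exit(void)
+ *   {
+ *           pkey_handler_unregister(&my_handler);
+ *   }
+ */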
+
+int pkey_handler_register(struct pkey_handler *handler);
+int pkey_handler_unregister(struct pkey_handler *handler);
+
+/*
+ * invocation functions for the registered pkey handlers
+ */
+
+const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen);
+const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt);
+void pkey_handler_put(const struct pkey_handler *handler);
+
+int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype);
+int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+int pkey_handler_verify_key(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags);
+int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns);
+int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns);
+
+/*
+ * Unconditionally try to load all handler modules
+ */
+void pkey_handler_request_modules(void);
+
+#endif /* _PKEY_BASE_H_ */
diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c
new file mode 100644
index 000000000000..937051381720
--- /dev/null
+++ b/drivers/s390/crypto/pkey_cca.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey cca specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key CCA handler");
+
+#if IS_MODULE(CONFIG_PKEY_CCA)
+static struct ap_device_id pkey_cca_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4 },
+ { .dev_type = AP_DEVICE_TYPE_CEX5 },
+ { .dev_type = AP_DEVICE_TYPE_CEX6 },
+ { .dev_type = AP_DEVICE_TYPE_CEX7 },
+ { .dev_type = AP_DEVICE_TYPE_CEX8 },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(ap, pkey_cca_card_ids);
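+/* the ap bus device table above lets this handler module get
+ * auto-loaded (via udev modalias matching) as soon as one of the
+ * listed CEX crypto card types becomes available
+ */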
+#endif
+
+/*
+ * Check key blob for known and supported CCA key.
+ */
+static bool is_cca_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_CCA_INTERNAL:
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ case TOKVER_CCA_VLSC:
+ return true;
+ default:
+ return false;
+ }
+ case TOKTYPE_CCA_INTERNAL_PKA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_cca_keytype(enum pkey_key_type key_type)
+{
+ switch (key_type) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ case PKEY_TYPE_CCA_ECC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 _nr_apqns, *_apqns = NULL;
+ int rc;
+
+ if (!flags)
+ flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+ int minhwtype = ZCRYPT_CEX3C;
+
+ if (hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ minhwtype = ZCRYPT_CEX6;
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp0;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp0;
+ } else {
+ /* unknown CCA internal token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, AES_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
+ u64 cur_mkvp = 0, old_mkvp = 0;
+
+ if (t->secid == 0x20) {
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else {
+ /* unknown CCA internal PKA token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cca_apqns4type(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ u32 _nr_apqns, *_apqns = NULL;
+ int rc;
+
+ zcrypt_wait_api_operational();
+
+ if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
+ u64 cur_mkvp64 = 0, old_mkvp64 = 0;
+ int minhwtype = ZCRYPT_CEX3C;
+
+ /* use distinct names for the u64 mkvp values: a local
+  * u64 cur_mkvp would shadow the cur_mkvp[] parameter and
+  * the cast below would dereference the zero-valued local
+  */
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp64 = *((u64 *)cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp64 = *((u64 *)alt_mkvp);
+ if (ktype == PKEY_TYPE_CCA_CIPHER)
+ minhwtype = ZCRYPT_CEX6;
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, AES_MK_SET,
+ cur_mkvp64, old_mkvp64, 1);
+ if (rc)
+ goto out;
+
+ } else if (ktype == PKEY_TYPE_CCA_ECC) {
+ u64 cur_mkvp64 = 0, old_mkvp64 = 0;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp64 = *((u64 *)cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp64 = *((u64 *)alt_mkvp);
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ cur_mkvp64, old_mkvp64, 1);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported key type %d",
+ __func__, (int)ktype);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_apqn *local_apqns = NULL;
+ int i, rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ /* CCA AES data key */
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ /* CCA AES cipher key */
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ /* CCA ECC (private) key */
+ if (keylen < sizeof(struct eccprivkeytoken))
+ return -EINVAL;
+ if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
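+ /* if no apqn list or only the wildcard apqn 0xFFFF/0xFFFF is
+  * given, build a list of suitable apqns for this key here
+  */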
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ rc = cca_sec2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype);
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype);
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype);
+ } else {
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate CCA secure key.
+ * As of now only CCA AES Data or Cipher secure keys are
+ * supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER,
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+{
+ struct pkey_apqn *local_apqns = NULL;
+ int i, len, rc;
+
+ /* check keytype, subtype, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ switch (subtype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = cca_apqns4type(subtype, NULL, NULL, 0,
+ local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (subtype == PKEY_TYPE_CCA_CIPHER) {
+ rc = cca_gencipherkey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags,
+ keybuf, keybuflen);
+ } else {
+ /* PKEY_TYPE_CCA_DATA */
+ rc = cca_genseckey(apqns[i].card, apqns[i].domain,
+ keybitsize, keybuf);
+ *keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
+ }
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate CCA secure key with given clear key value.
+ * As of now only CCA AES Data or Cipher secure keys are
+ * supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER,
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+{
+ struct pkey_apqn *local_apqns = NULL;
+ int i, len, rc;
+
+ /* check keytype, subtype, clrkeylen, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ if (clrkeylen != len) {
+ PKEY_DBF_ERR("%s invalid clear key len %d != %d\n",
+ __func__, clrkeylen, len);
+ return -EINVAL;
+ }
+ switch (subtype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = cca_apqns4type(subtype, NULL, NULL, 0,
+ local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (subtype == PKEY_TYPE_CCA_CIPHER) {
+ rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags, clrkey,
+ keybuf, keybuflen);
+ } else {
+ /* PKEY_TYPE_CCA_DATA */
+ rc = cca_clr2seckey(apqns[i].card, apqns[i].domain,
+ keybitsize, clrkey, keybuf);
+ *keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
+ }
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cca_verifykey(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 nr_apqns, *apqns = NULL;
+ int rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_CCA_DATA;
+ *keybitsize = t->bitsize;
+ rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX3C, AES_MK_SET,
+ t->mkvp, 0, 1);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX3C, AES_MK_SET,
+ 0, t->mkvp, 1);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_CCA_CIPHER;
+ *keybitsize = PKEY_SIZE_UNKNOWN;
+ if (!t->plfver && t->wpllen == 512)
+ *keybitsize = PKEY_SIZE_AES_128;
+ else if (!t->plfver && t->wpllen == 576)
+ *keybitsize = PKEY_SIZE_AES_192;
+ else if (!t->plfver && t->wpllen == 640)
+ *keybitsize = PKEY_SIZE_AES_256;
+ rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX6, AES_MK_SET,
+ t->mkvp0, 0, 1);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX6, AES_MK_SET,
+ 0, t->mkvp0, 1);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else {
+ /* unknown/unsupported key blob */
+ rc = -EINVAL;
+ }
+
+out:
+ kfree(apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * This function provides an alternate but usually slow way
+ * to convert a 'clear key token' with AES key material into
+ * a protected key. This is done via an intermediate step
+ * which creates a CCA AES DATA secure key first and then
+ * derives the protected key from this secure key.
+ */
+static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype)
+{
+ const struct keytoken_header *hdr = (const struct keytoken_header *)key;
+ const struct clearkeytoken *t = (const struct clearkeytoken *)key;
+ u32 tmplen, keysize = 0;
+ u8 *tmpbuf;
+ int i, rc;
+
+ if (keylen < sizeof(*t))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_CLEAR_KEY)
+ keysize = pkey_keytype_aes_to_size(t->keytype);
+ if (!keysize || t->len != keysize || keylen < sizeof(*t) + keysize)
+ return -EINVAL;
+
+ /* alloc tmp key buffer */
+ tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ /* try two times in case of failure */
+ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
+ tmplen = SECKEYBLOBSIZE;
+ rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA,
+ 8 * keysize, 0, t->clearkey, t->len,
+ tmpbuf, &tmplen, NULL);
+ pr_debug("cca_clr2key()=%d\n", rc);
+ if (rc)
+ continue;
+ rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen,
+ protkey, protkeylen, protkeytype);
+ pr_debug("cca_key2protkey()=%d\n", rc);
+ }
+
+ kfree(tmpbuf);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static struct pkey_handler cca_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY CCA handler",
+ .is_supported_key = is_cca_key,
+ .is_supported_keytype = is_cca_keytype,
+ .key_to_protkey = cca_key2protkey,
+ .slowpath_key_to_protkey = cca_slowpath_key2protkey,
+ .gen_key = cca_gen_key,
+ .clr_to_key = cca_clr2key,
+ .verify_key = cca_verifykey,
+ .apqns_for_key = cca_apqns4key,
+ .apqns_for_keytype = cca_apqns4type,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_cca_init(void)
+{
+ /* register this module as pkey handler for all the cca stuff */
+ return pkey_handler_register(&cca_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_cca_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&cca_handler);
+}
+
+module_init(pkey_cca_init);
+module_exit(pkey_cca_exit);
diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c
new file mode 100644
index 000000000000..f42d397a9cb6
--- /dev/null
+++ b/drivers/s390/crypto/pkey_ep11.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey ep11 specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key EP11 handler");
+
+#if IS_MODULE(CONFIG_PKEY_EP11)
+static struct ap_device_id pkey_ep11_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4 },
+ { .dev_type = AP_DEVICE_TYPE_CEX5 },
+ { .dev_type = AP_DEVICE_TYPE_CEX6 },
+ { .dev_type = AP_DEVICE_TYPE_CEX7 },
+ { .dev_type = AP_DEVICE_TYPE_CEX8 },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(ap, pkey_ep11_card_ids);
+#endif
+
+/*
+ * Check key blob for known and supported EP11 key.
+ */
+static bool is_ep11_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ switch (hdr->version) {
+ case TOKVER_EP11_AES:
+ case TOKVER_EP11_AES_WITH_HEADER:
+ case TOKVER_EP11_ECC_WITH_HEADER:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool is_ep11_keytype(enum pkey_key_type key_type)
+{
+ switch (key_type) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ case PKEY_TYPE_EP11_ECC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 _nr_apqns, *_apqns = NULL;
+ int rc;
+
+ if (!flags)
+ flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ if (keylen < sizeof(struct keytoken_header) || flags == 0)
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)
+ (key + sizeof(struct ep11kblob_header));
+ int minhwtype = 0, api = 0;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+ int minhwtype = 0, api = 0;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_apqns4type(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ u32 _nr_apqns, *_apqns = NULL;
+ int rc;
+
+ zcrypt_wait_api_operational();
+
+ if (ktype == PKEY_TYPE_EP11 ||
+ ktype == PKEY_TYPE_EP11_AES ||
+ ktype == PKEY_TYPE_EP11_ECC) {
+ u8 *wkvp = NULL;
+ int api;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ wkvp = cur_mkvp;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, api, wkvp);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int)ktype);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_apqn *local_apqns = NULL;
+ int i, rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 AES key blob with header */
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 ECC key blob with header */
+ if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ /* EP11 AES key blob with header in session field */
+ if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype);
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype);
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype);
+ } else {
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate EP11 secure key.
+ * As of now only EP11 AES secure keys are supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES
+ * or 0 (results in subtype PKEY_TYPE_EP11_AES),
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+{
+ struct pkey_apqn *local_apqns = NULL;
+ int i, len, rc;
+
+ /* check keytype, subtype, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ switch (subtype) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = ep11_apqns4type(subtype, NULL, NULL, 0,
+ local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ rc = ep11_genaeskey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags,
+ keybuf, keybuflen, subtype);
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate EP11 secure key with given clear key value.
+ * As of now only EP11 AES secure keys are supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES
+ * or 0 (assumes PKEY_TYPE_EP11_AES then).
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+{
+ struct pkey_apqn *local_apqns = NULL;
+ int i, len, rc;
+
+ /* check keytype, subtype, clrkeylen, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ if (clrkeylen != len) {
+ PKEY_DBF_ERR("%s invalid clear key len %d != %d\n",
+ __func__, clrkeylen, len);
+ return -EINVAL;
+ }
+ switch (subtype) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!local_apqns)
+ return -ENOMEM;
+ rc = ep11_apqns4type(subtype, NULL, NULL, 0,
+ local_apqns, &nr_apqns);
+ if (rc)
+ goto out;
+ apqns = local_apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain,
+ keybitsize, flags, clrkey,
+ keybuf, keybuflen, subtype);
+ }
+
+out:
+ kfree(local_apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_verifykey(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 nr_apqns, *apqns = NULL;
+ int rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+ int api;
+
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_EP11;
+ *keybitsize = kb->head.bitlen;
+
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX7, api,
+ ep11_kb_wkvp(key, keylen));
+ if (rc)
+ goto out;
+
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
+ struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
+ int api;
+
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_EP11_AES;
+ *keybitsize = kh->bitlen;
+
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX7, api,
+ ep11_kb_wkvp(key, keylen));
+ if (rc)
+ goto out;
+
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else {
+ /* unknown/unsupported key blob */
+ rc = -EINVAL;
+ }
+
+out:
+ kfree(apqns);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * This function provides an alternate but usually slow way
+ * to convert a 'clear key token' with AES key material into
+ * a protected key. That is done via an intermediate step
+ * which creates an EP11 AES secure key first and then derives
+ * the protected key from this secure key.
+ */
+static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype)
+{
+ const struct keytoken_header *hdr = (const struct keytoken_header *)key;
+ const struct clearkeytoken *t = (const struct clearkeytoken *)key;
+ u32 tmplen, keysize = 0;
+ u8 *tmpbuf;
+ int i, rc;
+
+ if (keylen < sizeof(*t))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_CLEAR_KEY)
+ keysize = pkey_keytype_aes_to_size(t->keytype);
+ if (!keysize || t->len != keysize || keylen < sizeof(*t) + keysize)
+ return -EINVAL;
+
+ /* alloc tmp key buffer */
+ tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ /* try two times in case of failure */
+ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
+ tmplen = MAXEP11AESKEYBLOBSIZE;
+ rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11,
+ 8 * keysize, 0, t->clearkey, t->len,
+ tmpbuf, &tmplen, NULL);
+ pr_debug("ep11_clr2key()=%d\n", rc);
+ if (rc)
+ continue;
+ rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen,
+ protkey, protkeylen, protkeytype);
+ pr_debug("ep11_key2protkey()=%d\n", rc);
+ }
+
+ kfree(tmpbuf);
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static struct pkey_handler ep11_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY EP11 handler",
+ .is_supported_key = is_ep11_key,
+ .is_supported_keytype = is_ep11_keytype,
+ .key_to_protkey = ep11_key2protkey,
+ .slowpath_key_to_protkey = ep11_slowpath_key2protkey,
+ .gen_key = ep11_gen_key,
+ .clr_to_key = ep11_clr2key,
+ .verify_key = ep11_verifykey,
+ .apqns_for_key = ep11_apqns4key,
+ .apqns_for_keytype = ep11_apqns4type,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_ep11_init(void)
+{
+ /* register this module as pkey handler for all the ep11 stuff */
+ return pkey_handler_register(&ep11_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_ep11_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&ep11_handler);
+}
+
+module_init(pkey_ep11_init);
+module_exit(pkey_ep11_exit);
diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c
new file mode 100644
index 000000000000..98079b1ed6db
--- /dev/null
+++ b/drivers/s390/crypto/pkey_pckmo.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey pckmo specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <asm/cpacf.h>
+#include <crypto/aes.h>
+#include <linux/random.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key PCKMO handler");
+
+/*
+ * Check key blob for a key type known and supported here.
+ */
+static bool is_pckmo_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct clearkeytoken *t = (struct clearkeytoken *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ switch (hdr->version) {
+ case TOKVER_CLEAR_KEY:
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ case PKEY_KEYTYPE_ECC_P256:
+ case PKEY_KEYTYPE_ECC_P384:
+ case PKEY_KEYTYPE_ECC_P521:
+ case PKEY_KEYTYPE_ECC_ED25519:
+ case PKEY_KEYTYPE_ECC_ED448:
+ case PKEY_KEYTYPE_AES_XTS_128:
+ case PKEY_KEYTYPE_AES_XTS_256:
+ case PKEY_KEYTYPE_HMAC_512:
+ case PKEY_KEYTYPE_HMAC_1024:
+ return true;
+ default:
+ return false;
+ }
+ case TOKVER_PROTECTED_KEY:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool is_pckmo_keytype(enum pkey_key_type keytype)
+{
+ switch (keytype) {
+ case PKEY_TYPE_PROTKEY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Create a protected key from a clear key value via PCKMO instruction.
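+ *
+ * The clear key value is copied to the start of the parameter
+ * block, the PCKMO instruction replaces it in place with the
+ * wrapped key material and appends the 32 byte wrapping key
+ * verification pattern (WKVP). So keysize + AES_WK_VP_SIZE bytes
+ * are copied back as the resulting protected key.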
+ */
+static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ /* mask of available pckmo subfunctions */
+ static cpacf_mask_t pckmo_functions;
+
+ int keysize, rc = -EINVAL;
+ u8 paramblock[160];
+ u32 pkeytype;
+ long fc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ /* 16 byte key, 32 byte aes wkvp, total 48 bytes */
+ keysize = 16;
+ pkeytype = keytype;
+ fc = CPACF_PCKMO_ENC_AES_128_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ /* 24 byte key, 32 byte aes wkvp, total 56 bytes */
+ keysize = 24;
+ pkeytype = keytype;
+ fc = CPACF_PCKMO_ENC_AES_192_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
+ keysize = 32;
+ pkeytype = keytype;
+ fc = CPACF_PCKMO_ENC_AES_256_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P256:
+ /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
+ keysize = 32;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P384:
+ /* 48 byte key, 32 byte aes wkvp, total 80 bytes */
+ keysize = 48;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P521:
+ /* 80 byte key, 32 byte aes wkvp, total 112 bytes */
+ keysize = 80;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_ED25519:
+ /* 32 byte key, 32 byte aes wkvp, total 64 bytes */
+ keysize = 32;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_ED448:
+ /* 64 byte key, 32 byte aes wkvp, total 96 bytes */
+ keysize = 64;
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ /* 2x16 byte keys, 32 byte aes wkvp, total 64 bytes */
+ keysize = 32;
+ pkeytype = PKEY_KEYTYPE_AES_XTS_128;
+ fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ /* 2x32 byte keys, 32 byte aes wkvp, total 96 bytes */
+ keysize = 64;
+ pkeytype = PKEY_KEYTYPE_AES_XTS_256;
+ fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY;
+ break;
+ case PKEY_KEYTYPE_HMAC_512:
+ /* 64 byte key, 32 byte aes wkvp, total 96 bytes */
+ keysize = 64;
+ pkeytype = PKEY_KEYTYPE_HMAC_512;
+ fc = CPACF_PCKMO_ENC_HMAC_512_KEY;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ /* 128 byte key, 32 byte aes wkvp, total 160 bytes */
+ keysize = 128;
+ pkeytype = PKEY_KEYTYPE_HMAC_1024;
+ fc = CPACF_PCKMO_ENC_HMAC_1024_KEY;
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, keytype);
+ goto out;
+ }
+
+ if (clrkeylen && clrkeylen < keysize) {
+ PKEY_DBF_ERR("%s clear key size too small: %u < %d\n",
+ __func__, clrkeylen, keysize);
+ goto out;
+ }
+ if (*protkeylen < keysize + AES_WK_VP_SIZE) {
+ PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
+ __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
+ goto out;
+ }
+
+ /* Did we already check for PCKMO? */
+ if (!pckmo_functions.bytes[0]) {
+ /* no, so check now */
+ if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) {
+ PKEY_DBF_ERR("%s cpacf_query() failed\n", __func__);
+ rc = -ENODEV;
+ goto out;
+ }
+ }
+ /* check for the pckmo subfunction we need now */
+ if (!cpacf_test_func(&pckmo_functions, fc)) {
+ PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ /* prepare param block */
+ memset(paramblock, 0, sizeof(paramblock));
+ memcpy(paramblock, clrkey, keysize);
+
+ /* call the pckmo instruction */
+ cpacf_pckmo(fc, paramblock);
+
+ /* copy created protected key to key buffer including the wkvp block */
+ *protkeylen = keysize + AES_WK_VP_SIZE;
+ memcpy(protkey, paramblock, *protkeylen);
+ *protkeytype = pkeytype;
+
+ rc = 0;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Verify a raw protected key blob.
+ * Currently only AES protected keys are supported.
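+ *
+ * The check tries to actually use the given protected key: a zero
+ * block is encrypted via KMC with the paes function code matching
+ * the key type. If the WKVP of the key does not match the current
+ * wrapping key, the instruction does not process the full message
+ * and the key is rejected.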
+ */
+static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen,
+ u32 protkeytype)
+{
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[MAXPROTKEYSIZE];
+ } param;
+ u8 null_msg[AES_BLOCK_SIZE];
+ u8 dest_buf[AES_BLOCK_SIZE];
+ unsigned int k, pkeylen;
+ unsigned long fc;
+ int rc = -EINVAL;
+
+ switch (protkeytype) {
+ case PKEY_KEYTYPE_AES_128:
+ pkeylen = 16 + AES_WK_VP_SIZE;
+ fc = CPACF_KMC_PAES_128;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ pkeylen = 24 + AES_WK_VP_SIZE;
+ fc = CPACF_KMC_PAES_192;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ pkeylen = 32 + AES_WK_VP_SIZE;
+ fc = CPACF_KMC_PAES_256;
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
+ protkeytype);
+ goto out;
+ }
+ if (protkeylen != pkeylen) {
+ PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
+ __func__, protkeylen, protkeytype);
+ goto out;
+ }
+
+ memset(null_msg, 0, sizeof(null_msg));
+
+ memset(param.iv, 0, sizeof(param.iv));
+ memcpy(param.key, protkey, protkeylen);
+
+ k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
+ sizeof(null_msg));
+ if (k != sizeof(null_msg)) {
+ PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
+ rc = -EKEYREJECTED;
+ goto out;
+ }
+
+ rc = 0;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int pckmo_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ int rc = -EINVAL;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+ if (hdr->type != TOKTYPE_NON_CCA)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_PROTECTED_KEY: {
+ struct protkeytoken *t = (struct protkeytoken *)key;
+
+ if (keylen < sizeof(*t))
+ goto out;
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ if (keylen != sizeof(struct protaeskeytoken))
+ goto out;
+ rc = pckmo_verify_protkey(t->protkey, t->len,
+ t->keytype);
+ if (rc)
+ goto out;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ if (t->len != 64 || keylen != sizeof(*t) + t->len)
+ goto out;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ case PKEY_KEYTYPE_HMAC_512:
+ if (t->len != 96 || keylen != sizeof(*t) + t->len)
+ goto out;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ if (t->len != 160 || keylen != sizeof(*t) + t->len)
+ goto out;
+ break;
+ default:
+ PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n",
+ __func__, t->keytype);
+ goto out;
+ }
+ memcpy(protkey, t->protkey, t->len);
+ *protkeylen = t->len;
+ *protkeytype = t->keytype;
+ break;
+ }
+ case TOKVER_CLEAR_KEY: {
+ struct clearkeytoken *t = (struct clearkeytoken *)key;
+ u32 keysize = 0;
+
+ if (keylen < sizeof(struct clearkeytoken) ||
+ keylen != sizeof(*t) + t->len)
+ goto out;
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ keysize = pkey_keytype_aes_to_size(t->keytype);
+ break;
+ case PKEY_KEYTYPE_ECC_P256:
+ keysize = 32;
+ break;
+ case PKEY_KEYTYPE_ECC_P384:
+ keysize = 48;
+ break;
+ case PKEY_KEYTYPE_ECC_P521:
+ keysize = 80;
+ break;
+ case PKEY_KEYTYPE_ECC_ED25519:
+ keysize = 32;
+ break;
+ case PKEY_KEYTYPE_ECC_ED448:
+ keysize = 64;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ keysize = 32;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ keysize = 64;
+ break;
+ case PKEY_KEYTYPE_HMAC_512:
+ keysize = 64;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ keysize = 128;
+ break;
+ default:
+ break;
+ }
+ if (!keysize) {
+ PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n",
+ __func__, t->keytype);
+ goto out;
+ }
+ if (t->len != keysize) {
+ PKEY_DBF_ERR("%s clear key token: invalid key len %u\n",
+ __func__, t->len);
+ goto out;
+ }
+ rc = pckmo_clr2protkey(t->keytype, t->clearkey, t->len,
+ protkey, protkeylen, protkeytype);
+ break;
+ }
+ default:
+ PKEY_DBF_ERR("%s unknown non-CCA token version %d\n",
+ __func__, hdr->version);
+ break;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate a random protected key.
+ * Currently the generation of AES, AES XTS and HMAC
+ * protected keys is supported.
+ */
+static int pckmo_gen_protkey(u32 keytype, u32 subtype,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ u8 clrkey[128];
+ int keysize;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ keysize = pkey_keytype_aes_to_size(keytype);
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ keysize = 32;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ case PKEY_KEYTYPE_HMAC_512:
+ keysize = 64;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ keysize = 128;
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+ if (subtype != PKEY_TYPE_PROTKEY) {
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+
+ /* generate a dummy random clear key */
+ get_random_bytes(clrkey, keysize);
+
+ /* convert it to a dummy protected key */
+ rc = pckmo_clr2protkey(keytype, clrkey, keysize,
+ protkey, protkeylen, protkeytype);
+ if (rc)
+ goto out;
+
+ /* replace the key part of the protected key with random bytes */
+ get_random_bytes(protkey, keysize);
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Verify a protected key token blob.
+ * Currently only AES protected keys are supported.
+ */
+static int pckmo_verify_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ int rc = -EINVAL;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+ if (hdr->type != TOKTYPE_NON_CCA)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_PROTECTED_KEY: {
+ struct protaeskeytoken *t;
+
+ if (keylen != sizeof(struct protaeskeytoken))
+ goto out;
+ t = (struct protaeskeytoken *)key;
+ rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype);
+ break;
+ }
+ default:
+ PKEY_DBF_ERR("%s unknown non-CCA token version %d\n",
+ __func__, hdr->version);
+ break;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Wrapper functions used for the pkey handler struct
+ */
+
+static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns,
+ size_t _nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *keyinfo)
+{
+ return pckmo_key2protkey(key, keylen,
+ protkey, protkeylen, keyinfo);
+}
+
+static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 _keybitsize, u32 _flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ return pckmo_gen_protkey(keytype, keysubtype,
+ keybuf, keybuflen, keyinfo);
+}
+
+static int pkey_pckmo_verifykey(const u8 *key, u32 keylen,
+ u16 *_card, u16 *_dom,
+ u32 *_keytype, u32 *_keybitsize, u32 *_flags)
+{
+ return pckmo_verify_key(key, keylen);
+}
+
+static struct pkey_handler pckmo_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY PCKMO handler",
+ .is_supported_key = is_pckmo_key,
+ .is_supported_keytype = is_pckmo_keytype,
+ .key_to_protkey = pkey_pckmo_key2protkey,
+ .gen_key = pkey_pckmo_gen_key,
+ .verify_key = pkey_pckmo_verifykey,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_pckmo_init(void)
+{
+ cpacf_mask_t func_mask;
+
+ /*
+ * The pckmo instruction should be available - even if we don't
+	 * actually invoke it. This instruction comes with MSA 3, which
+	 * is also the minimum level for the kmc instructions that are
+	 * able to work with protected keys.
+ */
+ if (!cpacf_query(CPACF_PCKMO, &func_mask))
+ return -ENODEV;
+
+ /* register this module as pkey handler for all the pckmo stuff */
+ return pkey_handler_register(&pckmo_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_pckmo_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&pckmo_handler);
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_pckmo_init);
+module_exit(pkey_pckmo_exit);
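
The conversion path added above is what ultimately serves the pkey ioctl API's
clear-key to protected-key operation. As a minimal userspace sketch - assuming
the struct and constant names from the s390 uapi header asm/pkey.h, which
should be verified against the installed headers - converting a clear AES-256
key could look like this:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>		/* s390 pkey uapi: structs and ioctl numbers */

int main(void)
{
	struct pkey_clr2protk c2p;
	int fd = open("/dev/pkey", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&c2p, 0, sizeof(c2p));
	c2p.keytype = PKEY_KEYTYPE_AES_256;
	memset(c2p.clrkey.clrkey, 0x55, 32);	/* dummy key material */
	if (ioctl(fd, PKEY_CLR2PROTK, &c2p) == 0)
		printf("protected key: type %u len %u\n",
		       c2p.protkey.type, c2p.protkey.len);
	close(fd);
	return 0;
}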
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
new file mode 100644
index 000000000000..cc0fc1e264bd
--- /dev/null
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey module sysfs related functions
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/sysfs.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+
+#include "pkey_base.h"
+
+/*
+ * Wrapper around pkey_handler_gen_key() which deals with the
+ * ENODEV return code and then tries to enforce a pkey handler
+ * module load.
+ */
+static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ int rc;
+
+ rc = pkey_handler_gen_key(NULL, 0,
+ keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo);
+ if (rc == -ENODEV) {
+ pkey_handler_request_modules();
+ rc = pkey_handler_gen_key(NULL, 0,
+ keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo);
+ }
+
+ return rc;
+}
+
+/*
+ * Sysfs attribute read function for all protected key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ struct protaeskeytoken protkeytoken;
+ struct pkey_protkey protkey;
+ int rc;
+
+ if (off != 0 || count < sizeof(protkeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(protkeytoken))
+ return -EINVAL;
+
+ memset(&protkeytoken, 0, sizeof(protkeytoken));
+ protkeytoken.type = TOKTYPE_NON_CCA;
+ protkeytoken.version = TOKVER_PROTECTED_KEY;
+ protkeytoken.keytype = keytype;
+
+ protkey.len = sizeof(protkey.protkey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ protkey.protkey, &protkey.len,
+ &protkey.type);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf, &protkeytoken, sizeof(protkeytoken));
+
+ if (is_xts) {
+ /* xts needs a second protected key, reuse protkey struct */
+ protkey.len = sizeof(protkey.protkey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ protkey.protkey, &protkey.len,
+ &protkey.type);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf + sizeof(protkeytoken), &protkeytoken,
+ sizeof(protkeytoken));
+
+ return 2 * sizeof(protkeytoken);
+ }
+
+ return sizeof(protkeytoken);
+}
+
+/*
+ * Sysfs attribute read function for the AES XTS prot key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_aes_xts_attr_read(u32 keytype, char *buf,
+ loff_t off, size_t count)
+{
+ struct protkeytoken *t = (struct protkeytoken *)buf;
+ u32 protlen, prottype;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_XTS_128:
+ protlen = 64;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ protlen = 96;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (off != 0 || count < sizeof(*t) + protlen)
+ return -EINVAL;
+
+ memset(t, 0, sizeof(*t) + protlen);
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ t->protkey, &protlen, &prottype);
+ if (rc)
+ return rc;
+
+ t->len = protlen;
+
+ return sizeof(*t) + protlen;
+}
+
+/*
+ * Sysfs attribute read function for the HMAC prot key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf,
+ loff_t off, size_t count)
+{
+ struct protkeytoken *t = (struct protkeytoken *)buf;
+ u32 protlen, prottype;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_HMAC_512:
+ protlen = 96;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ protlen = 160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (off != 0 || count < sizeof(*t) + protlen)
+ return -EINVAL;
+
+ memset(t, 0, sizeof(*t) + protlen);
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ t->protkey, &protlen, &prottype);
+ if (rc)
+ return rc;
+
+ t->len = protlen;
+
+ return sizeof(*t) + protlen;
+}
+
+static ssize_t protkey_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_xts_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_128,
+ buf, off, count);
+}
+
+static ssize_t protkey_aes_xts_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_256,
+ buf, off, count);
+}
+
+static ssize_t protkey_hmac_512_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_512,
+ buf, off, count);
+}
+
+static ssize_t protkey_hmac_1024_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_1024,
+ buf, off, count);
+}
+
+static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
+static BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
+static BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
+static BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
+
+static struct bin_attribute *protkey_attrs[] = {
+ &bin_attr_protkey_aes_128,
+ &bin_attr_protkey_aes_192,
+ &bin_attr_protkey_aes_256,
+ &bin_attr_protkey_aes_128_xts,
+ &bin_attr_protkey_aes_256_xts,
+ &bin_attr_protkey_aes_xts_128,
+ &bin_attr_protkey_aes_xts_256,
+ &bin_attr_protkey_hmac_512,
+ &bin_attr_protkey_hmac_1024,
+ NULL
+};
+
+static struct attribute_group protkey_attr_group = {
+ .name = "protkey",
+ .bin_attrs = protkey_attrs,
+};
+
+/*
+ * Sysfs attribute read function for all secure key ccadata binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
+ u32 buflen;
+ int rc;
+
+ if (off != 0 || count < sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(struct secaeskeytoken))
+ return -EINVAL;
+
+ buflen = sizeof(seckey->seckey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ seckey->seckey, &buflen, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ seckey++;
+ buflen = sizeof(seckey->seckey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ seckey->seckey, &buflen, NULL);
+ if (rc)
+ return rc;
+
+ return 2 * sizeof(struct secaeskeytoken);
+ }
+
+ return sizeof(struct secaeskeytoken);
+}
+
+static ssize_t ccadata_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+
+static struct bin_attribute *ccadata_attrs[] = {
+ &bin_attr_ccadata_aes_128,
+ &bin_attr_ccadata_aes_192,
+ &bin_attr_ccadata_aes_256,
+ &bin_attr_ccadata_aes_128_xts,
+ &bin_attr_ccadata_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccadata_attr_group = {
+ .name = "ccadata",
+ .bin_attrs = ccadata_attrs,
+};
+
+#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
+
+/*
+ * Sysfs attribute read function for all secure key ccacipher binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ u32 keysize = CCACIPHERTOKENSIZE;
+ int rc;
+
+ if (off != 0 || count < CCACIPHERTOKENSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * CCACIPHERTOKENSIZE)
+ return -EINVAL;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+ rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_CCA_CIPHER, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = CCACIPHERTOKENSIZE;
+ buf += CCACIPHERTOKENSIZE;
+ rc = sys_pkey_handler_gen_key(
+ pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_CCA_CIPHER, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+ return 2 * CCACIPHERTOKENSIZE;
+ }
+
+ return CCACIPHERTOKENSIZE;
+}
+
+static ssize_t ccacipher_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+
+static struct bin_attribute *ccacipher_attrs[] = {
+ &bin_attr_ccacipher_aes_128,
+ &bin_attr_ccacipher_aes_192,
+ &bin_attr_ccacipher_aes_256,
+ &bin_attr_ccacipher_aes_128_xts,
+ &bin_attr_ccacipher_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs = ccacipher_attrs,
+};
+
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE, which is currently
+ * 336 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ u32 keysize = MAXEP11AESKEYBLOBSIZE;
+ int rc;
+
+ if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+ rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_EP11_AES, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = MAXEP11AESKEYBLOBSIZE;
+ buf += MAXEP11AESKEYBLOBSIZE;
+ rc = sys_pkey_handler_gen_key(
+ pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_EP11_AES, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+ return 2 * MAXEP11AESKEYBLOBSIZE;
+ }
+
+ return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+ &bin_attr_ep11_aes_128,
+ &bin_attr_ep11_aes_192,
+ &bin_attr_ep11_aes_256,
+ &bin_attr_ep11_aes_128_xts,
+ &bin_attr_ep11_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+ .name = "ep11",
+ .bin_attrs = ep11_attrs,
+};
+
+const struct attribute_group *pkey_attr_groups[] = {
+ &protkey_attr_group,
+ &ccadata_attr_group,
+ &ccacipher_attr_group,
+ &ep11_attr_group,
+ NULL,
+};
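
Each read of one of these binary attributes produces a fresh random key, which
is why partial reads are rejected: the blob must be fetched in a single read
call whose count covers the whole token. A hypothetical userspace sketch,
assuming the attributes appear under the pkey misc device at
/sys/devices/virtual/misc/pkey/protkey/:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char blob[512];
	ssize_t n;
	int fd = open("/sys/devices/virtual/misc/pkey/protkey/protkey_aes_256",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* one full-size read; a short count or nonzero offset gets -EINVAL */
	n = read(fd, blob, sizeof(blob));
	if (n > 0)
		printf("received %zd byte protected key token\n", n);
	close(fd);
	return 0;
}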
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 4aeb3e1213c7..67a807e2e75b 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -26,6 +26,18 @@ MODULE_LICENSE("GPL v2");
struct ap_matrix_dev *matrix_dev;
debug_info_t *vfio_ap_dbf_info;
+static ssize_t features_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "guest_matrix hotplug ap_config\n");
+}
+static DEVICE_ATTR_RO(features);
+
+static struct attribute *matrix_dev_attrs[] = {
+ &dev_attr_features.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(matrix_dev);
+
/* Only type 10 adapters (CEX4 and later) are supported
* by the AP matrix device driver
*/
@@ -68,6 +80,7 @@ static struct device_driver matrix_driver = {
.name = "vfio_ap",
.bus = &matrix_bus,
.suppress_bind_attrs = true,
+ .dev_groups = matrix_dev_groups,
};
static int vfio_ap_matrix_dev_create(void)
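
The read-only "features" attribute added here gives management software a
stable way to probe vfio_ap capabilities before relying on them. A
hypothetical check, assuming the matrix device sits at
/sys/devices/vfio_ap/matrix:

#include <stdio.h>
#include <string.h>

static int vfio_ap_has_feature(const char *feat)
{
	char line[128];
	FILE *f = fopen("/sys/devices/vfio_ap/matrix/features", "r");

	if (!f)
		return 0;
	if (!fgets(line, sizeof(line), f))
		line[0] = '\0';
	fclose(f);
	return strstr(line, feat) != NULL;	/* e.g. "hotplug" */
}

int main(void)
{
	printf("ap_config supported: %d\n", vfio_ap_has_feature("ap_config"));
	return 0;
}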
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 74036886ca87..5020696f1379 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -715,7 +715,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- pr_debug("%s no matching queue found => ENODEV\n", __func__);
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
@@ -819,7 +819,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- pr_debug("%s no matching queue found => ENODEV\n", __func__);
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
@@ -940,8 +940,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- pr_debug("%s no match for address %02x.%04x => ENODEV\n",
- __func__, xcrb->user_defined, *domain);
+ pr_debug("no match for address %02x.%04x => ENODEV\n",
+ xcrb->user_defined, *domain);
rc = -ENODEV;
goto out;
}
@@ -991,7 +991,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcrb)
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
@@ -1138,15 +1138,13 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (!pref_zq) {
if (targets && target_num == 1) {
- pr_debug("%s no match for address %02x.%04x => ENODEV\n",
- __func__, (int)targets->ap_id,
- (int)targets->dom_id);
+ pr_debug("no match for address %02x.%04x => ENODEV\n",
+ (int)targets->ap_id, (int)targets->dom_id);
} else if (targets) {
- pr_debug("%s no match for %d target addrs => ENODEV\n",
- __func__, (int)target_num);
+ pr_debug("no match for %d target addrs => ENODEV\n",
+ (int)target_num);
} else {
- pr_debug("%s no match for address ff.ffff => ENODEV\n",
- __func__);
+ pr_debug("no match for address ff.ffff => ENODEV\n");
}
rc = -ENODEV;
goto out_free;
@@ -1195,7 +1193,7 @@ long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
@@ -1247,7 +1245,7 @@ static long zcrypt_rng(char *buffer)
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- pr_debug("%s no matching queue found => ENODEV\n", __func__);
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
@@ -1910,7 +1908,6 @@ static const struct file_operations zcrypt_fops = {
#endif
.open = zcrypt_open,
.release = zcrypt_release,
- .llseek = no_llseek,
};
/*
@@ -2037,8 +2034,7 @@ int zcrypt_wait_api_operational(void)
break;
default:
/* other failure */
- pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n",
- __func__, rc);
+ pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc);
break;
}
break;
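
The __func__ arguments dropped from these pr_debug() calls are redundant
rather than lost information: with CONFIG_DYNAMIC_DEBUG the function name can
be prepended at runtime by enabling the 'f' decorator flag, for example via
echo 'file zcrypt_api.c +pf' > /sys/kernel/debug/dynamic_debug/control.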
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 7bef2cc4e461..43a27cb3db84 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -172,7 +172,7 @@ EXPORT_SYMBOL(cca_check_secaescipherkey);
* key token. Returns 0 on success or errno value on failure.
*/
int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
- const u8 *token, size_t keysize,
+ const u8 *token, u32 keysize,
int checkcpacfexport)
{
struct eccprivkeytoken *t = (struct eccprivkeytoken *)token;
@@ -187,7 +187,7 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
}
if (t->len > keysize) {
if (dbg)
- DBF("%s token check failed, len %d > keysize %zu\n",
+ DBF("%s token check failed, len %d > keysize %u\n",
__func__, (int)t->len, keysize);
return -EINVAL;
}
@@ -737,7 +737,7 @@ static const u8 aes_cipher_key_skeleton[] = {
* Generate (random) CCA AES CIPHER secure key.
*/
int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize)
+ u8 *keybuf, u32 *keybufsize)
{
int rc;
u8 *mem, *ptr;
@@ -1085,7 +1085,7 @@ out:
* Build CCA AES CIPHER secure key with a given clear key value.
*/
int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize)
{
int rc;
u8 *token;
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 5ddf02f965f9..aed7e8384542 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -153,7 +153,7 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
* key token. Returns 0 on success or errno value on failure.
*/
int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
- const u8 *token, size_t keysize,
+ const u8 *token, u32 keysize,
int checkcpacfexport);
/*
@@ -178,7 +178,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
* Generate (random) CCA AES CIPHER secure key.
*/
int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize);
+ u8 *keybuf, u32 *keybufsize);
/*
 * Derive protected key from CCA AES cipher secure key.
@@ -190,7 +190,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
* Build CCA AES CIPHER secure key with a given clear key value.
*/
int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize);
/*
 * Derive protected key from CCA ECC secure private key.
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index b43db17a4e0e..cb7e6da43602 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -203,7 +203,7 @@ out:
 * For valid ep11 keyblobs, returns a reference to the wrapping key verification
* pattern. Otherwise NULL.
*/
-const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen)
+const u8 *ep11_kb_wkvp(const u8 *keyblob, u32 keybloblen)
{
struct ep11keyblob *kb;
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ep11_kb_wkvp);
* Simple check if the key blob is a valid EP11 AES key blob with header.
*/
int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp)
+ const u8 *key, u32 keylen, int checkcpacfexp)
{
struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
@@ -225,7 +225,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*hdr) + sizeof(*kb)) {
- DBF("%s key check failed, keylen %zu < %zu\n",
+ DBF("%s key check failed, keylen %u < %zu\n",
__func__, keylen, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
@@ -250,7 +250,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
}
if (hdr->len > keylen) {
if (dbg)
- DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
__func__, (int)hdr->len, keylen);
return -EINVAL;
}
@@ -284,7 +284,7 @@ EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
* Simple check if the key blob is a valid EP11 ECC key blob with header.
*/
int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp)
+ const u8 *key, u32 keylen, int checkcpacfexp)
{
struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
@@ -292,7 +292,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*hdr) + sizeof(*kb)) {
- DBF("%s key check failed, keylen %zu < %zu\n",
+ DBF("%s key check failed, keylen %u < %zu\n",
__func__, keylen, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
@@ -317,7 +317,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
}
if (hdr->len > keylen) {
if (dbg)
- DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
__func__, (int)hdr->len, keylen);
return -EINVAL;
}
@@ -352,14 +352,14 @@ EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
* the header in the session field (old style EP11 AES key).
*/
int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp)
+ const u8 *key, u32 keylen, int checkcpacfexp)
{
struct ep11keyblob *kb = (struct ep11keyblob *)key;
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
if (keylen < sizeof(*kb)) {
- DBF("%s key check failed, keylen %zu < %zu\n",
+ DBF("%s key check failed, keylen %u < %zu\n",
__func__, keylen, sizeof(*kb));
return -EINVAL;
}
@@ -378,7 +378,7 @@ int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
}
if (kb->head.len > keylen) {
if (dbg)
- DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
__func__, (int)kb->head.len, keylen);
return -EINVAL;
}
@@ -932,7 +932,7 @@ out:
}
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize, u32 keybufver)
+ u8 *keybuf, u32 *keybufsize, u32 keybufver)
{
struct ep11kblob_header *hdr;
size_t hdr_size, pl_size;
@@ -1256,7 +1256,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
const u8 *enckey, size_t enckeysize,
u32 mech, const u8 *iv,
u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize,
+ u8 *keybuf, u32 *keybufsize,
u8 keybufver)
{
struct ep11kblob_header *hdr;
@@ -1412,7 +1412,7 @@ out:
}
int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
u32 keytype)
{
int rc;
@@ -1471,7 +1471,7 @@ out:
EXPORT_SYMBOL(ep11_clr2keyblob);
int ep11_kblob2protkey(u16 card, u16 dom,
- const u8 *keyblob, size_t keybloblen,
+ const u8 *keyblob, u32 keybloblen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
struct ep11kblob_header *hdr;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
index 9d17fd5228a7..9f1bdffdec68 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.h
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -54,7 +54,7 @@ static inline bool is_ep11_keyblob(const u8 *key)
 * For valid ep11 keyblobs, returns a reference to the wrapping key verification
* pattern. Otherwise NULL.
*/
-const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);
+const u8 *ep11_kb_wkvp(const u8 *kblob, u32 kbloblen);
/*
* Simple check if the key blob is a valid EP11 AES key blob with header.
@@ -63,7 +63,7 @@ const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);
* Returns 0 on success or errno value on failure.
*/
int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp);
+ const u8 *key, u32 keylen, int checkcpacfexp);
/*
* Simple check if the key blob is a valid EP11 ECC key blob with header.
@@ -72,7 +72,7 @@ int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
* Returns 0 on success or errno value on failure.
*/
int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp);
+ const u8 *key, u32 keylen, int checkcpacfexp);
/*
* Simple check if the key blob is a valid EP11 AES key blob with
@@ -82,7 +82,7 @@ int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
* Returns 0 on success or errno value on failure.
*/
int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
- const u8 *key, size_t keylen, int checkcpacfexp);
+ const u8 *key, u32 keylen, int checkcpacfexp);
/* EP11 card info struct */
struct ep11_card_info {
@@ -115,13 +115,13 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
* Generate (random) EP11 AES secure key.
*/
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize, u32 keybufver);
+ u8 *keybuf, u32 *keybufsize, u32 keybufver);
/*
* Generate EP11 AES secure key with given clear key value.
*/
int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
u32 keytype);
/*
@@ -149,7 +149,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
/*
 * Derive protected key from EP11 key blob (AES and ECC keys).
*/
-int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype);
void zcrypt_ep11misc_exit(void);
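
Together with the zcrypt_ccamisc changes above, these prototype updates settle
key and blob lengths on u32 throughout the zcrypt/pkey interface: size_t is
64 bits wide on s390x while the callers track lengths in u32, which forced the
%zu/%u format juggling cleaned up in the corresponding DBF() messages.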
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 3b39cb8f926d..adc65eddaa1e 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -427,7 +427,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq,
len = t80h->len;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- pr_debug("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("len mismatch => EMSGSIZE\n");
msg->rc = -EMSGSIZE;
goto out;
}
@@ -487,8 +487,8 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
+ pr_debug("send me cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -537,8 +537,8 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
+ pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 215f257d2360..b64c9d9fc613 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -437,9 +437,8 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
break;
default:
- pr_debug("%s unknown CPRB minor version '%c%c'\n",
- __func__, msg->cprbx.func_id[0],
- msg->cprbx.func_id[1]);
+ pr_debug("unknown CPRB minor version '%c%c'\n",
+ msg->cprbx.func_id[0], msg->cprbx.func_id[1]);
}
/* copy data block */
@@ -629,9 +628,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy CPRB to user */
if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
- pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_control_blk_length,
- msg->fmt2.count1);
+ pr_debug("reply_control_blk_length %u < required %u => EMSGSIZE\n",
+ xcrb->reply_control_blk_length, msg->fmt2.count1);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
@@ -642,9 +640,8 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy data buffer to user */
if (msg->fmt2.count2) {
if (xcrb->reply_data_length < msg->fmt2.count2) {
- pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_data_length,
- msg->fmt2.count2);
+ pr_debug("reply_data_length %u < required %u => EMSGSIZE\n",
+ xcrb->reply_data_length, msg->fmt2.count2);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_data_addr,
@@ -673,9 +670,8 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
char *data = reply->msg;
if (xcrb->resp_len < msg->fmt2.count1) {
- pr_debug("%s resp_len %u < required %u => EMSGSIZE\n",
- __func__, (unsigned int)xcrb->resp_len,
- msg->fmt2.count1);
+ pr_debug("resp_len %u < required %u => EMSGSIZE\n",
+ (unsigned int)xcrb->resp_len, msg->fmt2.count1);
return -EMSGSIZE;
}
@@ -875,8 +871,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = sizeof(struct type86x_reply) + t86r->length;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- pr_debug("%s len mismatch => EMSGSIZE\n",
- __func__);
+ pr_debug("len mismatch => EMSGSIZE\n");
msg->rc = -EMSGSIZE;
goto out;
}
@@ -890,8 +885,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- pr_debug("%s len mismatch => EMSGSIZE\n",
- __func__);
+ pr_debug("len mismatch => EMSGSIZE\n");
msg->rc = -EMSGSIZE;
goto out;
}
@@ -941,8 +935,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- pr_debug("%s len mismatch => EMSGSIZE\n",
- __func__);
+ pr_debug("len mismatch => EMSGSIZE\n");
msg->rc = -EMSGSIZE;
goto out;
}
@@ -1154,8 +1147,8 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
out:
if (rc)
- pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
+ pr_debug("send cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -1277,8 +1270,8 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
out:
if (rc)
- pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
+ pr_debug("send cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index cc178874c4a6..8643947fee8e 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -687,7 +687,6 @@ static int openprom_release(struct inode * inode, struct file * file)
static const struct file_operations openprom_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = openprom_ioctl,
.compat_ioctl = openprom_compat_ioctl,
.open = openprom_open,
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 3c88f29f4c47..8bbed7a7afb7 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -221,7 +221,6 @@ static irqreturn_t uctrl_interrupt(int irq, void *dev_id)
static const struct file_operations uctrl_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = uctrl_ioctl,
.open = uctrl_open,
};
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index cea3a79d538e..0e10502660de 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -157,7 +157,6 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
}
ncmd->status = 0;
- ncmd->message = 0;
}
static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd)
@@ -199,7 +198,6 @@ static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
* Polls the chip in a reasonably efficient manner waiting for an
* event to occur. After a short quick poll we begin to yield the CPU
* (if possible). In irq contexts the time-out is arbitrarily limited.
- * Callers may hold locks as long as they are held in irq mode.
*
* Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
*/
@@ -1228,24 +1226,15 @@ out:
return ret;
}
-/*
- * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
- * unsigned char *phase, int *count, unsigned char **data)
- *
- * Purpose : transfers data in given phase using polled I/O
- *
- * Inputs : instance - instance of driver, *phase - pointer to
- * what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer,
- * can_sleep - 1 or 0 when sleeping is permitted or not, respectively.
- *
- * Returns : -1 when different phase is entered without transferring
- * maximum number of bytes, 0 if all bytes are transferred or exit
- * is in same phase.
- *
- * Also, *phase, *count, *data are modified in place.
+/**
+ * NCR5380_transfer_pio() - transfers data in given phase using polled I/O
+ * @instance: instance of driver
+ * @phase: pointer to what phase is expected
+ * @count: pointer to number of bytes to transfer
+ * @data: pointer to data pointer
+ * @can_sleep: 1 or 0 when sleeping is permitted or not, respectively
*
- * XXX Note : handling for bus free may be useful.
+ * Returns: void. *phase, *count, *data are modified in place.
*/
/*
@@ -1254,9 +1243,9 @@ out:
* counts, we will always do a pseudo DMA or DMA transfer.
*/
-static int NCR5380_transfer_pio(struct Scsi_Host *instance,
- unsigned char *phase, int *count,
- unsigned char **data, unsigned int can_sleep)
+static void NCR5380_transfer_pio(struct Scsi_Host *instance,
+ unsigned char *phase, int *count,
+ unsigned char **data, unsigned int can_sleep)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char p = *phase, tmp;
@@ -1277,8 +1266,8 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
* valid
*/
- if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- HZ * can_sleep) < 0)
+ if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ | SR_BSY,
+ SR_REQ | SR_BSY, HZ * can_sleep) < 0)
break;
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1329,17 +1318,19 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n");
-/*
- * We have several special cases to consider during REQ/ACK handshaking :
- * 1. We were in MSGOUT phase, and we are on the last byte of the
- * message. ATN must be dropped as ACK is dropped.
- *
- * 2. We are in a MSGIN phase, and we are on the last byte of the
- * message. We must exit with ACK asserted, so that the calling
- * code may raise ATN before dropping ACK to reject the message.
- *
- * 3. ACK and ATN are clear and the target may proceed as normal.
- */
+ /*
+ * We have several special cases to consider during REQ/ACK
+ * handshaking:
+ *
+ * 1. We were in MSGOUT phase, and we are on the last byte of
+ * the message. ATN must be dropped as ACK is dropped.
+ *
+ * 2. We are in MSGIN phase, and we are on the last byte of the
+ * message. We must exit with ACK asserted, so that the calling
+ * code may raise ATN before dropping ACK to reject the message.
+ *
+	 * 3. ACK and ATN are clear and the target may proceed as normal.
+ */
if (!(p == PHASE_MSGIN && c == 1)) {
if (p == PHASE_MSGOUT && c > 1)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1361,11 +1352,6 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
*phase = tmp & PHASE_MASK;
else
*phase = PHASE_UNKNOWN;
-
- if (!c || (*phase == p))
- return 0;
- else
- return -1;
}
/**
@@ -1485,6 +1471,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
unsigned char **data)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected);
int c = *count;
unsigned char p = *phase;
unsigned char *d = *data;
@@ -1496,7 +1483,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
return -1;
}
- NCR5380_to_ncmd(hostdata->connected)->phase = p;
+ ncmd->phase = p;
if (p & SR_IO) {
if (hostdata->read_overruns)
@@ -1574,79 +1561,80 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
/* The result is zero iff pseudo DMA send/receive was completed. */
hostdata->dma_len = c;
-/*
- * A note regarding the DMA errata workarounds for early NMOS silicon.
- *
- * For DMA sends, we want to wait until the last byte has been
- * transferred out over the bus before we turn off DMA mode. Alas, there
- * seems to be no terribly good way of doing this on a 5380 under all
- * conditions. For non-scatter-gather operations, we can wait until REQ
- * and ACK both go false, or until a phase mismatch occurs. Gather-sends
- * are nastier, since the device will be expecting more data than we
- * are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
- * could test Last Byte Sent to assure transfer (I imagine this is precisely
- * why this signal was added to the newer chips) but on the older 538[01]
- * this signal does not exist. The workaround for this lack is a watchdog;
- * we bail out of the wait-loop after a modest amount of wait-time if
- * the usual exit conditions are not met. Not a terribly clean or
- * correct solution :-%
- *
- * DMA receive is equally tricky due to a nasty characteristic of the NCR5380.
- * If the chip is in DMA receive mode, it will respond to a target's
- * REQ by latching the SCSI data into the INPUT DATA register and asserting
- * ACK, even if it has _already_ been notified by the DMA controller that
- * the current DMA transfer has completed! If the NCR5380 is then taken
- * out of DMA mode, this already-acknowledged byte is lost. This is
- * not a problem for "one DMA transfer per READ command", because
- * the situation will never arise... either all of the data is DMA'ed
- * properly, or the target switches to MESSAGE IN phase to signal a
- * disconnection (either operation bringing the DMA to a clean halt).
- * However, in order to handle scatter-receive, we must work around the
- * problem. The chosen fix is to DMA fewer bytes, then check for the
- * condition before taking the NCR5380 out of DMA mode. One or two extra
- * bytes are transferred via PIO as necessary to fill out the original
- * request.
- */
-
- if (hostdata->flags & FLAG_DMA_FIXUP) {
- if (p & SR_IO) {
- /*
- * The workaround was to transfer fewer bytes than we
- * intended to with the pseudo-DMA read function, wait for
- * the chip to latch the last byte, read it, and then disable
- * pseudo-DMA mode.
- *
- * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
- * REQ is deasserted when ACK is asserted, and not reasserted
- * until ACK goes false. Since the NCR5380 won't lower ACK
- * until DACK is asserted, which won't happen unless we twiddle
- * the DMA port or we take the NCR5380 out of DMA mode, we
- * can guarantee that we won't handshake another extra
- * byte.
- */
+ /*
+ * A note regarding the DMA errata workarounds for early NMOS silicon.
+ *
+ * For DMA sends, we want to wait until the last byte has been
+ * transferred out over the bus before we turn off DMA mode. Alas, there
+ * seems to be no terribly good way of doing this on a 5380 under all
+ * conditions. For non-scatter-gather operations, we can wait until REQ
+ * and ACK both go false, or until a phase mismatch occurs. Gather-sends
+ * are nastier, since the device will be expecting more data than we
+ * are prepared to send it, and REQ will remain asserted. On a 53C8[01]
+ * we could test Last Byte Sent to assure transfer (I imagine this is
+ * precisely why this signal was added to the newer chips) but on the
+ * older 538[01] this signal does not exist. The workaround for this
+ * lack is a watchdog; we bail out of the wait-loop after a modest
+ * amount of wait-time if the usual exit conditions are not met.
+ * Not a terribly clean or correct solution :-%
+ *
+ * DMA receive is equally tricky due to a nasty characteristic of the
+ * NCR5380. If the chip is in DMA receive mode, it will respond to a
+ * target's REQ by latching the SCSI data into the INPUT DATA register
+ * and asserting ACK, even if it has _already_ been notified by the
+ * DMA controller that the current DMA transfer has completed! If the
+ * NCR5380 is then taken out of DMA mode, this already-acknowledged
+ * byte is lost.
+ *
+ * This is not a problem for "one DMA transfer per READ
+ * command", because the situation will never arise... either all of
+ * the data is DMA'ed properly, or the target switches to MESSAGE IN
+ * phase to signal a disconnection (either operation bringing the DMA
+ * to a clean halt). However, in order to handle scatter-receive, we
+ * must work around the problem. The chosen fix is to DMA fewer bytes,
+ * then check for the condition before taking the NCR5380 out of DMA
+ * mode. One or two extra bytes are transferred via PIO as necessary
+ * to fill out the original request.
+ */
- if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
- BASR_DRQ, BASR_DRQ, 0) < 0) {
- result = -1;
- shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
- }
- if (NCR5380_poll_politely(hostdata, STATUS_REG,
- SR_REQ, 0, 0) < 0) {
- result = -1;
- shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
- }
- d[*count - 1] = NCR5380_read(INPUT_DATA_REG);
- } else {
- /*
- * Wait for the last byte to be sent. If REQ is being asserted for
- * the byte we're interested, we'll ACK it and it will go false.
- */
- if (NCR5380_poll_politely2(hostdata,
- BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
- BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) {
- result = -1;
- shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
+ if ((hostdata->flags & FLAG_DMA_FIXUP) &&
+ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
+ /*
+ * The workaround was to transfer fewer bytes than we
+ * intended to with the pseudo-DMA receive function, wait for
+ * the chip to latch the last byte, read it, and then disable
+ * DMA mode.
+ *
+ * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
+ * REQ is deasserted when ACK is asserted, and not reasserted
+ * until ACK goes false. Since the NCR5380 won't lower ACK
+ * until DACK is asserted, which won't happen unless we twiddle
+ * the DMA port or we take the NCR5380 out of DMA mode, we
+ * can guarantee that we won't handshake another extra
+ * byte.
+ *
+ * If sending, wait for the last byte to be sent. If REQ is
+	 * being asserted for the byte we're interested in, we'll ACK it
+ * and it will go false.
+ */
+ if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+ BASR_DRQ, BASR_DRQ, 0)) {
+ if ((p & SR_IO) &&
+ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
+ if (!NCR5380_poll_politely(hostdata, STATUS_REG,
+ SR_REQ, 0, 0)) {
+ d[c] = NCR5380_read(INPUT_DATA_REG);
+ --ncmd->this_residual;
+ } else {
+ result = -1;
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "PDMA fixup: !REQ timeout\n");
+ }
}
+ } else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) {
+ result = -1;
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "PDMA fixup: DRQ timeout\n");
}
}
@@ -1666,9 +1654,6 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* Side effects : SCSI things happen, the disconnected queue will be
* modified if a command disconnects, *instance->connected will
* change.
- *
- * XXX Note : we need to watch for bus free or a reset condition here
- * to recover from an unexpected bus free condition.
*/
static void NCR5380_information_transfer(struct Scsi_Host *instance)
@@ -1807,9 +1792,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
return;
case PHASE_MSGIN:
len = 1;
+ tmp = 0xff;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
- ncmd->message = tmp;
+ if (tmp == 0xff)
+ break;
switch (tmp) {
case ABORT:
@@ -1996,6 +1983,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
break;
case PHASE_STATIN:
len = 1;
+ tmp = ncmd->status;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
ncmd->status = tmp;
@@ -2005,9 +1993,20 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_dprint(NDEBUG_ANY, instance);
} /* switch(phase) */
} else {
+ int err;
+
spin_unlock_irq(&hostdata->lock);
- NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
+ err = NCR5380_poll_politely(hostdata, STATUS_REG,
+ SR_REQ, SR_REQ, HZ);
spin_lock_irq(&hostdata->lock);
+
+ if (err < 0 && hostdata->connected &&
+ !(NCR5380_read(STATUS_REG) & SR_BSY)) {
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "BSY signal lost\n");
+ do_reset(instance);
+ bus_reset_cleanup(instance);
+ }
}
}
}
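
Note how the callers compensate for NCR5380_transfer_pio() no longer returning
a result: the MSGIN case preseeds tmp with 0xff and treats an unchanged value
as a failed transfer, and the STATIN case preseeds tmp with the previous
status so a failed byte leaves ncmd->status intact. The unconnected poll path
now also escalates a lost BSY signal into a bus reset instead of silently
retrying.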
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 8dc2be4212dc..d402d4bffcb2 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -3,10 +3,10 @@
* NCR 5380 defines
*
* Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 666-5836
+ * Visionary Computing
+ * (Unix consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
*
* For more information, please consult
*
@@ -78,7 +78,7 @@
#define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. drivers */
#define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */
#define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */
-#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */
+#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */
#define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */
#define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */
@@ -135,7 +135,7 @@
#define BASR_IRQ 0x10 /* ro mirror of IRQ pin */
#define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */
#define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */
-#define BASR_ATN 0x02 /* ro BUS status */
+#define BASR_ATN 0x02 /* ro BUS status */
#define BASR_ACK 0x01 /* ro BUS status */
/* Write any value to this register to start a DMA send */
@@ -170,7 +170,7 @@
#define CSR_BASE CSR_53C80_INTR
/* Note : PHASE_* macros are based on the values of the STATUS register */
-#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
+#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
#define PHASE_DATAOUT 0
#define PHASE_DATAIN SR_IO
@@ -231,7 +231,6 @@ struct NCR5380_cmd {
int this_residual;
struct scatterlist *buffer;
int status;
- int message;
int phase;
struct list_head list;
};
@@ -286,8 +285,9 @@ static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data,
- unsigned int can_sleep);
+static void NCR5380_transfer_pio(struct Scsi_Host *instance,
+ unsigned char *phase, int *count,
+ unsigned char **data, unsigned int can_sleep);
static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
unsigned int, u8, u8,
unsigned int, u8, u8, unsigned long);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index b22857c6f3f4..ec3834bda111 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1267,7 +1267,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
- ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
+ (le32_to_cpu(readcmd->sg.count) * sizeof(struct sgentryraw));
}
BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
@@ -1302,7 +1302,7 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
if (ret < 0)
return ret;
fibsize = sizeof(struct aac_read64) +
- ((le32_to_cpu(readcmd->sg.count) - 1) *
+ (le32_to_cpu(readcmd->sg.count) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1337,7 +1337,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
if (ret < 0)
return ret;
fibsize = sizeof(struct aac_read) +
- ((le32_to_cpu(readcmd->sg.count) - 1) *
+ (le32_to_cpu(readcmd->sg.count) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1401,7 +1401,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
- ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
+ (le32_to_cpu(writecmd->sg.count) * sizeof(struct sgentryraw));
}
BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
@@ -1436,7 +1436,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
if (ret < 0)
return ret;
fibsize = sizeof(struct aac_write64) +
- ((le32_to_cpu(writecmd->sg.count) - 1) *
+ (le32_to_cpu(writecmd->sg.count) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1473,7 +1473,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
if (ret < 0)
return ret;
fibsize = sizeof(struct aac_write) +
- ((le32_to_cpu(writecmd->sg.count) - 1) *
+ (le32_to_cpu(writecmd->sg.count) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1592,9 +1592,9 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
/*
* Build Scatter/Gather list
*/
- fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
+ fibsize = sizeof(struct aac_srb) +
((le32_to_cpu(srbcmd->sg.count) & 0xff) *
- sizeof (struct sgentry64));
+ sizeof(struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1624,7 +1624,7 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
* Build Scatter/Gather list
*/
fibsize = sizeof (struct aac_srb) +
- (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
+ ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
@@ -1693,8 +1693,7 @@ static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
fibptr->hw_fib_va->header.XferState &=
~cpu_to_le32(FastResponseCapable);
- fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
- sizeof(struct sgentry64);
+ fibsize = sizeof(struct aac_srb) + sizeof(struct sgentry64);
/* allocate DMA buffer for response */
addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
@@ -1833,7 +1832,7 @@ static int aac_get_safw_ciss_luns(struct aac_dev *dev)
struct aac_ciss_phys_luns_resp *phys_luns;
datasize = sizeof(struct aac_ciss_phys_luns_resp) +
- (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+ AAC_MAX_TARGETS * sizeof(struct _ciss_lun);
phys_luns = kmalloc(datasize, GFP_KERNEL);
if (phys_luns == NULL)
goto out;
@@ -2267,7 +2266,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->a_ops.adapter_bounds = aac_bounds_32;
dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
sizeof(struct aac_fibhdr) -
- sizeof(struct aac_write) + sizeof(struct sgentry)) /
+ sizeof(struct aac_write)) /
sizeof(struct sgentry);
if (dev->dac_support) {
dev->a_ops.adapter_read = aac_read_block64;
@@ -2278,8 +2277,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->scsi_host_ptr->sg_tablesize =
(dev->max_fib_size -
sizeof(struct aac_fibhdr) -
- sizeof(struct aac_write64) +
- sizeof(struct sgentry64)) /
+ sizeof(struct aac_write64)) /
sizeof(struct sgentry64);
} else {
dev->a_ops.adapter_read = aac_read_block;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 7d5a155073c6..1d09d3ac6aa4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -322,7 +322,7 @@ struct aac_ciss_phys_luns_resp {
u8 level3[2];
u8 level2[2];
u8 node_ident[16]; /* phys. node identifier */
- } lun[1]; /* List of phys. devices */
+ } lun[]; /* List of phys. devices */
};
/*
@@ -507,32 +507,27 @@ struct sge_ieee1212 {
struct sgmap {
__le32 count;
- struct sgentry sg[1];
+ struct sgentry sg[];
};
struct user_sgmap {
u32 count;
- struct user_sgentry sg[1];
+ struct user_sgentry sg[];
};
struct sgmap64 {
__le32 count;
- struct sgentry64 sg[1];
+ struct sgentry64 sg[];
};
struct user_sgmap64 {
u32 count;
- struct user_sgentry64 sg[1];
+ struct user_sgentry64 sg[];
};
struct sgmapraw {
__le32 count;
- struct sgentryraw sg[1];
-};
-
-struct user_sgmapraw {
- u32 count;
- struct user_sgentryraw sg[1];
+ struct sgentryraw sg[];
};
struct creation_info
@@ -873,7 +868,7 @@ union aac_init
__le16 element_count;
__le16 comp_thresh;
__le16 unused;
- } rrq[1]; /* up to 64 RRQ addresses */
+ } rrq[] __counted_by_le(rr_queue_count); /* up to 64 RRQ addresses */
} r8;
};
@@ -2029,8 +2024,8 @@ struct aac_srb_reply
};
struct aac_srb_unit {
- struct aac_srb srb;
struct aac_srb_reply srb_reply;
+ struct aac_srb srb;
};
/*
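
Every aacraid hunk in this file and in aachba.c above is the same mechanical conversion: a one-element trailing array becomes a C99 flexible array member, so sizeof() stops counting a built-in first entry and each size computation drops its "- 1" correction. A self-contained sketch of the arithmetic, with illustrative types (both functions return the same value, padding permitting):

	#include <stddef.h>

	struct entry { unsigned int addr, len; };

	struct map_old {
		unsigned int count;
		struct entry sg[1];	/* sizeof(struct map_old) includes one entry */
	};

	struct map_new {
		unsigned int count;
		struct entry sg[];	/* contributes nothing to sizeof() */
	};

	/* Allocation size for n trailing entries: */
	static size_t old_sz(size_t n)
	{
		return sizeof(struct map_old) + (n - 1) * sizeof(struct entry);
	}

	static size_t new_sz(size_t n)
	{
		return sizeof(struct map_new) + n * sizeof(struct entry);
	}

In-kernel code can write new_sz() as struct_size(map, sg, n) from <linux/overflow.h>, which adds overflow checking, and the __counted_by_le(rr_queue_count) annotation on rrq[] additionally tells FORTIFY and UBSAN how many elements are valid.
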
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index e7cc927ed952..68240d6f27ab 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -523,7 +523,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
+ if ((fibsize < sizeof(struct user_aac_srb)) ||
(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
rcode = -EINVAL;
goto cleanup;
@@ -561,7 +561,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ actual_fibsize = sizeof(struct aac_srb) +
((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
(sizeof(struct sgentry64) - sizeof(struct sgentry));
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 0f64b0244303..28cf18955a08 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -522,8 +522,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
spin_lock_init(&dev->iq_lock);
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- - sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgentry))
+ - sizeof(struct aac_fibhdr) - sizeof(struct aac_write))
/ sizeof(struct sgentry);
dev->comm_interface = AAC_COMM_PRODUCER;
dev->raw_io_interface = dev->raw_io_64 = 0;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 25cee03d7f97..47287559c768 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2327,8 +2327,9 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
sg64->sg[0].count = cpu_to_le32(datasize);
- ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
- FsaNormal, 1, 1, NULL, NULL);
+ ret = aac_fib_send(ScsiPortCommand64, fibptr,
+ sizeof(struct aac_srb) + sizeof(struct sgentry),
+ FsaNormal, 1, 1, NULL, NULL);
dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 11ef58204e96..28115ed637e8 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -410,7 +410,7 @@ static void aac_src_start_adapter(struct aac_dev *dev)
lower_32_bits(dev->init_pa),
upper_32_bits(dev->init_pa),
sizeof(struct _r8) +
- (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
+ AAC_MAX_HRRQ * sizeof(struct _rrq),
0, 0, 0, NULL, NULL, NULL, NULL, NULL);
} else {
init->r7.host_elapsed_seconds =
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 06acb5ff609e..76a1e373386e 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5528,7 +5528,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
struct beiscsi_hba *phba = NULL;
struct be_eq_obj *pbe_eq;
unsigned int s_handle;
- char wq_name[20];
int ret, i;
ret = beiscsi_enable_pci(pcidev);
@@ -5634,9 +5633,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
- phba->shost->host_no);
- phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
+ phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1,
+ phba->shost->host_no);
if (!phba->wq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_dev_probe-"
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 5023c0ab4277..e52ce9b01f49 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -1431,7 +1431,7 @@ bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
* param[in] vf_id - VF_ID
*
* return
- * If lookup succeeds, retuns fcs vf object, otherwise returns NULL
+ * If lookup succeeds, returns fcs vf object, otherwise returns NULL
*/
bfa_fcs_vf_t *
bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index a9d3d8562d3c..66fb701401de 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -766,9 +766,8 @@ bfad_thread_workq(struct bfad_s *bfad)
struct bfad_im_s *im = bfad->im;
bfa_trc(bfad, 0);
- snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
- bfad->inst_no);
- im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
+ im->drv_workq = alloc_ordered_workqueue("bfad_wq_%d", WQ_MEM_RECLAIM,
+ bfad->inst_no);
if (!im->drv_workq)
return BFA_STATUS_FAILED;
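
The bfad change above is one instance of a conversion repeated across this series (be2iscsi, fnic, libfc, libsas, esas2r, fcoe_sysfs, ...): create_singlethread_workqueue() is a legacy wrapper, while alloc_ordered_workqueue() keeps the one-work-item-at-a-time ordering guarantee, takes printf-style name arguments, and makes the WQ_MEM_RECLAIM requirement explicit for queues on the I/O path. Because the core formats the name, drivers can also delete their snprintf'd name buffers, as bfad_im.h does next. A minimal sketch with generic names:

	#include <linux/workqueue.h>

	/* Before: format the queue name by hand, then create the queue. */
	char name[16];
	snprintf(name, sizeof(name), "mydrv_wq_%d", inst_no);
	wq = create_singlethread_workqueue(name);

	/* After: the workqueue core formats the name itself. */
	wq = alloc_ordered_workqueue("mydrv_wq_%d", WQ_MEM_RECLAIM, inst_no);
	if (!wq)
		return -ENOMEM;
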
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 4353feedf76a..0884af04bd1f 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -134,7 +134,6 @@ struct bfad_fcp_binding {
struct bfad_im_s {
struct bfad_s *bfad;
struct workqueue_struct *drv_workq;
- char drv_workq_name[KOBJ_NAME_LEN];
struct work_struct aen_im_notify_work;
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 7e74f77da14f..6d47a4d8eed6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -358,18 +358,12 @@ struct bnx2fc_rport {
dma_addr_t lcq_dma;
u32 lcq_mem_size;
- void *ofld_req[4];
- dma_addr_t ofld_req_dma[4];
- void *enbl_req;
- dma_addr_t enbl_req_dma;
-
spinlock_t tgt_lock;
spinlock_t cq_lock;
atomic_t num_active_ios;
u32 flush_in_prog;
unsigned long timestamp;
unsigned long retry_delay_timestamp;
- struct list_head free_task_list;
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
struct list_head active_cmd_queue;
struct list_head els_queue;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 1078c20c5ef6..f49783b89d04 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2363,8 +2363,8 @@ static int _bnx2fc_create(struct net_device *netdev,
interface->vlan_id = vlan_id;
interface->tm_timeout = BNX2FC_TM_TIMEOUT;
- interface->timer_work_queue =
- create_singlethread_workqueue("bnx2fc_timer_wq");
+ interface->timer_work_queue = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM, "bnx2fc_timer_wq");
if (!interface->timer_work_queue) {
printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
rc = -EINVAL;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index df7d04afce05..7030efee5c46 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -815,11 +815,6 @@ extern struct bnx2i_hba *get_adapter_list_head(void);
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
u16 iscsi_cid);
-int bnx2i_alloc_ep_pool(void);
-void bnx2i_release_ep_pool(void);
-struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
-struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
-
struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
@@ -869,12 +864,6 @@ extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep);
-/* Debug related function prototypes */
-extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
-extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
-
extern int bnx2i_percpu_io_thread(void *arg);
extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
struct bnx2i_conn *bnx2i_conn,
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index d92cf1dccc2f..0909b03e2497 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -485,7 +485,6 @@ struct cxgbi_device {
unsigned char nmtus;
unsigned char nports;
struct pci_dev *pdev;
- struct dentry *debugfs_root;
struct iscsi_transport *itp;
struct module *owner;
@@ -499,7 +498,6 @@ struct cxgbi_device {
unsigned int rxq_idx_cntr;
struct cxgbi_ports_map pmap;
- void (*dev_ddp_cleanup)(struct cxgbi_device *);
struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *);
int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *,
struct cxgbi_task_tag_info *);
@@ -512,7 +510,6 @@ struct cxgbi_device {
unsigned int, int);
void (*csk_release_offload_resources)(struct cxgbi_sock *);
- int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
void (*csk_send_abort_req)(struct cxgbi_sock *);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index f8a09e3eba58..6e1b252cea0e 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -822,7 +822,8 @@ static int __init rdac_init(void)
/*
* Create workqueue to handle mode selects for rdac
*/
- kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
+ kmpath_rdacd =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "kmpath_rdacd");
if (!kmpath_rdacd) {
scsi_unregister_device_handler(&rdac_dh);
printk(KERN_ERR "kmpath_rdacd creation failed.\n");
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index 6a6ec32c46bd..9ac69356b13e 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -1114,7 +1114,8 @@ int efct_scsi_tgt_new_device(struct efct *efct)
atomic_set(&efct->tgt_efct.watermark_hit, 0);
atomic_set(&efct->tgt_efct.initiator_count, 0);
- lio_wq = create_singlethread_workqueue("efct_lio_worker");
+ lio_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ "efct_lio_worker");
if (!lio_wq) {
efc_log_err(efct, "workqueue create failed\n");
return -EIO;
diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c
index 2e83a667901f..1a7437f4328e 100644
--- a/drivers/scsi/elx/libefc/efc_nport.c
+++ b/drivers/scsi/elx/libefc/efc_nport.c
@@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
spin_lock_irqsave(&efc->lock, flags);
list_for_each_entry(nport, &domain->nport_list, list_entry) {
if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
- kref_put(&nport->ref, nport->release);
/* Shutdown this NPORT */
efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ kref_put(&nport->ref, nport->release);
break;
}
}
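
The one-line reorder above closes a use-after-free window: the old code dropped the nport reference before posting the shutdown event, so a final kref_put() could free the object while the state machine still ran on it. The general rule, sketched with illustrative types (post_shutdown_event() is a stand-in, not efct API): kref_put() must be the last access to the object.

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref ref;
		/* ... payload ... */
	};

	static void obj_release(struct kref *ref)
	{
		kfree(container_of(ref, struct obj, ref));
	}

	static void shutdown_obj(struct obj *o)
	{
		post_shutdown_event(o);		/* last use of o */
		kref_put(&o->ref, obj_release);	/* may free o; touch nothing after */
	}
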
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index ed63f7a9ea54..8a133254c4f6 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -929,7 +929,6 @@ struct esas2r_adapter {
struct list_head fw_event_list;
spinlock_t fw_event_lock;
u8 fw_events_off; /* if '1', then ignore events */
- char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
/*
* intr_mode stores the interrupt mode currently being used by this
* adapter. it is based on the interrupt_mode module parameter, but
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index c1a5ab662dc8..0cea5f3d1a08 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -311,9 +311,8 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
sema_init(&a->nvram_semaphore, 1);
esas2r_fw_event_off(a);
- snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
- a->index);
- a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
+ a->fw_event_q =
+ alloc_ordered_workqueue("esas2r/%d", WQ_MEM_RECLAIM, a->index);
init_waitqueue_head(&a->buffered_ioctl_waiter);
init_waitqueue_head(&a->nvram_waiter);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 7d3b904af9e8..0609ca6b9353 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -45,12 +45,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo,
*/
#define fcoe_ctlr_id(x) \
((x)->id)
-#define fcoe_ctlr_work_q_name(x) \
- ((x)->work_q_name)
#define fcoe_ctlr_work_q(x) \
((x)->work_q)
-#define fcoe_ctlr_devloss_work_q_name(x) \
- ((x)->devloss_work_q_name)
#define fcoe_ctlr_devloss_work_q(x) \
((x)->devloss_work_q)
#define fcoe_ctlr_mode(x) \
@@ -797,18 +793,14 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
- snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
- "ctlr_wq_%d", ctlr->id);
- ctlr->work_q = create_singlethread_workqueue(
- ctlr->work_q_name);
+ ctlr->work_q = alloc_ordered_workqueue("ctlr_wq_%d", WQ_MEM_RECLAIM,
+ ctlr->id);
if (!ctlr->work_q)
goto out_del;
- snprintf(ctlr->devloss_work_q_name,
- sizeof(ctlr->devloss_work_q_name),
- "ctlr_dl_wq_%d", ctlr->id);
- ctlr->devloss_work_q = create_singlethread_workqueue(
- ctlr->devloss_work_q_name);
+ ctlr->devloss_work_q = alloc_ordered_workqueue("ctlr_dl_wq_%d",
+ WQ_MEM_RECLAIM,
+ ctlr->id);
if (!ctlr->devloss_work_q)
goto out_del_q;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 29eead383eb9..0044717d4486 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1161,14 +1161,16 @@ static int __init fnic_init_module(void)
goto err_create_fnic_ioreq_slab;
}
- fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
+ fnic_event_queue =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
if (!fnic_event_queue) {
printk(KERN_ERR PFX "fnic work queue create failed\n");
err = -ENOMEM;
goto err_create_fnic_workq;
}
- fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
+ fnic_fip_queue =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_fip_q");
if (!fnic_fip_queue) {
printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
err = -ENOMEM;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index ec1a3e7ee94d..6219807ce3b9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2302,7 +2302,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
hisi_hba->last_slot_index = 0;
- hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
+ hisi_hba->wq =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev));
if (!hisi_hba->wq) {
dev_err(dev, "sas_alloc: failed to create workqueue\n");
goto err_out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index feda9b54b443..4cd3a3eab6f1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2421,7 +2421,7 @@ out:
spin_lock_irqsave(&device->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state)) {
spin_unlock_irqrestore(&device->done_lock, flags);
- dev_info(dev, "slot complete: task(%pK) ignored\n ",
+ dev_info(dev, "slot complete: task(%pK) ignored\n",
task);
return;
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 7f987335b44c..e021f1106bea 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -292,11 +292,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
}
if (shost->transportt->create_work_queue) {
- snprintf(shost->work_q_name, sizeof(shost->work_q_name),
- "scsi_wq_%d", shost->host_no);
- shost->work_q = alloc_workqueue("%s",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, shost->work_q_name);
+ shost->work_q = alloc_workqueue(
+ "scsi_wq_%d",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+ shost->host_no);
if (!shost->work_q) {
error = -EINVAL;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index a3d1013c8307..e66c3ef74267 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -37,6 +37,7 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static u16 max_sectors = IBMVFC_MAX_SECTORS;
static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
@@ -83,6 +84,9 @@ MODULE_PARM_DESC(default_timeout,
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
"[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_sectors, max_sectors, ushort, S_IRUGO);
+MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. "
+ "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]");
module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
"[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
@@ -1494,7 +1498,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
memset(login_info, 0, sizeof(*login_info));
login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
- login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
+ login_info->max_dma_len = cpu_to_be64(max_sectors << 9);
login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
login_info->partition_num = cpu_to_be32(vhost->partition_number);
@@ -5230,7 +5234,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
}
vhost->logged_in = 1;
- npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
+ npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors);
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
rsp->partition_name, rsp->device_name, rsp->port_loc_code,
rsp->drc_name, npiv_max_sectors);
@@ -6329,7 +6333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
shost->can_queue = scsi_qdepth;
shost->max_lun = max_lun;
shost->max_id = max_targets;
- shost->max_sectors = IBMVFC_MAX_SECTORS;
+ shost->max_sectors = max_sectors;
shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
shost->unique_id = shost->host_no;
shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
@@ -6556,6 +6560,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
**/
static int __init ibmvfc_module_init(void)
{
+ int min_max_sectors = PAGE_SIZE >> 9;
int rc;
if (!firmware_has_feature(FW_FEATURE_VIO))
@@ -6564,6 +6569,16 @@ static int __init ibmvfc_module_init(void)
printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+ /*
+ * Range check the max_sectors module parameter. The upper bound is
+ * implicitly checked since the parameter is a ushort.
+ */
+ if (max_sectors < min_max_sectors) {
+ printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n",
+ min_max_sectors);
+ max_sectors = min_max_sectors;
+ }
+
ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
if (!ibmvfc_transport_template)
return -ENOMEM;
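
The init-time check above is the standard shape for range-validating a module parameter: the lower bound is one page of 512-byte sectors, and declaring the parameter ushort caps the top for free. A distilled sketch with generic names (not ibmvfc API); <linux/minmax.h> offers max_t() as a terser clamp, though the open-coded form lets the driver log the adjustment:

	#include <linux/module.h>
	#include <linux/minmax.h>

	static u16 max_sectors = 2048;			/* driver default */
	module_param_named(max_sectors, max_sectors, ushort, 0444);
	MODULE_PARM_DESC(max_sectors, "Maximum sectors per request");

	static int __init mydrv_init(void)
	{
		u16 min_max_sectors = PAGE_SIZE >> 9;	/* sectors per page */

		/* Silent equivalent of the logged clamp above: */
		max_sectors = max_t(u16, max_sectors, min_max_sectors);
		return 0;
	}
	module_init(mydrv_init);
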
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 745ad5ac7251..c73ed2314ad0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -32,7 +32,7 @@
#define IBMVFC_DEBUG 0
#define IBMVFC_MAX_TARGETS 1024
#define IBMVFC_MAX_LUN 0xffffffff
-#define IBMVFC_MAX_SECTORS 0xffffu
+#define IBMVFC_MAX_SECTORS 2048
#define IBMVFC_MAX_DISC_THREADS 4
#define IBMVFC_TGT_MEMPOOL_SZ 64
#define IBMVFC_MAX_CMDS_PER_LUN 64
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2fca17cf8b51..16d085d56e9d 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3425,7 +3425,6 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
struct scsi_info *vscsi;
int rc = 0;
long hrc = 0;
- char wq_name[24];
vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
if (!vscsi) {
@@ -3536,8 +3535,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
init_completion(&vscsi->wait_idle);
init_completion(&vscsi->unconfig);
- snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
- vscsi->work_q = create_workqueue(wq_name);
+ vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1,
+ dev_name(&vdev->dev));
if (!vscsi->work_q) {
rc = -ENOMEM;
dev_err(&vscsi->dev, "create_workqueue failed\n");
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index c77d6ca1a210..b2b643c6dbbe 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1030,7 +1030,7 @@ struct ipr_hostrcb_fabric_desc {
#define IPR_PATH_FAILED 0x03
__be16 num_entries;
- struct ipr_hostrcb_config_element elem[1];
+ struct ipr_hostrcb_config_element elem[];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb64_fabric_desc {
@@ -1044,7 +1044,7 @@ struct ipr_hostrcb64_fabric_desc {
u8 res_path[8];
u8 reserved3[6];
__be16 num_entries;
- struct ipr_hostrcb64_config_element elem[1];
+ struct ipr_hostrcb64_config_element elem[];
}__attribute__((packed, aligned (8)));
#define for_each_hrrq(hrrq, ioa_cfg) \
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1d91c457527f..f84a7e6ae379 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -2693,7 +2693,8 @@ int fc_setup_exch_mgr(void)
fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
fc_cpu_mask = (1 << fc_cpu_order) - 1;
- fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
+ fc_exch_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ "fc_exch_workqueue");
if (!fc_exch_workqueue)
goto err;
return 0;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 33da3c1085f0..308cb4872f96 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -2263,7 +2263,8 @@ struct fc4_prov fc_rport_t0_prov = {
*/
int fc_setup_rport(void)
{
- rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
+ rport_event_queue =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fc_rport_eq");
if (!rport_event_queue)
return -ENOMEM;
return 0;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 88714b7b0dba..7b4e7a61965a 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -564,7 +564,6 @@ static struct ata_port_operations sas_sata_ops = {
.error_handler = ata_std_error_handler,
.post_internal_cmd = sas_ata_post_internal,
.qc_defer = ata_std_qc_defer,
- .qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.qc_fill_rtf = sas_ata_qc_fill_rtf,
.set_dmamode = sas_ata_set_dmamode,
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 9c8cc723170d..8566bb1208a0 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -122,12 +122,12 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
error = -ENOMEM;
snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
- sas_ha->event_q = create_singlethread_workqueue(name);
+ sas_ha->event_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
if (!sas_ha->event_q)
goto Undo_ports;
snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
- sas_ha->disco_q = create_singlethread_workqueue(name);
+ sas_ha->disco_q = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
if (!sas_ha->disco_q)
goto Undo_event_q;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 7c147d6ea8a8..e5a9c5a323f8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -306,6 +306,14 @@ struct lpfc_stats {
struct lpfc_hba;
+/* Data structure to keep withheld FLOGI_ACC information */
+struct lpfc_defer_flogi_acc {
+ bool flag;
+ u16 rx_id;
+ u16 ox_id;
+ struct lpfc_nodelist *ndlp;
+
+};
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
@@ -1430,9 +1438,7 @@ struct lpfc_hba {
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
- bool defer_flogi_acc_flag;
- uint16_t defer_flogi_acc_rx_id;
- uint16_t defer_flogi_acc_ox_id;
+ struct lpfc_defer_flogi_acc defer_flogi_acc;
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
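
Folding the three defer_flogi_acc_* scalars into one struct also makes room for the new ndlp member, which pins the node for as long as the ACC stays deferred. The invariant, condensed from the lpfc_els.c and lpfc_hbadisc.c hunks later in this patch (rx_id/ox_id stand in for the bf_get() extractions in the real code): the lpfc_nlp_get() taken when the deferral is armed is matched by exactly one lpfc_nlp_put() when the ACC is finally sent or the link drops.

	/* Arm (lpfc_els_rcv_flogi): record the exchange and hold the node. */
	phba->defer_flogi_acc.rx_id = rx_id;
	phba->defer_flogi_acc.ox_id = ox_id;
	phba->defer_flogi_acc.flag = true;
	phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);

	/* Disarm (ACC transmitted, or lpfc_linkdown): drop that reference once. */
	if (phba->defer_flogi_acc.ndlp) {
		lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
		phba->defer_flogi_acc.ndlp = NULL;
	}
	phba->defer_flogi_acc.flag = false;
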
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 4756a3f82531..85059b83ea6b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3208,6 +3208,9 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
cmdiocbq->num_bdes = num_bde;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
+ if (phba->cfg_vmid_app_header)
+ cmdiocbq->cmd_flag |= LPFC_IO_VMID;
+
cmdiocbq->vport = phba->pport;
cmdiocbq->cmd_cmpl = NULL;
cmdiocbq->bpl_dmabuf = txbmp;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 2dedd1493e5b..134bc96dd134 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1572,8 +1572,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
} else
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "3065 GFT_ID failed x%08x\n", ulp_status);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY,
+ "3065 GFT_ID status x%08x\n", ulp_status);
out:
lpfc_ct_free_iocb(phba, cmdiocb);
@@ -1647,6 +1647,18 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
+ /* If the caller wanted a synchronous DA_ID completion, signal the
+ * wait obj and clear flag to reset the vport.
+ */
+ if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
+ if (ndlp->da_id_waitq)
+ wake_up(ndlp->da_id_waitq);
+ }
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
+ spin_unlock_irq(&ndlp->lock);
+
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
return;
@@ -2246,7 +2258,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0229 FDMI cmd %04x failed, latt = %d "
+ "0229 FDMI cmd %04x latt = %d "
"ulp_status: x%x, rid x%x\n",
be16_to_cpu(fdmi_cmd), latt, ulp_status,
ulp_word4);
@@ -2263,9 +2275,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for a CT LS_RJT response */
cmd = be16_to_cpu(fdmi_cmd);
if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) {
- /* FDMI rsp failed */
+ /* Log FDMI reject */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
- "0220 FDMI cmd failed FS_RJT Data: x%x", cmd);
+ "0220 FDMI cmd FS_RJT Data: x%x", cmd);
/* Should we fallback to FDMI-2 / FDMI-1 ? */
switch (cmd) {
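
The new out-path in lpfc_cmpl_ct() supports a synchronous DA_ID caller: the issuer registers a wait queue and sets NLP_WAIT_FOR_DA_ID (both added to lpfc_disc.h below), and the completion wakes it before clearing the flag under the node lock. The issuing half is not part of this hunk; a sketch of what it looks like, modeled on lpfc's existing logo_waitq pattern, with an illustrative 2-second timeout:

	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(da_id_waitq);

	spin_lock_irq(&ndlp->lock);
	ndlp->da_id_waitq = &da_id_waitq;
	ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
	spin_unlock_irq(&ndlp->lock);

	/* ... issue the DA_ID CT request ... */

	/* Sleep until the completion clears NLP_WAIT_FOR_DA_ID and wakes
	 * us, or the timeout expires.
	 */
	wait_event_timeout(da_id_waitq,
			   !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
			   msecs_to_jiffies(2000));
	ndlp->da_id_waitq = NULL;
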
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f82615d87c4b..f5ae8cc15820 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -90,6 +90,8 @@ enum lpfc_nlp_save_flags {
NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
/* wait for outstanding LOGO to cmpl */
NLP_WAIT_FOR_LOGO = 0x2,
+ /* wait for outstanding DA_ID to finish */
+ NLP_WAIT_FOR_DA_ID = 0x4
};
struct lpfc_nodelist {
@@ -159,7 +161,12 @@ struct lpfc_nodelist {
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
uint32_t nlp_defer_did;
+
+ /* These wait objects are NPIV specific. These IOs must complete
+ * synchronously.
+ */
wait_queue_head_t *logo_waitq;
+ wait_queue_head_t *da_id_waitq;
};
struct lpfc_node_rrq {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 929cbfc95163..d737b897ddd8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -979,7 +979,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->fcoe_cvl_eventtag_attn =
phba->fcoe_cvl_eventtag;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
- "2611 FLOGI failed on FCF (x%x), "
+ "2611 FLOGI FCF (x%x), "
"status:x%x/x%x, tmo:x%x, perform "
"roundrobin FCF failover\n",
phba->fcf.current_rec.fcf_indx,
@@ -997,11 +997,11 @@ stop_rr_fcf_flogi:
if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2858 FLOGI failure Status:x%x/x%x TMO"
- ":x%x Data x%lx x%x\n",
- ulp_status, ulp_word4, tmo,
- phba->hba_flag, phba->fcf.fcf_flag);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2858 FLOGI Status:x%x/x%x TMO"
+ ":x%x Data x%lx x%x\n",
+ ulp_status, ulp_word4, tmo,
+ phba->hba_flag, phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -1023,7 +1023,7 @@ stop_rr_fcf_flogi:
lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "0150 FLOGI failure Status:x%x/x%x "
+ "0150 FLOGI Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
ulp_status, ulp_word4, cmdiocb->sli4_xritag,
tmo, kref_read(&ndlp->kref));
@@ -1032,11 +1032,11 @@ stop_rr_fcf_flogi:
if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE))) {
- /* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0100 FLOGI failure Status:x%x/x%x "
- "TMO:x%x\n",
- ulp_status, ulp_word4, tmo);
+ /* Warn FLOGI status */
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0100 FLOGI Status:x%x/x%x "
+ "TMO:x%x\n",
+ ulp_status, ulp_word4, tmo);
goto flogifail;
}
@@ -1099,8 +1099,10 @@ stop_rr_fcf_flogi:
sp->cmn.priority_tagging, kref_read(&ndlp->kref));
/* reinitialize the VMID datastructure before returning */
- if (lpfc_is_vmid_enabled(phba))
+ if (lpfc_is_vmid_enabled(phba)) {
lpfc_reinit_vmid(vport);
+ vport->vmid_flag = 0;
+ }
if (sp->cmn.priority_tagging)
vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
LPFC_VMID_TYPE_PRIO);
@@ -1390,7 +1392,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
/* Check for a deferred FLOGI ACC condition */
- if (phba->defer_flogi_acc_flag) {
+ if (phba->defer_flogi_acc.flag) {
/* lookup ndlp for received FLOGI */
ndlp = lpfc_findnode_did(vport, 0);
if (!ndlp)
@@ -1404,34 +1406,38 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4) {
bf_set(wqe_ctxt_tag,
&defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
- phba->defer_flogi_acc_rx_id);
+ phba->defer_flogi_acc.rx_id);
bf_set(wqe_rcvoxid,
&defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
- phba->defer_flogi_acc_ox_id);
+ phba->defer_flogi_acc.ox_id);
} else {
icmd = &defer_flogi_acc.iocb;
- icmd->ulpContext = phba->defer_flogi_acc_rx_id;
+ icmd->ulpContext = phba->defer_flogi_acc.rx_id;
icmd->unsli3.rcvsli3.ox_id =
- phba->defer_flogi_acc_ox_id;
+ phba->defer_flogi_acc.ox_id;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
" ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc_rx_id,
- phba->defer_flogi_acc_ox_id, phba->hba_flag);
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id, phba->hba_flag);
/* Send deferred FLOGI ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
ndlp, NULL);
- phba->defer_flogi_acc_flag = false;
- vport->fc_myDID = did;
+ phba->defer_flogi_acc.flag = false;
- /* Decrement ndlp reference count to indicate the node can be
- * released when other references are removed.
+ /* Decrement the held ndlp that was incremented when the
+ * deferred flogi acc flag was set.
*/
- lpfc_nlp_put(ndlp);
+ if (phba->defer_flogi_acc.ndlp) {
+ lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
+ phba->defer_flogi_acc.ndlp = NULL;
+ }
+
+ vport->fc_myDID = did;
}
return 0;
@@ -1958,16 +1964,16 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ulp_status) {
/* Check for retry */
- /* RRQ failed Don't print the vport to vport rjts */
+ /* Warn RRQ status Don't print the vport to vport rjts */
if (ulp_status != IOSTAT_LS_RJT ||
(((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
(phba)->pport->cfg_log_verbose & LOG_ELS)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2881 RRQ failure DID:%06X Status:"
- "x%x/x%x\n",
- ndlp->nlp_DID, ulp_status,
- ulp_word4);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2881 RRQ DID:%06X Status:"
+ "x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
}
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
@@ -2071,16 +2077,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
goto out;
}
- /* PLOGI failed Don't print the vport to vport rjts */
+ /* Warn PLOGI status Don't print the vport to vport rjts */
if (ulp_status != IOSTAT_LS_RJT ||
(((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
(phba)->pport->cfg_log_verbose & LOG_ELS)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2753 PLOGI failure DID:%06X "
- "Status:x%x/x%x\n",
- ndlp->nlp_DID, ulp_status,
- ulp_word4);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2753 PLOGI DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
@@ -2317,7 +2323,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_nodelist *ndlp;
char *mode;
- u32 loglevel;
u32 ulp_status;
u32 ulp_word4;
bool release_node = false;
@@ -2366,17 +2371,14 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* could be expected.
*/
if (test_bit(FC_FABRIC, &vport->fc_flag) ||
- vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH) {
- mode = KERN_ERR;
- loglevel = LOG_TRACE_EVENT;
- } else {
+ vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)
+ mode = KERN_WARNING;
+ else
mode = KERN_INFO;
- loglevel = LOG_ELS;
- }
- /* PRLI failed */
- lpfc_printf_vlog(vport, mode, loglevel,
- "2754 PRLI failure DID:%06X Status:x%x/x%x, "
+ /* Warn PRLI status */
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
+ "2754 PRLI DID:%06X Status:x%x/x%x, "
"data: x%x x%x x%x\n",
ndlp->nlp_DID, ulp_status,
ulp_word4, ndlp->nlp_state,
@@ -2848,11 +2850,11 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
goto out;
}
- /* ADISC failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, ulp_status,
- ulp_word4);
+ /* Warn ADISC status */
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2755 ADISC DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
@@ -3039,12 +3041,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* discovery. The PLOGI will retry.
*/
if (ulp_status) {
- /* LOGO failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2756 LOGO failure, No Retry DID:%06X "
- "Status:x%x/x%x\n",
- ndlp->nlp_DID, ulp_status,
- ulp_word4);
+ /* Warn LOGO status */
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "2756 LOGO, No Retry DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
skip_recovery = 1;
@@ -4831,11 +4833,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0125 FDISC Failed (x%x). "
- "Fabric out of resources\n",
- stat.un.lsRjtError);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0125 FDISC (x%x). "
+ "Fabric out of resources\n",
+ stat.un.lsRjtError);
lpfc_vport_set_state(vport,
FC_VPORT_NO_FABRIC_RSCS);
}
@@ -4871,11 +4872,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
LSEXP_NOTHING_MORE) {
vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
retry = 1;
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0820 FLOGI Failed (x%x). "
- "BBCredit Not Supported\n",
- stat.un.lsRjtError);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0820 FLOGI (x%x). "
+ "BBCredit Not Supported\n",
+ stat.un.lsRjtError);
}
break;
@@ -4885,11 +4885,10 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
(stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0122 FDISC Failed (x%x). "
- "Fabric Detected Bad WWN\n",
- stat.un.lsRjtError);
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0122 FDISC (x%x). "
+ "Fabric Detected Bad WWN\n",
+ stat.un.lsRjtError);
lpfc_vport_set_state(vport,
FC_VPORT_FABRIC_REJ_WWN);
}
@@ -5240,9 +5239,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0109 ACC to LOGO completes to NPort x%x refcnt %d "
- "Data: x%x x%x x%x\n",
- ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ "last els x%x Data: x%x x%x x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref),
+ ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
/* This clause allows the LOGO ACC to complete and free resources
* for the Fabric Domain Controller. It does deliberately skip
@@ -5254,18 +5254,22 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
- /* If PLOGI is being retried, PLOGI completion will cleanup the
- * node. The NLP_NPR_2B_DISC flag needs to be retained to make
- * progress on nodes discovered from last RSCN.
- */
- if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
- (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
- goto out;
-
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
lpfc_unreg_rpi(vport, ndlp);
+ /* If came from PRLO, then PRLO_ACC is done.
+ * Start rediscovery now.
+ */
+ if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ }
}
+
out:
/*
* The driver received a LOGO from the rport and has ACK'd it.
@@ -5344,8 +5348,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
u32 ulp_status, ulp_word4, tmo, did, iotag;
if (!vport) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "3177 ELS response failed\n");
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "3177 null vport in ELS rsp\n");
goto out;
}
if (cmdiocb->context_un.mbox)
@@ -8454,9 +8458,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Defer ACC response until AFTER we issue a FLOGI */
if (!test_bit(HBA_FLOGI_ISSUED, &phba->hba_flag)) {
- phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
+ phba->defer_flogi_acc.rx_id = bf_get(wqe_ctxt_tag,
&wqe->xmit_els_rsp.wqe_com);
- phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
+ phba->defer_flogi_acc.ox_id = bf_get(wqe_rcvoxid,
&wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
@@ -8464,11 +8468,17 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3344 Deferring FLOGI ACC: rx_id: x%x,"
" ox_id: x%x, hba_flag x%lx\n",
- phba->defer_flogi_acc_rx_id,
- phba->defer_flogi_acc_ox_id, phba->hba_flag);
+ phba->defer_flogi_acc.rx_id,
+ phba->defer_flogi_acc.ox_id, phba->hba_flag);
- phba->defer_flogi_acc_flag = true;
+ phba->defer_flogi_acc.flag = true;
+ /* This nlp_get is paired with nlp_puts that reset the
+ * defer_flogi_acc.flag back to false. We need to retain
+ * a kref on the ndlp until the deferred FLOGI ACC is
+ * processed or cancelled.
+ */
+ phba->defer_flogi_acc.ndlp = lpfc_nlp_get(ndlp);
return 0;
}
@@ -9641,11 +9651,12 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
continue;
- /* On the ELS ring we can have ELS_REQUESTs or
- * GEN_REQUESTs waiting for a response.
+ /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs,
+ * or GEN_REQUESTs waiting for a CQE response.
*/
ulp_command = get_job_cmnd(phba, piocb);
- if (ulp_command == CMD_ELS_REQUEST64_CR) {
+ if (ulp_command == CMD_ELS_REQUEST64_WQE ||
+ ulp_command == CMD_XMIT_ELS_RSP64_WQE) {
list_add_tail(&piocb->dlist, &abort_list);
/* If the link is down when flushing ELS commands
@@ -10504,7 +10515,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
/* retain node if our response is deferred */
- if (phba->defer_flogi_acc_flag)
+ if (phba->defer_flogi_acc.flag)
break;
if (newnode)
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -10742,7 +10753,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0115 Unknown ELS command x%x "
"received from NPORT x%x\n", cmd, did);
if (newnode)
@@ -11310,10 +11321,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- /* FDISC failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0126 FDISC failed. (x%x/x%x)\n",
- ulp_status, ulp_word4);
+ /* Warn FDISC status */
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
+ "0126 FDISC cmpl status: x%x/x%x)\n",
+ ulp_status, ulp_word4);
goto fdisc_failed;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 6943f6c6395c..9241075f72fa 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -175,7 +175,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down. */
- if (test_bit(FC_UNLOADING, &vport->load_flag)) {
+ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
+ !test_bit(HBA_SETUP, &phba->hba_flag)) {
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
@@ -526,6 +527,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
* the following lpfc_nlp_put is necessary after fabric node is
* recovered.
*/
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
if (recovering) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY | LOG_NODE,
@@ -538,6 +542,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
spin_unlock_irqrestore(&ndlp->lock, iflags);
+ return fcf_inuse;
} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Fabric node fully recovered before this dev_loss_tmo
* queue work is processed. Thus, ignore the
@@ -551,15 +556,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp, ndlp->nlp_flag,
vport->port_state);
- spin_lock_irqsave(&ndlp->lock, iflags);
- ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
return fcf_inuse;
}
- spin_lock_irqsave(&ndlp->lock, iflags);
- ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_nlp_put(ndlp);
return fcf_inuse;
}
@@ -1254,7 +1253,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_scsi_dev_block(phba);
offline = pci_channel_offline(phba->pcidev);
- phba->defer_flogi_acc_flag = false;
+ /* Decrement the held ndlp if there is a deferred flogi acc */
+ if (phba->defer_flogi_acc.flag) {
+ if (phba->defer_flogi_acc.ndlp) {
+ lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
+ phba->defer_flogi_acc.ndlp = NULL;
+ }
+ }
+ phba->defer_flogi_acc.flag = false;
/* Clear external loopback plug detected flag */
phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
@@ -1376,7 +1382,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
(vport != phba->pport))
return;
- if (phba->defer_flogi_acc_flag) {
+ if (phba->defer_flogi_acc.flag) {
clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
clear_bit(FC_RSCN_MODE, &vport->fc_flag);
clear_bit(FC_NLP_MORE, &vport->fc_flag);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 2108b4cb7815..d5c15742f7f2 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -562,6 +562,27 @@ struct fc_vft_header {
#include <uapi/scsi/fc/fc_els.h>
/*
+ * Application Header
+ */
+struct fc_app_header {
+ uint32_t dst_app_id;
+ uint32_t src_app_id;
+#define LOOPBACK_SRC_APPID 0x4321
+ uint32_t word2;
+ uint32_t word3;
+};
+
+/*
+ * dfctl optional header definition
+ */
+enum lpfc_fc_dfctl {
+ LPFC_FC_NO_DEVICE_HEADER,
+ LPFC_FC_16B_DEVICE_HEADER,
+ LPFC_FC_32B_DEVICE_HEADER,
+ LPFC_FC_64B_DEVICE_HEADER,
+};
+
+/*
* Extended Link Service LS_COMMAND codes (Payload Word 0)
*/
#ifdef __BIG_ENDIAN_BITFIELD
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 500253007b1d..26e1313ebb21 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4847,6 +4847,7 @@ struct fcp_iwrite64_wqe {
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
@@ -4863,6 +4864,7 @@ struct fcp_iread64_wqe {
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
@@ -4879,6 +4881,7 @@ struct fcp_icmnd64_wqe {
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
+/* Note: payload_offset_len field depends on ASIC support */
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e1dfa96c2a55..0dd451009b07 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4699,6 +4699,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
uint64_t wwn;
bool use_no_reset_hba = false;
int rc;
+ u8 if_type;
if (lpfc_no_hba_reset_cnt) {
if (phba->sli_rev < LPFC_SLI_REV4 &&
@@ -4773,10 +4774,24 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_id = LPFC_MAX_TARGET;
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
- if (phba->sli_rev == LPFC_SLI_REV4)
- shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
- else
+
+ /* Set max_cmd_len applicable to ASIC support */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ fallthrough;
+ case LPFC_SLI_INTF_IF_TYPE_6:
+ shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
+ break;
+ default:
+ shost->max_cmd_len = LPFC_FCP_CDB_LEN;
+ break;
+ }
+ } else {
shost->max_cmd_len = LPFC_FCP_CDB_LEN;
+ }
if (phba->sli_rev == LPFC_SLI_REV4) {
if (!phba->cfg_fcp_mq_threshold ||
@@ -10436,6 +10451,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *eqcpup;
struct lpfc_eq_intr_info *eqi;
+ u32 wqesize;
/*
* Create HBA Record arrays.
@@ -10655,9 +10671,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
* Create ELS Work Queues
*/
- /* Create slow-path ELS Work Queue */
+ /*
+ * Create slow-path ELS Work Queue.
+ * Increase the ELS WQ size when WQEs contain an embedded cdb
+ */
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
- phba->sli4_hba.wq_esize,
+ wqesize,
phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -13861,12 +13883,7 @@ fcponly:
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
- rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "6400 Can't set dma maximum segment size\n");
- return rc;
- }
+ dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
/*
* Check whether the adapter supports an embedded copy of the
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f6a53446e57f..4574716c8764 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2652,8 +2652,26 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* flush the target */
lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- /* Treat like rcv logo */
- lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+ /* Send PRLO_ACC */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+
+ /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR.
+ * lpfc_cmpl_els_logo_acc is expected to restart discovery.
+ */
+ ndlp->nlp_last_elscmd = ELS_CMD_PRLO;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_last_elscmd,
+ kref_read(&ndlp->kref));
+
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 98ce9d97a225..0eaede8275da 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4760,7 +4760,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
/* Word 3 */
bf_set(payload_offset_len, &wqe->fcp_icmd,
- sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp));
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
/* Word 6 */
bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
@@ -5555,11 +5555,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
- if (!pring_s4) {
+ /* if the io_wq & pring are gone, the port was reset. */
+ if (!phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq ||
+ !phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+ "2877 SCSI Layer I/O Abort Request "
+ "IO CMPL Status x%x ID %d LUN %llu "
+ "HBA_SETUP %d\n", FAILED,
+ cmnd->device->id,
+ (u64)cmnd->device->lun,
+ test_bit(HBA_SETUP, &phba->hba_flag));
ret = FAILED;
goto out_unlock_hba;
}
+ pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
spin_lock(&pring_s4->ring_lock);
}
/* the command is in process of being cancelled */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 88debef2fb6d..2ec6e55771b4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1940,12 +1940,15 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+
/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
- phba->link_state == LPFC_LINK_DOWN)
- return 0;
+ phba->link_state < LPFC_LINK_UP) {
+ ret_val = 0;
+ goto out_unlock;
+ }
- spin_lock_irqsave(&phba->hbalock, iflags);
sync_buf = __lpfc_sli_get_iocbq(phba);
if (!sync_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
@@ -4687,6 +4690,17 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ if (!phba->sli4_hba.hdwq ||
+ !phba->sli4_hba.hdwq[i].io_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "7777 hdwq's deleted %lx "
+ "%lx %x %x\n",
+ phba->pport->load_flag,
+ phba->hba_flag,
+ phba->link_state,
+ phba->sli.sli_flag);
+ return;
+ }
pring = phba->sli4_hba.hdwq[i].io_wq->pring;
spin_lock_irq(&pring->ring_lock);
@@ -8807,7 +8821,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_queue_setup(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0381 Error %d during queue setup.\n ", rc);
+ "0381 Error %d during queue setup.\n", rc);
goto out_stop_timers;
}
/* Initialize the driver internal SLI layer lists. */
@@ -11079,9 +11093,17 @@ __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
/* Word 9 */
bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
- /* Word 12 */
- if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
+ if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
+ /* Word 10 */
+ if (cmdiocbq->cmd_flag & LPFC_IO_VMID) {
+ bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1);
+ wqe->words[31] = LOOPBACK_SRC_APPID;
+ }
+
+ /* Word 12 */
wqe->xmit_sequence.xmit_len = full_size;
+ }
else
wqe->xmit_sequence.xmit_len =
wqe->xmit_sequence.bde.tus.f.bdeSize;
@@ -12473,8 +12495,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
cmdiocb->iocb.ulpClass,
LPFC_WQE_CQ_ID_DEFAULT, ia, false);
- abtsiocbp->vport = vport;
-
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
if (cmdiocb->cmd_flag & LPFC_IO_FCP)
@@ -18422,6 +18442,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
/* make rctl_names static to save stack space */
struct fc_vft_header *fc_vft_hdr;
+ struct fc_app_header *fc_app_hdr;
uint32_t *header = (uint32_t *) fc_hdr;
#define FC_RCTL_MDS_DIAGS 0xF4
@@ -18477,6 +18498,32 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
goto drop;
}
+ if (unlikely(phba->link_flag == LS_LOOPBACK_MODE &&
+ phba->cfg_vmid_app_header)) {
+ /* Application header is 16B device header */
+ if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) {
+ fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1);
+ if (be32_to_cpu(fc_app_hdr->src_app_id) !=
+ LOOPBACK_SRC_APPID) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_LIBDFC,
+ "1932 Loopback src app id "
+ "not matched, app_id:x%x\n",
+ be32_to_cpu(fc_app_hdr->src_app_id));
+
+ goto drop;
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_LIBDFC,
+ "1933 Loopback df_ctl bit not set, "
+ "df_ctl:x%x\n",
+ fc_hdr->fh_df_ctl);
+
+ goto drop;
+ }
+ }
+
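/*
 * Behavioral note (sketch, not part of the patch): in loopback mode with
 * VMID application headers enabled, every received frame must carry the
 * 16-byte device header (flagged in fh_df_ctl) and echo the
 * LOOPBACK_SRC_APPID that the transmit path above stamps into WQE word 31;
 * frames failing either check are dropped before normal frame processing.
 */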
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2538 Received frame rctl:x%x, type:x%x, "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
@@ -21140,7 +21187,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
if (!piocbq) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2823 txq empty and txq_cnt is %d\n ",
+ "2823 txq empty and txq_cnt is %d\n",
txq_cnt);
break;
}
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 7ac9ef281881..e70f163fab90 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.3"
+#define LPFC_DRIVER_VERSION "14.4.0.5"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index 773e02ae20c3..cc3e4736f2fe 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -321,6 +321,5 @@ lpfc_reinit_vmid(struct lpfc_vport *vport)
if (!hash_empty(vport->hash_table))
hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode)
hash_del(&cur->hnode);
- vport->vmid_flag = 0;
write_unlock(&vport->vmid_lock);
}
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 4439167a5188..7a4d4d8e2ad5 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -626,6 +626,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
int rc;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -679,21 +680,49 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
if (!ndlp)
goto skip_logo;
+ /* Send the DA_ID and Fabric LOGO to clean up the NPIV fabric entries. */
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP &&
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
- /* Send DA_ID and wait for a completion. */
+ /* Send DA_ID and wait for a completion. This is best
+ * effort. If the DA_ID fails, the fabric will likely
+ * "leak" NportIDs, but at least the driver issued the
+ * command.
+ */
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ndlp)
+ goto issue_logo;
+
+ spin_lock_irq(&ndlp->lock);
+ ndlp->da_id_waitq = &waitq;
+ ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
+ spin_unlock_irq(&ndlp->lock);
+
rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
- if (rc) {
- lpfc_printf_log(vport->phba, KERN_WARNING,
- LOG_VPORT,
- "1829 CT command failed to "
- "delete objects on fabric, "
- "rc %d\n", rc);
+ if (!rc) {
+ wait_event_timeout(waitq,
+ !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
+ msecs_to_jiffies(phba->fc_ratov * 2000));
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
+ "1829 DA_ID issue status %d. "
+ "SFlag x%x NState x%x, NFlag x%x "
+ "Rpi x%x\n",
+ rc, ndlp->save_flags, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
+
+ /* Remove the waitq and save_flags. It no
+ * longer matters if the wake happened.
+ */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->da_id_waitq = NULL;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
+ spin_unlock_irq(&ndlp->lock);
}
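/*
 * Completion-side sketch (illustrative, not part of this patch): the waiter
 * above assumes the DA_ID completion handler clears NLP_WAIT_FOR_DA_ID and
 * wakes da_id_waitq, along these lines:
 *
 *	spin_lock_irq(&ndlp->lock);
 *	ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
 *	if (ndlp->da_id_waitq)
 *		wake_up(ndlp->da_id_waitq);
 *	spin_unlock_irq(&ndlp->lock);
 */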
+issue_logo:
/*
* If the vpi is not registered, then a valid FDISC doesn't
* exist and there is no need for a ELS LOGO. Just cleanup
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 53ee8f84d094..f225bb20aa22 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup);
* Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
* so bus errors are unavoidable.
*
- * If a MOVE.B instruction faults, we assume that zero bytes were transferred
- * and simply retry. That assumption probably depends on target behaviour but
- * seems to hold up okay. The NOP provides synchronization: without it the
- * fault can sometimes occur after the program counter has moved past the
- * offending instruction. Post-increment addressing can't be used.
+ * If a MOVE.B instruction faults during a receive operation, we assume the
+ * target sent nothing and try again. That assumption probably depends on
+ * target firmware but it seems to hold up okay. If a fault happens during a
+ * send operation, the target may or may not have seen /ACK and got the byte.
+ * It's uncertain, so the whole SCSI command gets retried.
+ *
+ * The NOP is needed for synchronization because the fault address in the
+ * exception stack frame may or may not be the instruction that actually
+ * caused the bus error. Post-increment addressing can't be used.
*/
#define MOVE_BYTE(operands) \
@@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup);
".previous \n" \
: "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
-#define MAC_PDMA_DELAY 32
-
static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
{
unsigned char *addr = start;
@@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
if (n >= 1) {
MOVE_BYTE("%0@,%3@");
if (result)
- goto out;
+ return -1;
}
if (n >= 1 && ((unsigned long)addr & 1)) {
MOVE_BYTE("%0@,%3@");
if (result)
- goto out;
+ return -2;
}
while (n >= 32)
MOVE_16_WORDS("%0@+,%3@");
while (n >= 2)
MOVE_WORD("%0@+,%3@");
if (result)
- return start - addr; /* Negated to indicate uncertain length */
+ return start - addr - 1; /* Negated to indicate uncertain length */
if (n == 1)
MOVE_BYTE("%0@,%3@");
-out:
return addr - start;
}
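/*
 * Return encoding of mac_pdma_send() after this change (derived from the
 * code above, not part of the patch): a non-negative value is a definite
 * byte count; a negative value is roughly -(bytes sent) - 1, i.e. a bus
 * error left the count uncertain by one byte, so the caller abandons the
 * transfer and the SCSI command is retried as a whole. A hypothetical
 * decoder:
 *
 *	int sent = (ret < 0) ? -ret - 1 : ret;
 *	bool uncertain = (ret < 0);
 */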
@@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value)
out_be32(hostdata->io + (CTRL_REG << 4), value);
}
+static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata)
+{
+ unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */
+ unsigned char basr;
+
+again:
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+
+ if (!(basr & BASR_PHASE_MATCH))
+ return 1;
+
+ if (basr & BASR_IRQ)
+ return -1;
+
+ if (basr & BASR_DRQ)
+ return 0;
+
+ if (n-- == 0) {
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+ "%s: DRQ timeout\n", __func__);
+ return -1;
+ }
+
+ NCR5380_poll_politely2(hostdata,
+ BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
+ BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0);
+ goto again;
+}
+
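/*
 * Return convention of macscsi_wait_for_drq() (summary of the code above,
 * not part of the patch): 0 means DRQ is asserted and the phase still
 * matches, so a chunk may be transferred; 1 means the target changed phase
 * (the normal end of a transfer); -1 means an interrupt condition or a DRQ
 * timeout. The pread/pwrite loops below therefore run while it returns 0,
 * with transfer_one_chunk() standing in for the chunked copy:
 *
 *	while (macscsi_wait_for_drq(hostdata) == 0)
 *		transfer_one_chunk();
 */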
static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
unsigned char *dst, int len)
{
u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
unsigned char *d = dst;
- int result = 0;
hostdata->pdma_residual = len;
- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
- BASR_DRQ | BASR_PHASE_MATCH,
- BASR_DRQ | BASR_PHASE_MATCH, 0)) {
- int bytes;
+ while (macscsi_wait_for_drq(hostdata) == 0) {
+ int bytes, chunk_bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX)
write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
CTRL_INTERRUPTS_ENABLE);
- bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
+ chunk_bytes = min(hostdata->pdma_residual, 512);
+ bytes = mac_pdma_recv(s, d, chunk_bytes);
+
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
if (bytes > 0) {
d += bytes;
@@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
}
if (hostdata->pdma_residual == 0)
- goto out;
+ break;
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- BUS_AND_STATUS_REG, BASR_ACK,
- BASR_ACK, 0) < 0)
- scmd_printk(KERN_DEBUG, hostdata->connected,
- "%s: !REQ and !ACK\n", __func__);
- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
- goto out;
+ if (bytes > 0)
+ continue;
- if (bytes == 0)
- udelay(MAC_PDMA_DELAY);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+ "%s: bus error [%d/%d] (%d/%d)\n",
+ __func__, d - dst, len, bytes, chunk_bytes);
- if (bytes >= 0)
+ if (bytes == 0)
continue;
- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
- "%s: bus error (%d/%d)\n", __func__, d - dst, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- result = -1;
- goto out;
+ if (macscsi_wait_for_drq(hostdata) <= 0)
+ set_host_byte(hostdata->connected, DID_ERROR);
+ break;
}
- scmd_printk(KERN_ERR, hostdata->connected,
- "%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- result = -1;
-out:
- if (macintosh_config->ident == MAC_MODEL_IIFX)
- write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
- return result;
+ return 0;
}
static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
@@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
{
unsigned char *s = src;
u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
- int result = 0;
hostdata->pdma_residual = len;
- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
- BASR_DRQ | BASR_PHASE_MATCH,
- BASR_DRQ | BASR_PHASE_MATCH, 0)) {
- int bytes;
+ while (macscsi_wait_for_drq(hostdata) == 0) {
+ int bytes, chunk_bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX)
write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
CTRL_INTERRUPTS_ENABLE);
- bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
+ chunk_bytes = min(hostdata->pdma_residual, 512);
+ bytes = mac_pdma_send(s, d, chunk_bytes);
+
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
if (bytes > 0) {
s += bytes;
hostdata->pdma_residual -= bytes;
}
- if (hostdata->pdma_residual == 0) {
- if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
- TCR_LAST_BYTE_SENT,
- TCR_LAST_BYTE_SENT,
- 0) < 0) {
- scmd_printk(KERN_ERR, hostdata->connected,
- "%s: Last Byte Sent timeout\n", __func__);
- result = -1;
- }
- goto out;
- }
+ if (hostdata->pdma_residual == 0)
+ break;
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
- BUS_AND_STATUS_REG, BASR_ACK,
- BASR_ACK, 0) < 0)
- scmd_printk(KERN_DEBUG, hostdata->connected,
- "%s: !REQ and !ACK\n", __func__);
- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
- goto out;
+ if (bytes > 0)
+ continue;
- if (bytes == 0)
- udelay(MAC_PDMA_DELAY);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+ "%s: bus error [%d/%d] (%d/%d)\n",
+ __func__, s - src, len, bytes, chunk_bytes);
- if (bytes >= 0)
+ if (bytes == 0)
continue;
- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
- "%s: bus error (%d/%d)\n", __func__, s - src, len);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- result = -1;
- goto out;
+ if (macscsi_wait_for_drq(hostdata) <= 0)
+ set_host_byte(hostdata->connected, DID_ERROR);
+ break;
}
- scmd_printk(KERN_ERR, hostdata->connected,
- "%s: phase mismatch or !DRQ\n", __func__);
- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
- result = -1;
-out:
- if (macintosh_config->ident == MAC_MODEL_IIFX)
- write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
- return result;
+ return 0;
}
static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
@@ -432,7 +432,7 @@ static struct scsi_host_template mac_scsi_template = {
.eh_host_reset_handler = macscsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = 1,
+ .sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = sizeof(struct NCR5380_cmd),
@@ -470,6 +470,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ mac_scsi_template.sg_tablesize = 1;
+
instance = scsi_host_alloc(&mac_scsi_template,
sizeof(struct NCR5380_hostdata));
if (!instance)
@@ -491,6 +494,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
+ if (instance->sg_tablesize > 1)
+ host_flags |= FLAG_DMA_FIXUP;
+
error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP);
if (error)
goto fail_init;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 5680c6cdb221..088cc40ae866 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -814,12 +814,12 @@ struct MR_HOST_DEVICE_LIST {
__le32 size;
__le32 count;
__le32 reserved[2];
- struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1];
+ struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[] __counted_by_le(count);
} __packed;
#define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \
(sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \
- (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1)))
+ (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)))
/*
@@ -2473,7 +2473,7 @@ struct MR_LD_VF_MAP {
union MR_LD_REF ref;
u8 ldVfCount;
u8 reserved[6];
- u8 policy[1];
+ u8 policy[];
};
struct MR_LD_VF_AFFILIATION {
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6c79c350a4d5..4ecf5284c0fc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6380,7 +6380,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
GFP_KERNEL);
if (!fusion->stream_detect_by_ld[i]) {
dev_err(&instance->pdev->dev,
- "unable to allocate stream detect by LD\n ");
+ "unable to allocate stream detect by LD\n");
for (j = 0; j < i; ++j)
kfree(fusion->stream_detect_by_ld[j]);
kfree(fusion->stream_detect_by_ld);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 6c1fb8149553..1eec23da28e2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1988,8 +1988,8 @@ megasas_fusion_start_watchdog(struct megasas_instance *instance)
sizeof(instance->fault_handler_work_q_name),
"poll_megasas%d_status", instance->host->host_no);
- instance->fw_fault_work_q =
- create_singlethread_workqueue(instance->fault_handler_work_q_name);
+ instance->fw_fault_work_q = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM, instance->fault_handler_work_q_name);
if (!instance->fw_fault_work_q) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
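/*
 * Conversion idiom used throughout this series (sketch, not part of the
 * patch): create_singlethread_workqueue(name) provides ordered,
 * single-context execution, which alloc_ordered_workqueue() preserves, and
 * WQ_MEM_RECLAIM guarantees a rescuer thread for queues sitting in the I/O
 * recovery path. The printf-style API also removes the need to pre-format
 * the name into a driver-private buffer:
 *
 *	wq = alloc_ordered_workqueue("poll_megasas%d_status",
 *				     WQ_MEM_RECLAIM,
 *				     instance->host->host_no);
 */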
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 6a19e17eb1a7..00cd18edfad6 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -67,6 +67,7 @@
#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00)
#define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8)
#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff)
+#define MPI3_INSTANCE_PGAD_INSTANCE_MASK (0x0000ffff)
struct mpi3_config_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -75,7 +76,8 @@ struct mpi3_config_request {
u8 ioc_use_only06;
u8 msg_flags;
__le16 change_count;
- __le16 reserved0a;
+ u8 proxy_ioc_number;
+ u8 reserved0b;
u8 page_version;
u8 page_number;
u8 page_type;
@@ -206,6 +208,9 @@ struct mpi3_config_page_header {
#define MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT (0x00b5)
#define MPI3_MFGPAGE_DEVID_SAS5116_NVME_MGMT (0x00b6)
#define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00b8)
+#define MPI3_MFGPAGE_DEVID_SAS5248_MPI (0x00f0)
+#define MPI3_MFGPAGE_DEVID_SAS5248_MPI_NS (0x00f1)
+#define MPI3_MFGPAGE_DEVID_SAS5248_PCIE_SWITCH (0x00f2)
struct mpi3_man_page0 {
struct mpi3_config_page_header header;
u8 chip_revision[8];
@@ -1074,6 +1079,8 @@ struct mpi3_io_unit_page8 {
#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
+#define MPI3_IOUNIT8_SBMODE_CURRENT_KEY_IOUNIT17 (0x10)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE_RECERTIFIED (0x08)
struct mpi3_io_unit_page9 {
struct mpi3_config_page_header header;
__le32 flags;
@@ -1089,6 +1096,8 @@ struct mpi3_io_unit_page9 {
#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004)
#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001)
#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+#define MPI3_IOUNIT9_FIRSTDEVICE_IN_DRIVER_PAGE_0 (0xfffe)
+
struct mpi3_io_unit_page10 {
struct mpi3_config_page_header header;
u8 flags;
@@ -1224,6 +1233,19 @@ struct mpi3_io_unit_page15 {
#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01)
#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02)
#define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00)
+
+struct mpi3_io_unit_page17 {
+ struct mpi3_config_page_header header;
+ u8 num_instances;
+ u8 instance;
+ __le16 reserved0a;
+ __le32 reserved0c[4];
+ __le16 key_length;
+ u8 encryption_algorithm;
+ u8 reserved1f;
+ __le32 current_key[];
+};
+#define MPI3_IOUNIT17_PAGEVERSION (0x00)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@@ -1311,7 +1333,7 @@ struct mpi3_driver_page0 {
u8 tur_interval;
u8 reserved10;
u8 security_key_timeout;
- __le16 reserved12;
+ __le16 first_device;
__le32 reserved14;
__le32 reserved18;
};
@@ -1324,10 +1346,13 @@ struct mpi3_driver_page0 {
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002)
+#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE1 (0x0000)
+#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE2 (0xffff)
struct mpi3_driver_page1 {
struct mpi3_config_page_header header;
__le32 flags;
- __le32 reserved0c;
+ u8 time_stamp_update;
+ u8 reserved0d[3];
__le16 host_diag_trace_max_size;
__le16 host_diag_trace_min_size;
__le16 host_diag_trace_decrement_size;
@@ -1565,16 +1590,13 @@ struct mpi3_sas_io_unit0_phy_data {
__le32 reserved10;
};
-#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX
-#define MPI3_SAS_IO_UNIT0_PHY_MAX (1)
-#endif
struct mpi3_sas_io_unit_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_phys;
u8 init_status;
__le16 reserved0e;
- struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
+ struct mpi3_sas_io_unit0_phy_data phy_data[];
};
#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
@@ -1606,9 +1628,6 @@ struct mpi3_sas_io_unit1_phy_data {
__le32 reserved08;
};
-#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX
-#define MPI3_SAS_IO_UNIT1_PHY_MAX (1)
-#endif
struct mpi3_sas_io_unit_page1 {
struct mpi3_config_page_header header;
__le16 control_flags;
@@ -1618,7 +1637,7 @@ struct mpi3_sas_io_unit_page1 {
u8 num_phys;
u8 sata_max_q_depth;
__le16 reserved12;
- struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX];
+ struct mpi3_sas_io_unit1_phy_data phy_data[];
};
#define MPI3_SASIOUNIT1_PAGEVERSION (0x00)
@@ -2353,6 +2372,10 @@ struct mpi3_device0_vd_format {
#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_MASK (0x0003)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_HDD (0x0000)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SSD (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_NO_GUIDANCE (0x0002)
union mpi3_device0_dev_spec_format {
struct mpi3_device0_sas_sata_format sas_sata_format;
struct mpi3_device0_pcie_format pcie_format;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 7df242190135..2c6e548cbd0f 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -205,13 +205,14 @@ struct mpi3_encrypted_hash_entry {
u8 hash_image_type;
u8 hash_algorithm;
u8 encryption_algorithm;
- u8 reserved03;
+ u8 flags;
__le16 public_key_size;
__le16 signature_size;
__le32 public_key[MPI3_PUBLIC_KEY_MAX];
};
-
-#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH (0x03)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05)
#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
@@ -230,6 +231,12 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05)
#define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06)
+/* hierarchical signature system (hss) */
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_87 (0x0b)
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c)
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d)
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f)
+
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 028784949873..c374867f9ba0 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -39,6 +39,12 @@ struct mpi3_ioc_init_request {
#define MPI3_WHOINIT_HOST_DRIVER (0x03)
#define MPI3_WHOINIT_MANUFACTURER (0x04)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_SSD (0x00000003)
+
struct mpi3_ioc_facts_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -140,6 +146,8 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
#define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010)
#define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008)
+#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
@@ -453,9 +461,6 @@ struct mpi3_event_data_sas_notify_primitive {
#define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02)
#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03)
#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04)
-#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT
-#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1)
-#endif
struct mpi3_event_sas_topo_phy_entry {
__le16 attached_dev_handle;
u8 link_rate;
@@ -496,7 +501,7 @@ struct mpi3_event_data_sas_topology_change_list {
u8 start_phy_num;
u8 exp_status;
u8 io_unit_port;
- struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT];
+ struct mpi3_event_sas_topo_phy_entry phy_entry[] __counted_by(num_entries);
};
#define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
@@ -545,9 +550,6 @@ struct mpi3_event_data_pcie_enumeration {
#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
#define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
#define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
-#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT
-#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1)
-#endif
struct mpi3_event_pcie_topo_port_entry {
__le16 attached_dev_handle;
u8 port_status;
@@ -588,7 +590,7 @@ struct mpi3_event_data_pcie_topology_change_list {
u8 switch_status;
u8 io_unit_port;
__le32 reserved0c;
- struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT];
+ struct mpi3_event_pcie_topo_port_entry port_entry[] __counted_by(num_entries);
};
#define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index fdc3d1968e43..b2ab25a1cfeb 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (31)
+#define MPI3_VERSION_UNIT (34)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
@@ -158,6 +158,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
+#define MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER (0x0000f007)
#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
@@ -410,6 +411,7 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
#define MPI3_IOCSTATUS_INVALID_FIELD (0x0007)
#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI3_IOCSTATUS_SHUTDOWN_ACTIVE (0x0009)
#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index dc2cdd5f0311..fcb0fa31536b 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -57,8 +57,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.9.1.0.51"
-#define MPI3MR_DRIVER_RELDATE "29-May-2024"
+#define MPI3MR_DRIVER_VERSION "8.12.0.0.50"
+#define MPI3MR_DRIVER_RELDATE "05-Sept-2024"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -178,7 +178,7 @@ extern atomic64_t event_counter;
#define MPI3MR_DEFAULT_SDEV_QD 32
/* Definitions for Threaded IRQ poll*/
-#define MPI3MR_IRQ_POLL_SLEEP 2
+#define MPI3MR_IRQ_POLL_SLEEP 20
#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
/* Definitions for the controller security status*/
@@ -213,6 +213,7 @@ extern atomic64_t event_counter;
#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_INDEX 0
#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA 1
+#define MPI3MR_THRESHOLD_REPLY_COUNT 100
/* SGE Flag definition */
#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
@@ -1059,7 +1060,6 @@ struct scmd_priv {
* @sbq_lock: Sense buffer queue lock
* @sbq_host_index: Sense buffer queue host index
* @event_masks: Event mask bitmap
- * @fwevt_worker_name: Firmware event worker thread name
* @fwevt_worker_thread: Firmware event worker thread
* @fwevt_lock: Firmware event lock
* @fwevt_list: Firmware event list
@@ -1090,6 +1090,7 @@ struct scmd_priv {
* @evtack_cmds_bitmap: Event Ack bitmap
* @delayed_evtack_cmds_list: Delayed event acknowledgment list
* @ts_update_counter: Timestamp update counter
+ * @ts_update_interval: Timestamp update interval
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
* @prev_reset_result: Result of previous reset
@@ -1240,7 +1241,6 @@ struct mpi3mr_ioc {
u32 sbq_host_index;
u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
- char fwevt_worker_name[MPI3MR_NAME_LENGTH];
struct workqueue_struct *fwevt_worker_thread;
spinlock_t fwevt_lock;
struct list_head fwevt_list;
@@ -1278,7 +1278,8 @@ struct mpi3mr_ioc {
unsigned long *evtack_cmds_bitmap;
struct list_head delayed_evtack_cmds_list;
- u32 ts_update_counter;
+ u16 ts_update_counter;
+ u16 ts_update_interval;
u8 reset_in_progress;
u8 unrecoverable;
int prev_reset_result;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index c196dc14ad20..f1ab76351bd8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -345,6 +345,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
{
u16 reply_desc_type, host_tag = 0;
u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
u32 ioc_loginfo = 0, sense_count = 0;
struct mpi3_status_reply_descriptor *status_desc;
struct mpi3_address_reply_descriptor *addr_desc;
@@ -366,8 +367,8 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (ioc_status &
MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
- ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
- mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
+ masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
@@ -380,7 +381,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (ioc_status &
MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
- ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
@@ -393,7 +394,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
sshdr.asc, sshdr.ascq);
}
}
- mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
+ mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
@@ -408,7 +409,10 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (cmdptr->state & MPI3MR_CMD_PENDING) {
cmdptr->state |= MPI3MR_CMD_COMPLETE;
cmdptr->ioc_loginfo = ioc_loginfo;
- cmdptr->ioc_status = ioc_status;
+ if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
+ cmdptr->ioc_status = ioc_status;
+ else
+ cmdptr->ioc_status = masked_ioc_status;
cmdptr->state &= ~MPI3MR_CMD_PENDING;
if (def_reply) {
cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
@@ -439,6 +443,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
u32 admin_reply_ci = mrioc->admin_reply_ci;
u32 num_admin_replies = 0;
u64 reply_dma = 0;
+ u16 threshold_comps = 0;
struct mpi3_default_reply_descriptor *reply_desc;
if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
@@ -462,6 +467,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_admin_replies++;
+ threshold_comps++;
if (++admin_reply_ci == mrioc->num_admin_replies) {
admin_reply_ci = 0;
exp_phase ^= 1;
@@ -472,6 +478,11 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+ if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
+ writel(admin_reply_ci,
+ &mrioc->sysif_regs->admin_reply_queue_ci);
+ threshold_comps = 0;
+ }
} while (1);
writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
@@ -525,7 +536,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
u32 num_op_reply = 0;
u64 reply_dma = 0;
struct mpi3_default_reply_descriptor *reply_desc;
- u16 req_q_idx = 0, reply_qidx;
+ u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;
reply_qidx = op_reply_q->qid - 1;
@@ -556,6 +567,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
+ threshold_comps++;
if (++reply_ci == op_reply_q->num_replies) {
reply_ci = 0;
@@ -577,13 +589,19 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
break;
}
#endif
+ if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
+ writel(reply_ci,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
+ atomic_sub(threshold_comps, &op_reply_q->pend_ios);
+ threshold_comps = 0;
+ }
} while (1);
writel(reply_ci,
&mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
op_reply_q->ci = reply_ci;
op_reply_q->ephase = exp_phase;
-
+ atomic_sub(threshold_comps, &op_reply_q->pend_ios);
atomic_dec(&op_reply_q->in_use);
return num_op_reply;
}
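/*
 * Batching idiom added above (summary, not part of the patch): during a
 * long completion loop the consumer index is published to the controller
 * every MPI3MR_THRESHOLD_REPLY_COUNT (100) replies instead of only at loop
 * exit, so the firmware can recycle reply descriptors while the host is
 * still draining the queue; pend_ios is decremented by the same batch size
 * to keep the poll heuristics in step.
 */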
@@ -710,7 +728,7 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
mpi3mr_process_op_reply_q(mrioc,
intr_info->op_reply_q);
- usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
+ usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);
} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
(num_op_reply < mrioc->max_host_ios));
@@ -1344,6 +1362,10 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
int retval = 0;
enum mpi3mr_iocstate ioc_state;
u64 base_info;
+ u8 retry = 0;
+ u64 start_time, elapsed_time_sec;
+
+retry_bring_ioc_ready:
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
@@ -1362,26 +1384,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
ioc_info(mrioc, "controller is in %s state during detection\n",
mpi3mr_iocstate_name(ioc_state));
- if (ioc_state == MRIOC_STATE_BECOMING_READY ||
- ioc_state == MRIOC_STATE_RESET_REQUESTED) {
- timeout = mrioc->ready_timeout * 10;
- do {
- msleep(100);
- } while (--timeout);
+ timeout = mrioc->ready_timeout * 10;
+
+ do {
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+
+ if (ioc_state != MRIOC_STATE_BECOMING_READY &&
+ ioc_state != MRIOC_STATE_RESET_REQUESTED)
+ break;
if (!pci_device_is_present(mrioc->pdev)) {
mrioc->unrecoverable = 1;
- ioc_err(mrioc,
- "controller is not present while waiting to reset\n");
- retval = -1;
+ ioc_err(mrioc, "controller is not present while waiting to reset\n");
goto out_device_not_present;
}
- ioc_state = mpi3mr_get_iocstate(mrioc);
- ioc_info(mrioc,
- "controller is in %s state after waiting to reset\n",
- mpi3mr_iocstate_name(ioc_state));
- }
+ msleep(100);
+ } while (--timeout);
if (ioc_state == MRIOC_STATE_READY) {
ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
@@ -1442,6 +1461,9 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+ if (retry == 0)
+ start_time = jiffies;
+
timeout = mrioc->ready_timeout * 10;
do {
ioc_state = mpi3mr_get_iocstate(mrioc);
@@ -1451,6 +1473,12 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
mpi3mr_iocstate_name(ioc_state));
return 0;
}
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ goto out_failed;
+ }
if (!pci_device_is_present(mrioc->pdev)) {
mrioc->unrecoverable = 1;
ioc_err(mrioc,
@@ -1459,9 +1487,19 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
goto out_device_not_present;
}
msleep(100);
- } while (--timeout);
+ elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
+ } while (elapsed_time_sec < mrioc->ready_timeout);
out_failed:
+ elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
+ if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
+ retry++;
+
+ ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
+ " elapsed time =%llu\n", retry, elapsed_time_sec);
+
+ goto retry_bring_ioc_ready;
+ }
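/*
 * Retry budget sketch (not part of the patch): with a ready_timeout of T
 * seconds, up to two further bring-up attempts are made, but only while at
 * least 60 seconds of the original budget remain; for illustration, with
 * T = 510 a retry is allowed only while elapsed_time_sec < 450.
 */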
ioc_state = mpi3mr_get_iocstate(mrioc);
ioc_err(mrioc,
"failed to bring to ready state, current state: %s\n",
@@ -2653,7 +2691,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return;
}
- if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
+ if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
@@ -2742,8 +2780,8 @@ void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
snprintf(mrioc->watchdog_work_q_name,
sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
mrioc->id);
- mrioc->watchdog_work_q =
- create_singlethread_workqueue(mrioc->watchdog_work_q_name);
+ mrioc->watchdog_work_q = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name);
if (!mrioc->watchdog_work_q) {
ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
return;
@@ -3827,6 +3865,29 @@ static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_read_tsu_interval - Update time stamp interval
+ * @mrioc: Adapter instance reference
+ *
+ * Update time stamp interval if its defined in driver page 1,
+ * otherwise use default value.
+ *
+ * Return: Nothing
+ */
+static void
+mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_driver_page1 driver_pg1;
+ u16 pg_sz = sizeof(driver_pg1);
+ int retval = 0;
+
+ mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
+
+ retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
+ if (!retval && driver_pg1.time_stamp_update)
+ mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
+}
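/*
 * Unit sketch (assumption, not part of the patch): driver page 1 appears
 * to express time_stamp_update in minutes while the watchdog runs roughly
 * once per second, hence the * 60 conversion; e.g. time_stamp_update = 2
 * yields ts_update_interval = 120 watchdog passes between timestamp syncs.
 */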
+
+/**
* mpi3mr_print_ioc_info - Display controller information
* @mrioc: Adapter instance reference
*
@@ -4122,6 +4183,7 @@ retry_init:
goto out_failed_noretry;
}
+ mpi3mr_read_tsu_interval(mrioc);
mpi3mr_print_ioc_info(mrioc);
if (!mrioc->cfg_page) {
@@ -4303,6 +4365,7 @@ retry_init:
goto out_failed_noretry;
}
+ mpi3mr_read_tsu_interval(mrioc);
mpi3mr_print_ioc_info(mrioc);
if (is_resume) {
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 616894571c6a..5f2f67acf8bf 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -5317,10 +5317,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
- snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
- "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
- mrioc->fwevt_worker_name, 0);
+ "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
if (!mrioc->fwevt_worker_thread) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b785a7e88b49..ed5046593fda 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -846,8 +846,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
snprintf(ioc->fault_reset_work_q_name,
sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
ioc->driver_name, ioc->id);
- ioc->fault_reset_work_q =
- create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+ ioc->fault_reset_work_q = alloc_ordered_workqueue(
+ "%s", WQ_MEM_RECLAIM, ioc->fault_reset_work_q_name);
if (!ioc->fault_reset_work_q) {
ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
return;
@@ -8898,9 +8898,8 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
if (!device_remove_in_progress) {
ioc_info(ioc,
- "Unable to allocate the memory for "
- "device_remove_in_progress of sz: %d\n "
- , pd_handles_sz);
+ "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
+ pd_handles_sz);
return -ENOMEM;
}
memset(device_remove_in_progress +
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index fe1e96fda284..eceb5eeb4651 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1162,8 +1162,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @fault_reset_work_q_name: fw fault work queue
* @fault_reset_work_q: ""
* @fault_reset_work: ""
- * @firmware_event_name: fw event work queue
- * @firmware_event_thread: ""
+ * @firmware_event_thread: fw event work queue
* @fw_event_lock:
* @fw_event_list: list of fw events
* @current_evet: current processing firmware event
@@ -1351,7 +1350,6 @@ struct MPT3SAS_ADAPTER {
struct delayed_work fault_reset_work;
/* fw event handler */
- char firmware_event_name[20];
struct workqueue_struct *firmware_event_thread;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 97c2472cd434..728cced42b0e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12301,10 +12301,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* event thread */
- snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
- "fw_event_%s%d", ioc->driver_name, ioc->id);
ioc->firmware_event_thread = alloc_ordered_workqueue(
- ioc->firmware_event_name, 0);
+ "fw_event_%s%d", 0, ioc->driver_name, ioc->id);
if (!ioc->firmware_event_thread) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index f684eb5e0489..bfc2b835e612 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -112,9 +112,8 @@ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
return false;
}
- snprintf(cb->work_q_name, sizeof(cb->work_q_name),
- "myrb_wq_%d", cb->host->host_no);
- cb->work_q = create_singlethread_workqueue(cb->work_q_name);
+ cb->work_q = alloc_ordered_workqueue("myrb_wq_%d", WQ_MEM_RECLAIM,
+ cb->host->host_no);
if (!cb->work_q) {
dma_pool_destroy(cb->dcdb_pool);
cb->dcdb_pool = NULL;
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
index fb8eacfceee8..78dc4136fb10 100644
--- a/drivers/scsi/myrb.h
+++ b/drivers/scsi/myrb.h
@@ -712,7 +712,6 @@ struct myrb_hba {
struct Scsi_Host *host;
struct workqueue_struct *work_q;
- char work_q_name[20];
struct delayed_work monitor_work;
unsigned long primary_monitor_time;
unsigned long secondary_monitor_time;
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index e824be9d9bbb..3392feb15cb4 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -2206,9 +2206,8 @@ static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
return false;
}
- snprintf(cs->work_q_name, sizeof(cs->work_q_name),
- "myrs_wq_%d", shost->host_no);
- cs->work_q = create_singlethread_workqueue(cs->work_q_name);
+ cs->work_q = alloc_ordered_workqueue("myrs_wq_%d", WQ_MEM_RECLAIM,
+ shost->host_no);
if (!cs->work_q) {
dma_pool_destroy(cs->dcdb_pool);
cs->dcdb_pool = NULL;
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
index 9f6696d0ddd5..e1d6b123de7b 100644
--- a/drivers/scsi/myrs.h
+++ b/drivers/scsi/myrs.h
@@ -904,7 +904,6 @@ struct myrs_hba {
bool disable_enc_msg;
struct workqueue_struct *work_q;
- char work_q_name[20];
struct delayed_work monitor_work;
unsigned long primary_monitor_time;
unsigned long secondary_monitor_time;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 1e63cb6cd8e3..33e1eba62ca1 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -100,10 +100,12 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- if (pm8001_ha->number_of_intr > 1)
+ if (pm8001_ha->number_of_intr > 1) {
blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+ return;
+ }
- return blk_mq_map_queues(qmap);
+ blk_mq_map_queues(qmap);
}
/*
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 8fe886dc5e47..a9869cd8c4c0 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2037,7 +2037,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
atomic_dec(&pm8001_dev->running_req);
break;
}
- pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n ",
+ pm8001_dbg(pm8001_ha, IO, "scsi_status = 0x%x\n",
psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index a2a084c8075e..4c5881917d76 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1946,7 +1946,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
}
iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
- ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
pmcraid_info("Waiting for IOA to become operational %x:%x\n",
@@ -4009,7 +4009,7 @@ static void pmcraid_tasklet_function(unsigned long instance)
* This routine un-registers registered interrupt handler and
* also frees irqs/vectors.
*
- * Retun Value
+ * Return Value
* None
*/
static
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 054a51713d55..fcfc3bed02c6 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -310,7 +310,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
if (!free_sqes) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
- "Returning NULL, free_sqes=%d.\n ",
+ "Returning NULL, free_sqes=%d.\n",
free_sqes);
goto out_failed;
}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 4813087e58a1..cf13148ba281 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3372,9 +3372,8 @@ retry_probe:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
qedf->io_mempool);
- sprintf(host_buf, "qedf_%u_link",
- qedf->lport->host->host_no);
- qedf->link_update_wq = create_workqueue(host_buf);
+ qedf->link_update_wq = alloc_workqueue("qedf_%u_link", WQ_MEM_RECLAIM,
+ 1, qedf->lport->host->host_no);
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
@@ -3584,9 +3583,8 @@ retry_probe:
ether_addr_copy(params.ll2_mac_address, qedf->mac);
/* Start LL2 processing thread */
- snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
- qedf->ll2_recv_wq =
- create_workqueue(host_buf);
+ qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2", WQ_MEM_RECLAIM, 1,
+ host->host_no);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
rc = -ENOMEM;
@@ -3627,9 +3625,8 @@ retry_probe:
}
}
- sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
- qedf->timer_work_queue =
- create_workqueue(host_buf);
+ qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
+ WQ_MEM_RECLAIM, 1, qedf->lport->host->host_no);
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
@@ -3641,7 +3638,8 @@ retry_probe:
if (mode != QEDF_MODE_RECOVERY) {
sprintf(host_buf, "qedf_%u_dpc",
qedf->lport->host->host_no);
- qedf->dpc_wq = create_workqueue(host_buf);
+ qedf->dpc_wq =
+ alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, host_buf);
}
INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
@@ -4182,7 +4180,7 @@ static int __init qedf_init(void)
goto err3;
}
- qedf_io_wq = create_workqueue("qedf_io_wq");
+ qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, "qedf_io_wq");
if (!qedf_io_wq) {
QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
goto err4;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cd0180b1f5b9..c5aec26019d6 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2767,7 +2767,8 @@ retry_probe:
}
sprintf(host_buf, "host_%d", qedi->shost->host_no);
- qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+ qedi->tmf_thread =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, host_buf);
if (!qedi->tmf_thread) {
QEDI_ERR(&qedi->dbg_ctx,
"Unable to start tmf thread!\n");
@@ -2775,8 +2776,9 @@ retry_probe:
goto free_cid_que;
}
- sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
- qedi->offload_thread = create_workqueue(host_buf);
+ qedi->offload_thread = alloc_workqueue("qedi_ofld%d",
+ WQ_MEM_RECLAIM,
+ 1, qedi->shost->host_no);
if (!qedi->offload_thread) {
QEDI_ERR(&qedi->dbg_ctx,
"Unable to start offload thread!\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 7cf998e3cc68..15066c112817 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2621,7 +2621,6 @@ typedef struct fc_port {
struct kref sess_kref;
struct qla_tgt *tgt;
unsigned long expires;
- struct list_head del_list_entry;
struct work_struct free_work;
struct work_struct reg_work;
uint64_t jiffies_at_registration;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bc3b2aea3f8b..7f980e6141c2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3501,11 +3501,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
- ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
+ ha->dpc_lp_wq =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name);
INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
- ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
+ ha->dpc_hp_wq =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, wq_name);
INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
INIT_WORK(&ha->idc_state_handler,
qla83xx_idc_state_handler_work);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 17cccd14765f..d91f54a6e752 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8806,7 +8806,7 @@ skip_retry_init:
DEBUG2(printk("scsi: %s: Starting kernel thread for "
"qla4xxx_dpc\n", __func__));
sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
- ha->dpc_thread = create_singlethread_workqueue(buf);
+ ha->dpc_thread = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, buf);
if (!ha->dpc_thread) {
ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
ret = -ENODEV;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a9d8a9c62663..d95f417e24c0 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2760,7 +2760,6 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
else
bd_len = 0;
alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
- memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
if (0x3 == pcontrol) { /* Saving values not supported */
mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
return check_condition_result;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3958a6d14bf4..0561b318dade 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1163,7 +1163,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
if (blk_integrity_rq(rq)) {
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
- int ivecs;
if (WARN_ON_ONCE(!prot_sdb)) {
/*
@@ -1175,20 +1174,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
goto out_free_sgtables;
}
- ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
-
- if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+ if (sg_alloc_table_chained(&prot_sdb->table,
+ rq->nr_integrity_segments,
prot_sdb->table.sgl,
SCSI_INLINE_PROT_SG_CNT)) {
ret = BLK_STS_RESOURCE;
goto out_free_sgtables;
}
- count = blk_rq_map_integrity_sg(rq->q, rq->bio,
- prot_sdb->table.sgl);
- BUG_ON(count > ivecs);
- BUG_ON(count > queue_max_integrity_segments(rq->q));
-
+ count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
cmd->prot_sdb = prot_sdb;
cmd->prot_sdb->table.nents = count;
}
@@ -1988,8 +1982,15 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
if (shost->no_highmem)
lim->features |= BLK_FEAT_BOUNCE_HIGH;
- dma_set_seg_boundary(dev, shost->dma_boundary);
- dma_set_max_seg_size(dev, shost->max_segment_size);
+ /*
+ * Propagate the DMA formation properties to the dma-mapping layer as
+ * a courtesy service to the LLDDs. This needs to check that the buses
+ * actually support the DMA API first, though.
+ */
+ if (dev->dma_parms) {
+ dma_set_seg_boundary(dev, shost->dma_boundary);
+ dma_set_max_seg_size(dev, shost->max_segment_size);
+ }
}
EXPORT_SYMBOL_GPL(scsi_init_limits);
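/*
 * Rationale sketch (not part of the patch): dma_set_seg_boundary() and
 * dma_set_max_seg_size() store into dev->dma_parms, which only buses that
 * implement the DMA API (PCI, platform, ...) allocate; for a SCSI host on
 * any other bus the pointer is NULL, so the calls are now guarded rather
 * than dereferencing it unconditionally.
 */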
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 7d088b8da075..62ea7e44460e 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -441,18 +441,13 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
fc_host->next_vport_number = 0;
fc_host->npiv_vports_inuse = 0;
- snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
- "fc_wq_%d", shost->host_no);
- fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
+ fc_host->work_q = alloc_workqueue("fc_wq_%d", 0, 0, shost->host_no);
if (!fc_host->work_q)
return -ENOMEM;
fc_host->dev_loss_tmo = fc_dev_loss_tmo;
- snprintf(fc_host->devloss_work_q_name,
- sizeof(fc_host->devloss_work_q_name),
- "fc_dl_%d", shost->host_no);
- fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
- fc_host->devloss_work_q_name);
+ fc_host->devloss_work_q = alloc_workqueue("fc_dl_%d", 0, 0,
+ shost->host_no);
if (!fc_host->devloss_work_q) {
destroy_workqueue(fc_host->work_q);
fc_host->work_q = NULL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9db86943d04c..41e2dfa2d67d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -38,7 +38,6 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bio-integrity.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
@@ -1382,7 +1381,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
- } else if (rq->cmd_flags & REQ_ATOMIC && write) {
+ } else if (rq->cmd_flags & REQ_ATOMIC) {
ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks,
sdkp->use_atomic_write_boundary,
protect | fua);
@@ -3404,7 +3403,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp,
rcu_read_lock();
vpd = rcu_dereference(sdkp->device->vpd_pgb1);
- if (!vpd || vpd->len < 8) {
+ if (!vpd || vpd->len <= 8) {
rcu_read_unlock();
return;
}
@@ -4093,9 +4092,38 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
struct scsi_sense_hdr sshdr;
+ struct scsi_failure failure_defs[] = {
+ {
+ /* Power on, reset, or bus device reset occurred */
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = 0,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ /* Power on occurred */
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = 1,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {
+ /* SCSI bus reset */
+ .sense = UNIT_ATTENTION,
+ .asc = 0x29,
+ .ascq = 2,
+ .result = SAM_STAT_CHECK_CONDITION,
+ },
+ {}
+ };
+ struct scsi_failures failures = {
+ .total_allowed = 3,
+ .failure_definitions = failure_defs,
+ };
const struct scsi_exec_args exec_args = {
.sshdr = &sshdr,
.req_flags = BLK_MQ_REQ_PM,
+ .failures = &failures,
};
struct scsi_device *sdp = sdkp->device;
int res;
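The new failure_defs/failures tables make the midlayer retry START STOP UNIT transparently when the device answers with a power-on or reset unit attention, instead of surfacing the check condition to the caller. The table is consumed through the scsi_exec_args already handed to scsi_execute_cmd(); the call site itself is unchanged, roughly (a sketch, with arguments as in mainline sd.c):

/*
 * Sketch: the failures table rides along in exec_args; up to
 * .total_allowed matching failures are retried by the midlayer.
 */
res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
		       SD_TIMEOUT, sdkp->max_retries, &exec_args);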
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index baf870a03ecf..f86be197fedd 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1424,7 +1424,6 @@ static const struct file_operations sg_fops = {
.mmap = sg_mmap,
.release = sg_release,
.fasync = sg_fasync,
- .llseek = no_llseek,
};
static const struct class sg_sysfs_class = {
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index cdedc271857a..fae6db20a6e9 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -505,7 +505,7 @@ struct pqi_vendor_general_request {
__le64 buffer_address;
__le32 buffer_length;
u8 reserved[40];
- } ofa_memory_allocation;
+ } host_memory_allocation;
} data;
};
@@ -517,21 +517,30 @@ struct pqi_vendor_general_response {
u8 reserved[2];
};
-#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
-#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1
+#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
+#define PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE 1
+#define PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE 2
#define PQI_OFA_VERSION 1
#define PQI_OFA_SIGNATURE "OFA_QRM"
-#define PQI_OFA_MAX_SG_DESCRIPTORS 64
+#define PQI_CTRL_LOG_VERSION 1
+#define PQI_CTRL_LOG_SIGNATURE "FW_DATA"
+#define PQI_HOST_MAX_SG_DESCRIPTORS 64
-struct pqi_ofa_memory {
- __le64 signature; /* "OFA_QRM" */
+struct pqi_host_memory {
+ __le64 signature; /* "OFA_QRM", "FW_DATA", etc. */
__le16 version; /* version of this struct (1 = 1st version) */
u8 reserved[62];
__le32 bytes_allocated; /* total allocated memory in bytes */
__le16 num_memory_descriptors;
u8 reserved1[2];
- struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
+ struct pqi_sg_descriptor sg_descriptor[PQI_HOST_MAX_SG_DESCRIPTORS];
+};
+
+struct pqi_host_memory_descriptor {
+ struct pqi_host_memory *host_memory;
+ dma_addr_t host_memory_dma_handle;
+ void **host_chunk_virt_address;
};
struct pqi_aio_error_info {
@@ -867,7 +876,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17
#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18
#define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21
-#define PQI_FIRMWARE_FEATURE_MAXIMUM 21
+#define PQI_FIRMWARE_FEATURE_CTRL_LOGGING 22
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 22
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -1096,6 +1106,11 @@ struct pqi_tmf_work {
u8 scsi_opcode;
};
+struct pqi_raid_io_stats {
+ u64 raid_bypass_cnt;
+ u64 write_stream_cnt;
+};
+
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
@@ -1158,7 +1173,7 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
- unsigned int raid_bypass_cnt;
+ struct pqi_raid_io_stats __percpu *raid_io_stats;
struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
};
@@ -1357,6 +1372,7 @@ struct pqi_ctrl_info {
u8 firmware_triage_supported : 1;
u8 rpl_extended_format_4_5_supported : 1;
u8 multi_lun_device_supported : 1;
+ u8 ctrl_logging_supported : 1;
u8 enable_r1_writes : 1;
u8 enable_r5_writes : 1;
u8 enable_r6_writes : 1;
@@ -1398,13 +1414,12 @@ struct pqi_ctrl_info {
wait_queue_head_t block_requests_wait;
struct mutex ofa_mutex;
- struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
- dma_addr_t pqi_ofa_mem_dma_handle;
- void **pqi_ofa_chunk_virt_addr;
struct work_struct ofa_memory_alloc_work;
struct work_struct ofa_quiesce_work;
u32 ofa_bytes_requested;
u16 ofa_cancel_reason;
+ struct pqi_host_memory_descriptor ofa_memory;
+ struct pqi_host_memory_descriptor ctrl_log_memory;
enum pqi_ctrl_removal_state ctrl_removal_state;
};
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 24c7cb285dca..7fd5a8c813dc 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.26-030"
+#define DRIVER_VERSION "2.1.30-031"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 26
-#define DRIVER_REVISION 30
+#define DRIVER_RELEASE 30
+#define DRIVER_REVISION 31
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -92,9 +92,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
@@ -1508,6 +1508,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto error;
+ device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
+ if (!device->raid_io_stats) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
device->raid_map = raid_map;
return 0;
@@ -2099,6 +2105,10 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
/* To prevent this from being freed later. */
new_device->raid_map = NULL;
}
+ if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
+ existing_device->raid_io_stats = new_device->raid_io_stats;
+ new_device->raid_io_stats = NULL;
+ }
existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
}
@@ -2121,6 +2131,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
if (device) {
+ free_percpu(device->raid_io_stats);
kfree(device->raid_map);
kfree(device);
}
@@ -2292,17 +2303,23 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
* queue depth, device size.
*/
list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ /*
+ * Check for queue depth change.
+ */
if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- if (pqi_volume_rescan_needed(device)) {
- device->rescan = false;
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- scsi_rescan_device(device->sdev);
- } else {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- }
+ }
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ /*
+ * Check for changes in the device, such as size.
+ */
+ if (pqi_volume_rescan_needed(device)) {
+ device->rescan = false;
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ scsi_rescan_device(device->sdev);
+ } else {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
}
@@ -2354,14 +2371,6 @@ static inline void pqi_mask_device(u8 *scsi3addr)
scsi3addr[3] |= 0xc0;
}
-static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
-{
- if (pqi_is_logical_device(device))
- return false;
-
- return (device->path_map & (device->path_map - 1)) != 0;
-}
-
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
@@ -3244,6 +3253,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
sense_data_length);
}
+ if (pqi_cmd_priv(scmd)->this_residual &&
+ !pqi_is_logical_device(scmd->device->hostdata) &&
+ scsi_status == SAM_STAT_CHECK_CONDITION &&
+ host_byte == DID_OK &&
+ sense_data_length &&
+ scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ sshdr.asc == 0x26 &&
+ sshdr.ascq == 0x0) {
+ host_byte = DID_NO_CONNECT;
+ pqi_take_device_offline(scmd->device, "AIO");
+ scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
+ }
+
scmd->result = scsi_status;
set_host_byte(scmd, host_byte);
}
@@ -3258,14 +3281,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
int residual_count;
int xfer_count;
bool device_offline;
- struct pqi_scsi_dev *device;
scmd = io_request->scmd;
error_info = io_request->error_info;
host_byte = DID_OK;
sense_data_length = 0;
device_offline = false;
- device = scmd->device->hostdata;
switch (error_info->service_response) {
case PQI_AIO_SERV_RESPONSE_COMPLETE:
@@ -3290,14 +3311,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
- if (pqi_is_multipath_device(device)) {
- pqi_device_remove_start(device);
- host_byte = DID_NO_CONNECT;
- scsi_status = SAM_STAT_CHECK_CONDITION;
- } else {
- scsi_status = SAM_STAT_GOOD;
- io_request->status = -EAGAIN;
- }
+ scsi_status = SAM_STAT_GOOD;
+ io_request->status = -EAGAIN;
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
@@ -3625,7 +3640,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = false;
pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
"Online Firmware Activation: %s\n",
@@ -3636,7 +3651,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
"Online Firmware Activation ABORTED\n");
if (ctrl_info->soft_reset_handshake_supported)
pqi_clear_soft_reset_status(ctrl_info);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
pqi_ofa_ctrl_unquiesce(ctrl_info);
break;
@@ -3646,7 +3661,7 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
dev_err(&ctrl_info->pci_dev->dev,
"unexpected Online Firmware Activation reset status: 0x%x\n",
reset_status);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
pqi_ofa_ctrl_unquiesce(ctrl_info);
pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
@@ -3661,8 +3676,8 @@ static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
pqi_ctrl_ofa_start(ctrl_info);
- pqi_ofa_setup_host_buffer(ctrl_info);
- pqi_ofa_host_memory_update(ctrl_info);
+ pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
+ pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
}
static void pqi_ofa_quiesce_worker(struct work_struct *work)
@@ -3702,7 +3717,7 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev,
"received Online Firmware Activation cancel request: reason: %u\n",
ctrl_info->ofa_cancel_reason);
- pqi_ofa_free_host_buffer(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
pqi_ctrl_ofa_done(ctrl_info);
break;
default:
@@ -5933,7 +5948,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_scsi_dev *device;
struct pqi_stream_data *pqi_stream_data;
- struct pqi_scsi_dev_raid_map_data rmd;
+ struct pqi_scsi_dev_raid_map_data rmd = { 0 };
if (!ctrl_info->enable_stream_detection)
return false;
@@ -5975,6 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
pqi_stream_data->next_lba = rmd.first_block +
rmd.block_cnt;
pqi_stream_data->last_accessed = jiffies;
+ per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
return true;
}
@@ -6025,7 +6041,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
ctrl_info = shost_to_hba(shost);
- if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
+ if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
@@ -6053,7 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- device->raid_bypass_cnt++;
+ per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -6190,14 +6206,12 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
continue;
scsi_device = scmd->device->hostdata;
- if (scsi_device != device)
- continue;
-
- if ((u8)scmd->device->lun != lun)
- continue;
list_del(&io_request->request_list_entry);
- set_host_byte(scmd, DID_RESET);
+ if (scsi_device == device && (u8)scmd->device->lun == lun)
+ set_host_byte(scmd, DID_RESET);
+ else
+ set_host_byte(scmd, DID_REQUEUE);
pqi_free_io_request(io_request);
scsi_dma_unmap(scmd);
pqi_scsi_done(scmd);
@@ -7350,7 +7364,8 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned int raid_bypass_cnt;
+ u64 raid_bypass_cnt;
+ int cpu;
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -7366,11 +7381,17 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
return -ENODEV;
}
- raid_bypass_cnt = device->raid_bypass_cnt;
+ raid_bypass_cnt = 0;
+
+ if (device->raid_io_stats) {
+ for_each_online_cpu(cpu) {
+ raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
+ }
+ }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+ return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
}
static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
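raid_bypass_cnt moves from a plain unsigned int to a per-CPU u64 so the hot path can bump it without atomics or shared-cacheline bouncing; the sysfs read then folds the per-CPU values, which is why the reported count is only loosely ordered against in-flight increments. The generic shape of the idiom, sketched (this variant uses this_cpu_inc(); the driver open-codes the equivalent with per_cpu_ptr() and smp_processor_id()):

#include <linux/percpu.h>

struct example_stats {
	u64 events;
};

/* stats = alloc_percpu(struct example_stats); at init time */
static struct example_stats __percpu *stats;

/* hot path: lockless per-CPU increment */
static void example_count_event(void)
{
	this_cpu_inc(stats->events);
}

/* slow path (e.g. a sysfs read): fold all CPUs' counters */
static u64 example_read_events(void)
{
	u64 sum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		sum += per_cpu_ptr(stats, cpu)->events;
	return sum;
}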
@@ -7452,6 +7473,43 @@ static ssize_t pqi_numa_node_show(struct device *dev,
return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
}
+static ssize_t pqi_write_stream_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u64 write_stream_cnt;
+ int cpu;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ write_stream_cnt = 0;
+
+ if (device->raid_io_stats) {
+ for_each_online_cpu(cpu) {
+ write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
+}
+
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7462,6 +7520,7 @@ static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
+static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr,
@@ -7473,6 +7532,7 @@ static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_raid_bypass_cnt.attr,
&dev_attr_sas_ncq_prio_enable.attr,
&dev_attr_numa_node.attr,
+ &dev_attr_write_stream_cnt.attr,
NULL
};
@@ -7863,6 +7923,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
break;
+ case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
+ ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
+ break;
}
pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7968,6 +8031,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "Controller Data Logging",
+ .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -8070,6 +8138,7 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
ctrl_info->firmware_triage_supported = false;
ctrl_info->rpl_extended_format_4_5_supported = false;
ctrl_info->multi_lun_device_supported = false;
+ ctrl_info->ctrl_logging_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -8210,6 +8279,9 @@ static void pqi_perform_lockup_action(void)
}
}
+#define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024)
+#define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -8221,6 +8293,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
}
+ if (sis_is_ctrl_logging_supported(ctrl_info)) {
+ sis_notify_kdump(ctrl_info);
+ rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
+ if (rc)
+ return rc;
+ }
sis_soft_reset(ctrl_info);
ssleep(PQI_POST_RESET_DELAY_SECS);
} else {
@@ -8402,6 +8480,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
+ if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+ pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
+ pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ }
+
rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -8586,8 +8669,22 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- if (pqi_ofa_in_progress(ctrl_info))
+ if (pqi_ofa_in_progress(ctrl_info)) {
pqi_ctrl_unblock_scan(ctrl_info);
+ if (ctrl_info->ctrl_logging_supported) {
+ if (!ctrl_info->ctrl_log_memory.host_memory)
+ pqi_host_setup_buffer(ctrl_info,
+ &ctrl_info->ctrl_log_memory,
+ PQI_CTRL_LOG_TOTAL_SIZE,
+ PQI_CTRL_LOG_MIN_SIZE);
+ pqi_host_memory_update(ctrl_info,
+ &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ } else {
+ if (ctrl_info->ctrl_log_memory.host_memory)
+ pqi_host_free_buffer(ctrl_info,
+ &ctrl_info->ctrl_log_memory);
+ }
+ }
pqi_scan_scsi_devices(ctrl_info);
@@ -8777,6 +8874,7 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
pqi_fail_all_outstanding_requests(ctrl_info);
ctrl_info->pqi_mode_enabled = false;
}
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8802,177 +8900,187 @@ static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
pqi_ctrl_unblock_scan(ctrl_info);
}
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
+{
+ ssleep(delay_secs);
+
+ return pqi_ctrl_init_resume(ctrl_info);
+}
+
+static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_size, u32 chunk_size)
{
int i;
u32 sg_count;
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
struct pqi_sg_descriptor *mem_descriptor;
dma_addr_t dma_handle;
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
sg_count = DIV_ROUND_UP(total_size, chunk_size);
- if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
+ if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
goto out;
- ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
- if (!ctrl_info->pqi_ofa_chunk_virt_addr)
+ host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+ if (!host_memory_descriptor->host_chunk_virt_address)
goto out;
dev = &ctrl_info->pci_dev->dev;
+ host_memory = host_memory_descriptor->host_memory;
for (i = 0; i < sg_count; i++) {
- ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
- if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
+ host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
+ if (!host_memory_descriptor->host_chunk_virt_address[i])
goto out_free_chunks;
- mem_descriptor = &ofap->sg_descriptor[i];
+ mem_descriptor = &host_memory->sg_descriptor[i];
put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
put_unaligned_le32(chunk_size, &mem_descriptor->length);
}
put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
- put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
- put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
+ put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
+ put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
return 0;
out_free_chunks:
while (--i >= 0) {
- mem_descriptor = &ofap->sg_descriptor[i];
+ mem_descriptor = &host_memory->sg_descriptor[i];
dma_free_coherent(dev, chunk_size,
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ host_memory_descriptor->host_chunk_virt_address[i],
get_unaligned_le64(&mem_descriptor->address));
}
- kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
+ kfree(host_memory_descriptor->host_chunk_virt_address);
out:
return -ENOMEM;
}
-static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_required_size, u32 min_required_size)
{
- u32 total_size;
u32 chunk_size;
u32 min_chunk_size;
- if (ctrl_info->ofa_bytes_requested == 0)
+ if (total_required_size == 0 || min_required_size == 0)
return 0;
- total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
- min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+ total_required_size = PAGE_ALIGN(total_required_size);
+ min_required_size = PAGE_ALIGN(min_required_size);
+ min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
min_chunk_size = PAGE_ALIGN(min_chunk_size);
- for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
- if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
- return 0;
- chunk_size /= 2;
- chunk_size = PAGE_ALIGN(chunk_size);
+ while (total_required_size >= min_required_size) {
+ for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
+ if (pqi_host_alloc_mem(ctrl_info,
+ host_memory_descriptor, total_required_size,
+ chunk_size) == 0)
+ return 0;
+ chunk_size /= 2;
+ chunk_size = PAGE_ALIGN(chunk_size);
+ }
+ total_required_size /= 2;
+ total_required_size = PAGE_ALIGN(total_required_size);
}
return -ENOMEM;
}
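pqi_host_alloc_buffer() now degrades along two axes: for each candidate total size it halves the chunk size down to the smallest chunk that still fits the total within PQI_HOST_MAX_SG_DESCRIPTORS, and if every chunk size fails it halves the total itself, stopping at min_required_size. A small standalone C demo of the search order (sizes illustrative; PAGE_ALIGN approximated with 4 KiB pages):

#include <stdio.h>

#define PAGE_SZ 4096UL
#define ALIGN_PAGE(x) (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))
#define MAX_SG 64UL

/* Print the (total, chunk) pairs the allocator would attempt. */
int main(void)
{
	unsigned long total = ALIGN_PAGE(4UL << 20);		/* 4 MiB request */
	unsigned long min_total = ALIGN_PAGE(total / MAX_SG);	/* 64 KiB floor */

	while (total >= min_total) {
		unsigned long min_chunk = ALIGN_PAGE((total + MAX_SG - 1) / MAX_SG);
		unsigned long chunk = total;

		while (chunk >= min_chunk) {
			unsigned long next;

			printf("try total=%lu chunk=%lu\n", total, chunk);
			next = ALIGN_PAGE(chunk / 2);
			if (next >= chunk)	/* page-size floor reached */
				break;
			chunk = next;
		}
		total = ALIGN_PAGE(total / 2);
	}
	return 0;
}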
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_size, u32 min_size)
{
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
dev = &ctrl_info->pci_dev->dev;
- ofap = dma_alloc_coherent(dev, sizeof(*ofap),
- &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
- if (!ofap)
+ host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
+ &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
+ if (!host_memory)
return;
- ctrl_info->pqi_ofa_mem_virt_addr = ofap;
+ host_memory_descriptor->host_memory = host_memory;
- if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
- dev_err(dev,
- "failed to allocate host buffer for Online Firmware Activation\n");
- dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
- ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
+ total_size, min_size) < 0) {
+ dev_err(dev, "failed to allocate firmware usable host buffer\n");
+ dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+ host_memory_descriptor->host_memory_dma_handle);
+ host_memory_descriptor->host_memory = NULL;
return;
}
-
- put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
- memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
}
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor)
{
unsigned int i;
struct device *dev;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
struct pqi_sg_descriptor *mem_descriptor;
unsigned int num_memory_descriptors;
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
- if (!ofap)
+ host_memory = host_memory_descriptor->host_memory;
+ if (!host_memory)
return;
dev = &ctrl_info->pci_dev->dev;
- if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
+ if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
goto out;
- mem_descriptor = ofap->sg_descriptor;
- num_memory_descriptors =
- get_unaligned_le16(&ofap->num_memory_descriptors);
+ mem_descriptor = host_memory->sg_descriptor;
+ num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
for (i = 0; i < num_memory_descriptors; i++) {
dma_free_coherent(dev,
get_unaligned_le32(&mem_descriptor[i].length),
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ host_memory_descriptor->host_chunk_virt_address[i],
get_unaligned_le64(&mem_descriptor[i].address));
}
- kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+ kfree(host_memory_descriptor->host_chunk_virt_address);
out:
- dma_free_coherent(dev, sizeof(*ofap), ofap,
- ctrl_info->pqi_ofa_mem_dma_handle);
- ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+ host_memory_descriptor->host_memory_dma_handle);
+ host_memory_descriptor->host_memory = NULL;
}
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u16 function_code)
{
u32 buffer_length;
struct pqi_vendor_general_request request;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
memset(&request, 0, sizeof(request));
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
- put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
- &request.header.iu_length);
- put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
- &request.function_code);
-
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
- if (ofap) {
- buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
- get_unaligned_le16(&ofap->num_memory_descriptors) *
- sizeof(struct pqi_sg_descriptor);
-
- put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
- &request.data.ofa_memory_allocation.buffer_address);
- put_unaligned_le32(buffer_length,
- &request.data.ofa_memory_allocation.buffer_length);
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+ put_unaligned_le16(function_code, &request.function_code);
+
+ host_memory = host_memory_descriptor->host_memory;
+
+ if (host_memory) {
+ buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
+ put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
+ put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
+
+ if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
+ put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
+ memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
+ } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
+ put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
+ memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
+ }
}
return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
-{
- ssleep(delay_secs);
-
- return pqi_ctrl_init_resume(ctrl_info);
-}
-
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
.status = SAM_STAT_CHECK_CONDITION,
@@ -9446,6 +9554,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x0462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x1104)
},
{
@@ -9474,6 +9586,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1110)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
@@ -9482,6 +9598,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x8462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0xc460)
},
{
@@ -9590,6 +9710,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x00a1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f3a, 0x0104)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -10182,6 +10310,110 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02fe)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02ff)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x0300)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0045)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0046)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0047)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0048)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0051)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0052)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0053)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0070)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0071)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0072)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0086)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0087)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0088)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0089)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000)
},
{
@@ -10266,6 +10498,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x00a3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 673437c7152b..ca1df36b83f7 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -29,6 +29,7 @@
#define SIS_ENABLE_INTX 0x80
#define SIS_SOFT_RESET 0x100
#define SIS_CMD_READY 0x200
+#define SIS_NOTIFY_KDUMP 0x400
#define SIS_TRIGGER_SHUTDOWN 0x800000
#define SIS_PQI_RESET_QUIESCE 0x1000000
@@ -52,6 +53,8 @@
#define SIS_BASE_STRUCT_ALIGNMENT 16
#define SIS_CTRL_KERNEL_FW_TRIAGE 0x3
+#define SIS_CTRL_KERNEL_CTRL_LOGGING 0x4
+#define SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS 0x18
#define SIS_CTRL_KERNEL_UP 0x80
#define SIS_CTRL_KERNEL_PANIC 0x100
#define SIS_CTRL_READY_TIMEOUT_SECS 180
@@ -65,6 +68,13 @@ enum sis_fw_triage_status {
FW_TRIAGE_COMPLETED
};
+enum sis_ctrl_logging_status {
+ CTRL_LOGGING_NOT_STARTED = 0,
+ CTRL_LOGGING_STARTED,
+ CTRL_LOGGING_COND_INVALID,
+ CTRL_LOGGING_COMPLETED
+};
+
#pragma pack(1)
/* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
@@ -442,6 +452,21 @@ static inline enum sis_fw_triage_status
SIS_CTRL_KERNEL_FW_TRIAGE));
}
+bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info)
+{
+ return readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING;
+}
+
+void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info)
+{
+ sis_set_doorbell_bit(ctrl_info, SIS_NOTIFY_KDUMP);
+}
+
+static inline enum sis_ctrl_logging_status sis_read_ctrl_logging_status(struct pqi_ctrl_info *ctrl_info)
+{
+ return ((enum sis_ctrl_logging_status)((readl(&ctrl_info->registers->sis_firmware_status) & SIS_CTRL_KERNEL_CTRL_LOGGING_STATUS) >> 3));
+}
+
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
writel(SIS_SOFT_RESET,
@@ -484,6 +509,41 @@ int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info)
return rc;
}
+#define SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS 180
+#define SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS 1
+
+int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ enum sis_ctrl_logging_status status;
+ unsigned long timeout;
+
+ timeout = (SIS_CTRL_LOGGING_STATUS_TIMEOUT_SECS * HZ) + jiffies;
+ while (1) {
+ status = sis_read_ctrl_logging_status(ctrl_info);
+ if (status == CTRL_LOGGING_COND_INVALID) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller data logging condition invalid\n");
+ rc = -EINVAL;
+ break;
+ } else if (status == CTRL_LOGGING_COMPLETED) {
+ rc = 0;
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "timed out waiting for controller data logging status\n");
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ ssleep(SIS_CTRL_LOGGING_STATUS_POLL_INTERVAL_SECS);
+ }
+
+ return rc;
+}
+
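sis_wait_for_ctrl_logging_completion() is a classic poll-with-deadline loop built on jiffies and ssleep(). For reference, the same shape could be expressed with the generic read_poll_timeout() helper from <linux/iopoll.h>; a hedged, untested sketch only (the driver keeps the open-coded loop, presumably for its distinct per-case error messages):

#include <linux/iopoll.h>

/* Sketch: poll the logging status every second for up to 180 s. */
static int example_wait_ctrl_logging(struct pqi_ctrl_info *ctrl_info)
{
	enum sis_ctrl_logging_status status;
	int rc;

	rc = read_poll_timeout(sis_read_ctrl_logging_status, status,
			       status == CTRL_LOGGING_COMPLETED ||
			       status == CTRL_LOGGING_COND_INVALID,
			       USEC_PER_SEC, 180ULL * USEC_PER_SEC, false,
			       ctrl_info);
	if (rc)
		return rc;	/* -ETIMEDOUT */
	return status == CTRL_LOGGING_COMPLETED ? 0 : -EINVAL;
}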
void sis_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct sis_base_struct,
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 0c97626d87d4..7e0eac3d07de 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -31,6 +31,9 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+bool sis_is_ctrl_logging_supported(struct pqi_ctrl_info *ctrl_info);
+void sis_notify_kdump(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_ctrl_logging_completion(struct pqi_ctrl_info *ctrl_info);
extern unsigned int sis_ctrl_ready_timeout_secs;
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index cc824dcfe7da..9be3f0193145 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -300,9 +300,8 @@ snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
}
SNIC_BUG_ON(shost->work_q != NULL);
- snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
- shost->host_no);
- shost->work_q = create_singlethread_workqueue(shost->work_q_name);
+ shost->work_q = alloc_ordered_workqueue("scsi_wq_%d", WQ_MEM_RECLAIM,
+ shost->host_no);
if (!shost->work_q) {
SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
@@ -873,7 +872,7 @@ snic_global_data_init(void)
snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
len = sizeof(struct snic_host_req);
- cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
+ cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
SLAB_HWCACHE_ALIGN, NULL);
if (!cachep) {
SNIC_ERR("Failed to create snic tm req slab\n");
@@ -884,7 +883,8 @@ snic_global_data_init(void)
snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
/* snic_event queue */
- snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
+ snic_glob->event_q =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "snic_event_wq");
if (!snic_glob->event_q) {
SNIC_ERR("snic event queue create failed\n");
ret = -ENOMEM;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 0d8ce1a92168..d50bad3a2ce9 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -834,6 +834,9 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next)
int backspace, result;
struct st_partstat *STps;
+ if (STp->ready != ST_READY)
+ return 0;
+
/*
* If there was a bus reset, block further access
* to this device.
@@ -841,8 +844,6 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next)
if (STp->pos_unknown)
return (-EIO);
- if (STp->ready != ST_READY)
- return 0;
STps = &(STp->ps[STp->partition]);
if (STps->rw == ST_WRITING) /* Writing */
return st_flush_write_buffer(STp);
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 8ffb75be99bc..0e81125df8c7 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -334,7 +334,6 @@ struct st_hba {
struct st_ccb *wait_ccb;
__le32 *scratch;
- char work_q_name[20];
struct workqueue_struct *work_q;
struct work_struct reset_work;
wait_queue_head_t reset_waitq;
@@ -1795,9 +1794,8 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hba->pdev = pdev;
init_waitqueue_head(&hba->reset_waitq);
- snprintf(hba->work_q_name, sizeof(hba->work_q_name),
- "stex_wq_%d", host->host_no);
- hba->work_q = create_singlethread_workqueue(hba->work_q_name);
+ hba->work_q = alloc_ordered_workqueue("stex_wq_%d", WQ_MEM_RECLAIM,
+ host->host_no);
if (!hba->work_q) {
printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
pci_name(pdev));
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index f51702893306..fffc0fa52594 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -304,7 +304,7 @@ static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
sun3_udc_write(UDC_INT_ENABLE, UDC_CSR);
#endif
- return count;
+ return count;
}
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c4fea077265e..32242d86cf5b 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1137,7 +1137,8 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
snprintf(name, sizeof(name),
"vmw_pvscsi_wq_%u", adapter->host->host_no);
- adapter->workqueue = create_singlethread_workqueue(name);
+ adapter->workqueue =
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
if (!adapter->workqueue) {
printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
return 0;
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 22d412cab91d..15602ec862e3 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -139,7 +139,7 @@ zalon_probe(struct parisc_device *dev)
return -ENODEV;
if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
- dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
+ dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n",
dev->irq);
goto fail;
}
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index abe9091827cd..a363f77881d1 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -32,8 +32,11 @@ store_intc_userimask(struct device *dev,
const char *buf, size_t count)
{
unsigned long level;
+ int ret;
- level = simple_strtoul(buf, NULL, 10);
+ ret = kstrtoul(buf, 10, &level);
+ if (ret != 0)
+ return ret;
/*
* Minimal acceptable IRQ levels are in the 2 - 16 range, but
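Unlike simple_strtoul(), kstrtoul() validates the whole string (allowing one trailing newline) and reports overflow, so a malformed sysfs write now fails with an error instead of being silently parsed as 0. The resulting store pattern, sketched with an illustrative attribute name:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned long level;
	int ret;

	ret = kstrtoul(buf, 10, &level);	/* rejects trailing junk */
	if (ret)
		return ret;

	/* ... validate and apply 'level' ... */
	return count;
}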
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 4ce0cb61e481..242570a5e565 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -111,7 +111,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
bool need_tid = false, clk_pause_msg = false;
- int ret, timeout;
+ int ret;
+ unsigned long time_left;
/*
* do not vote for runtime-PM if the transactions are part of clock
@@ -151,9 +152,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
- timeout = wait_for_completion_timeout(txn->comp,
- msecs_to_jiffies(ms));
- if (!timeout) {
+ time_left = wait_for_completion_timeout(txn->comp,
+ msecs_to_jiffies(ms));
+ if (!time_left) {
ret = -ETIMEDOUT;
slim_free_txn_tid(ctrl, txn);
}
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index 0274bc285b60..e25f9bdd9b23 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -330,7 +330,8 @@ static int qcom_xfer_msg(struct slim_controller *sctrl,
void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
unsigned long ms = txn->rl + HZ;
u8 *puc;
- int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
+ int ret = 0, retries = QCOM_BUF_ALLOC_RETRIES;
+ unsigned long time_left;
u8 la = txn->la;
u32 *head;
/* HW expects length field to be excluded */
@@ -374,9 +375,9 @@ static int qcom_xfer_msg(struct slim_controller *sctrl,
memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
- timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
+ time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
- if (!timeout) {
+ if (!time_left) {
dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
ret = -ETIMEDOUT;
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index e0b21f0f79c1..1ac8e2912fd1 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -788,7 +788,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
DECLARE_COMPLETION_ONSTACK(tx_sent);
DECLARE_COMPLETION_ONSTACK(done);
- int ret, timeout, i;
+ int ret, i;
+ unsigned long time_left;
u8 wbuf[SLIM_MSGQ_BUF_LEN];
u8 rbuf[SLIM_MSGQ_BUF_LEN];
u32 *pbuf;
@@ -890,8 +891,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
return ret;
}
- timeout = wait_for_completion_timeout(&tx_sent, HZ);
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&tx_sent, HZ);
+ if (!time_left) {
dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
mutex_unlock(&ctrl->tx_lock);
@@ -899,8 +900,8 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
}
if (usr_msg) {
- timeout = wait_for_completion_timeout(&done, HZ);
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&done, HZ);
+ if (!time_left) {
dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x",
txn->mc, txn->mt);
mutex_unlock(&ctrl->tx_lock);
@@ -916,7 +917,8 @@ static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
- int ret, timeout;
+ int ret;
+ unsigned long time_left;
ret = pm_runtime_get_sync(ctrl->dev);
if (ret < 0)
@@ -928,8 +930,8 @@ static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
if (ret)
goto pm_put;
- timeout = wait_for_completion_timeout(&done, HZ);
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&done, HZ);
+ if (!time_left) {
dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
ret = -ETIMEDOUT;
@@ -1168,11 +1170,12 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
enum qcom_slim_ngd_state cur_state = ctrl->state;
struct qcom_slim_ngd *ngd = ctrl->ngd;
u32 laddr, rx_msgq;
- int timeout, ret = 0;
+ int ret = 0;
+ unsigned long time_left;
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
- timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
- if (!timeout)
+ time_left = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
+ if (!time_left)
return -EREMOTEIO;
}
@@ -1217,8 +1220,8 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
ngd->base + NGD_RX_MSGQ_CFG);
qcom_slim_ngd_setup(ctrl);
- timeout = wait_for_completion_timeout(&ctrl->reconf, HZ);
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&ctrl->reconf, HZ);
+ if (!time_left) {
dev_err(ctrl->dev, "capability exchange timed-out\n");
return -ETIMEDOUT;
}
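These slimbus hunks are part of a tree-wide cleanup: wait_for_completion_timeout() returns an unsigned long (0 on timeout, otherwise the jiffies remaining), so storing the result in an int both misnames it and risks truncation with large timeouts. The intended pattern, sketched:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Sketch: time_left is the jiffies remaining; zero means timeout. */
static int example_wait(struct completion *done, unsigned long ms)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done, msecs_to_jiffies(ms));
	if (!time_left)
		return -ETIMEDOUT;
	return 0;
}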
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 5d924e946507..6a8daeb8c4b9 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -7,6 +7,7 @@ source "drivers/soc/aspeed/Kconfig"
source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/canaan/Kconfig"
+source "drivers/soc/cirrus/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/fujitsu/Kconfig"
source "drivers/soc/hisilicon/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 56f476a12847..2037a8695cb2 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -8,6 +8,7 @@ obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_CANAAN) += canaan/
+obj-$(CONFIG_EP93XX_SOC) += cirrus/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
diff --git a/drivers/soc/cirrus/Kconfig b/drivers/soc/cirrus/Kconfig
new file mode 100644
index 000000000000..d8b3b1e68998
--- /dev/null
+++ b/drivers/soc/cirrus/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_EP93XX
+
+config EP93XX_SOC
+ bool "Cirrus EP93xx chips SoC"
+ select SOC_BUS
+ select AUXILIARY_BUS
+ default y
+ help
+ Enable SoC support for Cirrus EP93xx chips.
+
+ Cirrus EP93xx chips have several swlocked (software-locked)
+ registers; this driver provides locked access for the reset,
+ pinctrl and clk devices, which are implemented as auxiliary
+ devices.
+
+endif
diff --git a/drivers/soc/cirrus/Makefile b/drivers/soc/cirrus/Makefile
new file mode 100644
index 000000000000..9e6608b67f76
--- /dev/null
+++ b/drivers/soc/cirrus/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += soc-ep93xx.o
diff --git a/drivers/soc/cirrus/soc-ep93xx.c b/drivers/soc/cirrus/soc-ep93xx.c
new file mode 100644
index 000000000000..3e79b3b13aef
--- /dev/null
+++ b/drivers/soc/cirrus/soc-ep93xx.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SoC driver for Cirrus EP93xx chips.
+ * Copyright (C) 2022 Nikita Shubin <nikita.shubin@maquefel.me>
+ *
+ * Based on a rewrite of arch/arm/mach-ep93xx/core.c
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * Thanks go to Michael Burian and Ray Lehtiniemi for their key
+ * role in the ep93xx Linux community.
+ */
+
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+
+#include <linux/soc/cirrus/ep93xx.h>
+
+#define EP93XX_SYSCON_DEVCFG 0x80
+
+#define EP93XX_SWLOCK_MAGICK 0xaa
+#define EP93XX_SYSCON_SWLOCK 0xc0
+#define EP93XX_SYSCON_SYSCFG 0x9c
+#define EP93XX_SYSCON_SYSCFG_REV_MASK GENMASK(31, 28)
+#define EP93XX_SYSCON_SYSCFG_REV_SHIFT 28
+
+struct ep93xx_map_info {
+ spinlock_t lock;
+ void __iomem *base;
+ struct regmap *map;
+};
+
+/*
+ * EP93xx System Controller software locked register write
+ *
+ * Logic safeguards are included to condition the control signals for
+ * power connection to the matrix to prevent part damage. In addition, a
+ * software lock register is included that must be written with 0xAA
+ * before each register write to change the values of the four switch
+ * matrix control registers.
+ */
+static void ep93xx_regmap_write(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int val)
+{
+ guard(spinlock_irqsave)(lock);
+
+ regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
+ regmap_write(map, reg, val);
+}
+
+static void ep93xx_regmap_update_bits(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ guard(spinlock_irqsave)(lock);
+
+ regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
+ /* force write is required to clear swlock if no changes are made */
+ regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
+}
+
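guard(spinlock_irqsave)(lock) comes from <linux/cleanup.h> and holds the lock for the remainder of the enclosing scope, releasing it (and restoring interrupts) automatically on every return path. The open-coded equivalent of ep93xx_regmap_write() would be roughly:

/* Sketch: what the guard() form expands to, conceptually. */
static void example_swlocked_write(struct regmap *map, spinlock_t *lock,
				   unsigned int reg, unsigned int val)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	regmap_write(map, EP93XX_SYSCON_SWLOCK, EP93XX_SWLOCK_MAGICK);
	regmap_write(map, reg, val);
	spin_unlock_irqrestore(lock, flags);
}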
+static void ep93xx_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static void ep93xx_adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);
+
+ kfree(rdev);
+}
+
+static struct auxiliary_device __init *ep93xx_adev_alloc(struct device *parent,
+ const char *name,
+ struct ep93xx_map_info *info)
+{
+ struct ep93xx_regmap_adev *rdev __free(kfree) = NULL;
+ struct auxiliary_device *adev;
+ int ret;
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev)
+ return ERR_PTR(-ENOMEM);
+
+ rdev->map = info->map;
+ rdev->base = info->base;
+ rdev->lock = &info->lock;
+ rdev->write = ep93xx_regmap_write;
+ rdev->update_bits = ep93xx_regmap_update_bits;
+
+ adev = &rdev->adev;
+ adev->name = name;
+ adev->dev.parent = parent;
+ adev->dev.release = ep93xx_adev_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &no_free_ptr(rdev)->adev;
+}
+
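ep93xx_adev_alloc() leans on the scope-based cleanup helpers: __free(kfree) arranges for rdev to be kfree()d automatically on any early return, and no_free_ptr() disarms that cleanup on the success path so ownership passes to the auxiliary device (whose .release callback frees it later). The shape of the idiom, sketched with an illustrative type:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct example_obj {
	int id;
};

/* Sketch: allocate-or-fail without explicit error-path kfree()s. */
static struct example_obj *example_alloc(int id)
{
	struct example_obj *obj __free(kfree) =
		kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (id < 0)
		return NULL;		/* obj is freed automatically here */

	obj->id = id;
	return no_free_ptr(obj);	/* hand ownership to the caller */
}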
+static int __init ep93xx_controller_register(struct device *parent, const char *name,
+ struct ep93xx_map_info *info)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = ep93xx_adev_alloc(parent, name, info);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, ep93xx_unregister_adev, adev);
+}
+
+static unsigned int __init ep93xx_soc_revision(struct regmap *map)
+{
+ unsigned int val;
+
+ regmap_read(map, EP93XX_SYSCON_SYSCFG, &val);
+ val &= EP93XX_SYSCON_SYSCFG_REV_MASK;
+ val >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT;
+ return val;
+}
+
+static const char __init *ep93xx_get_soc_rev(unsigned int rev)
+{
+ switch (rev) {
+ case EP93XX_CHIP_REV_D0:
+ return "D0";
+ case EP93XX_CHIP_REV_D1:
+ return "D1";
+ case EP93XX_CHIP_REV_E0:
+ return "E0";
+ case EP93XX_CHIP_REV_E1:
+ return "E1";
+ case EP93XX_CHIP_REV_E2:
+ return "E2";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *pinctrl_names[] __initconst = {
+ "pinctrl-ep9301", /* EP93XX_9301_SOC */
+ "pinctrl-ep9307", /* EP93XX_9307_SOC */
+ "pinctrl-ep9312", /* EP93XX_9312_SOC */
+};
+
+static int __init ep93xx_syscon_probe(struct platform_device *pdev)
+{
+ enum ep93xx_soc_model model;
+ struct ep93xx_map_info *map_info;
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ struct device *dev = &pdev->dev;
+ struct regmap *map;
+ void __iomem *base;
+ unsigned int rev;
+ int ret;
+
+ model = (enum ep93xx_soc_model)(uintptr_t)device_get_match_data(dev);
+
+ map = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ attrs = devm_kzalloc(dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ rev = ep93xx_soc_revision(map);
+
+ attrs->machine = of_flat_dt_get_machine_name();
+ attrs->family = "Cirrus Logic EP93xx";
+ attrs->revision = ep93xx_get_soc_rev(rev);
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ map_info = devm_kzalloc(dev, sizeof(*map_info), GFP_KERNEL);
+ if (!map_info)
+ return -ENOMEM;
+
+ spin_lock_init(&map_info->lock);
+ map_info->map = map;
+ map_info->base = base;
+
+ ret = ep93xx_controller_register(dev, pinctrl_names[model], map_info);
+ if (ret)
+ dev_err(dev, "registering pinctrl controller failed\n");
+
+ /*
+ * EP93xx SSP clock rate was doubled in version E2. For more information
+ * see section 6 "2x SSP (Synchronous Serial Port) Clock – Revision E2 only":
+ * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
+ */
+ if (rev == EP93XX_CHIP_REV_E2)
+ ret = ep93xx_controller_register(dev, "clk-ep93xx.e2", map_info);
+ else
+ ret = ep93xx_controller_register(dev, "clk-ep93xx", map_info);
+ if (ret)
+ dev_err(dev, "registering clock controller failed\n");
+
+ ret = ep93xx_controller_register(dev, "reset-ep93xx", map_info);
+ if (ret)
+ dev_err(dev, "registering reset controller failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id ep9301_syscon_of_device_ids[] = {
+ { .compatible = "cirrus,ep9301-syscon", .data = (void *)EP93XX_9301_SOC },
+ { .compatible = "cirrus,ep9302-syscon", .data = (void *)EP93XX_9301_SOC },
+ { .compatible = "cirrus,ep9307-syscon", .data = (void *)EP93XX_9307_SOC },
+ { .compatible = "cirrus,ep9312-syscon", .data = (void *)EP93XX_9312_SOC },
+ { .compatible = "cirrus,ep9315-syscon", .data = (void *)EP93XX_9312_SOC },
+ { /* sentinel */ }
+};
+
+static struct platform_driver ep9301_syscon_driver = {
+ .driver = {
+ .name = "ep9301-syscon",
+ .of_match_table = ep9301_syscon_of_device_ids,
+ },
+};
+builtin_platform_driver_probe(ep9301_syscon_driver, ep93xx_syscon_probe);
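
The new syscon driver hands the pinctrl, clk and reset blocks off to the auxiliary bus and ties their teardown to the parent with devm_add_action_or_reset(). A minimal sketch of the same auxiliary-device life cycle, with hypothetical demo_* names standing in for the ep93xx helpers (ep93xx_adev_alloc() itself is outside this hunk):

```c
/*
 * Illustrative only: demo_* names are hypothetical. After
 * auxiliary_device_init() succeeds, the only legal failure cleanup is
 * auxiliary_device_uninit(), which drops the refcount and ends up in
 * the release callback.
 */
#include <linux/auxiliary_bus.h>
#include <linux/slab.h>

struct demo_adev {
	struct auxiliary_device adev;
};

static void demo_adev_release(struct device *dev)
{
	struct auxiliary_device *adev =
		container_of(dev, struct auxiliary_device, dev);

	kfree(container_of(adev, struct demo_adev, adev));
}

static int demo_register(struct device *parent)
{
	struct demo_adev *d;
	int ret;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->adev.name = "demo";			/* matched as "<modname>.demo" */
	d->adev.id = 0;				/* must be unique per name */
	d->adev.dev.parent = parent;
	d->adev.dev.release = demo_adev_release;

	ret = auxiliary_device_init(&d->adev);
	if (ret) {
		kfree(d);			/* not yet refcounted */
		return ret;
	}

	ret = auxiliary_device_add(&d->adev);
	if (ret) {
		auxiliary_device_uninit(&d->adev);
		return ret;
	}

	return 0;
}
```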
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index d928258c6761..77dc094075e1 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -83,7 +83,6 @@ static int sdw_drv_probe(struct device *dev)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
const struct sdw_device_id *id;
- const char *name;
int ret;
/*
@@ -108,11 +107,6 @@ static int sdw_drv_probe(struct device *dev)
ret = drv->probe(slave, id);
if (ret) {
- name = drv->name;
- if (!name)
- name = drv->driver.name;
-
- dev_err(dev, "Probe of %s failed: %d\n", name, ret);
dev_pm_domain_detach(dev, false);
return ret;
}
@@ -129,7 +123,7 @@ static int sdw_drv_probe(struct device *dev)
/* init the dynamic sysfs attributes we need */
ret = sdw_slave_sysfs_dpn_init(slave);
if (ret < 0)
- dev_warn(dev, "Slave sysfs init failed:%d\n", ret);
+ dev_warn(dev, "failed to initialise sysfs: %d\n", ret);
/*
* Check for valid clk_stop_timeout, use DisCo worst case value of
@@ -153,7 +147,7 @@ static int sdw_drv_probe(struct device *dev)
if (drv->ops && drv->ops->update_status) {
ret = drv->ops->update_status(slave, slave->status);
if (ret < 0)
- dev_warn(dev, "%s: update_status failed with status %d\n", __func__, ret);
+ dev_warn(dev, "failed to update status at probe: %d\n", ret);
}
mutex_unlock(&slave->sdw_dev_lock);
@@ -204,16 +198,11 @@ static void sdw_drv_shutdown(struct device *dev)
*/
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
{
- const char *name;
-
drv->driver.bus = &sdw_bus_type;
if (!drv->probe) {
- name = drv->name;
- if (!name)
- name = drv->driver.name;
-
- pr_err("driver %s didn't provide SDW probe routine\n", name);
+ pr_err("driver %s didn't provide SDW probe routine\n",
+ drv->driver.name);
return -EINVAL;
}
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index e0683a5975d1..05652e983539 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -890,8 +890,14 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
}
}
- if (is_slave)
- return sdw_handle_slave_status(&cdns->bus, status);
+ if (is_slave) {
+ int ret;
+
+ mutex_lock(&cdns->status_update_lock);
+ ret = sdw_handle_slave_status(&cdns->bus, status);
+ mutex_unlock(&cdns->status_update_lock);
+ return ret;
+ }
return 0;
}
@@ -988,6 +994,31 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
}
EXPORT_SYMBOL(sdw_cdns_irq);
+static void cdns_check_attached_status_dwork(struct work_struct *work)
+{
+ struct sdw_cdns *cdns =
+ container_of(work, struct sdw_cdns, attach_dwork.work);
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+ u32 val;
+ int ret;
+ int i;
+
+ val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+
+ for (i = 0; i <= SDW_MAX_DEVICES; i++) {
+ status[i] = val & 0x3;
+ if (status[i])
+ dev_dbg(cdns->dev, "Peripheral %d status: %d\n", i, status[i]);
+ val >>= 2;
+ }
+
+ mutex_lock(&cdns->status_update_lock);
+ ret = sdw_handle_slave_status(&cdns->bus, status);
+ mutex_unlock(&cdns->status_update_lock);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: sdw_handle_slave_status failed: %d\n", __func__, ret);
+}
+
/**
* cdns_update_slave_status_work - update slave status in a work since we will need to handle
* other interrupts eg. CDNS_MCP_INT_RX_WL during the update slave
@@ -1740,7 +1771,11 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
init_completion(&cdns->tx_complete);
cdns->bus.port_ops = &cdns_port_ops;
+ mutex_init(&cdns->status_update_lock);
+
INIT_WORK(&cdns->work, cdns_update_slave_status_work);
+ INIT_DELAYED_WORK(&cdns->attach_dwork, cdns_check_attached_status_dwork);
+
return 0;
}
EXPORT_SYMBOL(sdw_cdns_probe);
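
Both the interrupt path (cdns_update_slave_status()) and the new delayed work funnel into sdw_handle_slave_status(), so status_update_lock makes the two callers mutually exclusive; a sleeping mutex works here because the Cadence handler runs in thread context. A minimal sketch of the shape, with hypothetical demo_* names:

```c
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct mutex status_lock;	/* serializes status processing */
	struct delayed_work attach_dwork;
};

static void demo_handle_status(struct demo_ctx *ctx)
{
	/* stands in for sdw_handle_slave_status() */
}

static void demo_attach_dwork(struct work_struct *work)
{
	struct demo_ctx *ctx =
		container_of(work, struct demo_ctx, attach_dwork.work);

	mutex_lock(&ctx->status_lock);
	demo_handle_status(ctx);	/* same helper as the IRQ path */
	mutex_unlock(&ctx->status_lock);
}

static void demo_init(struct demo_ctx *ctx)
{
	mutex_init(&ctx->status_lock);
	INIT_DELAYED_WORK(&ctx->attach_dwork, demo_attach_dwork);
}
```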
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index bc84435e420f..e1d7969ba48a 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -117,6 +117,8 @@ struct sdw_cdns_dai_runtime {
* @link_up: Link status
* @msg_count: Messages sent on bus
* @dai_runtime_array: runtime context for each allocated DAI.
+ * @status_update_lock: protect concurrency between interrupt-based and delayed work
+ * status update
*/
struct sdw_cdns {
struct device *dev;
@@ -148,10 +150,13 @@ struct sdw_cdns {
bool interrupt_enabled;
struct work_struct work;
+ struct delayed_work attach_dwork;
struct list_head list;
struct sdw_cdns_dai_runtime **dai_runtime_array;
+
+ struct mutex status_update_lock; /* add mutual exclusion to sdw_handle_slave_status() */
};
#define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus)
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index 1db4d9d3a3ba..dddd29381441 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -103,6 +103,8 @@ static inline void intel_writew(void __iomem *base, int offset, u16 value)
#define INTEL_MASTER_RESET_ITERATIONS 10
+#define SDW_INTEL_DELAYED_ENUMERATION_MS 100
+
#define SDW_INTEL_CHECK_OPS(sdw, cb) ((sdw) && (sdw)->link_res && (sdw)->link_res->hw_ops && \
(sdw)->link_res->hw_ops->cb)
#define SDW_INTEL_OPS(sdw, cb) ((sdw)->link_res->hw_ops->cb)
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index d110f2b587d5..ae689d5d1ab9 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -489,6 +489,7 @@ static void intel_link_remove(struct auxiliary_device *auxdev)
*/
if (!bus->prop.hw_disabled) {
sdw_intel_debugfs_exit(sdw);
+ cancel_delayed_work_sync(&cdns->attach_dwork);
sdw_cdns_enable_interrupt(cdns, false);
}
sdw_bus_master_delete(bus);
diff --git a/drivers/soundwire/intel_bus_common.c b/drivers/soundwire/intel_bus_common.c
index df944e11b9ca..d3ff6c65b64c 100644
--- a/drivers/soundwire/intel_bus_common.c
+++ b/drivers/soundwire/intel_bus_common.c
@@ -45,21 +45,24 @@ int intel_start_bus(struct sdw_intel *sdw)
return ret;
}
- ret = sdw_cdns_exit_reset(cdns);
+ ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
- dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
+ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
- ret = sdw_cdns_enable_interrupt(cdns, true);
+ ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
- dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
return ret;
}
sdw_cdns_check_self_clearing_bits(cdns, __func__,
true, INTEL_MASTER_RESET_ITERATIONS);
+ schedule_delayed_work(&cdns->attach_dwork,
+ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
+
return 0;
}
@@ -136,21 +139,24 @@ int intel_start_bus_after_reset(struct sdw_intel *sdw)
return ret;
}
- ret = sdw_cdns_exit_reset(cdns);
+ ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ dev_err(dev, "cannot enable interrupts during resume\n");
return ret;
}
- ret = sdw_cdns_enable_interrupt(cdns, true);
+ ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
+ dev_err(dev, "unable to exit bus reset sequence during resume\n");
return ret;
}
}
sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
+ schedule_delayed_work(&cdns->attach_dwork,
+ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
+
return 0;
}
@@ -184,6 +190,9 @@ int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
+ schedule_delayed_work(&cdns->attach_dwork,
+ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
+
return 0;
}
@@ -194,6 +203,8 @@ int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
bool wake_enable = false;
int ret;
+ cancel_delayed_work_sync(&cdns->attach_dwork);
+
if (clock_stop) {
ret = sdw_cdns_clock_stop(cdns, true);
if (ret < 0)
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 936d57869493..4f288f07e38f 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -375,9 +375,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
* If the QSPI controller is set in regular SPI mode, set it in
* Serial Memory Mode (SMM).
*/
- if (aq->mr != QSPI_MR_SMM) {
- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
- aq->mr = QSPI_MR_SMM;
+ if (!(aq->mr & QSPI_MR_SMM)) {
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
}
/* Clear pending interrupts */
@@ -501,7 +501,8 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (ret < 0)
return ret;
- aq->scr = QSPI_SCR_SCBR(scbr);
+ aq->scr &= ~QSPI_SCR_SCBR_MASK;
+ aq->scr |= QSPI_SCR_SCBR(scbr);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(ctrl->dev.parent);
@@ -534,6 +535,7 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi)
if (ret < 0)
return ret;
+ aq->scr &= ~QSPI_SCR_DLYBS_MASK;
aq->scr |= QSPI_SCR_DLYBS(cs_setup);
atmel_qspi_write(aq->scr, aq, QSPI_SCR);
@@ -549,8 +551,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq)
atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
- aq->mr = QSPI_MR_SMM;
+ aq->mr |= QSPI_MR_SMM;
+ atmel_qspi_write(aq->mr, aq, QSPI_MR);
/* Enable the QSPI controller */
atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
@@ -721,6 +723,7 @@ static void atmel_qspi_remove(struct platform_device *pdev)
clk_unprepare(aq->pclk);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
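
The controller now keeps cached shadows of QSPI_MR and QSPI_SCR (aq->mr, aq->scr) and updates them with read-modify-write, so programming the baud rate (SCBR) no longer wipes the delay-before-SPCK field (DLYBS) and vice versa. A minimal sketch of the shadow-register pattern, assuming a demo device with two fields sharing one register:

```c
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_SCR_SCBR	GENMASK(15, 8)	/* baud-rate field */
#define DEMO_SCR_DLYBS	GENMASK(23, 16)	/* delay-before-SPCK field */

struct demo_qspi {
	u32 scr;	/* cached shadow of the SCR register */
};

static u32 demo_set_scbr(struct demo_qspi *q, u32 scbr)
{
	/* clear only this field so a previously set DLYBS survives */
	q->scr &= ~DEMO_SCR_SCBR;
	q->scr |= FIELD_PREP(DEMO_SCR_SCBR, scbr);
	return q->scr;	/* value to write back to the hardware register */
}
```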
diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c
index 9d97ec98881c..94458df53eae 100644
--- a/drivers/spi/spi-airoha-snfi.c
+++ b/drivers/spi/spi-airoha-snfi.c
@@ -211,9 +211,6 @@ struct airoha_snand_dev {
u8 *txrx_buf;
dma_addr_t dma_addr;
-
- u64 cur_page_num;
- bool data_need_update;
};
struct airoha_snand_ctrl {
@@ -405,7 +402,7 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
for (i = 0; i < len; i += data_len) {
int err;
- data_len = min(len, SPI_MAX_TRANSFER_SIZE);
+ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
if (err)
return err;
@@ -427,7 +424,7 @@ static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data,
for (i = 0; i < len; i += data_len) {
int err;
- data_len = min(len, SPI_MAX_TRANSFER_SIZE);
+ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
if (err)
return err;
@@ -644,11 +641,6 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
u32 val, rd_mode;
int err;
- if (!as_dev->data_need_update)
- return len;
-
- as_dev->data_need_update = false;
-
switch (op->cmd.opcode) {
case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
rd_mode = 1;
@@ -739,8 +731,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
if (err)
return err;
- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
- SPI_NFI_READ_FROM_CACHE_DONE);
+ /*
+ * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end
+ * of dirmap_read operation even if it is already set.
+ */
+ err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
+ SPI_NFI_READ_FROM_CACHE_DONE,
+ SPI_NFI_READ_FROM_CACHE_DONE);
if (err)
return err;
@@ -870,8 +867,13 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
if (err)
return err;
- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
- SPI_NFI_LOAD_TO_CACHE_DONE);
+ /*
+ * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end
+ * of dirmap_write operation even if it is already set.
+ */
+ err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
+ SPI_NFI_LOAD_TO_CACHE_DONE,
+ SPI_NFI_LOAD_TO_CACHE_DONE);
if (err)
return err;
@@ -885,23 +887,11 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
static int airoha_snand_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi);
u8 data[8], cmd, opcode = op->cmd.opcode;
struct airoha_snand_ctrl *as_ctrl;
int i, err;
as_ctrl = spi_controller_get_devdata(mem->spi->controller);
- if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE &&
- op->addr.val == as_dev->cur_page_num) {
- as_dev->data_need_update = true;
- } else if (opcode == SPI_NAND_OP_PAGE_READ) {
- if (!as_dev->data_need_update &&
- op->addr.val == as_dev->cur_page_num)
- return 0;
-
- as_dev->data_need_update = true;
- as_dev->cur_page_num = op->addr.val;
- }
/* switch to manual mode */
err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
@@ -986,7 +976,6 @@ static int airoha_snand_setup(struct spi_device *spi)
if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr))
return -ENOMEM;
- as_dev->data_need_update = true;
spi_set_ctldata(spi, as_dev);
return 0;
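
The transfer loops previously sized every chunk as min(len, SPI_MAX_TRANSFER_SIZE), so for transfers longer than one chunk each later iteration moved a full-sized chunk again instead of the remaining tail; min(len - i, SPI_MAX_TRANSFER_SIZE) bounds each pass by what is actually left. The corrected loop shape, with a hypothetical demo_push_chunk() standing in for the FIFO helper:

```c
#include <linux/minmax.h>
#include <linux/types.h>

#define DEMO_MAX_CHUNK	0x100	/* stand-in for SPI_MAX_TRANSFER_SIZE */

static int demo_push_chunk(const u8 *buf, size_t n)
{
	return 0;	/* hypothetical FIFO write */
}

static int demo_write(const u8 *buf, size_t len)
{
	size_t i, chunk;
	int err;

	for (i = 0; i < len; i += chunk) {
		/* the final pass must be len - i, not len */
		chunk = min_t(size_t, len - i, DEMO_MAX_CHUNK);
		err = demo_push_chunk(buf + i, chunk);
		if (err)
			return err;
	}

	return 0;
}
```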
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index a1d60e51c053..dc6bdc74643d 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -18,18 +18,18 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
-#include <linux/platform_data/dma-ep93xx.h>
-#include <linux/platform_data/spi-ep93xx.h>
-
#define SSPCR0 0x0000
#define SSPCR0_SPO BIT(6)
#define SSPCR0_SPH BIT(7)
@@ -76,8 +76,6 @@
* frame decreases this level and sending one frame increases it.
* @dma_rx: RX DMA channel
* @dma_tx: TX DMA channel
- * @dma_rx_data: RX parameters passed to the DMA engine
- * @dma_tx_data: TX parameters passed to the DMA engine
* @rx_sgt: sg table for RX transfers
* @tx_sgt: sg table for TX transfers
* @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
@@ -92,8 +90,6 @@ struct ep93xx_spi {
size_t fifo_level;
struct dma_chan *dma_rx;
struct dma_chan *dma_tx;
- struct ep93xx_dma_data dma_rx_data;
- struct ep93xx_dma_data dma_tx_data;
struct sg_table rx_sgt;
struct sg_table tx_sgt;
void *zeropage;
@@ -575,46 +571,23 @@ static int ep93xx_spi_unprepare_hardware(struct spi_controller *host)
return 0;
}
-static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+static int ep93xx_spi_setup_dma(struct device *dev, struct ep93xx_spi *espi)
{
- if (ep93xx_dma_chan_is_m2p(chan))
- return false;
-
- chan->private = filter_param;
- return true;
-}
-
-static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
-{
- dma_cap_mask_t mask;
int ret;
espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
if (!espi->zeropage)
return -ENOMEM;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- espi->dma_rx_data.port = EP93XX_DMA_SSP;
- espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
- espi->dma_rx_data.name = "ep93xx-spi-rx";
-
- espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
- &espi->dma_rx_data);
- if (!espi->dma_rx) {
- ret = -ENODEV;
+ espi->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(espi->dma_rx)) {
+ ret = dev_err_probe(dev, PTR_ERR(espi->dma_rx), "rx DMA setup failed");
goto fail_free_page;
}
- espi->dma_tx_data.port = EP93XX_DMA_SSP;
- espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
- espi->dma_tx_data.name = "ep93xx-spi-tx";
-
- espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
- &espi->dma_tx_data);
- if (!espi->dma_tx) {
- ret = -ENODEV;
+ espi->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(espi->dma_tx)) {
+ ret = dev_err_probe(dev, PTR_ERR(espi->dma_tx), "tx DMA setup failed");
goto fail_release_rx;
}
@@ -647,18 +620,11 @@ static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
static int ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_controller *host;
- struct ep93xx_spi_info *info;
struct ep93xx_spi *espi;
struct resource *res;
int irq;
int error;
- info = dev_get_platdata(&pdev->dev);
- if (!info) {
- dev_err(&pdev->dev, "missing platform data\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -713,12 +679,17 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
goto fail_release_host;
}
- if (info->use_dma && ep93xx_spi_setup_dma(espi))
+ error = ep93xx_spi_setup_dma(&pdev->dev, espi);
+ if (error == -EPROBE_DEFER)
+ goto fail_release_host;
+
+ if (error)
dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
/* make sure that the hardware is disabled */
writel(0, espi->mmio + SSPCR1);
+ device_set_node(&host->dev, dev_fwnode(&pdev->dev));
error = devm_spi_register_controller(&pdev->dev, host);
if (error) {
dev_err(&pdev->dev, "failed to register SPI host\n");
@@ -746,9 +717,16 @@ static void ep93xx_spi_remove(struct platform_device *pdev)
ep93xx_spi_release_dma(espi);
}
+static const struct of_device_id ep93xx_spi_of_ids[] = {
+ { .compatible = "cirrus,ep9301-spi" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_spi_of_ids);
+
static struct platform_driver ep93xx_spi_driver = {
.driver = {
.name = "ep93xx-spi",
+ .of_match_table = ep93xx_spi_of_ids,
},
.probe = ep93xx_spi_probe,
.remove_new = ep93xx_spi_remove,
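
With the platform-data lookup gone, the driver asks the DMA engine core for its channels by name, resolved from the device tree (the "rx"/"tx" strings must match the node's dma-names). -EPROBE_DEFER aborts probe so it can be retried once the DMA controller appears, while any other failure drops the driver back to PIO; dev_err_probe() returns the error and demotes the deferral message to debug level. A sketch of the request pattern:

```c
#include <linux/dmaengine.h>
#include <linux/device.h>

static int demo_setup_dma(struct device *dev, struct dma_chan **rx,
			  struct dma_chan **tx)
{
	*rx = dma_request_chan(dev, "rx");	/* resolved via dma-names */
	if (IS_ERR(*rx))
		return dev_err_probe(dev, PTR_ERR(*rx), "rx DMA setup failed");

	*tx = dma_request_chan(dev, "tx");
	if (IS_ERR(*tx)) {
		dma_release_channel(*rx);
		return dev_err_probe(dev, PTR_ERR(*tx), "tx DMA setup failed");
	}

	return 0;
}
```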
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 8ecb426be45c..977e8b55c82b 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -986,6 +986,7 @@ static void fsl_lpspi_remove(struct platform_device *pdev)
fsl_lpspi_dma_exit(controller);
+ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
pm_runtime_disable(fsl_lpspi->dev);
}
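
As in the atmel-quadspi change above, remove() now also calls pm_runtime_dont_use_autosuspend(), which pairs with the pm_runtime_use_autosuspend() these drivers do in probe(); runtime-PM accounting expects the two to balance. Sketch of the teardown ordering, assuming that pairing:

```c
#include <linux/pm_runtime.h>

static void demo_remove(struct device *dev)
{
	/* undo pm_runtime_use_autosuspend() from probe, then disable */
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);	/* balance a reference taken in probe */
}
```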
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5539c5d139d4..653f82984216 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -685,7 +685,6 @@ static const struct file_operations spidev_fops = {
.compat_ioctl = spidev_compat_ioctl,
.open = spidev_open,
.release = spidev_release,
- .llseek = no_llseek,
};
/*-------------------------------------------------------------------------*/
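
Dropping .llseek = no_llseek follows the kernel-wide removal of no_llseek: since FMODE_LSEEK is only set for files whose fops provide ->llseek, leaving the field NULL already makes the device non-seekable. Sketched under that assumption:

```c
#include <linux/fs.h>
#include <linux/module.h>

/* With a NULL ->llseek, seeks on this file fail with -ESPIPE, which is
 * exactly what .llseek = no_llseek used to arrange explicitly. */
static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	/* no .llseek: the file is non-seekable by default */
};
```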
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index db4a392841b1..3fb68d60dfc1 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -54,8 +54,6 @@ source "drivers/staging/fbtft/Kconfig"
source "drivers/staging/most/Kconfig"
-source "drivers/staging/ks7010/Kconfig"
-
source "drivers/staging/greybus/Kconfig"
source "drivers/staging/vc04_services/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 5390879b5d1b..c977aa13fad4 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -17,7 +17,6 @@ obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
-obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 0be7c2d51548..050fc2367c12 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -35,8 +35,6 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
devcode = read_devicecode(par);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n",
- devcode);
if ((devcode != 0x0000) && (devcode != 0x9320))
dev_warn(par->info->device,
"Unrecognized Device code: 0x%04X (expected 0x9320)\n",
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index 398bdbf53c9a..0ab1de6647d0 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -41,13 +41,6 @@ static int init_display(struct fbtft_par *par)
{
gpiod_set_value(par->gpio.dc, 1);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s()\n", __func__);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "display size %dx%d\n",
- par->info->var.xres,
- par->info->var.yres);
-
par->fbtftops.reset(par);
if ((par->info->var.xres == 320) && (par->info->var.yres == 240)) {
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
index 9685ca516a0e..e4c50c1ffed0 100644
--- a/drivers/staging/fbtft/fb_sh1106.c
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -88,9 +88,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
-
write_reg(par, on ? 0xAE : 0xAF);
return 0;
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index f27bab38b3ec..255a6d21ca8e 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -93,9 +93,6 @@ static int set_var(struct fbtft_par *par)
{
if (par->fbtftops.init_display != init_display) {
/* don't risk messing up register 11h */
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s: skipping since custom init_display() is used\n",
- __func__);
return 0;
}
diff --git a/drivers/staging/fbtft/fb_ssd1306.c b/drivers/staging/fbtft/fb_ssd1306.c
index 6cf9df579e88..478d710469b9 100644
--- a/drivers/staging/fbtft/fb_ssd1306.c
+++ b/drivers/staging/fbtft/fb_ssd1306.c
@@ -148,9 +148,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
-
if (on)
write_reg(par, 0xAE);
else
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
index 796a2ac3e194..256b0b87a930 100644
--- a/drivers/staging/fbtft/fb_ssd1325.c
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -72,10 +72,6 @@ static uint8_t rgb565_to_g16(u16 pixel)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
- "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe,
- ye);
-
write_reg(par, 0x75);
write_reg(par, 0x00);
write_reg(par, 0x3f);
@@ -86,9 +82,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
-
if (on)
write_reg(par, 0xAE);
else
@@ -109,8 +102,6 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
{
int i;
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
-
for (i = 0; i < GAMMA_LEN; i++) {
if (i > 0 && curves[i] < 1) {
dev_err(par->info->device,
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index ec5eced7f8cb..06b7056d6c71 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -167,8 +167,6 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
if (on)
write_reg(par, 0xAE);
else
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index ca2cba2185ae..f6db2933ebba 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -72,9 +72,6 @@ static int set_var(struct fbtft_par *par)
if (par->fbtftops.init_display != init_display) {
/* don't risk messing up register A0h */
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "%s: skipping since custom init_display() is used\n",
- __func__);
return 0;
}
@@ -213,7 +210,7 @@ static void register_onboard_backlight(struct fbtft_par *par)
struct backlight_properties bl_props = { 0, };
bl_props.type = BACKLIGHT_RAW;
- bl_props.power = FB_BLANK_POWERDOWN;
+ bl_props.power = BACKLIGHT_POWER_OFF;
bd = backlight_device_register(dev_driver_string(par->info->device),
par->info->device, par, &bl_ops,
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index f61e373c75e9..ca35b386a12d 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -135,9 +135,6 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int blank(struct fbtft_par *par, bool on)
{
- fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
- __func__, on ? "true" : "false");
-
if (on)
write_reg(par, 0xA8 | 0x00);
else
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 3d422bc11641..30e436ff19e4 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -129,9 +129,6 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
int ret = 0;
size_t startbyte_size = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
@@ -182,9 +179,6 @@ int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len)
int i;
int ret = 0;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
if (!par->txbuf.buf) {
dev_err(par->info->device, "%s: txbuf.buf is NULL\n", __func__);
return -1;
@@ -232,9 +226,6 @@ int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s(offset=%zu, len=%zu)\n",
- __func__, offset, len);
-
vmem16 = (u16 *)(par->info->screen_buffer + offset);
/* no need for buffered write with 16-bit bus */
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 8e2fd0c0fee2..4cfa494243b9 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -152,7 +152,7 @@ static int fbtft_backlight_get_brightness(struct backlight_device *bd)
void fbtft_unregister_backlight(struct fbtft_par *par)
{
if (par->info->bl_dev) {
- par->info->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ par->info->bl_dev->props.power = BACKLIGHT_POWER_OFF;
backlight_update_status(par->info->bl_dev);
backlight_device_unregister(par->info->bl_dev);
par->info->bl_dev = NULL;
@@ -178,7 +178,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
bl_props.type = BACKLIGHT_RAW;
/* Assume backlight is off, get polarity from current state of pin */
- bl_props.power = FB_BLANK_POWERDOWN;
+ bl_props.power = BACKLIGHT_POWER_OFF;
if (!gpiod_get_value(par->gpio.led[0]))
par->polarity = true;
@@ -215,8 +215,6 @@ static void fbtft_reset(struct fbtft_par *par)
if (!par->gpio.reset)
return;
- fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
-
gpiod_set_value_cansleep(par->gpio.reset, 1);
usleep_range(20, 40);
gpiod_set_value_cansleep(par->gpio.reset, 0);
@@ -801,7 +799,7 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
/* Turn on backlight if available */
if (fb_info->bl_dev) {
- fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
+ fb_info->bl_dev->props.power = BACKLIGHT_POWER_ON;
fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
}
@@ -1052,8 +1050,6 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
struct fbtft_platform_data *pdata = par->pdata;
int i;
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
!par->gpio.dc) {
dev_err(par->info->device,
@@ -1157,9 +1153,6 @@ int fbtft_probe_common(struct fbtft_display *display,
else
dev = &pdev->dev;
- if (unlikely(display->debug & DEBUG_DRIVER_INIT_FUNCTIONS))
- dev_info(dev, "%s()\n", __func__);
-
pdata = dev->platform_data;
if (!pdata) {
pdata = fbtft_properties_read(dev);
diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c
index 39e8d28066cb..e45c90a03a90 100644
--- a/drivers/staging/fbtft/fbtft-sysfs.c
+++ b/drivers/staging/fbtft/fbtft-sysfs.c
@@ -27,13 +27,9 @@ int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves,
int curve_counter, value_counter;
int _count;
- fbtft_par_dbg(DEBUG_SYSFS, par, "%s() str=\n", __func__);
-
if (!str || !curves)
return -EINVAL;
- fbtft_par_dbg(DEBUG_SYSFS, par, "%s\n", str);
-
tmp = kmemdup(str, size + 1, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index f86ed9d470b8..3e00a26a29d5 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -202,6 +202,7 @@ struct fbtft_par {
u8 *buf;
u8 startbyte;
struct fbtft_ops fbtftops;
+ /* Spinlock to ensure thread-safe access to dirty_lines_start and dirty_lines_end */
spinlock_t dirty_lock;
unsigned int dirty_lines_start;
unsigned int dirty_lines_end;
@@ -218,6 +219,7 @@ struct fbtft_par {
} gpio;
const s16 *init_sequence;
struct {
+ /* Mutex to synchronize access to the gamma curve */
struct mutex lock;
u32 *curves;
int num_values;
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
index 5fc469101fc1..3e09147435a5 100644
--- a/drivers/staging/greybus/gb-camera.h
+++ b/drivers/staging/greybus/gb-camera.h
@@ -92,8 +92,8 @@ struct gb_camera_ops {
unsigned int *flags, struct gb_camera_stream *streams,
struct gb_camera_csi_params *csi_params);
int (*capture)(void *priv, u32 request_id,
- unsigned int streams, unsigned int num_frames,
- size_t settings_size, const void *settings);
+ unsigned int streams, unsigned int num_frames,
+ size_t settings_size, const void *settings);
int (*flush)(void *priv, u32 *request_id);
};
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index 0e4ae01eb00f..24e9c909fa02 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -490,10 +490,10 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
int ret;
u8 i;
- /* Allocate master with space for data */
- ctlr = spi_alloc_master(dev, sizeof(*spi));
+ /* Allocate host with space for data */
+ ctlr = spi_alloc_host(dev, sizeof(*spi));
if (!ctlr) {
- dev_err(dev, "cannot alloc SPI master\n");
+ dev_err(dev, "cannot alloc SPI host\n");
return -ENOMEM;
}
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index cd00d9607565..4ae1a7039418 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -547,7 +547,8 @@ static int ad5933_ring_preenable(struct iio_dev *indio_dev)
struct ad5933_state *st = iio_priv(indio_dev);
int ret;
- if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ if (bitmap_empty(indio_dev->active_scan_mask,
+ iio_get_masklength(indio_dev)))
return -EINVAL;
ret = ad5933_reset(st);
@@ -625,7 +626,7 @@ static void ad5933_work(struct work_struct *work)
if (status & AD5933_STAT_DATA_VALID) {
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
+ iio_get_masklength(indio_dev));
ret = ad5933_i2c_read(st->client,
test_bit(1, indio_dev->active_scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
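
Direct reads of indio_dev->masklength are being retired in IIO in favour of the iio_get_masklength() accessor, ahead of the field becoming private to the core; behaviour is unchanged. Sketch:

```c
#include <linux/bitmap.h>
#include <linux/iio/iio.h>

/* Count enabled scan channels via the accessor instead of touching
 * indio_dev->masklength directly. */
static unsigned int demo_enabled_channels(struct iio_dev *indio_dev)
{
	return bitmap_weight(indio_dev->active_scan_mask,
			     iio_get_masklength(indio_dev));
}
```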
diff --git a/drivers/staging/ks7010/Kconfig b/drivers/staging/ks7010/Kconfig
deleted file mode 100644
index 8ea6c0928679..000000000000
--- a/drivers/staging/ks7010/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config KS7010
- tristate "KeyStream KS7010 SDIO support"
- depends on MMC && WIRELESS
- select WIRELESS_EXT
- select WEXT_PRIV
- select FW_LOADER
- select CRYPTO
- select CRYPTO_HASH
- select CRYPTO_MICHAEL_MIC
- help
- This is a driver for KeyStream KS7010 based SDIO WIFI cards. It is
- found on at least later Spectec SDW-821 (FCC-ID "S2Y-WLAN-11G-K" only,
- sadly not FCC-ID "S2Y-WLAN-11B-G") and Spectec SDW-823 microSD cards.
diff --git a/drivers/staging/ks7010/Makefile b/drivers/staging/ks7010/Makefile
deleted file mode 100644
index 009851a32310..000000000000
--- a/drivers/staging/ks7010/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_KS7010) += ks7010.o
-
-ks7010-y := ks_hostif.o ks_wlan_net.o ks7010_sdio.o
diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO
deleted file mode 100644
index 80c97543b977..000000000000
--- a/drivers/staging/ks7010/TODO
+++ /dev/null
@@ -1,36 +0,0 @@
-KS7010 Linux driver
-===================
-
-This driver is based on source code from the Ben Nanonote extra repository [1]
-which is based on the original v007 release from Renesas [2]. Some more
-background info about the chipset can be found here [3] and here [4]. Thank
-you to all which already participated in cleaning up the driver so far!
-
-[1] http://projects.qi-hardware.com/index.php/p/openwrt-packages/source/tree/master/ks7010/src
-[2] http://downloads.qi-hardware.com/software/ks7010_sdio_v007.tar.bz2
-[3] http://en.qi-hardware.com/wiki/Ben_NanoNote_Wi-Fi
-[4] https://wikidevi.com/wiki/Renesas
-
-TODO
-----
-
-First a few words what not to do (at least not blindly):
-
-- don't be overly strict with the 80 char limit. Only if it REALLY makes the
- code more readable
-
-Now the TODOs:
-
-- fix codechecker warnings (checkpatch, sparse, smatch). But PLEASE make sure
- that you are not only silencing the warning but really fixing code. You
- should understand the change you submit.
-- fix the 'card removal' event when card is inserted when booting
-- check what other upstream wireless mechanisms can be used instead of the
- custom ones here
-- Switch to use LIB80211.
-- Switch to use MAC80211.
-- Switch to use CFG80211.
-
-Please send any patches to:
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
deleted file mode 100644
index 1eee774319ad..000000000000
--- a/drivers/staging/ks7010/eap_packet.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef EAP_PACKET_H
-#define EAP_PACKET_H
-
-#include <linux/compiler.h>
-#include <linux/bitops.h>
-#include <uapi/linux/if_ether.h>
-
-struct ether_hdr {
- unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
- unsigned char h_source[ETH_ALEN]; /* source ether addr */
- unsigned char h_dest_snap;
- unsigned char h_source_snap;
- unsigned char h_command;
- unsigned char h_vendor_id[3];
- __be16 h_proto; /* packet type ID field */
- /* followed by length octets of data */
-} __packed;
-
-#define ETHER_HDR_SIZE sizeof(struct ether_hdr)
-
-struct ieee802_1x_hdr {
- unsigned char version;
- unsigned char type;
- unsigned short length;
- /* followed by length octets of data */
-} __packed;
-
-enum {
- IEEE802_1X_TYPE_EAP_PACKET = 0,
- IEEE802_1X_TYPE_EAPOL_START = 1,
- IEEE802_1X_TYPE_EAPOL_LOGOFF = 2,
- IEEE802_1X_TYPE_EAPOL_KEY = 3,
- IEEE802_1X_TYPE_EAPOL_ENCAPSULATED_ASF_ALERT = 4
-};
-
-#define WPA_NONCE_LEN 32
-#define WPA_REPLAY_COUNTER_LEN 8
-
-struct wpa_eapol_key {
- unsigned char type;
- __be16 key_info;
- unsigned short key_length;
- unsigned char replay_counter[WPA_REPLAY_COUNTER_LEN];
- unsigned char key_nonce[WPA_NONCE_LEN];
- unsigned char key_iv[16];
- unsigned char key_rsc[8];
- unsigned char key_id[8]; /* Reserved in IEEE 802.11i/RSN */
- unsigned char key_mic[16];
- unsigned short key_data_length;
- /* followed by key_data_length bytes of key_data */
-} __packed;
-
-#define WPA_KEY_INFO_TYPE_MASK GENMASK(2, 0)
-#define WPA_KEY_INFO_TYPE_HMAC_MD5_RC4 BIT(0)
-#define WPA_KEY_INFO_TYPE_HMAC_SHA1_AES BIT(1)
-#define WPA_KEY_INFO_KEY_TYPE BIT(3) /* 1 = Pairwise, 0 = Group key */
-/* bit4..5 is used in WPA, but is reserved in IEEE 802.11i/RSN */
-#define WPA_KEY_INFO_KEY_INDEX_MASK GENMASK(5, 4)
-#define WPA_KEY_INFO_KEY_INDEX_SHIFT 4
-#define WPA_KEY_INFO_INSTALL BIT(6) /* pairwise */
-#define WPA_KEY_INFO_TXRX BIT(6) /* group */
-#define WPA_KEY_INFO_ACK BIT(7)
-#define WPA_KEY_INFO_MIC BIT(8)
-#define WPA_KEY_INFO_SECURE BIT(9)
-#define WPA_KEY_INFO_ERROR BIT(10)
-#define WPA_KEY_INFO_REQUEST BIT(11)
-#define WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */
-
-#endif /* EAP_PACKET_H */
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
deleted file mode 100644
index 8df0e77b57f6..000000000000
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ /dev/null
@@ -1,1143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for KeyStream, KS7010 based SDIO cards.
- *
- * Copyright (C) 2006-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang
- */
-
-#include <linux/atomic.h>
-#include <linux/firmware.h>
-#include <linux/jiffies.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/module.h>
-#include <linux/workqueue.h>
-#include "ks_wlan.h"
-#include "ks_hostif.h"
-
-#define ROM_FILE "ks7010sd.rom"
-
-/* SDIO KeyStream vendor and device */
-#define SDIO_VENDOR_ID_KS_CODE_A 0x005b
-#define SDIO_VENDOR_ID_KS_CODE_B 0x0023
-
-/* Older sources suggest earlier versions were named 7910 or 79xx */
-#define SDIO_DEVICE_ID_KS_7010 0x7910
-
-/* Read/Write Status Register */
-#define READ_STATUS_REG 0x000000
-#define WRITE_STATUS_REG 0x00000C
-enum reg_status_type {
- REG_STATUS_BUSY,
- REG_STATUS_IDLE
-};
-
-/* Read Index Register */
-#define READ_INDEX_REG 0x000004
-
-/* Read Data Size Register */
-#define READ_DATA_SIZE_REG 0x000008
-
-/* Write Index Register */
-#define WRITE_INDEX_REG 0x000010
-
-/*
- * Write Status/Read Data Size Register
- * for network packet (less than 2048 bytes data)
- */
-#define WSTATUS_RSIZE_REG 0x000014
-
-/* Write Status Register value */
-#define WSTATUS_MASK 0x80
-
-/* Read Data Size Register value [10:4] */
-#define RSIZE_MASK 0x7F
-
-/* ARM to SD interrupt Enable */
-#define INT_ENABLE_REG 0x000020
-/* ARM to SD interrupt Pending */
-#define INT_PENDING_REG 0x000024
-
-#define INT_GCR_B BIT(7)
-#define INT_GCR_A BIT(6)
-#define INT_WRITE_STATUS BIT(5)
-#define INT_WRITE_INDEX BIT(4)
-#define INT_WRITE_SIZE BIT(3)
-#define INT_READ_STATUS BIT(2)
-#define INT_READ_INDEX BIT(1)
-#define INT_READ_SIZE BIT(0)
-
-/* General Communication Register A */
-#define GCR_A_REG 0x000028
-enum gen_com_reg_a {
- GCR_A_INIT,
- GCR_A_REMAP,
- GCR_A_RUN
-};
-
-/* General Communication Register B */
-#define GCR_B_REG 0x00002C
-enum gen_com_reg_b {
- GCR_B_ACTIVE,
- GCR_B_DOZE
-};
-
-/* Wakeup Register */
-#define WAKEUP_REG 0x008018
-#define WAKEUP_REQ 0x5a
-
-/* AHB Data Window 0x010000-0x01FFFF */
-#define DATA_WINDOW 0x010000
-#define WINDOW_SIZE (64 * 1024)
-
-#define KS7010_IRAM_ADDRESS 0x06000000
-
-#define KS7010_IO_BLOCK_SIZE 512
-
-/**
- * struct ks_sdio_card - SDIO device data.
- *
- * Structure is used as the &struct sdio_func private data.
- *
- * @func: Pointer to the SDIO function device.
- * @priv: Pointer to the &struct net_device private data.
- */
-struct ks_sdio_card {
- struct sdio_func *func;
- struct ks_wlan_private *priv;
-};
-
-static struct sdio_func *ks7010_to_func(struct ks_wlan_private *priv)
-{
- struct ks_sdio_card *ks_sdio = priv->if_hw;
-
- return ks_sdio->func;
-}
-
-/* Read single byte from device address into byte (CMD52) */
-static int ks7010_sdio_readb(struct ks_wlan_private *priv,
- u32 address, u8 *byte)
-{
- struct sdio_func *func = ks7010_to_func(priv);
- int ret;
-
- *byte = sdio_readb(func, address, &ret);
-
- return ret;
-}
-
-/* Read length bytes from device address into buffer (CMD53) */
-static int ks7010_sdio_read(struct ks_wlan_private *priv, u32 address,
- u8 *buffer, unsigned int length)
-{
- struct sdio_func *func = ks7010_to_func(priv);
-
- return sdio_memcpy_fromio(func, buffer, address, length);
-}
-
-/* Write single byte to device address (CMD52) */
-static int ks7010_sdio_writeb(struct ks_wlan_private *priv,
- u32 address, u8 byte)
-{
- struct sdio_func *func = ks7010_to_func(priv);
- int ret;
-
- sdio_writeb(func, byte, address, &ret);
-
- return ret;
-}
-
-/* Write length bytes to device address from buffer (CMD53) */
-static int ks7010_sdio_write(struct ks_wlan_private *priv, u32 address,
- u8 *buffer, unsigned int length)
-{
- struct sdio_func *func = ks7010_to_func(priv);
-
- return sdio_memcpy_toio(func, address, buffer, length);
-}
-
-static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
-{
- int ret;
-
- /* clear request */
- atomic_set(&priv->sleepstatus.doze_request, 0);
-
- if (atomic_read(&priv->sleepstatus.status) == 0) {
- ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE);
- if (ret) {
- netdev_err(priv->net_dev, "write GCR_B_REG\n");
- goto set_sleep_mode;
- }
- atomic_set(&priv->sleepstatus.status, 1);
- priv->last_doze = jiffies;
- }
-
-set_sleep_mode:
- priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
-}
-
-static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
-{
- int ret;
-
- /* clear request */
- atomic_set(&priv->sleepstatus.wakeup_request, 0);
-
- if (atomic_read(&priv->sleepstatus.status) == 1) {
- ret = ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ);
- if (ret) {
- netdev_err(priv->net_dev, "write WAKEUP_REG\n");
- goto set_sleep_mode;
- }
- atomic_set(&priv->sleepstatus.status, 0);
- priv->last_wakeup = jiffies;
- ++priv->wakeup_count;
- }
-
-set_sleep_mode:
- priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
-}
-
-void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
-{
- int ret;
-
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- ret = ks7010_sdio_writeb(priv, WAKEUP_REG, WAKEUP_REQ);
- if (ret)
- netdev_err(priv->net_dev, "write WAKEUP_REG\n");
-
- priv->last_wakeup = jiffies;
- ++priv->wakeup_count;
- }
-}
-
-static void _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
-{
- u8 byte;
- int ret;
-
- if (priv->reg.power_mgmt == POWER_MGMT_ACTIVE)
- return;
-
- if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
- return;
-
- if (!is_connect_status(priv->connect_status))
- return;
-
- if (priv->dev_state != DEVICE_STATE_SLEEP)
- return;
-
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE)
- return;
-
- netdev_dbg(priv->net_dev,
- "STATUS:\n"
- "- psstatus.status = %d\n"
- "- psstatus.confirm_wait = %d\n"
- "- psstatus.snooze_guard = %d\n"
- "- txq_count = %d\n",
- atomic_read(&priv->psstatus.status),
- atomic_read(&priv->psstatus.confirm_wait),
- atomic_read(&priv->psstatus.snooze_guard),
- txq_count(priv));
-
- if (atomic_read(&priv->psstatus.confirm_wait) ||
- atomic_read(&priv->psstatus.snooze_guard) ||
- txq_has_space(priv)) {
- queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
- return;
- }
-
- ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &byte);
- if (ret) {
- netdev_err(priv->net_dev, "read INT_PENDING_REG\n");
- goto queue_delayed_work;
- }
- if (byte)
- goto queue_delayed_work;
-
- ret = ks7010_sdio_writeb(priv, GCR_B_REG, GCR_B_DOZE);
- if (ret) {
- netdev_err(priv->net_dev, "write GCR_B_REG\n");
- goto queue_delayed_work;
- }
- atomic_set(&priv->psstatus.status, PS_SNOOZE);
-
- return;
-
-queue_delayed_work:
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
-}
-
-int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
-{
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- return 0;
-}
-
-static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
- unsigned long size,
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb),
- struct sk_buff *skb)
-{
- struct tx_device_buffer *sp;
- int ret;
-
- if (priv->dev_state < DEVICE_STATE_BOOT) {
- ret = -EPERM;
- goto err_complete;
- }
-
- if ((TX_DEVICE_BUFF_SIZE - 1) <= txq_count(priv)) {
- netdev_err(priv->net_dev, "tx buffer overflow\n");
- ret = -EOVERFLOW;
- goto err_complete;
- }
-
- sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qtail];
- sp->sendp = p;
- sp->size = size;
- sp->complete_handler = complete_handler;
- sp->skb = skb;
- inc_txqtail(priv);
-
- return 0;
-
-err_complete:
- kfree(p);
- if (complete_handler)
- (*complete_handler)(priv, skb);
-
- return ret;
-}
-
-/* write data */
-static int write_to_device(struct ks_wlan_private *priv, u8 *buffer,
- unsigned long size)
-{
- struct hostif_hdr *hdr;
- int ret;
-
- hdr = (struct hostif_hdr *)buffer;
-
- if (le16_to_cpu(hdr->event) < HIF_DATA_REQ ||
- le16_to_cpu(hdr->event) > HIF_REQ_MAX) {
- netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event);
- return 0;
- }
-
- ret = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
- if (ret) {
- netdev_err(priv->net_dev, "write DATA_WINDOW\n");
- return ret;
- }
-
- ret = ks7010_sdio_writeb(priv, WRITE_STATUS_REG, REG_STATUS_BUSY);
- if (ret) {
- netdev_err(priv->net_dev, "write WRITE_STATUS_REG\n");
- return ret;
- }
-
- return 0;
-}
-
-static void tx_device_task(struct ks_wlan_private *priv)
-{
- struct tx_device_buffer *sp;
- int ret;
-
- if (!txq_has_space(priv) ||
- atomic_read(&priv->psstatus.status) == PS_SNOOZE)
- return;
-
- sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
- if (priv->dev_state >= DEVICE_STATE_BOOT) {
- ret = write_to_device(priv, sp->sendp, sp->size);
- if (ret) {
- netdev_err(priv->net_dev,
- "write_to_device error !!(%d)\n", ret);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- return;
- }
- }
- kfree(sp->sendp);
- if (sp->complete_handler) /* TX Complete */
- (*sp->complete_handler)(priv, sp->skb);
- inc_txqhead(priv);
-
- if (txq_has_space(priv))
- queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
-}
-
-int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb),
- struct sk_buff *skb)
-{
- int result;
- struct hostif_hdr *hdr;
-
- hdr = (struct hostif_hdr *)p;
-
- if (le16_to_cpu(hdr->event) < HIF_DATA_REQ ||
- le16_to_cpu(hdr->event) > HIF_REQ_MAX) {
- netdev_err(priv->net_dev, "unknown event=%04X\n", hdr->event);
- return 0;
- }
-
- /* add event to hostt buffer */
- priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event);
- priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
-
- spin_lock_bh(&priv->tx_dev.tx_dev_lock);
- result = enqueue_txdev(priv, p, size, complete_handler, skb);
- spin_unlock_bh(&priv->tx_dev.tx_dev_lock);
-
- if (txq_has_space(priv))
- queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
-
- return result;
-}
-
-static void rx_event_task(struct tasklet_struct *t)
-{
- struct ks_wlan_private *priv = from_tasklet(priv, t, rx_bh_task);
- struct rx_device_buffer *rp;
-
- if (rxq_has_space(priv) && priv->dev_state >= DEVICE_STATE_BOOT) {
- rp = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead];
- hostif_receive(priv, rp->data, rp->size);
- inc_rxqhead(priv);
-
- if (rxq_has_space(priv))
- tasklet_schedule(&priv->rx_bh_task);
- }
-}
-
-static void ks_wlan_hw_rx(struct ks_wlan_private *priv, size_t size)
-{
- int ret;
- struct rx_device_buffer *rx_buffer;
- struct hostif_hdr *hdr;
- u16 event = 0;
-
- /* receive data */
- if (rxq_count(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
- netdev_err(priv->net_dev, "rx buffer overflow\n");
- return;
- }
- rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
-
- ret = ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
- hif_align_size(size));
- if (ret)
- return;
-
- /* length check */
- if (size > 2046 || size == 0) {
-#ifdef DEBUG
- print_hex_dump_bytes("INVALID DATA dump: ",
- DUMP_PREFIX_OFFSET,
- rx_buffer->data, 32);
-#endif
- ret = ks7010_sdio_writeb(priv, READ_STATUS_REG,
- REG_STATUS_IDLE);
- if (ret)
- netdev_err(priv->net_dev, "write READ_STATUS_REG\n");
-
- /* length check fail */
- return;
- }
-
- hdr = (struct hostif_hdr *)&rx_buffer->data[0];
- rx_buffer->size = le16_to_cpu(hdr->size) + sizeof(hdr->size);
- event = le16_to_cpu(hdr->event);
- inc_rxqtail(priv);
-
- ret = ks7010_sdio_writeb(priv, READ_STATUS_REG, REG_STATUS_IDLE);
- if (ret)
- netdev_err(priv->net_dev, "write READ_STATUS_REG\n");
-
- if (atomic_read(&priv->psstatus.confirm_wait) && is_hif_conf(event)) {
- netdev_dbg(priv->net_dev, "IS_HIF_CONF true !!\n");
- atomic_dec(&priv->psstatus.confirm_wait);
- }
-
- tasklet_schedule(&priv->rx_bh_task);
-}
-
-static void ks7010_rw_function(struct work_struct *work)
-{
- struct ks_wlan_private *priv = container_of(work,
- struct ks_wlan_private,
- rw_dwork.work);
- struct sdio_func *func = ks7010_to_func(priv);
- u8 byte;
- int ret;
-
- /* wait after DOZE */
- if (time_after(priv->last_doze + msecs_to_jiffies(30), jiffies)) {
- netdev_dbg(priv->net_dev, "wait after DOZE\n");
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- return;
- }
-
- /* wait after WAKEUP */
- while (time_after(priv->last_wakeup + msecs_to_jiffies(30), jiffies)) {
- netdev_dbg(priv->net_dev, "wait after WAKEUP\n");
- dev_info(&func->dev, "wake: %lu %lu\n",
- priv->last_wakeup + msecs_to_jiffies(30), jiffies);
- msleep(30);
- }
-
- sdio_claim_host(func);
-
- /* power save wakeup */
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (txq_has_space(priv)) {
- ks_wlan_hw_wakeup_request(priv);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- }
- goto release_host;
- }
-
- /* sleep mode doze */
- if (atomic_read(&priv->sleepstatus.doze_request) == 1) {
- ks_wlan_hw_sleep_doze_request(priv);
- goto release_host;
- }
- /* sleep mode wakeup */
- if (atomic_read(&priv->sleepstatus.wakeup_request) == 1) {
- ks_wlan_hw_sleep_wakeup_request(priv);
- goto release_host;
- }
-
- /* read (WriteStatus/ReadDataSize FN1:00_0014) */
- ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte);
- if (ret) {
- netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG psstatus=%d\n",
- atomic_read(&priv->psstatus.status));
- goto release_host;
- }
-
- if (byte & RSIZE_MASK) { /* Read schedule */
- ks_wlan_hw_rx(priv, (size_t)((byte & RSIZE_MASK) << 4));
- }
- if ((byte & WSTATUS_MASK))
- tx_device_task(priv);
-
- _ks_wlan_hw_power_save(priv);
-
-release_host:
- sdio_release_host(func);
-}
-
-static void ks_sdio_interrupt(struct sdio_func *func)
-{
- int ret;
- struct ks_sdio_card *card;
- struct ks_wlan_private *priv;
- u8 status, rsize, byte;
-
- card = sdio_get_drvdata(func);
- priv = card->priv;
-
- if (priv->dev_state < DEVICE_STATE_BOOT)
- goto queue_delayed_work;
-
- ret = ks7010_sdio_readb(priv, INT_PENDING_REG, &status);
- if (ret) {
- netdev_err(priv->net_dev, "read INT_PENDING_REG\n");
- goto queue_delayed_work;
- }
-
- /* schedule task for interrupt status */
- /* bit7 -> Write General Communication B register */
- /* read (General Communication B register) */
- /* bit5 -> Write Status Idle */
- /* bit2 -> Read Status Busy */
- if (status & INT_GCR_B ||
- atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- ret = ks7010_sdio_readb(priv, GCR_B_REG, &byte);
- if (ret) {
- netdev_err(priv->net_dev, "read GCR_B_REG\n");
- goto queue_delayed_work;
- }
- if (byte == GCR_B_ACTIVE) {
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- atomic_set(&priv->psstatus.status, PS_WAKEUP);
- priv->wakeup_count = 0;
- }
- complete(&priv->psstatus.wakeup_wait);
- }
- }
-
- do {
- /* read (WriteStatus/ReadDataSize FN1:00_0014) */
- ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE_REG, &byte);
- if (ret) {
- netdev_err(priv->net_dev, "read WSTATUS_RSIZE_REG\n");
- goto queue_delayed_work;
- }
- rsize = byte & RSIZE_MASK;
- if (rsize != 0) /* Read schedule */
- ks_wlan_hw_rx(priv, (size_t)(rsize << 4));
-
- if (byte & WSTATUS_MASK) {
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (txq_has_space(priv)) {
- ks_wlan_hw_wakeup_request(priv);
- queue_delayed_work(priv->wq,
- &priv->rw_dwork, 1);
- return;
- }
- } else {
- tx_device_task(priv);
- }
- }
- } while (rsize);
-
-queue_delayed_work:
- queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
-}
-
-static int trx_device_init(struct ks_wlan_private *priv)
-{
- priv->tx_dev.qhead = 0;
- priv->tx_dev.qtail = 0;
-
- priv->rx_dev.qhead = 0;
- priv->rx_dev.qtail = 0;
-
- spin_lock_init(&priv->tx_dev.tx_dev_lock);
- spin_lock_init(&priv->rx_dev.rx_dev_lock);
-
- tasklet_setup(&priv->rx_bh_task, rx_event_task);
-
- return 0;
-}
-
-static void trx_device_exit(struct ks_wlan_private *priv)
-{
- struct tx_device_buffer *sp;
-
- /* tx buffer clear */
- while (txq_has_space(priv)) {
- sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
- kfree(sp->sendp);
- if (sp->complete_handler) /* TX Complete */
- (*sp->complete_handler)(priv, sp->skb);
- inc_txqhead(priv);
- }
-
- tasklet_kill(&priv->rx_bh_task);
-}
-
-static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
-{
- int ret;
- unsigned char *data_buf;
-
- data_buf = kmemdup(&index, sizeof(u32), GFP_KERNEL);
- if (!data_buf)
- return -ENOMEM;
-
- ret = ks7010_sdio_write(priv, WRITE_INDEX_REG, data_buf, sizeof(index));
- if (ret)
- goto err_free_data_buf;
-
- ret = ks7010_sdio_write(priv, READ_INDEX_REG, data_buf, sizeof(index));
- if (ret)
- goto err_free_data_buf;
-
- return 0;
-
-err_free_data_buf:
- kfree(data_buf);
-
- return ret;
-}
-
-#define ROM_BUFF_SIZE (64 * 1024)
-static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
- u8 *data, unsigned int size)
-{
- int ret;
- u8 *read_buf;
-
- read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
- if (!read_buf)
- return -ENOMEM;
-
- ret = ks7010_sdio_read(priv, address, read_buf, size);
- if (ret)
- goto err_free_read_buf;
-
- if (memcmp(data, read_buf, size) != 0) {
- ret = -EIO;
- netdev_err(priv->net_dev, "data compare error (%d)\n", ret);
- goto err_free_read_buf;
- }
-
- return 0;
-
-err_free_read_buf:
- kfree(read_buf);
-
- return ret;
-}
-
-static int ks7010_copy_firmware(struct ks_wlan_private *priv,
- const struct firmware *fw_entry)
-{
- unsigned int length;
- unsigned int size;
- unsigned int offset;
- unsigned int n = 0;
- u8 *rom_buf;
- int ret;
-
- rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
- if (!rom_buf)
- return -ENOMEM;
-
- length = fw_entry->size;
-
- do {
- if (length >= ROM_BUFF_SIZE) {
- size = ROM_BUFF_SIZE;
- length = length - ROM_BUFF_SIZE;
- } else {
- size = length;
- length = 0;
- }
- if (size == 0)
- break;
-
- memcpy(rom_buf, fw_entry->data + n, size);
-
- offset = n;
- ret = ks7010_sdio_update_index(priv,
- KS7010_IRAM_ADDRESS + offset);
- if (ret)
- goto free_rom_buf;
-
- ret = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
- if (ret)
- goto free_rom_buf;
-
- ret = ks7010_sdio_data_compare(priv,
- DATA_WINDOW, rom_buf, size);
- if (ret)
- goto free_rom_buf;
-
- n += size;
-
- } while (size);
-
- ret = ks7010_sdio_writeb(priv, GCR_A_REG, GCR_A_REMAP);
-
-free_rom_buf:
- kfree(rom_buf);
- return ret;
-}
-
-static int ks7010_upload_firmware(struct ks_sdio_card *card)
-{
- struct ks_wlan_private *priv = card->priv;
- struct sdio_func *func = ks7010_to_func(priv);
- unsigned int n;
- u8 byte = 0;
- int ret;
- const struct firmware *fw_entry = NULL;
-
- sdio_claim_host(func);
-
- /* Firmware running ? */
- ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte);
- if (ret)
- goto release_host;
- if (byte == GCR_A_RUN) {
- netdev_dbg(priv->net_dev, "MAC firmware running ...\n");
- ret = -EBUSY;
- goto release_host;
- }
-
- ret = request_firmware(&fw_entry, ROM_FILE,
- &func->dev);
- if (ret)
- goto release_host;
-
- ret = ks7010_copy_firmware(priv, fw_entry);
- if (ret)
- goto release_firmware;
-
- /* Firmware running check */
- for (n = 0; n < 50; ++n) {
- usleep_range(10000, 11000); /* wait_ms(10); */
- ret = ks7010_sdio_readb(priv, GCR_A_REG, &byte);
- if (ret)
- goto release_firmware;
-
- if (byte == GCR_A_RUN)
- break;
- }
- if ((50) <= n) {
- netdev_err(priv->net_dev, "firmware can't start\n");
- ret = -EIO;
- goto release_firmware;
- }
-
- ret = 0;
-
- release_firmware:
- release_firmware(fw_entry);
- release_host:
- sdio_release_host(func);
-
- return ret;
-}
-
-static void ks7010_sme_enqueue_events(struct ks_wlan_private *priv)
-{
- static const u16 init_events[] = {
- SME_GET_EEPROM_CKSUM, SME_STOP_REQUEST,
- SME_RTS_THRESHOLD_REQUEST, SME_FRAGMENTATION_THRESHOLD_REQUEST,
- SME_WEP_INDEX_REQUEST, SME_WEP_KEY1_REQUEST,
- SME_WEP_KEY2_REQUEST, SME_WEP_KEY3_REQUEST,
- SME_WEP_KEY4_REQUEST, SME_WEP_FLAG_REQUEST,
- SME_RSN_ENABLED_REQUEST, SME_MODE_SET_REQUEST,
- SME_START_REQUEST
- };
- int ev;
-
- for (ev = 0; ev < ARRAY_SIZE(init_events); ev++)
- hostif_sme_enqueue(priv, init_events[ev]);
-}
-
-static void ks7010_card_init(struct ks_wlan_private *priv)
-{
- init_completion(&priv->confirm_wait);
-
- /* get mac address & firmware version */
- hostif_sme_enqueue(priv, SME_START);
-
- if (!wait_for_completion_interruptible_timeout
- (&priv->confirm_wait, 5 * HZ)) {
- netdev_dbg(priv->net_dev, "wait time out!! SME_START\n");
- }
-
- if (priv->mac_address_valid && priv->version_size != 0)
- priv->dev_state = DEVICE_STATE_PREINIT;
-
- ks7010_sme_enqueue_events(priv);
-
- if (!wait_for_completion_interruptible_timeout
- (&priv->confirm_wait, 5 * HZ)) {
- netdev_dbg(priv->net_dev, "wait time out!! wireless parameter set\n");
- }
-
- if (priv->dev_state >= DEVICE_STATE_PREINIT) {
- netdev_dbg(priv->net_dev, "DEVICE READY!!\n");
- priv->dev_state = DEVICE_STATE_READY;
- }
-}
-
-static void ks7010_init_defaults(struct ks_wlan_private *priv)
-{
- priv->reg.tx_rate = TX_RATE_AUTO;
- priv->reg.preamble = LONG_PREAMBLE;
- priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
- priv->reg.scan_type = ACTIVE_SCAN;
- priv->reg.beacon_lost_count = 20;
- priv->reg.rts = 2347UL;
- priv->reg.fragment = 2346UL;
- priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
- priv->reg.cts_mode = CTS_MODE_FALSE;
- priv->reg.rate_set.body[11] = TX_RATE_54M;
- priv->reg.rate_set.body[10] = TX_RATE_48M;
- priv->reg.rate_set.body[9] = TX_RATE_36M;
- priv->reg.rate_set.body[8] = TX_RATE_18M;
- priv->reg.rate_set.body[7] = TX_RATE_9M;
- priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
- priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
- priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
- priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
- priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
- priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
- priv->reg.tx_rate = TX_RATE_FULL_AUTO;
- priv->reg.rate_set.size = 12;
-}
-
-static int ks7010_sdio_setup_irqs(struct sdio_func *func)
-{
- int ret;
-
- /* interrupt disable */
- sdio_writeb(func, 0, INT_ENABLE_REG, &ret);
- if (ret)
- goto irq_error;
-
- sdio_writeb(func, 0xff, INT_PENDING_REG, &ret);
- if (ret)
- goto irq_error;
-
- /* setup interrupt handler */
- ret = sdio_claim_irq(func, ks_sdio_interrupt);
-
-irq_error:
- return ret;
-}
-
-static void ks7010_sdio_init_irqs(struct sdio_func *func,
- struct ks_wlan_private *priv)
-{
- u8 byte;
- int ret;
-
- /*
- * interrupt setting
- * clear Interrupt status write
- * (ARMtoSD_InterruptPending FN1:00_0024)
- */
- sdio_claim_host(func);
- ret = ks7010_sdio_writeb(priv, INT_PENDING_REG, 0xff);
- sdio_release_host(func);
- if (ret)
- netdev_err(priv->net_dev, "write INT_PENDING_REG\n");
-
- /* enable ks7010sdio interrupt */
- byte = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
- sdio_claim_host(func);
- ret = ks7010_sdio_writeb(priv, INT_ENABLE_REG, byte);
- sdio_release_host(func);
- if (ret)
- netdev_err(priv->net_dev, "write INT_ENABLE_REG\n");
-}
-
-static void ks7010_private_init(struct ks_wlan_private *priv,
- struct ks_sdio_card *card,
- struct net_device *netdev)
-{
- /* private memory initialize */
- priv->if_hw = card;
-
- priv->dev_state = DEVICE_STATE_PREBOOT;
- priv->net_dev = netdev;
- priv->firmware_version[0] = '\0';
- priv->version_size = 0;
- priv->last_doze = jiffies;
- priv->last_wakeup = jiffies;
- memset(&priv->nstats, 0, sizeof(priv->nstats));
- memset(&priv->wstats, 0, sizeof(priv->wstats));
-
- /* sleep mode */
- atomic_set(&priv->sleepstatus.status, 0);
- atomic_set(&priv->sleepstatus.doze_request, 0);
- atomic_set(&priv->sleepstatus.wakeup_request, 0);
-
- trx_device_init(priv);
- hostif_init(priv);
- ks_wlan_net_start(netdev);
- ks7010_init_defaults(priv);
-}
-
-static int ks7010_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *device)
-{
- struct ks_wlan_private *priv = NULL;
- struct net_device *netdev = NULL;
- struct ks_sdio_card *card;
- int ret;
-
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card)
- return -ENOMEM;
-
- card->func = func;
-
- sdio_claim_host(func);
-
- ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE);
- if (ret)
- goto err_free_card;
-
- dev_dbg(&card->func->dev, "multi_block=%d sdio_set_block_size()=%d %d\n",
- func->card->cccr.multi_block, func->cur_blksize, ret);
-
- ret = sdio_enable_func(func);
- if (ret)
- goto err_free_card;
-
- ret = ks7010_sdio_setup_irqs(func);
- if (ret)
- goto err_disable_func;
-
- sdio_release_host(func);
-
- sdio_set_drvdata(func, card);
-
- dev_dbg(&card->func->dev, "class = 0x%X, vendor = 0x%X, device = 0x%X\n",
- func->class, func->vendor, func->device);
-
- /* private memory allocate */
- netdev = alloc_etherdev(sizeof(*priv));
- if (!netdev) {
- dev_err(&card->func->dev, "Unable to alloc new net device\n");
- goto err_release_irq;
- }
-
- ret = dev_alloc_name(netdev, "wlan%d");
- if (ret < 0) {
- dev_err(&card->func->dev, "Couldn't get name!\n");
- goto err_free_netdev;
- }
-
- priv = netdev_priv(netdev);
-
- card->priv = priv;
- SET_NETDEV_DEV(netdev, &card->func->dev);
-
- ks7010_private_init(priv, card, netdev);
-
- ret = ks7010_upload_firmware(card);
- if (ret) {
- netdev_err(priv->net_dev,
- "firmware load failed !! ret = %d\n", ret);
- goto err_free_netdev;
- }
-
- ks7010_sdio_init_irqs(func, priv);
-
- priv->dev_state = DEVICE_STATE_BOOT;
-
- priv->wq = alloc_workqueue("wq", WQ_MEM_RECLAIM, 1);
- if (!priv->wq) {
- netdev_err(priv->net_dev, "create_workqueue failed !!\n");
- goto err_free_netdev;
- }
-
- INIT_DELAYED_WORK(&priv->rw_dwork, ks7010_rw_function);
- ks7010_card_init(priv);
-
- ret = register_netdev(priv->net_dev);
- if (ret)
- goto err_destroy_wq;
-
- return 0;
-
- err_destroy_wq:
- destroy_workqueue(priv->wq);
- err_free_netdev:
- free_netdev(netdev);
- err_release_irq:
- sdio_claim_host(func);
- sdio_release_irq(func);
- err_disable_func:
- sdio_disable_func(func);
- err_free_card:
- sdio_release_host(func);
- sdio_set_drvdata(func, NULL);
- kfree(card);
-
- return -ENODEV;
-}
-
-/* send stop request to MAC */
-static int send_stop_request(struct sdio_func *func)
-{
- struct hostif_stop_request *pp;
- struct ks_sdio_card *card;
- size_t size;
-
- card = sdio_get_drvdata(func);
-
- pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
- if (!pp)
- return -ENOMEM;
-
- size = sizeof(*pp) - sizeof(pp->header.size);
- pp->header.size = cpu_to_le16(size);
- pp->header.event = cpu_to_le16(HIF_STOP_REQ);
-
- sdio_claim_host(func);
- write_to_device(card->priv, (u8 *)pp, hif_align_size(sizeof(*pp)));
- sdio_release_host(func);
-
- kfree(pp);
- return 0;
-}
-
-static void ks7010_sdio_remove(struct sdio_func *func)
-{
- int ret;
- struct ks_sdio_card *card;
- struct ks_wlan_private *priv;
-
- card = sdio_get_drvdata(func);
-
- if (!card)
- return;
-
- priv = card->priv;
- if (!priv)
- goto err_free_card;
-
- ks_wlan_net_stop(priv->net_dev);
-
- /* interrupt disable */
- sdio_claim_host(func);
- sdio_writeb(func, 0, INT_ENABLE_REG, &ret);
- sdio_writeb(func, 0xff, INT_PENDING_REG, &ret);
- sdio_release_host(func);
-
- ret = send_stop_request(func);
- if (ret) /* memory allocation failure */
- goto err_free_card;
-
- if (priv->wq)
- destroy_workqueue(priv->wq);
-
- hostif_exit(priv);
-
- unregister_netdev(priv->net_dev);
-
- trx_device_exit(priv);
- free_netdev(priv->net_dev);
- card->priv = NULL;
-
- sdio_claim_host(func);
- sdio_release_irq(func);
- sdio_disable_func(func);
- sdio_release_host(func);
-err_free_card:
- sdio_set_drvdata(func, NULL);
- kfree(card);
-}
-
-static const struct sdio_device_id ks7010_sdio_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)},
- { /* all zero */ }
-};
-MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
-
-static struct sdio_driver ks7010_sdio_driver = {
- .name = "ks7010_sdio",
- .id_table = ks7010_sdio_ids,
- .probe = ks7010_sdio_probe,
- .remove = ks7010_sdio_remove,
-};
-
-module_sdio_driver(ks7010_sdio_driver);
-MODULE_AUTHOR("Sang Engineering, Qi-Hardware, KeyStream");
-MODULE_DESCRIPTION("Driver for KeyStream KS7010 based SDIO cards");
-MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE(ROM_FILE);
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
deleted file mode 100644
index af3825578d85..000000000000
--- a/drivers/staging/ks7010/ks_hostif.c
+++ /dev/null
@@ -1,2312 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for KeyStream wireless LAN cards.
- *
- * Copyright (C) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#include <crypto/hash.h>
-#include <linux/circ_buf.h>
-#include <linux/if_arp.h>
-#include <net/iw_handler.h>
-#include <uapi/linux/llc.h>
-#include "eap_packet.h"
-#include "ks_wlan.h"
-#include "ks_hostif.h"
-
-#define MICHAEL_MIC_KEY_LEN 8
-#define MICHAEL_MIC_LEN 8
-
-static inline void inc_smeqhead(struct ks_wlan_private *priv)
-{
- priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE;
-}
-
-static inline void inc_smeqtail(struct ks_wlan_private *priv)
-{
- priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE;
-}
-
-static inline unsigned int cnt_smeqbody(struct ks_wlan_private *priv)
-{
- return CIRC_CNT_TO_END(priv->sme_i.qhead, priv->sme_i.qtail,
- SME_EVENT_BUFF_SIZE);
-}
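The three helpers above implement the SME event queue as a modulo ring: head and tail advance with a wrap at SME_EVENT_BUFF_SIZE, and cnt_smeqbody() reports how many entries can be consumed. A standalone sketch of the same modulo arithmetic, with an illustrative queue size; note that the driver's cnt_smeqbody() uses CIRC_CNT_TO_END, which additionally truncates the count at the end of the buffer so entries can be drained as one contiguous run:

    #include <stdio.h>

    #define QSIZE 8 /* stands in for SME_EVENT_BUFF_SIZE */

    /* Modulo-increment ring indices, as inc_smeqhead()/inc_smeqtail()
     * do; the count of queued entries is (tail - head) mod QSIZE.
     */
    static unsigned int ring_inc(unsigned int idx)
    {
            return (idx + 1) % QSIZE;
    }

    static unsigned int ring_count(unsigned int head, unsigned int tail)
    {
            return (tail - head + QSIZE) % QSIZE;
    }

    int main(void)
    {
            unsigned int head = 6, tail = 6;

            tail = ring_inc(tail); /* enqueue */
            tail = ring_inc(tail); /* enqueue */
            tail = ring_inc(tail); /* enqueue, wraps past QSIZE - 1 */
            printf("queued=%u\n", ring_count(head, tail)); /* prints 3 */
            return 0;
    }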
-
-static inline u8 get_byte(struct ks_wlan_private *priv)
-{
- u8 data;
-
- data = *priv->rxp++;
-	/* the caller has already checked the remaining length */
- --(priv->rx_size);
- return data;
-}
-
-static inline u16 get_word(struct ks_wlan_private *priv)
-{
- u16 data;
-
- data = (get_byte(priv) & 0xff);
- data |= ((get_byte(priv) << 8) & 0xff00);
- return data;
-}
-
-static inline u32 get_dword(struct ks_wlan_private *priv)
-{
- u32 data;
-
- data = (get_byte(priv) & 0xff);
- data |= ((get_byte(priv) << 8) & 0x0000ff00);
- data |= ((get_byte(priv) << 16) & 0x00ff0000);
- data |= ((get_byte(priv) << 24) & 0xff000000);
- return data;
-}
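get_byte(), get_word() and get_dword() consume the receive buffer as a little-endian byte stream, advancing priv->rxp as they go. A self-contained sketch of the same assembly; in kernel code the get_unaligned_le16()/get_unaligned_le32() helpers express this directly:

    #include <stdio.h>
    #include <stdint.h>

    /* Assemble little-endian values from a byte stream, as get_word()
     * and get_dword() do with priv->rxp, advancing the cursor.
     */
    static uint16_t get_le16(const uint8_t **p)
    {
            uint16_t v = (*p)[0] | ((uint16_t)(*p)[1] << 8);

            *p += 2;
            return v;
    }

    static uint32_t get_le32(const uint8_t **p)
    {
            uint32_t v = (*p)[0] | ((uint32_t)(*p)[1] << 8) |
                         ((uint32_t)(*p)[2] << 16) | ((uint32_t)(*p)[3] << 24);

            *p += 4;
            return v;
    }

    int main(void)
    {
            const uint8_t buf[] = { 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };
            const uint8_t *p = buf;
            uint16_t w = get_le16(&p);
            uint32_t d = get_le32(&p);

            printf("%04x %08x\n", w, d); /* 1234 12345678 */
            return 0;
    }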
-
-static void ks_wlan_hw_wakeup_task(struct work_struct *work)
-{
- struct ks_wlan_private *priv;
- int ps_status;
- long time_left;
-
- priv = container_of(work, struct ks_wlan_private, wakeup_work);
- ps_status = atomic_read(&priv->psstatus.status);
-
- if (ps_status == PS_SNOOZE) {
- ks_wlan_hw_wakeup_request(priv);
- time_left = wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait,
- msecs_to_jiffies(20));
- if (time_left <= 0) {
- netdev_dbg(priv->net_dev, "wake up timeout or interrupted !!!\n");
- schedule_work(&priv->wakeup_work);
- return;
- }
- }
-}
-
-static void ks_wlan_do_power_save(struct ks_wlan_private *priv)
-{
- if (is_connect_status(priv->connect_status))
- hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
- else
- priv->dev_state = DEVICE_STATE_READY;
-}
-
-static
-int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info *ap_info)
-{
- struct local_ap *ap;
- union iwreq_data wrqu;
- struct net_device *netdev = priv->net_dev;
- u8 size;
-
- ap = &priv->current_ap;
-
- if (is_disconnect_status(priv->connect_status)) {
- memset(ap, 0, sizeof(struct local_ap));
- return -EPERM;
- }
-
- ether_addr_copy(ap->bssid, ap_info->bssid);
- memcpy(ap->ssid.body, priv->reg.ssid.body,
- priv->reg.ssid.size);
- ap->ssid.size = priv->reg.ssid.size;
- memcpy(ap->rate_set.body, ap_info->rate_set.body,
- ap_info->rate_set.size);
- ap->rate_set.size = ap_info->rate_set.size;
- if (ap_info->ext_rate_set.size != 0) {
- memcpy(&ap->rate_set.body[ap->rate_set.size],
- ap_info->ext_rate_set.body,
- ap_info->ext_rate_set.size);
- ap->rate_set.size += ap_info->ext_rate_set.size;
- }
- ap->channel = ap_info->ds_parameter.channel;
- ap->rssi = ap_info->rssi;
- ap->sq = ap_info->sq;
- ap->noise = ap_info->noise;
- ap->capability = le16_to_cpu(ap_info->capability);
- size = (ap_info->rsn.size <= RSN_IE_BODY_MAX) ?
- ap_info->rsn.size : RSN_IE_BODY_MAX;
- if ((ap_info->rsn_mode & RSN_MODE_WPA2) &&
- (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
- ap->rsn_ie.id = RSN_INFO_ELEM_ID;
- ap->rsn_ie.size = size;
- memcpy(ap->rsn_ie.body, ap_info->rsn.body, size);
- } else if ((ap_info->rsn_mode & RSN_MODE_WPA) &&
- (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
- ap->wpa_ie.id = WPA_INFO_ELEM_ID;
- ap->wpa_ie.size = size;
- memcpy(ap->wpa_ie.body, ap_info->rsn.body, size);
- } else {
- ap->rsn_ie.id = 0;
- ap->rsn_ie.size = 0;
- ap->wpa_ie.id = 0;
- ap->wpa_ie.size = 0;
- }
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (is_connect_status(priv->connect_status)) {
- ether_addr_copy(wrqu.ap_addr.sa_data, priv->current_ap.bssid);
- netdev_dbg(priv->net_dev,
- "IWEVENT: connect bssid=%pM\n",
- wrqu.ap_addr.sa_data);
- wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
- }
- netdev_dbg(priv->net_dev, "Link AP\n"
- "- bssid=%pM\n"
- "- essid=%s\n"
- "- rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n"
- "- channel=%d\n"
- "- rssi=%d\n"
- "- sq=%d\n"
- "- capability=%04X\n"
- "- rsn.mode=%d\n"
- "- rsn.size=%d\n"
- "- ext_rate_set_size=%d\n"
- "- rate_set_size=%d\n",
- ap->bssid,
- &ap->ssid.body[0],
- ap->rate_set.body[0], ap->rate_set.body[1],
- ap->rate_set.body[2], ap->rate_set.body[3],
- ap->rate_set.body[4], ap->rate_set.body[5],
- ap->rate_set.body[6], ap->rate_set.body[7],
- ap->channel, ap->rssi, ap->sq, ap->capability,
- ap_info->rsn_mode, ap_info->rsn.size,
- ap_info->ext_rate_set.size, ap_info->rate_set.size);
-
- return 0;
-}
-
-static u8 read_ie(unsigned char *bp, u8 max, u8 *body)
-{
- u8 size = (*(bp + 1) <= max) ? *(bp + 1) : max;
-
- memcpy(body, bp + 2, size);
- return size;
-}
-
-static int
-michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result)
-{
- u8 pad_data[4] = { priority, 0, 0, 0 };
- struct crypto_shash *tfm = NULL;
- struct shash_desc *desc = NULL;
- int ret;
-
- tfm = crypto_alloc_shash("michael_mic", 0, 0);
- if (IS_ERR(tfm)) {
- ret = PTR_ERR(tfm);
- goto err;
- }
-
- ret = crypto_shash_setkey(tfm, key, MICHAEL_MIC_KEY_LEN);
- if (ret < 0)
- goto err_free_tfm;
-
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto err_free_tfm;
- }
-
- desc->tfm = tfm;
-
- ret = crypto_shash_init(desc);
- if (ret < 0)
- goto err_free_desc;
-
-	/*
-	 * Compute the MIC value.
-	 *
-	 * IEEE802.11i page 47
- * Figure 43g TKIP MIC processing format
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- * |6 |6 |1 |3 |M |1 |1 |1 |1 |1 |1 |1 |1 | Octet
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7|
- * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
- */
-
- ret = crypto_shash_update(desc, data, 12);
- if (ret < 0)
- goto err_free_desc;
-
- ret = crypto_shash_update(desc, pad_data, 4);
- if (ret < 0)
- goto err_free_desc;
-
- ret = crypto_shash_finup(desc, data + 12, len - 12, result);
-
-err_free_desc:
- kfree_sensitive(desc);
-
-err_free_tfm:
- crypto_free_shash(tfm);
-
-err:
- return ret;
-}
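michael_mic() feeds the shash in exactly the order the block comment gives: the 12 DA/SA octets, then the priority octet padded with three zeros, then the remaining payload. A sketch of just that input framing; the Michael algorithm itself is delegated to the crypto API above, and the buffer names here are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Build the TKIP MIC input in the order the driver hashes it:
     * DA(6) | SA(6) | priority(1) | zero pad(3) | MSDU payload.
     */
    static size_t build_mic_input(uint8_t *out, const uint8_t *frame,
                                  size_t len, uint8_t priority)
    {
            const uint8_t pad[4] = { priority, 0, 0, 0 };
            size_t off = 0;

            memcpy(out + off, frame, 12);            /* DA + SA */
            off += 12;
            memcpy(out + off, pad, 4);               /* priority + 3 zeros */
            off += 4;
            memcpy(out + off, frame + 12, len - 12); /* payload */
            return off + (len - 12);
    }

    int main(void)
    {
            uint8_t frame[64] = { 0 }, mic_input[68];
            size_t n = build_mic_input(mic_input, frame, sizeof(frame), 0);

            printf("MIC input length: %zu\n", n); /* 68: frame + 4 pad bytes */
            return 0;
    }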
-
-static
-int get_ap_information(struct ks_wlan_private *priv, struct ap_info *ap_info,
- struct local_ap *ap)
-{
- unsigned char *bp;
- int bsize, offset;
-
- memset(ap, 0, sizeof(struct local_ap));
-
- ether_addr_copy(ap->bssid, ap_info->bssid);
- ap->rssi = ap_info->rssi;
- ap->sq = ap_info->sq;
- ap->noise = ap_info->noise;
- ap->capability = le16_to_cpu(ap_info->capability);
- ap->channel = ap_info->ch_info;
-
- bp = ap_info->body;
- bsize = le16_to_cpu(ap_info->body_size);
- offset = 0;
-
- while (bsize > offset) {
- switch (*bp) { /* Information Element ID */
- case WLAN_EID_SSID:
- ap->ssid.size = read_ie(bp, IEEE80211_MAX_SSID_LEN,
- ap->ssid.body);
- break;
- case WLAN_EID_SUPP_RATES:
- case WLAN_EID_EXT_SUPP_RATES:
- if ((*(bp + 1) + ap->rate_set.size) <=
- RATE_SET_MAX_SIZE) {
- memcpy(&ap->rate_set.body[ap->rate_set.size],
- bp + 2, *(bp + 1));
- ap->rate_set.size += *(bp + 1);
- } else {
- memcpy(&ap->rate_set.body[ap->rate_set.size],
- bp + 2,
- RATE_SET_MAX_SIZE - ap->rate_set.size);
- ap->rate_set.size +=
- (RATE_SET_MAX_SIZE - ap->rate_set.size);
- }
- break;
- case WLAN_EID_RSN:
- ap->rsn_ie.id = *bp;
- ap->rsn_ie.size = read_ie(bp, RSN_IE_BODY_MAX,
- ap->rsn_ie.body);
- break;
- case WLAN_EID_VENDOR_SPECIFIC: /* WPA */
- /* WPA OUI check */
- if (memcmp(bp + 2, CIPHER_ID_WPA_WEP40, 4) == 0) {
- ap->wpa_ie.id = *bp;
- ap->wpa_ie.size = read_ie(bp, RSN_IE_BODY_MAX,
- ap->wpa_ie.body);
- }
- break;
- case WLAN_EID_DS_PARAMS:
- case WLAN_EID_FH_PARAMS:
- case WLAN_EID_CF_PARAMS:
- case WLAN_EID_TIM:
- case WLAN_EID_IBSS_PARAMS:
- case WLAN_EID_COUNTRY:
- case WLAN_EID_ERP_INFO:
- break;
- default:
- netdev_err(priv->net_dev,
- "unknown Element ID=%d\n", *bp);
- break;
- }
-
- offset += 2; /* id & size field */
- offset += *(bp + 1); /* +size offset */
- bp += (*(bp + 1) + 2); /* pointer update */
- }
-
- return 0;
-}
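get_ap_information() walks the AP's information elements as id/length/body triples, advancing by length + 2 per element. A freestanding sketch of that walk; unlike the driver loop, it bounds-checks the advertised length against the remaining buffer before reading the body:

    #include <stdio.h>
    #include <stdint.h>

    /* Walk 802.11-style information elements: one byte ID, one byte
     * length, then 'length' bytes of body, as in get_ap_information().
     */
    static void walk_ies(const uint8_t *bp, size_t bsize)
    {
            size_t offset = 0;

            while (offset + 2 <= bsize) {
                    uint8_t id = bp[offset];
                    uint8_t len = bp[offset + 1];

                    if (offset + 2 + len > bsize)
                            break; /* truncated element */
                    printf("IE id=%u len=%u\n", id, len);
                    offset += 2 + len; /* id + size fields, then body */
            }
    }

    int main(void)
    {
            /* SSID "ab" (id 0), then a one-rate supported-rates element */
            const uint8_t ies[] = { 0x00, 0x02, 'a', 'b', 0x01, 0x01, 0x82 };

            walk_ies(ies, sizeof(ies));
            return 0;
    }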
-
-static
-int hostif_data_indication_wpa(struct ks_wlan_private *priv,
- unsigned short auth_type)
-{
- struct ether_hdr *eth_hdr;
- unsigned short eth_proto;
- unsigned char recv_mic[MICHAEL_MIC_LEN];
- char buf[128];
- unsigned long now;
- struct mic_failure *mic_failure;
- u8 mic[MICHAEL_MIC_LEN];
- union iwreq_data wrqu;
- unsigned int key_index = auth_type - 1;
- struct wpa_key *key = &priv->wpa.key[key_index];
-
- eth_hdr = (struct ether_hdr *)(priv->rxp);
- eth_proto = ntohs(eth_hdr->h_proto);
-
- if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) {
- netdev_err(priv->net_dev, "invalid data format\n");
- priv->nstats.rx_errors++;
- return -EINVAL;
- }
- if (((auth_type == TYPE_PMK1 &&
- priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) ||
- (auth_type == TYPE_GMK1 &&
- priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP) ||
- (auth_type == TYPE_GMK2 &&
- priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP)) &&
- key->key_len) {
- int ret;
-
- netdev_dbg(priv->net_dev, "TKIP: protocol=%04X: size=%u\n",
- eth_proto, priv->rx_size);
- /* MIC save */
- memcpy(&recv_mic[0],
- (priv->rxp) + ((priv->rx_size) - sizeof(recv_mic)),
- sizeof(recv_mic));
- priv->rx_size = priv->rx_size - sizeof(recv_mic);
-
- ret = michael_mic(key->rx_mic_key, priv->rxp, priv->rx_size,
- 0, mic);
- if (ret < 0)
- return ret;
- if (memcmp(mic, recv_mic, sizeof(mic)) != 0) {
- now = jiffies;
- mic_failure = &priv->wpa.mic_failure;
- /* MIC FAILURE */
- if (mic_failure->last_failure_time &&
- (now - mic_failure->last_failure_time) / HZ >= 60) {
- mic_failure->failure = 0;
- }
- netdev_err(priv->net_dev, "MIC FAILURE\n");
- if (mic_failure->failure == 0) {
- mic_failure->failure = 1;
- mic_failure->counter = 0;
- } else if (mic_failure->failure == 1) {
- mic_failure->failure = 2;
- mic_failure->counter =
- (u16)((now - mic_failure->last_failure_time) / HZ);
- /* range 1-60 */
- if (!mic_failure->counter)
- mic_failure->counter = 1;
- }
- priv->wpa.mic_failure.last_failure_time = now;
-
- /* needed parameters: count, keyid, key type, TSC */
- sprintf(buf,
- "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=%pM)",
- key_index,
- eth_hdr->h_dest[0] & 0x01 ? "broad" : "uni",
- eth_hdr->h_source);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu,
- buf);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static
-void hostif_data_indication(struct ks_wlan_private *priv)
-{
- unsigned int rx_ind_size; /* indicate data size */
- struct sk_buff *skb;
- u16 auth_type;
- unsigned char temp[256];
- struct ether_hdr *eth_hdr;
- struct ieee802_1x_hdr *aa1x_hdr;
- size_t size;
- int ret;
-
- /* min length check */
- if (priv->rx_size <= ETH_HLEN) {
- priv->nstats.rx_errors++;
- return;
- }
-
- auth_type = get_word(priv); /* AuthType */
- get_word(priv); /* Reserve Area */
-
- eth_hdr = (struct ether_hdr *)(priv->rxp);
-
- /* source address check */
- if (ether_addr_equal(&priv->eth_addr[0], eth_hdr->h_source)) {
- netdev_err(priv->net_dev, "invalid : source is own mac address !!\n");
- netdev_err(priv->net_dev, "eth_hdrernet->h_dest=%pM\n", eth_hdr->h_source);
- priv->nstats.rx_errors++;
- return;
- }
-
- /* for WPA */
- if (auth_type != TYPE_DATA && priv->wpa.rsn_enabled) {
- ret = hostif_data_indication_wpa(priv, auth_type);
- if (ret)
- return;
- }
-
- if ((priv->connect_status & FORCE_DISCONNECT) ||
- priv->wpa.mic_failure.failure == 2) {
- return;
- }
-
-	/* check the 13th byte of the rx data: the LLC DSAP */
- switch (*(priv->rxp + 12)) {
- case LLC_SAP_SNAP:
- rx_ind_size = priv->rx_size - 6;
- skb = dev_alloc_skb(rx_ind_size);
- if (!skb) {
- priv->nstats.rx_dropped++;
- return;
- }
- netdev_dbg(priv->net_dev, "SNAP, rx_ind_size = %d\n",
- rx_ind_size);
-
- size = ETH_ALEN * 2;
- skb_put_data(skb, priv->rxp, size);
-
- /* (SNAP+UI..) skip */
-
- size = rx_ind_size - (ETH_ALEN * 2);
- skb_put_data(skb, &eth_hdr->h_proto, size);
-
- aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + ETHER_HDR_SIZE);
- break;
- case LLC_SAP_NETBEUI:
- rx_ind_size = (priv->rx_size + 2);
- skb = dev_alloc_skb(rx_ind_size);
- if (!skb) {
- priv->nstats.rx_dropped++;
- return;
- }
- netdev_dbg(priv->net_dev, "NETBEUI/NetBIOS rx_ind_size=%d\n",
- rx_ind_size);
-
- /* 8802/FDDI MAC copy */
- skb_put_data(skb, priv->rxp, 12);
-
- /* NETBEUI size add */
- temp[0] = (((rx_ind_size - 12) >> 8) & 0xff);
- temp[1] = ((rx_ind_size - 12) & 0xff);
- skb_put_data(skb, temp, 2);
-
- /* copy after Type */
- skb_put_data(skb, priv->rxp + 12, rx_ind_size - 14);
-
- aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
- break;
- default: /* other rx data */
- netdev_err(priv->net_dev, "invalid data format\n");
- priv->nstats.rx_errors++;
- return;
- }
-
- if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY &&
- priv->wpa.rsn_enabled)
- atomic_set(&priv->psstatus.snooze_guard, 1);
-
- /* rx indication */
- skb->dev = priv->net_dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- priv->nstats.rx_packets++;
- priv->nstats.rx_bytes += rx_ind_size;
- netif_rx(skb);
-}
-
-static
-void hostif_mib_get_confirm(struct ks_wlan_private *priv)
-{
- struct net_device *dev = priv->net_dev;
- u32 mib_status;
- u32 mib_attribute;
-
- mib_status = get_dword(priv);
- mib_attribute = get_dword(priv);
- get_word(priv); /* mib_val_size */
- get_word(priv); /* mib_val_type */
-
- if (mib_status) {
- netdev_err(priv->net_dev, "attribute=%08X, status=%08X\n",
- mib_attribute, mib_status);
- return;
- }
-
- switch (mib_attribute) {
- case DOT11_MAC_ADDRESS:
- hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS);
- ether_addr_copy(priv->eth_addr, priv->rxp);
- priv->mac_address_valid = true;
- eth_hw_addr_set(dev, priv->eth_addr);
- netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
- break;
- case DOT11_PRODUCT_VERSION:
- priv->version_size = priv->rx_size;
- memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
- priv->firmware_version[priv->rx_size] = '\0';
- netdev_info(dev, "firmware ver. = %s\n",
- priv->firmware_version);
- hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
- complete(&priv->confirm_wait);
- break;
- case LOCAL_GAIN:
- memcpy(&priv->gain, priv->rxp, sizeof(priv->gain));
- netdev_dbg(priv->net_dev, "tx_mode=%d, rx_mode=%d, tx_gain=%d, rx_gain=%d\n",
- priv->gain.tx_mode, priv->gain.rx_mode,
- priv->gain.tx_gain, priv->gain.rx_gain);
- break;
- case LOCAL_EEPROM_SUM:
- memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum));
- if (priv->eeprom_sum.type != 0 &&
- priv->eeprom_sum.type != 1) {
- netdev_err(dev, "LOCAL_EEPROM_SUM error!\n");
- return;
- }
- priv->eeprom_checksum = (priv->eeprom_sum.type == 0) ?
- EEPROM_CHECKSUM_NONE :
- (priv->eeprom_sum.result == 0) ?
- EEPROM_NG : EEPROM_OK;
- break;
- default:
- netdev_err(priv->net_dev, "mib_attribute=%08x\n",
- (unsigned int)mib_attribute);
- break;
- }
-}
-
-static
-void hostif_mib_set_confirm(struct ks_wlan_private *priv)
-{
- u32 mib_status;
- u32 mib_attribute;
-
- mib_status = get_dword(priv);
- mib_attribute = get_dword(priv);
-
- if (mib_status) {
- /* in case of error */
- netdev_err(priv->net_dev, "error :: attribute=%08X, status=%08X\n",
- mib_attribute, mib_status);
- }
-
- switch (mib_attribute) {
- case DOT11_RTS_THRESHOLD:
- hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_CONFIRM);
- break;
- case DOT11_FRAGMENTATION_THRESHOLD:
- hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_ID:
- if (!priv->wpa.wpa_enabled)
- hostif_sme_enqueue(priv, SME_WEP_INDEX_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_VALUE1:
- if (priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_SET_PMK_TSC);
- else
- hostif_sme_enqueue(priv, SME_WEP_KEY1_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_VALUE2:
- if (priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_SET_GMK1_TSC);
- else
- hostif_sme_enqueue(priv, SME_WEP_KEY2_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_VALUE3:
- if (priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_SET_GMK2_TSC);
- else
- hostif_sme_enqueue(priv, SME_WEP_KEY3_CONFIRM);
- break;
- case DOT11_WEP_DEFAULT_KEY_VALUE4:
- if (!priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_WEP_KEY4_CONFIRM);
- break;
- case DOT11_PRIVACY_INVOKED:
- if (!priv->wpa.rsn_enabled)
- hostif_sme_enqueue(priv, SME_WEP_FLAG_CONFIRM);
- break;
- case DOT11_RSN_ENABLED:
- hostif_sme_enqueue(priv, SME_RSN_ENABLED_CONFIRM);
- break;
- case LOCAL_RSN_MODE:
- hostif_sme_enqueue(priv, SME_RSN_MODE_CONFIRM);
- break;
- case LOCAL_MULTICAST_ADDRESS:
- hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
- break;
- case LOCAL_MULTICAST_FILTER:
- hostif_sme_enqueue(priv, SME_MULTICAST_CONFIRM);
- break;
- case LOCAL_CURRENTADDRESS:
- priv->mac_address_valid = true;
- break;
- case DOT11_RSN_CONFIG_MULTICAST_CIPHER:
- hostif_sme_enqueue(priv, SME_RSN_MCAST_CONFIRM);
- break;
- case DOT11_RSN_CONFIG_UNICAST_CIPHER:
- hostif_sme_enqueue(priv, SME_RSN_UCAST_CONFIRM);
- break;
- case DOT11_RSN_CONFIG_AUTH_SUITE:
- hostif_sme_enqueue(priv, SME_RSN_AUTH_CONFIRM);
- break;
- case DOT11_GMK1_TSC:
- if (atomic_read(&priv->psstatus.snooze_guard))
- atomic_set(&priv->psstatus.snooze_guard, 0);
- break;
- case DOT11_GMK2_TSC:
- if (atomic_read(&priv->psstatus.snooze_guard))
- atomic_set(&priv->psstatus.snooze_guard, 0);
- break;
- case DOT11_PMK_TSC:
- case LOCAL_PMK:
- case LOCAL_GAIN:
- case LOCAL_WPS_ENABLE:
- case LOCAL_WPS_PROBE_REQ:
- case LOCAL_REGION:
- default:
- break;
- }
-}
-
-static
-void hostif_power_mgmt_confirm(struct ks_wlan_private *priv)
-{
- if (priv->reg.power_mgmt > POWER_MGMT_ACTIVE &&
- priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- atomic_set(&priv->psstatus.confirm_wait, 0);
- priv->dev_state = DEVICE_STATE_SLEEP;
- ks_wlan_hw_power_save(priv);
- } else {
- priv->dev_state = DEVICE_STATE_READY;
- }
-}
-
-static
-void hostif_sleep_confirm(struct ks_wlan_private *priv)
-{
- atomic_set(&priv->sleepstatus.doze_request, 1);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
-}
-
-static
-void hostif_start_confirm(struct ks_wlan_private *priv)
-{
- union iwreq_data wrqu;
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (is_connect_status(priv->connect_status)) {
- eth_zero_addr(wrqu.ap_addr.sa_data);
- wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
- }
- netdev_dbg(priv->net_dev, " scan_ind_count=%d\n", priv->scan_ind_count);
- hostif_sme_enqueue(priv, SME_START_CONFIRM);
-}
-
-static
-void hostif_connect_indication(struct ks_wlan_private *priv)
-{
- u16 connect_code;
- unsigned int tmp = 0;
- unsigned int old_status = priv->connect_status;
- struct net_device *netdev = priv->net_dev;
- union iwreq_data wrqu0;
-
- connect_code = get_word(priv);
-
- switch (connect_code) {
- case RESULT_CONNECT:
- if (!(priv->connect_status & FORCE_DISCONNECT))
- netif_carrier_on(netdev);
- tmp = FORCE_DISCONNECT & priv->connect_status;
-		priv->connect_status = tmp | CONNECT_STATUS;
- break;
- case RESULT_DISCONNECT:
- netif_carrier_off(netdev);
- tmp = FORCE_DISCONNECT & priv->connect_status;
-		priv->connect_status = tmp | DISCONNECT_STATUS;
- break;
- default:
- netdev_dbg(priv->net_dev, "unknown connect_code=%d :: scan_ind_count=%d\n",
- connect_code, priv->scan_ind_count);
- netif_carrier_off(netdev);
- tmp = FORCE_DISCONNECT & priv->connect_status;
-		priv->connect_status = tmp | DISCONNECT_STATUS;
- break;
- }
-
- get_current_ap(priv, (struct link_ap_info *)priv->rxp);
- if (is_connect_status(priv->connect_status) &&
- is_disconnect_status(old_status)) {
- /* for power save */
- atomic_set(&priv->psstatus.snooze_guard, 0);
- atomic_set(&priv->psstatus.confirm_wait, 0);
- }
- ks_wlan_do_power_save(priv);
-
- wrqu0.data.length = 0;
- wrqu0.data.flags = 0;
- wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
- if (is_disconnect_status(priv->connect_status) &&
- is_connect_status(old_status)) {
- eth_zero_addr(wrqu0.ap_addr.sa_data);
- netdev_dbg(priv->net_dev, "disconnect :: scan_ind_count=%d\n",
- priv->scan_ind_count);
- wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
- }
- priv->scan_ind_count = 0;
-}
-
-static
-void hostif_scan_indication(struct ks_wlan_private *priv)
-{
- int i;
- struct ap_info *ap_info;
-
- netdev_dbg(priv->net_dev,
- "scan_ind_count = %d\n", priv->scan_ind_count);
- ap_info = (struct ap_info *)(priv->rxp);
-
- if (priv->scan_ind_count) {
- /* bssid check */
- for (i = 0; i < priv->aplist.size; i++) {
- u8 *bssid = priv->aplist.ap[i].bssid;
-
- if (ether_addr_equal(ap_info->bssid, bssid))
- continue;
-
- if (ap_info->frame_type == IEEE80211_STYPE_PROBE_RESP)
- get_ap_information(priv, ap_info,
- &priv->aplist.ap[i]);
- return;
- }
- }
- priv->scan_ind_count++;
- if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) {
- netdev_dbg(priv->net_dev, " scan_ind_count=%d :: aplist.size=%d\n",
- priv->scan_ind_count, priv->aplist.size);
- get_ap_information(priv, (struct ap_info *)(priv->rxp),
- &priv->aplist.ap[priv->scan_ind_count - 1]);
- priv->aplist.size = priv->scan_ind_count;
- } else {
- netdev_dbg(priv->net_dev, " count over :: scan_ind_count=%d\n",
- priv->scan_ind_count);
- }
-}
-
-static
-void hostif_stop_confirm(struct ks_wlan_private *priv)
-{
- unsigned int tmp = 0;
- unsigned int old_status = priv->connect_status;
- struct net_device *netdev = priv->net_dev;
- union iwreq_data wrqu0;
-
- if (priv->dev_state == DEVICE_STATE_SLEEP)
- priv->dev_state = DEVICE_STATE_READY;
-
- /* disconnect indication */
- if (is_connect_status(priv->connect_status)) {
- netif_carrier_off(netdev);
- tmp = FORCE_DISCONNECT & priv->connect_status;
- priv->connect_status = tmp | DISCONNECT_STATUS;
- netdev_info(netdev, "IWEVENT: disconnect\n");
-
- wrqu0.data.length = 0;
- wrqu0.data.flags = 0;
- wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
- if (is_disconnect_status(priv->connect_status) &&
- is_connect_status(old_status)) {
- eth_zero_addr(wrqu0.ap_addr.sa_data);
- netdev_info(netdev, "IWEVENT: disconnect\n");
- wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
- }
- priv->scan_ind_count = 0;
- }
-
- hostif_sme_enqueue(priv, SME_STOP_CONFIRM);
-}
-
-static
-void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv)
-{
- priv->infra_status = 0; /* infrastructure mode cancel */
- hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
-}
-
-static
-void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
-{
- get_word(priv); /* result_code */
- priv->infra_status = 1; /* infrastructure mode set */
- hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
-}
-
-static
-void hostif_adhoc_set_confirm(struct ks_wlan_private *priv)
-{
- priv->infra_status = 1; /* infrastructure mode set */
- hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
-}
-
-static
-void hostif_associate_indication(struct ks_wlan_private *priv)
-{
- struct association_request *assoc_req;
- struct association_response *assoc_resp;
- unsigned char *pb;
- union iwreq_data wrqu;
- char buf[IW_CUSTOM_MAX];
- char *pbuf = &buf[0];
- int i;
-
- static const char associnfo_leader0[] = "ASSOCINFO(ReqIEs=";
- static const char associnfo_leader1[] = " RespIEs=";
-
- assoc_req = (struct association_request *)(priv->rxp);
- assoc_resp = (struct association_response *)(assoc_req + 1);
- pb = (unsigned char *)(assoc_resp + 1);
-
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(pbuf, associnfo_leader0, sizeof(associnfo_leader0) - 1);
- wrqu.data.length += sizeof(associnfo_leader0) - 1;
- pbuf += sizeof(associnfo_leader0) - 1;
-
- for (i = 0; i < le16_to_cpu(assoc_req->req_ies_size); i++)
- pbuf += sprintf(pbuf, "%02x", *(pb + i));
- wrqu.data.length += (le16_to_cpu(assoc_req->req_ies_size)) * 2;
-
- memcpy(pbuf, associnfo_leader1, sizeof(associnfo_leader1) - 1);
- wrqu.data.length += sizeof(associnfo_leader1) - 1;
- pbuf += sizeof(associnfo_leader1) - 1;
-
- pb += le16_to_cpu(assoc_req->req_ies_size);
- for (i = 0; i < le16_to_cpu(assoc_resp->resp_ies_size); i++)
- pbuf += sprintf(pbuf, "%02x", *(pb + i));
- wrqu.data.length += (le16_to_cpu(assoc_resp->resp_ies_size)) * 2;
-
- pbuf += sprintf(pbuf, ")");
- wrqu.data.length += 1;
-
- wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu, buf);
-}
-
-static
-void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
-{
- u32 result_code;
- struct net_device *dev = priv->net_dev;
- union iwreq_data wrqu;
-
- result_code = get_dword(priv);
- netdev_dbg(priv->net_dev, "result=%d :: scan_ind_count=%d\n",
- result_code, priv->scan_ind_count);
-
- priv->sme_i.sme_flag &= ~SME_AP_SCAN;
- hostif_sme_enqueue(priv, SME_BSS_SCAN_CONFIRM);
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
- priv->scan_ind_count = 0;
-}
-
-static
-void hostif_phy_information_confirm(struct ks_wlan_private *priv)
-{
- struct iw_statistics *wstats = &priv->wstats;
- u8 rssi, signal;
- u8 link_speed;
- u32 transmitted_frame_count, received_fragment_count;
- u32 failed_count, fcs_error_count;
-
- rssi = get_byte(priv);
- signal = get_byte(priv);
- get_byte(priv); /* noise */
- link_speed = get_byte(priv);
- transmitted_frame_count = get_dword(priv);
- received_fragment_count = get_dword(priv);
- failed_count = get_dword(priv);
- fcs_error_count = get_dword(priv);
-
- netdev_dbg(priv->net_dev, "phyinfo confirm rssi=%d signal=%d\n",
- rssi, signal);
- priv->current_rate = (link_speed & RATE_MASK);
- wstats->qual.qual = signal;
- wstats->qual.level = 256 - rssi;
- wstats->qual.noise = 0; /* invalid noise value */
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-
- netdev_dbg(priv->net_dev, "\n rssi=%u\n"
- " signal=%u\n"
- " link_speed=%ux500Kbps\n"
- " transmitted_frame_count=%u\n"
- " received_fragment_count=%u\n"
- " failed_count=%u\n"
- " fcs_error_count=%u\n",
- rssi, signal, link_speed, transmitted_frame_count,
- received_fragment_count, failed_count, fcs_error_count);
- complete(&priv->confirm_wait);
-}
-
-static
-void hostif_mic_failure_confirm(struct ks_wlan_private *priv)
-{
- netdev_dbg(priv->net_dev, "mic_failure=%u\n",
- priv->wpa.mic_failure.failure);
- hostif_sme_enqueue(priv, SME_MIC_FAILURE_CONFIRM);
-}
-
-static
-void hostif_event_check(struct ks_wlan_private *priv)
-{
- u16 event;
-
- event = get_word(priv);
- switch (event) {
- case HIF_DATA_IND:
- hostif_data_indication(priv);
- break;
- case HIF_MIB_GET_CONF:
- hostif_mib_get_confirm(priv);
- break;
- case HIF_MIB_SET_CONF:
- hostif_mib_set_confirm(priv);
- break;
- case HIF_POWER_MGMT_CONF:
- hostif_power_mgmt_confirm(priv);
- break;
- case HIF_SLEEP_CONF:
- hostif_sleep_confirm(priv);
- break;
- case HIF_START_CONF:
- hostif_start_confirm(priv);
- break;
- case HIF_CONNECT_IND:
- hostif_connect_indication(priv);
- break;
- case HIF_STOP_CONF:
- hostif_stop_confirm(priv);
- break;
- case HIF_PS_ADH_SET_CONF:
- hostif_ps_adhoc_set_confirm(priv);
- break;
- case HIF_INFRA_SET_CONF:
- case HIF_INFRA_SET2_CONF:
- hostif_infrastructure_set_confirm(priv);
- break;
- case HIF_ADH_SET_CONF:
- case HIF_ADH_SET2_CONF:
- hostif_adhoc_set_confirm(priv);
- break;
- case HIF_ASSOC_INFO_IND:
- hostif_associate_indication(priv);
- break;
- case HIF_MIC_FAILURE_CONF:
- hostif_mic_failure_confirm(priv);
- break;
- case HIF_SCAN_CONF:
- hostif_bss_scan_confirm(priv);
- break;
- case HIF_PHY_INFO_CONF:
- case HIF_PHY_INFO_IND:
- hostif_phy_information_confirm(priv);
- break;
- case HIF_SCAN_IND:
- hostif_scan_indication(priv);
- break;
- case HIF_AP_SET_CONF:
- default:
- netdev_err(priv->net_dev, "undefined event[%04X]\n", event);
- complete(&priv->confirm_wait);
- break;
- }
-
- /* add event to hostt buffer */
- priv->hostt.buff[priv->hostt.qtail] = event;
- priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
-}
-
-/* allocate size bytes, set header size and event */
-static void *hostif_generic_request(size_t size, int event)
-{
- struct hostif_hdr *p;
-
- p = kzalloc(hif_align_size(size), GFP_ATOMIC);
- if (!p)
- return NULL;
-
- p->size = cpu_to_le16(size - sizeof(p->size));
- p->event = cpu_to_le16(event);
-
- return p;
-}
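hostif_generic_request() fixes the host-interface header convention used throughout this file: the 16-bit size field counts every byte of the message except the size field itself. A small sketch of that convention; byte-swapping to little-endian, which the driver does with cpu_to_le16(), is omitted here:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Host-interface header convention: 'size' counts everything
     * after the size field itself, as in hostif_generic_request().
     */
    struct hif_hdr {
            uint16_t size;  /* little-endian on the wire */
            uint16_t event;
    };

    static void fill_hdr(struct hif_hdr *h, size_t total, uint16_t event)
    {
            h->size = (uint16_t)(total - sizeof(h->size));
            h->event = event;
    }

    int main(void)
    {
            struct {
                    struct hif_hdr hdr;
                    uint8_t body[6];
            } req;

            memset(&req, 0, sizeof(req));
            fill_hdr(&req.hdr, sizeof(req), 0x1234);
            printf("size field = %u (total %zu)\n", req.hdr.size, sizeof(req));
            return 0;
    }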
-
-int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
-{
- unsigned int skb_len = 0;
- unsigned char *buffer = NULL;
- unsigned int length = 0;
- struct hostif_data_request *pp;
- unsigned char *p;
- unsigned short eth_proto;
- struct ether_hdr *eth_hdr;
- unsigned short keyinfo = 0;
- struct ieee802_1x_hdr *aa1x_hdr;
- struct wpa_eapol_key *eap_key;
- struct ethhdr *eth;
- size_t size;
- int ret;
-
- skb_len = skb->len;
- if (skb_len > ETH_FRAME_LEN) {
- netdev_err(priv->net_dev, "bad length skb_len=%d\n", skb_len);
- ret = -EOVERFLOW;
- goto err_kfree_skb;
- }
-
- if (is_disconnect_status(priv->connect_status) ||
- (priv->connect_status & FORCE_DISCONNECT) ||
- priv->wpa.mic_failure.stop) {
- if (netif_queue_stopped(priv->net_dev))
- netif_wake_queue(priv->net_dev);
-
- dev_kfree_skb(skb);
-
- return 0;
- }
-
- /* power save wakeup */
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (!netif_queue_stopped(priv->net_dev))
- netif_stop_queue(priv->net_dev);
- }
-
- size = sizeof(*pp) + 6 + skb_len + 8;
- pp = kmalloc(hif_align_size(size), GFP_ATOMIC);
- if (!pp) {
- ret = -ENOMEM;
- goto err_kfree_skb;
- }
-
- p = (unsigned char *)pp->data;
-
- buffer = skb->data;
- length = skb->len;
-
- /* skb check */
- eth = (struct ethhdr *)skb->data;
- if (!ether_addr_equal(&priv->eth_addr[0], eth->h_source)) {
- netdev_err(priv->net_dev,
- "Invalid mac address: ethernet->h_source=%pM\n",
- eth->h_source);
- ret = -ENXIO;
- goto err_kfree;
- }
-
- /* dest and src MAC address copy */
- size = ETH_ALEN * 2;
- memcpy(p, buffer, size);
- p += size;
- buffer += size;
- length -= size;
-
- /* EtherType/Length check */
- if (*(buffer + 1) + (*buffer << 8) > 1500) {
- /* ProtocolEAP = *(buffer+1) + (*buffer << 8); */
- /* SAP/CTL/OUI(6 byte) add */
- *p++ = 0xAA; /* DSAP */
- *p++ = 0xAA; /* SSAP */
- *p++ = 0x03; /* CTL */
- *p++ = 0x00; /* OUI ("000000") */
- *p++ = 0x00; /* OUI ("000000") */
- *p++ = 0x00; /* OUI ("000000") */
- skb_len += 6;
- } else {
- /* Length(2 byte) delete */
- buffer += 2;
- length -= 2;
- skb_len -= 2;
- }
-
- /* pp->data copy */
- memcpy(p, buffer, length);
-
- p += length;
-
- /* for WPA */
- eth_hdr = (struct ether_hdr *)&pp->data[0];
- eth_proto = ntohs(eth_hdr->h_proto);
-
- /* for MIC FAILURE REPORT check */
- if (eth_proto == ETH_P_PAE &&
- priv->wpa.mic_failure.failure > 0) {
- aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1);
- if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY) {
- eap_key = (struct wpa_eapol_key *)(aa1x_hdr + 1);
- keyinfo = ntohs(eap_key->key_info);
- }
- }
-
- if (priv->wpa.rsn_enabled && priv->wpa.key[0].key_len) {
- /* no encryption */
- if (eth_proto == ETH_P_PAE &&
- priv->wpa.key[1].key_len == 0 &&
- priv->wpa.key[2].key_len == 0 &&
- priv->wpa.key[3].key_len == 0) {
- pp->auth_type = cpu_to_le16(TYPE_AUTH);
- } else {
- if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
- u8 mic[MICHAEL_MIC_LEN];
-
- ret = michael_mic(priv->wpa.key[0].tx_mic_key,
- &pp->data[0], skb_len,
- 0, mic);
- if (ret < 0)
- goto err_kfree;
-
- memcpy(p, mic, sizeof(mic));
- length += sizeof(mic);
- skb_len += sizeof(mic);
- p += sizeof(mic);
- pp->auth_type =
- cpu_to_le16(TYPE_DATA);
- } else if (priv->wpa.pairwise_suite ==
- IW_AUTH_CIPHER_CCMP) {
- pp->auth_type =
- cpu_to_le16(TYPE_DATA);
- }
- }
- } else {
- if (eth_proto == ETH_P_PAE)
- pp->auth_type = cpu_to_le16(TYPE_AUTH);
- else
- pp->auth_type = cpu_to_le16(TYPE_DATA);
- }
-
- /* header value set */
- pp->header.size =
- cpu_to_le16((sizeof(*pp) - sizeof(pp->header.size) + skb_len));
- pp->header.event = cpu_to_le16(HIF_DATA_REQ);
-
- /* tx request */
- ret = ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + skb_len),
- send_packet_complete, skb);
-
- /* MIC FAILURE REPORT check */
- if (eth_proto == ETH_P_PAE &&
- priv->wpa.mic_failure.failure > 0) {
- if (keyinfo & WPA_KEY_INFO_ERROR &&
- keyinfo & WPA_KEY_INFO_REQUEST) {
- netdev_err(priv->net_dev,
- "MIC ERROR Report SET : %04X\n", keyinfo);
- hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST);
- }
- if (priv->wpa.mic_failure.failure == 2)
- priv->wpa.mic_failure.stop = 1;
- }
-
- return ret;
-
-err_kfree:
- kfree(pp);
-err_kfree_skb:
- dev_kfree_skb(skb);
-
- return ret;
-}
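The EtherType/Length branch above applies the classic 802.3-versus-DIX rule: a type/length value above 1500 is an EtherType, so the driver prepends the 6-byte LLC/SNAP header AA AA 03 00 00 00; otherwise the 2-byte length field is dropped. A sketch of that decision operating on the bytes that follow the two MAC addresses (function and buffer names are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Decide DIX (EtherType) vs 802.3 (length) framing as the tx path
     * does: values above 1500 in the type/length field are EtherTypes
     * and get an LLC/SNAP header prepended to the payload.
     */
    static const uint8_t llc_snap[6] = { 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00 };

    static size_t encapsulate(uint8_t *out, const uint8_t *typelen_and_payload,
                              size_t len)
    {
            uint16_t typelen = ((uint16_t)typelen_and_payload[0] << 8) |
                               typelen_and_payload[1];

            if (typelen > 1500) {
                    /* DIX: keep the EtherType, prefix LLC/SNAP */
                    memcpy(out, llc_snap, sizeof(llc_snap));
                    memcpy(out + sizeof(llc_snap), typelen_and_payload, len);
                    return sizeof(llc_snap) + len;
            }
            /* 802.3: drop the 2-byte length field */
            memcpy(out, typelen_and_payload + 2, len - 2);
            return len - 2;
    }

    int main(void)
    {
            const uint8_t dix[] = { 0x08, 0x00, 0x45, 0x00 }; /* IPv4 */
            uint8_t out[64];

            printf("encapsulated %zu bytes\n",
                   encapsulate(out, dix, sizeof(dix))); /* 10 */
            return 0;
    }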
-
-static inline void ps_confirm_wait_inc(struct ks_wlan_private *priv)
-{
- if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET)
- atomic_inc(&priv->psstatus.confirm_wait);
-}
-
-static inline void send_request_to_device(struct ks_wlan_private *priv,
- void *data, size_t size)
-{
- ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, data, size, NULL, NULL);
-}
-
-static void hostif_mib_get_request(struct ks_wlan_private *priv,
- u32 mib_attribute)
-{
- struct hostif_mib_get_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_MIB_GET_REQ);
- if (!pp)
- return;
-
- pp->mib_attribute = cpu_to_le32(mib_attribute);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static void hostif_mib_set_request(struct ks_wlan_private *priv,
- enum mib_attribute attr,
- enum mib_data_type type,
- void *data, size_t size)
-{
- struct hostif_mib_set_request_t *pp;
-
- if (priv->dev_state < DEVICE_STATE_BOOT)
- return;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_MIB_SET_REQ);
- if (!pp)
- return;
-
- pp->mib_attribute = cpu_to_le32(attr);
- pp->mib_value.size = cpu_to_le16(size);
- pp->mib_value.type = cpu_to_le16(type);
- memcpy(&pp->mib_value.body, data, size);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp) + size));
-}
-
-static inline void hostif_mib_set_request_int(struct ks_wlan_private *priv,
- enum mib_attribute attr, int val)
-{
- __le32 v = cpu_to_le32(val);
- size_t size = sizeof(v);
-
- hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_INT, &v, size);
-}
-
-static inline void hostif_mib_set_request_bool(struct ks_wlan_private *priv,
- enum mib_attribute attr,
- bool val)
-{
- __le32 v = cpu_to_le32(val);
- size_t size = sizeof(v);
-
- hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_BOOL, &v, size);
-}
-
-static inline void hostif_mib_set_request_ostring(struct ks_wlan_private *priv,
- enum mib_attribute attr,
- void *data, size_t size)
-{
- hostif_mib_set_request(priv, attr, MIB_VALUE_TYPE_OSTRING, data, size);
-}
-
-static
-void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
-{
- struct hostif_start_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_START_REQ);
- if (!pp)
- return;
-
- pp->mode = cpu_to_le16(mode);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-
- priv->aplist.size = 0;
- priv->scan_ind_count = 0;
-}
-
-static __le16 ks_wlan_cap(struct ks_wlan_private *priv)
-{
- u16 capability = 0x0000;
-
- if (priv->reg.preamble == SHORT_PREAMBLE)
- capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
-
-	capability &= ~(WLAN_CAPABILITY_PBCC); /* PBCC not supported */
-
- if (priv->reg.phy_type != D_11B_ONLY_MODE) {
- capability |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
- capability &= ~(WLAN_CAPABILITY_DSSS_OFDM);
- }
-
- return cpu_to_le16(capability);
-}
-
-static void init_request(struct ks_wlan_private *priv,
- struct hostif_request *req)
-{
- req->phy_type = cpu_to_le16(priv->reg.phy_type);
- req->cts_mode = cpu_to_le16(priv->reg.cts_mode);
- req->scan_type = cpu_to_le16(priv->reg.scan_type);
- req->rate_set.size = priv->reg.rate_set.size;
- req->capability = ks_wlan_cap(priv);
- memcpy(&req->rate_set.body[0], &priv->reg.rate_set.body[0],
- priv->reg.rate_set.size);
-}
-
-static
-void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
-{
- struct hostif_ps_adhoc_set_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_PS_ADH_SET_REQ);
- if (!pp)
- return;
-
- init_request(priv, &pp->request);
- pp->channel = cpu_to_le16(priv->reg.channel);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_infrastructure_set_request(struct ks_wlan_private *priv, int event)
-{
- struct hostif_infrastructure_set_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), event);
- if (!pp)
- return;
-
- init_request(priv, &pp->request);
- pp->ssid.size = priv->reg.ssid.size;
- memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
- pp->beacon_lost_count =
- cpu_to_le16(priv->reg.beacon_lost_count);
- pp->auth_type = cpu_to_le16(priv->reg.authenticate_type);
-
- pp->channel_list.body[0] = 1;
- pp->channel_list.body[1] = 8;
- pp->channel_list.body[2] = 2;
- pp->channel_list.body[3] = 9;
- pp->channel_list.body[4] = 3;
- pp->channel_list.body[5] = 10;
- pp->channel_list.body[6] = 4;
- pp->channel_list.body[7] = 11;
- pp->channel_list.body[8] = 5;
- pp->channel_list.body[9] = 12;
- pp->channel_list.body[10] = 6;
- pp->channel_list.body[11] = 13;
- pp->channel_list.body[12] = 7;
- if (priv->reg.phy_type == D_11G_ONLY_MODE) {
- pp->channel_list.size = 13;
- } else {
- pp->channel_list.body[13] = 14;
- pp->channel_list.size = 14;
- }
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_adhoc_set_request(struct ks_wlan_private *priv)
-{
- struct hostif_adhoc_set_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
- if (!pp)
- return;
-
- init_request(priv, &pp->request);
- pp->channel = cpu_to_le16(priv->reg.channel);
- pp->ssid.size = priv->reg.ssid.size;
- memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
-{
- struct hostif_adhoc_set2_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
- if (!pp)
- return;
-
- init_request(priv, &pp->request);
- pp->ssid.size = priv->reg.ssid.size;
- memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
-
- pp->channel_list.body[0] = priv->reg.channel;
- pp->channel_list.size = 1;
- memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_stop_request(struct ks_wlan_private *priv)
-{
- struct hostif_stop_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_STOP_REQ);
- if (!pp)
- return;
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_phy_information_request(struct ks_wlan_private *priv)
-{
- struct hostif_phy_information_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_PHY_INFO_REQ);
- if (!pp)
- return;
-
- if (priv->reg.phy_info_timer) {
- pp->type = cpu_to_le16(TIME_TYPE);
- pp->time = cpu_to_le16(priv->reg.phy_info_timer);
- } else {
- pp->type = cpu_to_le16(NORMAL_TYPE);
- pp->time = cpu_to_le16(0);
- }
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_power_mgmt_request(struct ks_wlan_private *priv,
- u32 mode, u32 wake_up, u32 receive_dtims)
-{
- struct hostif_power_mgmt_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_POWER_MGMT_REQ);
- if (!pp)
- return;
-
- pp->mode = cpu_to_le32(mode);
- pp->wake_up = cpu_to_le32(wake_up);
- pp->receive_dtims = cpu_to_le32(receive_dtims);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-static
-void hostif_sleep_request(struct ks_wlan_private *priv,
- enum sleep_mode_type mode)
-{
- struct hostif_sleep_request *pp;
-
- if (mode == SLP_SLEEP) {
- pp = hostif_generic_request(sizeof(*pp), HIF_SLEEP_REQ);
- if (!pp)
- return;
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
- } else if (mode == SLP_ACTIVE) {
- atomic_set(&priv->sleepstatus.wakeup_request, 1);
- queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
- } else {
- netdev_err(priv->net_dev, "invalid mode %ld\n", (long)mode);
- return;
- }
-}
-
-static
-void hostif_bss_scan_request(struct ks_wlan_private *priv,
- unsigned long scan_type, u8 *scan_ssid,
- u8 scan_ssid_len)
-{
- struct hostif_bss_scan_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_SCAN_REQ);
- if (!pp)
- return;
-
- pp->scan_type = scan_type;
-
- pp->ch_time_min = cpu_to_le32(110); /* default value */
- pp->ch_time_max = cpu_to_le32(130); /* default value */
- pp->channel_list.body[0] = 1;
- pp->channel_list.body[1] = 8;
- pp->channel_list.body[2] = 2;
- pp->channel_list.body[3] = 9;
- pp->channel_list.body[4] = 3;
- pp->channel_list.body[5] = 10;
- pp->channel_list.body[6] = 4;
- pp->channel_list.body[7] = 11;
- pp->channel_list.body[8] = 5;
- pp->channel_list.body[9] = 12;
- pp->channel_list.body[10] = 6;
- pp->channel_list.body[11] = 13;
- pp->channel_list.body[12] = 7;
- if (priv->reg.phy_type == D_11G_ONLY_MODE) {
- pp->channel_list.size = 13;
- } else {
- pp->channel_list.body[13] = 14;
- pp->channel_list.size = 14;
- }
- pp->ssid.size = 0;
-
- /* specified SSID SCAN */
- if (scan_ssid_len > 0 && scan_ssid_len <= 32) {
- pp->ssid.size = scan_ssid_len;
- memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len);
- }
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-
- priv->aplist.size = 0;
- priv->scan_ind_count = 0;
-}
-
-static
-void hostif_mic_failure_request(struct ks_wlan_private *priv,
- u16 failure_count, u16 timer)
-{
- struct hostif_mic_failure_request *pp;
-
- pp = hostif_generic_request(sizeof(*pp), HIF_MIC_FAILURE_REQ);
- if (!pp)
- return;
-
- pp->failure_count = cpu_to_le16(failure_count);
- pp->timer = cpu_to_le16(timer);
-
- send_request_to_device(priv, pp, hif_align_size(sizeof(*pp)));
-}
-
-/* device I/O receive indication */
-static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
- unsigned int size)
-{
- if (!priv->is_device_open)
- return;
-
- spin_lock(&priv->dev_read_lock);
- priv->dev_data[atomic_read(&priv->rec_count)] = p;
- priv->dev_size[atomic_read(&priv->rec_count)] = size;
-
- if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) {
- /* rx event count inc */
- atomic_inc(&priv->event_count);
- }
- atomic_inc(&priv->rec_count);
- if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT)
- atomic_set(&priv->rec_count, 0);
-
- wake_up_interruptible_all(&priv->devread_wait);
-
- spin_unlock(&priv->dev_read_lock);
-}
-
-void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
- unsigned int size)
-{
- devio_rec_ind(priv, p, size);
-
- priv->rxp = p;
- priv->rx_size = size;
-
- if (get_word(priv) == priv->rx_size)
- hostif_event_check(priv);
-}
-
-static void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
-{
- switch (type) {
- case SME_WEP_INDEX_REQUEST:
- hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID,
- priv->reg.wep_index);
- break;
- case SME_WEP_KEY1_REQUEST:
- if (priv->wpa.wpa_enabled)
- return;
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE1,
- &priv->reg.wep_key[0].val[0],
- priv->reg.wep_key[0].size);
- break;
- case SME_WEP_KEY2_REQUEST:
- if (priv->wpa.wpa_enabled)
- return;
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE2,
- &priv->reg.wep_key[1].val[0],
- priv->reg.wep_key[1].size);
- break;
- case SME_WEP_KEY3_REQUEST:
- if (priv->wpa.wpa_enabled)
- return;
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE3,
- &priv->reg.wep_key[2].val[0],
- priv->reg.wep_key[2].size);
- break;
- case SME_WEP_KEY4_REQUEST:
- if (priv->wpa.wpa_enabled)
- return;
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE4,
- &priv->reg.wep_key[3].val[0],
- priv->reg.wep_key[3].size);
- break;
- case SME_WEP_FLAG_REQUEST:
- hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED,
- priv->reg.privacy_invoked);
- break;
- }
-}
-
-struct wpa_suite {
- __le16 size;
- unsigned char suite[4][CIPHER_ID_LEN];
-} __packed;
-
-struct rsn_mode {
- __le32 rsn_mode;
- __le16 rsn_capability;
-} __packed;
-
-static void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
-{
- struct wpa_suite wpa_suite;
- struct rsn_mode rsn_mode;
- size_t size;
- u32 mode;
- const u8 *buf = NULL;
-
- memset(&wpa_suite, 0, sizeof(wpa_suite));
-
- switch (type) {
- case SME_RSN_UCAST_REQUEST:
- wpa_suite.size = cpu_to_le16(1);
- switch (priv->wpa.pairwise_suite) {
- case IW_AUTH_CIPHER_NONE:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE;
- break;
- case IW_AUTH_CIPHER_WEP40:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40;
- break;
- case IW_AUTH_CIPHER_TKIP:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP;
- break;
- case IW_AUTH_CIPHER_CCMP:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP;
- break;
- case IW_AUTH_CIPHER_WEP104:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104;
- break;
- }
-
- if (buf)
- memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN);
- size = sizeof(wpa_suite.size) +
- (CIPHER_ID_LEN * le16_to_cpu(wpa_suite.size));
- hostif_mib_set_request_ostring(priv,
- DOT11_RSN_CONFIG_UNICAST_CIPHER,
- &wpa_suite, size);
- break;
- case SME_RSN_MCAST_REQUEST:
- switch (priv->wpa.group_suite) {
- case IW_AUTH_CIPHER_NONE:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_NONE : CIPHER_ID_WPA_NONE;
- break;
- case IW_AUTH_CIPHER_WEP40:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_WEP40 : CIPHER_ID_WPA_WEP40;
- break;
- case IW_AUTH_CIPHER_TKIP:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_TKIP : CIPHER_ID_WPA_TKIP;
- break;
- case IW_AUTH_CIPHER_CCMP:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_CCMP : CIPHER_ID_WPA_CCMP;
- break;
- case IW_AUTH_CIPHER_WEP104:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- CIPHER_ID_WPA2_WEP104 : CIPHER_ID_WPA_WEP104;
- break;
- }
- if (buf)
- memcpy(&wpa_suite.suite[0][0], buf, CIPHER_ID_LEN);
- hostif_mib_set_request_ostring(priv,
- DOT11_RSN_CONFIG_MULTICAST_CIPHER,
- &wpa_suite.suite[0][0],
- CIPHER_ID_LEN);
- break;
- case SME_RSN_AUTH_REQUEST:
- wpa_suite.size = cpu_to_le16(1);
- switch (priv->wpa.key_mgmt_suite) {
- case IW_AUTH_KEY_MGMT_802_1X:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- KEY_MGMT_ID_WPA2_1X : KEY_MGMT_ID_WPA_1X;
- break;
- case IW_AUTH_KEY_MGMT_PSK:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- KEY_MGMT_ID_WPA2_PSK : KEY_MGMT_ID_WPA_PSK;
- break;
- case 0:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- KEY_MGMT_ID_WPA2_NONE : KEY_MGMT_ID_WPA_NONE;
- break;
- case 4:
- buf = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- KEY_MGMT_ID_WPA2_WPANONE :
- KEY_MGMT_ID_WPA_WPANONE;
- break;
- }
-
- if (buf)
- memcpy(&wpa_suite.suite[0][0], buf, KEY_MGMT_ID_LEN);
- size = sizeof(wpa_suite.size) +
- (KEY_MGMT_ID_LEN * le16_to_cpu(wpa_suite.size));
- hostif_mib_set_request_ostring(priv,
- DOT11_RSN_CONFIG_AUTH_SUITE,
- &wpa_suite, size);
- break;
- case SME_RSN_ENABLED_REQUEST:
- hostif_mib_set_request_bool(priv, DOT11_RSN_ENABLED,
- priv->wpa.rsn_enabled);
- break;
- case SME_RSN_MODE_REQUEST:
- mode = (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) ?
- RSN_MODE_WPA2 :
- (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) ?
- RSN_MODE_WPA : RSN_MODE_NONE;
- rsn_mode.rsn_mode = cpu_to_le32(mode);
- rsn_mode.rsn_capability = cpu_to_le16(0);
- hostif_mib_set_request_ostring(priv, LOCAL_RSN_MODE,
- &rsn_mode, sizeof(rsn_mode));
- break;
- }
-}
-
-static
-void hostif_sme_mode_setup(struct ks_wlan_private *priv)
-{
- unsigned char rate_size;
- unsigned char rate_octet[RATE_SET_MAX_SIZE];
- int i = 0;
-
-	/* re-derive the rate set when rate selection is auto and phy_type changes (#94) */
- if (priv->reg.tx_rate == TX_RATE_FULL_AUTO) {
- if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- priv->reg.rate_set.body[3] = TX_RATE_11M;
- priv->reg.rate_set.body[2] = TX_RATE_5M;
- priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
- priv->reg.rate_set.size = 4;
- } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- priv->reg.rate_set.body[11] = TX_RATE_54M;
- priv->reg.rate_set.body[10] = TX_RATE_48M;
- priv->reg.rate_set.body[9] = TX_RATE_36M;
- priv->reg.rate_set.body[8] = TX_RATE_18M;
- priv->reg.rate_set.body[7] = TX_RATE_9M;
- priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
- priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
- priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
- priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
- priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
- priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
- priv->reg.rate_set.size = 12;
- }
- }
-
- /* rate mask by phy setting */
- if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- for (i = 0; i < priv->reg.rate_set.size; i++) {
- if (!is_11b_rate(priv->reg.rate_set.body[i]))
- break;
-
- if ((priv->reg.rate_set.body[i] & RATE_MASK) >= TX_RATE_5M) {
- rate_octet[i] = priv->reg.rate_set.body[i] &
- RATE_MASK;
- } else {
- rate_octet[i] = priv->reg.rate_set.body[i];
- }
- }
-
- } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- for (i = 0; i < priv->reg.rate_set.size; i++) {
- if (!is_11bg_rate(priv->reg.rate_set.body[i]))
- break;
-
- if (is_ofdm_ext_rate(priv->reg.rate_set.body[i])) {
- rate_octet[i] = priv->reg.rate_set.body[i] &
- RATE_MASK;
- } else {
- rate_octet[i] = priv->reg.rate_set.body[i];
- }
- }
- }
- rate_size = i;
- if (rate_size == 0) {
- if (priv->reg.phy_type == D_11G_ONLY_MODE)
- rate_octet[0] = TX_RATE_6M | BASIC_RATE;
- else
- rate_octet[0] = TX_RATE_2M | BASIC_RATE;
- rate_size = 1;
- }
-
- /* rate set update */
- priv->reg.rate_set.size = rate_size;
- memcpy(&priv->reg.rate_set.body[0], &rate_octet[0], rate_size);
-
- switch (priv->reg.operation_mode) {
- case MODE_PSEUDO_ADHOC:
- hostif_ps_adhoc_set_request(priv);
- break;
- case MODE_INFRASTRUCTURE:
- if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
- hostif_infrastructure_set_request(priv,
- HIF_INFRA_SET_REQ);
- } else {
- hostif_infrastructure_set_request(priv,
- HIF_INFRA_SET2_REQ);
- netdev_dbg(priv->net_dev,
- "Infra bssid = %pM\n", priv->reg.bssid);
- }
- break;
- case MODE_ADHOC:
- if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
- hostif_adhoc_set_request(priv);
- } else {
- hostif_adhoc_set2_request(priv);
- netdev_dbg(priv->net_dev,
- "Adhoc bssid = %pM\n", priv->reg.bssid);
- }
- break;
- default:
- break;
- }
-}
-
-static
-void hostif_sme_multicast_set(struct ks_wlan_private *priv)
-{
- struct net_device *dev = priv->net_dev;
- int mc_count;
- struct netdev_hw_addr *ha;
- char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN];
- int i = 0;
-
- spin_lock(&priv->multicast_spin);
-
- memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
-
- if (dev->flags & IFF_PROMISC) {
- hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_PROMISC);
- goto spin_unlock;
- }
-
- if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
- (dev->flags & IFF_ALLMULTI)) {
- hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_MCASTALL);
- goto spin_unlock;
- }
-
- if (priv->sme_i.sme_flag & SME_MULTICAST) {
- mc_count = netdev_mc_count(dev);
- netdev_for_each_mc_addr(ha, dev) {
- ether_addr_copy(&set_address[i * ETH_ALEN], ha->addr);
- i++;
- }
- priv->sme_i.sme_flag &= ~SME_MULTICAST;
- hostif_mib_set_request_ostring(priv, LOCAL_MULTICAST_ADDRESS,
- &set_address[0],
- ETH_ALEN * mc_count);
- } else {
- priv->sme_i.sme_flag |= SME_MULTICAST;
- hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_MCAST);
- }
-
-spin_unlock:
- spin_unlock(&priv->multicast_spin);
-}
-
-static void hostif_sme_power_mgmt_set(struct ks_wlan_private *priv)
-{
- u32 mode, wake_up, receive_dtims;
-
- if (priv->reg.power_mgmt != POWER_MGMT_SAVE1 &&
- priv->reg.power_mgmt != POWER_MGMT_SAVE2) {
- mode = POWER_ACTIVE;
- wake_up = 0;
- receive_dtims = 0;
- } else {
- mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ?
- POWER_SAVE : POWER_ACTIVE;
- wake_up = 0;
- receive_dtims = (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
- priv->reg.power_mgmt == POWER_MGMT_SAVE2);
- }
-
- hostif_power_mgmt_request(priv, mode, wake_up, receive_dtims);
-}
-
-static void hostif_sme_sleep_set(struct ks_wlan_private *priv)
-{
- if (priv->sleep_mode != SLP_SLEEP &&
- priv->sleep_mode != SLP_ACTIVE)
- return;
-
- hostif_sleep_request(priv, priv->sleep_mode);
-}
-
-static
-void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
-{
- switch (type) {
- case SME_SET_FLAG:
- hostif_mib_set_request_bool(priv, DOT11_PRIVACY_INVOKED,
- priv->reg.privacy_invoked);
- break;
- case SME_SET_TXKEY:
- hostif_mib_set_request_int(priv, DOT11_WEP_DEFAULT_KEY_ID,
- priv->wpa.txkey);
- break;
- case SME_SET_KEY1:
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE1,
- &priv->wpa.key[0].key_val[0],
- priv->wpa.key[0].key_len);
- break;
- case SME_SET_KEY2:
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE2,
- &priv->wpa.key[1].key_val[0],
- priv->wpa.key[1].key_len);
- break;
- case SME_SET_KEY3:
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE3,
- &priv->wpa.key[2].key_val[0],
- priv->wpa.key[2].key_len);
- break;
- case SME_SET_KEY4:
- hostif_mib_set_request_ostring(priv,
- DOT11_WEP_DEFAULT_KEY_VALUE4,
- &priv->wpa.key[3].key_val[0],
- priv->wpa.key[3].key_len);
- break;
- case SME_SET_PMK_TSC:
- hostif_mib_set_request_ostring(priv, DOT11_PMK_TSC,
- &priv->wpa.key[0].rx_seq[0],
- WPA_RX_SEQ_LEN);
- break;
- case SME_SET_GMK1_TSC:
- hostif_mib_set_request_ostring(priv, DOT11_GMK1_TSC,
- &priv->wpa.key[1].rx_seq[0],
- WPA_RX_SEQ_LEN);
- break;
- case SME_SET_GMK2_TSC:
- hostif_mib_set_request_ostring(priv, DOT11_GMK2_TSC,
- &priv->wpa.key[2].rx_seq[0],
- WPA_RX_SEQ_LEN);
- break;
- }
-}
-
-static
-void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
-{
- struct pmk_cache {
- __le16 size;
- struct {
- u8 bssid[ETH_ALEN];
- u8 pmkid[IW_PMKID_LEN];
- } __packed list[PMK_LIST_MAX];
- } __packed pmkcache;
- struct pmk *pmk;
- size_t size;
- int i = 0;
-
- list_for_each_entry(pmk, &priv->pmklist.head, list) {
- if (i >= PMK_LIST_MAX)
- break;
- ether_addr_copy(pmkcache.list[i].bssid, pmk->bssid);
- memcpy(pmkcache.list[i].pmkid, pmk->pmkid, IW_PMKID_LEN);
- i++;
- }
- pmkcache.size = cpu_to_le16(priv->pmklist.size);
- size = sizeof(priv->pmklist.size) +
- ((ETH_ALEN + IW_PMKID_LEN) * priv->pmklist.size);
- hostif_mib_set_request_ostring(priv, LOCAL_PMK, &pmkcache, size);
-}
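
The wire size passed to hostif_mib_set_request_ostring() above is computed from the packed layout, not from sizeof(pmkcache): two size octets plus 22 octets (BSSID + PMKID) per cached entry. A standalone sketch of that arithmetic, with the constants copied from the driver headers (ETH_ALEN is 6, IW_PMKID_LEN is 16; the entry count is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN	6
#define IW_PMKID_LEN	16
#define PMK_LIST_MAX	8

struct pmk_cache {
	uint16_t size;				/* __le16 on the wire */
	struct {
		uint8_t bssid[ETH_ALEN];
		uint8_t pmkid[IW_PMKID_LEN];
	} __attribute__((packed)) list[PMK_LIST_MAX];
} __attribute__((packed));

int main(void)
{
	unsigned int cached = 3;	/* hypothetical number of cached PMKs */
	size_t wire_size = sizeof(uint16_t) +
			   (ETH_ALEN + IW_PMKID_LEN) * cached;

	/* 2 + 22 * 3 = 68 octets sent, versus the 178-octet full struct */
	printf("wire size = %zu of %zu\n", wire_size,
	       sizeof(struct pmk_cache));
	return 0;
}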
-
-/* execute sme */
-static void hostif_sme_execute(struct ks_wlan_private *priv, int event)
-{
- u16 failure;
-
- switch (event) {
- case SME_START:
- if (priv->dev_state == DEVICE_STATE_BOOT)
- hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
- break;
- case SME_MULTICAST_REQUEST:
- hostif_sme_multicast_set(priv);
- break;
- case SME_MACADDRESS_SET_REQUEST:
- hostif_mib_set_request_ostring(priv, LOCAL_CURRENTADDRESS,
- &priv->eth_addr[0], ETH_ALEN);
- break;
- case SME_BSS_SCAN_REQUEST:
- hostif_bss_scan_request(priv, priv->reg.scan_type,
- priv->scan_ssid, priv->scan_ssid_len);
- break;
- case SME_POW_MNGMT_REQUEST:
- hostif_sme_power_mgmt_set(priv);
- break;
- case SME_PHY_INFO_REQUEST:
- hostif_phy_information_request(priv);
- break;
- case SME_MIC_FAILURE_REQUEST:
- failure = priv->wpa.mic_failure.failure;
- if (failure != 1 && failure != 2) {
- netdev_err(priv->net_dev,
- "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
- failure);
- return;
- }
- hostif_mic_failure_request(priv, failure - 1, (failure == 1) ?
- 0 : priv->wpa.mic_failure.counter);
- break;
- case SME_MIC_FAILURE_CONFIRM:
- if (priv->wpa.mic_failure.failure == 2) {
- if (priv->wpa.mic_failure.stop)
- priv->wpa.mic_failure.stop = 0;
- priv->wpa.mic_failure.failure = 0;
- hostif_start_request(priv, priv->reg.operation_mode);
- }
- break;
- case SME_GET_MAC_ADDRESS:
- if (priv->dev_state == DEVICE_STATE_BOOT)
- hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
- break;
- case SME_GET_PRODUCT_VERSION:
- if (priv->dev_state == DEVICE_STATE_BOOT)
- priv->dev_state = DEVICE_STATE_PREINIT;
- break;
- case SME_STOP_REQUEST:
- hostif_stop_request(priv);
- break;
- case SME_RTS_THRESHOLD_REQUEST:
- hostif_mib_set_request_int(priv, DOT11_RTS_THRESHOLD,
- priv->reg.rts);
- break;
- case SME_FRAGMENTATION_THRESHOLD_REQUEST:
- hostif_mib_set_request_int(priv, DOT11_FRAGMENTATION_THRESHOLD,
- priv->reg.fragment);
- break;
- case SME_WEP_INDEX_REQUEST:
- case SME_WEP_KEY1_REQUEST:
- case SME_WEP_KEY2_REQUEST:
- case SME_WEP_KEY3_REQUEST:
- case SME_WEP_KEY4_REQUEST:
- case SME_WEP_FLAG_REQUEST:
- hostif_sme_set_wep(priv, event);
- break;
- case SME_RSN_UCAST_REQUEST:
- case SME_RSN_MCAST_REQUEST:
- case SME_RSN_AUTH_REQUEST:
- case SME_RSN_ENABLED_REQUEST:
- case SME_RSN_MODE_REQUEST:
- hostif_sme_set_rsn(priv, event);
- break;
- case SME_SET_FLAG:
- case SME_SET_TXKEY:
- case SME_SET_KEY1:
- case SME_SET_KEY2:
- case SME_SET_KEY3:
- case SME_SET_KEY4:
- case SME_SET_PMK_TSC:
- case SME_SET_GMK1_TSC:
- case SME_SET_GMK2_TSC:
- hostif_sme_set_key(priv, event);
- break;
- case SME_SET_PMKSA:
- hostif_sme_set_pmksa(priv);
- break;
- case SME_WPS_ENABLE_REQUEST:
- hostif_mib_set_request_int(priv, LOCAL_WPS_ENABLE,
- priv->wps.wps_enabled);
- break;
- case SME_WPS_PROBE_REQUEST:
- hostif_mib_set_request_ostring(priv, LOCAL_WPS_PROBE_REQ,
- priv->wps.ie, priv->wps.ielen);
- break;
- case SME_MODE_SET_REQUEST:
- hostif_sme_mode_setup(priv);
- break;
- case SME_SET_GAIN:
- hostif_mib_set_request_ostring(priv, LOCAL_GAIN,
- &priv->gain, sizeof(priv->gain));
- break;
- case SME_GET_GAIN:
- hostif_mib_get_request(priv, LOCAL_GAIN);
- break;
- case SME_GET_EEPROM_CKSUM:
- priv->eeprom_checksum = EEPROM_FW_NOT_SUPPORT; /* initialize */
- hostif_mib_get_request(priv, LOCAL_EEPROM_SUM);
- break;
- case SME_START_REQUEST:
- hostif_start_request(priv, priv->reg.operation_mode);
- break;
- case SME_START_CONFIRM:
- /* for power save */
- atomic_set(&priv->psstatus.snooze_guard, 0);
- atomic_set(&priv->psstatus.confirm_wait, 0);
- if (priv->dev_state == DEVICE_STATE_PREINIT)
- priv->dev_state = DEVICE_STATE_INIT;
- /* wake_up_interruptible_all(&priv->confirm_wait); */
- complete(&priv->confirm_wait);
- break;
- case SME_SLEEP_REQUEST:
- hostif_sme_sleep_set(priv);
- break;
- case SME_SET_REGION:
- hostif_mib_set_request_int(priv, LOCAL_REGION, priv->region);
- break;
- case SME_MULTICAST_CONFIRM:
- case SME_BSS_SCAN_CONFIRM:
- case SME_POW_MNGMT_CONFIRM:
- case SME_PHY_INFO_CONFIRM:
- case SME_STOP_CONFIRM:
- case SME_RTS_THRESHOLD_CONFIRM:
- case SME_FRAGMENTATION_THRESHOLD_CONFIRM:
- case SME_WEP_INDEX_CONFIRM:
- case SME_WEP_KEY1_CONFIRM:
- case SME_WEP_KEY2_CONFIRM:
- case SME_WEP_KEY3_CONFIRM:
- case SME_WEP_KEY4_CONFIRM:
- case SME_WEP_FLAG_CONFIRM:
- case SME_RSN_UCAST_CONFIRM:
- case SME_RSN_MCAST_CONFIRM:
- case SME_RSN_AUTH_CONFIRM:
- case SME_RSN_ENABLED_CONFIRM:
- case SME_RSN_MODE_CONFIRM:
- case SME_MODE_SET_CONFIRM:
- case SME_TERMINATE:
- default:
- break;
- }
-}
-
-static void hostif_sme_work(struct work_struct *work)
-{
- struct ks_wlan_private *priv;
-
- priv = container_of(work, struct ks_wlan_private, sme_work);
-
- if (priv->dev_state < DEVICE_STATE_BOOT)
- return;
-
- if (cnt_smeqbody(priv) <= 0)
- return;
-
- hostif_sme_execute(priv, priv->sme_i.event_buff[priv->sme_i.qhead]);
- inc_smeqhead(priv);
- if (cnt_smeqbody(priv) > 0)
- schedule_work(&priv->sme_work);
-}
-
-/* send to Station Management Entity module */
-void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event)
-{
- /* enqueue sme event */
- if (cnt_smeqbody(priv) < (SME_EVENT_BUFF_SIZE - 1)) {
- priv->sme_i.event_buff[priv->sme_i.qtail] = event;
- inc_smeqtail(priv);
- } else {
- /* in case of buffer overflow */
- netdev_err(priv->net_dev, "sme queue buffer overflow\n");
- }
-
- schedule_work(&priv->sme_work);
-}
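
cnt_smeqbody(), inc_smeqhead() and inc_smeqtail() are defined earlier in this file; assuming they follow the usual fixed-size ring convention, their arithmetic looks like the standalone sketch below (a sketch of the assumed shape, not the driver's exact code):

#define SME_EVENT_BUFF_SIZE 128

static unsigned int qhead, qtail;

/* number of queued events, accounting for wrap-around */
static unsigned int cnt_smeqbody(void)
{
	return (qtail + SME_EVENT_BUFF_SIZE - qhead) % SME_EVENT_BUFF_SIZE;
}

static void inc_smeqhead(void)	/* consumer side */
{
	qhead = (qhead + 1) % SME_EVENT_BUFF_SIZE;
}

static void inc_smeqtail(void)	/* producer side */
{
	qtail = (qtail + 1) % SME_EVENT_BUFF_SIZE;
}

This also shows why the enqueue path above refuses the last free slot (SME_EVENT_BUFF_SIZE - 1): with head == tail meaning empty, one slot must stay unused to distinguish full from empty.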
-
-static inline void hostif_aplist_init(struct ks_wlan_private *priv)
-{
- size_t size = LOCAL_APLIST_MAX * sizeof(struct local_ap);
-
- priv->aplist.size = 0;
- memset(&priv->aplist.ap[0], 0, size);
-}
-
-static inline void hostif_status_init(struct ks_wlan_private *priv)
-{
- priv->infra_status = 0;
- priv->current_rate = 4;
- priv->connect_status = DISCONNECT_STATUS;
-}
-
-static inline void hostif_sme_init(struct ks_wlan_private *priv)
-{
- priv->sme_i.sme_status = SME_IDLE;
- priv->sme_i.qhead = 0;
- priv->sme_i.qtail = 0;
- spin_lock_init(&priv->sme_i.sme_spin);
- priv->sme_i.sme_flag = 0;
- INIT_WORK(&priv->sme_work, hostif_sme_work);
-}
-
-static inline void hostif_wpa_init(struct ks_wlan_private *priv)
-{
- memset(&priv->wpa, 0, sizeof(priv->wpa));
- priv->wpa.rsn_enabled = false;
- priv->wpa.mic_failure.failure = 0;
- priv->wpa.mic_failure.last_failure_time = 0;
- priv->wpa.mic_failure.stop = 0;
-}
-
-static inline void hostif_power_save_init(struct ks_wlan_private *priv)
-{
- atomic_set(&priv->psstatus.status, PS_NONE);
- atomic_set(&priv->psstatus.confirm_wait, 0);
- atomic_set(&priv->psstatus.snooze_guard, 0);
- init_completion(&priv->psstatus.wakeup_wait);
- INIT_WORK(&priv->wakeup_work, ks_wlan_hw_wakeup_task);
-}
-
-static inline void hostif_pmklist_init(struct ks_wlan_private *priv)
-{
- int i;
-
- memset(&priv->pmklist, 0, sizeof(priv->pmklist));
- INIT_LIST_HEAD(&priv->pmklist.head);
- for (i = 0; i < PMK_LIST_MAX; i++)
- INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
-}
-
-static inline void hostif_counters_init(struct ks_wlan_private *priv)
-{
- priv->dev_count = 0;
- atomic_set(&priv->event_count, 0);
- atomic_set(&priv->rec_count, 0);
-}
-
-int hostif_init(struct ks_wlan_private *priv)
-{
- hostif_aplist_init(priv);
- hostif_status_init(priv);
-
- spin_lock_init(&priv->multicast_spin);
- spin_lock_init(&priv->dev_read_lock);
- init_waitqueue_head(&priv->devread_wait);
-
- hostif_counters_init(priv);
- hostif_power_save_init(priv);
- hostif_wpa_init(priv);
- hostif_pmklist_init(priv);
- hostif_sme_init(priv);
-
- return 0;
-}
-
-void hostif_exit(struct ks_wlan_private *priv)
-{
- cancel_work_sync(&priv->sme_work);
-}
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
deleted file mode 100644
index c62a494ed6bb..000000000000
--- a/drivers/staging/ks7010/ks_hostif.h
+++ /dev/null
@@ -1,617 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for KeyStream wireless LAN
- *
- * Copyright (c) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#ifndef _KS_HOSTIF_H_
-#define _KS_HOSTIF_H_
-
-#include <linux/compiler.h>
-#include <linux/ieee80211.h>
-
-/*
- * HOST-MAC I/F events
- */
-#define HIF_DATA_REQ 0xE001
-#define HIF_DATA_IND 0xE801
-#define HIF_MIB_GET_REQ 0xE002
-#define HIF_MIB_GET_CONF 0xE802
-#define HIF_MIB_SET_REQ 0xE003
-#define HIF_MIB_SET_CONF 0xE803
-#define HIF_POWER_MGMT_REQ 0xE004
-#define HIF_POWER_MGMT_CONF 0xE804
-#define HIF_START_REQ 0xE005
-#define HIF_START_CONF 0xE805
-#define HIF_CONNECT_IND 0xE806
-#define HIF_STOP_REQ 0xE006
-#define HIF_STOP_CONF 0xE807
-#define HIF_PS_ADH_SET_REQ 0xE007
-#define HIF_PS_ADH_SET_CONF 0xE808
-#define HIF_INFRA_SET_REQ 0xE008
-#define HIF_INFRA_SET_CONF 0xE809
-#define HIF_ADH_SET_REQ 0xE009
-#define HIF_ADH_SET_CONF 0xE80A
-#define HIF_AP_SET_REQ 0xE00A
-#define HIF_AP_SET_CONF 0xE80B
-#define HIF_ASSOC_INFO_IND 0xE80C
-#define HIF_MIC_FAILURE_REQ 0xE00B
-#define HIF_MIC_FAILURE_CONF 0xE80D
-#define HIF_SCAN_REQ 0xE00C
-#define HIF_SCAN_CONF 0xE80E
-#define HIF_PHY_INFO_REQ 0xE00D
-#define HIF_PHY_INFO_CONF 0xE80F
-#define HIF_SLEEP_REQ 0xE00E
-#define HIF_SLEEP_CONF 0xE810
-#define HIF_PHY_INFO_IND 0xE811
-#define HIF_SCAN_IND 0xE812
-#define HIF_INFRA_SET2_REQ 0xE00F
-#define HIF_INFRA_SET2_CONF 0xE813
-#define HIF_ADH_SET2_REQ 0xE010
-#define HIF_ADH_SET2_CONF 0xE814
-
-#define HIF_REQ_MAX 0xE010
-
-/*
- * HOST-MAC I/F data structure
- * Byte alignment Little Endian
- */
-
-struct hostif_hdr {
- __le16 size;
- __le16 event;
-} __packed;
-
-struct hostif_data_request {
- struct hostif_hdr header;
- __le16 auth_type;
-#define TYPE_DATA 0x0000
-#define TYPE_AUTH 0x0001
- __le16 reserved;
- u8 data[];
-} __packed;
-
-#define TYPE_PMK1 0x0001
-#define TYPE_GMK1 0x0002
-#define TYPE_GMK2 0x0003
-
-#define CHANNEL_LIST_MAX_SIZE 14
-struct channel_list {
- u8 size;
- u8 body[CHANNEL_LIST_MAX_SIZE];
- u8 pad;
-} __packed;
-
-/**
- * enum mib_attribute - Management Information Base attribute
- * Attribute values used for accessing and updating the MIB
- *
- * @DOT11_MAC_ADDRESS: MAC Address (R)
- * @DOT11_PRODUCT_VERSION: Firmware Version (R)
- * @DOT11_RTS_THRESHOLD: RTS Threshold (R/W)
- * @DOT11_FRAGMENTATION_THRESHOLD: Fragment Threshold (R/W)
- * @DOT11_PRIVACY_INVOKED: WEP ON/OFF (W)
- * @DOT11_WEP_DEFAULT_KEY_ID: WEP Index (W)
- * @DOT11_WEP_DEFAULT_KEY_VALUE1: WEP Key#1(TKIP AES: PairwiseTemporalKey) (W)
- * @DOT11_WEP_DEFAULT_KEY_VALUE2: WEP Key#2(TKIP AES: GroupKey1) (W)
- * @DOT11_WEP_DEFAULT_KEY_VALUE3: WEP Key#3(TKIP AES: GroupKey2) (W)
- * @DOT11_WEP_DEFAULT_KEY_VALUE4: WEP Key#4 (W)
- * @DOT11_WEP_LIST: WEP LIST
- * @DOT11_DESIRED_SSID: SSID
- * @DOT11_CURRENT_CHANNEL: channel set
- * @DOT11_OPERATION_RATE_SET: rate set
- * @LOCAL_AP_SEARCH_INTERVAL: AP search interval (R/W)
- * @LOCAL_CURRENTADDRESS: MAC Address change (W)
- * @LOCAL_MULTICAST_ADDRESS: Multicast Address (W)
- * @LOCAL_MULTICAST_FILTER: Multicast Address Filter enable/disable (W)
- * @LOCAL_SEARCHED_AP_LIST: AP list (R)
- * @LOCAL_LINK_AP_STATUS: Link AP status (R)
- * @LOCAL_PACKET_STATISTICS: tx,rx packets statistics
- * @LOCAL_AP_SCAN_LIST_TYPE_SET: AP_SCAN_LIST_TYPE
- * @DOT11_RSN_ENABLED: WPA enable/disable (W)
- * @LOCAL_RSN_MODE: RSN mode WPA/WPA2 (W)
- * @DOT11_RSN_CONFIG_MULTICAST_CIPHER: GroupKeyCipherSuite (W)
- * @DOT11_RSN_CONFIG_UNICAST_CIPHER: PairwiseKeyCipherSuite (W)
- * @DOT11_RSN_CONFIG_AUTH_SUITE: AuthenticationKeyManagementSuite (W)
- * @DOT11_RSN_CONFIG_VERSION: RSN version (W)
- * @LOCAL_RSN_CONFIG_ALL: RSN CONFIG ALL (W)
- * @DOT11_PMK_TSC: PMK_TSC (W)
- * @DOT11_GMK1_TSC: GMK1_TSC (W)
- * @DOT11_GMK2_TSC: GMK2_TSC (W)
- * @DOT11_GMK3_TSC: GMK3_TSC
- * @LOCAL_PMK: Pairwise Master Key cache (W)
- * @LOCAL_REGION: Region setting
- * @LOCAL_WPS_ENABLE: WiFi Protected Setup
- * @LOCAL_WPS_PROBE_REQ: WPS Probe Request
- * @LOCAL_GAIN: Carrier sense threshold (demo use)
- * @LOCAL_EEPROM_SUM: EEPROM checksum information
- */
-enum mib_attribute {
- DOT11_MAC_ADDRESS = 0x21010100,
- DOT11_PRODUCT_VERSION = 0x31024100,
- DOT11_RTS_THRESHOLD = 0x21020100,
- DOT11_FRAGMENTATION_THRESHOLD = 0x21050100,
- DOT11_PRIVACY_INVOKED = 0x15010100,
- DOT11_WEP_DEFAULT_KEY_ID = 0x15020100,
- DOT11_WEP_DEFAULT_KEY_VALUE1 = 0x13020101,
- DOT11_WEP_DEFAULT_KEY_VALUE2 = 0x13020102,
- DOT11_WEP_DEFAULT_KEY_VALUE3 = 0x13020103,
- DOT11_WEP_DEFAULT_KEY_VALUE4 = 0x13020104,
- DOT11_WEP_LIST = 0x13020100,
- DOT11_DESIRED_SSID = 0x11090100,
- DOT11_CURRENT_CHANNEL = 0x45010100,
- DOT11_OPERATION_RATE_SET = 0x11110100,
- LOCAL_AP_SEARCH_INTERVAL = 0xF1010100,
- LOCAL_CURRENTADDRESS = 0xF1050100,
- LOCAL_MULTICAST_ADDRESS = 0xF1060100,
- LOCAL_MULTICAST_FILTER = 0xF1060200,
- LOCAL_SEARCHED_AP_LIST = 0xF1030100,
- LOCAL_LINK_AP_STATUS = 0xF1040100,
- LOCAL_PACKET_STATISTICS = 0xF1020100,
- LOCAL_AP_SCAN_LIST_TYPE_SET = 0xF1030200,
- DOT11_RSN_ENABLED = 0x15070100,
- LOCAL_RSN_MODE = 0x56010100,
- DOT11_RSN_CONFIG_MULTICAST_CIPHER = 0x51040100,
- DOT11_RSN_CONFIG_UNICAST_CIPHER = 0x52020100,
- DOT11_RSN_CONFIG_AUTH_SUITE = 0x53020100,
- DOT11_RSN_CONFIG_VERSION = 0x51020100,
- LOCAL_RSN_CONFIG_ALL = 0x5F010100,
- DOT11_PMK_TSC = 0x55010100,
- DOT11_GMK1_TSC = 0x55010101,
- DOT11_GMK2_TSC = 0x55010102,
- DOT11_GMK3_TSC = 0x55010103,
- LOCAL_PMK = 0x58010100,
- LOCAL_REGION = 0xF10A0100,
- LOCAL_WPS_ENABLE = 0xF10B0100,
- LOCAL_WPS_PROBE_REQ = 0xF10C0100,
- LOCAL_GAIN = 0xF10D0100,
- LOCAL_EEPROM_SUM = 0xF10E0100
-};
-
-struct hostif_mib_get_request {
- struct hostif_hdr header;
- __le32 mib_attribute;
-} __packed;
-
-/**
- * enum mib_data_type - Management Information Base data type.
- * @MIB_VALUE_TYPE_NULL: NULL type
- * @MIB_VALUE_TYPE_INT: INTEGER type
- * @MIB_VALUE_TYPE_BOOL: BOOL type
- * @MIB_VALUE_TYPE_COUNT32: unused
- * @MIB_VALUE_TYPE_OSTRING: Chunk of memory
- */
-enum mib_data_type {
- MIB_VALUE_TYPE_NULL = 0,
- MIB_VALUE_TYPE_INT,
- MIB_VALUE_TYPE_BOOL,
- MIB_VALUE_TYPE_COUNT32,
- MIB_VALUE_TYPE_OSTRING
-};
-
-struct hostif_mib_value {
- __le16 size;
- __le16 type;
- u8 body[];
-} __packed;
-
-struct hostif_mib_get_confirm_t {
- struct hostif_hdr header;
- __le32 mib_status;
-#define MIB_SUCCESS 0
-#define MIB_INVALID 1
-#define MIB_READ_ONLY 2
-#define MIB_WRITE_ONLY 3
- __le32 mib_attribute;
- struct hostif_mib_value mib_value;
-} __packed;
-
-struct hostif_mib_set_request_t {
- struct hostif_hdr header;
- __le32 mib_attribute;
- struct hostif_mib_value mib_value;
-} __packed;
-
-struct hostif_power_mgmt_request {
- struct hostif_hdr header;
- __le32 mode;
-#define POWER_ACTIVE 1
-#define POWER_SAVE 2
- __le32 wake_up;
-#define SLEEP_FALSE 0
-#define SLEEP_TRUE 1 /* not used */
- __le32 receive_dtims;
-#define DTIM_FALSE 0
-#define DTIM_TRUE 1
-} __packed;
-
-enum power_mgmt_mode_type {
- POWER_MGMT_ACTIVE,
- POWER_MGMT_SAVE1,
- POWER_MGMT_SAVE2
-};
-
-#define RESULT_SUCCESS 0
-#define RESULT_INVALID_PARAMETERS 1
-#define RESULT_NOT_SUPPORTED 2
-/* #define RESULT_ALREADY_RUNNING 3 */
-#define RESULT_ALREADY_RUNNING 7
-
-struct hostif_start_request {
- struct hostif_hdr header;
- __le16 mode;
-#define MODE_PSEUDO_ADHOC 0
-#define MODE_INFRASTRUCTURE 1
-#define MODE_AP 2 /* not used */
-#define MODE_ADHOC 3
-} __packed;
-
-struct ssid {
- u8 size;
- u8 body[IEEE80211_MAX_SSID_LEN];
- u8 ssid_pad;
-} __packed;
-
-#define RATE_SET_MAX_SIZE 16
-struct rate_set8 {
- u8 size;
- u8 body[8];
- u8 rate_pad;
-} __packed;
-
-struct fh_parms {
- __le16 dwell_time;
- u8 hop_set;
- u8 hop_pattern;
- u8 hop_index;
-} __packed;
-
-struct ds_parms {
- u8 channel;
-} __packed;
-
-struct cf_parms {
- u8 count;
- u8 period;
- __le16 max_duration;
- __le16 dur_remaining;
-} __packed;
-
-struct ibss_parms {
- __le16 atim_window;
-} __packed;
-
-struct rsn_t {
- u8 size;
-#define RSN_BODY_SIZE 64
- u8 body[RSN_BODY_SIZE];
-} __packed;
-
-struct erp_params_t {
- u8 erp_info;
-} __packed;
-
-struct rate_set16 {
- u8 size;
- u8 body[16];
- u8 rate_pad;
-} __packed;
-
-struct ap_info {
- u8 bssid[6]; /* +00 */
- u8 rssi; /* +06 */
- u8 sq; /* +07 */
- u8 noise; /* +08 */
- u8 pad0; /* +09 */
- __le16 beacon_period; /* +10 */
- __le16 capability; /* +12 */
- u8 frame_type; /* +14 */
- u8 ch_info; /* +15 */
- __le16 body_size; /* +16 */
- u8 body[1024]; /* +18 */
- /* +1032 */
-} __packed;
-
-struct link_ap_info {
- u8 bssid[6]; /* +00 */
- u8 rssi; /* +06 */
- u8 sq; /* +07 */
- u8 noise; /* +08 */
- u8 pad0; /* +09 */
- __le16 beacon_period; /* +10 */
- __le16 capability; /* +12 */
- struct rate_set8 rate_set; /* +14 */
- struct fh_parms fh_parameter; /* +24 */
- struct ds_parms ds_parameter; /* +29 */
- struct cf_parms cf_parameter; /* +30 */
- struct ibss_parms ibss_parameter; /* +36 */
- struct erp_params_t erp_parameter; /* +38 */
- u8 pad1; /* +39 */
- struct rate_set8 ext_rate_set; /* +40 */
- u8 DTIM_period; /* +50 */
- u8 rsn_mode; /* +51 */
-#define RSN_MODE_NONE 0
-#define RSN_MODE_WPA 1
-#define RSN_MODE_WPA2 2
- struct {
- u8 size; /* +52 */
- u8 body[128]; /* +53 */
- } __packed rsn;
-} __packed;
-
-#define RESULT_CONNECT 0
-#define RESULT_DISCONNECT 1
-
-struct hostif_stop_request {
- struct hostif_hdr header;
-} __packed;
-
-#define D_11B_ONLY_MODE 0
-#define D_11G_ONLY_MODE 1
-#define D_11BG_COMPATIBLE_MODE 2
-#define D_11A_ONLY_MODE 3
-
-#define CTS_MODE_FALSE 0
-#define CTS_MODE_TRUE 1
-
-struct hostif_request {
- __le16 phy_type;
- __le16 cts_mode;
- __le16 scan_type;
- __le16 capability;
- struct rate_set16 rate_set;
-} __packed;
-
-/**
- * struct hostif_ps_adhoc_set_request - pseudo adhoc mode
- * @capability: bit5 : preamble
- *              bit6 : pbcc - Not supported (always 0)
- *              bit10 : ShortSlotTime
- *              bit13 : DSSS-OFDM - Not supported (always 0)
- */
-struct hostif_ps_adhoc_set_request {
- struct hostif_hdr header;
- struct hostif_request request;
- __le16 channel;
-} __packed;
-
-#define AUTH_TYPE_OPEN_SYSTEM 0
-#define AUTH_TYPE_SHARED_KEY 1
-
-/**
- * struct hostif_infrastructure_set_request - infrastructure mode connect request
- * @capability: bit5 : preamble
- *              bit6 : pbcc - Not supported (always 0)
- *              bit10 : ShortSlotTime
- *              bit13 : DSSS-OFDM - Not supported (always 0)
- */
-struct hostif_infrastructure_set_request {
- struct hostif_hdr header;
- struct hostif_request request;
- struct ssid ssid;
- __le16 beacon_lost_count;
- __le16 auth_type;
- struct channel_list channel_list;
- u8 bssid[ETH_ALEN];
-} __packed;
-
-/**
- * struct hostif_adhoc_set_request - ad-hoc mode set request
- * @capability: bit5 : preamble
- *              bit6 : pbcc - Not supported (always 0)
- *              bit10 : ShortSlotTime
- *              bit13 : DSSS-OFDM - Not supported (always 0)
- */
-struct hostif_adhoc_set_request {
- struct hostif_hdr header;
- struct hostif_request request;
- struct ssid ssid;
- __le16 channel;
-} __packed;
-
-/**
- * struct hostif_adhoc_set2_request - ad-hoc mode set request (with BSSID)
- * @capability: bit5 : preamble
- *              bit6 : pbcc - Not supported (always 0)
- *              bit10 : ShortSlotTime
- *              bit13 : DSSS-OFDM - Not supported (always 0)
- */
-struct hostif_adhoc_set2_request {
- struct hostif_hdr header;
- struct hostif_request request;
- __le16 reserved;
- struct ssid ssid;
- struct channel_list channel_list;
- u8 bssid[ETH_ALEN];
-} __packed;
-
-struct association_request {
- u8 type;
- u8 pad;
- __le16 capability;
- __le16 listen_interval;
- u8 ap_address[6];
- __le16 req_ies_size;
-} __packed;
-
-struct association_response {
- u8 type;
- u8 pad;
- __le16 capability;
- __le16 status;
- __le16 association_id;
- __le16 resp_ies_size;
-} __packed;
-
-struct hostif_bss_scan_request {
- struct hostif_hdr header;
- u8 scan_type;
-#define ACTIVE_SCAN 0
-#define PASSIVE_SCAN 1
- u8 pad[3];
- __le32 ch_time_min;
- __le32 ch_time_max;
- struct channel_list channel_list;
- struct ssid ssid;
-} __packed;
-
-struct hostif_phy_information_request {
- struct hostif_hdr header;
- __le16 type;
-#define NORMAL_TYPE 0
-#define TIME_TYPE 1
- __le16 time; /* unit 100ms */
-} __packed;
-
-enum sleep_mode_type {
- SLP_ACTIVE,
- SLP_SLEEP
-};
-
-struct hostif_sleep_request {
- struct hostif_hdr header;
-} __packed;
-
-struct hostif_mic_failure_request {
- struct hostif_hdr header;
- __le16 failure_count;
- __le16 timer;
-} __packed;
-
-#define BASIC_RATE 0x80
-#define RATE_MASK 0x7F
-
-#define TX_RATE_AUTO 0xff
-#define TX_RATE_1M_FIXED 0
-#define TX_RATE_2M_FIXED 1
-#define TX_RATE_1_2M_AUTO 2
-#define TX_RATE_5M_FIXED 3
-#define TX_RATE_11M_FIXED 4
-
-#define TX_RATE_FULL_AUTO 0
-#define TX_RATE_11_AUTO 1
-#define TX_RATE_11B_AUTO 2
-#define TX_RATE_11BG_AUTO 3
-#define TX_RATE_MANUAL_AUTO 4
-#define TX_RATE_FIXED 5
-
-/* 11b rate */
-#define TX_RATE_1M ((u8)(10 / 5)) /* 11b 11g basic rate */
-#define TX_RATE_2M ((u8)(20 / 5)) /* 11b 11g basic rate */
-#define TX_RATE_5M ((u8)(55 / 5)) /* 11g basic rate */
-#define TX_RATE_11M ((u8)(110 / 5)) /* 11g basic rate */
-
-/* 11g rate */
-#define TX_RATE_6M ((u8)(60 / 5)) /* 11g basic rate */
-#define TX_RATE_12M ((u8)(120 / 5)) /* 11g basic rate */
-#define TX_RATE_24M ((u8)(240 / 5)) /* 11g basic rate */
-#define TX_RATE_9M ((u8)(90 / 5))
-#define TX_RATE_18M ((u8)(180 / 5))
-#define TX_RATE_36M ((u8)(360 / 5))
-#define TX_RATE_48M ((u8)(480 / 5))
-#define TX_RATE_54M ((u8)(540 / 5))
-
-static inline bool is_11b_rate(u8 rate)
-{
- return (((rate & RATE_MASK) == TX_RATE_1M) ||
- ((rate & RATE_MASK) == TX_RATE_2M) ||
- ((rate & RATE_MASK) == TX_RATE_5M) ||
- ((rate & RATE_MASK) == TX_RATE_11M));
-}
-
-static inline bool is_ofdm_rate(u8 rate)
-{
- return (((rate & RATE_MASK) == TX_RATE_6M) ||
- ((rate & RATE_MASK) == TX_RATE_12M) ||
- ((rate & RATE_MASK) == TX_RATE_24M) ||
- ((rate & RATE_MASK) == TX_RATE_9M) ||
- ((rate & RATE_MASK) == TX_RATE_18M) ||
- ((rate & RATE_MASK) == TX_RATE_36M) ||
- ((rate & RATE_MASK) == TX_RATE_48M) ||
- ((rate & RATE_MASK) == TX_RATE_54M));
-}
-
-static inline bool is_11bg_rate(u8 rate)
-{
- return (is_11b_rate(rate) || is_ofdm_rate(rate));
-}
-
-static inline bool is_ofdm_ext_rate(u8 rate)
-{
- return (((rate & RATE_MASK) == TX_RATE_9M) ||
- ((rate & RATE_MASK) == TX_RATE_18M) ||
- ((rate & RATE_MASK) == TX_RATE_36M) ||
- ((rate & RATE_MASK) == TX_RATE_48M) ||
- ((rate & RATE_MASK) == TX_RATE_54M));
-}
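
The helpers above test the rate octet encoding used throughout the rate set: the low seven bits (RATE_MASK) carry the rate in 500 kb/s units and bit 7 (BASIC_RATE) flags a basic rate, which is why TX_RATE_11M is written as 110 / 5. A standalone decode:

#include <stdio.h>

#define BASIC_RATE	0x80
#define RATE_MASK	0x7F

int main(void)
{
	unsigned char octet = 22 | BASIC_RATE;	/* 11 Mb/s, basic */

	/* low 7 bits * 500 kb/s: 22 * 500 = 11000 kb/s = 11 Mb/s */
	printf("rate = %u kb/s, basic = %d\n",
	       (octet & RATE_MASK) * 500, !!(octet & BASIC_RATE));
	return 0;
}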
-
-enum connect_status_type {
- CONNECT_STATUS,
- DISCONNECT_STATUS
-};
-
-enum preamble_type {
- LONG_PREAMBLE,
- SHORT_PREAMBLE
-};
-
-enum multicast_filter_type {
- MCAST_FILTER_MCAST,
- MCAST_FILTER_MCASTALL,
- MCAST_FILTER_PROMISC,
-};
-
-#define NIC_MAX_MCAST_LIST 32
-
-#define HIF_EVENT_MASK 0xE800
-
-static inline bool is_hif_ind(unsigned short event)
-{
- return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) &&
- (((event & ~HIF_EVENT_MASK) == 0x0001) ||
- ((event & ~HIF_EVENT_MASK) == 0x0006) ||
- ((event & ~HIF_EVENT_MASK) == 0x000C) ||
- ((event & ~HIF_EVENT_MASK) == 0x0011) ||
- ((event & ~HIF_EVENT_MASK) == 0x0012)));
-}
-
-static inline bool is_hif_conf(unsigned short event)
-{
- return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) &&
- ((event & ~HIF_EVENT_MASK) > 0x0000) &&
- ((event & ~HIF_EVENT_MASK) < 0x0012) &&
- !is_hif_ind(event));
-}
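
Both predicates classify an event purely by its low bits once the 0xE8xx prefix is confirmed: the five enumerated offsets are indications, anything else in 0x0001..0x0011 is a confirm. A standalone check against two of the constants defined above, reusing the predicate bodies verbatim:

#include <stdbool.h>
#include <stdio.h>

#define HIF_EVENT_MASK 0xE800

static bool is_hif_ind(unsigned short event)
{
	return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) &&
		(((event & ~HIF_EVENT_MASK) == 0x0001) ||
		 ((event & ~HIF_EVENT_MASK) == 0x0006) ||
		 ((event & ~HIF_EVENT_MASK) == 0x000C) ||
		 ((event & ~HIF_EVENT_MASK) == 0x0011) ||
		 ((event & ~HIF_EVENT_MASK) == 0x0012)));
}

static bool is_hif_conf(unsigned short event)
{
	return (((event & HIF_EVENT_MASK) == HIF_EVENT_MASK) &&
		((event & ~HIF_EVENT_MASK) > 0x0000) &&
		((event & ~HIF_EVENT_MASK) < 0x0012) &&
		!is_hif_ind(event));
}

int main(void)
{
	/* HIF_SCAN_IND (0xE812): low bits 0x0012 -> indication
	 * HIF_MIB_GET_CONF (0xE802): low bits 0x0002 -> confirm */
	printf("ind=%d conf=%d\n", is_hif_ind(0xE812), is_hif_conf(0xE802));
	return 0;
}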
-
-#ifdef __KERNEL__
-
-#include "ks_wlan.h"
-
-/* function prototype */
-int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb);
-void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
- unsigned int size);
-void hostif_sme_enqueue(struct ks_wlan_private *priv, u16 event);
-int hostif_init(struct ks_wlan_private *priv);
-void hostif_exit(struct ks_wlan_private *priv);
-int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb),
- struct sk_buff *skb);
-void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb);
-
-void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
-int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
-
-#define KS7010_SIZE_ALIGNMENT 32
-
-static inline size_t hif_align_size(size_t size)
-{
- return ALIGN(size, KS7010_SIZE_ALIGNMENT);
-}
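
ALIGN() is the kernel's power-of-two round-up macro, so hif_align_size() pads every transfer length up to the next 32-byte boundary. The same rounding, sketched standalone (equivalent to ALIGN(size, 32) since the alignment is a power of two):

#include <stddef.h>
#include <stdio.h>

#define KS7010_SIZE_ALIGNMENT 32

static size_t hif_align_size(size_t size)
{
	return (size + KS7010_SIZE_ALIGNMENT - 1) &
	       ~(size_t)(KS7010_SIZE_ALIGNMENT - 1);
}

int main(void)
{
	/* prints: 32 32 64 */
	printf("%zu %zu %zu\n", hif_align_size(1),
	       hif_align_size(32), hif_align_size(33));
	return 0;
}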
-
-#endif /* __KERNEL__ */
-
-#endif /* _KS_HOSTIF_H_ */
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
deleted file mode 100644
index 3e9a91b5131c..000000000000
--- a/drivers/staging/ks7010/ks_wlan.h
+++ /dev/null
@@ -1,567 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for KeyStream IEEE802.11 b/g wireless LAN cards.
- *
- * Copyright (C) 2006-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#ifndef _KS_WLAN_H
-#define _KS_WLAN_H
-
-#include <linux/atomic.h>
-#include <linux/circ_buf.h>
-#include <linux/completion.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/wireless.h>
-
-struct ks_wlan_parameter {
- u8 operation_mode;
- u8 channel;
- u8 tx_rate;
- struct {
- u8 size;
- u8 body[16];
- } rate_set;
- u8 bssid[ETH_ALEN];
- struct {
- u8 size;
- u8 body[32 + 1];
- } ssid;
- u8 preamble;
- u8 power_mgmt;
- u32 scan_type;
-#define BEACON_LOST_COUNT_MAX 65535
- u32 beacon_lost_count;
- u32 rts;
- u32 fragment;
- u32 privacy_invoked;
- u32 wep_index;
- struct {
- u8 size;
- u8 val[13 * 2 + 1];
- } wep_key[4];
- u16 authenticate_type;
- u16 phy_type;
- u16 cts_mode;
- u16 phy_info_timer;
-};
-
-enum {
-	DEVICE_STATE_OFF = 0,	/* hardware unavailable (hw_unavailable != 0) */
- DEVICE_STATE_PREBOOT, /* we are in a pre-boot state (empty RAM) */
- DEVICE_STATE_BOOT, /* boot state (fw upload, run fw) */
- DEVICE_STATE_PREINIT, /* pre-init state */
- DEVICE_STATE_INIT, /* init state (restore MIB backup to device) */
- DEVICE_STATE_READY, /* driver&device are in operational state */
- DEVICE_STATE_SLEEP /* device in sleep mode */
-};
-
-/* SME flag */
-#define SME_MODE_SET BIT(0)
-#define SME_RTS BIT(1)
-#define SME_FRAG BIT(2)
-#define SME_WEP_FLAG BIT(3)
-#define SME_WEP_INDEX BIT(4)
-#define SME_WEP_VAL1 BIT(5)
-#define SME_WEP_VAL2 BIT(6)
-#define SME_WEP_VAL3 BIT(7)
-#define SME_WEP_VAL4 BIT(8)
-#define SME_WEP_VAL_MASK GENMASK(8, 5)
-#define SME_RSN BIT(9)
-#define SME_RSN_MULTICAST BIT(10)
-#define SME_RSN_UNICAST BIT(11)
-#define SME_RSN_AUTH BIT(12)
-
-#define SME_AP_SCAN BIT(13)
-#define SME_MULTICAST BIT(14)
-
-/* SME Event */
-enum {
- SME_START,
-
- SME_MULTICAST_REQUEST,
- SME_MACADDRESS_SET_REQUEST,
- SME_BSS_SCAN_REQUEST,
- SME_SET_FLAG,
- SME_SET_TXKEY,
- SME_SET_KEY1,
- SME_SET_KEY2,
- SME_SET_KEY3,
- SME_SET_KEY4,
- SME_SET_PMK_TSC,
- SME_SET_GMK1_TSC,
- SME_SET_GMK2_TSC,
- SME_SET_GMK3_TSC,
- SME_SET_PMKSA,
- SME_POW_MNGMT_REQUEST,
- SME_PHY_INFO_REQUEST,
- SME_MIC_FAILURE_REQUEST,
- SME_GET_MAC_ADDRESS,
- SME_GET_PRODUCT_VERSION,
- SME_STOP_REQUEST,
- SME_RTS_THRESHOLD_REQUEST,
- SME_FRAGMENTATION_THRESHOLD_REQUEST,
- SME_WEP_INDEX_REQUEST,
- SME_WEP_KEY1_REQUEST,
- SME_WEP_KEY2_REQUEST,
- SME_WEP_KEY3_REQUEST,
- SME_WEP_KEY4_REQUEST,
- SME_WEP_FLAG_REQUEST,
- SME_RSN_UCAST_REQUEST,
- SME_RSN_MCAST_REQUEST,
- SME_RSN_AUTH_REQUEST,
- SME_RSN_ENABLED_REQUEST,
- SME_RSN_MODE_REQUEST,
- SME_WPS_ENABLE_REQUEST,
- SME_WPS_PROBE_REQUEST,
- SME_SET_GAIN,
- SME_GET_GAIN,
- SME_SLEEP_REQUEST,
- SME_SET_REGION,
- SME_MODE_SET_REQUEST,
- SME_START_REQUEST,
- SME_GET_EEPROM_CKSUM,
-
- SME_MIC_FAILURE_CONFIRM,
- SME_START_CONFIRM,
-
- SME_MULTICAST_CONFIRM,
- SME_BSS_SCAN_CONFIRM,
- SME_GET_CURRENT_AP,
- SME_POW_MNGMT_CONFIRM,
- SME_PHY_INFO_CONFIRM,
- SME_STOP_CONFIRM,
- SME_RTS_THRESHOLD_CONFIRM,
- SME_FRAGMENTATION_THRESHOLD_CONFIRM,
- SME_WEP_INDEX_CONFIRM,
- SME_WEP_KEY1_CONFIRM,
- SME_WEP_KEY2_CONFIRM,
- SME_WEP_KEY3_CONFIRM,
- SME_WEP_KEY4_CONFIRM,
- SME_WEP_FLAG_CONFIRM,
- SME_RSN_UCAST_CONFIRM,
- SME_RSN_MCAST_CONFIRM,
- SME_RSN_AUTH_CONFIRM,
- SME_RSN_ENABLED_CONFIRM,
- SME_RSN_MODE_CONFIRM,
- SME_MODE_SET_CONFIRM,
- SME_SLEEP_CONFIRM,
-
- SME_RSN_SET_CONFIRM,
- SME_WEP_SET_CONFIRM,
- SME_TERMINATE,
-
- SME_EVENT_SIZE
-};
-
-/* SME Status */
-enum {
- SME_IDLE,
- SME_SETUP,
- SME_DISCONNECT,
- SME_CONNECT
-};
-
-#define SME_EVENT_BUFF_SIZE 128
-
-struct sme_info {
- int sme_status;
- int event_buff[SME_EVENT_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
- spinlock_t sme_spin;
- unsigned long sme_flag;
-};
-
-struct hostt {
- int buff[SME_EVENT_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
-};
-
-#define RSN_IE_BODY_MAX 64
-struct rsn_ie {
- u8 id; /* 0xdd = WPA or 0x30 = RSN */
-	u8 size;	/* max 255 */
- u8 body[RSN_IE_BODY_MAX];
-} __packed;
-
-#define WPA_INFO_ELEM_ID 0xdd
-#define RSN_INFO_ELEM_ID 0x30
-
-#define WPS_IE_BODY_MAX 255
-struct wps_ie {
- u8 id; /* 221 'dd <len> 00 50 F2 04' */
-	u8 size;	/* max 255 */
- u8 body[WPS_IE_BODY_MAX];
-} __packed;
-
-struct local_ap {
- u8 bssid[6];
- u8 rssi;
- u8 sq;
- struct {
- u8 size;
- u8 body[32];
- u8 ssid_pad;
- } ssid;
- struct {
- u8 size;
- u8 body[16];
- u8 rate_pad;
- } rate_set;
- u16 capability;
- u8 channel;
- u8 noise;
- struct rsn_ie wpa_ie;
- struct rsn_ie rsn_ie;
- struct wps_ie wps_ie;
-};
-
-#define LOCAL_APLIST_MAX 31
-#define LOCAL_CURRENT_AP LOCAL_APLIST_MAX
-struct local_aplist {
- int size;
- struct local_ap ap[LOCAL_APLIST_MAX + 1];
-};
-
-struct local_gain {
- u8 tx_mode;
- u8 rx_mode;
- u8 tx_gain;
- u8 rx_gain;
-};
-
-struct local_eeprom_sum {
- u8 type;
- u8 result;
-};
-
-enum {
- EEPROM_OK,
- EEPROM_CHECKSUM_NONE,
- EEPROM_FW_NOT_SUPPORT,
- EEPROM_NG,
-};
-
-/* Power Save Status */
-enum {
- PS_NONE,
- PS_ACTIVE_SET,
- PS_SAVE_SET,
- PS_CONF_WAIT,
- PS_SNOOZE,
- PS_WAKEUP
-};
-
-struct power_save_status {
-	atomic_t status;	/* initial value 0 */
- struct completion wakeup_wait;
- atomic_t confirm_wait;
- atomic_t snooze_guard;
-};
-
-struct sleep_status {
-	atomic_t status;	/* initial value 0 */
- atomic_t doze_request;
- atomic_t wakeup_request;
-};
-
-/* WPA */
-struct scan_ext {
- unsigned int flag;
- char ssid[IW_ESSID_MAX_SIZE + 1];
-};
-
-#define CIPHER_ID_WPA_NONE "\x00\x50\xf2\x00"
-#define CIPHER_ID_WPA_WEP40 "\x00\x50\xf2\x01"
-#define CIPHER_ID_WPA_TKIP "\x00\x50\xf2\x02"
-#define CIPHER_ID_WPA_CCMP "\x00\x50\xf2\x04"
-#define CIPHER_ID_WPA_WEP104 "\x00\x50\xf2\x05"
-
-#define CIPHER_ID_WPA2_NONE "\x00\x0f\xac\x00"
-#define CIPHER_ID_WPA2_WEP40 "\x00\x0f\xac\x01"
-#define CIPHER_ID_WPA2_TKIP "\x00\x0f\xac\x02"
-#define CIPHER_ID_WPA2_CCMP "\x00\x0f\xac\x04"
-#define CIPHER_ID_WPA2_WEP104 "\x00\x0f\xac\x05"
-
-#define CIPHER_ID_LEN 4
-
-enum {
- KEY_MGMT_802_1X,
- KEY_MGMT_PSK,
- KEY_MGMT_WPANONE,
-};
-
-#define KEY_MGMT_ID_WPA_NONE "\x00\x50\xf2\x00"
-#define KEY_MGMT_ID_WPA_1X "\x00\x50\xf2\x01"
-#define KEY_MGMT_ID_WPA_PSK "\x00\x50\xf2\x02"
-#define KEY_MGMT_ID_WPA_WPANONE "\x00\x50\xf2\xff"
-
-#define KEY_MGMT_ID_WPA2_NONE "\x00\x0f\xac\x00"
-#define KEY_MGMT_ID_WPA2_1X "\x00\x0f\xac\x01"
-#define KEY_MGMT_ID_WPA2_PSK "\x00\x0f\xac\x02"
-#define KEY_MGMT_ID_WPA2_WPANONE "\x00\x0f\xac\xff"
-
-#define KEY_MGMT_ID_LEN 4
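
The cipher and key-management selectors above are the standard 4-octet OUI + type identifiers: 00-50-F2 is the Microsoft OUI used by WPA, 00-0F-AC the IEEE 802.11 (RSN) OUI used by WPA2. A standalone dump of one selector:

#include <stdio.h>

#define CIPHER_ID_WPA2_CCMP "\x00\x0f\xac\x04"
#define CIPHER_ID_LEN 4

int main(void)
{
	const unsigned char *id = (const unsigned char *)CIPHER_ID_WPA2_CCMP;
	int i;

	/* prints: 00-0f-ac-04 (RSN OUI + CCMP type) */
	for (i = 0; i < CIPHER_ID_LEN; i++)
		printf("%02x%c", id[i], i == CIPHER_ID_LEN - 1 ? '\n' : '-');
	return 0;
}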
-
-#define MIC_KEY_SIZE 8
-
-struct wpa_key {
- u32 ext_flags; /* IW_ENCODE_EXT_xxx */
- u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
- u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
- struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
- * (group) keys or unicast address for
- * individual keys
- */
- u16 alg;
- u16 key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
- u8 key_val[IW_ENCODING_TOKEN_MAX];
- u8 tx_mic_key[MIC_KEY_SIZE];
- u8 rx_mic_key[MIC_KEY_SIZE];
-};
-
-#define WPA_KEY_INDEX_MAX 4
-#define WPA_RX_SEQ_LEN 6
-
-struct mic_failure {
- u16 failure; /* MIC Failure counter 0 or 1 or 2 */
- u16 counter; /* 1sec counter 0-60 */
- u32 last_failure_time;
- int stop;
-};
-
-struct wpa_status {
- int wpa_enabled;
- bool rsn_enabled;
- int version;
- int pairwise_suite; /* unicast cipher */
- int group_suite; /* multicast cipher */
- int key_mgmt_suite;
- int auth_alg;
- int txkey;
- struct wpa_key key[WPA_KEY_INDEX_MAX];
- struct scan_ext scan_ext;
- struct mic_failure mic_failure;
-};
-
-#include <linux/list.h>
-#define PMK_LIST_MAX 8
-struct pmk_list {
- u16 size;
- struct list_head head;
- struct pmk {
- struct list_head list;
- u8 bssid[ETH_ALEN];
- u8 pmkid[IW_PMKID_LEN];
- } pmk[PMK_LIST_MAX];
-};
-
-struct wps_status {
- int wps_enabled;
- int ielen;
- u8 ie[255];
-};
-
-/* Tx Device struct */
-#define TX_DEVICE_BUFF_SIZE 1024
-
-struct ks_wlan_private;
-
-/**
- * struct tx_device_buffer - Queue item for the tx queue.
- * @sendp: Pointer to the send request data.
- * @size: Size of @sendp data.
- * @complete_handler: Function called once data write to device is complete.
- * @arg1: First argument to @complete_handler.
- * @arg2: Second argument to @complete_handler.
- */
-struct tx_device_buffer {
- unsigned char *sendp;
- unsigned int size;
- void (*complete_handler)(struct ks_wlan_private *priv,
- struct sk_buff *skb);
- struct sk_buff *skb;
-};
-
-/**
- * struct tx_device - Tx buffer queue.
- * @tx_device_buffer: Queue buffer.
- * @qhead: Head of tx queue.
- * @qtail: Tail of tx queue.
- * @tx_dev_lock: Queue lock.
- */
-struct tx_device {
- struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
- spinlock_t tx_dev_lock; /* protect access to the queue */
-};
-
-/* Rx Device struct */
-#define RX_DATA_SIZE (2 + 2 + 2347 + 1)
-#define RX_DEVICE_BUFF_SIZE 32
-
-/**
- * struct rx_device_buffer - Queue item for the rx queue.
- * @data: rx data.
- * @size: Size of @data.
- */
-struct rx_device_buffer {
- unsigned char data[RX_DATA_SIZE];
- unsigned int size;
-};
-
-/**
- * struct rx_device - Rx buffer queue.
- * @rx_device_buffer: Queue buffer.
- * @qhead: Head of rx queue.
- * @qtail: Tail of rx queue.
- * @rx_dev_lock: Queue lock.
- */
-struct rx_device {
- struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE];
- unsigned int qhead;
- unsigned int qtail;
- spinlock_t rx_dev_lock; /* protect access to the queue */
-};
-
-struct ks_wlan_private {
- /* hardware information */
- void *if_hw;
- struct workqueue_struct *wq;
- struct delayed_work rw_dwork;
- struct tasklet_struct rx_bh_task;
-
- struct net_device *net_dev;
- struct net_device_stats nstats;
- struct iw_statistics wstats;
-
- struct completion confirm_wait;
-
- /* trx device & sme */
- struct tx_device tx_dev;
- struct rx_device rx_dev;
- struct sme_info sme_i;
- u8 *rxp;
- unsigned int rx_size;
- struct work_struct sme_work;
- struct work_struct wakeup_work;
- int scan_ind_count;
-
- unsigned char eth_addr[ETH_ALEN];
-
- struct local_aplist aplist;
- struct local_ap current_ap;
- struct power_save_status psstatus;
- struct sleep_status sleepstatus;
- struct wpa_status wpa;
- struct pmk_list pmklist;
- /* wireless parameter */
- struct ks_wlan_parameter reg;
- u8 current_rate;
-
- char nick[IW_ESSID_MAX_SIZE + 1];
-
- spinlock_t multicast_spin;
-
- spinlock_t dev_read_lock;
- wait_queue_head_t devread_wait;
-
- unsigned int need_commit; /* for ioctl */
-
- /* DeviceIoControl */
- bool is_device_open;
- atomic_t event_count;
- atomic_t rec_count;
- int dev_count;
-#define DEVICE_STOCK_COUNT 20
- unsigned char *dev_data[DEVICE_STOCK_COUNT];
- int dev_size[DEVICE_STOCK_COUNT];
-
- /* ioctl : IOCTL_FIRMWARE_VERSION */
- unsigned char firmware_version[128 + 1];
- int version_size;
-
- bool mac_address_valid;
-
- int dev_state;
-
- struct sk_buff *skb;
- unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
-#define FORCE_DISCONNECT 0x80000000
-#define CONNECT_STATUS_MASK 0x7FFFFFFF
- u32 connect_status;
- int infra_status;
- u8 scan_ssid_len;
- u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
- struct local_gain gain;
- struct wps_status wps;
- u8 sleep_mode;
-
- u8 region;
- struct local_eeprom_sum eeprom_sum;
- u8 eeprom_checksum;
-
- struct hostt hostt;
-
- unsigned long last_doze;
- unsigned long last_wakeup;
-
- unsigned int wakeup_count; /* for detect wakeup loop */
-};
-
-static inline void inc_txqhead(struct ks_wlan_private *priv)
-{
- priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE;
-}
-
-static inline void inc_txqtail(struct ks_wlan_private *priv)
-{
- priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE;
-}
-
-static inline bool txq_has_space(struct ks_wlan_private *priv)
-{
- return (CIRC_SPACE(priv->tx_dev.qhead, priv->tx_dev.qtail,
- TX_DEVICE_BUFF_SIZE) > 0);
-}
-
-static inline void inc_rxqhead(struct ks_wlan_private *priv)
-{
- priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE;
-}
-
-static inline void inc_rxqtail(struct ks_wlan_private *priv)
-{
- priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE;
-}
-
-static inline bool rxq_has_space(struct ks_wlan_private *priv)
-{
- return (CIRC_SPACE(priv->rx_dev.qhead, priv->rx_dev.qtail,
- RX_DEVICE_BUFF_SIZE) > 0);
-}
-
-static inline unsigned int txq_count(struct ks_wlan_private *priv)
-{
- return CIRC_CNT_TO_END(priv->tx_dev.qhead, priv->tx_dev.qtail,
- TX_DEVICE_BUFF_SIZE);
-}
-
-static inline unsigned int rxq_count(struct ks_wlan_private *priv)
-{
- return CIRC_CNT_TO_END(priv->rx_dev.qhead, priv->rx_dev.qtail,
- RX_DEVICE_BUFF_SIZE);
-}
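
CIRC_SPACE() and CIRC_CNT() come from linux/circ_buf.h; for a power-of-two ring their arithmetic reduces to masked subtraction. A standalone sketch (macro definitions copied from that header):

#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int head = 5, tail = 2, size = 32;

	/* 3 items queued, 28 free (one slot stays empty to mark "full") */
	printf("cnt=%u space=%u\n", CIRC_CNT(head, tail, size),
	       CIRC_SPACE(head, tail, size));
	return 0;
}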
-
-int ks_wlan_net_start(struct net_device *dev);
-int ks_wlan_net_stop(struct net_device *dev);
-bool is_connect_status(u32 status);
-bool is_disconnect_status(u32 status);
-
-#endif /* _KS_WLAN_H */
diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h
deleted file mode 100644
index 97c7d95de411..000000000000
--- a/drivers/staging/ks7010/ks_wlan_ioctl.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for KeyStream 11b/g wireless LAN
- *
- * Copyright (c) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#ifndef _KS_WLAN_IOCTL_H
-#define _KS_WLAN_IOCTL_H
-
-#include <linux/wireless.h>
-/* The low order bit identifies a SET (0) or a GET (1) ioctl. */
-
-/* (SIOCIWFIRSTPRIV + 0) */
-/* former KS_WLAN_GET_DRIVER_VERSION (SIOCIWFIRSTPRIV + 1) */
-/* (SIOCIWFIRSTPRIV + 2) */
-#define KS_WLAN_GET_FIRM_VERSION (SIOCIWFIRSTPRIV + 3)
-#define KS_WLAN_SET_WPS_ENABLE (SIOCIWFIRSTPRIV + 4)
-#define KS_WLAN_GET_WPS_ENABLE (SIOCIWFIRSTPRIV + 5)
-#define KS_WLAN_SET_WPS_PROBE_REQ (SIOCIWFIRSTPRIV + 6)
-#define KS_WLAN_GET_EEPROM_CKSUM (SIOCIWFIRSTPRIV + 7)
-#define KS_WLAN_SET_PREAMBLE (SIOCIWFIRSTPRIV + 8)
-#define KS_WLAN_GET_PREAMBLE (SIOCIWFIRSTPRIV + 9)
-#define KS_WLAN_SET_POWER_SAVE (SIOCIWFIRSTPRIV + 10)
-#define KS_WLAN_GET_POWER_SAVE (SIOCIWFIRSTPRIV + 11)
-#define KS_WLAN_SET_SCAN_TYPE (SIOCIWFIRSTPRIV + 12)
-#define KS_WLAN_GET_SCAN_TYPE (SIOCIWFIRSTPRIV + 13)
-#define KS_WLAN_SET_RX_GAIN (SIOCIWFIRSTPRIV + 14)
-#define KS_WLAN_GET_RX_GAIN (SIOCIWFIRSTPRIV + 15)
-#define KS_WLAN_HOSTT (SIOCIWFIRSTPRIV + 16) /* unused */
-//#define KS_WLAN_SET_REGION (SIOCIWFIRSTPRIV + 17)
-#define KS_WLAN_SET_BEACON_LOST (SIOCIWFIRSTPRIV + 18)
-#define KS_WLAN_GET_BEACON_LOST (SIOCIWFIRSTPRIV + 19)
-
-#define KS_WLAN_SET_TX_GAIN (SIOCIWFIRSTPRIV + 20)
-#define KS_WLAN_GET_TX_GAIN (SIOCIWFIRSTPRIV + 21)
-
-/* for KS7010 */
-#define KS_WLAN_SET_PHY_TYPE (SIOCIWFIRSTPRIV + 22)
-#define KS_WLAN_GET_PHY_TYPE (SIOCIWFIRSTPRIV + 23)
-#define KS_WLAN_SET_CTS_MODE (SIOCIWFIRSTPRIV + 24)
-#define KS_WLAN_GET_CTS_MODE (SIOCIWFIRSTPRIV + 25)
-/* (SIOCIWFIRSTPRIV + 26) */
-/* (SIOCIWFIRSTPRIV + 27) */
-#define KS_WLAN_SET_SLEEP_MODE (SIOCIWFIRSTPRIV + 28) /* sleep mode */
-#define KS_WLAN_GET_SLEEP_MODE (SIOCIWFIRSTPRIV + 29) /* sleep mode */
-/* (SIOCIWFIRSTPRIV + 30) */
-/* (SIOCIWFIRSTPRIV + 31) */
-
-#ifdef __KERNEL__
-
-#include "ks_wlan.h"
-#include <linux/netdevice.h>
-
-int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
- unsigned int commit_flag);
-
-#endif /* __KERNEL__ */
-
-#endif /* _KS_WLAN_IOCTL_H */
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
deleted file mode 100644
index 0fb97a79ad0b..000000000000
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ /dev/null
@@ -1,2676 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for KeyStream 11b/g wireless LAN
- *
- * Copyright (C) 2005-2008 KeyStream Corp.
- * Copyright (C) 2009 Renesas Technology Corp.
- */
-
-#include <linux/atomic.h>
-#include <linux/completion.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <linux/timer.h>
-#include <linux/uaccess.h>
-
-static int wep_on_off;
-#define WEP_OFF 0
-#define WEP_ON_64BIT 1
-#define WEP_ON_128BIT 2
-
-#include "ks_wlan.h"
-#include "ks_hostif.h"
-#include "ks_wlan_ioctl.h"
-
-/* Include Wireless Extension definition and check version */
-#include <linux/wireless.h>
-#define WIRELESS_SPY /* enable iwspy support */
-#include <net/iw_handler.h> /* New driver API */
-
-/* Frequency list (map channels to frequencies) */
-static const long frequency_list[] = {
- 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484
-};
-
-/* A few details needed for WEP (Wired Equivalent Privacy) */
-#define MAX_KEY_SIZE 13	/* 104-bit key; "128-bit" WEP includes the 24-bit IV */
-#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */
-struct wep_key {
- u16 len;
- u8 key[16]; /* 40-bit and 104-bit keys */
-};
-
-/*
- * function prototypes
- */
-static int ks_wlan_open(struct net_device *dev);
-static void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static int ks_wlan_close(struct net_device *dev);
-static void ks_wlan_set_rx_mode(struct net_device *dev);
-static struct net_device_stats *ks_wlan_get_stats(struct net_device *dev);
-static int ks_wlan_set_mac_address(struct net_device *dev, void *addr);
-static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
- int cmd);
-
-static atomic_t update_phyinfo;
-static struct timer_list update_phyinfo_timer;
-static
-int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
-{
- struct iw_statistics *wstats = &priv->wstats;
-
- netdev_dbg(priv->net_dev, "in_interrupt = %ld\n", in_interrupt());
-
- if (priv->dev_state < DEVICE_STATE_READY)
-		return -EBUSY;	/* initialization not finished */
-
- if (atomic_read(&update_phyinfo))
- return -EPERM;
-
- /* The status */
- wstats->status = priv->reg.operation_mode; /* Operation mode */
-
- /* Signal quality and co. But where is the noise level ??? */
- hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);
-
- /* interruptible_sleep_on_timeout(&priv->confirm_wait, HZ/2); */
- if (!wait_for_completion_interruptible_timeout
- (&priv->confirm_wait, HZ / 2)) {
-		netdev_dbg(priv->net_dev, "wait timed out!\n");
- }
-
- atomic_inc(&update_phyinfo);
- update_phyinfo_timer.expires = jiffies + HZ; /* 1sec */
- add_timer(&update_phyinfo_timer);
-
- return 0;
-}
-
-static
-void ks_wlan_update_phyinfo_timeout(struct timer_list *unused)
-{
- pr_debug("in_interrupt = %ld\n", in_interrupt());
- atomic_set(&update_phyinfo, 0);
-}
-
-int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
- unsigned int commit_flag)
-{
- hostif_sme_enqueue(priv, SME_STOP_REQUEST);
-
- if (commit_flag & SME_RTS)
- hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
- if (commit_flag & SME_FRAG)
- hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
-
- if (commit_flag & SME_WEP_INDEX)
- hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
- if (commit_flag & SME_WEP_VAL1)
- hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
- if (commit_flag & SME_WEP_VAL2)
- hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
- if (commit_flag & SME_WEP_VAL3)
- hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
- if (commit_flag & SME_WEP_VAL4)
- hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
- if (commit_flag & SME_WEP_FLAG)
- hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
-
- if (commit_flag & SME_RSN) {
- hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
- hostif_sme_enqueue(priv, SME_RSN_MODE_REQUEST);
- }
- if (commit_flag & SME_RSN_MULTICAST)
- hostif_sme_enqueue(priv, SME_RSN_MCAST_REQUEST);
- if (commit_flag & SME_RSN_UNICAST)
- hostif_sme_enqueue(priv, SME_RSN_UCAST_REQUEST);
- if (commit_flag & SME_RSN_AUTH)
- hostif_sme_enqueue(priv, SME_RSN_AUTH_REQUEST);
-
- hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
-
- hostif_sme_enqueue(priv, SME_START_REQUEST);
-
- return 0;
-}
-
-/*
- * Initial Wireless Extension code for Ks_Wlannet driver by :
- * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
- * Conversion to new driver API by :
- * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02
- * Javier also did a good amount of work here, adding some new extensions
- * and fixing my code. Let's just say that without him this code just
- * would not work at all... - Jean II
- */
-
-static int ks_wlan_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *cwrq,
- char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (priv->dev_state < DEVICE_STATE_READY)
- strscpy(cwrq->name, "NOT READY!", sizeof(cwrq->name));
- else if (priv->reg.phy_type == D_11B_ONLY_MODE)
- strscpy(cwrq->name, "IEEE 802.11b", sizeof(cwrq->name));
- else if (priv->reg.phy_type == D_11G_ONLY_MODE)
- strscpy(cwrq->name, "IEEE 802.11g", sizeof(cwrq->name));
- else
- strscpy(cwrq->name, "IEEE 802.11b/g", sizeof(cwrq->name));
-
- return 0;
-}
-
-static int ks_wlan_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *fwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int channel;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* If setting by frequency, convert to a channel */
- if ((fwrq->freq.e == 1) &&
- (fwrq->freq.m >= 241200000) && (fwrq->freq.m <= 248700000)) {
- int f = fwrq->freq.m / 100000;
- int c = 0;
-
- while ((c < 14) && (f != frequency_list[c]))
- c++;
- /* Hack to fall through... */
- fwrq->freq.e = 0;
- fwrq->freq.m = c + 1;
- }
- /* Setting by channel number */
- if ((fwrq->freq.m > 1000) || (fwrq->freq.e > 0))
- return -EOPNOTSUPP;
-
- channel = fwrq->freq.m;
- /* We should do a better check than that,
- * based on the card capability !!!
- */
- if ((channel < 1) || (channel > 14)) {
- netdev_dbg(dev, "%s: New channel value of %d is invalid!\n",
- dev->name, fwrq->freq.m);
- return -EINVAL;
- }
-
- /* Yes ! We can set it !!! */
- priv->reg.channel = (u8)(channel);
- priv->need_commit |= SME_MODE_SET;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *fwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int f;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (is_connect_status(priv->connect_status))
- f = (int)priv->current_ap.channel;
- else
- f = (int)priv->reg.channel;
-
- fwrq->freq.m = frequency_list[f - 1] * 100000;
- fwrq->freq.e = 1;
-
- return 0;
-}
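
The iw_freq pair used by the two handlers above is a mantissa/exponent encoding: the frequency in Hz is m * 10^e, so the m = 2412 * 100000, e = 1 written by ks_wlan_get_freq() for channel 1 decodes to 2.412 GHz. A standalone decode:

#include <stdio.h>

int main(void)
{
	long m = 2412 * 100000;	/* as written for channel 1 */
	int e = 1;
	double hz = m;

	while (e-- > 0)
		hz *= 10.0;

	printf("%.0f Hz\n", hz);	/* 2412000000 */
	return 0;
}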
-
-static int ks_wlan_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- size_t len;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* Check if we asked for `any' */
- if (!dwrq->essid.flags) {
- /* Just send an empty SSID list */
- memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
- priv->reg.ssid.size = 0;
- } else {
- len = dwrq->essid.length;
- /* iwconfig uses nul termination in SSID.. */
- if (len > 0 && extra[len - 1] == '\0')
- len--;
-
- /* Check the size of the string */
- if (len > IW_ESSID_MAX_SIZE)
- return -EINVAL;
-
- /* Set the SSID */
- memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
- memcpy(priv->reg.ssid.body, extra, len);
- priv->reg.ssid.size = len;
- }
- /* Write it to the card */
- priv->need_commit |= SME_MODE_SET;
-
- ks_wlan_setup_parameter(priv, priv->need_commit);
- priv->need_commit = 0;
- return 0;
-}
-
-static int ks_wlan_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* Note : if dwrq->flags != 0, we should
- * get the relevant SSID from the SSID list...
- */
- if (priv->reg.ssid.size != 0) {
- /* Get the current SSID */
- memcpy(extra, priv->reg.ssid.body, priv->reg.ssid.size);
-
- /* If none, we may want to get the one that was set */
-
- /* Push it out ! */
- dwrq->essid.length = priv->reg.ssid.size;
- dwrq->essid.flags = 1; /* active */
- } else {
- dwrq->essid.length = 0;
- dwrq->essid.flags = 0; /* ANY */
- }
-
- return 0;
-}
-
-static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *awrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (priv->reg.operation_mode != MODE_ADHOC &&
- priv->reg.operation_mode != MODE_INFRASTRUCTURE) {
- eth_zero_addr(priv->reg.bssid);
- return -EOPNOTSUPP;
- }
-
- ether_addr_copy(priv->reg.bssid, awrq->ap_addr.sa_data);
- if (is_valid_ether_addr((u8 *)priv->reg.bssid))
- priv->need_commit |= SME_MODE_SET;
-
- netdev_dbg(dev, "bssid = %pM\n", priv->reg.bssid);
-
- /* Write it to the card */
- if (priv->need_commit) {
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
- }
- return 0;
-}
-
-static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *awrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (is_connect_status(priv->connect_status))
- ether_addr_copy(awrq->ap_addr.sa_data, priv->current_ap.bssid);
- else
- eth_zero_addr(awrq->ap_addr.sa_data);
-
- awrq->ap_addr.sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-static int ks_wlan_set_nick(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* Check the size of the string */
- if (dwrq->data.length > 16 + 1)
- return -E2BIG;
-
- memset(priv->nick, 0, sizeof(priv->nick));
- memcpy(priv->nick, extra, dwrq->data.length);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_nick(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- strscpy(extra, priv->nick, 17);
- dwrq->data.length = strlen(extra) + 1;
-
- return 0;
-}
-
-static int ks_wlan_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int i = 0;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- if (vwrq->bitrate.fixed == 1) {
- switch (vwrq->bitrate.value) {
- case 11000000:
- case 5500000:
- priv->reg.rate_set.body[0] =
- (u8)(vwrq->bitrate.value / 500000);
- break;
- case 2000000:
- case 1000000:
- priv->reg.rate_set.body[0] =
- ((u8)(vwrq->bitrate.value / 500000)) |
- BASIC_RATE;
- break;
- default:
- return -EINVAL;
- }
- priv->reg.tx_rate = TX_RATE_FIXED;
- priv->reg.rate_set.size = 1;
- } else { /* vwrq->fixed == 0 */
- if (vwrq->bitrate.value > 0) {
- switch (vwrq->bitrate.value) {
- case 11000000:
- priv->reg.rate_set.body[3] =
- TX_RATE_11M;
- i++;
- fallthrough;
- case 5500000:
- priv->reg.rate_set.body[2] = TX_RATE_5M;
- i++;
- fallthrough;
- case 2000000:
- priv->reg.rate_set.body[1] =
- TX_RATE_2M | BASIC_RATE;
- i++;
- fallthrough;
- case 1000000:
- priv->reg.rate_set.body[0] =
- TX_RATE_1M | BASIC_RATE;
- i++;
- break;
- default:
- return -EINVAL;
- }
- priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
- priv->reg.rate_set.size = i;
- } else {
- priv->reg.rate_set.body[3] = TX_RATE_11M;
- priv->reg.rate_set.body[2] = TX_RATE_5M;
- priv->reg.rate_set.body[1] =
- TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] =
- TX_RATE_1M | BASIC_RATE;
- priv->reg.tx_rate = TX_RATE_FULL_AUTO;
- priv->reg.rate_set.size = 4;
- }
- }
- } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- if (vwrq->bitrate.fixed == 1) {
- switch (vwrq->bitrate.value) {
- case 54000000:
- case 48000000:
- case 36000000:
- case 18000000:
- case 9000000:
- priv->reg.rate_set.body[0] =
- (u8)(vwrq->bitrate.value / 500000);
- break;
- case 24000000:
- case 12000000:
- case 11000000:
- case 6000000:
- case 5500000:
- case 2000000:
- case 1000000:
- priv->reg.rate_set.body[0] =
- ((u8)(vwrq->bitrate.value / 500000)) |
- BASIC_RATE;
- break;
- default:
- return -EINVAL;
- }
- priv->reg.tx_rate = TX_RATE_FIXED;
- priv->reg.rate_set.size = 1;
- } else { /* vwrq->fixed == 0 */
- if (vwrq->bitrate.value > 0) {
- switch (vwrq->bitrate.value) {
- case 54000000:
- priv->reg.rate_set.body[11] =
- TX_RATE_54M;
- i++;
- fallthrough;
- case 48000000:
- priv->reg.rate_set.body[10] =
- TX_RATE_48M;
- i++;
- fallthrough;
- case 36000000:
- priv->reg.rate_set.body[9] =
- TX_RATE_36M;
- i++;
- fallthrough;
- case 24000000:
- case 18000000:
- case 12000000:
- case 11000000:
- case 9000000:
- case 6000000:
- if (vwrq->bitrate.value == 24000000) {
- priv->reg.rate_set.body[8] =
- TX_RATE_18M;
- i++;
- priv->reg.rate_set.body[7] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[6] =
- TX_RATE_24M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[5] =
- TX_RATE_12M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- i++;
- } else if (vwrq->bitrate.value == 18000000) {
- priv->reg.rate_set.body[7] =
- TX_RATE_18M;
- i++;
- priv->reg.rate_set.body[6] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[5] =
- TX_RATE_12M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- i++;
- } else if (vwrq->bitrate.value == 12000000) {
- priv->reg.rate_set.body[6] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[5] =
- TX_RATE_12M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- i++;
- } else if (vwrq->bitrate.value == 11000000) {
- priv->reg.rate_set.body[5] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- i++;
- } else if (vwrq->bitrate.value == 9000000) {
- priv->reg.rate_set.body[4] =
- TX_RATE_9M;
- i++;
- priv->reg.rate_set.body[3] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- } else { /* vwrq->value == 6000000 */
- priv->reg.rate_set.body[3] =
- TX_RATE_6M | BASIC_RATE;
- i++;
- }
- fallthrough;
- case 5500000:
- priv->reg.rate_set.body[2] =
- TX_RATE_5M | BASIC_RATE;
- i++;
- fallthrough;
- case 2000000:
- priv->reg.rate_set.body[1] =
- TX_RATE_2M | BASIC_RATE;
- i++;
- fallthrough;
- case 1000000:
- priv->reg.rate_set.body[0] =
- TX_RATE_1M | BASIC_RATE;
- i++;
- break;
- default:
- return -EINVAL;
- }
- priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
- priv->reg.rate_set.size = i;
- } else {
- priv->reg.rate_set.body[11] = TX_RATE_54M;
- priv->reg.rate_set.body[10] = TX_RATE_48M;
- priv->reg.rate_set.body[9] = TX_RATE_36M;
- priv->reg.rate_set.body[8] = TX_RATE_18M;
- priv->reg.rate_set.body[7] = TX_RATE_9M;
- priv->reg.rate_set.body[6] =
- TX_RATE_24M | BASIC_RATE;
- priv->reg.rate_set.body[5] =
- TX_RATE_12M | BASIC_RATE;
- priv->reg.rate_set.body[4] =
- TX_RATE_6M | BASIC_RATE;
- priv->reg.rate_set.body[3] =
- TX_RATE_11M | BASIC_RATE;
- priv->reg.rate_set.body[2] =
- TX_RATE_5M | BASIC_RATE;
- priv->reg.rate_set.body[1] =
- TX_RATE_2M | BASIC_RATE;
- priv->reg.rate_set.body[0] =
- TX_RATE_1M | BASIC_RATE;
- priv->reg.tx_rate = TX_RATE_FULL_AUTO;
- priv->reg.rate_set.size = 12;
- }
- }
- }
-
- priv->need_commit |= SME_MODE_SET;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- netdev_dbg(dev, "in_interrupt = %ld update_phyinfo = %d\n",
- in_interrupt(), atomic_read(&update_phyinfo));
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (!atomic_read(&update_phyinfo))
- ks_wlan_update_phy_information(priv);
-
- vwrq->bitrate.value = ((priv->current_rate) & RATE_MASK) * 500000;
- vwrq->bitrate.fixed = (priv->reg.tx_rate == TX_RATE_FIXED) ? 1 : 0;
-
- return 0;
-}
-
-static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int rthr = vwrq->rts.value;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
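- /* an RTS threshold of 2347 (the maximum) means RTS/CTS is disabled */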
- if (vwrq->rts.disabled)
- rthr = 2347;
- if ((rthr < 0) || (rthr > 2347))
- return -EINVAL;
-
- priv->reg.rts = rthr;
- priv->need_commit |= SME_RTS;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- vwrq->rts.value = priv->reg.rts;
- vwrq->rts.disabled = (vwrq->rts.value >= 2347);
- vwrq->rts.fixed = 1;
-
- return 0;
-}
-
-static int ks_wlan_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int fthr = vwrq->frag.value;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
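- /* the valid range is 256..2346; 2346 means fragmentation is disabled */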
- if (vwrq->frag.disabled)
- fthr = 2346;
- if ((fthr < 256) || (fthr > 2346))
- return -EINVAL;
-
- fthr &= ~0x1; /* round down to an even value - is this really needed? */
- priv->reg.fragment = fthr;
- priv->need_commit |= SME_FRAG;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- vwrq->frag.value = priv->reg.fragment;
- vwrq->frag.disabled = (vwrq->frag.value >= 2346);
- vwrq->frag.fixed = 1;
-
- return 0;
-}
-
-static int ks_wlan_set_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (uwrq->mode != IW_MODE_ADHOC &&
- uwrq->mode != IW_MODE_INFRA)
- return -EINVAL;
-
- priv->reg.operation_mode = (uwrq->mode == IW_MODE_ADHOC) ?
- MODE_ADHOC : MODE_INFRASTRUCTURE;
- priv->need_commit |= SME_MODE_SET;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* If not managed, assume it's ad-hoc */
- uwrq->mode = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ?
- IW_MODE_INFRA : IW_MODE_ADHOC;
-
- return 0;
-}
-
-static int ks_wlan_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_point *enc = &dwrq->encoding;
- struct wep_key key;
- int index = (enc->flags & IW_ENCODE_INDEX);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (enc->length > MAX_KEY_SIZE)
- return -EINVAL;
-
- /* for SLEEP MODE */
- if ((index < 0) || (index > 4))
- return -EINVAL;
-
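- /* IW_ENCODE_INDEX is 1-based; 0 selects the current default key */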
- index = (index == 0) ? priv->reg.wep_index : (index - 1);
-
- /* Is WEP supported? */
- /* Basic check: do we have a key to set? */
- if (enc->length > 0) {
- key.len = (enc->length > MIN_KEY_SIZE) ?
- MAX_KEY_SIZE : MIN_KEY_SIZE;
- priv->reg.privacy_invoked = 0x01;
- priv->need_commit |= SME_WEP_FLAG;
- wep_on_off = (enc->length > MIN_KEY_SIZE) ?
- WEP_ON_128BIT : WEP_ON_64BIT;
- /* Check if the key is not marked as invalid */
- if (enc->flags & IW_ENCODE_NOKEY)
- return 0;
-
- /* Cleanup */
- memset(key.key, 0, MAX_KEY_SIZE);
- /* Copy the key in the driver */
- if (copy_from_user(key.key, enc->pointer, enc->length)) {
- key.len = 0;
- return -EFAULT;
- }
- /* Send the key to the card */
- priv->reg.wep_key[index].size = key.len;
- memcpy(&priv->reg.wep_key[index].val[0], &key.key[0],
- priv->reg.wep_key[index].size);
- priv->need_commit |= (SME_WEP_VAL1 << index);
- priv->reg.wep_index = index;
- priv->need_commit |= SME_WEP_INDEX;
- } else {
- if (enc->flags & IW_ENCODE_DISABLED) {
- priv->reg.wep_key[0].size = 0;
- priv->reg.wep_key[1].size = 0;
- priv->reg.wep_key[2].size = 0;
- priv->reg.wep_key[3].size = 0;
- priv->reg.privacy_invoked = 0x00;
- if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
- priv->need_commit |= SME_MODE_SET;
-
- priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
- wep_on_off = WEP_OFF;
- priv->need_commit |= SME_WEP_FLAG;
- } else {
- /* set_wep_key(priv, index, 0, 0, 1); xxx */
- if (priv->reg.wep_key[index].size == 0)
- return -EINVAL;
- priv->reg.wep_index = index;
- priv->need_commit |= SME_WEP_INDEX;
- }
- }
-
- /* Commit the changes if needed */
- if (enc->flags & IW_ENCODE_MODE)
- priv->need_commit |= SME_WEP_FLAG;
-
- if (enc->flags & IW_ENCODE_OPEN) {
- if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
- priv->need_commit |= SME_MODE_SET;
-
- priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
- } else if (enc->flags & IW_ENCODE_RESTRICTED) {
- if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
- priv->need_commit |= SME_MODE_SET;
-
- priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
- }
- if (priv->need_commit) {
- ks_wlan_setup_parameter(priv, priv->need_commit);
- priv->need_commit = 0;
- }
- return 0;
-}
-
-static int ks_wlan_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_point *enc = &dwrq->encoding;
- int index = (enc->flags & IW_ENCODE_INDEX) - 1;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- enc->flags = IW_ENCODE_DISABLED;
-
- /* Check encryption mode */
- switch (priv->reg.authenticate_type) {
- case AUTH_TYPE_OPEN_SYSTEM:
- enc->flags = IW_ENCODE_OPEN;
- break;
- case AUTH_TYPE_SHARED_KEY:
- enc->flags = IW_ENCODE_RESTRICTED;
- break;
- }
-
- /* Which key do we want? -1 -> tx index */
- if ((index < 0) || (index >= 4))
- index = priv->reg.wep_index;
- if (priv->reg.privacy_invoked) {
- enc->flags &= ~IW_ENCODE_DISABLED;
- /* dwrq->flags |= IW_ENCODE_NOKEY; */
- }
- enc->flags |= index + 1;
- /* Copy the key to the user buffer */
- if (index >= 0 && index < 4) {
- enc->length = (priv->reg.wep_key[index].size <= 16) ?
- priv->reg.wep_key[index].size : 0;
- memcpy(extra, priv->reg.wep_key[index].val, enc->length);
- }
-
- return 0;
-}
-
-static int ks_wlan_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_range *range = (struct iw_range *)extra;
- int i, k;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- dwrq->data.length = sizeof(struct iw_range);
- memset(range, 0, sizeof(*range));
- range->min_nwid = 0x0000;
- range->max_nwid = 0x0000;
- range->num_channels = 14;
- /* Should be based on cap_rid.country to report only
- * the channels the current card supports
- */
- k = 0;
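- /*
- * struct iw_freq encodes a frequency as m * 10^e; the table values
- * are in MHz, so m = MHz * 100000 with e = 1 yields Hz.
- */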
- for (i = 0; i < 13; i++) { /* channel 1 -- 13 */
- range->freq[k].i = i + 1; /* List index */
- range->freq[k].m = frequency_list[i] * 100000;
- range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
- }
- range->num_frequency = k;
- if (priv->reg.phy_type == D_11B_ONLY_MODE ||
- priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) { /* channel 14 */
- range->freq[13].i = 14; /* List index */
- range->freq[13].m = frequency_list[13] * 100000;
- range->freq[13].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
- range->num_frequency = 14;
- }
-
- /* TODO: fill in the correct values here */
- range->max_qual.qual = 100;
- range->max_qual.level = 256 - 128; /* 0 dBm? */
- range->max_qual.noise = 256 - 128;
- range->sensitivity = 1;
-
- if (priv->reg.phy_type == D_11B_ONLY_MODE) {
- range->bitrate[0] = 1e6;
- range->bitrate[1] = 2e6;
- range->bitrate[2] = 5.5e6;
- range->bitrate[3] = 11e6;
- range->num_bitrates = 4;
- } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
- range->bitrate[0] = 1e6;
- range->bitrate[1] = 2e6;
- range->bitrate[2] = 5.5e6;
- range->bitrate[3] = 11e6;
-
- range->bitrate[4] = 6e6;
- range->bitrate[5] = 9e6;
- range->bitrate[6] = 12e6;
- if (IW_MAX_BITRATES < 9) {
- range->bitrate[7] = 54e6;
- range->num_bitrates = 8;
- } else {
- range->bitrate[7] = 18e6;
- range->bitrate[8] = 24e6;
- range->bitrate[9] = 36e6;
- range->bitrate[10] = 48e6;
- range->bitrate[11] = 54e6;
-
- range->num_bitrates = 12;
- }
- }
-
- /* Set an indication of the max TCP throughput
- * in bit/s that we can expect using this interface.
- * May be used for QoS stuff... Jean II
- */
- if (i > 2)
- range->throughput = 5000 * 1000;
- else
- range->throughput = 1500 * 1000;
-
- range->min_rts = 0;
- range->max_rts = 2347;
- range->min_frag = 256;
- range->max_frag = 2346;
-
- range->encoding_size[0] = 5; /* WEP: RC4 40 bits */
- range->encoding_size[1] = 13; /* WEP: RC4 ~128 bits */
- range->num_encoding_sizes = 2;
- range->max_encoding_tokens = 4;
-
- /* power management is not supported */
- range->pmp_flags = IW_POWER_ON;
- range->pmt_flags = IW_POWER_ON;
- range->pm_capa = 0;
-
- /* Transmit Power - values are in dBm (or mW) */
- range->txpower[0] = -256;
- range->num_txpower = 1;
- range->txpower_capa = IW_TXPOW_DBM;
- /* range->txpower_capa = IW_TXPOW_MWATT; */
-
- range->we_version_source = 21;
- range->we_version_compiled = WIRELESS_EXT;
-
- range->retry_capa = IW_RETRY_ON;
- range->retry_flags = IW_RETRY_ON;
- range->r_time_flags = IW_RETRY_ON;
-
- /* Experimental measurements - boundary 11/5.5 Mb/s
- *
- * Note: with or without the (local->rssi), results
- * are somewhat different. - Jean II
- */
- range->avg_qual.qual = 50;
- range->avg_qual.level = 186; /* -70 dBm */
- range->avg_qual.noise = 0;
-
- /* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
- range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
- IW_EVENT_CAPA_MASK(IWEVMICHAELMICFAILURE));
-
- /* encode extension (WPA) capability */
- range->enc_capa = (IW_ENC_CAPA_WPA |
- IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP);
- return 0;
-}
-
-static int ks_wlan_set_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (vwrq->power.disabled) {
- priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
- } else {
- if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
- return -EINVAL;
- priv->reg.power_mgmt = POWER_MGMT_SAVE1;
- }
-
- hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- vwrq->power.disabled = (priv->reg.power_mgmt <= 0);
-
- return 0;
-}
-
-static int ks_wlan_get_iwstats(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- vwrq->qual.qual = 0; /* not supported */
- vwrq->qual.level = priv->wstats.qual.level;
- vwrq->qual.noise = 0; /* not supported */
- vwrq->qual.updated = 0;
-
- return 0;
-}
-
-/* Note: this is deprecated in favor of IWSCAN */
-static int ks_wlan_get_aplist(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct sockaddr *address = (struct sockaddr *)extra;
- struct iw_quality qual[LOCAL_APLIST_MAX];
- int i;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
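- /*
- * Reply layout: an array of struct sockaddr entries followed
- * immediately by a matching array of struct iw_quality.
- */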
- for (i = 0; i < priv->aplist.size; i++) {
- ether_addr_copy(address[i].sa_data, priv->aplist.ap[i].bssid);
- address[i].sa_family = ARPHRD_ETHER;
- qual[i].level = 256 - priv->aplist.ap[i].rssi;
- qual[i].qual = priv->aplist.ap[i].sq;
- qual[i].noise = 0; /* invalid noise value */
- qual[i].updated = 7;
- }
- if (i) {
- dwrq->data.flags = 1; /* should be a #define */
- memcpy(extra + sizeof(struct sockaddr) * i,
- &qual, sizeof(struct iw_quality) * i);
- }
- dwrq->data.length = i;
-
- return 0;
-}
-
-static int ks_wlan_set_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_scan_req *req = NULL;
- int len;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* specified SSID SCAN */
- if (wrqu->data.length == sizeof(struct iw_scan_req) &&
- wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- req = (struct iw_scan_req *)extra;
- len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
- priv->scan_ssid_len = len;
- memcpy(priv->scan_ssid, req->essid, len);
- } else {
- priv->scan_ssid_len = 0;
- }
-
- priv->sme_i.sme_flag |= SME_AP_SCAN;
- hostif_sme_enqueue(priv, SME_BSS_SCAN_REQUEST);
-
- /* At this point, just return to the user. */
-
- return 0;
-}
-
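-/*
- * Build an IWEVCUSTOM event of the form "<leader><id><size><body...>"
- * (e.g. "rsn_ie=" followed by the hex-encoded raw element) so that
- * user-space tools can parse the RSN/WPA element from scan results.
- */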
-static char *ks_wlan_add_leader_event(const char *rsn_leader, char *end_buf,
- char *current_ev, struct rsn_ie *rsn,
- struct iw_event *iwe,
- struct iw_request_info *info)
-{
- char buffer[RSN_IE_BODY_MAX * 2 + 30];
- char *pbuf;
- int i;
-
- pbuf = &buffer[0];
- memset(iwe, 0, sizeof(*iwe));
- iwe->cmd = IWEVCUSTOM;
- /* rsn_leader is a pointer; sizeof() would measure the pointer, not
- * the string it points to, so use strlen() for the leader length.
- */
- memcpy(buffer, rsn_leader, strlen(rsn_leader));
- iwe->u.data.length += strlen(rsn_leader);
- pbuf += strlen(rsn_leader);
- pbuf += sprintf(pbuf, "%02x", rsn->id);
- pbuf += sprintf(pbuf, "%02x", rsn->size);
- iwe->u.data.length += 4;
-
- for (i = 0; i < rsn->size; i++)
- pbuf += sprintf(pbuf, "%02x", rsn->body[i]);
-
- iwe->u.data.length += rsn->size * 2;
-
- return iwe_stream_add_point(info, current_ev, end_buf, iwe, &buffer[0]);
-}
-
-/*
- * Translate scan data returned from the card to a card independent
- * format that the Wireless Tools will understand - Jean II
- */
-static inline char *ks_wlan_translate_scan(struct net_device *dev,
- struct iw_request_info *info,
- char *current_ev, char *end_buf,
- struct local_ap *ap)
-{
- /* struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; */
- static const char rsn_leader[] = "rsn_ie=";
- static const char wpa_leader[] = "wpa_ie=";
- struct iw_event iwe; /* Temporary buffer */
- u16 capabilities;
- char *current_val; /* For rates */
- int i;
-
- /* First entry *MUST* be the AP MAC address */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- ether_addr_copy(iwe.u.ap_addr.sa_data, ap->bssid);
- current_ev = iwe_stream_add_event(info, current_ev,
- end_buf, &iwe, IW_EV_ADDR_LEN);
-
- /* Other entries will be displayed in the order we give them */
-
- /* Add the ESSID */
- iwe.u.data.length = ap->ssid.size;
- if (iwe.u.data.length > 32)
- iwe.u.data.length = 32;
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf, &iwe, ap->ssid.body);
-
- /* Add mode */
- iwe.cmd = SIOCGIWMODE;
- capabilities = ap->capability;
- if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
- iwe.u.mode = (capabilities & WLAN_CAPABILITY_ESS) ?
- IW_MODE_INFRA : IW_MODE_ADHOC;
- current_ev = iwe_stream_add_event(info, current_ev,
- end_buf, &iwe, IW_EV_UINT_LEN);
- }
-
- /* Add frequency */
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = ap->channel;
- iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
- iwe.u.freq.e = 1;
- current_ev = iwe_stream_add_event(info, current_ev,
- end_buf, &iwe, IW_EV_FREQ_LEN);
-
- /* Add quality statistics */
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.level = 256 - ap->rssi;
- iwe.u.qual.qual = ap->sq;
- iwe.u.qual.noise = 0; /* invalid noise value */
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_QUAL_LEN);
-
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- iwe.u.data.flags = (capabilities & WLAN_CAPABILITY_PRIVACY) ?
- (IW_ENCODE_ENABLED | IW_ENCODE_NOKEY) :
- IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, ap->ssid.body);
-
- /*
- * Rate: stuffing multiple values in a single event
- * requires a bit more magic - Jean II
- */
- current_val = current_ev + IW_EV_LCP_LEN;
-
- iwe.cmd = SIOCGIWRATE;
-
- /* These two flags are ignored... */
- iwe.u.bitrate.fixed = 0;
- iwe.u.bitrate.disabled = 0;
-
- /* Max 16 values */
- for (i = 0; i < 16; i++) {
- /* NULL terminated */
- if (i >= ap->rate_set.size)
- break;
- /* Bit rate given in 500 kb/s units (+ 0x80) */
- iwe.u.bitrate.value = ((ap->rate_set.body[i] & 0x7f) * 500000);
- /* Add new value to event */
- current_val = iwe_stream_add_value(info, current_ev,
- current_val, end_buf, &iwe,
- IW_EV_PARAM_LEN);
- }
- /* Check if we added any event */
- if ((current_val - current_ev) > IW_EV_LCP_LEN)
- current_ev = current_val;
-
- if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0)
- current_ev = ks_wlan_add_leader_event(rsn_leader, end_buf,
- current_ev, &ap->rsn_ie,
- &iwe, info);
-
- if (ap->wpa_ie.id == WPA_INFO_ELEM_ID && ap->wpa_ie.size != 0)
- current_ev = ks_wlan_add_leader_event(wpa_leader, end_buf,
- current_ev, &ap->wpa_ie,
- &iwe, info);
-
- /*
- * The other data in the scan result are not really
- * interesting, so for now drop it - Jean II
- */
- return current_ev;
-}
-
-static int ks_wlan_get_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int i;
- char *current_ev = extra;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (priv->sme_i.sme_flag & SME_AP_SCAN)
- return -EAGAIN;
-
- if (priv->aplist.size == 0) {
- /* Client error, no scan results...
- * The caller needs to restart the scan.
- */
- return -ENODATA;
- }
-
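- /*
- * If the event buffer is too small, -E2BIG tells user space to
- * retry with a larger buffer.
- */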
- /* Read and parse all entries */
- for (i = 0; i < priv->aplist.size; i++) {
- if ((extra + dwrq->data.length) - current_ev <= IW_EV_ADDR_LEN) {
- dwrq->data.length = 0;
- return -E2BIG;
- }
- /* Translate to WE format this entry */
- current_ev = ks_wlan_translate_scan(dev, info, current_ev,
- extra + dwrq->data.length,
- &priv->aplist.ap[i]);
- }
- /* Length of data */
- dwrq->data.length = (current_ev - extra);
- dwrq->data.flags = 0;
-
- return 0;
-}
-
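-/*
- * Setters above accumulate SME_* flags in priv->need_commit and
- * return -EINPROGRESS so that the wireless core calls this commit
- * handler, which pushes the pending parameters to the firmware.
- */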
-/* called after a bunch of SET operations */
-static int ks_wlan_config_commit(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *zwrq,
- char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (!priv->need_commit)
- return 0;
-
- ks_wlan_setup_parameter(priv, priv->need_commit);
- priv->need_commit = 0;
- return 0;
-}
-
-/* set association ie params */
-static int ks_wlan_set_genie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- return 0;
- /* return -EOPNOTSUPP; */
-}
-
-static int ks_wlan_set_auth_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_param *param = &vwrq->param;
- int index = (param->flags & IW_AUTH_INDEX);
- int value = param->value;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- switch (index) {
- case IW_AUTH_WPA_VERSION: /* 0 */
- switch (value) {
- case IW_AUTH_WPA_VERSION_DISABLED:
- priv->wpa.version = value;
- if (priv->wpa.rsn_enabled)
- priv->wpa.rsn_enabled = false;
- priv->need_commit |= SME_RSN;
- break;
- case IW_AUTH_WPA_VERSION_WPA:
- case IW_AUTH_WPA_VERSION_WPA2:
- priv->wpa.version = value;
- if (!(priv->wpa.rsn_enabled))
- priv->wpa.rsn_enabled = true;
- priv->need_commit |= SME_RSN;
- break;
- default:
- return -EOPNOTSUPP;
- }
- break;
- case IW_AUTH_CIPHER_PAIRWISE: /* 1 */
- switch (value) {
- case IW_AUTH_CIPHER_NONE:
- if (priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x00;
- priv->need_commit |= SME_WEP_FLAG;
- }
- break;
- case IW_AUTH_CIPHER_WEP40:
- case IW_AUTH_CIPHER_TKIP:
- case IW_AUTH_CIPHER_CCMP:
- case IW_AUTH_CIPHER_WEP104:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- priv->need_commit |= SME_WEP_FLAG;
- }
- priv->wpa.pairwise_suite = value;
- priv->need_commit |= SME_RSN_UNICAST;
- break;
- default:
- return -EOPNOTSUPP;
- }
- break;
- case IW_AUTH_CIPHER_GROUP: /* 2 */
- switch (value) {
- case IW_AUTH_CIPHER_NONE:
- if (priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x00;
- priv->need_commit |= SME_WEP_FLAG;
- }
- break;
- case IW_AUTH_CIPHER_WEP40:
- case IW_AUTH_CIPHER_TKIP:
- case IW_AUTH_CIPHER_CCMP:
- case IW_AUTH_CIPHER_WEP104:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- priv->need_commit |= SME_WEP_FLAG;
- }
- priv->wpa.group_suite = value;
- priv->need_commit |= SME_RSN_MULTICAST;
- break;
- default:
- return -EOPNOTSUPP;
- }
- break;
- case IW_AUTH_KEY_MGMT: /* 3 */
- switch (value) {
- case IW_AUTH_KEY_MGMT_802_1X:
- case IW_AUTH_KEY_MGMT_PSK:
- case 0: /* NONE or 802_1X_NO_WPA */
- case 4: /* WPA_NONE */
- priv->wpa.key_mgmt_suite = value;
- priv->need_commit |= SME_RSN_AUTH;
- break;
- default:
- return -EOPNOTSUPP;
- }
- break;
- case IW_AUTH_80211_AUTH_ALG: /* 6 */
- switch (value) {
- case IW_AUTH_ALG_OPEN_SYSTEM:
- priv->wpa.auth_alg = value;
- priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
- break;
- case IW_AUTH_ALG_SHARED_KEY:
- priv->wpa.auth_alg = value;
- priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
- break;
- case IW_AUTH_ALG_LEAP:
- default:
- return -EOPNOTSUPP;
- }
- priv->need_commit |= SME_MODE_SET;
- break;
- case IW_AUTH_WPA_ENABLED: /* 7 */
- priv->wpa.wpa_enabled = value;
- break;
- case IW_AUTH_PRIVACY_INVOKED: /* 10 */
- if ((value && !priv->reg.privacy_invoked) ||
- (!value && priv->reg.privacy_invoked)) {
- priv->reg.privacy_invoked = value ? 0x01 : 0x00;
- priv->need_commit |= SME_WEP_FLAG;
- }
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* 4 */
- case IW_AUTH_TKIP_COUNTERMEASURES: /* 5 */
- case IW_AUTH_DROP_UNENCRYPTED: /* 8 */
- case IW_AUTH_ROAMING_CONTROL: /* 9 */
- default:
- break;
- }
-
- /* return -EINPROGRESS; */
- if (priv->need_commit) {
- ks_wlan_setup_parameter(priv, priv->need_commit);
- priv->need_commit = 0;
- }
- return 0;
-}
-
-static int ks_wlan_get_auth_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *vwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_param *param = &vwrq->param;
- int index = (param->flags & IW_AUTH_INDEX);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* WPA (apparently not used by wpa_supplicant) */
- switch (index) {
- case IW_AUTH_WPA_VERSION:
- param->value = priv->wpa.version;
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- param->value = priv->wpa.pairwise_suite;
- break;
- case IW_AUTH_CIPHER_GROUP:
- param->value = priv->wpa.group_suite;
- break;
- case IW_AUTH_KEY_MGMT:
- param->value = priv->wpa.key_mgmt_suite;
- break;
- case IW_AUTH_80211_AUTH_ALG:
- param->value = priv->wpa.auth_alg;
- break;
- case IW_AUTH_WPA_ENABLED:
- param->value = priv->wpa.rsn_enabled;
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* OK??? */
- case IW_AUTH_TKIP_COUNTERMEASURES:
- case IW_AUTH_DROP_UNENCRYPTED:
- default:
- /* return -EOPNOTSUPP; */
- break;
- }
- return 0;
-}
-
-/* set encoding token & mode (WPA)*/
-static int ks_wlan_set_encode_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_encode_ext *enc;
- int index = dwrq->encoding.flags & IW_ENCODE_INDEX;
- unsigned int commit = 0;
- struct wpa_key *key;
-
- enc = (struct iw_encode_ext *)extra;
- if (!enc)
- return -EINVAL;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (index < 1 || index > 4)
- return -EINVAL;
- index--;
- key = &priv->wpa.key[index];
-
- if (dwrq->encoding.flags & IW_ENCODE_DISABLED)
- key->key_len = 0;
-
- key->ext_flags = enc->ext_flags;
- if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- priv->wpa.txkey = index;
- commit |= SME_WEP_INDEX;
- } else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
- memcpy(&key->rx_seq[0], &enc->rx_seq[0], IW_ENCODE_SEQ_MAX_SIZE);
- }
-
- ether_addr_copy(&key->addr.sa_data[0], &enc->addr.sa_data[0]);
-
- switch (enc->alg) {
- case IW_ENCODE_ALG_NONE:
- if (priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x00;
- commit |= SME_WEP_FLAG;
- }
- key->key_len = 0;
-
- break;
- case IW_ENCODE_ALG_WEP:
- case IW_ENCODE_ALG_CCMP:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- commit |= SME_WEP_FLAG;
- }
- if (enc->key_len) {
- int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX);
-
- memcpy(&key->key_val[0], &enc->key[0], key_len);
- key->key_len = key_len;
- commit |= (SME_WEP_VAL1 << index);
- }
- break;
- case IW_ENCODE_ALG_TKIP:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- commit |= SME_WEP_FLAG;
- }
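- /*
- * A 32-byte TKIP key is laid out as TK (16 bytes) + TX MIC key (8)
- * + RX MIC key (8); with WPA_NONE (ad-hoc) the same MIC key is
- * used in both directions.
- */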
- if (enc->key_len == 32) {
- memcpy(&key->key_val[0], &enc->key[0], enc->key_len - 16);
- key->key_len = enc->key_len - 16;
- if (priv->wpa.key_mgmt_suite == 4) { /* WPA_NONE */
- memcpy(&key->tx_mic_key[0], &enc->key[16], 8);
- memcpy(&key->rx_mic_key[0], &enc->key[16], 8);
- } else {
- memcpy(&key->tx_mic_key[0], &enc->key[16], 8);
- memcpy(&key->rx_mic_key[0], &enc->key[24], 8);
- }
- commit |= (SME_WEP_VAL1 << index);
- }
- break;
- default:
- return -EINVAL;
- }
- key->alg = enc->alg;
-
- if (commit) {
- if (commit & SME_WEP_INDEX)
- hostif_sme_enqueue(priv, SME_SET_TXKEY);
- if (commit & SME_WEP_VAL_MASK)
- hostif_sme_enqueue(priv, SME_SET_KEY1 + index);
- if (commit & SME_WEP_FLAG)
- hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
- }
-
- return 0;
-}
-
-/* get encoding token & mode (WPA)*/
-static int ks_wlan_get_encode_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- /* WPA (apparently not used by wpa_supplicant):
- * struct iw_encode_ext *enc = (struct iw_encode_ext *)extra;
- * int index = dwrq->flags & IW_ENCODE_INDEX;
- */
- return 0;
-}
-
-static int ks_wlan_set_pmksa(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_pmksa *pmksa;
- int i;
- struct pmk *pmk;
- struct list_head *ptr;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (!extra)
- return -EINVAL;
-
- pmksa = (struct iw_pmksa *)extra;
-
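- /*
- * The PMKSA cache behaves as an LRU list: new or updated entries
- * move to the head, and when the cache is full the oldest entry
- * (at the tail) is overwritten.
- */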
- switch (pmksa->cmd) {
- case IW_PMKSA_ADD:
- if (list_empty(&priv->pmklist.head)) {
- for (i = 0; i < PMK_LIST_MAX; i++) {
- pmk = &priv->pmklist.pmk[i];
- if (is_zero_ether_addr(pmk->bssid))
- break;
- }
- ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
- memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
- list_add(&pmk->list, &priv->pmklist.head);
- priv->pmklist.size++;
- break;
- }
- /* search cache data */
- list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk, list);
- if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) {
- memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
- list_move(&pmk->list, &priv->pmklist.head);
- break;
- }
- }
- /* if the address was found above, we are done */
- if (ptr != &priv->pmklist.head)
- break;
- /* new cache data */
- if (priv->pmklist.size < PMK_LIST_MAX) {
- for (i = 0; i < PMK_LIST_MAX; i++) {
- pmk = &priv->pmklist.pmk[i];
- if (is_zero_ether_addr(pmk->bssid))
- break;
- }
- ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
- memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
- list_add(&pmk->list, &priv->pmklist.head);
- priv->pmklist.size++;
- } else { /* overwrite old cache data */
- pmk = list_entry(priv->pmklist.head.prev, struct pmk,
- list);
- ether_addr_copy(pmk->bssid, pmksa->bssid.sa_data);
- memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
- list_move(&pmk->list, &priv->pmklist.head);
- }
- break;
- case IW_PMKSA_REMOVE:
- if (list_empty(&priv->pmklist.head))
- return -EINVAL;
- /* search cache data */
- list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk, list);
- if (ether_addr_equal(pmksa->bssid.sa_data, pmk->bssid)) {
- eth_zero_addr(pmk->bssid);
- memset(pmk->pmkid, 0, IW_PMKID_LEN);
- list_del_init(&pmk->list);
- break;
- }
- }
- /* address not found: nothing to remove */
- if (ptr == &priv->pmklist.head)
- return 0;
- break;
- case IW_PMKSA_FLUSH:
- memset(&priv->pmklist, 0, sizeof(priv->pmklist));
- INIT_LIST_HEAD(&priv->pmklist.head);
- for (i = 0; i < PMK_LIST_MAX; i++)
- INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
- break;
- default:
- return -EINVAL;
- }
-
- hostif_sme_enqueue(priv, SME_SET_PMKSA);
- return 0;
-}
-
-static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_statistics *wstats = &priv->wstats;
-
- if (!atomic_read(&update_phyinfo))
- return (priv->dev_state < DEVICE_STATE_READY) ? NULL : wstats;
-
- /*
- * Packets discarded in the wireless adapter due to wireless
- * specific problems
- */
- wstats->discard.nwid = 0; /* Rx invalid nwid */
- wstats->discard.code = 0; /* Rx invalid crypt */
- wstats->discard.fragment = 0; /* Rx invalid frag */
- wstats->discard.retries = 0; /* Tx excessive retries */
- wstats->discard.misc = 0; /* Invalid misc */
- wstats->miss.beacon = 0; /* Missed beacon */
-
- return wstats;
-}
-
-static int ks_wlan_set_stop_request(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (!(uwrq->mode))
- return -EINVAL;
-
- hostif_sme_enqueue(priv, SME_STOP_REQUEST);
- return 0;
-}
-
-#include <linux/ieee80211.h>
-static int ks_wlan_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct iw_mlme *mlme = (struct iw_mlme *)extra;
- union iwreq_data uwrq;
-
- uwrq.mode = 1;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (mlme->cmd != IW_MLME_DEAUTH &&
- mlme->cmd != IW_MLME_DISASSOC)
- return -EOPNOTSUPP;
-
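- /* silently ignore a deauth request triggered by a MIC failure */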
- if (mlme->cmd == IW_MLME_DEAUTH &&
- mlme->reason_code == WLAN_REASON_MIC_FAILURE)
- return 0;
-
- return ks_wlan_set_stop_request(dev, NULL, &uwrq, NULL);
-}
-
-static int ks_wlan_get_firmware_version(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct iw_point *dwrq = &uwrq->data;
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- dwrq->length = priv->version_size + 1;
- strscpy(extra, priv->firmware_version, dwrq->length);
- return 0;
-}
-
-static int ks_wlan_set_preamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (uwrq->mode != LONG_PREAMBLE && uwrq->mode != SHORT_PREAMBLE)
- return -EINVAL;
-
- priv->reg.preamble = uwrq->mode;
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_preamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.preamble;
- return 0;
-}
-
-static int ks_wlan_set_power_mgmt(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (uwrq->mode != POWER_MGMT_ACTIVE &&
- uwrq->mode != POWER_MGMT_SAVE1 &&
- uwrq->mode != POWER_MGMT_SAVE2)
- return -EINVAL;
-
- if ((uwrq->mode == POWER_MGMT_SAVE1 || uwrq->mode == POWER_MGMT_SAVE2) &&
- (priv->reg.operation_mode != MODE_INFRASTRUCTURE))
- return -EINVAL;
-
- priv->reg.power_mgmt = uwrq->mode;
- hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_get_power_mgmt(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.power_mgmt;
- return 0;
-}
-
-static int ks_wlan_set_scan_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
-
- if (uwrq->mode != ACTIVE_SCAN && uwrq->mode != PASSIVE_SCAN)
- return -EINVAL;
-
- priv->reg.scan_type = uwrq->mode;
- return 0;
-}
-
-static int ks_wlan_get_scan_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.scan_type;
- return 0;
-}
-
-static int ks_wlan_set_beacon_lost(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode > BEACON_LOST_COUNT_MAX)
- return -EINVAL;
-
- priv->reg.beacon_lost_count = uwrq->mode;
-
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
- }
-
- return 0;
-}
-
-static int ks_wlan_get_beacon_lost(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.beacon_lost_count;
- return 0;
-}
-
-static int ks_wlan_set_phy_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- if (uwrq->mode != D_11B_ONLY_MODE &&
- uwrq->mode != D_11G_ONLY_MODE &&
- uwrq->mode != D_11BG_COMPATIBLE_MODE)
- return -EINVAL;
-
- /* for SLEEP MODE */
- priv->reg.phy_type = uwrq->mode;
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_phy_type(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.phy_type;
- return 0;
-}
-
-static int ks_wlan_set_cts_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode != CTS_MODE_FALSE && uwrq->mode != CTS_MODE_TRUE)
- return -EINVAL;
-
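- /*
- * CTS-to-self protection is only meaningful on an 802.11g PHY;
- * on an 11b-only PHY a request to enable it is negated.
- */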
- priv->reg.cts_mode = (uwrq->mode == CTS_MODE_FALSE) ? uwrq->mode :
- (priv->reg.phy_type == D_11G_ONLY_MODE ||
- priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) ?
- uwrq->mode : !uwrq->mode;
-
- priv->need_commit |= SME_MODE_SET;
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int ks_wlan_get_cts_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->reg.cts_mode;
- return 0;
-}
-
-static int ks_wlan_set_sleep_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (uwrq->mode != SLP_SLEEP &&
- uwrq->mode != SLP_ACTIVE) {
- netdev_err(dev, "SET_SLEEP_MODE %d error\n", uwrq->mode);
- return -EINVAL;
- }
-
- priv->sleep_mode = uwrq->mode;
- netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);
-
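- /* when entering sleep, tear down the connection before sleeping */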
- if (uwrq->mode == SLP_SLEEP)
- hostif_sme_enqueue(priv, SME_STOP_REQUEST);
-
- hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_get_sleep_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- uwrq->mode = priv->sleep_mode;
-
- return 0;
-}
-
-static int ks_wlan_set_wps_enable(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode != 0 && uwrq->mode != 1)
- return -EINVAL;
-
- priv->wps.wps_enabled = uwrq->mode;
- hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_get_wps_enable(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->wps.wps_enabled;
- netdev_info(dev, "return=%d\n", uwrq->mode);
-
- return 0;
-}
-
-static int ks_wlan_set_wps_probe_req(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct iw_point *dwrq = &uwrq->data;
- u8 *p = extra;
- unsigned char len;
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* length check */
- if (p[1] + 2 != dwrq->length || dwrq->length > 256)
- return -EINVAL;
-
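- /*
- * Stored layout: wps.ie[0] holds the total element length, followed
- * by the raw element (2-byte header + body); p[1] is the body
- * length taken from the element header.
- */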
- priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */
- len = p[1] + 2; /* IE header + IE */
-
- memcpy(priv->wps.ie, &len, sizeof(len));
- p = memcpy(priv->wps.ie + 1, p, len);
-
- netdev_dbg(dev, "%d(%#x): %02X %02X %02X %02X ... %02X %02X %02X\n",
- priv->wps.ielen, priv->wps.ielen, p[0], p[1], p[2], p[3],
- p[priv->wps.ielen - 3], p[priv->wps.ielen - 2],
- p[priv->wps.ielen - 1]);
-
- hostif_sme_enqueue(priv, SME_WPS_PROBE_REQUEST);
-
- return 0;
-}
-
-static int ks_wlan_set_tx_gain(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
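- /* a gain of 0xFF disables manual TX gain control (tx_mode = 0) */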
- if (uwrq->mode > 0xFF)
- return -EINVAL;
-
- priv->gain.tx_gain = (u8)uwrq->mode;
- priv->gain.tx_mode = (priv->gain.tx_gain < 0xFF) ? 1 : 0;
- hostif_sme_enqueue(priv, SME_SET_GAIN);
- return 0;
-}
-
-static int ks_wlan_get_tx_gain(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->gain.tx_gain;
- hostif_sme_enqueue(priv, SME_GET_GAIN);
- return 0;
-}
-
-static int ks_wlan_set_rx_gain(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (uwrq->mode > 0xFF)
- return -EINVAL;
-
- priv->gain.rx_gain = (u8)uwrq->mode;
- priv->gain.rx_mode = (priv->gain.rx_gain < 0xFF) ? 1 : 0;
- hostif_sme_enqueue(priv, SME_SET_GAIN);
- return 0;
-}
-
-static int ks_wlan_get_rx_gain(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- uwrq->mode = priv->gain.rx_gain;
- hostif_sme_enqueue(priv, SME_GET_GAIN);
- return 0;
-}
-
-static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- uwrq->mode = priv->eeprom_checksum;
- return 0;
-}
-
-static void print_hif_event(struct net_device *dev, int event)
-{
- switch (event) {
- case HIF_DATA_REQ:
- netdev_info(dev, "HIF_DATA_REQ\n");
- break;
- case HIF_DATA_IND:
- netdev_info(dev, "HIF_DATA_IND\n");
- break;
- case HIF_MIB_GET_REQ:
- netdev_info(dev, "HIF_MIB_GET_REQ\n");
- break;
- case HIF_MIB_GET_CONF:
- netdev_info(dev, "HIF_MIB_GET_CONF\n");
- break;
- case HIF_MIB_SET_REQ:
- netdev_info(dev, "HIF_MIB_SET_REQ\n");
- break;
- case HIF_MIB_SET_CONF:
- netdev_info(dev, "HIF_MIB_SET_CONF\n");
- break;
- case HIF_POWER_MGMT_REQ:
- netdev_info(dev, "HIF_POWER_MGMT_REQ\n");
- break;
- case HIF_POWER_MGMT_CONF:
- netdev_info(dev, "HIF_POWER_MGMT_CONF\n");
- break;
- case HIF_START_REQ:
- netdev_info(dev, "HIF_START_REQ\n");
- break;
- case HIF_START_CONF:
- netdev_info(dev, "HIF_START_CONF\n");
- break;
- case HIF_CONNECT_IND:
- netdev_info(dev, "HIF_CONNECT_IND\n");
- break;
- case HIF_STOP_REQ:
- netdev_info(dev, "HIF_STOP_REQ\n");
- break;
- case HIF_STOP_CONF:
- netdev_info(dev, "HIF_STOP_CONF\n");
- break;
- case HIF_PS_ADH_SET_REQ:
- netdev_info(dev, "HIF_PS_ADH_SET_REQ\n");
- break;
- case HIF_PS_ADH_SET_CONF:
- netdev_info(dev, "HIF_PS_ADH_SET_CONF\n");
- break;
- case HIF_INFRA_SET_REQ:
- netdev_info(dev, "HIF_INFRA_SET_REQ\n");
- break;
- case HIF_INFRA_SET_CONF:
- netdev_info(dev, "HIF_INFRA_SET_CONF\n");
- break;
- case HIF_ADH_SET_REQ:
- netdev_info(dev, "HIF_ADH_SET_REQ\n");
- break;
- case HIF_ADH_SET_CONF:
- netdev_info(dev, "HIF_ADH_SET_CONF\n");
- break;
- case HIF_AP_SET_REQ:
- netdev_info(dev, "HIF_AP_SET_REQ\n");
- break;
- case HIF_AP_SET_CONF:
- netdev_info(dev, "HIF_AP_SET_CONF\n");
- break;
- case HIF_ASSOC_INFO_IND:
- netdev_info(dev, "HIF_ASSOC_INFO_IND\n");
- break;
- case HIF_MIC_FAILURE_REQ:
- netdev_info(dev, "HIF_MIC_FAILURE_REQ\n");
- break;
- case HIF_MIC_FAILURE_CONF:
- netdev_info(dev, "HIF_MIC_FAILURE_CONF\n");
- break;
- case HIF_SCAN_REQ:
- netdev_info(dev, "HIF_SCAN_REQ\n");
- break;
- case HIF_SCAN_CONF:
- netdev_info(dev, "HIF_SCAN_CONF\n");
- break;
- case HIF_PHY_INFO_REQ:
- netdev_info(dev, "HIF_PHY_INFO_REQ\n");
- break;
- case HIF_PHY_INFO_CONF:
- netdev_info(dev, "HIF_PHY_INFO_CONF\n");
- break;
- case HIF_SLEEP_REQ:
- netdev_info(dev, "HIF_SLEEP_REQ\n");
- break;
- case HIF_SLEEP_CONF:
- netdev_info(dev, "HIF_SLEEP_CONF\n");
- break;
- case HIF_PHY_INFO_IND:
- netdev_info(dev, "HIF_PHY_INFO_IND\n");
- break;
- case HIF_SCAN_IND:
- netdev_info(dev, "HIF_SCAN_IND\n");
- break;
- case HIF_INFRA_SET2_REQ:
- netdev_info(dev, "HIF_INFRA_SET2_REQ\n");
- break;
- case HIF_INFRA_SET2_CONF:
- netdev_info(dev, "HIF_INFRA_SET2_CONF\n");
- break;
- case HIF_ADH_SET2_REQ:
- netdev_info(dev, "HIF_ADH_SET2_REQ\n");
- break;
- case HIF_ADH_SET2_CONF:
- netdev_info(dev, "HIF_ADH_SET2_CONF\n");
- }
-}
-
-/* dump the last 64 host-interface events, oldest first */
-static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- int i, event;
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- for (i = 63; i >= 0; i--) {
- event =
- priv->hostt.buff[(priv->hostt.qtail - 1 - i) %
- SME_EVENT_BUFF_SIZE];
- print_hif_event(dev, event);
- }
- return 0;
-}
-
-/* Structures to export the Wireless Handlers */
-
-static const struct iw_priv_args ks_wlan_private_args[] = {
-/*{ cmd, set_args, get_args, name[16] } */
- {KS_WLAN_GET_FIRM_VERSION, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_CHAR | (128 + 1), "GetFirmwareVer"},
- {KS_WLAN_SET_WPS_ENABLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetWPSEnable"},
- {KS_WLAN_GET_WPS_ENABLE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetW"},
- {KS_WLAN_SET_WPS_PROBE_REQ, IW_PRIV_TYPE_BYTE | 2047, IW_PRIV_TYPE_NONE,
- "SetWPSProbeReq"},
- {KS_WLAN_SET_PREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetPreamble"},
- {KS_WLAN_GET_PREAMBLE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPreamble"},
- {KS_WLAN_SET_POWER_SAVE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetPowerSave"},
- {KS_WLAN_GET_POWER_SAVE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPowerSave"},
- {KS_WLAN_SET_SCAN_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetScanType"},
- {KS_WLAN_GET_SCAN_TYPE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetScanType"},
- {KS_WLAN_SET_RX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetRxGain"},
- {KS_WLAN_GET_RX_GAIN, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetRxGain"},
- {KS_WLAN_HOSTT, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_CHAR | (128 + 1),
- "hostt"},
- {KS_WLAN_SET_BEACON_LOST, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetBeaconLost"},
- {KS_WLAN_GET_BEACON_LOST, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetBeaconLost"},
- {KS_WLAN_SET_SLEEP_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetSleepMode"},
- {KS_WLAN_GET_SLEEP_MODE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetSleepMode"},
- {KS_WLAN_SET_TX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetTxGain"},
- {KS_WLAN_GET_TX_GAIN, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetTxGain"},
- {KS_WLAN_SET_PHY_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetPhyType"},
- {KS_WLAN_GET_PHY_TYPE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPhyType"},
- {KS_WLAN_SET_CTS_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "SetCtsMode"},
- {KS_WLAN_GET_CTS_MODE, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetCtsMode"},
- {KS_WLAN_GET_EEPROM_CKSUM, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetChecksum"},
-};
-
-static const iw_handler ks_wlan_handler[] = {
- IW_HANDLER(SIOCSIWCOMMIT, ks_wlan_config_commit),
- IW_HANDLER(SIOCGIWNAME, ks_wlan_get_name),
- IW_HANDLER(SIOCSIWFREQ, ks_wlan_set_freq),
- IW_HANDLER(SIOCGIWFREQ, ks_wlan_get_freq),
- IW_HANDLER(SIOCSIWMODE, ks_wlan_set_mode),
- IW_HANDLER(SIOCGIWMODE, ks_wlan_get_mode),
- IW_HANDLER(SIOCGIWRANGE, ks_wlan_get_range),
- IW_HANDLER(SIOCGIWSTATS, ks_wlan_get_iwstats),
- IW_HANDLER(SIOCSIWAP, ks_wlan_set_wap),
- IW_HANDLER(SIOCGIWAP, ks_wlan_get_wap),
- IW_HANDLER(SIOCSIWMLME, ks_wlan_set_mlme),
- IW_HANDLER(SIOCGIWAPLIST, ks_wlan_get_aplist),
- IW_HANDLER(SIOCSIWSCAN, ks_wlan_set_scan),
- IW_HANDLER(SIOCGIWSCAN, ks_wlan_get_scan),
- IW_HANDLER(SIOCSIWESSID, ks_wlan_set_essid),
- IW_HANDLER(SIOCGIWESSID, ks_wlan_get_essid),
- IW_HANDLER(SIOCSIWNICKN, ks_wlan_set_nick),
- IW_HANDLER(SIOCGIWNICKN, ks_wlan_get_nick),
- IW_HANDLER(SIOCSIWRATE, ks_wlan_set_rate),
- IW_HANDLER(SIOCGIWRATE, ks_wlan_get_rate),
- IW_HANDLER(SIOCSIWRTS, ks_wlan_set_rts),
- IW_HANDLER(SIOCGIWRTS, ks_wlan_get_rts),
- IW_HANDLER(SIOCSIWFRAG, ks_wlan_set_frag),
- IW_HANDLER(SIOCGIWFRAG, ks_wlan_get_frag),
- IW_HANDLER(SIOCSIWENCODE, ks_wlan_set_encode),
- IW_HANDLER(SIOCGIWENCODE, ks_wlan_get_encode),
- IW_HANDLER(SIOCSIWPOWER, ks_wlan_set_power),
- IW_HANDLER(SIOCGIWPOWER, ks_wlan_get_power),
- IW_HANDLER(SIOCSIWGENIE, ks_wlan_set_genie),
- IW_HANDLER(SIOCSIWAUTH, ks_wlan_set_auth_mode),
- IW_HANDLER(SIOCGIWAUTH, ks_wlan_get_auth_mode),
- IW_HANDLER(SIOCSIWENCODEEXT, ks_wlan_set_encode_ext),
- IW_HANDLER(SIOCGIWENCODEEXT, ks_wlan_get_encode_ext),
- IW_HANDLER(SIOCSIWPMKSA, ks_wlan_set_pmksa),
-};
-
-/* private_handler */
-static const iw_handler ks_wlan_private_handler[] = {
- NULL, /* 0 */
- NULL, /* 1, KS_WLAN_GET_DRIVER_VERSION */
- NULL, /* 2 */
- ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */
- ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */
- ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */
- ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */
- ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */
- ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */
- ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */
- ks_wlan_set_power_mgmt, /* 10 KS_WLAN_SET_POWER_SAVE */
- ks_wlan_get_power_mgmt, /* 11 KS_WLAN_GET_POWER_SAVE */
- ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */
- ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */
- ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */
- ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */
- ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */
- NULL, /* 17 */
- ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BEACON_LOST */
- ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BEACON_LOST */
- ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */
- ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */
- ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */
- ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */
- ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */
- ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */
- NULL, /* 26 */
- NULL, /* 27 */
- ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */
- ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */
- NULL, /* 30 */
- NULL, /* 31 */
-};
-
-static const struct iw_handler_def ks_wlan_handler_def = {
- .num_standard = ARRAY_SIZE(ks_wlan_handler),
- .num_private = ARRAY_SIZE(ks_wlan_private_handler),
- .num_private_args = ARRAY_SIZE(ks_wlan_private_args),
- .standard = ks_wlan_handler,
- .private = ks_wlan_private_handler,
- .private_args = ks_wlan_private_args,
- .get_wireless_stats = ks_get_wireless_stats,
-};
-
-static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
- int cmd)
-{
- int ret;
- struct iwreq *wrq = (struct iwreq *)rq;
-
- switch (cmd) {
- case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */
- ret = ks_wlan_set_stop_request(dev, NULL, &wrq->u, NULL);
- break;
- /* all other calls are currently unsupported */
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static
-struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->dev_state < DEVICE_STATE_READY)
- return NULL; /* initialization not finished */
-
- return &priv->nstats;
-}
-
-static
-int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- struct sockaddr *mac_addr = (struct sockaddr *)addr;
-
- if (netif_running(dev))
- return -EBUSY;
- eth_hw_addr_set(dev, mac_addr->sa_data);
- ether_addr_copy(priv->eth_addr, mac_addr->sa_data);
-
- priv->mac_address_valid = false;
- hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
- netdev_info(dev, "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr);
- return 0;
-}
-
-static
-void ks_wlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- netdev_dbg(dev, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
- priv->tx_dev.qtail);
- if (!netif_queue_stopped(dev))
- netif_stop_queue(dev);
- priv->nstats.tx_errors++;
- netif_wake_queue(dev);
-}
-
-static
-netdev_tx_t ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
- int ret;
-
- netdev_dbg(dev, "in_interrupt()=%ld\n", in_interrupt());
-
- if (!skb) {
- netdev_err(dev, "ks_wlan: skb == NULL!!!\n");
- return 0;
- }
- if (priv->dev_state < DEVICE_STATE_READY) {
- dev_kfree_skb(skb);
- return 0; /* not finished initialize */
- }
-
- if (netif_running(dev))
- netif_stop_queue(dev);
-
- ret = hostif_data_request(priv, skb);
- netif_trans_update(dev);
-
- if (ret)
- netdev_err(dev, "hostif_data_request error: =%d\n", ret);
-
- return 0;
-}
-
-void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb)
-{
- priv->nstats.tx_packets++;
-
- if (netif_queue_stopped(priv->net_dev))
- netif_wake_queue(priv->net_dev);
-
- if (skb) {
- priv->nstats.tx_bytes += skb->len;
- dev_kfree_skb(skb);
- }
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- * This routine is not state sensitive and need not be SMP locked.
- */
-static
-void ks_wlan_set_rx_mode(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- if (priv->dev_state < DEVICE_STATE_READY)
- return; /* not finished initialize */
- hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
-}
-
-static
-int ks_wlan_open(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- priv->cur_rx = 0;
-
- if (!priv->mac_address_valid) {
- netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name);
- return -EBUSY;
- }
- netif_start_queue(dev);
-
- return 0;
-}
-
-static
-int ks_wlan_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
-
- return 0;
-}
-
-/* Operational parameters that usually are not changed. */
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (3 * HZ)
-static const unsigned char dummy_addr[] = {
- 0x00, 0x0b, 0xe3, 0x00, 0x00, 0x00
-};
-
-static const struct net_device_ops ks_wlan_netdev_ops = {
- .ndo_start_xmit = ks_wlan_start_xmit,
- .ndo_open = ks_wlan_open,
- .ndo_stop = ks_wlan_close,
- .ndo_do_ioctl = ks_wlan_netdev_ioctl,
- .ndo_set_mac_address = ks_wlan_set_mac_address,
- .ndo_get_stats = ks_wlan_get_stats,
- .ndo_tx_timeout = ks_wlan_tx_timeout,
- .ndo_set_rx_mode = ks_wlan_set_rx_mode,
-};
-
-int ks_wlan_net_start(struct net_device *dev)
-{
- struct ks_wlan_private *priv;
- /* int rc; */
-
- priv = netdev_priv(dev);
- priv->mac_address_valid = false;
- priv->is_device_open = true;
- priv->need_commit = 0;
- /* phy information update timer */
- atomic_set(&update_phyinfo, 0);
- timer_setup(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout, 0);
-
- /* dummy address set */
- ether_addr_copy(priv->eth_addr, dummy_addr);
- eth_hw_addr_set(dev, priv->eth_addr);
-
- /* The ks_wlan-specific entries in the device structure. */
- dev->netdev_ops = &ks_wlan_netdev_ops;
- dev->wireless_handlers = &ks_wlan_handler_def;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- netif_carrier_off(dev);
-
- return 0;
-}
-
-int ks_wlan_net_stop(struct net_device *dev)
-{
- struct ks_wlan_private *priv = netdev_priv(dev);
-
- priv->is_device_open = false;
- del_timer_sync(&update_phyinfo_timer);
-
- if (netif_running(dev))
- netif_stop_queue(dev);
-
- return 0;
-}
-
-/**
- * is_connect_status() - return true if status is 'connected'
- * @status: high bit is used as FORCE_DISCONNECT, low bits used for
- * connect status.
- */
-bool is_connect_status(u32 status)
-{
- return (status & CONNECT_STATUS_MASK) == CONNECT_STATUS;
-}
-
-/**
- * is_disconnect_status() - return true if status is 'disconnected'
- * @status: high bit is used as FORCE_DISCONNECT, low bits used for
- * disconnect status.
- */
-bool is_disconnect_status(u32 status)
-{
- return (status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS;
-}
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index fefbe3cd08f3..4cfe9a0e0d56 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -200,7 +200,7 @@ struct atomisp_dis_vector {
};
/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
- * arrays that contain the coeffients for each type.
+ * arrays that contain the coefficients for each type.
*/
struct atomisp_dvs2_coef_types {
short __user *odd_real; /** real part of the odd coefficients*/
@@ -698,7 +698,7 @@ enum atomisp_burst_capture_options {
/* Digital Image Stabilization:
* 1. get dis statistics: reads DIS statistics from ISP (every frame)
* 2. set dis coefficients: set DIS filter coefficients (one time)
- * 3. set dis motion vecotr: set motion vector (result of DIS, every frame)
+ * 3. set dis motion vector: set motion vector (result of DIS, every frame)
*/
#define ATOMISP_IOC_G_DIS_STAT \
_IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics)
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index fdeb247036b0..064449fd51af 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -116,7 +116,7 @@ struct intel_v4l2_subdev_table {
};
/*
- * Sensor of external ISP can send multiple steams with different mipi data
+ * Sensor of external ISP can send multiple streams with different mipi data
* type in the same virtual channel. This information needs to come from the
* sensor or external ISP
*/
@@ -138,7 +138,7 @@ struct atomisp_input_stream_info {
/*
* if more isys_configs is more than 0, sensor needs to configure the
* input format differently. width and height can be 0. If width and
- * height is not zero, then the corresponsing data needs to be set
+ * height is not zero, then the corresponding data needs to be set
*/
struct atomisp_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
};
@@ -175,8 +175,6 @@ int atomisp_register_sensor_no_gmin(struct v4l2_subdev *subdev, u32 lanes,
enum atomisp_bayer_order bayer_order);
void atomisp_unregister_subdev(struct v4l2_subdev *subdev);
-int v4l2_get_acpi_sensor_info(struct device *dev, char **module_id_str);
-
/* API from old platform_camera.h, new CPUID implementation */
#define __IS_SOC(x) (boot_cpu_data.x86_vfm == x)
#define __IS_SOCS(x, y) (boot_cpu_data.x86_vfm == x || boot_cpu_data.x86_vfm == y)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
index d789d38ef689..6abda358a72f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
@@ -109,6 +109,8 @@ static struct gmin_cfg_var lenovo_ideapad_miix_310_vars[] = {
static struct gmin_cfg_var xiaomi_mipad2_vars[] = {
/* _DSM contains the wrong CsiPort for the front facing OV5693 sensor */
{ "INT33BE:00", "CsiPort", "0" },
+ /* _DSM contains the wrong CsiLanes for the back facing T4KA3 sensor */
+ { "XMCC0003:00", "CsiLanes", "4" },
{}
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index 50c4123ba006..b180fcbea9b1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -441,6 +441,8 @@ const struct vb2_ops atomisp_vb2_ops = {
.buf_queue = atomisp_buf_queue,
.start_streaming = atomisp_start_streaming,
.stop_streaming = atomisp_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static void atomisp_dev_init_struct(struct atomisp_device *isp)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 3a3e84a035e2..202497695e46 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -797,12 +797,12 @@ static int atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
pipe->vb_queue.ops = &atomisp_vb2_ops;
pipe->vb_queue.mem_ops = &vb2_vmalloc_memops;
pipe->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ pipe->vb_queue.lock = &pipe->vb_queue_mutex;
ret = vb2_queue_init(&pipe->vb_queue);
if (ret)
return ret;
pipe->vdev.queue = &pipe->vb_queue;
- pipe->vdev.queue->lock = &pipe->vb_queue_mutex;
INIT_LIST_HEAD(&pipe->buffers_in_css);
INIT_LIST_HEAD(&pipe->activeq);
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
index d0ba59cedc92..6f0a8fe868bd 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem_local.h
@@ -20,8 +20,10 @@
#include "vmem_global.h"
typedef u16 t_vmem_elem;
+typedef s16 t_svmem_elem;
-#define VMEM_ARRAY(x, s) t_vmem_elem x[s / ISP_NWAY][ISP_NWAY]
+#define VMEM_ARRAY(x, s) t_vmem_elem x[(s) / ISP_NWAY][ISP_NWAY]
+#define SVMEM_ARRAY(x, s) t_svmem_elem x[(s) / ISP_NWAY][ISP_NWAY]
void isp_vmem_load(
const isp_ID_t ID,
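Aside from adding SVMEM_ARRAY, the hunk above also hardens VMEM_ARRAY: the size argument is now parenthesized, so an expression passed as s divides as intended. A minimal userspace sketch of the difference (ISP_NWAY == 32 and the short element type are stand-ins for illustration, not taken from this diff):

#include <stdio.h>

#define ISP_NWAY 32
#define VMEM_ARRAY_UNSAFE(x, s) short x[s / ISP_NWAY][ISP_NWAY]
#define VMEM_ARRAY_SAFE(x, s)   short x[(s) / ISP_NWAY][ISP_NWAY]

int main(void)
{
	/* With s spelled as the expression 16 + 16, '/' binds tighter than
	 * '+', so the unparenthesized form expands to 16 + (16 / 32). */
	VMEM_ARRAY_UNSAFE(bad, 16 + 16);   /* short bad[16][32] */
	VMEM_ARRAY_SAFE(good, 16 + 16);    /* short good[1][32] */

	printf("rows without parens: %zu\n", sizeof(bad) / sizeof(bad[0]));
	printf("rows with parens:    %zu\n", sizeof(good) / sizeof(good[0]));
	return 0;
}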
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
index d294ac402de8..c5ab13511db8 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/assert_support.h
@@ -27,7 +27,8 @@
* #define assert(cnd) BUG_ON(cnd)
* but that causes many compiler warnings (==errors) under Android
* because it seems that the BUG_ON() macro is not seen as a check by
- * gcc like the BUG() macro is. */
+ * gcc like the BUG() macro is.
+ */
#define assert(cnd) \
do { \
if (!(cnd)) \
@@ -37,7 +38,8 @@
#ifndef PIPE_GENERATION
/* Deprecated OP___assert, this is still used in ~1000 places
* in the code. This will be removed over time.
- * The implementation for the pipe generation tool is in see support.isp.h */
+ * The implementation for the pipe generation tool is in see support.isp.h
+ */
#define OP___assert(cnd) assert(cnd)
static inline void compile_time_assert(unsigned int cond)
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
index 693154e8ec2f..7e37f0809034 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
@@ -94,7 +94,7 @@ hrt_data csi_rx_fe_ctrl_reg_load(
const hrt_address reg);
/**
* @brief Store a value to the register.
- * Store a value to the registe of the csi rx fe.
+ * Store a value to the register of the csi rx fe.
*
* @param[in] ID The global unique ID for the ibuf-controller instance.
* @param[in] reg The offset address of the register.
@@ -119,7 +119,7 @@ hrt_data csi_rx_be_ctrl_reg_load(
const hrt_address reg);
/**
* @brief Store a value to the register.
- * Store a value to the registe of the csi rx be.
+ * Store a value to the register of the csi rx be.
*
* @param[in] ID The global unique ID for the ibuf-controller instance.
* @param[in] reg The offset address of the register.
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
index 160c496784b7..907f9ebcc60d 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
@@ -28,12 +28,6 @@
#define CEIL_SHIFT(a, b) (((a) + (1 << (b)) - 1) >> (b))
#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b))
-#if !defined(PIPE_GENERATION)
-
-#define ceil_div(a, b) (CEIL_DIV(a, b))
-
-#endif /* !defined(PIPE_GENERATION) */
-
/*
* For SP and ISP, SDK provides the definition of OP_std_modadd.
* We need it only for host
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index 3e2899ad8517..e8c5d728fd55 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -204,9 +204,6 @@ static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
goto bind_err;
}
- dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n",
- bo->start, bytes, type, vmalloc_noprof);
-
return bo->start;
bind_err:
@@ -231,8 +228,6 @@ void hmm_free(ia_css_ptr virt)
{
struct hmm_buffer_object *bo;
- dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);
-
if (WARN_ON(virt == mmgr_EXCEPTION))
return;
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
index 457a004e194d..b75cfd3096d8 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
@@ -45,7 +45,8 @@ ia_css_bnr_dump(
const struct sh_css_isp_bnr_params *bnr,
unsigned int level)
{
- if (!bnr) return;
+ if (!bnr)
+ return;
ia_css_debug_dtrace(level, "Bayer Noise Reduction:\n");
ia_css_debug_dtrace(level, "\t%-32s = %d\n",
"bnr_gain_all", bnr->gain_all);
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
index 25e3f0822fb8..e66faeda3613 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
@@ -47,7 +47,8 @@ ia_css_de_dump(
const struct sh_css_isp_de_params *de,
unsigned int level)
{
- if (!de) return;
+ if (!de)
+ return;
ia_css_debug_dtrace(level, "Demosaic:\n");
ia_css_debug_dtrace(level, "\t%-32s = %d\n",
"de_pixelnoise", de->pixelnoise);
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
index e4fc90f88e24..b79d78e5b77f 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
@@ -172,25 +172,21 @@ ia_css_eed1_8_vmem_encode(
base = shuffle_block * i;
for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
- to->e_dew_enh_x[0][base + j] = min_t(int, max_t(int,
- from->dew_enhance_seg_x[j], 0),
- 8191);
- to->e_dew_enh_y[0][base + j] = min_t(int, max_t(int,
- from->dew_enhance_seg_y[j], -8192),
- 8191);
+ to->e_dew_enh_x[0][base + j] = clamp(from->dew_enhance_seg_x[j],
+ 0, 8191);
+ to->e_dew_enh_y[0][base + j] = clamp(from->dew_enhance_seg_y[j],
+ -8192, 8191);
}
for (j = 0; j < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); j++) {
- to->e_dew_enh_a[0][base + j] = min_t(int, max_t(int,
- from->dew_enhance_seg_slope[j],
- -8192), 8191);
+ to->e_dew_enh_a[0][base + j] = clamp(from->dew_enhance_seg_slope[j],
+ -8192, 8191);
/* Convert dew_enhance_seg_exp to flag:
* 0 -> 0
* 1...13 -> 1
*/
- to->e_dew_enh_f[0][base + j] = (min_t(int, max_t(int,
- from->dew_enhance_seg_exp[j],
- 0), 13) > 0);
+ to->e_dew_enh_f[0][base + j] = clamp(from->dew_enhance_seg_exp[j],
+ 0, 13) > 0;
}
/* Hard-coded to 0, in order to be able to handle out of
@@ -276,7 +272,7 @@ ia_css_eed1_8_encode(
for (i = 0; i < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); i++) {
min_exp = max(min_exp, from->dew_enhance_seg_exp[i]);
}
- to->e_dew_enh_asr = 13 - min(max(min_exp, 0), 13);
+ to->e_dew_enh_asr = 13 - clamp(min_exp, 0, 13);
to->dedgew_max = from->dedgew_max;
}
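The min_t/max_t stacks removed above (and the similar xnr3 hunks further down) reduce to the kernel's clamp() helper. A userspace sketch of the equivalence, with clamp_i() standing in for the type-checked macro from <linux/minmax.h>:

#include <stdio.h>

static int max_i(int a, int b) { return a > b ? a : b; }
static int min_i(int a, int b) { return a < b ? a : b; }

/* clamp(val, lo, hi) behaves as min(max(val, lo), hi) for lo <= hi */
static int clamp_i(int val, int lo, int hi)
{
	return min_i(max_i(val, lo), hi);
}

int main(void)
{
	int samples[] = { -20000, -8192, 0, 8191, 20000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		/* same bounds as the e_dew_enh_y/e_dew_enh_a encoding */
		printf("%6d -> %6d\n", samples[i],
		       clamp_i(samples[i], -8192, 8191));
	return 0;
}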
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
index 6fb3b38f49e7..b9eeeb592ec8 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h
@@ -94,8 +94,8 @@
struct eed1_8_vmem_params {
VMEM_ARRAY(e_dew_enh_x, ISP_VEC_NELEMS);
- VMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS);
- VMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS);
+ SVMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS);
+ SVMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS);
VMEM_ARRAY(e_dew_enh_f, ISP_VEC_NELEMS);
VMEM_ARRAY(chgrinv_x, ISP_VEC_NELEMS);
VMEM_ARRAY(chgrinv_a, ISP_VEC_NELEMS);
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
index 57b5e11e1cfe..8ccfa99c61ef 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
@@ -43,7 +43,8 @@ ia_css_fpn_dump(
const struct sh_css_isp_fpn_params *fpn,
unsigned int level)
{
- if (!fpn) return;
+ if (!fpn)
+ return;
ia_css_debug_dtrace(level, "Fixed Pattern Noise Reduction:\n");
ia_css_debug_dtrace(level, "\t%-32s = %d\n",
"fpn_shift", fpn->shift);
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
index 0091e2a3da52..c32659894c29 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -13,9 +13,11 @@
* more details.
*/
+#include <linux/bitops.h>
+#include <linux/math.h>
+
#include "ia_css_bayer_io.host.h"
#include "dma.h"
-#include "math_support.h"
#ifndef IA_CSS_NO_DEBUG
#include "ia_css_debug.h"
#endif
@@ -29,9 +31,8 @@ int ia_css_bayer_io_config(const struct ia_css_binary *binary,
const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
&args->out_frame;
const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame);
- const unsigned int ddr_bits_per_element = sizeof(short) * 8;
- const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS,
- ddr_bits_per_element);
+ const unsigned int ddr_elems_per_word =
+ DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short));
unsigned int size_get = 0, size_put = 0;
unsigned int offset = 0;
int ret;
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
index 32c504a950ce..5b2d5023b5ee 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
@@ -13,9 +13,11 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
*/
+#include <linux/bitops.h>
+#include <linux/math.h>
+
#include "ia_css_yuv444_io.host.h"
#include "dma.h"
-#include "math_support.h"
#ifndef IA_CSS_NO_DEBUG
#include "ia_css_debug.h"
#endif
@@ -29,9 +31,8 @@ int ia_css_yuv444_io_config(const struct ia_css_binary *binary,
const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
&args->out_frame;
const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame);
- const unsigned int ddr_bits_per_element = sizeof(short) * 8;
- const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS,
- ddr_bits_per_element);
+ const unsigned int ddr_elems_per_word =
+ DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short));
unsigned int size_get = 0, size_put = 0;
unsigned int offset = 0;
int ret;
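Here, in the bayer_io hunk above, and in virtual_isys.c further down, the local ceil_div() gives way to generic kernel helpers. A self-contained sketch of what those helpers compute; the definitions mirror <linux/math.h> and <linux/bitops.h>, and HIVE_ISP_DDR_WORD_BITS == 256 is an assumption for illustration:

#include <stdio.h>
#include <limits.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define BITS_PER_TYPE(type) (sizeof(type) * CHAR_BIT)
#define BITS_TO_BYTES(nr)   DIV_ROUND_UP(nr, CHAR_BIT)

#define HIVE_ISP_DDR_WORD_BITS 256

int main(void)
{
	/* 256-bit DDR words holding 16-bit elements: 16 elements per word */
	printf("elems/word:  %zu\n",
	       DIV_ROUND_UP(HIVE_ISP_DDR_WORD_BITS, BITS_PER_TYPE(short)));
	/* 10 bits per pixel rounds up to 2 bytes per pixel */
	printf("bytes/pixel: %d\n", BITS_TO_BYTES(10));
	return 0;
}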
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
index 5f186fb03642..15386a773dc5 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
@@ -65,8 +65,7 @@ int ia_css_iterator_configure(const struct ia_css_binary *binary,
* the original out res. for video pipe, it has two output pins --- out and
* vf_out, so it can keep these two resolutions already. */
if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
- binary->vf_downscale_log2 > 0)
- {
+ binary->vf_downscale_log2 > 0) {
/* TODO: Remove this after preview output decimation is fixed
* by configuring out&vf info files properly */
my_info.padded_width <<= binary->vf_downscale_log2;
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
index 70132d955e9b..def2c8fb4b38 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
@@ -108,7 +108,7 @@ compute_coring(int coring)
* factor. Clip to [0, isp_scale-1).
*/
isp_coring = ((coring * isp_scale) + offset) / host_scale;
- return min(max(isp_coring, 0), isp_scale - 1);
+ return clamp(isp_coring, 0, isp_scale - 1);
}
/*
@@ -168,15 +168,15 @@ ia_css_xnr3_encode(
to->alpha.y0 = alpha_y0;
to->alpha.u0 = alpha_u0;
to->alpha.v0 = alpha_v0;
- to->alpha.ydiff = min(max(alpha_ydiff, min_diff), max_diff);
- to->alpha.udiff = min(max(alpha_udiff, min_diff), max_diff);
- to->alpha.vdiff = min(max(alpha_vdiff, min_diff), max_diff);
+ to->alpha.ydiff = clamp(alpha_ydiff, min_diff, max_diff);
+ to->alpha.udiff = clamp(alpha_udiff, min_diff, max_diff);
+ to->alpha.vdiff = clamp(alpha_vdiff, min_diff, max_diff);
/* coring parameters are expressed in q1.NN format */
to->coring.u0 = coring_u0;
to->coring.v0 = coring_v0;
- to->coring.udiff = min(max(coring_udiff, min_diff), max_diff);
- to->coring.vdiff = min(max(coring_vdiff, min_diff), max_diff);
+ to->coring.udiff = clamp(coring_udiff, min_diff, max_diff);
+ to->coring.vdiff = clamp(coring_vdiff, min_diff, max_diff);
/* blending strength is expressed in q1.NN format */
to->blending.strength = blending;
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
index b0f904a5e442..7ce2b2d6da11 100644
--- a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
@@ -328,7 +328,8 @@ ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary,
dvs_info = &info->dvs_grid.dvs_grid_info;
- /* for DIS, we use a division instead of a ceil_div. If this is smaller
+ /*
+ * For DIS, we use a division instead of a DIV_ROUND_UP(). If this is smaller
* than the 3a grid size, it indicates that the outer values are not
* valid for DIS.
*/
@@ -923,8 +924,8 @@ ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
return 0;
}
-static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
- struct ia_css_binary *binary) {
+int ia_css_binary_find(struct ia_css_binary_descr *descr, struct ia_css_binary *binary)
+{
int mode;
bool online;
bool two_ppc;
@@ -953,10 +954,8 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
/* MW: used after an error check, may accept NULL, but doubtfull */
assert(binary);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n",
- descr, descr->mode,
- binary);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n",
+ descr, descr->mode, binary);
mode = descr->mode;
online = descr->online;
@@ -1001,15 +1000,15 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
}
/* print a map of the binary file */
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "BINARY INFO:\n");
+ dev_dbg(atomisp_dev, "BINARY INFO:\n");
for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) {
xcandidate = binary_infos[i];
if (xcandidate) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%d:\n", i);
+ dev_dbg(atomisp_dev, "%d:\n", i);
while (xcandidate) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " Name:%s Type:%d Cont:%d\n",
- xcandidate->blob->name, xcandidate->type,
- xcandidate->sp.enable.continuous);
+ dev_dbg(atomisp_dev, " Name:%s Type:%d Cont:%d\n",
+ xcandidate->blob->name, xcandidate->type,
+ xcandidate->sp.enable.continuous);
xcandidate = xcandidate->next;
}
}
@@ -1021,9 +1020,9 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
struct ia_css_binary_info *candidate = &xcandidate->sp;
/* printf("sh_css_binary_find: evaluating candidate:
* %d\n",candidate->id); */
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n",
- candidate, candidate->pipeline.mode, candidate->id);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n",
+ candidate, candidate->pipeline.mode, candidate->id);
/*
* MW: Only a limited set of jointly configured binaries can
@@ -1032,17 +1031,16 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
*/
if (!candidate->enable.continuous &&
continuous && (mode != IA_CSS_BINARY_MODE_COPY)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n",
- __LINE__, candidate->enable.continuous,
- continuous, mode,
- IA_CSS_BINARY_MODE_COPY);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n",
+ __LINE__, candidate->enable.continuous,
+ continuous, mode, IA_CSS_BINARY_MODE_COPY);
continue;
}
if (striped && candidate->iterator.num_stripes == 1) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: binary is not striped\n",
- __LINE__);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: binary is not striped\n",
+ __LINE__);
continue;
}
@@ -1050,58 +1048,38 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
(mode != IA_CSS_BINARY_MODE_COPY) &&
(mode != IA_CSS_BINARY_MODE_CAPTURE_PP) &&
(mode != IA_CSS_BINARY_MODE_VF_PP)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d != %d)\n",
- __LINE__,
- candidate->pipeline.isp_pipe_version, isp_pipe_version);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d != %d)\n",
+ __LINE__, candidate->pipeline.isp_pipe_version, isp_pipe_version);
continue;
}
if (!candidate->enable.reduced_pipe && enable_reduced_pipe) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__,
- candidate->enable.reduced_pipe,
- enable_reduced_pipe);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.reduced_pipe, enable_reduced_pipe);
continue;
}
if (!candidate->enable.dvs_6axis && enable_dvs_6axis) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__,
- candidate->enable.dvs_6axis,
- enable_dvs_6axis);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.dvs_6axis, enable_dvs_6axis);
continue;
}
if (candidate->enable.high_speed && !enable_high_speed) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d && !%d\n",
- __LINE__,
- candidate->enable.high_speed,
- enable_high_speed);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, candidate->enable.high_speed, enable_high_speed);
continue;
}
if (!candidate->enable.xnr && need_xnr) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d && !%d\n",
- __LINE__,
- candidate->enable.xnr,
- need_xnr);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, candidate->enable.xnr, need_xnr);
continue;
}
if (!(candidate->enable.ds & 2) && enable_yuv_ds) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__,
- ((candidate->enable.ds & 2) != 0),
- enable_yuv_ds);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, ((candidate->enable.ds & 2) != 0), enable_yuv_ds);
continue;
}
if ((candidate->enable.ds & 2) && !enable_yuv_ds) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d && !%d\n",
- __LINE__,
- ((candidate->enable.ds & 2) != 0),
- enable_yuv_ds);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__, ((candidate->enable.ds & 2) != 0), enable_yuv_ds);
continue;
}
@@ -1115,100 +1093,85 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
candidate->vf_dec.is_variable ||
/* or more than one output pin. */
xcandidate->num_output_pins > 1)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n",
- __LINE__, req_vf_info,
- candidate->enable.vf_veceven,
- candidate->vf_dec.is_variable,
- xcandidate->num_output_pins, 1);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n",
+ __LINE__, req_vf_info, candidate->enable.vf_veceven,
+ candidate->vf_dec.is_variable, xcandidate->num_output_pins, 1);
continue;
}
if (!candidate->enable.dvs_envelope && need_dvs) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__,
- candidate->enable.dvs_envelope, (int)need_dvs);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.dvs_envelope, (int)need_dvs);
continue;
}
/* internal_res check considers input, output, and dvs envelope sizes */
ia_css_binary_internal_res(req_in_info, req_bds_out_info,
req_bin_out_info, &dvs_env, candidate, &internal_res);
if (internal_res.width > candidate->internal.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d)\n",
- __LINE__, internal_res.width,
- candidate->internal.max_width);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.width, candidate->internal.max_width);
continue;
}
if (internal_res.height > candidate->internal.max_height) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d)\n",
- __LINE__, internal_res.height,
- candidate->internal.max_height);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.height, candidate->internal.max_height);
continue;
}
if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && %d\n",
- __LINE__, candidate->enable.ds, (int)need_ds);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.ds, (int)need_ds);
continue;
}
if (!candidate->enable.uds && !candidate->enable.dvs_6axis && need_dz) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n",
- __LINE__, candidate->enable.uds,
- candidate->enable.dvs_6axis, (int)need_dz);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n",
+ __LINE__, candidate->enable.uds, candidate->enable.dvs_6axis,
+ (int)need_dz);
continue;
}
if (online && candidate->input.source == IA_CSS_BINARY_INPUT_MEMORY) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n",
- __LINE__, online, candidate->input.source,
- IA_CSS_BINARY_INPUT_MEMORY);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_MEMORY);
continue;
}
if (!online && candidate->input.source == IA_CSS_BINARY_INPUT_SENSOR) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n",
- __LINE__, online, candidate->input.source,
- IA_CSS_BINARY_INPUT_SENSOR);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_SENSOR);
continue;
}
if (req_bin_out_info->res.width < candidate->output.min_width ||
req_bin_out_info->res.width > candidate->output.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n",
- __LINE__,
- req_bin_out_info->padded_width,
- candidate->output.min_width,
- req_bin_out_info->padded_width,
- candidate->output.max_width);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n",
+ __LINE__, req_bin_out_info->padded_width,
+ candidate->output.min_width, req_bin_out_info->padded_width,
+ candidate->output.max_width);
continue;
}
if (xcandidate->num_output_pins > 1 &&
/* in case we have a second output pin, */
req_vf_info) { /* and we need vf output. */
if (req_vf_info->res.width > candidate->output.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d < %d)\n",
- __LINE__,
- req_vf_info->res.width,
- candidate->output.max_width);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__, req_vf_info->res.width,
+ candidate->output.max_width);
continue;
}
}
if (req_in_info->padded_width > candidate->input.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d)\n",
- __LINE__, req_in_info->padded_width,
- candidate->input.max_width);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, req_in_info->padded_width, candidate->input.max_width);
continue;
}
if (!binary_supports_output_format(xcandidate, req_bin_out_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: !%d\n",
- __LINE__,
- binary_supports_output_format(xcandidate, req_bin_out_info->format));
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: !%d\n",
+ __LINE__, binary_supports_output_format(xcandidate,
+ req_bin_out_info->format));
continue;
}
if (xcandidate->num_output_pins > 1 &&
@@ -1217,11 +1180,10 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
/* check if the required vf format
is supported. */
!binary_supports_output_format(xcandidate, req_vf_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
- __LINE__, xcandidate->num_output_pins, 1,
- req_vf_info,
- binary_supports_output_format(xcandidate, req_vf_info->format));
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1, req_vf_info,
+ binary_supports_output_format(xcandidate, req_vf_info->format));
continue;
}
@@ -1229,11 +1191,11 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
if (xcandidate->num_output_pins == 1 &&
req_vf_info && candidate->enable.vf_veceven &&
!binary_supports_vf_format(xcandidate, req_vf_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
- __LINE__, xcandidate->num_output_pins, 1,
- req_vf_info, candidate->enable.vf_veceven,
- binary_supports_vf_format(xcandidate, req_vf_info->format));
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1,
+ req_vf_info, candidate->enable.vf_veceven,
+ binary_supports_vf_format(xcandidate, req_vf_info->format));
continue;
}
@@ -1241,37 +1203,31 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
if (xcandidate->num_output_pins == 1 &&
req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
if (req_vf_info->res.width > candidate->output.max_width) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: (%d < %d)\n",
- __LINE__,
- req_vf_info->res.width,
- candidate->output.max_width);
+ dev_dbg(atomisp_dev,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__, req_vf_info->res.width,
+ candidate->output.max_width);
continue;
}
}
if (!supports_bds_factor(candidate->bds.supported_bds_factors,
descr->required_bds_factor)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
- __LINE__, candidate->bds.supported_bds_factors,
- descr->required_bds_factor);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->bds.supported_bds_factors,
+ descr->required_bds_factor);
continue;
}
if (!candidate->enable.dpc && need_dpc) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
- __LINE__, candidate->enable.dpc,
- descr->enable_dpc);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->enable.dpc, descr->enable_dpc);
continue;
}
if (candidate->uds.use_bci && enable_capture_pp_bli) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
- __LINE__, candidate->uds.use_bci,
- descr->enable_capture_pp_bli);
+ dev_dbg(atomisp_dev, "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->uds.use_bci, descr->enable_capture_pp_bli);
continue;
}
@@ -1290,39 +1246,18 @@ static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
break;
}
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() selected = %p, mode = %d ID = %d\n",
- xcandidate, xcandidate ? xcandidate->sp.pipeline.mode : 0, xcandidate ? xcandidate->sp.id : 0);
-
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_binary_find() leave: return_err=%d\n", err);
-
if (!err && xcandidate)
- dev_dbg(atomisp_dev,
- "Using binary %s (id %d), type %d, mode %d, continuous %s\n",
- xcandidate->blob->name,
- xcandidate->sp.id,
- xcandidate->type,
+ dev_dbg(atomisp_dev, "Using binary %s (id %d), type %d, mode %d, continuous %s\n",
+ xcandidate->blob->name, xcandidate->sp.id, xcandidate->type,
xcandidate->sp.pipeline.mode,
xcandidate->sp.enable.continuous ? "true" : "false");
+ if (err)
+ dev_err(atomisp_dev, "Failed to find a firmware binary matching the pipeline parameters\n");
return err;
}
-int ia_css_binary_find(struct ia_css_binary_descr *descr,
- struct ia_css_binary *binary)
-{
- int ret = __ia_css_binary_find(descr, binary);
-
- if (unlikely(ret)) {
- dev_dbg(atomisp_dev, "Seeking for binary failed at:");
- dump_stack();
- }
-
- return ret;
-}
-
unsigned
ia_css_binary_max_vf_width(void)
{
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
index 52483498239d..2e0193671f4b 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
@@ -13,6 +13,8 @@
* more details.
*/
+#include <linux/bitops.h>
+#include <linux/math.h>
#include <linux/string.h> /* for memcpy() */
#include "system_global.h"
@@ -20,7 +22,6 @@
#include "ia_css_isys.h"
#include "ia_css_debug.h"
-#include "math_support.h"
#include "virtual_isys.h"
#include "isp.h"
#include "sh_css_defs.h"
@@ -558,7 +559,7 @@ static int32_t calculate_stride(
bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);
pixels_per_word = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
- words_per_line = ceil_div(pixels_per_line_padded, pixels_per_word);
+ words_per_line = DIV_ROUND_UP(pixels_per_line_padded, pixels_per_word);
bytes_per_line = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
return bytes_per_line;
@@ -690,7 +691,6 @@ static bool calculate_ibuf_ctrl_cfg(
const isp2401_input_system_cfg_t *isys_cfg,
ibuf_ctrl_cfg_t *cfg)
{
- const s32 bits_per_byte = 8;
s32 bits_per_pixel;
s32 bytes_per_pixel;
s32 left_padding;
@@ -698,7 +698,7 @@ static bool calculate_ibuf_ctrl_cfg(
(void)input_port;
bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
- bytes_per_pixel = ceil_div(bits_per_pixel, bits_per_byte);
+ bytes_per_pixel = BITS_TO_BYTES(bits_per_pixel);
left_padding = CEIL_MUL(isys_cfg->output_port_attr.left_padding, ISP_VEC_NELEMS)
* bytes_per_pixel;
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 01f0b8a33c99..ca97ea082cf4 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -5826,20 +5826,19 @@ need_yuv_scaler_stage(const struct ia_css_pipe *pipe)
* Later, merge this with ia_css_pipe_create_cas_scaler_desc
*/
static int ia_css_pipe_create_cas_scaler_desc_single_output(
- struct ia_css_frame_info *cas_scaler_in_info,
- struct ia_css_frame_info *cas_scaler_out_info,
- struct ia_css_frame_info *cas_scaler_vf_info,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
struct ia_css_cas_binary_descr *descr)
{
unsigned int i;
unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
int err = 0;
struct ia_css_frame_info tmp_in_info;
-
unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
- assert(cas_scaler_in_info);
- assert(cas_scaler_out_info);
+ assert(in_info);
+ assert(out_info);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_pipe_create_cas_scaler_desc() enter:\n");
@@ -5847,10 +5846,8 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output(
/* We assume that this function is used only for single output port case. */
descr->num_output_stage = 1;
- hor_ds_factor = CEIL_DIV(cas_scaler_in_info->res.width,
- cas_scaler_out_info->res.width);
- ver_ds_factor = CEIL_DIV(cas_scaler_in_info->res.height,
- cas_scaler_out_info->res.height);
+ hor_ds_factor = CEIL_DIV(in_info->res.width, out_info->res.width);
+ ver_ds_factor = CEIL_DIV(in_info->res.height, out_info->res.height);
/* use the same horizontal and vertical downscaling factor for simplicity */
assert(hor_ds_factor == ver_ds_factor);
@@ -5895,30 +5892,29 @@ static int ia_css_pipe_create_cas_scaler_desc_single_output(
goto ERR;
}
- tmp_in_info = *cas_scaler_in_info;
+ tmp_in_info = *in_info;
for (i = 0; i < descr->num_stage; i++) {
descr->in_info[i] = tmp_in_info;
- if ((tmp_in_info.res.width / max_scale_factor_per_stage) <=
- cas_scaler_out_info->res.width) {
+ if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= out_info->res.width) {
descr->is_output_stage[i] = true;
if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
- descr->internal_out_info[i].res.width = cas_scaler_out_info->res.width;
- descr->internal_out_info[i].res.height = cas_scaler_out_info->res.height;
- descr->internal_out_info[i].padded_width = cas_scaler_out_info->padded_width;
+ descr->internal_out_info[i].res.width = out_info->res.width;
+ descr->internal_out_info[i].res.height = out_info->res.height;
+ descr->internal_out_info[i].padded_width = out_info->padded_width;
descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
} else {
assert(i == (descr->num_stage - 1));
descr->internal_out_info[i].res.width = 0;
descr->internal_out_info[i].res.height = 0;
}
- descr->out_info[i].res.width = cas_scaler_out_info->res.width;
- descr->out_info[i].res.height = cas_scaler_out_info->res.height;
- descr->out_info[i].padded_width = cas_scaler_out_info->padded_width;
- descr->out_info[i].format = cas_scaler_out_info->format;
- if (cas_scaler_vf_info) {
- descr->vf_info[i].res.width = cas_scaler_vf_info->res.width;
- descr->vf_info[i].res.height = cas_scaler_vf_info->res.height;
- descr->vf_info[i].padded_width = cas_scaler_vf_info->padded_width;
+ descr->out_info[i].res.width = out_info->res.width;
+ descr->out_info[i].res.height = out_info->res.height;
+ descr->out_info[i].padded_width = out_info->padded_width;
+ descr->out_info[i].format = out_info->format;
+ if (vf_info) {
+ descr->vf_info[i].res.width = vf_info->res.width;
+ descr->vf_info[i].res.height = vf_info->res.height;
+ descr->vf_info[i].padded_width = vf_info->padded_width;
ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
} else {
descr->vf_info[i].res.width = 0;
diff --git a/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h b/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h
deleted file mode 100644
index 6f058f132300..000000000000
--- a/drivers/staging/media/atomisp/pci/sh_css_dvs_info.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/**
-Support for Intel Camera Imaging ISP subsystem.
-Copyright (c) 2010 - 2015, Intel Corporation.
-
-This program is free software; you can redistribute it and/or modify it
-under the terms and conditions of the GNU General Public License,
-version 2, as published by the Free Software Foundation.
-
-This program is distributed in the hope it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-more details.
-*/
-
-#ifndef __SH_CSS_DVS_INFO_H__
-#define __SH_CSS_DVS_INFO_H__
-
-#include <math_support.h>
-
-/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */
-#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2))
-
-/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */
-#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA))
-
-/* Bilinear interpolation (HRT_GDC_BLI_MODE) is the supported method currently.
- * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet */
-#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE
-
-#define DVS_INPUT_BYTES_PER_PIXEL (1)
-
-#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X))
-
-#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA))
-
-#endif /* __SH_CSS_DVS_INFO_H__ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
index 7782f76b9f97..25e5b4570f7d 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.h
@@ -18,7 +18,6 @@
#include <math_support.h>
#include <ia_css_types.h>
-#include <sh_css_dvs_info.h>
#include "gdc_global.h" /* gdc_warp_param_mem_t */
#define DVS_ENV_MIN_X (12)
diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
index 31b2b48085c5..712f916f0935 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
@@ -333,20 +333,16 @@ static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = {
static int isc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL;
+ struct device_node *epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
- int ret;
+ int ret = -EINVAL;
INIT_LIST_HEAD(&isc->subdev_entities);
- while (1) {
+ for_each_endpoint_of_node(np, epn) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
-
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
index 020034f631f5..9485167d5b7d 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
@@ -316,23 +316,19 @@ static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = {
static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
- struct device_node *epn = NULL;
+ struct device_node *epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
- int ret;
+ int ret = -EINVAL;
bool mipi_mode;
INIT_LIST_HEAD(&isc->subdev_entities);
mipi_mode = of_property_read_bool(np, "microchip,mipi-mode");
- while (1) {
+ for_each_endpoint_of_node(np, epn) {
struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };
- epn = of_graph_get_next_endpoint(np, epn);
- if (!epn)
- return 0;
-
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
&v4l2_epn);
if (ret) {
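Both ISC parsers now use for_each_endpoint_of_node() instead of driving of_graph_get_next_endpoint() by hand. A hedged sketch of the resulting pattern: the iterator drops the previous endpoint's reference as it advances, so only an early exit still needs an explicit of_node_put(). The function name and the availability check are illustrative, not the drivers' real logic:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_graph.h>

static int example_count_endpoints(struct device_node *np)
{
	struct device_node *epn;
	int count = 0;

	for_each_endpoint_of_node(np, epn) {
		if (!of_device_is_available(epn)) {
			of_node_put(epn);	/* leaving the loop early */
			return -ENODEV;
		}
		count++;
	}

	return count;
}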
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 3df58eb3e882..e7aee7e3db5b 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -535,31 +535,53 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq)
container_of(vq, struct imgu_video_device, vbq);
int r;
unsigned int pipe;
+ bool stop_streaming = false;
+ /* Verify that the node had been setup with imgu_v4l2_node_setup() */
WARN_ON(!node->enabled);
pipe = node->pipe;
dev_dbg(dev, "Try to stream off node [%u][%u]", pipe, node->id);
- imgu_pipe = &imgu->imgu_pipe[pipe];
- r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, video, s_stream, 0);
- if (r)
- dev_err(&imgu->pci_dev->dev,
- "failed to stop subdev streaming\n");
+ /*
+ * When the first node of a streaming setup is stopped, the entire
+ * pipeline needs to stop before individual nodes are disabled.
+ * Perform the inverse of the initial setup.
+ *
+ * Part 1 - s_stream on the entire pipeline
+ */
mutex_lock(&imgu->streaming_lock);
- /* Was this the first node with streaming disabled? */
- if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) {
+ if (imgu->streaming) {
/* Yes, really stop streaming now */
dev_dbg(dev, "IMGU streaming is ready to stop");
r = imgu_s_stream(imgu, false);
if (!r)
imgu->streaming = false;
+ stop_streaming = true;
}
-
- imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
mutex_unlock(&imgu->streaming_lock);
+ /* Part 2 - s_stream on subdevs
+ *
+ * If we call s_stream multiple times, Linux v6.7's call_s_stream()
+ * WARNs and aborts. Thus, disable all pipes at once, and only once.
+ */
+ if (stop_streaming) {
+ for_each_set_bit(pipe, imgu->css.enabled_pipes,
+ IMGU_MAX_PIPE_NUM) {
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev,
+ video, s_stream, 0);
+ if (r)
+ dev_err(&imgu->pci_dev->dev,
+ "failed to stop subdev streaming\n");
+ }
+ }
+
+ /* Part 3 - individual node teardown */
video_device_pipeline_stop(&node->vdev);
+ imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
}
/******************** v4l2_ioctl_ops ********************/
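The Part 2 loop above leans on for_each_set_bit() to visit each enabled pipe exactly once, which is what keeps call_s_stream() from ever seeing a second s_stream(0). A hedged sketch of that bitmap walk in isolation; everything except for_each_set_bit() itself is illustrative:

#include <linux/bitops.h>

#define EXAMPLE_MAX_PIPE_NUM 16

static void example_stop_enabled_pipes(const unsigned long *enabled_pipes,
				       void (*stop_one)(unsigned int pipe))
{
	unsigned int pipe;

	/* visits only the set bits, so each enabled pipe is stopped once */
	for_each_set_bit(pipe, enabled_pipes, EXAMPLE_MAX_PIPE_NUM)
		stop_one(pipe);
}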
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index de3e0345ab7c..5e5b296f93ba 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -982,6 +982,8 @@ static const struct of_device_id vdec_dt_match[] = {
.data = &vdec_platform_gxm },
{ .compatible = "amlogic,gxl-vdec",
.data = &vdec_platform_gxl },
+ { .compatible = "amlogic,gxlx-vdec",
+ .data = &vdec_platform_gxlx },
{ .compatible = "amlogic,g12a-vdec",
.data = &vdec_platform_g12a },
{ .compatible = "amlogic,sm1-vdec",
diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c
index 3fe2de0c9331..a65cb4959446 100644
--- a/drivers/staging/media/meson/vdec/vdec_1.c
+++ b/drivers/staging/media/meson/vdec/vdec_1.c
@@ -129,7 +129,7 @@ static u32 vdec_1_vififo_level(struct amvdec_session *sess)
return amvdec_read_dos(core, VLD_MEM_VIFIFO_LEVEL);
}
-static int vdec_1_stop(struct amvdec_session *sess)
+static void __vdec_1_stop(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
@@ -158,10 +158,17 @@ static int vdec_1_stop(struct amvdec_session *sess)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_1, GEN_PWR_VDEC_1);
- clk_disable_unprepare(core->vdec_1_clk);
-
if (sess->priv)
codec_ops->stop(sess);
+}
+
+static int vdec_1_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ __vdec_1_stop(sess);
+
+ clk_disable_unprepare(core->vdec_1_clk);
return 0;
}
@@ -235,7 +242,8 @@ static int vdec_1_start(struct amvdec_session *sess)
return 0;
stop:
- vdec_1_stop(sess);
+ __vdec_1_stop(sess);
+ clk_disable_unprepare(core->vdec_1_clk);
return ret;
}
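The same split is applied to vdec_hevc below: stop is factored into a clock-neutral __stop helper, and each caller disables exactly the clocks it enabled, so an error path in start can no longer touch a clock that was never prepared. A generic, hedged sketch of the pattern, with all names illustrative:

#include <linux/clk.h>

struct example_session {
	struct clk *core_clk;
};

static int example_program_hw(struct example_session *s)
{
	return 0;	/* stand-in for the real register setup */
}

static void __example_stop(struct example_session *s)
{
	/* hardware teardown only; deliberately leaves clock state alone */
}

static int example_start(struct example_session *s)
{
	int ret = clk_prepare_enable(s->core_clk);

	if (ret)
		return ret;

	ret = example_program_hw(s);
	if (ret) {
		__example_stop(s);
		/* unwind only what this function enabled */
		clk_disable_unprepare(s->core_clk);
	}
	return ret;
}

static int example_stop(struct example_session *s)
{
	__example_stop(s);
	clk_disable_unprepare(s->core_clk);
	return 0;
}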
diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c
index afced435c907..1939c47def58 100644
--- a/drivers/staging/media/meson/vdec/vdec_hevc.c
+++ b/drivers/staging/media/meson/vdec/vdec_hevc.c
@@ -110,7 +110,7 @@ static u32 vdec_hevc_vififo_level(struct amvdec_session *sess)
return readl_relaxed(sess->core->dos_base + HEVC_STREAM_LEVEL);
}
-static int vdec_hevc_stop(struct amvdec_session *sess)
+static void __vdec_hevc_stop(struct amvdec_session *sess)
{
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
@@ -142,6 +142,13 @@ static int vdec_hevc_stop(struct amvdec_session *sess)
else
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
GEN_PWR_VDEC_HEVC, GEN_PWR_VDEC_HEVC);
+}
+
+static int vdec_hevc_stop(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+
+ __vdec_hevc_stop(sess);
clk_disable_unprepare(core->vdec_hevc_clk);
if (core->platform->revision == VDEC_REVISION_G12A ||
@@ -151,20 +158,12 @@ static int vdec_hevc_stop(struct amvdec_session *sess)
return 0;
}
-static int vdec_hevc_start(struct amvdec_session *sess)
+static int __vdec_hevc_start(struct amvdec_session *sess)
{
int ret;
struct amvdec_core *core = sess->core;
struct amvdec_codec_ops *codec_ops = sess->fmt_out->codec_ops;
- if (core->platform->revision == VDEC_REVISION_G12A ||
- core->platform->revision == VDEC_REVISION_SM1) {
- clk_set_rate(core->vdec_hevcf_clk, 666666666);
- ret = clk_prepare_enable(core->vdec_hevcf_clk);
- if (ret)
- return ret;
- }
-
clk_set_rate(core->vdec_hevc_clk, 666666666);
ret = clk_prepare_enable(core->vdec_hevc_clk);
if (ret) {
@@ -223,10 +222,32 @@ static int vdec_hevc_start(struct amvdec_session *sess)
return 0;
stop:
- vdec_hevc_stop(sess);
+ __vdec_hevc_stop(sess);
+ clk_disable_unprepare(core->vdec_hevc_clk);
return ret;
}
+static int vdec_hevc_start(struct amvdec_session *sess)
+{
+ struct amvdec_core *core = sess->core;
+ int ret;
+
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1) {
+ clk_set_rate(core->vdec_hevcf_clk, 666666666);
+ ret = clk_prepare_enable(core->vdec_hevcf_clk);
+ if (ret)
+ return ret;
+
+ ret = __vdec_hevc_start(sess);
+ if (ret)
+ clk_disable_unprepare(core->vdec_hevcf_clk);
+ return ret;
+ }
+
+ return __vdec_hevc_start(sess);
+}
+
struct amvdec_ops vdec_hevc_ops = {
.start = vdec_hevc_start,
.stop = vdec_hevc_stop,
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
index 70c9fd7c8bc5..66bb307db85a 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.c
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -101,6 +101,44 @@ static const struct amvdec_format vdec_formats_gxl[] = {
},
};
+static const struct amvdec_format vdec_formats_gxlx[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .min_buffers = 2,
+ .max_buffers = 24,
+ .max_width = 3840,
+ .max_height = 2160,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_h264_ops,
+ .firmware_path = "meson/vdec/gxl_h264.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED |
+ V4L2_FMT_FLAG_DYN_RESOLUTION,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG1,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .min_buffers = 8,
+ .max_buffers = 8,
+ .max_width = 1920,
+ .max_height = 1080,
+ .vdec_ops = &vdec_1_ops,
+ .codec_ops = &codec_mpeg12_ops,
+ .firmware_path = "meson/vdec/gxl_mpeg12.bin",
+ .pixfmts_cap = { V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_YUV420M, 0 },
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ },
+};
+
static const struct amvdec_format vdec_formats_gxm[] = {
{
.pixfmt = V4L2_PIX_FMT_VP9,
@@ -263,6 +301,12 @@ const struct vdec_platform vdec_platform_gxl = {
.revision = VDEC_REVISION_GXL,
};
+const struct vdec_platform vdec_platform_gxlx = {
+ .formats = vdec_formats_gxlx,
+ .num_formats = ARRAY_SIZE(vdec_formats_gxlx),
+ .revision = VDEC_REVISION_GXLX,
+};
+
const struct vdec_platform vdec_platform_gxm = {
.formats = vdec_formats_gxm,
.num_formats = ARRAY_SIZE(vdec_formats_gxm),
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h
index 731877a771f4..88ca4a9db8a8 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.h
+++ b/drivers/staging/media/meson/vdec/vdec_platform.h
@@ -14,6 +14,7 @@ struct amvdec_format;
enum vdec_revision {
VDEC_REVISION_GXBB,
VDEC_REVISION_GXL,
+ VDEC_REVISION_GXLX,
VDEC_REVISION_GXM,
VDEC_REVISION_G12A,
VDEC_REVISION_SM1,
@@ -28,6 +29,7 @@ struct vdec_platform {
extern const struct vdec_platform vdec_platform_gxbb;
extern const struct vdec_platform vdec_platform_gxm;
extern const struct vdec_platform vdec_platform_gxl;
+extern const struct vdec_platform vdec_platform_gxlx;
extern const struct vdec_platform vdec_platform_g12a;
extern const struct vdec_platform vdec_platform_sm1;
diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c
index fecd3e67c7a1..b6d34145bc19 100644
--- a/drivers/staging/media/starfive/camss/stf-camss.c
+++ b/drivers/staging/media/starfive/camss/stf-camss.c
@@ -358,8 +358,6 @@ err_cleanup_notifier:
/*
* stfcamss_remove - Remove STFCAMSS platform device
* @pdev: Pointer to STFCAMSS platform device
- *
- * Always returns 0.
*/
static void stfcamss_remove(struct platform_device *pdev)
{
diff --git a/drivers/staging/media/starfive/camss/stf-capture.c b/drivers/staging/media/starfive/camss/stf-capture.c
index ec5169e7b391..e15d2e97eb0b 100644
--- a/drivers/staging/media/starfive/camss/stf-capture.c
+++ b/drivers/staging/media/starfive/camss/stf-capture.c
@@ -180,6 +180,8 @@ static void stf_channel_set(struct stfcamss_video *video)
u32 val;
if (cap->type == STF_CAPTURE_RAW) {
+ const struct v4l2_pix_format *pix = &video->active_fmt.fmt.pix;
+
val = stf_syscon_reg_read(stfcamss, VIN_CHANNEL_SEL_EN);
val &= ~U0_VIN_CHANNEL_SEL_MASK;
val |= CHANNEL(0);
@@ -193,7 +195,7 @@ static void stf_channel_set(struct stfcamss_video *video)
val |= PIXEL_HEIGH_BIT_SEL(0);
val &= ~U0_VIN_PIX_CNT_END_MASK;
- val |= PIX_CNT_END(IMAGE_MAX_WIDTH / 4 - 1);
+ val |= PIX_CNT_END(pix->width / 4 - 1);
stf_syscon_reg_write(stfcamss, VIN_INRT_PIX_CFG, val);
} else if (cap->type == STF_CAPTURE_YUV) {
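The change above derives the VIN pixel-count terminal value from the active capture format instead of the compile-time IMAGE_MAX_WIDTH. A hypothetical helper showing just the fixed arithmetic; the 4-pixels-per-cycle divisor mirrors the patch, and the PIX_CNT_END() register packing is omitted:

#include <linux/types.h>
#include <linux/videodev2.h>

static u32 my_pix_cnt_end(const struct v4l2_pix_format *pix)
{
	return pix->width / 4 - 1;
}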
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 6254a5df2502..2b3cdb1ce140 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -454,18 +454,18 @@ static int comp_probe_channel(struct most_interface *iface, int channel_idx,
struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
if (mdev) {
- pr_err("channel already linked\n");
+ pr_err("Channel already linked\n");
return -EEXIST;
}
if (ccfg->direction != MOST_CH_RX) {
- pr_err("wrong direction, expect rx\n");
+ pr_err("Wrong direction, expected rx\n");
return -EINVAL;
}
if (ccfg->data_type != MOST_CH_SYNC &&
ccfg->data_type != MOST_CH_ISOC) {
- pr_err("wrong channel type, expect sync or isoc\n");
+ pr_err("Wrong channel type, expected sync or isoc\n");
return -EINVAL;
}
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index d09211589d1c..977f8fc29e63 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -175,7 +175,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
}
}
- dev_err(nvec->dev, "could not allocate %s buffer\n",
+ dev_err(nvec->dev, "Could not allocate %s buffer\n",
(category == NVEC_MSG_TX) ? "TX" : "RX");
return NULL;
@@ -315,7 +315,7 @@ int nvec_write_sync(struct nvec_chip *nvec,
if (!(wait_for_completion_timeout(&nvec->sync_write,
msecs_to_jiffies(2000)))) {
dev_warn(nvec->dev,
- "timeout waiting for sync write to complete\n");
+ "Timeout waiting for sync write to complete\n");
mutex_unlock(&nvec->sync_write_mutex);
return -ETIMEDOUT;
}
@@ -392,7 +392,7 @@ static void nvec_request_master(struct work_struct *work)
msecs_to_jiffies(5000));
if (err == 0) {
- dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
+ dev_warn(nvec->dev, "Timeout waiting for ec transfer\n");
nvec_gpio_set_value(nvec, 1);
msg->pos = 0;
}
@@ -454,7 +454,7 @@ static void nvec_dispatch(struct work_struct *work)
if (nvec->sync_write_pending ==
(msg->data[2] << 8) + msg->data[0]) {
- dev_dbg(nvec->dev, "sync write completed!\n");
+ dev_dbg(nvec->dev, "Sync write completed!\n");
nvec->sync_write_pending = 0;
nvec->last_sync_msg = msg;
complete(&nvec->sync_write);
@@ -477,7 +477,7 @@ static void nvec_tx_completed(struct nvec_chip *nvec)
{
/* We got an END_TRANS, let's skip this, maybe there's an event */
if (nvec->tx->pos != nvec->tx->size) {
- dev_err(nvec->dev, "premature END_TRANS, resending\n");
+ dev_err(nvec->dev, "Premature END_TRANS, resending\n");
nvec->tx->pos = 0;
nvec_gpio_set_value(nvec, 0);
} else {
@@ -608,7 +608,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
/* Filter out some errors */
if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
- dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
+ dev_err(nvec->dev, "Unexpected irq mask %lx\n", status);
return IRQ_HANDLED;
}
if ((status & I2C_SL_IRQ) == 0) {
@@ -631,7 +631,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
if (status != (I2C_SL_IRQ | RCVD))
nvec_invalid_flags(nvec, status, false);
break;
- case 1: /* command byte */
+ case 1: /* Command byte */
if (status != I2C_SL_IRQ) {
nvec_invalid_flags(nvec, status, true);
} else {
@@ -845,13 +845,12 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return PTR_ERR(nvec->gpiod);
}
- err = devm_request_irq(dev, nvec->irq, nvec_interrupt, 0,
+ err = devm_request_irq(dev, nvec->irq, nvec_interrupt, IRQF_NO_AUTOEN,
"nvec", nvec);
if (err) {
dev_err(dev, "couldn't request irq\n");
return -ENODEV;
}
- disable_irq(nvec->irq);
tegra_init_i2c_slave(nvec);
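Requesting the IRQ with IRQF_NO_AUTOEN, as the nvec change does, replaces the racy request-then-disable_irq() sequence: the line is registered but left masked until the driver is ready for it. A minimal sketch with placeholder my_* names:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_init(struct device *dev, int irq, void *ctx)
{
	int err;

	err = devm_request_irq(dev, irq, my_handler, IRQF_NO_AUTOEN,
			       "my-dev", ctx);
	if (err)
		return err;

	/* ... finish initialising the hardware ... */
	enable_irq(irq);	/* only now can my_handler() run */
	return 0;
}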
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 08ec3aae90ea..4cb904a5f8f4 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -544,7 +544,7 @@ static const struct backlight_ops dcon_bl_ops = {
static struct backlight_properties dcon_bl_props = {
.max_brightness = 15,
.type = BACKLIGHT_RAW,
- .power = FB_BLANK_UNBLANK,
+ .power = BACKLIGHT_POWER_ON,
};
static int dcon_reboot_notify(struct notifier_block *nb,
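BACKLIGHT_POWER_ON is the backlight-native replacement for the legacy fbdev FB_BLANK_UNBLANK constant, so the properties block no longer carries fbdev semantics. A sketch of a properties table using it, with the values copied from the hunk and everything else a placeholder:

#include <linux/backlight.h>

static const struct backlight_properties my_bl_props = {
	.max_brightness	= 15,
	.type		= BACKLIGHT_RAW,
	.power		= BACKLIGHT_POWER_ON,
};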
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index d87bace0a19b..552fd9b6e3e5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
@@ -8,6 +8,7 @@
#define R8190P_DEF_H
#include <linux/types.h>
+#include "r8192E_phy.h"
#define MAX_SILENT_RESET_RX_SLOT_NUM 10
@@ -137,7 +138,7 @@ struct tx_fwinfo_8190pci {
};
struct phy_sts_ofdm_819xpci {
- u8 trsw_gain_X[4];
+ u8 trsw_gain_X[RF90_PATH_MAX];
u8 pwdb_all;
u8 cfosho_X[4];
u8 cfotail_X[4];
@@ -226,7 +227,7 @@ struct rx_desc {
u16 Length:14;
u16 CRC32:1;
u16 ICV:1;
- u8 RxDrvInfoSize;
+ u8 rx_drv_info_size;
u8 Shift:2;
u8 PHYStatus:1;
u8 SWDec:1;
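Sizing trsw_gain_X[] with RF90_PATH_MAX ties every per-path array to a single definition instead of a magic 4, so shrinking or growing the enum updates all of them at once. The pattern in isolation, with illustrative names:

enum rf_path {
	RF_PATH_A,
	RF_PATH_B,
	RF_PATH_MAX,	/* kept last: doubles as the array bound */
};

struct my_phy_sts {
	unsigned char trsw_gain[RF_PATH_MAX];
};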
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index e470b49b0ff7..533cc4e723f6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -16,18 +16,18 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
struct sk_buff *skb;
unsigned char *seg_ptr;
struct cb_desc *tcb_desc;
- u8 bLastIniPkt;
+ u8 last_ini_pkt;
struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
do {
if ((len - frag_offset) > CMDPACKET_FRAG_SIZE) {
frag_length = CMDPACKET_FRAG_SIZE;
- bLastIniPkt = 0;
+ last_ini_pkt = 0;
} else {
frag_length = (u16)(len - frag_offset);
- bLastIniPkt = 1;
+ last_ini_pkt = 1;
}
if (type == DESC_PACKET_TYPE_NORMAL)
@@ -42,8 +42,8 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
- tcb_desc->bCmdOrInit = type;
- tcb_desc->bLastIniPkt = bLastIniPkt;
+ tcb_desc->cmd_or_init = type;
+ tcb_desc->last_ini_pkt = last_ini_pkt;
if (type == DESC_PACKET_TYPE_NORMAL) {
tcb_desc->pkt_size = frag_length;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index b3d4b3394284..2672b1ddf88e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -289,7 +289,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
for (i = 0; i < 6; i += 2) {
usValue = rtl92e_eeprom_read(dev,
- (EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1);
+ (EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1);
*(u16 *)(&addr[i]) = usValue;
}
eth_hw_addr_set(dev, addr);
@@ -987,10 +987,10 @@ void rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
if (dma_mapping_error(&priv->pdev->dev, mapping))
netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
memset(entry, 0, 12);
- entry->LINIP = cb_desc->bLastIniPkt;
+ entry->LINIP = cb_desc->last_ini_pkt;
entry->FirstSeg = 1;
entry->LastSeg = 1;
- if (cb_desc->bCmdOrInit == DESC_PACKET_TYPE_INIT) {
+ if (cb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
entry->CmdInit = DESC_PACKET_TYPE_INIT;
} else {
struct tx_desc *entry_tmp = (struct tx_desc *)entry;
@@ -1145,23 +1145,21 @@ static long _rtl92e_signal_scale_mapping(struct r8192_priv *priv, long currsig)
_pdrvinfo->RxRate == DESC90_RATE11M) &&\
!_pdrvinfo->RxHT)
-static void _rtl92e_query_rxphystatus(
- struct r8192_priv *priv,
- struct rtllib_rx_stats *pstats,
- struct rx_desc *pdesc,
- struct rx_fwinfo *pdrvinfo,
- struct rtllib_rx_stats *precord_stats,
- bool bpacket_match_bssid,
- bool bpacket_toself,
- bool bPacketBeacon,
- bool bToSelfBA
- )
+static void _rtl92e_query_rxphystatus(struct r8192_priv *priv,
+ struct rtllib_rx_stats *pstats,
+ struct rx_desc *pdesc,
+ struct rx_fwinfo *pdrvinfo,
+ struct rtllib_rx_stats *precord_stats,
+ bool bpacket_match_bssid,
+ bool bpacket_toself,
+ bool bPacketBeacon,
+ bool bToSelfBA)
{
struct phy_sts_ofdm_819xpci *pofdm_buf;
struct phy_sts_cck_819xpci *pcck_buf;
u8 *prxpkt;
u8 i, max_spatial_stream, tmp_rxevm;
- s8 rx_pwr[4], rx_pwr_all = 0;
+ s8 rx_pwr[RF90_PATH_MAX], rx_pwr_all = 0;
s8 rx_evmX;
u8 evm, pwdb_all;
u32 RSSI, total_rssi = 0;
@@ -1174,7 +1172,7 @@ static void _rtl92e_query_rxphystatus(
memset(precord_stats, 0, sizeof(struct rtllib_rx_stats));
pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID =
bpacket_match_bssid;
- pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
+ pstats->packet_to_self = precord_stats->packet_to_self = bpacket_toself;
pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;
pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
@@ -1266,8 +1264,8 @@ static void _rtl92e_query_rxphystatus(
else
sq = ((64 - sq) * 100) / 44;
}
- pstats->SignalQuality = sq;
- precord_stats->SignalQuality = sq;
+ pstats->signal_quality = sq;
+ precord_stats->signal_quality = sq;
pstats->RxMIMOSignalQuality[0] = sq;
precord_stats->RxMIMOSignalQuality[0] = sq;
pstats->RxMIMOSignalQuality[1] = -1;
@@ -1311,8 +1309,8 @@ static void _rtl92e_query_rxphystatus(
evm = rtl92e_evm_db_to_percent(rx_evmX);
if (bpacket_match_bssid) {
if (i == 0) {
- pstats->SignalQuality = evm & 0xff;
- precord_stats->SignalQuality = evm & 0xff;
+ pstats->signal_quality = evm & 0xff;
+ precord_stats->signal_quality = evm & 0xff;
}
pstats->RxMIMOSignalQuality[i] = evm & 0xff;
precord_stats->RxMIMOSignalQuality[i] = evm & 0xff;
@@ -1321,13 +1319,12 @@ static void _rtl92e_query_rxphystatus(
}
if (is_cck_rate) {
- pstats->SignalStrength = precord_stats->SignalStrength =
- _rtl92e_signal_scale_mapping(priv,
- (long)pwdb_all);
+ pstats->signal_strength = precord_stats->signal_strength =
+ _rtl92e_signal_scale_mapping(priv, (long)pwdb_all);
} else {
if (rf_rx_num != 0)
- pstats->SignalStrength = precord_stats->SignalStrength =
+ pstats->signal_strength = precord_stats->signal_strength =
_rtl92e_signal_scale_mapping(priv,
(long)(total_rssi /= rf_rx_num));
}
@@ -1355,10 +1352,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
priv->stats.slide_rssi_total -= last_rssi;
}
- priv->stats.slide_rssi_total += prev_st->SignalStrength;
+ priv->stats.slide_rssi_total += prev_st->signal_strength;
priv->stats.slide_signal_strength[slide_rssi_index++] =
- prev_st->SignalStrength;
+ prev_st->signal_strength;
if (slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
slide_rssi_index = 0;
@@ -1373,7 +1370,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (!bcheck)
return;
- if (!prev_st->bIsCCK && prev_st->bPacketToSelf) {
+ if (!prev_st->bIsCCK && prev_st->packet_to_self) {
for (rfpath = RF90_PATH_A; rfpath < priv->num_total_rf_path; rfpath++) {
if (priv->stats.rx_rssi_percentage[rfpath] == 0) {
priv->stats.rx_rssi_percentage[rfpath] =
@@ -1419,7 +1416,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (prev_st->RxPWDBAll >= 3)
prev_st->RxPWDBAll -= 3;
}
- if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
+ if (prev_st->packet_to_self || prev_st->bPacketBeacon ||
prev_st->bToSelfBA) {
if (priv->undecorated_smoothed_pwdb < 0)
priv->undecorated_smoothed_pwdb = prev_st->RxPWDBAll;
@@ -1439,8 +1436,8 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
rtl92e_update_rx_statistics(priv, prev_st);
}
- if (prev_st->SignalQuality != 0) {
- if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
+ if (prev_st->signal_quality != 0) {
+ if (prev_st->packet_to_self || prev_st->bPacketBeacon ||
prev_st->bToSelfBA) {
if (slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
@@ -1449,10 +1446,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
priv->stats.slide_evm_total -= last_evm;
}
- priv->stats.slide_evm_total += prev_st->SignalQuality;
+ priv->stats.slide_evm_total += prev_st->signal_quality;
priv->stats.slide_evm[slide_evm_index++] =
- prev_st->SignalQuality;
+ prev_st->signal_quality;
if (slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
slide_evm_index = 0;
@@ -1461,7 +1458,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
priv->stats.last_signal_strength_inpercent = tmp_val;
}
- if (prev_st->bPacketToSelf ||
+ if (prev_st->packet_to_self ||
prev_st->bPacketBeacon ||
prev_st->bToSelfBA) {
for (ij = 0; ij < 2; ij++) {
@@ -1496,7 +1493,7 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
u8 *tmp_buf;
u8 *praddr;
- tmp_buf = skb->data + pstats->RxDrvInfoSize + pstats->RxBufShift;
+ tmp_buf = skb->data + pstats->rx_drv_info_size + pstats->rx_buf_shift;
hdr = (struct ieee80211_hdr_3addr *)tmp_buf;
fc = le16_to_cpu(hdr->frame_control);
@@ -1509,7 +1506,7 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
(fc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
(fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
hdr->addr3) &&
- (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV));
+ (!pstats->hw_error) && (!pstats->bCRC) && (!pstats->bICV));
bpacket_toself = bpacket_match_bssid && /* check this */
ether_addr_equal(praddr, priv->rtllib->dev->dev_addr);
if (ieee80211_is_beacon(hdr->frame_control))
@@ -1521,9 +1518,8 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
rtl92e_copy_mpdu_stats(pstats, &previous_stats);
}
-static void _rtl92e_update_received_rate_histogram_stats(
- struct net_device *dev,
- struct rtllib_rx_stats *pstats)
+static void _rtl92e_update_received_rate_histogram_stats(struct net_device *dev,
+ struct rtllib_rx_stats *pstats)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u32 rcvType = 1;
@@ -1634,20 +1630,20 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->bICV = pdesc->ICV;
stats->bCRC = pdesc->CRC32;
- stats->bHwError = pdesc->CRC32 | pdesc->ICV;
+ stats->hw_error = pdesc->CRC32 | pdesc->ICV;
stats->Length = pdesc->Length;
if (stats->Length < 24)
- stats->bHwError |= 1;
+ stats->hw_error |= 1;
- if (stats->bHwError)
+ if (stats->hw_error)
return false;
- stats->RxDrvInfoSize = pdesc->RxDrvInfoSize;
- stats->RxBufShift = (pdesc->Shift) & 0x03;
+ stats->rx_drv_info_size = pdesc->rx_drv_info_size;
+ stats->rx_buf_shift = (pdesc->Shift) & 0x03;
stats->decrypted = !pdesc->SWDec;
- pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->RxBufShift);
+ pDrvInfo = (struct rx_fwinfo *)(skb->data + stats->rx_buf_shift);
stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT,
pDrvInfo->RxRate);
@@ -1837,8 +1833,8 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
rx_chk_cnt++;
if (priv->undecorated_smoothed_pwdb >= (RATE_ADAPTIVE_TH_HIGH + 5)) {
rx_chk_cnt = 0;
- } else if ((priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5))
- && (((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) &&
+ } else if ((priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5)) &&
+ (((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) &&
(priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_40M))
|| ((priv->current_chnl_bw == HT_CHANNEL_WIDTH_20) &&
(priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_20M)))) {
@@ -1859,7 +1855,6 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
rx_chk_cnt = 0;
}
-
slot_index = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum;
if (priv->rx_ctr == RegRxCounter) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 1b444529b59c..e507593b939c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -229,7 +229,7 @@ enum _RTL8192PCI_HW {
RATR_MCS6 | RATR_MCS7)
#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
- RATR_MCS14|RATR_MCS15)
+ RATR_MCS14 | RATR_MCS15)
DRIVER_RSSI = 0x32c,
MCS_TXAGC = 0x340,
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 18b948d4d86d..fbe624e235df 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -416,6 +416,7 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev)
return rtStatus;
}
+
bool rtl92e_config_bb(struct net_device *dev)
{
_rtl92e_init_bb_rf_reg_def(dev);
@@ -508,8 +509,8 @@ static void _rtl92e_set_tx_power_level(struct net_device *dev, u8 channel)
static u8 _rtl92e_phy_set_sw_chnl_cmd_array(struct net_device *dev,
struct sw_chnl_cmd *CmdTable,
u32 CmdTableIdx, u32 CmdTableSz,
- enum sw_chnl_cmd_id CmdID,
- u32 Para1, u32 Para2, u32 msDelay)
+ enum sw_chnl_cmd_id cmd_id,
+ u32 para1, u32 para2, u32 ms_delay)
{
struct sw_chnl_cmd *pCmd;
@@ -523,10 +524,10 @@ static u8 _rtl92e_phy_set_sw_chnl_cmd_array(struct net_device *dev,
}
pCmd = CmdTable + CmdTableIdx;
- pCmd->CmdID = CmdID;
- pCmd->Para1 = Para1;
- pCmd->Para2 = Para2;
- pCmd->msDelay = msDelay;
+ pCmd->cmd_id = cmd_id;
+ pCmd->para1 = para1;
+ pCmd->para2 = para2;
+ pCmd->ms_delay = ms_delay;
return true;
}
@@ -552,18 +553,18 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PreCommonCmd,
PreCommonCmdCnt++,
MAX_PRECMD_CNT,
- CmdID_SetTxPowerLevel,
+ cmd_id_set_tx_power_level,
0, 0, 0);
_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PreCommonCmd,
PreCommonCmdCnt++,
- MAX_PRECMD_CNT, CmdID_End,
+ MAX_PRECMD_CNT, cmd_id_end,
0, 0, 0);
PostCommonCmdCnt = 0;
_rtl92e_phy_set_sw_chnl_cmd_array(dev, ieee->PostCommonCmd,
PostCommonCmdCnt++,
- MAX_POSTCMD_CNT, CmdID_End,
+ MAX_POSTCMD_CNT, cmd_id_end,
0, 0, 0);
RfDependCmdCnt = 0;
@@ -578,14 +579,14 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
ieee->RfDependCmd,
RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
- CmdID_RF_WriteReg,
+ cmd_id_rf_write_reg,
rZebra1_Channel,
channel, 10);
_rtl92e_phy_set_sw_chnl_cmd_array(dev,
ieee->RfDependCmd,
RfDependCmdCnt++,
MAX_RFDEPENDCMD_CNT,
- CmdID_End, 0, 0, 0);
+ cmd_id_end, 0, 0, 0);
do {
switch (*stage) {
@@ -600,7 +601,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
break;
}
- if (CurrentCmd && CurrentCmd->CmdID == CmdID_End) {
+ if (CurrentCmd && CurrentCmd->cmd_id == cmd_id_end) {
if ((*stage) == 2)
return true;
(*stage)++;
@@ -610,31 +611,31 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
if (!CurrentCmd)
continue;
- switch (CurrentCmd->CmdID) {
- case CmdID_SetTxPowerLevel:
+ switch (CurrentCmd->cmd_id) {
+ case cmd_id_set_tx_power_level:
if (priv->ic_cut > VERSION_8190_BD)
_rtl92e_set_tx_power_level(dev,
channel);
break;
- case CmdID_WritePortUlong:
- rtl92e_writel(dev, CurrentCmd->Para1,
- CurrentCmd->Para2);
+ case cmd_id_write_port_ulong:
+ rtl92e_writel(dev, CurrentCmd->para1,
+ CurrentCmd->para2);
break;
- case CmdID_WritePortUshort:
- rtl92e_writew(dev, CurrentCmd->Para1,
- CurrentCmd->Para2);
+ case cmd_id_write_port_ushort:
+ rtl92e_writew(dev, CurrentCmd->para1,
+ CurrentCmd->para2);
break;
- case CmdID_WritePortUchar:
- rtl92e_writeb(dev, CurrentCmd->Para1,
- CurrentCmd->Para2);
+ case cmd_id_write_port_uchar:
+ rtl92e_writeb(dev, CurrentCmd->para1,
+ CurrentCmd->para2);
break;
- case CmdID_RF_WriteReg:
+ case cmd_id_rf_write_reg:
for (eRFPath = 0; eRFPath <
priv->num_total_rf_path; eRFPath++)
rtl92e_set_rf_reg(dev,
(enum rf90_radio_path)eRFPath,
- CurrentCmd->Para1, bMask12Bits,
- CurrentCmd->Para2 << 7);
+ CurrentCmd->para1, bMask12Bits,
+ CurrentCmd->para2 << 7);
break;
default:
break;
@@ -644,7 +645,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
} while (true);
} /*for (Number of RF paths)*/
- (*delay) = CurrentCmd->msDelay;
+ (*delay) = CurrentCmd->ms_delay;
(*step)++;
return false;
}
@@ -944,19 +945,19 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
case IG_Restore:
BitMask = 0x7f;
rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, BitMask,
- (u32)priv->initgain_backup.xaagccore1);
+ (u32)priv->initgain_backup.xaagccore1);
rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, BitMask,
- (u32)priv->initgain_backup.xbagccore1);
+ (u32)priv->initgain_backup.xbagccore1);
rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, BitMask,
- (u32)priv->initgain_backup.xcagccore1);
+ (u32)priv->initgain_backup.xcagccore1);
rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, BitMask,
- (u32)priv->initgain_backup.xdagccore1);
+ (u32)priv->initgain_backup.xdagccore1);
BitMask = bMaskByte2;
rtl92e_set_bb_reg(dev, rCCK0_CCA, BitMask,
- (u32)priv->initgain_backup.cca);
+ (u32)priv->initgain_backup.cca);
rtl92e_set_tx_power(dev,
- priv->rtllib->current_network.channel);
+ priv->rtllib->current_network.channel);
break;
}
}
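The renamed sw_chnl_cmd machinery is a small command-table interpreter: entries of {cmd_id, para1, para2, ms_delay} are appended until a cmd_id_end terminator, then walked by a switch. A stand-alone, simplified sketch of that idiom, not the driver's exact code:

#include <stdint.h>

enum my_cmd_id { MY_CMD_END, MY_CMD_SET_TX_POWER, MY_CMD_WRITE_REG };

struct my_chnl_cmd {
	enum my_cmd_id cmd_id;
	uint32_t para1, para2, ms_delay;
};

static void my_run_table(const struct my_chnl_cmd *cmd,
			 void (*write_reg)(uint32_t reg, uint32_t val),
			 void (*set_tx_power)(uint32_t chan))
{
	for (; cmd->cmd_id != MY_CMD_END; cmd++) {
		switch (cmd->cmd_id) {
		case MY_CMD_SET_TX_POWER:
			set_tx_power(cmd->para1);
			break;
		case MY_CMD_WRITE_REG:
			write_reg(cmd->para1, cmd->para2);
			break;
		default:
			break;
		}
		/* a real driver would honour cmd->ms_delay here */
	}
}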
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index ff4b4004b0d0..956dfbdd5b68 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -20,8 +20,6 @@ enum hw90_block {
enum rf90_radio_path {
RF90_PATH_A = 0,
RF90_PATH_B = 1,
- RF90_PATH_C = 2,
- RF90_PATH_D = 3,
RF90_PATH_MAX
};
@@ -45,13 +43,13 @@ void rtl92e_set_channel(struct net_device *dev, u8 channel);
void rtl92e_set_bw_mode(struct net_device *dev,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
-void rtl92e_init_gain(struct net_device *dev, u8 Operation);
+void rtl92e_init_gain(struct net_device *dev, u8 operation);
void rtl92e_set_rf_off(struct net_device *dev);
bool rtl92e_set_rf_power_state(struct net_device *dev,
enum rt_rf_power_state rf_power_state);
-void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation);
+void rtl92e_scan_op_backup(struct net_device *dev, u8 operation);
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 9eeae01dc98d..dc1301f1a0c1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -25,7 +25,7 @@
int hwwep = 1;
static char *ifname = "wlan%d";
-static struct pci_device_id rtl8192_pci_id_tbl[] = {
+static const struct pci_device_id rtl8192_pci_id_tbl[] = {
{PCI_DEVICE(0x10ec, 0x8192)},
{PCI_DEVICE(0x07aa, 0x0044)},
{PCI_DEVICE(0x07aa, 0x0047)},
@@ -173,7 +173,7 @@ bool rtl92e_set_rf_state(struct net_device *dev,
else
priv->blinked_ingpio = false;
rtllib_mgnt_disconnect(priv->rtllib,
- WLAN_REASON_DISASSOC_STA_HAS_LEFT);
+ WLAN_REASON_DISASSOC_STA_HAS_LEFT);
}
}
if ((change_source == RF_CHANGE_BY_HW) && !priv->hw_radio_off)
@@ -322,7 +322,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
if (network->flags & NETWORK_HAS_QOS_MASK) {
if (active_network &&
- (network->flags & NETWORK_HAS_QOS_PARAMETERS))
+ (network->flags & NETWORK_HAS_QOS_PARAMETERS))
network->qos_data.active = network->qos_data.supported;
if ((network->qos_data.active == 1) && (active_network == 1) &&
@@ -665,7 +665,7 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->init_gain_handler = rtl92e_init_gain;
priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq;
priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave;
- priv->rtllib->ScanOperationBackupHandler = rtl92e_scan_op_backup;
+ priv->rtllib->scan_operation_backup_handler = rtl92e_scan_op_backup;
}
static void _rtl92e_init_priv_variable(struct net_device *dev)
@@ -860,13 +860,13 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev)
skb = __skb_peek(&ring->queue);
tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
- tcb_desc->nStuckCount++;
+ tcb_desc->stuck_count++;
bCheckFwTxCnt = true;
- if (tcb_desc->nStuckCount > 1)
+ if (tcb_desc->stuck_count > 1)
netdev_info(dev,
- "%s: QueueID=%d tcb_desc->nStuckCount=%d\n",
+ "%s: QueueID=%d tcb_desc->stuck_count=%d\n",
__func__, QueueID,
- tcb_desc->nStuckCount);
+ tcb_desc->stuck_count);
}
}
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
@@ -1522,8 +1522,8 @@ static void _rtl92e_rx_normal(struct net_device *dev)
priv->rxbuffersize, DMA_FROM_DEVICE);
skb_put(skb, pdesc->Length);
- skb_reserve(skb, stats.RxDrvInfoSize +
- stats.RxBufShift);
+ skb_reserve(skb, stats.rx_drv_info_size +
+ stats.rx_buf_shift);
skb_trim(skb, skb->len - S_CRC_LEN);
rtllib_hdr = (struct ieee80211_hdr *)skb->data;
if (!is_multicast_ether_addr(rtllib_hdr->addr1)) {
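Constifying the PCI ID table lets it live in .rodata; MODULE_DEVICE_TABLE() works unchanged on a const table. The complete idiom, reusing the first ID from the hunk:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id my_pci_ids[] = {
	{ PCI_DEVICE(0x10ec, 0x8192) },
	{ }
};
MODULE_DEVICE_TABLE(pci, my_pci_ids);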
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 1d6d31292f41..8297d7e59415 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -300,7 +300,7 @@ struct r8192_priv {
u32 rf_reg_0value[4];
u8 num_total_rf_path;
- bool brfpath_rxenable[4];
+ bool brfpath_rxenable[RF90_PATH_MAX];
bool tx_pwr_data_read_from_eeprom;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 0c7f38a4a7db..e9ca5a8768ad 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1484,8 +1484,7 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
rtl92e_set_bb_reg(dev,
rOFDM1_TRxPathEnable,
0x1 << i, 0x1);
- dm_rx_path_sel_table.rf_enable_rssi_th[i]
- = 100;
+ dm_rx_path_sel_table.rf_enable_rssi_th[i] = 100;
disabled_rf_cnt--;
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 5aac9110bff6..7b6247acf6f4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -204,12 +204,11 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
&priv->rtllib->pwr_save_ctrl;
if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
- (priv->rtllib->link_state == MAC80211_LINKED)))
+ (priv->rtllib->link_state == MAC80211_LINKED)))
return;
if (psc->bLeisurePs) {
if (psc->lps_idle_count >= RT_CHECK_FOR_HANG_PERIOD) {
-
if (priv->rtllib->ps == RTLLIB_PS_DISABLED)
_rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST);
} else {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index c21a0560410a..fe3a42a4fcd5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -288,11 +288,11 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
if (priv->rtllib->rf_power_state != rf_off) {
priv->rtllib->actscanning = true;
- ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
+ ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_BACKUP);
rtllib_start_scan_syncro(priv->rtllib);
- ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);
+ ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_RESTORE);
}
ret = 0;
} else {
@@ -526,7 +526,8 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
mutex_unlock(&priv->wx_mutex);
if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
- ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
+ ieee->pairwise_key_type = KEY_TYPE_NA;
+ ieee->group_key_type = KEY_TYPE_NA;
rtl92e_cam_reset(dev);
memset(priv->rtllib->swcamtable, 0,
sizeof(struct sw_cam_table) * 32);
@@ -675,9 +676,9 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
u8 idx = 0, alg = 0, group = 0;
if ((encoding->flags & IW_ENCODE_DISABLED) ||
- ext->alg == IW_ENCODE_ALG_NONE) {
- ieee->pairwise_key_type = ieee->group_key_type
- = KEY_TYPE_NA;
+ ext->alg == IW_ENCODE_ALG_NONE) {
+ ieee->pairwise_key_type = KEY_TYPE_NA;
+ ieee->group_key_type = KEY_TYPE_NA;
rtl92e_cam_reset(dev);
memset(priv->rtllib->swcamtable, 0,
sizeof(struct sw_cam_table) * 32);
@@ -710,7 +711,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, key);
} else {
if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
- ieee->ht_info->current_ht_support)
+ ieee->ht_info->current_ht_support)
rtl92e_writeb(dev, 0x173, 1);
rtl92e_set_key(dev, 4, idx, alg,
(u8 *)ieee->ap_mac_addr, 0, key);
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index e38cd0c9c013..9c9c0bc0cfde 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -188,7 +188,7 @@ static void ht_iot_peer_determine(struct rtllib_device *ieee)
}
static u8 ht_iot_act_is_mgnt_use_cck_6m(struct rtllib_device *ieee,
- struct rtllib_network *network)
+ struct rtllib_network *network)
{
u8 retValue = 0;
@@ -559,7 +559,7 @@ void ht_initialize_bss_desc(struct bss_ht *bss_ht)
}
void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork)
+ struct rtllib_network *network)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
u8 bIOTAction = 0;
@@ -567,32 +567,32 @@ void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
/* The enable_ht flag is left unset here for the same reason it is
 * unset in rtllib_softmac_new_net. WB 2008.09.10
 */

- if (pNetwork->bssht.bd_support_ht) {
+ if (network->bssht.bd_support_ht) {
ht_info->current_ht_support = true;
- ht_info->peer_ht_spec_ver = pNetwork->bssht.bd_ht_spec_ver;
+ ht_info->peer_ht_spec_ver = network->bssht.bd_ht_spec_ver;
- if (pNetwork->bssht.bd_ht_cap_len > 0 &&
- pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->peer_ht_cap_buf))
+ if (network->bssht.bd_ht_cap_len > 0 &&
+ network->bssht.bd_ht_cap_len <= sizeof(ht_info->peer_ht_cap_buf))
memcpy(ht_info->peer_ht_cap_buf,
- pNetwork->bssht.bd_ht_cap_buf,
- pNetwork->bssht.bd_ht_cap_len);
+ network->bssht.bd_ht_cap_buf,
+ network->bssht.bd_ht_cap_len);
- if (pNetwork->bssht.bd_ht_info_len > 0 &&
- pNetwork->bssht.bd_ht_info_len <=
+ if (network->bssht.bd_ht_info_len > 0 &&
+ network->bssht.bd_ht_info_len <=
sizeof(ht_info->peer_ht_info_buf))
memcpy(ht_info->peer_ht_info_buf,
- pNetwork->bssht.bd_ht_info_buf,
- pNetwork->bssht.bd_ht_info_len);
+ network->bssht.bd_ht_info_buf,
+ network->bssht.bd_ht_info_len);
ht_info->current_rt2rt_aggregation =
- pNetwork->bssht.bd_rt2rt_aggregation;
+ network->bssht.bd_rt2rt_aggregation;
ht_info->current_rt2rt_long_slot_time =
- pNetwork->bssht.bd_rt2rt_long_slot_time;
+ network->bssht.bd_rt2rt_long_slot_time;
ht_iot_peer_determine(ieee);
ht_info->iot_action = 0;
- bIOTAction = ht_iot_act_is_mgnt_use_cck_6m(ieee, pNetwork);
+ bIOTAction = ht_iot_act_is_mgnt_use_cck_6m(ieee, network);
if (bIOTAction)
ht_info->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M;
bIOTAction = ht_iot_act_is_ccd_fsync(ieee);
@@ -609,23 +609,23 @@ void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
}
void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork)
+ struct rtllib_network *network)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
struct ht_info_ele *pPeerHTInfo =
- (struct ht_info_ele *)pNetwork->bssht.bd_ht_info_buf;
+ (struct ht_info_ele *)network->bssht.bd_ht_info_buf;
if (ht_info->current_ht_support) {
- if (pNetwork->bssht.bd_ht_info_len != 0)
+ if (network->bssht.bd_ht_info_len != 0)
ht_info->current_op_mode = pPeerHTInfo->opt_mode;
}
}
EXPORT_SYMBOL(HT_update_self_and_peer_setting);
-u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame)
+u8 ht_c_check(struct rtllib_device *ieee, u8 *frame)
{
if (ieee->ht_info->current_ht_support) {
- if ((is_qos_data_frame(pFrame) && frame_order(pFrame)) == 1) {
+ if ((is_qos_data_frame(frame) && frame_order(frame)) == 1) {
netdev_dbg(ieee->dev, "HT CONTROL FILED EXIST!!\n");
return true;
}
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index ed6a488bc7ac..89092cd434de 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -14,7 +14,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
rx_ts_records[ts->num]);
- struct rx_reorder_entry *pReorderEntry = NULL;
+ struct rx_reorder_entry *reorder_entry = NULL;
unsigned long flags = 0;
u8 index = 0;
@@ -23,31 +23,31 @@ static void RxPktPendingTimeout(struct timer_list *t)
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
if (ts->rx_timeout_indicate_seq != 0xffff) {
while (!list_empty(&ts->rx_pending_pkt_list)) {
- pReorderEntry = (struct rx_reorder_entry *)
+ reorder_entry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, list);
if (index == 0)
- ts->rx_indicate_seq = pReorderEntry->SeqNum;
+ ts->rx_indicate_seq = reorder_entry->seq_num;
- if (SN_LESS(pReorderEntry->SeqNum,
+ if (SN_LESS(reorder_entry->seq_num,
ts->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum,
+ SN_EQUAL(reorder_entry->seq_num,
ts->rx_indicate_seq)) {
- list_del_init(&pReorderEntry->list);
+ list_del_init(&reorder_entry->list);
- if (SN_EQUAL(pReorderEntry->SeqNum,
+ if (SN_EQUAL(reorder_entry->seq_num,
ts->rx_indicate_seq))
ts->rx_indicate_seq =
(ts->rx_indicate_seq + 1) % 4096;
netdev_dbg(ieee->dev,
- "%s(): Indicate SeqNum: %d\n",
- __func__, pReorderEntry->SeqNum);
+ "%s(): Indicate seq_num: %d\n",
+ __func__, reorder_entry->seq_num);
ieee->stats_IndicateArray[index] =
- pReorderEntry->prxb;
+ reorder_entry->prxb;
index++;
- list_add_tail(&pReorderEntry->list,
+ list_add_tail(&reorder_entry->list,
&ieee->RxReorder_Unused_List);
} else {
pkt_in_buf = true;
@@ -225,7 +225,7 @@ static void MakeTSEntry(struct ts_common_info *ts_common_info, u8 *addr,
}
bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
- u8 *addr, u8 TID, enum tr_select tx_rx_select, bool bAddNewTs)
+ u8 *addr, u8 TID, enum tr_select tx_rx_select, bool add_new_ts)
{
u8 UP = 0;
struct qos_tsinfo tspec;
@@ -269,7 +269,7 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
if (*ppTS)
return true;
- if (!bAddNewTs) {
+ if (!add_new_ts) {
netdev_dbg(ieee->dev, "add new TS failed(tid:%d)\n", UP);
return false;
}
@@ -336,8 +336,8 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
pRxReorderEntry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, list);
- netdev_dbg(ieee->dev, "%s(): Delete SeqNum %d!\n",
- __func__, pRxReorderEntry->SeqNum);
+ netdev_dbg(ieee->dev, "%s(): Delete seq_num %d!\n",
+ __func__, pRxReorderEntry->seq_num);
list_del_init(&pRxReorderEntry->list);
{
int i = 0;
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 022851b7f1a9..d6615f787d53 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -93,21 +93,21 @@ static inline void *netdev_priv_rsl(struct net_device *dev)
#define SUPPORT_CKIP_PK 0x10
#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
#define RT_IN_PS_LEVEL(psc, _PS_FLAG) \
- ((psc->CurPsLevel & _PS_FLAG) ? true : false)
+ ((psc->cur_ps_level & _PS_FLAG) ? true : false)
#define RT_CLEAR_PS_LEVEL(psc, _PS_FLAG) \
- (psc->CurPsLevel &= (~(_PS_FLAG)))
+ (psc->cur_ps_level &= (~(_PS_FLAG)))
/* defined for skb cb field */
/* At most 28 byte */
struct cb_desc {
/* Tx Desc Related flags (8-9) */
- u8 bLastIniPkt:1;
- u8 bCmdOrInit:1;
+ u8 last_ini_pkt:1;
+ u8 cmd_or_init:1;
u8 tx_dis_rate_fallback:1;
u8 tx_use_drv_assinged_rate:1;
u8 hw_sec:1;
- u8 nStuckCount;
+ u8 stuck_count;
/* Tx Firmware Related flags (10-11)*/
u8 cts_enable:1;
@@ -153,20 +153,20 @@ struct cb_desc {
};
enum sw_chnl_cmd_id {
- CmdID_End,
- CmdID_SetTxPowerLevel,
- CmdID_BBRegWrite10,
- CmdID_WritePortUlong,
- CmdID_WritePortUshort,
- CmdID_WritePortUchar,
- CmdID_RF_WriteReg,
+ cmd_id_end,
+ cmd_id_set_tx_power_level,
+ cmd_id_bbreg_write10,
+ cmd_id_write_port_ulong,
+ cmd_id_write_port_ushort,
+ cmd_id_write_port_uchar,
+ cmd_id_rf_write_reg,
};
struct sw_chnl_cmd {
- enum sw_chnl_cmd_id CmdID;
- u32 Para1;
- u32 Para2;
- u32 msDelay;
+ enum sw_chnl_cmd_id cmd_id;
+ u32 para1;
+ u32 para2;
+ u32 ms_delay;
};
/*--------------------------Define -------------------------------------------*/
@@ -339,12 +339,12 @@ enum rt_op_mode {
#define FC_QOS_BIT BIT(7)
#define is_data_frame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
-#define is_legacy_data_frame(pdu) (is_data_frame(pdu) && (!(pdu[0]&FC_QOS_BIT)))
+#define is_legacy_data_frame(pdu) (is_data_frame(pdu) && (!(pdu[0] & FC_QOS_BIT)))
#define is_qos_data_frame(pframe) \
- ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA)) == \
- (IEEE80211_STYPE_QOS_DATA|RTLLIB_FTYPE_DATA))
-#define frame_order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER)
-#define SN_LESS(a, b) (((a-b)&0x800) != 0)
+ ((*(u16 *)pframe & (IEEE80211_STYPE_QOS_DATA | RTLLIB_FTYPE_DATA)) == \
+ (IEEE80211_STYPE_QOS_DATA | RTLLIB_FTYPE_DATA))
+#define frame_order(pframe) (*(u16 *)pframe & IEEE80211_FCTL_ORDER)
+#define SN_LESS(a, b) (((a - b) & 0x800) != 0)
#define SN_EQUAL(a, b) (a == b)
#define MAX_DEV_ADDR_SIZE 8
@@ -414,24 +414,13 @@ enum _REG_PREAMBLE_MODE {
#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTLLIB_SCTL_FRAG)
#define WLAN_GET_SEQ_SEQ(seq) (((seq) & RTLLIB_SCTL_SEQ) >> 4)
-/* Authentication algorithms */
-#define WLAN_AUTH_OPEN 0
-#define WLAN_AUTH_SHARED_KEY 1
-#define WLAN_AUTH_LEAP 128
-
-#define WLAN_CAPABILITY_ESS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
-
-#define RTLLIB_STATMASK_SIGNAL (1<<0)
-#define RTLLIB_STATMASK_RSSI (1<<1)
-#define RTLLIB_STATMASK_NOISE (1<<2)
+#define RTLLIB_STATMASK_SIGNAL (1 << 0)
+#define RTLLIB_STATMASK_RSSI (1 << 1)
+#define RTLLIB_STATMASK_NOISE (1 << 2)
#define RTLLIB_STATMASK_WEMASK 0x7
-#define RTLLIB_CCK_MODULATION (1<<0)
-#define RTLLIB_OFDM_MODULATION (1<<1)
+#define RTLLIB_CCK_MODULATION (1 << 0)
+#define RTLLIB_OFDM_MODULATION (1 << 1)
#define RTLLIB_CCK_RATE_LEN 4
#define RTLLIB_CCK_RATE_1MB 0x02
@@ -475,27 +464,27 @@ struct rtllib_rx_stats {
u8 mask;
u16 len;
u16 Length;
- u8 SignalQuality;
+ u8 signal_quality;
s32 RecvSignalPower;
- u8 SignalStrength;
- u16 bHwError:1;
+ u8 signal_strength;
+ u16 hw_error:1;
u16 bCRC:1;
u16 bICV:1;
u16 decrypted:1;
u32 time_stamp_low;
u32 time_stamp_high;
- u8 RxDrvInfoSize;
- u8 RxBufShift;
+ u8 rx_drv_info_size;
+ u8 rx_buf_shift;
bool bIsAMPDU;
bool bFirstMPDU;
bool contain_htc;
u32 RxPWDBAll;
- u8 RxMIMOSignalStrength[4];
+ u8 RxMIMOSignalStrength[2];
s8 RxMIMOSignalQuality[2];
bool bPacketMatchBSSID;
bool bIsCCK;
- bool bPacketToSelf;
+ bool packet_to_self;
bool bPacketBeacon;
bool bToSelfBA;
};
@@ -518,11 +507,11 @@ struct rtllib_frag_entry {
struct rtllib_device;
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
+#define SEC_ACTIVE_KEY (1 << 4)
+#define SEC_AUTH_MODE (1 << 5)
+#define SEC_UNICAST_GROUP (1 << 6)
+#define SEC_LEVEL (1 << 7)
+#define SEC_ENABLED (1 << 8)
#define SEC_LEVEL_0 0 /* None */
#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
@@ -707,17 +696,17 @@ union frameqos {
#define MAX_WPA_IE_LEN 64
#define MAX_WZC_IE_LEN 256
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
+#define NETWORK_EMPTY_ESSID (1 << 0)
+#define NETWORK_HAS_OFDM (1 << 1)
+#define NETWORK_HAS_CCK (1 << 2)
/* QoS structure */
-#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
-#define NETWORK_HAS_QOS_INFORMATION (1<<4)
+#define NETWORK_HAS_QOS_PARAMETERS (1 << 3)
+#define NETWORK_HAS_QOS_INFORMATION (1 << 4)
#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \
NETWORK_HAS_QOS_INFORMATION)
/* 802.11h */
-#define NETWORK_HAS_ERP_VALUE (1<<10)
+#define NETWORK_HAS_ERP_VALUE (1 << 10)
#define QOS_QUEUE_NUM 4
#define QOS_OUI_LEN 3
@@ -962,7 +951,7 @@ struct rtllib_network {
bool unknown_cap_exist;
bool berp_info_valid;
bool buseprotection;
- u8 SignalStrength;
+ u8 signal_strength;
u8 RSSI;
struct list_head list;
};
@@ -1007,8 +996,8 @@ enum rtl_link_state {
#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
#define DEFAULT_FTS 2346
-#define CFG_RTLLIB_RESERVE_FCS (1<<0)
-#define CFG_RTLLIB_COMPUTE_FCS (1<<1)
+#define CFG_RTLLIB_RESERVE_FCS (1 << 0)
+#define CFG_RTLLIB_COMPUTE_FCS (1 << 1)
struct tx_pending {
int frag;
@@ -1026,7 +1015,7 @@ struct bandwidth_autoswitch {
#define REORDER_ENTRY_NUM 128
struct rx_reorder_entry {
struct list_head list;
- u16 SeqNum;
+ u16 seq_num;
struct rtllib_rxb *prxb;
};
@@ -1057,7 +1046,7 @@ struct rt_pwr_save_ctrl {
u8 lps_idle_count;
u8 lps_awake_intvl;
- u32 CurPsLevel;
+ u32 cur_ps_level;
};
#define RT_RF_CHANGE_SOURCE u32
@@ -1299,7 +1288,7 @@ struct rtllib_device {
u16 scan_watch_dog;
/* map of allowed channels. 0 is dummy */
- u8 active_channel_map[MAX_CHANNEL_NUMBER+1];
+ u8 active_channel_map[MAX_CHANNEL_NUMBER + 1];
int rate; /* current rate */
int basic_rate;
@@ -1471,9 +1460,9 @@ struct rtllib_device {
void (*set_wireless_mode)(struct net_device *dev, u8 wireless_mode);
bool (*get_half_nmode_support_by_aps_handler)(struct net_device *dev);
u8 (*rtllib_ap_sec_type)(struct rtllib_device *ieee);
- void (*init_gain_handler)(struct net_device *dev, u8 Operation);
- void (*ScanOperationBackupHandler)(struct net_device *dev,
- u8 Operation);
+ void (*init_gain_handler)(struct net_device *dev, u8 operation);
+ void (*scan_operation_backup_handler)(struct net_device *dev,
+ u8 operation);
void (*set_hw_reg_handler)(struct net_device *dev, u8 variable, u8 *val);
void (*allow_all_dest_addr_handler)(struct net_device *dev,
@@ -1497,32 +1486,32 @@ struct rtllib_device {
/* Uses the channel change callback directly
* instead of [start/stop] scan callbacks
*/
-#define IEEE_SOFTMAC_SCAN (1<<2)
+#define IEEE_SOFTMAC_SCAN (1 << 2)
/* Perform authentication and association handshake */
-#define IEEE_SOFTMAC_ASSOCIATE (1<<3)
+#define IEEE_SOFTMAC_ASSOCIATE (1 << 3)
/* Generate probe requests */
-#define IEEE_SOFTMAC_PROBERQ (1<<4)
+#define IEEE_SOFTMAC_PROBERQ (1 << 4)
/* Generate response to probe requests */
-#define IEEE_SOFTMAC_PROBERS (1<<5)
+#define IEEE_SOFTMAC_PROBERS (1 << 5)
/* The ieee802.11 stack will manage the netif queue
* wake/stop for the driver, taking care of 802.11
* fragmentation. See softmac.c for details.
*/
-#define IEEE_SOFTMAC_TX_QUEUE (1<<7)
+#define IEEE_SOFTMAC_TX_QUEUE (1 << 7)
/* Uses only the softmac_data_hard_start_xmit
* even for TX management frames.
*/
-#define IEEE_SOFTMAC_SINGLE_QUEUE (1<<8)
+#define IEEE_SOFTMAC_SINGLE_QUEUE (1 << 8)
/* Generate beacons. The stack will enqueue beacons
* to the card
*/
-#define IEEE_SOFTMAC_BEACONS (1<<6)
+#define IEEE_SOFTMAC_BEACONS (1 << 6)
static inline void *rtllib_priv(struct net_device *dev)
{
@@ -1737,21 +1726,21 @@ void ht_set_connect_bw_mode(struct rtllib_device *ieee,
void ht_update_default_setting(struct rtllib_device *ieee);
void ht_construct_capability_element(struct rtllib_device *ieee,
u8 *pos_ht_cap, u8 *len,
- u8 isEncrypt, bool bAssoc);
+ u8 is_encrypt, bool assoc);
void ht_construct_rt2rt_agg_element(struct rtllib_device *ieee,
u8 *posRT2RTAgg, u8 *len);
void ht_on_assoc_rsp(struct rtllib_device *ieee);
void ht_initialize_ht_info(struct rtllib_device *ieee);
void ht_initialize_bss_desc(struct bss_ht *bss_ht);
void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
+ struct rtllib_network *network);
void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
+ struct rtllib_network *network);
u8 ht_get_highest_mcs_rate(struct rtllib_device *ieee, u8 *pMCSRateSet,
u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
extern u16 MCS_DATA_RATE[2][2][77];
-u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame);
+u8 ht_c_check(struct rtllib_device *ieee, u8 *frame);
void ht_reset_iot_setting(struct rt_hi_throughput *ht_info);
bool is_ht_half_nmode_aps(struct rtllib_device *ieee);
u16 tx_count_to_data_rate(struct rtllib_device *ieee, u8 nDataRate);
@@ -1768,7 +1757,7 @@ void rtllib_tx_ba_inact_timeout(struct timer_list *t);
void rtllib_rx_ba_inact_timeout(struct timer_list *t);
void rtllib_reset_ba_entry(struct ba_record *ba);
bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *addr,
- u8 TID, enum tr_select tx_rx_select, bool bAddNewTs);
+ u8 TID, enum tr_select tx_rx_select, bool add_new_ts);
void rtllib_ts_init(struct rtllib_device *ieee);
void rtllib_ts_start_add_ba_process(struct rtllib_device *ieee,
struct tx_ts_record *pTxTS);
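The reformatted SN_LESS()/SN_EQUAL() macros compare 12-bit 802.11 sequence numbers modulo 4096: bit 11 of the difference says which value is "behind" on the wrapping counter. A worked restatement:

#define SN_LESS(a, b)	((((a) - (b)) & 0x800) != 0)
#define SN_EQUAL(a, b)	((a) == (b))

/*
 * SN_LESS(10, 20)  -> true  (10 is behind 20)
 * SN_LESS(4090, 5) -> true  (5 is ahead of 4090 once the counter wraps)
 * SN_LESS(20, 10)  -> false
 */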
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 74dc8326c886..e544379bfa91 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -637,12 +637,6 @@ static int rtllib_tkip_get_key(void *key, int len, u8 *seq, void *priv)
if (seq) {
/* Return the sequence number of the last transmitted frame. */
- u16 iv16 = tkey->tx_iv16;
- u32 iv32 = tkey->tx_iv32;
-
- if (iv16 == 0)
- iv32--;
- iv16--;
seq[0] = tkey->tx_iv16;
seq[1] = tkey->tx_iv16 >> 8;
seq[2] = tkey->tx_iv32;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 84ca5d769b7e..8fe224a83dd6 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -403,26 +403,26 @@ drop:
}
static bool add_reorder_entry(struct rx_ts_record *ts,
- struct rx_reorder_entry *pReorderEntry)
+ struct rx_reorder_entry *reorder_entry)
{
struct list_head *list = &ts->rx_pending_pkt_list;
while (list->next != &ts->rx_pending_pkt_list) {
- if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)
+ if (SN_LESS(reorder_entry->seq_num, ((struct rx_reorder_entry *)
list_entry(list->next, struct rx_reorder_entry,
- list))->SeqNum))
+ list))->seq_num))
list = list->next;
- else if (SN_EQUAL(pReorderEntry->SeqNum,
+ else if (SN_EQUAL(reorder_entry->seq_num,
((struct rx_reorder_entry *)list_entry(list->next,
- struct rx_reorder_entry, list))->SeqNum))
+ struct rx_reorder_entry, list))->seq_num))
return false;
else
break;
}
- pReorderEntry->list.next = list->next;
- pReorderEntry->list.next->prev = &pReorderEntry->list;
- pReorderEntry->list.prev = list;
- list->next = &pReorderEntry->list;
+ reorder_entry->list.next = list->next;
+ reorder_entry->list.next->prev = &reorder_entry->list;
+ reorder_entry->list.prev = list;
+ list->next = &reorder_entry->list;
return true;
}
@@ -504,8 +504,8 @@ void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee,
pRxReorderEntry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry, list);
- netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n", __func__,
- pRxReorderEntry->SeqNum);
+ netdev_dbg(ieee->dev, "%s(): Indicate seq_num %d!\n", __func__,
+ pRxReorderEntry->seq_num);
list_del_init(&pRxReorderEntry->list);
ieee->rfd_array[rfd_cnt] = pRxReorderEntry->prxb;
@@ -521,10 +521,10 @@ void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee,
static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
struct rtllib_rxb *prxb,
- struct rx_ts_record *ts, u16 SeqNum)
+ struct rx_ts_record *ts, u16 seq_num)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- struct rx_reorder_entry *pReorderEntry = NULL;
+ struct rx_reorder_entry *reorder_entry = NULL;
u8 win_size = ht_info->rx_reorder_win_size;
u16 win_end = 0;
u8 index = 0;
@@ -533,20 +533,20 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
netdev_dbg(ieee->dev,
"%s(): Seq is %d, ts->rx_indicate_seq is %d, win_size is %d\n",
- __func__, SeqNum, ts->rx_indicate_seq, win_size);
+ __func__, seq_num, ts->rx_indicate_seq, win_size);
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
win_end = (ts->rx_indicate_seq + win_size - 1) % 4096;
/* Rx Reorder initialize condition.*/
if (ts->rx_indicate_seq == 0xffff)
- ts->rx_indicate_seq = SeqNum;
+ ts->rx_indicate_seq = seq_num;
- /* Drop out the packet which SeqNum is smaller than WinStart */
- if (SN_LESS(SeqNum, ts->rx_indicate_seq)) {
+ /* Drop out the packet which seq_num is smaller than WinStart */
+ if (SN_LESS(seq_num, ts->rx_indicate_seq)) {
netdev_dbg(ieee->dev,
"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
- ts->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, seq_num);
ht_info->rx_reorder_drop_counter++;
{
int i;
@@ -561,62 +561,62 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
}
/* Sliding window manipulation. Conditions includes:
- * 1. Incoming SeqNum is equal to WinStart =>Window shift 1
- * 2. Incoming SeqNum is larger than the win_end => Window shift N
+ * 1. Incoming seq_num is equal to WinStart =>Window shift 1
+ * 2. Incoming seq_num is larger than the win_end => Window shift N
*/
- if (SN_EQUAL(SeqNum, ts->rx_indicate_seq)) {
+ if (SN_EQUAL(seq_num, ts->rx_indicate_seq)) {
ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) % 4096;
match_win_start = true;
- } else if (SN_LESS(win_end, SeqNum)) {
- if (SeqNum >= (win_size - 1))
- ts->rx_indicate_seq = SeqNum + 1 - win_size;
+ } else if (SN_LESS(win_end, seq_num)) {
+ if (seq_num >= (win_size - 1))
+ ts->rx_indicate_seq = seq_num + 1 - win_size;
else
ts->rx_indicate_seq = 4095 -
- (win_size - (SeqNum + 1)) + 1;
+ (win_size - (seq_num + 1)) + 1;
netdev_dbg(ieee->dev,
"Window Shift! IndicateSeq: %d, NewSeq: %d\n",
- ts->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, seq_num);
}
/* Indication process.
* After Packet dropping and Sliding Window shifting as above, we can
- * now just indicate the packets with the SeqNum smaller than latest
+ * now just indicate the packets with the seq_num smaller than latest
* WinStart and buffer the other packets.
*
* For Rx Reorder condition:
- * 1. All packets with SeqNum smaller than WinStart => Indicate
- * 2. All packets with SeqNum larger than or equal to
+ * 1. All packets with seq_num smaller than WinStart => Indicate
+ * 2. All packets with seq_num larger than or equal to
* WinStart => Buffer it.
*/
if (match_win_start) {
/* Current packet is going to be indicated.*/
netdev_dbg(ieee->dev,
"Packets indication! IndicateSeq: %d, NewSeq: %d\n",
- ts->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, seq_num);
ieee->prxb_indicate_array[0] = prxb;
index = 1;
} else {
/* Current packet is going to be inserted into pending list.*/
if (!list_empty(&ieee->RxReorder_Unused_List)) {
- pReorderEntry = (struct rx_reorder_entry *)
+ reorder_entry = (struct rx_reorder_entry *)
list_entry(ieee->RxReorder_Unused_List.next,
struct rx_reorder_entry, list);
- list_del_init(&pReorderEntry->list);
+ list_del_init(&reorder_entry->list);
/* Make a reorder entry and insert
 * it into the pending packet list.
 */
- pReorderEntry->SeqNum = SeqNum;
- pReorderEntry->prxb = prxb;
+ reorder_entry->seq_num = seq_num;
+ reorder_entry->prxb = prxb;
- if (!add_reorder_entry(ts, pReorderEntry)) {
+ if (!add_reorder_entry(ts, reorder_entry)) {
int i;
netdev_dbg(ieee->dev,
"%s(): Duplicate packet is dropped. IndicateSeq: %d, NewSeq: %d\n",
__func__, ts->rx_indicate_seq,
- SeqNum);
- list_add_tail(&pReorderEntry->list,
+ seq_num);
+ list_add_tail(&reorder_entry->list,
&ieee->RxReorder_Unused_List);
for (i = 0; i < prxb->nr_subframes; i++)
@@ -626,7 +626,7 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
} else {
netdev_dbg(ieee->dev,
"Pkt insert into struct buffer. IndicateSeq: %d, NewSeq: %d\n",
- ts->rx_indicate_seq, SeqNum);
+ ts->rx_indicate_seq, seq_num);
}
} else {
/* Packets are dropped if there are not enough reorder
@@ -653,12 +653,12 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
netdev_dbg(ieee->dev, "%s(): start RREORDER indicate\n",
__func__);
- pReorderEntry = (struct rx_reorder_entry *)
+ reorder_entry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry,
list);
- if (SN_LESS(pReorderEntry->SeqNum, ts->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq)) {
+ if (SN_LESS(reorder_entry->seq_num, ts->rx_indicate_seq) ||
+ SN_EQUAL(reorder_entry->seq_num, ts->rx_indicate_seq)) {
/* This protects the buffer from overflow. */
if (index >= REORDER_WIN_SIZE) {
netdev_err(ieee->dev,
@@ -668,18 +668,18 @@ static void rx_reorder_indicate_packet(struct rtllib_device *ieee,
break;
}
- list_del_init(&pReorderEntry->list);
+ list_del_init(&reorder_entry->list);
- if (SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq))
+ if (SN_EQUAL(reorder_entry->seq_num, ts->rx_indicate_seq))
ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) %
4096;
- ieee->prxb_indicate_array[index] = pReorderEntry->prxb;
- netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n",
- __func__, pReorderEntry->SeqNum);
+ ieee->prxb_indicate_array[index] = reorder_entry->prxb;
+ netdev_dbg(ieee->dev, "%s(): Indicate seq_num %d!\n",
+ __func__, reorder_entry->seq_num);
index++;
- list_add_tail(&pReorderEntry->list,
+ list_add_tail(&reorder_entry->list,
&ieee->RxReorder_Unused_List);
} else {
pkt_in_buf = true;
@@ -729,12 +729,12 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
u16 llc_offset = sizeof(struct ieee80211_hdr_3addr);
bool is_aggregate_frame = false;
- u16 nSubframe_Length;
+ u16 subframe_len;
u8 pad_len = 0;
- u16 SeqNum = 0;
+ u16 seq_num = 0;
struct sk_buff *sub_skb;
/* just for debug purpose */
- SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl));
+ seq_num = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl));
if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
(((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved))
is_aggregate_frame = true;
@@ -781,23 +781,23 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
memcpy(rxb->dst, dst, ETH_ALEN);
while (skb->len > ETHERNET_HEADER_SIZE) {
/* Offset 12 denote 2 mac address */
- nSubframe_Length = *((u16 *)(skb->data + 12));
- nSubframe_Length = (nSubframe_Length >> 8) +
- (nSubframe_Length << 8);
+ subframe_len = *((u16 *)(skb->data + 12));
+ subframe_len = (subframe_len >> 8) +
+ (subframe_len << 8);
- if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
+ if (skb->len < (ETHERNET_HEADER_SIZE + subframe_len)) {
netdev_info(ieee->dev,
"%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",
__func__, rxb->nr_subframes);
netdev_info(ieee->dev,
"%s: A-MSDU parse error!! Subframe Length: %d\n",
- __func__, nSubframe_Length);
+ __func__, subframe_len);
netdev_info(ieee->dev,
- "nRemain_Length is %d and nSubframe_Length is : %d\n",
- skb->len, nSubframe_Length);
+ "nRemain_Length is %d and subframe_len is : %d\n",
+ skb->len, subframe_len);
netdev_info(ieee->dev,
- "The Packet SeqNum is %d\n",
- SeqNum);
+ "The Packet seq_num is %d\n",
+ seq_num);
return 0;
}
@@ -813,11 +813,11 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
*/
/* Allocate new skb for releasing to upper layer */
- sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+ sub_skb = dev_alloc_skb(subframe_len + 12);
if (!sub_skb)
return 0;
skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, skb->data, nSubframe_Length);
+ skb_put_data(sub_skb, skb->data, subframe_len);
sub_skb->dev = ieee->dev;
rxb->subframes[rxb->nr_subframes++] = sub_skb;
@@ -826,10 +826,10 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
"ParseSubframe(): Too many Subframes! Packets dropped!\n");
break;
}
- skb_pull(skb, nSubframe_Length);
+ skb_pull(skb, subframe_len);
if (skb->len != 0) {
- pad_len = 4 - ((nSubframe_Length +
+ pad_len = 4 - ((subframe_len +
ETHERNET_HEADER_SIZE) % 4);
if (pad_len == 4)
pad_len = 0;
@@ -1227,7 +1227,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb
struct lib80211_crypt_data *crypt = NULL;
struct rtllib_rxb *rxb = NULL;
struct rx_ts_record *ts = NULL;
- u16 fc, sc, SeqNum = 0;
+ u16 fc, sc, seq_num = 0;
u8 type, stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0;
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
@@ -1321,7 +1321,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb
if (ieee->current_network.qos_data.active && is_qos_data_frame(skb->data)
&& !is_multicast_ether_addr(hdr->addr1)) {
TID = frame_qos_tid(skb->data);
- SeqNum = WLAN_GET_SEQ_SEQ(sc);
+ seq_num = WLAN_GET_SEQ_SEQ(sc);
rtllib_get_ts(ieee, (struct ts_common_info **)&ts, hdr->addr2, TID,
RX_DIR, true);
if (TID != 0 && TID != 3)
@@ -1362,7 +1362,7 @@ static int rtllib_rx_infra_adhoc(struct rtllib_device *ieee, struct sk_buff *skb
if (!ieee->ht_info->cur_rx_reorder_enable || !ts)
rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
else
- rx_reorder_indicate_packet(ieee, rxb, ts, SeqNum);
+ rx_reorder_indicate_packet(ieee, rxb, ts, seq_num);
dev_kfree_skb(skb);
@@ -2177,8 +2177,8 @@ static inline int rtllib_network_init(
network->marvell_cap_exist = false;
network->airgo_cap_exist = false;
network->turbo_enable = 0;
- network->SignalStrength = stats->SignalStrength;
- network->RSSI = stats->SignalStrength;
+ network->signal_strength = stats->signal_strength;
+ network->RSSI = stats->signal_strength;
network->country_ie_len = 0;
memset(network->country_ie_buf, 0, MAX_IE_LEN);
ht_initialize_bss_desc(&network->bssht);
@@ -2215,7 +2215,7 @@ static inline int rtllib_network_init(
}
if (rtllib_is_empty_essid(network->ssid, network->ssid_len))
network->flags |= NETWORK_EMPTY_ESSID;
- stats->signal = 30 + (stats->SignalStrength * 70) / 100;
+ stats->signal = 30 + (stats->signal_strength * 70) / 100;
stats->noise = rtllib_translate_todbm((u8)(100 - stats->signal)) - 25;
memcpy(&network->stats, stats, sizeof(network->stats));
@@ -2334,7 +2334,7 @@ static inline void update_network(struct rtllib_device *ieee,
src->wmm_param[3].ac_aci_acm_aifsn)
memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
- dst->SignalStrength = src->SignalStrength;
+ dst->signal_strength = src->signal_strength;
dst->RSSI = src->RSSI;
dst->turbo_enable = src->turbo_enable;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 11542aea4a20..c59686d68a33 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -314,7 +314,7 @@ void rtllib_wx_sync_scan_wq(void *data)
/* wait for ps packet to be kicked out successfully */
msleep(50);
- ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
+ ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_BACKUP);
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht &&
ieee->ht_info->cur_bw_40mhz) {
@@ -339,7 +339,7 @@ void rtllib_wx_sync_scan_wq(void *data)
ieee->set_chan(ieee->dev, chan);
}
- ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);
+ ieee->scan_operation_backup_handler(ieee->dev, SCAN_OPT_RESTORE);
ieee->link_state = MAC80211_LINKED;
ieee->link_change(ieee->dev);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 1fabc5137a4c..ab344d676bb9 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -136,10 +136,6 @@ void r8712_free_recvframe(union recv_frame *precvframe,
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
struct recv_stat *prxstat)
{
- u16 drvinfo_sz;
-
- drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
- drvinfo_sz <<= 3;
/*TODO:
* Offset 0
*/
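The deleted lines computed the driver-info length from bits [19:16] of rxdw0, scaled by 8 bytes, and never used the result. Had the value been needed, the kernel's bitfield helpers express the extraction more directly; a sketch assuming the same bit layout:

#include <linux/bitfield.h>
#include <linux/types.h>

/* bits [19:16] of rxdw0 hold the driver-info size in 8-byte units */
#define RXDW0_DRVINFO_SZ        GENMASK(19, 16)

static u16 drvinfo_size_bytes(__le32 rxdw0)
{
        return le32_get_bits(rxdw0, RXDW0_DRVINFO_SZ) * 8;
}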
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index bbd4a13c7bb9..ffeb91dd28c4 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -528,8 +528,9 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
if (unicast_key)
memcpy(&psetstakey_para->key, &sta->x_UncstKey, 16);
else
- memcpy(&psetstakey_para->key, &psecuritypriv->XGrpKey[psecuritypriv->XGrpKeyid - 1].
- skey, 16);
+ memcpy(&psetstakey_para->key,
+ &psecuritypriv->XGrpKey[psecuritypriv->XGrpKeyid - 1].skey,
+ 16);
r8712_enqueue_cmd(pcmdpriv, ph2c);
}
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 2613b3c2acfc..268844af57f0 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -736,7 +736,7 @@ void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj
void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
-void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt,
+void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt,
u32 tryPktInterval, u32 firstStageTO);
struct _cmd_callback {
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index 6789a4c98564..20e080e284dd 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -48,8 +48,8 @@ static uint _init_intf_hdl(struct _adapter *padapter,
set_intf_funs = &(r8712_usb_set_intf_funs);
set_intf_ops = &r8712_usb_set_intf_ops;
init_intf_priv = &r8712_usb_init_intf_priv;
- pintf_priv = pintf_hdl->pintfpriv = kmalloc(sizeof(struct intf_priv),
- GFP_ATOMIC);
+ pintf_priv = kmalloc(sizeof(*pintf_priv), GFP_ATOMIC);
+ pintf_hdl->pintfpriv = pintf_priv;
if (!pintf_priv)
goto _init_intf_hdl_fail;
pintf_hdl->adapter = (u8 *)padapter;
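The hunk above applies two common kernel cleanups: kmalloc(sizeof(*ptr), ...) instead of spelling out the struct type, and unchaining the double assignment so the NULL check reads clearly. The preferred allocation shape, with an illustrative struct:

#include <linux/slab.h>

struct intf_priv_sketch {        /* illustrative fields, not the driver's */
        void *intf_dev;
        u32 max_iosz;
};

static struct intf_priv_sketch *alloc_intf_priv(gfp_t gfp)
{
        /* sizeof(*priv) stays correct if the pointee type is ever changed */
        struct intf_priv_sketch *priv = kmalloc(sizeof(*priv), gfp);

        return priv; /* caller checks for NULL */
}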
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 0a3451cdc8a1..4a34824830e3 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -221,7 +221,7 @@ static void r8712_usb_read_port_complete(struct urb *purb)
fallthrough;
case -EPROTO:
r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
- (unsigned char *)precvbuf);
+ (unsigned char *)precvbuf);
break;
case -EINPROGRESS:
netdev_err(padapter->pnetdev, "ERROR: URB IS IN PROGRESS!\n");
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
index f23e29b679fb..8d48c61961a6 100644
--- a/drivers/staging/rtl8723bs/Kconfig
+++ b/drivers/staging/rtl8723bs/Kconfig
@@ -3,7 +3,6 @@ config RTL8723BS
tristate "Realtek RTL8723BS SDIO Wireless LAN NIC driver"
depends on WLAN && MMC && CFG80211
depends on m
- select CFG80211_WEXT
select CRYPTO
select CRYPTO_LIB_ARC4
help
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index 7f5067e89295..ba200ee669f3 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -3,7 +3,6 @@ r8723bs-y = \
core/rtw_ap.o \
core/rtw_btcoex.o \
core/rtw_cmd.o \
- core/rtw_debug.o \
core/rtw_efuse.o \
core/rtw_io.o \
core/rtw_ioctl_set.o \
@@ -12,7 +11,6 @@ r8723bs-y = \
core/rtw_mlme_ext.o \
core/rtw_pwrctrl.o \
core/rtw_recv.o \
- core/rtw_rf.o \
core/rtw_security.o \
core/rtw_sta_mgt.o \
core/rtw_wlan_util.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index e4063713fecc..e55b4f7e0aef 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <asm/unaligned.h>
void init_mlme_ap_info(struct adapter *padapter)
@@ -277,7 +276,7 @@ void expire_timeout_chk(struct adapter *padapter)
/* switch to correct channel of current network before issue keep-alive frames */
if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
backup_oper_channel = rtw_get_oper_ch(padapter);
- SelectChannel(padapter, pmlmeext->cur_channel);
+ r8723bs_select_channel(padapter, pmlmeext->cur_channel);
}
/* issue null data to check sta alive*/
@@ -315,7 +314,7 @@ void expire_timeout_chk(struct adapter *padapter)
}
if (backup_oper_channel > 0) /* back to the original operation channel */
- SelectChannel(padapter, backup_oper_channel);
+ r8723bs_select_channel(padapter, backup_oper_channel);
}
associated_clients_update(padapter, updated);
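expire_timeout_chk() brackets its keep-alive probes with a save/switch/restore of the operating channel, now through the renamed r8723bs_select_channel(). The shape of that bracket as a hedged sketch (the stand-in helpers drop the struct adapter argument for brevity):

/* stand-ins; the driver's equivalents take a struct adapter * */
unsigned char get_oper_channel(void);
void select_channel(unsigned char ch);
void issue_keepalives(void);

static void probe_on_channel(unsigned char cur_channel)
{
        unsigned char backup = 0;

        /* switch only if we are not already on the target channel */
        if (get_oper_channel() != cur_channel) {
                backup = get_oper_channel();
                select_channel(cur_channel);
        }

        issue_keepalives();

        if (backup > 0) /* back to the original operating channel */
                select_channel(backup);
}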
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
index 62cbf84b079a..d54095f50113 100644
--- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtw_btcoex.h>
#include <hal_btcoex.h>
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index d3f10a3cf972..84ce7307d8f3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
@@ -1884,9 +1883,6 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
/* copy pdev_network information to pmlmepriv->cur_network */
memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork)));
- /* reset ds_config */
- /* tgt_network->network.configuration.ds_config = (u32)rtw_ch2freq(pnetwork->configuration.ds_config); */
-
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
deleted file mode 100644
index 5354fdd11c9b..000000000000
--- a/drivers/staging/rtl8723bs/core/rtw_debug.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <rtw_debug.h>
-#include <hal_btcoex.h>
-
-#include <rtw_version.h>
-
-static void dump_4_regs(struct adapter *adapter, int offset)
-{
- u32 reg[4];
- int i;
-
- for (i = 0; i < 4; i++)
- reg[i] = rtw_read32(adapter, offset + i);
-
- netdev_dbg(adapter->pnetdev, "0x%03x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, reg[0], reg[1], reg[2], reg[3]);
-}
-
-void mac_reg_dump(struct adapter *adapter)
-{
- int i;
-
- netdev_dbg(adapter->pnetdev, "======= MAC REG =======\n");
-
- for (i = 0x0; i < 0x800; i += 4)
- dump_4_regs(adapter, i);
-}
-
-void bb_reg_dump(struct adapter *adapter)
-{
- int i;
-
- netdev_dbg(adapter->pnetdev, "======= BB REG =======\n");
-
- for (i = 0x800; i < 0x1000 ; i += 4)
- dump_4_regs(adapter, i);
-}
-
-static void dump_4_rf_regs(struct adapter *adapter, int path, int offset)
-{
- u8 reg[4];
- int i;
-
- for (i = 0; i < 4; i++)
- reg[i] = rtw_hal_read_rfreg(adapter, path, offset + i,
- 0xffffffff);
-
- netdev_dbg(adapter->pnetdev, "0x%02x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, reg[0], reg[1], reg[2], reg[3]);
-}
-
-void rf_reg_dump(struct adapter *adapter)
-{
- int i, path = 0;
-
- netdev_dbg(adapter->pnetdev, "======= RF REG =======\n");
-
- netdev_dbg(adapter->pnetdev, "RF_Path(%x)\n", path);
- for (i = 0; i < 0x100; i++)
- dump_4_rf_regs(adapter, path, i);
-}
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index eb848f9bbf2c..8b671f8a7965 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
#include <linux/jiffies.h>
@@ -38,7 +37,7 @@ Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value)
if (fakeEfuseBank == 0)
*Value = fakeEfuseContent[Offset];
else
- *Value = fakeBTEfuseContent[fakeEfuseBank-1][Offset];
+ *Value = fakeBTEfuseContent[fakeEfuseBank - 1][Offset];
return true;
}
@@ -50,7 +49,7 @@ Efuse_Write1ByteToFakeContent(u16 Offset, u8 Value)
if (fakeEfuseBank == 0)
fakeEfuseContent[Offset] = Value;
else
- fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value;
+ fakeBTEfuseContent[fakeEfuseBank - 1][Offset] = Value;
return true;
}
@@ -206,21 +205,21 @@ u16 Address)
if (Address < contentLen) {/* E-fuse 512Byte */
/* Write E-fuse Register address bit0~7 */
temp = Address & 0xFF;
- rtw_write8(Adapter, EFUSE_CTRL+1, temp);
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2);
+ rtw_write8(Adapter, EFUSE_CTRL + 1, temp);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 2);
/* Write E-fuse Register address bit8~9 */
temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
- rtw_write8(Adapter, EFUSE_CTRL+2, temp);
+ rtw_write8(Adapter, EFUSE_CTRL + 2, temp);
/* Write 0x30[31]= 0 */
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
temp = Bytetemp & 0x7F;
- rtw_write8(Adapter, EFUSE_CTRL+3, temp);
+ rtw_write8(Adapter, EFUSE_CTRL + 3, temp);
/* Wait Write-ready (0x30[31]= 1) */
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
while (!(Bytetemp & 0x80)) {
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
k++;
if (k == 1000)
break;
@@ -253,16 +252,16 @@ bool bPseudoTest)
/* -----------------e-fuse reg ctrl --------------------------------- */
/* address */
- rtw_write8(padapter, EFUSE_CTRL+1, (u8)(addr&0xff));
- rtw_write8(padapter, EFUSE_CTRL+2, ((u8)((addr>>8) & 0x03)) |
- (rtw_read8(padapter, EFUSE_CTRL+2)&0xFC));
+ rtw_write8(padapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
+ rtw_write8(padapter, EFUSE_CTRL + 2, ((u8)((addr >> 8) & 0x03)) |
+ (rtw_read8(padapter, EFUSE_CTRL + 2) & 0xFC));
/* rtw_write8(padapter, EFUSE_CTRL+3, 0x72); read cmd */
/* Write bit 32 0 */
- readbyte = rtw_read8(padapter, EFUSE_CTRL+3);
- rtw_write8(padapter, EFUSE_CTRL+3, (readbyte & 0x7f));
+ readbyte = rtw_read8(padapter, EFUSE_CTRL + 3);
+ rtw_write8(padapter, EFUSE_CTRL + 3, (readbyte & 0x7f));
- while (!(0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 1000)) {
+ while (!(0x80 & rtw_read8(padapter, EFUSE_CTRL + 3)) && (tmpidx < 1000)) {
mdelay(1);
tmpidx++;
}
@@ -282,31 +281,22 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
{
u8 tmpidx = 0;
u8 bResult = false;
- u32 efuseValue;
if (bPseudoTest)
return Efuse_Write1ByteToFakeContent(addr, data);
-
/* -----------------e-fuse reg ctrl --------------------------------- */
/* address */
-
- efuseValue = rtw_read32(padapter, EFUSE_CTRL);
- efuseValue |= (BIT21|BIT31);
- efuseValue &= ~(0x3FFFF);
- efuseValue |= ((addr<<8 | data) & 0x3FFFF);
-
-
/* <20130227, Kordan> 8192E MP chip A-cut had better not set 0x34[11] until B-Cut. */
	/* <20130121, Kordan> For SMIC EFUSE specification. */
/* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
/* PHY_SetMacReg(padapter, 0x34, BIT11, 1); */
rtw_write16(padapter, 0x34, rtw_read16(padapter, 0x34) | (BIT11));
- rtw_write32(padapter, EFUSE_CTRL, 0x90600000|((addr<<8 | data)));
+ rtw_write32(padapter, EFUSE_CTRL, 0x90600000 | ((addr << 8 | data)));
- while ((0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 100)) {
+ while ((0x80 & rtw_read8(padapter, EFUSE_CTRL + 3)) && (tmpidx < 100)) {
mdelay(1);
tmpidx++;
}
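efuse_OneByteWrite() now issues the whole command in a single 32-bit write to EFUSE_CTRL (0x90600000 carries the command/control bits, addr and data the payload) and polls the busy flag — bit 31, read back as bit 0x80 of the top byte — with a bounded retry count. The bounded-poll idiom as a standalone sketch; the register accessors are stand-ins, not the driver's:

#include <stdbool.h>
#include <stdint.h>

/* stand-ins for the driver's MMIO accessors */
uint8_t reg_read8(unsigned int addr);
void reg_write32(unsigned int addr, uint32_t val);
void delay_ms(unsigned int ms);

#define EFUSE_CTRL 0x30

/* returns true once the busy bit (bit 31 of EFUSE_CTRL) clears */
static bool efuse_write_byte(uint16_t addr, uint8_t data)
{
        unsigned int tries = 0;

        reg_write32(EFUSE_CTRL, 0x90600000u | ((uint32_t)addr << 8) | data);

        /* bit 7 of byte 3 == bit 31 of the register: busy while set */
        while ((reg_read8(EFUSE_CTRL + 3) & 0x80) && tries < 100) {
                delay_ms(1);
                tries++;
        }

        return tries < 100;
}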
@@ -365,19 +355,19 @@ efuse_WordEnableDataRead(u8 word_en,
u8 *sourdata,
u8 *targetdata)
{
- if (!(word_en&BIT(0))) {
+ if (!(word_en & BIT(0))) {
targetdata[0] = sourdata[0];
targetdata[1] = sourdata[1];
}
- if (!(word_en&BIT(1))) {
+ if (!(word_en & BIT(1))) {
targetdata[2] = sourdata[2];
targetdata[3] = sourdata[3];
}
- if (!(word_en&BIT(2))) {
+ if (!(word_en & BIT(2))) {
targetdata[4] = sourdata[4];
targetdata[5] = sourdata[5];
}
- if (!(word_en&BIT(3))) {
+ if (!(word_en & BIT(3))) {
targetdata[6] = sourdata[6];
targetdata[7] = sourdata[7];
}
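efuse_WordEnableDataRead() copies one 16-bit word per cleared bit of word_en; the four unrolled branches above are equivalent to a loop over the bit positions, sketched here for clarity:

#include <stdint.h>

/* a cleared bit i in word_en selects bytes [2*i, 2*i + 1] for copying */
static void word_enable_data_read(uint8_t word_en,
                                  const uint8_t *src, uint8_t *dst)
{
        int i;

        for (i = 0; i < 4; i++) {
                if (!(word_en & (1u << i))) {
                        dst[2 * i]     = src[2 * i];
                        dst[2 * i + 1] = src[2 * i + 1];
                }
        }
}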
@@ -463,7 +453,7 @@ static void efuse_ShadowRead2Byte(struct adapter *padapter, u16 Offset, u16 *Val
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
*Value = pEEPROM->efuse_eeprom_data[Offset];
- *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset + 1] << 8;
} /* EFUSE_ShadowRead2Byte */
@@ -473,9 +463,9 @@ static void efuse_ShadowRead4Byte(struct adapter *padapter, u16 Offset, u32 *Val
struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
*Value = pEEPROM->efuse_eeprom_data[Offset];
- *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
- *Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16;
- *Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset + 1] << 8;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset + 2] << 16;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset + 3] << 24;
} /* efuse_ShadowRead4Byte */
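The shadow-read helpers assemble 16- and 32-bit values byte by byte in little-endian order. The kernel's unaligned little-endian accessors express the same thing in one call; a hedged equivalent:

#include <asm/unaligned.h>
#include <linux/types.h>

/* same result as the byte-by-byte shifts in efuse_ShadowRead2/4Byte() */
static u16 shadow_read_2byte(const u8 *efuse_eeprom_data, u16 offset)
{
        return get_unaligned_le16(efuse_eeprom_data + offset);
}

static u32 shadow_read_4byte(const u8 *efuse_eeprom_data, u16 offset)
{
        return get_unaligned_le32(efuse_eeprom_data + offset);
}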
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index b89e88d6a82d..5a76069a8222 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <linux/of.h>
#include <asm/unaligned.h>
@@ -55,7 +54,9 @@ static u8 WIFI_OFDMRATES[] = {
int rtw_get_bit_value_from_ieee_value(u8 val)
{
- unsigned char dot11_rate_table[] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0}; /* last element must be zero!! */
+ static const unsigned char dot11_rate_table[] = {
+ 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0
+ }; /* last element must be zero!! */
int i = 0;
while (dot11_rate_table[i] != 0) {
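Making the rate table static const moves it into read-only data, so it is emitted once instead of the initializer typically being copied to the stack on every call. A small before/after illustration:

/* re-initialized on every call: the initializer is typically copied
 * into stack storage at run time */
int lookup_stack(unsigned char val)
{
        unsigned char table[] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0};
        int i;

        for (i = 0; table[i]; i++)
                if (table[i] == val)
                        return i;
        return -1;
}

/* emitted once into .rodata; no per-call copy */
int lookup_rodata(unsigned char val)
{
        static const unsigned char table[] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0};
        int i;

        for (i = 0; table[i]; i++)
                if (table[i] == val)
                        return i;
        return -1;
}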
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index 4d3c30ec93b5..fcda9db6ebb5 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -26,7 +26,6 @@ jackson@realtek.com.tw
*/
#include <drv_types.h>
-#include <rtw_debug.h>
u8 rtw_read8(struct adapter *adapter, u32 addr)
{
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index 3b44f0dd5b0a..587a87fbffeb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
u8 rtw_validate_bssid(u8 *bssid)
{
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 8c487b7b7a40..cbdb134278d3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <linux/etherdevice.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 9ebf25a0ef9b..bbdd5fce28a1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtw_wifi_regd.h>
#include <hal_btcoex.h>
#include <linux/kernel.h>
@@ -628,7 +627,7 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
netdev_dbg(padapter->pnetdev,
- "ap has changed, disconnect now\n ");
+ "ap has changed, disconnect now\n");
receive_disconnect(padapter,
pmlmeinfo->network.mac_address, 0);
return _SUCCESS;
@@ -3831,10 +3830,10 @@ void site_survey(struct adapter *padapter)
} else {
#ifdef DBG_FIXED_CHAN
if (pmlmeext->fixed_chan != 0xff)
- SelectChannel(padapter, pmlmeext->fixed_chan);
+ r8723bs_select_channel(padapter, pmlmeext->fixed_chan);
else
#endif
- SelectChannel(padapter, survey_channel);
+ r8723bs_select_channel(padapter, survey_channel);
}
if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index e9763eab16f6..dbfcbac3d855 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
#include <linux/jiffies.h>
@@ -285,14 +284,12 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
if (rpwm & PS_ACK) {
unsigned long start_time;
u8 cpwm_now;
- u8 poll_cnt = 0;
start_time = jiffies;
/* polling cpwm */
do {
mdelay(1);
- poll_cnt++;
rtw_hal_get_hwreg(padapter, HW_VAR_CPWM, &cpwm_now);
if ((cpwm_orig ^ cpwm_now) & 0x80) {
pwrpriv->cpwm = PS_STATE_S4;
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 0eadc23a7d54..b30f026789b6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <rtw_recv.h>
#include <net/cfg80211.h>
@@ -2027,12 +2026,9 @@ static int recv_func(struct adapter *padapter, union recv_frame *rframe)
/* check if need to handle uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
union recv_frame *pending_frame;
- int cnt = 0;
- while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
- cnt++;
+ while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue)))
recv_func_posthandle(padapter, pending_frame);
- }
}
ret = recv_func_prehandle(padapter, rframe);
diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c
deleted file mode 100644
index 4f120c894998..000000000000
--- a/drivers/staging/rtl8723bs/core/rtw_rf.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <linux/kernel.h>
-
-static const u32 ch_freq_map[] = {
- 2412,
- 2417,
- 2422,
- 2427,
- 2432,
- 2437,
- 2442,
- 2447,
- 2452,
- 2457,
- 2462,
- 2467,
- 2472,
- 2484
-};
-
-u32 rtw_ch2freq(u32 channel)
-{
- if (channel == 0 || channel > ARRAY_SIZE(ch_freq_map))
- return 2412;
-
- return ch_freq_map[channel - 1];
-}
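With rtw_ch2freq() deleted, the driver no longer hand-rolls the channel-to-frequency table; cfg80211 exports the same 2.4 GHz mapping (channels 1-13 at 5 MHz spacing from 2412, channel 14 at 2484). A sketch of the in-tree helper, whose band argument exists because 2.4 GHz and 5 GHz channel numbers overlap:

#include <net/cfg80211.h>

/* channels 1..13 -> 2412..2472 MHz, channel 14 -> 2484 MHz */
static int chan_to_mhz_2ghz(int chan)
{
        return ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
}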
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 7ecdaa2eeaf3..1e9eff01b1aa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <linux/crc32.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <crypto/aes.h>
static const char * const _security_type_str[] = {
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 0145c4da5ac0..1b72f2196a1c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
void _rtw_init_stainfo(struct sta_info *psta);
void _rtw_init_stainfo(struct sta_info *psta)
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 7fac9ca3e9a0..f37fec1efaf9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_com_h2c.h>
static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
@@ -333,7 +332,7 @@ inline unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter)
return 0;
}
-void SelectChannel(struct adapter *padapter, unsigned char channel)
+void r8723bs_select_channel(struct adapter *padapter, unsigned char channel)
{
if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->setch_mutex)))
return;
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index b1965ec0181f..3e88f14e3bf7 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
@@ -45,7 +44,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
init_completion(&pxmitpriv->terminate_xmitthread_comp);
/*
- * Please insert all the queue initializaiton using _rtw_init_queue below
+ * Please insert all the queue initialization using _rtw_init_queue below
*/
pxmitpriv->adapter = padapter;
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 22e33b97800d..81149ab81904 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include "odm_precomp.h"
/* MACRO definition for pRFCalibrateInfo->TxIQC_8723B[0] */
diff --git a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
index 5f9e94a7e6ad..86404b5e6c52 100644
--- a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
@@ -21,7 +21,6 @@ Major Change History:
--*/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <HalPwrSeqCmd.h>
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index e26b789b9cdd..b72cf520d576 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <hal_data.h>
-#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <Mp_Precomp.h>
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 852232102433..719dd116d807 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include "hal_com_h2c.h"
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 3e814a15e893..d5649e7d8f99 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index 7e3db8d3c910..0a3900548fd2 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
void rtw_hal_chip_configure(struct adapter *padapter)
@@ -160,12 +159,6 @@ void rtw_hal_set_odm_var(struct adapter *padapter, enum hal_odm_variable eVariab
padapter->HalFunc.SetHalODMVarHandler(padapter, eVariable, pValue1, bSet);
}
-void rtw_hal_get_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, void *pValue2)
-{
- if (padapter->HalFunc.GetHalODMVarHandler)
- padapter->HalFunc.GetHalODMVarHandler(padapter, eVariable, pValue1, pValue2);
-}
-
void rtw_hal_enable_interrupt(struct adapter *padapter)
{
if (padapter->HalFunc.enable_interrupt)
diff --git a/drivers/staging/rtl8723bs/hal/hal_sdio.c b/drivers/staging/rtl8723bs/hal/hal_sdio.c
index 9de62a0f5d35..665c85eccbdf 100644
--- a/drivers/staging/rtl8723bs/hal/hal_sdio.c
+++ b/drivers/staging/rtl8723bs/hal/hal_sdio.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
u8 rtw_hal_sdio_max_txoqt_free_space(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index d1ac2f44939c..56526056dd1d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
#include "hal_com_h2c.h"
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
index 2028791988e7..d1c875cf8e6d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -8,7 +8,6 @@
/* This file is for 92CE/92CU dynamic mechanism only */
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
/* Global var */
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 7a5c3a98183b..37ebbbf408ec 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -8,7 +8,6 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
#include "hal_com_h2c.h"
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 7764896a04ea..4ff092b7c9c9 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
/**
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 74e75dc970f7..28c914ec2604 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
static void initrecvbuf(struct recv_buf *precvbuf, struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index 15810438a472..78298e63edce 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -6,7 +6,6 @@
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
static u8 rtw_sdio_wait_enough_TxOQT_space(struct adapter *padapter, u8 agg_num)
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index c9cd6578f7f8..d3aae413fc0f 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
#include "hal_com_h2c.h"
@@ -380,8 +379,8 @@ static void _InitWMACSetting(struct adapter *padapter)
rtw_write32(padapter, REG_RCR, pHalData->ReceiveConfig);
/* Accept all multicast address */
- rtw_write32(padapter, REG_MAR, 0xFFFFFFFF);
- rtw_write32(padapter, REG_MAR + 4, 0xFFFFFFFF);
+ rtw_write32(padapter, REG_MAR, 0xFFFFFFFF); /* Offset 0x0620-0x0623 */
+ rtw_write32(padapter, REG_MAR + 4, 0xFFFFFFFF); /* Offset 0x0624-0x0627 */
/* Accept all data frames */
value16 = 0xFFFF;
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 107f427ee4aa..21e9f1858745 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -5,7 +5,6 @@
*
*******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtl8723b_hal.h>
/* */
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 9e6ca1dec525..0b35c97843cc 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -452,14 +452,7 @@ struct adapter {
#define DF_RX_BIT BIT1
#define DF_IO_BIT BIT2
-/* define RTW_DISABLE_FUNC(padapter, func) (atomic_add(&adapter_to_dvobj(padapter)->disable_func, (func))) */
/* define RTW_ENABLE_FUNC(padapter, func) (atomic_sub(&adapter_to_dvobj(padapter)->disable_func, (func))) */
-static inline void RTW_DISABLE_FUNC(struct adapter *padapter, int func_bit)
-{
- int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func);
- df |= func_bit;
- atomic_set(&adapter_to_dvobj(padapter)->disable_func, df);
-}
static inline void RTW_ENABLE_FUNC(struct adapter *padapter, int func_bit)
{
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 5cffab2d06ff..efdd1f912b5d 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -301,7 +301,6 @@ u8 rtw_hal_set_def_var(struct adapter *padapter, enum hal_def_variable eVariable
u8 rtw_hal_get_def_var(struct adapter *padapter, enum hal_def_variable eVariable, void *pValue);
void rtw_hal_set_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet);
-void rtw_hal_get_odm_var(struct adapter *padapter, enum hal_odm_variable eVariable, void *pValue1, void *pValue2);
void rtw_hal_enable_interrupt(struct adapter *padapter);
void rtw_hal_disable_interrupt(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
index 5e43cc89f535..b93d74a5b9a5 100644
--- a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
+++ b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
@@ -101,7 +101,7 @@
{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07 = 0x20 , SOP option to disable BG/MB*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/ \
- {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
{0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index cf96b5f7a776..b21267d7ef72 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -81,9 +81,7 @@ static inline void thread_enter(char *name)
static inline void flush_signals_thread(void)
{
if (signal_pending(current))
- {
flush_signals(current);
- }
}
#define rtw_warn_on(condition) WARN_ON(condition)
@@ -102,7 +100,7 @@ static inline int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *par
#define MAC_ARG(x) (x)
#endif
-extern void rtw_free_netdev(struct net_device * netdev);
+extern void rtw_free_netdev(struct net_device *netdev);
/* Macros for handling unaligned memory accesses */
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 188ed7e26550..2ec54f9e180c 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -7,43 +7,41 @@
#ifndef __OSDEP_LINUX_SERVICE_H_
#define __OSDEP_LINUX_SERVICE_H_
- #include <linux/spinlock.h>
- #include <linux/compiler.h>
- #include <linux/kernel.h>
- #include <linux/errno.h>
- #include <linux/init.h>
- #include <linux/slab.h>
- #include <linux/module.h>
- #include <linux/kref.h>
- /* include <linux/smp_lock.h> */
- #include <linux/netdevice.h>
- #include <linux/skbuff.h>
- #include <linux/uaccess.h>
- #include <asm/byteorder.h>
- #include <linux/atomic.h>
- #include <linux/io.h>
- #include <linux/sem.h>
- #include <linux/sched.h>
- #include <linux/etherdevice.h>
- #include <linux/wireless.h>
- #include <net/iw_handler.h>
- #include <linux/if_arp.h>
- #include <linux/rtnetlink.h>
- #include <linux/delay.h>
- #include <linux/interrupt.h> /* for struct tasklet_struct */
- #include <linux/ip.h>
- #include <linux/kthread.h>
- #include <linux/list.h>
- #include <linux/vmalloc.h>
-
-/* #include <linux/ieee80211.h> */
- #include <net/ieee80211_radiotap.h>
- #include <net/cfg80211.h>
-
- struct __queue {
- struct list_head queue;
- spinlock_t lock;
- };
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/io.h>
+#include <linux/sem.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h> /* for struct tasklet_struct */
+#include <linux/ip.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/cfg80211.h>
+
+struct __queue {
+ struct list_head queue;
+ spinlock_t lock;
+};
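The re-indented block defines struct __queue, the driver's basic list-plus-spinlock container. How such a queue is typically initialized and used with standard kernel primitives, as a hedged sketch:

#include <linux/list.h>
#include <linux/spinlock.h>

struct __queue {
        struct list_head queue;
        spinlock_t lock;
};

static void queue_init(struct __queue *q)
{
        INIT_LIST_HEAD(&q->queue);
        spin_lock_init(&q->lock);
}

static void queue_push(struct __queue *q, struct list_head *item)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        list_add_tail(item, &q->queue);
        spin_unlock_irqrestore(&q->lock, flags);
}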
static inline struct list_head *get_next(struct list_head *list)
{
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index f9ecd9047d52..e6d6e9de5474 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -38,7 +38,7 @@ struct rt_firmware {
/* This structure must be carefully byte-ordered. */
struct rt_firmware_hdr {
- /* 8-byte alinment required */
+ /* 8-byte alignment required */
/* LONG WORD 0 ---- */
__le16 signature; /* 92C0: test chip; 92C, 88C0: test chip;
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
index fe1b03101203..cb44119ce9a9 100644
--- a/drivers/staging/rtl8723bs/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -516,10 +516,6 @@ struct drvextra_cmd_parm {
/*------------------- Below are used for RF/BB tuning ---------------------*/
-struct getcountjudge_rsp {
- u8 count_judge[MAX_RATES_LENGTH];
-};
-
struct addBaReq_parm {
unsigned int tid;
u8 addr[ETH_ALEN];
diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h
deleted file mode 100644
index 7f96ff66915f..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_debug.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef __RTW_DEBUG_H__
-#define __RTW_DEBUG_H__
-
-void mac_reg_dump(struct adapter *adapter);
-void bb_reg_dump(struct adapter *adapter);
-void rf_reg_dump(struct adapter *adapter);
-
-#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_event.h b/drivers/staging/rtl8723bs/include/rtw_event.h
index d48bae5416fe..62e0dec249ad 100644
--- a/drivers/staging/rtl8723bs/include/rtw_event.h
+++ b/drivers/staging/rtl8723bs/include/rtw_event.h
@@ -28,7 +28,7 @@ struct surveydone_event {
};
/*
-Used to report the link result of joinning the given bss
+Used to report the link result of joining the given bss
join_res:
diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h
index be9741a056e5..0ee87be6dc4f 100644
--- a/drivers/staging/rtl8723bs/include/rtw_io.h
+++ b/drivers/staging/rtl8723bs/include/rtw_io.h
@@ -13,7 +13,7 @@
Otherwise, io_handler will free io_req
*/
-/* below is for the intf_option bit defition... */
+/* below is for the intf_option bit definition... */
struct intf_priv;
struct intf_hdl;
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index e103c4a15d1a..e665479babc2 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -131,7 +131,7 @@ struct mlme_priv {
u8 roam_rssi_diff_th; /* rssi difference threshold for active scan candidate selection */
u32 roam_scan_int_ms; /* scan interval for active roam */
u32 roam_scanr_exp_ms; /* scan result expire time in ms for roam */
- u8 roam_tgt_addr[ETH_ALEN]; /* request to roam to speicific target without other consideration */
+ u8 roam_tgt_addr[ETH_ALEN]; /* request to roam to specific target without other consideration */
u8 *nic_hdl;
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 5b8574f5a251..8315399b64fd 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -384,8 +384,8 @@ struct mlme_ext_priv {
unsigned char default_supported_mcs_set[16];
struct ss_res sitesurvey_res;
- struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including current scanning/connecting/connected related info. */
- /* for ap mode, network includes ap's cap_info */
+ struct mlme_ext_info mlmext_info; /* for sta/adhoc mode, including current scanning/connecting/connected related info. */
+ /* for ap mode, network includes ap's cap_info */
struct timer_list survey_timer;
struct timer_list link_timer;
struct timer_list sa_query_timer;
@@ -455,7 +455,7 @@ u8 rtw_get_center_ch(u8 channel, u8 chnl_bw, u8 chnl_offset);
unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter);
void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode);
-void SelectChannel(struct adapter *padapter, unsigned char channel);
+void r8723bs_select_channel(struct adapter *padapter, unsigned char channel);
unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval);
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index c93594f75436..18dd1464e0c2 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -444,16 +444,6 @@ static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, signed int s
}
-static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem)
-{
- /* due to the design of 2048 bytes alignment of recv_frame, we can reference the union recv_frame */
- /* from any given member of recv_frame. */
- /* rxmem indicates the any member/address in recv_frame */
-
- return (union recv_frame *)(((SIZE_PTR)rxmem >> RXFRAME_ALIGN) << RXFRAME_ALIGN);
-
-}
-
static inline signed int get_recvframe_len(union recv_frame *precvframe)
{
return precvframe->u.hdr.len;
diff --git a/drivers/staging/rtl8723bs/include/rtw_rf.h b/drivers/staging/rtl8723bs/include/rtw_rf.h
index 718275ee4500..9f98b3f5a2e3 100644
--- a/drivers/staging/rtl8723bs/include/rtw_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtw_rf.h
@@ -97,6 +97,4 @@ enum {
HT_DATA_SC_20_LOWER_OF_40MHZ = 2,
};
-u32 rtw_ch2freq(u32 ch);
-
#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
index 98afbd3054a4..32f6d3a5e309 100644
--- a/drivers/staging/rtl8723bs/include/rtw_security.h
+++ b/drivers/staging/rtl8723bs/include/rtw_security.h
@@ -50,43 +50,43 @@ union pn48 {
#ifdef __LITTLE_ENDIAN
struct {
- u8 TSC0;
- u8 TSC1;
- u8 TSC2;
- u8 TSC3;
- u8 TSC4;
- u8 TSC5;
- u8 TSC6;
- u8 TSC7;
+ u8 TSC0;
+ u8 TSC1;
+ u8 TSC2;
+ u8 TSC3;
+ u8 TSC4;
+ u8 TSC5;
+ u8 TSC6;
+ u8 TSC7;
} _byte_;
#else
struct {
- u8 TSC7;
- u8 TSC6;
- u8 TSC5;
- u8 TSC4;
- u8 TSC3;
- u8 TSC2;
- u8 TSC1;
- u8 TSC0;
+ u8 TSC7;
+ u8 TSC6;
+ u8 TSC5;
+ u8 TSC4;
+ u8 TSC3;
+ u8 TSC2;
+ u8 TSC1;
+ u8 TSC0;
} _byte_;
#endif
};
union Keytype {
- u8 skey[16];
- u32 lkey[4];
+ u8 skey[16];
+ u32 lkey[4];
};
struct rt_pmkid_list {
- u8 bUsed;
- u8 Bssid[6];
- u8 PMKID[16];
- u8 SsidBuf[33];
+ u8 bUsed;
+ u8 Bssid[6];
+ u8 PMKID[16];
+ u8 SsidBuf[33];
u8 *ssid_octet;
- u16 ssid_length;
+ u16 ssid_length;
};
@@ -162,7 +162,7 @@ struct security_priv {
/* For WPA2 Pre-Authentication. */
struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE]; /* Renamed from PreAuthKey[NUM_PRE_AUTH_KEY]. Annie, 2006-10-13. */
- u8 PMKIDIndex;
+ u8 PMKIDIndex;
u8 bWepDefaultKeyIdxSet;
@@ -170,50 +170,48 @@ struct security_priv {
#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst)\
do {\
- switch (psecuritypriv->dot11AuthAlgrthm)\
- {\
- case dot11AuthAlgrthm_Open:\
- case dot11AuthAlgrthm_Shared:\
- case dot11AuthAlgrthm_Auto:\
- encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
- break;\
- case dot11AuthAlgrthm_8021X:\
- if (bmcst)\
- encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
- else\
- encry_algo = (u8)psta->dot118021XPrivacy;\
- break;\
- case dot11AuthAlgrthm_WAPI:\
- encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
- break;\
+ switch (psecuritypriv->dot11AuthAlgrthm) {\
+ case dot11AuthAlgrthm_Open:\
+ case dot11AuthAlgrthm_Shared:\
+ case dot11AuthAlgrthm_Auto:\
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
+ break;\
+ case dot11AuthAlgrthm_8021X:\
+ if (bmcst)\
+ encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
+ else\
+ encry_algo = (u8)psta->dot118021XPrivacy;\
+ break;\
+ case dot11AuthAlgrthm_WAPI:\
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
+ break;\
} \
} while (0)
#define SET_ICE_IV_LEN(iv_len, icv_len, encrypt)\
do {\
- switch (encrypt)\
- {\
- case _WEP40_:\
- case _WEP104_:\
- iv_len = 4;\
- icv_len = 4;\
- break;\
- case _TKIP_:\
- iv_len = 8;\
- icv_len = 4;\
- break;\
- case _AES_:\
- iv_len = 8;\
- icv_len = 8;\
- break;\
- case _SMS4_:\
- iv_len = 18;\
- icv_len = 16;\
- break;\
- default:\
- iv_len = 0;\
- icv_len = 0;\
- break;\
+ switch (encrypt) {\
+ case _WEP40_:\
+ case _WEP104_:\
+ iv_len = 4;\
+ icv_len = 4;\
+ break;\
+ case _TKIP_:\
+ iv_len = 8;\
+ icv_len = 4;\
+ break;\
+ case _AES_:\
+ iv_len = 8;\
+ icv_len = 8;\
+ break;\
+ case _SMS4_:\
+ iv_len = 18;\
+ icv_len = 16;\
+ break;\
+ default:\
+ iv_len = 0;\
+ icv_len = 0;\
+ break;\
} \
} while (0)
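Both macros keep their do { ... } while (0) wrapper through the reflow. That wrapper is what lets a multi-statement macro act as a single statement in if/else chains; a small illustration:

#define SET_LENS(iv_len, icv_len)       \
        do {                            \
                (iv_len) = 8;           \
                (icv_len) = 4;          \
        } while (0)

void example(int encrypted, int *iv, int *icv)
{
        /* without do/while(0), only the macro's first statement would be
         * governed by the if, and the trailing ';' would break the
         * if/else chain */
        if (encrypted)
                SET_LENS(*iv, *icv);
        else
                *iv = *icv = 0;
}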
@@ -242,7 +240,8 @@ struct mic_data {
/* ===== start - public domain SHA256 implementation ===== */
/* This is based on SHA256 implementation in LibTomCrypt that was released into
- * public domain by Tom St Denis. */
+ * public domain by Tom St Denis.
+ */
int omac1_aes_128(u8 *key, u8 *data, size_t data_len, u8 *mac);
void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key);
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index a3b4310caddf..544468f57692 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -15,7 +15,7 @@
#define XMITBUF_ALIGN_SZ 512
-/* xmit extension buff defination */
+/* xmit extension buff definition */
#define MAX_XMIT_EXTBUF_SZ (1536)
#define NR_XMIT_EXTBUFF (32)
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index eb3c73cc2662..b63a74e669bc 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <rtw_wifi_regd.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index c81b30f1f1b0..a9e481e182ad 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtw_mp.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index 1341801e5c21..1904e82a24b5 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
static void _dynamic_check_timer_handler(struct timer_list *t)
{
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 55d0140cd543..fc9b9c5efb50 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_data.h>
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index f09c1324c39c..a00f9f0c85c5 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
/*
* Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index 4d28b300b235..746f45cf9aac 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <net/cfg80211.h>
#include <asm/unaligned.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 490431484524..d18fde4e5d6c 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <hal_btcoex.h>
#include <linux/jiffies.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
index 0a0b04088e66..4a7c0c9cc7ef 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
@@ -6,7 +6,6 @@
*******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
static bool rtw_sdio_claim_host_needed(struct sdio_func *func)
{
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index 5eef1d68c6f0..dbd4bf531339 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -6,7 +6,6 @@
*****************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
#include <rtw_wifi_regd.h>
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
index 1eeabfffd6d2..944b9c724b32 100644
--- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -5,7 +5,6 @@
*
******************************************************************************/
#include <drv_types.h>
-#include <rtw_debug.h>
uint rtw_remainder_len(struct pkt_file *pfile)
@@ -144,9 +143,8 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
psta = list_entry(plist, struct sta_info, asoc_list);
stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
- if (stainfo_offset_valid(stainfo_offset)) {
+ if (stainfo_offset_valid(stainfo_offset))
chk_alive_list[chk_alive_num++] = stainfo_offset;
- }
}
spin_unlock_bh(&pstapriv->asoc_list_lock);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index c4d97dbf6ba8..3dbeffc650d3 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -857,10 +857,10 @@ vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- ret = vchiq_bulk_transfer(instance, handle,
- (void *)data, NULL,
- size, userdata, mode,
- VCHIQ_BULK_TRANSMIT);
+ ret = vchiq_bulk_xfer_callback_interruptible(instance, handle,
+ (void *)data, NULL,
+ size, mode, userdata,
+ VCHIQ_BULK_TRANSMIT);
break;
case VCHIQ_BULK_MODE_BLOCKING:
ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
@@ -895,9 +895,10 @@ int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- ret = vchiq_bulk_transfer(instance, handle, data, NULL,
- size, userdata,
- mode, VCHIQ_BULK_RECEIVE);
+ ret = vchiq_bulk_xfer_callback_interruptible(instance, handle,
+ (void *)data, NULL,
+ size, mode, userdata,
+ VCHIQ_BULK_RECEIVE);
break;
case VCHIQ_BULK_MODE_BLOCKING:
ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
@@ -968,9 +969,8 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
return -ENOMEM;
}
- ret = vchiq_bulk_transfer(instance, handle, data, NULL, size,
- &waiter->bulk_waiter,
- VCHIQ_BULK_MODE_BLOCKING, dir);
+ ret = vchiq_bulk_xfer_blocking_interruptible(instance, handle, data, NULL, size,
+ &waiter->bulk_waiter, dir);
if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
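The vchiq_arm.c hunks replace the catch-all vchiq_bulk_transfer() with mode-specific entry points, vchiq_bulk_xfer_callback_interruptible() and vchiq_bulk_xfer_blocking_interruptible(), so the interruptible semantics are explicit at the call site. For the blocking mode, the waiter contract visible in the vchiq_core.c hunk below is a completion the caller arms before queuing and then sleeps on, with -EAGAIN signalling a retry from user context; reduced to its essentials:

#include <linux/completion.h>
#include <linux/errno.h>

struct bulk_waiter_sketch {     /* reduced from the diff's bulk_waiter */
        struct completion event;
        int actual;
};

/* submit side: arm the waiter before queuing the transfer */
static void waiter_arm(struct bulk_waiter_sketch *w)
{
        init_completion(&w->event);
        w->actual = 0;
}

/* caller side: sleep until the transfer completes; -EAGAIN tells the
 * caller a signal arrived and the call should be retried from user
 * context, mirroring the diff's wait_for_completion_interruptible() use */
static int waiter_wait(struct bulk_waiter_sketch *w)
{
        if (wait_for_completion_interruptible(&w->event))
                return -EAGAIN;
        return 0;
}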
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 50af04b217f4..1f94db6e0cd9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -1139,7 +1139,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
- void *context, int size, int is_blocking)
+ void *context, int size)
{
struct vchiq_shared_state *local;
struct vchiq_header *header;
@@ -1517,7 +1517,7 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
/* Acknowledge the OPEN */
if (service->sync) {
if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
- &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
+ &ack_payload, sizeof(ack_payload)) == -EAGAIN)
goto bail_not_ready;
/* The service is now open */
@@ -2655,6 +2655,132 @@ close_service_complete(struct vchiq_service *service, int failstate)
return status;
}
+/*
+ * Prepares a bulk transfer to be queued. The function is interruptible and is
+ * intended to be called from user threads. It may return -EAGAIN to indicate
+ * that a signal has been received and the call should be retried after being
+ * returned to user context.
+ */
+static int
+vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
+ void *offset, void __user *uoffset,
+ int size, void *userdata,
+ enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir)
+{
+ struct vchiq_bulk_queue *queue;
+ struct bulk_waiter *bulk_waiter = NULL;
+ struct vchiq_bulk *bulk;
+ struct vchiq_state *state = service->state;
+ const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
+ const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
+ VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
+ int status = -EINVAL;
+ int payload[2];
+
+ if (mode == VCHIQ_BULK_MODE_BLOCKING) {
+ bulk_waiter = userdata;
+ init_completion(&bulk_waiter->event);
+ bulk_waiter->actual = 0;
+ bulk_waiter->bulk = NULL;
+ }
+
+ queue = (dir == VCHIQ_BULK_TRANSMIT) ?
+ &service->bulk_tx : &service->bulk_rx;
+
+ if (mutex_lock_killable(&service->bulk_mutex))
+ return -EAGAIN;
+
+ if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
+ VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
+ do {
+ mutex_unlock(&service->bulk_mutex);
+ if (wait_for_completion_interruptible(&service->bulk_remove_event))
+ return -EAGAIN;
+ if (mutex_lock_killable(&service->bulk_mutex))
+ return -EAGAIN;
+ } while (queue->local_insert == queue->remove +
+ VCHIQ_NUM_SERVICE_BULKS);
+ }
+
+ bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
+
+ bulk->mode = mode;
+ bulk->dir = dir;
+ bulk->userdata = userdata;
+ bulk->size = size;
+ bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+
+ if (vchiq_prepare_bulk_data(service->instance, bulk, offset, uoffset, size, dir))
+ goto unlock_error_exit;
+
+ /*
+ * Ensure that the bulk data record is visible to the peer
+ * before proceeding.
+ */
+ wmb();
+
+ dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
+ state->id, service->localport, service->remoteport,
+ dir_char, size, &bulk->data, userdata);
+
+ /*
+ * The slot mutex must be held when the service is being closed, so
+ * claim it here to ensure that isn't happening
+ */
+ if (mutex_lock_killable(&state->slot_mutex)) {
+ status = -EAGAIN;
+ goto cancel_bulk_error_exit;
+ }
+
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto unlock_both_error_exit;
+
+ payload[0] = lower_32_bits(bulk->data);
+ payload[1] = bulk->size;
+ status = queue_message(state,
+ NULL,
+ VCHIQ_MAKE_MSG(dir_msgtype,
+ service->localport,
+ service->remoteport),
+ memcpy_copy_callback,
+ &payload,
+ sizeof(payload),
+ QMFLAGS_IS_BLOCKING |
+ QMFLAGS_NO_MUTEX_LOCK |
+ QMFLAGS_NO_MUTEX_UNLOCK);
+ if (status)
+ goto unlock_both_error_exit;
+
+ queue->local_insert++;
+
+ mutex_unlock(&state->slot_mutex);
+ mutex_unlock(&service->bulk_mutex);
+
+ dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
+ state->id, service->localport, dir_char, queue->local_insert,
+ queue->remote_insert, queue->process);
+
+ if (bulk_waiter) {
+ bulk_waiter->bulk = bulk;
+ if (wait_for_completion_interruptible(&bulk_waiter->event))
+ status = -EAGAIN;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ status = -EINVAL;
+ }
+
+ return status;
+
+unlock_both_error_exit:
+ mutex_unlock(&state->slot_mutex);
+cancel_bulk_error_exit:
+ vchiq_complete_bulk(service->instance, bulk);
+unlock_error_exit:
+ mutex_unlock(&service->bulk_mutex);
+
+ return status;
+}
+
/* Called by the slot handler */
int
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
@@ -2978,31 +3104,17 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
return status;
}
-/*
- * This function may be called by kernel threads or user threads.
- * User threads may receive -EAGAIN to indicate that a signal has been
- * received and the call should be retried after being returned to user
- * context.
- * When called in blocking mode, the userdata field points to a bulk_waiter
- * structure.
- */
-int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
- void *offset, void __user *uoffset, int size, void *userdata,
- enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
+int
+vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size,
+ void __user *userdata, enum vchiq_bulk_dir dir)
{
struct vchiq_service *service = find_service_by_handle(instance, handle);
- struct vchiq_bulk_queue *queue;
- struct vchiq_bulk *bulk;
- struct vchiq_state *state;
- struct bulk_waiter *bulk_waiter = NULL;
- const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
- const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
- VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
+ enum vchiq_bulk_mode mode = VCHIQ_BULK_MODE_BLOCKING;
int status = -EINVAL;
- int payload[2];
if (!service)
- goto error_exit;
+ return -EINVAL;
if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
goto error_exit;
@@ -3013,133 +3125,91 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
if (vchiq_check_service(service))
goto error_exit;
- switch (mode) {
- case VCHIQ_BULK_MODE_NOCALLBACK:
- case VCHIQ_BULK_MODE_CALLBACK:
- break;
- case VCHIQ_BULK_MODE_BLOCKING:
- bulk_waiter = userdata;
- init_completion(&bulk_waiter->event);
- bulk_waiter->actual = 0;
- bulk_waiter->bulk = NULL;
- break;
- case VCHIQ_BULK_MODE_WAITING:
- bulk_waiter = userdata;
- bulk = bulk_waiter->bulk;
- goto waiting;
- default:
- goto error_exit;
- }
- state = service->state;
+ status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset, size,
+ userdata, mode, dir);
- queue = (dir == VCHIQ_BULK_TRANSMIT) ?
- &service->bulk_tx : &service->bulk_rx;
+error_exit:
+ vchiq_service_put(service);
- if (mutex_lock_killable(&service->bulk_mutex)) {
- status = -EAGAIN;
+ return status;
+}
+
+int
+vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size,
+ enum vchiq_bulk_mode mode, void *userdata,
+ enum vchiq_bulk_dir dir)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ int status = -EINVAL;
+
+ if (!service)
+ return -EINVAL;
+
+ if (mode != VCHIQ_BULK_MODE_CALLBACK &&
+ mode != VCHIQ_BULK_MODE_NOCALLBACK)
goto error_exit;
- }
- if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
- VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
- do {
- mutex_unlock(&service->bulk_mutex);
- if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
- status = -EAGAIN;
- goto error_exit;
- }
- if (mutex_lock_killable(&service->bulk_mutex)) {
- status = -EAGAIN;
- goto error_exit;
- }
- } while (queue->local_insert == queue->remove +
- VCHIQ_NUM_SERVICE_BULKS);
- }
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto error_exit;
- bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
+ if (!offset && !uoffset)
+ goto error_exit;
- bulk->mode = mode;
- bulk->dir = dir;
- bulk->userdata = userdata;
- bulk->size = size;
- bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+ if (vchiq_check_service(service))
+ goto error_exit;
- if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
- goto unlock_error_exit;
+ status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset,
+ size, userdata, mode, dir);
- /*
- * Ensure that the bulk data record is visible to the peer
- * before proceeding.
- */
- wmb();
+error_exit:
+ vchiq_service_put(service);
- dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
- state->id, service->localport, service->remoteport,
- dir_char, size, &bulk->data, userdata);
+ return status;
+}
- /*
- * The slot mutex must be held when the service is being closed, so
- * claim it here to ensure that isn't happening
- */
- if (mutex_lock_killable(&state->slot_mutex)) {
- status = -EAGAIN;
- goto cancel_bulk_error_exit;
- }
+/*
+ * This function is called by the VCHIQ ioctl interface and is interruptible.
+ * It may return -EAGAIN to indicate that a signal has been received
+ * and the call should be retried after being returned to user context.
+ */
+int
+vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
+ unsigned int handle, struct bulk_waiter *userdata)
+{
+ struct vchiq_service *service = find_service_by_handle(instance, handle);
+ struct bulk_waiter *bulk_waiter;
+ int status = -EINVAL;
- if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
- goto unlock_both_error_exit;
+ if (!service)
+ return -EINVAL;
- payload[0] = lower_32_bits(bulk->data);
- payload[1] = bulk->size;
- status = queue_message(state,
- NULL,
- VCHIQ_MAKE_MSG(dir_msgtype,
- service->localport,
- service->remoteport),
- memcpy_copy_callback,
- &payload,
- sizeof(payload),
- QMFLAGS_IS_BLOCKING |
- QMFLAGS_NO_MUTEX_LOCK |
- QMFLAGS_NO_MUTEX_UNLOCK);
- if (status)
- goto unlock_both_error_exit;
+ if (!userdata)
+ goto error_exit;
- queue->local_insert++;
+ if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+ goto error_exit;
- mutex_unlock(&state->slot_mutex);
- mutex_unlock(&service->bulk_mutex);
+ if (vchiq_check_service(service))
+ goto error_exit;
- dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
- state->id, service->localport, dir_char, queue->local_insert,
- queue->remote_insert, queue->process);
+ bulk_waiter = userdata;
-waiting:
vchiq_service_put(service);
status = 0;
- if (bulk_waiter) {
- bulk_waiter->bulk = bulk;
- if (wait_for_completion_interruptible(&bulk_waiter->event))
- status = -EAGAIN;
- else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
- status = -EINVAL;
- }
+ if (wait_for_completion_interruptible(&bulk_waiter->event))
+ return -EAGAIN;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ return -EINVAL;
return status;
-unlock_both_error_exit:
- mutex_unlock(&state->slot_mutex);
-cancel_bulk_error_exit:
- vchiq_complete_bulk(service->instance, bulk);
-unlock_error_exit:
- mutex_unlock(&service->bulk_mutex);
-
error_exit:
- if (service)
- vchiq_service_put(service);
+ vchiq_service_put(service);
+
return status;
}
@@ -3175,11 +3245,12 @@ vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
switch (service->srvstate) {
case VCHIQ_SRVSTATE_OPEN:
status = queue_message(service->state, service, data_id,
- copy_callback, context, size, 1);
+ copy_callback, context, size,
+ QMFLAGS_IS_BLOCKING);
break;
case VCHIQ_SRVSTATE_OPENSYNC:
status = queue_message_sync(service->state, service, data_id,
- copy_callback, context, size, 1);
+ copy_callback, context, size);
break;
default:
status = -EINVAL;
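
The helper introduced above centralizes the queue-and-send path and keeps one discipline throughout: every sleeping call (mutex_lock_killable(), wait_for_completion_interruptible()) converts a pending signal into -EAGAIN so the ioctl layer can unwind to user space and retry. A minimal sketch of that pattern, using generic names rather than VCHIQ symbols:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    /*
     * Sketch of the signal-aware queueing pattern used by
     * vchiq_bulk_xfer_queue_msg_interruptible(): every blocking step maps
     * a pending signal to -EAGAIN so the caller can unwind to user
     * context and retry. All names below are illustrative.
     */
    static int queue_slot_interruptible(struct mutex *lock,
                                        struct completion *slot_free,
                                        bool (*queue_full)(void *arg), void *arg)
    {
            if (mutex_lock_killable(lock))
                    return -EAGAIN;

            while (queue_full(arg)) {
                    /* Sleep unlocked so the consumer side can drain a slot. */
                    mutex_unlock(lock);
                    if (wait_for_completion_interruptible(slot_free))
                            return -EAGAIN;
                    if (mutex_lock_killable(lock))
                            return -EAGAIN;
            }

            /* ...insert the bulk record and queue the message here... */

            mutex_unlock(lock);
            return 0;
    }
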
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 77cc4d7ac077..468463f31801 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -471,9 +471,19 @@ extern void
remote_event_pollall(struct vchiq_state *state);
extern int
-vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *offset,
- void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode,
- enum vchiq_bulk_dir dir);
+vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
+ unsigned int handle, struct bulk_waiter *userdata);
+
+extern int
+vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size,
+ void __user *userdata, enum vchiq_bulk_dir dir);
+
+extern int
+vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size,
+ enum vchiq_bulk_mode mode, void *userdata,
+ enum vchiq_bulk_dir dir);
extern void
vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);
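
The split replaces one mode-switched vchiq_bulk_transfer() with three purpose-named prototypes, each taking only what its mode needs. All of them may return -EAGAIN when a signal interrupts the transfer; a hedged sketch of a caller honoring that contract follows (the wrapper name and the -ERESTARTSYS mapping are illustrative, not the driver's ABI):

    /*
     * Illustrative caller, not driver code: -EAGAIN from the
     * interruptible helpers means "a signal arrived, retry after
     * returning to user context". Mapping it to -ERESTARTSYS is one
     * common way to get the syscall restarted transparently; the exact
     * mapping is caller policy.
     */
    static long bulk_tx_sketch(struct vchiq_instance *inst, unsigned int handle,
                               void *buf, int size, void *cb_cookie)
    {
            int ret;

            ret = vchiq_bulk_xfer_callback_interruptible(inst, handle, buf, NULL,
                                                         size,
                                                         VCHIQ_BULK_MODE_CALLBACK,
                                                         cb_cookie,
                                                         VCHIQ_BULK_TRANSMIT);

            return (ret == -EAGAIN) ? -ERESTARTSYS : ret;
    }
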
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 9cd2a64dce5e..d41a4624cc92 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -304,6 +304,11 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
}
userdata = &waiter->bulk_waiter;
+
+ status = vchiq_bulk_xfer_blocking_interruptible(instance, args->handle,
+ NULL, args->data, args->size,
+ userdata, dir);
+
} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
mutex_lock(&instance->bulk_waiter_list_mutex);
list_for_each_entry(iter, &instance->bulk_waiter_list,
@@ -324,12 +329,16 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
dev_dbg(service->state->dev, "arm: found bulk_waiter %pK for pid %d\n",
waiter, current->pid);
userdata = &waiter->bulk_waiter;
+
+ status = vchiq_bulk_xfer_waiting_interruptible(instance, args->handle, userdata);
} else {
userdata = args->userdata;
- }
- status = vchiq_bulk_transfer(instance, args->handle, NULL, args->data, args->size,
- userdata, args->mode, dir);
+ status = vchiq_bulk_xfer_callback_interruptible(instance, args->handle, NULL,
+ args->data, args->size,
+ args->mode, userdata, dir);
+
+ }
if (!waiter) {
ret = 0;
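
With the entry points split, the ioctl handler now dispatches on the requested mode up front instead of funnelling every mode through a single transfer call at the end. A condensed sketch of the resulting shape (waiter setup and error handling elided, variable names as in the hunk):

    switch (args->mode) {
    case VCHIQ_BULK_MODE_BLOCKING:
            status = vchiq_bulk_xfer_blocking_interruptible(instance, args->handle,
                                                            NULL, args->data,
                                                            args->size, userdata,
                                                            dir);
            break;
    case VCHIQ_BULK_MODE_WAITING:
            status = vchiq_bulk_xfer_waiting_interruptible(instance, args->handle,
                                                           userdata);
            break;
    default: /* CALLBACK and NOCALLBACK */
            status = vchiq_bulk_xfer_callback_interruptible(instance, args->handle,
                                                            NULL, args->data,
                                                            args->size, args->mode,
                                                            userdata, dir);
            break;
    }
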
diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c
index 9a091463656d..42304c9f83a2 100644
--- a/drivers/staging/vme_user/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -416,10 +416,6 @@ void vme_slave_free(struct vme_resource *resource)
slave_image = list_entry(resource->entry, struct vme_slave_resource,
list);
- if (!slave_image) {
- dev_err(bridge->parent, "Can't find slave resource\n");
- return;
- }
/* Unlock image */
mutex_lock(&slave_image->mtx);
@@ -794,10 +790,6 @@ void vme_master_free(struct vme_resource *resource)
master_image = list_entry(resource->entry, struct vme_master_resource,
list);
- if (!master_image) {
- dev_err(bridge->parent, "Can't find master resource\n");
- return;
- }
/* Unlock image */
spin_lock(&master_image->lock);
@@ -1265,7 +1257,7 @@ EXPORT_SYMBOL(vme_unregister_error_handler);
void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
- void (*call)(int, int, void *);
+ void (*call)(int level, int statid, void *priv_data);
void *priv_data;
call = bridge->irq[level - 1].callback[statid].func;
diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h
index 26aa40f78a74..7753e736f9fd 100644
--- a/drivers/staging/vme_user/vme.h
+++ b/drivers/staging/vme_user/vme.h
@@ -129,8 +129,7 @@ struct vme_driver {
};
void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
-void vme_free_consistent(struct vme_resource *, size_t, void *,
- dma_addr_t);
+void vme_free_consistent(struct vme_resource *, size_t, void *, dma_addr_t);
size_t vme_get_size(struct vme_resource *);
int vme_check_window(struct vme_bridge *bridge, u32 aspace,
@@ -138,20 +137,20 @@ int vme_check_window(struct vme_bridge *bridge, u32 aspace,
struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, u32, u32);
+ unsigned long long, dma_addr_t, u32, u32);
int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, dma_addr_t *, u32 *, u32 *);
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
void vme_slave_free(struct vme_resource *);
struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
int vme_master_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, u32, u32, u32);
+ unsigned long long, u32, u32, u32);
int vme_master_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, u32 *, u32 *, u32 *);
+ unsigned long long *, u32 *, u32 *, u32 *);
ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
- unsigned int, loff_t);
+ unsigned int, loff_t);
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma);
void vme_master_free(struct vme_resource *);
@@ -162,13 +161,13 @@ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
void vme_dma_free_attribute(struct vme_dma_attr *);
int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
- struct vme_dma_attr *, size_t);
+ struct vme_dma_attr *, size_t);
int vme_dma_list_exec(struct vme_dma_list *);
int vme_dma_list_free(struct vme_dma_list *);
int vme_dma_free(struct vme_resource *);
int vme_irq_request(struct vme_dev *, int, int,
- void (*callback)(int, int, void *), void *);
+ void (*callback)(int, int, void *), void *);
void vme_irq_free(struct vme_dev *, int, int);
int vme_irq_generate(struct vme_dev *, int, int);
diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c
index 7f84d1c86f29..4a59c9069605 100644
--- a/drivers/staging/vme_user/vme_fake.c
+++ b/drivers/staging/vme_user/vme_fake.c
@@ -79,7 +79,7 @@ struct fake_driver {
};
/* Module parameter */
-static int geoid;
+static u32 geoid;
static const char driver_name[] = "vme_fake";
@@ -1059,6 +1059,12 @@ static int __init fake_init(void)
struct vme_slave_resource *slave_image;
struct vme_lm_resource *lm;
+ if (geoid >= VME_MAX_SLOTS) {
+ pr_err("VME geographical address must be between 0 and %d (exclusive), but got %u\n",
+ VME_MAX_SLOTS, geoid);
+ return -EINVAL;
+ }
+
/* We need a fake parent device */
vme_root = root_device_register("vme");
if (IS_ERR(vme_root))
@@ -1283,7 +1289,7 @@ static void __exit fake_exit(void)
}
MODULE_PARM_DESC(geoid, "Set geographical addressing");
-module_param(geoid, int, 0);
+module_param(geoid, uint, 0);
MODULE_DESCRIPTION("Fake VME bridge driver");
MODULE_LICENSE("GPL");
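
Making geoid a u32 and bounds-checking it at init time turns a bad module parameter into a clean -EINVAL at load instead of a possible out-of-range slot index later. A self-contained sketch of the same pattern (module and parameter names are hypothetical, as is the slot limit):

    #include <linux/module.h>

    #define DEMO_MAX_SLOTS 32 /* stand-in for VME_MAX_SLOTS */

    static u32 slot; /* unsigned, so negative input is rejected at parse time */
    module_param(slot, uint, 0);
    MODULE_PARM_DESC(slot, "Slot number (0 to DEMO_MAX_SLOTS - 1)");

    static int __init demo_init(void)
    {
            /* Validate before touching hardware or allocating resources. */
            if (slot >= DEMO_MAX_SLOTS) {
                    pr_err("slot must be below %d, got %u\n",
                           DEMO_MAX_SLOTS, slot);
                    return -EINVAL;
            }
            return 0;
    }
    module_init(demo_init);

    static void __exit demo_exit(void)
    {
    }
    module_exit(demo_exit);

    MODULE_DESCRIPTION("Sketch of init-time module parameter validation");
    MODULE_LICENSE("GPL");
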
diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
index 2ec9c2904404..31a44025e08f 100644
--- a/drivers/staging/vme_user/vme_tsi148.c
+++ b/drivers/staging/vme_user/vme_tsi148.c
@@ -36,7 +36,7 @@ static void tsi148_remove(struct pci_dev *);
/* Module parameter */
static bool err_chk;
-static int geoid;
+static u32 geoid;
static const char driver_name[] = "vme_tsi148";
@@ -55,14 +55,14 @@ static struct pci_driver tsi148_driver = {
};
static void reg_join(unsigned int high, unsigned int low,
- unsigned long long *variable)
+ unsigned long long *variable)
{
*variable = (unsigned long long)high << 32;
*variable |= (unsigned long long)low;
}
static void reg_split(unsigned long long variable, unsigned int *high,
- unsigned int *low)
+ unsigned int *low)
{
*low = (unsigned int)variable & 0xFFFFFFFF;
*high = (unsigned int)(variable >> 32);
@@ -72,7 +72,7 @@ static void reg_split(unsigned long long variable, unsigned int *high,
* Wakes up DMA queue.
*/
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
- int channel_mask)
+ int channel_mask)
{
u32 serviced = 0;
@@ -207,7 +207,7 @@ static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
* Calling VME bus interrupt callback if provided.
*/
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
- u32 stat)
+ u32 stat)
{
int vec, i, serviced = 0;
struct tsi148_driver *bridge;
@@ -358,7 +358,7 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
}
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
@@ -392,7 +392,7 @@ static int tsi148_iack_received(struct tsi148_driver *bridge)
* Configure VME interrupt
*/
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
- int state, int sync)
+ int state, int sync)
{
struct pci_dev *pdev;
u32 tmp;
@@ -430,7 +430,7 @@ static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
* interrupt to be acked.
*/
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
- int statid)
+ int statid)
{
u32 tmp;
struct tsi148_driver *bridge;
@@ -453,7 +453,7 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
/* XXX Consider implementing a timeout? */
wait_event_interruptible(bridge->iack_queue,
- tsi148_iack_received(bridge));
+ tsi148_iack_received(bridge));
mutex_unlock(&bridge->vme_int);
@@ -464,8 +464,8 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
* Initialize a slave window with the requested attributes.
*/
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, u32 aspace, u32 cycle)
+ unsigned long long vme_base, unsigned long long size,
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity = 0;
unsigned int temp_ctl = 0;
@@ -607,8 +607,8 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
* Get slave window configuration.
*/
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
+ unsigned long long *vme_base, unsigned long long *size,
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned int vme_base_low, vme_base_high;
@@ -706,7 +706,7 @@ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
* Allocate and map PCI Resource
*/
static int tsi148_alloc_resource(struct vme_master_resource *image,
- unsigned long long size)
+ unsigned long long size)
{
unsigned long long existing_size;
int retval = 0;
@@ -751,9 +751,9 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->bus_resource.end = (unsigned long)size;
image->bus_resource.flags = IORESOURCE_MEM;
- retval = pci_bus_alloc_resource(pdev->bus,
- &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
- 0, NULL, NULL);
+ retval = pci_bus_alloc_resource(pdev->bus, &image->bus_resource,
+ size, 0x10000, PCIBIOS_MIN_MEM,
+ 0, NULL, NULL);
if (retval) {
dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
@@ -796,8 +796,8 @@ static void tsi148_free_resource(struct vme_master_resource *image)
* Set the attributes of an outbound window.
*/
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size, u32 aspace,
- u32 cycle, u32 dwidth)
+ unsigned long long vme_base, unsigned long long size,
+ u32 aspace, u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i;
@@ -1031,8 +1031,8 @@ err_window:
* XXX Not parsing prefetch information.
*/
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned int pci_base_low, pci_base_high;
@@ -1140,15 +1140,15 @@ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
}
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
int retval;
spin_lock(&image->lock);
retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
- cycle, dwidth);
+ cycle, dwidth);
spin_unlock(&image->lock);
@@ -1156,7 +1156,7 @@ static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
}
static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
+ size_t count, loff_t offset)
{
int retval, enabled;
unsigned long long vme_base, size;
@@ -1241,7 +1241,7 @@ out:
}
static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
- size_t count, loff_t offset)
+ size_t count, loff_t offset)
{
int retval = 0, enabled;
unsigned long long vme_base, size;
@@ -1342,9 +1342,8 @@ out:
*
* Requires a previously configured master window, returns final value.
*/
-static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
- unsigned int mask, unsigned int compare, unsigned int swap,
- loff_t offset)
+static unsigned int tsi148_master_rmw(struct vme_master_resource *image, unsigned int mask,
+ unsigned int compare, unsigned int swap, loff_t offset)
{
unsigned long long pci_addr;
unsigned int pci_addr_high, pci_addr_low;
@@ -1399,7 +1398,7 @@ static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
}
static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
- u32 aspace, u32 cycle, u32 dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
u32 val;
@@ -1497,7 +1496,7 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
}
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
- u32 aspace, u32 cycle, u32 dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
u32 val;
@@ -1599,8 +1598,8 @@ static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
*
* Note: DMA engine expects the DMA descriptor to be big endian.
*/
-static int tsi148_dma_list_add(struct vme_dma_list *list,
- struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
+static int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
+ struct vme_dma_attr *dest, size_t count)
{
struct tsi148_dma_entry *entry, *prev;
u32 address_high, address_low, val;
@@ -1653,8 +1652,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
case VME_DMA_PCI:
pci_attr = src->private;
- reg_split((unsigned long long)pci_attr->address, &address_high,
- &address_low);
+ reg_split((unsigned long long)pci_attr->address, &address_high, &address_low);
entry->descriptor.dsau = cpu_to_be32(address_high);
entry->descriptor.dsal = cpu_to_be32(address_low);
entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
@@ -1662,15 +1660,16 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
case VME_DMA_VME:
vme_attr = src->private;
- reg_split((unsigned long long)vme_attr->address, &address_high,
- &address_low);
+ reg_split((unsigned long long)vme_attr->address, &address_high, &address_low);
entry->descriptor.dsau = cpu_to_be32(address_high);
entry->descriptor.dsal = cpu_to_be32(address_low);
entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
- retval = tsi148_dma_set_vme_src_attributes(
- tsi148_bridge->parent, &entry->descriptor.dsat,
- vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
+ retval = tsi148_dma_set_vme_src_attributes(tsi148_bridge->parent,
+ &entry->descriptor.dsat,
+ vme_attr->aspace,
+ vme_attr->cycle,
+ vme_attr->dwidth);
if (retval < 0)
goto err_source;
break;
@@ -1690,7 +1689,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
pci_attr = dest->private;
reg_split((unsigned long long)pci_attr->address, &address_high,
- &address_low);
+ &address_low);
entry->descriptor.ddau = cpu_to_be32(address_high);
entry->descriptor.ddal = cpu_to_be32(address_low);
entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
@@ -1699,14 +1698,16 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
vme_attr = dest->private;
reg_split((unsigned long long)vme_attr->address, &address_high,
- &address_low);
+ &address_low);
entry->descriptor.ddau = cpu_to_be32(address_high);
entry->descriptor.ddal = cpu_to_be32(address_low);
entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
- retval = tsi148_dma_set_vme_dest_attributes(
- tsi148_bridge->parent, &entry->descriptor.ddat,
- vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
+ retval = tsi148_dma_set_vme_dest_attributes(tsi148_bridge->parent,
+ &entry->descriptor.ddat,
+ vme_attr->aspace,
+ vme_attr->cycle,
+ vme_attr->dwidth);
if (retval < 0)
goto err_dest;
break;
@@ -1735,7 +1736,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
/* Fill out previous descriptors "Next Address" */
if (entry->list.prev != &list->entries) {
reg_split((unsigned long long)entry->dma_handle, &address_high,
- &address_low);
+ &address_low);
prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
list);
prev->descriptor.dnlau = cpu_to_be32(address_high);
@@ -1813,7 +1814,7 @@ static int tsi148_dma_list_exec(struct vme_dma_list *list)
/* Get first bus address and write into registers */
entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
- list);
+ list);
mutex_unlock(&ctrlr->mtx);
@@ -1832,7 +1833,7 @@ static int tsi148_dma_list_exec(struct vme_dma_list *list)
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
retval = wait_event_interruptible(bridge->dma_queue[channel],
- tsi148_dma_busy(ctrlr->parent, channel));
+ tsi148_dma_busy(ctrlr->parent, channel));
if (retval) {
iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
@@ -1883,7 +1884,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list)
entry = list_entry(pos, struct tsi148_dma_entry, list);
dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
- sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
+ sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
kfree(entry);
}
@@ -1898,7 +1899,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list)
* callback is attached and disabled when the last callback is removed.
*/
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
- u32 aspace, u32 cycle)
+ u32 aspace, u32 cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
int i;
@@ -1963,7 +1964,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
* or disabled.
*/
static int tsi148_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, u32 *aspace, u32 *cycle)
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
struct tsi148_driver *bridge;
@@ -2013,7 +2014,7 @@ static int tsi148_lm_get(struct vme_lm_resource *lm,
* Callback will be passed the monitor triggered.
*/
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
- void (*callback)(void *), void *data)
+ void (*callback)(void *), void *data)
{
u32 lm_ctl, tmp;
struct vme_bridge *tsi148_bridge;
@@ -2086,7 +2087,7 @@ static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
- bridge->base + TSI148_LCSR_INTC);
+ bridge->base + TSI148_LCSR_INTC);
/* Detach callback */
bridge->lm_callback[monitor] = NULL;
@@ -2126,7 +2127,7 @@ static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
}
static void *tsi148_alloc_consistent(struct device *parent, size_t size,
- dma_addr_t *dma)
+ dma_addr_t *dma)
{
struct pci_dev *pdev;
@@ -2137,7 +2138,7 @@ static void *tsi148_alloc_consistent(struct device *parent, size_t size,
}
static void tsi148_free_consistent(struct device *parent, size_t size,
- void *vaddr, dma_addr_t dma)
+ void *vaddr, dma_addr_t dma)
{
struct pci_dev *pdev;
@@ -2160,7 +2161,7 @@ static void tsi148_free_consistent(struct device *parent, size_t size,
* be mapped onto PCI memory.
*/
static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
u32 cbar, crat, vstat;
u32 crcsr_bus_high, crcsr_bus_low;
@@ -2201,8 +2202,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
} else {
dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
- iowrite32be(crat | TSI148_LCSR_CRAT_EN,
- bridge->base + TSI148_LCSR_CRAT);
+ iowrite32be(crat | TSI148_LCSR_CRAT_EN, bridge->base + TSI148_LCSR_CRAT);
}
/* If we want flushed, error-checked writes, set up a window
@@ -2210,9 +2210,8 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
* through VME writes.
*/
if (err_chk) {
- retval = tsi148_master_set(bridge->flush_image, 1,
- (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
- VME_D16);
+ retval = tsi148_master_set(bridge->flush_image, 1, (vstat * 0x80000),
+ 0x80000, VME_CRCSR, VME_SCT, VME_D16);
if (retval)
dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
}
@@ -2221,7 +2220,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
}
static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
u32 crat;
struct tsi148_driver *bridge;
@@ -2231,7 +2230,7 @@ static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
/* Turn off CR/CSR space */
crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
- bridge->base + TSI148_LCSR_CRAT);
+ bridge->base + TSI148_LCSR_CRAT);
/* Free image */
iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
@@ -2253,6 +2252,12 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct vme_dma_resource *dma_ctrlr;
struct vme_lm_resource *lm;
+ if (geoid >= VME_MAX_SLOTS) {
+ dev_err(&pdev->dev, "VME geographical address must be between 0 and %d (exclusive), but got %u\n",
+ VME_MAX_SLOTS, geoid);
+ return -EINVAL;
+ }
+
/* If we want to support more than one of each bridge, we need to
* dynamically generate this so we get one per device
*/
@@ -2287,7 +2292,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* map registers in BAR 0 */
tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
- 4096);
+ 4096);
if (!tsi148_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
@@ -2367,7 +2372,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
- &tsi148_bridge->master_resources);
+ &tsi148_bridge->master_resources);
}
/* Add slave windows to list */
@@ -2388,7 +2393,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
VME_PROG | VME_DATA;
list_add_tail(&slave_image->list,
- &tsi148_bridge->slave_resources);
+ &tsi148_bridge->slave_resources);
}
/* Add dma engines to list */
@@ -2409,7 +2414,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dma_ctrlr->pending);
INIT_LIST_HEAD(&dma_ctrlr->running);
list_add_tail(&dma_ctrlr->list,
- &tsi148_bridge->dma_resources);
+ &tsi148_bridge->dma_resources);
}
/* Add location monitor to list */
@@ -2447,16 +2452,16 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
- (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
+ (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
if (!geoid)
dev_info(&pdev->dev, "VME geographical address is %d\n",
- data & TSI148_LCSR_VSTAT_GA_M);
+ data & TSI148_LCSR_VSTAT_GA_M);
else
dev_info(&pdev->dev, "VME geographical address is set to %d\n",
- geoid);
+ geoid);
dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
- err_chk ? "enabled" : "disabled");
+ err_chk ? "enabled" : "disabled");
retval = tsi148_crcsr_init(tsi148_bridge, pdev);
if (retval) {
@@ -2507,8 +2512,7 @@ err_slave:
err_master:
/* resources are stored in link list */
list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
+ master_image = list_entry(pos, struct vme_master_resource, list);
list_del(pos);
kfree(master_image);
}
@@ -2605,8 +2609,7 @@ static void tsi148_remove(struct pci_dev *pdev)
/* resources are stored in link list */
list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
- master_image = list_entry(pos, struct vme_master_resource,
- list);
+ master_image = list_entry(pos, struct vme_master_resource, list);
list_del(pos);
kfree(master_image);
}
@@ -2628,7 +2631,7 @@ MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);
MODULE_PARM_DESC(geoid, "Override geographical addressing");
-module_param(geoid, int, 0);
+module_param(geoid, uint, 0);
MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");
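
Most of the tsi148 changes are continuation-line realignment, but reg_join() and reg_split() are worth pausing on: the DMA descriptors carry 64-bit addresses as 32-bit high/low register pairs, and these helpers do the packing. A self-contained round-trip sketch (plain user-space C, illustrative only):

    #include <stdio.h>

    /* Mirrors the driver's reg_split()/reg_join() helpers. */
    static void reg_split(unsigned long long v, unsigned int *high,
                          unsigned int *low)
    {
            *low = (unsigned int)v & 0xFFFFFFFF;
            *high = (unsigned int)(v >> 32);
    }

    static void reg_join(unsigned int high, unsigned int low,
                         unsigned long long *v)
    {
            *v = ((unsigned long long)high << 32) | low;
    }

    int main(void)
    {
            unsigned long long addr = 0x123456789ABCDEF0ULL, out;
            unsigned int hi, lo;

            reg_split(addr, &hi, &lo);
            reg_join(hi, lo, &out);
            /* Prints hi=0x12345678 lo=0x9abcdef0 roundtrip=0x123456789abcdef0 */
            printf("hi=%#x lo=%#x roundtrip=%#llx\n", hi, lo, out);
            return 0;
    }
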
diff --git a/drivers/staging/vt6655/TODO b/drivers/staging/vt6655/TODO
index 63607ef9c97e..529bc22cd608 100644
--- a/drivers/staging/vt6655/TODO
+++ b/drivers/staging/vt6655/TODO
@@ -18,4 +18,4 @@ TODO:
- integrate with drivers/net/wireless
Please send any patches to Greg Kroah-Hartman <greg@kroah.com>
-and Forest Bond <forest@alittletooquiet.net>.
+and Philipp Hortmann <philipp.g.hortmann@gmail.com>.
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 688c870d89bc..6a2e390e9493 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -388,22 +388,22 @@ void card_safe_reset_tx(struct vnt_private *priv)
struct vnt_tx_desc *curr_td;
/* initialize TD index */
- priv->tail_td[0] = &priv->apTD0Rings[0];
- priv->apCurrTD[0] = &priv->apTD0Rings[0];
+ priv->tail_td[0] = &priv->ap_td0_rings[0];
+ priv->apCurrTD[0] = &priv->ap_td0_rings[0];
- priv->tail_td[1] = &priv->apTD1Rings[0];
- priv->apCurrTD[1] = &priv->apTD1Rings[0];
+ priv->tail_td[1] = &priv->ap_td1_rings[0];
+ priv->apCurrTD[1] = &priv->ap_td1_rings[0];
for (uu = 0; uu < TYPE_MAXTD; uu++)
priv->iTDUsed[uu] = 0;
for (uu = 0; uu < priv->opts.tx_descs[0]; uu++) {
- curr_td = &priv->apTD0Rings[uu];
+ curr_td = &priv->ap_td0_rings[uu];
curr_td->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
for (uu = 0; uu < priv->opts.tx_descs[1]; uu++) {
- curr_td = &priv->apTD1Rings[uu];
+ curr_td = &priv->ap_td1_rings[uu];
curr_td->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index f52e42564e81..f6b462ebca51 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -55,8 +55,8 @@ void CARDvSafeResetRx(struct vnt_private *priv);
void card_radio_power_off(struct vnt_private *priv);
bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type);
bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
- u64 bss_timestamp);
+ u64 bss_timestamp);
bool card_set_beacon_period(struct vnt_private *priv,
- unsigned short beacon_interval);
+ unsigned short beacon_interval);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 0212240ba23f..5eaab6b172d3 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -135,8 +135,8 @@ struct vnt_private {
struct vnt_tx_desc *apCurrTD[TYPE_MAXTD];
struct vnt_tx_desc *tail_td[TYPE_MAXTD];
- struct vnt_tx_desc *apTD0Rings;
- struct vnt_tx_desc *apTD1Rings;
+ struct vnt_tx_desc *ap_td0_rings;
+ struct vnt_tx_desc *ap_td1_rings;
struct vnt_rx_desc *aRD0Ring;
struct vnt_rx_desc *aRD1Ring;
@@ -189,10 +189,10 @@ struct vnt_private {
u8 byBBType; /* 0:11A, 1:11B, 2:11G */
u8 packet_type; /*
- * 0:11a,1:11b,2:11gb (only CCK
- * in BasicRate), 3:11ga (OFDM in
- * Basic Rate)
- */
+ * 0:11a,1:11b,2:11gb (only CCK
+ * in BasicRate), 3:11ga (OFDM in
+ * Basic Rate)
+ */
unsigned short wBasicRate;
unsigned char byACKRate;
unsigned char byTopOFDMBasicRate;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 3ff8103366c1..bf3ecf720206 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -550,11 +550,11 @@ static bool device_init_rings(struct vnt_private *priv)
priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);
/* vir_pool: pvoid type */
- priv->apTD0Rings = vir_pool
+ priv->ap_td0_rings = vir_pool
+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);
- priv->apTD1Rings = vir_pool
+ priv->ap_td1_rings = vir_pool
+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc)
+ priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);
@@ -720,7 +720,7 @@ static int device_init_td0_ring(struct vnt_private *priv)
curr = priv->td0_pool_dma;
for (i = 0; i < priv->opts.tx_descs[0];
i++, curr += sizeof(struct vnt_tx_desc)) {
- desc = &priv->apTD0Rings[i];
+ desc = &priv->ap_td0_rings[i];
desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
if (!desc->td_info) {
ret = -ENOMEM;
@@ -730,20 +730,20 @@ static int device_init_td0_ring(struct vnt_private *priv)
desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
+ desc->next = &(priv->ap_td0_rings[(i + 1) % priv->opts.tx_descs[0]]);
desc->next_desc = cpu_to_le32(curr +
sizeof(struct vnt_tx_desc));
}
if (i > 0)
- priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
- priv->tail_td[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
+ priv->ap_td0_rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
+ priv->tail_td[0] = priv->apCurrTD[0] = &priv->ap_td0_rings[0];
return 0;
err_free_desc:
while (i--) {
- desc = &priv->apTD0Rings[i];
+ desc = &priv->ap_td0_rings[i];
kfree(desc->td_info);
}
@@ -761,7 +761,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
curr = priv->td1_pool_dma;
for (i = 0; i < priv->opts.tx_descs[1];
i++, curr += sizeof(struct vnt_tx_desc)) {
- desc = &priv->apTD1Rings[i];
+ desc = &priv->ap_td1_rings[i];
desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
if (!desc->td_info) {
ret = -ENOMEM;
@@ -771,19 +771,19 @@ static int device_init_td1_ring(struct vnt_private *priv)
desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD1Rings[(i + 1) % priv->opts.tx_descs[1]]);
+ desc->next = &(priv->ap_td1_rings[(i + 1) % priv->opts.tx_descs[1]]);
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
}
if (i > 0)
- priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
- priv->tail_td[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
+ priv->ap_td1_rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
+ priv->tail_td[1] = priv->apCurrTD[1] = &priv->ap_td1_rings[0];
return 0;
err_free_desc:
while (i--) {
- desc = &priv->apTD1Rings[i];
+ desc = &priv->ap_td1_rings[i];
kfree(desc->td_info);
}
@@ -795,7 +795,7 @@ static void device_free_td0_ring(struct vnt_private *priv)
int i;
for (i = 0; i < priv->opts.tx_descs[0]; i++) {
- struct vnt_tx_desc *desc = &priv->apTD0Rings[i];
+ struct vnt_tx_desc *desc = &priv->ap_td0_rings[i];
struct vnt_td_info *td_info = desc->td_info;
dev_kfree_skb(td_info->skb);
@@ -808,7 +808,7 @@ static void device_free_td1_ring(struct vnt_private *priv)
int i;
for (i = 0; i < priv->opts.tx_descs[1]; i++) {
- struct vnt_tx_desc *desc = &priv->apTD1Rings[i];
+ struct vnt_tx_desc *desc = &priv->ap_td1_rings[i];
struct vnt_td_info *td_info = desc->td_info;
dev_kfree_skb(td_info->skb);
@@ -1140,7 +1140,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
PSbIsNextTBTTWakeUp((void *)priv);
if ((priv->op_mode == NL80211_IFTYPE_AP ||
- priv->op_mode == NL80211_IFTYPE_ADHOC) &&
+ priv->op_mode == NL80211_IFTYPE_ADHOC) &&
priv->vif->bss_conf.enable_beacon)
MACvOneShotTimer1MicroSec(priv,
(priv->vif->bss_conf.beacon_int -
@@ -1535,7 +1535,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->op_mode != NL80211_IFTYPE_AP) {
if (vif->cfg.assoc && conf->beacon_rate) {
card_update_tsf(priv, conf->beacon_rate->hw_value,
- conf->sync_tsf);
+ conf->sync_tsf);
card_set_beacon_period(priv, conf->beacon_int);
@@ -1763,7 +1763,7 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
priv->memaddr = pci_resource_start(pcid, 0);
priv->ioaddr = pci_resource_start(pcid, 1);
priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
- 256);
+ 256);
if (!priv->port_offset) {
dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
device_free_info(priv);
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index acf931c3f5fd..a33af2852227 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -537,9 +537,9 @@
/*--------------------- Export Macros ------------------------------*/
-#define VT6655_MAC_SELECT_PAGE0(iobase) iowrite8(0, iobase + MAC_REG_PAGE1SEL)
+#define VT6655_MAC_SELECT_PAGE0(iobase) iowrite8(0, (iobase) + MAC_REG_PAGE1SEL)
-#define VT6655_MAC_SELECT_PAGE1(iobase) iowrite8(1, iobase + MAC_REG_PAGE1SEL)
+#define VT6655_MAC_SELECT_PAGE1(iobase) iowrite8(1, (iobase) + MAC_REG_PAGE1SEL)
#define MAKEWORD(lb, hb) \
((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
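
Parenthesizing iobase in these macros guards against argument expressions that bind looser than +. A minimal sketch of the failure mode the patch prevents (register offset and callers are hypothetical):

    #define REG_OFF 0x17

    /* Unparenthesized: a ?: argument regroups after expansion. */
    #define SELECT_BAD(iobase)  iowrite8(0, iobase + REG_OFF)
    /* Parenthesized, as in the patch: the intended grouping is kept. */
    #define SELECT_OK(iobase)   iowrite8(0, (iobase) + REG_OFF)

    /*
     * SELECT_BAD(use_alt ? alt_base : base) expands to
     *   iowrite8(0, use_alt ? alt_base : base + REG_OFF);
     * i.e. the offset is added only on the 'base' arm, whereas
     * SELECT_OK() adds REG_OFF to whichever base was selected.
     */
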
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 5e5ed582c35e..3705cb1e87b6 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -493,9 +493,9 @@ s_uFillDataHead(
buf->duration_a = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_b = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
- pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
+ pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_a_f0 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
+ wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_a_f1 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
@@ -520,7 +520,7 @@ s_uFillDataHead(
buf->duration_f0 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->duration_f1 = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
+ wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption));
buf->time_stamp_off = vnt_time_stamp_off(pDevice, wCurrentRate);
return buf->duration;
}
@@ -1375,8 +1375,8 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
/* Get Duration and TimeStampOff */
short_head->duration =
cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
- frame_size, PK_TYPE_11A, current_rate,
- false, 0, 0, 1, AUTO_FB_NONE));
+ frame_size, PK_TYPE_11A, current_rate,
+ false, 0, 0, 1, AUTO_FB_NONE));
short_head->time_stamp_off =
vnt_time_stamp_off(priv, current_rate);
@@ -1391,8 +1391,8 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
/* Get Duration and TimeStampOff */
short_head->duration =
cpu_to_le16((u16)s_uGetDataDuration(priv, DATADUR_B,
- frame_size, PK_TYPE_11B, current_rate,
- false, 0, 0, 1, AUTO_FB_NONE));
+ frame_size, PK_TYPE_11B, current_rate,
+ false, 0, 0, 1, AUTO_FB_NONE));
short_head->time_stamp_off =
vnt_time_stamp_off(priv, current_rate);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 0c997a08adec..873411e95ed2 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -15,7 +15,6 @@ struct kref;
struct sockaddr_storage;
extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
-extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
@@ -35,7 +34,6 @@ extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *);
extern int iscsit_logout_closesession(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_logout_closeconnection(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *, struct iscsit_conn *);
-extern int iscsit_send_async_msg(struct iscsit_conn *, u16, u8, u8);
extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *, bool recovery);
extern void iscsit_thread_get_cpumask(struct iscsit_conn *);
extern int iscsi_target_tx_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 3ca2f232b387..e8760735486b 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -24,6 +24,5 @@ extern int iscsit_start_kthreads(struct iscsit_conn *);
extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsit_conn *, u8);
extern void iscsi_target_login_sess_out(struct iscsit_conn *, bool, bool);
extern int iscsi_target_login_thread(void *);
-extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index 41c3db3ddeaa..e60a46d34835 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -15,8 +15,6 @@ extern int extract_param(const char *, const char *, unsigned int, char *,
unsigned char *);
extern int iscsi_target_check_login_request(struct iscsit_conn *,
struct iscsi_login *);
-extern int iscsi_target_get_initial_payload(struct iscsit_conn *,
- struct iscsi_login *);
extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *,
struct iscsi_login *);
extern int iscsi_target_start_negotiation(
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 71d067f62177..d44d09f2dde9 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -24,12 +24,7 @@ extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_
int);
extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
-extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
- struct iscsi_portal_group *, const char *, u32);
-extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
- struct se_node_acl *);
extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsit_session *);
-extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
struct sockaddr_storage *, struct iscsi_tpg_np *,
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 24b8e577575a..336da4fb0a77 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -17,7 +17,6 @@ extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsit_cmd *, u32, u32);
extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *);
extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsit_cmd *);
extern void iscsit_free_r2ts_from_list(struct iscsit_cmd *);
-extern struct iscsit_cmd *iscsit_alloc_cmd(struct iscsit_conn *, gfp_t);
extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsit_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *);
@@ -34,7 +33,6 @@ extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *, struct iscsit
extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *);
extern int iscsit_add_cmd_to_response_queue(struct iscsit_cmd *, struct iscsit_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *);
-extern void iscsit_remove_cmd_from_tx_queues(struct iscsit_cmd *, struct iscsit_conn *);
extern bool iscsit_conn_all_queues_empty(struct iscsit_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *);
extern void iscsit_release_cmd(struct iscsit_cmd *);
@@ -64,9 +62,6 @@ extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
extern void iscsit_print_session_params(struct iscsit_session *);
-extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
-extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
-extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
extern int rx_data(struct iscsit_conn *, struct kvec *, int, int);
extern int tx_data(struct iscsit_conn *, struct kvec *, int, int);
extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8);
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index 4b4a4d63e61f..cb149bcdd7d5 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -564,7 +564,6 @@ static const struct file_operations acpi_thermal_rel_fops = {
.open = acpi_thermal_rel_open,
.release = acpi_thermal_rel_release,
.unlocked_ioctl = acpi_thermal_rel_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice acpi_thermal_rel_misc_device = {
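
Dropping the explicit no_llseek entry tracks the kernel-wide removal of that helper: seekability is now derived from whether ->llseek is populated at open time, so a NULL field is what marks the device non-seekable. A hedged sketch of the resulting shape (handler names are hypothetical):

    /*
     * Sketch only, assuming the current VFS convention: FMODE_LSEEK is
     * set at open time only when ->llseek is non-NULL, so leaving the
     * field unset makes lseek() fail with -ESPIPE and no no_llseek stub
     * is needed.
     */
    static const struct file_operations demo_fops = {
            .owner          = THIS_MODULE,
            .open           = demo_open,
            .release        = demo_release,
            .unlocked_ioctl = demo_ioctl,
            /* .llseek left NULL: the device node is non-seekable */
    };
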
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index c9b6bb46111c..d2a0054217da 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -32,40 +32,20 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
goto out_put;
/*
- * Try to find physical device walking upwards in the hierarchy.
- * We need to do this because the xHCI driver might not yet be
- * bound so the USB3 SuperSpeed ports are not yet created.
+ * Ignore USB3 ports here as the USB core will set up device links
+ * between tunneled USB3 devices and the NHI host during USB device
+ * creation. USB3 ports might not even have a physical device yet if
+ * the xHCI driver isn't bound.
*/
- do {
- dev = acpi_get_first_physical_node(adev);
- if (dev)
- break;
-
- adev = acpi_dev_parent(adev);
- } while (adev);
-
- /*
- * Check that the device is PCIe. This is because USB3
- * SuperSpeed ports have this property and they are not power
- * managed with the xHCI and the SuperSpeed hub so we create the
- * link from xHCI instead.
- */
- while (dev && !dev_is_pci(dev))
- dev = dev->parent;
-
- if (!dev)
+ dev = acpi_get_first_physical_node(adev);
+ if (!dev || !dev_is_pci(dev))
goto out_put;
- /*
- * Check that this actually matches the type of device we
- * expect. It should either be xHCI or PCIe root/downstream
- * port.
- */
+ /* Check that this matches a PCIe root/downstream port. */
pdev = to_pci_dev(dev);
- if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
- (pci_is_pcie(pdev) &&
- (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
+ if (pci_is_pcie(pdev) &&
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)) {
const struct device_link *link;
/*
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 9ed4bb2e8d05..350310bd0fee 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
@@ -34,6 +35,14 @@
#define COUNTER_SET_LEN 3
+/*
+ * The USB4 spec doesn't specify a dwell range; a range of 100 ms to
+ * 500 ms was probed and found to give good results.
+ */
+#define MIN_DWELL_TIME 100 /* ms */
+#define MAX_DWELL_TIME 500 /* ms */
+#define DWELL_SAMPLE_INTERVAL 10
+
/* Sideband registers and their sizes as defined in the USB4 spec */
struct sb_reg {
unsigned int reg;
@@ -394,8 +403,15 @@ out:
* @ber_level: Current BER level contour value
* @voltage_steps: Number of mandatory voltage steps
* @max_voltage_offset: Maximum mandatory voltage offset (in mV)
+ * @voltage_steps_optional_range: Number of voltage steps for optional range
+ * @max_voltage_offset_optional_range: Maximum voltage offset for the optional
+ * range (in mV)
* @time_steps: Number of time margin steps
* @max_time_offset: Maximum time margin offset (in mUI)
+ * @voltage_time_offset: Offset for voltage / time for software margining
+ * @dwell_time: Dwell time for software margining (in ms)
+ * @error_counter: Error counter operation for software margining
+ * @optional_voltage_offset_range: Enable optional extended voltage range
* @software: %true if software margining is used instead of hardware
* @time: %true if time margining is used instead of voltage
* @right_high: %false if left/low margin test is performed, %true if
@@ -414,13 +430,37 @@ struct tb_margining {
unsigned int ber_level;
unsigned int voltage_steps;
unsigned int max_voltage_offset;
+ unsigned int voltage_steps_optional_range;
+ unsigned int max_voltage_offset_optional_range;
unsigned int time_steps;
unsigned int max_time_offset;
+ unsigned int voltage_time_offset;
+ unsigned int dwell_time;
+ enum usb4_margin_sw_error_counter error_counter;
+ bool optional_voltage_offset_range;
bool software;
bool time;
bool right_high;
};
+static int margining_modify_error_counter(struct tb_margining *margining,
+ u32 lanes, enum usb4_margin_sw_error_counter error_counter)
+{
+ struct usb4_port_margining_params params = { 0 };
+ struct tb_port *port = margining->port;
+ u32 result;
+
+ if (error_counter != USB4_MARGIN_SW_ERROR_COUNTER_CLEAR &&
+ error_counter != USB4_MARGIN_SW_ERROR_COUNTER_STOP)
+ return -EOPNOTSUPP;
+
+ params.error_counter = error_counter;
+ params.lanes = lanes;
+
+ return usb4_port_sw_margin(port, margining->target, margining->index,
+ &params, &result);
+}
+
static bool supports_software(const struct tb_margining *margining)
{
return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
@@ -454,6 +494,12 @@ independent_time_margins(const struct tb_margining *margining)
return FIELD_GET(USB4_MARGIN_CAP_1_TIME_INDP_MASK, margining->caps[1]);
}
+static bool
+supports_optional_voltage_offset_range(const struct tb_margining *margining)
+{
+ return margining->caps[0] & USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT;
+}
+
static ssize_t
margining_ber_level_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -553,6 +599,14 @@ static int margining_caps_show(struct seq_file *s, void *not_used)
margining->voltage_steps);
seq_printf(s, "# maximum voltage offset: %u mV\n",
margining->max_voltage_offset);
+ seq_printf(s, "# optional voltage offset range support: %s\n",
+ str_yes_no(supports_optional_voltage_offset_range(margining)));
+ if (supports_optional_voltage_offset_range(margining)) {
+ seq_printf(s, "# voltage margin steps, optional range: %u\n",
+ margining->voltage_steps_optional_range);
+ seq_printf(s, "# maximum voltage offset, optional range: %u mV\n",
+ margining->max_voltage_offset_optional_range);
+ }
switch (independent_voltage_margins(margining)) {
case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
@@ -667,6 +721,198 @@ static int margining_lanes_show(struct seq_file *s, void *not_used)
}
DEBUGFS_ATTR_RW(margining_lanes);
+static ssize_t
+margining_voltage_time_offset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ unsigned int max_margin;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ if (margining->time)
+ max_margin = margining->time_steps;
+ else if (margining->optional_voltage_offset_range)
+ max_margin = margining->voltage_steps_optional_range;
+ else
+ max_margin = margining->voltage_steps;
+
+ margining->voltage_time_offset = clamp(val, 0, max_margin);
+ }
+
+ return count;
+}
+
+static int margining_voltage_time_offset_show(struct seq_file *s,
+ void *not_used)
+{
+ const struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ seq_printf(s, "%d\n", margining->voltage_time_offset);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_voltage_time_offset);
+
+static ssize_t
+margining_error_counter_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum usb4_margin_sw_error_counter error_counter;
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (!strcmp(buf, "nop"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_NOP;
+ else if (!strcmp(buf, "clear"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
+ else if (!strcmp(buf, "start"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_START;
+ else if (!strcmp(buf, "stop"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_STOP;
+ else
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ margining->error_counter = error_counter;
+ }
+
+ return count;
+}
+
+static int margining_error_counter_show(struct seq_file *s, void *not_used)
+{
+ const struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ switch (margining->error_counter) {
+ case USB4_MARGIN_SW_ERROR_COUNTER_NOP:
+ seq_puts(s, "[nop] clear start stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_CLEAR:
+ seq_puts(s, "nop [clear] start stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_START:
+ seq_puts(s, "nop clear [start] stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_STOP:
+ seq_puts(s, "nop clear start [stop]\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_error_counter);
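The show callback above uses the common debugfs selector idiom of printing every accepted keyword with the active one in brackets. A small user-space helper, purely illustrative and not part of this patch, that extracts the active token from such a line:

    #include <stdio.h>
    #include <string.h>

    /* Return the token inside "[...]" from an "a [b] c" style line, or NULL. */
    static char *active_option(char *line)
    {
        char *open = strchr(line, '[');
        char *close = open ? strchr(open, ']') : NULL;

        if (!close)
            return NULL;
        *close = '\0';
        return open + 1;
    }

    int main(void)
    {
        char line[] = "nop [clear] start stop";

        printf("active: %s\n", active_option(line)); /* prints "clear" */
        return 0;
    }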
+
+static ssize_t
+margining_dwell_time_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ margining->dwell_time = clamp(val, MIN_DWELL_TIME, MAX_DWELL_TIME);
+ }
+
+ return count;
+}
+
+static int margining_dwell_time_show(struct seq_file *s, void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ seq_printf(s, "%d\n", margining->dwell_time);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_dwell_time);
+
+static ssize_t
+margining_optional_voltage_offset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ bool val;
+ int ret;
+
+ ret = kstrtobool_from_user(user_buf, count, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ margining->optional_voltage_offset_range = val;
+ }
+
+ return count;
+}
+
+static int margining_optional_voltage_offset_show(struct seq_file *s,
+ void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ seq_printf(s, "%u\n", margining->optional_voltage_offset_range);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_optional_voltage_offset);
+
static ssize_t margining_mode_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -739,6 +985,51 @@ static int margining_mode_show(struct seq_file *s, void *not_used)
}
DEBUGFS_ATTR_RW(margining_mode);
+static int margining_run_sw(struct tb_margining *margining,
+ struct usb4_port_margining_params *params)
+{
+ u32 nsamples = margining->dwell_time / DWELL_SAMPLE_INTERVAL;
+ int ret, i;
+
+ ret = usb4_port_sw_margin(margining->port, margining->target, margining->index,
+ params, margining->results);
+ if (ret)
+ goto out_stop;
+
+ for (i = 0; i <= nsamples; i++) {
+ u32 errors = 0;
+
+ ret = usb4_port_sw_margin_errors(margining->port, margining->target,
+ margining->index, &margining->results[1]);
+ if (ret)
+ break;
+
+ if (margining->lanes == USB4_MARGIN_SW_LANE_0)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGIN_SW_LANE_1)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGIN_SW_ALL_LANES)
+ errors = margining->results[1];
+
+ /* Any errors stop the test */
+ if (errors)
+ break;
+
+ fsleep(DWELL_SAMPLE_INTERVAL * USEC_PER_MSEC);
+ }
+
+out_stop:
+ /*
+ * Stop the counters but don't clear them to allow the
+ * different error counter configurations.
+ */
+ margining_modify_error_counter(margining, margining->lanes,
+ USB4_MARGIN_SW_ERROR_COUNTER_STOP);
+ return ret;
+}
+
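margining_run_sw() turns the configured dwell time into a fixed-rate polling loop: start the margin, sample the error counters every DWELL_SAMPLE_INTERVAL ms, stop early on the first error, and always stop the counters on the way out. The same sample-until-error-or-timeout shape in a self-contained sketch, where read_errors() is a stand-in for the sideband read, not a real API:

    #include <stdio.h>
    #include <unistd.h>

    #define DWELL_SAMPLE_INTERVAL_MS 10

    /* Stand-in for reading the lane error counters over the sideband. */
    static unsigned int read_errors(void)
    {
        return 0; /* pretend the lane stays clean */
    }

    static int run_sw_margin(unsigned int dwell_ms)
    {
        unsigned int nsamples = dwell_ms / DWELL_SAMPLE_INTERVAL_MS;

        for (unsigned int i = 0; i <= nsamples; i++) {
            unsigned int errors = read_errors();

            if (errors) {
                printf("errors after %u ms, stopping early\n",
                       i * DWELL_SAMPLE_INTERVAL_MS);
                return -1;
            }
            usleep(DWELL_SAMPLE_INTERVAL_MS * 1000);
        }
        printf("dwelled %u ms with no errors\n", dwell_ms);
        return 0;
    }

    int main(void)
    {
        return run_sw_margin(100) ? 1 : 0;
    }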
static int margining_run_write(void *data, u64 val)
{
struct tb_margining *margining = data;
@@ -779,36 +1070,43 @@ static int margining_run_write(void *data, u64 val)
clx = ret;
}
+ /* Clear the results */
+ memset(margining->results, 0, sizeof(margining->results));
+
if (margining->software) {
+ struct usb4_port_margining_params params = {
+ .error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
+ .lanes = margining->lanes,
+ .time = margining->time,
+ .voltage_time_offset = margining->voltage_time_offset,
+ .right_high = margining->right_high,
+ .optional_voltage_offset_range = margining->optional_voltage_offset_range,
+ };
+
tb_port_dbg(port,
"running software %s lane margining for %s lanes %u\n",
margining->time ? "time" : "voltage", dev_name(dev),
margining->lanes);
- ret = usb4_port_sw_margin(port, margining->target, margining->index,
- margining->lanes, margining->time,
- margining->right_high,
- USB4_MARGIN_SW_COUNTER_CLEAR);
- if (ret)
- goto out_clx;
- ret = usb4_port_sw_margin_errors(port, margining->target,
- margining->index,
- &margining->results[0]);
+ ret = margining_run_sw(margining, &params);
} else {
+ struct usb4_port_margining_params params = {
+ .ber_level = margining->ber_level,
+ .lanes = margining->lanes,
+ .time = margining->time,
+ .right_high = margining->right_high,
+ .optional_voltage_offset_range = margining->optional_voltage_offset_range,
+ };
+
tb_port_dbg(port,
"running hardware %s lane margining for %s lanes %u\n",
margining->time ? "time" : "voltage", dev_name(dev),
margining->lanes);
- /* Clear the results */
- margining->results[0] = 0;
- margining->results[1] = 0;
- ret = usb4_port_hw_margin(port, margining->target, margining->index,
- margining->lanes, margining->ber_level,
- margining->time, margining->right_high,
+
+ ret = usb4_port_hw_margin(port, margining->target, margining->index, &params,
margining->results);
}
-out_clx:
if (down_sw)
tb_switch_clx_enable(down_sw, clx);
out_unlock:
@@ -837,6 +1135,13 @@ static ssize_t margining_results_write(struct file *file,
margining->results[0] = 0;
margining->results[1] = 0;
+ if (margining->software) {
+ /* Clear the error counters */
+ margining_modify_error_counter(margining,
+ USB4_MARGIN_SW_ALL_LANES,
+ USB4_MARGIN_SW_ERROR_COUNTER_CLEAR);
+ }
+
mutex_unlock(&tb->lock);
return count;
}
@@ -852,6 +1157,8 @@ static void voltage_margin_show(struct seq_file *s,
if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
+ if (margining->optional_voltage_offset_range)
+ seq_puts(s, " optional voltage offset range enabled\n");
}
static void time_margin_show(struct seq_file *s,
@@ -924,6 +1231,24 @@ static int margining_results_show(struct seq_file *s, void *not_used)
voltage_margin_show(s, margining, val);
}
}
+ } else {
+ u32 lane_errors, result;
+
+ seq_printf(s, "0x%08x\n", margining->results[1]);
+ result = FIELD_GET(USB4_MARGIN_SW_LANES_MASK, margining->results[0]);
+
+ if (result == USB4_MARGIN_SW_LANE_0 ||
+ result == USB4_MARGIN_SW_ALL_LANES) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 0 errors: %u\n", lane_errors);
+ }
+ if (result == USB4_MARGIN_SW_LANE_1 ||
+ result == USB4_MARGIN_SW_ALL_LANES) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 1 errors: %u\n", lane_errors);
+ }
}
mutex_unlock(&tb->lock);
@@ -1091,6 +1416,15 @@ static struct tb_margining *margining_alloc(struct tb_port *port,
val = FIELD_GET(USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK, margining->caps[0]);
margining->max_voltage_offset = 74 + val * 2;
+ if (supports_optional_voltage_offset_range(margining)) {
+ val = FIELD_GET(USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK,
+ margining->caps[0]);
+ margining->voltage_steps_optional_range = val;
+ val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK,
+ margining->caps[1]);
+ margining->max_voltage_offset_optional_range = 74 + val * 2;
+ }
+
if (supports_time(margining)) {
val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_STEPS_MASK, margining->caps[1]);
margining->time_steps = val;
@@ -1127,6 +1461,22 @@ static struct tb_margining *margining_alloc(struct tb_port *port,
independent_time_margins(margining) == USB4_MARGIN_CAP_1_TIME_LR))
debugfs_create_file("margin", 0600, dir, margining,
&margining_margin_fops);
+
+ margining->error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
+ margining->dwell_time = MIN_DWELL_TIME;
+
+ if (supports_optional_voltage_offset_range(margining))
+ debugfs_create_file("optional_voltage_offset", DEBUGFS_MODE, dir, margining,
+ &margining_optional_voltage_offset_fops);
+
+ if (supports_software(margining)) {
+ debugfs_create_file("voltage_time_offset", DEBUGFS_MODE, dir, margining,
+ &margining_voltage_time_offset_fops);
+ debugfs_create_file("error_counter", DEBUGFS_MODE, dir, margining,
+ &margining_error_counter_fops);
+ debugfs_create_file("dwell_time", DEBUGFS_MODE, dir, margining,
+ &margining_dwell_time_fops);
+ }
return margining;
}
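With the new files registered, software margining can be driven entirely from user space through debugfs. A minimal sketch of such a driver program; the directory path is a hypothetical example (the real location depends on the domain, switch and port numbering), only attribute names visible in this patch are used, and "run" is assumed to be the pre-existing trigger attribute:

    #include <stdio.h>

    /* Hypothetical debugfs directory for one USB4 port; adjust to taste. */
    #define MARGINING_DIR "/sys/kernel/debug/thunderbolt/0-1/port1/margining"

    static int write_attr(const char *attr, const char *val)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", MARGINING_DIR, attr);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
    }

    int main(void)
    {
        /* 25-step offset, 200 ms dwell, counters cleared before the run. */
        if (write_attr("voltage_time_offset", "25") ||
            write_attr("dwell_time", "200") ||
            write_attr("error_counter", "clear") ||
            write_attr("run", "1")) {
            perror("margining");
            return 1;
        }
        return 0;
    }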
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index 2a88edfc97b2..dbcad25ead50 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -57,6 +57,9 @@ enum usb4_sb_opcode {
#define USB4_MARGIN_CAP_0_TIME BIT(5)
#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6)
#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13)
+#define USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT BIT(19)
+#define USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK GENMASK(26, 20)
+#define USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK GENMASK(7, 0)
#define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8)
#define USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9)
#define USB4_MARGIN_CAP_1_TIME_MIN 0x0
@@ -72,6 +75,7 @@ enum usb4_sb_opcode {
#define USB4_MARGIN_HW_RH BIT(4)
#define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5)
#define USB4_MARGIN_HW_BER_SHIFT 5
+#define USB4_MARGIN_HW_OPT_VOLTAGE BIT(10)
/* Applicable to all margin values */
#define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0)
@@ -82,13 +86,17 @@ enum usb4_sb_opcode {
#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24
/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */
+#define USB4_MARGIN_SW_LANES_MASK GENMASK(2, 0)
+#define USB4_MARGIN_SW_LANE_0 0x0
+#define USB4_MARGIN_SW_LANE_1 0x1
+#define USB4_MARGIN_SW_ALL_LANES 0x7
#define USB4_MARGIN_SW_TIME BIT(3)
#define USB4_MARGIN_SW_RH BIT(4)
+#define USB4_MARGIN_SW_OPT_VOLTAGE BIT(5)
+#define USB4_MARGIN_SW_VT_MASK GENMASK(12, 6)
#define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13)
-#define USB4_MARGIN_SW_COUNTER_SHIFT 13
-#define USB4_MARGIN_SW_COUNTER_NOP 0x0
-#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1
-#define USB4_MARGIN_SW_COUNTER_START 0x2
-#define USB4_MARGIN_SW_COUNTER_STOP 0x3
+
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK GENMASK(3, 0)
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK GENMASK(7, 4)
#endif
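The two new lane-error masks pack 4-bit counters side by side in the result word; the driver pulls them out with FIELD_GET(). The same decode, open-coded so it compiles outside the kernel, with the mask values copied from the defines above:

    #include <stdio.h>
    #include <stdint.h>

    #define ERR_LANE_0_MASK 0x0fu /* GENMASK(3, 0) */
    #define ERR_LANE_1_MASK 0xf0u /* GENMASK(7, 4) */

    /* Open-coded FIELD_GET(): shift the field down to the mask's LSB. */
    static uint32_t field_get(uint32_t mask, uint32_t val)
    {
        return (val & mask) / (mask & -mask);
    }

    int main(void)
    {
        uint32_t results1 = 0x35; /* lane 1 saw 3 errors, lane 0 saw 5 */

        printf("lane 0 errors: %u\n", field_get(ERR_LANE_0_MASK, results1));
        printf("lane 1 errors: %u\n", field_get(ERR_LANE_1_MASK, results1));
        return 0;
    }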
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index b47f7873c847..6737188f2581 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1353,14 +1353,48 @@ int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index
int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
u8 index, u8 reg, const void *buf, u8 size);
+/**
+ * enum usb4_margin_sw_error_counter - Software margining error counter operation
+ * @USB4_MARGIN_SW_ERROR_COUNTER_NOP: No change in counter setup
+ * @USB4_MARGIN_SW_ERROR_COUNTER_CLEAR: Set the error counter to 0, enable counter
+ * @USB4_MARGIN_SW_ERROR_COUNTER_START: Start counter, count from last value
+ * @USB4_MARGIN_SW_ERROR_COUNTER_STOP: Stop counter, do not clear value
+ */
+enum usb4_margin_sw_error_counter {
+ USB4_MARGIN_SW_ERROR_COUNTER_NOP,
+ USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
+ USB4_MARGIN_SW_ERROR_COUNTER_START,
+ USB4_MARGIN_SW_ERROR_COUNTER_STOP,
+};
+
+/**
+ * struct usb4_port_margining_params - USB4 margining parameters
+ * @error_counter: Error counter operation for software margining
+ * @ber_level: Current BER level contour value
+ * @lanes: %0, %1 or %7 (all)
+ * @voltage_time_offset: Offset for voltage / time for software margining
+ * @optional_voltage_offset_range: Enable optional extended voltage range
+ * @right_high: %false if left/low margin test is performed, %true if right/high
+ * @time: %true if time margining is used instead of voltage
+ */
+struct usb4_port_margining_params {
+ enum usb4_margin_sw_error_counter error_counter;
+ u32 ber_level;
+ u32 lanes;
+ u32 voltage_time_offset;
+ bool optional_voltage_offset_range;
+ bool right_high;
+ bool time;
+};
+
int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
u8 index, u32 *caps);
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
- u8 index, unsigned int lanes, unsigned int ber_level,
- bool timing, bool right_high, u32 *results);
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results);
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
- u8 index, unsigned int lanes, bool timing,
- bool right_high, u32 counter);
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results);
int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
u8 index, u32 *errors);
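Folding the growing positional argument lists into struct usb4_port_margining_params is what lets the software and hardware paths keep one stable prototype while new knobs arrive. A schematic of the call-site pattern with simplified types, not the driver's actual signatures:

    #include <stdbool.h>
    #include <stdio.h>

    struct margin_params {
        unsigned int lanes;
        unsigned int voltage_time_offset;
        bool time;
        bool right_high;
        bool optional_voltage_offset_range;
    };

    /* Before: run_margin(lanes, time, right_high, counter, ...) kept
     * growing. After: one pointer; a new field doesn't touch callers. */
    static void run_margin(const struct margin_params *p)
    {
        printf("lanes=%u offset=%u time=%d\n",
               p->lanes, p->voltage_time_offset, p->time);
    }

    int main(void)
    {
        /* Designated initializers leave unset knobs at a safe zero. */
        struct margin_params p = {
            .lanes = 7,
            .voltage_time_offset = 25,
        };

        run_margin(&p);
        return 0;
    }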
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 4d83b65afb5b..0a9b4aeb3fa1 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1653,31 +1653,31 @@ int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
* @port: USB4 port
* @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
- * @lanes: Which lanes to run (must match the port capabilities). Can be
- * %0, %1 or %7.
- * @ber_level: BER level contour value
- * @timing: Perform timing margining instead of voltage
- * @right_high: Use Right/high margin instead of left/low
+ * @params: Parameters for USB4 hardware margining
* @results: Array with at least two elements to hold the results
*
* Runs hardware lane margining on USB4 port and returns the result in
* @results.
*/
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
- u8 index, unsigned int lanes, unsigned int ber_level,
- bool timing, bool right_high, u32 *results)
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results)
{
u32 val;
int ret;
- val = lanes;
- if (timing)
+ if (WARN_ON_ONCE(!params))
+ return -EINVAL;
+
+ val = params->lanes;
+ if (params->time)
val |= USB4_MARGIN_HW_TIME;
- if (right_high)
+ if (params->right_high)
val |= USB4_MARGIN_HW_RH;
- if (ber_level)
- val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
- USB4_MARGIN_HW_BER_MASK;
+ if (params->ber_level)
+ val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level);
+ if (params->optional_voltage_offset_range)
+ val |= USB4_MARGIN_HW_OPT_VOLTAGE;
ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
sizeof(val));
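The switch from an explicit shift-and-mask to FIELD_PREP() is behavior-preserving: FIELD_PREP(mask, v) shifts v up to the mask's lowest set bit and masks the result, while the kernel macro additionally type-checks its arguments at compile time. A compile-anywhere check of the equivalence, using the BER mask and shift from sb_regs.h:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BER_MASK  0x3e0u /* GENMASK(9, 5) */
    #define BER_SHIFT 5

    /* Open-coded FIELD_PREP(): shift the value up to the mask's LSB. */
    static uint32_t field_prep(uint32_t mask, uint32_t val)
    {
        return (val * (mask & -mask)) & mask;
    }

    int main(void)
    {
        for (uint32_t ber = 0; ber < 32; ber++)
            assert(field_prep(BER_MASK, ber) ==
                   ((ber << BER_SHIFT) & BER_MASK));
        printf("FIELD_PREP matches the old shift/mask for all BER values\n");
        return 0;
    }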
@@ -1698,38 +1698,46 @@ int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
* @port: USB4 port
* @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
- * @lanes: Which lanes to run (must match the port capabilities). Can be
- * %0, %1 or %7.
- * @timing: Perform timing margining instead of voltage
- * @right_high: Use Right/high margin instead of left/low
- * @counter: What to do with the error counter
+ * @params: Parameters for USB4 software margining
+ * @results: Where to store the operation completion data word
*
* Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 on
* success and negative errno otherwise.
*/
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
- u8 index, unsigned int lanes, bool timing,
- bool right_high, u32 counter)
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results)
{
u32 val;
int ret;
- val = lanes;
- if (timing)
+ if (WARN_ON_ONCE(!params))
+ return -EINVAL;
+
+ val = params->lanes;
+ if (params->time)
val |= USB4_MARGIN_SW_TIME;
- if (right_high)
+ if (params->optional_voltage_offset_range)
+ val |= USB4_MARGIN_SW_OPT_VOLTAGE;
+ if (params->right_high)
val |= USB4_MARGIN_SW_RH;
- val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
- USB4_MARGIN_SW_COUNTER_MASK;
+ val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter);
+ val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset);
ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
sizeof(val));
if (ret)
return ret;
- return usb4_port_sb_op(port, target, index,
- USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
+ ret = usb4_port_sb_op(port, target, index,
+ USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
+ sizeof(*results));
}
/**
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 22e1bc4d8a66..b35c44caf3d7 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -303,7 +303,7 @@ int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr)
pr_devel("HVSI@%x: %s DTR...\n", pv->termno,
dtr ? "Setting" : "Clearing");
- ctrl.hdr.type = VS_CONTROL_PACKET_HEADER,
+ ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
ctrl.hdr.len = sizeof(struct hvsi_control);
ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
ctrl.mask = cpu_to_be32(HVSI_TSDTR);
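The one-character fix above (and the identical one in the mxser hunk below) swaps a trailing comma for a semicolon. Thanks to the comma operator the original even compiled and behaved the same, which is how such typos survive; they only bite once the statement pair is placed under a conditional. A short demonstration:

    #include <stdio.h>

    int main(void)
    {
        int a, b;

        a = 1,  /* comma operator: one statement with the next line */
        b = 2;
        printf("a=%d b=%d\n", a, b); /* a=1 b=2, same as with ';' */

        /* Guard only the first assignment and the comma drags the
         * second one under the if as well: */
        a = b = 0;
        if (0)
            a = 1,
            b = 2;
        printf("a=%d b=%d\n", a, b); /* both stay 0: b = 2 was skipped */
        return 0;
    }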
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 5b97e420a95f..4d45eca4929a 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -208,9 +208,6 @@ static const struct {
};
#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
-
-/* driver_data correspond to the lines in the structure above
- see also ISA probe function before you change something */
static const struct pci_device_id mxser_pcibrds[] = {
{ PCI_DEVICE_DATA(MOXA, C168, 8) },
{ PCI_DEVICE_DATA(MOXA, C104, 4) },
@@ -986,7 +983,7 @@ static int mxser_get_serial_info(struct tty_struct *tty,
ss->baud_base = MXSER_BAUD_BASE;
ss->close_delay = close_delay;
ss->closing_wait = closing_wait;
- ss->custom_divisor = MXSER_CUSTOM_DIVISOR,
+ ss->custom_divisor = MXSER_CUSTOM_DIVISOR;
mutex_unlock(&port->mutex);
return 0;
}
@@ -1773,8 +1770,6 @@ static void mxser_initbrd(struct mxser_board *brd, bool high_baud)
mxser_process_txrx_fifo(info);
- info->port.close_delay = 5 * HZ / 10;
- info->port.closing_wait = 30 * HZ;
spin_lock_init(&info->slock);
/* before set INT ISR, disable all int */
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 8913cdd675f6..ebf0bbc2cff2 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -529,7 +529,7 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
bool found = false;
for_each_available_child_of_node(ctrl->dev.of_node, node) {
- if (!of_get_property(node, "compatible", NULL))
+ if (!of_property_present(node, "compatible"))
continue;
dev_dbg(&ctrl->dev, "adding child %pOF\n", node);
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 53d8eee9b1c8..25c201cfb91e 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -561,6 +561,7 @@ static const struct of_device_id aspeed_vuart_table[] = {
{ .compatible = "aspeed,ast2500-vuart" },
{ },
};
+MODULE_DEVICE_TABLE(of, aspeed_vuart_table);
static struct platform_driver aspeed_vuart_driver = {
.driver = {
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 121a5ce86050..d7a0f271263a 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk.h>
+#include <linux/console.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -213,11 +214,57 @@ static const struct acpi_device_id bcm2835aux_serial_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bcm2835aux_serial_acpi_match);
+static bool bcm2835aux_can_disable_clock(struct device *dev)
+{
+ struct bcm2835aux_data *data = dev_get_drvdata(dev);
+ struct uart_8250_port *up = serial8250_get_port(data->line);
+
+ if (device_may_wakeup(dev))
+ return false;
+
+ if (uart_console(&up->port) && !console_suspend_enabled)
+ return false;
+
+ return true;
+}
+
+static int bcm2835aux_suspend(struct device *dev)
+{
+ struct bcm2835aux_data *data = dev_get_drvdata(dev);
+
+ serial8250_suspend_port(data->line);
+
+ if (!bcm2835aux_can_disable_clock(dev))
+ return 0;
+
+ clk_disable_unprepare(data->clk);
+ return 0;
+}
+
+static int bcm2835aux_resume(struct device *dev)
+{
+ struct bcm2835aux_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ if (bcm2835aux_can_disable_clock(dev)) {
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
+ }
+
+ serial8250_resume_port(data->line);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm2835aux_dev_pm_ops, bcm2835aux_suspend, bcm2835aux_resume);
+
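The suspend path gates the UART clock only when that is provably safe: the device is not a wakeup source and is not an active console with console suspend disabled. The predicate is small enough to exhaust; a user-space rendering of the same truth table:

    #include <stdbool.h>
    #include <stdio.h>

    static bool can_disable_clock(bool may_wakeup, bool is_console,
                                  bool console_suspend_enabled)
    {
        if (may_wakeup)
            return false;
        if (is_console && !console_suspend_enabled)
            return false;
        return true;
    }

    int main(void)
    {
        for (int w = 0; w < 2; w++)
            for (int c = 0; c < 2; c++)
                for (int s = 0; s < 2; s++)
                    printf("wakeup=%d console=%d suspend_ok=%d -> gate: %d\n",
                           w, c, s, can_disable_clock(w, c, s));
        return 0;
    }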
static struct platform_driver bcm2835aux_serial_driver = {
.driver = {
.name = "bcm2835-aux-uart",
.of_match_table = bcm2835aux_serial_match,
.acpi_match_table = bcm2835aux_serial_acpi_match,
+ .pm = pm_ptr(&bcm2835aux_dev_pm_ops),
},
.probe = bcm2835aux_serial_probe,
.remove_new = bcm2835aux_serial_remove,
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 8a353e3cc3dd..d215c494ee24 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -89,7 +89,9 @@ int serial8250_tx_dma(struct uart_8250_port *p)
struct tty_port *tport = &p->port.state->port;
struct dma_async_tx_descriptor *desc;
struct uart_port *up = &p->port;
- struct scatterlist sg;
+ struct scatterlist *sg;
+ struct scatterlist sgl[2];
+ int i;
int ret;
if (dma->tx_running) {
@@ -110,18 +112,17 @@ int serial8250_tx_dma(struct uart_8250_port *p)
serial8250_do_prepare_tx_dma(p);
- sg_init_table(&sg, 1);
- /* kfifo can do more than one sg, we don't (quite yet) */
- ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1,
+ sg_init_table(sgl, ARRAY_SIZE(sgl));
+
+ ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, sgl, ARRAY_SIZE(sgl),
UART_XMIT_SIZE, dma->tx_addr);
- /* we already checked empty fifo above, so there should be something */
- if (WARN_ON_ONCE(ret != 1))
- return 0;
+ dma->tx_size = 0;
- dma->tx_size = sg_dma_len(&sg);
+ for_each_sg(sgl, sg, ret, i)
+ dma->tx_size += sg_dma_len(sg);
- desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1,
+ desc = dmaengine_prep_slave_sg(dma->txchan, sgl, ret,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
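Two scatterlist entries are needed because the kfifo behind the TX buffer is a ring: once the pending bytes wrap past the end of the buffer they map as two disjoint segments, which the old single-entry code could not express. The wrap-around split in plain array arithmetic, no kernel APIs:

    #include <stdio.h>

    #define FIFO_SIZE 16 /* stand-in for UART_XMIT_SIZE */

    /* Split [tail, tail+len) of a ring buffer into at most two segments. */
    static int ring_segments(unsigned int tail, unsigned int len,
                             unsigned int seg[2][2])
    {
        unsigned int first = len;

        if (tail + len > FIFO_SIZE)
            first = FIFO_SIZE - tail;

        seg[0][0] = tail;
        seg[0][1] = first;
        if (first == len)
            return 1;
        seg[1][0] = 0;
        seg[1][1] = len - first;
        return 2;
    }

    int main(void)
    {
        unsigned int seg[2][2];
        int n = ring_segments(12, 10, seg); /* wraps: 4 bytes, then 6 */

        for (int i = 0; i < n; i++)
            printf("segment %d: offset %u, len %u\n",
                   i, seg[i][0], seg[i][1]);
        return 0;
    }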
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 5a2520943dfd..b055d89cfb39 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -89,7 +89,7 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
unsigned int quot, unsigned int quot_frac)
{
dw8250_writel_ext(p, DW_UART_DLF, quot_frac);
- serial8250_do_set_divisor(p, baud, quot, quot_frac);
+ serial8250_do_set_divisor(p, baud, quot);
}
void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios,
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index e3f482fd3de4..6176083d0341 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -171,6 +171,17 @@ OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup);
OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup);
+static int __init early_serial8250_rs2_setup(struct earlycon_device *device,
+ const char *options)
+{
+ device->port.regshift = 2;
+
+ return early_serial8250_setup(device, options);
+}
+OF_EARLYCON_DECLARE(uart, "intel,xscale-uart", early_serial8250_rs2_setup);
+OF_EARLYCON_DECLARE(uart, "mrvl,mmp-uart", early_serial8250_rs2_setup);
+OF_EARLYCON_DECLARE(uart, "mrvl,pxa-uart", early_serial8250_rs2_setup);
+
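regshift = 2 encodes the register stride for these UARTs: the 8250 registers sit four bytes apart, so the MMIO offset of register N is N << regshift. The address math, using UART_LCR (register 3 in the standard 8250 map):

    #include <stdio.h>

    #define UART_LCR 3 /* line control register index in the 8250 map */

    static unsigned int reg_offset(unsigned int reg, unsigned int regshift)
    {
        return reg << regshift;
    }

    int main(void)
    {
        printf("regshift 0: LCR at byte offset %u\n", reg_offset(UART_LCR, 0));
        printf("regshift 2: LCR at byte offset %u\n", reg_offset(UART_LCR, 2));
        return 0;
    }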
#ifdef CONFIG_SERIAL_8250_OMAP
static int __init early_omap8250_setup(struct earlycon_device *device,
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 616128254bbd..b7a75db15249 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -500,7 +500,7 @@ static unsigned int xr17v35x_get_divisor(struct uart_port *p, unsigned int baud,
static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
unsigned int quot, unsigned int quot_frac)
{
- serial8250_do_set_divisor(p, baud, quot, quot_frac);
+ serial8250_do_set_divisor(p, baud, quot);
/* Preserve bits not related to baudrate; DLD[7:4]. */
quot_frac |= serial_port_in(p, 0x2) & 0xf0;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index afef1dd4ddf4..88b58f44e4e9 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -137,7 +137,6 @@ struct omap8250_priv {
atomic_t active;
bool is_suspending;
int wakeirq;
- int wakeups_enabled;
u32 latency;
u32 calc_latency;
struct pm_qos_request pm_qos_request;
@@ -1523,7 +1522,10 @@ static int omap8250_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- device_init_wakeup(&pdev->dev, true);
+ device_set_wakeup_capable(&pdev->dev, true);
+ if (of_property_read_bool(np, "wakeup-source"))
+ device_set_wakeup_enable(&pdev->dev, true);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -1581,7 +1583,7 @@ static int omap8250_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, up.port.irq, omap8250_irq, 0,
dev_name(&pdev->dev), priv);
if (ret < 0)
- return ret;
+ goto err;
priv->wakeirq = irq_of_parse_and_map(np, 1);
@@ -1622,7 +1624,7 @@ static void omap8250_remove(struct platform_device *pdev)
flush_work(&priv->qos_work);
pm_runtime_disable(&pdev->dev);
cpu_latency_qos_remove_request(&priv->pm_qos_request);
- device_init_wakeup(&pdev->dev, false);
+ device_set_wakeup_capable(&pdev->dev, false);
}
static int omap8250_prepare(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e1d7aa2fa347..6709b6a5f301 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1277,7 +1277,7 @@ static void pci_oxsemi_tornado_set_divisor(struct uart_port *port,
serial_icr_write(up, UART_TCR, tcr);
serial_icr_write(up, UART_CPR, cpr);
serial_icr_write(up, UART_CKS, cpr2);
- serial8250_do_set_divisor(port, baud, quot, 0);
+ serial8250_do_set_divisor(port, baud, quot);
}
/*
diff --git a/drivers/tty/serial/8250/8250_platform.c b/drivers/tty/serial/8250/8250_platform.c
index d5c8d851348d..be7ff07cbdd0 100644
--- a/drivers/tty/serial/8250/8250_platform.c
+++ b/drivers/tty/serial/8250/8250_platform.c
@@ -2,11 +2,15 @@
/*
* Universal/legacy platform driver for 8250/16550-type serial ports
*
- * Supports: ISA-compatible 8250/16550 ports
+ * Supports:
+ * ISA-compatible 8250/16550 ports
+ * ACPI 8250/16550 ports
* PNP 8250/16550 ports
* "serial8250" platform devices
*/
+#include <linux/acpi.h>
#include <linux/array_size.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/once.h>
@@ -22,9 +26,9 @@
/*
* Configuration:
- * share_irqs Whether we pass IRQF_SHARED to request_irq().
+ * share_irqs: Whether we pass IRQF_SHARED to request_irq().
* This option is unsafe when used on edge-triggered interrupts.
- * skip_txen_test Force skip of txen test at init time.
+ * skip_txen_test: Force skip of txen test at init time.
*/
unsigned int share_irqs = SERIAL8250_SHARE_IRQS;
unsigned int skip_txen_test;
@@ -61,9 +65,9 @@ static void __init __serial8250_isa_init_ports(void)
nr_uarts = UART_NR;
/*
- * Set up initial isa ports based on nr_uart module param, or else
+ * Set up initial ISA ports based on the nr_uarts module param, or else
* default to CONFIG_SERIAL_8250_RUNTIME_UARTS. Note that we do not
- * need to increase nr_uarts when setting up the initial isa ports.
+ * need to increase nr_uarts when setting up the initial ISA ports.
*/
for (i = 0; i < nr_uarts; i++)
serial8250_setup_port(i);
@@ -101,13 +105,63 @@ void __init serial8250_isa_init_ports(void)
}
/*
- * Register a set of serial devices attached to a platform device. The
- * list is terminated with a zero flags entry, which means we expect
- * all entries to have at least UPF_BOOT_AUTOCONF set.
+ * Generic 16550A platform devices
*/
-static int serial8250_probe(struct platform_device *dev)
+static int serial8250_probe_acpi(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uart_8250_port uart = { };
+ struct resource *regs;
+ unsigned char iotype;
+ int ret, line;
+
+ regs = platform_get_mem_or_io(pdev, 0);
+ if (!regs)
+ return dev_err_probe(dev, -EINVAL, "no registers defined\n");
+
+ switch (resource_type(regs)) {
+ case IORESOURCE_IO:
+ uart.port.iobase = regs->start;
+ iotype = UPIO_PORT;
+ break;
+ case IORESOURCE_MEM:
+ uart.port.mapbase = regs->start;
+ uart.port.mapsize = resource_size(regs);
+ uart.port.flags = UPF_IOREMAP;
+ iotype = UPIO_MEM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* default clock frequency */
+ uart.port.uartclk = 1843200;
+ uart.port.type = PORT_16550A;
+ uart.port.dev = &pdev->dev;
+ uart.port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
+
+ ret = uart_read_and_validate_port_properties(&uart.port);
+ /* no interrupt -> fall back to polling */
+ if (ret == -ENXIO)
+ ret = 0;
+ if (ret)
+ return ret;
+
+ /*
+ * The previous call may not set iotype correctly when the reg-io-width
+ * property is absent, and it doesn't handle IO port resources at all.
+ */
+ uart.port.iotype = iotype;
+
+ line = serial8250_register_8250_port(&uart);
+ if (line < 0)
+ return line;
+
+ return 0;
+}
+
+static int serial8250_probe_platform(struct platform_device *dev, struct plat_serial8250_port *p)
{
- struct plat_serial8250_port *p = dev_get_platdata(&dev->dev);
struct uart_8250_port uart;
int ret, i, irqflag = 0;
@@ -156,6 +210,31 @@ static int serial8250_probe(struct platform_device *dev)
}
/*
+ * Register a set of serial devices attached to a platform device.
+ * The list is terminated with a zero flags entry, which means we expect
+ * all entries to have at least UPF_BOOT_AUTOCONF set.
+ */
+static int serial8250_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct plat_serial8250_port *p;
+
+ p = dev_get_platdata(dev);
+ if (p)
+ return serial8250_probe_platform(pdev, p);
+
+ /*
+ * Probe platform UART devices defined via standard hardware
+ * discovery mechanisms such as ACPI or DT. Only ACPI-based serial
+ * devices are supported for now.
+ */
+ if (has_acpi_companion(dev))
+ return serial8250_probe_acpi(pdev);
+
+ return 0;
+}
+
+/*
* Remove serial ports registered against a platform device.
*/
static void serial8250_remove(struct platform_device *dev)
@@ -198,6 +277,12 @@ static int serial8250_resume(struct platform_device *dev)
return 0;
}
+static const struct acpi_device_id acpi_platform_serial_table[] = {
+ { "RSCV0003" }, /* RISC-V Generic 16550A UART */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, acpi_platform_serial_table);
+
static struct platform_driver serial8250_isa_driver = {
.probe = serial8250_probe,
.remove_new = serial8250_remove,
@@ -205,12 +290,13 @@ static struct platform_driver serial8250_isa_driver = {
.resume = serial8250_resume,
.driver = {
.name = "serial8250",
+ .acpi_match_table = acpi_platform_serial_table,
},
};
/*
* This "device" covers _all_ ISA 8250-compatible serial devices listed
- * in the table in include/asm/serial.h
+ * in the table in include/asm/serial.h.
*/
struct platform_device *serial8250_isa_devs;
@@ -239,8 +325,7 @@ static int __init serial8250_init(void)
if (ret)
goto unreg_uart_drv;
- serial8250_isa_devs = platform_device_alloc("serial8250",
- PLAT8250_DEV_LEGACY);
+ serial8250_isa_devs = platform_device_alloc("serial8250", PLAT8250_DEV_LEGACY);
if (!serial8250_isa_devs) {
ret = -ENOMEM;
goto unreg_pnp;
@@ -279,7 +364,7 @@ static void __exit serial8250_exit(void)
/*
* This tells serial8250_unregister_port() not to re-register
* the ports (thereby making serial8250_isa_driver permanently
- * in use.)
+ * in use).
*/
serial8250_isa_devs = NULL;
@@ -312,12 +397,13 @@ MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
#ifndef MODULE
-/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name
- * working as well for the module options so we don't break people. We
+/*
+ * This module was renamed to 8250_core in 3.7. Keep the old "8250" name
+ * working as well for the module options so we don't break people. We
* need to keep the names identical and the convenient macros will happily
* refuse to let us do that by failing the build with redefinition errors
- * of global variables. So we stick them inside a dummy function to avoid
- * those conflicts. The options still get parsed, and the redefined
+ * of global variables. So we stick them inside a dummy function to avoid
+ * those conflicts. The options still get parsed, and the redefined
* MODULE_PARAM_PREFIX lets us keep the "8250." syntax alive.
*
* This is hacky. I'm sorry.
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 2786918aea98..3509af7dc52b 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2609,7 +2609,7 @@ static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
}
void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac)
+ unsigned int quot)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -2641,7 +2641,7 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
if (port->set_divisor)
port->set_divisor(port, baud, quot, quot_frac);
else
- serial8250_do_set_divisor(port, baud, quot, quot_frac);
+ serial8250_do_set_divisor(port, baud, quot);
}
static unsigned int serial8250_get_baud_rate(struct uart_port *port,
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 1ac86b565374..96dd6126296c 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -165,22 +165,6 @@ static struct platform_driver serial_pxa_driver = {
module_platform_driver(serial_pxa_driver);
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-static int __init early_serial_pxa_setup(struct earlycon_device *device,
- const char *options)
-{
- struct uart_port *port = &device->port;
-
- if (!(device->port.membase || device->port.iobase))
- return -ENODEV;
-
- port->regshift = 2;
- return early_serial8250_setup(device, NULL);
-}
-OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup);
-OF_EARLYCON_DECLARE(mmp, "mrvl,mmp-uart", early_serial_pxa_setup);
-#endif
-
MODULE_AUTHOR("Sergei Ianovich");
MODULE_DESCRIPTION("driver for PXA on-board UARTS");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 69a632fefc41..6f0db310cf69 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -124,13 +124,14 @@ struct qcom_geni_serial_port {
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
bool setup;
- unsigned int baud;
+ unsigned long poll_timeout_us;
unsigned long clk_rate;
void *rx_buf;
u32 loopback;
bool brk;
unsigned int tx_remaining;
+ unsigned int tx_queued;
int wakeup_irq;
bool rx_tx_swap;
bool cts_rts_swap;
@@ -144,6 +145,9 @@ static const struct uart_ops qcom_geni_uart_pops;
static struct uart_driver qcom_geni_console_driver;
static struct uart_driver qcom_geni_uart_driver;
+static void __qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport);
+static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport);
+
static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport)
{
return container_of(uport, struct qcom_geni_serial_port, uport);
@@ -265,27 +269,18 @@ static bool qcom_geni_serial_secondary_active(struct uart_port *uport)
return readl(uport->membase + SE_GENI_STATUS) & S_GENI_CMD_ACTIVE;
}
-static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
- int offset, int field, bool set)
+static bool qcom_geni_serial_poll_bitfield(struct uart_port *uport,
+ unsigned int offset, u32 field, u32 val)
{
u32 reg;
struct qcom_geni_serial_port *port;
- unsigned int baud;
- unsigned int fifo_bits;
unsigned long timeout_us = 20000;
struct qcom_geni_private_data *private_data = uport->private_data;
if (private_data->drv) {
port = to_dev_port(uport);
- baud = port->baud;
- if (!baud)
- baud = 115200;
- fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
- /*
- * Total polling iterations based on FIFO worth of bytes to be
- * sent at current baud. Add a little fluff to the wait.
- */
- timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500;
+ if (port->poll_timeout_us)
+ timeout_us = port->poll_timeout_us;
}
/*
@@ -295,7 +290,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
while (timeout_us) {
reg = readl(uport->membase + offset);
- if ((bool)(reg & field) == set)
+ if ((reg & field) == val)
return true;
udelay(10);
timeout_us -= 10;
@@ -303,6 +298,12 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
return false;
}
+static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ unsigned int offset, u32 field, bool set)
+{
+ return qcom_geni_serial_poll_bitfield(uport, offset, field, set ? field : 0);
+}
+
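qcom_geni_serial_poll_bitfield() generalizes the old boolean poll: it compares a whole masked field against an expected value, re-reading every 10 us until the timeout budget is spent, and the boolean variant becomes a thin wrapper. The same loop shape in a self-contained sketch, with fake_read() standing in for the register read:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdint.h>

    static unsigned int polls;

    /* Stand-in for readl(): the field becomes 0x3 after a few reads. */
    static uint32_t fake_read(void)
    {
        return ++polls < 4 ? 0x1 : 0x3;
    }

    static bool poll_bitfield(uint32_t field, uint32_t val,
                              unsigned long timeout_us)
    {
        while (timeout_us) {
            if ((fake_read() & field) == val)
                return true;
            /* the driver udelay(10)s here; the delay is elided */
            timeout_us -= 10;
        }
        return false;
    }

    int main(void)
    {
        printf("matched: %d after %u polls\n",
               poll_bitfield(0x3, 0x3, 20000), polls);
        return 0;
    }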
static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
{
u32 m_cmd;
@@ -315,18 +316,16 @@ static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
{
int done;
- u32 irq_clear = M_CMD_DONE_EN;
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_DONE_EN, true);
if (!done) {
writel(M_GENI_CMD_ABORT, uport->membase +
SE_GENI_M_CMD_CTRL_REG);
- irq_clear |= M_CMD_ABORT_EN;
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
+ writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
- writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_abort_rx(struct uart_port *uport)
@@ -386,17 +385,27 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
unsigned char c)
{
- writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ if (qcom_geni_serial_main_active(uport)) {
+ qcom_geni_serial_poll_tx_done(uport);
+ __qcom_geni_serial_cancel_tx_cmd(uport);
+ }
+
+ writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_setup_tx(uport, 1);
- WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_TX_FIFO_WATERMARK_EN, true));
writel(c, uport->membase + SE_GENI_TX_FIFOn);
- writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_poll_tx_done(uport);
}
#endif
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+static void qcom_geni_serial_drain_fifo(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+
+ qcom_geni_serial_poll_bitfield(uport, SE_GENI_M_GP_LENGTH, GP_LENGTH,
+ port->tx_queued);
+}
+
static void qcom_geni_serial_wr_char(struct uart_port *uport, unsigned char ch)
{
struct qcom_geni_private_data *private_data = uport->private_data;
@@ -431,6 +440,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
}
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_setup_tx(uport, bytes_to_send);
for (i = 0; i < count; ) {
size_t chars_to_write = 0;
@@ -469,10 +479,9 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
+ u32 m_irq_en, s_irq_en;
bool locked = true;
unsigned long flags;
- u32 geni_status;
- u32 irq_en;
WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
@@ -486,40 +495,28 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
else
uart_port_lock_irqsave(uport, &flags);
- geni_status = readl(uport->membase + SE_GENI_STATUS);
-
- if (!locked) {
- /*
- * We can only get here if an oops is in progress then we were
- * unable to get the lock. This means we can't safely access
- * our state variables like tx_remaining. About the best we
- * can do is wait for the FIFO to be empty before we start our
- * transfer, so we'll do that.
- */
- qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_TX_FIFO_NOT_EMPTY_EN, false);
- } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
- /*
- * It seems we can't interrupt existing transfers if all data
- * has been sent, in which case we need to look for done first.
- */
- qcom_geni_serial_poll_tx_done(uport);
-
- if (!kfifo_is_empty(&uport->state->port.xmit_fifo)) {
- irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
- writel(irq_en | M_TX_FIFO_WATERMARK_EN,
- uport->membase + SE_GENI_M_IRQ_EN);
- }
+ m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ s_irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+ writel(0, uport->membase + SE_GENI_M_IRQ_EN);
+ writel(0, uport->membase + SE_GENI_S_IRQ_EN);
+
+ if (qcom_geni_serial_main_active(uport)) {
+ /* Wait for completion or drain FIFO */
+ if (!locked || port->tx_remaining == 0)
+ qcom_geni_serial_poll_tx_done(uport);
+ else
+ qcom_geni_serial_drain_fifo(uport);
+
+ qcom_geni_serial_cancel_tx_cmd(uport);
}
__qcom_geni_serial_console_write(uport, s, count);
+ writel(m_irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ writel(s_irq_en, uport->membase + SE_GENI_S_IRQ_EN);
- if (locked) {
- if (port->tx_remaining)
- qcom_geni_serial_setup_tx(uport, port->tx_remaining);
+ if (locked)
uart_port_unlock_irqrestore(uport, flags);
- }
}
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
@@ -682,13 +679,10 @@ static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport)
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
-static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
+static void __qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
- if (!qcom_geni_serial_main_active(uport))
- return;
-
geni_se_cancel_m_cmd(&port->se);
if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_CANCEL_EN, true)) {
@@ -698,8 +692,19 @@ static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+
+ if (!qcom_geni_serial_main_active(uport))
+ return;
+
+ __qcom_geni_serial_cancel_tx_cmd(uport);
port->tx_remaining = 0;
+ port->tx_queued = 0;
}
static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, bool drop)
@@ -923,9 +928,10 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport,
if (!chunk)
goto out_write_wakeup;
- if (!port->tx_remaining) {
+ if (!active) {
qcom_geni_serial_setup_tx(uport, pending);
port->tx_remaining = pending;
+ port->tx_queued = 0;
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
@@ -934,6 +940,7 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport,
}
qcom_geni_serial_send_chunk_fifo(uport, chunk);
+ port->tx_queued += chunk;
/*
* The tx fifo watermark is level triggered and latched. Though we had
@@ -1244,11 +1251,11 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
unsigned long clk_rate;
u32 ver, sampling_rate;
unsigned int avg_bw_core;
+ unsigned long timeout;
qcom_geni_serial_stop_rx(uport);
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
- port->baud = baud;
sampling_rate = UART_OVERSAMPLING;
/* Sampling rate is halved for IP versions >= 2.5 */
@@ -1326,9 +1333,21 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
else
tx_trans_cfg |= UART_CTS_MASK;
- if (baud)
+ if (baud) {
uart_update_timeout(uport, termios->c_cflag, baud);
+ /*
+ * Make sure that qcom_geni_serial_poll_bitfield() waits for
+ * the FIFO, two-word intermediate transfer register and shift
+ * register to clear.
+ *
+ * Note that uart_fifo_timeout() also adds a 20 ms margin.
+ */
+ timeout = jiffies_to_usecs(uart_fifo_timeout(uport));
+ timeout += 3 * timeout / port->tx_fifo_depth;
+ WRITE_ONCE(port->poll_timeout_us, timeout);
+ }
+
if (!uart_console(uport))
writel(port->loopback,
uport->membase + SE_UART_LOOPBACK_CFG);
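The new poll timeout budgets for draining the full FIFO plus the two-word intermediate transfer register and the shift register, i.e. roughly three extra character times on top of uart_fifo_timeout(). A worked version of that arithmetic under assumed numbers (16-byte FIFO, 115200 baud, ~10 bit times per 8N1 character, 20 ms margin; the exact uart_fifo_timeout() formula is approximated here):

    #include <stdio.h>

    int main(void)
    {
        unsigned long baud = 115200, fifo_depth = 16;

        /* FIFO drain time at ~10 bit times per character, plus margin */
        unsigned long fifo_us = fifo_depth * 10 * 1000000UL / baud + 20000;
        /* three more character times for the intermediate/shift registers */
        unsigned long timeout_us = fifo_us + 3 * fifo_us / fifo_depth;

        printf("fifo drain ~%lu us, poll timeout ~%lu us\n",
               fifo_us, timeout_us);
        return 0;
    }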
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 4132fcff7d4e..8bab2aedc499 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -577,8 +577,8 @@ static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id)
u32 clk_cfg;
writew(1, base + RP2_GLOBAL_CMD);
- readw(base + RP2_GLOBAL_CMD);
msleep(100);
+ readw(base + RP2_GLOBAL_CMD);
writel(0, base + RP2_CLK_PRESCALER);
/* TDM clock configuration */
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index dc35eb77d2ef..0d184ee2f9ce 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -550,6 +550,7 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
case TYPE_APPLE_S5L:
s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON);
+ s3c24xx_clear_bit(port, APPLE_S5L_UCON_RXTO_LEGACY_ENA, S3C2410_UCON);
break;
default:
disable_irq_nosync(ourport->rx_irq);
@@ -707,9 +708,8 @@ static void enable_rx_pio(struct s3c24xx_uart_port *ourport)
static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport);
-static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
+static irqreturn_t s3c24xx_serial_rx_chars_dma(struct s3c24xx_uart_port *ourport)
{
- struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
struct s3c24xx_uart_dma *dma = ourport->dma;
struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
@@ -843,9 +843,8 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
tty_flip_buffer_push(&port->state->port);
}
-static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id)
+static irqreturn_t s3c24xx_serial_rx_chars_pio(struct s3c24xx_uart_port *ourport)
{
- struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
uart_port_lock(port);
@@ -855,13 +854,11 @@ static irqreturn_t s3c24xx_serial_rx_chars_pio(void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t s3c24xx_serial_rx_irq(int irq, void *dev_id)
+static irqreturn_t s3c24xx_serial_rx_irq(struct s3c24xx_uart_port *ourport)
{
- struct s3c24xx_uart_port *ourport = dev_id;
-
if (ourport->dma && ourport->dma->rx_chan)
- return s3c24xx_serial_rx_chars_dma(dev_id);
- return s3c24xx_serial_rx_chars_pio(dev_id);
+ return s3c24xx_serial_rx_chars_dma(ourport);
+ return s3c24xx_serial_rx_chars_pio(ourport);
}
static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport)
@@ -928,9 +925,8 @@ static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport)
s3c24xx_serial_stop_tx(port);
}
-static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id)
+static irqreturn_t s3c24xx_serial_tx_irq(struct s3c24xx_uart_port *ourport)
{
- struct s3c24xx_uart_port *ourport = id;
struct uart_port *port = &ourport->port;
uart_port_lock(port);
@@ -944,17 +940,17 @@ static irqreturn_t s3c24xx_serial_tx_irq(int irq, void *id)
/* interrupt handler for s3c64xx and later SoCs. */
static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id)
{
- const struct s3c24xx_uart_port *ourport = id;
+ struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
u32 pend = rd_regl(port, S3C64XX_UINTP);
irqreturn_t ret = IRQ_HANDLED;
if (pend & S3C64XX_UINTM_RXD_MSK) {
- ret = s3c24xx_serial_rx_irq(irq, id);
+ ret = s3c24xx_serial_rx_irq(ourport);
wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_RXD_MSK);
}
if (pend & S3C64XX_UINTM_TXD_MSK) {
- ret = s3c24xx_serial_tx_irq(irq, id);
+ ret = s3c24xx_serial_tx_irq(ourport);
wr_regl(port, S3C64XX_UINTP, S3C64XX_UINTM_TXD_MSK);
}
return ret;
@@ -963,19 +959,21 @@ static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id)
/* interrupt handler for Apple SoCs. */
static irqreturn_t apple_serial_handle_irq(int irq, void *id)
{
- const struct s3c24xx_uart_port *ourport = id;
+ struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
u32 pend = rd_regl(port, S3C2410_UTRSTAT);
irqreturn_t ret = IRQ_NONE;
- if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO)) {
+ if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO |
+ APPLE_S5L_UTRSTAT_RXTO_LEGACY)) {
wr_regl(port, S3C2410_UTRSTAT,
- APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO);
- ret = s3c24xx_serial_rx_irq(irq, id);
+ APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO |
+ APPLE_S5L_UTRSTAT_RXTO_LEGACY);
+ ret = s3c24xx_serial_rx_irq(ourport);
}
if (pend & APPLE_S5L_UTRSTAT_TXTHRESH) {
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_TXTHRESH);
- ret = s3c24xx_serial_tx_irq(irq, id);
+ ret = s3c24xx_serial_tx_irq(ourport);
}
return ret;
@@ -1195,7 +1193,8 @@ static void apple_s5l_serial_shutdown(struct uart_port *port)
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
- APPLE_S5L_UCON_RXTO_ENA_MSK);
+ APPLE_S5L_UCON_RXTO_ENA_MSK |
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK);
wr_regl(port, S3C2410_UCON, ucon);
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS);
@@ -1292,6 +1291,7 @@ static int apple_s5l_serial_startup(struct uart_port *port)
/* Enable Rx Interrupt */
s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTHRESH_ENA, S3C2410_UCON);
s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTO_ENA, S3C2410_UCON);
+ s3c24xx_set_bit(port, APPLE_S5L_UCON_RXTO_LEGACY_ENA, S3C2410_UCON);
return ret;
}
@@ -2148,13 +2148,15 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
- APPLE_S5L_UCON_RXTO_ENA_MSK);
+ APPLE_S5L_UCON_RXTO_ENA_MSK |
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK);
if (ourport->tx_enabled)
ucon |= APPLE_S5L_UCON_TXTHRESH_ENA_MSK;
if (ourport->rx_enabled)
ucon |= APPLE_S5L_UCON_RXTHRESH_ENA_MSK |
- APPLE_S5L_UCON_RXTO_ENA_MSK;
+ APPLE_S5L_UCON_RXTO_ENA_MSK |
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK;
wr_regl(port, S3C2410_UCON, ucon);
@@ -2541,7 +2543,7 @@ static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = {
.name = "Apple S5L UART",
.type = TYPE_APPLE_S5L,
.port_type = PORT_8250,
- .iotype = UPIO_MEM,
+ .iotype = UPIO_MEM32,
.fifosize = 16,
.rx_fifomask = S3C2410_UFSTAT_RXMASK,
.rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
@@ -2827,6 +2829,9 @@ OF_EARLYCON_DECLARE(gs101, "google,gs101-uart", gs101_early_console_setup);
static int __init apple_s5l_early_console_setup(struct earlycon_device *device,
const char *opt)
{
+ /* Apple A7-A11 requires MMIO32 register accesses. */
+ device->port.iotype = UPIO_MEM32;
+
/* Close enough to S3C2410 for earlycon... */
device->port.private_data = &s3c2410_early_console_data;
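
For context on the iotype change: UPIO_MEM selects byte-wide MMIO accessors while UPIO_MEM32 selects 32-bit ones, and Apple A7-A11 buses fault on sub-word UART accesses, hence forcing UPIO_MEM32 above. A hedged sketch of the usual accessor dispatch (a simplified generic pattern, not this driver's exact helper):

	#include <linux/io.h>
	#include <linux/serial_core.h>

	static u32 demo_serial_in(struct uart_port *port, int offset)
	{
		offset <<= port->regshift;

		switch (port->iotype) {
		case UPIO_MEM:
			return readb(port->membase + offset);	/* 8-bit access */
		case UPIO_MEM32:
			return readl(port->membase + offset);	/* 32-bit access */
		default:
			return 0;
		}
	}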
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index b4c1798a1df2..ad88a33a504f 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -10,6 +10,7 @@
#undef DEFAULT_SYMBOL_NAMESPACE
#define DEFAULT_SYMBOL_NAMESPACE SERIAL_NXP_SC16IS7XX
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -78,52 +79,52 @@
#define SC16IS7XX_XOFF2_REG (0x07) /* Xoff2 word */
/* IER register bits */
-#define SC16IS7XX_IER_RDI_BIT (1 << 0) /* Enable RX data interrupt */
-#define SC16IS7XX_IER_THRI_BIT (1 << 1) /* Enable TX holding register
+#define SC16IS7XX_IER_RDI_BIT BIT(0) /* Enable RX data interrupt */
+#define SC16IS7XX_IER_THRI_BIT BIT(1) /* Enable TX holding register
* interrupt */
-#define SC16IS7XX_IER_RLSI_BIT (1 << 2) /* Enable RX line status
+#define SC16IS7XX_IER_RLSI_BIT BIT(2) /* Enable RX line status
* interrupt */
-#define SC16IS7XX_IER_MSI_BIT (1 << 3) /* Enable Modem status
+#define SC16IS7XX_IER_MSI_BIT BIT(3) /* Enable Modem status
* interrupt */
/* IER register bits - write only if (EFR[4] == 1) */
-#define SC16IS7XX_IER_SLEEP_BIT (1 << 4) /* Enable Sleep mode */
-#define SC16IS7XX_IER_XOFFI_BIT (1 << 5) /* Enable Xoff interrupt */
-#define SC16IS7XX_IER_RTSI_BIT (1 << 6) /* Enable nRTS interrupt */
-#define SC16IS7XX_IER_CTSI_BIT (1 << 7) /* Enable nCTS interrupt */
+#define SC16IS7XX_IER_SLEEP_BIT BIT(4) /* Enable Sleep mode */
+#define SC16IS7XX_IER_XOFFI_BIT BIT(5) /* Enable Xoff interrupt */
+#define SC16IS7XX_IER_RTSI_BIT BIT(6) /* Enable nRTS interrupt */
+#define SC16IS7XX_IER_CTSI_BIT BIT(7) /* Enable nCTS interrupt */
/* FCR register bits */
-#define SC16IS7XX_FCR_FIFO_BIT (1 << 0) /* Enable FIFO */
-#define SC16IS7XX_FCR_RXRESET_BIT (1 << 1) /* Reset RX FIFO */
-#define SC16IS7XX_FCR_TXRESET_BIT (1 << 2) /* Reset TX FIFO */
-#define SC16IS7XX_FCR_RXLVLL_BIT (1 << 6) /* RX Trigger level LSB */
-#define SC16IS7XX_FCR_RXLVLH_BIT (1 << 7) /* RX Trigger level MSB */
+#define SC16IS7XX_FCR_FIFO_BIT BIT(0) /* Enable FIFO */
+#define SC16IS7XX_FCR_RXRESET_BIT BIT(1) /* Reset RX FIFO */
+#define SC16IS7XX_FCR_TXRESET_BIT BIT(2) /* Reset TX FIFO */
+#define SC16IS7XX_FCR_RXLVLL_BIT BIT(6) /* RX Trigger level LSB */
+#define SC16IS7XX_FCR_RXLVLH_BIT BIT(7) /* RX Trigger level MSB */
/* FCR register bits - write only if (EFR[4] == 1) */
-#define SC16IS7XX_FCR_TXLVLL_BIT (1 << 4) /* TX Trigger level LSB */
-#define SC16IS7XX_FCR_TXLVLH_BIT (1 << 5) /* TX Trigger level MSB */
+#define SC16IS7XX_FCR_TXLVLL_BIT BIT(4) /* TX Trigger level LSB */
+#define SC16IS7XX_FCR_TXLVLH_BIT BIT(5) /* TX Trigger level MSB */
/* IIR register bits */
-#define SC16IS7XX_IIR_NO_INT_BIT (1 << 0) /* No interrupts pending */
-#define SC16IS7XX_IIR_ID_MASK 0x3e /* Mask for the interrupt ID */
-#define SC16IS7XX_IIR_THRI_SRC 0x02 /* TX holding register empty */
-#define SC16IS7XX_IIR_RDI_SRC 0x04 /* RX data interrupt */
-#define SC16IS7XX_IIR_RLSE_SRC 0x06 /* RX line status error */
-#define SC16IS7XX_IIR_RTOI_SRC 0x0c /* RX time-out interrupt */
-#define SC16IS7XX_IIR_MSI_SRC 0x00 /* Modem status interrupt
- * - only on 75x/76x
- */
-#define SC16IS7XX_IIR_INPIN_SRC 0x30 /* Input pin change of state
- * - only on 75x/76x
- */
-#define SC16IS7XX_IIR_XOFFI_SRC 0x10 /* Received Xoff */
-#define SC16IS7XX_IIR_CTSRTS_SRC 0x20 /* nCTS,nRTS change of state
- * from active (LOW)
- * to inactive (HIGH)
- */
+#define SC16IS7XX_IIR_NO_INT_BIT 0x01 /* No interrupts pending */
+#define SC16IS7XX_IIR_ID_MASK GENMASK(5, 1) /* Mask for the interrupt ID */
+#define SC16IS7XX_IIR_THRI_SRC 0x02 /* TX holding register empty */
+#define SC16IS7XX_IIR_RDI_SRC 0x04 /* RX data interrupt */
+#define SC16IS7XX_IIR_RLSE_SRC 0x06 /* RX line status error */
+#define SC16IS7XX_IIR_RTOI_SRC 0x0c /* RX time-out interrupt */
+#define SC16IS7XX_IIR_MSI_SRC 0x00 /* Modem status interrupt
+ * - only on 75x/76x
+ */
+#define SC16IS7XX_IIR_INPIN_SRC 0x30 /* Input pin change of state
+ * - only on 75x/76x
+ */
+#define SC16IS7XX_IIR_XOFFI_SRC 0x10 /* Received Xoff */
+#define SC16IS7XX_IIR_CTSRTS_SRC 0x20 /* nCTS,nRTS change of state
+ * from active (LOW)
+ * to inactive (HIGH)
+ */
/* LCR register bits */
-#define SC16IS7XX_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */
-#define SC16IS7XX_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1
+#define SC16IS7XX_LCR_LENGTH0_BIT BIT(0) /* Word length bit 0 */
+#define SC16IS7XX_LCR_LENGTH1_BIT BIT(1) /* Word length bit 1
*
* Word length bits table:
* 00 -> 5 bit words
@@ -131,7 +132,7 @@
* 10 -> 7 bit words
* 11 -> 8 bit words
*/
-#define SC16IS7XX_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit
+#define SC16IS7XX_LCR_STOPLEN_BIT BIT(2) /* STOP length bit
*
* STOP length bit table:
* 0 -> 1 stop bit
@@ -139,11 +140,11 @@
* word length is 5,
* 2 stop bits otherwise
*/
-#define SC16IS7XX_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */
-#define SC16IS7XX_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */
-#define SC16IS7XX_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
-#define SC16IS7XX_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
-#define SC16IS7XX_LCR_DLAB_BIT (1 << 7) /* Divisor Latch enable */
+#define SC16IS7XX_LCR_PARITY_BIT BIT(3) /* Parity bit enable */
+#define SC16IS7XX_LCR_EVENPARITY_BIT BIT(4) /* Even parity bit enable */
+#define SC16IS7XX_LCR_FORCEPARITY_BIT BIT(5) /* 9-bit multidrop parity */
+#define SC16IS7XX_LCR_TXBREAK_BIT BIT(6) /* TX break enable */
+#define SC16IS7XX_LCR_DLAB_BIT BIT(7) /* Divisor Latch enable */
#define SC16IS7XX_LCR_WORD_LEN_5 (0x00)
#define SC16IS7XX_LCR_WORD_LEN_6 (0x01)
#define SC16IS7XX_LCR_WORD_LEN_7 (0x02)
@@ -154,61 +155,65 @@
* reg set */
/* MCR register bits */
-#define SC16IS7XX_MCR_DTR_BIT (1 << 0) /* DTR complement
+#define SC16IS7XX_MCR_DTR_BIT BIT(0) /* DTR complement
* - only on 75x/76x
*/
-#define SC16IS7XX_MCR_RTS_BIT (1 << 1) /* RTS complement */
-#define SC16IS7XX_MCR_TCRTLR_BIT (1 << 2) /* TCR/TLR register enable */
-#define SC16IS7XX_MCR_LOOP_BIT (1 << 4) /* Enable loopback test mode */
-#define SC16IS7XX_MCR_XONANY_BIT (1 << 5) /* Enable Xon Any
+#define SC16IS7XX_MCR_RTS_BIT BIT(1) /* RTS complement */
+#define SC16IS7XX_MCR_TCRTLR_BIT BIT(2) /* TCR/TLR register enable */
+#define SC16IS7XX_MCR_LOOP_BIT BIT(4) /* Enable loopback test mode */
+#define SC16IS7XX_MCR_XONANY_BIT BIT(5) /* Enable Xon Any
* - write enabled
* if (EFR[4] == 1)
*/
-#define SC16IS7XX_MCR_IRDA_BIT (1 << 6) /* Enable IrDA mode
+#define SC16IS7XX_MCR_IRDA_BIT BIT(6) /* Enable IrDA mode
* - write enabled
* if (EFR[4] == 1)
*/
-#define SC16IS7XX_MCR_CLKSEL_BIT (1 << 7) /* Divide clock by 4
+#define SC16IS7XX_MCR_CLKSEL_BIT BIT(7) /* Divide clock by 4
* - write enabled
* if (EFR[4] == 1)
*/
/* LSR register bits */
-#define SC16IS7XX_LSR_DR_BIT (1 << 0) /* Receiver data ready */
-#define SC16IS7XX_LSR_OE_BIT (1 << 1) /* Overrun Error */
-#define SC16IS7XX_LSR_PE_BIT (1 << 2) /* Parity Error */
-#define SC16IS7XX_LSR_FE_BIT (1 << 3) /* Frame Error */
-#define SC16IS7XX_LSR_BI_BIT (1 << 4) /* Break Interrupt */
-#define SC16IS7XX_LSR_BRK_ERROR_MASK 0x1E /* BI, FE, PE, OE bits */
-#define SC16IS7XX_LSR_THRE_BIT (1 << 5) /* TX holding register empty */
-#define SC16IS7XX_LSR_TEMT_BIT (1 << 6) /* Transmitter empty */
-#define SC16IS7XX_LSR_FIFOE_BIT (1 << 7) /* Fifo Error */
+#define SC16IS7XX_LSR_DR_BIT BIT(0) /* Receiver data ready */
+#define SC16IS7XX_LSR_OE_BIT BIT(1) /* Overrun Error */
+#define SC16IS7XX_LSR_PE_BIT BIT(2) /* Parity Error */
+#define SC16IS7XX_LSR_FE_BIT BIT(3) /* Frame Error */
+#define SC16IS7XX_LSR_BI_BIT BIT(4) /* Break Interrupt */
+#define SC16IS7XX_LSR_BRK_ERROR_MASK \
+ (SC16IS7XX_LSR_OE_BIT | \
+ SC16IS7XX_LSR_PE_BIT | \
+ SC16IS7XX_LSR_FE_BIT | \
+ SC16IS7XX_LSR_BI_BIT)
+
+#define SC16IS7XX_LSR_THRE_BIT BIT(5) /* TX holding register empty */
+#define SC16IS7XX_LSR_TEMT_BIT BIT(6) /* Transmitter empty */
+#define SC16IS7XX_LSR_FIFOE_BIT BIT(7) /* Fifo Error */
/* MSR register bits */
-#define SC16IS7XX_MSR_DCTS_BIT (1 << 0) /* Delta CTS Clear To Send */
-#define SC16IS7XX_MSR_DDSR_BIT (1 << 1) /* Delta DSR Data Set Ready
+#define SC16IS7XX_MSR_DCTS_BIT BIT(0) /* Delta CTS Clear To Send */
+#define SC16IS7XX_MSR_DDSR_BIT BIT(1) /* Delta DSR Data Set Ready
* or (IO4)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_DRI_BIT (1 << 2) /* Delta RI Ring Indicator
+#define SC16IS7XX_MSR_DRI_BIT BIT(2) /* Delta RI Ring Indicator
* or (IO7)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_DCD_BIT (1 << 3) /* Delta CD Carrier Detect
+#define SC16IS7XX_MSR_DCD_BIT BIT(3) /* Delta CD Carrier Detect
* or (IO6)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_CTS_BIT (1 << 4) /* CTS */
-#define SC16IS7XX_MSR_DSR_BIT (1 << 5) /* DSR (IO4)
+#define SC16IS7XX_MSR_CTS_BIT BIT(4) /* CTS */
+#define SC16IS7XX_MSR_DSR_BIT BIT(5) /* DSR (IO4)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_RI_BIT (1 << 6) /* RI (IO7)
+#define SC16IS7XX_MSR_RI_BIT BIT(6) /* RI (IO7)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_CD_BIT (1 << 7) /* CD (IO6)
+#define SC16IS7XX_MSR_CD_BIT BIT(7) /* CD (IO6)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_DELTA_MASK 0x0F /* Any of the delta bits! */
/*
* TCR register bits
@@ -241,19 +246,19 @@
#define SC16IS7XX_TLR_RX_TRIGGER(words) ((((words) / 4) & 0x0f) << 4)
/* IOControl register bits (Only 75x/76x) */
-#define SC16IS7XX_IOCONTROL_LATCH_BIT (1 << 0) /* Enable input latching */
-#define SC16IS7XX_IOCONTROL_MODEM_A_BIT (1 << 1) /* Enable GPIO[7:4] as modem A pins */
-#define SC16IS7XX_IOCONTROL_MODEM_B_BIT (1 << 2) /* Enable GPIO[3:0] as modem B pins */
-#define SC16IS7XX_IOCONTROL_SRESET_BIT (1 << 3) /* Software Reset */
+#define SC16IS7XX_IOCONTROL_LATCH_BIT BIT(0) /* Enable input latching */
+#define SC16IS7XX_IOCONTROL_MODEM_A_BIT BIT(1) /* Enable GPIO[7:4] as modem A pins */
+#define SC16IS7XX_IOCONTROL_MODEM_B_BIT BIT(2) /* Enable GPIO[3:0] as modem B pins */
+#define SC16IS7XX_IOCONTROL_SRESET_BIT BIT(3) /* Software Reset */
/* EFCR register bits */
-#define SC16IS7XX_EFCR_9BIT_MODE_BIT (1 << 0) /* Enable 9-bit or Multidrop
+#define SC16IS7XX_EFCR_9BIT_MODE_BIT BIT(0) /* Enable 9-bit or Multidrop
* mode (RS485) */
-#define SC16IS7XX_EFCR_RXDISABLE_BIT (1 << 1) /* Disable receiver */
-#define SC16IS7XX_EFCR_TXDISABLE_BIT (1 << 2) /* Disable transmitter */
-#define SC16IS7XX_EFCR_AUTO_RS485_BIT (1 << 4) /* Auto RS485 RTS direction */
-#define SC16IS7XX_EFCR_RTS_INVERT_BIT (1 << 5) /* RTS output inversion */
-#define SC16IS7XX_EFCR_IRDA_MODE_BIT (1 << 7) /* IrDA mode
+#define SC16IS7XX_EFCR_RXDISABLE_BIT BIT(1) /* Disable receiver */
+#define SC16IS7XX_EFCR_TXDISABLE_BIT BIT(2) /* Disable transmitter */
+#define SC16IS7XX_EFCR_AUTO_RS485_BIT BIT(4) /* Auto RS485 RTS direction */
+#define SC16IS7XX_EFCR_RTS_INVERT_BIT BIT(5) /* RTS output inversion */
+#define SC16IS7XX_EFCR_IRDA_MODE_BIT BIT(7) /* IrDA mode
* 0 = rate up to 115.2 kbit/s
* - Only 75x/76x
* 1 = rate up to 1.152 Mbit/s
@@ -261,16 +266,16 @@
*/
/* EFR register bits */
-#define SC16IS7XX_EFR_AUTORTS_BIT (1 << 6) /* Auto RTS flow ctrl enable */
-#define SC16IS7XX_EFR_AUTOCTS_BIT (1 << 7) /* Auto CTS flow ctrl enable */
-#define SC16IS7XX_EFR_XOFF2_DETECT_BIT (1 << 5) /* Enable Xoff2 detection */
-#define SC16IS7XX_EFR_ENABLE_BIT (1 << 4) /* Enable enhanced functions
+#define SC16IS7XX_EFR_AUTORTS_BIT BIT(6) /* Auto RTS flow ctrl enable */
+#define SC16IS7XX_EFR_AUTOCTS_BIT BIT(7) /* Auto CTS flow ctrl enable */
+#define SC16IS7XX_EFR_XOFF2_DETECT_BIT BIT(5) /* Enable Xoff2 detection */
+#define SC16IS7XX_EFR_ENABLE_BIT BIT(4) /* Enable enhanced functions
* and writing to IER[7:4],
* FCR[5:4], MCR[7:5]
*/
-#define SC16IS7XX_EFR_SWFLOW3_BIT (1 << 3) /* SWFLOW bit 3 */
-#define SC16IS7XX_EFR_SWFLOW2_BIT (1 << 2) /* SWFLOW bit 2
- *
+#define SC16IS7XX_EFR_SWFLOW3_BIT BIT(3)
+#define SC16IS7XX_EFR_SWFLOW2_BIT BIT(2)
+ /*
* SWFLOW bits 3 & 2 table:
* 00 -> no transmitter flow
* control
@@ -282,10 +287,10 @@
* XON1, XON2, XOFF1 and
* XOFF2
*/
-#define SC16IS7XX_EFR_SWFLOW1_BIT (1 << 1) /* SWFLOW bit 2 */
-#define SC16IS7XX_EFR_SWFLOW0_BIT (1 << 0) /* SWFLOW bit 3
- *
- * SWFLOW bits 3 & 2 table:
+#define SC16IS7XX_EFR_SWFLOW1_BIT BIT(1)
+#define SC16IS7XX_EFR_SWFLOW0_BIT BIT(0)
+ /*
+ * SWFLOW bits 1 & 0 table:
* 00 -> no received flow
* control
* 01 -> receiver compares
@@ -309,9 +314,9 @@
#define SC16IS7XX_FIFO_SIZE (64)
#define SC16IS7XX_GPIOS_PER_BANK 4
-#define SC16IS7XX_RECONF_MD (1 << 0)
-#define SC16IS7XX_RECONF_IER (1 << 1)
-#define SC16IS7XX_RECONF_RS485 (1 << 2)
+#define SC16IS7XX_RECONF_MD BIT(0)
+#define SC16IS7XX_RECONF_IER BIT(1)
+#define SC16IS7XX_RECONF_RS485 BIT(2)
struct sc16is7xx_one_config {
unsigned int flags;
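
BIT(n) expands to (1UL << n) and GENMASK(h, l) builds the contiguous mask covering bits h..l, so the conversion above is value-preserving. A few compile-time checks that could verify the equivalences (a sketch, assuming static_assert via <linux/build_bug.h>):

	#include <linux/bits.h>
	#include <linux/build_bug.h>

	static_assert(BIT(1) == 0x02);			/* e.g. the old (1 << 1) literals */
	static_assert(GENMASK(5, 1) == 0x3e);		/* same value the IIR_ID_MASK literal encoded */
	static_assert(GENMASK(4, 1) ==
		      (BIT(1) | BIT(2) | BIT(3) | BIT(4)));	/* cf. the OE|PE|FE|BI break-error mask */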
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 1e3e28e364df..d94d73e45fb6 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -407,14 +407,16 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
/*
* Turn off DTR and RTS early.
*/
- if (uport && uart_console(uport) && tty) {
- uport->cons->cflag = tty->termios.c_cflag;
- uport->cons->ispeed = tty->termios.c_ispeed;
- uport->cons->ospeed = tty->termios.c_ospeed;
- }
+ if (uport) {
+ if (uart_console(uport) && tty) {
+ uport->cons->cflag = tty->termios.c_cflag;
+ uport->cons->ispeed = tty->termios.c_ispeed;
+ uport->cons->ospeed = tty->termios.c_ospeed;
+ }
- if (!tty || C_HUPCL(tty))
- uart_port_dtr_rts(uport, false);
+ if (!tty || C_HUPCL(tty))
+ uart_port_dtr_rts(uport, false);
+ }
uart_port_shutdown(port);
}
@@ -1102,21 +1104,19 @@ static int uart_tiocmget(struct tty_struct *tty)
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
- int result = -EIO;
+ int result;
+
+ guard(mutex)(&port->mutex);
- mutex_lock(&port->mutex);
uport = uart_port_check(state);
- if (!uport)
- goto out;
+ if (!uport || tty_io_error(tty))
+ return -EIO;
+
+ uart_port_lock_irq(uport);
+ result = uport->mctrl;
+ result |= uport->ops->get_mctrl(uport);
+ uart_port_unlock_irq(uport);
- if (!tty_io_error(tty)) {
- uart_port_lock_irq(uport);
- result = uport->mctrl;
- result |= uport->ops->get_mctrl(uport);
- uart_port_unlock_irq(uport);
- }
-out:
- mutex_unlock(&port->mutex);
return result;
}
@@ -1126,20 +1126,16 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
- int ret = -EIO;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
+
uport = uart_port_check(state);
- if (!uport)
- goto out;
+ if (!uport || tty_io_error(tty))
+ return -EIO;
- if (!tty_io_error(tty)) {
- uart_update_mctrl(uport, set, clear);
- ret = 0;
- }
-out:
- mutex_unlock(&port->mutex);
- return ret;
+ uart_update_mctrl(uport, set, clear);
+
+ return 0;
}
static int uart_break_ctl(struct tty_struct *tty, int break_state)
@@ -1147,19 +1143,17 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport;
- int ret = -EIO;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
+
uport = uart_port_check(state);
if (!uport)
- goto out;
+ return -EIO;
if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
uport->ops->break_ctl(uport, break_state);
- ret = 0;
-out:
- mutex_unlock(&port->mutex);
- return ret;
+
+ return 0;
}
static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
@@ -1176,17 +1170,14 @@ static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
* changing, and hence any extra opens of the port while
* we're auto-configuring.
*/
- if (mutex_lock_interruptible(&port->mutex))
- return -ERESTARTSYS;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &port->mutex) {
+ uport = uart_port_check(state);
+ if (!uport)
+ return -EIO;
- uport = uart_port_check(state);
- if (!uport) {
- ret = -EIO;
- goto out;
- }
+ if (tty_port_users(port) != 1)
+ return -EBUSY;
- ret = -EBUSY;
- if (tty_port_users(port) == 1) {
uart_shutdown(tty, state);
/*
@@ -1207,14 +1198,15 @@ static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
uport->ops->config_port(uport, flags);
ret = uart_startup(tty, state, true);
- if (ret == 0)
- tty_port_set_initialized(port, true);
+ if (ret < 0)
+ return ret;
if (ret > 0)
- ret = 0;
+ return 0;
+
+ tty_port_set_initialized(port, true);
}
-out:
- mutex_unlock(&port->mutex);
- return ret;
+
+ return 0;
}
static void uart_enable_ms(struct uart_port *uport)
@@ -1709,10 +1701,11 @@ static void uart_set_termios(struct tty_struct *tty,
unsigned int iflag_mask = IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK;
bool sw_changed = false;
- mutex_lock(&state->port.mutex);
+ guard(mutex)(&state->port.mutex);
+
uport = uart_port_check(state);
if (!uport)
- goto out;
+ return;
/*
* Drivers doing software flow control also need to know
@@ -1735,9 +1728,8 @@ static void uart_set_termios(struct tty_struct *tty,
tty->termios.c_ospeed == old_termios->c_ospeed &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
((tty->termios.c_iflag ^ old_termios->c_iflag) & iflag_mask) == 0 &&
- !sw_changed) {
- goto out;
- }
+ !sw_changed)
+ return;
uart_change_line_settings(tty, state, old_termios);
/* reload cflag from termios; port driver may have overridden flags */
@@ -1754,8 +1746,6 @@ static void uart_set_termios(struct tty_struct *tty,
mask |= TIOCM_RTS;
uart_set_mctrl(uport, mask);
}
-out:
- mutex_unlock(&state->port.mutex);
}
/*
@@ -2049,10 +2039,11 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
unsigned int status;
int mmio;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
+
uport = uart_port_check(state);
if (!uport)
- goto out;
+ return;
mmio = uport->iotype >= UPIO_MEM;
seq_printf(m, "%d: uart:%s %s%08llX irq:%d",
@@ -2064,7 +2055,7 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
if (uport->type == PORT_UNKNOWN) {
seq_putc(m, '\n');
- goto out;
+ return;
}
if (capable(CAP_SYS_ADMIN)) {
@@ -2115,8 +2106,6 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
seq_putc(m, '\n');
#undef STATBIT
#undef INFOBIT
-out:
- mutex_unlock(&port->mutex);
}
static int uart_proc_show(struct seq_file *m, void *v)
@@ -2393,13 +2382,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
struct device *tty_dev;
struct uart_match match = {uport, drv};
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port);
if (tty_dev && device_may_wakeup(tty_dev)) {
enable_irq_wake(uport->irq);
put_device(tty_dev);
- mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
@@ -2417,7 +2405,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
uart_port_unlock_irq(uport);
}
device_set_awake_path(uport->dev);
- goto unlock;
+ return 0;
}
uport->suspended = 1;
@@ -2460,8 +2448,6 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
console_stop(uport->cons);
uart_change_pm(state, UART_PM_STATE_OFF);
-unlock:
- mutex_unlock(&port->mutex);
return 0;
}
@@ -2475,14 +2461,13 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
struct uart_match match = {uport, drv};
struct ktermios termios;
- mutex_lock(&port->mutex);
+ guard(mutex)(&port->mutex);
tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port);
if (!uport->suspended && device_may_wakeup(tty_dev)) {
if (irqd_is_wakeup_set(irq_get_irq_data((uport->irq))))
disable_irq_wake(uport->irq);
put_device(tty_dev);
- mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
@@ -2555,8 +2540,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
tty_port_set_suspended(port, false);
}
- mutex_unlock(&port->mutex);
-
return 0;
}
EXPORT_SYMBOL(uart_resume_port);
@@ -2696,14 +2679,13 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
int ret = 0;
tport = &state->port;
- mutex_lock(&tport->mutex);
+
+ guard(mutex)(&tport->mutex);
port = uart_port_check(state);
if (!port || port->type == PORT_UNKNOWN ||
- !(port->ops->poll_get_char && port->ops->poll_put_char)) {
- ret = -1;
- goto out;
- }
+ !(port->ops->poll_get_char && port->ops->poll_put_char))
+ return -1;
pm_state = state->pm_state;
uart_change_pm(state, UART_PM_STATE_ON);
@@ -2723,10 +2705,10 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
ret = uart_set_options(port, NULL, baud, parity, bits, flow);
console_list_unlock();
}
-out:
+
if (ret)
uart_change_pm(state, pm_state);
- mutex_unlock(&tport->mutex);
+
return ret;
}
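
The conversions above rely on the scope-based lock guards from <linux/cleanup.h>: guard(mutex)(&m) takes the mutex and releases it automatically on every return path, which is what lets the goto-out/unlock tails disappear. A minimal sketch (demo_* names are hypothetical):

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static int demo_value;

	static int demo_get(void)
	{
		guard(mutex)(&demo_lock);	/* unlocked automatically at every return */

		if (demo_value < 0)
			return -EIO;		/* no goto out / mutex_unlock() needed */
		return demo_value;
	}

	static int demo_set_interruptible(void)
	{
		/* conditional guard: runs the fail action if the lock attempt fails */
		scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &demo_lock) {
			demo_value++;
		}				/* lock dropped when the scope exits */
		return 0;
	}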
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index f91753a40a69..8aea59f8ca13 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -808,7 +808,6 @@ static void asc_serial_remove(struct platform_device *pdev)
uart_remove_one_port(&asc_uart_driver, port);
}
-#ifdef CONFIG_PM_SLEEP
static int asc_serial_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
@@ -823,8 +822,6 @@ static int asc_serial_resume(struct device *dev)
return uart_resume_port(&asc_uart_driver, port);
}
-#endif /* CONFIG_PM_SLEEP */
-
/*----------------------------------------------------------------------*/
#ifdef CONFIG_SERIAL_ST_ASC_CONSOLE
@@ -932,16 +929,15 @@ static struct uart_driver asc_uart_driver = {
.cons = ASC_SERIAL_CONSOLE,
};
-static const struct dev_pm_ops asc_serial_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(asc_serial_suspend, asc_serial_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(asc_serial_pm_ops, asc_serial_suspend,
+ asc_serial_resume);
static struct platform_driver asc_serial_driver = {
.probe = asc_serial_probe,
.remove_new = asc_serial_remove,
.driver = {
.name = DRIVER_NAME,
- .pm = &asc_serial_pm_ops,
+ .pm = pm_sleep_ptr(&asc_serial_pm_ops),
.of_match_table = of_match_ptr(asc_match),
},
};
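
DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr() replaces the #ifdef CONFIG_PM_SLEEP guards: the callbacks are always compiled (so they cannot bit-rot) and the linker discards them when sleep support is off. A hedged sketch of the pattern with hypothetical names:

	#include <linux/platform_device.h>
	#include <linux/pm.h>

	static int demo_suspend(struct device *dev) { return 0; }
	static int demo_resume(struct device *dev)  { return 0; }

	static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

	static struct platform_driver demo_driver = {
		.driver = {
			.name = "demo",
			.pm = pm_sleep_ptr(&demo_pm_ops),	/* NULL when !CONFIG_PM_SLEEP */
		},
	};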
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 2acfcea403ce..777392914819 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -219,7 +219,7 @@ struct cdns_platform_data {
u32 quirks;
};
-struct serial_rs485 cdns_rs485_supported = {
+static struct serial_rs485 cdns_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
SER_RS485_RTS_AFTER_SEND,
.delay_rts_before_send = 1,
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 14f8f00fdcf9..930b04e3d148 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -531,6 +531,7 @@ static const struct sysrq_key_op *sysrq_key_table[62] = {
NULL, /* P */
NULL, /* Q */
&sysrq_replay_logs_op, /* R */
+ /* S: May be registered by sched_ext for resetting */
NULL, /* S */
NULL, /* T */
NULL, /* U */
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index abc2708d4ac5..9771072da177 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -350,22 +350,19 @@ int tty_dev_name_to_number(const char *name, dev_t *number)
return ret;
prefix_length = str - name;
- mutex_lock(&tty_mutex);
+
+ guard(mutex)(&tty_mutex);
list_for_each_entry(p, &tty_drivers, tty_drivers)
if (prefix_length == strlen(p->name) && strncmp(name,
p->name, prefix_length) == 0) {
if (index < p->num) {
*number = MKDEV(p->major, p->minor_start + index);
- goto out;
+ return 0;
}
}
- /* if here then driver wasn't found */
- ret = -ENODEV;
-out:
- mutex_unlock(&tty_mutex);
- return ret;
+ return -ENODEV;
}
EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
@@ -462,7 +459,6 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations tty_fops = {
- .llseek = no_llseek,
.read_iter = tty_read,
.write_iter = tty_write,
.splice_read = copy_splice_read,
@@ -477,7 +473,6 @@ static const struct file_operations tty_fops = {
};
static const struct file_operations console_fops = {
- .llseek = no_llseek,
.read_iter = tty_read,
.write_iter = redirected_tty_write,
.splice_read = copy_splice_read,
@@ -491,7 +486,6 @@ static const struct file_operations console_fops = {
};
static const struct file_operations hung_up_tty_fops = {
- .llseek = no_llseek,
.read_iter = hung_up_tty_read,
.write_iter = hung_up_tty_write,
.poll = hung_up_tty_poll,
diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c
index 169540417079..55db38e75cc4 100644
--- a/drivers/ufs/core/ufs-fault-injection.c
+++ b/drivers/ufs/core/ufs-fault-injection.c
@@ -3,6 +3,7 @@
#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <ufs/ufshcd.h>
#include "ufs-fault-injection.h"
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index e80a32421a8c..fe313800aed0 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -198,6 +198,24 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
+static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
+{
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ufshcd_hold(hba);
+ *val = ufshcd_readl(hba, reg);
+ ufshcd_release(hba);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return 0;
+}
+
static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -208,23 +226,11 @@ static ssize_t auto_hibern8_show(struct device *dev,
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- ret = -EBUSY;
- goto out;
- }
-
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba);
- ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
- ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
-
- ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
+ ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ if (ret)
+ return ret;
-out:
- up(&hba->host_sem);
- return ret;
+ return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
}
static ssize_t auto_hibern8_store(struct device *dev,
@@ -519,6 +525,58 @@ static const struct attribute_group ufs_sysfs_capabilities_group = {
.attrs = ufs_sysfs_capabilities_attrs,
};
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "0x%x\n", hba->ufs_version);
+}
+
+static ssize_t product_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u32 val;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", val);
+}
+
+static ssize_t man_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ u32 val;
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", val);
+}
+
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(product_id);
+static DEVICE_ATTR_RO(man_id);
+
+static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_man_id.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_ufshci_group = {
+ .name = "ufshci_capabilities",
+ .attrs = ufs_sysfs_ufshci_cap_attrs,
+};
+
static ssize_t monitor_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1502,6 +1560,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
+ &ufs_sysfs_ufshci_group,
&ufs_sysfs_monitor_group,
&ufs_sysfs_power_info_group,
&ufs_sysfs_device_descriptor_group,
diff --git a/include/trace/events/ufs.h b/drivers/ufs/core/ufs_trace.h
index c4e209fbdfbb..84deca2b841d 100644
--- a/include/trace/events/ufs.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -9,6 +9,7 @@
#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_UFS_H
+#include <ufs/ufs.h>
#include <linux/tracepoint.h>
#define str_opcode(opcode) \
@@ -395,5 +396,10 @@ TRACE_EVENT(ufshcd_exception_event,
#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/ufs/core
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ufs_trace
+
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a6f818cdef0e..24a32e2fd75e 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -39,7 +39,7 @@
#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/ufs.h>
+#include "ufs_trace.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
@@ -51,8 +51,10 @@
/* UIC command timeout, unit: ms */
-#define UIC_CMD_TIMEOUT 500
-
+enum {
+ UIC_CMD_TIMEOUT_DEFAULT = 500,
+ UIC_CMD_TIMEOUT_MAX = 2000,
+};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 50 msecs if NOP OUT hangs without response */
@@ -116,6 +118,23 @@ static bool is_mcq_supported(struct ufs_hba *hba)
module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT;
+
+static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT,
+ UIC_CMD_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops uic_cmd_timeout_ops = {
+ .set = uic_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
+MODULE_PARM_DESC(uic_cmd_timeout,
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+
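
param_set_uint_minmax() rejects out-of-range writes with -EINVAL, so the timeout stays within the validated window even when changed at runtime through /sys/module/<module>/parameters/. A minimal sketch of the same bounded-parameter pattern for a hypothetical driver:

	#include <linux/moduleparam.h>

	static unsigned int demo_timeout = 500;

	static int demo_timeout_set(const char *val, const struct kernel_param *kp)
	{
		/* clamp window [500, 2000]; writes outside it fail with -EINVAL */
		return param_set_uint_minmax(val, kp, 500, 2000);
	}

	static const struct kernel_param_ops demo_timeout_ops = {
		.set = demo_timeout_set,
		.get = param_get_uint,
	};

	module_param_cb(demo_timeout, &demo_timeout_ops, &demo_timeout, 0644);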
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -1785,8 +1804,6 @@ static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
if (!ufshcd_is_clkscaling_supported(hba))
return;
@@ -1798,9 +1815,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+ hba->clk_scaling.workq = alloc_ordered_workqueue(
+ "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
hba->clk_scaling.is_initialized = true;
}
@@ -2124,8 +2140,6 @@ static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clk_gating_00")];
-
if (!ufshcd_is_clkgating_allowed(hba))
return;
@@ -2135,10 +2149,9 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
- hba->host->host_no);
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM | WQ_HIGHPRI);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
+ "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ hba->host->host_no);
ufshcd_init_clk_gating_sysfs(hba);
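
alloc_ordered_workqueue() takes a printf-style format, which removes the fixed-size name buffers above, and unlike create_singlethread_workqueue() it lets the caller pass WQ_MEM_RECLAIM so queues on the I/O path keep a rescuer thread for forward progress under memory pressure. A hedged sketch:

	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_create_wq(int instance)
	{
		/* ordered: at most one work item executes at a time, FIFO order */
		return alloc_ordered_workqueue("demo_wq_%d", WQ_MEM_RECLAIM,
					       instance);
	}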
@@ -2452,7 +2465,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
u32 val;
int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
- 500, UIC_CMD_TIMEOUT * 1000, false, hba,
+ 500, uic_cmd_timeout * 1000, false, hba,
REG_CONTROLLER_STATUS);
return ret == 0;
}
@@ -2512,7 +2525,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
lockdep_assert_held(&hba->uic_cmd_mutex);
if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
} else {
ret = -ETIMEDOUT;
@@ -4285,7 +4298,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
}
if (!wait_for_completion_timeout(hba->uic_async_done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
cmd->command, cmd->argument3);
@@ -5876,12 +5889,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
/**
* ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
* @hba: per-adapter instance
- * @status: bkops_status value
*
* Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
* flag in the device to permit background operations if the device
- * bkops_status is greater than or equal to "status" argument passed to
- * this function, disable otherwise.
+ * bkops_status is greater than or equal to hba->urgent_bkops_lvl, and
+ * disable it otherwise.
*
* Return: 0 for success, non-zero in case of failure.
*
@@ -5889,11 +5901,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
* to know whether auto bkops is enabled or disabled after this function
* returns control to it.
*/
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
- enum bkops_status status)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
{
- int err;
+ enum bkops_status status = hba->urgent_bkops_lvl;
u32 curr_status = 0;
+ int err;
err = ufshcd_get_bkops_status(hba, &curr_status);
if (err) {
@@ -5915,23 +5927,6 @@ out:
return err;
}
-/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
- * @hba: per-adapter instance
- *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
- *
- * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
- * and negative error value for any other failure.
- *
- * Return: 0 upon success; < 0 upon failure.
- */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
-{
- return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
-}
-
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -9692,7 +9687,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* allow background operations if bkops status shows
* that performance might be impacted.
*/
- ret = ufshcd_urgent_bkops(hba);
+ ret = ufshcd_bkops_ctrl(hba);
if (ret) {
/*
* If return err in suspend flow, IO will hang.
@@ -9881,7 +9876,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If BKOPs operations are urgently needed at this moment then
* keep auto-bkops enabled or else disable it.
*/
- ufshcd_urgent_bkops(hba);
+ ufshcd_bkops_ctrl(hba);
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
@@ -10395,7 +10390,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
/*
* dev_set_drvdata() must be called before any callbacks are registered
@@ -10462,9 +10456,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM,
+ hba->host->host_no);
if (!hba->eh_wq) {
dev_err(hba->dev, "%s: failed to create eh workqueue\n",
__func__);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index c87fdc849c62..ecdfff2456e3 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -93,7 +93,7 @@ static const struct __ufs_qcom_bw_table {
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
[MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
- [MODE_MAX][0][0] = { 7643136, 307200 },
+ [MODE_MAX][0][0] = { 7643136, 819200 },
};
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index a3e69ecafd27..1f4f30d6cb42 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -31,8 +31,7 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
const char *name;
u32 *clkfreq = NULL;
struct ufs_clk_info *clki;
- int len = 0;
- size_t sz = 0;
+ ssize_t sz = 0;
if (!np)
goto out;
@@ -50,15 +49,12 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
if (cnt <= 0)
goto out;
- if (!of_get_property(np, "freq-table-hz", &len)) {
+ sz = of_property_count_u32_elems(np, "freq-table-hz");
+ if (sz <= 0) {
dev_info(dev, "freq-table-hz property not specified\n");
goto out;
}
- if (len <= 0)
- goto out;
-
- sz = len / sizeof(*clkfreq);
if (sz != 2 * cnt) {
dev_err(dev, "%s len mismatch\n", "freq-table-hz");
ret = -EINVAL;
@@ -272,10 +268,10 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba)
const char **clk_names;
int cnt, i, ret;
- if (!of_find_property(np, "operating-points-v2", NULL))
+ if (!of_property_present(np, "operating-points-v2"))
return 0;
- if (of_find_property(np, "freq-table-hz", NULL)) {
+ if (of_property_present(np, "freq-table-hz")) {
dev_err(dev, "%s: operating-points and freq-table-hz are incompatible\n",
__func__);
return -EINVAL;
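
of_property_count_u32_elems() returns the element count directly (or a negative errno), replacing the of_get_property() byte-length arithmetic above; of_property_present() likewise replaces presence checks via of_find_property(). A minimal sketch (demo names hypothetical, assuming a <min max> pair per clock as in the binding):

	#include <linux/errno.h>
	#include <linux/of.h>

	static int demo_check_freq_table(struct device_node *np, int clk_cnt)
	{
		int sz;

		if (!of_property_present(np, "freq-table-hz"))
			return -ENODATA;

		sz = of_property_count_u32_elems(np, "freq-table-hz");
		if (sz != 2 * clk_cnt)		/* one <min max> pair per clock */
			return -EINVAL;

		return sz;
	}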
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 20d2a55cb40b..004a549c6c7d 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -118,7 +118,7 @@ static const struct sysfs_ops map_sysfs_ops = {
.show = map_type_show,
};
-static struct kobj_type map_attr_type = {
+static const struct kobj_type map_attr_type = {
.release = map_release,
.sysfs_ops = &map_sysfs_ops,
.default_groups = map_groups,
@@ -207,7 +207,7 @@ static const struct sysfs_ops portio_sysfs_ops = {
.show = portio_type_show,
};
-static struct kobj_type portio_attr_type = {
+static const struct kobj_type portio_attr_type = {
.release = portio_release,
.sysfs_ops = &portio_sysfs_ops,
.default_groups = portio_groups,
diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
index 1f6320d98a76..591d149de8f3 100644
--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -37,8 +37,7 @@ struct cdns3_wrap {
#define PCI_DRIVER_NAME "cdns3-pci-usbss"
#define PLAT_DRIVER_NAME "cdns-usb3"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0100
+#define PCI_DEVICE_ID_CDNS_USB3 0x0100
static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
{
@@ -190,7 +189,7 @@ static void cdns3_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id cdns3_pci_ids[] = {
- { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_USB3) },
{ 0, }
};
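
PCI_VDEVICE(CDNS, id) expands to the PCI_VENDOR_ID_CDNS vendor, the given device id, and PCI_ANY_ID for subvendor/subdevice, which is what lets the table entry above shrink. A hedged sketch of a minimal ID table built this way:

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id demo_pci_ids[] = {
		{ PCI_VDEVICE(CDNS, 0x0100) },	/* subvendor/subdevice wildcarded */
		{ }
	};
	MODULE_DEVICE_TABLE(pci, demo_pci_ids);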
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index 225540fc81ba..2d05368a6745 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -28,10 +28,11 @@
#define PCI_DRIVER_NAME "cdns-pci-usbssp"
#define PLAT_DRIVER_NAME "cdns-usbssp"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0200
-#define CDNS_DRD_ID 0x0100
-#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
+#define PCI_DEVICE_ID_CDNS_USB3 0x0100
+#define PCI_DEVICE_ID_CDNS_UDC 0x0200
+
+#define PCI_CLASS_SERIAL_USB_CDNS_USB3 (PCI_CLASS_SERIAL_USB << 8 | 0x80)
+#define PCI_CLASS_SERIAL_USB_CDNS_UDC PCI_CLASS_SERIAL_USB_DEVICE
static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
@@ -40,10 +41,10 @@ static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
* Platform has two functions. The first keeps resources for
* Host/Device while the second keeps resources for DRD/OTG.
*/
- if (pdev->device == CDNS_DEVICE_ID)
- return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
- else if (pdev->device == CDNS_DRD_ID)
- return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
+ if (pdev->device == PCI_DEVICE_ID_CDNS_UDC)
+ return pci_get_device(pdev->vendor, PCI_DEVICE_ID_CDNS_USB3, NULL);
+ if (pdev->device == PCI_DEVICE_ID_CDNS_USB3)
+ return pci_get_device(pdev->vendor, PCI_DEVICE_ID_CDNS_UDC, NULL);
return NULL;
}
@@ -220,12 +221,12 @@ static const struct dev_pm_ops cdnsp_pci_pm_ops = {
};
static const struct pci_device_id cdnsp_pci_ids[] = {
- { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
- { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- CDNS_DRD_IF, PCI_ANY_ID },
- { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
- CDNS_DRD_IF, PCI_ANY_ID },
+ { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_UDC),
+ .class = PCI_CLASS_SERIAL_USB_CDNS_UDC },
+ { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_UDC),
+ .class = PCI_CLASS_SERIAL_USB_CDNS_USB3 },
+ { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_USB3),
+ .class = PCI_CLASS_SERIAL_USB_CDNS_USB3 },
{ 0, }
};
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index dbd83d321bca..46852529499d 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -718,7 +718,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev,
seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
cur_td->last_trb, hw_deq);
- if (seg && (pep->ep_state & EP_ENABLED))
+ if (seg && (pep->ep_state & EP_ENABLED) &&
+ !(pep->ep_state & EP_DIS_IN_RROGRESS))
cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
cur_td, &deq_state);
else
@@ -736,7 +737,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev,
* During disconnect all endpoints will be disabled so we don't
* have to worry about updating dequeue pointer.
*/
- if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) {
+ if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING ||
+ pep->ep_state & EP_DIS_IN_RROGRESS) {
status = -ESHUTDOWN;
ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
}
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index ceca4d839dfd..7ba760ee62e3 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -62,7 +62,9 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.resume_quirk = xhci_cdns3_resume_quirk,
};
-static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci = {
+ .quirks = XHCI_CDNS_SCTX_QUIRK,
+};
static int __cdns_host_init(struct cdns *cdns)
{
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index bdc04ce919f7..c64ab0e07ea0 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -128,7 +128,7 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
* In case the fsl,usbmisc property is not present this device doesn't
* need usbmisc. Return NULL (which is no error here)
*/
- if (!of_get_property(np, "fsl,usbmisc", NULL))
+ if (!of_property_present(np, "fsl,usbmisc"))
return NULL;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
diff --git a/drivers/usb/chipidea/ci_hdrc_npcm.c b/drivers/usb/chipidea/ci_hdrc_npcm.c
index b14127873c55..3e5e05dbda89 100644
--- a/drivers/usb/chipidea/ci_hdrc_npcm.c
+++ b/drivers/usb/chipidea/ci_hdrc_npcm.c
@@ -18,7 +18,7 @@ struct npcm_udc_data {
struct ci_hdrc_platform_data pdata;
};
-static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned event)
+static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned int event)
{
struct device *dev = ci->dev->parent;
@@ -28,7 +28,7 @@ static int npcm_udc_notify_event(struct ci_hdrc *ci, unsigned event)
hw_write(ci, OP_USBMODE, 0xffffffff, 0x0);
break;
default:
- dev_dbg(dev, "unknown ci_hdrc event (%d)\n",event);
+ dev_dbg(dev, "unknown ci_hdrc event (%d)\n", event);
break;
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 2d7f616270c1..69ef3cd8d4f8 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -86,7 +86,7 @@ static int hw_device_state(struct ci_hdrc *ci, u32 dma)
hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
/* interrupt, error, port change, reset, sleep/suspend */
hw_write(ci, OP_USBINTR, ~0,
- USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+ USBi_UI|USBi_UEI|USBi_PCI|USBi_URI);
} else {
hw_write(ci, OP_USBINTR, ~0, 0);
}
@@ -877,6 +877,7 @@ __releases(ci->lock)
__acquires(ci->lock)
{
int retval;
+ u32 intr;
spin_unlock(&ci->lock);
if (ci->gadget.speed != USB_SPEED_UNKNOWN)
@@ -890,6 +891,11 @@ __acquires(ci->lock)
if (retval)
goto done;
+ /* clear SLI */
+ hw_write(ci, OP_USBSTS, USBi_SLI, USBi_SLI);
+ intr = hw_read(ci, OP_USBINTR, ~0);
+ hw_write(ci, OP_USBINTR, ~0, intr | USBi_SLI);
+
ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
if (ci->status == NULL)
retval = -ENOMEM;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0c1b69d944ca..605fea461102 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -962,10 +962,12 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
struct acm *acm = tty->driver_data;
ss->line = acm->minor;
+ mutex_lock(&acm->port.mutex);
ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
jiffies_to_msecs(acm->port.closing_wait) / 10;
+ mutex_unlock(&acm->port.mutex);
return 0;
}
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 6bd9fe565385..34e46ef308ab 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -754,7 +754,7 @@ static struct urb *usbtmc_create_urb(void)
if (!urb)
return NULL;
- dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ dmabuf = kzalloc(bufsize, GFP_KERNEL);
if (!dmabuf) {
usb_free_urb(urb);
return NULL;
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 59b55d6cf490..b7bea1015d7c 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -107,19 +107,18 @@ EXPORT_SYMBOL_GPL(usb_speed_string);
*/
enum usb_device_speed usb_get_maximum_speed(struct device *dev)
{
- const char *maximum_speed;
+ const char *p = "maximum-speed";
int ret;
- ret = device_property_read_string(dev, "maximum-speed", &maximum_speed);
- if (ret < 0)
- return USB_SPEED_UNKNOWN;
-
- ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
+ ret = device_property_match_property_string(dev, p, ssp_rate, ARRAY_SIZE(ssp_rate));
if (ret > 0)
return USB_SPEED_SUPER_PLUS;
- ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
- return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
+ ret = device_property_match_property_string(dev, p, speed_names, ARRAY_SIZE(speed_names));
+ if (ret > 0)
+ return ret;
+
+ return USB_SPEED_UNKNOWN;
}
EXPORT_SYMBOL_GPL(usb_get_maximum_speed);
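
device_property_match_property_string() reads the string property and matches it against an array in one call, returning the matching index or a negative errno, which is what collapses the read-then-match_string() sequence above. A minimal sketch (demo names hypothetical):

	#include <linux/kernel.h>
	#include <linux/property.h>

	static int demo_speed_index(struct device *dev)
	{
		static const char * const speeds[] = {
			"low-speed", "full-speed", "high-speed", "super-speed",
		};

		/* index into speeds[] on success, negative errno otherwise */
		return device_property_match_property_string(dev, "maximum-speed",
							     speeds,
							     ARRAY_SIZE(speeds));
	}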
@@ -276,14 +275,13 @@ EXPORT_SYMBOL_GPL(usb_decode_interval);
*/
enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
{
- struct device_node *controller = NULL;
+ struct device_node *controller;
struct of_phandle_args args;
const char *dr_mode;
int index;
int err;
- do {
- controller = of_find_node_with_property(controller, "phys");
+ for_each_node_with_property(controller, "phys") {
if (!of_device_is_available(controller))
continue;
index = 0;
@@ -306,7 +304,7 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
goto finish;
index++;
} while (args.np);
- } while (controller);
+ }
finish:
err = of_property_read_string(controller, "dr_mode", &dr_mode);
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 7f8a912d4fe2..21585ed89ef8 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -142,6 +142,53 @@ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
}
EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
+/**
+ * usb_acpi_add_usb4_devlink - add device link to USB4 Host Interface for tunneled USB3 devices
+ *
+ * @udev: Tunneled USB3 device connected to a roothub.
+ *
+ * Adds a device link between a tunneled USB3 device and the USB4 Host Interface
+ * device to ensure correct runtime PM suspend and resume order. This function
+ * should only be called for tunneled USB3 devices.
+ * The USB4 Host Interface this tunneled device depends on is looked up from the
+ * roothub port's ACPI device-specific data (_DSD) entry.
+ *
+ * Return: negative error code on failure, 0 otherwise
+ */
+static int usb_acpi_add_usb4_devlink(struct usb_device *udev)
+{
+ const struct device_link *link;
+ struct usb_port *port_dev;
+ struct usb_hub *hub;
+
+ if (!udev->parent || udev->parent->parent)
+ return 0;
+
+ hub = usb_hub_to_struct_hub(udev->parent);
+ port_dev = hub->ports[udev->portnum - 1];
+
+ struct fwnode_handle *nhi_fwnode __free(fwnode_handle) =
+ fwnode_find_reference(dev_fwnode(&port_dev->dev), "usb4-host-interface", 0);
+
+ if (IS_ERR(nhi_fwnode))
+ return 0;
+
+ link = device_link_add(&port_dev->child->dev, nhi_fwnode->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER |
+ DL_FLAG_RPM_ACTIVE |
+ DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ dev_err(&port_dev->dev, "Failed to create device link from %s to %s\n",
+ dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev));
+ return -EINVAL;
+ }
+
+ dev_dbg(&port_dev->dev, "Created device link from %s to %s\n",
+ dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev));
+
+ return 0;
+}
+
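
The __free(fwnode_handle) annotation above attaches fwnode_handle_put() as a scope-exit action, so the reference taken by fwnode_find_reference() is dropped automatically on every return path. A hedged sketch of the same lookup shape:

	#include <linux/cleanup.h>
	#include <linux/err.h>
	#include <linux/property.h>

	static bool demo_has_usb4_link(struct device *dev)
	{
		struct fwnode_handle *nhi __free(fwnode_handle) =
			fwnode_find_reference(dev_fwnode(dev),
					      "usb4-host-interface", 0);

		/* reference released when nhi leaves scope, even on this path */
		return !IS_ERR(nhi);
	}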
/*
* Private to usb-acpi, all the core needs to know is that
* port_dev->location is non-zero when it has been set by the firmware.
@@ -262,6 +309,12 @@ usb_acpi_find_companion_for_device(struct usb_device *udev)
if (!hub)
return NULL;
+
+ /* Tunneled USB3 devices depend on USB4 Host Interface, set device link to it */
+ if (udev->speed >= USB_SPEED_SUPER &&
+ udev->tunnel_mode != USB_LINK_NATIVE)
+ usb_acpi_add_usb4_devlink(udev);
+
/*
* This is an embedded USB device connected to a port and such
* devices share port's ACPI companion.
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 7c82ab590401..3116ac72747f 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -702,6 +702,7 @@ static int params_show(struct seq_file *seq, void *v)
print_param(seq, p, uframe_sched);
print_param(seq, p, external_id_pin_ctl);
print_param(seq, p, power_down);
+ print_param(seq, p, no_clock_gating);
print_param(seq, p, lpm);
print_param(seq, p, lpm_clock_gating);
print_param(seq, p, besl);
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index a8605b02115b..1ad8fa3f862a 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -127,6 +127,15 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
role = USB_ROLE_DEVICE;
}
+ if ((IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) ||
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)) &&
+ dwc2_is_device_mode(hsotg) &&
+ hsotg->lx_state == DWC2_L2 &&
+ hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+ hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+
if (role == USB_ROLE_HOST) {
already = dwc2_ovr_avalid(hsotg, true);
} else if (role == USB_ROLE_DEVICE) {
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index a937eadbc9b3..68226defdc60 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -23,6 +23,7 @@ static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg)
p->max_transfer_size = 65535;
p->max_packet_count = 511;
p->ahbcfg = 0x10;
+ p->no_clock_gating = true;
}
static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
@@ -352,6 +353,7 @@ const struct of_device_id dwc2_of_match_table[] = {
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
const struct acpi_device_id dwc2_acpi_match[] = {
+ /* This ID refers to the same USB IP as of_device_id brcm,bcm2835-usb */
{ "BCM2848", (kernel_ulong_t)dwc2_set_bcm_params },
{ },
};
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 7b84416dfc2b..c1b7209b9483 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -469,18 +469,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
spin_lock_init(&hsotg->lock);
- hsotg->irq = platform_get_irq(dev, 0);
- if (hsotg->irq < 0)
- return hsotg->irq;
-
- dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
- hsotg->irq);
- retval = devm_request_irq(hsotg->dev, hsotg->irq,
- dwc2_handle_common_intr, IRQF_SHARED,
- dev_name(hsotg->dev), hsotg);
- if (retval)
- return retval;
-
hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
if (IS_ERR(hsotg->vbus_supply)) {
retval = PTR_ERR(hsotg->vbus_supply);
@@ -524,6 +512,20 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
goto error;
+ hsotg->irq = platform_get_irq(dev, 0);
+ if (hsotg->irq < 0) {
+ retval = hsotg->irq;
+ goto error;
+ }
+
+ dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
+ hsotg->irq);
+ retval = devm_request_irq(hsotg->dev, hsotg->irq,
+ dwc2_handle_common_intr, IRQF_SHARED,
+ dev_name(hsotg->dev), hsotg);
+ if (retval)
+ goto error;
+
/*
* For OTG cores, set the force mode bits to reflect the value
* of dr_mode. Force mode bits should not be touched at any
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
index 8ee448068503..64c0cd1995aa 100644
--- a/drivers/usb/dwc3/dwc3-imx8mp.c
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -5,6 +5,7 @@
* Copyright (c) 2020 NXP.
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -96,7 +97,8 @@ static void imx8mp_configure_glue(struct dwc3_imx8mp *dwc3_imx)
writel(value, dwc3_imx->glue_base + USB_CTRL1);
}
-static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx)
+static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx,
+ pm_message_t msg)
{
struct dwc3 *dwc3 = platform_get_drvdata(dwc3_imx->dwc3);
u32 val;
@@ -106,12 +108,14 @@ static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx)
val = readl(dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
- if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci)
- val |= USB_WAKEUP_EN | USB_WAKEUP_SS_CONN |
- USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN;
- else if (dwc3->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
+ if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci) {
+ val |= USB_WAKEUP_EN | USB_WAKEUP_DPDM_EN;
+ if (PMSG_IS_AUTO(msg))
+ val |= USB_WAKEUP_SS_CONN | USB_WAKEUP_U3_EN;
+ } else {
val |= USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN |
USB_WAKEUP_VBUS_SRC_SESS_VAL;
+ }
writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
}
@@ -144,10 +148,21 @@ static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
return IRQ_HANDLED;
}
+static int dwc3_imx8mp_set_software_node(struct device *dev)
+{
+ struct property_entry props[3] = { 0 };
+ int prop_idx = 0;
+
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk");
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk");
+
+ return device_create_managed_software_node(dev, props, NULL);
+}
+
static int dwc3_imx8mp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *dwc3_np, *node = dev->of_node;
+ struct device_node *node = dev->of_node;
struct dwc3_imx8mp *dwc3_imx;
struct resource *res;
int err, irq;
@@ -178,39 +193,26 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
return PTR_ERR(dwc3_imx->glue_base);
}
- dwc3_imx->hsio_clk = devm_clk_get(dev, "hsio");
- if (IS_ERR(dwc3_imx->hsio_clk)) {
- err = PTR_ERR(dwc3_imx->hsio_clk);
- dev_err(dev, "Failed to get hsio clk, err=%d\n", err);
- return err;
- }
-
- err = clk_prepare_enable(dwc3_imx->hsio_clk);
- if (err) {
- dev_err(dev, "Failed to enable hsio clk, err=%d\n", err);
- return err;
- }
-
- dwc3_imx->suspend_clk = devm_clk_get(dev, "suspend");
- if (IS_ERR(dwc3_imx->suspend_clk)) {
- err = PTR_ERR(dwc3_imx->suspend_clk);
- dev_err(dev, "Failed to get suspend clk, err=%d\n", err);
- goto disable_hsio_clk;
- }
+ dwc3_imx->hsio_clk = devm_clk_get_enabled(dev, "hsio");
+ if (IS_ERR(dwc3_imx->hsio_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc3_imx->hsio_clk),
+ "Failed to get hsio clk\n");
- err = clk_prepare_enable(dwc3_imx->suspend_clk);
- if (err) {
- dev_err(dev, "Failed to enable suspend clk, err=%d\n", err);
- goto disable_hsio_clk;
- }
+ dwc3_imx->suspend_clk = devm_clk_get_enabled(dev, "suspend");
+ if (IS_ERR(dwc3_imx->suspend_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc3_imx->suspend_clk),
+ "Failed to get suspend clk\n");
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto disable_clks;
- }
+ if (irq < 0)
+ return irq;
dwc3_imx->irq = irq;
+ struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(node,
+ "snps,dwc3");
+ if (!dwc3_np)
+ return dev_err_probe(dev, -ENODEV, "failed to find dwc3 core child\n");
+
imx8mp_configure_glue(dwc3_imx);
pm_runtime_set_active(dev);
@@ -219,17 +221,17 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
if (err < 0)
goto disable_rpm;
- dwc3_np = of_get_compatible_child(node, "snps,dwc3");
- if (!dwc3_np) {
+ err = dwc3_imx8mp_set_software_node(dev);
+ if (err) {
err = -ENODEV;
- dev_err(dev, "failed to find dwc3 core child\n");
+ dev_err(dev, "failed to create software node\n");
goto disable_rpm;
}
err = of_platform_populate(node, NULL, NULL, dev);
if (err) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
- goto err_node_put;
+ goto disable_rpm;
}
dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np);
@@ -238,7 +240,6 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
err = -ENODEV;
goto depopulate;
}
- of_node_put(dwc3_np);
err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
IRQF_ONESHOT, dev_name(dev), dwc3_imx);
@@ -254,51 +255,39 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
depopulate:
of_platform_depopulate(dev);
-err_node_put:
- of_node_put(dwc3_np);
disable_rpm:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
-disable_clks:
- clk_disable_unprepare(dwc3_imx->suspend_clk);
-disable_hsio_clk:
- clk_disable_unprepare(dwc3_imx->hsio_clk);
return err;
}
static void dwc3_imx8mp_remove(struct platform_device *pdev)
{
- struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
pm_runtime_get_sync(dev);
of_platform_depopulate(dev);
- clk_disable_unprepare(dwc3_imx->suspend_clk);
- clk_disable_unprepare(dwc3_imx->hsio_clk);
-
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
}
-static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx,
- pm_message_t msg)
+static int dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx, pm_message_t msg)
{
if (dwc3_imx->pm_suspended)
return 0;
/* Wakeup enable */
if (PMSG_IS_AUTO(msg) || device_may_wakeup(dwc3_imx->dev))
- dwc3_imx8mp_wakeup_enable(dwc3_imx);
+ dwc3_imx8mp_wakeup_enable(dwc3_imx, msg);
dwc3_imx->pm_suspended = true;
return 0;
}
-static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx,
- pm_message_t msg)
+static int dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx, pm_message_t msg)
{
struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
int ret = 0;
@@ -331,7 +320,7 @@ static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx,
return ret;
}
-static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev)
+static int dwc3_imx8mp_pm_suspend(struct device *dev)
{
struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
int ret;
@@ -349,7 +338,7 @@ static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
+static int dwc3_imx8mp_pm_resume(struct device *dev)
{
struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
int ret;
@@ -379,7 +368,7 @@ static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
return ret;
}
-static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev)
+static int dwc3_imx8mp_runtime_suspend(struct device *dev)
{
struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
@@ -388,7 +377,7 @@ static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev)
return dwc3_imx8mp_suspend(dwc3_imx, PMSG_AUTO_SUSPEND);
}
-static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev)
+static int dwc3_imx8mp_runtime_resume(struct device *dev)
{
struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
@@ -398,9 +387,9 @@ static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops dwc3_imx8mp_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume)
- SET_RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend,
- dwc3_imx8mp_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume)
+ RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend, dwc3_imx8mp_runtime_resume,
+ NULL)
};
static const struct of_device_id dwc3_imx8mp_of_match[] = {
@@ -414,7 +403,7 @@ static struct platform_driver dwc3_imx8mp_driver = {
.remove_new = dwc3_imx8mp_remove,
.driver = {
.name = "imx8mp-dwc3",
- .pm = &dwc3_imx8mp_dev_pm_ops,
+ .pm = pm_ptr(&dwc3_imx8mp_dev_pm_ops),
.of_match_table = dwc3_imx8mp_of_match,
},
};
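[Editor's note: the imx8mp conversion above leans on two helpers that shrink error handling: devm_clk_get_enabled(), which ties clock enable/disable to the device lifetime, and dev_err_probe(), which logs and returns in one step while staying quiet on -EPROBE_DEFER. A minimal sketch, assuming a hypothetical clock named "bus":]

	#include <linux/clk.h>
	#include <linux/device.h>

	static int example_get_clock(struct device *dev, struct clk **out)
	{
		struct clk *clk;

		/* Gets, prepares and enables the clock; all of it is undone
		 * automatically on driver detach, so error paths need no
		 * clk_disable_unprepare() calls. */
		clk = devm_clk_get_enabled(dev, "bus");
		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "Failed to get bus clk\n");
		*out = clk;
		return 0;
	}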
diff --git a/drivers/usb/dwc3/dwc3-octeon.c b/drivers/usb/dwc3/dwc3-octeon.c
index 6010135e1acc..1a3b205367fd 100644
--- a/drivers/usb/dwc3/dwc3-octeon.c
+++ b/drivers/usb/dwc3/dwc3-octeon.c
@@ -419,7 +419,7 @@ static int dwc3_octeon_probe(struct platform_device *pdev)
int ref_clk_sel, ref_clk_fsel, mpll_mul;
int power_active_low, power_gpio;
int err, len;
- u32 clock_rate;
+ u32 clock_rate, gpio_pwr[3];
if (of_property_read_u32(node, "refclk-frequency", &clock_rate)) {
dev_err(dev, "No UCTL \"refclk-frequency\"\n");
@@ -476,21 +476,10 @@ static int dwc3_octeon_probe(struct platform_device *pdev)
power_gpio = DWC3_GPIO_POWER_NONE;
power_active_low = 0;
- if (of_find_property(node, "power", &len)) {
- u32 gpio_pwr[3];
-
- switch (len) {
- case 8:
- of_property_read_u32_array(node, "power", gpio_pwr, 2);
- break;
- case 12:
- of_property_read_u32_array(node, "power", gpio_pwr, 3);
+ len = of_property_read_variable_u32_array(node, "power", gpio_pwr, 2, 3);
+ if (len > 0) {
+ if (len == 3)
power_active_low = gpio_pwr[2] & 0x01;
- break;
- default:
- dev_err(dev, "invalid power configuration\n");
- return -EINVAL;
- }
power_gpio = gpio_pwr[1];
}
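[Editor's note: of_property_read_variable_u32_array(), used in the octeon hunk above, accepts any element count within [min, max] and returns the number of elements actually read, replacing the manual of_find_property() length switch. A sketch under the same two-or-three-cell "power" layout:]

	#include <linux/of.h>

	static int example_read_power(struct device_node *node)
	{
		u32 gpio_pwr[3];
		int len;

		/* Returns the number of elements read (2 or 3 here) or a
		 * negative errno if the property is missing or malformed. */
		len = of_property_read_variable_u32_array(node, "power",
							  gpio_pwr, 2, 3);
		if (len < 0)
			return len;

		/* gpio_pwr[1] holds the GPIO; the optional third cell
		 * carries the active-low flag. */
		return (len == 3) ? (gpio_pwr[2] & 0x01) : 0;
	}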
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 88fb6706a18d..c1d4b52f25b0 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -4,6 +4,7 @@
* Inspired by dwc3-of-simple.c
*/
+#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
@@ -702,11 +703,12 @@ static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
static int dwc3_qcom_of_register_core(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node, *dwc3_np;
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int ret;
- dwc3_np = of_get_compatible_child(np, "snps,dwc3");
+ struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np,
+ "snps,dwc3");
if (!dwc3_np) {
dev_err(dev, "failed to find dwc3 core child\n");
return -ENODEV;
@@ -715,7 +717,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to register dwc3 core - %d\n", ret);
- goto node_put;
+ return ret;
}
qcom->dwc3 = of_find_device_by_node(dwc3_np);
@@ -725,9 +727,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
of_platform_depopulate(dev);
}
-node_put:
- of_node_put(dwc3_np);
-
return ret;
}
@@ -736,7 +735,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
- struct resource *res;
int ret, i;
bool ignore_pipe_clk;
bool wakeup_source;
@@ -774,9 +772,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
goto reset_assert;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- qcom->qscratch_base = devm_ioremap_resource(dev, res);
+ qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qcom->qscratch_base)) {
ret = PTR_ERR(qcom->qscratch_base);
goto clk_disable;
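[Editor's note: devm_platform_ioremap_resource(), adopted in the qcom hunk above, collapses platform_get_resource() plus devm_ioremap_resource() into one call. A one-line sketch:]

	#include <linux/platform_device.h>

	static void __iomem *example_map_regs(struct platform_device *pdev)
	{
		/* Index 0 selects the first IORESOURCE_MEM entry. */
		return devm_platform_ioremap_resource(pdev, 0);
	}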
diff --git a/drivers/usb/dwc3/dwc3-rtk.c b/drivers/usb/dwc3/dwc3-rtk.c
index 3cd6b184551c..e9c8b032c72c 100644
--- a/drivers/usb/dwc3/dwc3-rtk.c
+++ b/drivers/usb/dwc3/dwc3-rtk.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -173,23 +174,20 @@ static const char *const speed_names[] = {
static enum usb_device_speed __get_dwc3_maximum_speed(struct device_node *np)
{
- struct device_node *dwc3_np;
const char *maximum_speed;
int ret;
- dwc3_np = of_get_compatible_child(np, "snps,dwc3");
+ struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np,
+ "snps,dwc3");
if (!dwc3_np)
return USB_SPEED_UNKNOWN;
ret = of_property_read_string(dwc3_np, "maximum-speed", &maximum_speed);
if (ret < 0)
- goto out;
+ return USB_SPEED_UNKNOWN;
ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
-out:
- of_node_put(dwc3_np);
-
return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
@@ -276,7 +274,6 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
struct device_node *node = dev->of_node;
struct platform_device *dwc3_pdev;
struct device *dwc3_dev;
- struct device_node *dwc3_node;
enum usb_dr_mode dr_mode;
int ret = 0;
@@ -290,7 +287,8 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
return ret;
}
- dwc3_node = of_get_compatible_child(node, "snps,dwc3");
+ struct device_node *dwc3_node __free(device_node) = of_get_compatible_child(node,
+ "snps,dwc3");
if (!dwc3_node) {
dev_err(dev, "failed to find dwc3 core node\n");
ret = -ENODEV;
@@ -301,7 +299,7 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
if (!dwc3_pdev) {
dev_err(dev, "failed to find dwc3 core platform_device\n");
ret = -ENODEV;
- goto err_node_put;
+ goto depopulate;
}
dwc3_dev = &dwc3_pdev->dev;
@@ -343,14 +341,11 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
switch_usb2_role(rtk, rtk->cur_role);
platform_device_put(dwc3_pdev);
- of_node_put(dwc3_node);
return 0;
err_pdev_put:
platform_device_put(dwc3_pdev);
-err_node_put:
- of_node_put(dwc3_node);
depopulate:
of_platform_depopulate(dev);
@@ -363,30 +358,18 @@ static int dwc3_rtk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
void __iomem *regs;
- int ret = 0;
rtk = devm_kzalloc(dev, sizeof(*rtk), GFP_KERNEL);
- if (!rtk) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!rtk)
+ return -ENOMEM;
platform_set_drvdata(pdev, rtk);
rtk->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing memory resource\n");
- ret = -ENODEV;
- goto out;
- }
-
- regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- goto out;
- }
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
rtk->regs = regs;
rtk->regs_size = resource_size(res);
@@ -394,16 +377,11 @@ static int dwc3_rtk_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
rtk->pm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(rtk->pm_base)) {
- ret = PTR_ERR(rtk->pm_base);
- goto out;
- }
+ if (IS_ERR(rtk->pm_base))
+ return PTR_ERR(rtk->pm_base);
}
- ret = dwc3_rtk_probe_dwc3_core(rtk);
-
-out:
- return ret;
+ return dwc3_rtk_probe_dwc3_core(rtk);
}
static void dwc3_rtk_remove(struct platform_device *pdev)
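[Editor's note: the recurring conversion in these glue drivers is the scoped-cleanup pattern from <linux/cleanup.h>: the __free(device_node) annotation drops the node reference automatically when the variable goes out of scope, so every early return is leak-free without of_node_put() goto labels. A minimal sketch:]

	#include <linux/cleanup.h>
	#include <linux/of.h>

	static int example_find_core(struct device_node *parent)
	{
		struct device_node *child __free(device_node) =
			of_get_compatible_child(parent, "snps,dwc3");

		if (!child)
			return -ENODEV;

		/* ... use child; of_node_put() runs implicitly on return ... */
		return 0;
	}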
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index c8c7cd0c1796..2841021f3557 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -14,6 +14,7 @@
* Inspired by dwc3-omap.c and dwc3-exynos.c.
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -197,7 +198,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
struct st_dwc3 *dwc3_data;
struct resource *res;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct platform_device *child_pdev;
struct regmap *regmap;
int ret;
@@ -224,15 +225,21 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->syscfg_reg_off = res->start;
- dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
+ dev_vdbg(dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
+ struct device_node *child __free(device_node) = of_get_compatible_child(node,
+ "snps,dwc3");
+ if (!child) {
+ dev_err(dev, "failed to find dwc3 core node\n");
+ return -ENODEV;
+ }
+
dwc3_data->rstc_pwrdn =
devm_reset_control_get_exclusive(dev, "powerdown");
- if (IS_ERR(dwc3_data->rstc_pwrdn)) {
- dev_err(&pdev->dev, "could not get power controller\n");
- return PTR_ERR(dwc3_data->rstc_pwrdn);
- }
+ if (IS_ERR(dwc3_data->rstc_pwrdn))
+ return dev_err_probe(dev, PTR_ERR(dwc3_data->rstc_pwrdn),
+ "could not get power controller\n");
/* Manage PowerDown */
reset_control_deassert(dwc3_data->rstc_pwrdn);
@@ -240,26 +247,19 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->rstc_rst =
devm_reset_control_get_shared(dev, "softreset");
if (IS_ERR(dwc3_data->rstc_rst)) {
- dev_err(&pdev->dev, "could not get reset controller\n");
- ret = PTR_ERR(dwc3_data->rstc_rst);
+ ret = dev_err_probe(dev, PTR_ERR(dwc3_data->rstc_rst),
+ "could not get reset controller\n");
goto undo_powerdown;
}
/* Manage SoftReset */
reset_control_deassert(dwc3_data->rstc_rst);
- child = of_get_compatible_child(node, "snps,dwc3");
- if (!child) {
- dev_err(&pdev->dev, "failed to find dwc3 core node\n");
- ret = -ENODEV;
- goto err_node_put;
- }
-
/* Allocate and initialize the core */
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
- goto err_node_put;
+ goto undo_softreset;
}
child_pdev = of_find_device_by_node(child);
@@ -270,7 +270,6 @@ static int st_dwc3_probe(struct platform_device *pdev)
}
dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
- of_node_put(child);
platform_device_put(child_pdev);
/*
@@ -282,8 +281,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
ret = st_dwc3_drd_init(dwc3_data);
if (ret) {
dev_err(dev, "drd initialisation failed\n");
- of_platform_depopulate(dev);
- goto undo_softreset;
+ goto depopulate;
}
/* ST glue logic init */
@@ -294,8 +292,6 @@ static int st_dwc3_probe(struct platform_device *pdev)
depopulate:
of_platform_depopulate(dev);
-err_node_put:
- of_node_put(child);
undo_softreset:
reset_control_assert(dwc3_data->rstc_rst);
undo_powerdown:
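[Editor's note: the st_dwc3 probe above keeps the classic reverse-order unwind: resources are released in the opposite order of acquisition, one goto label per step. A sketch with illustrative names; the reset handles are devm-managed, so only the deasserts need undoing:]

	#include <linux/reset.h>

	static int example_probe_resets(struct device *dev)
	{
		struct reset_control *pwrdn, *rst;
		int ret;

		pwrdn = devm_reset_control_get_exclusive(dev, "powerdown");
		if (IS_ERR(pwrdn))
			return PTR_ERR(pwrdn);
		reset_control_deassert(pwrdn);

		rst = devm_reset_control_get_shared(dev, "softreset");
		if (IS_ERR(rst)) {
			ret = PTR_ERR(rst);
			goto undo_powerdown;
		}
		reset_control_deassert(rst);

		return 0;

	undo_powerdown:
		reset_control_assert(pwrdn);
		return ret;
	}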
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index f1298b1b4f84..b5e5be424ce9 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -285,11 +285,8 @@ static int dwc3_xlnx_probe(struct platform_device *pdev)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- dev_err_probe(dev, ret, "failed to map registers\n");
- return ret;
- }
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs), "failed to map registers\n");
match = of_match_node(dwc3_xlnx_of_match, pdev->dev.of_node);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 0e7c1e947c0a..c82a6a0fba93 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -6,13 +6,13 @@
#include <linux/kstrtox.h>
#include <linux/nls.h>
#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
#include <linux/usb/gadget_configfs.h>
#include <linux/usb/webusb.h>
#include "configfs.h"
-#include "u_f.h"
#include "u_os_desc.h"
-int check_user_usb_string(const char *name,
+static int check_user_usb_string(const char *name,
struct usb_gadget_strings *stringtab_dev)
{
u16 num;
@@ -902,7 +902,7 @@ static struct configfs_group_operations gadget_language_langid_group_ops = {
.drop_item = gadget_language_string_drop,
};
-static struct config_item_type gadget_language_type = {
+static const struct config_item_type gadget_language_type = {
.ct_item_ops = &gadget_language_langid_item_ops,
.ct_group_ops = &gadget_language_langid_group_ops,
.ct_attrs = gadget_language_langid_attrs,
@@ -961,7 +961,7 @@ static struct configfs_group_operations gadget_language_group_ops = {
.drop_item = &gadget_language_drop,
};
-static struct config_item_type gadget_language_strings_type = {
+static const struct config_item_type gadget_language_strings_type = {
.ct_group_ops = &gadget_language_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -1106,7 +1106,7 @@ static struct configfs_attribute *webusb_attrs[] = {
NULL,
};
-static struct config_item_type webusb_type = {
+static const struct config_item_type webusb_type = {
.ct_attrs = webusb_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1263,7 +1263,7 @@ static struct configfs_item_operations os_desc_ops = {
.drop_link = os_desc_unlink,
};
-static struct config_item_type os_desc_type = {
+static const struct config_item_type os_desc_type = {
.ct_item_ops = &os_desc_ops,
.ct_attrs = os_desc_attrs,
.ct_owner = THIS_MODULE,
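[Editor's note: constifying these config_item_type tables, as the configfs hunks above do, lets the compiler place them in read-only memory, hardening them against stray writes. A minimal illustrative sketch (ops and attrs tables omitted):]

	#include <linux/configfs.h>

	/* Read-only type descriptor; eligible for rodata placement. */
	static const struct config_item_type example_type = {
		.ct_owner = THIS_MODULE,
	};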
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 724b2631f249..7061720b9732 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -41,6 +41,7 @@ struct f_acm {
struct gserial port;
u8 ctrl_id, data_id;
u8 port_num;
+ u8 bInterfaceProtocol;
u8 pending;
@@ -89,7 +90,7 @@ acm_iad_descriptor = {
.bInterfaceCount = 2, // control + data
.bFunctionClass = USB_CLASS_COMM,
.bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
- .bFunctionProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
+ /* .bFunctionProtocol = DYNAMIC */
/* .iFunction = DYNAMIC */
};
@@ -101,7 +102,7 @@ static struct usb_interface_descriptor acm_control_interface_desc = {
.bNumEndpoints = 1,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
- .bInterfaceProtocol = USB_CDC_ACM_PROTO_AT_V25TER,
+ /* .bInterfaceProtocol = DYNAMIC */
/* .iInterface = DYNAMIC */
};
@@ -663,6 +664,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
acm->notify = ep;
+ acm_iad_descriptor.bFunctionProtocol = acm->bInterfaceProtocol;
+ acm_control_interface_desc.bInterfaceProtocol = acm->bInterfaceProtocol;
+
/* allocate notification */
acm->notify_req = gs_alloc_req(ep,
sizeof(struct usb_cdc_notification) + 2,
@@ -719,8 +723,14 @@ static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
static void acm_free_func(struct usb_function *f)
{
struct f_acm *acm = func_to_acm(f);
+ struct f_serial_opts *opts;
+
+ opts = container_of(f->fi, struct f_serial_opts, func_inst);
kfree(acm);
+ mutex_lock(&opts->lock);
+ opts->instances--;
+ mutex_unlock(&opts->lock);
}
static void acm_resume(struct usb_function *f)
@@ -761,7 +771,11 @@ static struct usb_function *acm_alloc_func(struct usb_function_instance *fi)
acm->port.func.disable = acm_disable;
opts = container_of(fi, struct f_serial_opts, func_inst);
+ mutex_lock(&opts->lock);
acm->port_num = opts->port_num;
+ acm->bInterfaceProtocol = opts->protocol;
+ opts->instances++;
+ mutex_unlock(&opts->lock);
acm->port.func.unbind = acm_unbind;
acm->port.func.free_func = acm_free_func;
acm->port.func.resume = acm_resume;
@@ -812,11 +826,42 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_acm_, port_num);
+static ssize_t f_acm_protocol_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%u\n", to_f_serial_opts(item)->protocol);
+}
+
+static ssize_t f_acm_protocol_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct f_serial_opts *opts = to_f_serial_opts(item);
+ int ret;
+
+ mutex_lock(&opts->lock);
+
+ if (opts->instances) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = kstrtou8(page, 0, &opts->protocol);
+ if (ret)
+ goto out;
+ ret = count;
+
+out:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+CONFIGFS_ATTR(f_acm_, protocol);
+
static struct configfs_attribute *acm_attrs[] = {
#ifdef CONFIG_U_SERIAL_CONSOLE
&f_acm_attr_console,
#endif
&f_acm_attr_port_num,
+ &f_acm_attr_protocol,
NULL,
};
@@ -832,6 +877,7 @@ static void acm_free_instance(struct usb_function_instance *fi)
opts = container_of(fi, struct f_serial_opts, func_inst);
gserial_free_line(opts->port_num);
+ mutex_destroy(&opts->lock);
kfree(opts);
}
@@ -843,7 +889,9 @@ static struct usb_function_instance *acm_alloc_instance(void)
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
+ opts->protocol = USB_CDC_ACM_PROTO_AT_V25TER;
opts->func_inst.free_func_inst = acm_free_instance;
+ mutex_init(&opts->lock);
ret = gserial_alloc_line(&opts->port_num);
if (ret) {
kfree(opts);
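[Editor's note: the f_acm change above counts live function instances so the protocol attribute cannot change after the value has been latched into the descriptors at bind time. A sketch of that "reject writes while bound" pattern, with a hypothetical example_opts in place of f_serial_opts:]

	#include <linux/kstrtox.h>
	#include <linux/mutex.h>

	struct example_opts {
		struct mutex lock;	/* protects instances and protocol */
		int instances;
		u8 protocol;
	};

	static ssize_t example_protocol_store(struct example_opts *opts,
					      const char *page, size_t count)
	{
		ssize_t ret;

		mutex_lock(&opts->lock);
		if (opts->instances) {
			/* Value already copied into descriptors at bind. */
			ret = -EBUSY;
			goto out;
		}
		ret = kstrtou8(page, 0, &opts->protocol);
		if (!ret)
			ret = count;
	out:
		mutex_unlock(&opts->lock);
		return ret;
	}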
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e0ceaa721949..c626bb73ea59 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -33,6 +33,7 @@
#include <linux/usb/ccid.h>
#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>
+#include <linux/usb/func_utils.h>
#include <linux/aio.h>
#include <linux/kthread.h>
@@ -40,7 +41,6 @@
#include <linux/eventfd.h>
#include "u_fs.h"
-#include "u_f.h"
#include "u_os_desc.h"
#include "configfs.h"
@@ -722,7 +722,6 @@ static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
}
static const struct file_operations ffs_ep0_operations = {
- .llseek = no_llseek,
.open = ffs_ep0_open,
.write = ffs_ep0_write,
@@ -1830,7 +1829,6 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
}
static const struct file_operations ffs_epfile_operations = {
- .llseek = no_llseek,
.open = ffs_epfile_open,
.write_iter = ffs_epfile_write_iter,
@@ -2478,7 +2476,7 @@ typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
static int __must_check ffs_do_single_desc(char *data, unsigned len,
ffs_entity_callback entity,
- void *priv, int *current_class)
+ void *priv, int *current_class, int *current_subclass)
{
struct usb_descriptor_header *_ds = (void *)data;
u8 length;
@@ -2535,6 +2533,7 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
if (ds->iInterface)
__entity(STRING, ds->iInterface);
*current_class = ds->bInterfaceClass;
+ *current_subclass = ds->bInterfaceSubClass;
}
break;
@@ -2559,6 +2558,12 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
if (length != sizeof(struct ccid_descriptor))
goto inv_length;
break;
+ } else if (*current_class == USB_CLASS_APP_SPEC &&
+ *current_subclass == USB_SUBCLASS_DFU) {
+ pr_vdebug("dfu functional descriptor\n");
+ if (length != sizeof(struct usb_dfu_functional_descriptor))
+ goto inv_length;
+ break;
} else {
pr_vdebug("unknown descriptor: %d for class %d\n",
_ds->bDescriptorType, *current_class);
@@ -2621,6 +2626,7 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
const unsigned _len = len;
unsigned long num = 0;
int current_class = -1;
+ int current_subclass = -1;
for (;;) {
int ret;
@@ -2640,7 +2646,7 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
return _len - len;
ret = ffs_do_single_desc(data, len, entity, priv,
- &current_class);
+ &current_class, &current_subclass);
if (ret < 0) {
pr_debug("%s returns %d\n", __func__, ret);
return ret;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 93dae017ae45..740311c4fa24 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -15,13 +15,21 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/usb/func_utils.h>
#include <linux/usb/g_hid.h>
+#include <uapi/linux/usb/g_hid.h>
-#include "u_f.h"
#include "u_hid.h"
#define HIDG_MINORS 4
+/*
+ * Most operating systems seem to allow a 5000 ms timeout; we give
+ * userspace half that time to respond before returning an empty report.
+ */
+#define GET_REPORT_TIMEOUT_MS 2500
+
static int major, minors;
static const struct class hidg_class = {
@@ -31,6 +39,11 @@ static const struct class hidg_class = {
static DEFINE_IDA(hidg_ida);
static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */
+struct report_entry {
+ struct usb_hidg_report report_data;
+ struct list_head node;
+};
+
/*-------------------------------------------------------------------------*/
/* HID gadget struct */
@@ -75,6 +88,19 @@ struct f_hidg {
wait_queue_head_t write_queue;
struct usb_request *req;
+ /* get report */
+ struct usb_request *get_req;
+ struct usb_hidg_report get_report;
+ bool get_report_returned;
+ int get_report_req_report_id;
+ int get_report_req_report_length;
+ spinlock_t get_report_spinlock;
+ wait_queue_head_t get_queue; /* Waiting for userspace response */
+ wait_queue_head_t get_id_queue; /* Get ID came in */
+ struct work_struct work;
+ struct workqueue_struct *workqueue;
+ struct list_head report_list;
+
struct device dev;
struct cdev cdev;
struct usb_function func;
@@ -524,6 +550,174 @@ release_write_pending:
return status;
}
+static struct report_entry *f_hidg_search_for_report(struct f_hidg *hidg, u8 report_id)
+{
+ struct list_head *ptr;
+ struct report_entry *entry;
+
+ list_for_each(ptr, &hidg->report_list) {
+ entry = list_entry(ptr, struct report_entry, node);
+ if (entry->report_data.report_id == report_id)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void get_report_workqueue_handler(struct work_struct *work)
+{
+ struct f_hidg *hidg = container_of(work, struct f_hidg, work);
+ struct usb_composite_dev *cdev = hidg->func.config->cdev;
+ struct usb_request *req;
+ struct report_entry *ptr;
+ unsigned long flags;
+
+ int status = 0;
+
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ req = hidg->get_req;
+ if (!req) {
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ return;
+ }
+
+ req->zero = 0;
+ req->length = min_t(unsigned int, min_t(unsigned int, hidg->get_report_req_report_length,
+ hidg->report_length),
+ MAX_REPORT_LENGTH);
+
+ /* Check if there is a response available for immediate response */
+ ptr = f_hidg_search_for_report(hidg, hidg->get_report_req_report_id);
+ if (ptr && !ptr->report_data.userspace_req) {
+ /* Report exists in list and it is to be used for immediate response */
+ req->buf = ptr->report_data.data;
+ status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ hidg->get_report_returned = true;
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ } else {
+ /*
+ * Report does not exist in the list or should not be sent
+ * immediately, i.e. give userspace time to respond.
+ */
+ hidg->get_report_returned = false;
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ wake_up(&hidg->get_id_queue);
+#define GET_REPORT_COND (!hidg->get_report_returned)
+ /* Wait until userspace has responded or timeout */
+ status = wait_event_interruptible_timeout(hidg->get_queue, !GET_REPORT_COND,
+ msecs_to_jiffies(GET_REPORT_TIMEOUT_MS));
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ req = hidg->get_req;
+ if (!req) {
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ return;
+ }
+ if (status == 0 && !hidg->get_report_returned) {
+ /* GET_REPORT request was not serviced by userspace within timeout period */
+ VDBG(cdev, "get_report : userspace timeout.\n");
+ hidg->get_report_returned = true;
+ }
+
+ /* Search again for report ID in list and respond to GET_REPORT request */
+ ptr = f_hidg_search_for_report(hidg, hidg->get_report_req_report_id);
+ if (ptr) {
+ /*
+ * Either get an updated response just serviced by userspace
+ * or send the latest response in the list
+ */
+ req->buf = ptr->report_data.data;
+ } else {
+ /* If there are no previously sent reports, send an empty report */
+ req->buf = hidg->get_report.data;
+ memset(req->buf, 0x0, req->length);
+ }
+
+ status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ }
+
+ if (status < 0)
+ VDBG(cdev, "usb_ep_queue error on ep0 responding to GET_REPORT\n");
+}
+
+static int f_hidg_get_report_id(struct file *file, __u8 __user *buffer)
+{
+ struct f_hidg *hidg = file->private_data;
+ int ret = 0;
+
+ ret = put_user(hidg->get_report_req_report_id, buffer);
+
+ return ret;
+}
+
+static int f_hidg_get_report(struct file *file, struct usb_hidg_report __user *buffer)
+{
+ struct f_hidg *hidg = file->private_data;
+ struct usb_composite_dev *cdev = hidg->func.config->cdev;
+ unsigned long flags;
+ struct report_entry *entry;
+ struct report_entry *ptr;
+ __u8 report_id;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ if (copy_from_user(&entry->report_data, buffer,
+ sizeof(struct usb_hidg_report))) {
+ ERROR(cdev, "copy_from_user error\n");
+ kfree(entry);
+ return -EINVAL;
+ }
+
+ report_id = entry->report_data.report_id;
+
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ ptr = f_hidg_search_for_report(hidg, report_id);
+
+ if (ptr) {
+ /* Report already exists in list - update it */
+ if (copy_from_user(&ptr->report_data, buffer,
+ sizeof(struct usb_hidg_report))) {
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ ERROR(cdev, "copy_from_user error\n");
+ kfree(entry);
+ return -EINVAL;
+ }
+ kfree(entry);
+ } else {
+ /* Report does not exist in list - add it */
+ list_add_tail(&entry->node, &hidg->report_list);
+ }
+
+ /* If there is no response pending then do nothing further */
+ if (hidg->get_report_returned) {
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ return 0;
+ }
+
+ /* If this userspace response serves the current pending report */
+ if (hidg->get_report_req_report_id == report_id) {
+ hidg->get_report_returned = true;
+ wake_up(&hidg->get_queue);
+ }
+
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ return 0;
+}
+
+static long f_hidg_ioctl(struct file *file, unsigned int code, unsigned long arg)
+{
+ switch (code) {
+ case GADGET_HID_READ_GET_REPORT_ID:
+ return f_hidg_get_report_id(file, (__u8 __user *)arg);
+ case GADGET_HID_WRITE_GET_REPORT:
+ return f_hidg_get_report(file, (struct usb_hidg_report __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
struct f_hidg *hidg = file->private_data;
@@ -531,6 +725,8 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
poll_wait(file, &hidg->read_queue, wait);
poll_wait(file, &hidg->write_queue, wait);
+ poll_wait(file, &hidg->get_queue, wait);
+ poll_wait(file, &hidg->get_id_queue, wait);
if (WRITE_COND)
ret |= EPOLLOUT | EPOLLWRNORM;
@@ -543,12 +739,16 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
ret |= EPOLLIN | EPOLLRDNORM;
}
+ if (GET_REPORT_COND)
+ ret |= EPOLLPRI;
+
return ret;
}
#undef WRITE_COND
#undef READ_COND_SSREPORT
#undef READ_COND_INTOUT
+#undef GET_REPORT_COND
static int f_hidg_release(struct inode *inode, struct file *fd)
{
@@ -641,6 +841,10 @@ static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req)
wake_up(&hidg->read_queue);
}
+static void hidg_get_report_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
static int hidg_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
@@ -649,6 +853,7 @@ static int hidg_setup(struct usb_function *f,
struct usb_request *req = cdev->req;
int status = 0;
__u16 value, length;
+ unsigned long flags;
value = __le16_to_cpu(ctrl->wValue);
length = __le16_to_cpu(ctrl->wLength);
@@ -660,14 +865,20 @@ static int hidg_setup(struct usb_function *f,
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_REPORT):
- VDBG(cdev, "get_report\n");
+ VDBG(cdev, "get_report | wLength=%d\n", ctrl->wLength);
- /* send an empty report */
- length = min_t(unsigned, length, hidg->report_length);
- memset(req->buf, 0x0, length);
+ /*
+ * Update GET_REPORT ID so that an ioctl can be used to determine what
+ * GET_REPORT the request was actually for.
+ */
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ hidg->get_report_req_report_id = value & 0xff;
+ hidg->get_report_req_report_length = length;
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
- goto respond;
- break;
+ queue_work(hidg->workqueue, &hidg->work);
+
+ return status;
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_PROTOCOL):
@@ -793,6 +1004,14 @@ static void hidg_disable(struct usb_function *f)
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
}
+ spin_lock_irqsave(&hidg->get_report_spinlock, flags);
+ if (!hidg->get_report_returned) {
+ usb_ep_free_request(f->config->cdev->gadget->ep0, hidg->get_req);
+ hidg->get_req = NULL;
+ hidg->get_report_returned = true;
+ }
+ spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+
spin_lock_irqsave(&hidg->write_spinlock, flags);
if (!hidg->write_pending) {
free_ep_req(hidg->in_ep, hidg->req);
@@ -902,6 +1121,14 @@ fail:
return status;
}
+#ifdef CONFIG_COMPAT
+static long f_hidg_compat_ioctl(struct file *file, unsigned int code,
+ unsigned long value)
+{
+ return f_hidg_ioctl(file, code, value);
+}
+#endif
+
static const struct file_operations f_hidg_fops = {
.owner = THIS_MODULE,
.open = f_hidg_open,
@@ -909,6 +1136,10 @@ static const struct file_operations f_hidg_fops = {
.write = f_hidg_write,
.read = f_hidg_read,
.poll = f_hidg_poll,
+ .unlocked_ioctl = f_hidg_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = f_hidg_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
@@ -919,6 +1150,15 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_string *us;
int status;
+ hidg->get_req = usb_ep_alloc_request(c->cdev->gadget->ep0, GFP_ATOMIC);
+ if (!hidg->get_req)
+ return -ENOMEM;
+
+ hidg->get_req->zero = 0;
+ hidg->get_req->complete = hidg_get_report_complete;
+ hidg->get_req->context = hidg;
+ hidg->get_report_returned = true;
+
/* maybe allocate device-global string IDs, and patch descriptors */
us = usb_gstrings_attach(c->cdev, ct_func_strings,
ARRAY_SIZE(ct_func_string_defs));
@@ -1004,9 +1244,24 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg->write_pending = 1;
hidg->req = NULL;
spin_lock_init(&hidg->read_spinlock);
+ spin_lock_init(&hidg->get_report_spinlock);
init_waitqueue_head(&hidg->write_queue);
init_waitqueue_head(&hidg->read_queue);
+ init_waitqueue_head(&hidg->get_queue);
+ init_waitqueue_head(&hidg->get_id_queue);
INIT_LIST_HEAD(&hidg->completed_out_req);
+ INIT_LIST_HEAD(&hidg->report_list);
+
+ INIT_WORK(&hidg->work, get_report_workqueue_handler);
+ hidg->workqueue = alloc_workqueue("report_work",
+ WQ_FREEZABLE |
+ WQ_MEM_RECLAIM,
+ 1);
+
+ if (!hidg->workqueue) {
+ status = -ENOMEM;
+ goto fail;
+ }
/* create char device */
cdev_init(&hidg->cdev, &f_hidg_fops);
@@ -1016,12 +1271,16 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail_free_descs:
+ destroy_workqueue(hidg->workqueue);
usb_free_all_descriptors(f);
fail:
ERROR(f->config->cdev, "hidg_bind FAILED\n");
if (hidg->req != NULL)
free_ep_req(hidg->in_ep, hidg->req);
+ usb_ep_free_request(c->cdev->gadget->ep0, hidg->get_req);
+ hidg->get_req = NULL;
+
return status;
}
@@ -1256,7 +1515,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
struct f_hidg *hidg = func_to_hidg(f);
cdev_device_del(&hidg->cdev, &hidg->dev);
-
+ destroy_workqueue(hidg->workqueue);
usb_free_all_descriptors(f);
}
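[Editor's note: from userspace, the new f_hid ioctls above pair with EPOLLPRI, which the poll handler raises while a host GET_REPORT is pending. A hedged userspace sketch based on the fields this patch uses (report_id, data); see the uapi header <linux/usb/g_hid.h> added by this series for the authoritative layout:]

	#include <fcntl.h>
	#include <poll.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/usb/g_hid.h>

	static int answer_get_report(int fd)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };
		struct usb_hidg_report rep;
		__u8 id;

		/* POLLPRI signals a host GET_REPORT waiting for a reply. */
		if (poll(&pfd, 1, -1) <= 0)
			return -1;

		if (ioctl(fd, GADGET_HID_READ_GET_REPORT_ID, &id) < 0)
			return -1;

		memset(&rep, 0, sizeof(rep));
		rep.report_id = id;
		/* Fill rep.data[] with the current report contents here;
		 * the kernel answers with an empty report on timeout. */
		return ioctl(fd, GADGET_HID_WRITE_GET_REPORT, &rep);
	}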
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 979b028edb99..49b009a7d5d7 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -14,9 +14,9 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
#include "g_zero.h"
-#include "u_f.h"
/*
* LOOPBACK FUNCTION ... a testing vehicle for USB peripherals,
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index cfd712fd7452..e11d8c0edf06 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -3050,7 +3050,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
if (!common->thread_task) {
common->state = FSG_STATE_NORMAL;
common->thread_task =
- kthread_create(fsg_main_thread, common, "file-storage");
+ kthread_run(fsg_main_thread, common, "file-storage");
if (IS_ERR(common->thread_task)) {
ret = PTR_ERR(common->thread_task);
common->thread_task = NULL;
@@ -3059,7 +3059,6 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
}
DBG(common, "I/O thread pid: %d\n",
task_pid_nr(common->thread_task));
- wake_up_process(common->thread_task);
}
fsg->gadget = gadget;
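[Editor's note: kthread_run() is kthread_create() followed by wake_up_process(), which is why the explicit wakeup above becomes redundant. A minimal sketch with hypothetical names:]

	#include <linux/kthread.h>

	static int example_thread_fn(void *data)
	{
		/* Thread body; return value is collected by kthread_stop(). */
		return 0;
	}

	static struct task_struct *example_start_thread(void *data)
	{
		/* Created and immediately woken; check IS_ERR() on failure. */
		return kthread_run(example_thread_fn, data, "example-thread");
	}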
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 67052a664e74..1067847cc079 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -30,11 +30,11 @@
#include <sound/rawmidi.h>
#include <linux/usb/ch9.h>
+#include <linux/usb/func_utils.h>
#include <linux/usb/gadget.h>
#include <linux/usb/audio.h>
#include <linux/usb/midi.h>
-#include "u_f.h"
#include "u_midi.h"
MODULE_AUTHOR("Ben Williamson");
diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
index 3f63253ad3e0..8285df9ed6fd 100644
--- a/drivers/usb/gadget/function/f_midi2.c
+++ b/drivers/usb/gadget/function/f_midi2.c
@@ -15,11 +15,11 @@
#include <sound/ump_convert.h>
#include <linux/usb/ch9.h>
+#include <linux/usb/func_utils.h>
#include <linux/usb/gadget.h>
#include <linux/usb/audio.h>
#include <linux/usb/midi-v2.h>
-#include "u_f.h"
#include "u_midi2.h"
struct f_midi2;
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 6f3702210450..ec5fd25020fd 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -13,10 +13,10 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
#include <linux/err.h>
#include "g_zero.h"
-#include "u_f.h"
/*
* SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 2b9fb4daa806..c87e74afc881 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -377,24 +377,10 @@ enum {
STR_AS_OUT_IF_ALT1,
STR_AS_IN_IF_ALT0,
STR_AS_IN_IF_ALT1,
+ NUM_STR_DESCRIPTORS,
};
-static struct usb_string strings_uac1[] = {
- /* [STR_AC_IF].s = DYNAMIC, */
- [STR_USB_OUT_IT].s = "Playback Input terminal",
- [STR_USB_OUT_IT_CH_NAMES].s = "Playback Channels",
- [STR_IO_OUT_OT].s = "Playback Output terminal",
- [STR_IO_IN_IT].s = "Capture Input terminal",
- [STR_IO_IN_IT_CH_NAMES].s = "Capture Channels",
- [STR_USB_IN_OT].s = "Capture Output terminal",
- [STR_FU_IN].s = "Capture Volume",
- [STR_FU_OUT].s = "Playback Volume",
- [STR_AS_OUT_IF_ALT0].s = "Playback Inactive",
- [STR_AS_OUT_IF_ALT1].s = "Playback Active",
- [STR_AS_IN_IF_ALT0].s = "Capture Inactive",
- [STR_AS_IN_IF_ALT1].s = "Capture Active",
- { },
-};
+static struct usb_string strings_uac1[NUM_STR_DESCRIPTORS + 1] = {};
static struct usb_gadget_strings str_uac1 = {
.language = 0x0409, /* en-us */
@@ -1265,6 +1251,20 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
strings_uac1[STR_AC_IF].s = audio_opts->function_name;
+ strings_uac1[STR_USB_OUT_IT].s = audio_opts->c_it_name;
+ strings_uac1[STR_USB_OUT_IT_CH_NAMES].s = audio_opts->c_it_ch_name;
+ strings_uac1[STR_IO_OUT_OT].s = audio_opts->c_ot_name;
+ strings_uac1[STR_FU_OUT].s = audio_opts->c_fu_vol_name;
+ strings_uac1[STR_AS_OUT_IF_ALT0].s = "Playback Inactive";
+ strings_uac1[STR_AS_OUT_IF_ALT1].s = "Playback Active";
+
+ strings_uac1[STR_IO_IN_IT].s = audio_opts->p_it_name;
+ strings_uac1[STR_IO_IN_IT_CH_NAMES].s = audio_opts->p_it_ch_name;
+ strings_uac1[STR_USB_IN_OT].s = audio_opts->p_ot_name;
+ strings_uac1[STR_FU_IN].s = audio_opts->p_fu_vol_name;
+ strings_uac1[STR_AS_IN_IF_ALT0].s = "Capture Inactive";
+ strings_uac1[STR_AS_IN_IF_ALT1].s = "Capture Active";
+
us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
if (IS_ERR(us))
return PTR_ERR(us);
@@ -1681,8 +1681,19 @@ UAC1_ATTRIBUTE(bool, c_volume_present);
UAC1_ATTRIBUTE(s16, c_volume_min);
UAC1_ATTRIBUTE(s16, c_volume_max);
UAC1_ATTRIBUTE(s16, c_volume_res);
+
UAC1_ATTRIBUTE_STRING(function_name);
+UAC1_ATTRIBUTE_STRING(p_it_name);
+UAC1_ATTRIBUTE_STRING(p_it_ch_name);
+UAC1_ATTRIBUTE_STRING(p_ot_name);
+UAC1_ATTRIBUTE_STRING(p_fu_vol_name);
+
+UAC1_ATTRIBUTE_STRING(c_it_name);
+UAC1_ATTRIBUTE_STRING(c_it_ch_name);
+UAC1_ATTRIBUTE_STRING(c_ot_name);
+UAC1_ATTRIBUTE_STRING(c_fu_vol_name);
+
static struct configfs_attribute *f_uac1_attrs[] = {
&f_uac1_opts_attr_c_chmask,
&f_uac1_opts_attr_c_srate,
@@ -1706,6 +1717,16 @@ static struct configfs_attribute *f_uac1_attrs[] = {
&f_uac1_opts_attr_function_name,
+ &f_uac1_opts_attr_p_it_name,
+ &f_uac1_opts_attr_p_it_ch_name,
+ &f_uac1_opts_attr_p_ot_name,
+ &f_uac1_opts_attr_p_fu_vol_name,
+
+ &f_uac1_opts_attr_c_it_name,
+ &f_uac1_opts_attr_c_it_ch_name,
+ &f_uac1_opts_attr_c_ot_name,
+ &f_uac1_opts_attr_c_fu_vol_name,
+
NULL,
};
@@ -1760,6 +1781,16 @@ static struct usb_function_instance *f_audio_alloc_inst(void)
scnprintf(opts->function_name, sizeof(opts->function_name), "AC Interface");
+ scnprintf(opts->p_it_name, sizeof(opts->p_it_name), "Capture Input terminal");
+ scnprintf(opts->p_it_ch_name, sizeof(opts->p_it_ch_name), "Capture Channels");
+ scnprintf(opts->p_ot_name, sizeof(opts->p_ot_name), "Capture Output terminal");
+ scnprintf(opts->p_fu_vol_name, sizeof(opts->p_fu_vol_name), "Capture Volume");
+
+ scnprintf(opts->c_it_name, sizeof(opts->c_it_name), "Playback Input terminal");
+ scnprintf(opts->c_it_ch_name, sizeof(opts->c_it_ch_name), "Playback Channels");
+ scnprintf(opts->c_ot_name, sizeof(opts->c_ot_name), "Playback Output terminal");
+ scnprintf(opts->c_fu_vol_name, sizeof(opts->c_fu_vol_name), "Playback Volume");
+
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 2d6d3286ffde..1cdda44455b3 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -95,7 +95,9 @@ enum {
STR_CLKSRC_IN,
STR_CLKSRC_OUT,
STR_USB_IT,
+ STR_USB_IT_CH,
STR_IO_IT,
+ STR_IO_IT_CH,
STR_USB_OT,
STR_IO_OT,
STR_FU_IN,
@@ -104,25 +106,10 @@ enum {
STR_AS_OUT_ALT1,
STR_AS_IN_ALT0,
STR_AS_IN_ALT1,
+ NUM_STR_DESCRIPTORS,
};
-static struct usb_string strings_fn[] = {
- /* [STR_ASSOC].s = DYNAMIC, */
- [STR_IF_CTRL].s = "Topology Control",
- [STR_CLKSRC_IN].s = "Input Clock",
- [STR_CLKSRC_OUT].s = "Output Clock",
- [STR_USB_IT].s = "USBH Out",
- [STR_IO_IT].s = "USBD Out",
- [STR_USB_OT].s = "USBH In",
- [STR_IO_OT].s = "USBD In",
- [STR_FU_IN].s = "Capture Volume",
- [STR_FU_OUT].s = "Playback Volume",
- [STR_AS_OUT_ALT0].s = "Playback Inactive",
- [STR_AS_OUT_ALT1].s = "Playback Active",
- [STR_AS_IN_ALT0].s = "Capture Inactive",
- [STR_AS_IN_ALT1].s = "Capture Active",
- { },
-};
+static struct usb_string strings_fn[NUM_STR_DESCRIPTORS + 1] = {};
static const char *const speed_names[] = {
[USB_SPEED_UNKNOWN] = "UNKNOWN",
@@ -1049,6 +1036,23 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
return ret;
strings_fn[STR_ASSOC].s = uac2_opts->function_name;
+ strings_fn[STR_IF_CTRL].s = uac2_opts->if_ctrl_name;
+ strings_fn[STR_CLKSRC_IN].s = uac2_opts->clksrc_in_name;
+ strings_fn[STR_CLKSRC_OUT].s = uac2_opts->clksrc_out_name;
+
+ strings_fn[STR_USB_IT].s = uac2_opts->c_it_name;
+ strings_fn[STR_USB_IT_CH].s = uac2_opts->c_it_ch_name;
+ strings_fn[STR_IO_OT].s = uac2_opts->c_ot_name;
+ strings_fn[STR_FU_OUT].s = uac2_opts->c_fu_vol_name;
+ strings_fn[STR_AS_OUT_ALT0].s = "Playback Inactive";
+ strings_fn[STR_AS_OUT_ALT1].s = "Playback Active";
+
+ strings_fn[STR_IO_IT].s = uac2_opts->p_it_name;
+ strings_fn[STR_IO_IT_CH].s = uac2_opts->p_it_ch_name;
+ strings_fn[STR_USB_OT].s = uac2_opts->p_ot_name;
+ strings_fn[STR_FU_IN].s = uac2_opts->p_fu_vol_name;
+ strings_fn[STR_AS_IN_ALT0].s = "Capture Inactive";
+ strings_fn[STR_AS_IN_ALT1].s = "Capture Active";
us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
if (IS_ERR(us))
@@ -1072,7 +1076,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
in_clk_src_desc.iClockSource = us[STR_CLKSRC_IN].id;
out_clk_src_desc.iClockSource = us[STR_CLKSRC_OUT].id;
usb_out_it_desc.iTerminal = us[STR_USB_IT].id;
+ usb_out_it_desc.iChannelNames = us[STR_USB_IT_CH].id;
io_in_it_desc.iTerminal = us[STR_IO_IT].id;
+ io_in_it_desc.iChannelNames = us[STR_IO_IT_CH].id;
usb_in_ot_desc.iTerminal = us[STR_USB_OT].id;
io_out_ot_desc.iTerminal = us[STR_IO_OT].id;
std_as_out_if0_desc.iInterface = us[STR_AS_OUT_ALT0].id;
@@ -2100,10 +2106,24 @@ UAC2_ATTRIBUTE(s16, c_volume_max);
UAC2_ATTRIBUTE(s16, c_volume_res);
UAC2_ATTRIBUTE(u32, fb_max);
UAC2_ATTRIBUTE_STRING(function_name);
+UAC2_ATTRIBUTE_STRING(if_ctrl_name);
+UAC2_ATTRIBUTE_STRING(clksrc_in_name);
+UAC2_ATTRIBUTE_STRING(clksrc_out_name);
+
+UAC2_ATTRIBUTE_STRING(p_it_name);
+UAC2_ATTRIBUTE_STRING(p_it_ch_name);
+UAC2_ATTRIBUTE_STRING(p_ot_name);
+UAC2_ATTRIBUTE_STRING(p_fu_vol_name);
+
+UAC2_ATTRIBUTE_STRING(c_it_name);
+UAC2_ATTRIBUTE_STRING(c_it_ch_name);
+UAC2_ATTRIBUTE_STRING(c_ot_name);
+UAC2_ATTRIBUTE_STRING(c_fu_vol_name);
UAC2_ATTRIBUTE(s16, p_terminal_type);
UAC2_ATTRIBUTE(s16, c_terminal_type);
+
static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_p_chmask,
&f_uac2_opts_attr_p_srate,
@@ -2130,6 +2150,19 @@ static struct configfs_attribute *f_uac2_attrs[] = {
&f_uac2_opts_attr_c_volume_res,
&f_uac2_opts_attr_function_name,
+ &f_uac2_opts_attr_if_ctrl_name,
+ &f_uac2_opts_attr_clksrc_in_name,
+ &f_uac2_opts_attr_clksrc_out_name,
+
+ &f_uac2_opts_attr_p_it_name,
+ &f_uac2_opts_attr_p_it_ch_name,
+ &f_uac2_opts_attr_p_ot_name,
+ &f_uac2_opts_attr_p_fu_vol_name,
+
+ &f_uac2_opts_attr_c_it_name,
+ &f_uac2_opts_attr_c_it_ch_name,
+ &f_uac2_opts_attr_c_ot_name,
+ &f_uac2_opts_attr_c_fu_vol_name,
&f_uac2_opts_attr_p_terminal_type,
&f_uac2_opts_attr_c_terminal_type,
@@ -2191,6 +2224,19 @@ static struct usb_function_instance *afunc_alloc_inst(void)
opts->fb_max = FBACK_FAST_MAX;
scnprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
+ scnprintf(opts->if_ctrl_name, sizeof(opts->if_ctrl_name), "Topology Control");
+ scnprintf(opts->clksrc_in_name, sizeof(opts->clksrc_in_name), "Input Clock");
+ scnprintf(opts->clksrc_out_name, sizeof(opts->clksrc_out_name), "Output Clock");
+
+ scnprintf(opts->p_it_name, sizeof(opts->p_it_name), "USBD Out");
+ scnprintf(opts->p_it_ch_name, sizeof(opts->p_it_ch_name), "Capture Channels");
+ scnprintf(opts->p_ot_name, sizeof(opts->p_ot_name), "USBH In");
+ scnprintf(opts->p_fu_vol_name, sizeof(opts->p_fu_vol_name), "Capture Volume");
+
+ scnprintf(opts->c_it_name, sizeof(opts->c_it_name), "USBH Out");
+ scnprintf(opts->c_it_ch_name, sizeof(opts->c_it_ch_name), "Playback Channels");
+ scnprintf(opts->c_ot_name, sizeof(opts->c_ot_name), "USBD In");
+ scnprintf(opts->c_fu_vol_name, sizeof(opts->c_fu_vol_name), "Playback Volume");
opts->p_terminal_type = UAC2_DEF_P_TERM_TYPE;
opts->c_terminal_type = UAC2_DEF_C_TERM_TYPE;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 24299576972f..ca8dbec65f73 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -1140,35 +1140,35 @@ static int u_audio_rate_get(struct snd_kcontrol *kcontrol,
}
static struct snd_kcontrol_new u_audio_controls[] = {
- [UAC_FBACK_CTRL] {
+ [UAC_FBACK_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Capture Pitch 1000000",
.info = u_audio_pitch_info,
.get = u_audio_pitch_get,
.put = u_audio_pitch_put,
},
- [UAC_P_PITCH_CTRL] {
+ [UAC_P_PITCH_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Playback Pitch 1000000",
.info = u_audio_pitch_info,
.get = u_audio_pitch_get,
.put = u_audio_pitch_put,
},
- [UAC_MUTE_CTRL] {
+ [UAC_MUTE_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
.info = u_audio_mute_info,
.get = u_audio_mute_get,
.put = u_audio_mute_put,
},
- [UAC_VOLUME_CTRL] {
+ [UAC_VOLUME_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
.info = u_audio_volume_info,
.get = u_audio_volume_get,
.put = u_audio_volume_put,
},
- [UAC_RATE_CTRL] {
+ [UAC_RATE_CTRL] = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "", /* will be filled later */
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
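[Editor's note: the u_audio hunk above replaces the old GNU "[index] { ... }" extension with the standard C99 designated-initializer form "[index] = { ... }". A minimal illustration:]

	struct point { int x, y; };

	static struct point pts[] = {
		[0] = { .x = 1, .y = 2 },	/* standard C99 form */
		[1] = { .x = 3, .y = 4 },
	};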
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index b394105e55d6..0a8c05b2746b 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -28,6 +28,7 @@
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
+#include <linux/serial.h>
#include "u_serial.h"
@@ -126,6 +127,7 @@ struct gs_port {
wait_queue_head_t close_wait;
bool suspended; /* port suspended */
bool start_delayed; /* delay start when suspended */
+ struct async_icount icount;
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
@@ -257,6 +259,7 @@ __acquires(&port->port_lock)
break;
}
do_tty_wake = true;
+ port->icount.tx += len;
req->length = len;
list_del(&req->list);
@@ -408,6 +411,7 @@ static void gs_rx_push(struct work_struct *work)
size -= n;
}
+ port->icount.rx += size;
count = tty_insert_flip_string(&port->port, packet,
size);
if (count)
@@ -851,6 +855,23 @@ static int gs_break_ctl(struct tty_struct *tty, int duration)
return status;
}
+static int gs_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount)
+{
+ struct gs_port *port = tty->driver_data;
+ struct async_icount cnow;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ cnow = port->icount;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ icount->rx = cnow.rx;
+ icount->tx = cnow.tx;
+
+ return 0;
+}
+
static const struct tty_operations gs_tty_ops = {
.open = gs_open,
.close = gs_close,
@@ -861,6 +882,7 @@ static const struct tty_operations gs_tty_ops = {
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
.break_ctl = gs_break_ctl,
+ .get_icount = gs_get_icount,
};
/*-------------------------------------------------------------------------*/
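[Editor's note: with .get_icount wired up above, the standard TIOCGICOUNT ioctl reports the per-port rx/tx byte counters this patch maintains. A userspace sketch; /dev/ttyGS0 is the usual gadget serial node but is configuration-dependent:]

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	int main(void)
	{
		struct serial_icounter_struct ic;
		int fd = open("/dev/ttyGS0", O_RDONLY | O_NOCTTY);

		if (fd < 0 || ioctl(fd, TIOCGICOUNT, &ic) < 0) {
			perror("TIOCGICOUNT");
			return 1;
		}
		printf("rx=%d tx=%d\n", ic.rx, ic.tx);
		return 0;
	}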
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index 901d99310bc4..e1274338ea61 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -17,6 +17,10 @@
struct f_serial_opts {
struct usb_function_instance func_inst;
u8 port_num;
+ u8 protocol;
+
+ struct mutex lock; /* protect instances */
+ int instances;
};
/*
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index f7a616760e31..feb6eb76462f 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -52,7 +52,17 @@ struct f_uac1_opts {
int req_number;
unsigned bound:1;
- char function_name[32];
+ char function_name[USB_MAX_STRING_LEN];
+
+ char p_it_name[USB_MAX_STRING_LEN];
+ char p_it_ch_name[USB_MAX_STRING_LEN];
+ char p_ot_name[USB_MAX_STRING_LEN];
+ char p_fu_vol_name[USB_MAX_STRING_LEN];
+
+ char c_it_name[USB_MAX_STRING_LEN];
+ char c_it_ch_name[USB_MAX_STRING_LEN];
+ char c_ot_name[USB_MAX_STRING_LEN];
+ char c_fu_vol_name[USB_MAX_STRING_LEN];
struct mutex lock;
int refcnt;
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 5e81bdd6c5fb..0df808289ded 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -68,7 +68,20 @@ struct f_uac2_opts {
int fb_max;
bool bound;
- char function_name[32];
+ char function_name[USB_MAX_STRING_LEN];
+ char if_ctrl_name[USB_MAX_STRING_LEN];
+ char clksrc_in_name[USB_MAX_STRING_LEN];
+ char clksrc_out_name[USB_MAX_STRING_LEN];
+
+ char p_it_name[USB_MAX_STRING_LEN];
+ char p_it_ch_name[USB_MAX_STRING_LEN];
+ char p_ot_name[USB_MAX_STRING_LEN];
+ char p_fu_vol_name[USB_MAX_STRING_LEN];
+
+ char c_it_name[USB_MAX_STRING_LEN];
+ char c_it_ch_name[USB_MAX_STRING_LEN];
+ char c_ot_name[USB_MAX_STRING_LEN];
+ char c_fu_vol_name[USB_MAX_STRING_LEN];
s16 p_terminal_type;
s16 c_terminal_type;
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index a024aecb76dc..de1736f834e6 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -121,6 +121,9 @@ static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc,
list_for_each_entry(format, &uvc->header->formats, entry) {
const struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
+ if (IS_ERR(fmtdesc))
+ continue;
+
if (fmtdesc->fcc == pixelformat) {
uformat = format->fmt;
break;
@@ -240,6 +243,7 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
struct uvc_video *video = &uvc->video;
struct uvcg_format *uformat;
struct uvcg_frame *uframe;
+ const struct uvc_format_desc *fmtdesc;
u8 *fcc;
if (fmt->type != video->queue.queue.type)
@@ -277,7 +281,10 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
fmt->fmt.pix.height = uframe->frame.w_height;
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe);
fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe);
- fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc;
+ fmtdesc = to_uvc_format(uformat);
+ if (IS_ERR(fmtdesc))
+ return PTR_ERR(fmtdesc);
+ fmt->fmt.pix.pixelformat = fmtdesc->fcc;
}
fmt->fmt.pix.field = V4L2_FIELD_NONE;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
@@ -389,6 +396,9 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
return -EINVAL;
fmtdesc = to_uvc_format(uformat);
+ if (IS_ERR(fmtdesc))
+ return PTR_ERR(fmtdesc);
+
f->pixelformat = fmtdesc->fcc;
return 0;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 03179b1880fd..9c7381661016 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -705,7 +705,6 @@ static const struct file_operations ep_io_operations = {
.open = ep_open,
.release = ep_release,
- .llseek = no_llseek,
.unlocked_ioctl = ep_ioctl,
.read_iter = ep_read_iter,
.write_iter = ep_write_iter,
@@ -1939,7 +1938,6 @@ gadget_dev_open (struct inode *inode, struct file *fd)
}
static const struct file_operations ep0_operations = {
- .llseek = no_llseek,
.open = gadget_dev_open,
.read = ep0_read,
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 399fca32a8ac..112fd18d8c99 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -1364,7 +1364,6 @@ static const struct file_operations raw_fops = {
.unlocked_ioctl = raw_ioctl,
.compat_ioctl = raw_ioctl,
.release = raw_release,
- .llseek = no_llseek,
};
static struct miscdevice raw_misc_device = {
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index 6aea1ecb3999..115d219c9c00 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -8,8 +8,8 @@
* Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
-#include "u_f.h"
#include <linux/usb/ch9.h>
+#include <linux/usb/func_utils.h>
struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len)
{
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index b76885d78e8a..4928eba19327 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -187,7 +187,6 @@ static int regs_dbg_release(struct inode *inode, struct file *file)
static const struct file_operations queue_dbg_fops = {
.owner = THIS_MODULE,
.open = queue_dbg_open,
- .llseek = no_llseek,
.read = queue_dbg_read,
.release = queue_dbg_release,
};
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 35a652807fca..5149e2b7f050 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -639,6 +639,7 @@ static const struct of_device_id bdc_of_match[] = {
{ .compatible = "brcm,bdc" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, bdc_of_match);
static struct platform_driver bdc_driver = {
.driver = {
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
index d394affb7072..62fce42ef2da 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
@@ -2033,8 +2033,8 @@ static void cdns2_quiesce(struct cdns2_device *pdev)
set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
/* Disable interrupt. */
- writeb(0, &pdev->interrupt_regs->extien),
- writeb(0, &pdev->interrupt_regs->usbien),
+ writeb(0, &pdev->interrupt_regs->extien);
+ writeb(0, &pdev->interrupt_regs->usbien);
writew(0, &pdev->adma_regs->ep_ien);
/* Clear interrupt line. */
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
index 50c3d0974d9b..b1a8f772467c 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
@@ -15,8 +15,7 @@
#include "cdns2-gadget.h"
#define PCI_DRIVER_NAME "cdns-pci-usbhs"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0120
+#define PCI_DEVICE_ID_CDNS_USB2 0x0120
#define PCI_BAR_DEV 0
#define PCI_DEV_FN_DEVICE 0
@@ -114,8 +113,8 @@ static const struct dev_pm_ops cdns2_pci_pm_ops = {
};
static const struct pci_device_id cdns2_pci_ids[] = {
- { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
+ { PCI_DEVICE(PCI_VENDOR_ID_CDNS, PCI_DEVICE_ID_CDNS_USB2),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE },
{ 0, }
};
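The cdns2-pci hunk above switches to the PCI_DEVICE() initializer plus a named device-ID macro. A minimal sketch of that table style, with an illustrative device ID (not taken from this patch):

#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_PCI_DEVICE_ID_USB2	0x0120	/* illustrative placeholder */

static const struct pci_device_id demo_pci_ids[] = {
	/* PCI_DEVICE() fills vendor/device and wildcards the subsystem IDs */
	{ PCI_DEVICE(PCI_VENDOR_ID_CDNS, DEMO_PCI_DEVICE_ID_USB2),
	  .class = PCI_CLASS_SERIAL_USB_DEVICE },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);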
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index f37b0d8386c1..ff7bee78bcc4 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1304,7 +1304,8 @@ static int dummy_urb_enqueue(
/* kick the scheduler, it'll do the rest */
if (!hrtimer_active(&dum_hcd->timer))
- hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
+ hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+ HRTIMER_MODE_REL_SOFT);
done:
spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
@@ -1325,7 +1326,7 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
!list_empty(&dum_hcd->urbp_list))
- hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
+ hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
return rc;
@@ -1995,7 +1996,8 @@ return_urb:
dum_hcd->udev = NULL;
} else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
/* want a 1 msec delay here */
- hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
+ hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+ HRTIMER_MODE_REL_SOFT);
}
spin_unlock_irqrestore(&dum->lock, flags);
@@ -2389,7 +2391,7 @@ static int dummy_bus_resume(struct usb_hcd *hcd)
dum_hcd->rh_state = DUMMY_RH_RUNNING;
set_link_state(dum_hcd);
if (!list_empty(&dum_hcd->urbp_list))
- hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
+ hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
hcd->state = HC_STATE_RUNNING;
}
spin_unlock_irq(&dum_hcd->dum->lock);
@@ -2467,7 +2469,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->timer.function = dummy_timer;
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
@@ -2497,7 +2499,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->timer.function = dummy_timer;
dum_hcd->rh_state = DUMMY_RH_RUNNING;
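The dummy_hcd hunks above move the emulation timer into softirq context. A self-contained sketch of the HRTIMER_MODE_REL_SOFT pattern, with illustrative names; the key constraint is that the mode passed to hrtimer_start() must match the softness chosen at init time:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* Runs in softirq context (not hardirq) because the timer was
	 * initialized with a _SOFT mode, so locking in the paths it
	 * races with can be relaxed accordingly. */
	return HRTIMER_NORESTART;
}

static void demo_timer_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ms_to_ktime(1), HRTIMER_MODE_REL_SOFT);
}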
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index d5f29f8fe481..3bfd889ed56a 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1487,31 +1487,29 @@ static int udc_ep0_out_req(struct lpc32xx_udc *udc)
req = list_entry(ep0->queue.next, struct lpc32xx_request,
queue);
- if (req) {
- if (req->req.length == 0) {
- /* Just dequeue request */
- done(ep0, req, 0);
- udc->ep0state = WAIT_FOR_SETUP;
- return 1;
- }
+ if (req->req.length == 0) {
+ /* Just dequeue request */
+ done(ep0, req, 0);
+ udc->ep0state = WAIT_FOR_SETUP;
+ return 1;
+ }
- /* Get data from FIFO */
- bufferspace = req->req.length - req->req.actual;
- if (bufferspace > ep0->ep.maxpacket)
- bufferspace = ep0->ep.maxpacket;
+ /* Get data from FIFO */
+ bufferspace = req->req.length - req->req.actual;
+ if (bufferspace > ep0->ep.maxpacket)
+ bufferspace = ep0->ep.maxpacket;
- /* Copy data to buffer */
- prefetchw(req->req.buf + req->req.actual);
- tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
- bufferspace);
- req->req.actual += bufferspace;
+ /* Copy data to buffer */
+ prefetchw(req->req.buf + req->req.actual);
+ tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
+ bufferspace);
+ req->req.actual += bufferspace;
- if (tr < ep0->ep.maxpacket) {
- /* This is the last packet */
- done(ep0, req, 0);
- udc->ep0state = WAIT_FOR_SETUP;
- return 1;
- }
+ if (tr < ep0->ep.maxpacket) {
+ /* This is the last packet */
+ done(ep0, req, 0);
+ udc->ep0state = WAIT_FOR_SETUP;
+ return 1;
}
return 0;
@@ -1962,18 +1960,17 @@ static void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
/* If there isn't a request waiting, something went wrong */
req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
- if (req) {
- done(ep, req, 0);
- /* Start another request if ready */
- if (!list_empty(&ep->queue)) {
- if (ep->is_in)
- udc_ep_in_req_dma(udc, ep);
- else
- udc_ep_out_req_dma(udc, ep);
- } else
- ep->req_pending = 0;
- }
+ done(ep, req, 0);
+
+ /* Start another request if ready */
+ if (!list_empty(&ep->queue)) {
+ if (ep->is_in)
+ udc_ep_in_req_dma(udc, ep);
+ else
+ udc_ep_out_req_dma(udc, ep);
+ } else
+ ep->req_pending = 0;
}
@@ -1989,10 +1986,6 @@ static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
#endif
req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
- if (!req) {
- ep_err(ep, "DMA interrupt on no req!\n");
- return;
- }
dd = req->dd_desc_ptr;
/* DMA descriptor should always be retired for this call */
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 74590f93ea61..ebc45565c33e 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -947,7 +947,7 @@ static int xudc_ep_disable(struct usb_ep *_ep)
ep->desc = NULL;
ep->ep_usb.desc = NULL;
- dev_dbg(udc->dev, "USB Ep %d disable\n ", ep->epnumber);
+ dev_dbg(udc->dev, "USB Ep %d disable\n", ep->epnumber);
/* Disable the endpoint.*/
epcfg = udc->read_fn(udc->addr + ep->offset);
epcfg &= ~XUSB_EP_CFG_VALID_MASK;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 4448d0ab06f0..d011d6c753ed 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -40,11 +40,11 @@ config USB_XHCI_DBGCAP
config USB_XHCI_PCI
tristate
depends on USB_PCI
- depends on USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS
default y
config USB_XHCI_PCI_RENESAS
tristate "Support for additional Renesas xHCI controller with firmware"
+ depends on USB_XHCI_PCI
help
Say 'Y' to enable the support for the Renesas xHCI controller with
firmware. Make sure you have the firmware for the device and
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
index 77e42c739c58..68cad0620f1a 100644
--- a/drivers/usb/host/ehci-brcm.c
+++ b/drivers/usb/host/ehci-brcm.c
@@ -246,6 +246,7 @@ static const struct of_device_id brcm_ehci_of_match[] = {
{ .compatible = "brcm,bcm7445-ehci", },
{}
};
+MODULE_DEVICE_TABLE(of, brcm_ehci_of_match);
static struct platform_driver ehci_brcm_driver = {
.probe = ehci_brcm_probe,
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index f40bc2a7a124..e3a961d3f5fc 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -48,7 +48,6 @@ struct exynos_ehci_hcd {
static int exynos_ehci_get_phy(struct device *dev,
struct exynos_ehci_hcd *exynos_ehci)
{
- struct device_node *child;
struct phy *phy;
int phy_number, num_phys;
int ret;
@@ -66,26 +65,22 @@ static int exynos_ehci_get_phy(struct device *dev,
return 0;
/* Get PHYs using legacy bindings */
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
dev_err(dev, "Failed to parse device tree\n");
- of_node_put(child);
return ret;
}
if (phy_number >= PHY_NUMBER) {
dev_err(dev, "Invalid number of PHYs\n");
- of_node_put(child);
return -EINVAL;
}
phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ehci->phy[phy_number] = phy;
- if (IS_ERR(phy)) {
- of_node_put(child);
+ if (IS_ERR(phy))
return PTR_ERR(phy);
- }
}
exynos_ehci->legacy_phy = true;
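The exynos conversion above works because the _scoped iterator owns the child node reference. A hedged sketch of the pattern, with an illustrative property name:

#include <linux/of.h>

static int demo_parse_children(struct device_node *np)
{
	/* 'child' is declared by the macro; of_node_put() runs
	 * automatically when it leaves scope, so early returns no
	 * longer need explicit reference drops. */
	for_each_available_child_of_node_scoped(np, child) {
		u32 reg;

		if (of_property_read_u32(child, "reg", &reg))
			return -EINVAL;
	}
	return 0;
}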
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index bfa2eba4e3a7..1379e03644b2 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -37,7 +37,6 @@ struct exynos_ohci_hcd {
static int exynos_ohci_get_phy(struct device *dev,
struct exynos_ohci_hcd *exynos_ohci)
{
- struct device_node *child;
struct phy *phy;
int phy_number, num_phys;
int ret;
@@ -55,26 +54,22 @@ static int exynos_ohci_get_phy(struct device *dev,
return 0;
/* Get PHYs using legacy bindings */
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
dev_err(dev, "Failed to parse device tree\n");
- of_node_put(child);
return ret;
}
if (phy_number >= PHY_NUMBER) {
dev_err(dev, "Invalid number of PHYs\n");
- of_node_put(child);
return -EINVAL;
}
phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ohci->phy[phy_number] = phy;
- if (IS_ERR(phy)) {
- of_node_put(child);
+ if (IS_ERR(phy))
return PTR_ERR(phy);
- }
}
exynos_ohci->legacy_phy = true;
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 8264c454f6bd..5b775e1ea527 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -51,8 +51,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver;
static struct i2c_client *isp1301_i2c_client;
-static struct clk *usb_host_clk;
-
static void isp1301_configure_lpc32xx(void)
{
/* LPC32XX only supports DAT_SE0 USB mode */
@@ -155,6 +153,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0, irq;
struct device_node *isp1301_node;
+ struct clk *usb_host_clk;
if (pdev->dev.of_node) {
isp1301_node = of_parse_phandle(pdev->dev.of_node,
@@ -180,26 +179,20 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
/* Enable USB host clock */
- usb_host_clk = devm_clk_get(&pdev->dev, NULL);
+ usb_host_clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(usb_host_clk)) {
- dev_err(&pdev->dev, "failed to acquire USB OHCI clock\n");
+ dev_err(&pdev->dev, "failed to acquire and start USB OHCI clock\n");
ret = PTR_ERR(usb_host_clk);
goto fail_disable;
}
- ret = clk_prepare_enable(usb_host_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to start USB OHCI clock\n");
- goto fail_disable;
- }
-
isp1301_configure();
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
ret = -ENOMEM;
- goto fail_hcd;
+ goto fail_disable;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@@ -229,8 +222,6 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
ohci_nxp_stop_hc();
fail_resource:
usb_put_hcd(hcd);
-fail_hcd:
- clk_disable_unprepare(usb_host_clk);
fail_disable:
isp1301_i2c_client = NULL;
return ret;
@@ -243,7 +234,6 @@ static void ohci_hcd_nxp_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ohci_nxp_stop_hc();
usb_put_hcd(hcd);
- clk_disable_unprepare(usb_host_clk);
isp1301_i2c_client = NULL;
}
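The ohci-nxp conversion folds clock acquisition and enabling into one managed call. A minimal sketch, assuming a platform device with a single unnamed clock:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* The returned clock is already prepared and enabled; devres
	 * disables and unprepares it on probe failure or detach, so no
	 * error-path label or remove() cleanup is required. */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to acquire and start clock\n");
	return 0;
}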
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index f64bfe5f4d4d..a6be061f8653 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -204,10 +204,6 @@ static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE
{
.name = "usb",
- .compatible = "ohci-littledian",
- },
- {
- .name = "usb",
.compatible = "ohci-le",
},
#endif
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 9f4bf8c5f8a5..6576515a29cd 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -297,9 +297,9 @@ static void put_child_connect_map(struct r8a66597 *r8a66597, int address)
static void set_pipe_reg_addr(struct r8a66597_pipe *pipe, u8 dma_ch)
{
u16 pipenum = pipe->info.pipenum;
- const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
- const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
- const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
+ static const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
+ static const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
+ static const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
if (dma_ch > R8A66597_PIPE_NO_DMA) /* dma fifo not use? */
dma_ch = R8A66597_PIPE_NO_DMA;
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 161c09953c4e..241d7aa1fbc2 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -173,16 +173,18 @@ static void xhci_dbc_giveback(struct dbc_request *req, int status)
spin_lock(&dbc->lock);
}
-static void xhci_dbc_flush_single_request(struct dbc_request *req)
+static void trb_to_noop(union xhci_trb *trb)
{
- union xhci_trb *trb = req->trb;
-
trb->generic.field[0] = 0;
trb->generic.field[1] = 0;
trb->generic.field[2] = 0;
trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
+}
+static void xhci_dbc_flush_single_request(struct dbc_request *req)
+{
+ trb_to_noop(req->trb);
xhci_dbc_giveback(req, -ESHUTDOWN);
}
@@ -649,7 +651,6 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
case DS_DISABLED:
return;
case DS_CONFIGURED:
- case DS_STALLED:
if (dbc->driver->disconnect)
dbc->driver->disconnect(dbc);
break;
@@ -670,6 +671,23 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
}
static void
+handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
+{
+ if (halted) {
+ dev_info(dbc->dev, "DbC Endpoint halted\n");
+ dep->halted = 1;
+
+ } else if (dep->halted) {
+ dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
+ dep->halted = 0;
+
+ if (!list_empty(&dep->list_pending))
+ writel(DBC_DOOR_BELL_TARGET(dep->direction),
+ &dbc->regs->doorbell);
+ }
+}
+
+static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
u32 portsc;
@@ -697,6 +715,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
struct xhci_ring *ring;
int ep_id;
int status;
+ struct xhci_ep_ctx *ep_ctx;
u32 comp_code;
size_t remain_length;
struct dbc_request *req = NULL, *r;
@@ -706,8 +725,30 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
dep = (ep_id == EPID_OUT) ?
get_out_ep(dbc) : get_in_ep(dbc);
+ ep_ctx = (ep_id == EPID_OUT) ?
+ dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
ring = dep->ring;
+ /* Match the pending request: */
+ list_for_each_entry(r, &dep->list_pending, list_pending) {
+ if (r->trb_dma == event->trans_event.buffer) {
+ req = r;
+ break;
+ }
+ if (r->status == -COMP_STALL_ERROR) {
+ dev_warn(dbc->dev, "Give back stale stalled req\n");
+ ring->num_trbs_free++;
+ xhci_dbc_giveback(r, 0);
+ }
+ }
+
+ if (!req) {
+ dev_warn(dbc->dev, "no matching request\n");
+ return;
+ }
+
+ trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
+
switch (comp_code) {
case COMP_SUCCESS:
remain_length = 0;
@@ -718,31 +759,49 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
case COMP_TRB_ERROR:
case COMP_BABBLE_DETECTED_ERROR:
case COMP_USB_TRANSACTION_ERROR:
- case COMP_STALL_ERROR:
dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
status = -comp_code;
break;
+ case COMP_STALL_ERROR:
+ dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
+ event->trans_event.buffer, remain_length, ep_ctx->deq);
+ status = 0;
+ dep->halted = 1;
+
+ /*
+ * xHC DbC may trigger a STALL bulk xfer event when the host sends a
+ * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
+ * active bulk transfer.
+ *
+ * Don't give back this transfer request, as hardware will later
+ * start processing TRBs from this 'STALLED' TRB onward, causing
+ * TRBs and requests to get out of sync.
+ *
+ * If the STALL event shows some bytes were transferred, assume
+ * it's an actual transfer issue and give back the request. In
+ * this case mark the TRB as No-Op to prevent hw from using the
+ * TRB again.
+ */
+
+ if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
+ dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
+ if (remain_length == req->length) {
+ dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
+ req->status = -COMP_STALL_ERROR;
+ req->actual = 0;
+ return;
+ }
+ dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
+ trb_to_noop(req->trb);
+ }
+ break;
+
default:
dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
status = -comp_code;
break;
}
- /* Match the pending request: */
- list_for_each_entry(r, &dep->list_pending, list_pending) {
- if (r->trb_dma == event->trans_event.buffer) {
- req = r;
- break;
- }
- }
-
- if (!req) {
- dev_warn(dbc->dev, "no matched request\n");
- return;
- }
-
- trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
-
ring->num_trbs_free++;
req->actual = req->length - remain_length;
xhci_dbc_giveback(req, status);
@@ -762,7 +821,6 @@ static void inc_evt_deq(struct xhci_ring *ring)
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
dma_addr_t deq;
- struct dbc_ep *dep;
union xhci_trb *evt;
u32 ctrl, portsc;
bool update_erdp = false;
@@ -814,43 +872,17 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
return EVT_DISC;
}
- /* Handle endpoint stall event: */
+ /* Check and handle changes in endpoint halt status */
ctrl = readl(&dbc->regs->control);
- if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
- (ctrl & DBC_CTRL_HALT_OUT_TR)) {
- dev_info(dbc->dev, "DbC Endpoint stall\n");
- dbc->state = DS_STALLED;
-
- if (ctrl & DBC_CTRL_HALT_IN_TR) {
- dep = get_in_ep(dbc);
- xhci_dbc_flush_endpoint_requests(dep);
- }
-
- if (ctrl & DBC_CTRL_HALT_OUT_TR) {
- dep = get_out_ep(dbc);
- xhci_dbc_flush_endpoint_requests(dep);
- }
-
- return EVT_DONE;
- }
+ handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
+ handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);
/* Clear DbC run change bit: */
if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
writel(ctrl, &dbc->regs->control);
ctrl = readl(&dbc->regs->control);
}
-
break;
- case DS_STALLED:
- ctrl = readl(&dbc->regs->control);
- if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
- !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
- (ctrl & DBC_CTRL_DBC_RUN)) {
- dbc->state = DS_CONFIGURED;
- break;
- }
-
- return EVT_DONE;
default:
dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
break;
@@ -939,7 +971,6 @@ static const char * const dbc_state_strings[DS_MAX] = {
[DS_ENABLED] = "enabled",
[DS_CONNECTED] = "connected",
[DS_CONFIGURED] = "configured",
- [DS_STALLED] = "stalled",
};
static ssize_t dbc_show(struct device *dev,
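The spurious-stall logic above hinges on two comparisons. A simplified decision sketch (no locking, TRB_CYCLE as defined in xhci.h; parameter names are illustrative):

#include <linux/types.h>

static bool demo_stall_is_spurious(u64 ep_deq, u64 event_buf,
				   size_t remain, size_t req_len)
{
	/* Endpoint stopped exactly on the stalled TRB and no bytes
	 * moved: the stall came from ClearFeature(ENDPOINT_HALT), not
	 * a failed transfer, so the request stays queued. */
	return (ep_deq & ~(u64)TRB_CYCLE) == event_buf &&
	       remain == req_len;
}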
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index 0118c6288a3c..8ec813b6e9fd 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -81,7 +81,6 @@ enum dbc_state {
DS_ENABLED,
DS_CONNECTED,
DS_CONFIGURED,
- DS_STALLED,
DS_MAX
};
@@ -90,6 +89,7 @@ struct dbc_ep {
struct list_head list_pending;
struct xhci_ring *ring;
unsigned int direction:1;
+ unsigned int halted:1;
};
#define DBC_QUEUE_SIZE 16
@@ -110,7 +110,6 @@ struct dbc_port {
struct tasklet_struct push;
struct list_head write_pool;
- struct kfifo write_fifo;
bool registered;
};
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index b74e98e94393..b8e78867e25a 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -24,19 +24,6 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
return dbc->priv;
}
-static unsigned int
-dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
-{
- unsigned int len;
-
- len = kfifo_len(&port->write_fifo);
- if (len < size)
- size = len;
- if (size != 0)
- size = kfifo_out(&port->write_fifo, packet, size);
- return size;
-}
-
static int dbc_start_tx(struct dbc_port *port)
__releases(&port->port_lock)
__acquires(&port->port_lock)
@@ -49,7 +36,7 @@ static int dbc_start_tx(struct dbc_port *port)
while (!list_empty(pool)) {
req = list_entry(pool->next, struct dbc_request, list_pool);
- len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
+ len = kfifo_out(&port->port.xmit_fifo, req->buf, DBC_MAX_PACKET);
if (len == 0)
break;
do_tty_wake = true;
@@ -216,7 +203,7 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
spin_lock_irqsave(&port->port_lock, flags);
if (count)
- count = kfifo_in(&port->write_fifo, buf, count);
+ count = kfifo_in(&port->port.xmit_fifo, buf, count);
dbc_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -230,7 +217,7 @@ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
int status;
spin_lock_irqsave(&port->port_lock, flags);
- status = kfifo_put(&port->write_fifo, ch);
+ status = kfifo_put(&port->port.xmit_fifo, ch);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
@@ -253,7 +240,7 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty)
unsigned int room;
spin_lock_irqsave(&port->port_lock, flags);
- room = kfifo_avail(&port->write_fifo);
+ room = kfifo_avail(&port->port.xmit_fifo);
spin_unlock_irqrestore(&port->port_lock, flags);
return room;
@@ -266,7 +253,7 @@ static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
unsigned int chars;
spin_lock_irqsave(&port->port_lock, flags);
- chars = kfifo_len(&port->write_fifo);
+ chars = kfifo_len(&port->port.xmit_fifo);
spin_unlock_irqrestore(&port->port_lock, flags);
return chars;
@@ -346,7 +333,7 @@ static void dbc_rx_push(struct tasklet_struct *t)
port->n_read = 0;
}
- list_move(&req->list_pool, &port->read_pool);
+ list_move_tail(&req->list_pool, &port->read_pool);
}
if (do_push)
@@ -424,7 +411,8 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
goto err_idr;
}
- ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
+ ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
+ GFP_KERNEL);
if (ret)
goto err_exit_port;
@@ -453,7 +441,7 @@ err_free_requests:
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
- kfifo_free(&port->write_fifo);
+ kfifo_free(&port->port.xmit_fifo);
err_exit_port:
idr_remove(&dbc_tty_minors, port->minor);
err_idr:
@@ -478,7 +466,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
idr_remove(&dbc_tty_minors, port->minor);
mutex_unlock(&dbc_tty_minors_lock);
- kfifo_free(&port->write_fifo);
+ kfifo_free(&port->port.xmit_fifo);
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->read_queue);
xhci_dbc_free_requests(&port->write_pool);
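The dbgtty hunks drop the private fifo in favor of the one embedded in struct tty_port. A sketch of a producer under the same locking scheme, assuming the driver-private port_lock used above:

#include <linux/kfifo.h>
#include <linux/tty.h>

static unsigned int demo_queue_tx(struct tty_port *port, spinlock_t *lock,
				  const u8 *buf, unsigned int count)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* All staging now goes through the tty_port-embedded fifo. */
	count = kfifo_in(&port->xmit_fifo, buf, count);
	spin_unlock_irqrestore(lock, flags);

	return count;	/* bytes actually queued */
}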
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 96eb36a58738..67ecf7320c62 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -42,6 +42,7 @@
#define XHCI_EXT_CAPS_DEBUG 10
/* Vendor caps */
#define XHCI_EXT_CAPS_VENDOR_INTEL 192
+#define XHCI_EXT_CAPS_INTEL_SPR_SHADOW 206
/* USB Legacy Support Capability - section 7.1.1 */
#define XHCI_HC_BIOS_OWNED (1 << 16)
#define XHCI_HC_OS_OWNED (1 << 24)
@@ -64,6 +65,10 @@
#define XHCI_HLC (1 << 19)
#define XHCI_BLC (1 << 20)
+/* Intel SPR shadow capability */
+#define XHCI_INTEL_SPR_ESS_PORT_OFFSET 0x8ac4 /* SuperSpeed port control */
+#define XHCI_INTEL_SPR_TUNEN BIT(4) /* Tunnel mode enabled */
+
/* command register values to disable interrupts and halt the HC */
/* start/stop HC execution - do not write unless HC is halted*/
#define XHCI_CMD_RUN (1 << 0)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 61f083de6e19..d27c30ac17fd 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -752,6 +752,42 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
}
+/**
+ * xhci_port_is_tunneled() - Check if USB3 connection is tunneled over USB4
+ * @xhci: xhci host controller
+ * @port: USB3 port to be checked.
+ *
+ * Some hosts can detect if a USB3 connection is native USB3 or tunneled over
+ * USB4. Intel hosts expose this via the vendor-specific extended capability
+ * 206 eSS PORT registers' TUNEN (tunnel enabled) bit.
+ *
+ * A USB3 device must be connected to the port to detect the tunnel.
+ *
+ * Return: link tunnel mode enum; USB_LINK_UNKNOWN if the host is incapable
+ * of detecting USB3-over-USB4 tunnels, USB_LINK_NATIVE or USB_LINK_TUNNELED
+ * otherwise.
+ */
+enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
+ struct xhci_port *port)
+{
+ void __iomem *base;
+ u32 offset;
+
+ base = &xhci->cap_regs->hc_capbase;
+ offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
+
+ if (offset && offset <= XHCI_INTEL_SPR_ESS_PORT_OFFSET) {
+ offset = XHCI_INTEL_SPR_ESS_PORT_OFFSET + port->hcd_portnum * 0x20;
+
+ if (readl(base + offset) & XHCI_INTEL_SPR_TUNEN)
+ return USB_LINK_TUNNELED;
+ else
+ return USB_LINK_NATIVE;
+ }
+
+ return USB_LINK_UNKNOWN;
+}
+
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
u32 link_state)
{
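A condensed sketch of the capability walk xhci_port_is_tunneled() performs, dropping the offset sanity check for brevity; the 0x20 port stride and the register layout are Intel SPR-specific assumptions taken from the hunk above:

static u32 demo_read_tunen(struct xhci_hcd *xhci, unsigned int hcd_portnum)
{
	void __iomem *base = &xhci->cap_regs->hc_capbase;
	u32 offset;

	offset = xhci_find_next_ext_cap(base, 0,
					XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
	if (!offset)
		return 0;	/* capability absent, tunnel state unknown */

	offset = XHCI_INTEL_SPR_ESS_PORT_OFFSET + hcd_portnum * 0x20;
	return readl(base + offset) & XHCI_INTEL_SPR_TUNEN;
}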
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 937ce5fd5809..d2900197a49e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2332,7 +2332,8 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
}
struct xhci_interrupter *
-xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs)
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
+ u32 imod_interval)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir;
@@ -2365,6 +2366,11 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs)
return NULL;
}
+ err = xhci_set_interrupter_moderation(ir, imod_interval);
+ if (err)
+ xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n",
+ i, imod_interval);
+
xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
i, xhci->max_interrupters);
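With the new parameter, a secondary-interrupter client passes its own moderation value at creation time. A hypothetical caller sketch; the segment count and the 160000 ns interval are illustrative:

static struct xhci_interrupter *demo_setup_offload_ir(struct usb_hcd *hcd)
{
	/* 2 event ring segments, ~160us of interrupt moderation */
	return xhci_create_secondary_interrupter(hcd, 2, 160000);
}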
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index 247cc7c2ce70..30cc5a1380a5 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -50,6 +50,8 @@
#define RENESAS_RETRY 10000
#define RENESAS_DELAY 10
+#define RENESAS_FW_NAME "renesas_usb_fw.mem"
+
static int renesas_fw_download_image(struct pci_dev *dev,
const u32 *fw, size_t step, bool rom)
{
@@ -573,12 +575,10 @@ exit:
return err;
}
-int renesas_xhci_check_request_fw(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int renesas_xhci_check_request_fw(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
- struct xhci_driver_data *driver_data =
- (struct xhci_driver_data *)id->driver_data;
- const char *fw_name = driver_data->firmware;
+ const char fw_name[] = RENESAS_FW_NAME;
const struct firmware *fw;
bool has_rom;
int err;
@@ -625,7 +625,41 @@ exit:
release_firmware(fw);
return err;
}
-EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw);
-MODULE_DESCRIPTION("Support for Renesas xHCI controller with firmware");
+static int
+xhci_pci_renesas_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int retval;
+
+ retval = renesas_xhci_check_request_fw(dev, id);
+ if (retval)
+ return retval;
+
+ return xhci_pci_common_probe(dev, id);
+}
+
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver xhci_renesas_pci_driver = {
+ .name = "xhci-pci-renesas",
+ .id_table = pci_ids,
+
+ .probe = xhci_pci_renesas_probe,
+ .remove = xhci_pci_remove,
+
+ .shutdown = usb_hcd_pci_shutdown,
+ .driver = {
+ .pm = pm_ptr(&usb_hcd_pci_pm_ops),
+ },
+};
+module_pci_driver(xhci_renesas_pci_driver);
+
+MODULE_DESCRIPTION("Renesas xHCI PCI Host Controller Driver");
+MODULE_FIRMWARE(RENESAS_FW_NAME);
+MODULE_IMPORT_NS(xhci);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index dc1e345ab67e..91dccd25a551 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -55,6 +55,9 @@
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed
+#define PCI_VENDOR_ID_PHYTIUM 0x1db7
+#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27
+
/* Thunderbolt */
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5
@@ -78,6 +81,9 @@
#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
+#define PCI_DEVICE_ID_CADENCE 0x17CD
+#define PCI_DEVICE_ID_CADENCE_SSP 0x0200
+
static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -93,6 +99,10 @@ static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
.update_hub_device = xhci_pci_update_hub_device,
};
+/*
+ * Primary Legacy and MSI IRQ are synced in suspend_common().
+ * All MSI-X IRQs and secondary MSI IRQs should be synced here.
+ */
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
@@ -105,13 +115,12 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
}
}
-/* Free any IRQs and disable MSI-X */
+/* Legacy IRQ is freed by usb_remove_hcd() or usb_hcd_pci_shutdown() */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- /* return if using legacy interrupt */
if (hcd->irq > 0)
return;
@@ -235,15 +244,6 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct xhci_driver_data *driver_data;
- const struct pci_device_id *id;
-
- id = pci_match_id(to_pci_driver(pdev->dev.driver)->id_table, pdev);
-
- if (id && id->driver_data) {
- driver_data = (struct xhci_driver_data *)id->driver_data;
- xhci->quirks |= driver_data->quirks;
- }
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
@@ -416,6 +416,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
+ if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM &&
+ pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+
/* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
if (pdev->vendor == PCI_VENDOR_ID_VIA &&
pdev->device == 0x3432)
@@ -473,6 +477,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
}
+ if (pdev->vendor == PCI_DEVICE_ID_CADENCE &&
+ pdev->device == PCI_DEVICE_ID_CADENCE_SSP)
+ xhci->quirks |= XHCI_CDNS_SCTX_QUIRK;
+
/* xHC spec requires PCI devices to support D3hot and D3cold */
if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
@@ -534,10 +542,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct xhci_hcd *xhci;
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
+ u8 sbrn;
xhci = hcd_to_xhci(hcd);
- if (!xhci->sbrn)
- pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
@@ -552,7 +559,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(pdev);
- xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+ pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &sbrn);
+ xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int)sbrn);
/* Find any debug ports */
return xhci_pci_reinit(xhci, pdev);
@@ -572,21 +580,13 @@ static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hd
* We need to register our own PCI probe function (instead of the USB core's
* function) in order to create a second roothub under xHCI.
*/
-static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
- struct xhci_driver_data *driver_data;
struct reset_control *reset;
- driver_data = (struct xhci_driver_data *)id->driver_data;
- if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) {
- retval = renesas_xhci_check_request_fw(dev, id);
- if (retval)
- return retval;
- }
-
reset = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
if (IS_ERR(reset))
return PTR_ERR(reset);
@@ -651,12 +651,30 @@ put_runtime_pm:
pm_runtime_put_noidle(&dev->dev);
return retval;
}
+EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, xhci);
-static void xhci_pci_remove(struct pci_dev *dev)
+static const struct pci_device_id pci_ids_reject[] = {
+ /* handled by xhci-pci-renesas */
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
+ { /* end: all zeroes */ }
+};
+
+static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ if (pci_match_id(pci_ids_reject, dev))
+ return -ENODEV;
+
+ return xhci_pci_common_probe(dev, id);
+}
+
+void xhci_pci_remove(struct pci_dev *dev)
{
struct xhci_hcd *xhci;
+ bool set_power_d3;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP;
xhci->xhc_state |= XHCI_STATE_REMOVING;
@@ -669,12 +687,13 @@ static void xhci_pci_remove(struct pci_dev *dev)
xhci->shared_hcd = NULL;
}
+ usb_hcd_pci_remove(dev);
+
/* Workaround for spurious wakeups at shutdown with HSW */
- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ if (set_power_d3)
pci_set_power_state(dev, PCI_D3hot);
-
- usb_hcd_pci_remove(dev);
}
+EXPORT_SYMBOL_NS_GPL(xhci_pci_remove, xhci);
/*
* In some Intel xHCI controllers, in order to get D3 working,
@@ -783,7 +802,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- int retval = 0;
reset_control_reset(xhci->reset);
@@ -814,8 +832,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
- retval = xhci_resume(xhci, msg);
- return retval;
+ return xhci_resume(xhci, msg);
}
static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
@@ -882,19 +899,8 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-static const struct xhci_driver_data reneses_data = {
- .quirks = XHCI_RENESAS_FW_QUIRK,
- .firmware = "renesas_usb_fw.mem",
-};
-
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014),
- .driver_data = (unsigned long)&reneses_data,
- },
- { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015),
- .driver_data = (unsigned long)&reneses_data,
- },
/* handle any USB 3.0 xHCI controller */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
},
@@ -902,14 +908,6 @@ static const struct pci_device_id pci_ids[] = {
};
MODULE_DEVICE_TABLE(pci, pci_ids);
-/*
- * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't
- * load firmware, so don't encumber the xhci-pci driver with it.
- */
-#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
-MODULE_FIRMWARE("renesas_usb_fw.mem");
-#endif
-
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
.name = hcd_name,
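The probe split above follows a reject-list pattern: the generic driver declines IDs that a dedicated module owns. The pattern in isolation, with an illustrative table:

static const struct pci_device_id demo_reject_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },	/* owned elsewhere */
	{ /* end: all zeroes */ }
};

static int demo_generic_probe(struct pci_dev *dev,
			      const struct pci_device_id *id)
{
	/* -ENODEV lets the bus keep looking, so binding works no
	 * matter which of the two drivers is registered first. */
	if (pci_match_id(demo_reject_ids, dev))
		return -ENODEV;

	return xhci_pci_common_probe(dev, id);
}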
diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h
index cb9a8f331a44..e87c7d9d76b8 100644
--- a/drivers/usb/host/xhci-pci.h
+++ b/drivers/usb/host/xhci-pci.h
@@ -4,22 +4,7 @@
#ifndef XHCI_PCI_H
#define XHCI_PCI_H
-#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
-int renesas_xhci_check_request_fw(struct pci_dev *dev,
- const struct pci_device_id *id);
-
-#else
-static int renesas_xhci_check_request_fw(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return 0;
-}
-
-#endif
-
-struct xhci_driver_data {
- u64 quirks;
- const char *firmware;
-};
+int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id);
+void xhci_pci_remove(struct pci_dev *dev);
#endif
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 31bdfa52eeb2..ecaa75718e59 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -259,6 +259,12 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
if (device_property_read_bool(tmpdev, "write-64-hi-lo-quirk"))
xhci->quirks |= XHCI_WRITE_64_HI_LO;
+ if (device_property_read_bool(tmpdev, "xhci-missing-cas-quirk"))
+ xhci->quirks |= XHCI_MISSING_CAS;
+
+ if (device_property_read_bool(tmpdev, "xhci-skip-phy-init-quirk"))
+ xhci->quirks |= XHCI_SKIP_PHY_INIT;
+
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4ea2c3e072a9..4d664ba53fe9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1399,6 +1399,20 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+
+ /*
+ * Cadence xHCI controllers store some endpoint state
+ * information within the Rsvd0 fields of the Stream Endpoint
+ * context. This field is not cleared during a Set TR
+ * Dequeue Pointer command, which causes the XDMA to skip
+ * over the transfer ring and leads to data loss on the
+ * stream pipe.
+ * To fix this issue the driver must clear the Rsvd0 field.
+ */
+ if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
+ ctx->reserved[0] = 0;
+ ctx->reserved[1] = 0;
+ }
} else {
deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
}
@@ -2521,9 +2535,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->status = 0;
break;
case COMP_SHORT_PACKET:
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- requested, remaining);
td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
@@ -2764,35 +2775,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
return 0;
}
- do {
- /* This TRB should be in the TD at the head of this ring's
- * TD list.
+ if (list_empty(&ep_ring->td_list)) {
+ /*
+ * Don't print warnings if the ring is empty due to a stopped endpoint generating an
+ * extra completion event if the device was suspended, or an event for the last TRB
+ * of a short TD we already got a short event for. The short TD is already removed
+ * from the TD list.
*/
- if (list_empty(&ep_ring->td_list)) {
- /*
- * Don't print wanings if it's due to a stopped endpoint
- * generating an extra completion event if the device
- * was suspended. Or, a event for the last TRB of a
- * short TD we already got a short event for.
- * The short TD is already removed from the TD list.
- */
-
- if (!(trb_comp_code == COMP_STOPPED ||
- trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
- ep_ring->last_td_was_short)) {
- xhci_warn(xhci, "WARN Event TRB for slot %u ep %d with no TDs queued?\n",
- slot_id, ep_index);
- }
- if (ep->skip) {
- ep->skip = false;
- xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
- slot_id, ep_index);
- }
-
- td = NULL;
- goto check_endpoint_halted;
+ if (trb_comp_code != COMP_STOPPED &&
+ trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
+ !ep_ring->last_td_was_short) {
+ xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
+ slot_id, ep_index);
}
+ ep->skip = false;
+ goto check_endpoint_halted;
+ }
+
+ do {
td = list_first_entry(&ep_ring->td_list, struct xhci_td,
td_list);
@@ -2803,7 +2804,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
skip_isoc_td(xhci, td, ep, status);
- continue;
+ if (!list_empty(&ep_ring->td_list))
+ continue;
+
+ xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
+ slot_id, ep_index);
+ ep->skip = false;
+ td = NULL;
+ goto check_endpoint_halted;
}
/*
@@ -3941,10 +3949,6 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
start_frame_id = (start_frame_id >> 3) & 0x7ff;
end_frame_id = (end_frame_id >> 3) & 0x7ff;
- xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
- __func__, index, readl(&xhci->run_regs->microframe_index),
- start_frame_id, end_frame_id, start_frame);
-
if (start_frame_id < end_frame_id) {
if (start_frame > end_frame_id ||
start_frame < start_frame_id)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index efdf4c228b8c..899c0effb5d3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -347,8 +347,8 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
}
/* interrupt moderation interval imod_interval in nanoseconds */
-static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
- u32 imod_interval)
+int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval)
{
u32 imod;
@@ -4525,6 +4525,20 @@ static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_port *port;
u32 capability;
+ /* Check if USB3 device at root port is tunneled over USB4 */
+ if (hcd->speed >= HCD_USB3 && !udev->parent->parent) {
+ port = xhci->usb3_rhub.ports[udev->portnum - 1];
+
+ udev->tunnel_mode = xhci_port_is_tunneled(xhci, port);
+ if (udev->tunnel_mode == USB_LINK_UNKNOWN)
+ dev_dbg(&udev->dev, "link tunnel state unknown\n");
+ else if (udev->tunnel_mode == USB_LINK_TUNNELED)
+ dev_dbg(&udev->dev, "tunneled over USB4 link\n");
+ else if (udev->tunnel_mode == USB_LINK_NATIVE)
+ dev_dbg(&udev->dev, "native USB 3.x link\n");
+ return 0;
+ }
+
if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support)
return 0;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ebd0afd59a60..620502de971a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1498,15 +1498,10 @@ struct xhci_hcd {
spinlock_t lock;
/* packed release number */
- u8 sbrn;
u16 hci_version;
- u8 max_slots;
u16 max_interrupters;
- u8 max_ports;
- u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
/* Valid values are 12 to 20, inclusive */
@@ -1616,7 +1611,7 @@ struct xhci_hcd {
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
-#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36)
+/* Reserved. It was XHCI_RENESAS_FW_QUIRK */
#define XHCI_SKIP_PHY_INIT BIT_ULL(37)
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
@@ -1628,6 +1623,7 @@ struct xhci_hcd {
#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
+#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -1831,7 +1827,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
struct xhci_interrupter *
-xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs);
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
+ u32 imod_interval);
void xhci_remove_secondary_interrupter(struct usb_hcd
*hcd, struct xhci_interrupter *ir);
@@ -1871,6 +1868,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
+int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1904,10 +1903,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
enum xhci_ep_reset_type reset_type);
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 slot_id);
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, unsigned int stream_id,
- struct xhci_td *td);
-void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
@@ -1929,7 +1924,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
-
+enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
+ struct xhci_port *port);
void xhci_hc_died(struct xhci_hcd *xhci);
#ifdef CONFIG_PM
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index c8098e9b432e..62b5a30edc42 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -107,7 +107,12 @@ static void appledisplay_complete(struct urb *urb)
case ACD_BTN_BRIGHT_UP:
case ACD_BTN_BRIGHT_DOWN:
pdata->button_pressed = 1;
- schedule_delayed_work(&pdata->work, 0);
+ /*
+ * There is a window during which no backlight
+ * device is registered yet.
+ */
+ if (pdata->bd)
+ schedule_delayed_work(&pdata->work, 0);
break;
case ACD_BTN_NONE:
default:
@@ -202,6 +207,7 @@ static int appledisplay_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
struct backlight_properties props;
+ struct backlight_device *backlight;
struct appledisplay *pdata;
struct usb_device *udev = interface_to_usbdev(iface);
struct usb_endpoint_descriptor *endpoint;
@@ -272,13 +278,14 @@ static int appledisplay_probe(struct usb_interface *iface,
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
- pdata->bd = backlight_device_register(bl_name, NULL, pdata,
+ backlight = backlight_device_register(bl_name, NULL, pdata,
&appledisplay_bl_data, &props);
- if (IS_ERR(pdata->bd)) {
+ if (IS_ERR(backlight)) {
dev_err(&iface->dev, "Backlight registration failed\n");
- retval = PTR_ERR(pdata->bd);
+ retval = PTR_ERR(backlight);
goto error;
}
+ pdata->bd = backlight;
/* Try to get brightness */
brightness = appledisplay_bl_get_brightness(pdata->bd);
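The appledisplay fix is a publish-after-validate ordering: register into a local, check it, and only then store the pointer the completion handler may dereference. A generic sketch, with illustrative names:

#include <linux/err.h>

struct demo_ctx {
	void *bd;	/* read by an async completion handler */
};

static int demo_publish(struct demo_ctx *ctx, void *candidate)
{
	if (IS_ERR(candidate))
		return PTR_ERR(candidate);	/* ctx->bd stays NULL */

	/* The handler checks for NULL and can never observe an
	 * ERR_PTR-encoded value here. */
	WRITE_ONCE(ctx->bd, candidate);
	return 0;
}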
diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c
index 2b2019c19cde..1ce885e4184c 100644
--- a/drivers/usb/misc/brcmstb-usb-pinmap.c
+++ b/drivers/usb/misc/brcmstb-usb-pinmap.c
@@ -335,6 +335,7 @@ static const struct of_device_id brcmstb_usb_pinmap_of_match[] = {
{ .compatible = "brcm,usb-pinmap" },
{ },
};
+MODULE_DEVICE_TABLE(of, brcmstb_usb_pinmap_of_match);
static struct platform_driver brcmstb_usb_pinmap_driver = {
.driver = {
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index cecd7693b741..75f5a740cba3 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -88,6 +88,9 @@ static int vendor_command(struct cypress *dev, unsigned char request,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
address, data, iobuf, CYPRESS_MAX_REQSIZE,
USB_CTRL_GET_TIMEOUT);
+ /* we must not process garbage */
+ if (retval < 2)
+ goto err_buf;
/* store returned data (more READs to be added) */
switch (request) {
@@ -107,6 +110,7 @@ static int vendor_command(struct cypress *dev, unsigned char request,
break;
}
+err_buf:
kfree(iobuf);
error:
return retval;
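The cypress guard works because usb_control_msg() returns the transferred byte count on success. A sketch of the same check for a two-byte vendor read; the request value and sizes are illustrative:

#include <linux/usb.h>

static int demo_vendor_read(struct usb_device *udev, u8 request, void *buf)
{
	int retval;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_OTHER,
				 0, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
	if (retval < 2)		/* error or short reply: don't parse it */
		return retval < 0 ? retval : -EIO;

	return 0;
}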
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 7cbef74dfc9a..f392d6f84df9 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -627,7 +627,6 @@ static const struct file_operations ld_usb_fops = {
.open = ld_usb_open,
.release = ld_usb_release,
.poll = ld_usb_poll,
- .llseek = no_llseek,
};
/*
diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c
index 56710e6b1653..560591e02d6a 100644
--- a/drivers/usb/misc/onboard_usb_dev.c
+++ b/drivers/usb/misc/onboard_usb_dev.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
+#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -29,6 +30,17 @@
#include "onboard_usb_dev.h"
+/* USB5744 register offset and mask */
+#define USB5744_CMD_ATTACH 0xAA
+#define USB5744_CMD_ATTACH_LSB 0x56
+#define USB5744_CMD_CREG_ACCESS 0x99
+#define USB5744_CMD_CREG_ACCESS_LSB 0x37
+#define USB5744_CREG_MEM_ADDR 0x00
+#define USB5744_CREG_WRITE 0x00
+#define USB5744_CREG_RUNTIMEFLAGS2 0x41
+#define USB5744_CREG_RUNTIMEFLAGS2_LSB 0x1D
+#define USB5744_CREG_BYPASS_UDC_SUSPEND BIT(3)
+
static void onboard_dev_attach_usb_driver(struct work_struct *work);
static struct usb_device_driver onboard_dev_usbdev_driver;
@@ -98,6 +110,7 @@ static int onboard_dev_power_on(struct onboard_dev *onboard_dev)
fsleep(onboard_dev->pdata->reset_us);
gpiod_set_value_cansleep(onboard_dev->reset_gpio, 0);
+ fsleep(onboard_dev->pdata->power_on_delay_us);
onboard_dev->is_powered_on = true;
@@ -296,10 +309,50 @@ static void onboard_dev_attach_usb_driver(struct work_struct *work)
pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err));
}
+static int onboard_dev_5744_i2c_init(struct i2c_client *client)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ struct device *dev = &client->dev;
+ int ret;
+
+ /*
+ * Set BYPASS_UDC_SUSPEND bit to ensure MCU is always enabled
+ * and ready to respond to SMBus runtime commands.
+ * The command writes 5 bytes to memory and single data byte in
+ * configuration register.
+ */
+ char wr_buf[7] = {USB5744_CREG_MEM_ADDR, 5,
+ USB5744_CREG_WRITE, 1,
+ USB5744_CREG_RUNTIMEFLAGS2,
+ USB5744_CREG_RUNTIMEFLAGS2_LSB,
+ USB5744_CREG_BYPASS_UDC_SUSPEND};
+
+ ret = i2c_smbus_write_block_data(client, 0, sizeof(wr_buf), wr_buf);
+ if (ret)
+ return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n");
+
+ ret = i2c_smbus_write_word_data(client, USB5744_CMD_CREG_ACCESS,
+ USB5744_CMD_CREG_ACCESS_LSB);
+ if (ret)
+ return dev_err_probe(dev, ret, "Configuration Register Access Command failed\n");
+
+ /* Send SMBus command to boot hub. */
+ ret = i2c_smbus_write_word_data(client, USB5744_CMD_ATTACH,
+ USB5744_CMD_ATTACH_LSB);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "USB Attach with SMBus command failed\n");
+
+ return ret;
+#else
+ return -ENODEV;
+#endif
+}
+
static int onboard_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct onboard_dev *onboard_dev;
+ struct device_node *i2c_node;
int err;
onboard_dev = devm_kzalloc(dev, sizeof(*onboard_dev), GFP_KERNEL);
@@ -339,6 +392,27 @@ static int onboard_dev_probe(struct platform_device *pdev)
if (err)
return err;
+ i2c_node = of_parse_phandle(pdev->dev.of_node, "i2c-bus", 0);
+ if (i2c_node) {
+ struct i2c_client *client;
+
+ client = of_find_i2c_device_by_node(i2c_node);
+ of_node_put(i2c_node);
+
+ if (!client) {
+ err = -EPROBE_DEFER;
+ goto err_power_off;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node, "usb424,2744") ||
+ of_device_is_compatible(pdev->dev.of_node, "usb424,5744"))
+ err = onboard_dev_5744_i2c_init(client);
+
+ put_device(&client->dev);
+ if (err < 0)
+ goto err_power_off;
+ }
+
/*
* The USB driver might have been detached from the USB devices by
* onboard_dev_remove() (e.g. through an 'unbind' by userspace),
@@ -350,6 +424,10 @@ static int onboard_dev_probe(struct platform_device *pdev)
schedule_work(&attach_usb_driver_work);
return 0;
+
+err_power_off:
+ onboard_dev_power_off(onboard_dev);
+ return err;
}
static void onboard_dev_remove(struct platform_device *pdev)
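The USB5744 bring-up above is plain SMBus word/block writes. A sketch of the attach step alone, reusing the command macros this patch defines; treat the exact sequence as coming from the device documentation:

#include <linux/i2c.h>

static int demo_usb5744_attach(struct i2c_client *client)
{
	int ret;

	/* One word-data transaction: command byte plus 16-bit payload. */
	ret = i2c_smbus_write_word_data(client, USB5744_CMD_ATTACH,
					USB5744_CMD_ATTACH_LSB);

	return ret < 0 ? ret : 0;
}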
diff --git a/drivers/usb/misc/onboard_usb_dev.h b/drivers/usb/misc/onboard_usb_dev.h
index fbba549c0f47..317b3eb99c02 100644
--- a/drivers/usb/misc/onboard_usb_dev.h
+++ b/drivers/usb/misc/onboard_usb_dev.h
@@ -10,6 +10,7 @@
struct onboard_dev_pdata {
unsigned long reset_us; /* reset pulse width in us */
+ unsigned long power_on_delay_us; /* power on delay in us */
unsigned int num_supplies; /* number of supplies */
const char * const supply_names[MAX_SUPPLIES];
bool is_hub;
@@ -24,6 +25,7 @@ static const struct onboard_dev_pdata microchip_usb424_data = {
static const struct onboard_dev_pdata microchip_usb5744_data = {
.reset_us = 0,
+ .power_on_delay_us = 10000,
.num_supplies = 2,
.supply_names = { "vdd", "vdd2" },
.is_hub = true,
diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c
index 26e9b8749d8a..19906301a4eb 100644
--- a/drivers/usb/misc/qcom_eud.c
+++ b/drivers/usb/misc/qcom_eud.c
@@ -232,7 +232,7 @@ static void eud_remove(struct platform_device *pdev)
}
static const struct of_device_id eud_dt_match[] = {
- { .compatible = "qcom,sc7280-eud" },
+ { .compatible = "qcom,eud" },
{ }
};
MODULE_DEVICE_TABLE(of, eud_dt_match);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 4745a320eae4..4a9859e03f6b 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -404,7 +404,6 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
struct usb_yurex *dev;
int len = 0;
char in_buffer[MAX_S64_STRLEN];
- unsigned long flags;
dev = file->private_data;
@@ -419,9 +418,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
return -EIO;
}
- spin_lock_irqsave(&dev->lock, flags);
+ spin_lock_irq(&dev->lock);
scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu);
- spin_unlock_irqrestore(&dev->lock, flags);
+ spin_unlock_irq(&dev->lock);
mutex_unlock(&dev->io_mutex);
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
@@ -511,8 +510,11 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
__func__, retval);
goto error;
}
- if (set && timeout)
+ if (set && timeout) {
+ spin_lock_irq(&dev->lock);
dev->bbu = c2;
+ spin_unlock_irq(&dev->lock);
+ }
return timeout ? count : -EIO;
error:
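The yurex change is safe because read()/write() handlers run in process context with interrupts enabled, so there is no caller IRQ state to preserve. A minimal sketch of the cheaper form:

#include <linux/spinlock.h>
#include <linux/types.h>

static void demo_update(spinlock_t *lock, s64 *val, s64 newval)
{
	spin_lock_irq(lock);	/* unconditionally disables local IRQs */
	*val = newval;
	spin_unlock_irq(lock);	/* unconditionally re-enables them */
}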
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 4e30de4db1c0..afb71c18415d 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1289,7 +1289,6 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations mon_fops_binary = {
.owner = THIS_MODULE,
.open = mon_bin_open,
- .llseek = no_llseek,
.read = mon_bin_read,
/* .write = mon_text_write, */
.poll = mon_bin_poll,
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c
index 3c23805ab1a4..398e02af6a2b 100644
--- a/drivers/usb/mon/mon_stat.c
+++ b/drivers/usb/mon/mon_stat.c
@@ -62,7 +62,6 @@ static int mon_stat_release(struct inode *inode, struct file *file)
const struct file_operations mon_fops_stat = {
.owner = THIS_MODULE,
.open = mon_stat_open,
- .llseek = no_llseek,
.read = mon_stat_read,
/* .write = mon_stat_write, */
/* .poll = mon_stat_poll, */
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 2fe9b95bac1d..68b9b2b41189 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -685,7 +685,6 @@ static int mon_text_release(struct inode *inode, struct file *file)
static const struct file_operations mon_fops_text_t = {
.owner = THIS_MODULE,
.open = mon_text_open,
- .llseek = no_llseek,
.read = mon_text_read_t,
.release = mon_text_release,
};
@@ -693,7 +692,6 @@ static const struct file_operations mon_fops_text_t = {
static const struct file_operations mon_fops_text_u = {
.owner = THIS_MODULE,
.open = mon_text_open,
- .llseek = no_llseek,
.read = mon_text_read_u,
.release = mon_text_release,
};
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 0a35aab3ab81..63c86c046b98 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -416,10 +416,9 @@ static int mtk_musb_probe(struct platform_device *pdev)
return -ENOMEM;
ret = of_platform_populate(np, NULL, NULL, dev);
- if (ret) {
- dev_err(dev, "failed to create child devices at %p\n", np);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to create child devices at %p\n", np);
ret = mtk_musb_clks_get(glue);
if (ret)
@@ -448,23 +447,19 @@ static int mtk_musb_probe(struct platform_device *pdev)
glue->role = USB_ROLE_NONE;
break;
default:
- dev_err(&pdev->dev, "Error 'dr_mode' property\n");
- return -EINVAL;
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Error 'dr_mode' property\n");
}
glue->phy = devm_of_phy_get_by_index(dev, np, 0);
- if (IS_ERR(glue->phy)) {
- dev_err(dev, "fail to getting phy %ld\n",
- PTR_ERR(glue->phy));
- return PTR_ERR(glue->phy);
- }
+ if (IS_ERR(glue->phy))
+ return dev_err_probe(dev, PTR_ERR(glue->phy),
+ "fail to getting phy\n");
glue->usb_phy = usb_phy_generic_register();
- if (IS_ERR(glue->usb_phy)) {
- dev_err(dev, "fail to registering usb-phy %ld\n",
- PTR_ERR(glue->usb_phy));
- return PTR_ERR(glue->usb_phy);
- }
+ if (IS_ERR(glue->usb_phy))
+ return dev_err_probe(dev, PTR_ERR(glue->usb_phy),
+ "fail to registering usb-phy\n");
glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(glue->xceiv)) {
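dev_err_probe() both logs and returns the error, and it stays quiet for -EPROBE_DEFER (recording the reason in debugfs' devices_deferred instead), which is why it can replace the dev_err()-then-return pairs above wholesale. A condensed sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct phy *phy;

	phy = devm_phy_get(&pdev->dev, "usb");
	if (IS_ERR(phy))
		/*
		 * One line replaces log-then-return; a probe deferral
		 * is not printed as an error, everything else is.
		 */
		return dev_err_probe(&pdev->dev, PTR_ERR(phy),
				     "failed to get phy\n");

	return 0;
}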
diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c
index 29c7e5cdb230..00e13214aa76 100644
--- a/drivers/usb/musb/mpfs.c
+++ b/drivers/usb/musb/mpfs.c
@@ -49,30 +49,6 @@ static const struct musb_hdrc_config mpfs_musb_hdrc_config = {
.ram_bits = MPFS_MUSB_RAM_BITS,
};
-static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci)
-{
- unsigned long flags;
- irqreturn_t ret = IRQ_NONE;
- struct musb *musb = __hci;
-
- spin_lock_irqsave(&musb->lock, flags);
-
- musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
- musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
- musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
-
- if (musb->int_usb || musb->int_tx || musb->int_rx) {
- musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
- musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
- musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
- ret = musb_interrupt(musb);
- }
-
- spin_unlock_irqrestore(&musb->lock, flags);
-
- return ret;
-}
-
static void mpfs_musb_set_vbus(struct musb *musb, int is_on)
{
u8 devctl;
@@ -111,6 +87,129 @@ static void mpfs_musb_set_vbus(struct musb *musb, int is_on)
musb_readb(musb->mregs, MUSB_DEVCTL));
}
+#define POLL_SECONDS 2
+
+static void otg_timer(struct timer_list *t)
+{
+ struct musb *musb = from_timer(musb, t, dev_timer);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+ /*
+ * We poll because PolarFire SoC won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
+ usb_otg_state_string(musb->xceiv->otg->state));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ } else {
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ if (devctl & MUSB_DEVCTL_VBUS) {
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ break;
+ }
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ break;
+ case OTG_STATE_B_IDLE:
+ /*
+ * There's no ID-changed IRQ, so we have no good way to tell
+ * when to switch to the A-Default state machine (by setting
+ * the DEVCTL.Session bit).
+ *
+ * Workaround: whenever we're in B_IDLE, try setting the
+ * session flag every few seconds. If it works, ID was
+ * grounded and we're now in the A-Default state machine.
+ *
+ * NOTE: setting the session flag is _supposed_ to trigger
+ * SRP but clearly it doesn't.
+ */
+ musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static void __maybe_unused mpfs_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle while active, or as host when no VBUS timeout is set */
+ if (musb->is_active || (musb->a_wait_bcon == 0 &&
+ musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ usb_otg_state_string(musb->xceiv->otg->state));
+ del_timer(&musb->dev_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
+ dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
+ return;
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
+ usb_otg_state_string(musb->xceiv->otg->state),
+ jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb->dev_timer, timeout);
+}
+
+static irqreturn_t mpfs_musb_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx) {
+ musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
+ musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
+ musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
+ ret = musb_interrupt(musb);
+ }
+
+ /* Poll for ID change */
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
static int mpfs_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
@@ -121,6 +220,8 @@ static int mpfs_musb_init(struct musb *musb)
return PTR_ERR(musb->xceiv);
}
+ timer_setup(&musb->dev_timer, otg_timer, 0);
+
musb->dyn_fifo = true;
musb->isr = mpfs_musb_interrupt;
@@ -129,14 +230,25 @@ static int mpfs_musb_init(struct musb *musb)
return 0;
}
+static int mpfs_musb_exit(struct musb *musb)
+{
+ del_timer_sync(&musb->dev_timer);
+
+ return 0;
+}
+
static const struct musb_platform_ops mpfs_ops = {
.quirks = MUSB_DMA_INVENTRA,
.init = mpfs_musb_init,
+ .exit = mpfs_musb_exit,
.fifo_mode = 2,
#ifdef CONFIG_USB_INVENTRA_DMA
.dma_init = musbhs_dma_controller_create,
.dma_exit = musbhs_dma_controller_destroy,
#endif
+#ifndef CONFIG_USB_MUSB_HOST
+ .try_idle = mpfs_musb_try_idle,
+#endif
.set_vbus = mpfs_musb_set_vbus
};
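The new polling machinery is built on a plain kernel timer: timer_setup() binds otg_timer() to musb->dev_timer, from_timer() recovers the containing structure inside the callback, and mod_timer() re-arms it while polling is still needed. A self-contained sketch of that round trip (demo names are illustrative):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_ctrl {
	struct timer_list poll_timer;
	int polls_left;
};

static void demo_poll(struct timer_list *t)
{
	/* Recover the struct the timer is embedded in. */
	struct demo_ctrl *ctrl = from_timer(ctrl, t, poll_timer);

	if (--ctrl->polls_left > 0)
		mod_timer(&ctrl->poll_timer, jiffies + 2 * HZ);
}

static void demo_start(struct demo_ctrl *ctrl)
{
	ctrl->polls_left = 5;
	timer_setup(&ctrl->poll_timer, demo_poll, 0);
	mod_timer(&ctrl->poll_timer, jiffies + 2 * HZ);
}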
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 817c242a76ca..5428b2b67de1 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -374,6 +374,7 @@ static const struct of_device_id gpio_vbus_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, gpio_vbus_of_match);
static struct platform_driver gpio_vbus_driver = {
.driver = {
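Without MODULE_DEVICE_TABLE(of, ...) the match table is only consulted once the module is already loaded; the macro additionally exports the table as modalias strings so udev/kmod can autoload the module when a matching DT node appears. The pattern:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-device" },	/* hypothetical */
	{ /* sentinel */ }
};
/* Emits the of:N*T*C... aliases used for module autoloading. */
MODULE_DEVICE_TABLE(of, demo_of_match);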
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 920a32cd094d..cc4156c1b148 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/iopoll.h>
+#include <linux/regulator/consumer.h>
#define DRIVER_NAME "mxs_phy"
@@ -70,6 +71,9 @@
#define BM_USBPHY_PLL_EN_USB_CLKS BIT(6)
/* Anatop Registers */
+#define ANADIG_REG_1P1_SET 0x114
+#define ANADIG_REG_1P1_CLR 0x118
+
#define ANADIG_ANA_MISC0 0x150
#define ANADIG_ANA_MISC0_SET 0x154
#define ANADIG_ANA_MISC0_CLR 0x158
@@ -117,6 +121,14 @@
#define BM_ANADIG_USB2_MISC_RX_VPIN_FS BIT(29)
#define BM_ANADIG_USB2_MISC_RX_VMIN_FS BIT(28)
+/* System Integration Module (SIM) Registers */
+#define SIM_GPR1 0x30
+
+#define USB_PHY_VLLS_WAKEUP_EN BIT(0)
+
+#define BM_ANADIG_REG_1P1_ENABLE_WEAK_LINREG BIT(18)
+#define BM_ANADIG_REG_1P1_TRACK_VDD_SOC_CAP BIT(19)
+
#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
/* Do disconnection between PHY and controller without vbus */
@@ -149,6 +161,15 @@
#define MXS_PHY_TX_D_CAL_MIN 79
#define MXS_PHY_TX_D_CAL_MAX 119
+/*
+ * On imx6q/6sl/6sx, PHY2's clock is controlled directly by hardware,
+ * e.g. according to the PHY's suspend status. For these PHYs we only
+ * need to enable the clock during initialization and disable it in the
+ * shutdown routine. These PHYs can signal resume without software
+ * intervention as long as their clock is not gated.
+ */
+#define MXS_PHY_HARDWARE_CONTROL_PHY2_CLK BIT(4)
+
struct mxs_phy_data {
unsigned int flags;
};
@@ -160,12 +181,14 @@ static const struct mxs_phy_data imx23_phy_data = {
static const struct mxs_phy_data imx6q_phy_data = {
.flags = MXS_PHY_SENDING_SOF_TOO_FAST |
MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
- MXS_PHY_NEED_IP_FIX,
+ MXS_PHY_NEED_IP_FIX |
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK,
};
static const struct mxs_phy_data imx6sl_phy_data = {
.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
- MXS_PHY_NEED_IP_FIX,
+ MXS_PHY_NEED_IP_FIX |
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK,
};
static const struct mxs_phy_data vf610_phy_data = {
@@ -174,11 +197,13 @@ static const struct mxs_phy_data vf610_phy_data = {
};
static const struct mxs_phy_data imx6sx_phy_data = {
- .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK,
};
static const struct mxs_phy_data imx6ul_phy_data = {
- .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
+ .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS |
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK,
};
static const struct mxs_phy_data imx7ulp_phy_data = {
@@ -201,9 +226,11 @@ struct mxs_phy {
struct clk *clk;
const struct mxs_phy_data *data;
struct regmap *regmap_anatop;
+ struct regmap *regmap_sim;
int port_id;
u32 tx_reg_set;
u32 tx_reg_mask;
+ struct regulator *phy_3p0;
};
static inline bool is_imx6q_phy(struct mxs_phy *mxs_phy)
@@ -221,6 +248,11 @@ static inline bool is_imx7ulp_phy(struct mxs_phy *mxs_phy)
return mxs_phy->data == &imx7ulp_phy_data;
}
+static inline bool is_imx6ul_phy(struct mxs_phy *mxs_phy)
+{
+ return mxs_phy->data == &imx6ul_phy_data;
+}
+
/*
* PHY needs some 32K cycles to switch from 32K clock to
* bus (such as AHB/AXI, etc) clock.
@@ -288,6 +320,16 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
if (ret)
goto disable_pll;
+ if (mxs_phy->phy_3p0) {
+ ret = regulator_enable(mxs_phy->phy_3p0);
+ if (ret) {
+ dev_err(mxs_phy->phy.dev,
+ "Failed to enable 3p0 regulator, ret=%d\n",
+ ret);
+ return ret;
+ }
+ }
+
/* Power up the PHY */
writel(0, base + HW_USBPHY_PWD);
@@ -448,6 +490,9 @@ static void mxs_phy_shutdown(struct usb_phy *phy)
if (is_imx7ulp_phy(mxs_phy))
mxs_phy_pll_enable(phy->io_priv, false);
+ if (mxs_phy->phy_3p0)
+ regulator_disable(mxs_phy->phy_3p0);
+
clk_disable_unprepare(mxs_phy->clk);
}
@@ -503,12 +548,19 @@ static int mxs_phy_suspend(struct usb_phy *x, int suspend)
}
writel(BM_USBPHY_CTRL_CLKGATE,
x->io_priv + HW_USBPHY_CTRL_SET);
- clk_disable_unprepare(mxs_phy->clk);
+ if (!(mxs_phy->port_id == 1 &&
+ (mxs_phy->data->flags &
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK)))
+ clk_disable_unprepare(mxs_phy->clk);
} else {
mxs_phy_clock_switch_delay();
- ret = clk_prepare_enable(mxs_phy->clk);
- if (ret)
- return ret;
+ if (!(mxs_phy->port_id == 1 &&
+ (mxs_phy->data->flags &
+ MXS_PHY_HARDWARE_CONTROL_PHY2_CLK))) {
+ ret = clk_prepare_enable(mxs_phy->clk);
+ if (ret)
+ return ret;
+ }
writel(BM_USBPHY_CTRL_CLKGATE,
x->io_priv + HW_USBPHY_CTRL_CLR);
writel(0, x->io_priv + HW_USBPHY_PWD);
@@ -738,6 +790,17 @@ static int mxs_phy_probe(struct platform_device *pdev)
}
}
+ /* Currently, only imx7ulp has a SIM module */
+ if (of_get_property(np, "nxp,sim", NULL)) {
+ mxs_phy->regmap_sim = syscon_regmap_lookup_by_phandle
+ (np, "nxp,sim");
+ if (IS_ERR(mxs_phy->regmap_sim)) {
+ dev_dbg(&pdev->dev,
+ "failed to find regmap for sim\n");
+ return PTR_ERR(mxs_phy->regmap_sim);
+ }
+ }
+
/* Precompute which bits of the TX register are to be updated, if any */
if (!of_property_read_u32(np, "fsl,tx-cal-45-dn-ohms", &val) &&
val >= MXS_PHY_TX_CAL45_MIN && val <= MXS_PHY_TX_CAL45_MAX) {
@@ -789,6 +852,17 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->clk = clk;
mxs_phy->data = of_device_get_match_data(&pdev->dev);
+ mxs_phy->phy_3p0 = devm_regulator_get(&pdev->dev, "phy-3p0");
+ if (PTR_ERR(mxs_phy->phy_3p0) == -ENODEV)
+ /* regulator not described; treat as absent */
+ mxs_phy->phy_3p0 = NULL;
+ else if (IS_ERR(mxs_phy->phy_3p0))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mxs_phy->phy_3p0),
+ "Getting regulator error\n");
+
+ if (mxs_phy->phy_3p0)
+ regulator_set_voltage(mxs_phy->phy_3p0, 3200000, 3200000);
+
platform_set_drvdata(pdev, mxs_phy);
device_set_wakeup_capable(&pdev->dev, true);
@@ -804,28 +878,58 @@ static void mxs_phy_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static void mxs_phy_wakeup_enable(struct mxs_phy *mxs_phy, bool on)
+{
+ u32 mask = USB_PHY_VLLS_WAKEUP_EN;
+
+ /* If the SoC doesn't have a SIM module, quit */
+ if (!mxs_phy->regmap_sim)
+ return;
+
+ if (on) {
+ regmap_update_bits(mxs_phy->regmap_sim, SIM_GPR1, mask, mask);
+ udelay(500);
+ } else {
+ regmap_update_bits(mxs_phy->regmap_sim, SIM_GPR1, mask, 0);
+ }
+}
+
static void mxs_phy_enable_ldo_in_suspend(struct mxs_phy *mxs_phy, bool on)
{
- unsigned int reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
+ unsigned int reg;
+ u32 value;
/* If the SoCs don't have anatop, quit */
if (!mxs_phy->regmap_anatop)
return;
- if (is_imx6q_phy(mxs_phy))
+ if (is_imx6q_phy(mxs_phy)) {
+ reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
regmap_write(mxs_phy->regmap_anatop, reg,
BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG);
- else if (is_imx6sl_phy(mxs_phy))
+ } else if (is_imx6sl_phy(mxs_phy)) {
+ reg = on ? ANADIG_ANA_MISC0_SET : ANADIG_ANA_MISC0_CLR;
regmap_write(mxs_phy->regmap_anatop,
reg, BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG_SL);
+ } else if (is_imx6ul_phy(mxs_phy)) {
+ reg = on ? ANADIG_REG_1P1_SET : ANADIG_REG_1P1_CLR;
+ value = BM_ANADIG_REG_1P1_ENABLE_WEAK_LINREG |
+ BM_ANADIG_REG_1P1_TRACK_VDD_SOC_CAP;
+ if (mxs_phy_get_vbus_status(mxs_phy) && on)
+ regmap_write(mxs_phy->regmap_anatop, reg, value);
+ else if (!on)
+ regmap_write(mxs_phy->regmap_anatop, reg, value);
+ }
}
static int mxs_phy_system_suspend(struct device *dev)
{
struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
mxs_phy_enable_ldo_in_suspend(mxs_phy, true);
+ mxs_phy_wakeup_enable(mxs_phy, true);
+ }
return 0;
}
@@ -834,8 +938,10 @@ static int mxs_phy_system_resume(struct device *dev)
{
struct mxs_phy *mxs_phy = dev_get_drvdata(dev);
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev)) {
mxs_phy_enable_ldo_in_suspend(mxs_phy, false);
+ mxs_phy_wakeup_enable(mxs_phy, false);
+ }
return 0;
}
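The phy-3p0 handling above treats -ENODEV as "supply not described" and carries on with a NULL pointer so the later enable/disable calls are skipped. A compact sketch of that optional-supply pattern (demo names are illustrative; the sketch uses devm_regulator_get_optional(), the variant that returns -ENODEV for absent supplies):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical probe fragment: treat a missing supply as optional. */
static int demo_get_supply(struct device *dev, struct regulator **out)
{
	struct regulator *reg;

	reg = devm_regulator_get_optional(dev, "phy-3p0");
	if (PTR_ERR(reg) == -ENODEV)
		reg = NULL;		/* supply not described: skip it */
	else if (IS_ERR(reg))
		return PTR_ERR(reg);	/* real error or probe deferral */

	*out = reg;
	return 0;
}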
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index d7aa913ceb8a..c58a12c147f4 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -11,6 +11,7 @@
#include <linux/usb/role.h>
#include <linux/property.h>
#include <linux/device.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -21,6 +22,7 @@ static const struct class role_class = {
struct usb_role_switch {
struct device dev;
+ struct lock_class_key key;
struct mutex lock; /* device lock*/
struct module *module; /* the module this device depends on */
enum usb_role role;
@@ -326,6 +328,8 @@ static void usb_role_switch_release(struct device *dev)
{
struct usb_role_switch *sw = to_role_switch(dev);
+ mutex_destroy(&sw->lock);
+ lockdep_unregister_key(&sw->key);
kfree(sw);
}
@@ -364,7 +368,8 @@ usb_role_switch_register(struct device *parent,
if (!sw)
return ERR_PTR(-ENOMEM);
- mutex_init(&sw->lock);
+ lockdep_register_key(&sw->key);
+ mutex_init_with_key(&sw->lock, &sw->key);
sw->allow_userspace_control = desc->allow_userspace_control;
sw->usb2_port = desc->usb2_port;
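Registering a per-switch lock_class_key and feeding it to mutex_init_with_key() gives every usb_role_switch mutex its own lockdep class, so lockdep no longer conflates locks of independent switch instances; the matching lockdep_unregister_key() in the release path ties the key's lifetime to the device. The minimal shape of the pattern:

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_obj {
	struct lock_class_key key;
	struct mutex lock;
};

static struct demo_obj *demo_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* One lockdep class per instance instead of one per init site. */
	lockdep_register_key(&obj->key);
	mutex_init_with_key(&obj->lock, &obj->key);
	return obj;
}

static void demo_free(struct demo_obj *obj)
{
	mutex_destroy(&obj->lock);
	lockdep_unregister_key(&obj->key);
	kfree(obj);
}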
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index a1df686c3066..aa517242d060 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -138,7 +138,6 @@ static void aircable_process_read_urb(struct urb *urb)
static struct usb_serial_driver aircable_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "aircable",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 67a07cc007f0..800b04fe37fa 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -599,7 +599,6 @@ static void ark3116_process_read_urb(struct urb *urb)
static struct usb_serial_driver ark3116_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ark3116",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index cf47ee4ae5d3..44f5b58beec9 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
/* All of the device info needed for the serial converters */
static struct usb_serial_driver belkin_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "belkin",
},
.description = "Belkin / Peracom / GoHubs USB Serial Adapter",
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 0870c6533f80..02945ccf531d 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -837,7 +837,6 @@ static int ch341_reset_resume(struct usb_serial *serial)
static struct usb_serial_driver ch341_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ch341-uart",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 21fd26609252..c24101f0a07a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -299,7 +299,6 @@ struct cp210x_port_private {
static struct usb_serial_driver cp210x_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "cp210x",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 51e5aac3bf4c..76dd8e7453b5 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -67,7 +67,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver cyberjack_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "cyberjack",
},
.description = "Reiner SCT Cyberjack USB card reader",
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 1e0c028c5ec9..ce9134bb30f3 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -139,7 +139,6 @@ static void cypress_write_int_callback(struct urb *urb);
static struct usb_serial_driver cypress_earthmate_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "earthmate",
},
.description = "DeLorme Earthmate USB",
@@ -166,7 +165,6 @@ static struct usb_serial_driver cypress_earthmate_device = {
static struct usb_serial_driver cypress_hidcom_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "cyphidcom",
},
.description = "HID->COM RS232 Adapter",
@@ -192,7 +190,6 @@ static struct usb_serial_driver cypress_hidcom_device = {
static struct usb_serial_driver cypress_ca42v2_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "nokiaca42v2",
},
.description = "Nokia CA-42 V2 Adapter",
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index d1dea3850576..a06485965412 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -262,7 +262,6 @@ MODULE_DEVICE_TABLE(usb, id_table_combined);
static struct usb_serial_driver digi_acceleport_2_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "digi_2",
},
.description = "Digi 2 port USB adapter",
@@ -293,7 +292,6 @@ static struct usb_serial_driver digi_acceleport_2_device = {
static struct usb_serial_driver digi_acceleport_4_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "digi_4",
},
.description = "Digi 4 port USB adapter",
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 405e835e93dd..aedcf7ebd269 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -43,7 +43,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver empeg_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "empeg",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 5f7a46bcace6..530b77fc2f78 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -967,7 +967,6 @@ static int f81232_resume(struct usb_serial *serial)
static struct usb_serial_driver f81232_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "f81232",
},
.id_table = f81232_id_table,
@@ -994,7 +993,6 @@ static struct usb_serial_driver f81232_device = {
static struct usb_serial_driver f81534a_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "f81534a",
},
.id_table = f81534a_id_table,
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index ef126cd3d94f..685930ac8383 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -1538,7 +1538,6 @@ static int f81534_resume(struct usb_serial *serial)
static struct usb_serial_driver f81534_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "f81534",
},
.description = DRIVER_DESC,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 76a04ab41100..c6f17d732b95 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2871,7 +2871,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
static struct usb_serial_driver ftdi_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ftdi_sio",
.dev_groups = ftdi_groups,
},
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 6d6ec7eed87c..b97ba8ed6801 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1412,7 +1412,6 @@ static void garmin_port_remove(struct usb_serial_port *port)
/* All of the device info needed */
static struct usb_serial_driver garmin_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "garmin_gps",
},
.description = "Garmin GPS usb/tty",
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 15b6dee3a8e5..757f0a586ddb 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -63,7 +63,6 @@ static int usb_serial_generic_calc_num_ports(struct usb_serial *serial,
static struct usb_serial_driver usb_serial_generic_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "generic",
},
.id_table = generic_device_ids,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index abe4bbb0ac65..c7d6b5e3f898 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2978,7 +2978,6 @@ static void edge_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver edgeport_2port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_2",
},
.description = "Edgeport 2 port adapter",
@@ -3013,7 +3012,6 @@ static struct usb_serial_driver edgeport_2port_device = {
static struct usb_serial_driver edgeport_4port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_4",
},
.description = "Edgeport 4 port adapter",
@@ -3048,7 +3046,6 @@ static struct usb_serial_driver edgeport_4port_device = {
static struct usb_serial_driver edgeport_8port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_8",
},
.description = "Edgeport 8 port adapter",
@@ -3083,7 +3080,6 @@ static struct usb_serial_driver edgeport_8port_device = {
static struct usb_serial_driver epic_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "epic",
},
.description = "EPiC device",
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 7a3a6e539456..7d0584b2a234 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2670,7 +2670,6 @@ static int edge_resume(struct usb_serial *serial)
static struct usb_serial_driver edgeport_1port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_ti_1",
},
.description = "Edgeport TI 1 port adapter",
@@ -2708,7 +2707,6 @@ static struct usb_serial_driver edgeport_1port_device = {
static struct usb_serial_driver edgeport_2port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "edgeport_ti_2",
},
.description = "Edgeport TI 2 port adapter",
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index e11441bac44f..3c6a9b9b9c2b 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -496,7 +496,6 @@ MODULE_DEVICE_TABLE(usb, ipaq_id_table);
/* All of the device info needed for the Compaq iPAQ */
static struct usb_serial_driver ipaq_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ipaq",
},
.description = "PocketPC PDA",
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index d04c7cc5c1c2..b1e672c2f423 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -285,7 +285,6 @@ static void ipw_close(struct usb_serial_port *port)
static struct usb_serial_driver ipw_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ipw",
},
.description = "IPWireless converter",
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 82f108134e6f..a106b71e698f 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -71,7 +71,6 @@ MODULE_DEVICE_TABLE(usb, ir_id_table);
static struct usb_serial_driver ir_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ir-usb",
},
.description = "IR Dongle",
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 77cba71bcccb..c21dcc9b6f05 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1157,7 +1157,6 @@ static int iuu_remove_sysfs_attrs(struct usb_serial_port *port)
static struct usb_serial_driver iuu_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "iuu_phoenix",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 0a783985197c..9129e0282c24 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -3001,7 +3001,6 @@ static void keyspan_port_remove(struct usb_serial_port *port)
/* Structs for the devices, pre and post renumeration. */
static struct usb_serial_driver keyspan_pre_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_no_firm",
},
.description = "Keyspan - (without firmware)",
@@ -3012,7 +3011,6 @@ static struct usb_serial_driver keyspan_pre_device = {
static struct usb_serial_driver keyspan_1port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_1",
},
.description = "Keyspan 1 port adapter",
@@ -3036,7 +3034,6 @@ static struct usb_serial_driver keyspan_1port_device = {
static struct usb_serial_driver keyspan_2port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_2",
},
.description = "Keyspan 2 port adapter",
@@ -3060,7 +3057,6 @@ static struct usb_serial_driver keyspan_2port_device = {
static struct usb_serial_driver keyspan_4port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_4",
},
.description = "Keyspan 4 port adapter",
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 0eef358b314a..e98b479593d3 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -676,7 +676,6 @@ static void keyspan_pda_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver keyspan_pda_fake_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_pda_pre",
},
.description = "Keyspan PDA - (prerenumeration)",
@@ -687,7 +686,6 @@ static struct usb_serial_driver keyspan_pda_fake_device = {
static struct usb_serial_driver keyspan_pda_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "keyspan_pda",
},
.description = "Keyspan PDA",
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 394b3189e003..a2c0bebc041f 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -75,7 +75,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver kl5kusb105d_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "kl5kusb105d",
},
.description = "KL5KUSB105D / PalmConnect",
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 5e775f68fcb8..464433be2034 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -77,7 +77,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver kobil_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "kobil",
},
.description = "KOBIL USB smart card terminal",
@@ -156,8 +155,7 @@ static void kobil_init_termios(struct tty_struct *tty)
{
/* Default to echo off and other sane device settings */
tty->termios.c_lflag = 0;
- tty->termios.c_iflag &= ~(ISIG | ICANON | ECHO | IEXTEN | XCASE);
- tty->termios.c_iflag |= IGNBRK | IGNPAR | IXOFF;
+ tty->termios.c_iflag = IGNBRK | IGNPAR | IXOFF;
/* do NOT translate NL to CR-NL (0x0A -> 0x0D 0x0A) */
tty->termios.c_oflag &= ~ONLCR;
}
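The old initializer masked c_iflag with ISIG/ICANON/ECHO/IEXTEN/XCASE, which are c_lflag bits, so the clear either did nothing or cleared unrelated input flags; assigning the intended input flags directly says what was meant. An illustration of the bug class the fix removes (demo names are illustrative):

#include <linux/tty.h>

/*
 * ICANON/ECHO are c_lflag bits: clearing them from c_iflag compiles
 * silently but operates on the wrong field.
 */
static void demo_bad_init(struct tty_struct *tty)
{
	tty->termios.c_iflag &= ~(ICANON | ECHO);	/* wrong field! */
}

static void demo_good_init(struct tty_struct *tty)
{
	tty->termios.c_iflag = IGNBRK | IGNPAR | IXOFF;	/* explicit value */
}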
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 6570c8817a80..e5a139ed5d90 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -69,7 +69,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver mct_u232_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "mct_u232",
},
.description = "MCT U232",
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 30ab565e0738..028878292901 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -341,7 +341,6 @@ static void metrousb_unthrottle(struct tty_struct *tty)
static struct usb_serial_driver metrousb_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "metro-usb",
},
.description = "Metrologic USB to Serial",
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 23544074eb1c..e59bfa7c8030 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1724,7 +1724,6 @@ static void mos7720_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver moschip7720_2port_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "moschip7720",
},
.description = "Moschip 2 port adapter",
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 85697466b147..ca3da79afd23 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1782,7 +1782,6 @@ static int mos7840_resume(struct usb_serial *serial)
static struct usb_serial_driver moschip7840_4port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "mos7840",
},
.description = DRIVER_DESC,
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index 942cb0153423..57e4f2b215d8 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1278,7 +1278,6 @@ static int mxuport_resume(struct usb_serial *serial)
static struct usb_serial_driver mxuport_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "mxuport",
},
.description = "MOXA UPort",
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 82791fd67c46..4d57a5902292 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -95,7 +95,6 @@ static int navman_write(struct tty_struct *tty, struct usb_serial_port *port,
static struct usb_serial_driver navman_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "navman",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 41f1b872d277..397ebd5a3e74 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -49,7 +49,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver zyxel_omninet_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "omninet",
},
.description = "ZyXEL - omni.net usb",
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index e31a6d77da3a..1ee84ccc4bbd 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -375,7 +375,6 @@ static void opticon_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver opticon_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "opticon",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 176f38750ad5..eb0731992ca9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2381,7 +2381,6 @@ MODULE_DEVICE_TABLE(usb, option_ids);
static struct usb_serial_driver option_1port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "option1",
},
.description = "GSM modem (1-port)",
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index fa07f6ff9ecc..24068368892c 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -138,7 +138,6 @@ static void oti6858_port_remove(struct usb_serial_port *port);
/* device info */
static struct usb_serial_driver oti6858_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "oti6858",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index d93f5d584557..ab48f8875249 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
+ { USB_DEVICE(MACROSILICON_VENDOR_ID, MACROSILICON_MS3020_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -1234,7 +1235,6 @@ static void pl2303_process_read_urb(struct urb *urb)
static struct usb_serial_driver pl2303_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "pl2303",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 732f9b13ad5d..d60eda7f6eda 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -171,3 +171,7 @@
/* Allied Telesis VT-Kit3 */
#define AT_VENDOR_ID 0x0caa
#define AT_VTKIT3_PRODUCT_ID 0x3001
+
+/* Macrosilicon MS3020 */
+#define MACROSILICON_VENDOR_ID 0x345f
+#define MACROSILICON_MS3020_PRODUCT_ID 0x3020
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 015bb7c5d19d..485ec5b07122 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -72,7 +72,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver qcaux_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "qcaux",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 703a9c563557..c7de9585feb2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -454,7 +454,6 @@ static void qc_release(struct usb_serial *serial)
static struct usb_serial_driver qcdevice = {
.driver = {
- .owner = THIS_MODULE,
.name = "qcserial",
},
.description = "Qualcomm USB modem",
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 821f25e52ec2..4167a45d1be3 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -924,7 +924,6 @@ write_out:
static struct usb_serial_driver qt2_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "quatech-serial",
},
.description = DRIVER_DESC,
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 6accbecb6318..238b54993446 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -284,7 +284,6 @@ static int safe_startup(struct usb_serial *serial)
static struct usb_serial_driver safe_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "safe_serial",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 353b2549eaa8..64a2e0bb5723 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1021,7 +1021,6 @@ static int sierra_resume(struct usb_serial *serial)
static struct usb_serial_driver sierra_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "sierra",
},
.description = "Sierra USB modem",
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 6b294bf8bc43..11077beb7232 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -452,7 +452,6 @@ static int spcp8x5_tiocmget(struct tty_struct *tty)
static struct usb_serial_driver spcp8x5_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "SPCP8x5",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 1e1888b66305..df21009bdf42 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -500,7 +500,6 @@ static void ssu100_process_read_urb(struct urb *urb)
static struct usb_serial_driver ssu100_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ssu100",
},
.description = DRIVER_DESC,
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 9aabb087f733..58962bcbf9ba 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -169,7 +169,6 @@ static void symbol_port_remove(struct usb_serial_port *port)
static struct usb_serial_driver symbol_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "symbol",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 0fba25abf671..a0c244bc77c0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -417,7 +417,6 @@ static const struct usb_device_id ti_id_table_combined[] = {
static struct usb_serial_driver ti_1port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ti_usb_3410_5052_1",
},
.description = "TI USB 3410 1 port adapter",
@@ -450,7 +449,6 @@ static struct usb_serial_driver ti_1port_device = {
static struct usb_serial_driver ti_2port_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "ti_usb_3410_5052_2",
},
.description = "TI USB 5052 2 port adapter",
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index 46952182e04f..15a17bf111f1 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -407,7 +407,6 @@ static void upd78f0730_close(struct usb_serial_port *port)
static struct usb_serial_driver upd78f0730_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "upd78f0730",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 82f4f0b992aa..2c12449ff60c 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -24,7 +24,6 @@ static const struct usb_device_id vendor##_id_table[] = { \
}; \
static struct usb_serial_driver vendor##_device = { \
.driver = { \
- .owner = THIS_MODULE, \
.name = #vendor, \
}, \
.id_table = vendor##_id_table, \
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index f1e91eb7f8a4..df6a2ae0bf42 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1459,17 +1459,18 @@ static void usb_serial_deregister(struct usb_serial_driver *device)
}
/**
- * usb_serial_register_drivers - register drivers for a usb-serial module
+ * __usb_serial_register_drivers - register drivers for a usb-serial module
* @serial_drivers: NULL-terminated array of pointers to drivers to be registered
+ * @owner: owning module
* @name: name of the usb_driver for this set of @serial_drivers
* @id_table: list of all devices this @serial_drivers set binds to
*
* Registers all the drivers in the @serial_drivers array, and dynamically
* creates a struct usb_driver with the name @name and id_table of @id_table.
*/
-int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
- const char *name,
- const struct usb_device_id *id_table)
+int __usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+ struct module *owner, const char *name,
+ const struct usb_device_id *id_table)
{
int rc;
struct usb_driver *udriver;
@@ -1514,6 +1515,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
+ (*sd)->driver.owner = owner;
rc = usb_serial_register(*sd);
if (rc)
goto err_deregister_drivers;
@@ -1532,7 +1534,7 @@ err_free_driver:
kfree(udriver);
return rc;
}
-EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
+EXPORT_SYMBOL_GPL(__usb_serial_register_drivers);
/**
* usb_serial_deregister_drivers - deregister drivers for a usb-serial module
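Threading the owner through __usb_serial_register_drivers() is what lets all the per-driver ".owner = THIS_MODULE" initializers above go away: each serial driver's struct device_driver gets its owner filled in at registration time instead. The usb-serial header presumably keeps the old name as a wrapper that captures THIS_MODULE at the call site; a sketch of that arrangement (the macro body is an assumption, not quoted from the header):

/* Hypothetical wrapper, capturing the caller's module automatically. */
#define usb_serial_register_drivers(serial_drivers, name, id_table) \
	__usb_serial_register_drivers(serial_drivers, THIS_MODULE,   \
				      name, id_table)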
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 61a8425b7762..ec9fff794b36 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -83,7 +83,6 @@ static void usb_debug_init_termios(struct tty_struct *tty)
static struct usb_serial_driver debug_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "debug",
},
.id_table = id_table,
@@ -96,7 +95,6 @@ static struct usb_serial_driver debug_device = {
static struct usb_serial_driver dbc_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "xhci_dbc",
},
.id_table = dbc_id_table,
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 4412834db21c..062a38fe0c1c 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -161,7 +161,6 @@ MODULE_DEVICE_TABLE(usb, id_table_combined);
and Palm 4.0 devices */
static struct usb_serial_driver handspring_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "visor",
},
.description = "Handspring Visor / Palm OS",
@@ -180,7 +179,6 @@ static struct usb_serial_driver handspring_device = {
/* All of the device info needed for the Clie UX50, TH55 Palm 5.0 devices */
static struct usb_serial_driver clie_5_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "clie_5",
},
.description = "Sony Clie 5.0",
@@ -200,7 +198,6 @@ static struct usb_serial_driver clie_5_device = {
/* device info for the Sony Clie OS version 3.5 */
static struct usb_serial_driver clie_3_5_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "clie_3.5",
},
.description = "Sony Clie 3.5",
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index ca48e90a8e81..009faeb2ef55 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -91,7 +91,6 @@ static int whiteheat_break_ctl(struct tty_struct *tty, int break_state);
static struct usb_serial_driver whiteheat_fake_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "whiteheatnofirm",
},
.description = "Connect Tech - WhiteHEAT - (prerenumeration)",
@@ -103,7 +102,6 @@ static struct usb_serial_driver whiteheat_fake_device = {
static struct usb_serial_driver whiteheat_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "whiteheat",
},
.description = "Connect Tech - WhiteHEAT",
diff --git a/drivers/usb/serial/wishbone-serial.c b/drivers/usb/serial/wishbone-serial.c
index ff4092f9b33c..670d573f6b63 100644
--- a/drivers/usb/serial/wishbone-serial.c
+++ b/drivers/usb/serial/wishbone-serial.c
@@ -70,7 +70,6 @@ static void wishbone_serial_close(struct usb_serial_port *port)
static struct usb_serial_driver wishbone_serial_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "wishbone_serial",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
index 1d9a12628f81..4186e791b384 100644
--- a/drivers/usb/serial/xr_serial.c
+++ b/drivers/usb/serial/xr_serial.c
@@ -1082,7 +1082,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver xr_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "xr_serial",
},
.id_table = id_table,
diff --git a/drivers/usb/serial/xsens_mt.c b/drivers/usb/serial/xsens_mt.c
index cf262c9a9638..382b3698c1d5 100644
--- a/drivers/usb/serial/xsens_mt.c
+++ b/drivers/usb/serial/xsens_mt.c
@@ -49,7 +49,6 @@ static int xsens_mt_probe(struct usb_serial *serial,
static struct usb_serial_driver xsens_mt_device = {
.driver = {
- .owner = THIS_MODULE,
.name = "xsens_mt",
},
.id_table = id_table,
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 40d34cc28344..a9d3c58ce7d9 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -132,7 +132,7 @@ static int init_alauda(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id alauda_usb_ids[] = {
+static const struct usb_device_id alauda_usb_ids[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
@@ -154,7 +154,7 @@ MODULE_DEVICE_TABLE(usb, alauda_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev alauda_unusual_dev_list[] = {
+static const struct us_unusual_dev alauda_unusual_dev_list[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
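Constifying these tables moves them to .rodata; nothing writes to them at runtime, and both the USB core's matching paths and MODULE_DEVICE_TABLE() accept const tables, so the change is pure hardening. The same shape repeats across the storage subdrivers below:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/usb.h>

/* Read-only after build: the table lands in .rodata. */
static const struct usb_device_id demo_usb_ids[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_usb_ids);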
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index 98b3ec352a13..30dfd0082474 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -33,7 +33,7 @@ MODULE_IMPORT_NS(USB_STORAGE);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id cypress_usb_ids[] = {
+static const struct usb_device_id cypress_usb_ids[] = {
# include "unusual_cypress.h"
{ } /* Terminating entry */
};
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(usb, cypress_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev cypress_unusual_dev_list[] = {
+static const struct us_unusual_dev cypress_unusual_dev_list[] = {
# include "unusual_cypress.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index bcc4a2fad863..3ea5601d16b8 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -80,7 +80,7 @@ static int datafab_determine_lun(struct us_data *us,
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id datafab_usb_ids[] = {
+static const struct usb_device_id datafab_usb_ids[] = {
# include "unusual_datafab.h"
{ } /* Terminating entry */
};
@@ -102,7 +102,7 @@ MODULE_DEVICE_TABLE(usb, datafab_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev datafab_unusual_dev_list[] = {
+static const struct us_unusual_dev datafab_unusual_dev_list[] = {
# include "unusual_datafab.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 97c66c0d91f4..a4bfbecbf16c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -43,7 +43,7 @@ MODULE_FIRMWARE(MS_RW_FIRMWARE);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)}
-static struct usb_device_id ene_ub6250_usb_ids[] = {
+static const struct usb_device_id ene_ub6250_usb_ids[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
@@ -65,7 +65,7 @@ MODULE_DEVICE_TABLE(usb, ene_ub6250_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
+static const struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
@@ -1484,7 +1484,7 @@ static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
u32 bl_num;
- u16 bl_len;
+ u32 bl_len;
unsigned int offset = 0;
unsigned char buf[8];
struct scatterlist *sg = NULL;
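Widening bl_len from u16 to u32 guards the READ CAPACITY reply math: the block length is a 32-bit field, and a u16 intermediate would silently truncate values of 0x10000 and above. The exact overflow path is not visible in this hunk, but the trap looks like this (illustrative only):

#include <linux/types.h>

/* READ CAPACITY-style reply: 4-byte big-endian block length. */
static u32 demo_parse_block_len(const u8 *buf)
{
	/*
	 * Accumulating this into a u16 would drop the top two bytes;
	 * a u32 keeps the whole field.
	 */
	return (u32)buf[4] << 24 | (u32)buf[5] << 16 |
	       (u32)buf[6] << 8 | buf[7];
}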
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index c3ce51c2dabd..cab27ba7a32a 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -119,7 +119,7 @@ static int init_freecom(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id freecom_usb_ids[] = {
+static const struct usb_device_id freecom_usb_ids[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
@@ -141,7 +141,7 @@ MODULE_DEVICE_TABLE(usb, freecom_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev freecom_unusual_dev_list[] = {
+static const struct us_unusual_dev freecom_unusual_dev_list[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 300aeef160e7..f2254eb3c0d7 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -67,7 +67,7 @@ static int isd200_Initialization(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id isd200_usb_ids[] = {
+static const struct usb_device_id isd200_usb_ids[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
@@ -89,7 +89,7 @@ MODULE_DEVICE_TABLE(usb, isd200_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev isd200_unusual_dev_list[] = {
+static const struct us_unusual_dev isd200_unusual_dev_list[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 229bf0c1afc9..0e71a8f33c2b 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -62,7 +62,7 @@ MODULE_IMPORT_NS(USB_STORAGE);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id jumpshot_usb_ids[] = {
+static const struct usb_device_id jumpshot_usb_ids[] = {
# include "unusual_jumpshot.h"
{ } /* Terminating entry */
};
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(usb, jumpshot_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev jumpshot_unusual_dev_list[] = {
+static const struct us_unusual_dev jumpshot_unusual_dev_list[] = {
# include "unusual_jumpshot.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index 38ddfedef629..d6a5e54f2ca8 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -51,7 +51,7 @@ static int rio_karma_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id karma_usb_ids[] = {
+static const struct usb_device_id karma_usb_ids[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
@@ -73,7 +73,7 @@ MODULE_DEVICE_TABLE(usb, karma_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev karma_unusual_dev_list[] = {
+static const struct us_unusual_dev karma_unusual_dev_list[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 01f3c2779ccf..f97cf6cadb8e 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -55,7 +55,7 @@ struct usb_onetouch {
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id onetouch_usb_ids[] = {
+static const struct usb_device_id onetouch_usb_ids[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
@@ -77,7 +77,7 @@ MODULE_DEVICE_TABLE(usb, onetouch_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev onetouch_unusual_dev_list[] = {
+static const struct us_unusual_dev onetouch_unusual_dev_list[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index 51bcd4a43690..03d1b9c69ea1 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -63,7 +63,7 @@ static int usb_stor_sddr09_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id sddr09_usb_ids[] = {
+static const struct usb_device_id sddr09_usb_ids[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
@@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(usb, sddr09_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev sddr09_unusual_dev_list[] = {
+static const struct us_unusual_dev sddr09_unusual_dev_list[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 0aa079405d23..b8227478a7ad 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -40,7 +40,7 @@ MODULE_IMPORT_NS(USB_STORAGE);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id sddr55_usb_ids[] = {
+static const struct usb_device_id sddr55_usb_ids[] = {
# include "unusual_sddr55.h"
{ } /* Terminating entry */
};
@@ -62,7 +62,7 @@ MODULE_DEVICE_TABLE(usb, sddr55_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev sddr55_unusual_dev_list[] = {
+static const struct us_unusual_dev sddr55_unusual_dev_list[] = {
# include "unusual_sddr55.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index f0d0ca37163d..e7c224b7c464 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -162,7 +162,7 @@ static int init_usbat_flash(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id usbat_usb_ids[] = {
+static const struct usb_device_id usbat_usb_ids[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
@@ -184,7 +184,7 @@ MODULE_DEVICE_TABLE(usb, usbat_usb_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev usbat_unusual_dev_list[] = {
+static const struct us_unusual_dev usbat_unusual_dev_list[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index c223b4dc1b19..03043d567fa1 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -927,7 +927,7 @@ static const struct scsi_host_template uas_host_template = {
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
-static struct usb_device_id uas_usb_ids[] = {
+static const struct usb_device_id uas_usb_ids[] = {
# include "unusual_uas.h"
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index 31e3e9544bc0..d1e7c487ddfb 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -6,6 +6,7 @@
* Copyright(c) 2022, Analogix Semiconductor. All rights reserved.
*
*/
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -884,8 +885,8 @@ static void anx7411_chip_standby(struct anx7411_data *ctx)
OCM_RESET);
ret |= anx7411_reg_write(ctx->tcpc_client, ANALOG_CTRL_10, 0x80);
/* Set TCPC to RD and DRP enable */
- cc1 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
- cc2 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
+ cc1 = FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD);
+ cc2 = FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD);
ret |= anx7411_reg_write(ctx->tcpc_client, TCPC_ROLE_CTRL,
TCPC_ROLE_CTRL_DRP | cc1 | cc2);
@@ -1571,6 +1572,7 @@ static const struct of_device_id anx_match_table[] = {
{.compatible = "analogix,anx7411",},
{},
};
+MODULE_DEVICE_TABLE(of, anx_match_table);
static struct i2c_driver anx7411_driver = {
.driver = {
diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
index f8504a90da26..22163d8f9eb0 100644
--- a/drivers/usb/typec/tcpm/maxim_contaminant.c
+++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
@@ -5,6 +5,7 @@
* USB-C module to reduce wakeups due to contaminants.
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
@@ -45,14 +46,9 @@ enum fladc_select {
#define READ1_SLEEP_MS 10
#define READ2_SLEEP_MS 5
-#define STATUS_CHECK(reg, mask, val) (((reg) & (mask)) == (val))
-
#define IS_CC_OPEN(cc_status) \
- (STATUS_CHECK((cc_status), TCPC_CC_STATUS_CC1_MASK << TCPC_CC_STATUS_CC1_SHIFT, \
- TCPC_CC_STATE_SRC_OPEN) && STATUS_CHECK((cc_status), \
- TCPC_CC_STATUS_CC2_MASK << \
- TCPC_CC_STATUS_CC2_SHIFT, \
- TCPC_CC_STATE_SRC_OPEN))
+ (FIELD_GET(TCPC_CC_STATUS_CC1, cc_status) == TCPC_CC_STATE_SRC_OPEN \
+ && FIELD_GET(TCPC_CC_STATUS_CC2, cc_status) == TCPC_CC_STATE_SRC_OPEN)
static int max_contaminant_adc_to_mv(struct max_tcpci_chip *chip, enum fladc_select channel,
bool ua_src, u8 fladc)
@@ -80,8 +76,8 @@ static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_s
int ret;
/* Channel & scale select */
- ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK,
- channel << ADC_CHANNEL_OFFSET);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL,
+ FIELD_PREP(ADCINSEL, channel));
if (ret < 0)
return ret;
@@ -100,7 +96,8 @@ static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_s
if (ret < 0)
return ret;
- ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK, 0);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL,
+ FIELD_PREP(ADCINSEL, 0));
if (ret < 0)
return ret;
@@ -120,13 +117,14 @@ static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
if (channel == CC1_SCALE1 || channel == CC2_SCALE1 || channel == CC1_SCALE2 ||
channel == CC2_SCALE2) {
/* Enable 1uA current source */
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
- ULTRA_LOW_POWER_MODE);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL,
+ FIELD_PREP(CCLPMODESEL, ULTRA_LOW_POWER_MODE));
if (ret < 0)
return ret;
/* Enable 1uA current source */
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_1_SRC);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL,
+ FIELD_PREP(CCRPCTRL, UA_1_SRC));
if (ret < 0)
return ret;
@@ -180,7 +178,8 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven
int ret;
/* Enable 80uA source */
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_80_SRC);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL,
+ FIELD_PREP(CCRPCTRL, UA_80_SRC));
if (ret < 0)
return ret;
@@ -213,7 +212,8 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven
if (ret < 0)
return ret;
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, 0);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL,
+ FIELD_PREP(CCRPCTRL, 0));
if (ret < 0)
return ret;
@@ -284,10 +284,11 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
u8 temp;
int ret;
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3, CCWTRDEB_MASK | CCWTRSEL_MASK
- | WTRCYCLE_MASK, CCWTRDEB_1MS << CCWTRDEB_SHIFT |
- CCWTRSEL_1V << CCWTRSEL_SHIFT | WTRCYCLE_4_8_S <<
- WTRCYCLE_SHIFT);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3,
+ CCWTRDEB | CCWTRSEL | WTRCYCLE,
+ FIELD_PREP(CCWTRDEB, CCWTRDEB_1MS)
+ | FIELD_PREP(CCWTRSEL, CCWTRSEL_1V)
+ | FIELD_PREP(WTRCYCLE, WTRCYCLE_4_8_S));
if (ret < 0)
return ret;
@@ -302,8 +303,9 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
if (ret < 0)
return ret;
- ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
- ULTRA_LOW_POWER_MODE);
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL,
+ FIELD_PREP(CCLPMODESEL,
+ ULTRA_LOW_POWER_MODE));
if (ret < 0)
return ret;
ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL2, &temp);
@@ -322,11 +324,14 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
return 0;
}
-bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce)
+bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce,
+ bool *cc_handled)
{
u8 cc_status, pwr_cntl;
int ret;
+ *cc_handled = true;
+
ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status);
if (ret < 0)
return false;
@@ -368,9 +373,8 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
return true;
}
}
- return false;
} else if (chip->contaminant_state == DETECTED) {
- if (STATUS_CHECK(cc_status, TCPC_CC_STATUS_TOGGLING, 0)) {
+ if (!(cc_status & TCPC_CC_STATUS_TOGGLING)) {
chip->contaminant_state = max_contaminant_detect_contaminant(chip);
if (chip->contaminant_state == DETECTED) {
max_contaminant_enable_dry_detection(chip);
@@ -379,6 +383,7 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
}
}
+ *cc_handled = false;
return false;
}
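The conversions above all rely on FIELD_PREP()/FIELD_GET() deriving the shift from a GENMASK()-defined mask at compile time, which is why the separate *_SHIFT macros can go away. A minimal sketch of the equivalence, using an illustrative two-bit field rather than a real TCPC register:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MODE	GENMASK(4, 3)	/* two-bit field at bits 4:3 */

static u8 example_pack(u8 mode)
{
	/* Same as (mode << 3) & GENMASK(4, 3), but the shift is derived. */
	return FIELD_PREP(EXAMPLE_MODE, mode);
}

static u8 example_unpack(u8 reg)
{
	/* Same as (reg & GENMASK(4, 3)) >> 3. */
	return FIELD_GET(EXAMPLE_MODE, reg);
}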
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 3e3dcb983dde..ed32583829be 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -5,6 +5,7 @@
* USB Type-C Port Controller Interface.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -103,45 +104,45 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
switch (cc) {
case TYPEC_CC_RA:
- reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RA)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RA));
break;
case TYPEC_CC_RD:
- reg = (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD));
break;
case TYPEC_CC_RP_DEF:
- reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
- (TCPC_ROLE_CTRL_RP_VAL_DEF <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_DEF));
break;
case TYPEC_CC_RP_1_5:
- reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
- (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_1_5));
break;
case TYPEC_CC_RP_3_0:
- reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
- (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_3_0));
break;
case TYPEC_CC_OPEN:
default:
- reg = (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN));
break;
}
if (vconn_pres) {
if (polarity == TYPEC_POLARITY_CC2) {
- reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
- reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
+ reg &= ~TCPC_ROLE_CTRL_CC1;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN);
} else {
- reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
- reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg &= ~TCPC_ROLE_CTRL_CC2;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN);
}
}
@@ -167,15 +168,11 @@ static int tcpci_apply_rc(struct tcpc_dev *tcpc, enum typec_cc_status cc,
* APPLY_RC state is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 and vbus autodischarge on
* disconnect is disabled. Bail out when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2.
*/
- if (((reg & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) >>
- TCPC_ROLE_CTRL_CC2_SHIFT) !=
- ((reg & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) >>
- TCPC_ROLE_CTRL_CC1_SHIFT))
+ if (FIELD_GET(TCPC_ROLE_CTRL_CC2, reg) != FIELD_GET(TCPC_ROLE_CTRL_CC1, reg))
return 0;
return regmap_update_bits(tcpci->regmap, TCPC_ROLE_CTRL, polarity == TYPEC_POLARITY_CC1 ?
- TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT :
- TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT,
+ TCPC_ROLE_CTRL_CC2 : TCPC_ROLE_CTRL_CC1,
TCPC_ROLE_CTRL_CC_OPEN);
}
@@ -200,25 +197,25 @@ static int tcpci_start_toggling(struct tcpc_dev *tcpc,
switch (cc) {
default:
case TYPEC_CC_RP_DEF:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_DEF);
break;
case TYPEC_CC_RP_1_5:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_1_5);
break;
case TYPEC_CC_RP_3_0:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_3_0);
break;
}
if (cc == TYPEC_CC_RD)
- reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD));
else
- reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP));
ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
if (ret < 0)
return ret;
@@ -241,12 +238,10 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
if (ret < 0)
return ret;
- *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
- TCPC_CC_STATUS_CC1_MASK,
+ *cc1 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC1, reg),
reg & TCPC_CC_STATUS_TERM ||
tcpc_presenting_rd(role_control, CC1));
- *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
- TCPC_CC_STATUS_CC2_MASK,
+ *cc2 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC2, reg),
reg & TCPC_CC_STATUS_TERM ||
tcpc_presenting_rd(role_control, CC2));
@@ -282,28 +277,28 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
reg = reg & ~TCPC_ROLE_CTRL_DRP;
if (polarity == TYPEC_POLARITY_CC2) {
- reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg &= ~TCPC_ROLE_CTRL_CC2;
/* Local port is source */
if (cc2 == TYPEC_CC_RD)
/* Role control would have the Rp setting when DRP was enabled */
- reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP);
else
- reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD);
} else {
- reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
+ reg &= ~TCPC_ROLE_CTRL_CC1;
/* Local port is source */
if (cc1 == TYPEC_CC_RD)
/* Role control would have the Rp setting when DRP was enabled */
- reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP);
else
- reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD);
}
}
if (polarity == TYPEC_POLARITY_CC2)
- reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN);
else
- reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT;
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN);
ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
if (ret < 0)
return ret;
@@ -461,7 +456,7 @@ static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached,
unsigned int reg;
int ret;
- reg = PD_REV20 << TCPC_MSG_HDR_INFO_REV_SHIFT;
+ reg = FIELD_PREP(TCPC_MSG_HDR_INFO_REV, PD_REV20);
if (role == TYPEC_SOURCE)
reg |= TCPC_MSG_HDR_INFO_PWR_ROLE;
if (data == TYPEC_HOST)
@@ -612,8 +607,11 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type
}
	/* nRetryCount is 3 in PD2.0 spec whereas 2 in PD3.0 spec */
- reg = ((negotiated_rev > PD_REV20 ? PD_RETRY_COUNT_3_0_OR_HIGHER : PD_RETRY_COUNT_DEFAULT)
- << TCPC_TRANSMIT_RETRY_SHIFT) | (type << TCPC_TRANSMIT_TYPE_SHIFT);
+ reg = FIELD_PREP(TCPC_TRANSMIT_RETRY,
+ (negotiated_rev > PD_REV20
+ ? PD_RETRY_COUNT_3_0_OR_HIGHER
+ : PD_RETRY_COUNT_DEFAULT));
+ reg |= FIELD_PREP(TCPC_TRANSMIT_TYPE, type);
ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg);
if (ret < 0)
return ret;
@@ -709,10 +707,13 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
{
u16 status;
int ret;
+ int irq_ret;
unsigned int raw;
tcpci_read16(tcpci, TCPC_ALERT, &status);
+ irq_ret = status & tcpci->alert_mask;
+process_status:
/*
* Clear alert status for everything except RX_STATUS, which shouldn't
* be cleared until we have successfully retrieved message.
@@ -785,7 +786,12 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
else if (status & TCPC_ALERT_TX_FAILED)
tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);
- return IRQ_RETVAL(status & tcpci->alert_mask);
+ tcpci_read16(tcpci, TCPC_ALERT, &status);
+
+ if (status & tcpci->alert_mask)
+ goto process_status;
+
+ return IRQ_RETVAL(irq_ret);
}
EXPORT_SYMBOL_GPL(tcpci_irq);
@@ -917,20 +923,22 @@ static int tcpci_probe(struct i2c_client *client)
chip->data.set_orientation = err;
- chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
- if (IS_ERR(chip->tcpci))
- return PTR_ERR(chip->tcpci);
-
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
_tcpci_irq,
- IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ IRQF_SHARED | IRQF_ONESHOT,
dev_name(&client->dev), chip);
- if (err < 0) {
- tcpci_unregister_port(chip->tcpci);
+ if (err < 0)
return err;
- }
- return 0;
+ /*
+	 * Disable the irq while registering the port. If the irq is configured
+	 * as an edge irq, this allows us to keep track of it and process it as
+	 * soon as it is re-enabled.
+ */
+ disable_irq(client->irq);
+ chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
+ enable_irq(client->irq);
+
+ return PTR_ERR_OR_ZERO(chip->tcpci);
}
static void tcpci_remove(struct i2c_client *client)
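Requesting the irq first but keeping it disabled while tcpci_register_port() runs means an edge that fires mid-registration is latched by the irq core and delivered on enable_irq(), instead of being lost or hitting a port that does not exist yet. A hedged sketch of the resulting shape; all example_* names are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>

struct example_port;
struct example_ctx { struct example_port *port; };
extern irqreturn_t example_irq_thread(int irq, void *data);
extern struct example_port *example_register_port(struct device *dev,
						  struct example_ctx *ctx);

static int example_probe_irq(struct device *dev, int irq, struct example_ctx *ctx)
{
	int err;

	err = devm_request_threaded_irq(dev, irq, NULL, example_irq_thread,
					IRQF_SHARED | IRQF_ONESHOT,
					dev_name(dev), ctx);
	if (err < 0)
		return err;

	/*
	 * Keep the irq masked while the port is created; an edge that
	 * fires in this window is latched and delivered by enable_irq().
	 */
	disable_irq(irq);
	ctx->port = example_register_port(dev, ctx);
	enable_irq(irq);

	return PTR_ERR_OR_ZERO(ctx->port);
}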
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
index 78ff3b73ee7e..76270d5c2838 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.h
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
@@ -20,28 +20,24 @@
#define SBUOVPDIS BIT(7)
#define CCOVPDIS BIT(6)
#define SBURPCTRL BIT(5)
-#define CCLPMODESEL_MASK GENMASK(4, 3)
-#define ULTRA_LOW_POWER_MODE BIT(3)
-#define CCRPCTRL_MASK GENMASK(2, 0)
+#define CCLPMODESEL GENMASK(4, 3)
+#define ULTRA_LOW_POWER_MODE 1
+#define CCRPCTRL GENMASK(2, 0)
#define UA_1_SRC 1
#define UA_80_SRC 3
#define TCPC_VENDOR_CC_CTRL3 0x8e
-#define CCWTRDEB_MASK GENMASK(7, 6)
-#define CCWTRDEB_SHIFT 6
+#define CCWTRDEB GENMASK(7, 6)
#define CCWTRDEB_1MS 1
-#define CCWTRSEL_MASK GENMASK(5, 3)
-#define CCWTRSEL_SHIFT 3
+#define CCWTRSEL GENMASK(5, 3)
#define CCWTRSEL_1V 0x4
#define CCLADDERDIS BIT(2)
-#define WTRCYCLE_MASK BIT(0)
-#define WTRCYCLE_SHIFT 0
+#define WTRCYCLE GENMASK(0, 0)
#define WTRCYCLE_2_4_S 0
#define WTRCYCLE_4_8_S 1
#define TCPC_VENDOR_ADC_CTRL1 0x91
-#define ADCINSEL_MASK GENMASK(7, 5)
-#define ADC_CHANNEL_OFFSET 5
+#define ADCINSEL GENMASK(7, 5)
#define ADCEN BIT(0)
enum contamiant_state {
@@ -85,6 +81,20 @@ static inline int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg
return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8));
}
-bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce);
+/**
+ * max_contaminant_is_contaminant - Test if CC was toggled due to contaminant
+ *
+ * @chip: Handle to a struct max_tcpci_chip
+ * @disconnect_while_debounce: Whether the disconnect was detected when CC
+ * pins were debouncing
+ * @cc_handled: Returns whether the update to CC status was handled here
+ *
+ * Determine if a contaminant was detected.
+ *
+ * Returns: true if a contaminant was detected, false otherwise. cc_handled
+ * is updated to reflect whether or not further CC handling is required.
+ */
+bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce,
+ bool *cc_handled);
#endif // TCPCI_MAXIM_H_
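The new out-parameter lets a caller separate "the contaminant logic consumed this CC event" from "fall through to normal CC handling". A minimal caller sketch under that contract, mirroring the tcpci_maxim_core.c call site updated later in this diff:

/* Sketch of a CC-status alert handler under the new contract. */
static void example_handle_cc_alert(struct max_tcpci_chip *chip)
{
	bool cc_handled = false;

	if (chip->contaminant_state == DETECTED ||
	    tcpm_port_is_toggling(chip->port)) {
		if (!max_contaminant_is_contaminant(chip, false, &cc_handled))
			tcpm_port_clean(chip->port);
	}

	if (!cc_handled)
		tcpm_cc_change(chip->port);	/* normal CC processing */
}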
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index 760e2f92b958..fd1b80593367 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -97,11 +97,13 @@ static void max_tcpci_init_regs(struct max_tcpci_chip *chip)
if (ret < 0)
return;
- alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_TX_FAILED |
- TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_RX_STATUS | TCPC_ALERT_CC_STATUS |
- TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | TCPC_ALERT_POWER_STATUS |
- /* Enable Extended alert for detecting Fast Role Swap Signal */
- TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS | TCPC_ALERT_FAULT;
+ alert_mask = (TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED |
+ TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST |
+ TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS |
+ TCPC_ALERT_CC_STATUS |
+ TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS |
+ TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF |
+ TCPC_ALERT_FAULT);
ret = max_tcpci_write16(chip, TCPC_ALERT_MASK, alert_mask);
if (ret < 0) {
@@ -191,9 +193,8 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
* Read complete, clear RX status alert bit.
* Clear overflow as well if set.
*/
- ret = max_tcpci_write16(chip, TCPC_ALERT, status & TCPC_ALERT_RX_BUF_OVF ?
- TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF :
- TCPC_ALERT_RX_STATUS);
+ ret = max_tcpci_write16(chip, TCPC_ALERT,
+ TCPC_ALERT_RX_STATUS | (status & TCPC_ALERT_RX_BUF_OVF));
if (ret < 0)
return;
@@ -295,9 +296,8 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
* be cleared until we have successfully retrieved message.
*/
if (status & ~TCPC_ALERT_RX_STATUS) {
- mask = status & TCPC_ALERT_RX_BUF_OVF ?
- status & ~(TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF) :
- status & ~TCPC_ALERT_RX_STATUS;
+ mask = status & ~(TCPC_ALERT_RX_STATUS
+ | (status & TCPC_ALERT_RX_BUF_OVF));
ret = max_tcpci_write16(chip, TCPC_ALERT, mask);
if (ret < 0) {
dev_err(chip->dev, "ALERT clear failed\n");
@@ -357,12 +357,14 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
tcpm_vbus_change(chip->port);
if (status & TCPC_ALERT_CC_STATUS) {
+ bool cc_handled = false;
+
if (chip->contaminant_state == DETECTED || tcpm_port_is_toggling(chip->port)) {
- if (!max_contaminant_is_contaminant(chip, false))
+ if (!max_contaminant_is_contaminant(chip, false, &cc_handled))
tcpm_port_clean(chip->port);
- } else {
- tcpm_cc_change(chip->port);
}
+ if (!cc_handled)
+ tcpm_cc_change(chip->port);
}
if (status & TCPC_ALERT_POWER_STATUS)
@@ -397,7 +399,7 @@ static irqreturn_t max_tcpci_irq(int irq, void *dev_id)
}
while (status) {
irq_return = _max_tcpci_irq(chip, status);
- /* Do not return if the ALERT is already set. */
+ /* Do not return if a (new) ALERT is set (again). */
ret = max_tcpci_read16(chip, TCPC_ALERT, &status);
if (ret < 0)
break;
@@ -455,8 +457,9 @@ static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data)
static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *tdata)
{
struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+ bool cc_handled;
- if (!max_contaminant_is_contaminant(chip, true))
+ if (!max_contaminant_is_contaminant(chip, true, &cc_handled))
tcpm_port_clean(chip->port);
}
@@ -472,6 +475,11 @@ static bool max_tcpci_attempt_vconn_swap_discovery(struct tcpci *tcpci, struct t
return true;
}
+static void max_tcpci_unregister_tcpci_port(void *tcpci)
+{
+ tcpci_unregister_port(tcpci);
+}
+
static int max_tcpci_probe(struct i2c_client *client)
{
int ret;
@@ -484,17 +492,17 @@ static int max_tcpci_probe(struct i2c_client *client)
chip->client = client;
chip->data.regmap = devm_regmap_init_i2c(client, &max_tcpci_regmap_config);
- if (IS_ERR(chip->data.regmap)) {
- dev_err(&client->dev, "Regmap init failed\n");
- return PTR_ERR(chip->data.regmap);
- }
+ if (IS_ERR(chip->data.regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(chip->data.regmap),
+ "Regmap init failed\n");
chip->dev = &client->dev;
i2c_set_clientdata(client, chip);
ret = max_tcpci_read8(chip, TCPC_POWER_STATUS, &power_status);
if (ret < 0)
- return ret;
+ return dev_err_probe(&client->dev, ret,
+ "Failed to read TCPC_POWER_STATUS\n");
/* Chip level tcpci callbacks */
chip->data.set_vbus = max_tcpci_set_vbus;
@@ -511,30 +519,25 @@ static int max_tcpci_probe(struct i2c_client *client)
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
- if (IS_ERR(chip->tcpci)) {
- dev_err(&client->dev, "TCPCI port registration failed\n");
- return PTR_ERR(chip->tcpci);
- }
+ if (IS_ERR(chip->tcpci))
+ return dev_err_probe(&client->dev, PTR_ERR(chip->tcpci),
+ "TCPCI port registration failed\n");
+
+ ret = devm_add_action_or_reset(&client->dev,
+ max_tcpci_unregister_tcpci_port,
+ chip->tcpci);
+ if (ret)
+ return ret;
+
chip->port = tcpci_get_tcpm_port(chip->tcpci);
+
ret = max_tcpci_init_alert(chip, client);
if (ret < 0)
- goto unreg_port;
+ return dev_err_probe(&client->dev, ret,
+ "IRQ initialization failed\n");
device_init_wakeup(chip->dev, true);
return 0;
-
-unreg_port:
- tcpci_unregister_port(chip->tcpci);
-
- return ret;
-}
-
-static void max_tcpci_remove(struct i2c_client *client)
-{
- struct max_tcpci_chip *chip = i2c_get_clientdata(client);
-
- if (!IS_ERR_OR_NULL(chip->tcpci))
- tcpci_unregister_port(chip->tcpci);
}
static const struct i2c_device_id max_tcpci_id[] = {
@@ -557,7 +560,6 @@ static struct i2c_driver max_tcpci_i2c_driver = {
.of_match_table = of_match_ptr(max_tcpci_of_match),
},
.probe = max_tcpci_probe,
- .remove = max_tcpci_remove,
.id_table = max_tcpci_id,
};
module_i2c_driver(max_tcpci_i2c_driver);
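The _max_tcpci_irq() mask rewrite is a branchless fold of the old conditional: status & TCPC_ALERT_RX_BUF_OVF contributes the overflow bit only when it is set. A small stand-alone C check of the equivalence, with illustrative bit positions:

#include <assert.h>
#include <stdint.h>

#define RX_STATUS	(1u << 2)
#define RX_BUF_OVF	(1u << 3)	/* illustrative positions */

static uint16_t mask_old(uint16_t status)
{
	return status & RX_BUF_OVF ?
	       status & ~(RX_STATUS | RX_BUF_OVF) :
	       status & ~RX_STATUS;
}

static uint16_t mask_new(uint16_t status)
{
	return status & ~(RX_STATUS | (status & RX_BUF_OVF));
}

int main(void)
{
	for (uint32_t s = 0; s <= 0xffff; s++)
		assert(mask_old(s) == mask_new(s));
	return 0;
}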
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 67422d45eb54..64f6dd0dc660 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -5,6 +5,7 @@
* Richtek RT1711H Type-C Chip Driver
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@@ -195,12 +196,10 @@ static inline int rt1711h_init_cc_params(struct rt1711h_chip *chip, u8 status)
if (ret < 0)
return ret;
- cc1 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC1_SHIFT) &
- TCPC_CC_STATUS_CC1_MASK,
+ cc1 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC1, status),
status & TCPC_CC_STATUS_TERM ||
tcpc_presenting_rd(role, CC1));
- cc2 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC2_SHIFT) &
- TCPC_CC_STATUS_CC2_MASK,
+ cc2 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC2, status),
status & TCPC_CC_STATUS_TERM ||
tcpc_presenting_rd(role, CC2));
@@ -233,25 +232,25 @@ static int rt1711h_start_drp_toggling(struct tcpci *tcpci,
switch (cc) {
default:
case TYPEC_CC_RP_DEF:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_DEF);
break;
case TYPEC_CC_RP_1_5:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_1_5);
break;
case TYPEC_CC_RP_3_0:
- reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
- TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
+ TCPC_ROLE_CTRL_RP_VAL_3_0);
break;
}
if (cc == TYPEC_CC_RD)
- reg |= (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD));
else
- reg |= (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
- (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
+ | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP));
ret = rt1711h_write8(chip, TCPC_ROLE_CTRL, reg);
if (ret < 0)
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index d532670885e4..7ee721a877c1 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -1460,8 +1460,9 @@ static void tps6598x_remove(struct i2c_client *client)
if (!client->irq)
cancel_delayed_work_sync(&tps->wq_poll);
+ else
+ devm_free_irq(tps->dev, client->irq, tps);
- devm_free_irq(tps->dev, client->irq, tps);
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
usb_role_switch_put(tps->role_sw);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 17155ed17fdf..e0f3925e401b 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -38,6 +38,10 @@
void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
{
+ /* Ignore bogus data in CCI if busy indicator is set. */
+ if (cci & UCSI_CCI_BUSY)
+ return;
+
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
@@ -99,24 +103,18 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
*cci = 0;
- /*
- * Below UCSI 2.0, MESSAGE_IN was limited to 16 bytes. Truncate the
- * reads here.
- */
- if (ucsi->version <= UCSI_VERSION_1_2)
- size = clamp(size, 0, 16);
+ if (size > UCSI_MAX_DATA_LENGTH(ucsi))
+ return -EINVAL;
ret = ucsi->ops->sync_control(ucsi, command);
- if (ret)
- return ret;
+ if (ucsi->ops->read_cci(ucsi, cci))
+ return -EIO;
- ret = ucsi->ops->read_cci(ucsi, cci);
+ if (*cci & UCSI_CCI_BUSY)
+ return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY;
if (ret)
return ret;
- if (*cci & UCSI_CCI_BUSY)
- return -EBUSY;
-
if (!(*cci & UCSI_CCI_COMMAND_COMPLETE))
return -EIO;
@@ -148,21 +146,10 @@ static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num)
int ret;
command = UCSI_GET_ERROR_STATUS | UCSI_CONNECTOR_NUMBER(connector_num);
- ret = ucsi_run_command(ucsi, command, &cci,
- &error, sizeof(error), false);
-
- if (cci & UCSI_CCI_BUSY) {
- ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false);
-
- return ret ? ret : -EBUSY;
- }
-
+ ret = ucsi_run_command(ucsi, command, &cci, &error, sizeof(error), false);
if (ret < 0)
return ret;
- if (cci & UCSI_CCI_ERROR)
- return -EIO;
-
switch (error) {
case UCSI_ERROR_INCOMPATIBLE_PARTNER:
return -EOPNOTSUPP;
@@ -238,9 +225,8 @@ static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd,
mutex_lock(&ucsi->ppm_lock);
ret = ucsi_run_command(ucsi, cmd, &cci, data, size, conn_ack);
- if (cci & UCSI_CCI_BUSY)
- ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false) ?: -EBUSY;
- else if (cci & UCSI_CCI_ERROR)
+
+ if (cci & UCSI_CCI_ERROR)
ret = ucsi_read_error(ucsi, connector_num);
mutex_unlock(&ucsi->ppm_lock);
@@ -752,104 +738,66 @@ static struct usb_power_delivery_capabilities *ucsi_get_pd_caps(struct ucsi_conn
&pd_caps);
}
-static int ucsi_read_identity(struct ucsi_connector *con, u8 recipient,
- u8 offset, u8 bytes, void *resp)
+static int ucsi_get_pd_message(struct ucsi_connector *con, u8 recipient,
+ size_t bytes, void *data, u8 type)
{
- struct ucsi *ucsi = con->ucsi;
+ size_t len = min(bytes, UCSI_MAX_DATA_LENGTH(con->ucsi));
u64 command;
+ u8 offset;
int ret;
- command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) |
- UCSI_CONNECTOR_NUMBER(con->num);
- command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient);
- command |= UCSI_GET_PD_MESSAGE_OFFSET(offset);
- command |= UCSI_GET_PD_MESSAGE_BYTES(bytes);
- command |= UCSI_GET_PD_MESSAGE_TYPE(UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
-
- ret = ucsi_send_command(ucsi, command, resp, bytes);
- if (ret < 0)
- dev_err(ucsi->dev, "UCSI_GET_PD_MESSAGE failed (%d)\n", ret);
-
- return ret;
-}
-
-static int ucsi_get_identity(struct ucsi_connector *con, u8 recipient,
- struct usb_pd_identity *id)
-{
- struct ucsi *ucsi = con->ucsi;
- struct ucsi_pd_message_disc_id resp = {};
- int ret;
-
- if (ucsi->version < UCSI_VERSION_2_0) {
- /*
- * Before UCSI v2.0, MESSAGE_IN is 16 bytes which cannot fit
- * the 28 byte identity response including the VDM header.
- * First request the VDM header, ID Header VDO, Cert Stat VDO
- * and Product VDO.
- */
- ret = ucsi_read_identity(con, recipient, 0, 0x10, &resp);
- if (ret < 0)
- return ret;
-
+ for (offset = 0; offset < bytes; offset += len) {
+ len = min(len, bytes - offset);
- /* Then request Product Type VDO1 through Product Type VDO3. */
- ret = ucsi_read_identity(con, recipient, 0x10, 0xc,
- &resp.vdo[0]);
- if (ret < 0)
- return ret;
+ command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient);
+ command |= UCSI_GET_PD_MESSAGE_OFFSET(offset);
+ command |= UCSI_GET_PD_MESSAGE_BYTES(len);
+ command |= UCSI_GET_PD_MESSAGE_TYPE(type);
- } else {
- /*
- * In UCSI v2.0 and after, MESSAGE_IN is large enough to request
- * the large enough to request the full Discover Identity
- * response at once.
- */
- ret = ucsi_read_identity(con, recipient, 0x0, 0x1c, &resp);
+ ret = ucsi_send_command(con->ucsi, command, data + offset, len);
if (ret < 0)
return ret;
}
- id->id_header = resp.id_header;
- id->cert_stat = resp.cert_stat;
- id->product = resp.product;
- id->vdo[0] = resp.vdo[0];
- id->vdo[1] = resp.vdo[1];
- id->vdo[2] = resp.vdo[2];
return 0;
}
static int ucsi_get_partner_identity(struct ucsi_connector *con)
{
+ u32 vdo[7] = {};
int ret;
- ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP,
- &con->partner_identity);
+ ret = ucsi_get_pd_message(con, UCSI_RECIPIENT_SOP, sizeof(vdo), vdo,
+ UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
if (ret < 0)
return ret;
+	/* The VDM header is not part of struct usb_pd_identity, so it is dropped. */
+ con->partner_identity = *(struct usb_pd_identity *)&vdo[1];
+
ret = typec_partner_set_identity(con->partner);
- if (ret < 0) {
- dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n",
- ret);
- }
+ if (ret < 0)
+ dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n", ret);
return ret;
}
static int ucsi_get_cable_identity(struct ucsi_connector *con)
{
+ u32 vdo[7] = {};
int ret;
- ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP_P,
- &con->cable_identity);
+ ret = ucsi_get_pd_message(con, UCSI_RECIPIENT_SOP_P, sizeof(vdo), vdo,
+ UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
if (ret < 0)
return ret;
+ con->cable_identity = *(struct usb_pd_identity *)&vdo[1];
+
ret = typec_cable_set_identity(con->cable);
- if (ret < 0) {
- dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n",
- ret);
- }
+ if (ret < 0)
+ dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n", ret);
return ret;
}
@@ -993,7 +941,8 @@ static int ucsi_register_cable(struct ucsi_connector *con)
break;
}
- desc.identity = &con->cable_identity;
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
+ desc.identity = &con->cable_identity;
desc.active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE & cable_prop.flags);
if (con->ucsi->version >= UCSI_VERSION_2_1)
@@ -1094,7 +1043,8 @@ static int ucsi_register_partner(struct ucsi_connector *con)
if (pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD)
ucsi_register_device_pdos(con);
- desc.identity = &con->partner_identity;
+ if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
+ desc.identity = &con->partner_identity;
desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
partner = typec_register_partner(con->port, &desc);
@@ -1249,6 +1199,10 @@ static void ucsi_handle_connector_change(struct work_struct *work)
mutex_lock(&con->lock);
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+ dev_err_once(ucsi->dev, "%s entered without EVENT_PENDING\n",
+ __func__);
+
command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
ret = ucsi_send_command_common(ucsi, command, &con->status,
@@ -1341,12 +1295,26 @@ EXPORT_SYMBOL_GPL(ucsi_connector_change);
/* -------------------------------------------------------------------------- */
+/*
+ * The Hard Reset bit field was defined with value 1 in UCSI spec version 1.0.
+ * Starting with spec version 1.1, the Hard Reset bit field was removed from the
+ * CONNECTOR_RESET command, until spec 2.0 reintroduced it with value 0. In effect,
+ * the value to pass to the command for a Hard Reset differs depending on the
+ * UCSI version supported by the LPM.
+ *
+ * To perform a Data Reset on LPMs supporting version 2.0 or greater, this
+ * function needs to be called with the second argument set to 0.
+ */
static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
{
u64 command;
command = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num);
- command |= hard ? UCSI_CONNECTOR_RESET_HARD : 0;
+
+ if (con->ucsi->version < UCSI_VERSION_1_1)
+ command |= hard ? UCSI_CONNECTOR_RESET_HARD_VER_1_0 : 0;
+ else if (con->ucsi->version >= UCSI_VERSION_2_0)
+ command |= hard ? 0 : UCSI_CONNECTOR_RESET_DATA_VER_2_0;
return ucsi_send_command(con->ucsi, command, NULL, 0);
}
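ucsi_get_pd_message() above reduces the two fixed-size identity reads to one chunking loop bounded by UCSI_MAX_DATA_LENGTH(), so the same code serves pre-2.0 (16-byte MESSAGE_IN) and 2.0+ PPMs. The core idiom, as a hedged sketch around a hypothetical read_chunk callback:

#include <linux/minmax.h>
#include <linux/types.h>

/* Hypothetical transport: reads at most len bytes at offset. */
int example_read_chunk(void *dev, size_t offset, void *buf, size_t len);

static int example_read_all(void *dev, void *data, size_t bytes, size_t max_len)
{
	size_t len = min(bytes, max_len);
	int ret;

	for (size_t offset = 0; offset < bytes; offset += len) {
		len = min(len, bytes - offset);	/* last chunk may be short */
		ret = example_read_chunk(dev, offset, data + offset, len);
		if (ret < 0)
			return ret;
	}
	return 0;
}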
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 5a3481d36d7a..4a017eb6a65b 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -29,6 +29,7 @@ struct dentry;
#define UCSIv2_MESSAGE_OUT 272
/* UCSI versions */
+#define UCSI_VERSION_1_1 0x0110
#define UCSI_VERSION_1_2 0x0120
#define UCSI_VERSION_2_0 0x0200
#define UCSI_VERSION_2_1 0x0210
@@ -122,7 +123,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_DEFAULT_GET_CONNECTOR_NUMBER(_cmd_) (((_cmd_) >> 16) & GENMASK(6, 0))
/* CONNECTOR_RESET command bits */
-#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
+#define UCSI_CONNECTOR_RESET_HARD_VER_1_0 BIT(23) /* Deprecated in v1.1 */
+#define UCSI_CONNECTOR_RESET_DATA_VER_2_0 BIT(23) /* Redefined in v2.0 */
+
/* ACK_CC_CI bits */
#define UCSI_ACK_CONNECTOR_CHANGE BIT(16)
@@ -344,47 +347,12 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 pwr_status[3];
-#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_[0]) & GENMASK(1, 0))
+ u8 pwr_status;
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(1, 0))
#define UCSI_CONSTAT_BC_NOT_CHARGING 0
#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
-#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_[0]) & GENMASK(5, 2)) >> 2)
-#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
-#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
-#define UCSI_CONSTAT_PROVIDER_PD_VERSION_OPER_MODE(_p_) \
- ((get_unaligned_le32(_p_) & GENMASK(21, 6)) >> 6)
-#define UCSI_CONSTAT_ORIENTATION(_p_) (((_p_[2]) & GENMASK(6, 6)) >> 6)
-#define UCSI_CONSTAT_ORIENTATION_DIRECT 0
-#define UCSI_CONSTAT_ORIENTATION_FLIPPED 1
-#define UCSI_CONSTAT_SINK_PATH_STATUS(_p_) (((_p_[2]) & GENMASK(7, 7)) >> 7)
-#define UCSI_CONSTAT_SINK_PATH_DISABLED 0
-#define UCSI_CONSTAT_SINK_PATH_ENABLED 1
- u8 pwr_readings[9];
-#define UCSI_CONSTAT_REV_CURR_PROT_STATUS(_p_) ((_p_[0]) & 0x1)
-#define UCSI_CONSTAT_PWR_READING_VALID(_p_) (((_p_[0]) & GENMASK(1, 1)) >> 1)
-#define UCSI_CONSTAT_CURRENT_SCALE(_p_) (((_p_[0]) & GENMASK(4, 2)) >> 2)
-#define UCSI_CONSTAT_PEAK_CURRENT(_p_) \
- ((get_unaligned_le32(_p_) & GENMASK(20, 5)) >> 5)
-#define UCSI_CONSTAT_AVG_CURRENT(_p_) \
- ((get_unaligned_le32(&(_p_)[2]) & GENMASK(20, 5)) >> 5)
-#define UCSI_CONSTAT_VOLTAGE_SCALE(_p_) \
- ((get_unaligned_le16(&(_p_)[4]) & GENMASK(8, 5)) >> 5)
-#define UCSI_CONSTAT_VOLTAGE_READING(_p_) \
- ((get_unaligned_le32(&(_p_)[5]) & GENMASK(16, 1)) >> 1)
-} __packed;
-
-/*
- * Data structure filled by PPM in response to GET_PD_MESSAGE command with the
- * Response Message Type set to Discover Identity Response.
- */
-struct ucsi_pd_message_disc_id {
- u32 vdm_header;
- u32 id_header;
- u32 cert_stat;
- u32 product;
- u32 vdo[3];
} __packed;
/* -------------------------------------------------------------------------- */
@@ -435,6 +403,8 @@ struct ucsi {
#define UCSI_DELAY_DEVICE_PDOS	BIT(1)	/* Reading PDOs fails until the partner is in PD mode */
};
+#define UCSI_MAX_DATA_LENGTH(u) (((u)->version < UCSI_VERSION_2_0) ? 0x10 : 0xff)
+
#define UCSI_MAX_SVID 5
#define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 6aace19d595b..03c0fa8edc8d 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -278,7 +278,7 @@ static void pmic_glink_ucsi_callback(const void *data, size_t len, void *priv)
case UC_UCSI_USBC_NOTIFY_IND:
schedule_work(&ucsi->notify_work);
break;
- };
+ }
}
static void pmic_glink_ucsi_pdr_notify(void *priv, int state)
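The dropped `};` was a null statement after the switch block: legal C, but noise that tools such as coccinelle's misc/semicolon.cocci report. For illustration:

void example(int val)
{
	switch (val) {
	case 0:
		break;
	}	/* no trailing ';' needed: the closing brace ends the statement */
}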
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 302a89aeb258..8dac1edc74d4 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -372,7 +372,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
pr_err(" ClearPortFeature: USB_PORT_FEAT_SUSPEND req not "
"supported for USB 3.0 roothub\n");
goto error;
@@ -388,7 +388,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" ClearPortFeature: USB_PORT_FEAT_POWER\n");
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
vhci_hcd->port_status[rhport] &= ~USB_SS_PORT_STAT_POWER;
else
vhci_hcd->port_status[rhport] &= ~USB_PORT_STAT_POWER;
@@ -404,19 +404,19 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetHubDescriptor:
usbip_dbg_vhci_rh(" GetHubDescriptor\n");
- if (hcd->speed == HCD_USB3 &&
+ if (hcd->speed >= HCD_USB3 &&
(wLength < USB_DT_SS_HUB_SIZE ||
wValue != (USB_DT_SS_HUB << 8))) {
pr_err("Wrong hub descriptor type for USB 3.0 roothub.\n");
goto error;
}
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
ss_hub_descriptor((struct usb_hub_descriptor *) buf);
else
hub_descriptor((struct usb_hub_descriptor *) buf);
break;
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
- if (hcd->speed != HCD_USB3)
+ if (hcd->speed < HCD_USB3)
goto error;
if ((wValue >> 8) != USB_DT_BOS)
@@ -503,7 +503,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_LINK_STATE:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_LINK_STATE\n");
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("USB_PORT_FEAT_LINK_STATE req not "
"supported for USB 2.0 roothub\n");
goto error;
@@ -521,7 +521,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_U2_TIMEOUT\n");
/* TODO: add suspend/resume support! */
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("USB_PORT_FEAT_U1/2_TIMEOUT req not "
"supported for USB 2.0 roothub\n");
goto error;
@@ -531,7 +531,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_SUSPEND\n");
/* Applicable only for USB2.0 hub */
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
pr_err("USB_PORT_FEAT_SUSPEND req not "
"supported for USB 3.0 roothub\n");
goto error;
@@ -551,7 +551,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
pr_err("invalid port number %d\n", wIndex);
goto error;
}
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
else
vhci_hcd->port_status[rhport] |= USB_PORT_STAT_POWER;
@@ -564,7 +564,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
/* Applicable only for USB3.0 hub */
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
"supported for USB 2.0 roothub\n");
goto error;
@@ -578,7 +578,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
/* if it's already enabled, disable */
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
vhci_hcd->port_status[rhport] = 0;
vhci_hcd->port_status[rhport] =
(USB_SS_PORT_STAT_POWER |
@@ -602,7 +602,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
if (wValue >= 32)
goto error;
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
vhci_hcd->port_status[rhport] |= (1 << wValue);
@@ -616,7 +616,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetPortErrorCount:
usbip_dbg_vhci_rh(" GetPortErrorCount\n");
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("GetPortErrorCount req not "
"supported for USB 2.0 roothub\n");
goto error;
@@ -626,7 +626,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case SetHubDepth:
usbip_dbg_vhci_rh(" SetHubDepth\n");
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
pr_err("SetHubDepth req not supported for "
"USB 2.0 roothub\n");
goto error;
@@ -646,7 +646,7 @@ error:
if (!invalid_rhport) {
dump_port_status_diff(prev_port_status[rhport],
vhci_hcd->port_status[rhport],
- hcd->speed == HCD_USB3);
+ hcd->speed >= HCD_USB3);
}
}
usbip_dbg_vhci_rh(" bye\n");
@@ -1157,8 +1157,8 @@ static int vhci_setup(struct usb_hcd *hcd)
} else {
vhci->vhci_hcd_ss = hcd_to_vhci_hcd(hcd);
vhci->vhci_hcd_ss->vhci = vhci;
- hcd->speed = HCD_USB3;
- hcd->self.root_hub->speed = USB_SPEED_SUPER;
+ hcd->speed = HCD_USB31;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
}
/*
@@ -1319,7 +1319,7 @@ static const struct hc_driver vhci_hc_driver = {
.product_desc = driver_desc,
.hcd_priv_size = sizeof(struct vhci_hcd),
- .flags = HCD_USB3 | HCD_SHARED,
+ .flags = HCD_USB31 | HCD_SHARED,
.reset = vhci_setup,
.start = vhci_start,
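The `== HCD_USB3` tests become `>=` because the HCD speed constants are ordered (HCD_USB3 < HCD_USB31 < HCD_USB32), so a "USB3-class roothub" check must also accept the new HCD_USB31 value. A sketch of the guard as a helper, with a hypothetical name:

#include <linux/usb/hcd.h>

/* Hypothetical helper: true for any USB3-class root hub (3.0, 3.1, ...). */
static inline bool example_hcd_is_usb3(const struct usb_hcd *hcd)
{
	return hcd->speed >= HCD_USB3;
}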
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index e2847cd3e6e3..d5865460e82d 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -283,6 +283,7 @@ static int valid_args(__u32 *pdev_nr, __u32 *rhport,
case USB_SPEED_HIGH:
case USB_SPEED_WIRELESS:
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
break;
default:
pr_err("Failed attach request for unsupported USB speed: %s\n",
@@ -349,7 +350,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
- if (speed == USB_SPEED_SUPER)
+ if (speed >= USB_SPEED_SUPER)
vdev = &vhci->vhci_hcd_ss->vdev[rhport];
else
vdev = &vhci->vhci_hcd_hs->vdev[rhport];
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 5265d09fc1c4..559fb9d3271f 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -11,8 +11,7 @@ if VDPA
config VDPA_SIM
tristate "vDPA device simulator core"
- depends on RUNTIME_TESTING_MENU && HAS_DMA
- select DMA_OPS
+ depends on RUNTIME_TESTING_MENU
select VHOST_RING
select IOMMU_IOVA
help
@@ -36,7 +35,12 @@ config VDPA_SIM_BLOCK
config VDPA_USER
tristate "VDUSE (vDPA Device in Userspace) support"
depends on EVENTFD && MMU && HAS_DMA
- select DMA_OPS
+ #
+ # This driver incorrectly tries to override the dma_ops. It should
+ # never have done that, but for now keep it working on architectures
+	# that use dma_ops.
+ #
+ depends on ARCH_HAS_DMA_OPS
select VHOST_IOTLB
select IOMMU_IOVA
help
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 0f347717021a..aa36de361c10 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -112,15 +112,12 @@ void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length);
u8 ifcvf_get_status(struct ifcvf_hw *hw);
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
-void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
void ifcvf_reset(struct ifcvf_hw *hw);
u64 ifcvf_get_dev_features(struct ifcvf_hw *hw);
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw);
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features);
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
-struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
-int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 50aac8fe57ef..2cedf7e2dbc4 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -83,10 +83,28 @@ enum {
MLX5_VDPA_NUM_AS = 2
};
+struct mlx5_vdpa_mr_resources {
+ struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+ unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+
+ /* Pre-deletion mr list */
+ struct list_head mr_list_head;
+
+ /* Deferred mr list */
+ struct list_head mr_gc_list_head;
+ struct workqueue_struct *wq_gc;
+ struct delayed_work gc_dwork_ent;
+
+ struct mutex lock;
+
+ atomic_t shutdown;
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
struct mlx5_vdpa_resources res;
+ struct mlx5_vdpa_mr_resources mres;
u64 mlx_features;
u64 actual_features;
@@ -95,14 +113,23 @@ struct mlx5_vdpa_dev {
u16 max_idx;
u32 generation;
- struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
- struct list_head mr_list_head;
- /* serialize mr access */
- struct mutex mr_mtx;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
- unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
bool suspended;
+
+ struct mlx5_async_ctx async_ctx;
+};
+
+struct mlx5_vdpa_async_cmd {
+ int err;
+ struct mlx5_async_work cb_work;
+ struct completion cmd_done;
+
+ void *in;
+ size_t inlen;
+
+ void *out;
+ size_t outlen;
};
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
@@ -121,7 +148,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
struct vhost_iotlb *iotlb);
+int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr);
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
@@ -134,6 +163,14 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
unsigned int asid);
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);
int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int num_cmds);
+
+#define mlx5_vdpa_err(__dev, format, ...) \
+ dev_err((__dev)->mdev->device, "%s:%d:(pid %d) error: " format, __func__, __LINE__, \
+ current->pid, ##__VA_ARGS__)
+
#define mlx5_vdpa_warn(__dev, format, ...) \
dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 4758914ccf86..2dd21e0b399e 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -49,17 +49,23 @@ static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
}
}
-static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
-{
- int inlen;
+struct mlx5_create_mkey_mem {
+ u8 out[MLX5_ST_SZ_BYTES(create_mkey_out)];
+ u8 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+ __be64 mtt[];
+};
+
+struct mlx5_destroy_mkey_mem {
+ u8 out[MLX5_ST_SZ_BYTES(destroy_mkey_out)];
+ u8 in[MLX5_ST_SZ_BYTES(destroy_mkey_in)];
+};
+
+static void fill_create_direct_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_create_mkey_mem *mem)
+{
+ void *in = &mem->in;
void *mkc;
- void *in;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -76,18 +82,36 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(mr->end - mr->start, mr->log_size));
populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
- err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
- kvfree(in);
- if (err) {
- mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
- return err;
- }
- return 0;
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+}
+
+static void create_direct_mr_end(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_create_mkey_mem *mem)
+{
+ u32 mkey_index = MLX5_GET(create_mkey_out, mem->out, mkey_index);
+
+ mr->mr = mlx5_idx_to_mkey(mkey_index);
+}
+
+static void fill_destroy_direct_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_direct_mr *mr,
+ struct mlx5_destroy_mkey_mem *mem)
+{
+ void *in = &mem->in;
+
+ MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->mr));
}
static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
+ if (!mr->mr)
+ return;
+
mlx5_vdpa_destroy_mkey(mvdev, mr->mr);
}
@@ -179,6 +203,123 @@ static int klm_byte_size(int nklms)
return 16 * ALIGN(nklms, 4);
}
+#define MLX5_VDPA_MTT_ALIGN 16
+
+static int create_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ struct mlx5_vdpa_async_cmd *cmds;
+ struct mlx5_vdpa_direct_mr *dmr;
+ int err = 0;
+ int i = 0;
+
+ cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);
+ if (!cmds)
+ return -ENOMEM;
+
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_create_mkey_mem *cmd_mem;
+ int mttlen, mttcount;
+
+ mttlen = roundup(MLX5_ST_SZ_BYTES(mtt) * dmr->nsg, MLX5_VDPA_MTT_ALIGN);
+ mttcount = mttlen / sizeof(cmd_mem->mtt[0]);
+ cmd_mem = kvcalloc(1, struct_size(cmd_mem, mtt, mttcount), GFP_KERNEL);
+ if (!cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ cmds[i].out = cmd_mem->out;
+ cmds[i].outlen = sizeof(cmd_mem->out);
+ cmds[i].in = cmd_mem->in;
+ cmds[i].inlen = struct_size(cmd_mem, mtt, mttcount);
+
+ fill_create_direct_mr(mvdev, dmr, cmd_mem);
+
+ i++;
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing MTT mkey creation for direct mrs: %d\n", err);
+ goto done;
+ }
+
+ i = 0;
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];
+ struct mlx5_create_mkey_mem *cmd_mem;
+
+ cmd_mem = container_of(cmd->out, struct mlx5_create_mkey_mem, out);
+
+ if (!cmd->err) {
+ create_direct_mr_end(mvdev, dmr, cmd_mem);
+ } else {
+ err = err ? err : cmd->err;
+ mlx5_vdpa_err(mvdev, "error creating MTT mkey [0x%llx, 0x%llx]: %d\n",
+ dmr->start, dmr->end, cmd->err);
+ }
+ }
+
+done:
+	for (i = i - 1; i >= 0; i--) {
+ struct mlx5_create_mkey_mem *cmd_mem;
+
+ cmd_mem = container_of(cmds[i].out, struct mlx5_create_mkey_mem, out);
+ kvfree(cmd_mem);
+ }
+
+ kvfree(cmds);
+ return err;
+}
+
+DEFINE_FREE(free_cmds, struct mlx5_vdpa_async_cmd *, kvfree(_T))
+DEFINE_FREE(free_cmd_mem, struct mlx5_destroy_mkey_mem *, kvfree(_T))
+
+static int destroy_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+ struct mlx5_destroy_mkey_mem *cmd_mem __free(free_cmd_mem) = NULL;
+ struct mlx5_vdpa_async_cmd *cmds __free(free_cmds) = NULL;
+ struct mlx5_vdpa_direct_mr *dmr;
+ int err = 0;
+ int i = 0;
+
+ cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(mr->num_directs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem)
+ return -ENOMEM;
+
+ list_for_each_entry(dmr, &mr->head, list) {
+ cmds[i].out = cmd_mem[i].out;
+ cmds[i].outlen = sizeof(cmd_mem[i].out);
+ cmds[i].in = cmd_mem[i].in;
+ cmds[i].inlen = sizeof(cmd_mem[i].in);
+ fill_destroy_direct_mr(mvdev, dmr, &cmd_mem[i]);
+ i++;
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing MTT mkey deletion for direct mrs: %d\n", err);
+ return err;
+ }
+
+ i = 0;
+ list_for_each_entry(dmr, &mr->head, list) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];
+
+ dmr->mr = 0;
+ if (cmd->err) {
+ err = err ? err : cmd->err;
+ mlx5_vdpa_err(mvdev, "error deleting MTT mkey [0x%llx, 0x%llx]: %d\n",
+ dmr->start, dmr->end, cmd->err);
+ }
+ }
+
+ return err;
+}
+
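destroy_direct_keys() leans on DEFINE_FREE()/__free() scope-based cleanup, which is why neither error return needs an explicit kvfree(). A minimal sketch of the mechanism with a hypothetical allocation; kvfree(NULL) is a no-op, so the cleanup is safe even on the allocation-failure return:

#include <linux/cleanup.h>
#include <linux/slab.h>

DEFINE_FREE(example_free, void *, kvfree(_T))

static int example_use_buffer(size_t n)
{
	void *buf __free(example_free) = kvzalloc(n, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf; kvfree(buf) runs automatically on every return ... */
	return 0;
}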
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
int inlen;
@@ -279,14 +420,8 @@ done:
goto err_map;
}
- err = create_direct_mr(mvdev, mr);
- if (err)
- goto err_direct;
-
return 0;
-err_direct:
- dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
sg_free_table(&mr->sg_head);
return err;
@@ -401,6 +536,10 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
if (err)
goto err_chain;
+ err = create_direct_keys(mvdev, mr);
+ if (err)
+ goto err_chain;
+
/* Create the memory key that defines the guests's address space. This
* memory key refers to the direct keys that contain the MTT
* translations
@@ -489,6 +628,7 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
struct mlx5_vdpa_direct_mr *n;
destroy_indirect_key(mvdev, mr);
+ destroy_direct_keys(mvdev, mr);
list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
list_del_init(&dmr->list);
unmap_direct_mr(mvdev, dmr);
@@ -513,22 +653,58 @@ static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_
kfree(mr);
}
+/* There can be multiple .set_map() operations in quick succession.
+ * This large delay is a simple way to prevent the MR cleanup from blocking
+ * .set_map() MR creation in this scenario.
+ */
+#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
+
+static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
+{
+ struct mlx5_vdpa_mr_resources *mres;
+ struct mlx5_vdpa_mr *mr, *tmp;
+ struct mlx5_vdpa_dev *mvdev;
+
+ mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);
+
+ if (atomic_read(&mres->shutdown)) {
+ mutex_lock(&mres->lock);
+ } else if (!mutex_trylock(&mres->lock)) {
+ queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+ msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+ return;
+ }
+
+ mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);
+
+ list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
+ _mlx5_vdpa_destroy_mr(mvdev, mr);
+ }
+
+ mutex_unlock(&mres->lock);
+}
+
static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
if (!mr)
return;
- if (refcount_dec_and_test(&mr->refcount))
- _mlx5_vdpa_destroy_mr(mvdev, mr);
+ if (refcount_dec_and_test(&mr->refcount)) {
+ list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
+ queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
+ msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
+ }
}
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_put_mr(mvdev, mr);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
@@ -543,44 +719,47 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_get_mr(mvdev, mr);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *new_mr,
unsigned int asid)
{
- struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];
+ struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid];
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
_mlx5_vdpa_put_mr(mvdev, old_mr);
- mvdev->mr[asid] = new_mr;
+ mvdev->mres.mr[asid] = new_mr;
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_mr *mr;
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
- list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
+ list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) {
mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
"mr: %p, mkey: 0x%x, refcount: %u\n",
mr, mr->mkey, refcount_read(&mr->refcount));
}
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
}
-void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
+void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev)
{
+ if (!mvdev->res.valid)
+ return;
+
for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
mlx5_vdpa_update_mr(mvdev, NULL, i);
@@ -613,7 +792,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (err)
goto err_iotlb;
- list_add_tail(&mr->mr_list, &mvdev->mr_list_head);
+ list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head);
return 0;
@@ -639,9 +818,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (!mr)
return ERR_PTR(-ENOMEM);
- mutex_lock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_unlock(&mvdev->mres.lock);
if (err)
goto out_err;
@@ -661,7 +840,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
{
int err;
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+ if (mvdev->mres.group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
return 0;
spin_lock(&mvdev->cvq.iommu_lock);
@@ -703,3 +882,33 @@ int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
return 0;
}
+
+int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
+ mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
+ if (!mres->wq_gc)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);
+
+ mutex_init(&mres->lock);
+
+ INIT_LIST_HEAD(&mres->mr_list_head);
+ INIT_LIST_HEAD(&mres->mr_gc_list_head);
+
+ return 0;
+}
+
+void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+
+ atomic_set(&mres->shutdown, 1);
+
+ flush_delayed_work(&mres->gc_dwork_ent);
+ destroy_workqueue(mres->wq_gc);
+ mres->wq_gc = NULL;
+ mutex_destroy(&mres->lock);
+}
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 5c5a41b64bfc..aeae31d0cefa 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -256,7 +256,6 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
mlx5_vdpa_warn(mvdev, "resources already allocated\n");
return -EINVAL;
}
- mutex_init(&mvdev->mr_mtx);
res->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(res->uar)) {
err = PTR_ERR(res->uar);
@@ -301,7 +300,6 @@ err_pd:
err_uctx:
mlx5_put_uars_page(mdev, res->uar);
err_uars:
- mutex_destroy(&mvdev->mr_mtx);
return err;
}
@@ -318,6 +316,78 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
dealloc_pd(mvdev, res->pdn, res->uid);
destroy_uctx(mvdev, res->uid);
mlx5_put_uars_page(mvdev->mdev, res->uar);
- mutex_destroy(&mvdev->mr_mtx);
res->valid = false;
}
+
+static void virtqueue_cmd_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5_vdpa_async_cmd *cmd =
+ container_of(context, struct mlx5_vdpa_async_cmd, cb_work);
+
+ cmd->err = mlx5_cmd_check(context->ctx->dev, status, cmd->in, cmd->out);
+ complete(&cmd->cmd_done);
+}
+
+static int issue_async_cmd(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int issued,
+ int *completed)
+{
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[issued];
+ int err;
+
+retry:
+ err = mlx5_cmd_exec_cb(&mvdev->async_ctx,
+ cmd->in, cmd->inlen,
+ cmd->out, cmd->outlen,
+ virtqueue_cmd_callback,
+ &cmd->cb_work);
+ if (err == -EBUSY) {
+ if (*completed < issued) {
+ /* Throttled by own commands: wait for oldest completion. */
+ wait_for_completion(&cmds[*completed].cmd_done);
+ (*completed)++;
+
+ goto retry;
+ } else {
+ /* Throttled by external commands: switch to the sync API. */
+ err = mlx5_cmd_exec(mvdev->mdev,
+ cmd->in, cmd->inlen,
+ cmd->out, cmd->outlen);
+ if (!err)
+ (*completed)++;
+ }
+ }
+
+ return err;
+}
+
+int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_async_cmd *cmds,
+ int num_cmds)
+{
+ int completed = 0;
+ int issued = 0;
+ int err = 0;
+
+ for (int i = 0; i < num_cmds; i++)
+ init_completion(&cmds[i].cmd_done);
+
+ while (issued < num_cmds) {
+ err = issue_async_cmd(mvdev, cmds, issued, &completed);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing command %d of %d: %d\n",
+ issued, num_cmds, err);
+ break;
+ }
+
+ issued++;
+ }
+
+ while (completed < issued)
+ wait_for_completion(&cmds[completed++].cmd_done);
+
+ return err;
+}
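A caller drives this batched API by pointing each mlx5_vdpa_async_cmd at its own in/out buffers, issuing the whole array at once, and then harvesting per-command status from cmd->err — the function's return value only reports issue-time failures. A hedged usage sketch (the flat buffer layout is illustrative, and the driver headers are assumed):

static int exec_batch_example(struct mlx5_vdpa_dev *mvdev,
                              void *in_bufs, int inlen,
                              void *out_bufs, int outlen, int n)
{
    struct mlx5_vdpa_async_cmd *cmds;
    int err, i;

    cmds = kvcalloc(n, sizeof(*cmds), GFP_KERNEL);
    if (!cmds)
        return -ENOMEM;

    for (i = 0; i < n; i++) {
        cmds[i].in = in_bufs + i * inlen;       /* pre-filled command */
        cmds[i].inlen = inlen;
        cmds[i].out = out_bufs + i * outlen;
        cmds[i].outlen = outlen;
    }

    err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, n);

    /* An issue-level success still requires a per-command check. */
    for (i = 0; i < n && !err; i++)
        err = cmds[i].err;

    kvfree(cmds);
    return err;
}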
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index fa78e8288ebb..dee019977716 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -941,11 +941,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
- vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr &&
MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
@@ -953,11 +953,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
/* If there is no mr update, make sure that the existing ones are set
* modify to ready.
*/
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
- vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
}
@@ -1184,40 +1184,92 @@ struct mlx5_virtq_attr {
u16 used_index;
};
-static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
- struct mlx5_virtq_attr *attr)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
- u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
- void *out;
- void *obj_context;
- void *cmd_hdr;
- int err;
+struct mlx5_virtqueue_query_mem {
+ u8 in[MLX5_ST_SZ_BYTES(query_virtio_net_q_in)];
+ u8 out[MLX5_ST_SZ_BYTES(query_virtio_net_q_out)];
+};
- out = kzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+struct mlx5_virtqueue_modify_mem {
+ u8 in[MLX5_ST_SZ_BYTES(modify_virtio_net_q_in)];
+ u8 out[MLX5_ST_SZ_BYTES(modify_virtio_net_q_out)];
+};
- cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+static void fill_query_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ struct mlx5_virtqueue_query_mem *cmd)
+{
+ void *cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
- err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
- if (err)
- goto err_cmd;
+}
+
+static void query_virtqueue_end(struct mlx5_vdpa_net *ndev,
+ struct mlx5_virtqueue_query_mem *cmd,
+ struct mlx5_virtq_attr *attr)
+{
+ void *obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, cmd->out, obj_context);
- obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
- kfree(out);
- return 0;
+}
-err_cmd:
- kfree(out);
+static int query_virtqueues(struct mlx5_vdpa_net *ndev,
+ int start_vq,
+ int num_vqs,
+ struct mlx5_virtq_attr *attrs)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_virtqueue_query_mem *cmd_mem;
+ struct mlx5_vdpa_async_cmd *cmds;
+ int err = 0;
+
+ WARN(start_vq + num_vqs > mvdev->max_vqs, "query vq range invalid [%d, %d), max_vqs: %u\n",
+ start_vq, start_vq + num_vqs, mvdev->max_vqs);
+
+ cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (int i = 0; i < num_vqs; i++) {
+ cmds[i].in = &cmd_mem[i].in;
+ cmds[i].inlen = sizeof(cmd_mem[i].in);
+ cmds[i].out = &cmd_mem[i].out;
+ cmds[i].outlen = sizeof(cmd_mem[i].out);
+ fill_query_virtqueue_cmd(ndev, &ndev->vqs[start_vq + i], &cmd_mem[i]);
+ }
+
+ err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
+ if (err) {
+ mlx5_vdpa_err(mvdev, "error issuing query cmd for vq range [%d, %d): %d\n",
+ start_vq, start_vq + num_vqs, err);
+ goto done;
+ }
+
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ int vq_idx = start_vq + i;
+
+ if (cmd->err) {
+ mlx5_vdpa_err(mvdev, "query vq %d failed, err: %d\n", vq_idx, err);
+ if (!err)
+ err = cmd->err;
+ continue;
+ }
+
+ query_virtqueue_end(ndev, &cmd_mem[i], &attrs[i]);
+ }
+
+done:
+ kvfree(cmd_mem);
+ kvfree(cmds);
return err;
}
@@ -1251,51 +1303,30 @@ static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq)
return true;
}
-static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
- struct mlx5_vdpa_virtqueue *mvq,
- int state)
+static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state,
+ struct mlx5_virtqueue_modify_mem *cmd)
{
- int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
- u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
struct mlx5_vdpa_mr *desc_mr = NULL;
struct mlx5_vdpa_mr *vq_mr = NULL;
- bool state_change = false;
void *obj_context;
void *cmd_hdr;
void *vq_ctx;
- void *in;
- int err;
- if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
- return 0;
-
- if (!modifiable_virtqueue_fields(mvq))
- return -EINVAL;
-
- in = kzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+ cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, general_obj_in_cmd_hdr);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
- obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
+ obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, cmd->in, obj_context);
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
- if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
- if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
- err = -EINVAL;
- goto done;
- }
-
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
MLX5_SET(virtio_net_q_object, obj_context, state, state);
- state_change = true;
- }
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) {
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
@@ -1323,7 +1354,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
- vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+ vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
@@ -1332,7 +1363,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
- desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+ desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
@@ -1341,38 +1372,36 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
}
MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
- err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
- if (err)
- goto done;
+}
- if (state_change)
- mvq->fw_state = state;
+static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state)
+{
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+ unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP];
+ struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid];
+
mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
mlx5_vdpa_get_mr(mvdev, vq_mr);
mvq->vq_mr = vq_mr;
}
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+ unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
+ struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid];
+
mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
mlx5_vdpa_get_mr(mvdev, desc_mr);
mvq->desc_mr = desc_mr;
}
- mvq->modified_fields = 0;
-
-done:
- kfree(in);
- return err;
-}
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
+ mvq->fw_state = state;
-static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev,
- struct mlx5_vdpa_virtqueue *mvq,
- unsigned int state)
-{
- mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
- return modify_virtqueue(ndev, mvq, state);
+ mvq->modified_fields = 0;
}
static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -1525,53 +1554,136 @@ err_fwqp:
return err;
}
-static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int modify_virtqueues(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs, int state)
{
- struct mlx5_virtq_attr attr;
- int err;
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_virtqueue_modify_mem *cmd_mem;
+ struct mlx5_vdpa_async_cmd *cmds;
+ int err = 0;
- if (!mvq->initialized)
- return 0;
+ WARN(start_vq + num_vqs > mvdev->max_vqs, "modify vq range invalid [%d, %d), max_vqs: %u\n",
+ start_vq, start_vq + num_vqs, mvdev->max_vqs);
- if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
- return 0;
+ cmds = kvcalloc(num_vqs, sizeof(*cmds), GFP_KERNEL);
+ cmd_mem = kvcalloc(num_vqs, sizeof(*cmd_mem), GFP_KERNEL);
+ if (!cmds || !cmd_mem) {
+ err = -ENOMEM;
+ goto done;
+ }
- err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
- if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed, err: %d\n", err);
- return err;
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ struct mlx5_vdpa_virtqueue *mvq;
+ int vq_idx = start_vq + i;
+
+ mvq = &ndev->vqs[vq_idx];
+
+ if (!modifiable_virtqueue_fields(mvq)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (mvq->fw_state != state) {
+ if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
+ }
+
+ cmd->in = &cmd_mem[i].in;
+ cmd->inlen = sizeof(cmd_mem[i].in);
+ cmd->out = &cmd_mem[i].out;
+ cmd->outlen = sizeof(cmd_mem[i].out);
+ fill_modify_virtqueue_cmd(ndev, mvq, state, &cmd_mem[i]);
}
- err = query_virtqueue(ndev, mvq, &attr);
+ err = mlx5_vdpa_exec_async_cmds(&ndev->mvdev, cmds, num_vqs);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue, err: %d\n", err);
- return err;
+ mlx5_vdpa_err(mvdev, "error issuing modify cmd for vq range [%d, %d)\n",
+ start_vq, start_vq + num_vqs);
+ goto done;
}
- mvq->avail_idx = attr.available_index;
- mvq->used_idx = attr.used_index;
+ for (int i = 0; i < num_vqs; i++) {
+ struct mlx5_vdpa_async_cmd *cmd = &cmds[i];
+ struct mlx5_vdpa_virtqueue *mvq;
+ int vq_idx = start_vq + i;
- return 0;
+ mvq = &ndev->vqs[vq_idx];
+
+ if (cmd->err) {
+ mlx5_vdpa_err(mvdev, "modify vq %d failed, state: %d -> %d, err: %d\n",
+ vq_idx, mvq->fw_state, state, cmd->err);
+ if (!err)
+ err = cmd->err;
+ continue;
+ }
+
+ modify_virtqueue_end(ndev, mvq, state);
+ }
+
+done:
+ kvfree(cmd_mem);
+ kvfree(cmds);
+ return err;
}
-static int suspend_vqs(struct mlx5_vdpa_net *ndev)
+static int suspend_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
{
- int err = 0;
- int i;
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_virtq_attr *attrs;
+ int vq_idx, i;
+ int err;
+
+ if (start_vq >= ndev->cur_num_vqs)
+ return -EINVAL;
+
+ mvq = &ndev->vqs[start_vq];
+ if (!mvq->initialized)
+ return 0;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+ return 0;
+
+ err = modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND);
+ if (err)
+ return err;
+
+ attrs = kcalloc(num_vqs, sizeof(struct mlx5_virtq_attr), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
- for (i = 0; i < ndev->cur_num_vqs; i++) {
- int local_err = suspend_vq(ndev, &ndev->vqs[i]);
+ err = query_virtqueues(ndev, start_vq, num_vqs, attrs);
+ if (err)
+ goto done;
- err = local_err ? local_err : err;
+ for (i = 0, vq_idx = start_vq; i < num_vqs; i++, vq_idx++) {
+ mvq = &ndev->vqs[vq_idx];
+ mvq->avail_idx = attrs[i].available_index;
+ mvq->used_idx = attrs[i].used_index;
}
+done:
+ kfree(attrs);
return err;
}
-static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ return suspend_vqs(ndev, mvq->index, 1);
+}
+
+static int resume_vqs(struct mlx5_vdpa_net *ndev, int start_vq, int num_vqs)
{
+ struct mlx5_vdpa_virtqueue *mvq;
int err;
+ if (start_vq >= ndev->mvdev.max_vqs)
+ return -EINVAL;
+
+ mvq = &ndev->vqs[start_vq];
if (!mvq->initialized)
return 0;
@@ -1583,13 +1695,9 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
/* Due to a FW quirk we need to modify the VQ fields first then change state.
* This should be fixed soon. After that, a single command can be used.
*/
- err = modify_virtqueue(ndev, mvq, 0);
- if (err) {
- mlx5_vdpa_warn(&ndev->mvdev,
- "modify vq properties failed for vq %u, err: %d\n",
- mvq->index, err);
+ err = modify_virtqueues(ndev, start_vq, num_vqs, mvq->fw_state);
+ if (err)
return err;
- }
break;
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
if (!is_resumable(ndev)) {
@@ -1600,30 +1708,17 @@ static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
return 0;
default:
- mlx5_vdpa_warn(&ndev->mvdev, "resume vq %u called from bad state %d\n",
+ mlx5_vdpa_err(&ndev->mvdev, "resume vq %u called from bad state %d\n",
mvq->index, mvq->fw_state);
return -EINVAL;
}
- err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
- if (err)
- mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u, err: %d\n",
- mvq->index, err);
-
- return err;
+ return modify_virtqueues(ndev, start_vq, num_vqs, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
}
-static int resume_vqs(struct mlx5_vdpa_net *ndev)
+static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
- int err = 0;
-
- for (int i = 0; i < ndev->cur_num_vqs; i++) {
- int local_err = resume_vq(ndev, &ndev->vqs[i]);
-
- err = local_err ? local_err : err;
- }
-
- return err;
+ return resume_vqs(ndev, mvq->index, 1);
}
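suspend_vq() and resume_vq() are now one-element wrappers, so call sites that used to loop per queue collapse into a single ranged call — and thus one batched firmware exchange. Illustrative fragment only; this is the shape the suspend/resume call sites below take:

err = suspend_vqs(ndev, 0, ndev->cur_num_vqs);  /* one batched modify */
if (!err) {
    /* ...remap or reconfigure while quiesced... */
    err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
}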
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
@@ -2002,13 +2097,13 @@ static int setup_steering(struct mlx5_vdpa_net *ndev)
ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
if (!ns) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+ mlx5_vdpa_err(&ndev->mvdev, "failed to get flow namespace\n");
return -EOPNOTSUPP;
}
ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ndev->rxft)) {
- mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
+ mlx5_vdpa_err(&ndev->mvdev, "failed to create flow table\n");
return PTR_ERR(ndev->rxft);
}
mlx5_vdpa_add_rx_flow_table(ndev);
@@ -2124,45 +2219,48 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- int cur_qps = ndev->cur_num_vqs / 2;
+ int cur_vqs = ndev->cur_num_vqs;
+ int new_vqs = newqps * 2;
int err;
int i;
- if (cur_qps > newqps) {
- err = modify_rqt(ndev, 2 * newqps);
+ if (cur_vqs > new_vqs) {
+ err = modify_rqt(ndev, new_vqs);
if (err)
return err;
- for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) {
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i];
-
- if (is_resumable(ndev))
- suspend_vq(ndev, mvq);
- else
- teardown_vq(ndev, mvq);
+ if (is_resumable(ndev)) {
+ suspend_vqs(ndev, new_vqs, cur_vqs - new_vqs);
+ } else {
+ for (i = new_vqs; i < cur_vqs; i++)
+ teardown_vq(ndev, &ndev->vqs[i]);
}
- ndev->cur_num_vqs = 2 * newqps;
+ ndev->cur_num_vqs = new_vqs;
} else {
- ndev->cur_num_vqs = 2 * newqps;
- for (i = cur_qps * 2; i < 2 * newqps; i++) {
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i];
+ ndev->cur_num_vqs = new_vqs;
- err = mvq->initialized ? resume_vq(ndev, mvq) : setup_vq(ndev, mvq, true);
+ for (i = cur_vqs; i < new_vqs; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i], false);
if (err)
goto clean_added;
}
- err = modify_rqt(ndev, 2 * newqps);
+
+ err = resume_vqs(ndev, cur_vqs, new_vqs - cur_vqs);
+ if (err)
+ goto clean_added;
+
+ err = modify_rqt(ndev, new_vqs);
if (err)
goto clean_added;
}
return 0;
clean_added:
- for (--i; i >= 2 * cur_qps; --i)
+ for (--i; i >= cur_vqs; --i)
teardown_vq(ndev, &ndev->vqs[i]);
- ndev->cur_num_vqs = 2 * cur_qps;
+ ndev->cur_num_vqs = cur_vqs;
return err;
}
@@ -2528,9 +2626,9 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
return 0;
}
- err = query_virtqueue(ndev, mvq, &attr);
+ err = query_virtqueues(ndev, mvq->index, 1, &attr);
if (err) {
- mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
+ mlx5_vdpa_err(mvdev, "failed to query virtqueue\n");
return err;
}
state->split.avail_index = attr.used_index;
@@ -2755,6 +2853,9 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
struct mlx5_eqe *eqe = param;
int ret = NOTIFY_DONE;
+ if (ndev->mvdev.suspended)
+ return NOTIFY_DONE;
+
if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
@@ -2879,7 +2980,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
int err;
if (mvq->initialized) {
- err = query_virtqueue(ndev, mvq, &attr);
+ err = query_virtqueues(ndev, mvq->index, 1, &attr);
if (err)
return err;
}
@@ -2948,7 +3049,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
bool teardown = !is_resumable(ndev);
int err;
- suspend_vqs(ndev);
+ suspend_vqs(ndev, 0, ndev->cur_num_vqs);
if (teardown) {
err = save_channels_info(ndev);
if (err)
@@ -2973,7 +3074,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
return err;
}
- resume_vqs(ndev);
+ resume_vqs(ndev, 0, ndev->cur_num_vqs);
return 0;
}
@@ -3097,7 +3198,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
teardown_vq_resources(ndev);
if (ndev->setup) {
- err = resume_vqs(ndev);
+ err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to resume VQs\n");
goto err_driver;
@@ -3122,7 +3223,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
err_driver:
unregister_link_notifier(ndev);
err_setup:
- mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_vdpa_clean_mrs(&ndev->mvdev);
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
err_clear:
up_write(&ndev->reslock);
@@ -3134,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
/* default mapping all groups are mapped to asid 0 */
for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
- mvdev->group2asid[i] = 0;
+ mvdev->mres.group2asid[i] = 0;
}
static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
@@ -3174,7 +3275,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
}
if (flags & VDPA_RESET_F_CLEAN_MAP)
- mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_vdpa_clean_mrs(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.suspended = false;
ndev->cur_num_vqs = MLX5V_DEFAULT_VQ_COUNT;
@@ -3189,7 +3290,7 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
if ((flags & VDPA_RESET_F_CLEAN_MAP) &&
MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_dma_mr(mvdev))
- mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ mlx5_vdpa_err(mvdev, "create MR failed\n");
}
if (vq_reset)
setup_vq_resources(ndev, false);
@@ -3244,7 +3345,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
if (IS_ERR(new_mr)) {
err = PTR_ERR(new_mr);
- mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err);
+ mlx5_vdpa_err(mvdev, "create map failed(%d)\n", err);
return err;
}
} else {
@@ -3252,12 +3353,12 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = NULL;
}
- if (!mvdev->mr[asid]) {
+ if (!mvdev->mres.mr[asid]) {
mlx5_vdpa_update_mr(mvdev, new_mr, asid);
} else {
err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
if (err) {
- mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err);
+ mlx5_vdpa_err(mvdev, "change map failed(%d)\n", err);
goto out_err;
}
}
@@ -3332,7 +3433,10 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
ndev = to_mlx5_vdpa_ndev(mvdev);
free_fixed_resources(ndev);
- mlx5_vdpa_destroy_mr_resources(mvdev);
+ mlx5_vdpa_clean_mrs(mvdev);
+ mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
+ mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
+
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -3500,8 +3604,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
mlx5_vdpa_info(mvdev, "suspending device\n");
down_write(&ndev->reslock);
- unregister_link_notifier(ndev);
- err = suspend_vqs(ndev);
+ err = suspend_vqs(ndev, 0, ndev->cur_num_vqs);
mlx5_vdpa_cvq_suspend(mvdev);
mvdev->suspended = true;
up_write(&ndev->reslock);
@@ -3521,8 +3624,8 @@ static int mlx5_vdpa_resume(struct vdpa_device *vdev)
down_write(&ndev->reslock);
mvdev->suspended = false;
- err = resume_vqs(ndev);
- register_link_notifier(ndev);
+ err = resume_vqs(ndev, 0, ndev->cur_num_vqs);
+ queue_link_work(ndev);
up_write(&ndev->reslock);
return err;
@@ -3537,12 +3640,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
if (group >= MLX5_VDPA_NUMVQ_GROUPS)
return -EINVAL;
- mvdev->group2asid[group] = asid;
+ mvdev->mres.group2asid[group] = asid;
- mutex_lock(&mvdev->mr_mtx);
- if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
- err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
- mutex_unlock(&mvdev->mr_mtx);
+ mutex_lock(&mvdev->mres.lock);
+ if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
+ err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
+ mutex_unlock(&mvdev->mres.lock);
return err;
}
@@ -3854,18 +3957,22 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->rqt_size = 1;
}
+ mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
+
ndev->mvdev.mlx_features = device_features;
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
goto err_mpfs;
- INIT_LIST_HEAD(&mvdev->mr_list_head);
+ err = mlx5_vdpa_init_mr_resources(mvdev);
+ if (err)
+ goto err_res;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
err = mlx5_vdpa_create_dma_mr(mvdev);
if (err)
- goto err_res;
+ goto err_mr_res;
}
err = alloc_fixed_resources(ndev);
@@ -3906,6 +4013,8 @@ err_reg:
err_res2:
free_fixed_resources(ndev);
err_mr:
+ mlx5_vdpa_clean_mrs(mvdev);
+err_mr_res:
mlx5_vdpa_destroy_mr_resources(mvdev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
@@ -3937,9 +4046,37 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
mgtdev->ndev = NULL;
}
+static int mlx5_vdpa_set_attr(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *add_config)
+{
+ struct virtio_net_config *config;
+ struct mlx5_core_dev *pfmdev;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_vdpa_net *ndev;
+ struct mlx5_core_dev *mdev;
+ int err = -EOPNOTSUPP;
+
+ mvdev = to_mvdev(dev);
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ mdev = mvdev->mdev;
+ config = &ndev->config;
+
+ down_write(&ndev->reslock);
+ if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+ if (!err)
+ ether_addr_copy(config->mac, add_config->net.mac);
+ }
+
+ up_write(&ndev->reslock);
+ return err;
+}
+
static const struct vdpa_mgmtdev_ops mdev_ops = {
.dev_add = mlx5_vdpa_dev_add,
.dev_del = mlx5_vdpa_dev_del,
+ .dev_set_attr = mlx5_vdpa_set_attr,
};
static struct virtio_device_id id_table[] = {
diff --git a/drivers/vdpa/pds/cmds.h b/drivers/vdpa/pds/cmds.h
index e24d85cb8f1c..6b1bc33356b0 100644
--- a/drivers/vdpa/pds/cmds.h
+++ b/drivers/vdpa/pds/cmds.h
@@ -14,5 +14,4 @@ int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
struct pds_vdpa_vq_info *vq_info);
int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
struct pds_vdpa_vq_info *vq_info);
-int pds_vdpa_cmd_set_features(struct pds_vdpa_device *pdsv, u64 features);
#endif /* _VDPA_CMDS_H_ */
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 4dbd2e55a288..8a372b51c21a 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -1361,6 +1361,80 @@ dev_err:
return err;
}
+static int vdpa_dev_net_device_attr_set(struct vdpa_device *vdev,
+ struct genl_info *info)
+{
+ struct vdpa_dev_set_config set_config = {};
+ struct vdpa_mgmt_dev *mdev = vdev->mdev;
+ struct nlattr **nl_attrs = info->attrs;
+ const u8 *macaddr;
+ int err = -EOPNOTSUPP;
+
+ down_write(&vdev->cf_lock);
+ if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
+ set_config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
+
+ if (is_valid_ether_addr(macaddr)) {
+ ether_addr_copy(set_config.net.mac, macaddr);
+ if (mdev->ops->dev_set_attr) {
+ err = mdev->ops->dev_set_attr(mdev, vdev,
+ &set_config);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "Operation not supported by the device.");
+ }
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "Invalid MAC address");
+ }
+ }
+ up_write(&vdev->cf_lock);
+ return err;
+}
+
+static int vdpa_nl_cmd_dev_attr_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct vdpa_device *vdev;
+ struct device *dev;
+ const char *name;
+ u64 classes;
+ int err = 0;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+
+ down_write(&vdpa_dev_lock);
+ dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto dev_err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ classes = vdpa_mgmtdev_get_classes(vdev->mdev, NULL);
+ if (classes & BIT_ULL(VIRTIO_ID_NET)) {
+ err = vdpa_dev_net_device_attr_set(vdev, info);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack, "%s device not supported",
+ name);
+ }
+
+mdev_err:
+ put_device(dev);
+dev_err:
+ up_write(&vdpa_dev_lock);
+ return err;
+}
+
static int vdpa_dev_config_dump(struct device *dev, void *data)
{
struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
@@ -1497,6 +1571,11 @@ static const struct genl_ops vdpa_nl_ops[] = {
.doit = vdpa_nl_cmd_dev_stats_get_doit,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = VDPA_CMD_DEV_ATTR_SET,
+ .doit = vdpa_nl_cmd_dev_attr_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family vdpa_nl_family __ro_after_init = {
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index cfe962911804..6caf09a1907b 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -414,6 +414,24 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
}
+static int vdpasim_net_set_attr(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *config)
+{
+ struct vdpasim *vdpasim = container_of(dev, struct vdpasim, vdpa);
+ struct virtio_net_config *vio_config = vdpasim->config;
+
+ mutex_lock(&vdpasim->mutex);
+
+ if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ ether_addr_copy(vio_config->mac, config->net.mac);
+ mutex_unlock(&vdpasim->mutex);
+ return 0;
+ }
+
+ mutex_unlock(&vdpasim->mutex);
+ return -EOPNOTSUPP;
+}
+
static void vdpasim_net_setup_config(struct vdpasim *vdpasim,
const struct vdpa_dev_set_config *config)
{
@@ -510,7 +528,8 @@ static void vdpasim_net_dev_del(struct vdpa_mgmt_dev *mdev,
static const struct vdpa_mgmtdev_ops vdpasim_net_mgmtdev_ops = {
.dev_add = vdpasim_net_dev_add,
- .dev_del = vdpasim_net_dev_del
+ .dev_del = vdpasim_net_dev_del,
+ .dev_set_attr = vdpasim_net_set_attr
};
static struct virtio_device_id id_table[] = {
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..58116f89d8da 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -162,6 +162,7 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
enum dma_data_direction dir)
{
struct vduse_bounce_map *map;
+ struct page *page;
unsigned int offset;
void *addr;
size_t sz;
@@ -178,7 +179,10 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
map->orig_phys == INVALID_PHYS_ADDR))
return;
- addr = kmap_local_page(map->bounce_page);
+ page = domain->user_bounce_pages ?
+ map->user_bounce_page : map->bounce_page;
+
+ addr = kmap_local_page(page);
do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
kunmap_local(addr);
size -= sz;
@@ -270,9 +274,8 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
memcpy_to_page(pages[i], 0,
page_address(map->bounce_page),
PAGE_SIZE);
- __free_page(map->bounce_page);
}
- map->bounce_page = pages[i];
+ map->user_bounce_page = pages[i];
get_page(pages[i]);
}
domain->user_bounce_pages = true;
@@ -297,17 +300,17 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
struct page *page = NULL;
map = &domain->bounce_maps[i];
- if (WARN_ON(!map->bounce_page))
+ if (WARN_ON(!map->user_bounce_page))
continue;
/* Copy user page to kernel page if it's in use */
if (map->orig_phys != INVALID_PHYS_ADDR) {
- page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
+ page = map->bounce_page;
memcpy_from_page(page_address(page),
- map->bounce_page, 0, PAGE_SIZE);
+ map->user_bounce_page, 0, PAGE_SIZE);
}
- put_page(map->bounce_page);
- map->bounce_page = page;
+ put_page(map->user_bounce_page);
+ map->user_bounce_page = NULL;
}
domain->user_bounce_pages = false;
out:
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index f92f22a7267d..7f3f0928ec78 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -21,6 +21,7 @@
struct vduse_bounce_map {
struct page *bounce_page;
+ struct page *user_bounce_page;
u64 orig_phys;
};
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index 82b2afa9b7e3..7e7988c4258f 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -108,10 +108,10 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
void *data)
{
struct fsl_mc_device *mc_dev = vdev->mc_dev;
- int ret, hwirq;
struct vfio_fsl_mc_irq *irq;
struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
+ int ret;
if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
return vfio_set_trigger(vdev, index, -1);
@@ -136,8 +136,6 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
return vfio_set_trigger(vdev, index, fd);
}
- hwirq = vdev->mc_dev->irqs[index]->virq;
-
irq = &vdev->mc_irqs[index];
if (flags & VFIO_IRQ_SET_DATA_NONE) {
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index ded364588d29..95b336de8a17 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -112,7 +112,7 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
return -EFAULT;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
mutex_lock(&group->group_lock);
@@ -125,13 +125,13 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
goto out_unlock;
}
- container = vfio_container_from_file(f.file);
+ container = vfio_container_from_file(fd_file(f));
if (container) {
ret = vfio_container_attach_group(container, group);
goto out_unlock;
}
- iommufd = iommufd_ctx_from_file(f.file);
+ iommufd = iommufd_ctx_from_file(fd_file(f));
if (!IS_ERR(iommufd)) {
if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
group->type == VFIO_NO_IOMMU)
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index 63a1316b08b7..5f61acd0fe42 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -10,9 +10,6 @@
#ifndef MDEV_PRIVATE_H
#define MDEV_PRIVATE_H
-int mdev_bus_register(void);
-void mdev_bus_unregister(void);
-
extern const struct bus_type mdev_bus_type;
extern const struct attribute_group *mdev_device_groups[];
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 9d2738e10c0b..e44bb44c581e 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -160,7 +160,7 @@ static void mdev_type_release(struct kobject *kobj)
put_device(type->parent->dev);
}
-static struct kobj_type mdev_type_ktype = {
+static const struct kobj_type mdev_type_ktype = {
.sysfs_ops = &mdev_type_sysfs_ops,
.release = mdev_type_release,
.default_groups = mdev_type_groups,
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 9a3e97108ace..0d632ba5d2a3 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -723,7 +723,6 @@ static const struct file_operations hisi_acc_vf_resume_fops = {
.owner = THIS_MODULE,
.write = hisi_acc_vf_resume_write,
.release = hisi_acc_vf_release_file,
- .llseek = no_llseek,
};
static struct hisi_acc_vf_migration_file *
@@ -845,7 +844,6 @@ static const struct file_operations hisi_acc_vf_save_fops = {
.unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = hisi_acc_vf_release_file,
- .llseek = no_llseek,
};
static struct hisi_acc_vf_migration_file *
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 61d9b0f9146d..242c23eef452 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -587,7 +587,6 @@ static const struct file_operations mlx5vf_save_fops = {
.unlocked_ioctl = mlx5vf_precopy_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = mlx5vf_release_file,
- .llseek = no_llseek,
};
static int mlx5vf_pci_save_device_inc_data(struct mlx5vf_pci_core_device *mvdev)
@@ -1000,7 +999,6 @@ static const struct file_operations mlx5vf_resume_fops = {
.owner = THIS_MODULE,
.write = mlx5vf_resume_write,
.release = mlx5vf_release_file,
- .llseek = no_llseek,
};
static struct mlx5_vf_migration_file *
diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c
index 6b94cc0bf45b..f2673d395236 100644
--- a/drivers/vfio/pci/pds/lm.c
+++ b/drivers/vfio/pci/pds/lm.c
@@ -235,7 +235,6 @@ static const struct file_operations pds_vfio_save_fops = {
.owner = THIS_MODULE,
.read = pds_vfio_save_read,
.release = pds_vfio_release_file,
- .llseek = no_llseek,
};
static int pds_vfio_get_save_file(struct pds_vfio_pci_device *pds_vfio)
@@ -334,7 +333,6 @@ static const struct file_operations pds_vfio_restore_fops = {
.owner = THIS_MODULE,
.write = pds_vfio_restore_write,
.release = pds_vfio_release_file,
- .llseek = no_llseek,
};
static int pds_vfio_get_restore_file(struct pds_vfio_pci_device *pds_vfio)
diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c
index e36740a282e7..be3644ced17b 100644
--- a/drivers/vfio/pci/qat/main.c
+++ b/drivers/vfio/pci/qat/main.c
@@ -220,7 +220,6 @@ static const struct file_operations qat_vf_save_fops = {
.unlocked_ioctl = qat_vf_precopy_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = qat_vf_release_file,
- .llseek = no_llseek,
};
static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
@@ -345,7 +344,6 @@ static const struct file_operations qat_vf_resume_fops = {
.owner = THIS_MODULE,
.write = qat_vf_resume_write,
.release = qat_vf_release_file,
- .llseek = no_llseek,
};
static struct qat_vf_migration_file *
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index ba0ce0075b2f..1ab58da9f38a 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
+#include <linux/pfn_t.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -57,11 +58,6 @@ struct vfio_pci_vf_token {
int users;
};
-struct vfio_pci_mmap_vma {
- struct vm_area_struct *vma;
- struct list_head vma_next;
-};
-
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -1328,7 +1324,7 @@ out:
static int
vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
- int array_count, bool slot,
+ u32 array_count, bool slot,
struct vfio_pci_hot_reset __user *arg)
{
int32_t *group_fds;
@@ -1657,14 +1653,20 @@ static unsigned long vma_to_pfn(struct vm_area_struct *vma)
return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
}
-static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
+ unsigned int order)
{
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_core_device *vdev = vma->vm_private_data;
unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
- unsigned long addr = vma->vm_start;
vm_fault_t ret = VM_FAULT_SIGBUS;
+ if (order && (vmf->address & ((PAGE_SIZE << order) - 1) ||
+ vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+ ret = VM_FAULT_FALLBACK;
+ goto out;
+ }
+
pfn = vma_to_pfn(vma);
down_read(&vdev->memory_lock);
@@ -1672,30 +1674,49 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
goto out_unlock;
- ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
- if (ret & VM_FAULT_ERROR)
- goto out_unlock;
-
- /*
- * Pre-fault the remainder of the vma, abort further insertions and
- * supress error if fault is encountered during pre-fault.
- */
- for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
- if (addr == vmf->address)
- continue;
-
- if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
- break;
+ switch (order) {
+ case 0:
+ ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+ break;
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+ case PMD_ORDER:
+ ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
+ PFN_DEV), false);
+ break;
+#endif
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+ case PUD_ORDER:
+ ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
+ PFN_DEV), false);
+ break;
+#endif
+ default:
+ ret = VM_FAULT_FALLBACK;
}
out_unlock:
up_read(&vdev->memory_lock);
+out:
+ dev_dbg_ratelimited(&vdev->pdev->dev,
+ "%s(,order = %d) BAR %ld page offset 0x%lx: 0x%x\n",
+ __func__, order,
+ vma->vm_pgoff >>
+ (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT),
+ pgoff, (unsigned int)ret);
return ret;
}
+static vm_fault_t vfio_pci_mmap_page_fault(struct vm_fault *vmf)
+{
+ return vfio_pci_mmap_huge_fault(vmf, 0);
+}
+
static const struct vm_operations_struct vfio_pci_mmap_ops = {
- .fault = vfio_pci_mmap_fault,
+ .fault = vfio_pci_mmap_page_fault,
+#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
+ .huge_fault = vfio_pci_mmap_huge_fault,
+#endif
};
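With .huge_fault wired up, the MM tries the largest applicable order first; any order the handler cannot satisfy must return VM_FAULT_FALLBACK so the fault retries at a smaller order, bottoming out at the ordinary .fault path (order 0). A reduced sketch of that contract — demo_huge_fault and some_pfn are placeholders, not vfio-pci code:

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static vm_fault_t demo_huge_fault(struct vm_fault *vmf, unsigned int order)
{
    unsigned long some_pfn = 0; /* placeholder: resolve from the VMA */

    if (order != PMD_ORDER)
        return VM_FAULT_FALLBACK;

    /* The PMD-sized block must be aligned and fully inside the VMA. */
    if ((vmf->address & ~PMD_MASK) ||
        vmf->address + PMD_SIZE > vmf->vma->vm_end)
        return VM_FAULT_FALLBACK;

    return vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(some_pfn, PFN_DEV), false);
}
#endif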
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0960699e7554..bf391b40e576 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -513,12 +513,10 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long vaddr, unsigned long *pfn,
bool write_fault)
{
- pte_t *ptep;
- pte_t pte;
- spinlock_t *ptl;
+ struct follow_pfnmap_args args = { .vma = vma, .address = vaddr };
int ret;
- ret = follow_pte(vma, vaddr, &ptep, &ptl);
+ ret = follow_pfnmap_start(&args);
if (ret) {
bool unlocked = false;
@@ -532,19 +530,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
if (ret)
return ret;
- ret = follow_pte(vma, vaddr, &ptep, &ptl);
+ ret = follow_pfnmap_start(&args);
if (ret)
return ret;
}
- pte = ptep_get(ptep);
-
- if (write_fault && !pte_write(pte))
+ if (write_fault && !args.writable)
ret = -EFAULT;
else
- *pfn = pte_pfn(pte);
+ *pfn = args.pfn;
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
return ret;
}
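Both conversions in this series follow the same follow_pfnmap contract: start() resolves the mapping under the right locks, the caller copies out args.pfn and args.writable without sleeping, and end() drops the locks. Condensed to its skeleton (vma and vaddr assumed in scope):

struct follow_pfnmap_args args = { .vma = vma, .address = vaddr };

if (!follow_pfnmap_start(&args)) {
    unsigned long pfn = args.pfn;
    bool writable = args.writable;

    follow_pfnmap_end(&args);   /* no sleeping before this point */
    /* use pfn/writable */
}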
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 532269133801..d22881245e89 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -134,12 +134,12 @@ int vfio_virqfd_enable(void *opaque,
INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
irqfd = fdget(fd);
- if (!irqfd.file) {
+ if (!fd_file(irqfd)) {
ret = -EBADF;
goto err_fd;
}
- ctx = eventfd_ctx_fileget(irqfd.file);
+ ctx = eventfd_ctx_fileget(fd_file(irqfd));
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto err_ctx;
@@ -171,7 +171,7 @@ int vfio_virqfd_enable(void *opaque,
init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
- events = vfs_poll(irqfd.file, &virqfd->pt);
+ events = vfs_poll(fd_file(irqfd), &virqfd->pt);
/*
* Check if there was an event already pending on the eventfd
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 478cd46a49ed..5a49b5a6d496 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -209,11 +209,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
if (irq < 0)
return;
- irq_bypass_unregister_producer(&vq->call_ctx.producer);
if (!vq->call_ctx.ctx)
return;
- vq->call_ctx.producer.token = vq->call_ctx.ctx;
vq->call_ctx.producer.irq = irq;
ret = irq_bypass_register_producer(&vq->call_ctx.producer);
if (unlikely(ret))
@@ -709,6 +707,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
vq->last_avail_idx = vq_state.split.avail_index;
}
break;
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx.ctx) {
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_unsetup_vq_irq(v, idx);
+ vq->call_ctx.producer.token = NULL;
+ }
+ break;
}
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
@@ -747,13 +753,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
cb.trigger = vq->call_ctx.ctx;
+ vq->call_ctx.producer.token = vq->call_ctx.ctx;
+ if (ops->get_status(vdpa) &
+ VIRTIO_CONFIG_S_DRIVER_OK)
+ vhost_vdpa_setup_vq_irq(v, idx);
} else {
cb.callback = NULL;
cb.private = NULL;
cb.trigger = NULL;
}
ops->set_vq_cb(vdpa, idx, &cb);
- vhost_vdpa_setup_vq_irq(v, idx);
break;
case VHOST_SET_VRING_NUM:
@@ -1419,6 +1428,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
for (i = 0; i < nvqs; i++) {
vqs[i] = &v->vqs[i];
vqs[i]->handle_kick = handle_vq_kick;
+ vqs[i]->call_ctx.ctx = NULL;
}
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index dd0874f8c7ff..4175a4603071 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -166,6 +166,7 @@ static const struct lcd_ops l4f_ops = {
static int l4f00242t03_probe(struct spi_device *spi)
{
struct l4f00242t03_priv *priv;
+ int ret;
priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
GFP_KERNEL);
@@ -174,7 +175,9 @@ static int l4f00242t03_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
spi->bits_per_word = 9;
- spi_setup(spi);
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret, "Unable to setup spi.\n");
priv->spi = spi;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 3f7333dca508..e8b4e8c119b5 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -64,6 +64,8 @@
#include <linux/console.h>
#include <linux/string.h>
#include <linux/kd.h>
+#include <linux/panic.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/fbcon.h>
@@ -270,12 +272,24 @@ static int fbcon_get_rotate(struct fb_info *info)
return (ops) ? ops->rotate : 0;
}
+static bool fbcon_skip_panic(struct fb_info *info)
+{
+/* panic_cpu is not exported and can't be used when fbcon is built as a
+ * module. Fall back to oops_in_progress, at the cost of also suppressing
+ * output for non-fatal oopses.
+ */
+#if defined(MODULE)
+ return (info->skip_panic && unlikely(oops_in_progress));
+#else
+ return (info->skip_panic && unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID));
+#endif
+}
+
static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
return (info->state != FBINFO_STATE_RUNNING ||
- vc->vc_mode != KD_TEXT || ops->graphics);
+ vc->vc_mode != KD_TEXT || ops->graphics || fbcon_skip_panic(info));
}
static int get_color(struct vc_data *vc, struct fb_info *info,
@@ -498,8 +512,10 @@ static int search_fb_in_map(int idx)
int i, retval = 0;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
- if (con2fb_map[i] == idx)
+ if (con2fb_map[i] == idx) {
retval = 1;
+ break;
+ }
}
return retval;
}
@@ -509,8 +525,10 @@ static int search_for_mapped_con(void)
int i, retval = 0;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
- if (con2fb_map[i] != -1)
+ if (con2fb_map[i] != -1) {
retval = 1;
+ break;
+ }
}
return retval;
}
@@ -847,6 +865,8 @@ static int set_con2fb_map(int unit, int newidx, int user)
return err;
fbcon_add_cursor_work(info);
+ } else if (vc) {
+ set_blitting_type(vc, info);
}
con2fb_map[unit] = newidx;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index 4040e247e026..d5a43b3bf45e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -129,12 +129,9 @@ omapdss_of_find_source_for_first_ep(struct device_node *node)
return ERR_PTR(-EINVAL);
src_port = of_graph_get_remote_port(ep);
- if (!src_port) {
- of_node_put(ep);
- return ERR_PTR(-EINVAL);
- }
-
of_node_put(ep);
+ if (!src_port)
+ return ERR_PTR(-EINVAL);
src = omap_dss_find_output_by_port_node(src_port);
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 009bf1d92644..75033e6be15a 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -183,7 +183,7 @@ static void sisfb_search_mode(char *name, bool quiet)
{
unsigned int j = 0, xres = 0, yres = 0, depth = 0, rate = 0;
int i = 0;
- char strbuf[16], strbuf1[20];
+ char strbuf[24], strbuf1[20];
char *nameptr = name;
/* We don't know the hardware specs yet and there is no ivideo */
diff --git a/drivers/virt/acrn/irqfd.c b/drivers/virt/acrn/irqfd.c
index d4ad211dce7a..9994d818bb7e 100644
--- a/drivers/virt/acrn/irqfd.c
+++ b/drivers/virt/acrn/irqfd.c
@@ -125,12 +125,12 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);
f = fdget(args->fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto out;
}
- eventfd = eventfd_ctx_fileget(f.file);
+ eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
goto fail;
@@ -157,7 +157,7 @@ static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
mutex_unlock(&vm->irqfds_lock);
/* Check the pending event in this stage */
- events = vfs_poll(f.file, &irqfd->pt);
+ events = vfs_poll(fd_file(f), &irqfd->pt);
if (events & EPOLLIN)
acrn_irqfd_inject(irqfd);
diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
index db8ff1d0ac23..4c2f28715b70 100644
--- a/drivers/virt/acrn/mm.c
+++ b/drivers/virt/acrn/mm.c
@@ -177,9 +177,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
vma = vma_lookup(current->mm, memmap->vma_base);
if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
unsigned long start_pfn, cur_pfn;
- spinlock_t *ptl;
bool writable;
- pte_t *ptep;
if ((memmap->vma_base + memmap->len) > vma->vm_end) {
mmap_read_unlock(current->mm);
@@ -187,16 +185,20 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
}
for (i = 0; i < nr_pages; i++) {
- ret = follow_pte(vma, memmap->vma_base + i * PAGE_SIZE,
- &ptep, &ptl);
+ struct follow_pfnmap_args args = {
+ .vma = vma,
+ .address = memmap->vma_base + i * PAGE_SIZE,
+ };
+
+ ret = follow_pfnmap_start(&args);
if (ret)
break;
- cur_pfn = pte_pfn(ptep_get(ptep));
+ cur_pfn = args.pfn;
if (i == 0)
start_pfn = cur_pfn;
- writable = !!pte_write(ptep_get(ptep));
- pte_unmap_unlock(ptep, ptl);
+ writable = args.writable;
+ follow_pfnmap_end(&args);
/* Disallow write access if the PTE is not writable. */
if (!writable &&
diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c
index 2acba56ad42e..d7db6c824e13 100644
--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
+++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
@@ -285,7 +285,6 @@ static long tdx_guest_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations tdx_guest_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tdx_guest_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice tdx_misc_dev = {
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 54469277ca30..b36d2803674e 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -355,6 +355,8 @@ static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
unsigned long events[NR_VM_EVENT_ITEMS];
unsigned int idx = 0;
+ unsigned int zid;
+ unsigned long stall = 0;
all_vm_events(events);
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
@@ -363,6 +365,22 @@ static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_OOM_KILL, events[OOM_KILL]);
+
+ /* Sum allocation stalls over all zones: the ALLOCSTALL_* vm events
+ * are laid out in zone order, so the zone id indexes its counter.
+ */
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ALLOC_STALL, stall);
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_SCAN,
+ pages_to_bytes(events[PGSCAN_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_SCAN,
+ pages_to_bytes(events[PGSCAN_DIRECT]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_KSWAPD]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_RECLAIM,
+ pages_to_bytes(events[PGSTEAL_DIRECT]));
#ifdef CONFIG_HUGETLB_PAGE
update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index b2d76c1784bd..a2ecbb863c57 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -541,8 +541,8 @@ static void ds2482_remove(struct i2c_client *client)
* Driver data (common to all clients)
*/
static const struct i2c_device_id ds2482_id[] = {
- { "ds2482", 0 },
- { "ds2484", 0 },
+ { "ds2482" },
+ { "ds2484" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds2482_id);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index bae1d97cce89..684b9fe84fff 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -953,6 +953,15 @@ config RENESAS_RZG2LWDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas RZ/G2L SoCs. These watchdogs can be used to reset a system.
+config RENESAS_RZV2HWDT
+ tristate "Renesas RZ/V2H(P) WDT Watchdog"
+ depends on ARCH_R9A09G057 || COMPILE_TEST
+ depends on PM || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This driver adds watchdog support for the integrated watchdogs in the
+ Renesas RZ/V2H(P) SoCs. These watchdogs can be used to reset a system.
+
config ASPEED_WATCHDOG
tristate "Aspeed BMC watchdog support"
depends on ARCH_ASPEED || COMPILE_TEST
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index b51030f035a6..ab6f2b41e38e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
obj-$(CONFIG_RENESAS_RZN1WDT) += rzn1_wdt.o
obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o
+obj-$(CONFIG_RENESAS_RZV2HWDT) += rzv2h_wdt.o
obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o
obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 53b04abd55b0..08ca18e91124 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -218,7 +218,6 @@ static int acq_close(struct inode *inode, struct file *file)
static const struct file_operations acq_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = acq_write,
.unlocked_ioctl = acq_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index 7a0acbc3e4dd..e41cd3ba4e0e 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -217,7 +217,6 @@ static int advwdt_close(struct inode *inode, struct file *file)
static const struct file_operations advwdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = advwdt_write,
.unlocked_ioctl = advwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index bfb9a91ca1df..1ecbd1ac5c3a 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -359,7 +359,6 @@ static int __init ali_find_watchdog(void)
static const struct file_operations ali_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = ali_write,
.unlocked_ioctl = ali_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 4ff7f5afb7aa..9c7cf939ba3d 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -289,7 +289,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 558015f08c7a..17382512a609 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -210,7 +210,6 @@ static ssize_t at91_wdt_write(struct file *file, const char *data,
static const struct file_operations at91wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = at91_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = at91_wdt_open,
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index e5cc30622b12..d16b2c583fa4 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -231,7 +231,6 @@ static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations ath79_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = ath79_wdt_write,
.unlocked_ioctl = ath79_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 9f279c0e13a6..f94b84048612 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -185,7 +185,6 @@ static ssize_t cpu5wdt_write(struct file *file, const char __user *buf,
static const struct file_operations cpu5wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = cpu5wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = cpu5wdt_open,
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 901b94d456db..8ee81f018dda 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -507,7 +507,6 @@ static const struct file_operations cpwd_fops = {
.write = cpwd_write,
.read = cpwd_read,
.release = cpwd_release,
- .llseek = no_llseek,
};
static int cpwd_probe(struct platform_device *op)
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index e26609ad4c17..10c647b1226a 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -368,7 +368,6 @@ static int eurwdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations eurwdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = eurwdt_write,
.unlocked_ioctl = eurwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 6a1db1c783fa..d854fcfbfa5b 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -245,7 +245,6 @@ static int gef_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations gef_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = gef_wdt_write,
.unlocked_ioctl = gef_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 5186c37ad451..4ed6d139320b 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -196,7 +196,6 @@ static long geodewdt_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations geodewdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = geodewdt_write,
.unlocked_ioctl = geodewdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 264857d314da..35b358bcf94c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -563,8 +563,8 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
}
ident.firmware_version = p->iTCO_version;
- p->wddev.info = &ident,
- p->wddev.ops = &iTCO_wdt_ops,
+ p->wddev.info = &ident;
+ p->wddev.ops = &iTCO_wdt_ops;
p->wddev.bootstatus = 0;
p->wddev.timeout = WATCHDOG_TIMEOUT;
watchdog_set_nowayout(&p->wddev, nowayout);
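/*
 * Why the comma-to-semicolon fixes above matter: a trailing comma chains
 * consecutive assignments into a single comma expression. That happens to
 * behave identically here, but it is fragile and breaks the moment a
 * declaration or control statement lands in between. Illustrative fragment
 * only, with hypothetical types:
 */
struct ex_wddev { const void *info; const void *ops; int bootstatus; };

static void ex_comma_trap(struct ex_wddev *p, const void *info, const void *ops)
{
	p->info = info,		/* comma operator: one statement... */
	p->ops = ops,		/* ...still the same statement... */
	p->bootstatus = 0;	/* ...which only ends here. */
}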
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 39ea97009abd..b041ad90a62c 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -256,7 +256,6 @@ static int ibwdt_close(struct inode *inode, struct file *file)
static const struct file_operations ibwdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = ibwdt_write,
.unlocked_ioctl = ibwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 6955c693b5fd..cf845f865945 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -340,7 +340,6 @@ static int asr_release(struct inode *inode, struct file *file)
static const struct file_operations asr_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = asr_write,
.unlocked_ioctl = asr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 42e8ffae18dd..4b3a192ee3e8 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -379,7 +379,7 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
/* Disable watchdog if it is active or non-active but still running */
-static int __maybe_unused imx2_wdt_suspend(struct device *dev)
+static int imx2_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -404,7 +404,7 @@ static int __maybe_unused imx2_wdt_suspend(struct device *dev)
}
/* Enable watchdog and configure it if necessary */
-static int __maybe_unused imx2_wdt_resume(struct device *dev)
+static int imx2_wdt_resume(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -435,8 +435,8 @@ static int __maybe_unused imx2_wdt_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
- imx2_wdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
+ imx2_wdt_resume);
static struct imx2_wdt_data imx_wdt = {
.wdw_supported = true,
@@ -476,7 +476,7 @@ static struct platform_driver imx2_wdt_driver = {
.shutdown = imx2_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
- .pm = &imx2_wdt_pm_ops,
+ .pm = pm_sleep_ptr(&imx2_wdt_pm_ops),
.of_match_table = imx2_wdt_dt_ids,
},
};
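/*
 * Sketch of the pm_sleep_ptr()/DEFINE_SIMPLE_DEV_PM_OPS pattern adopted
 * above (simplified, not the kernel's exact macros): the callback pointers
 * stay syntactically referenced in both configurations, so when
 * CONFIG_PM_SLEEP is off the compiler can discard them as dead code
 * without any __maybe_unused annotations.
 */
#define ex_pm_sleep_ptr(ptr) \
	(IS_ENABLED(CONFIG_PM_SLEEP) ? (ptr) : NULL)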
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 94914a22daff..0f13a3053357 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -55,6 +55,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
struct imx_wdt_hw_feature {
bool prescaler_enable;
+ bool post_rcs_wait;
u32 wdog_clock_rate;
};
@@ -62,7 +63,6 @@ struct imx7ulp_wdt_device {
struct watchdog_device wdd;
void __iomem *base;
struct clk *clk;
- bool post_rcs_wait;
bool ext_reset;
const struct imx_wdt_hw_feature *hw;
};
@@ -95,7 +95,7 @@ static int imx7ulp_wdt_wait_rcs(struct imx7ulp_wdt_device *wdt)
ret = -ETIMEDOUT;
/* Wait 2.5 clocks after RCS done */
- if (wdt->post_rcs_wait)
+ if (wdt->hw->post_rcs_wait)
usleep_range(wait_min, wait_min + 2000);
return ret;
@@ -334,15 +334,6 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
/* The WDOG may need to do external reset through dedicated pin */
imx7ulp_wdt->ext_reset = of_property_read_bool(dev->of_node, "fsl,ext-reset-output");
- imx7ulp_wdt->post_rcs_wait = true;
- if (of_device_is_compatible(dev->of_node,
- "fsl,imx8ulp-wdt")) {
- dev_info(dev, "imx8ulp wdt probe\n");
- imx7ulp_wdt->post_rcs_wait = false;
- } else {
- dev_info(dev, "imx7ulp wdt probe\n");
- }
-
wdog = &imx7ulp_wdt->wdd;
wdog->info = &imx7ulp_wdt_info;
wdog->ops = &imx7ulp_wdt_ops;
@@ -403,6 +394,12 @@ static const struct dev_pm_ops imx7ulp_wdt_pm_ops = {
static const struct imx_wdt_hw_feature imx7ulp_wdt_hw = {
.prescaler_enable = false,
.wdog_clock_rate = 1000,
+ .post_rcs_wait = true,
+};
+
+static const struct imx_wdt_hw_feature imx8ulp_wdt_hw = {
+ .prescaler_enable = false,
+ .wdog_clock_rate = 1000,
};
static const struct imx_wdt_hw_feature imx93_wdt_hw = {
@@ -411,8 +408,8 @@ static const struct imx_wdt_hw_feature imx93_wdt_hw = {
};
static const struct of_device_id imx7ulp_wdt_dt_ids[] = {
- { .compatible = "fsl,imx8ulp-wdt", .data = &imx7ulp_wdt_hw, },
{ .compatible = "fsl,imx7ulp-wdt", .data = &imx7ulp_wdt_hw, },
+ { .compatible = "fsl,imx8ulp-wdt", .data = &imx8ulp_wdt_hw, },
{ .compatible = "fsl,imx93-wdt", .data = &imx93_wdt_hw, },
{ /* sentinel */ }
};
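/*
 * A minimal sketch of how per-SoC match data such as imx8ulp_wdt_hw above
 * is typically consumed at probe time; the driver's actual probe path may
 * differ in detail:
 */
static int ex_probe(struct platform_device *pdev)
{
	const struct imx_wdt_hw_feature *hw = of_device_get_match_data(&pdev->dev);

	if (!hw)
		return -ENODEV;

	/* hw->post_rcs_wait now distinguishes imx7ulp from imx8ulp parts */
	return 0;
}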
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index e51fe1b78518..1280b9b1ec2a 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -56,6 +56,25 @@ static int imx_sc_wdt_ping(struct watchdog_device *wdog)
return 0;
}
+static bool imx_sc_wdt_is_running(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_START_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ /* Already enabled (SC_TIMER_ERR_BUSY)? */
+ if (res.a0 == SC_TIMER_ERR_BUSY)
+ return true;
+
+	/* Undo only if it was us who (successfully) enabled the WDT */
+ if (!res.a0)
+ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_STOP_WDOG,
+ 0, 0, 0, 0, 0, 0, &res);
+
+ return false;
+}
+
static int imx_sc_wdt_start(struct watchdog_device *wdog)
{
struct arm_smccc_res res;
@@ -183,6 +202,9 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (imx_sc_wdt_is_running())
+ set_bit(WDOG_HW_RUNNING, &wdog->status);
+
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);
@@ -216,29 +238,6 @@ register_device:
return devm_watchdog_register_device(dev, wdog);
}
-static int __maybe_unused imx_sc_wdt_suspend(struct device *dev)
-{
- struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev);
-
- if (watchdog_active(&imx_sc_wdd->wdd))
- imx_sc_wdt_stop(&imx_sc_wdd->wdd);
-
- return 0;
-}
-
-static int __maybe_unused imx_sc_wdt_resume(struct device *dev)
-{
- struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev);
-
- if (watchdog_active(&imx_sc_wdd->wdd))
- imx_sc_wdt_start(&imx_sc_wdd->wdd);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops,
- imx_sc_wdt_suspend, imx_sc_wdt_resume);
-
static const struct of_device_id imx_sc_wdt_dt_ids[] = {
{ .compatible = "fsl,imx-sc-wdt", },
{ /* sentinel */ }
@@ -250,7 +249,6 @@ static struct platform_driver imx_sc_wdt_driver = {
.driver = {
.name = "imx-sc-wdt",
.of_match_table = imx_sc_wdt_dt_ids,
- .pm = &imx_sc_wdt_pm_ops,
},
};
module_platform_driver(imx_sc_wdt_driver);
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 9857bb74a723..d3092d261345 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -149,7 +149,6 @@ static int indydog_notify_sys(struct notifier_block *this,
static const struct file_operations indydog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = indydog_write,
.unlocked_ioctl = indydog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 3ce6a58bd81e..b776e6766c9d 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -341,7 +341,6 @@ static int it8712f_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations it8712f_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = it8712f_wdt_write,
.unlocked_ioctl = it8712f_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 062ea3e6497e..26bd073bd375 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -179,7 +179,6 @@ static int m54xx_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations m54xx_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = m54xx_wdt_write,
.unlocked_ioctl = m54xx_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 73f2221f6222..73d641486909 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -359,7 +359,6 @@ static int zf_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations zf_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = zf_write,
.unlocked_ioctl = zf_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c
index 098bb141a521..298089d45ab8 100644
--- a/drivers/watchdog/marvell_gti_wdt.c
+++ b/drivers/watchdog/marvell_gti_wdt.c
@@ -285,8 +285,8 @@ static int gti_wdt_probe(struct platform_device *pdev)
}
wdog_dev = &priv->wdev;
- wdog_dev->info = &gti_wdt_ident,
- wdog_dev->ops = &gti_wdt_ops,
+ wdog_dev->info = &gti_wdt_ident;
+ wdog_dev->ops = &gti_wdt_ops;
wdog_dev->parent = dev;
/*
* Watchdog counter is 24 bit where lower 8 bits are zeros
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index d387bad377c4..70d9cf84c342 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -224,7 +224,6 @@ static long mixcomwd_ioctl(struct file *file,
static const struct file_operations mixcomwd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = mixcomwd_write,
.unlocked_ioctl = mixcomwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 06756135033d..11f05024a181 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -177,7 +177,6 @@ static ssize_t mtx1_wdt_write(struct file *file, const char *buf,
static const struct file_operations mtx1_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = mtx1_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = mtx1_wdt_open,
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index ac4a9c16341d..f8eb1f65a59e 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -264,7 +264,6 @@ static long nv_tco_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations nv_tco_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = nv_tco_write,
.unlocked_ioctl = nv_tco_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index c7f745caf203..fbf835d112b8 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -470,7 +470,6 @@ static int pc87413_notify_sys(struct notifier_block *this,
static const struct file_operations pc87413_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = pc87413_write,
.unlocked_ioctl = pc87413_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index a793b03a785d..1a4282235aac 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -749,7 +749,6 @@ static int pcwd_temp_close(struct inode *inode, struct file *file)
static const struct file_operations pcwd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = pcwd_write,
.unlocked_ioctl = pcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -765,7 +764,6 @@ static struct miscdevice pcwd_miscdev = {
static const struct file_operations pcwd_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = pcwd_temp_read,
.open = pcwd_temp_open,
.release = pcwd_temp_close,
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 54d86fcb1837..a489b426f2ba 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -643,7 +643,6 @@ static int pcipcwd_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations pcipcwd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = pcipcwd_write,
.unlocked_ioctl = pcipcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -659,7 +658,6 @@ static struct miscdevice pcipcwd_miscdev = {
static const struct file_operations pcipcwd_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = pcipcwd_temp_read,
.open = pcipcwd_temp_open,
.release = pcipcwd_temp_release,
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 8202f0a6b093..132699e2f247 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -549,7 +549,6 @@ static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations usb_pcwd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = usb_pcwd_write,
.unlocked_ioctl = usb_pcwd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -565,7 +564,6 @@ static struct miscdevice usb_pcwd_miscdev = {
static const struct file_operations usb_pcwd_temperature_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = usb_pcwd_temperature_read,
.open = usb_pcwd_temperature_open,
.release = usb_pcwd_temperature_release,
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index 782b8c23d99c..393aa4b1bc13 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -209,7 +209,6 @@ static long pikawdt_ioctl(struct file *file,
static const struct file_operations pikawdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.open = pikawdt_open,
.release = pikawdt_release,
.write = pikawdt_write,
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index f3fcbeb0852c..007ed139ab96 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -218,7 +218,7 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
return err;
}
- wdt->wdev.ops = &pm8916_wdt_ops,
+ wdt->wdev.ops = &pm8916_wdt_ops;
wdt->wdev.parent = dev;
wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT;
wdt->wdev.max_timeout = PM8916_WDT_MAX_TIMEOUT;
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 417f9b75679c..efadbb9d7ce7 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -242,7 +242,6 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations rc32434_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = rc32434_wdt_write,
.unlocked_ioctl = rc32434_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 6176f4343fc5..80490316a27f 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -197,7 +197,6 @@ static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf,
static const struct file_operations rdc321x_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = rdc321x_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = rdc321x_wdt_open,
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index b293792a292a..f47d90d01c19 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -160,7 +160,6 @@ static ssize_t riowd_write(struct file *file, const char __user *buf,
static const struct file_operations riowd_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = riowd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = riowd_open,
diff --git a/drivers/watchdog/rzv2h_wdt.c b/drivers/watchdog/rzv2h_wdt.c
new file mode 100644
index 000000000000..1d1b17312747
--- /dev/null
+++ b/drivers/watchdog/rzv2h_wdt.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) WDT Watchdog Driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corporation.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/units.h>
+#include <linux/watchdog.h>
+
+#define WDTRR 0x00 /* WDT Refresh Register RW, 8 */
+#define WDTCR 0x02 /* WDT Control Register RW, 16 */
+#define WDTSR 0x04 /* WDT Status Register RW, 16 */
+#define WDTRCR 0x06 /* WDT Reset Control Register RW, 8 */
+
+#define WDTCR_TOPS_1024 0x00
+#define WDTCR_TOPS_16384 0x03
+
+#define WDTCR_CKS_CLK_1 0x00
+#define WDTCR_CKS_CLK_256 0x50
+
+#define WDTCR_RPES_0 0x300
+#define WDTCR_RPES_75 0x000
+
+#define WDTCR_RPSS_25 0x00
+#define WDTCR_RPSS_100 0x3000
+
+#define WDTRCR_RSTIRQS BIT(7)
+
+#define MAX_TIMEOUT_CYCLES 16384
+#define CLOCK_DIV_BY_256 256
+
+#define WDT_DEFAULT_TIMEOUT 60U
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct rzv2h_wdt_priv {
+ void __iomem *base;
+ struct clk *pclk;
+ struct clk *oscclk;
+ struct reset_control *rstc;
+ struct watchdog_device wdev;
+};
+
+static int rzv2h_wdt_ping(struct watchdog_device *wdev)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+	/*
+	 * The down-counter is refreshed and restarts counting when the
+	 * values 00h and then FFh are written to the WDTRR register.
+	 */
+ writeb(0x0, priv->base + WDTRR);
+ writeb(0xFF, priv->base + WDTRR);
+
+ return 0;
+}
+
+static void rzv2h_wdt_setup(struct watchdog_device *wdev, u16 wdtcr)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ /* Configure the timeout, clock division ratio, and window start and end positions. */
+ writew(wdtcr, priv->base + WDTCR);
+
+ /* Enable interrupt output to the ICU. */
+ writeb(0, priv->base + WDTRCR);
+
+ /* Clear underflow flag and refresh error flag. */
+ writew(0, priv->base + WDTSR);
+}
+
+static int rzv2h_wdt_start(struct watchdog_device *wdev)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(wdev->parent);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rstc);
+ if (ret) {
+ pm_runtime_put(wdev->parent);
+ return ret;
+ }
+
+ /* delay to handle clock halt after de-assert operation */
+ udelay(3);
+
+ /*
+ * WDTCR
+ * - CKS[7:4] - Clock Division Ratio Select - 0101b: oscclk/256
+ * - RPSS[13:12] - Window Start Position Select - 11b: 100%
+ * - RPES[9:8] - Window End Position Select - 11b: 0%
+ * - TOPS[1:0] - Timeout Period Select - 11b: 16384 cycles (3FFFh)
+ */
+ rzv2h_wdt_setup(wdev, WDTCR_CKS_CLK_256 | WDTCR_RPSS_100 |
+ WDTCR_RPES_0 | WDTCR_TOPS_16384);
+
+ /*
+ * Down counting starts after writing the sequence 00h -> FFh to the
+ * WDTRR register. Hence, call the ping operation after loading the counter.
+ */
+ rzv2h_wdt_ping(wdev);
+
+ return 0;
+}
+
+static int rzv2h_wdt_stop(struct watchdog_device *wdev)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
+
+ ret = reset_control_assert(priv->rstc);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_put(wdev->parent);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct watchdog_info rzv2h_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .identity = "Renesas RZ/V2H WDT Watchdog",
+};
+
+static int rzv2h_wdt_restart(struct watchdog_device *wdev,
+ unsigned long action, void *data)
+{
+ struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
+
+ if (!watchdog_active(wdev)) {
+ ret = clk_enable(priv->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(priv->oscclk);
+ if (ret) {
+ clk_disable(priv->pclk);
+ return ret;
+ }
+
+ ret = reset_control_deassert(priv->rstc);
+ if (ret) {
+ clk_disable(priv->oscclk);
+ clk_disable(priv->pclk);
+ return ret;
+ }
+ } else {
+ /*
+ * Writing to the WDT Control Register (WDTCR) or WDT Reset
+ * Control Register (WDTRCR) is possible once between the
+ * release from the reset state and the first refresh operation.
+ * Therefore, issue a reset if the watchdog is active.
+ */
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ return ret;
+ }
+
+ /* delay to handle clock halt after de-assert operation */
+ udelay(3);
+
+ /*
+ * WDTCR
+ * - CKS[7:4] - Clock Division Ratio Select - 0000b: oscclk/1
+ * - RPSS[13:12] - Window Start Position Select - 00b: 25%
+ * - RPES[9:8] - Window End Position Select - 00b: 75%
+ * - TOPS[1:0] - Timeout Period Select - 00b: 1024 cycles (03FFh)
+ */
+ rzv2h_wdt_setup(wdev, WDTCR_CKS_CLK_1 | WDTCR_RPSS_25 |
+ WDTCR_RPES_75 | WDTCR_TOPS_1024);
+
+ rzv2h_wdt_ping(wdev);
+
+ /* wait for underflow to trigger... */
+ udelay(5);
+
+ return 0;
+}
+
+static const struct watchdog_ops rzv2h_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rzv2h_wdt_start,
+ .stop = rzv2h_wdt_stop,
+ .ping = rzv2h_wdt_ping,
+ .restart = rzv2h_wdt_restart,
+};
+
+static int rzv2h_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzv2h_wdt_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->pclk = devm_clk_get_prepared(&pdev->dev, "pclk");
+ if (IS_ERR(priv->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->pclk), "no pclk");
+
+ priv->oscclk = devm_clk_get_prepared(&pdev->dev, "oscclk");
+ if (IS_ERR(priv->oscclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->oscclk), "no oscclk");
+
+ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
+ "failed to get cpg reset");
+
+ priv->wdev.max_hw_heartbeat_ms = (MILLI * MAX_TIMEOUT_CYCLES * CLOCK_DIV_BY_256) /
+ clk_get_rate(priv->oscclk);
+ dev_dbg(dev, "max hw timeout of %dms\n", priv->wdev.max_hw_heartbeat_ms);
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ priv->wdev.min_timeout = 1;
+ priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;
+ priv->wdev.info = &rzv2h_wdt_ident;
+ priv->wdev.ops = &rzv2h_wdt_ops;
+ priv->wdev.parent = dev;
+ watchdog_set_drvdata(&priv->wdev, priv);
+ watchdog_set_nowayout(&priv->wdev, nowayout);
+ watchdog_stop_on_unregister(&priv->wdev);
+
+ ret = watchdog_init_timeout(&priv->wdev, 0, dev);
+ if (ret)
+		dev_warn(dev, "Specified timeout invalid, using default\n");
+
+ return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
+}
+
+static const struct of_device_id rzv2h_wdt_ids[] = {
+ { .compatible = "renesas,r9a09g057-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2h_wdt_ids);
+
+static struct platform_driver rzv2h_wdt_driver = {
+ .driver = {
+ .name = "rzv2h_wdt",
+ .of_match_table = rzv2h_wdt_ids,
+ },
+ .probe = rzv2h_wdt_probe,
+};
+module_platform_driver(rzv2h_wdt_driver);
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2H(P) WDT Watchdog Driver");
+MODULE_LICENSE("GPL");
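/*
 * Worked example for the max_hw_heartbeat_ms computation in probe above,
 * assuming (hypothetically) a 24 MHz oscclk:
 *
 *	MILLI * MAX_TIMEOUT_CYCLES * CLOCK_DIV_BY_256 / rate
 *	  = 1000 * 16384 * 256 / 24000000 ~= 174 ms
 *
 * Since the 60 s default timeout far exceeds that, the watchdog core keeps
 * the hardware alive from a kernel worker in between userspace refreshes.
 */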
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 34a917221e31..6e91ee3fbfb5 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -164,7 +164,6 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations sa1100dog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sa1100dog_write,
.unlocked_ioctl = sa1100dog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 504be461f992..eaa68b54cf56 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -234,7 +234,6 @@ static int sbwdog_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations sbwdog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sbwdog_write,
.unlocked_ioctl = sbwdog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 7b974802dfc7..e9bf12918ed8 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -275,7 +275,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index d640b26e18a6..21a1f0b32070 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -205,7 +205,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index 4f8b9912fc51..a9fd1615b4c3 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -301,7 +301,6 @@ static int sbc8360_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations sbc8360_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sbc8360_write,
.open = sbc8360_open,
.release = sbc8360_close,
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 5e3a9ddb952e..1d291dc0a4a6 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -153,7 +153,6 @@ static int epx_c3_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations epx_c3_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = epx_c3_write,
.unlocked_ioctl = epx_c3_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index b8eb8d5ca1af..ff9e44825423 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -181,7 +181,6 @@ static int fitpc2_wdt_release(struct inode *inode, struct file *file)
static const struct file_operations fitpc2_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fitpc2_wdt_write,
.unlocked_ioctl = fitpc2_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index f22ebe89fe13..76a58715f665 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -304,7 +304,6 @@ static struct notifier_block sc1200wdt_notifier = {
static const struct file_operations sc1200wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sc1200wdt_write,
.unlocked_ioctl = sc1200wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index ca65468f4b9c..e849e1af267b 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -331,7 +331,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 409d49880170..76053158d259 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -334,7 +334,6 @@ static int sch311x_wdt_close(struct inode *inode, struct file *file)
static const struct file_operations sch311x_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sch311x_wdt_write,
.unlocked_ioctl = sch311x_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index 7b5e18323f3f..4dd8549e3674 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -198,7 +198,6 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations scx200_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = scx200_wdt_write,
.unlocked_ioctl = scx200_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 7463df479d11..97ca500ec8a8 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -502,7 +502,6 @@ static int wb_smsc_wdt_notify_sys(struct notifier_block *this,
static const struct file_operations wb_smsc_wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wb_smsc_wdt_write,
.unlocked_ioctl = wb_smsc_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 3d57670befe1..ac709dc31a65 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -12,6 +12,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include <linux/io.h>
@@ -160,10 +161,17 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ts72xx_wdt_of_ids[] = {
+ { .compatible = "technologic,ts7200-wdt" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ts72xx_wdt_of_ids);
+
static struct platform_driver ts72xx_wdt_driver = {
.probe = ts72xx_wdt_probe,
.driver = {
.name = "ts72xx-wdt",
+ .of_match_table = ts72xx_wdt_of_ids,
},
};
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index f2650863fd02..1937084c182c 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -299,7 +299,6 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 31bf21ceaf48..3776030fa7c6 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -443,7 +443,6 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index a8a1ed215e1e..291109349e73 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -227,7 +227,6 @@ static int wafwdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wafwdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wafwdt_write,
.unlocked_ioctl = wafwdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index c00627825de8..d4fe0bc82211 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -469,7 +469,6 @@ static int wdrtas_reboot(struct notifier_block *this,
static const struct file_operations wdrtas_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdrtas_write,
.unlocked_ioctl = wdrtas_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -485,7 +484,6 @@ static struct miscdevice wdrtas_miscdev = {
static const struct file_operations wdrtas_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = wdrtas_temp_read,
.open = wdrtas_temp_open,
.release = wdrtas_temp_close,
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 183876156243..3980d60bacd8 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -520,7 +520,6 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -536,7 +535,6 @@ static struct miscdevice wdt_miscdev = {
static const struct file_operations wdt_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = wdt_temp_read,
.open = wdt_temp_open,
.release = wdt_temp_release,
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 5b7be7a62d54..78681d9f7d53 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -178,7 +178,6 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index c9b8e863f70f..4f449ac4dda4 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -419,7 +419,6 @@ static int wdt977_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdt977_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdt977_write,
.unlocked_ioctl = wdt977_ioctl,
.compat_ioctl = compat_ptr_ioctl,
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index d5e56b601351..dc5f29560e9b 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -563,7 +563,6 @@ static int wdtpci_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdtpci_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = wdtpci_write,
.unlocked_ioctl = wdtpci_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -579,7 +578,6 @@ static struct miscdevice wdtpci_miscdev = {
static const struct file_operations wdtpci_temp_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = wdtpci_temp_read,
.open = wdtpci_temp_open,
.release = wdtpci_temp_release,
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d5989871dd5d..62035fe16bb8 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -177,8 +177,8 @@ config XEN_GRANT_DMA_ALLOC
config SWIOTLB_XEN
def_bool y
+ depends on ARCH_HAS_DMA_OPS
depends on XEN_PV || ARM || ARM64
- select DMA_OPS
select SWIOTLB
config XEN_PCI_STUB
@@ -261,6 +261,7 @@ config XEN_SCSI_BACKEND
config XEN_PRIVCMD
tristate "Xen hypercall passthrough driver"
depends on XEN
+	imply XEN_PCIDEV_BACKEND
default m
help
The hypercall passthrough driver allows privileged user programs to
@@ -348,10 +349,10 @@ config XEN_GRANT_DMA_IOMMU
config XEN_GRANT_DMA_OPS
bool
- select DMA_OPS
config XEN_VIRTIO
bool "Xen virtio support"
+ depends on ARCH_HAS_DMA_OPS
depends on VIRTIO
select XEN_GRANT_DMA_OPS
select XEN_GRANT_DMA_IOMMU if OF
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
index 6893c79fd2a1..9e2096524fbc 100644
--- a/drivers/xen/acpi.c
+++ b/drivers/xen/acpi.c
@@ -30,6 +30,7 @@
* IN THE SOFTWARE.
*/
+#include <linux/pci.h>
#include <xen/acpi.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -75,3 +76,52 @@ int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
return xen_acpi_notify_hypervisor_state(sleep_state, val_a,
val_b, true);
}
+
+struct acpi_prt_entry {
+ struct acpi_pci_id id;
+ u8 pin;
+ acpi_handle link;
+ u32 index;
+};
+
+int xen_acpi_get_gsi_info(struct pci_dev *dev,
+ int *gsi_out,
+ int *trigger_out,
+ int *polarity_out)
+{
+ int gsi;
+ u8 pin;
+ struct acpi_prt_entry *entry;
+ int trigger = ACPI_LEVEL_SENSITIVE;
+ int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
+ ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
+
+ if (!dev || !gsi_out || !trigger_out || !polarity_out)
+ return -EINVAL;
+
+ pin = dev->pin;
+ if (!pin)
+ return -EINVAL;
+
+ entry = acpi_pci_irq_lookup(dev, pin);
+ if (entry) {
+ if (entry->link)
+ gsi = acpi_pci_link_allocate_irq(entry->link,
+ entry->index,
+ &trigger, &polarity,
+ NULL);
+ else
+ gsi = entry->index;
+	} else {
+		gsi = -1;
+	}
+
+ if (gsi < 0)
+ return -EINVAL;
+
+ *gsi_out = gsi;
+ *trigger_out = trigger;
+ *polarity_out = polarity;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_acpi_get_gsi_info);
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 9b7fcc7dbb38..7e4a13e632dc 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -694,7 +694,6 @@ static const struct file_operations evtchn_fops = {
.fasync = evtchn_fasync,
.open = evtchn_open,
.release = evtchn_release,
- .llseek = no_llseek,
};
static struct miscdevice evtchn_miscdev = {
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index e9ac3b8c4167..4f65b641c054 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -182,7 +182,6 @@ static const struct file_operations xen_mce_chrdev_ops = {
.read = xen_mce_chrdev_read,
.poll = xen_mce_chrdev_poll,
.unlocked_ioctl = xen_mce_chrdev_ioctl,
- .llseek = no_llseek,
};
static struct miscdevice xen_mce_chrdev_device = {
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index a2facd8f7e51..416f231809cb 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -173,6 +173,19 @@ static int xen_remove_device(struct device *dev)
return r;
}
+int xen_reset_device(const struct pci_dev *dev)
+{
+ struct pci_device_reset device = {
+ .dev.seg = pci_domain_nr(dev->bus),
+ .dev.bus = dev->bus->number,
+ .dev.devfn = dev->devfn,
+ .flags = PCI_DEVICE_RESET_FLR,
+ };
+
+ return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
+}
+EXPORT_SYMBOL_GPL(xen_reset_device);
+
static int xen_pci_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 9563650dfbaf..3273cb8c2a66 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -46,6 +46,9 @@
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>
+#ifdef CONFIG_XEN_ACPI
+#include <xen/acpi.h>
+#endif
#include "privcmd.h"
@@ -844,6 +847,31 @@ out:
return rc;
}
+static long privcmd_ioctl_pcidev_get_gsi(struct file *file, void __user *udata)
+{
+#if defined(CONFIG_XEN_ACPI)
+ int rc = -EINVAL;
+ struct privcmd_pcidev_get_gsi kdata;
+
+ if (copy_from_user(&kdata, udata, sizeof(kdata)))
+ return -EFAULT;
+
+ if (IS_REACHABLE(CONFIG_XEN_PCIDEV_BACKEND))
+ rc = pcistub_get_gsi_from_sbdf(kdata.sbdf);
+
+ if (rc < 0)
+ return rc;
+
+ kdata.gsi = rc;
+ if (copy_to_user(udata, &kdata, sizeof(kdata)))
+ return -EFAULT;
+
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
@@ -959,12 +987,12 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);
f = fdget(irqfd->fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto error_kfree;
}
- kirqfd->eventfd = eventfd_ctx_fileget(f.file);
+ kirqfd->eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(kirqfd->eventfd)) {
ret = PTR_ERR(kirqfd->eventfd);
goto error_fd_put;
@@ -995,7 +1023,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
* Check if there was an event already pending on the eventfd before we
* registered, and trigger it as if we didn't miss it.
*/
- events = vfs_poll(f.file, &kirqfd->pt);
+ events = vfs_poll(fd_file(f), &kirqfd->pt);
if (events & EPOLLIN)
irqfd_inject(kirqfd);
@@ -1345,12 +1373,12 @@ static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
return -ENOMEM;
f = fdget(ioeventfd->event_fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto error_kfree;
}
- kioeventfd->eventfd = eventfd_ctx_fileget(f.file);
+ kioeventfd->eventfd = eventfd_ctx_fileget(fd_file(f));
fdput(f);
if (IS_ERR(kioeventfd->eventfd)) {
@@ -1543,6 +1571,10 @@ static long privcmd_ioctl(struct file *file,
ret = privcmd_ioctl_ioeventfd(file, udata);
break;
+ case IOCTL_PRIVCMD_PCIDEV_GET_GSI:
+ ret = privcmd_ioctl_pcidev_get_gsi(file, udata);
+ break;
+
default:
break;
}
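/*
 * On the IS_REACHABLE() guard in privcmd_ioctl_pcidev_get_gsi() above: it
 * evaluates true when the referenced option is built-in, or when both it
 * and the code being compiled are modular, i.e. whenever the symbol can
 * actually be linked against. Roughly:
 *
 *	IS_REACHABLE(CONFIG_FOO) :=
 *		IS_BUILTIN(CONFIG_FOO) ||
 *		(IS_MODULE(CONFIG_FOO) && this file is built as a module)
 */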
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 1948a9700c8f..cf568e899ee2 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -122,7 +122,7 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
if (err)
goto out;
- new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
+ new_state = (__force pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
if ((old_value & PM_OK_BITS) != new_value) {
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 4faebbb84999..2f3da5ac62cd 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -21,6 +21,9 @@
#include <xen/events.h>
#include <xen/pci.h>
#include <xen/xen.h>
+#ifdef CONFIG_XEN_ACPI
+#include <xen/acpi.h>
+#endif
#include <asm/xen/hypervisor.h>
#include <xen/interface/physdev.h>
#include "pciback.h"
@@ -53,6 +56,9 @@ struct pcistub_device {
struct pci_dev *dev;
struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
+#ifdef CONFIG_XEN_ACPI
+ int gsi;
+#endif
};
/* Access to pcistub_devices & seized_devices lists and the initialize_devices
@@ -85,10 +91,23 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
kref_init(&psdev->kref);
spin_lock_init(&psdev->lock);
+#ifdef CONFIG_XEN_ACPI
+ psdev->gsi = -1;
+#endif
return psdev;
}
+static int pcistub_reset_device_state(struct pci_dev *dev)
+{
+ __pci_reset_function_locked(dev);
+
+	if (!xen_pv_domain())
+		return xen_reset_device(dev);
+
+	return 0;
+}
+
/* Don't call this directly as it's called by pcistub_device_put */
static void pcistub_device_release(struct kref *kref)
{
@@ -107,7 +126,7 @@ static void pcistub_device_release(struct kref *kref)
/* Call the reset function which does not take lock as this
* is called from "unbind" which takes a device_lock mutex.
*/
- __pci_reset_function_locked(dev);
+ pcistub_reset_device_state(dev);
if (dev_data &&
pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
dev_info(&dev->dev, "Could not reload PCI state\n");
@@ -207,6 +226,25 @@ static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
return pci_dev;
}
+#ifdef CONFIG_XEN_ACPI
+int pcistub_get_gsi_from_sbdf(unsigned int sbdf)
+{
+ struct pcistub_device *psdev;
+ int domain = (sbdf >> 16) & 0xffff;
+ int bus = PCI_BUS_NUM(sbdf);
+ int slot = PCI_SLOT(sbdf);
+ int func = PCI_FUNC(sbdf);
+
+ psdev = pcistub_device_find(domain, bus, slot, func);
+
+ if (!psdev)
+ return -ENODEV;
+
+ return psdev->gsi;
+}
+EXPORT_SYMBOL_GPL(pcistub_get_gsi_from_sbdf);
+#endif
+
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func)
@@ -284,7 +322,7 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
* (so it's ready for the next domain)
*/
device_lock_assert(&dev->dev);
- __pci_reset_function_locked(dev);
+ pcistub_reset_device_state(dev);
dev_data = pci_get_drvdata(dev);
ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
@@ -354,11 +392,20 @@ static int pcistub_match(struct pci_dev *dev)
return found;
}
-static int pcistub_init_device(struct pci_dev *dev)
+static int pcistub_init_device(struct pcistub_device *psdev)
{
struct xen_pcibk_dev_data *dev_data;
+ struct pci_dev *dev;
+#ifdef CONFIG_XEN_ACPI
+ int gsi, trigger, polarity;
+#endif
int err = 0;
+ if (!psdev)
+ return -EINVAL;
+
+ dev = psdev->dev;
+
dev_dbg(&dev->dev, "initializing...\n");
/* The PCI backend is not intended to be a module (or to work with
@@ -420,9 +467,26 @@ static int pcistub_init_device(struct pci_dev *dev)
dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
else {
dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
- __pci_reset_function_locked(dev);
+ err = pcistub_reset_device_state(dev);
+ if (err)
+ goto config_release;
pci_restore_state(dev);
}
+
+#ifdef CONFIG_XEN_ACPI
+ if (xen_initial_domain() && xen_pvh_domain()) {
+ err = xen_acpi_get_gsi_info(dev, &gsi, &trigger, &polarity);
+ if (err) {
+ dev_err(&dev->dev, "Fail to get gsi info!\n");
+ goto config_release;
+ }
+ err = xen_pvh_setup_gsi(gsi, trigger, polarity);
+ if (err)
+ goto config_release;
+ psdev->gsi = gsi;
+ }
+#endif
+
/* Now disable the device (this also ensures some private device
* data is setup before we export)
*/
@@ -462,7 +526,7 @@ static int __init pcistub_init_devices_late(void)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
- err = pcistub_init_device(psdev->dev);
+ err = pcistub_init_device(psdev);
if (err) {
dev_err(&psdev->dev->dev,
"error %d initializing device\n", err);
@@ -532,7 +596,7 @@ static int pcistub_seize(struct pci_dev *dev,
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
/* don't want irqs disabled when calling pcistub_init_device */
- err = pcistub_init_device(psdev->dev);
+ err = pcistub_init_device(psdev);
spin_lock_irqsave(&pcistub_devices_lock, flags);
@@ -757,7 +821,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
}
clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
- res = (pci_ers_result_t)aer_op->err;
+ res = (__force pci_ers_result_t)aer_op->err;
return res;
}
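/*
 * Sketch of the SBDF layout decoded by pcistub_get_gsi_from_sbdf() above:
 * segment in bits 31:16, bus in 15:8, device in 7:3, function in 2:0.
 * A hypothetical encoder for the same layout:
 */
static inline unsigned int ex_make_sbdf(unsigned int seg, unsigned int bus,
					unsigned int slot, unsigned int func)
{
	return (seg << 16) | (bus << 8) | (slot << 3) | func;
}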
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 6f56640092a9..46f8916597e5 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -700,7 +700,6 @@ const struct file_operations xen_xenbus_fops = {
.open = xenbus_file_open,
.release = xenbus_file_release,
.poll = xenbus_file_poll,
- .llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);
diff --git a/fs/Kconfig b/fs/Kconfig
index a46b0cbc4d8f..949895cff872 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -288,6 +288,10 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
+config HUGETLB_PMD_PAGE_TABLE_SHARING
+ def_bool HUGETLB_PAGE
+ depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
+
config ARCH_HAS_GIGANTIC_PAGE
bool
@@ -382,6 +386,29 @@ config NFS_COMMON
depends on NFSD || NFS_FS || LOCKD
default y
+config NFS_COMMON_LOCALIO_SUPPORT
+ tristate
+ default n
+ default y if NFSD=y || NFS_FS=y
+ default m if NFSD=m && NFS_FS=m
+ select SUNRPC
+
+config NFS_LOCALIO
+ bool "NFS client and server support for LOCALIO auxiliary protocol"
+ depends on NFSD && NFS_FS
+ select NFS_COMMON_LOCALIO_SUPPORT
+ default n
+ help
+ Some NFS servers support an auxiliary NFS LOCALIO protocol
+ that is not an official part of the NFS protocol.
+
+ This option enables support for the LOCALIO protocol in the
+ kernel's NFS server and client. Enable this to permit local
+ NFS clients to bypass the network when issuing reads and
+ writes to the local NFS server.
+
+ If unsure, say N.
+
config NFS_V4_2_SSC_HELPER
bool
default y if NFS_V4_2
diff --git a/fs/Makefile b/fs/Makefile
index 6ecc9b0a53f2..61679fd587b7 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -129,3 +129,4 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/
obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_VBOXSF_FS) += vboxsf/
obj-$(CONFIG_ZONEFS_FS) += zonefs/
+obj-$(CONFIG_BPF_LSM) += bpf_fs_kfuncs.o
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index 9c65ffb8a523..a06296c8827d 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -134,13 +134,4 @@ struct afs_uvldbentry__xdr {
__be32 spares9;
};
-struct afs_address_list {
- refcount_t usage;
- unsigned int version;
- unsigned int nr_addrs;
- struct sockaddr_rxrpc addrs[];
-};
-
-extern void afs_put_address_list(struct afs_address_list *alist);
-
#endif /* AFS_VL_H */
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 492d857a3fa0..6762eff97517 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -420,6 +420,7 @@ const struct netfs_request_ops afs_req_ops = {
.begin_writeback = afs_begin_writeback,
.prepare_write = afs_prepare_write,
.issue_write = afs_issue_write,
+ .retry_request = afs_retry_request,
};
static void afs_add_open_mmap(struct afs_vnode *vnode)
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
index 3546b087e791..428721bbe4f6 100644
--- a/fs/afs/fs_operation.c
+++ b/fs/afs/fs_operation.c
@@ -201,7 +201,7 @@ void afs_wait_for_operation(struct afs_operation *op)
}
}
- if (op->call_responded)
+ if (op->call_responded && op->server)
set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);
if (!afs_op_error(op)) {
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 580de4adaaf6..b516d05b0fef 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -506,10 +506,10 @@ int afs_wait_for_one_fs_probe(struct afs_server *server, struct afs_endpoint_sta
finish_wait(&server->probe_wq, &wait);
dont_wait:
- if (estate->responsive_set & ~exclude)
- return 1;
if (test_bit(AFS_ESTATE_SUPERSEDED, &estate->flags))
return 0;
+ if (estate->responsive_set & ~exclude)
+ return 1;
if (is_intr && signal_pending(current))
return -ERESTARTSYS;
if (timo == 0)
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index ed09d4d4c211..d612983d6f38 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -632,8 +632,10 @@ iterate_address:
wait_for_more_probe_results:
error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
!(op->flags & AFS_OPERATION_UNINTR));
- if (!error)
+ if (error == 1)
goto iterate_address;
+ if (!error)
+ goto restart_from_beginning;
/* We've now had a failure to respond on all of a server's addresses -
* immediately probe them again and consider retrying the server.
@@ -644,10 +646,13 @@ wait_for_more_probe_results:
error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
!(op->flags & AFS_OPERATION_UNINTR));
switch (error) {
- case 0:
+ case 1:
op->flags &= ~AFS_OPERATION_RETRY_SERVER;
- trace_afs_rotate(op, afs_rotate_trace_retry_server, 0);
+ trace_afs_rotate(op, afs_rotate_trace_retry_server, 1);
goto retry_server;
+ case 0:
+ trace_afs_rotate(op, afs_rotate_trace_retry_server, 0);
+ goto restart_from_beginning;
case -ERESTARTSYS:
afs_op_set_error(op, error);
goto failed;
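/*
 * On the rotate.c change above: afs_wait_for_one_fs_probe() callers now
 * treat a return of 1 ("an address responded") as reason to iterate the
 * address list again, and 0 ("the endpoint state was superseded") as
 * reason to restart server rotation from the beginning.
 */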
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index 5cdfef3b551a..5bac803ea367 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -87,6 +87,13 @@ config BCACHEFS_SIX_OPTIMISTIC_SPIN
is held by another thread, spin for a short while, as long as the
thread owning the lock is running.
+config BCACHEFS_PATH_TRACEPOINTS
+ bool "Extra btree_path tracepoints"
+ depends on BCACHEFS_FS
+ help
+	  Enable extra tracepoints for debugging btree_path operations; these
+	  are normally left disabled because they fire at very high rates.
+
config MEAN_AND_VARIANCE_UNIT_TEST
tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index 0ab533a2b03b..56d20e219f59 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -69,6 +69,7 @@ bcachefs-y := \
printbuf.o \
quota.o \
rebalance.o \
+ rcu_pending.o \
recovery.o \
recovery_passes.o \
reflink.o \
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 331a17f3f113..87f1be9d4db4 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -361,7 +361,7 @@ retry:
bch2_trans_begin(trans);
acl = _acl;
- ret = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro_trans(trans, inode->ei_inum.subvol) ?:
bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
BTREE_ITER_intent);
if (ret)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index dc3a4024aab6..645b5ed4babb 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -30,6 +30,7 @@
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
+#include <linux/jiffies.h>
static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);
@@ -2183,7 +2184,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
* freespace/need_discard/need_gc_gens btrees as needed:
*/
while (1) {
- if (last_updated + HZ * 10 < jiffies) {
+ if (time_after(jiffies, last_updated + HZ * 10)) {
bch_info(ca, "%s: currently at %llu/%llu",
__func__, iter.pos.offset, ca->mi.nbuckets);
last_updated = jiffies;
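
The switch to time_after() matters because jiffies is an unsigned counter that wraps: the raw comparison misfires once last_updated + HZ * 10 overflows, while time_after() compares via a signed subtraction that stays correct across the wrap. A standalone sketch of the failure mode (time_after_sketch mirrors the presumed kernel definition):

	#include <stdio.h>
	#include <limits.h>

	#define time_after_sketch(a, b) ((long)((b) - (a)) < 0)

	int main(void)
	{
		unsigned long hz   = 100;
		unsigned long last = ULONG_MAX - 5 * hz;	/* 500 ticks before the wrap */
		unsigned long now  = ULONG_MAX - 1 * hz;	/* only 400 ticks later */

		/* last + hz * 10 wraps to a small value, so the raw check
		 * fires ~600 ticks early; time_after() says "not yet". */
		printf("raw:        %d\n", last + hz * 10 < now);			/* 1 */
		printf("time_after: %d\n", time_after_sketch(now, last + hz * 10));	/* 0 */
		return 0;
	}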
@@ -2297,6 +2298,36 @@ int bch2_fs_freespace_init(struct bch_fs *c)
return 0;
}
+/* device removal */
+
+int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct bpos start = POS(ca->dev_idx, 0);
+ struct bpos end = POS(ca->dev_idx, U64_MAX);
+ int ret;
+
+ /*
+ * We clear the LRU and need_discard btrees first so that we don't race
+ * with bch2_do_invalidates() and bch2_do_discards()
+ */
+ ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?:
+ bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
+ BTREE_TRIGGER_norun, NULL) ?:
+ bch2_dev_usage_remove(c, ca->dev_idx);
+ bch_err_msg(ca, ret, "removing dev alloc info");
+ return ret;
+}
+
/* Bucket IO clocks: */
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
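
bch2_dev_remove_alloc() chains its steps with the GNU ?: extension: a ?: b evaluates to a when a is non-zero, so the first step to return a non-zero error code short-circuits everything after it, and because ?: evaluates left to right, the comment's required ordering (LRU and need_discard first, to avoid racing the background discard/invalidate workers) is simply the textual order of the operands. A hedged equivalent of the idiom with hypothetical step functions:

	int ret = step1();	/* each step returns 0 or a -BCH_ERR_* code */
	if (!ret)
		ret = step2();
	if (!ret)
		ret = step3();
	/* identical in effect to: ret = step1() ?: step2() ?: step3(); */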
@@ -2432,13 +2463,15 @@ static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
- unsigned i;
+ lockdep_assert_held(&c->state_lock);
/* First, remove device from allocation groups: */
- for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
+ for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
clear_bit(ca->dev_idx, c->rw_devs[i].d);
+ c->rw_devs_change_count++;
+
/*
* Capacity is calculated based off of devices in allocation groups:
*/
@@ -2467,11 +2500,13 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
- unsigned i;
+ lockdep_assert_held(&c->state_lock);
- for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
+ for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
if (ca->mi.data_allowed & (1 << i))
set_bit(ca->dev_idx, c->rw_devs[i].d);
+
+ c->rw_devs_change_count++;
}
void bch2_dev_allocator_background_exit(struct bch_dev *ca)
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index fd790b03fbe1..f8e87c6721b1 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -16,7 +16,7 @@ enum bch_validate_flags;
static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, pos.inode);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode);
bool ret = ca && bucket_valid(ca, pos.offset);
rcu_read_unlock();
return ret;
@@ -338,6 +338,7 @@ static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct
int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
int bch2_fs_freespace_init(struct bch_fs *);
+int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *);
void bch2_recalc_capacity(struct bch_fs *);
u64 bch2_min_rw_member_capacity(struct bch_fs *);
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 8563c2d26847..d0e0b56892e3 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -600,6 +600,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
enum bch_watermark watermark,
enum bch_data_type data_type,
struct closure *cl,
+ bool nowait,
struct bch_dev_usage *usage)
{
struct bch_fs *c = trans->c;
@@ -609,7 +610,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
struct bucket_alloc_state s = {
.btree_bitmap = data_type == BCH_DATA_btree,
};
- bool waiting = false;
+ bool waiting = nowait;
again:
bch2_dev_usage_read_fast(ca, usage);
avail = dev_buckets_free(ca, *usage, watermark);
@@ -685,7 +686,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
bch2_trans_do(c, NULL, NULL, 0,
PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
- data_type, cl, &usage)));
+ data_type, cl, false, &usage)));
return ob;
}
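
Seeding waiting = nowait up front makes BCH_WRITE_ALLOC_NOWAIT callers behave as if they had already parked on the closure once, so the allocator falls straight through to returning an error instead of blocking. A hedged reading of the retry loop this short-circuits (shape presumed, not quoted):

	/* presumed slow path of bch2_bucket_alloc_trans(): */
	if (freelist_empty) {
		if (!waiting && cl) {
			closure_wait(&c->freelist_wait, cl);
			waiting = true;
			goto again;	/* one more pass before giving up */
		}
		return ERR_PTR(-BCH_ERR_freelist_empty);
	}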
@@ -748,7 +749,6 @@ static int add_new_bucket(struct bch_fs *c,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- unsigned flags,
struct open_bucket *ob)
{
unsigned durability = ob_dev(c, ob)->mi.durability;
@@ -775,7 +775,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- unsigned flags,
+ enum bch_write_flags flags,
enum bch_data_type data_type,
enum bch_watermark watermark,
struct closure *cl)
@@ -801,7 +801,8 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
continue;
}
- ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
+ ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
+ cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
if (!IS_ERR(ob))
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
bch2_dev_put(ca);
@@ -815,7 +816,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
if (add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, flags, ob)) {
+ have_cache, ob)) {
ret = 0;
break;
}
@@ -841,7 +842,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
unsigned *nr_effective,
bool *have_cache,
enum bch_watermark watermark,
- unsigned flags,
+ enum bch_write_flags flags,
struct closure *cl)
{
struct bch_fs *c = trans->c;
@@ -883,7 +884,7 @@ got_bucket:
ret = add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, flags, ob);
+ have_cache, ob);
out_put_head:
bch2_ec_stripe_head_put(c, h);
return ret;
@@ -922,7 +923,7 @@ static int bucket_alloc_set_writepoint(struct bch_fs *c,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- bool ec, unsigned flags)
+ bool ec)
{
struct open_buckets ptrs_skip = { .nr = 0 };
struct open_bucket *ob;
@@ -934,7 +935,7 @@ static int bucket_alloc_set_writepoint(struct bch_fs *c,
have_cache, ec, ob))
ret = add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, flags, ob);
+ have_cache, ob);
else
ob_push(c, &ptrs_skip, ob);
}
@@ -950,8 +951,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache, bool ec,
- enum bch_watermark watermark,
- unsigned flags)
+ enum bch_watermark watermark)
{
int i, ret = 0;
@@ -983,7 +983,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
ret = add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, flags, ob);
+ have_cache, ob);
if (ret)
break;
}
@@ -1003,7 +1003,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
unsigned *nr_effective,
bool *have_cache,
enum bch_watermark watermark,
- unsigned flags,
+ enum bch_write_flags flags,
struct closure *_cl)
{
struct bch_fs *c = trans->c;
@@ -1022,18 +1022,15 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
open_bucket_for_each(c, ptrs, ob, i)
__clear_bit(ob->dev, devs.d);
- if (erasure_code && ec_open_bucket(c, ptrs))
- return 0;
-
ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
nr_replicas, nr_effective,
- have_cache, erasure_code, flags);
+ have_cache, erasure_code);
if (ret)
return ret;
ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
nr_replicas, nr_effective,
- have_cache, erasure_code, watermark, flags);
+ have_cache, erasure_code, watermark);
if (ret)
return ret;
@@ -1074,12 +1071,12 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
unsigned *nr_effective,
bool *have_cache,
enum bch_watermark watermark,
- unsigned flags,
+ enum bch_write_flags flags,
struct closure *cl)
{
int ret;
- if (erasure_code) {
+ if (erasure_code && !ec_open_bucket(trans->c, ptrs)) {
ret = __open_bucket_add_buckets(trans, ptrs, wp,
devs_have, target, erasure_code,
nr_replicas, nr_effective, have_cache,
@@ -1376,7 +1373,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
unsigned nr_replicas,
unsigned nr_replicas_required,
enum bch_watermark watermark,
- unsigned flags,
+ enum bch_write_flags flags,
struct closure *cl,
struct write_point **wp_ret)
{
@@ -1392,8 +1389,6 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
erasure_code = false;
- BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
-
BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
ptrs.nr = 0;
@@ -1498,11 +1493,12 @@ err:
try_decrease_writepoints(trans, write_points_nr))
goto retry;
- if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
+ if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+ ret = -BCH_ERR_bucket_alloc_blocked;
+
+ if (cl && !(flags & BCH_WRITE_ALLOC_NOWAIT) &&
bch2_err_matches(ret, BCH_ERR_freelist_empty))
- return cl
- ? -BCH_ERR_bucket_alloc_blocked
- : -BCH_ERR_ENOSPC_bucket_alloc;
+ ret = -BCH_ERR_bucket_alloc_blocked;
return ret;
}
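
The rewritten error remapping distinguishes the two "try again later" cases explicitly: with a closure to wait on, open_buckets_empty always becomes bucket_alloc_blocked, while freelist_empty becomes bucket_alloc_blocked only when the caller did not pass BCH_WRITE_ALLOC_NOWAIT; every other case, including the ENOSPC variants, now propagates unchanged rather than being forced to one of two outcomes. A hedged summary of the mapping:

	/* cl != NULL, open_buckets_empty             -> bucket_alloc_blocked */
	/* cl != NULL, !ALLOC_NOWAIT, freelist_empty  -> bucket_alloc_blocked */
	/* anything else                              -> ret unchanged        */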
@@ -1733,13 +1729,6 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
nr[c->open_buckets[i].data_type]++;
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 12);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
-
bch2_dev_usage_to_text(out, ca, &stats);
prt_newline(out);
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
index 386d231ceca3..1a16fd5bd4f8 100644
--- a/fs/bcachefs/alloc_foreground.h
+++ b/fs/bcachefs/alloc_foreground.h
@@ -155,9 +155,10 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
return ret;
}
+enum bch_write_flags;
int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
struct dev_stripe_state *, struct bch_devs_mask *,
- unsigned, unsigned *, bool *, unsigned,
+ unsigned, unsigned *, bool *, enum bch_write_flags,
enum bch_data_type, enum bch_watermark,
struct closure *);
@@ -167,7 +168,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *,
struct bch_devs_list *,
unsigned, unsigned,
enum bch_watermark,
- unsigned,
+ enum bch_write_flags,
struct closure *,
struct write_point **);
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index d4da6343efa9..47455a85c909 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -9,6 +9,7 @@
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "checksum.h"
+#include "disk_accounting.h"
#include "error.h"
#include <linux/mm.h>
@@ -53,7 +54,7 @@ int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k,
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, bp.k->p.inode);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
if (!ca) {
/* these will be caught by fsck */
rcu_read_unlock();
@@ -87,7 +88,7 @@ void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer
void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, k.k->p.inode);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode);
if (ca) {
struct bpos bucket = bp_pos_to_bucket(ca, k.k->p);
rcu_read_unlock();
@@ -500,7 +501,7 @@ found:
prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree));
bch2_bkey_val_to_text(&buf, c, extent2);
- struct nonce nonce = extent_nonce(extent.k->version, p.crc);
+ struct nonce nonce = extent_nonce(extent.k->bversion, p.crc);
struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes);
if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum),
trans, dup_backpointer_to_bad_csum_extent,
@@ -671,7 +672,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
continue;
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);
if (ca)
bch2_extent_ptr_to_bp(c, ca, btree, level, k, p, entry, &bucket_pos, &bp);
rcu_read_unlock();
@@ -750,10 +751,12 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
s64 mem_may_pin = mem_may_pin_bytes(c);
int ret = 0;
+ bch2_btree_cache_unpin(c);
+
btree_interior_mask |= btree_leaf_mask;
- c->btree_cache.pinned_nodes_leaf_mask = btree_leaf_mask;
- c->btree_cache.pinned_nodes_interior_mask = btree_interior_mask;
+ c->btree_cache.pinned_nodes_mask[0] = btree_leaf_mask;
+ c->btree_cache.pinned_nodes_mask[1] = btree_interior_mask;
c->btree_cache.pinned_nodes_start = start;
c->btree_cache.pinned_nodes_end = *end = BBPOS_MAX;
@@ -775,6 +778,7 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
BBPOS(btree, b->key.k.p);
break;
}
+ bch2_node_pin(c, b);
0;
}));
}
@@ -782,12 +786,80 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
return ret;
}
+struct progress_indicator_state {
+ unsigned long next_print;
+ u64 nodes_seen;
+ u64 nodes_total;
+ struct btree *last_node;
+};
+
+static inline void progress_init(struct progress_indicator_state *s,
+ struct bch_fs *c,
+ u64 btree_id_mask)
+{
+ memset(s, 0, sizeof(*s));
+
+ s->next_print = jiffies + HZ * 10;
+
+ for (unsigned i = 0; i < BTREE_ID_NR; i++) {
+ if (!(btree_id_mask & BIT_ULL(i)))
+ continue;
+
+ struct disk_accounting_pos acc = {
+ .type = BCH_DISK_ACCOUNTING_btree,
+ .btree.id = i,
+ };
+
+ u64 v;
+ bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
+ s->nodes_total += div64_ul(v, btree_sectors(c));
+ }
+}
+
+static inline bool progress_update_p(struct progress_indicator_state *s)
+{
+ bool ret = time_after_eq(jiffies, s->next_print);
+
+ if (ret)
+ s->next_print = jiffies + HZ * 10;
+ return ret;
+}
+
+static void progress_update_iter(struct btree_trans *trans,
+ struct progress_indicator_state *s,
+ struct btree_iter *iter,
+ const char *msg)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b = path_l(btree_iter_path(trans, iter))->b;
+
+ s->nodes_seen += b != s->last_node;
+ s->last_node = b;
+
+ if (progress_update_p(s)) {
+ struct printbuf buf = PRINTBUF;
+ unsigned percent = s->nodes_total
+ ? div64_u64(s->nodes_seen * 100, s->nodes_total)
+ : 0;
+
+ prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ",
+ msg, percent, s->nodes_seen, s->nodes_total);
+ bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos));
+
+ bch_info(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
struct extents_to_bp_state *s)
{
struct bch_fs *c = trans->c;
+ struct progress_indicator_state progress;
int ret = 0;
+ progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_extents)|BIT_ULL(BTREE_ID_reflink));
+
for (enum btree_id btree_id = 0;
btree_id < btree_id_nr_alive(c);
btree_id++) {
@@ -805,6 +877,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
BTREE_ITER_prefetch);
ret = for_each_btree_key_continue(trans, iter, 0, k, ({
+ progress_update_iter(trans, &progress, &iter, "extents_to_backpointers");
check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}));
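
The progress indicator added above is self-contained: progress_init() sizes the job from the on-disk accounting for each btree in the mask, and progress_update_iter() ticks the node count and logs at most once per ten seconds. A hedged usage sketch for wiring it into another walk (the btree mask and message here are illustrative, not from this patch):

	struct progress_indicator_state progress;

	progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc));

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_prefetch, k, ({
		progress_update_iter(trans, &progress, &iter, "my_pass");
		/* ... per-key work ... */
		0;
	}));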
@@ -865,8 +938,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
bch2_trans_put(trans);
bch2_bkey_buf_exit(&s.last_flushed, c);
- c->btree_cache.pinned_nodes_leaf_mask = 0;
- c->btree_cache.pinned_nodes_interior_mask = 0;
+ bch2_btree_cache_unpin(c);
bch_err_fn(c, ret);
return ret;
@@ -920,19 +992,24 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
struct bbpos start,
struct bbpos end)
{
+ struct bch_fs *c = trans->c;
struct bkey_buf last_flushed;
+ struct progress_indicator_state progress;
bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);
+ progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers));
int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_one_backpointer(trans, start, end,
- bkey_s_c_to_backpointer(k),
- &last_flushed));
-
- bch2_bkey_buf_exit(&last_flushed, trans->c);
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter, "backpointers_to_extents");
+ check_one_backpointer(trans, start, end,
+ bkey_s_c_to_backpointer(k),
+ &last_flushed);
+ }));
+
+ bch2_bkey_buf_exit(&last_flushed, c);
return ret;
}
@@ -977,8 +1054,7 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
}
bch2_trans_put(trans);
- c->btree_cache.pinned_nodes_leaf_mask = 0;
- c->btree_cache.pinned_nodes_interior_mask = 0;
+ bch2_btree_cache_unpin(c);
bch_err_fn(c, ret);
return ret;
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index 7daecadb764e..3b29fdf519dd 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -134,28 +134,37 @@ static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
}
}
-static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
+static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
const union bch_extent_entry *entry,
- struct bpos *bucket_pos, struct bch_backpointer *bp)
+ struct bpos *bucket_pos, struct bch_backpointer *bp,
+ u64 sectors)
{
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
- s64 sectors = level ? btree_sectors(c) : k.k->size;
u32 bucket_offset;
-
*bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
*bp = (struct bch_backpointer) {
.btree_id = btree_id,
.level = level,
- .data_type = data_type,
+ .data_type = bch2_bkey_ptr_data_type(k, p, entry),
.bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
p.crc.offset,
- .bucket_len = ptr_disk_sectors(sectors, p),
+ .bucket_len = sectors,
.pos = k.k->p,
};
}
+static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry,
+ struct bpos *bucket_pos, struct bch_backpointer *bp)
+{
+ u64 sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);
+
+ __bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors);
+}
+
int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int,
struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 0c7086e00d18..f4151ee51b03 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -542,7 +542,7 @@ struct bch_dev {
* gc_gens_lock, for device resize - holding any is sufficient for
* access: Or rcu_read_lock(), but only for dev_ptr_stale():
*/
- struct bucket_array __rcu *buckets_gc;
+ GENRADIX(struct bucket) buckets_gc;
struct bucket_gens __rcu *bucket_gens;
u8 *oldest_gen;
unsigned long *buckets_nouse;
@@ -594,6 +594,7 @@ struct bch_dev {
#define BCH_FS_FLAGS() \
x(new_fs) \
x(started) \
+ x(clean_recovery) \
x(btree_running) \
x(accounting_replay_done) \
x(may_go_rw) \
@@ -776,7 +777,7 @@ struct bch_fs {
unsigned nsec_per_time_unit;
u64 features;
u64 compat;
- unsigned long errors_silent[BITS_TO_LONGS(BCH_SB_ERR_MAX)];
+ unsigned long errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)];
u64 btrees_lost_data;
} sb;
@@ -871,6 +872,7 @@ struct bch_fs {
/* ALLOCATION */
struct bch_devs_mask rw_devs[BCH_DATA_NR];
+ unsigned long rw_devs_change_count;
u64 capacity; /* sectors */
u64 reserved; /* sectors */
@@ -1023,6 +1025,7 @@ struct bch_fs {
/* fs.c */
struct list_head vfs_inodes_list;
struct mutex vfs_inodes_lock;
+ struct rhashtable vfs_inodes_table;
/* VFS IO PATH - fs-io.c */
struct bio_set writepage_bioset;
@@ -1044,8 +1047,6 @@ struct bch_fs {
* for signaling to the toplevel code which pass we want to run now.
*/
enum bch_recovery_pass curr_recovery_pass;
- /* bitmap of explicitly enabled recovery passes: */
- u64 recovery_passes_explicit;
/* bitmask of recovery passes that we actually ran */
u64 recovery_passes_complete;
/* never rewinds version of curr_recovery_pass */
@@ -1085,7 +1086,6 @@ struct bch_fs {
u64 __percpu *counters;
unsigned copy_gc_enabled:1;
- bool promote_whole_extents;
struct bch2_time_stats times[BCH_TIME_STAT_NR];
@@ -1195,12 +1195,15 @@ static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
{
struct timespec64 t;
+ s64 sec;
s32 rem;
time += c->sb.time_base_lo;
- t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
- t.tv_nsec = rem * c->sb.nsec_per_time_unit;
+ sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
+
+ set_normalized_timespec64(&t, sec, rem * (s64)c->sb.nsec_per_time_unit);
+
return t;
}
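
Using set_normalized_timespec64() fixes the representation of timestamps before the filesystem's time base: for a negative time, div_s64_rem() returns a negative remainder, and the old direct assignment produced a negative tv_nsec, which is not a valid timespec64. Normalization borrows from tv_sec instead. A hedged sketch with a time one unit before the base:

	s32 rem;
	s64 sec = div_s64_rem(-1, time_units_per_sec, &rem);	/* sec = 0, rem = -1 */

	/* old:  tv_sec = 0,  tv_nsec = -nsec_per_time_unit       (invalid)  */
	/* new:  tv_sec = -1, tv_nsec = NSEC_PER_SEC - nsec_per_time_unit    */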
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 14ce726bf5a3..84832c2d4df9 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -217,13 +217,13 @@ struct bkey {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u8 pad[1];
- struct bversion version;
+ struct bversion bversion;
__u32 size; /* extent size, in sectors */
struct bpos p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
struct bpos p;
__u32 size; /* extent size, in sectors */
- struct bversion version;
+ struct bversion bversion;
__u8 pad[1];
#endif
@@ -328,8 +328,8 @@ enum bch_bkey_fields {
bkey_format_field(OFFSET, p.offset), \
bkey_format_field(SNAPSHOT, p.snapshot), \
bkey_format_field(SIZE, size), \
- bkey_format_field(VERSION_HI, version.hi), \
- bkey_format_field(VERSION_LO, version.lo), \
+ bkey_format_field(VERSION_HI, bversion.hi), \
+ bkey_format_field(VERSION_LO, bversion.lo), \
}, \
})
@@ -795,6 +795,8 @@ LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63);
+LE64_BITMASK(BCH_SB_PROMOTE_WHOLE_EXTENTS,
+ struct bch_sb, flags[0], 63, 64);
LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1], 4, 8);
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index e34cb2bf329c..41df24a53d97 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -214,9 +214,9 @@ static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
#define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 })
#define MAX_VERSION ((struct bversion) { .hi = ~0, .lo = ~0ULL })
-static __always_inline int bversion_zero(struct bversion v)
+static __always_inline bool bversion_zero(struct bversion v)
{
- return !bversion_cmp(v, ZERO_VERSION);
+ return bversion_cmp(v, ZERO_VERSION) == 0;
}
#ifdef CONFIG_BCACHEFS_DEBUG
@@ -554,8 +554,8 @@ static inline void bch2_bkey_pack_test(void) {}
x(BKEY_FIELD_OFFSET, p.offset) \
x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
x(BKEY_FIELD_SIZE, size) \
- x(BKEY_FIELD_VERSION_HI, version.hi) \
- x(BKEY_FIELD_VERSION_LO, version.lo)
+ x(BKEY_FIELD_VERSION_HI, bversion.hi) \
+ x(BKEY_FIELD_VERSION_LO, bversion.lo)
struct bkey_format_state {
u64 field_min[BKEY_NR_FIELDS];
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 88d8958281e8..e7ac227ba7e8 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -289,7 +289,7 @@ void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
bch2_bpos_to_text(out, k->p);
- prt_printf(out, " len %u ver %llu", k->size, k->version.lo);
+ prt_printf(out, " len %u ver %llu", k->size, k->bversion.lo);
} else {
prt_printf(out, "(null)");
}
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 3df3dd2723a1..018fb72e32d3 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -70,7 +70,7 @@ bool bch2_bkey_normalize(struct bch_fs *, struct bkey_s);
static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct bkey *r)
{
return l->type == r->type &&
- !bversion_cmp(l->version, r->version) &&
+ !bversion_cmp(l->bversion, r->bversion) &&
bpos_eq(l->p, bkey_start_pos(r));
}
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 575e1d0b6eeb..d1f6092624d8 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -304,11 +304,6 @@ struct bkey_float {
};
#define BKEY_MANTISSA_BITS 16
-static unsigned bkey_float_byte_offset(unsigned idx)
-{
- return idx * sizeof(struct bkey_float);
-}
-
struct ro_aux_tree {
u8 nothing[0];
struct bkey_float f[];
@@ -328,8 +323,7 @@ static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
return t->aux_data_offset;
case BSET_RO_AUX_TREE:
return t->aux_data_offset +
- DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
- t->size * sizeof(u8), 8);
+ DIV_ROUND_UP(t->size * sizeof(struct bkey_float), 8);
case BSET_RW_AUX_TREE:
return t->aux_data_offset +
DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
@@ -360,14 +354,6 @@ static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
return __aux_tree_base(b, t);
}
-static u8 *ro_aux_tree_prev(const struct btree *b,
- const struct bset_tree *t)
-{
- EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
-
- return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
-}
-
static struct bkey_float *bkey_float(const struct btree *b,
const struct bset_tree *t,
unsigned idx)
@@ -479,15 +465,6 @@ static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
bkey_float(b, t, j)->key_offset);
}
-static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
- const struct bset_tree *t,
- unsigned j)
-{
- unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
-
- return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
-}
-
static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
const struct bset_tree *t)
{
@@ -585,8 +562,7 @@ static unsigned rw_aux_tree_bsearch(struct btree *b,
}
static inline unsigned bkey_mantissa(const struct bkey_packed *k,
- const struct bkey_float *f,
- unsigned idx)
+ const struct bkey_float *f)
{
u64 v;
@@ -617,7 +593,7 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
struct bkey_packed *m = tree_to_bkey(b, t, j);
struct bkey_packed *l = is_power_of_2(j)
? min_key
- : tree_to_prev_bkey(b, t, j >> ffs(j));
+ : tree_to_bkey(b, t, j >> ffs(j));
struct bkey_packed *r = is_power_of_2(j + 1)
? max_key
: tree_to_bkey(b, t, j >> (ffz(j) + 1));
@@ -668,7 +644,7 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
f->exponent = shift;
- mantissa = bkey_mantissa(m, f, j);
+ mantissa = bkey_mantissa(m, f);
/*
* If we've got garbage bits, set them to all 1s - it's legal for the
@@ -690,8 +666,7 @@ static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
{
- return __bset_tree_capacity(b, t) /
- (sizeof(struct bkey_float) + sizeof(u8));
+ return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
}
static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
@@ -720,7 +695,7 @@ static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
- struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
+ struct bkey_packed *k = btree_bkey_first(b, t);
struct bkey_i min_key, max_key;
unsigned cacheline = 1;
@@ -733,12 +708,12 @@ retry:
return;
}
- t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
+ t->extra = eytzinger1_extra(t->size - 1);
/* First we figure out where the first key in each cacheline is */
eytzinger1_for_each(j, t->size - 1) {
while (bkey_to_cacheline(b, t, k) < cacheline)
- prev = k, k = bkey_p_next(k);
+ k = bkey_p_next(k);
if (k >= btree_bkey_last(b, t)) {
/* XXX: this path sucks */
@@ -746,17 +721,12 @@ retry:
goto retry;
}
- ro_aux_tree_prev(b, t)[j] = prev->u64s;
bkey_float(b, t, j)->key_offset =
bkey_to_cacheline_offset(b, t, cacheline++, k);
- EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
EBUG_ON(tree_to_bkey(b, t, j) != k);
}
- while (k != btree_bkey_last(b, t))
- prev = k, k = bkey_p_next(k);
-
if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
bkey_init(&min_key.k);
min_key.k.p = b->data->min_key;
@@ -915,6 +885,38 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
/* Insert */
+static void rw_aux_tree_insert_entry(struct btree *b,
+ struct bset_tree *t,
+ unsigned idx)
+{
+ EBUG_ON(!idx || idx > t->size);
+ struct bkey_packed *start = rw_aux_to_bkey(b, t, idx - 1);
+ struct bkey_packed *end = idx < t->size
+ ? rw_aux_to_bkey(b, t, idx)
+ : btree_bkey_last(b, t);
+
+ if (t->size < bset_rw_tree_capacity(b, t) &&
+ (void *) end - (void *) start > L1_CACHE_BYTES) {
+ struct bkey_packed *k = start;
+
+ while (1) {
+ k = bkey_p_next(k);
+ if (k == end)
+ break;
+
+ if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
+ memmove(&rw_aux_tree(b, t)[idx + 1],
+ &rw_aux_tree(b, t)[idx],
+ (void *) &rw_aux_tree(b, t)[t->size] -
+ (void *) &rw_aux_tree(b, t)[idx]);
+ t->size++;
+ rw_aux_tree_set(b, t, idx, k);
+ break;
+ }
+ }
+ }
+}
+
static void bch2_bset_fix_lookup_table(struct btree *b,
struct bset_tree *t,
struct bkey_packed *_where,
@@ -922,84 +924,59 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
unsigned new_u64s)
{
int shift = new_u64s - clobber_u64s;
- unsigned l, j, where = __btree_node_key_to_offset(b, _where);
+ unsigned idx, j, where = __btree_node_key_to_offset(b, _where);
EBUG_ON(bset_has_ro_aux_tree(t));
if (!bset_has_rw_aux_tree(t))
return;
+ if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
+ rw_aux_tree_insert_entry(b, t, t->size);
+ goto verify;
+ }
+
/* returns first entry >= where */
- l = rw_aux_tree_bsearch(b, t, where);
-
- if (!l) /* never delete first entry */
- l++;
- else if (l < t->size &&
- where < t->end_offset &&
- rw_aux_tree(b, t)[l].offset == where)
- rw_aux_tree_set(b, t, l++, _where);
-
- /* l now > where */
-
- for (j = l;
- j < t->size &&
- rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
- j++)
- ;
-
- if (j < t->size &&
- rw_aux_tree(b, t)[j].offset + shift ==
- rw_aux_tree(b, t)[l - 1].offset)
- j++;
-
- memmove(&rw_aux_tree(b, t)[l],
- &rw_aux_tree(b, t)[j],
- (void *) &rw_aux_tree(b, t)[t->size] -
- (void *) &rw_aux_tree(b, t)[j]);
- t->size -= j - l;
-
- for (j = l; j < t->size; j++)
- rw_aux_tree(b, t)[j].offset += shift;
+ idx = rw_aux_tree_bsearch(b, t, where);
+
+ if (rw_aux_tree(b, t)[idx].offset == where) {
+ if (!idx) { /* never delete first entry */
+ idx++;
+ } else if (where < t->end_offset) {
+ rw_aux_tree_set(b, t, idx++, _where);
+ } else {
+ EBUG_ON(where != t->end_offset);
+ rw_aux_tree_insert_entry(b, t, --t->size);
+ goto verify;
+ }
+ }
- EBUG_ON(l < t->size &&
- rw_aux_tree(b, t)[l].offset ==
- rw_aux_tree(b, t)[l - 1].offset);
+ EBUG_ON(idx < t->size && rw_aux_tree(b, t)[idx].offset <= where);
+ if (idx < t->size &&
+ rw_aux_tree(b, t)[idx].offset + shift ==
+ rw_aux_tree(b, t)[idx - 1].offset) {
+ memmove(&rw_aux_tree(b, t)[idx],
+ &rw_aux_tree(b, t)[idx + 1],
+ (void *) &rw_aux_tree(b, t)[t->size] -
+ (void *) &rw_aux_tree(b, t)[idx + 1]);
+ t->size -= 1;
+ }
- if (t->size < bset_rw_tree_capacity(b, t) &&
- (l < t->size
- ? rw_aux_tree(b, t)[l].offset
- : t->end_offset) -
- rw_aux_tree(b, t)[l - 1].offset >
- L1_CACHE_BYTES / sizeof(u64)) {
- struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
- struct bkey_packed *end = l < t->size
- ? rw_aux_to_bkey(b, t, l)
- : btree_bkey_last(b, t);
- struct bkey_packed *k = start;
+ for (j = idx; j < t->size; j++)
+ rw_aux_tree(b, t)[j].offset += shift;
- while (1) {
- k = bkey_p_next(k);
- if (k == end)
- break;
+ EBUG_ON(idx < t->size &&
+ rw_aux_tree(b, t)[idx].offset ==
+ rw_aux_tree(b, t)[idx - 1].offset);
- if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
- memmove(&rw_aux_tree(b, t)[l + 1],
- &rw_aux_tree(b, t)[l],
- (void *) &rw_aux_tree(b, t)[t->size] -
- (void *) &rw_aux_tree(b, t)[l]);
- t->size++;
- rw_aux_tree_set(b, t, l, k);
- break;
- }
- }
- }
+ rw_aux_tree_insert_entry(b, t, idx);
+verify:
bch2_bset_verify_rw_aux_tree(b, t);
bset_aux_tree_verify(b);
}
void bch2_bset_insert(struct btree *b,
- struct btree_node_iter *iter,
struct bkey_packed *where,
struct bkey_i *insert,
unsigned clobber_u64s)
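
The rw aux tree maintained here is a sampled index over the sorted key array, roughly one entry per L1 cache line, so a lookup binary-searches the small index and then scans at most one cache line of keys. The rewrite factors the "insert an index entry if a gap grew past a cache line" logic into rw_aux_tree_insert_entry() and handles the append and exact-offset cases up front. A standalone sketch of the same maintenance idea over a plain byte array (all names hypothetical):

	#include <string.h>

	#define SAMPLE_BYTES 64	/* stand-in for L1_CACHE_BYTES */

	struct sampled_index {
		unsigned offsets[128];	/* byte offset of each sampled entry */
		unsigned nr;
	};

	/* After inserting "len" bytes at "where": shift later samples,
	 * then add a sample if the surrounding gap exceeds SAMPLE_BYTES. */
	static void sampled_index_fix(struct sampled_index *t,
				      unsigned where, unsigned len)
	{
		unsigned i = 0;

		while (i < t->nr && t->offsets[i] < where)
			i++;
		for (unsigned j = i; j < t->nr; j++)
			t->offsets[j] += len;

		unsigned prev = i ? t->offsets[i - 1] : 0;
		unsigned next = i < t->nr ? t->offsets[i] : where + len;

		if (next - prev > SAMPLE_BYTES && t->nr < 128) {
			memmove(&t->offsets[i + 1], &t->offsets[i],
				(t->nr - i) * sizeof(t->offsets[0]));
			t->offsets[i] = prev + SAMPLE_BYTES;
			t->nr++;
		}
	}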
@@ -1098,8 +1075,7 @@ static inline void prefetch_four_cachelines(void *p)
}
static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
- const struct bkey_float *f,
- unsigned idx)
+ const struct bkey_float *f)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
@@ -1133,9 +1109,9 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
goto slowpath;
l = f->mantissa;
- r = bkey_mantissa(packed_search, f, n);
+ r = bkey_mantissa(packed_search, f);
- if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
+ if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f))
goto slowpath;
n = n * 2 + (l < r);
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index 5c6c7a14fa0f..6953d55b72cc 100644
--- a/fs/bcachefs/bset.h
+++ b/fs/bcachefs/bset.h
@@ -270,8 +270,8 @@ void bch2_bset_init_first(struct btree *, struct bset *);
void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
-void bch2_bset_insert(struct btree *, struct btree_node_iter *,
- struct bkey_packed *, struct bkey_i *, unsigned);
+void bch2_bset_insert(struct btree *, struct bkey_packed *, struct bkey_i *,
+ unsigned);
void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);
/* Bkey utility code */
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index e52a06d3418c..6e4afb2b5441 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -15,11 +15,12 @@
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
+#include <linux/swap.h>
#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
do { \
if (shrinker_counter) \
- bc->not_freed_##counter++; \
+ bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_##counter]++; \
} while (0)
const char * const bch2_btree_node_flags[] = {
@@ -31,24 +32,29 @@ const char * const bch2_btree_node_flags[] = {
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
- unsigned i, reserve = 16;
+ unsigned reserve = 16;
if (!c->btree_roots_known[0].b)
reserve += 8;
- for (i = 0; i < btree_id_nr_alive(c); i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
if (r->b)
reserve += min_t(unsigned, 1, r->b->c.level) * 8;
}
- c->btree_cache.reserve = reserve;
+ c->btree_cache.nr_reserve = reserve;
}
-static inline unsigned btree_cache_can_free(struct btree_cache *bc)
+static inline size_t btree_cache_can_free(struct btree_cache_list *list)
{
- return max_t(int, 0, bc->used - bc->reserve);
+ struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]);
+
+ size_t can_free = list->nr;
+ if (!list->idx)
+ can_free = max_t(ssize_t, 0, can_free - bc->nr_reserve);
+ return can_free;
}
static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
@@ -63,6 +69,18 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
{
struct btree_cache *bc = &c->btree_cache;
+ BUG_ON(btree_node_hashed(b));
+
+ /*
+ * This should really be done in slub/vmalloc, but we're using the
+ * kmalloc_large() path, so we're working around a slub bug by doing
+ * this here:
+ */
+ if (b->data)
+ mm_account_reclaimed_pages(btree_buf_bytes(b) / PAGE_SIZE);
+ if (b->aux_data)
+ mm_account_reclaimed_pages(btree_aux_data_bytes(b) / PAGE_SIZE);
+
EBUG_ON(btree_node_write_in_flight(b));
clear_btree_node_just_written(b);
@@ -76,7 +94,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
#endif
b->aux_data = NULL;
- bc->used--;
+ bc->nr_freeable--;
btree_node_to_freedlist(bc, b);
}
@@ -102,6 +120,8 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{
BUG_ON(b->data || b->aux_data);
+ gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;
+
b->data = kvmalloc(btree_buf_bytes(b), gfp);
if (!b->data)
return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
@@ -154,7 +174,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
bch2_btree_lock_init(&b->c, 0);
- bc->used++;
+ bc->nr_freeable++;
list_add(&b->list, &bc->freeable);
return b;
}
@@ -169,10 +189,56 @@ void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
six_unlock_intent(&b->c.lock);
}
+static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b)
+{
+ struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p);
+
+ u64 mask = bc->pinned_nodes_mask[!!b->c.level];
+
+ return ((mask & BIT_ULL(b->c.btree_id)) &&
+ bbpos_cmp(bc->pinned_nodes_start, pos) < 0 &&
+ bbpos_cmp(bc->pinned_nodes_end, pos) >= 0);
+}
+
+void bch2_node_pin(struct bch_fs *c, struct btree *b)
+{
+ struct btree_cache *bc = &c->btree_cache;
+
+ mutex_lock(&bc->lock);
+ BUG_ON(!__btree_node_pinned(bc, b));
+ if (b != btree_node_root(c, b) && !btree_node_pinned(b)) {
+ set_btree_node_pinned(b);
+ list_move(&b->list, &bc->live[1].list);
+ bc->live[0].nr--;
+ bc->live[1].nr++;
+ }
+ mutex_unlock(&bc->lock);
+}
+
+void bch2_btree_cache_unpin(struct bch_fs *c)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b, *n;
+
+ mutex_lock(&bc->lock);
+ c->btree_cache.pinned_nodes_mask[0] = 0;
+ c->btree_cache.pinned_nodes_mask[1] = 0;
+
+ list_for_each_entry_safe(b, n, &bc->live[1].list, list) {
+ clear_btree_node_pinned(b);
+ list_move(&b->list, &bc->live[0].list);
+ bc->live[0].nr++;
+ bc->live[1].nr--;
+ }
+
+ mutex_unlock(&bc->lock);
+}
+
/* Btree in memory cache - hash table */
void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{
+ lockdep_assert_held(&bc->lock);
int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
BUG_ON(ret);
@@ -181,7 +247,11 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
b->hash_val = 0;
if (b->c.btree_id < BTREE_ID_NR)
- --bc->used_by_btree[b->c.btree_id];
+ --bc->nr_by_btree[b->c.btree_id];
+
+ bc->live[btree_node_pinned(b)].nr--;
+ bc->nr_freeable++;
+ list_move(&b->list, &bc->freeable);
}
int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -191,23 +261,30 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
bch_btree_cache_params);
- if (!ret && b->c.btree_id < BTREE_ID_NR)
- bc->used_by_btree[b->c.btree_id]++;
- return ret;
+ if (ret)
+ return ret;
+
+ if (b->c.btree_id < BTREE_ID_NR)
+ bc->nr_by_btree[b->c.btree_id]++;
+
+ bool p = __btree_node_pinned(bc, b);
+ mod_bit(BTREE_NODE_pinned, &b->flags, p);
+
+ list_move_tail(&b->list, &bc->live[p].list);
+ bc->live[p].nr++;
+
+ bc->nr_freeable--;
+ return 0;
}
int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
unsigned level, enum btree_id id)
{
- int ret;
-
b->c.level = level;
b->c.btree_id = id;
mutex_lock(&bc->lock);
- ret = __bch2_btree_node_hash_insert(bc, b);
- if (!ret)
- list_add_tail(&b->list, &bc->live);
+ int ret = __bch2_btree_node_hash_insert(bc, b);
mutex_unlock(&bc->lock);
return ret;
@@ -261,18 +338,6 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, b
int ret = 0;
lockdep_assert_held(&bc->lock);
-
- struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p);
-
- u64 mask = b->c.level
- ? bc->pinned_nodes_interior_mask
- : bc->pinned_nodes_leaf_mask;
-
- if ((mask & BIT_ULL(b->c.btree_id)) &&
- bbpos_cmp(bc->pinned_nodes_start, pos) < 0 &&
- bbpos_cmp(bc->pinned_nodes_end, pos) >= 0)
- return -BCH_ERR_ENOMEM_btree_node_reclaim;
-
wait_on_io:
if (b->flags & ((1U << BTREE_NODE_dirty)|
(1U << BTREE_NODE_read_in_flight)|
@@ -377,8 +442,9 @@ static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct bch_fs *c = shrink->private_data;
- struct btree_cache *bc = &c->btree_cache;
+ struct btree_cache_list *list = shrink->private_data;
+ struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]);
+ struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);
struct btree *b, *t;
unsigned long nr = sc->nr_to_scan;
unsigned long can_free = 0;
@@ -386,8 +452,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
unsigned long touched = 0;
unsigned i, flags;
unsigned long ret = SHRINK_STOP;
- bool trigger_writes = atomic_read(&bc->dirty) + nr >=
- bc->used * 3 / 4;
+ bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4;
if (bch2_btree_shrinker_disabled)
return SHRINK_STOP;
@@ -402,7 +467,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
* succeed, so that inserting keys into the btree can always succeed and
* IO can always make forward progress:
*/
- can_free = btree_cache_can_free(bc);
+ can_free = btree_cache_can_free(list);
nr = min_t(unsigned long, nr, can_free);
i = 0;
@@ -424,22 +489,24 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
freed++;
- bc->freed++;
+ bc->nr_freed++;
}
}
restart:
- list_for_each_entry_safe(b, t, &bc->live, list) {
+ list_for_each_entry_safe(b, t, &list->list, list) {
touched++;
if (btree_node_accessed(b)) {
clear_btree_node_accessed(b);
- bc->not_freed_access_bit++;
+ bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++;
+			--touched;
} else if (!btree_node_reclaim(c, b, true)) {
+ bch2_btree_node_hash_remove(bc, b);
+
freed++;
btree_node_data_free(c, b);
- bc->freed++;
+ bc->nr_freed++;
- bch2_btree_node_hash_remove(bc, b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
@@ -450,7 +517,7 @@ restart:
!btree_node_will_make_reachable(b) &&
!btree_node_write_blocked(b) &&
six_trylock_read(&b->c.lock)) {
- list_move(&bc->live, &b->list);
+ list_move(&list->list, &b->list);
mutex_unlock(&bc->lock);
__bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
six_unlock_read(&b->c.lock);
@@ -464,8 +531,8 @@ restart:
break;
}
out_rotate:
- if (&t->list != &bc->live)
- list_move_tail(&bc->live, &t->list);
+ if (&t->list != &list->list)
+ list_move_tail(&list->list, &t->list);
out:
mutex_unlock(&bc->lock);
out_nounlock:
@@ -478,44 +545,45 @@ out_nounlock:
static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct bch_fs *c = shrink->private_data;
- struct btree_cache *bc = &c->btree_cache;
+ struct btree_cache_list *list = shrink->private_data;
if (bch2_btree_shrinker_disabled)
return 0;
- return btree_cache_can_free(bc);
+ return btree_cache_can_free(list);
}
void bch2_fs_btree_cache_exit(struct bch_fs *c)
{
struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
- unsigned i, flags;
+ struct btree *b, *t;
+ unsigned long flags;
- shrinker_free(bc->shrink);
+ shrinker_free(bc->live[1].shrink);
+ shrinker_free(bc->live[0].shrink);
/* vfree() can allocate memory: */
flags = memalloc_nofs_save();
mutex_lock(&bc->lock);
if (c->verify_data)
- list_move(&c->verify_data->list, &bc->live);
+ list_move(&c->verify_data->list, &bc->live[0].list);
kvfree(c->verify_ondisk);
- for (i = 0; i < btree_id_nr_alive(c); i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
if (r->b)
- list_add(&r->b->list, &bc->live);
+ list_add(&r->b->list, &bc->live[0].list);
}
- list_splice(&bc->freeable, &bc->live);
-
- while (!list_empty(&bc->live)) {
- b = list_first_entry(&bc->live, struct btree, list);
+ list_for_each_entry_safe(b, t, &bc->live[1].list, list)
+ bch2_btree_node_hash_remove(bc, b);
+ list_for_each_entry_safe(b, t, &bc->live[0].list, list)
+ bch2_btree_node_hash_remove(bc, b);
+ list_for_each_entry_safe(b, t, &bc->freeable, list) {
BUG_ON(btree_node_read_in_flight(b) ||
btree_node_write_in_flight(b));
@@ -523,12 +591,11 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
}
BUG_ON(!bch2_journal_error(&c->journal) &&
- atomic_read(&c->btree_cache.dirty));
+ atomic_long_read(&c->btree_cache.nr_dirty));
list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
- while (!list_empty(&bc->freed_nonpcpu)) {
- b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
+ list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) {
list_del(&b->list);
six_lock_exit(&b->c.lock);
kfree(b);
@@ -537,6 +604,12 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
mutex_unlock(&bc->lock);
memalloc_nofs_restore(flags);
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+ BUG_ON(bc->nr_by_btree[i]);
+ BUG_ON(bc->live[0].nr);
+ BUG_ON(bc->live[1].nr);
+ BUG_ON(bc->nr_freeable);
+
if (bc->table_init_done)
rhashtable_destroy(&bc->table);
}
@@ -556,22 +629,32 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
bch2_recalc_btree_reserve(c);
- for (i = 0; i < bc->reserve; i++)
+ for (i = 0; i < bc->nr_reserve; i++)
if (!__bch2_btree_node_mem_alloc(c))
goto err;
- list_splice_init(&bc->live, &bc->freeable);
+ list_splice_init(&bc->live[0].list, &bc->freeable);
mutex_init(&c->verify_lock);
shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
if (!shrink)
goto err;
- bc->shrink = shrink;
+ bc->live[0].shrink = shrink;
+ shrink->count_objects = bch2_btree_cache_count;
+ shrink->scan_objects = bch2_btree_cache_scan;
+ shrink->seeks = 2;
+ shrink->private_data = &bc->live[0];
+ shrinker_register(shrink);
+
+ shrink = shrinker_alloc(0, "%s-btree_cache-pinned", c->name);
+ if (!shrink)
+ goto err;
+ bc->live[1].shrink = shrink;
shrink->count_objects = bch2_btree_cache_count;
shrink->scan_objects = bch2_btree_cache_scan;
- shrink->seeks = 4;
- shrink->private_data = c;
+ shrink->seeks = 8;
+ shrink->private_data = &bc->live[1];
shrinker_register(shrink);
return 0;
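
Splitting the cache into two shrinker-managed lists lets pinned nodes be reclaimed last: the shrinker core scales its scan target inversely with "seeks", so under equal memory pressure the pinned list (seeks = 8) is asked to give up objects at a quarter of the rate of the unpinned list (seeks = 2). A hedged note on the proportionality, mirroring the presumed do_shrink_slab() arithmetic:

	/* do_shrink_slab(), roughly:
	 *   delta  = freeable >> priority;
	 *   delta *= 4;
	 *   delta /= shrinker->seeks;
	 * seeks = 2 -> 2x baseline pressure; seeks = 8 -> 0.5x baseline. */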
@@ -582,7 +665,10 @@ err:
void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
{
mutex_init(&bc->lock);
- INIT_LIST_HEAD(&bc->live);
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) {
+ bc->live[i].idx = i;
+ INIT_LIST_HEAD(&bc->live[i].list);
+ }
INIT_LIST_HEAD(&bc->freeable);
INIT_LIST_HEAD(&bc->freed_pcpu);
INIT_LIST_HEAD(&bc->freed_nonpcpu);
@@ -644,14 +730,16 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- list_for_each_entry_reverse(b, &bc->live, list)
- if (!btree_node_reclaim(c, b, false))
- return b;
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++)
+ list_for_each_entry_reverse(b, &bc->live[i].list, list)
+ if (!btree_node_reclaim(c, b, false))
+ return b;
while (1) {
- list_for_each_entry_reverse(b, &bc->live, list)
- if (!btree_node_write_and_reclaim(c, b))
- return b;
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++)
+ list_for_each_entry_reverse(b, &bc->live[i].list, list)
+ if (!btree_node_write_and_reclaim(c, b))
+ return b;
/*
* Rare case: all nodes were intent-locked.
@@ -671,9 +759,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
: &bc->freed_nonpcpu;
struct btree *b, *b2;
u64 start_time = local_clock();
- unsigned flags;
- flags = memalloc_nofs_save();
mutex_lock(&bc->lock);
/*
@@ -725,7 +811,7 @@ got_node:
}
mutex_lock(&bc->lock);
- bc->used++;
+ bc->nr_freeable++;
got_mem:
mutex_unlock(&bc->lock);
@@ -745,8 +831,6 @@ out:
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
start_time);
- memalloc_nofs_restore(flags);
-
int ret = bch2_trans_relock(trans);
if (unlikely(ret)) {
bch2_btree_node_to_freelist(c, b);
@@ -781,7 +865,6 @@ err:
}
mutex_unlock(&bc->lock);
- memalloc_nofs_restore(flags);
return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
}
@@ -1269,8 +1352,8 @@ wait_on_io:
BUG_ON(btree_node_dirty(b));
mutex_lock(&bc->lock);
- btree_node_data_free(c, b);
bch2_btree_node_hash_remove(bc, b);
+ btree_node_data_free(c, b);
mutex_unlock(&bc->lock);
out:
six_unlock_write(&b->c.lock);
@@ -1342,13 +1425,20 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
}
static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
- const char *label, unsigned nr)
+ const char *label, size_t nr)
{
prt_printf(out, "%s\t", label);
prt_human_readable_u64(out, nr * c->opts.btree_node_size);
- prt_printf(out, " (%u)\n", nr);
+ prt_printf(out, " (%zu)\n", nr);
}
+static const char * const bch2_btree_cache_not_freed_reasons_strs[] = {
+#define x(n) #n,
+ BCH_BTREE_CACHE_NOT_FREED_REASONS()
+#undef x
+ NULL
+};
+
void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
{
struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);
@@ -1356,24 +1446,21 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 32);
- prt_btree_cache_line(out, c, "total:", bc->used);
- prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
+ prt_btree_cache_line(out, c, "live:", bc->live[0].nr);
+ prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr);
+ prt_btree_cache_line(out, c, "freeable:", bc->nr_freeable);
+ prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty));
prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
prt_newline(out);
- for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
- prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+ prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]);
prt_newline(out);
- prt_printf(out, "freed:\t%u\n", bc->freed);
+ prt_printf(out, "freed:\t%zu\n", bc->nr_freed);
prt_printf(out, "not freed:\n");
- prt_printf(out, " dirty\t%u\n", bc->not_freed_dirty);
- prt_printf(out, " write in flight\t%u\n", bc->not_freed_write_in_flight);
- prt_printf(out, " read in flight\t%u\n", bc->not_freed_read_in_flight);
- prt_printf(out, " lock intent failed\t%u\n", bc->not_freed_lock_intent);
- prt_printf(out, " lock write failed\t%u\n", bc->not_freed_lock_write);
- prt_printf(out, " access bit\t%u\n", bc->not_freed_access_bit);
- prt_printf(out, " no evict failed\t%u\n", bc->not_freed_noevict);
- prt_printf(out, " write blocked\t%u\n", bc->not_freed_write_blocked);
- prt_printf(out, " will make reachable\t%u\n", bc->not_freed_will_make_reachable);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++)
+ prt_printf(out, " %s\t%llu\n",
+ bch2_btree_cache_not_freed_reasons_strs[i], bc->not_freed[i]);
}
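
The not_freed counters are now driven by an x-macro list, so the enum indices used by BTREE_CACHE_NOT_FREED_INCREMENT() and the strings printed here are generated from one definition and cannot drift apart. A minimal standalone sketch of the pattern (the reason names are illustrative, not the bcachefs list):

	#include <stdio.h>

	#define NOT_FREED_REASONS()	\
		x(dirty)		\
		x(access_bit)		\
		x(noevict)

	enum not_freed_reason {
	#define x(n) NOT_FREED_##n,
		NOT_FREED_REASONS()
	#undef x
		NOT_FREED_NR,
	};

	static const char * const not_freed_strs[] = {
	#define x(n) #n,
		NOT_FREED_REASONS()
	#undef x
	};

	int main(void)
	{
		for (int i = 0; i < NOT_FREED_NR; i++)
			printf("%d: %s\n", i, not_freed_strs[i]);
		return 0;
	}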
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index f82064007127..367acd217c6a 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -19,6 +19,9 @@ int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
unsigned, enum btree_id);
+void bch2_node_pin(struct bch_fs *, struct btree *);
+void bch2_btree_cache_unpin(struct bch_fs *);
+
void bch2_btree_node_update_key_early(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_i *);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index eb3002c4eae7..660d2fa02da2 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -513,6 +513,8 @@ int bch2_check_topology(struct bch_fs *c)
struct bpos pulled_from_scan = POS_MIN;
int ret = 0;
+ bch2_trans_srcu_unlock(trans);
+
for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
bool reconstructed_root = false;
@@ -549,9 +551,8 @@ reconstruct_root:
six_unlock_read(&b->c.lock);
if (ret == DROP_THIS_NODE) {
- bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_lock(&c->btree_cache.lock);
- list_move(&b->list, &c->btree_cache.freeable);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_unlock(&c->btree_cache.lock);
r->b = NULL;
@@ -600,15 +601,15 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
if (initial) {
BUG_ON(bch2_journal_seq_verify &&
- k.k->version.lo > atomic64_read(&c->journal.seq));
+ k.k->bversion.lo > atomic64_read(&c->journal.seq));
if (fsck_err_on(btree_id != BTREE_ID_accounting &&
- k.k->version.lo > atomic64_read(&c->key_version),
+ k.k->bversion.lo > atomic64_read(&c->key_version),
trans, bkey_version_in_future,
"key version number higher than recorded %llu\n %s",
atomic64_read(&c->key_version),
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
- atomic64_set(&c->key_version, k.k->version.lo);
+ atomic64_set(&c->key_version, k.k->bversion.lo);
}
if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
@@ -753,10 +754,8 @@ static void bch2_gc_free(struct bch_fs *c)
genradix_free(&c->reflink_gc_table);
genradix_free(&c->gc_stripes);
- for_each_member_device(c, ca) {
- kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
- ca->buckets_gc = NULL;
- }
+ for_each_member_device(c, ca)
+ genradix_free(&ca->buckets_gc);
}
static int bch2_gc_start(struct bch_fs *c)
@@ -910,20 +909,12 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
int ret = 0;
for_each_member_device(c, ca) {
- struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
- ca->mi.nbuckets * sizeof(struct bucket),
- GFP_KERNEL|__GFP_ZERO);
- if (!buckets) {
+ ret = genradix_prealloc(&ca->buckets_gc, ca->mi.nbuckets, GFP_KERNEL);
+ if (ret) {
bch2_dev_put(ca);
ret = -BCH_ERR_ENOMEM_gc_alloc_start;
break;
}
-
- buckets->first_bucket = ca->mi.first_bucket;
- buckets->nbuckets = ca->mi.nbuckets;
- buckets->nbuckets_minus_first =
- buckets->nbuckets - buckets->first_bucket;
- rcu_assign_pointer(ca->buckets_gc, buckets);
}
bch_err_fn(c, ret);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 56ea9a77cd4a..1c1448b52207 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1195,6 +1195,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
set_btree_bset(b, b->set, &b->data->keys);
b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
+ memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
+ btree_buf_bytes(b) -
+ sizeof(struct btree_node) -
+ b->nr.live_u64s * sizeof(u64));
u64s = le16_to_cpu(sorted->keys.u64s);
*sorted = *b->data;
@@ -1219,7 +1223,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
ret = bch2_bkey_val_validate(c, u.s_c, READ);
if (ret == -BCH_ERR_fsck_delete_bkey ||
(bch2_inject_invalid_keys &&
- !bversion_cmp(u.k->version, MAX_VERSION))) {
+ !bversion_cmp(u.k->bversion, MAX_VERSION))) {
btree_keys_account_key_drop(&b->nr, 0, k);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
@@ -1666,7 +1670,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
bch2_btree_pos_to_text(&buf, c, b);
bch_err_ratelimited(c, "%s", buf.buf);
- if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
+ if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
bch2_fatal_error(c);
@@ -1749,10 +1753,8 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
bch2_btree_node_read(trans, b, true);
if (btree_node_read_error(b)) {
- bch2_btree_node_hash_remove(&c->btree_cache, b);
-
mutex_lock(&c->btree_cache.lock);
- list_move(&b->list, &c->btree_cache.freeable);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_unlock(&c->btree_cache.lock);
ret = -BCH_ERR_btree_node_read_error;
@@ -2031,7 +2033,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
do_write:
BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
- atomic_dec(&c->btree_cache.dirty);
+ atomic_long_dec(&c->btree_cache.nr_dirty);
BUG_ON(btree_node_fake(b));
BUG_ON((b->will_make_reachable != 0) != !b->written);
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 63d76f5c6403..9b01ca3de907 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -18,13 +18,13 @@ struct btree_node_read_all;
static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
{
if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
- atomic_inc(&c->btree_cache.dirty);
+ atomic_long_inc(&c->btree_cache.nr_dirty);
}
static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
{
if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
- atomic_dec(&c->btree_cache.dirty);
+ atomic_long_dec(&c->btree_cache.nr_dirty);
}
static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 2e84d22e17bd..bfe9f0c1e1be 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1010,9 +1010,9 @@ retry_all:
* the same position:
*/
if (trans->paths[idx].uptodate) {
- __btree_path_get(&trans->paths[idx], false);
+ __btree_path_get(trans, &trans->paths[idx], false);
ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
- __btree_path_put(&trans->paths[idx], false);
+ __btree_path_put(trans, &trans->paths[idx], false);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, ENOMEM))
@@ -1131,6 +1131,8 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
if (unlikely(!trans->srcu_held))
bch2_trans_srcu_lock(trans);
+ trace_btree_path_traverse_start(trans, path);
+
/*
* Ensure we obey path->should_be_locked: if it's set, we can't unlock
* and re-traverse the path without a transaction restart:
@@ -1194,6 +1196,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
out_uptodate:
path->uptodate = BTREE_ITER_UPTODATE;
+ trace_btree_path_traverse_end(trans, path);
out:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
panic("ret %s (%i) trans->restarted %s (%i)\n",
@@ -1225,7 +1228,7 @@ static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_i
{
btree_path_idx_t new = btree_path_alloc(trans, src);
btree_path_copy(trans, trans->paths + new, trans->paths + src);
- __btree_path_get(trans->paths + new, intent);
+ __btree_path_get(trans, trans->paths + new, intent);
#ifdef TRACK_PATH_ALLOCATED
trans->paths[new].ip_allocated = ip;
#endif
@@ -1236,8 +1239,10 @@ __flatten
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
btree_path_idx_t path, bool intent, unsigned long ip)
{
- __btree_path_put(trans->paths + path, intent);
+ struct btree_path *old = trans->paths + path;
+ __btree_path_put(trans, trans->paths + path, intent);
path = btree_path_clone(trans, path, intent, ip);
+ trace_btree_path_clone(trans, old, trans->paths + path);
trans->paths[path].preserve = false;
return path;
}
@@ -1252,6 +1257,8 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
bch2_trans_verify_not_in_restart(trans);
EBUG_ON(!trans->paths[path_idx].ref);
+ trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
+
path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
struct btree_path *path = trans->paths + path_idx;
@@ -1361,13 +1368,15 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
{
struct btree_path *path = trans->paths + path_idx, *dup;
- if (!__btree_path_put(path, intent))
+ if (!__btree_path_put(trans, path, intent))
return;
dup = path->preserve
? have_path_at_pos(trans, path)
: have_node_at_pos(trans, path);
+ trace_btree_path_free(trans, path_idx, dup);
+
if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
return;
@@ -1392,7 +1401,7 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
bool intent)
{
- if (!__btree_path_put(trans->paths + path, intent))
+ if (!__btree_path_put(trans, trans->paths + path, intent))
return;
__bch2_path_free(trans, path);
@@ -1421,8 +1430,8 @@ void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
- prt_printf(buf, "transaction updates for %s journal seq %llu\n",
- trans->fn, trans->journal_res.seq);
+ prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
+ trans->nr_updates, trans->fn, trans->journal_res.seq);
printbuf_indent_add(buf, 2);
trans_for_each_update(trans, i) {
@@ -1464,7 +1473,7 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra
{
struct btree_path *path = trans->paths + path_idx;
- prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
+ prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
path_idx, path->ref, path->intent_ref,
path->preserve ? 'P' : ' ',
path->should_be_locked ? 'S' : ' ',
@@ -1716,14 +1725,16 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
trans->paths[path_pos].cached == cached &&
trans->paths[path_pos].btree_id == btree_id &&
trans->paths[path_pos].level == level) {
- __btree_path_get(trans->paths + path_pos, intent);
+ trace_btree_path_get(trans, trans->paths + path_pos, &pos);
+
+ __btree_path_get(trans, trans->paths + path_pos, intent);
path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
path = trans->paths + path_idx;
} else {
path_idx = btree_path_alloc(trans, path_pos);
path = trans->paths + path_idx;
- __btree_path_get(path, intent);
+ __btree_path_get(trans, path, intent);
path->pos = pos;
path->btree_id = btree_id;
path->cached = cached;
@@ -1738,6 +1749,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
path->ip_allocated = ip;
#endif
trans->paths_sorted = false;
+
+ trace_btree_path_alloc(trans, path);
}
if (!(flags & BTREE_ITER_nopreserve))
@@ -1857,7 +1870,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
struct btree_path *path = btree_iter_path(trans, iter);
if (btree_path_node(path, path->level))
- btree_path_set_should_be_locked(path);
+ btree_path_set_should_be_locked(trans, path);
return 0;
}
@@ -1889,7 +1902,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@@ -1983,7 +1996,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
@@ -2155,7 +2168,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
if (unlikely(ret))
return bkey_s_c_err(ret);
- btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+ btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
if (k.k && !bkey_err(k)) {
@@ -2199,7 +2212,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
goto out;
}
- btree_path_set_should_be_locked(path);
+ btree_path_set_should_be_locked(trans, path);
k = btree_path_level_peek_all(trans->c, l, &iter->k);
@@ -2326,7 +2339,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* advance, same as on exit for iter->path, but only up
* to snapshot
*/
- __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
+ __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
iter->update_path = iter->path;
iter->update_path = bch2_btree_path_set_pos(trans,
@@ -2382,14 +2395,14 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
if (iter->update_path) {
ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
if (unlikely(ret))
k = bkey_s_c_err(ret);
else
- btree_path_set_should_be_locked(trans->paths + iter->update_path);
+ btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
}
if (!(iter->flags & BTREE_ITER_all_snapshots))
@@ -2511,6 +2524,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
iter->flags & BTREE_ITER_intent,
_THIS_IP_);
path = btree_iter_path(trans, iter);
+ trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
saved_k = *k.k;
saved_v = k.v;
}
@@ -2527,7 +2541,7 @@ got_key:
continue;
}
- btree_path_set_should_be_locked(path);
+ btree_path_set_should_be_locked(trans, path);
break;
} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
/* Advance to previous leaf node: */
@@ -2685,7 +2699,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
}
}
out:
- btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@@ -2712,6 +2726,7 @@ struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
return bch2_btree_iter_peek_slot(iter);
}
+/* Obsolete, but still used by rust wrapper in -tools */
struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
{
struct bkey_s_c k;
@@ -2911,9 +2926,9 @@ void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
dst->ip_allocated = _RET_IP_;
#endif
if (src->path)
- __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
+ __btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
if (src->update_path)
- __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
+ __btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
dst->key_cache_path = 0;
}
@@ -3237,7 +3252,7 @@ void bch2_trans_put(struct btree_trans *trans)
bch2_trans_unlock(trans);
trans_for_each_update(trans, i)
- __btree_path_put(trans->paths + i->path, true);
+ __btree_path_put(trans, trans->paths + i->path, true);
trans->nr_updates = 0;
check_btree_paths_leaked(trans);
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 222b7ce8a901..78e63ad7d380 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -6,6 +6,12 @@
#include "btree_types.h"
#include "trace.h"
+void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
+void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
+void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
+void bch2_dump_trans_updates(struct btree_trans *);
+void bch2_dump_trans_paths_updates(struct btree_trans *);
+
static inline int __bkey_err(const struct bkey *k)
{
return PTR_ERR_OR_ZERO(k);
@@ -13,16 +19,28 @@ static inline int __bkey_err(const struct bkey *k)
#define bkey_err(_k) __bkey_err((_k).k)
-static inline void __btree_path_get(struct btree_path *path, bool intent)
+static inline void __btree_path_get(struct btree_trans *trans, struct btree_path *path, bool intent)
{
+ unsigned idx = path - trans->paths;
+
+ EBUG_ON(!test_bit(idx, trans->paths_allocated));
+ if (unlikely(path->ref == U8_MAX)) {
+ bch2_dump_trans_paths_updates(trans);
+ panic("path %u refcount overflow\n", idx);
+ }
+
path->ref++;
path->intent_ref += intent;
+ trace_btree_path_get_ll(trans, path);
}
-static inline bool __btree_path_put(struct btree_path *path, bool intent)
+static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
{
+ EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
EBUG_ON(!path->ref);
EBUG_ON(!path->intent_ref && intent);
+
+ trace_btree_path_put_ll(trans, path);
path->intent_ref -= intent;
return --path->ref == 0;
}
@@ -511,6 +529,12 @@ void bch2_set_btree_iter_dontneed(struct btree_iter *);
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
+/**
+ * bch2_trans_kmalloc - allocate memory for use by the current transaction
+ *
+ * Must be called after bch2_trans_begin, which on second and further calls
+ * frees all memory allocated in this transaction
+ */
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
size = roundup(size, 8);
@@ -814,20 +838,6 @@ transaction_restart: \
struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
-static inline struct bkey_s_c
-__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
- struct btree_iter *iter, unsigned flags)
-{
- struct bkey_s_c k;
-
- while (btree_trans_too_many_iters(trans) ||
- (k = bch2_btree_iter_peek_type(iter, flags),
- bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
- bch2_trans_begin(trans);
-
- return k;
-}
-
#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
@@ -868,7 +878,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
\
if (bch2_err_matches(_ret, ENOMEM)) { \
_gfp = GFP_KERNEL; \
- _ret = drop_locks_do(trans, _do); \
+ _ret = drop_locks_do(_trans, _do); \
} \
_ret; \
})
@@ -881,7 +891,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
_ret = 0; \
if (unlikely(!_p)) { \
_gfp = GFP_KERNEL; \
- _ret = drop_locks_do(trans, ((_p = _do), 0)); \
+ _ret = drop_locks_do(_trans, ((_p = _do), 0)); \
} \
_p; \
})
@@ -894,12 +904,6 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
_ret; \
})
-void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
-void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
-void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
-void bch2_dump_trans_updates(struct btree_trans *);
-void bch2_dump_trans_paths_updates(struct btree_trans *);
-
struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);
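With __btree_path_get()/__btree_path_put() now taking the transaction, the get side can assert the path is actually allocated, emit the low-level tracepoints, and dump transaction state before panicking when the 8-bit refcount would wrap; the put side still reports when the last reference went away. The core pattern, stripped of the bcachefs types:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct path { unsigned char ref, intent_ref; };

/* Check before increment: an 8-bit refcount that silently wraps is
 * far worse than a loud abort with state dumped first. */
static void path_get(struct path *p, bool intent)
{
	if (p->ref == UCHAR_MAX) {
		fprintf(stderr, "path refcount overflow\n"); /* dump state here */
		abort();
	}
	p->ref++;
	p->intent_ref += intent;
}

/* Returns true when the caller dropped the last reference */
static bool path_put(struct path *p, bool intent)
{
	p->intent_ref -= intent;
	return --p->ref == 0;
}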
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index fda7998734cb..244610b1d0b5 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -79,134 +79,47 @@ static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
return true;
}
-static void bkey_cached_evict(struct btree_key_cache *c,
+static bool bkey_cached_evict(struct btree_key_cache *c,
struct bkey_cached *ck)
{
- BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
- bch2_btree_key_cache_params));
- memset(&ck->key, ~0, sizeof(ck->key));
-
- atomic_long_dec(&c->nr_keys);
-}
-
-static void bkey_cached_free(struct btree_key_cache *bc,
- struct bkey_cached *ck)
-{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-
- BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
-
- ck->btree_trans_barrier_seq =
- start_poll_synchronize_srcu(&c->btree_trans_barrier);
-
- if (ck->c.lock.readers) {
- list_move_tail(&ck->list, &bc->freed_pcpu);
- bc->nr_freed_pcpu++;
- } else {
- list_move_tail(&ck->list, &bc->freed_nonpcpu);
- bc->nr_freed_nonpcpu++;
+ bool ret = !rhashtable_remove_fast(&c->table, &ck->hash,
+ bch2_btree_key_cache_params);
+ if (ret) {
+ memset(&ck->key, ~0, sizeof(ck->key));
+ atomic_long_dec(&c->nr_keys);
}
- atomic_long_inc(&bc->nr_freed);
-
- kfree(ck->k);
- ck->k = NULL;
- ck->u64s = 0;
- six_unlock_write(&ck->c.lock);
- six_unlock_intent(&ck->c.lock);
+ return ret;
}
-#ifdef __KERNEL__
-static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
- struct bkey_cached *ck)
+static void __bkey_cached_free(struct rcu_pending *pending, struct rcu_head *rcu)
{
- struct bkey_cached *pos;
-
- bc->nr_freed_nonpcpu++;
+ struct bch_fs *c = container_of(pending->srcu, struct bch_fs, btree_trans_barrier);
+ struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu);
- list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
- if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
- pos->btree_trans_barrier_seq)) {
- list_move(&ck->list, &pos->list);
- return;
- }
- }
-
- list_move(&ck->list, &bc->freed_nonpcpu);
+ this_cpu_dec(*c->btree_key_cache.nr_pending);
+ kmem_cache_free(bch2_key_cache, ck);
}
-#endif
-
-static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
- struct bkey_cached *ck)
-{
- BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
-
- if (!ck->c.lock.readers) {
-#ifdef __KERNEL__
- struct btree_key_cache_freelist *f;
- bool freed = false;
-
- preempt_disable();
- f = this_cpu_ptr(bc->pcpu_freed);
-
- if (f->nr < ARRAY_SIZE(f->objs)) {
- f->objs[f->nr++] = ck;
- freed = true;
- }
- preempt_enable();
- if (!freed) {
- mutex_lock(&bc->lock);
- preempt_disable();
- f = this_cpu_ptr(bc->pcpu_freed);
-
- while (f->nr > ARRAY_SIZE(f->objs) / 2) {
- struct bkey_cached *ck2 = f->objs[--f->nr];
-
- __bkey_cached_move_to_freelist_ordered(bc, ck2);
- }
- preempt_enable();
-
- __bkey_cached_move_to_freelist_ordered(bc, ck);
- mutex_unlock(&bc->lock);
- }
-#else
- mutex_lock(&bc->lock);
- list_move_tail(&ck->list, &bc->freed_nonpcpu);
- bc->nr_freed_nonpcpu++;
- mutex_unlock(&bc->lock);
-#endif
- } else {
- mutex_lock(&bc->lock);
- list_move_tail(&ck->list, &bc->freed_pcpu);
- bc->nr_freed_pcpu++;
- mutex_unlock(&bc->lock);
- }
-}
-
-static void bkey_cached_free_fast(struct btree_key_cache *bc,
- struct bkey_cached *ck)
+static void bkey_cached_free(struct btree_key_cache *bc,
+ struct bkey_cached *ck)
{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-
- ck->btree_trans_barrier_seq =
- start_poll_synchronize_srcu(&c->btree_trans_barrier);
-
- list_del_init(&ck->list);
- atomic_long_inc(&bc->nr_freed);
-
kfree(ck->k);
ck->k = NULL;
ck->u64s = 0;
- bkey_cached_move_to_freelist(bc, ck);
-
six_unlock_write(&ck->c.lock);
six_unlock_intent(&ck->c.lock);
+
+ bool pcpu_readers = ck->c.lock.readers != NULL;
+ rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
+ this_cpu_inc(*bc->nr_pending);
}
static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
{
+ gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;
+
struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
if (unlikely(!ck))
return NULL;
@@ -224,74 +137,14 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k
{
struct bch_fs *c = trans->c;
struct btree_key_cache *bc = &c->btree_key_cache;
- struct bkey_cached *ck = NULL;
bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
int ret;
- if (!pcpu_readers) {
-#ifdef __KERNEL__
- struct btree_key_cache_freelist *f;
-
- preempt_disable();
- f = this_cpu_ptr(bc->pcpu_freed);
- if (f->nr)
- ck = f->objs[--f->nr];
- preempt_enable();
-
- if (!ck) {
- mutex_lock(&bc->lock);
- preempt_disable();
- f = this_cpu_ptr(bc->pcpu_freed);
-
- while (!list_empty(&bc->freed_nonpcpu) &&
- f->nr < ARRAY_SIZE(f->objs) / 2) {
- ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
- list_del_init(&ck->list);
- bc->nr_freed_nonpcpu--;
- f->objs[f->nr++] = ck;
- }
-
- ck = f->nr ? f->objs[--f->nr] : NULL;
- preempt_enable();
- mutex_unlock(&bc->lock);
- }
-#else
- mutex_lock(&bc->lock);
- if (!list_empty(&bc->freed_nonpcpu)) {
- ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
- list_del_init(&ck->list);
- bc->nr_freed_nonpcpu--;
- }
- mutex_unlock(&bc->lock);
-#endif
- } else {
- mutex_lock(&bc->lock);
- if (!list_empty(&bc->freed_pcpu)) {
- ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
- list_del_init(&ck->list);
- bc->nr_freed_pcpu--;
- }
- mutex_unlock(&bc->lock);
- }
-
- if (ck) {
- ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent, _THIS_IP_);
- if (unlikely(ret)) {
- bkey_cached_move_to_freelist(bc, ck);
- return ERR_PTR(ret);
- }
-
- btree_path_cached_set(trans, path, ck, BTREE_NODE_INTENT_LOCKED);
-
- ret = bch2_btree_node_lock_write(trans, path, &ck->c);
- if (unlikely(ret)) {
- btree_node_unlock(trans, path, 0);
- bkey_cached_move_to_freelist(bc, ck);
- return ERR_PTR(ret);
- }
-
- return ck;
- }
+ struct bkey_cached *ck = container_of_or_null(
+ rcu_pending_dequeue(&bc->pending[pcpu_readers]),
+ struct bkey_cached, rcu);
+ if (ck)
+ goto lock;
ck = allocate_dropping_locks(trans, ret,
__bkey_cached_alloc(key_u64s, _gfp));
@@ -302,15 +155,19 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k
return ERR_PTR(ret);
}
- if (!ck)
- return NULL;
-
- INIT_LIST_HEAD(&ck->list);
- bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
+ if (ck) {
+ bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
+ ck->c.cached = true;
+ goto lock;
+ }
- ck->c.cached = true;
- BUG_ON(!six_trylock_intent(&ck->c.lock));
- BUG_ON(!six_trylock_write(&ck->c.lock));
+ ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
+ struct bkey_cached, rcu);
+ if (ck)
+ goto lock;
+lock:
+ six_lock_intent(&ck->c.lock, NULL, NULL);
+ six_lock_write(&ck->c.lock, NULL, NULL);
return ck;
}
@@ -322,21 +179,21 @@ bkey_cached_reuse(struct btree_key_cache *c)
struct bkey_cached *ck;
unsigned i;
- mutex_lock(&c->lock);
rcu_read_lock();
tbl = rht_dereference_rcu(c->table.tbl, &c->table);
for (i = 0; i < tbl->size; i++)
rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
bkey_cached_lock_for_evict(ck)) {
- bkey_cached_evict(c, ck);
- goto out;
+ if (bkey_cached_evict(c, ck))
+ goto out;
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
}
}
ck = NULL;
out:
rcu_read_unlock();
- mutex_unlock(&c->lock);
return ck;
}
@@ -415,7 +272,7 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path *
path->uptodate = BTREE_ITER_UPTODATE;
return 0;
err:
- bkey_cached_free_fast(bc, ck);
+ bkey_cached_free(bc, ck);
mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
return ret;
@@ -611,8 +468,12 @@ evict:
}
mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
- bkey_cached_evict(&c->btree_key_cache, ck);
- bkey_cached_free_fast(&c->btree_key_cache, ck);
+ if (bkey_cached_evict(&c->btree_key_cache, ck)) {
+ bkey_cached_free(&c->btree_key_cache, ck);
+ } else {
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
+ }
}
out:
bch2_trans_iter_exit(trans, &b_iter);
@@ -722,7 +583,7 @@ void bch2_btree_key_cache_drop(struct btree_trans *trans,
}
bkey_cached_evict(bc, ck);
- bkey_cached_free_fast(bc, ck);
+ bkey_cached_free(bc, ck);
mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
@@ -735,48 +596,14 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
struct bch_fs *c = shrink->private_data;
struct btree_key_cache *bc = &c->btree_key_cache;
struct bucket_table *tbl;
- struct bkey_cached *ck, *t;
+ struct bkey_cached *ck;
size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
- unsigned start, flags;
+ unsigned iter, start;
int srcu_idx;
- mutex_lock(&bc->lock);
- bc->requested_to_free += sc->nr_to_scan;
-
srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
- flags = memalloc_nofs_save();
-
- /*
- * Newest freed entries are at the end of the list - once we hit one
- * that's too new to be freed, we can bail out:
- */
- list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
- if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
- ck->btree_trans_barrier_seq))
- break;
-
- list_del(&ck->list);
- six_lock_exit(&ck->c.lock);
- kmem_cache_free(bch2_key_cache, ck);
- atomic_long_dec(&bc->nr_freed);
- bc->nr_freed_nonpcpu--;
- bc->freed++;
- }
-
- list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
- if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
- ck->btree_trans_barrier_seq))
- break;
-
- list_del(&ck->list);
- six_lock_exit(&ck->c.lock);
- kmem_cache_free(bch2_key_cache, ck);
- atomic_long_dec(&bc->nr_freed);
- bc->nr_freed_pcpu--;
- bc->freed++;
- }
-
rcu_read_lock();
+
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
/*
@@ -792,17 +619,18 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
return SHRINK_STOP;
}
- if (bc->shrink_iter >= tbl->size)
- bc->shrink_iter = 0;
- start = bc->shrink_iter;
+ iter = bc->shrink_iter;
+ if (iter >= tbl->size)
+ iter = 0;
+ start = iter;
do {
struct rhash_head *pos, *next;
- pos = rht_ptr_rcu(&tbl->buckets[bc->shrink_iter]);
+ pos = rht_ptr_rcu(&tbl->buckets[iter]);
while (!rht_is_a_nulls(pos)) {
- next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
+ next = rht_dereference_bucket_rcu(pos->next, tbl, iter);
ck = container_of(pos, struct bkey_cached, hash);
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
@@ -812,29 +640,31 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
bc->skipped_accessed++;
} else if (!bkey_cached_lock_for_evict(ck)) {
bc->skipped_lock_fail++;
- } else {
- bkey_cached_evict(bc, ck);
+ } else if (bkey_cached_evict(bc, ck)) {
bkey_cached_free(bc, ck);
- bc->moved_to_freelist++;
+ bc->freed++;
freed++;
+ } else {
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
}
scanned++;
if (scanned >= nr)
- break;
+ goto out;
pos = next;
}
- bc->shrink_iter++;
- if (bc->shrink_iter >= tbl->size)
- bc->shrink_iter = 0;
- } while (scanned < nr && bc->shrink_iter != start);
+ iter++;
+ if (iter >= tbl->size)
+ iter = 0;
+ } while (scanned < nr && iter != start);
+out:
+ bc->shrink_iter = iter;
rcu_read_unlock();
- memalloc_nofs_restore(flags);
srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
- mutex_unlock(&bc->lock);
return freed;
}
@@ -862,18 +692,13 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
struct bucket_table *tbl;
- struct bkey_cached *ck, *n;
+ struct bkey_cached *ck;
struct rhash_head *pos;
LIST_HEAD(items);
unsigned i;
-#ifdef __KERNEL__
- int cpu;
-#endif
shrinker_free(bc->shrink);
- mutex_lock(&bc->lock);
-
/*
* The loop is needed to guard against racing with rehash:
*/
@@ -892,44 +717,14 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
for (i = 0; i < tbl->size; i++)
while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) {
ck = container_of(pos, struct bkey_cached, hash);
- bkey_cached_evict(bc, ck);
- list_add(&ck->list, &items);
+ BUG_ON(!bkey_cached_evict(bc, ck));
+ kfree(ck->k);
+ kmem_cache_free(bch2_key_cache, ck);
}
}
rcu_read_unlock();
}
-#ifdef __KERNEL__
- if (bc->pcpu_freed) {
- for_each_possible_cpu(cpu) {
- struct btree_key_cache_freelist *f =
- per_cpu_ptr(bc->pcpu_freed, cpu);
-
- for (i = 0; i < f->nr; i++) {
- ck = f->objs[i];
- list_add(&ck->list, &items);
- }
- }
- }
-#endif
-
- BUG_ON(list_count_nodes(&bc->freed_pcpu) != bc->nr_freed_pcpu);
- BUG_ON(list_count_nodes(&bc->freed_nonpcpu) != bc->nr_freed_nonpcpu);
-
- list_splice(&bc->freed_pcpu, &items);
- list_splice(&bc->freed_nonpcpu, &items);
-
- mutex_unlock(&bc->lock);
-
- list_for_each_entry_safe(ck, n, &items, list) {
- cond_resched();
-
- list_del(&ck->list);
- kfree(ck->k);
- six_lock_exit(&ck->c.lock);
- kmem_cache_free(bch2_key_cache, ck);
- }
-
if (atomic_long_read(&bc->nr_dirty) &&
!bch2_journal_error(&c->journal) &&
test_bit(BCH_FS_was_rw, &c->flags))
@@ -943,14 +738,14 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
if (bc->table_init_done)
rhashtable_destroy(&bc->table);
- free_percpu(bc->pcpu_freed);
+ rcu_pending_exit(&bc->pending[0]);
+ rcu_pending_exit(&bc->pending[1]);
+
+ free_percpu(bc->nr_pending);
}
void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
- mutex_init(&c->lock);
- INIT_LIST_HEAD(&c->freed_pcpu);
- INIT_LIST_HEAD(&c->freed_nonpcpu);
}
int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
@@ -958,11 +753,13 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
struct shrinker *shrink;
-#ifdef __KERNEL__
- bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
- if (!bc->pcpu_freed)
+ bc->nr_pending = alloc_percpu(size_t);
+ if (!bc->nr_pending)
+ return -BCH_ERR_ENOMEM_fs_btree_cache_init;
+
+ if (rcu_pending_init(&bc->pending[0], &c->btree_trans_barrier, __bkey_cached_free) ||
+ rcu_pending_init(&bc->pending[1], &c->btree_trans_barrier, __bkey_cached_free))
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
-#endif
if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
@@ -984,45 +781,21 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *bc)
{
- struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-
printbuf_tabstop_push(out, 24);
printbuf_tabstop_push(out, 12);
- unsigned flags = memalloc_nofs_save();
- mutex_lock(&bc->lock);
prt_printf(out, "keys:\t%lu\r\n", atomic_long_read(&bc->nr_keys));
prt_printf(out, "dirty:\t%lu\r\n", atomic_long_read(&bc->nr_dirty));
- prt_printf(out, "freelist:\t%lu\r\n", atomic_long_read(&bc->nr_freed));
- prt_printf(out, "nonpcpu freelist:\t%zu\r\n", bc->nr_freed_nonpcpu);
- prt_printf(out, "pcpu freelist:\t%zu\r\n", bc->nr_freed_pcpu);
-
- prt_printf(out, "\nshrinker:\n");
+ prt_printf(out, "table size:\t%u\r\n", bc->table.tbl->size);
+ prt_newline(out);
+ prt_printf(out, "shrinker:\n");
prt_printf(out, "requested_to_free:\t%lu\r\n", bc->requested_to_free);
prt_printf(out, "freed:\t%lu\r\n", bc->freed);
- prt_printf(out, "moved_to_freelist:\t%lu\r\n", bc->moved_to_freelist);
prt_printf(out, "skipped_dirty:\t%lu\r\n", bc->skipped_dirty);
prt_printf(out, "skipped_accessed:\t%lu\r\n", bc->skipped_accessed);
prt_printf(out, "skipped_lock_fail:\t%lu\r\n", bc->skipped_lock_fail);
-
- prt_printf(out, "srcu seq:\t%lu\r\n", get_state_synchronize_srcu(&c->btree_trans_barrier));
-
- struct bkey_cached *ck;
- unsigned iter = 0;
- list_for_each_entry(ck, &bc->freed_nonpcpu, list) {
- prt_printf(out, "freed_nonpcpu:\t%lu\r\n", ck->btree_trans_barrier_seq);
- if (++iter > 10)
- break;
- }
-
- iter = 0;
- list_for_each_entry(ck, &bc->freed_pcpu, list) {
- prt_printf(out, "freed_pcpu:\t%lu\r\n", ck->btree_trans_barrier_seq);
- if (++iter > 10)
- break;
- }
- mutex_unlock(&bc->lock);
- memalloc_flags_restore(flags);
+ prt_newline(out);
+ prt_printf(out, "pending:\t%zu\r\n", per_cpu_sum(bc->nr_pending));
}
void bch2_btree_key_cache_exit(void)
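The key cache rework above removes the hand-rolled machinery wholesale: the per-cpu freelists, the ordered nonpcpu list, the SRCU sequence polling in the shrinker, and the mutex that serialized all of it. Freed bkey_cached objects are instead enqueued on an rcu_pending instance (one for pcpu-reader locks, one for plain locks), which invokes __bkey_cached_free() once the btree_trans_barrier grace period has expired, and from which the allocator can also dequeue objects for reuse. A minimal model of the deferred-free half of that idea, assuming a monotonic grace-period counter; the kernel's rcu_pending batches per-cpu and does considerably more:

struct pending_obj {
	struct pending_obj *next;
	unsigned long seq;          /* grace period when enqueued */
};

struct rcu_pending_model {
	struct pending_obj *head;
	unsigned long *gp_seq;      /* global grace-period counter */
	void (*free_fn)(struct pending_obj *);
};

static void pending_enqueue(struct rcu_pending_model *p, struct pending_obj *o)
{
	o->seq = *p->gp_seq;
	o->next = p->head;
	p->head = o;
}

/* Called after the grace-period counter advances: free everything
 * whose enqueue-time grace period has now elapsed */
static void pending_reap(struct rcu_pending_model *p)
{
	struct pending_obj **prev = &p->head, *o;

	while ((o = *prev)) {
		if (*p->gp_seq > o->seq) {
			*prev = o->next;
			p->free_fn(o);
		} else {
			prev = &o->next;
		}
	}
}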
diff --git a/fs/bcachefs/btree_key_cache_types.h b/fs/bcachefs/btree_key_cache_types.h
index 237e8bb3ac40..722f1ed10551 100644
--- a/fs/bcachefs/btree_key_cache_types.h
+++ b/fs/bcachefs/btree_key_cache_types.h
@@ -2,33 +2,25 @@
#ifndef _BCACHEFS_BTREE_KEY_CACHE_TYPES_H
#define _BCACHEFS_BTREE_KEY_CACHE_TYPES_H
-struct btree_key_cache_freelist {
- struct bkey_cached *objs[16];
- unsigned nr;
-};
+#include "rcu_pending.h"
struct btree_key_cache {
- struct mutex lock;
struct rhashtable table;
bool table_init_done;
- struct list_head freed_pcpu;
- size_t nr_freed_pcpu;
- struct list_head freed_nonpcpu;
- size_t nr_freed_nonpcpu;
-
struct shrinker *shrink;
unsigned shrink_iter;
- struct btree_key_cache_freelist __percpu *pcpu_freed;
- atomic_long_t nr_freed;
+ /* 0: non pcpu reader locks, 1: pcpu reader locks */
+ struct rcu_pending pending[2];
+ size_t __percpu *nr_pending;
+
atomic_long_t nr_keys;
atomic_long_t nr_dirty;
/* shrinker stats */
unsigned long requested_to_free;
unsigned long freed;
- unsigned long moved_to_freelist;
unsigned long skipped_dirty;
unsigned long skipped_accessed;
unsigned long skipped_lock_fail;
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 11a64ead8685..7c07f9fa9add 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -218,16 +218,17 @@ static inline int __btree_node_lock_nopath(struct btree_trans *trans,
bool lock_may_not_fail,
unsigned long ip)
{
- int ret;
-
trans->lock_may_not_fail = lock_may_not_fail;
trans->lock_must_abort = false;
trans->locking = b;
- ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
- bch2_six_check_for_deadlock, trans, ip);
+ int ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
+ bch2_six_check_for_deadlock, trans, ip);
WRITE_ONCE(trans->locking, NULL);
WRITE_ONCE(trans->locking_wait.start_time, 0);
+
+ if (!ret)
+ trace_btree_path_lock(trans, _THIS_IP_, b);
return ret;
}
@@ -281,6 +282,7 @@ static inline int btree_node_lock(struct btree_trans *trans,
int ret = 0;
EBUG_ON(level >= BTREE_MAX_DEPTH);
+ bch2_trans_verify_not_unlocked(trans);
if (likely(six_trylock_type(&b->lock, type)) ||
btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
@@ -400,12 +402,13 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
/* misc: */
-static inline void btree_path_set_should_be_locked(struct btree_path *path)
+static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
{
EBUG_ON(!btree_node_locked(path, path->level));
EBUG_ON(path->uptodate);
path->should_be_locked = true;
+ trace_btree_path_should_be_locked(trans, path);
}
static inline void __btree_path_set_level_up(struct btree_trans *trans,
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index b28c649c6838..1e694fedc5da 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -275,7 +275,7 @@ static int read_btree_nodes(struct find_btree_nodes *f)
w->ca = ca;
t = kthread_run(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
- ret = IS_ERR_OR_NULL(t);
+ ret = PTR_ERR_OR_ZERO(t);
if (ret) {
percpu_ref_put(&ca->io_ref);
closure_put(&cl);
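The one-line change above is a genuine bug fix rather than style: kthread_run() returns an ERR_PTR() on failure, and IS_ERR_OR_NULL() collapses that into a bare 0/1, so the caller would have propagated 1 instead of the negative errno. PTR_ERR_OR_ZERO() keeps the code. The difference, modeled with the kernel's ERR_PTR convention:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

/* bool: loses the error code */
#define IS_ERR_OR_NULL(p)  (!(p) || IS_ERR(p))
/* long: 0 on success, -errno on failure */
#define PTR_ERR_OR_ZERO(p) (IS_ERR(p) ? PTR_ERR(p) : 0)

int main(void)
{
	void *t = (void *)(long)-ENOMEM; /* failed kthread_run() */

	printf("IS_ERR_OR_NULL:  %d\n", IS_ERR_OR_NULL(t));   /* 1   (wrong as a ret) */
	printf("PTR_ERR_OR_ZERO: %ld\n", PTR_ERR_OR_ZERO(t)); /* -12 (correct)        */
	return 0;
}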
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index a0101d9c5d83..1a74a1a252ee 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -214,7 +214,7 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
overwrite:
- bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
+ bch2_bset_insert(b, k, insert, clobber_u64s);
new_u64s = k->u64s;
fix_iter:
if (clobber_u64s != new_u64s)
@@ -684,10 +684,10 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
!(flags & BCH_TRANS_COMMIT_no_journal_res)) {
if (bch2_journal_seq_verify)
trans_for_each_update(trans, i)
- i->k->k.version.lo = trans->journal_res.seq;
+ i->k->k.bversion.lo = trans->journal_res.seq;
else if (bch2_inject_invalid_keys)
trans_for_each_update(trans, i)
- i->k->k.version = MAX_VERSION;
+ i->k->k.bversion = MAX_VERSION;
}
h = trans->hooks;
@@ -700,27 +700,31 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
struct jset_entry *entry = trans->journal_entries;
- if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
- percpu_down_read(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
+
+ for (entry = trans->journal_entries;
+ entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
+ entry = vstruct_next(entry))
+ if (entry->type == BCH_JSET_ENTRY_write_buffer_keys &&
+ entry->start->k.type == KEY_TYPE_accounting) {
+ BUG_ON(!trans->journal_res.ref);
+
+ struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start);
- for (entry = trans->journal_entries;
- entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
- entry = vstruct_next(entry))
- if (jset_entry_is_key(entry) && entry->start->k.type == KEY_TYPE_accounting) {
- struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start);
+ a->k.bversion = journal_pos_to_bversion(&trans->journal_res,
+ (u64 *) entry - (u64 *) trans->journal_entries);
+ BUG_ON(bversion_zero(a->k.bversion));
- a->k.version = journal_pos_to_bversion(&trans->journal_res,
- (u64 *) entry - (u64 *) trans->journal_entries);
- BUG_ON(bversion_zero(a->k.version));
- ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), false, false);
+ if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
+ ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal);
if (ret)
goto revert_fs_usage;
}
- percpu_up_read(&c->mark_lock);
+ }
+ percpu_up_read(&c->mark_lock);
- /* XXX: we only want to run this if deltas are nonzero */
- bch2_trans_account_disk_usage_change(trans);
- }
+ /* XXX: we only want to run this if deltas are nonzero */
+ bch2_trans_account_disk_usage_change(trans);
trans_for_each_update(trans, i)
if (btree_node_type_has_atomic_triggers(i->bkey_type)) {
@@ -735,6 +739,40 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
goto fatal_err;
}
+ trans_for_each_update(trans, i) {
+ enum bch_validate_flags invalid_flags = 0;
+
+ if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
+ invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
+
+ ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
+ i->bkey_type, invalid_flags);
+ if (unlikely(ret)){
+ bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
+ trans->fn, (void *) i->ip_allocated);
+ goto fatal_err;
+ }
+ btree_insert_entry_checks(trans, i);
+ }
+
+ for (struct jset_entry *i = trans->journal_entries;
+ i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
+ i = vstruct_next(i)) {
+ enum bch_validate_flags invalid_flags = 0;
+
+ if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
+ invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
+
+ ret = bch2_journal_entry_validate(c, NULL, i,
+ bcachefs_metadata_version_current,
+ CPU_BIG_ENDIAN, invalid_flags);
+ if (unlikely(ret)) {
+ bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
+ trans->fn);
+ goto fatal_err;
+ }
+ }
+
if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
struct journal *j = &c->journal;
struct jset_entry *entry;
@@ -798,7 +836,7 @@ revert_fs_usage:
struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start);
bch2_accounting_neg(a);
- bch2_accounting_mem_mod_locked(trans, a.c, false, false);
+ bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal);
bch2_accounting_neg(a);
}
percpu_up_read(&c->mark_lock);
@@ -1019,40 +1057,6 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
if (ret)
goto out_reset;
- trans_for_each_update(trans, i) {
- enum bch_validate_flags invalid_flags = 0;
-
- if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
- invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
-
- ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
- i->bkey_type, invalid_flags);
- if (unlikely(ret)){
- bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
- trans->fn, (void *) i->ip_allocated);
- return ret;
- }
- btree_insert_entry_checks(trans, i);
- }
-
- for (struct jset_entry *i = trans->journal_entries;
- i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
- i = vstruct_next(i)) {
- enum bch_validate_flags invalid_flags = 0;
-
- if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
- invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
-
- ret = bch2_journal_entry_validate(c, NULL, i,
- bcachefs_metadata_version_current,
- CPU_BIG_ENDIAN, invalid_flags);
- if (unlikely(ret)) {
- bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
- trans->fn);
- return ret;
- }
- }
-
if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
ret = do_bch2_trans_commit_to_journal_replay(trans);
goto out_reset;
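The two validation loops (bkeys in the update list, then the transaction's journal entries) are not new code: they move verbatim from __bch2_trans_commit() into bch2_trans_commit_write_locked(). In the new position they run after triggers and after accounting bversions are assigned, so validation sees the keys as they will actually be written, and a failure now exits through fatal_err and the revert_fs_usage path rather than a bare return. The apply/validate/revert ordering in miniature, with stand-in types:

/* validate_all() models the bkey/journal checks */
struct delta { long *counter; long d; };

static int validate_all(void) { return 0; }

static int commit_locked(struct delta *d, unsigned nr)
{
	unsigned i;
	int ret;

	for (i = 0; i < nr; i++)
		*d[i].counter += d[i].d;

	ret = validate_all();
	if (ret)
		goto revert;
	return 0;
revert:
	/* unwind so counters never stay half-applied */
	while (i--)
		*d[i].counter -= d[i].d;
	return ret;
}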
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index b256b2a20a4f..4568a41fefaf 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -138,6 +138,31 @@ struct btree {
struct list_head list;
};
+#define BCH_BTREE_CACHE_NOT_FREED_REASONS() \
+ x(lock_intent) \
+ x(lock_write) \
+ x(dirty) \
+ x(read_in_flight) \
+ x(write_in_flight) \
+ x(noevict) \
+ x(write_blocked) \
+ x(will_make_reachable) \
+ x(access_bit)
+
+enum bch_btree_cache_not_freed_reasons {
+#define x(n) BCH_BTREE_CACHE_NOT_FREED_##n,
+ BCH_BTREE_CACHE_NOT_FREED_REASONS()
+#undef x
+ BCH_BTREE_CACHE_NOT_FREED_REASONS_NR,
+};
+
+struct btree_cache_list {
+ unsigned idx;
+ struct shrinker *shrink;
+ struct list_head list;
+ size_t nr;
+};
+
struct btree_cache {
struct rhashtable table;
bool table_init_done;
@@ -155,28 +180,19 @@ struct btree_cache {
* should never grow past ~2-3 nodes in practice.
*/
struct mutex lock;
- struct list_head live;
struct list_head freeable;
struct list_head freed_pcpu;
struct list_head freed_nonpcpu;
+ struct btree_cache_list live[2];
- /* Number of elements in live + freeable lists */
- unsigned used;
- unsigned reserve;
- unsigned freed;
- unsigned not_freed_lock_intent;
- unsigned not_freed_lock_write;
- unsigned not_freed_dirty;
- unsigned not_freed_read_in_flight;
- unsigned not_freed_write_in_flight;
- unsigned not_freed_noevict;
- unsigned not_freed_write_blocked;
- unsigned not_freed_will_make_reachable;
- unsigned not_freed_access_bit;
- atomic_t dirty;
- struct shrinker *shrink;
+ size_t nr_freeable;
+ size_t nr_reserve;
+ size_t nr_by_btree[BTREE_ID_NR];
+ atomic_long_t nr_dirty;
- unsigned used_by_btree[BTREE_ID_NR];
+ /* shrinker stats */
+ size_t nr_freed;
+ u64 not_freed[BCH_BTREE_CACHE_NOT_FREED_REASONS_NR];
/*
* If we need to allocate memory for a new btree node and that
@@ -189,8 +205,8 @@ struct btree_cache {
struct bbpos pinned_nodes_start;
struct bbpos pinned_nodes_end;
- u64 pinned_nodes_leaf_mask;
- u64 pinned_nodes_interior_mask;
+ /* btree id mask: 0 for leaves, 1 for interior */
+ u64 pinned_nodes_mask[2];
};
struct btree_node_iter {
@@ -386,17 +402,16 @@ struct bkey_cached {
struct btree_bkey_cached_common c;
unsigned long flags;
- unsigned long btree_trans_barrier_seq;
u16 u64s;
struct bkey_cached_key key;
struct rhash_head hash;
- struct list_head list;
struct journal_entry_pin journal;
u64 seq;
struct bkey_i *k;
+ struct rcu_head rcu;
};
static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
@@ -583,7 +598,8 @@ enum btree_write_type {
x(dying) \
x(fake) \
x(need_rewrite) \
- x(never_write)
+ x(never_write) \
+ x(pinned)
enum btree_flags {
/* First bits for btree node write type */
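The nine separate not_freed_* counters collapse into one array indexed by an enum that the BCH_BTREE_CACHE_NOT_FREED_REASONS() x-macro generates; expanding the same list a second time can then produce matching name strings for debug output, with no way for the two to drift apart. The pattern in isolation:

#include <stdio.h>

#define NOT_FREED_REASONS() \
	x(lock_intent)      \
	x(lock_write)       \
	x(dirty)

enum not_freed_reason {
#define x(n) NOT_FREED_##n,
	NOT_FREED_REASONS()
#undef x
	NOT_FREED_NR,
};

/* Same list expanded a second time: names stay in sync automatically */
static const char * const not_freed_strs[] = {
#define x(n) #n,
	NOT_FREED_REASONS()
#undef x
};

int main(void)
{
	unsigned long not_freed[NOT_FREED_NR] = { 0 };

	not_freed[NOT_FREED_dirty]++;
	for (int i = 0; i < NOT_FREED_NR; i++)
		printf("%s: %lu\n", not_freed_strs[i], not_freed[i]);
	return 0;
}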
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index d6f6df10dcc3..514df618548e 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -374,7 +374,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
i->key_cache_already_flushed = true;
i->flags |= BTREE_TRIGGER_norun;
- btree_path_set_should_be_locked(btree_path);
+ btree_path_set_should_be_locked(trans, btree_path);
ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
bch2_path_put(trans, path_idx, true);
@@ -422,7 +422,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
break;
}
- if (!cmp && i < trans->updates + trans->nr_updates) {
+ bool overwrite = !cmp && i < trans->updates + trans->nr_updates;
+
+ if (overwrite) {
EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
bch2_path_put(trans, i->path, true);
@@ -449,7 +451,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
}
}
- __btree_path_get(trans->paths + i->path, true);
+ __btree_path_get(trans, trans->paths + i->path, true);
+
+ trace_update_by_path(trans, path, i, overwrite);
/*
* If a key is present in the key cache, it must also exist in the
@@ -498,7 +502,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
}
- btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+ btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
}
return 0;
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 60393e98084d..6a454f2fa005 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -220,7 +220,8 @@ static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *t
if (type && k.k->type != type)
return ERR_PTR(-ENOENT);
- mut = bch2_trans_kmalloc_nomemzero(trans, bytes);
+ /* extra padding for varint_decode_fast... */
+ mut = bch2_trans_kmalloc_nomemzero(trans, bytes + 8);
if (!IS_ERR(mut)) {
bkey_reassemble(mut, k);
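The comment in the hunk is terse, so to spell it out: a fast varint decoder loads a whole 8-byte word at the read position instead of up to nine single bytes, which means a value ending flush with the end of the allocation would read past it; padding every such allocation by 8 bytes makes that tail load always in bounds. A sketch of the trade, not the bcachefs varint format:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Slack so loads at the tail never leave the allocation */
void *alloc_for_fast_decode(size_t len)
{
	return malloc(len + 8);
}

/* The kind of load that needs the slack: one unaligned 64-bit read,
 * which may touch up to 7 bytes past the last valid byte */
uint64_t load_word(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}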
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 8fd112026e7a..190bc1e81756 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -16,6 +16,7 @@
#include "clock.h"
#include "error.h"
#include "extents.h"
+#include "io_write.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
@@ -145,7 +146,7 @@ fsck_err:
printbuf_exit(&buf);
return ret;
topology_repair:
- if ((c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
+ if ((c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) {
bch2_inconsistent_error(c);
ret = -BCH_ERR_btree_need_topology_repair;
@@ -250,8 +251,13 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
unsigned i, level = b->c.level;
bch2_btree_node_lock_write_nofail(trans, path, &b->c);
+
+ mutex_lock(&c->btree_cache.lock);
bch2_btree_node_hash_remove(&c->btree_cache, b);
+ mutex_unlock(&c->btree_cache.lock);
+
__btree_node_free(trans, b);
+
six_unlock_write(&b->c.lock);
mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
@@ -283,7 +289,6 @@ static void bch2_btree_node_free_never_used(struct btree_update *as,
clear_btree_node_need_write(b);
mutex_lock(&c->btree_cache.lock);
- list_del_init(&b->list);
bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_unlock(&c->btree_cache.lock);
@@ -732,6 +737,18 @@ static void btree_update_nodes_written(struct btree_update *as)
"%s", bch2_err_str(ret));
err:
/*
+ * Ensure transaction is unlocked before using btree_node_lock_nopath()
+ * (the use of which is always suspect, we need to work on removing this
+ * in the future)
+ *
+ * It should be, but bch2_path_get_unlocked_mut() -> bch2_path_get()
+ * calls bch2_path_upgrade(), before we call path_make_mut(), so we may
+ * rarely end up with a locked path besides the one we have here:
+ */
+ bch2_trans_unlock(trans);
+ bch2_trans_begin(trans);
+
+ /*
* We have to be careful because another thread might be getting ready
* to free as->b and calling btree_update_reparent() on us - we'll
* recheck under btree_update_lock below:
@@ -750,18 +767,6 @@ err:
* we're in journal error state:
*/
- /*
- * Ensure transaction is unlocked before using
- * btree_node_lock_nopath() (the use of which is always suspect,
- * we need to work on removing this in the future)
- *
- * It should be, but bch2_path_get_unlocked_mut() -> bch2_path_get()
- * calls bch2_path_upgrade(), before we call path_make_mut(), so
- * we may rarely end up with a locked path besides the one we
- * have here:
- */
- bch2_trans_unlock(trans);
- bch2_trans_begin(trans);
btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
@@ -1899,7 +1904,7 @@ static void __btree_increase_depth(struct btree_update *as, struct btree_trans *
six_unlock_intent(&n->c.lock);
mutex_lock(&c->btree_cache.lock);
- list_add_tail(&b->list, &c->btree_cache.live);
+ list_add_tail(&b->list, &c->btree_cache.live[btree_node_pinned(b)].list);
mutex_unlock(&c->btree_cache.lock);
bch2_trans_verify_locks(trans);
@@ -1981,7 +1986,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
if (ret)
goto err;
- btree_path_set_should_be_locked(trans->paths + sib_path);
+ btree_path_set_should_be_locked(trans, trans->paths + sib_path);
m = trans->paths[sib_path].l[level].b;
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 02c6ecada97c..10f400957f21 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -159,6 +159,8 @@ static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
unsigned level,
unsigned flags)
{
+ bch2_trans_verify_not_unlocked(trans);
+
return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
btree_prev_sib) ?:
bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 721bbe1dffc1..546cd01a72e3 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -75,6 +75,15 @@ void bch2_dev_usage_to_text(struct printbuf *out,
struct bch_dev *ca,
struct bch_dev_usage *usage)
{
+ if (out->nr_tabstops < 5) {
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, 12);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ }
+
prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");
for (unsigned i = 0; i < BCH_DATA_NR; i++) {
@@ -272,7 +281,7 @@ int bch2_check_fix_ptrs(struct btree_trans *trans,
goto err;
rcu_read_lock();
- bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_rcu(c, ptr->dev));
+ bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
rcu_read_unlock();
if (level) {
@@ -477,7 +486,7 @@ out:
return ret;
err:
bch2_dump_trans_updates(trans);
- ret = -EIO;
+ ret = -BCH_ERR_bucket_ref_update;
goto out;
}
@@ -556,22 +565,24 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
s64 *sectors,
enum btree_iter_update_trigger_flags flags)
{
+ struct bch_fs *c = trans->c;
bool insert = !(flags & BTREE_TRIGGER_overwrite);
struct printbuf buf = PRINTBUF;
int ret = 0;
- struct bch_fs *c = trans->c;
+ u64 abs_sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);
+ *sectors = insert ? abs_sectors : -abs_sectors;
+
struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
if (unlikely(!ca)) {
if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
- ret = -EIO;
+ ret = -BCH_ERR_trigger_pointer;
goto err;
}
struct bpos bucket;
struct bch_backpointer bp;
- bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp);
- *sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);
+ __bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp, abs_sectors);
if (flags & BTREE_TRIGGER_transactional) {
struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
@@ -593,7 +604,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
p.ptr.dev,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_trigger_pointer;
goto err_unlock;
}
@@ -638,7 +649,7 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
bch2_trans_inconsistent(trans,
"stripe pointer doesn't match stripe %llu",
(u64) p.ec.idx);
- ret = -EIO;
+ ret = -BCH_ERR_trigger_stripe_pointer;
goto err;
}
@@ -677,7 +688,7 @@ err:
(u64) p.ec.idx, buf.buf);
printbuf_exit(&buf);
bch2_inconsistent_error(c);
- return -EIO;
+ return -BCH_ERR_trigger_stripe_pointer;
}
m->block_sectors[p.ec.block] += sectors;
@@ -741,7 +752,7 @@ static int __trigger_extent(struct btree_trans *trans,
return ret;
} else if (!p.has_ec) {
*replicas_sectors += disk_sectors;
- acc_replicas_key.replicas.devs[acc_replicas_key.replicas.nr_devs++] = p.ptr.dev;
+ replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
} else {
ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
if (ret)
@@ -957,7 +968,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
bch2_data_type_str(a->v.data_type),
bch2_data_type_str(type),
bch2_data_type_str(type));
- ret = -EIO;
+ ret = -BCH_ERR_metadata_bucket_inconsistency;
goto err;
}
@@ -1013,7 +1024,7 @@ err:
bucket_unlock(g);
err_unlock:
percpu_up_read(&c->mark_lock);
- return -EIO;
+ return -BCH_ERR_metadata_bucket_inconsistency;
}
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
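A recurring change in buckets.c: bare -EIO returns become dedicated error codes (-BCH_ERR_bucket_ref_update, -BCH_ERR_trigger_pointer, ...) that sit outside the standard errno range and pin down the exact failure site in logs and tracing. My understanding of the scheme, sketched with illustrative names, is that such private codes are demoted to a base errno before they escape the filesystem:

#include <errno.h>

/* Private error codes above the errno range, mapped to a base errno
 * at the boundary. Names are illustrative, not the bcachefs ones. */
enum {
	ERR_START = 2048,		/* above any standard errno */
	ERR_trigger_pointer,
	ERR_metadata_bucket_inconsistency,
};

/* What callers outside the module see */
static int err_class(int err)
{
	return err <= -ERR_START ? -EIO : err;
}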
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index edbdffd508fc..e2cb7b24b220 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -80,22 +80,9 @@ static inline void bucket_lock(struct bucket *b)
TASK_UNINTERRUPTIBLE);
}
-static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
-{
- return rcu_dereference_check(ca->buckets_gc,
- !ca->fs ||
- percpu_rwsem_is_held(&ca->fs->mark_lock) ||
- lockdep_is_held(&ca->fs->state_lock) ||
- lockdep_is_held(&ca->bucket_lock));
-}
-
static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
- struct bucket_array *buckets = gc_bucket_array(ca);
-
- if (b - buckets->first_bucket >= buckets->nbuckets_minus_first)
- return NULL;
- return buckets->b + b;
+ return genradix_ptr(&ca->buckets_gc, b);
}
static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index c9698cdf866f..28bd09a253c8 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -19,14 +19,6 @@ struct bucket {
u32 stripe_sectors;
} __aligned(sizeof(long));
-struct bucket_array {
- struct rcu_head rcu;
- u16 first_bucket;
- size_t nbuckets;
- size_t nbuckets_minus_first;
- struct bucket b[];
-};
-
struct bucket_gens {
struct rcu_head rcu;
u16 first_bucket;
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index ef1f74866e23..cbfd88f98472 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -471,7 +471,6 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
static const struct file_operations bcachefs_data_ops = {
.release = bch2_data_job_release,
.read = bch2_data_job_read,
- .llseek = no_llseek,
};
static long bch2_ioctl_data(struct bch_fs *c,
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index e7208bf1974e..ce8fc677bef9 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -100,13 +100,12 @@ static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
struct scatterlist *sg, size_t len)
{
SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- int ret;
skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
- ret = crypto_skcipher_encrypt(req);
+ int ret = crypto_skcipher_encrypt(req);
if (ret)
pr_err("got error %i from crypto_skcipher_encrypt()", ret);
@@ -118,38 +117,47 @@ static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
void *buf, size_t len)
{
if (!is_vmalloc_addr(buf)) {
- struct scatterlist sg;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg,
- is_vmalloc_addr(buf)
- ? vmalloc_to_page(buf)
- : virt_to_page(buf),
- len, offset_in_page(buf));
+ struct scatterlist sg = {};
+
+ sg_mark_end(&sg);
+ sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
return do_encrypt_sg(tfm, nonce, &sg, len);
} else {
- unsigned pages = buf_pages(buf, len);
- struct scatterlist *sg;
- size_t orig_len = len;
- int ret, i;
-
- sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
- if (!sg)
- return -BCH_ERR_ENOMEM_do_encrypt;
+ DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
+ size_t sgl_len = 0;
+ int ret;
- sg_init_table(sg, pages);
+ darray_init(&sgl);
- for (i = 0; i < pages; i++) {
+ while (len) {
unsigned offset = offset_in_page(buf);
- unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);
+ struct scatterlist sg = {
+ .page_link = (unsigned long) vmalloc_to_page(buf),
+ .offset = offset,
+ .length = min(len, PAGE_SIZE - offset),
+ };
- sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
- buf += pg_len;
- len -= pg_len;
+ if (darray_push(&sgl, sg)) {
+ sg_mark_end(&darray_last(sgl));
+ ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
+ if (ret)
+ goto err;
+
+ nonce = nonce_add(nonce, sgl_len);
+ sgl_len = 0;
+ sgl.nr = 0;
+ BUG_ON(darray_push(&sgl, sg));
+ }
+
+ buf += sg.length;
+ len -= sg.length;
+ sgl_len += sg.length;
}
- ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
- kfree(sg);
+ sg_mark_end(&darray_last(sgl));
+ ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
+err:
+ darray_exit(&sgl);
return ret;
}
}
@@ -325,39 +333,42 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
{
struct bio_vec bv;
struct bvec_iter iter;
- struct scatterlist sgl[16], *sg = sgl;
- size_t bytes = 0;
+ DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
+ size_t sgl_len = 0;
int ret = 0;
if (!bch2_csum_type_is_encryption(type))
return 0;
- sg_init_table(sgl, ARRAY_SIZE(sgl));
+ darray_init(&sgl);
bio_for_each_segment(bv, bio, iter) {
- if (sg == sgl + ARRAY_SIZE(sgl)) {
- sg_mark_end(sg - 1);
-
- ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+ struct scatterlist sg = {
+ .page_link = (unsigned long) bv.bv_page,
+ .offset = bv.bv_offset,
+ .length = bv.bv_len,
+ };
+
+ if (darray_push(&sgl, sg)) {
+ sg_mark_end(&darray_last(sgl));
+ ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
if (ret)
- return ret;
+ goto err;
- nonce = nonce_add(nonce, bytes);
- bytes = 0;
+ nonce = nonce_add(nonce, sgl_len);
+ sgl_len = 0;
+ sgl.nr = 0;
- sg_init_table(sgl, ARRAY_SIZE(sgl));
- sg = sgl;
+ BUG_ON(darray_push(&sgl, sg));
}
- sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
- bytes += bv.bv_len;
- }
-
- if (sg != sgl) {
- sg_mark_end(sg - 1);
- return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+ sgl_len += sg.length;
}
+ sg_mark_end(&darray_last(sgl));
+ ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
+err:
+ darray_exit(&sgl);
return ret;
}
diff --git a/fs/bcachefs/clock.h b/fs/bcachefs/clock.h
index 85c975dfbcfe..82c79c8baf92 100644
--- a/fs/bcachefs/clock.h
+++ b/fs/bcachefs/clock.h
@@ -20,15 +20,6 @@ static inline void bch2_increment_clock(struct bch_fs *c, u64 sectors,
void bch2_io_clock_schedule_timeout(struct io_clock *, u64);
-#define bch2_kthread_wait_event_ioclock_timeout(condition, clock, timeout)\
-({ \
- long __ret = timeout; \
- might_sleep(); \
- if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_timeout(wq, condition, timeout); \
- __ret; \
-})
-
void bch2_io_timers_to_text(struct printbuf *, struct io_clock *);
void bch2_io_clock_exit(struct io_clock *);
diff --git a/fs/bcachefs/darray.c b/fs/bcachefs/darray.c
index b7d223f85873..4f06cd8bbbe1 100644
--- a/fs/bcachefs/darray.c
+++ b/fs/bcachefs/darray.c
@@ -4,12 +4,12 @@
#include <linux/slab.h>
#include "darray.h"
-int __bch2_darray_resize(darray_char *d, size_t element_size, size_t new_size, gfp_t gfp)
+int __bch2_darray_resize_noprof(darray_char *d, size_t element_size, size_t new_size, gfp_t gfp)
{
if (new_size > d->size) {
new_size = roundup_pow_of_two(new_size);
- void *data = kvmalloc_array(new_size, element_size, gfp);
+ void *data = kvmalloc_array_noprof(new_size, element_size, gfp);
if (!data)
return -ENOMEM;
diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h
index 4b340d13caac..8f4c3f0665c4 100644
--- a/fs/bcachefs/darray.h
+++ b/fs/bcachefs/darray.h
@@ -22,29 +22,23 @@ struct { \
typedef DARRAY(char) darray_char;
typedef DARRAY(char *) darray_str;
-int __bch2_darray_resize(darray_char *, size_t, size_t, gfp_t);
-
-static inline int __darray_resize(darray_char *d, size_t element_size,
- size_t new_size, gfp_t gfp)
-{
- return unlikely(new_size > d->size)
- ? __bch2_darray_resize(d, element_size, new_size, gfp)
- : 0;
-}
+int __bch2_darray_resize_noprof(darray_char *, size_t, size_t, gfp_t);
+
+#define __bch2_darray_resize(...) alloc_hooks(__bch2_darray_resize_noprof(__VA_ARGS__))
+
+#define __darray_resize(_d, _element_size, _new_size, _gfp) \
+ (unlikely((_new_size) > (_d)->size) \
+ ? __bch2_darray_resize((_d), (_element_size), (_new_size), (_gfp))\
+ : 0)
#define darray_resize_gfp(_d, _new_size, _gfp) \
- unlikely(__darray_resize((darray_char *) (_d), sizeof((_d)->data[0]), (_new_size), _gfp))
+ __darray_resize((darray_char *) (_d), sizeof((_d)->data[0]), (_new_size), _gfp)
#define darray_resize(_d, _new_size) \
darray_resize_gfp(_d, _new_size, GFP_KERNEL)
-static inline int __darray_make_room(darray_char *d, size_t t_size, size_t more, gfp_t gfp)
-{
- return __darray_resize(d, t_size, d->nr + more, gfp);
-}
-
#define darray_make_room_gfp(_d, _more, _gfp) \
- __darray_make_room((darray_char *) (_d), sizeof((_d)->data[0]), (_more), _gfp)
+ darray_resize_gfp((_d), (_d)->nr + (_more), _gfp)
#define darray_make_room(_d, _more) \
darray_make_room_gfp(_d, _more, GFP_KERNEL)
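
The darray.h change above converts the resize helpers from inline functions into macros wrapped in alloc_hooks(), pairing them with a _noprof implementation so memory-allocation profiling charges the kvmalloc to the darray user's call site rather than to darray.c. A simplified userspace illustration of the _noprof/wrapper split — record_callsite() is invented here to stand in for the kernel's profiling hook:

#include <stdio.h>
#include <stdlib.h>

static void *my_alloc_noprof(size_t size)	/* does the real work */
{
	return malloc(size);
}

#define record_callsite() \
	printf("alloc charged to %s:%d\n", __FILE__, __LINE__)

/* callers use the wrapper, so __LINE__ resolves at the call site: */
#define my_alloc(size) \
	({ record_callsite(); my_alloc_noprof(size); })

int main(void)
{
	void *p = my_alloc(64);	/* accounted here, not in my_alloc_noprof() */
	free(p);
	return 0;
}
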
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 004894ad4147..462b1a2fe1ad 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -571,7 +571,7 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans,
while (data_opts.kill_ptrs) {
unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
- bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
+ bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, i++ == drop);
data_opts.kill_ptrs ^= 1U << drop;
}
@@ -639,7 +639,7 @@ int bch2_data_update_init(struct btree_trans *trans,
bch2_write_op_init(&m->op, c, io_opts);
m->op.pos = bkey_start_pos(k.k);
- m->op.version = k.k->version;
+ m->op.version = k.k->bversion;
m->op.target = data_opts.target;
m->op.write_point = wp;
m->op.nr_replicas = 0;
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index 32bfdf19289a..84dd4a879d98 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -552,62 +552,30 @@ static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subv
int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- subvol_inum target;
- u32 snapshot;
struct bkey_buf sk;
- int ret;
-
bch2_bkey_buf_init(&sk);
-retry:
- bch2_trans_begin(trans);
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents,
- SPOS(inum.inum, ctx->pos, snapshot),
- POS(inum.inum, U64_MAX), 0, k, ret) {
- if (k.k->type != KEY_TYPE_dirent)
- continue;
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_dirents,
+ POS(inum.inum, ctx->pos),
+ POS(inum.inum, U64_MAX),
+ inum.subvol, 0, k, ({
+ if (k.k->type != KEY_TYPE_dirent)
+ continue;
- /* dir_emit() can fault and block: */
- bch2_bkey_buf_reassemble(&sk, c, k);
- struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);
+ /* dir_emit() can fault and block: */
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);
- ret = bch2_dirent_read_target(trans, inum, dirent, &target);
- if (ret < 0)
- break;
- if (ret)
- continue;
+ subvol_inum target;
+ int ret2 = bch2_dirent_read_target(trans, inum, dirent, &target);
+ if (ret2 > 0)
+ continue;
- /*
- * read_target looks up subvolumes, we can overflow paths if the
- * directory has many subvolumes in it
- *
- * XXX: btree_trans_too_many_iters() is something we'd like to
- * get rid of, and there's no good reason to be using it here
- * except that we don't yet have a for_each_btree_key() helper
- * that does subvolume_get_snapshot().
- */
- ret = drop_locks_do(trans,
- bch2_dir_emit(ctx, dirent, target)) ?:
- btree_trans_too_many_iters(trans);
- if (ret) {
- ret = ret < 0 ? ret : 0;
- break;
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
+ ret2 ?: drop_locks_do(trans, bch2_dir_emit(ctx, dirent, target));
+ })));
- bch2_trans_put(trans);
bch2_bkey_buf_exit(&sk, c);
- return ret;
+ return ret < 0 ? ret : 0;
}
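
bch2_readdir() above is the first of several conversions in this patch (fs-io-pagecache.c and fs-io.c below follow suit) to for_each_btree_key_in_subvolume_upto(), which folds the snapshot lookup and transaction-restart retry loop into one macro whose body is a GNU statement expression: break and continue behave as in an open-coded loop, and the body's final expression becomes the per-iteration error code. A toy macro with the same shape — FOR_EACH_INT() is invented for illustration:

#include <stdio.h>

#define FOR_EACH_INT(_i, _n, _body)			\
({							\
	int _ret = 0;					\
	for (int _i = 0; _i < (_n); _i++) {		\
		_ret = (_body);				\
		if (_ret)				\
			break;				\
	}						\
	_ret;						\
})

int main(void)
{
	int ret = FOR_EACH_INT(i, 10, ({
		if (i == 3)
			continue;	/* like the KEY_TYPE_dirent check */
		printf("i=%d\n", i);
		i == 7 ? -1 : 0;	/* nonzero value stops the loop */
	}));

	printf("ret=%d\n", ret);
	return 0;
}

Jumping out of a statement expression with break/continue is a GNU extension, which is one reason these iterators are macro-based rather than function-based.
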
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index e972e2bca546..9f3133e3e7e5 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -134,6 +134,10 @@ int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
void *end = &acc_k + 1;
int ret = 0;
+ bkey_fsck_err_on(bversion_zero(k.k->bversion),
+ c, accounting_key_version_0,
+ "accounting key with version=0");
+
switch (acc_k.type) {
case BCH_DISK_ACCOUNTING_nr_inodes:
end = field_end(acc_k, nr_inodes);
@@ -291,7 +295,7 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun
struct accounting_mem_entry n = {
.pos = a.k->p,
- .version = a.k->version,
+ .bversion = a.k->bversion,
.nr_counters = bch2_accounting_counters(a.k),
.v[0] = __alloc_percpu_gfp(n.nr_counters * sizeof(u64),
sizeof(u64), GFP_KERNEL),
@@ -319,11 +323,13 @@ err:
return -BCH_ERR_ENOMEM_disk_accounting;
}
-int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
+int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a,
+ enum bch_accounting_mode mode)
{
struct bch_replicas_padded r;
- if (accounting_to_replicas(&r.e, a.k->p) &&
+ if (mode != BCH_ACCOUNTING_read &&
+ accounting_to_replicas(&r.e, a.k->p) &&
!bch2_replicas_marked_locked(c, &r.e))
return -BCH_ERR_btree_insert_need_mark_replicas;
@@ -566,7 +572,9 @@ int bch2_gc_accounting_done(struct bch_fs *c)
struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
accounting_key_init(&k_i.k, &acc_k, src_v, nr);
- bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), false, false);
+ bch2_accounting_mem_mod_locked(trans,
+ bkey_i_to_s_c_accounting(&k_i.k),
+ BCH_ACCOUNTING_normal);
preempt_disable();
struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
@@ -589,30 +597,14 @@ fsck_err:
static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
if (k.k->type != KEY_TYPE_accounting)
return 0;
percpu_down_read(&c->mark_lock);
- int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k), false, true);
+ int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k),
+ BCH_ACCOUNTING_read);
percpu_up_read(&c->mark_lock);
-
- if (bch2_accounting_key_is_zero(bkey_s_c_to_accounting(k)) &&
- ret == -BCH_ERR_btree_insert_need_mark_replicas)
- ret = 0;
-
- struct disk_accounting_pos acc;
- bpos_to_disk_accounting_pos(&acc, k.k->p);
-
- if (fsck_err_on(ret == -BCH_ERR_btree_insert_need_mark_replicas,
- trans, accounting_replicas_not_marked,
- "accounting not marked in superblock replicas\n %s",
- (bch2_accounting_key_to_text(&buf, &acc),
- buf.buf)))
- ret = bch2_accounting_update_sb_one(c, k.k->p);
-fsck_err:
- printbuf_exit(&buf);
return ret;
}
@@ -624,6 +616,7 @@ int bch2_accounting_read(struct bch_fs *c)
{
struct bch_accounting_mem *acc = &c->accounting;
struct btree_trans *trans = bch2_trans_get(c);
+ struct printbuf buf = PRINTBUF;
int ret = for_each_btree_key(trans, iter,
BTREE_ID_accounting, POS_MIN,
@@ -647,7 +640,7 @@ int bch2_accounting_read(struct bch_fs *c)
accounting_pos_cmp, &k.k->p);
bool applied = idx < acc->k.nr &&
- bversion_cmp(acc->k.data[idx].version, k.k->version) >= 0;
+ bversion_cmp(acc->k.data[idx].bversion, k.k->bversion) >= 0;
if (applied)
continue;
@@ -655,7 +648,7 @@ int bch2_accounting_read(struct bch_fs *c)
if (i + 1 < &darray_top(*keys) &&
i[1].k->k.type == KEY_TYPE_accounting &&
!journal_key_cmp(i, i + 1)) {
- BUG_ON(bversion_cmp(i[0].k->k.version, i[1].k->k.version) >= 0);
+ WARN_ON(bversion_cmp(i[0].k->k.bversion, i[1].k->k.bversion) >= 0);
i[1].journal_seq = i[0].journal_seq;
@@ -674,6 +667,45 @@ int bch2_accounting_read(struct bch_fs *c)
keys->gap = keys->nr = dst - keys->data;
percpu_down_read(&c->mark_lock);
+ for (unsigned i = 0; i < acc->k.nr; i++) {
+ u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
+ bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);
+
+ if (bch2_is_zero(v, sizeof(v[0]) * acc->k.data[i].nr_counters))
+ continue;
+
+ struct bch_replicas_padded r;
+ if (!accounting_to_replicas(&r.e, acc->k.data[i].pos))
+ continue;
+
+ /*
+ * If the replicas entry is invalid it'll get cleaned up by
+ * check_allocations:
+ */
+ if (bch2_replicas_entry_validate(&r.e, c, &buf))
+ continue;
+
+ struct disk_accounting_pos k;
+ bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos);
+
+ if (fsck_err_on(!bch2_replicas_marked_locked(c, &r.e),
+ trans, accounting_replicas_not_marked,
+ "accounting not marked in superblock replicas\n %s",
+ (printbuf_reset(&buf),
+ bch2_accounting_key_to_text(&buf, &k),
+ buf.buf))) {
+ /*
+ * We're not RW yet and still single threaded, dropping
+ * and retaking lock is ok:
+ */
+ percpu_up_read(&c->mark_lock);
+ ret = bch2_mark_replicas(c, &r.e);
+ if (ret)
+ goto fsck_err;
+ percpu_down_read(&c->mark_lock);
+ }
+ }
+
preempt_disable();
struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage);
@@ -709,8 +741,10 @@ int bch2_accounting_read(struct bch_fs *c)
}
}
preempt_enable();
+fsck_err:
percpu_up_read(&c->mark_lock);
err:
+ printbuf_exit(&buf);
bch2_trans_put(trans);
bch_err_fn(c, ret);
return ret;
diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h
index f29fd0dd9581..4ea6c8a092bc 100644
--- a/fs/bcachefs/disk_accounting.h
+++ b/fs/bcachefs/disk_accounting.h
@@ -36,8 +36,8 @@ static inline void bch2_accounting_accumulate(struct bkey_i_accounting *dst,
for (unsigned i = 0; i < bch2_accounting_counters(&dst->k); i++)
dst->v.d[i] += src.v->d[i];
- if (bversion_cmp(dst->k.version, src.k->version) < 0)
- dst->k.version = src.k->version;
+ if (bversion_cmp(dst->k.bversion, src.k->bversion) < 0)
+ dst->k.bversion = src.k->bversion;
}
static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
@@ -103,23 +103,35 @@ static inline int accounting_pos_cmp(const void *_l, const void *_r)
return bpos_cmp(*l, *r);
}
-int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, bool);
+enum bch_accounting_mode {
+ BCH_ACCOUNTING_normal,
+ BCH_ACCOUNTING_gc,
+ BCH_ACCOUNTING_read,
+};
+
+int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode);
void bch2_accounting_mem_gc(struct bch_fs *);
/*
* Update in memory counters so they match the btree update we're doing; called
* from transaction commit path
*/
-static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc, bool read)
+static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans,
+ struct bkey_s_c_accounting a,
+ enum bch_accounting_mode mode)
{
struct bch_fs *c = trans->c;
+ struct bch_accounting_mem *acc = &c->accounting;
struct disk_accounting_pos acc_k;
bpos_to_disk_accounting_pos(&acc_k, a.k->p);
+ bool gc = mode == BCH_ACCOUNTING_gc;
+
+ EBUG_ON(gc && !acc->gc_running);
if (acc_k.type == BCH_DISK_ACCOUNTING_inum)
return 0;
- if (!gc && !read) {
+ if (mode == BCH_ACCOUNTING_normal) {
switch (acc_k.type) {
case BCH_DISK_ACCOUNTING_persistent_reserved:
trans->fs_usage_delta.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
@@ -140,14 +152,11 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru
}
}
- struct bch_accounting_mem *acc = &c->accounting;
unsigned idx;
- EBUG_ON(gc && !acc->gc_running);
-
while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
- int ret = bch2_accounting_mem_insert(c, a, gc);
+ int ret = bch2_accounting_mem_insert(c, a, mode);
if (ret)
return ret;
}
@@ -164,7 +173,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru
static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
{
percpu_down_read(&trans->c->mark_lock);
- int ret = bch2_accounting_mem_mod_locked(trans, a, gc, false);
+ int ret = bch2_accounting_mem_mod_locked(trans, a, gc ? BCH_ACCOUNTING_gc : BCH_ACCOUNTING_normal);
percpu_up_read(&trans->c->mark_lock);
return ret;
}
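
Replacing the (bool gc, bool read) pair with enum bch_accounting_mode above removes the meaningless fourth state (gc and read at once) and makes call sites self-describing. A minimal sketch of the refactor, with invented names:

#include <stdio.h>

/* before: mem_mod(trans, a, false, true) was opaque at the call site,
 * and (true, true) compiled but meant nothing */
enum mode { MODE_normal, MODE_gc, MODE_read };

static void mem_mod(enum mode m)
{
	switch (m) {
	case MODE_normal:	printf("update fs_usage_delta\n"); break;
	case MODE_gc:		printf("update gc counters\n"); break;
	case MODE_read:		printf("skip replicas check\n"); break;
	}
}

int main(void)
{
	mem_mod(MODE_read);	/* self-describing, unlike a bool pair */
	return 0;
}
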
diff --git a/fs/bcachefs/disk_accounting_types.h b/fs/bcachefs/disk_accounting_types.h
index 1687a45177a7..b1982131b206 100644
--- a/fs/bcachefs/disk_accounting_types.h
+++ b/fs/bcachefs/disk_accounting_types.h
@@ -6,7 +6,7 @@
struct accounting_mem_entry {
struct bpos pos;
- struct bversion version;
+ struct bversion bversion;
unsigned nr_counters;
u64 __percpu *v[2];
};
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 141a4c63142f..1587c6e1866a 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -18,6 +18,7 @@
#include "ec.h"
#include "error.h"
#include "io_read.h"
+#include "io_write.h"
#include "keylist.h"
#include "recovery.h"
#include "replicas.h"
@@ -146,12 +147,18 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
bch2_prt_csum_type(out, s.csum_type);
prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);
+ if (s.disk_label) {
+ prt_str(out, " label");
+ bch2_disk_path_to_text(out, c, s.disk_label - 1);
+ }
+
for (unsigned i = 0; i < s.nr_blocks; i++) {
const struct bch_extent_ptr *ptr = sp->ptrs + i;
if ((void *) ptr >= bkey_val_end(k))
break;
+ prt_char(out, ' ');
bch2_extent_ptr_to_text(out, c, ptr);
if (s.csum_type < BCH_CSUM_NR &&
@@ -192,7 +199,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->stripe, s.k->p.offset,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
@@ -203,7 +210,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->cached_sectors,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
} else {
@@ -213,7 +220,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
bucket.inode, bucket.offset, a->gen,
a->stripe,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
@@ -223,7 +230,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
bch2_data_type_str(a->data_type),
bch2_data_type_str(data_type),
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
@@ -235,7 +242,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->cached_sectors,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
}
@@ -273,8 +280,8 @@ static int mark_stripe_bucket(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
if (unlikely(!ca)) {
- if (!(flags & BTREE_TRIGGER_overwrite))
- ret = -EIO;
+ if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite))
+ ret = -BCH_ERR_mark_stripe;
goto err;
}
@@ -293,7 +300,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
ptr->dev,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_mark_stripe;
goto err_unlock;
}
@@ -351,6 +358,19 @@ static int mark_stripe_buckets(struct btree_trans *trans,
return 0;
}
+static inline void stripe_to_mem(struct stripe *m, const struct bch_stripe *s)
+{
+ m->sectors = le16_to_cpu(s->sectors);
+ m->algorithm = s->algorithm;
+ m->nr_blocks = s->nr_blocks;
+ m->nr_redundant = s->nr_redundant;
+ m->disk_label = s->disk_label;
+ m->blocks_nonempty = 0;
+
+ for (unsigned i = 0; i < s->nr_blocks; i++)
+ m->blocks_nonempty += !!stripe_blockcount_get(s, i);
+}
+
int bch2_trigger_stripe(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s _new,
@@ -467,14 +487,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
memset(m, 0, sizeof(*m));
} else {
- m->sectors = le16_to_cpu(new_s->sectors);
- m->algorithm = new_s->algorithm;
- m->nr_blocks = new_s->nr_blocks;
- m->nr_redundant = new_s->nr_redundant;
- m->blocks_nonempty = 0;
-
- for (unsigned i = 0; i < new_s->nr_blocks; i++)
- m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
+ stripe_to_mem(m, new_s);
if (!old_s)
bch2_stripes_heap_insert(c, m, idx);
@@ -816,13 +829,16 @@ err:
}
/* recovery read path: */
-int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
+int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
+ struct bkey_s_c orig_k)
{
struct bch_fs *c = trans->c;
- struct ec_stripe_buf *buf;
+ struct ec_stripe_buf *buf = NULL;
struct closure cl;
struct bch_stripe *v;
unsigned i, offset;
+ const char *msg = NULL;
+ struct printbuf msgbuf = PRINTBUF;
int ret = 0;
closure_init_stack(&cl);
@@ -835,32 +851,28 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
if (ret) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: error %i looking up stripe", ret);
- kfree(buf);
- return -EIO;
+ msg = "stripe not found";
+ goto err;
}
v = &bkey_i_to_stripe(&buf->key)->v;
if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: pointer doesn't match stripe");
- ret = -EIO;
+ msg = "pointer doesn't match stripe";
goto err;
}
offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: read is bigger than stripe");
- ret = -EIO;
+ msg = "read is bigger than stripe";
goto err;
}
ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
- if (ret)
+ if (ret) {
+ msg = "-ENOMEM";
goto err;
+ }
for (i = 0; i < v->nr_blocks; i++)
ec_block_io(c, buf, REQ_OP_READ, i, &cl);
@@ -868,9 +880,7 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
closure_sync(&cl);
if (ec_nr_failed(buf) > v->nr_redundant) {
- bch_err_ratelimited(c,
- "error doing reconstruct read: unable to read enough blocks");
- ret = -EIO;
+ msg = "unable to read enough blocks";
goto err;
}
@@ -882,10 +892,17 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
-err:
+out:
ec_stripe_buf_exit(buf);
kfree(buf);
return ret;
+err:
+ bch2_bkey_val_to_text(&msgbuf, c, orig_k);
+ bch_err_ratelimited(c,
+ "error doing reconstruct read: %s\n %s", msg, msgbuf.buf);
+ printbuf_exit(&msgbuf);
+ ret = -BCH_ERR_stripe_reconstruct;
+ goto out;
}
/* stripe bucket accounting: */
@@ -1305,7 +1322,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
bkey_reassemble(n, k);
- bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
+ bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, ptr->dev != dev);
ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
BUG_ON(!ec_ptr);
@@ -1555,10 +1572,12 @@ void bch2_ec_do_stripe_creates(struct bch_fs *c)
bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}
-static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
+static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
struct ec_stripe_new *s = h->s;
+ lockdep_assert_held(&h->lock);
+
BUG_ON(!s->allocated && !s->err);
h->s = NULL;
@@ -1571,6 +1590,12 @@ static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
ec_stripe_new_put(c, s, STRIPE_REF_io);
}
+static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int err)
+{
+ h->s->err = err;
+ ec_stripe_new_set_pending(c, h);
+}
+
void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
struct ec_stripe_new *s = ob->ec;
@@ -1641,7 +1666,8 @@ static void ec_stripe_key_init(struct bch_fs *c,
struct bkey_i *k,
unsigned nr_data,
unsigned nr_parity,
- unsigned stripe_size)
+ unsigned stripe_size,
+ unsigned disk_label)
{
struct bkey_i_stripe *s = bkey_stripe_init(k);
unsigned u64s;
@@ -1652,7 +1678,7 @@ static void ec_stripe_key_init(struct bch_fs *c,
s->v.nr_redundant = nr_parity;
s->v.csum_granularity_bits = ilog2(c->opts.encoded_extent_max >> 9);
s->v.csum_type = BCH_CSUM_crc32c;
- s->v.pad = 0;
+ s->v.disk_label = disk_label;
while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
BUG_ON(1 << s->v.csum_granularity_bits >=
@@ -1685,40 +1711,32 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
s->nr_parity = h->redundancy;
ec_stripe_key_init(c, &s->new_stripe.key,
- s->nr_data, s->nr_parity, h->blocksize);
+ s->nr_data, s->nr_parity,
+ h->blocksize, h->disk_label);
h->s = s;
+ h->nr_created++;
return 0;
}
-static struct ec_stripe_head *
-ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
- unsigned algo, unsigned redundancy,
- enum bch_watermark watermark)
+static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h)
{
- struct ec_stripe_head *h;
-
- h = kzalloc(sizeof(*h), GFP_KERNEL);
- if (!h)
- return NULL;
-
- mutex_init(&h->lock);
- BUG_ON(!mutex_trylock(&h->lock));
-
- h->target = target;
- h->algo = algo;
- h->redundancy = redundancy;
- h->watermark = watermark;
+ struct bch_devs_mask devs = h->devs;
rcu_read_lock();
- h->devs = target_rw_devs(c, BCH_DATA_user, target);
+ h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
+ ? group_to_target(h->disk_label - 1)
+ : 0);
+ unsigned nr_devs = dev_mask_nr(&h->devs);
for_each_member_device_rcu(c, ca, &h->devs)
if (!ca->mi.durability)
__clear_bit(ca->dev_idx, h->devs.d);
+ unsigned nr_devs_with_durability = dev_mask_nr(&h->devs);
h->blocksize = pick_blocksize(c, &h->devs);
+ h->nr_active_devs = 0;
for_each_member_device_rcu(c, ca, &h->devs)
if (ca->mi.bucket_size == h->blocksize)
h->nr_active_devs++;
@@ -1729,9 +1747,50 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
* If we only have redundancy + 1 devices, we're better off with just
* replication:
*/
- if (h->nr_active_devs < h->redundancy + 2)
- bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?",
- h->nr_active_devs, h->redundancy + 2);
+ h->insufficient_devs = h->nr_active_devs < h->redundancy + 2;
+
+ if (h->insufficient_devs) {
+ const char *err;
+
+ if (nr_devs < h->redundancy + 2)
+ err = NULL;
+ else if (nr_devs_with_durability < h->redundancy + 2)
+ err = "cannot use durability=0 devices";
+ else
+ err = "mismatched bucket sizes";
+
+ if (err)
+ bch_err(c, "insufficient devices available to create stripe (have %u, need %u): %s",
+ h->nr_active_devs, h->redundancy + 2, err);
+ }
+
+ struct bch_devs_mask devs_leaving;
+ bitmap_andnot(devs_leaving.d, devs.d, h->devs.d, BCH_SB_MEMBERS_MAX);
+
+ if (h->s && !h->s->allocated && dev_mask_nr(&devs_leaving))
+ ec_stripe_new_cancel(c, h, -EINTR);
+
+ h->rw_devs_change_count = c->rw_devs_change_count;
+}
+
+static struct ec_stripe_head *
+ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
+ unsigned algo, unsigned redundancy,
+ enum bch_watermark watermark)
+{
+ struct ec_stripe_head *h;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return NULL;
+
+ mutex_init(&h->lock);
+ BUG_ON(!mutex_trylock(&h->lock));
+
+ h->disk_label = disk_label;
+ h->algo = algo;
+ h->redundancy = redundancy;
+ h->watermark = watermark;
list_add(&h->list, &c->ec_stripe_head_list);
return h;
@@ -1743,14 +1802,14 @@ void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
h->s->allocated &&
bitmap_weight(h->s->blocks_allocated,
h->s->nr_data) == h->s->nr_data)
- ec_stripe_set_pending(c, h);
+ ec_stripe_new_set_pending(c, h);
mutex_unlock(&h->lock);
}
static struct ec_stripe_head *
__bch2_ec_stripe_head_get(struct btree_trans *trans,
- unsigned target,
+ unsigned disk_label,
unsigned algo,
unsigned redundancy,
enum bch_watermark watermark)
@@ -1768,27 +1827,32 @@ __bch2_ec_stripe_head_get(struct btree_trans *trans,
if (test_bit(BCH_FS_going_ro, &c->flags)) {
h = ERR_PTR(-BCH_ERR_erofs_no_writes);
- goto found;
+ goto err;
}
list_for_each_entry(h, &c->ec_stripe_head_list, list)
- if (h->target == target &&
+ if (h->disk_label == disk_label &&
h->algo == algo &&
h->redundancy == redundancy &&
h->watermark == watermark) {
ret = bch2_trans_mutex_lock(trans, &h->lock);
- if (ret)
+ if (ret) {
h = ERR_PTR(ret);
+ goto err;
+ }
goto found;
}
- h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
+ h = ec_new_stripe_head_alloc(c, disk_label, algo, redundancy, watermark);
found:
- if (!IS_ERR_OR_NULL(h) &&
- h->nr_active_devs < h->redundancy + 2) {
+ if (h->rw_devs_change_count != c->rw_devs_change_count)
+ ec_stripe_head_devs_update(c, h);
+
+ if (h->insufficient_devs) {
mutex_unlock(&h->lock);
h = NULL;
}
+err:
mutex_unlock(&c->ec_stripe_head_lock);
return h;
}
@@ -1878,7 +1942,6 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
return 0;
}
-/* XXX: doesn't obey target: */
static s64 get_existing_stripe(struct bch_fs *c,
struct ec_stripe_head *head)
{
@@ -1901,7 +1964,8 @@ static s64 get_existing_stripe(struct bch_fs *c,
m = genradix_ptr(&c->stripes, stripe_idx);
- if (m->algorithm == head->algo &&
+ if (m->disk_label == head->disk_label &&
+ m->algorithm == head->algo &&
m->nr_redundant == head->redundancy &&
m->sectors == head->blocksize &&
m->blocks_nonempty < m->nr_blocks - m->nr_redundant &&
@@ -2046,9 +2110,19 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
bool waiting = false;
+ unsigned disk_label = 0;
+ struct target t = target_decode(target);
int ret;
- h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
+ if (t.type == TARGET_GROUP) {
+ if (t.group > U8_MAX) {
+ bch_err(c, "cannot create a stripe when disk_label > U8_MAX");
+ return NULL;
+ }
+ disk_label = t.group + 1; /* 0 == no label */
+ }
+
+ h = __bch2_ec_stripe_head_get(trans, disk_label, algo, redundancy, watermark);
if (IS_ERR_OR_NULL(h))
return h;
@@ -2126,6 +2200,73 @@ err:
return ERR_PTR(ret);
}
+/* device removal */
+
+static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_s_c k_a)
+{
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert);
+
+ if (!a->stripe)
+ return 0;
+
+ if (a->stripe_sectors) {
+ bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data");
+ return -BCH_ERR_invalidate_stripe_to_dev;
+ }
+
+ struct btree_iter iter;
+ struct bkey_i_stripe *s =
+ bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe),
+ BTREE_ITER_slots, stripe);
+ int ret = PTR_ERR_OR_ZERO(s);
+ if (ret)
+ return ret;
+
+ struct disk_accounting_pos acc = {
+ .type = BCH_DISK_ACCOUNTING_replicas,
+ };
+
+ s64 sectors = 0;
+ for (unsigned i = 0; i < s->v.nr_blocks; i++)
+ sectors -= stripe_blockcount_get(&s->v, i);
+
+ bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
+ acc.replicas.data_type = BCH_DATA_user;
+ ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
+ if (ret)
+ goto err;
+
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(&s->k_i));
+ bkey_for_each_ptr(ptrs, ptr)
+ if (ptr->dev == k_a.k->p.inode)
+ ptr->dev = BCH_SB_MEMBER_INVALID;
+
+ sectors = -sectors;
+
+ bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
+ acc.replicas.data_type = BCH_DATA_user;
+ ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
+ if (ret)
+ goto err;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx)
+{
+ return bch2_trans_run(c,
+ for_each_btree_key_upto_commit(trans, iter,
+ BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX),
+ BTREE_ITER_intent, k,
+ NULL, NULL, 0, ({
+ bch2_invalidate_stripe_to_dev(trans, k);
+ })));
+}
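
bch2_invalidate_stripe_to_dev() above touches the disk accounting twice because usage is keyed by the stripe's replicas list: the sectors are debited under the old replicas entry, the pointer is rewritten to BCH_SB_MEMBER_INVALID, and the same sectors are credited under the new entry. The shape, reduced to toy counters (acct_mod() and the key values are invented):

#include <stdio.h>

static long usage[2];		/* toy accounting table, indexed by key */

static void acct_mod(int key, long delta)
{
	usage[key] += delta;
}

int main(void)
{
	long sectors = 128;
	int old_key = 0, new_key = 1;	/* replicas list before/after */

	acct_mod(old_key, -sectors);	/* debit under the old entry */
	/* ...mutate the key: ptr->dev = BCH_SB_MEMBER_INVALID... */
	acct_mod(new_key, +sectors);	/* credit under the new entry */

	printf("old=%ld new=%ld\n", usage[old_key], usage[new_key]);
	return 0;
}
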
+
+/* startup/shutdown */
+
static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
{
struct ec_stripe_head *h;
@@ -2151,8 +2292,7 @@ static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
}
goto unlock;
found:
- h->s->err = -BCH_ERR_erofs_no_writes;
- ec_stripe_set_pending(c, h);
+ ec_stripe_new_cancel(c, h, -BCH_ERR_erofs_no_writes);
unlock:
mutex_unlock(&h->lock);
}
@@ -2197,17 +2337,9 @@ int bch2_stripes_read(struct bch_fs *c)
if (ret)
break;
- const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
-
struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
- m->sectors = le16_to_cpu(s->sectors);
- m->algorithm = s->algorithm;
- m->nr_blocks = s->nr_blocks;
- m->nr_redundant = s->nr_redundant;
- m->blocks_nonempty = 0;
- for (unsigned i = 0; i < s->nr_blocks; i++)
- m->blocks_nonempty += !!stripe_blockcount_get(s, i);
+ stripe_to_mem(m, bkey_s_c_to_stripe(k).v);
bch2_stripes_heap_insert(c, m, k.k->p.offset);
0;
@@ -2252,6 +2384,8 @@ static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
for_each_set_bit(i, s->blocks_gotten, v->nr_blocks)
prt_printf(out, " %u", s->blocks[i]);
prt_newline(out);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&s->new_stripe.key));
+ prt_newline(out);
}
void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
@@ -2261,9 +2395,10 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
mutex_lock(&c->ec_stripe_head_lock);
list_for_each_entry(h, &c->ec_stripe_head_list, list) {
- prt_printf(out, "target %u algo %u redundancy %u %s:\n",
- h->target, h->algo, h->redundancy,
- bch2_watermarks[h->watermark]);
+ prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n",
+ h->disk_label, h->algo, h->redundancy,
+ bch2_watermarks[h->watermark],
+ h->nr_created);
if (h->s)
bch2_new_stripe_to_text(out, c, h->s);
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 9baf3411a8f9..43326370b410 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -188,10 +188,15 @@ struct ec_stripe_head {
struct list_head list;
struct mutex lock;
- unsigned target;
+ unsigned disk_label;
unsigned algo;
unsigned redundancy;
enum bch_watermark watermark;
+ bool insufficient_devs;
+
+ unsigned long rw_devs_change_count;
+
+ u64 nr_created;
struct bch_devs_mask devs;
unsigned nr_active_devs;
@@ -204,7 +209,7 @@ struct ec_stripe_head {
struct ec_stripe_new *s;
};
-int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *);
+int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *, struct bkey_s_c);
void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
@@ -249,6 +254,8 @@ static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
}
}
+int bch2_dev_remove_stripes(struct bch_fs *, unsigned);
+
void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);
diff --git a/fs/bcachefs/ec_format.h b/fs/bcachefs/ec_format.h
index 44ce88ba08d7..64ef52e00078 100644
--- a/fs/bcachefs/ec_format.h
+++ b/fs/bcachefs/ec_format.h
@@ -11,7 +11,14 @@ struct bch_stripe {
__u8 csum_granularity_bits;
__u8 csum_type;
- __u8 pad;
+
+ /*
+ * XXX: targets should be 16 bits - fix this if we ever do a stripe_v2
+ *
+ * we can manage with this because this only needs to point to a
+ * disk label, not a target:
+ */
+ __u8 disk_label;
struct bch_extent_ptr ptrs[];
} __packed __aligned(8);
diff --git a/fs/bcachefs/ec_types.h b/fs/bcachefs/ec_types.h
index 1df03dccfc72..8d1e70e830ac 100644
--- a/fs/bcachefs/ec_types.h
+++ b/fs/bcachefs/ec_types.h
@@ -16,6 +16,7 @@ struct stripe {
u8 nr_blocks;
u8 nr_redundant;
u8 blocks_nonempty;
+ u8 disk_label;
};
struct gc_stripe {
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 742dcdd3e5d7..60b7875adada 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -119,8 +119,8 @@
x(EEXIST, EEXIST_str_hash_set) \
x(EEXIST, EEXIST_discard_in_flight_add) \
x(EEXIST, EEXIST_subvolume_create) \
- x(0, open_buckets_empty) \
- x(0, freelist_empty) \
+ x(ENOSPC, open_buckets_empty) \
+ x(ENOSPC, freelist_empty) \
x(BCH_ERR_freelist_empty, no_buckets_found) \
x(0, transaction_restart) \
x(BCH_ERR_transaction_restart, transaction_restart_fault_inject) \
@@ -244,6 +244,16 @@
x(EIO, btree_node_read_error) \
x(EIO, btree_node_read_validate_error) \
x(EIO, btree_need_topology_repair) \
+ x(EIO, bucket_ref_update) \
+ x(EIO, trigger_pointer) \
+ x(EIO, trigger_stripe_pointer) \
+ x(EIO, metadata_bucket_inconsistency) \
+ x(EIO, mark_stripe) \
+ x(EIO, stripe_reconstruct) \
+ x(EIO, key_type_error) \
+ x(EIO, no_device_to_read_from) \
+ x(EIO, missing_indirect_extent) \
+ x(EIO, invalidate_stripe_to_dev) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_fixable) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_want_retry) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_must_retry) \
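
The new EIO-class codes above slot into errcode.h's x-macro table, where one list expands into both the enum and a printable-name table, and the first column records which errno each private code decays to outside the filesystem. A freestanding sketch of the same x-macro technique — ERRCODES() and err_name() are invented:

#include <stdio.h>

#define ERRCODES()			\
	x(bucket_ref_update)		\
	x(trigger_pointer)		\
	x(mark_stripe)

enum my_err {
	MY_ERR_START = 1024,	/* above the real errno range */
#define x(n) MY_ERR_##n,
	ERRCODES()
#undef x
	MY_ERR_MAX
};

static const char *err_name(enum my_err e)
{
	switch (e) {
#define x(n) case MY_ERR_##n: return #n;
	ERRCODES()
#undef x
	default: return "unknown";
	}
}

int main(void)
{
	printf("%d = %s\n", MY_ERR_mark_stripe, err_name(MY_ERR_mark_stripe));
	return 0;
}

The payoff, as in the -EIO conversions through ec.c and extents.c above, is that messages can name the exact failure while callers still match on the errno class.
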
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 95afa7bf2020..3a16b535b6c3 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -239,7 +239,19 @@ int __bch2_fsck_err(struct bch_fs *c,
if (!c)
c = trans->c;
- WARN_ON(!trans && bch2_current_has_btree_trans(c));
+ /*
+ * Ugly: if there's a transaction in the current task it has to be
+ * passed in to unlock if we prompt for user input.
+ *
+ * But, plumbing a transaction and transaction restarts into
+ * bkey_validate() is problematic.
+ *
+ * So:
+ * - make all bkey errors AUTOFIX, they're simple anyways (we just
+ * delete the key)
+ * - and we don't need to warn if we're not prompting
+ */
+ WARN_ON(!(flags & FSCK_AUTOFIX) && !trans && bch2_current_has_btree_trans(c));
if ((flags & FSCK_CAN_FIX) &&
test_bit(err, c->sb.errors_silent))
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 2f1b86978f36..21ee7211b03e 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -184,7 +184,7 @@ do { \
ret = -BCH_ERR_fsck_delete_bkey; \
goto fsck_err; \
} \
- int _ret = __bch2_bkey_fsck_err(c, k, FSCK_CAN_FIX, \
+ int _ret = __bch2_bkey_fsck_err(c, k, FSCK_CAN_FIX|FSCK_AUTOFIX,\
BCH_FSCK_ERR_##_err_type, \
_err_msg, ##__VA_ARGS__); \
if (_ret != -BCH_ERR_fsck_fix && \
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 324303bf4353..cc0d22085aef 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -115,7 +115,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
int ret = 0;
if (k.k->type == KEY_TYPE_error)
- return -EIO;
+ return -BCH_ERR_key_type_error;
rcu_read_lock();
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
@@ -133,7 +133,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
* read:
*/
if (!ret && !p.ptr.cached)
- ret = -EIO;
+ ret = -BCH_ERR_no_device_to_read_from;
struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
@@ -146,16 +146,13 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
? f->idx
: f->idx + 1;
- if (!p.idx && !ca)
+ if (!p.idx && (!ca || !bch2_dev_is_readable(ca)))
p.idx++;
if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
p.idx++;
- if (!p.idx && !bch2_dev_is_readable(ca))
- p.idx++;
-
- if (p.idx >= (unsigned) p.has_ec + 1)
+ if (p.idx > (unsigned) p.has_ec)
continue;
if (ret > 0 && !ptr_better(c, p, *pick))
@@ -821,6 +818,18 @@ void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr)
void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr)
{
+ if (k.k->type != KEY_TYPE_stripe) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k.s_c);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev == ptr->dev && p.has_ec) {
+ ptr->dev = BCH_SB_MEMBER_INVALID;
+ return;
+ }
+ }
+
bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
bch2_bkey_drop_ptr_noerror(k, ptr);
@@ -848,10 +857,7 @@ void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
- struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);
-
- if (ptr)
- bch2_bkey_drop_ptr_noerror(k, ptr);
+ bch2_bkey_drop_ptrs_noerror(k, ptr, ptr->dev == dev);
}
const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
@@ -1021,7 +1027,7 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
{
out->atomic++;
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
if (!ca) {
prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
(u64) ptr->offset, ptr->gen,
@@ -1125,8 +1131,9 @@ static int extent_ptr_validate(struct bch_fs *c,
{
int ret = 0;
+ /* bad pointers are repaired by check_fix_ptrs(): */
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
if (!ca) {
rcu_read_unlock();
return 0;
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 42a7c6d820a0..ed5001dd662e 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -357,7 +357,7 @@ out: \
__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end, \
_ptr, _entry)
-#define bkey_crc_next(_k, _start, _end, _crc, _iter) \
+#define bkey_crc_next(_k, _end, _crc, _iter) \
({ \
__bkey_extent_entry_for_each_from(_iter, _end, _iter) \
if (extent_entry_is_crc(_iter)) { \
@@ -372,7 +372,7 @@ out: \
#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter) \
for ((_crc) = bch2_extent_crc_unpack(_k, NULL), \
(_iter) = (_start); \
- bkey_crc_next(_k, _start, _end, _crc, _iter); \
+ bkey_crc_next(_k, _end, _crc, _iter); \
(_iter) = extent_entry_next(_iter))
#define bkey_for_each_crc(_k, _p, _crc, _iter) \
@@ -611,9 +611,6 @@ unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_d
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
-void bch2_bkey_drop_device(struct bkey_s, unsigned);
-void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
-
const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);
static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
@@ -652,6 +649,23 @@ void bch2_extent_ptr_decoded_append(struct bkey_i *,
void bch2_bkey_drop_ptr_noerror(struct bkey_s, struct bch_extent_ptr *);
void bch2_bkey_drop_ptr(struct bkey_s, struct bch_extent_ptr *);
+void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
+void bch2_bkey_drop_device(struct bkey_s, unsigned);
+
+#define bch2_bkey_drop_ptrs_noerror(_k, _ptr, _cond) \
+do { \
+ __label__ _again; \
+ struct bkey_ptrs _ptrs; \
+_again: \
+ _ptrs = bch2_bkey_ptrs(_k); \
+ \
+ bkey_for_each_ptr(_ptrs, _ptr) \
+ if (_cond) { \
+ bch2_bkey_drop_ptr_noerror(_k, _ptr); \
+ goto _again; \
+ } \
+} while (0)
+
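
bch2_bkey_drop_ptrs_noerror() above uses a GNU local label to restart iteration after every removal, since dropping a pointer shifts the remaining entries and invalidates the cursor. A userspace sketch of the same restart-on-mutate pattern — DROP_IF(), struct vec, and vec_drop() are invented:

#include <stdio.h>
#include <string.h>

struct vec { int v[8]; int nr; };

static void vec_drop(struct vec *a, int i)	/* like drop_ptr: shifts the tail down */
{
	memmove(&a->v[i], &a->v[i + 1], (--a->nr - i) * sizeof(a->v[0]));
}

#define DROP_IF(_a, _it, _cond)				\
do {							\
	__label__ again;				\
again:							\
	for (int _it = 0; _it < (_a)->nr; _it++)	\
		if (_cond) {				\
			vec_drop((_a), _it);		\
			goto again;			\
		}					\
} while (0)

int main(void)
{
	struct vec a = { .v = { 1, 2, 3, 4, 5, 6 }, .nr = 6 };

	DROP_IF(&a, i, a.v[i] % 2 == 0);	/* drop even entries */

	for (int i = 0; i < a.nr; i++)
		printf("%d ", a.v[i]);
	printf("\n");
	return 0;
}

The __label__ declaration keeps `again` local to the macro's block, so the macro can be expanded more than once per function without label collisions.
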
#define bch2_bkey_drop_ptrs(_k, _ptr, _cond) \
do { \
__label__ _again; \
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c
index 508d029ac53d..7e10a9ddcfd9 100644
--- a/fs/bcachefs/fs-common.c
+++ b/fs/bcachefs/fs-common.c
@@ -42,7 +42,8 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;
- ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
+ ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir,
+ BTREE_ITER_intent|BTREE_ITER_with_updates);
if (ret)
goto err;
@@ -163,7 +164,7 @@ int bch2_create_trans(struct btree_trans *trans,
name,
dir_target,
&dir_offset,
- STR_HASH_must_create);
+ STR_HASH_must_create|BTREE_ITER_with_updates);
if (ret)
goto err;
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index ff60c041abe5..48a1ab9a649b 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -151,7 +151,6 @@ static void bchfs_read(struct btree_trans *trans,
struct bkey_buf sk;
int flags = BCH_READ_RETRY_IF_STALE|
BCH_READ_MAY_PROMOTE;
- u32 snapshot;
int ret = 0;
rbio->c = c;
@@ -159,29 +158,23 @@ static void bchfs_read(struct btree_trans *trans,
rbio->subvol = inum.subvol;
bch2_bkey_buf_init(&sk);
-retry:
bch2_trans_begin(trans);
- iter = (struct btree_iter) { NULL };
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
+ POS(inum.inum, rbio->bio.bi_iter.bi_sector),
BTREE_ITER_slots);
while (1) {
struct bkey_s_c k;
unsigned bytes, sectors, offset_into_extent;
enum btree_id data_btree = BTREE_ID_extents;
- /*
- * read_extent -> io_time_reset may cause a transaction restart
- * without returning an error, we need to check for that here:
- */
- ret = bch2_trans_relock(trans);
+ bch2_trans_begin(trans);
+
+ u32 snapshot;
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
if (ret)
- break;
+ goto err;
+
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
bch2_btree_iter_set_pos(&iter,
POS(inum.inum, rbio->bio.bi_iter.bi_sector));
@@ -189,7 +182,7 @@ retry:
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
- break;
+ goto err;
offset_into_extent = iter.pos.offset -
bkey_start_offset(k.k);
@@ -200,7 +193,7 @@ retry:
ret = bch2_read_indirect_extent(trans, &data_btree,
&offset_into_extent, &sk);
if (ret)
- break;
+ goto err;
k = bkey_i_to_s_c(sk.k);
@@ -210,7 +203,7 @@ retry:
ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
extent_partial_reads_expensive(k));
if (ret)
- break;
+ goto err;
}
bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
@@ -229,17 +222,13 @@ retry:
swap(rbio->bio.bi_iter.bi_size, bytes);
bio_advance(&rbio->bio, bytes);
-
- ret = btree_trans_too_many_iters(trans);
- if (ret)
+err:
+ if (ret &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart))
break;
}
-err:
bch2_trans_iter_exit(trans, &iter);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
if (ret) {
bch_err_inum_offset_ratelimited(c,
iter.pos.inode,
@@ -486,7 +475,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
op->nr_replicas = nr_replicas;
op->res.nr_replicas = nr_replicas;
op->write_point = writepoint_hashed(inode->ei_last_dirtied);
- op->subvol = inode->ei_subvol;
+ op->subvol = inode->ei_inum.subvol;
op->pos = POS(inode->v.i_ino, sector);
op->end_io = bch2_writepage_io_done;
op->devs_need_flush = &inode->ei_devs_need_flush;
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index e246b1e05aa2..ee1c0325f313 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -500,7 +500,7 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
dio->op.target = dio->op.opts.foreground_target;
dio->op.write_point = writepoint_hashed((unsigned long) current);
dio->op.nr_replicas = dio->op.opts.data_replicas;
- dio->op.subvol = inode->ei_subvol;
+ dio->op.subvol = inode->ei_inum.subvol;
dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
dio->op.devs_need_flush = &inode->ei_devs_need_flush;
diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c
index a9cc5cad9cc9..af3a24546aa3 100644
--- a/fs/bcachefs/fs-io-pagecache.c
+++ b/fs/bcachefs/fs-io-pagecache.c
@@ -182,18 +182,11 @@ static void __bch2_folio_set(struct folio *folio,
int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
struct folio **fs, unsigned nr_folios)
{
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_folio *s;
u64 offset = folio_sector(fs[0]);
- unsigned folio_idx;
- u32 snapshot;
bool need_set = false;
- int ret;
- for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
- s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
+ for (unsigned folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
+ struct bch_folio *s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
if (!s)
return -ENOMEM;
@@ -203,53 +196,40 @@ int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
if (!need_set)
return 0;
- folio_idx = 0;
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
- SPOS(inum.inum, offset, snapshot),
- BTREE_ITER_slots, k, ret) {
- unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = bkey_to_sector_state(k);
-
- while (folio_idx < nr_folios) {
- struct folio *folio = fs[folio_idx];
- u64 folio_start = folio_sector(folio);
- u64 folio_end = folio_end_sector(folio);
- unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
- folio_start;
- unsigned folio_len = min(k.k->p.offset, folio_end) -
- folio_offset - folio_start;
-
- BUG_ON(k.k->p.offset < folio_start);
- BUG_ON(bkey_start_offset(k.k) > folio_end);
-
- if (!bch2_folio(folio)->uptodate)
- __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
-
- if (k.k->p.offset < folio_end)
- break;
- folio_idx++;
- }
-
- if (folio_idx == nr_folios)
- break;
- }
-
- offset = iter.pos.offset;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
- bch2_trans_put(trans);
+ unsigned folio_idx = 0;
+
+ return bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
+ POS(inum.inum, offset),
+ POS(inum.inum, U64_MAX),
+ inum.subvol, BTREE_ITER_slots, k, ({
+ unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k);
+
+ while (folio_idx < nr_folios) {
+ struct folio *folio = fs[folio_idx];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
+ folio_start;
+ unsigned folio_len = min(k.k->p.offset, folio_end) -
+ folio_offset - folio_start;
+
+ BUG_ON(k.k->p.offset < folio_start);
+ BUG_ON(bkey_start_offset(k.k) > folio_end);
+
+ if (!bch2_folio(folio)->uptodate)
+ __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
+
+ if (k.k->p.offset < folio_end)
+ break;
+ folio_idx++;
+ }
- return ret;
+ if (folio_idx == nr_folios)
+ break;
+ 0;
+ })));
}
void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
diff --git a/fs/bcachefs/fs-io-pagecache.h b/fs/bcachefs/fs-io-pagecache.h
index fd7d692c087e..fad911cf5068 100644
--- a/fs/bcachefs/fs-io-pagecache.h
+++ b/fs/bcachefs/fs-io-pagecache.h
@@ -99,9 +99,7 @@ static inline void bch2_folio_release(struct folio *folio)
static inline struct bch_folio *__bch2_folio(struct folio *folio)
{
- return folio_has_private(folio)
- ? (struct bch_folio *) folio_get_private(folio)
- : NULL;
+ return folio_get_private(folio);
}
static inline struct bch_folio *bch2_folio(struct folio *folio)
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 77b85da30fb2..71d0fa387509 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -221,30 +221,11 @@ static inline int range_has_data(struct bch_fs *c, u32 subvol,
struct bpos start,
struct bpos end)
{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, subvol, &start.snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
- if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
- ret = 1;
- break;
- }
- start = iter.pos;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
- return ret;
+ return bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, start, end,
+ subvol, 0, k, ({
+ bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k);
+ })));
}
static int __bch2_truncate_folio(struct bch_inode_info *inode,
@@ -267,7 +248,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
* XXX: we're doing two index lookups when we end up reading the
* folio
*/
- ret = range_has_data(c, inode->ei_subvol,
+ ret = range_has_data(c, inode->ei_inum.subvol,
POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
if (ret <= 0)
@@ -618,7 +599,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_trans_begin(trans);
ret = bch2_subvolume_get_snapshot(trans,
- inode->ei_subvol, &snapshot);
+ inode->ei_inum.subvol, &snapshot);
if (ret)
goto bkey_err;
@@ -813,41 +794,23 @@ static int quota_reserve_range(struct bch_inode_info *inode,
u64 start, u64 end)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- u32 snapshot;
u64 sectors = end - start;
- u64 pos = start;
- int ret;
-retry:
- bch2_trans_begin(trans);
- ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot);
- if (ret)
- goto err;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(inode->v.i_ino, pos, snapshot), 0);
-
- while (!(ret = btree_trans_too_many_iters(trans)) &&
- (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
- !(ret = bkey_err(k))) {
- if (bkey_extent_is_allocation(k.k)) {
- u64 s = min(end, k.k->p.offset) -
- max(start, bkey_start_offset(k.k));
- BUG_ON(s > sectors);
- sectors -= s;
- }
- bch2_btree_iter_advance(&iter);
- }
- pos = iter.pos.offset;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter,
+ BTREE_ID_extents,
+ POS(inode->v.i_ino, start),
+ POS(inode->v.i_ino, end - 1),
+ inode->ei_inum.subvol, 0, k, ({
+ if (bkey_extent_is_allocation(k.k)) {
+ u64 s = min(end, k.k->p.offset) -
+ max(start, bkey_start_offset(k.k));
+ BUG_ON(s > sectors);
+ sectors -= s;
+ }
+
+ 0;
+ })));
return ret ?: bch2_quota_reservation_add(c, inode, res, sectors, true);
}
@@ -942,42 +905,25 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
subvol_inum inum = inode_inum(inode);
u64 isize, next_data = MAX_LFS_FILESIZE;
- u32 snapshot;
- int ret;
isize = i_size_read(&inode->v);
if (offset >= isize)
return -ENXIO;
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents,
- SPOS(inode->v.i_ino, offset >> 9, snapshot),
- POS(inode->v.i_ino, U64_MAX),
- 0, k, ret) {
- if (bkey_extent_is_data(k.k)) {
- next_data = max(offset, bkey_start_offset(k.k) << 9);
- break;
- } else if (k.k->p.offset >> 9 > isize)
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
+ POS(inode->v.i_ino, offset >> 9),
+ POS(inode->v.i_ino, U64_MAX),
+ inum.subvol, 0, k, ({
+ if (bkey_extent_is_data(k.k)) {
+ next_data = max(offset, bkey_start_offset(k.k) << 9);
+ break;
+ } else if (k.k->p.offset >> 9 > isize)
+ break;
+ 0;
+ })));
if (ret)
return ret;
@@ -995,50 +941,34 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_trans *trans;
- struct btree_iter iter;
- struct bkey_s_c k;
subvol_inum inum = inode_inum(inode);
u64 isize, next_hole = MAX_LFS_FILESIZE;
- u32 snapshot;
- int ret;
isize = i_size_read(&inode->v);
if (offset >= isize)
return -ENXIO;
- trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
- SPOS(inode->v.i_ino, offset >> 9, snapshot),
- BTREE_ITER_slots, k, ret) {
- if (k.k->p.inode != inode->v.i_ino) {
- next_hole = bch2_seek_pagecache_hole(&inode->v,
- offset, MAX_LFS_FILESIZE, 0, false);
- break;
- } else if (!bkey_extent_is_data(k.k)) {
- next_hole = bch2_seek_pagecache_hole(&inode->v,
- max(offset, bkey_start_offset(k.k) << 9),
- k.k->p.offset << 9, 0, false);
-
- if (next_hole < k.k->p.offset << 9)
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
+ POS(inode->v.i_ino, offset >> 9),
+ POS(inode->v.i_ino, U64_MAX),
+ inum.subvol, BTREE_ITER_slots, k, ({
+ if (k.k->p.inode != inode->v.i_ino) {
+ next_hole = bch2_seek_pagecache_hole(&inode->v,
+ offset, MAX_LFS_FILESIZE, 0, false);
break;
- } else {
- offset = max(offset, bkey_start_offset(k.k) << 9);
- }
- }
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
+ } else if (!bkey_extent_is_data(k.k)) {
+ next_hole = bch2_seek_pagecache_hole(&inode->v,
+ max(offset, bkey_start_offset(k.k) << 9),
+ k.k->p.offset << 9, 0, false);
+
+ if (next_hole < k.k->p.offset << 9)
+ break;
+ } else {
+ offset = max(offset, bkey_start_offset(k.k) << 9);
+ }
+ 0;
+ })));
if (ret)
return ret;
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 99c7fe987c74..405cf08bda34 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -100,7 +100,7 @@ static int bch2_ioc_setflags(struct bch_fs *c,
}
mutex_lock(&inode->ei_update_lock);
- ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
bch2_write_inode(c, inode, bch2_inode_flags_set, &s,
ATTR_CTIME);
mutex_unlock(&inode->ei_update_lock);
@@ -184,7 +184,7 @@ static int bch2_ioc_fssetxattr(struct bch_fs *c,
}
mutex_lock(&inode->ei_update_lock);
- ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
bch2_set_projid(c, inode, fa.fsx_projid) ?:
bch2_write_inode(c, inode, fssetxattr_inode_update_fn, &s,
ATTR_CTIME);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 011817afc3ad..4a1bb07a2574 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -108,7 +108,7 @@ retry:
goto retry;
bch2_fs_fatal_err_on(bch2_err_matches(ret, ENOENT), c,
- "%s: inode %u:%llu not found when updating",
+ "%s: inode %llu:%llu not found when updating",
bch2_err_str(ret),
inode_inum(inode).subvol,
inode_inum(inode).inum);
@@ -152,50 +152,106 @@ int bch2_fs_quota_transfer(struct bch_fs *c,
return ret;
}
-static int bch2_iget5_test(struct inode *vinode, void *p)
+static bool subvol_inum_eq(subvol_inum a, subvol_inum b)
{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- subvol_inum *inum = p;
-
- return inode->ei_subvol == inum->subvol &&
- inode->ei_inode.bi_inum == inum->inum;
+ return a.subvol == b.subvol && a.inum == b.inum;
}
-static int bch2_iget5_set(struct inode *vinode, void *p)
+static int bch2_vfs_inode_cmp_fn(struct rhashtable_compare_arg *arg,
+ const void *obj)
{
- struct bch_inode_info *inode = to_bch_ei(vinode);
- subvol_inum *inum = p;
+ const struct bch_inode_info *inode = obj;
+ const subvol_inum *v = arg->key;
- inode->v.i_ino = inum->inum;
- inode->ei_subvol = inum->subvol;
- inode->ei_inode.bi_inum = inum->inum;
- return 0;
+ return !subvol_inum_eq(inode->ei_inum, *v);
}
-static unsigned bch2_inode_hash(subvol_inum inum)
+static const struct rhashtable_params bch2_vfs_inodes_params = {
+ .head_offset = offsetof(struct bch_inode_info, hash),
+ .key_offset = offsetof(struct bch_inode_info, ei_inum),
+ .key_len = sizeof(subvol_inum),
+ .obj_cmpfn = bch2_vfs_inode_cmp_fn,
+ .automatic_shrinking = true,
+};
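The params block above is what ties the table to struct bch_inode_info: entries are linked through ->hash, keyed on the embedded ->ei_inum, and compared with memcmp semantics (0 means match, hence the negated subvol_inum_eq()). A lookup is then just:

	/* Hedged sketch; this is essentially what __bch2_inode_hash_find()
	 * below does. No reference is taken on the returned inode. */
	static struct bch_inode_info *lookup(struct bch_fs *c, subvol_inum inum)
	{
		return rhashtable_lookup_fast(&c->vfs_inodes_table, &inum,
					      bch2_vfs_inodes_params);
	}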
+
+static void __wait_on_freeing_inode(struct inode *inode)
{
- return jhash_3words(inum.subvol, inum.inum >> 32, inum.inum, JHASH_INITVAL);
+ wait_queue_head_t *wq;
+ DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
+ wq = bit_waitqueue(&inode->i_state, __I_NEW);
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&inode->i_lock);
+ schedule();
+ finish_wait(wq, &wait.wq_entry);
}
struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
{
- return to_bch_ei(ilookup5_nowait(c->vfs_sb,
- bch2_inode_hash(inum),
- bch2_iget5_test,
- &inum));
+ return rhashtable_lookup_fast(&c->vfs_inodes_table, &inum, bch2_vfs_inodes_params);
+}
+
+static struct bch_inode_info *bch2_inode_hash_find(struct bch_fs *c, struct btree_trans *trans,
+ subvol_inum inum)
+{
+ struct bch_inode_info *inode;
+repeat:
+ inode = __bch2_inode_hash_find(c, inum);
+ if (inode) {
+ spin_lock(&inode->v.i_lock);
+ if (!test_bit(EI_INODE_HASHED, &inode->ei_flags)) {
+ spin_unlock(&inode->v.i_lock);
+ return NULL;
+ }
+ if ((inode->v.i_state & (I_FREEING|I_WILL_FREE))) {
+ if (!trans) {
+ __wait_on_freeing_inode(&inode->v);
+ } else {
+ bch2_trans_unlock(trans);
+ __wait_on_freeing_inode(&inode->v);
+ int ret = bch2_trans_relock(trans);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ goto repeat;
+ }
+ __iget(&inode->v);
+ spin_unlock(&inode->v.i_lock);
+ }
+
+ return inode;
+}
+
+static void bch2_inode_hash_remove(struct bch_fs *c, struct bch_inode_info *inode)
+{
+ spin_lock(&inode->v.i_lock);
+ bool remove = test_and_clear_bit(EI_INODE_HASHED, &inode->ei_flags);
+ spin_unlock(&inode->v.i_lock);
+
+ if (remove) {
+ int ret = rhashtable_remove_fast(&c->vfs_inodes_table,
+ &inode->hash, bch2_vfs_inodes_params);
+ BUG_ON(ret);
+ inode->v.i_hash.pprev = NULL;
+ }
}
-static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_inode_info *inode)
+static struct bch_inode_info *bch2_inode_hash_insert(struct bch_fs *c,
+ struct btree_trans *trans,
+ struct bch_inode_info *inode)
{
- subvol_inum inum = inode_inum(inode);
- struct bch_inode_info *old = to_bch_ei(inode_insert5(&inode->v,
- bch2_inode_hash(inum),
- bch2_iget5_test,
- bch2_iget5_set,
- &inum));
- BUG_ON(!old);
+ struct bch_inode_info *old = inode;
+
+ set_bit(EI_INODE_HASHED, &inode->ei_flags);
+retry:
+ if (unlikely(rhashtable_lookup_insert_fast(&c->vfs_inodes_table,
+ &inode->hash,
+ bch2_vfs_inodes_params))) {
+ old = bch2_inode_hash_find(c, trans, inode->ei_inum);
+ if (!old)
+ goto retry;
+
+ clear_bit(EI_INODE_HASHED, &inode->ei_flags);
- if (unlikely(old != inode)) {
/*
* bcachefs doesn't use I_NEW; we have no use for it since we
* only insert fully created inodes in the inode hash table. But
@@ -209,21 +265,17 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
*/
set_nlink(&inode->v, 1);
discard_new_inode(&inode->v);
- inode = old;
+ return old;
} else {
+ inode_fake_hash(&inode->v);
+
+ inode_sb_list_add(&inode->v);
+
mutex_lock(&c->vfs_inodes_lock);
list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
mutex_unlock(&c->vfs_inodes_lock);
- /*
- * Again, I_NEW makes no sense for bcachefs. This is only needed
- * for clearing I_NEW, but since the inode was already fully
- * created and initialized we didn't actually want
- * inode_insert5() to set it for us.
- */
- unlock_new_inode(&inode->v);
+ return inode;
}
-
- return inode;
}
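Condensed, the race handling in bch2_inode_hash_insert() is a lookup-or-insert loop; this sketch strips the EI_INODE_HASHED flag management and the I_NEW commentary to show just the control flow (hedged, not the literal code):

	for (;;) {
		if (!rhashtable_lookup_insert_fast(&c->vfs_inodes_table,
						   &inode->hash,
						   bch2_vfs_inodes_params))
			return inode;		/* we won: ours is hashed */

		struct bch_inode_info *old =
			bch2_inode_hash_find(c, trans, inode->ei_inum);
		if (old) {
			discard_new_inode(&inode->v);	/* lost: drop ours */
			return old;
		}
		/* the winner was evicted between our failed insert and the
		 * lookup: try inserting again */
	}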
#define memalloc_flags_do(_flags, _do) \
@@ -241,7 +293,8 @@ static struct inode *bch2_alloc_inode(struct super_block *sb)
static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
{
- struct bch_inode_info *inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
+ struct bch_inode_info *inode = alloc_inode_sb(c->vfs_sb,
+ bch2_inode_cache, GFP_NOFS);
if (!inode)
return NULL;
@@ -283,13 +336,24 @@ static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
return inode;
}
+static struct bch_inode_info *bch2_inode_hash_init_insert(struct btree_trans *trans,
+ subvol_inum inum,
+ struct bch_inode_unpacked *bi,
+ struct bch_subvolume *subvol)
+{
+ struct bch_inode_info *inode = bch2_new_inode(trans);
+ if (IS_ERR(inode))
+ return inode;
+
+ bch2_vfs_inode_init(trans, inum, inode, bi, subvol);
+
+ return bch2_inode_hash_insert(trans->c, trans, inode);
+}
+
struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
{
- struct bch_inode_info *inode =
- to_bch_ei(ilookup5_nowait(c->vfs_sb,
- bch2_inode_hash(inum),
- bch2_iget5_test,
- &inum));
+ struct bch_inode_info *inode = bch2_inode_hash_find(c, NULL, inum);
if (inode)
return &inode->v;
@@ -300,11 +364,7 @@ struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
int ret = lockrestart_do(trans,
bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
bch2_inode_find_by_inum_trans(trans, inum, &inode_u)) ?:
- PTR_ERR_OR_ZERO(inode = bch2_new_inode(trans));
- if (!ret) {
- bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
- inode = bch2_inode_insert(c, inode);
- }
+ PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));
bch2_trans_put(trans);
return ret ? ERR_PTR(ret) : &inode->v;
@@ -325,6 +385,8 @@ __bch2_create(struct mnt_idmap *idmap,
subvol_inum inum;
struct bch_subvolume subvol;
u64 journal_seq = 0;
+ kuid_t kuid;
+ kgid_t kgid;
int ret;
/*
@@ -351,13 +413,15 @@ __bch2_create(struct mnt_idmap *idmap,
retry:
bch2_trans_begin(trans);
- ret = bch2_subvol_is_ro_trans(trans, dir->ei_subvol) ?:
+ kuid = mapped_fsuid(idmap, i_user_ns(&dir->v));
+ kgid = mapped_fsgid(idmap, i_user_ns(&dir->v));
+ ret = bch2_subvol_is_ro_trans(trans, dir->ei_inum.subvol) ?:
bch2_create_trans(trans,
inode_inum(dir), &dir_u, &inode_u,
!(flags & BCH_CREATE_TMPFILE)
? &dentry->d_name : NULL,
- from_kuid(i_user_ns(&dir->v), current_fsuid()),
- from_kgid(i_user_ns(&dir->v), current_fsgid()),
+ from_kuid(i_user_ns(&dir->v), kuid),
+ from_kgid(i_user_ns(&dir->v), kgid),
mode, rdev,
default_acl, acl, snapshot_src, flags) ?:
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
@@ -365,7 +429,7 @@ retry:
if (unlikely(ret))
goto err_before_quota;
- inum.subvol = inode_u.bi_subvol ?: dir->ei_subvol;
+ inum.subvol = inode_u.bi_subvol ?: dir->ei_inum.subvol;
inum.inum = inode_u.bi_inum;
ret = bch2_subvolume_get(trans, inum.subvol, true,
@@ -395,8 +459,16 @@ err_before_quota:
* we must insert the new inode into the inode cache before calling
* bch2_trans_exit() and dropping locks, else we could race with another
* thread pulling the inode in and modifying it:
+ *
+ * also, calling bch2_inode_hash_insert() without passing in the
+ * transaction object is sketchy - if we could ever end up in
+ * __wait_on_freeing_inode(), we'd risk deadlock.
+ *
+ * But that shouldn't be possible, since we still have the inode locked
+ * that we just created, and we _really_ can't take a transaction
+ * restart here.
*/
- inode = bch2_inode_insert(c, inode);
+ inode = bch2_inode_hash_insert(c, NULL, inode);
bch2_trans_put(trans);
err:
posix_acl_release(default_acl);
@@ -436,11 +508,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
if (ret)
goto err;
- struct bch_inode_info *inode =
- to_bch_ei(ilookup5_nowait(c->vfs_sb,
- bch2_inode_hash(inum),
- bch2_iget5_test,
- &inum));
+ struct bch_inode_info *inode = bch2_inode_hash_find(c, trans, inum);
if (inode)
goto out;
@@ -448,7 +516,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
struct bch_inode_unpacked inode_u;
ret = bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
bch2_inode_find_by_inum_nowarn_trans(trans, inum, &inode_u) ?:
- PTR_ERR_OR_ZERO(inode = bch2_new_inode(trans));
+ PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
c, "dirent to missing inode:\n %s",
@@ -468,9 +536,6 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
ret = -ENOENT;
goto err;
}
-
- bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
- inode = bch2_inode_insert(c, inode);
out:
bch2_trans_iter_exit(trans, &dirent_iter);
printbuf_exit(&buf);
@@ -557,8 +622,8 @@ static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
lockdep_assert_held(&inode->v.i_rwsem);
- ret = bch2_subvol_is_ro(c, dir->ei_subvol) ?:
- bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro(c, dir->ei_inum.subvol) ?:
+ bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
__bch2_link(c, inode, dir, dentry);
if (unlikely(ret))
return bch2_err_class(ret);
@@ -614,7 +679,7 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
struct bch_inode_info *dir = to_bch_ei(vdir);
struct bch_fs *c = dir->v.i_sb->s_fs_info;
- int ret = bch2_subvol_is_ro(c, dir->ei_subvol) ?:
+ int ret = bch2_subvol_is_ro(c, dir->ei_inum.subvol) ?:
__bch2_unlink(vdir, dentry, false);
return bch2_err_class(ret);
}
@@ -671,15 +736,16 @@ static int bch2_rename2(struct mnt_idmap *idmap,
struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
struct bch_inode_unpacked dst_dir_u, src_dir_u;
- struct bch_inode_unpacked src_inode_u, dst_inode_u;
+ struct bch_inode_unpacked src_inode_u, dst_inode_u, *whiteout_inode_u;
struct btree_trans *trans;
enum bch_rename_mode mode = flags & RENAME_EXCHANGE
? BCH_RENAME_EXCHANGE
: dst_dentry->d_inode
? BCH_RENAME_OVERWRITE : BCH_RENAME;
+ bool whiteout = !!(flags & RENAME_WHITEOUT);
int ret;
- if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE))
+ if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE|RENAME_WHITEOUT))
return -EINVAL;
if (mode == BCH_RENAME_OVERWRITE) {
@@ -697,8 +763,8 @@ static int bch2_rename2(struct mnt_idmap *idmap,
trans = bch2_trans_get(c);
- ret = bch2_subvol_is_ro_trans(trans, src_dir->ei_subvol) ?:
- bch2_subvol_is_ro_trans(trans, dst_dir->ei_subvol);
+ ret = bch2_subvol_is_ro_trans(trans, src_dir->ei_inum.subvol) ?:
+ bch2_subvol_is_ro_trans(trans, dst_dir->ei_inum.subvol);
if (ret)
goto err;
@@ -720,18 +786,48 @@ static int bch2_rename2(struct mnt_idmap *idmap,
if (ret)
goto err;
}
+retry:
+ bch2_trans_begin(trans);
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_rename_trans(trans,
- inode_inum(src_dir), &src_dir_u,
- inode_inum(dst_dir), &dst_dir_u,
- &src_inode_u,
- &dst_inode_u,
- &src_dentry->d_name,
- &dst_dentry->d_name,
- mode));
+ ret = bch2_rename_trans(trans,
+ inode_inum(src_dir), &src_dir_u,
+ inode_inum(dst_dir), &dst_dir_u,
+ &src_inode_u,
+ &dst_inode_u,
+ &src_dentry->d_name,
+ &dst_dentry->d_name,
+ mode);
if (unlikely(ret))
+ goto err_tx_restart;
+
+ if (whiteout) {
+ whiteout_inode_u = bch2_trans_kmalloc_nomemzero(trans, sizeof(*whiteout_inode_u));
+ ret = PTR_ERR_OR_ZERO(whiteout_inode_u);
+ if (unlikely(ret))
+ goto err_tx_restart;
+ bch2_inode_init_early(c, whiteout_inode_u);
+
+ ret = bch2_create_trans(trans,
+ inode_inum(src_dir), &src_dir_u,
+ whiteout_inode_u,
+ &src_dentry->d_name,
+ from_kuid(i_user_ns(&src_dir->v), current_fsuid()),
+ from_kgid(i_user_ns(&src_dir->v), current_fsgid()),
+ S_IFCHR|WHITEOUT_MODE, 0,
+ NULL, NULL, (subvol_inum) { 0 }, 0) ?:
+ bch2_quota_acct(c, bch_qid(whiteout_inode_u), Q_INO, 1,
+ KEY_TYPE_QUOTA_PREALLOC);
+ if (unlikely(ret))
+ goto err_tx_restart;
+ }
+
+ ret = bch2_trans_commit(trans, NULL, NULL, 0);
+ if (unlikely(ret)) {
+err_tx_restart:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
goto err;
+ }
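The whiteout is created with S_IFCHR|WHITEOUT_MODE in the same transaction as the rename, so the operation stays atomic. From userspace this path is reached through renameat2(); a minimal demonstration (plain syscall usage, nothing bcachefs-specific; per the man page RENAME_WHITEOUT needs CAP_MKNOD):

	#define _GNU_SOURCE
	#include <stdio.h>	/* renameat2(), RENAME_* on glibc >= 2.28 */
	#include <fcntl.h>	/* AT_FDCWD */

	int main(int argc, char **argv)
	{
		if (argc != 3)
			return 1;
		/* rename argv[1] -> argv[2], leaving a whiteout at argv[1];
		 * this is what overlayfs-style copy-up stacks rely on */
		if (renameat2(AT_FDCWD, argv[1], AT_FDCWD, argv[2],
			      RENAME_WHITEOUT)) {
			perror("renameat2");
			return 1;
		}
		return 0;
	}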
BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
BUG_ON(dst_inode &&
@@ -779,11 +875,17 @@ static void bch2_setattr_copy(struct mnt_idmap *idmap,
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
unsigned int ia_valid = attr->ia_valid;
+ kuid_t kuid;
+ kgid_t kgid;
- if (ia_valid & ATTR_UID)
- bi->bi_uid = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
- if (ia_valid & ATTR_GID)
- bi->bi_gid = from_kgid(i_user_ns(&inode->v), attr->ia_gid);
+ if (ia_valid & ATTR_UID) {
+ kuid = from_vfsuid(idmap, i_user_ns(&inode->v), attr->ia_vfsuid);
+ bi->bi_uid = from_kuid(i_user_ns(&inode->v), kuid);
+ }
+ if (ia_valid & ATTR_GID) {
+ kgid = from_vfsgid(idmap, i_user_ns(&inode->v), attr->ia_vfsgid);
+ bi->bi_gid = from_kgid(i_user_ns(&inode->v), kgid);
+ }
if (ia_valid & ATTR_SIZE)
bi->bi_size = attr->ia_size;
@@ -798,11 +900,11 @@ static void bch2_setattr_copy(struct mnt_idmap *idmap,
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
kgid_t gid = ia_valid & ATTR_GID
- ? attr->ia_gid
+ ? kgid
: inode->v.i_gid;
- if (!in_group_p(gid) &&
- !capable_wrt_inode_uidgid(idmap, &inode->v, CAP_FSETID))
+ if (!in_group_or_capable(idmap, &inode->v,
+ make_vfsgid(idmap, i_user_ns(&inode->v), gid)))
mode &= ~S_ISGID;
bi->bi_mode = mode;
}
@@ -818,17 +920,23 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
struct btree_iter inode_iter = { NULL };
struct bch_inode_unpacked inode_u;
struct posix_acl *acl = NULL;
+ kuid_t kuid;
+ kgid_t kgid;
int ret;
mutex_lock(&inode->ei_update_lock);
qid = inode->ei_qid;
- if (attr->ia_valid & ATTR_UID)
- qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
+ if (attr->ia_valid & ATTR_UID) {
+ kuid = from_vfsuid(idmap, i_user_ns(&inode->v), attr->ia_vfsuid);
+ qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), kuid);
+ }
- if (attr->ia_valid & ATTR_GID)
- qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), attr->ia_gid);
+ if (attr->ia_valid & ATTR_GID) {
+ kgid = from_vfsgid(idmap, i_user_ns(&inode->v), attr->ia_vfsgid);
+ qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), kgid);
+ }
ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
KEY_TYPE_QUOTA_PREALLOC);
@@ -884,13 +992,15 @@ static int bch2_getattr(struct mnt_idmap *idmap,
{
struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, &inode->v);
+ vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, &inode->v);
stat->dev = inode->v.i_sb->s_dev;
stat->ino = inode->v.i_ino;
stat->mode = inode->v.i_mode;
stat->nlink = inode->v.i_nlink;
- stat->uid = inode->v.i_uid;
- stat->gid = inode->v.i_gid;
+ stat->uid = vfsuid_into_kuid(vfsuid);
+ stat->gid = vfsgid_into_kgid(vfsgid);
stat->rdev = inode->v.i_rdev;
stat->size = i_size_read(&inode->v);
stat->atime = inode_get_atime(&inode->v);
@@ -899,7 +1009,7 @@ static int bch2_getattr(struct mnt_idmap *idmap,
stat->blksize = block_bytes(c);
stat->blocks = inode->v.i_blocks;
- stat->subvol = inode->ei_subvol;
+ stat->subvol = inode->ei_inum.subvol;
stat->result_mask |= STATX_SUBVOL;
if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->v.i_mode)) {
@@ -941,7 +1051,7 @@ static int bch2_setattr(struct mnt_idmap *idmap,
lockdep_assert_held(&inode->v.i_rwsem);
- ret = bch2_subvol_is_ro(c, inode->ei_subvol) ?:
+ ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
setattr_prepare(idmap, dentry, iattr);
if (ret)
return ret;
@@ -1034,7 +1144,6 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
struct bkey_buf cur, prev;
unsigned offset_into_extent, sectors;
bool have_extent = false;
- u32 snapshot;
int ret = 0;
ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
@@ -1050,21 +1159,30 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bch2_bkey_buf_init(&cur);
bch2_bkey_buf_init(&prev);
trans = bch2_trans_get(c);
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_subvolume_get_snapshot(trans, ei->ei_subvol, &snapshot);
- if (ret)
- goto err;
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(ei->v.i_ino, start, snapshot), 0);
+ POS(ei->v.i_ino, start), 0);
- while (!(ret = btree_trans_too_many_iters(trans)) &&
- (k = bch2_btree_iter_peek_upto(&iter, end)).k &&
- !(ret = bkey_err(k))) {
+ while (true) {
enum btree_id data_btree = BTREE_ID_extents;
+ bch2_trans_begin(trans);
+
+ u32 snapshot;
+ ret = bch2_subvolume_get_snapshot(trans, ei->ei_inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
+
+ k = bch2_btree_iter_peek_upto(&iter, end);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!k.k)
+ break;
+
if (!bkey_extent_is_data(k.k) &&
k.k->type != KEY_TYPE_reservation) {
bch2_btree_iter_advance(&iter);
@@ -1108,16 +1226,12 @@ retry:
bch2_btree_iter_set_pos(&iter,
POS(iter.pos.inode, iter.pos.offset + sectors));
-
- ret = bch2_trans_relock(trans);
- if (ret)
+err:
+ if (ret &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart))
break;
}
- start = iter.pos.offset;
bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
if (!ret && have_extent) {
bch2_trans_unlock(trans);
@@ -1173,7 +1287,7 @@ static int bch2_open(struct inode *vinode, struct file *file)
struct bch_inode_info *inode = to_bch_ei(vinode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- int ret = bch2_subvol_is_ro(c, inode->ei_subvol);
+ int ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol);
if (ret)
return ret;
}
@@ -1305,8 +1419,8 @@ static int bcachefs_fid_valid(int fh_len, int fh_type)
static struct bcachefs_fid bch2_inode_to_fid(struct bch_inode_info *inode)
{
return (struct bcachefs_fid) {
- .inum = inode->ei_inode.bi_inum,
- .subvol = inode->ei_subvol,
+ .inum = inode->ei_inum.inum,
+ .subvol = inode->ei_inum.subvol,
.gen = inode->ei_inode.bi_generation,
};
}
@@ -1391,7 +1505,7 @@ static struct dentry *bch2_get_parent(struct dentry *child)
struct bch_fs *c = inode->v.i_sb->s_fs_info;
subvol_inum parent_inum = {
.subvol = inode->ei_inode.bi_parent_subvol ?:
- inode->ei_subvol,
+ inode->ei_inum.subvol,
.inum = inode->ei_inode.bi_dir,
};
@@ -1427,7 +1541,7 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child
retry:
bch2_trans_begin(trans);
- ret = bch2_subvolume_get_snapshot(trans, dir->ei_subvol, &snapshot);
+ ret = bch2_subvolume_get_snapshot(trans, dir->ei_inum.subvol, &snapshot);
if (ret)
goto err;
@@ -1458,8 +1572,7 @@ retry:
if (ret)
goto err;
- if (target.subvol == inode->ei_subvol &&
- target.inum == inode->ei_inode.bi_inum)
+ if (subvol_inum_eq(target, inode->ei_inum))
goto found;
} else {
/*
@@ -1480,8 +1593,7 @@ retry:
if (ret)
continue;
- if (target.subvol == inode->ei_subvol &&
- target.inum == inode->ei_inode.bi_inum)
+ if (subvol_inum_eq(target, inode->ei_inum))
goto found;
}
}
@@ -1513,12 +1625,15 @@ static const struct export_operations bch_export_ops = {
.get_name = bch2_get_name,
};
-static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
+static void bch2_vfs_inode_init(struct btree_trans *trans,
+ subvol_inum inum,
struct bch_inode_info *inode,
struct bch_inode_unpacked *bi,
struct bch_subvolume *subvol)
{
- bch2_iget5_set(&inode->v, &inum);
+ inode->v.i_ino = inum.inum;
+ inode->ei_inum = inum;
+ inode->ei_inode.bi_inum = inum.inum;
bch2_inode_update_after_write(trans, inode, bi, ~0);
inode->v.i_blocks = bi->bi_sectors;
@@ -1530,7 +1645,6 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
inode->ei_flags = 0;
inode->ei_quota_reserved = 0;
inode->ei_qid = bch_qid(bi);
- inode->ei_subvol = inum.subvol;
if (BCH_SUBVOLUME_SNAP(subvol))
set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
@@ -1597,6 +1711,17 @@ static void bch2_evict_inode(struct inode *vinode)
{
struct bch_fs *c = vinode->i_sb->s_fs_info;
struct bch_inode_info *inode = to_bch_ei(vinode);
+ bool delete = !inode->v.i_nlink && !is_bad_inode(&inode->v);
+
+ /*
+ * evict() has waited for outstanding writeback, we'll do no more IO
+ * through this inode: it's safe to remove from VFS inode hashtable here
+ *
+ * Do that now so that other threads aren't blocked from pulling it back
+ * in, there's no reason for them to be:
+ */
+ if (!delete)
+ bch2_inode_hash_remove(c, inode);
truncate_inode_pages_final(&inode->v.i_data);
@@ -1604,12 +1729,18 @@ static void bch2_evict_inode(struct inode *vinode)
BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
- if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
+ if (delete) {
bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
KEY_TYPE_QUOTA_WARN);
bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
KEY_TYPE_QUOTA_WARN);
bch2_inode_rm(c, inode_inum(inode));
+
+ /*
+ * If we are deleting, we need it present in the vfs hash table
+ * so that fsck can check if unlinked inodes are still open:
+ */
+ bch2_inode_hash_remove(c, inode);
}
mutex_lock(&c->vfs_inodes_lock);
@@ -1639,7 +1770,7 @@ again:
mutex_lock(&c->vfs_inodes_lock);
list_for_each_entry(inode, &c->vfs_inodes_list, ei_vfs_inode_list) {
- if (!snapshot_list_has_id(s, inode->ei_subvol))
+ if (!snapshot_list_has_id(s, inode->ei_inum.subvol))
continue;
if (!(inode->v.i_state & I_DONTCACHE) &&
@@ -1801,30 +1932,14 @@ static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
static int bch2_show_options(struct seq_file *seq, struct dentry *root)
{
struct bch_fs *c = root->d_sb->s_fs_info;
- enum bch_opt_id i;
struct printbuf buf = PRINTBUF;
- int ret = 0;
- for (i = 0; i < bch2_opts_nr; i++) {
- const struct bch_option *opt = &bch2_opt_table[i];
- u64 v = bch2_opt_get_by_id(&c->opts, i);
+ bch2_opts_to_text(&buf, c->opts, c, c->disk_sb.sb,
+ OPT_MOUNT, OPT_HIDDEN, OPT_SHOW_MOUNT_STYLE);
+ printbuf_nul_terminate(&buf);
+ seq_puts(seq, buf.buf);
- if ((opt->flags & OPT_HIDDEN) ||
- !(opt->flags & OPT_MOUNT))
- continue;
-
- if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
- continue;
-
- printbuf_reset(&buf);
- bch2_opt_to_text(&buf, c, c->disk_sb.sb, opt, v,
- OPT_SHOW_MOUNT_STYLE);
- seq_putc(seq, ',');
- seq_puts(seq, buf.buf);
- }
-
- if (buf.allocation_failure)
- ret = -ENOMEM;
+ int ret = buf.allocation_failure ? -ENOMEM : 0;
printbuf_exit(&buf);
return ret;
}
@@ -2129,12 +2244,23 @@ static int bch2_init_fs_context(struct fs_context *fc)
return 0;
}
+void bch2_fs_vfs_exit(struct bch_fs *c)
+{
+ if (c->vfs_inodes_table.tbl)
+ rhashtable_destroy(&c->vfs_inodes_table);
+}
+
+int bch2_fs_vfs_init(struct bch_fs *c)
+{
+ return rhashtable_init(&c->vfs_inodes_table, &bch2_vfs_inodes_params);
+}
+
static struct file_system_type bcache_fs_type = {
.owner = THIS_MODULE,
.name = "bcachefs",
.init_fs_context = bch2_init_fs_context,
.kill_sb = bch2_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("bcachefs");
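With FS_ALLOW_IDMAP set, a bcachefs mount can sit behind an ID-mapped mount, and the mapped_fsuid()/from_vfsuid() conversions added above perform the per-mount translation. For reference, a hedged sketch of how userspace creates such a mount with the new mount API (raw syscalls; userns_fd is a file descriptor for the user namespace supplying the mapping, error handling elided):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/mount.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int idmap_mount(const char *src, const char *dst, int userns_fd)
	{
		/* detached copy of the mount at src */
		int fd = syscall(SYS_open_tree, AT_FDCWD, src,
				 OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);

		struct mount_attr attr = {
			.attr_set = MOUNT_ATTR_IDMAP,
			.userns_fd = userns_fd,
		};
		syscall(SYS_mount_setattr, fd, "", AT_EMPTY_PATH,
			&attr, sizeof(attr));

		/* attach the now-idmapped copy at dst */
		return syscall(SYS_move_mount, fd, "", AT_FDCWD, dst,
			       MOVE_MOUNT_F_EMPTY_PATH);
	}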
@@ -2149,7 +2275,8 @@ int __init bch2_vfs_init(void)
{
int ret = -ENOMEM;
- bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
+ bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT |
+ SLAB_ACCOUNT);
if (!bch2_inode_cache)
goto err;
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
index 990ec43e0365..da74ecc236e7 100644
--- a/fs/bcachefs/fs.h
+++ b/fs/bcachefs/fs.h
@@ -13,6 +13,9 @@
struct bch_inode_info {
struct inode v;
+ struct rhash_head hash;
+ subvol_inum ei_inum;
+
struct list_head ei_vfs_inode_list;
unsigned long ei_flags;
@@ -24,8 +27,6 @@ struct bch_inode_info {
struct mutex ei_quota_lock;
struct bch_qid ei_qid;
- u32 ei_subvol;
-
/*
* When we've been doing nocow writes we'll need to issue flushes to the
* underlying block devices
@@ -50,10 +51,7 @@ struct bch_inode_info {
static inline subvol_inum inode_inum(struct bch_inode_info *inode)
{
- return (subvol_inum) {
- .subvol = inode->ei_subvol,
- .inum = inode->ei_inode.bi_inum,
- };
+ return inode->ei_inum;
}
struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *, subvol_inum);
@@ -69,6 +67,7 @@ struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *, subvol_inum);
* those:
*/
#define EI_INODE_SNAPSHOT 1
+#define EI_INODE_HASHED 2
#define to_bch_ei(_inode) \
container_of_or_null(_inode, struct bch_inode_info, v)
@@ -189,6 +188,9 @@ int __bch2_unlink(struct inode *, struct dentry *, bool);
void bch2_evict_subvolume_inodes(struct bch_fs *, snapshot_id_list *);
+void bch2_fs_vfs_exit(struct bch_fs *);
+int bch2_fs_vfs_init(struct bch_fs *);
+
void bch2_vfs_exit(void);
int bch2_vfs_init(void);
@@ -203,6 +205,10 @@ static inline struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, su
static inline void bch2_evict_subvolume_inodes(struct bch_fs *c,
snapshot_id_list *s) {}
+
+static inline void bch2_fs_vfs_exit(struct bch_fs *c) {}
+static inline int bch2_fs_vfs_init(struct bch_fs *c) { return 0; }
+
static inline void bch2_vfs_exit(void) {}
static inline int bch2_vfs_init(void) { return 0; }
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 9b3470a97546..0d8b782b63fb 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -21,6 +21,49 @@
#include <linux/bsearch.h>
#include <linux/dcache.h> /* struct qstr */
+static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
+ struct bkey_s_c_dirent d)
+{
+ return inode->bi_dir == d.k->p.inode &&
+ inode->bi_dir_offset == d.k->p.offset;
+}
+
+static int dirent_points_to_inode_nowarn(struct bkey_s_c_dirent d,
+ struct bch_inode_unpacked *inode)
+{
+ if (d.v->d_type == DT_SUBVOL
+ ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
+ : le64_to_cpu(d.v->d_inum) == inode->bi_inum)
+ return 0;
+ return -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
+}
+
+static void dirent_inode_mismatch_msg(struct printbuf *out,
+ struct bch_fs *c,
+ struct bkey_s_c_dirent dirent,
+ struct bch_inode_unpacked *inode)
+{
+ prt_str(out, "inode points to dirent that does not point back:");
+ prt_newline(out);
+ bch2_bkey_val_to_text(out, c, dirent.s_c);
+ prt_newline(out);
+ bch2_inode_unpacked_to_text(out, inode);
+}
+
+static int dirent_points_to_inode(struct bch_fs *c,
+ struct bkey_s_c_dirent dirent,
+ struct bch_inode_unpacked *inode)
+{
+ int ret = dirent_points_to_inode_nowarn(dirent, inode);
+ if (ret) {
+ struct printbuf buf = PRINTBUF;
+ dirent_inode_mismatch_msg(&buf, c, dirent, inode);
+ bch_warn(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+ return ret;
+}
+
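Together these helpers express the invariant fsck enforces: the inode's (bi_dir, bi_dir_offset) backpointer must name an existing dirent, and that dirent must point back at the inode (or, for DT_SUBVOL entries, at its subvolume). A one-line combination, for illustration:

	/* Sketch: a link is healthy only if it agrees in both directions
	 * (dirent_points_to_inode_nowarn() returns 0 on a match). */
	static bool link_is_consistent(struct bch_inode_unpacked *inode,
				       struct bkey_s_c_dirent d)
	{
		return inode_points_to_dirent(inode, d) &&
		       !dirent_points_to_inode_nowarn(d, inode);
	}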
/*
* XXX: this is handling transaction restarts without returning
* -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
@@ -346,14 +389,17 @@ static int reattach_inode(struct btree_trans *trans,
static int remove_backpointer(struct btree_trans *trans,
struct bch_inode_unpacked *inode)
{
- struct btree_iter iter;
- struct bkey_s_c_dirent d;
- int ret;
+ if (!inode->bi_dir)
+ return 0;
- d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
- POS(inode->bi_dir, inode->bi_dir_offset), 0,
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c_dirent d =
+ bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
+ SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot), 0,
dirent);
- ret = bkey_err(d) ?:
+ int ret = bkey_err(d) ?:
+ dirent_points_to_inode(c, d, inode) ?:
__remove_dirent(trans, d.k->p);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -371,7 +417,8 @@ static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume
return ret;
ret = remove_backpointer(trans, &inode);
- bch_err_msg(c, ret, "removing dirent");
+ if (!bch2_err_matches(ret, ENOENT))
+ bch_err_msg(c, ret, "removing dirent");
if (ret)
return ret;
@@ -626,12 +673,12 @@ static int ref_visible2(struct bch_fs *c,
struct inode_walker_entry {
struct bch_inode_unpacked inode;
u32 snapshot;
- bool seen_this_pos;
u64 count;
};
struct inode_walker {
bool first_this_inode;
+ bool have_inodes;
bool recalculate_sums;
struct bpos last_pos;
@@ -669,6 +716,12 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
+ /*
+ * We no longer have inodes for w->last_pos; clear this to avoid
+ * screwing up check_i_sectors/check_subdir_count if we take a
+ * transaction restart here:
+ */
+ w->have_inodes = false;
w->recalculate_sums = false;
w->inodes.nr = 0;
@@ -686,6 +739,7 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
return ret;
w->first_this_inode = true;
+ w->have_inodes = true;
return 0;
}
@@ -740,9 +794,6 @@ static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode);
if (ret)
return ERR_PTR(ret);
- } else if (bkey_cmp(w->last_pos, k.k->p)) {
- darray_for_each(w->inodes, i)
- i->seen_this_pos = false;
}
w->last_pos = k.k->p;
@@ -896,21 +947,6 @@ static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans,
return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot));
}
-static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
- struct bkey_s_c_dirent d)
-{
- return inode->bi_dir == d.k->p.inode &&
- inode->bi_dir_offset == d.k->p.offset;
-}
-
-static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
- struct bch_inode_unpacked *inode)
-{
- return d.v->d_type == DT_SUBVOL
- ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
- : le64_to_cpu(d.v->d_inum) == inode->bi_inum;
-}
-
static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
{
struct btree_iter iter;
@@ -920,13 +956,14 @@ static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
return ret;
}
-static int check_inode_dirent_inode(struct btree_trans *trans, struct bkey_s_c inode_k,
+static int check_inode_dirent_inode(struct btree_trans *trans,
struct bch_inode_unpacked *inode,
- u32 inode_snapshot, bool *write_inode)
+ bool *write_inode)
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
+ u32 inode_snapshot = inode->bi_snapshot;
struct btree_iter dirent_iter = {};
struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
int ret = bkey_err(d);
@@ -936,13 +973,13 @@ static int check_inode_dirent_inode(struct btree_trans *trans, struct bkey_s_c i
if (fsck_err_on(ret,
trans, inode_points_to_missing_dirent,
"inode points to missing dirent\n%s",
- (bch2_bkey_val_to_text(&buf, c, inode_k), buf.buf)) ||
- fsck_err_on(!ret && !dirent_points_to_inode(d, inode),
+ (bch2_inode_unpacked_to_text(&buf, inode), buf.buf)) ||
+ fsck_err_on(!ret && dirent_points_to_inode_nowarn(d, inode),
trans, inode_points_to_wrong_dirent,
- "inode points to dirent that does not point back:\n%s",
- (bch2_bkey_val_to_text(&buf, c, inode_k),
- prt_newline(&buf),
- bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
+ "%s",
+ (printbuf_reset(&buf),
+ dirent_inode_mismatch_msg(&buf, c, d, inode),
+ buf.buf))) {
/*
* We just clear the backpointer fields for now. If we find a
* dirent that points to this inode in check_dirents(), we'll
@@ -963,7 +1000,7 @@ fsck_err:
return ret;
}
-static bool bch2_inode_open(struct bch_fs *c, struct bpos p)
+static bool bch2_inode_is_open(struct bch_fs *c, struct bpos p)
{
subvol_inum inum = {
.subvol = snapshot_t(c, p.snapshot)->subvol,
@@ -972,7 +1009,7 @@ static bool bch2_inode_open(struct bch_fs *c, struct bpos p)
/* snapshot tree corruption, can't safely delete */
if (!inum.subvol) {
- bch_err_ratelimited(c, "%s(): snapshot %u has no subvol", __func__, p.snapshot);
+ bch_warn_ratelimited(c, "%s(): snapshot %u has no subvol, unlinked but can't safely delete", __func__, p.snapshot);
return true;
}
@@ -1045,30 +1082,44 @@ static int check_inode(struct btree_trans *trans,
}
if (u.bi_flags & BCH_INODE_unlinked) {
- ret = check_inode_deleted_list(trans, k.k->p);
- if (ret < 0)
- return ret;
+ if (!test_bit(BCH_FS_started, &c->flags)) {
+ /*
+ * If we're not in online fsck, don't delete unlinked
+ * inodes, just make sure they're on the deleted list.
+ *
+ * They might be referred to by a logged operation -
+ * i.e. we might have crashed in the middle of a
+ * truncate on an unlinked but open file - so we want to
+ * let the delete_dead_inodes kill it after resuming
+ * logged ops.
+ */
+ ret = check_inode_deleted_list(trans, k.k->p);
+ if (ret < 0)
+ return ret;
- fsck_err_on(!ret,
- trans, unlinked_inode_not_on_deleted_list,
- "inode %llu:%u unlinked, but not on deleted list",
- u.bi_inum, k.k->p.snapshot);
- ret = 0;
- }
+ fsck_err_on(!ret,
+ trans, unlinked_inode_not_on_deleted_list,
+ "inode %llu:%u unlinked, but not on deleted list",
+ u.bi_inum, k.k->p.snapshot);
- if (u.bi_flags & BCH_INODE_unlinked &&
- !bch2_inode_open(c, k.k->p) &&
- (!c->sb.clean ||
- fsck_err(trans, inode_unlinked_but_clean,
- "filesystem marked clean, but inode %llu unlinked",
- u.bi_inum))) {
- ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
- bch_err_msg(c, ret, "in fsck deleting inode");
- return ret;
+ ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, k.k->p, 1);
+ if (ret)
+ goto err;
+ } else {
+ if (fsck_err_on(bch2_inode_is_open(c, k.k->p),
+ trans, inode_unlinked_and_not_open,
+ "inode %llu%u unlinked and not open",
+ u.bi_inum, u.bi_snapshot)) {
+ ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
+ bch_err_msg(c, ret, "in fsck deleting inode");
+ return ret;
+ }
+ }
}
+ /* i_size_dirty is vestigial, since we now have logged ops for truncate */
if (u.bi_flags & BCH_INODE_i_size_dirty &&
- (!c->sb.clean ||
+ (!test_bit(BCH_FS_clean_recovery, &c->flags) ||
fsck_err(trans, inode_i_size_dirty_but_clean,
"filesystem marked clean, but inode %llu has i_size dirty",
u.bi_inum))) {
@@ -1097,8 +1148,9 @@ static int check_inode(struct btree_trans *trans,
do_update = true;
}
+ /* i_sectors_dirty is vestigial, i_sectors is always updated transactionally */
if (u.bi_flags & BCH_INODE_i_sectors_dirty &&
- (!c->sb.clean ||
+ (!test_bit(BCH_FS_clean_recovery, &c->flags) ||
fsck_err(trans, inode_i_sectors_dirty_but_clean,
"filesystem marked clean, but inode %llu has i_sectors dirty",
u.bi_inum))) {
@@ -1126,7 +1178,7 @@ static int check_inode(struct btree_trans *trans,
}
if (u.bi_dir || u.bi_dir_offset) {
- ret = check_inode_dirent_inode(trans, k, &u, k.k->p.snapshot, &do_update);
+ ret = check_inode_dirent_inode(trans, &u, &do_update);
if (ret)
goto err;
}
@@ -1555,10 +1607,10 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k,
struct inode_walker *inode,
struct snapshots_seen *s,
- struct extent_ends *extent_ends)
+ struct extent_ends *extent_ends,
+ struct disk_reservation *res)
{
struct bch_fs *c = trans->c;
- struct inode_walker_entry *i;
struct printbuf buf = PRINTBUF;
int ret = 0;
@@ -1568,7 +1620,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
goto out;
}
- if (inode->last_pos.inode != k.k->p.inode) {
+ if (inode->last_pos.inode != k.k->p.inode && inode->have_inodes) {
ret = check_i_sectors(trans, inode);
if (ret)
goto err;
@@ -1578,12 +1630,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
goto err;
- i = walk_inode(trans, inode, k);
- ret = PTR_ERR_OR_ZERO(i);
+ struct inode_walker_entry *extent_i = walk_inode(trans, inode, k);
+ ret = PTR_ERR_OR_ZERO(extent_i);
if (ret)
goto err;
- ret = check_key_has_inode(trans, iter, inode, i, k);
+ ret = check_key_has_inode(trans, iter, inode, extent_i, k);
if (ret)
goto err;
@@ -1592,24 +1644,19 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
&inode->recalculate_sums);
if (ret)
goto err;
- }
- /*
- * Check inodes in reverse order, from oldest snapshots to newest,
- * starting from the inode that matches this extent's snapshot. If we
- * didn't have one, iterate over all inodes:
- */
- if (!i)
- i = &darray_last(inode->inodes);
-
- for (;
- inode->inodes.data && i >= inode->inodes.data;
- --i) {
- if (i->snapshot > k.k->p.snapshot ||
- !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
- continue;
+ /*
+ * Check inodes in reverse order, from oldest snapshots to
+ * newest, starting from the inode that matches this extent's
+ * snapshot. If we didn't have one, iterate over all inodes:
+ */
+ for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
+ inode->inodes.data && i >= inode->inodes.data;
+ --i) {
+ if (i->snapshot > k.k->p.snapshot ||
+ !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
+ continue;
- if (k.k->type != KEY_TYPE_whiteout) {
if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_size_dirty) &&
k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
!bkey_extent_is_reservation(k),
@@ -1629,13 +1676,25 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
iter->k.type = KEY_TYPE_whiteout;
+ break;
}
-
- if (bkey_extent_is_allocation(k.k))
- i->count += k.k->size;
}
+ }
- i->seen_this_pos = true;
+ ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc);
+ if (ret)
+ goto err;
+
+ if (bkey_extent_is_allocation(k.k)) {
+ for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
+ inode->inodes.data && i >= inode->inodes.data;
+ --i) {
+ if (i->snapshot > k.k->p.snapshot ||
+ !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
+ continue;
+
+ i->count += k.k->size;
+ }
}
if (k.k->type != KEY_TYPE_whiteout) {
@@ -1666,13 +1725,11 @@ int bch2_check_extents(struct bch_fs *c)
extent_ends_init(&extent_ends);
int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
+ for_each_btree_key(trans, iter, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- &res, NULL,
- BCH_TRANS_COMMIT_no_enospc, ({
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
bch2_disk_reservation_put(c, &res);
- check_extent(trans, &iter, k, &w, &s, &extent_ends) ?:
+ check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?:
check_extent_overbig(trans, &iter, k);
})) ?:
check_i_sectors_notnested(trans, &w));
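Moving the commit out of for_each_btree_key_commit() and into check_extent() itself means the in-memory i->count accounting runs only after the repair transaction has actually committed; on a restart the key is re-checked from scratch instead of being double counted. The shape of the pattern, hedged (queue_repairs() and account() are stand-ins for the real fsck logic):

	static int check_one(struct btree_trans *trans, struct btree_iter *iter,
			     struct bkey_s_c k, struct inode_walker *w)
	{
		int ret = queue_repairs(trans, iter, k) ?:
			  bch2_trans_commit(trans, NULL, NULL,
					    BCH_TRANS_COMMIT_no_enospc);
		if (ret)	/* includes transaction restarts: the key will
				 * simply be visited again */
			return ret;

		account(w, k);	/* non-transactional state: must run once */
		return 0;
	}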
@@ -1758,6 +1815,7 @@ static int check_dirent_inode_dirent(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
+ struct btree_iter bp_iter = { NULL };
int ret = 0;
if (inode_points_to_dirent(target, d))
@@ -1770,7 +1828,7 @@ static int check_dirent_inode_dirent(struct btree_trans *trans,
prt_printf(&buf, "\n "),
bch2_inode_unpacked_to_text(&buf, target),
buf.buf)))
- goto out_noiter;
+ goto err;
if (!target->bi_dir &&
!target->bi_dir_offset) {
@@ -1779,7 +1837,6 @@ static int check_dirent_inode_dirent(struct btree_trans *trans,
return __bch2_fsck_write_inode(trans, target, target_snapshot);
}
- struct btree_iter bp_iter = { NULL };
struct bkey_s_c_dirent bp_dirent = dirent_get_by_pos(trans, &bp_iter,
SPOS(target->bi_dir, target->bi_dir_offset, target_snapshot));
ret = bkey_err(bp_dirent);
@@ -1840,7 +1897,6 @@ out:
err:
fsck_err:
bch2_trans_iter_exit(trans, &bp_iter);
-out_noiter:
printbuf_exit(&buf);
bch_err_fn(c, ret);
return ret;
@@ -2075,7 +2131,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (k.k->type == KEY_TYPE_whiteout)
goto out;
- if (dir->last_pos.inode != k.k->p.inode) {
+ if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) {
ret = check_subdir_count(trans, dir);
if (ret)
goto err;
@@ -2137,11 +2193,15 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
goto err;
}
-
- if (d.v->d_type == DT_DIR)
- for_each_visible_inode(c, s, dir, d.k->p.snapshot, i)
- i->count++;
}
+
+ ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+ if (ret)
+ goto err;
+
+ if (d.v->d_type == DT_DIR)
+ for_each_visible_inode(c, s, dir, d.k->p.snapshot, i)
+ i->count++;
out:
err:
fsck_err:
@@ -2164,12 +2224,9 @@ int bch2_check_dirents(struct bch_fs *c)
snapshots_seen_init(&s);
int ret = bch2_trans_run(c,
- for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
+ for_each_btree_key(trans, iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
- k,
- NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)) ?:
check_subdir_count_notnested(trans, &dir));
@@ -2314,22 +2371,6 @@ static bool darray_u32_has(darray_u32 *d, u32 v)
return false;
}
-/*
- * We've checked that inode backpointers point to valid dirents; here, it's
- * sufficient to check that the subvolume root has a dirent:
- */
-static int subvol_has_dirent(struct btree_trans *trans, struct bkey_s_c_subvolume s)
-{
- struct bch_inode_unpacked inode;
- int ret = bch2_inode_find_by_inum_trans(trans,
- (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
- &inode);
- if (ret)
- return ret;
-
- return inode.bi_dir != 0;
-}
-
static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
@@ -2348,14 +2389,24 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
- ret = subvol_has_dirent(trans, s);
- if (ret < 0)
+ struct bch_inode_unpacked subvol_root;
+ ret = bch2_inode_find_by_inum_trans(trans,
+ (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
+ &subvol_root);
+ if (ret)
break;
- if (fsck_err_on(!ret,
+ /*
+ * We've checked that inode backpointers point to valid dirents;
+ * here, it's sufficient to check that the subvolume root has a
+ * dirent:
+ */
+ if (fsck_err_on(!subvol_root.bi_dir,
trans, subvol_unreachable,
"unreachable subvolume %s",
(bch2_bkey_val_to_text(&buf, c, s.s_c),
+ prt_newline(&buf),
+ bch2_inode_unpacked_to_text(&buf, &subvol_root),
buf.buf))) {
ret = reattach_subvol(trans, s);
break;
@@ -2450,10 +2501,8 @@ static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c ino
if (ret && !bch2_err_matches(ret, ENOENT))
break;
- if (!ret && !dirent_points_to_inode(d, &inode)) {
+ if (!ret && (ret = dirent_points_to_inode(c, d, &inode)))
bch2_trans_iter_exit(trans, &dirent_iter);
- ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
- }
if (bch2_err_matches(ret, ENOENT)) {
ret = 0;
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 2be6be33afa3..753c208896c3 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -320,9 +320,11 @@ static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k,
int bch2_inode_unpack(struct bkey_s_c k,
struct bch_inode_unpacked *unpacked)
{
- if (likely(k.k->type == KEY_TYPE_inode_v3))
- return bch2_inode_unpack_v3(k, unpacked);
- return bch2_inode_unpack_slowpath(k, unpacked);
+ unpacked->bi_snapshot = k.k->p.snapshot;
+
+ return likely(k.k->type == KEY_TYPE_inode_v3)
+ ? bch2_inode_unpack_v3(k, unpacked)
+ : bch2_inode_unpack_slowpath(k, unpacked);
}
int bch2_inode_peek_nowarn(struct btree_trans *trans,
@@ -365,7 +367,7 @@ int bch2_inode_peek(struct btree_trans *trans,
subvol_inum inum, unsigned flags)
{
int ret = bch2_inode_peek_nowarn(trans, iter, inode, inum, flags);
- bch_err_msg(trans->c, ret, "looking up inum %u:%llu:", inum.subvol, inum.inum);
+ bch_err_msg(trans->c, ret, "looking up inum %llu:%llu:", inum.subvol, inum.inum);
return ret;
}
@@ -557,7 +559,7 @@ static void __bch2_inode_unpacked_to_text(struct printbuf *out,
void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode)
{
- prt_printf(out, "inum: %llu ", inode->bi_inum);
+ prt_printf(out, "inum: %llu:%u ", inode->bi_inum, inode->bi_snapshot);
__bch2_inode_unpacked_to_text(out, inode);
}
@@ -1111,7 +1113,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
pos.offset, pos.snapshot))
goto delete;
- if (c->sb.clean &&
+ if (test_bit(BCH_FS_clean_recovery, &c->flags) &&
!fsck_err(trans, deleted_inode_but_clean,
"filesystem marked as clean but have deleted inode %llu:%u",
pos.offset, pos.snapshot)) {
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index f1fcb4c58039..695abd707cb6 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -69,6 +69,7 @@ typedef u64 u96;
struct bch_inode_unpacked {
u64 bi_inum;
+ u32 bi_snapshot;
u64 bi_journal_seq;
__le64 bi_hash_seed;
u64 bi_size;
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 7ee3b75480df..e4fc17c548fd 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -286,7 +286,7 @@ static struct promote_op *promote_alloc(struct btree_trans *trans,
*/
bool promote_full = (failed ||
*read_full ||
- READ_ONCE(c->promote_whole_extents));
+ READ_ONCE(c->opts.promote_whole_extents));
/* data might have to be decompressed in the write path: */
unsigned sectors = promote_full
? max(pick->crc.compressed_size, pick->crc.live_size)
@@ -517,7 +517,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
if ((ret = bkey_err(k)))
goto out;
- if (bversion_cmp(k.k->version, rbio->version) ||
+ if (bversion_cmp(k.k->bversion, rbio->version) ||
!bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
goto out;
@@ -777,7 +777,7 @@ int __bch2_read_indirect_extent(struct btree_trans *trans,
orig_k->k->k.size,
reflink_offset);
bch2_inconsistent_error(trans->c);
- ret = -EIO;
+ ret = -BCH_ERR_missing_indirect_extent;
goto err;
}
@@ -869,9 +869,15 @@ retry_pick:
goto hole;
if (pick_ret < 0) {
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, k);
+
bch_err_inum_offset_ratelimited(c,
read_pos.inode, read_pos.offset << 9,
- "no device to read from");
+ "no device to read from: %s\n %s",
+ bch2_err_str(pick_ret),
+ buf.buf);
+ printbuf_exit(&buf);
goto err;
}
@@ -1025,7 +1031,7 @@ get_bio:
rbio->read_pos = read_pos;
rbio->data_btree = data_btree;
rbio->data_pos = data_pos;
- rbio->version = k.k->version;
+ rbio->version = k.k->bversion;
rbio->promote = promote;
INIT_WORK(&rbio->work, NULL);
@@ -1086,7 +1092,7 @@ get_bio:
trans->notrace_relock_fail = true;
} else {
/* Attempting reconstruct read: */
- if (bch2_ec_read_extent(trans, rbio)) {
+ if (bch2_ec_read_extent(trans, rbio, k)) {
bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
goto out;
}
@@ -1214,10 +1220,6 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
swap(bvec_iter.bi_size, bytes);
bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
-
- ret = btree_trans_too_many_iters(trans);
- if (ret)
- goto err;
err:
if (ret &&
!bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 1d4761d15002..b5fe9e0dc155 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -697,7 +697,7 @@ static void init_append_extent(struct bch_write_op *op,
e = bkey_extent_init(op->insert_keys.top);
e->k.p = op->pos;
e->k.size = crc.uncompressed_size;
- e->k.version = version;
+ e->k.bversion = version;
if (crc.csum_type ||
crc.compression_type ||
@@ -1447,9 +1447,7 @@ again:
op->nr_replicas_required,
op->watermark,
op->flags,
- (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
- BCH_WRITE_ONLY_SPECIFIED_DEVS))
- ? NULL : &op->cl, &wp));
+ &op->cl, &wp));
if (unlikely(ret)) {
if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
break;
@@ -1546,7 +1544,7 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
id = bkey_inline_data_init(op->insert_keys.top);
id->k.p = op->pos;
- id->k.version = op->version;
+ id->k.bversion = op->version;
id->k.size = sectors;
iter = bio->bi_iter;
@@ -1592,6 +1590,9 @@ CLOSURE_CALLBACK(bch2_write)
BUG_ON(!op->write_point.v);
BUG_ON(bkey_eq(op->pos, POS_MAX));
+ if (op->flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
+ op->flags |= BCH_WRITE_ALLOC_NOWAIT;
+
op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
op->start_time = local_clock();
bch2_keylist_init(&op->insert_keys, op->inline_keys);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 7664b68e6a15..954f6a96e0f4 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -605,7 +605,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c,
goto out;
}
- if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err),
+ if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c, &err),
c, version, jset, entry,
journal_entry_data_usage_bad_size,
"invalid journal entry usage: %s", err.buf)) {
@@ -1353,6 +1353,7 @@ int bch2_journal_read(struct bch_fs *c,
genradix_for_each(&c->journal_entries, radix_iter, _i) {
struct bch_replicas_padded replicas = {
.e.data_type = BCH_DATA_journal,
+ .e.nr_devs = 0,
.e.nr_required = 1,
};
@@ -1379,7 +1380,7 @@ int bch2_journal_read(struct bch_fs *c,
goto err;
darray_for_each(i->ptrs, ptr)
- replicas.e.devs[replicas.e.nr_devs++] = ptr->dev;
+ replicas_entry_add_dev(&replicas.e, ptr->dev);
bch2_replicas_entry_sort(&replicas.e);
@@ -1950,7 +1951,8 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *
if (error ||
w->noflush ||
(!w->must_flush &&
- (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
+ time_before(jiffies, j->last_flush_write +
+ msecs_to_jiffies(c->opts.journal_flush_delay)) &&
test_bit(JOURNAL_may_skip_flush, &j->flags))) {
w->noflush = true;
SET_JSET_NO_FLUSH(w->data, true);
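The flush-delay check now uses time_before(), the kernel's standard idiom for jiffies comparisons (roughly ((long)((a) - (b)) < 0), which stays well-defined across jiffies wraparound). Equivalent shape of the new test:

	unsigned long deadline = j->last_flush_write +
				 msecs_to_jiffies(c->opts.journal_flush_delay);
	if (time_before(jiffies, deadline)) {
		/* still within the configured flush delay: flush may be
		 * skipped (subject to JOURNAL_may_skip_flush) */
	}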
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 70b998d9f19c..ace291f175dd 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -641,6 +641,7 @@ static u64 journal_seq_to_flush(struct journal *j)
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct btree_cache *bc = &c->btree_cache;
bool kthread = (current->flags & PF_KTHREAD) != 0;
u64 seq_to_flush;
size_t min_nr, min_key_cache, nr_flushed;
@@ -681,7 +682,8 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
if (j->watermark != BCH_WATERMARK_stripe)
min_nr = 1;
- if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+ size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;
+ if (atomic_long_read(&bc->nr_dirty) * 2 > btree_cache_live)
min_nr = 1;
min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
@@ -689,8 +691,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
trace_and_count(c, journal_reclaim_start, c,
direct, kicked,
min_nr, min_key_cache,
- atomic_read(&c->btree_cache.dirty),
- c->btree_cache.used,
+ atomic_long_read(&bc->nr_dirty), btree_cache_live,
atomic_long_read(&c->btree_key_cache.nr_dirty),
atomic_long_read(&c->btree_key_cache.nr_keys));
diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c
index f49fdca1d07d..6f4a4e1083c9 100644
--- a/fs/bcachefs/logged_ops.c
+++ b/fs/bcachefs/logged_ops.c
@@ -37,6 +37,14 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_logged_op_fn *fn = logged_op_fn(k.k->type);
struct bkey_buf sk;
u32 restart_count = trans->restart_count;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ fsck_err_on(test_bit(BCH_FS_clean_recovery, &c->flags),
+ trans, logged_op_but_clean,
+ "filesystem marked as clean but have logged op\n%s",
+ (bch2_bkey_val_to_text(&buf, c, k),
+ buf.buf));
if (!fn)
return 0;
@@ -47,8 +55,9 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
fn->resume(trans, sk.k);
bch2_bkey_buf_exit(&sk, c);
-
- return trans_was_restarted(trans, restart_count);
+fsck_err:
+ printbuf_exit(&buf);
+ return ret ?: trans_was_restarted(trans, restart_count);
}
int bch2_resume_logged_ops(struct bch_fs *c)
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index e10fc1da71b1..232be8a44051 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -230,6 +230,8 @@ const struct bch_option bch2_opt_table[] = {
#define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \
.min = 0, .max = U64_MAX, \
.choices = _choices
+#define OPT_BITFIELD(_choices) .type = BCH_OPT_BITFIELD, \
+ .choices = _choices
#define OPT_FN(_fn) .type = BCH_OPT_FN, .fn = _fn
#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
@@ -376,6 +378,13 @@ int bch2_opt_parse(struct bch_fs *c,
*res = ret;
break;
+ case BCH_OPT_BITFIELD: {
+ s64 v = bch2_read_flag_list(val, opt->choices);
+ if (v < 0)
+ return v;
+ *res = v;
+ break;
+ }
case BCH_OPT_FN:
ret = opt->fn.parse(c, val, res, err);
@@ -423,6 +432,9 @@ void bch2_opt_to_text(struct printbuf *out,
else
prt_str(out, opt->choices[v]);
break;
+ case BCH_OPT_BITFIELD:
+ prt_bitflags(out, opt->choices, v);
+ break;
case BCH_OPT_FN:
opt->fn.to_text(out, c, sb, v);
break;
@@ -431,6 +443,32 @@ void bch2_opt_to_text(struct printbuf *out,
}
}
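BCH_OPT_BITFIELD round-trips through bch2_read_flag_list() on parse and prt_bitflags() on print: a comma-separated list of names maps to bit positions in a string table (as used below for recovery_passes and data_allowed). A standalone sketch of the parse direction, under the assumption that names[] is a NULL-terminated table indexed by bit; this is not the kernel helper, only an illustration:

	#include <stdint.h>
	#include <string.h>

	/* Parse "a,b,c" into a bitmask; returns -1 on an unknown name. */
	static int64_t read_flag_list(const char *val, const char * const names[])
	{
		uint64_t v = 0;

		while (*val) {
			size_t len = strcspn(val, ",");
			int found = -1;

			for (int i = 0; names[i]; i++)
				if (strlen(names[i]) == len &&
				    !strncmp(names[i], val, len))
					found = i;
			if (found < 0)
				return -1;

			v |= 1ULL << found;
			val += len + (val[len] == ',');
		}
		return (int64_t) v;
	}

In use this backs mount options like -o recovery_passes_exclude=<pass1>,<pass2>.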
+void bch2_opts_to_text(struct printbuf *out,
+ struct bch_opts opts,
+ struct bch_fs *c, struct bch_sb *sb,
+ unsigned show_mask, unsigned hide_mask,
+ unsigned flags)
+{
+ bool first = true;
+
+ for (enum bch_opt_id i = 0; i < bch2_opts_nr; i++) {
+ const struct bch_option *opt = &bch2_opt_table[i];
+
+ if ((opt->flags & hide_mask) || !(opt->flags & show_mask))
+ continue;
+
+ u64 v = bch2_opt_get_by_id(&opts, i);
+ if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
+ continue;
+
+ if (!first)
+ prt_char(out, ',');
+ first = false;
+
+ bch2_opt_to_text(out, c, sb, opt, v, flags);
+ }
+}
+
int bch2_opt_check_may_set(struct bch_fs *c, int id, u64 v)
{
int ret = 0;
@@ -608,10 +646,20 @@ int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb)
return 0;
}
-void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
+struct bch_dev_sb_opt_set {
+ void (*set_sb)(struct bch_member *, u64);
+};
+
+static const struct bch_dev_sb_opt_set bch2_dev_sb_opt_setters[] = {
+#define x(n, set) [Opt_##n] = { .set_sb = SET_##set },
+ BCH_DEV_OPT_SETTERS()
+#undef x
+};
+
+void __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx,
+ const struct bch_option *opt, u64 v)
{
- if (opt->set_sb == SET_BCH2_NO_SB_OPT)
- return;
+ enum bch_opt_id id = opt - bch2_opt_table;
if (opt->flags & OPT_SB_FIELD_SECTORS)
v >>= 9;
@@ -619,16 +667,35 @@ void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
if (opt->flags & OPT_SB_FIELD_ILOG2)
v = ilog2(v);
- opt->set_sb(sb, v);
+ if (opt->flags & OPT_SB_FIELD_ONE_BIAS)
+ v++;
+
+ if (opt->flags & OPT_FS) {
+ if (opt->set_sb != SET_BCH2_NO_SB_OPT)
+ opt->set_sb(sb, v);
+ }
+
+ if ((opt->flags & OPT_DEVICE) && dev_idx >= 0) {
+ if (WARN(!bch2_member_exists(sb, dev_idx),
+ "tried to set device option %s on nonexistent device %i",
+ opt->attr.name, dev_idx))
+ return;
+
+ struct bch_member *m = bch2_members_v2_get_mut(sb, dev_idx);
+
+ const struct bch_dev_sb_opt_set *set = bch2_dev_sb_opt_setters + id;
+ if (set->set_sb)
+ set->set_sb(m, v);
+ else
+ pr_err("option %s cannot be set via opt_set_sb()", opt->attr.name);
+ }
}
-void bch2_opt_set_sb(struct bch_fs *c, const struct bch_option *opt, u64 v)
+void bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca,
+ const struct bch_option *opt, u64 v)
{
- if (opt->set_sb == SET_BCH2_NO_SB_OPT)
- return;
-
mutex_lock(&c->sb_lock);
- __bch2_opt_set_sb(c->disk_sb.sb, opt, v);
+ __bch2_opt_set_sb(c->disk_sb.sb, ca ? ca->dev_idx : -1, opt, v);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index cda1725702ea..cb2e244a2429 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -53,23 +53,25 @@ void SET_BCH2_NO_SB_OPT(struct bch_sb *, u64);
/* When can be set: */
enum opt_flags {
- OPT_FS = (1 << 0), /* Filesystem option */
- OPT_DEVICE = (1 << 1), /* Device option */
- OPT_INODE = (1 << 2), /* Inode option */
- OPT_FORMAT = (1 << 3), /* May be specified at format time */
- OPT_MOUNT = (1 << 4), /* May be specified at mount time */
- OPT_RUNTIME = (1 << 5), /* May be specified at runtime */
- OPT_HUMAN_READABLE = (1 << 6),
- OPT_MUST_BE_POW_2 = (1 << 7), /* Must be power of 2 */
- OPT_SB_FIELD_SECTORS = (1 << 8),/* Superblock field is >> 9 of actual value */
- OPT_SB_FIELD_ILOG2 = (1 << 9), /* Superblock field is ilog2 of actual value */
- OPT_HIDDEN = (1 << 10),
+ OPT_FS = BIT(0), /* Filesystem option */
+ OPT_DEVICE = BIT(1), /* Device option */
+ OPT_INODE = BIT(2), /* Inode option */
+ OPT_FORMAT = BIT(3), /* May be specified at format time */
+ OPT_MOUNT = BIT(4), /* May be specified at mount time */
+ OPT_RUNTIME = BIT(5), /* May be specified at runtime */
+ OPT_HUMAN_READABLE = BIT(6),
+ OPT_MUST_BE_POW_2 = BIT(7), /* Must be power of 2 */
+ OPT_SB_FIELD_SECTORS = BIT(8), /* Superblock field is >> 9 of actual value */
+ OPT_SB_FIELD_ILOG2 = BIT(9), /* Superblock field is ilog2 of actual value */
+ OPT_SB_FIELD_ONE_BIAS = BIT(10), /* 0 means default value */
+ OPT_HIDDEN = BIT(11),
};
enum opt_type {
BCH_OPT_BOOL,
BCH_OPT_UINT,
BCH_OPT_STR,
+ BCH_OPT_BITFIELD,
BCH_OPT_FN,
};
@@ -263,6 +265,11 @@ enum fsck_err_opts {
OPT_BOOL(), \
BCH2_NO_SB_OPT, true, \
NULL, "Enable inline data extents") \
+ x(promote_whole_extents, u8, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_PROMOTE_WHOLE_EXTENTS, true, \
+ NULL, "Promote whole extents, instead of just the part being read")\
x(acl, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT, \
OPT_BOOL(), \
@@ -366,6 +373,16 @@ enum fsck_err_opts {
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
NULL, "Exit recovery immediately prior to journal replay")\
+ x(recovery_passes, u64, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BITFIELD(bch2_recovery_passes), \
+ BCH2_NO_SB_OPT, 0, \
+ NULL, "Recovery passes to run explicitly") \
+ x(recovery_passes_exclude, u64, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BITFIELD(bch2_recovery_passes), \
+ BCH2_NO_SB_OPT, 0, \
+ NULL, "Recovery passes to exclude") \
x(recovery_pass_last, u8, \
OPT_FS|OPT_MOUNT, \
OPT_STR_NOLIMIT(bch2_recovery_passes), \
@@ -472,11 +489,16 @@ enum fsck_err_opts {
BCH2_NO_SB_OPT, 0, \
"size", "Size of filesystem on device") \
x(durability, u8, \
- OPT_DEVICE, \
+ OPT_DEVICE|OPT_SB_FIELD_ONE_BIAS, \
OPT_UINT(0, BCH_REPLICAS_MAX), \
BCH2_NO_SB_OPT, 1, \
"n", "Data written to this device will be considered\n"\
"to have already been replicated n times") \
+ x(data_allowed, u8, \
+ OPT_DEVICE, \
+ OPT_BITFIELD(__bch2_data_types), \
+ BCH2_NO_SB_OPT, BIT(BCH_DATA_journal)|BIT(BCH_DATA_btree)|BIT(BCH_DATA_user),\
+ "types", "Allowed data types for this device: journal, btree, and/or user")\
x(btree_node_prefetch, u8, \
OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
@@ -484,6 +506,11 @@ enum fsck_err_opts {
NULL, "BTREE_ITER_prefetch casuse btree nodes to be\n"\
" prefetched sequentially")
+#define BCH_DEV_OPT_SETTERS() \
+ x(discard, BCH_MEMBER_DISCARD) \
+ x(durability, BCH_MEMBER_DURABILITY) \
+ x(data_allowed, BCH_MEMBER_DATA_ALLOWED)
+
struct bch_opts {
#define x(_name, _bits, ...) unsigned _name##_defined:1;
BCH_OPTS()
@@ -563,8 +590,10 @@ void bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64);
u64 bch2_opt_from_sb(struct bch_sb *, enum bch_opt_id);
int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *);
-void __bch2_opt_set_sb(struct bch_sb *, const struct bch_option *, u64);
-void bch2_opt_set_sb(struct bch_fs *, const struct bch_option *, u64);
+void __bch2_opt_set_sb(struct bch_sb *, int, const struct bch_option *, u64);
+
+struct bch_dev;
+void bch2_opt_set_sb(struct bch_fs *, struct bch_dev *, const struct bch_option *, u64);
int bch2_opt_lookup(const char *);
int bch2_opt_validate(const struct bch_option *, u64, struct printbuf *);
@@ -576,6 +605,10 @@ int bch2_opt_parse(struct bch_fs *, const struct bch_option *,
void bch2_opt_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *,
const struct bch_option *, u64, unsigned);
+void bch2_opts_to_text(struct printbuf *,
+ struct bch_opts,
+ struct bch_fs *, struct bch_sb *,
+ unsigned, unsigned, unsigned);
int bch2_opt_check_may_set(struct bch_fs *, int, u64);
int bch2_opts_check_may_set(struct bch_fs *);
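OPT_SB_FIELD_ONE_BIAS stores the value biased by one, so that a zero
superblock field can mean "unset, use the default". A sketch of the round
trip for durability, assuming the decode side mirrors __bch2_opt_set_sb():

	u64 v = 1;			/* durability requested by the user */
	u64 sb_field = v + 1;		/* stored as 2; 0 keeps meaning "default" */
	u64 decoded = sb_field ? sb_field - 1 : 1;	/* 0 falls back to the default */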
diff --git a/fs/bcachefs/rcu_pending.c b/fs/bcachefs/rcu_pending.c
new file mode 100644
index 000000000000..40a20192eee8
--- /dev/null
+++ b/fs/bcachefs/rcu_pending.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "%s() " fmt "\n", __func__
+
+#include <linux/generic-radix-tree.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/srcu.h>
+#include <linux/vmalloc.h>
+
+#include "rcu_pending.h"
+#include "darray.h"
+#include "util.h"
+
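+/* iterate a fixed-size array, _i pointing at each element in turn: */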
+#define static_array_for_each(_a, _i) \
+ for (typeof(&(_a)[0]) _i = _a; \
+ _i < (_a) + ARRAY_SIZE(_a); \
+ _i++)
+
+enum rcu_pending_special {
+ RCU_PENDING_KVFREE = 1,
+ RCU_PENDING_CALL_RCU = 2,
+};
+
+#define RCU_PENDING_KVFREE_FN ((rcu_pending_process_fn) (ulong) RCU_PENDING_KVFREE)
+#define RCU_PENDING_CALL_RCU_FN ((rcu_pending_process_fn) (ulong) RCU_PENDING_CALL_RCU)
+
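+/*
+ * Helpers that dispatch to SRCU when an srcu_struct was supplied at init
+ * time, and to plain RCU otherwise:
+ */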
+static inline unsigned long __get_state_synchronize_rcu(struct srcu_struct *ssp)
+{
+ return ssp
+ ? get_state_synchronize_srcu(ssp)
+ : get_state_synchronize_rcu();
+}
+
+static inline unsigned long __start_poll_synchronize_rcu(struct srcu_struct *ssp)
+{
+ return ssp
+ ? start_poll_synchronize_srcu(ssp)
+ : start_poll_synchronize_rcu();
+}
+
+static inline bool __poll_state_synchronize_rcu(struct srcu_struct *ssp, unsigned long cookie)
+{
+ return ssp
+ ? poll_state_synchronize_srcu(ssp, cookie)
+ : poll_state_synchronize_rcu(cookie);
+}
+
+static inline void __rcu_barrier(struct srcu_struct *ssp)
+{
+ return ssp
+ ? srcu_barrier(ssp)
+ : rcu_barrier();
+}
+
+static inline void __call_rcu(struct srcu_struct *ssp, struct rcu_head *rhp,
+ rcu_callback_t func)
+{
+ if (ssp)
+ call_srcu(ssp, rhp, func);
+ else
+ call_rcu(rhp, func);
+}
+
+struct rcu_pending_seq {
+ /*
+ * We're using a radix tree like a vector - we're just pushing elements
+ * onto the end; we're using a radix tree instead of an actual vector to
+ * avoid reallocation overhead
+ */
+ GENRADIX(struct rcu_head *) objs;
+ size_t nr;
+ struct rcu_head **cursor;
+ unsigned long seq;
+};
+
+struct rcu_pending_list {
+ struct rcu_head *head;
+ struct rcu_head *tail;
+ unsigned long seq;
+};
+
+struct rcu_pending_pcpu {
+ struct rcu_pending *parent;
+ spinlock_t lock;
+ int cpu;
+
+ /*
+ * We can't bound the number of unprocessed gp sequence numbers, and we
+ * can't efficiently merge radix trees for expired grace periods, so we
+ * need darray/vector:
+ */
+ DARRAY_PREALLOCATED(struct rcu_pending_seq, 4) objs;
+
+ /* Third entry is for expired objects: */
+ struct rcu_pending_list lists[NUM_ACTIVE_RCU_POLL_OLDSTATE + 1];
+
+ struct rcu_head cb;
+ bool cb_armed;
+ struct work_struct work;
+};
+
+static bool __rcu_pending_has_pending(struct rcu_pending_pcpu *p)
+{
+ if (p->objs.nr)
+ return true;
+
+ static_array_for_each(p->lists, i)
+ if (i->head)
+ return true;
+
+ return false;
+}
+
+static void rcu_pending_list_merge(struct rcu_pending_list *l1,
+ struct rcu_pending_list *l2)
+{
+#ifdef __KERNEL__
+ if (!l1->head)
+ l1->head = l2->head;
+ else
+ l1->tail->next = l2->head;
+#else
+ if (!l1->head)
+ l1->head = l2->head;
+ else
+ l1->tail->next.next = (void *) l2->head;
+#endif
+
+ l1->tail = l2->tail;
+ l2->head = l2->tail = NULL;
+}
+
+static void rcu_pending_list_add(struct rcu_pending_list *l,
+ struct rcu_head *n)
+{
+#ifdef __KERNEL__
+ if (!l->head)
+ l->head = n;
+ else
+ l->tail->next = n;
+ l->tail = n;
+ n->next = NULL;
+#else
+ if (!l->head)
+ l->head = n;
+ else
+ l->tail->next.next = (void *) n;
+ l->tail = n;
+ n->next.next = NULL;
+#endif
+}
+
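+/* move lists whose grace period has completed onto the expired list: */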
+static void merge_expired_lists(struct rcu_pending_pcpu *p)
+{
+ struct rcu_pending_list *expired = &p->lists[NUM_ACTIVE_RCU_POLL_OLDSTATE];
+
+ for (struct rcu_pending_list *i = p->lists; i < expired; i++)
+ if (i->head && __poll_state_synchronize_rcu(p->parent->srcu, i->seq))
+ rcu_pending_list_merge(expired, i);
+}
+
+#ifndef __KERNEL__
+static inline void kfree_bulk(size_t nr, void ** p)
+{
+ while (nr--)
+ kfree(*p++);
+}
+
+#define local_irq_save(flags) \
+do { \
+ flags = 0; \
+} while (0)
+#endif
+
+static noinline void __process_finished_items(struct rcu_pending *pending,
+ struct rcu_pending_pcpu *p,
+ unsigned long flags)
+{
+ struct rcu_pending_list *expired = &p->lists[NUM_ACTIVE_RCU_POLL_OLDSTATE];
+ struct rcu_pending_seq objs = {};
+ struct rcu_head *list = NULL;
+
+ if (p->objs.nr &&
+ __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) {
+ objs = p->objs.data[0];
+ darray_remove_item(&p->objs, p->objs.data);
+ }
+
+ merge_expired_lists(p);
+
+ list = expired->head;
+ expired->head = expired->tail = NULL;
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ switch ((ulong) pending->process) {
+ case RCU_PENDING_KVFREE:
+ for (size_t i = 0; i < objs.nr; ) {
+ size_t nr_this_node = min(GENRADIX_NODE_SIZE / sizeof(void *), objs.nr - i);
+
+ kfree_bulk(nr_this_node, (void **) genradix_ptr(&objs.objs, i));
+ i += nr_this_node;
+ }
+ genradix_free(&objs.objs);
+
+ while (list) {
+ struct rcu_head *obj = list;
+#ifdef __KERNEL__
+ list = obj->next;
+#else
+ list = (void *) obj->next.next;
+#endif
+
+ /*
+ * low bit of pointer indicates whether rcu_head needs
+ * to be freed - kvfree_rcu_mightsleep()
+ */
+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN == 0);
+
+ void *ptr = (void *)(((unsigned long) obj->func) & ~1UL);
+ bool free_head = ((unsigned long) obj->func) & 1UL;
+
+ kvfree(ptr);
+ if (free_head)
+ kfree(obj);
+ }
+
+ break;
+
+ case RCU_PENDING_CALL_RCU:
+ for (size_t i = 0; i < objs.nr; i++) {
+ struct rcu_head *obj = *genradix_ptr(&objs.objs, i);
+ obj->func(obj);
+ }
+ genradix_free(&objs.objs);
+
+ while (list) {
+ struct rcu_head *obj = list;
+#ifdef __KERNEL__
+ list = obj->next;
+#else
+ list = (void *) obj->next.next;
+#endif
+ obj->func(obj);
+ }
+ break;
+
+ default:
+ for (size_t i = 0; i < objs.nr; i++)
+ pending->process(pending, *genradix_ptr(&objs.objs, i));
+ genradix_free(&objs.objs);
+
+ while (list) {
+ struct rcu_head *obj = list;
+#ifdef __KERNEL__
+ list = obj->next;
+#else
+ list = (void *) obj->next.next;
+#endif
+ pending->process(pending, obj);
+ }
+ break;
+ }
+}
+
+static bool process_finished_items(struct rcu_pending *pending,
+ struct rcu_pending_pcpu *p,
+ unsigned long flags)
+{
+ /*
+ * XXX: we should grab the gp seq once and avoid multiple function
+ * calls; this is called from the __rcu_pending_enqueue() fastpath in
+ * may_sleep==true mode
+ */
+ if ((p->objs.nr && __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) ||
+ (p->lists[0].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[0].seq)) ||
+ (p->lists[1].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[1].seq)) ||
+ p->lists[2].head) {
+ __process_finished_items(pending, p, flags);
+ return true;
+ }
+
+ return false;
+}
+
+static void rcu_pending_work(struct work_struct *work)
+{
+ struct rcu_pending_pcpu *p =
+ container_of(work, struct rcu_pending_pcpu, work);
+ struct rcu_pending *pending = p->parent;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&p->lock, flags);
+ } while (process_finished_items(pending, p, flags));
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
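+/*
+ * Grace period callback: punt processing to the workqueue, and re-arm if
+ * more objects came in while we were waiting:
+ */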
+static void rcu_pending_rcu_cb(struct rcu_head *rcu)
+{
+ struct rcu_pending_pcpu *p = container_of(rcu, struct rcu_pending_pcpu, cb);
+
+ schedule_work_on(p->cpu, &p->work);
+
+ unsigned long flags;
+ spin_lock_irqsave(&p->lock, flags);
+ if (__rcu_pending_has_pending(p)) {
+ spin_unlock_irqrestore(&p->lock, flags);
+ __call_rcu(p->parent->srcu, &p->cb, rcu_pending_rcu_cb);
+ } else {
+ p->cb_armed = false;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+static __always_inline struct rcu_pending_seq *
+get_object_radix(struct rcu_pending_pcpu *p, unsigned long seq)
+{
+ darray_for_each_reverse(p->objs, objs)
+ if (objs->seq == seq)
+ return objs;
+
+ if (darray_push_gfp(&p->objs, ((struct rcu_pending_seq) { .seq = seq }), GFP_ATOMIC))
+ return NULL;
+
+ return &darray_last(p->objs);
+}
+
+static noinline bool
+rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq,
+ struct rcu_head *head, void *ptr,
+ unsigned long *flags)
+{
+ if (ptr) {
+ if (!head) {
+ /*
+ * kvfree_rcu_mightsleep(): we weren't passed an
+ * rcu_head, but we need one: use the low bit of the
+ * pointer to free to flag that the head needs to be
+ * freed as well:
+ */
+ ptr = (void *)(((unsigned long) ptr)|1UL);
+ head = kmalloc(sizeof(*head), __GFP_NOWARN);
+ if (!head) {
+ spin_unlock_irqrestore(&p->lock, *flags);
+ head = kmalloc(sizeof(*head), GFP_KERNEL|__GFP_NOFAIL);
+ /*
+ * dropped lock, did GFP_KERNEL allocation,
+ * check for gp expiration
+ */
+ if (unlikely(__poll_state_synchronize_rcu(p->parent->srcu, seq))) {
+ kvfree(--ptr);
+ kfree(head);
+ spin_lock_irqsave(&p->lock, *flags);
+ return false;
+ }
+ }
+ }
+
+ head->func = ptr;
+ }
+again:
+ for (struct rcu_pending_list *i = p->lists;
+ i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) {
+ if (i->seq == seq) {
+ rcu_pending_list_add(i, head);
+ return false;
+ }
+ }
+
+ for (struct rcu_pending_list *i = p->lists;
+ i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) {
+ if (!i->head) {
+ i->seq = seq;
+ rcu_pending_list_add(i, head);
+ return true;
+ }
+ }
+
+ merge_expired_lists(p);
+ goto again;
+}
+
+/*
+ * __rcu_pending_enqueue: enqueue a pending RCU item, to be processed (via
+ * pending->process) once a grace period elapses.
+ *
+ * Attempt to enqueue items onto a radix tree; if memory allocation fails, fall
+ * back to a linked list.
+ *
+ * - If @ptr is NULL, we're enqueuing an item for a generic @pending with a
+ * process callback
+ *
+ * - If @ptr and @head are both not NULL, we're kvfree_rcu()
+ *
+ * - If @ptr is not NULL and @head is, we're kvfree_rcu_mightsleep()
+ *
+ * - If @may_sleep is true, will do GFP_KERNEL memory allocations and process
+ * expired items.
+ */
+static __always_inline void
+__rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head,
+ void *ptr, bool may_sleep)
+{
+ struct rcu_pending_pcpu *p;
+ struct rcu_pending_seq *objs;
+ struct genradix_node *new_node = NULL;
+ unsigned long seq, flags;
+ bool start_gp = false;
+
+ BUG_ON((ptr != NULL) != (pending->process == RCU_PENDING_KVFREE_FN));
+
+ local_irq_save(flags);
+ p = this_cpu_ptr(pending->p);
+ spin_lock(&p->lock);
+ seq = __get_state_synchronize_rcu(pending->srcu);
+restart:
+ if (may_sleep &&
+ unlikely(process_finished_items(pending, p, flags)))
+ goto check_expired;
+
+ /*
+ * In kvfree_rcu() mode, the radix tree is only for slab pointers so
+ * that we can do kfree_bulk() - vmalloc pointers always use the linked
+ * list:
+ */
+ if (ptr && unlikely(is_vmalloc_addr(ptr)))
+ goto list_add;
+
+ objs = get_object_radix(p, seq);
+ if (unlikely(!objs))
+ goto list_add;
+
+ if (unlikely(!objs->cursor)) {
+ /*
+ * New radix tree nodes must be added under @p->lock because the
+ * tree root is in a darray that can be resized (typically,
+ * genradix supports concurrent unlocked allocation of new
+ * nodes) - hence preallocation and the retry loop:
+ */
+ objs->cursor = genradix_ptr_alloc_preallocated_inlined(&objs->objs,
+ objs->nr, &new_node, GFP_ATOMIC|__GFP_NOWARN);
+ if (unlikely(!objs->cursor)) {
+ if (may_sleep) {
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ gfp_t gfp = GFP_KERNEL;
+ if (!head)
+ gfp |= __GFP_NOFAIL;
+
+ new_node = genradix_alloc_node(gfp);
+ if (!new_node)
+ may_sleep = false;
+ goto check_expired;
+ }
+list_add:
+ start_gp = rcu_pending_enqueue_list(p, seq, head, ptr, &flags);
+ goto start_gp;
+ }
+ }
+
+ *objs->cursor++ = ptr ?: head;
+ /* zero cursor if we hit the end of a radix tree node: */
+ if (!(((ulong) objs->cursor) & (GENRADIX_NODE_SIZE - 1)))
+ objs->cursor = NULL;
+ start_gp = !objs->nr;
+ objs->nr++;
+start_gp:
+ if (unlikely(start_gp)) {
+ /*
+ * We only have one callback (ideally, we would have one for
+ * every outstanding grace period) - so if our callback is
+ * already in flight, we may still have to start a grace period
+ * (since we used get_state() above, not start_poll())
+ */
+ if (!p->cb_armed) {
+ p->cb_armed = true;
+ __call_rcu(pending->srcu, &p->cb, rcu_pending_rcu_cb);
+ } else {
+ __start_poll_synchronize_rcu(pending->srcu);
+ }
+ }
+ spin_unlock_irqrestore(&p->lock, flags);
+free_node:
+ if (new_node)
+ genradix_free_node(new_node);
+ return;
+check_expired:
+ if (unlikely(__poll_state_synchronize_rcu(pending->srcu, seq))) {
+ switch ((ulong) pending->process) {
+ case RCU_PENDING_KVFREE:
+ kvfree(ptr);
+ break;
+ case RCU_PENDING_CALL_RCU:
+ head->func(head);
+ break;
+ default:
+ pending->process(pending, head);
+ break;
+ }
+ goto free_node;
+ }
+
+ local_irq_save(flags);
+ p = this_cpu_ptr(pending->p);
+ spin_lock(&p->lock);
+ goto restart;
+}
+
+void rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *obj)
+{
+ __rcu_pending_enqueue(pending, obj, NULL, true);
+}
+
+static struct rcu_head *rcu_pending_pcpu_dequeue(struct rcu_pending_pcpu *p)
+{
+ struct rcu_head *ret = NULL;
+
+ spin_lock_irq(&p->lock);
+ darray_for_each(p->objs, objs)
+ if (objs->nr) {
+ ret = *genradix_ptr(&objs->objs, --objs->nr);
+ objs->cursor = NULL;
+ if (!objs->nr)
+ genradix_free(&objs->objs);
+ goto out;
+ }
+
+ static_array_for_each(p->lists, i)
+ if (i->head) {
+ ret = i->head;
+#ifdef __KERNEL__
+ i->head = ret->next;
+#else
+ i->head = (void *) ret->next.next;
+#endif
+ if (!i->head)
+ i->tail = NULL;
+ goto out;
+ }
+out:
+ spin_unlock_irq(&p->lock);
+
+ return ret;
+}
+
+struct rcu_head *rcu_pending_dequeue(struct rcu_pending *pending)
+{
+ return rcu_pending_pcpu_dequeue(raw_cpu_ptr(pending->p));
+}
+
+struct rcu_head *rcu_pending_dequeue_from_all(struct rcu_pending *pending)
+{
+ struct rcu_head *ret = rcu_pending_dequeue(pending);
+
+ if (ret)
+ return ret;
+
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ ret = rcu_pending_pcpu_dequeue(per_cpu_ptr(pending->p, cpu));
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static bool rcu_pending_has_pending_or_armed(struct rcu_pending *pending)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+ spin_lock_irq(&p->lock);
+ if (__rcu_pending_has_pending(p) || p->cb_armed) {
+ spin_unlock_irq(&p->lock);
+ return true;
+ }
+ spin_unlock_irq(&p->lock);
+ }
+
+ return false;
+}
+
+void rcu_pending_exit(struct rcu_pending *pending)
+{
+ int cpu;
+
+ if (!pending->p)
+ return;
+
+ while (rcu_pending_has_pending_or_armed(pending)) {
+ __rcu_barrier(pending->srcu);
+
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+ flush_work(&p->work);
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+ flush_work(&p->work);
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+
+ static_array_for_each(p->lists, i)
+ WARN_ON(i->head);
+ WARN_ON(p->objs.nr);
+ darray_exit(&p->objs);
+ }
+ free_percpu(pending->p);
+}
+
+/**
+ * rcu_pending_init - initialize an rcu_pending
+ *
+ * @pending: Object to init
+ * @srcu: May optionally be used with an srcu_struct; if NULL, uses normal
+ * RCU flavor
+ * @process: Callback function invoked on objects once their grace periods
+ * have elapsed; if NULL, kvfree() is used.
+ */
+int rcu_pending_init(struct rcu_pending *pending,
+ struct srcu_struct *srcu,
+ rcu_pending_process_fn process)
+{
+ pending->p = alloc_percpu(struct rcu_pending_pcpu);
+ if (!pending->p)
+ return -ENOMEM;
+
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct rcu_pending_pcpu *p = per_cpu_ptr(pending->p, cpu);
+ p->parent = pending;
+ p->cpu = cpu;
+ spin_lock_init(&p->lock);
+ darray_init(&p->objs);
+ INIT_WORK(&p->work, rcu_pending_work);
+ }
+
+ pending->srcu = srcu;
+ pending->process = process;
+
+ return 0;
+}
diff --git a/fs/bcachefs/rcu_pending.h b/fs/bcachefs/rcu_pending.h
new file mode 100644
index 000000000000..71a2f4ddaade
--- /dev/null
+++ b/fs/bcachefs/rcu_pending.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RCU_PENDING_H
+#define _LINUX_RCU_PENDING_H
+
+#include <linux/rcupdate.h>
+
+struct rcu_pending;
+typedef void (*rcu_pending_process_fn)(struct rcu_pending *, struct rcu_head *);
+
+struct rcu_pending_pcpu;
+
+struct rcu_pending {
+ struct rcu_pending_pcpu __percpu *p;
+ struct srcu_struct *srcu;
+ rcu_pending_process_fn process;
+};
+
+void rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *obj);
+struct rcu_head *rcu_pending_dequeue(struct rcu_pending *pending);
+struct rcu_head *rcu_pending_dequeue_from_all(struct rcu_pending *pending);
+
+void rcu_pending_exit(struct rcu_pending *pending);
+int rcu_pending_init(struct rcu_pending *pending,
+ struct srcu_struct *srcu,
+ rcu_pending_process_fn process);
+
+#endif /* _LINUX_RCU_PENDING_H */
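A minimal usage sketch of this interface, assuming plain RCU and a
hypothetical my_obj type (none of this is part of the patch):

	struct my_obj {
		struct rcu_head	rcu;
		int		data;
	};

	static void my_obj_process(struct rcu_pending *pending,
				   struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct my_obj, rcu));
	}

	static struct rcu_pending pending;

	/* a NULL srcu_struct selects regular RCU: */
	rcu_pending_init(&pending, NULL, my_obj_process);

	/* obj is handed to my_obj_process() once a grace period elapses: */
	rcu_pending_enqueue(&pending, &obj->rcu);

	/* teardown waits for and processes everything outstanding: */
	rcu_pending_exit(&pending);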
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index cf81e5128c3a..2d299a37cf07 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -13,6 +13,7 @@
#include "errcode.h"
#include "error.h"
#include "inode.h"
+#include "io_write.h"
#include "move.h"
#include "rebalance.h"
#include "subvolume.h"
@@ -156,6 +157,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
data_opts->rewrite_ptrs =
bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression);
data_opts->target = r->target;
+ data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
if (!data_opts->rewrite_ptrs) {
/*
@@ -263,6 +265,7 @@ static bool rebalance_pred(struct bch_fs *c, void *arg,
data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
data_opts->target = target;
+ data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
return data_opts->rewrite_ptrs != 0;
}
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 36de1c6fe8c3..6db72d3bad7d 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -97,7 +97,7 @@ static void bch2_reconstruct_alloc(struct bch_fs *c)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+ c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
bch2_shoot_down_journal_keys(c, BTREE_ID_alloc,
@@ -151,7 +151,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
struct bkey_s_c old = bch2_btree_path_peek_slot(btree_iter_path(trans, &iter), &u);
/* Has this delta already been applied to the btree? */
- if (bversion_cmp(old.k->version, k->k->k.version) >= 0) {
+ if (bversion_cmp(old.k->bversion, k->k->k.bversion) >= 0) {
ret = 0;
goto out;
}
@@ -525,17 +525,17 @@ static int read_btree_roots(struct bch_fs *c)
"error reading btree root %s l=%u: %s",
bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
if (btree_id_is_alloc(i)) {
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
r->error = 0;
- } else if (!(c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
+ } else if (!(c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
bch_info(c, "will run btree node scan");
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
}
ret = 0;
@@ -706,17 +706,19 @@ int bch2_fs_recovery(struct bch_fs *c)
if (check_version_upgrade(c))
write_sb = true;
- c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+ c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
if (write_sb)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+ c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
if (c->opts.fsck)
set_bit(BCH_FS_fsck_running, &c->flags);
+ if (c->sb.clean)
+ set_bit(BCH_FS_clean_recovery, &c->flags);
ret = bch2_blacklist_table_initialize(c);
if (ret) {
@@ -862,6 +864,9 @@ use_clean:
clear_bit(BCH_FS_fsck_running, &c->flags);
+ /* in case we don't run journal replay, i.e. norecovery mode */
+ set_bit(BCH_FS_accounting_replay_done, &c->flags);
+
/* fsync if we fixed errors */
if (test_bit(BCH_FS_errors_fixed, &c->flags) &&
bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) {
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index 73339a0a3111..735b8adc8f9d 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -40,7 +40,7 @@ static int bch2_set_may_go_rw(struct bch_fs *c)
set_bit(BCH_FS_may_go_rw, &c->flags);
- if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
+ if (keys->nr || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes)
return bch2_fs_read_write_early(c);
return 0;
}
@@ -97,14 +97,14 @@ u64 bch2_recovery_passes_from_stable(u64 v)
int bch2_run_explicit_recovery_pass(struct bch_fs *c,
enum bch_recovery_pass pass)
{
- if (c->recovery_passes_explicit & BIT_ULL(pass))
+ if (c->opts.recovery_passes & BIT_ULL(pass))
return 0;
bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
bch2_recovery_passes[pass], pass,
bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
- c->recovery_passes_explicit |= BIT_ULL(pass);
+ c->opts.recovery_passes |= BIT_ULL(pass);
if (c->curr_recovery_pass >= pass) {
c->curr_recovery_pass = pass;
@@ -161,7 +161,9 @@ static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pa
{
struct recovery_pass_fn *p = recovery_pass_fns + pass;
- if (c->recovery_passes_explicit & BIT_ULL(pass))
+ if (c->opts.recovery_passes_exclude & BIT_ULL(pass))
+ return false;
+ if (c->opts.recovery_passes & BIT_ULL(pass))
return true;
if ((p->when & PASS_FSCK) && c->opts.fsck)
return true;
diff --git a/fs/bcachefs/recovery_passes_types.h b/fs/bcachefs/recovery_passes_types.h
index 8c7dee5983d2..50406ce0e4ef 100644
--- a/fs/bcachefs/recovery_passes_types.h
+++ b/fs/bcachefs/recovery_passes_types.h
@@ -50,7 +50,7 @@
x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \
x(check_nlinks, 31, PASS_FSCK) \
x(resume_logged_ops, 23, PASS_ALWAYS) \
- x(delete_dead_inodes, 32, PASS_FSCK|PASS_UNCLEAN) \
+ x(delete_dead_inodes, 32, PASS_ALWAYS) \
x(fix_reflink_p, 33, 0) \
x(set_fs_needs_rebalance, 34, 0) \
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index e59c0abb4772..f457925fa362 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -367,7 +367,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
r_v->k.type = bkey_type_to_indirect(&orig->k);
r_v->k.p = reflink_iter.pos;
bch2_key_resize(&r_v->k, orig->k.size);
- r_v->k.version = orig->k.version;
+ r_v->k.bversion = orig->k.bversion;
set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 1f34c92a6d11..bcb3276747e0 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -66,9 +66,9 @@ void bch2_replicas_entry_to_text(struct printbuf *out,
prt_printf(out, "]");
}
-int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
- struct bch_sb *sb,
- struct printbuf *err)
+static int bch2_replicas_entry_validate_locked(struct bch_replicas_entry_v1 *r,
+ struct bch_sb *sb,
+ struct printbuf *err)
{
if (!r->nr_devs) {
prt_printf(err, "no devices in entry ");
@@ -94,6 +94,16 @@ bad:
return -BCH_ERR_invalid_replicas_entry;
}
+int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
+ struct bch_fs *c,
+ struct printbuf *err)
+{
+ mutex_lock(&c->sb_lock);
+ int ret = bch2_replicas_entry_validate_locked(r, c->disk_sb.sb, err);
+ mutex_unlock(&c->sb_lock);
+ return ret;
+}
+
void bch2_cpu_replicas_to_text(struct printbuf *out,
struct bch_replicas_cpu *r)
{
@@ -123,7 +133,7 @@ static void extent_to_replicas(struct bkey_s_c k,
continue;
if (!p.has_ec)
- r->devs[r->nr_devs++] = p.ptr.dev;
+ replicas_entry_add_dev(r, p.ptr.dev);
else
r->nr_required = 0;
}
@@ -140,7 +150,7 @@ static void stripe_to_replicas(struct bkey_s_c k,
for (ptr = s.v->ptrs;
ptr < s.v->ptrs + s.v->nr_blocks;
ptr++)
- r->devs[r->nr_devs++] = ptr->dev;
+ replicas_entry_add_dev(r, ptr->dev);
}
void bch2_bkey_to_replicas(struct bch_replicas_entry_v1 *e,
@@ -181,7 +191,7 @@ void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *e,
e->nr_required = 1;
darray_for_each(devs, i)
- e->devs[e->nr_devs++] = *i;
+ replicas_entry_add_dev(e, *i);
bch2_replicas_entry_sort(e);
}
@@ -676,7 +686,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
struct bch_replicas_entry_v1 *e =
cpu_replicas_entry(cpu_r, i);
- int ret = bch2_replicas_entry_validate(e, sb, err);
+ int ret = bch2_replicas_entry_validate_locked(e, sb, err);
if (ret)
return ret;
@@ -795,12 +805,12 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
for (unsigned i = 0; i < e->nr_devs; i++) {
nr_online += test_bit(e->devs[i], devs.d);
- struct bch_dev *ca = bch2_dev_rcu(c, e->devs[i]);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, e->devs[i]);
nr_failed += !ca || ca->mi.state == BCH_MEMBER_STATE_failed;
}
rcu_read_unlock();
- if (nr_failed == e->nr_devs)
+ if (nr_online + nr_failed == e->nr_devs)
continue;
if (nr_online < e->nr_required)
diff --git a/fs/bcachefs/replicas.h b/fs/bcachefs/replicas.h
index 622482559c3d..5aba2c1ce133 100644
--- a/fs/bcachefs/replicas.h
+++ b/fs/bcachefs/replicas.h
@@ -10,7 +10,7 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *);
void bch2_replicas_entry_to_text(struct printbuf *,
struct bch_replicas_entry_v1 *);
int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *,
- struct bch_sb *, struct printbuf *);
+ struct bch_fs *, struct printbuf *);
void bch2_cpu_replicas_to_text(struct printbuf *, struct bch_replicas_cpu *);
static inline struct bch_replicas_entry_v1 *
diff --git a/fs/bcachefs/replicas_format.h b/fs/bcachefs/replicas_format.h
index b97208195d06..b7eff904acdb 100644
--- a/fs/bcachefs/replicas_format.h
+++ b/fs/bcachefs/replicas_format.h
@@ -5,7 +5,7 @@
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
- __u8 devs[];
+ __u8 devs[] __counted_by(nr_devs);
} __packed;
struct bch_sb_field_replicas_v0 {
@@ -17,7 +17,7 @@ struct bch_replicas_entry_v1 {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
- __u8 devs[];
+ __u8 devs[] __counted_by(nr_devs);
} __packed;
struct bch_sb_field_replicas {
@@ -28,4 +28,9 @@ struct bch_sb_field_replicas {
#define replicas_entry_bytes(_i) \
(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
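+/*
+ * nr_devs is incremented before the store so the __counted_by() bounds
+ * check sees a valid index:
+ */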
+#define replicas_entry_add_dev(e, d) ({ \
+ (e)->nr_devs++; \
+ (e)->devs[(e)->nr_devs - 1] = (d); \
+})
+
#endif /* _BCACHEFS_REPLICAS_FORMAT_H */
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
index c57d42bb8d1b..005275281804 100644
--- a/fs/bcachefs/sb-clean.c
+++ b/fs/bcachefs/sb-clean.c
@@ -155,7 +155,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
c->sb.clean = false;
mutex_unlock(&c->sb_lock);
- return NULL;
+ return ERR_PTR(-BCH_ERR_invalid_sb_clean);
}
clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
@@ -167,6 +167,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
ret = bch2_sb_clean_validate_late(c, clean, READ);
if (ret) {
+ kfree(clean);
mutex_unlock(&c->sb_lock);
return ERR_PTR(ret);
}
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index c7e4cdd3f6a5..5102059a0f1d 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -312,8 +312,7 @@ static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb,
if (!first)
prt_char(out, ',');
first = false;
- unsigned e = le16_to_cpu(i->errors[j]);
- prt_str(out, e < BCH_SB_ERR_MAX ? bch2_sb_error_strs[e] : "(unknown)");
+ bch2_sb_error_id_to_text(out, le16_to_cpu(i->errors[j]));
}
prt_newline(out);
}
@@ -353,7 +352,9 @@ int bch2_sb_downgrade_update(struct bch_fs *c)
for (unsigned i = 0; i < src->nr_errors; i++)
dst->errors[i] = cpu_to_le16(src->errors[i]);
- downgrade_table_extra(c, &table);
+ ret = downgrade_table_extra(c, &table);
+ if (ret)
+ goto out;
if (!dst->recovery_passes[0] &&
!dst->recovery_passes[1] &&
@@ -399,7 +400,7 @@ void bch2_sb_set_downgrade(struct bch_fs *c, unsigned new_minor, unsigned old_mi
for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
unsigned e = le16_to_cpu(i->errors[j]);
- if (e < BCH_SB_ERR_MAX)
+ if (e < BCH_FSCK_ERR_MAX)
__set_bit(e, c->sb.errors_silent);
if (e < sizeof(ext->errors_silent) * 8)
__set_bit_le64(e, ext->errors_silent);
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
index c1270d790e43..013a96883b4e 100644
--- a/fs/bcachefs/sb-errors.c
+++ b/fs/bcachefs/sb-errors.c
@@ -7,12 +7,12 @@
const char * const bch2_sb_error_strs[] = {
#define x(t, n, ...) [n] = #t,
BCH_SB_ERRS()
- NULL
+#undef x
};
-static void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id)
+void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id)
{
- if (id < BCH_SB_ERR_MAX)
+ if (id < BCH_FSCK_ERR_MAX)
prt_str(out, bch2_sb_error_strs[id]);
else
prt_printf(out, "(unknown error %u)", id);
diff --git a/fs/bcachefs/sb-errors.h b/fs/bcachefs/sb-errors.h
index 8889001e7db4..b2357b8e6107 100644
--- a/fs/bcachefs/sb-errors.h
+++ b/fs/bcachefs/sb-errors.h
@@ -6,6 +6,8 @@
extern const char * const bch2_sb_error_strs[];
+void bch2_sb_error_id_to_text(struct printbuf *, enum bch_sb_error_id);
+
extern const struct bch_sb_field_ops bch_sb_field_ops_errors;
void bch2_sb_error_count(struct bch_fs *, enum bch_sb_error_id);
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index f0c14702f9e6..ed5dca5e1161 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -210,22 +210,23 @@ enum bch_fsck_flags {
x(inode_snapshot_mismatch, 196, 0) \
x(inode_unlinked_but_clean, 197, 0) \
x(inode_unlinked_but_nlink_nonzero, 198, 0) \
+ x(inode_unlinked_and_not_open, 281, 0) \
x(inode_checksum_type_invalid, 199, 0) \
x(inode_compression_type_invalid, 200, 0) \
x(inode_subvol_root_but_not_dir, 201, 0) \
- x(inode_i_size_dirty_but_clean, 202, 0) \
- x(inode_i_sectors_dirty_but_clean, 203, 0) \
- x(inode_i_sectors_wrong, 204, 0) \
- x(inode_dir_wrong_nlink, 205, 0) \
- x(inode_dir_multiple_links, 206, 0) \
- x(inode_multiple_links_but_nlink_0, 207, 0) \
- x(inode_wrong_backpointer, 208, 0) \
- x(inode_wrong_nlink, 209, 0) \
- x(inode_unreachable, 210, 0) \
- x(deleted_inode_but_clean, 211, 0) \
- x(deleted_inode_missing, 212, 0) \
- x(deleted_inode_is_dir, 213, 0) \
- x(deleted_inode_not_unlinked, 214, 0) \
+ x(inode_i_size_dirty_but_clean, 202, FSCK_AUTOFIX) \
+ x(inode_i_sectors_dirty_but_clean, 203, FSCK_AUTOFIX) \
+ x(inode_i_sectors_wrong, 204, FSCK_AUTOFIX) \
+ x(inode_dir_wrong_nlink, 205, FSCK_AUTOFIX) \
+ x(inode_dir_multiple_links, 206, FSCK_AUTOFIX) \
+ x(inode_multiple_links_but_nlink_0, 207, FSCK_AUTOFIX) \
+ x(inode_wrong_backpointer, 208, FSCK_AUTOFIX) \
+ x(inode_wrong_nlink, 209, FSCK_AUTOFIX) \
+ x(inode_unreachable, 210, FSCK_AUTOFIX) \
+ x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \
+ x(deleted_inode_missing, 212, FSCK_AUTOFIX) \
+ x(deleted_inode_is_dir, 213, FSCK_AUTOFIX) \
+ x(deleted_inode_not_unlinked, 214, FSCK_AUTOFIX) \
x(extent_overlapping, 215, 0) \
x(key_in_missing_inode, 216, 0) \
x(key_in_wrong_inode_type, 217, 0) \
@@ -255,7 +256,7 @@ enum bch_fsck_flags {
x(dir_loop, 241, 0) \
x(hash_table_key_duplicate, 242, 0) \
x(hash_table_key_wrong_offset, 243, 0) \
- x(unlinked_inode_not_on_deleted_list, 244, 0) \
+ x(unlinked_inode_not_on_deleted_list, 244, FSCK_AUTOFIX) \
x(reflink_p_front_pad_bad, 245, 0) \
x(journal_entry_dup_same_device, 246, 0) \
x(inode_bi_subvol_missing, 247, 0) \
@@ -270,7 +271,7 @@ enum bch_fsck_flags {
x(subvol_children_not_set, 256, 0) \
x(subvol_children_bad, 257, 0) \
x(subvol_loop, 258, 0) \
- x(subvol_unreachable, 259, 0) \
+ x(subvol_unreachable, 259, FSCK_AUTOFIX) \
x(btree_node_bkey_bad_u64s, 260, 0) \
x(btree_node_topology_empty_interior_node, 261, 0) \
x(btree_ptr_v2_min_key_bad, 262, 0) \
@@ -282,8 +283,8 @@ enum bch_fsck_flags {
x(btree_ptr_v2_written_0, 268, 0) \
x(subvol_snapshot_bad, 269, 0) \
x(subvol_inode_bad, 270, 0) \
- x(alloc_key_stripe_sectors_wrong, 271, 0) \
- x(accounting_mismatch, 272, 0) \
+ x(alloc_key_stripe_sectors_wrong, 271, FSCK_AUTOFIX) \
+ x(accounting_mismatch, 272, FSCK_AUTOFIX) \
x(accounting_replicas_not_marked, 273, 0) \
x(invalid_btree_id, 274, 0) \
x(alloc_key_io_time_bad, 275, 0) \
@@ -292,12 +293,14 @@ enum bch_fsck_flags {
x(accounting_key_replicas_nr_devs_0, 278, FSCK_AUTOFIX) \
x(accounting_key_replicas_nr_required_bad, 279, FSCK_AUTOFIX) \
x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \
+ x(accounting_key_version_0, 282, FSCK_AUTOFIX) \
+ x(logged_op_but_clean, 283, FSCK_AUTOFIX) \
+ x(MAX, 284, 0)
enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
BCH_SB_ERRS()
#undef x
- BCH_SB_ERR_MAX
};
struct bch_sb_field_errors {
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index 4b765422dd77..02bcde3c1b02 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -465,3 +465,60 @@ void bch2_dev_btree_bitmap_mark(struct bch_fs *c, struct bkey_s_c k)
__bch2_dev_btree_bitmap_mark(mi, ptr->dev, ptr->offset, btree_sectors(c));
}
}
+
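+/* count member slots that are actually populated: */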
+unsigned bch2_sb_nr_devices(const struct bch_sb *sb)
+{
+ unsigned nr = 0;
+
+ for (unsigned i = 0; i < sb->nr_devices; i++)
+ nr += bch2_member_exists((struct bch_sb *) sb, i);
+ return nr;
+}
+
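+/*
+ * Allocate a member slot: grow nr_devices if there's room, otherwise reuse
+ * the dead member slot with the oldest last_mount:
+ */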
+int bch2_sb_member_alloc(struct bch_fs *c)
+{
+ unsigned dev_idx = c->sb.nr_devices;
+ struct bch_sb_field_members_v2 *mi;
+ unsigned nr_devices;
+ unsigned u64s;
+ int best = -1;
+ u64 best_last_mount = 0;
+
+ if (dev_idx < BCH_SB_MEMBERS_MAX)
+ goto have_slot;
+
+ for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) {
+ /* eventually BCH_SB_MEMBERS_MAX will be raised */
+ if (dev_idx == BCH_SB_MEMBER_INVALID)
+ continue;
+
+ struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
+ if (bch2_member_alive(&m))
+ continue;
+
+ u64 last_mount = le64_to_cpu(m.last_mount);
+ if (best < 0 || last_mount < best_last_mount) {
+ best = dev_idx;
+ best_last_mount = last_mount;
+ }
+ }
+ if (best >= 0) {
+ dev_idx = best;
+ goto have_slot;
+ }
+
+ return -BCH_ERR_ENOSPC_sb_members;
+have_slot:
+ nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
+
+ mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
+ u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
+ le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
+
+ mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
+ if (!mi)
+ return -BCH_ERR_ENOSPC_sb_members;
+
+ c->disk_sb.sb->nr_devices = nr_devices;
+ return dev_idx;
+}
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index dd93192ec065..762083b564ee 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -198,29 +198,37 @@ static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
lockdep_is_held(&c->state_lock));
}
-static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
+static inline struct bch_dev *bch2_dev_rcu_noerror(struct bch_fs *c, unsigned dev)
{
return c && dev < c->sb.nr_devices
? rcu_dereference(c->devs[dev])
: NULL;
}
+void bch2_dev_missing(struct bch_fs *, unsigned);
+
+static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
+{
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
+ if (unlikely(!ca))
+ bch2_dev_missing(c, dev);
+ return ca;
+}
+
static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
{
rcu_read_lock();
- struct bch_dev *ca = bch2_dev_rcu(c, dev);
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
if (ca)
bch2_dev_get(ca);
rcu_read_unlock();
return ca;
}
-void bch2_dev_missing(struct bch_fs *, unsigned);
-
static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
- if (!ca)
+ if (unlikely(!ca))
bch2_dev_missing(c, dev);
return ca;
}
@@ -307,6 +315,8 @@ static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
return false;
}
+unsigned bch2_sb_nr_devices(const struct bch_sb *);
+
static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
return (struct bch_member_cpu) {
@@ -352,4 +362,6 @@ static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64
bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);
+int bch2_sb_member_alloc(struct bch_fs *);
+
#endif /* _BCACHEFS_SB_MEMBERS_H */
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 3a494c5d1247..617d07e53b20 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -169,11 +169,17 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
ret = -1 - SIX_LOCK_write;
}
} else if (type == SIX_LOCK_write && lock->readers) {
- if (try) {
+ if (try)
atomic_add(SIX_LOCK_HELD_write, &lock->state);
- smp_mb__after_atomic();
- }
+ /*
+ * Make sure atomic_add happens before pcpu_read_count and
+ * six_set_bitmask in slow path happens before pcpu_read_count.
+ *
+ * Paired with the smp_mb() in read lock fast path (per-cpu mode)
+ * and the one before atomic_read in read unlock path.
+ */
+ smp_mb();
ret = !pcpu_read_count(lock);
if (try && !ret) {
@@ -335,7 +341,7 @@ static inline bool six_owner_running(struct six_lock *lock)
*/
rcu_read_lock();
struct task_struct *owner = READ_ONCE(lock->owner);
- bool ret = owner ? owner_on_cpu(owner) : !rt_task(current);
+ bool ret = owner ? owner_on_cpu(owner) : !rt_or_dl_task(current);
rcu_read_unlock();
return ret;
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 8b18a9b483a4..1809442b00ee 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -469,6 +469,7 @@ static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
u32 id = snapshot_root;
u32 subvol = 0, s;
+ rcu_read_lock();
while (id) {
s = snapshot_t(c, id)->subvol;
@@ -477,6 +478,7 @@ static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
id = bch2_snapshot_tree_next(c, id);
}
+ rcu_read_unlock();
return subvol;
}
@@ -1782,6 +1784,7 @@ static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
new->k.p.snapshot = leaf_id;
ret = bch2_trans_update(trans, &iter, new, 0);
out:
+ bch2_set_btree_iter_dontneed(&iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index c8c266cb5797..215eed4cce6d 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -270,7 +270,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
desc.hash_bkey(info, bkey_i_to_s_c(insert)),
snapshot),
POS(insert->k.p.inode, U64_MAX),
- BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
+ BTREE_ITER_slots|BTREE_ITER_intent|flags, k, ret) {
if (is_visible_key(desc, inum, k)) {
if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))
goto found;
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index dbe834cb349f..6845dde1b339 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -92,34 +92,32 @@ static int check_subvol(struct btree_trans *trans,
}
struct bch_inode_unpacked inode;
- struct btree_iter inode_iter = {};
- ret = bch2_inode_peek_nowarn(trans, &inode_iter, &inode,
+ ret = bch2_inode_find_by_inum_nowarn_trans(trans,
(subvol_inum) { k.k->p.offset, le64_to_cpu(subvol.v->inode) },
- 0);
- bch2_trans_iter_exit(trans, &inode_iter);
-
- if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
-
- if (fsck_err_on(ret,
- trans, subvol_to_missing_root,
- "subvolume %llu points to missing subvolume root %llu:%u",
- k.k->p.offset, le64_to_cpu(subvol.v->inode),
- le32_to_cpu(subvol.v->snapshot))) {
- ret = bch2_subvolume_delete(trans, iter->pos.offset);
- bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
- return ret ?: -BCH_ERR_transaction_restart_nested;
- }
-
- if (fsck_err_on(inode.bi_subvol != subvol.k->p.offset,
- trans, subvol_root_wrong_bi_subvol,
- "subvol root %llu:%u has wrong bi_subvol field: got %u, should be %llu",
- inode.bi_inum, inode_iter.k.p.snapshot,
- inode.bi_subvol, subvol.k->p.offset)) {
- inode.bi_subvol = subvol.k->p.offset;
- ret = __bch2_fsck_write_inode(trans, &inode, le32_to_cpu(subvol.v->snapshot));
- if (ret)
+ &inode);
+ if (!ret) {
+ if (fsck_err_on(inode.bi_subvol != subvol.k->p.offset,
+ trans, subvol_root_wrong_bi_subvol,
+ "subvol root %llu:%u has wrong bi_subvol field: got %u, should be %llu",
+ inode.bi_inum, inode.bi_snapshot,
+ inode.bi_subvol, subvol.k->p.offset)) {
+ inode.bi_subvol = subvol.k->p.offset;
+ ret = __bch2_fsck_write_inode(trans, &inode, le32_to_cpu(subvol.v->snapshot));
+ if (ret)
+ goto err;
+ }
+ } else if (bch2_err_matches(ret, ENOENT)) {
+ if (fsck_err(trans, subvol_to_missing_root,
+ "subvolume %llu points to missing subvolume root %llu:%u",
+ k.k->p.offset, le64_to_cpu(subvol.v->inode),
+ le32_to_cpu(subvol.v->snapshot))) {
+ ret = bch2_subvolume_delete(trans, iter->pos.offset);
+ bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
+ ret = ret ?: -BCH_ERR_transaction_restart_nested;
goto err;
+ }
+ } else {
+ goto err;
}
if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
@@ -137,7 +135,7 @@ static int check_subvol(struct btree_trans *trans,
"%s: snapshot tree %u not found", __func__, snapshot_tree);
if (ret)
- return ret;
+ goto err;
if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset,
trans, subvol_not_master_and_not_snapshot,
@@ -147,7 +145,7 @@ static int check_subvol(struct btree_trans *trans,
bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
ret = PTR_ERR_OR_ZERO(s);
if (ret)
- return ret;
+ goto err;
SET_BCH_SUBVOLUME_SNAP(&s->v, true);
}
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index a8299ba2cab2..e62f876541fe 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -31,6 +31,51 @@ int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *);
int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
int bch2_subvol_is_ro(struct bch_fs *, u32);
+static inline struct bkey_s_c
+bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos end,
+ u32 subvolid, unsigned flags)
+{
+ u32 snapshot;
+ int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
+ if (ret)
+ return bkey_s_c_err(ret);
+
+ bch2_btree_iter_set_snapshot(iter, snapshot);
+ return bch2_btree_iter_peek_upto_type(iter, end, flags);
+}
+
+#define for_each_btree_key_in_subvolume_upto_continue(_trans, _iter, \
+ _end, _subvolid, _flags, _k, _do) \
+({ \
+ struct bkey_s_c _k; \
+ int _ret3 = 0; \
+ \
+ do { \
+ _ret3 = lockrestart_do(_trans, ({ \
+ (_k) = bch2_btree_iter_peek_in_subvolume_upto_type(&(_iter), \
+ _end, _subvolid, (_flags)); \
+ if (!(_k).k) \
+ break; \
+ \
+ bkey_err(_k) ?: (_do); \
+ })); \
+ } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
+ \
+ bch2_trans_iter_exit((_trans), &(_iter)); \
+ _ret3; \
+})
+
+#define for_each_btree_key_in_subvolume_upto(_trans, _iter, _btree_id, \
+ _start, _end, _subvolid, _flags, _k, _do) \
+({ \
+ struct btree_iter _iter; \
+ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ \
+ for_each_btree_key_in_subvolume_upto_continue(_trans, _iter, \
+ _end, _subvolid, _flags, _k, _do); \
+})
+
int bch2_delete_dead_snapshots(struct bch_fs *);
void bch2_delete_dead_snapshots_async(struct bch_fs *);
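A usage sketch for the new iterator macro (names in the body are
illustrative, not from this patch): walk the extents of one inode within a
subvolume, with the subvolume's snapshot resolved on each iteration:

	ret = for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents,
			POS(inum, start), POS(inum, end), subvolid, 0, k, ({
		/* per-key work; a nonzero return stops the walk */
		0;
	}));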
diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h
index 9b10c8947828..f2ec4277c2a5 100644
--- a/fs/bcachefs/subvolume_types.h
+++ b/fs/bcachefs/subvolume_types.h
@@ -30,7 +30,8 @@ struct snapshot_table {
};
typedef struct {
- u32 subvol;
+ /* we can't have padding in this struct: */
+ u64 subvol;
u64 inum;
} subvol_inum;
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index c8c2ccbdfbb5..ce7410d72089 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -418,6 +418,9 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2 &&
!BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb))
SET_BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb, 30);
+
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2)
+ SET_BCH_SB_PROMOTE_WHOLE_EXTENTS(sb, true);
}
for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
@@ -796,8 +799,10 @@ retry:
i < layout.sb_offset + layout.nr_superblocks; i++) {
offset = le64_to_cpu(*i);
- if (offset == opt_get(*opts, sb))
+ if (offset == opt_get(*opts, sb)) {
+ ret = -BCH_ERR_invalid;
continue;
+ }
ret = read_one_super(sb, offset, &err);
if (!ret)
@@ -1185,7 +1190,8 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
le_bitvector_to_cpu(errors_silent, (void *) e->errors_silent, sizeof(e->errors_silent) * 8);
prt_printf(out, "Errors to silently fix:\t");
- prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent, sizeof(e->errors_silent) * 8);
+ prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent,
+ min(BCH_FSCK_ERR_MAX, sizeof(e->errors_silent) * 8));
prt_newline(out);
kfree(errors_silent);
@@ -1292,15 +1298,9 @@ void bch2_sb_layout_to_text(struct printbuf *out, struct bch_sb_layout *l)
void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
bool print_layout, unsigned fields)
{
- u64 fields_have = 0;
- unsigned nr_devices = 0;
-
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 44);
- for (int i = 0; i < sb->nr_devices; i++)
- nr_devices += bch2_member_exists(sb, i);
-
prt_printf(out, "External UUID:\t");
pr_uuid(out, sb->user_uuid.b);
prt_newline(out);
@@ -1356,9 +1356,10 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
prt_newline(out);
prt_printf(out, "Clean:\t%llu\n", BCH_SB_CLEAN(sb));
- prt_printf(out, "Devices:\t%u\n", nr_devices);
+ prt_printf(out, "Devices:\t%u\n", bch2_sb_nr_devices(sb));
prt_printf(out, "Sections:\t");
+ u64 fields_have = 0;
vstruct_for_each(sb, f)
fields_have |= 1 << le32_to_cpu(f->type);
prt_bitflags(out, bch2_sb_fields, fields_have);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index e7fa2de35014..873e4be7e1dc 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -370,7 +370,7 @@ void bch2_fs_read_only(struct bch_fs *c)
test_bit(BCH_FS_clean_shutdown, &c->flags) &&
c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
- BUG_ON(atomic_read(&c->btree_cache.dirty));
+ BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty));
BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
BUG_ON(c->btree_write_buffer.inc.keys.nr);
BUG_ON(c->btree_write_buffer.flushing.keys.nr);
@@ -543,6 +543,7 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_fs_fs_io_direct_exit(c);
bch2_fs_fs_io_buffered_exit(c);
bch2_fs_fsio_exit(c);
+ bch2_fs_vfs_exit(c);
bch2_fs_ec_exit(c);
bch2_fs_encryption_exit(c);
bch2_fs_nocow_locking_exit(c);
@@ -810,7 +811,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
c->copy_gc_enabled = 1;
c->rebalance.enabled = 1;
- c->promote_whole_extents = true;
c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write];
c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write];
@@ -926,6 +926,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_fs_encryption_init(c) ?:
bch2_fs_compress_init(c) ?:
bch2_fs_ec_init(c) ?:
+ bch2_fs_vfs_init(c) ?:
bch2_fs_fsio_init(c) ?:
bch2_fs_fs_io_buffered_init(c) ?:
bch2_fs_fs_io_direct_init(c);
@@ -1591,33 +1592,6 @@ int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
/* Device add/removal: */
-static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
-{
- struct bpos start = POS(ca->dev_idx, 0);
- struct bpos end = POS(ca->dev_idx, U64_MAX);
- int ret;
-
- /*
- * We clear the LRU and need_discard btrees first so that we don't race
- * with bch2_do_invalidates() and bch2_do_discards()
- */
- ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
- BTREE_TRIGGER_norun, NULL) ?:
- bch2_dev_usage_remove(c, ca->dev_idx);
- bch_err_msg(c, ret, "removing dev alloc info");
- return ret;
-}
-
int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
struct bch_member *m;
@@ -1729,9 +1703,6 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
struct bch_opts opts = bch2_opts_empty();
struct bch_sb_handle sb;
struct bch_dev *ca = NULL;
- struct bch_sb_field_members_v2 *mi;
- struct bch_member dev_mi;
- unsigned dev_idx, nr_devices, u64s;
struct printbuf errbuf = PRINTBUF;
struct printbuf label = PRINTBUF;
int ret;
@@ -1741,7 +1712,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
if (ret)
goto err;
- dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
+ struct bch_member dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
if (BCH_MEMBER_GROUP(&dev_mi)) {
bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
@@ -1779,55 +1750,19 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
goto err_unlock;
if (dynamic_fault("bcachefs:add:no_slot"))
- goto no_slot;
-
- if (c->sb.nr_devices < BCH_SB_MEMBERS_MAX) {
- dev_idx = c->sb.nr_devices;
- goto have_slot;
- }
-
- int best = -1;
- u64 best_last_mount = 0;
- for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) {
- struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
- if (bch2_member_alive(&m))
- continue;
-
- u64 last_mount = le64_to_cpu(m.last_mount);
- if (best < 0 || last_mount < best_last_mount) {
- best = dev_idx;
- best_last_mount = last_mount;
- }
- }
- if (best >= 0) {
- dev_idx = best;
- goto have_slot;
- }
-no_slot:
- ret = -BCH_ERR_ENOSPC_sb_members;
- bch_err_msg(c, ret, "setting up new superblock");
- goto err_unlock;
-
-have_slot:
- nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
-
- mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
- u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
- le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
+ goto err_unlock;
- mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
- if (!mi) {
- ret = -BCH_ERR_ENOSPC_sb_members;
+ ret = bch2_sb_member_alloc(c);
+ if (ret < 0) {
bch_err_msg(c, ret, "setting up new superblock");
goto err_unlock;
}
- struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
+ unsigned dev_idx = ret;
/* success: */
- *m = dev_mi;
- m->last_mount = cpu_to_le64(ktime_get_real_seconds());
- c->disk_sb.sb->nr_devices = nr_devices;
+ dev_mi.last_mount = cpu_to_le64(ktime_get_real_seconds());
+ *bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx) = dev_mi;
ca->disk_sb.sb->dev_idx = dev_idx;
bch2_dev_attach(c, ca, dev_idx);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 33f2a64c14c9..03e59f86f360 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -219,7 +219,6 @@ read_attribute(copy_gc_wait);
rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);
-rw_attribute(promote_whole_extents);
read_attribute(new_stripes);
@@ -234,7 +233,7 @@ write_attribute(perf_test);
#define x(_name) \
static struct attribute sysfs_time_stat_##_name = \
- { .name = #_name, .mode = 0444 };
+ { .name = #_name, .mode = 0644 };
BCH_TIME_STATS()
#undef x
@@ -245,14 +244,18 @@ static struct attribute sysfs_state_rw = {
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
+ struct btree_cache *bc = &c->btree_cache;
size_t ret = 0;
struct btree *b;
- mutex_lock(&c->btree_cache.lock);
- list_for_each_entry(b, &c->btree_cache.live, list)
+ mutex_lock(&bc->lock);
+ list_for_each_entry(b, &bc->live[0].list, list)
ret += btree_buf_bytes(b);
-
- mutex_unlock(&c->btree_cache.lock);
+ list_for_each_entry(b, &bc->live[1].list, list)
+ ret += btree_buf_bytes(b);
+ list_for_each_entry(b, &bc->freeable, list)
+ ret += btree_buf_bytes(b);
+ mutex_unlock(&bc->lock);
return ret;
}
@@ -288,7 +291,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
prt_tab_rjust(out);
prt_human_readable_u64(out, nr_extents
- ? div_u64(sectors_uncompressed << 9, nr_extents)
+ ? div64_u64(sectors_uncompressed << 9, nr_extents)
: 0);
prt_tab_rjust(out);
prt_newline(out);
@@ -347,8 +350,6 @@ SHOW(bch2_fs)
if (attr == &sysfs_rebalance_status)
bch2_rebalance_status_to_text(out, c);
- sysfs_print(promote_whole_extents, c->promote_whole_extents);
-
/* Debugging: */
if (attr == &sysfs_journal_debug)
@@ -436,8 +437,6 @@ STORE(bch2_fs)
sysfs_pd_controller_store(rebalance, &c->rebalance.pd);
- sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);
-
/* Debugging: */
if (!test_bit(BCH_FS_started, &c->flags))
@@ -449,11 +448,12 @@ STORE(bch2_fs)
return -EROFS;
if (attr == &sysfs_trigger_btree_cache_shrink) {
+ struct btree_cache *bc = &c->btree_cache;
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
- c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
+ bc->live[0].shrink->scan_objects(bc->live[0].shrink, &sc);
}
if (attr == &sysfs_trigger_btree_key_cache_shrink) {
@@ -514,7 +514,7 @@ struct attribute *bch2_fs_files[] = {
&sysfs_btree_cache_size,
&sysfs_btree_write_stats,
- &sysfs_promote_whole_extents,
+ &sysfs_rebalance_status,
&sysfs_compression_stats,
@@ -614,7 +614,6 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_copy_gc_wait,
&sysfs_rebalance_enabled,
- &sysfs_rebalance_status,
sysfs_pd_controller_files(rebalance),
&sysfs_moving_ctxts,
@@ -674,7 +673,7 @@ STORE(bch2_fs_opts_dir)
if (ret < 0)
goto err;
- bch2_opt_set_sb(c, opt, v);
+ bch2_opt_set_sb(c, NULL, opt, v);
bch2_opt_set_by_id(&c->opts, id, v);
if (v &&
@@ -728,6 +727,13 @@ SHOW(bch2_fs_time_stats)
STORE(bch2_fs_time_stats)
{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
+
+#define x(name) \
+ if (attr == &sysfs_time_stat_##name) \
+ bch2_time_stats_reset(&c->times[BCH_TIME_##name]);
+ BCH_TIME_STATS()
+#undef x
return size;
}
SYSFS_OPS(bch2_fs_time_stats);
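
The store body works through the BCH_TIME_STATS() x-macro: each x(name) expansion compares attr against the per-stat attribute and resets the matching counter. A minimal userspace model of the same x-macro dispatch pattern; the stat names are hypothetical:

#include <string.h>

#define STATS() x(btree_read) x(btree_write) x(journal_flush)

/* Declare one counter per statistic. */
#define x(name) static unsigned long stat_##name;
STATS()
#undef x

/* Dispatch on a name, mirroring how the sysfs STORE hook compares the
 * attribute pointer once per expansion. */
static void reset_stat(const char *which)
{
#define x(name)					\
	if (!strcmp(which, #name))		\
		stat_##name = 0;
	STATS()
#undef x
}

int main(void)
{
	stat_journal_flush = 7;
	reset_stat("journal_flush");
	return (int)stat_journal_flush;	/* 0 after the reset */
}
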
@@ -821,32 +827,17 @@ STORE(bch2_dev)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
struct bch_fs *c = ca->fs;
- struct bch_member *mi;
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
- mutex_lock(&c->sb_lock);
- mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
-
- if (v != BCH_MEMBER_DISCARD(mi)) {
- SET_BCH_MEMBER_DISCARD(mi, v);
- bch2_write_super(c);
- }
- mutex_unlock(&c->sb_lock);
+ bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_discard, v);
}
if (attr == &sysfs_durability) {
u64 v = strtoul_or_return(buf);
- mutex_lock(&c->sb_lock);
- mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
-
- if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
- SET_BCH_MEMBER_DURABILITY(mi, v + 1);
- bch2_write_super(c);
- }
- mutex_unlock(&c->sb_lock);
+ bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_durability, v);
}
if (attr == &sysfs_label) {
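
Both stores previously duplicated the take-sb_lock / fetch-member / compare / set / write-super sequence that bch2_opt_set_sb() now owns; passing ca instead of NULL routes the option to the per-device member field. A rough sketch of the factored-out shape, with set_field/get_field standing in for the generated SET_BCH_MEMBER_*/BCH_MEMBER_* accessors visible in the removed lines:

/* Sketch only: the update-if-changed pattern the two stores shared. */
static void opt_set_sb_sketch(struct bch_fs *c, struct bch_dev *ca,
			      void (*set_field)(struct bch_member *, u64),
			      u64 (*get_field)(const struct bch_member *),
			      u64 v)
{
	mutex_lock(&c->sb_lock);
	struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

	if (get_field(m) != v) {
		set_field(m, v);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);
}
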
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index 01b768c9b767..b2f209743afe 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -394,7 +394,7 @@ static int insert_test_extent(struct bch_fs *c,
k.k_i.k.p.offset = end;
k.k_i.k.p.snapshot = U32_MAX;
k.k_i.k.size = end - start;
- k.k_i.k.version.lo = test_version++;
+ k.k_i.k.bversion.lo = test_version++;
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0);
bch_err_fn(c, ret);
diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c
index 0807ce9b171a..dea73bc1cb51 100644
--- a/fs/bcachefs/thread_with_file.c
+++ b/fs/bcachefs/thread_with_file.c
@@ -275,7 +275,6 @@ static long thread_with_stdio_ioctl(struct file *file, unsigned int cmd, unsigne
}
static const struct file_operations thread_with_stdio_fops = {
- .llseek = no_llseek,
.read = thread_with_stdio_read,
.write = thread_with_stdio_write,
.poll = thread_with_stdio_poll,
@@ -285,7 +284,6 @@ static const struct file_operations thread_with_stdio_fops = {
};
static const struct file_operations thread_with_stdout_fops = {
- .llseek = no_llseek,
.read = thread_with_stdio_read,
.poll = thread_with_stdout_poll,
.flush = thread_with_stdio_flush,
@@ -387,7 +385,7 @@ again:
seen = buf->buf.nr;
char *n = memchr(buf->buf.data, '\n', seen);
- if (!n && timeout != MAX_SCHEDULE_TIMEOUT && jiffies >= until) {
+ if (!n && timeout != MAX_SCHEDULE_TIMEOUT && time_after_eq(jiffies, until)) {
spin_unlock(&buf->lock);
return -ETIME;
}
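
A raw jiffies >= comparison misbehaves once the counter wraps; time_after_eq() from <linux/jiffies.h> does the comparison in wrap-safe signed arithmetic. A userspace model of the macro:

#include <stdio.h>

/* Wrap-safe "a >= b" for a free-running unsigned counter, modelled on
 * the kernel's time_after_eq(): the difference is evaluated as a
 * signed quantity, so values up to half the counter range apart
 * compare correctly across wraparound. */
static int my_time_after_eq(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;
}

int main(void)
{
	unsigned long until = (unsigned long)-10;	/* just before wraparound */
	unsigned long now   = 5;			/* 15 ticks later, wrapped */

	/* Raw comparison gets this wrong; the wrap-safe one does not. */
	printf("raw: %d  wrap-safe: %d\n", now >= until, my_time_after_eq(now, until));
	return 0;
}
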
diff --git a/fs/bcachefs/time_stats.c b/fs/bcachefs/time_stats.c
index 4508e9dcbee2..3fe82757f93a 100644
--- a/fs/bcachefs/time_stats.c
+++ b/fs/bcachefs/time_stats.c
@@ -151,6 +151,20 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
}
}
+void bch2_time_stats_reset(struct bch2_time_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ unsigned offset = offsetof(struct bch2_time_stats, min_duration);
+ memset((void *) stats + offset, 0, sizeof(*stats) - offset);
+
+ if (stats->buffer) {
+ int cpu;
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(stats->buffer, cpu)->nr = 0;
+ }
+ spin_unlock_irq(&stats->lock);
+}
+
void bch2_time_stats_exit(struct bch2_time_stats *stats)
{
free_percpu(stats->buffer);
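
The reset zeroes everything from min_duration to the end of the struct with one memset, which is exactly why the companion header change below moves the percpu buffer pointer above min_duration: fields that must survive the wipe have to sit before the cut-off. A standalone model of the offsetof-based partial reset; the field names are illustrative:

#include <stddef.h>
#include <string.h>
#include <assert.h>

struct stats {
	int lock;		/* must survive a reset */
	void *buffer;		/* must survive a reset (cleared separately) */
	/* everything from here down is zeroed wholesale */
	unsigned long min_duration;
	unsigned long max_duration;
	unsigned long count;
};

static void stats_reset(struct stats *s)
{
	size_t off = offsetof(struct stats, min_duration);

	/* One memset covers every field at or after min_duration; any
	 * field that must survive has to be declared above it. */
	memset((char *)s + off, 0, sizeof(*s) - off);
}

int main(void)
{
	struct stats s = { .lock = 1, .buffer = &s, .count = 42 };

	stats_reset(&s);
	assert(s.lock == 1 && s.buffer == &s && s.count == 0);
	return 0;
}
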
diff --git a/fs/bcachefs/time_stats.h b/fs/bcachefs/time_stats.h
index 5df61403744b..dc6493f7bbab 100644
--- a/fs/bcachefs/time_stats.h
+++ b/fs/bcachefs/time_stats.h
@@ -70,6 +70,7 @@ struct time_stat_buffer {
struct bch2_time_stats {
spinlock_t lock;
bool have_quantiles;
+ struct time_stat_buffer __percpu *buffer;
/* all fields are in nanoseconds */
u64 min_duration;
u64 max_duration;
@@ -87,7 +88,6 @@ struct bch2_time_stats {
struct mean_and_variance_weighted duration_stats_weighted;
struct mean_and_variance_weighted freq_stats_weighted;
- struct time_stat_buffer __percpu *buffer;
};
struct bch2_time_stats_quantiles {
@@ -142,6 +142,7 @@ static inline bool track_event_change(struct bch2_time_stats *stats, bool v)
return false;
}
+void bch2_time_stats_reset(struct bch2_time_stats *);
void bch2_time_stats_exit(struct bch2_time_stats *);
void bch2_time_stats_init(struct bch2_time_stats *);
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index c62f00322d1e..5597b9d6297f 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -3,7 +3,6 @@
#define TRACE_SYSTEM bcachefs
#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_BCACHEFS_H
#include <linux/tracepoint.h>
@@ -558,6 +557,7 @@ TRACE_EVENT(btree_path_relock_fail,
__field(unsigned long, caller_ip )
__field(u8, btree_id )
__field(u8, level )
+ __field(u8, path_idx)
TRACE_BPOS_entries(pos)
__array(char, node, 24 )
__field(u8, self_read_count )
@@ -575,7 +575,8 @@ TRACE_EVENT(btree_path_relock_fail,
strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
- __entry->level = path->level;
+ __entry->level = level;
+ __entry->path_idx = path - trans->paths;
TRACE_BPOS_assign(pos, path->pos);
c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
@@ -588,7 +589,7 @@ TRACE_EVENT(btree_path_relock_fail,
c = six_lock_counts(&path->l[level].b->c.lock);
__entry->read_count = c.n[SIX_LOCK_read];
__entry->intent_count = c.n[SIX_LOCK_intent];
- scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+ scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
}
__entry->iter_lock_seq = path->l[level].lock_seq;
__entry->node_lock_seq = is_btree_node(path, level)
@@ -596,9 +597,10 @@ TRACE_EVENT(btree_path_relock_fail,
: 0;
),
- TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
+ TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
+ __entry->path_idx,
bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
@@ -625,6 +627,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
__field(unsigned long, caller_ip )
__field(u8, btree_id )
__field(u8, level )
+ __field(u8, path_idx)
TRACE_BPOS_entries(pos)
__field(u8, locked )
__field(u8, self_read_count )
@@ -642,6 +645,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
__entry->caller_ip = caller_ip;
__entry->btree_id = path->btree_id;
__entry->level = level;
+ __entry->path_idx = path - trans->paths;
TRACE_BPOS_assign(pos, path->pos);
__entry->locked = btree_node_locked(path, level);
@@ -657,9 +661,10 @@ TRACE_EVENT(btree_path_upgrade_fail,
: 0;
),
- TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
+ TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
__entry->trans_fn,
(void *) __entry->caller_ip,
+ __entry->path_idx,
bch2_btree_id_str(__entry->btree_id),
__entry->pos_inode,
__entry->pos_offset,
@@ -1438,6 +1443,456 @@ TRACE_EVENT(error_downcast,
TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
+#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
+
+TRACE_EVENT(update_by_path,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path,
+ struct btree_insert_entry *i, bool overwrite),
+ TP_ARGS(trans, path, i, overwrite),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(btree_path_idx_t, path_idx )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(pos)
+ __field(u8, overwrite )
+ __field(btree_path_idx_t, update_idx )
+ __field(btree_path_idx_t, nr_updates )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->path_idx = path - trans->paths;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(pos, path->pos);
+ __entry->overwrite = overwrite;
+ __entry->update_idx = i - trans->updates;
+ __entry->nr_updates = trans->nr_updates;
+ ),
+
+ TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
+ __entry->trans_fn,
+ __entry->path_idx,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->overwrite,
+ __entry->update_idx,
+ __entry->nr_updates)
+);
+
+TRACE_EVENT(btree_path_lock,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_bkey_cached_common *b),
+ TP_ARGS(trans, caller_ip, b),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u8, level )
+ __array(char, node, 24 )
+ __field(u32, lock_seq )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = b->btree_id;
+ __entry->level = b->level;
+
+ scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+ __entry->lock_seq = six_lock_seq(&b->lock);
+ ),
+
+ TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->level,
+ __entry->node,
+ __entry->lock_seq)
+);
+
+DECLARE_EVENT_CLASS(btree_path_ev,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path),
+
+ TP_STRUCT__entry(
+ __field(u16, idx )
+ __field(u8, ref )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->ref = path->ref;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(pos, path->pos);
+ ),
+
+ TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
+ __entry->idx, __entry->ref,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+TRACE_EVENT(btree_path_alloc,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, locks_want )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->locks_want = path->locks_want;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(pos, path->pos);
+ ),
+
+ TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
+ __entry->idx,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->locks_want,
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot)
+);
+
+TRACE_EVENT(btree_path_get,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
+ TP_ARGS(trans, path, new_pos),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, ref )
+ __field(u8, preserve )
+ __field(u8, locks_want )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(old_pos)
+ TRACE_BPOS_entries(new_pos)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->ref = path->ref;
+ __entry->preserve = path->preserve;
+ __entry->locks_want = path->locks_want;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(old_pos, path->pos);
+ TRACE_BPOS_assign(new_pos, *new_pos);
+ ),
+
+ TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
+ __entry->idx,
+ __entry->ref,
+ __entry->preserve,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->locks_want,
+ __entry->old_pos_inode,
+ __entry->old_pos_offset,
+ __entry->old_pos_snapshot,
+ __entry->new_pos_inode,
+ __entry->new_pos_offset,
+ __entry->new_pos_snapshot)
+);
+
+DECLARE_EVENT_CLASS(btree_path_clone,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+ TP_ARGS(trans, path, new),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, new_idx )
+ __field(u8, btree_id )
+ __field(u8, ref )
+ __field(u8, preserve )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->new_idx = new - trans->paths;
+ __entry->btree_id = path->btree_id;
+ __entry->ref = path->ref;
+ __entry->preserve = path->preserve;
+ TRACE_BPOS_assign(pos, path->pos);
+ ),
+
+ TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
+ __entry->idx,
+ __entry->ref,
+ __entry->preserve,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->new_idx)
+);
+
+DEFINE_EVENT(btree_path_clone, btree_path_clone,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+ TP_ARGS(trans, path, new)
+);
+
+DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+ TP_ARGS(trans, path, new)
+);
+
+DECLARE_EVENT_CLASS(btree_path_traverse,
+ TP_PROTO(struct btree_trans *trans,
+ struct btree_path *path),
+ TP_ARGS(trans, path),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(btree_path_idx_t, idx )
+ __field(u8, ref )
+ __field(u8, preserve )
+ __field(u8, should_be_locked )
+ __field(u8, btree_id )
+ __field(u8, level )
+ TRACE_BPOS_entries(pos)
+ __field(u8, locks_want )
+ __field(u8, nodes_locked )
+ __array(char, node0, 24 )
+ __array(char, node1, 24 )
+ __array(char, node2, 24 )
+ __array(char, node3, 24 )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+
+ __entry->idx = path - trans->paths;
+ __entry->ref = path->ref;
+ __entry->preserve = path->preserve;
+ __entry->btree_id = path->btree_id;
+ __entry->level = path->level;
+ TRACE_BPOS_assign(pos, path->pos);
+
+ __entry->locks_want = path->locks_want;
+ __entry->nodes_locked = path->nodes_locked;
+ struct btree *b = path->l[0].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+ else
+ scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
+ b = path->l[1].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
+ else
+ scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
+ b = path->l[2].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
+ else
+ scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
+ b = path->l[3].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
+ else
+ scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
+ ),
+
+ TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
+ "locks %u %u %u %u node %s %s %s %s",
+ __entry->trans_fn,
+ __entry->idx,
+ __entry->ref,
+ __entry->preserve,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->level,
+ __entry->locks_want,
+ (__entry->nodes_locked >> 6) & 3,
+ (__entry->nodes_locked >> 4) & 3,
+ (__entry->nodes_locked >> 2) & 3,
+ (__entry->nodes_locked >> 0) & 3,
+ __entry->node3,
+ __entry->node2,
+ __entry->node1,
+ __entry->node0)
+);
+
+DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
+ TP_PROTO(struct btree_trans *trans,
+ struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
+ TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+ TP_ARGS(trans, path)
+);
+
+TRACE_EVENT(btree_path_set_pos,
+ TP_PROTO(struct btree_trans *trans,
+ struct btree_path *path,
+ struct bpos *new_pos),
+ TP_ARGS(trans, path, new_pos),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, ref )
+ __field(u8, preserve )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(old_pos)
+ TRACE_BPOS_entries(new_pos)
+ __field(u8, locks_want )
+ __field(u8, nodes_locked )
+ __array(char, node0, 24 )
+ __array(char, node1, 24 )
+ __array(char, node2, 24 )
+ __array(char, node3, 24 )
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path - trans->paths;
+ __entry->ref = path->ref;
+ __entry->preserve = path->preserve;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(old_pos, path->pos);
+ TRACE_BPOS_assign(new_pos, *new_pos);
+
+ __entry->nodes_locked = path->nodes_locked;
+ struct btree *b = path->l[0].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+ else
+ scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
+ b = path->l[1].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
+ else
+ scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
+ b = path->l[2].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
+ else
+ scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
+ b = path->l[3].b;
+ if (IS_ERR(b))
+ strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
+ else
+ scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
+ ),
+
+ TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
+ "locks %u %u %u %u node %s %s %s %s",
+ __entry->idx,
+ __entry->ref,
+ __entry->preserve,
+ bch2_btree_id_str(__entry->btree_id),
+ __entry->old_pos_inode,
+ __entry->old_pos_offset,
+ __entry->old_pos_snapshot,
+ __entry->new_pos_inode,
+ __entry->new_pos_offset,
+ __entry->new_pos_snapshot,
+ (__entry->nodes_locked >> 6) & 3,
+ (__entry->nodes_locked >> 4) & 3,
+ (__entry->nodes_locked >> 2) & 3,
+ (__entry->nodes_locked >> 0) & 3,
+ __entry->node3,
+ __entry->node2,
+ __entry->node1,
+ __entry->node0)
+);
+
+TRACE_EVENT(btree_path_free,
+ TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
+ TP_ARGS(trans, path, dup),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ __field(u8, preserve )
+ __field(u8, should_be_locked)
+ __field(s8, dup )
+ __field(u8, dup_locked )
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path;
+ __entry->preserve = trans->paths[path].preserve;
+ __entry->should_be_locked = trans->paths[path].should_be_locked;
+ __entry->dup = dup ? dup - trans->paths : -1;
+ __entry->dup_locked = dup ? btree_node_locked(dup, dup->level) : 0;
+ ),
+
+ TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
+ __entry->preserve ? 'P' : ' ',
+ __entry->should_be_locked ? 'S' : ' ',
+ __entry->dup,
+ __entry->dup_locked)
+);
+
+TRACE_EVENT(btree_path_free_trans_begin,
+ TP_PROTO(btree_path_idx_t path),
+ TP_ARGS(path),
+
+ TP_STRUCT__entry(
+ __field(btree_path_idx_t, idx )
+ ),
+
+ TP_fast_assign(
+ __entry->idx = path;
+ ),
+
+ TP_printk(" path %3u", __entry->idx)
+);
+
+#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
+#ifndef _TRACE_BCACHEFS_H
+
+static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
+ struct btree_insert_entry *i, bool overwrite) {}
+static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
+static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
+static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
+static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
+static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
+
+#endif
+#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
+
+#define _TRACE_BCACHEFS_H
#endif /* _TRACE_BCACHEFS_H */
/* This part must be outside protection */
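
Relocating #define _TRACE_BCACHEFS_H from the top of the header to just before the final #endif is deliberate: trace headers are re-read under TRACE_HEADER_MULTI_READ, and the inline no-op stubs for the path tracepoints must be emitted exactly once when CONFIG_BCACHEFS_PATH_TRACEPOINTS is off, hence the inner #ifndef _TRACE_BCACHEFS_H around them above. A reduced model of the idiom:

/* trace_model.h -- reduced model of the multi-read trace header idiom.
 * The guard macro is defined at the *end*, so a block wrapped in
 * "#ifndef GUARD" inside the header runs on the first inclusion only,
 * while the rest of the header may be re-read by the tracing machinery.
 */
#if !defined(TRACE_MODEL_H) || defined(TRACE_HEADER_MULTI_READ)

/* ... event definitions, re-emitted on multi-read passes ... */

#ifndef TRACE_MODEL_H
/* one-shot section: stub inlines, emitted exactly once */
static inline void trace_example(void) {}
#endif

#define TRACE_MODEL_H
#endif /* TRACE_MODEL_H */
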
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 1b8554460af4..42f565c76181 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -64,7 +64,7 @@ static int bch2_pow(u64 n, u64 p, u64 *res)
*res = 1;
while (p--) {
- if (*res > div_u64(U64_MAX, n))
+ if (*res > div64_u64(U64_MAX, n))
return -ERANGE;
*res *= n;
}
@@ -140,14 +140,14 @@ static int __bch2_strtou64_h(const char *cp, u64 *res)
parse_or_ret(cp, parse_unit_suffix(cp, &b));
- if (v > div_u64(U64_MAX, b))
+ if (v > div64_u64(U64_MAX, b))
return -ERANGE;
v *= b;
- if (f_n > div_u64(U64_MAX, b))
+ if (f_n > div64_u64(U64_MAX, b))
return -ERANGE;
- f_n = div_u64(f_n * b, f_d);
+ f_n = div64_u64(f_n * b, f_d);
if (v + f_n < v)
return -ERANGE;
v += f_n;
@@ -204,7 +204,7 @@ STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
STRTO_H(strtou64, u64)
-u64 bch2_read_flag_list(char *opt, const char * const list[])
+u64 bch2_read_flag_list(const char *opt, const char * const list[])
{
u64 ret = 0;
char *p, *s, *d = kstrdup(opt, GFP_KERNEL);
@@ -214,7 +214,7 @@ u64 bch2_read_flag_list(char *opt, const char * const list[])
s = strim(d);
- while ((p = strsep(&s, ","))) {
+ while ((p = strsep(&s, ",;"))) {
int flag = match_string(list, -1, p);
if (flag < 0) {
@@ -360,7 +360,7 @@ void bch2_pr_time_units(struct printbuf *out, u64 ns)
{
const struct time_unit *u = bch2_pick_time_units(ns);
- prt_printf(out, "%llu %s", div_u64(ns, u->nsecs), u->name);
+ prt_printf(out, "%llu %s", div64_u64(ns, u->nsecs), u->name);
}
static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
@@ -477,7 +477,7 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;
u64 q = max(quantiles->entries[i].m, last_q);
- prt_printf(out, "%llu ", div_u64(q, u->nsecs));
+ prt_printf(out, "%llu ", div64_u64(q, u->nsecs));
if (is_last)
prt_newline(out);
last_q = q;
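
div_u64() takes a 32-bit divisor, so a wider divisor is silently narrowed at the call; each site above can divide by a value exceeding 32 bits (overflow bounds against U64_MAX, nanosecond unit sizes, extent counts), hence the switch to div64_u64(). The two signatures, paraphrased from <linux/math64.h>:

/* Paraphrased from <linux/math64.h>: note the divisor widths. */
u64 div_u64(u64 dividend, u32 divisor);		/* divisor narrowed to 32 bits */
u64 div64_u64(u64 dividend, u64 divisor);	/* full 64-bit divisor */

/* With n = (1ULL << 32) + 1, div_u64(U64_MAX, n) divides by 1 after
 * the implicit narrowing, so the "*res > U64_MAX / n" overflow guard
 * above could never fire; div64_u64() computes the intended bound. */
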
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 902b7f5406a2..fb02c1c36004 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -195,7 +195,7 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
bool bch2_is_zero(const void *, size_t);
-u64 bch2_read_flag_list(char *, const char * const[]);
+u64 bch2_read_flag_list(const char *, const char * const[]);
void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 331f944d73dc..56c8d3fe55a4 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -250,17 +250,27 @@ static int __bch2_xattr_emit(const char *prefix,
return 0;
}
+static inline const char *bch2_xattr_prefix(unsigned type, struct dentry *dentry)
+{
+ const struct xattr_handler *handler = bch2_xattr_type_to_handler(type);
+
+ if (!xattr_handler_can_list(handler, dentry))
+ return NULL;
+
+ return xattr_prefix(handler);
+}
+
static int bch2_xattr_emit(struct dentry *dentry,
const struct bch_xattr *xattr,
struct xattr_buf *buf)
{
- const struct xattr_handler *handler =
- bch2_xattr_type_to_handler(xattr->x_type);
+ const char *prefix;
+
+ prefix = bch2_xattr_prefix(xattr->x_type, dentry);
+ if (!prefix)
+ return 0;
- return handler && (!handler->list || handler->list(dentry))
- ? __bch2_xattr_emit(handler->prefix ?: handler->name,
- xattr->x_name, xattr->x_name_len, buf)
- : 0;
+ return __bch2_xattr_emit(prefix, xattr->x_name, xattr->x_name_len, buf);
}
static int bch2_xattr_list_bcachefs(struct bch_fs *c,
@@ -295,54 +305,23 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct bch_fs *c = dentry->d_sb->s_fs_info;
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_s_c k;
struct xattr_buf buf = { .buf = buffer, .len = buffer_size };
u64 offset = 0, inum = inode->ei_inode.bi_inum;
- u32 snapshot;
- int ret;
-retry:
- bch2_trans_begin(trans);
- iter = (struct btree_iter) { NULL };
-
- ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot);
- if (ret)
- goto err;
-
- for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_xattrs,
- SPOS(inum, offset, snapshot),
- POS(inum, U64_MAX), 0, k, ret) {
- if (k.k->type != KEY_TYPE_xattr)
- continue;
-
- ret = bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf);
- if (ret)
- break;
- }
- offset = iter.pos.offset;
- bch2_trans_iter_exit(trans, &iter);
-err:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_xattrs,
+ POS(inum, offset),
+ POS(inum, U64_MAX),
+ inode->ei_inum.subvol, 0, k, ({
+ if (k.k->type != KEY_TYPE_xattr)
+ continue;
- if (ret)
- goto out;
+ bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf);
+ }))) ?:
+ bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, false) ?:
+ bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, true);
- ret = bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, false);
- if (ret)
- goto out;
-
- ret = bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, true);
- if (ret)
- goto out;
-
- return buf.used;
-out:
- return bch2_err_class(ret);
+ return ret ? bch2_err_class(ret) : buf.used;
}
static int bch2_xattr_get_handler(const struct xattr_handler *handler,
@@ -632,10 +611,6 @@ static const struct xattr_handler bch_xattr_bcachefs_effective_handler = {
const struct xattr_handler *bch2_xattr_handlers[] = {
&bch_xattr_user_handler,
-#ifdef CONFIG_BCACHEFS_POSIX_ACL
- &nop_posix_acl_access,
- &nop_posix_acl_default,
-#endif
&bch_xattr_trusted_handler,
&bch_xattr_security_handler,
#ifndef NO_BCACHEFS_FS
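
The rewritten listing leans on two idioms: for_each_btree_key_in_subvolume_upto() hides the transaction-restart retry loop the removed code spelled out by hand, and the GNU a ?: b operator chains the three steps so the first nonzero (error) result short-circuits the rest. A small userspace model of the chaining:

#include <stdio.h>

static int step1(void) { return 0; }	/* success */
static int step2(void) { return -5; }	/* first failure wins */
static int step3(void) { return -7; }	/* only reached if step2 succeeded */

int main(void)
{
	/* GNU "a ?: b" yields a when a is nonzero, else evaluates b:
	 * run the steps in order, keep the first nonzero result. */
	int ret = step1() ?: step2() ?: step3();

	printf("ret = %d\n", ret);	/* prints -5 */
	return 0;
}
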
diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h
index e9f810539552..c7916011ef34 100644
--- a/fs/bcachefs/xattr_format.h
+++ b/fs/bcachefs/xattr_format.h
@@ -13,7 +13,7 @@ struct bch_xattr {
__u8 x_type;
__u8 x_name_len;
__le16 x_val_len;
- __u8 x_name[];
+ __u8 x_name[] __counted_by(x_name_len);
} __packed __aligned(8);
#endif /* _BCACHEFS_XATTR_FORMAT_H */
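
__counted_by(x_name_len) tells compilers that support the counted_by attribute that the flexible array holds x_name_len elements, letting FORTIFY_SOURCE and UBSAN bounds-check accesses at run time; the kernel macro expands to nothing on compilers without it. A userspace sketch of the same annotation:

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(counted_by)
#define counted_by(m)	__attribute__((counted_by(m)))
#else
#define counted_by(m)
#endif

/* name[] is declared to hold exactly name_len elements, so a
 * bounds-checking build can police accesses through the struct. */
struct xattr_rec {
	unsigned char	type;
	unsigned char	name_len;
	unsigned short	val_len;
	unsigned char	name[] counted_by(name_len);
};
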
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 34d0d1e43f36..06dc4a57ba78 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2032,10 +2032,8 @@ static int elf_core_dump(struct coredump_params *cprm)
* Collect all the non-memory information about the process for the
* notes. This also sets up the file header.
*/
- if (!fill_note_info(&elf, e_phnum, &info, cprm)) {
- coredump_report_failure("Error collecting note info");
+ if (!fill_note_info(&elf, e_phnum, &info, cprm))
goto end_coredump;
- }
has_dumped = 1;
@@ -2050,10 +2048,8 @@ static int elf_core_dump(struct coredump_params *cprm)
sz += elf_coredump_extra_notes_size();
phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
- if (!phdr4note) {
- coredump_report_failure("Error allocating program headers note entry");
+ if (!phdr4note)
goto end_coredump;
- }
fill_elf_note_phdr(phdr4note, sz, offset);
offset += sz;
@@ -2067,24 +2063,18 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
- if (!shdr4extnum) {
- coredump_report_failure("Error allocating extra program headers");
+ if (!shdr4extnum)
goto end_coredump;
- }
fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
}
offset = dataoff;
- if (!dump_emit(cprm, &elf, sizeof(elf))) {
- coredump_report_failure("Error emitting the ELF headers");
+ if (!dump_emit(cprm, &elf, sizeof(elf)))
goto end_coredump;
- }
- if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note))) {
- coredump_report_failure("Error emitting the program header for notes");
+ if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
- }
/* Write program headers for segments dump */
for (i = 0; i < cprm->vma_count; i++) {
@@ -2107,28 +2097,20 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
- if (!dump_emit(cprm, &phdr, sizeof(phdr))) {
- coredump_report_failure("Error emitting program headers");
+ if (!dump_emit(cprm, &phdr, sizeof(phdr)))
goto end_coredump;
- }
}
- if (!elf_core_write_extra_phdrs(cprm, offset)) {
- coredump_report_failure("Error writing out extra program headers");
+ if (!elf_core_write_extra_phdrs(cprm, offset))
goto end_coredump;
- }
/* write out the notes section */
- if (!write_note_info(&info, cprm)) {
- coredump_report_failure("Error writing out notes");
+ if (!write_note_info(&info, cprm))
goto end_coredump;
- }
/* For cell spufs and x86 xstate */
- if (elf_coredump_extra_notes_write(cprm)) {
- coredump_report_failure("Error writing out extra notes");
+ if (elf_coredump_extra_notes_write(cprm))
goto end_coredump;
- }
/* Align to page */
dump_skip_to(cprm, dataoff);
@@ -2136,22 +2118,16 @@ static int elf_core_dump(struct coredump_params *cprm)
for (i = 0; i < cprm->vma_count; i++) {
struct core_vma_metadata *meta = cprm->vma_meta + i;
- if (!dump_user_range(cprm, meta->start, meta->dump_size)) {
- coredump_report_failure("Error writing out the process memory");
+ if (!dump_user_range(cprm, meta->start, meta->dump_size))
goto end_coredump;
- }
}
- if (!elf_core_write_extra_data(cprm)) {
- coredump_report_failure("Error writing out extra data");
+ if (!elf_core_write_extra_data(cprm))
goto end_coredump;
- }
if (e_phnum == PN_XNUM) {
- if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum))) {
- coredump_report_failure("Error emitting extra program headers");
+ if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
goto end_coredump;
- }
}
end_coredump:
diff --git a/fs/bpf_fs_kfuncs.c b/fs/bpf_fs_kfuncs.c
new file mode 100644
index 000000000000..3fe9f59ef867
--- /dev/null
+++ b/fs/bpf_fs_kfuncs.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC. */
+
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/xattr.h>
+
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_get_task_exe_file - get a reference on the exe_file struct file member of
+ * the mm_struct that is nested within the supplied
+ * task_struct
+ * @task: task_struct of which the nested mm_struct exe_file member to get a
+ * reference on
+ *
+ * Get a reference on the exe_file struct file member field of the mm_struct
+ * nested within the supplied *task*. The referenced file pointer acquired by
+ * this BPF kfunc must be released using bpf_put_file(). Failing to call
+ * bpf_put_file() on the returned referenced struct file pointer that has been
+ * acquired by this BPF kfunc will result in the BPF program being rejected by
+ * the BPF verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ *
+ * Internally, this BPF kfunc leans on get_task_exe_file(), such that calling
+ * bpf_get_task_exe_file() would be analogous to calling get_task_exe_file()
+ * directly in kernel context.
+ *
+ * Return: A referenced struct file pointer to the exe_file member of the
+ * mm_struct that is nested within the supplied *task*. On error, NULL is
+ * returned.
+ */
+__bpf_kfunc struct file *bpf_get_task_exe_file(struct task_struct *task)
+{
+ return get_task_exe_file(task);
+}
+
+/**
+ * bpf_put_file - put a reference on the supplied file
+ * @file: file to put a reference on
+ *
+ * Put a reference on the supplied *file*. Only referenced file pointers may be
+ * passed to this BPF kfunc. Attempting to pass an unreferenced file pointer, or
+ * any other arbitrary pointer for that matter, will result in the BPF program
+ * being rejected by the BPF verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ */
+__bpf_kfunc void bpf_put_file(struct file *file)
+{
+ fput(file);
+}
+
+/**
+ * bpf_path_d_path - resolve the pathname for the supplied path
+ * @path: path to resolve the pathname for
+ * @buf: buffer to return the resolved pathname in
+ * @buf__sz: length of the supplied buffer
+ *
+ * Resolve the pathname for the supplied *path* and store it in *buf*. This BPF
+ * kfunc is the safer variant of the legacy bpf_d_path() helper and should be
+ * used in place of bpf_d_path() whenever possible. It enforces KF_TRUSTED_ARGS
+ * semantics, meaning that the supplied *path* must itself hold a valid
+ * reference, or else the BPF program will be outright rejected by the BPF
+ * verifier.
+ *
+ * This BPF kfunc may only be called from BPF LSM programs.
+ *
+ * Return: A positive integer corresponding to the length of the resolved
+ * pathname in *buf*, including the NUL termination character. On error, a
+ * negative integer is returned.
+ */
+__bpf_kfunc int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz)
+{
+ int len;
+ char *ret;
+
+ if (!buf__sz)
+ return -EINVAL;
+
+ ret = d_path(path, buf, buf__sz);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ len = buf + buf__sz - ret;
+ memmove(buf, ret, len);
+ return len;
+}
+
+/**
+ * bpf_get_dentry_xattr - get xattr of a dentry
+ * @dentry: dentry to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *dentry* and store the output in *value_p*.
+ *
+ * For security reasons, only *name__str* with prefix "user." is allowed.
+ *
+ * Return: length of the xattr value on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__str,
+ struct bpf_dynptr *value_p)
+{
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
+ struct inode *inode = d_inode(dentry);
+ u32 value_len;
+ void *value;
+ int ret;
+
+ if (WARN_ON(!inode))
+ return -EINVAL;
+
+ if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ return -EPERM;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data_rw(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
+ if (ret)
+ return ret;
+ return __vfs_getxattr(dentry, inode, name__str, value, value_len);
+}
+
+/**
+ * bpf_get_file_xattr - get xattr of a file
+ * @file: file to get xattr from
+ * @name__str: name of the xattr
+ * @value_p: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *file* and store the output in *value_p*.
+ *
+ * For security reasons, only *name__str* with prefix "user." is allowed.
+ *
+ * Return: length of the xattr value on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
+ struct bpf_dynptr *value_p)
+{
+ struct dentry *dentry;
+
+ dentry = file_dentry(file);
+ return bpf_get_dentry_xattr(dentry, name__str, value_p);
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(bpf_fs_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_get_task_exe_file,
+ KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_put_file, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_path_d_path, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)
+
+static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+ if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) ||
+ prog->type == BPF_PROG_TYPE_LSM)
+ return 0;
+ return -EACCES;
+}
+
+static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &bpf_fs_kfunc_set_ids,
+ .filter = bpf_fs_kfuncs_filter,
+};
+
+static int __init bpf_fs_kfuncs_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
+}
+
+late_initcall(bpf_fs_kfuncs_init);
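
The kfuncs register for BPF_PROG_TYPE_LSM only, and bpf_fs_kfuncs_filter() rejects every other program type. A hedged sketch of how a sleepable BPF LSM program might call them under the usual libbpf conventions; the extern declarations, hook choice, buffer size, and f_path access here are illustrative assumptions, not taken from this patch:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sleepable BPF LSM program (clang -target bpf), assuming
 * a generated vmlinux.h and the libbpf headers. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;
extern void bpf_put_file(struct file *file) __ksym;
extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym;

char path_buf[256];

SEC("lsm.s/file_open")
int BPF_PROG(check_open, struct file *file)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct file *exe = bpf_get_task_exe_file(task);	/* KF_ACQUIRE */

	if (!exe)
		return 0;

	/* Resolve the opener's executable path; returns length or error. */
	bpf_path_d_path(&exe->f_path, path_buf, sizeof(path_buf));
	bpf_put_file(exe);	/* mandatory release of the acquired ref */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
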
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 9a4b7c119318..e152fde888fc 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -152,6 +152,7 @@ struct btrfs_inode {
* logged_trans), to access/update delalloc_bytes, new_delalloc_bytes,
* defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to
* update the VFS' inode number of bytes used.
+ * Also protects setting struct file::private_data.
*/
spinlock_t lock;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 1a44fb9845e3..317a3712270f 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -463,6 +463,8 @@ struct btrfs_file_private {
void *filldir_buf;
u64 last_index;
struct extent_state *llseek_cached_state;
+ /* Task that allocated this structure. */
+ struct task_struct *owner_task;
};
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index acf1f39e45d0..b95ef44c326b 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -213,6 +213,8 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
&fs_info->defrag_inodes, rb_node)
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ fs_info->defrag_inodes = RB_ROOT;
+
spin_unlock(&fs_info->defrag_inodes_lock);
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c5e36f58eb07..4fb521d91b06 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -3485,7 +3485,7 @@ static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
{
struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
- struct btrfs_file_private *private = file->private_data;
+ struct btrfs_file_private *private;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_state *cached_state = NULL;
struct extent_state **delalloc_cached_state;
@@ -3513,7 +3513,19 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
inode_get_bytes(&inode->vfs_inode) == i_size)
return i_size;
- if (!private) {
+ spin_lock(&inode->lock);
+ private = file->private_data;
+ spin_unlock(&inode->lock);
+
+ if (private && private->owner_task != current) {
+ /*
+ * Not allocated by us; don't use it, as its cached state is
+ * used by the task that allocated it, and we want neither to
+ * mess with it nor to get incorrect results, because it
+ * reflects an invalid state for the current task.
+ */
+ private = NULL;
+ } else if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL);
/*
* No worries if memory allocation failed.
@@ -3521,7 +3533,23 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
* lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
* so everything will still be correct.
*/
- file->private_data = private;
+ if (private) {
+ bool free = false;
+
+ private->owner_task = current;
+
+ spin_lock(&inode->lock);
+ if (file->private_data)
+ free = true;
+ else
+ file->private_data = private;
+ spin_unlock(&inode->lock);
+
+ if (free) {
+ kfree(private);
+ private = NULL;
+ }
+ }
}
if (private)
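
The fix closes a race where two tasks lseek the same file concurrently: each can observe a NULL private_data and allocate, so the winner publishes under inode->lock and the loser frees its copy, while owner_task keeps one task from consuming another's cached state. The pattern in isolation, as a userspace model with a mutex standing in for the spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct private { pthread_t owner; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct private *shared;		/* models file->private_data */

/* Allocate outside the lock, publish under it, and free the copy on
 * losing the race -- the same shape as the btrfs fix above. */
static struct private *get_private(void)
{
	pthread_mutex_lock(&lock);
	struct private *p = shared;
	pthread_mutex_unlock(&lock);

	if (p && !pthread_equal(p->owner, pthread_self()))
		return NULL;	/* another task's cached state: don't touch */
	if (p)
		return p;

	p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;	/* allocation failure is tolerated */
	p->owner = pthread_self();

	pthread_mutex_lock(&lock);
	bool lost = shared != NULL;
	if (!lost)
		shared = p;
	pthread_mutex_unlock(&lock);

	if (lost) {
		free(p);	/* another thread published first */
		p = NULL;
	}
	return p;
}
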
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 8537eb9b5531..226c91fe31a7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1310,12 +1310,12 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
} else {
struct fd src = fdget(fd);
struct inode *src_inode;
- if (!src.file) {
+ if (!fd_file(src)) {
ret = -EINVAL;
goto out_drop_write;
}
- src_inode = file_inode(src.file);
+ src_inode = file_inode(fd_file(src));
if (src_inode->i_sb != file_inode(file)->i_sb) {
btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
"Snapshot src from another FS");
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 634d69964fe4..7b50263723bc 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1517,7 +1517,7 @@ static int check_extent_item(struct extent_buffer *leaf,
dref_objectid > BTRFS_LAST_FREE_OBJECTID)) {
extent_err(leaf, slot,
"invalid data ref objectid value %llu",
- dref_root);
+ dref_objectid);
return -EUCLEAN;
}
if (unlikely(!IS_ALIGNED(dref_offset,
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index f53977169db4..2b3f9935dbb4 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -595,14 +595,12 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
* write and readdir but not lookup or open).
*/
touch_atime(&file->f_path);
- dput(dentry);
return true;
check_failed:
fscache_cookie_lookup_negative(object->cookie);
cachefiles_unmark_inode_in_use(object, file);
fput(file);
- dput(dentry);
if (ret == -ESTALE)
return cachefiles_create_file(object);
return false;
@@ -611,7 +609,6 @@ error_fput:
fput(file);
error:
cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
- dput(dentry);
return false;
}
@@ -654,7 +651,9 @@ bool cachefiles_look_up_object(struct cachefiles_object *object)
goto new_file;
}
- if (!cachefiles_open_file(object, dentry))
+ ret = cachefiles_open_file(object, dentry);
+ dput(dentry);
+ if (!ret)
return false;
_leave(" = t [%lu]", file_inode(object->file)->i_ino);
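
Moving the dput() out of cachefiles_open_file() into its only caller gives the dentry reference a single owner: the caller acquires it, the callee merely borrows it, and there is exactly one drop site on every path instead of three scattered inside the callee. The ownership shape, as a generic userspace model:

#include <stdbool.h>
#include <stdlib.h>

struct ref { int count; };

static void ref_put(struct ref *r)
{
	if (--r->count == 0)
		free(r);
}

/* Callee borrows: it may use r but never drops the reference. */
static bool use_borrowed(struct ref *r)
{
	return r->count > 0;
}

/* Caller owns: one acquisition, exactly one release on every path --
 * the shape cachefiles_look_up_object() now has. */
static bool look_up(struct ref *r)
{
	bool ok = use_borrowed(r);

	ref_put(r);		/* single drop site */
	return ok;
}
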
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5d9ccda098cc..53fef258c2bc 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -96,7 +96,6 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
/* dirty the head */
spin_lock(&ci->i_ceph_lock);
- BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
if (__ceph_have_pending_cap_snap(ci)) {
struct ceph_cap_snap *capsnap =
list_last_entry(&ci->i_cap_snaps,
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 808c9c048276..bed34fc11c91 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -10,6 +10,7 @@
#include <linux/writeback.h>
#include <linux/iversion.h>
#include <linux/filelock.h>
+#include <linux/jiffies.h>
#include "super.h"
#include "mds_client.h"
@@ -4149,7 +4150,7 @@ retry:
ceph_remove_cap(mdsc, cap, false);
goto out_unlock;
} else if (tsession) {
- /* add placeholder for the export tagert */
+ /* add placeholder for the export target */
int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
tcap = new_cap;
ceph_add_cap(inode, tsession, t_cap_id, issued, 0,
@@ -4602,7 +4603,7 @@ flush_cap_releases:
__ceph_queue_cap_release(session, cap);
spin_unlock(&session->s_cap_lock);
}
- ceph_flush_cap_releases(mdsc, session);
+ ceph_flush_session_cap_releases(mdsc, session);
goto done;
bad:
@@ -4659,7 +4660,7 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
* slowness doesn't block mdsc delayed work,
* preventing send_renew_caps() from running.
*/
- if (jiffies - loop_start >= 5 * HZ)
+ if (time_after_eq(jiffies, loop_start + 5 * HZ))
break;
}
spin_unlock(&mdsc->cap_delay_lock);
@@ -4701,6 +4702,28 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true);
}
+/*
+ * Flush all cap releases to the mds
+ */
+static void flush_cap_releases(struct ceph_mds_session *s)
+{
+ struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "begin\n");
+ spin_lock(&s->s_cap_lock);
+ if (s->s_num_cap_releases)
+ ceph_flush_session_cap_releases(mdsc, s);
+ spin_unlock(&s->s_cap_lock);
+ doutc(cl, "done\n");
+}
+
+void ceph_flush_cap_releases(struct ceph_mds_client *mdsc)
+{
+ ceph_mdsc_iterate_sessions(mdsc, flush_cap_releases, true);
+}
+
void __ceph_touch_fmode(struct ceph_inode_info *ci,
struct ceph_mds_client *mdsc, int fmode)
{
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index ddec8c9244ee..952109292d69 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -2058,7 +2058,7 @@ static int ceph_d_delete(const struct dentry *dentry)
return 0;
if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
return 0;
- /* vaild lease? */
+ /* valid lease? */
di = ceph_dentry(dentry);
if (di) {
if (__dentry_lease_is_valid(di))
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 4a8eec46254b..315ef02f9a3f 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1779,7 +1779,7 @@ retry_lookup:
if (err < 0)
goto done;
} else if (rinfo->head->is_dentry && req->r_dentry) {
- /* parent inode is not locked, be carefull */
+ /* parent inode is not locked, be careful */
struct ceph_vino *ptvino = NULL;
dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 276e34ab3e2c..c4a5fd94bbbb 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2266,7 +2266,7 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
trim_caps - remaining);
}
- ceph_flush_cap_releases(mdsc, session);
+ ceph_flush_session_cap_releases(mdsc, session);
return 0;
}
@@ -2420,7 +2420,7 @@ static void ceph_cap_release_work(struct work_struct *work)
ceph_put_mds_session(session);
}
-void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+void ceph_flush_session_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_client *cl = mdsc->fsc->client;
@@ -2447,7 +2447,7 @@ void __ceph_queue_cap_release(struct ceph_mds_session *session,
session->s_num_cap_releases++;
if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
- ceph_flush_cap_releases(session->s_mdsc, session);
+ ceph_flush_session_cap_releases(session->s_mdsc, session);
}
static void ceph_cap_reclaim_work(struct work_struct *work)
@@ -4340,7 +4340,7 @@ skip_cap_auths:
/* flush cap releases */
spin_lock(&session->s_cap_lock);
if (session->s_num_cap_releases)
- ceph_flush_cap_releases(mdsc, session);
+ ceph_flush_session_cap_releases(mdsc, session);
spin_unlock(&session->s_cap_lock);
send_flushmsg_ack(mdsc, session, seq);
@@ -4910,7 +4910,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
} else {
recon_state.msg_version = 2;
}
- /* trsaverse this session's caps */
+ /* traverse this session's caps */
err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
spin_lock(&session->s_cap_lock);
@@ -5446,7 +5446,7 @@ static void delayed_work(struct work_struct *work)
}
mutex_unlock(&mdsc->mutex);
- ceph_flush_cap_releases(mdsc, s);
+ ceph_flush_session_cap_releases(mdsc, s);
mutex_lock(&s->s_mutex);
if (renew_caps)
@@ -5877,6 +5877,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
mutex_unlock(&mdsc->mutex);
ceph_flush_dirty_caps(mdsc);
+ ceph_flush_cap_releases(mdsc);
spin_lock(&mdsc->cap_dirty_lock);
want_flush = mdsc->last_cap_flush_tid;
if (!list_empty(&mdsc->cap_flush_list)) {
@@ -6015,6 +6016,18 @@ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
ceph_mdsmap_destroy(mdsc->mdsmap);
kfree(mdsc->sessions);
ceph_caps_finalize(mdsc);
+
+ if (mdsc->s_cap_auths) {
+ int i;
+
+ for (i = 0; i < mdsc->s_cap_auths_num; i++) {
+ kfree(mdsc->s_cap_auths[i].match.gids);
+ kfree(mdsc->s_cap_auths[i].match.path);
+ kfree(mdsc->s_cap_auths[i].match.fs_name);
+ }
+ kfree(mdsc->s_cap_auths);
+ }
+
ceph_pool_perm_destroy(mdsc);
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 9bcc7f181bfe..3dd54587944a 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -559,9 +559,6 @@ extern struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s);
extern void ceph_put_mds_session(struct ceph_mds_session *s);
-extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
- struct ceph_msg *msg, int mds);
-
extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc);
@@ -602,8 +599,8 @@ extern void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
extern struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq);
extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
struct ceph_cap *cap);
-extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session);
+extern void ceph_flush_session_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
extern void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 0cdf84cd1791..73f321b52895 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -126,6 +126,7 @@ static int ceph_sync_fs(struct super_block *sb, int wait)
if (!wait) {
doutc(cl, "(non-blocking)\n");
ceph_flush_dirty_caps(fsc->mdsc);
+ ceph_flush_cap_releases(fsc->mdsc);
doutc(cl, "(non-blocking) done\n");
return 0;
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 6e817bf1337c..2508aa8950b7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1056,8 +1056,6 @@ extern int ceph_fill_trace(struct super_block *sb,
extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct ceph_mds_session *session);
-extern int ceph_inode_holds_cap(struct inode *inode, int mask);
-
extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
extern void __ceph_do_pending_vmtruncate(struct inode *inode);
@@ -1208,10 +1206,6 @@ static inline void ceph_init_inode_acls(struct inode *inode,
struct ceph_acl_sec_ctx *as_ctx)
{
}
-static inline int ceph_acl_chmod(struct dentry *dentry, struct inode *inode)
-{
- return 0;
-}
static inline void ceph_forget_all_cached_acls(struct inode *inode)
{
@@ -1270,6 +1264,7 @@ extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags);
extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
+extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc);
extern int ceph_drop_caps_for_unlink(struct inode *inode);
extern int ceph_encode_inode_release(void **p, struct inode *inode,
int mds, int drop, int unless, int force);
diff --git a/fs/coredump.c b/fs/coredump.c
index 53a78b6bbb5b..45737b43dda5 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -465,17 +465,7 @@ static bool dump_interrupted(void)
* but then we need to teach dump_write() to restart and clear
* TIF_SIGPENDING.
*/
- if (fatal_signal_pending(current)) {
- coredump_report_failure("interrupted: fatal signal pending");
- return true;
- }
-
- if (freezing(current)) {
- coredump_report_failure("interrupted: freezing");
- return true;
- }
-
- return false;
+ return fatal_signal_pending(current) || freezing(current);
}
static void wait_for_dump_helpers(struct file *file)
@@ -530,7 +520,7 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
return err;
}
-int do_coredump(const kernel_siginfo_t *siginfo)
+void do_coredump(const kernel_siginfo_t *siginfo)
{
struct core_state core_state;
struct core_name cn;
@@ -538,7 +528,7 @@ int do_coredump(const kernel_siginfo_t *siginfo)
struct linux_binfmt * binfmt;
const struct cred *old_cred;
struct cred *cred;
- int retval;
+ int retval = 0;
int ispipe;
size_t *argv = NULL;
int argc = 0;
@@ -562,20 +552,14 @@ int do_coredump(const kernel_siginfo_t *siginfo)
audit_core_dumps(siginfo->si_signo);
binfmt = mm->binfmt;
- if (!binfmt || !binfmt->core_dump) {
- retval = -ENOEXEC;
+ if (!binfmt || !binfmt->core_dump)
goto fail;
- }
- if (!__get_dumpable(cprm.mm_flags)) {
- retval = -EACCES;
+ if (!__get_dumpable(cprm.mm_flags))
goto fail;
- }
cred = prepare_creds();
- if (!cred) {
- retval = -EPERM;
+ if (!cred)
goto fail;
- }
/*
* We cannot trust fsuid as being the "true" uid of the process
* nor do we know its entire history. We only know it was tainted
@@ -604,7 +588,6 @@ int do_coredump(const kernel_siginfo_t *siginfo)
if (ispipe < 0) {
coredump_report_failure("format_corename failed, aborting core");
- retval = ispipe;
goto fail_unlock;
}
@@ -625,7 +608,6 @@ int do_coredump(const kernel_siginfo_t *siginfo)
* core_pattern process dies.
*/
coredump_report_failure("RLIMIT_CORE is set to 1, aborting core");
- retval = -EPERM;
goto fail_unlock;
}
cprm.limit = RLIM_INFINITY;
@@ -633,7 +615,6 @@ int do_coredump(const kernel_siginfo_t *siginfo)
dump_count = atomic_inc_return(&core_dump_count);
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
coredump_report_failure("over core_pipe_limit, skipping core dump");
- retval = -E2BIG;
goto fail_dropcount;
}
@@ -641,7 +622,6 @@ int do_coredump(const kernel_siginfo_t *siginfo)
GFP_KERNEL);
if (!helper_argv) {
coredump_report_failure("%s failed to allocate memory", __func__);
- retval = -ENOMEM;
goto fail_dropcount;
}
for (argi = 0; argi < argc; argi++)
@@ -667,16 +647,12 @@ int do_coredump(const kernel_siginfo_t *siginfo)
int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW |
O_LARGEFILE | O_EXCL;
- if (cprm.limit < binfmt->min_coredump) {
- coredump_report_failure("over coredump resource limit, skipping core dump");
- retval = -E2BIG;
+ if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
- }
if (need_suid_safe && cn.corename[0] != '/') {
coredump_report_failure(
"this process can only dump core to a fully qualified path, skipping core dump");
- retval = -EPERM;
goto fail_unlock;
}
@@ -722,28 +698,20 @@ int do_coredump(const kernel_siginfo_t *siginfo)
} else {
cprm.file = filp_open(cn.corename, open_flags, 0600);
}
- if (IS_ERR(cprm.file)) {
- retval = PTR_ERR(cprm.file);
+ if (IS_ERR(cprm.file))
goto fail_unlock;
- }
inode = file_inode(cprm.file);
- if (inode->i_nlink > 1) {
- retval = -EMLINK;
+ if (inode->i_nlink > 1)
goto close_fail;
- }
- if (d_unhashed(cprm.file->f_path.dentry)) {
- retval = -EEXIST;
+ if (d_unhashed(cprm.file->f_path.dentry))
goto close_fail;
- }
/*
* AK: actually i see no reason to not allow this for named
* pipes etc, but keep the previous behaviour for now.
*/
- if (!S_ISREG(inode->i_mode)) {
- retval = -EISDIR;
+ if (!S_ISREG(inode->i_mode))
goto close_fail;
- }
/*
* Don't dump core if the filesystem changed owner or mode
* of the file during file creation. This is an issue when
@@ -755,22 +723,17 @@ int do_coredump(const kernel_siginfo_t *siginfo)
current_fsuid())) {
coredump_report_failure("Core dump to %s aborted: "
"cannot preserve file owner", cn.corename);
- retval = -EPERM;
goto close_fail;
}
if ((inode->i_mode & 0677) != 0600) {
coredump_report_failure("Core dump to %s aborted: "
"cannot preserve file permissions", cn.corename);
- retval = -EPERM;
goto close_fail;
}
- if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) {
- retval = -EACCES;
+ if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
goto close_fail;
- }
- retval = do_truncate(idmap, cprm.file->f_path.dentry,
- 0, 0, cprm.file);
- if (retval)
+ if (do_truncate(idmap, cprm.file->f_path.dentry,
+ 0, 0, cprm.file))
goto close_fail;
}
@@ -786,15 +749,10 @@ int do_coredump(const kernel_siginfo_t *siginfo)
*/
if (!cprm.file) {
coredump_report_failure("Core dump to |%s disabled", cn.corename);
- retval = -EPERM;
goto close_fail;
}
- if (!dump_vma_snapshot(&cprm)) {
- coredump_report_failure("Can't get VMA snapshot for core dump |%s",
- cn.corename);
- retval = -EACCES;
+ if (!dump_vma_snapshot(&cprm))
goto close_fail;
- }
file_start_write(cprm.file);
core_dumped = binfmt->core_dump(&cprm);
@@ -810,21 +768,9 @@ int do_coredump(const kernel_siginfo_t *siginfo)
}
file_end_write(cprm.file);
free_vma_snapshot(&cprm);
- } else {
- coredump_report_failure("Core dump to %s%s has been interrupted",
- ispipe ? "|" : "", cn.corename);
- retval = -EAGAIN;
- goto fail;
}
- coredump_report(
- "written to %s%s: VMAs: %d, size %zu; core: %lld bytes, pos %lld",
- ispipe ? "|" : "", cn.corename,
- cprm.vma_count, cprm.vma_data_size, cprm.written, cprm.pos);
if (ispipe && core_pipe_limit)
wait_for_dump_helpers(cprm.file);
-
- retval = 0;
-
close_fail:
if (cprm.file)
filp_close(cprm.file, NULL);
@@ -839,7 +785,7 @@ fail_unlock:
fail_creds:
put_cred(cred);
fail:
- return retval;
+ return;
}
/*
@@ -859,16 +805,8 @@ static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
if (dump_interrupted())
return 0;
n = __kernel_write(file, addr, nr, &pos);
- if (n != nr) {
- if (n < 0)
- coredump_report_failure("failed when writing out, error %zd", n);
- else
- coredump_report_failure(
- "partially written out, only %zd(of %d) bytes written",
- n, nr);
-
+ if (n != nr)
return 0;
- }
file->f_pos = pos;
cprm->written += n;
cprm->pos += n;
@@ -881,16 +819,9 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
static char zeroes[PAGE_SIZE];
struct file *file = cprm->file;
if (file->f_mode & FMODE_LSEEK) {
- int ret;
-
- if (dump_interrupted())
+ if (dump_interrupted() ||
+ vfs_llseek(file, nr, SEEK_CUR) < 0)
return 0;
-
- ret = vfs_llseek(file, nr, SEEK_CUR);
- if (ret < 0) {
- coredump_report_failure("failed when seeking, error %d", ret);
- return 0;
- }
cprm->pos += nr;
return 1;
} else {
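
Taken together, the fs/coredump.c hunks above strip every assignment to retval along with the final return value, so do_coredump() now simply falls through its goto-based cleanup with nothing to propagate. A minimal sketch of that shape, using hypothetical helper names rather than the real ones:

static void produce_dump(struct ctx *c)
{
	if (!acquire_creds(c))		/* hypothetical helper */
		goto fail;
	if (!open_output(c))		/* hypothetical helper */
		goto fail_creds;
	write_report(c);
	close_output(c);
fail_creds:
	release_creds(c);		/* cleanup runs on success too */
fail:
	return;
}
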
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index c6f4a9a98b85..67299e8b734e 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -1218,7 +1218,6 @@ static const struct file_operations u32_array_fops = {
.open = u32_array_open,
.release = u32_array_release,
.read = u32_array_read,
- .llseek = no_llseek,
};
/**
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 7112958c2e5b..700a0cbb2f14 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -733,7 +733,6 @@ out:
static const struct file_operations dlm_rawmsg_fops = {
.open = simple_open,
.write = dlm_rawmsg_write,
- .llseek = no_llseek,
};
void *dlm_create_debug_comms_file(int nodeid, void *data)
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 7e9961639802..23c51d62f902 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -110,5 +110,4 @@ const struct file_operations efivarfs_file_operations = {
.open = simple_open,
.read = efivarfs_file_read,
.write = efivarfs_file_write,
- .llseek = no_llseek,
};
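
The .llseek = no_llseek removals here (debugfs, dlm, efivarfs) ride on a VFS change that makes a NULL ->llseek mean "seeking unsupported" (FMODE_LSEEK is only granted when ->llseek is present), so the explicit stub no longer buys anything. A sketch of the resulting table, with a hypothetical read handler:

static const struct file_operations example_fops = {
	.open = simple_open,
	.read = example_read,	/* hypothetical */
	/* no .llseek: lseek() now fails in vfs_llseek() by default */
};
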
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 9afdb722fa92..22c934f3a080 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -349,9 +349,9 @@ struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
struct eventfd_ctx *ctx;
struct fd f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- ctx = eventfd_ctx_fileget(f.file);
+ ctx = eventfd_ctx_fileget(fd_file(f));
fdput(f);
return ctx;
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 145f5349c612..1ae4542f0bd8 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -2261,17 +2261,17 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
error = -EBADF;
f = fdget(epfd);
- if (!f.file)
+ if (!fd_file(f))
goto error_return;
/* Get the "struct file *" for the target file */
tf = fdget(fd);
- if (!tf.file)
+ if (!fd_file(tf))
goto error_fput;
/* The target file descriptor must support poll */
error = -EPERM;
- if (!file_can_poll(tf.file))
+ if (!file_can_poll(fd_file(tf)))
goto error_tgt_fput;
/* Check if EPOLLWAKEUP is allowed */
@@ -2284,7 +2284,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
- if (f.file == tf.file || !is_file_epoll(f.file))
+ if (fd_file(f) == fd_file(tf) || !is_file_epoll(fd_file(f)))
goto error_tgt_fput;
/*
@@ -2295,7 +2295,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
if (op == EPOLL_CTL_MOD)
goto error_tgt_fput;
- if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
+ if (op == EPOLL_CTL_ADD && (is_file_epoll(fd_file(tf)) ||
(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
goto error_tgt_fput;
}
@@ -2304,7 +2304,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
- ep = f.file->private_data;
+ ep = fd_file(f)->private_data;
/*
* When we insert an epoll file descriptor inside another epoll file
@@ -2325,16 +2325,16 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
if (error)
goto error_tgt_fput;
if (op == EPOLL_CTL_ADD) {
- if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
- is_file_epoll(tf.file)) {
+ if (READ_ONCE(fd_file(f)->f_ep) || ep->gen == loop_check_gen ||
+ is_file_epoll(fd_file(tf))) {
mutex_unlock(&ep->mtx);
error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);
if (error)
goto error_tgt_fput;
loop_check_gen++;
full_check = 1;
- if (is_file_epoll(tf.file)) {
- tep = tf.file->private_data;
+ if (is_file_epoll(fd_file(tf))) {
+ tep = fd_file(tf)->private_data;
error = -ELOOP;
if (ep_loop_check(ep, tep) != 0)
goto error_tgt_fput;
@@ -2350,14 +2350,14 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
- epi = ep_find(ep, tf.file, fd);
+ epi = ep_find(ep, fd_file(tf), fd);
error = -EINVAL;
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
epds->events |= EPOLLERR | EPOLLHUP;
- error = ep_insert(ep, epds, tf.file, fd, full_check);
+ error = ep_insert(ep, epds, fd_file(tf), fd, full_check);
} else
error = -EEXIST;
break;
@@ -2438,7 +2438,7 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
/* Get the "struct file *" for the eventpoll file */
f = fdget(epfd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
/*
@@ -2446,14 +2446,14 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
* the user passed to us _is_ an eventpoll file.
*/
error = -EINVAL;
- if (!is_file_epoll(f.file))
+ if (!is_file_epoll(fd_file(f)))
goto error_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
- ep = f.file->private_data;
+ ep = fd_file(f)->private_data;
/* Time to fish for events ... */
error = ep_poll(ep, events, maxevents, to);
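
The eventfd and eventpoll hunks belong to a tree-wide switch from poking f.file directly to the fd_file() accessor, which keeps struct fd opaque so its representation can be changed later. The caller-side idiom, as a sketch:

static int with_fd(int fd)
{
	struct fd f = fdget(fd);

	if (!fd_file(f))
		return -EBADF;
	/* ... use fd_file(f); never dereference f's members directly ... */
	fdput(f);
	return 0;
}
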
diff --git a/fs/exec.c b/fs/exec.c
index dad402d55681..6c53920795c2 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -710,80 +710,6 @@ static int copy_strings_kernel(int argc, const char *const *argv,
#ifdef CONFIG_MMU
/*
- * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
- * the binfmt code determines where the new stack should reside, we shift it to
- * its final location. The process proceeds as follows:
- *
- * 1) Use shift to calculate the new vma endpoints.
- * 2) Extend vma to cover both the old and new ranges. This ensures the
- * arguments passed to subsequent functions are consistent.
- * 3) Move vma's page tables to the new range.
- * 4) Free up any cleared pgd range.
- * 5) Shrink the vma to cover only the new range.
- */
-static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long old_start = vma->vm_start;
- unsigned long old_end = vma->vm_end;
- unsigned long length = old_end - old_start;
- unsigned long new_start = old_start - shift;
- unsigned long new_end = old_end - shift;
- VMA_ITERATOR(vmi, mm, new_start);
- struct vm_area_struct *next;
- struct mmu_gather tlb;
-
- BUG_ON(new_start > new_end);
-
- /*
- * ensure there are no vmas between where we want to go
- * and where we are
- */
- if (vma != vma_next(&vmi))
- return -EFAULT;
-
- vma_iter_prev_range(&vmi);
- /*
- * cover the whole range: [new_start, old_end)
- */
- if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
- return -ENOMEM;
-
- /*
- * move the page tables downwards, on failure we rely on
- * process cleanup to remove whatever mess we made.
- */
- if (length != move_page_tables(vma, old_start,
- vma, new_start, length, false, true))
- return -ENOMEM;
-
- lru_add_drain();
- tlb_gather_mmu(&tlb, mm);
- next = vma_next(&vmi);
- if (new_end > old_start) {
- /*
- * when the old and new regions overlap clear from new_end.
- */
- free_pgd_range(&tlb, new_end, old_end, new_end,
- next ? next->vm_start : USER_PGTABLES_CEILING);
- } else {
- /*
- * otherwise, clean from old_start; this is done to not touch
- * the address space in [new_end, old_start) some architectures
- * have constraints on va-space that make this illegal (IA64) -
- * for the others its just a little faster.
- */
- free_pgd_range(&tlb, old_start, old_end, new_end,
- next ? next->vm_start : USER_PGTABLES_CEILING);
- }
- tlb_finish_mmu(&tlb);
-
- vma_prev(&vmi);
- /* Shrink the vma to just the new range */
- return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
-}
-
-/*
* Finalizes the stack vm_area_struct. The flags and permissions are updated,
* the stack is optionally relocated, and some extra space is added.
*/
@@ -876,7 +802,12 @@ int setup_arg_pages(struct linux_binprm *bprm,
/* Move stack pages down in memory. */
if (stack_shift) {
- ret = shift_arg_pages(vma, stack_shift);
+ /*
+ * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
+ * the binfmt code determines where the new stack should reside, we shift it to
+ * its final location.
+ */
+ ret = relocate_vma_down(vma, stack_shift);
if (ret)
goto out_unlock;
}
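
fs/exec.c loses its private shift_arg_pages() in favour of relocate_vma_down(), which by the look of the hunk now lives in core mm code. Assuming the helper keeps the contract spelled out in the comment deleted above, the caller's view reduces to one call:

/*
 * Assumed contract of relocate_vma_down(vma, shift), per the removed
 * comment:
 *   1. new_start = vma->vm_start - shift   (compute new endpoints)
 *   2. vma_expand() to cover [new_start, old_end)
 *   3. move_page_tables() down by shift
 *   4. free_pgd_range() for the vacated region
 *   5. vma_shrink() to keep only the new range
 * On failure, process teardown is relied on to clean up the mess.
 */
if (stack_shift) {
	ret = relocate_vma_down(vma, stack_shift);
	if (ret)
		goto out_unlock;
}
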
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index 0356c88252bd..ce9be95c9172 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -91,11 +91,8 @@ int exfat_load_bitmap(struct super_block *sb)
return -EIO;
type = exfat_get_entry_type(ep);
- if (type == TYPE_UNUSED)
- break;
- if (type != TYPE_BITMAP)
- continue;
- if (ep->dentry.bitmap.flags == 0x0) {
+ if (type == TYPE_BITMAP &&
+ ep->dentry.bitmap.flags == 0x0) {
int err;
err = exfat_allocate_bitmap(sb, ep);
@@ -103,6 +100,9 @@ int exfat_load_bitmap(struct super_block *sb)
return err;
}
brelse(bh);
+
+ if (type == TYPE_UNUSED)
+ return -EINVAL;
}
if (exfat_get_next_cluster(sb, &clu.dir))
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index ecc5db952deb..3cdc1de362a9 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -10,6 +10,7 @@
#include <linux/ratelimit.h>
#include <linux/nls.h>
#include <linux/blkdev.h>
+#include <uapi/linux/exfat.h>
#define EXFAT_ROOT_INO 1
@@ -148,6 +149,9 @@ enum {
#define DIR_CACHE_SIZE \
(DIV_ROUND_UP(EXFAT_DEN_TO_B(ES_MAX_ENTRY_NUM), SECTOR_SIZE) + 1)
+/* Superblock flags */
+#define EXFAT_FLAGS_SHUTDOWN 1
+
struct exfat_dentry_namebuf {
char *lfn;
int lfnbuf_len; /* usually MAX_UNINAME_BUF_SIZE */
@@ -267,6 +271,8 @@ struct exfat_sb_info {
unsigned int clu_srch_ptr; /* cluster search pointer */
unsigned int used_clusters; /* number of used clusters */
+ unsigned long s_exfat_flags; /* Exfat superblock flags */
+
struct mutex s_lock; /* superblock lock */
struct mutex bitmap_lock; /* bitmap lock */
struct exfat_mount_options options;
@@ -309,13 +315,6 @@ struct exfat_inode_info {
/* for avoiding the race between alloc and free */
unsigned int cache_valid_id;
- /*
- * NOTE: i_size_ondisk is 64bits, so must hold ->inode_lock to access.
- * physically allocated size.
- */
- loff_t i_size_ondisk;
- /* block-aligned i_size (used in cont_write_begin) */
- loff_t i_size_aligned;
/* on-disk position of directory entry or 0 */
loff_t i_pos;
loff_t valid_size;
@@ -338,6 +337,11 @@ static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
return container_of(inode, struct exfat_inode_info, vfs_inode);
}
+static inline int exfat_forced_shutdown(struct super_block *sb)
+{
+ return test_bit(EXFAT_FLAGS_SHUTDOWN, &EXFAT_SB(sb)->s_exfat_flags);
+}
+
/*
* If ->i_mode can't hold 0222 (i.e. ATTR_RO), we use ->i_attrs to
* save ATTR_RO instead of ->i_mode.
@@ -417,6 +421,11 @@ static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
}
+static inline loff_t exfat_ondisk_size(const struct inode *inode)
+{
+ return ((loff_t)inode->i_blocks) << 9;
+}
+
/* super.c */
int exfat_set_volume_dirty(struct super_block *sb);
int exfat_clear_volume_dirty(struct super_block *sb);
@@ -461,6 +470,7 @@ int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long exfat_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+int exfat_force_shutdown(struct super_block *sb, u32 flags);
/* namei.c */
extern const struct dentry_operations exfat_dentry_ops;
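
The new EXFAT_FLAGS_SHUTDOWN bit and exfat_forced_shutdown() test give the later hunks a uniform fence: every entry point that could dirty the filesystem bails out early once the bit is set. The pattern, sketched with a hypothetical operation:

static int exfat_example_op(struct inode *inode)
{
	if (unlikely(exfat_forced_shutdown(inode->i_sb)))
		return -EIO;	/* filesystem has been shut down */
	/* ... normal work ... */
	return 0;
}
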
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index e19469e88000..a25d7eb789f4 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -29,7 +29,7 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
if (ret)
return ret;
- num_clusters = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
new_num_clusters = EXFAT_B_TO_CLU_ROUND_UP(size, sbi);
if (new_num_clusters == num_clusters)
@@ -74,8 +74,6 @@ out:
/* Expanded range not zeroed, do not update valid_size */
i_size_write(inode, size);
- ei->i_size_aligned = round_up(size, sb->s_blocksize);
- ei->i_size_ondisk = ei->i_size_aligned;
inode->i_blocks = round_up(size, sbi->cluster_size) >> 9;
mark_inode_dirty(inode);
@@ -159,7 +157,7 @@ int __exfat_truncate(struct inode *inode)
exfat_set_volume_dirty(sb);
num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi);
- num_clusters_phys = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters_phys = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
exfat_chain_set(&clu, ei->start_clu, num_clusters_phys, ei->flags);
@@ -245,8 +243,6 @@ void exfat_truncate(struct inode *inode)
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- unsigned int blocksize = i_blocksize(inode);
- loff_t aligned_size;
int err;
mutex_lock(&sbi->s_lock);
@@ -264,17 +260,6 @@ void exfat_truncate(struct inode *inode)
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
write_size:
- aligned_size = i_size_read(inode);
- if (aligned_size & (blocksize - 1)) {
- aligned_size |= (blocksize - 1);
- aligned_size++;
- }
-
- if (ei->i_size_ondisk > i_size_read(inode))
- ei->i_size_ondisk = aligned_size;
-
- if (ei->i_size_aligned > i_size_read(inode))
- ei->i_size_aligned = aligned_size;
mutex_unlock(&sbi->s_lock);
}
@@ -302,6 +287,9 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
unsigned int ia_valid;
int error;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size > i_size_read(inode)) {
error = exfat_cont_expand(inode, attr->ia_size);
@@ -485,6 +473,19 @@ static int exfat_ioctl_fitrim(struct inode *inode, unsigned long arg)
return 0;
}
+static int exfat_ioctl_shutdown(struct super_block *sb, unsigned long arg)
+{
+ u32 flags;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return exfat_force_shutdown(sb, flags);
+}
+
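
From userspace, the new ioctl can be driven against any open fd on the mount. A hedged sketch (the header path and constant spellings are taken from the hunks above; everything else is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/exfat.h>

int main(void)
{
	__u32 flags = EXFAT_GOING_DOWN_FULLSYNC;
	int fd = open("/mnt/exfat", O_RDONLY);	/* assumed mount point */

	if (fd < 0 || ioctl(fd, EXFAT_IOC_SHUTDOWN, &flags) != 0)
		perror("exfat shutdown");
	return 0;
}
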
long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -495,6 +496,8 @@ long exfat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return exfat_ioctl_get_attributes(inode, user_attr);
case FAT_IOCTL_SET_ATTRIBUTES:
return exfat_ioctl_set_attributes(filp, user_attr);
+ case EXFAT_IOC_SHUTDOWN:
+ return exfat_ioctl_shutdown(inode->i_sb, arg);
case FITRIM:
return exfat_ioctl_fitrim(inode, arg);
default:
@@ -515,6 +518,9 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
struct inode *inode = filp->f_mapping->host;
int err;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
err = __generic_file_fsync(filp, start, end, datasync);
if (err)
return err;
@@ -526,32 +532,32 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
return blkdev_issue_flush(inode->i_sb->s_bdev);
}
-static int exfat_file_zeroed_range(struct file *file, loff_t start, loff_t end)
+static int exfat_extend_valid_size(struct file *file, loff_t new_valid_size)
{
int err;
+ loff_t pos;
struct inode *inode = file_inode(file);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *ops = mapping->a_ops;
- while (start < end) {
- u32 zerofrom, len;
+ pos = ei->valid_size;
+ while (pos < new_valid_size) {
+ u32 len;
struct folio *folio;
- zerofrom = start & (PAGE_SIZE - 1);
- len = PAGE_SIZE - zerofrom;
- if (start + len > end)
- len = end - start;
+ len = PAGE_SIZE - (pos & (PAGE_SIZE - 1));
+ if (pos + len > new_valid_size)
+ len = new_valid_size - pos;
- err = ops->write_begin(file, mapping, start, len, &folio, NULL);
+ err = ops->write_begin(file, mapping, pos, len, &folio, NULL);
if (err)
goto out;
- folio_zero_range(folio, offset_in_folio(folio, start), len);
-
- err = ops->write_end(file, mapping, start, len, len, folio, NULL);
+ err = ops->write_end(file, mapping, pos, len, len, folio, NULL);
if (err < 0)
goto out;
- start += len;
+ pos += len;
balance_dirty_pages_ratelimited(mapping);
cond_resched();
@@ -579,7 +585,7 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
goto unlock;
if (pos > valid_size) {
- ret = exfat_file_zeroed_range(file, valid_size, pos);
+ ret = exfat_extend_valid_size(file, pos);
if (ret < 0 && ret != -ENOSPC) {
exfat_err(inode->i_sb,
"write: fail to zero from %llu to %llu(%zd)",
@@ -613,26 +619,46 @@ unlock:
return ret;
}
-static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf)
{
- int ret;
+ int err;
+ struct vm_area_struct *vma = vmf->vma;
+ struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct exfat_inode_info *ei = EXFAT_I(inode);
- loff_t start = ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- loff_t end = min_t(loff_t, i_size_read(inode),
+ loff_t start, end;
+
+ if (!inode_trylock(inode))
+ return VM_FAULT_RETRY;
+
+ start = ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ end = min_t(loff_t, i_size_read(inode),
start + vma->vm_end - vma->vm_start);
- if ((vma->vm_flags & VM_WRITE) && ei->valid_size < end) {
- ret = exfat_file_zeroed_range(file, ei->valid_size, end);
- if (ret < 0) {
- exfat_err(inode->i_sb,
- "mmap: fail to zero from %llu to %llu(%d)",
- start, end, ret);
- return ret;
+ if (ei->valid_size < end) {
+ err = exfat_extend_valid_size(file, end);
+ if (err < 0) {
+ inode_unlock(inode);
+ return vmf_fs_error(err);
}
}
- return generic_file_mmap(file, vma);
+ inode_unlock(inode);
+
+ return filemap_page_mkwrite(vmf);
+}
+
+static const struct vm_operations_struct exfat_file_vm_ops = {
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = exfat_page_mkwrite,
+};
+
+static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+ vma->vm_ops = &exfat_file_vm_ops;
+ return 0;
}
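
Instead of zeroing the whole mapped range at mmap() time, the rewritten path defers the work to the first write fault: exfat_page_mkwrite() extends valid_size only for the page actually being dirtied. The general shape of a page_mkwrite handler that needs a sleeping lock, mirroring the hunk above:

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);

	if (!inode_trylock(inode))
		return VM_FAULT_RETRY;	/* fault core re-drives the fault */
	/* ... extend on-disk valid data to cover the faulting page ... */
	inode_unlock(inode);
	return filemap_page_mkwrite(vmf);
}
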
const struct file_operations exfat_file_operations = {
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 05f0e07b01d0..d724de8f57bf 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -102,6 +102,9 @@ int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int ret;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
@@ -130,11 +133,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
unsigned int local_clu_offset = clu_offset;
- unsigned int num_to_be_allocated = 0, num_clusters = 0;
+ unsigned int num_to_be_allocated = 0, num_clusters;
- if (ei->i_size_ondisk > 0)
- num_clusters =
- EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+ num_clusters = EXFAT_B_TO_CLU(exfat_ondisk_size(inode), sbi);
if (clu_offset >= num_clusters)
num_to_be_allocated = clu_offset - num_clusters + 1;
@@ -260,21 +261,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
return 0;
}
-static int exfat_map_new_buffer(struct exfat_inode_info *ei,
- struct buffer_head *bh, loff_t pos)
-{
- if (buffer_delay(bh) && pos > ei->i_size_aligned)
- return -EIO;
- set_buffer_new(bh);
-
- /*
- * Adjust i_size_aligned if i_size_ondisk is bigger than it.
- */
- if (ei->i_size_ondisk > ei->i_size_aligned)
- ei->i_size_aligned = ei->i_size_ondisk;
- return 0;
-}
-
static int exfat_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -288,7 +274,6 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
sector_t last_block;
sector_t phys = 0;
sector_t valid_blks;
- loff_t pos;
mutex_lock(&sbi->s_lock);
last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
@@ -316,12 +301,6 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
mapped_blocks = sbi->sect_per_clus - sec_offset;
max_blocks = min(mapped_blocks, max_blocks);
- pos = EXFAT_BLK_TO_B((iblock + 1), sb);
- if ((create && iblock >= last_block) || buffer_delay(bh_result)) {
- if (ei->i_size_ondisk < pos)
- ei->i_size_ondisk = pos;
- }
-
map_bh(bh_result, sb, phys);
if (buffer_delay(bh_result))
clear_buffer_delay(bh_result);
@@ -342,13 +321,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
}
/* The area has not been written, map and mark as new. */
- err = exfat_map_new_buffer(ei, bh_result, pos);
- if (err) {
- exfat_fs_error(sb,
- "requested for bmap out of range(pos : (%llu) > i_size_aligned(%llu)\n",
- pos, ei->i_size_aligned);
- goto unlock_ret;
- }
+ set_buffer_new(bh_result);
ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
mark_inode_dirty(inode);
@@ -371,7 +344,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
* The block has been partially written,
* zero the unwritten part and map the block.
*/
- loff_t size, off;
+ loff_t size, off, pos;
max_blocks = 1;
@@ -382,7 +355,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
if (!bh_result->b_folio)
goto done;
- pos -= sb->s_blocksize;
+ pos = EXFAT_BLK_TO_B(iblock, sb);
size = ei->valid_size - pos;
off = pos & (PAGE_SIZE - 1);
@@ -432,6 +405,9 @@ static void exfat_readahead(struct readahead_control *rac)
static int exfat_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
+ if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
+ return -EIO;
+
return mpage_writepages(mapping, wbc, exfat_get_block);
}
@@ -452,6 +428,9 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping,
{
int ret;
+ if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
+ return -EIO;
+
ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block);
if (ret < 0)
@@ -469,14 +448,6 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
int err;
err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
-
- if (ei->i_size_aligned < i_size_read(inode)) {
- exfat_fs_error(inode->i_sb,
- "invalid size(size(%llu) > aligned(%llu)\n",
- i_size_read(inode), ei->i_size_aligned);
- return -EIO;
- }
-
if (err < len)
exfat_write_failed(mapping, pos+len);
@@ -504,20 +475,6 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
int rw = iov_iter_rw(iter);
ssize_t ret;
- if (rw == WRITE) {
- /*
- * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
- * so we need to update the ->i_size_aligned to block boundary.
- *
- * But we must fill the remaining area or hole by nul for
- * updating ->i_size_aligned
- *
- * Return 0, and fallback to normal buffered write.
- */
- if (EXFAT_I(inode)->i_size_aligned < size)
- return 0;
- }
-
/*
* Need to use the DIO_LOCKING for avoiding the race
* condition of exfat_get_block() and ->truncate().
@@ -531,8 +488,18 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
} else
size = pos + ret;
- /* zero the unwritten part in the partially written block */
- if (rw == READ && pos < ei->valid_size && ei->valid_size < size) {
+ if (rw == WRITE) {
+ /*
+ * If the block had been partially written before this write,
+ * ->valid_size will not be updated in exfat_get_block(),
+ * update it here.
+ */
+ if (ei->valid_size < size) {
+ ei->valid_size = size;
+ mark_inode_dirty(inode);
+ }
+ } else if (pos < ei->valid_size && ei->valid_size < size) {
+ /* zero the unwritten part in the partially written block */
iov_iter_revert(iter, size - ei->valid_size);
iov_iter_zero(size - ei->valid_size, iter);
}
@@ -667,15 +634,6 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
i_size_write(inode, size);
- /* ondisk and aligned size should be aligned with block size */
- if (size & (inode->i_sb->s_blocksize - 1)) {
- size |= (inode->i_sb->s_blocksize - 1);
- size++;
- }
-
- ei->i_size_aligned = size;
- ei->i_size_ondisk = size;
-
exfat_save_attr(inode, info->attr);
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 631ad9e8e32a..2c4c44229352 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -372,8 +372,6 @@ static int exfat_find_empty_entry(struct inode *inode,
/* directory inode should be updated in here */
i_size_write(inode, size);
- ei->i_size_ondisk += sbi->cluster_size;
- ei->i_size_aligned += sbi->cluster_size;
ei->valid_size += sbi->cluster_size;
ei->flags = p_dir->flags;
inode->i_blocks += sbi->cluster_size >> 9;
@@ -549,6 +547,9 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
int err;
loff_t size = i_size_read(dir);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
@@ -772,6 +773,9 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
struct exfat_entry_set_cache es;
int entry, err = 0;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_chain_dup(&cdir, &ei->dir);
entry = ei->entry;
@@ -825,6 +829,9 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
int err;
loff_t size = i_size_read(dir);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
@@ -915,6 +922,9 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
struct exfat_entry_set_cache es;
int entry, err;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
exfat_chain_dup(&cdir, &ei->dir);
@@ -982,6 +992,9 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
struct exfat_entry_set_cache old_es, new_es;
int sync = IS_DIRSYNC(inode);
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return -EIO;
+
num_new_entries = exfat_calc_num_entries(p_uniname);
if (num_new_entries < 0)
return num_new_entries;
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index afdf13c34ff5..1ac011088ce7 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -779,8 +779,11 @@ int exfat_create_upcase_table(struct super_block *sb)
le32_to_cpu(ep->dentry.upcase.checksum));
brelse(bh);
- if (ret && ret != -EIO)
+ if (ret && ret != -EIO) {
+ /* free memory from exfat_load_upcase_table call */
+ exfat_free_upcase_table(sbi);
goto load_default;
+ }
/* load successfully */
return ret;
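
The nls.c change fixes a classic fallback leak: the error path that switches to the built-in table forgot to free what the failed on-disk load had allocated. In miniature, with hypothetical names:

	ret = load_table(sbi);
	if (ret && ret != -EIO) {
		free_table(sbi);	/* previously missing: leaked here */
		goto load_default;
	}
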
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 323ecebe6f0e..bd57844414aa 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -46,6 +46,9 @@ static int exfat_sync_fs(struct super_block *sb, int wait)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
int err = 0;
+ if (unlikely(exfat_forced_shutdown(sb)))
+ return 0;
+
if (!wait)
return 0;
@@ -167,6 +170,41 @@ static int exfat_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
+int exfat_force_shutdown(struct super_block *sb, u32 flags)
+{
+ int ret;
+ struct exfat_sb_info *sbi = sb->s_fs_info;
+ struct exfat_mount_options *opts = &sbi->options;
+
+ if (exfat_forced_shutdown(sb))
+ return 0;
+
+ switch (flags) {
+ case EXFAT_GOING_DOWN_DEFAULT:
+ case EXFAT_GOING_DOWN_FULLSYNC:
+ ret = bdev_freeze(sb->s_bdev);
+ if (ret)
+ return ret;
+ bdev_thaw(sb->s_bdev);
+ set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags);
+ break;
+ case EXFAT_GOING_DOWN_NOSYNC:
+ set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (opts->discard)
+ opts->discard = 0;
+ return 0;
+}
+
+static void exfat_shutdown(struct super_block *sb)
+{
+ exfat_force_shutdown(sb, EXFAT_GOING_DOWN_NOSYNC);
+}
+
static struct inode *exfat_alloc_inode(struct super_block *sb)
{
struct exfat_inode_info *ei;
@@ -193,6 +231,7 @@ static const struct super_operations exfat_sops = {
.sync_fs = exfat_sync_fs,
.statfs = exfat_statfs,
.show_options = exfat_show_options,
+ .shutdown = exfat_shutdown,
};
enum {
@@ -370,8 +409,6 @@ static int exfat_read_root(struct inode *inode)
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
- ei->i_size_aligned = i_size_read(inode);
- ei->i_size_ondisk = i_size_read(inode);
exfat_save_attr(inode, EXFAT_ATTR_SUBDIR);
ei->i_crtime = simple_inode_init_ts(inode);
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index cd725bebe69e..2a135075468d 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -18,15 +18,17 @@ unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
int ext4_inode_bitmap_csum_verify(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
__u32 hi;
__u32 provided, calculated;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz;
if (!ext4_has_metadata_csum(sb))
return 1;
+ sz = EXT4_INODES_PER_GROUP(sb) >> 3;
provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
@@ -40,14 +42,16 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb,
void ext4_inode_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
__u32 csum;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz;
if (!ext4_has_metadata_csum(sb))
return;
+ sz = EXT4_INODES_PER_GROUP(sb) >> 3;
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
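
Dropping the sz parameter from the inode bitmap checksum helpers removes the chance of a caller passing a length that disagrees with the group's real bitmap size; the value is now derived at the point of use, one bit per inode:

	sz = EXT4_INODES_PER_GROUP(sb) >> 3;	/* bits -> bytes */
	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
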
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 13196afe55ce..ef6a3c8f3a9a 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -280,12 +280,20 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
struct fscrypt_str de_name =
FSTR_INIT(de->name,
de->name_len);
+ u32 hash;
+ u32 minor_hash;
+
+ if (IS_CASEFOLDED(inode)) {
+ hash = EXT4_DIRENT_HASH(de);
+ minor_hash = EXT4_DIRENT_MINOR_HASH(de);
+ } else {
+ hash = 0;
+ minor_hash = 0;
+ }
/* Directory is encrypted */
err = fscrypt_fname_disk_to_usr(inode,
- EXT4_DIRENT_HASH(de),
- EXT4_DIRENT_MINOR_HASH(de),
- &de_name, &fstr);
+ hash, minor_hash, &de_name, &fstr);
de_name = fstr;
fstr.len = save_len;
if (err)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ecc15e5f1eba..44b0d418143c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1058,6 +1058,7 @@ struct ext4_inode_info {
/* Number of ongoing updates on this inode */
atomic_t i_fc_updates;
+ atomic_t i_unwritten; /* Nr. of inflight conversions pending */
/* Fast commit wait queue for this inode */
wait_queue_head_t i_fc_wait;
@@ -1106,6 +1107,10 @@ struct ext4_inode_info {
/* mballoc */
atomic_t i_prealloc_active;
+
+ /* allocation reservation info for delalloc */
+	/* In case of bigalloc, this refers to clusters rather than blocks */
+ unsigned int i_reserved_data_blocks;
struct rb_root i_prealloc_node;
rwlock_t i_prealloc_lock;
@@ -1122,10 +1127,6 @@ struct ext4_inode_info {
/* ialloc */
ext4_group_t i_last_alloc_group;
- /* allocation reservation info for delalloc */
-	/* In case of bigalloc, this refers to clusters rather than blocks */
- unsigned int i_reserved_data_blocks;
-
/* pending cluster reservations for bigalloc file systems */
struct ext4_pending_tree i_pending_tree;
@@ -1149,7 +1150,6 @@ struct ext4_inode_info {
*/
struct list_head i_rsv_conversion_list;
struct work_struct i_rsv_conversion_work;
- atomic_t i_unwritten; /* Nr. of inflight conversions pending */
spinlock_t i_block_reservation_lock;
@@ -2338,9 +2338,9 @@ struct ext4_dir_entry_2 {
((struct ext4_dir_entry_hash *) \
(((void *)(entry)) + \
((8 + (entry)->name_len + EXT4_DIR_ROUND) & ~EXT4_DIR_ROUND)))
-#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(de)->hash)
+#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(entry)->hash)
#define EXT4_DIRENT_MINOR_HASH(entry) \
- le32_to_cpu(EXT4_DIRENT_HASHES(de)->minor_hash)
+ le32_to_cpu(EXT4_DIRENT_HASHES(entry)->minor_hash)
static inline bool ext4_hash_in_dirent(const struct inode *inode)
{
@@ -2462,6 +2462,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
#define DX_HASH_HALF_MD4_UNSIGNED 4
#define DX_HASH_TEA_UNSIGNED 5
#define DX_HASH_SIPHASH 6
+#define DX_HASH_LAST DX_HASH_SIPHASH
static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
const void *address, unsigned int length)
@@ -2695,10 +2696,10 @@ struct mmpd_data {
extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
void ext4_inode_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
+ struct buffer_head *bh);
int ext4_inode_bitmap_csum_verify(struct super_block *sb,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
+ struct buffer_head *bh);
void ext4_block_bitmap_csum_set(struct super_block *sb,
struct ext4_group_desc *gdp,
struct buffer_head *bh);
@@ -3712,11 +3713,12 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
int num,
struct ext4_ext_path *path);
-extern int ext4_ext_insert_extent(handle_t *, struct inode *,
- struct ext4_ext_path **,
- struct ext4_extent *, int);
+extern struct ext4_ext_path *ext4_ext_insert_extent(
+ handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext, int gb_flags);
extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
- struct ext4_ext_path **,
+ struct ext4_ext_path *,
int flags);
extern void ext4_free_ext_path(struct ext4_ext_path *);
extern int ext4_ext_check_inode(struct inode *inode);
@@ -3853,6 +3855,9 @@ static inline int ext4_buffer_uptodate(struct buffer_head *bh)
return buffer_uptodate(bh);
}
+extern int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+ loff_t pos, unsigned len,
+ get_block_t *get_block);
#endif /* __KERNEL__ */
#define EFSBADCRC EBADMSG /* Bad CRC detected */
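
The EXT4_DIRENT_HASH change in this header fixes an unhygienic macro: the old body referenced de, a name from the caller's scope, rather than its own entry parameter, so it only compiled where a variable called de happened to exist, and silently hashed that entry instead of the argument. Side by side:

#define DIRENT_HASH_BROKEN(entry) le32_to_cpu(EXT4_DIRENT_HASHES(de)->hash)    /* captures caller's de */
#define DIRENT_HASH_FIXED(entry)  le32_to_cpu(EXT4_DIRENT_HASHES(entry)->hash) /* uses the parameter */
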
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e067f2dd0335..34e25eee6521 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -84,12 +84,11 @@ static void ext4_extent_block_csum_set(struct inode *inode,
et->et_checksum = ext4_extent_block_csum(inode, eh);
}
-static int ext4_split_extent_at(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- ext4_lblk_t split,
- int split_flag,
- int flags);
+static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t split,
+ int split_flag, int flags);
static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
@@ -106,21 +105,27 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
return 0;
}
+static inline void ext4_ext_path_brelse(struct ext4_ext_path *path)
+{
+ brelse(path->p_bh);
+ path->p_bh = NULL;
+}
+
static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
int depth, i;
- if (!path)
+ if (IS_ERR_OR_NULL(path))
return;
depth = path->p_depth;
- for (i = 0; i <= depth; i++, path++) {
- brelse(path->p_bh);
- path->p_bh = NULL;
- }
+ for (i = 0; i <= depth; i++, path++)
+ ext4_ext_path_brelse(path);
}
void ext4_free_ext_path(struct ext4_ext_path *path)
{
+ if (IS_ERR_OR_NULL(path))
+ return;
ext4_ext_drop_refs(path);
kfree(path);
}
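
Teaching ext4_free_ext_path() and ext4_ext_drop_refs() to tolerate NULL and ERR_PTR values lets the refactored callers funnel every exit through a single unconditional cleanup; a sketch:

	path = ext4_find_extent(inode, lblk, path, 0);
	if (IS_ERR(path))
		err = PTR_ERR(path);
	/* ... */
	ext4_free_ext_path(path);	/* no-op on NULL, safe on ERR_PTR */
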
@@ -323,19 +328,18 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
return size;
}
-static inline int
+static inline struct ext4_ext_path *
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
- struct ext4_ext_path **ppath, ext4_lblk_t lblk,
+ struct ext4_ext_path *path, ext4_lblk_t lblk,
int nofail)
{
- struct ext4_ext_path *path = *ppath;
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
if (nofail)
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
- return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
+ return ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
flags);
}
@@ -635,8 +639,7 @@ int ext4_ext_precache(struct inode *inode)
*/
if ((i == depth) ||
path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
continue;
}
@@ -689,7 +692,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
struct ext4_extent *ex;
int i;
- if (!path)
+ if (IS_ERR_OR_NULL(path))
return;
eh = path[depth].p_hdr;
@@ -881,11 +884,10 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
- struct ext4_ext_path **orig_path, int flags)
+ struct ext4_ext_path *path, int flags)
{
struct ext4_extent_header *eh;
struct buffer_head *bh;
- struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
short int depth, i, ppos = 0;
int ret;
gfp_t gfp_flags = GFP_NOFS;
@@ -906,7 +908,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
ext4_ext_drop_refs(path);
if (depth > path[0].p_maxdepth) {
kfree(path);
- *orig_path = path = NULL;
+ path = NULL;
}
}
if (!path) {
@@ -961,8 +963,6 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
err:
ext4_free_ext_path(path);
- if (orig_path)
- *orig_path = NULL;
return ERR_PTR(ret);
}
@@ -1395,15 +1395,15 @@ out:
* finds empty index and adds new leaf.
* if no free index is found, then it requests in-depth growing.
*/
-static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
- unsigned int mb_flags,
- unsigned int gb_flags,
- struct ext4_ext_path **ppath,
- struct ext4_extent *newext)
+static struct ext4_ext_path *
+ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ unsigned int mb_flags, unsigned int gb_flags,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_ext_path *curp;
int depth, i, err = 0;
+ ext4_lblk_t ee_block = le32_to_cpu(newext->ee_block);
repeat:
i = depth = ext_depth(inode);
@@ -1422,42 +1422,38 @@ repeat:
* entry: create all needed subtree and add new leaf */
err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
if (err)
- goto out;
+ goto errout;
/* refill path */
- path = ext4_find_extent(inode,
- (ext4_lblk_t)le32_to_cpu(newext->ee_block),
- ppath, gb_flags);
- if (IS_ERR(path))
- err = PTR_ERR(path);
- } else {
- /* tree is full, time to grow in depth */
- err = ext4_ext_grow_indepth(handle, inode, mb_flags);
- if (err)
- goto out;
+ path = ext4_find_extent(inode, ee_block, path, gb_flags);
+ return path;
+ }
- /* refill path */
- path = ext4_find_extent(inode,
- (ext4_lblk_t)le32_to_cpu(newext->ee_block),
- ppath, gb_flags);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out;
- }
+ /* tree is full, time to grow in depth */
+ err = ext4_ext_grow_indepth(handle, inode, mb_flags);
+ if (err)
+ goto errout;
- /*
- * only first (depth 0 -> 1) produces free space;
- * in all other cases we have to split the grown tree
- */
- depth = ext_depth(inode);
- if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
- /* now we need to split */
- goto repeat;
- }
+ /* refill path */
+ path = ext4_find_extent(inode, ee_block, path, gb_flags);
+ if (IS_ERR(path))
+ return path;
+
+ /*
+ * only first (depth 0 -> 1) produces free space;
+ * in all other cases we have to split the grown tree
+ */
+ depth = ext_depth(inode);
+ if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
+ /* now we need to split */
+ goto repeat;
}
-out:
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
/*
@@ -1749,12 +1745,23 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
break;
err = ext4_ext_get_access(handle, inode, path + k);
if (err)
- break;
+ goto clean;
path[k].p_idx->ei_block = border;
err = ext4_ext_dirty(handle, inode, path + k);
if (err)
- break;
+ goto clean;
}
+ return 0;
+
+clean:
+ /*
+ * The path[k].p_bh is either unmodified or with no verified bit
+ * set (see ext4_ext_get_access()). So just clear the verified bit
+ * of the successfully modified extents buffers, which will force
+ * these extents to be checked to avoid using inconsistent data.
+ */
+ while (++k < depth)
+ clear_buffer_verified(path[k].p_bh);
return err;
}
@@ -1876,7 +1883,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
path[0].p_hdr->eh_max = cpu_to_le16(max_root);
- brelse(path[1].p_bh);
+ ext4_ext_path_brelse(path + 1);
ext4_free_blocks(handle, inode, NULL, blk, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}
@@ -1964,16 +1971,15 @@ out:
* inserts requested extent as new one into the tree,
* creating new leaf in the no-space case.
*/
-int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
- struct ext4_ext_path **ppath,
- struct ext4_extent *newext, int gb_flags)
+struct ext4_ext_path *
+ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *newext, int gb_flags)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent_header *eh;
struct ext4_extent *ex, *fex;
struct ext4_extent *nearex; /* nearest extent */
- struct ext4_ext_path *npath = NULL;
- int depth, len, err;
+ int depth, len, err = 0;
ext4_lblk_t next;
int mb_flags = 0, unwritten;
@@ -1981,14 +1987,16 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
mb_flags |= EXT4_MB_DELALLOC_RESERVED;
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
depth = ext_depth(inode);
ex = path[depth].p_ext;
eh = path[depth].p_hdr;
if (unlikely(path[depth].p_hdr == NULL)) {
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
/* try to insert block into found extent and return */
@@ -2026,7 +2034,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
- return err;
+ goto errout;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ ext4_ext_get_actual_len(newext));
@@ -2051,7 +2059,7 @@ prepend:
err = ext4_ext_get_access(handle, inode,
path + depth);
if (err)
- return err;
+ goto errout;
unwritten = ext4_ext_is_unwritten(ex);
ex->ee_block = newext->ee_block;
@@ -2076,21 +2084,26 @@ prepend:
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
+ struct ext4_ext_path *npath;
+
ext_debug(inode, "next leaf block - %u\n", next);
- BUG_ON(npath != NULL);
npath = ext4_find_extent(inode, next, NULL, gb_flags);
- if (IS_ERR(npath))
- return PTR_ERR(npath);
+ if (IS_ERR(npath)) {
+ err = PTR_ERR(npath);
+ goto errout;
+ }
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
ext_debug(inode, "next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
+ ext4_free_ext_path(path);
path = npath;
goto has_space;
}
ext_debug(inode, "next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+ ext4_free_ext_path(npath);
}
/*
@@ -2099,10 +2112,10 @@ prepend:
*/
if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
mb_flags |= EXT4_MB_USE_RESERVED;
- err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
- ppath, newext);
- if (err)
- goto cleanup;
+ path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+ path, newext);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
@@ -2111,7 +2124,7 @@ has_space:
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto cleanup;
+ goto errout;
if (!nearex) {
/* there is no extent in this leaf, create first one */
@@ -2169,17 +2182,20 @@ merge:
if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(handle, inode, path, nearex);
-
/* time to correct all indexes above */
err = ext4_ext_correct_indexes(handle, inode, path);
if (err)
- goto cleanup;
+ goto errout;
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+ if (err)
+ goto errout;
-cleanup:
- ext4_free_ext_path(npath);
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
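
This is the theme of the extents.c refactor: functions that used to take a struct ext4_ext_path ** and return an int now consume the path and hand back either a (possibly reallocated) path or an ERR_PTR, freeing the old path themselves on failure. Caller-side, that looks like:

	path = ext4_ext_insert_extent(handle, inode, path, &newex, gb_flags);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;	/* already freed by the callee */
		goto out;
	}
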
static int ext4_fill_es_cache_info(struct inode *inode,
@@ -2279,27 +2295,26 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
{
int err;
ext4_fsblk_t leaf;
+ int k = depth - 1;
/* free index block */
- depth--;
- path = path + depth;
- leaf = ext4_idx_pblock(path->p_idx);
- if (unlikely(path->p_hdr->eh_entries == 0)) {
- EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
+ leaf = ext4_idx_pblock(path[k].p_idx);
+ if (unlikely(path[k].p_hdr->eh_entries == 0)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr->eh_entries == 0", k);
return -EFSCORRUPTED;
}
- err = ext4_ext_get_access(handle, inode, path);
+ err = ext4_ext_get_access(handle, inode, path + k);
if (err)
return err;
- if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
- int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
+ if (path[k].p_idx != EXT_LAST_INDEX(path[k].p_hdr)) {
+ int len = EXT_LAST_INDEX(path[k].p_hdr) - path[k].p_idx;
len *= sizeof(struct ext4_extent_idx);
- memmove(path->p_idx, path->p_idx + 1, len);
+ memmove(path[k].p_idx, path[k].p_idx + 1, len);
}
- le16_add_cpu(&path->p_hdr->eh_entries, -1);
- err = ext4_ext_dirty(handle, inode, path);
+ le16_add_cpu(&path[k].p_hdr->eh_entries, -1);
+ err = ext4_ext_dirty(handle, inode, path + k);
if (err)
return err;
ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
@@ -2308,18 +2323,29 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
- while (--depth >= 0) {
- if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
+ while (--k >= 0) {
+ if (path[k + 1].p_idx != EXT_FIRST_INDEX(path[k + 1].p_hdr))
break;
- path--;
- err = ext4_ext_get_access(handle, inode, path);
+ err = ext4_ext_get_access(handle, inode, path + k);
if (err)
- break;
- path->p_idx->ei_block = (path+1)->p_idx->ei_block;
- err = ext4_ext_dirty(handle, inode, path);
+ goto clean;
+ path[k].p_idx->ei_block = path[k + 1].p_idx->ei_block;
+ err = ext4_ext_dirty(handle, inode, path + k);
if (err)
- break;
+ goto clean;
}
+ return 0;
+
+clean:
+ /*
+ * The path[k].p_bh is either unmodified or with no verified bit
+ * set (see ext4_ext_get_access()). So just clear the verified bit
+ * of the successfully modified extents buffers, which will force
+ * these extents to be checked to avoid using inconsistent data.
+ */
+ while (++k < depth)
+ clear_buffer_verified(path[k].p_bh);
+
return err;
}
@@ -2872,11 +2898,12 @@ again:
* fail removing space due to ENOSPC so try to use
* reserved block if that happens.
*/
- err = ext4_force_split_extent_at(handle, inode, &path,
- end + 1, 1);
- if (err < 0)
+ path = ext4_force_split_extent_at(handle, inode, path,
+ end + 1, 1);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
goto out;
-
+ }
} else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
partial.state == initial) {
/*
@@ -2934,8 +2961,7 @@ again:
err = ext4_ext_rm_leaf(handle, inode, path,
&partial, start, end);
/* root level has p_bh == NULL, brelse() eats this */
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
continue;
}
@@ -2997,8 +3023,7 @@ again:
err = ext4_ext_rm_idx(handle, inode, path, i);
}
/* root level has p_bh == NULL, brelse() eats this */
- brelse(path[i].p_bh);
- path[i].p_bh = NULL;
+ ext4_ext_path_brelse(path + i);
i--;
ext_debug(inode, "return to level %d\n", i);
}
@@ -3113,7 +3138,7 @@ static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
return;
ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
- EXTENT_STATUS_WRITTEN);
+ EXTENT_STATUS_WRITTEN, 0);
}
/* FIXME!! we need to try to merge to left or right after zero-out */
@@ -3147,16 +3172,14 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
* a> the extent is split into two extents.
* b> split is not needed, and just mark the extent.
*
- * return 0 on success.
+ * Return an extent path pointer on success, or an error pointer on failure.
*/
-static int ext4_split_extent_at(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- ext4_lblk_t split,
- int split_flag,
- int flags)
+static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t split,
+ int split_flag, int flags)
{
- struct ext4_ext_path *path = *ppath;
ext4_fsblk_t newblock;
ext4_lblk_t ee_block;
struct ext4_extent *ex, newex, orig_ex, zero_ex;
@@ -3226,10 +3249,31 @@ static int ext4_split_extent_at(handle_t *handle,
if (split_flag & EXT4_EXT_MARK_UNWRIT2)
ext4_ext_mark_unwritten(ex2);
- err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
- if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (!IS_ERR(path))
goto out;
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+ return path;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+ * Using EXT4_EX_NOFAIL guarantees that ext4_find_extent()
+ * will not return -ENOMEM, otherwise -ENOMEM will cause a
+ * retry in do_writepages(), and a WARN_ON may be triggered
+ * in ext4_da_update_reserve_space() due to an incorrect
+ * ee_len causing the i_reserved_data_blocks exception.
+ */
+ path = ext4_find_extent(inode, ee_block, NULL, flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+ return path;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+
if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
if (split_flag & EXT4_EXT_DATA_VALID1) {
@@ -3280,14 +3324,17 @@ fix_extent_len:
* and err is a non-zero error code.
*/
ext4_ext_dirty(handle, inode, path + path->p_depth);
- return err;
out:
+ if (err) {
+ ext4_free_ext_path(path);
+ path = ERR_PTR(err);
+ }
ext4_ext_show_leaf(inode, path);
- return err;
+ return path;
}
/*
- * ext4_split_extents() splits an extent and mark extent which is covered
+ * ext4_split_extent() splits an extent and marks the extent which is covered
* by @map as split_flags indicates
*
* It may result in splitting the extent into multiple extents (up to three)
@@ -3297,21 +3344,18 @@ out:
* c> Splits into three extents: someone is splitting in the middle of the extent
*
*/
-static int ext4_split_extent(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- struct ext4_map_blocks *map,
- int split_flag,
- int flags)
+static struct ext4_ext_path *ext4_split_extent(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_map_blocks *map,
+ int split_flag, int flags,
+ unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len, depth;
- int err = 0;
int unwritten;
int split_flag1, flags1;
- int allocated = map->m_len;
depth = ext_depth(inode);
ex = path[depth].p_ext;
@@ -3327,28 +3371,27 @@ static int ext4_split_extent(handle_t *handle,
EXT4_EXT_MARK_UNWRIT2;
if (split_flag & EXT4_EXT_DATA_VALID2)
split_flag1 |= EXT4_EXT_DATA_VALID1;
- err = ext4_split_extent_at(handle, inode, ppath,
+ path = ext4_split_extent_at(handle, inode, path,
map->m_lblk + map->m_len, split_flag1, flags1);
- if (err)
- goto out;
- } else {
- allocated = ee_len - (map->m_lblk - ee_block);
- }
- /*
- * Update path is required because previous ext4_split_extent_at() may
- * result in split of original leaf or extent zeroout.
- */
- path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
- if (IS_ERR(path))
- return PTR_ERR(path);
- depth = ext_depth(inode);
- ex = path[depth].p_ext;
- if (!ex) {
- EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
- (unsigned long) map->m_lblk);
- return -EFSCORRUPTED;
+ if (IS_ERR(path))
+ return path;
+ /*
+ * Update path is required because previous ext4_split_extent_at
+ * may result in split of original leaf or extent zeroout.
+ */
+ path = ext4_find_extent(inode, map->m_lblk, path, flags);
+ if (IS_ERR(path))
+ return path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+ EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+ (unsigned long) map->m_lblk);
+ ext4_free_ext_path(path);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+ unwritten = ext4_ext_is_unwritten(ex);
}
- unwritten = ext4_ext_is_unwritten(ex);
if (map->m_lblk >= ee_block) {
split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
@@ -3357,15 +3400,20 @@ static int ext4_split_extent(handle_t *handle,
split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
EXT4_EXT_MARK_UNWRIT2);
}
- err = ext4_split_extent_at(handle, inode, ppath,
+ path = ext4_split_extent_at(handle, inode, path,
map->m_lblk, split_flag1, flags);
- if (err)
- goto out;
+ if (IS_ERR(path))
+ return path;
}
+ if (allocated) {
+ if (map->m_lblk + map->m_len > ee_block + ee_len)
+ *allocated = ee_len - (map->m_lblk - ee_block);
+ else
+ *allocated = map->m_len;
+ }
ext4_ext_show_leaf(inode, path);
-out:
- return err ? err : allocated;
+ return path;
}
/*
@@ -3388,13 +3436,11 @@ out:
* that are allocated and initialized.
* It is guaranteed to be >= map->m_len.
*/
-static int ext4_ext_convert_to_initialized(handle_t *handle,
- struct inode *inode,
- struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
- int flags)
+static struct ext4_ext_path *
+ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, struct ext4_ext_path *path,
+ int flags, unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_sb_info *sbi;
struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
@@ -3404,7 +3450,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
unsigned int ee_len, depth, map_len = map->m_len;
int err = 0;
int split_flag = EXT4_EXT_DATA_VALID2;
- int allocated = 0;
unsigned int max_zeroout = 0;
ext_debug(inode, "logical block %llu, max_blocks %u\n",
@@ -3445,6 +3490,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
* - L2: we only attempt to merge with an extent stored in the
* same extent tree node.
*/
+ *allocated = 0;
if ((map->m_lblk == ee_block) &&
/* See if we can merge left */
(map_len < ee_len) && /*L1*/
@@ -3474,7 +3520,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
(prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
@@ -3489,7 +3535,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
/* Result: number of initialized blocks past m_lblk */
- allocated = map_len;
+ *allocated = map_len;
}
} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
(map_len < ee_len) && /*L1*/
@@ -3520,7 +3566,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
(next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
trace_ext4_ext_convert_to_initialized_fastpath(inode,
map, ex, abut_ex);
@@ -3535,18 +3581,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
abut_ex->ee_len = cpu_to_le16(next_len + map_len);
/* Result: number of initialized blocks past m_lblk */
- allocated = map_len;
+ *allocated = map_len;
}
}
- if (allocated) {
+ if (*allocated) {
/* Mark the block containing both extents as dirty */
err = ext4_ext_dirty(handle, inode, path + depth);
/* Update path to point to the right extent */
path[depth].p_ext = abut_ex;
+ if (err)
+ goto errout;
goto out;
} else
- allocated = ee_len - (map->m_lblk - ee_block);
+ *allocated = ee_len - (map->m_lblk - ee_block);
WARN_ON(map->m_lblk < ee_block);
/*
@@ -3573,21 +3621,21 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
split_map.m_lblk = map->m_lblk;
split_map.m_len = map->m_len;
- if (max_zeroout && (allocated > split_map.m_len)) {
- if (allocated <= max_zeroout) {
+ if (max_zeroout && (*allocated > split_map.m_len)) {
+ if (*allocated <= max_zeroout) {
/* case 3 or 5 */
zero_ex1.ee_block =
cpu_to_le32(split_map.m_lblk +
split_map.m_len);
zero_ex1.ee_len =
- cpu_to_le16(allocated - split_map.m_len);
+ cpu_to_le16(*allocated - split_map.m_len);
ext4_ext_store_pblock(&zero_ex1,
ext4_ext_pblock(ex) + split_map.m_lblk +
split_map.m_len - ee_block);
err = ext4_ext_zeroout(inode, &zero_ex1);
if (err)
goto fallback;
- split_map.m_len = allocated;
+ split_map.m_len = *allocated;
}
if (split_map.m_lblk - ee_block + split_map.m_len <
max_zeroout) {
@@ -3605,22 +3653,24 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
split_map.m_len += split_map.m_lblk - ee_block;
split_map.m_lblk = ee_block;
- allocated = map->m_len;
+ *allocated = map->m_len;
}
}
fallback:
- err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
- flags);
- if (err > 0)
- err = 0;
+ path = ext4_split_extent(handle, inode, path, &split_map, split_flag,
+ flags, NULL);
+ if (IS_ERR(path))
+ return path;
out:
/* If we have gotten a failure, don't zero out status tree */
- if (!err) {
- ext4_zeroout_es(inode, &zero_ex1);
- ext4_zeroout_es(inode, &zero_ex2);
- }
- return err ? err : allocated;
+ ext4_zeroout_es(inode, &zero_ex1);
+ ext4_zeroout_es(inode, &zero_ex2);
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
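
A sketch of the single-exit error idiom this function now follows; bail() is a hypothetical name, not part of the patch. The point is that the path is freed exactly once, at one label, and the integer error is converted into an ERR_PTR for the caller:

        static struct ext4_ext_path *
        bail(struct ext4_ext_path *path, int err)
        {
                ext4_free_ext_path(path);       /* single point of cleanup */
                return ERR_PTR(err);            /* caller tests with IS_ERR() */
        }
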
/*
@@ -3645,15 +3695,16 @@ out:
* being filled will be convert to initialized by the end_io callback function
* via ext4_convert_unwritten_extents().
*
- * Returns the size of unwritten extent to be written on success.
+ * The size of the unwritten extent to be written is returned to the caller
+ * via the 'allocated' pointer. Returns an extent path pointer on success,
+ * or an error pointer on failure.
*/
-static int ext4_split_convert_extents(handle_t *handle,
+static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
- int flags)
+ struct ext4_ext_path *path,
+ int flags, unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
ext4_lblk_t eof_block;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
@@ -3686,15 +3737,15 @@ static int ext4_split_convert_extents(handle_t *handle,
split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
}
flags |= EXT4_GET_BLOCKS_PRE_IO;
- return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
+ return ext4_split_extent(handle, inode, path, map, split_flag, flags,
+ allocated);
}
-static int ext4_convert_unwritten_extents_endio(handle_t *handle,
- struct inode *inode,
- struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath)
+static struct ext4_ext_path *
+ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
@@ -3722,20 +3773,21 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
inode->i_ino, (unsigned long long)ee_block, ee_len,
(unsigned long long)map->m_lblk, map->m_len);
#endif
- err = ext4_split_convert_extents(handle, inode, map, ppath,
- EXT4_GET_BLOCKS_CONVERT);
- if (err < 0)
- return err;
- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT, NULL);
if (IS_ERR(path))
- return PTR_ERR(path);
+ return path;
+
+ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
ex = path[depth].p_ext;
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- goto out;
+ goto errout;
/* first mark the extent as initialized */
ext4_ext_mark_initialized(ex);
@@ -3746,18 +3798,23 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-out:
+ if (err)
+ goto errout;
+
ext4_ext_show_leaf(inode, path);
- return err;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
-static int
+static struct ext4_ext_path *
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath,
+ struct ext4_ext_path *path,
unsigned int *allocated)
{
- struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
ext4_lblk_t ee_block;
unsigned int ee_len;
@@ -3780,25 +3837,27 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
(unsigned long long)ee_block, ee_len);
if (ee_block != map->m_lblk || ee_len > map->m_len) {
- err = ext4_split_convert_extents(handle, inode, map, ppath,
- EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
- if (err < 0)
- return err;
- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, NULL);
if (IS_ERR(path))
- return PTR_ERR(path);
+ return path;
+
+ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+ if (IS_ERR(path))
+ return path;
depth = ext_depth(inode);
ex = path[depth].p_ext;
if (!ex) {
EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
(unsigned long) map->m_lblk);
- return -EFSCORRUPTED;
+ err = -EFSCORRUPTED;
+ goto errout;
}
}
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
- return err;
+ goto errout;
/* first mark the extent as unwritten */
ext4_ext_mark_unwritten(ex);
@@ -3810,7 +3869,7 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
/* Mark modified extent as dirty */
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
if (err)
- return err;
+ goto errout;
ext4_ext_show_leaf(inode, path);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -3819,22 +3878,24 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
if (*allocated > map->m_len)
*allocated = map->m_len;
map->m_len = *allocated;
- return 0;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
-static int
+static struct ext4_ext_path *
ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
- struct ext4_ext_path **ppath, int flags,
- unsigned int allocated, ext4_fsblk_t newblock)
+ struct ext4_ext_path *path, int flags,
+ unsigned int *allocated, ext4_fsblk_t newblock)
{
- struct ext4_ext_path __maybe_unused *path = *ppath;
- int ret = 0;
int err = 0;
ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
(unsigned long long)map->m_lblk, map->m_len, flags,
- allocated);
+ *allocated);
ext4_ext_show_leaf(inode, path);
/*
@@ -3844,36 +3905,34 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
- allocated, newblock);
+ *allocated, newblock);
/* get_block() before submitting IO, split the extent */
if (flags & EXT4_GET_BLOCKS_PRE_IO) {
- ret = ext4_split_convert_extents(handle, inode, map, ppath,
- flags | EXT4_GET_BLOCKS_CONVERT);
- if (ret < 0) {
- err = ret;
- goto out2;
- }
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ flags | EXT4_GET_BLOCKS_CONVERT, allocated);
+ if (IS_ERR(path))
+ return path;
/*
- * shouldn't get a 0 return when splitting an extent unless
+ * shouldn't get a 0 allocated when splitting an extent unless
* m_len is 0 (bug) or extent has been corrupted
*/
- if (unlikely(ret == 0)) {
+ if (unlikely(*allocated == 0)) {
EXT4_ERROR_INODE(inode,
- "unexpected ret == 0, m_len = %u",
+ "unexpected allocated == 0, m_len = %u",
map->m_len);
err = -EFSCORRUPTED;
- goto out2;
+ goto errout;
}
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out;
}
/* IO end_io complete, convert the filled extent to written */
if (flags & EXT4_GET_BLOCKS_CONVERT) {
- err = ext4_convert_unwritten_extents_endio(handle, inode, map,
- ppath);
- if (err < 0)
- goto out2;
+ path = ext4_convert_unwritten_extents_endio(handle, inode,
+ map, path);
+ if (IS_ERR(path))
+ return path;
ext4_update_inode_fsync_trans(handle, inode, 1);
goto map_out;
}
@@ -3905,36 +3964,37 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
* For buffered writes, at writepage time, etc. Convert a
* discovered unwritten extent to written.
*/
- ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
- if (ret < 0) {
- err = ret;
- goto out2;
- }
+ path = ext4_ext_convert_to_initialized(handle, inode, map, path,
+ flags, allocated);
+ if (IS_ERR(path))
+ return path;
ext4_update_inode_fsync_trans(handle, inode, 1);
/*
- * shouldn't get a 0 return when converting an unwritten extent
+ * shouldn't get a 0 allocated when converting an unwritten extent
* unless m_len is 0 (bug) or extent has been corrupted
*/
- if (unlikely(ret == 0)) {
- EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
+ if (unlikely(*allocated == 0)) {
+ EXT4_ERROR_INODE(inode, "unexpected allocated == 0, m_len = %u",
map->m_len);
err = -EFSCORRUPTED;
- goto out2;
+ goto errout;
}
out:
- allocated = ret;
map->m_flags |= EXT4_MAP_NEW;
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
out1:
map->m_pblk = newblock;
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_len = allocated;
+ if (*allocated > map->m_len)
+ *allocated = map->m_len;
+ map->m_len = *allocated;
ext4_ext_show_leaf(inode, path);
-out2:
- return err ? err : allocated;
+ return path;
+
+errout:
+ ext4_free_ext_path(path);
+ return ERR_PTR(err);
}
/*
@@ -4097,7 +4157,8 @@ again:
insert_hole:
/* Put just found gap into cache to speed up subsequent requests */
ext_debug(inode, " -> %u:%u\n", hole_start, len);
- ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
+ ext4_es_insert_extent(inode, hole_start, len, ~0,
+ EXTENT_STATUS_HOLE, 0);
/* Update hole_len to reflect hole size after lblk */
if (hole_start != lblk)
@@ -4131,7 +4192,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent newex, *ex, ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0, pblk;
- int err = 0, depth, ret;
+ int err = 0, depth;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
@@ -4144,7 +4205,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
- path = NULL;
goto out;
}
@@ -4193,8 +4253,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
- err = convert_initialized_extent(handle,
- inode, map, &path, &allocated);
+ path = convert_initialized_extent(handle,
+ inode, map, path, &allocated);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
goto out;
} else if (!ext4_ext_is_unwritten(ex)) {
map->m_flags |= EXT4_MAP_MAPPED;
@@ -4206,13 +4268,11 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
goto out;
}
- ret = ext4_ext_handle_unwritten_extents(
- handle, inode, map, &path, flags,
- allocated, newblock);
- if (ret < 0)
- err = ret;
- else
- allocated = ret;
+ path = ext4_ext_handle_unwritten_extents(
+ handle, inode, map, path, flags,
+ &allocated, newblock);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
goto out;
}
}
@@ -4264,6 +4324,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
+ err = 0;
goto got_allocated_blocks;
}
@@ -4336,8 +4397,9 @@ got_allocated_blocks:
map->m_flags |= EXT4_MAP_UNWRITTEN;
}
- err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
- if (err) {
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
if (allocated_clusters) {
int fb_flags = 0;
@@ -4357,43 +4419,6 @@ got_allocated_blocks:
}
/*
- * Reduce the reserved cluster count to reflect successful deferred
- * allocation of delayed allocated clusters or direct allocation of
- * clusters discovered to be delayed allocated. Once allocated, a
- * cluster is not included in the reserved count.
- */
- if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
- /*
- * When allocating delayed allocated clusters, simply
- * reduce the reserved cluster count and claim quota
- */
- ext4_da_update_reserve_space(inode, allocated_clusters,
- 1);
- } else {
- ext4_lblk_t lblk, len;
- unsigned int n;
-
- /*
- * When allocating non-delayed allocated clusters
- * (from fallocate, filemap, DIO, or clusters
- * allocated when delalloc has been disabled by
- * ext4_nonda_switch), reduce the reserved cluster
- * count by the number of allocated clusters that
- * have previously been delayed allocated. Quota
- * has been claimed by ext4_mb_new_blocks() above,
- * so release the quota reservations made for any
- * previously delayed allocated clusters.
- */
- lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
- len = allocated_clusters << sbi->s_cluster_bits;
- n = ext4_es_delayed_clu(inode, lblk, len);
- if (n > 0)
- ext4_da_update_reserve_space(inode, (int) n, 0);
- }
- }
-
- /*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an unwritten extent.
*/
@@ -5184,7 +5209,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
* won't be shifted beyond EXT_MAX_BLOCKS.
*/
if (SHIFT == SHIFT_LEFT) {
- path = ext4_find_extent(inode, start - 1, &path,
+ path = ext4_find_extent(inode, start - 1, path,
EXT4_EX_NOCACHE);
if (IS_ERR(path))
return PTR_ERR(path);
@@ -5233,7 +5258,7 @@ again:
* becomes NULL to indicate the end of the loop.
*/
while (iterator && start <= stop) {
- path = ext4_find_extent(inode, *iterator, &path,
+ path = ext4_find_extent(inode, *iterator, path,
EXT4_EX_NOCACHE);
if (IS_ERR(path))
return PTR_ERR(path);
@@ -5535,6 +5560,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
path = ext4_find_extent(inode, offset_lblk, NULL, 0);
if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
+ ret = PTR_ERR(path);
goto out_stop;
}
@@ -5553,22 +5579,21 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
if (ext4_ext_is_unwritten(extent))
split_flag = EXT4_EXT_MARK_UNWRIT1 |
EXT4_EXT_MARK_UNWRIT2;
- ret = ext4_split_extent_at(handle, inode, &path,
+ path = ext4_split_extent_at(handle, inode, path,
offset_lblk, split_flag,
EXT4_EX_NOCACHE |
EXT4_GET_BLOCKS_PRE_IO |
EXT4_GET_BLOCKS_METADATA_NOFAIL);
}
- ext4_free_ext_path(path);
- if (ret < 0) {
+ if (IS_ERR(path)) {
up_write(&EXT4_I(inode)->i_data_sem);
+ ret = PTR_ERR(path);
goto out_stop;
}
- } else {
- ext4_free_ext_path(path);
}
+ ext4_free_ext_path(path);
ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
/*
@@ -5636,25 +5661,21 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
int e1_len, e2_len, len;
int split = 0;
- path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
+ path1 = ext4_find_extent(inode1, lblk1, path1, EXT4_EX_NOCACHE);
if (IS_ERR(path1)) {
*erp = PTR_ERR(path1);
- path1 = NULL;
- finish:
- count = 0;
- goto repeat;
+ goto errout;
}
- path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
+ path2 = ext4_find_extent(inode2, lblk2, path2, EXT4_EX_NOCACHE);
if (IS_ERR(path2)) {
*erp = PTR_ERR(path2);
- path2 = NULL;
- goto finish;
+ goto errout;
}
ex1 = path1[path1->p_depth].p_ext;
ex2 = path2[path2->p_depth].p_ext;
/* Do we have something to swap ? */
if (unlikely(!ex2 || !ex1))
- goto finish;
+ goto errout;
e1_blk = le32_to_cpu(ex1->ee_block);
e2_blk = le32_to_cpu(ex2->ee_block);
@@ -5676,7 +5697,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
next2 = e2_blk;
/* Do we have something to swap */
if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
- goto finish;
+ goto errout;
/* Move to the rightmost boundary */
len = next1 - lblk1;
if (len < next2 - lblk2)
@@ -5686,28 +5707,32 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
lblk1 += len;
lblk2 += len;
count -= len;
- goto repeat;
+ continue;
}
/* Prepare left boundary */
if (e1_blk < lblk1) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode1,
- &path1, lblk1, 0);
- if (unlikely(*erp))
- goto finish;
+ path1 = ext4_force_split_extent_at(handle, inode1,
+ path1, lblk1, 0);
+ if (IS_ERR(path1)) {
+ *erp = PTR_ERR(path1);
+ goto errout;
+ }
}
if (e2_blk < lblk2) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode2,
- &path2, lblk2, 0);
- if (unlikely(*erp))
- goto finish;
+ path2 = ext4_force_split_extent_at(handle, inode2,
+ path2, lblk2, 0);
+ if (IS_ERR(path2)) {
+ *erp = PTR_ERR(path2);
+ goto errout;
+ }
}
/* ext4_split_extent_at() may result in leaf extent split,
* path must be revalidated. */
if (split)
- goto repeat;
+ continue;
/* Prepare right boundary */
len = count;
@@ -5718,30 +5743,34 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
if (len != e1_len) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode1,
- &path1, lblk1 + len, 0);
- if (unlikely(*erp))
- goto finish;
+ path1 = ext4_force_split_extent_at(handle, inode1,
+ path1, lblk1 + len, 0);
+ if (IS_ERR(path1)) {
+ *erp = PTR_ERR(path1);
+ goto errout;
+ }
}
if (len != e2_len) {
split = 1;
- *erp = ext4_force_split_extent_at(handle, inode2,
- &path2, lblk2 + len, 0);
- if (*erp)
- goto finish;
+ path2 = ext4_force_split_extent_at(handle, inode2,
+ path2, lblk2 + len, 0);
+ if (IS_ERR(path2)) {
+ *erp = PTR_ERR(path2);
+ goto errout;
+ }
}
/* ext4_split_extent_at() may result in leaf extent split,
* path must be revalidated. */
if (split)
- goto repeat;
+ continue;
BUG_ON(e2_len != e1_len);
*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
/* Both extents are fully inside boundaries. Swap it now */
tmp_ex = *ex1;
@@ -5759,7 +5788,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
*erp = ext4_ext_dirty(handle, inode2, path2 +
path2->p_depth);
if (unlikely(*erp))
- goto finish;
+ goto errout;
*erp = ext4_ext_dirty(handle, inode1, path1 +
path1->p_depth);
/*
@@ -5769,17 +5798,17 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
* aborted anyway.
*/
if (unlikely(*erp))
- goto finish;
+ goto errout;
+
lblk1 += len;
lblk2 += len;
replaced_count += len;
count -= len;
-
- repeat:
- ext4_free_ext_path(path1);
- ext4_free_ext_path(path2);
- path1 = path2 = NULL;
}
+
+errout:
+ ext4_free_ext_path(path1);
+ ext4_free_ext_path(path2);
return replaced_count;
}
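
A hedged sketch of the loop shape ext4_swap_extents() takes after this hunk: the previous path is fed back into ext4_find_extent(), 'continue' replaces the old repeat label, and one errout label frees both paths. It assumes, as the single cleanup label implies, that ext4_free_ext_path() tolerates the error pointers left behind on failure:

        while (count) {
                path1 = ext4_find_extent(inode1, lblk1, path1, EXT4_EX_NOCACHE);
                if (IS_ERR(path1)) {
                        *erp = PTR_ERR(path1);
                        goto errout;
                }
                /* ... split and swap; 'continue' revalidates the paths ... */
        }
errout:
        ext4_free_ext_path(path1);
        ext4_free_ext_path(path2);
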
@@ -5814,11 +5843,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
/* search for the extent closest to the first block in the cluster */
path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- path = NULL;
- goto out;
- }
+ if (IS_ERR(path))
+ return PTR_ERR(path);
depth = ext_depth(inode);
@@ -5880,7 +5906,7 @@ out:
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
int len, int unwritten, ext4_fsblk_t pblk)
{
- struct ext4_ext_path *path = NULL, *ppath;
+ struct ext4_ext_path *path;
struct ext4_extent *ex;
int ret;
@@ -5896,30 +5922,34 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
if (le32_to_cpu(ex->ee_block) != start ||
ext4_ext_get_actual_len(ex) != len) {
/* We need to split this extent to match our extent first */
- ppath = path;
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
+ path = ext4_force_split_extent_at(NULL, inode, path, start, 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret)
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
goto out;
- kfree(path);
- path = ext4_find_extent(inode, start, NULL, 0);
+ }
+
+ path = ext4_find_extent(inode, start, path, 0);
if (IS_ERR(path))
- return -1;
- ppath = path;
+ return PTR_ERR(path);
+
ex = path[path->p_depth].p_ext;
WARN_ON(le32_to_cpu(ex->ee_block) != start);
+
if (ext4_ext_get_actual_len(ex) != len) {
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_force_split_extent_at(NULL, inode, &ppath,
- start + len, 1);
+ path = ext4_force_split_extent_at(NULL, inode, path,
+ start + len, 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret)
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
goto out;
- kfree(path);
- path = ext4_find_extent(inode, start, NULL, 0);
+ }
+
+ path = ext4_find_extent(inode, start, path, 0);
if (IS_ERR(path))
- return -EINVAL;
+ return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
}
}
@@ -6001,12 +6031,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
if (IS_ERR(path))
return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_free_ext_path(path);
+ if (!ex)
goto out;
- }
end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
- ext4_free_ext_path(path);
/* Count the number of data blocks */
cur = 0;
@@ -6032,32 +6059,28 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
ret = skip_hole(inode, &cur);
if (ret < 0)
goto out;
- path = ext4_find_extent(inode, cur, NULL, 0);
+ path = ext4_find_extent(inode, cur, path, 0);
if (IS_ERR(path))
goto out;
numblks += path->p_depth;
- ext4_free_ext_path(path);
while (cur < end) {
- path = ext4_find_extent(inode, cur, NULL, 0);
+ path = ext4_find_extent(inode, cur, path, 0);
if (IS_ERR(path))
break;
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_free_ext_path(path);
- return 0;
- }
+ if (!ex)
+ goto cleanup;
+
cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
ext4_ext_get_actual_len(ex));
ret = skip_hole(inode, &cur);
- if (ret < 0) {
- ext4_free_ext_path(path);
+ if (ret < 0)
break;
- }
- path2 = ext4_find_extent(inode, cur, NULL, 0);
- if (IS_ERR(path2)) {
- ext4_free_ext_path(path);
+
+ path2 = ext4_find_extent(inode, cur, path2, 0);
+ if (IS_ERR(path2))
break;
- }
+
for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
cmp1 = cmp2 = 0;
if (i <= path->p_depth)
@@ -6069,13 +6092,14 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
if (cmp1 != cmp2 && cmp2 != 0)
numblks++;
}
- ext4_free_ext_path(path);
- ext4_free_ext_path(path2);
}
out:
inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
ext4_mark_inode_dirty(NULL, inode);
+cleanup:
+ ext4_free_ext_path(path);
+ ext4_free_ext_path(path2);
return 0;
}
@@ -6096,12 +6120,9 @@ int ext4_ext_clear_bb(struct inode *inode)
if (IS_ERR(path))
return PTR_ERR(path);
ex = path[path->p_depth].p_ext;
- if (!ex) {
- ext4_free_ext_path(path);
- return 0;
- }
+ if (!ex)
+ goto out;
end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
- ext4_free_ext_path(path);
cur = 0;
while (cur < end) {
@@ -6111,16 +6132,16 @@ int ext4_ext_clear_bb(struct inode *inode)
if (ret < 0)
break;
if (ret > 0) {
- path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
- if (!IS_ERR_OR_NULL(path)) {
+ path = ext4_find_extent(inode, map.m_lblk, path, 0);
+ if (!IS_ERR(path)) {
for (j = 0; j < path->p_depth; j++) {
-
ext4_mb_mark_bb(inode->i_sb,
path[j].p_block, 1, false);
ext4_fc_record_regions(inode->i_sb, inode->i_ino,
0, path[j].p_block, 1, 1);
}
- ext4_free_ext_path(path);
+ } else {
+ path = NULL;
}
ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
ext4_fc_record_regions(inode->i_sb, inode->i_ino,
@@ -6129,5 +6150,7 @@ int ext4_ext_clear_bb(struct inode *inode)
cur = cur + map.m_len;
}
+out:
+ ext4_free_ext_path(path);
return 0;
}
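
A hypothetical loop distilled from the replay helpers above (next_cur stands in for whatever advance the caller computes): ext4_find_extent() now accepts the previous path and reuses or reallocates it in place, so the per-iteration free/alloc pair disappears and a single free remains at the end:

        struct ext4_ext_path *path = NULL;

        while (cur < end) {
                path = ext4_find_extent(inode, cur, path, 0);
                if (IS_ERR(path))
                        return PTR_ERR(path);   /* old path freed by callee */
                /* ... inspect path[0..p_depth] ... */
                cur = next_cur;
        }
        ext4_free_ext_path(path);
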
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 17dcf13adde2..c786691dabd3 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -558,8 +558,8 @@ static int ext4_es_can_be_merged(struct extent_status *es1,
if (ext4_es_is_hole(es1))
return 1;
- /* we need to check delayed extent is without unwritten status */
- if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
+ /* we need to check delayed extent */
+ if (ext4_es_is_delayed(es1))
return 1;
return 0;
@@ -848,11 +848,12 @@ out:
*/
void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
- unsigned int status)
+ unsigned int status, int flags)
{
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
int err1 = 0, err2 = 0, err3 = 0;
+ int resv_used = 0, pending = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct extent_status *es1 = NULL;
struct extent_status *es2 = NULL;
@@ -862,21 +863,14 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
return;
- es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
- lblk, len, pblk, status, inode->i_ino);
+ es_debug("add [%u/%u) %llu %x %x to extent status tree of inode %lu\n",
+ lblk, len, pblk, status, flags, inode->i_ino);
if (!len)
return;
BUG_ON(end < lblk);
-
- if ((status & EXTENT_STATUS_DELAYED) &&
- (status & EXTENT_STATUS_WRITTEN)) {
- ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
- " delayed and written which can potentially "
- " cause data loss.", lblk, len);
- WARN_ON(1);
- }
+ WARN_ON_ONCE(status & EXTENT_STATUS_DELAYED);
newes.es_lblk = lblk;
newes.es_len = len;
@@ -894,11 +888,11 @@ retry:
es1 = __es_alloc_extent(true);
if ((err1 || err2) && !es2)
es2 = __es_alloc_extent(true);
- if ((err1 || err2 || err3) && revise_pending && !pr)
+ if ((err1 || err2 || err3 < 0) && revise_pending && !pr)
pr = __alloc_pending(true);
write_lock(&EXT4_I(inode)->i_es_lock);
- err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+ err1 = __es_remove_extent(inode, lblk, end, &resv_used, es1);
if (err1 != 0)
goto error;
/* Free preallocated extent if it didn't get used. */
@@ -922,16 +916,38 @@ retry:
if (revise_pending) {
err3 = __revise_pending(inode, lblk, len, &pr);
- if (err3 != 0)
+ if (err3 < 0)
goto error;
if (pr) {
__free_pending(pr);
pr = NULL;
}
+ pending = err3;
}
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
- if (err1 || err2 || err3)
+ /*
+ * Reduce the reserved cluster count to reflect successful deferred
+ * allocation of delayed allocated clusters or direct allocation of
+ * clusters discovered to be delayed allocated. Once allocated, a
+ * cluster is not included in the reserved count.
+ *
+ * When allocating directly (from fallocate, the filemap path, DIO, or
+ * clusters allocated when delalloc has been disabled by
+ * ext4_nonda_switch()), an extent either 1) contains delayed blocks but
+ * starts with non-delayed allocated blocks (e.g. a hole), or 2) contains
+ * non-delayed allocated blocks that belong to delayed allocated clusters
+ * when the bigalloc feature is enabled. In both cases quota has already
+ * been claimed by ext4_mb_new_blocks(), so release the quota reservations
+ * made for any previously delayed allocated clusters instead of claiming
+ * them again.
+ */
+ resv_used += pending;
+ if (resv_used)
+ ext4_da_update_reserve_space(inode, resv_used,
+ flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+
+ if (err1 || err2 || err3 < 0)
goto retry;
ext4_es_print_tree(inode);
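
A worked example of the relocated reserved-cluster accounting, with hypothetical numbers:

        /*
         * Hypothetical numbers: resv_used = 8 (from __es_remove_extent)
         * plus pending = 1 (from __revise_pending) releases 9 reservations;
         * with EXT4_GET_BLOCKS_DELALLOC_RESERVE set in flags the quota is
         * claimed, otherwise the existing quota reservation is released.
         */
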
@@ -1051,7 +1067,7 @@ out:
}
struct rsvd_count {
- int ndelonly;
+ int ndelayed;
bool first_do_lblk_found;
ext4_lblk_t first_do_lblk;
ext4_lblk_t last_do_lblk;
@@ -1077,10 +1093,10 @@ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct rb_node *node;
- rc->ndelonly = 0;
+ rc->ndelayed = 0;
/*
- * for bigalloc, note the first delonly block in the range has not
+ * for bigalloc, note the first delayed block in the range has not
* been found, record the extent containing the block to the left of
* the region to be removed, if any, and note that there's no partial
* cluster to track
@@ -1100,9 +1116,8 @@ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
}
/*
- * count_rsvd - count the clusters containing delayed and not unwritten
- * (delonly) blocks in a range within an extent and add to
- * the running tally in rsvd_count
+ * count_rsvd - count the clusters containing delayed blocks in a range
+ * within an extent and add to the running tally in rsvd_count
*
* @inode - file containing extent
* @lblk - first block in range
@@ -1119,13 +1134,13 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t i, end, nclu;
- if (!ext4_es_is_delonly(es))
+ if (!ext4_es_is_delayed(es))
return;
WARN_ON(len <= 0);
if (sbi->s_cluster_ratio == 1) {
- rc->ndelonly += (int) len;
+ rc->ndelayed += (int) len;
return;
}
@@ -1135,7 +1150,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
end = lblk + (ext4_lblk_t) len - 1;
end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
- /* record the first block of the first delonly extent seen */
+ /* record the first block of the first delayed extent seen */
if (!rc->first_do_lblk_found) {
rc->first_do_lblk = i;
rc->first_do_lblk_found = true;
@@ -1149,7 +1164,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
* doesn't start with it, count it and stop tracking
*/
if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
- rc->ndelonly++;
+ rc->ndelayed++;
rc->partial = false;
}
@@ -1159,7 +1174,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
*/
if (EXT4_LBLK_COFF(sbi, i) != 0) {
if (end >= EXT4_LBLK_CFILL(sbi, i)) {
- rc->ndelonly++;
+ rc->ndelayed++;
rc->partial = false;
i = EXT4_LBLK_CFILL(sbi, i) + 1;
}
@@ -1167,11 +1182,11 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
/*
* if the current cluster starts on a cluster boundary, count the
- * number of whole delonly clusters in the extent
+ * number of whole delayed clusters in the extent
*/
if ((i + sbi->s_cluster_ratio - 1) <= end) {
nclu = (end - i + 1) >> sbi->s_cluster_bits;
- rc->ndelonly += nclu;
+ rc->ndelayed += nclu;
i += nclu << sbi->s_cluster_bits;
}
@@ -1231,10 +1246,9 @@ static struct pending_reservation *__pr_tree_search(struct rb_root *root,
* @rc - pointer to reserved count data
*
* The number of reservations to be released is equal to the number of
- * clusters containing delayed and not unwritten (delonly) blocks within
- * the range, minus the number of clusters still containing delonly blocks
- * at the ends of the range, and minus the number of pending reservations
- * within the range.
+ * clusters containing delayed blocks within the range, minus the number of
+ * clusters still containing delayed blocks at the ends of the range, and
+ * minus the number of pending reservations within the range.
*/
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
struct extent_status *right_es,
@@ -1245,33 +1259,33 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
struct rb_node *node;
ext4_lblk_t first_lclu, last_lclu;
- bool left_delonly, right_delonly, count_pending;
+ bool left_delayed, right_delayed, count_pending;
struct extent_status *es;
if (sbi->s_cluster_ratio > 1) {
/* count any remaining partial cluster */
if (rc->partial)
- rc->ndelonly++;
+ rc->ndelayed++;
- if (rc->ndelonly == 0)
+ if (rc->ndelayed == 0)
return 0;
first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);
/*
- * decrease the delonly count by the number of clusters at the
- * ends of the range that still contain delonly blocks -
+ * decrease the delayed count by the number of clusters at the
+ * ends of the range that still contain delayed blocks -
* these clusters still need to be reserved
*/
- left_delonly = right_delonly = false;
+ left_delayed = right_delayed = false;
es = rc->left_es;
while (es && ext4_es_end(es) >=
EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
- if (ext4_es_is_delonly(es)) {
- rc->ndelonly--;
- left_delonly = true;
+ if (ext4_es_is_delayed(es)) {
+ rc->ndelayed--;
+ left_delayed = true;
break;
}
node = rb_prev(&es->rb_node);
@@ -1279,7 +1293,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
break;
es = rb_entry(node, struct extent_status, rb_node);
}
- if (right_es && (!left_delonly || first_lclu != last_lclu)) {
+ if (right_es && (!left_delayed || first_lclu != last_lclu)) {
if (end < ext4_es_end(right_es)) {
es = right_es;
} else {
@@ -1289,9 +1303,9 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
}
while (es && es->es_lblk <=
EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
- if (ext4_es_is_delonly(es)) {
- rc->ndelonly--;
- right_delonly = true;
+ if (ext4_es_is_delayed(es)) {
+ rc->ndelayed--;
+ right_delayed = true;
break;
}
node = rb_next(&es->rb_node);
@@ -1305,21 +1319,21 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
/*
* Determine the block range that should be searched for
* pending reservations, if any. Clusters on the ends of the
- * original removed range containing delonly blocks are
+ * original removed range containing delayed blocks are
* excluded. They've already been accounted for and it's not
* possible to determine if an associated pending reservation
* should be released with the information available in the
* extents status tree.
*/
if (first_lclu == last_lclu) {
- if (left_delonly | right_delonly)
+ if (left_delayed | right_delayed)
count_pending = false;
else
count_pending = true;
} else {
- if (left_delonly)
+ if (left_delayed)
first_lclu++;
- if (right_delonly)
+ if (right_delayed)
last_lclu--;
if (first_lclu <= last_lclu)
count_pending = true;
@@ -1330,13 +1344,13 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
/*
* a pending reservation found between first_lclu and last_lclu
* represents an allocated cluster that contained at least one
- * delonly block, so the delonly total must be reduced by one
+ * delayed block, so the delayed total must be reduced by one
* for each pending reservation found and released
*/
if (count_pending) {
pr = __pr_tree_search(&tree->root, first_lclu);
while (pr && pr->lclu <= last_lclu) {
- rc->ndelonly--;
+ rc->ndelayed--;
node = rb_next(&pr->rb_node);
rb_erase(&pr->rb_node, &tree->root);
__free_pending(pr);
@@ -1347,7 +1361,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
}
}
}
- return rc->ndelonly;
+ return rc->ndelayed;
}
@@ -1940,7 +1954,7 @@ static struct pending_reservation *__get_pending(struct inode *inode,
* @lblk - logical block in the cluster to be added
* @prealloc - preallocated pending entry
*
- * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * Returns 1 on successful insertion and -ENOMEM on failure. If the
* pending reservation is already in the set, returns 0 without inserting.
*/
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
@@ -1984,6 +1998,7 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
rb_link_node(&pr->rb_node, parent, p);
rb_insert_color(&pr->rb_node, &tree->root);
+ ret = 1;
out:
return ret;
@@ -2105,7 +2120,7 @@ retry:
es1 = __es_alloc_extent(true);
if ((err1 || err2) && !es2)
es2 = __es_alloc_extent(true);
- if (err1 || err2 || err3) {
+ if (err1 || err2 || err3 < 0) {
if (lclu_allocated && !pr1)
pr1 = __alloc_pending(true);
if (end_allocated && !pr2)
@@ -2135,7 +2150,7 @@ retry:
if (lclu_allocated) {
err3 = __insert_pending(inode, lblk, &pr1);
- if (err3 != 0)
+ if (err3 < 0)
goto error;
if (pr1) {
__free_pending(pr1);
@@ -2144,7 +2159,7 @@ retry:
}
if (end_allocated) {
err3 = __insert_pending(inode, end, &pr2);
- if (err3 != 0)
+ if (err3 < 0)
goto error;
if (pr2) {
__free_pending(pr2);
@@ -2153,7 +2168,7 @@ retry:
}
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
- if (err1 || err2 || err3)
+ if (err1 || err2 || err3 < 0)
goto retry;
ext4_es_print_tree(inode);
@@ -2162,94 +2177,6 @@ error:
}
/*
- * __es_delayed_clu - count number of clusters containing blocks that
- * are delayed only
- *
- * @inode - file containing block range
- * @start - logical block defining start of range
- * @end - logical block defining end of range
- *
- * Returns the number of clusters containing only delayed (not delayed
- * and unwritten) blocks in the range specified by @start and @end. Any
- * cluster or part of a cluster within the range and containing a delayed
- * and not unwritten block within the range is counted as a whole cluster.
- */
-static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
- ext4_lblk_t end)
-{
- struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
- struct extent_status *es;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct rb_node *node;
- ext4_lblk_t first_lclu, last_lclu;
- unsigned long long last_counted_lclu;
- unsigned int n = 0;
-
- /* guaranteed to be unequal to any ext4_lblk_t value */
- last_counted_lclu = ~0ULL;
-
- es = __es_tree_search(&tree->root, start);
-
- while (es && (es->es_lblk <= end)) {
- if (ext4_es_is_delonly(es)) {
- if (es->es_lblk <= start)
- first_lclu = EXT4_B2C(sbi, start);
- else
- first_lclu = EXT4_B2C(sbi, es->es_lblk);
-
- if (ext4_es_end(es) >= end)
- last_lclu = EXT4_B2C(sbi, end);
- else
- last_lclu = EXT4_B2C(sbi, ext4_es_end(es));
-
- if (first_lclu == last_counted_lclu)
- n += last_lclu - first_lclu;
- else
- n += last_lclu - first_lclu + 1;
- last_counted_lclu = last_lclu;
- }
- node = rb_next(&es->rb_node);
- if (!node)
- break;
- es = rb_entry(node, struct extent_status, rb_node);
- }
-
- return n;
-}
-
-/*
- * ext4_es_delayed_clu - count number of clusters containing blocks that
- * are both delayed and unwritten
- *
- * @inode - file containing block range
- * @lblk - logical block defining start of range
- * @len - number of blocks in range
- *
- * Locking for external use of __es_delayed_clu().
- */
-unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- ext4_lblk_t end;
- unsigned int n;
-
- if (len == 0)
- return 0;
-
- end = lblk + len - 1;
- WARN_ON(end < lblk);
-
- read_lock(&ei->i_es_lock);
-
- n = __es_delayed_clu(inode, lblk, end);
-
- read_unlock(&ei->i_es_lock);
-
- return n;
-}
-
-/*
* __revise_pending - makes, cancels, or leaves unchanged pending cluster
* reservations for a specified block range depending
* upon the presence or absence of delayed blocks
@@ -2263,7 +2190,9 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
*
* Used after a newly allocated extent is added to the extents status tree.
* Requires that the extents in the range have either written or unwritten
- * status. Must be called while holding i_es_lock.
+ * status. Must be called while holding i_es_lock. Returns the number of
+ * newly inserted pending cluster reservations when reservations are made,
+ * 0 when pending reservations are only removed, and -ENOMEM on failure.
*/
static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len,
@@ -2273,6 +2202,7 @@ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t end = lblk + len - 1;
ext4_lblk_t first, last;
bool f_del = false, l_del = false;
+ int pendings = 0;
int ret = 0;
if (len == 0)
@@ -2294,49 +2224,53 @@ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
first = EXT4_LBLK_CMASK(sbi, lblk);
if (first != lblk)
- f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ f_del = __es_scan_range(inode, &ext4_es_is_delayed,
first, lblk - 1);
if (f_del) {
ret = __insert_pending(inode, first, prealloc);
if (ret < 0)
goto out;
+ pendings += ret;
} else {
last = EXT4_LBLK_CMASK(sbi, end) +
sbi->s_cluster_ratio - 1;
if (last != end)
l_del = __es_scan_range(inode,
- &ext4_es_is_delonly,
+ &ext4_es_is_delayed,
end + 1, last);
if (l_del) {
ret = __insert_pending(inode, last, prealloc);
if (ret < 0)
goto out;
+ pendings += ret;
} else
__remove_pending(inode, last);
}
} else {
first = EXT4_LBLK_CMASK(sbi, lblk);
if (first != lblk)
- f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ f_del = __es_scan_range(inode, &ext4_es_is_delayed,
first, lblk - 1);
if (f_del) {
ret = __insert_pending(inode, first, prealloc);
if (ret < 0)
goto out;
+ pendings += ret;
} else
__remove_pending(inode, first);
last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
if (last != end)
- l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ l_del = __es_scan_range(inode, &ext4_es_is_delayed,
end + 1, last);
if (l_del) {
ret = __insert_pending(inode, last, prealloc);
if (ret < 0)
goto out;
+ pendings += ret;
} else
__remove_pending(inode, last);
}
out:
- return ret;
+ return (ret < 0) ? ret : pendings;
}
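
A hypothetical caller fragment for the revised contract, mirroring how ext4_es_insert_extent() consumes err3 above: negative means failure, zero means pendings were only removed or left unchanged, and a positive value counts newly inserted pending reservations:

        ret = __revise_pending(inode, lblk, len, &prealloc);
        if (ret < 0)
                goto error;     /* -ENOMEM: caller retries with preallocation */
        pending = ret;          /* folded into the resv_used accounting */
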
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 3c8e2edee5d5..4424232de298 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -42,6 +42,10 @@ enum {
#define ES_SHIFT (sizeof(ext4_fsblk_t)*8 - ES_FLAGS)
#define ES_MASK (~((ext4_fsblk_t)0) << ES_SHIFT)
+/*
+ * Besides EXTENT_STATUS_REFERENCED, all these extent type masks
+ * are mutually exclusive; only one type can be set at a time.
+ */
#define EXTENT_STATUS_WRITTEN (1 << ES_WRITTEN_B)
#define EXTENT_STATUS_UNWRITTEN (1 << ES_UNWRITTEN_B)
#define EXTENT_STATUS_DELAYED (1 << ES_DELAYED_B)
@@ -51,7 +55,9 @@ enum {
#define ES_TYPE_MASK ((ext4_fsblk_t)(EXTENT_STATUS_WRITTEN | \
EXTENT_STATUS_UNWRITTEN | \
EXTENT_STATUS_DELAYED | \
- EXTENT_STATUS_HOLE) << ES_SHIFT)
+ EXTENT_STATUS_HOLE))
+
+#define ES_TYPE_VALID(type) ((type) && !((type) & ((type) - 1)))
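
An editorial aside on the macro just added: it is the classic "exactly one bit set" test, since (type - 1) flips the lowest set bit and everything below it, so the AND only survives when more than one bit is set:

        ES_TYPE_VALID(EXTENT_STATUS_WRITTEN)                          /* true  */
        ES_TYPE_VALID(EXTENT_STATUS_WRITTEN | EXTENT_STATUS_DELAYED)  /* false */
        ES_TYPE_VALID(0)                                              /* false */
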
struct ext4_sb_info;
struct ext4_extent;
@@ -129,7 +135,7 @@ extern void ext4_es_init_tree(struct ext4_es_tree *tree);
extern void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
- unsigned int status);
+ unsigned int status, int flags);
extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
unsigned int status);
@@ -156,7 +162,7 @@ static inline unsigned int ext4_es_status(struct extent_status *es)
static inline unsigned int ext4_es_type(struct extent_status *es)
{
- return (es->es_pblk & ES_TYPE_MASK) >> ES_SHIFT;
+ return (es->es_pblk >> ES_SHIFT) & ES_TYPE_MASK;
}
static inline int ext4_es_is_written(struct extent_status *es)
@@ -184,11 +190,6 @@ static inline int ext4_es_is_mapped(struct extent_status *es)
return (ext4_es_is_written(es) || ext4_es_is_unwritten(es));
}
-static inline int ext4_es_is_delonly(struct extent_status *es)
-{
- return (ext4_es_is_delayed(es) && !ext4_es_is_unwritten(es));
-}
-
static inline void ext4_es_set_referenced(struct extent_status *es)
{
es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
@@ -224,17 +225,12 @@ static inline void ext4_es_store_pblock(struct extent_status *es,
es->es_pblk = block;
}
-static inline void ext4_es_store_status(struct extent_status *es,
- unsigned int status)
-{
- es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
- (es->es_pblk & ~ES_MASK);
-}
-
static inline void ext4_es_store_pblock_status(struct extent_status *es,
ext4_fsblk_t pb,
unsigned int status)
{
+ WARN_ON_ONCE(!ES_TYPE_VALID(status & ES_TYPE_MASK));
+
es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
(pb & ~ES_MASK);
}
@@ -252,8 +248,6 @@ extern bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk);
extern void ext4_es_insert_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, bool lclu_allocated,
bool end_allocated);
-extern unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
- ext4_lblk_t len);
extern void ext4_clear_inode_es(struct inode *inode);
#endif /* _EXT4_EXTENTS_STATUS_H */
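
A short layout sketch, derived only from the definitions in this header, showing how a type and a physical block coexist in es_pblk and why ext4_es_type() must shift before masking now that ES_TYPE_MASK is defined in unshifted form:

        /* store */
        es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
                  (pb & ~ES_MASK);
        /* extract the type: shift down first, then mask */
        type = (es_pblk >> ES_SHIFT) & ES_TYPE_MASK;
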
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 3926a05eceee..eaa5f5b51f50 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -339,22 +339,29 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
tid_t tid;
+ bool has_transaction = true;
+ bool is_ineligible;
if (ext4_fc_disabled(sb))
return;
- ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
if (handle && !IS_ERR(handle))
tid = handle->h_transaction->t_tid;
else {
read_lock(&sbi->s_journal->j_state_lock);
- tid = sbi->s_journal->j_running_transaction ?
- sbi->s_journal->j_running_transaction->t_tid : 0;
+ if (sbi->s_journal->j_running_transaction)
+ tid = sbi->s_journal->j_running_transaction->t_tid;
+ else
+ has_transaction = false;
read_unlock(&sbi->s_journal->j_state_lock);
}
spin_lock(&sbi->s_fc_lock);
- if (tid_gt(tid, sbi->s_fc_ineligible_tid))
+ is_ineligible = ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (has_transaction &&
+ (!is_ineligible ||
+ (is_ineligible && tid_gt(tid, sbi->s_fc_ineligible_tid))))
sbi->s_fc_ineligible_tid = tid;
+ ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
spin_unlock(&sbi->s_fc_lock);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
@@ -1288,8 +1295,21 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
list_del_init(&iter->i_fc_list);
ext4_clear_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_COMMITTING);
- if (tid_geq(tid, iter->i_sync_tid))
+ if (tid_geq(tid, iter->i_sync_tid)) {
ext4_fc_reset_inode(&iter->vfs_inode);
+ } else if (full) {
+ /*
+ * We are called after a full commit, and the inode has been
+ * modified while the commit was running. Re-enqueue the inode
+ * into STAGING, which will then be spliced back into MAIN.
+ * This cannot happen during a fast commit because the journal
+ * is locked the whole time in that case (and the tid doesn't
+ * increase, so the tid check above isn't reliable).
+ */
+ list_add_tail(&EXT4_I(&iter->vfs_inode)->i_fc_list,
+ &sbi->s_fc_q[FC_Q_STAGING]);
+ }
/* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
smp_mb();
#if (BITS_PER_LONG < 64)
@@ -1772,7 +1792,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
if (ret == 0) {
/* Range is not mapped */
- path = ext4_find_extent(inode, cur, NULL, 0);
+ path = ext4_find_extent(inode, cur, path, 0);
if (IS_ERR(path))
goto out;
memset(&newex, 0, sizeof(newex));
@@ -1783,11 +1803,10 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
if (ext4_ext_is_unwritten(ex))
ext4_ext_mark_unwritten(&newex);
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_ext_insert_extent(
- NULL, inode, &path, &newex, 0);
+ path = ext4_ext_insert_extent(NULL, inode,
+ path, &newex, 0);
up_write((&EXT4_I(inode)->i_data_sem));
- ext4_free_ext_path(path);
- if (ret)
+ if (IS_ERR(path))
goto out;
goto next;
}
@@ -1836,6 +1855,7 @@ next:
ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
sb->s_blocksize_bits);
out:
+ ext4_free_ext_path(path);
iput(inode);
return 0;
}
@@ -1936,12 +1956,13 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
break;
if (ret > 0) {
- path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
+ path = ext4_find_extent(inode, map.m_lblk, path, 0);
if (!IS_ERR(path)) {
for (j = 0; j < path->p_depth; j++)
ext4_mb_mark_bb(inode->i_sb,
path[j].p_block, 1, true);
- ext4_free_ext_path(path);
+ } else {
+ path = NULL;
}
cur += ret;
ext4_mb_mark_bb(inode->i_sb, map.m_pblk,
@@ -1952,6 +1973,8 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
}
iput(inode);
}
+
+ ext4_free_ext_path(path);
}
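
A hedged note on the pattern used in both replay loops above: when the lookup fails mid-loop the local is reset to NULL, because the callee has already freed the old path on error, which keeps the single ext4_free_ext_path() at the end safe:

        path = ext4_find_extent(inode, map.m_lblk, path, 0);
        if (IS_ERR(path))
                path = NULL;    /* old path freed by callee; skip this round */
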
/*
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index c89e434db6b7..f14aed14b9cf 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -306,7 +306,7 @@ out:
}
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
- ssize_t count)
+ ssize_t written, ssize_t count)
{
handle_t *handle;
@@ -315,7 +315,7 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
if (IS_ERR(handle))
return PTR_ERR(handle);
- if (ext4_update_inode_size(inode, offset + count)) {
+ if (ext4_update_inode_size(inode, offset + written)) {
int ret = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret)) {
ext4_journal_stop(handle);
@@ -323,21 +323,21 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
}
}
- if (inode->i_nlink)
+ if ((written == count) && inode->i_nlink)
ext4_orphan_del(handle, inode);
ext4_journal_stop(handle);
- return count;
+ return written;
}
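
A hypothetical usage sketch of the new written/count split ('done' is an invented name for the bytes actually transferred): the orphan entry is removed only when the write fully completed, so a crash after a partial extending write still lets recovery truncate the unwritten tail:

        ret = ext4_handle_inode_extension(inode, offset, done, count);
        ext4_inode_extension_cleanup(inode, done < (ssize_t)count);
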
/*
* Clean up the inode after DIO or DAX extending write has completed and the
* inode size has been updated using ext4_handle_inode_extension().
*/
-static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
+static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
{
lockdep_assert_held_write(&inode->i_rwsem);
- if (count < 0) {
+ if (need_trunc) {
ext4_truncate_failed_write(inode);
/*
* If the truncate operation failed early, then the inode may
@@ -393,7 +393,7 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
pos + size <= i_size_read(inode))
return size;
- return ext4_handle_inode_extension(inode, pos, size);
+ return ext4_handle_inode_extension(inode, pos, size, size);
}
static const struct iomap_dio_ops ext4_dio_write_ops = {
@@ -586,7 +586,7 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
* writeback of delalloc blocks.
*/
WARN_ON_ONCE(ret == -EIOCBQUEUED);
- ext4_inode_extension_cleanup(inode, ret);
+ ext4_inode_extension_cleanup(inode, ret < 0);
}
out:
@@ -669,8 +669,8 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
if (extend) {
- ret = ext4_handle_inode_extension(inode, offset, ret);
- ext4_inode_extension_cleanup(inode, ret);
+ ret = ext4_handle_inode_extension(inode, offset, ret, count);
+ ext4_inode_extension_cleanup(inode, ret < (ssize_t)count);
}
out:
inode_unlock(inode);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9dfd768ed9f8..7f1a5f90dbbd 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -87,10 +87,10 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
return 0;
- grp = ext4_get_group_info(sb, block_group);
-
if (buffer_verified(bh))
return 0;
+
+ grp = ext4_get_group_info(sb, block_group);
if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
return -EFSCORRUPTED;
@@ -98,8 +98,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
if (buffer_verified(bh))
goto verified;
blk = ext4_inode_bitmap(sb, desc);
- if (!ext4_inode_bitmap_csum_verify(sb, desc, bh,
- EXT4_INODES_PER_GROUP(sb) / 8) ||
+ if (!ext4_inode_bitmap_csum_verify(sb, desc, bh) ||
ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
@@ -327,8 +326,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
if (percpu_counter_initialized(&sbi->s_dirs_counter))
percpu_counter_dec(&sbi->s_dirs_counter);
}
- ext4_inode_bitmap_csum_set(sb, gdp, bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
@@ -514,6 +512,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
if (min_inodes < 1)
min_inodes = 1;
min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
+ if (min_clusters < 0)
+ min_clusters = 0;
/*
* Start looking in the flex group where we last allocated an
@@ -755,10 +755,10 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
struct ext4_group_desc *gdp;
ext4_group_t group;
int bit;
- int err = -EFSCORRUPTED;
+ int err;
if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
- goto out;
+ return -EFSCORRUPTED;
group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
@@ -772,7 +772,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
}
gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
- if (!gdp || !group_desc_bh) {
+ if (!gdp) {
err = -EINVAL;
goto out;
}
@@ -851,8 +851,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
if (ext4_has_group_desc_csum(sb)) {
- ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
@@ -860,6 +859,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
sync_dirty_buffer(group_desc_bh);
out:
+ brelse(inode_bitmap_bh);
return err;
}
@@ -1053,14 +1053,14 @@ got_group:
brelse(inode_bitmap_bh);
inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
/* Skip groups with suspicious inode tables */
- if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
- && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
- IS_ERR(inode_bitmap_bh)) {
+ if (IS_ERR(inode_bitmap_bh)) {
inode_bitmap_bh = NULL;
goto next_group;
}
+ if (!(sbi->s_mount_state & EXT4_FC_REPLAY) &&
+ EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ goto next_group;
-repeat_in_this_group:
ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
if (!ret2)
goto next_group;
@@ -1110,8 +1110,6 @@ repeat_in_this_group:
if (!ret2)
goto got; /* we grabbed the inode! */
- if (ino < EXT4_INODES_PER_GROUP(sb))
- goto repeat_in_this_group;
next_group:
if (++group == ngroups)
group = 0;
@@ -1224,8 +1222,7 @@ got:
}
}
if (ext4_has_group_desc_csum(sb)) {
- ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index d8ca7f64f952..7404f0935c90 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -652,13 +652,6 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
ext4_update_inode_fsync_trans(handle, inode, 1);
count = ar.len;
- /*
- * Update reserved blocks/metadata blocks after successful block
- * allocation which had been deferred till now.
- */
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
- ext4_da_update_reserve_space(inode, count, 1);
-
got_it:
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = le32_to_cpu(chain[depth-1].key);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index edf4aa99a974..3536ca7e4fcc 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -601,10 +601,11 @@ retry:
goto out;
if (ext4_should_dioread_nolock(inode)) {
- ret = __block_write_begin(folio, from, to,
- ext4_get_block_unwritten);
+ ret = ext4_block_write_begin(handle, folio, from, to,
+ ext4_get_block_unwritten);
} else
- ret = __block_write_begin(folio, from, to, ext4_get_block);
+ ret = ext4_block_write_begin(handle, folio, from, to,
+ ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode,
@@ -856,8 +857,8 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
goto out;
}
- ret = __block_write_begin(folio, 0, inline_size,
- ext4_da_get_block_prep);
+ ret = ext4_block_write_begin(NULL, folio, 0, inline_size,
+ ext4_da_get_block_prep);
if (ret) {
up_read(&EXT4_I(inode)->xattr_sem);
folio_unlock(folio);
@@ -1665,24 +1666,36 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
struct ext4_dir_entry_2 **res_dir,
int *has_inline_data)
{
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = -ENODATA, },
+ };
+ struct ext4_xattr_info i = {
+ .name_index = EXT4_XATTR_INDEX_SYSTEM,
+ .name = EXT4_XATTR_SYSTEM_DATA,
+ };
int ret;
- struct ext4_iloc iloc;
void *inline_start;
int inline_size;
- if (ext4_get_inode_loc(dir, &iloc))
- return NULL;
+ ret = ext4_get_inode_loc(dir, &is.iloc);
+ if (ret)
+ return ERR_PTR(ret);
down_read(&EXT4_I(dir)->xattr_sem);
+
+ ret = ext4_xattr_ibody_find(dir, &i, &is);
+ if (ret)
+ goto out;
+
if (!ext4_has_inline_data(dir)) {
*has_inline_data = 0;
goto out;
}
- inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+ inline_start = (void *)ext4_raw_inode(&is.iloc)->i_block +
EXT4_INLINE_DOTDOT_SIZE;
inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
- ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
+ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
@@ -1692,20 +1705,23 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE)
goto out;
- inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+ inline_start = ext4_get_inline_xattr_pos(dir, &is.iloc);
inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
- ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
+ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
dir, fname, 0, res_dir);
if (ret == 1)
goto out_find;
out:
- brelse(iloc.bh);
- iloc.bh = NULL;
+ brelse(is.iloc.bh);
+ if (ret < 0)
+ is.iloc.bh = ERR_PTR(ret);
+ else
+ is.iloc.bh = NULL;
out_find:
up_read(&EXT4_I(dir)->xattr_sem);
- return iloc.bh;
+ return is.iloc.bh;
}
int ext4_delete_inline_entry(handle_t *handle,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 03374dc215d1..54bdd4884fe6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -49,6 +49,11 @@
#include <trace/events/ext4.h>
+static void ext4_journalled_zero_new_buffers(handle_t *handle,
+ struct inode *inode,
+ struct folio *folio,
+ unsigned from, unsigned to);
+
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
@@ -478,7 +483,89 @@ static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
+ map->m_pblk, status, 0);
+ return retval;
+}
+
+static int ext4_map_create_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, int flags)
+{
+ struct extent_status es;
+ unsigned int status;
+ int err, retval = 0;
+
+ /*
+	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE flag,
+	 * which indicates that the blocks and quotas have already been
+	 * checked when the data was copied into the page cache.
+ */
+ if (map->m_flags & EXT4_MAP_DELAYED)
+ flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
+ /*
+	 * Here we clear m_flags because after allocating a new extent,
+ * it will be set again.
+ */
+ map->m_flags &= ~EXT4_MAP_FLAGS;
+
+ /*
+ * We need to check for EXT4 here because migrate could have
+ * changed the inode type in between.
+ */
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ retval = ext4_ext_map_blocks(handle, inode, map, flags);
+ } else {
+ retval = ext4_ind_map_blocks(handle, inode, map, flags);
+
+ /*
+ * We allocated new blocks which will result in i_data's
+ * format changing. Force the migrate to fail by clearing
+ * migrate flags.
+ */
+ if (retval > 0 && map->m_flags & EXT4_MAP_NEW)
+ ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
+ }
+ if (retval <= 0)
+ return retval;
+
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode %lu: "
+ "retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
+ }
+
+ /*
+ * We have to zeroout blocks before inserting them into extent
+ * status tree. Otherwise someone could look them up there and
+ * use them before they are really zeroed. We also have to
+ * unmap metadata before zeroing as otherwise writeback can
+ * overwrite zeros with stale data from block device.
+ */
+ if (flags & EXT4_GET_BLOCKS_ZERO &&
+ map->m_flags & EXT4_MAP_MAPPED && map->m_flags & EXT4_MAP_NEW) {
+ err = ext4_issue_zeroout(inode, map->m_lblk, map->m_pblk,
+ map->m_len);
+ if (err)
+ return err;
+ }
+
+ /*
+ * If the extent has been zeroed out, we don't need to update
+ * extent status tree.
+ */
+ if (flags & EXT4_GET_BLOCKS_PRE_IO &&
+ ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
+ if (ext4_es_is_written(&es))
+ return retval;
+ }
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status, flags);
+
return retval;
}
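
The new ext4_map_create_blocks() helper derives EXT4_GET_BLOCKS_DELALLOC_RESERVE from the mapping state (EXT4_MAP_DELAYED) at a single choke point instead of trusting every caller to pass it; a later hunk in mpage_map_one_extent() drops the old per-caller BIT(BH_Delay) logic accordingly. A minimal userspace sketch of the pattern, with illustrative names only:

/* Minimal sketch (not ext4 code): derive a flag from the mapping
 * state in one helper instead of in every caller.
 * Build: cc -o derive derive.c */
#include <stdio.h>

#define MAP_DELAYED   0x01   /* block was delalloc-reserved earlier */
#define GB_DELALLOC   0x10   /* "quota already charged" hint */

struct map { unsigned flags; };

static unsigned derive_flags(const struct map *m, unsigned flags)
{
	/* One choke point: callers can no longer forget the hint. */
	if (m->flags & MAP_DELAYED)
		flags |= GB_DELALLOC;
	return flags;
}

int main(void)
{
	struct map m = { .flags = MAP_DELAYED };

	printf("flags = %#x\n", derive_flags(&m, 0));   /* 0x10 */
	return 0;
}
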
@@ -576,32 +663,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, 0);
- } else {
- retval = ext4_ind_map_blocks(handle, inode, map, 0);
- }
- if (retval > 0) {
- unsigned int status;
-
- if (unlikely(retval != map->m_len)) {
- ext4_warning(inode->i_sb,
- "ES len assertion failed for inode "
- "%lu: retval %d != map->m_len %d",
- inode->i_ino, retval, map->m_len);
- WARN_ON(1);
- }
-
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
- !(status & EXTENT_STATUS_WRITTEN) &&
- ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
- map->m_lblk + map->m_len - 1))
- status |= EXTENT_STATUS_DELAYED;
- ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
- }
+ retval = ext4_map_query_blocks(handle, inode, map);
up_read((&EXT4_I(inode)->i_data_sem));
found:
@@ -631,88 +693,13 @@ found:
return retval;
/*
- * Here we clear m_flags because after allocating an new extent,
- * it will be set again.
- */
- map->m_flags &= ~EXT4_MAP_FLAGS;
-
- /*
	 * Allocating new blocks and/or writing to an unwritten extent
* will possibly result in updating i_data, so we take
* the write lock of i_data_sem, and call get_block()
* with create == 1 flag.
*/
down_write(&EXT4_I(inode)->i_data_sem);
-
- /*
- * We need to check for EXT4 here because migrate
- * could have changed the inode type in between
- */
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, flags);
- } else {
- retval = ext4_ind_map_blocks(handle, inode, map, flags);
-
- if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
- /*
- * We allocated new blocks which will result in
- * i_data's format changing. Force the migrate
- * to fail by clearing migrate flags
- */
- ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
- }
- }
-
- if (retval > 0) {
- unsigned int status;
-
- if (unlikely(retval != map->m_len)) {
- ext4_warning(inode->i_sb,
- "ES len assertion failed for inode "
- "%lu: retval %d != map->m_len %d",
- inode->i_ino, retval, map->m_len);
- WARN_ON(1);
- }
-
- /*
- * We have to zeroout blocks before inserting them into extent
- * status tree. Otherwise someone could look them up there and
- * use them before they are really zeroed. We also have to
- * unmap metadata before zeroing as otherwise writeback can
- * overwrite zeros with stale data from block device.
- */
- if (flags & EXT4_GET_BLOCKS_ZERO &&
- map->m_flags & EXT4_MAP_MAPPED &&
- map->m_flags & EXT4_MAP_NEW) {
- ret = ext4_issue_zeroout(inode, map->m_lblk,
- map->m_pblk, map->m_len);
- if (ret) {
- retval = ret;
- goto out_sem;
- }
- }
-
- /*
- * If the extent has been zeroed out, we don't need to update
- * extent status tree.
- */
- if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
- ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
- if (ext4_es_is_written(&es))
- goto out_sem;
- }
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
- !(status & EXTENT_STATUS_WRITTEN) &&
- ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
- map->m_lblk + map->m_len - 1))
- status |= EXTENT_STATUS_DELAYED;
- ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
- }
-
-out_sem:
+ retval = ext4_map_create_blocks(handle, inode, map, flags);
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
@@ -1018,32 +1005,16 @@ static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
int do_journal_get_write_access(handle_t *handle, struct inode *inode,
struct buffer_head *bh)
{
- int dirty = buffer_dirty(bh);
- int ret;
-
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
- /*
- * __block_write_begin() could have dirtied some buffers. Clean
- * the dirty bit as jbd2_journal_get_write_access() could complain
- * otherwise about fs integrity issues. Setting of the dirty bit
- * by __block_write_begin() isn't a real problem here as we clear
- * the bit before releasing a page lock and thus writeback cannot
- * ever write the buffer.
- */
- if (dirty)
- clear_buffer_dirty(bh);
BUFFER_TRACE(bh, "get write access");
- ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
+ return ext4_journal_get_write_access(handle, inode->i_sb, bh,
EXT4_JTR_NONE);
- if (!ret && dirty)
- ret = ext4_dirty_journalled_data(handle, bh);
- return ret;
}
-#ifdef CONFIG_FS_ENCRYPTION
-static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
- get_block_t *get_block)
+int ext4_block_write_begin(handle_t *handle, struct folio *folio,
+ loff_t pos, unsigned len,
+ get_block_t *get_block)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
@@ -1056,6 +1027,7 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
struct buffer_head *bh, *head, *wait[2];
int nr_wait = 0;
int i;
+ bool should_journal_data = ext4_should_journal_data(inode);
BUG_ON(!folio_test_locked(folio));
BUG_ON(from > PAGE_SIZE);
@@ -1085,10 +1057,22 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
if (err)
break;
if (buffer_new(bh)) {
+ /*
+ * We may be zeroing partial buffers or all new
+ * buffers in case of failure. Prepare JBD2 for
+ * that.
+ */
+ if (should_journal_data)
+ do_journal_get_write_access(handle,
+ inode, bh);
if (folio_test_uptodate(folio)) {
- clear_buffer_new(bh);
+ /*
+ * Unlike __block_write_begin() we leave
+ * dirtying of new uptodate buffers to
+ * ->write_end() time or
+ * folio_zero_new_buffers().
+ */
set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
@@ -1118,7 +1102,11 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
err = -EIO;
}
if (unlikely(err)) {
- folio_zero_new_buffers(folio, from, to);
+ if (should_journal_data)
+ ext4_journalled_zero_new_buffers(handle, inode, folio,
+ from, to);
+ else
+ folio_zero_new_buffers(folio, from, to);
} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
for (i = 0; i < nr_wait; i++) {
int err2;
@@ -1134,7 +1122,6 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
return err;
}
-#endif
/*
* To preserve ordering, it is essential that the hole instantiation and
@@ -1216,19 +1203,12 @@ retry_journal:
/* In case writeback began while the folio was unlocked */
folio_wait_stable(folio);
-#ifdef CONFIG_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode))
- ret = ext4_block_write_begin(folio, pos, len,
+ ret = ext4_block_write_begin(handle, folio, pos, len,
ext4_get_block_unwritten);
else
- ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
-#else
- if (ext4_should_dioread_nolock(inode))
- ret = __block_write_begin(folio, pos, len,
- ext4_get_block_unwritten);
- else
- ret = __block_write_begin(folio, pos, len, ext4_get_block);
-#endif
+ ret = ext4_block_write_begin(handle, folio, pos, len,
+ ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode,
folio_buffers(folio), from, to,
@@ -1241,7 +1221,7 @@ retry_journal:
folio_unlock(folio);
/*
- * __block_write_begin may have instantiated a few blocks
+ * ext4_block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_rwsem.
*
@@ -1388,9 +1368,9 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle,
size = min(to, block_end) - start;
folio_zero_range(folio, start, size);
- write_end_fn(handle, inode, bh);
}
clear_buffer_new(bh);
+ write_end_fn(handle, inode, bh);
}
}
block_start = block_end;
@@ -1661,7 +1641,7 @@ static int ext4_clu_alloc_state(struct inode *inode, ext4_lblk_t lblk)
int ret;
/* Has delalloc reservation? */
- if (ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk))
+ if (ext4_es_scan_clu(inode, &ext4_es_is_delayed, lblk))
return 1;
/* Already been allocated? */
@@ -1782,7 +1762,7 @@ found:
* Delayed extent could be allocated by fallocate.
* So we need to check it.
*/
- if (ext4_es_is_delonly(&es)) {
+ if (ext4_es_is_delayed(&es)) {
map->m_flags |= EXT4_MAP_DELAYED;
return 0;
}
@@ -2217,11 +2197,6 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
* writeback and there is nothing we can do about it so it might result
* in data loss. So use reserved blocks to allocate metadata if
* possible.
- *
- * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
- * the blocks in question are delalloc blocks. This indicates
- * that the blocks and quotas has already been checked when
- * the data was copied into the page cache.
*/
get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
EXT4_GET_BLOCKS_METADATA_NOFAIL |
@@ -2229,8 +2204,6 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
- if (map->m_flags & BIT(BH_Delay))
- get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
if (err < 0)
@@ -2959,11 +2932,8 @@ retry:
if (IS_ERR(folio))
return PTR_ERR(folio);
-#ifdef CONFIG_FS_ENCRYPTION
- ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
-#else
- ret = __block_write_begin(folio, pos, len, ext4_da_get_block_prep);
-#endif
+ ret = ext4_block_write_begin(NULL, folio, pos, len,
+ ext4_da_get_block_prep);
if (ret < 0) {
folio_unlock(folio);
folio_put(folio);
@@ -4067,7 +4037,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
stop_block);
ext4_es_insert_extent(inode, first_block, hole_len, ~0,
- EXTENT_STATUS_HOLE);
+ EXTENT_STATUS_HOLE, 0);
up_write(&EXT4_I(inode)->i_data_sem);
}
ext4_fc_track_range(handle, inode, first_block, stop_block);
@@ -5276,8 +5246,9 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
unsigned offset;
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- tid_t commit_tid = 0;
+ tid_t commit_tid;
int ret;
+ bool has_transaction;
offset = inode->i_size & (PAGE_SIZE - 1);
/*
@@ -5302,12 +5273,14 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
folio_put(folio);
if (ret != -EBUSY)
return;
- commit_tid = 0;
+ has_transaction = false;
read_lock(&journal->j_state_lock);
- if (journal->j_committing_transaction)
+ if (journal->j_committing_transaction) {
commit_tid = journal->j_committing_transaction->t_tid;
+ has_transaction = true;
+ }
read_unlock(&journal->j_state_lock);
- if (commit_tid)
+ if (has_transaction)
jbd2_log_wait_commit(journal, commit_tid);
}
}
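
This fix replaces the commit_tid == 0 sentinel with an explicit has_transaction flag: tid_t is an unsigned counter that can wrap, so 0 is a legitimate transaction ID and cannot double as "nothing captured". A small self-contained sketch of the same idea, with hypothetical names:

/* Sentinel problem in miniature: 0 is a legal value, so a separate
 * flag must record whether a value was captured at all. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int tid_t;   /* wraps, so 0 is a valid tid */

struct journal { bool committing; tid_t tid; };

static void wait_for_commit(const struct journal *j)
{
	tid_t commit_tid;
	bool has_transaction = false;

	if (j->committing) {
		commit_tid = j->tid;
		has_transaction = true;
	}
	if (has_transaction)             /* not: if (commit_tid) */
		printf("waiting for tid %u\n", commit_tid);
	else
		printf("nothing to wait for\n");
}

int main(void)
{
	struct journal j = { .committing = true, .tid = 0 };

	wait_for_commit(&j);   /* correctly waits even though tid == 0 */
	return 0;
}
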
@@ -6216,7 +6189,8 @@ retry_alloc:
if (folio_pos(folio) + len > size)
len = size - folio_pos(folio);
- err = __block_write_begin(folio, 0, len, ext4_get_block);
+ err = ext4_block_write_begin(handle, folio, 0, len,
+ ext4_get_block);
if (!err) {
ret = VM_FAULT_SIGBUS;
if (ext4_journal_folio_buffers(handle, folio, len))
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index e8bf5972dd47..1c77400bd88e 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1343,10 +1343,10 @@ group_extend_out:
me.moved_len = 0;
donor = fdget(me.donor_fd);
- if (!donor.file)
+ if (!fd_file(donor))
return -EBADF;
- if (!(donor.file->f_mode & FMODE_WRITE)) {
+ if (!(fd_file(donor)->f_mode & FMODE_WRITE)) {
err = -EBADF;
goto mext_out;
}
@@ -1367,7 +1367,7 @@ group_extend_out:
if (err)
goto mext_out;
- err = ext4_move_extents(filp, donor.file, me.orig_start,
+ err = ext4_move_extents(filp, fd_file(donor), me.orig_start,
me.donor_start, me.len, &me.moved_len);
mnt_drop_write_file(filp);
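
The move_extent ioctl path switches from reaching into donor.file directly to the fd_file() accessor, keeping callers independent of how struct fd packs its fields. A hedged userspace model of the accessor pattern (this is not the kernel's actual struct fd layout):

/* Userspace model: an accessor keeps callers independent of how the
 * handle packs its fields. Names are illustrative. */
#include <stdio.h>

struct file { int f_mode; };
struct fd { struct file *file; unsigned flags; };   /* layout private */

static inline struct file *fd_file_model(struct fd f) { return f.file; }

int main(void)
{
	struct file donor_file = { .f_mode = 2 };
	struct fd donor = { .file = &donor_file };

	if (!fd_file_model(donor))
		return 1;                     /* -EBADF in the kernel */
	printf("mode %d\n", fd_file_model(donor)->f_mode);
	return 0;
}
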
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9dda9cd68ab2..d73e38323879 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2356,7 +2356,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
ex.fe_logical = 0xDEADFA11; /* debug value */
if (max >= ac->ac_g_ex.fe_len &&
- ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) {
+ ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) {
ext4_fsblk_t start;
start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
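
EXT4_B2C() truncates when converting a block offset to a cluster offset, while EXT4_NUM_B2C() rounds a block count up; using the truncating form on s_stripe under-counts whenever the stripe is not a whole multiple of the cluster size. A standalone demo with simplified stand-in macros (the power-of-two cluster ratio and the macro bodies are assumptions of this sketch, not copies of ext4.h):

/* Truncating vs rounding-up block-to-cluster conversion.
 * Build: cc -o b2c b2c.c */
#include <stdio.h>

#define CLUSTER_BITS  4
#define CLUSTER_RATIO (1u << CLUSTER_BITS)   /* 16 blocks per cluster */

#define B2C(blk)      ((blk) >> CLUSTER_BITS)                        /* offset */
#define NUM_B2C(blks) (((blks) + CLUSTER_RATIO - 1) >> CLUSTER_BITS) /* count  */

int main(void)
{
	unsigned stripe = 24;   /* 1.5 clusters */

	printf("B2C(24)     = %u (truncates to 1)\n", B2C(stripe));
	printf("NUM_B2C(24) = %u (rounds up to 2)\n", NUM_B2C(stripe));
	return 0;
}
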
@@ -2553,7 +2553,7 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
do_div(a, sbi->s_stripe);
i = (a * sbi->s_stripe) - first_group_block;
- stripe = EXT4_B2C(sbi, sbi->s_stripe);
+ stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe);
i = EXT4_B2C(sbi, i);
while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
if (!mb_test_bit(i, bitmap)) {
@@ -2928,9 +2928,11 @@ repeat:
if (cr == CR_POWER2_ALIGNED)
ext4_mb_simple_scan_group(ac, &e4b);
else {
- bool is_stripe_aligned = sbi->s_stripe &&
+ bool is_stripe_aligned =
+ (sbi->s_stripe >=
+ sbi->s_cluster_ratio) &&
!(ac->ac_g_ex.fe_len %
- EXT4_B2C(sbi, sbi->s_stripe));
+ EXT4_NUM_B2C(sbi, sbi->s_stripe));
if ((cr == CR_GOAL_LEN_FAST ||
cr == CR_BEST_AVAIL_LEN) &&
@@ -3075,8 +3077,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
seq_puts(seq, " ]");
if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
seq_puts(seq, " Block bitmap corrupted!");
- seq_puts(seq, "\n");
-
+ seq_putc(seq, '\n');
return 0;
}
@@ -3707,7 +3708,7 @@ int ext4_mb_init(struct super_block *sb)
*/
if (sbi->s_stripe > 1) {
sbi->s_mb_group_prealloc = roundup(
- sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
+ sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
}
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
@@ -3887,11 +3888,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
/*
* Clear the trimmed flag for the group so that the next
* ext4_trim_fs can trim it.
- * If the volume is mounted with -o discard, online discard
- * is supported and the free blocks will be trimmed online.
*/
- if (!test_opt(sb, DISCARD))
- EXT4_MB_GRP_CLEAR_TRIMMED(db);
+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
if (!db->bb_free_root.rb_node) {
/* No more items in the per group rb tree
@@ -6515,8 +6513,9 @@ do_more:
" group:%u block:%d count:%lu failed"
" with %d", block_group, bit, count,
err);
- } else
- EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
+ }
+
+ EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
ext4_lock_group(sb, block_group);
mb_free_blocks(inode, &e4b, bit, count_clusters);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index d98ac2af8199..1b0dfd963d3f 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -37,7 +37,6 @@ static int finish_range(handle_t *handle, struct inode *inode,
path = ext4_find_extent(inode, lb->first_block, NULL, 0);
if (IS_ERR(path)) {
retval = PTR_ERR(path);
- path = NULL;
goto err_out;
}
@@ -53,7 +52,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
if (retval < 0)
goto err_out;
- retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
+ path = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
+ if (IS_ERR(path))
+ retval = PTR_ERR(path);
err_out:
up_write((&EXT4_I(inode)->i_data_sem));
ext4_free_ext_path(path);
@@ -663,8 +664,8 @@ int ext4_ind_migrate(struct inode *inode)
if (unlikely(ret2 && !ret))
ret = ret2;
errout:
- ext4_journal_stop(handle);
up_write(&EXT4_I(inode)->i_data_sem);
+ ext4_journal_stop(handle);
out_unlock:
ext4_writepages_up_write(inode->i_sb, alloc_ctx);
return ret;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 204f53b23622..b64661ea6e0e 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -17,27 +17,23 @@
* get_ext_path() - Find an extent path for designated logical block number.
* @inode: inode to be searched
* @lblock: logical block number to find an extent path
- * @ppath: pointer to an extent path pointer (for output)
+ * @path: pointer to an extent path
*
- * ext4_find_extent wrapper. Return 0 on success, or a negative error value
- * on failure.
+ * ext4_find_extent wrapper. Return an extent path pointer on success,
+ * or an error pointer on failure.
*/
-static inline int
+static inline struct ext4_ext_path *
get_ext_path(struct inode *inode, ext4_lblk_t lblock,
- struct ext4_ext_path **ppath)
+ struct ext4_ext_path *path)
{
- struct ext4_ext_path *path;
-
- path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
+ path = ext4_find_extent(inode, lblock, path, EXT4_EX_NOCACHE);
if (IS_ERR(path))
- return PTR_ERR(path);
+ return path;
if (path[ext_depth(inode)].p_ext == NULL) {
ext4_free_ext_path(path);
- *ppath = NULL;
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
}
- *ppath = path;
- return 0;
+ return path;
}
/**
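
get_ext_path() now returns either the path or an error pointer, and callers test it with IS_ERR()/PTR_ERR() rather than juggling an int return plus an output parameter. A self-contained userspace re-creation of the error-pointer encoding, assuming the usual flat address space where the top page of addresses is never a valid pointer:

/* Error-pointer idiom: small negative errno values are folded into
 * the pointer itself, so one return value carries either a valid
 * pointer or an error. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int value = 42;

static int *lookup(int want_error)
{
	if (want_error)
		return ERR_PTR(-ENODATA);
	return &value;
}

int main(void)
{
	int *p = lookup(1);

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));   /* -ENODATA */
	else
		printf("value %d\n", *p);
	return 0;
}
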
@@ -95,9 +91,11 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
int ret = 0;
ext4_lblk_t last = from + count;
while (from < last) {
- *err = get_ext_path(inode, from, &path);
- if (*err)
- goto out;
+ path = get_ext_path(inode, from, path);
+ if (IS_ERR(path)) {
+ *err = PTR_ERR(path);
+ return ret;
+ }
ext = path[ext_depth(inode)].p_ext;
if (unwritten != ext4_ext_is_unwritten(ext))
goto out;
@@ -166,15 +164,16 @@ mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
return 0;
}
-/* Force page buffers uptodate w/o dropping page's lock */
-static int
-mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
+/* Force folio buffers uptodate w/o dropping folio's lock */
+static int mext_page_mkuptodate(struct folio *folio, size_t from, size_t to)
{
struct inode *inode = folio->mapping->host;
sector_t block;
- struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+ struct buffer_head *bh, *head;
unsigned int blocksize, block_start, block_end;
- int i, err, nr = 0, partial = 0;
+ int nr = 0;
+ bool partial = false;
+
BUG_ON(!folio_test_locked(folio));
BUG_ON(folio_test_writeback(folio));
@@ -186,19 +185,21 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
if (!head)
head = create_empty_buffers(folio, blocksize, 0);
- block = (sector_t)folio->index << (PAGE_SHIFT - inode->i_blkbits);
- for (bh = head, block_start = 0; bh != head || !block_start;
- block++, block_start = block_end, bh = bh->b_this_page) {
+ block = folio_pos(folio) >> inode->i_blkbits;
+ block_end = 0;
+ bh = head;
+ do {
+ block_start = block_end;
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (!buffer_uptodate(bh))
- partial = 1;
+ partial = true;
continue;
}
if (buffer_uptodate(bh))
continue;
if (!buffer_mapped(bh)) {
- err = ext4_get_block(inode, block, bh, 0);
+ int err = ext4_get_block(inode, block, bh, 0);
if (err)
return err;
if (!buffer_mapped(bh)) {
@@ -207,21 +208,30 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
continue;
}
}
- BUG_ON(nr >= MAX_BUF_PER_PAGE);
- arr[nr++] = bh;
- }
+ lock_buffer(bh);
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+ ext4_read_bh_nowait(bh, 0, NULL);
+ nr++;
+ } while (block++, (bh = bh->b_this_page) != head);
+
/* No io required */
if (!nr)
goto out;
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- if (!bh_uptodate_or_lock(bh)) {
- err = ext4_read_bh(bh, 0, NULL);
- if (err)
- return err;
- }
- }
+ bh = head;
+ do {
+ if (bh_offset(bh) + blocksize <= from)
+ continue;
+ if (bh_offset(bh) > to)
+ break;
+ wait_on_buffer(bh);
+ if (buffer_uptodate(bh))
+ continue;
+ return -EIO;
+ } while ((bh = bh->b_this_page) != head);
out:
if (!partial)
folio_mark_uptodate(folio);
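
mext_page_mkuptodate() drops the fixed arr[MAX_BUF_PER_PAGE] staging array and instead walks the folio's circular buffer-head list twice, once to issue reads and once to wait, using the canonical do/while ring traversal. A minimal sketch of that traversal over a hypothetical four-node ring:

/* Circular singly-linked list walk, as used for per-folio buffer
 * heads; node and field names are illustrative. */
#include <stdio.h>

struct bh { int blocknr; struct bh *next; };   /* stands in for b_this_page */

int main(void)
{
	struct bh n[4], *head = &n[0], *bh;
	int i;

	for (i = 0; i < 4; i++) {
		n[i].blocknr = 100 + i;
		n[i].next = &n[(i + 1) % 4];   /* last points back to head */
	}

	/* Canonical traversal: visits every node exactly once, with no
	 * bound like MAX_BUF_PER_PAGE and no staging array. */
	bh = head;
	do {
		printf("block %d\n", bh->blocknr);
	} while ((bh = bh->next) != head);
	return 0;
}
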
@@ -624,9 +634,11 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
int offset_in_page;
int unwritten, cur_len;
- ret = get_ext_path(orig_inode, o_start, &path);
- if (ret)
+ path = get_ext_path(orig_inode, o_start, path);
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
goto out;
+ }
ex = path[path->p_depth].p_ext;
cur_blk = le32_to_cpu(ex->ee_block);
cur_len = ext4_ext_get_actual_len(ex);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 6a95713f9193..790db7eac6c2 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1482,7 +1482,7 @@ static bool ext4_match(struct inode *parent,
}
/*
- * Returns 0 if not found, -1 on failure, and 1 on success
+ * Returns 0 if not found, -EFSCORRUPTED on failure, and 1 on success
*/
int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
struct inode *dir, struct ext4_filename *fname,
@@ -1503,7 +1503,7 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
* a full check */
if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
buf_size, offset))
- return -1;
+ return -EFSCORRUPTED;
*res_dir = de;
return 1;
}
@@ -1511,7 +1511,7 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
de_len = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
if (de_len <= 0)
- return -1;
+ return -EFSCORRUPTED;
offset += de_len;
de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
}
@@ -1574,7 +1574,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
&has_inline_data);
if (inlined)
*inlined = has_inline_data;
- if (has_inline_data)
+ if (has_inline_data || IS_ERR(ret))
goto cleanup_and_exit;
}
@@ -1663,8 +1663,10 @@ restart:
goto cleanup_and_exit;
} else {
brelse(bh);
- if (i < 0)
+ if (i < 0) {
+ ret = ERR_PTR(i);
goto cleanup_and_exit;
+ }
}
next:
if (++block >= nblocks)
@@ -1758,7 +1760,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
if (retval == 1)
goto success;
brelse(bh);
- if (retval == -1) {
+ if (retval < 0) {
bh = ERR_PTR(ERR_BAD_DX_DIR);
goto errout;
}
@@ -1999,7 +2001,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
split = count/2;
hash2 = map[split].hash;
- continued = hash2 == map[split - 1].hash;
+ continued = split > 0 ? hash2 == map[split - 1].hash : 0;
dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
(unsigned long)dx_get_block(frame->at),
hash2, split, count-split));
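
The do_split() change guards the map[split - 1] access: with a degenerate hash distribution split can be 0, and the old unconditional read ran off the front of the array. A tiny demo of the short-circuit guard:

/* Evaluate the neighbour comparison only when the index is in range. */
#include <stdio.h>

int main(void)
{
	unsigned int map[4] = { 7, 7, 9, 11 };
	int split = 0;   /* pathological case */
	int continued = split > 0 ? map[split] == map[split - 1] : 0;

	printf("continued = %d\n", continued);   /* no out-of-bounds read */
	return 0;
}
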
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 8494492582ab..5d3a9dc9a32d 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -221,7 +221,7 @@ int ext4_mpage_readpages(struct inode *inode,
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
- sector_t blocks[MAX_BUF_PER_PAGE];
+ sector_t first_block;
unsigned page_block;
struct block_device *bdev = inode->i_sb->s_bdev;
int length;
@@ -263,6 +263,7 @@ int ext4_mpage_readpages(struct inode *inode,
unsigned map_offset = block_in_file - map.m_lblk;
unsigned last = map.m_len - map_offset;
+ first_block = map.m_pblk + map_offset;
for (relative_block = 0; ; relative_block++) {
if (relative_block == last) {
/* needed? */
@@ -271,8 +272,6 @@ int ext4_mpage_readpages(struct inode *inode,
}
if (page_block == blocks_per_page)
break;
- blocks[page_block] = map.m_pblk + map_offset +
- relative_block;
page_block++;
block_in_file++;
}
@@ -307,7 +306,9 @@ int ext4_mpage_readpages(struct inode *inode,
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
- if (page_block && blocks[page_block-1] != map.m_pblk-1)
+ if (!page_block)
+ first_block = map.m_pblk;
+ else if (first_block + page_block != map.m_pblk)
goto confused;
for (relative_block = 0; ; relative_block++) {
if (relative_block == map.m_len) {
@@ -316,7 +317,6 @@ int ext4_mpage_readpages(struct inode *inode,
break;
} else if (page_block == blocks_per_page)
break;
- blocks[page_block] = map.m_pblk+relative_block;
page_block++;
block_in_file++;
}
@@ -339,7 +339,7 @@ int ext4_mpage_readpages(struct inode *inode,
* This folio will go to BIO. Do we need to send this
* BIO off first?
*/
- if (bio && (last_block_in_bio != blocks[0] - 1 ||
+ if (bio && (last_block_in_bio != first_block - 1 ||
!fscrypt_mergeable_bio(bio, inode, next_block))) {
submit_and_realloc:
submit_bio(bio);
@@ -355,7 +355,7 @@ int ext4_mpage_readpages(struct inode *inode,
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
ext4_set_bio_post_read_ctx(bio, inode, folio->index);
- bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
+ bio->bi_iter.bi_sector = first_block << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
if (rac)
bio->bi_opf |= REQ_RAHEAD;
@@ -371,7 +371,7 @@ int ext4_mpage_readpages(struct inode *inode,
submit_bio(bio);
bio = NULL;
} else
- last_block_in_bio = blocks[blocks_per_page - 1];
+ last_block_in_bio = first_block + blocks_per_page - 1;
continue;
confused:
if (bio) {
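
ext4_mpage_readpages() no longer records every physical block in a blocks[MAX_BUF_PER_PAGE] array; the only consumers were a contiguity check and the BIO start sector, and both follow from first_block alone, since block k of the folio must land at first_block + k. A small demo of the equivalent check (the values are made up):

/* For a contiguous run, storing the first physical block is as good
 * as storing all of them. */
#include <stdio.h>

int main(void)
{
	unsigned long first_block = 1000;   /* pblk of folio block 0 */
	unsigned page_block = 3;            /* blocks mapped so far   */
	unsigned long next_pblk = 1003;     /* next extent's start    */

	/* Old check: blocks[page_block - 1] != next_pblk - 1.
	 * New check, same truth value for contiguous runs: */
	if (first_block + page_block != next_pblk)
		printf("discontiguous -> fall back to slow path\n");
	else
		printf("contiguous -> extend the BIO\n");
	return 0;
}
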
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 0ba9837d65ca..e04eb08b9060 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1319,8 +1319,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
if (!bh)
return -EIO;
- ext4_inode_bitmap_csum_set(sb, gdp, bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ ext4_inode_bitmap_csum_set(sb, gdp, bh);
brelse(bh);
bh = ext4_get_bitmap(sb, group_data->block_bitmap);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e72145c4ae5a..16a4ce704460 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -735,11 +735,12 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
/*
- * Make sure updated value of ->s_mount_flags will be visible before
- * ->s_flags update
+	 * EXT4_FLAGS_SHUTDOWN was set, which stops all filesystem
+	 * modifications. We don't set SB_RDONLY because that requires
+	 * the sb->s_umount semaphore, and setting it without the proper
+	 * remount procedure confuses code such as freeze_super(),
+	 * leading to deadlocks and other problems.
*/
- smp_wmb();
- sb->s_flags |= SB_RDONLY;
}
static void update_super_work(struct work_struct *work)
@@ -3045,7 +3046,7 @@ int ext4_seq_options_show(struct seq_file *seq, void *offset)
seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
rc = _ext4_show_options(seq, sb, 1);
- seq_puts(seq, "\n");
+ seq_putc(seq, '\n');
return rc;
}
@@ -5087,16 +5088,27 @@ out:
return ret;
}
-static void ext4_hash_info_init(struct super_block *sb)
+static int ext4_hash_info_init(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
unsigned int i;
+ sbi->s_def_hash_version = es->s_def_hash_version;
+
+ if (sbi->s_def_hash_version > DX_HASH_LAST) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid default hash set in the superblock");
+ return -EINVAL;
+ } else if (sbi->s_def_hash_version == DX_HASH_SIPHASH) {
+ ext4_msg(sb, KERN_ERR,
+ "SIPHASH is not a valid default hash value");
+ return -EINVAL;
+ }
+
for (i = 0; i < 4; i++)
sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
- sbi->s_def_hash_version = es->s_def_hash_version;
if (ext4_has_feature_dir_index(sb)) {
i = le32_to_cpu(es->s_flags);
if (i & EXT2_FLAGS_UNSIGNED_HASH)
@@ -5114,6 +5126,7 @@ static void ext4_hash_info_init(struct super_block *sb)
#endif
}
}
+ return 0;
}
static int ext4_block_group_meta_init(struct super_block *sb, int silent)
@@ -5165,6 +5178,18 @@ static int ext4_block_group_meta_init(struct super_block *sb, int silent)
return 0;
}
+/*
+ * It's hard to get stripe-aligned blocks if the stripe is not aligned
+ * with the cluster size, so just disable striping and alert the user;
+ * this simplifies the code and avoids stripe-aligned allocations that
+ * would rarely succeed.
+ */
+static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ return (stripe > 0 && sbi->s_cluster_ratio > 1 &&
+ stripe % sbi->s_cluster_ratio != 0);
+}
+
static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
{
struct ext4_super_block *es = NULL;
@@ -5249,7 +5274,9 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
if (err)
goto failed_mount;
- ext4_hash_info_init(sb);
+ err = ext4_hash_info_init(sb);
+ if (err)
+ goto failed_mount;
err = ext4_handle_clustersize(sb);
if (err)
@@ -5272,13 +5299,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
goto failed_mount3;
sbi->s_stripe = ext4_get_stripe_size(sbi);
- /*
- * It's hard to get stripe aligned blocks if stripe is not aligned with
- * cluster, just disable stripe and alert user to simpfy code and avoid
- * stripe aligned allocation which will rarely successes.
- */
- if (sbi->s_stripe > 0 && sbi->s_cluster_ratio > 1 &&
- sbi->s_stripe % sbi->s_cluster_ratio != 0) {
+ if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) {
ext4_msg(sb, KERN_WARNING,
"stripe (%lu) is not aligned with cluster size (%u), "
"stripe is disabled",
@@ -5313,6 +5334,8 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
+ spin_lock_init(&sbi->s_bdev_wb_lock);
+
ext4_fast_commit_init(sb);
sb->s_root = NULL;
@@ -5534,7 +5557,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
* Save the original bdev mapping's wb_err value which could be
* used to detect the metadata async write error.
*/
- spin_lock_init(&sbi->s_bdev_wb_lock);
errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
&sbi->s_bdev_wb_err);
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
@@ -5614,8 +5636,8 @@ failed_mount3a:
failed_mount3:
/* flush s_sb_upd_work before sbi destroy */
flush_work(&sbi->s_sb_upd_work);
- del_timer_sync(&sbi->s_err_report);
ext4_stop_mmpd(sbi);
+ del_timer_sync(&sbi->s_err_report);
ext4_group_desc_free(sbi);
failed_mount:
if (sbi->s_chksum_driver)
@@ -6441,6 +6463,15 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
}
+ if ((ctx->spec & EXT4_SPEC_s_stripe) &&
+ ext4_is_stripe_incompatible(sb, ctx->s_stripe)) {
+ ext4_msg(sb, KERN_WARNING,
+ "stripe (%lu) is not aligned with cluster size (%u), "
+ "stripe is disabled",
+ ctx->s_stripe, sbi->s_cluster_ratio);
+ ctx->s_stripe = 0;
+ }
+
/*
* Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
* two calls to ext4_should_dioread_nolock() to return inconsistent
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 46ce2f21fef9..e0e1956dcdd3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -458,7 +458,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
ext4_xattr_inode_set_ref(inode, 1);
} else {
- inode_lock(inode);
+ inode_lock_nested(inode, I_MUTEX_XATTR);
inode->i_flags |= S_NOQUOTA;
inode_unlock(inode);
}
@@ -1039,7 +1039,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
s64 ref_count;
int ret;
- inode_lock(ea_inode);
+ inode_lock_nested(ea_inode, I_MUTEX_XATTR);
ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
if (ret)
@@ -2879,33 +2879,31 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
if (*ea_inode_array == NULL) {
/*
* Start with 15 inodes, so it fits into a power-of-two size.
- * If *ea_inode_array is NULL, this is essentially offsetof()
*/
- (*ea_inode_array) =
- kmalloc(offsetof(struct ext4_xattr_inode_array,
- inodes[EIA_MASK]),
- GFP_NOFS);
+ (*ea_inode_array) = kmalloc(
+ struct_size(*ea_inode_array, inodes, EIA_MASK),
+ GFP_NOFS);
if (*ea_inode_array == NULL)
return -ENOMEM;
(*ea_inode_array)->count = 0;
} else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
/* expand the array once all 15 + n * 16 slots are full */
struct ext4_xattr_inode_array *new_array = NULL;
- int count = (*ea_inode_array)->count;
- /* if new_array is NULL, this is essentially offsetof() */
new_array = kmalloc(
- offsetof(struct ext4_xattr_inode_array,
- inodes[count + EIA_INCR]),
- GFP_NOFS);
+ struct_size(*ea_inode_array, inodes,
+ (*ea_inode_array)->count + EIA_INCR),
+ GFP_NOFS);
if (new_array == NULL)
return -ENOMEM;
memcpy(new_array, *ea_inode_array,
- offsetof(struct ext4_xattr_inode_array, inodes[count]));
+ struct_size(*ea_inode_array, inodes,
+ (*ea_inode_array)->count));
kfree(*ea_inode_array);
*ea_inode_array = new_array;
}
- (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
+ (*ea_inode_array)->count++;
+ (*ea_inode_array)->inodes[(*ea_inode_array)->count - 1] = inode;
return 0;
}
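
The allocation sizing moves from open-coded offsetof(..., inodes[n]) to struct_size(), which computes sizeof(header) + n * sizeof(element) and, in the kernel's overflow.h version, saturates on overflow. A userspace approximation, with the saturation behaviour deliberately omitted:

/* Approximation of struct_size() for a flexible-array struct; the
 * kernel macro additionally saturates on multiply/add overflow. */
#include <stdio.h>
#include <stdlib.h>

struct inode;   /* opaque for this demo */

struct inode_array {
	unsigned int count;
	struct inode *inodes[];          /* flexible array member */
};

#define struct_size_demo(ptr, member, n) \
	(sizeof(*(ptr)) + (n) * sizeof((ptr)->member[0]))

int main(void)
{
	struct inode_array *arr = NULL;
	size_t bytes = struct_size_demo(arr, inodes, 15);

	arr = malloc(bytes);
	if (!arr)
		return 1;
	arr->count = 0;
	printf("allocated %zu bytes for 15 slots\n", bytes);
	free(arr);
	return 0;
}
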
@@ -3036,8 +3034,6 @@ void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
*
* Create a new entry in the extended attribute block cache, and insert
* it unless such an entry is already in the cache.
- *
- * Returns 0, or a negative error number on failure.
*/
static void
ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
@@ -3065,8 +3061,7 @@ ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
*
* Compare two extended attribute blocks for equality.
*
- * Returns 0 if the blocks are equal, 1 if they differ, and
- * a negative error number on errors.
+ * Returns 0 if the blocks are equal, 1 if they differ.
*/
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index bd97c4aa8177..b25c2d7b5f99 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -32,8 +32,7 @@ struct ext4_xattr_header {
__le32 h_refcount; /* reference count */
__le32 h_blocks; /* number of disk blocks used */
__le32 h_hash; /* hash value of all attributes */
- __le32 h_checksum; /* crc32c(uuid+id+xattrblock) */
- /* id = inum if refcount=1, blknum otherwise */
+ __le32 h_checksum; /* crc32c(uuid+blknum+xattrblock) */
__u32 h_reserved[3]; /* zero right now */
};
@@ -130,8 +129,8 @@ struct ext4_xattr_ibody_find {
};
struct ext4_xattr_inode_array {
- unsigned int count; /* # of used items in the array */
- struct inode *inodes[];
+ unsigned int count;
+ struct inode *inodes[] __counted_by(count);
};
extern const struct xattr_handler ext4_xattr_user_handler;
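
Annotating the flexible array with __counted_by(count) lets fortified builds bounds-check indexing against the runtime count field; that is also why the xattr.c hunk above bumps (*ea_inode_array)->count before storing into the new slot. A hedged standalone demo, assuming a compiler recent enough to know the attribute (the guard defines it away otherwise, so the program builds either way):

/* counted_by-annotated flexible array, userspace version. */
#include <stdio.h>
#include <stdlib.h>

#if defined(__has_attribute)
# if __has_attribute(counted_by)
#  define counted_by(m) __attribute__((counted_by(m)))
# endif
#endif
#ifndef counted_by
# define counted_by(m)   /* no-op fallback on older compilers */
#endif

struct demo_array {
	unsigned int count;
	int items[] counted_by(count);
};

int main(void)
{
	struct demo_array *a = malloc(sizeof(*a) + 4 * sizeof(int));

	if (!a)
		return 1;
	/* Bump count before touching the new slot: bounds-checking
	 * builds validate items[i] against the current count. */
	a->count = 1;
	a->items[a->count - 1] = 42;
	printf("%d\n", a->items[0]);
	free(a);
	return 0;
}
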
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index bdd96329dddd..7f76460b721f 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -99,7 +99,7 @@ repeat:
}
if (unlikely(!PageUptodate(page))) {
- f2fs_handle_page_eio(sbi, page->index, META);
+ f2fs_handle_page_eio(sbi, page_folio(page), META);
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
@@ -345,30 +345,31 @@ static int __f2fs_write_meta_page(struct page *page,
enum iostat_type io_type)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct folio *folio = page_folio(page);
- trace_f2fs_writepage(page_folio(page), META);
+ trace_f2fs_writepage(folio, META);
if (unlikely(f2fs_cp_error(sbi))) {
if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
goto redirty_out;
}
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
+ if (wbc->for_reclaim && folio->index < GET_SUM_BLOCK(sbi, 0))
goto redirty_out;
- f2fs_do_write_meta_page(sbi, page, io_type);
+ f2fs_do_write_meta_page(sbi, folio, io_type);
dec_page_count(sbi, F2FS_DIRTY_META);
if (wbc->for_reclaim)
f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
- unlock_page(page);
+ folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_write(sbi, META);
@@ -1551,7 +1552,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++)
f2fs_update_meta_page(sbi, nm_i->nat_bits +
- (i << F2FS_BLKSIZE_BITS), blk + i);
+ F2FS_BLK_TO_BYTES(i), blk + i);
}
/* write out checkpoint buffer at block 0 */
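
This hunk (and a later one in fs/f2fs/data.c) replaces open-coded << F2FS_BLKSIZE_BITS shifts with F2FS_BLK_TO_BYTES(). Centralizing the conversion also makes it easy to widen before shifting; the sketch below assumes the macro does the equivalent cast, which is this sketch's assumption rather than a quote of f2fs:

/* Widen before shifting so large block numbers do not overflow a
 * 32-bit intermediate. */
#include <stdio.h>
#include <stdint.h>

#define BLKSIZE_BITS 12   /* 4 KiB blocks */
#define BLK_TO_BYTES(blk) ((int64_t)(blk) << BLKSIZE_BITS)   /* assumed cast */

int main(void)
{
	uint32_t blk = 0x200000;   /* 2M blocks = 8 GiB */

	printf("unsafe: %u\n", blk << BLKSIZE_BITS);          /* wraps to 0 */
	printf("safe:   %lld\n", (long long)BLK_TO_BYTES(blk));
	return 0;
}
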
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 990b93689b46..7f26440e8595 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -90,11 +90,13 @@ bool f2fs_is_compressed_page(struct page *page)
static void f2fs_set_compressed_page(struct page *page,
struct inode *inode, pgoff_t index, void *data)
{
- attach_page_private(page, (void *)data);
+ struct folio *folio = page_folio(page);
+
+ folio_attach_private(folio, (void *)data);
/* i_crypto_info and iv index */
- page->index = index;
- page->mapping = inode->i_mapping;
+ folio->index = index;
+ folio->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
@@ -160,17 +162,17 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
cc->cluster_idx = NULL_CLUSTER;
}
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio)
{
unsigned int cluster_ofs;
- if (!f2fs_cluster_can_merge_page(cc, page->index))
+ if (!f2fs_cluster_can_merge_page(cc, folio->index))
f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
- cluster_ofs = offset_in_cluster(cc, page->index);
- cc->rpages[cluster_ofs] = page;
+ cluster_ofs = offset_in_cluster(cc, folio->index);
+ cc->rpages[cluster_ofs] = folio_page(folio, 0);
cc->nr_rpages++;
- cc->cluster_idx = cluster_idx(cc, page->index);
+ cc->cluster_idx = cluster_idx(cc, folio->index);
}
#ifdef CONFIG_F2FS_FS_LZO
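
f2fs_compress_ctx_add_page() now takes the folio its callers already hold, so the index comes from the folio itself instead of repeated page_folio() conversions. A grossly simplified userspace model of the page/folio relationship, for orientation only (the real folio is a typed view of a head page, not a separate object):

/* Toy model: a "page" belongs to a folio that owns index and state. */
#include <stdio.h>

struct folio { unsigned long index; int uptodate; };
struct page  { struct folio *head; };

static struct folio *page_folio_model(struct page *p) { return p->head; }

int main(void)
{
	struct folio f = { .index = 7, .uptodate = 0 };
	struct page p = { .head = &f };

	/* Interfaces that take the folio once avoid converting on
	 * every access and avoid duplicating per-page state. */
	struct folio *folio = page_folio_model(&p);

	folio->uptodate = 1;
	printf("folio %lu uptodate=%d\n", folio->index, folio->uptodate);
	return 0;
}
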
@@ -879,7 +881,7 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
/* beyond EOF */
- if (page->index >= nr_pages)
+ if (page_folio(page)->index >= nr_pages)
return true;
}
return false;
@@ -945,7 +947,7 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
int count, i;
- for (i = 1, count = 1; i < cluster_size; i++) {
+ for (i = 0, count = 0; i < cluster_size; i++) {
block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
dn->ofs_in_node + i);
@@ -956,8 +958,8 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
return count;
}
-static int __f2fs_cluster_blocks(struct inode *inode,
- unsigned int cluster_idx, bool compr_blks)
+static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
+ enum cluster_check_type type)
{
struct dnode_of_data dn;
unsigned int start_idx = cluster_idx <<
@@ -978,10 +980,12 @@ static int __f2fs_cluster_blocks(struct inode *inode,
}
if (dn.data_blkaddr == COMPRESS_ADDR) {
- if (compr_blks)
- ret = __f2fs_get_cluster_blocks(inode, &dn);
- else
+ if (type == CLUSTER_COMPR_BLKS)
+ ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
+ else if (type == CLUSTER_IS_COMPR)
ret = 1;
+ } else if (type == CLUSTER_RAW_BLKS) {
+ ret = __f2fs_get_cluster_blocks(inode, &dn);
}
fail:
f2fs_put_dnode(&dn);
@@ -991,7 +995,16 @@ fail:
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
- return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
+ return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
+ CLUSTER_COMPR_BLKS);
+}
+
+/* return # of raw blocks in non-compressed cluster */
+static int f2fs_decompressed_blocks(struct inode *inode,
+ unsigned int cluster_idx)
+{
+ return __f2fs_cluster_blocks(inode, cluster_idx,
+ CLUSTER_RAW_BLKS);
}
/* return whether cluster is compressed one or not */
@@ -999,7 +1012,16 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
return __f2fs_cluster_blocks(inode,
index >> F2FS_I(inode)->i_log_cluster_size,
- false);
+ CLUSTER_IS_COMPR);
+}
+
+/* return whether cluster contains non raw blocks or not */
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
+{
+ unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
+
+ return f2fs_decompressed_blocks(inode, cluster_idx) !=
+ F2FS_I(inode)->i_cluster_size;
}
static bool cluster_may_compress(struct compress_ctx *cc)
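
__f2fs_cluster_blocks() grows a third behaviour, so the bool parameter becomes an enum. The sketch below loosely mirrors the three cases with hypothetical inputs (a "compressed" flag and a valid-block count stand in for the real dnode walk):

/* Replacing a bool parameter with an enum once a third mode appears. */
#include <stdio.h>

enum cluster_check_type {
	CLUSTER_IS_COMPR,     /* is the cluster compressed at all? */
	CLUSTER_COMPR_BLKS,   /* count blocks in a compressed cluster */
	CLUSTER_RAW_BLKS,     /* count raw blocks in a plain cluster */
};

static int cluster_blocks(int compressed, int valid,
			  enum cluster_check_type type)
{
	if (compressed) {
		if (type == CLUSTER_COMPR_BLKS)
			return 1 + valid;   /* COMPRESS_ADDR slot + data */
		if (type == CLUSTER_IS_COMPR)
			return 1;
	} else if (type == CLUSTER_RAW_BLKS) {
		return valid;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", cluster_blocks(1, 3, CLUSTER_COMPR_BLKS));   /* 4 */
	printf("%d\n", cluster_blocks(0, 3, CLUSTER_RAW_BLKS));     /* 3 */
	return 0;
}
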
@@ -1093,7 +1115,7 @@ retry:
if (PageUptodate(page))
f2fs_put_page(page, 1);
else
- f2fs_compress_ctx_add_page(cc, page);
+ f2fs_compress_ctx_add_page(cc, page_folio(page));
}
if (!f2fs_cluster_is_empty(cc)) {
@@ -1123,7 +1145,7 @@ retry:
}
f2fs_wait_on_page_writeback(page, DATA, true, true);
- f2fs_compress_ctx_add_page(cc, page);
+ f2fs_compress_ctx_add_page(cc, page_folio(page));
if (!PageUptodate(page)) {
release_and_retry:
@@ -1523,7 +1545,8 @@ continue_unlock:
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;
- ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
+ ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]),
+ &submitted,
NULL, NULL, wbc, io_type,
compr_blocks, false);
if (ret) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5dfa0207ad8f..94f7b084f601 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -7,7 +7,6 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
@@ -355,7 +354,7 @@ static void f2fs_write_end_io(struct bio *bio)
}
f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
- page->index != nid_of_node(page));
+ page_folio(page)->index != nid_of_node(page));
dec_page_count(sbi, type);
if (f2fs_in_warm_node_list(sbi, page))
@@ -704,7 +703,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio = __bio_alloc(fio, 1);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- fio->page->index, fio, GFP_NOIO);
+ page_folio(fio->page)->index, fio, GFP_NOIO);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
@@ -803,7 +802,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
fio->new_blkaddr));
if (f2fs_crypt_mergeable_bio(*bio,
fio->page->mapping->host,
- fio->page->index, fio) &&
+ page_folio(fio->page)->index, fio) &&
bio_add_page(*bio, page, PAGE_SIZE, 0) ==
PAGE_SIZE) {
ret = 0;
@@ -903,7 +902,7 @@ alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_VECS);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
- fio->page->index, fio, GFP_NOIO);
+ page_folio(fio->page)->index, fio, GFP_NOIO);
add_bio_entry(fio->sbi, bio, page, fio->temp);
} else {
@@ -996,13 +995,13 @@ next:
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
fio->new_blkaddr) ||
!f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
- bio_page->index, fio)))
+ page_folio(bio_page)->index, fio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
- bio_page->index, fio, GFP_NOIO);
+ page_folio(bio_page)->index, fio, GFP_NOIO);
io->fio = *fio;
}
@@ -1087,7 +1086,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
}
/* This can handle encryption stuffs */
-static int f2fs_submit_page_read(struct inode *inode, struct page *page,
+static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
block_t blkaddr, blk_opf_t op_flags,
bool for_write)
{
@@ -1095,14 +1094,14 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
struct bio *bio;
bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
- page->index, for_write);
+ folio->index, for_write);
if (IS_ERR(bio))
return PTR_ERR(bio);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) {
iostat_update_and_unbind_ctx(bio);
if (bio->bi_private)
mempool_free(bio->bi_private, bio_post_read_ctx_pool);
@@ -1270,7 +1269,7 @@ got_it:
return page;
}
- err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
+ err = f2fs_submit_page_read(inode, page_folio(page), dn.data_blkaddr,
op_flags, for_write);
if (err)
goto put_err;
@@ -1713,6 +1712,14 @@ skip:
dn.ofs_in_node = end_offset;
}
+ if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
+ map->m_may_create) {
+ /* the next block to be allocated may not be contiguous. */
+ if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) ==
+ CAP_BLKS_PER_SEC(sbi) - 1)
+ goto sync_out;
+ }
+
if (pgofs >= end)
goto sync_out;
else if (dn.ofs_in_node < end_offset)
@@ -1939,7 +1946,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
inode_lock_shared(inode);
- maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+ maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
if (start > maxbytes) {
ret = -EFBIG;
goto out;
@@ -2064,7 +2071,7 @@ out:
static inline loff_t f2fs_readpage_limit(struct inode *inode)
{
if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
- return inode->i_sb->s_maxbytes;
+ return F2FS_BLK_TO_BYTES(max_file_blocks(inode));
return i_size_read(inode);
}
@@ -2208,19 +2215,22 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
/* get rid of pages beyond EOF */
for (i = 0; i < cc->cluster_size; i++) {
struct page *page = cc->rpages[i];
+ struct folio *folio;
if (!page)
continue;
- if ((sector_t)page->index >= last_block_in_file) {
- zero_user_segment(page, 0, PAGE_SIZE);
- if (!PageUptodate(page))
- SetPageUptodate(page);
- } else if (!PageUptodate(page)) {
+
+ folio = page_folio(page);
+ if ((sector_t)folio->index >= last_block_in_file) {
+ folio_zero_segment(folio, 0, folio_size(folio));
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+ } else if (!folio_test_uptodate(folio)) {
continue;
}
- unlock_page(page);
+ folio_unlock(folio);
if (for_write)
- put_page(page);
+ folio_put(folio);
cc->rpages[i] = NULL;
cc->nr_rpages--;
}
@@ -2280,7 +2290,7 @@ skip_reading_dnode:
}
for (i = 0; i < cc->nr_cpages; i++) {
- struct page *page = dic->cpages[i];
+ struct folio *folio = page_folio(dic->cpages[i]);
block_t blkaddr;
struct bio_post_read_ctx *ctx;
@@ -2290,7 +2300,8 @@ skip_reading_dnode:
f2fs_wait_on_block_writeback(inode, blkaddr);
- if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+ if (f2fs_load_compressed_page(sbi, folio_page(folio, 0),
+ blkaddr)) {
if (atomic_dec_and_test(&dic->remaining_pages)) {
f2fs_decompress_cluster(dic, true);
break;
@@ -2300,7 +2311,7 @@ skip_reading_dnode:
if (bio && (!page_is_mergeable(sbi, bio,
*last_block_in_bio, blkaddr) ||
- !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
+ !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
submit_and_realloc:
f2fs_submit_read_bio(sbi, bio, DATA);
bio = NULL;
@@ -2309,7 +2320,7 @@ submit_and_realloc:
if (!bio) {
bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
f2fs_ra_op_flags(rac),
- page->index, for_write);
+ folio->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
f2fs_decompress_end_io(dic, ret, true);
@@ -2319,7 +2330,7 @@ submit_and_realloc:
}
}
- if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ if (!bio_add_folio(bio, folio, blocksize, 0))
goto submit_and_realloc;
ctx = get_post_read_ctx(bio);
@@ -2430,7 +2441,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
if (ret)
goto set_error_page;
- f2fs_compress_ctx_add_page(&cc, &folio->page);
+ f2fs_compress_ctx_add_page(&cc, folio);
goto next_page;
read_single_page:
@@ -2645,21 +2656,24 @@ static inline bool need_inplace_update(struct f2fs_io_info *fio)
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
- struct page *page = fio->page;
- struct inode *inode = page->mapping->host;
+ struct folio *folio = page_folio(fio->page);
+ struct inode *inode = folio->mapping->host;
struct dnode_of_data dn;
struct node_info ni;
bool ipu_force = false;
+ bool atomic_commit;
int err = 0;
/* Use COW inode to make dnode_of_data for atomic write */
- if (f2fs_is_atomic_file(inode))
+ atomic_commit = f2fs_is_atomic_file(inode) &&
+ page_private_atomic(folio_page(folio, 0));
+ if (atomic_commit)
set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
else
set_new_dnode(&dn, inode, NULL, NULL, 0);
if (need_inplace_update(fio) &&
- f2fs_lookup_read_extent_cache_block(inode, page->index,
+ f2fs_lookup_read_extent_cache_block(inode, folio->index,
&fio->old_blkaddr)) {
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
DATA_GENERIC_ENHANCE))
@@ -2674,7 +2688,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
return -EAGAIN;
- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
if (err)
goto out;
@@ -2682,8 +2696,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
- ClearPageUptodate(page);
- clear_page_private_gcing(page);
+ folio_clear_uptodate(folio);
+ clear_page_private_gcing(folio_page(folio, 0));
goto out_writepage;
}
got_it:
@@ -2709,7 +2723,7 @@ got_it:
if (err)
goto out_writepage;
- set_page_writeback(page);
+ folio_start_writeback(folio);
f2fs_put_dnode(&dn);
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
@@ -2717,11 +2731,11 @@ got_it:
if (err) {
if (fscrypt_inode_uses_fs_layer_crypto(inode))
fscrypt_finalize_bounce_page(&fio->encrypted_page);
- end_page_writeback(page);
+ folio_end_writeback(folio);
} else {
set_inode_flag(inode, FI_UPDATE_WRITE);
}
- trace_f2fs_do_write_data_page(page_folio(page), IPU);
+ trace_f2fs_do_write_data_page(folio, IPU);
return err;
}
@@ -2743,15 +2757,17 @@ got_it:
if (err)
goto out_writepage;
- set_page_writeback(page);
+ folio_start_writeback(folio);
if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
/* LFS mode write path */
f2fs_outplace_write_data(&dn, fio);
- trace_f2fs_do_write_data_page(page_folio(page), OPU);
+ trace_f2fs_do_write_data_page(folio, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
+ if (atomic_commit)
+ clear_page_private_atomic(folio_page(folio, 0));
out_writepage:
f2fs_put_dnode(&dn);
out:
@@ -2760,7 +2776,7 @@ out:
return err;
}
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio,
sector_t *last_block,
struct writeback_control *wbc,
@@ -2768,12 +2784,13 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
int compr_blocks,
bool allow_balance)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
+ struct page *page = folio_page(folio, 0);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long)i_size)
>> PAGE_SHIFT;
- loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
bool quota_inode = IS_NOQUOTA(inode);
@@ -2797,11 +2814,11 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.last_block = last_block,
};
- trace_f2fs_writepage(page_folio(page), DATA);
+ trace_f2fs_writepage(folio, DATA);
/* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(page->mapping, -EIO);
+ mapping_set_error(folio->mapping, -EIO);
/*
		 * don't drop any dirty dentry pages, to keep the latest
* directory structure.
@@ -2819,7 +2836,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (page->index < end_index ||
+ if (folio->index < end_index ||
f2fs_verity_in_progress(inode) ||
compr_blocks)
goto write;
@@ -2829,10 +2846,10 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
* this page does not have to be written to disk.
*/
offset = i_size & (PAGE_SIZE - 1);
- if ((page->index >= end_index + 1) || !offset)
+ if ((folio->index >= end_index + 1) || !offset)
goto out;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, offset, folio_size(folio));
write:
/* Dentry/quota blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode) || quota_inode) {
@@ -2862,7 +2879,7 @@ write:
err = -EAGAIN;
if (f2fs_has_inline_data(inode)) {
- err = f2fs_write_inline_data(inode, page);
+ err = f2fs_write_inline_data(inode, folio);
if (!err)
goto out;
}
@@ -2892,7 +2909,7 @@ done:
out:
inode_dec_dirty_pages(inode);
if (err) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
clear_page_private_gcing(page);
}
@@ -2902,7 +2919,7 @@ out:
f2fs_remove_dirty_inode(inode);
submitted = NULL;
}
- unlock_page(page);
+ folio_unlock(folio);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
!F2FS_I(inode)->wb_task && allow_balance)
f2fs_balance_fs(sbi, need_balance_fs);
@@ -2920,7 +2937,7 @@ out:
return 0;
redirty_out:
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
/*
	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
* -> mapping_set_error() -> set_bit(AS_EIO, ...).
@@ -2929,29 +2946,30 @@ redirty_out:
*/
if (!err || wbc->for_reclaim)
return AOP_WRITEPAGE_ACTIVATE;
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
goto out;
if (f2fs_compressed_file(inode)) {
- if (f2fs_is_compressed_cluster(inode, page->index)) {
- redirty_page_for_writepage(wbc, page);
+ if (f2fs_is_compressed_cluster(inode, folio->index)) {
+ folio_redirty_for_writepage(wbc, folio);
return AOP_WRITEPAGE_ACTIVATE;
}
}
out:
#endif
- return f2fs_write_single_data_page(page, NULL, NULL, NULL,
+ return f2fs_write_single_data_page(folio, NULL, NULL, NULL,
wbc, FS_DATA_IO, 0, true);
}
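The conversion above keeps the legacy struct page entry point and derives the folio once on entry; everything after that goes through folio accessors. A minimal sketch of this interim pattern (the callback name is hypothetical; page_folio() resolves the head folio and cannot fail):

    static int example_writepage(struct page *page, struct writeback_control *wbc)
    {
        struct folio *folio = page_folio(page);   /* derive once; cannot fail */

        /* former page->index / page->mapping uses become folio accesses */
        pgoff_t index = folio->index;
        struct address_space *mapping = folio->mapping;

        (void)index; (void)mapping;               /* sketch only */
        folio_unlock(folio);
        return 0;
    }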
@@ -3157,11 +3175,11 @@ continue_unlock:
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
folio_get(folio);
- f2fs_compress_ctx_add_page(&cc, &folio->page);
+ f2fs_compress_ctx_add_page(&cc, folio);
continue;
}
#endif
- ret = f2fs_write_single_data_page(&folio->page,
+ ret = f2fs_write_single_data_page(folio,
&submitted, &bio, &last_block,
wbc, io_type, 0, true);
if (ret == AOP_WRITEPAGE_ACTIVATE)
@@ -3369,11 +3387,11 @@ void f2fs_write_failed(struct inode *inode, loff_t to)
}
static int prepare_write_begin(struct f2fs_sb_info *sbi,
- struct page *page, loff_t pos, unsigned len,
+ struct folio *folio, loff_t pos, unsigned int len,
block_t *blk_addr, bool *node_changed)
{
- struct inode *inode = page->mapping->host;
- pgoff_t index = page->index;
+ struct inode *inode = folio->mapping->host;
+ pgoff_t index = folio->index;
struct dnode_of_data dn;
struct page *ipage;
bool locked = false;
@@ -3410,13 +3428,13 @@ restart:
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA(inode)) {
- f2fs_do_read_inline_data(page_folio(page), ipage);
+ f2fs_do_read_inline_data(folio, ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
set_page_private_inline(ipage);
goto out;
}
- err = f2fs_convert_inline_page(&dn, page);
+ err = f2fs_convert_inline_page(&dn, folio_page(folio, 0));
if (err || dn.data_blkaddr != NULL_ADDR)
goto out;
}
@@ -3509,12 +3527,12 @@ unlock_out:
}
static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
- struct page *page, loff_t pos, unsigned int len,
+ struct folio *folio, loff_t pos, unsigned int len,
block_t *blk_addr, bool *node_changed, bool *use_cow)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct inode *cow_inode = F2FS_I(inode)->cow_inode;
- pgoff_t index = page->index;
+ pgoff_t index = folio->index;
int err = 0;
block_t ori_blk_addr = NULL_ADDR;
@@ -3620,10 +3638,10 @@ repeat:
*foliop = folio;
if (f2fs_is_atomic_file(inode))
- err = prepare_atomic_write_begin(sbi, &folio->page, pos, len,
+ err = prepare_atomic_write_begin(sbi, folio, pos, len,
&blkaddr, &need_balance, &use_cow);
else
- err = prepare_write_begin(sbi, &folio->page, pos, len,
+ err = prepare_write_begin(sbi, folio, pos, len,
&blkaddr, &need_balance);
if (err)
goto put_folio;
@@ -3648,7 +3666,7 @@ repeat:
if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
- folio_zero_segment(folio, len, PAGE_SIZE);
+ folio_zero_segment(folio, len, folio_size(folio));
return 0;
}
@@ -3662,8 +3680,8 @@ repeat:
goto put_folio;
}
err = f2fs_submit_page_read(use_cow ?
- F2FS_I(inode)->cow_inode : inode, &folio->page,
- blkaddr, 0, true);
+ F2FS_I(inode)->cow_inode : inode,
+ folio, blkaddr, 0, true);
if (err)
goto put_folio;
@@ -3727,6 +3745,9 @@ static int f2fs_write_end(struct file *file,
folio_mark_dirty(folio);
+ if (f2fs_is_atomic_file(inode))
+ set_page_private_atomic(folio_page(folio, 0));
+
if (pos + copied > i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) {
f2fs_i_size_write(inode, pos + copied);
@@ -4117,9 +4138,8 @@ const struct address_space_operations f2fs_dblock_aops = {
.swap_deactivate = f2fs_swap_deactivate,
};
-void f2fs_clear_page_cache_dirty_tag(struct page *page)
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio)
{
- struct folio *folio = page_folio(page);
struct address_space *mapping = folio->mapping;
unsigned long flags;
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 8b0e1e71b667..546b8ba91261 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -275,7 +275,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
/* build nm */
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
- si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
+ si->base_mem += F2FS_BLK_TO_BYTES(NM_I(sbi)->nat_bits_blocks);
si->base_mem += NM_I(sbi)->nat_blocks *
f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
si->base_mem += NM_I(sbi)->nat_blocks / 8;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index cbd7a5e96a37..1136539a57a8 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -166,7 +166,8 @@ static unsigned long dir_block_index(unsigned int level,
unsigned long bidx = 0;
for (i = 0; i < level; i++)
- bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
+ bidx += mul_u32_u32(dir_buckets(i, dir_level),
+ bucket_blocks(i));
bidx += idx * bucket_blocks(level);
return bidx;
}
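The switch to mul_u32_u32() matters because a plain u32-by-u32 multiply is performed in 32 bits and only then widened, so deep hash levels can wrap before the product ever reaches the unsigned long bidx. A worked illustration with hypothetical values:

    #include <linux/math64.h>

    u32 buckets = 1U << 17, blocks = 1U << 16;

    unsigned long wrapped = buckets * blocks;    /* 32-bit multiply: 2^33 mod 2^32 == 0 */
    u64 correct = mul_u32_u32(buckets, blocks);  /* multiply done in 64 bits: 2^33 */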
@@ -841,6 +842,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
struct f2fs_dentry_block *dentry_blk;
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
+ pgoff_t index = page_folio(page)->index;
int i;
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
@@ -866,8 +868,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
- !f2fs_truncate_hole(dir, page->index, page->index + 1)) {
- f2fs_clear_page_cache_dirty_tag(page);
+ !f2fs_truncate_hole(dir, index, index + 1)) {
+ f2fs_clear_page_cache_dirty_tag(page_folio(page));
clear_page_dirty_for_io(page);
ClearPageUptodate(page);
clear_page_private_all(page);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index fd1fc06359ee..62ac440d9416 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -366,7 +366,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
static void __drop_largest_extent(struct extent_tree *et,
pgoff_t fofs, unsigned int len)
{
- if (fofs < et->largest.fofs + et->largest.len &&
+ if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
fofs + len > et->largest.fofs) {
et->largest.len = 0;
et->largest_updated = true;
@@ -456,7 +456,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
if (type == EX_READ &&
et->largest.fofs <= pgofs &&
- et->largest.fofs + et->largest.len > pgofs) {
+ (pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
*ei = et->largest;
ret = true;
stat_inc_largest_node_hit(sbi);
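Both casts in this file serve the same purpose: the extent fields are 32-bit, so fofs + len could wrap before the comparison. Promoting one operand to pgoff_t forces the addition into the wider type. A hypothetical illustration:

    u32 fofs = 0xfffffff0, len = 0x20;

    pgoff_t wraps = fofs + len;            /* 32-bit add: 0x10 */
    pgoff_t exact = (pgoff_t)fofs + len;   /* 0x100000010 on 64-bit kernels */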
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index ac19c61f0c3e..33f5449dc22d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -11,7 +11,6 @@
#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
-#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
@@ -134,6 +133,12 @@ typedef u32 nid_t;
#define COMPRESS_EXT_NUM 16
+enum blkzone_allocation_policy {
+ BLKZONE_ALLOC_PRIOR_SEQ, /* Prioritize writing to sequential zones */
+ BLKZONE_ALLOC_ONLY_SEQ, /* Only allow writing to sequential zones */
+ BLKZONE_ALLOC_PRIOR_CONV, /* Prioritize writing to conventional zones */
+};
+
/*
* An implementation of an rwsem that is explicitly unfair to readers. This
* prevents priority inversion when a low-priority reader acquires the read lock
@@ -285,6 +290,7 @@ enum {
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
TRANS_DIR_INO, /* for transactions dir ino list */
+ XATTR_DIR_INO, /* for xattr updated dir ino list */
FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
@@ -784,7 +790,6 @@ enum {
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
FI_DATA_EXIST, /* indicate data exists */
- FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_SKIP_WRITES, /* should skip data page writeback */
FI_OPU_WRITE, /* used for opu per file */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
@@ -802,6 +807,7 @@ enum {
FI_ALIGNED_WRITE, /* enable aligned write */
FI_COW_FILE, /* indicate COW file */
FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */
+ FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */
FI_ATOMIC_REPLACE, /* indicate atomic replace */
FI_OPENED_FILE, /* indicate file has been opened */
FI_MAX, /* max flag, never be used */
@@ -1155,6 +1161,7 @@ enum cp_reason_type {
CP_FASTBOOT_MODE,
CP_SPEC_LOG_NUM,
CP_RECOVER_DIR,
+ CP_XATTR_DIR,
};
enum iostat_type {
@@ -1293,6 +1300,7 @@ struct f2fs_gc_control {
bool no_bg_gc; /* check the space and stop bg_gc */
bool should_migrate_blocks; /* should migrate blocks */
bool err_gc_skipped; /* return EAGAIN if GC skipped */
+ bool one_time; /* require one time GC in one migration unit */
unsigned int nr_free_secs; /* # of free sections to do GC */
};
@@ -1418,7 +1426,8 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
* bit 1 PAGE_PRIVATE_ONGOING_MIGRATION
* bit 2 PAGE_PRIVATE_INLINE_INODE
* bit 3 PAGE_PRIVATE_REF_RESOURCE
- * bit 4- f2fs private data
+ * bit 4 PAGE_PRIVATE_ATOMIC_WRITE
+ * bit 5- f2fs private data
*
* Layout B: lowest bit should be 0
* page.private is a wrapped pointer.
@@ -1428,6 +1437,7 @@ enum {
PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
+ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
PAGE_PRIVATE_MAX
};
@@ -1559,6 +1569,8 @@ struct f2fs_sb_info {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
unsigned int max_open_zones; /* max open zone resources of the zoned device */
+ /* for adjusting the preferred write position of data on zoned UFS */
+ unsigned int blkzone_alloc_policy;
#endif
/* for node-related operations */
@@ -1685,6 +1697,8 @@ struct f2fs_sb_info {
unsigned int max_victim_search;
/* migration granularity of garbage collection, unit: segment */
unsigned int migration_granularity;
+ /* migration window granularity of garbage collection, unit: segment */
+ unsigned int migration_window_granularity;
/*
* for stat information.
@@ -1994,6 +2008,16 @@ static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
return (struct f2fs_super_block *)(sbi->raw_super);
}
+static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
+ pgoff_t index)
+{
+ pgoff_t idx_in_folio = index % (1 << folio_order(folio));
+
+ return (struct f2fs_super_block *)
+ (page_address(folio_page(folio, idx_in_folio)) +
+ F2FS_SUPER_OFFSET);
+}
+
static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
return (struct f2fs_checkpoint *)(sbi->ckpt);
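In the new F2FS_SUPER_BLOCK() helper, index % (1 << folio_order(folio)) converts a global page-cache index into an offset within a possibly multi-page folio; for an order-0 folio it is always 0 and the helper degenerates to the old page_address() form. A sketch with hypothetical numbers:

    /*
     * order-2 folio (4 pages) holding indices 8..11, caller passes index 10:
     *
     *   idx_in_folio = 10 % (1 << 2) = 2
     *   folio_page(folio, 2)  ->  backing page for index 10
     *
     * equivalently: index & (folio_nr_pages(folio) - 1)
     */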
@@ -2396,14 +2420,17 @@ static inline void clear_page_private_##name(struct page *page) \
PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
+PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
static inline unsigned long get_page_private_data(struct page *page)
{
@@ -2435,6 +2462,7 @@ static inline void clear_page_private_all(struct page *page)
clear_page_private_reference(page);
clear_page_private_gcing(page);
clear_page_private_inline(page);
+ clear_page_private_atomic(page);
f2fs_bug_on(F2FS_P_SB(page), page_private(page));
}
@@ -2854,13 +2882,26 @@ static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
return false;
}
+static inline bool is_inflight_read_io(struct f2fs_sb_info *sbi)
+{
+ return get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_DIO_READ);
+}
+
static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
+ bool zoned_gc = (type == GC_TIME &&
+ F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_BLKZONED));
+
if (sbi->gc_mode == GC_URGENT_HIGH)
return true;
- if (is_inflight_io(sbi, type))
- return false;
+ if (zoned_gc) {
+ if (is_inflight_read_io(sbi))
+ return false;
+ } else {
+ if (is_inflight_io(sbi, type))
+ return false;
+ }
if (sbi->gc_mode == GC_URGENT_MID)
return true;
@@ -2869,6 +2910,9 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
(type == DISCARD_TIME || type == GC_TIME))
return true;
+ if (zoned_gc)
+ return true;
+
return f2fs_time_over(sbi, type);
}
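The net effect of the zoned-GC special case above can be summarized as a decision table (a paraphrase of the hunk, not text from the patch; the urgent GC modes short-circuit earlier and are unchanged):

    /*
     * type == GC_TIME on a zoned device:
     *   inflight reads         -> not idle
     *   otherwise              -> idle, without consulting f2fs_time_over()
     *
     * every other (type, device) combination keeps the old behaviour:
     *   inflight I/O of 'type' -> not idle
     *   otherwise              -> idle only once the interval has passed
     */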
@@ -2900,26 +2944,27 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
}
static inline int f2fs_has_extra_attr(struct inode *inode);
-static inline block_t data_blkaddr(struct inode *inode,
- struct page *node_page, unsigned int offset)
+static inline unsigned int get_dnode_base(struct inode *inode,
+ struct page *node_page)
{
- struct f2fs_node *raw_node;
- __le32 *addr_array;
- int base = 0;
- bool is_inode = IS_INODE(node_page);
+ if (!IS_INODE(node_page))
+ return 0;
- raw_node = F2FS_NODE(node_page);
+ return inode ? get_extra_isize(inode) :
+ offset_in_addr(&F2FS_NODE(node_page)->i);
+}
- if (is_inode) {
- if (!inode)
- /* from GC path only */
- base = offset_in_addr(&raw_node->i);
- else if (f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
- }
+static inline __le32 *get_dnode_addr(struct inode *inode,
+ struct page *node_page)
+{
+ return blkaddr_in_node(F2FS_NODE(node_page)) +
+ get_dnode_base(inode, node_page);
+}
- addr_array = blkaddr_in_node(raw_node);
- return le32_to_cpu(addr_array[base + offset]);
+static inline block_t data_blkaddr(struct inode *inode,
+ struct page *node_page, unsigned int offset)
+{
+ return le32_to_cpu(*(get_dnode_addr(inode, node_page) + offset));
}
static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
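After the refactor, the old data_blkaddr() body is split so callers can reuse the base/address computation directly. A hypothetical caller showing the equivalence:

    __le32 *addr = get_dnode_addr(inode, node_page);   /* base already applied */
    block_t blk  = le32_to_cpu(addr[offset]);
    /* identical to: blk = data_blkaddr(inode, node_page, offset); */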
@@ -3038,10 +3083,8 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
return;
fallthrough;
case FI_DATA_EXIST:
- case FI_INLINE_DOTS:
case FI_PIN_FILE:
case FI_COMPRESS_RELEASED:
- case FI_ATOMIC_COMMITTED:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -3163,8 +3206,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_INLINE_DENTRY, fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
set_bit(FI_DATA_EXIST, fi->flags);
- if (ri->i_inline & F2FS_INLINE_DOTS)
- set_bit(FI_INLINE_DOTS, fi->flags);
if (ri->i_inline & F2FS_EXTRA_ATTR)
set_bit(FI_EXTRA_ATTR, fi->flags);
if (ri->i_inline & F2FS_PIN_FILE)
@@ -3185,8 +3226,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_INLINE_DENTRY;
if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(inode, FI_INLINE_DOTS))
- ri->i_inline |= F2FS_INLINE_DOTS;
if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
ri->i_inline |= F2FS_EXTRA_ATTR;
if (is_inode_flag_set(inode, FI_PIN_FILE))
@@ -3267,11 +3306,6 @@ static inline int f2fs_exist_data(struct inode *inode)
return is_inode_flag_set(inode, FI_DATA_EXIST);
}
-static inline int f2fs_has_inline_dots(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_INLINE_DOTS);
-}
-
static inline int f2fs_is_mmap_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_MMAP_FILE);
@@ -3292,8 +3326,6 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
return is_inode_flag_set(inode, FI_COW_FILE);
}
-static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page);
static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
__le32 *addr = get_dnode_addr(inode, page);
@@ -3432,17 +3464,6 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
return F2FS_I(inode)->i_inline_xattr_size;
}
-static inline __le32 *get_dnode_addr(struct inode *inode,
- struct page *node_page)
-{
- int base = 0;
-
- if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
- base = get_extra_isize(inode);
-
- return blkaddr_in_node(F2FS_NODE(node_page)) + base;
-}
-
#define f2fs_get_inode_mode(i) \
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -3495,7 +3516,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
- bool readonly);
+ bool readonly, bool need_lock);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct mnt_idmap *idmap,
@@ -3719,7 +3740,7 @@ bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
block_t blk_addr);
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
@@ -3759,8 +3780,7 @@ void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
enum page_type type, enum temp_type temp);
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno);
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno);
@@ -3868,7 +3888,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
-int f2fs_write_single_data_page(struct page *page, int *submitted,
+int f2fs_write_single_data_page(struct folio *folio, int *submitted,
struct bio **bio, sector_t *last_block,
struct writeback_control *wbc,
enum iostat_type io_type,
@@ -3877,7 +3897,7 @@ void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool f2fs_release_folio(struct folio *folio, gfp_t wait);
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
-void f2fs_clear_page_cache_dirty_tag(struct page *page);
+void f2fs_clear_page_cache_dirty_tag(struct folio *folio);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
@@ -3901,7 +3921,7 @@ void f2fs_destroy_garbage_collection_cache(void);
/* victim selection function for cleaning and SSR */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
int gc_type, int type, char alloc_mode,
- unsigned long long age);
+ unsigned long long age, bool one_time);
/*
* recovery.c
@@ -3987,7 +4007,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_inc_cp_call_count(sbi, foreground) \
atomic_inc(&sbi->cp_call_count[(foreground)])
-#define stat_inc_cp_count(si) (F2FS_STAT(sbi)->cp_count++)
+#define stat_inc_cp_count(sbi) (F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
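The stat_inc_cp_count() fix is a classic macro-hygiene bug: the parameter was named si but the body expanded sbi, so the macro compiled only while every call site happened to have a variable literally named sbi in scope. A simplified illustration (member access reduced for brevity):

    #define BAD_INC(si)   ((sbi)->cp_count++)   /* silently captures caller's 'sbi' */
    #define GOOD_INC(sbi) ((sbi)->cp_count++)   /* parameter and expansion agree */

    void f(struct f2fs_stat_info *sbi)  { BAD_INC(sbi);  }  /* builds by accident */
    void g(struct f2fs_stat_info *stat) { BAD_INC(stat); }  /* argument ignored: build error */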
@@ -4172,7 +4192,7 @@ int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
-int f2fs_write_inline_data(struct inode *inode, struct page *page);
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
const struct f2fs_filename *fname,
@@ -4289,6 +4309,11 @@ static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
* compress.c
*/
#ifdef CONFIG_F2FS_FS_COMPRESSION
+enum cluster_check_type {
+ CLUSTER_IS_COMPR, /* check only if compressed cluster */
+ CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
+ CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
+};
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
@@ -4309,12 +4334,13 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
-void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio);
int f2fs_write_multi_pages(struct compress_ctx *cc,
int *submitted,
struct writeback_control *wbc,
enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
pgoff_t fofs, block_t blkaddr,
unsigned int llen, unsigned int c_len);
@@ -4401,6 +4427,12 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
+static inline int f2fs_is_compressed_cluster(
+ struct inode *inode,
+ pgoff_t index) { return 0; }
+static inline bool f2fs_is_sparse_cluster(
+ struct inode *inode,
+ pgoff_t index) { return true; }
static inline void f2fs_update_read_extent_tree_range_compressed(
struct inode *inode,
pgoff_t fofs, block_t blkaddr,
@@ -4653,9 +4685,11 @@ static inline void f2fs_io_schedule_timeout(long timeout)
io_schedule_timeout(timeout);
}
-static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
- enum page_type type)
+static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi,
+ struct folio *folio, enum page_type type)
{
+ pgoff_t ofs = folio->index;
+
if (unlikely(f2fs_cp_error(sbi)))
return;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 168f08507004..9ae54c4c72fe 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -8,7 +8,6 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
@@ -54,7 +53,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -86,7 +85,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
- int ret = f2fs_is_compressed_cluster(inode, page->index);
+ int ret = f2fs_is_compressed_cluster(inode, folio->index);
if (ret < 0) {
err = ret;
@@ -106,11 +105,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
file_update_time(vmf->vma->vm_file);
filemap_invalidate_lock_shared(inode->i_mapping);
- lock_page(page);
- if (unlikely(page->mapping != inode->i_mapping ||
- page_offset(page) > i_size_read(inode) ||
- !PageUptodate(page))) {
- unlock_page(page);
+ folio_lock(folio);
+ if (unlikely(folio->mapping != inode->i_mapping ||
+ folio_pos(folio) > i_size_read(inode) ||
+ !folio_test_uptodate(folio))) {
+ folio_unlock(folio);
err = -EFAULT;
goto out_sem;
}
@@ -118,9 +117,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
set_new_dnode(&dn, inode, NULL, NULL, 0);
if (need_alloc) {
/* block allocation */
- err = f2fs_get_block_locked(&dn, page->index);
+ err = f2fs_get_block_locked(&dn, folio->index);
} else {
- err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
f2fs_put_dnode(&dn);
if (f2fs_is_pinned_file(inode) &&
!__is_valid_data_blkaddr(dn.data_blkaddr))
@@ -128,11 +127,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
}
if (err) {
- unlock_page(page);
+ folio_unlock(folio);
goto out_sem;
}
- f2fs_wait_on_page_writeback(page, DATA, false, true);
+ f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true);
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -140,18 +139,18 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
/*
* check to see if the page is mapped already (no holes)
*/
- if (PageMappedToDisk(page))
+ if (folio_test_mappedtodisk(folio))
goto out_sem;
/* page is wholly or partially inside EOF */
- if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
+ if (((loff_t)(folio->index + 1) << PAGE_SHIFT) >
i_size_read(inode)) {
loff_t offset;
offset = i_size_read(inode) & ~PAGE_MASK;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, offset, folio_size(folio));
}
- set_page_dirty(page);
+ folio_mark_dirty(folio);
f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
f2fs_update_time(sbi, REQ_TIME);
@@ -163,7 +162,7 @@ out_sem:
out:
ret = vmf_fs_error(err);
- trace_f2fs_vm_page_mkwrite(inode, page->index, vmf->vma->vm_flags, ret);
+ trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, ret);
return ret;
}
@@ -218,6 +217,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
TRANS_DIR_INO))
cp_reason = CP_RECOVER_DIR;
+ else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
+ XATTR_DIR_INO))
+ cp_reason = CP_XATTR_DIR;
return cp_reason;
}
@@ -373,8 +375,7 @@ sync_nodes:
f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
- if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
- (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
+ if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
ret = f2fs_issue_flush(sbi, inode->i_ino);
if (!ret) {
f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
@@ -431,7 +432,7 @@ static bool __found_offset(struct address_space *mapping,
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes = inode->i_sb->s_maxbytes;
+ loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
struct dnode_of_data dn;
pgoff_t pgofs, end_offset;
loff_t data_ofs = offset;
@@ -513,10 +514,7 @@ fail:
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- loff_t maxbytes = inode->i_sb->s_maxbytes;
-
- if (f2fs_compressed_file(inode))
- maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+ loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
switch (whence) {
case SEEK_SET:
@@ -1052,6 +1050,13 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return err;
}
+ /*
+ * wait for inflight dio, blocks should be removed after
+ * IO completion.
+ */
+ if (attr->ia_size < old_size)
+ inode_dio_wait(inode);
+
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
@@ -1888,6 +1893,12 @@ static long f2fs_fallocate(struct file *file, int mode,
if (ret)
goto out;
+ /*
+ * wait for inflight dio, blocks should be removed after IO
+ * completion.
+ */
+ inode_dio_wait(inode);
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
goto out;
@@ -2116,10 +2127,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode *pinode;
loff_t isize;
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2149,6 +2162,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
goto out;
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
+ f2fs_down_write(&fi->i_gc_rwsem[READ]);
/*
* Should wait end_io to count F2FS_WB_CP_DATA correctly by
@@ -2158,27 +2172,18 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ if (ret)
+ goto out_unlock;
/* Check if the inode already has a COW inode */
if (fi->cow_inode == NULL) {
/* Create a COW inode for atomic write */
- pinode = f2fs_iget(inode->i_sb, fi->i_pino);
- if (IS_ERR(pinode)) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- ret = PTR_ERR(pinode);
- goto out;
- }
+ struct dentry *dentry = file_dentry(filp);
+ struct inode *dir = d_inode(dentry->d_parent);
- ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
- iput(pinode);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode);
+ if (ret)
+ goto out_unlock;
set_inode_flag(fi->cow_inode, FI_COW_FILE);
clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
@@ -2187,11 +2192,13 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
F2FS_I(fi->cow_inode)->atomic_inode = inode;
} else {
/* Reuse the already created COW inode */
+ f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode));
+
+ invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1);
+
ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
- if (ret) {
- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- goto out;
- }
+ if (ret)
+ goto out_unlock;
}
f2fs_write_inode(inode, NULL);
@@ -2210,7 +2217,11 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
}
f2fs_i_size_write(fi->cow_inode, isize);
+out_unlock:
+ f2fs_up_write(&fi->i_gc_rwsem[READ]);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ if (ret)
+ goto out;
f2fs_update_time(sbi, REQ_TIME);
fi->atomic_write_task = current;
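With the added i_gc_rwsem[READ] write lock, the ioctl now holds both semaphores across the whole setup, and the new out_unlock label guarantees they are dropped in reverse acquisition order on every error path. The shape, reduced to a sketch (setup_cow_inode() is a hypothetical stand-in for the body):

        f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
        f2fs_down_write(&fi->i_gc_rwsem[READ]);

        ret = setup_cow_inode();
    out_unlock:
        f2fs_up_write(&fi->i_gc_rwsem[READ]);   /* release in reverse order */
        f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
        if (ret)
            goto out;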
@@ -2228,6 +2239,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2260,6 +2274,9 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp)
struct mnt_idmap *idmap = file_mnt_idmap(filp);
int ret;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
if (!inode_owner_or_capable(idmap, inode))
return -EACCES;
@@ -2279,7 +2296,7 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp)
}
int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
- bool readonly)
+ bool readonly, bool need_lock)
{
struct super_block *sb = sbi->sb;
int ret = 0;
@@ -2326,12 +2343,19 @@ int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
if (readonly)
goto out;
+ /* grab sb->s_umount to avoid racing w/ remount() */
+ if (need_lock)
+ down_read(&sbi->sb->s_umount);
+
f2fs_stop_gc_thread(sbi);
f2fs_stop_discard_thread(sbi);
f2fs_drop_discard_cmd(sbi);
clear_opt(sbi, DISCARD);
+ if (need_lock)
+ up_read(&sbi->sb->s_umount);
+
f2fs_update_time(sbi, REQ_TIME);
out:
@@ -2368,7 +2392,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
}
}
- ret = f2fs_do_shutdown(sbi, in, readonly);
+ ret = f2fs_do_shutdown(sbi, in, readonly, true);
if (need_drop)
mnt_drop_write_file(filp);
@@ -2686,7 +2710,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
(range->start + range->len) >> PAGE_SHIFT,
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) ||
+ f2fs_is_atomic_file(inode)) {
err = -EINVAL;
goto unlock_out;
}
@@ -2710,7 +2735,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
* block addresses are continuous.
*/
if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
- if (ei.fofs + ei.len >= pg_end)
+ if ((pgoff_t)ei.fofs + ei.len >= pg_end)
goto out;
}
@@ -2793,6 +2818,8 @@ do_map:
goto clear_out;
}
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
set_page_dirty(page);
set_page_private_gcing(page);
f2fs_put_page(page, 1);
@@ -2917,6 +2944,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
goto out_unlock;
}
+ if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = -EINVAL;
if (pos_in + len > src->i_size || pos_in + len < pos_in)
goto out_unlock;
@@ -2968,9 +3000,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
}
f2fs_lock_op(sbi);
- ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
- pos_out >> F2FS_BLKSIZE_BITS,
- len >> F2FS_BLKSIZE_BITS, false);
+ ret = __exchange_data_block(src, dst, F2FS_BYTES_TO_BLK(pos_in),
+ F2FS_BYTES_TO_BLK(pos_out),
+ F2FS_BYTES_TO_BLK(len), false);
if (!ret) {
if (dst_max_i_size)
@@ -3014,10 +3046,10 @@ static int __f2fs_ioc_move_range(struct file *filp,
return -EBADF;
dst = fdget(range->dst_fd);
- if (!dst.file)
+ if (!fd_file(dst))
return -EBADF;
- if (!(dst.file->f_mode & FMODE_WRITE)) {
+ if (!(fd_file(dst)->f_mode & FMODE_WRITE)) {
err = -EBADF;
goto err_out;
}
@@ -3026,7 +3058,7 @@ static int __f2fs_ioc_move_range(struct file *filp,
if (err)
goto err_out;
- err = f2fs_move_file_range(filp, range->pos_in, dst.file,
+ err = f2fs_move_file_range(filp, range->pos_in, fd_file(dst),
range->pos_out, range->len);
mnt_drop_write_file(filp);
@@ -3300,6 +3332,11 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
inode_lock(inode);
+ if (f2fs_is_atomic_file(inode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
f2fs_i_gc_failures_write(inode, 0);
@@ -4193,6 +4230,8 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
/* It will never fail, when page has pinned above */
f2fs_bug_on(F2FS_I_SB(inode), !page);
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
set_page_dirty(page);
set_page_private_gcing(page);
f2fs_put_page(page, 1);
@@ -4207,9 +4246,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
- pgoff_t page_idx = 0, last_idx;
- int cluster_size = fi->i_cluster_size;
- int count, ret;
+ pgoff_t page_idx = 0, last_idx, cluster_idx;
+ int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4244,10 +4282,15 @@ static int f2fs_ioc_decompress_file(struct file *filp)
goto out;
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ last_idx >>= fi->i_log_cluster_size;
- count = last_idx - page_idx;
- while (count && count >= cluster_size) {
- ret = redirty_blocks(inode, page_idx, cluster_size);
+ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
+ page_idx = cluster_idx << fi->i_log_cluster_size;
+
+ if (!f2fs_is_compressed_cluster(inode, page_idx))
+ continue;
+
+ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
@@ -4257,9 +4300,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
break;
}
- count -= cluster_size;
- page_idx += cluster_size;
-
cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
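The rewritten loop walks cluster indices rather than counting down raw pages, and skips clusters that are not actually compressed instead of redirtying every full cluster unconditionally. The index math, with hypothetical numbers:

    /*
     * i_log_cluster_size = 2  ->  4-page clusters
     * i_size = 16 pages       ->  last_idx = 16 >> 2 = 4 clusters
     *
     * cluster_idx: 0 1 2 3
     * page_idx:    0 4 8 12    (cluster_idx << i_log_cluster_size)
     */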
@@ -4286,9 +4326,9 @@ static int f2fs_ioc_compress_file(struct file *filp)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t page_idx = 0, last_idx;
- int cluster_size = F2FS_I(inode)->i_cluster_size;
- int count, ret;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ pgoff_t page_idx = 0, last_idx, cluster_idx;
+ int ret;
if (!f2fs_sb_has_compression(sbi) ||
F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
@@ -4322,10 +4362,15 @@ static int f2fs_ioc_compress_file(struct file *filp)
set_inode_flag(inode, FI_ENABLE_COMPRESS);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ last_idx >>= fi->i_log_cluster_size;
+
+ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
+ page_idx = cluster_idx << fi->i_log_cluster_size;
+
+ if (f2fs_is_sparse_cluster(inode, page_idx))
+ continue;
- count = last_idx - page_idx;
- while (count && count >= cluster_size) {
- ret = redirty_blocks(inode, page_idx, cluster_size);
+ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
if (ret < 0)
break;
@@ -4335,9 +4380,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
break;
}
- count -= cluster_size;
- page_idx += cluster_size;
-
cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
@@ -4538,6 +4580,13 @@ static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
f2fs_down_read(&fi->i_gc_rwsem[READ]);
}
+ /* dio is not compatible w/ atomic file */
+ if (f2fs_is_atomic_file(inode)) {
+ f2fs_up_read(&fi->i_gc_rwsem[READ]);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
/*
* We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
* the higher-level function iomap_dio_rw() in order to ensure that the
@@ -4597,6 +4646,10 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
iov_iter_count(to), READ);
+ /* In LFS mode, if there is inflight dio, wait for its completion */
+ if (f2fs_lfs_mode(F2FS_I_SB(inode)))
+ inode_dio_wait(inode);
+
if (f2fs_should_use_dio(inode, iocb, to)) {
ret = f2fs_dio_read_iter(iocb, to);
} else {
@@ -4949,6 +5002,12 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
/* Determine whether we will do a direct write or a buffered write. */
dio = f2fs_should_use_dio(inode, iocb, from);
+ /* dio is not compatible w/ atomic write */
+ if (dio && f2fs_is_atomic_file(inode)) {
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
/* Possibly preallocate the blocks for the write. */
target_size = iocb->ki_pos + iov_iter_count(from);
preallocated = f2fs_preallocate_blocks(iocb, from, dio);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 724bbcb447d3..9322a7200e31 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -81,6 +81,8 @@ static int gc_thread_func(void *data)
continue;
}
+ gc_control.one_time = false;
+
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
@@ -116,15 +118,30 @@ static int gc_thread_func(void *data)
goto next;
}
- if (has_enough_invalid_blocks(sbi))
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ if (has_enough_free_blocks(sbi,
+ gc_th->no_zoned_gc_percent)) {
+ wait_ms = gc_th->no_gc_sleep_time;
+ f2fs_up_write(&sbi->gc_lock);
+ goto next;
+ }
+ if (wait_ms == gc_th->no_gc_sleep_time)
+ wait_ms = gc_th->max_sleep_time;
+ }
+
+ if (need_to_boost_gc(sbi)) {
decrease_sleep_time(gc_th, &wait_ms);
- else
+ if (f2fs_sb_has_blkzoned(sbi))
+ gc_control.one_time = true;
+ } else {
increase_sleep_time(gc_th, &wait_ms);
+ }
do_gc:
stat_inc_gc_call_count(sbi, foreground ?
FOREGROUND : BACKGROUND);
- sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+ sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
+ gc_control.one_time;
/* foreground GC was triggered via f2fs_balance_fs() */
if (foreground)
@@ -179,9 +196,21 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
return -ENOMEM;
gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
- gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
- gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
- gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+ gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
+
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
+ gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
+ gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
+ } else {
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+ gc_th->no_zoned_gc_percent = 0;
+ gc_th->boost_zoned_gc_percent = 0;
+ }
gc_th->gc_wake = false;
@@ -339,7 +368,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned char age = 0;
unsigned char u;
unsigned int i;
- unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
for (i = 0; i < usable_segs_per_sec; i++)
mtime += get_seg_entry(sbi, start + i)->mtime;
@@ -368,6 +397,11 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
if (p->alloc_mode == SSR)
return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >=
+ CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio /
+ 100))
+ return UINT_MAX;
+
/* alloc_mode == LFS */
if (p->gc_mode == GC_GREEDY)
return get_valid_blocks(sbi, segno, true);
@@ -742,7 +776,7 @@ static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
*/
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
int gc_type, int type, char alloc_mode,
- unsigned long long age)
+ unsigned long long age, bool one_time)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct sit_info *sm = SIT_I(sbi);
@@ -759,6 +793,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
p.alloc_mode = alloc_mode;
p.age = age;
p.age_threshold = sbi->am.age_threshold;
+ p.one_time_gc = one_time;
retry:
select_policy(sbi, gc_type, type, &p);
@@ -1670,13 +1705,14 @@ next_step:
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
- int gc_type)
+ int gc_type, bool one_time)
{
struct sit_info *sit_i = SIT_I(sbi);
int ret;
down_write(&sit_i->sentry_lock);
- ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
+ ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
+ LFS, 0, one_time);
up_write(&sit_i->sentry_lock);
return ret;
}
@@ -1684,30 +1720,49 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
static int do_garbage_collect(struct f2fs_sb_info *sbi,
unsigned int start_segno,
struct gc_inode_list *gc_list, int gc_type,
- bool force_migrate)
+ bool force_migrate, bool one_time)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
struct blk_plug plug;
unsigned int segno = start_segno;
unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
+ unsigned int sec_end_segno;
int seg_freed = 0, migrated = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
int submitted = 0;
- if (__is_large_section(sbi))
- end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
+ if (__is_large_section(sbi)) {
+ sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
- /*
- * zone-capacity can be less than zone-size in zoned devices,
- * resulting in less than expected usable segments in the zone,
- * calculate the end segno in the zone which can be garbage collected
- */
- if (f2fs_sb_has_blkzoned(sbi))
- end_segno -= SEGS_PER_SEC(sbi) -
- f2fs_usable_segs_in_sec(sbi, segno);
+ /*
+ * zone-capacity can be less than zone-size in zoned devices,
+ * resulting in less than expected usable segments in the zone,
+ * calculate the end segno in the zone which can be garbage
+ * collected
+ */
+ if (f2fs_sb_has_blkzoned(sbi))
+ sec_end_segno -= SEGS_PER_SEC(sbi) -
+ f2fs_usable_segs_in_sec(sbi);
+
+ if (gc_type == BG_GC || one_time) {
+ unsigned int window_granularity =
+ sbi->migration_window_granularity;
+
+ if (f2fs_sb_has_blkzoned(sbi) &&
+ !has_enough_free_blocks(sbi,
+ sbi->gc_thread->boost_zoned_gc_percent))
+ window_granularity *=
+ BOOST_GC_MULTIPLE;
+
+ end_segno = start_segno + window_granularity;
+ }
+
+ if (end_segno > sec_end_segno)
+ end_segno = sec_end_segno;
+ }
sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
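For large sections, the hunk above caps how far one background or one-time GC pass walks into the section. With the zoned defaults added in gc.h and hypothetical free-space pressure:

    /*
     * SEGS_PER_SEC(sbi) = 512, migration_window_granularity = 3
     *
     * plenty of free space:  window = 3 segments
     * below boost threshold: window = 3 * BOOST_GC_MULTIPLE = 15 segments
     *
     * end_segno = min(start_segno + window, sec_end_segno), and
     * next_victim_seg resumes from segno + 1 on the following pass.
     */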
@@ -1786,7 +1841,8 @@ freed:
if (__is_large_section(sbi))
sbi->next_victim_seg[gc_type] =
- (segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
+ (segno + 1 < sec_end_segno) ?
+ segno + 1 : NULL_SEGNO;
skip:
f2fs_put_page(sum_page, 0);
}
@@ -1863,7 +1919,7 @@ gc_more:
goto stop;
}
retry:
- ret = __get_victim(sbi, &segno, gc_type);
+ ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
if (ret) {
/* allow searching for a victim in sections that have pinned data */
if (ret == -ENODATA && gc_type == FG_GC &&
@@ -1875,17 +1931,21 @@ retry:
}
seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
- gc_control->should_migrate_blocks);
+ gc_control->should_migrate_blocks,
+ gc_control->one_time);
if (seg_freed < 0)
goto stop;
total_freed += seg_freed;
- if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
+ if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
sec_freed++;
total_sec_freed++;
}
+ if (gc_control->one_time)
+ goto stop;
+
if (gc_type == FG_GC) {
sbi->cur_victim_sec = NULL_SEGNO;
@@ -2010,8 +2070,7 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
- do_garbage_collect(sbi, segno, &gc_list, FG_GC,
- dry_run_sections == 0);
+ do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
put_gc_inode(&gc_list);
if (!dry_run && get_valid_blocks(sbi, segno, true))
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index a8ea3301b815..2914b678bf8f 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -15,16 +15,27 @@
#define DEF_GC_THREAD_MAX_SLEEP_TIME 60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */
+/* GC sleep parameters for zoned devices */
+#define DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED 10
+#define DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED 20
+#define DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED 60000
+
/* choose candidates from sections which have an age of more than 7 days */
#define DEF_GC_THREAD_AGE_THRESHOLD (60 * 60 * 24 * 7)
#define DEF_GC_THREAD_CANDIDATE_RATIO 20 /* select 20% oldest sections as candidates */
#define DEF_GC_THREAD_MAX_CANDIDATE_COUNT 10 /* select at most 10 sections as candidates */
#define DEF_GC_THREAD_AGE_WEIGHT 60 /* age weight */
+#define DEF_GC_THREAD_VALID_THRESH_RATIO 95 /* skip one-time GC for sections with over 95% valid blocks */
#define DEFAULT_ACCURACY_CLASS 10000 /* accuracy class */
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
+#define LIMIT_NO_ZONED_GC 60 /* free-space percentage over total user space above which zoned devices skip gc */
+#define LIMIT_BOOST_ZONED_GC 25 /* free-space percentage over total user space below which zoned gc is boosted */
+#define DEF_MIGRATION_WINDOW_GRANULARITY_ZONED 3
+#define BOOST_GC_MULTIPLE 5
+
#define DEF_GC_FAILED_PINNED_FILES 2048
#define MAX_GC_FAILED_PINNED_FILES USHRT_MAX
@@ -51,6 +62,11 @@ struct f2fs_gc_kthread {
* caller of f2fs_balance_fs()
* will wait on this wait queue.
*/
+
+ /* for gc control for zoned devices */
+ unsigned int no_zoned_gc_percent;
+ unsigned int boost_zoned_gc_percent;
+ unsigned int valid_thresh_ratio;
};
struct gc_inode_list {
@@ -152,6 +168,12 @@ static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
*wait -= min_time;
}
+static inline bool has_enough_free_blocks(struct f2fs_sb_info *sbi,
+ unsigned int limit_perc)
+{
+ return free_sections(sbi) > ((sbi->total_sections * limit_perc) / 100);
+}
+
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
block_t user_block_count = sbi->user_block_count;
@@ -167,3 +189,10 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
free_user_blocks(sbi) <
limit_free_user_blocks(invalid_user_blocks));
}
+
+static inline bool need_to_boost_gc(struct f2fs_sb_info *sbi)
+{
+ if (f2fs_sb_has_blkzoned(sbi))
+ return !has_enough_free_blocks(sbi, LIMIT_BOOST_ZONED_GC);
+ return has_enough_invalid_blocks(sbi);
+}
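Putting the two helpers together with the zoned limits defined above (section count hypothetical):

    /*
     * total_sections = 1000
     *
     * free > 600  (LIMIT_NO_ZONED_GC = 60%)     -> GC thread keeps sleeping
     * 250 < free <= 600                         -> normal background GC
     * free <= 250 (LIMIT_BOOST_ZONED_GC = 25%)  -> need_to_boost_gc() is true
     */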
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index cca7d448e55c..005babf1bed1 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -260,35 +260,34 @@ out:
return err;
}
-int f2fs_write_inline_data(struct inode *inode, struct page *page)
+int f2fs_write_inline_data(struct inode *inode, struct folio *folio)
{
- struct dnode_of_data dn;
- int err;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct page *ipage;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
- if (err)
- return err;
+ ipage = f2fs_get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
if (!f2fs_has_inline_data(inode)) {
- f2fs_put_dnode(&dn);
+ f2fs_put_page(ipage, 1);
return -EAGAIN;
}
- f2fs_bug_on(F2FS_I_SB(inode), page->index);
+ f2fs_bug_on(F2FS_I_SB(inode), folio->index);
- f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
- memcpy_from_page(inline_data_addr(inode, dn.inode_page),
- page, 0, MAX_INLINE_DATA(inode));
- set_page_dirty(dn.inode_page);
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ memcpy_from_folio(inline_data_addr(inode, ipage),
+ folio, 0, MAX_INLINE_DATA(inode));
+ set_page_dirty(ipage);
- f2fs_clear_page_cache_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(folio);
set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(inode, FI_DATA_EXIST);
- clear_page_private_inline(dn.inode_page);
- f2fs_put_dnode(&dn);
+ clear_page_private_inline(ipage);
+ f2fs_put_page(ipage, 1);
return 0;
}
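The rewrite above works because inline data lives in the inode's own node page: a dnode lookup at offset 0 on an inline inode always resolves to that page, so fetching it directly is equivalent and skips the dnode bookkeeping. Sketch of the equivalence:

    /* old path */
    set_new_dnode(&dn, inode, NULL, NULL, 0);
    f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);   /* dn.inode_page == inode's node page */

    /* new path */
    ipage = f2fs_get_node_page(sbi, inode->i_ino); /* same page, no dnode setup */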
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index aef57172014f..1ed86df343a5 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -7,7 +7,6 @@
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
-#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
@@ -35,6 +34,11 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
if (f2fs_inode_dirtied(inode, sync))
return;
+ if (f2fs_is_atomic_file(inode)) {
+ set_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ return;
+ }
+
mark_inode_dirty_sync(inode);
}
@@ -175,7 +179,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
if (provided != calculated)
f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
- page->index, ino_of_node(page), provided, calculated);
+ page_folio(page)->index, ino_of_node(page),
+ provided, calculated);
return provided == calculated;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 38b4750475db..57d46e1439de 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -457,62 +457,6 @@ struct dentry *f2fs_get_parent(struct dentry *child)
return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}
-static int __recover_dot_dentries(struct inode *dir, nid_t pino)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct qstr dot = QSTR_INIT(".", 1);
- struct f2fs_dir_entry *de;
- struct page *page;
- int err = 0;
-
- if (f2fs_readonly(sbi->sb)) {
- f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint",
- dir->i_ino, pino);
- return 0;
- }
-
- if (!S_ISDIR(dir->i_mode)) {
- f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)",
- dir->i_ino, dir->i_mode, pino);
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- return -ENOTDIR;
- }
-
- err = f2fs_dquot_initialize(dir);
- if (err)
- return err;
-
- f2fs_balance_fs(sbi, true);
-
- f2fs_lock_op(sbi);
-
- de = f2fs_find_entry(dir, &dot, &page);
- if (de) {
- f2fs_put_page(page, 0);
- } else if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto out;
- } else {
- err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
- if (err)
- goto out;
- }
-
- de = f2fs_find_entry(dir, &dotdot_name, &page);
- if (de)
- f2fs_put_page(page, 0);
- else if (IS_ERR(page))
- err = PTR_ERR(page);
- else
- err = f2fs_do_add_link(dir, &dotdot_name, NULL, pino, S_IFDIR);
-out:
- if (!err)
- clear_inode_flag(dir, FI_INLINE_DOTS);
-
- f2fs_unlock_op(sbi);
- return err;
-}
-
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
@@ -522,7 +466,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *new;
nid_t ino = -1;
int err = 0;
- unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
struct f2fs_filename fname;
trace_f2fs_lookup_start(dir, dentry, flags);
@@ -558,17 +501,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
- err = __recover_dot_dentries(dir, root_ino);
- if (err)
- goto out_iput;
- }
-
- if (f2fs_has_inline_dots(inode)) {
- err = __recover_dot_dentries(inode, dir->i_ino);
- if (err)
- goto out_iput;
- }
if (IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b72ef96f7e33..59b13ff243fa 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -20,7 +20,7 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
-#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
+#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
@@ -123,7 +123,7 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
static void clear_node_page_dirty(struct page *page)
{
if (PageDirty(page)) {
- f2fs_clear_page_cache_dirty_tag(page);
+ f2fs_clear_page_cache_dirty_tag(page_folio(page));
clear_page_dirty_for_io(page);
dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
}
@@ -919,7 +919,7 @@ static int truncate_node(struct dnode_of_data *dn)
clear_node_page_dirty(dn->node_page);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- index = dn->node_page->index;
+ index = page_folio(dn->node_page)->index;
f2fs_put_page(dn->node_page, 1);
invalidate_mapping_pages(NODE_MAPPING(sbi),
@@ -1369,6 +1369,7 @@ fail:
*/
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
+ struct folio *folio = page_folio(page);
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
struct node_info ni;
struct f2fs_io_info fio = {
@@ -1381,21 +1382,21 @@ static int read_node_page(struct page *page, blk_opf_t op_flags)
};
int err;
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!f2fs_inode_chksum_verify(sbi, page)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
return -EFSBADCRC;
}
return LOCKED_PAGE;
}
- err = f2fs_get_node_info(sbi, page->index, &ni, false);
+ err = f2fs_get_node_info(sbi, folio->index, &ni, false);
if (err)
return err;
/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
return -ENOENT;
}
@@ -1492,7 +1493,7 @@ out_err:
out_put_err:
/* ENOENT comes from read_node_page which is not an error. */
if (err != -ENOENT)
- f2fs_handle_page_eio(sbi, page->index, NODE);
+ f2fs_handle_page_eio(sbi, page_folio(page), NODE);
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
@@ -1535,7 +1536,7 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
if (!clear_page_dirty_for_io(page))
goto page_out;
- ret = f2fs_write_inline_data(inode, page);
+ ret = f2fs_write_inline_data(inode, page_folio(page));
inode_dec_dirty_pages(inode);
f2fs_remove_dirty_inode(inode);
if (ret)
@@ -1608,6 +1609,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
enum iostat_type io_type, unsigned int *seq_id)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ struct folio *folio = page_folio(page);
nid_t nid;
struct node_info ni;
struct f2fs_io_info fio = {
@@ -1624,15 +1626,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
};
unsigned int seq;
- trace_f2fs_writepage(page_folio(page), NODE);
+ trace_f2fs_writepage(folio, NODE);
if (unlikely(f2fs_cp_error(sbi))) {
/* keep node pages in remount-ro mode */
if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
goto redirty_out;
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
@@ -1646,7 +1648,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
/* get old block addr of this node page */
nid = nid_of_node(page);
- f2fs_bug_on(sbi, page->index != nid);
+ f2fs_bug_on(sbi, folio->index != nid);
if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
goto redirty_out;
@@ -1660,10 +1662,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
- ClearPageUptodate(page);
+ folio_clear_uptodate(folio);
dec_page_count(sbi, F2FS_DIRTY_NODES);
f2fs_up_read(&sbi->node_write);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
}
@@ -1674,7 +1676,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
goto redirty_out;
}
- if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
+ if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
/* should add to global list before clearing PAGECACHE status */
@@ -1684,7 +1686,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
*seq_id = seq;
}
- set_page_writeback(page);
+ folio_start_writeback(folio);
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
@@ -1697,7 +1699,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
submitted = NULL;
}
- unlock_page(page);
+ folio_unlock(folio);
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_submit_merged_write(sbi, NODE);
@@ -1711,7 +1713,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
return 0;
redirty_out:
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
return AOP_WRITEPAGE_ACTIVATE;
}
@@ -1867,7 +1869,7 @@ continue_unlock:
}
if (!ret && atomic && !marked) {
f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
- ino, last_page->index);
+ ino, page_folio(last_page)->index);
lock_page(last_page);
f2fs_wait_on_page_writeback(last_page, NODE, true, true);
set_page_dirty(last_page);
@@ -3166,7 +3168,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
nm_i->nat_bits = f2fs_kvzalloc(sbi,
- nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
+ F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
@@ -3185,7 +3187,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
if (IS_ERR(page))
return PTR_ERR(page);
- memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
+ memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
page_address(page), F2FS_BLKSIZE);
f2fs_put_page(page, 1);
}
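
The two hunks above swap open-coded "<< F2FS_BLKSIZE_BITS" shifts for the
F2FS_BLK_TO_BYTES() helper, so the unit conversion is spelled in one place. A
minimal userspace sketch of the same macro pattern, with the block size and
macro names assumed for illustration rather than copied from the f2fs headers:

#include <stdio.h>
#include <stdint.h>

#define BLKSIZE_BITS 12 /* 4 KiB blocks, as in f2fs */
#define BLK_TO_BYTES(blk)   ((uint64_t)(blk) << BLKSIZE_BITS)
#define BYTES_TO_BLK(bytes) ((uint64_t)(bytes) >> BLKSIZE_BITS)

int main(void)
{
        uint32_t nat_bits_blocks = 8;

        /* helper and open-coded shift agree; the helper documents the
         * unit change and widens to 64 bits in one place */
        printf("%llu bytes, %llu blocks back\n",
               (unsigned long long)BLK_TO_BYTES(nat_bits_blocks),
               (unsigned long long)BYTES_TO_BLK(BLK_TO_BYTES(nat_bits_blocks)));
        return 0;
}

The same helper family appears again below in super.c (F2FS_BYTES_TO_BLK in
max_file_blocks()) and in verity.c.
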
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 78c3198a6308..1766254279d2 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -199,6 +199,10 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
clear_inode_flag(inode, FI_ATOMIC_REPLACE);
clear_inode_flag(inode, FI_ATOMIC_FILE);
+ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
stat_dec_atomic_inode(inode);
F2FS_I(inode)->atomic_write_task = NULL;
@@ -366,6 +370,10 @@ out:
} else {
sbi->committed_atomic_block += fi->atomic_write_cnt;
set_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
+ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
__complete_revoke_list(inode, &revoke_list, ret ? true : false);
@@ -1282,6 +1290,13 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
wait_list, issued);
return 0;
}
+
+ /*
+ * Issue discard for conventional zones only if the device
+ * supports discard.
+ */
+ if (!bdev_max_discard_sectors(bdev))
+ return -EOPNOTSUPP;
}
#endif
@@ -2686,22 +2701,47 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
goto got_it;
}
+#ifdef CONFIG_BLK_DEV_ZONED
/*
* If we format f2fs on zoned storage, let's try to get pinned sections
* from beginning of the storage, which should be a conventional one.
*/
if (f2fs_sb_has_blkzoned(sbi)) {
- segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
+ /* Prioritize writing to conventional zones */
+ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
+ segno = 0;
+ else
+ segno = max(first_zoned_segno(sbi), *newseg);
hint = GET_SEC_FROM_SEG(sbi, segno);
}
+#endif
find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
+ /* Write only to sequential zones */
+ if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
+ hint = GET_SEC_FROM_SEG(sbi, first_zoned_segno(sbi));
+ secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+ } else
+ secno = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
+ if (secno >= MAIN_SECS(sbi)) {
+ ret = -ENOSPC;
+ f2fs_bug_on(sbi, 1);
+ goto out_unlock;
+ }
+ }
+#endif
+
if (secno >= MAIN_SECS(sbi)) {
secno = find_first_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi));
if (secno >= MAIN_SECS(sbi)) {
ret = -ENOSPC;
+ f2fs_bug_on(sbi, 1);
goto out_unlock;
}
}
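
The hunk above adds a tunable allocation policy for zoned devices:
BLKZONE_ALLOC_PRIOR_CONV starts the free-section search at section 0 (the
conventional zones), while BLKZONE_ALLOC_ONLY_SEQ retries only within the
sequential region when the search past the hint fails. A hedged userspace
sketch of that selection logic over a plain free map; the bitmap helpers and
section layout are simplified assumptions, not the kernel code:

#include <stdbool.h>
#include <stdio.h>

enum { ALLOC_PRIOR_SEQ, ALLOC_ONLY_SEQ, ALLOC_PRIOR_CONV };

/* like find_next_zero_bit(): returns nsecs when nothing is free */
static int find_next_zero(const bool *used, int nsecs, int hint)
{
        for (int i = hint; i < nsecs; i++)
                if (!used[i])
                        return i;
        return nsecs;
}

static int pick_section(const bool *used, int nsecs, int first_seq,
                        int policy, int hint)
{
        if (policy == ALLOC_PRIOR_CONV)
                hint = 0;                       /* conventional zones first */
        else if (hint < first_seq)
                hint = first_seq;               /* sequential zones first */

        int secno = find_next_zero(used, nsecs, hint);
        if (secno >= nsecs)                     /* wrap the search */
                secno = find_next_zero(used, nsecs,
                                policy == ALLOC_ONLY_SEQ ? first_seq : 0);
        return secno < nsecs ? secno : -1;      /* -1 stands in for -ENOSPC */
}

int main(void)
{
        /* sections 0..3 conventional, 4..7 sequential; true = in use */
        bool used[8] = { true, false, true, true, false, true, false, true };

        printf("PRIOR_CONV: %d\n", pick_section(used, 8, 4, ALLOC_PRIOR_CONV, 5));
        printf("ONLY_SEQ:   %d\n", pick_section(used, 8, 4, ALLOC_ONLY_SEQ, 5));
        return 0;
}
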
@@ -2743,10 +2783,8 @@ got_it:
out_unlock:
spin_unlock(&free_i->segmap_lock);
- if (ret == -ENOSPC) {
+ if (ret == -ENOSPC)
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
- f2fs_bug_on(sbi, 1);
- }
return ret;
}
@@ -3052,7 +3090,8 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
sanity_check_seg_type(sbi, seg_type);
/* f2fs_need_SSR() already forces to do this */
- if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type,
+ alloc_mode, age, false)) {
curseg->next_segno = segno;
return 1;
}
@@ -3079,7 +3118,8 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
for (; cnt-- > 0; reversed ? i-- : i++) {
if (i == seg_type)
continue;
- if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
+ if (!f2fs_get_victim(sbi, &segno, BG_GC, i,
+ alloc_mode, age, false)) {
curseg->next_segno = segno;
return 1;
}
@@ -3522,7 +3562,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (file_is_cold(inode) || f2fs_need_compress_data(inode))
return CURSEG_COLD_DATA;
- type = __get_age_segment_type(inode, fio->page->index);
+ type = __get_age_segment_type(inode,
+ page_folio(fio->page)->index);
if (type != NO_CHECK_TYPE)
return type;
@@ -3781,7 +3822,7 @@ out:
f2fs_up_read(&fio->sbi->io_order_lock);
}
-void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio,
enum iostat_type io_type)
{
struct f2fs_io_info fio = {
@@ -3790,20 +3831,20 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
.temp = HOT,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
- .old_blkaddr = page->index,
- .new_blkaddr = page->index,
- .page = page,
+ .old_blkaddr = folio->index,
+ .new_blkaddr = folio->index,
+ .page = folio_page(folio, 0),
.encrypted_page = NULL,
.in_list = 0,
};
- if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
+ if (unlikely(folio->index >= MAIN_BLKADDR(sbi)))
fio.op_flags &= ~REQ_META;
- set_page_writeback(page);
+ folio_start_writeback(folio);
f2fs_submit_page_write(&fio);
- stat_inc_meta_count(sbi, page->index);
+ stat_inc_meta_count(sbi, folio->index);
f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
}
@@ -5381,8 +5422,7 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
return BLKS_PER_SEG(sbi);
}
-unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
- unsigned int segno)
+unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi)
{
if (f2fs_sb_has_blkzoned(sbi))
return CAP_SEGS_PER_SEC(sbi);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index bfc01a521cb9..71adb4a43bec 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -188,6 +188,7 @@ struct victim_sel_policy {
unsigned int min_segno; /* segment # having min. cost */
unsigned long long age; /* mtime of GCed section*/
unsigned long long age_threshold;/* age threshold */
+ bool one_time_gc; /* one time GC */
};
struct seg_entry {
@@ -430,7 +431,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
spin_lock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
@@ -464,7 +465,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
- unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
+ unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi);
spin_lock(&free_i->segmap_lock);
if (test_and_clear_bit(segno, free_i->free_segmap)) {
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 176b5177c89d..87ab5696bd48 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -11,7 +11,6 @@
#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
-#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
@@ -707,6 +706,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!strcmp(name, "on")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
} else if (!strcmp(name, "off")) {
+ if (f2fs_sb_has_blkzoned(sbi)) {
+ f2fs_warn(sbi, "zoned devices need bggc");
+ kfree(name);
+ return -EINVAL;
+ }
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
} else if (!strcmp(name, "sync")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
@@ -2561,7 +2565,7 @@ restore_opts:
static void f2fs_shutdown(struct super_block *sb)
{
- f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false);
+ f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
}
#ifdef CONFIG_QUOTA
@@ -3318,29 +3322,47 @@ loff_t max_file_blocks(struct inode *inode)
* fit within U32_MAX + 1 data units.
*/
- result = min(result, (((loff_t)U32_MAX + 1) * 4096) >> F2FS_BLKSIZE_BITS);
+ result = min(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
return result;
}
-static int __f2fs_commit_super(struct buffer_head *bh,
- struct f2fs_super_block *super)
+static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
+ pgoff_t index, bool update)
{
- lock_buffer(bh);
- if (super)
- memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
- set_buffer_dirty(bh);
- unlock_buffer(bh);
-
+ struct bio *bio;
/* it's a rare case, so we can do FUA all the time */
- return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
+ blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
+ int ret;
+
+ folio_lock(folio);
+ folio_wait_writeback(folio);
+ if (update)
+ memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
+ sizeof(struct f2fs_super_block));
+ folio_mark_dirty(folio);
+ folio_clear_dirty_for_io(folio);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+
+ bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);
+
+ /* no need to set a crypto context for a superblock update */
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio));
+
+ if (!bio_add_folio(bio, folio, folio_size(folio), 0))
+ f2fs_bug_on(sbi, 1);
+
+ ret = submit_bio_wait(bio);
+ folio_end_writeback(folio);
+
+ return ret;
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
- struct buffer_head *bh)
+ struct folio *folio, pgoff_t index)
{
- struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
+ struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
@@ -3356,9 +3378,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
u64 main_end_blkaddr = main_blkaddr +
- (segment_count_main << log_blocks_per_seg);
+ ((u64)segment_count_main << log_blocks_per_seg);
u64 seg_end_blkaddr = segment0_blkaddr +
- (segment_count << log_blocks_per_seg);
+ ((u64)segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
@@ -3415,7 +3437,7 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
res = "internally";
} else {
- err = __f2fs_commit_super(bh, NULL);
+ err = __f2fs_commit_super(sbi, folio, index, false);
res = err ? "failed" : "done";
}
f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
@@ -3428,12 +3450,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
}
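
The two (u64) casts in the hunk above fix a real 32-bit overflow:
segment_count and log_blocks_per_seg are both u32, so the shift was computed
in 32 bits and wrapped for large devices before being stored into the u64 end
address. A standalone demonstration with assumed example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t segment_count = 0x00900000;    /* ~9.4M segments */
        uint32_t log_blocks_per_seg = 9;        /* 512 blocks per segment */

        uint64_t wrapped = segment_count << log_blocks_per_seg;
        uint64_t widened = (uint64_t)segment_count << log_blocks_per_seg;

        /* the unwidened shift silently loses the high bits */
        printf("wrapped: %llu\nwidened: %llu\n",
               (unsigned long long)wrapped, (unsigned long long)widened);
        return 0;
}
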
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
- struct buffer_head *bh)
+ struct folio *folio, pgoff_t index)
{
block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
block_t total_sections, blocks_per_seg;
- struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
- (bh->b_data + F2FS_SUPER_OFFSET);
+ struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
size_t crc_offset = 0;
__u32 crc = 0;
@@ -3591,7 +3612,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
- if (sanity_check_area_boundary(sbi, bh))
+ if (sanity_check_area_boundary(sbi, folio, index))
return -EFSCORRUPTED;
return 0;
@@ -3786,6 +3807,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
sbi->migration_granularity = SEGS_PER_SEC(sbi);
+ sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ?
+ DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi);
sbi->seq_file_ra_mul = MIN_RA_MUL;
sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
@@ -3938,7 +3961,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
{
struct super_block *sb = sbi->sb;
int block;
- struct buffer_head *bh;
+ struct folio *folio;
struct f2fs_super_block *super;
int err = 0;
@@ -3947,32 +3970,32 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
return -ENOMEM;
for (block = 0; block < 2; block++) {
- bh = sb_bread(sb, block);
- if (!bh) {
+ folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL);
+ if (IS_ERR(folio)) {
f2fs_err(sbi, "Unable to read %dth superblock",
block + 1);
- err = -EIO;
+ err = PTR_ERR(folio);
*recovery = 1;
continue;
}
/* sanity checking of raw super */
- err = sanity_check_raw_super(sbi, bh);
+ err = sanity_check_raw_super(sbi, folio, block);
if (err) {
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
- brelse(bh);
+ folio_put(folio);
*recovery = 1;
continue;
}
if (!*raw_super) {
- memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+ memcpy(super, F2FS_SUPER_BLOCK(folio, block),
sizeof(*super));
*valid_super_block = block;
*raw_super = super;
}
- brelse(bh);
+ folio_put(folio);
}
/* No valid superblock */
@@ -3986,7 +4009,8 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
- struct buffer_head *bh;
+ struct folio *folio;
+ pgoff_t index;
__u32 crc = 0;
int err;
@@ -4004,22 +4028,24 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
}
/* write back-up superblock first */
- bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
- if (!bh)
- return -EIO;
- err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- brelse(bh);
+ index = sbi->valid_super_block ? 0 : 1;
+ folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = __f2fs_commit_super(sbi, folio, index, true);
+ folio_put(folio);
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
return err;
/* write current valid superblock */
- bh = sb_bread(sbi->sb, sbi->valid_super_block);
- if (!bh)
- return -EIO;
- err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- brelse(bh);
+ index = sbi->valid_super_block;
+ folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ err = __f2fs_commit_super(sbi, folio, index, true);
+ folio_put(folio);
return err;
}
@@ -4173,12 +4199,14 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
}
f2fs_warn(sbi, "Remounting filesystem read-only");
+
/*
- * Make sure updated value of ->s_mount_flags will be visible before
- * ->s_flags update
+ * We have already set CP_ERROR_FLAG to stop all updates to the
+ * filesystem, so there is no need to set SB_RDONLY here: that flag
+ * must be set under the sb->s_umount semaphore via the remount
+ * procedure, otherwise it confuses code like freeze_super() and
+ * leads to deadlocks and other problems.
*/
- smp_wmb();
- sb->s_flags |= SB_RDONLY;
}
static void f2fs_record_error_work(struct work_struct *work)
@@ -4219,6 +4247,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
sbi->aligned_blksize = true;
#ifdef CONFIG_BLK_DEV_ZONED
sbi->max_open_zones = UINT_MAX;
+ sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
#endif
for (i = 0; i < max_devices; i++) {
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index fee7ee45ceaa..c56e8c873935 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -170,6 +170,12 @@ static ssize_t undiscard_blks_show(struct f2fs_attr *a,
SM_I(sbi)->dcc_info->undiscard_blks);
}
+static ssize_t atgc_enabled_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", sbi->am.atgc_enabled ? 1 : 0);
+}
+
static ssize_t gc_mode_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -182,50 +188,50 @@ static ssize_t features_show(struct f2fs_attr *a,
int len = 0;
if (f2fs_sb_has_encrypt(sbi))
- len += scnprintf(buf, PAGE_SIZE - len, "%s",
+ len += sysfs_emit_at(buf, len, "%s",
"encryption");
if (f2fs_sb_has_blkzoned(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "blkzoned");
if (f2fs_sb_has_extra_attr(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "extra_attr");
if (f2fs_sb_has_project_quota(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "projquota");
if (f2fs_sb_has_inode_chksum(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "inode_checksum");
if (f2fs_sb_has_flexible_inline_xattr(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "flexible_inline_xattr");
if (f2fs_sb_has_quota_ino(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "quota_ino");
if (f2fs_sb_has_inode_crtime(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "inode_crtime");
if (f2fs_sb_has_lost_found(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "lost_found");
if (f2fs_sb_has_verity(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "verity");
if (f2fs_sb_has_sb_chksum(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "sb_checksum");
if (f2fs_sb_has_casefold(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "casefold");
if (f2fs_sb_has_readonly(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "readonly");
if (f2fs_sb_has_compression(sbi))
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "compression");
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "pin_file");
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
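
Every branch of features_show() above trades the open-coded
scnprintf(buf + len, PAGE_SIZE - len, ...) accumulation for
sysfs_emit_at(buf, len, ...), which keeps the offset and bounds arithmetic
inside the helper. A hedged userspace analogue of such a helper; the buffer
size and truncation behavior are assumptions, not the sysfs implementation:

#include <stdarg.h>
#include <stdio.h>

#define BUF_SIZE 4096   /* stands in for PAGE_SIZE */

static int emit_at(char *buf, int at, const char *fmt, ...)
{
        va_list args;
        int len;

        if (at < 0 || at >= BUF_SIZE)
                return 0;       /* out of range: emit nothing */

        va_start(args, fmt);
        len = vsnprintf(buf + at, BUF_SIZE - at, fmt, args);
        va_end(args);

        /* clamp like scnprintf(): report only what actually fit */
        return len < BUF_SIZE - at ? len : BUF_SIZE - at - 1;
}

int main(void)
{
        char buf[BUF_SIZE];
        int len = 0;

        len += emit_at(buf, len, "%s", "encryption");
        len += emit_at(buf, len, "%s%s", len ? ", " : "", "blkzoned");
        len += emit_at(buf, len, "\n");
        fputs(buf, stdout);
        return 0;
}
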
@@ -323,17 +329,14 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
int hot_count = sbi->raw_super->hot_ext_count;
int len = 0, i;
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "cold file extension:\n");
+ len += sysfs_emit_at(buf, len, "cold file extension:\n");
for (i = 0; i < cold_count; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
- extlist[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "hot file extension:\n");
+ len += sysfs_emit_at(buf, len, "hot file extension:\n");
for (i = cold_count; i < cold_count + hot_count; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
- extlist[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", extlist[i]);
+
return len;
}
@@ -561,6 +564,11 @@ out:
return -EINVAL;
}
+ if (!strcmp(a->attr.name, "migration_window_granularity")) {
+ if (t == 0 || t > SEGS_PER_SEC(sbi))
+ return -EINVAL;
+ }
+
if (!strcmp(a->attr.name, "gc_urgent")) {
if (t == 0) {
sbi->gc_mode = GC_NORMAL;
@@ -627,6 +635,15 @@ out:
}
#endif
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (!strcmp(a->attr.name, "blkzone_alloc_policy")) {
+ if (t < BLKZONE_ALLOC_PRIOR_SEQ || t > BLKZONE_ALLOC_PRIOR_CONV)
+ return -EINVAL;
+ sbi->blkzone_alloc_policy = t;
+ return count;
+ }
+#endif
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (!strcmp(a->attr.name, "compr_written_block") ||
!strcmp(a->attr.name, "compr_saved_block")) {
@@ -775,7 +792,8 @@ out:
if (!strcmp(a->attr.name, "ipu_policy")) {
if (t >= BIT(F2FS_IPU_MAX))
return -EINVAL;
- if (t && f2fs_lfs_mode(sbi))
+ /* allow only F2FS_IPU_NOCACHE, used for IPU on pinned files */
+ if (f2fs_lfs_mode(sbi) && (t & ~BIT(F2FS_IPU_NOCACHE)))
return -EINVAL;
SM_I(sbi)->ipu_policy = (unsigned int)t;
return count;
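
The reworked LFS-mode check is a plain mask test: t & ~BIT(F2FS_IPU_NOCACHE)
is nonzero exactly when any policy bit other than NOCACHE is set, so zero and
the bare NOCACHE bit are the only accepted values. A small sketch; the NOCACHE
bit position is an assumption for illustration:

#include <stdio.h>

#define BIT(n)           (1U << (n))
#define F2FS_IPU_NOCACHE 5      /* bit position assumed */

static int lfs_ipu_policy_ok(unsigned int t)
{
        /* reject any bit other than NOCACHE in LFS mode */
        return (t & ~BIT(F2FS_IPU_NOCACHE)) == 0;
}

int main(void)
{
        printf("0x00 -> %d\n", lfs_ipu_policy_ok(0));
        printf("0x20 -> %d\n", lfs_ipu_policy_ok(BIT(F2FS_IPU_NOCACHE)));
        printf("0x21 -> %d\n", lfs_ipu_policy_ok(BIT(F2FS_IPU_NOCACHE) | 1U));
        return 0;
}
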
@@ -960,6 +978,9 @@ GC_THREAD_RW_ATTR(gc_urgent_sleep_time, urgent_sleep_time);
GC_THREAD_RW_ATTR(gc_min_sleep_time, min_sleep_time);
GC_THREAD_RW_ATTR(gc_max_sleep_time, max_sleep_time);
GC_THREAD_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
+GC_THREAD_RW_ATTR(gc_no_zoned_gc_percent, no_zoned_gc_percent);
+GC_THREAD_RW_ATTR(gc_boost_zoned_gc_percent, boost_zoned_gc_percent);
+GC_THREAD_RW_ATTR(gc_valid_thresh_ratio, valid_thresh_ratio);
/* SM_INFO ATTR */
SM_INFO_RW_ATTR(reclaim_segments, rec_prefree_segments);
@@ -969,6 +990,7 @@ SM_INFO_GENERAL_RW_ATTR(min_fsync_blocks);
SM_INFO_GENERAL_RW_ATTR(min_seq_blocks);
SM_INFO_GENERAL_RW_ATTR(min_hot_blocks);
SM_INFO_GENERAL_RW_ATTR(min_ssr_sections);
+SM_INFO_GENERAL_RW_ATTR(reserved_segments);
/* DCC_INFO ATTR */
DCC_INFO_RW_ATTR(max_small_discards, max_discards);
@@ -1001,6 +1023,7 @@ F2FS_SBI_RW_ATTR(gc_pin_file_thresh, gc_pin_file_threshold);
F2FS_SBI_RW_ATTR(gc_reclaimed_segments, gc_reclaimed_segs);
F2FS_SBI_GENERAL_RW_ATTR(max_victim_search);
F2FS_SBI_GENERAL_RW_ATTR(migration_granularity);
+F2FS_SBI_GENERAL_RW_ATTR(migration_window_granularity);
F2FS_SBI_GENERAL_RW_ATTR(dir_level);
#ifdef CONFIG_F2FS_IOSTAT
F2FS_SBI_GENERAL_RW_ATTR(iostat_enable);
@@ -1033,6 +1056,7 @@ F2FS_SBI_GENERAL_RW_ATTR(warm_data_age_threshold);
F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
#ifdef CONFIG_BLK_DEV_ZONED
F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
+F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
#endif
/* STAT_INFO ATTR */
@@ -1072,6 +1096,7 @@ F2FS_GENERAL_RO_ATTR(encoding);
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
F2FS_GENERAL_RO_ATTR(main_blkaddr);
F2FS_GENERAL_RO_ATTR(pending_discard);
+F2FS_GENERAL_RO_ATTR(atgc_enabled);
F2FS_GENERAL_RO_ATTR(gc_mode);
#ifdef CONFIG_F2FS_STAT_FS
F2FS_GENERAL_RO_ATTR(moved_blocks_background);
@@ -1116,6 +1141,9 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_min_sleep_time),
ATTR_LIST(gc_max_sleep_time),
ATTR_LIST(gc_no_gc_sleep_time),
+ ATTR_LIST(gc_no_zoned_gc_percent),
+ ATTR_LIST(gc_boost_zoned_gc_percent),
+ ATTR_LIST(gc_valid_thresh_ratio),
ATTR_LIST(gc_idle),
ATTR_LIST(gc_urgent),
ATTR_LIST(reclaim_segments),
@@ -1138,8 +1166,10 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(min_seq_blocks),
ATTR_LIST(min_hot_blocks),
ATTR_LIST(min_ssr_sections),
+ ATTR_LIST(reserved_segments),
ATTR_LIST(max_victim_search),
ATTR_LIST(migration_granularity),
+ ATTR_LIST(migration_window_granularity),
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
ATTR_LIST(ra_nid_pages),
@@ -1187,6 +1217,7 @@ static struct attribute *f2fs_attrs[] = {
#endif
#ifdef CONFIG_BLK_DEV_ZONED
ATTR_LIST(unusable_blocks_per_sec),
+ ATTR_LIST(blkzone_alloc_policy),
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
ATTR_LIST(compr_written_block),
@@ -1200,6 +1231,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(atgc_candidate_count),
ATTR_LIST(atgc_age_weight),
ATTR_LIST(atgc_age_threshold),
+ ATTR_LIST(atgc_enabled),
ATTR_LIST(seq_file_ra_mul),
ATTR_LIST(gc_segment_mode),
ATTR_LIST(gc_reclaimed_segments),
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 84a33fe49bed..2287f238ae09 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -74,7 +74,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
- if (pos + count > inode->i_sb->s_maxbytes)
+ if (pos + count > F2FS_BLK_TO_BYTES(max_file_blocks(inode)))
return -EFBIG;
while (count) {
@@ -237,7 +237,8 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
pos = le64_to_cpu(dloc.pos);
/* Get the descriptor */
- if (pos + size < pos || pos + size > inode->i_sb->s_maxbytes ||
+ if (pos + size < pos ||
+ pos + size > F2FS_BLK_TO_BYTES(max_file_blocks(inode)) ||
pos < f2fs_verity_metadata_pos(inode) || size > INT_MAX) {
f2fs_warn(F2FS_I_SB(inode), "invalid verity xattr");
f2fs_handle_error(F2FS_I_SB(inode),
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index f290fe9327c4..3f3874943679 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -629,6 +629,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *ipage, int flags)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr, *last_base_addr;
int found, newsize;
@@ -772,9 +773,18 @@ retry:
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
- if (S_ISDIR(inode->i_mode))
- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+ if (!S_ISDIR(inode->i_mode))
+ goto same;
+ /*
+ * In strict mode, fsync() always triggers a checkpoint for full
+ * metadata consistency; in other modes, it triggers a checkpoint
+ * only when the parent's xattr metadata is updated.
+ */
+ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
+ set_sbi_flag(sbi, SBI_NEED_CP);
+ else
+ f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO);
same:
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
inode->i_mode = F2FS_I(inode)->i_acl_mode;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 081e5e3d89ea..22dd9dcce7ec 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -405,7 +405,7 @@ static long f_dupfd_query(int fd, struct file *filp)
* overkill, but given our lockless file pointer lookup, the
* alternatives are complicated.
*/
- return f.file == filp;
+ return fd_file(f) == filp;
}
/* Let the caller figure out whether a given file was just created. */
@@ -573,17 +573,17 @@ SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
struct fd f = fdget_raw(fd);
long err = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
goto out1;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (!err)
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
out1:
fdput(f);
@@ -600,15 +600,15 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
struct flock64 flock;
long err = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
goto out1;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
goto out1;
@@ -618,7 +618,7 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
err = -EFAULT;
if (copy_from_user(&flock, argp, sizeof(flock)))
break;
- err = fcntl_getlk64(f.file, cmd, &flock);
+ err = fcntl_getlk64(fd_file(f), cmd, &flock);
if (!err && copy_to_user(argp, &flock, sizeof(flock)))
err = -EFAULT;
break;
@@ -629,10 +629,10 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
err = -EFAULT;
if (copy_from_user(&flock, argp, sizeof(flock)))
break;
- err = fcntl_setlk64(fd, f.file, cmd, &flock);
+ err = fcntl_setlk64(fd, fd_file(f), cmd, &flock);
break;
default:
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
break;
}
out1:
@@ -737,15 +737,15 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
struct flock flock;
long err = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
return err;
- if (unlikely(f.file->f_mode & FMODE_PATH)) {
+ if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd))
goto out_put;
}
- err = security_file_fcntl(f.file, cmd, arg);
+ err = security_file_fcntl(fd_file(f), cmd, arg);
if (err)
goto out_put;
@@ -754,7 +754,7 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
if (err)
break;
err = fixup_compat_flock(&flock);
@@ -766,7 +766,7 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock64(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
if (!err)
err = put_compat_flock64(&flock, compat_ptr(arg));
break;
@@ -775,7 +775,7 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
break;
case F_SETLK64:
case F_SETLKW64:
@@ -784,10 +784,10 @@ static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
err = get_compat_flock64(&flock, compat_ptr(arg));
if (err)
break;
- err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
+ err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
break;
default:
- err = do_fcntl(fd, cmd, arg, f.file);
+ err = do_fcntl(fd, cmd, arg, fd_file(f));
break;
}
out_put:
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 8cb665629f4a..82df28d45cd7 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -140,9 +140,9 @@ static int get_path_from_fd(int fd, struct path *root)
spin_unlock(&fs->lock);
} else {
struct fd f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- *root = f.file->f_path;
+ *root = fd_file(f)->f_path;
path_get(root);
fdput(f);
}
diff --git a/fs/file.c b/fs/file.c
index 976ecd4ce2c6..5125607d040a 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -1124,7 +1124,7 @@ EXPORT_SYMBOL(task_lookup_next_fdget_rcu);
* The fput_needed flag returned by fget_light should be passed to the
* corresponding fput_light.
*/
-static unsigned long __fget_light(unsigned int fd, fmode_t mask)
+static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
struct files_struct *files = current->files;
struct file *file;
@@ -1141,22 +1141,22 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
if (likely(atomic_read_acquire(&files->count) == 1)) {
file = files_lookup_fd_raw(files, fd);
if (!file || unlikely(file->f_mode & mask))
- return 0;
- return (unsigned long)file;
+ return EMPTY_FD;
+ return BORROWED_FD(file);
} else {
file = __fget_files(files, fd, mask);
if (!file)
- return 0;
- return FDPUT_FPUT | (unsigned long)file;
+ return EMPTY_FD;
+ return CLONED_FD(file);
}
}
-unsigned long __fdget(unsigned int fd)
+struct fd fdget(unsigned int fd)
{
return __fget_light(fd, FMODE_PATH);
}
-EXPORT_SYMBOL(__fdget);
+EXPORT_SYMBOL(fdget);
-unsigned long __fdget_raw(unsigned int fd)
+struct fd fdget_raw(unsigned int fd)
{
return __fget_light(fd, 0);
}
@@ -1177,16 +1177,16 @@ static inline bool file_needs_f_pos_lock(struct file *file)
(file_count(file) > 1 || file->f_op->iterate_shared);
}
-unsigned long __fdget_pos(unsigned int fd)
+struct fd fdget_pos(unsigned int fd)
{
- unsigned long v = __fdget(fd);
- struct file *file = (struct file *)(v & ~3);
+ struct fd f = fdget(fd);
+ struct file *file = fd_file(f);
if (file && file_needs_f_pos_lock(file)) {
- v |= FDPUT_POS_UNLOCK;
+ f.word |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
}
- return v;
+ return f;
}
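
The fs/file.c hunks above replace the old unsigned long cookie with a struct
fd that still packs bookkeeping flags into the low bits of the file pointer
within a single word (note f.word |= FDPUT_POS_UNLOCK). A hedged sketch of
that pointer-tagging trick; borrowed_fd()/cloned_fd() here are illustrative
stand-ins loosely modeled on the BORROWED_FD/CLONED_FD constructors used above:

#include <assert.h>
#include <stdio.h>

#define FLAG_FPUT 1UL   /* low bit: caller must drop its reference */

struct file { int dummy; };
struct fd { unsigned long word; };

static struct fd borrowed_fd(struct file *f)
{
        return (struct fd){ .word = (unsigned long)f };
}

static struct fd cloned_fd(struct file *f)
{
        return (struct fd){ .word = (unsigned long)f | FLAG_FPUT };
}

static struct file *fd_file(struct fd f)
{
        return (struct file *)(f.word & ~3UL);  /* strip both tag bits */
}

static int fd_needs_fput(struct fd f)
{
        return f.word & FLAG_FPUT;
}

int main(void)
{
        static struct file file;

        /* struct file is at least 4-byte aligned: bits 0-1 are free */
        assert(((unsigned long)&file & 3UL) == 0);

        struct fd b = borrowed_fd(&file), c = cloned_fd(&file);
        printf("borrowed: %p fput=%d\n", (void *)fd_file(b), fd_needs_fput(b));
        printf("cloned:   %p fput=%d\n", (void *)fd_file(c), fd_needs_fput(c));
        return 0;
}
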
void __f_unlock_pos(struct file *f)
diff --git a/fs/fsopen.c b/fs/fsopen.c
index ed2dd000622e..6cef3deccded 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -78,7 +78,6 @@ static int fscontext_release(struct inode *inode, struct file *file)
const struct file_operations fscontext_fops = {
.read = fscontext_read,
.release = fscontext_release,
- .llseek = no_llseek,
};
/*
@@ -394,13 +393,13 @@ SYSCALL_DEFINE5(fsconfig,
}
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
ret = -EINVAL;
- if (f.file->f_op != &fscontext_fops)
+ if (fd_file(f)->f_op != &fscontext_fops)
goto out_f;
- fc = f.file->private_data;
+ fc = fd_file(f)->private_data;
if (fc->ops == &legacy_fs_context_ops) {
switch (cmd) {
case FSCONFIG_SET_BINARY:
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 6e0228c6d0cb..ce0ff7a9007b 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -3,6 +3,9 @@
# Makefile for the FUSE filesystem.
#
+# Needed for trace events
+ccflags-y = -I$(src)
+
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
index 04cfd8fee992..8f484b105f13 100644
--- a/fs/fuse/acl.c
+++ b/fs/fuse/acl.c
@@ -12,7 +12,6 @@
#include <linux/posix_acl_xattr.h>
static struct posix_acl *__fuse_get_acl(struct fuse_conn *fc,
- struct mnt_idmap *idmap,
struct inode *inode, int type, bool rcu)
{
int size;
@@ -74,7 +73,7 @@ struct posix_acl *fuse_get_acl(struct mnt_idmap *idmap,
if (fuse_no_acl(fc, inode))
return ERR_PTR(-EOPNOTSUPP);
- return __fuse_get_acl(fc, idmap, inode, type, false);
+ return __fuse_get_acl(fc, inode, type, false);
}
struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
@@ -90,8 +89,7 @@ struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
*/
if (!fc->posix_acl)
return NULL;
-
- return __fuse_get_acl(fc, &nop_mnt_idmap, inode, type, rcu);
+ return __fuse_get_acl(fc, inode, type, rcu);
}
int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
@@ -146,8 +144,8 @@ int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
* be stripped.
*/
if (fc->posix_acl &&
- !in_group_or_capable(&nop_mnt_idmap, inode,
- i_gid_into_vfsgid(&nop_mnt_idmap, inode)))
+ !in_group_or_capable(idmap, inode,
+ i_gid_into_vfsgid(idmap, inode)))
extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
ret = fuse_setxattr(inode, name, value, size, 0, extra_flags);
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 97ac994ff78f..2a730d88cc3b 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -183,27 +183,23 @@ out:
static const struct file_operations fuse_ctl_abort_ops = {
.open = nonseekable_open,
.write = fuse_conn_abort_write,
- .llseek = no_llseek,
};
static const struct file_operations fuse_ctl_waiting_ops = {
.open = nonseekable_open,
.read = fuse_conn_waiting_read,
- .llseek = no_llseek,
};
static const struct file_operations fuse_conn_max_background_ops = {
.open = nonseekable_open,
.read = fuse_conn_max_background_read,
.write = fuse_conn_max_background_write,
- .llseek = no_llseek,
};
static const struct file_operations fuse_conn_congestion_threshold_ops = {
.open = nonseekable_open,
.read = fuse_conn_congestion_threshold_read,
.write = fuse_conn_congestion_threshold_write,
- .llseek = no_llseek,
};
static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index f0c9cd1a0b39..1f64ae6d7a69 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -22,6 +22,9 @@
#include <linux/splice.h>
#include <linux/sched.h>
+#define CREATE_TRACE_POINTS
+#include "fuse_trace.h"
+
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -105,11 +108,17 @@ static void fuse_drop_waiting(struct fuse_conn *fc)
static void fuse_put_request(struct fuse_req *req);
-static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
+static struct fuse_req *fuse_get_req(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ bool for_background)
{
struct fuse_conn *fc = fm->fc;
struct fuse_req *req;
+ bool no_idmap = !fm->sb || (fm->sb->s_iflags & SB_I_NOIDMAP);
+ kuid_t fsuid;
+ kgid_t fsgid;
int err;
+
atomic_inc(&fc->num_waiting);
if (fuse_block_alloc(fc, for_background)) {
@@ -137,19 +146,32 @@ static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
goto out;
}
- req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
__set_bit(FR_WAITING, &req->flags);
if (for_background)
__set_bit(FR_BACKGROUND, &req->flags);
- if (unlikely(req->in.h.uid == ((uid_t)-1) ||
- req->in.h.gid == ((gid_t)-1))) {
+ /*
+ * Keep the old behavior when idmapping support was not
+ * declared by the FUSE server.
+ *
+ * For FUSE servers that support idmapped mounts, we send
+ * UID/GID only along with "inode creation" fuse requests;
+ * otherwise idmap == &invalid_mnt_idmap and req->in.h.{u,g}id
+ * will be set to FUSE_INVALID_UIDGID.
+ */
+ fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns);
+ fsgid = no_idmap ? current_fsgid() : mapped_fsgid(idmap, fc->user_ns);
+ req->in.h.uid = from_kuid(fc->user_ns, fsuid);
+ req->in.h.gid = from_kgid(fc->user_ns, fsgid);
+
+ if (no_idmap && unlikely(req->in.h.uid == ((uid_t)-1) ||
+ req->in.h.gid == ((gid_t)-1))) {
fuse_put_request(req);
return ERR_PTR(-EOVERFLOW);
}
+
return req;
out:
@@ -194,11 +216,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
}
EXPORT_SYMBOL_GPL(fuse_len_args);
-u64 fuse_get_unique(struct fuse_iqueue *fiq)
+static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
+
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
+{
+ u64 ret;
+
+ spin_lock(&fiq->lock);
+ ret = fuse_get_unique_locked(fiq);
+ spin_unlock(&fiq->lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(fuse_get_unique);
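
fuse_get_unique() is split above into a _locked helper for callers that
already hold fiq->lock (the FORGET read paths below switch to it) and a
locking wrapper for everyone else. The same locked/_locked convention in
portable C, with a pthread mutex standing in for the fiq spinlock and the
step constant assumed:

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

#define REQ_ID_STEP 2   /* assumed, mirroring FUSE_REQ_ID_STEP */

struct iqueue {
        pthread_mutex_t lock;
        uint64_t reqctr;
};

/* caller must hold q->lock */
static uint64_t get_unique_locked(struct iqueue *q)
{
        q->reqctr += REQ_ID_STEP;
        return q->reqctr;
}

static uint64_t get_unique(struct iqueue *q)
{
        uint64_t ret;

        pthread_mutex_lock(&q->lock);
        ret = get_unique_locked(q);
        pthread_mutex_unlock(&q->lock);
        return ret;
}

int main(void)
{
        struct iqueue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

        printf("%llu %llu\n", (unsigned long long)get_unique(&q),
               (unsigned long long)get_unique(&q));
        return 0;
}
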
static unsigned int fuse_req_hash(u64 unique)
@@ -217,22 +250,70 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
}
+static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
+{
+ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ fiq->forget_list_tail->next = forget;
+ fiq->forget_list_tail = forget;
+ fuse_dev_wake_and_unlock(fiq);
+ } else {
+ kfree(forget);
+ spin_unlock(&fiq->lock);
+ }
+}
+
+static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ spin_lock(&fiq->lock);
+ if (list_empty(&req->intr_entry)) {
+ list_add_tail(&req->intr_entry, &fiq->interrupts);
+ /*
+ * Pairs with smp_mb() implied by test_and_set_bit()
+ * from fuse_request_end().
+ */
+ smp_mb();
+ if (test_bit(FR_FINISHED, &req->flags)) {
+ list_del_init(&req->intr_entry);
+ spin_unlock(&fiq->lock);
+ } else {
+ fuse_dev_wake_and_unlock(fiq);
+ }
+ } else {
+ spin_unlock(&fiq->lock);
+ }
+}
+
+static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique_locked(fiq);
+ list_add_tail(&req->list, &fiq->pending);
+ fuse_dev_wake_and_unlock(fiq);
+ } else {
+ spin_unlock(&fiq->lock);
+ req->out.h.error = -ENOTCONN;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+ }
+}
+
const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
- .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
- .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
- .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
+ .send_forget = fuse_dev_queue_forget,
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_dev_queue_req,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
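
The ops-table change above is more than a rename: the fuse_iqueue_ops
callbacks now perform the queueing themselves (send_forget/send_interrupt/
send_req) instead of merely waking a reader, so a transport such as virtiofs
can queue requests its own way while /dev/fuse keeps the list-based default.
A toy version of that vtable dispatch, all names illustrative:

#include <stdio.h>

struct queue;

struct queue_ops {
        void (*send_req)(struct queue *q, int req);
};

struct queue {
        const struct queue_ops *ops;
        const char *name;
};

/* default backend: "queues" by printing, standing in for list_add_tail() */
static void dev_send_req(struct queue *q, int req)
{
        printf("%s: queued request %d\n", q->name, req);
}

static const struct queue_ops dev_ops = { .send_req = dev_send_req };

/* generic path: build the request, then let the backend queue it */
static void send_one(struct queue *q, int req)
{
        q->ops->send_req(q, req);
}

int main(void)
{
        struct queue q = { &dev_ops, "dev" };

        send_one(&q, 42);
        return 0;
}
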
-static void queue_request_and_unlock(struct fuse_iqueue *fiq,
- struct fuse_req *req)
-__releases(fiq->lock)
+static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
{
req->in.h.len = sizeof(struct fuse_in_header) +
fuse_len_args(req->args->in_numargs,
(struct fuse_arg *) req->args->in_args);
- list_add_tail(&req->list, &fiq->pending);
- fiq->ops->wake_pending_and_unlock(fiq);
+ trace_fuse_request_send(req);
+ fiq->ops->send_req(fiq, req);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -243,15 +324,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
- spin_lock(&fiq->lock);
- if (fiq->connected) {
- fiq->forget_list_tail->next = forget;
- fiq->forget_list_tail = forget;
- fiq->ops->wake_forget_and_unlock(fiq);
- } else {
- kfree(forget);
- spin_unlock(&fiq->lock);
- }
+ fiq->ops->send_forget(fiq, forget);
}
static void flush_bg_queue(struct fuse_conn *fc)
@@ -265,9 +338,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
- spin_lock(&fiq->lock);
- req->in.h.unique = fuse_get_unique(fiq);
- queue_request_and_unlock(fiq, req);
+ fuse_send_one(fiq, req);
}
}
@@ -288,6 +359,7 @@ void fuse_request_end(struct fuse_req *req)
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
+ trace_fuse_request_end(req);
/*
* test_and_set_bit() implies smp_mb() between bit
* changing and below FR_INTERRUPTED check. Pairs with
@@ -337,29 +409,12 @@ static int queue_interrupt(struct fuse_req *req)
{
struct fuse_iqueue *fiq = &req->fm->fc->iq;
- spin_lock(&fiq->lock);
/* Check whether we've sent a request to interrupt this req */
- if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
- spin_unlock(&fiq->lock);
+ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
return -EINVAL;
- }
- if (list_empty(&req->intr_entry)) {
- list_add_tail(&req->intr_entry, &fiq->interrupts);
- /*
- * Pairs with smp_mb() implied by test_and_set_bit()
- * from fuse_request_end().
- */
- smp_mb();
- if (test_bit(FR_FINISHED, &req->flags)) {
- list_del_init(&req->intr_entry);
- spin_unlock(&fiq->lock);
- return 0;
- }
- fiq->ops->wake_interrupt_and_unlock(fiq);
- } else {
- spin_unlock(&fiq->lock);
- }
+ fiq->ops->send_interrupt(fiq, req);
+
return 0;
}
@@ -414,21 +469,15 @@ static void __fuse_request_send(struct fuse_req *req)
struct fuse_iqueue *fiq = &req->fm->fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
- spin_lock(&fiq->lock);
- if (!fiq->connected) {
- spin_unlock(&fiq->lock);
- req->out.h.error = -ENOTCONN;
- } else {
- req->in.h.unique = fuse_get_unique(fiq);
- /* acquire extra reference, since request is still needed
- after fuse_request_end() */
- __fuse_get_request(req);
- queue_request_and_unlock(fiq, req);
- request_wait_answer(req);
- /* Pairs with smp_wmb() in fuse_request_end() */
- smp_rmb();
- }
+ /* acquire extra reference, since request is still needed after
+ fuse_request_end() */
+ __fuse_get_request(req);
+ fuse_send_one(fiq, req);
+
+ request_wait_answer(req);
+ /* Pairs with smp_wmb() in fuse_request_end() */
+ smp_rmb();
}
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
@@ -468,8 +517,14 @@ static void fuse_force_creds(struct fuse_req *req)
{
struct fuse_conn *fc = req->fm->fc;
- req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ if (!req->fm->sb || req->fm->sb->s_iflags & SB_I_NOIDMAP) {
+ req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
+ req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ } else {
+ req->in.h.uid = FUSE_INVALID_UIDGID;
+ req->in.h.gid = FUSE_INVALID_UIDGID;
+ }
+
req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}
@@ -484,7 +539,9 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
__set_bit(FR_ASYNC, &req->flags);
}
-ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args)
{
struct fuse_conn *fc = fm->fc;
struct fuse_req *req;
@@ -501,7 +558,7 @@ ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
__set_bit(FR_FORCE, &req->flags);
} else {
WARN_ON(args->nocreds);
- req = fuse_get_req(fm, false);
+ req = fuse_get_req(idmap, fm, false);
if (IS_ERR(req))
return PTR_ERR(req);
}
@@ -562,7 +619,7 @@ int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
__set_bit(FR_BACKGROUND, &req->flags);
} else {
WARN_ON(args->nocreds);
- req = fuse_get_req(fm, true);
+ req = fuse_get_req(&invalid_mnt_idmap, fm, true);
if (IS_ERR(req))
return PTR_ERR(req);
}
@@ -583,9 +640,8 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
{
struct fuse_req *req;
struct fuse_iqueue *fiq = &fm->fc->iq;
- int err = 0;
- req = fuse_get_req(fm, false);
+ req = fuse_get_req(&invalid_mnt_idmap, fm, false);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -594,16 +650,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
fuse_args_to_req(req, args);
- spin_lock(&fiq->lock);
- if (fiq->connected) {
- queue_request_and_unlock(fiq, req);
- } else {
- err = -ENODEV;
- spin_unlock(&fiq->lock);
- fuse_put_request(req);
- }
+ fuse_send_one(fiq, req);
- return err;
+ return 0;
}
/*
@@ -1075,9 +1124,9 @@ __releases(fiq->lock)
return err ? err : reqsize;
}
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
- unsigned int max,
- unsigned int *countp)
+static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
@@ -1096,7 +1145,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
return head;
}
-EXPORT_SYMBOL(fuse_dequeue_forget);
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
@@ -1111,7 +1159,7 @@ __releases(fiq->lock)
struct fuse_in_header ih = {
.opcode = FUSE_FORGET,
.nodeid = forget->forget_one.nodeid,
- .unique = fuse_get_unique(fiq),
+ .unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1142,7 +1190,7 @@ __releases(fiq->lock)
struct fuse_batch_forget_in arg = { .count = 0 };
struct fuse_in_header ih = {
.opcode = FUSE_BATCH_FORGET,
- .unique = fuse_get_unique(fiq),
+ .unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1830,7 +1878,7 @@ static void fuse_resend(struct fuse_conn *fc)
}
/* iq and pq requests are both oldest to newest */
list_splice(&to_queue, &fiq->pending);
- fiq->ops->wake_pending_and_unlock(fiq);
+ fuse_dev_wake_and_unlock(fiq);
}
static int fuse_notify_resend(struct fuse_conn *fc)
@@ -2329,15 +2377,15 @@ static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp)
return -EFAULT;
f = fdget(oldfd);
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
/*
* Check against file->f_op because CUSE
* uses the same ioctl handler.
*/
- if (f.file->f_op == file->f_op)
- fud = fuse_get_dev(f.file);
+ if (fd_file(f)->f_op == file->f_op)
+ fud = fuse_get_dev(fd_file(f));
res = -EINVAL;
if (fud) {
@@ -2408,7 +2456,6 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
const struct file_operations fuse_dev_operations = {
.owner = THIS_MODULE,
.open = fuse_dev_open,
- .llseek = no_llseek,
.read_iter = fuse_dev_read,
.splice_read = fuse_dev_splice_read,
.write_iter = fuse_dev_write,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8e96df9fd76c..54104dd48af7 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -545,17 +545,21 @@ static u32 fuse_ext_size(size_t size)
/*
* This adds just a single supplementary group that matches the parent's group.
*/
-static int get_create_supp_group(struct inode *dir, struct fuse_in_arg *ext)
+static int get_create_supp_group(struct mnt_idmap *idmap,
+ struct inode *dir,
+ struct fuse_in_arg *ext)
{
struct fuse_conn *fc = get_fuse_conn(dir);
struct fuse_ext_header *xh;
struct fuse_supp_groups *sg;
kgid_t kgid = dir->i_gid;
+ vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, kgid);
gid_t parent_gid = from_kgid(fc->user_ns, kgid);
+
u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0]));
- if (parent_gid == (gid_t) -1 || gid_eq(kgid, current_fsgid()) ||
- !in_group_p(kgid))
+ if (parent_gid == (gid_t) -1 || vfsgid_eq_kgid(vfsgid, current_fsgid()) ||
+ !vfsgid_in_group_p(vfsgid))
return 0;
xh = extend_arg(ext, sg_len);
@@ -572,7 +576,8 @@ static int get_create_supp_group(struct inode *dir, struct fuse_in_arg *ext)
return 0;
}
-static int get_create_ext(struct fuse_args *args,
+static int get_create_ext(struct mnt_idmap *idmap,
+ struct fuse_args *args,
struct inode *dir, struct dentry *dentry,
umode_t mode)
{
@@ -583,7 +588,7 @@ static int get_create_ext(struct fuse_args *args,
if (fc->init_security)
err = get_security_context(dentry, mode, &ext);
if (!err && fc->create_supp_group)
- err = get_create_supp_group(dir, &ext);
+ err = get_create_supp_group(idmap, dir, &ext);
if (!err && ext.size) {
WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
@@ -609,9 +614,9 @@ static void free_ext_value(struct fuse_args *args)
* If the filesystem doesn't support this, then fall back to separate
* 'mknod' + 'open' requests.
*/
-static int fuse_create_open(struct inode *dir, struct dentry *entry,
- struct file *file, unsigned int flags,
- umode_t mode, u32 opcode)
+static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *entry, struct file *file,
+ unsigned int flags, umode_t mode, u32 opcode)
{
int err;
struct inode *inode;
@@ -668,11 +673,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
args.out_args[1].size = sizeof(*outopenp);
args.out_args[1].value = outopenp;
- err = get_create_ext(&args, dir, entry, mode);
+ err = get_create_ext(idmap, &args, dir, entry, mode);
if (err)
goto out_free_ff;
- err = fuse_simple_request(fm, &args);
+ err = fuse_simple_idmap_request(idmap, fm, &args);
free_ext_value(&args);
if (err)
goto out_free_ff;
@@ -729,6 +734,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
umode_t mode)
{
int err;
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
struct fuse_conn *fc = get_fuse_conn(dir);
struct dentry *res = NULL;
@@ -753,7 +759,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
if (fc->no_create)
goto mknod;
- err = fuse_create_open(dir, entry, file, flags, mode, FUSE_CREATE);
+ err = fuse_create_open(idmap, dir, entry, file, flags, mode, FUSE_CREATE);
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
@@ -764,7 +770,7 @@ out_dput:
return err;
mknod:
- err = fuse_mknod(&nop_mnt_idmap, dir, entry, mode, 0);
+ err = fuse_mknod(idmap, dir, entry, mode, 0);
if (err)
goto out_dput;
no_open:
@@ -774,9 +780,9 @@ no_open:
/*
* Code shared between mknod, mkdir, symlink and link
*/
-static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
- struct inode *dir, struct dentry *entry,
- umode_t mode)
+static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_entry_out outarg;
struct inode *inode;
@@ -798,12 +804,12 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
args->out_args[0].value = &outarg;
if (args->opcode != FUSE_LINK) {
- err = get_create_ext(args, dir, entry, mode);
+ err = get_create_ext(idmap, args, dir, entry, mode);
if (err)
goto out_put_forget_req;
}
- err = fuse_simple_request(fm, args);
+ err = fuse_simple_idmap_request(idmap, fm, args);
free_ext_value(args);
if (err)
goto out_put_forget_req;
@@ -864,13 +870,13 @@ static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(fm, &args, dir, entry, mode);
+ return create_new_entry(idmap, fm, &args, dir, entry, mode);
}
static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *entry, umode_t mode, bool excl)
{
- return fuse_mknod(&nop_mnt_idmap, dir, entry, mode, 0);
+ return fuse_mknod(idmap, dir, entry, mode, 0);
}
static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
@@ -882,7 +888,8 @@ static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
if (fc->no_tmpfile)
return -EOPNOTSUPP;
- err = fuse_create_open(dir, file->f_path.dentry, file, file->f_flags, mode, FUSE_TMPFILE);
+ err = fuse_create_open(idmap, dir, file->f_path.dentry, file,
+ file->f_flags, mode, FUSE_TMPFILE);
if (err == -ENOSYS) {
fc->no_tmpfile = 1;
err = -EOPNOTSUPP;
@@ -909,7 +916,7 @@ static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(fm, &args, dir, entry, S_IFDIR);
+ return create_new_entry(idmap, fm, &args, dir, entry, S_IFDIR);
}
static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
@@ -925,7 +932,7 @@ static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = entry->d_name.name;
args.in_args[1].size = len;
args.in_args[1].value = link;
- return create_new_entry(fm, &args, dir, entry, S_IFLNK);
+ return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK);
}
void fuse_flush_time_update(struct inode *inode)
@@ -1019,7 +1026,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
return err;
}
-static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
+static int fuse_rename_common(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent,
struct inode *newdir, struct dentry *newent,
unsigned int flags, int opcode, size_t argsize)
{
@@ -1040,7 +1047,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
args.in_args[1].value = oldent->d_name.name;
args.in_args[2].size = newent->d_name.len + 1;
args.in_args[2].value = newent->d_name.name;
- err = fuse_simple_request(fm, &args);
+ err = fuse_simple_idmap_request(idmap, fm, &args);
if (!err) {
/* ctime changes */
fuse_update_ctime(d_inode(oldent));
@@ -1086,7 +1093,8 @@ static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
if (fc->no_rename2 || fc->minor < 23)
return -EINVAL;
- err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+ err = fuse_rename_common((flags & RENAME_WHITEOUT) ? idmap : &invalid_mnt_idmap,
+ olddir, oldent, newdir, newent, flags,
FUSE_RENAME2,
sizeof(struct fuse_rename2_in));
if (err == -ENOSYS) {
@@ -1094,7 +1102,7 @@ static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
err = -EINVAL;
}
} else {
- err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+ err = fuse_rename_common(&invalid_mnt_idmap, olddir, oldent, newdir, newent, 0,
FUSE_RENAME,
sizeof(struct fuse_rename_in));
}
@@ -1119,7 +1127,7 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
args.in_args[0].value = &inarg;
args.in_args[1].size = newent->d_name.len + 1;
args.in_args[1].value = newent->d_name.name;
- err = create_new_entry(fm, &args, newdir, newent, inode->i_mode);
+ err = create_new_entry(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
if (!err)
fuse_update_ctime_in_cache(inode);
else if (err == -EINTR)
@@ -1128,18 +1136,22 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
return err;
}
-static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
- struct kstat *stat)
+static void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct fuse_attr *attr, struct kstat *stat)
{
unsigned int blkbits;
struct fuse_conn *fc = get_fuse_conn(inode);
+ vfsuid_t vfsuid = make_vfsuid(idmap, fc->user_ns,
+ make_kuid(fc->user_ns, attr->uid));
+ vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns,
+ make_kgid(fc->user_ns, attr->gid));
stat->dev = inode->i_sb->s_dev;
stat->ino = attr->ino;
stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
stat->nlink = attr->nlink;
- stat->uid = make_kuid(fc->user_ns, attr->uid);
- stat->gid = make_kgid(fc->user_ns, attr->gid);
+ stat->uid = vfsuid_into_kuid(vfsuid);
+ stat->gid = vfsgid_into_kgid(vfsgid);
stat->rdev = inode->i_rdev;
stat->atime.tv_sec = attr->atime;
stat->atime.tv_nsec = attr->atimensec;
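
With the conversion above, the ownership reported by fuse_fillattr() is translated twice: the raw uid/gid from the server is first mapped into the connection's user namespace and then through the mount's idmapping, so stat() on an idmapped FUSE mount returns shifted ids. A minimal userspace check (a sketch; the mount path and the 0 -> 10000 shift are hypothetical):

/* Sketch: observe mapped ownership through an idmapped FUSE mount.
 * Assumes /mnt/fuse-idmapped is an idmapped mount of a FUSE filesystem
 * whose idmapping shifts uid/gid 0 to 10000 (hypothetical setup).
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/mnt/fuse-idmapped/file", &st) != 0) {
		perror("stat");
		return 1;
	}
	/* A file owned by uid 0 in the filesystem shows up as uid 10000. */
	printf("uid=%u gid=%u\n", (unsigned)st.st_uid, (unsigned)st.st_gid);
	return 0;
}
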
@@ -1178,8 +1190,8 @@ static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
attr->blksize = sx->blksize;
}
-static int fuse_do_statx(struct inode *inode, struct file *file,
- struct kstat *stat)
+static int fuse_do_statx(struct mnt_idmap *idmap, struct inode *inode,
+ struct file *file, struct kstat *stat)
{
int err;
struct fuse_attr attr;
@@ -1232,15 +1244,15 @@ static int fuse_do_statx(struct inode *inode, struct file *file,
stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
stat->btime.tv_sec = sx->btime.tv_sec;
stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
- fuse_fillattr(inode, &attr, stat);
+ fuse_fillattr(idmap, inode, &attr, stat);
stat->result_mask |= STATX_TYPE;
}
return 0;
}
-static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
- struct file *file)
+static int fuse_do_getattr(struct mnt_idmap *idmap, struct inode *inode,
+ struct kstat *stat, struct file *file)
{
int err;
struct fuse_getattr_in inarg;
@@ -1279,15 +1291,15 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
ATTR_TIMEOUT(&outarg),
attr_version);
if (stat)
- fuse_fillattr(inode, &outarg.attr, stat);
+ fuse_fillattr(idmap, inode, &outarg.attr, stat);
}
}
return err;
}
-static int fuse_update_get_attr(struct inode *inode, struct file *file,
- struct kstat *stat, u32 request_mask,
- unsigned int flags)
+static int fuse_update_get_attr(struct mnt_idmap *idmap, struct inode *inode,
+ struct file *file, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -1318,17 +1330,17 @@ retry:
forget_all_cached_acls(inode);
/* Try statx if BTIME is requested */
if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
- err = fuse_do_statx(inode, file, stat);
+ err = fuse_do_statx(idmap, inode, file, stat);
if (err == -ENOSYS) {
fc->no_statx = 1;
err = 0;
goto retry;
}
} else {
- err = fuse_do_getattr(inode, stat, file);
+ err = fuse_do_getattr(idmap, inode, stat, file);
}
} else if (stat) {
- generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
if (test_bit(FUSE_I_BTIME, &fi->state)) {
@@ -1342,7 +1354,7 @@ retry:
int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
{
- return fuse_update_get_attr(inode, file, NULL, mask, 0);
+ return fuse_update_get_attr(&nop_mnt_idmap, inode, file, NULL, mask, 0);
}
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
@@ -1462,6 +1474,14 @@ static int fuse_access(struct inode *inode, int mask)
BUG_ON(mask & MAY_NOT_BLOCK);
+	/*
+	 * We should not send FUSE_ACCESS to userspace when idmapped
+	 * mounts are allowed: in that case fc->default_permissions
+	 * is set and access permission checks are done on the
+	 * kernel side.
+	 */
+ WARN_ON_ONCE(!(fm->sb->s_iflags & SB_I_NOIDMAP));
+
if (fm->fc->no_access)
return 0;
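
For context, the idmapped mount that this WARN_ON_ONCE() guards against is created entirely from userspace, and with this series it is only permitted once the server has negotiated FUSE_ALLOW_IDMAP on a default_permissions mount. A sketch of the usual open_tree()/mount_setattr() sequence, with hypothetical paths and an already-prepared user namespace fd (error-path cleanup trimmed):

/* Sketch: attach an idmapping to a detached copy of a FUSE mount. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

static int make_idmapped(int userns_fd)
{
	struct mount_attr attr = {
		.attr_set = MOUNT_ATTR_IDMAP,
		.userns_fd = userns_fd,
	};
	int fd;

	fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt/fuse",
		     OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
	if (fd < 0)
		return -1;
	if (syscall(SYS_mount_setattr, fd, "", AT_EMPTY_PATH,
		    &attr, sizeof(attr)) < 0)
		return -1;
	return syscall(SYS_move_mount, fd, "", AT_FDCWD, "/mnt/idmapped",
		       MOVE_MOUNT_F_EMPTY_PATH);
}
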
@@ -1486,7 +1506,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask)
return -ECHILD;
forget_all_cached_acls(inode);
- return fuse_do_getattr(inode, NULL, NULL);
+ return fuse_do_getattr(&nop_mnt_idmap, inode, NULL, NULL);
}
/*
@@ -1534,7 +1554,7 @@ static int fuse_permission(struct mnt_idmap *idmap,
}
if (fc->default_permissions) {
- err = generic_permission(&nop_mnt_idmap, inode, mask);
+ err = generic_permission(idmap, inode, mask);
/* If permission is denied, try to refresh file
attributes. This is also needed, because the root
@@ -1542,7 +1562,7 @@ static int fuse_permission(struct mnt_idmap *idmap,
if (err == -EACCES && !refreshed) {
err = fuse_perm_getattr(inode, mask);
if (!err)
- err = generic_permission(&nop_mnt_idmap,
+ err = generic_permission(idmap,
inode, mask);
}
@@ -1738,17 +1758,29 @@ static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
return true;
}
-static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
- struct fuse_setattr_in *arg, bool trust_local_cmtime)
+static void iattr_to_fattr(struct mnt_idmap *idmap, struct fuse_conn *fc,
+ struct iattr *iattr, struct fuse_setattr_in *arg,
+ bool trust_local_cmtime)
{
unsigned ivalid = iattr->ia_valid;
if (ivalid & ATTR_MODE)
arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
- if (ivalid & ATTR_UID)
- arg->valid |= FATTR_UID, arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
- if (ivalid & ATTR_GID)
- arg->valid |= FATTR_GID, arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
+
+ if (ivalid & ATTR_UID) {
+ kuid_t fsuid = from_vfsuid(idmap, fc->user_ns, iattr->ia_vfsuid);
+
+ arg->valid |= FATTR_UID;
+ arg->uid = from_kuid(fc->user_ns, fsuid);
+ }
+
+ if (ivalid & ATTR_GID) {
+ kgid_t fsgid = from_vfsgid(idmap, fc->user_ns, iattr->ia_vfsgid);
+
+ arg->valid |= FATTR_GID;
+ arg->gid = from_kgid(fc->user_ns, fsgid);
+ }
+
if (ivalid & ATTR_SIZE)
arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
if (ivalid & ATTR_ATIME) {
@@ -1868,8 +1900,8 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
* vmtruncate() doesn't allow for this case, so do the rlimit checking
* and the actual truncation by hand.
*/
-int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
- struct file *file)
+int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct file *file)
{
struct inode *inode = d_inode(dentry);
struct fuse_mount *fm = get_fuse_mount(inode);
@@ -1889,7 +1921,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (!fc->default_permissions)
attr->ia_valid |= ATTR_FORCE;
- err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
+ err = setattr_prepare(idmap, dentry, attr);
if (err)
return err;
@@ -1948,7 +1980,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
memset(&inarg, 0, sizeof(inarg));
memset(&outarg, 0, sizeof(outarg));
- iattr_to_fattr(fc, attr, &inarg, trust_local_cmtime);
+ iattr_to_fattr(idmap, fc, attr, &inarg, trust_local_cmtime);
if (file) {
struct fuse_file *ff = file->private_data;
inarg.valid |= FATTR_FH;
@@ -2065,7 +2097,7 @@ static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
* ia_mode calculation may have used stale i_mode.
* Refresh and recalculate.
*/
- ret = fuse_do_getattr(inode, NULL, file);
+ ret = fuse_do_getattr(idmap, inode, NULL, file);
if (ret)
return ret;
@@ -2083,7 +2115,7 @@ static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
if (!attr->ia_valid)
return 0;
- ret = fuse_do_setattr(entry, attr, file);
+ ret = fuse_do_setattr(idmap, entry, attr, file);
if (!ret) {
/*
* If filesystem supports acls it may have updated acl xattrs in
@@ -2122,7 +2154,7 @@ static int fuse_getattr(struct mnt_idmap *idmap,
return -EACCES;
}
- return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
+ return fuse_update_get_attr(idmap, inode, NULL, stat, request_mask, flags);
}
static const struct inode_operations fuse_dir_inode_operations = {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ba6df52a823e..f33fbce86ae0 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -448,9 +448,6 @@ static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
/*
* Check if any page in a range is under writeback
- *
- * This is currently done by walking the list of writepage requests
- * for the inode, which can be pretty inefficient.
*/
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
pgoff_t idx_to)
@@ -458,6 +455,9 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
struct fuse_inode *fi = get_fuse_inode(inode);
bool found;
+ if (RB_EMPTY_ROOT(&fi->writepages))
+ return false;
+
spin_lock(&fi->lock);
found = fuse_find_writeback(fi, idx_from, idx_to);
spin_unlock(&fi->lock);
@@ -1345,7 +1345,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
/* shared locks are not allowed with parallel page cache IO */
if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
- return false;
+ return true;
/* Parallel dio beyond EOF is not supported, at least for now. */
if (fuse_io_past_eof(iocb, from))
@@ -1398,6 +1398,7 @@ static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
+ struct mnt_idmap *idmap = file_mnt_idmap(file);
struct address_space *mapping = file->f_mapping;
ssize_t written = 0;
struct inode *inode = mapping->host;
@@ -1412,7 +1413,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
return err;
if (fc->handle_killpriv_v2 &&
- setattr_should_drop_suidgid(&nop_mnt_idmap,
+ setattr_should_drop_suidgid(idmap,
file_inode(file))) {
goto writethrough;
}
@@ -1762,27 +1763,31 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
for (i = 0; i < ap->num_pages; i++)
__free_page(ap->pages[i]);
- if (wpa->ia.ff)
- fuse_file_put(wpa->ia.ff, false);
+ fuse_file_put(wpa->ia.ff, false);
kfree(ap->pages);
kfree(wpa);
}
-static void fuse_writepage_finish(struct fuse_mount *fm,
- struct fuse_writepage_args *wpa)
+static void fuse_writepage_finish_stat(struct inode *inode, struct page *page)
+{
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ dec_wb_stat(&bdi->wb, WB_WRITEBACK);
+ dec_node_page_state(page, NR_WRITEBACK_TEMP);
+ wb_writeout_inc(&bdi->wb);
+}
+
+static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
{
struct fuse_args_pages *ap = &wpa->ia.ap;
struct inode *inode = wpa->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- for (i = 0; i < ap->num_pages; i++) {
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
- }
+ for (i = 0; i < ap->num_pages; i++)
+ fuse_writepage_finish_stat(inode, ap->pages[i]);
+
wake_up(&fi->page_waitq);
}
@@ -1829,19 +1834,14 @@ __acquires(fi->lock)
out_free:
fi->writectr--;
rb_erase(&wpa->writepages_entry, &fi->writepages);
- fuse_writepage_finish(fm, wpa);
+ fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
/* After rb_erase() aux request list is private */
for (aux = wpa->next; aux; aux = next) {
- struct backing_dev_info *bdi = inode_to_bdi(aux->inode);
-
next = aux->next;
aux->next = NULL;
-
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
+ fuse_writepage_finish_stat(aux->inode, aux->ia.ap.pages[0]);
fuse_writepage_free(aux);
}
@@ -1936,7 +1936,6 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
wpa->next = next->next;
next->next = NULL;
- next->ia.ff = fuse_file_get(wpa->ia.ff);
tree_insert(&fi->writepages, next);
/*
@@ -1965,7 +1964,7 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
fuse_send_writepage(fm, next, inarg->offset + inarg->size);
}
fi->writectr--;
- fuse_writepage_finish(fm, wpa);
+ fuse_writepage_finish(wpa);
spin_unlock(&fi->lock);
fuse_writepage_free(wpa);
}
@@ -2049,49 +2048,77 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
rcu_read_unlock();
}
+static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
+ struct folio *tmp_folio, uint32_t page_index)
+{
+ struct inode *inode = folio->mapping->host;
+ struct fuse_args_pages *ap = &wpa->ia.ap;
+
+ folio_copy(tmp_folio, folio);
+
+ ap->pages[page_index] = &tmp_folio->page;
+ ap->descs[page_index].offset = 0;
+ ap->descs[page_index].length = PAGE_SIZE;
+
+ inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
+ inc_node_page_state(&tmp_folio->page, NR_WRITEBACK_TEMP);
+}
+
+static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
+ struct fuse_file *ff)
+{
+ struct inode *inode = folio->mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_writepage_args *wpa;
+ struct fuse_args_pages *ap;
+
+ wpa = fuse_writepage_args_alloc();
+ if (!wpa)
+ return NULL;
+
+ fuse_writepage_add_to_bucket(fc, wpa);
+ fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0);
+ wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
+ wpa->inode = inode;
+ wpa->ia.ff = ff;
+
+ ap = &wpa->ia.ap;
+ ap->args.in_pages = true;
+ ap->args.end = fuse_writepage_end;
+
+ return wpa;
+}
+
static int fuse_writepage_locked(struct folio *folio)
{
struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
- struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_writepage_args *wpa;
struct fuse_args_pages *ap;
struct folio *tmp_folio;
+ struct fuse_file *ff;
int error = -ENOMEM;
- folio_start_writeback(folio);
-
- wpa = fuse_writepage_args_alloc();
- if (!wpa)
- goto err;
- ap = &wpa->ia.ap;
-
tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
if (!tmp_folio)
- goto err_free;
+ goto err;
error = -EIO;
- wpa->ia.ff = fuse_write_file_get(fi);
- if (!wpa->ia.ff)
+ ff = fuse_write_file_get(fi);
+ if (!ff)
goto err_nofile;
- fuse_writepage_add_to_bucket(fc, wpa);
- fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0);
+ wpa = fuse_writepage_args_setup(folio, ff);
+ error = -ENOMEM;
+ if (!wpa)
+ goto err_writepage_args;
- folio_copy(tmp_folio, folio);
- wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
- wpa->next = NULL;
- ap->args.in_pages = true;
+ ap = &wpa->ia.ap;
ap->num_pages = 1;
- ap->pages[0] = &tmp_folio->page;
- ap->descs[0].offset = 0;
- ap->descs[0].length = PAGE_SIZE;
- ap->args.end = fuse_writepage_end;
- wpa->inode = inode;
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP);
+ folio_start_writeback(folio);
+ fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0);
spin_lock(&fi->lock);
tree_insert(&fi->writepages, wpa);
@@ -2103,13 +2130,12 @@ static int fuse_writepage_locked(struct folio *folio)
return 0;
+err_writepage_args:
+ fuse_file_put(ff, false);
err_nofile:
folio_put(tmp_folio);
-err_free:
- kfree(wpa);
err:
mapping_set_error(folio->mapping, error);
- folio_end_writeback(folio);
return error;
}
@@ -2155,7 +2181,6 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
int num_pages = wpa->ia.ap.num_pages;
int i;
- wpa->ia.ff = fuse_file_get(data->ff);
spin_lock(&fi->lock);
list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
@@ -2210,11 +2235,7 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
spin_unlock(&fi->lock);
if (tmp) {
- struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
-
- dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
- wb_writeout_inc(&bdi->wb);
+ fuse_writepage_finish_stat(new_wpa->inode, new_ap->pages[0]);
fuse_writepage_free(new_wpa);
}
@@ -2264,24 +2285,17 @@ static int fuse_writepages_fill(struct folio *folio,
struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct page *tmp_page;
+ struct folio *tmp_folio;
int err;
- if (!data->ff) {
- err = -EIO;
- data->ff = fuse_write_file_get(fi);
- if (!data->ff)
- goto out_unlock;
- }
-
if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
fuse_writepages_send(data);
data->wpa = NULL;
}
err = -ENOMEM;
- tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
- if (!tmp_page)
+ tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0);
+ if (!tmp_folio)
goto out_unlock;
/*
@@ -2299,35 +2313,20 @@ static int fuse_writepages_fill(struct folio *folio,
*/
if (data->wpa == NULL) {
err = -ENOMEM;
- wpa = fuse_writepage_args_alloc();
+ wpa = fuse_writepage_args_setup(folio, data->ff);
if (!wpa) {
- __free_page(tmp_page);
+ folio_put(tmp_folio);
goto out_unlock;
}
- fuse_writepage_add_to_bucket(fc, wpa);
-
+ fuse_file_get(wpa->ia.ff);
data->max_pages = 1;
-
ap = &wpa->ia.ap;
- fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
- wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
- wpa->next = NULL;
- ap->args.in_pages = true;
- ap->args.end = fuse_writepage_end;
- ap->num_pages = 0;
- wpa->inode = inode;
}
folio_start_writeback(folio);
- copy_highpage(tmp_page, &folio->page);
- ap->pages[ap->num_pages] = tmp_page;
- ap->descs[ap->num_pages].offset = 0;
- ap->descs[ap->num_pages].length = PAGE_SIZE;
+ fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_pages);
data->orig_pages[ap->num_pages] = &folio->page;
- inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
-
err = 0;
if (data->wpa) {
/*
@@ -2352,13 +2351,13 @@ static int fuse_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
+ struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_wb_data data;
int err;
- err = -EIO;
if (fuse_is_bad(inode))
- goto out;
+ return -EIO;
if (wbc->sync_mode == WB_SYNC_NONE &&
fc->num_background >= fc->congestion_threshold)
@@ -2366,7 +2365,9 @@ static int fuse_writepages(struct address_space *mapping,
data.inode = inode;
data.wpa = NULL;
- data.ff = NULL;
+ data.ff = fuse_write_file_get(fi);
+ if (!data.ff)
+ return -EIO;
err = -ENOMEM;
data.orig_pages = kcalloc(fc->max_pages,
@@ -2380,11 +2381,10 @@ static int fuse_writepages(struct address_space *mapping,
WARN_ON(!data.wpa->ia.ap.num_pages);
fuse_writepages_send(&data);
}
- if (data.ff)
- fuse_file_put(data.ff, false);
kfree(data.orig_pages);
out:
+ fuse_file_put(data.ff, false);
return err;
}
@@ -2973,7 +2973,7 @@ static void fuse_do_truncate(struct file *file)
attr.ia_file = file;
attr.ia_valid |= ATTR_FILE;
- fuse_do_setattr(file_dentry(file), &attr, file);
+ fuse_do_setattr(file_mnt_idmap(file), file_dentry(file), &attr, file);
}
static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f23919610313..e6cc3d552b13 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -449,22 +449,19 @@ struct fuse_iqueue;
*/
struct fuse_iqueue_ops {
/**
- * Signal that a forget has been queued
+ * Send one forget
*/
- void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link);
/**
- * Signal that an INTERRUPT request has been queued
+ * Send interrupt for request
*/
- void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
- * Signal that a request has been queued
+ * Send one request
*/
- void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
- __releases(fiq->lock);
+ void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
* Clean up when fuse_iqueue is destroyed
@@ -869,7 +866,7 @@ struct fuse_conn {
/** Negotiated minor version */
unsigned minor;
- /** Entry on the fuse_mount_list */
+ /** Entry on the fuse_conn_list */
struct list_head entry;
/** Device ID from the root super block */
@@ -1053,10 +1050,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
struct fuse_forget_link *fuse_alloc_forget(void);
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
- unsigned int max,
- unsigned int *countp);
-
/*
* Initialize READ or READDIR request
*/
@@ -1154,7 +1147,22 @@ void __exit fuse_ctl_cleanup(void);
/**
* Simple request sending that does request allocation and freeing
*/
-ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args);
+ssize_t __fuse_simple_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args);
+
+static inline ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
+{
+ return __fuse_simple_request(&invalid_mnt_idmap, fm, args);
+}
+
+static inline ssize_t fuse_simple_idmap_request(struct mnt_idmap *idmap,
+ struct fuse_mount *fm,
+ struct fuse_args *args)
+{
+ return __fuse_simple_request(idmap, fm, args);
+}
+
int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
gfp_t gfp_flags);
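
The split keeps every existing fuse_simple_request() caller unchanged while letting idmap-aware paths opt in explicitly. A hypothetical pair of call sites, following the pattern of the dir.c conversions above:

/* Sketch (kernel context): an idmap-aware operation threads the caller's
 * idmap through so the uid/gid in the request header can be mapped;
 * legacy callers keep the old entry point, which passes invalid_mnt_idmap.
 */
err = fuse_simple_idmap_request(idmap, fm, &args);	/* e.g. mkdir/symlink */
err = fuse_simple_request(fm, &args);			/* unchanged callers */
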
@@ -1330,8 +1338,8 @@ bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written);
int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
-int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
- struct file *file);
+int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr, struct file *file);
void fuse_set_initialized(struct fuse_conn *fc);
diff --git a/fs/fuse/fuse_trace.h b/fs/fuse/fuse_trace.h
new file mode 100644
index 000000000000..bbe9ddd8c716
--- /dev/null
+++ b/fs/fuse/fuse_trace.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fuse
+
+#if !defined(_TRACE_FUSE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FUSE_H
+
+#include <linux/tracepoint.h>
+
+#define OPCODES \
+ EM( FUSE_LOOKUP, "FUSE_LOOKUP") \
+ EM( FUSE_FORGET, "FUSE_FORGET") \
+ EM( FUSE_GETATTR, "FUSE_GETATTR") \
+ EM( FUSE_SETATTR, "FUSE_SETATTR") \
+ EM( FUSE_READLINK, "FUSE_READLINK") \
+ EM( FUSE_SYMLINK, "FUSE_SYMLINK") \
+ EM( FUSE_MKNOD, "FUSE_MKNOD") \
+ EM( FUSE_MKDIR, "FUSE_MKDIR") \
+ EM( FUSE_UNLINK, "FUSE_UNLINK") \
+ EM( FUSE_RMDIR, "FUSE_RMDIR") \
+ EM( FUSE_RENAME, "FUSE_RENAME") \
+ EM( FUSE_LINK, "FUSE_LINK") \
+ EM( FUSE_OPEN, "FUSE_OPEN") \
+ EM( FUSE_READ, "FUSE_READ") \
+ EM( FUSE_WRITE, "FUSE_WRITE") \
+ EM( FUSE_STATFS, "FUSE_STATFS") \
+ EM( FUSE_RELEASE, "FUSE_RELEASE") \
+ EM( FUSE_FSYNC, "FUSE_FSYNC") \
+ EM( FUSE_SETXATTR, "FUSE_SETXATTR") \
+ EM( FUSE_GETXATTR, "FUSE_GETXATTR") \
+ EM( FUSE_LISTXATTR, "FUSE_LISTXATTR") \
+ EM( FUSE_REMOVEXATTR, "FUSE_REMOVEXATTR") \
+ EM( FUSE_FLUSH, "FUSE_FLUSH") \
+ EM( FUSE_INIT, "FUSE_INIT") \
+ EM( FUSE_OPENDIR, "FUSE_OPENDIR") \
+ EM( FUSE_READDIR, "FUSE_READDIR") \
+ EM( FUSE_RELEASEDIR, "FUSE_RELEASEDIR") \
+ EM( FUSE_FSYNCDIR, "FUSE_FSYNCDIR") \
+ EM( FUSE_GETLK, "FUSE_GETLK") \
+ EM( FUSE_SETLK, "FUSE_SETLK") \
+ EM( FUSE_SETLKW, "FUSE_SETLKW") \
+ EM( FUSE_ACCESS, "FUSE_ACCESS") \
+ EM( FUSE_CREATE, "FUSE_CREATE") \
+ EM( FUSE_INTERRUPT, "FUSE_INTERRUPT") \
+ EM( FUSE_BMAP, "FUSE_BMAP") \
+ EM( FUSE_DESTROY, "FUSE_DESTROY") \
+ EM( FUSE_IOCTL, "FUSE_IOCTL") \
+ EM( FUSE_POLL, "FUSE_POLL") \
+ EM( FUSE_NOTIFY_REPLY, "FUSE_NOTIFY_REPLY") \
+ EM( FUSE_BATCH_FORGET, "FUSE_BATCH_FORGET") \
+ EM( FUSE_FALLOCATE, "FUSE_FALLOCATE") \
+ EM( FUSE_READDIRPLUS, "FUSE_READDIRPLUS") \
+ EM( FUSE_RENAME2, "FUSE_RENAME2") \
+ EM( FUSE_LSEEK, "FUSE_LSEEK") \
+ EM( FUSE_COPY_FILE_RANGE, "FUSE_COPY_FILE_RANGE") \
+ EM( FUSE_SETUPMAPPING, "FUSE_SETUPMAPPING") \
+ EM( FUSE_REMOVEMAPPING, "FUSE_REMOVEMAPPING") \
+ EM( FUSE_SYNCFS, "FUSE_SYNCFS") \
+ EM( FUSE_TMPFILE, "FUSE_TMPFILE") \
+ EM( FUSE_STATX, "FUSE_STATX") \
+ EMe(CUSE_INIT, "CUSE_INIT")
+
+/*
+ * This will turn the above table into TRACE_DEFINE_ENUM() for each of the
+ * entries.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+OPCODES
+
+/* Now we redefine it with the table that __print_symbolic needs. */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+TRACE_EVENT(fuse_request_send,
+ TP_PROTO(const struct fuse_req *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, connection)
+ __field(uint64_t, unique)
+ __field(enum fuse_opcode, opcode)
+ __field(uint32_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->connection = req->fm->fc->dev;
+ __entry->unique = req->in.h.unique;
+ __entry->opcode = req->in.h.opcode;
+ __entry->len = req->in.h.len;
+ ),
+
+	TP_printk("connection %u req %llu opcode %u (%s) len %u",
+ __entry->connection, __entry->unique, __entry->opcode,
+ __print_symbolic(__entry->opcode, OPCODES), __entry->len)
+);
+
+TRACE_EVENT(fuse_request_end,
+ TP_PROTO(const struct fuse_req *req),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(dev_t, connection)
+ __field(uint64_t, unique)
+ __field(uint32_t, len)
+ __field(int32_t, error)
+ ),
+
+ TP_fast_assign(
+ __entry->connection = req->fm->fc->dev;
+ __entry->unique = req->in.h.unique;
+ __entry->len = req->out.h.len;
+ __entry->error = req->out.h.error;
+ ),
+
+ TP_printk("connection %u req %llu len %u error %d", __entry->connection,
+ __entry->unique, __entry->len, __entry->error)
+);
+
+#endif /* _TRACE_FUSE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE fuse_trace
+#include <trace/define_trace.h>
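
Both events land under the "fuse" trace system. A minimal consumer (a sketch, assuming tracefs is mounted at /sys/kernel/tracing):

/* Sketch: enable the fuse tracepoints and stream them. */
#include <stdio.h>

int main(void)
{
	FILE *en = fopen("/sys/kernel/tracing/events/fuse/enable", "w");
	FILE *pipe;
	int c;

	if (!en || fputs("1", en) < 0)
		return 1;
	fclose(en);

	pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!pipe)
		return 1;
	while ((c = fgetc(pipe)) != EOF)	/* blocks for new events */
		putchar(c);
	return 0;
}
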
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index bebd89002328..fd3321e29a3e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1348,6 +1348,12 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
}
if (flags & FUSE_NO_EXPORT_SUPPORT)
fm->sb->s_export_op = &fuse_export_fid_operations;
+ if (flags & FUSE_ALLOW_IDMAP) {
+ if (fc->default_permissions)
+ fm->sb->s_iflags &= ~SB_I_NOIDMAP;
+ else
+ ok = false;
+ }
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
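
Tying FUSE_ALLOW_IDMAP to default_permissions here is deliberate: SB_I_NOIDMAP is only cleared when permission checking happens on the kernel side, where the mount's idmapping can actually be applied; a server that advertises FUSE_ALLOW_IDMAP without default_permissions fails the INIT negotiation (ok = false above).
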
@@ -1395,7 +1401,7 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
- FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND;
+ FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
@@ -1572,6 +1578,7 @@ static void fuse_sb_defaults(struct super_block *sb)
sb->s_time_gran = 1;
sb->s_export_op = &fuse_export_operations;
sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
+ sb->s_iflags |= SB_I_NOIDMAP;
if (sb->s_user_ns != &init_user_ns)
sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
@@ -1984,7 +1991,7 @@ static void fuse_kill_sb_anon(struct super_block *sb)
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
- .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
+ .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
.init_fs_context = fuse_init_fs_context,
.parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_anon,
@@ -2005,7 +2012,7 @@ static struct file_system_type fuseblk_fs_type = {
.init_fs_context = fuse_init_fs_context,
.parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_blk,
- .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
+ .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("fuseblk");
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 9666d13884ce..62aee8289d11 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -228,16 +228,13 @@ int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
if (map->flags || map->padding)
goto out;
- file = fget(map->fd);
+ file = fget_raw(map->fd);
res = -EBADF;
if (!file)
goto out;
- res = -EOPNOTSUPP;
- if (!file->f_op->read_iter || !file->f_op->write_iter)
- goto out_fput;
-
backing_sb = file_inode(file)->i_sb;
+ pr_info("%s: %x:%pD %i\n", __func__, backing_sb->s_dev, file, backing_sb->s_stack_depth);
res = -ELOOP;
if (backing_sb->s_stack_depth >= fc->max_stack_depth)
goto out_fput;
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index dd5260141615..6404a189e989 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -56,12 +56,14 @@ struct virtio_fs_vq {
bool connected;
long in_flight;
struct completion in_flight_zero; /* No inflight requests */
+ struct kobject *kobj;
char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
/* A virtio-fs device instance */
struct virtio_fs {
struct kobject kobj;
+ struct kobject *mqs_kobj;
struct list_head list; /* on virtio_fs_instances */
char *tag;
struct virtio_fs_vq *vqs;
@@ -200,19 +202,94 @@ static const struct kobj_type virtio_fs_ktype = {
.default_groups = virtio_fs_groups,
};
+static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs,
+ struct kobject *kobj)
+{
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ if (kobj == fs->vqs[i].kobj)
+ return &fs->vqs[i];
+ }
+ return NULL;
+}
+
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
+ struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
+
+ if (!fsvq)
+ return -EINVAL;
+ return sysfs_emit(buf, "%s\n", fsvq->name);
+}
+
+static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name);
+
+static ssize_t cpu_list_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
+ struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
+ unsigned int cpu, qid;
+ const size_t size = PAGE_SIZE - 1;
+ bool first = true;
+ int ret = 0, pos = 0;
+
+ if (!fsvq)
+ return -EINVAL;
+
+ qid = fsvq->vq->index;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid - VQ_REQUEST)) {
+ if (first)
+ ret = snprintf(buf + pos, size - pos, "%u", cpu);
+ else
+ ret = snprintf(buf + pos, size - pos, ", %u", cpu);
+
+ if (ret >= size - pos)
+ break;
+ first = false;
+ pos += ret;
+ }
+ }
+ ret = snprintf(buf + pos, size + 1 - pos, "\n");
+ return pos + ret;
+}
+
+static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list);
+
+static struct attribute *virtio_fs_vq_attrs[] = {
+ &virtio_fs_vq_name_attr.attr,
+ &virtio_fs_vq_cpu_list_attr.attr,
+ NULL
+};
+
+static struct attribute_group virtio_fs_vq_attr_group = {
+ .attrs = virtio_fs_vq_attrs,
+};
+
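
With the kobjects wired up above, each request queue gets a directory under the per-instance kobject, so the files should appear as /sys/fs/virtiofs/<N>/mqs/<qid>/name and /sys/fs/virtiofs/<N>/mqs/<qid>/cpu_list. cpu_list prints every CPU whose mq_map entry points at that queue, and all CPUs for queues below VQ_REQUEST (hiprio), which are not per-CPU mapped.
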
/* Make sure virtiofs_mutex is held */
-static void virtio_fs_put(struct virtio_fs *fs)
+static void virtio_fs_put_locked(struct virtio_fs *fs)
{
+ lockdep_assert_held(&virtio_fs_mutex);
+
kobject_put(&fs->kobj);
}
+static void virtio_fs_put(struct virtio_fs *fs)
+{
+ mutex_lock(&virtio_fs_mutex);
+ virtio_fs_put_locked(fs);
+ mutex_unlock(&virtio_fs_mutex);
+}
+
static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
struct virtio_fs *vfs = fiq->priv;
- mutex_lock(&virtio_fs_mutex);
virtio_fs_put(vfs);
- mutex_unlock(&virtio_fs_mutex);
}
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
@@ -273,6 +350,50 @@ static void virtio_fs_start_all_queues(struct virtio_fs *fs)
}
}
+static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ int i;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+ kobject_put(fsvq->kobj);
+ }
+}
+
+static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
+{
+ struct virtio_fs_vq *fsvq;
+ char buff[12];
+ int i, j, ret;
+
+ for (i = 0; i < fs->nvqs; i++) {
+ fsvq = &fs->vqs[i];
+
+ sprintf(buff, "%d", i);
+ fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
+		if (!fsvq->kobj) {
+ ret = -ENOMEM;
+ goto out_del;
+ }
+
+ ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group);
+ if (ret) {
+ kobject_put(fsvq->kobj);
+ goto out_del;
+ }
+ }
+
+ return 0;
+
+out_del:
+ for (j = 0; j < i; j++) {
+ fsvq = &fs->vqs[j];
+ kobject_put(fsvq->kobj);
+ }
+ return ret;
+}
+
 /* Add a new instance to the list or return -EEXIST if tag name exists */
static int virtio_fs_add_instance(struct virtio_device *vdev,
struct virtio_fs *fs)
@@ -296,17 +417,22 @@ static int virtio_fs_add_instance(struct virtio_device *vdev,
*/
fs->kobj.kset = virtio_fs_kset;
ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index);
- if (ret < 0) {
- mutex_unlock(&virtio_fs_mutex);
- return ret;
+ if (ret < 0)
+ goto out_unlock;
+
+ fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj);
+ if (!fs->mqs_kobj) {
+ ret = -ENOMEM;
+ goto out_del;
}
ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device");
- if (ret < 0) {
- kobject_del(&fs->kobj);
- mutex_unlock(&virtio_fs_mutex);
- return ret;
- }
+ if (ret < 0)
+ goto out_put;
+
+ ret = virtio_fs_add_queues_sysfs(fs);
+ if (ret)
+ goto out_remove;
list_add_tail(&fs->list, &virtio_fs_instances);
@@ -315,6 +441,16 @@ static int virtio_fs_add_instance(struct virtio_device *vdev,
kobject_uevent(&fs->kobj, KOBJ_ADD);
return 0;
+
+out_remove:
+ sysfs_remove_link(&fs->kobj, "device");
+out_put:
+ kobject_put(fs->mqs_kobj);
+out_del:
+ kobject_del(&fs->kobj);
+out_unlock:
+ mutex_unlock(&virtio_fs_mutex);
+ return ret;
}
/* Return the virtio_fs with a given tag, or NULL */
@@ -1043,7 +1179,9 @@ static void virtio_fs_remove(struct virtio_device *vdev)
mutex_lock(&virtio_fs_mutex);
/* This device is going away. No one should get new reference */
list_del_init(&fs->list);
+ virtio_fs_delete_queues_sysfs(fs);
sysfs_remove_link(&fs->kobj, "device");
+ kobject_put(fs->mqs_kobj);
kobject_del(&fs->kobj);
virtio_fs_stop_all_queues(fs);
virtio_fs_drain_all_queues_locked(fs);
@@ -1052,7 +1190,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
vdev->priv = NULL;
/* Put device reference on virtio_fs object */
- virtio_fs_put(fs);
+ virtio_fs_put_locked(fs);
mutex_unlock(&virtio_fs_mutex);
}
@@ -1091,22 +1229,13 @@ static struct virtio_driver virtio_fs_driver = {
#endif
};
-static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
{
- struct fuse_forget_link *link;
struct virtio_fs_forget *forget;
struct virtio_fs_forget_req *req;
- struct virtio_fs *fs;
- struct virtio_fs_vq *fsvq;
- u64 unique;
-
- link = fuse_dequeue_forget(fiq, 1, NULL);
- unique = fuse_get_unique(fiq);
-
- fs = fiq->priv;
- fsvq = &fs->vqs[VQ_HIPRIO];
- spin_unlock(&fiq->lock);
+ struct virtio_fs *fs = fiq->priv;
+ struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
+ u64 unique = fuse_get_unique(fiq);
/* Allocate a buffer for the request */
forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
@@ -1126,8 +1255,7 @@ __releases(fiq->lock)
kfree(link);
}
-static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
/*
* TODO interrupts.
@@ -1136,7 +1264,6 @@ __releases(fiq->lock)
* Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
* with shared lock between host and guest.
*/
- spin_unlock(&fiq->lock);
}
/* Count number of scatter-gather elements required */
@@ -1341,21 +1468,17 @@ out:
return ret;
}
-static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
unsigned int queue_id;
struct virtio_fs *fs;
- struct fuse_req *req;
struct virtio_fs_vq *fsvq;
int ret;
- WARN_ON(list_empty(&fiq->pending));
- req = list_last_entry(&fiq->pending, struct fuse_req, list);
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique(fiq);
+
clear_bit(FR_PENDING, &req->flags);
- list_del_init(&req->list);
- WARN_ON(!list_empty(&fiq->pending));
- spin_unlock(&fiq->lock);
fs = fiq->priv;
queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
@@ -1393,10 +1516,10 @@ __releases(fiq->lock)
}
static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
- .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
- .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
- .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
- .release = virtio_fs_fiq_release,
+ .send_forget = virtio_fs_send_forget,
+ .send_interrupt = virtio_fs_send_interrupt,
+ .send_req = virtio_fs_send_req,
+ .release = virtio_fs_fiq_release,
};
static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
@@ -1596,9 +1719,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
out_err:
kfree(fc);
- mutex_lock(&virtio_fs_mutex);
virtio_fs_put(fs);
- mutex_unlock(&virtio_fs_mutex);
return err;
}
@@ -1628,6 +1749,7 @@ static struct file_system_type virtio_fs_type = {
.name = "virtiofs",
.init_fs_context = virtio_fs_init_fs_context,
.kill_sb = virtio_kill_sb,
+ .fs_flags = FS_ALLOW_IDMAP,
};
static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 10d5acd3f742..68fc8af14700 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -139,35 +139,6 @@ static int __gfs2_jdata_write_folio(struct folio *folio,
}
/**
- * gfs2_jdata_writepage - Write complete page
- * @page: Page to write
- * @wbc: The writeback control
- *
- * Returns: errno
- *
- */
-
-static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(page);
- struct inode *inode = page->mapping->host;
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
-
- if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
- goto out;
- if (folio_test_checked(folio) || current->journal_info)
- goto out_ignore;
- return __gfs2_jdata_write_folio(folio, wbc);
-
-out_ignore:
- folio_redirty_for_writepage(wbc, folio);
-out:
- folio_unlock(folio);
- return 0;
-}
-
-/**
* gfs2_writepages - Write a bunch of dirty pages back to disk
* @mapping: The mapping to write
* @wbc: Write-back control
@@ -748,7 +719,6 @@ static const struct address_space_operations gfs2_aops = {
};
static const struct address_space_operations gfs2_jdata_aops = {
- .writepage = gfs2_jdata_writepage,
.writepages = gfs2_jdata_writepages,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 08982937b5df..f7dd64856c9b 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -1057,7 +1057,7 @@ retry:
}
pagefault_disable();
- ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL);
pagefault_enable();
if (ret > 0)
written += ret;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 12a769077ea0..269c3bc7fced 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1885,14 +1885,16 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
unsigned long delay = 0;
- unsigned long holdtime;
- unsigned long now = jiffies;
gfs2_glock_hold(gl);
spin_lock(&gl->gl_lockref.lock);
- holdtime = gl->gl_tchange + gl->gl_hold_time;
if (!list_empty(&gl->gl_holders) &&
gl->gl_name.ln_type == LM_TYPE_INODE) {
+ unsigned long now = jiffies;
+ unsigned long holdtime;
+
+ holdtime = gl->gl_tchange + gl->gl_hold_time;
+
if (time_before(now, holdtime))
delay = holdtime - now;
if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
@@ -2249,6 +2251,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp);
destroy_workqueue(sdp->sd_glock_wq);
+ sdp->sd_glock_wq = NULL;
}
static const char *state2str(unsigned state)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 6ee6013fb825..f9c5089783d2 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -80,15 +80,6 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
brelse(bd->bd_bh);
}
-static int __gfs2_writepage(struct folio *folio, struct writeback_control *wbc,
- void *data)
-{
- struct address_space *mapping = data;
- int ret = mapping->a_ops->writepage(&folio->page, wbc);
- mapping_set_error(mapping, ret);
- return ret;
-}
-
/**
* gfs2_ail1_start_one - Start I/O on a transaction
* @sdp: The superblock
@@ -140,7 +131,7 @@ __acquires(&sdp->sd_ail_lock)
if (!mapping)
continue;
spin_unlock(&sdp->sd_ail_lock);
- ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping);
+ ret = mapping->a_ops->writepages(mapping, wbc);
if (need_resched()) {
blk_finish_plug(plug);
cond_resched();
@@ -149,6 +140,7 @@ __acquires(&sdp->sd_ail_lock)
spin_lock(&sdp->sd_ail_lock);
if (ret == -ENODATA) /* if a jdata write into a new hole */
ret = 0; /* ignore it */
+ mapping_set_error(mapping, ret);
if (ret || wbc->nr_to_write <= 0)
break;
return -EBUSY;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 2b26e8d529aa..fea3efcc2f93 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -30,9 +30,9 @@
#include "util.h"
#include "trace_gfs2.h"
-static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
+static void gfs2_aspace_write_folio(struct folio *folio,
+ struct writeback_control *wbc)
{
- struct folio *folio = page_folio(page);
struct buffer_head *bh, *head;
int nr_underway = 0;
blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
@@ -66,8 +66,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
} while ((bh = bh->b_this_page) != head);
/*
- * The page and its buffers are protected by PageWriteback(), so we can
- * drop the bh refcounts early.
+ * The folio and its buffers are protected from truncation by
+ * the writeback flag, so we can drop the bh refcounts early.
*/
BUG_ON(folio_test_writeback(folio));
folio_start_writeback(folio);
@@ -84,21 +84,31 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
if (nr_underway == 0)
folio_end_writeback(folio);
+}
- return 0;
+static int gfs2_aspace_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct folio *folio = NULL;
+ int error;
+
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ gfs2_aspace_write_folio(folio, wbc);
+
+ return error;
}
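
This mirrors the removal of ->writepage throughout this series: writeback_iter() walks the dirty folios directly and hands back the final status through its error argument, so gfs2 no longer needs a write_cache_pages() trampoline (see the fs/gfs2/log.c hunk above, which now calls ->writepages directly).
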
const struct address_space_operations gfs2_meta_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .writepage = gfs2_aspace_writepage,
+ .writepages = gfs2_aspace_writepages,
.release_folio = gfs2_release_folio,
};
const struct address_space_operations gfs2_rgrp_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
- .writepage = gfs2_aspace_writepage,
+ .writepages = gfs2_aspace_writepages,
.release_folio = gfs2_release_folio,
};
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ff1f3e3dc65c..e83d293c3614 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1307,7 +1307,8 @@ fail_debug:
fail_delete_wq:
destroy_workqueue(sdp->sd_delete_wq);
fail_glock_wq:
- destroy_workqueue(sdp->sd_glock_wq);
+ if (sdp->sd_glock_wq)
+ destroy_workqueue(sdp->sd_glock_wq);
fail_free:
free_sbd(sdp);
sb->s_fs_info = NULL;
diff --git a/fs/inode.c b/fs/inode.c
index af78f515403f..471ae4a31549 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -439,14 +439,6 @@ static void init_once(void *foo)
}
/*
- * inode->i_lock must be held
- */
-void __iget(struct inode *inode)
-{
- atomic_inc(&inode->i_count);
-}
-
-/*
* get additional reference to inode; caller must already hold one.
*/
void ihold(struct inode *inode)
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 64776891120c..6e0c954388d4 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -235,9 +235,9 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
loff_t cloned;
int ret;
- if (!src_file.file)
+ if (!fd_file(src_file))
return -EBADF;
- cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
+ cloned = vfs_clone_file_range(fd_file(src_file), off, dst_file, destoff,
olen, 0);
if (cloned < 0)
ret = cloned;
@@ -895,16 +895,16 @@ SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
struct fd f = fdget(fd);
int error;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = security_file_ioctl(f.file, cmd, arg);
+ error = security_file_ioctl(fd_file(f), cmd, arg);
if (error)
goto out;
- error = do_vfs_ioctl(f.file, fd, cmd, arg);
+ error = do_vfs_ioctl(fd_file(f), fd, cmd, arg);
if (error == -ENOIOCTLCMD)
- error = vfs_ioctl(f.file, cmd, arg);
+ error = vfs_ioctl(fd_file(f), cmd, arg);
out:
fdput(f);
@@ -953,32 +953,32 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
struct fd f = fdget(fd);
int error;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = security_file_ioctl_compat(f.file, cmd, arg);
+ error = security_file_ioctl_compat(fd_file(f), cmd, arg);
if (error)
goto out;
switch (cmd) {
/* FICLONE takes an int argument, so don't use compat_ptr() */
case FICLONE:
- error = ioctl_file_clone(f.file, arg, 0, 0, 0);
+ error = ioctl_file_clone(fd_file(f), arg, 0, 0, 0);
break;
#if defined(CONFIG_X86_64)
/* these get messy on amd64 due to alignment differences */
case FS_IOC_RESVSP_32:
case FS_IOC_RESVSP64_32:
- error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
+ error = compat_ioctl_preallocate(fd_file(f), 0, compat_ptr(arg));
break;
case FS_IOC_UNRESVSP_32:
case FS_IOC_UNRESVSP64_32:
- error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
+ error = compat_ioctl_preallocate(fd_file(f), FALLOC_FL_PUNCH_HOLE,
compat_ptr(arg));
break;
case FS_IOC_ZERO_RANGE_32:
- error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
+ error = compat_ioctl_preallocate(fd_file(f), FALLOC_FL_ZERO_RANGE,
compat_ptr(arg));
break;
#endif
@@ -998,13 +998,13 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
* argument.
*/
default:
- error = do_vfs_ioctl(f.file, fd, cmd,
+ error = do_vfs_ioctl(fd_file(f), fd, cmd,
(unsigned long)compat_ptr(arg));
if (error != -ENOIOCTLCMD)
break;
- if (f.file->f_op->compat_ioctl)
- error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
+ if (fd_file(f)->f_op->compat_ioctl)
+ error = fd_file(f)->f_op->compat_ioctl(fd_file(f), cmd, arg);
if (error == -ENOIOCTLCMD)
error = -ENOTTY;
break;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 9b4ca3811a24..11ea747228ae 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -23,7 +23,6 @@
#define IOEND_BATCH_SIZE 4096
-typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
/*
* Structure allocated for each folio to track per-block uptodate, dirty state
* and I/O completions.
@@ -1022,13 +1021,14 @@ retry:
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops, void *private)
{
struct iomap_iter iter = {
.inode = iocb->ki_filp->f_mapping->host,
.pos = iocb->ki_pos,
.len = iov_iter_count(i),
.flags = IOMAP_WRITE,
+ .private = private,
};
ssize_t ret;
@@ -1046,15 +1046,14 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
-static int iomap_write_delalloc_ifs_punch(struct inode *inode,
+static void iomap_write_delalloc_ifs_punch(struct inode *inode,
struct folio *folio, loff_t start_byte, loff_t end_byte,
- iomap_punch_t punch)
+ struct iomap *iomap, iomap_punch_t punch)
{
unsigned int first_blk, last_blk, i;
loff_t last_byte;
u8 blkbits = inode->i_blkbits;
struct iomap_folio_state *ifs;
- int ret = 0;
/*
* When we have per-block dirty tracking, there can be
@@ -1064,47 +1063,35 @@ static int iomap_write_delalloc_ifs_punch(struct inode *inode,
*/
ifs = folio->private;
if (!ifs)
- return ret;
+ return;
last_byte = min_t(loff_t, end_byte - 1,
folio_pos(folio) + folio_size(folio) - 1);
first_blk = offset_in_folio(folio, start_byte) >> blkbits;
last_blk = offset_in_folio(folio, last_byte) >> blkbits;
for (i = first_blk; i <= last_blk; i++) {
- if (!ifs_block_is_dirty(folio, ifs, i)) {
- ret = punch(inode, folio_pos(folio) + (i << blkbits),
- 1 << blkbits);
- if (ret)
- return ret;
- }
+ if (!ifs_block_is_dirty(folio, ifs, i))
+ punch(inode, folio_pos(folio) + (i << blkbits),
+ 1 << blkbits, iomap);
}
-
- return ret;
}
-
-static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
+static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
- iomap_punch_t punch)
+ struct iomap *iomap, iomap_punch_t punch)
{
- int ret = 0;
-
if (!folio_test_dirty(folio))
- return ret;
+ return;
/* if dirty, punch up to offset */
if (start_byte > *punch_start_byte) {
- ret = punch(inode, *punch_start_byte,
- start_byte - *punch_start_byte);
- if (ret)
- return ret;
+ punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
+ iomap);
}
/* Punch non-dirty blocks within folio */
- ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
- end_byte, punch);
- if (ret)
- return ret;
+ iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
+ iomap, punch);
/*
* Make sure the next punch start is correctly bound to
@@ -1112,8 +1099,6 @@ static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
*/
*punch_start_byte = min_t(loff_t, end_byte,
folio_pos(folio) + folio_size(folio));
-
- return ret;
}
/*
@@ -1133,13 +1118,12 @@ static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
* This function uses [start_byte, end_byte) intervals (i.e. open ended) to
* simplify range iterations.
*/
-static int iomap_write_delalloc_scan(struct inode *inode,
+static void iomap_write_delalloc_scan(struct inode *inode,
loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
- iomap_punch_t punch)
+ struct iomap *iomap, iomap_punch_t punch)
{
while (start_byte < end_byte) {
struct folio *folio;
- int ret;
/* grab locked page */
folio = filemap_lock_folio(inode->i_mapping,
@@ -1150,20 +1134,14 @@ static int iomap_write_delalloc_scan(struct inode *inode,
continue;
}
- ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
- start_byte, end_byte, punch);
- if (ret) {
- folio_unlock(folio);
- folio_put(folio);
- return ret;
- }
+ iomap_write_delalloc_punch(inode, folio, punch_start_byte,
+ start_byte, end_byte, iomap, punch);
/* move offset to start of next folio in range */
start_byte = folio_next_index(folio) << PAGE_SHIFT;
folio_unlock(folio);
folio_put(folio);
}
- return 0;
}
/*
@@ -1199,12 +1177,12 @@ static int iomap_write_delalloc_scan(struct inode *inode,
* require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
* the code to subtle off-by-one bugs....
*/
-static int iomap_write_delalloc_release(struct inode *inode,
- loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
+static void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+ loff_t end_byte, unsigned flags, struct iomap *iomap,
+ iomap_punch_t punch)
{
loff_t punch_start_byte = start_byte;
loff_t scan_end_byte = min(i_size_read(inode), end_byte);
- int error = 0;
/*
* Lock the mapping to avoid races with page faults re-instantiating
@@ -1221,13 +1199,15 @@ static int iomap_write_delalloc_release(struct inode *inode,
/*
* If there is no more data to scan, all that is left is to
* punch out the remaining range.
+ *
+ * Note that mapping_seek_hole_data is only supposed to return
+ * either an offset or -ENXIO, so WARN on any other error as
+ * that would be an API change without updating the callers.
*/
if (start_byte == -ENXIO || start_byte == scan_end_byte)
break;
- if (start_byte < 0) {
- error = start_byte;
+ if (WARN_ON_ONCE(start_byte < 0))
goto out_unlock;
- }
WARN_ON_ONCE(start_byte < punch_start_byte);
WARN_ON_ONCE(start_byte > scan_end_byte);
@@ -1237,28 +1217,31 @@ static int iomap_write_delalloc_release(struct inode *inode,
*/
data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
scan_end_byte, SEEK_HOLE);
- if (data_end < 0) {
- error = data_end;
+ if (WARN_ON_ONCE(data_end < 0))
goto out_unlock;
- }
- WARN_ON_ONCE(data_end <= start_byte);
+
+ /*
+ * If we race with post-direct I/O invalidation of the page cache,
+ * there might be no data left at start_byte.
+ */
+ if (data_end == start_byte)
+ continue;
+
+ WARN_ON_ONCE(data_end < start_byte);
WARN_ON_ONCE(data_end > scan_end_byte);
- error = iomap_write_delalloc_scan(inode, &punch_start_byte,
- start_byte, data_end, punch);
- if (error)
- goto out_unlock;
+ iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
+ data_end, iomap, punch);
/* The next data search starts at the end of this one. */
start_byte = data_end;
}
if (punch_start_byte < end_byte)
- error = punch(inode, punch_start_byte,
- end_byte - punch_start_byte);
+ punch(inode, punch_start_byte, end_byte - punch_start_byte,
+ iomap);
out_unlock:
filemap_invalidate_unlock(inode->i_mapping);
- return error;
}
/*
@@ -1291,20 +1274,20 @@ out_unlock:
* ->punch
* internal filesystem allocation lock
*/
-int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
- struct iomap *iomap, loff_t pos, loff_t length,
- ssize_t written, iomap_punch_t punch)
+void iomap_file_buffered_write_punch_delalloc(struct inode *inode,
+ loff_t pos, loff_t length, ssize_t written, unsigned flags,
+ struct iomap *iomap, iomap_punch_t punch)
{
loff_t start_byte;
loff_t end_byte;
unsigned int blocksize = i_blocksize(inode);
if (iomap->type != IOMAP_DELALLOC)
- return 0;
+ return;
/* If we didn't reserve the blocks, we're not allowed to punch them. */
if (!(iomap->flags & IOMAP_F_NEW))
- return 0;
+ return;
/*
* start_byte refers to the first unused block after a short write. If
@@ -1319,26 +1302,35 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
/* Nothing to do if we've written the entire delalloc extent */
if (start_byte >= end_byte)
- return 0;
+ return;
- return iomap_write_delalloc_release(inode, start_byte, end_byte,
- punch);
+ iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
+ punch);
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
struct iomap *iomap = &iter->iomap;
- const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t pos = iter->pos;
loff_t length = iomap_length(iter);
loff_t written = 0;
- /* don't bother with blocks that are not shared to start with */
+ /* Don't bother with blocks that are not shared to start with. */
if (!(iomap->flags & IOMAP_F_SHARED))
return length;
- /* don't bother with holes or unwritten extents */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+
+ /*
+ * Don't bother with holes or unwritten extents.
+ *
+ * Note that we use srcmap directly instead of iomap_iter_srcmap as
+ * unsharing requires providing a separate source map, and the presence
+ * of one is a good indicator that unsharing is needed, unlike
+ * IOMAP_F_SHARED which can be set for any data that goes into the COW
+ * fork for XFS.
+ */
+ if (iter->srcmap.type == IOMAP_HOLE ||
+ iter->srcmap.type == IOMAP_UNWRITTEN)
return length;
do {
@@ -1393,16 +1385,53 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
-static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+/*
+ * Flush the remaining range of the iter and mark the current mapping stale.
+ * This is used when zero range sees an unwritten mapping that may have had
+ * dirty pagecache over it.
+ */
+static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
+{
+ struct address_space *mapping = i->inode->i_mapping;
+ loff_t end = i->pos + i->len - 1;
+
+ i->iomap.flags |= IOMAP_F_STALE;
+ return filemap_write_and_wait_range(mapping, i->pos, end);
+}
+
+static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
+ bool *range_dirty)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t pos = iter->pos;
loff_t length = iomap_length(iter);
loff_t written = 0;
- /* already zeroed? we're done. */
- if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+ /*
+ * We must zero subranges of unwritten mappings that might be dirty in
+ * pagecache from previous writes. We only know whether the entire range
+ * was clean or not, however, and dirty folios may have been written
+ * back or reclaimed at any point after mapping lookup.
+ *
+ * The easiest way to deal with this is to flush pagecache to trigger
+ * any pending unwritten conversions and then grab the updated extents
+ * from the fs. The flush may change the current mapping, so mark it
+ * stale for the iterator to remap it for the next pass to handle
+ * properly.
+ *
+ * Note that holes are treated the same as unwritten because zero range
+ * is (ab)used for partial folio zeroing in some cases. Hole backed
+ * post-eof ranges can be dirtied via mapped write and the flush
+ * triggers writeback time post-eof zeroing.
+ */
+ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) {
+ if (*range_dirty) {
+ *range_dirty = false;
+ return iomap_zero_iter_flush_and_stale(iter);
+ }
+ /* range is clean and already zeroed, nothing to do */
return length;
+ }
do {
struct folio *folio;
@@ -1450,9 +1479,27 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
.flags = IOMAP_ZERO,
};
int ret;
+ bool range_dirty;
+
+ /*
+ * Zero range wants to skip pre-zeroed (i.e. unwritten) mappings, but
+ * pagecache must be flushed to ensure stale data from previous
+ * buffered writes is not exposed. A flush is only required for certain
+ * types of mappings, but checking pagecache after mapping lookup is
+ * racy with writeback and reclaim.
+ *
+ * Therefore, check the entire range first and pass along whether any
+ * part of it is dirty. If so and an underlying mapping warrants it,
+ * flush the cache at that point. This trades off the occasional false
+ * positive (and spurious flush, if the dirty data and mapping don't
+ * happen to overlap) for simplicity in handling a relatively uncommon
+ * situation.
+ */
+ range_dirty = filemap_range_needs_writeback(inode->i_mapping,
+ pos, pos + len - 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_zero_iter(&iter, did_zero);
+ iter.processed = iomap_zero_iter(&iter, did_zero, &range_dirty);
return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
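
For reference, a typical caller shape; this is a simplified sketch of how XFS wires buffered zeroing to this helper (DAX handling elided, and assuming the usual ops name xfs_buffered_write_iomap_ops):

int xfs_zero_range(struct xfs_inode *ip, loff_t pos, loff_t len, bool *did_zero)
{
	/* The up-front dirty-range check above is what makes this safe
	 * to call over unwritten extents with dirty pagecache. */
	return iomap_zero_range(VFS_I(ip), pos, len, did_zero,
				&xfs_buffered_write_iomap_ops);
}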
@@ -2007,10 +2054,10 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
}
EXPORT_SYMBOL_GPL(iomap_writepages);
-static int __init iomap_init(void)
+static int __init iomap_buffered_init(void)
{
return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
offsetof(struct iomap_ioend, io_bio),
BIOSET_NEED_BVECS);
}
-fs_initcall(iomap_init);
+fs_initcall(iomap_buffered_init);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index f3b43d223a46..f637aa0706a3 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -27,6 +27,13 @@
#define IOMAP_DIO_WRITE (1U << 30)
#define IOMAP_DIO_DIRTY (1U << 31)
+/*
+ * Used for sub block zeroing in iomap_dio_zero()
+ */
+#define IOMAP_ZERO_PAGE_SIZE (SZ_64K)
+#define IOMAP_ZERO_PAGE_ORDER (get_order(IOMAP_ZERO_PAGE_SIZE))
+static struct page *zero_page;
+
struct iomap_dio {
struct kiocb *iocb;
const struct iomap_dio_ops *dops;
@@ -232,13 +239,20 @@ release_bio:
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
-static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
+static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
loff_t pos, unsigned len)
{
struct inode *inode = file_inode(dio->iocb->ki_filp);
- struct page *page = ZERO_PAGE(0);
struct bio *bio;
+ if (!len)
+ return 0;
+ /*
+ * Max block size supported is 64k
+ */
+ if (WARN_ON_ONCE(len > IOMAP_ZERO_PAGE_SIZE))
+ return -EINVAL;
+
bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
GFP_KERNEL);
@@ -246,8 +260,9 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- __bio_add_page(bio, page, len, 0);
+ __bio_add_page(bio, zero_page, len, 0);
iomap_dio_submit_bio(iter, dio, bio, pos);
+ return 0;
}
/*
@@ -356,8 +371,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
if (need_zeroout) {
/* zero out from the start of the block to the write offset */
pad = pos & (fs_block_size - 1);
- if (pad)
- iomap_dio_zero(iter, dio, pos - pad, pad);
+
+ ret = iomap_dio_zero(iter, dio, pos - pad, pad);
+ if (ret)
+ goto out;
}
/*
@@ -431,7 +448,8 @@ zero_tail:
/* zero out from the end of the write to the end of the block */
pad = pos & (fs_block_size - 1);
if (pad)
- iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
+ ret = iomap_dio_zero(iter, dio, pos,
+ fs_block_size - pad);
}
out:
/* Undo iter limitation to current extent */
@@ -753,3 +771,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
+
+static int __init iomap_dio_init(void)
+{
+ zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+ IOMAP_ZERO_PAGE_ORDER);
+
+ if (!zero_page)
+ return -ENOMEM;
+
+ return 0;
+}
+fs_initcall(iomap_dio_init);
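
A quick sanity check on the sizing above, assuming a 4 KiB PAGE_SIZE build:

/* get_order(SZ_64K) == 4 with 4 KiB pages, i.e. a 16-page compound
 * allocation, so one __bio_add_page() of up to 64 KiB covers sub-block
 * zeroing for the largest supported filesystem block size. */

so a single statically allocated zero page replaces repeated use of ZERO_PAGE(0), which is only PAGE_SIZE long.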
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
index ee9660e9671c..7755e587f778 100644
--- a/fs/isofs/rock.h
+++ b/fs/isofs/rock.h
@@ -44,7 +44,7 @@ struct RR_PN_s {
struct SL_component {
__u8 flags;
__u8 len;
- __u8 text[];
+ __u8 text[] __counted_by(len);
} __attribute__ ((packed));
struct RR_SL_s {
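
__counted_by(len) tells CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS builds that text[] holds len elements, so out-of-bounds accesses can trap at runtime. One ordering rule follows: the counter must be written before the array is touched. A hypothetical construction sketch (isofs only parses these records from disk; the names here are illustrative):

struct SL_component *sl = kmalloc(struct_size(sl, text, n), GFP_KERNEL);
if (sl) {
	sl->len = n;			/* set the bound first... */
	memcpy(sl->text, name, n);	/* ...so this access is bounds-checked */
}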
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 951f78634adf..b3971e91e8eb 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -79,17 +79,23 @@ __releases(&journal->j_state_lock)
if (space_left < nblocks) {
int chkpt = journal->j_checkpoint_transactions != NULL;
tid_t tid = 0;
+ bool has_transaction = false;
- if (journal->j_committing_transaction)
+ if (journal->j_committing_transaction) {
tid = journal->j_committing_transaction->t_tid;
+ has_transaction = true;
+ }
spin_unlock(&journal->j_list_lock);
write_unlock(&journal->j_state_lock);
if (chkpt) {
jbd2_log_do_checkpoint(journal);
- } else if (jbd2_cleanup_journal_tail(journal) == 0) {
- /* We were able to recover space; yay! */
+ } else if (jbd2_cleanup_journal_tail(journal) <= 0) {
+ /*
+ * We were able to recover space or the
+ * journal was aborted due to an error.
+ */
;
- } else if (tid) {
+ } else if (has_transaction) {
/*
* jbd2_journal_commit_transaction() may want
* to take the checkpoint_mutex if JBD2_FLUSHED
@@ -407,6 +413,7 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
tid_t tid = 0;
unsigned long nr_freed = 0;
unsigned long freed;
+ bool first_set = false;
again:
spin_lock(&journal->j_list_lock);
@@ -426,8 +433,10 @@ again:
else
transaction = journal->j_checkpoint_transactions;
- if (!first_tid)
+ if (!first_set) {
first_tid = transaction->t_tid;
+ first_set = true;
+ }
last_transaction = journal->j_checkpoint_transactions->t_cpprev;
next_transaction = transaction;
last_tid = last_transaction->t_tid;
@@ -457,7 +466,7 @@ again:
spin_unlock(&journal->j_list_lock);
cond_resched();
- if (*nr_to_scan && next_tid)
+ if (*nr_to_scan && journal->j_shrink_transaction)
goto again;
out:
trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 1ebf2393bfb7..97f487c3d8fc 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -281,6 +281,16 @@ static void journal_kill_thread(journal_t *journal)
write_unlock(&journal->j_state_lock);
}
+static inline bool jbd2_data_needs_escaping(char *data)
+{
+ return *((__be32 *)data) == cpu_to_be32(JBD2_MAGIC_NUMBER);
+}
+
+static inline void jbd2_data_do_escape(char *data)
+{
+ *((unsigned int *)data) = 0;
+}
+
/*
* jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
*
@@ -318,9 +328,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr)
{
- int done_copy_out = 0;
int do_escape = 0;
- char *mapped_data;
struct buffer_head *new_bh;
struct folio *new_folio;
unsigned int new_offset;
@@ -349,37 +357,33 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
* we use that version of the data for the commit.
*/
if (jh_in->b_frozen_data) {
- done_copy_out = 1;
new_folio = virt_to_folio(jh_in->b_frozen_data);
new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
+ do_escape = jbd2_data_needs_escaping(jh_in->b_frozen_data);
+ if (do_escape)
+ jbd2_data_do_escape(jh_in->b_frozen_data);
} else {
+ char *tmp;
+ char *mapped_data;
+
new_folio = bh_in->b_folio;
new_offset = offset_in_folio(new_folio, bh_in->b_data);
- }
-
- mapped_data = kmap_local_folio(new_folio, new_offset);
- /*
- * Fire data frozen trigger if data already wasn't frozen. Do this
- * before checking for escaping, as the trigger may modify the magic
- * offset. If a copy-out happens afterwards, it will have the correct
- * data in the buffer.
- */
- if (!done_copy_out)
+ mapped_data = kmap_local_folio(new_folio, new_offset);
+ /*
+ * Fire data frozen trigger if data already wasn't frozen. Do
+ * this before checking for escaping, as the trigger may modify
+ * the magic offset. If a copy-out happens afterwards, it will
+ * have the correct data in the buffer.
+ */
jbd2_buffer_frozen_trigger(jh_in, mapped_data,
jh_in->b_triggers);
-
- /*
- * Check for escaping
- */
- if (*((__be32 *)mapped_data) == cpu_to_be32(JBD2_MAGIC_NUMBER))
- do_escape = 1;
- kunmap_local(mapped_data);
-
- /*
- * Do we need to do a data copy?
- */
- if (do_escape && !done_copy_out) {
- char *tmp;
+ do_escape = jbd2_data_needs_escaping(mapped_data);
+ kunmap_local(mapped_data);
+ /*
+ * Do we need to do a data copy?
+ */
+ if (!do_escape)
+ goto escape_done;
spin_unlock(&jh_in->b_state_lock);
tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
@@ -406,18 +410,10 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
copy_done:
new_folio = virt_to_folio(jh_in->b_frozen_data);
new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);
- done_copy_out = 1;
+ jbd2_data_do_escape(jh_in->b_frozen_data);
}
- /*
- * Did we need to do an escaping? Now we've done all the
- * copying, we can finally do so.
- * b_frozen_data is from jbd2_alloc() which always provides an
- * address from the direct kernels mapping.
- */
- if (do_escape)
- *((unsigned int *)jh_in->b_frozen_data) = 0;
-
+escape_done:
folio_set_bh(new_bh, new_folio, new_offset);
new_bh->b_size = bh_in->b_size;
new_bh->b_bdev = journal->j_dev;
@@ -710,7 +706,7 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
return -EINVAL;
write_lock(&journal->j_state_lock);
- if (tid <= journal->j_commit_sequence) {
+ if (tid_geq(journal->j_commit_sequence, tid)) {
write_unlock(&journal->j_state_lock);
return -EALREADY;
}
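
The switch away from a plain `tid <= journal->j_commit_sequence` matters because transaction IDs are 32-bit sequence numbers that wrap; the helpers in include/linux/jbd2.h compare via signed difference, roughly:

static inline int tid_geq(tid_t x, tid_t y)
{
	int difference = (x - y);
	return (difference >= 0);	/* wraparound-safe ordering */
}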
@@ -740,9 +736,9 @@ EXPORT_SYMBOL(jbd2_fc_begin_commit);
*/
static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
{
- jbd2_journal_unlock_updates(journal);
if (journal->j_fc_cleanup_callback)
journal->j_fc_cleanup_callback(journal, 0, tid);
+ jbd2_journal_unlock_updates(journal);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
if (fallback)
@@ -841,17 +837,12 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
*bh_out = NULL;
- if (journal->j_fc_off + journal->j_fc_first < journal->j_fc_last) {
- fc_off = journal->j_fc_off;
- blocknr = journal->j_fc_first + fc_off;
- journal->j_fc_off++;
- } else {
- ret = -EINVAL;
- }
-
- if (ret)
- return ret;
+ if (journal->j_fc_off + journal->j_fc_first >= journal->j_fc_last)
+ return -EINVAL;
+ fc_off = journal->j_fc_off;
+ blocknr = journal->j_fc_first + fc_off;
+ journal->j_fc_off++;
ret = jbd2_journal_bmap(journal, blocknr, &pblock);
if (ret)
return ret;
@@ -860,7 +851,6 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
if (!bh)
return -ENOMEM;
-
journal->j_fc_wbuf[fc_off] = bh;
*bh_out = bh;
@@ -903,7 +893,7 @@ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks)
}
EXPORT_SYMBOL(jbd2_fc_wait_bufs);
-int jbd2_fc_release_bufs(journal_t *journal)
+void jbd2_fc_release_bufs(journal_t *journal)
{
struct buffer_head *bh;
int i, j_fc_off;
@@ -917,8 +907,6 @@ int jbd2_fc_release_bufs(journal_t *journal)
put_bh(bh);
journal->j_fc_wbuf[i] = NULL;
}
-
- return 0;
}
EXPORT_SYMBOL(jbd2_fc_release_bufs);
@@ -1944,7 +1932,7 @@ static void jbd2_mark_journal_empty(journal_t *journal, blk_opf_t write_flags)
if (had_fast_commit)
jbd2_set_feature_fast_commit(journal);
- /* Log is no longer empty */
+ /* Log is empty */
write_lock(&journal->j_state_lock);
journal->j_flags |= JBD2_FLUSHED;
write_unlock(&journal->j_state_lock);
@@ -2866,8 +2854,7 @@ static struct journal_head *journal_alloc_journal_head(void)
ret = kmem_cache_zalloc(jbd2_journal_head_cache,
GFP_NOFS | __GFP_NOFAIL);
}
- if (ret)
- spin_lock_init(&ret->b_state_lock);
+ spin_lock_init(&ret->b_state_lock);
return ret;
}
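
For readers new to the escaping rule the new helpers encode: a data block whose first word happens to equal JBD2_MAGIC_NUMBER would be mistaken for journal metadata during recovery, so it is written with that word zeroed and flagged in the block tag; replay then restores the magic, roughly as fs/jbd2/recovery.c does:

if (flags & JBD2_FLAG_ESCAPE)
	*((__be32 *)bh->b_data) = cpu_to_be32(JBD2_MAGIC_NUMBER);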
diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index c429c42a6867..9ff37ae650ea 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -178,10 +178,10 @@ ssize_t kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
struct fd f = fdget(fd);
ssize_t ret = -EBADF;
- if (!f.file || !(f.file->f_mode & FMODE_READ))
+ if (!fd_file(f) || !(fd_file(f)->f_mode & FMODE_READ))
goto out;
- ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);
+ ret = kernel_read_file(fd_file(f), offset, buf, buf_size, file_size, id);
out:
fdput(f);
return ret;
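
The f.file -> fd_file(f) conversions in this and the following hunks are mechanical: fd_file() is an accessor that hides the layout of struct fd so its representation can change later without touching call sites. At this stage of the series it is essentially:

#define fd_file(f) ((f).file)	/* opaque accessor; freed to be repacked later */

and fd_empty(f), used further down, similarly tests for an empty descriptor instead of open-coding !f.file.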
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index c11516801784..5e6877c37f73 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -440,7 +440,7 @@ nlm_bind_host(struct nlm_host *host)
if ((clnt = host->h_rpcclnt) != NULL) {
nlm_rebind_host(host);
} else {
- unsigned long increment = nlmsvc_timeout;
+ unsigned long increment = nlm_timeout * HZ;
struct rpc_timeout timeparms = {
.to_initval = increment,
.to_increment = increment,
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index ab8042a5b895..4ec22c2f2ea3 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -53,7 +53,6 @@ EXPORT_SYMBOL_GPL(nlmsvc_ops);
static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
static struct svc_serv *nlmsvc_serv;
-unsigned long nlmsvc_timeout;
static void nlmsvc_request_retry(struct timer_list *tl)
{
@@ -68,7 +67,7 @@ unsigned int lockd_net_id;
* and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
*/
static unsigned long nlm_grace_period;
-static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
+unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
static int nlm_udpport, nlm_tcpport;
/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */
@@ -125,6 +124,8 @@ lockd(void *vrqstp)
struct net *net = &init_net;
struct lockd_net *ln = net_generic(net, lockd_net_id);
+ svc_thread_init_status(rqstp, 0);
+
/* try_to_freeze() is called from svc_recv() */
set_freezable();
@@ -333,10 +334,6 @@ static int lockd_get(void)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
- if (!nlm_timeout)
- nlm_timeout = LOCKD_DFLT_TIMEO;
- nlmsvc_timeout = nlm_timeout * HZ;
-
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, lockd);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
diff --git a/fs/locks.c b/fs/locks.c
index b51b1c395ce6..204847628f3e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2157,15 +2157,15 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
error = -EBADF;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return error;
- if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
+ if (type != F_UNLCK && !(fd_file(f)->f_mode & (FMODE_READ | FMODE_WRITE)))
goto out_putf;
- flock_make_lock(f.file, &fl, type);
+ flock_make_lock(fd_file(f), &fl, type);
- error = security_file_lock(f.file, fl.c.flc_type);
+ error = security_file_lock(fd_file(f), fl.c.flc_type);
if (error)
goto out_putf;
@@ -2173,12 +2173,12 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
if (can_sleep)
fl.c.flc_flags |= FL_SLEEP;
- if (f.file->f_op->flock)
- error = f.file->f_op->flock(f.file,
+ if (fd_file(f)->f_op->flock)
+ error = fd_file(f)->f_op->flock(fd_file(f),
(can_sleep) ? F_SETLKW : F_SETLK,
&fl);
else
- error = locks_lock_file_wait(f.file, &fl);
+ error = locks_lock_file_wait(fd_file(f), &fl);
locks_release_private(&fl);
out_putf:
diff --git a/fs/mnt_idmapping.c b/fs/mnt_idmapping.c
index 79491663dbc0..7b1df8cc2821 100644
--- a/fs/mnt_idmapping.c
+++ b/fs/mnt_idmapping.c
@@ -32,6 +32,15 @@ struct mnt_idmap nop_mnt_idmap = {
};
EXPORT_SYMBOL_GPL(nop_mnt_idmap);
+/*
+ * Carries the invalid idmapping of a full 0-4294967295 {g,u}id range.
+ * This means that all {g,u}ids are mapped to INVALID_VFS{G,U}ID.
+ */
+struct mnt_idmap invalid_mnt_idmap = {
+ .count = REFCOUNT_INIT(1),
+};
+EXPORT_SYMBOL_GPL(invalid_mnt_idmap);
+
/**
* initial_idmapping - check whether this is the initial mapping
* @ns: idmapping to check
@@ -75,6 +84,8 @@ vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return VFSUIDT_INIT(kuid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_VFSUID;
if (initial_idmapping(fs_userns))
uid = __kuid_val(kuid);
else
@@ -112,6 +123,8 @@ vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return VFSGIDT_INIT(kgid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_VFSGID;
if (initial_idmapping(fs_userns))
gid = __kgid_val(kgid);
else
@@ -140,6 +153,8 @@ kuid_t from_vfsuid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return AS_KUIDT(vfsuid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_UID;
uid = map_id_up(&idmap->uid_map, __vfsuid_val(vfsuid));
if (uid == (uid_t)-1)
return INVALID_UID;
@@ -167,6 +182,8 @@ kgid_t from_vfsgid(struct mnt_idmap *idmap,
if (idmap == &nop_mnt_idmap)
return AS_KGIDT(vfsgid);
+ if (idmap == &invalid_mnt_idmap)
+ return INVALID_GID;
gid = map_id_up(&idmap->gid_map, __vfsgid_val(vfsgid));
if (gid == (gid_t)-1)
return INVALID_GID;
@@ -296,7 +313,7 @@ struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns)
*/
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
{
- if (idmap != &nop_mnt_idmap)
+ if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap)
refcount_inc(&idmap->count);
return idmap;
@@ -312,7 +329,8 @@ EXPORT_SYMBOL_GPL(mnt_idmap_get);
*/
void mnt_idmap_put(struct mnt_idmap *idmap)
{
- if (idmap != &nop_mnt_idmap && refcount_dec_and_test(&idmap->count))
+ if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap &&
+ refcount_dec_and_test(&idmap->count))
free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);
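
A sketch of what the new invalid_mnt_idmap buys a caller: every translation through it yields the invalid id, so permission paths fail closed without needing a separate flag (illustrative caller, not from this patch):

vfsuid_t vfsuid = make_vfsuid(idmap, i_user_ns(inode), inode->i_uid);
if (!vfsuid_valid(vfsuid))	/* always taken under invalid_mnt_idmap */
	return -EOVERFLOW;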
diff --git a/fs/namei.c b/fs/namei.c
index 891b169e38c9..4a4a22a08ac2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2506,25 +2506,25 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
struct fd f = fdget_raw(nd->dfd);
struct dentry *dentry;
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
if (flags & LOOKUP_LINKAT_EMPTY) {
- if (f.file->f_cred != current_cred() &&
- !ns_capable(f.file->f_cred->user_ns, CAP_DAC_READ_SEARCH)) {
+ if (fd_file(f)->f_cred != current_cred() &&
+ !ns_capable(fd_file(f)->f_cred->user_ns, CAP_DAC_READ_SEARCH)) {
fdput(f);
return ERR_PTR(-ENOENT);
}
}
- dentry = f.file->f_path.dentry;
+ dentry = fd_file(f)->f_path.dentry;
if (*s && unlikely(!d_can_lookup(dentry))) {
fdput(f);
return ERR_PTR(-ENOTDIR);
}
- nd->path = f.file->f_path;
+ nd->path = fd_file(f)->f_path;
if (flags & LOOKUP_RCU) {
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
diff --git a/fs/namespace.c b/fs/namespace.c
index e71e4564987b..93c377816d75 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -4134,14 +4134,14 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
}
f = fdget(fs_fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
ret = -EINVAL;
- if (f.file->f_op != &fscontext_fops)
+ if (fd_file(f)->f_op != &fscontext_fops)
goto err_fsfd;
- fc = f.file->private_data;
+ fc = fd_file(f)->private_data;
ret = mutex_lock_interruptible(&fc->uapi_mutex);
if (ret < 0)
@@ -4471,6 +4471,10 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
return -EINVAL;
+ /* The filesystem has turned off idmapped mounts. */
+ if (m->mnt_sb->s_iflags & SB_I_NOIDMAP)
+ return -EINVAL;
+
/* We're not controlling the superblock. */
if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
return -EPERM;
@@ -4684,15 +4688,15 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
return -EINVAL;
f = fdget(attr->userns_fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (!proc_ns_file(f.file)) {
+ if (!proc_ns_file(fd_file(f))) {
err = -EINVAL;
goto out_fput;
}
- ns = get_proc_ns(file_inode(f.file));
+ ns = get_proc_ns(file_inode(fd_file(f)));
if (ns->ops->type != CLONE_NEWUSER) {
err = -EINVAL;
goto out_fput;
@@ -5292,13 +5296,13 @@ static struct mnt_namespace *grab_requested_mnt_ns(const struct mnt_id_req *kreq
struct ns_common *ns;
CLASS(fd, f)(kreq->spare);
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (!proc_ns_file(f.file))
+ if (!proc_ns_file(fd_file(f)))
return ERR_PTR(-EINVAL);
- ns = get_proc_ns(file_inode(f.file));
+ ns = get_proc_ns(file_inode(fd_file(f)));
if (ns->ops->type != CLONE_NEWNS)
return ERR_PTR(-EINVAL);
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index d7eae597e54d..b3910dfcb56d 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -552,6 +552,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
netfs_set_group(folio, netfs_group);
file_update_time(file);
+ set_bit(NETFS_ICTX_MODIFIED_ATTR, &ictx->flags);
if (ictx->ops->post_modify)
ictx->ops->post_modify(inode);
ret = VM_FAULT_LOCKED;
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index c9f0ed24cb7b..c562aec3b483 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -58,6 +58,7 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
/*
* misc.c
*/
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq);
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
bool needs_put);
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 0ad0982ce0e2..63280791de3b 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -9,34 +9,66 @@
#include "internal.h"
/*
- * Append a folio to the rolling queue.
+ * Make sure there's space in the rolling queue.
*/
-int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
- bool needs_put)
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq)
{
- struct folio_queue *tail = rreq->buffer_tail;
- unsigned int slot, order = folio_order(folio);
+ struct folio_queue *tail = rreq->buffer_tail, *prev;
+ unsigned int prev_nr_slots = 0;
if (WARN_ON_ONCE(!rreq->buffer && tail) ||
WARN_ON_ONCE(rreq->buffer && !tail))
- return -EIO;
-
- if (!tail || folioq_full(tail)) {
- tail = kmalloc(sizeof(*tail), GFP_NOFS);
- if (!tail)
- return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(tail);
- tail->prev = rreq->buffer_tail;
- if (tail->prev)
- tail->prev->next = tail;
- rreq->buffer_tail = tail;
- if (!rreq->buffer) {
- rreq->buffer = tail;
- iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
+ return ERR_PTR(-EIO);
+
+ prev = tail;
+ if (prev) {
+ if (!folioq_full(tail))
+ return tail;
+ prev_nr_slots = folioq_nr_slots(tail);
+ }
+
+ tail = kmalloc(sizeof(*tail), GFP_NOFS);
+ if (!tail)
+ return ERR_PTR(-ENOMEM);
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(tail);
+ tail->prev = prev;
+ if (prev)
+ /* [!] NOTE: After we set prev->next, the consumer is entirely
+ * at liberty to delete prev.
+ */
+ WRITE_ONCE(prev->next, tail);
+
+ rreq->buffer_tail = tail;
+ if (!rreq->buffer) {
+ rreq->buffer = tail;
+ iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
+ } else {
+ /* Make sure we don't leave the master iterator pointing to a
+ * block that might get immediately consumed.
+ */
+ if (rreq->io_iter.folioq == prev &&
+ rreq->io_iter.folioq_slot == prev_nr_slots) {
+ rreq->io_iter.folioq = tail;
+ rreq->io_iter.folioq_slot = 0;
}
- rreq->buffer_tail_slot = 0;
}
+ rreq->buffer_tail_slot = 0;
+ return tail;
+}
+
+/*
+ * Append a folio to the rolling queue.
+ */
+int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
+ bool needs_put)
+{
+ struct folio_queue *tail;
+ unsigned int slot, order = folio_order(folio);
+
+ tail = netfs_buffer_make_space(rreq);
+ if (IS_ERR(tail))
+ return PTR_ERR(tail);
rreq->io_iter.count += PAGE_SIZE << order;
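
For orientation, a folio_queue (include/linux/folio_queue.h) is a fixed-size array of folio slots chained into a list, which is why the producer above must allocate a new segment once the tail fills. A minimal producer sketch under those assumptions:

struct folio_queue *fq = kmalloc(sizeof(*fq), GFP_NOFS);
if (fq) {
	folioq_init(fq);		/* empty, unlinked segment */
	if (!folioq_full(fq))
		folioq_append(fq, folio);	/* occupy the next free slot */
}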
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 04e66d587f77..6293f547e4c3 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -153,12 +153,22 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
loff_t start)
{
struct netfs_io_subrequest *subreq;
+ struct iov_iter *wreq_iter = &wreq->io_iter;
+
+ /* Make sure we don't point the iterator at a used-up folio_queue
+ * struct being used as a placeholder to prevent the queue from
+ * collapsing. In such a case, extend the queue.
+ */
+ if (iov_iter_is_folioq(wreq_iter) &&
+ wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) {
+ netfs_buffer_make_space(wreq);
+ }
subreq = netfs_alloc_subrequest(wreq);
subreq->source = stream->source;
subreq->start = start;
subreq->stream_nr = stream->stream_nr;
- subreq->io_iter = wreq->io_iter;
+ subreq->io_iter = *wreq_iter;
_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
@@ -307,6 +317,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
struct netfs_io_stream *stream;
struct netfs_group *fgroup; /* TODO: Use this with ceph */
struct netfs_folio *finfo;
+ size_t iter_off = 0;
size_t fsize = folio_size(folio), flen = fsize, foff = 0;
loff_t fpos = folio_pos(folio), i_size;
bool to_eof = false, streamw = false;
@@ -462,7 +473,12 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
if (choose_s < 0)
break;
stream = &wreq->io_streams[choose_s];
- wreq->io_iter.iov_offset = stream->submit_off;
+
+ /* Advance the iterator(s). */
+ if (stream->submit_off > iter_off) {
+ iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off);
+ iter_off = stream->submit_off;
+ }
atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
stream->submit_extendable_to = fsize - stream->submit_off;
@@ -477,8 +493,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
debug = true;
}
- wreq->io_iter.iov_offset = 0;
- iov_iter_advance(&wreq->io_iter, fsize);
+ if (fsize > iter_off)
+ iov_iter_advance(&wreq->io_iter, fsize - iter_off);
atomic64_set(&wreq->issued_to, fpos + fsize);
if (!debug)
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 57249f040dfc..0eb20012792f 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -4,6 +4,7 @@ config NFS_FS
depends on INET && FILE_LOCKING && MULTIUSER
select LOCKD
select SUNRPC
+ select NFS_COMMON
select NFS_ACL_SUPPORT if NFS_V3_ACL
help
Choose Y here if you want to access files residing on other
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 5f6db37f461e..9fb2f2cac87e 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -13,6 +13,7 @@ nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o
+nfs-$(CONFIG_NFS_LOCALIO) += localio.o
obj-$(CONFIG_NFS_V2) += nfsv2.o
nfsv2-y := nfs2super.o proc.o nfs2xdr.o
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 8adfcd4c8c1a..6cf92498a5ac 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -76,6 +76,8 @@ nfs4_callback_svc(void *vrqstp)
{
struct svc_rqst *rqstp = vrqstp;
+ svc_thread_init_status(rqstp, 0);
+
set_freezable();
while (!svc_thread_should_stop(rqstp))
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 8286edd6062d..a1d21c4be0ac 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -178,6 +178,14 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
clp->cl_net = get_net(cl_init->net);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ seqlock_init(&clp->cl_boot_lock);
+ ktime_get_real_ts64(&clp->cl_nfssvc_boot);
+ clp->cl_uuid.net = NULL;
+ clp->cl_uuid.dom = NULL;
+ spin_lock_init(&clp->cl_localio_lock);
+#endif /* CONFIG_NFS_LOCALIO */
+
clp->cl_principal = "*";
clp->cl_xprtsec = cl_init->xprtsec;
return clp;
@@ -233,6 +241,8 @@ static void pnfs_init_server(struct nfs_server *server)
*/
void nfs_free_client(struct nfs_client *clp)
{
+ nfs_local_disable(clp);
+
/* -EIO all pending I/O */
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
@@ -424,7 +434,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
list_add_tail(&new->cl_share_link,
&nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
- return rpc_ops->init_client(new, cl_init);
+ new = rpc_ops->init_client(new, cl_init);
+ if (!IS_ERR(new))
+ nfs_local_probe(new);
+ return new;
}
spin_unlock(&nn->nfs_client_lock);
@@ -997,8 +1010,8 @@ struct nfs_server *nfs_alloc_server(void)
init_waitqueue_head(&server->write_congestion_wait);
atomic_long_set(&server->writeback, 0);
- ida_init(&server->openowner_id);
- ida_init(&server->lockowner_id);
+ atomic64_set(&server->owner_ctr, 0);
+
pnfs_init_server(server);
rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
@@ -1037,8 +1050,6 @@ void nfs_free_server(struct nfs_server *server)
}
ida_free(&s_sysfs_ids, server->s_sysfs_id);
- ida_destroy(&server->lockowner_id);
- ida_destroy(&server->openowner_id);
put_cred(server->cred);
nfs_release_automount_timer();
call_rcu(&server->rcu, delayed_free);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4cb97ef41350..492cffd9d3d8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -151,7 +151,7 @@ struct nfs_cache_array {
unsigned char folio_full : 1,
folio_is_eof : 1,
cookies_are_ordered : 1;
- struct nfs_cache_array_entry array[];
+ struct nfs_cache_array_entry array[] __counted_by(size);
};
struct nfs_readdir_descriptor {
@@ -328,7 +328,8 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
goto out;
}
- cache_entry = &array->array[array->size];
+ array->size++;
+ cache_entry = &array->array[array->size - 1];
cache_entry->cookie = array->last_cookie;
cache_entry->ino = entry->ino;
cache_entry->d_type = entry->d_type;
@@ -337,7 +338,6 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
array->last_cookie = entry->cookie;
if (array->last_cookie <= cache_entry->cookie)
array->cookies_are_ordered = 0;
- array->size++;
if (entry->eof != 0)
nfs_readdir_array_set_eof(array);
out:
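
Note the ordering change here: with array[] now annotated __counted_by(size), writing array->array[array->size] before the increment would index one past the declared bound and trip FORTIFY/UBSAN bounds checking, hence size is bumped first and the new slot addressed as size - 1.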
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index b6e9aeaf4ce2..d39a1f58e18d 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -488,7 +488,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
/* Perform an asynchronous read to ds */
nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
NFS_PROTO(hdr->inode), &filelayout_read_call_ops,
- 0, RPC_TASK_SOFTCONN);
+ 0, RPC_TASK_SOFTCONN, NULL);
return PNFS_ATTEMPTED;
}
@@ -530,7 +530,7 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
/* Perform an asynchronous write */
nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
NFS_PROTO(hdr->inode), &filelayout_write_call_ops,
- sync, RPC_TASK_SOFTCONN);
+ sync, RPC_TASK_SOFTCONN, NULL);
return PNFS_ATTEMPTED;
}
@@ -1011,7 +1011,7 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
data->args.fh = fh;
return nfs_initiate_commit(ds_clnt, data, NFS_PROTO(data->inode),
&filelayout_commit_call_ops, how,
- RPC_TASK_SOFTCONN);
+ RPC_TASK_SOFTCONN, NULL);
out_err:
pnfs_generic_prepare_to_resend_writes(data);
pnfs_generic_commit_release(data);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 39ba9f4208aa..f78115c6c2c1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -11,6 +11,7 @@
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
+#include <linux/file.h>
#include <linux/sched/mm.h>
#include <linux/sunrpc/metrics.h>
@@ -162,6 +163,21 @@ decode_name(struct xdr_stream *xdr, u32 *id)
return 0;
}
+static struct nfsd_file *
+ff_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, fmode_t mode)
+{
+ if (mode & FMODE_WRITE) {
+ /*
+ * Always request read and write access since this corresponds
+ * to a rw layout.
+ */
+ mode |= FMODE_READ;
+ }
+
+ return nfs_local_open_fh(clp, cred, fh, mode);
+}
+
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
const struct nfs4_ff_layout_mirror *m2)
{
@@ -237,7 +253,7 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
- const struct cred *cred;
+ const struct cred *cred;
ff_layout_remove_mirror(mirror);
kfree(mirror->fh_versions);
@@ -1756,6 +1772,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
@@ -1802,11 +1819,18 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
hdr->args.offset = offset;
hdr->mds_offset = offset;
+ /* Start IO accounting for local read */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ);
+ if (localio) {
+ hdr->task.tk_start = ktime_get();
+ ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
+ }
+
/* Perform an asynchronous read to ds */
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_read_call_ops_v3 :
&ff_layout_read_call_ops_v4,
- 0, RPC_TASK_SOFTCONN);
+ 0, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return PNFS_ATTEMPTED;
@@ -1826,6 +1850,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
@@ -1870,11 +1895,19 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
*/
hdr->args.offset = offset;
+ /* Start IO accounting for local write */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ FMODE_READ|FMODE_WRITE);
+ if (localio) {
+ hdr->task.tk_start = ktime_get();
+ ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
+ }
+
/* Perform an asynchronous write */
nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_write_call_ops_v3 :
&ff_layout_write_call_ops_v4,
- sync, RPC_TASK_SOFTCONN);
+ sync, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return PNFS_ATTEMPTED;
@@ -1908,6 +1941,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
+ struct nfsd_file *localio;
struct nfs4_ff_layout_mirror *mirror;
const struct cred *ds_cred;
u32 idx;
@@ -1946,10 +1980,18 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
if (fh)
data->args.fh = fh;
+ /* Start IO accounting for local commit */
+ localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh,
+ FMODE_READ|FMODE_WRITE);
+ if (localio) {
+ data->task.tk_start = ktime_get();
+ ff_layout_commit_record_layoutstats_start(&data->task, data);
+ }
+
ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_commit_call_ops_v3 :
&ff_layout_commit_call_ops_v4,
- how, RPC_TASK_SOFTCONN);
+ how, RPC_TASK_SOFTCONN, localio);
put_cred(ds_cred);
return ret;
out_err:
@@ -2087,12 +2129,6 @@ static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
}
static void
-encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
-{
- WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
-}
-
-static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
const nfs4_stateid *stateid,
const struct nfs42_layoutstat_devinfo *devinfo)
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e028f5a0ef5f..e58bedfb1dcc 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -395,6 +395,12 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
/* connect success, check rsize/wsize limit */
if (!status) {
+ /*
+ * ds_clp is put in destroy_ds().
+	 * Keep ds_clp even if DS is local, so that if local IO cannot
+ * proceed somehow, we can fall back to NFS whenever we want.
+ */
+ nfs_local_probe(ds->ds_clp);
max_payload =
nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
NULL);
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index 6c9f3f6645dd..7e000d782e28 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -49,6 +49,7 @@ enum nfs_param {
Opt_bsize,
Opt_clientaddr,
Opt_cto,
+ Opt_alignwrite,
Opt_fg,
Opt_fscache,
Opt_fscache_flag,
@@ -149,6 +150,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("bsize", Opt_bsize),
fsparam_string("clientaddr", Opt_clientaddr),
fsparam_flag_no("cto", Opt_cto),
+ fsparam_flag_no("alignwrite", Opt_alignwrite),
fsparam_flag ("fg", Opt_fg),
fsparam_flag_no("fsc", Opt_fscache_flag),
fsparam_string("fsc", Opt_fscache),
@@ -592,6 +594,12 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
else
ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY;
break;
+ case Opt_alignwrite:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NO_ALIGNWRITE;
+ else
+ ctx->flags &= ~NFS_MOUNT_NO_ALIGNWRITE;
+ break;
case Opt_ac:
if (result.negated)
ctx->flags |= NFS_MOUNT_NOAC;
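
Since fsparam_flag_no() generates both forms, this lands as a pair of mount options, alignwrite (the default) and noalignwrite; e.g. `mount -o vers=3,noalignwrite server:/export /mnt` would set NFS_MOUNT_NO_ALIGNWRITE (invocation assumed for illustration, not taken from this patch).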
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 11ff2b2e060f..f13d25d95b85 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -62,7 +62,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
}
/*
- * get an NFS2/NFS3 root dentry from the root filehandle
+ * get a root dentry from the root filehandle
*/
int nfs_get_root(struct super_block *s, struct fs_context *fc)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b4914a11c3c2..542c7d97b235 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2461,35 +2461,54 @@ static void nfs_destroy_inodecache(void)
kmem_cache_destroy(nfs_inode_cachep);
}
+struct workqueue_struct *nfslocaliod_workqueue;
struct workqueue_struct *nfsiod_workqueue;
EXPORT_SYMBOL_GPL(nfsiod_workqueue);
/*
- * start up the nfsiod workqueue
+ * Destroy the nfsiod workqueues
*/
-static int nfsiod_start(void)
+static void nfsiod_stop(void)
{
struct workqueue_struct *wq;
- dprintk("RPC: creating workqueue nfsiod\n");
- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
- if (wq == NULL)
- return -ENOMEM;
- nfsiod_workqueue = wq;
- return 0;
+
+ wq = nfsiod_workqueue;
+ if (wq != NULL) {
+ nfsiod_workqueue = NULL;
+ destroy_workqueue(wq);
+ }
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ wq = nfslocaliod_workqueue;
+ if (wq != NULL) {
+ nfslocaliod_workqueue = NULL;
+ destroy_workqueue(wq);
+ }
+#endif /* CONFIG_NFS_LOCALIO */
}
/*
- * Destroy the nfsiod workqueue
+ * Start the nfsiod workqueues
*/
-static void nfsiod_stop(void)
+static int nfsiod_start(void)
{
- struct workqueue_struct *wq;
-
- wq = nfsiod_workqueue;
- if (wq == NULL)
- return;
- nfsiod_workqueue = NULL;
- destroy_workqueue(wq);
+ dprintk("RPC: creating workqueue nfsiod\n");
+ nfsiod_workqueue = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (nfsiod_workqueue == NULL)
+ return -ENOMEM;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ /*
+ * localio writes need to use a normal (non-memreclaim) workqueue.
+	 * When free space runs low, XFS calls flush_work() on a non-memreclaim
+	 * workqueue, which causes a priority inversion problem.
+ */
+ dprintk("RPC: creating workqueue nfslocaliod\n");
+ nfslocaliod_workqueue = alloc_workqueue("nfslocaliod", WQ_UNBOUND, 0);
+ if (unlikely(nfslocaliod_workqueue == NULL)) {
+ nfsiod_stop();
+ return -ENOMEM;
+ }
+#endif /* CONFIG_NFS_LOCALIO */
+ return 0;
}
unsigned int nfs_net_id;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 5902a9beca1f..430733e3eff2 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -9,6 +9,7 @@
#include <linux/crc32.h>
#include <linux/sunrpc/addr.h>
#include <linux/nfs_page.h>
+#include <linux/nfslocalio.h>
#include <linux/wait_bit.h>
#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
@@ -308,7 +309,8 @@ void nfs_pgio_header_free(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
- const struct rpc_call_ops *call_ops, int how, int flags);
+ const struct rpc_call_ops *call_ops, int how, int flags,
+ struct nfsd_file *localio);
void nfs_free_request(struct nfs_page *req);
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc);
@@ -438,6 +440,7 @@ int nfs_check_flags(int);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
+extern struct workqueue_struct *nfslocaliod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_free_inode(struct inode *);
extern int nfs_write_inode(struct inode *, struct writeback_control *);
@@ -449,6 +452,51 @@ extern void nfs_set_cache_invalid(struct inode *inode, unsigned long flags);
extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+/* localio.c */
+extern void nfs_local_disable(struct nfs_client *);
+extern void nfs_local_probe(struct nfs_client *);
+extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *,
+ const struct cred *,
+ struct nfs_fh *,
+ const fmode_t);
+extern int nfs_local_doio(struct nfs_client *,
+ struct nfsd_file *,
+ struct nfs_pgio_header *,
+ const struct rpc_call_ops *);
+extern int nfs_local_commit(struct nfsd_file *,
+ struct nfs_commit_data *,
+ const struct rpc_call_ops *, int);
+extern bool nfs_server_is_local(const struct nfs_client *clp);
+
+#else /* CONFIG_NFS_LOCALIO */
+static inline void nfs_local_disable(struct nfs_client *clp) {}
+static inline void nfs_local_probe(struct nfs_client *clp) {}
+static inline struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, const fmode_t mode)
+{
+ return NULL;
+}
+static inline int nfs_local_doio(struct nfs_client *clp,
+ struct nfsd_file *localio,
+ struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ return -EINVAL;
+}
+static inline int nfs_local_commit(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ return -EINVAL;
+}
+static inline bool nfs_server_is_local(const struct nfs_client *clp)
+{
+ return false;
+}
+#endif /* CONFIG_NFS_LOCALIO */
+
/* super.c */
extern const struct super_operations nfs_sops;
bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
@@ -505,7 +553,6 @@ extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
struct nfs_open_context *ctx,
struct folio *folio);
extern void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio);
-extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
/* super.c */
@@ -528,7 +575,8 @@ extern int nfs_initiate_commit(struct rpc_clnt *clnt,
struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
- int how, int flags);
+ int how, int flags,
+ struct nfsd_file *localio);
extern void nfs_init_commit(struct nfs_commit_data *data,
struct list_head *head,
struct pnfs_layout_segment *lseg,
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
new file mode 100644
index 000000000000..c29cdf51c458
--- /dev/null
+++ b/fs/nfs/localio.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NFS client support for local clients to bypass network stack
+ *
+ * Copyright (C) 2014 Weston Andros Adamson <dros@primarydata.com>
+ * Copyright (C) 2019 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/vfs.h>
+#include <linux/file.h>
+#include <linux/inet.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/nfs_common.h>
+#include <linux/nfslocalio.h>
+#include <linux/bvec.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+
+#include "internal.h"
+#include "pnfs.h"
+#include "nfstrace.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+struct nfs_local_kiocb {
+ struct kiocb kiocb;
+ struct bio_vec *bvec;
+ struct nfs_pgio_header *hdr;
+ struct work_struct work;
+ struct nfsd_file *localio;
+};
+
+struct nfs_local_fsync_ctx {
+ struct nfsd_file *localio;
+ struct nfs_commit_data *data;
+ struct work_struct work;
+ struct kref kref;
+ struct completion *done;
+};
+static void nfs_local_fsync_work(struct work_struct *work);
+
+static bool localio_enabled __read_mostly = true;
+module_param(localio_enabled, bool, 0644);
+
+static inline bool nfs_client_is_local(const struct nfs_client *clp)
+{
+ return !!test_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+}
+
+bool nfs_server_is_local(const struct nfs_client *clp)
+{
+ return nfs_client_is_local(clp) && localio_enabled;
+}
+EXPORT_SYMBOL_GPL(nfs_server_is_local);
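
Because localio.c is linked into the nfs module (see the Makefile hunk above), the localio_enabled parameter should surface as /sys/module/nfs/parameters/localio_enabled and, given the 0644 mode, be writable at runtime to flip this check.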
+
+/*
+ * UUID_IS_LOCAL XDR functions
+ */
+
+static void localio_xdr_enc_uuidargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const u8 *uuid = data;
+
+ encode_opaque_fixed(xdr, uuid, UUID_SIZE);
+}
+
+static int localio_xdr_dec_uuidres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ void *result)
+{
+ /* void return */
+ return 0;
+}
+
+static const struct rpc_procinfo nfs_localio_procedures[] = {
+ [LOCALIOPROC_UUID_IS_LOCAL] = {
+ .p_proc = LOCALIOPROC_UUID_IS_LOCAL,
+ .p_encode = localio_xdr_enc_uuidargs,
+ .p_decode = localio_xdr_dec_uuidres,
+ .p_arglen = XDR_QUADLEN(UUID_SIZE),
+ .p_replen = 0,
+ .p_statidx = LOCALIOPROC_UUID_IS_LOCAL,
+ .p_name = "UUID_IS_LOCAL",
+ },
+};
+
+static unsigned int nfs_localio_counts[ARRAY_SIZE(nfs_localio_procedures)];
+static const struct rpc_version nfslocalio_version1 = {
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(nfs_localio_procedures),
+ .procs = nfs_localio_procedures,
+ .counts = nfs_localio_counts,
+};
+
+static const struct rpc_version *nfslocalio_version[] = {
+ [1] = &nfslocalio_version1,
+};
+
+extern const struct rpc_program nfslocalio_program;
+static struct rpc_stat nfslocalio_rpcstat = { &nfslocalio_program };
+
+const struct rpc_program nfslocalio_program = {
+ .name = "nfslocalio",
+ .number = NFS_LOCALIO_PROGRAM,
+ .nrvers = ARRAY_SIZE(nfslocalio_version),
+ .version = nfslocalio_version,
+ .stats = &nfslocalio_rpcstat,
+};
+
+/*
+ * nfs_local_enable - enable local i/o for an nfs_client
+ */
+static void nfs_local_enable(struct nfs_client *clp)
+{
+ spin_lock(&clp->cl_localio_lock);
+ set_bit(NFS_CS_LOCAL_IO, &clp->cl_flags);
+ trace_nfs_local_enable(clp);
+ spin_unlock(&clp->cl_localio_lock);
+}
+
+/*
+ * nfs_local_disable - disable local i/o for an nfs_client
+ */
+void nfs_local_disable(struct nfs_client *clp)
+{
+ spin_lock(&clp->cl_localio_lock);
+ if (test_and_clear_bit(NFS_CS_LOCAL_IO, &clp->cl_flags)) {
+ trace_nfs_local_disable(clp);
+ nfs_uuid_invalidate_one_client(&clp->cl_uuid);
+ }
+ spin_unlock(&clp->cl_localio_lock);
+}
+
+/*
+ * nfs_init_localioclient - Initialise an NFS localio client connection
+ */
+static struct rpc_clnt *nfs_init_localioclient(struct nfs_client *clp)
+{
+ struct rpc_clnt *rpcclient_localio;
+
+ rpcclient_localio = rpc_bind_new_program(clp->cl_rpcclient,
+ &nfslocalio_program, 1);
+
+ dprintk_rcu("%s: server (%s) %s NFS LOCALIO.\n",
+ __func__, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+ (IS_ERR(rpcclient_localio) ? "does not support" : "supports"));
+
+ return rpcclient_localio;
+}
+
+static bool nfs_server_uuid_is_local(struct nfs_client *clp)
+{
+ u8 uuid[UUID_SIZE];
+ struct rpc_message msg = {
+ .rpc_argp = &uuid,
+ };
+ struct rpc_clnt *rpcclient_localio;
+ int status;
+
+ rpcclient_localio = nfs_init_localioclient(clp);
+ if (IS_ERR(rpcclient_localio))
+ return false;
+
+ export_uuid(uuid, &clp->cl_uuid.uuid);
+
+ msg.rpc_proc = &nfs_localio_procedures[LOCALIOPROC_UUID_IS_LOCAL];
+ status = rpc_call_sync(rpcclient_localio, &msg, 0);
+ dprintk("%s: NFS reply UUID_IS_LOCAL: status=%d\n",
+ __func__, status);
+ rpc_shutdown_client(rpcclient_localio);
+
+ /* Server is only local if it initialized required struct members */
+ if (status || !clp->cl_uuid.net || !clp->cl_uuid.dom)
+ return false;
+
+ return true;
+}
+
+/*
+ * nfs_local_probe - probe local i/o support for an nfs_server and nfs_client
+ * - called after alloc_client and init_client (so cl_rpcclient exists)
+ * - this function is idempotent; it can be called for old or new clients
+ */
+void nfs_local_probe(struct nfs_client *clp)
+{
+ /* Disallow localio if disabled via sysfs or AUTH_SYS isn't used */
+ if (!localio_enabled ||
+ clp->cl_rpcclient->cl_auth->au_flavor != RPC_AUTH_UNIX) {
+ nfs_local_disable(clp);
+ return;
+ }
+
+ if (nfs_client_is_local(clp)) {
+ /* If already enabled, disable and re-enable */
+ nfs_local_disable(clp);
+ }
+
+ nfs_uuid_begin(&clp->cl_uuid);
+ if (nfs_server_uuid_is_local(clp))
+ nfs_local_enable(clp);
+ nfs_uuid_end(&clp->cl_uuid);
+}
+EXPORT_SYMBOL_GPL(nfs_local_probe);
+
+/*
+ * nfs_local_open_fh - open a local filehandle in terms of nfsd_file
+ *
+ * Returns a pointer to a struct nfsd_file or NULL
+ */
+struct nfsd_file *
+nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
+ struct nfs_fh *fh, const fmode_t mode)
+{
+ struct nfsd_file *localio;
+ int status;
+
+ if (!nfs_server_is_local(clp))
+ return NULL;
+ if (mode & ~(FMODE_READ | FMODE_WRITE))
+ return NULL;
+
+ localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient,
+ cred, fh, mode);
+ if (IS_ERR(localio)) {
+ status = PTR_ERR(localio);
+ trace_nfs_local_open_fh(fh, mode, status);
+ switch (status) {
+ case -ENOMEM:
+ case -ENXIO:
+ case -ENOENT:
+ /* Revalidate localio, will disable if unsupported */
+ nfs_local_probe(clp);
+ }
+ return NULL;
+ }
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfs_local_open_fh);
+
+static struct bio_vec *
+nfs_bvec_alloc_and_import_pagevec(struct page **pagevec,
+ unsigned int npages, gfp_t flags)
+{
+ struct bio_vec *bvec, *p;
+
+ bvec = kmalloc_array(npages, sizeof(*bvec), flags);
+ if (bvec != NULL) {
+ for (p = bvec; npages > 0; p++, pagevec++, npages--) {
+ p->bv_page = *pagevec;
+ p->bv_len = PAGE_SIZE;
+ p->bv_offset = 0;
+ }
+ }
+ return bvec;
+}
+
+static void
+nfs_local_iocb_free(struct nfs_local_kiocb *iocb)
+{
+ kfree(iocb->bvec);
+ kfree(iocb);
+}
+
+static struct nfs_local_kiocb *
+nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio, gfp_t flags)
+{
+ struct nfs_local_kiocb *iocb;
+
+ iocb = kmalloc(sizeof(*iocb), flags);
+ if (iocb == NULL)
+ return NULL;
+ iocb->bvec = nfs_bvec_alloc_and_import_pagevec(hdr->page_array.pagevec,
+ hdr->page_array.npages, flags);
+ if (iocb->bvec == NULL) {
+ kfree(iocb);
+ return NULL;
+ }
+ init_sync_kiocb(&iocb->kiocb, nfs_to->nfsd_file_file(localio));
+ iocb->kiocb.ki_pos = hdr->args.offset;
+ iocb->localio = localio;
+ iocb->hdr = hdr;
+ iocb->kiocb.ki_flags &= ~IOCB_APPEND;
+ return iocb;
+}
+
+static void
+nfs_local_iter_init(struct iov_iter *i, struct nfs_local_kiocb *iocb, int dir)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ iov_iter_bvec(i, dir, iocb->bvec, hdr->page_array.npages,
+ hdr->args.count + hdr->args.pgbase);
+ if (hdr->args.pgbase != 0)
+ iov_iter_advance(i, hdr->args.pgbase);
+}
+
+static void
+nfs_local_hdr_release(struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ call_ops->rpc_call_done(&hdr->task, hdr);
+ call_ops->rpc_release(hdr);
+}
+
+static void
+nfs_local_pgio_init(struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ hdr->task.tk_ops = call_ops;
+ if (!hdr->task.tk_start)
+ hdr->task.tk_start = ktime_get();
+}
+
+static void
+nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
+{
+ if (status >= 0) {
+ hdr->res.count = status;
+ hdr->res.op_status = NFS4_OK;
+ hdr->task.tk_status = 0;
+ } else {
+ hdr->res.op_status = nfs4_stat_to_errno(status);
+ hdr->task.tk_status = status;
+ }
+}
+
+static void
+nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+
+ nfs_to->nfsd_file_put_local(iocb->localio);
+ nfs_local_iocb_free(iocb);
+ nfs_local_hdr_release(hdr, hdr->task.tk_ops);
+}
+
+static void
+nfs_local_read_done(struct nfs_local_kiocb *iocb, long status)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct file *filp = iocb->kiocb.ki_filp;
+
+ nfs_local_pgio_done(hdr, status);
+
+ if (hdr->res.count != hdr->args.count ||
+ hdr->args.offset + hdr->res.count >= i_size_read(file_inode(filp)))
+ hdr->res.eof = true;
+
+ dprintk("%s: read %ld bytes eof %d.\n", __func__,
+ status > 0 ? status : 0, hdr->res.eof);
+}
+
+static void nfs_local_call_read(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ const struct cred *save_cred;
+ struct iov_iter iter;
+ ssize_t status;
+
+ save_cred = override_creds(filp->f_cred);
+
+ nfs_local_iter_init(&iter, iocb, READ);
+
+ status = filp->f_op->read_iter(&iocb->kiocb, &iter);
+ WARN_ON_ONCE(status == -EIOCBQUEUED);
+
+ nfs_local_read_done(iocb, status);
+ nfs_local_pgio_release(iocb);
+
+ revert_creds(save_cred);
+}
+
+static int
+nfs_do_local_read(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_local_kiocb *iocb;
+
+ dprintk("%s: vfs_read count=%u pos=%llu\n",
+ __func__, hdr->args.count, hdr->args.offset);
+
+ iocb = nfs_local_iocb_alloc(hdr, localio, GFP_KERNEL);
+ if (iocb == NULL)
+ return -ENOMEM;
+
+ nfs_local_pgio_init(hdr, call_ops);
+ hdr->res.eof = false;
+
+ INIT_WORK(&iocb->work, nfs_local_call_read);
+ queue_work(nfslocaliod_workqueue, &iocb->work);
+
+ return 0;
+}
+
+static void
+nfs_copy_boot_verifier(struct nfs_write_verifier *verifier, struct inode *inode)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ u32 *verf = (u32 *)verifier->data;
+ int seq = 0;
+
+ do {
+ read_seqbegin_or_lock(&clp->cl_boot_lock, &seq);
+ verf[0] = (u32)clp->cl_nfssvc_boot.tv_sec;
+ verf[1] = (u32)clp->cl_nfssvc_boot.tv_nsec;
+ } while (need_seqretry(&clp->cl_boot_lock, seq));
+ done_seqretry(&clp->cl_boot_lock, seq);
+}
+
+static void
+nfs_reset_boot_verifier(struct inode *inode)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+
+ write_seqlock(&clp->cl_boot_lock);
+ ktime_get_real_ts64(&clp->cl_nfssvc_boot);
+ write_sequnlock(&clp->cl_boot_lock);
+}
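
The write verifier derived from cl_nfssvc_boot follows the usual NFS COMMIT semantics: if it changes between WRITE and COMMIT, the client must resend unstable data, which is why nfs_reset_boot_verifier() is invoked on local write and commit failures. A hedged sketch of the client-side consequence (helper name hypothetical):

/* If the verifier no longer matches the one saved at WRITE time,
 * the pages cannot be marked clean and must be transmitted again. */
if (memcmp(&saved_verf.verifier, &commit_verf.verifier,
	   sizeof(saved_verf.verifier)) != 0)
	resend_unstable_writes(inode);	/* hypothetical helper */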
+
+static void
+nfs_set_local_verifier(struct inode *inode,
+ struct nfs_writeverf *verf,
+ enum nfs3_stable_how how)
+{
+ nfs_copy_boot_verifier(&verf->verifier, inode);
+ verf->committed = how;
+}
+
+/* Factored out from fs/nfsd/vfs.h:fh_getattr() */
+static int __vfs_getattr(struct path *p, struct kstat *stat, int version)
+{
+ u32 request_mask = STATX_BASIC_STATS;
+
+ if (version == 4)
+ request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
+ return vfs_getattr(p, stat, request_mask, AT_STATX_SYNC_AS_STAT);
+}
+
+/* Copied from fs/nfsd/nfsfh.c:nfsd4_change_attribute() */
+static u64 __nfsd4_change_attribute(const struct kstat *stat,
+ const struct inode *inode)
+{
+ u64 chattr;
+
+ if (stat->result_mask & STATX_CHANGE_COOKIE) {
+ chattr = stat->change_cookie;
+ if (S_ISREG(inode->i_mode) &&
+ !(stat->attributes & STATX_ATTR_CHANGE_MONOTONIC)) {
+ chattr += (u64)stat->ctime.tv_sec << 30;
+ chattr += stat->ctime.tv_nsec;
+ }
+ } else {
+ chattr = time_to_chattr(&stat->ctime);
+ }
+ return chattr;
+}
+
+static void nfs_local_vfs_getattr(struct nfs_local_kiocb *iocb)
+{
+ struct kstat stat;
+ struct file *filp = iocb->kiocb.ki_filp;
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct nfs_fattr *fattr = hdr->res.fattr;
+ int version = NFS_PROTO(hdr->inode)->version;
+
+ if (unlikely(!fattr) || __vfs_getattr(&filp->f_path, &stat, version))
+ return;
+
+ fattr->valid = (NFS_ATTR_FATTR_FILEID |
+ NFS_ATTR_FATTR_CHANGE |
+ NFS_ATTR_FATTR_SIZE |
+ NFS_ATTR_FATTR_ATIME |
+ NFS_ATTR_FATTR_MTIME |
+ NFS_ATTR_FATTR_CTIME |
+ NFS_ATTR_FATTR_SPACE_USED);
+
+ fattr->fileid = stat.ino;
+ fattr->size = stat.size;
+ fattr->atime = stat.atime;
+ fattr->mtime = stat.mtime;
+ fattr->ctime = stat.ctime;
+ if (version == 4) {
+ fattr->change_attr =
+ __nfsd4_change_attribute(&stat, file_inode(filp));
+ } else
+ fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
+ fattr->du.nfs3.used = stat.blocks << 9;
+}
+
+static void
+nfs_local_write_done(struct nfs_local_kiocb *iocb, long status)
+{
+ struct nfs_pgio_header *hdr = iocb->hdr;
+ struct inode *inode = hdr->inode;
+
+ dprintk("%s: wrote %ld bytes.\n", __func__, status > 0 ? status : 0);
+
+ /* Handle short writes as if they are ENOSPC */
+ if (status > 0 && status < hdr->args.count) {
+ hdr->mds_offset += status;
+ hdr->args.offset += status;
+ hdr->args.pgbase += status;
+ hdr->args.count -= status;
+ nfs_set_pgio_error(hdr, -ENOSPC, hdr->args.offset);
+ status = -ENOSPC;
+ }
+ if (status < 0)
+ nfs_reset_boot_verifier(inode);
+ else if (nfs_should_remove_suid(inode)) {
+ /* Deal with the suid/sgid bit corner case */
+ spin_lock(&inode->i_lock);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ spin_unlock(&inode->i_lock);
+ }
+ nfs_local_pgio_done(hdr, status);
+}
+
+static void nfs_local_call_write(struct work_struct *work)
+{
+ struct nfs_local_kiocb *iocb =
+ container_of(work, struct nfs_local_kiocb, work);
+ struct file *filp = iocb->kiocb.ki_filp;
+ unsigned long old_flags = current->flags;
+ const struct cred *save_cred;
+ struct iov_iter iter;
+ ssize_t status;
+
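+ /*
+ * As nfsd does for its own threads, flag this worker so that
+ * writeback throttling and memory reclaim cannot recurse into the
+ * filesystem and deadlock against the write issued below.
+ */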
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ save_cred = override_creds(filp->f_cred);
+
+ nfs_local_iter_init(&iter, iocb, WRITE);
+
+ file_start_write(filp);
+ status = filp->f_op->write_iter(&iocb->kiocb, &iter);
+ file_end_write(filp);
+ WARN_ON_ONCE(status == -EIOCBQUEUED);
+
+ nfs_local_write_done(iocb, status);
+ nfs_local_vfs_getattr(iocb);
+ nfs_local_pgio_release(iocb);
+
+ revert_creds(save_cred);
+ current->flags = old_flags;
+}
+
+static int
+nfs_do_local_write(struct nfs_pgio_header *hdr,
+ struct nfsd_file *localio,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_local_kiocb *iocb;
+
+ dprintk("%s: vfs_write count=%u pos=%llu %s\n",
+ __func__, hdr->args.count, hdr->args.offset,
+ (hdr->args.stable == NFS_UNSTABLE) ? "unstable" : "stable");
+
+ iocb = nfs_local_iocb_alloc(hdr, localio, GFP_NOIO);
+ if (iocb == NULL)
+ return -ENOMEM;
+
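+ /*
+ * Map the NFS stability level onto kiocb flags: NFS_DATA_SYNC wants
+ * the data flushed (IOCB_DSYNC), NFS_FILE_SYNC wants metadata too
+ * (IOCB_SYNC); NFS_UNSTABLE issues a plain buffered write.
+ */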
+ switch (hdr->args.stable) {
+ default:
+ break;
+ case NFS_DATA_SYNC:
+ iocb->kiocb.ki_flags |= IOCB_DSYNC;
+ break;
+ case NFS_FILE_SYNC:
+ iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC;
+ }
+ nfs_local_pgio_init(hdr, call_ops);
+
+ nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable);
+
+ INIT_WORK(&iocb->work, nfs_local_call_write);
+ queue_work(nfslocaliod_workqueue, &iocb->work);
+
+ return 0;
+}
+
+int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
+ struct nfs_pgio_header *hdr,
+ const struct rpc_call_ops *call_ops)
+{
+ int status = 0;
+ struct file *filp = nfs_to->nfsd_file_file(localio);
+
+ if (!hdr->args.count)
+ return 0;
+ /* Don't support filesystems without read_iter/write_iter */
+ if (!filp->f_op->read_iter || !filp->f_op->write_iter) {
+ nfs_local_disable(clp);
+ status = -EAGAIN;
+ goto out;
+ }
+
+ switch (hdr->rw_mode) {
+ case FMODE_READ:
+ status = nfs_do_local_read(hdr, localio, call_ops);
+ break;
+ case FMODE_WRITE:
+ status = nfs_do_local_write(hdr, localio, call_ops);
+ break;
+ default:
+ dprintk("%s: invalid mode: %d\n", __func__,
+ hdr->rw_mode);
+ status = -EINVAL;
+ }
+out:
+ if (status != 0) {
+ nfs_to->nfsd_file_put_local(localio);
+ hdr->task.tk_status = status;
+ nfs_local_hdr_release(hdr, call_ops);
+ }
+ return status;
+}
+
+static void
+nfs_local_init_commit(struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ data->task.tk_ops = call_ops;
+}
+
+static int
+nfs_local_run_commit(struct file *filp, struct nfs_commit_data *data)
+{
+ loff_t start = data->args.offset;
+ loff_t end = LLONG_MAX;
+
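+ /*
+ * A zero count means "commit to EOF"; an offset+count that overflows
+ * is clamped back to EOF as well.
+ */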
+ if (data->args.count > 0) {
+ end = start + data->args.count - 1;
+ if (end < start)
+ end = LLONG_MAX;
+ }
+
+ dprintk("%s: commit %llu - %llu\n", __func__, start, end);
+ return vfs_fsync_range(filp, start, end, 0);
+}
+
+static void
+nfs_local_commit_done(struct nfs_commit_data *data, int status)
+{
+ if (status >= 0) {
+ nfs_set_local_verifier(data->inode,
+ data->res.verf,
+ NFS_FILE_SYNC);
+ data->res.op_status = NFS4_OK;
+ data->task.tk_status = 0;
+ } else {
+ nfs_reset_boot_verifier(data->inode);
+ data->res.op_status = nfs4_stat_to_errno(status);
+ data->task.tk_status = status;
+ }
+}
+
+static void
+nfs_local_release_commit_data(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ nfs_to->nfsd_file_put_local(localio);
+ call_ops->rpc_call_done(&data->task, data);
+ call_ops->rpc_release(data);
+}
+
+static struct nfs_local_fsync_ctx *
+nfs_local_fsync_ctx_alloc(struct nfs_commit_data *data,
+ struct nfsd_file *localio, gfp_t flags)
+{
+ struct nfs_local_fsync_ctx *ctx = kmalloc(sizeof(*ctx), flags);
+
+ if (ctx != NULL) {
+ ctx->localio = localio;
+ ctx->data = data;
+ INIT_WORK(&ctx->work, nfs_local_fsync_work);
+ kref_init(&ctx->kref);
+ ctx->done = NULL;
+ }
+ return ctx;
+}
+
+static void
+nfs_local_fsync_ctx_kref_free(struct kref *kref)
+{
+ kfree(container_of(kref, struct nfs_local_fsync_ctx, kref));
+}
+
+static void
+nfs_local_fsync_ctx_put(struct nfs_local_fsync_ctx *ctx)
+{
+ kref_put(&ctx->kref, nfs_local_fsync_ctx_kref_free);
+}
+
+static void
+nfs_local_fsync_ctx_free(struct nfs_local_fsync_ctx *ctx)
+{
+ nfs_local_release_commit_data(ctx->localio, ctx->data,
+ ctx->data->task.tk_ops);
+ nfs_local_fsync_ctx_put(ctx);
+}
+
+static void
+nfs_local_fsync_work(struct work_struct *work)
+{
+ struct nfs_local_fsync_ctx *ctx;
+ int status;
+
+ ctx = container_of(work, struct nfs_local_fsync_ctx, work);
+
+ status = nfs_local_run_commit(nfs_to->nfsd_file_file(ctx->localio),
+ ctx->data);
+ nfs_local_commit_done(ctx->data, status);
+ if (ctx->done != NULL)
+ complete(ctx->done);
+ nfs_local_fsync_ctx_free(ctx);
+}
+
+int nfs_local_commit(struct nfsd_file *localio,
+ struct nfs_commit_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ struct nfs_local_fsync_ctx *ctx;
+
+ ctx = nfs_local_fsync_ctx_alloc(data, localio, GFP_KERNEL);
+ if (!ctx) {
+ nfs_local_commit_done(data, -ENOMEM);
+ nfs_local_release_commit_data(localio, data, call_ops);
+ return -ENOMEM;
+ }
+
+ nfs_local_init_commit(data, call_ops);
+ kref_get(&ctx->kref);
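+ /*
+ * Take an extra reference so ctx outlives the queued work; it is
+ * dropped below once the work has been queued (and, for FLUSH_SYNC,
+ * completed).
+ */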
+ if (how & FLUSH_SYNC) {
+ DECLARE_COMPLETION_ONSTACK(done);
+ ctx->done = &done;
+ queue_work(nfsiod_workqueue, &ctx->work);
+ wait_for_completion(&done);
+ } else
+ queue_work(nfsiod_workqueue, &ctx->work);
+ nfs_local_fsync_ctx_put(ctx);
+ return 0;
+}
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index c19093814296..6e75c6c2d234 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -22,14 +22,12 @@
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
@@ -64,8 +62,6 @@
#define NFS_readdirres_sz (1+NFS_pagepad_sz)
#define NFS_statfsres_sz (1+NFS_info_sz)
-static int nfs_stat_to_errno(enum nfs_stat);
-
/*
* Encode/decode NFSv2 basic data types
*
@@ -1054,70 +1050,6 @@ out_default:
return nfs_stat_to_errno(status);
}
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS_OK, 0 },
- { NFSERR_PERM, -EPERM },
- { NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
- { NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
- { NFSERR_ACCES, -EACCES },
- { NFSERR_EXIST, -EEXIST },
- { NFSERR_XDEV, -EXDEV },
- { NFSERR_NODEV, -ENODEV },
- { NFSERR_NOTDIR, -ENOTDIR },
- { NFSERR_ISDIR, -EISDIR },
- { NFSERR_INVAL, -EINVAL },
- { NFSERR_FBIG, -EFBIG },
- { NFSERR_NOSPC, -ENOSPC },
- { NFSERR_ROFS, -EROFS },
- { NFSERR_MLINK, -EMLINK },
- { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFSERR_NOTEMPTY, -ENOTEMPTY },
- { NFSERR_DQUOT, -EDQUOT },
- { NFSERR_STALE, -ESTALE },
- { NFSERR_REMOTE, -EREMOTE },
-#ifdef EWFLUSH
- { NFSERR_WFLUSH, -EWFLUSH },
-#endif
- { NFSERR_BADHANDLE, -EBADHANDLE },
- { NFSERR_NOT_SYNC, -ENOTSYNC },
- { NFSERR_BAD_COOKIE, -EBADCOOKIE },
- { NFSERR_NOTSUPP, -ENOTSUPP },
- { NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
- { NFSERR_BADTYPE, -EBADTYPE },
- { NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
-};
-
-/**
- * nfs_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs_stat_to_errno(enum nfs_stat status)
-{
- int i;
-
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == (int)status)
- return nfs_errtbl[i].errno;
- }
- dprintk("NFS: Unrecognized nfs status value: %u\n", status);
- return nfs_errtbl[i].errno;
-}
-
#define PROC(proc, argtype, restype, timer) \
[NFSPROC_##proc] = { \
.p_proc = NFSPROC_##proc, \
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 60f032be805a..4ae01c10b7e2 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -21,14 +21,13 @@
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
+#include <linux/nfs_common.h>
+
#include "nfstrace.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
/*
* Declare the space requirements for NFS arguments and replies as
* number of 32bit-words
@@ -91,8 +90,6 @@
NFS3_pagepad_sz)
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
-static int nfs3_stat_to_errno(enum nfs_stat);
-
/*
* Map file type to S_IFMT bits
*/
@@ -1406,7 +1403,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1445,7 +1442,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1495,7 +1492,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr, userns);
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1537,7 +1534,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1578,7 +1575,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1658,7 +1655,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1728,7 +1725,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1795,7 +1792,7 @@ out_default:
error = decode_wcc_data(xdr, result->dir_attr, userns);
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1835,7 +1832,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1881,7 +1878,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -1926,7 +1923,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/**
@@ -2101,7 +2098,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr, rpc_rqst_userns(req));
if (unlikely(error))
goto out;
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2167,7 +2164,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2243,7 +2240,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2304,7 +2301,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
/*
@@ -2350,7 +2347,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
#ifdef CONFIG_NFS_V3_ACL
@@ -2416,7 +2413,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2435,76 +2432,11 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs3_stat_to_errno(status);
+ return nfs_stat_to_errno(status);
}
#endif /* CONFIG_NFS_V3_ACL */
-
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static const struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS_OK, 0 },
- { NFSERR_PERM, -EPERM },
- { NFSERR_NOENT, -ENOENT },
- { NFSERR_IO, -errno_NFSERR_IO},
- { NFSERR_NXIO, -ENXIO },
-/* { NFSERR_EAGAIN, -EAGAIN }, */
- { NFSERR_ACCES, -EACCES },
- { NFSERR_EXIST, -EEXIST },
- { NFSERR_XDEV, -EXDEV },
- { NFSERR_NODEV, -ENODEV },
- { NFSERR_NOTDIR, -ENOTDIR },
- { NFSERR_ISDIR, -EISDIR },
- { NFSERR_INVAL, -EINVAL },
- { NFSERR_FBIG, -EFBIG },
- { NFSERR_NOSPC, -ENOSPC },
- { NFSERR_ROFS, -EROFS },
- { NFSERR_MLINK, -EMLINK },
- { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFSERR_NOTEMPTY, -ENOTEMPTY },
- { NFSERR_DQUOT, -EDQUOT },
- { NFSERR_STALE, -ESTALE },
- { NFSERR_REMOTE, -EREMOTE },
-#ifdef EWFLUSH
- { NFSERR_WFLUSH, -EWFLUSH },
-#endif
- { NFSERR_BADHANDLE, -EBADHANDLE },
- { NFSERR_NOT_SYNC, -ENOTSYNC },
- { NFSERR_BAD_COOKIE, -EBADCOOKIE },
- { NFSERR_NOTSUPP, -ENOTSUPP },
- { NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -EREMOTEIO },
- { NFSERR_BADTYPE, -EBADTYPE },
- { NFSERR_JUKEBOX, -EJUKEBOX },
- { -1, -EIO }
-};
-
-/**
- * nfs3_stat_to_errno - convert an NFS status code to a local errno
- * @status: NFS status code to convert
- *
- * Returns a local errno value, or -EIO if the NFS status code is
- * not recognized. This function is used jointly by NFSv2 and NFSv3.
- */
-static int nfs3_stat_to_errno(enum nfs_stat status)
-{
- int i;
-
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == (int)status)
- return nfs_errtbl[i].errno;
- }
- dprintk("NFS: Unrecognized nfs status value: %u\n", status);
- return nfs_errtbl[i].errno;
-}
-
-
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c2045a2a9d0f..7d383d29a995 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -83,7 +83,7 @@ struct nfs4_minor_version_ops {
#define NFS_SEQID_CONFIRMED 1
struct nfs_seqid_counter {
ktime_t create_time;
- int owner_id;
+ u64 owner_id;
int flags;
u32 counter;
spinlock_t lock; /* Protects the list */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b8ffbe52ba15..cd2fbde2e6d7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3904,6 +3904,18 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL)
+#define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \
+ (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS)
+static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res)
+{
+ u32 share_access_want = res->open_caps.oa_share_access_want[0];
+ u32 attr_bitmask = res->attr_bitmask[2];
+
+ return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) &&
+ ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) ==
+ FATTR4_WORD2_NFS42_TIME_DELEG_MASK);
+}
+
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
u32 minorversion = server->nfs_client->cl_minorversion;
@@ -3982,8 +3994,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
#endif
if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
server->caps |= NFS_CAP_FS_LOCATIONS;
- if (res.attr_bitmask[2] & FATTR4_WORD2_TIME_DELEG_MODIFY)
- server->caps |= NFS_CAP_DELEGTIME;
if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -4011,6 +4021,8 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
if (res.open_caps.oa_share_access_want[0] &
NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
server->caps |= NFS_CAP_OPEN_XOR;
+ if (nfs4_server_delegtime_capable(&res))
+ server->caps |= NFS_CAP_DELEGTIME;
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 877f682b45f2..581864a15888 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -501,11 +501,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
sp = kzalloc(sizeof(*sp), gfp_flags);
if (!sp)
return NULL;
- sp->so_seqid.owner_id = ida_alloc(&server->openowner_id, gfp_flags);
- if (sp->so_seqid.owner_id < 0) {
- kfree(sp);
- return NULL;
- }
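+ /*
+ * A monotonically increasing 64-bit counter will not wrap in
+ * practice, so ids need never be freed or reused (unlike the
+ * ida-based scheme this replaces).
+ */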
+ sp->so_seqid.owner_id = atomic64_inc_return(&server->owner_ctr);
sp->so_server = server;
sp->so_cred = get_cred(cred);
spin_lock_init(&sp->so_lock);
@@ -536,7 +532,6 @@ static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
nfs4_destroy_seqid_counter(&sp->so_seqid);
put_cred(sp->so_cred);
- ida_free(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
kfree(sp);
}
@@ -879,19 +874,13 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
refcount_set(&lsp->ls_count, 1);
lsp->ls_state = state;
lsp->ls_owner = owner;
- lsp->ls_seqid.owner_id = ida_alloc(&server->lockowner_id, GFP_KERNEL_ACCOUNT);
- if (lsp->ls_seqid.owner_id < 0)
- goto out_free;
+ lsp->ls_seqid.owner_id = atomic64_inc_return(&server->owner_ctr);
INIT_LIST_HEAD(&lsp->ls_locks);
return lsp;
-out_free:
- kfree(lsp);
- return NULL;
}
void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
- ida_free(&server->lockowner_id, lsp->ls_seqid.owner_id);
nfs4_destroy_seqid_counter(&lsp->ls_seqid);
kfree(lsp);
}
@@ -1957,6 +1946,7 @@ restart:
set_bit(ops->owner_flag_bit, &sp->so_flags);
nfs4_put_state_owner(sp);
status = nfs4_recovery_handle_error(clp, status);
+ nfs4_free_state_owners(&freeme);
return (status != 0) ? status : -EAGAIN;
}
@@ -2023,6 +2013,12 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
nfs_mark_client_ready(clp, -EPERM);
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
return -EPERM;
+ case -ETIMEDOUT:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+ nfs_mark_client_ready(clp, -EIO);
+ return -EIO;
+ }
+ fallthrough;
case -EACCES:
case -NFS4ERR_DELAY:
case -EAGAIN:
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 7704a4509676..e8ac3f615f93 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -52,6 +52,7 @@
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs_common.h>
#include "nfs4_fs.h"
#include "nfs4trace.h"
@@ -63,11 +64,7 @@
#define NFSDBG_FACILITY NFSDBG_XDR
-/* Mapping from NFS error code to "errno" error code. */
-#define errno_NFSERR_IO EIO
-
struct compound_hdr;
-static int nfs4_stat_to_errno(int);
static void encode_layoutget(struct xdr_stream *xdr,
const struct nfs4_layoutget_args *args,
struct compound_hdr *hdr);
@@ -975,11 +972,6 @@ static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
return p;
}
-static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
-{
- WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
-}
-
static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
WARN_ON_ONCE(xdr_stream_encode_opaque(xdr, str, len) < 0);
@@ -1424,12 +1416,12 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
*/
encode_nfs4_seqid(xdr, arg->seqid);
encode_share_access(xdr, arg->share_access);
- p = reserve_space(xdr, 36);
+ p = reserve_space(xdr, 40);
p = xdr_encode_hyper(p, arg->clientid);
- *p++ = cpu_to_be32(24);
+ *p++ = cpu_to_be32(28);
p = xdr_encode_opaque_fixed(p, "open id:", 8);
*p++ = cpu_to_be32(arg->server->s_dev);
- *p++ = cpu_to_be32(arg->id.uniquifier);
+ p = xdr_encode_hyper(p, arg->id.uniquifier);
xdr_encode_hyper(p, arg->id.create_time);
}
@@ -3447,7 +3439,7 @@ static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, ui
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
}
- dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: link support=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -3465,7 +3457,7 @@ static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap,
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
}
- dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: symlink support=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -3607,7 +3599,7 @@ static int decode_attr_case_insensitive(struct xdr_stream *xdr, uint32_t *bitmap
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_CASE_INSENSITIVE;
}
- dprintk("%s: case_insensitive=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: case_insensitive=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -3625,7 +3617,7 @@ static int decode_attr_case_preserving(struct xdr_stream *xdr, uint32_t *bitmap,
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_CASE_PRESERVING;
}
- dprintk("%s: case_preserving=%s\n", __func__, *res == 0 ? "false" : "true");
+ dprintk("%s: case_preserving=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -4333,8 +4325,7 @@ static int decode_attr_xattrsupport(struct xdr_stream *xdr, uint32_t *bitmap,
*res = be32_to_cpup(p);
bitmap[2] &= ~FATTR4_WORD2_XATTR_SUPPORT;
}
- dprintk("%s: XATTR support=%s\n", __func__,
- *res == 0 ? "false" : "true");
+ dprintk("%s: XATTR support=%s\n", __func__, str_false_true(*res == 0));
return 0;
}
@@ -4409,14 +4400,6 @@ static int decode_access(struct xdr_stream *xdr, u32 *supported, u32 *access)
return 0;
}
-static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
-{
- ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
- if (unlikely(ret < 0))
- return -EIO;
- return 0;
-}
-
static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
@@ -7621,72 +7604,6 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
return 0;
}
-/*
- * We need to translate between nfs status return values and
- * the local errno values which may not be the same.
- */
-static struct {
- int stat;
- int errno;
-} nfs_errtbl[] = {
- { NFS4_OK, 0 },
- { NFS4ERR_PERM, -EPERM },
- { NFS4ERR_NOENT, -ENOENT },
- { NFS4ERR_IO, -errno_NFSERR_IO},
- { NFS4ERR_NXIO, -ENXIO },
- { NFS4ERR_ACCESS, -EACCES },
- { NFS4ERR_EXIST, -EEXIST },
- { NFS4ERR_XDEV, -EXDEV },
- { NFS4ERR_NOTDIR, -ENOTDIR },
- { NFS4ERR_ISDIR, -EISDIR },
- { NFS4ERR_INVAL, -EINVAL },
- { NFS4ERR_FBIG, -EFBIG },
- { NFS4ERR_NOSPC, -ENOSPC },
- { NFS4ERR_ROFS, -EROFS },
- { NFS4ERR_MLINK, -EMLINK },
- { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
- { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
- { NFS4ERR_DQUOT, -EDQUOT },
- { NFS4ERR_STALE, -ESTALE },
- { NFS4ERR_BADHANDLE, -EBADHANDLE },
- { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
- { NFS4ERR_NOTSUPP, -ENOTSUPP },
- { NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -EREMOTEIO },
- { NFS4ERR_BADTYPE, -EBADTYPE },
- { NFS4ERR_LOCKED, -EAGAIN },
- { NFS4ERR_SYMLINK, -ELOOP },
- { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
- { NFS4ERR_DEADLOCK, -EDEADLK },
- { NFS4ERR_NOXATTR, -ENODATA },
- { NFS4ERR_XATTR2BIG, -E2BIG },
- { -1, -EIO }
-};
-
-/*
- * Convert an NFS error code to a local one.
- * This one is used jointly by NFSv2 and NFSv3.
- */
-static int
-nfs4_stat_to_errno(int stat)
-{
- int i;
- for (i = 0; nfs_errtbl[i].stat != -1; i++) {
- if (nfs_errtbl[i].stat == stat)
- return nfs_errtbl[i].errno;
- }
- if (stat <= 10000 || stat > 10100) {
- /* The server is looney tunes. */
- return -EREMOTEIO;
- }
- /* If we cannot translate the error, the recovery routines should
- * handle it.
- * Note: remaining NFSv4 error codes have values > 10000, so should
- * not conflict with native Linux error codes.
- */
- return -stat;
-}
-
#ifdef CONFIG_NFS_V4_2
#include "nfs42xdr.c"
#endif /* CONFIG_NFS_V4_2 */
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 352fdaed4075..1eab98c277fa 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -1685,6 +1685,67 @@ TRACE_EVENT(nfs_mount_path,
TP_printk("path='%s'", __get_str(path))
);
+TRACE_EVENT(nfs_local_open_fh,
+ TP_PROTO(
+ const struct nfs_fh *fh,
+ fmode_t fmode,
+ int error
+ ),
+
+ TP_ARGS(fh, fmode, error),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __field(u32, fhandle)
+ __field(unsigned int, fmode)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ __entry->fmode = (__force unsigned int)fmode;
+ ),
+
+ TP_printk(
+ "error=%d fhandle=0x%08x mode=%s",
+ __entry->error,
+ __entry->fhandle,
+ show_fs_fmode_flags(__entry->fmode)
+ )
+);
+
+DECLARE_EVENT_CLASS(nfs_local_client_event,
+ TP_PROTO(
+ const struct nfs_client *clp
+ ),
+
+ TP_ARGS(clp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, protocol)
+ __string(server, clp->cl_hostname)
+ ),
+
+ TP_fast_assign(
+ __entry->protocol = clp->rpc_ops->version;
+ __assign_str(server);
+ ),
+
+ TP_printk(
+ "server=%s NFSv%u", __get_str(server), __entry->protocol
+ )
+);
+
+#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \
+ DEFINE_EVENT(nfs_local_client_event, name, \
+ TP_PROTO( \
+ const struct nfs_client *clp \
+ ), \
+ TP_ARGS(clp))
+
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_enable);
+DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_disable);
+
DECLARE_EVENT_CLASS(nfs_xdr_event,
TP_PROTO(
const struct xdr_stream *xdr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 04124f226665..e27c07bd8929 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -731,7 +731,8 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
- const struct rpc_call_ops *call_ops, int how, int flags)
+ const struct rpc_call_ops *call_ops, int how, int flags,
+ struct nfsd_file *localio)
{
struct rpc_task *task;
struct rpc_message msg = {
@@ -761,6 +762,10 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
hdr->args.count,
(unsigned long long)hdr->args.offset);
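+ /* If a local open succeeded, bypass the RPC layer and perform the
+ * I/O directly against the locally-accessible file.
+ */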
+ if (localio)
+ return nfs_local_doio(NFS_SERVER(hdr->inode)->nfs_client,
+ localio, hdr, call_ops);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -953,6 +958,12 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
ret = nfs_generic_pgio(desc, hdr);
if (ret == 0) {
+ struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;
+
+ struct nfsd_file *localio =
+ nfs_local_open_fh(clp, hdr->cred,
+ hdr->args.fh, hdr->args.context->mode);
+
if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
@@ -961,7 +972,8 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
NFS_PROTO(hdr->inode),
desc->pg_rpc_callops,
desc->pg_ioflags,
- RPC_TASK_CRED_NOREF | task_flags);
+ RPC_TASK_CRED_NOREF | task_flags,
+ localio);
}
return ret;
}
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index a74ee69a2fa6..dbef837e871a 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -490,7 +490,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
nfs_initiate_commit(NFS_CLIENT(inode), data,
NFS_PROTO(data->inode),
data->mds_ops, how,
- RPC_TASK_CRED_NOREF);
+ RPC_TASK_CRED_NOREF, NULL);
} else {
nfs_init_commit(data, NULL, data->lseg, cinfo);
initiate_commit(data, how);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a6103333b666..81bd1b9aba17 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -48,8 +48,7 @@ static struct nfs_pgio_header *nfs_readhdr_alloc(void)
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
- if (rhdr->res.scratch != NULL)
- kfree(rhdr->res.scratch);
+ kfree(rhdr->res.scratch);
kmem_cache_free(nfs_rdata_cachep, rhdr);
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 97b386032b71..9723b6c53397 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -551,6 +551,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
else
seq_puts(m, ",local_lock=posix");
+ if (nfss->flags & NFS_MOUNT_NO_ALIGNWRITE)
+ seq_puts(m, ",noalignwrite");
+
if (nfss->flags & NFS_MOUNT_WRITE_EAGER) {
if (nfss->flags & NFS_MOUNT_WRITE_WAIT)
seq_puts(m, ",write=wait");
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d074d0ceb4f0..ead2dc55952d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -772,8 +772,7 @@ static void nfs_inode_add_request(struct nfs_page *req)
nfs_lock_request(req);
spin_lock(&mapping->i_private_lock);
set_bit(PG_MAPPED, &req->wb_flags);
- folio_set_private(folio);
- folio->private = req;
+ folio_attach_private(folio, req);
spin_unlock(&mapping->i_private_lock);
atomic_long_inc(&nfsi->nrequests);
/* this a head request for a page group - mark it as having an
@@ -797,8 +796,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
spin_lock(&mapping->i_private_lock);
if (likely(folio)) {
- folio->private = NULL;
- folio_clear_private(folio);
+ folio_detach_private(folio);
clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
}
spin_unlock(&mapping->i_private_lock);
@@ -1297,7 +1295,10 @@ static int nfs_can_extend_write(struct file *file, struct folio *folio,
struct file_lock_context *flctx = locks_inode_context(inode);
struct file_lock *fl;
int ret;
+ unsigned int mntflags = NFS_SERVER(inode)->flags;
+ if (mntflags & NFS_MOUNT_NO_ALIGNWRITE)
+ return 0;
if (file->f_flags & O_DSYNC)
return 0;
if (!nfs_folio_write_uptodate(folio, pagelen))
@@ -1663,7 +1664,8 @@ EXPORT_SYMBOL_GPL(nfs_commitdata_release);
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
const struct nfs_rpc_ops *nfs_ops,
const struct rpc_call_ops *call_ops,
- int how, int flags)
+ int how, int flags,
+ struct nfsd_file *localio)
{
struct rpc_task *task;
int priority = flush_task_priority(how);
@@ -1692,6 +1694,9 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
dprintk("NFS: initiated commit call\n");
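+ /* As with reads and writes, a local open lets COMMIT bypass RPC and
+ * run vfs_fsync_range() on the file directly.
+ */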
+ if (localio)
+ return nfs_local_commit(localio, data, call_ops, how);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1791,6 +1796,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
struct nfs_commit_info *cinfo)
{
struct nfs_commit_data *data;
+ struct nfsd_file *localio;
unsigned short task_flags = 0;
/* another commit raced with us */
@@ -1807,9 +1813,12 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
nfs_init_commit(data, head, NULL, cinfo);
if (NFS_SERVER(inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
+
+ localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred,
+ data->args.fh, data->context->mode);
return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
data->mds_ops, how,
- RPC_TASK_CRED_NOREF | task_flags);
+ RPC_TASK_CRED_NOREF | task_flags, localio);
}
/*
diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
index 119c75ab9fd0..a5e54809701e 100644
--- a/fs/nfs_common/Makefile
+++ b/fs/nfs_common/Makefile
@@ -6,5 +6,10 @@
obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
+obj-$(CONFIG_NFS_COMMON_LOCALIO_SUPPORT) += nfs_localio.o
+nfs_localio-objs := nfslocalio.o
+
obj-$(CONFIG_GRACE_PERIOD) += grace.o
obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
+
+obj-$(CONFIG_NFS_COMMON) += common.o
diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
new file mode 100644
index 000000000000..34a115176f97
--- /dev/null
+++ b/fs/nfs_common/common.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/nfs_common.h>
+#include <linux/nfs4.h>
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs_errtbl[] = {
+ { NFS_OK, 0 },
+ { NFSERR_PERM, -EPERM },
+ { NFSERR_NOENT, -ENOENT },
+ { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_NXIO, -ENXIO },
+/* { NFSERR_EAGAIN, -EAGAIN }, */
+ { NFSERR_ACCES, -EACCES },
+ { NFSERR_EXIST, -EEXIST },
+ { NFSERR_XDEV, -EXDEV },
+ { NFSERR_NODEV, -ENODEV },
+ { NFSERR_NOTDIR, -ENOTDIR },
+ { NFSERR_ISDIR, -EISDIR },
+ { NFSERR_INVAL, -EINVAL },
+ { NFSERR_FBIG, -EFBIG },
+ { NFSERR_NOSPC, -ENOSPC },
+ { NFSERR_ROFS, -EROFS },
+ { NFSERR_MLINK, -EMLINK },
+ { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFSERR_NOTEMPTY, -ENOTEMPTY },
+ { NFSERR_DQUOT, -EDQUOT },
+ { NFSERR_STALE, -ESTALE },
+ { NFSERR_REMOTE, -EREMOTE },
+#ifdef EWFLUSH
+ { NFSERR_WFLUSH, -EWFLUSH },
+#endif
+ { NFSERR_BADHANDLE, -EBADHANDLE },
+ { NFSERR_NOT_SYNC, -ENOTSYNC },
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+ { -1, -EIO }
+};
+
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized. This function is used jointly by NFSv2 and NFSv3.
+ */
+int nfs_stat_to_errno(enum nfs_stat status)
+{
+ int i;
+
+ for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ if (nfs_errtbl[i].stat == (int)status)
+ return nfs_errtbl[i].errno;
+ }
+ return nfs_errtbl[i].errno;
+}
+EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
+
+/*
+ * We need to translate between nfs v4 status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs4_errtbl[] = {
+ { NFS4_OK, 0 },
+ { NFS4ERR_PERM, -EPERM },
+ { NFS4ERR_NOENT, -ENOENT },
+ { NFS4ERR_IO, -errno_NFSERR_IO},
+ { NFS4ERR_NXIO, -ENXIO },
+ { NFS4ERR_ACCESS, -EACCES },
+ { NFS4ERR_EXIST, -EEXIST },
+ { NFS4ERR_XDEV, -EXDEV },
+ { NFS4ERR_NOTDIR, -ENOTDIR },
+ { NFS4ERR_ISDIR, -EISDIR },
+ { NFS4ERR_INVAL, -EINVAL },
+ { NFS4ERR_FBIG, -EFBIG },
+ { NFS4ERR_NOSPC, -ENOSPC },
+ { NFS4ERR_ROFS, -EROFS },
+ { NFS4ERR_MLINK, -EMLINK },
+ { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
+ { NFS4ERR_DQUOT, -EDQUOT },
+ { NFS4ERR_STALE, -ESTALE },
+ { NFS4ERR_BADHANDLE, -EBADHANDLE },
+ { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFS4ERR_NOTSUPP, -ENOTSUPP },
+ { NFS4ERR_TOOSMALL, -ETOOSMALL },
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_SYMLINK, -ELOOP },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
+ { NFS4ERR_DEADLOCK, -EDEADLK },
+ { NFS4ERR_NOXATTR, -ENODATA },
+ { NFS4ERR_XATTR2BIG, -E2BIG },
+ { -1, -EIO }
+};
+
+/*
+ * Convert an NFS error code to a local one.
+ * This one is used by NFSv4.
+ */
+int nfs4_stat_to_errno(int stat)
+{
+ int i;
+ for (i = 0; nfs4_errtbl[i].stat != -1; i++) {
+ if (nfs4_errtbl[i].stat == stat)
+ return nfs4_errtbl[i].errno;
+ }
+ if (stat <= 10000 || stat > 10100) {
+ /* The server is looney tunes. */
+ return -EREMOTEIO;
+ }
+ /* If we cannot translate the error, the recovery routines should
+ * handle it.
+ * Note: remaining NFSv4 error codes have values > 10000, so they
+ * should not conflict with native Linux error codes.
+ */
+ return -stat;
+}
+EXPORT_SYMBOL_GPL(nfs4_stat_to_errno);
diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c
new file mode 100644
index 000000000000..42b479b9191f
--- /dev/null
+++ b/fs/nfs_common/nfslocalio.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/nfslocalio.h>
+#include <net/netns/generic.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NFS localio protocol bypass support");
+
+static DEFINE_SPINLOCK(nfs_uuid_lock);
+
+/*
+ * Global list of nfs_uuid_t instances
+ * that is protected by nfs_uuid_lock.
+ */
+static LIST_HEAD(nfs_uuids);
+
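+/*
+ * Generate a fresh uuid for a client starting a LOCALIO handshake and
+ * publish it on the global list where a colocated nfsd instance can
+ * find it (see nfs_uuid_is_local()).
+ */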
+void nfs_uuid_begin(nfs_uuid_t *nfs_uuid)
+{
+ nfs_uuid->net = NULL;
+ nfs_uuid->dom = NULL;
+ uuid_gen(&nfs_uuid->uuid);
+
+ spin_lock(&nfs_uuid_lock);
+ list_add_tail_rcu(&nfs_uuid->list, &nfs_uuids);
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_begin);
+
+void nfs_uuid_end(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net == NULL) {
+ spin_lock(&nfs_uuid_lock);
+ list_del_init(&nfs_uuid->list);
+ spin_unlock(&nfs_uuid_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_end);
+
+static nfs_uuid_t *nfs_uuid_lookup_locked(const uuid_t *uuid)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ list_for_each_entry(nfs_uuid, &nfs_uuids, list)
+ if (uuid_equal(&nfs_uuid->uuid, uuid))
+ return nfs_uuid;
+
+ return NULL;
+}
+
+static struct module *nfsd_mod;
+
+void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list,
+ struct net *net, struct auth_domain *dom,
+ struct module *mod)
+{
+ nfs_uuid_t *nfs_uuid;
+
+ spin_lock(&nfs_uuid_lock);
+ nfs_uuid = nfs_uuid_lookup_locked(uuid);
+ if (nfs_uuid) {
+ kref_get(&dom->ref);
+ nfs_uuid->dom = dom;
+ /*
+ * We don't hold a ref on the net, but instead put
+ * ourselves on a list so the net pointer can be
+ * invalidated.
+ */
+ list_move(&nfs_uuid->list, list);
+ rcu_assign_pointer(nfs_uuid->net, net);
+
+ __module_get(mod);
+ nfsd_mod = mod;
+ }
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_is_local);
+
+static void nfs_uuid_put_locked(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net) {
+ module_put(nfsd_mod);
+ nfs_uuid->net = NULL;
+ }
+ if (nfs_uuid->dom) {
+ auth_domain_put(nfs_uuid->dom);
+ nfs_uuid->dom = NULL;
+ }
+ list_del_init(&nfs_uuid->list);
+}
+
+void nfs_uuid_invalidate_clients(struct list_head *list)
+{
+ nfs_uuid_t *nfs_uuid, *tmp;
+
+ spin_lock(&nfs_uuid_lock);
+ list_for_each_entry_safe(nfs_uuid, tmp, list, list)
+ nfs_uuid_put_locked(nfs_uuid);
+ spin_unlock(&nfs_uuid_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_clients);
+
+void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid)
+{
+ if (nfs_uuid->net) {
+ spin_lock(&nfs_uuid_lock);
+ nfs_uuid_put_locked(nfs_uuid);
+ spin_unlock(&nfs_uuid_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_one_client);
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid,
+ struct rpc_clnt *rpc_clnt, const struct cred *cred,
+ const struct nfs_fh *nfs_fh, const fmode_t fmode)
+{
+ struct net *net;
+ struct nfsd_file *localio;
+
+ /*
+ * Not running in nfsd context, so we must safely get a reference on nfsd_serv.
+ * But the server may already be shutting down; if so, disallow new localio.
+ * uuid->net is NOT a counted reference, but rcu_read_lock() ensures that
+ * if uuid->net is not NULL, then calling nfsd_serv_try_get() is safe
+ * and if it succeeds we will have an implied reference to the net.
+ *
+ * Otherwise NFS may not have a ref on NFSD and therefore cannot safely
+ * make 'nfs_to' calls.
+ */
+ rcu_read_lock();
+ net = rcu_dereference(uuid->net);
+ if (!net || !nfs_to->nfsd_serv_try_get(net)) {
+ rcu_read_unlock();
+ return ERR_PTR(-ENXIO);
+ }
+ rcu_read_unlock();
+ /* We have an implied reference to net thanks to nfsd_serv_try_get */
+ localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt,
+ cred, nfs_fh, fmode);
+ if (IS_ERR(localio))
+ nfs_to->nfsd_serv_put(net);
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfs_open_local_fh);
+
+/*
+ * The NFS LOCALIO code needs to call into NFSD using various symbols,
+ * but cannot be statically linked, because that will make the NFS
+ * module always depend on the NFSD module.
+ *
+ * 'nfs_to' provides NFS access to NFSD functions needed for LOCALIO,
+ * its lifetime is tightly coupled to the NFSD module and will always
+ * be available to NFS LOCALIO because any successful client<->server
+ * LOCALIO handshake results in a reference on the NFSD module (above),
+ * so NFS implicitly holds a reference to the NFSD module and its
+ * functions in the 'nfs_to' nfsd_localio_operations cannot disappear.
+ *
+ * If the last NFS client using LOCALIO disconnects (and its reference
+ * on NFSD dropped) then NFSD could be unloaded, resulting in 'nfs_to'
+ * functions being invalid pointers. But if NFSD isn't loaded then NFS
+ * will not be able to handshake with NFSD and will have no cause to
+ * try to call 'nfs_to' function pointers. If/when NFSD is reloaded it
+ * will reinitialize the 'nfs_to' function pointers and make LOCALIO
+ * possible.
+ */
+const struct nfsd_localio_operations *nfs_to;
+EXPORT_SYMBOL_GPL(nfs_to);
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index ec2ab6429e00..c0bd1509ccd4 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -7,6 +7,7 @@ config NFSD
select LOCKD
select SUNRPC
select EXPORTFS
+ select NFS_COMMON
select NFS_ACL_SUPPORT if NFSD_V2_ACL
select NFS_ACL_SUPPORT if NFSD_V3_ACL
depends on MULTIUSER
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index b8736a82e57c..18cbd3fa7691 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -23,3 +23,4 @@ nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o
nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o
+nfsd-$(CONFIG_NFS_LOCALIO) += localio.o
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index e6beaaf4f170..93e33d1ee891 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -5,26 +5,26 @@
#include "nfsd.h"
#include "auth.h"
-int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
+int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
for (f = exp->ex_flavors; f < end; f++) {
- if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
+ if (f->pseudoflavor == cred->cr_flavor)
return f->flags;
}
return exp->ex_flags;
}
-int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp)
{
struct group_info *rqgi;
struct group_info *gi;
struct cred *new;
int i;
- int flags = nfsexp_flags(rqstp, exp);
+ int flags = nfsexp_flags(cred, exp);
/* discard any old override before preparing the new set */
revert_creds(get_cred(current_real_cred()));
@@ -32,10 +32,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
if (!new)
return -ENOMEM;
- new->fsuid = rqstp->rq_cred.cr_uid;
- new->fsgid = rqstp->rq_cred.cr_gid;
+ new->fsuid = cred->cr_uid;
+ new->fsgid = cred->cr_gid;
- rqgi = rqstp->rq_cred.cr_group_info;
+ rqgi = cred->cr_group_info;
if (flags & NFSEXP_ALLSQUASH) {
new->fsuid = exp->ex_anon_uid;
diff --git a/fs/nfsd/auth.h b/fs/nfsd/auth.h
index dbd66424f600..8c5031bbbcee 100644
--- a/fs/nfsd/auth.h
+++ b/fs/nfsd/auth.h
@@ -12,6 +12,6 @@
* Set the current process's fsuid/fsgid etc to those of the NFS
* client user
*/
-int nfsd_setuser(struct svc_rqst *, struct svc_export *);
+int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp);
#endif /* LINUX_NFSD_AUTH_H */
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 3c040c81c77d..08a20e5bcf7f 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -147,8 +147,7 @@ nfsd4_block_get_device_info_simple(struct super_block *sb,
struct pnfs_block_deviceaddr *dev;
struct pnfs_block_volume *b;
- dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
- sizeof(struct pnfs_block_volume), GFP_KERNEL);
+ dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
if (!dev)
return -ENOMEM;
gdp->gd_device = dev;
@@ -255,8 +254,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
const struct pr_ops *ops;
int ret;
- dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
- sizeof(struct pnfs_block_volume), GFP_KERNEL);
+ dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
if (!dev)
return -ENOMEM;
gdp->gd_device = dev;
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
index b0361e8aa9a7..4e28ac8f1127 100644
--- a/fs/nfsd/blocklayoutxdr.h
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -47,7 +47,7 @@ struct pnfs_block_volume {
struct pnfs_block_deviceaddr {
u32 nr_volumes;
- struct pnfs_block_volume volumes[];
+ struct pnfs_block_volume volumes[] __counted_by(nr_volumes);
};
__be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index 66a05fefae98..bb7addef4a31 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -10,7 +10,7 @@
#define NFSCACHE_H
#include <linux/sunrpc/svc.h>
-#include "netns.h"
+#include "nfsd.h"
/*
* Representation of a reply cache entry.
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 50b3135d07ac..c82d8e3e0d4f 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1074,10 +1074,30 @@ static struct svc_export *exp_find(struct cache_detail *cd,
return exp;
}
+/**
+ * check_nfsd_access - check if access to export is allowed.
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO).
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
{
struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
- struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct svc_xprt *xprt;
+
+ /*
+ * If rqstp is NULL, this is a LOCALIO request which will only
+ * ever use a filehandle/credential pair for which access has
+ * been affirmed (by ACCESS or OPEN NFS requests) over the
+ * wire. So there is no need for further checks here.
+ */
+ if (!rqstp)
+ return nfs_ok;
+
+ xprt = rqstp->rq_xprt;
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
@@ -1098,17 +1118,17 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
ok:
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
- return 0;
+ return nfs_ok;
/* ip-address based client; check sec= export option: */
for (f = exp->ex_flavors; f < end; f++) {
if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
- return 0;
+ return nfs_ok;
}
/* defaults in absence of sec= options: */
if (exp->ex_nflavors == 0) {
if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
- return 0;
+ return nfs_ok;
}
/* If the compound op contains a spo_must_allowed op,
@@ -1118,10 +1138,10 @@ ok:
*/
if (nfsd4_spo_must_allow(rqstp))
- return 0;
+ return nfs_ok;
denied:
- return rqstp->rq_vers < 4 ? nfserr_acces : nfserr_wrongsec;
+ return nfserr_wrongsec;
}
/*
@@ -1164,19 +1184,35 @@ gss:
return gssexp;
}
+/**
+ * rqst_exp_find - Find an svc_export in the context of a rqst or similar
+ * @reqp: The handle to be used to suspend the request if a cache-upcall is needed.
+ * If NULL, missing in-cache information will result in failure.
+ * @net: The network namespace in which the request exists
+ * @cl: default auth_domain to use for looking up the export
+ * @gsscl: an alternate auth_domain defined using deprecated gss/krb5 format.
+ * @fsid_type: The type of fsid to look for
+ * @fsidv: The actual fsid to look up in the context of either client.
+ *
+ * Perform a lookup for @cl/@fsidv in the given @net for an export. If
+ * none is found and @gsscl is specified, repeat the lookup using @gsscl.
+ *
+ * Returns an export, or an error pointer.
+ */
struct svc_export *
-rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
+rqst_exp_find(struct cache_req *reqp, struct net *net,
+ struct auth_domain *cl, struct auth_domain *gsscl,
+ int fsid_type, u32 *fsidv)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct cache_detail *cd = nn->svc_export_cache;
- if (rqstp->rq_client == NULL)
+ if (!cl)
goto gss;
/* First try the auth_unix client: */
- exp = exp_find(cd, rqstp->rq_client, fsid_type,
- fsidv, &rqstp->rq_chandle);
+ exp = exp_find(cd, cl, fsid_type, fsidv, reqp);
if (PTR_ERR(exp) == -ENOENT)
goto gss;
if (IS_ERR(exp))
@@ -1186,10 +1222,9 @@ rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
return exp;
gss:
/* Otherwise, try falling back on gss client */
- if (rqstp->rq_gssclient == NULL)
+ if (!gsscl)
return exp;
- gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
- &rqstp->rq_chandle);
+ gssexp = exp_find(cd, gsscl, fsid_type, fsidv, reqp);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
if (!IS_ERR(exp))
@@ -1220,7 +1255,9 @@ struct svc_export *rqst_find_fsidzero_export(struct svc_rqst *rqstp)
mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
- return rqst_exp_find(rqstp, FSID_NUM, fsidv);
+ return rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp),
+ rqstp->rq_client, rqstp->rq_gssclient,
+ FSID_NUM, fsidv);
}
/*
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index ca9dc230ae3d..3794ae253a70 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -99,7 +99,8 @@ struct svc_expkey {
#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
-int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp);
+struct svc_cred;
+int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp);
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
/*
@@ -127,6 +128,8 @@ static inline struct svc_export *exp_get(struct svc_export *exp)
cache_get(&exp->h);
return exp;
}
-struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *);
+struct svc_export *rqst_exp_find(struct cache_req *reqp, struct net *net,
+ struct auth_domain *cl, struct auth_domain *gsscl,
+ int fsid_type, u32 *fsidv);
#endif /* NFSD_EXPORT_H */
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index f4704f5d4086..19bb88c7eebd 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -52,10 +52,11 @@
#define NFSD_FILE_CACHE_UP (0)
/* We only care about NFSD_MAY_READ/WRITE for this cache */
-#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)
+#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE|NFSD_MAY_LOCALIO)
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
static DEFINE_PER_CPU(unsigned long, nfsd_file_acquisitions);
+static DEFINE_PER_CPU(unsigned long, nfsd_file_allocations);
static DEFINE_PER_CPU(unsigned long, nfsd_file_releases);
static DEFINE_PER_CPU(unsigned long, nfsd_file_total_age);
static DEFINE_PER_CPU(unsigned long, nfsd_file_evictions);
@@ -111,7 +112,7 @@ static void
nfsd_file_schedule_laundrette(void)
{
if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
- queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
+ queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette,
NFSD_LAUNDRETTE_DELAY);
}
@@ -151,7 +152,7 @@ nfsd_file_mark_put(struct nfsd_file_mark *nfm)
}
static struct nfsd_file_mark *
-nfsd_file_mark_find_or_create(struct nfsd_file *nf, struct inode *inode)
+nfsd_file_mark_find_or_create(struct inode *inode)
{
int err;
struct fsnotify_mark *mark;
@@ -215,7 +216,9 @@ nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need,
if (unlikely(!nf))
return NULL;
+ this_cpu_inc(nfsd_file_allocations);
INIT_LIST_HEAD(&nf->nf_lru);
+ INIT_LIST_HEAD(&nf->nf_gc);
nf->nf_birthtime = ktime_get();
nf->nf_file = NULL;
nf->nf_cred = get_current_cred();
@@ -387,14 +390,42 @@ nfsd_file_put(struct nfsd_file *nf)
nfsd_file_free(nf);
}
+/**
+ * nfsd_file_put_local - put the reference to nfsd_file and local nfsd_serv
+ * @nf: nfsd_file of which to put the references
+ *
+ * First put the reference of the nfsd_file and then put the
+ * reference to the associated nn->nfsd_serv.
+ */
+void
+nfsd_file_put_local(struct nfsd_file *nf)
+{
+ struct net *net = nf->nf_net;
+
+ nfsd_file_put(nf);
+ nfsd_serv_put(net);
+}
+
+/**
+ * nfsd_file_file - get the backing file of an nfsd_file
+ * @nf: nfsd_file of which to access the backing file.
+ *
+ * Return backing file for @nf.
+ */
+struct file *
+nfsd_file_file(struct nfsd_file *nf)
+{
+ return nf->nf_file;
+}
+
static void
nfsd_file_dispose_list(struct list_head *dispose)
{
struct nfsd_file *nf;
while (!list_empty(dispose)) {
- nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
- list_del_init(&nf->nf_lru);
+ nf = list_first_entry(dispose, struct nfsd_file, nf_gc);
+ list_del_init(&nf->nf_gc);
nfsd_file_free(nf);
}
}
@@ -411,12 +442,12 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
while(!list_empty(dispose)) {
struct nfsd_file *nf = list_first_entry(dispose,
- struct nfsd_file, nf_lru);
+ struct nfsd_file, nf_gc);
struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
struct nfsd_fcache_disposal *l = nn->fcache_disposal;
spin_lock(&l->lock);
- list_move_tail(&nf->nf_lru, &l->freeme);
+ list_move_tail(&nf->nf_gc, &l->freeme);
spin_unlock(&l->lock);
svc_wake_up(nn->nfsd_serv);
}
@@ -503,7 +534,8 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
/* Refcount went to zero. Unhash it and queue it to the dispose list */
nfsd_file_unhash(nf);
- list_lru_isolate_move(lru, &nf->nf_lru, head);
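+ /* Dispose via the new nf_gc linkage so the nf_lru list_head is
+ * left untouched for the list_lru infrastructure.
+ */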
+ list_lru_isolate(lru, &nf->nf_lru);
+ list_add(&nf->nf_gc, head);
this_cpu_inc(nfsd_file_evictions);
trace_nfsd_file_gc_disposed(nf);
return LRU_REMOVED;
@@ -578,7 +610,7 @@ nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
/* If refcount goes to 0, then put on the dispose list */
if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
- list_add(&nf->nf_lru, dispose);
+ list_add(&nf->nf_gc, dispose);
trace_nfsd_file_closing(nf);
}
}
@@ -654,8 +686,8 @@ nfsd_file_close_inode_sync(struct inode *inode)
nfsd_file_queue_for_close(inode, &dispose);
while (!list_empty(&dispose)) {
- nf = list_first_entry(&dispose, struct nfsd_file, nf_lru);
- list_del_init(&nf->nf_lru);
+ nf = list_first_entry(&dispose, struct nfsd_file, nf_gc);
+ list_del_init(&nf->nf_gc);
nfsd_file_free(nf);
}
}
@@ -909,6 +941,7 @@ nfsd_file_cache_shutdown(void)
for_each_possible_cpu(i) {
per_cpu(nfsd_file_cache_hits, i) = 0;
per_cpu(nfsd_file_acquisitions, i) = 0;
+ per_cpu(nfsd_file_allocations, i) = 0;
per_cpu(nfsd_file_releases, i) = 0;
per_cpu(nfsd_file_total_age, i) = 0;
per_cpu(nfsd_file_evictions, i) = 0;
@@ -977,12 +1010,14 @@ nfsd_file_is_cached(struct inode *inode)
}
static __be32
-nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+nfsd_file_do_acquire(struct svc_rqst *rqstp, struct net *net,
+ struct svc_cred *cred,
+ struct auth_domain *client,
+ struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **pnf, bool want_gc)
{
unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
- struct net *net = SVC_NET(rqstp);
struct nfsd_file *new, *nf;
bool stale_retry = true;
bool open_retry = true;
@@ -991,8 +1026,13 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
int ret;
retry:
- status = fh_verify(rqstp, fhp, S_IFREG,
- may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ if (rqstp) {
+ status = fh_verify(rqstp, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ } else {
+ status = fh_verify_local(net, cred, client, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ }
if (status != nfs_ok)
return status;
inode = d_inode(fhp->fh_dentry);
@@ -1024,7 +1064,7 @@ retry:
if (unlikely(nf)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
- nfsd_file_slab_free(&new->nf_rcu);
+ nfsd_file_free(new);
goto wait_for_construction;
}
nf = new;
@@ -1035,8 +1075,6 @@ retry:
if (likely(ret == 0))
goto open_file;
- if (ret == -EEXIST)
- goto retry;
trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret);
status = nfserr_jukebox;
goto construction_err;
@@ -1051,6 +1089,7 @@ wait_for_construction:
status = nfserr_jukebox;
goto construction_err;
}
+ nfsd_file_put(nf);
open_retry = false;
fh_put(fhp);
goto retry;
@@ -1074,7 +1113,7 @@ out:
open_file:
trace_nfsd_file_alloc(nf);
- nf->nf_mark = nfsd_file_mark_find_or_create(nf, inode);
+ nf->nf_mark = nfsd_file_mark_find_or_create(inode);
if (nf->nf_mark) {
if (file) {
get_file(file);
@@ -1139,7 +1178,8 @@ __be32
nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, true);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, NULL, pnf, true);
}
/**
@@ -1163,7 +1203,55 @@ __be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, false);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, NULL, pnf, false);
+}
+
+/**
+ * nfsd_file_acquire_local - Get a struct nfsd_file with an open file for localio
+ * @net: The network namespace in which to perform a lookup
+ * @cred: the user credential with which to validate access
+ * @client: the auth_domain for LOCALIO lookup
+ * @fhp: the NFS filehandle of the file to be opened
+ * @may_flags: NFSD_MAY_ settings for the file
+ * @pnf: OUT: new or found "struct nfsd_file" object
+ *
+ * This file lookup interface provides access to a file given the
+ * filehandle and credential. No connection-based authorisation
+ * is performed and in that way it is quite different from other
+ * file access mediated by nfsd. It allows a kernel module such as the NFS
+ * client to reach across network and filesystem namespaces to access
+ * a file. The security implications of this should be carefully
+ * considered before use.
+ *
+ * The nfsd_file object returned by this API is reference-counted
+ * and garbage-collected. The object is retained for a few
+ * seconds after the final nfsd_file_put() in case the caller
+ * wants to re-use it.
+ *
+ * Return values:
+ * %nfs_ok - @pnf points to an nfsd_file with its reference
+ * count boosted.
+ *
+ * On error, an nfsstat value in network byte order is returned.
+ */
+__be32
+nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf)
+{
+ /*
+ * Save creds before calling nfsd_file_do_acquire() (which calls
+ * nfsd_setuser). This matters because the caller (LOCALIO) runs
+ * in client context.
+ */
+ const struct cred *save_cred = get_current_cred();
+ __be32 beres;
+
+ beres = nfsd_file_do_acquire(NULL, net, cred, client,
+ fhp, may_flags, NULL, pnf, true);
+ revert_creds(save_cred);
+ return beres;
}
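
A minimal sketch of the caller contract this kernel-doc implies; the
locals and the read path are illustrative assumptions, not part of
this patch:

	struct nfsd_file *nf;
	__be32 status;

	status = nfsd_file_acquire_local(net, cred, client, fhp,
					 NFSD_MAY_READ, &nf);
	if (status == nfs_ok) {
		/* access the open struct file backing the nfsd_file */
		struct file *filp = nfsd_file_file(nf);
		/* ... read from filp ... */
		nfsd_file_put_local(nf);	/* drop the boosted reference */
	}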
/**
@@ -1189,7 +1277,8 @@ nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **pnf)
{
- return nfsd_file_do_acquire(rqstp, fhp, may_flags, file, pnf, false);
+ return nfsd_file_do_acquire(rqstp, SVC_NET(rqstp), NULL, NULL,
+ fhp, may_flags, file, pnf, false);
}
/*
@@ -1199,7 +1288,7 @@ nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
*/
int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
- unsigned long releases = 0, evictions = 0;
+ unsigned long allocations = 0, releases = 0, evictions = 0;
unsigned long hits = 0, acquisitions = 0;
unsigned int i, count = 0, buckets = 0;
unsigned long lru = 0, total_age = 0;
@@ -1224,6 +1313,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
for_each_possible_cpu(i) {
hits += per_cpu(nfsd_file_cache_hits, i);
acquisitions += per_cpu(nfsd_file_acquisitions, i);
+ allocations += per_cpu(nfsd_file_allocations, i);
releases += per_cpu(nfsd_file_releases, i);
total_age += per_cpu(nfsd_file_total_age, i);
evictions += per_cpu(nfsd_file_evictions, i);
@@ -1234,6 +1324,7 @@ int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
seq_printf(m, "lru entries: %lu\n", lru);
seq_printf(m, "cache hits: %lu\n", hits);
seq_printf(m, "acquisitions: %lu\n", acquisitions);
+ seq_printf(m, "allocations: %lu\n", allocations);
seq_printf(m, "releases: %lu\n", releases);
seq_printf(m, "evictions: %lu\n", evictions);
if (releases)
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index c61884def906..cadf3c2689c4 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -44,6 +44,7 @@ struct nfsd_file {
struct nfsd_file_mark *nf_mark;
struct list_head nf_lru;
+ struct list_head nf_gc;
struct rcu_head nf_rcu;
ktime_t nf_birthtime;
};
@@ -54,7 +55,9 @@ void nfsd_file_cache_shutdown(void);
int nfsd_file_cache_start_net(struct net *net);
void nfsd_file_cache_shutdown_net(struct net *net);
void nfsd_file_put(struct nfsd_file *nf);
+void nfsd_file_put_local(struct nfsd_file *nf);
struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
+struct file *nfsd_file_file(struct nfsd_file *nf);
void nfsd_file_close_inode_sync(struct inode *inode);
void nfsd_file_net_dispose(struct nfsd_net *nn);
bool nfsd_file_is_cached(struct inode *inode);
@@ -65,5 +68,8 @@ __be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned int may_flags, struct file *file,
struct nfsd_file **nfp);
+__be32 nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ unsigned int may_flags, struct nfsd_file **pnf);
int nfsd_file_cache_stats_show(struct seq_file *m, void *v);
#endif /* _FS_NFSD_FILECACHE_H */
diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c
new file mode 100644
index 000000000000..291e9c69cae4
--- /dev/null
+++ b/fs/nfsd/localio.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NFS server support for local clients to bypass network stack
+ *
+ * Copyright (C) 2014 Weston Andros Adamson <dros@primarydata.com>
+ * Copyright (C) 2019 Trond Myklebust <trond.myklebust@hammerspace.com>
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+
+#include <linux/exportfs.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs_common.h>
+#include <linux/nfslocalio.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+#include <linux/string.h>
+
+#include "nfsd.h"
+#include "vfs.h"
+#include "netns.h"
+#include "filecache.h"
+#include "cache.h"
+
+static const struct nfsd_localio_operations nfsd_localio_ops = {
+ .nfsd_serv_try_get = nfsd_serv_try_get,
+ .nfsd_serv_put = nfsd_serv_put,
+ .nfsd_open_local_fh = nfsd_open_local_fh,
+ .nfsd_file_put_local = nfsd_file_put_local,
+ .nfsd_file_file = nfsd_file_file,
+};
+
+void nfsd_localio_ops_init(void)
+{
+ nfs_to = &nfsd_localio_ops;
+}
+
+/**
+ * nfsd_open_local_fh - lookup a local filehandle @nfs_fh and map to nfsd_file
+ *
+ * @net: 'struct net' to get the proper nfsd_net required for LOCALIO access
+ * @dom: 'struct auth_domain' required for LOCALIO access
+ * @rpc_clnt: rpc_clnt that the client established
+ * @cred: cred that the client established
+ * @nfs_fh: filehandle to lookup
+ * @fmode: fmode_t to use for open
+ *
+ * This function maps a local fh to a path on a local filesystem.
+ * This is useful when the nfs client has the local server mounted - it
+ * can then avoid all the NFS protocol overhead on reads, writes and commits.
+ *
+ * On successful return, the returned nfsd_file will have its nf_net member
+ * set. The caller (NFS client) is responsible for calling nfsd_serv_put and
+ * nfsd_file_put (via nfs_to->nfsd_file_put_local).
+ */
+struct nfsd_file *
+nfsd_open_local_fh(struct net *net, struct auth_domain *dom,
+ struct rpc_clnt *rpc_clnt, const struct cred *cred,
+ const struct nfs_fh *nfs_fh, const fmode_t fmode)
+{
+ int mayflags = NFSD_MAY_LOCALIO;
+ struct svc_cred rq_cred;
+ struct svc_fh fh;
+ struct nfsd_file *localio;
+ __be32 beres;
+
+ if (nfs_fh->size > NFS4_FHSIZE)
+ return ERR_PTR(-EINVAL);
+
+ /* nfs_fh -> svc_fh */
+ fh_init(&fh, NFS4_FHSIZE);
+ fh.fh_handle.fh_size = nfs_fh->size;
+ memcpy(fh.fh_handle.fh_raw, nfs_fh->data, nfs_fh->size);
+
+ if (fmode & FMODE_READ)
+ mayflags |= NFSD_MAY_READ;
+ if (fmode & FMODE_WRITE)
+ mayflags |= NFSD_MAY_WRITE;
+
+ svcauth_map_clnt_to_svc_cred_local(rpc_clnt, cred, &rq_cred);
+
+ beres = nfsd_file_acquire_local(net, &rq_cred, dom,
+ &fh, mayflags, &localio);
+ if (beres)
+ localio = ERR_PTR(nfs_stat_to_errno(be32_to_cpu(beres)));
+
+ fh_put(&fh);
+ if (rq_cred.cr_group_info)
+ put_group_info(rq_cred.cr_group_info);
+
+ return localio;
+}
+EXPORT_SYMBOL_GPL(nfsd_open_local_fh);
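
A hedged sketch of how a local NFS client is expected to reach this
entry point through the nfs_to operations table initialised by
nfsd_localio_ops_init(); the surrounding locals and control flow are
assumptions, since the client-side wiring lives outside this file:

	struct nfsd_file *localio;

	if (nfs_to->nfsd_serv_try_get(net)) {
		localio = nfs_to->nfsd_open_local_fh(net, dom, rpc_clnt,
						     cred, nfs_fh, FMODE_READ);
		if (IS_ERR(localio)) {
			/* open failed: drop the nfsd_serv hold at once */
			nfs_to->nfsd_serv_put(net);
		} else {
			/* do I/O on nfs_to->nfsd_file_file(localio); later
			 * release with nfs_to->nfsd_file_put_local(localio)
			 * and nfs_to->nfsd_serv_put(net)
			 */
		}
	}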
+
+/*
+ * UUID_IS_LOCAL XDR functions
+ */
+
+static __be32 localio_proc_null(struct svc_rqst *rqstp)
+{
+ return rpc_success;
+}
+
+struct localio_uuidarg {
+ uuid_t uuid;
+};
+
+static __be32 localio_proc_uuid_is_local(struct svc_rqst *rqstp)
+{
+ struct localio_uuidarg *argp = rqstp->rq_argp;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs_uuid_is_local(&argp->uuid, &nn->local_clients,
+ net, rqstp->rq_client, THIS_MODULE);
+
+ return rpc_success;
+}
+
+static bool localio_decode_uuidarg(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr)
+{
+ struct localio_uuidarg *argp = rqstp->rq_argp;
+ u8 uuid[UUID_SIZE];
+
+ if (decode_opaque_fixed(xdr, uuid, UUID_SIZE))
+ return false;
+ import_uuid(&argp->uuid, uuid);
+
+ return true;
+}
+
+static const struct svc_procedure localio_procedures1[] = {
+ [LOCALIOPROC_NULL] = {
+ .pc_func = localio_proc_null,
+ .pc_decode = nfssvc_decode_voidarg,
+ .pc_encode = nfssvc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd_voidargs),
+ .pc_ressize = sizeof(struct nfsd_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = 0,
+ .pc_name = "NULL",
+ },
+ [LOCALIOPROC_UUID_IS_LOCAL] = {
+ .pc_func = localio_proc_uuid_is_local,
+ .pc_decode = localio_decode_uuidarg,
+ .pc_encode = nfssvc_encode_voidres,
+ .pc_argsize = sizeof(struct localio_uuidarg),
+ .pc_argzero = sizeof(struct localio_uuidarg),
+ .pc_ressize = sizeof(struct nfsd_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_name = "UUID_IS_LOCAL",
+ },
+};
+
+#define LOCALIO_NR_PROCEDURES ARRAY_SIZE(localio_procedures1)
+static DEFINE_PER_CPU_ALIGNED(unsigned long,
+ localio_count[LOCALIO_NR_PROCEDURES]);
+const struct svc_version localio_version1 = {
+ .vs_vers = 1,
+ .vs_nproc = LOCALIO_NR_PROCEDURES,
+ .vs_proc = localio_procedures1,
+ .vs_dispatch = nfsd_dispatch,
+ .vs_count = localio_count,
+ .vs_xdrsize = XDR_QUADLEN(UUID_SIZE),
+ .vs_hidden = true,
+};
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 14ec15656320..26f7b34d1a03 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -13,6 +13,7 @@
#include <linux/filelock.h>
#include <linux/nfs4.h>
#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
#include <linux/siphash.h>
#include <linux/sunrpc/stats.h>
@@ -139,7 +140,9 @@ struct nfsd_net {
struct svc_info nfsd_info;
#define nfsd_serv nfsd_info.serv
-
+ struct percpu_ref nfsd_serv_ref;
+ struct completion nfsd_serv_confirm_done;
+ struct completion nfsd_serv_free_done;
/*
* clientid and stateid data for construction of net unique COPY
@@ -148,12 +151,13 @@ struct nfsd_net {
u32 s2s_cp_cl_id;
struct idr s2s_cp_stateids;
spinlock_t s2s_cp_lock;
+ atomic_t pending_async_copies;
/*
* Version information
*/
- bool *nfsd_versions;
- bool *nfsd4_minorversions;
+ bool nfsd_versions[NFSD_MAXVERS + 1];
+ bool nfsd4_minorversions[NFSD_SUPPORTED_MINOR_VERSION + 1];
/*
* Duplicate reply cache
@@ -213,16 +217,21 @@ struct nfsd_net {
/* last time an admin-revoke happened for NFSv4.0 */
time64_t nfs40_last_revoke;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ /* Local clients to be invalidated when net is shut down */
+ struct list_head local_clients;
+#endif
};
/* Simple check to find out if a given net was properly initialized */
#define nfsd_netns_ready(nn) ((nn)->sessionid_hashtbl)
extern bool nfsd_support_version(int vers);
-extern void nfsd_netns_free_versions(struct nfsd_net *nn);
-
extern unsigned int nfsd_net_id;
+bool nfsd_serv_try_get(struct net *net);
+void nfsd_serv_put(struct net *net);
+
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
void nfsd_reset_write_verifier(struct nfsd_net *nn);
#endif /* __NFSD_NETNS_H__ */
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index dfcc957e460d..372bdcf5e07a 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -28,6 +28,29 @@ static int nfs3_ftypes[] = {
S_IFIFO, /* NF3FIFO */
};
+static __be32 nfsd3_map_status(__be32 status)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_nofilehandle:
+ status = nfserr_badhandle;
+ break;
+ case nfserr_wrongsec:
+ case nfserr_file_open:
+ status = nfserr_acces;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_notdir;
+ break;
+ case nfserr_symlink:
+ case nfserr_wrong_type:
+ status = nfserr_inval;
+ break;
+ }
+ return status;
+}
+
/*
* NULL call.
*/
@@ -57,6 +80,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -80,6 +104,7 @@ nfsd3_proc_setattr(struct svc_rqst *rqstp)
if (argp->check_guard)
guardtime = &argp->guardtime;
resp->status = nfsd_setattr(rqstp, &resp->fh, &attrs, guardtime);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -103,6 +128,7 @@ nfsd3_proc_lookup(struct svc_rqst *rqstp)
resp->status = nfsd_lookup(rqstp, &resp->dirfh,
argp->name, argp->len,
&resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -122,6 +148,7 @@ nfsd3_proc_access(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->access = argp->access;
resp->status = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -142,6 +169,7 @@ nfsd3_proc_readlink(struct svc_rqst *rqstp)
resp->pages = rqstp->rq_next_page++;
resp->status = nfsd_readlink(rqstp, &resp->fh,
page_address(*resp->pages), &resp->len);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -179,6 +207,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
&resp->count, &resp->eof);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -212,6 +241,7 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
rqstp->rq_vec, nvecs, &cnt,
resp->committed, resp->verf);
resp->count = cnt;
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -359,6 +389,7 @@ nfsd3_proc_create(struct svc_rqst *rqstp)
newfhp = fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd3_create_file(rqstp, dirfhp, newfhp, argp);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -384,6 +415,7 @@ nfsd3_proc_mkdir(struct svc_rqst *rqstp)
fh_init(&resp->fh, NFS3_FHSIZE);
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
&attrs, S_IFDIR, 0, &resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -424,6 +456,7 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
argp->flen, argp->tname, &attrs, &resp->fh);
kfree(argp->tname);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -465,6 +498,7 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
resp->status = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len,
&attrs, type, rdev, &resp->fh);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -486,6 +520,7 @@ nfsd3_proc_remove(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, -S_IFDIR,
argp->name, argp->len);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -506,6 +541,7 @@ nfsd3_proc_rmdir(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_unlink(rqstp, &resp->fh, S_IFDIR,
argp->name, argp->len);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -528,6 +564,7 @@ nfsd3_proc_rename(struct svc_rqst *rqstp)
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_rename(rqstp, &resp->ffh, argp->fname, argp->flen,
&resp->tfh, argp->tname, argp->tlen);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -548,6 +585,7 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
fh_copy(&resp->tfh, &argp->tfh);
resp->status = nfsd_link(rqstp, &resp->tfh, argp->tname, argp->tlen,
&resp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -600,6 +638,7 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
/* Recycle only pages that were part of the reply */
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -644,6 +683,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
rqstp->rq_next_page = resp->xdr.page_ptr + 1;
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -661,6 +701,7 @@ nfsd3_proc_fsstat(struct svc_rqst *rqstp)
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats, 0);
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -704,6 +745,7 @@ nfsd3_proc_fsinfo(struct svc_rqst *rqstp)
}
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -746,6 +788,7 @@ nfsd3_proc_pathconf(struct svc_rqst *rqstp)
}
fh_put(&argp->fh);
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
@@ -773,6 +816,7 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
argp->count, resp->verf);
nfsd_file_put(nf);
out:
+ resp->status = nfsd3_map_status(resp->status);
return rpc_success;
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index d756f443fc44..b5b3ab9d719a 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1223,6 +1223,7 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
* cb_seq_status is only set in decode_cb_sequence4res,
* and so will remain 1 if an rpc level failure occurs.
*/
+ trace_nfsd_cb_rpc_prepare(clp);
cb->cb_seq_status = 1;
cb->cb_status = 0;
if (minorversion && !nfsd41_cb_get_slot(cb, task))
@@ -1329,11 +1330,14 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
+ trace_nfsd_cb_rpc_done(clp);
+
if (!nfsd4_cb_sequence_done(task, cb))
return;
if (cb->cb_status) {
- WARN_ON_ONCE(task->tk_status);
+ WARN_ONCE(task->tk_status, "cb_status=%d tk_status=%d",
+ cb->cb_status, task->tk_status);
task->tk_status = cb->cb_status;
}
@@ -1359,6 +1363,8 @@ static void nfsd4_cb_release(void *calldata)
{
struct nfsd4_callback *cb = calldata;
+ trace_nfsd_cb_rpc_release(cb->cb_clp);
+
if (cb->cb_need_restart)
nfsd4_queue_cb(cb);
else
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 7a806ac13e31..8cca1329f348 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -581,6 +581,7 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
.id = id,
.type = type,
};
+ __be32 status = nfs_ok;
__be32 *p;
int ret;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
@@ -593,12 +594,16 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
return nfserrno(ret);
ret = strlen(item->name);
WARN_ON_ONCE(ret > IDMAP_NAMESZ);
+
p = xdr_reserve_space(xdr, ret + 4);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque(p, item->name, ret);
+ if (unlikely(!p)) {
+ status = nfserr_resource;
+ goto out_put;
+ }
+ xdr_encode_opaque(p, item->name, ret);
+out_put:
cache_put(&item->h, nn->idtoname_cache);
- return 0;
+ return status;
}
static bool
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 4f3072b5979a..fbfddd3c4c94 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -740,6 +740,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
.prepare = nfsd4_cb_layout_prepare,
.done = nfsd4_cb_layout_done,
.release = nfsd4_cb_layout_release,
+ .opcode = OP_CB_LAYOUTRECALL,
};
static bool
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2e39cf2e502a..b5a6bf4f459f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -158,7 +158,7 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
return fh_verify(rqstp, current_fh, S_IFREG, accmode);
}
-static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
+static __be32 nfsd_check_obj_isreg(struct svc_fh *fh, u32 minor_version)
{
umode_t mode = d_inode(fh->fh_dentry)->i_mode;
@@ -166,14 +166,15 @@ static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
return nfs_ok;
if (S_ISDIR(mode))
return nfserr_isdir;
- /*
- * Using err_symlink as our catch-all case may look odd; but
- * there's no other obvious error for this case in 4.0, and we
- * happen to know that it will cause the linux v4 client to do
- * the right thing on attempts to open something other than a
- * regular file.
- */
- return nfserr_symlink;
+ if (S_ISLNK(mode))
+ return nfserr_symlink;
+
+ /* RFC 7530 - 16.16.6 */
+ if (minor_version == 0)
+ return nfserr_symlink;
+ else
+ return nfserr_wrong_type;
+
}
static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh *resfh)
@@ -466,7 +467,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
}
if (status)
goto out;
- status = nfsd_check_obj_isreg(*resfh);
+ status = nfsd_check_obj_isreg(*resfh, cstate->minorversion);
if (status)
goto out;
@@ -751,15 +752,6 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&access->ac_supported);
}
-static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
-{
- __be32 *verf = (__be32 *)verifier->data;
-
- BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
-
- nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id));
-}
-
static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -1288,6 +1280,7 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
{
if (!refcount_dec_and_test(&copy->refcount))
return;
+ atomic_dec(&copy->cp_nn->pending_async_copies);
kfree(copy->cp_src);
kfree(copy);
}
@@ -1621,7 +1614,8 @@ static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
.release = nfsd4_cb_offload_release,
- .done = nfsd4_cb_offload_done
+ .done = nfsd4_cb_offload_done,
+ .opcode = OP_CB_OFFLOAD,
};
static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
@@ -1630,7 +1624,6 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
test_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags) ?
NFS_FILE_SYNC : NFS_UNSTABLE;
nfsd4_copy_set_sync(copy, sync);
- gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
}
static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
@@ -1767,7 +1760,7 @@ static int nfsd4_do_async_copy(void *data)
{
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
- trace_nfsd_copy_do_async(copy);
+ trace_nfsd_copy_async(copy);
if (nfsd4_ssc_is_inter(copy)) {
struct file *filp;
@@ -1794,6 +1787,7 @@ static int nfsd4_do_async_copy(void *data)
do_callback:
set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
+ trace_nfsd_copy_async_done(copy);
nfsd4_send_cb_offload(copy);
cleanup_async_copy(copy);
return 0;
@@ -1803,9 +1797,11 @@ static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd4_copy *async_copy = NULL;
struct nfsd4_copy *copy = &u->copy;
+ struct nfsd42_write_res *result;
__be32 status;
- struct nfsd4_copy *async_copy = NULL;
/*
* Currently, async COPY is not reliable. Force all COPY
@@ -1814,6 +1810,9 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
nfsd4_copy_set_sync(copy, true);
+ result = &copy->cp_res;
+ nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);
+
copy->cp_clp = cstate->clp;
if (nfsd4_ssc_is_inter(copy)) {
trace_nfsd_copy_inter(copy);
@@ -1838,12 +1837,16 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
memcpy(&copy->fh, &cstate->current_fh.fh_handle,
sizeof(struct knfsd_fh));
if (nfsd4_copy_is_async(copy)) {
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-
- status = nfserrno(-ENOMEM);
async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
if (!async_copy)
goto out_err;
+ async_copy->cp_nn = nn;
+ /* Arbitrary cap on number of pending async copy operations */
+ if (atomic_inc_return(&nn->pending_async_copies) >
+ (int)rqstp->rq_pool->sp_nrthreads) {
+ atomic_dec(&nn->pending_async_copies);
+ goto out_err;
+ }
INIT_LIST_HEAD(&async_copy->copies);
refcount_set(&async_copy->refcount, 1);
async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
@@ -1851,8 +1854,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out_err;
if (!nfs4_init_copy_state(nn, copy))
goto out_err;
- memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid,
- sizeof(copy->cp_res.cb_stateid));
+ memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
+ sizeof(result->cb_stateid));
dup_copy_fields(copy, async_copy);
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
async_copy, "%s", "copy thread");
@@ -1883,7 +1886,7 @@ out_err:
}
if (async_copy)
cleanup_async_copy(async_copy);
- status = nfserrno(-ENOMEM);
+ status = nfserr_jukebox;
goto out;
}
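
The pending-copy limit above uses an optimistic-admission pattern:
increment first, compare against the cap, and roll back on overflow.
A standalone sketch (the helper name and parameters are hypothetical):

	static bool try_admit(atomic_t *pending, int cap)
	{
		if (atomic_inc_return(pending) > cap) {
			atomic_dec(pending);	/* over the cap: roll back */
			return false;		/* caller rejects the request */
		}
		return true;	/* admitted; decrement again on completion */
	}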
@@ -1942,7 +1945,7 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_copy_notify *cn = &u->copy_notify;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- struct nfs4_stid *stid;
+ struct nfs4_stid *stid = NULL;
struct nfs4_cpntf_state *cps;
struct nfs4_client *clp = cstate->clp;
@@ -1951,6 +1954,8 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&stid);
if (status)
return status;
+ if (!stid)
+ return nfserr_bad_stateid;
cn->cpn_lease_time.tv_sec = nn->nfsd4_lease;
cn->cpn_lease_time.tv_nsec = 0;
@@ -2231,7 +2236,9 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
return nfserr_noent;
}
- exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
+ exp = rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp),
+ rqstp->rq_client, rqstp->rq_gssclient,
+ map->fsid_type, map->fsid);
if (IS_ERR(exp)) {
dprintk("%s: could not find device id\n", __func__);
return nfserr_noent;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 67d8673a9391..b7d61eb8afe9 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -809,6 +809,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
ci = &cmsg->cm_u.cm_clntinfo;
if (get_user(namelen, &ci->cc_name.cn_len))
return -EFAULT;
+ if (namelen == 0 || namelen > NFS4_OPAQUE_LIMIT) {
+ dprintk("%s: invalid namelen (%u)", __func__, namelen);
+ return -EINVAL;
+ }
name.data = memdup_user(&ci->cc_name.cn_id, namelen);
if (IS_ERR(name.data))
return PTR_ERR(name.data);
@@ -831,6 +835,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
cnm = &cmsg->cm_u.cm_name;
if (get_user(namelen, &cnm->cn_len))
return -EFAULT;
+ if (namelen == 0 || namelen > NFS4_OPAQUE_LIMIT) {
+ dprintk("%s: invalid namelen (%u)", __func__, namelen);
+ return -EINVAL;
+ }
name.data = memdup_user(&cnm->cn_id, namelen);
if (IS_ERR(name.data))
return PTR_ERR(name.data);
@@ -1895,10 +1903,7 @@ nfsd4_cltrack_upcall_lock(struct nfs4_client *clp)
static void
nfsd4_cltrack_upcall_unlock(struct nfs4_client *clp)
{
- smp_mb__before_atomic();
- clear_bit(NFSD4_CLIENT_UPCALL_LOCK, &clp->cl_flags);
- smp_mb__after_atomic();
- wake_up_bit(&clp->cl_flags, NFSD4_CLIENT_UPCALL_LOCK);
+ clear_and_wake_up_bit(NFSD4_CLIENT_UPCALL_LOCK, &clp->cl_flags);
}
static void
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a366fb1c1b9b..ac1859c7cc9d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -400,6 +400,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
.prepare = nfsd4_cb_notify_lock_prepare,
.done = nfsd4_cb_notify_lock_done,
.release = nfsd4_cb_notify_lock_release,
+ .opcode = OP_CB_NOTIFY_LOCK,
};
/*
@@ -1077,7 +1078,8 @@ static void nfs4_free_deleg(struct nfs4_stid *stid)
* When a delegation is recalled, the filehandle is stored in the "new"
* filter.
* Every 30 seconds we swap the filters and clear the "new" one,
- * unless both are empty of course.
+ * unless both are empty of course. This results in delegations for a
+ * given filehandle being blocked for between 30 and 60 seconds.
*
* Each filter is 256 bits. We hash the filehandle to 32bit and use the
* low 3 bytes as hash-table indices.
@@ -1106,9 +1108,9 @@ static int delegation_blocked(struct knfsd_fh *fh)
if (ktime_get_seconds() - bd->swap_time > 30) {
bd->entries -= bd->old_entries;
bd->old_entries = bd->entries;
+ bd->new = 1-bd->new;
memset(bd->set[bd->new], 0,
sizeof(bd->set[0]));
- bd->new = 1-bd->new;
bd->swap_time = ktime_get_seconds();
}
spin_unlock(&blocked_delegations_lock);
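
The hunk above also corrects the swap ordering: the generation index
now flips before the incoming generation is cleared. A self-contained
toy sketch of the same two-generation scheme (the 8-bit "filter" and
names are illustrative only):

	static unsigned char set[2];	/* two generations of a toy filter */
	static int new;			/* index of the current generation */

	static void swap_generations(void)
	{
		new = 1 - new;		/* old generation becomes current... */
		set[new] = 0;		/* ...and is cleared before reuse */
	}

	static int is_blocked(unsigned char bit)
	{
		/* an entry blocks while present in either generation,
		 * i.e. for one to two swap intervals (30-60 seconds)
		 */
		return (set[0] | set[1]) & bit;
	}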
@@ -1663,9 +1665,7 @@ static void release_openowner(struct nfs4_openowner *oo)
{
struct nfs4_ol_stateid *stp;
struct nfs4_client *clp = oo->oo_owner.so_client;
- struct list_head reaplist;
-
- INIT_LIST_HEAD(&reaplist);
+ LIST_HEAD(reaplist);
spin_lock(&clp->cl_lock);
unhash_openowner_locked(oo);
@@ -2369,9 +2369,8 @@ __destroy_client(struct nfs4_client *clp)
int i;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
- struct list_head reaplist;
+ LIST_HEAD(reaplist);
- INIT_LIST_HEAD(&reaplist);
spin_lock(&state_lock);
while (!list_empty(&clp->cl_delegations)) {
dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
@@ -2692,7 +2691,7 @@ static int client_info_show(struct seq_file *m, void *v)
clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
}
seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
- seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
+ seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr);
seq_printf(m, "admin-revoked states: %d\n",
atomic_read(&clp->cl_admin_revoked));
drop_client(clp);
@@ -3059,7 +3058,10 @@ nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
struct nfs4_cb_fattr *ncf =
container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+ trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task);
ncf->ncf_cb_status = task->tk_status;
switch (task->tk_status) {
case -NFS4ERR_DELAY:
@@ -3078,19 +3080,20 @@ nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
struct nfs4_delegation *dp =
container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
- clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
- wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
+ clear_and_wake_up_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
nfs4_put_stid(&dp->dl_stid);
}
static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
.done = nfsd4_cb_recall_any_done,
.release = nfsd4_cb_recall_any_release,
+ .opcode = OP_CB_RECALL_ANY,
};
static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
.done = nfsd4_cb_getattr_done,
.release = nfsd4_cb_getattr_release,
+ .opcode = OP_CB_GETATTR,
};
static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
@@ -4704,6 +4707,7 @@ void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
if (so != NULL) {
cstate->replay_owner = NULL;
atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED);
+ smp_mb__after_atomic();
wake_up_var(&so->so_replay.rp_locked);
nfs4_put_stateowner(so);
}
@@ -5004,6 +5008,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
* so tell them to stop waiting.
*/
atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
+ smp_mb__after_atomic();
wake_up_var(&oo->oo_owner.so_replay.rp_locked);
wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
@@ -5218,6 +5223,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
.prepare = nfsd4_cb_recall_prepare,
.done = nfsd4_cb_recall_done,
.release = nfsd4_cb_recall_release,
+ .opcode = OP_CB_RECALL,
};
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
@@ -5277,11 +5283,8 @@ static bool nfsd_breaker_owns_lease(struct file_lease *fl)
struct svc_rqst *rqst;
struct nfs4_client *clp;
- if (!i_am_nfsd())
- return false;
- rqst = kthread_data(current);
- /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
- if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
+ rqst = nfsd_current_rqst();
+ if (!nfsd_v4client(rqst))
return false;
clp = *(rqst->rq_lease_breaker);
return dl->dl_stid.sc_client == clp;
@@ -5859,7 +5862,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
/*
* Now that the deleg is set, check again to ensure that nothing
- * raced in and changed the mode while we weren't lookng.
+ * raced in and changed the mode while we weren't looking.
*/
status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
if (status)
@@ -5912,6 +5915,28 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
}
}
+static bool
+nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
+ struct kstat *stat)
+{
+ struct nfsd_file *nf = find_rw_file(dp->dl_stid.sc_file);
+ struct path path;
+ int rc;
+
+ if (!nf)
+ return false;
+
+ path.mnt = currentfh->fh_export->ex_path.mnt;
+ path.dentry = file_dentry(nf->nf_file);
+
+ rc = vfs_getattr(&path, stat,
+ (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
+ AT_STATX_SYNC_AS_STAT);
+
+ nfsd_file_put(nf);
+ return rc == 0;
+}
+
/*
* The Linux NFS server does not offer write delegations to NFSv4.0
* clients in order to avoid conflicts between write delegations and
@@ -5947,7 +5972,6 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
int cb_up;
int status = 0;
struct kstat stat;
- struct path path;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
open->op_recall = false;
@@ -5983,20 +6007,16 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
- open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
- trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
- path.mnt = currentfh->fh_export->ex_path.mnt;
- path.dentry = currentfh->fh_dentry;
- if (vfs_getattr(&path, &stat,
- (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
- AT_STATX_SYNC_AS_STAT)) {
+ if (!nfs4_delegation_stat(dp, currentfh, &stat)) {
nfs4_put_stid(&dp->dl_stid);
destroy_delegation(dp);
goto out_no_deleg;
}
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
dp->dl_cb_fattr.ncf_initial_cinfo =
nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry));
+ trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
} else {
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
@@ -6271,7 +6291,6 @@ void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
init_waitqueue_head(&nn->nfsd_ssc_waitq);
}
-EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
/*
* This is called when nfsd is being shutdown, after all inter_ssc
@@ -6619,9 +6638,8 @@ deleg_reaper(struct nfsd_net *nn)
{
struct list_head *pos, *next;
struct nfs4_client *clp;
- struct list_head cblist;
+ LIST_HEAD(cblist);
- INIT_LIST_HEAD(&cblist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
@@ -6647,7 +6665,6 @@ deleg_reaper(struct nfsd_net *nn)
cl_ra_cblist);
list_del_init(&clp->cl_ra_cblist);
clp->cl_ra->ra_keep = 0;
- clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
BIT(RCA4_TYPE_MASK_WDATA_DLG);
trace_nfsd_cb_recall_any(clp->cl_ra);
@@ -6892,7 +6909,8 @@ nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
nf = nfs4_find_file(s, flags);
if (nf) {
- status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
+ status = nfsd_permission(&rqstp->rq_cred,
+ fhp->fh_export, fhp->fh_dentry,
acc | NFSD_MAY_OWNER_OVERRIDE);
if (status) {
nfsd_file_put(nf);
@@ -7023,11 +7041,7 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
*nfp = NULL;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
- if (cstid)
- status = nfserr_bad_stateid;
- else
- status = check_special_stateids(net, fhp, stateid,
- flags);
+ status = check_special_stateids(net, fhp, stateid, flags);
goto done;
}
@@ -7481,8 +7495,9 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto put_stateid;
trace_nfsd_deleg_return(stateid);
- wake_up_var(d_inode(cstate->current_fh.fh_dentry));
destroy_delegation(dp);
+ smp_mb__after_atomic();
+ wake_up_var(d_inode(cstate->current_fh.fh_dentry));
put_stateid:
nfs4_put_stid(&dp->dl_stid);
out:
@@ -8338,7 +8353,7 @@ out:
* @cstate: NFSv4 COMPOUND state
* @u: RELEASE_LOCKOWNER arguments
*
- * Check if theree are any locks still held and if not - free the lockowner
+ * Check if there are any locks still held and if not, free the lockowner
* and any lock state that is owned.
*
* Return values:
@@ -8557,6 +8572,7 @@ static int nfs4_state_create_net(struct net *net)
spin_lock_init(&nn->client_lock);
spin_lock_init(&nn->s2s_cp_lock);
idr_init(&nn->s2s_cp_stateids);
+ atomic_set(&nn->pending_async_copies, 0);
spin_lock_init(&nn->blocked_locks_lock);
INIT_LIST_HEAD(&nn->blocked_locks_lru);
@@ -8836,6 +8852,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct file_lock_context *ctx;
+ struct nfs4_delegation *dp = NULL;
struct file_lease *fl;
struct iattr attrs;
struct nfs4_cb_fattr *ncf;
@@ -8845,84 +8862,76 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
ctx = locks_inode_context(inode);
if (!ctx)
return 0;
+
+#define NON_NFSD_LEASE ((void *)1)
+
spin_lock(&ctx->flc_lock);
for_each_file_lock(fl, &ctx->flc_lease) {
- unsigned char type = fl->c.flc_type;
-
if (fl->c.flc_flags == FL_LAYOUT)
continue;
- if (fl->fl_lmops != &nfsd_lease_mng_ops) {
- /*
- * non-nfs lease, if it's a lease with F_RDLCK then
- * we are done; there isn't any write delegation
- * on this inode
- */
- if (type == F_RDLCK)
- break;
-
- nfsd_stats_wdeleg_getattr_inc(nn);
- spin_unlock(&ctx->flc_lock);
-
- status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (fl->c.flc_type == F_WRLCK) {
+ if (fl->fl_lmops == &nfsd_lease_mng_ops)
+ dp = fl->c.flc_owner;
+ else
+ dp = NON_NFSD_LEASE;
+ }
+ break;
+ }
+ if (dp == NULL || dp == NON_NFSD_LEASE ||
+ dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
+ spin_unlock(&ctx->flc_lock);
+ if (dp == NON_NFSD_LEASE) {
+ status = nfserrno(nfsd_open_break_lease(inode,
+ NFSD_MAY_READ));
if (status != nfserr_jukebox ||
!nfsd_wait_for_delegreturn(rqstp, inode))
return status;
- return 0;
}
- if (type == F_WRLCK) {
- struct nfs4_delegation *dp = fl->c.flc_owner;
+ return 0;
+ }
- if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
- spin_unlock(&ctx->flc_lock);
- return 0;
- }
- nfsd_stats_wdeleg_getattr_inc(nn);
- dp = fl->c.flc_owner;
- refcount_inc(&dp->dl_stid.sc_count);
- ncf = &dp->dl_cb_fattr;
- nfs4_cb_getattr(&dp->dl_cb_fattr);
- spin_unlock(&ctx->flc_lock);
- wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
- TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
- if (ncf->ncf_cb_status) {
- /* Recall delegation only if client didn't respond */
- status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
- if (status != nfserr_jukebox ||
- !nfsd_wait_for_delegreturn(rqstp, inode)) {
- nfs4_put_stid(&dp->dl_stid);
- return status;
- }
- }
- if (!ncf->ncf_file_modified &&
- (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
- ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
- ncf->ncf_file_modified = true;
- if (ncf->ncf_file_modified) {
- int err;
-
- /*
- * Per section 10.4.3 of RFC 8881, the server would
- * not update the file's metadata with the client's
- * modified size
- */
- attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
- attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
- inode_lock(inode);
- err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
- inode_unlock(inode);
- if (err) {
- nfs4_put_stid(&dp->dl_stid);
- return nfserrno(err);
- }
- ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
- *size = ncf->ncf_cur_fsize;
- *modified = true;
- }
- nfs4_put_stid(&dp->dl_stid);
- return 0;
+ nfsd_stats_wdeleg_getattr_inc(nn);
+ refcount_inc(&dp->dl_stid.sc_count);
+ ncf = &dp->dl_cb_fattr;
+ nfs4_cb_getattr(&dp->dl_cb_fattr);
+ spin_unlock(&ctx->flc_lock);
+
+ wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
+ TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
+ if (ncf->ncf_cb_status) {
+ /* Recall delegation only if client didn't respond */
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ goto out_status;
+ }
+ if (!ncf->ncf_file_modified &&
+ (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
+ ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
+ ncf->ncf_file_modified = true;
+ if (ncf->ncf_file_modified) {
+ int err;
+
+ /*
+ * Per section 10.4.3 of RFC 8881, the server would
+ * not update the file's metadata with the client's
+ * modified size
+ */
+ attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
+ attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
+ inode_lock(inode);
+ err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
+ inode_unlock(inode);
+ if (err) {
+ status = nfserrno(err);
+ goto out_status;
}
- break;
+ ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
+ *size = ncf->ncf_cur_fsize;
+ *modified = true;
}
- spin_unlock(&ctx->flc_lock);
- return 0;
+ status = 0;
+out_status:
+ nfs4_put_stid(&dp->dl_stid);
+ return status;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 97f583777972..f118921250c3 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1246,14 +1246,6 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
}
static __be32
-nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
-{
- if (argp->minorversion == 0)
- return nfs_ok;
- return nfserr_notsupp;
-}
-
-static __be32
nfsd4_decode_read(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
{
struct nfsd4_read *read = &u->read;
@@ -2374,7 +2366,7 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
[OP_OPEN_CONFIRM] = nfsd4_decode_open_confirm,
[OP_OPEN_DOWNGRADE] = nfsd4_decode_open_downgrade,
[OP_PUTFH] = nfsd4_decode_putfh,
- [OP_PUTPUBFH] = nfsd4_decode_putpubfh,
+ [OP_PUTPUBFH] = nfsd4_decode_noop,
[OP_PUTROOTFH] = nfsd4_decode_noop,
[OP_READ] = nfsd4_decode_read,
[OP_READDIR] = nfsd4_decode_readdir,
@@ -5731,6 +5723,23 @@ __be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 respsize)
return nfserr_rep_too_big;
}
+static __be32 nfsd4_map_status(__be32 status, u32 minor)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_wrong_type:
+ /* RFC 8881 - 15.1.2.9 */
+ if (minor == 0)
+ status = nfserr_inval;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_symlink;
+ break;
+ }
+ return status;
+}
+
void
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
@@ -5798,6 +5807,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
so->so_replay.rp_buf, len);
}
status:
+ op->status = nfsd4_map_status(op->status,
+ resp->cstate.minorversion);
*p = op->status;
release:
if (opdesc && opdesc->op_release)
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 34eb2c2cbcde..3adbc05ebaac 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -18,6 +18,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
+#include <linux/nfslocalio.h>
#include "idmap.h"
#include "nfsd.h"
@@ -174,6 +175,13 @@ static int export_features_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(export_features);
+static int nfsd_pool_stats_open(struct inode *inode, struct file *file)
+{
+ struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
+
+ return svc_pool_stats_open(&nn->nfsd_info, file);
+}
+
static const struct file_operations pool_stats_operations = {
.open = nfsd_pool_stats_open,
.read = seq_read,
@@ -1762,7 +1770,7 @@ int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
- atomic_read(&sp->sp_nrthreads));
+ sp->sp_nrthreads);
if (err)
goto err_unlock;
}
@@ -2224,8 +2232,9 @@ err_free_msg:
*/
static __net_init int nfsd_net_init(struct net *net)
{
- int retval;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int retval;
+ int i;
retval = nfsd_export_init(net);
if (retval)
@@ -2238,16 +2247,20 @@ static __net_init int nfsd_net_init(struct net *net)
if (retval)
goto out_repcache_error;
memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
- nn->nfsd_svcstats.program = &nfsd_program;
- nn->nfsd_versions = NULL;
- nn->nfsd4_minorversions = NULL;
+ nn->nfsd_svcstats.program = &nfsd_programs[0];
+ for (i = 0; i < sizeof(nn->nfsd_versions); i++)
+ nn->nfsd_versions[i] = nfsd_support_version(i);
+ for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
+ nn->nfsd4_minorversions[i] = nfsd_support_version(4);
nn->nfsd_info.mutex = &nfsd_mutex;
nn->nfsd_serv = NULL;
nfsd4_init_leases_net(nn);
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
nfsd_proc_stat_init(net);
-
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ INIT_LIST_HEAD(&nn->local_clients);
+#endif
return 0;
out_repcache_error:
@@ -2258,6 +2271,22 @@ out_export_error:
return retval;
}
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+/**
+ * nfsd_net_pre_exit - Disconnect localio clients from net namespace
+ * @net: a network namespace that is about to be destroyed
+ *
+ * This invalidates ->net pointers held by localio clients
+ * while they can still safely access nn->counter.
+ */
+static __net_exit void nfsd_net_pre_exit(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs_uuid_invalidate_clients(&nn->local_clients);
+}
+#endif
+
/**
* nfsd_net_exit - Release the nfsd_net portion of a net namespace
* @net: a network namespace that is about to be destroyed
@@ -2271,11 +2300,13 @@ static __net_exit void nfsd_net_exit(struct net *net)
percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
nfsd_idmap_shutdown(net);
nfsd_export_shutdown(net);
- nfsd_netns_free_versions(nn);
}
static struct pernet_operations nfsd_net_ops = {
.init = nfsd_net_init,
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ .pre_exit = nfsd_net_pre_exit,
+#endif
.exit = nfsd_net_exit,
.id = &nfsd_net_id,
.size = sizeof(struct nfsd_net),
@@ -2313,6 +2344,7 @@ static int __init init_nfsd(void)
retval = genl_register_family(&nfsd_nl_family);
if (retval)
goto out_free_all;
+ nfsd_localio_ops_init();
return 0;
out_free_all:
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index cec8697b1cd6..4b56ba1e8e48 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -23,9 +23,7 @@
#include <uapi/linux/nfsd/debug.h>
-#include "netns.h"
#include "export.h"
-#include "stats.h"
#undef ifdebug
#ifdef CONFIG_SUNRPC_DEBUG
@@ -37,7 +35,14 @@
/*
* nfsd version
*/
+#define NFSD_MINVERS 2
+#define NFSD_MAXVERS 4
#define NFSD_SUPPORTED_MINOR_VERSION 2
+bool nfsd_support_version(int vers);
+
+#include "netns.h"
+#include "stats.h"
+
/*
* Maximum blocksizes supported by daemon under various circumstances.
*/
@@ -80,7 +85,7 @@ struct nfsd_genl_rqstp {
u32 rq_opnum[NFSD_MAX_OPS_PER_COMPOUND];
};
-extern struct svc_program nfsd_program;
+extern struct svc_program nfsd_programs[];
extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
extern struct mutex nfsd_mutex;
extern spinlock_t nfsd_drc_lock;
@@ -111,11 +116,9 @@ int nfsd_nrthreads(struct net *);
int nfsd_nrpools(struct net *);
int nfsd_get_nrthreads(int n, int *, struct net *);
int nfsd_set_nrthreads(int n, int *, struct net *);
-int nfsd_pool_stats_open(struct inode *, struct file *);
-int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_shutdown_threads(struct net *net);
-bool i_am_nfsd(void);
+struct svc_rqst *nfsd_current_rqst(void);
struct nfsdfs_client {
struct kref cl_ref;
@@ -143,6 +146,10 @@ extern const struct svc_version nfsd_acl_version3;
#endif
#endif
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+extern const struct svc_version localio_version1;
+#endif
+
struct nfsd_net;
enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
@@ -156,7 +163,7 @@ extern int nfsd_max_blksize;
static inline int nfsd_v4client(struct svc_rqst *rq)
{
- return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
+ return rq && rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
}
static inline struct user_namespace *
nfsd_user_namespace(const struct svc_rqst *rqstp)
@@ -327,17 +334,36 @@ void nfsd_lockd_shutdown(void);
#define nfserr_xattr2big cpu_to_be32(NFS4ERR_XATTR2BIG)
#define nfserr_noxattr cpu_to_be32(NFS4ERR_NOXATTR)
-/* error codes for internal use */
+/*
+ * Error codes for internal use. We use enum to choose numbers that are
+ * not already assigned, then convert to be32, resulting in a number that
+ * cannot conflict with any existing be32 nfserr value.
+ */
+enum {
+ NFSERR_DROPIT = NFS4ERR_FIRST_FREE,
/* if a request fails due to kmalloc failure, it gets dropped.
* Client should resend eventually
*/
-#define nfserr_dropit cpu_to_be32(30000)
+#define nfserr_dropit cpu_to_be32(NFSERR_DROPIT)
+
/* end-of-file indicator in readdir */
-#define nfserr_eof cpu_to_be32(30001)
+ NFSERR_EOF,
+#define nfserr_eof cpu_to_be32(NFSERR_EOF)
+
/* replay detected */
-#define nfserr_replay_me cpu_to_be32(11001)
+ NFSERR_REPLAY_ME,
+#define nfserr_replay_me cpu_to_be32(NFSERR_REPLAY_ME)
+
/* nfs41 replay detected */
-#define nfserr_replay_cache cpu_to_be32(11002)
+ NFSERR_REPLAY_CACHE,
+#define nfserr_replay_cache cpu_to_be32(NFSERR_REPLAY_CACHE)
+
+/* symlink found where dir expected - handled differently from
+ * other symlink-found errors by NFSv3.
+ */
+ NFSERR_SYMLINK_NOT_DIR,
+#define nfserr_symlink_not_dir cpu_to_be32(NFSERR_SYMLINK_NOT_DIR)
+};
/* Check for dir entries '.' and '..' */
#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index dd4e11a703aa..40ad58a6a036 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -62,8 +62,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
* the write call).
*/
static inline __be32
-nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
- umode_t requested)
+nfsd_mode_check(struct dentry *dentry, umode_t requested)
{
umode_t mode = d_inode(dentry)->i_mode & S_IFMT;
@@ -76,36 +75,36 @@ nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
}
return nfs_ok;
}
- /*
- * v4 has an error more specific than err_notdir which we should
- * return in preference to err_notdir:
- */
- if (rqstp->rq_vers == 4 && mode == S_IFLNK)
+ if (mode == S_IFLNK) {
+ if (requested == S_IFDIR)
+ return nfserr_symlink_not_dir;
return nfserr_symlink;
+ }
if (requested == S_IFDIR)
return nfserr_notdir;
if (mode == S_IFDIR)
return nfserr_isdir;
- return nfserr_inval;
+ return nfserr_wrong_type;
}
-static bool nfsd_originating_port_ok(struct svc_rqst *rqstp, int flags)
+static bool nfsd_originating_port_ok(struct svc_rqst *rqstp,
+ struct svc_cred *cred,
+ struct svc_export *exp)
{
- if (flags & NFSEXP_INSECURE_PORT)
+ if (nfsexp_flags(cred, exp) & NFSEXP_INSECURE_PORT)
return true;
/* We don't require gss requests to use low ports: */
- if (rqstp->rq_cred.cr_flavor >= RPC_AUTH_GSS)
+ if (cred->cr_flavor >= RPC_AUTH_GSS)
return true;
return test_bit(RQ_SECURE, &rqstp->rq_flags);
}
static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
+ struct svc_cred *cred,
struct svc_export *exp)
{
- int flags = nfsexp_flags(rqstp, exp);
-
/* Check if the request originated from a secure port. */
- if (!nfsd_originating_port_ok(rqstp, flags)) {
+ if (rqstp && !nfsd_originating_port_ok(rqstp, cred, exp)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk("nfsd: request from insecure port %s!\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
@@ -113,23 +112,15 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
}
/* Set user creds for this exportpoint */
- return nfserrno(nfsd_setuser(rqstp, exp));
+ return nfserrno(nfsd_setuser(cred, exp));
}
-static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
- struct dentry *dentry, struct svc_export *exp)
+static inline __be32 check_pseudo_root(struct dentry *dentry,
+ struct svc_export *exp)
{
if (!(exp->ex_flags & NFSEXP_V4ROOT))
return nfs_ok;
/*
- * v2/v3 clients have no need for the V4ROOT export--they use
- * the mount protocl instead; also, further V4ROOT checks may be
- * in v4-specific code, in which case v2/v3 clients could bypass
- * them.
- */
- if (!nfsd_v4client(rqstp))
- return nfserr_stale;
- /*
* We're exposing only the directories and symlinks that have to be
* traversed on the way to real exports:
*/
@@ -151,7 +142,11 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
* dentry. On success, the results are used to set fh_export and
* fh_dentry.
*/
-static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
+static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
+ struct svc_cred *cred,
+ struct auth_domain *client,
+ struct auth_domain *gssclient,
+ struct svc_fh *fhp)
{
struct knfsd_fh *fh = &fhp->fh_handle;
struct fid *fid = NULL;
@@ -162,10 +157,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
int len;
__be32 error;
- error = nfserr_stale;
- if (rqstp->rq_vers > 2)
- error = nfserr_badhandle;
- if (rqstp->rq_vers == 4 && fh->fh_size == 0)
+ error = nfserr_badhandle;
+ if (fh->fh_size == 0)
return nfserr_nofilehandle;
if (fh->fh_version != 1)
@@ -195,7 +188,9 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
data_left -= len;
if (data_left < 0)
return error;
- exp = rqst_exp_find(rqstp, fh->fh_fsid_type, fh->fh_fsid);
+ exp = rqst_exp_find(rqstp ? &rqstp->rq_chandle : NULL,
+ net, client, gssclient,
+ fh->fh_fsid_type, fh->fh_fsid);
fid = (struct fid *)(fh->fh_fsid + len);
error = nfserr_stale;
@@ -229,7 +224,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
put_cred(override_creds(new));
put_cred(new);
} else {
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
goto out;
}
@@ -237,9 +232,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
/*
* Look up the dentry using the NFS file handle.
*/
- error = nfserr_stale;
- if (rqstp->rq_vers > 2)
- error = nfserr_badhandle;
+ error = nfserr_badhandle;
fileid_type = fh->fh_fileid_type;
@@ -278,17 +271,25 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
- switch (rqstp->rq_vers) {
- case 4:
+ switch (fhp->fh_maxsize) {
+ case NFS4_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
fhp->fh_no_atomic_attr = true;
+ fhp->fh_64bit_cookies = true;
break;
- case 3:
+ case NFS3_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOWCC)
fhp->fh_no_wcc = true;
+ fhp->fh_64bit_cookies = true;
+ if (exp->ex_flags & NFSEXP_V4ROOT)
+ goto out;
break;
- case 2:
+ case NFS_FHSIZE:
fhp->fh_no_wcc = true;
+ if (EX_WGATHER(exp))
+ fhp->fh_use_wgather = true;
+ if (exp->ex_flags & NFSEXP_V4ROOT)
+ goto out;
}
return 0;
@@ -298,42 +299,33 @@ out:
}
/**
- * fh_verify - filehandle lookup and access checking
- * @rqstp: pointer to current rpc request
+ * __fh_verify - filehandle lookup and access checking
+ * @rqstp: RPC transaction context, or NULL
+ * @net: net namespace in which to perform the export lookup
+ * @cred: RPC user credential
+ * @client: RPC auth domain
+ * @gssclient: RPC GSS auth domain, or NULL
* @fhp: filehandle to be verified
* @type: expected type of object pointed to by filehandle
* @access: type of access needed to object
*
- * Look up a dentry from the on-the-wire filehandle, check the client's
- * access to the export, and set the current task's credentials.
- *
- * Regardless of success or failure of fh_verify(), fh_put() should be
- * called on @fhp when the caller is finished with the filehandle.
- *
- * fh_verify() may be called multiple times on a given filehandle, for
- * example, when processing an NFSv4 compound. The first call will look
- * up a dentry using the on-the-wire filehandle. Subsequent calls will
- * skip the lookup and just perform the other checks and possibly change
- * the current task's credentials.
- *
- * @type specifies the type of object expected using one of the S_IF*
- * constants defined in include/linux/stat.h. The caller may use zero
- * to indicate that it doesn't care, or a negative integer to indicate
- * that it expects something not of the given type.
- *
- * @access is formed from the NFSD_MAY_* constants defined in
- * fs/nfsd/vfs.h.
+ * See fh_verify() for further descriptions of @fhp, @type, and @access.
*/
-__be32
-fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+static __be32
+__fh_verify(struct svc_rqst *rqstp,
+ struct net *net, struct svc_cred *cred,
+ struct auth_domain *client,
+ struct auth_domain *gssclient,
+ struct svc_fh *fhp, umode_t type, int access)
{
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_export *exp = NULL;
struct dentry *dentry;
__be32 error;
if (!fhp->fh_dentry) {
- error = nfsd_set_fh_dentry(rqstp, fhp);
+ error = nfsd_set_fh_dentry(rqstp, net, cred, client,
+ gssclient, fhp);
if (error)
goto out;
}
@@ -358,15 +350,15 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
* (for example, if different id-squashing options are in
* effect on the new filesystem).
*/
- error = check_pseudo_root(rqstp, dentry, exp);
+ error = check_pseudo_root(dentry, exp);
if (error)
goto out;
- error = nfsd_setuser_and_check_port(rqstp, exp);
+ error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
goto out;
- error = nfsd_mode_check(rqstp, dentry, type);
+ error = nfsd_mode_check(dentry, type);
if (error)
goto out;
@@ -392,7 +384,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
skip_pseudoflavor_check:
/* Finally, check access permissions. */
- error = nfsd_permission(rqstp, exp, dentry, access);
+ error = nfsd_permission(cred, exp, dentry, access);
out:
trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
if (error == nfserr_stale)
@@ -400,6 +392,63 @@ out:
return error;
}
+/**
+ * fh_verify_local - filehandle lookup and access checking
+ * @net: net namespace in which to perform the export lookup
+ * @cred: RPC user credential
+ * @client: RPC auth domain
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
+ *
+ * This API can be used by callers who do not have an RPC
+ * transaction context (i.e., are not running in an nfsd thread).
+ *
+ * See fh_verify() for further descriptions of @fhp, @type, and @access.
+ */
+__be32
+fh_verify_local(struct net *net, struct svc_cred *cred,
+ struct auth_domain *client, struct svc_fh *fhp,
+ umode_t type, int access)
+{
+ return __fh_verify(NULL, net, cred, client, NULL,
+ fhp, type, access);
+}
+
+/**
+ * fh_verify - filehandle lookup and access checking
+ * @rqstp: pointer to current rpc request
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
+ *
+ * Look up a dentry from the on-the-wire filehandle, check the client's
+ * access to the export, and set the current task's credentials.
+ *
+ * Regardless of success or failure of fh_verify(), fh_put() should be
+ * called on @fhp when the caller is finished with the filehandle.
+ *
+ * fh_verify() may be called multiple times on a given filehandle, for
+ * example, when processing an NFSv4 compound. The first call will look
+ * up a dentry using the on-the-wire filehandle. Subsequent calls will
+ * skip the lookup and just perform the other checks and possibly change
+ * the current task's credentials.
+ *
+ * @type specifies the type of object expected using one of the S_IF*
+ * constants defined in include/linux/stat.h. The caller may use zero
+ * to indicate that it doesn't care, or a negative integer to indicate
+ * that it expects something not of the given type.
+ *
+ * @access is formed from the NFSD_MAY_* constants defined in
+ * fs/nfsd/vfs.h.
+ */
+__be32
+fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+{
+ return __fh_verify(rqstp, SVC_NET(rqstp), &rqstp->rq_cred,
+ rqstp->rq_client, rqstp->rq_gssclient,
+ fhp, type, access);
+}
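
The split follows a common refactoring shape: hoist the transport-independent core into a static helper that takes every dependency explicitly, then keep thin entry points that either unpack an RPC request (fh_verify) or pass caller-supplied values with a NULL request (fh_verify_local). A hypothetical reduction of that shape, with invented names:

#include <stdio.h>
#include <stddef.h>

struct cred { int uid; };
struct request { struct cred cred; int net_id; };

/* Core takes explicit pieces; req may be NULL for non-RPC callers. */
static int verify_core(struct request *req, int net_id, struct cred *cred)
{
	(void)req; /* only needed for optional request-scoped tracing */
	printf("verify in net %d as uid %d\n", net_id, cred->uid);
	return 0;
}

/* RPC entry point: everything comes from the request. */
static int verify(struct request *req)
{
	return verify_core(req, req->net_id, &req->cred);
}

/* Local entry point: no request context available. */
static int verify_local(int net_id, struct cred *cred)
{
	return verify_core(NULL, net_id, cred);
}

int main(void)
{
	struct request r = { { 1000 }, 7 };
	struct cred c = { 0 };

	return verify(&r) || verify_local(7, &c);
}
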
/*
* Compose a file handle for an NFS reply.
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 6ebdf7ea27bf..5b7394801dc4 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -88,6 +88,8 @@ typedef struct svc_fh {
* wcc data is not atomic with
* operation
*/
+ bool fh_use_wgather; /* NFSv2 wgather option */
+ bool fh_64bit_cookies;/* readdir cookie size */
int fh_flags; /* FH flags */
bool fh_post_saved; /* post-op attrs saved */
bool fh_pre_saved; /* pre-op attrs saved */
@@ -215,6 +217,8 @@ extern char * SVCFH_fmt(struct svc_fh *fhp);
* Function prototypes
*/
__be32 fh_verify(struct svc_rqst *, struct svc_fh *, umode_t, int);
+__be32 fh_verify_local(struct net *, struct svc_cred *, struct auth_domain *,
+ struct svc_fh *, umode_t, int);
__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
__be32 fh_update(struct svc_fh *);
void fh_put(struct svc_fh *);
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 36370b957b63..6dda081eb24c 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -13,6 +13,31 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
+static __be32 nfsd_map_status(__be32 status)
+{
+ switch (status) {
+ case nfs_ok:
+ break;
+ case nfserr_nofilehandle:
+ case nfserr_badhandle:
+ status = nfserr_stale;
+ break;
+ case nfserr_wrongsec:
+ case nfserr_xdev:
+ case nfserr_file_open:
+ status = nfserr_acces;
+ break;
+ case nfserr_symlink_not_dir:
+ status = nfserr_notdir;
+ break;
+ case nfserr_symlink:
+ case nfserr_wrong_type:
+ status = nfserr_inval;
+ break;
+ }
+ return status;
+}
+
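
nfsd_map_status() clamps post-v2 status codes to the small set NFSv2 clients understand, so the common code paths no longer need per-version special cases. A userspace model of the mapping with invented numeric values (the real nfserr_* constants are on-the-wire __be32 values):

#include <assert.h>

/* Invented stand-ins; only the mapping structure matters. */
enum {
	OK = 0, STALE = 70, ACCES = 13, NOTDIR = 20, INVAL = 22,
	BADHANDLE = 10001, NOFILEHANDLE = 10020, WRONGSEC = 10016,
	XDEV = 18, FILE_OPEN = 10046, SYMLINK_NOT_DIR = 10005,
	SYMLINK = 10029, WRONG_TYPE = 10083,
};

static int map_status_v2(int status)
{
	switch (status) {
	case NOFILEHANDLE:
	case BADHANDLE:
		return STALE;
	case WRONGSEC:
	case XDEV:
	case FILE_OPEN:
		return ACCES;
	case SYMLINK_NOT_DIR:
		return NOTDIR;
	case SYMLINK:
	case WRONG_TYPE:
		return INVAL;
	default:
		return status; /* already v2-safe */
	}
}

int main(void)
{
	assert(map_status_v2(BADHANDLE) == STALE);
	assert(map_status_v2(XDEV) == ACCES);
	assert(map_status_v2(OK) == OK);
	return 0;
}
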
static __be32
nfsd_proc_null(struct svc_rqst *rqstp)
{
@@ -38,6 +63,7 @@ nfsd_proc_getattr(struct svc_rqst *rqstp)
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -109,6 +135,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -143,6 +170,7 @@ nfsd_proc_lookup(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -164,6 +192,7 @@ nfsd_proc_readlink(struct svc_rqst *rqstp)
page_address(resp->page), &resp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -200,6 +229,7 @@ nfsd_proc_read(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
set_bit(RQ_DROPME, &rqstp->rq_flags);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -235,6 +265,7 @@ nfsd_proc_write(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
set_bit(RQ_DROPME, &rqstp->rq_flags);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -331,10 +362,11 @@ nfsd_proc_create(struct svc_rqst *rqstp)
* echo thing > device-special-file-or-pipe
* by doing a CREATE with type==0
*/
- resp->status = nfsd_permission(rqstp,
- newfhp->fh_export,
- newfhp->fh_dentry,
- NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS);
+ resp->status = nfsd_permission(
+ &rqstp->rq_cred,
+ newfhp->fh_export,
+ newfhp->fh_dentry,
+ NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS);
if (resp->status && resp->status != nfserr_rofs)
goto out_unlock;
}
@@ -403,6 +435,7 @@ done:
goto out;
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -419,6 +452,7 @@ nfsd_proc_remove(struct svc_rqst *rqstp)
resp->status = nfsd_unlink(rqstp, &argp->fh, -S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -437,6 +471,7 @@ nfsd_proc_rename(struct svc_rqst *rqstp)
&argp->tfh, argp->tname, argp->tlen);
fh_put(&argp->ffh);
fh_put(&argp->tfh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -457,6 +492,7 @@ nfsd_proc_link(struct svc_rqst *rqstp)
&argp->ffh);
fh_put(&argp->ffh);
fh_put(&argp->tfh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -495,6 +531,7 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
fh_put(&argp->ffh);
fh_put(&newfh);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -528,6 +565,7 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
out:
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -545,6 +583,7 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
resp->status = nfsd_unlink(rqstp, &argp->fh, S_IFDIR,
argp->name, argp->len);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -590,6 +629,7 @@ nfsd_proc_readdir(struct svc_rqst *rqstp)
nfssvc_encode_nfscookie(resp, offset);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
@@ -607,6 +647,7 @@ nfsd_proc_statfs(struct svc_rqst *rqstp)
resp->status = nfsd_statfs(rqstp, &argp->fh, &resp->stats,
NFSD_MAY_BYPASS_GSS_ON_ROOT);
fh_put(&argp->fh);
+ resp->status = nfsd_map_status(resp->status);
return rpc_success;
}
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 0bc8eaa5e009..e236135ddc63 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -19,6 +19,7 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
+#include <linux/nfslocalio.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
@@ -35,7 +36,6 @@
#define NFSDDBG_FACILITY NFSDDBG_SVC
atomic_t nfsd_th_cnt = ATOMIC_INIT(0);
-extern struct svc_program nfsd_program;
static int nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int nfsd_acl_rpcbind_set(struct net *,
@@ -80,6 +80,15 @@ DEFINE_SPINLOCK(nfsd_drc_lock);
unsigned long nfsd_drc_max_mem;
unsigned long nfsd_drc_mem_used;
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+static const struct svc_version *localio_versions[] = {
+ [1] = &localio_version1,
+};
+
+#define NFSD_LOCALIO_NRVERS ARRAY_SIZE(localio_versions)
+
+#endif /* CONFIG_NFS_LOCALIO */
+
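
localio_versions[] uses a designated initializer so the RPC version number doubles as the array index; unset slots stay NULL and read as "unsupported". A standalone illustration of the idiom:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct version { const char *name; };

static const struct version version1 = { "v1" };

/* Slot 0 is intentionally NULL: version numbers start at 1 here. */
static const struct version *versions[] = {
	[1] = &version1,
};

int main(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(versions); i++)
		printf("slot %zu: %s\n", i,
		       versions[i] ? versions[i]->name : "(unsupported)");
	return 0;
}
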
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static const struct svc_version *nfsd_acl_version[] = {
# if defined(CONFIG_NFSD_V2_ACL)
@@ -90,23 +99,12 @@ static const struct svc_version *nfsd_acl_version[] = {
# endif
};
-#define NFSD_ACL_MINVERS 2
+#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
-static struct svc_program nfsd_acl_program = {
- .pg_prog = NFS_ACL_PROGRAM,
- .pg_nvers = NFSD_ACL_NRVERS,
- .pg_vers = nfsd_acl_version,
- .pg_name = "nfsacl",
- .pg_class = "nfsd",
- .pg_authenticate = &svc_set_client,
- .pg_init_request = nfsd_acl_init_request,
- .pg_rpcbind_set = nfsd_acl_rpcbind_set,
-};
-
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
-static const struct svc_version *nfsd_version[] = {
+static const struct svc_version *nfsd_version[NFSD_MAXVERS+1] = {
#if defined(CONFIG_NFSD_V2)
[2] = &nfsd_version2,
#endif
@@ -116,97 +114,63 @@ static const struct svc_version *nfsd_version[] = {
#endif
};
-#define NFSD_MINVERS 2
-#define NFSD_NRVERS ARRAY_SIZE(nfsd_version)
-
-struct svc_program nfsd_program = {
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
- .pg_next = &nfsd_acl_program,
-#endif
+struct svc_program nfsd_programs[] = {
+ {
.pg_prog = NFS_PROGRAM, /* program number */
- .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */
+ .pg_nvers = NFSD_MAXVERS+1, /* nr of entries in nfsd_version */
.pg_vers = nfsd_version, /* version table */
.pg_name = "nfsd", /* program name */
.pg_class = "nfsd", /* authentication class */
- .pg_authenticate = &svc_set_client, /* export authentication */
+ .pg_authenticate = svc_set_client, /* export authentication */
.pg_init_request = nfsd_init_request,
.pg_rpcbind_set = nfsd_rpcbind_set,
+ },
+#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+ {
+ .pg_prog = NFS_ACL_PROGRAM,
+ .pg_nvers = NFSD_ACL_NRVERS,
+ .pg_vers = nfsd_acl_version,
+ .pg_name = "nfsacl",
+ .pg_class = "nfsd",
+ .pg_authenticate = svc_set_client,
+ .pg_init_request = nfsd_acl_init_request,
+ .pg_rpcbind_set = nfsd_acl_rpcbind_set,
+ },
+#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ {
+ .pg_prog = NFS_LOCALIO_PROGRAM,
+ .pg_nvers = NFSD_LOCALIO_NRVERS,
+ .pg_vers = localio_versions,
+ .pg_name = "nfslocalio",
+ .pg_class = "nfsd",
+ .pg_authenticate = svc_set_client,
+ .pg_init_request = svc_generic_init_request,
+ .pg_rpcbind_set = svc_generic_rpcbind_set,
+ }
+#endif /* CONFIG_NFS_LOCALIO */
};
bool nfsd_support_version(int vers)
{
- if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
+ if (vers >= NFSD_MINVERS && vers <= NFSD_MAXVERS)
return nfsd_version[vers] != NULL;
return false;
}
-static bool *
-nfsd_alloc_versions(void)
-{
- bool *vers = kmalloc_array(NFSD_NRVERS, sizeof(bool), GFP_KERNEL);
- unsigned i;
-
- if (vers) {
- /* All compiled versions are enabled by default */
- for (i = 0; i < NFSD_NRVERS; i++)
- vers[i] = nfsd_support_version(i);
- }
- return vers;
-}
-
-static bool *
-nfsd_alloc_minorversions(void)
-{
- bool *vers = kmalloc_array(NFSD_SUPPORTED_MINOR_VERSION + 1,
- sizeof(bool), GFP_KERNEL);
- unsigned i;
-
- if (vers) {
- /* All minor versions are enabled by default */
- for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
- vers[i] = nfsd_support_version(4);
- }
- return vers;
-}
-
-void
-nfsd_netns_free_versions(struct nfsd_net *nn)
-{
- kfree(nn->nfsd_versions);
- kfree(nn->nfsd4_minorversions);
- nn->nfsd_versions = NULL;
- nn->nfsd4_minorversions = NULL;
-}
-
-static void
-nfsd_netns_init_versions(struct nfsd_net *nn)
-{
- if (!nn->nfsd_versions) {
- nn->nfsd_versions = nfsd_alloc_versions();
- nn->nfsd4_minorversions = nfsd_alloc_minorversions();
- if (!nn->nfsd_versions || !nn->nfsd4_minorversions)
- nfsd_netns_free_versions(nn);
- }
-}
-
int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change)
{
- if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
+ if (vers < NFSD_MINVERS || vers > NFSD_MAXVERS)
return 0;
switch(change) {
case NFSD_SET:
- if (nn->nfsd_versions)
- nn->nfsd_versions[vers] = nfsd_support_version(vers);
+ nn->nfsd_versions[vers] = nfsd_support_version(vers);
break;
case NFSD_CLEAR:
- nfsd_netns_init_versions(nn);
- if (nn->nfsd_versions)
- nn->nfsd_versions[vers] = false;
+ nn->nfsd_versions[vers] = false;
break;
case NFSD_TEST:
- if (nn->nfsd_versions)
- return nn->nfsd_versions[vers];
- fallthrough;
+ return nn->nfsd_versions[vers];
case NFSD_AVAIL:
return nfsd_support_version(vers);
}
@@ -233,23 +197,16 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
switch(change) {
case NFSD_SET:
- if (nn->nfsd4_minorversions) {
- nfsd_vers(nn, 4, NFSD_SET);
- nn->nfsd4_minorversions[minorversion] =
- nfsd_vers(nn, 4, NFSD_TEST);
- }
+ nfsd_vers(nn, 4, NFSD_SET);
+ nn->nfsd4_minorversions[minorversion] =
+ nfsd_vers(nn, 4, NFSD_TEST);
break;
case NFSD_CLEAR:
- nfsd_netns_init_versions(nn);
- if (nn->nfsd4_minorversions) {
- nn->nfsd4_minorversions[minorversion] = false;
- nfsd_adjust_nfsd_versions4(nn);
- }
+ nn->nfsd4_minorversions[minorversion] = false;
+ nfsd_adjust_nfsd_versions4(nn);
break;
case NFSD_TEST:
- if (nn->nfsd4_minorversions)
- return nn->nfsd4_minorversions[minorversion];
- return nfsd_vers(nn, 4, NFSD_TEST);
+ return nn->nfsd4_minorversions[minorversion];
case NFSD_AVAIL:
return minorversion <= NFSD_SUPPORTED_MINOR_VERSION &&
nfsd_vers(nn, 4, NFSD_AVAIL);
@@ -257,6 +214,34 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change
return 0;
}
+bool nfsd_serv_try_get(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref));
+}
+
+void nfsd_serv_put(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ percpu_ref_put(&nn->nfsd_serv_ref);
+}
+
+static void nfsd_serv_done(struct percpu_ref *ref)
+{
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+
+ complete(&nn->nfsd_serv_confirm_done);
+}
+
+static void nfsd_serv_free(struct percpu_ref *ref)
+{
+ struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);
+
+ complete(&nn->nfsd_serv_free_done);
+}
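
The new per-netns nfsd_serv_ref follows the usual percpu_ref shutdown sequence: kill the ref so percpu_ref_tryget_live() starts failing, wait for the confirmation callback, then wait for the final put to run the free callback. A loose single-counter userspace analogy (percpu_ref itself spreads the count across CPUs, and its check-then-get is atomic, unlike this sketch):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct ref {
	atomic_long count; /* active users + one internal ref */
	atomic_bool dead;  /* set by kill; refuses new users */
	bool freed;        /* "free callback ran" */
};

static bool ref_tryget_live(struct ref *r)
{
	if (atomic_load(&r->dead))
		return false;
	atomic_fetch_add(&r->count, 1);
	return true;
}

static void ref_put(struct ref *r)
{
	/* Last put after a kill stands in for the free callback. */
	if (atomic_fetch_sub(&r->count, 1) == 1 && atomic_load(&r->dead))
		r->freed = true;
}

static void ref_kill(struct ref *r)
{
	atomic_store(&r->dead, true);
	ref_put(r); /* drop the implicit initial reference */
}

int main(void)
{
	struct ref r = { 1, false, false };

	assert(ref_tryget_live(&r));  /* a user pins the server */
	ref_kill(&r);                 /* shutdown begins */
	assert(!ref_tryget_live(&r)); /* new users are refused */
	assert(!r.freed);             /* existing user still holds it */
	ref_put(&r);                  /* last put triggers "free" */
	assert(r.freed);
	return 0;
}
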
+
/*
* Maximum number of nfsd processes
*/
@@ -456,6 +441,7 @@ static void nfsd_shutdown_net(struct net *net)
lockd_down(net);
nn->lockd_up = false;
}
+ percpu_ref_exit(&nn->nfsd_serv_ref);
nn->nfsd_net_up = false;
nfsd_shutdown_generic();
}
@@ -535,6 +521,13 @@ void nfsd_destroy_serv(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv = nn->nfsd_serv;
+ lockdep_assert_held(&nfsd_mutex);
+
+ percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done);
+ wait_for_completion(&nn->nfsd_serv_confirm_done);
+ wait_for_completion(&nn->nfsd_serv_free_done);
+ /* percpu_ref_exit is called in nfsd_shutdown_net */
+
spin_lock(&nfsd_notifier_lock);
nn->nfsd_serv = NULL;
spin_unlock(&nfsd_notifier_lock);
@@ -568,11 +561,11 @@ void nfsd_reset_versions(struct nfsd_net *nn)
{
int i;
- for (i = 0; i < NFSD_NRVERS; i++)
+ for (i = 0; i <= NFSD_MAXVERS; i++)
if (nfsd_vers(nn, i, NFSD_TEST))
return;
- for (i = 0; i < NFSD_NRVERS; i++)
+ for (i = 0; i <= NFSD_MAXVERS; i++)
if (i != 4)
nfsd_vers(nn, i, NFSD_SET);
else {
@@ -642,9 +635,11 @@ void nfsd_shutdown_threads(struct net *net)
mutex_unlock(&nfsd_mutex);
}
-bool i_am_nfsd(void)
+struct svc_rqst *nfsd_current_rqst(void)
{
- return kthread_func(current) == nfsd;
+ if (kthread_func(current) == nfsd)
+ return kthread_data(current);
+ return NULL;
}
int nfsd_create_serv(struct net *net)
@@ -657,10 +652,18 @@ int nfsd_create_serv(struct net *net)
if (nn->nfsd_serv)
return 0;
+ error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free,
+ 0, GFP_KERNEL);
+ if (error)
+ return error;
+ init_completion(&nn->nfsd_serv_free_done);
+ init_completion(&nn->nfsd_serv_confirm_done);
+
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
nfsd_reset_versions(nn);
- serv = svc_create_pooled(&nfsd_program, &nn->nfsd_svcstats,
+ serv = svc_create_pooled(nfsd_programs, ARRAY_SIZE(nfsd_programs),
+ &nn->nfsd_svcstats,
nfsd_max_blksize, nfsd);
if (serv == NULL)
return -ENOMEM;
@@ -705,7 +708,7 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
if (serv)
for (i = 0; i < serv->sv_nrpools && i < n; i++)
- nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
+ nthreads[i] = serv->sv_pools[i].sp_nrthreads;
return 0;
}
@@ -905,17 +908,17 @@ nfsd_init_request(struct svc_rqst *rqstp,
if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
return svc_generic_init_request(rqstp, progp, ret);
- ret->mismatch.lovers = NFSD_NRVERS;
- for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
+ ret->mismatch.lovers = NFSD_MAXVERS + 1;
+ for (i = NFSD_MINVERS; i <= NFSD_MAXVERS; i++) {
if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.lovers = i;
break;
}
}
- if (ret->mismatch.lovers == NFSD_NRVERS)
+ if (ret->mismatch.lovers > NFSD_MAXVERS)
return rpc_prog_unavail;
ret->mismatch.hivers = NFSD_MINVERS;
- for (i = NFSD_NRVERS - 1; i >= NFSD_MINVERS; i--) {
+ for (i = NFSD_MAXVERS; i >= NFSD_MINVERS; i--) {
if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.hivers = i;
break;
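
The mismatch bounds are now computed over the inclusive [NFSD_MINVERS, NFSD_MAXVERS] range rather than the old exclusive NFSD_NRVERS count. A compact model of the two scans, assuming MINVERS = 2 and MAXVERS = 4 as in current nfsd:

#include <assert.h>
#include <stdbool.h>

enum { MINVERS = 2, MAXVERS = 4 };

static bool enabled[MAXVERS + 1] = { [3] = true, [4] = true };

int main(void)
{
	int lovers = MAXVERS + 1, hivers = MINVERS;
	int i;

	for (i = MINVERS; i <= MAXVERS; i++)  /* lowest enabled version */
		if (enabled[i]) { lovers = i; break; }
	for (i = MAXVERS; i >= MINVERS; i--)  /* highest enabled version */
		if (enabled[i]) { hivers = i; break; }

	assert(lovers == 3 && hivers == 4);
	assert(lovers <= MAXVERS); /* else: program unavailable */
	return 0;
}
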
@@ -937,11 +940,9 @@ nfsd(void *vrqstp)
/* At this point, the thread shares current->fs
* with the init process. We need to create files with the
- * umask as defined by the client instead of init's umask. */
- if (unshare_fs_struct() < 0) {
- printk("Unable to start nfsd thread: out of memory\n");
- goto out;
- }
+ * umask as defined by the client instead of init's umask.
+ */
+ svc_thread_init_status(rqstp, unshare_fs_struct());
current->fs->umask = 0;
@@ -963,14 +964,13 @@ nfsd(void *vrqstp)
atomic_dec(&nfsd_th_cnt);
-out:
/* Release the thread */
svc_exit_thread(rqstp);
return 0;
}
/**
- * nfsd_dispatch - Process an NFS or NFSACL Request
+ * nfsd_dispatch - Process an NFS, NFSACL, or LOCALIO Request
* @rqstp: incoming request
*
* This RPC dispatcher integrates the NFS server's duplicate reply cache.
@@ -1084,10 +1084,3 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
return true;
}
-
-int nfsd_pool_stats_open(struct inode *inode, struct file *file)
-{
- struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
-
- return svc_pool_stats_open(&nn->nfsd_info, file);
-}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index ec4559ecd193..79c743c01a47 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -79,6 +79,7 @@ struct nfsd4_callback_ops {
void (*prepare)(struct nfsd4_callback *);
int (*done)(struct nfsd4_callback *, struct rpc_task *);
void (*release)(struct nfsd4_callback *);
+ uint32_t opcode;
};
/*
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 77bbd23aa150..c625966cfcf3 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -86,7 +86,8 @@ DEFINE_NFSD_XDR_ERR_EVENT(cant_encode);
{ NFSD_MAY_NOT_BREAK_LEASE, "NOT_BREAK_LEASE" }, \
{ NFSD_MAY_BYPASS_GSS, "BYPASS_GSS" }, \
{ NFSD_MAY_READ_IF_EXEC, "READ_IF_EXEC" }, \
- { NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" })
+ { NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" }, \
+ { NFSD_MAY_LOCALIO, "LOCALIO" })
TRACE_EVENT(nfsd_compound,
TP_PROTO(
@@ -193,7 +194,7 @@ TRACE_EVENT(nfsd_compound_encode_err,
{ S_IFIFO, "FIFO" }, \
{ S_IFSOCK, "SOCK" })
-TRACE_EVENT(nfsd_fh_verify,
+TRACE_EVENT_CONDITION(nfsd_fh_verify,
TP_PROTO(
const struct svc_rqst *rqstp,
const struct svc_fh *fhp,
@@ -201,6 +202,7 @@ TRACE_EVENT(nfsd_fh_verify,
int access
),
TP_ARGS(rqstp, fhp, type, access),
+ TP_CONDITION(rqstp != NULL),
TP_STRUCT__entry(
__field(unsigned int, netns_ino)
__sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
@@ -239,7 +241,7 @@ TRACE_EVENT_CONDITION(nfsd_fh_verify_err,
__be32 error
),
TP_ARGS(rqstp, fhp, type, access, error),
- TP_CONDITION(error),
+ TP_CONDITION(rqstp != NULL && error),
TP_STRUCT__entry(
__field(unsigned int, netns_ino)
__sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
@@ -295,12 +297,13 @@ DECLARE_EVENT_CLASS(nfsd_fh_err_class,
__entry->status)
)
-#define DEFINE_NFSD_FH_ERR_EVENT(name) \
-DEFINE_EVENT(nfsd_fh_err_class, nfsd_##name, \
- TP_PROTO(struct svc_rqst *rqstp, \
- struct svc_fh *fhp, \
- int status), \
- TP_ARGS(rqstp, fhp, status))
+#define DEFINE_NFSD_FH_ERR_EVENT(name) \
+DEFINE_EVENT_CONDITION(nfsd_fh_err_class, nfsd_##name, \
+ TP_PROTO(struct svc_rqst *rqstp, \
+ struct svc_fh *fhp, \
+ int status), \
+ TP_ARGS(rqstp, fhp, status), \
+ TP_CONDITION(rqstp != NULL))
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badexport);
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badhandle);
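
Converting these to TRACE_EVENT_CONDITION / DEFINE_EVENT_CONDITION keeps callers that pass rqstp == NULL (the LOCALIO path through __fh_verify) from dereferencing the request in TP_fast_assign. The guard pattern, reduced to plain C:

#include <stdio.h>
#include <stddef.h>

struct rqst { const char *client; };

/* Fires only when rq is non-NULL, like TP_CONDITION(rqstp != NULL). */
#define TRACE_FH_ERR(rq, status) do {				\
	if ((rq) != NULL)					\
		printf("fh err: client=%s status=%d\n",		\
		       (rq)->client, (status));			\
} while (0)

int main(void)
{
	struct rqst r = { "10.0.0.1" };

	TRACE_FH_ERR(&r, -70);   /* emitted */
	TRACE_FH_ERR(NULL, -70); /* silently skipped (localio path) */
	return 0;
}
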
@@ -1486,6 +1489,9 @@ DEFINE_NFSD_CB_EVENT(new_state);
DEFINE_NFSD_CB_EVENT(probe);
DEFINE_NFSD_CB_EVENT(lost);
DEFINE_NFSD_CB_EVENT(shutdown);
+DEFINE_NFSD_CB_EVENT(rpc_prepare);
+DEFINE_NFSD_CB_EVENT(rpc_done);
+DEFINE_NFSD_CB_EVENT(rpc_release);
TRACE_DEFINE_ENUM(RPC_AUTH_NULL);
TRACE_DEFINE_ENUM(RPC_AUTH_UNIX);
@@ -1553,6 +1559,19 @@ TRACE_EVENT(nfsd_cb_setup_err,
__entry->error)
);
+/* Not a real opcode; no CB operation uses 0, so it serves as "none". */
+#define _CB_NULL 0
+
+#define show_nfsd_cb_opcode(val) \
+ __print_symbolic(val, \
+ { _CB_NULL, "CB_NULL" }, \
+ { OP_CB_GETATTR, "CB_GETATTR" }, \
+ { OP_CB_RECALL, "CB_RECALL" }, \
+ { OP_CB_LAYOUTRECALL, "CB_LAYOUTRECALL" }, \
+ { OP_CB_RECALL_ANY, "CB_RECALL_ANY" }, \
+ { OP_CB_NOTIFY_LOCK, "CB_NOTIFY_LOCK" }, \
+ { OP_CB_OFFLOAD, "CB_OFFLOAD" })
+
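
show_nfsd_cb_opcode() is a __print_symbolic table that renders the numeric opcode at trace-decode time. The same lookup in ordinary C, with the table falling back to a default string (the CB_* values here are illustrative):

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { CB_NULL = 0, CB_GETATTR = 3, CB_RECALL = 4 };

static const struct { unsigned long val; const char *str; } cb_ops[] = {
	{ CB_NULL,    "CB_NULL" },
	{ CB_GETATTR, "CB_GETATTR" },
	{ CB_RECALL,  "CB_RECALL" },
};

static const char *show_cb_opcode(unsigned long val)
{
	for (size_t i = 0; i < ARRAY_SIZE(cb_ops); i++)
		if (cb_ops[i].val == val)
			return cb_ops[i].str;
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", show_cb_opcode(CB_RECALL)); /* prints CB_RECALL */
	return 0;
}
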
DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
TP_PROTO(
const struct nfs4_client *clp,
@@ -1563,6 +1582,7 @@ DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
__field(u32, cl_boot)
__field(u32, cl_id)
__field(const void *, cb)
+ __field(unsigned long, opcode)
__field(bool, need_restart)
__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
),
@@ -1570,14 +1590,15 @@ DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
__entry->cb = cb;
+ __entry->opcode = cb->cb_ops ? cb->cb_ops->opcode : _CB_NULL;
__entry->need_restart = cb->cb_need_restart;
__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
clp->cl_cb_conn.cb_addrlen)
),
- TP_printk("addr=%pISpc client %08x:%08x cb=%p%s",
- __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
- __entry->cb, __entry->need_restart ?
- " (need restart)" : " (first try)"
+ TP_printk("addr=%pISpc client %08x:%08x cb=%p%s opcode=%s",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id, __entry->cb,
+ __entry->need_restart ? " (need restart)" : " (first try)",
+ show_nfsd_cb_opcode(__entry->opcode)
)
);
@@ -1830,6 +1851,7 @@ DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_recall_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_notify_lock_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_layout_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_offload_done);
+DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_getattr_done);
TRACE_EVENT(nfsd_cb_recall_any_done,
TP_PROTO(
@@ -2127,6 +2149,10 @@ DECLARE_EVENT_CLASS(nfsd_copy_class,
__field(u32, dst_cl_id)
__field(u32, dst_so_id)
__field(u32, dst_si_generation)
+ __field(u32, cb_cl_boot)
+ __field(u32, cb_cl_id)
+ __field(u32, cb_so_id)
+ __field(u32, cb_si_generation)
__field(u64, src_cp_pos)
__field(u64, dst_cp_pos)
__field(u64, cp_count)
@@ -2135,6 +2161,7 @@ DECLARE_EVENT_CLASS(nfsd_copy_class,
TP_fast_assign(
const stateid_t *src_stp = &copy->cp_src_stateid;
const stateid_t *dst_stp = &copy->cp_dst_stateid;
+ const stateid_t *cb_stp = &copy->cp_res.cb_stateid;
__entry->intra = test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
__entry->async = !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
@@ -2146,6 +2173,10 @@ DECLARE_EVENT_CLASS(nfsd_copy_class,
__entry->dst_cl_id = dst_stp->si_opaque.so_clid.cl_id;
__entry->dst_so_id = dst_stp->si_opaque.so_id;
__entry->dst_si_generation = dst_stp->si_generation;
+ __entry->cb_cl_boot = cb_stp->si_opaque.so_clid.cl_boot;
+ __entry->cb_cl_id = cb_stp->si_opaque.so_clid.cl_id;
+ __entry->cb_so_id = cb_stp->si_opaque.so_id;
+ __entry->cb_si_generation = cb_stp->si_generation;
__entry->src_cp_pos = copy->cp_src_pos;
__entry->dst_cp_pos = copy->cp_dst_pos;
__entry->cp_count = copy->cp_count;
@@ -2153,14 +2184,17 @@ DECLARE_EVENT_CLASS(nfsd_copy_class,
sizeof(struct sockaddr_in6));
),
TP_printk("client=%pISpc intra=%d async=%d "
- "src_stateid[si_generation:0x%x cl_boot:0x%x cl_id:0x%x so_id:0x%x] "
- "dst_stateid[si_generation:0x%x cl_boot:0x%x cl_id:0x%x so_id:0x%x] "
+ "src_client %08x:%08x src_stateid %08x:%08x "
+ "dst_client %08x:%08x dst_stateid %08x:%08x "
+ "cb_client %08x:%08x cb_stateid %08x:%08x "
"cp_src_pos=%llu cp_dst_pos=%llu cp_count=%llu",
__get_sockaddr(addr), __entry->intra, __entry->async,
- __entry->src_si_generation, __entry->src_cl_boot,
- __entry->src_cl_id, __entry->src_so_id,
- __entry->dst_si_generation, __entry->dst_cl_boot,
- __entry->dst_cl_id, __entry->dst_so_id,
+ __entry->src_cl_boot, __entry->src_cl_id,
+ __entry->src_so_id, __entry->src_si_generation,
+ __entry->dst_cl_boot, __entry->dst_cl_id,
+ __entry->dst_so_id, __entry->dst_si_generation,
+ __entry->cb_cl_boot, __entry->cb_cl_id,
+ __entry->cb_so_id, __entry->cb_si_generation,
__entry->src_cp_pos, __entry->dst_cp_pos, __entry->cp_count
)
);
@@ -2172,7 +2206,7 @@ DEFINE_EVENT(nfsd_copy_class, nfsd_copy_##name, \
DEFINE_COPY_EVENT(inter);
DEFINE_COPY_EVENT(intra);
-DEFINE_COPY_EVENT(do_async);
+DEFINE_COPY_EVENT(async);
TRACE_EVENT(nfsd_copy_done,
TP_PROTO(
@@ -2193,11 +2227,80 @@ TRACE_EVENT(nfsd_copy_done,
__assign_sockaddr(addr, &copy->cp_clp->cl_addr,
sizeof(struct sockaddr_in6));
),
- TP_printk("addr=%pISpc status=%d intra=%d async=%d ",
+ TP_printk("addr=%pISpc status=%d intra=%d async=%d",
__get_sockaddr(addr), __entry->status, __entry->intra, __entry->async
)
);
+TRACE_EVENT(nfsd_copy_async_done,
+ TP_PROTO(
+ const struct nfsd4_copy *copy
+ ),
+ TP_ARGS(copy),
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(bool, intra)
+ __field(bool, async)
+ __field(u32, src_cl_boot)
+ __field(u32, src_cl_id)
+ __field(u32, src_so_id)
+ __field(u32, src_si_generation)
+ __field(u32, dst_cl_boot)
+ __field(u32, dst_cl_id)
+ __field(u32, dst_so_id)
+ __field(u32, dst_si_generation)
+ __field(u32, cb_cl_boot)
+ __field(u32, cb_cl_id)
+ __field(u32, cb_so_id)
+ __field(u32, cb_si_generation)
+ __field(u64, src_cp_pos)
+ __field(u64, dst_cp_pos)
+ __field(u64, cp_count)
+ __sockaddr(addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ const stateid_t *src_stp = &copy->cp_src_stateid;
+ const stateid_t *dst_stp = &copy->cp_dst_stateid;
+ const stateid_t *cb_stp = &copy->cp_res.cb_stateid;
+
+ __entry->status = be32_to_cpu(copy->nfserr);
+ __entry->intra = test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
+ __entry->async = !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+ __entry->src_cl_boot = src_stp->si_opaque.so_clid.cl_boot;
+ __entry->src_cl_id = src_stp->si_opaque.so_clid.cl_id;
+ __entry->src_so_id = src_stp->si_opaque.so_id;
+ __entry->src_si_generation = src_stp->si_generation;
+ __entry->dst_cl_boot = dst_stp->si_opaque.so_clid.cl_boot;
+ __entry->dst_cl_id = dst_stp->si_opaque.so_clid.cl_id;
+ __entry->dst_so_id = dst_stp->si_opaque.so_id;
+ __entry->dst_si_generation = dst_stp->si_generation;
+ __entry->cb_cl_boot = cb_stp->si_opaque.so_clid.cl_boot;
+ __entry->cb_cl_id = cb_stp->si_opaque.so_clid.cl_id;
+ __entry->cb_so_id = cb_stp->si_opaque.so_id;
+ __entry->cb_si_generation = cb_stp->si_generation;
+ __entry->src_cp_pos = copy->cp_src_pos;
+ __entry->dst_cp_pos = copy->cp_dst_pos;
+ __entry->cp_count = copy->cp_count;
+ __assign_sockaddr(addr, &copy->cp_clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("client=%pISpc status=%d intra=%d async=%d "
+ "src_client %08x:%08x src_stateid %08x:%08x "
+ "dst_client %08x:%08x dst_stateid %08x:%08x "
+ "cb_client %08x:%08x cb_stateid %08x:%08x "
+ "cp_src_pos=%llu cp_dst_pos=%llu cp_count=%llu",
+ __get_sockaddr(addr),
+ __entry->status, __entry->intra, __entry->async,
+ __entry->src_cl_boot, __entry->src_cl_id,
+ __entry->src_so_id, __entry->src_si_generation,
+ __entry->dst_cl_boot, __entry->dst_cl_id,
+ __entry->dst_so_id, __entry->dst_si_generation,
+ __entry->cb_cl_boot, __entry->cb_cl_id,
+ __entry->cb_so_id, __entry->cb_si_generation,
+ __entry->src_cp_pos, __entry->dst_cp_pos, __entry->cp_count
+ )
+);
+
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 29b1f3613800..22325b590e17 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -100,6 +100,7 @@ nfserrno (int errno)
{ nfserr_io, -EUCLEAN },
{ nfserr_perm, -ENOKEY },
{ nfserr_no_grace, -ENOGRACE},
+ { nfserr_io, -EBADMSG },
};
int i;
@@ -421,8 +422,9 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (iap->ia_size < inode->i_size) {
__be32 err;
- err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
- NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
+ err = nfsd_permission(&rqstp->rq_cred,
+ fhp->fh_export, fhp->fh_dentry,
+ NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
if (err)
return err;
}
@@ -814,7 +816,8 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
sresult |= map->access;
- err2 = nfsd_permission(rqstp, export, dentry, map->how);
+ err2 = nfsd_permission(&rqstp->rq_cred, export,
+ dentry, map->how);
switch (err2) {
case nfs_ok:
result |= map->access;
@@ -1160,7 +1163,6 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
errseq_t since;
__be32 nfserr;
int host_err;
- int use_wgather;
loff_t pos = offset;
unsigned long exp_op_flags = 0;
unsigned int pflags = current->flags;
@@ -1186,12 +1188,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
}
exp = fhp->fh_export;
- use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
if (!EX_ISSYNC(exp))
stable = NFS_UNSTABLE;
- if (stable && !use_wgather)
+ if (stable && !fhp->fh_use_wgather)
flags |= RWF_SYNC;
iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt);
@@ -1210,7 +1211,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
if (host_err < 0)
goto out_nfserr;
- if (stable && use_wgather) {
+ if (stable && fhp->fh_use_wgather) {
host_err = wait_for_concurrent_writes(file);
if (host_err < 0)
commit_reset_write_verifier(nn, rqstp, host_err);
@@ -1475,7 +1476,8 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
dirp = d_inode(dentry);
dchild = dget(resfhp->fh_dentry);
- err = nfsd_permission(rqstp, fhp->fh_export, dentry, NFSD_MAY_CREATE);
+ err = nfsd_permission(&rqstp->rq_cred, fhp->fh_export, dentry,
+ NFSD_MAY_CREATE);
if (err)
goto out;
@@ -1767,10 +1769,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
if (!err)
err = nfserrno(commit_metadata(tfhp));
} else {
- if (host_err == -EXDEV && rqstp->rq_vers == 2)
- err = nfserr_acces;
- else
- err = nfserrno(host_err);
+ err = nfserrno(host_err);
}
dput(dnew);
out_drop_write:
@@ -1836,7 +1835,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
- err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ err = nfserr_xdev;
if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
goto out;
if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
@@ -1851,7 +1850,7 @@ retry:
trap = lock_rename(tdentry, fdentry);
if (IS_ERR(trap)) {
- err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ err = nfserr_xdev;
goto out_want_write;
}
err = fh_fill_pre_attrs(ffhp);
@@ -2020,10 +2019,7 @@ out_nfserr:
/* name is mounted-on. There is no perfect
* error status.
*/
- if (nfsd_v4client(rqstp))
- err = nfserr_file_open;
- else
- err = nfserr_acces;
+ err = nfserr_file_open;
} else {
err = nfserrno(host_err);
}
@@ -2178,8 +2174,7 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
loff_t offset = *offsetp;
int may_flags = NFSD_MAY_READ;
- /* NFSv2 only supports 32 bit cookies */
- if (rqstp->rq_vers > 2)
+ if (fhp->fh_64bit_cookies)
may_flags |= NFSD_MAY_64BIT_COOKIE;
err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
@@ -2255,9 +2250,9 @@ nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, in
return err;
}
-static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp)
+static int exp_rdonly(struct svc_cred *cred, struct svc_export *exp)
{
- return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY;
+ return nfsexp_flags(cred, exp) & NFSEXP_READONLY;
}
#ifdef CONFIG_NFSD_V4
@@ -2501,8 +2496,8 @@ out_unlock:
* Check for a user's access permissions to this inode.
*/
__be32
-nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
- struct dentry *dentry, int acc)
+nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
+ struct dentry *dentry, int acc)
{
struct inode *inode = d_inode(dentry);
int err;
@@ -2533,7 +2528,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
*/
if (!(acc & NFSD_MAY_LOCAL_ACCESS))
if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
- if (exp_rdonly(rqstp, exp) ||
+ if (exp_rdonly(cred, exp) ||
__mnt_is_readonly(exp->ex_path.mnt))
return nfserr_rofs;
if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 57cd70062048..3ff146522556 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -33,6 +33,8 @@
#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
+#define NFSD_MAY_LOCALIO 0x2000 /* for tracing, reflects when localio used */
+
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
@@ -153,8 +155,8 @@ __be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *,
__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
struct kstatfs *, int access);
-__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
- struct dentry *, int);
+__be32 nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
+ struct dentry *dentry, int acc);
void nfsd_filp_close(struct file *fp);
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index fbdd42cde1fa..2a21a7662e03 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -713,6 +713,7 @@ struct nfsd4_copy {
struct nfsd4_ssc_umount_item *ss_nsui;
struct nfs_fh c_fh;
nfs4_stateid stateid;
+ struct nfsd_net *cp_nn;
};
static inline void nfsd4_copy_set_sync(struct nfsd4_copy *copy, bool sync)
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index d825a9faca6d..e19d7eb10084 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -37,7 +37,7 @@ void *nilfs_palloc_block_get_entry(const struct inode *, __u64,
int nilfs_palloc_count_max_entries(struct inode *, u64, u64 *);
/**
- * nilfs_palloc_req - persistent allocator request and reply
+ * struct nilfs_palloc_req - persistent allocator request and reply
* @pr_entry_nr: entry number (vblocknr or inode number)
* @pr_desc_bh: buffer head of the buffer containing block group descriptors
* @pr_bitmap_bh: buffer head of the buffer containing a block group bitmap
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index cd14ea25968c..c9e8d9a7d820 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -349,7 +349,7 @@ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh)
}
/**
- * nilfs_bmap_lookup_dirty_buffers -
+ * nilfs_bmap_lookup_dirty_buffers - collect dirty block buffers
* @bmap: bmap
* @listp: pointer to buffer head list
*/
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 608168a5cb88..4656df392722 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -44,6 +44,19 @@ struct nilfs_bmap_stats {
/**
* struct nilfs_bmap_operations - bmap operation table
+ * @bop_lookup: single block search operation
+ * @bop_lookup_contig: consecutive block search operation
+ * @bop_insert: block insertion operation
+ * @bop_delete: block delete operation
+ * @bop_clear: block mapping resource release operation
+ * @bop_propagate: operation to propagate dirty state towards the
+ * mapping root
+ * @bop_lookup_dirty_buffers: operation to collect dirty block buffers
+ * @bop_assign: disk block address assignment operation
+ * @bop_mark: operation to mark in-use blocks as dirty for
+ * relocation by GC
+ * @bop_seek_key: find valid block key operation
+ * @bop_last_key: find last valid block key operation
*/
struct nilfs_bmap_operations {
int (*bop_lookup)(const struct nilfs_bmap *, __u64, int, __u64 *);
@@ -66,7 +79,7 @@ struct nilfs_bmap_operations {
int (*bop_seek_key)(const struct nilfs_bmap *, __u64, __u64 *);
int (*bop_last_key)(const struct nilfs_bmap *, __u64 *);
- /* The following functions are internal use only. */
+ /* private: internal use only */
int (*bop_check_insert)(const struct nilfs_bmap *, __u64);
int (*bop_check_delete)(struct nilfs_bmap *, __u64);
int (*bop_gather_data)(struct nilfs_bmap *, __u64 *, __u64 *, int);
@@ -74,9 +87,8 @@ struct nilfs_bmap_operations {
#define NILFS_BMAP_SIZE (NILFS_INODE_BMAP_SIZE * sizeof(__le64))
-#define NILFS_BMAP_KEY_BIT (sizeof(unsigned long) * 8 /* CHAR_BIT */)
-#define NILFS_BMAP_NEW_PTR_INIT \
- (1UL << (sizeof(unsigned long) * 8 /* CHAR_BIT */ - 1))
+#define NILFS_BMAP_KEY_BIT BITS_PER_LONG
+#define NILFS_BMAP_NEW_PTR_INIT (1UL << (BITS_PER_LONG - 1))
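
Replacing the hand-rolled sizeof(unsigned long) * 8 with BITS_PER_LONG is purely cosmetic; the macro still tags "new" pointers with the most significant bit of an unsigned long. A small demonstration of the tagging scheme:

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG_ (sizeof(unsigned long) * CHAR_BIT)
#define NEW_PTR_FLAG   (1UL << (BITS_PER_LONG_ - 1))

static unsigned long mark_new(unsigned long ptr) { return ptr | NEW_PTR_FLAG; }
static int is_new(unsigned long ptr)             { return (ptr & NEW_PTR_FLAG) != 0; }
static unsigned long strip(unsigned long ptr)    { return ptr & ~NEW_PTR_FLAG; }

int main(void)
{
	unsigned long p = mark_new(42);

	assert(is_new(p));
	assert(strip(p) == 42);
	assert(!is_new(42));
	return 0;
}
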
static inline int nilfs_bmap_is_new_ptr(unsigned long ptr)
{
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index c034080c334b..57b4af5ad646 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -179,11 +179,32 @@ void nilfs_btnode_delete(struct buffer_head *bh)
}
/**
- * nilfs_btnode_prepare_change_key
- * prepare to move contents of the block for old key to one of new key.
- * the old buffer will not be removed, but might be reused for new buffer.
- * it might return -ENOMEM because of memory allocation errors,
- * and might return -EIO because of disk read errors.
+ * nilfs_btnode_prepare_change_key - prepare to change the search key of a
+ * b-tree node block
+ * @btnc: page cache in which the b-tree node block is buffered
+ * @ctxt: structure for exchanging context information for key change
+ *
+ * nilfs_btnode_prepare_change_key() prepares to move the contents of the
+ * b-tree node block of the old key given in the "oldkey" member of @ctxt to
+ * the position of the new key given in the "newkey" member of @ctxt in the
+ * page cache @btnc. Here, the key of the block is an index in units of
+ * blocks, and if the page and block sizes match, it matches the page index
+ * in the page cache.
+ *
+ * If the page size and block size match, this function attempts to move the
+ * entire folio, and in preparation for this, inserts the original folio into
+ * the new index of the cache. If this insertion fails or if the page size
+ * and block size are different, it falls back to a copy preparation using
+ * nilfs_btnode_create_block(), inserts a new block at the position
+ * corresponding to "newkey", and stores the buffer head pointer in the
+ * "newbh" member of @ctxt.
+ *
+ * Note that the current implementation does not support folio sizes larger
+ * than the page size.
+ *
+ * Return: 0 on success, or one of the following negative error codes on failure.
+ * * %-EIO - I/O error (metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
@@ -245,8 +266,21 @@ retry:
}
/**
- * nilfs_btnode_commit_change_key
- * commit the change_key operation prepared by prepare_change_key().
+ * nilfs_btnode_commit_change_key - commit the change of the search key of
+ * a b-tree node block
+ * @btnc: page cache in which the b-tree node block is buffered
+ * @ctxt: structure for exchanging context information for key change
+ *
+ * nilfs_btnode_commit_change_key() executes the key change based on the
+ * context @ctxt prepared by nilfs_btnode_prepare_change_key(). If no valid
+ * block buffer is prepared in "newbh" of @ctxt (i.e., a full folio move),
+ * this function removes the folio from the old index and completes the move.
+ * Otherwise, it copies the block data and inherited flag states of "oldbh"
+ * to "newbh" and clears the "oldbh" from the cache. In either case, the
+ * relocated buffer is marked as dirty.
+ *
+ * As with nilfs_btnode_prepare_change_key(), the current implementation does
+ * not support folio sizes larger than the page size.
*/
void nilfs_btnode_commit_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
@@ -285,8 +319,19 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
}
/**
- * nilfs_btnode_abort_change_key
- * abort the change_key operation prepared by prepare_change_key().
+ * nilfs_btnode_abort_change_key - abort the change of the search key of a
+ * b-tree node block
+ * @btnc: page cache in which the b-tree node block is buffered
+ * @ctxt: structure for exchanging context information for key change
+ *
+ * nilfs_btnode_abort_change_key() cancels the key change associated with the
+ * context @ctxt prepared via nilfs_btnode_prepare_change_key() and performs
+ * any necessary cleanup. If no valid block buffer is prepared in "newbh" of
+ * @ctxt, this function removes the folio from the destination index and aborts
+ * the move. Otherwise, it clears "newbh" from the cache.
+ *
+ * As with nilfs_btnode_prepare_change_key(), the current implementation does
+ * not support folio sizes larger than the page size.
*/
void nilfs_btnode_abort_change_key(struct address_space *btnc,
struct nilfs_btnode_chkey_ctxt *ctxt)
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 862bdf23120e..ef5061bb56da 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -350,7 +350,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
level >= NILFS_BTREE_LEVEL_MAX ||
(flags & NILFS_BTREE_NODE_ROOT) ||
- nchildren < 0 ||
+ nchildren <= 0 ||
nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
nilfs_crit(inode->i_sb,
"bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
@@ -381,7 +381,8 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
level >= NILFS_BTREE_LEVEL_MAX ||
nchildren < 0 ||
- nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX ||
+ (nchildren == 0 && level > NILFS_BTREE_LEVEL_NODE_MIN))) {
nilfs_crit(inode->i_sb,
"bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
inode->i_ino, level, flags, nchildren);
@@ -1658,13 +1659,16 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
int nchildren, ret;
root = nilfs_btree_get_root(btree);
+ nchildren = nilfs_btree_node_get_nchildren(root);
+ if (unlikely(nchildren == 0))
+ return 0;
+
switch (nilfs_btree_height(btree)) {
case 2:
bh = NULL;
node = root;
break;
case 3:
- nchildren = nilfs_btree_node_get_nchildren(root);
if (nchildren > 1)
return 0;
ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
@@ -1673,12 +1677,12 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
if (ret < 0)
return ret;
node = (struct nilfs_btree_node *)bh->b_data;
+ nchildren = nilfs_btree_node_get_nchildren(node);
break;
default:
return 0;
}
- nchildren = nilfs_btree_node_get_nchildren(node);
maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
nextmaxkey = (nchildren > 1) ?
nilfs_btree_node_get_key(node, nchildren - 2) : 0;
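
Hoisting the nchildren check to the top matters because the common tail indexes node keys at nchildren - 1; with a corrupted, empty node that underflows. A reduced model of the guard:

#include <assert.h>

/* Returns the node's maximum key, or 0 if the node is (corruptly) empty. */
static unsigned long long max_key(const unsigned long long *keys, int nchildren)
{
	if (nchildren == 0)         /* guard added by the fix */
		return 0;
	return keys[nchildren - 1]; /* would read keys[-1] without the guard */
}

int main(void)
{
	unsigned long long keys[] = { 10, 20, 30 };

	assert(max_key(keys, 3) == 30);
	assert(max_key(keys, 0) == 0); /* no out-of-bounds access */
	return 0;
}
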
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 92868e1a48ca..2a220f716c91 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -24,6 +24,7 @@
* @bp_index: index of child node
* @bp_oldreq: ptr end request for old ptr
* @bp_newreq: ptr alloc request for new ptr
+ * @bp_ctxt: context information for changing the key of a b-tree node block
* @bp_op: rebalance operation
*/
struct nilfs_btree_path {
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 69a5cced1e84..f0ce37552446 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -125,10 +125,17 @@ static void nilfs_cpfile_block_init(struct inode *cpfile,
}
}
-static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
- struct buffer_head **bhp)
+static int nilfs_cpfile_get_header_block(struct inode *cpfile,
+ struct buffer_head **bhp)
{
- return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
+ int err = nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
+
+ if (unlikely(err == -ENOENT)) {
+ nilfs_error(cpfile->i_sb,
+ "missing header block in checkpoint metadata");
+ err = -EIO;
+ }
+ return err;
}
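
The helper now owns the -ENOENT to -EIO translation that callers previously duplicated: a missing header block means metadata corruption, not an ordinary lookup miss. The wrapper pattern in plain C, with a stubbed-out block lookup:

#include <errno.h>
#include <stdio.h>

/* Stub lookup: pretend the header block (offset 0) is missing. */
static int raw_get_block(int blkoff) { return blkoff == 0 ? -ENOENT : 0; }

/* Callers of this wrapper never see -ENOENT for the header block. */
static int get_header_block(void)
{
	int err = raw_get_block(0);

	if (err == -ENOENT) {
		fprintf(stderr, "missing header block in metadata\n");
		err = -EIO; /* report corruption, not absence */
	}
	return err;
}

int main(void)
{
	return get_header_block() == -EIO ? 0 : 1;
}
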
static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
@@ -283,14 +290,9 @@ int nilfs_cpfile_create_checkpoint(struct inode *cpfile, __u64 cno)
down_write(&NILFS_MDT(cpfile)->mi_sem);
ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
- if (unlikely(ret < 0)) {
- if (ret == -ENOENT) {
- nilfs_error(cpfile->i_sb,
- "checkpoint creation failed due to metadata corruption.");
- ret = -EIO;
- }
+ if (unlikely(ret < 0))
goto out_sem;
- }
+
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 1, &cp_bh);
if (unlikely(ret < 0))
goto out_header;
@@ -704,9 +706,15 @@ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
}
/**
- * nilfs_cpfile_delete_checkpoint -
- * @cpfile:
- * @cno:
+ * nilfs_cpfile_delete_checkpoint - delete a checkpoint
+ * @cpfile: checkpoint file inode
+ * @cno: checkpoint number to delete
+ *
+ * Return: 0 on success, or one of the following negative error codes on failure.
+ * * %-EBUSY - Checkpoint in use (snapshot specified).
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No valid checkpoint found.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
@@ -968,21 +976,15 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
}
/**
- * nilfs_cpfile_is_snapshot -
+ * nilfs_cpfile_is_snapshot - determine if checkpoint is a snapshot
* @cpfile: inode of checkpoint file
- * @cno: checkpoint number
- *
- * Description:
- *
- * Return Value: On success, 1 is returned if the checkpoint specified by
- * @cno is a snapshot, or 0 if not. On error, one of the following negative
- * error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * @cno: checkpoint number
*
- * %-ENOENT - No such checkpoint.
+ * Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or
+ * one of the following negative error codes on failure.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No such checkpoint.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index fc1caf63a42a..0bef662176a4 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -271,18 +271,15 @@ void nilfs_dat_abort_update(struct inode *dat,
}
/**
- * nilfs_dat_mark_dirty -
- * @dat: DAT file inode
+ * nilfs_dat_mark_dirty - mark the DAT block buffer containing the specified
+ * virtual block address entry as dirty
+ * @dat: DAT file inode
* @vblocknr: virtual block number
*
- * Description:
- *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on failure.
+ * * %-EINVAL - Invalid DAT entry (internal code).
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 4b3e19d74925..fe5b1a30c509 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -231,37 +231,6 @@ static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
nilfs_rec_len_from_disk(p->rec_len));
}
-static unsigned char
-nilfs_filetype_table[NILFS_FT_MAX] = {
- [NILFS_FT_UNKNOWN] = DT_UNKNOWN,
- [NILFS_FT_REG_FILE] = DT_REG,
- [NILFS_FT_DIR] = DT_DIR,
- [NILFS_FT_CHRDEV] = DT_CHR,
- [NILFS_FT_BLKDEV] = DT_BLK,
- [NILFS_FT_FIFO] = DT_FIFO,
- [NILFS_FT_SOCK] = DT_SOCK,
- [NILFS_FT_SYMLINK] = DT_LNK,
-};
-
-#define S_SHIFT 12
-static unsigned char
-nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
- [S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
- [S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] = NILFS_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] = NILFS_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] = NILFS_FT_SOCK,
- [S_IFLNK >> S_SHIFT] = NILFS_FT_SYMLINK,
-};
-
-static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
-{
- umode_t mode = inode->i_mode;
-
- de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
-}
-
static int nilfs_readdir(struct file *file, struct dir_context *ctx)
{
loff_t pos = ctx->pos;
@@ -297,10 +266,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
if (de->inode) {
unsigned char t;
- if (de->file_type < NILFS_FT_MAX)
- t = nilfs_filetype_table[de->file_type];
- else
- t = DT_UNKNOWN;
+ t = fs_ftype_to_dtype(de->file_type);
if (!dir_emit(ctx, de->name, de->name_len,
le64_to_cpu(de->inode), t)) {
@@ -444,7 +410,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
err = nilfs_prepare_chunk(folio, from, to);
BUG_ON(err);
de->inode = cpu_to_le64(inode->i_ino);
- nilfs_set_de_type(de, inode);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
nilfs_commit_chunk(folio, mapping, from, to);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
@@ -531,7 +497,7 @@ got_it:
de->name_len = namelen;
memcpy(de->name, name, namelen);
de->inode = cpu_to_le64(inode->i_ino);
- nilfs_set_de_type(de, inode);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
nilfs_commit_chunk(folio, folio->mapping, from, to);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
nilfs_mark_inode_dirty(dir);
@@ -612,14 +578,14 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
memcpy(de->name, ".\0\0", 4);
de->inode = cpu_to_le64(inode->i_ino);
- nilfs_set_de_type(de, inode);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
de->name_len = 2;
de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
de->inode = cpu_to_le64(parent->i_ino);
memcpy(de->name, "..\0", 4);
- nilfs_set_de_type(de, inode);
+ de->file_type = fs_umode_to_ftype(inode->i_mode);
kunmap_local(kaddr);
nilfs_commit_chunk(folio, mapping, 0, chunk_size);
fail:
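
Dropping the private nilfs_filetype_table and nilfs_type_by_mode in favor of the generic fs_umode_to_ftype()/fs_ftype_to_dtype() helpers removes per-filesystem copies of the same tables. What those helpers do, modeled in userspace (the FT_* values follow the common ext2-style layout and are assumptions of this sketch):

#include <assert.h>
#include <dirent.h>
#include <sys/stat.h>

/* Illustrative on-disk file types (ext2-style layout). */
enum { FT_UNKNOWN, FT_REG_FILE, FT_DIR, FT_SYMLINK = 7 };

/* mode -> on-disk directory entry type (cf. fs_umode_to_ftype). */
static unsigned char umode_to_ftype(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG: return FT_REG_FILE;
	case S_IFDIR: return FT_DIR;
	case S_IFLNK: return FT_SYMLINK;
	default:      return FT_UNKNOWN;
	}
}

/* on-disk type -> readdir d_type (cf. fs_ftype_to_dtype). */
static unsigned char ftype_to_dtype(unsigned char ftype)
{
	switch (ftype) {
	case FT_REG_FILE: return DT_REG;
	case FT_DIR:      return DT_DIR;
	case FT_SYMLINK:  return DT_LNK;
	default:          return DT_UNKNOWN;
	}
}

int main(void)
{
	assert(ftype_to_dtype(umode_to_ftype(S_IFDIR | 0755)) == DT_DIR);
	return 0;
}
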
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 8661f452dba6..be6acf6e2bfc 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -15,6 +15,7 @@
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
+#include <linux/random.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
@@ -28,17 +29,13 @@
* @ino: inode number
* @cno: checkpoint number
* @root: pointer on NILFS root object (mounted checkpoint)
- * @for_gc: inode for GC flag
- * @for_btnc: inode for B-tree node cache flag
- * @for_shadow: inode for shadowed page cache flag
+ * @type: inode type
*/
struct nilfs_iget_args {
u64 ino;
__u64 cno;
struct nilfs_root *root;
- bool for_gc;
- bool for_btnc;
- bool for_shadow;
+ unsigned int type;
};
static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -162,7 +159,7 @@ static int nilfs_writepages(struct address_space *mapping,
int err = 0;
if (sb_rdonly(inode->i_sb)) {
- nilfs_clear_dirty_pages(mapping, false);
+ nilfs_clear_dirty_pages(mapping);
return -EROFS;
}
@@ -186,7 +183,7 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
* have dirty pages that try to be flushed in background.
* So, here we simply discard this dirty page.
*/
- nilfs_clear_folio_dirty(folio, false);
+ nilfs_clear_folio_dirty(folio);
folio_unlock(folio);
return -EROFS;
}
@@ -315,8 +312,7 @@ static int nilfs_insert_inode_locked(struct inode *inode,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = false,
- .for_btnc = false, .for_shadow = false
+ .ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
};
return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -325,7 +321,6 @@ static int nilfs_insert_inode_locked(struct inode *inode,
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
- struct the_nilfs *nilfs = sb->s_fs_info;
struct inode *inode;
struct nilfs_inode_info *ii;
struct nilfs_root *root;
@@ -343,25 +338,13 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
root = NILFS_I(dir)->i_root;
ii = NILFS_I(inode);
ii->i_state = BIT(NILFS_I_NEW);
+ ii->i_type = NILFS_I_TYPE_NORMAL;
ii->i_root = root;
err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
if (unlikely(err))
goto failed_ifile_create_inode;
/* reference count of i_bh inherits from nilfs_mdt_read_block() */
-
- if (unlikely(ino < NILFS_USER_INO)) {
- nilfs_warn(sb,
- "inode bitmap is inconsistent for reserved inodes");
- do {
- brelse(bh);
- err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
- if (unlikely(err))
- goto failed_ifile_create_inode;
- } while (ino < NILFS_USER_INO);
-
- nilfs_info(sb, "repaired inode bitmap for reserved inodes");
- }
ii->i_bh = bh;
atomic64_inc(&root->inodes_count);
@@ -385,9 +368,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
/* ii->i_dir_acl = 0; */
ii->i_dir_start_lookup = 0;
nilfs_set_inode_flags(inode);
- spin_lock(&nilfs->ns_next_gen_lock);
- inode->i_generation = nilfs->ns_next_generation++;
- spin_unlock(&nilfs->ns_next_gen_lock);
+ inode->i_generation = get_random_u32();
if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
err = -EIO;
goto failed_after_creation;
@@ -546,23 +527,10 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
return 0;
ii = NILFS_I(inode);
- if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
- if (!args->for_btnc)
- return 0;
- } else if (args->for_btnc) {
+ if (ii->i_type != args->type)
return 0;
- }
- if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
- if (!args->for_shadow)
- return 0;
- } else if (args->for_shadow) {
- return 0;
- }
- if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
- return !args->for_gc;
-
- return args->for_gc && args->cno == ii->i_cno;
+ return !(args->type & NILFS_I_TYPE_GC) || args->cno == ii->i_cno;
}
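
Collapsing the three booleans into one i_type bitmask turns the identity test into a single comparison. A sketch of the resulting check, with invented bit values mirroring the NILFS_I_TYPE_* flags used above:

#include <assert.h>
#include <stdbool.h>

/* Invented stand-ins for NILFS_I_TYPE_*. */
enum {
	TYPE_NORMAL = 0,
	TYPE_GC     = 1 << 0,
	TYPE_BTNC   = 1 << 1,
	TYPE_SHADOW = 1 << 2,
};

struct inode_info { unsigned int type; unsigned long long cno; };

/* One comparison replaces three flag-by-flag checks; GC inodes
 * additionally match on checkpoint number. */
static bool iget_test(const struct inode_info *ii,
		      unsigned int want_type, unsigned long long want_cno)
{
	if (ii->type != want_type)
		return false;
	return !(want_type & TYPE_GC) || ii->cno == want_cno;
}

int main(void)
{
	struct inode_info gc = { TYPE_GC, 42 };

	assert(iget_test(&gc, TYPE_GC, 42));
	assert(!iget_test(&gc, TYPE_GC, 7));              /* wrong checkpoint */
	assert(!iget_test(&gc, TYPE_GC | TYPE_BTNC, 42)); /* different type */
	return 0;
}
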
static int nilfs_iget_set(struct inode *inode, void *opaque)
@@ -572,15 +540,9 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
inode->i_ino = args->ino;
NILFS_I(inode)->i_cno = args->cno;
NILFS_I(inode)->i_root = args->root;
+ NILFS_I(inode)->i_type = args->type;
if (args->root && args->ino == NILFS_ROOT_INO)
nilfs_get_root(args->root);
-
- if (args->for_gc)
- NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
- if (args->for_btnc)
- NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
- if (args->for_shadow)
- NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
return 0;
}
@@ -588,8 +550,7 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = false,
- .for_btnc = false, .for_shadow = false
+ .ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
};
return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -599,8 +560,7 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
unsigned long ino)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = root, .cno = 0, .for_gc = false,
- .for_btnc = false, .for_shadow = false
+ .ino = ino, .root = root, .cno = 0, .type = NILFS_I_TYPE_NORMAL
};
return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -631,8 +591,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
__u64 cno)
{
struct nilfs_iget_args args = {
- .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
- .for_btnc = false, .for_shadow = false
+ .ino = ino, .root = NULL, .cno = cno, .type = NILFS_I_TYPE_GC
};
struct inode *inode;
int err;
@@ -677,9 +636,7 @@ int nilfs_attach_btree_node_cache(struct inode *inode)
args.ino = inode->i_ino;
args.root = ii->i_root;
args.cno = ii->i_cno;
- args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
- args.for_btnc = true;
- args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
+ args.type = ii->i_type | NILFS_I_TYPE_BTNC;
btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
nilfs_iget_set, &args);
@@ -733,8 +690,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode)
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
struct nilfs_iget_args args = {
- .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
- .for_btnc = false, .for_shadow = true
+ .ino = inode->i_ino, .root = NULL, .cno = 0,
+ .type = NILFS_I_TYPE_SHADOW
};
struct inode *s_inode;
int err;
@@ -900,7 +857,7 @@ static void nilfs_clear_inode(struct inode *inode)
if (test_bit(NILFS_I_BMAP, &ii->i_state))
nilfs_bmap_clear(ii->i_bmap);
- if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+ if (!(ii->i_type & NILFS_I_TYPE_BTNC))
nilfs_detach_btree_node_cache(inode);
if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 8be471ce4f19..fa77f78df681 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -17,6 +17,7 @@
#include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */
#include <linux/buffer_head.h>
#include <linux/fileattr.h>
+#include <linux/string.h>
#include "nilfs.h"
#include "segment.h"
#include "bmap.h"
@@ -114,7 +115,11 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
}
/**
- * nilfs_fileattr_get - ioctl to support lsattr
+ * nilfs_fileattr_get - retrieve miscellaneous file attributes
+ * @dentry: the object to retrieve from
+ * @fa: fileattr pointer
+ *
+ * Return: Always 0 as success.
*/
int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
@@ -126,7 +131,12 @@ int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
}
/**
- * nilfs_fileattr_set - ioctl to support chattr
+ * nilfs_fileattr_set - change miscellaneous file attributes
+ * @idmap: idmap of the mount
+ * @dentry: the object to change
+ * @fa: fileattr pointer
+ *
+ * Return: 0 on success, or a negative error code on failure.
*/
int nilfs_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa)
@@ -159,6 +169,10 @@ int nilfs_fileattr_set(struct mnt_idmap *idmap,
/**
* nilfs_ioctl_getversion - get info about a file's version (generation number)
+ * @inode: inode object
+ * @argp: userspace memory where the generation number of @inode is stored
+ *
+ * Return: 0 on success, or %-EFAULT on error.
*/
static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
{
@@ -1266,6 +1280,91 @@ out:
return ret;
}
+/**
+ * nilfs_ioctl_get_fslabel - get the volume name of the file system
+ * @sb: super block instance
+ * @argp: pointer to userspace memory where the volume name should be stored
+ *
+ * Return: 0 on success, %-EFAULT if copying to userspace memory fails.
+ */
+static int nilfs_ioctl_get_fslabel(struct super_block *sb, void __user *argp)
+{
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ char label[NILFS_MAX_VOLUME_NAME + 1];
+
+ BUILD_BUG_ON(NILFS_MAX_VOLUME_NAME >= FSLABEL_MAX);
+
+ down_read(&nilfs->ns_sem);
+ memtostr_pad(label, nilfs->ns_sbp[0]->s_volume_name);
+ up_read(&nilfs->ns_sem);
+
+ if (copy_to_user(argp, label, sizeof(label)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * nilfs_ioctl_set_fslabel - set the volume name of the file system
+ * @sb: super block instance
+ * @filp: file object
+ * @argp: pointer to userspace memory that contains the volume name
+ *
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EFAULT - Error copying input data.
+ * * %-EINVAL - Label length exceeds record size in superblock.
+ * * %-EIO - I/O error.
+ * * %-EPERM - Operation not permitted (insufficient permissions).
+ * * %-EROFS - Read only file system.
+ */
+static int nilfs_ioctl_set_fslabel(struct super_block *sb, struct file *filp,
+ void __user *argp)
+{
+ char label[NILFS_MAX_VOLUME_NAME + 1];
+ struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_super_block **sbp;
+ size_t len;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ if (copy_from_user(label, argp, NILFS_MAX_VOLUME_NAME + 1)) {
+ ret = -EFAULT;
+ goto out_drop_write;
+ }
+
+ len = strnlen(label, NILFS_MAX_VOLUME_NAME + 1);
+ if (len > NILFS_MAX_VOLUME_NAME) {
+ nilfs_err(sb, "unable to set label with more than %zu bytes",
+ NILFS_MAX_VOLUME_NAME);
+ ret = -EINVAL;
+ goto out_drop_write;
+ }
+
+ down_write(&nilfs->ns_sem);
+ sbp = nilfs_prepare_super(sb, false);
+ if (unlikely(!sbp)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ strtomem_pad(sbp[0]->s_volume_name, label, 0);
+ if (sbp[1])
+ strtomem_pad(sbp[1]->s_volume_name, label, 0);
+
+ ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
+
+out_unlock:
+ up_write(&nilfs->ns_sem);
+out_drop_write:
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1308,6 +1407,10 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return nilfs_ioctl_set_alloc_range(inode, argp);
case FITRIM:
return nilfs_ioctl_trim_fs(inode, argp);
+ case FS_IOC_GETFSLABEL:
+ return nilfs_ioctl_get_fslabel(inode->i_sb, argp);
+ case FS_IOC_SETFSLABEL:
+ return nilfs_ioctl_set_fslabel(inode->i_sb, filp, argp);
default:
return -ENOTTY;
}
@@ -1334,6 +1437,8 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case NILFS_IOCTL_RESIZE:
case NILFS_IOCTL_SET_ALLOC_RANGE:
case FITRIM:
+ case FS_IOC_GETFSLABEL:
+ case FS_IOC_SETFSLABEL:
break;
default:
return -ENOIOCTLCMD;
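
For reference, the FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL pair wired up above is the generic label interface from <linux/fs.h>, where the label argument is a fixed FSLABEL_MAX-byte char array. A minimal userspace sketch of driving it (hypothetical mount path, error handling trimmed) could look like:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* FS_IOC_GETFSLABEL, FS_IOC_SETFSLABEL, FSLABEL_MAX */

	int main(void)
	{
		char label[FSLABEL_MAX] = "";
		/* any file or directory on the mounted nilfs2 volume works */
		int fd = open("/mnt/nilfs", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
			printf("label: %s\n", label);

		strncpy(label, "backup-vol", sizeof(label) - 1);
		/* needs CAP_SYS_ADMIN and a writable mount, per the checks above */
		ioctl(fd, FS_IOC_SETFSLABEL, label);
		close(fd);
		return 0;
	}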
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 4f792a0ad0f0..ceb7dc0b5bad 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -411,7 +411,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
* have dirty folios that try to be flushed in background.
* So, here we simply discard this dirty folio.
*/
- nilfs_clear_folio_dirty(folio, false);
+ nilfs_clear_folio_dirty(folio);
folio_unlock(folio);
return -EROFS;
}
@@ -638,10 +638,10 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
if (mi->mi_palloc_cache)
nilfs_palloc_clear_cache(inode);
- nilfs_clear_dirty_pages(inode->i_mapping, true);
+ nilfs_clear_dirty_pages(inode->i_mapping);
nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
- nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+ nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping);
nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 4017f7856440..fb1c4c5bae7c 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -22,6 +22,7 @@
/**
* struct nilfs_inode_info - nilfs inode data in memory
* @i_flags: inode flags
+ * @i_type: inode type (combination of flags that indicate usage)
* @i_state: dynamic state flags
* @i_bmap: pointer on i_bmap_data
* @i_bmap_data: raw block mapping
@@ -37,6 +38,7 @@
*/
struct nilfs_inode_info {
__u32 i_flags;
+ unsigned int i_type;
unsigned long i_state; /* Dynamic state flags */
struct nilfs_bmap *i_bmap;
struct nilfs_bmap i_bmap_data;
@@ -90,9 +92,16 @@ enum {
NILFS_I_UPDATED, /* The file has been written back */
NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */
NILFS_I_BMAP, /* has bmap and btnode_cache */
- NILFS_I_GCINODE, /* inode for GC, on memory only */
- NILFS_I_BTNC, /* inode for btree node cache */
- NILFS_I_SHADOW, /* inode for shadowed page cache */
+};
+
+/*
+ * Flags to identify the usage of on-memory inodes (i_type)
+ */
+enum {
+ NILFS_I_TYPE_NORMAL = 0,
+ NILFS_I_TYPE_GC = 0x0001, /* For data caching during GC */
+ NILFS_I_TYPE_BTNC = 0x0002, /* For btree node cache */
+ NILFS_I_TYPE_SHADOW = 0x0004, /* For shadowed page cache */
};
/*
@@ -103,6 +112,18 @@ enum {
NILFS_SB_COMMIT_ALL /* Commit both super blocks */
};
+/**
+ * define NILFS_MAX_VOLUME_NAME - maximum number of characters (bytes) in a
+ * file system volume name
+ *
+ * Defined by the size of the volume name field in the on-disk superblocks.
+ * This volume name does not include the terminating NULL byte if the string
+ * length matches the field size, so use (NILFS_MAX_VOLUME_NAME + 1) for the
+ * size of the buffer that requires a NULL byte termination.
+ */
+#define NILFS_MAX_VOLUME_NAME \
+ sizeof_field(struct nilfs_super_block, s_volume_name)
+
/*
* Macros to check inode numbers
*/
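
The sizing rule in the NILFS_MAX_VOLUME_NAME comment above mirrors how the new fslabel ioctls use the string helpers from <linux/string.h>: memtostr_pad() turns a fixed-size, possibly unterminated field into a NUL-terminated buffer, and strtomem_pad() goes the other way. A minimal sketch of the pattern (struct and function names hypothetical):

	struct example_sb {
		char s_volume_name[80];		/* fixed-size, may lack a NUL */
	};

	static void example_roundtrip(struct example_sb *sbp, const char *new_label)
	{
		/* +1 leaves room for the terminating NUL the field may not have */
		char buf[sizeof(sbp->s_volume_name) + 1];

		memtostr_pad(buf, sbp->s_volume_name);		/* always NUL-terminates buf */
		strtomem_pad(sbp->s_volume_name, new_label, 0);	/* pads the tail with 0 */
	}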
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 14e470fb8870..9c0b7cddeaae 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -262,7 +262,7 @@ repeat:
NILFS_FOLIO_BUG(folio, "inconsistent dirty state");
dfolio = filemap_grab_folio(dmap, folio->index);
- if (unlikely(IS_ERR(dfolio))) {
+ if (IS_ERR(dfolio)) {
/* No empty page is added to the page cache */
folio_unlock(folio);
err = PTR_ERR(dfolio);
@@ -357,9 +357,8 @@ repeat:
/**
* nilfs_clear_dirty_pages - discard dirty pages in address space
* @mapping: address space with dirty pages for discarding
- * @silent: suppress [true] or print [false] warning messages
*/
-void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
+void nilfs_clear_dirty_pages(struct address_space *mapping)
{
struct folio_batch fbatch;
unsigned int i;
@@ -380,7 +379,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
* was acquired. Skip processing in that case.
*/
if (likely(folio->mapping == mapping))
- nilfs_clear_folio_dirty(folio, silent);
+ nilfs_clear_folio_dirty(folio);
folio_unlock(folio);
}
@@ -392,20 +391,13 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
/**
* nilfs_clear_folio_dirty - discard dirty folio
* @folio: dirty folio that will be discarded
- * @silent: suppress [true] or print [false] warning messages
*/
-void nilfs_clear_folio_dirty(struct folio *folio, bool silent)
+void nilfs_clear_folio_dirty(struct folio *folio)
{
- struct inode *inode = folio->mapping->host;
- struct super_block *sb = inode->i_sb;
struct buffer_head *bh, *head;
BUG_ON(!folio_test_locked(folio));
- if (!silent)
- nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
- folio_pos(folio), inode->i_ino);
-
folio_clear_uptodate(folio);
folio_clear_mappedtodisk(folio);
@@ -419,11 +411,6 @@ void nilfs_clear_folio_dirty(struct folio *folio, bool silent)
bh = head;
do {
lock_buffer(bh);
- if (!silent)
- nilfs_warn(sb,
- "discard dirty block: blocknr=%llu, size=%zu",
- (u64)bh->b_blocknr, bh->b_size);
-
set_mask_bits(&bh->b_state, clear_bits, 0);
unlock_buffer(bh);
} while (bh = bh->b_this_page, bh != head);
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 7e1a2c455a10..64521a03a19e 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -41,8 +41,8 @@ void nilfs_folio_bug(struct folio *);
int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
-void nilfs_clear_folio_dirty(struct folio *, bool);
-void nilfs_clear_dirty_pages(struct address_space *, bool);
+void nilfs_clear_folio_dirty(struct folio *folio);
+void nilfs_clear_dirty_pages(struct address_space *mapping);
unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
unsigned int);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index ec61ce9f29a2..21d81097a89f 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -433,8 +433,17 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
* The next segment is invalidated by this recovery.
*/
err = nilfs_sufile_free(sufile, segnum[1]);
- if (unlikely(err))
+ if (unlikely(err)) {
+ if (err == -ENOENT) {
+ nilfs_err(sb,
+ "checkpoint log inconsistency at block %llu (segment %llu): next segment %llu is unallocated",
+ (unsigned long long)nilfs->ns_last_pseg,
+ (unsigned long long)nilfs->ns_segnum,
+ (unsigned long long)segnum[1]);
+ err = -EINVAL;
+ }
goto failed;
+ }
for (i = 1; i < 4; i++) {
err = nilfs_segment_list_add(head, segnum[i]);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 871ec35ea8e8..587251830897 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -519,7 +519,7 @@ static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
ii = NILFS_I(inode);
- if (test_bit(NILFS_I_GCINODE, &ii->i_state))
+ if (ii->i_type & NILFS_I_TYPE_GC)
cno = ii->i_cno;
else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
cno = 0;
@@ -1102,12 +1102,64 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
return err;
}
+/**
+ * nilfs_free_segments - free the segments given by an array of segment numbers
+ * @nilfs: nilfs object
+ * @segnumv: array of segment numbers to be freed
+ * @nsegs: number of segments to be freed in @segnumv
+ *
+ * nilfs_free_segments() wraps nilfs_sufile_freev() and
+ * nilfs_sufile_cancel_freev(), and edits the segment usage metadata file
+ * (sufile) to free all segments given by @segnumv and @nsegs at once. If
+ * it fails midway, it cancels the changes so that none of the segments are
+ * freed. If @nsegs is 0, this function does nothing.
+ *
+ * The freeing of segments is not finalized until the writing of a log with
+ * a super root block containing this sufile change is complete, and it can
+ * be canceled with nilfs_sufile_cancel_freev() until then.
+ *
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EINVAL - Invalid segment number.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ */
+static int nilfs_free_segments(struct the_nilfs *nilfs, __u64 *segnumv,
+ size_t nsegs)
+{
+ size_t ndone;
+ int ret;
+
+ if (!nsegs)
+ return 0;
+
+ ret = nilfs_sufile_freev(nilfs->ns_sufile, segnumv, nsegs, &ndone);
+ if (unlikely(ret)) {
+ nilfs_sufile_cancel_freev(nilfs->ns_sufile, segnumv, ndone,
+ NULL);
+ /*
+ * If a segment usage of the segments to be freed is in a
+ * hole block, nilfs_sufile_freev() will return -ENOENT.
+ * In this case, -EINVAL should be returned to the caller
+ * since there is something wrong with the given segment
+ * number array. This error can only occur during GC, so
+ * there is no need to worry about it propagating to other
+ * callers (such as fsync).
+ */
+ if (ret == -ENOENT) {
+ nilfs_err(nilfs->ns_sb,
+ "The segment usage entry %llu to be freed is invalid (in a hole)",
+ (unsigned long long)segnumv[ndone]);
+ ret = -EINVAL;
+ }
+ }
+ return ret;
+}
+
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
struct list_head *head;
struct nilfs_inode_info *ii;
- size_t ndone;
int err = 0;
switch (nilfs_sc_cstage_get(sci)) {
@@ -1201,14 +1253,10 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
nilfs_sc_cstage_inc(sci);
fallthrough;
case NILFS_ST_SUFILE:
- err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
- sci->sc_nfreesegs, &ndone);
- if (unlikely(err)) {
- nilfs_sufile_cancel_freev(nilfs->ns_sufile,
- sci->sc_freesegs, ndone,
- NULL);
+ err = nilfs_free_segments(nilfs, sci->sc_freesegs,
+ sci->sc_nfreesegs);
+ if (unlikely(err))
break;
- }
sci->sc_stage.flags |= NILFS_CF_SUFREED;
err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
@@ -2456,7 +2504,7 @@ static void nilfs_construction_timeout(struct timer_list *t)
{
struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
- wake_up_process(sci->sc_timer_task);
+ wake_up_process(sci->sc_task);
}
static void
@@ -2582,123 +2630,85 @@ static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
}
/**
- * nilfs_segctor_thread - main loop of the segment constructor thread.
+ * nilfs_log_write_required - determine whether log writing is required
+ * @sci: nilfs_sc_info struct
+ * @modep: location for storing log writing mode
+ *
+ * Return: true if log writing is required, false otherwise. If log writing
+ * is required, the mode is stored in the location pointed to by @modep.
+ */
+static bool nilfs_log_write_required(struct nilfs_sc_info *sci, int *modep)
+{
+ bool timedout, ret = true;
+
+ spin_lock(&sci->sc_state_lock);
+ timedout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
+ time_after_eq(jiffies, sci->sc_timer.expires));
+ if (timedout || sci->sc_seq_request != sci->sc_seq_done)
+ *modep = SC_LSEG_SR;
+ else if (sci->sc_flush_request)
+ *modep = nilfs_segctor_flush_mode(sci);
+ else
+ ret = false;
+
+ spin_unlock(&sci->sc_state_lock);
+ return ret;
+}
+
+/**
+ * nilfs_segctor_thread - main loop of the log writer thread
* @arg: pointer to a struct nilfs_sc_info.
*
- * nilfs_segctor_thread() initializes a timer and serves as a daemon
- * to execute segment constructions.
+ * nilfs_segctor_thread() is the main loop function of the log writer kernel
+ * thread, which determines whether log writing is necessary, and if so,
+ * performs the log write in the background, or waits if not. It is also
+ * used to decide the background writeback of the superblock.
+ *
+ * Return: Always 0.
*/
static int nilfs_segctor_thread(void *arg)
{
struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
- int timeout = 0;
-
- sci->sc_timer_task = current;
- timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
- /* start sync. */
- sci->sc_task = current;
- wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
nilfs_info(sci->sc_super,
"segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
set_freezable();
- spin_lock(&sci->sc_state_lock);
- loop:
- for (;;) {
- int mode;
-
- if (sci->sc_state & NILFS_SEGCTOR_QUIT)
- goto end_thread;
-
- if (timeout || sci->sc_seq_request != sci->sc_seq_done)
- mode = SC_LSEG_SR;
- else if (sci->sc_flush_request)
- mode = nilfs_segctor_flush_mode(sci);
- else
- break;
-
- spin_unlock(&sci->sc_state_lock);
- nilfs_segctor_thread_construct(sci, mode);
- spin_lock(&sci->sc_state_lock);
- timeout = 0;
- }
-
- if (freezing(current)) {
- spin_unlock(&sci->sc_state_lock);
- try_to_freeze();
- spin_lock(&sci->sc_state_lock);
- } else {
+ while (!kthread_should_stop()) {
DEFINE_WAIT(wait);
- int should_sleep = 1;
+ bool should_write;
+ int mode;
+
+ if (freezing(current)) {
+ try_to_freeze();
+ continue;
+ }
prepare_to_wait(&sci->sc_wait_daemon, &wait,
TASK_INTERRUPTIBLE);
-
- if (sci->sc_seq_request != sci->sc_seq_done)
- should_sleep = 0;
- else if (sci->sc_flush_request)
- should_sleep = 0;
- else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
- should_sleep = time_before(jiffies,
- sci->sc_timer.expires);
-
- if (should_sleep) {
- spin_unlock(&sci->sc_state_lock);
+ should_write = nilfs_log_write_required(sci, &mode);
+ if (!should_write)
schedule();
- spin_lock(&sci->sc_state_lock);
- }
finish_wait(&sci->sc_wait_daemon, &wait);
- timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
- time_after_eq(jiffies, sci->sc_timer.expires));
if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
+
+ if (should_write)
+ nilfs_segctor_thread_construct(sci, mode);
}
- goto loop;
- end_thread:
/* end sync. */
+ spin_lock(&sci->sc_state_lock);
sci->sc_task = NULL;
timer_shutdown_sync(&sci->sc_timer);
- wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
spin_unlock(&sci->sc_state_lock);
return 0;
}
-static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
-{
- struct task_struct *t;
-
- t = kthread_run(nilfs_segctor_thread, sci, "segctord");
- if (IS_ERR(t)) {
- int err = PTR_ERR(t);
-
- nilfs_err(sci->sc_super, "error %d creating segctord thread",
- err);
- return err;
- }
- wait_event(sci->sc_wait_task, sci->sc_task != NULL);
- return 0;
-}
-
-static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
- __acquires(&sci->sc_state_lock)
- __releases(&sci->sc_state_lock)
-{
- sci->sc_state |= NILFS_SEGCTOR_QUIT;
-
- while (sci->sc_task) {
- wake_up(&sci->sc_wait_daemon);
- spin_unlock(&sci->sc_state_lock);
- wait_event(sci->sc_wait_task, sci->sc_task == NULL);
- spin_lock(&sci->sc_state_lock);
- }
-}
-
/*
* Setup & clean-up functions
*/
@@ -2719,7 +2729,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
init_waitqueue_head(&sci->sc_wait_request);
init_waitqueue_head(&sci->sc_wait_daemon);
- init_waitqueue_head(&sci->sc_wait_task);
spin_lock_init(&sci->sc_state_lock);
INIT_LIST_HEAD(&sci->sc_dirty_files);
INIT_LIST_HEAD(&sci->sc_segbufs);
@@ -2774,8 +2783,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
up_write(&nilfs->ns_segctor_sem);
+ if (sci->sc_task) {
+ wake_up(&sci->sc_wait_daemon);
+ kthread_stop(sci->sc_task);
+ }
+
spin_lock(&sci->sc_state_lock);
- nilfs_segctor_kill_thread(sci);
flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
@@ -2823,14 +2836,15 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
* This allocates a log writer object, initializes it, and starts the
* log writer.
*
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or the following negative error code on failure.
+ * * %-EINTR - Log writer thread creation failed due to interruption.
+ * * %-ENOMEM - Insufficient memory available.
*/
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
struct the_nilfs *nilfs = sb->s_fs_info;
+ struct nilfs_sc_info *sci;
+ struct task_struct *t;
int err;
if (nilfs->ns_writer) {
@@ -2843,15 +2857,23 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
return 0;
}
- nilfs->ns_writer = nilfs_segctor_new(sb, root);
- if (!nilfs->ns_writer)
+ sci = nilfs_segctor_new(sb, root);
+ if (unlikely(!sci))
return -ENOMEM;
- err = nilfs_segctor_start_thread(nilfs->ns_writer);
- if (unlikely(err))
+ nilfs->ns_writer = sci;
+ t = kthread_create(nilfs_segctor_thread, sci, "segctord");
+ if (IS_ERR(t)) {
+ err = PTR_ERR(t);
+ nilfs_err(sb, "error %d creating segctord thread", err);
nilfs_detach_log_writer(sb);
+ return err;
+ }
+ sci->sc_task = t;
+ timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
- return err;
+ wake_up_process(sci->sc_task);
+ return 0;
}
/**
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 1060f72ebf5a..f723f47ddc4e 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -22,10 +22,10 @@ struct nilfs_root;
* struct nilfs_recovery_info - Recovery information
* @ri_need_recovery: Recovery status
* @ri_super_root: Block number of the last super root
- * @ri_ri_cno: Number of the last checkpoint
+ * @ri_cno: Number of the last checkpoint
* @ri_lsegs_start: Region for roll-forwarding (start block number)
* @ri_lsegs_end: Region for roll-forwarding (end block number)
- * @ri_lseg_start_seq: Sequence value of the segment at ri_lsegs_start
+ * @ri_lsegs_start_seq: Sequence value of the segment at ri_lsegs_start
 * @ri_used_segments: List of segments to be marked active
* @ri_pseg_start: Block number of the last partial segment
* @ri_seq: Sequence number on the last partial segment
@@ -105,9 +105,8 @@ struct nilfs_segsum_pointer {
* @sc_flush_request: inode bitmap of metadata files to be flushed
* @sc_wait_request: Client request queue
* @sc_wait_daemon: Daemon wait queue
- * @sc_wait_task: Start/end wait queue to control segctord task
* @sc_seq_request: Request counter
- * @sc_seq_accept: Accepted request count
+ * @sc_seq_accepted: Accepted request count
* @sc_seq_done: Completion counter
* @sc_sync: Request of explicit sync operation
* @sc_interval: Timeout value of background construction
@@ -158,7 +157,6 @@ struct nilfs_sc_info {
wait_queue_head_t sc_wait_request;
wait_queue_head_t sc_wait_daemon;
- wait_queue_head_t sc_wait_task;
__u32 sc_seq_request;
__u32 sc_seq_accepted;
@@ -171,7 +169,6 @@ struct nilfs_sc_info {
unsigned long sc_watermark;
struct timer_list sc_timer;
- struct task_struct *sc_timer_task;
struct task_struct *sc_task;
};
@@ -192,7 +189,6 @@ enum {
};
/* sc_state */
-#define NILFS_SEGCTOR_QUIT 0x0001 /* segctord is being destroyed */
#define NILFS_SEGCTOR_COMMIT 0x0004 /* committed transaction exists */
/*
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 6748218be7c5..eea5a6a12f7b 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -79,10 +79,17 @@ nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
NILFS_MDT(sufile)->mi_entry_size;
}
-static inline int nilfs_sufile_get_header_block(struct inode *sufile,
- struct buffer_head **bhp)
+static int nilfs_sufile_get_header_block(struct inode *sufile,
+ struct buffer_head **bhp)
{
- return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
+ int err = nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
+
+ if (unlikely(err == -ENOENT)) {
+ nilfs_error(sufile->i_sb,
+ "missing header block in segment usage metadata");
+ err = -EIO;
+ }
+ return err;
}
static inline int
@@ -506,8 +513,15 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
down_write(&NILFS_MDT(sufile)->mi_sem);
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
- if (ret)
+ if (unlikely(ret)) {
+ if (ret == -ENOENT) {
+ nilfs_error(sufile->i_sb,
+ "segment usage for segment %llu is unreadable due to a hole block",
+ (unsigned long long)segnum);
+ ret = -EIO;
+ }
goto out_sem;
+ }
kaddr = kmap_local_page(bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
@@ -840,21 +854,17 @@ out:
}
/**
- * nilfs_sufile_get_suinfo -
+ * nilfs_sufile_get_suinfo - get segment usage information
* @sufile: inode of segment usage file
* @segnum: segment number to start looking
- * @buf: array of suinfo
- * @sisz: byte size of suinfo
- * @nsi: size of suinfo array
- *
- * Description:
+ * @buf: array of suinfo
+ * @sisz: byte size of suinfo
+ * @nsi: size of suinfo array
*
- * Return Value: On success, 0 is returned and .... On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: Count of segment usage info items stored in the output buffer on
+ * success, or the following negative error code on failure.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
*/
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
unsigned int sisz, size_t nsi)
@@ -1241,9 +1251,15 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
if (err)
goto failed;
- err = nilfs_sufile_get_header_block(sufile, &header_bh);
- if (err)
+ err = nilfs_mdt_get_block(sufile, 0, 0, NULL, &header_bh);
+ if (unlikely(err)) {
+ if (err == -ENOENT) {
+ nilfs_err(sb,
+ "missing header block in segment usage metadata");
+ err = -EINVAL;
+ }
goto failed;
+ }
sui = NILFS_SUI(sufile);
kaddr = kmap_local_page(header_bh->b_page);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index e835e1f5a712..eca79cca3803 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -105,6 +105,10 @@ static void nilfs_set_error(struct super_block *sb)
/**
* __nilfs_error() - report failure condition on a filesystem
+ * @sb: super block instance
+ * @function: name of calling function
+ * @fmt: format string for message to be output
+ * @...: optional arguments to @fmt
*
* __nilfs_error() sets an ERROR_FS flag on the superblock as well as
* reporting an error message. This function should be called when
@@ -156,6 +160,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
return NULL;
ii->i_bh = NULL;
ii->i_state = 0;
+ ii->i_type = 0;
ii->i_cno = 0;
ii->i_assoc_inode = NULL;
ii->i_bmap = &ii->i_bmap_data;
@@ -1063,6 +1068,10 @@ nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
goto failed_nilfs;
+ super_set_uuid(sb, nilfs->ns_sbp[0]->s_uuid,
+ sizeof(nilfs->ns_sbp[0]->s_uuid));
+ super_set_sysfs_name_bdev(sb);
+
cno = nilfs_last_cno(nilfs);
err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
if (err) {
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index e44dde57ab65..ac03fd3c330c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
-#include <linux/random.h>
#include <linux/log2.h>
#include <linux/crc32.h>
#include "nilfs.h"
@@ -69,7 +68,6 @@ struct the_nilfs *alloc_nilfs(struct super_block *sb)
INIT_LIST_HEAD(&nilfs->ns_dirty_files);
INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
spin_lock_init(&nilfs->ns_inode_lock);
- spin_lock_init(&nilfs->ns_next_gen_lock);
spin_lock_init(&nilfs->ns_last_segment_lock);
nilfs->ns_cptree = RB_ROOT;
spin_lock_init(&nilfs->ns_cptree_lock);
@@ -754,9 +752,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
nilfs->ns_blocksize = blocksize;
- get_random_bytes(&nilfs->ns_next_generation,
- sizeof(nilfs->ns_next_generation));
-
err = nilfs_store_disk_layout(nilfs, sbp);
if (err)
goto failed_sbh;
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 1e829ed7b0ef..4776a70f01ae 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -71,8 +71,6 @@ enum {
* @ns_dirty_files: list of dirty files
* @ns_inode_lock: lock protecting @ns_dirty_files
* @ns_gc_inodes: dummy inodes to keep live blocks
- * @ns_next_generation: next generation number for inodes
- * @ns_next_gen_lock: lock protecting @ns_next_generation
* @ns_mount_opt: mount options
* @ns_resuid: uid for reserved blocks
* @ns_resgid: gid for reserved blocks
@@ -161,10 +159,6 @@ struct the_nilfs {
/* GC inode list */
struct list_head ns_gc_inodes;
- /* Inode allocator */
- u32 ns_next_generation;
- spinlock_t ns_next_gen_lock;
-
/* Mount options */
unsigned long ns_mount_opt;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9ec313e9f6e1..13454e5fd3fb 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1006,17 +1006,17 @@ static int fanotify_find_path(int dfd, const char __user *filename,
struct fd f = fdget(dfd);
ret = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
ret = -ENOTDIR;
if ((flags & FAN_MARK_ONLYDIR) &&
- !(S_ISDIR(file_inode(f.file)->i_mode))) {
+ !(S_ISDIR(file_inode(fd_file(f))->i_mode))) {
fdput(f);
goto out;
}
- *path = f.file->f_path;
+ *path = fd_file(f)->f_path;
path_get(path);
fdput(f);
} else {
@@ -1753,14 +1753,14 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
}
f = fdget(fanotify_fd);
- if (unlikely(!f.file))
+ if (unlikely(!fd_file(f)))
return -EBADF;
/* verify that this is indeed an fanotify instance */
ret = -EINVAL;
- if (unlikely(f.file->f_op != &fanotify_fops))
+ if (unlikely(fd_file(f)->f_op != &fanotify_fops))
goto fput_and_out;
- group = f.file->private_data;
+ group = fd_file(f)->private_data;
/*
* An unprivileged user is not allowed to setup mount nor filesystem
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 4ffc30606e0b..c7e451d5bd51 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -753,7 +753,7 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
return -EINVAL;
f = fdget(fd);
- if (unlikely(!f.file))
+ if (unlikely(!fd_file(f)))
return -EBADF;
/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
@@ -763,7 +763,7 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
}
/* verify that this is indeed an inotify instance */
- if (unlikely(f.file->f_op != &inotify_fops)) {
+ if (unlikely(fd_file(f)->f_op != &inotify_fops)) {
ret = -EINVAL;
goto fput_and_out;
}
@@ -780,7 +780,7 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
/* inode held in place by reference to path; group by fget on fd */
inode = path.dentry->d_inode;
- group = f.file->private_data;
+ group = fd_file(f)->private_data;
/* create/update an inode mark */
ret = inotify_update_watch(group, inode, mask);
@@ -798,14 +798,14 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
int ret = -EINVAL;
f = fdget(fd);
- if (unlikely(!f.file))
+ if (unlikely(!fd_file(f)))
return -EBADF;
/* verify that this is indeed an inotify instance */
- if (unlikely(f.file->f_op != &inotify_fops))
+ if (unlikely(fd_file(f)->f_op != &inotify_fops))
goto out;
- group = f.file->private_data;
+ group = fd_file(f)->private_data;
i_mark = inotify_idr_find(group, wd);
if (unlikely(!i_mark))
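
The f.file to fd_file(f) conversions in this and the surrounding files follow the struct fd accessor pattern: callers take a reference with fdget(), read the file pointer only through the fd_file() accessor, and release it with fdput(). A condensed sketch of the calling convention (function and helper names hypothetical):

	static int example_use_fd(int ufd)
	{
		struct fd f = fdget(ufd);
		int ret = -EBADF;

		if (!fd_file(f))		/* accessor, not f.file */
			return ret;

		ret = do_something(fd_file(f));	/* hypothetical helper */
		fdput(f);			/* drops the reference if one was taken */
		return ret;
	}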
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 67ee176b8824..c675fc40ce2d 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -22,7 +22,6 @@ static struct vfsmount *nsfs_mnt;
static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg);
static const struct file_operations ns_file_operations = {
- .llseek = no_llseek,
.unlocked_ioctl = ns_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index d6c985cc6353..db72b3e924b3 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -156,9 +156,8 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
&ext_flags);
if (err) {
- mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
- "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
- (unsigned long long)p_blkno);
+ mlog(ML_ERROR, "get_blocks() failed, inode: 0x%p, "
+ "block: %llu\n", inode, (unsigned long long)iblock);
goto bail;
}
@@ -1187,7 +1186,7 @@ static int ocfs2_write_cluster(struct address_space *mapping,
/* This is the direct io target page. */
if (wc->w_pages[i] == NULL) {
- p_blkno++;
+ p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits));
continue;
}
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index cdb9b9bdea1f..8f714406528d 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -235,7 +235,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
if (bhs[i] == NULL) {
bhs[i] = sb_getblk(sb, block++);
if (bhs[i] == NULL) {
- ocfs2_metadata_cache_io_unlock(ci);
status = -ENOMEM;
mlog_errno(status);
/* Don't forget to put previous bh! */
@@ -389,7 +388,8 @@ read_failure:
/* Always set the buffer in the cache, even if it was
* a forced read, or read-ahead which hasn't yet
* completed. */
- ocfs2_set_buffer_uptodate(ci, bh);
+ if (bh)
+ ocfs2_set_buffer_uptodate(ci, bh);
}
ocfs2_metadata_cache_io_unlock(ci);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 1bde1281d514..4b9f45d7049e 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1785,17 +1785,17 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
goto out;
f = fdget(fd);
- if (f.file == NULL)
+ if (fd_file(f) == NULL)
goto out;
if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
reg->hr_block_bytes == 0)
goto out2;
- if (!S_ISBLK(f.file->f_mapping->host->i_mode))
+ if (!S_ISBLK(fd_file(f)->f_mapping->host->i_mode))
goto out2;
- reg->hr_bdev_file = bdev_file_open_by_dev(f.file->f_mapping->host->i_rdev,
+ reg->hr_bdev_file = bdev_file_open_by_dev(fd_file(f)->f_mapping->host->i_rdev,
BLK_OPEN_WRITE | BLK_OPEN_READ, NULL, NULL);
if (IS_ERR(reg->hr_bdev_file)) {
ret = PTR_ERR(reg->hr_bdev_file);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index ccef3f42b333..213206ebdd58 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3512,16 +3512,6 @@ static int dx_leaf_sort_cmp(const void *a, const void *b)
return 0;
}
-static void dx_leaf_sort_swap(void *a, void *b, int size)
-{
- struct ocfs2_dx_entry *entry1 = a;
- struct ocfs2_dx_entry *entry2 = b;
-
- BUG_ON(size != sizeof(*entry1));
-
- swap(*entry1, *entry2);
-}
-
static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf)
{
struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list;
@@ -3782,7 +3772,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
*/
sort(dx_leaf->dl_list.de_entries, num_used,
sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp,
- dx_leaf_sort_swap);
+ NULL);
ocfs2_journal_dirty(handle, dx_leaf_bh);
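
Dropping dx_leaf_sort_swap() here (and the analogous swap helpers later in this series) relies on sort() from lib/sort.c falling back to its built-in generic swap when swap_func is NULL, which swaps elements of arbitrary size and has fast paths for 4- and 8-byte-aligned records. The resulting call shape is simply:

	/* the generic swap is used when the last argument is NULL */
	sort(array, nmemb, sizeof(*array), cmp_func, NULL);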
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index da78a04d6f0b..60df52e4c1f8 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3151,11 +3151,8 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
#ifdef CONFIG_OCFS2_FS_STATS
if (!lockres->l_lock_wait && dlm_debug->d_filter_secs) {
now = ktime_to_us(ktime_get_real());
- if (lockres->l_lock_prmode.ls_last >
- lockres->l_lock_exmode.ls_last)
- last = lockres->l_lock_prmode.ls_last;
- else
- last = lockres->l_lock_exmode.ls_last;
+ last = max(lockres->l_lock_prmode.ls_last,
+ lockres->l_lock_exmode.ls_last);
/*
* Use d_filter_secs field to filter lock resources dump,
* the default d_filter_secs(0) value filters nothing,
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 70a768b623cf..f7672472fa82 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -973,7 +973,13 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
}
while (done < nr) {
- down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
+ rc = -EAGAIN;
+ mlog(ML_ERROR,
+ "Inode #%llu ip_alloc_sem is temporarily unavailable\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ break;
+ }
rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
&p_block, &p_count, NULL);
up_read(&OCFS2_I(inode)->ip_alloc_sem);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 530fba34f6d3..1bf188b6866a 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1055,7 +1055,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
if (!igrab(inode))
BUG();
- num_running_trans = atomic_read(&(osb->journal->j_num_trans));
+ num_running_trans = atomic_read(&(journal->j_num_trans));
trace_ocfs2_journal_shutdown(num_running_trans);
/* Do a commit_cache here. It will flush our journal, *and*
@@ -1074,9 +1074,10 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
osb->commit_task = NULL;
}
- BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
+ BUG_ON(atomic_read(&(journal->j_num_trans)) != 0);
- if (ocfs2_mount_local(osb)) {
+ if (ocfs2_mount_local(osb) &&
+ (journal->j_journal->j_flags & JBD2_LOADED)) {
jbd2_journal_lock_updates(journal->j_journal);
status = jbd2_journal_flush(journal->j_journal, 0);
jbd2_journal_unlock_updates(journal->j_journal);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 5df34561c551..8ac42ea81a17 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -1002,6 +1002,25 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
start = bit_off + 1;
}
+ /* clear the contiguous bits until the end boundary */
+ if (count) {
+ blkno = la_start_blk +
+ ocfs2_clusters_to_blocks(osb->sb,
+ start - count);
+
+ trace_ocfs2_sync_local_to_main_free(
+ count, start - count,
+ (unsigned long long)la_start_blk,
+ (unsigned long long)blkno);
+
+ status = ocfs2_release_clusters(handle,
+ main_bm_inode,
+ main_bm_bh, blkno,
+ count);
+ if (status < 0)
+ mlog_errno(status);
+ }
+
bail:
if (status)
mlog_errno(status);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 0575c2d060eb..2b0daced98eb 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -371,12 +371,16 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
&pcount, NULL);
- if (status < 0)
+ if (status < 0) {
+ mlog_errno(status);
goto out_unlock;
+ }
status = ocfs2_qinfo_lock(oinfo, 0);
- if (status < 0)
+ if (status < 0) {
+ mlog_errno(status);
goto out_unlock;
+ }
status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct ocfs2_global_disk_dqinfo),
OCFS2_GLOBAL_INFO_OFF);
@@ -404,12 +408,11 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
schedule_delayed_work(&oinfo->dqi_sync_work,
msecs_to_jiffies(oinfo->dqi_syncms));
-out_err:
- return status;
+ return 0;
out_unlock:
ocfs2_unlock_global_qf(oinfo, 0);
- mlog_errno(status);
- goto out_err;
+out_err:
+ return status;
}
/* Write information to global quota file. Expects exclusive lock on quota
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 8ce462c64c51..73d3367c533b 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -692,7 +692,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
int status;
struct buffer_head *bh = NULL;
struct ocfs2_quota_recovery *rec;
- int locked = 0;
+ int locked = 0, global_read = 0;
info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
@@ -700,6 +700,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
if (!oinfo) {
mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
" info.");
+ status = -ENOMEM;
goto out_err;
}
info->dqi_priv = oinfo;
@@ -712,6 +713,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
status = ocfs2_global_read_info(sb, type);
if (status < 0)
goto out_err;
+ global_read = 1;
status = ocfs2_inode_lock(lqinode, &oinfo->dqi_lqi_bh, 1);
if (status < 0) {
@@ -782,10 +784,12 @@ out_err:
if (locked)
ocfs2_inode_unlock(lqinode, 1);
ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
+ if (global_read)
+ cancel_delayed_work_sync(&oinfo->dqi_sync_work);
kfree(oinfo);
}
brelse(bh);
- return -1;
+ return status;
}
/* Write local info to quota file */
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 1f303b1adf1a..004393b13c0a 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -25,6 +25,7 @@
#include "namei.h"
#include "ocfs2_trace.h"
#include "file.h"
+#include "symlink.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -1392,13 +1393,6 @@ static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
return 0;
}
-static void swap_refcount_rec(void *a, void *b, int size)
-{
- struct ocfs2_refcount_rec *l = a, *r = b;
-
- swap(*l, *r);
-}
-
/*
* The refcount cpos are ordered by their 64bit cpos,
* But we will use the low 32 bit to be the e_cpos in the b-tree.
@@ -1474,7 +1468,7 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
*/
sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
- cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
+ cmp_refcount_rec_by_low_cpos, NULL);
ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
if (ret) {
@@ -1499,11 +1493,11 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
- cmp_refcount_rec_by_cpos, swap_refcount_rec);
+ cmp_refcount_rec_by_cpos, NULL);
sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
sizeof(struct ocfs2_refcount_rec),
- cmp_refcount_rec_by_cpos, swap_refcount_rec);
+ cmp_refcount_rec_by_cpos, NULL);
*split_cpos = cpos;
return 0;
@@ -4155,8 +4149,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
int ret;
struct inode *inode = d_inode(old_dentry);
struct buffer_head *new_bh = NULL;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
- if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+ if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
ret = -EINVAL;
mlog_errno(ret);
goto out;
@@ -4182,6 +4177,26 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
goto out_unlock;
}
+ if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) &&
+ (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
+ /*
+ * Adjust extent record count to reserve space for extended attribute.
+ * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
+ */
+ struct ocfs2_inode_info *new_oi = OCFS2_I(new_inode);
+
+ if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
+ !(ocfs2_inode_is_fast_symlink(new_inode))) {
+ struct ocfs2_dinode *new_di = (struct ocfs2_dinode *)new_bh->b_data;
+ struct ocfs2_dinode *old_di = (struct ocfs2_dinode *)old_bh->b_data;
+ struct ocfs2_extent_list *el = &new_di->id2.i_list;
+ int inline_size = le16_to_cpu(old_di->i_xattr_inline_size);
+
+ le16_add_cpu(&el->l_count, -(inline_size /
+ sizeof(struct ocfs2_extent_rec)));
+ }
+ }
+
ret = ocfs2_create_reflink_node(inode, old_bh,
new_inode, new_bh, preserve);
if (ret) {
@@ -4189,7 +4204,7 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
goto inode_unlock;
}
- if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
+ if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
ret = ocfs2_reflink_xattrs(inode, old_bh,
new_inode, new_bh,
preserve);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index afee70125ae3..3d404624bb96 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1571,15 +1571,13 @@ static int __init ocfs2_init(void)
ocfs2_set_locking_protocol();
- status = register_quota_format(&ocfs2_quota_format);
- if (status < 0)
- goto out3;
+ register_quota_format(&ocfs2_quota_format);
+
status = register_filesystem(&ocfs2_fs_type);
if (!status)
return 0;
unregister_quota_format(&ocfs2_quota_format);
-out3:
debugfs_remove(ocfs2_debugfs_root);
ocfs2_free_mem_caches();
out2:
@@ -2357,8 +2355,8 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
(unsigned long long)bh->b_blocknr);
} else if (le32_to_cpu(di->id2.i_super.s_clustersize_bits) < 12 ||
le32_to_cpu(di->id2.i_super.s_clustersize_bits) > 20) {
- mlog(ML_ERROR, "bad cluster size found: %u\n",
- 1 << le32_to_cpu(di->id2.i_super.s_clustersize_bits));
+ mlog(ML_ERROR, "bad cluster size bit found: %u\n",
+ le32_to_cpu(di->id2.i_super.s_clustersize_bits));
} else if (!le64_to_cpu(di->id2.i_super.s_root_blkno)) {
mlog(ML_ERROR, "bad root_blkno: 0\n");
} else if (!le64_to_cpu(di->id2.i_super.s_system_dir_blkno)) {
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 35c0cc2a51af..dd0a05365e79 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -4167,15 +4167,6 @@ static int cmp_xe(const void *a, const void *b)
return 0;
}
-static void swap_xe(void *a, void *b, int size)
-{
- struct ocfs2_xattr_entry *l = a, *r = b, tmp;
-
- tmp = *l;
- memcpy(l, r, sizeof(struct ocfs2_xattr_entry));
- memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry));
-}
-
/*
* When the ocfs2_xattr_block is filled up, new bucket will be created
* and all the xattr entries will be moved to the new bucket.
@@ -4241,7 +4232,7 @@ static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode,
trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change);
sort(target + offset, count, sizeof(struct ocfs2_xattr_entry),
- cmp_xe, swap_xe);
+ cmp_xe, NULL);
}
/*
@@ -4436,7 +4427,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
*/
sort(entries, le16_to_cpu(xh->xh_count),
sizeof(struct ocfs2_xattr_entry),
- cmp_xe_offset, swap_xe);
+ cmp_xe_offset, NULL);
/* Move all name/values to the end of the bucket. */
xe = xh->xh_entries;
@@ -4478,7 +4469,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
/* sort the entries by their name_hash. */
sort(entries, le16_to_cpu(xh->xh_count),
sizeof(struct ocfs2_xattr_entry),
- cmp_xe, swap_xe);
+ cmp_xe, NULL);
buf = bucket_buf;
for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
@@ -6520,16 +6511,7 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
}
new_oi = OCFS2_I(args->new_inode);
- /*
- * Adjust extent record count to reserve space for extended attribute.
- * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
- */
- if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
- !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
- struct ocfs2_extent_list *el = &new_di->id2.i_list;
- le16_add_cpu(&el->l_count, -(inline_size /
- sizeof(struct ocfs2_extent_rec)));
- }
+
spin_lock(&new_oi->ip_lock);
new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
diff --git a/fs/open.c b/fs/open.c
index daf1b55ca818..acaeb3e25c88 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -193,10 +193,10 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
if (length < 0)
return -EINVAL;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = do_ftruncate(f.file, length, small);
+ error = do_ftruncate(fd_file(f), length, small);
fdput(f);
return error;
@@ -352,8 +352,8 @@ int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len)
struct fd f = fdget(fd);
int error = -EBADF;
- if (f.file) {
- error = vfs_fallocate(f.file, mode, offset, len);
+ if (fd_file(f)) {
+ error = vfs_fallocate(fd_file(f), mode, offset, len);
fdput(f);
}
return error;
@@ -584,16 +584,16 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
int error;
error = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
goto out;
error = -ENOTDIR;
- if (!d_can_lookup(f.file->f_path.dentry))
+ if (!d_can_lookup(fd_file(f)->f_path.dentry))
goto out_putf;
- error = file_permission(f.file, MAY_EXEC | MAY_CHDIR);
+ error = file_permission(fd_file(f), MAY_EXEC | MAY_CHDIR);
if (!error)
- set_fs_pwd(current->fs, &f.file->f_path);
+ set_fs_pwd(current->fs, &fd_file(f)->f_path);
out_putf:
fdput(f);
out:
@@ -674,8 +674,8 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
struct fd f = fdget(fd);
int err = -EBADF;
- if (f.file) {
- err = vfs_fchmod(f.file, mode);
+ if (fd_file(f)) {
+ err = vfs_fchmod(fd_file(f), mode);
fdput(f);
}
return err;
@@ -868,8 +868,8 @@ int ksys_fchown(unsigned int fd, uid_t user, gid_t group)
struct fd f = fdget(fd);
int error = -EBADF;
- if (f.file) {
- error = vfs_fchown(f.file, user, group);
+ if (fd_file(f)) {
+ error = vfs_fchown(fd_file(f), user, group);
fdput(f);
}
return error;
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index be4ba03a01a0..04e15dfa504a 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -904,7 +904,7 @@ static void orangefs_obj_release(struct kobject *kobj)
orangefs_obj = NULL;
}
-static struct kobj_type orangefs_ktype = {
+static const struct kobj_type orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = orangefs_default_groups,
.release = orangefs_obj_release,
@@ -951,7 +951,7 @@ static void acache_orangefs_obj_release(struct kobject *kobj)
acache_orangefs_obj = NULL;
}
-static struct kobj_type acache_orangefs_ktype = {
+static const struct kobj_type acache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = acache_orangefs_default_groups,
.release = acache_orangefs_obj_release,
@@ -998,7 +998,7 @@ static void capcache_orangefs_obj_release(struct kobject *kobj)
capcache_orangefs_obj = NULL;
}
-static struct kobj_type capcache_orangefs_ktype = {
+static const struct kobj_type capcache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = capcache_orangefs_default_groups,
.release = capcache_orangefs_obj_release,
@@ -1045,7 +1045,7 @@ static void ccache_orangefs_obj_release(struct kobject *kobj)
ccache_orangefs_obj = NULL;
}
-static struct kobj_type ccache_orangefs_ktype = {
+static const struct kobj_type ccache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = ccache_orangefs_default_groups,
.release = ccache_orangefs_obj_release,
@@ -1092,7 +1092,7 @@ static void ncache_orangefs_obj_release(struct kobject *kobj)
ncache_orangefs_obj = NULL;
}
-static struct kobj_type ncache_orangefs_ktype = {
+static const struct kobj_type ncache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = ncache_orangefs_default_groups,
.release = ncache_orangefs_obj_release,
@@ -1132,7 +1132,7 @@ static void pc_orangefs_obj_release(struct kobject *kobj)
pc_orangefs_obj = NULL;
}
-static struct kobj_type pc_orangefs_ktype = {
+static const struct kobj_type pc_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = pc_orangefs_default_groups,
.release = pc_orangefs_obj_release,
@@ -1165,7 +1165,7 @@ static void stats_orangefs_obj_release(struct kobject *kobj)
stats_orangefs_obj = NULL;
}
-static struct kobj_type stats_orangefs_ktype = {
+static const struct kobj_type stats_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
.default_groups = stats_orangefs_default_groups,
.release = stats_orangefs_obj_release,
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 1a411cae57ed..4504493b20be 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -93,11 +93,11 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
bool allow_meta)
{
struct dentry *dentry = file_dentry(file);
+ struct file *realfile = file->private_data;
struct path realpath;
int err;
- real->flags = 0;
- real->file = file->private_data;
+ real->word = (unsigned long)realfile;
if (allow_meta) {
ovl_path_real(dentry, &realpath);
@@ -113,16 +113,17 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
return -EIO;
/* Has it been copied up since we'd opened it? */
- if (unlikely(file_inode(real->file) != d_inode(realpath.dentry))) {
- real->flags = FDPUT_FPUT;
- real->file = ovl_open_realfile(file, &realpath);
-
- return PTR_ERR_OR_ZERO(real->file);
+ if (unlikely(file_inode(realfile) != d_inode(realpath.dentry))) {
+ struct file *f = ovl_open_realfile(file, &realpath);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ real->word = (unsigned long)f | FDPUT_FPUT;
+ return 0;
}
/* Did the flags change since open? */
- if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS))
- return ovl_change_flags(real->file, file->f_flags);
+ if (unlikely((file->f_flags ^ realfile->f_flags) & ~OVL_OPEN_FLAGS))
+ return ovl_change_flags(realfile, file->f_flags);
return 0;
}
@@ -130,10 +131,11 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
static int ovl_real_fdget(const struct file *file, struct fd *real)
{
if (d_is_dir(file_dentry(file))) {
- real->flags = 0;
- real->file = ovl_dir_real_file(file, false);
-
- return PTR_ERR_OR_ZERO(real->file);
+ struct file *f = ovl_dir_real_file(file, false);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ real->word = (unsigned long)f;
+ return 0;
}
return ovl_real_fdget_meta(file, real, false);
@@ -209,13 +211,13 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
* files, so we use the real file to perform seeks.
*/
ovl_inode_lock(inode);
- real.file->f_pos = file->f_pos;
+ fd_file(real)->f_pos = file->f_pos;
old_cred = ovl_override_creds(inode->i_sb);
- ret = vfs_llseek(real.file, offset, whence);
+ ret = vfs_llseek(fd_file(real), offset, whence);
revert_creds(old_cred);
- file->f_pos = real.file->f_pos;
+ file->f_pos = fd_file(real)->f_pos;
ovl_inode_unlock(inode);
fdput(real);
@@ -275,7 +277,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (ret)
return ret;
- ret = backing_file_read_iter(real.file, iter, iocb, iocb->ki_flags,
+ ret = backing_file_read_iter(fd_file(real), iter, iocb, iocb->ki_flags,
&ctx);
fdput(real);
@@ -314,7 +316,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
* this property in case it is set by the issuer.
*/
ifl &= ~IOCB_DIO_CALLER_COMP;
- ret = backing_file_write_iter(real.file, iter, iocb, ifl, &ctx);
+ ret = backing_file_write_iter(fd_file(real), iter, iocb, ifl, &ctx);
fdput(real);
out_unlock:
@@ -339,7 +341,7 @@ static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
if (ret)
return ret;
- ret = backing_file_splice_read(real.file, ppos, pipe, len, flags, &ctx);
+ ret = backing_file_splice_read(fd_file(real), ppos, pipe, len, flags, &ctx);
fdput(real);
return ret;
@@ -348,7 +350,7 @@ static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
/*
* Calling iter_file_splice_write() directly from overlay's f_op may deadlock
* due to lock order inversion between pipe->mutex in iter_file_splice_write()
- * and file_start_write(real.file) in ovl_write_iter().
+ * and file_start_write(fd_file(real)) in ovl_write_iter().
*
* So do everything ovl_write_iter() does and call iter_file_splice_write() on
* the real file.
@@ -373,7 +375,7 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
if (ret)
goto out_unlock;
- ret = backing_file_splice_write(pipe, real.file, ppos, len, flags, &ctx);
+ ret = backing_file_splice_write(pipe, fd_file(real), ppos, len, flags, &ctx);
fdput(real);
out_unlock:
@@ -397,9 +399,9 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return ret;
/* Don't sync lower file for fear of receiving EROFS error */
- if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
+ if (file_inode(fd_file(real)) == ovl_inode_upper(file_inode(file))) {
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_fsync_range(real.file, start, end, datasync);
+ ret = vfs_fsync_range(fd_file(real), start, end, datasync);
revert_creds(old_cred);
}
@@ -439,7 +441,7 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
goto out_unlock;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_fallocate(real.file, mode, offset, len);
+ ret = vfs_fallocate(fd_file(real), mode, offset, len);
revert_creds(old_cred);
/* Update size */
@@ -464,7 +466,7 @@ static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
return ret;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_fadvise(real.file, offset, len, advice);
+ ret = vfs_fadvise(fd_file(real), offset, len, advice);
revert_creds(old_cred);
fdput(real);
@@ -509,18 +511,18 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
old_cred = ovl_override_creds(file_inode(file_out)->i_sb);
switch (op) {
case OVL_COPY:
- ret = vfs_copy_file_range(real_in.file, pos_in,
- real_out.file, pos_out, len, flags);
+ ret = vfs_copy_file_range(fd_file(real_in), pos_in,
+ fd_file(real_out), pos_out, len, flags);
break;
case OVL_CLONE:
- ret = vfs_clone_file_range(real_in.file, pos_in,
- real_out.file, pos_out, len, flags);
+ ret = vfs_clone_file_range(fd_file(real_in), pos_in,
+ fd_file(real_out), pos_out, len, flags);
break;
case OVL_DEDUPE:
- ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
- real_out.file, pos_out, len,
+ ret = vfs_dedupe_file_range_one(fd_file(real_in), pos_in,
+ fd_file(real_out), pos_out, len,
flags);
break;
}
@@ -583,9 +585,9 @@ static int ovl_flush(struct file *file, fl_owner_t id)
if (err)
return err;
- if (real.file->f_op->flush) {
+ if (fd_file(real)->f_op->flush) {
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- err = real.file->f_op->flush(real.file, id);
+ err = fd_file(real)->f_op->flush(fd_file(real), id);
revert_creds(old_cred);
}
fdput(real);
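
The overlayfs hunks above belong to the tree-wide switch to the new struct fd representation: the old separate file and flags members are packed into a single word, and fd_file() recovers the pointer. Since struct file allocations are well aligned, the low bits are free to carry the FDPUT_* flags. A minimal sketch of the layout (the authoritative definitions live in include/linux/file.h; the mask below is illustrative):

/* Sketch: file pointer and FDPUT_* flag bits packed into one word.
 * struct file is at least 4-byte aligned, so bits 0-1 are free. */
struct fd {
	unsigned long word;
};

#define FDPUT_FPUT	1UL	/* caller owes an fput() */

static inline struct file *fd_file(struct fd f)
{
	return (struct file *)(f.word & ~3UL);	/* strip flag bits */
}

static inline bool fd_is_fput(struct fd f)
{
	return f.word & FDPUT_FPUT;
}
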
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 7ffdc88dfb52..80675b6bf884 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -120,6 +120,7 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct nsproxy *nsp __free(put_nsproxy) = NULL;
struct pid *pid = pidfd_pid(file);
struct ns_common *ns_common = NULL;
+ struct pid_namespace *pid_ns;
if (arg)
return -EINVAL;
@@ -202,7 +203,9 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PIDFD_GET_PID_NAMESPACE:
if (IS_ENABLED(CONFIG_PID_NS)) {
rcu_read_lock();
- ns_common = to_ns_common( get_pid_ns(task_active_pid_ns(task)));
+ pid_ns = task_active_pid_ns(task);
+ if (pid_ns)
+ ns_common = to_ns_common(get_pid_ns(pid_ns));
rcu_read_unlock();
}
break;
diff --git a/fs/pipe.c b/fs/pipe.c
index 4083ba492cb6..12b22c2723b7 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1231,7 +1231,6 @@ err:
const struct file_operations pipefifo_fops = {
.open = fifo_open,
- .llseek = no_llseek,
.read_iter = pipe_read,
.write_iter = pipe_write,
.poll = pipe_poll,
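
Dropping the explicit .llseek = no_llseek entry is safe because the VFS now derives seekability from the operation itself: FMODE_LSEEK is only set at open time when ->llseek is non-NULL, and vfs_llseek() bails out early without it. A simplified sketch of that check:

/* Simplified sketch of the VFS-side check that replaces no_llseek. */
loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
{
	if (!(file->f_mode & FMODE_LSEEK))
		return -ESPIPE;		/* not seekable, e.g. pipes */
	return file->f_op->llseek(file, offset, whence);
}
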
diff --git a/fs/proc/base.c b/fs/proc/base.c
index e7810f3bd522..b31283d81c52 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2626,7 +2626,7 @@ static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
}
task_lock(p);
- if (task_is_realtime(p))
+ if (rt_or_dl_task_policy(p))
slack_ns = 0;
else if (slack_ns == 0)
slack_ns = p->default_timer_slack_ns;
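
The timerslack change is subtle: task_is_realtime() tests the effective priority, so a SCHED_NORMAL task temporarily boosted by priority inheritance would be denied slack, while rt_or_dl_task_policy() tests only the configured policy. A sketch of the helper, assuming it mirrors the scheduler's policy predicates:

/* Sketch: classify by scheduling policy alone, ignoring transient
 * PI-boosted priorities. */
static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
{
	int policy = tsk->policy;

	return policy == SCHED_FIFO || policy == SCHED_RR ||
	       policy == SCHED_DEADLINE;
}
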
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d19434e2a58e..626ad7bd94f2 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -303,9 +303,7 @@ static ssize_t proc_reg_read_iter(struct kiocb *iocb, struct iov_iter *iter)
static ssize_t pde_read(struct proc_dir_entry *pde, struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- typeof_member(struct proc_ops, proc_read) read;
-
- read = pde->proc_ops->proc_read;
+ __auto_type read = pde->proc_ops->proc_read;
if (read)
return read(file, buf, count, ppos);
return -EIO;
@@ -327,9 +325,7 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count,
static ssize_t pde_write(struct proc_dir_entry *pde, struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- typeof_member(struct proc_ops, proc_write) write;
-
- write = pde->proc_ops->proc_write;
+ __auto_type write = pde->proc_ops->proc_write;
if (write)
return write(file, buf, count, ppos);
return -EIO;
@@ -351,9 +347,7 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
static __poll_t pde_poll(struct proc_dir_entry *pde, struct file *file, struct poll_table_struct *pts)
{
- typeof_member(struct proc_ops, proc_poll) poll;
-
- poll = pde->proc_ops->proc_poll;
+ __auto_type poll = pde->proc_ops->proc_poll;
if (poll)
return poll(file, pts);
return DEFAULT_POLLMASK;
@@ -375,9 +369,7 @@ static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
static long pde_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
{
- typeof_member(struct proc_ops, proc_ioctl) ioctl;
-
- ioctl = pde->proc_ops->proc_ioctl;
+ __auto_type ioctl = pde->proc_ops->proc_ioctl;
if (ioctl)
return ioctl(file, cmd, arg);
return -ENOTTY;
@@ -400,9 +392,7 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
#ifdef CONFIG_COMPAT
static long pde_compat_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
{
- typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
-
- compat_ioctl = pde->proc_ops->proc_compat_ioctl;
+ __auto_type compat_ioctl = pde->proc_ops->proc_compat_ioctl;
if (compat_ioctl)
return compat_ioctl(file, cmd, arg);
return -ENOTTY;
@@ -424,9 +414,7 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned
static int pde_mmap(struct proc_dir_entry *pde, struct file *file, struct vm_area_struct *vma)
{
- typeof_member(struct proc_ops, proc_mmap) mmap;
-
- mmap = pde->proc_ops->proc_mmap;
+ __auto_type mmap = pde->proc_ops->proc_mmap;
if (mmap)
return mmap(file, vma);
return -EIO;
@@ -483,7 +471,6 @@ static int proc_reg_open(struct inode *inode, struct file *file)
struct proc_dir_entry *pde = PDE(inode);
int rv = 0;
typeof_member(struct proc_ops, proc_open) open;
- typeof_member(struct proc_ops, proc_release) release;
struct pde_opener *pdeo;
if (!pde->proc_ops->proc_lseek)
@@ -510,7 +497,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
if (!use_pde(pde))
return -ENOENT;
- release = pde->proc_ops->proc_release;
+ __auto_type release = pde->proc_ops->proc_release;
if (release) {
pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
if (!pdeo) {
@@ -547,9 +534,7 @@ static int proc_reg_release(struct inode *inode, struct file *file)
struct pde_opener *pdeo;
if (pde_is_permanent(pde)) {
- typeof_member(struct proc_ops, proc_release) release;
-
- release = pde->proc_ops->proc_release;
+ __auto_type release = pde->proc_ops->proc_release;
if (release) {
return release(inode, file);
}
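
All of the procfs hunks above are the same mechanical substitution: typeof_member(struct proc_ops, proc_read) named the function-pointer type explicitly so the declaration and the load could be split across lines, whereas GNU C's __auto_type deduces it from the initializer. A standalone illustration (compiles with GCC or Clang):

#include <stdio.h>

struct ops {
	long (*read)(const char *buf, unsigned long count);
};

static long my_read(const char *buf, unsigned long count)
{
	(void)buf;
	return (long)count;
}

int main(void)
{
	struct ops o = { .read = my_read };

	/* __auto_type deduces 'long (*)(const char *, unsigned long)'
	 * from the right-hand side, like a one-shot typeof(). */
	__auto_type read = o.read;

	if (read)
		printf("%ld\n", read("x", 1));
	return 0;
}
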
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 9e3f25e4c188..87e4d6282025 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -166,8 +166,7 @@ static inline int folio_precise_page_mapcount(struct folio *folio,
{
int mapcount = atomic_read(&page->_mapcount) + 1;
- /* Handle page_has_type() pages */
- if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
+ if (page_mapcount_is_type(mapcount))
mapcount = 0;
if (folio_test_large(folio))
mapcount += folio_entire_mapcount(folio);
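
The replaced comparison encoded the fact that _mapcount doubles as a page-type field: for typed pages it holds a large negative value, so atomic_read(&page->_mapcount) + 1 lands below PAGE_MAPCOUNT_RESERVE + 1 and must not be treated as a real mapping count. page_mapcount_is_type() just gives that test a name. A sketch of the convention (the reserve value shown is illustrative):

/* Sketch: _mapcount values below the reserve encode a page type, not a
 * mapping count.  The threshold shown is illustrative. */
#define PAGE_MAPCOUNT_RESERVE	(-128)

static inline bool page_mapcount_is_type(int mapcount)
{
	/* mapcount is atomic_read(&page->_mapcount) + 1, as in the caller */
	return mapcount < PAGE_MAPCOUNT_RESERVE + 1;
}
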
diff --git a/fs/proc/page.c b/fs/proc/page.c
index b7a5c84b5819..a55f5acefa97 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -182,7 +182,6 @@ u64 stable_page_flags(const struct page *page)
#endif
u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
- u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);
@@ -207,18 +206,16 @@ u64 stable_page_flags(const struct page *page)
u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison);
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
- u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
-#endif
-
u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
- u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
+ u |= kpf_copy_bit(k, KPF_OWNER_2, PG_owner_2);
u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
u |= kpf_copy_bit(k, KPF_ARCH_2, PG_arch_2);
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
u |= kpf_copy_bit(k, KPF_ARCH_3, PG_arch_3);
#endif
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 9553e77c9d31..d11ebc055ce0 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -29,8 +29,13 @@ static const struct inode_operations proc_sys_inode_operations;
static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
-/* Support for permanently empty directories */
-static struct ctl_table sysctl_mount_point[] = { };
+/*
+ * Support for permanently empty directories.
+ * Must be non-empty to avoid sharing an address with other tables.
+ */
+static struct ctl_table sysctl_mount_point[] = {
+ { }
+};
/**
* register_sysctl_mount_point() - registers a sysctl mount point
@@ -42,7 +47,7 @@ static struct ctl_table sysctl_mount_point[] = { };
*/
struct ctl_table_header *register_sysctl_mount_point(const char *path)
{
- return register_sysctl(path, sysctl_mount_point);
+ return register_sysctl_sz(path, sysctl_mount_point, 0);
}
EXPORT_SYMBOL(register_sysctl_mount_point);
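
The new comment is the whole bug: a zero-length struct ctl_table[] occupies no storage, so the toolchain may place several such arrays at the same address, and any "is this the permanently-empty table?" pointer comparison then matches the wrong table. One sentinel entry gives the array a unique address, while register_sysctl_sz(path, table, 0) still registers zero usable entries. A small user-space demonstration of the aliasing hazard (outcome is toolchain-dependent; zero-length arrays are a GNU extension):

#include <stdio.h>

struct ctl_table { const char *procname; };

/* Two distinct zero-length arrays: nothing obliges the toolchain to
 * give them distinct addresses. */
static struct ctl_table empty_a[] = {};
static struct ctl_table empty_b[] = {};

int main(void)
{
	printf("empty_a %s empty_b\n",
	       (void *)empty_a == (void *)empty_b ? "aliases" : "is distinct from");
	return 0;
}
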
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ade74a396968..72f14fd59c2d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -543,21 +543,6 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
}
}
- if (karg.build_id_size) {
- __u32 build_id_sz;
-
- err = build_id_parse(vma, build_id_buf, &build_id_sz);
- if (err) {
- karg.build_id_size = 0;
- } else {
- if (karg.build_id_size < build_id_sz) {
- err = -ENAMETOOLONG;
- goto out;
- }
- karg.build_id_size = build_id_sz;
- }
- }
-
if (karg.vma_name_size) {
size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
const struct path *path;
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 84719e2bcbc4..f56b066ab80c 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -275,7 +275,7 @@ void pstore_record_init(struct pstore_record *record,
* end of the buffer.
*/
static void pstore_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct kmsg_dump_iter iter;
unsigned long total = 0;
@@ -285,9 +285,9 @@ static void pstore_dump(struct kmsg_dumper *dumper,
int saved_ret = 0;
int ret;
- why = kmsg_dump_reason_str(reason);
+ why = kmsg_dump_reason_str(detail->reason);
- if (pstore_cannot_block_path(reason)) {
+ if (pstore_cannot_block_path(detail->reason)) {
if (!raw_spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
pr_err("dump skipped in %s path because of concurrent dump\n",
in_nmi() ? "NMI" : why);
@@ -311,7 +311,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
pstore_record_init(&record, psinfo);
record.type = PSTORE_TYPE_DMESG;
record.count = oopscount;
- record.reason = reason;
+ record.reason = detail->reason;
record.part = part;
record.buf = psinfo->buf;
@@ -352,7 +352,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
}
ret = psinfo->write(&record);
- if (ret == 0 && reason == KMSG_DUMP_OOPS) {
+ if (ret == 0 && detail->reason == KMSG_DUMP_OOPS) {
pstore_new_entry = 1;
pstore_timer_kick();
} else {
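
The kmsg dumper callback now takes a struct kmsg_dump_detail * rather than a bare reason, so future fields can be added without another tree-wide signature change; pstore simply reads detail->reason wherever it used reason. A sketch of the wrapper, assuming it carries the reason plus an optional description string:

/* Sketch of the callback argument (see include/linux/kmsg_dump.h for
 * the authoritative definition). */
struct kmsg_dump_detail {
	enum kmsg_dump_reason reason;
	const char *description;	/* optional context, may be NULL */
};
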
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 7ae885e6d5d7..b40410cd39af 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -163,13 +163,12 @@ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;
-int register_quota_format(struct quota_format_type *fmt)
+void register_quota_format(struct quota_format_type *fmt)
{
spin_lock(&dq_list_lock);
fmt->qf_next = quota_formats;
quota_formats = fmt;
spin_unlock(&dq_list_lock);
- return 0;
}
EXPORT_SYMBOL(register_quota_format);
@@ -1831,7 +1830,6 @@ void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
- return;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1873,7 +1871,6 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
- return;
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
@@ -2406,7 +2403,7 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
unsigned int flags)
{
- struct quota_format_type *fmt = find_quota_format(format_id);
+ struct quota_format_type *fmt;
struct quota_info *dqopt = sb_dqopt(sb);
int error;
@@ -2416,6 +2413,7 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED))
return -EINVAL;
+ fmt = find_quota_format(format_id);
if (!fmt)
return -ESRCH;
if (!sb->dq_op || !sb->s_qcop ||
@@ -2596,7 +2594,8 @@ static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
goto out_err;
}
if (sb_has_quota_limits_enabled(sb, type)) {
- ret = -EBUSY;
+ /* compatible with XFS */
+ ret = -EEXIST;
goto out_err;
}
spin_lock(&dq_state_lock);
@@ -2610,9 +2609,6 @@ out_err:
if (flags & qtype_enforce_flag(type))
dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
}
- /* Error code translation for better compatibility with XFS */
- if (ret == -EBUSY)
- ret = -EEXIST;
return ret;
}
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 0e41fb84060f..290157bc7bec 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -980,7 +980,7 @@ SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd,
int ret;
f = fdget_raw(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
ret = -EINVAL;
@@ -988,12 +988,12 @@ SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd,
goto out;
if (quotactl_cmd_write(cmds)) {
- ret = mnt_want_write(f.file->f_path.mnt);
+ ret = mnt_want_write(fd_file(f)->f_path.mnt);
if (ret)
goto out;
}
- sb = f.file->f_path.mnt->mnt_sb;
+ sb = fd_file(f)->f_path.mnt->mnt_sb;
if (quotactl_cmd_onoff(cmds))
down_write(&sb->s_umount);
else
@@ -1007,7 +1007,7 @@ SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd,
up_read(&sb->s_umount);
if (quotactl_cmd_write(cmds))
- mnt_drop_write(f.file->f_path.mnt);
+ mnt_drop_write(fd_file(f)->f_path.mnt);
out:
fdput(f);
return ret;
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 3f3e8acc05db..6f7f0b4afba9 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -235,7 +235,8 @@ static struct quota_format_type v1_quota_format = {
static int __init init_v1_quota_format(void)
{
- return register_quota_format(&v1_quota_format);
+ register_quota_format(&v1_quota_format);
+ return 0;
}
static void __exit exit_v1_quota_format(void)
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index c48c233f3bef..1fda93dcbc1b 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -440,12 +440,9 @@ static struct quota_format_type v2r1_quota_format = {
static int __init init_v2_quota_format(void)
{
- int ret;
-
- ret = register_quota_format(&v2r0_quota_format);
- if (ret)
- return ret;
- return register_quota_format(&v2r1_quota_format);
+ register_quota_format(&v2r0_quota_format);
+ register_quota_format(&v2r1_quota_format);
+ return 0;
}
static void __exit exit_v2_quota_format(void)
diff --git a/fs/read_write.c b/fs/read_write.c
index 070a7c33b9dd..64dc24afdb3a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -387,12 +387,12 @@ static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence)
{
off_t retval;
struct fd f = fdget_pos(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
retval = -EINVAL;
if (whence <= SEEK_MAX) {
- loff_t res = vfs_llseek(f.file, offset, whence);
+ loff_t res = vfs_llseek(fd_file(f), offset, whence);
retval = res;
if (res != (loff_t)retval)
retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
@@ -423,14 +423,14 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
struct fd f = fdget_pos(fd);
loff_t offset;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
retval = -EINVAL;
if (whence > SEEK_MAX)
goto out_putf;
- offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
+ offset = vfs_llseek(fd_file(f), ((loff_t) offset_high << 32) | offset_low,
whence);
retval = (int)offset;
@@ -703,15 +703,15 @@ ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
- if (f.file) {
- loff_t pos, *ppos = file_ppos(f.file);
+ if (fd_file(f)) {
+ loff_t pos, *ppos = file_ppos(fd_file(f));
if (ppos) {
pos = *ppos;
ppos = &pos;
}
- ret = vfs_read(f.file, buf, count, ppos);
+ ret = vfs_read(fd_file(f), buf, count, ppos);
if (ret >= 0 && ppos)
- f.file->f_pos = pos;
+ fd_file(f)->f_pos = pos;
fdput_pos(f);
}
return ret;
@@ -727,15 +727,15 @@ ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count)
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
- if (f.file) {
- loff_t pos, *ppos = file_ppos(f.file);
+ if (fd_file(f)) {
+ loff_t pos, *ppos = file_ppos(fd_file(f));
if (ppos) {
pos = *ppos;
ppos = &pos;
}
- ret = vfs_write(f.file, buf, count, ppos);
+ ret = vfs_write(fd_file(f), buf, count, ppos);
if (ret >= 0 && ppos)
- f.file->f_pos = pos;
+ fd_file(f)->f_pos = pos;
fdput_pos(f);
}
@@ -758,10 +758,10 @@ ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count,
return -EINVAL;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
ret = -ESPIPE;
- if (f.file->f_mode & FMODE_PREAD)
- ret = vfs_read(f.file, buf, count, &pos);
+ if (fd_file(f)->f_mode & FMODE_PREAD)
+ ret = vfs_read(fd_file(f), buf, count, &pos);
fdput(f);
}
@@ -792,10 +792,10 @@ ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
return -EINVAL;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
ret = -ESPIPE;
- if (f.file->f_mode & FMODE_PWRITE)
- ret = vfs_write(f.file, buf, count, &pos);
+ if (fd_file(f)->f_mode & FMODE_PWRITE)
+ ret = vfs_write(fd_file(f), buf, count, &pos);
fdput(f);
}
@@ -1078,15 +1078,15 @@ static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
- if (f.file) {
- loff_t pos, *ppos = file_ppos(f.file);
+ if (fd_file(f)) {
+ loff_t pos, *ppos = file_ppos(fd_file(f));
if (ppos) {
pos = *ppos;
ppos = &pos;
}
- ret = vfs_readv(f.file, vec, vlen, ppos, flags);
+ ret = vfs_readv(fd_file(f), vec, vlen, ppos, flags);
if (ret >= 0 && ppos)
- f.file->f_pos = pos;
+ fd_file(f)->f_pos = pos;
fdput_pos(f);
}
@@ -1102,15 +1102,15 @@ static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
- if (f.file) {
- loff_t pos, *ppos = file_ppos(f.file);
+ if (fd_file(f)) {
+ loff_t pos, *ppos = file_ppos(fd_file(f));
if (ppos) {
pos = *ppos;
ppos = &pos;
}
- ret = vfs_writev(f.file, vec, vlen, ppos, flags);
+ ret = vfs_writev(fd_file(f), vec, vlen, ppos, flags);
if (ret >= 0 && ppos)
- f.file->f_pos = pos;
+ fd_file(f)->f_pos = pos;
fdput_pos(f);
}
@@ -1136,10 +1136,10 @@ static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
return -EINVAL;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
ret = -ESPIPE;
- if (f.file->f_mode & FMODE_PREAD)
- ret = vfs_readv(f.file, vec, vlen, &pos, flags);
+ if (fd_file(f)->f_mode & FMODE_PREAD)
+ ret = vfs_readv(fd_file(f), vec, vlen, &pos, flags);
fdput(f);
}
@@ -1159,10 +1159,10 @@ static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
return -EINVAL;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
ret = -ESPIPE;
- if (f.file->f_mode & FMODE_PWRITE)
- ret = vfs_writev(f.file, vec, vlen, &pos, flags);
+ if (fd_file(f)->f_mode & FMODE_PWRITE)
+ ret = vfs_writev(fd_file(f), vec, vlen, &pos, flags);
fdput(f);
}
@@ -1328,19 +1328,19 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
*/
retval = -EBADF;
in = fdget(in_fd);
- if (!in.file)
+ if (!fd_file(in))
goto out;
- if (!(in.file->f_mode & FMODE_READ))
+ if (!(fd_file(in)->f_mode & FMODE_READ))
goto fput_in;
retval = -ESPIPE;
if (!ppos) {
- pos = in.file->f_pos;
+ pos = fd_file(in)->f_pos;
} else {
pos = *ppos;
- if (!(in.file->f_mode & FMODE_PREAD))
+ if (!(fd_file(in)->f_mode & FMODE_PREAD))
goto fput_in;
}
- retval = rw_verify_area(READ, in.file, &pos, count);
+ retval = rw_verify_area(READ, fd_file(in), &pos, count);
if (retval < 0)
goto fput_in;
if (count > MAX_RW_COUNT)
@@ -1351,13 +1351,13 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
*/
retval = -EBADF;
out = fdget(out_fd);
- if (!out.file)
+ if (!fd_file(out))
goto fput_in;
- if (!(out.file->f_mode & FMODE_WRITE))
+ if (!(fd_file(out)->f_mode & FMODE_WRITE))
goto fput_out;
- in_inode = file_inode(in.file);
- out_inode = file_inode(out.file);
- out_pos = out.file->f_pos;
+ in_inode = file_inode(fd_file(in));
+ out_inode = file_inode(fd_file(out));
+ out_pos = fd_file(out)->f_pos;
if (!max)
max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
@@ -1377,33 +1377,33 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
* and the application is arguably buggy if it doesn't expect
* EAGAIN on a non-blocking file descriptor.
*/
- if (in.file->f_flags & O_NONBLOCK)
+ if (fd_file(in)->f_flags & O_NONBLOCK)
fl = SPLICE_F_NONBLOCK;
#endif
- opipe = get_pipe_info(out.file, true);
+ opipe = get_pipe_info(fd_file(out), true);
if (!opipe) {
- retval = rw_verify_area(WRITE, out.file, &out_pos, count);
+ retval = rw_verify_area(WRITE, fd_file(out), &out_pos, count);
if (retval < 0)
goto fput_out;
- retval = do_splice_direct(in.file, &pos, out.file, &out_pos,
+ retval = do_splice_direct(fd_file(in), &pos, fd_file(out), &out_pos,
count, fl);
} else {
- if (out.file->f_flags & O_NONBLOCK)
+ if (fd_file(out)->f_flags & O_NONBLOCK)
fl |= SPLICE_F_NONBLOCK;
- retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
+ retval = splice_file_to_pipe(fd_file(in), opipe, &pos, count, fl);
}
if (retval > 0) {
add_rchar(current, retval);
add_wchar(current, retval);
- fsnotify_access(in.file);
- fsnotify_modify(out.file);
- out.file->f_pos = out_pos;
+ fsnotify_access(fd_file(in));
+ fsnotify_modify(fd_file(out));
+ fd_file(out)->f_pos = out_pos;
if (ppos)
*ppos = pos;
else
- in.file->f_pos = pos;
+ fd_file(in)->f_pos = pos;
}
inc_syscr(current);
@@ -1676,11 +1676,11 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
ssize_t ret = -EBADF;
f_in = fdget(fd_in);
- if (!f_in.file)
+ if (!fd_file(f_in))
goto out2;
f_out = fdget(fd_out);
- if (!f_out.file)
+ if (!fd_file(f_out))
goto out1;
ret = -EFAULT;
@@ -1688,21 +1688,21 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
if (copy_from_user(&pos_in, off_in, sizeof(loff_t)))
goto out;
} else {
- pos_in = f_in.file->f_pos;
+ pos_in = fd_file(f_in)->f_pos;
}
if (off_out) {
if (copy_from_user(&pos_out, off_out, sizeof(loff_t)))
goto out;
} else {
- pos_out = f_out.file->f_pos;
+ pos_out = fd_file(f_out)->f_pos;
}
ret = -EINVAL;
if (flags != 0)
goto out;
- ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len,
+ ret = vfs_copy_file_range(fd_file(f_in), pos_in, fd_file(f_out), pos_out, len,
flags);
if (ret > 0) {
pos_in += ret;
@@ -1712,14 +1712,14 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
if (copy_to_user(off_in, &pos_in, sizeof(loff_t)))
ret = -EFAULT;
} else {
- f_in.file->f_pos = pos_in;
+ fd_file(f_in)->f_pos = pos_in;
}
if (off_out) {
if (copy_to_user(off_out, &pos_out, sizeof(loff_t)))
ret = -EFAULT;
} else {
- f_out.file->f_pos = pos_out;
+ fd_file(f_out)->f_pos = pos_out;
}
}
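
The read_write.c hunks are mechanical f.file to fd_file(f) substitutions, but the surrounding idiom deserves a note: file_ppos() returns NULL for FMODE_STREAM files, and for seekable files the position is snapshotted into a local, passed by reference, and committed back only when the operation succeeds. A condensed sketch of that pattern (locking via fdget_pos()/fdput_pos() omitted):

/* Sketch of the ksys_read() position handling, simplified. */
ssize_t read_with_pos(struct fd f, char __user *buf, size_t count)
{
	loff_t pos, *ppos = file_ppos(fd_file(f));	/* NULL for streams */
	ssize_t ret;

	if (ppos) {
		pos = *ppos;		/* work on a local snapshot */
		ppos = &pos;
	}
	ret = vfs_read(fd_file(f), buf, count, ppos);
	if (ret >= 0 && ppos)
		fd_file(f)->f_pos = pos;	/* commit only on success */
	return ret;
}
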
diff --git a/fs/readdir.c b/fs/readdir.c
index d6c82421902a..6d29cab8576e 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -225,10 +225,10 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
.dirent = dirent
};
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (buf.result)
error = buf.result;
@@ -318,10 +318,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
int error;
f = fdget_pos(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (error >= 0)
error = buf.error;
if (buf.prev_reclen) {
@@ -401,10 +401,10 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
int error;
f = fdget_pos(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (error >= 0)
error = buf.error;
if (buf.prev_reclen) {
@@ -483,10 +483,10 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
.dirent = dirent
};
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (buf.result)
error = buf.result;
@@ -569,10 +569,10 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
int error;
f = fdget_pos(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = iterate_dir(f.file, &buf.ctx);
+ error = iterate_dir(fd_file(f), &buf.ctx);
if (error >= 0)
error = buf.error;
if (buf.prev_reclen) {
diff --git a/fs/remap_range.c b/fs/remap_range.c
index 28246dfc8485..4403d5c68fcb 100644
--- a/fs/remap_range.c
+++ b/fs/remap_range.c
@@ -537,7 +537,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
for (i = 0, info = same->info; i < count; i++, info++) {
struct fd dst_fd = fdget(info->dest_fd);
- struct file *dst_file = dst_fd.file;
+ struct file *dst_file = fd_file(dst_fd);
if (!dst_file) {
info->status = -EBADF;
diff --git a/fs/select.c b/fs/select.c
index cae82e9e0dcc..a77907faf2b4 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -529,10 +529,10 @@ static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec
continue;
mask = EPOLLNVAL;
f = fdget(i);
- if (f.file) {
+ if (fd_file(f)) {
wait_key_set(wait, in, out, bit,
busy_flag);
- mask = vfs_poll(f.file, wait);
+ mask = vfs_poll(fd_file(f), wait);
fdput(f);
}
@@ -777,7 +777,9 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
{
// the path is hot enough for overhead of copy_from_user() to matter
if (from) {
- if (!user_read_access_begin(from, sizeof(*from)))
+ if (can_do_masked_user_access())
+ from = masked_user_access_begin(from);
+ else if (!user_read_access_begin(from, sizeof(*from)))
return -EFAULT;
unsafe_get_user(to->p, &from->p, Efault);
unsafe_get_user(to->size, &from->size, Efault);
@@ -861,13 +863,13 @@ static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
goto out;
mask = EPOLLNVAL;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
goto out;
/* userland u16 ->events contains POLL... bitmap */
filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
pwait->_key = filter | busy_flag;
- mask = vfs_poll(f.file, pwait);
+ mask = vfs_poll(fd_file(f), pwait);
if (mask & busy_flag)
*can_busy_poll = true;
mask &= filter; /* Mask out unneeded events. */
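
The sigset fast path adopts masked user access: where the architecture supports it, masked_user_access_begin() transforms the pointer unconditionally so that a kernel-range value becomes an address guaranteed to fault, avoiding the conditional access_ok() branch and its speculation barrier. A C rendering of the idea (the real x86-64 version is branch-free asm; the names here are illustrative):

/* Sketch: clamp instead of test.  An out-of-range pointer is forced to
 * a value that always faults, so no failure branch is needed. */
static inline const void *mask_user_address(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	if (addr > USER_PTR_MAX)	/* real code: cmov, not a branch */
		addr = USER_PTR_MAX;	/* guard address: always faults */
	return (const void *)addr;
}
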
diff --git a/fs/signalfd.c b/fs/signalfd.c
index d0333bce015e..736bebf93591 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -289,10 +289,10 @@ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
fd_install(ufd, file);
} else {
struct fd f = fdget(ufd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- ctx = f.file->private_data;
- if (f.file->f_op != &signalfd_fops) {
+ ctx = fd_file(f)->private_data;
+ if (fd_file(f)->f_op != &signalfd_fops) {
fdput(f);
return -EINVAL;
}
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 7481b21a0489..2d851f596a72 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -416,7 +416,7 @@ find_timestamp(struct cifs_ses *ses)
}
static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
- const struct nls_table *nls_cp)
+ const struct nls_table *nls_cp, struct shash_desc *hmacmd5)
{
int rc = 0;
int len;
@@ -425,34 +425,26 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
wchar_t *domain;
wchar_t *server;
- if (!ses->server->secmech.hmacmd5) {
- cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
- return -1;
- }
-
/* calculate md4 hash of password */
E_md4hash(ses->password, nt_hash, nls_cp);
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm, nt_hash,
- CIFS_NTHASH_SIZE);
+ rc = crypto_shash_setkey(hmacmd5->tfm, nt_hash, CIFS_NTHASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not set NT Hash as a key\n", __func__);
+ cifs_dbg(VFS, "%s: Could not set NT hash as a key, rc=%d\n", __func__, rc);
return rc;
}
- rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+ rc = crypto_shash_init(hmacmd5);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
return rc;
}
/* convert ses->user_name to unicode */
len = ses->user_name ? strlen(ses->user_name) : 0;
user = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (user == NULL) {
- rc = -ENOMEM;
- return rc;
- }
+ if (user == NULL)
+ return -ENOMEM;
if (len) {
len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
@@ -461,11 +453,10 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
*(u16 *)user = 0;
}
- rc = crypto_shash_update(ses->server->secmech.hmacmd5,
- (char *)user, 2 * len);
+ rc = crypto_shash_update(hmacmd5, (char *)user, 2 * len);
kfree(user);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with user\n", __func__);
+ cifs_dbg(VFS, "%s: Could not update with user, rc=%d\n", __func__, rc);
return rc;
}
@@ -474,19 +465,15 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
len = strlen(ses->domainName);
domain = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (domain == NULL) {
- rc = -ENOMEM;
- return rc;
- }
+ if (domain == NULL)
+ return -ENOMEM;
+
len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
nls_cp);
- rc =
- crypto_shash_update(ses->server->secmech.hmacmd5,
- (char *)domain, 2 * len);
+ rc = crypto_shash_update(hmacmd5, (char *)domain, 2 * len);
kfree(domain);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with domain\n",
- __func__);
+ cifs_dbg(VFS, "%s: Could not update with domain, rc=%d\n", __func__, rc);
return rc;
}
} else {
@@ -494,33 +481,27 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
len = strlen(ses->ip_addr);
server = kmalloc(2 + (len * 2), GFP_KERNEL);
- if (server == NULL) {
- rc = -ENOMEM;
- return rc;
- }
- len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len,
- nls_cp);
- rc =
- crypto_shash_update(ses->server->secmech.hmacmd5,
- (char *)server, 2 * len);
+ if (server == NULL)
+ return -ENOMEM;
+
+ len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len, nls_cp);
+ rc = crypto_shash_update(hmacmd5, (char *)server, 2 * len);
kfree(server);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with server\n",
- __func__);
+ cifs_dbg(VFS, "%s: Could not update with server, rc=%d\n", __func__, rc);
return rc;
}
}
- rc = crypto_shash_final(ses->server->secmech.hmacmd5,
- ntlmv2_hash);
+ rc = crypto_shash_final(hmacmd5, ntlmv2_hash);
if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+ cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
return rc;
}
static int
-CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
+CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash, struct shash_desc *hmacmd5)
{
int rc;
struct ntlmv2_resp *ntlmv2 = (struct ntlmv2_resp *)
@@ -531,43 +512,33 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
hash_len = ses->auth_key.len - (CIFS_SESS_KEY_SIZE +
offsetof(struct ntlmv2_resp, challenge.key[0]));
- if (!ses->server->secmech.hmacmd5) {
- cifs_dbg(VFS, "%s: can't generate ntlmv2 hash\n", __func__);
- return -1;
- }
-
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm,
- ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+ rc = crypto_shash_setkey(hmacmd5->tfm, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
- __func__);
+ cifs_dbg(VFS, "%s: Could not set NTLMv2 hash as a key, rc=%d\n", __func__, rc);
return rc;
}
- rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+ rc = crypto_shash_init(hmacmd5);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
return rc;
}
if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED)
- memcpy(ntlmv2->challenge.key,
- ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
+ memcpy(ntlmv2->challenge.key, ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
else
- memcpy(ntlmv2->challenge.key,
- ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
- rc = crypto_shash_update(ses->server->secmech.hmacmd5,
- ntlmv2->challenge.key, hash_len);
+ memcpy(ntlmv2->challenge.key, ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
+
+ rc = crypto_shash_update(hmacmd5, ntlmv2->challenge.key, hash_len);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+ cifs_dbg(VFS, "%s: Could not update with response, rc=%d\n", __func__, rc);
return rc;
}
/* Note that the MD5 digest overwrites anon.challenge_key.key */
- rc = crypto_shash_final(ses->server->secmech.hmacmd5,
- ntlmv2->ntlmv2_hash);
+ rc = crypto_shash_final(hmacmd5, ntlmv2->ntlmv2_hash);
if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+ cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
return rc;
}
@@ -575,6 +546,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
int
setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
+ struct shash_desc *hmacmd5 = NULL;
int rc;
int baselen;
unsigned int tilen;
@@ -640,55 +612,51 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
cifs_server_lock(ses->server);
- rc = cifs_alloc_hash("hmac(md5)", &ses->server->secmech.hmacmd5);
+ rc = cifs_alloc_hash("hmac(md5)", &hmacmd5);
if (rc) {
+ cifs_dbg(VFS, "Could not allocate HMAC-MD5, rc=%d\n", rc);
goto unlock;
}
/* calculate ntlmv2_hash */
- rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
+ rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp, hmacmd5);
if (rc) {
- cifs_dbg(VFS, "Could not get v2 hash rc %d\n", rc);
+ cifs_dbg(VFS, "Could not get NTLMv2 hash, rc=%d\n", rc);
goto unlock;
}
/* calculate first part of the client response (CR1) */
- rc = CalcNTLMv2_response(ses, ntlmv2_hash);
+ rc = CalcNTLMv2_response(ses, ntlmv2_hash, hmacmd5);
if (rc) {
- cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
+ cifs_dbg(VFS, "Could not calculate CR1, rc=%d\n", rc);
goto unlock;
}
/* now calculate the session key for NTLMv2 */
- rc = crypto_shash_setkey(ses->server->secmech.hmacmd5->tfm,
- ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+ rc = crypto_shash_setkey(hmacmd5->tfm, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
- __func__);
+ cifs_dbg(VFS, "%s: Could not set NTLMv2 hash as a key, rc=%d\n", __func__, rc);
goto unlock;
}
- rc = crypto_shash_init(ses->server->secmech.hmacmd5);
+ rc = crypto_shash_init(hmacmd5);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
goto unlock;
}
- rc = crypto_shash_update(ses->server->secmech.hmacmd5,
- ntlmv2->ntlmv2_hash,
- CIFS_HMAC_MD5_HASH_SIZE);
+ rc = crypto_shash_update(hmacmd5, ntlmv2->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+ cifs_dbg(VFS, "%s: Could not update with response, rc=%d\n", __func__, rc);
goto unlock;
}
- rc = crypto_shash_final(ses->server->secmech.hmacmd5,
- ses->auth_key.response);
+ rc = crypto_shash_final(hmacmd5, ses->auth_key.response);
if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
-
+ cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
unlock:
cifs_server_unlock(ses->server);
+ cifs_free_hash(&hmacmd5);
setup_ntlmv2_rsp_ret:
kfree_sensitive(tiblob);
@@ -732,16 +700,19 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
cifs_free_hash(&server->secmech.aes_cmac);
cifs_free_hash(&server->secmech.hmacsha256);
cifs_free_hash(&server->secmech.md5);
- cifs_free_hash(&server->secmech.sha512);
- cifs_free_hash(&server->secmech.hmacmd5);
- if (server->secmech.enc) {
- crypto_free_aead(server->secmech.enc);
- server->secmech.enc = NULL;
- }
+ if (!SERVER_IS_CHAN(server)) {
+ if (server->secmech.enc) {
+ crypto_free_aead(server->secmech.enc);
+ server->secmech.enc = NULL;
+ }
- if (server->secmech.dec) {
- crypto_free_aead(server->secmech.dec);
+ if (server->secmech.dec) {
+ crypto_free_aead(server->secmech.dec);
+ server->secmech.dec = NULL;
+ }
+ } else {
+ server->secmech.enc = NULL;
server->secmech.dec = NULL;
}
}
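
The NTLMv2 rework stops caching a keyed hmacmd5 descriptor on the server struct: setup_ntlmv2_rsp() now allocates one locally, threads it through calc_ntlmv2_hash() and CalcNTLMv2_response(), and frees it on exit, which removes the "is the cached tfm present?" checks and any sharing of keyed state across sessions. A condensed sketch of the new lifetime, using the existing cifs_alloc_hash()/cifs_free_hash() helpers:

/* Sketch: per-call HMAC-MD5 descriptor instead of a server-wide one. */
struct shash_desc *hmacmd5 = NULL;
int rc;

rc = cifs_alloc_hash("hmac(md5)", &hmacmd5);	/* allocates tfm + desc */
if (rc)
	return rc;

rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp, hmacmd5);
if (!rc)
	rc = CalcNTLMv2_response(ses, ntlmv2_hash, hmacmd5);

cifs_free_hash(&hmacmd5);	/* frees and NULLs the pointer */
return rc;
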
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 61ded59b858f..71b720dbb2ce 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -146,6 +146,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update the following two lines at the same time */
-#define SMB3_PRODUCT_BUILD 50
-#define CIFS_VERSION "2.50"
+#define SMB3_PRODUCT_BUILD 51
+#define CIFS_VERSION "2.51"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index a71a988a92f9..315aac5dec05 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -178,10 +178,8 @@ struct session_key {
/* crypto hashing related structure/fields, not specific to a sec mech */
struct cifs_secmech {
- struct shash_desc *hmacmd5; /* hmacmd5 hash function, for NTLMv2/CR1 hashes */
struct shash_desc *md5; /* md5 hash function, for CIFS/SMB1 signatures */
struct shash_desc *hmacsha256; /* hmac-sha256 hash function, for SMB2 signatures */
- struct shash_desc *sha512; /* sha512 hash function, for SMB3.1.1 preauth hash */
struct shash_desc *aes_cmac; /* block-cipher based MAC function, for SMB3 signatures */
struct crypto_aead *enc; /* smb3 encryption AEAD TFM (AES-CCM and AES-GCM) */
@@ -821,6 +819,7 @@ struct TCP_Server_Info {
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
char *leaf_fullpath;
+ bool dfs_conn:1;
};
static inline bool is_smb1(struct TCP_Server_Info *server)
@@ -1059,6 +1058,7 @@ struct cifs_ses {
struct list_head smb_ses_list;
struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
+ struct list_head dlist; /* dfs list */
struct cifs_tcon *tcon_ipc;
spinlock_t ses_lock; /* protect anything here that is not protected */
struct mutex session_mutex;
@@ -1287,6 +1287,7 @@ struct cifs_tcon {
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct delayed_work dfs_cache_work;
+ struct list_head dfs_ses_list;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
char *origin_fullpath; /* canonical copy of smb3_fs_context::source */
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index c69e3f48a60c..68c716e6261b 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -724,15 +724,9 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
-/* Put references of @ses and its children */
static inline void cifs_put_smb_ses(struct cifs_ses *ses)
{
- struct cifs_ses *next;
-
- do {
- next = ses->dfs_root_ses;
- __cifs_put_smb_ses(ses);
- } while ((ses = next));
+ __cifs_put_smb_ses(ses);
}
/* Get an active reference of @ses and its children.
@@ -746,9 +740,7 @@ static inline void cifs_put_smb_ses(struct cifs_ses *ses)
static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
{
lockdep_assert_held(&cifs_tcp_ses_lock);
-
- for (; ses; ses = ses->dfs_root_ses)
- ses->ses_count++;
+ ses->ses_count++;
}
static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 08a41c7aaf72..adf8758847f6 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -811,13 +811,9 @@ cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter
unsigned int to_read)
{
struct msghdr smb_msg = { .msg_iter = *iter };
- int ret;
iov_iter_truncate(&smb_msg.msg_iter, to_read);
- ret = cifs_readv_from_socket(server, &smb_msg);
- if (ret > 0)
- iov_iter_advance(iter, ret);
- return ret;
+ return cifs_readv_from_socket(server, &smb_msg);
}
static bool
@@ -1530,6 +1526,9 @@ static int match_server(struct TCP_Server_Info *server,
if (server->nosharesock)
return 0;
+ if (!match_super && (ctx->dfs_conn || server->dfs_conn))
+ return 0;
+
/* If multidialect negotiation see if existing sessions match one */
if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
if (server->vals->protocol_id < SMB30_PROT_ID)
@@ -1723,6 +1722,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
if (ctx->nosharesock)
tcp_ses->nosharesock = true;
+ tcp_ses->dfs_conn = ctx->dfs_conn;
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
@@ -1873,13 +1873,15 @@ out_err:
}
/* this function must be called with ses_lock and chan_lock held */
-static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+static int match_session(struct cifs_ses *ses,
+ struct smb3_fs_context *ctx,
+ bool match_super)
{
if (ctx->sectype != Unspecified &&
ctx->sectype != ses->sectype)
return 0;
- if (ctx->dfs_root_ses != ses->dfs_root_ses)
+ if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses)
return 0;
/*
@@ -1998,7 +2000,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
continue;
}
spin_lock(&ses->chan_lock);
- if (match_session(ses, ctx)) {
+ if (match_session(ses, ctx, false)) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
ret = ses;
@@ -2058,8 +2060,7 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
if (do_logoff) {
xid = get_xid();
rc = server->ops->logoff(xid, ses);
- if (rc)
- cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+ cifs_server_dbg(FYI, "%s: Session Logoff: rc=%d\n",
__func__, rc);
_free_xid(xid);
}
@@ -2382,8 +2383,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
* need to lock before changing something in the session.
*/
spin_lock(&cifs_tcp_ses_lock);
- if (ctx->dfs_root_ses)
- cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
ses->dfs_root_ses = ctx->dfs_root_ses;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2458,6 +2457,7 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
unsigned int xid;
struct cifs_ses *ses;
+ LIST_HEAD(ses_list);
/*
* IPC tcon share the lifetime of their session and are
@@ -2482,6 +2482,9 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
list_del_init(&tcon->tcon_list);
tcon->status = TID_EXITING;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ list_replace_init(&tcon->dfs_ses_list, &ses_list);
+#endif
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2509,6 +2512,9 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
cifs_fscache_release_super_cookie(tcon);
tconInfoFree(tcon, netfs_trace_tcon_ref_free);
cifs_put_smb_ses(ses);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ dfs_put_root_smb_sessions(&ses_list);
+#endif
}
/**
@@ -2892,7 +2898,7 @@ cifs_match_super(struct super_block *sb, void *data)
spin_lock(&ses->chan_lock);
spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx, true) ||
- !match_session(ses, ctx) ||
+ !match_session(ses, ctx, true) ||
!match_tcon(tcon, ctx) ||
!match_prepath(sb, tcon, mnt_data)) {
rc = 0;
@@ -3623,13 +3629,12 @@ out:
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
- bool isdfs;
int rc;
- rc = dfs_mount_share(&mnt_ctx, &isdfs);
+ rc = dfs_mount_share(&mnt_ctx);
if (rc)
goto error;
- if (!isdfs)
+ if (!ctx->dfs_conn)
goto out;
/*
@@ -4034,7 +4039,7 @@ cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
static struct cifs_tcon *
-__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
@@ -4132,17 +4137,6 @@ out:
return tcon;
}
-static struct cifs_tcon *
-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
-{
- struct cifs_tcon *ret;
-
- cifs_mount_lock();
- ret = __cifs_construct_tcon(cifs_sb, fsuid);
- cifs_mount_unlock();
- return ret;
-}
-
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
@@ -4212,9 +4206,9 @@ tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
- int ret;
- kuid_t fsuid = current_fsuid();
struct tcon_link *tlink, *newtlink;
+ kuid_t fsuid = current_fsuid();
+ int err;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
@@ -4249,9 +4243,9 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
spin_unlock(&cifs_sb->tlink_tree_lock);
} else {
wait_for_construction:
- ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
+ err = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
TASK_INTERRUPTIBLE);
- if (ret) {
+ if (err) {
cifs_put_tlink(tlink);
return ERR_PTR(-ERESTARTSYS);
}
@@ -4262,8 +4256,9 @@ wait_for_construction:
/* return error if we tried this already recently */
if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
+ err = PTR_ERR(tlink->tl_tcon);
cifs_put_tlink(tlink);
- return ERR_PTR(-EACCES);
+ return ERR_PTR(err);
}
if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
@@ -4275,8 +4270,11 @@ wait_for_construction:
wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
if (IS_ERR(tlink->tl_tcon)) {
+ err = PTR_ERR(tlink->tl_tcon);
+ if (err == -ENOKEY)
+ err = -EACCES;
cifs_put_tlink(tlink);
- return ERR_PTR(-EACCES);
+ return ERR_PTR(err);
}
return tlink;
diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
index 3ec965547e3d..3f6077c68d68 100644
--- a/fs/smb/client/dfs.c
+++ b/fs/smb/client/dfs.c
@@ -69,7 +69,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
* Get an active reference of @ses so that next call to cifs_put_tcon() won't
* release it as any new DFS referrals must go through its IPC tcon.
*/
-static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+static void set_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_ses *ses = mnt_ctx->ses;
@@ -95,7 +95,7 @@ static inline int parse_dfs_target(struct smb3_fs_context *ctx,
return rc;
}
-static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx,
+static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx,
struct dfs_info3_param *tgt,
struct dfs_ref_walk *rw)
{
@@ -120,6 +120,7 @@ static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx,
}
ref_walk_path(rw) = ref_path;
ref_walk_fpath(rw) = full_path;
+ ref_walk_ses(rw) = ctx->dfs_root_ses;
return 0;
}
@@ -128,11 +129,11 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_info3_param tgt = {};
- bool is_refsrv;
int rc = -ENOENT;
again:
do {
+ ctx->dfs_root_ses = ref_walk_ses(rw);
if (ref_walk_empty(rw)) {
rc = dfs_get_referral(mnt_ctx, ref_walk_path(rw) + 1,
NULL, ref_walk_tl(rw));
@@ -158,10 +159,7 @@ again:
if (rc)
continue;
- is_refsrv = tgt.server_type == DFS_TYPE_ROOT ||
- DFS_INTERLINK(tgt.flags);
ref_walk_set_tgt_hint(rw);
-
if (tgt.flags & DFSREF_STORAGE_SERVER) {
rc = cifs_mount_get_tcon(mnt_ctx);
if (!rc)
@@ -172,12 +170,10 @@ again:
continue;
}
- if (is_refsrv)
- add_root_smb_session(mnt_ctx);
-
+ set_root_smb_session(mnt_ctx);
rc = ref_walk_advance(rw);
if (!rc) {
- rc = set_ref_paths(mnt_ctx, &tgt, rw);
+ rc = setup_dfs_ref(mnt_ctx, &tgt, rw);
if (!rc) {
rc = -EREMOTE;
goto again;
@@ -193,20 +189,22 @@ out:
return rc;
}
-static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx)
+static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
+ struct dfs_ref_walk **rw)
{
- struct dfs_ref_walk *rw;
int rc;
- rw = ref_walk_alloc();
- if (IS_ERR(rw))
- return PTR_ERR(rw);
+ *rw = ref_walk_alloc();
+ if (IS_ERR(*rw)) {
+ rc = PTR_ERR(*rw);
+ *rw = NULL;
+ return rc;
+ }
- ref_walk_init(rw);
- rc = set_ref_paths(mnt_ctx, NULL, rw);
+ ref_walk_init(*rw);
+ rc = setup_dfs_ref(mnt_ctx, NULL, *rw);
if (!rc)
- rc = __dfs_referral_walk(mnt_ctx, rw);
- ref_walk_free(rw);
+ rc = __dfs_referral_walk(mnt_ctx, *rw);
return rc;
}
@@ -214,16 +212,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
{
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct dfs_ref_walk *rw = NULL;
struct cifs_tcon *tcon;
char *origin_fullpath;
- bool new_tcon = true;
int rc;
origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
if (IS_ERR(origin_fullpath))
return PTR_ERR(origin_fullpath);
- rc = dfs_referral_walk(mnt_ctx);
+ rc = dfs_referral_walk(mnt_ctx, &rw);
if (!rc) {
/*
* Prevent superblock from being created with any missing
@@ -241,21 +239,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
tcon = mnt_ctx->tcon;
spin_lock(&tcon->tc_lock);
- if (!tcon->origin_fullpath) {
- tcon->origin_fullpath = origin_fullpath;
- origin_fullpath = NULL;
- } else {
- new_tcon = false;
- }
+ tcon->origin_fullpath = origin_fullpath;
+ origin_fullpath = NULL;
+ ref_walk_set_tcon(rw, tcon);
spin_unlock(&tcon->tc_lock);
-
- if (new_tcon) {
- queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
- dfs_cache_get_ttl() * HZ);
- }
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ dfs_cache_get_ttl() * HZ);
out:
kfree(origin_fullpath);
+ ref_walk_free(rw);
return rc;
}
@@ -279,7 +272,7 @@ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx)
return rc;
}
-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
bool nodfs = ctx->nodfs;
@@ -289,7 +282,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
if (rc)
return rc;
- *isdfs = false;
rc = get_session(mnt_ctx, NULL);
if (rc)
return rc;
@@ -317,10 +309,15 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
return rc;
}
- *isdfs = true;
- add_root_smb_session(mnt_ctx);
- rc = __dfs_mount_share(mnt_ctx);
- dfs_put_root_smb_sessions(mnt_ctx);
+ if (!ctx->dfs_conn) {
+ ctx->dfs_conn = true;
+ cifs_mount_put_conns(mnt_ctx);
+ rc = get_session(mnt_ctx, NULL);
+ }
+ if (!rc) {
+ set_root_smb_session(mnt_ctx);
+ rc = __dfs_mount_share(mnt_ctx);
+ }
return rc;
}
diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
index e5c4dcf83750..1aa2bc65b3bc 100644
--- a/fs/smb/client/dfs.h
+++ b/fs/smb/client/dfs.h
@@ -19,6 +19,7 @@
struct dfs_ref {
char *path;
char *full_path;
+ struct cifs_ses *ses;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *tit;
};
@@ -38,6 +39,7 @@ struct dfs_ref_walk {
#define ref_walk_path(w) (ref_walk_cur(w)->path)
#define ref_walk_fpath(w) (ref_walk_cur(w)->full_path)
#define ref_walk_tl(w) (&ref_walk_cur(w)->tl)
+#define ref_walk_ses(w) (ref_walk_cur(w)->ses)
static inline struct dfs_ref_walk *ref_walk_alloc(void)
{
@@ -60,14 +62,19 @@ static inline void __ref_walk_free(struct dfs_ref *ref)
kfree(ref->path);
kfree(ref->full_path);
dfs_cache_free_tgts(&ref->tl);
+ if (ref->ses)
+ cifs_put_smb_ses(ref->ses);
memset(ref, 0, sizeof(*ref));
}
static inline void ref_walk_free(struct dfs_ref_walk *rw)
{
- struct dfs_ref *ref = ref_walk_start(rw);
+ struct dfs_ref *ref;
- for (; ref <= ref_walk_end(rw); ref++)
+ if (!rw)
+ return;
+
+ for (ref = ref_walk_start(rw); ref <= ref_walk_end(rw); ref++)
__ref_walk_free(ref);
kfree(rw);
}
@@ -116,9 +123,22 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
ref_walk_tit(rw));
}
+static inline void ref_walk_set_tcon(struct dfs_ref_walk *rw,
+ struct cifs_tcon *tcon)
+{
+ struct dfs_ref *ref = ref_walk_start(rw);
+
+ for (; ref <= ref_walk_cur(rw); ref++) {
+ if (WARN_ON_ONCE(!ref->ses))
+ continue;
+ list_add(&ref->ses->dlist, &tcon->dfs_ses_list);
+ ref->ses = NULL;
+ }
+}
+
int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
struct smb3_fs_context *ctx);
-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx);
static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path)
{
@@ -142,20 +162,14 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
* references of all DFS root sessions that were used across the mount process
* in dfs_mount_share().
*/
-static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
+static inline void dfs_put_root_smb_sessions(struct list_head *head)
{
- const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
- struct cifs_ses *ses = ctx->dfs_root_ses;
- struct cifs_ses *cur;
-
- if (!ses)
- return;
+ struct cifs_ses *ses, *n;
- for (cur = ses; cur; cur = cur->dfs_root_ses) {
- if (cur->dfs_root_ses)
- cifs_put_smb_ses(cur->dfs_root_ses);
+ list_for_each_entry_safe(ses, n, head, dlist) {
+ list_del_init(&ses->dlist);
+ cifs_put_smb_ses(ses);
}
- cifs_put_smb_ses(ses);
}
#endif /* _CIFS_DFS_H */
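
With root-session references parked on tcon->dfs_ses_list by ref_walk_set_tcon(), teardown moves into cifs_put_tcon(): the list is detached under the spinlock with list_replace_init() and drained afterwards, so cifs_put_smb_ses() never runs with cifs_tcp_ses_lock held. A standalone sketch of that detach-then-drain pattern:

/* Sketch: steal the list under the lock, drop references outside it. */
LIST_HEAD(ses_list);
struct cifs_ses *ses, *n;

spin_lock(&cifs_tcp_ses_lock);
list_replace_init(&tcon->dfs_ses_list, &ses_list);
spin_unlock(&cifs_tcp_ses_lock);

/* _safe iteration: each put may free the current entry */
list_for_each_entry_safe(ses, n, &ses_list, dlist) {
	list_del_init(&ses->dlist);
	cifs_put_smb_ses(ses);
}
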
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 11c8efecf7aa..110f03df012a 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -126,6 +126,7 @@ static inline void free_tgts(struct cache_entry *ce)
static inline void flush_cache_ent(struct cache_entry *ce)
{
+ cifs_dbg(FYI, "%s: %s\n", __func__, ce->path);
hlist_del_init(&ce->hlist);
kfree(ce->path);
free_tgts(ce);
@@ -441,34 +442,31 @@ static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int n
return ce;
}
-static void remove_oldest_entry_locked(void)
+/* Remove all single-target referrals and, if still at capacity, the oldest entry */
+static void purge_cache(void)
{
int i;
struct cache_entry *ce;
- struct cache_entry *to_del = NULL;
-
- WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+ struct cache_entry *oldest = NULL;
for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &cache_htable[i];
+ struct hlist_node *n;
- hlist_for_each_entry(ce, l, hlist) {
+ hlist_for_each_entry_safe(ce, n, l, hlist) {
if (hlist_unhashed(&ce->hlist))
continue;
- if (!to_del || timespec64_compare(&ce->etime,
- &to_del->etime) < 0)
- to_del = ce;
+ if (ce->numtgts == 1)
+ flush_cache_ent(ce);
+ else if (!oldest ||
+ timespec64_compare(&ce->etime,
+ &oldest->etime) < 0)
+ oldest = ce;
}
}
- if (!to_del) {
- cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
- return;
- }
-
- cifs_dbg(FYI, "%s: removing entry\n", __func__);
- dump_ce(to_del);
- flush_cache_ent(to_del);
+ if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)
+ flush_cache_ent(oldest);
}
/* Add a new DFS cache entry */
@@ -484,7 +482,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
- remove_oldest_entry_locked();
+ purge_cache();
}
rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
@@ -1095,16 +1093,18 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
return 0;
}
-static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
{
- char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+ struct TCP_Server_Info *server = tcon->ses->server;
+ struct sockaddr_storage ss;
const char *host;
+ const char *s2 = &tcon->tree_name[1];
size_t hostlen;
- struct sockaddr_storage ss;
+ char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
bool match;
int rc;
- if (strcasecmp(s1, s2))
+ if (strcasecmp(s2, s1))
return false;
/*
@@ -1128,34 +1128,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
return match;
}
-/*
- * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
- * target shares in @refs.
- */
-static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
- const char *path,
- struct dfs_cache_tgt_list *old_tl,
- struct dfs_cache_tgt_list *new_tl)
-{
- struct dfs_cache_tgt_iterator *oit, *nit;
-
- for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
- oit = dfs_cache_get_next_tgt(old_tl, oit)) {
- for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
- nit = dfs_cache_get_next_tgt(new_tl, nit)) {
- if (target_share_equal(server,
- dfs_cache_get_tgt_name(oit),
- dfs_cache_get_tgt_name(nit))) {
- dfs_cache_noreq_update_tgthint(path, nit);
- return;
- }
- }
- }
-
- cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
- cifs_signal_cifsd_for_reconnect(server, true);
-}
-
static bool is_ses_good(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
@@ -1172,41 +1144,35 @@ static bool is_ses_good(struct cifs_ses *ses)
return ret;
}
-/* Refresh dfs referral of @ses and mark it for reconnect if needed */
-static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+static char *get_ses_refpath(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
- DFS_CACHE_TGT_LIST(old_tl);
- DFS_CACHE_TGT_LIST(new_tl);
- bool needs_refresh = false;
- struct cache_entry *ce;
- unsigned int xid;
- char *path = NULL;
- int rc = 0;
-
- xid = get_xid();
+ char *path = ERR_PTR(-ENOENT);
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath) {
path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
if (!path)
- rc = -ENOMEM;
+ path = ERR_PTR(-ENOMEM);
}
mutex_unlock(&server->refpath_lock);
- if (!path)
- goto out;
+ return path;
+}
- down_read(&htable_rw_lock);
- ce = lookup_cache_entry(path);
- needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
- if (!IS_ERR(ce)) {
- rc = get_targets(ce, &old_tl);
- cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
- }
- up_read(&htable_rw_lock);
+/* Refresh dfs referral of @ses */
+static void refresh_ses_referral(struct cifs_ses *ses)
+{
+ struct cache_entry *ce;
+ unsigned int xid;
+ char *path;
+ int rc = 0;
- if (!needs_refresh) {
- rc = 0;
+ xid = get_xid();
+
+ path = get_ses_refpath(ses);
+ if (IS_ERR(path)) {
+ rc = PTR_ERR(path);
+ path = NULL;
goto out;
}
@@ -1217,29 +1183,106 @@ static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
goto out;
}
- ce = cache_refresh_path(xid, ses, path, true);
- if (!IS_ERR(ce)) {
- rc = get_targets(ce, &new_tl);
+ ce = cache_refresh_path(xid, ses, path, false);
+ if (!IS_ERR(ce))
up_read(&htable_rw_lock);
- cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
- mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
- }
+ else
+ rc = PTR_ERR(ce);
out:
free_xid(xid);
- dfs_cache_free_tgts(&old_tl);
- dfs_cache_free_tgts(&new_tl);
kfree(path);
}
-static inline void refresh_ses_referral(struct cifs_ses *ses)
+static int __refresh_tcon_referral(struct cifs_tcon *tcon,
+ const char *path,
+ struct dfs_info3_param *refs,
+ int numrefs, bool force_refresh)
{
- __refresh_ses_referral(ses, false);
+ struct cache_entry *ce;
+ bool reconnect = force_refresh;
+ int rc = 0;
+ int i;
+
+ if (unlikely(!numrefs))
+ return 0;
+
+ if (force_refresh) {
+ for (i = 0; i < numrefs; i++) {
+ /* TODO: include prefix paths in the matching */
+ if (target_share_equal(tcon, refs[i].node_name)) {
+ reconnect = false;
+ break;
+ }
+ }
+ }
+
+ down_write(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ if (!IS_ERR(ce)) {
+ if (force_refresh || cache_entry_expired(ce))
+ rc = update_cache_entry_locked(ce, refs, numrefs);
+ } else if (PTR_ERR(ce) == -ENOENT) {
+ ce = add_cache_entry_locked(refs, numrefs);
+ }
+ up_write(&htable_rw_lock);
+
+ if (IS_ERR(ce))
+ rc = PTR_ERR(ce);
+ if (reconnect) {
+ cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
+ cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+ }
+ return rc;
}
-static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
{
- __refresh_ses_referral(ses, true);
+ struct dfs_info3_param *refs = NULL;
+ struct cache_entry *ce;
+ struct cifs_ses *ses;
+ unsigned int xid;
+ bool needs_refresh;
+ char *path;
+ int numrefs = 0;
+ int rc = 0;
+
+ xid = get_xid();
+ ses = tcon->ses;
+
+ path = get_ses_refpath(ses);
+ if (IS_ERR(path)) {
+ rc = PTR_ERR(path);
+ path = NULL;
+ goto out;
+ }
+
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+ if (!needs_refresh) {
+ up_read(&htable_rw_lock);
+ goto out;
+ }
+ up_read(&htable_rw_lock);
+
+ ses = CIFS_DFS_ROOT_SES(ses);
+ if (!is_ses_good(ses)) {
+ cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+ __func__);
+ goto out;
+ }
+
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ if (!rc) {
+ rc = __refresh_tcon_referral(tcon, path, refs,
+ numrefs, force_refresh);
+ }
+
+out:
+ free_xid(xid);
+ kfree(path);
+ free_dfs_info_array(refs, numrefs);
}
/**
@@ -1280,7 +1323,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
*/
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
- force_refresh_ses_referral(tcon->ses);
+ refresh_tcon_referral(tcon, true);
return 0;
}
@@ -1292,8 +1335,9 @@ void dfs_cache_refresh(struct work_struct *work)
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
- for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+ list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
refresh_ses_referral(ses);
+ refresh_tcon_referral(tcon, false);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
atomic_read(&dfs_cache_ttl) * HZ);
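
The new get_ses_refpath() above folds the failure cause into its return value instead of a separate rc out-parameter, using the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention: small negative errno values live at the very top of the address space, where no valid pointer can. A userspace sketch of the same shape; get_refpath() here is a hypothetical stand-in for the helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Userspace mimic of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Shape of get_ses_refpath(): one return carries value or error. */
static char *get_refpath(const char *leaf_fullpath)
{
	char *path;

	if (!leaf_fullpath)
		return ERR_PTR(-ENOENT);
	path = strdup(leaf_fullpath + 1);	/* skip leading separator */
	if (!path)
		return ERR_PTR(-ENOMEM);
	return path;
}

int main(void)
{
	char *path = get_refpath("\\server\\share");

	if (IS_ERR(path)) {
		fprintf(stderr, "error %ld\n", PTR_ERR(path));
		return 1;
	}
	printf("refpath: %s\n", path);
	free(path);
	return 0;
}
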
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index cf577ec0dd0a..69f9d938b336 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -284,6 +284,7 @@ struct smb3_fs_context {
struct cifs_ses *dfs_root_ses;
bool dfs_automount:1; /* set for dfs automount only */
enum cifs_reparse_type reparse_type;
+ bool dfs_conn:1; /* set for dfs mounts */
};
extern const struct fs_parameter_spec smb3_fs_parameters[];
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 331a86074ae7..647f9bedd9fc 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -834,10 +834,6 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode;
fattr->cf_dtype = DT_REG;
- /* clear write bits if ATTR_READONLY is set */
- if (fattr->cf_cifsattrs & ATTR_READONLY)
- fattr->cf_mode &= ~(S_IWUGO);
-
/*
* Don't accept zero nlink from non-unix servers unless
* delete is pending. Instead mark it as unknown.
@@ -850,6 +846,10 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
}
}
+ /* clear write bits if ATTR_READONLY is set */
+ if (fattr->cf_cifsattrs & ATTR_READONLY)
+ fattr->cf_mode &= ~(S_IWUGO);
+
out_reparse:
if (S_ISLNK(fattr->cf_mode)) {
if (likely(data->symlink_target))
@@ -1267,11 +1267,14 @@ handle_mnt_opt:
__func__, rc);
goto out;
}
- }
-
- /* fill in remaining high mode bits e.g. SUID, VTX */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ /* fill in remaining high mode bits e.g. SUID, VTX */
cifs_sfu_mode(fattr, full_path, cifs_sb, xid);
+ else if (!(tcon->posix_extensions))
+ /* clear write bits if ATTR_READONLY is set */
+ if (fattr->cf_cifsattrs & ATTR_READONLY)
+ fattr->cf_mode &= ~(S_IWUGO);
+
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index 9bb5c869f4db..2ce193609d8b 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -90,23 +90,23 @@ static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
}
src_file = fdget(srcfd);
- if (!src_file.file) {
+ if (!fd_file(src_file)) {
rc = -EBADF;
goto out_drop_write;
}
- if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
+ if (fd_file(src_file)->f_op->unlocked_ioctl != cifs_ioctl) {
rc = -EBADF;
cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
goto out_fput;
}
- src_inode = file_inode(src_file.file);
+ src_inode = file_inode(fd_file(src_file));
rc = -EINVAL;
if (S_ISDIR(src_inode->i_mode))
goto out_fput;
- rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
+ rc = cifs_file_copychunk_range(xid, fd_file(src_file), 0, dst_file, 0,
src_inode->i_size, 0);
if (rc > 0)
rc = 0;
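
This is the first of many conversions in this series from open-coded f.file member access to the fd_file() accessor. The point of the accessor is that struct fd can change representation, for instance packing its cleanup flags into the low bits of the pointer, without touching callers. A sketch under that assumption; the struct fd layout and FDPUT_FLAGS below are simplified stand-ins, not the kernel definitions:

#include <stdio.h>

struct file { int id; };

struct fd { unsigned long word; };	/* pointer plus flag bits */

#define FDPUT_FLAGS 3UL	/* hypothetical: two low bits for flags */

static inline struct file *fd_file(struct fd f)
{
	/* Mask off the flag bits before treating the word as a pointer. */
	return (struct file *)(f.word & ~FDPUT_FLAGS);
}

static inline int fd_empty(struct fd f)
{
	return fd_file(f) == NULL;
}

int main(void)
{
	static struct file filp = { .id = 42 };
	struct fd f = { .word = (unsigned long)&filp | 1UL };

	if (fd_empty(f))
		return 1;	/* would be -EBADF in the kernel */
	printf("file id %d\n", fd_file(f)->id);
	return 0;
}

Callers never see the flag bits, so a later change to the handle representation is invisible to them; that is exactly what makes the f.file -> fd_file(f) churn above worthwhile.
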
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index dab526191b07..054f10ebf65a 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -145,6 +145,9 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
mutex_init(&ret_buf->fscache_lock);
#endif
trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+#endif
return ret_buf;
}
@@ -1108,7 +1111,8 @@ static void tcon_super_cb(struct super_block *sb, void *arg)
t2 = cifs_sb_master_tcon(cifs_sb);
spin_lock(&t2->tc_lock);
- if (t1->ses == t2->ses &&
+ if ((t1->ses == t2->ses ||
+ t1->ses->dfs_root_ses == t2->ses->dfs_root_ses) &&
t1->ses->server == t2->ses->server &&
t2->origin_fullpath &&
dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index 4a517b280f2b..0f788031b740 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -240,7 +240,7 @@ static struct vfsmount *cifs_do_automount(struct path *path)
ctx->source = NULL;
goto out;
}
- ctx->dfs_automount = is_dfs_mount(mntpt);
+ ctx->dfs_automount = ctx->dfs_conn = is_dfs_mount(mntpt);
cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dfs_automount=%d\n",
__func__, ctx->source, ctx->UNC, ctx->prepath, ctx->dfs_automount);
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 48c27581ec51..3b48a093cfb1 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -108,8 +108,8 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf,
buf->InodeType = cpu_to_le64(type);
buf->ReparseDataLength = cpu_to_le16(len + dlen -
sizeof(struct reparse_data_buffer));
- *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MAJOR(dev) << 32) |
- MINOR(dev));
+ *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MINOR(dev) << 32) |
+ MAJOR(dev));
iov->iov_base = buf;
iov->iov_len = len + dlen;
return 0;
@@ -468,7 +468,7 @@ static void wsl_to_fattr(struct cifs_open_info_data *data,
else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen))
fattr->cf_mode = (umode_t)le32_to_cpu(*(__le32 *)v);
else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen))
- fattr->cf_rdev = wsl_mkdev(v);
+ fattr->cf_rdev = reparse_mkdev(v);
} while (next);
out:
fattr->cf_dtype = S_DT(fattr->cf_mode);
@@ -485,11 +485,11 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
switch (le64_to_cpu(buf->InodeType)) {
case NFS_SPECFILE_CHR:
fattr->cf_mode |= S_IFCHR;
- fattr->cf_rdev = reparse_nfs_mkdev(buf);
+ fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
break;
case NFS_SPECFILE_BLK:
fattr->cf_mode |= S_IFBLK;
- fattr->cf_rdev = reparse_nfs_mkdev(buf);
+ fattr->cf_rdev = reparse_mkdev(buf->DataBuffer);
break;
case NFS_SPECFILE_FIFO:
fattr->cf_mode |= S_IFIFO;
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index 2c0644bc4e65..158e7b7aae64 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -18,14 +18,7 @@
*/
#define IO_REPARSE_TAG_INTERNAL ((__u32)~0U)
-static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf)
-{
- u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer);
-
- return MKDEV(v >> 32, v & 0xffffffff);
-}
-
-static inline dev_t wsl_mkdev(void *ptr)
+static inline dev_t reparse_mkdev(void *ptr)
{
u64 v = le64_to_cpu(*(__le64 *)ptr);
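
The reparse.c and reparse.h hunks above make the encode and decode sides of the device-number field agree: the buffer carries one little-endian u64 with the minor number in the high 32 bits and the major in the low 32 bits, and the unified reparse_mkdev() is assumed to unpack it symmetrically (its body is truncated above). A sketch of the round trip, ignoring byte-order conversion on a little-endian host:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Pack as the fixed nfs_set_reparse_buf() does: minor high, major low. */
static uint64_t pack_dev(uint32_t major, uint32_t minor)
{
	return ((uint64_t)minor << 32) | major;
}

/* Unpack symmetrically, as the unified helper is assumed to. */
static void unpack_dev(uint64_t v, uint32_t *major, uint32_t *minor)
{
	*minor = v >> 32;
	*major = v & 0xffffffff;
}

int main(void)
{
	uint32_t major, minor;

	unpack_dev(pack_dev(8, 17), &major, &minor);
	assert(major == 8 && minor == 17);
	printf("major %u minor %u\n", major, minor);
	return 0;
}
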
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 3216f786908f..03c0b484a4b5 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -624,7 +624,7 @@ cifs_ses_add_channel(struct cifs_ses *ses,
* to sign packets before we generate the channel signing key
* (we sign with the session key)
*/
- rc = smb311_crypto_shash_allocate(chan->server);
+ rc = smb3_crypto_shash_allocate(chan->server);
if (rc) {
cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
mutex_unlock(&ses->session_mutex);
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index f3c4b70b77b9..bdeb12ff53e3 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -906,41 +906,41 @@ smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
|| (hdr->Status !=
cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
return 0;
-
ok:
- rc = smb311_crypto_shash_allocate(server);
- if (rc)
+ rc = cifs_alloc_hash("sha512", &sha512);
+ if (rc) {
+ cifs_dbg(VFS, "%s: Could not allocate SHA512 shash, rc=%d\n", __func__, rc);
return rc;
+ }
- sha512 = server->secmech.sha512;
rc = crypto_shash_init(sha512);
if (rc) {
- cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
- return rc;
+ cifs_dbg(VFS, "%s: Could not init SHA512 shash, rc=%d\n", __func__, rc);
+ goto err_free;
}
rc = crypto_shash_update(sha512, ses->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
- return rc;
+ cifs_dbg(VFS, "%s: Could not update SHA512 shash, rc=%d\n", __func__, rc);
+ goto err_free;
}
for (i = 0; i < nvec; i++) {
rc = crypto_shash_update(sha512, iov[i].iov_base, iov[i].iov_len);
if (rc) {
- cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
- __func__);
- return rc;
+ cifs_dbg(VFS, "%s: Could not update SHA512 shash, rc=%d\n", __func__, rc);
+ goto err_free;
}
}
rc = crypto_shash_final(sha512, ses->preauth_sha_hash);
if (rc) {
- cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
- __func__);
- return rc;
+ cifs_dbg(VFS, "%s: Could not finalize SHA12 shash, rc=%d\n", __func__, rc);
+ goto err_free;
}
+err_free:
+ cifs_free_hash(&sha512);
return 0;
}
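
smb311_update_preauth_hash() now allocates the SHA-512 transform per call and releases it on every exit path through a single err_free label, rather than keeping a long-lived transform in server->secmech. A toy sketch of that single-exit cleanup idiom; the "hash" below is a stand-in, not a real digest:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct hash { unsigned long state; };

static int hash_alloc(struct hash **h)
{
	*h = calloc(1, sizeof(**h));
	return *h ? 0 : -ENOMEM;
}

static void hash_free(struct hash **h)
{
	free(*h);
	*h = NULL;
}

static int hash_update(struct hash *h, const char *buf)
{
	while (*buf)
		h->state = h->state * 31 + (unsigned char)*buf++;
	return 0;
}

static int update_digest(const char *const *iov, int nvec,
			 unsigned long *out)
{
	struct hash *h = NULL;
	int rc, i;

	rc = hash_alloc(&h);
	if (rc)
		return rc;

	for (i = 0; i < nvec; i++) {
		rc = hash_update(h, iov[i]);
		if (rc)
			goto err_free;
	}
	*out = h->state;
err_free:
	hash_free(&h);	/* runs on success and on failure */
	return rc;
}

int main(void)
{
	const char *iov[] = { "neg", "otiate" };
	unsigned long digest;

	if (!update_digest(iov, 2, &digest))
		printf("digest %lu\n", digest);
	return 0;
}
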
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 7381ec333c6d..177173072bfa 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4309,7 +4309,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
*/
static int
crypt_message(struct TCP_Server_Info *server, int num_rqst,
- struct smb_rqst *rqst, int enc)
+ struct smb_rqst *rqst, int enc, struct crypto_aead *tfm)
{
struct smb2_transform_hdr *tr_hdr =
(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
@@ -4320,8 +4320,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
u8 key[SMB3_ENC_DEC_KEY_SIZE];
struct aead_request *req;
u8 *iv;
- DECLARE_CRYPTO_WAIT(wait);
- struct crypto_aead *tfm;
unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
void *creq;
size_t sensitive_size;
@@ -4333,14 +4331,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
return rc;
}
- rc = smb3_crypto_aead_allocate(server);
- if (rc) {
- cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
- return rc;
- }
-
- tfm = enc ? server->secmech.enc : server->secmech.dec;
-
if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
@@ -4380,11 +4370,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
aead_request_set_crypt(req, sg, sg, crypt_len, iv);
aead_request_set_ad(req, assoc_data_len);
- aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
-
- rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
- : crypto_aead_decrypt(req), &wait);
+ rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
if (!rc && enc)
memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
@@ -4526,7 +4512,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
/* fill the 1st iov with a transform header */
fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
- rc = crypt_message(server, num_rqst, new_rq, 1);
+ rc = crypt_message(server, num_rqst, new_rq, 1, server->secmech.enc);
cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
if (rc)
goto err_free;
@@ -4551,8 +4537,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
unsigned int buf_data_size, struct iov_iter *iter,
bool is_offloaded)
{
- struct kvec iov[2];
+ struct crypto_aead *tfm;
struct smb_rqst rqst = {NULL};
+ struct kvec iov[2];
size_t iter_size = 0;
int rc;
@@ -4568,9 +4555,31 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
iter_size = iov_iter_count(iter);
}
- rc = crypt_message(server, 1, &rqst, 0);
+ if (is_offloaded) {
+ if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
+ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ else
+ tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ cifs_server_dbg(VFS, "%s: Failed alloc decrypt TFM, rc=%d\n", __func__, rc);
+
+ return rc;
+ }
+ } else {
+ if (unlikely(!server->secmech.dec))
+ return -EIO;
+
+ tfm = server->secmech.dec;
+ }
+
+ rc = crypt_message(server, 1, &rqst, 0, tfm);
cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
+ if (is_offloaded)
+ crypto_free_aead(tfm);
+
if (rc)
return rc;
@@ -4869,9 +4878,12 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
goto discard_data;
server->total_read += rc;
- if (rc < len)
- iov_iter_zero(len - rc, &iter);
- iov_iter_revert(&iter, len);
+ if (rc < len) {
+ struct iov_iter tmp = iter;
+
+ iov_iter_advance(&tmp, rc);
+ iov_iter_zero(len - rc, &tmp);
+ }
iov_iter_truncate(&iter, dw->len);
rc = cifs_discard_remaining_data(server);
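
The receive_encrypted_read() fix above pads a short read by advancing a copy of the iterator past the bytes actually read and zeroing the tail, instead of zeroing and then reverting the caller's iterator. A sketch with a trivial buffer cursor standing in for iov_iter:

#include <stdio.h>
#include <string.h>

struct cursor {
	char *buf;
	size_t pos, len;
};

static void cursor_advance(struct cursor *c, size_t n)
{
	c->pos += n;
}

static void cursor_zero(struct cursor *c, size_t n)
{
	memset(c->buf + c->pos, 0, n);
	c->pos += n;
}

int main(void)
{
	char data[8] = "XXXXXXXX";
	struct cursor iter = { .buf = data, .pos = 0, .len = 8 };
	size_t want = 8, got = 3;	/* short read: 3 of 8 bytes */

	if (got < want) {
		struct cursor tmp = iter;	/* copy, as in the hunk */

		cursor_advance(&tmp, got);
		cursor_zero(&tmp, want - got);
	}
	/* iter.pos is still 0; the zeroed pad follows the 3 read bytes. */
	printf("pos=%zu tail=%d\n", iter.pos, data[7]);
	return 0;
}

Working on the copy leaves the caller's cursor untouched, which is simpler and less error-prone than the old zero-then-revert dance.
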
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 2cb1bf65a172..02828b9c3cb3 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -1266,6 +1266,16 @@ SMB2_negotiate(const unsigned int xid,
else
cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
}
+
+ if (server->cipher_type && !rc) {
+ if (!SERVER_IS_CHAN(server)) {
+ rc = smb3_crypto_aead_allocate(server);
+ } else {
+ /* For channels, just reuse the primary server crypto secmech. */
+ server->secmech.enc = server->primary_server->secmech.enc;
+ server->secmech.dec = server->primary_server->secmech.dec;
+ }
+ }
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
@@ -4866,7 +4876,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
#endif
if (result) {
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
- trace_smb3_write_err(wdata->xid,
+ trace_smb3_write_err(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
wdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, wdata->subreq.start,
wdata->subreq.len, wdata->result);
@@ -4874,7 +4886,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
pr_warn_once("Out of space writing to %s\n",
tcon->tree_name);
} else
- trace_smb3_write_done(0 /* no xid */,
+ trace_smb3_write_done(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
wdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
wdata->subreq.start, wdata->subreq.len);
@@ -4952,7 +4966,9 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
- trace_smb3_write_enter(wdata->xid,
+ trace_smb3_write_enter(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ wdata->xid,
io_parms->persistent_fid,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5032,7 +5048,9 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
wdata, flags, &wdata->credits);
/* Can't touch wdata if rc == 0 */
if (rc) {
- trace_smb3_write_err(xid,
+ trace_smb3_write_err(wdata->rreq->debug_id,
+ wdata->subreq.debug_index,
+ xid,
io_parms->persistent_fid,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5112,7 +5130,7 @@ replay_again:
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
- trace_smb3_write_enter(xid, io_parms->persistent_fid,
+ trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid,
io_parms->tcon->tid, io_parms->tcon->ses->Suid,
io_parms->offset, io_parms->length);
@@ -5133,7 +5151,7 @@ replay_again:
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
if (rc) {
- trace_smb3_write_err(xid,
+ trace_smb3_write_err(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -5142,7 +5160,7 @@ replay_again:
cifs_dbg(VFS, "Send error in write = %d\n", rc);
} else {
*nbytes = le32_to_cpu(rsp->DataLength);
- trace_smb3_write_done(xid,
+ trace_smb3_write_done(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index c7e1b149877a..56a896ff7cd9 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -291,7 +291,7 @@ extern int smb2_validate_and_copy_iov(unsigned int offset,
extern void smb2_copy_fs_info_to_kstatfs(
struct smb2_fs_full_size_info *pfs_inf,
struct kstatfs *kst);
-extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
+extern int smb3_crypto_shash_allocate(struct TCP_Server_Info *server);
extern int smb311_update_preauth_hash(struct cifs_ses *ses,
struct TCP_Server_Info *server,
struct kvec *iov, int nvec);
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index e4636fca821d..f7e04c40d22e 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -26,8 +26,7 @@
#include "../common/smb2status.h"
#include "smb2glob.h"
-static int
-smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
+int smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
{
struct cifs_secmech *p = &server->secmech;
int rc;
@@ -46,33 +45,6 @@ err:
return rc;
}
-int
-smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
-{
- struct cifs_secmech *p = &server->secmech;
- int rc = 0;
-
- rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256);
- if (rc)
- return rc;
-
- rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
- if (rc)
- goto err;
-
- rc = cifs_alloc_hash("sha512", &p->sha512);
- if (rc)
- goto err;
-
- return 0;
-
-err:
- cifs_free_hash(&p->aes_cmac);
- cifs_free_hash(&p->hmacsha256);
- return rc;
-}
-
-
static
int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
{
@@ -242,7 +214,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId));
if (unlikely(!ses)) {
- cifs_server_dbg(VFS, "%s: Could not find session\n", __func__);
+ cifs_server_dbg(FYI, "%s: Could not find session\n", __func__);
return -ENOENT;
}
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 8e9964001e2a..0b52d22a91a0 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -157,6 +157,7 @@ DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \
TP_ARGS(rreq_debug_id, rreq_debug_index, xid, fid, tid, sesid, offset, len, rc))
DEFINE_SMB3_RW_ERR_EVENT(read_err);
+DEFINE_SMB3_RW_ERR_EVENT(write_err);
/* For logging errors in other file I/O ops */
DECLARE_EVENT_CLASS(smb3_other_err_class,
@@ -202,7 +203,6 @@ DEFINE_EVENT(smb3_other_err_class, smb3_##name, \
int rc), \
TP_ARGS(xid, fid, tid, sesid, offset, len, rc))
-DEFINE_SMB3_OTHER_ERR_EVENT(write_err);
DEFINE_SMB3_OTHER_ERR_EVENT(query_dir_err);
DEFINE_SMB3_OTHER_ERR_EVENT(zero_err);
DEFINE_SMB3_OTHER_ERR_EVENT(falloc_err);
@@ -370,6 +370,8 @@ DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \
DEFINE_SMB3_RW_DONE_EVENT(read_enter);
DEFINE_SMB3_RW_DONE_EVENT(read_done);
+DEFINE_SMB3_RW_DONE_EVENT(write_enter);
+DEFINE_SMB3_RW_DONE_EVENT(write_done);
/* For logging successful other op */
DECLARE_EVENT_CLASS(smb3_other_done_class,
@@ -411,11 +413,9 @@ DEFINE_EVENT(smb3_other_done_class, smb3_##name, \
__u32 len), \
TP_ARGS(xid, fid, tid, sesid, offset, len))
-DEFINE_SMB3_OTHER_DONE_EVENT(write_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(zero_enter);
DEFINE_SMB3_OTHER_DONE_EVENT(falloc_enter);
-DEFINE_SMB3_OTHER_DONE_EVENT(write_done);
DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_done);
DEFINE_SMB3_OTHER_DONE_EVENT(zero_done);
DEFINE_SMB3_OTHER_DONE_EVENT(falloc_done);
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index fd5a85d43759..91812150186c 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -1817,11 +1817,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
length = data_len; /* An RDMA read is already done. */
else
#endif
- {
length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
data_len);
- iov_iter_revert(&rdata->subreq.io_iter, data_len);
- }
if (length > 0)
rdata->got_bytes += length;
server->total_read += length;
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index c769f9dbc0b4..9f272cc8f566 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -6,7 +6,7 @@
* Note that, due to trying to use names similar to the protocol specifications,
* there are many mixed case field names in the structures below. Although
* this does not match typical Linux kernel style, it is necessary to be
- * able to match against the protocol specfication.
+ * able to match against the protocol specification.
*
* SMB2 commands
* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
@@ -491,7 +491,7 @@ struct smb2_encryption_neg_context {
__le16 ContextType; /* 2 */
__le16 DataLength;
__le32 Reserved;
- /* CipherCount usally 2, but can be 3 when AES256-GCM enabled */
+ /* CipherCount usually 2, but can be 3 when AES256-GCM enabled */
__le16 CipherCount; /* AES128-GCM and AES128-CCM by default */
__le16 Ciphers[];
} __packed;
@@ -1061,7 +1061,7 @@ struct smb2_server_client_notification {
#define IL_IMPERSONATION cpu_to_le32(0x00000002)
#define IL_DELEGATE cpu_to_le32(0x00000003)
-/* File Attrubutes */
+/* File Attributes */
#define FILE_ATTRIBUTE_READONLY 0x00000001
#define FILE_ATTRIBUTE_HIDDEN 0x00000002
#define FILE_ATTRIBUTE_SYSTEM 0x00000004
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index cac80e7bfefc..aa2a37a7ce84 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -25,7 +25,7 @@ DECLARE_RWSEM(conn_list_lock);
/**
* ksmbd_conn_free() - free resources of the connection instance
*
- * @conn: connection instance to be cleand up
+ * @conn: connection instance to be cleaned up
*
* During the thread termination, the corresponding conn instance
* resources(sock/memory) are released and finally the conn object is freed.
diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
index f4e55199938d..38e6fd2da3b8 100644
--- a/fs/smb/server/ksmbd_netlink.h
+++ b/fs/smb/server/ksmbd_netlink.h
@@ -213,7 +213,7 @@ struct ksmbd_tree_connect_response {
};
/*
- * IPC Request struture to disconnect tree connection.
+ * IPC Request structure to disconnect tree connection.
*/
struct ksmbd_tree_disconnect_request {
__u64 session_id; /* session id */
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 246cde380dfb..4142c7ad5fa9 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -796,7 +796,7 @@ out:
/**
* smb2_lease_break_noti() - break lease when a new client request
* write lease
- * @opinfo: conains lease state information
+ * @opinfo: contains lease state information
*
* Return: 0 on success, otherwise error
*/
@@ -1484,7 +1484,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
}
/**
- * parse_lease_state() - parse lease context containted in file open request
+ * parse_lease_state() - parse lease context contained in file open request
* @open_req: buffer containing smb2 file open(create) request
*
* Return: allocated lease context object on success, otherwise NULL
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index c402d4abe826..231d2d224656 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -279,7 +279,7 @@ static void handle_ksmbd_work(struct work_struct *wk)
/**
* queue_ksmbd_work() - queue a smb request to worker thread queue
- * for proccessing smb command and sending response
+ * for processing smb command and sending response
* @conn: connection instance
*
* read remaining data from socket create and submit work.
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index e6bdc1b20727..7460089c186f 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1335,8 +1335,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
return rc;
sz = le16_to_cpu(rsp->SecurityBufferOffset);
- chgblob =
- (struct challenge_message *)((char *)&rsp->hdr.ProtocolId + sz);
+ chgblob = (struct challenge_message *)rsp->Buffer;
memset(chgblob, 0, sizeof(struct challenge_message));
if (!work->conn->use_spnego) {
@@ -1369,9 +1368,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
goto out;
}
- sz = le16_to_cpu(rsp->SecurityBufferOffset);
- unsafe_memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len,
- /* alloc is larger than blob, see smb2_allocate_rsp_buf() */);
+ memcpy(rsp->Buffer, spnego_blob, spnego_blob_len);
rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
out:
@@ -1453,10 +1450,7 @@ static int ntlm_authenticate(struct ksmbd_work *work,
if (rc)
return -ENOMEM;
- sz = le16_to_cpu(rsp->SecurityBufferOffset);
- unsafe_memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob,
- spnego_blob_len,
- /* alloc is larger than blob, see smb2_allocate_rsp_buf() */);
+ memcpy(rsp->Buffer, spnego_blob, spnego_blob_len);
rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
kfree(spnego_blob);
}
@@ -2058,18 +2052,20 @@ out_err1:
* @access: file access flags
* @disposition: file disposition flags
* @may_flags: set with MAY_ flags
- * @is_dir: is creating open flags for directory
+ * @coptions: file creation options
+ * @mode: file mode
*
* Return: file open flags
*/
static int smb2_create_open_flags(bool file_present, __le32 access,
__le32 disposition,
int *may_flags,
- bool is_dir)
+ __le32 coptions,
+ umode_t mode)
{
int oflags = O_NONBLOCK | O_LARGEFILE;
- if (is_dir) {
+ if (coptions & FILE_DIRECTORY_FILE_LE || S_ISDIR(mode)) {
access &= ~FILE_WRITE_DESIRE_ACCESS_LE;
ksmbd_debug(SMB, "Discard write access to a directory\n");
}
@@ -2086,7 +2082,7 @@ static int smb2_create_open_flags(bool file_present, __le32 access,
*may_flags = MAY_OPEN | MAY_READ;
}
- if (access == FILE_READ_ATTRIBUTES_LE)
+ if (access == FILE_READ_ATTRIBUTES_LE || S_ISBLK(mode) || S_ISCHR(mode))
oflags |= O_PATH;
if (file_present) {
@@ -3181,8 +3177,8 @@ int smb2_open(struct ksmbd_work *work)
open_flags = smb2_create_open_flags(file_present, daccess,
req->CreateDisposition,
&may_flags,
- req->CreateOptions & FILE_DIRECTORY_FILE_LE ||
- (file_present && S_ISDIR(d_inode(path.dentry)->i_mode)));
+ req->CreateOptions,
+ file_present ? d_inode(path.dentry)->i_mode : 0);
if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
if (open_flags & (O_CREAT | O_TRUNC)) {
@@ -3531,8 +3527,9 @@ int smb2_open(struct ksmbd_work *work)
memcpy(fp->create_guid, dh_info.CreateGuid,
SMB2_CREATE_GUID_SIZE);
if (dh_info.timeout)
- fp->durable_timeout = min(dh_info.timeout,
- DURABLE_HANDLE_MAX_TIMEOUT);
+ fp->durable_timeout =
+ min_t(unsigned int, dh_info.timeout,
+ DURABLE_HANDLE_MAX_TIMEOUT);
else
fp->durable_timeout = 60;
}
@@ -4586,7 +4583,7 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
path = &fp->filp->f_path;
/* single EA entry is requested with given user.* name */
if (req->InputBufferLength) {
- if (le32_to_cpu(req->InputBufferLength) <
+ if (le32_to_cpu(req->InputBufferLength) <=
sizeof(struct smb2_ea_info_req))
return -EINVAL;
@@ -8090,7 +8087,7 @@ int smb2_ioctl(struct ksmbd_work *work)
goto out;
}
- if (in_buf_len < sizeof(struct copychunk_ioctl_req)) {
+ if (in_buf_len <= sizeof(struct copychunk_ioctl_req)) {
ret = -EINVAL;
goto out;
}
diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
index 3be7d5ae65a8..73aff20e22d0 100644
--- a/fs/smb/server/smb2pdu.h
+++ b/fs/smb/server/smb2pdu.h
@@ -194,7 +194,7 @@ struct copychunk_ioctl_req {
__le64 ResumeKey[3];
__le32 ChunkCount;
__le32 Reserved;
- __u8 Chunks[1]; /* array of srv_copychunk */
+ __u8 Chunks[]; /* array of srv_copychunk */
} __packed;
struct srv_copychunk {
@@ -370,7 +370,7 @@ struct smb2_file_attr_tag_info {
struct smb2_ea_info_req {
__le32 NextEntryOffset;
__u8 EaNameLength;
- char name[1];
+ char name[];
} __packed; /* level 15 Query */
struct smb2_ea_info {
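
The two one-element arrays above become C99 flexible array members, which shrinks the sizeof of each struct by the placeholder byte; that is what the '<' to '<=' changes in the smb2pdu.c length checks earlier in this series compensate for. A sketch of the sizeof effect, packed with a GCC/Clang attribute to mirror the on-the-wire layout:

#include <stdio.h>

struct ea_req_old {
	unsigned int next_entry_offset;
	unsigned char ea_name_length;
	char name[1];	/* pre-C99 placeholder, counted by sizeof */
} __attribute__((packed));

struct ea_req_new {
	unsigned int next_entry_offset;
	unsigned char ea_name_length;
	char name[];	/* flexible array member, not counted */
} __attribute__((packed));

int main(void)
{
	printf("old sizeof = %zu\n", sizeof(struct ea_req_old));	/* 6 */
	printf("new sizeof = %zu\n", sizeof(struct ea_req_new));	/* 5 */
	/*
	 * Old check: len < sizeof(old) rejected lengths of 5 and below,
	 * so a passing buffer always carried a name byte. With sizeof
	 * one smaller, the check must become len <= sizeof(new) to keep
	 * the same guarantee, hence the '<' -> '<=' changes above.
	 */
	return 0;
}
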
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index cc4bb2377cbd..5b8d75e78ffb 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -488,7 +488,7 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
* @shortname: destination short filename
*
* Return: shortname length or 0 when source long name is '.' or '..'
- * TODO: Though this function comforms the restriction of 8.3 Filename spec,
+ * TODO: Though this function conforms to the restriction of the 8.3 Filename spec,
* but the result is different with Windows 7's one. need to check.
*/
int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
index b0f6d0f94cb8..5bbb179736c2 100644
--- a/fs/smb/server/vfs_cache.h
+++ b/fs/smb/server/vfs_cache.h
@@ -100,8 +100,8 @@ struct ksmbd_file {
struct list_head blocked_works;
struct list_head lock_list;
- int durable_timeout;
- int durable_scavenger_timeout;
+ unsigned int durable_timeout;
+ unsigned int durable_scavenger_timeout;
/* if ls is happening on directory, below is valid*/
struct ksmbd_readdir_data readdir_data;
diff --git a/fs/smb/server/xattr.h b/fs/smb/server/xattr.h
index fa3e27d6971b..505101a8104c 100644
--- a/fs/smb/server/xattr.h
+++ b/fs/smb/server/xattr.h
@@ -99,7 +99,7 @@ struct xattr_ntacl {
__u8 posix_acl_hash[XATTR_SD_HASH_SIZE]; /* 64bytes hash for posix acl */
};
-/* DOS ATTRIBUITE XATTR PREFIX */
+/* DOS ATTRIBUTE XATTR PREFIX */
#define DOS_ATTRIBUTE_PREFIX "DOSATTRIB"
#define DOS_ATTRIBUTE_PREFIX_LEN (sizeof(DOS_ATTRIBUTE_PREFIX) - 1)
#define XATTR_NAME_DOS_ATTRIBUTE (XATTR_USER_PREFIX DOS_ATTRIBUTE_PREFIX)
diff --git a/fs/splice.c b/fs/splice.c
index 60aed8de21f8..06232d7e505f 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1566,11 +1566,11 @@ static ssize_t vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
static int vmsplice_type(struct fd f, int *type)
{
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (f.file->f_mode & FMODE_WRITE) {
+ if (fd_file(f)->f_mode & FMODE_WRITE) {
*type = ITER_SOURCE;
- } else if (f.file->f_mode & FMODE_READ) {
+ } else if (fd_file(f)->f_mode & FMODE_READ) {
*type = ITER_DEST;
} else {
fdput(f);
@@ -1621,9 +1621,9 @@ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov,
if (!iov_iter_count(&iter))
error = 0;
else if (type == ITER_SOURCE)
- error = vmsplice_to_pipe(f.file, &iter, flags);
+ error = vmsplice_to_pipe(fd_file(f), &iter, flags);
else
- error = vmsplice_to_user(f.file, &iter, flags);
+ error = vmsplice_to_user(fd_file(f), &iter, flags);
kfree(iov);
out_fdput:
@@ -1646,10 +1646,10 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
error = -EBADF;
in = fdget(fd_in);
- if (in.file) {
+ if (fd_file(in)) {
out = fdget(fd_out);
- if (out.file) {
- error = __do_splice(in.file, off_in, out.file, off_out,
+ if (fd_file(out)) {
+ error = __do_splice(fd_file(in), off_in, fd_file(out), off_out,
len, flags);
fdput(out);
}
@@ -2016,10 +2016,10 @@ SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
error = -EBADF;
in = fdget(fdin);
- if (in.file) {
+ if (fd_file(in)) {
out = fdget(fdout);
- if (out.file) {
- error = do_tee(in.file, out.file, len, flags);
+ if (fd_file(out)) {
+ error = do_tee(fd_file(in), fd_file(out), len, flags);
fdput(out);
}
fdput(in);
diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
index 8a218e7c2390..e4d7e507b268 100644
--- a/fs/squashfs/decompressor_multi_percpu.c
+++ b/fs/squashfs/decompressor_multi_percpu.c
@@ -46,7 +46,7 @@ static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
}
kfree(comp_opts);
- return (__force void *) percpu;
+ return (void *)(__force unsigned long) percpu;
out:
for_each_possible_cpu(cpu) {
@@ -61,7 +61,7 @@ out:
static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
{
struct squashfs_stream __percpu *percpu =
- (struct squashfs_stream __percpu *) msblk->stream;
+ (void __percpu *)(unsigned long) msblk->stream;
struct squashfs_stream *stream;
int cpu;
@@ -79,7 +79,7 @@ static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
{
struct squashfs_stream *stream;
struct squashfs_stream __percpu *percpu =
- (struct squashfs_stream __percpu *) msblk->stream;
+ (void __percpu *)(unsigned long) msblk->stream;
int res;
local_lock(&percpu->lock);
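
The squashfs casts above launder the pointer through unsigned long so the conversion between the __percpu and plain address spaces is explicit, rather than a direct pointer cast that sparse would flag. The userspace analogue is round-tripping a pointer through uintptr_t when it must be parked in a differently-typed slot; the types below are stand-ins:

#include <stdio.h>
#include <stdint.h>

struct stream { int id; };

struct sb_info { void *stream; };	/* opaque storage slot */

int main(void)
{
	static struct stream s = { .id = 7 };
	struct sb_info msblk;

	/* Store and retrieve through an integer type, making the
	 * type-system bypass deliberate and visible. */
	msblk.stream = (void *)(uintptr_t)&s;
	struct stream *p = (struct stream *)(uintptr_t)msblk.stream;

	printf("stream id %d\n", p->id);
	return 0;
}
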
diff --git a/fs/stat.c b/fs/stat.c
index 89ce1be56310..41e598376d7e 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -224,9 +224,9 @@ int vfs_fstat(int fd, struct kstat *stat)
int error;
f = fdget_raw(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
+ error = vfs_getattr(&fd_file(f)->f_path, stat, STATX_BASIC_STATS, 0);
fdput(f);
return error;
}
@@ -277,9 +277,9 @@ static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
u32 request_mask)
{
CLASS(fd_raw, f)(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- return vfs_statx_path(&f.file->f_path, flags, stat, request_mask);
+ return vfs_statx_path(&fd_file(f)->f_path, flags, stat, request_mask);
}
/**
diff --git a/fs/statfs.c b/fs/statfs.c
index 96d1c3edf289..9c7bb27e7932 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -116,8 +116,8 @@ int fd_statfs(int fd, struct kstatfs *st)
{
struct fd f = fdget_raw(fd);
int error = -EBADF;
- if (f.file) {
- error = vfs_statfs(&f.file->f_path, st);
+ if (fd_file(f)) {
+ error = vfs_statfs(&fd_file(f)->f_path, st);
fdput(f);
}
return error;
diff --git a/fs/sync.c b/fs/sync.c
index dc725914e1ed..67df255eb189 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -152,15 +152,15 @@ SYSCALL_DEFINE1(syncfs, int, fd)
struct super_block *sb;
int ret, ret2;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- sb = f.file->f_path.dentry->d_sb;
+ sb = fd_file(f)->f_path.dentry->d_sb;
down_read(&sb->s_umount);
ret = sync_filesystem(sb);
up_read(&sb->s_umount);
- ret2 = errseq_check_and_advance(&sb->s_wb_err, &f.file->f_sb_err);
+ ret2 = errseq_check_and_advance(&sb->s_wb_err, &fd_file(f)->f_sb_err);
fdput(f);
return ret ? ret : ret2;
@@ -208,8 +208,8 @@ static int do_fsync(unsigned int fd, int datasync)
struct fd f = fdget(fd);
int ret = -EBADF;
- if (f.file) {
- ret = vfs_fsync(f.file, datasync);
+ if (fd_file(f)) {
+ ret = vfs_fsync(fd_file(f), datasync);
fdput(f);
}
return ret;
@@ -360,8 +360,8 @@ int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
ret = -EBADF;
f = fdget(fd);
- if (f.file)
- ret = sync_file_range(f.file, offset, nbytes, flags);
+ if (fd_file(f))
+ ret = sync_file_range(fd_file(f), offset, nbytes, flags);
fdput(f);
return ret;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 4bf2f8bfec11..137523e0bb21 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -397,9 +397,9 @@ static const struct file_operations timerfd_fops = {
static int timerfd_fget(int fd, struct fd *p)
{
struct fd f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (f.file->f_op != &timerfd_fops) {
+ if (fd_file(f)->f_op != &timerfd_fops) {
fdput(f);
return -EINVAL;
}
@@ -482,7 +482,7 @@ static int do_timerfd_settime(int ufd, int flags,
ret = timerfd_fget(ufd, &f);
if (ret)
return ret;
- ctx = f.file->private_data;
+ ctx = fd_file(f)->private_data;
if (isalarm(ctx) && !capable(CAP_WAKE_ALARM)) {
fdput(f);
@@ -546,7 +546,7 @@ static int do_timerfd_gettime(int ufd, struct itimerspec64 *t)
int ret = timerfd_fget(ufd, &f);
if (ret)
return ret;
- ctx = f.file->private_data;
+ ctx = fd_file(f)->private_data;
spin_lock_irq(&ctx->wqh.lock);
if (ctx->expired && ctx->tintv) {
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index d91cec93d968..5cc69beaa62e 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2807,7 +2807,6 @@ static const struct file_operations dfs_fops = {
.read = dfs_file_read,
.write = dfs_file_write,
.owner = THIS_MODULE,
- .llseek = no_llseek,
};
/**
@@ -2952,7 +2951,6 @@ static const struct file_operations dfs_global_fops = {
.read = dfs_global_file_read,
.write = dfs_global_file_write,
.owner = THIS_MODULE,
- .llseek = no_llseek,
};
/**
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 27a3e9285fbf..68cdd89c97a3 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -104,21 +104,6 @@ bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
}
-static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
- vm_flags_t flags)
-{
- const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
-
- vm_flags_reset(vma, flags);
- /*
- * For shared mappings, we want to enable writenotify while
- * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
- * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
- */
- if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
- vma_set_page_prot(vma);
-}
-
static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
int wake_flags, void *key)
{
@@ -386,15 +371,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
unsigned int blocking_state;
/*
- * We don't do userfault handling for the final child pid update.
- *
- * We also don't do userfault handling during
- * coredumping. hugetlbfs has the special
- * hugetlb_follow_page_mask() to skip missing pages in the
- * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
- * the no_page_table() helper in follow_page_mask(), but the
- * shmem_vm_ops->fault method is invoked even during
- * coredumping and it ends up here.
+ * We don't do userfault handling for the final child pid update
+ * and when coredumping (faults triggered by get_dump_page()).
*/
if (current->flags & (PF_EXITING|PF_DUMPCORE))
goto out;
@@ -615,22 +593,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
spin_unlock_irq(&ctx->event_wqh.lock);
if (release_new_ctx) {
- struct vm_area_struct *vma;
- struct mm_struct *mm = release_new_ctx->mm;
- VMA_ITERATOR(vmi, mm, 0);
-
- /* the various vma->vm_userfaultfd_ctx still points to it */
- mmap_write_lock(mm);
- for_each_vma(vmi, vma) {
- if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
- vma_start_write(vma);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- userfaultfd_set_vm_flags(vma,
- vma->vm_flags & ~__VM_UFFD_FLAGS);
- }
- }
- mmap_write_unlock(mm);
-
+ userfaultfd_release_new(release_new_ctx);
userfaultfd_ctx_put(release_new_ctx);
}
@@ -662,9 +625,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
return 0;
if (!(octx->features & UFFD_FEATURE_EVENT_FORK)) {
- vma_start_write(vma);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
+ userfaultfd_reset_ctx(vma);
return 0;
}
@@ -749,9 +710,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
up_write(&ctx->map_changing_lock);
} else {
/* Drop uffd context if remap feature not enabled */
- vma_start_write(vma);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
- userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
+ userfaultfd_reset_ctx(vma);
}
}
@@ -870,54 +829,14 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
{
struct userfaultfd_ctx *ctx = file->private_data;
struct mm_struct *mm = ctx->mm;
- struct vm_area_struct *vma, *prev;
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
- unsigned long new_flags;
- VMA_ITERATOR(vmi, mm, 0);
WRITE_ONCE(ctx->released, true);
- if (!mmget_not_zero(mm))
- goto wakeup;
+ userfaultfd_release_all(mm, ctx);
/*
- * Flush page faults out of all CPUs. NOTE: all page faults
- * must be retried without returning VM_FAULT_SIGBUS if
- * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
- * changes while handle_userfault released the mmap_lock. So
- * it's critical that released is set to true (above), before
- * taking the mmap_lock for writing.
- */
- mmap_write_lock(mm);
- prev = NULL;
- for_each_vma(vmi, vma) {
- cond_resched();
- BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
- !!(vma->vm_flags & __VM_UFFD_FLAGS));
- if (vma->vm_userfaultfd_ctx.ctx != ctx) {
- prev = vma;
- continue;
- }
- /* Reset ptes for the whole vma range if wr-protected */
- if (userfaultfd_wp(vma))
- uffd_wp_range(vma, vma->vm_start,
- vma->vm_end - vma->vm_start, false);
- new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
- vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
- vma->vm_end, new_flags,
- NULL_VM_UFFD_CTX);
-
- vma_start_write(vma);
- userfaultfd_set_vm_flags(vma, new_flags);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-
- prev = vma;
- }
- mmap_write_unlock(mm);
- mmput(mm);
-wakeup:
- /*
* After no new page faults can wait on this fault_*wqh, flush
* the last page faults that may have been already waiting on
* the fault_*wqh.
@@ -1293,14 +1212,14 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
struct mm_struct *mm = ctx->mm;
- struct vm_area_struct *vma, *prev, *cur;
+ struct vm_area_struct *vma, *cur;
int ret;
struct uffdio_register uffdio_register;
struct uffdio_register __user *user_uffdio_register;
- unsigned long vm_flags, new_flags;
+ unsigned long vm_flags;
bool found;
bool basic_ioctls;
- unsigned long start, end, vma_end;
+ unsigned long start, end;
struct vma_iterator vmi;
bool wp_async = userfaultfd_wp_async_ctx(ctx);
@@ -1428,57 +1347,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
} for_each_vma_range(vmi, cur, end);
BUG_ON(!found);
- vma_iter_set(&vmi, start);
- prev = vma_prev(&vmi);
- if (vma->vm_start < start)
- prev = vma;
-
- ret = 0;
- for_each_vma_range(vmi, vma, end) {
- cond_resched();
-
- BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
- BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
- vma->vm_userfaultfd_ctx.ctx != ctx);
- WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
-
- /*
- * Nothing to do: this vma is already registered into this
- * userfaultfd and with the right tracking mode too.
- */
- if (vma->vm_userfaultfd_ctx.ctx == ctx &&
- (vma->vm_flags & vm_flags) == vm_flags)
- goto skip;
-
- if (vma->vm_start > start)
- start = vma->vm_start;
- vma_end = min(end, vma->vm_end);
-
- new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
- vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
- new_flags,
- (struct vm_userfaultfd_ctx){ctx});
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- break;
- }
-
- /*
- * In the vma_merge() successful mprotect-like case 8:
- * the next vma was merged into the current one and
- * the current one has not been updated yet.
- */
- vma_start_write(vma);
- userfaultfd_set_vm_flags(vma, new_flags);
- vma->vm_userfaultfd_ctx.ctx = ctx;
-
- if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
- hugetlb_unshare_all_pmds(vma);
-
- skip:
- prev = vma;
- start = vma->vm_end;
- }
+ ret = userfaultfd_register_range(ctx, vma, vm_flags, start, end,
+ wp_async);
out_unlock:
mmap_write_unlock(mm);
@@ -1519,7 +1389,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
struct vm_area_struct *vma, *prev, *cur;
int ret;
struct uffdio_range uffdio_unregister;
- unsigned long new_flags;
bool found;
unsigned long start, end, vma_end;
const void __user *buf = (void __user *)arg;
@@ -1622,27 +1491,13 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
}
- /* Reset ptes for the whole vma range if wr-protected */
- if (userfaultfd_wp(vma))
- uffd_wp_range(vma, start, vma_end - start, false);
-
- new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
- vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
- new_flags, NULL_VM_UFFD_CTX);
+ vma = userfaultfd_clear_vma(&vmi, prev, vma,
+ start, vma_end);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
break;
}
- /*
- * In the vma_merge() successful mprotect-like case 8:
- * the next vma was merged into the current one and
- * the current one has not been updated yet.
- */
- vma_start_write(vma);
- userfaultfd_set_vm_flags(vma, new_flags);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-
skip:
prev = vma;
start = vma->vm_end;
diff --git a/fs/utimes.c b/fs/utimes.c
index 3701b3946f88..99b26f792b89 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -115,9 +115,9 @@ static int do_utimes_fd(int fd, struct timespec64 *times, int flags)
return -EINVAL;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- error = vfs_utimes(&f.file->f_path, times);
+ error = vfs_utimes(&fd_file(f)->f_path, times);
fdput(f);
return error;
}
diff --git a/fs/xattr.c b/fs/xattr.c
index 7672ce5486c5..05ec7e7d9e87 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -697,19 +697,19 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
int error;
CLASS(fd, f)(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- audit_file(f.file);
+ audit_file(fd_file(f));
error = setxattr_copy(name, &ctx);
if (error)
return error;
- error = mnt_want_write_file(f.file);
+ error = mnt_want_write_file(fd_file(f));
if (!error) {
- error = do_setxattr(file_mnt_idmap(f.file),
- f.file->f_path.dentry, &ctx);
- mnt_drop_write_file(f.file);
+ error = do_setxattr(file_mnt_idmap(fd_file(f)),
+ fd_file(f)->f_path.dentry, &ctx);
+ mnt_drop_write_file(fd_file(f));
}
kvfree(ctx.kvalue);
return error;
@@ -812,10 +812,10 @@ SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
struct fd f = fdget(fd);
ssize_t error = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
return error;
- audit_file(f.file);
- error = getxattr(file_mnt_idmap(f.file), f.file->f_path.dentry,
+ audit_file(fd_file(f));
+ error = getxattr(file_mnt_idmap(fd_file(f)), fd_file(f)->f_path.dentry,
name, value, size);
fdput(f);
return error;
@@ -888,10 +888,10 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
struct fd f = fdget(fd);
ssize_t error = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
return error;
- audit_file(f.file);
- error = listxattr(f.file->f_path.dentry, list, size);
+ audit_file(fd_file(f));
+ error = listxattr(fd_file(f)->f_path.dentry, list, size);
fdput(f);
return error;
}
@@ -954,9 +954,9 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
char kname[XATTR_NAME_MAX + 1];
int error = -EBADF;
- if (!f.file)
+ if (!fd_file(f))
return error;
- audit_file(f.file);
+ audit_file(fd_file(f));
error = strncpy_from_user(kname, name, sizeof(kname));
if (error == 0 || error == sizeof(kname))
@@ -964,11 +964,11 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
if (error < 0)
return error;
- error = mnt_want_write_file(f.file);
+ error = mnt_want_write_file(fd_file(f));
if (!error) {
- error = removexattr(file_mnt_idmap(f.file),
- f.file->f_path.dentry, kname);
- mnt_drop_write_file(f.file);
+ error = removexattr(file_mnt_idmap(fd_file(f)),
+ fd_file(f)->f_path.dentry, kname);
+ mnt_drop_write_file(fd_file(f));
}
fdput(f);
return error;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 6aaec1246c95..e50d913ad32f 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -1138,10 +1138,7 @@ xfs_attr3_leaf_to_shortform(
trace_xfs_attr_leaf_to_sf(args);
- tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
- if (!tmpbuffer)
- return -ENOMEM;
-
+ tmpbuffer = kvmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1205,7 +1202,7 @@ xfs_attr3_leaf_to_shortform(
error = 0;
out:
- kfree(tmpbuffer);
+ kvfree(tmpbuffer);
return error;
}
@@ -1613,7 +1610,7 @@ xfs_attr3_leaf_compact(
trace_xfs_attr_leaf_compact(args);
- tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
+ tmpbuffer = kvmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
memset(bp->b_addr, 0, args->geo->blksize);
leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1651,7 +1648,7 @@ xfs_attr3_leaf_compact(
*/
xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1);
- kfree(tmpbuffer);
+ kvfree(tmpbuffer);
}
/*
@@ -2330,7 +2327,7 @@ xfs_attr3_leaf_unbalance(
struct xfs_attr_leafblock *tmp_leaf;
struct xfs_attr3_icleaf_hdr tmphdr;
- tmp_leaf = kzalloc(state->args->geo->blksize,
+ tmp_leaf = kvzalloc(state->args->geo->blksize,
GFP_KERNEL | __GFP_NOFAIL);
/*
@@ -2371,7 +2368,7 @@ xfs_attr3_leaf_unbalance(
}
memcpy(save_leaf, tmp_leaf, state->args->geo->blksize);
savehdr = tmphdr; /* struct copy */
- kfree(tmp_leaf);
+ kvfree(tmp_leaf);
}
xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr);
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 20bb5ce38134..271855227514 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -3034,6 +3034,11 @@ xfs_ialloc_setup_geometry(
igeo->ialloc_align = mp->m_dalign;
else
igeo->ialloc_align = 0;
+
+ if (mp->m_sb.sb_blocksize > PAGE_SIZE)
+ igeo->min_folio_order = mp->m_sb.sb_blocklog - PAGE_SHIFT;
+ else
+ igeo->min_folio_order = 0;
}
/* Compute the location of the root directory inode that is laid out by mkfs. */
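
The new min_folio_order computation says: when the filesystem block size exceeds the page size, a single block spans 1 << (sb_blocklog - PAGE_SHIFT) pages, so that difference is the smallest folio order that can cover one block. A worked sketch, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12	/* log2 of a 4 KiB page */

static unsigned int min_folio_order(unsigned int blocklog)
{
	/* Mirrors the xfs_ialloc_setup_geometry() hunk above. */
	return blocklog > PAGE_SHIFT ? blocklog - PAGE_SHIFT : 0;
}

int main(void)
{
	/* 64 KiB blocks on 4 KiB pages -> order 4 (16 pages). */
	printf("blocklog 16 -> order %u\n", min_folio_order(16));
	/* Block size <= page size needs no large folios. */
	printf("blocklog 12 -> order %u\n", min_folio_order(12));
	return 0;
}

The order is then fed to mapping_set_folio_min_order() in xfs_icache.c below, so the page cache never allocates a folio smaller than one filesystem block.
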
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 2f7413afbf46..33b84a3a83ff 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -224,6 +224,9 @@ struct xfs_ino_geometry {
/* precomputed value for di_flags2 */
uint64_t new_diflags2;
+ /* minimum folio order of a page cache allocation */
+ unsigned int min_folio_order;
+
};
#endif /* __XFS_SHARED_H__ */
diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c
index 9b5d98fe1f8a..c753c79df203 100644
--- a/fs/xfs/scrub/xfile.c
+++ b/fs/xfs/scrub/xfile.c
@@ -126,7 +126,7 @@ xfile_load(
unsigned int len;
unsigned int offset;
- if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+ if (shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
SGP_READ) < 0)
break;
if (!folio) {
@@ -196,7 +196,7 @@ xfile_store(
unsigned int len;
unsigned int offset;
- if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+ if (shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
SGP_CACHE) < 0)
break;
if (filemap_check_wb_err(inode->i_mapping, 0)) {
@@ -267,7 +267,7 @@ xfile_get_folio(
i_size_write(inode, pos + len);
pflags = memalloc_nofs_save();
- error = shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+ error = shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
(flags & XFILE_ALLOC) ? SGP_CACHE : SGP_READ);
memalloc_nofs_restore(pflags);
if (error)
diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c
index 9bb2d24de709..07bebbfb16ee 100644
--- a/fs/xfs/xfs_buf_mem.c
+++ b/fs/xfs/xfs_buf_mem.c
@@ -149,7 +149,7 @@ xmbuf_map_page(
return -ENOMEM;
}
- error = shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio, SGP_CACHE);
+ error = shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio, SGP_CACHE);
if (error)
return error;
diff --git a/fs/xfs/xfs_exchrange.c b/fs/xfs/xfs_exchrange.c
index d0889190ab7f..75cb53f090d1 100644
--- a/fs/xfs/xfs_exchrange.c
+++ b/fs/xfs/xfs_exchrange.c
@@ -829,9 +829,9 @@ xfs_ioc_exchange_range(
fxr.flags = args.flags;
file1 = fdget(args.file1_fd);
- if (!file1.file)
+ if (!fd_file(file1))
return -EBADF;
- fxr.file1 = file1.file;
+ fxr.file1 = fd_file(file1);
error = xfs_exchange_range(&fxr);
fdput(file1);
@@ -935,9 +935,9 @@ xfs_ioc_commit_range(
fxr.file2_ctime.tv_nsec = kern_f->file2_ctime_nsec;
file1 = fdget(args.file1_fd);
- if (!file1.file)
+ if (fd_empty(file1))
return -EBADF;
- fxr.file1 = file1.file;
+ fxr.file1 = fd_file(file1);
error = xfs_exchange_range(&fxr);
fdput(file1);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index e97d789495a5..412b1d71b52b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -760,7 +760,7 @@ write_retry:
trace_xfs_file_buffered_write(iocb, from);
ret = iomap_file_buffered_write(iocb, from,
- &xfs_buffered_write_iomap_ops);
+ &xfs_buffered_write_iomap_ops, NULL);
/*
* If we hit a space limit, try to free up some lingering preallocated
diff --git a/fs/xfs/xfs_handle.c b/fs/xfs/xfs_handle.c
index cf5acbd3c7ca..49e5e5f04e60 100644
--- a/fs/xfs/xfs_handle.c
+++ b/fs/xfs/xfs_handle.c
@@ -85,16 +85,16 @@ xfs_find_handle(
int hsize;
xfs_handle_t handle;
struct inode *inode;
- struct fd f = {NULL};
+ struct fd f = EMPTY_FD;
struct path path;
int error;
struct xfs_inode *ip;
if (cmd == XFS_IOC_FD_TO_HANDLE) {
f = fdget(hreq->fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- inode = file_inode(f.file);
+ inode = file_inode(fd_file(f));
} else {
error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
if (error)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 20d9924f28c2..a680e5b82672 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -100,7 +100,8 @@ xfs_inode_alloc(
/* VFS doesn't initialise i_mode! */
VFS_I(ip)->i_mode = 0;
- mapping_set_large_folios(VFS_I(ip)->i_mapping);
+ mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
+ M_IGEO(mp)->min_folio_order);
XFS_STATS_INC(mp, vn_active);
ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -360,7 +361,8 @@ xfs_reinit_inode(
inode->i_uid = uid;
inode->i_gid = gid;
inode->i_state = state;
- mapping_set_large_folios(inode->i_mapping);
+ mapping_set_folio_min_order(inode->i_mapping,
+ M_IGEO(mp)->min_folio_order);
return error;
}
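Both inode-initialisation paths now request a minimum folio order rather than merely allowing large folios, so a filesystem whose block size exceeds the page size never sees a folio smaller than one block. A hedged sketch of the order computation these call sites rely on; the demo_* name is illustrative::

        /* Require page cache folios of at least one filesystem block. */
        static unsigned int demo_min_folio_order(unsigned int blocklog)
        {
                return blocklog > PAGE_SHIFT ? blocklog - PAGE_SHIFT : 0;
        }

        /* e.g. 16k blocks on a 4k-page machine => order 2 (4-page folios) */
        mapping_set_folio_min_order(inode->i_mapping,
                                    demo_min_folio_order(blocklog));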
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 7226d27e8afc..a20d426ef021 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -886,33 +886,33 @@ xfs_ioc_swapext(
/* Pull information for the target fd */
f = fdget((int)sxp->sx_fdtarget);
- if (!f.file) {
+ if (!fd_file(f)) {
error = -EINVAL;
goto out;
}
- if (!(f.file->f_mode & FMODE_WRITE) ||
- !(f.file->f_mode & FMODE_READ) ||
- (f.file->f_flags & O_APPEND)) {
+ if (!(fd_file(f)->f_mode & FMODE_WRITE) ||
+ !(fd_file(f)->f_mode & FMODE_READ) ||
+ (fd_file(f)->f_flags & O_APPEND)) {
error = -EBADF;
goto out_put_file;
}
tmp = fdget((int)sxp->sx_fdtmp);
- if (!tmp.file) {
+ if (!fd_file(tmp)) {
error = -EINVAL;
goto out_put_file;
}
- if (!(tmp.file->f_mode & FMODE_WRITE) ||
- !(tmp.file->f_mode & FMODE_READ) ||
- (tmp.file->f_flags & O_APPEND)) {
+ if (!(fd_file(tmp)->f_mode & FMODE_WRITE) ||
+ !(fd_file(tmp)->f_mode & FMODE_READ) ||
+ (fd_file(tmp)->f_flags & O_APPEND)) {
error = -EBADF;
goto out_put_tmp_file;
}
- if (IS_SWAPFILE(file_inode(f.file)) ||
- IS_SWAPFILE(file_inode(tmp.file))) {
+ if (IS_SWAPFILE(file_inode(fd_file(f))) ||
+ IS_SWAPFILE(file_inode(fd_file(tmp)))) {
error = -EINVAL;
goto out_put_tmp_file;
}
@@ -922,14 +922,14 @@ xfs_ioc_swapext(
* before we cast and access them as XFS structures as we have no
* control over what the user passes us here.
*/
- if (f.file->f_op != &xfs_file_operations ||
- tmp.file->f_op != &xfs_file_operations) {
+ if (fd_file(f)->f_op != &xfs_file_operations ||
+ fd_file(tmp)->f_op != &xfs_file_operations) {
error = -EINVAL;
goto out_put_tmp_file;
}
- ip = XFS_I(file_inode(f.file));
- tip = XFS_I(file_inode(tmp.file));
+ ip = XFS_I(file_inode(fd_file(f)));
+ tip = XFS_I(file_inode(fd_file(tmp)));
if (ip->i_mount != tip->i_mount) {
error = -EINVAL;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 72c981e3dc92..1e11f48814c0 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1208,14 +1208,14 @@ out_unlock:
return error;
}
-static int
+static void
xfs_buffered_write_delalloc_punch(
struct inode *inode,
loff_t offset,
- loff_t length)
+ loff_t length,
+ struct iomap *iomap)
{
xfs_bmap_punch_delalloc_range(XFS_I(inode), offset, offset + length);
- return 0;
}
static int
@@ -1227,17 +1227,8 @@ xfs_buffered_write_iomap_end(
unsigned flags,
struct iomap *iomap)
{
-
- struct xfs_mount *mp = XFS_M(inode->i_sb);
- int error;
-
- error = iomap_file_buffered_write_punch_delalloc(inode, iomap, offset,
- length, written, &xfs_buffered_write_delalloc_punch);
- if (error && !xfs_is_shutdown(mp)) {
- xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
- __func__, XFS_I(inode)->i_ino);
- return error;
- }
+ iomap_file_buffered_write_punch_delalloc(inode, offset, length, written,
+ flags, iomap, &xfs_buffered_write_delalloc_punch);
return 0;
}
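With iomap now driving the delalloc cleanup (including its own error reporting), the filesystem callback shrinks to a void range-punch primitive that also receives the iomap. A hedged sketch of the callback shape for a hypothetical filesystem; the myfs_* names are illustrative::

        static void myfs_write_delalloc_punch(struct inode *inode, loff_t offset,
                                              loff_t length, struct iomap *iomap)
        {
                /* drop delayed allocations over [offset, offset + length) */
                myfs_punch_delalloc_range(inode, offset, offset + length);
        }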
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 1cdc8034f54d..ee79cf161312 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -567,7 +567,7 @@ xfs_stat_blksize(
return 1U << mp->m_allocsize_log;
}
- return PAGE_SIZE;
+ return max_t(uint32_t, PAGE_SIZE, mp->m_sb.sb_blocksize);
}
STATIC int
@@ -870,16 +870,6 @@ xfs_setattr_size(
error = xfs_zero_range(ip, oldsize, newsize - oldsize,
&did_zeroing);
} else {
- /*
- * iomap won't detect a dirty page over an unwritten block (or a
- * cow block over a hole) and subsequently skips zeroing the
- * newly post-EOF portion of the page. Flush the new EOF to
- * convert the block before the pagecache truncate.
- */
- error = filemap_write_and_wait_range(inode->i_mapping, newsize,
- newsize);
- if (error)
- return error;
error = xfs_truncate_page(ip, newsize, &did_zeroing);
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1a74fe22672e..ec766b4bc853 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2128,7 +2128,7 @@ xlog_recover_add_to_cont_trans(
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
- ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL);
+ ptr = kvrealloc(old_ptr, len + old_len, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
memcpy(&ptr[old_len], dp, len);
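kvrealloc() drops its old-size parameter and now matches krealloc() semantics, with the allocator tracking the previous size itself. A one-line sketch of the updated call::

        /* before: ptr = kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL); */
        ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL);
        if (!ptr)
                return -ENOMEM; /* old_ptr is still valid here */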
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 460f93a9ce00..1fdd79c5bfa0 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -132,11 +132,15 @@ xfs_sb_validate_fsb_count(
xfs_sb_t *sbp,
uint64_t nblocks)
{
- ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
+ uint64_t max_bytes;
+
ASSERT(sbp->sb_blocklog >= BBSHIFT);
+ if (check_shl_overflow(nblocks, sbp->sb_blocklog, &max_bytes))
+ return -EFBIG;
+
/* Limited by ULONG_MAX of page cache index */
- if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
+ if (max_bytes >> PAGE_SHIFT > ULONG_MAX)
return -EFBIG;
return 0;
}
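The size check now computes the byte count explicitly and lets check_shl_overflow() from <linux/overflow.h> (which returns true when the shift overflows) catch block counts too large to express, instead of assuming block size <= page size. A hedged standalone sketch::

        #include <linux/overflow.h>

        /* Reject sizes whose byte count or page cache index overflows. */
        static int demo_validate_size(u64 nblocks, unsigned int blocklog)
        {
                u64 max_bytes;

                if (check_shl_overflow(nblocks, blocklog, &max_bytes))
                        return -EFBIG;
                if (max_bytes >> PAGE_SHIFT > ULONG_MAX)
                        return -EFBIG;
                return 0;
        }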
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 26767745d9fd..fbb3a1594c0d 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1638,16 +1638,28 @@ xfs_fs_fill_super(
goto out_free_sb;
}
- /*
- * Until this is fixed only page-sized or smaller data blocks work.
- */
if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
- xfs_warn(mp,
- "File system with blocksize %d bytes. "
- "Only pagesize (%ld) or less will currently work.",
+ size_t max_folio_size = mapping_max_folio_size_supported();
+
+ if (!xfs_has_crc(mp)) {
+ xfs_warn(mp,
+"V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
mp->m_sb.sb_blocksize, PAGE_SIZE);
- error = -ENOSYS;
- goto out_free_sb;
+ error = -ENOSYS;
+ goto out_free_sb;
+ }
+
+ if (mp->m_sb.sb_blocksize > max_folio_size) {
+ xfs_warn(mp,
+"block size (%u bytes) not supported; Only block size (%zu) or less is supported",
+ mp->m_sb.sb_blocksize, max_folio_size);
+ error = -ENOSYS;
+ goto out_free_sb;
+ }
+
+ xfs_warn(mp,
+"EXPERIMENTAL: V5 Filesystem with Large Block Size (%d bytes) enabled.",
+ mp->m_sb.sb_blocksize);
}
/* Ensure this filesystem fits in the page cache limits */
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 3b103715acc9..35166c92420c 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -563,7 +563,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
if (ret <= 0)
goto inode_unlock;
- ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops, NULL);
if (ret == -EIO)
zonefs_io_error(inode, true);
diff --git a/fs/zonefs/sysfs.c b/fs/zonefs/sysfs.c
index 8ccb65c2b419..ff9a688f1f9c 100644
--- a/fs/zonefs/sysfs.c
+++ b/fs/zonefs/sysfs.c
@@ -92,6 +92,7 @@ int zonefs_sysfs_register(struct super_block *sb)
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
int ret;
+ super_set_sysfs_name_id(sb);
init_completion(&sbi->s_kobj_unregister);
ret = kobject_init_and_add(&sbi->s_kobj, &zonefs_sb_ktype,
zonefs_sysfs_root, "%s", sb->s_id);
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 4dbb177d1150..6eea3b3c1e65 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
- * and arch_unmap to be included in asm-FOO/mmu_context.h for any
- * arch FOO which doesn't need to hook these.
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap
+ * to be included in asm-FOO/mmu_context.h for any arch FOO which
+ * doesn't need to hook these.
*/
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H
@@ -17,11 +17,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/include/asm-generic/mmzone.h b/include/asm-generic/mmzone.h
new file mode 100644
index 000000000000..2ab5193e8394
--- /dev/null
+++ b/include/asm-generic/mmzone.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_MMZONE_H
+#define _ASM_GENERIC_MMZONE_H
+
+#endif
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
index c32e0cf23c90..e063d6487f66 100644
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -32,10 +32,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
void __init arch_numa_init(void);
int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-void __init numa_set_distance(int from, int to, int distance);
-void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
-int __init early_cpu_to_node(int cpu);
+int early_cpu_to_node(int cpu);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
@@ -51,4 +49,8 @@ static inline int early_cpu_to_node(int cpu) { return 0; }
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_NUMA_EMU
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
+#endif
+
#endif /* __ASM_GENERIC_NUMA_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 1ae44793132a..eeadbaeccf88 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -133,6 +133,7 @@
*(__dl_sched_class) \
*(__rt_sched_class) \
*(__fair_sched_class) \
+ *(__ext_sched_class) \
*(__idle_sched_class) \
__sched_class_lowest = .;
@@ -918,6 +919,10 @@
#define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x)
+#define RUNTIME_CONST_VARIABLES \
+ RUNTIME_CONST(shift, d_hash_shift) \
+ RUNTIME_CONST(ptr, dentry_hashtable)
+
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
. = ALIGN(8); \
diff --git a/include/linux/einj-cxl.h b/include/cxl/einj.h
index 624ff6ff41f9..624ff6ff41f9 100644
--- a/include/linux/einj-cxl.h
+++ b/include/cxl/einj.h
diff --git a/include/linux/cxl-event.h b/include/cxl/event.h
index 0bea1afbd747..0bea1afbd747 100644
--- a/include/linux/cxl-event.h
+++ b/include/cxl/event.h
diff --git a/include/cxl/mailbox.h b/include/cxl/mailbox.h
new file mode 100644
index 000000000000..bacd111e75f1
--- /dev/null
+++ b/include/cxl/mailbox.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. */
+#ifndef __CXL_MBOX_H__
+#define __CXL_MBOX_H__
+#include <linux/rcuwait.h>
+
+struct cxl_mbox_cmd;
+
+/**
+ * struct cxl_mailbox - context for CXL mailbox operations
+ * @host: device that hosts the mailbox
+ * @payload_size: Size of space for payload
+ * (CXL 3.1 8.2.8.4.3 Mailbox Capabilities Register)
+ * @mbox_mutex: mutex protects device mailbox and firmware
+ * @mbox_wait: rcuwait for mailbox
+ * @mbox_send: @host specific transport for transmitting mailbox commands
+ */
+struct cxl_mailbox {
+ struct device *host;
+ size_t payload_size;
+ struct mutex mbox_mutex; /* lock to protect mailbox context */
+ struct rcuwait mbox_wait;
+ int (*mbox_send)(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd);
+};
+
+int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host);
+
+#endif
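A hedged sketch of how a driver might wire up the new context; only cxl_mailbox_init() and the mbox_send hook come from the header above, and the demo_* transport is illustrative::

        #include <cxl/mailbox.h>

        static int demo_send(struct cxl_mailbox *mbox, struct cxl_mbox_cmd *cmd)
        {
                /* program doorbell/registers for the transport here */
                return 0;
        }

        static int demo_setup(struct cxl_mailbox *mbox, struct device *host)
        {
                int rc = cxl_mailbox_init(mbox, host);

                if (rc)
                        return rc;
                mbox->mbox_send = demo_send;
                return 0;
        }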
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 173548c6473a..a6f8b098c56f 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -1543,6 +1543,10 @@ enum drm_dp_phy {
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+#define DP_OUI_PHY_REPEATER1 0xf003d /* 1.3 */
+#define DP_OUI_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_OUI_PHY_REPEATER1)
+
#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
#define DP_FEC_BASE(dp_phy) \
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index ea03e1dd26ba..279624833ea9 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -112,6 +112,7 @@ struct drm_dp_vsc_sdp {
* @target_rr: Target Refresh
* @duration_incr_ms: Successive frame duration increase
* @duration_decr_ms: Successive frame duration decrease
+ * @target_rr_divider: Target refresh rate divider
* @mode: Adaptive Sync Operation Mode
*/
struct drm_dp_as_sdp {
@@ -657,6 +658,8 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy);
+
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index cfe096389d94..f6a1cbb0f600 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -244,18 +244,18 @@ struct drm_dp_mst_branch {
bool link_address_sent;
/* global unique identifier to identify branch devices */
- u8 guid[16];
+ guid_t guid;
};
struct drm_dp_nak_reply {
- u8 guid[16];
+ guid_t guid;
u8 reason;
u8 nak_data;
};
struct drm_dp_link_address_ack_reply {
- u8 guid[16];
+ guid_t guid;
u8 nports;
struct drm_dp_link_addr_reply_port {
bool input_port;
@@ -265,7 +265,7 @@ struct drm_dp_link_address_ack_reply {
bool ddps;
bool legacy_device_plug_status;
u8 dpcd_revision;
- u8 peer_guid[16];
+ guid_t peer_guid;
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
} ports[16];
@@ -348,7 +348,7 @@ struct drm_dp_allocate_payload_ack_reply {
};
struct drm_dp_connection_status_notify {
- u8 guid[16];
+ guid_t guid;
u8 port_number;
bool legacy_device_plug_status;
bool displayport_device_plug_status;
@@ -425,7 +425,7 @@ struct drm_dp_query_payload {
struct drm_dp_resource_status_notify {
u8 port_number;
- u8 guid[16];
+ guid_t guid;
u16 available_pbn;
};
@@ -885,6 +885,8 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
+void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr);
+
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index 41c78b7d712c..038ccb02f9a3 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -52,11 +52,10 @@
#if IS_ENABLED(CONFIG_DRM_ACCEL)
+extern struct xarray accel_minors_xa;
+
void accel_core_exit(void);
int accel_core_init(void);
-void accel_minor_remove(int index);
-int accel_minor_alloc(void);
-void accel_minor_replace(struct drm_minor *minor, int index);
void accel_set_device_instance_params(struct device *kdev, int index);
int accel_open(struct inode *inode, struct file *filp);
void accel_debugfs_init(struct drm_device *dev);
@@ -74,19 +73,6 @@ static inline int __init accel_core_init(void)
return 0;
}
-static inline void accel_minor_remove(int index)
-{
-}
-
-static inline int accel_minor_alloc(void)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void accel_minor_replace(struct drm_minor *minor, int index)
-{
-}
-
static inline void accel_set_device_instance_params(struct device *kdev, int index)
{
}
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 4d7f4c5f2001..31ca88deb10d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -460,7 +460,7 @@ struct drm_atomic_state {
*
* Used for signaling unbound planes/connectors.
* When a connector or plane is not bound to any CRTC, it's still important
- * to preserve linearity to prevent the atomic states from being freed to early.
+ * to preserve linearity to prevent the atomic states from being freed too early.
*
* This commit (if set) is not bound to any CRTC, but will be completed when
* drm_atomic_helper_commit_hw_done() is called.
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index c754651044d4..e3fa43291f44 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -471,14 +471,6 @@ enum drm_privacy_screen_status {
*
* DP definitions come from the DP v2.0 spec
* HDMI definitions come from the CTA-861-H spec
- *
- * A note on YCC and RGB variants:
- *
- * Since userspace is not aware of the encoding on the wire
- * (RGB or YCbCr), drivers are free to pick the appropriate
- * variant, regardless of what userspace selects. E.g., if
- * BT2020_RGB is selected by userspace a driver will pick
- * BT2020_YCC if the encoding on the wire is YUV444 or YUV420.
*
* @DRM_MODE_COLORIMETRY_DEFAULT:
* Driver specific behavior.
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 63767cf24371..c91f87b5242d 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -213,8 +213,9 @@ struct drm_device {
* This can be set to true if the hardware has a working vblank counter
* with high-precision timestamping (otherwise there are races) and the
* driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
- * appropriately. See also @max_vblank_count and
- * &drm_crtc_funcs.get_vblank_counter.
+ * appropriately. Also, see @max_vblank_count,
+ * &drm_crtc_funcs.get_vblank_counter and
+ * &drm_vblank_crtc_config.disable_immediate.
*/
bool vblank_disable_immediate;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index cd37936c3926..02ea4e3248fd 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -229,34 +229,6 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
/**
- * @lastclose:
- *
- * Called when the last &struct drm_file has been closed and there's
- * currently no userspace client for the &struct drm_device.
- *
- * Modern drivers should only use this to force-restore the fbdev
- * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked().
- * Anything else would indicate there's something seriously wrong.
- * Modern drivers can also use this to execute delayed power switching
- * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
- * infrastructure.
- *
- * This is called after @postclose hook has been called.
- *
- * NOTE:
- *
- * All legacy drivers use this callback to de-initialize the hardware.
- * This is purely because of the shadow-attach model, where the DRM
- * kernel driver does not really own the hardware. Instead ownershipe is
- * handled with the help of userspace through an inheritedly racy dance
- * to set/unset the VT into raw mode.
- *
- * Legacy drivers initialize the hardware in the @firstopen callback,
- * which isn't even called for modern drivers.
- */
- void (*lastclose) (struct drm_device *);
-
- /**
* @unload:
*
* Reverse the effects of the driver load callback. Ideally,
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 6bdfa254a1c1..eaac5e665892 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -440,8 +440,6 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
int drm_edid_header_is_valid(const void *edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
- bool *edid_corrupt);
bool drm_edid_is_valid(struct edid *edid);
void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 375737fd6c36..699f2790b9ac 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -271,9 +271,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
-
void drm_fb_helper_lastclose(struct drm_device *dev);
-void drm_fb_helper_output_poll_changed(struct drm_device *dev);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -401,10 +399,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
static inline void drm_fb_helper_lastclose(struct drm_device *dev)
{
}
-
-static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
-{
-}
#endif
#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index ab230d3af138..8c0030c77308 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -45,6 +45,8 @@ struct drm_printer;
struct device;
struct file;
+extern struct xarray drm_minors_xa;
+
/*
* FIXME: Not sure we want to have drm_minor here in the end, but to avoid
* header include loops we need it here for now.
@@ -434,6 +436,9 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
void drm_file_update_pid(struct drm_file *);
+struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
int drm_open(struct inode *inode, struct file *filp);
int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index ef8bc8d72039..1922188f00e8 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -25,8 +25,9 @@
#ifndef DRM_FIXED_H
#define DRM_FIXED_H
-#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/types.h>
+#include <linux/wordpart.h>
typedef union dfixed {
u32 full;
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 0f520eeeaa8e..f725f8654611 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -365,6 +365,18 @@ void mipi_dsi_dcs_set_display_off_multi(struct mipi_dsi_multi_context *ctx);
void mipi_dsi_dcs_set_display_on_multi(struct mipi_dsi_multi_context *ctx);
void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx,
enum mipi_dsi_dcs_tear_mode mode);
+void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx,
+ u16 brightness);
+void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx,
+ u8 format);
+void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
+ u16 scanline);
/**
* mipi_dsi_generic_write_seq - transmit data using a generic write packet
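The new *_multi() helpers extend the error-accumulating mipi_dsi_multi_context pattern: each call becomes a no-op once an error is recorded, so panel init sequences need only a single check at the end. A hedged usage sketch; the command values are illustrative::

        static int demo_panel_init(struct mipi_dsi_device *dsi)
        {
                struct mipi_dsi_multi_context ctx = { .dsi = dsi };

                mipi_dsi_dcs_soft_reset_multi(&ctx);
                mipi_dsi_dcs_set_pixel_format_multi(&ctx, 0x77);
                mipi_dsi_dcs_set_display_brightness_multi(&ctx, 0x1ff);
                mipi_dsi_turn_on_peripheral_multi(&ctx);

                return ctx.accum_err;   /* first error, or 0 */
        }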
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index ab0f167474b1..271765e2e9f2 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -98,22 +98,6 @@ struct drm_mode_config_funcs {
const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd);
/**
- * @output_poll_changed:
- *
- * Callback used by helpers to inform the driver of output configuration
- * changes.
- *
- * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
- * to call this hook to inform the fbdev helper of output changes.
- *
- * This hook is deprecated, drivers should instead implement fbdev
- * support with struct drm_client, which takes care of any necessary
- * hotplug event forwarding already without further involvement by
- * the driver.
- */
- void (*output_poll_changed)(struct drm_device *dev);
-
- /**
* @mode_valid:
*
* Device specific validation of display modes. Can be used to reject
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index 73bb3f3d9ed9..54085d5d05c3 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -1,4 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+/*
+ * Copyright (c) 2024 Intel
+ * Copyright (c) 2024 Red Hat
+ */
+
#ifndef __DRM_PANIC_H__
#define __DRM_PANIC_H__
@@ -8,9 +14,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
-/*
- * Copyright (c) 2024 Intel
- */
/**
* struct drm_scanout_buffer - DRM scanout buffer
@@ -146,16 +149,4 @@ struct drm_scanout_buffer {
#define drm_panic_unlock(dev, flags) \
raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags)
-#ifdef CONFIG_DRM_PANIC
-
-void drm_panic_register(struct drm_device *dev);
-void drm_panic_unregister(struct drm_device *dev);
-
-#else
-
-static inline void drm_panic_register(struct drm_device *dev) {}
-static inline void drm_panic_unregister(struct drm_device *dev) {}
-
-#endif
-
#endif /* __DRM_PANIC_H__ */
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 2a1d01e5b56b..fa085c44d4ca 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -69,6 +69,9 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags);
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 5d9dff5149c9..d2676831d765 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -221,7 +221,8 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
/**
* struct drm_print_iterator - local struct used with drm_printer_coredump
- * @data: Pointer to the devcoredump output buffer
+ * @data: Pointer to the devcoredump output buffer, can be NULL if using
+ * drm_printer_coredump to determine size of devcoredump
* @start: The offset within the buffer to start writing
* @remain: The number of bytes to write for this iteration
*/
@@ -266,6 +267,57 @@ struct drm_print_iterator {
* coredump_read, ...)
* }
*
+ * The above example has a time complexity of O(N^2), where N is the size of the
+ * devcoredump. This is acceptable for small devcoredumps but scales poorly for
+ * larger ones.
+ *
+ * Another use case for drm_coredump_printer is to capture the devcoredump into
+ * a saved buffer before the dev_coredump() callback. This involves two passes:
+ * one to determine the size of the devcoredump and another to print it to a
+ * buffer. Then, in dev_coredump(), copy from the saved buffer into the
+ * devcoredump read buffer.
+ *
+ * For example::
+ *
+ * char *devcoredump_saved_buffer;
+ *
+ * ssize_t __coredump_print(char *buffer, ssize_t count, ...)
+ * {
+ * struct drm_print_iterator iter;
+ * struct drm_printer p;
+ *
+ * iter.data = buffer;
+ * iter.start = 0;
+ * iter.remain = count;
+ *
+ * p = drm_coredump_printer(&iter);
+ *
+ * drm_printf(&p, "foo=%d\n", foo);
+ * ...
+ * return count - iter.remain;
+ * }
+ *
+ * void coredump_print(...)
+ * {
+ * ssize_t count;
+ *
+ * count = __coredump_print(NULL, INT_MAX, ...);
+ * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL);
+ * __coredump_print(devcoredump_saved_buffer, count, ...);
+ * }
+ *
+ * void coredump_read(char *buffer, loff_t offset, size_t count,
+ * void *data, size_t datalen)
+ * {
+ * ...
+ * memcpy(buffer, devcoredump_saved_buffer + offset, count);
+ * ...
+ * }
+ *
+ * The above example has a time complexity of O(2*N), where N is the size of the
+ * devcoredump. This scales better than the previous example for larger
+ * devcoredumps.
+ *
* RETURNS:
* The &drm_printer object
*/
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 73fcb899a01d..46f09cf68458 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -238,6 +238,21 @@ static inline void drm_rect_fp_to_int(struct drm_rect *dst,
drm_rect_height(src) >> 16);
}
+/**
+ * drm_rect_overlap - Check if two rectangles overlap
+ * @a: first rectangle
+ * @b: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles overlap, %false otherwise.
+ */
+static inline bool drm_rect_overlap(const struct drm_rect *a,
+ const struct drm_rect *b)
+{
+ return (a->x2 > b->x1 && b->x2 > a->x1 &&
+ a->y2 > b->y1 && b->y2 > a->y1);
+}
+
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
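drm_rect_overlap() uses strict inequalities, so rectangles that merely share an edge do not overlap. A tiny hedged usage sketch::

        struct drm_rect a = DRM_RECT_INIT(0, 0, 100, 100);      /* x, y, w, h */
        struct drm_rect b = DRM_RECT_INIT(100, 0, 50, 50);      /* abuts a */

        WARN_ON(drm_rect_overlap(&a, &b));      /* edge-adjacent: no overlap */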
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c8f829b4307c..151ab1e85b1b 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -79,6 +79,31 @@ struct drm_pending_vblank_event {
};
/**
+ * struct drm_vblank_crtc_config - vblank configuration for a CRTC
+ */
+struct drm_vblank_crtc_config {
+ /**
+ * @offdelay_ms: Vblank off delay in ms, used to determine how long
+ * &drm_vblank_crtc.disable_timer waits before disabling.
+ *
+ * Defaults to the value of drm_vblank_offdelay in drm_crtc_vblank_on().
+ */
+ int offdelay_ms;
+
+ /**
+ * @disable_immediate: See &drm_device.vblank_disable_immediate
+ * for the exact semantics of immediate vblank disabling.
+ *
+ * Additionally, this tracks the disable immediate value per crtc, just
+ * in case it needs to differ from the default value for a given device.
+ *
+ * Defaults to the value of &drm_device.vblank_disable_immediate in
+ * drm_crtc_vblank_on().
+ */
+ bool disable_immediate;
+};
+
+/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
@@ -99,8 +124,8 @@ struct drm_vblank_crtc {
wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
- * hysteresis logic. Vblank disabling is controlled through the
- * drm_vblank_offdelay module option and the setting of the
+ * hysteresis logic. Vblank disabling is controlled through
+ * &drm_vblank_crtc_config.offdelay_ms and the setting of the
* &drm_device.max_vblank_count value.
*/
struct timer_list disable_timer;
@@ -199,6 +224,12 @@ struct drm_vblank_crtc {
struct drm_display_mode hwmode;
/**
+ * @config: Stores vblank configuration values for a given CRTC.
+ * Also, see drm_crtc_vblank_on_config().
+ */
+ struct drm_vblank_crtc_config config;
+
+ /**
* @enabled: Tracks the enabling state of the corresponding &drm_crtc to
* avoid double-disabling and hence corrupting saved state. Needed by
* drivers not using atomic KMS, since those might go through their CRTC
@@ -247,6 +278,8 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
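drm_crtc_vblank_on_config() lets a driver override the disable hysteresis per CRTC at enable time instead of relying on the global defaults. A hedged sketch of an enable path; the offdelay semantics follow @offdelay_ms above and the values are illustrative::

        static void demo_crtc_enable(struct drm_crtc *crtc)
        {
                const struct drm_vblank_crtc_config config = {
                        .offdelay_ms = 0,       /* assumed: no auto-disable */
                        .disable_immediate = false,
                };

                /* ... program the hardware ... */
                drm_crtc_vblank_on_config(crtc, &config);
        }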
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 5acc64954a88..fe8edb917360 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -579,7 +579,7 @@ bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_start(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 6ccf96c91f3a..7b56d1ca36d7 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -190,6 +190,41 @@ struct ttm_operation_ctx {
uint64_t bytes_moved;
};
+struct ttm_lru_walk;
+
+/** struct ttm_lru_walk_ops - Operations for a LRU walk. */
+struct ttm_lru_walk_ops {
+ /**
+ * process_bo - Process this bo.
+ * @walk: struct ttm_lru_walk describing the walk.
+ * @bo: A locked and referenced buffer object.
+ *
+ * Return: Negative error code on error, User-defined positive value
+ * (typically, but not always, size of the processed bo) on success.
+ * On success, the returned values are summed by the walk and the
+ * walk exits when its target is met.
+ * 0 also indicates success, -EBUSY means this bo was skipped.
+ */
+ s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
+};
+
+/**
+ * struct ttm_lru_walk - Structure describing a LRU walk.
+ */
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @ctx: Pointer to the struct ttm_operation_ctx. */
+ struct ttm_operation_ctx *ctx;
+ /** @ticket: The struct ww_acquire_ctx if any. */
+ struct ww_acquire_ctx *ticket;
+ /** @trylock_only: Only use trylock for locking. */
+ bool trylock_only;
+};
+
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target);
+
/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
@@ -378,15 +413,14 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags);
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket);
+int ttm_bo_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
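Eviction and swapout are rebuilt on this generic LRU walk: the walker locks and references each buffer object, hands it to process_bo(), and sums positive return values until the caller's target is met. A hedged sketch of a minimal ops implementation; the demo_* names are illustrative::

        static s64 demo_process_bo(struct ttm_lru_walk *walk,
                                   struct ttm_buffer_object *bo)
        {
                if (demo_bo_is_busy(bo))
                        return -EBUSY;          /* skip this bo, keep walking */

                /* ... reclaim the bo's backing store ... */
                return bo->base.size;           /* progress credited to target */
        }

        static const struct ttm_lru_walk_ops demo_ops = {
                .process_bo = demo_process_bo,
        };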
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 69769355139f..be034be56ba1 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -49,6 +49,43 @@ struct io_mapping;
struct sg_table;
struct scatterlist;
+/**
+ * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses
+ */
+enum ttm_lru_item_type {
+ /** @TTM_LRU_RESOURCE: The resource subclass */
+ TTM_LRU_RESOURCE,
+ /** @TTM_LRU_HITCH: The iterator hitch subclass */
+ TTM_LRU_HITCH
+};
+
+/**
+ * struct ttm_lru_item - The TTM lru list node base class
+ * @link: The list link
+ * @type: The subclass type
+ */
+struct ttm_lru_item {
+ struct list_head link;
+ enum ttm_lru_item_type type;
+};
+
+/**
+ * ttm_lru_item_init() - initialize a struct ttm_lru_item
+ * @item: The item to initialize
+ * @type: The subclass type
+ */
+static inline void ttm_lru_item_init(struct ttm_lru_item *item,
+ enum ttm_lru_item_type type)
+{
+ item->type = type;
+ INIT_LIST_HEAD(&item->link);
+}
+
+static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
+{
+ return item->type == TTM_LRU_RESOURCE;
+}
+
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
@@ -217,19 +254,20 @@ struct ttm_resource {
/**
* @lru: Least recently used list, see &ttm_resource_manager.lru
*/
- struct list_head lru;
+ struct ttm_lru_item lru;
};
/**
- * struct ttm_resource_cursor
+ * ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource
+ * @item: The struct ttm_lru_item to downcast
*
- * @priority: the current priority
- *
- * Cursor to iterate over the resources in a manager.
+ * Return: Pointer to the embedding struct ttm_resource
*/
-struct ttm_resource_cursor {
- unsigned int priority;
-};
+static inline struct ttm_resource *
+ttm_lru_item_to_res(struct ttm_lru_item *item)
+{
+ return container_of(item, struct ttm_resource, lru);
+}
/**
* struct ttm_lru_bulk_move_pos
@@ -246,8 +284,9 @@ struct ttm_lru_bulk_move_pos {
/**
* struct ttm_lru_bulk_move
- *
* @pos: first/last lru entry for resources in each domain/priority
+ * @cursor_list: The list of cursors currently traversing any of
+ * the sublists of @pos. Protected by the ttm device's lru_lock.
*
* Container for the current bulk move state. Should be used with
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
@@ -257,9 +296,38 @@ struct ttm_lru_bulk_move_pos {
*/
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+ struct list_head cursor_list;
};
/**
+ * struct ttm_resource_cursor
+ * @man: The resource manager currently being iterated over
+ * @hitch: A hitch list node inserted before the next resource
+ * to iterate over.
+ * @bulk_link: A list link for the list of cursors traversing the
+ * bulk sublist of @bulk. Protected by the ttm device's lru_lock.
+ * @bulk: Pointer to struct ttm_lru_bulk_move whose subrange @hitch is
+ * inserted to. NULL if none. Never dereference this pointer since
+ * the struct ttm_lru_bulk_move object pointed to might have been
+ * freed. The pointer is only for comparison.
+ * @mem_type: The memory type of the LRU list being traversed.
+ * This field is valid iff @bulk != NULL.
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ struct ttm_resource_manager *man;
+ struct ttm_lru_item hitch;
+ struct list_head bulk_link;
+ struct ttm_lru_bulk_move *bulk;
+ unsigned int mem_type;
+ unsigned int priority;
+};
+
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
+
+/**
* struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
* struct sg_table backed struct ttm_resource.
* @base: Embedded struct ttm_kmap_iter providing the usage interface.
@@ -347,6 +415,8 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk);
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
@@ -389,9 +459,10 @@ struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor);
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res);
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor);
+
+struct ttm_resource *
+ttm_lru_first_res_or_null(struct list_head *head);
/**
* ttm_resource_manager_for_each_res - iterate over all resources
@@ -403,7 +474,7 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
*/
#define ttm_resource_manager_for_each_res(man, cursor, res) \
for (res = ttm_resource_manager_first(man, cursor); res; \
- res = ttm_resource_manager_next(man, cursor, res))
+ res = ttm_resource_manager_next(cursor))
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
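Because the cursor now embeds a hitch node on the LRU itself, iteration can survive lru_lock drops, but callers must finalise the cursor so the hitch is unlinked. A hedged sketch of the updated loop::

        struct ttm_resource_cursor cursor;
        struct ttm_resource *res;

        ttm_resource_manager_for_each_res(man, &cursor, res) {
                /* inspect res; the hitch keeps our place on the LRU */
        }
        ttm_resource_cursor_fini(&cursor);      /* unlink the hitch node */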
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 3e3972a814c1..6ede88c3992d 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -38,6 +38,10 @@
#define PMC_CPU (PMC_MAIN + 9)
#define PMC_MCK1 (PMC_MAIN + 10)
+/* SAM9X7 */
+#define PMC_PLLADIV2 (PMC_MAIN + 11)
+#define PMC_LVDSPLL (PMC_MAIN + 12)
+
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
#define AT91_PMC_LOCKA 1 /* PLLA Lock */
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index 08c82c22fa5f..607f23b83fa7 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -155,5 +155,12 @@
#define AUD_CLKID_SYSCLK_B_DIV 175
#define AUD_CLKID_SYSCLK_A_EN 176
#define AUD_CLKID_SYSCLK_B_EN 177
+#define AUD_CLKID_EARCRX 178
+#define AUD_CLKID_EARCRX_CMDC_SEL 179
+#define AUD_CLKID_EARCRX_CMDC_DIV 180
+#define AUD_CLKID_EARCRX_CMDC 181
+#define AUD_CLKID_EARCRX_DMAC_SEL 182
+#define AUD_CLKID_EARCRX_DMAC_DIV 183
+#define AUD_CLKID_EARCRX_DMAC 184
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/cirrus,ep9301-syscon.h b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
new file mode 100644
index 000000000000..6bb8f532e7d0
--- /dev/null
+++ b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+#define DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+
+#define EP93XX_CLK_PLL1 0
+#define EP93XX_CLK_PLL2 1
+
+#define EP93XX_CLK_FCLK 2
+#define EP93XX_CLK_HCLK 3
+#define EP93XX_CLK_PCLK 4
+
+#define EP93XX_CLK_UART 5
+#define EP93XX_CLK_SPI 6
+#define EP93XX_CLK_PWM 7
+#define EP93XX_CLK_USB 8
+
+#define EP93XX_CLK_M2M0 9
+#define EP93XX_CLK_M2M1 10
+
+#define EP93XX_CLK_M2P0 11
+#define EP93XX_CLK_M2P1 12
+#define EP93XX_CLK_M2P2 13
+#define EP93XX_CLK_M2P3 14
+#define EP93XX_CLK_M2P4 15
+#define EP93XX_CLK_M2P5 16
+#define EP93XX_CLK_M2P6 17
+#define EP93XX_CLK_M2P7 18
+#define EP93XX_CLK_M2P8 19
+#define EP93XX_CLK_M2P9 20
+
+#define EP93XX_CLK_UART1 21
+#define EP93XX_CLK_UART2 22
+#define EP93XX_CLK_UART3 23
+
+#define EP93XX_CLK_ADC 24
+#define EP93XX_CLK_ADC_EN 25
+
+#define EP93XX_CLK_KEYPAD 26
+
+#define EP93XX_CLK_VIDEO 27
+
+#define EP93XX_CLK_I2S_MCLK 28
+#define EP93XX_CLK_I2S_SCLK 29
+#define EP93XX_CLK_I2S_LRCLK 30
+
+#endif /* DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H */
diff --git a/include/dt-bindings/clock/nxp,imx95-clock.h b/include/dt-bindings/clock/nxp,imx95-clock.h
index 782662c3e740..b7a713a9ac8c 100644
--- a/include/dt-bindings/clock/nxp,imx95-clock.h
+++ b/include/dt-bindings/clock/nxp,imx95-clock.h
@@ -25,4 +25,7 @@
#define IMX95_CLK_DISPMIX_ENG0_SEL 0
#define IMX95_CLK_DISPMIX_ENG1_SEL 1
+#define IMX95_CLK_NETCMIX_ENETC0_RMII 0
+#define IMX95_CLK_NETCMIX_ENETC1_RMII 1
+
#endif /* __DT_BINDINGS_CLOCK_IMX95_H */
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
index 5b1416fcde6f..a2abf1995c34 100644
--- a/include/dt-bindings/clock/px30-cru.h
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -175,8 +175,6 @@
#define PCLK_CIF 352
#define PCLK_OTP_PHY 353
-#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
-
/* pmu-clocks indices */
#define PLL_GPLL 1
@@ -195,8 +193,6 @@
#define PCLK_GPIO0_PMU 20
#define PCLK_UART0_PMU 21
-#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
index 487b12c19db5..e364006aa6ea 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc8180x.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
@@ -248,6 +248,7 @@
#define GCC_USB3_SEC_CLKREF_CLK 238
#define GCC_UFS_MEM_CLKREF_EN 239
#define GCC_UFS_CARD_CLKREF_EN 240
+#define GPLL9 241
#define GCC_EMAC_BCR 0
#define GCC_GPU_BCR 1
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
index a96a9870ad59..99cc617e1e54 100644
--- a/include/dt-bindings/clock/rk3036-cru.h
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -94,8 +94,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h
index de550ea56eeb..138b6ce514dd 100644
--- a/include/dt-bindings/clock/rk3228-cru.h
+++ b/include/dt-bindings/clock/rk3228-cru.h
@@ -146,8 +146,6 @@
#define HCLK_S_CRYPTO 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index 33819acbfc56..c6034b01b050 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -195,8 +195,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h
index d97840f9ee2e..ce4cd72b9d3d 100644
--- a/include/dt-bindings/clock/rk3308-cru.h
+++ b/include/dt-bindings/clock/rk3308-cru.h
@@ -212,8 +212,6 @@
#define PCLK_CAN 233
#define PCLK_OWIRE 234
-#define CLK_NR_CLKS (PCLK_OWIRE + 1)
-
/* soft-reset indices */
/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index 555b4ff660ae..8885a2e98c65 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -201,8 +201,6 @@
#define HCLK_RGA 340
#define HCLK_HDCP 341
-#define CLK_NR_CLKS (HCLK_HDCP + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
index 83c72a163fd3..ebae3cbf8192 100644
--- a/include/dt-bindings/clock/rk3368-cru.h
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -182,8 +182,6 @@
#define HCLK_BUS 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE_B0 0
#define SRST_CORE_B1 1
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 39169d94a44e..4c90c7703a83 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -335,8 +335,6 @@
#define HCLK_SDIO_NOC 495
#define HCLK_SDIOAUDIO_NOC 496
-#define CLK_NR_CLKS (HCLK_SDIOAUDIO_NOC + 1)
-
/* pmu-clocks indices */
#define PLL_PPLL 1
@@ -378,8 +376,6 @@
#define PCLK_INTR_ARB_PMU 49
#define HCLK_NOC_PMU 50
-#define CLKPMU_NR_CLKS (HCLK_NOC_PMU + 1)
-
/* soft-reset indices */
/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rockchip,rk3576-cru.h b/include/dt-bindings/clock/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..25aed298ac2c
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3576-cru.h
@@ -0,0 +1,592 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_BPLL 0
+#define PLL_LPLL 1
+#define PLL_VPLL 2
+#define PLL_AUPLL 3
+#define PLL_CPLL 4
+#define PLL_GPLL 5
+#define PLL_PPLL 6
+#define ARMCLK_L 7
+#define ARMCLK_B 8
+
+/* cru clocks */
+#define CLK_CPLL_DIV20 9
+#define CLK_CPLL_DIV10 10
+#define CLK_GPLL_DIV8 11
+#define CLK_GPLL_DIV6 12
+#define CLK_CPLL_DIV4 13
+#define CLK_GPLL_DIV4 14
+#define CLK_SPLL_DIV2 15
+#define CLK_GPLL_DIV3 16
+#define CLK_CPLL_DIV2 17
+#define CLK_GPLL_DIV2 18
+#define CLK_SPLL_DIV1 19
+#define PCLK_TOP_ROOT 20
+#define ACLK_TOP 21
+#define HCLK_TOP 22
+#define CLK_AUDIO_FRAC_0 23
+#define CLK_AUDIO_FRAC_1 24
+#define CLK_AUDIO_FRAC_2 25
+#define CLK_AUDIO_FRAC_3 26
+#define CLK_UART_FRAC_0 27
+#define CLK_UART_FRAC_1 28
+#define CLK_UART_FRAC_2 29
+#define CLK_UART1_SRC_TOP 30
+#define CLK_AUDIO_INT_0 31
+#define CLK_AUDIO_INT_1 32
+#define CLK_AUDIO_INT_2 33
+#define CLK_PDM0_SRC_TOP 34
+#define CLK_PDM1_OUT 35
+#define CLK_GMAC0_125M_SRC 36
+#define CLK_GMAC1_125M_SRC 37
+#define LCLK_ASRC_SRC_0 38
+#define LCLK_ASRC_SRC_1 39
+#define REF_CLK0_OUT_PLL 40
+#define REF_CLK1_OUT_PLL 41
+#define REF_CLK2_OUT_PLL 42
+#define REFCLKO25M_GMAC0_OUT 43
+#define REFCLKO25M_GMAC1_OUT 44
+#define CLK_CIFOUT_OUT 45
+#define CLK_GMAC0_RMII_CRU 46
+#define CLK_GMAC1_RMII_CRU 47
+#define CLK_OTPC_AUTO_RD_G 48
+#define CLK_OTP_PHY_G 49
+#define CLK_MIPI_CAMERAOUT_M0 50
+#define CLK_MIPI_CAMERAOUT_M1 51
+#define CLK_MIPI_CAMERAOUT_M2 52
+#define MCLK_PDM0_SRC_TOP 53
+#define HCLK_AUDIO_ROOT 54
+#define HCLK_ASRC_2CH_0 55
+#define HCLK_ASRC_2CH_1 56
+#define HCLK_ASRC_4CH_0 57
+#define HCLK_ASRC_4CH_1 58
+#define CLK_ASRC_2CH_0 59
+#define CLK_ASRC_2CH_1 60
+#define CLK_ASRC_4CH_0 61
+#define CLK_ASRC_4CH_1 62
+#define MCLK_SAI0_8CH_SRC 63
+#define MCLK_SAI0_8CH 64
+#define HCLK_SAI0_8CH 65
+#define HCLK_SPDIF_RX0 66
+#define MCLK_SPDIF_RX0 67
+#define HCLK_SPDIF_RX1 68
+#define MCLK_SPDIF_RX1 69
+#define MCLK_SAI1_8CH_SRC 70
+#define MCLK_SAI1_8CH 71
+#define HCLK_SAI1_8CH 72
+#define MCLK_SAI2_2CH_SRC 73
+#define MCLK_SAI2_2CH 74
+#define HCLK_SAI2_2CH 75
+#define MCLK_SAI3_2CH_SRC 76
+#define MCLK_SAI3_2CH 77
+#define HCLK_SAI3_2CH 78
+#define MCLK_SAI4_2CH_SRC 79
+#define MCLK_SAI4_2CH 80
+#define HCLK_SAI4_2CH 81
+#define HCLK_ACDCDIG_DSM 82
+#define MCLK_ACDCDIG_DSM 83
+#define CLK_PDM1 84
+#define HCLK_PDM1 85
+#define MCLK_PDM1 86
+#define HCLK_SPDIF_TX0 87
+#define MCLK_SPDIF_TX0 88
+#define HCLK_SPDIF_TX1 89
+#define MCLK_SPDIF_TX1 90
+#define CLK_SAI1_MCLKOUT 91
+#define CLK_SAI2_MCLKOUT 92
+#define CLK_SAI3_MCLKOUT 93
+#define CLK_SAI4_MCLKOUT 94
+#define CLK_SAI0_MCLKOUT 95
+#define HCLK_BUS_ROOT 96
+#define PCLK_BUS_ROOT 97
+#define ACLK_BUS_ROOT 98
+#define HCLK_CAN0 99
+#define CLK_CAN0 100
+#define HCLK_CAN1 101
+#define CLK_CAN1 102
+#define CLK_KEY_SHIFT 103
+#define PCLK_I2C1 104
+#define PCLK_I2C2 105
+#define PCLK_I2C3 106
+#define PCLK_I2C4 107
+#define PCLK_I2C5 108
+#define PCLK_I2C6 109
+#define PCLK_I2C7 110
+#define PCLK_I2C8 111
+#define PCLK_I2C9 112
+#define PCLK_WDT_BUSMCU 113
+#define TCLK_WDT_BUSMCU 114
+#define ACLK_GIC 115
+#define CLK_I2C1 116
+#define CLK_I2C2 117
+#define CLK_I2C3 118
+#define CLK_I2C4 119
+#define CLK_I2C5 120
+#define CLK_I2C6 121
+#define CLK_I2C7 122
+#define CLK_I2C8 123
+#define CLK_I2C9 124
+#define PCLK_SARADC 125
+#define CLK_SARADC 126
+#define PCLK_TSADC 127
+#define CLK_TSADC 128
+#define PCLK_UART0 129
+#define PCLK_UART2 130
+#define PCLK_UART3 131
+#define PCLK_UART4 132
+#define PCLK_UART5 133
+#define PCLK_UART6 134
+#define PCLK_UART7 135
+#define PCLK_UART8 136
+#define PCLK_UART9 137
+#define PCLK_UART10 138
+#define PCLK_UART11 139
+#define SCLK_UART0 140
+#define SCLK_UART2 141
+#define SCLK_UART3 142
+#define SCLK_UART4 143
+#define SCLK_UART5 144
+#define SCLK_UART6 145
+#define SCLK_UART7 146
+#define SCLK_UART8 147
+#define SCLK_UART9 148
+#define SCLK_UART10 149
+#define SCLK_UART11 150
+#define PCLK_SPI0 151
+#define PCLK_SPI1 152
+#define PCLK_SPI2 153
+#define PCLK_SPI3 154
+#define PCLK_SPI4 155
+#define CLK_SPI0 156
+#define CLK_SPI1 157
+#define CLK_SPI2 158
+#define CLK_SPI3 159
+#define CLK_SPI4 160
+#define PCLK_WDT0 161
+#define TCLK_WDT0 162
+#define PCLK_PWM1 163
+#define CLK_PWM1 164
+#define CLK_OSC_PWM1 165
+#define CLK_RC_PWM1 166
+#define PCLK_BUSTIMER0 167
+#define PCLK_BUSTIMER1 168
+#define CLK_TIMER0_ROOT 169
+#define CLK_TIMER0 170
+#define CLK_TIMER1 171
+#define CLK_TIMER2 172
+#define CLK_TIMER3 173
+#define CLK_TIMER4 174
+#define CLK_TIMER5 175
+#define PCLK_MAILBOX0 176
+#define PCLK_GPIO1 177
+#define DBCLK_GPIO1 178
+#define PCLK_GPIO2 179
+#define DBCLK_GPIO2 180
+#define PCLK_GPIO3 181
+#define DBCLK_GPIO3 182
+#define PCLK_GPIO4 183
+#define DBCLK_GPIO4 184
+#define ACLK_DECOM 185
+#define PCLK_DECOM 186
+#define DCLK_DECOM 187
+#define CLK_TIMER1_ROOT 188
+#define CLK_TIMER6 189
+#define CLK_TIMER7 190
+#define CLK_TIMER8 191
+#define CLK_TIMER9 192
+#define CLK_TIMER10 193
+#define CLK_TIMER11 194
+#define ACLK_DMAC0 195
+#define ACLK_DMAC1 196
+#define ACLK_DMAC2 197
+#define ACLK_SPINLOCK 198
+#define HCLK_I3C0 199
+#define HCLK_I3C1 200
+#define HCLK_BUS_CM0_ROOT 201
+#define FCLK_BUS_CM0_CORE 202
+#define CLK_BUS_CM0_RTC 203
+#define PCLK_PMU2 204
+#define PCLK_PWM2 205
+#define CLK_PWM2 206
+#define CLK_RC_PWM2 207
+#define CLK_OSC_PWM2 208
+#define CLK_FREQ_PWM1 209
+#define CLK_COUNTER_PWM1 210
+#define SAI_SCLKIN_FREQ 211
+#define SAI_SCLKIN_COUNTER 212
+#define CLK_I3C0 213
+#define CLK_I3C1 214
+#define PCLK_CSIDPHY1 215
+#define PCLK_DDR_ROOT 216
+#define PCLK_DDR_MON_CH0 217
+#define TMCLK_DDR_MON_CH0 218
+#define ACLK_DDR_ROOT 219
+#define HCLK_DDR_ROOT 220
+#define FCLK_DDR_CM0_CORE 221
+#define CLK_DDR_TIMER_ROOT 222
+#define CLK_DDR_TIMER0 223
+#define CLK_DDR_TIMER1 224
+#define TCLK_WDT_DDR 225
+#define PCLK_WDT 226
+#define PCLK_TIMER 227
+#define CLK_DDR_CM0_RTC 228
+#define ACLK_RKNN0 229
+#define ACLK_RKNN1 230
+#define HCLK_RKNN_ROOT 231
+#define CLK_RKNN_DSU0 232
+#define PCLK_NPUTOP_ROOT 233
+#define PCLK_NPU_TIMER 234
+#define CLK_NPUTIMER_ROOT 235
+#define CLK_NPUTIMER0 236
+#define CLK_NPUTIMER1 237
+#define PCLK_NPU_WDT 238
+#define TCLK_NPU_WDT 239
+#define ACLK_RKNN_CBUF 240
+#define HCLK_NPU_CM0_ROOT 241
+#define FCLK_NPU_CM0_CORE 242
+#define CLK_NPU_CM0_RTC 243
+#define HCLK_RKNN_CBUF 244
+#define HCLK_NVM_ROOT 245
+#define ACLK_NVM_ROOT 246
+#define SCLK_FSPI_X2 247
+#define HCLK_FSPI 248
+#define CCLK_SRC_EMMC 249
+#define HCLK_EMMC 250
+#define ACLK_EMMC 251
+#define BCLK_EMMC 252
+#define TCLK_EMMC 253
+#define PCLK_PHP_ROOT 254
+#define ACLK_PHP_ROOT 255
+#define PCLK_PCIE0 256
+#define CLK_PCIE0_AUX 257
+#define ACLK_PCIE0_MST 258
+#define ACLK_PCIE0_SLV 259
+#define ACLK_PCIE0_DBI 260
+#define ACLK_USB3OTG1 261
+#define CLK_REF_USB3OTG1 262
+#define CLK_SUSPEND_USB3OTG1 263
+#define ACLK_MMU0 264
+#define ACLK_SLV_MMU0 265
+#define ACLK_MMU1 266
+#define ACLK_SLV_MMU1 267
+#define PCLK_PCIE1 268
+#define CLK_PCIE1_AUX 269
+#define ACLK_PCIE1_MST 270
+#define ACLK_PCIE1_SLV 271
+#define ACLK_PCIE1_DBI 272
+#define CLK_RXOOB0 273
+#define CLK_RXOOB1 274
+#define CLK_PMALIVE0 275
+#define CLK_PMALIVE1 276
+#define ACLK_SATA0 277
+#define ACLK_SATA1 278
+#define CLK_USB3OTG1_PIPE_PCLK 279
+#define CLK_USB3OTG1_UTMI 280
+#define CLK_USB3OTG0_PIPE_PCLK 281
+#define CLK_USB3OTG0_UTMI 282
+#define HCLK_SDGMAC_ROOT 283
+#define ACLK_SDGMAC_ROOT 284
+#define PCLK_SDGMAC_ROOT 285
+#define ACLK_GMAC0 286
+#define ACLK_GMAC1 287
+#define PCLK_GMAC0 288
+#define PCLK_GMAC1 289
+#define CCLK_SRC_SDIO 290
+#define HCLK_SDIO 291
+#define CLK_GMAC1_PTP_REF 292
+#define CLK_GMAC0_PTP_REF 293
+#define CLK_GMAC1_PTP_REF_SRC 294
+#define CLK_GMAC0_PTP_REF_SRC 295
+#define CCLK_SRC_SDMMC0 296
+#define HCLK_SDMMC0 297
+#define SCLK_FSPI1_X2 298
+#define HCLK_FSPI1 299
+#define ACLK_DSMC_ROOT 300
+#define ACLK_DSMC 301
+#define PCLK_DSMC 302
+#define CLK_DSMC_SYS 303
+#define HCLK_HSGPIO 304
+#define CLK_HSGPIO_TX 305
+#define CLK_HSGPIO_RX 306
+#define ACLK_HSGPIO 307
+#define PCLK_PHPPHY_ROOT 308
+#define PCLK_PCIE2_COMBOPHY0 309
+#define PCLK_PCIE2_COMBOPHY1 310
+#define CLK_PCIE_100M_SRC 311
+#define CLK_PCIE_100M_NDUTY_SRC 312
+#define CLK_REF_PCIE0_PHY 313
+#define CLK_REF_PCIE1_PHY 314
+#define CLK_REF_MPHY_26M 315
+#define HCLK_RKVDEC_ROOT 316
+#define ACLK_RKVDEC_ROOT 317
+#define HCLK_RKVDEC 318
+#define CLK_RKVDEC_HEVC_CA 319
+#define CLK_RKVDEC_CORE 320
+#define ACLK_UFS_ROOT 321
+#define ACLK_USB_ROOT 322
+#define PCLK_USB_ROOT 323
+#define ACLK_USB3OTG0 324
+#define CLK_REF_USB3OTG0 325
+#define CLK_SUSPEND_USB3OTG0 326
+#define ACLK_MMU2 327
+#define ACLK_SLV_MMU2 328
+#define ACLK_UFS_SYS 329
+#define ACLK_VPU_ROOT 330
+#define ACLK_VPU_MID_ROOT 331
+#define HCLK_VPU_ROOT 332
+#define ACLK_JPEG_ROOT 333
+#define ACLK_VPU_LOW_ROOT 334
+#define HCLK_RGA2E_0 335
+#define ACLK_RGA2E_0 336
+#define CLK_CORE_RGA2E_0 337
+#define ACLK_JPEG 338
+#define HCLK_JPEG 339
+#define HCLK_VDPP 340
+#define ACLK_VDPP 341
+#define CLK_CORE_VDPP 342
+#define HCLK_RGA2E_1 343
+#define ACLK_RGA2E_1 344
+#define CLK_CORE_RGA2E_1 345
+#define DCLK_EBC_FRAC_SRC 346
+#define HCLK_EBC 347
+#define ACLK_EBC 348
+#define DCLK_EBC 349
+#define HCLK_VEPU0_ROOT 350
+#define ACLK_VEPU0_ROOT 351
+#define HCLK_VEPU0 352
+#define ACLK_VEPU0 353
+#define CLK_VEPU0_CORE 354
+#define ACLK_VI_ROOT 355
+#define HCLK_VI_ROOT 356
+#define PCLK_VI_ROOT 357
+#define DCLK_VICAP 358
+#define ACLK_VICAP 359
+#define HCLK_VICAP 360
+#define CLK_ISP_CORE 361
+#define CLK_ISP_CORE_MARVIN 362
+#define CLK_ISP_CORE_VICAP 363
+#define ACLK_ISP 364
+#define HCLK_ISP 365
+#define ACLK_VPSS 366
+#define HCLK_VPSS 367
+#define CLK_CORE_VPSS 368
+#define PCLK_CSI_HOST_0 369
+#define PCLK_CSI_HOST_1 370
+#define PCLK_CSI_HOST_2 371
+#define PCLK_CSI_HOST_3 372
+#define PCLK_CSI_HOST_4 373
+#define ICLK_CSIHOST01 374
+#define ICLK_CSIHOST0 375
+#define CLK_ISP_PVTPLL_SRC 376
+#define ACLK_VI_ROOT_INTER 377
+#define CLK_VICAP_I0CLK 378
+#define CLK_VICAP_I1CLK 379
+#define CLK_VICAP_I2CLK 380
+#define CLK_VICAP_I3CLK 381
+#define CLK_VICAP_I4CLK 382
+#define ACLK_VOP_ROOT 383
+#define HCLK_VOP_ROOT 384
+#define PCLK_VOP_ROOT 385
+#define HCLK_VOP 386
+#define ACLK_VOP 387
+#define DCLK_VP0_SRC 388
+#define DCLK_VP1_SRC 389
+#define DCLK_VP2_SRC 390
+#define DCLK_VP0 391
+#define DCLK_VP1 392
+#define DCLK_VP2 393
+#define PCLK_VOPGRF 394
+#define ACLK_VO0_ROOT 395
+#define HCLK_VO0_ROOT 396
+#define PCLK_VO0_ROOT 397
+#define PCLK_VO0_GRF 398
+#define ACLK_HDCP0 399
+#define HCLK_HDCP0 400
+#define PCLK_HDCP0 401
+#define CLK_TRNG0_SKP 402
+#define PCLK_DSIHOST0 403
+#define CLK_DSIHOST0 404
+#define PCLK_HDMITX0 405
+#define CLK_HDMITX0_EARC 406
+#define CLK_HDMITX0_REF 407
+#define PCLK_EDP0 408
+#define CLK_EDP0_24M 409
+#define CLK_EDP0_200M 410
+#define MCLK_SAI5_8CH_SRC 411
+#define MCLK_SAI5_8CH 412
+#define HCLK_SAI5_8CH 413
+#define MCLK_SAI6_8CH_SRC 414
+#define MCLK_SAI6_8CH 415
+#define HCLK_SAI6_8CH 416
+#define HCLK_SPDIF_TX2 417
+#define MCLK_SPDIF_TX2 418
+#define HCLK_SPDIF_RX2 419
+#define MCLK_SPDIF_RX2 420
+#define HCLK_SAI8_8CH 421
+#define MCLK_SAI8_8CH_SRC 422
+#define MCLK_SAI8_8CH 423
+#define ACLK_VO1_ROOT 424
+#define HCLK_VO1_ROOT 425
+#define PCLK_VO1_ROOT 426
+#define MCLK_SAI7_8CH_SRC 427
+#define MCLK_SAI7_8CH 428
+#define HCLK_SAI7_8CH 429
+#define HCLK_SPDIF_TX3 430
+#define HCLK_SPDIF_TX4 431
+#define HCLK_SPDIF_TX5 432
+#define MCLK_SPDIF_TX3 433
+#define CLK_AUX16MHZ_0 434
+#define ACLK_DP0 435
+#define PCLK_DP0 436
+#define PCLK_VO1_GRF 437
+#define ACLK_HDCP1 438
+#define HCLK_HDCP1 439
+#define PCLK_HDCP1 440
+#define CLK_TRNG1_SKP 441
+#define HCLK_SAI9_8CH 442
+#define MCLK_SAI9_8CH_SRC 443
+#define MCLK_SAI9_8CH 444
+#define MCLK_SPDIF_TX4 445
+#define MCLK_SPDIF_TX5 446
+#define CLK_GPU_SRC_PRE 447
+#define CLK_GPU 448
+#define PCLK_GPU_ROOT 449
+#define ACLK_CENTER_ROOT 450
+#define ACLK_CENTER_LOW_ROOT 451
+#define HCLK_CENTER_ROOT 452
+#define PCLK_CENTER_ROOT 453
+#define ACLK_DMA2DDR 454
+#define ACLK_DDR_SHAREMEM 455
+#define PCLK_DMA2DDR 456
+#define PCLK_SHAREMEM 457
+#define HCLK_VEPU1_ROOT 458
+#define ACLK_VEPU1_ROOT 459
+#define HCLK_VEPU1 460
+#define ACLK_VEPU1 461
+#define CLK_VEPU1_CORE 462
+#define CLK_JDBCK_DAP 463
+#define PCLK_MIPI_DCPHY 464
+#define CLK_32K_USB2DEBUG 465
+#define PCLK_CSIDPHY 466
+#define PCLK_USBDPPHY 467
+#define CLK_PMUPHY_REF_SRC 468
+#define CLK_USBDP_COMBO_PHY_IMMORTAL 469
+#define CLK_HDMITXHDP 470
+#define PCLK_MPHY 471
+#define CLK_REF_OSC_MPHY 472
+#define CLK_REF_UFS_CLKOUT 473
+#define HCLK_PMU1_ROOT 474
+#define HCLK_PMU_CM0_ROOT 475
+#define CLK_200M_PMU_SRC 476
+#define CLK_100M_PMU_SRC 477
+#define CLK_50M_PMU_SRC 478
+#define FCLK_PMU_CM0_CORE 479
+#define CLK_PMU_CM0_RTC 480
+#define PCLK_PMU1 481
+#define CLK_PMU1 482
+#define PCLK_PMU1WDT 483
+#define TCLK_PMU1WDT 484
+#define PCLK_PMUTIMER 485
+#define CLK_PMUTIMER_ROOT 486
+#define CLK_PMUTIMER0 487
+#define CLK_PMUTIMER1 488
+#define PCLK_PMU1PWM 489
+#define CLK_PMU1PWM 490
+#define CLK_PMU1PWM_OSC 491
+#define PCLK_PMUPHY_ROOT 492
+#define PCLK_I2C0 493
+#define CLK_I2C0 494
+#define SCLK_UART1 495
+#define PCLK_UART1 496
+#define CLK_PMU1PWM_RC 497
+#define CLK_PDM0 498
+#define HCLK_PDM0 499
+#define MCLK_PDM0 500
+#define HCLK_VAD 501
+#define CLK_OSCCHK_PVTM 502
+#define CLK_PDM0_OUT 503
+#define CLK_HPTIMER_SRC 504
+#define PCLK_PMU0_ROOT 505
+#define PCLK_PMU0 506
+#define PCLK_GPIO0 507
+#define DBCLK_GPIO0 508
+#define CLK_OSC0_PMU1 509
+#define PCLK_PMU1_ROOT 510
+#define XIN_OSC0_DIV 511
+#define ACLK_USB 512
+#define ACLK_UFS 513
+#define ACLK_SDGMAC 514
+#define HCLK_SDGMAC 515
+#define PCLK_SDGMAC 516
+#define HCLK_VO1 517
+#define HCLK_VO0 518
+#define PCLK_CCI_ROOT 519
+#define ACLK_CCI_ROOT 520
+#define HCLK_VO0VOP_CHANNEL 521
+#define ACLK_VO0VOP_CHANNEL 522
+#define ACLK_TOP_MID 523
+#define ACLK_SECURE_HIGH 524
+#define CLK_USBPHY_REF_SRC 525
+#define CLK_PHY_REF_SRC 526
+#define CLK_CPLL_REF_SRC 527
+#define CLK_AUPLL_REF_SRC 528
+#define PCLK_SECURE_NS 529
+#define HCLK_SECURE_NS 530
+#define ACLK_SECURE_NS 531
+#define PCLK_OTPC_NS 532
+#define HCLK_CRYPTO_NS 533
+#define HCLK_TRNG_NS 534
+#define CLK_OTPC_NS 535
+#define SCLK_DSU 536
+#define SCLK_DDR 537
+#define ACLK_CRYPTO_NS 538
+#define CLK_PKA_CRYPTO_NS 539
+#define ACLK_RKVDEC_ROOT_BAK 540
+#define CLK_AUDIO_FRAC_0_SRC 541
+#define CLK_AUDIO_FRAC_1_SRC 542
+#define CLK_AUDIO_FRAC_2_SRC 543
+#define CLK_AUDIO_FRAC_3_SRC 544
+#define PCLK_HDPTX_APB 545
+
+/* secure clk */
+#define CLK_STIMER0_ROOT 546
+#define CLK_STIMER1_ROOT 547
+#define PCLK_SECURE_S 548
+#define HCLK_SECURE_S 549
+#define ACLK_SECURE_S 550
+#define CLK_PKA_CRYPTO_S 551
+#define HCLK_VO1_S 552
+#define PCLK_VO1_S 553
+#define HCLK_VO0_S 554
+#define PCLK_VO0_S 555
+#define PCLK_KLAD 556
+#define HCLK_CRYPTO_S 557
+#define HCLK_KLAD 558
+#define ACLK_CRYPTO_S 559
+#define HCLK_TRNG_S 560
+#define PCLK_OTPC_S 561
+#define CLK_OTPC_S 562
+#define PCLK_WDT_S 563
+#define TCLK_WDT_S 564
+#define PCLK_HDCP0_TRNG 565
+#define PCLK_HDCP1_TRNG 566
+#define HCLK_HDCP_KEY0 567
+#define HCLK_HDCP_KEY1 568
+#define PCLK_EDP_S 569
+#define ACLK_KLAD 570
+
+#endif
diff --git a/include/dt-bindings/iio/adi,ad4695.h b/include/dt-bindings/iio/adi,ad4695.h
new file mode 100644
index 000000000000..9fbef542bf67
--- /dev/null
+++ b/include/dt-bindings/iio/adi,ad4695.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD4695_H
+#define _DT_BINDINGS_ADI_AD4695_H
+
+#define AD4695_COMMON_MODE_REFGND 0xFF
+#define AD4695_COMMON_MODE_COM 0xFE
+
+#endif /* _DT_BINDINGS_ADI_AD4695_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8937.h b/include/dt-bindings/interconnect/qcom,msm8937.h
new file mode 100644
index 000000000000..98b8a4637aab
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8937.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8937 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_2 3
+#define MAS_SNOC_BIMC_1 4
+#define MAS_TCU_0 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_SPDM 0
+#define MAS_BLSP_1 1
+#define MAS_BLSP_2 2
+#define MAS_USB_HS1 3
+#define MAS_XI_USB_HS1 4
+#define MAS_CRYPTO 5
+#define MAS_SDCC_1 6
+#define MAS_SDCC_2 7
+#define MAS_SNOC_PCNOC 8
+#define PCNOC_M_0 9
+#define PCNOC_M_1 10
+#define PCNOC_INT_0 11
+#define PCNOC_INT_1 12
+#define PCNOC_INT_2 13
+#define PCNOC_INT_3 14
+#define PCNOC_S_0 15
+#define PCNOC_S_1 16
+#define PCNOC_S_2 17
+#define PCNOC_S_3 18
+#define PCNOC_S_4 19
+#define PCNOC_S_6 20
+#define PCNOC_S_7 21
+#define PCNOC_S_8 22
+#define SLV_SDCC_2 23
+#define SLV_SPDM 24
+#define SLV_PDM 25
+#define SLV_PRNG 26
+#define SLV_TCSR 27
+#define SLV_SNOC_CFG 28
+#define SLV_MESSAGE_RAM 29
+#define SLV_CAMERA_SS_CFG 30
+#define SLV_DISP_SS_CFG 31
+#define SLV_VENUS_CFG 32
+#define SLV_GPU_CFG 33
+#define SLV_TLMM 34
+#define SLV_BLSP_1 35
+#define SLV_BLSP_2 36
+#define SLV_PMIC_ARB 37
+#define SLV_SDCC_1 38
+#define SLV_CRYPTO_0_CFG 39
+#define SLV_USB_HS 40
+#define SLV_TCU 41
+#define SLV_PCNOC_SNOC 42
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define QDSS_INT 4
+#define SNOC_INT_0 5
+#define SNOC_INT_1 6
+#define SNOC_INT_2 7
+#define SLV_KPSS_AHB 8
+#define SLV_WCSS 9
+#define SLV_SNOC_BIMC_1 10
+#define SLV_IMEM 11
+#define SLV_SNOC_PCNOC 12
+#define SLV_QDSS_STM 13
+#define SLV_CATS_1 14
+#define SLV_LPASS 15
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_MDP 1
+#define MAS_VENUS 2
+#define MAS_VFE0 3
+#define MAS_VFE1 4
+#define MAS_CPP 5
+#define SLV_SNOC_BIMC_0 6
+#define SLV_SNOC_BIMC_2 7
+#define SLV_CATS_0 8
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8976.h b/include/dt-bindings/interconnect/qcom,msm8976.h
new file mode 100644
index 000000000000..4ea90f22320e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8976.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8976 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_SMMNOC_BIMC 1
+#define MAS_SNOC_BIMC 2
+#define MAS_TCU_0 3
+#define SLV_EBI 4
+#define SLV_BIMC_SNOC 5
+
+/* PCNOC fabric */
+#define MAS_USB_HS2 0
+#define MAS_BLSP_1 1
+#define MAS_USB_HS1 2
+#define MAS_BLSP_2 3
+#define MAS_CRYPTO 4
+#define MAS_SDCC_1 5
+#define MAS_SDCC_2 6
+#define MAS_SDCC_3 7
+#define MAS_SNOC_PCNOC 8
+#define MAS_LPASS_AHB 9
+#define MAS_SPDM 10
+#define MAS_DEHR 11
+#define MAS_XM_USB_HS1 12
+#define PCNOC_M_0 13
+#define PCNOC_M_1 14
+#define PCNOC_INT_0 15
+#define PCNOC_INT_1 16
+#define PCNOC_INT_2 17
+#define PCNOC_S_1 18
+#define PCNOC_S_2 19
+#define PCNOC_S_3 20
+#define PCNOC_S_4 21
+#define PCNOC_S_8 22
+#define PCNOC_S_9 23
+#define SLV_TCSR 24
+#define SLV_TLMM 25
+#define SLV_CRYPTO_0_CFG 26
+#define SLV_MESSAGE_RAM 27
+#define SLV_PDM 28
+#define SLV_PRNG 29
+#define SLV_PMIC_ARB 30
+#define SLV_SNOC_CFG 31
+#define SLV_DCC_CFG 32
+#define SLV_CAMERA_SS_CFG 33
+#define SLV_DISP_SS_CFG 34
+#define SLV_VENUS_CFG 35
+#define SLV_SDCC_1 36
+#define SLV_BLSP_1 37
+#define SLV_USB_HS 38
+#define SLV_SDCC_3 39
+#define SLV_SDCC_2 40
+#define SLV_GPU_CFG 41
+#define SLV_USB_HS2 42
+#define SLV_BLSP_2 43
+#define SLV_PCNOC_SNOC 44
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define MAS_LPASS_PROC 4
+#define MAS_IPA 5
+#define QDSS_INT 6
+#define SNOC_INT_0 7
+#define SNOC_INT_1 8
+#define SNOC_INT_2 9
+#define SLV_KPSS_AHB 10
+#define SLV_SNOC_BIMC 11
+#define SLV_IMEM 12
+#define SLV_SNOC_PCNOC 13
+#define SLV_QDSS_STM 14
+#define SLV_CATS_0 15
+#define SLV_CATS_1 16
+#define SLV_LPASS 17
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_OXILI 1
+#define MAS_MDP0 2
+#define MAS_MDP1 3
+#define MAS_VENUS_0 4
+#define MAS_VENUS_1 5
+#define MAS_VFE_0 6
+#define MAS_VFE_1 7
+#define MAS_CPP 8
+#define MM_INT_0 9
+#define SLV_SMMNOC_BIMC 10
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H */
diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h b/include/dt-bindings/interconnect/qcom,sm8350.h
index c7f7ed315aeb..2282f93607bc 100644
--- a/include/dt-bindings/interconnect/qcom,sm8350.h
+++ b/include/dt-bindings/interconnect/qcom,sm8350.h
@@ -119,9 +119,6 @@
#define SLAVE_SERVICE_GEM_NOC_1 16
#define SLAVE_SERVICE_GEM_NOC_2 17
#define SLAVE_SERVICE_GEM_NOC 18
-#define MASTER_MNOC_HF_MEM_NOC_DISP 19
-#define MASTER_MNOC_SF_MEM_NOC_DISP 20
-#define SLAVE_LLCC_DISP 21
#define MASTER_CNOC_LPASS_AG_NOC 0
#define SLAVE_LPASS_CORE_CFG 1
@@ -133,8 +130,6 @@
#define MASTER_LLCC 0
#define SLAVE_EBI1 1
-#define MASTER_LLCC_DISP 2
-#define SLAVE_EBI1_DISP 3
#define MASTER_CAMNOC_HF 0
#define MASTER_CAMNOC_ICP 1
@@ -149,11 +144,6 @@
#define SLAVE_MNOC_HF_MEM_NOC 10
#define SLAVE_MNOC_SF_MEM_NOC 11
#define SLAVE_SERVICE_MNOC 12
-#define MASTER_MDP0_DISP 13
-#define MASTER_MDP1_DISP 14
-#define MASTER_ROTATOR_DISP 15
-#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
-#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
#define MASTER_CDSP_NOC_CFG 0
#define MASTER_CDSP_PROC 1
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1800b.h b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
new file mode 100644
index 000000000000..0593fc33d470
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1800B_H
+#define _DT_BINDINGS_PINCTRL_CV1800B_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AOUTR 1
+#define PIN_SD0_CLK 3
+#define PIN_SD0_CMD 4
+#define PIN_SD0_D0 5
+#define PIN_SD0_D1 7
+#define PIN_SD0_D2 8
+#define PIN_SD0_D3 9
+#define PIN_SD0_CD 11
+#define PIN_SD0_PWR_EN 12
+#define PIN_SPK_EN 14
+#define PIN_UART0_TX 15
+#define PIN_UART0_RX 16
+#define PIN_SPINOR_HOLD_X 17
+#define PIN_SPINOR_SCK 18
+#define PIN_SPINOR_MOSI 19
+#define PIN_SPINOR_WP_X 20
+#define PIN_SPINOR_MISO 21
+#define PIN_SPINOR_CS_X 22
+#define PIN_IIC0_SCL 23
+#define PIN_IIC0_SDA 24
+#define PIN_AUX0 25
+#define PIN_PWR_VBAT_DET 30
+#define PIN_PWR_SEQ2 31
+#define PIN_XTAL_XIN 33
+#define PIN_SD1_GPIO0 35
+#define PIN_SD1_GPIO1 36
+#define PIN_SD1_D3 38
+#define PIN_SD1_D2 39
+#define PIN_SD1_D1 40
+#define PIN_SD1_D0 41
+#define PIN_SD1_CMD 42
+#define PIN_SD1_CLK 43
+#define PIN_ADC1 44
+#define PIN_USB_VBUS_DET 45
+#define PIN_ETH_TXP 47
+#define PIN_ETH_TXM 48
+#define PIN_ETH_RXP 49
+#define PIN_ETH_RXM 50
+#define PIN_MIPIRX4N 56
+#define PIN_MIPIRX4P 57
+#define PIN_MIPIRX3N 58
+#define PIN_MIPIRX3P 59
+#define PIN_MIPIRX2N 60
+#define PIN_MIPIRX2P 61
+#define PIN_MIPIRX1N 62
+#define PIN_MIPIRX1P 63
+#define PIN_MIPIRX0N 64
+#define PIN_MIPIRX0P 65
+#define PIN_AUD_AINL_MIC 67
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1800B_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1812h.h b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
new file mode 100644
index 000000000000..2908de347919
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1812H_H
+#define _DT_BINDINGS_PINCTRL_CV1812H_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1812H_H */
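A note on the PINPOS() helper used above: it encodes a BGA ball position as a single pin number, with the 1-based row index in the high byte and the 0-based column in the low byte. The row letters skip I, O and Q, as is conventional for BGA packages, so the encoded values are intentionally sparse. A minimal standalone sketch, not part of the kernel sources, that checks the arithmetic:

/* Hypothetical userspace check of the PINPOS() ball encoding. */
#include <assert.h>

#define PINPOS(row, col) \
	((((row) - 'A' + 1) << 8) + ((col) - 1))

int main(void)
{
	/* Ball A2: row 'A' -> 1 in the high byte, column 2 -> 1. */
	assert(PINPOS('A', 2) == 0x101);
	/* Ball R13: 'R' - 'A' + 1 == 18, column 13 -> 12. */
	assert(PINPOS('R', 13) == (18 << 8) + 12);
	return 0;
}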
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv18xx.h b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
new file mode 100644
index 000000000000..bc92ad1067ec
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ *
+ * Author: Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV18XX_H
+#define _DT_BINDINGS_PINCTRL_CV18XX_H
+
+#define PIN_MUX_INVALD 0xff
+
+#define PINMUX2(pin, mux, mux2) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16) | (((mux2) & 0xff) << 24))
+
+#define PINMUX(pin, mux) \
+ PINMUX2(pin, mux, PIN_MUX_INVALD)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV18XX_H */
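The PINMUX2()/PINMUX() helpers above pack a pin number and up to two mux function selectors into one 32-bit cell: the pin in bits 15:0, the first function in bits 23:16 and the second in bits 31:24, with PIN_MUX_INVALD (0xff) marking an unused second selector. A standalone sketch of the packing and unpacking; the uint32_t casts are added here only to keep the shifts well defined in plain C, since the kernel header itself is consumed by the devicetree compiler:

/* Hypothetical decode of the PINMUX2() packing described above. */
#include <stdint.h>
#include <stdio.h>

#define PIN_MUX_INVALD	0xff
#define PINMUX2(pin, mux, mux2) \
	((uint32_t)((pin) & 0xffff) | \
	 ((uint32_t)((mux) & 0xff) << 16) | \
	 ((uint32_t)((mux2) & 0xff) << 24))
#define PINMUX(pin, mux)	PINMUX2(pin, mux, PIN_MUX_INVALD)

int main(void)
{
	uint32_t v = PINMUX(42, 3);	/* pin 42, function 3 */

	printf("pin=%u mux=%u mux2=0x%x\n",
	       v & 0xffff, (v >> 16) & 0xff, (v >> 24) & 0xff);
	/* prints: pin=42 mux=3 mux2=0xff */
	return 0;
}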
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2000.h b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
new file mode 100644
index 000000000000..4871f9a7c6c1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2000_H
+#define _DT_BINDINGS_PINCTRL_SG2000_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2000_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2002.h b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
new file mode 100644
index 000000000000..3c36cfa0a550
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from the vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2002_H
+#define _DT_BINDINGS_PINCTRL_SG2002_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AINL_MIC 2
+#define PIN_AUD_AOUTR 4
+#define PIN_SD0_CLK 6
+#define PIN_SD0_CMD 7
+#define PIN_SD0_D0 8
+#define PIN_SD0_D1 10
+#define PIN_SD0_D2 11
+#define PIN_SD0_D3 12
+#define PIN_SD0_CD 14
+#define PIN_SD0_PWR_EN 15
+#define PIN_SPK_EN 17
+#define PIN_UART0_TX 18
+#define PIN_UART0_RX 19
+#define PIN_EMMC_DAT2 20
+#define PIN_EMMC_CLK 21
+#define PIN_EMMC_DAT0 22
+#define PIN_EMMC_DAT3 23
+#define PIN_EMMC_CMD 24
+#define PIN_EMMC_DAT1 25
+#define PIN_JTAG_CPU_TMS 26
+#define PIN_JTAG_CPU_TCK 27
+#define PIN_IIC0_SCL 28
+#define PIN_IIC0_SDA 29
+#define PIN_AUX0 30
+#define PIN_GPIO_ZQ 35
+#define PIN_PWR_VBAT_DET 38
+#define PIN_PWR_RSTN 39
+#define PIN_PWR_SEQ1 40
+#define PIN_PWR_SEQ2 41
+#define PIN_PWR_WAKEUP0 43
+#define PIN_PWR_BUTTON1 44
+#define PIN_XTAL_XIN 45
+#define PIN_PWR_GPIO0 47
+#define PIN_PWR_GPIO1 48
+#define PIN_PWR_GPIO2 49
+#define PIN_SD1_D3 51
+#define PIN_SD1_D2 52
+#define PIN_SD1_D1 53
+#define PIN_SD1_D0 54
+#define PIN_SD1_CMD 55
+#define PIN_SD1_CLK 56
+#define PIN_PWM0_BUCK 58
+#define PIN_ADC1 59
+#define PIN_USB_VBUS_DET 60
+#define PIN_ETH_TXP 62
+#define PIN_ETH_TXM 63
+#define PIN_ETH_RXP 64
+#define PIN_ETH_RXM 65
+#define PIN_GPIO_RTX 67
+#define PIN_MIPIRX4N 72
+#define PIN_MIPIRX4P 73
+#define PIN_MIPIRX3N 74
+#define PIN_MIPIRX3P 75
+#define PIN_MIPIRX2N 76
+#define PIN_MIPIRX2P 77
+#define PIN_MIPIRX1N 78
+#define PIN_MIPIRX1P 79
+#define PIN_MIPIRX0N 80
+#define PIN_MIPIRX0P 81
+#define PIN_MIPI_TXM2 83
+#define PIN_MIPI_TXP2 84
+#define PIN_MIPI_TXM1 85
+#define PIN_MIPI_TXP1 86
+#define PIN_MIPI_TXM0 87
+#define PIN_MIPI_TXP0 88
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2002_H */
diff --git a/include/dt-bindings/reset/rockchip,rk3576-cru.h b/include/dt-bindings/reset/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..ae856906f3a3
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3576-cru.h
@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+
+#define SRST_A_TOP_BIU 0
+#define SRST_P_TOP_BIU 1
+#define SRST_A_TOP_MID_BIU 2
+#define SRST_A_SECURE_HIGH_BIU 3
+#define SRST_H_TOP_BIU 4
+
+#define SRST_H_VO0VOP_CHANNEL_BIU 5
+#define SRST_A_VO0VOP_CHANNEL_BIU 6
+
+#define SRST_BISRINTF 7
+
+#define SRST_H_AUDIO_BIU 8
+#define SRST_H_ASRC_2CH_0 9
+#define SRST_H_ASRC_2CH_1 10
+#define SRST_H_ASRC_4CH_0 11
+#define SRST_H_ASRC_4CH_1 12
+#define SRST_ASRC_2CH_0 13
+#define SRST_ASRC_2CH_1 14
+#define SRST_ASRC_4CH_0 15
+#define SRST_ASRC_4CH_1 16
+#define SRST_M_SAI0_8CH 17
+#define SRST_H_SAI0_8CH 18
+#define SRST_H_SPDIF_RX0 19
+#define SRST_M_SPDIF_RX0 20
+
+#define SRST_H_SPDIF_RX1 21
+#define SRST_M_SPDIF_RX1 22
+#define SRST_M_SAI1_8CH 23
+#define SRST_H_SAI1_8CH 24
+#define SRST_M_SAI2_2CH 25
+#define SRST_H_SAI2_2CH 26
+#define SRST_M_SAI3_2CH 27
+#define SRST_H_SAI3_2CH 28
+
+#define SRST_M_SAI4_2CH 29
+#define SRST_H_SAI4_2CH 30
+#define SRST_H_ACDCDIG_DSM 31
+#define SRST_M_ACDCDIG_DSM 32
+#define SRST_PDM1 33
+#define SRST_H_PDM1 34
+#define SRST_M_PDM1 35
+#define SRST_H_SPDIF_TX0 36
+#define SRST_M_SPDIF_TX0 37
+#define SRST_H_SPDIF_TX1 38
+#define SRST_M_SPDIF_TX1 39
+
+#define SRST_A_BUS_BIU 40
+#define SRST_P_BUS_BIU 41
+#define SRST_P_CRU 42
+#define SRST_H_CAN0 43
+#define SRST_CAN0 44
+#define SRST_H_CAN1 45
+#define SRST_CAN1 46
+#define SRST_P_INTMUX2BUS 47
+#define SRST_P_VCCIO_IOC 48
+#define SRST_H_BUS_BIU 49
+#define SRST_KEY_SHIFT 50
+
+#define SRST_P_I2C1 51
+#define SRST_P_I2C2 52
+#define SRST_P_I2C3 53
+#define SRST_P_I2C4 54
+#define SRST_P_I2C5 55
+#define SRST_P_I2C6 56
+#define SRST_P_I2C7 57
+#define SRST_P_I2C8 58
+#define SRST_P_I2C9 59
+#define SRST_P_WDT_BUSMCU 60
+#define SRST_T_WDT_BUSMCU 61
+#define SRST_A_GIC 62
+#define SRST_I2C1 63
+#define SRST_I2C2 64
+#define SRST_I2C3 65
+#define SRST_I2C4 66
+
+#define SRST_I2C5 67
+#define SRST_I2C6 68
+#define SRST_I2C7 69
+#define SRST_I2C8 70
+#define SRST_I2C9 71
+#define SRST_P_SARADC 72
+#define SRST_SARADC 73
+#define SRST_P_TSADC 74
+#define SRST_TSADC 75
+#define SRST_P_UART0 76
+#define SRST_P_UART2 77
+#define SRST_P_UART3 78
+#define SRST_P_UART4 79
+#define SRST_P_UART5 80
+#define SRST_P_UART6 81
+
+#define SRST_P_UART7 82
+#define SRST_P_UART8 83
+#define SRST_P_UART9 84
+#define SRST_P_UART10 85
+#define SRST_P_UART11 86
+#define SRST_S_UART0 87
+#define SRST_S_UART2 88
+#define SRST_S_UART3 89
+#define SRST_S_UART4 90
+#define SRST_S_UART5 91
+
+#define SRST_S_UART6 92
+#define SRST_S_UART7 93
+#define SRST_S_UART8 94
+#define SRST_S_UART9 95
+#define SRST_S_UART10 96
+#define SRST_S_UART11 97
+#define SRST_P_SPI0 98
+#define SRST_P_SPI1 99
+#define SRST_P_SPI2 100
+
+#define SRST_P_SPI3 101
+#define SRST_P_SPI4 102
+#define SRST_SPI0 103
+#define SRST_SPI1 104
+#define SRST_SPI2 105
+#define SRST_SPI3 106
+#define SRST_SPI4 107
+#define SRST_P_WDT0 108
+#define SRST_T_WDT0 109
+#define SRST_P_SYS_GRF 110
+#define SRST_P_PWM1 111
+#define SRST_PWM1 112
+
+#define SRST_P_BUSTIMER0 113
+#define SRST_P_BUSTIMER1 114
+#define SRST_TIMER0 115
+#define SRST_TIMER1 116
+#define SRST_TIMER2 117
+#define SRST_TIMER3 118
+#define SRST_TIMER4 119
+#define SRST_TIMER5 120
+#define SRST_P_BUSIOC 121
+#define SRST_P_MAILBOX0 122
+#define SRST_P_GPIO1 123
+
+#define SRST_GPIO1 124
+#define SRST_P_GPIO2 125
+#define SRST_GPIO2 126
+#define SRST_P_GPIO3 127
+#define SRST_GPIO3 128
+#define SRST_P_GPIO4 129
+#define SRST_GPIO4 130
+#define SRST_A_DECOM 131
+#define SRST_P_DECOM 132
+#define SRST_D_DECOM 133
+#define SRST_TIMER6 134
+#define SRST_TIMER7 135
+#define SRST_TIMER8 136
+#define SRST_TIMER9 137
+#define SRST_TIMER10 138
+
+#define SRST_TIMER11 139
+#define SRST_A_DMAC0 140
+#define SRST_A_DMAC1 141
+#define SRST_A_DMAC2 142
+#define SRST_A_SPINLOCK 143
+#define SRST_REF_PVTPLL_BUS 144
+#define SRST_H_I3C0 145
+#define SRST_H_I3C1 146
+#define SRST_H_BUS_CM0_BIU 147
+#define SRST_F_BUS_CM0_CORE 148
+#define SRST_T_BUS_CM0_JTAG 149
+
+#define SRST_P_INTMUX2PMU 150
+#define SRST_P_INTMUX2DDR 151
+#define SRST_P_PVTPLL_BUS 152
+#define SRST_P_PWM2 153
+#define SRST_PWM2 154
+#define SRST_FREQ_PWM1 155
+#define SRST_COUNTER_PWM1 156
+#define SRST_I3C0 157
+#define SRST_I3C1 158
+
+#define SRST_P_DDR_MON_CH0 159
+#define SRST_P_DDR_BIU 160
+#define SRST_P_DDR_UPCTL_CH0 161
+#define SRST_TM_DDR_MON_CH0 162
+#define SRST_A_DDR_BIU 163
+#define SRST_DFI_CH0 164
+#define SRST_DDR_MON_CH0 165
+#define SRST_P_DDR_HWLP_CH0 166
+#define SRST_P_DDR_MON_CH1 167
+#define SRST_P_DDR_HWLP_CH1 168
+
+#define SRST_P_DDR_UPCTL_CH1 169
+#define SRST_TM_DDR_MON_CH1 170
+#define SRST_DFI_CH1 171
+#define SRST_A_DDR01_MSCH0 172
+#define SRST_A_DDR01_MSCH1 173
+#define SRST_DDR_MON_CH1 174
+#define SRST_DDR_SCRAMBLE_CH0 175
+#define SRST_DDR_SCRAMBLE_CH1 176
+#define SRST_P_AHB2APB 177
+#define SRST_H_AHB2APB 178
+#define SRST_H_DDR_BIU 179
+#define SRST_F_DDR_CM0_CORE 180
+
+#define SRST_P_DDR01_MSCH0 181
+#define SRST_P_DDR01_MSCH1 182
+#define SRST_DDR_TIMER0 183
+#define SRST_DDR_TIMER1 184
+#define SRST_T_WDT_DDR 185
+#define SRST_P_WDT 186
+#define SRST_P_TIMER 187
+#define SRST_T_DDR_CM0_JTAG 188
+#define SRST_P_DDR_GRF 189
+
+#define SRST_DDR_UPCTL_CH0 190
+#define SRST_A_DDR_UPCTL_0_CH0 191
+#define SRST_A_DDR_UPCTL_1_CH0 192
+#define SRST_A_DDR_UPCTL_2_CH0 193
+#define SRST_A_DDR_UPCTL_3_CH0 194
+#define SRST_A_DDR_UPCTL_4_CH0 195
+
+#define SRST_DDR_UPCTL_CH1 196
+#define SRST_A_DDR_UPCTL_0_CH1 197
+#define SRST_A_DDR_UPCTL_1_CH1 198
+#define SRST_A_DDR_UPCTL_2_CH1 199
+#define SRST_A_DDR_UPCTL_3_CH1 200
+#define SRST_A_DDR_UPCTL_4_CH1 201
+
+#define SRST_REF_PVTPLL_DDR 202
+#define SRST_P_PVTPLL_DDR 203
+
+#define SRST_A_RKNN0 204
+#define SRST_A_RKNN0_BIU 205
+#define SRST_L_RKNN0_BIU 206
+
+#define SRST_A_RKNN1 207
+#define SRST_A_RKNN1_BIU 208
+#define SRST_L_RKNN1_BIU 209
+
+#define SRST_NPU_DAP 210
+#define SRST_L_NPUSUBSYS_BIU 211
+#define SRST_P_NPUTOP_BIU 212
+#define SRST_P_NPU_TIMER 213
+#define SRST_NPUTIMER0 214
+#define SRST_NPUTIMER1 215
+#define SRST_P_NPU_WDT 216
+#define SRST_T_NPU_WDT 217
+
+#define SRST_A_RKNN_CBUF 218
+#define SRST_A_RVCORE0 219
+#define SRST_P_NPU_GRF 220
+#define SRST_P_PVTPLL_NPU 221
+#define SRST_NPU_PVTPLL 222
+#define SRST_H_NPU_CM0_BIU 223
+#define SRST_F_NPU_CM0_CORE 224
+#define SRST_T_NPU_CM0_JTAG 225
+#define SRST_A_RKNNTOP_BIU 226
+#define SRST_H_RKNN_CBUF 227
+#define SRST_H_RKNNTOP_BIU 228
+
+#define SRST_H_NVM_BIU 229
+#define SRST_A_NVM_BIU 230
+#define SRST_S_FSPI 231
+#define SRST_H_FSPI 232
+#define SRST_C_EMMC 233
+#define SRST_H_EMMC 234
+#define SRST_A_EMMC 235
+#define SRST_B_EMMC 236
+#define SRST_T_EMMC 237
+
+#define SRST_P_GRF 238
+#define SRST_P_PHP_BIU 239
+#define SRST_A_PHP_BIU 240
+#define SRST_P_PCIE0 241
+#define SRST_PCIE0_POWER_UP 242
+
+#define SRST_A_USB3OTG1 243
+#define SRST_A_MMU0 244
+#define SRST_A_SLV_MMU0 245
+#define SRST_A_MMU1 246
+
+#define SRST_A_SLV_MMU1 247
+#define SRST_P_PCIE1 248
+#define SRST_PCIE1_POWER_UP 249
+
+#define SRST_RXOOB0 250
+#define SRST_RXOOB1 251
+#define SRST_PMALIVE0 252
+#define SRST_PMALIVE1 253
+#define SRST_A_SATA0 254
+#define SRST_A_SATA1 255
+#define SRST_ASIC1 256
+#define SRST_ASIC0 257
+
+#define SRST_P_CSIDPHY1 258
+#define SRST_SCAN_CSIDPHY1 259
+
+#define SRST_P_SDGMAC_GRF 260
+#define SRST_P_SDGMAC_BIU 261
+#define SRST_A_SDGMAC_BIU 262
+#define SRST_H_SDGMAC_BIU 263
+#define SRST_A_GMAC0 264
+#define SRST_A_GMAC1 265
+#define SRST_P_GMAC0 266
+#define SRST_P_GMAC1 267
+#define SRST_H_SDIO 268
+
+#define SRST_H_SDMMC0 269
+#define SRST_S_FSPI1 270
+#define SRST_H_FSPI1 271
+#define SRST_A_DSMC_BIU 272
+#define SRST_A_DSMC 273
+#define SRST_P_DSMC 274
+#define SRST_H_HSGPIO 275
+#define SRST_HSGPIO 276
+#define SRST_A_HSGPIO 277
+
+#define SRST_H_RKVDEC 278
+#define SRST_H_RKVDEC_BIU 279
+#define SRST_A_RKVDEC_BIU 280
+#define SRST_RKVDEC_HEVC_CA 281
+#define SRST_RKVDEC_CORE 282
+
+#define SRST_A_USB_BIU 283
+#define SRST_P_USBUFS_BIU 284
+#define SRST_A_USB3OTG0 285
+#define SRST_A_UFS_BIU 286
+#define SRST_A_MMU2 287
+#define SRST_A_SLV_MMU2 288
+#define SRST_A_UFS_SYS 289
+
+#define SRST_A_UFS 290
+#define SRST_P_USBUFS_GRF 291
+#define SRST_P_UFS_GRF 292
+
+#define SRST_H_VPU_BIU 293
+#define SRST_A_JPEG_BIU 294
+#define SRST_A_RGA_BIU 295
+#define SRST_A_VDPP_BIU 296
+#define SRST_A_EBC_BIU 297
+#define SRST_H_RGA2E_0 298
+#define SRST_A_RGA2E_0 299
+#define SRST_CORE_RGA2E_0 300
+
+#define SRST_A_JPEG 301
+#define SRST_H_JPEG 302
+#define SRST_H_VDPP 303
+#define SRST_A_VDPP 304
+#define SRST_CORE_VDPP 305
+#define SRST_H_RGA2E_1 306
+#define SRST_A_RGA2E_1 307
+#define SRST_CORE_RGA2E_1 308
+#define SRST_H_EBC 309
+#define SRST_A_EBC 310
+#define SRST_D_EBC 311
+
+#define SRST_H_VEPU0_BIU 312
+#define SRST_A_VEPU0_BIU 313
+#define SRST_H_VEPU0 314
+#define SRST_A_VEPU0 315
+#define SRST_VEPU0_CORE 316
+
+#define SRST_A_VI_BIU 317
+#define SRST_H_VI_BIU 318
+#define SRST_P_VI_BIU 319
+#define SRST_D_VICAP 320
+#define SRST_A_VICAP 321
+#define SRST_H_VICAP 322
+#define SRST_ISP0 323
+#define SRST_ISP0_VICAP 324
+
+#define SRST_CORE_VPSS 325
+#define SRST_P_CSI_HOST_0 326
+#define SRST_P_CSI_HOST_1 327
+#define SRST_P_CSI_HOST_2 328
+#define SRST_P_CSI_HOST_3 329
+#define SRST_P_CSI_HOST_4 330
+
+#define SRST_CIFIN 331
+#define SRST_VICAP_I0CLK 332
+#define SRST_VICAP_I1CLK 333
+#define SRST_VICAP_I2CLK 334
+#define SRST_VICAP_I3CLK 335
+#define SRST_VICAP_I4CLK 336
+
+#define SRST_A_VOP_BIU 337
+#define SRST_A_VOP2_BIU 338
+#define SRST_H_VOP_BIU 339
+#define SRST_P_VOP_BIU 340
+#define SRST_H_VOP 341
+#define SRST_A_VOP 342
+#define SRST_D_VP0 343
+
+#define SRST_D_VP1 344
+#define SRST_D_VP2 345
+#define SRST_P_VOP2_BIU 346
+#define SRST_P_VOPGRF 347
+
+#define SRST_H_VO0_BIU 348
+#define SRST_P_VO0_BIU 349
+#define SRST_A_HDCP0_BIU 350
+#define SRST_P_VO0_GRF 351
+#define SRST_A_HDCP0 352
+#define SRST_H_HDCP0 353
+#define SRST_HDCP0 354
+
+#define SRST_P_DSIHOST0 355
+#define SRST_DSIHOST0 356
+#define SRST_P_HDMITX0 357
+#define SRST_HDMITX0_REF 358
+#define SRST_P_EDP0 359
+#define SRST_EDP0_24M 360
+
+#define SRST_M_SAI5_8CH 361
+#define SRST_H_SAI5_8CH 362
+#define SRST_M_SAI6_8CH 363
+#define SRST_H_SAI6_8CH 364
+#define SRST_H_SPDIF_TX2 365
+#define SRST_M_SPDIF_TX2 366
+#define SRST_H_SPDIF_RX2 367
+#define SRST_M_SPDIF_RX2 368
+
+#define SRST_H_SAI8_8CH 369
+#define SRST_M_SAI8_8CH 370
+
+#define SRST_H_VO1_BIU 371
+#define SRST_P_VO1_BIU 372
+#define SRST_M_SAI7_8CH 373
+#define SRST_H_SAI7_8CH 374
+#define SRST_H_SPDIF_TX3 375
+#define SRST_H_SPDIF_TX4 376
+#define SRST_H_SPDIF_TX5 377
+#define SRST_M_SPDIF_TX3 378
+
+#define SRST_DP0 379
+#define SRST_P_VO1_GRF 380
+#define SRST_A_HDCP1_BIU 381
+#define SRST_A_HDCP1 382
+#define SRST_H_HDCP1 383
+#define SRST_HDCP1 384
+#define SRST_H_SAI9_8CH 385
+#define SRST_M_SAI9_8CH 386
+#define SRST_M_SPDIF_TX4 387
+#define SRST_M_SPDIF_TX5 388
+
+#define SRST_GPU 389
+#define SRST_A_S_GPU_BIU 390
+#define SRST_A_M0_GPU_BIU 391
+#define SRST_P_GPU_BIU 392
+#define SRST_P_GPU_GRF 393
+#define SRST_GPU_PVTPLL 394
+#define SRST_P_PVTPLL_GPU 395
+
+#define SRST_A_CENTER_BIU 396
+#define SRST_A_DMA2DDR 397
+#define SRST_A_DDR_SHAREMEM 398
+#define SRST_A_DDR_SHAREMEM_BIU 399
+#define SRST_H_CENTER_BIU 400
+#define SRST_P_CENTER_GRF 401
+#define SRST_P_DMA2DDR 402
+#define SRST_P_SHAREMEM 403
+#define SRST_P_CENTER_BIU 404
+
+#define SRST_LINKSYM_HDMITXPHY0 405
+
+#define SRST_DP0_PIXELCLK 406
+#define SRST_PHY_DP0_TX 407
+#define SRST_DP1_PIXELCLK 408
+#define SRST_DP2_PIXELCLK 409
+
+#define SRST_H_VEPU1_BIU 410
+#define SRST_A_VEPU1_BIU 411
+#define SRST_H_VEPU1 412
+#define SRST_A_VEPU1 413
+#define SRST_VEPU1_CORE 414
+
+#define SRST_P_PHPPHY_CRU 415
+#define SRST_P_APB2ASB_SLV_CHIP_TOP 416
+#define SRST_P_PCIE2_COMBOPHY0 417
+#define SRST_P_PCIE2_COMBOPHY0_GRF 418
+#define SRST_P_PCIE2_COMBOPHY1 419
+#define SRST_P_PCIE2_COMBOPHY1_GRF 420
+
+#define SRST_PCIE0_PIPE_PHY 421
+#define SRST_PCIE1_PIPE_PHY 422
+
+#define SRST_H_CRYPTO_NS 423
+#define SRST_H_TRNG_NS 424
+#define SRST_P_OTPC_NS 425
+#define SRST_OTPC_NS 426
+
+#define SRST_P_HDPTX_GRF 427
+#define SRST_P_HDPTX_APB 428
+#define SRST_P_MIPI_DCPHY 429
+#define SRST_P_DCPHY_GRF 430
+#define SRST_P_BOT0_APB2ASB 431
+#define SRST_P_BOT1_APB2ASB 432
+#define SRST_USB2DEBUG 433
+#define SRST_P_CSIPHY_GRF 434
+#define SRST_P_CSIPHY 435
+#define SRST_P_USBPHY_GRF_0 436
+#define SRST_P_USBPHY_GRF_1 437
+#define SRST_P_USBDP_GRF 438
+#define SRST_P_USBDPPHY 439
+#define SRST_USBDP_COMBO_PHY_INIT 440
+
+#define SRST_USBDP_COMBO_PHY_CMN 441
+#define SRST_USBDP_COMBO_PHY_LANE 442
+#define SRST_USBDP_COMBO_PHY_PCS 443
+#define SRST_M_MIPI_DCPHY 444
+#define SRST_S_MIPI_DCPHY 445
+#define SRST_SCAN_CSIPHY 446
+#define SRST_P_VCCIO6_IOC 447
+#define SRST_OTGPHY_0 448
+#define SRST_OTGPHY_1 449
+#define SRST_HDPTX_INIT 450
+#define SRST_HDPTX_CMN 451
+#define SRST_HDPTX_LANE 452
+#define SRST_HDMITXHDP 453
+
+#define SRST_MPHY_INIT 454
+#define SRST_P_MPHY_GRF 455
+#define SRST_P_VCCIO7_IOC 456
+
+#define SRST_H_PMU1_BIU 457
+#define SRST_P_PMU1_NIU 458
+#define SRST_H_PMU_CM0_BIU 459
+#define SRST_PMU_CM0_CORE 460
+#define SRST_PMU_CM0_JTAG 461
+
+#define SRST_P_CRU_PMU1 462
+#define SRST_P_PMU1_GRF 463
+#define SRST_P_PMU1_IOC 464
+#define SRST_P_PMU1WDT 465
+#define SRST_T_PMU1WDT 466
+#define SRST_P_PMUTIMER 467
+#define SRST_PMUTIMER0 468
+#define SRST_PMUTIMER1 469
+#define SRST_P_PMU1PWM 470
+#define SRST_PMU1PWM 471
+
+#define SRST_P_I2C0 472
+#define SRST_I2C0 473
+#define SRST_S_UART1 474
+#define SRST_P_UART1 475
+#define SRST_PDM0 476
+#define SRST_H_PDM0 477
+
+#define SRST_M_PDM0 478
+#define SRST_H_VAD 479
+
+#define SRST_P_PMU0GRF 480
+#define SRST_P_PMU0IOC 481
+#define SRST_P_GPIO0 482
+#define SRST_DB_GPIO0 483
+
+#endif
diff --git a/include/keys/dns_resolver-type.h b/include/keys/dns_resolver-type.h
index 218ca22fb056..1b89088a2837 100644
--- a/include/keys/dns_resolver-type.h
+++ b/include/keys/dns_resolver-type.h
@@ -12,8 +12,4 @@
extern struct key_type key_type_dns_resolver;
-extern int request_dns_resolver_key(const char *description,
- const char *callout_info,
- char **data);
-
#endif /* _KEYS_DNS_RESOLVER_TYPE_H */
diff --git a/include/kunit/clk.h b/include/kunit/clk.h
new file mode 100644
index 000000000000..73bc99cefe7b
--- /dev/null
+++ b/include/kunit/clk.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_KUNIT_H
+#define _CLK_KUNIT_H
+
+struct clk;
+struct clk_hw;
+struct device;
+struct device_node;
+struct kunit;
+
+struct clk *
+clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id);
+struct clk *
+of_clk_get_kunit(struct kunit *test, struct device_node *np, int index);
+
+struct clk *
+clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id);
+struct clk *
+clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
+ const char *con_id);
+
+int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk);
+
+int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw);
+int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node,
+ struct clk_hw *hw);
+
+#endif
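These are test-managed variants of the usual clk consumer and provider calls: each helper registers a matching cleanup action (clk_put(), disable/unprepare, unregister) that runs when the test exits. A sketch of a consumer-side test, assuming a made-up connection id; clk_get_kunit() and clk_prepare_enable_kunit() are taken from the declarations above:

/* Hypothetical KUnit case using the managed clk helpers. */
#include <kunit/test.h>
#include <kunit/clk.h>

static void example_clk_test(struct kunit *test)
{
	struct clk *clk;

	/* Looked up now; the matching clk_put() runs at test teardown. */
	clk = clk_get_kunit(test, NULL, "example-con-id");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);

	/*
	 * Prepared and enabled now; disabled and unprepared
	 * automatically when the test finishes, even on failure.
	 */
	KUNIT_ASSERT_EQ(test, 0, clk_prepare_enable_kunit(test, clk));
}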
diff --git a/include/kunit/of.h b/include/kunit/of.h
new file mode 100644
index 000000000000..48d4e70c9666
--- /dev/null
+++ b/include/kunit/of.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_OF_H
+#define _KUNIT_OF_H
+
+#include <kunit/test.h>
+
+struct device_node;
+
+#ifdef CONFIG_OF
+
+void of_node_put_kunit(struct kunit *test, struct device_node *node);
+
+#else
+
+static inline
+void of_node_put_kunit(struct kunit *test, struct device_node *node)
+{
+ kunit_skip(test, "requires CONFIG_OF");
+}
+
+#endif /* !CONFIG_OF */
+
+#if defined(CONFIG_OF) && defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE)
+
+int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id);
+#else
+
+static inline int
+of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id)
+{
+ kunit_skip(test, "requires CONFIG_OF and CONFIG_OF_OVERLAY and CONFIG_OF_EARLY_FLATTREE for root node");
+ return -EINVAL;
+}
+
+#endif
+
+/**
+ * __of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() variant
+ * @test: test context
+ * @overlay_begin: start address of overlay to apply
+ * @overlay_end: end address of overlay to apply
+ *
+ * This is mostly internal API. See of_overlay_apply_kunit() for the wrapper
+ * that makes this easier to use.
+ *
+ * Similar to of_overlay_fdt_apply(), except the overlay is managed by the test
+ * case and is automatically removed with of_overlay_remove() after the test
+ * case concludes.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static inline int __of_overlay_apply_kunit(struct kunit *test,
+ u8 *overlay_begin,
+ const u8 *overlay_end)
+{
+ int unused;
+
+ return of_overlay_fdt_apply_kunit(test, overlay_begin,
+ overlay_end - overlay_begin,
+ &unused);
+}
+
+/**
+ * of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() for built-in overlays
+ * @test: test context
+ * @overlay_name: name of overlay to apply
+ *
+ * This macro is used to apply a device tree overlay built with the
+ * cmd_dt_S_dtbo rule in scripts/Makefile.lib that has been compiled into the
+ * kernel image or KUnit test module. The overlay is automatically removed when
+ * the test is finished.
+ *
+ * Unit tests that need device tree nodes should compile an overlay file with
+ * @overlay_name\.dtbo.o in their Makefile along with their unit test and then
+ * load the overlay during their test. The @overlay_name matches the filename
+ * of the overlay without the dtbo filename extension. If CONFIG_OF_OVERLAY is
+ * not enabled, the @test will be skipped.
+ *
+ * In the Makefile
+ *
+ * .. code-block:: none
+ *
+ * obj-$(CONFIG_OF_OVERLAY_KUNIT_TEST) += overlay_test.o kunit_overlay_test.dtbo.o
+ *
+ * In the test
+ *
+ * .. code-block:: c
+ *
+ * static void of_overlay_kunit_of_overlay_apply(struct kunit *test)
+ * {
+ * struct device_node *np;
+ *
+ * KUNIT_ASSERT_EQ(test, 0,
+ * of_overlay_apply_kunit(test, kunit_overlay_test));
+ *
+ * np = of_find_node_by_name(NULL, "test-kunit");
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np);
+ * of_node_put(np);
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+#define of_overlay_apply_kunit(test, overlay_name) \
+({ \
+ extern uint8_t __dtbo_##overlay_name##_begin[]; \
+ extern uint8_t __dtbo_##overlay_name##_end[]; \
+ \
+ __of_overlay_apply_kunit((test), \
+ __dtbo_##overlay_name##_begin, \
+ __dtbo_##overlay_name##_end); \
+})
+
+#endif
diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h
new file mode 100644
index 000000000000..0fc0999d2420
--- /dev/null
+++ b/include/kunit/platform_device.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_PLATFORM_DRIVER_H
+#define _KUNIT_PLATFORM_DRIVER_H
+
+struct completion;
+struct kunit;
+struct platform_device;
+struct platform_driver;
+
+struct platform_device *
+kunit_platform_device_alloc(struct kunit *test, const char *name, int id);
+int kunit_platform_device_add(struct kunit *test, struct platform_device *pdev);
+
+int kunit_platform_device_prepare_wait_for_probe(struct kunit *test,
+ struct platform_device *pdev,
+ struct completion *x);
+
+int kunit_platform_driver_register(struct kunit *test,
+ struct platform_driver *drv);
+
+#endif
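As with the clk helpers, these tie a platform device or driver to the lifetime of the test, so registration is undone automatically on teardown. A sketch under the assumption that the declarations above behave like their non-kunit counterparts; the device name is a made-up placeholder:

/* Hypothetical KUnit case using the managed platform device API. */
#include <kunit/test.h>
#include <kunit/platform_device.h>
#include <linux/platform_device.h>

static void example_pdev_test(struct kunit *test)
{
	struct platform_device *pdev;

	pdev = kunit_platform_device_alloc(test, "kunit-dummy",
					   PLATFORM_DEVID_NONE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);

	/* Added to the platform bus now, removed again at teardown. */
	KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
}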
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 1655c4c23a78..4d5ee84c468b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -363,6 +363,7 @@ void acpi_unregister_gsi (u32 gsi);
struct pci_dev;
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin);
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 8c61ccd161ba..1f0a9ff23a2c 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -70,7 +70,7 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
/*
* When percpu variables are required to be defined as weak, static percpu
* variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
- * Instead we will accound all module allocations to a single counter.
+ * Instead we will account all module allocations to a single counter.
*/
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
@@ -137,7 +137,16 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
+ alloc_tag_add_check(ref, tag);
+ if (!ref || !tag)
+ return;
+
ref->ct = &tag->ct;
+}
+
+static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ __alloc_tag_ref_set(ref, tag);
/*
 * We need to increment the call counter every time we have a new
* allocation or when we split a large allocation into smaller ones.
@@ -147,22 +156,9 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag
this_cpu_inc(tag->counters->calls);
}
-static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
-{
- alloc_tag_add_check(ref, tag);
- if (!ref || !tag)
- return;
-
- __alloc_tag_ref_set(ref, tag);
-}
-
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
- alloc_tag_add_check(ref, tag);
- if (!ref || !tag)
- return;
-
- __alloc_tag_ref_set(ref, tag);
+ alloc_tag_ref_set(ref, tag);
this_cpu_add(tag->counters->bytes, bytes);
}
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index e4004d1e6725..b3643de9931d 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -61,14 +61,8 @@ int attribute_container_device_trigger_safe(struct device *dev,
int (*undo)(struct attribute_container *,
struct device *,
struct device *));
-void attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *));
int attribute_container_add_attrs(struct device *classdev);
int attribute_container_add_class_device(struct device *classdev);
-int attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev);
void attribute_container_remove_attrs(struct device *classdev);
void attribute_container_class_device_del(struct device *classdev);
struct attribute_container *attribute_container_classdev_to_container(struct device *);
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index 662b8ae54b6a..31762324bcc9 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -271,6 +271,6 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
struct auxiliary_device *auxiliary_find_device(struct device *start,
const void *data,
- int (*match)(struct device *dev, const void *data));
+ device_match_t match);
#endif /* _AUXILIARY_BUS_H_ */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 68da8dba5162..dba41b65ae0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -203,7 +203,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
/* PCIE Root Capability Register bits (Host mode only) */
-#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
+#define BCMA_CORE_PCI_RC_RRS_VISIBILITY 0x0001
struct bcma_drv_pci;
struct bcma_bus;
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index d3b66d77df7a..262b6596eca5 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -203,12 +203,12 @@ unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
* the bit offset of all zero areas this function finds is multiples of that
* power of 2. A @align_mask of 0 means no alignment is required.
*/
-static inline unsigned long
-bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+static __always_inline
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
@@ -228,7 +228,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
-static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -238,7 +238,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
memset(dst, 0, len);
}
-static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -248,8 +248,8 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
memset(dst, 0xff, len);
}
-static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -262,8 +262,8 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
/*
* Copy bitmap and clear tail bits in last word.
*/
-static inline void bitmap_copy_clear_tail(unsigned long *dst,
- const unsigned long *src, unsigned int nbits)
+static __always_inline
+void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % BITS_PER_LONG)
@@ -318,16 +318,18 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif
-static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
-static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
@@ -335,8 +337,9 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
-static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
@@ -344,16 +347,17 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
-static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src);
@@ -368,8 +372,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
-static inline bool bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -388,10 +392,9 @@ static inline bool bitmap_equal(const unsigned long *src1,
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
-static inline bool bitmap_or_equal(const unsigned long *src1,
- const unsigned long *src2,
- const unsigned long *src3,
- unsigned int nbits)
+static __always_inline
+bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2,
+ const unsigned long *src3, unsigned int nbits)
{
if (!small_const_nbits(nbits))
return __bitmap_or_equal(src1, src2, src3, nbits);
@@ -399,9 +402,8 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline bool bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2,
- unsigned int nbits)
+static __always_inline
+bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -409,8 +411,8 @@ static inline bool bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline bool bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -418,7 +420,8 @@ static inline bool bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
+static __always_inline
+bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -426,7 +429,8 @@ static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
+static __always_inline
+bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -460,8 +464,8 @@ unsigned long bitmap_weight_andnot(const unsigned long *src1,
return __bitmap_weight_andnot(src1, src2, nbits);
}
-static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
@@ -476,8 +480,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
__bitmap_set(map, start, nbits);
}
-static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
@@ -492,8 +496,9 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
__bitmap_clear(map, start, nbits);
}
-static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
@@ -501,8 +506,9 @@ static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *s
__bitmap_shift_right(dst, src, shift, nbits);
}
-static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
@@ -510,11 +516,12 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr
__bitmap_shift_left(dst, src, shift, nbits);
}
-static inline void bitmap_replace(unsigned long *dst,
- const unsigned long *old,
- const unsigned long *new,
- const unsigned long *mask,
- unsigned int nbits)
+static __always_inline
+void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*old & ~(*mask)) | (*new & *mask);
@@ -557,8 +564,9 @@ static inline void bitmap_replace(unsigned long *dst,
* bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
* See bitmap_scatter() for details related to this relationship.
*/
-static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+static __always_inline
+void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
@@ -611,8 +619,9 @@ static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
* bitmap_scatter(res, src, mask, n) and a call to
* bitmap_scatter(res, result, mask, n) will lead to the same res value.
*/
-static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+static __always_inline
+void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
@@ -623,9 +632,9 @@ static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
__assign_bit(n++, dst, test_bit(bit, src));
}
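
As the surrounding comments state, bitmap_gather() undoes bitmap_scatter() over the same mask; a hedged round-trip sketch, with all names local to the example:

	DECLARE_BITMAP(src, 8)  = { 0x07 };	/* bits 0-2 set */
	DECLARE_BITMAP(mask, 8) = { 0x36 };	/* positions 1, 2, 4, 5 */
	DECLARE_BITMAP(mid, 8);
	DECLARE_BITMAP(out, 8);

	bitmap_scatter(mid, src, mask, 8);	/* mid = 0x16 (bits 1, 2, 4) */
	bitmap_gather(out, mid, mask, 8);	/* out = 0x07, src recovered */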
-static inline void bitmap_next_set_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
+static __always_inline
+void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
+ unsigned int *re, unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
@@ -640,7 +649,8 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
* This is the complement to __bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*/
-static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+static __always_inline
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
bitmap_clear(bitmap, pos, BIT(order));
}
@@ -656,7 +666,8 @@ static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos
* Returns: 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
-static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+static __always_inline
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
unsigned int len = BIT(order);
@@ -680,7 +691,8 @@ static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos
* Returns: the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
-static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+static __always_inline
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
unsigned int pos, end; /* scans bitmap by regions of size order */
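
The three region helpers compose as a find/allocate/release trio; a hedged sketch:

	DECLARE_BITMAP(map, 32);
	int pos;

	/* Carve out an aligned order-2 (4-bit) region. */
	pos = bitmap_find_free_region(map, 32, 2);
	if (pos < 0)
		return pos;
	/* ... bits pos..pos+3 are now owned by the caller ... */
	bitmap_release_region(map, pos, 2);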
@@ -734,7 +746,7 @@ static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bi
 * That is, ``(u32 *)(&val)[0]`` gets the upper 32 bits,
 * but we expect the lower 32 bits of the u64.
*/
-static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
bitmap_from_arr64(dst, &mask, 64);
}
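
To make the endianness note above concrete: on a 32-bit big-endian kernel the first 32-bit word of a u64 in memory is its high half, so a raw cast picks up the wrong word, while bitmap_from_u64() stores the low half in word 0 regardless. A sketch:

	u64 val = 0x89abcdef01234567ULL;
	DECLARE_BITMAP(dst, 64);

	bitmap_from_u64(dst, val);
	/* dst[0] now holds bits 0..31 (0x01234567 on a 32-bit kernel)
	 * on both endiannesses; a bare (u32 *)&val dereference would
	 * have yielded 0x89abcdef on big-endian.
	 */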
@@ -749,9 +761,8 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
* @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
* value is undefined.
*/
-static inline unsigned long bitmap_read(const unsigned long *map,
- unsigned long start,
- unsigned long nbits)
+static __always_inline
+unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
{
size_t index = BIT_WORD(start);
unsigned long offset = start % BITS_PER_LONG;
@@ -784,8 +795,9 @@ static inline unsigned long bitmap_read(const unsigned long *map,
*
* For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
-static inline void bitmap_write(unsigned long *map, unsigned long value,
- unsigned long start, unsigned long nbits)
+static __always_inline
+void bitmap_write(unsigned long *map, unsigned long value,
+ unsigned long start, unsigned long nbits)
{
size_t index;
unsigned long offset;
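
Per the contracts quoted above (valid for 1 <= nbits <= BITS_PER_LONG), a value written at an arbitrary offset can be read back even across a word boundary; a hedged sketch:

	DECLARE_BITMAP(map, 128);
	unsigned long v;

	bitmap_write(map, 0x5a5, 60, 11);	/* spans the 64-bit word boundary */
	v = bitmap_read(map, 60, 11);		/* v == 0x5a5 */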
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 0eb24d21aac2..60044b608817 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -36,4 +36,19 @@
#define GENMASK_ULL(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __GENMASK_U128() depends on _BIT128() which would not work
+ * in the asm code, as it shifts an 'unsigned __init128' data
+ * type instead of direct representation of 128 bit constants
+ * such as long and unsigned long. The fundamental problem is
+ * that a 128 bit constant will get silently truncated by the
+ * gcc compiler.
+ */
+#define GENMASK_U128(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
+#endif
+
#endif /* __LINUX_BITS_H */
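
GENMASK_U128() mirrors GENMASK()/GENMASK_ULL() for 128-bit masks and, per the guard above, is available to C code only; a hedged sketch:

	/* Contiguous mask over bits 96..127; the expression would be
	 * silently truncated if built from plain long shifts, which is
	 * exactly what the _BIT128() helper avoids.
	 */
	unsigned __int128 hi = GENMASK_U128(127, 96);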
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index de98049b7ded..676f8f860c47 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -25,9 +25,10 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes, u32 seed);
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
@@ -96,12 +97,18 @@ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
{
return 0;
}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
+static inline int blk_rq_map_integrity_sg(struct request *rq,
struct scatterlist *s)
{
return 0;
}
+static inline int blk_rq_integrity_map_user(struct request *rq,
+ void __user *ubuf,
+ ssize_t bytes,
+ u32 seed)
+{
+ return -EINVAL;
+}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
return NULL;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8d304b1d16b1..4fecf46ef681 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -149,10 +149,7 @@ struct request {
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
-
-#ifdef CONFIG_BLK_DEV_INTEGRITY
unsigned short nr_integrity_segments;
-#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct bio_crypt_ctx *crypt_ctx;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 413ebdff974b..dce7615c35e7 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -251,11 +251,9 @@ struct bio {
struct bio_crypt_ctx *bi_crypt_context;
#endif
- union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload *bi_integrity; /* data integrity */
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
- };
unsigned short bi_vcnt; /* how many bio_vec's */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 643c9020a35a..50c3b959da28 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -968,8 +968,6 @@ static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
/*
* Access functions for manipulating queue properties
*/
-extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3b94ec161e8c..19d8ca8ac960 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -294,6 +294,7 @@ struct bpf_map {
* same prog type, JITed flag and xdp_has_frags flag.
*/
struct {
+ const struct btf_type *attach_func_proto;
spinlock_t lock;
enum bpf_prog_type type;
bool jited;
@@ -694,6 +695,11 @@ enum bpf_type_flag {
/* DYNPTR points to xdp_buff */
DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+ /* Memory must be aligned on some architectures, used in combination with
+ * MEM_FIXED_SIZE.
+ */
+ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
+
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
@@ -731,8 +737,6 @@ enum bpf_arg_type {
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
- ARG_PTR_TO_INT, /* pointer to int */
- ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
@@ -743,7 +747,7 @@ enum bpf_arg_type {
ARG_PTR_TO_STACK, /* pointer to stack */
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
- ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
+ ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */
ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
__BPF_ARG_TYPE_MAX,
@@ -807,6 +811,12 @@ struct bpf_func_proto {
bool gpl_only;
bool pkt_access;
bool might_sleep;
+ /* set to true if helper follows contract for llvm
+ * attribute bpf_fastcall:
+ * - void functions do not scratch r0
+ * - functions taking N arguments scratch only registers r1-rN
+ */
+ bool allow_fastcall;
enum bpf_return_type ret_type;
union {
struct {
@@ -919,6 +929,7 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
+ bool is_ldsx;
union {
int ctx_field_size;
struct {
@@ -927,6 +938,7 @@ struct bpf_insn_access_aux {
};
};
struct bpf_verifier_log *log; /* for verbose logs */
+ bool is_retval; /* is accessing function return value ? */
};
static inline void
@@ -965,6 +977,8 @@ struct bpf_verifier_ops {
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
+ int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
+ s16 ctx_stack_off);
int (*gen_ld_abs)(const struct bpf_insn *orig,
struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
@@ -1795,6 +1809,7 @@ struct bpf_struct_ops_common_value {
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
@@ -1851,6 +1866,10 @@ static inline void bpf_module_put(const void *data, struct module *owner)
{
module_put(owner);
}
+static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+{
+ return -ENOTSUPP;
+}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
void *key,
void *value)
@@ -2227,7 +2246,16 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
-struct bpf_map *__bpf_map_get(struct fd f);
+
+static inline struct bpf_map *__bpf_map_get(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
+
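A caller resolving a map from a user-supplied fd with the now-inlined __bpf_map_get() looks roughly like the sketch below (a sketch assuming the scoped CLASS(fd) helper; bpf_map_inc() keeps the map alive after the fd is released):

	CLASS(fd, f)(ufd);	/* auto-released at end of scope */
	struct bpf_map *map = __bpf_map_get(f);

	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_inc(map);	/* hold a reference beyond the fd's scope */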
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
@@ -3184,7 +3212,9 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
@@ -3192,6 +3222,7 @@ extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
+extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index 1de7ece5d36d..aefcd6564251 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/lsm_hooks.h>
#ifdef CONFIG_BPF_LSM
@@ -45,6 +46,8 @@ void bpf_inode_storage_free(struct inode *inode);
void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range);
#else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
@@ -78,6 +81,11 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
{
}
+static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7b776dae36e5..4513372c5bc8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -23,6 +23,8 @@
* (in the "-8,-16,...,-512" form)
*/
#define TMP_STR_BUF_LEN 320
+/* Patch buffer size */
+#define INSN_BUF_SIZE 32
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
@@ -371,6 +373,10 @@ struct bpf_jmp_history_entry {
u32 prev_idx : 22;
/* special flags, e.g., whether insn is doing register stack spill/load */
u32 flags : 10;
+ /* additional registers that need precision tracking when this
+ * jump is backtracked, vector of six 10-bit records
+ */
+ u64 linked_regs;
};
/* Maximum number of register states that can exist at once */
@@ -572,6 +578,14 @@ struct bpf_insn_aux_data {
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
+ /* true if STX or LDX instruction is a part of a spill/fill
+ * pattern for a bpf_fastcall call.
+ */
+ u8 fastcall_pattern:1;
+ /* for CALL instructions, a number of spill/fill pairs in the
+ * bpf_fastcall pattern.
+ */
+ u8 fastcall_spills_num:3;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
@@ -641,6 +655,10 @@ struct bpf_subprog_info {
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
u16 stack_extra;
+ /* offsets in range [stack_depth .. fastcall_stack_off)
+ * are used for bpf_fastcall spills and fills.
+ */
+ s16 fastcall_stack_off;
bool has_tail_call: 1;
bool tail_call_reachable: 1;
bool has_ld_abs: 1;
@@ -648,6 +666,8 @@ struct bpf_subprog_info {
bool is_async_cb: 1;
bool is_exception_cb: 1;
bool args_cached: 1;
+ /* true if bpf_fastcall stack region is used by functions that can't be inlined */
+ bool keep_fastcall_stack: 1;
u8 arg_cnt;
struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
@@ -762,6 +782,8 @@ struct bpf_verifier_env {
* e.g., in reg_type_str() to generate reg_type string
*/
char tmp_str_buf[TMP_STR_BUF_LEN];
+ struct bpf_insn insn_buf[INSN_BUF_SIZE];
+ struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
};
static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
@@ -905,6 +927,11 @@ static inline bool type_is_sk_pointer(enum bpf_reg_type type)
type == PTR_TO_XDP_SOCK;
}
+static inline bool type_may_be_null(u32 type)
+{
+ return type & PTR_MAYBE_NULL;
+}
+
static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
env->scratched_regs |= 1U << regno;
diff --git a/include/linux/btf.h b/include/linux/btf.h
index cffb43133c68..b8a583194c4a 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -580,6 +580,7 @@ bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
bool btf_types_are_same(const struct btf *btf1, u32 id1,
const struct btf *btf2, u32 id2);
+int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
@@ -654,6 +655,10 @@ static inline bool btf_types_are_same(const struct btf *btf1, u32 id1,
{
return false;
}
+static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
+{
+ return -EOPNOTSUPP;
+}
#endif
static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 20aa3c2d89f7..014a88c41073 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -7,8 +7,8 @@
#define BUILD_ID_SIZE_MAX 20
struct vm_area_struct;
-int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
- __u32 *size);
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index f66f6aac74f6..d7941478158c 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -449,8 +449,6 @@ extern int ceph_osdc_init(struct ceph_osd_client *osdc,
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
-extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
- struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index eb0f6f349496..47ae4c4d924c 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -172,7 +172,11 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* siblings list anchored at the parent's ->children */
+ /*
+ * siblings list anchored at the parent's ->children
+ *
+ * linkage is protected by cgroup_mutex or RCU
+ */
struct list_head sibling;
struct list_head children;
@@ -789,6 +793,11 @@ struct cgroup_subsys {
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+struct cgroup_of_peak {
+ unsigned long value;
+ struct list_head list;
+};
+
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c60ba0ab1462..f8ef47f8a634 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/nodemask.h>
+#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
@@ -28,8 +29,6 @@
struct kernel_clone_args;
-#ifdef CONFIG_CGROUPS
-
/*
* All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
@@ -39,6 +38,8 @@ struct kernel_clone_args;
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
+#ifdef CONFIG_CGROUPS
+
enum {
CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
@@ -854,4 +855,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index d9e613803df1..038b2d523bf8 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -4,6 +4,142 @@
#include <linux/compiler.h>
+/**
+ * DOC: scope-based cleanup helpers
+ *
+ * The "goto error" pattern is notorious for introducing subtle resource
+ * leaks. It is tedious and error prone to add new resource acquisition
+ * constraints into code paths that already have several unwind
+ * conditions. The "cleanup" helpers enable the compiler to help with
+ * this tedium and can aid in maintaining LIFO (last in first out)
+ * unwind ordering to avoid unintentional leaks.
+ *
+ * As drivers make up the majority of the kernel code base, here is an
+ * example of using these helpers to clean up PCI drivers. The target of
+ * the cleanups are occasions where a goto is used to unwind a device
+ * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+ * before returning.
+ *
+ * The DEFINE_FREE() macro can arrange for PCI device references to be
+ * dropped when the associated variable goes out of scope::
+ *
+ * DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ * ...
+ * struct pci_dev *dev __free(pci_dev_put) =
+ * pci_get_slot(parent, PCI_DEVFN(0, 0));
+ *
+ * The above will automatically call pci_dev_put() if @dev is non-NULL
+ * when @dev goes out of scope (automatic variable scope). If a function
+ * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+ * freeing it) on success, it can do::
+ *
+ * return no_free_ptr(dev);
+ *
+ * ...or::
+ *
+ * return_ptr(dev);
+ *
+ * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+ * dropped when the scope where guard() is invoked ends::
+ *
+ * DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ * ...
+ * guard(pci_dev)(dev);
+ *
+ * The lifetime of the lock obtained by the guard() helper follows the
+ * scope of automatic variable declaration. Take the following example::
+ *
+ * func(...)
+ * {
+ * if (...) {
+ * ...
+ * guard(pci_dev)(dev); // pci_dev_lock() invoked here
+ * ...
+ * } // <- implied pci_dev_unlock() triggered here
+ * }
+ *
+ * Observe that the lock is held for the remainder of the "if ()" block,
+ * not the remainder of "func()".
+ *
+ * Now, when a function uses both __free() and guard(), or multiple
+ * instances of __free(), the LIFO order of variable definition
+ * matters. GCC documentation says:
+ *
+ * "When multiple variables in the same scope have cleanup attributes,
+ * at exit from the scope their associated cleanup functions are run in
+ * reverse order of definition (last defined, first cleanup)."
+ *
+ * When the unwind order matters it requires that variables be defined
+ * mid-function scope rather than at the top of the function. Take the
+ * following example and notice the bug highlighted by "!!"::
+ *
+ * LIST_HEAD(list);
+ * DEFINE_MUTEX(lock);
+ *
+ * struct object {
+ * struct list_head node;
+ * };
+ *
+ * static struct object *alloc_add(void)
+ * {
+ * struct object *obj;
+ *
+ * lockdep_assert_held(&lock);
+ * obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ * if (obj) {
+ * INIT_LIST_HEAD(&obj->node);
+ * list_add(&obj->node, &list);
+ * }
+ * return obj;
+ * }
+ *
+ * static void remove_free(struct object *obj)
+ * {
+ * lockdep_assert_held(&lock);
+ * list_del(&obj->node);
+ * kfree(obj);
+ * }
+ *
+ * DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+ * static int init(void)
+ * {
+ * struct object *obj __free(remove_free) = NULL;
+ * int err;
+ *
+ * guard(mutex)(&lock);
+ * obj = alloc_add();
+ *
+ * if (!obj)
+ * return -ENOMEM;
+ *
+ * err = other_init(obj);
+ * if (err)
+ * return err; // remove_free() called without the lock!!
+ *
+ * no_free_ptr(obj);
+ * return 0;
+ * }
+ *
+ * That bug is fixed by changing init() to call guard() and define +
+ * initialize @obj in this order::
+ *
+ * guard(mutex)(&lock);
+ * struct object *obj __free(remove_free) = alloc_add();
+ *
+ * Given that the "__free(...) = NULL" pattern for variables defined at
+ * the top of the function poses this potential interdependency problem,
+ * the recommendation is to always define and assign variables in one
+ * statement and not group variable definitions at the top of the
+ * function when __free() is used.
+ *
+ * Lastly, given that the benefit of cleanup helpers is removal of
+ * "goto", and that the "goto" statement can jump between scopes, the
+ * expectation is that usage of "goto" and cleanup helpers is never
+ * mixed in the same function. I.e. for a given routine, convert all
+ * resources that need a "goto" cleanup to scope-based cleanup, or
+ * convert none of them.
+ */
+
/*
* DEFINE_FREE(name, type, free):
* simple helper macro that defines the required wrapper for a __free()
@@ -98,7 +234,7 @@ const volatile void * __must_check_fn(const volatile void *val)
* DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
*
* CLASS(fdget, f)(fd);
- * if (!f.file)
+ * if (!fd_file(f))
* return -EBADF;
*
* // use 'f' without concern
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 4a537260f655..7e43caabb54b 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -394,6 +394,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
NULL, (flags), (fixed_rate), 0, 0, true)
/**
+ * devm_clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0, true)
+/**
* clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
* the clock framework
* @dev: device that is registering this clock
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0fa56d672532..851a0f2cf42c 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -641,6 +641,32 @@ struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
/**
+ * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
+ * clk_set_rate() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ * @rate: new clock rate
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared and enabled, and its rate has been set.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate);
+
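A hedged probe-time sketch of the new helper; the consumer ID and rate below are made up for illustration:

	struct clk *clk;

	clk = devm_clk_get_optional_enabled_with_rate(dev, "ref", 19200000);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	/* clk may be NULL here (the optional dummy); no further cleanup
	 * is needed either way -- it is device-managed.
	 */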
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -982,6 +1008,13 @@ static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
return NULL;
}
+static inline struct clk *
+devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
+ unsigned long rate)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 9db877506ea8..d15b64f51336 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -52,4 +52,20 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
extern void cma_reserve_pages_on_error(struct cma *cma);
+
+#ifdef CONFIG_CMA
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
+bool cma_free_folio(struct cma *cma, const struct folio *folio);
+#else
+static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+ return false;
+}
+#endif
+
#endif
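
A hedged sketch pairing the new folio-granular CMA entry points; the cma pointer is assumed to come from the usual declaration/initialisation paths:

	struct folio *folio = cma_alloc_folio(cma, 4, GFP_KERNEL);

	if (!folio)	/* also the CONFIG_CMA=n stub's only outcome */
		return -ENOMEM;
	/* ... use the order-4 folio ... */
	if (!cma_free_folio(cma, folio))
		pr_warn("folio did not belong to this CMA area\n");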
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index ec55bcce4146..4d4e23b6e3e7 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -133,7 +133,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
+#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
#else /* !CONFIG_OBJTOOL */
#define annotate_reachable()
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index edeb8532ce0f..45e598fe3476 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -42,7 +42,7 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len);
-extern int do_coredump(const kernel_siginfo_t *siginfo);
+extern void do_coredump(const kernel_siginfo_t *siginfo);
/*
* Logging for the coredump code, ratelimited.
@@ -62,11 +62,7 @@ extern int do_coredump(const kernel_siginfo_t *siginfo);
#define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__)
#else
-static inline int do_coredump(const kernel_siginfo_t *siginfo)
-{
- /* Coredump support is not available, can't fail. */
- return 0;
-}
+static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#define coredump_report(...)
#define coredump_report_failure(...)
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 51ac441a37c3..89b0ac0014b0 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -49,12 +49,21 @@
* Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
* Used to associate a CPU with the CoreSight Trace ID.
* [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
- * [59:08] - Unused (SBZ)
- * [63:60] - Version
+ * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
+ * Added in minor version 1.
+ * [55:40] - Unused (SBZ)
+ * [59:56] - Minor Version - previously existing fields are compatible with
+ * all minor versions.
+ * [63:60] - Major Version - previously existing fields mean different things
+ * in new major versions.
*/
#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
-#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
+#define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8)
-#define CS_AUX_HW_ID_CURR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56)
+#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
+
+#define CS_AUX_HW_ID_MAJOR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION 1
#endif
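
The versioned payload above composes with the standard bitfield helpers; a hedged sketch (assumes <linux/bitfield.h>, with trace_id and sink_id as stand-in variables):

	u64 hw_id;

	hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK,
			   CS_AUX_HW_ID_MAJOR_VERSION) |
		FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK,
			   CS_AUX_HW_ID_MINOR_VERSION) |
		FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, sink_id) |
		FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);

	/* and on the decode side: */
	u8 id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);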
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f09ace92176e..c13342594278 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -218,6 +218,24 @@ struct coresight_sysfs_link {
const char *target_name;
};
+/* architecturally we have 128 IDs, some of which are reserved */
+#define CORESIGHT_TRACE_IDS_MAX 128
+
+/**
+ * struct coresight_trace_id_map - Trace ID map
+ *
+ * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
+ * Initialised so that the reserved IDs are permanently marked as
+ * in use.
+ * @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
+ */
+struct coresight_trace_id_map {
+ DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
+ atomic_t __percpu *cpu_map;
+ atomic_t perf_cs_etm_session_active;
+ spinlock_t lock;
+};
+
/**
* struct coresight_device - representation of a device as used by the framework
* @pdata: Platform data with device connections associated to this device.
@@ -271,6 +289,7 @@ struct coresight_device {
bool sysfs_sink_activated;
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
+ struct coresight_trace_id_map perf_sink_id_map;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
@@ -365,7 +384,7 @@ struct coresight_ops_link {
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode);
+ enum cs_mode mode, struct coresight_trace_id_map *id_map);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 53158de44b83..9278a50d514f 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -30,7 +30,7 @@
extern unsigned int nr_cpu_ids;
#endif
-static inline void set_nr_cpu_ids(unsigned int nr)
+static __always_inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
WARN_ON(nr != nr_cpu_ids);
@@ -149,7 +149,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
*
* Return: >= nr_cpu_ids if no cpus set.
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -160,7 +160,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if all cpus are set.
*/
-static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -172,7 +172,7 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
-static inline
+static __always_inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
@@ -186,7 +186,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
*
* Return: >= nr_cpu_ids if no cpus set in all.
*/
-static inline
+static __always_inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
const struct cpumask *srcp2,
const struct cpumask *srcp3)
@@ -201,7 +201,7 @@ unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
*
* Return: >= nr_cpumask_bits if no CPUs set.
*/
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -213,7 +213,7 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no further cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
@@ -229,7 +229,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no further cpus unset.
*/
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -239,18 +240,21 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+static __always_inline
+unsigned int cpumask_local_spread(unsigned int i, int node)
{
return 0;
}
-static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return cpumask_first_and(src1p, src2p);
}
-static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
@@ -269,9 +273,9 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp);
*
* Return: >= nr_cpu_ids if no further cpus set in both.
*/
-static inline
+static __always_inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
- const struct cpumask *src2p)
+ const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -291,7 +295,7 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
#if NR_CPUS == 1
-static inline
+static __always_inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
cpumask_check(start);
@@ -394,7 +398,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
* Often used to find any cpu but smp_processor_id() in a mask.
* Return: >= nr_cpu_ids if no cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
@@ -414,7 +418,7 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
*
* Returns >= nr_cpu_ids if no cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
const struct cpumask *mask2,
unsigned int cpu)
@@ -436,7 +440,8 @@ unsigned int cpumask_any_and_but(const struct cpumask *mask1,
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}
@@ -449,7 +454,7 @@ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *s
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline
+static __always_inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
@@ -465,7 +470,7 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline
+static __always_inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
@@ -508,12 +513,14 @@ unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -557,7 +564,8 @@ static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp,
*
* Return: true if @cpu is set in @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -571,7 +579,8 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -585,7 +594,8 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -594,7 +604,7 @@ static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_setall(struct cpumask *dstp)
+static __always_inline void cpumask_setall(struct cpumask *dstp)
{
if (small_const_nbits(small_cpumask_bits)) {
cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
@@ -607,7 +617,7 @@ static inline void cpumask_setall(struct cpumask *dstp)
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear(struct cpumask *dstp)
+static __always_inline void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
@@ -620,9 +630,9 @@ static inline void cpumask_clear(struct cpumask *dstp)
*
* Return: false if *@dstp is empty, else returns true
*/
-static inline bool cpumask_and(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -634,8 +644,9 @@ static inline bool cpumask_and(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -647,9 +658,9 @@ static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_xor(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -663,9 +674,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
*
* Return: false if *@dstp is empty, else returns true
*/
-static inline bool cpumask_andnot(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -678,8 +689,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp,
*
* Return: true if the cpumasks are equal, false if not
*/
-static inline bool cpumask_equal(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -694,9 +705,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* Return: true if first cpumask ORed with second cpumask == third cpumask,
* otherwise false
*/
-static inline bool cpumask_or_equal(const struct cpumask *src1p,
- const struct cpumask *src2p,
- const struct cpumask *src3p)
+static __always_inline
+bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
+ const struct cpumask *src3p)
{
return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
cpumask_bits(src3p), small_cpumask_bits);
@@ -710,8 +721,8 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p,
* Return: true if first cpumask ANDed with second cpumask is non-empty,
* otherwise false
*/
-static inline bool cpumask_intersects(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -724,8 +735,8 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
*
* Return: true if *@src1p is a subset of *@src2p, else returns false
*/
-static inline bool cpumask_subset(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -737,7 +748,7 @@ static inline bool cpumask_subset(const struct cpumask *src1p,
*
* Return: true if srcp is empty (has no bits set), else false
*/
-static inline bool cpumask_empty(const struct cpumask *srcp)
+static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -748,7 +759,7 @@ static inline bool cpumask_empty(const struct cpumask *srcp)
*
* Return: true if srcp is full (has all bits set), else false
*/
-static inline bool cpumask_full(const struct cpumask *srcp)
+static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}
@@ -759,7 +770,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
*
* Return: count of bits set in *srcp
*/
-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -771,8 +782,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
@@ -784,8 +795,9 @@ static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
{
return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
@@ -796,8 +808,8 @@ static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_right(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
small_cpumask_bits);
@@ -809,8 +821,8 @@ static inline void cpumask_shift_right(struct cpumask *dstp,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_left(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
nr_cpumask_bits);
@@ -821,8 +833,8 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
* @dstp: the result
* @srcp: the input cpumask
*/
-static inline void cpumask_copy(struct cpumask *dstp,
- const struct cpumask *srcp)
+static __always_inline
+void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
@@ -858,8 +870,8 @@ static inline void cpumask_copy(struct cpumask *dstp,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -872,8 +884,8 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parselist_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
@@ -886,7 +898,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -898,7 +910,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
*
* Return: -errno, or 0 for success.
*/
-static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -908,7 +920,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*
* Return: size to allocate for a &struct cpumask in bytes
*/
-static inline unsigned int cpumask_size(void)
+static __always_inline unsigned int cpumask_size(void)
{
return bitmap_size(large_cpumask_bits);
}
@@ -920,7 +932,7 @@ static inline unsigned int cpumask_size(void)
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-static inline
+static __always_inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
@@ -938,13 +950,13 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
*
* Return: %true if allocation succeeded, %false if not
*/
-static inline
+static __always_inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
-static inline
+static __always_inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
@@ -954,7 +966,7 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return mask != NULL;
}
@@ -964,43 +976,43 @@ static inline bool cpumask_available(cpumask_var_t mask)
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
-static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
-static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
-static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
-static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
-static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
-static inline void free_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}
-static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return true;
}
@@ -1058,7 +1070,7 @@ void set_cpu_online(unsigned int cpu, bool online);
((struct cpumask *)(1 ? (bitmap) \
: (void *)sizeof(__check_is_bitmap(bitmap))))
-static inline int __check_is_bitmap(const unsigned long *bitmap)
+static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
@@ -1073,7 +1085,7 @@ static inline int __check_is_bitmap(const unsigned long *bitmap)
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
@@ -1100,32 +1112,32 @@ static __always_inline unsigned int num_online_cpus(void)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_online_mask);
}
-static inline bool cpu_enabled(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_enabled_mask);
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_possible_mask);
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_present_mask);
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_active_mask);
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_dying_mask);
}
@@ -1138,32 +1150,32 @@ static inline bool cpu_dying(unsigned int cpu)
#define num_present_cpus() 1U
#define num_active_cpus() 1U
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_enabled(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return false;
}
@@ -1197,7 +1209,7 @@ static inline bool cpu_dying(unsigned int cpu)
* Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
-static inline ssize_t
+static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
@@ -1220,9 +1232,9 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* Return: the number of bytes copied, excluding the
* terminating '\0'.
*/
-static inline ssize_t
-cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+static __always_inline
+ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
@@ -1242,9 +1254,9 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
* Return: the number of bytes copied, excluding the
* terminating '\0'.
*/
-static inline ssize_t
-cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+static __always_inline
+ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
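
The cpumask parsing and allocation helpers above change only in their inlining, not in behaviour. A minimal usage sketch of the parse/alloc/free cycle (the "0-3,8" list is purely illustrative):

	/* Sketch only: parse a CPU list into a dynamically allocated mask. */
	cpumask_var_t mask;
	int err;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = cpulist_parse("0-3,8", mask);	/* sets CPUs 0,1,2,3 and 8 */
	if (!err)
		pr_info("%u CPUs requested\n", cpumask_weight(mask));

	free_cpumask_var(mask);
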
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 27c546bfc6d4..a67f2c4940e9 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -233,7 +233,6 @@ struct damos_quota {
unsigned long charge_addr_from;
/* For prioritization */
- unsigned long histogram[DAMOS_MAX_SCORE + 1];
unsigned int min_score;
/* For feedback loop */
@@ -630,6 +629,8 @@ struct damon_ctx {
unsigned long next_ops_update_sis;
/* for waiting until the execution of the kdamond_fn is started */
struct completion kdamond_started;
+ /* for scheme quotas prioritization */
+ unsigned long *regions_score_histogram;
/* public: */
struct task_struct *kdamond;
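
The quota histogram moves from a fixed array embedded in every struct damos_quota to a single context-level buffer. A hedged sketch of what the allocation side presumably looks like (the actual call site is not part of this hunk):

	/* Assumed allocation pattern for the new per-context histogram. */
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
						     sizeof(unsigned long),
						     GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		return -ENOMEM;
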
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index c9c65b132c0f..0928a6c8ae1e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -57,7 +57,6 @@ static const struct file_operations __fops = { \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
- .llseek = no_llseek, \
}
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
index f764e2a7201e..3dd2658a9dab 100644
--- a/include/linux/decompress/unxz.h
+++ b/include/linux/decompress/unxz.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef DECOMPRESS_UNXZ_H
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 53ca3a913d06..8321f65897f3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -524,7 +524,6 @@ int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
-union map_info *dm_get_rq_mapinfo(struct request *rq);
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
diff --git a/include/linux/device.h b/include/linux/device.h
index 34eb20f5966f..b4bde8d22697 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -707,6 +707,8 @@ struct device_physical_location {
* for dma allocations. This flag is managed by the dma ops
* instance from ->dma_supported.
* @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
+ * @dma_iommu: Device is using default IOMMU implementation for DMA and
+ * doesn't rely on dma_ops structure.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -748,7 +750,7 @@ struct device {
struct dev_pin_info *pins;
#endif
struct dev_msi_info msi;
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
const struct dma_map_ops *dma_ops;
#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
@@ -822,6 +824,9 @@ struct device {
#ifdef CONFIG_DMA_NEED_SYNC
bool dma_skip_sync:1;
#endif
+#ifdef CONFIG_IOMMU_DMA
+ bool dma_iommu:1;
+#endif
};
/**
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 807831d6bf0f..cdc4757217f9 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -126,6 +126,9 @@ struct bus_attribute {
int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
+/* Matching function type for drivers/base APIs to find a specific device */
+typedef int (*device_match_t)(struct device *dev, const void *data);
+
/* Generic device matching functions that all busses can use to match with */
int device_match_name(struct device *dev, const void *name);
int device_match_of_node(struct device *dev, const void *np);
@@ -139,8 +142,7 @@ int device_match_any(struct device *dev, const void *unused);
int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *bus_find_device(const struct bus_type *bus, struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data));
+ const void *data, device_match_t match);
/**
* bus_find_device_by_name - device iterator for locating a particular device
* of a specific name.
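
With the new device_match_t typedef, the stock device_match_*() helpers can be passed to bus_find_device() directly. A minimal sketch, where np is an assumed struct device_node pointer:

	struct device *dev;

	dev = bus_find_device(&platform_bus_type, NULL, np,
			      device_match_of_node);
	if (dev) {
		/* ... use dev ... */
		put_device(dev);	/* bus_find_device() takes a reference */
	}
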
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index c576b49c55c2..518c9c83d64b 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -95,7 +95,7 @@ void class_dev_iter_exit(struct class_dev_iter *iter);
int class_for_each_device(const struct class *class, const struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *class_find_device(const struct class *class, const struct device *start,
- const void *data, int (*match)(struct device *, const void *));
+ const void *data, device_match_t match);
/**
* class_find_device_by_name - device iterator for locating a particular device
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 1fc8b68786de..5c04b8e3833b 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -157,7 +157,7 @@ int __must_check driver_for_each_device(struct device_driver *drv, struct device
void *data, int (*fn)(struct device *dev, void *));
struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data));
+ device_match_t match);
/**
* driver_find_device_by_name - device iterator for locating a particular device
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index edbe13d00776..d7e30d4f7503 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -12,7 +12,7 @@
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>
-extern unsigned int zone_dma_bits;
+extern u64 zone_dma_limit;
/*
* Record the mapping of CPU physical to DMA addresses for a given region.
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 29c5650c1038..079b3dec0a16 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -79,6 +79,12 @@ to_dma_fence_array(struct dma_fence *fence)
for (index = 0, fence = dma_fence_array_first(head); fence; \
++(index), fence = dma_fence_array_next(head, index))
+struct dma_fence_array *dma_fence_array_alloc(int num_fences);
+void dma_fence_array_init(struct dma_fence_array *array,
+ int num_fences, struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
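
dma_fence_array_alloc() and dma_fence_array_init() split the existing dma_fence_array_create() into separate allocation and initialisation steps. A hedged sketch of the two-step form (num_fences, fences, context and seqno assumed in scope):

	struct dma_fence_array *array;

	array = dma_fence_array_alloc(num_fences);
	if (!array)
		return NULL;

	/* signal_on_any == false: signal once all fences have signalled */
	dma_fence_array_init(array, num_fences, fences, context, seqno, false);
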
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 064bad725061..27d15f60950a 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -9,14 +9,13 @@
#ifndef _DMA_HEAPS_H
#define _DMA_HEAPS_H
-#include <linux/cdev.h>
#include <linux/types.h>
struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return struct dma_buf ptr
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
* allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
@@ -41,28 +40,10 @@ struct dma_heap_export_info {
void *priv;
};
-/**
- * dma_heap_get_drvdata() - get per-heap driver data
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The per-heap data for the heap.
- */
void *dma_heap_get_drvdata(struct dma_heap *heap);
-/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The char* for the heap name.
- */
const char *dma_heap_get_name(struct dma_heap *heap);
-/**
- * dma_heap_add - adds a heap to dmabuf heaps
- * @exp_info: information needed to register this heap
- */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 02a1c825896b..b7773201414c 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -13,20 +13,7 @@
struct cma;
struct iommu_ops;
-/*
- * Values for struct dma_map_ops.flags:
- *
- * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
- * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
- * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
- * coherent and it's not an SWIOTLB buffer.
- */
-#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
-#define DMA_F_CAN_SKIP_SYNC (1 << 1)
-
struct dma_map_ops {
- unsigned int flags;
-
void *(*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs);
@@ -37,11 +24,6 @@ struct dma_map_ops {
gfp_t gfp);
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
- struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
- enum dma_data_direction dir, gfp_t gfp,
- unsigned long attrs);
- void (*free_noncontiguous)(struct device *dev, size_t size,
- struct sg_table *sgt, enum dma_data_direction dir);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, unsigned long attrs);
@@ -88,7 +70,7 @@ struct dma_map_ops {
unsigned long (*get_merge_boundary)(struct device *dev);
};
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -103,7 +85,7 @@ static inline void set_dma_ops(struct device *dev,
{
dev->dma_ops = dma_ops;
}
-#else /* CONFIG_DMA_OPS */
+#else /* CONFIG_ARCH_HAS_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return NULL;
@@ -112,7 +94,7 @@ static inline void set_dma_ops(struct device *dev,
const struct dma_map_ops *dma_ops)
{
}
-#endif /* CONFIG_DMA_OPS */
+#endif /* CONFIG_ARCH_HAS_DMA_OPS */
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;
@@ -219,20 +201,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
-/*
- * This is the actual return value from the ->alloc_noncontiguous method.
- * The users of the DMA API should only care about the sg_table, but to make
- * the DMA-API internal vmaping and freeing easier we stash away the page
- * array as well (except for the fallback case). This can go away any time,
- * e.g. when a vmap-variant that takes a scatterlist comes along.
- */
-struct dma_sgt_handle {
- struct sg_table sgt;
- struct page **pages;
-};
-#define sgt_handle(sgt) \
- container_of((sgt), struct dma_sgt_handle, sgt)
-
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f693aafe221f..1524da363734 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -524,13 +524,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
- if (dev->dma_parms) {
- dev->dma_parms->max_segment_size = size;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->max_segment_size = size;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
@@ -559,13 +557,11 @@ static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
-static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
- if (dev->dma_parms) {
- dev->dma_parms->segment_boundary_mask = mask;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->segment_boundary_mask = mask;
}
static inline unsigned int dma_get_min_align_mask(struct device *dev)
@@ -575,13 +571,12 @@ static inline unsigned int dma_get_min_align_mask(struct device *dev)
return 0;
}
-static inline int dma_set_min_align_mask(struct device *dev,
+static inline void dma_set_min_align_mask(struct device *dev,
unsigned int min_align_mask)
{
if (WARN_ON_ONCE(!dev->dma_parms))
- return -EIO;
+ return;
dev->dma_parms->min_align_mask = min_align_mask;
- return 0;
}
#ifndef dma_get_cache_alignment
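
The segment setters above now return void and WARN rather than returning -EIO, so callers drop their error checks. A sketch of a typical call site before and after:

	/* Before: the return value had to be checked.
	 *	if (dma_set_max_seg_size(dev, SZ_1M))
	 *		return -EIO;
	 * After: void return; a missing dev->dma_parms triggers WARN_ON_ONCE().
	 */
	dma_set_max_seg_size(dev, SZ_1M);
	dma_set_seg_boundary(dev, SZ_4K - 1);
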
diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h
deleted file mode 100644
index 6969391580d2..000000000000
--- a/include/linux/dma/ipu-dma.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
- */
-
-#ifndef __LINUX_DMA_IPU_DMA_H
-#define __LINUX_DMA_IPU_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-/* IPU DMA Controller channel definitions. */
-enum ipu_channel {
- IDMAC_IC_0 = 0, /* IC (encoding task) to memory */
- IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */
- IDMAC_ADC_0 = 1,
- IDMAC_IC_2 = 2,
- IDMAC_ADC_1 = 2,
- IDMAC_IC_3 = 3,
- IDMAC_IC_4 = 4,
- IDMAC_IC_5 = 5,
- IDMAC_IC_6 = 6,
- IDMAC_IC_7 = 7, /* IC (sensor data) to memory */
- IDMAC_IC_8 = 8,
- IDMAC_IC_9 = 9,
- IDMAC_IC_10 = 10,
- IDMAC_IC_11 = 11,
- IDMAC_IC_12 = 12,
- IDMAC_IC_13 = 13,
- IDMAC_SDC_0 = 14, /* Background synchronous display data */
- IDMAC_SDC_1 = 15, /* Foreground data (overlay) */
- IDMAC_SDC_2 = 16,
- IDMAC_SDC_3 = 17,
- IDMAC_ADC_2 = 18,
- IDMAC_ADC_3 = 19,
- IDMAC_ADC_4 = 20,
- IDMAC_ADC_5 = 21,
- IDMAC_ADC_6 = 22,
- IDMAC_ADC_7 = 23,
- IDMAC_PF_0 = 24,
- IDMAC_PF_1 = 25,
- IDMAC_PF_2 = 26,
- IDMAC_PF_3 = 27,
- IDMAC_PF_4 = 28,
- IDMAC_PF_5 = 29,
- IDMAC_PF_6 = 30,
- IDMAC_PF_7 = 31,
-};
-
-/* Order significant! */
-enum ipu_channel_status {
- IPU_CHANNEL_FREE,
- IPU_CHANNEL_INITIALIZED,
- IPU_CHANNEL_READY,
- IPU_CHANNEL_ENABLED,
-};
-
-#define IPU_CHANNELS_NUM 32
-
-enum pixel_fmt {
- /* 1 byte */
- IPU_PIX_FMT_GENERIC,
- IPU_PIX_FMT_RGB332,
- IPU_PIX_FMT_YUV420P,
- IPU_PIX_FMT_YUV422P,
- IPU_PIX_FMT_YUV420P2,
- IPU_PIX_FMT_YVU422P,
- /* 2 bytes */
- IPU_PIX_FMT_RGB565,
- IPU_PIX_FMT_RGB666,
- IPU_PIX_FMT_BGR666,
- IPU_PIX_FMT_YUYV,
- IPU_PIX_FMT_UYVY,
- /* 3 bytes */
- IPU_PIX_FMT_RGB24,
- IPU_PIX_FMT_BGR24,
- /* 4 bytes */
- IPU_PIX_FMT_GENERIC_32,
- IPU_PIX_FMT_RGB32,
- IPU_PIX_FMT_BGR32,
- IPU_PIX_FMT_ABGR32,
- IPU_PIX_FMT_BGRA32,
- IPU_PIX_FMT_RGBA32,
-};
-
-enum ipu_color_space {
- IPU_COLORSPACE_RGB,
- IPU_COLORSPACE_YCBCR,
- IPU_COLORSPACE_YUV
-};
-
-/*
- * Enumeration of IPU rotation modes
- */
-enum ipu_rotate_mode {
- /* Note the enum values correspond to BAM value */
- IPU_ROTATE_NONE = 0,
- IPU_ROTATE_VERT_FLIP = 1,
- IPU_ROTATE_HORIZ_FLIP = 2,
- IPU_ROTATE_180 = 3,
- IPU_ROTATE_90_RIGHT = 4,
- IPU_ROTATE_90_RIGHT_VFLIP = 5,
- IPU_ROTATE_90_RIGHT_HFLIP = 6,
- IPU_ROTATE_90_LEFT = 7,
-};
-
-/*
- * Enumeration of DI ports for ADC.
- */
-enum display_port {
- DISP0,
- DISP1,
- DISP2,
- DISP3
-};
-
-struct idmac_video_param {
- unsigned short in_width;
- unsigned short in_height;
- uint32_t in_pixel_fmt;
- unsigned short out_width;
- unsigned short out_height;
- uint32_t out_pixel_fmt;
- unsigned short out_stride;
- bool graphics_combine_en;
- bool global_alpha_en;
- bool key_color_en;
- enum display_port disp;
- unsigned short out_left;
- unsigned short out_top;
-};
-
-/*
- * Union of initialization parameters for a logical channel. So far only video
- * parameters are used.
- */
-union ipu_channel_param {
- struct idmac_video_param video;
-};
-
-struct idmac_tx_desc {
- struct dma_async_tx_descriptor txd;
- struct scatterlist *sg; /* scatterlist for this */
- unsigned int sg_len; /* tx-descriptor. */
- struct list_head list;
-};
-
-struct idmac_channel {
- struct dma_chan dma_chan;
- dma_cookie_t completed; /* last completed cookie */
- union ipu_channel_param params;
- enum ipu_channel link; /* input channel, linked to the output */
- enum ipu_channel_status status;
- void *client; /* Only one client per channel */
- unsigned int n_tx_desc;
- struct idmac_tx_desc *desc; /* allocated tx-descriptors */
- struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */
- struct list_head free_list; /* free tx-descriptors */
- struct list_head queue; /* queued tx-descriptors */
- spinlock_t lock; /* protects sg[0,1], queue */
- struct mutex chan_mutex; /* protects status, cookie, free_list */
- bool sec_chan_en;
- int active_buffer;
- unsigned int eof_irq;
- char eof_name[16]; /* EOF IRQ name for request_irq() */
-};
-
-#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
-#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan)
-
-#endif /* __LINUX_DMA_IPU_DMA_H */
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index 1e491c5dcac2..2dea217629d0 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -136,8 +136,6 @@ u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num);
-void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
- u32 flow_num);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
void (*cleanup)(void *data, dma_addr_t desc_dma),
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 6bf3c4fe8511..e28d88066033 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -764,8 +764,6 @@ extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
-extern void efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource, struct resource *bss_resource);
extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
diff --git a/include/linux/err.h b/include/linux/err.h
index b5d9bb2a2349..a4dacd745fcf 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -41,6 +41,9 @@ static inline void * __must_check ERR_PTR(long error)
return (void *) error;
}
+/* Return the pointer in the percpu address space. */
+#define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
+
/**
* PTR_ERR - Extract the error code from an error pointer.
* @ptr: An error pointer.
@@ -51,6 +54,9 @@ static inline long __must_check PTR_ERR(__force const void *ptr)
return (long) ptr;
}
+/* Read an error pointer from the percpu address space. */
+#define PTR_ERR_PCPU(ptr) (PTR_ERR((const void *)(__force const unsigned long)(ptr)))
+
/**
* IS_ERR - Detect an error pointer.
* @ptr: The pointer to check.
@@ -61,6 +67,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+/* Detect an error pointer in the percpu address space. */
+#define IS_ERR_PCPU(ptr) (IS_ERR((const void *)(__force const unsigned long)(ptr)))
+
/**
* IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
* @ptr: The pointer to check.
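
The new *_PCPU macros mirror ERR_PTR()/PTR_ERR()/IS_ERR() for __percpu pointers so sparse address-space checking stays clean. A minimal sketch (example_alloc_counters is a hypothetical helper):

	static void __percpu *example_alloc_counters(void)
	{
		void __percpu *p = alloc_percpu(u64);

		return p ? p : ERR_PTR_PCPU(-ENOMEM);
	}

	/* Caller side: */
	void __percpu *p = example_alloc_counters();

	if (IS_ERR_PCPU(p))
		return PTR_ERR_PCPU(p);
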
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 01bee2b289c2..b0b821edfd97 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -19,7 +19,6 @@
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
@@ -28,6 +27,7 @@
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
+#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -278,7 +278,7 @@ struct node_footer {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
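
The rewritten F2FS_BLK_ALIGN is the usual round-up-to-block-count idiom, now expressed through F2FS_BYTES_TO_BLK. A worked example assuming F2FS_BLKSIZE == 4096 (F2FS_BLKSIZE_BITS == 12):

	F2FS_BLK_ALIGN(0);	/* (0    + 4095) >> 12 == 0 */
	F2FS_BLK_ALIGN(4096);	/* (4096 + 4095) >> 12 == 1 */
	F2FS_BLK_ALIGN(4097);	/* (4097 + 4095) >> 12 == 2 */
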
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 354413950d34..8c829d28dcf3 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -2,13 +2,17 @@
#ifndef _LINUX_FAULT_INJECT_H
#define _LINUX_FAULT_INJECT_H
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct dentry;
+struct kmem_cache;
+
#ifdef CONFIG_FAULT_INJECTION
-#include <linux/types.h>
-#include <linux/debugfs.h>
+#include <linux/atomic.h>
#include <linux/configfs.h>
#include <linux/ratelimit.h>
-#include <linux/atomic.h>
/*
* For explanation of the elements of this struct, see
@@ -51,6 +55,28 @@ int setup_fault_attr(struct fault_attr *attr, char *str);
bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
+#else /* CONFIG_FAULT_INJECTION */
+
+struct fault_attr {
+};
+
+#define DECLARE_FAULT_ATTR(name) struct fault_attr name = {}
+
+static inline int setup_fault_attr(struct fault_attr *attr, char *str)
+{
+ return 0; /* Note: 0 means error for __setup() handlers! */
+}
+static inline bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
+{
+ return false;
+}
+static inline bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return false;
+}
+
+#endif /* CONFIG_FAULT_INJECTION */
+
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct dentry *fault_create_debugfs_attr(const char *name,
@@ -87,10 +113,6 @@ static inline void fault_config_init(struct fault_config *config,
#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
-#endif /* CONFIG_FAULT_INJECTION */
-
-struct kmem_cache;
-
#ifdef CONFIG_FAIL_PAGE_ALLOC
bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
#else
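
The new !CONFIG_FAULT_INJECTION stubs let callers keep a single code path instead of wrapping every should_fail() in #ifdefs. A hedged sketch of such a caller:

	static DECLARE_FAULT_ATTR(fail_example);   /* empty struct when disabled */

	static void *example_alloc(size_t size)
	{
		/* Compiles to "false" when fault injection is configured out. */
		if (should_fail(&fail_example, size))
			return NULL;
		return kmalloc(size, GFP_KERNEL);
	}
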
diff --git a/include/linux/fb.h b/include/linux/fb.h
index abf6643ebcaf..267b59ead432 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -510,6 +510,7 @@ struct fb_info {
void *par;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
+ bool skip_panic; /* Do not write to the fb after a panic */
};
/* This will go away
diff --git a/include/linux/file.h b/include/linux/file.h
index 6bd9cd9c87e5..f98de143245a 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -36,51 +36,52 @@ static inline void fput_light(struct file *file, int fput_needed)
fput(file);
}
+/* either a reference to struct file + flags
+ * (cloned vs. borrowed, pos locked), with
+ * flags stored in lower bits of value,
+ * or empty (represented by 0).
+ */
struct fd {
- struct file *file;
- unsigned int flags;
+ unsigned long word;
};
#define FDPUT_FPUT 1
#define FDPUT_POS_UNLOCK 2
-static inline void fdput(struct fd fd)
+#define fd_file(f) ((struct file *)((f).word & ~(FDPUT_FPUT|FDPUT_POS_UNLOCK)))
+static inline bool fd_empty(struct fd f)
{
- if (fd.flags & FDPUT_FPUT)
- fput(fd.file);
+ return unlikely(!f.word);
}
-extern struct file *fget(unsigned int fd);
-extern struct file *fget_raw(unsigned int fd);
-extern struct file *fget_task(struct task_struct *task, unsigned int fd);
-extern unsigned long __fdget(unsigned int fd);
-extern unsigned long __fdget_raw(unsigned int fd);
-extern unsigned long __fdget_pos(unsigned int fd);
-extern void __f_unlock_pos(struct file *);
-
-static inline struct fd __to_fd(unsigned long v)
+#define EMPTY_FD (struct fd){0}
+static inline struct fd BORROWED_FD(struct file *f)
{
- return (struct fd){(struct file *)(v & ~3),v & 3};
+ return (struct fd){(unsigned long)f};
}
-
-static inline struct fd fdget(unsigned int fd)
+static inline struct fd CLONED_FD(struct file *f)
{
- return __to_fd(__fdget(fd));
+ return (struct fd){(unsigned long)f | FDPUT_FPUT};
}
-static inline struct fd fdget_raw(unsigned int fd)
+static inline void fdput(struct fd fd)
{
- return __to_fd(__fdget_raw(fd));
+ if (fd.word & FDPUT_FPUT)
+ fput(fd_file(fd));
}
-static inline struct fd fdget_pos(int fd)
-{
- return __to_fd(__fdget_pos(fd));
-}
+extern struct file *fget(unsigned int fd);
+extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
+extern void __f_unlock_pos(struct file *);
+
+struct fd fdget(unsigned int fd);
+struct fd fdget_raw(unsigned int fd);
+struct fd fdget_pos(unsigned int fd);
static inline void fdput_pos(struct fd f)
{
- if (f.flags & FDPUT_POS_UNLOCK)
- __f_unlock_pos(f.file);
+ if (f.word & FDPUT_POS_UNLOCK)
+ __f_unlock_pos(fd_file(f));
fdput(f);
}
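
struct fd is now a single word with the FDPUT_* flags folded into its low bits; fd_file() and fd_empty() replace direct field access. A sketch of the updated calling convention (vfs_fsync() is just an example callee):

	static long example_fsync(unsigned int fd)
	{
		struct fd f = fdget(fd);
		long ret;

		if (fd_empty(f))		/* was: !f.file */
			return -EBADF;
		ret = vfs_fsync(fd_file(f), 0);	/* was: f.file */
		fdput(f);
		return ret;
	}
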
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 64e1506fefb8..7d7578a8eac1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -437,6 +437,16 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Unconditional jumps, gotol pc + imm32 */
+
+#define BPF_JMP32_A(IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_JA, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
/* Relative call */
#define BPF_CALL_REL(TGT) \
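
BPF_JMP32_A encodes an unconditional gotol whose 32-bit displacement lives in imm, for targets beyond the 16-bit off field of plain BPF_JA. An illustrative instruction sequence:

	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_JMP32_A(1),			/* gotol +1: skip the next insn */
		BPF_MOV64_IMM(BPF_REG_0, 2),	/* skipped */
		BPF_EXIT_INSN(),		/* returns 1 */
	};
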
diff --git a/include/linux/find.h b/include/linux/find.h
index 5dfca4225fef..68685714bc18 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -52,7 +52,7 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
@@ -81,7 +81,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -112,7 +112,7 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_andnot_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -142,7 +142,7 @@ unsigned long find_next_andnot_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_or_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -171,7 +171,7 @@ unsigned long find_next_or_bit(const unsigned long *addr1,
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
@@ -198,7 +198,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
* Returns the bit number of the first set bit.
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -224,7 +224,7 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
* Returns the bit number of the N'th set bit.
* If no such, returns >= @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
{
if (n >= size)
@@ -249,7 +249,7 @@ unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsign
* Returns the bit number of the N'th set bit.
* If no such, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
@@ -276,7 +276,7 @@ unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *
* Returns the bit number of the N'th set bit.
* If no such, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
@@ -332,7 +332,7 @@ unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
@@ -357,7 +357,7 @@ unsigned long find_first_and_bit(const unsigned long *addr1,
* Returns the bit number for the first set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_and_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
const unsigned long *addr3,
@@ -381,7 +381,7 @@ unsigned long find_first_and_and_bit(const unsigned long *addr1,
* Returns the bit number of the first cleared bit.
* If no bits are zero, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -402,7 +402,7 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
*
* Returns the bit number of the last set bit, or size.
*/
-static inline
+static __always_inline
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -425,7 +425,7 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size, unsigned long offset)
@@ -448,7 +448,7 @@ unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_bit_wrap(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
@@ -465,7 +465,7 @@ unsigned long find_next_bit_wrap(const unsigned long *addr,
* Helper for for_each_set_bit_wrap(). Make sure you're doing the right thing
* before using it alone.
*/
-static inline
+static __always_inline
unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
unsigned long start, unsigned long n)
{
@@ -506,20 +506,20 @@ extern unsigned long find_next_clump8(unsigned long *clump,
#if defined(__LITTLE_ENDIAN)
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
+static __always_inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
+static __always_inline
+unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
+static __always_inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
return find_first_zero_bit(addr, size);
}
@@ -527,7 +527,7 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
#elif defined(__BIG_ENDIAN)
#ifndef find_next_zero_bit_le
-static inline
+static __always_inline
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
@@ -546,7 +546,7 @@ unsigned long find_next_zero_bit_le(const void *addr, unsigned
#endif
#ifndef find_first_zero_bit_le
-static inline
+static __always_inline
unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -560,7 +560,7 @@ unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
#endif
#ifndef find_next_bit_le
-static inline
+static __always_inline
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
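
The find_*_bit() helpers are forced inline so that constant-size scans can fold at compile time; behaviour is unchanged. A minimal scan loop for reference:

	DECLARE_BITMAP(map, 64);
	unsigned long bit;

	bitmap_zero(map, 64);
	__set_bit(3, map);
	__set_bit(42, map);

	for (bit = find_first_bit(map, 64); bit < 64;
	     bit = find_next_bit(map, 64, bit + 1))
		pr_info("bit %lu is set\n", bit);
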
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 1cca14cf5652..b632eec3ab52 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -134,6 +134,8 @@ struct fw_card {
__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
__be32 maint_utility_register;
+
+ struct workqueue_struct *isoc_wq;
};
static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -509,6 +511,7 @@ union fw_iso_callback {
struct fw_iso_context {
struct fw_card *card;
+ struct work_struct work;
int type;
int channel;
int speed;
@@ -528,6 +531,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+
+/**
+ * fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
+ * @ctx: the isochronous context
+ *
+ * Schedule a work item on a workqueue to process the isochronous context. The registered callback
+ * function is called by the worker when a queued packet buffer with the interrupt flag is
+ * completed, either after transmission in the IT context or after being filled in the IR context.
+ * The callback function is also called when the header buffer in the context becomes full. If the
+ * context needs to be processed in the current context, fw_iso_context_flush_completions() is
+ * available instead.
+ *
+ * Context: Any context.
+ */
+static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx)
+{
+ queue_work(ctx->card->isoc_wq, &ctx->work);
+}
+
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
index 955680c3bb5f..af871405ae55 100644
--- a/include/linux/folio_queue.h
+++ b/include/linux/folio_queue.h
@@ -3,6 +3,12 @@
*
* Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/core-api/folio_queue.rst
+ *
+ * for a description of the API.
*/
#ifndef _LINUX_FOLIO_QUEUE_H
@@ -33,6 +39,13 @@ struct folio_queue {
#endif
};
+/**
+ * folioq_init - Initialise a folio queue segment
+ * @folioq: The segment to initialise
+ *
+ * Initialise a folio queue segment. Note that the folio pointers are
+ * left uninitialised.
+ */
static inline void folioq_init(struct folio_queue *folioq)
{
folio_batch_init(&folioq->vec);
@@ -43,62 +56,155 @@ static inline void folioq_init(struct folio_queue *folioq)
folioq->marks3 = 0;
}
+/**
+ * folioq_nr_slots: Query the capacity of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that a particular folio queue segment might hold.
+ * [!] NOTE: This must not be assumed to be the same for every segment!
+ */
static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
{
return PAGEVEC_SIZE;
}
+/**
+ * folioq_count: Query the occupancy of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that have been added to a folio queue segment.
+ * Note that this is not decreased as folios are removed from a segment.
+ */
static inline unsigned int folioq_count(struct folio_queue *folioq)
{
return folio_batch_count(&folioq->vec);
}
+/**
+ * folioq_full: Query if a folio queue segment is full
+ * @folioq: The segment to query
+ *
+ * Query if a folio queue segment is fully occupied. Note that this does not
+ * change if folios are removed from a segment.
+ */
static inline bool folioq_full(struct folio_queue *folioq)
{
//return !folio_batch_space(&folioq->vec);
return folioq_count(folioq) >= folioq_nr_slots(folioq);
}
+/**
+ * folioq_is_marked: Check first folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the first mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks);
}
+/**
+ * folioq_mark: Set the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks);
}
+/**
+ * folioq_unmark: Clear the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks);
}
+/**
+ * folioq_is_marked2: Check second folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the second mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks2);
}
+/**
+ * folioq_mark2: Set the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks2);
}
+/**
+ * folioq_unmark2: Clear the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks2);
}
+/**
+ * folioq_is_marked3: Check third folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the third mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
static inline bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot)
{
return test_bit(slot, &folioq->marks3);
}
+/**
+ * folioq_mark3: Set the third mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the third mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_mark3(struct folio_queue *folioq, unsigned int slot)
{
set_bit(slot, &folioq->marks3);
}
+/**
+ * folioq_unmark3: Clear the third mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the third mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot)
{
clear_bit(slot, &folioq->marks3);
@@ -111,6 +217,19 @@ static inline unsigned int __folio_order(struct folio *folio)
return folio->_flags_1 & 0xff;
}
+/**
+ * folioq_append: Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue and the marks are left
+ * unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
{
unsigned int slot = folioq->vec.nr++;
@@ -120,6 +239,19 @@ static inline unsigned int folioq_append(struct folio_queue *folioq, struct foli
return slot;
}
+/**
+ * folioq_append_mark: Add a folio to a folio queue segment and set the first mark
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue, the first mark is set
+ * and the second and third marks are left unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
{
unsigned int slot = folioq->vec.nr++;
@@ -130,21 +262,57 @@ static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct
return slot;
}
+/**
+ * folioq_folio: Get a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the folio in the specified slot from a folio queue segment. Note
+ * that no bounds check is made and if the slot hasn't been filled yet, the
+ * pointer will be undefined. If the slot has been cleared, NULL will be
+ * returned.
+ */
static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
{
return folioq->vec.folios[slot];
}
+/**
+ * folioq_folio_order: Get the order of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the order of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * filled yet, the order returned will be 0.
+ */
static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
{
return folioq->orders[slot];
}
+/**
+ * folioq_folio_size: Get the size of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the size of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * filled yet, the size returned will be PAGE_SIZE.
+ */
static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
{
return PAGE_SIZE << folioq_folio_order(folioq, slot);
}
+/**
+ * folioq_clear: Clear a folio from a folio queue segment
+ * @folioq: The segment to clear
+ * @slot: The folio slot to clear
+ *
+ * Clear a folio from a sequence in a folio queue segment and clear its marks.
+ * The occupancy count is left unchanged.
+ */
static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
{
folioq->vec.folios[slot] = NULL;
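
Taken together, the newly documented helpers compose as below; a hedged sketch with folio assumed to be a valid struct folio pointer:

	struct folio_queue fq;
	unsigned int slot;

	folioq_init(&fq);
	if (!folioq_full(&fq)) {
		slot = folioq_append_mark(&fq, folio);	/* sets the first mark */
		pr_debug("slot %u holds %zu bytes\n",
			 slot, folioq_folio_size(&fq, slot));
		folioq_unmark(&fq, slot);
		folioq_clear(&fq, slot);	/* occupancy count is unchanged */
	}
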
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0df3e5f0dd2b..e3c603d01337 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1229,6 +1229,7 @@ extern int send_sigurg(struct file *file);
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
+#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
/* Possible states of 'frozen' field */
enum {
@@ -3148,7 +3149,14 @@ static inline bool is_zero_ino(ino_t ino)
return (u32)ino == 0;
}
-extern void __iget(struct inode * inode);
+/*
+ * inode->i_lock must be held
+ */
+static inline void __iget(struct inode *inode)
+{
+ atomic_inc(&inode->i_count);
+}
+
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
@@ -3226,7 +3234,6 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-#define no_llseek NULL
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
@@ -3513,7 +3520,6 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- kiocb_flags |= IOCB_NOIO;
}
if (flags & RWF_ATOMIC) {
if (rw_type != WRITE)
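
With the no_llseek define removed, drivers that previously set .llseek = no_llseek simply leave the hook unset. A sketch of the resulting fops (example_read is an assumed handler):

	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,
		.read	= example_read,
		/* no .llseek: a NULL hook marks the file non-seekable */
	};
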
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 083c860fd28e..c90ec889bfc2 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -436,7 +436,7 @@ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
u16 if_id);
-extern struct bus_type fsl_mc_bus_type;
+extern const struct bus_type fsl_mc_bus_type;
extern struct device_type fsl_mc_bus_dprc_type;
extern struct device_type fsl_mc_bus_dpni_type;
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index f3512fddf3d7..5b51c3d582d6 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -41,6 +41,7 @@
#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math.h>
+#include <linux/slab.h>
#include <linux/types.h>
struct genradix_root;
@@ -48,10 +49,63 @@ struct genradix_root;
#define GENRADIX_NODE_SHIFT 9
#define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT)
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns size (of data, in bytes) that a tree of a given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
+
struct __genradix {
struct genradix_root *root;
};
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[GENRADIX_NODE_SIZE];
+ };
+};
+
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kfree(node);
+}
+
/*
* NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE:
*/
@@ -128,6 +182,30 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
+static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+ unsigned shift = genradix_depth_shift(level);
+
+ if (unlikely(ilog2(offset) >= genradix_depth_shift(level)))
+ return NULL;
+
+ while (n && shift > GENRADIX_NODE_SHIFT) {
+ shift -= GENRADIX_ARY_SHIFT;
+ n = n->children[offset >> shift];
+ offset &= (1UL << shift) - 1;
+ }
+
+ return n ? &n->data[offset] : NULL;
+}
+
+#define genradix_ptr_inlined(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
void *__genradix_ptr(struct __genradix *, size_t);
/**
@@ -142,7 +220,24 @@ void *__genradix_ptr(struct __genradix *, size_t);
__genradix_ptr(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx)))
-void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
+void *__genradix_ptr_alloc(struct __genradix *, size_t,
+ struct genradix_node **, gfp_t);
+
+#define genradix_ptr_alloc_inlined(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ NULL, _gfp)))
+
+#define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp)))
/**
* genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
@@ -157,7 +252,13 @@ void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
(__genradix_cast(_radix) \
__genradix_ptr_alloc(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx), \
- _gfp))
+ NULL, _gfp))
+
+#define genradix_ptr_alloc_preallocated(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp))
struct genradix_iter {
size_t offset;
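
genradix_ptr_alloc_inlined() tries the new inlined lookup fast path first and only calls out of line to allocate on a miss. A hedged usage sketch (example_radix, struct example_item and idx are all assumed names):

	struct example_item { u64 val; };
	static DEFINE_GENRADIX(example_radix, struct example_item);

	struct example_item *item;

	item = genradix_ptr_alloc_inlined(&example_radix, idx, GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->val = 42;
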
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f53f76e0b17e..a951de920e20 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -319,7 +319,7 @@ static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
- return __folio_alloc_node(gfp, order, numa_node_id());
+ return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
struct mempolicy *mpol, pgoff_t ilx, int nid)
@@ -446,4 +446,27 @@ extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
+#ifdef CONFIG_CONTIG_ALLOC
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ struct page *page;
+
+ if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ return NULL;
+
+ page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
+
+ return page ? page_folio(page) : NULL;
+}
+#else
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ return NULL;
+}
+#endif
+/* This should be paired with folio_put() rather than free_contig_range(). */
+#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
+
#endif /* __LINUX_GFP_H */
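
folio_alloc_gigantic() wraps alloc_contig_pages() and insists on __GFP_COMP plus a non-zero order; the result is dropped with folio_put(), not free_contig_range(). A sketch (order assumed in scope):

	struct folio *folio;

	folio = folio_alloc_gigantic(order, GFP_KERNEL | __GFP_COMP,
				     NUMA_NO_NODE, NULL);
	if (!folio)
		return -ENOMEM;
	/* ... use the folio ... */
	folio_put(folio);	/* not free_contig_range() */
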
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 313be4ad79fd..65db9349f905 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -215,7 +215,8 @@ enum {
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
- * implicit rules.
+ * implicit rules. Please note that all of them must be used along with
+ * the %__GFP_DIRECT_RECLAIM flag.
*
* %__GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
@@ -246,11 +247,14 @@ enum {
* cannot handle allocation failures. The allocation could block
* indefinitely but will never return with failure. Testing for
* failure is pointless.
+ * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM.
+ * It should _never_ be used in non-sleepable contexts.
* New users should be evaluated carefully (and the flag should be
* used only when there is no reasonable failure policy) but it is
* definitely preferable to use the flag rather than opencode endless
* loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
+ * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is
+ * not supported. Please consider using kvmalloc() instead.
*/
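
A small sketch of the documented rules, not taken from the patch itself; nofail_alloc_ok() is an illustrative name:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *nofail_alloc_ok(void)
{
	/* Fine: GFP_KERNEL implies __GFP_DIRECT_RECLAIM, and the order is 0. */
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);

	return page_address(page);	/* never NULL: failure cannot happen */
}

/*
 * Not fine: GFP_NOWAIT | __GFP_NOFAIL lacks __GFP_DIRECT_RECLAIM, and
 * __GFP_NOFAIL with order > 1 from the buddy allocator is unsupported;
 * kvmalloc() is the suggested fallback for larger must-succeed buffers.
 */
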
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e25d9ebfdf89..67d0ab3c3bba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -76,9 +76,9 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
/*
* Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split and the MAX_PAGECACHE_ORDER limit does not apply to
- * it.
+ * it. The same applies to PFNMAPs, where there is neither a page* nor a pagecache.
*/
-#define THP_ORDERS_ALL_FILE_DAX \
+#define THP_ORDERS_ALL_SPECIAL \
(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
@@ -87,7 +87,7 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
* Mask of all large folio orders supported for THP.
*/
#define THP_ORDERS_ALL \
- (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
#define TVA_IN_PF (1 << 1) /* Page fault handler */
@@ -96,6 +96,8 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
+#define split_folio(f) split_folio_to_list(f, NULL)
+
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
@@ -114,6 +116,53 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+enum mthp_stat_item {
+ MTHP_STAT_ANON_FAULT_ALLOC,
+ MTHP_STAT_ANON_FAULT_FALLBACK,
+ MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+ MTHP_STAT_SWPOUT,
+ MTHP_STAT_SWPOUT_FALLBACK,
+ MTHP_STAT_SHMEM_ALLOC,
+ MTHP_STAT_SHMEM_FALLBACK,
+ MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+ MTHP_STAT_SPLIT,
+ MTHP_STAT_SPLIT_FAILED,
+ MTHP_STAT_SPLIT_DEFERRED,
+ MTHP_STAT_NR_ANON,
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
+ __MTHP_STAT_COUNT
+};
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+ unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+ if (order <= 0 || order > PMD_ORDER)
+ return;
+
+ this_cpu_add(mthp_stats.stats[order][item], delta);
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+ mod_mthp_stat(order, item, 1);
+}
+
+#else
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
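
A one-function sketch of the counter API; note_mthp_fault() is an illustrative name:

#include <linux/huge_mm.h>

static void note_mthp_fault(int order, bool allocated)
{
	/* Out-of-range orders (<= 0 or > PMD_ORDER) are silently ignored. */
	count_mthp_stat(order, allocated ? MTHP_STAT_ANON_FAULT_ALLOC
					 : MTHP_STAT_ANON_FAULT_FALLBACK);
}
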
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long transparent_hugepage_flags;
@@ -269,41 +318,6 @@ struct thpsize {
#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
-enum mthp_stat_item {
- MTHP_STAT_ANON_FAULT_ALLOC,
- MTHP_STAT_ANON_FAULT_FALLBACK,
- MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
- MTHP_STAT_SWPOUT,
- MTHP_STAT_SWPOUT_FALLBACK,
- MTHP_STAT_SHMEM_ALLOC,
- MTHP_STAT_SHMEM_FALLBACK,
- MTHP_STAT_SHMEM_FALLBACK_CHARGE,
- MTHP_STAT_SPLIT,
- MTHP_STAT_SPLIT_FAILED,
- MTHP_STAT_SPLIT_DEFERRED,
- __MTHP_STAT_COUNT
-};
-
-struct mthp_stat {
- unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
-};
-
-#ifdef CONFIG_SYSFS
-DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
-
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
- if (order <= 0 || order > PMD_ORDER)
- return;
-
- this_cpu_inc(mthp_stats.stats[order][item]);
-}
-#else
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-}
-#endif
-
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -314,14 +328,29 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
unsigned long len, unsigned long pgoff, unsigned long flags,
vm_flags_t vm_flags);
-bool can_split_folio(struct folio *folio, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
+int min_order_for_split(struct folio *folio);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
- return split_huge_page_to_list_to_order(page, NULL, 0);
+ struct folio *folio = page_folio(page);
+ int ret = min_order_for_split(folio);
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * split_huge_page() locks the page before splitting and
+ * expects the same page that has been split to be locked when
+ * returned. split_folio(page_folio(page)) cannot be used here
+ * because it converts the page to folio and passes the head
+ * page to be split.
+ */
+ return split_huge_page_to_list_to_order(page, NULL, ret);
}
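
For a caller that already holds a locked folio, a hedged equivalent of the inline above might look like this; shatter_folio() is an invented name:

#include <linux/huge_mm.h>

static int shatter_folio(struct folio *folio)
{
	int min_order = min_order_for_split(folio);

	if (min_order < 0)
		return min_order;

	/* Split to the smallest order the folio's mapping allows. */
	return split_huge_page_to_list_to_order(&folio->page, NULL, min_order);
}
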
-void deferred_split_folio(struct folio *folio);
+void deferred_split_folio(struct folio *folio, bool partially_mapped);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct folio *folio);
@@ -342,6 +371,17 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+#else
+static inline int
+change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags) { return 0; }
+#endif
+
#define split_huge_pud(__vma, __pud, __address) \
do { \
pud_t *____pud = (__pud); \
@@ -410,11 +450,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}
-static inline bool is_huge_zero_pud(pud_t pud)
-{
- return false;
-}
-
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
@@ -470,7 +505,7 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
}
static inline bool
-can_split_folio(struct folio *folio, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
return false;
}
@@ -484,7 +519,13 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
-static inline void deferred_split_folio(struct folio *folio) {}
+
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ return 0;
+}
+
+static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
@@ -555,11 +596,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return false;
}
-static inline bool is_huge_zero_pud(pud_t pud)
-{
- return false;
-}
-
static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
return;
@@ -585,6 +621,19 @@ static inline int next_order(unsigned long *orders, int prev)
{
return 0;
}
+
+static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+}
+
+static inline int change_huge_pud(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pud_t *pudp,
+ unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int split_folio_to_list_to_order(struct folio *folio,
@@ -598,7 +647,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
return split_folio_to_list_to_order(folio, NULL, new_order);
}
-#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
-#define split_folio(f) split_folio_to_order(f, 0)
-
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 45bf05ad5c53..e4697539b665 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -127,9 +127,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
struct vm_area_struct *, struct vm_area_struct *);
-struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
@@ -695,6 +692,9 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback);
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask);
+
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
@@ -899,10 +899,11 @@ static inline bool hugepage_movable_supported(struct hstate *h)
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
- if (hugepage_movable_supported(h))
- return GFP_HIGHUSER_MOVABLE;
- else
- return GFP_HIGHUSER;
+ gfp_t gfp = __GFP_COMP | __GFP_NOWARN;
+
+ gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
+
+ return gfp;
}
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
@@ -1062,6 +1063,13 @@ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
}
static inline struct folio *
+alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
+{
+ return NULL;
+}
+
+static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback)
@@ -1251,7 +1259,7 @@ static inline __init void hugetlb_cma_reserve(int order)
}
#endif
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
return page_count(virt_to_page(pte)) > 1;
@@ -1287,8 +1295,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
-#if defined(CONFIG_HUGETLB_PAGE) && \
- defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
/*
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 377def497298..388ce71a29a9 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -761,6 +761,9 @@ struct i2c_adapter {
struct regulator *bus_regulator;
struct dentry *debugfs;
+
+ /* 7bit address space */
+ DECLARE_BITMAP(addrs_in_instantiation, 1 << 7);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 074f632868d9..2a1ed05d5782 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -278,6 +278,20 @@ enum i3c_bus_mode {
};
/**
+ * enum i3c_open_drain_speed - I3C open-drain speed
+ * @I3C_OPEN_DRAIN_SLOW_SPEED: Slow open-drain speed for sending the first
+ * broadcast address. The first broadcast address at this speed
+ * will be visible to all devices on the I3C bus. I3C devices
+ * working in I2C mode will turn off their spike filter when
+ * switching into I3C mode.
+ * @I3C_OPEN_DRAIN_NORMAL_SPEED: Normal open-drain speed in I3C bus mode.
+ */
+enum i3c_open_drain_speed {
+ I3C_OPEN_DRAIN_SLOW_SPEED,
+ I3C_OPEN_DRAIN_NORMAL_SPEED,
+};
+
+/**
* enum i3c_addr_slot_status - I3C address slot status
* @I3C_ADDR_SLOT_FREE: address is free
* @I3C_ADDR_SLOT_RSVD: address is reserved
@@ -436,6 +450,7 @@ struct i3c_bus {
* NULL.
* @enable_hotjoin: enable hot join event detect.
* @disable_hotjoin: disable hot join event detect.
+ * @set_speed: adjust I3C open drain mode timing.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -464,6 +479,7 @@ struct i3c_master_controller_ops {
struct i3c_ibi_slot *slot);
int (*enable_hotjoin)(struct i3c_master_controller *master);
int (*disable_hotjoin)(struct i3c_master_controller *master);
+ int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
};
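
A hedged sketch of how a controller driver might implement the new @set_speed hook; mydrv_set_speed() and mydrv_program_od_timing() are hypothetical names:

#include <linux/i3c/master.h>

static int mydrv_program_od_timing(struct i3c_master_controller *master,
				   bool slow);	/* hypothetical helper */

static int mydrv_set_speed(struct i3c_master_controller *master,
			   enum i3c_open_drain_speed speed)
{
	/*
	 * Slow timing keeps the first broadcast address visible to
	 * I2C-mode devices whose spike filters are still enabled.
	 */
	return mydrv_program_od_timing(master,
				       speed == I3C_OPEN_DRAIN_SLOW_SPEED);
}
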
/**
diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
index 8099759d7242..37d56914d485 100644
--- a/include/linux/iio/backend.h
+++ b/include/linux/iio/backend.h
@@ -3,6 +3,7 @@
#define _IIO_BACKEND_H_
#include <linux/types.h>
+#include <linux/iio/iio.h>
struct iio_chan_spec;
struct fwnode_handle;
@@ -17,11 +18,13 @@ enum iio_backend_data_type {
};
enum iio_backend_data_source {
- IIO_BACKEND_INTERNAL_CONTINUOS_WAVE,
+ IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE,
IIO_BACKEND_EXTERNAL,
IIO_BACKEND_DATA_SOURCE_MAX
};
+#define iio_backend_debugfs_ptr(ptr) PTR_IF(IS_ENABLED(CONFIG_DEBUG_FS), ptr)
+
/**
* IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute
* @_name: Attribute name
@@ -54,6 +57,8 @@ enum iio_backend_test_pattern {
IIO_BACKEND_NO_TEST_PATTERN,
/* modified prbs9 */
IIO_BACKEND_ADI_PRBS_9A = 32,
+ /* modified prbs23 */
+ IIO_BACKEND_ADI_PRBS_23A,
IIO_BACKEND_TEST_PATTERN_MAX
};
@@ -81,6 +86,9 @@ enum iio_backend_sample_trigger {
* @extend_chan_spec: Extend an IIO channel.
* @ext_info_set: Extended info setter.
* @ext_info_get: Extended info getter.
+ * @read_raw: Read a channel attribute from a backend device.
+ * @debugfs_print_chan_status: Print channel status into a buffer.
+ * @debugfs_reg_access: Read or write register value of backend.
**/
struct iio_backend_ops {
int (*enable)(struct iio_backend *back);
@@ -113,11 +121,31 @@ struct iio_backend_ops {
const char *buf, size_t len);
int (*ext_info_get)(struct iio_backend *back, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
+ int (*read_raw)(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+ int (*debugfs_print_chan_status)(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+ int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+};
+
+/**
+ * struct iio_backend_info - info structure for an iio_backend
+ * @name: Backend name.
+ * @ops: Backend operations.
+ */
+struct iio_backend_info {
+ const char *name;
+ const struct iio_backend_ops *ops;
};
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan);
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan);
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back);
+int iio_backend_enable(struct iio_backend *back);
+void iio_backend_disable(struct iio_backend *back);
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
const struct iio_backend_data_fmt *data);
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
@@ -141,17 +169,41 @@ ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
const char *buf, size_t len);
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
-
-int iio_backend_extend_chan_spec(struct iio_dev *indio_dev,
- struct iio_backend *back,
+int iio_backend_read_raw(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+int iio_backend_extend_chan_spec(struct iio_backend *back,
struct iio_chan_spec *chan);
void *iio_backend_get_priv(const struct iio_backend *conv);
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name);
+struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
+ const char *name,
+ struct fwnode_handle *fwnode);
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
struct fwnode_handle *fwnode);
int devm_iio_backend_register(struct device *dev,
- const struct iio_backend_ops *ops, void *priv);
+ const struct iio_backend_info *info, void *priv);
+
+static inline int iio_backend_read_scale(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+
+static inline int iio_backend_read_offset(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2,
+ IIO_CHAN_INFO_OFFSET);
+}
+ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+void iio_backend_debugfs_add(struct iio_backend *back,
+ struct iio_dev *indio_dev);
#endif
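
With registration now taking a struct iio_backend_info, a provider sketch could look as follows; all mydrv_* names are assumptions:

#include <linux/iio/backend.h>
#include <linux/platform_device.h>

static const struct iio_backend_ops mydrv_ops = {
	/* .enable, .read_raw, .debugfs_reg_access, ... filled in by the driver */
};

static const struct iio_backend_info mydrv_info = {
	.name = "mydrv",
	.ops = &mydrv_ops,
};

static int mydrv_probe(struct platform_device *pdev)
{
	/* Third argument is the driver's private state (NULL here for brevity). */
	return devm_iio_backend_register(&pdev->dev, &mydrv_info, NULL);
}
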
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 894309294182..18779b631e90 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -609,7 +609,7 @@ struct iio_dev {
int scan_bytes;
const unsigned long *available_scan_masks;
- unsigned masklength;
+ unsigned __private masklength;
const unsigned long *active_scan_mask;
bool scan_timestamp;
struct iio_trigger *trig;
@@ -810,6 +810,23 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
}
#endif
+/**
+ * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
+ * @indio_dev: iio_dev associated with the device that will have triggers suspended
+ *
+ * Returns 0 if successful, negative otherwise.
+ **/
+int iio_device_suspend_triggering(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_resume_triggering() - resume trigger attached to an iio_dev
+ * that was previously suspended with iio_device_suspend_triggering()
+ * @indio_dev: iio_dev associated with the device that will have triggers resumed
+ *
+ * Returns 0 if successful, negative otherwise.
+ **/
+int iio_device_resume_triggering(struct iio_dev *indio_dev);
+
#ifdef CONFIG_ACPI
bool iio_read_acpi_mount_matrix(struct device *dev,
struct iio_mount_matrix *orientation,
@@ -855,6 +872,26 @@ static inline const struct iio_scan_type
return &chan->scan_type;
}
+/**
+ * iio_get_masklength - Get length of the channels mask
+ * @indio_dev: the IIO device to get the masklength for
+ */
+static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
+{
+ return ACCESS_PRIVATE(indio_dev, masklength);
+}
+
+int iio_active_scan_mask_index(struct iio_dev *indio_dev);
+
+/**
+ * iio_for_each_active_channel - Iterate over active channels
+ * @indio_dev: the IIO device
+ * @chan: Holds the index of the enabled channel
+ */
+#define iio_for_each_active_channel(indio_dev, chan) \
+ for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
+ iio_get_masklength(indio_dev))
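
A short sketch of the new iterator; fill_scan() and the read_chan callback are hypothetical:

#include <linux/iio/iio.h>

/* Push one sample per enabled channel into a scan buffer. */
static void fill_scan(struct iio_dev *indio_dev, u16 *dst,
		      u16 (*read_chan)(unsigned int))
{
	unsigned int chan, i = 0;

	iio_for_each_active_channel(indio_dev, chan)
		dst[i++] = read_chan(chan);
}
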
+
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index b8d8d69eba29..90867f44ab4d 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -34,52 +34,6 @@ struct matrix_keymap_data {
unsigned int keymap_size;
};
-/**
- * struct matrix_keypad_platform_data - platform-dependent keypad data
- * @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: pointer to array of gpio numbers representing rows
- * @col_gpios: pointer to array of gpio numbers reporesenting colums
- * @num_row_gpios: actual number of row gpios used by device
- * @num_col_gpios: actual number of col gpios used by device
- * @col_scan_delay_us: delay, measured in microseconds, that is
- * needed before we can keypad after activating column gpio
- * @debounce_ms: debounce interval in milliseconds
- * @clustered_irq: may be specified if interrupts of all row/column GPIOs
- * are bundled to one single irq
- * @clustered_irq_flags: flags that are needed for the clustered irq
- * @active_low: gpio polarity
- * @wakeup: controls whether the device should be set up as wakeup
- * source
- * @no_autorepeat: disable key autorepeat
- * @drive_inactive_cols: drive inactive columns during scan, rather than
- * making them inputs.
- *
- * This structure represents platform-specific data that use used by
- * matrix_keypad driver to perform proper initialization.
- */
-struct matrix_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
-
- const unsigned int *row_gpios;
- const unsigned int *col_gpios;
-
- unsigned int num_row_gpios;
- unsigned int num_col_gpios;
-
- unsigned int col_scan_delay_us;
-
- /* key debounce interval in milli-second */
- unsigned int debounce_ms;
-
- unsigned int clustered_irq;
- unsigned int clustered_irq_flags;
-
- bool active_low;
- bool wakeup;
- bool no_autorepeat;
- bool drive_inactive_cols;
-};
-
int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
const char *keymap_name,
unsigned int rows, unsigned int cols,
@@ -88,6 +42,4 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
int matrix_keypad_parse_properties(struct device *dev,
unsigned int *rows, unsigned int *cols);
-#define matrix_keypad_parse_of_params matrix_keypad_parse_properties
-
#endif /* _MATRIX_KEYPAD_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 6fc1c858013d..4ad12a3c8bae 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -257,11 +257,7 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
}
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
- const struct iomap_ops *ops);
-int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
- struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
- int (*punch)(struct inode *inode, loff_t pos, loff_t length));
-
+ const struct iomap_ops *ops, void *private);
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
@@ -277,6 +273,13 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
const struct iomap_ops *ops);
+
+typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
+ struct iomap *iomap);
+void iomap_file_buffered_write_punch_delalloc(struct inode *inode, loff_t pos,
+ loff_t length, ssize_t written, unsigned flag,
+ struct iomap *iomap, iomap_punch_t punch);
+
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
new file mode 100644
index 000000000000..508beaa44c39
--- /dev/null
+++ b/include/linux/iommu-dma.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ *
+ * DMA operations that map physical memory through IOMMU.
+ */
+#ifndef _LINUX_IOMMU_DMA_H
+#define _LINUX_IOMMU_DMA_H
+
+#include <linux/dma-direction.h>
+
+#ifdef CONFIG_IOMMU_DMA
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return dev->dma_iommu;
+}
+#else
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, unsigned long attrs);
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+unsigned long iommu_dma_get_merge_boundary(struct device *dev);
+size_t iommu_dma_opt_mapping_size(void);
+size_t iommu_dma_max_mapping_size(struct device *dev);
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs);
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+ vunmap(vaddr);
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+
+#endif /* _LINUX_IOMMU_DMA_H */
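
A hedged sketch of the intended dispatch pattern, assuming a dma-mapping-core caller; map_one_page() is an invented name:

#include <linux/iommu-dma.h>
#include <linux/dma-mapping.h>

static dma_addr_t map_one_page(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	/* Route to the IOMMU path only when the device actually uses it. */
	if (use_dma_iommu(dev))
		return iommu_dma_map_page(dev, page, 0, size, dir, 0);

	return DMA_MAPPING_ERROR;	/* direct-mapping path elided */
}
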
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index ffc3a949f837..30f832a60ccb 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -6,17 +6,17 @@
#ifndef __LINUX_IOMMUFD_H
#define __LINUX_IOMMUFD_H
-#include <linux/types.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
struct device;
-struct iommufd_device;
-struct page;
-struct iommufd_ctx;
-struct iommufd_access;
struct file;
struct iommu_group;
+struct iommufd_access;
+struct iommufd_ctx;
+struct iommufd_device;
+struct page;
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id);
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index db1249cd9692..b25377b6ea98 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == SCHED_IDLE)
return IOPRIO_CLASS_IDLE;
- else if (task_is_realtime(task))
+ else if (rt_or_dl_task_policy(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 5157d92b6f23..8aef9bb6ad57 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1086,7 +1086,7 @@ struct journal_s
int j_revoke_records_per_block;
/**
- * @j_transaction_overhead:
+ * @j_transaction_overhead_buffers:
*
* Number of blocks each transaction needs for its own bookkeeping
*/
@@ -1675,7 +1675,7 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
-int jbd2_fc_release_bufs(journal_t *journal);
+void jbd2_fc_release_bufs(journal_t *journal);
/*
* is_journal_abort
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 859f4b0c1b2b..196778a087c4 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -10,12 +10,11 @@
*/
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
-#define KPF_MAPPEDTODISK 34
+#define KPF_OWNER_2 34
#define KPF_PRIVATE 35
#define KPF_PRIVATE_2 36
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
-#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
#define KPF_ARCH_2 41
#define KPF_ARCH_3 42
diff --git a/include/linux/key.h b/include/linux/key.h
index 943a432da3ae..074dca3222b9 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -436,9 +436,6 @@ extern key_ref_t keyring_search(key_ref_t keyring,
const char *description,
bool recurse);
-extern int keyring_add_key(struct key *keyring,
- struct key *key);
-
extern int keyring_restrict(key_ref_t keyring, const char *type,
const char *restriction);
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 88100cc9caba..0ad1ddbb8b99 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -124,7 +124,7 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp
if (!static_branch_likely(&kfence_allocation_key))
return NULL;
#endif
- if (likely(atomic_read(&kfence_allocation_gate)))
+ if (likely(atomic_read(&kfence_allocation_gate) > 0))
return NULL;
return __kfence_alloc(s, size, flags);
}
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index f68865e19b0b..30baae91b225 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -4,6 +4,7 @@
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
+extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 906521c2329c..6055fc969877 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -40,6 +40,17 @@ struct kmsg_dump_iter {
};
/**
+ * struct kmsg_dump_detail - kernel crash detail
+ * @reason: reason for the crash, see kmsg_dump_reason.
+ * @description: optional short string to provide additional information.
+ */
+
+struct kmsg_dump_detail {
+ enum kmsg_dump_reason reason;
+ const char *description;
+};
+
+/**
* struct kmsg_dumper - kernel crash message dumper structure
* @list: Entry in the dumper list (private)
* @dump: Call into dumping code which will retrieve the data with
@@ -49,13 +60,13 @@ struct kmsg_dump_iter {
*/
struct kmsg_dumper {
struct list_head list;
- void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+ void (*dump)(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail);
enum kmsg_dump_reason max_reason;
bool registered;
};
#ifdef CONFIG_PRINTK
-void kmsg_dump(enum kmsg_dump_reason reason);
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc);
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
@@ -71,7 +82,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper);
const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
#else
-static inline void kmsg_dump(enum kmsg_dump_reason reason)
+static inline void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
}
@@ -107,4 +118,9 @@ static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
}
#endif
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+ kmsg_dump_desc(reason, NULL);
+}
+
#endif /* _LINUX_KMSG_DUMP_H */
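
A minimal dumper sketch against the new callback signature; the mydrv_* names are assumptions:

#include <linux/kmsg_dump.h>
#include <linux/printk.h>

static void mydrv_dump(struct kmsg_dumper *dumper,
		       struct kmsg_dump_detail *detail)
{
	/* detail->description may be NULL; plain kmsg_dump() passes NULL. */
	pr_emerg("dump (%s): %s\n", kmsg_dump_reason_str(detail->reason),
		 detail->description ?: "no description");
}

static struct kmsg_dumper mydrv_dumper = {
	.dump = mydrv_dump,
	.max_reason = KMSG_DUMP_PANIC,
};
/* Registered with kmsg_dump_register(&mydrv_dumper). */
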
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 5fcbc254d186..8c4f3bb24429 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -269,15 +269,6 @@ extern unsigned long __stop_kprobe_blacklist[];
extern struct kretprobe_blackpoint kretprobe_blacklist[];
-#ifdef CONFIG_KPROBES_SANITY_TEST
-extern int init_test_probes(void);
-#else /* !CONFIG_KPROBES_SANITY_TEST */
-static inline int init_test_probes(void)
-{
- return 0;
-}
-#endif /* CONFIG_KPROBES_SANITY_TEST */
-
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0d5125a3e31a..db567d26f7b9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1529,8 +1529,22 @@ static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-int kvm_arch_hardware_enable(void);
-void kvm_arch_hardware_disable(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
+ * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
+ * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
+ * sequence, and at the end of the generic hardware disabling sequence.
+ */
+void kvm_arch_enable_virtualization(void);
+void kvm_arch_disable_virtualization(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
+ * do the actual twiddling of hardware bits. The hooks are called on all
+ * online CPUs when KVM enables/disables virtualization, and on a single CPU
+ * when that CPU is onlined/offlined (including for Resume/Suspend).
+ */
+int kvm_arch_enable_virtualization_cpu(void);
+void kvm_arch_disable_virtualization_cpu(void);
#endif
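
A stub sketch for an imaginary architecture, showing which hook does what:

#include <linux/kvm_host.h>

void kvm_arch_enable_virtualization(void)
{
	/* One-time, system-wide setup under kvm_usage_lock (0 => 1). */
}

int kvm_arch_enable_virtualization_cpu(void)
{
	/* Per-CPU hardware twiddling; also runs when a CPU is onlined/resumed. */
	return 0;	/* 0 on success, negative errno to fail enabling */
}
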
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6885603f211b..e5968c3ed4ae 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -611,6 +611,8 @@ enum led_trigger_netdev_modes {
TRIGGER_NETDEV_FULL_DUPLEX,
TRIGGER_NETDEV_TX,
TRIGGER_NETDEV_RX,
+ TRIGGER_NETDEV_TX_ERR,
+ TRIGGER_NETDEV_RX_ERR,
/* Keep last */
__TRIGGER_NETDEV_MAX,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 17394098bee9..9b4a6ff03235 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -55,6 +55,46 @@
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
+/*
+ * Quirk flag bits.
+ * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32.
+ */
+enum ata_quirks {
+ __ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */
+ __ATA_QUIRK_NODMA, /* DMA problems */
+ __ATA_QUIRK_NONCQ, /* Don't use NCQ */
+ __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */
+ __ATA_QUIRK_BROKEN_HPA, /* Broken HPA */
+ __ATA_QUIRK_DISABLE, /* Disable it */
+ __ATA_QUIRK_HPA_SIZE, /* Native size off by one */
+ __ATA_QUIRK_IVB, /* cbl det validity bit bugs */
+ __ATA_QUIRK_STUCK_ERR, /* Stuck ERR on next PACKET */
+ __ATA_QUIRK_BRIDGE_OK, /* No bridge limits */
+ __ATA_QUIRK_ATAPI_MOD16_DMA, /* Use ATAPI DMA for commands that */
+ /* are not a multiple of 16 bytes */
+ __ATA_QUIRK_FIRMWARE_WARN, /* Firmware update warning */
+ __ATA_QUIRK_1_5_GBPS, /* Force 1.5 Gbps */
+ __ATA_QUIRK_NOSETXFER, /* Skip SETXFER, SATA only */
+ __ATA_QUIRK_BROKEN_FPDMA_AA, /* Skip AA */
+ __ATA_QUIRK_DUMP_ID, /* Dump IDENTIFY data */
+ __ATA_QUIRK_MAX_SEC_LBA48, /* Set max sects to 65535 */
+ __ATA_QUIRK_ATAPI_DMADIR, /* Device requires dmadir */
+ __ATA_QUIRK_NO_NCQ_TRIM, /* Do not use queued TRIM */
+ __ATA_QUIRK_NOLPM, /* Do not use LPM */
+ __ATA_QUIRK_WD_BROKEN_LPM, /* Some WDs have broken LPM */
+ __ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */
+ __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
+ __ATA_QUIRK_NOTRIM, /* Do not use TRIM */
+ __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
+ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
+ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
+ __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */
+ __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */
+ __ATA_QUIRK_NO_FUA, /* Do not use FUA */
+
+ __ATA_QUIRK_MAX,
+};
+
enum {
/* various global constants */
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
@@ -338,6 +378,7 @@ enum {
ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
+ ATA_EHI_DID_PRINT_QUIRKS = (1 << 21), /* already printed quirks info */
ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
@@ -362,43 +403,42 @@ enum {
*/
ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
- /* Horkage types. May be set by libata or controller on drives
- (some horkage may be drive/controller pair dependent */
-
- ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
- ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
- ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
- ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
- ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
- ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */
- ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
- ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
- ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
- ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
- ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
- not multiple of 16 bytes */
- ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
- ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
- ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
- ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
- ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
- ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
- ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
- ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
- ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
- ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
- ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
- ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
- ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
- ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
- ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
- ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
- ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
- ATA_HORKAGE_NO_FUA = (1 << 30), /* Do not use FUA */
-
- /* DMA mask for user DMA control: User visible values; DO NOT
- renumber */
+ /*
+ * Quirk flags: may be set by libata or controller drivers on drives.
+ * Some quirks may be drive/controller pair dependent.
+ */
+ ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
+ ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
+ ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
+ ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
+
+ /* User visible DMA mask for DMA control. DO NOT renumber. */
ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
@@ -660,10 +700,25 @@ struct ata_cpr_log {
struct ata_cpr cpr[] __counted_by(nr_cpr);
};
+struct ata_cdl {
+ /*
+ * Buffer to cache the CDL log page 18h (command duration descriptors)
+ * for SCSI-ATA translation.
+ */
+ u8 desc_log_buf[ATA_LOG_CDL_SIZE];
+
+ /*
+ * Buffer to handle reading the sense data for successful NCQ Commands
+ * log page for commands using a CDL with one of the limits policy set
+ * to 0xD (successful completion with sense data available bit set).
+ */
+ u8 ncq_sense_log_buf[ATA_LOG_SENSE_NCQ_SIZE];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned int horkage; /* List of broken features */
+ unsigned int quirks; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
@@ -722,13 +777,16 @@ struct ata_device {
/* Concurrent positioning ranges */
struct ata_cpr_log *cpr_log;
- /* Command Duration Limits log support */
- u8 cdl[ATA_LOG_CDL_SIZE];
+ /* Command Duration Limits support */
+ struct ata_cdl *cdl;
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
struct ata_ering ering;
+
+ /* For EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
@@ -874,9 +932,6 @@ struct ata_port {
#ifdef CONFIG_ATA_ACPI
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
- /* owned by EH */
- u8 *ncq_sense_buf;
- u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* The following initializer overrides a method to NULL whether one of
@@ -1064,8 +1119,6 @@ static inline bool ata_port_is_frozen(const struct ata_port *ap)
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports);
@@ -1129,7 +1182,6 @@ extern int ata_xfer_mode2shift(u8 xfer_mode);
extern const char *ata_mode_string(unsigned int xfer_mask);
extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1190,12 +1242,13 @@ extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
extern int sata_link_hardreset(struct ata_link *link,
const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
-extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
static inline const unsigned int *
@@ -1217,6 +1270,11 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
return -EOPNOTSUPP;
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
static inline int sata_link_hardreset(struct ata_link *link,
const unsigned int *timing,
unsigned long deadline,
@@ -1233,10 +1291,6 @@ static inline int sata_link_resume(struct ata_link *link,
{
return -EOPNOTSUPP;
}
-static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
-{
- return -EOPNOTSUPP;
-}
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
@@ -1967,7 +2021,6 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
-extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_sff_queue_work(struct work_struct *work);
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 1b95fe31051f..61c4b9c41904 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -200,7 +200,7 @@ extern const struct svc_procedure nlmsvc_procedures[24];
extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
-extern unsigned long nlmsvc_timeout;
+extern unsigned long nlm_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index c9afcdd9324c..ff82ef85a084 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -119,7 +119,7 @@ write intent log information, three of which are mentioned here.
*/
/* this defines an element in a tracked set
- * .colision is for hash table lookup.
+ * .collision is for hash table lookup.
* When we process a new IO request, we know its sector, thus can deduce the
* region number (label) easily. To do the label -> object lookup without a
* full list walk, we use a simple hash table.
@@ -145,7 +145,7 @@ write intent log information, three of which are mentioned here.
* But it avoids high order page allocations in kmalloc.
*/
struct lc_element {
- struct hlist_node colision;
+ struct hlist_node collision;
struct list_head list; /* LRU list or free list */
unsigned refcnt;
/* back "pointer" into lc_cache->element[index],
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 1d59513bf230..9eca013aa5e1 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -431,7 +431,7 @@ LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token)
LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index a53ad4dabd7e..c2c11004085e 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -52,9 +52,9 @@
* bit in the node type. This is possible by using bit 1 to indicate if bit 2
* is part of the type or the slot.
*
- * Once the type is decided, the decision of an allocation range type or a range
- * type is done by examining the immutable tree flag for the MAPLE_ALLOC_RANGE
- * flag.
+ * Once the type is decided, the decision of an allocation range type or a
+ * range type is done by examining the immutable tree flag for the
+ * MT_FLAGS_ALLOC_RANGE flag.
*
* Node types:
* 0x??1 = Root
@@ -148,6 +148,18 @@ enum maple_type {
maple_arange_64,
};
+enum store_type {
+ wr_invalid,
+ wr_new_root,
+ wr_store_root,
+ wr_exact_fit,
+ wr_spanning_store,
+ wr_split_store,
+ wr_rebalance,
+ wr_append,
+ wr_node_store,
+ wr_slot_store,
+};
/**
* DOC: Maple tree flags
@@ -436,6 +448,7 @@ struct ma_state {
unsigned char offset;
unsigned char mas_flags;
unsigned char end; /* The end of the node */
+ enum store_type store_type; /* The type of store needed for this operation */
};
struct ma_wr_state {
@@ -477,6 +490,7 @@ struct ma_wr_state {
.max = ULONG_MAX, \
.alloc = NULL, \
.mas_flags = 0, \
+ .store_type = wr_invalid, \
}
#define MA_WR_STATE(name, ma_state, wr_entry) \
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index fc4d75c6cec3..673d5cae7c81 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -467,6 +467,7 @@ static inline __init_memblock bool memblock_bottom_up(void)
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
+unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0e5bf25d324f..34d2da05f2f1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -57,7 +57,7 @@ enum memcg_memory_event {
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- unsigned int generation;
+ int generation;
};
#ifdef CONFIG_MEMCG
@@ -70,6 +70,7 @@ struct mem_cgroup_id {
};
struct memcg_vmstats_percpu;
+struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;
@@ -77,7 +78,7 @@ struct lruvec_stats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
/* scan generation, increased every round-trip */
- unsigned int generation;
+ atomic_t generation;
};
/*
@@ -193,6 +194,11 @@ struct mem_cgroup {
struct page_counter memsw; /* v1 only */
};
+ /* registered local peak watchers */
+ struct list_head memory_peaks;
+ struct list_head swap_peaks;
+ spinlock_t peaks_lock;
+
/* Range enforcement for interrupt charges */
struct work_struct high_work;
@@ -270,6 +276,8 @@ struct mem_cgroup {
struct page_counter kmem; /* v1 only */
struct page_counter tcpmem; /* v1 only */
+ struct memcg1_events_percpu __percpu *events_percpu;
+
unsigned long soft_limit;
/* protected by memcg_oom_lock */
@@ -361,11 +369,11 @@ static inline bool folio_memcg_kmem(struct folio *folio);
* After the initialization objcg->memcg is always pointing at
* a valid memcg, but can be atomically swapped to the parent memcg.
*
- * The caller must ensure that the returned memcg won't be released:
- * e.g. acquire the rcu_read_lock or css_set_lock.
+ * The caller must ensure that the returned memcg won't be released.
*/
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
+ lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
return READ_ONCE(objcg->memcg);
}
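
A reader sketch that satisfies the new assertion by holding RCU; objcg_memcg_id() is an invented helper:

#include <linux/memcontrol.h>
#include <linux/rcupdate.h>

static unsigned short objcg_memcg_id(struct obj_cgroup *objcg)
{
	unsigned short id;

	rcu_read_lock();	/* keeps the returned memcg alive */
	id = mem_cgroup_id(obj_cgroup_memcg(objcg));
	rcu_read_unlock();

	return id;
}
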
@@ -439,6 +447,19 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio)
return __folio_memcg(folio);
}
+/*
+ * folio_memcg_charged - Check if a folio is charged to a memory cgroup.
+ * @folio: Pointer to the folio.
+ *
+ * Returns true if the folio is charged to a memory cgroup, otherwise false.
+ */
+static inline bool folio_memcg_charged(struct folio *folio)
+{
+ if (folio_memcg_kmem(folio))
+ return __folio_objcg(folio) != NULL;
+ return __folio_memcg(folio) != NULL;
+}
+
/**
* folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
* @folio: Pointer to the folio.
@@ -455,7 +476,6 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
unsigned long memcg_data = READ_ONCE(folio->memcg_data);
VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
- WARN_ON_ONCE(!rcu_read_lock_held());
if (memcg_data & MEMCG_DATA_KMEM) {
struct obj_cgroup *objcg;
@@ -464,6 +484,8 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
return obj_cgroup_memcg(objcg);
}
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}
@@ -677,7 +699,8 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
+
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
void __mem_cgroup_uncharge(struct folio *folio);
@@ -762,6 +785,8 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
struct mem_cgroup *get_mem_cgroup_from_current(void);
+struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
+
struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
@@ -1006,8 +1031,8 @@ static inline void count_memcg_folio_events(struct folio *folio,
count_memcg_events(memcg, idx, nr);
}
-static inline void count_memcg_event_mm(struct mm_struct *mm,
- enum vm_event_item idx)
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
{
struct mem_cgroup *memcg;
@@ -1017,10 +1042,16 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (likely(memcg))
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, count);
rcu_read_unlock();
}
+static inline void count_memcg_event_mm(struct mm_struct *mm,
+ enum vm_event_item idx)
+{
+ count_memcg_events_mm(mm, idx, 1);
+}
+
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
@@ -1176,7 +1207,7 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
return 0;
}
-static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
{
}
@@ -1240,6 +1271,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
+{
+ return NULL;
+}
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
@@ -1462,6 +1498,11 @@ static inline void count_memcg_folio_events(struct folio *folio,
{
}
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
+{
+}
+
static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
@@ -1717,7 +1758,6 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
static inline void count_objcg_event(struct obj_cgroup *objcg,
@@ -1780,11 +1820,6 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return -1;
}
-static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
-{
- return NULL;
-}
-
static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
return NULL;
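
The memcontrol.h changes above turn the old single-event count_memcg_event_mm() into a thin wrapper around the new batched count_memcg_events_mm(), so callers with a known batch size pay for only one RCU lookup. A minimal usage sketch, assuming the existing PSWPIN vm_event_item (the helper name below is illustrative, not from this patch):

/* Hedged sketch: account nr_pages swap-in events for an mm in one
 * call instead of looping over count_memcg_event_mm(). */
static void account_swapins(struct mm_struct *mm, unsigned int nr_pages)
{
	count_memcg_events_mm(mm, PSWPIN, nr_pages);	/* one rcu lookup */
}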
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index ebe876930e78..b27ddce5d324 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,54 +16,6 @@ struct resource;
struct vmem_altmap;
struct dev_pagemap;
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-
-#ifdef CONFIG_NUMA
-/*
- * XXX: node-aware allocation can't be used to get the new node's memory at this
- * time, because the pgdat for the new node is not itself allocated/initialized
- * yet. Using the new node's memory will need more consideration.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
-})
-
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
- node_data[nid] = pgdat;
-}
-
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index def5df6e74bf..551ef1c367d6 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -294,7 +294,7 @@ struct pm80x_chip {
struct i2c_client *client;
struct i2c_client *companion;
struct regmap *regmap;
- struct regmap_irq_chip *regmap_irq_chip;
+ const struct regmap_irq_chip *regmap_irq_chip;
struct regmap_irq_chip_data *irq_data;
int type;
int irq;
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
deleted file mode 100644
index 43dfca1c9702..000000000000
--- a/include/linux/mfd/ds1wm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* MFD cell driver data for the DS1WM driver
- *
- * to be defined in the MFD device that is
- * using this driver for one of its sub-devices
- */
-
-struct ds1wm_driver_data {
- int active_high;
- int clock_rate;
- /* in milliseconds, the amount of time to
- * sleep following a reset pulse. Zero
- * should work if your bus devices' recovery
- * time respects the 1-wire spec, since the
- * ds1wm implements the precise timings of
- * a reset pulse/presence detect sequence.
- */
- unsigned int reset_recover_delay;
-
- /* Say true here for big-endian hardware
- * (only relevant with bus_shift > 0).
- */
- bool is_hw_big_endian;
-
- /* left shift of register number to get the register address offset.
- * Only 0, 1 or 2 allowed for 8, 16 or 32 bit bus width respectively.
- */
- unsigned int bus_shift;
-};
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 644be30b69c8..002e49b2ebd9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
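
isolate_folio_to_list() gives migrate.h a folio-based isolation primitive, with a !CONFIG_MIGRATION stub that fails cleanly. A hedged sketch of the expected calling pattern (the callbacks and reason code below are placeholders, not from this patch):

/* Sketch: isolate one folio, then migrate everything collected. */
LIST_HEAD(pagelist);

if (isolate_folio_to_list(folio, &pagelist))
	migrate_pages(&pagelist, new_folio, free_folio, private,
		      MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);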
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index d0f7d1f36c5e..cc647992f3d1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -211,6 +211,7 @@ enum {
enum {
MLX5_PFAULT_SUBTYPE_WQE = 0,
MLX5_PFAULT_SUBTYPE_RDMA = 1,
+ MLX5_PFAULT_SUBTYPE_MEMORY = 2,
};
enum wqe_page_fault_type {
@@ -370,6 +371,7 @@ enum mlx5_driver_event {
MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
};
enum {
@@ -646,10 +648,11 @@ struct mlx5_eqe_page_req {
__be32 rsvd1[5];
};
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
struct mlx5_eqe_page_fault {
- __be32 bytes_committed;
union {
struct {
+ __be32 bytes_committed;
u16 reserved1;
__be16 wqe_index;
u16 reserved2;
@@ -659,6 +662,7 @@ struct mlx5_eqe_page_fault {
__be32 pftype_wq;
} __packed wqe;
struct {
+ __be32 bytes_committed;
__be32 r_key;
u16 reserved1;
__be16 packet_length;
@@ -666,6 +670,23 @@ struct mlx5_eqe_page_fault {
__be64 rdma_va;
__be32 pftype_token;
} __packed rdma;
+ struct {
+ u8 flags;
+ u8 reserved1;
+ __be16 post_demand_fault_pages;
+ __be16 pre_demand_fault_pages;
+ __be16 token47_32;
+ __be32 token31_0;
+ /*
+ * FW changed from specifying the fault size as a byte
+ * count to a granularity of 4k pages. The size is
+ * specified in pages using bits 31:12, which keeps the
+ * byte-count interpretation backward compatible.
+ */
+ __be32 demand_fault_pages;
+ __be32 mkey;
+ __be64 va;
+ } __packed memory;
} __packed;
} __packed;
@@ -1370,6 +1391,14 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
+#define MLX5_CAP_ODP_SCHEME(mdev, cap) \
+ (MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ mem_page_fault) ? \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ memory_page_fault_scheme_cap.cap) : \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ transport_page_fault_scheme_cap.cap))
+
#define MLX5_CAP_ODP_MAX(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
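
Because the memory-scheme EQE stores the fault size in 4k pages in bits 31:12, a legacy reader that treats the field as a byte count still gets a correct page-aligned size. A hedged decode sketch (variable names are illustrative):

/* Sketch: same 32-bit field, two consistent readings. */
u32 raw = be32_to_cpu(eqe->memory.demand_fault_pages);
unsigned long fault_bytes = raw;	/* already 4k aligned */
unsigned long fault_pages = raw / MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;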
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9f42834f57c5..e23c692a34c7 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -645,6 +645,7 @@ struct mlx5_priv {
struct mlx5_sf_hw_table *sf_hw_table;
struct mlx5_sf_table *sf_table;
#endif
+ struct blocking_notifier_head lag_nh;
};
enum mlx5_device_state {
@@ -1183,7 +1184,6 @@ bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 620a5c305123..97f6de69f616 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -316,6 +316,7 @@ enum {
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
MLX5_CMD_OP_GENERATE_WQE = 0xb17,
+ MLX5_CMD_OPCODE_QUERY_VUID = 0xb22,
MLX5_CMD_OP_MAX
};
@@ -1412,11 +1413,13 @@ struct mlx5_ifc_atomic_caps_bits {
u8 reserved_at_e0[0x720];
};
-struct mlx5_ifc_odp_cap_bits {
+struct mlx5_ifc_odp_scheme_cap_bits {
u8 reserved_at_0[0x40];
u8 sig[0x1];
- u8 reserved_at_41[0x1f];
+ u8 reserved_at_41[0x4];
+ u8 page_prefetch[0x1];
+ u8 reserved_at_46[0x1a];
u8 reserved_at_60[0x20];
@@ -1430,7 +1433,20 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
- u8 reserved_at_120[0x6E0];
+ u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap;
+
+ struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap;
+
+ u8 reserved_at_400[0x200];
+
+ u8 mem_page_fault[0x1];
+ u8 reserved_at_601[0x1f];
+
+ u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_tls_cap_bits {
@@ -1978,7 +1994,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5a0[0x10];
u8 enhanced_cqe_compression[0x1];
- u8 reserved_at_5b1[0x2];
+ u8 reserved_at_5b1[0x1];
+ u8 crossing_vhca_mkey[0x1];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
@@ -2047,7 +2064,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 dynamic_msix_table_size[0xc];
u8 reserved_at_740[0xc];
u8 min_dynamic_vf_msix_table_size[0x4];
- u8 reserved_at_750[0x4];
+ u8 reserved_at_750[0x2];
+ u8 data_direct[0x1];
+ u8 reserved_at_753[0x1];
u8 max_dynamic_vf_msix_table_size[0xc];
u8 reserved_at_760[0x3];
@@ -2075,7 +2094,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0x80];
u8 migratable[0x1];
- u8 reserved_at_81[0x1f];
+ u8 reserved_at_81[0x11];
+ u8 query_vuid[0x1];
+ u8 reserved_at_93[0x5];
+ u8 umr_log_entity_size_5[0x1];
+ u8 reserved_at_99[0x7];
u8 max_reformat_insert_size[0x8];
u8 max_reformat_insert_offset[0x8];
@@ -2130,7 +2153,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 min_mkey_log_entity_size_fixed_buffer[0x5];
u8 ec_vf_vport_base[0x10];
- u8 reserved_at_3a0[0x10];
+ u8 reserved_at_3a0[0xa];
+ u8 max_mkey_log_entity_size_mtt[0x6];
u8 max_rqt_vhca_id[0x10];
u8 reserved_at_3c0[0x20];
@@ -4267,6 +4291,7 @@ enum {
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
+ MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
};
struct mlx5_ifc_mkc_bits {
@@ -4309,14 +4334,16 @@ struct mlx5_ifc_mkc_bits {
u8 bsf_octword_size[0x20];
- u8 reserved_at_120[0x80];
+ u8 reserved_at_120[0x60];
+
+ u8 crossing_target_vhca_id[0x10];
+ u8 reserved_at_190[0x10];
u8 translations_octword_size[0x20];
u8 reserved_at_1c0[0x19];
u8 relaxed_ordering_read[0x1];
- u8 reserved_at_1d9[0x1];
- u8 log_page_size[0x5];
+ u8 log_page_size[0x6];
u8 reserved_at_1e0[0x20];
};
@@ -5253,6 +5280,36 @@ struct mlx5_ifc_query_vport_state_out_bits {
u8 state[0x4];
};
+struct mlx5_ifc_array1024_auto_bits {
+ u8 array1024_auto[32][0x20];
+};
+
+struct mlx5_ifc_query_vuid_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x40];
+
+ u8 query_vfs_vuid[0x1];
+ u8 data_direct[0x1];
+ u8 reserved_at_62[0xe];
+ u8 vhca_id[0x10];
+};
+
+struct mlx5_ifc_query_vuid_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1a0];
+
+ u8 reserved_at_1e0[0x10];
+ u8 num_of_entries[0x10];
+
+ struct mlx5_ifc_array1024_auto_bits vuid[];
+};
+
enum {
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
@@ -7357,6 +7414,30 @@ struct mlx5_ifc_qp_2err_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_trans_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0x4];
+ u8 page_fault_type[0x3];
+ u8 wq_number[0x18];
+
+ u8 reserved_at_20[0x8];
+ u8 fault_token[0x18];
+};
+
+struct mlx5_ifc_mem_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0xf];
+ u8 fault_token_47_32[0x10];
+
+ u8 fault_token_31_0[0x20];
+};
+
+union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits {
+ struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info;
+ struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info;
+ u8 reserved_at_0[0x40];
+};
+
struct mlx5_ifc_page_fault_resume_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7373,13 +7454,8 @@ struct mlx5_ifc_page_fault_resume_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 error[0x1];
- u8 reserved_at_41[0x4];
- u8 page_fault_type[0x3];
- u8 wq_number[0x18];
-
- u8 reserved_at_60[0x8];
- u8 token[0x18];
+ union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits
+ page_fault_info;
};
struct mlx5_ifc_nop_out_bits {
@@ -9131,7 +9207,8 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 pg_access[0x1];
u8 mkey_umem_valid[0x1];
- u8 reserved_at_62[0x1e];
+ u8 data_direct[0x1];
+ u8 reserved_at_63[0x1d];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 13bff7cf03b7..ecf63d2b0582 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -98,7 +98,11 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
#ifndef PHYSMEM_END
+# ifdef MAX_PHYSMEM_BITS
# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
+# else
+# define PHYSMEM_END (((phys_addr_t)-1) & ~(1ULL << 63))
+# endif
#endif
#include <asm/page.h>
@@ -1015,27 +1019,6 @@ static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
return mas_prev(&vmi->mas, 0);
}
-static inline
-struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
-{
- return mas_prev_range(&vmi->mas, 0);
-}
-
-static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
-{
- return vmi->mas.index;
-}
-
-static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
-{
- return vmi->mas.last + 1;
-}
-static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
- unsigned long count)
-{
- return mas_expected_entries(&vmi->mas, count);
-}
-
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{
@@ -1259,8 +1242,7 @@ static inline int folio_mapcount(const struct folio *folio)
if (likely(!folio_test_large(folio))) {
mapcount = atomic_read(&folio->_mapcount) + 1;
- /* Handle page_has_type() pages */
- if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
+ if (page_mapcount_is_type(mapcount))
mapcount = 0;
return mapcount;
}
@@ -1756,6 +1738,8 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
}
}
+
+bool folio_use_access_time(struct folio *folio);
#else /* !CONFIG_NUMA_BALANCING */
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
@@ -1809,6 +1793,10 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
}
+static inline bool folio_use_access_time(struct folio *folio)
+{
+ return false;
+}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -2158,14 +2146,19 @@ static inline size_t folio_size(const struct folio *folio)
* MM ("mapped shared"), or if the folio is only mapped into a single MM
* ("mapped exclusively").
*
+ * For KSM folios, this function also returns "mapped shared" when a folio is
+ * mapped multiple times into the same MM, because the individual page mappings
+ * are independent.
+ *
* As precise information is not easily available for all folios, this function
* estimates the number of MMs ("sharers") that are currently mapping a folio
* using the number of times the first page of the folio is currently mapped
* into page tables.
*
- * For small anonymous folios (except KSM folios) and anonymous hugetlb folios,
- * the return value will be exactly correct, because they can only be mapped
- * at most once into an MM, and they cannot be partially mapped.
+ * For small anonymous folios and anonymous hugetlb folios, the return
+ * value will be exactly correct: non-KSM folios can only be mapped at most once
+ * into an MM, and they cannot be partially mapped. KSM folios are
+ * considered shared even if mapped multiple times into the same MM.
*
* For other folios, the result can be fuzzy:
* #. For partially-mappable large folios (THP), the return value can wrongly
@@ -2174,9 +2167,6 @@ static inline size_t folio_size(const struct folio *folio)
* #. For pagecache folios (including hugetlb), the return value can wrongly
* indicate "mapped shared" (false positive) when two VMAs in the same MM
* cover the same file range.
- * #. For (small) KSM folios, the return value can wrongly indicate "mapped
- * shared" (false positive), when the folio is mapped multiple times into
- * the same MM.
*
* Further, this function only considers current page table mappings that
* are tracked using the folio mapcount(s).
@@ -2210,26 +2200,10 @@ static inline bool folio_likely_mapped_shared(struct folio *folio)
return atomic_read(&folio->_mapcount) > 0;
}
-#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-static inline int arch_make_page_accessible(struct page *page)
-{
- return 0;
-}
-#endif
-
#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
static inline int arch_make_folio_accessible(struct folio *folio)
{
- int ret;
- long i, nr = folio_nr_pages(folio);
-
- for (i = 0; i < nr; i++) {
- ret = arch_make_page_accessible(folio_page(folio, i));
- if (ret)
- break;
- }
-
- return ret;
+ return 0;
}
#endif
@@ -2409,11 +2383,40 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+struct follow_pfnmap_args {
+ /**
+ * Inputs:
+ * @vma: Pointer to the vm_area_struct to walk
+ * @address: the virtual address to walk
+ */
+ struct vm_area_struct *vma;
+ unsigned long address;
+ /**
+ * Internals:
+ *
+ * The caller shouldn't touch any of these.
+ */
+ spinlock_t *lock;
+ pte_t *ptep;
+ /**
+ * Outputs:
+ *
+ * @pfn: the PFN of the address
+ * @pgprot: the pgprot_t of the mapping
+ * @writable: whether the mapping is writable
+ * @special: whether the mapping is a special mapping (real PFN maps)
+ */
+ unsigned long pfn;
+ pgprot_t pgprot;
+ bool writable;
+ bool special;
+};
+int follow_pfnmap_start(struct follow_pfnmap_args *args);
+void follow_pfnmap_end(struct follow_pfnmap_args *args);
+
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
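
follow_pfnmap_start()/follow_pfnmap_end() replace follow_pte() with an explicit begin/end API: the caller fills the inputs, the lock/ptep internals stay private, and the outputs are only valid between the two calls. A minimal usage sketch under those assumptions:

/* Sketch: query the PFN backing addr in vma; pfn/pgprot must not
 * be used after follow_pfnmap_end(). */
struct follow_pfnmap_args args = {
	.vma = vma,
	.address = addr,
};

if (follow_pfnmap_start(&args) == 0) {
	pr_debug("pfn=%lx writable=%d special=%d\n",
		 args.pfn, args.writable, args.special);
	follow_pfnmap_end(&args);
}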
@@ -2541,11 +2544,6 @@ int set_page_dirty_lock(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-extern unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len,
- bool need_rmap_locks, bool for_stack);
-
/*
* Flags used by change_protection(). For now we make it a bitmap so
* that we can pass in multiple flags just like parameters. However
@@ -2566,21 +2564,6 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
-bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
-bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
-static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
-{
- /*
- * We want to check manually if we can change individual PTEs writable
- * if we can't do that automatically for all PTEs in a mapping. For
- * private mappings, that's always the case when we have write
- * permissions as we properly have to handle COW.
- */
- if (vma->vm_flags & VM_SHARED)
- return vma_wants_writenotify(vma, vma->vm_page_prot);
- return !!(vma->vm_flags & VM_WRITE);
-
-}
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
extern long change_protection(struct mmu_gather *tlb,
@@ -2704,6 +2687,30 @@ static inline pte_t pte_mkspecial(pte_t pte)
}
#endif
+#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return false;
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
+
+#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return false;
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pud;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
+
#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
@@ -2896,7 +2903,7 @@ static inline void pagetable_free(struct ptdesc *pt)
__free_pages(page, compound_order(page));
}
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
bool ptlock_alloc(struct ptdesc *ptdesc);
@@ -2954,7 +2961,7 @@ static inline bool ptlock_init(struct ptdesc *ptdesc)
return true;
}
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
@@ -2969,7 +2976,7 @@ static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{
@@ -3029,7 +3036,7 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
-#if USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
@@ -3288,78 +3295,9 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *next);
-extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff);
-extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void unlink_file_vma(struct vm_area_struct *);
-extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
- unsigned long addr, unsigned long len, pgoff_t pgoff,
- bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
-struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long vm_flags,
- struct mempolicy *policy,
- struct vm_userfaultfd_ctx uffd_ctx,
- struct anon_vma_name *anon_name);
-
-/* We are about to modify the VMA's flags. */
-static inline struct vm_area_struct
-*vma_modify_flags(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long new_flags)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), vma->vm_userfaultfd_ctx,
- anon_vma_name(vma));
-}
-
-/* We are about to modify the VMA's flags and/or anon_name. */
-static inline struct vm_area_struct
-*vma_modify_flags_name(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- unsigned long new_flags,
- struct anon_vma_name *new_name)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
-}
-
-/* We are about to modify the VMA's memory policy. */
-static inline struct vm_area_struct
-*vma_modify_policy(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct mempolicy *new_pol)
-{
- return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
- new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-}
-
-/* We are about to modify the VMA's flags and/or uffd context. */
-static inline struct vm_area_struct
-*vma_modify_flags_uffd(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long new_flags,
- struct vm_userfaultfd_ctx new_ctx)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), new_ctx, anon_vma_name(vma));
-}
+int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -3392,10 +3330,6 @@ extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags,
const struct vm_special_mapping *spec);
-/* This is an obsolete alternative to _install_special_mapping. */
-extern int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long flags, struct page **pages);
unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);
@@ -3421,14 +3355,14 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
-extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -3656,9 +3590,6 @@ static inline vm_fault_t vmf_fs_error(int err)
return VM_FAULT_SIGBUS;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags);
-
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
@@ -4194,18 +4125,18 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
#ifdef CONFIG_UNACCEPTED_MEMORY
-bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end);
-void accept_memory(phys_addr_t start, phys_addr_t end);
+bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
+void accept_memory(phys_addr_t start, unsigned long size);
#else
static inline bool range_contains_unaccepted_memory(phys_addr_t start,
- phys_addr_t end)
+ unsigned long size)
{
return false;
}
-static inline void accept_memory(phys_addr_t start, phys_addr_t end)
+static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
@@ -4213,9 +4144,7 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end)
static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
{
- phys_addr_t paddr = pfn << PAGE_SHIFT;
-
- return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
+ return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
}
void vma_pgtable_walk_begin(struct vm_area_struct *vma);
@@ -4233,4 +4162,61 @@ static inline int do_mseal(unsigned long start, size_t len_in, unsigned long fla
}
#endif
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+ int i;
+ struct alloc_tag *tag;
+ unsigned int nr_pages = 1 << new_order;
+
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+ tag = pgalloc_tag_get(&folio->page);
+ if (!tag)
+ return;
+
+ for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
+ union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
+
+ if (ref) {
+ /* Set new reference to point to the original tag */
+ alloc_tag_ref_set(ref, tag);
+ put_page_tag_ref(ref);
+ }
+ }
+}
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+ struct alloc_tag *tag;
+ union codetag_ref *ref;
+
+ tag = pgalloc_tag_get(&old->page);
+ if (!tag)
+ return;
+
+ ref = get_page_tag_ref(&new->page);
+ if (!ref)
+ return;
+
+ /* Clear the old ref to the original allocation tag. */
+ clear_page_tag_ref(&old->page);
+ /* Decrement the counters of the tag on get_new_folio. */
+ alloc_tag_sub(ref, folio_nr_pages(new));
+
+ __alloc_tag_ref_set(ref, tag);
+
+ put_page_tag_ref(ref);
+}
+#else /* !CONFIG_MEM_ALLOC_PROFILING */
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+}
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+}
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
#endif /* _LINUX_MM_H */
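
pgalloc_tag_split() re-points each new head page at the original allocation tag after a split, while pgalloc_tag_copy() moves the tag during migration. A hedged sketch of the two call sites (the folio/order variables are illustrative):

/* Sketch: after splitting a folio from old_order down to new_order,
 * re-tag the now-independent sub-folios. */
pgalloc_tag_split(folio, old_order, new_order);

/* Sketch: on migration, transfer the tag from old to new. */
pgalloc_tag_copy(newfolio, folio);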
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 485424979254..6e3bdf8e38bc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -109,7 +109,7 @@ struct page {
/**
* @private: Mapping-private opaque data.
* Usually used for buffer_heads if PagePrivate.
- * Used for swp_entry_t if PageSwapCache.
+ * Used for swp_entry_t if the swapcache flag is set.
* Indicates order in the buddy system if PageBuddy.
*/
unsigned long private;
@@ -660,6 +660,9 @@ struct vma_numab_state {
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
+ *
+ * Only explicitly marked struct members may be accessed by RCU readers before
+ * getting a stable reference.
*/
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
@@ -675,7 +678,11 @@ struct vm_area_struct {
#endif
};
- struct mm_struct *vm_mm; /* The address space we belong to. */
+ /*
+ * The address space we belong to.
+ * Unstable RCU readers are allowed to read this.
+ */
+ struct mm_struct *vm_mm;
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
@@ -688,7 +695,10 @@ struct vm_area_struct {
};
#ifdef CONFIG_PER_VMA_LOCK
- /* Flag to indicate areas detached from the mm->mm_mt tree */
+ /*
+ * Flag to indicate areas detached from the mm->mm_mt tree.
+ * Unstable RCU readers are allowed to read this.
+ */
bool detached;
/*
@@ -706,6 +716,7 @@ struct vm_area_struct {
* slowpath.
*/
int vm_lock_seq;
+ /* Unstable RCU readers are allowed to read this. */
struct vma_lock *vm_lock;
#endif
@@ -947,7 +958,7 @@ struct mm_struct {
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
@@ -1313,6 +1324,9 @@ struct vm_special_mapping {
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
+
+ void (*close)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma);
};
enum tlb_flush_reason {
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index a2f6179b672b..bff5706b76e1 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -16,9 +16,6 @@
#include <asm/tlbbatch.h>
#endif
-#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
-#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
- IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 6a31ed02d3ff..8fc2b328ec4d 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1dc6248feb83..17506e4a2835 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -666,11 +666,6 @@ enum zone_watermarks {
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
-#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
-#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
-#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
-#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
-
/*
* Flags used in pcp->flags field.
*
@@ -1016,6 +1011,32 @@ enum zone_flags {
ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
+static inline unsigned long wmark_pages(const struct zone *z,
+ enum zone_watermarks w)
+{
+ return z->_watermark[w] + z->watermark_boost;
+}
+
+static inline unsigned long min_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_MIN);
+}
+
+static inline unsigned long low_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_LOW);
+}
+
+static inline unsigned long high_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_HIGH);
+}
+
+static inline unsigned long promo_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_PROMO);
+}
+
static inline unsigned long zone_managed_pages(struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
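
Converting the watermark macros into typed inline functions gives type-checked accessors plus the new promo_wmark_pages() helper. A hedged usage sketch (the helper below is illustrative, not part of the patch):

/* Sketch: typed watermark check against the zone's free pages. */
static inline bool zone_below_min(struct zone *z)
{
	return zone_page_state(z, NR_FREE_PAGES) < min_wmark_pages(z);
}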
@@ -1688,7 +1709,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
zone = zonelist_zone(z))
#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
- for (zone = z->zone; \
+ for (zone = zonelist_zone(z); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
@@ -1724,7 +1745,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
nid = first_node(*nodes);
zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
- return (!z->zone) ? true : false;
+	return !zonelist_zone(z);
}
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index cd4d5c8781f5..b1b219bc3422 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -9,6 +9,7 @@ struct mnt_idmap;
struct user_namespace;
extern struct mnt_idmap nop_mnt_idmap;
+extern struct mnt_idmap invalid_mnt_idmap;
extern struct user_namespace init_user_ns;
typedef struct {
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 944979763825..b10093c4d00e 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -554,6 +554,8 @@ enum {
MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19),
/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
+ /* PCI MSIs cannot be steered separately to CPU cores */
+ MSI_FLAG_NO_AFFINITY = (1 << 21),
};
/**
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a561c629d89f..2bf91b57591b 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -49,7 +49,6 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
-#ifndef CONFIG_PREEMPT_RT
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
@@ -65,6 +64,18 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
+/**
+ * mutex_init_with_key - initialize a mutex with a given lockdep key
+ * @mutex: the mutex to be initialized
+ * @key: the lockdep key to be associated with the mutex
+ *
+ * Initialize the mutex to the unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+#define mutex_init_with_key(mutex, key) __mutex_init((mutex), #mutex, (key))
+
+#ifndef CONFIG_PREEMPT_RT
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -111,12 +122,6 @@ do { \
__mutex_rt_init((mutex), name, key); \
} while (0)
-#define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
-} while (0)
#endif /* CONFIG_PREEMPT_RT */
#ifdef CONFIG_DEBUG_MUTEXES
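
Hoisting mutex_init() out of the !PREEMPT_RT block lets one definition serve both configurations, and mutex_init_with_key() covers callers that want several dynamically allocated mutexes to share a lockdep class. A hedged sketch (struct and key names are illustrative):

/* Sketch: many dynamically created locks, one lockdep class. */
static struct lock_class_key my_dev_lock_key;

static void my_dev_lock_init(struct my_dev *dev)
{
	mutex_init_with_key(&dev->lock, &my_dev_lock_key);
}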
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2683b2b77612..2b8aac2c70ad 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -376,15 +376,11 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;
-enum ip_conntrack_dir;
struct nf_nat_hook {
int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
const struct nlattr *attr);
void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
- unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
- enum nf_nat_manip_type mtype,
- enum ip_conntrack_dir dir);
void (*remove_nat_bysrc)(struct nf_conn *ct);
};
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index ceb70a926b95..9ad727ddfedb 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -8,11 +8,20 @@
#ifndef _LINUX_NFS_H
#define _LINUX_NFS_H
+#include <linux/cred.h>
+#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
+/* The LOCALIO program is entirely private to Linux and is
+ * NOT part of the uapi.
+ */
+#define NFS_LOCALIO_PROGRAM 400122
+#define LOCALIOPROC_NULL 0
+#define LOCALIOPROC_UUID_IS_LOCAL 1
+
/*
* This is the kernel NFS client file handle representation
*/
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index f9df88091c6d..8d7430d9f218 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -281,15 +281,18 @@ enum nfsstat4 {
/* nfs42 */
NFS4ERR_PARTNER_NOTSUPP = 10088,
NFS4ERR_PARTNER_NO_AUTH = 10089,
- NFS4ERR_UNION_NOTSUPP = 10090,
- NFS4ERR_OFFLOAD_DENIED = 10091,
- NFS4ERR_WRONG_LFS = 10092,
- NFS4ERR_BADLABEL = 10093,
- NFS4ERR_OFFLOAD_NO_REQS = 10094,
+ NFS4ERR_UNION_NOTSUPP = 10090,
+ NFS4ERR_OFFLOAD_DENIED = 10091,
+ NFS4ERR_WRONG_LFS = 10092,
+ NFS4ERR_BADLABEL = 10093,
+ NFS4ERR_OFFLOAD_NO_REQS = 10094,
/* xattr (RFC8276) */
- NFS4ERR_NOXATTR = 10095,
- NFS4ERR_XATTR2BIG = 10096,
+ NFS4ERR_NOXATTR = 10095,
+ NFS4ERR_XATTR2BIG = 10096,
+
+ /* can be used for internal errors */
+ NFS4ERR_FIRST_FREE
};
/* error codes for internal client use */
diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
new file mode 100644
index 000000000000..5fc02df88252
--- /dev/null
+++ b/include/linux/nfs_common.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains constants and methods used by both NFS client and server.
+ */
+#ifndef _LINUX_NFS_COMMON_H
+#define _LINUX_NFS_COMMON_H
+
+#include <linux/errno.h>
+#include <uapi/linux/nfs.h>
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO EIO
+
+int nfs_stat_to_errno(enum nfs_stat status);
+int nfs4_stat_to_errno(int stat);
+
+#endif /* _LINUX_NFS_COMMON_H */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1df86ab98c77..853df3fcd4c2 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/nfslocalio.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -49,6 +50,7 @@ struct nfs_client {
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
#define NFS_CS_PNFS 9 /* - Server used for pnfs */
+#define NFS_CS_LOCAL_IO 10 /* - client is local */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -125,6 +127,13 @@ struct nfs_client {
struct net *cl_net;
struct list_head pending_cb_stateids;
struct rcu_head rcu;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct timespec64 cl_nfssvc_boot;
+ seqlock_t cl_boot_lock;
+ nfs_uuid_t cl_uuid;
+ spinlock_t cl_localio_lock;
+#endif /* CONFIG_NFS_LOCALIO */
};
/*
@@ -158,6 +167,7 @@ struct nfs_server {
#define NFS_MOUNT_WRITE_WAIT 0x02000000
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
#define NFS_MOUNT_SHUTDOWN 0x08000000
+#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000
unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
@@ -234,8 +244,7 @@ struct nfs_server {
/* the following fields are protected by nfs_client->cl_lock */
struct rb_root state_owners;
#endif
- struct ida openowner_id;
- struct ida lockowner_id;
+ atomic64_t owner_ctr;
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 45623af3e7b8..12d8e47bc5a3 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -446,7 +446,7 @@ struct nfs42_clone_res {
struct stateowner_id {
__u64 create_time;
- __u32 uniquifier;
+ __u64 uniquifier;
};
struct nfs4_open_delegation {
@@ -1854,6 +1854,24 @@ struct nfs_rpc_ops {
};
/*
+ * Helper functions used by NFS client and/or server
+ */
+static inline void encode_opaque_fixed(struct xdr_stream *xdr,
+ const void *buf, size_t len)
+{
+ WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
+}
+
+static inline int decode_opaque_fixed(struct xdr_stream *xdr,
+ void *buf, size_t len)
+{
+ ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
+ if (unlikely(ret < 0))
+ return -EIO;
+ return 0;
+}
+
+/*
* Function vectors etc. for the NFS client
*/
extern const struct nfs_rpc_ops nfs_v2_clientops;
@@ -1866,4 +1884,4 @@ extern const struct rpc_version nfs_version4;
extern const struct rpc_version nfsacl_version3;
extern const struct rpc_program nfsacl_program;
-#endif
+#endif /* _LINUX_NFS_XDR_H */
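
Moving encode/decode_opaque_fixed() into nfs_xdr.h lets the client and the new LOCALIO code share them. A hedged sketch of decoding a fixed-size stateid field with the helper (the wrapper function is illustrative):

/* Sketch: read a fixed-size opaque; short reads become -EIO. */
static int decode_stateid_other(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	return decode_opaque_fixed(xdr, stateid->other,
				   NFS4_STATEID_OTHER_SIZE);
}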
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
new file mode 100644
index 000000000000..b353abe00357
--- /dev/null
+++ b/include/linux/nfslocalio.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+#ifndef __LINUX_NFSLOCALIO_H
+#define __LINUX_NFSLOCALIO_H
+
+/* nfsd_file structure is purposely kept opaque to NFS client */
+struct nfsd_file;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/uuid.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/nfs.h>
+#include <net/net_namespace.h>
+
+/*
+ * Useful to allow a client to negotiate whether localio is
+ * possible with its server.
+ *
+ * See Documentation/filesystems/nfs/localio.rst for more detail.
+ */
+typedef struct {
+ uuid_t uuid;
+ struct list_head list;
+ struct net __rcu *net; /* nfsd's network namespace */
+ struct auth_domain *dom; /* auth_domain for localio */
+} nfs_uuid_t;
+
+void nfs_uuid_begin(nfs_uuid_t *);
+void nfs_uuid_end(nfs_uuid_t *);
+void nfs_uuid_is_local(const uuid_t *, struct list_head *,
+ struct net *, struct auth_domain *, struct module *);
+void nfs_uuid_invalidate_clients(struct list_head *list);
+void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid);
+
+/* localio needs to map filehandle -> struct nfsd_file */
+extern struct nfsd_file *
+nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *,
+ const struct cred *, const struct nfs_fh *,
+ const fmode_t) __must_hold(rcu);
+
+struct nfsd_localio_operations {
+ bool (*nfsd_serv_try_get)(struct net *);
+ void (*nfsd_serv_put)(struct net *);
+ struct nfsd_file *(*nfsd_open_local_fh)(struct net *,
+ struct auth_domain *,
+ struct rpc_clnt *,
+ const struct cred *,
+ const struct nfs_fh *,
+ const fmode_t);
+ void (*nfsd_file_put_local)(struct nfsd_file *);
+ struct file *(*nfsd_file_file)(struct nfsd_file *);
+} ____cacheline_aligned;
+
+extern void nfsd_localio_ops_init(void);
+extern const struct nfsd_localio_operations *nfs_to;
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *,
+ struct rpc_clnt *, const struct cred *,
+ const struct nfs_fh *, const fmode_t);
+
+#else /* CONFIG_NFS_LOCALIO */
+static inline void nfsd_localio_ops_init(void)
+{
+}
+#endif /* CONFIG_NFS_LOCALIO */
+
+#endif /* __LINUX_NFSLOCALIO_H */
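
The nfs_uuid_t handshake lets a client prove to nfsd that both live in the same kernel before short-circuiting I/O. A hedged sketch of the client-side probe, with the rpc_message setup and error handling elided (LOCALIOPROC_UUID_IS_LOCAL is the procedure assumed here):

/* Sketch: publish a uuid, ask the server whether it sees the same
 * uuid locally, then tear down the registration. */
nfs_uuid_begin(&clp->cl_uuid);
status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
nfs_uuid_end(&clp->cl_uuid);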
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b61438313a73..9fd7a0ce9c1a 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -107,11 +107,11 @@ extern nodemask_t _unused_nodemask_arg_;
*/
#define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \
__nodemask_pr_bits(maskp)
-static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
+static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
return m ? MAX_NUMNODES : 0;
}
-static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
+static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
return m ? m->bits : NULL;
}
@@ -132,19 +132,19 @@ static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
}
#define node_clear(node, dst) __node_clear((node), &(dst))
-static inline void __node_clear(int node, volatile nodemask_t *dstp)
+static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
-static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
-static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
@@ -154,14 +154,14 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline bool __node_test_and_set(int node, nodemask_t *addr)
+static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -169,7 +169,7 @@ static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_or(dst, src1, src2) \
__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -177,7 +177,7 @@ static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_xor(dst, src1, src2) \
__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -185,7 +185,7 @@ static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -193,7 +193,7 @@ static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
-static inline void __nodes_complement(nodemask_t *dstp,
+static __always_inline void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
@@ -201,7 +201,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_equal(const nodemask_t *src1p,
+static __always_inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -209,7 +209,7 @@ static inline bool __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_intersects(const nodemask_t *src1p,
+static __always_inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -217,33 +217,33 @@ static inline bool __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_subset(const nodemask_t *src1p,
+static __always_inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
#define nodes_shift_right(dst, src, n) \
__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_right(nodemask_t *dstp,
+static __always_inline void __nodes_shift_right(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
@@ -251,7 +251,7 @@ static inline void __nodes_shift_right(nodemask_t *dstp,
#define nodes_shift_left(dst, src, n) \
__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_left(nodemask_t *dstp,
+static __always_inline void __nodes_shift_left(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
@@ -261,13 +261,13 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
> MAX_NUMNODES, then the silly min_ts could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline unsigned int __first_node(const nodemask_t *srcp)
+static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline unsigned int __next_node(int n, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
@@ -277,7 +277,7 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
unsigned int ret = __next_node(node, srcp);
@@ -286,7 +286,7 @@ static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
return ret;
}
-static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
nodes_clear(*mask);
node_set(node, *mask);
@@ -304,7 +304,7 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline unsigned int __first_unset_node(const nodemask_t *maskp)
+static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
return min_t(unsigned int, MAX_NUMNODES,
find_first_zero_bit(maskp->bits, MAX_NUMNODES));
@@ -338,21 +338,21 @@ static inline unsigned int __first_unset_node(const nodemask_t *maskp)
#define nodemask_parse_user(ubuf, ulen, dst) \
__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
-static inline int __nodemask_parse_user(const char __user *buf, int len,
+static __always_inline int __nodemask_parse_user(const char __user *buf, int len,
nodemask_t *dstp, int nbits)
{
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
-static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
+static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
#define node_remap(oldbit, old, new) \
__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
-static inline int __node_remap(int oldbit,
+static __always_inline int __node_remap(int oldbit,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
@@ -360,7 +360,7 @@ static inline int __node_remap(int oldbit,
#define nodes_remap(dst, src, old, new) \
__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
-static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
+static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
@@ -368,7 +368,7 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
#define nodes_onto(dst, orig, relmap) \
__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
-static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
const nodemask_t *relmapp, int nbits)
{
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
@@ -376,7 +376,7 @@ static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
#define nodes_fold(dst, orig, sz) \
__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
-static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
int sz, int nbits)
{
bitmap_fold(dstp->bits, origp->bits, sz, nbits);
@@ -418,22 +418,22 @@ enum node_states {
extern nodemask_t node_states[NR_NODE_STATES];
#if MAX_NUMNODES > 1
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node_isset(node, node_states[state]);
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
__node_set(node, &node_states[state]);
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
__node_clear(node, &node_states[state]);
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return nodes_weight(node_states[state]);
}
@@ -443,11 +443,11 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline unsigned int next_online_node(int nid)
+static __always_inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline unsigned int next_memory_node(int nid)
+static __always_inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
@@ -455,13 +455,13 @@ static inline unsigned int next_memory_node(int nid)
extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;
-static inline void node_set_online(int nid)
+static __always_inline void node_set_online(int nid)
{
node_set_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
-static inline void node_set_offline(int nid)
+static __always_inline void node_set_offline(int nid)
{
node_clear_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
@@ -469,20 +469,20 @@ static inline void node_set_offline(int nid)
#else
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node == 0;
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return 1;
}
@@ -502,7 +502,7 @@ static inline int num_node_state(enum node_states state)
#endif
-static inline int node_random(const nodemask_t *maskp)
+static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
int w, bit;
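
The switch from static inline to __always_inline above guarantees these nodemask helpers fold into their callers even in unoptimized or debug builds; the calling pattern is unchanged. A minimal sketch, not part of the patch, using only helpers visible in this hunk:

	/* Walk online nodes and report those that also have memory. */
	static void print_memory_nodes(void)
	{
		int nid;

		for (nid = first_online_node; nid < MAX_NUMNODES;
		     nid = next_online_node(nid))
			if (node_state(nid, N_MEMORY))
				pr_info("node %d: online with memory\n", nid);
	}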
diff --git a/include/linux/numa.h b/include/linux/numa.h
index eb19503604fe..3567e40329eb 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -30,6 +30,12 @@ static inline bool numa_valid_node(int nid)
#ifdef CONFIG_NUMA
#include <asm/sparsemem.h>
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[nid])
+
+void __init alloc_node_data(int nid);
+void __init alloc_offline_node_data(int nid);
+
/* Generic implementation available */
int numa_nearest_node(int node, unsigned int state);
@@ -57,6 +63,8 @@ static inline int phys_to_target_node(u64 start)
{
return 0;
}
+
+static inline void alloc_offline_node_data(int nid) {}
#endif
#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
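
With node_data[] and the NODE_DATA() accessor lifted into the generic header, arch-independent code can reach a node's pg_data_t directly. A hedged sketch of the accessor in use; node_span_pages() is illustrative, not a kernel function:

	static unsigned long node_span_pages(int nid)
	{
		pg_data_t *pgdat = NODE_DATA(nid);	/* node_data[nid] */

		return pgdat ? pgdat->node_spanned_pages : 0;
	}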
diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h
new file mode 100644
index 000000000000..cfad6ce7e1bd
--- /dev/null
+++ b/include/linux/numa_memblks.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NUMA_MEMBLKS_H
+#define __NUMA_MEMBLKS_H
+
+#ifdef CONFIG_NUMA_MEMBLKS
+#include <linux/types.h>
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_reset_distance(void);
+
+struct numa_memblk {
+ u64 start;
+ u64 end;
+ int nid;
+};
+
+struct numa_meminfo {
+ int nr_blks;
+ struct numa_memblk blk[NR_NODE_MEMBLKS];
+};
+
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
+
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
+
+int __init numa_memblks_init(int (*init_func)(void),
+ bool memblock_force_top_down);
+
+#ifdef CONFIG_NUMA_EMU
+int numa_emu_cmdline(char *str);
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids);
+u64 __init numa_emu_dma_end(void);
+void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt);
+#else
+static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt)
+{ }
+static inline int numa_emu_cmdline(char *str)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NUMA_EMU */
+
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+extern int phys_to_target_node(u64 start);
+#define phys_to_target_node phys_to_target_node
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif /* CONFIG_NUMA_KEEP_MEMINFO */
+
+#endif /* CONFIG_NUMA_MEMBLKS */
+
+#endif /* __NUMA_MEMBLKS_H */
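
The new header exports the NUMA memblk machinery so architectures beyond x86 can share it: register ranges with numa_add_memblk(), then hand an init function to numa_memblks_init(). A hedged sketch; fw_nr_ranges()/fw_range_*() are hypothetical firmware accessors, not kernel APIs:

	static int __init demo_register_ranges(void)
	{
		int i, ret;

		for (i = 0; i < fw_nr_ranges(); i++) {
			ret = numa_add_memblk(fw_range_nid(i),
					      fw_range_start(i),
					      fw_range_end(i));
			if (ret)
				return ret;
		}
		return 0;
	}

	static void __init demo_numa_init(void)
	{
		/* cleans up the collected meminfo and instantiates nodes */
		numa_memblks_init(demo_register_ranges, true);
	}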
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5769fe6e4950..1b3a76710487 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -66,8 +66,6 @@
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
* PG_arch_1 is an architecture specific page state bit. The generic code
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
@@ -103,22 +101,18 @@ enum pageflags {
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
- PG_error,
- PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
+ PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
+ PG_owner_2, /* Owner use. If pagecache, fs may use */
PG_arch_1,
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
- PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
- PG_uncached, /* Page has been mapped as uncached */
-#endif
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
@@ -126,14 +120,21 @@ enum pageflags {
PG_young,
PG_idle,
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
PG_arch_2,
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
PG_arch_3,
#endif
__NR_PAGEFLAGS,
PG_readahead = PG_reclaim,
+ /* Anonymous memory (and shmem) */
+ PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /* Some filesystems */
+ PG_checked = PG_owner_priv_1,
+
/*
* Depending on the way an anonymous folio can be mapped into a page
* table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
@@ -141,13 +142,13 @@ enum pageflags {
* tail pages of an anonymous folio. For now, we only expect it to be
* set on tail pages for PTE-mapped THP.
*/
- PG_anon_exclusive = PG_mappedtodisk,
-
- /* Filesystems */
- PG_checked = PG_owner_priv_1,
+ PG_anon_exclusive = PG_owner_2,
- /* SwapBacked */
- PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /*
+ * Set if all buffer heads in the folio are mapped.
+ * Filesystems which do not use BHs can use it for their own purposes.
+ */
+ PG_mappedtodisk = PG_owner_2,
/* Two page bits are conscripted by FS-Cache to maintain local caching
* state. These bits are set on pages belonging to the netfs's inodes
@@ -183,8 +184,9 @@ enum pageflags {
*/
/* At least one page in this folio has the hwpoison flag set */
- PG_has_hwpoisoned = PG_error,
+ PG_has_hwpoisoned = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
+ PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
@@ -235,7 +237,7 @@ static __always_inline int page_is_fake_head(const struct page *page)
return page_fixed_fake_head(page) != page;
}
-static inline unsigned long _compound_head(const struct page *page)
+static __always_inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
@@ -506,7 +508,6 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
-PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
@@ -514,8 +515,9 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
TESTCLEARFLAG(LRU, lru, PF_HEAD)
-PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
- TESTCLEARFLAG(Active, active, PF_HEAD)
+FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
@@ -531,9 +533,9 @@ PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
-PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
+FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)
/*
* Private page markings that may be used by the filesystem that owns the page
@@ -542,8 +544,9 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
*/
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
-PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
- TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
+
+/* owner_2 can be set on tail pages for anon memory */
+FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
/*
* Only test-and-set exist for PG_writeback. The unconditional operators are
@@ -556,8 +559,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
+FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)
#ifdef CONFIG_HIGHMEM
/*
@@ -577,34 +580,26 @@ static __always_inline bool folio_test_swapcache(const struct folio *folio)
test_bit(PG_swapcache, const_folio_flags(folio, 0));
}
-static __always_inline bool PageSwapCache(const struct page *page)
-{
- return folio_test_swapcache(page_folio(page));
-}
-
-SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
-CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
+FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
+FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(SwapCache, swapcache)
+FOLIO_FLAG_FALSE(swapcache)
#endif
-PAGEFLAG(Unevictable, unevictable, PF_HEAD)
- __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
- TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
+FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
#ifdef CONFIG_MMU
-PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
-#else
-PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
- TESTSCFLAG_FALSE(Mlocked, mlocked)
-#endif
-
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
+FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(Uncached, uncached)
+FOLIO_FLAG_FALSE(mlocked)
+ __FOLIO_CLEAR_FLAG_NOOP(mlocked)
+ FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
+ FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif
#ifdef CONFIG_MEMORY_FAILURE
@@ -865,8 +860,18 @@ static inline void ClearPageCompound(struct page *page)
ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
+FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+/*
+ * PG_partially_mapped is protected by deferred_split split_queue_lock,
+ * so it's safe to use non-atomic set/clear.
+ */
+__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
+FOLIO_TEST_FLAG_FALSE(partially_mapped)
+__FOLIO_SET_FLAG_NOOP(partially_mapped)
+__FOLIO_CLEAR_FLAG_NOOP(partially_mapped)
#endif
#define PG_head_mask ((1UL << PG_head))
@@ -927,79 +932,74 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif
/*
- * For pages that are never mapped to userspace,
- * page_type may be used. Because it is initialised to -1, we invert the
- * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
- * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of _mapcount won't be
- * mistaken for a page type value.
+ * For pages that do not use mapcount, page_type may be used.
+ * The low 24 bits of page_type may be used for your own purposes, as long
+ * as you are careful not to affect the top 8 bits. The low bits of
+ * page_type will be overwritten when you clear the page_type from the page.
*/
-
enum pagetype {
- PG_buddy = 0x40000000,
- PG_offline = 0x20000000,
- PG_table = 0x10000000,
- PG_guard = 0x08000000,
- PG_hugetlb = 0x04000000,
- PG_slab = 0x02000000,
- PG_zsmalloc = 0x01000000,
-
- PAGE_TYPE_BASE = 0x80000000,
-
- /*
- * Reserve 0xffff0000 - 0xfffffffe to catch _mapcount underflows and
- * allow owners that set a type to reuse the lower 16 bit for their own
- * purposes.
- */
- PAGE_MAPCOUNT_RESERVE = ~0x0000ffff,
+ /* 0x00-0x7f are positive numbers, ie mapcount */
+ /* Reserve 0x80-0xef for mapcount overflow. */
+ PGTY_buddy = 0xf0,
+ PGTY_offline = 0xf1,
+ PGTY_table = 0xf2,
+ PGTY_guard = 0xf3,
+ PGTY_hugetlb = 0xf4,
+ PGTY_slab = 0xf5,
+ PGTY_zsmalloc = 0xf6,
+ PGTY_unaccepted = 0xf7,
+
+ PGTY_mapcount_underflow = 0xff
};
-#define PageType(page, flag) \
- ((READ_ONCE(page->page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
-#define folio_test_type(folio, flag) \
- ((READ_ONCE(folio->page.page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+static inline bool page_type_has_type(int page_type)
+{
+ return page_type < (PGTY_mapcount_underflow << 24);
+}
-static inline int page_type_has_type(unsigned int page_type)
+/* This takes a mapcount which is one more than page->_mapcount */
+static inline bool page_mapcount_is_type(unsigned int mapcount)
{
- return (int)page_type < PAGE_MAPCOUNT_RESERVE;
+ return page_type_has_type(mapcount - 1);
}
-static inline int page_has_type(const struct page *page)
+static inline bool page_has_type(const struct page *page)
{
- return page_type_has_type(READ_ONCE(page->page_type));
+ return page_mapcount_is_type(data_race(page->page_type));
}
#define FOLIO_TYPE_OPS(lname, fname) \
-static __always_inline bool folio_test_##fname(const struct folio *folio)\
+static __always_inline bool folio_test_##fname(const struct folio *folio) \
{ \
- return folio_test_type(folio, PG_##lname); \
+ return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __folio_set_##fname(struct folio *folio) \
{ \
- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
- folio->page.page_type &= ~PG_##lname; \
+ VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
+ folio); \
+ folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{ \
VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
- folio->page.page_type |= PG_##lname; \
+ folio->page.page_type = UINT_MAX; \
}
#define PAGE_TYPE_OPS(uname, lname, fname) \
FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
- return PageType(page, PG_##lname); \
+ return data_race(page->page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(!PageType(page, 0), page); \
- page->page_type &= ~PG_##lname; \
+ VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
+ page->page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- page->page_type |= PG_##lname; \
+ page->page_type = UINT_MAX; \
}
/*
@@ -1076,6 +1076,13 @@ FOLIO_TEST_FLAG_FALSE(hugetlb)
PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
+/*
+ * Mark pages that have to be accepted before being touched for the first time.
+ *
+ * Serialized with zone lock.
+ */
+PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
@@ -1175,25 +1182,20 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
- 1UL << PG_large_rmappable)
+ 1UL << PG_large_rmappable | 1UL << PG_partially_mapped)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
/**
- * page_has_private - Determine if page has private stuff
- * @page: The page to be checked
+ * folio_has_private - Determine if folio has private stuff
+ * @folio: The folio to be checked
*
- * Determine if a page has private stuff, indicating that release routines
+ * Determine if a folio has private stuff, indicating that release routines
* should be invoked upon it.
*/
-static inline int page_has_private(const struct page *page)
-{
- return !!(page->flags & PAGE_FLAGS_PRIVATE);
-}
-
-static inline bool folio_has_private(const struct folio *folio)
+static inline int folio_has_private(const struct folio *folio)
{
- return page_has_private(&folio->page);
+ return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}
#undef PF_ANY
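
The net effect of the pagetype rework: a page's type now lives in the top byte of page->page_type, UINT_MAX means "no type", and the low 24 bits remain free for the owner. A worked illustration of the arithmetic, mirroring what the generated helpers above expand to:

	static void page_type_demo(struct page *page)
	{
		page->page_type = (unsigned int)PGTY_slab << 24; /* 0xf5000000 */

		/* testing compares only the top byte */
		WARN_ON((page->page_type >> 24) != PGTY_slab);

		page->page_type = UINT_MAX;	/* clearing: back to "no type" */
	}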
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 904c52f97284..79dbd8bc35a7 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -26,11 +26,14 @@ struct page_counter {
atomic_long_t children_low_usage;
unsigned long watermark;
+ /* Latest cg2 reset watermark */
+ unsigned long local_watermark;
unsigned long failcnt;
/* Keep all the read-mostly fields in a separate cacheline. */
CACHELINE_PADDING(_pad2_);
+ bool protection_support;
unsigned long min;
unsigned long low;
unsigned long high;
@@ -44,12 +47,17 @@ struct page_counter {
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif
+/*
+ * Protection is supported only for the first counter (with id 0).
+ */
static inline void page_counter_init(struct page_counter *counter,
- struct page_counter *parent)
+ struct page_counter *parent,
+ bool protection_support)
{
- atomic_long_set(&counter->usage, 0);
+ counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
+ counter->protection_support = protection_support;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
@@ -78,11 +86,24 @@ int page_counter_memparse(const char *buf, const char *max,
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
- counter->watermark = page_counter_read(counter);
+ unsigned long usage = page_counter_read(counter);
+
+ /*
+ * Update local_watermark first, so it's always <= watermark
+ * (modulo CPU/compiler re-ordering)
+ */
+ counter->local_watermark = usage;
+ counter->watermark = usage;
}
+#ifdef CONFIG_MEMCG
void page_counter_calculate_protection(struct page_counter *root,
struct page_counter *counter,
bool recursive_protection);
+#else
+static inline void page_counter_calculate_protection(struct page_counter *root,
+ struct page_counter *counter,
+ bool recursive_protection) {}
+#endif
#endif /* _LINUX_PAGE_COUNTER_H */
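
A short sketch of the revised page_counter API with two made-up counters: only the id-0 (memory) counter claims protection support, and a watermark reset now rewinds local_watermark along with watermark:

	static void counters_demo(struct page_counter *memory,
				  struct page_counter *swap)
	{
		page_counter_init(memory, NULL, true);	/* min/low protection */
		page_counter_init(swap, NULL, false);	/* no protection */

		/* after charging: both watermark fields rewind to usage */
		page_counter_reset_watermark(memory);
	}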
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e39c3a7ce33c..68a5f1ff3301 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -206,14 +206,21 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
- AS_LARGE_FOLIO_SUPPORT = 6,
- AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
- AS_STABLE_WRITES, /* must wait for writeback before modifying
+ AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */
+ AS_STABLE_WRITES = 7, /* must wait for writeback before modifying
folio contents */
- AS_INACCESSIBLE, /* Do not attempt direct R/W access to the mapping,
- including to move the mapping */
+ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */
+ /* Bits 16-25 are used for FOLIO_ORDER */
+ AS_FOLIO_ORDER_BITS = 5,
+ AS_FOLIO_ORDER_MIN = 16,
+ AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};
+#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
+#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
+#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
+#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)
+
/**
* mapping_set_error - record a writeback error in the address_space
* @mapping: the mapping in which an error should be set
@@ -369,9 +376,64 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
+/*
+ * mapping_max_folio_size_supported() - Check the max folio size supported
+ *
+ * The filesystem should call this function at mount time if there is a
+ * requirement on the folio mapping size in the page cache.
+ */
+static inline size_t mapping_max_folio_size_supported(void)
+{
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
+ return PAGE_SIZE;
+}
+
+/*
+ * mapping_set_folio_order_range() - Set the orders supported by a file.
+ * @mapping: The address space of the file.
+ * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
+ * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate which base size (min) and maximum size (max) of folio the VFS
+ * can use to cache the contents of the file. This should only be used
+ * if the filesystem needs special handling of folio sizes (i.e., there is
+ * something the core cannot know).
+ * Do not tune it based on, e.g., i_size.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_folio_order_range(struct address_space *mapping,
+ unsigned int min,
+ unsigned int max)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return;
+
+ if (min > MAX_PAGECACHE_ORDER)
+ min = MAX_PAGECACHE_ORDER;
+
+ if (max > MAX_PAGECACHE_ORDER)
+ max = MAX_PAGECACHE_ORDER;
+
+ if (max < min)
+ max = min;
+
+ mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
+ (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
+}
+
+static inline void mapping_set_folio_min_order(struct address_space *mapping,
+ unsigned int min)
+{
+ mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
+}
+
/**
* mapping_set_large_folios() - Indicate the file supports large folios.
- * @mapping: The file.
+ * @mapping: The address space of the file.
*
* The filesystem should call this function in its inode constructor to
* indicate that the VFS can use large folios to cache the contents of
@@ -382,7 +444,44 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
*/
static inline void mapping_set_large_folios(struct address_space *mapping)
{
- __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
+}
+
+static inline unsigned int
+mapping_max_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
+}
+
+static inline unsigned int
+mapping_min_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
+}
+
+static inline unsigned long
+mapping_min_folio_nrpages(struct address_space *mapping)
+{
+ return 1UL << mapping_min_folio_order(mapping);
+}
+
+/**
+ * mapping_align_index() - Align index for this mapping.
+ * @mapping: The address_space.
+ * @index: The page index.
+ *
+ * The index of a folio must be naturally aligned. If you are adding a
+ * new folio to the page cache and need to know what index to give it,
+ * call this function.
+ */
+static inline pgoff_t mapping_align_index(struct address_space *mapping,
+ pgoff_t index)
+{
+ return round_down(index, mapping_min_folio_nrpages(mapping));
}
/*
@@ -391,20 +490,17 @@ static inline void mapping_set_large_folios(struct address_space *mapping)
*/
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
- /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
+ /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
"Anonymous mapping always supports large folio");
- return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
- test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ return mapping_max_folio_order(mapping) > 0;
}
/* Return the maximum folio size for this pagecache mapping, in bytes. */
-static inline size_t mapping_max_folio_size(struct address_space *mapping)
+static inline size_t mapping_max_folio_size(const struct address_space *mapping)
{
- if (mapping_large_folio_support(mapping))
- return PAGE_SIZE << MAX_PAGECACHE_ORDER;
- return PAGE_SIZE;
+ return PAGE_SIZE << mapping_max_folio_order(mapping);
}
static inline int filemap_nr_thps(struct address_space *mapping)
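
Tying the new helpers together, a hedged sketch for a filesystem with 16KiB blocks on 4KiB pages: the inode constructor pins a minimum folio order of 2, and indices for new folios are aligned to the implied four-page granularity (index 7 rounds down to 4):

	/* inode constructor: folios of at least order 2, i.e. 4 pages */
	static void demo_set_min_order(struct address_space *mapping)
	{
		mapping_set_folio_min_order(mapping, 2);
	}

	/* before adding a folio: mapping_min_folio_nrpages() == 4 here */
	static pgoff_t demo_pick_index(struct address_space *mapping,
				       pgoff_t index)
	{
		return mapping_align_index(mapping, index);
	}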
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 27cd1e59ccf7..f5eb5a32aeed 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -130,4 +130,62 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
pgoff_t nr, const struct mm_walk_ops *ops,
void *private);
+typedef int __bitwise folio_walk_flags_t;
+
+/*
+ * Walk migration entries as well. Careful: a large folio might get split
+ * concurrently.
+ */
+#define FW_MIGRATION ((__force folio_walk_flags_t)BIT(0))
+
+/* Walk shared zeropages (small + huge) as well. */
+#define FW_ZEROPAGE ((__force folio_walk_flags_t)BIT(1))
+
+enum folio_walk_level {
+ FW_LEVEL_PTE,
+ FW_LEVEL_PMD,
+ FW_LEVEL_PUD,
+};
+
+/**
+ * struct folio_walk - folio_walk_start() / folio_walk_end() data
+ * @page: exact folio page referenced (if applicable)
+ * @level: page table level identifying the entry type
+ * @ptep: pointer to the page table entry (FW_LEVEL_PTE).
+ * @pmdp: pointer to the page table entry (FW_LEVEL_PMD).
+ * @pudp: pointer to the page table entry (FW_LEVEL_PUD).
+ * @pte: value of the page table entry (FW_LEVEL_PTE).
+ * @pmd: value of the page table entry (FW_LEVEL_PMD).
+ * @pud: value of the page table entry (FW_LEVEL_PUD).
+ * @ptl: pointer to the page table lock.
+ *
+ * (see folio_walk_start() documentation for more details)
+ */
+struct folio_walk {
+ /* public */
+ struct page *page;
+ enum folio_walk_level level;
+ union {
+ pte_t *ptep;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ };
+ union {
+ pte_t pte;
+ pud_t pud;
+ pmd_t pmd;
+ };
+ /* private */
+ struct vm_area_struct *vma;
+ spinlock_t *ptl;
+};
+
+struct folio *folio_walk_start(struct folio_walk *fw,
+ struct vm_area_struct *vma, unsigned long addr,
+ folio_walk_flags_t flags);
+
+#define folio_walk_end(__fw, __vma) do { \
+ spin_unlock((__fw)->ptl); \
+ if (likely((__fw)->level == FW_LEVEL_PTE)) \
+ pte_unmap((__fw)->ptep); \
+ vma_pgtable_walk_end(__vma); \
+} while (0)
+
#endif /* _LINUX_PAGEWALK_H */
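
The intended calling pattern for the new walker: folio_walk_start() returns the mapped folio with the page table lock held and the struct filled in, and folio_walk_end() drops the lock and mapping. A minimal sketch:

	static bool addr_maps_folio(struct vm_area_struct *vma,
				    unsigned long addr, struct folio *target)
	{
		struct folio_walk fw;
		struct folio *folio;
		bool match;

		folio = folio_walk_start(&fw, vma, addr, 0);
		if (!folio)
			return false;
		match = folio == target; /* fw.page/fw.level also valid here */
		folio_walk_end(&fw, vma);
		return match;
	}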
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 85bdf2adb760..42ef06136bd1 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -128,6 +128,7 @@ struct pci_epc_mem {
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
+ * @domain_nr: PCI domain number of the endpoint controller
* @init_complete: flag to indicate whether the EPC initialization is complete
* or not
*/
@@ -145,10 +146,12 @@ struct pci_epc {
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
+ int domain_nr;
bool init_complete;
};
/**
+ * enum pci_epc_bar_type - configurability of endpoint BAR
* @BAR_PROGRAMMABLE: The BAR mask can be configured by the EPC.
* @BAR_FIXED: The BAR mask is fixed by the hardware.
* @BAR_RESERVED: The BAR should not be touched by an EPF driver.
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4cf89a4b4cbc..573b4c4c2be6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -371,6 +371,7 @@ struct pci_dev {
can be generated */
unsigned int pme_poll:1; /* Poll device's PME status bit */
unsigned int pinned:1; /* Whether this dev is pinned */
+ unsigned int config_rrs_sv:1; /* Config RRS software visibility */
unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
@@ -517,6 +518,9 @@ struct pci_dev {
#ifdef CONFIG_PCI_DOE
struct xarray doe_mbs; /* Data Object Exchange mailboxes */
#endif
+#ifdef CONFIG_PCI_NPEM
+ struct npem *npem; /* Native PCIe Enclosure Management */
+#endif
u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
@@ -1098,7 +1102,7 @@ enum pcie_bus_config_types {
extern enum pcie_bus_config_types pcie_bus_config;
-extern struct bus_type pci_bus_type;
+extern const struct bus_type pci_bus_type;
/* Do NOT directly access these two variables, unless you are arch-specific PCI
* code, or PCI core code. */
@@ -1884,7 +1888,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{ return 0; }
#endif
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
#endif
/* Some architectures require additional setup to direct VGA traffic */
@@ -2290,8 +2294,11 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ const char *name);
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
const char *name);
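
pcim_iomap_region() and pcim_request_region() are single-BAR, devres-managed alternatives to the mask-based pcim_iomap_regions(). A hedged probe() sketch for a driver needing only BAR 0 ("demo" is a placeholder name):

	static int demo_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
	{
		void __iomem *regs;
		int ret;

		ret = pcim_enable_device(pdev);
		if (ret)
			return ret;

		regs = pcim_iomap_region(pdev, 0, "demo");
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		/* mapping and region are released automatically on unbind */
		return 0;
	}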
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 91182aa1d2ec..4cf6aaed5f35 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2662,6 +2662,8 @@
#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
+#define PCI_VENDOR_ID_GLENFLY 0x6766
+
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
#define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
@@ -2707,6 +2709,9 @@
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
#define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a
+#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
+#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
+#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
#define PCI_DEVICE_ID_INTEL_82437 0x122d
#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4b2047b78b67..b6321fc49159 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -135,7 +135,6 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
gfp_t gfp) __alloc_size(1);
-extern size_t pcpu_alloc_size(void __percpu *__pdata);
#define __alloc_percpu_gfp(_size, _align, _gfp) \
alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 207f0c83c8e9..59a3deb792a8 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -80,36 +80,6 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
}
}
-static inline void pgalloc_tag_split(struct page *page, unsigned int nr)
-{
- int i;
- struct page_ext *first_page_ext;
- struct page_ext *page_ext;
- union codetag_ref *ref;
- struct alloc_tag *tag;
-
- if (!mem_alloc_profiling_enabled())
- return;
-
- first_page_ext = page_ext = page_ext_get(page);
- if (unlikely(!page_ext))
- return;
-
- ref = codetag_ref_from_page_ext(page_ext);
- if (!ref->ct)
- goto out;
-
- tag = ct_to_alloc_tag(ref->ct);
- page_ext = page_ext_next(page_ext);
- for (i = 1; i < nr; i++) {
- /* Set new reference to point to the original tag */
- alloc_tag_ref_set(codetag_ref_from_page_ext(page_ext), tag);
- page_ext = page_ext_next(page_ext);
- }
-out:
- page_ext_put(first_page_ext);
-}
-
static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
{
struct alloc_tag *tag = NULL;
@@ -142,7 +112,6 @@ static inline void clear_page_tag_ref(struct page *page) {}
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
-static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2a6a3cccfc36..e8b2ac6bd2ae 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -447,6 +447,12 @@ static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
}
#endif
+#ifndef arch_check_zapped_pud
+static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
@@ -1950,6 +1956,18 @@ typedef unsigned int pgtbl_mod_mask;
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif
+#ifndef pte_pgprot
+#define pte_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pmd_pgprot
+#define pmd_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pud_pgprot
+#define pud_pgprot(x) ((pgprot_t) {0})
+#endif
+
/* description of effects of mapping type and prot in current implementation.
* this is due to the limited x86 page protection hardware. The expected
* behavior is in parens:
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index a65d3d078e58..53cfde98433d 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -81,6 +81,8 @@ struct pinctrl_map;
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
+ * @PIN_CONFIG_INPUT_SCHMITT_UV: this will configure an input pin to run in
+ * schmitt-trigger mode. The argument is in uV.
* @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
@@ -132,6 +134,7 @@ enum pin_config_param {
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
+ PIN_CONFIG_INPUT_SCHMITT_UV,
PIN_CONFIG_MODE_LOW_POWER,
PIN_CONFIG_MODE_PWM,
PIN_CONFIG_OUTPUT,
diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h
deleted file mode 100644
index d687ef5726c2..000000000000
--- a/include/linux/platform_data/ad5449.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
- * Converter driver.
- *
- * Copyright 2012 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD5449_H__
-#define __LINUX_PLATFORM_DATA_AD5449_H__
-
-/**
- * enum ad5449_sdo_mode - AD5449 SDO pin configuration
- * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength.
- * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with not full strength.
- * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode.
- * @AD5449_SDO_DISABLED: Disable the SDO pin, in this mode it is not possible to
- * read back from the device.
- */
-enum ad5449_sdo_mode {
- AD5449_SDO_DRIVE_FULL = 0x0,
- AD5449_SDO_DRIVE_WEAK = 0x1,
- AD5449_SDO_OPEN_DRAIN = 0x2,
- AD5449_SDO_DISABLED = 0x3,
-};
-
-/**
- * struct ad5449_platform_data - Platform data for the ad5449 DAC driver
- * @sdo_mode: SDO pin mode
- * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the
- * outputs to midscale (true) or to zero scale(false).
- */
-struct ad5449_platform_data {
- enum ad5449_sdo_mode sdo_mode;
- bool hardware_clear_to_midscale;
-};
-
-#endif
diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h
new file mode 100644
index 000000000000..576d952f97ed
--- /dev/null
+++ b/include/linux/platform_data/amd_qdma.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_QDMA_H
+#define _PLATDATA_AMD_QDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct qdma_queue_info - DMA queue information. This information is used to
+ * match a queue when a DMA channel is requested
+ * @dir: Channel transfer direction
+ */
+struct qdma_queue_info {
+ enum dma_transfer_direction dir;
+};
+
+#define QDMA_FILTER_PARAM(qinfo) ((void *)(qinfo))
+
+struct dma_slave_map;
+
+/**
+ * struct qdma_platdata - Platform specific data for QDMA engine
+ * @max_mm_channels: Maximum number of MM DMA channels in each direction
+ * @device_map: DMA slave map
+ * @irq_index: The index of the first IRQ
+ */
+struct qdma_platdata {
+ u32 max_mm_channels;
+ u32 irq_index;
+ struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_QDMA_H */
diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h
deleted file mode 100644
index 5dc9d2be384b..000000000000
--- a/include/linux/platform_data/cyttsp4.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP4_H_
-#define _CYTTSP4_H_
-
-#define CYTTSP4_MT_NAME "cyttsp4_mt"
-#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter"
-#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter"
-
-#define CY_TOUCH_SETTINGS_MAX 32
-
-struct touch_framework {
- const uint16_t *abs;
- uint8_t size;
- uint8_t enable_vkeys;
-} __packed;
-
-struct cyttsp4_mt_platform_data {
- struct touch_framework *frmwrk;
- unsigned short flags;
- char const *inp_dev_name;
-};
-
-struct touch_settings {
- const uint8_t *data;
- uint32_t size;
- uint8_t tag;
-} __packed;
-
-struct cyttsp4_core_platform_data {
- int irq_gpio;
- int rst_gpio;
- int level_irq_udelay;
- int (*xres)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- int (*init)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev);
- int (*power)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev, atomic_t *ignore_irq);
- int (*irq_stat)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX];
-};
-
-struct cyttsp4_platform_data {
- struct cyttsp4_core_platform_data *core_pdata;
- struct cyttsp4_mt_platform_data *mt_pdata;
-};
-
-#endif /* _CYTTSP4_H_ */
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
deleted file mode 100644
index eb9805bb3fe8..000000000000
--- a/include/linux/platform_data/dma-ep93xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-
-/*
- * M2P channels.
- *
- * Note that these values are also directly used for setting the PPALLOC
- * register.
- */
-#define EP93XX_DMA_I2S1 0
-#define EP93XX_DMA_I2S2 1
-#define EP93XX_DMA_AAC1 2
-#define EP93XX_DMA_AAC2 3
-#define EP93XX_DMA_AAC3 4
-#define EP93XX_DMA_I2S3 5
-#define EP93XX_DMA_UART1 6
-#define EP93XX_DMA_UART2 7
-#define EP93XX_DMA_UART3 8
-#define EP93XX_DMA_IRDA 9
-/* M2M channels */
-#define EP93XX_DMA_SSP 10
-#define EP93XX_DMA_IDE 11
-
-/**
- * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
- * @port: peripheral which is requesting the channel
- * @direction: TX/RX channel
- * @name: optional name for the channel, this is displayed in /proc/interrupts
- *
- * This information is passed as private channel parameter in a filter
- * function. Note that this is only needed for slave/cyclic channels. For
- * memcpy channels %NULL data should be passed.
- */
-struct ep93xx_dma_data {
- int port;
- enum dma_transfer_direction direction;
- const char *name;
-};
-
-/**
- * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
- * @name: name of the channel, used for getting the right clock for the channel
- * @base: mapped registers
- * @irq: interrupt number used by this channel
- */
-struct ep93xx_dma_chan_data {
- const char *name;
- void __iomem *base;
- int irq;
-};
-
-/**
- * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
- * @channels: array of channels which are passed to the driver
- * @num_channels: number of channels in the array
- *
- * This structure is passed to the DMA engine driver via platform data. For
- * M2P channels, contract is that even channels are for TX and odd for RX.
- * There is no requirement for the M2M channels.
- */
-struct ep93xx_dma_platform_data {
- struct ep93xx_dma_chan_data *channels;
- size_t num_channels;
-};
-
-static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
-}
-
-/**
- * ep93xx_dma_chan_direction - returns direction the channel can be used
- * @chan: channel
- *
- * This function can be used in filter functions to find out whether the
- * channel supports given DMA direction. Only M2P channels have such
- * limitation, for M2M channels the direction is configurable.
- */
-static inline enum dma_transfer_direction
-ep93xx_dma_chan_direction(struct dma_chan *chan)
-{
- if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_TRANS_NONE;
-
- /* even channels are for TX, odd for RX */
- return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
-}
-
-#endif /* __ASM_ARCH_DMA_H */
diff --git a/include/linux/platform_data/eth-ep93xx.h b/include/linux/platform_data/eth-ep93xx.h
deleted file mode 100644
index 8eef637a804d..000000000000
--- a/include/linux/platform_data/eth-ep93xx.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PLATFORM_DATA_ETH_EP93XX
-#define _LINUX_PLATFORM_DATA_ETH_EP93XX
-
-struct ep93xx_eth_data {
- unsigned char dev_addr[6];
- unsigned char phy_id;
-};
-
-#endif
diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h
deleted file mode 100644
index 3054fced8509..000000000000
--- a/include/linux/platform_data/keypad-ep93xx.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __KEYPAD_EP93XX_H
-#define __KEYPAD_EP93XX_H
-
-struct matrix_keymap_data;
-
-/* flags for the ep93xx_keypad driver */
-#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
-#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
-#define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */
-#define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */
-#define EP93XX_KEYPAD_AUTOREPEAT (1<<4) /* enable key autorepeat */
-
-/**
- * struct ep93xx_keypad_platform_data - platform specific device structure
- * @keymap_data: pointer to &matrix_keymap_data
- * @debounce: debounce start count; terminal count is 0xff
- * @prescale: row/column counter pre-scaler load value
- * @flags: see above
- */
-struct ep93xx_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
- unsigned int debounce;
- unsigned int prescale;
- unsigned int flags;
- unsigned int clk_rate;
-};
-
-#define EP93XX_MATRIX_ROWS (8)
-#define EP93XX_MATRIX_COLS (8)
-
-#endif /* __KEYPAD_EP93XX_H */
diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h
deleted file mode 100644
index 7efabbca1dca..000000000000
--- a/include/linux/platform_data/keypad-nomadik-ske.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com>
- *
- * ux500 Scroll key and Keypad Encoder (SKE) header
- */
-
-#ifndef __SKE_H
-#define __SKE_H
-
-#include <linux/input/matrix_keypad.h>
-
-/* register definitions for SKE peripheral */
-#define SKE_CR 0x00
-#define SKE_VAL0 0x04
-#define SKE_VAL1 0x08
-#define SKE_DBCR 0x0C
-#define SKE_IMSC 0x10
-#define SKE_RIS 0x14
-#define SKE_MIS 0x18
-#define SKE_ICR 0x1C
-
-/*
- * Keypad module
- */
-
-/**
- * struct keypad_platform_data - structure for platform specific data
- * @init: pointer to keypad init function
- * @exit: pointer to keypad deinitialisation function
- * @keymap_data: matrix scan code table for keycodes
- * @krow: maximum number of rows
- * @kcol: maximum number of columns
- * @debounce_ms: platform specific debounce time
- * @no_autorepeat: flag for auto repetition
- * @wakeup_enable: allow waking up the system
- */
-struct ske_keypad_platform_data {
- int (*init)(void);
- int (*exit)(void);
- const struct matrix_keymap_data *keymap_data;
- u8 krow;
- u8 kcol;
- u8 debounce_ms;
- bool no_autorepeat;
- bool wakeup_enable;
-};
-#endif /*__SKE_KPD_H*/
diff --git a/include/linux/platform_data/mcs.h b/include/linux/platform_data/mcs.h
deleted file mode 100644
index fcc6f2a1f5c3..000000000000
--- a/include/linux/platform_data/mcs.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- * Author: HeungJun Kim <riverful.kim@samsung.com>
- */
-
-#ifndef __LINUX_MCS_H
-#define __LINUX_MCS_H
-
-#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff))
-#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff)
-#define MCS_KEY_CODE(v) ((v) & 0xffff)
-
-struct mcs_platform_data {
- void (*poweron)(bool);
- void (*cfg_pin)(void);
-
- /* touchscreen */
- unsigned int x_size;
- unsigned int y_size;
-
- /* touchkey */
- const u32 *keymap;
- unsigned int keymap_size;
- unsigned int key_maxval;
- bool no_autorepeat;
-};
-
-#endif /* __LINUX_MCS_H */
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
deleted file mode 100644
index b439f2a896e0..000000000000
--- a/include/linux/platform_data/spi-ep93xx.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MACH_EP93XX_SPI_H
-#define __ASM_MACH_EP93XX_SPI_H
-
-struct spi_device;
-
-/**
- * struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @use_dma: use DMA for the transfers
- */
-struct ep93xx_spi_info {
- bool use_dma;
-};
-
-#endif /* __ASM_MACH_EP93XX_SPI_H */
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
deleted file mode 100644
index 2463a4a856a6..000000000000
--- a/include/linux/platform_data/zforce_ts.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* drivers/input/touchscreen/zforce.c
- *
- * Copyright (C) 2012-2013 MundoReader S.L.
- */
-
-#ifndef _LINUX_INPUT_ZFORCE_TS_H
-#define _LINUX_INPUT_ZFORCE_TS_H
-
-struct zforce_ts_platdata {
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif /* _LINUX_INPUT_ZFORCE_TS_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index d422db6eec63..7132623e4658 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -52,7 +52,7 @@ struct platform_device {
extern int platform_device_register(struct platform_device *);
extern void platform_device_unregister(struct platform_device *);
-extern struct bus_type platform_bus_type;
+extern const struct bus_type platform_bus_type;
extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *,
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 07071e64abf3..89a0d83ddad0 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -526,7 +526,7 @@ struct quota_info {
const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
};
-int register_quota_format(struct quota_format_type *fmt);
+void register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
struct quota_module_name {
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index 002266693e50..765232ce0b5e 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -19,8 +19,8 @@ struct ratelimit_state {
int burst;
int printed;
int missed;
+ unsigned int flags;
unsigned long begin;
- unsigned long flags;
};
#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index fd35d4ec12e1..17fbb7855295 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -89,6 +89,14 @@ void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long range_size,
+ struct lock_class_key *key);
+
+bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text,
+ long *data);
+
/*
* Because the ring buffer is generic, if other users of the ring buffer get
* traced by ftrace, it can produce lockdep warnings. We need to keep each
@@ -100,6 +108,18 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc_range(size, flags, order, start, range_size) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc_range((size), (flags), (order), (start), \
+ (range_size), &__key); \
+})
+
typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
ring_buffer_cond_fn cond, void *data);
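
ring_buffer_alloc_range() mirrors ring_buffer_alloc() but places the buffer inside a fixed, pre-mapped address range so its contents can persist across boots. A hedged sketch, assuming start/range_size describe such a carved-out region:

	static struct trace_buffer *demo_boot_buffer(unsigned long start,
						     unsigned long range_size)
	{
		/* order 0: one page per sub-buffer */
		return ring_buffer_alloc_range(range_size, RB_FL_OVERWRITE, 0,
					       start, range_size);
	}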
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 0978c64f49d8..d5e93e44322e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -331,7 +331,7 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
switch (level) {
case RMAP_LEVEL_PTE:
if (!folio_test_large(folio)) {
- atomic_inc(&page->_mapcount);
+ atomic_inc(&folio->_mapcount);
break;
}
@@ -425,7 +425,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
if (!folio_test_large(folio)) {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
- atomic_inc(&page->_mapcount);
+ atomic_inc(&folio->_mapcount);
break;
}
@@ -745,7 +745,12 @@ int folio_mkclean(struct folio *);
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);
-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
+enum rmp_flags {
+ RMP_LOCKED = 1 << 0,
+ RMP_USE_SHARED_ZEROPAGE = 1 << 1,
+};
+
+void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
/*
* rmap_walk_control: To control rmap traversing for specific needs
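
remove_migration_ptes() thus trades its bool for a flags word. A minimal sketch of a caller that already holds the folio lock and wants zero-filled PTE-mapped pages replaced by the shared zeropage (the THP shrinker case this series enables):

	static void demo_restore_ptes(struct folio *folio)
	{
		remove_migration_ptes(folio, folio,
				      RMP_LOCKED | RMP_USE_SHARED_ZEROPAGE);
	}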
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index c09cdcc99471..189140bf11fc 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -40,7 +40,7 @@ struct sbitmap_word {
/**
* @swap_lock: serializes simultaneous updates of ->word and ->cleared
*/
- spinlock_t swap_lock;
+ raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3773c1c8f099..e6ee4258169a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -82,6 +82,8 @@ struct task_group;
struct task_struct;
struct user_event_mm;
+#include <linux/sched/ext.h>
+
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
@@ -149,8 +151,9 @@ struct user_event_mm;
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
*/
-#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
+#define is_special_task_state(state) \
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
+ TASK_DEAD | TASK_FROZEN))
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value) \
@@ -541,9 +544,14 @@ struct sched_entity {
struct rb_node run_node;
u64 deadline;
u64 min_vruntime;
+ u64 min_slice;
struct list_head group_node;
- unsigned int on_rq;
+ unsigned char on_rq;
+ unsigned char sched_delayed;
+ unsigned char rel_deadline;
+ unsigned char custom_slice;
+ /* hole */
u64 exec_start;
u64 sum_exec_runtime;
@@ -639,12 +647,26 @@ struct sched_dl_entity {
*
* @dl_overrun tells if the task asked to be informed about runtime
* overruns.
+ *
+ * @dl_server tells if this is a server entity.
+ *
+ * @dl_defer tells if this is a deferred or regular server. For
+ * now, only the deferred server exists.
+ *
+ * @dl_defer_armed tells if the deferrable server is waiting
+ * for the replenishment timer to activate it.
+ *
+ * @dl_defer_running tells if the deferrable server is actually
+ * running, skipping the defer phase.
*/
unsigned int dl_throttled : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
unsigned int dl_server : 1;
+ unsigned int dl_defer : 1;
+ unsigned int dl_defer_armed : 1;
+ unsigned int dl_defer_running : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -672,7 +694,7 @@ struct sched_dl_entity {
*/
struct rq *rq;
dl_server_has_tasks_f server_has_tasks;
- dl_server_pick_f server_pick;
+ dl_server_pick_f server_pick_task;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -810,6 +832,9 @@ struct task_struct {
struct sched_rt_entity rt;
struct sched_dl_entity dl;
struct sched_dl_entity *dl_server;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ struct sched_ext_entity scx;
+#endif
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index df3aca89d4f5..3a912ab42bb5 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -10,16 +10,16 @@
#include <linux/sched.h>
-#define MAX_DL_PRIO 0
-
-static inline int dl_prio(int prio)
+static inline bool dl_prio(int prio)
{
- if (unlikely(prio < MAX_DL_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_DL_PRIO);
}
-static inline int dl_task(struct task_struct *p)
+/*
+ * Returns true if a task has a priority that belongs to the DL class. PI-boosted
+ * tasks will return true. Use dl_policy() to ignore PI-boosted tasks.
+ */
+static inline bool dl_task(struct task_struct *p)
{
return dl_prio(p->prio);
}
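
As the new comment notes, dl_task() is also true for PI-boosted tasks; filtering them out uses the policy check it names (dl_policy() lives in the scheduler's internal headers). A sketch:

	static bool demo_is_plain_dl(struct task_struct *p)
	{
		return dl_task(p) && dl_policy(p->policy);
	}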
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
new file mode 100644
index 000000000000..1ddbde64a31b
--- /dev/null
+++ b/include/linux/sched/ext.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifndef _LINUX_SCHED_EXT_H
+#define _LINUX_SCHED_EXT_H
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+
+#include <linux/llist.h>
+#include <linux/rhashtable-types.h>
+
+enum scx_public_consts {
+ SCX_OPS_NAME_LEN = 128,
+
+ SCX_SLICE_DFL = 20 * 1000000, /* 20ms */
+ SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */
+};
+
+/*
+ * DSQ (dispatch queue) IDs are 64bit of the format:
+ *
+ * Bits: [63] [62 .. 0]
+ * [ B] [ ID ]
+ *
+ * B: 1 for IDs for built-in DSQs, 0 for ops-created user DSQs
+ * ID: 63 bit ID
+ *
+ * Built-in IDs:
+ *
+ * Bits: [63] [62] [61..32] [31 .. 0]
+ * [ 1] [ L] [ R ] [ V ]
+ *
+ * 1: 1 for built-in DSQs.
+ * L: 1 for LOCAL_ON DSQ IDs, 0 for others
+ * V: For LOCAL_ON DSQ IDs, a CPU number. For others, a pre-defined value.
+ */
+enum scx_dsq_id_flags {
+ SCX_DSQ_FLAG_BUILTIN = 1LLU << 63,
+ SCX_DSQ_FLAG_LOCAL_ON = 1LLU << 62,
+
+ SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0,
+ SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1,
+ SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2,
+ SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON,
+ SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
+};
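
A minimal sketch of the ID layout above (the example_* helpers are
hypothetical and not part of the header): a per-CPU LOCAL_ON DSQ ID can
be composed and decoded as follows.

static inline u64 example_local_on_dsq_id(s32 cpu)
{
	/* B=1, L=1, V=cpu: refers to the local DSQ of @cpu */
	return SCX_DSQ_LOCAL_ON | (u32)cpu;
}

static inline s32 example_local_on_cpu(u64 dsq_id)
{
	/* only meaningful when both BUILTIN and LOCAL_ON bits are set */
	return (s32)(dsq_id & SCX_DSQ_LOCAL_CPU_MASK);
}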
+
+/*
+ * A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered
+ * queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to
+ * buffer between the scheduler core and the BPF scheduler. See the
+ * documentation for more details.
+ */
+struct scx_dispatch_q {
+ raw_spinlock_t lock;
+ struct list_head list; /* tasks in dispatch order */
+ struct rb_root priq; /* used to order by p->scx.dsq_vtime */
+ u32 nr;
+ u32 seq; /* used by BPF iter */
+ u64 id;
+ struct rhash_head hash_node;
+ struct llist_node free_node;
+ struct rcu_head rcu;
+};
+
+/* scx_entity.flags */
+enum scx_ent_flags {
+ SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */
+ SCX_TASK_RESET_RUNNABLE_AT = 1 << 2, /* runnable_at should be reset */
+ SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */
+
+ SCX_TASK_STATE_SHIFT = 8, /* bits 8 and 9 are used to carry scx_task_state */
+ SCX_TASK_STATE_BITS = 2,
+ SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
+
+ SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */
+};
+
+/* scx_entity.flags & SCX_TASK_STATE_MASK */
+enum scx_task_state {
+ SCX_TASK_NONE, /* ops.init_task() not called yet */
+ SCX_TASK_INIT, /* ops.init_task() succeeded, but task can be cancelled */
+ SCX_TASK_READY, /* fully initialized, but not in sched_ext */
+ SCX_TASK_ENABLED, /* fully initialized and in sched_ext */
+
+ SCX_TASK_NR_STATES,
+};
+
+/* scx_entity.dsq_flags */
+enum scx_ent_dsq_flags {
+ SCX_TASK_DSQ_ON_PRIQ = 1 << 0, /* task is queued on the priority queue of a dsq */
+};
+
+/*
+ * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from
+ * everywhere and the following bits track which kfunc sets are currently
+ * allowed for %current. This simple per-task tracking works because SCX ops
+ * nest in a limited way. BPF will likely implement a way to allow and disallow
+ * kfuncs depending on the calling context, which will replace this manual
+ * mechanism. See scx_kf_allow().
+ */
+enum scx_kf_mask {
+ SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
+ /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
+ SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
+ /* ops.dequeue (in REST) may be nested inside DISPATCH */
+ SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
+ SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
+ SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
+ SCX_KF_REST = 1 << 4, /* other rq-locked operations */
+
+ __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH |
+ SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+ __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+};
+
+enum scx_dsq_lnode_flags {
+ SCX_DSQ_LNODE_ITER_CURSOR = 1 << 0,
+
+ /* high 16 bits can be for iter cursor flags */
+ __SCX_DSQ_LNODE_PRIV_SHIFT = 16,
+};
+
+struct scx_dsq_list_node {
+ struct list_head node;
+ u32 flags;
+ u32 priv; /* can be used by iter cursor */
+};
+
+/*
+ * The following is embedded in task_struct and contains all fields necessary
+ * for a task to be scheduled by SCX.
+ */
+struct sched_ext_entity {
+ struct scx_dispatch_q *dsq;
+ struct scx_dsq_list_node dsq_list; /* dispatch order */
+ struct rb_node dsq_priq; /* p->scx.dsq_vtime order */
+ u32 dsq_seq;
+ u32 dsq_flags; /* protected by DSQ lock */
+ u32 flags; /* protected by rq lock */
+ u32 weight;
+ s32 sticky_cpu;
+ s32 holding_cpu;
+ u32 kf_mask; /* see scx_kf_mask above */
+ struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
+ atomic_long_t ops_state;
+
+ struct list_head runnable_node; /* rq->scx.runnable_list */
+ unsigned long runnable_at;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_sched_at; /* see scx_prio_less() */
+#endif
+ u64 ddsp_dsq_id;
+ u64 ddsp_enq_flags;
+
+ /* BPF scheduler modifiable fields */
+
+ /*
+ * Runtime budget in nsecs. This is usually set through
+ * scx_bpf_dispatch() but can also be modified directly by the BPF
+ * scheduler. Automatically decreased by SCX as the task executes. On
+ * depletion, a scheduling event is triggered.
+ *
+ * This value is cleared to zero if the task is preempted by
+ * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the
+ * task ran. Use p->se.sum_exec_runtime instead.
+ */
+ u64 slice;
+
+ /*
+ * Used to order tasks when dispatching to the vtime-ordered priority
+ * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime()
+ * but can also be modified directly by the BPF scheduler. Modifying it
+ * while a task is queued on a dsq may mangle the ordering and is not
+ * recommended.
+ */
+ u64 dsq_vtime;
+
+ /*
+ * If set, reject future sched_setscheduler(2) calls updating the policy
+ * to %SCHED_EXT with -%EACCES.
+ *
+ * Can be set from ops.init_task() while the BPF scheduler is being
+ * loaded (!scx_init_task_args->fork). If set and the task's policy is
+ * already %SCHED_EXT, the task's policy is rejected and forcefully
+ * reverted to %SCHED_NORMAL. The number of such events is reported
+ * through /sys/kernel/debug/sched_ext::nr_rejected. Setting this flag
+ * during fork is not allowed.
+ */
+ bool disallow; /* reject switching into SCX */
+
+ /* cold fields */
+#ifdef CONFIG_EXT_GROUP_SCHED
+ struct cgroup *cgrp_moving_from;
+#endif
+ /* must be the last field, see init_scx_entity() */
+ struct list_head tasks_node;
+};
+
+void sched_ext_free(struct task_struct *p);
+void print_scx_info(const char *log_lvl, struct task_struct *p);
+
+#else /* !CONFIG_SCHED_CLASS_EXT */
+
+static inline void sched_ext_free(struct task_struct *p) {}
+static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+#endif /* _LINUX_SCHED_EXT_H */
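
A hedged usage sketch for the state bits defined in this header: the
two-bit scx_task_state value is carried inside sched_ext_entity.flags,
so a hypothetical accessor (illustration only) would extract it as:

static inline enum scx_task_state
example_scx_task_state(const struct sched_ext_entity *scx)
{
	return (scx->flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
}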
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 91546493c43d..07bb8d4181d7 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -179,27 +179,20 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
extern void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack);
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern unsigned long
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags);
+unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t);
unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
-unsigned long
-arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t vm_flags);
-unsigned long
-arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t);
-
unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
struct file *filp,
unsigned long addr,
@@ -211,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack) {}
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index ab83d85e1183..6ab43b4f72f9 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -14,6 +14,7 @@
*/
#define MAX_RT_PRIO 100
+#define MAX_DL_PRIO 0
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index b2b9e6eb9683..4e3338103654 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -6,19 +6,40 @@
struct task_struct;
-static inline int rt_prio(int prio)
+static inline bool rt_prio(int prio)
{
- if (unlikely(prio < MAX_RT_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO);
}
-static inline int rt_task(struct task_struct *p)
+static inline bool rt_or_dl_prio(int prio)
+{
+ return unlikely(prio < MAX_RT_PRIO);
+}
+
+/*
+ * Returns true if a task has a priority that belongs to RT class. PI-boosted
+ * tasks will return true. Use rt_policy() to ignore PI-boosted tasks.
+ */
+static inline bool rt_task(struct task_struct *p)
{
return rt_prio(p->prio);
}
-static inline bool task_is_realtime(struct task_struct *tsk)
+/*
+ * Returns true if a task has a priority that belongs to RT or DL classes.
+ * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore
+ * PI-boosted tasks.
+ */
+static inline bool rt_or_dl_task(struct task_struct *p)
+{
+ return rt_or_dl_prio(p->prio);
+}
+
+/*
+ * Returns true if a task has a policy that belongs to RT or DL classes.
+ * PI-boosted tasks will return false.
+ */
+static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
{
int policy = tsk->policy;
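
The new predicates partition the priority space. A hypothetical
classifier (illustration only, assuming both <linux/sched/deadline.h>
and <linux/sched/rt.h> are included) makes the boundaries explicit:

/* dl_prio(p):       p < MAX_DL_PRIO (0)             deadline class
 * rt_prio(p):       MAX_DL_PRIO <= p < MAX_RT_PRIO  realtime class
 * rt_or_dl_prio(p): p < MAX_RT_PRIO                 union of the two
 */
static inline const char *example_prio_class(int prio)
{
	if (dl_prio(prio))
		return "deadline";
	if (rt_prio(prio))
		return "realtime";
	return "fair";
}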
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index d362aacf9f89..0f2aeb37bbb0 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -63,7 +63,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_cancel_fork(struct task_struct *p);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
@@ -119,6 +120,11 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
return t;
}
+static inline struct task_struct *tryget_task_struct(struct task_struct *t)
+{
+ return refcount_inc_not_zero(&t->usage) ? t : NULL;
+}
+
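A sketch of the intended tryget_task_struct() pattern, assuming the
caller finds the task under RCU where its usage count may already have
dropped to zero (example_get_task() is hypothetical):

static struct task_struct *example_get_task(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (p)
		p = tryget_task_struct(p);	/* NULL if the task is gone */
	rcu_read_unlock();
	return p;	/* caller must put_task_struct() when done */
}
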
extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index ccd72b978e1f..bf10bdb487dd 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -95,23 +95,11 @@ static inline int object_is_on_stack(const void *obj)
extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p);
+#else
static inline unsigned long stack_not_used(struct task_struct *p)
{
- unsigned long *n = end_of_stack(p);
-
- do { /* Skip over canary */
-# ifdef CONFIG_STACK_GROWSUP
- n--;
-# else
- n++;
-# endif
- } while (!*n);
-
-# ifdef CONFIG_STACK_GROWSUP
- return (unsigned long)end_of_stack(p) - (unsigned long)n;
-# else
- return (unsigned long)n - (unsigned long)end_of_stack(p);
-# endif
+ return 0;
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
diff --git a/include/linux/security.h b/include/linux/security.h
index c37c32ebbdcd..b86ec2afc691 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2182,7 +2182,7 @@ extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
struct bpf_token *token);
extern void security_bpf_prog_free(struct bpf_prog *prog);
extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path);
+ const struct path *path);
extern void security_bpf_token_free(struct bpf_token *token);
extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
@@ -2222,7 +2222,7 @@ static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
return 0;
}
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index d90d8ee29d81..fffeb754880f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -157,7 +157,7 @@ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
static __always_inline unsigned \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
{ \
- unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ unsigned seq = smp_load_acquire(&s->seqcount.sequence); \
\
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
return seq; \
@@ -170,7 +170,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
* Re-read the sequence counter since the (possibly \
* preempted) writer made progress. \
*/ \
- seq = READ_ONCE(s->seqcount.sequence); \
+ seq = smp_load_acquire(&s->seqcount.sequence); \
} \
\
return seq; \
@@ -208,7 +208,7 @@ static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
- return READ_ONCE(s->sequence);
+ return smp_load_acquire(&s->sequence);
}
static inline bool __seqprop_preemptible(const seqcount_t *s)
@@ -263,17 +263,9 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * __read_seqcount_begin() - begin a seqcount_t read section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
- * provided before actually loading any of the variables that are to be
- * protected in this critical section.
- *
- * Use carefully, only in critical code, and comment how the barrier is
- * provided.
- *
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
@@ -293,13 +285,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
*
* Return: count to be passed to read_seqcount_retry()
*/
-#define raw_read_seqcount_begin(s) \
-({ \
- unsigned _seq = __read_seqcount_begin(s); \
- \
- smp_rmb(); \
- _seq; \
-})
+#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -328,7 +314,6 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
({ \
unsigned __seq = seqprop_sequence(s); \
\
- smp_rmb(); \
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
__seq; \
})
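
The acquire-based sequence load above makes the separate smp_rmb()
redundant; the canonical read loop is unchanged for callers. A minimal
sketch (example_read_stat() is hypothetical):

static u64 example_read_stat(seqcount_t *seq, const u64 *stat)
{
	unsigned int s;
	u64 val;

	do {
		s = read_seqcount_begin(seq);	/* acquire of the sequence */
		val = *stat;
	} while (read_seqcount_retry(seq, s));	/* retry if a writer raced */

	return val;
}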
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index fd59ed2cca53..e0717c8393d7 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -193,7 +193,7 @@ void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac);
+ unsigned int quot);
int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index 1672cf0810ef..102aa33d956c 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -246,24 +246,28 @@
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
-#define APPLE_S5L_UCON_RXTO_ENA 9
-#define APPLE_S5L_UCON_RXTHRESH_ENA 12
-#define APPLE_S5L_UCON_TXTHRESH_ENA 13
-#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA)
-#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA)
-#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA)
+#define APPLE_S5L_UCON_RXTO_ENA 9
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA 11
+#define APPLE_S5L_UCON_RXTHRESH_ENA 12
+#define APPLE_S5L_UCON_TXTHRESH_ENA 13
+#define APPLE_S5L_UCON_RXTO_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_ENA)
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_LEGACY_ENA)
+#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_RXTHRESH_ENA)
+#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_TXTHRESH_ENA)
#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI)
#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK | \
APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
-#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
-#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
-#define APPLE_S5L_UTRSTAT_RXTO (1<<9)
-#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0)
+#define APPLE_S5L_UTRSTAT_RXTO_LEGACY BIT(3)
+#define APPLE_S5L_UTRSTAT_RXTHRESH BIT(4)
+#define APPLE_S5L_UTRSTAT_TXTHRESH BIT(5)
+#define APPLE_S5L_UTRSTAT_RXTO BIT(9)
+#define APPLE_S5L_UTRSTAT_ALL_FLAGS GENMASK(9, 3)
#ifndef __ASSEMBLY__
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 95ac8398ee72..e7aec20fb44f 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -8,10 +8,10 @@
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>
#else
-static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
#ifndef set_memory_rox
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 1d06b1e5408a..515a9a6a3c6f 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -111,20 +111,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags);
unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
- bool global_huge);
+ loff_t write_end, bool shmem_huge_force);
#else
-static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags)
-{
- return false;
-}
static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
- bool global_huge)
+ loff_t write_end, bool shmem_huge_force)
{
return 0;
}
@@ -150,8 +143,8 @@ enum sgp_type {
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
- enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
+ struct folio **foliop, enum sgp_type sgp);
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index da3a546571e7..b35e2db7eb0e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -930,6 +930,16 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz
* @new_n: new number of elements to alloc
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * See krealloc_noprof() for further details.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
*/
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
size_t new_n,
@@ -1038,8 +1048,8 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
-extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
- __realloc_size(3);
+void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ __realloc_size(2);
#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
extern void kvfree(const void *addr);
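
A sketch of the __GFP_ZERO rule documented for krealloc_array() above:
once the array is allocated zeroed, pass __GFP_ZERO on every resize so
the grown tail is zeroed as well (example_grow() is hypothetical):

static int example_grow(u32 **arr, size_t new_n)
{
	u32 *tmp = krealloc_array(*arr, new_n, sizeof(**arr),
				  GFP_KERNEL | __GFP_ZERO);

	if (!tmp)
		return -ENOMEM;
	*arr = tmp;	/* any newly added elements read as zero */
	return 0;
}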
diff --git a/include/linux/soc/cirrus/ep93xx.h b/include/linux/soc/cirrus/ep93xx.h
index 56fbe2dc59b1..3e6cf2b25a97 100644
--- a/include/linux/soc/cirrus/ep93xx.h
+++ b/include/linux/soc/cirrus/ep93xx.h
@@ -2,7 +2,18 @@
#ifndef _SOC_EP93XX_H
#define _SOC_EP93XX_H
-struct platform_device;
+struct regmap;
+#include <linux/spinlock.h>
+
+enum ep93xx_soc_model {
+ EP93XX_9301_SOC,
+ EP93XX_9307_SOC,
+ EP93XX_9312_SOC,
+};
+
+#include <linux/auxiliary_bus.h>
+#include <linux/compiler_types.h>
+#include <linux/container_of.h>
#define EP93XX_CHIP_REV_D0 3
#define EP93XX_CHIP_REV_D1 4
@@ -10,28 +21,18 @@ struct platform_device;
#define EP93XX_CHIP_REV_E1 6
#define EP93XX_CHIP_REV_E2 7
-#ifdef CONFIG_ARCH_EP93XX
-int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
-void ep93xx_pwm_release_gpio(struct platform_device *pdev);
-int ep93xx_ide_acquire_gpio(struct platform_device *pdev);
-void ep93xx_ide_release_gpio(struct platform_device *pdev);
-int ep93xx_keypad_acquire_gpio(struct platform_device *pdev);
-void ep93xx_keypad_release_gpio(struct platform_device *pdev);
-int ep93xx_i2s_acquire(void);
-void ep93xx_i2s_release(void);
-unsigned int ep93xx_chip_revision(void);
+struct ep93xx_regmap_adev {
+ struct auxiliary_device adev;
+ struct regmap *map;
+ void __iomem *base;
+ spinlock_t *lock;
+ void (*write)(struct regmap *map, spinlock_t *lock, unsigned int reg,
+ unsigned int val);
+ void (*update_bits)(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int mask, unsigned int val);
+};
-#else
-static inline int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_pwm_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_ide_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_ide_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_keypad_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_i2s_acquire(void) { return 0; }
-static inline void ep93xx_i2s_release(void) {}
-static inline unsigned int ep93xx_chip_revision(void) { return 0; }
-
-#endif
+#define to_ep93xx_regmap_adev(_adev) \
+ container_of((_adev), struct ep93xx_regmap_adev, adev)
#endif
diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h
index 0f038a1a0330..c3bca9c0bf2c 100644
--- a/include/linux/soc/qcom/geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -88,11 +88,15 @@ struct geni_se {
#define SE_GENI_M_IRQ_STATUS 0x610
#define SE_GENI_M_IRQ_EN 0x614
#define SE_GENI_M_IRQ_CLEAR 0x618
+#define SE_GENI_M_IRQ_EN_SET 0x61c
+#define SE_GENI_M_IRQ_EN_CLEAR 0x620
#define SE_GENI_S_CMD0 0x630
#define SE_GENI_S_CMD_CTRL_REG 0x634
#define SE_GENI_S_IRQ_STATUS 0x640
#define SE_GENI_S_IRQ_EN 0x644
#define SE_GENI_S_IRQ_CLEAR 0x648
+#define SE_GENI_S_IRQ_EN_SET 0x64c
+#define SE_GENI_S_IRQ_EN_CLEAR 0x650
#define SE_GENI_TX_FIFOn 0x700
#define SE_GENI_RX_FIFOn 0x780
#define SE_GENI_TX_FIFO_STATUS 0x800
@@ -101,6 +105,8 @@ struct geni_se {
#define SE_GENI_RX_WATERMARK_REG 0x810
#define SE_GENI_RX_RFR_WATERMARK_REG 0x814
#define SE_GENI_IOS 0x908
+#define SE_GENI_M_GP_LENGTH 0x910
+#define SE_GENI_S_GP_LENGTH 0x914
#define SE_DMA_TX_IRQ_STAT 0xc40
#define SE_DMA_TX_IRQ_CLR 0xc44
#define SE_DMA_TX_FSM_RST 0xc58
@@ -234,6 +240,9 @@ struct geni_se {
#define IO2_DATA_IN BIT(1)
#define RX_DATA_IN BIT(0)
+/* SE_GENI_M_GP_LENGTH and SE_GENI_S_GP_LENGTH fields */
+#define GP_LENGTH GENMASK(31, 0)
+
/* SE_DMA_TX_IRQ_STAT Register fields */
#define TX_DMA_DONE BIT(0)
#define TX_EOT BIT(1)
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 94fc1b57c57b..5e0dd47a0412 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -704,8 +704,6 @@ struct sdw_master_device {
container_of(d, struct sdw_master_device, dev)
struct sdw_driver {
- const char *name;
-
int (*probe)(struct sdw_slave *sdw,
const struct sdw_device_id *id);
int (*remove)(struct sdw_slave *sdw);
diff --git a/include/linux/string.h b/include/linux/string.h
index 95b3fc308f4f..0dd27afcfaf7 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -283,6 +283,18 @@ static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *str, char old, char new);
+/**
+ * mem_is_zero - Check if an area of memory is all 0's.
+ * @s: The memory area
+ * @n: The size of the area
+ *
+ * Return: True if the area of memory is all 0's.
+ */
+static inline bool mem_is_zero(const void *s, size_t n)
+{
+ return !memchr_inv(s, 0, n);
+}
+
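A typical use of mem_is_zero() is validating that reserved fields of a
caller-supplied structure are clear; a hypothetical example:

struct example_hdr {
	u32 flags;
	u8 reserved[12];	/* must be zero for forward compatibility */
};

static int example_check_reserved(const struct example_hdr *hdr)
{
	return mem_is_zero(hdr->reserved, sizeof(hdr->reserved)) ? 0 : -EINVAL;
}
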
extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 0c77ba488bba..fec1e8a1570c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -151,13 +151,15 @@ struct rpc_task_setup {
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
-#define RPC_TASK_RUNNING 0
-#define RPC_TASK_QUEUED 1
-#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_NEED_XMIT 3
-#define RPC_TASK_NEED_RECV 4
-#define RPC_TASK_MSG_PIN_WAIT 5
-#define RPC_TASK_SIGNALLED 6
+enum {
+ RPC_TASK_RUNNING,
+ RPC_TASK_QUEUED,
+ RPC_TASK_ACTIVE,
+ RPC_TASK_NEED_XMIT,
+ RPC_TASK_NEED_RECV,
+ RPC_TASK_MSG_PIN_WAIT,
+ RPC_TASK_SIGNALLED,
+};
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index a7d0406b9ef5..e68fecf6eab5 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -21,6 +21,7 @@
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include <linux/kthread.h>
/*
*
@@ -33,9 +34,9 @@
* node traffic on multi-node NUMA NFS servers.
*/
struct svc_pool {
- unsigned int sp_id; /* pool id; also node id on NUMA */
+ unsigned int sp_id; /* pool id; also node id on NUMA */
struct lwq sp_xprts; /* pending transports */
- atomic_t sp_nrthreads; /* # of threads in pool */
+ unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct llist_head sp_idle_threads; /* idle server threads */
@@ -66,9 +67,10 @@ enum {
* We currently do not support more than one RPC program per daemon.
*/
struct svc_serv {
- struct svc_program * sv_program; /* RPC program */
+ struct svc_program * sv_programs; /* RPC programs */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
+ unsigned int sv_nprogs; /* Number of sv_programs */
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -232,6 +234,11 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+
+ int rq_err; /* Thread sets this to indicate
+ * initialisation success.
+ */
+
unsigned long bc_to_initval;
unsigned int bc_to_retries;
void ** rq_lease_breaker; /* The v4 client breaking a lease */
@@ -305,6 +312,31 @@ static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}
+/**
+ * svc_thread_init_status - report whether thread has initialised successfully
+ * @rqstp: the thread in question
+ * @err: errno code
+ *
+ * After performing any initialisation that could fail, and before starting
+ * normal work, each sunrpc svc_thread must call svc_thread_init_status()
+ * with an appropriate error, or zero.
+ *
+ * If zero is passed, the thread is ready and must continue until
+ * svc_thread_should_stop() returns true. If a non-zero error is passed,
+ * the call will not return - the thread will exit.
+ */
+static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err)
+{
+ rqstp->rq_err = err;
+ /* memory barrier ensures assignment to error above is visible before
+ * waitqueue_active() test below completes.
+ */
+ smp_mb();
+ wake_up_var(&rqstp->rq_err);
+ if (err)
+ kthread_exit(1);
+}
+
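A sketch of the contract documented above, for a hypothetical svc
thread function (example_setup() stands in for whatever initialisation
might fail):

static int example_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;
	int err;

	err = example_setup(rqstp);		/* may fail */
	svc_thread_init_status(rqstp, err);	/* never returns if err != 0 */

	while (!svc_thread_should_stop(rqstp))
		svc_recv(rqstp);

	svc_exit_thread(rqstp);
	return 0;
}
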
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
@@ -329,10 +361,9 @@ struct svc_process_info {
};
/*
- * List of RPC programs on the same transport endpoint
+ * RPC program - an array of these can use the same transport endpoint
*/
struct svc_program {
- struct svc_program * pg_next; /* other programs (same xprt) */
u32 pg_prog; /* program number */
unsigned int pg_lovers; /* lowest version */
unsigned int pg_hivers; /* highest version */
@@ -401,19 +432,16 @@ struct svc_procedure {
*/
int sunrpc_set_pool_mode(const char *val);
int sunrpc_get_pool_mode(char *val, size_t size);
-int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
-struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
- struct svc_pool *pool, int node);
bool svc_rqst_replace_page(struct svc_rqst *rqstp,
struct page *page);
void svc_rqst_release_pages(struct svc_rqst *rqstp);
-void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *prog,
+ unsigned int nprog,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data));
@@ -446,11 +474,6 @@ int svc_generic_rpcbind_set(struct net *net,
u32 version, int family,
unsigned short proto,
unsigned short port);
-int svc_rpcbind_set_version(struct net *net,
- const struct svc_program *progp,
- u32 version, int family,
- unsigned short proto,
- unsigned short port);
#define RPC_MAX_ADDRBUFLEN (63U)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index d33bab33099a..619fc0bd837a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,6 +48,7 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/svc_rdma_pcl.h>
+#include <linux/sunrpc/rdma_rn.h>
#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
@@ -76,6 +77,7 @@ struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
+ struct rpcrdma_notification sc_rn; /* removal notification */
int sc_ord; /* RDMA read limit */
int sc_max_send_sges;
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 61c455f1e1f5..2e111153f7cd 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -14,6 +14,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/clnt.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/cred.h>
@@ -151,13 +152,16 @@ struct auth_ops {
struct svc_xprt;
-extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
+extern void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *,
+ struct svc_cred *);
+
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 7c78ec6356b9..bf45d9e8492a 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -58,8 +58,6 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
*/
void svc_recv(struct svc_rqst *rqstp);
void svc_send(struct svc_rqst *rqstp);
-void svc_drop(struct svc_rqst *);
-void svc_sock_update_bufs(struct svc_serv *serv);
int svc_addsock(struct svc_serv *serv, struct net *net,
const int fd, char *name_return, const size_t len,
const struct cred *cred);
diff --git a/include/linux/sunrpc/xdrgen/_builtins.h b/include/linux/sunrpc/xdrgen/_builtins.h
new file mode 100644
index 000000000000..66ca3ece951a
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_builtins.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__BUILTINS_H_
+#define _SUNRPC_XDRGEN__BUILTINS_H_
+
+#include <linux/sunrpc/xdr.h>
+
+static inline bool
+xdrgen_decode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_encode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_decode_bool(struct xdr_stream *xdr, bool *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (*p != xdr_zero);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_bool(struct xdr_stream *xdr, bool val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = val ? xdr_one : xdr_zero;
+ return true;
+}
+
+static inline bool
+xdrgen_decode_int(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_int(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_int(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_int(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_long(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_long(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_long(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_long(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_hyper(struct xdr_stream *xdr, s64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_hyper(struct xdr_stream *xdr, s64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_hyper(struct xdr_stream *xdr, u64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_hyper(struct xdr_stream *xdr, u64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_string(struct xdr_stream *xdr, string *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (unsigned char *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_string(struct xdr_stream *xdr, string val, u32 maxlen)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_opaque(struct xdr_stream *xdr, opaque *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (u8 *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_opaque(struct xdr_stream *xdr, opaque val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+#endif /* _SUNRPC_XDRGEN__BUILTINS_H_ */
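
A sketch of how these primitives compose for a hypothetical XDR type
"struct { unsigned int id; opaque body<1024>; }" (the example_* names
are illustrative only):

struct example_item {
	u32 id;
	opaque body;
};

static bool example_decode_item(struct xdr_stream *xdr,
				struct example_item *item)
{
	if (!xdrgen_decode_unsigned_int(xdr, &item->id))
		return false;
	return xdrgen_decode_opaque(xdr, &item->body, 1024);
}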
diff --git a/include/linux/sunrpc/xdrgen/_defs.h b/include/linux/sunrpc/xdrgen/_defs.h
new file mode 100644
index 000000000000..be9e62371758
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_defs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__DEFS_H_
+#define _SUNRPC_XDRGEN__DEFS_H_
+
+#define TRUE (true)
+#define FALSE (false)
+
+typedef struct {
+ u32 len;
+ unsigned char *data;
+} string;
+
+typedef struct {
+ u32 len;
+ u8 *data;
+} opaque;
+
+#endif /* _SUNRPC_XDRGEN__DEFS_H_ */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ba7ea95d1c57..ca533b478c21 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -243,22 +243,24 @@ enum {
* free clusters are organized into a list. We fetch an entry from the list to
* get a free cluster.
*
- * The data field stores next cluster if the cluster is free or cluster usage
- * counter otherwise. The flags field determines if a cluster is free. This is
- * protected by swap_info_struct.lock.
+ * The flags field determines if a cluster is free. This is
+ * protected by the cluster lock.
*/
struct swap_cluster_info {
spinlock_t lock; /*
* Protect swap_cluster_info fields
- * and swap_info_struct->swap_map
- * elements correspond to the swap
- * cluster
+ * other than list, and swap_info_struct->swap_map
+ * elements corresponding to the swap cluster.
*/
- unsigned int data:24;
- unsigned int flags:8;
+ u16 count;
+ u8 flags;
+ u8 order;
+ struct list_head list;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
-#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
+#define CLUSTER_FLAG_FRAG 4 /* This cluster is on frag list */
+#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */
/*
* The first page in the swap file is the swap header, which is always marked
@@ -283,11 +285,6 @@ struct percpu_cluster {
unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
-struct swap_cluster_list {
- struct swap_cluster_info head;
- struct swap_cluster_info tail;
-};
-
/*
* The in-memory structure used to track swap areas.
*/
@@ -299,8 +296,15 @@ struct swap_info_struct {
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
- struct swap_cluster_list free_clusters; /* free clusters list */
+ struct list_head free_clusters; /* free clusters list */
+ struct list_head full_clusters; /* full clusters list */
+ struct list_head nonfull_clusters[SWAP_NR_ORDERS];
+ /* list of clusters that contain at least one free slot */
+ struct list_head frag_clusters[SWAP_NR_ORDERS];
+ /* list of clusters that are fragmented or contended */
+ unsigned int frag_cluster_nr[SWAP_NR_ORDERS];
unsigned int lowest_bit; /* index of first free in swap_map */
unsigned int highest_bit; /* index of last free in swap_map */
unsigned int pages; /* total of usable pages of swap */
@@ -331,7 +335,7 @@ struct swap_info_struct {
* list.
*/
struct work_struct discard_work; /* discard worker */
- struct swap_cluster_list discard_clusters; /* discard clusters list */
+ struct list_head discard_clusters; /* discard clusters list */
struct plist_node avail_lists[]; /*
* entries in swap_avail_heads, one
* entry per node.
@@ -478,9 +482,9 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
-extern void swap_shmem_alloc(swp_entry_t);
+extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
@@ -545,7 +549,7 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
return 0;
}
-static inline void swap_shmem_alloc(swp_entry_t swp)
+static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}
@@ -554,7 +558,7 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}
-static inline int swapcache_prepare(swp_entry_t swp)
+static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
return 0;
}
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 6be396bb4297..93a9f3070b48 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -64,6 +64,13 @@ struct tp_module {
bool trace_module_has_bad_taint(struct module *mod);
extern int register_tracepoint_module_notifier(struct notifier_block *nb);
extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
+void for_each_tracepoint_in_module(struct module *,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
#else
static inline bool trace_module_has_bad_taint(struct module *mod)
{
@@ -79,6 +86,19 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
return 0;
}
+static inline
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
+static inline
+void for_each_tracepoint_in_module(struct module *mod,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
#endif /* CONFIG_MODULES */
/*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d8e4105a2f21..39c7cf82b0c2 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -33,6 +33,13 @@
})
#endif
+#ifdef masked_user_access_begin
+ #define can_do_masked_user_access() 1
+#else
+ #define can_do_masked_user_access() 0
+ #define masked_user_access_begin(src) NULL
+#endif
+
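The intended calling pattern, sketched for a single-word read (a
hypothetical helper; the fallback path uses the usual
user_read_access_begin()/unsafe_get_user() sequence):

static int example_get_user_u32(u32 *dst, const u32 __user *src)
{
	if (can_do_masked_user_access())
		src = masked_user_access_begin(src);	/* no access_ok() needed */
	else if (!user_read_access_begin(src, sizeof(*src)))
		return -EFAULT;

	unsafe_get_user(*dst, src, efault);
	user_read_access_end();
	return 0;
efault:
	user_read_access_end();
	return -EFAULT;
}
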
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 832997a9da0a..672d8fc2abdb 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -495,6 +495,12 @@ struct usb_dev_state;
struct usb_tt;
+enum usb_link_tunnel_mode {
+ USB_LINK_UNKNOWN = 0,
+ USB_LINK_NATIVE,
+ USB_LINK_TUNNELED,
+};
+
enum usb_port_connect_type {
USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -605,6 +611,7 @@ struct usb3_lpm_parameters {
* WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
+ * @tunnel_mode: Connection native or tunneled over USB4
* @lpm_capable: device supports LPM
* @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
@@ -714,6 +721,7 @@ struct usb_device {
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned port_is_suspended:1;
+ enum usb_link_tunnel_mode tunnel_mode;
int slot_id;
struct usb2_lpm_parameters l1_params;
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index af3cd2aae4bc..6e38fb9d2117 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -256,7 +256,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
int usb_func_wakeup(struct usb_function *func);
-#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */
+#define MAX_CONFIG_INTERFACES 32
/**
* struct usb_configuration - represents one gadget configuration
diff --git a/drivers/usb/gadget/u_f.h b/include/linux/usb/func_utils.h
index e313c3b8dcb1..c8795c965109 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/include/linux/usb/func_utils.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * u_f.h
+ * func_utils.h
*
* Utility definitions for USB functions
*
@@ -10,8 +10,8 @@
* Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
*/
-#ifndef __U_F_H__
-#define __U_F_H__
+#ifndef _FUNC_UTILS_H_
+#define _FUNC_UTILS_H_
#include <linux/usb/gadget.h>
#include <linux/overflow.h>
@@ -83,4 +83,4 @@ static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
usb_ep_free_request(ep, req);
}
-#endif /* __U_F_H__ */
+#endif /* _FUNC_UTILS_H_ */
diff --git a/include/linux/usb/gadget_configfs.h b/include/linux/usb/gadget_configfs.h
index d61aebd68128..6b5d6838f865 100644
--- a/include/linux/usb/gadget_configfs.h
+++ b/include/linux/usb/gadget_configfs.h
@@ -4,9 +4,6 @@
#include <linux/configfs.h>
-int check_user_usb_string(const char *name,
- struct usb_gadget_strings *stringtab_dev);
-
#define GS_STRINGS_W(__struct, __name) \
static ssize_t __struct##_##__name##_store(struct config_item *item, \
const char *page, size_t len) \
@@ -37,7 +34,7 @@ static struct configfs_item_operations struct_in##_langid_item_ops = { \
.release = struct_in##_attr_release, \
}; \
\
-static struct config_item_type struct_in##_langid_type = { \
+static const struct config_item_type struct_in##_langid_type = { \
.ct_item_ops = &struct_in##_langid_item_ops, \
.ct_attrs = struct_in##_langid_attrs, \
.ct_owner = THIS_MODULE, \
@@ -94,7 +91,7 @@ static struct configfs_group_operations struct_in##_strings_ops = { \
.drop_item = &struct_in##_strings_drop, \
}; \
\
-static struct config_item_type struct_in##_strings_type = { \
+static const struct config_item_type struct_in##_strings_type = { \
.ct_group_ops = &struct_in##_strings_ops, \
.ct_owner = THIS_MODULE, \
}
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1a0a4dc87980..75b2b763f1ba 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -311,8 +311,11 @@ struct usb_serial_driver {
#define to_usb_serial_driver(d) \
container_of(d, struct usb_serial_driver, driver)
-int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
- const char *name, const struct usb_device_id *id_table);
+#define usb_serial_register_drivers(serial_drivers, name, id_table) \
+ __usb_serial_register_drivers(serial_drivers, THIS_MODULE, name, id_table)
+int __usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+ struct module *owner, const char *name,
+ const struct usb_device_id *id_table);
void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
void usb_serial_port_softint(struct usb_serial_port *port);
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 0ab39b6ea205..f7f5cfbdef12 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -63,15 +63,12 @@
#define TCPC_ROLE_CTRL 0x1a
#define TCPC_ROLE_CTRL_DRP BIT(6)
-#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4
-#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3
+#define TCPC_ROLE_CTRL_RP_VAL GENMASK(5, 4)
#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0
#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1
#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2
-#define TCPC_ROLE_CTRL_CC2_SHIFT 2
-#define TCPC_ROLE_CTRL_CC2_MASK 0x3
-#define TCPC_ROLE_CTRL_CC1_SHIFT 0
-#define TCPC_ROLE_CTRL_CC1_MASK 0x3
+#define TCPC_ROLE_CTRL_CC2 GENMASK(3, 2)
+#define TCPC_ROLE_CTRL_CC1 GENMASK(1, 0)
#define TCPC_ROLE_CTRL_CC_RA 0x0
#define TCPC_ROLE_CTRL_CC_RP 0x1
#define TCPC_ROLE_CTRL_CC_RD 0x2
@@ -92,11 +89,9 @@
#define TCPC_CC_STATUS_TERM BIT(4)
#define TCPC_CC_STATUS_TERM_RP 0
#define TCPC_CC_STATUS_TERM_RD 1
+#define TCPC_CC_STATUS_CC2 GENMASK(3, 2)
+#define TCPC_CC_STATUS_CC1 GENMASK(1, 0)
#define TCPC_CC_STATE_SRC_OPEN 0
-#define TCPC_CC_STATUS_CC2_SHIFT 2
-#define TCPC_CC_STATUS_CC2_MASK 0x3
-#define TCPC_CC_STATUS_CC1_SHIFT 0
-#define TCPC_CC_STATUS_CC1_MASK 0x3
#define TCPC_POWER_STATUS 0x1e
#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7)
@@ -134,9 +129,8 @@
#define TCPC_MSG_HDR_INFO 0x2e
#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3)
+#define TCPC_MSG_HDR_INFO_REV GENMASK(2, 1)
#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0)
-#define TCPC_MSG_HDR_INFO_REV_SHIFT 1
-#define TCPC_MSG_HDR_INFO_REV_MASK 0x3
#define TCPC_RX_DETECT 0x2f
#define TCPC_RX_DETECT_HARD_RESET BIT(5)
@@ -154,10 +148,8 @@
#define TCPC_RX_DATA 0x34 /* through 0x4f */
#define TCPC_TRANSMIT 0x50
-#define TCPC_TRANSMIT_RETRY_SHIFT 4
-#define TCPC_TRANSMIT_RETRY_MASK 0x3
-#define TCPC_TRANSMIT_TYPE_SHIFT 0
-#define TCPC_TRANSMIT_TYPE_MASK 0x7
+#define TCPC_TRANSMIT_RETRY GENMASK(5, 4)
+#define TCPC_TRANSMIT_TYPE GENMASK(2, 0)
#define TCPC_TX_BYTE_CNT 0x51
#define TCPC_TX_HDR 0x52
@@ -178,8 +170,7 @@
#define tcpc_presenting_rd(reg, cc) \
(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
- (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
+ FIELD_GET(TCPC_ROLE_CTRL_## cc, reg) == TCPC_ROLE_CTRL_CC_RD)
struct tcpci;
@@ -190,7 +181,7 @@ struct tcpci;
* Optional; Callback to perform chip specific operations when FRS
* is sourcing vbus.
* @auto_discharge_disconnect:
- * Optional; Enables TCPC to autonously discharge vbus on disconnect.
+ * Optional; Enables TCPC to autonomously discharge vbus on disconnect.
* @vbus_vsafe0v:
* optional; Set when TCPC can detect whether vbus is at VSAFE0V.
* @set_partner_usb_comm_capable:
@@ -256,7 +247,7 @@ static inline enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
if (sink)
return TYPEC_CC_RP_3_0;
fallthrough;
- case 0x0:
+ case TCPC_CC_STATE_SRC_OPEN:
default:
return TYPEC_CC_OPEN;
}
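
With the GENMASK-based definitions, field extraction collapses to
FIELD_GET() from <linux/bitfield.h>. A hypothetical helper reading the
CC1 state out of a CC_STATUS register value (sink detection simplified
to the TERM bit for illustration):

static inline enum typec_cc_status example_cc1_status(unsigned int cc_status)
{
	bool sink = !!(cc_status & TCPC_CC_STATUS_TERM);

	return tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC1, cc_status), sink);
}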
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9f08a584d707..0b9f1e598e3a 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -76,8 +76,23 @@ struct usbnet {
# define EVENT_LINK_CHANGE 11
# define EVENT_SET_RX_MODE 12
# define EVENT_NO_IP_ALIGN 13
+/* This one is special, as it indicates that the device is going away;
+ * there are cyclic dependencies between tasklet, timer and bh
+ * that must be broken.
+ */
+# define EVENT_UNPLUG 31
};
+static inline bool usbnet_going_away(struct usbnet *ubn)
+{
+ return test_bit(EVENT_UNPLUG, &ubn->flags);
+}
+
+static inline void usbnet_mark_going_away(struct usbnet *ubn)
+{
+ set_bit(EVENT_UNPLUG, &ubn->flags);
+}
+
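A sketch of how the helpers above break the rearming cycle during
unplug (example_kick_bh() is hypothetical; struct usbnet provides the
bh tasklet):

static void example_kick_bh(struct usbnet *dev)
{
	if (usbnet_going_away(dev))
		return;			/* teardown in progress, don't rearm */
	tasklet_schedule(&dev->bh);
}
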
static inline struct usb_driver *driver_of(struct usb_interface *intf)
{
return to_usb_driver(intf->dev.driver);
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index a12bcf042551..9fc6ce15c499 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -267,6 +267,25 @@ extern void userfaultfd_unmap_complete(struct mm_struct *mm,
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
extern bool userfaultfd_wp_async(struct vm_area_struct *vma);
+void userfaultfd_reset_ctx(struct vm_area_struct *vma);
+
+struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
+
+int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ unsigned long start, unsigned long end,
+ bool wp_async);
+
+void userfaultfd_release_new(struct userfaultfd_ctx *ctx);
+
+void userfaultfd_release_all(struct mm_struct *mm,
+ struct userfaultfd_ctx *ctx);
+
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 7977ca03ac7a..2e7a30fe6b92 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -582,11 +582,20 @@ void vdpa_set_status(struct vdpa_device *vdev, u8 status);
* @dev: vdpa device to remove
* Driver need to remove the specified device by calling
* _vdpa_unregister_device().
+ * @dev_set_attr: change a vdpa device's attributes after it was created
+ * @mdev: parent device to use for device
+ * @dev: vdpa device structure
+ * @config: Attributes to be set for the device.
+ * The driver needs to check the mask of the structure and then set
+ * the related information to the vdpa device. The driver must return 0
+ * if the set succeeds.
*/
struct vdpa_mgmtdev_ops {
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config);
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+ int (*dev_set_attr)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *config);
};
/**
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 747943bc8cc2..aed952d04132 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -50,6 +50,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_ANON,
PGSTEAL_FILE,
#ifdef CONFIG_NUMA
+ PGSCAN_ZONE_RECLAIM_SUCCESS,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
@@ -104,6 +105,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
+ THP_UNDERUSED_SPLIT_PAGE,
THP_SPLIT_PMD,
THP_SCAN_EXCEED_NONE_PTE,
THP_SCAN_EXCEED_SWAP_PTE,
@@ -154,6 +156,30 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
VMA_LOCK_RETRY,
VMA_LOCK_MISS,
#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ KSTACK_1K,
+#if THREAD_SIZE > 1024
+ KSTACK_2K,
+#endif
+#if THREAD_SIZE > 2048
+ KSTACK_4K,
+#endif
+#if THREAD_SIZE > 4096
+ KSTACK_8K,
+#endif
+#if THREAD_SIZE > 8192
+ KSTACK_16K,
+#endif
+#if THREAD_SIZE > 16384
+ KSTACK_32K,
+#endif
+#if THREAD_SIZE > 32768
+ KSTACK_64K,
+#endif
+#if THREAD_SIZE > 65536
+ KSTACK_REST,
+#endif
+#endif /* CONFIG_DEBUG_STACK_USAGE */
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index e4a631ec430b..ad2ce7a6ab7a 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -189,6 +189,10 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))
+void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+ __realloc_size(2);
+#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
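
As a usage sketch (not part of the patch), vrealloc() follows krealloc() semantics: it returns NULL on failure and leaves the original allocation intact:

    #include <linux/vmalloc.h>

    /* Grow a vmalloc()'d buffer in place of manual alloc/copy/free. */
    static int grow_buffer(void **bufp, size_t new_size)
    {
        void *tmp = vrealloc(*bufp, new_size, GFP_KERNEL);

        if (!tmp)
            return -ENOMEM;   /* *bufp is still valid and still owned */
        *bufp = tmp;
        return 0;
    }
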
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9eb77c9007e6..d2761bf8ff32 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -32,6 +32,7 @@ struct reclaim_stat {
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
unsigned nr_lazyfree_fail;
+ unsigned nr_demoted;
};
/* Stat data for system wide items */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 8f651bb0a1a5..d6db822e4bb3 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,6 +79,9 @@ struct writeback_control {
*/
struct swap_iocb **swap_plug;
+ /* Target list for splitting a large folio */
+ struct list_head *list;
+
/* internal fields used by the ->writepages implementation: */
struct folio_batch fbatch;
pgoff_t index;
diff --git a/include/linux/xz.h b/include/linux/xz.h
index 7285ca5d56e9..58ae1d746c6f 100644
--- a/include/linux/xz.h
+++ b/include/linux/xz.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* XZ decompressor
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_H
@@ -19,11 +18,6 @@
# include <stdint.h>
#endif
-/* In Linux, this is used to make extern functions static when needed. */
-#ifndef XZ_EXTERN
-# define XZ_EXTERN extern
-#endif
-
/**
* enum xz_mode - Operation mode
*
@@ -143,7 +137,7 @@ struct xz_buf {
size_t out_size;
};
-/**
+/*
* struct xz_dec - Opaque type to hold the XZ decoder state
*/
struct xz_dec;
@@ -191,7 +185,7 @@ struct xz_dec;
* ready to be used with xz_dec_run(). If memory allocation fails,
* xz_dec_init() returns NULL.
*/
-XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
+struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
/**
* xz_dec_run() - Run the XZ decoder
@@ -211,7 +205,7 @@ XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
 * get that amount of valid data from the beginning of the stream. You must use
* the multi-call decoder if you don't want to uncompress the whole stream.
*/
-XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
+enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
/**
* xz_dec_reset() - Reset an already allocated decoder state
@@ -224,32 +218,38 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
* xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in
* multi-call mode.
*/
-XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
+void xz_dec_reset(struct xz_dec *s);
/**
* xz_dec_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_init(). If s is NULL,
* this function does nothing.
*/
-XZ_EXTERN void xz_dec_end(struct xz_dec *s);
+void xz_dec_end(struct xz_dec *s);
-/*
- * Decompressor for MicroLZMA, an LZMA variant with a very minimal header.
- * See xz_dec_microlzma_alloc() below for details.
+/**
+ * DOC: MicroLZMA decompressor
+ *
+ * This MicroLZMA header format was created for use in EROFS but may be used
+ * by others too. **In most cases one needs the XZ APIs above instead.**
*
- * These functions aren't used or available in preboot code and thus aren't
- * marked with XZ_EXTERN. This avoids warnings about static functions that
- * are never defined.
+ * The compressed format supported by this decoder is a raw LZMA stream
+ * whose first byte (always 0x00) has been replaced with bitwise-negation
+ * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
+ * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
+ * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
+ * marker must not be used. The unused values are reserved for future use.
*/
-/**
+
+/*
* struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state
*/
struct xz_dec_microlzma;
/**
* xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder
- * @mode XZ_SINGLE or XZ_PREALLOC
- * @dict_size LZMA dictionary size. This must be at least 4 KiB and
+ * @mode: XZ_SINGLE or XZ_PREALLOC
+ * @dict_size: LZMA dictionary size. This must be at least 4 KiB and
* at most 3 GiB.
*
* In contrast to xz_dec_init(), this function only allocates the memory
@@ -262,40 +262,30 @@ struct xz_dec_microlzma;
* On success, xz_dec_microlzma_alloc() returns a pointer to
* struct xz_dec_microlzma. If memory allocation fails or
* dict_size is invalid, NULL is returned.
- *
- * The compressed format supported by this decoder is a raw LZMA stream
- * whose first byte (always 0x00) has been replaced with bitwise-negation
- * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
- * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
- * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
- * marker must not be used. The unused values are reserved for future use.
- * This MicroLZMA header format was created for use in EROFS but may be used
- * by others too.
*/
-extern struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
- uint32_t dict_size);
+struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size);
/**
* xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state
- * @s Decoder state allocated using xz_dec_microlzma_alloc()
- * @comp_size Compressed size of the input stream
- * @uncomp_size Uncompressed size of the input stream. A value smaller
+ * @s: Decoder state allocated using xz_dec_microlzma_alloc()
+ * @comp_size: Compressed size of the input stream
+ * @uncomp_size: Uncompressed size of the input stream. A value smaller
* than the real uncompressed size of the input stream can
* be specified if uncomp_size_is_exact is set to false.
* uncomp_size can never be set to a value larger than the
* expected real uncompressed size because it would eventually
* result in XZ_DATA_ERROR.
- * @uncomp_size_is_exact This is an int instead of bool to avoid
+ * @uncomp_size_is_exact: This is an int instead of bool to avoid
* requiring stdbool.h. This should normally be set to true.
* When this is set to false, error detection is weaker.
*/
-extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
- uint32_t comp_size, uint32_t uncomp_size,
- int uncomp_size_is_exact);
+void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
+ uint32_t uncomp_size, int uncomp_size_is_exact);
/**
* xz_dec_microlzma_run() - Run the MicroLZMA decoder
- * @s Decoder state initialized using xz_dec_microlzma_reset()
+ * @s: Decoder state initialized using xz_dec_microlzma_reset()
* @b: Input and output buffers
*
* This works similarly to xz_dec_run() with a few important differences.
@@ -329,15 +319,14 @@ extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
* may be changed normally like with XZ_PREALLOC. This way input data can be
* provided from non-contiguous memory.
*/
-extern enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s,
- struct xz_buf *b);
+enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s, struct xz_buf *b);
/**
* xz_dec_microlzma_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_microlzma_alloc().
* If s is NULL, this function does nothing.
*/
-extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
+void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
/*
* Standalone build (userspace build or in-kernel build for boot time use)
@@ -358,13 +347,13 @@ extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
* This must be called before any other xz_* function to initialize
* the CRC32 lookup table.
*/
-XZ_EXTERN void xz_crc32_init(void);
+void xz_crc32_init(void);
/*
* Update CRC32 value using the polynomial from IEEE-802.3. To start a new
* calculation, the third argument must be zero. To continue the calculation,
* the previously returned value is passed as the third argument.
*/
-XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
#endif
#endif
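
To tie the single-call API together, a minimal decompression sketch; buffer handling and the -EINVAL mapping are illustrative, not part of this patch:

    #include <linux/xz.h>

    /* Single-call mode: whole stream in, whole output buffer out. In
     * standalone builds, xz_crc32_init() must have been called first. */
    static int xz_decompress_buf(const u8 *in, size_t in_size,
                                 u8 *out, size_t out_size)
    {
        struct xz_buf b = {
            .in = in, .in_size = in_size,
            .out = out, .out_size = out_size,
        };
        struct xz_dec *s;
        enum xz_ret ret;

        s = xz_dec_init(XZ_SINGLE, 0);  /* dict_max is ignored in XZ_SINGLE */
        if (!s)
            return -ENOMEM;
        ret = xz_dec_run(s, &b);
        xz_dec_end(s);
        return ret == XZ_STREAM_END ? 0 : -EINVAL;
    }
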
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index 113408eef6ec..b2c7cf310c8f 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -77,6 +77,30 @@ int zstd_min_clevel(void);
*/
int zstd_max_clevel(void);
+/**
+ * zstd_default_clevel() - default compression level
+ *
+ * Return: Default compression level.
+ */
+int zstd_default_clevel(void);
+
+/**
+ * struct zstd_custom_mem - custom memory allocation
+ */
+typedef ZSTD_customMem zstd_custom_mem;
+
+/**
+ * struct zstd_dict_load_method - Dictionary load method.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictLoadMethod_e zstd_dict_load_method;
+
+/**
+ * struct zstd_dict_content_type - Dictionary context type.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictContentType_e zstd_dict_content_type;
+
/* ====== Parameter Selection ====== */
/**
@@ -136,6 +160,19 @@ typedef ZSTD_parameters zstd_parameters;
zstd_parameters zstd_get_params(int level,
unsigned long long estimated_src_size);
+
+/**
+ * zstd_get_cparams() - returns zstd_compression_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
+ * @dict_size: Dictionary size.
+ *
+ * Return: The selected zstd_compression_parameters.
+ */
+zstd_compression_parameters zstd_get_cparams(int level,
+ unsigned long long estimated_src_size, size_t dict_size);
+
/* ====== Single-pass Compression ====== */
typedef ZSTD_CCtx zstd_cctx;
@@ -180,6 +217,71 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters);
+/**
+ * zstd_create_cctx_advanced() - Create compression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to compression context otherwise.
+ */
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cctx() - Free compression context
+ * @cctx:	Pointer to compression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cctx(zstd_cctx *cctx);
+
+/**
+ * struct zstd_cdict - Compression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_CDict zstd_cdict;
+
+/**
+ * zstd_create_cdict_byreference() - Create compression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @cparams:	Compression parameters to be used.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so it must not be
+ * freed before the zstd_cdict is destroyed.
+ *
+ * Return: NULL on error, pointer to compression dictionary
+ * otherwise.
+ */
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+ zstd_compression_parameters cparams,
+ zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cdict() - Free compression dictionary
+ * @cdict: Pointer to compression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cdict(zstd_cdict *cdict);
+
+/**
+ * zstd_compress_using_cdict() - compress src into dst using a dictionary
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @cdict: The dictionary to be used.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+ size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_cdict *cdict);
+
/* ====== Single-pass Decompression ====== */
typedef ZSTD_DCtx zstd_dctx;
@@ -220,6 +322,71 @@ zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size);
+/**
+ * struct zstd_ddict - Decompression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_DDict zstd_ddict;
+
+/**
+ * zstd_create_ddict_byreference() - Create decompression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so it must not be
+ * freed before the zstd_ddict is destroyed.
+ *
+ * Return: NULL on error, pointer to decompression dictionary
+ * otherwise.
+ */
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+					  zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_ddict() - Free decompression dictionary
+ * @ddict:	Pointer to the decompression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_ddict(zstd_ddict *ddict);
+
+/**
+ * zstd_create_dctx_advanced() - Create decompression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to decompression context otherwise.
+ */
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_dctx() - Free decompression context
+ * @dctx:	Pointer to decompression context.
+ *
+ * Return:	Always 0.
+ */
+size_t zstd_free_dctx(zstd_dctx *dctx);
+
+/**
+ * zstd_decompress_using_ddict() - decompress src into dst using a dictionary
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
+ * @ddict: The dictionary to be used.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+ void *dst, size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_ddict *ddict);
+
/* ====== Streaming Buffers ====== */
/**
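
To see the new dictionary entry points working together, a hedged one-shot sketch; the allocator argument and errno mapping are illustrative only. Note @dict is used by reference and must stay valid until the cdict is freed:

    #include <linux/zstd.h>

    static ssize_t compress_with_dict(const void *dict, size_t dict_size,
                                      const void *src, size_t src_size,
                                      void *dst, size_t dst_capacity,
                                      zstd_custom_mem mem)
    {
        zstd_compression_parameters cparams =
            zstd_get_cparams(zstd_default_clevel(), src_size, dict_size);
        zstd_cdict *cdict = zstd_create_cdict_byreference(dict, dict_size,
                                                          cparams, mem);
        zstd_cctx *cctx = zstd_create_cctx_advanced(mem);
        ssize_t ret = -ENOMEM;

        if (cdict && cctx) {
            size_t csize = zstd_compress_using_cdict(cctx, dst, dst_capacity,
                                                     src, src_size, cdict);
            ret = zstd_is_error(csize) ? -EIO : (ssize_t)csize;
        }
        zstd_free_cctx(cctx);    /* both free helpers tolerate NULL */
        zstd_free_cdict(cdict);
        return ret;
    }
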
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 6cecb4a4f68b..9cd1beef0654 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -13,17 +13,15 @@ extern atomic_t zswap_stored_pages;
struct zswap_lruvec_state {
/*
- * Number of pages in zswap that should be protected from the shrinker.
- * This number is an estimate of the following counts:
+	 * Number of pages swapped in from disk, i.e. not found in the zswap pool.
*
- * a) Recent page faults.
- * b) Recent insertion to the zswap LRU. This includes new zswap stores,
- * as well as recent zswap LRU rotations.
- *
- * These pages are likely to be warm, and might incur IO if the are written
- * to swap.
+ * This is consumed and subtracted from the lru size in
+ * zswap_shrinker_count() to penalize past overshrinking that led to disk
+ * swapins. The idea is that had we considered this many more pages in the
+	 * LRU active/protected and not written them back, we would not have had to
+	 * swap them in.
*/
- atomic_long_t nr_zswap_protected;
+ atomic_long_t nr_disk_swapins;
};
unsigned long zswap_total_pages(void);
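
The accounting described in the comment can be pictured with a small model (illustrative, not this patch's code): the consumer swaps the counter to zero and deducts it from the shrinkable size:

    #include <linux/atomic.h>

    /* Penalize the shrinker by recent disk swapins, then reset the count. */
    static unsigned long shrinkable_pages(unsigned long lru_size,
                                          atomic_long_t *nr_disk_swapins)
    {
        long swapins = atomic_long_xchg(nr_disk_swapins, 0);

        if (swapins < 0)
            swapins = 0;
        return lru_size > (unsigned long)swapins ? lru_size - swapins : 0;
    }
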
diff --git a/include/media/cec.h b/include/media/cec.h
index d131514032f2..16b412b3131b 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -66,6 +66,8 @@ struct cec_data {
struct list_head xfer_list;
struct cec_adapter *adap;
struct cec_msg msg;
+ u8 match_len;
+ u8 match_reply[5];
struct cec_fh *fh;
struct delayed_work work;
struct completion c;
@@ -296,6 +298,37 @@ struct cec_adapter {
char input_phys[40];
};
+static inline int cec_get_device(struct cec_adapter *adap)
+{
+ struct cec_devnode *devnode = &adap->devnode;
+
+ /*
+ * Check if the cec device is available. This needs to be done with
+ * the devnode->lock held to prevent an open/unregister race:
+ * without the lock, the device could be unregistered and freed between
+ * the devnode->registered check and get_device() calls, leading to
+ * a crash.
+ */
+ mutex_lock(&devnode->lock);
+ /*
+	 * Return -ENODEV if the cec device has already been removed
+	 * or if it is no longer registered.
+ */
+ if (!devnode->registered) {
+ mutex_unlock(&devnode->lock);
+ return -ENODEV;
+ }
+ /* and increase the device refcount */
+ get_device(&devnode->dev);
+ mutex_unlock(&devnode->lock);
+ return 0;
+}
+
+static inline void cec_put_device(struct cec_adapter *adap)
+{
+ put_device(&adap->devnode.dev);
+}
+
static inline void *cec_get_drvdata(const struct cec_adapter *adap)
{
return adap->priv;
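
A hedged sketch of the intended call pattern; my_cec_open() and the work in the middle are placeholders:

    /* Take the devnode reference around any use, drop it when done. */
    static int my_cec_open(struct cec_adapter *adap)
    {
        int err = cec_get_device(adap);

        if (err)
            return err;     /* adapter was already unregistered */
        /* ... the devnode cannot go away while the reference is held ... */
        cec_put_device(adap);
        return 0;
    }
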
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 803349599c27..d095908073ef 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -127,7 +127,6 @@ struct lirc_fh {
* @min_timeout: minimum timeout supported by device
* @max_timeout: maximum timeout supported by device
* @rx_resolution : resolution (in us) of input sampler
- * @tx_resolution: resolution (in us) of output sampler
* @lirc_dev: lirc device
* @lirc_cdev: lirc char cdev
* @gap_start: start time for gap after timeout if non-zero
@@ -194,7 +193,6 @@ struct rc_dev {
u32 min_timeout;
u32 max_timeout;
u32 rx_resolution;
- u32 tx_resolution;
#ifdef CONFIG_LIRC
struct device lirc_dev;
struct cdev lirc_cdev;
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index ed0a44b6eada..1837c9fd78cf 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -178,6 +178,9 @@ void v4l2_pipeline_pm_put(struct media_entity *entity);
* @flags: New link flags that will be applied
* @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* React to link management on powered pipelines by updating the use count of
* all entities in the source and sink sides of the link. Entities are powered
* on or off accordingly. The use of this function should be paired
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index bd235d325ff9..8daa0929865c 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1250,6 +1250,12 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
* calls v4l2_subdev_link_validate_default() to ensure that
* width, height and the media bus pixel code are equal on both
* source and sink of the link.
+ *
+ * The function can be used as a drop-in &media_entity_ops.link_validate
+ * implementation for v4l2_subdev instances. It supports all links between
+ * subdevs, as well as links between subdevs and video devices, provided that
+ * the video devices also implement their &media_entity_ops.link_validate
+ * operation.
*/
int v4l2_subdev_link_validate(struct media_link *link);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 955237ac503d..9b02aeba4108 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -154,6 +154,8 @@ struct vb2_mem_ops {
* @mem_priv: private data with this plane.
* @dbuf: dma_buf - shared buffer object.
* @dbuf_mapped: flag to show whether dbuf is mapped or not
+ * @dbuf_duplicated: boolean to show whether dbuf is duplicated with a
+ * previous plane of the buffer.
* @bytesused: number of bytes occupied by data in the plane (payload).
* @length: size of this plane (NOT the payload) in bytes. The maximum
* valid size is MAX_UINT - PAGE_SIZE.
@@ -179,6 +181,7 @@ struct vb2_plane {
void *mem_priv;
struct dma_buf *dbuf;
unsigned int dbuf_mapped;
+ bool dbuf_duplicated;
unsigned int bytesused;
unsigned int length;
unsigned int min_length;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f77f812bfbe7..d1948d357dad 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2435,9 +2435,26 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
- u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ if (likely(skb)) {
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
+
+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ } else {
+		WARN_ONCE(1,
+			  "rtx queue empty: "
+ "out:%u sacked:%u lost:%u retrans:%u "
+ "tlp_high_seq:%u sk_state:%u ca_state:%u "
+ "advmss:%u mss_cache:%u pmtu:%u\n",
+ tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
+ tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
+ tcp_sk(sk)->tlp_high_seq, sk->sk_state,
+ inet_csk(sk)->icsk_ca_state,
+ tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
+ inet_csk(sk)->icsk_pmtu_cookie);
+ return jiffies_to_usecs(rto);
+ }
}
/*
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 565a85044541..7dc7b1cc71b5 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,6 +38,7 @@ struct ib_umem_dmabuf {
unsigned long last_sg_trim;
void *private;
u8 pinned : 1;
+ u8 revoked : 1;
};
static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
@@ -150,9 +151,15 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
unsigned long offset,
size_t size, int fd,
int access);
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -196,12 +203,23 @@ ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
+static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) {}
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6c5712ae559d..aa8ede439905 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2476,7 +2476,7 @@ struct ib_device_ops {
struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
u64 length, u64 virt_addr, int fd,
int mr_access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
u64 length, u64 virt_addr,
int mr_access_flags, struct ib_pd *pd,
@@ -4453,6 +4453,8 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ u32 port);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index c2a79aeee113..326deaf56d5d 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -6,6 +6,8 @@
#include <linux/netlink.h>
#include <uapi/rdma/rdma_netlink.h>
+struct ib_device;
+
enum {
RDMA_NLDEV_ATTR_EMPTY_STRING = 1,
RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16,
@@ -110,6 +112,16 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
*/
bool rdma_nl_chk_listeners(unsigned int group);
+/**
+ * rdma_nl_notify_event() - Prepare and send an event message
+ * @ib:		the IB device which triggered the event
+ * @port_num:	the port number which triggered the event - 0 if unused
+ * @type:	the event type
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int rdma_nl_notify_event(struct ib_device *ib, u32 port_num,
+ enum rdma_nl_notify_event_type type);
+
struct rdma_link_ops {
struct list_head list;
const char *type;
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
index 4b1216de3f22..2b28a05e492b 100644
--- a/include/scsi/fcoe_sysfs.h
+++ b/include/scsi/fcoe_sysfs.h
@@ -50,9 +50,7 @@ struct fcoe_ctlr_device {
struct fcoe_sysfs_function_template *f;
struct list_head fcfs;
- char work_q_name[20];
struct workqueue_struct *work_q;
- char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
struct mutex lock;
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 7b196d234626..bd29cdb513a5 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -24,7 +24,6 @@ extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
const char **);
extern const char *scsi_mlreturn_string(int);
extern const char *scsi_hostbyte_string(int);
-extern const char *scsi_driverbyte_string(int);
#else
static inline bool
scsi_opcode_sa_name(int cmd, int sa,
@@ -76,12 +75,6 @@ scsi_hostbyte_string(int result)
return NULL;
}
-static inline const char *
-scsi_driverbyte_string(int result)
-{
- return NULL;
-}
-
#endif
#endif /* _SCSI_SCSI_DBG_H */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 19a1c5c48935..2b4ab0369ffb 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -677,7 +677,6 @@ struct Scsi_Host {
/*
* Optional work queue to be utilized by the transport
*/
- char work_q_name[20];
struct workqueue_struct *work_q;
/*
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 4b884b8013e0..8e6c60090c62 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -575,9 +575,7 @@ struct fc_host_attrs {
u16 npiv_vports_inuse;
/* work queues for rport state manipulation */
- char work_q_name[20];
struct workqueue_struct *work_q;
- char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
/* bsg support */
@@ -654,12 +652,8 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->next_vport_number)
#define fc_host_npiv_vports_inuse(x) \
(((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
-#define fc_host_work_q_name(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->work_q_name)
#define fc_host_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q)
-#define fc_host_devloss_work_q_name(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name)
#define fc_host_devloss_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
new file mode 100644
index 000000000000..569f86a44aaa
--- /dev/null
+++ b/include/trace/events/dma.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dma
+
+#if !defined(_TRACE_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_H
+
+#include <linux/tracepoint.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <trace/events/mmflags.h>
+
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define decode_dma_data_direction(dir) \
+ __print_symbolic(dir, \
+ { DMA_BIDIRECTIONAL, "BIDIRECTIONAL" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
+#define decode_dma_attrs(attrs) \
+ __print_flags(attrs, "|", \
+ { DMA_ATTR_WEAK_ORDERING, "WEAK_ORDERING" }, \
+ { DMA_ATTR_WRITE_COMBINE, "WRITE_COMBINE" }, \
+ { DMA_ATTR_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING" }, \
+ { DMA_ATTR_SKIP_CPU_SYNC, "SKIP_CPU_SYNC" }, \
+ { DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
+ { DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
+ { DMA_ATTR_NO_WARN, "NO_WARN" }, \
+ { DMA_ATTR_PRIVILEGED, "PRIVILEGED" })
+
+DECLARE_EVENT_CLASS(dma_map,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = phys_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+DEFINE_EVENT(dma_map, dma_map_page,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs));
+
+DEFINE_EVENT(dma_map, dma_map_resource,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs));
+
+DECLARE_EVENT_CLASS(dma_unmap,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->addr,
+ __entry->size,
+ decode_dma_attrs(__entry->attrs))
+);
+
+DEFINE_EVENT(dma_unmap, dma_unmap_page,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs));
+
+DEFINE_EVENT(dma_unmap, dma_unmap_resource,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs));
+
+TRACE_EVENT(dma_alloc,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, gfp_t flags, unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, flags, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(gfp_t, flags)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = virt_to_phys(virt_addr);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->flags = flags;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dma_addr=%llx size=%zu phys_addr=%llx flags=%s attrs=%s",
+ __get_str(device),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ show_gfp_flags(__entry->flags),
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_free,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = virt_to_phys(virt_addr);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
+ __get_str(device),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_map_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ int ents, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, ents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, nents)
+ __dynamic_array(u64, dma_addrs, ents)
+ __dynamic_array(unsigned int, lengths, ents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ for_each_sg(sgl, sg, ents, i) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg);
+ }
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)),
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_unmap_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, addrs, nents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(addrs))[i] = sg_phys(sg);
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(addrs),
+ __get_dynamic_array_len(addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+DECLARE_EVENT_CLASS(dma_sync_single,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size)
+);
+
+DEFINE_EVENT(dma_sync_single, dma_sync_single_for_cpu,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir));
+
+DEFINE_EVENT(dma_sync_single, dma_sync_single_for_device,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir));
+
+DECLARE_EVENT_CLASS(dma_sync_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sgl, nents, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, dma_addrs, nents)
+ __dynamic_array(unsigned int, lengths, nents)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg);
+ }
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)))
+);
+
+DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_cpu,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sg, nents, dir));
+
+DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_device,
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sg, nents, dir));
+
+#endif /* _TRACE_DMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
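
For context, a sketch of how the core would emit one of these events; the call site is illustrative, and CREATE_TRACE_POINTS must be defined in exactly one compilation unit:

    #include <linux/dma-mapping.h>

    #define CREATE_TRACE_POINTS
    #include <trace/events/dma.h>

    /* Emit the event after a successful streaming mapping. */
    static void report_map(struct device *dev, phys_addr_t phys,
                           dma_addr_t dma, size_t size)
    {
        trace_dma_map_page(dev, phys, dma, size, DMA_TO_DEVICE, 0);
    }
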
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index ed794b5fefbe..2851c823095b 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
{ CP_SPEC_LOG_NUM, "log type is 2" }, \
- { CP_RECOVER_DIR, "dir needs recovery" })
+ { CP_RECOVER_DIR, "dir needs recovery" }, \
+ { CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \
__print_symbolic(type, \
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 46c89c1e460c..f48fe637bfd2 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -56,6 +56,90 @@ DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
TP_ARGS(folio)
);
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
+
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+
+ TP_ARGS(mapping, index, last_index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ __field(unsigned long, last_index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ __entry->last_index = last_index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld-%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT,
+ ((((loff_t)__entry->last_index + 1) << PAGE_SHIFT) - 1)
+ )
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+TRACE_EVENT(mm_filemap_fault,
+ TP_PROTO(struct address_space *mapping, pgoff_t index),
+
+ TP_ARGS(mapping, index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT
+ )
+);
+
TRACE_EVENT(filemap_set_wb_err,
TP_PROTO(struct address_space *mapping, errseq_t eseq),
diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h
index b108176deb22..ad0e0cf82b9c 100644
--- a/include/trace/events/firewire.h
+++ b/include/trace/events/firewire.h
@@ -830,13 +830,13 @@ TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
#ifndef show_cause
enum fw_iso_context_completions_cause {
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
- FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
};
#define show_cause(cause) \
__print_symbolic(cause, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
- { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ, "IRQ" }, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT, "INTERRUPT" }, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
)
#endif
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 37265977d524..bb8a59c6caa2 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -71,12 +71,6 @@
#define IF_HAVE_PG_MLOCK(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-#define IF_HAVE_PG_UNCACHED(_name) ,{1UL << PG_##_name, __stringify(_name)}
-#else
-#define IF_HAVE_PG_UNCACHED(_name)
-#endif
-
#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
@@ -89,10 +83,16 @@
#define IF_HAVE_PG_IDLE(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
-#define IF_HAVE_PG_ARCH_X(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+#define IF_HAVE_PG_ARCH_2(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#else
+#define IF_HAVE_PG_ARCH_2(_name)
+#endif
+
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
+#define IF_HAVE_PG_ARCH_3(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_ARCH_X(_name)
+#define IF_HAVE_PG_ARCH_3(_name)
#endif
#define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) }
@@ -100,7 +100,6 @@
#define __def_pageflag_names \
DEF_PAGEFLAG_NAME(locked), \
DEF_PAGEFLAG_NAME(waiters), \
- DEF_PAGEFLAG_NAME(error), \
DEF_PAGEFLAG_NAME(referenced), \
DEF_PAGEFLAG_NAME(uptodate), \
DEF_PAGEFLAG_NAME(dirty), \
@@ -108,39 +107,28 @@
DEF_PAGEFLAG_NAME(active), \
DEF_PAGEFLAG_NAME(workingset), \
DEF_PAGEFLAG_NAME(owner_priv_1), \
+ DEF_PAGEFLAG_NAME(owner_2), \
DEF_PAGEFLAG_NAME(arch_1), \
DEF_PAGEFLAG_NAME(reserved), \
DEF_PAGEFLAG_NAME(private), \
DEF_PAGEFLAG_NAME(private_2), \
DEF_PAGEFLAG_NAME(writeback), \
DEF_PAGEFLAG_NAME(head), \
- DEF_PAGEFLAG_NAME(mappedtodisk), \
DEF_PAGEFLAG_NAME(reclaim), \
DEF_PAGEFLAG_NAME(swapbacked), \
DEF_PAGEFLAG_NAME(unevictable) \
IF_HAVE_PG_MLOCK(mlocked) \
-IF_HAVE_PG_UNCACHED(uncached) \
IF_HAVE_PG_HWPOISON(hwpoison) \
IF_HAVE_PG_IDLE(idle) \
IF_HAVE_PG_IDLE(young) \
-IF_HAVE_PG_ARCH_X(arch_2) \
-IF_HAVE_PG_ARCH_X(arch_3)
+IF_HAVE_PG_ARCH_2(arch_2) \
+IF_HAVE_PG_ARCH_3(arch_3)
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_pageflag_names \
) : "none"
-#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
-
-#define __def_pagetype_names \
- DEF_PAGETYPE_NAME(slab), \
- DEF_PAGETYPE_NAME(hugetlb), \
- DEF_PAGETYPE_NAME(offline), \
- DEF_PAGETYPE_NAME(guard), \
- DEF_PAGETYPE_NAME(table), \
- DEF_PAGETYPE_NAME(buddy)
-
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC64)
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 76bd42a96815..1d7c52821e55 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -448,7 +448,8 @@ TRACE_EVENT(netfs_folio,
),
TP_fast_assign(
- __entry->ino = folio->mapping->host->i_ino;
+ struct address_space *__m = READ_ONCE(folio->mapping);
+ __entry->ino = __m ? __m->host->i_ino : 0;
__entry->why = why;
__entry->index = folio_index(folio);
__entry->nr = folio_nr_pages(folio);
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
),
TP_fast_assign(
- __entry->node = zone_to_nid(zoneref->zone);
- __entry->zone_idx = zoneref->zone_idx;
+ __entry->node = zonelist_node_idx(zoneref);
+ __entry->zone_idx = zonelist_zone_idx(zoneref);
__entry->order = order;
__entry->reclaimable = reclaimable;
__entry->available = available;
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index a96a985c49b3..e6a72646c507 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -2172,6 +2172,29 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
+TRACE_EVENT(svcrdma_device_removal,
+ TP_PROTO(
+ const struct rdma_cm_id *id
+ ),
+
+ TP_ARGS(id),
+
+ TP_STRUCT__entry(
+ __string(name, id->device->name)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ memcpy(__entry->addr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+	TP_printk("device %s to be removed, disconnecting %pISpc",
+ __get_str(name), __entry->addr
+ )
+);
+
DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
const struct svcxprt_rdma *rdma,
diff --git a/include/trace/events/sched_ext.h b/include/trace/events/sched_ext.h
new file mode 100644
index 000000000000..fe19da7315a9
--- /dev/null
+++ b/include/trace/events/sched_ext.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched_ext
+
+#if !defined(_TRACE_SCHED_EXT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_EXT_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sched_ext_dump,
+
+ TP_PROTO(const char *line),
+
+ TP_ARGS(line),
+
+ TP_STRUCT__entry(
+ __string(line, line)
+ ),
+
+ TP_fast_assign(
+ __assign_str(line);
+ ),
+
+ TP_printk("%s",
+ __get_str(line)
+ )
+);
+
+#endif /* _TRACE_SCHED_EXT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h
index 7b221d51133a..c82233e950ac 100644
--- a/include/trace/misc/nfs.h
+++ b/include/trace/misc/nfs.h
@@ -51,6 +51,7 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_IO, "IO" }, \
{ NFSERR_NXIO, "NXIO" }, \
{ ECHILD, "CHILD" }, \
+ { ETIMEDOUT, "TIMEDOUT" }, \
{ NFSERR_EAGAIN, "AGAIN" }, \
{ NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2d84a8052b15..78abd819fd62 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -703,6 +703,31 @@ extern "C" {
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
+ * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
+ * on integrated graphics
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all compressible GEM objects.
+ */
+#define I915_FORMAT_MOD_4_TILED_LNL_CCS fourcc_mod_code(INTEL, 16)
+
+/*
+ * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression
+ * on discrete graphics
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all compressible GEM objects. The GEM object must be stored in
+ * contiguous memory with a size aligned to 64KB.
+ */
+#define I915_FORMAT_MOD_4_TILED_BMG_CCS fourcc_mod_code(INTEL, 17)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index d390011b89b4..c082810c08a8 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -859,6 +859,8 @@ struct drm_color_lut {
/**
* struct drm_plane_size_hint - Plane size hints
+ * @width: The width of the plane in pixel
+ * @height: The height of the plane in pixel
*
* The plane SIZE_HINTS property blob contains an
* array of struct drm_plane_size_hint.
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 3fca72f73861..2377147b6af0 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -88,6 +88,8 @@ struct drm_msm_timespec {
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
#define MSM_PARAM_RAYTRACING 0x11 /* RO */
+#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
+#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index db232a25189e..b6fbe4988f2e 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -517,7 +517,14 @@ struct drm_xe_query_gt_list {
* available per Dual Sub Slices (DSS). For example a query response
* containing the following in mask:
* ``EU_PER_DSS ff ff 00 00 00 00 00 00``
- * means each DSS has 16 EU.
+ *   means each DSS has 16 SIMD8 EUs. This type may be omitted if the
+ *   device doesn't have SIMD8 EUs.
+ * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
+ * Units (EU) available per Dual Sub Slices (DSS). For example a query
+ * response containing the following in mask:
+ * ``SIMD16_EU_PER_DSS ff ff 00 00 00 00 00 00``
+ *   means each DSS has 16 SIMD16 EUs. This type may be omitted if the
+ *   device doesn't have SIMD16 EUs.
*/
struct drm_xe_query_topology_mask {
/** @gt_id: GT ID the mask is associated with */
@@ -527,6 +534,7 @@ struct drm_xe_query_topology_mask {
#define DRM_XE_TOPO_DSS_COMPUTE 2
#define DRM_XE_TOPO_L3_BANK 3
#define DRM_XE_TOPO_EU_PER_DSS 4
+#define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5
/** @type: type of mask */
__u16 type;
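
A userspace sketch of interpreting such a mask (illustrative): population-count the bytes, so "ff ff 00 ..." decodes to 16 EUs per DSS:

    #include <stdint.h>

    static unsigned int eus_per_dss(const uint8_t *mask, unsigned int num_bytes)
    {
        unsigned int i, count = 0;

        for (i = 0; i < num_bytes; i++)
            count += __builtin_popcount(mask[i]);
        return count;
    }
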
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index d44a8118b2ed..1fd92021a573 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -236,6 +236,12 @@ struct binder_frozen_status_info {
__u32 async_recv;
};
+struct binder_frozen_state_info {
+ binder_uintptr_t cookie;
+ __u32 is_frozen;
+ __u32 reserved;
+};
+
/* struct binder_extened_error - extended error information
* @id: identifier for the failed operation
* @command: command as defined by binder_driver_return_protocol
@@ -467,6 +473,17 @@ enum binder_driver_return_protocol {
/*
* The target of the last async transaction is frozen. No parameters.
*/
+
+ BR_FROZEN_BINDER = _IOR('r', 21, struct binder_frozen_state_info),
+ /*
+ * The cookie and a boolean (is_frozen) that indicates whether the process
+ * transitioned into a frozen or an unfrozen state.
+ */
+
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE = _IOR('r', 22, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
};
enum binder_driver_command_protocol {
@@ -550,6 +567,25 @@ enum binder_driver_command_protocol {
/*
* binder_transaction_data_sg: the sent command.
*/
+
+ BC_REQUEST_FREEZE_NOTIFICATION =
+ _IOW('c', 19, struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_CLEAR_FREEZE_NOTIFICATION = _IOW('c', 20,
+ struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_FREEZE_NOTIFICATION_DONE = _IOW('c', 21, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
};
#endif /* _UAPI_LINUX_BINDER_H */
diff --git a/include/uapi/linux/bits.h b/include/uapi/linux/bits.h
index 3c2a101986a3..5ee30f882736 100644
--- a/include/uapi/linux/bits.h
+++ b/include/uapi/linux/bits.h
@@ -12,4 +12,7 @@
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
(~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+#define __GENMASK_U128(h, l) \
+ ((_BIT128((h)) << 1) - (_BIT128(l)))
+
#endif /* _UAPI_LINUX_BITS_H */
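
A quick check of the arithmetic (illustrative): __GENMASK_U128(71, 64) computes (1 << 72) - (1 << 64), i.e. bits 64..71 set, which no 64-bit GENMASK() can express:

    #include <linux/bits.h>

    /* Bits 64..71 set: 0xff shifted up by 64, representable only as __int128. */
    static const unsigned __int128 example_mask = __GENMASK_U128(71, 64);
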
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index e05b39e39c3f..c6cd7c7aeeee 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5519,11 +5519,12 @@ union bpf_attr {
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
- * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * void *bpf_kptr_xchg(void *dst, void *ptr)
* Description
- * Exchange kptr at pointer *map_value* with *ptr*, and return the
- * old value. *ptr* can be NULL, otherwise it must be a referenced
- * pointer which will be released when this helper is called.
+ * Exchange kptr at pointer *dst* with *ptr*, and return the old value.
+ * *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
+ * it must be a referenced pointer which will be released when this helper
+ * is called.
* Return
* The old value of kptr (which can be NULL). The returned pointer
* if not NULL, is a reference which must be released using its
@@ -7513,4 +7514,13 @@ struct bpf_iter_num {
__u64 __opaque[1];
} __attribute__((aligned(8)));
+/*
+ * Flags to control BPF kfunc behaviour.
+ * - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
+ * helper documentation for details.)
+ */
+enum bpf_kfunc_flags {
+ BPF_F_PAD_ZEROS = (1ULL << 0),
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
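
A BPF-program-side sketch of the widened bpf_kptr_xchg() contract; struct node, node_release(), and the surrounding vmlinux.h/bpf_helpers.h setup are hypothetical, not defined by this patch:

    struct node;
    extern void node_release(struct node *n) __ksym;

    struct map_value {
        struct node __kptr *ptr;
    };

    static int store_node(struct map_value *v, struct node *new_node)
    {
        struct node *old;

        /* Ownership of new_node transfers into the map value slot. */
        old = bpf_kptr_xchg(&v->ptr, new_node);
        if (old)
            node_release(old);  /* drop the displaced reference */
        return 0;
    }
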
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index b8e071abaea5..b2af1dddd4d7 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -132,6 +132,8 @@ static inline void cec_msg_init(struct cec_msg *msg,
* Set the msg destination to the orig initiator and the msg initiator to the
* orig destination. Note that msg and orig may be the same pointer, in which
* case the change is done in place.
+ *
+ * It also zeroes the reply, timeout and flags fields.
*/
static inline void cec_msg_set_reply_to(struct cec_msg *msg,
struct cec_msg *orig)
@@ -139,7 +141,9 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
/* The destination becomes the initiator and vice versa */
msg->msg[0] = (cec_msg_destination(orig) << 4) |
cec_msg_initiator(orig);
- msg->reply = msg->timeout = 0;
+ msg->reply = 0;
+ msg->timeout = 0;
+ msg->flags = 0;
}
/**
@@ -165,6 +169,7 @@ static inline int cec_msg_recv_is_rx_result(const struct cec_msg *msg)
/* cec_msg flags field */
#define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0)
#define CEC_MSG_FL_RAW (1 << 1)
+#define CEC_MSG_FL_REPLY_VENDOR_ID (1 << 2)
/* cec_msg tx/rx_status field */
#define CEC_TX_STATUS_OK (1 << 0)
@@ -339,6 +344,8 @@ static inline int cec_is_unconfigured(__u16 log_addr_mask)
#define CEC_CAP_MONITOR_PIN (1 << 7)
/* CEC_ADAP_G_CONNECTOR_INFO is available */
#define CEC_CAP_CONNECTOR_INFO (1 << 8)
+/* CEC_MSG_FL_REPLY_VENDOR_ID is available */
+#define CEC_CAP_REPLY_VENDOR_ID (1 << 9)
/**
* struct cec_caps - CEC capabilities structure.
diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
index a429381e7ca5..e16be0d37746 100644
--- a/include/uapi/linux/const.h
+++ b/include/uapi/linux/const.h
@@ -28,6 +28,23 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __BIT128() would not work in asm code, as it shifts an
+ * 'unsigned __int128' data type; direct representation of
+ * 128-bit constants is not supported by the gcc compiler, as
+ * they get silently truncated.
+ *
+ * TODO: Revisit this implementation once the gcc compiler
+ * can represent 128-bit constants directly, like long and
+ * unsigned long. GENMASK_U128() could then also gain asm
+ * support, and this comment could be dropped.
+ */
+#define _BIT128(x) ((unsigned __int128)(1) << (x))
+#endif
+
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
diff --git a/include/uapi/linux/exfat.h b/include/uapi/linux/exfat.h
new file mode 100644
index 000000000000..46d95b16fc4b
--- /dev/null
+++ b/include/uapi/linux/exfat.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2024 Unisoc Technologies Co., Ltd.
+ */
+
+#ifndef _UAPI_LINUX_EXFAT_H
+#define _UAPI_LINUX_EXFAT_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * exfat-specific ioctl commands
+ */
+
+#define EXFAT_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+/*
+ * Flags used by EXFAT_IOC_SHUTDOWN
+ */
+
+#define EXFAT_GOING_DOWN_DEFAULT 0x0 /* default with full sync */
+#define EXFAT_GOING_DOWN_FULLSYNC 0x1 /* going down with full sync */
+#define EXFAT_GOING_DOWN_NOSYNC 0x2 /* going down */
+
+#endif /* _UAPI_LINUX_EXFAT_H */
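
A hedged userspace sketch of the new ioctl; the mount point path is illustrative and error handling is minimal:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/exfat.h>

	int main(void)
	{
		__u32 flags = EXFAT_GOING_DOWN_FULLSYNC;
		int fd = open("/mnt/exfat", O_RDONLY);

		if (fd < 0 || ioctl(fd, EXFAT_IOC_SHUTDOWN, &flags) < 0)
			perror("EXFAT_IOC_SHUTDOWN");
		if (fd >= 0)
			close(fd);
		return 0;
	}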
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index d08b99d60f6f..f1e99458e29e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -217,6 +217,9 @@
* - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag
* - add FUSE_NO_EXPORT_SUPPORT init flag
* - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag
+ *
+ * 7.41
+ * - add FUSE_ALLOW_IDMAP
*/
#ifndef _LINUX_FUSE_H
@@ -252,7 +255,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 40
+#define FUSE_KERNEL_MINOR_VERSION 41
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -421,6 +424,7 @@ struct fuse_file_lock {
* FUSE_NO_EXPORT_SUPPORT: explicitly disable export support
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
* of the request ID indicates resend requests
+ * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -466,6 +470,7 @@ struct fuse_file_lock {
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
+#define FUSE_ALLOW_IDMAP (1ULL << 40)
/**
* CUSE INIT request/reply flags
@@ -984,6 +989,21 @@ struct fuse_fallocate_in {
*/
#define FUSE_UNIQUE_RESEND (1ULL << 63)
+/**
+ * This value will be set by the kernel in the
+ * (struct fuse_in_header).{uid,gid} fields when:
+ * - the fuse daemon enabled FUSE_ALLOW_IDMAP
+ * - idmapping information is not available and the uid/gid
+ *   cannot be mapped in accordance with an idmapping.
+ *
+ * Note: idmapping information is always available
+ * for inode creation operations like:
+ * FUSE_MKNOD, FUSE_SYMLINK, FUSE_MKDIR, FUSE_TMPFILE,
+ * FUSE_CREATE and FUSE_RENAME2 (with RENAME_WHITEOUT).
+ */
+#define FUSE_INVALID_UIDGID ((uint32_t)(-1))
+
struct fuse_in_header {
uint32_t len;
uint32_t opcode;
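
A daemon-side sketch of the sentinel, assuming a FUSE server that negotiated FUSE_ALLOW_IDMAP in INIT (the helper name is illustrative):

	#include <linux/fuse.h>

	/* Requests may now carry FUSE_INVALID_UIDGID when no idmapping could
	 * be applied; such requests must not be trusted for ownership checks. */
	static int req_has_mapped_owner(const struct fuse_in_header *in)
	{
		return in->uid != FUSE_INVALID_UIDGID &&
		       in->gid != FUSE_INVALID_UIDGID;
	}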
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 9dc5bb428c8a..1fe79e750470 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -609,8 +609,8 @@ enum io_uring_register_op {
IORING_REGISTER_CLOCK = 29,
- /* copy registered buffers from source ring to current ring */
- IORING_REGISTER_COPY_BUFFERS = 30,
+ /* clone registered buffers from source ring to current ring */
+ IORING_REGISTER_CLONE_BUFFERS = 30,
/* this goes last */
IORING_REGISTER_LAST,
@@ -701,7 +701,7 @@ enum {
IORING_REGISTER_SRC_REGISTERED = 1,
};
-struct io_uring_copy_buffers {
+struct io_uring_clone_buffers {
__u32 src_fd;
__u32 flags;
__u32 pad[6];
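
A hedged sketch of the renamed opcode via the raw register syscall; both fds are assumed to be io_uring instances, with fixed buffers already registered on the source ring:

	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/io_uring.h>

	static int clone_registered_buffers(int dst_ring_fd, int src_ring_fd)
	{
		struct io_uring_clone_buffers arg = {
			.src_fd = src_ring_fd,
			.flags = 0,	/* src_fd is a plain ring fd, so
					 * IORING_REGISTER_SRC_REGISTERED
					 * is left unset */
		};

		return syscall(__NR_io_uring_register, dst_ring_fd,
			       IORING_REGISTER_CLONE_BUFFERS, &arg, 1);
	}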
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 4dde745cfb7e..72010f71c5e4 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -4,8 +4,8 @@
#ifndef _UAPI_IOMMUFD_H
#define _UAPI_IOMMUFD_H
-#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
#define IOMMUFD_TYPE (';')
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 6f2f2720f3ac..ff8032227876 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -7,7 +7,7 @@
*/
#define KPF_LOCKED 0
-#define KPF_ERROR 1
+#define KPF_ERROR 1 /* Now unused */
#define KPF_REFERENCED 2
#define KPF_UPTODATE 3
#define KPF_DIRTY 4
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 285a36601dc9..717307d6b5b7 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -42,9 +42,10 @@
* - 1.14 - Update kfd_event_data
* - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
* - 1.16 - Add contiguous VRAM allocation flag
+ * - 1.17 - Add SDMA queue creation with target SDMA engine ID
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 16
+#define KFD_IOCTL_MINOR_VERSION 17
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -56,6 +57,7 @@ struct kfd_ioctl_get_version_args {
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
+#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID 0x4
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
@@ -78,6 +80,8 @@ struct kfd_ioctl_create_queue_args {
__u64 ctx_save_restore_address; /* to KFD */
__u32 ctx_save_restore_size; /* to KFD */
__u32 ctl_stack_size; /* to KFD */
+ __u32 sdma_engine_id; /* to KFD */
+ __u32 pad;
};
struct kfd_ioctl_destroy_queue_args {
@@ -536,26 +540,29 @@ enum kfd_smi_event {
KFD_SMI_EVENT_ALL_PROCESS = 64
};
+/* The reason for the page migration event */
enum KFD_MIGRATE_TRIGGERS {
- KFD_MIGRATE_TRIGGER_PREFETCH,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
- KFD_MIGRATE_TRIGGER_TTM_EVICTION
+ KFD_MIGRATE_TRIGGER_PREFETCH, /* Prefetch to GPU VRAM or system memory */
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, /* GPU page fault recover */
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, /* CPU page fault recover */
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION /* TTM eviction */
};
+/* The reason for the user queue eviction event */
enum KFD_QUEUE_EVICTION_TRIGGERS {
- KFD_QUEUE_EVICTION_TRIGGER_SVM,
- KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
- KFD_QUEUE_EVICTION_TRIGGER_TTM,
- KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
- KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
- KFD_QUEUE_EVICTION_CRIU_RESTORE
+ KFD_QUEUE_EVICTION_TRIGGER_SVM, /* SVM buffer migration */
+ KFD_QUEUE_EVICTION_TRIGGER_USERPTR, /* userptr movement */
+ KFD_QUEUE_EVICTION_TRIGGER_TTM, /* TTM move buffer */
+ KFD_QUEUE_EVICTION_TRIGGER_SUSPEND, /* GPU suspend */
+ KFD_QUEUE_EVICTION_CRIU_CHECKPOINT, /* CRIU checkpoint */
+ KFD_QUEUE_EVICTION_CRIU_RESTORE /* CRIU restore */
};
+/* The reason for the unmap-from-GPU event */
enum KFD_SVM_UNMAP_TRIGGERS {
- KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
- KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
- KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY, /* MMU notifier CPU buffer movement */
+ KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,/* MMU notifier page migration */
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU /* Unmap to free the buffer */
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
@@ -566,6 +573,77 @@ struct kfd_ioctl_smi_events_args {
__u32 anon_fd; /* from KFD */
};
+/*
+ * SVM event tracing via the SMI system management interface
+ *
+ * Open an event file descriptor
+ * Use the AMDKFD_IOC_SMI_EVENTS ioctl, pass in a gpuid, and get back an
+ * anonymous file descriptor to receive SMI events.
+ * If called with root permission, the file descriptor can be used to receive
+ * SVM events from all processes; otherwise it only receives SVM events of the
+ * same process.
+ *
+ * To enable an SVM event
+ * Write the KFD_SMI_EVENT_MASK_FROM_INDEX(event) bitmap mask to the event file
+ * descriptor to start recording the event to the kfifo; combine bitmap masks
+ * for multiple events. A new event mask overwrites the previous event mask.
+ * The KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS) bit requires
+ * root permission to receive SVM events from all processes.
+ *
+ * To receive an event
+ * The application can poll the file descriptor to wait for events, then read
+ * events from the file into a buffer. Each event is a one-line string message,
+ * starting with the event id, followed by the event-specific information.
+ *
+ * To decode event information
+ * The following event format string macros can be used with sscanf to decode
+ * the specific event information.
+ * event triggers: the reason the event was generated, defined as enums for the
+ * unmap, eviction and migrate events.
+ * node, from, to, prefetch_loc, preferred_loc: GPU ID, or 0 for system memory.
+ * addr: user mode address, in pages
+ * size: in pages
+ * pid: the process ID that generated the event
+ * ns: timestamp in nanosecond resolution, starts at system boot time but
+ * stops during suspend
+ * migrate_update: whether the GPU page fault was recovered by migration ('M')
+ * or by an update ('U')
+ * rw: 'W' for a write page fault, 'R' for a read page fault
+ * rescheduled: 'R' if the queue restore failed and was rescheduled to try again
+ */
+#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
+ "%x %s\n", (reset_seq_num), (reset_cause)
+
+#define KFD_EVENT_FMT_THERMAL_THROTTLING(bitmask, counter)\
+ "%llx:%llx\n", (bitmask), (counter)
+
+#define KFD_EVENT_FMT_VMFAULT(pid, task_name)\
+ "%x:%s\n", (pid), (task_name)
+
+#define KFD_EVENT_FMT_PAGEFAULT_START(ns, pid, addr, node, rw)\
+ "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)
+
+#define KFD_EVENT_FMT_PAGEFAULT_END(ns, pid, addr, node, migrate_update)\
+ "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)
+
+#define KFD_EVENT_FMT_MIGRATE_START(ns, pid, start, size, from, to, prefetch_loc,\
+ preferred_loc, migrate_trigger)\
+ "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
+ (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)
+
+#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger)\
+ "%lld -%d @%lx(%lx) %x->%x %d\n", (ns), (pid), (start), (size),\
+ (from), (to), (migrate_trigger)
+
+#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
+ "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)
+
+#define KFD_EVENT_FMT_QUEUE_RESTORE(ns, pid, node, rescheduled)\
+ "%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)
+
+#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns, pid, addr, size, node, unmap_trigger)\
+ "%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size),\
+ (node), (unmap_trigger)
+
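
As a hedged illustration of the decode step described above, the sketch below parses one page-fault-start event line; it assumes the KFD_SMI_EVENT_PAGE_FAULT_START id from the kfd_smi_event enum earlier in this header, and passes pointers as the macro arguments so the same format macro drives sscanf():

	#include <stdio.h>
	#include <string.h>
	#include <linux/kfd_ioctl.h>

	/* Decode one event line read from the SMI event fd: the event id
	 * prefix is parsed first, then the remainder is consumed with the
	 * format macro. */
	static void decode_pagefault_start(const char *line)
	{
		unsigned int event_id, node;
		unsigned long addr;
		long long ns;
		const char *info;
		int pid;
		char rw;

		if (sscanf(line, "%x ", &event_id) != 1 ||
		    event_id != KFD_SMI_EVENT_PAGE_FAULT_START)
			return;

		info = strchr(line, ' ');
		if (!info)
			return;

		sscanf(info + 1, KFD_EVENT_FMT_PAGEFAULT_START(&ns, &pid, &addr,
							       &node, &rw));
		printf("pid %d fault at 0x%lx on node 0x%x (%c)\n",
		       pid, addr, node, rw);
	}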
/**************************************************************************************************
* CRIU IOCTLs (Checkpoint Restore In Userspace)
*
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 2c8dbc74b955..33745642f787 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -44,6 +44,12 @@ struct landlock_ruleset_attr {
* flags`_).
*/
__u64 handled_access_net;
+ /**
+ * @scoped: Bitmask of scopes (cf. `Scope flags`_)
+ * restricting a Landlock domain from accessing outside
+ * resources (e.g. IPCs).
+ */
+ __u64 scoped;
};
/*
@@ -274,4 +280,28 @@ struct landlock_net_port_attr {
#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0)
#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1)
/* clang-format on */
+
+/**
+ * DOC: scope
+ *
+ * Scope flags
+ * ~~~~~~~~~~~
+ *
+ * These flags make it possible to isolate a sandboxed process from a set of
+ * IPC actions. Setting a flag for a ruleset isolates the Landlock domain,
+ * forbidding connections to resources outside the domain.
+ *
+ * Scopes:
+ *
+ * - %LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET: Restrict a sandboxed process from
+ * connecting to an abstract UNIX socket created by a process outside the
+ * related Landlock domain (e.g. a parent domain or a non-sandboxed process).
+ * - %LANDLOCK_SCOPE_SIGNAL: Restrict a sandboxed process from sending a signal
+ * to another process outside the domain.
+ */
+/* clang-format off */
+#define LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET (1ULL << 0)
+#define LANDLOCK_SCOPE_SIGNAL (1ULL << 1)
+/* clang-format on */
+
#endif /* _UAPI_LINUX_LANDLOCK_H */
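
A minimal sketch of the new @scoped field, assuming glibc headers new enough to define the SYS_landlock_* numbers; error handling is abbreviated:

	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/landlock.h>

	int main(void)
	{
		struct landlock_ruleset_attr attr = {
			.scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
				  LANDLOCK_SCOPE_SIGNAL,
		};
		int ruleset_fd = syscall(SYS_landlock_create_ruleset,
					 &attr, sizeof(attr), 0);

		if (ruleset_fd < 0)
			return 1;
		prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
		return syscall(SYS_landlock_restrict_self, ruleset_fd, 0);
	}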
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 94c00996e633..12323b3334a9 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -634,9 +634,11 @@
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
-#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_RRS_SVE 0x0010 /* Config RRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_CRSSVE PCI_EXP_RTCTL_RRS_SVE /* compatibility */
#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
-#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
+#define PCI_EXP_RTCAP_RRS_SV 0x0001 /* Config RRS Software Visibility */
+#define PCI_EXP_RTCAP_CRSVIS PCI_EXP_RTCAP_RRS_SV /* compatibility */
#define PCI_EXP_RTSTA 0x20 /* Root Status */
#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
@@ -740,6 +742,7 @@
#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_NPEM 0x29 /* Native PCIe Enclosure Management */
#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
@@ -1121,6 +1124,40 @@
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0
#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4
+/* Native PCIe Enclosure Management */
+#define PCI_NPEM_CAP 0x04 /* NPEM capability register */
+#define PCI_NPEM_CAP_CAPABLE 0x00000001 /* NPEM Capable */
+
+#define PCI_NPEM_CTRL 0x08 /* NPEM control register */
+#define PCI_NPEM_CTRL_ENABLE 0x00000001 /* NPEM Enable */
+
+/*
+ * Native PCIe Enclosure Management indication bits and Reset command bit
+ * are corresponding for capability and control registers.
+ */
+#define PCI_NPEM_CMD_RESET 0x00000002 /* Reset Command */
+#define PCI_NPEM_IND_OK 0x00000004 /* OK */
+#define PCI_NPEM_IND_LOCATE 0x00000008 /* Locate */
+#define PCI_NPEM_IND_FAIL 0x00000010 /* Fail */
+#define PCI_NPEM_IND_REBUILD 0x00000020 /* Rebuild */
+#define PCI_NPEM_IND_PFA 0x00000040 /* Predicted Failure Analysis */
+#define PCI_NPEM_IND_HOTSPARE 0x00000080 /* Hot Spare */
+#define PCI_NPEM_IND_ICA 0x00000100 /* In Critical Array */
+#define PCI_NPEM_IND_IFA 0x00000200 /* In Failed Array */
+#define PCI_NPEM_IND_IDT 0x00000400 /* Device Type */
+#define PCI_NPEM_IND_DISABLED 0x00000800 /* Disabled */
+#define PCI_NPEM_IND_SPEC_0 0x01000000
+#define PCI_NPEM_IND_SPEC_1 0x02000000
+#define PCI_NPEM_IND_SPEC_2 0x04000000
+#define PCI_NPEM_IND_SPEC_3 0x08000000
+#define PCI_NPEM_IND_SPEC_4 0x10000000
+#define PCI_NPEM_IND_SPEC_5 0x20000000
+#define PCI_NPEM_IND_SPEC_6 0x40000000
+#define PCI_NPEM_IND_SPEC_7 0x80000000
+
+#define PCI_NPEM_STATUS 0x0c /* NPEM status register */
+#define PCI_NPEM_STATUS_CC 0x00000001 /* Command Completed */
+
/* Data Object Exchange */
#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */
#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */
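
A kernel-side sketch of driving the new NPEM registers (not the in-tree NPEM driver); `pos` is assumed to come from pci_find_ext_capability(dev, PCI_EXT_CAP_ID_NPEM), and Command Completed polling is left to the caller:

	#include <linux/pci.h>

	static void npem_set_locate(struct pci_dev *dev, u16 pos)
	{
		u32 cap, ctrl;

		/* The indication bits sit at the same positions in the
		 * capability and control registers. */
		pci_read_config_dword(dev, pos + PCI_NPEM_CAP, &cap);
		if (!(cap & PCI_NPEM_CAP_CAPABLE) || !(cap & PCI_NPEM_IND_LOCATE))
			return;

		pci_read_config_dword(dev, pos + PCI_NPEM_CTRL, &ctrl);
		ctrl |= PCI_NPEM_CTRL_ENABLE | PCI_NPEM_IND_LOCATE;
		pci_write_config_dword(dev, pos + PCI_NPEM_CTRL, ctrl);
		/* then poll pos + PCI_NPEM_STATUS for PCI_NPEM_STATUS_CC */
	}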
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
index 6eeaf8bf2362..430daceafac7 100644
--- a/include/uapi/linux/rkisp1-config.h
+++ b/include/uapi/linux/rkisp1-config.h
@@ -165,6 +165,11 @@
#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6
/*
+ * Compand
+ */
+#define RKISP1_CIF_ISP_COMPAND_NUM_POINTS 64
+
+/*
* Measurement types
*/
#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
@@ -851,6 +856,39 @@ struct rkisp1_params_cfg {
struct rkisp1_cif_isp_isp_other_cfg others;
};
+/**
+ * struct rkisp1_cif_isp_compand_bls_config - Rockchip ISP1 Companding parameters (BLS)
+ * @r: Fixed subtraction value for Bayer pattern R
+ * @gr: Fixed subtraction value for Bayer pattern Gr
+ * @gb: Fixed subtraction value for Bayer pattern Gb
+ * @b: Fixed subtraction value for Bayer pattern B
+ *
+ * The values will be subtracted from the sensor values. Note that unlike the
+ * dedicated BLS block, the BLS values in the compander are 20-bit unsigned.
+ */
+struct rkisp1_cif_isp_compand_bls_config {
+ __u32 r;
+ __u32 gr;
+ __u32 gb;
+ __u32 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_compand_curve_config - Rockchip ISP1 Companding
+ * parameters (expand and compression curves)
+ * @px: Compand curve x-values. Each value stores the distance from the
+ * previous x-value, expressed as log2 of the distance on 5 bits.
+ * @x: Compand curve x-values. The functionality of these parameters is
+ * unknown due to a lack of hardware documentation, but they are left
+ * here for future compatibility purposes.
+ * @y: Compand curve y-values
+ */
+struct rkisp1_cif_isp_compand_curve_config {
+ __u8 px[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+ __u32 x[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+ __u32 y[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+};
+
/*---------- PART2: Measurement Statistics ------------*/
/**
@@ -996,4 +1034,544 @@ struct rkisp1_stat_buffer {
struct rkisp1_cif_isp_stat params;
};
+/*---------- PART3: Extensible Configuration Parameters ------------*/
+
+/**
+ * enum rkisp1_ext_params_block_type - RkISP1 extensible params block type
+ *
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS: Black level subtraction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC: Defect pixel cluster correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG: Sensor de-gamma
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN: Auto white balance gains
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT: ISP filtering
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM: Bayer de-mosaic
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK: Cross-talk correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC: Gamma out correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF: De-noise pre-filter
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH: De-noise pre-filter strength
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC: Color processing
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_IE: Image effects
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC: Lens shading correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS: Auto white balance statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS: Histogram statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS: Auto exposure statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS: Auto-focus statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS: BLS in the compand block
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND: Companding expand curve
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS: Companding compress curve
+ */
+enum rkisp1_ext_params_block_type {
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_IE,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS,
+};
+
+#define RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE (1U << 0)
+#define RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE (1U << 1)
+
+/**
+ * struct rkisp1_ext_params_block_header - RkISP1 extensible parameters block
+ * header
+ *
+ * This structure represents the common part of all the ISP configuration
+ * blocks. Each parameters block shall embed an instance of this structure type
+ * as its first member, followed by the block-specific configuration data. The
+ * driver inspects this common header to discern the block type and its size and
+ * properly handle the block content by casting it to the correct block-specific
+ * type.
+ *
+ * The @type field is one of the values enumerated by
+ * :c:type:`rkisp1_ext_params_block_type` and specifies how the data should be
+ * interpreted by the driver. The @size field specifies the size of the
+ * parameters block and is used by the driver for validation purposes.
+ *
+ * The @flags field is a bitmask of per-block flags RKISP1_EXT_PARAMS_FL_*.
+ *
+ * When userspace wants to configure and enable an ISP block it shall fully
+ * populate the block configuration and set the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE bit in the @flags field.
+ *
+ * When userspace simply wants to disable an ISP block, the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bit should be set in the @flags field.
+ * The driver ignores the rest of the block configuration structure in this
+ * case.
+ *
+ * If a new configuration of an ISP block has to be applied, userspace shall
+ * fully populate the ISP block configuration and omit setting the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE and RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bits
+ * in the @flags field.
+ *
+ * Setting both the RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE and
+ * RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bits in the @flags field is not allowed
+ * and not accepted by the driver.
+ *
+ * Userspace is responsible for correctly populating the parameters block header
+ * fields (@type, @flags and @size) and the block-specific parameters.
+ *
+ * For example:
+ *
+ * .. code-block:: c
+ *
+ * void populate_bls(struct rkisp1_ext_params_block_header *block) {
+ * struct rkisp1_ext_params_bls_config *bls =
+ * (struct rkisp1_ext_params_bls_config *)block;
+ *
+ * bls->header.type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS;
+ * bls->header.flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE;
+ * bls->header.size = sizeof(*bls);
+ *
+ * bls->config.enable_auto = 0;
+ * bls->config.fixed_val.r = blackLevelRed_;
+ * bls->config.fixed_val.gr = blackLevelGreenR_;
+ * bls->config.fixed_val.gb = blackLevelGreenB_;
+ * bls->config.fixed_val.b = blackLevelBlue_;
+ * }
+ *
+ * @type: The parameters block type, see
+ * :c:type:`rkisp1_ext_params_block_type`
+ * @flags: A bitmask of block flags
+ * @size: Size (in bytes) of the parameters block, including this header
+ */
+struct rkisp1_ext_params_block_header {
+ __u16 type;
+ __u16 flags;
+ __u32 size;
+};
+
+/**
+ * struct rkisp1_ext_params_bls_config - RkISP1 extensible params BLS config
+ *
+ * RkISP1 extensible parameters Black Level Subtraction configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Black Level Subtraction configuration, see
+ * :c:type:`rkisp1_cif_isp_bls_config`
+ */
+struct rkisp1_ext_params_bls_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_bls_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpcc_config - RkISP1 extensible params DPCC config
+ *
+ * RkISP1 extensible parameters Defective Pixel Cluster Correction configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Defective Pixel Cluster Correction configuration, see
+ * :c:type:`rkisp1_cif_isp_dpcc_config`
+ */
+struct rkisp1_ext_params_dpcc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpcc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_sdg_config - RkISP1 extensible params SDG config
+ *
+ * RkISP1 extensible parameters Sensor Degamma configuration block. Identified
+ * by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Sensor Degamma configuration, see
+ * :c:type:`rkisp1_cif_isp_sdg_config`
+ */
+struct rkisp1_ext_params_sdg_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_sdg_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_lsc_config - RkISP1 extensible params LSC config
+ *
+ * RkISP1 extensible parameters Lens Shading Correction configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Lens Shading Correction configuration, see
+ * :c:type:`rkisp1_cif_isp_lsc_config`
+ */
+struct rkisp1_ext_params_lsc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_lsc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_awb_gain_config - RkISP1 extensible params AWB
+ * gain config
+ *
+ * RkISP1 extensible parameters Auto-White Balance Gains configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-White Balance Gains configuration, see
+ * :c:type:`rkisp1_cif_isp_awb_gain_config`
+ */
+struct rkisp1_ext_params_awb_gain_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_awb_gain_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_flt_config - RkISP1 extensible params FLT config
+ *
+ * RkISP1 extensible parameters Filter configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Filter configuration, see :c:type:`rkisp1_cif_isp_flt_config`
+ */
+struct rkisp1_ext_params_flt_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_flt_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_bdm_config - RkISP1 extensible params BDM config
+ *
+ * RkISP1 extensible parameters Demosaicing configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Demosaicing configuration, see :c:type:`rkisp1_cif_isp_bdm_config`
+ */
+struct rkisp1_ext_params_bdm_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_bdm_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_ctk_config - RkISP1 extensible params CTK config
+ *
+ * RkISP1 extensible parameters Cross-Talk configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Cross-Talk configuration, see :c:type:`rkisp1_cif_isp_ctk_config`
+ */
+struct rkisp1_ext_params_ctk_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_ctk_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_goc_config - RkISP1 extensible params GOC config
+ *
+ * RkISP1 extensible parameters Gamma-Out configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Gamma-Out configuration, see :c:type:`rkisp1_cif_isp_goc_config`
+ */
+struct rkisp1_ext_params_goc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_goc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpf_config - RkISP1 extensible params DPF config
+ *
+ * RkISP1 extensible parameters De-noise Pre-Filter configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: De-noise Pre-Filter configuration, see
+ * :c:type:`rkisp1_cif_isp_dpf_config`
+ */
+struct rkisp1_ext_params_dpf_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpf_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpf_strength_config - RkISP1 extensible params DPF
+ * strength config
+ *
+ * RkISP1 extensible parameters De-noise Pre-Filter strength configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: De-noise Pre-Filter strength configuration, see
+ * :c:type:`rkisp1_cif_isp_dpf_strength_config`
+ */
+struct rkisp1_ext_params_dpf_strength_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpf_strength_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_cproc_config - RkISP1 extensible params CPROC config
+ *
+ * RkISP1 extensible parameters Color Processing configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Color processing configuration, see
+ * :c:type:`rkisp1_cif_isp_cproc_config`
+ */
+struct rkisp1_ext_params_cproc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_cproc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_ie_config - RkISP1 extensible params IE config
+ *
+ * RkISP1 extensible parameters Image Effect configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_IE`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Image Effect configuration, see :c:type:`rkisp1_cif_isp_ie_config`
+ */
+struct rkisp1_ext_params_ie_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_ie_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_awb_meas_config - RkISP1 extensible params AWB
+ * Meas config
+ *
+ * RkISP1 extensible parameters Auto-White Balance Measurement configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-White Balance measure configuration, see
+ * :c:type:`rkisp1_cif_isp_awb_meas_config`
+ */
+struct rkisp1_ext_params_awb_meas_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_awb_meas_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_hst_config - RkISP1 extensible params Histogram config
+ *
+ * RkISP1 extensible parameters Histogram statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Histogram statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_hst_config`
+ */
+struct rkisp1_ext_params_hst_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_hst_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_aec_config - RkISP1 extensible params AEC config
+ *
+ * RkISP1 extensible parameters Auto-Exposure statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-Exposure statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_aec_config`
+ */
+struct rkisp1_ext_params_aec_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_aec_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_afc_config - RkISP1 extensible params AFC config
+ *
+ * RkISP1 extensible parameters Auto-Focus statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-Focus statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_afc_config`
+ */
+struct rkisp1_ext_params_afc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_afc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_compand_bls_config - RkISP1 extensible params
+ * Compand BLS config
+ *
+ * RkISP1 extensible parameters Companding configuration block (black level
+ * subtraction). Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Companding BLS configuration, see
+ * :c:type:`rkisp1_cif_isp_compand_bls_config`
+ */
+struct rkisp1_ext_params_compand_bls_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_compand_bls_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_compand_curve_config - RkISP1 extensible params
+ * Compand curve config
+ *
+ * RkISP1 extensible parameters Companding configuration block (expand and
+ * compression curves). Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND` or
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Companding curve configuration, see
+ * :c:type:`rkisp1_cif_isp_compand_curve_config`
+ */
+struct rkisp1_ext_params_compand_curve_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_compand_curve_config config;
+} __attribute__((aligned(8)));
+
+/*
+ * The rkisp1_ext_params_compand_curve_config structure is counted twice as it
+ * is used for both the COMPAND_EXPAND and COMPAND_COMPRESS block types.
+ */
+#define RKISP1_EXT_PARAMS_MAX_SIZE \
+ (sizeof(struct rkisp1_ext_params_bls_config) +\
+ sizeof(struct rkisp1_ext_params_dpcc_config) +\
+ sizeof(struct rkisp1_ext_params_sdg_config) +\
+ sizeof(struct rkisp1_ext_params_lsc_config) +\
+ sizeof(struct rkisp1_ext_params_awb_gain_config) +\
+ sizeof(struct rkisp1_ext_params_flt_config) +\
+ sizeof(struct rkisp1_ext_params_bdm_config) +\
+ sizeof(struct rkisp1_ext_params_ctk_config) +\
+ sizeof(struct rkisp1_ext_params_goc_config) +\
+ sizeof(struct rkisp1_ext_params_dpf_config) +\
+ sizeof(struct rkisp1_ext_params_dpf_strength_config) +\
+ sizeof(struct rkisp1_ext_params_cproc_config) +\
+ sizeof(struct rkisp1_ext_params_ie_config) +\
+ sizeof(struct rkisp1_ext_params_awb_meas_config) +\
+ sizeof(struct rkisp1_ext_params_hst_config) +\
+ sizeof(struct rkisp1_ext_params_aec_config) +\
+ sizeof(struct rkisp1_ext_params_afc_config) +\
+ sizeof(struct rkisp1_ext_params_compand_bls_config) +\
+ sizeof(struct rkisp1_ext_params_compand_curve_config) +\
+ sizeof(struct rkisp1_ext_params_compand_curve_config))
+
+/**
+ * enum rkisp1_ext_param_buffer_version - RkISP1 extensible parameters version
+ *
+ * @RKISP1_EXT_PARAM_BUFFER_V1: First version of RkISP1 extensible parameters
+ */
+enum rkisp1_ext_param_buffer_version {
+ RKISP1_EXT_PARAM_BUFFER_V1 = 1,
+};
+
+/**
+ * struct rkisp1_ext_params_cfg - RkISP1 extensible parameters configuration
+ *
+ * This struct contains the configuration parameters of the RkISP1 ISP
+ * algorithms, serialized by userspace into a data buffer. Each configuration
+ * parameter block is represented by a block-specific structure which contains a
+ * :c:type:`rkisp1_ext_params_block_header` entry as its first member. Userspace
+ * populates the @data buffer with configuration parameters for the blocks that
+ * it intends to configure. As a consequence, the data buffer effective size
+ * changes according to the number of ISP blocks that userspace intends to
+ * configure and is set by userspace in the @data_size field.
+ *
+ * The parameters buffer is versioned by the @version field to allow modifying
+ * and extending its definition. Userspace shall populate the @version field to
+ * inform the driver about the version it intends to use. The driver will parse
+ * and handle the @data buffer according to the data layout specific to the
+ * indicated version and return an error if the desired version is not
+ * supported.
+ *
+ * Currently only the RKISP1_EXT_PARAM_BUFFER_V1 version is supported.
+ * When a new format version is added, a mechanism for userspace to query
+ * the supported format versions will be implemented in the form of a read-only
+ * V4L2 control. If such a control is not available, userspace should assume
+ * only RKISP1_EXT_PARAM_BUFFER_V1 is supported by the driver.
+ *
+ * For each ISP block that userspace wants to configure, a block-specific
+ * structure is appended to the @data buffer, one after the other without gaps
+ * in between nor overlaps. Userspace shall populate the @data_size field with
+ * the effective size, in bytes, of the @data buffer.
+ *
+ * The expected memory layout of the parameters buffer is::
+ *
+ * +-------------------- struct rkisp1_ext_params_cfg -------------------+
+ * | version = RKISP1_EXT_PARAM_BUFFER_V1; |
+ * | data_size = sizeof(struct rkisp1_ext_params_bls_config) |
+ * | + sizeof(struct rkisp1_ext_params_dpcc_config); |
+ * | +------------------------- data ---------------------------------+ |
+ * | | +------------- struct rkisp1_ext_params_bls_config -----------+ | |
+ * | | | +-------- struct rkisp1_ext_params_block_header ---------+ | | |
+ * | | | | type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS; | | | |
+ * | | | | flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE; | | | |
+ * | | | | size = sizeof(struct rkisp1_ext_params_bls_config); | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | | +---------- struct rkisp1_cif_isp_bls_config -------------+ | | |
+ * | | | | enable_auto = 0; | | | |
+ * | | | | fixed_val.r = 256; | | | |
+ * | | | | fixed_val.gr = 256; | | | |
+ * | | | | fixed_val.gb = 256; | | | |
+ * | | | | fixed_val.b = 256; | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | +------------ struct rkisp1_ext_params_dpcc_config -----------+ | |
+ * | | | +-------- struct rkisp1_ext_params_block_header ---------+ | | |
+ * | | | | type = RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC; | | | |
+ * | | | | flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE; | | | |
+ * | | | | size = sizeof(struct rkisp1_ext_params_dpcc_config); | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | | +---------- struct rkisp1_cif_isp_dpcc_config ------------+ | | |
+ * | | | | mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE; | | | |
+ * | | | | output_mode = | | | |
+ * | | | | RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER; | | | |
+ * | | | | set_use = ... ; | | | |
+ * | | | | ... = ... ; | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | +-------------------------------------------------------------+ | |
+ * | +-----------------------------------------------------------------+ |
+ * +---------------------------------------------------------------------+
+ *
+ * @version: The RkISP1 extensible parameters buffer version, see
+ * :c:type:`rkisp1_ext_param_buffer_version`
+ * @data_size: The RkISP1 configuration data effective size, excluding this
+ * header
+ * @data: The RkISP1 extensible configuration data blocks
+ */
+struct rkisp1_ext_params_cfg {
+ __u32 version;
+ __u32 data_size;
+ __u8 data[RKISP1_EXT_PARAMS_MAX_SIZE];
+};
+
#endif /* _UAPI_RKISP1_CONFIG_H */
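
A userspace sketch of serializing a single block into the V1 buffer described above; further blocks are appended back to back and @data_size grows accordingly:

	#include <string.h>
	#include <linux/rkisp1-config.h>

	static void params_init(struct rkisp1_ext_params_cfg *cfg)
	{
		struct rkisp1_ext_params_bls_config bls = {
			.header = {
				.type  = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
				.flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE,
				.size  = sizeof(bls),
			},
			.config = {
				.fixed_val = { .r = 256, .gr = 256,
					       .gb = 256, .b = 256 },
			},
		};

		cfg->version = RKISP1_EXT_PARAM_BUFFER_V1;
		memcpy(cfg->data, &bls, sizeof(bls));
		cfg->data_size = sizeof(bls);
	}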
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 3bac0a8ceab2..359a14cc76a4 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -118,6 +118,7 @@ struct clone_args {
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
#define SCHED_DEADLINE 6
+#define SCHED_EXT 7
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
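
A hedged userspace sketch of opting a thread into the new class; with no BPF scheduler loaded, sched_ext tasks are expected to behave like SCHED_NORMAL:

	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/sched.h>
	#include <linux/sched/types.h>

	int main(void)
	{
		struct sched_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.sched_policy = SCHED_EXT;

		/* pid 0 == the calling thread */
		return syscall(SYS_sched_setattr, 0, &attr, 0);
	}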
diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h
index 90662385689b..bf6e9ae031c1 100644
--- a/include/uapi/linux/sched/types.h
+++ b/include/uapi/linux/sched/types.h
@@ -58,9 +58,9 @@
*
* This is reflected by the following fields of the sched_attr structure:
*
- * @sched_deadline representative of the task's deadline
- * @sched_runtime representative of the task's runtime
- * @sched_period representative of the task's period
+ * @sched_deadline representative of the task's deadline in nanoseconds
+ * @sched_runtime representative of the task's runtime in nanoseconds
+ * @sched_period representative of the task's period in nanoseconds
*
* Given this task model, there are a multiplicity of scheduling algorithms
* and policies, that can be used to ensure all the tasks will make their
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
index ed2a96f43ce4..5a2af0942c9f 100644
--- a/include/uapi/linux/serio.h
+++ b/include/uapi/linux/serio.h
@@ -83,5 +83,6 @@
#define SERIO_PULSE8_CEC 0x40
#define SERIO_RAINSHADOW_CEC 0x41
#define SERIO_FSIA6B 0x42
+#define SERIO_EXTRON_DA_HD_4K_PLUS 0x43
#endif /* _UAPI_SERIO_H */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 44d73ba8788d..91f0f7e214a5 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -254,6 +254,9 @@ struct usb_ctrlrequest {
#define USB_DT_DEVICE_CAPABILITY 0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
#define USB_DT_WIRE_ADAPTER 0x21
+/* From USB Device Firmware Upgrade Specification, Revision 1.1 */
+#define USB_DT_DFU_FUNCTIONAL 0x21
+/* these are from the Wireless USB spec */
#define USB_DT_RPIPE 0x22
#define USB_DT_CS_RADIO_CONTROL 0x23
/* From the T10 UAS specification */
@@ -329,9 +332,10 @@ struct usb_device_descriptor {
#define USB_CLASS_USB_TYPE_C_BRIDGE 0x12
#define USB_CLASS_MISC 0xef
#define USB_CLASS_APP_SPEC 0xfe
-#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_SUBCLASS_DFU 0x01
-#define USB_SUBCLASS_VENDOR_SPEC 0xff
+#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_SUBCLASS_VENDOR_SPEC 0xff
/*-------------------------------------------------------------------------*/
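
A small sketch of how the new defines combine when matching a DFU interface; since descriptor type 0x21 is shared with USB_DT_WIRE_ADAPTER, it has to be interpreted in the context of such an interface:

	#include <linux/usb/ch9.h>

	static int is_dfu_interface(const struct usb_interface_descriptor *d)
	{
		return d->bInterfaceClass == USB_CLASS_APP_SPEC &&
		       d->bInterfaceSubClass == USB_SUBCLASS_DFU;
	}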
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 9f88de9c3d66..2ebdba111a8f 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -3,6 +3,7 @@
#define _UAPI__LINUX_FUNCTIONFS_H__
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/ioctl.h>
@@ -37,6 +38,31 @@ struct usb_endpoint_descriptor_no_audio {
__u8 bInterval;
} __attribute__((packed));
+/**
+ * struct usb_dfu_functional_descriptor - DFU Functional descriptor
+ * @bLength: Size of the descriptor (bytes)
+ * @bDescriptorType: USB_DT_DFU_FUNCTIONAL
+ * @bmAttributes: DFU attributes
+ * @wDetachTimeOut: Maximum time to wait after DFU_DETACH (ms, le16)
+ * @wTransferSize: Maximum number of bytes per control-write (le16)
+ * @bcdDFUVersion: DFU Spec version (BCD, le16)
+ */
+struct usb_dfu_functional_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bmAttributes;
+ __le16 wDetachTimeOut;
+ __le16 wTransferSize;
+ __le16 bcdDFUVersion;
+} __attribute__ ((packed));
+
+/* from DFU functional descriptor bmAttributes */
+#define DFU_FUNC_ATT_CAN_DOWNLOAD _BITUL(0)
+#define DFU_FUNC_ATT_CAN_UPLOAD _BITUL(1)
+#define DFU_FUNC_ATT_MANIFEST_TOLERANT _BITUL(2)
+#define DFU_FUNC_ATT_WILL_DETACH _BITUL(3)
+
+
struct usb_functionfs_descs_head_v2 {
__le32 magic;
__le32 length;
@@ -104,23 +130,38 @@ struct usb_ffs_dmabuf_transfer_req {
#ifndef __KERNEL__
-/*
+/**
+ * DOC: descriptors
+ *
* Descriptors format:
*
+ * +-----+-----------+--------------+--------------------------------------+
* | off | name | type | description |
- * |-----+-----------+--------------+--------------------------------------|
+ * +-----+-----------+--------------+--------------------------------------+
* | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC_V2 |
+ * +-----+-----------+--------------+--------------------------------------+
* | 4 | length | LE32 | length of the whole data chunk |
+ * +-----+-----------+--------------+--------------------------------------+
* | 8 | flags | LE32 | combination of functionfs_flags |
+ * +-----+-----------+--------------+--------------------------------------+
* | | eventfd | LE32 | eventfd file descriptor |
+ * +-----+-----------+--------------+--------------------------------------+
* | | fs_count | LE32 | number of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_count | LE32 | number of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | ss_count | LE32 | number of super-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | os_count | LE32 | number of MS OS descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | fs_descrs | Descriptor[] | list of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_descrs | Descriptor[] | list of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | ss_descrs | Descriptor[] | list of super-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | os_descrs | OSDesc[] | list of MS OS descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
*
* Depending on which flags are set, various fields may be missing in the
* structure. Any flags that are not recognised cause the whole block to be
@@ -128,71 +169,111 @@ struct usb_ffs_dmabuf_transfer_req {
*
* Legacy descriptors format (deprecated as of 3.14):
*
+ * +-----+-----------+--------------+--------------------------------------+
* | off | name | type | description |
- * |-----+-----------+--------------+--------------------------------------|
+ * +-----+-----------+--------------+--------------------------------------+
* | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC |
+ * +-----+-----------+--------------+--------------------------------------+
* | 4 | length | LE32 | length of the whole data chunk |
+ * +-----+-----------+--------------+--------------------------------------+
* | 8 | fs_count | LE32 | number of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | 12 | hs_count | LE32 | number of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | 16 | fs_descrs | Descriptor[] | list of full-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
* | | hs_descrs | Descriptor[] | list of high-speed descriptors |
+ * +-----+-----------+--------------+--------------------------------------+
*
* All numbers must be in little endian order.
*
* Descriptor[] is an array of valid USB descriptors which have the following
* format:
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
* | 0 | bLength | U8 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 1 | bDescriptorType | U8 | descriptor type |
+ * +-----+-----------------+------+--------------------------+
* | 2 | payload | | descriptor's payload |
+ * +-----+-----------------+------+--------------------------+
*
* OSDesc[] is an array of valid MS OS Feature Descriptors which have one of
* the following formats:
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
* | 0 | interface | U8 | related interface number |
+ * +-----+-----------------+------+--------------------------+
* | 1 | dwLength | U32 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 5 | bcdVersion | U16 | currently supported: 1 |
+ * +-----+-----------------+------+--------------------------+
* | 7 | wIndex | U16 | currently supported: 4 |
+ * +-----+-----------------+------+--------------------------+
* | 9 | bCount | U8 | number of ext. compat. |
+ * +-----+-----------------+------+--------------------------+
* | 10 | Reserved | U8 | 0 |
+ * +-----+-----------------+------+--------------------------+
* | 11 | ExtCompat[] | | list of ext. compat. d. |
+ * +-----+-----------------+------+--------------------------+
*
+ * +-----+-----------------+------+--------------------------+
* | off | name | type | description |
- * |-----+-----------------+------+--------------------------|
+ * +-----+-----------------+------+--------------------------+
* | 0 | interface | U8 | related interface number |
+ * +-----+-----------------+------+--------------------------+
* | 1 | dwLength | U32 | length of the descriptor |
+ * +-----+-----------------+------+--------------------------+
* | 5 | bcdVersion | U16 | currently supported: 1 |
+ * +-----+-----------------+------+--------------------------+
* | 7 | wIndex | U16 | currently supported: 5 |
+ * +-----+-----------------+------+--------------------------+
* | 9 | wCount | U16 | number of ext. compat. |
+ * +-----+-----------------+------+--------------------------+
* | 11 | ExtProp[] | | list of ext. prop. d. |
+ * +-----+-----------------+------+--------------------------+
*
* ExtCompat[] is an array of valid Extended Compatibility descriptors
* which have the following format:
*
+ * +-----+-----------------------+------+-------------------------------------+
* | off | name | type | description |
- * |-----+-----------------------+------+-------------------------------------|
+ * +-----+-----------------------+------+-------------------------------------+
* | 0 | bFirstInterfaceNumber | U8 | index of the interface or of the 1st|
* | | | | interface in an IAD group |
+ * +-----+-----------------------+------+-------------------------------------+
* | 1 | Reserved | U8 | 1 |
+ * +-----+-----------------------+------+-------------------------------------+
* | 2 | CompatibleID | U8[8]| compatible ID string |
+ * +-----+-----------------------+------+-------------------------------------+
* | 10 | SubCompatibleID | U8[8]| subcompatible ID string |
+ * +-----+-----------------------+------+-------------------------------------+
* | 18 | Reserved | U8[6]| 0 |
+ * +-----+-----------------------+------+-------------------------------------+
*
* ExtProp[] is an array of valid Extended Properties descriptors
* which have the following format:
*
+ * +-----+-----------------------+------+-------------------------------------+
* | off | name | type | description |
- * |-----+-----------------------+------+-------------------------------------|
+ * +-----+-----------------------+------+-------------------------------------+
* | 0 | dwSize | U32 | length of the descriptor |
+ * +-----+-----------------------+------+-------------------------------------+
* | 4 | dwPropertyDataType | U32 | 1..7 |
+ * +-----+-----------------------+------+-------------------------------------+
* | 8 | wPropertyNameLength | U16 | bPropertyName length (NL) |
+ * +-----+-----------------------+------+-------------------------------------+
* | 10 | bPropertyName |U8[NL]| name of this property |
+ * +-----+-----------------------+------+-------------------------------------+
* |10+NL| dwPropertyDataLength | U32 | bPropertyData length (DL) |
+ * +-----+-----------------------+------+-------------------------------------+
* |14+NL| bProperty |U8[DL]| payload of this property |
+ * +-----+-----------------------+------+-------------------------------------+
*/
struct usb_functionfs_strings_head {
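
A sketch of filling the new DFU functional descriptor for inclusion in a FunctionFS descriptor blob; the multi-byte values shown assume a little-endian host:

	#include <linux/usb/ch9.h>
	#include <linux/usb/functionfs.h>

	static const struct usb_dfu_functional_descriptor dfu_desc = {
		.bLength	 = sizeof(struct usb_dfu_functional_descriptor),
		.bDescriptorType = USB_DT_DFU_FUNCTIONAL,
		.bmAttributes	 = DFU_FUNC_ATT_CAN_DOWNLOAD |
				   DFU_FUNC_ATT_WILL_DETACH,
		.wDetachTimeOut	 = 255,		/* ms */
		.wTransferSize	 = 4096,
		.bcdDFUVersion	 = 0x0110,	/* DFU 1.1 */
	};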
diff --git a/include/uapi/linux/usb/g_hid.h b/include/uapi/linux/usb/g_hid.h
new file mode 100644
index 000000000000..b965092db476
--- /dev/null
+++ b/include/uapi/linux/usb/g_hid.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+
+#ifndef __UAPI_LINUX_USB_G_HID_H
+#define __UAPI_LINUX_USB_G_HID_H
+
+#include <linux/types.h>
+
+/* Maximum HID report length for High-Speed USB (i.e. USB 2.0) */
+#define MAX_REPORT_LENGTH 64
+
+/**
+ * struct usb_hidg_report - response to GET_REPORT
+ * @report_id: report ID that this is a response for
+ * @userspace_req:
+ * !0 this report answers any pending GET_REPORT request, but
+ * future requests wait for userspace to issue a new report
+ * 0 this report is to be used for any future GET_REPORT requests
+ * @length: length of the report response
+ * @data: report response
+ * @padding: padding for 32/64 bit compatibility
+ *
+ * Structure used by GADGET_HID_WRITE_GET_REPORT ioctl on /dev/hidg*.
+ */
+struct usb_hidg_report {
+ __u8 report_id;
+ __u8 userspace_req;
+ __u16 length;
+ __u8 data[MAX_REPORT_LENGTH];
+ __u8 padding[4];
+};
+
+/* The 'g' code is used by gadgetfs and hid gadget ioctl requests.
+ * Don't add any colliding codes to either driver, and keep
+ * them in unique ranges.
+ */
+
+#define GADGET_HID_READ_GET_REPORT_ID _IOR('g', 0x41, __u8)
+#define GADGET_HID_WRITE_GET_REPORT _IOW('g', 0x42, struct usb_hidg_report)
+
+#endif /* __UAPI_LINUX_USB_G_HID_H */
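
A hedged userspace sketch of serving GET_REPORT with the new ioctls; the device node and the 2-byte payload are illustrative:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/usb/g_hid.h>

	int main(void)
	{
		struct usb_hidg_report rep;
		__u8 report_id = 0;
		int fd = open("/dev/hidg0", O_RDWR);

		if (fd < 0)
			return 1;

		ioctl(fd, GADGET_HID_READ_GET_REPORT_ID, &report_id);

		memset(&rep, 0, sizeof(rep));
		rep.report_id = report_id;
		rep.userspace_req = 0;	/* reuse for future GET_REPORT requests */
		rep.length = 2;
		rep.data[0] = 0x01;
		rep.data[1] = 0x02;
		ioctl(fd, GADGET_HID_WRITE_GET_REPORT, &rep);

		close(fd);
		return 0;
	}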
diff --git a/include/uapi/linux/usb/gadgetfs.h b/include/uapi/linux/usb/gadgetfs.h
index 835473910a49..9754822b2a40 100644
--- a/include/uapi/linux/usb/gadgetfs.h
+++ b/include/uapi/linux/usb/gadgetfs.h
@@ -62,7 +62,7 @@ struct usb_gadgetfs_event {
};
-/* The 'g' code is also used by printer gadget ioctl requests.
+/* The 'g' code is also used by printer and hid gadget ioctl requests.
* Don't add any colliding codes to either driver, and keep
* them in unique ranges (size 0x20 for now).
*/
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 842bf1201ac4..71edf2c70cc3 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -19,6 +19,7 @@ enum vdpa_command {
VDPA_CMD_DEV_GET, /* can dump */
VDPA_CMD_DEV_CONFIG_GET, /* can dump */
VDPA_CMD_DEV_VSTATS_GET,
+ VDPA_CMD_DEV_ATTR_SET,
};
enum vdpa_attr {
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 4e91362da6da..27239cb64065 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -502,6 +502,7 @@ struct v4l2_capability {
#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
+#define V4L2_CAP_EDID 0x02000000 /* Is an EDID-only device */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
@@ -854,6 +855,7 @@ struct v4l2_pix_format {
/* Vendor specific - used for RK_ISP1 camera sub-system */
#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
+#define V4L2_META_FMT_RK_ISP1_EXT_PARAMS v4l2_fourcc('R', 'K', '1', 'E') /* Rockchip ISP1 3a Extensible Parameters */
/* Vendor specific - used for RaspberryPi PiSP */
#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C') /* PiSP BE configuration */
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index ddaa45e723c4..ee35a372805d 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -71,7 +71,13 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
#define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */
#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
-#define VIRTIO_BALLOON_S_NR 10
+#define VIRTIO_BALLOON_S_OOM_KILL 10 /* OOM killer invocations */
+#define VIRTIO_BALLOON_S_ALLOC_STALL 11 /* Stall count of memory allocatoin */
+#define VIRTIO_BALLOON_S_ASYNC_SCAN 12 /* Amount of memory scanned asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_SCAN 13 /* Amount of memory scanned directly */
+#define VIRTIO_BALLOON_S_ASYNC_RECLAIM 14 /* Amount of memory reclaimed asynchronously */
+#define VIRTIO_BALLOON_S_DIRECT_RECLAIM 15 /* Amount of memory reclaimed directly */
+#define VIRTIO_BALLOON_S_NR 16
#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
@@ -83,7 +89,13 @@ struct virtio_balloon_config {
VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
- VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
+ VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures", \
+ VIRTIO_BALLOON_S_NAMES_prefix "oom-kills", \
+ VIRTIO_BALLOON_S_NAMES_prefix "alloc-stalls", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-scans", \
+ VIRTIO_BALLOON_S_NAMES_prefix "async-reclaims", \
+ VIRTIO_BALLOON_S_NAMES_prefix "direct-reclaims" \
}
#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
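
Because VIRTIO_BALLOON_S_NAMES expands to a brace-enclosed initializer list, the extended stat table can be dumped directly; a small sketch:

    #include <stdio.h>
    #include <linux/virtio_balloon.h>

    static const char *const stat_names[] = VIRTIO_BALLOON_S_NAMES;

    int main(void)
    {
            for (int i = 0; i < VIRTIO_BALLOON_S_NR; i++)
                    printf("%2d: %s\n", i, stat_names[i]);
            return 0;
    }
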
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 0e21f3998108..bf2c9cabd207 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -311,6 +311,7 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL2 2
/* 3 is reserved for gfxstream */
#define VIRTIO_GPU_CAPSET_VENUS 4
+#define VIRTIO_GPU_CAPSET_DRM 6
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index e61104f35d73..faa9d62b3b30 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -66,6 +66,7 @@ enum bnxt_re_wqe_mode {
enum {
BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
+ BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT = 0x02,
};
struct bnxt_re_uctx_req {
@@ -118,10 +119,16 @@ struct bnxt_re_resize_cq_req {
__aligned_u64 cq_va;
};
+enum bnxt_re_qp_mask {
+ BNXT_RE_QP_REQ_MASK_VAR_WQE_SQ_SLOTS = 0x1,
+};
+
struct bnxt_re_qp_req {
__aligned_u64 qpsva;
__aligned_u64 qprva;
__aligned_u64 qp_handle;
+ __aligned_u64 comp_mask;
+ __u32 sq_slots;
};
struct bnxt_re_qp_resp {
@@ -134,8 +141,14 @@ struct bnxt_re_srq_req {
__aligned_u64 srq_handle;
};
+enum bnxt_re_srq_mask {
+ BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT = 0x1,
+};
+
struct bnxt_re_srq_resp {
__u32 srqid;
+ __u32 rsvd; /* padding */
+ __aligned_u64 comp_mask;
};
enum bnxt_re_shpg_offt {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 5b74d6534899..fd2e4a3a56b3 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -274,6 +274,10 @@ enum mlx5_ib_create_cq_attrs {
MLX5_IB_ATTR_CREATE_CQ_UAR_INDEX = UVERBS_ID_DRIVER_NS_WITH_UHW,
};
+enum mlx5_ib_reg_dmabuf_mr_attrs {
+ MLX5_IB_ATTR_REG_DMABUF_MR_ACCESS_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#define MLX5_IB_DW_MATCH_PARAM 0xA0
struct mlx5_ib_match_params {
@@ -344,6 +348,7 @@ enum mlx5_ib_pd_methods {
enum mlx5_ib_device_methods {
MLX5_IB_METHOD_QUERY_PORT = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH,
};
enum mlx5_ib_query_port_attrs {
@@ -351,4 +356,8 @@ enum mlx5_ib_query_port_attrs {
MLX5_IB_ATTR_QUERY_PORT,
};
+enum mlx5_ib_get_data_direct_sysfs_path_attrs {
+ MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 3189c7f08d17..7c233df475e7 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -54,6 +54,10 @@ enum mlx5_ib_uapi_flow_action_packet_reformat_type {
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
};
+enum mlx5_ib_uapi_reg_dmabuf_flags {
+ MLX5_IB_UAPI_REG_DMABUF_ACCESS_DATA_DIRECT = 1 << 0,
+};
+
struct mlx5_ib_uapi_devx_async_cmd_hdr {
__aligned_u64 wr_id;
__u8 out_data[];
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 2f37568f5556..39be09c0ffbb 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -15,6 +15,7 @@ enum {
enum {
RDMA_NL_GROUP_IWPM = 2,
RDMA_NL_GROUP_LS,
+ RDMA_NL_GROUP_NOTIFY,
RDMA_NL_NUM_GROUPS
};
@@ -305,6 +306,8 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_DELDEV,
+ RDMA_NLDEV_CMD_MONITOR,
+
RDMA_NLDEV_NUM_OPS
};
@@ -574,6 +577,9 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_EVENT_TYPE, /* u8 */
+
+ RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, /* u8 */
/*
* Always the end
*/
@@ -624,4 +630,14 @@ enum rdma_nl_name_assign_type {
RDMA_NAME_ASSIGN_TYPE_USER = 1, /* Provided by user-space */
};
+/*
+ * Supported rdma monitoring event types.
+ */
+enum rdma_nl_notify_event_type {
+ RDMA_REGISTER_EVENT,
+ RDMA_UNREGISTER_EVENT,
+ RDMA_NETDEV_ATTACH_EVENT,
+ RDMA_NETDEV_DETACH_EVENT,
+};
+
#endif /* _UAPI_RDMA_NETLINK_H */
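
A minimal sketch of subscribing to the new notify group, assuming it is joined like any other netlink multicast group (error handling omitted; on older libcs SOL_NETLINK may need <linux/socket.h>):

    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <rdma/rdma_netlink.h>

    int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
    struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
    unsigned int grp = RDMA_NL_GROUP_NOTIFY;

    bind(s, (struct sockaddr *)&sa, sizeof(sa));
    setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
    /* recvmsg() then yields RDMA_NLDEV_CMD_MONITOR messages whose
     * RDMA_NLDEV_ATTR_EVENT_TYPE attribute holds one of
     * enum rdma_nl_notify_event_type */
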
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index 8b8c5d1420fe..8e2c8fd44764 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -126,6 +126,11 @@ struct privcmd_ioeventfd {
__u8 pad[2];
};
+struct privcmd_pcidev_get_gsi {
+ __u32 sbdf;
+ __u32 gsi;
+};
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -157,5 +162,7 @@ struct privcmd_ioeventfd {
_IOW('P', 8, struct privcmd_irqfd)
#define IOCTL_PRIVCMD_IOEVENTFD \
_IOW('P', 9, struct privcmd_ioeventfd)
+#define IOCTL_PRIVCMD_PCIDEV_GET_GSI \
+ _IOC(_IOC_NONE, 'P', 10, sizeof(struct privcmd_pcidev_get_gsi))
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
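
Toolstack usage might look like the sketch below; privcmd_fd is an open /dev/xen/privcmd, and packing sbdf as segment<<16 | bus<<8 | devfn is an assumption to verify against the driver:

    #include <sys/ioctl.h>
    #include <xen/privcmd.h>

    static int get_gsi(int privcmd_fd, unsigned int seg,
                       unsigned int bus, unsigned int devfn)
    {
            struct privcmd_pcidev_get_gsi op = {
                    /* assumed SBDF encoding */
                    .sbdf = (seg << 16) | (bus << 8) | devfn,
            };

            if (ioctl(privcmd_fd, IOCTL_PRIVCMD_PCIDEV_GET_GSI, &op) < 0)
                    return -1;
            return op.gsi;
    }
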
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 853e95957c31..e594abe5d05f 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -597,7 +597,7 @@ struct ufs_dev_info {
};
/*
- * This enum is used in string mapping in include/trace/events/ufs.h.
+ * This enum is used in string mapping in ufs_trace.h.
*/
enum ufs_trace_str_t {
UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
@@ -607,7 +607,7 @@ enum ufs_trace_str_t {
/*
* Transaction Specific Fields (TSF) type in the UPIU package, this enum is
- * used in include/trace/events/ufs.h for UFS command trace.
+ * used in ufs_trace.h for UFS command trace.
*/
enum ufs_trace_tsf_t {
UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 0fd2aebac728..3f68ae3e4330 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -17,6 +17,7 @@
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/msi.h>
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 9917c7743d80..27364c4a6ef9 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -25,8 +25,9 @@ enum {
REG_CONTROLLER_CAPABILITIES = 0x00,
REG_MCQCAP = 0x04,
REG_UFS_VERSION = 0x08,
- REG_CONTROLLER_DEV_ID = 0x10,
- REG_CONTROLLER_PROD_ID = 0x14,
+ REG_EXT_CONTROLLER_CAPABILITIES = 0x0C,
+ REG_CONTROLLER_PID = 0x10,
+ REG_CONTROLLER_MID = 0x14,
REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
REG_INTERRUPT_STATUS = 0x20,
REG_INTERRUPT_ENABLE = 0x24,
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index b1e11863144d..daa96a22d257 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -67,10 +67,37 @@ static inline void xen_acpi_sleep_register(void)
acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
}
}
+int xen_pvh_setup_gsi(int gsi, int trigger, int polarity);
+int xen_acpi_get_gsi_info(struct pci_dev *dev,
+ int *gsi_out,
+ int *trigger_out,
+ int *polarity_out);
#else
static inline void xen_acpi_sleep_register(void)
{
}
+
+static inline int xen_pvh_setup_gsi(int gsi, int trigger, int polarity)
+{
+ return -1;
+}
+
+static inline int xen_acpi_get_gsi_info(struct pci_dev *dev,
+ int *gsi_out,
+ int *trigger_out,
+ int *polarity_out)
+{
+ return -1;
+}
+#endif
+
+#ifdef CONFIG_XEN_PCI_STUB
+int pcistub_get_gsi_from_sbdf(unsigned int sbdf);
+#else
+static inline int pcistub_get_gsi_from_sbdf(unsigned int sbdf)
+{
+ return -1;
+}
#endif
#endif /* _XEN_ACPI_H */
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 38deb1214613..918f47d87d7a 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -11,7 +11,9 @@
#define __XEN_PUBLIC_ELFNOTE_H__
/*
- * The notes should live in a SHT_NOTE segment and have "Xen" in the
+ * `incontents 200 elfnotes ELF notes
+ *
+ * The notes should live in a PT_NOTE segment and have "Xen" in the
* name field.
*
* Numeric types are either 4 or 8 bytes depending on the content of
@@ -22,6 +24,8 @@
*
* String values (for non-legacy) are NULL terminated ASCII, also known
* as ASCIZ type.
+ *
+ * Xen only uses ELF Notes contained in x86 binaries.
*/
/*
@@ -52,7 +56,7 @@
#define XEN_ELFNOTE_VIRT_BASE 3
/*
- * The offset of the ELF paddr field from the acutal required
+ * The offset of the ELF paddr field from the actual required
* pseudo-physical address (numeric).
*
* This is used to maintain backwards compatibility with older kernels
@@ -92,7 +96,12 @@
#define XEN_ELFNOTE_LOADER 8
/*
- * The kernel supports PAE (x86/32 only, string = "yes" or "no").
+ * The kernel supports PAE (x86/32 only, string = "yes", "no" or
+ * "bimodal").
+ *
+ * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
+ * may be given as "yes,bimodal" which will cause older Xen to treat
+ * this kernel as PAE.
*
* LEGACY: PAE (n.b. The legacy interface included a provision to
* indicate 'extended-cr3' support allowing L3 page tables to be
@@ -149,7 +158,9 @@
* The (non-default) location the initial phys-to-machine map should be
* placed at by the hypervisor (Dom0) or the tools (DomU).
* The kernel must be prepared for this mapping to be established using
- * large pages, despite such otherwise not being available to guests.
+ * large pages, despite such otherwise not being available to guests. Note
+ * that these large pages may be misaligned in PFN space (they'll obviously
+ * be aligned in MFN and virtual address spaces).
* The kernel must also be able to handle the page table pages used for
* this mapping not being accessible through the initial mapping.
* (Only x86-64 supports this at present.)
@@ -186,8 +197,80 @@
#define XEN_ELFNOTE_PHYS32_ENTRY 18
/*
+ * Physical loading constraints for PVH kernels
+ *
+ * The presence of this note indicates the kernel supports relocating itself.
+ *
+ * The note may include up to three 32bit values to place constraints on the
+ * guest physical loading addresses and alignment for a PVH kernel. Values
+ * are read in the following order:
+ * - a required start alignment (default 0x200000)
+ * - a minimum address for the start of the image (default 0; see below)
+ * - a maximum address for the last byte of the image (default 0xffffffff)
+ *
+ * When this note specifies an alignment value, it is used. Otherwise the
+ * maximum p_align value from loadable ELF Program Headers is used, if it is
+ * greater than or equal to 4k (0x1000). Otherwise, the default is used.
+ */
+#define XEN_ELFNOTE_PHYS32_RELOC 19
+
+/*
* The number of the highest elfnote defined.
*/
-#define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_ENTRY
+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_RELOC
+
+/*
+ * System information exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
+ * note in case of a system crash. This note will contain various
+ * information about the system, see xen/include/xen/elfcore.h.
+ */
+#define XEN_ELFNOTE_CRASH_INFO 0x1000001
+
+/*
+ * System registers exported through crash notes.
+ *
+ * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
+ * note per cpu in case of a system crash. This note is architecture
+ * specific and will contain registers not saved in the "CORE" note.
+ * See xen/include/xen/elfcore.h for more information.
+ */
+#define XEN_ELFNOTE_CRASH_REGS 0x1000002
+
+
+/*
+ * xen dump-core none note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
+ * in its dump file to indicate that the file is xen dump-core
+ * file. This note doesn't have any other information.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
+
+/*
+ * xen dump-core header note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
+ * in its dump file.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
+
+/*
+ * xen dump-core xen version note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
+ * in its dump file. It contains the xen version obtained via the
+ * XENVER hypercall.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
+
+/*
+ * xen dump-core format version note.
+ * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
+ * in its dump file. It contains a format version identifier.
+ * See tools/libxc/xc_core.h for more information.
+ */
+#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
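
For PHYS32_RELOC, the three optional values read in the documented order can be modeled as below; the struct name is hypothetical, and per the comment near the top of this header the numerics could also be 8 bytes:

    #include <stdint.h>

    /* hypothetical view of a 3 x 32-bit PHYS32_RELOC payload */
    struct xen_phys32_reloc {
            uint32_t align;     /* required start alignment, default 0x200000 */
            uint32_t min_addr;  /* min address of image start, default 0 */
            uint32_t max_addr;  /* max address of last byte, default 0xffffffff */
    };
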
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index a237af867873..df74e65a884b 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -256,6 +256,13 @@ struct physdev_pci_device_add {
*/
#define PHYSDEVOP_prepare_msix 30
#define PHYSDEVOP_release_msix 31
+/*
+ * Notify the hypervisor that a PCI device has been reset, so that any
+ * internally cached state is regenerated. Should be called after any
+ * device reset performed by the hardware domain.
+ */
+#define PHYSDEVOP_pci_device_reset 32
+
struct physdev_pci_device {
/* IN */
uint16_t seg;
@@ -263,6 +270,16 @@ struct physdev_pci_device {
uint8_t devfn;
};
+struct pci_device_reset {
+ struct physdev_pci_device dev;
+#define PCI_DEVICE_RESET_COLD 0x0
+#define PCI_DEVICE_RESET_WARM 0x1
+#define PCI_DEVICE_RESET_HOT 0x2
+#define PCI_DEVICE_RESET_FLR 0x3
+#define PCI_DEVICE_RESET_MASK 0x3
+ uint32_t flags;
+};
+
#define PHYSDEVOP_DBGP_RESET_PREPARE 1
#define PHYSDEVOP_DBGP_RESET_DONE 2
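
From the hardware domain kernel, the new operation might be issued as in this sketch; HYPERVISOR_physdev_op is the usual Xen hypercall wrapper, and the device coordinates are placeholders:

    /* dom0 kernel context, after resetting the device */
    struct pci_device_reset op = {
            .dev = { .seg = 0, .bus = 3, .devfn = PCI_DEVFN(0, 0) },
            .flags = PCI_DEVICE_RESET_FLR,
    };
    int rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &op);
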
diff --git a/include/xen/pci.h b/include/xen/pci.h
index b8337cf85fd1..424b8ea89ca8 100644
--- a/include/xen/pci.h
+++ b/include/xen/pci.h
@@ -4,10 +4,16 @@
#define __XEN_PCI_H__
#if defined(CONFIG_XEN_DOM0)
+int xen_reset_device(const struct pci_dev *dev);
int xen_find_device_domain_owner(struct pci_dev *dev);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
+static inline int xen_reset_device(const struct pci_dev *dev)
+{
+ return -1;
+}
+
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
return -1;
diff --git a/init/Kconfig b/init/Kconfig
index 8afb993b09cc..fbd0cb06a50a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -60,6 +60,13 @@ config LLD_VERSION
default $(ld-version) if LD_IS_LLD
default 0
+config RUSTC_VERSION
+ int
+ default $(shell,$(srctree)/scripts/rustc-version.sh $(RUSTC))
+ help
+ It does not depend on `RUST` since that one may need to use the version
+ in a `depends on`.
+
config RUST_IS_AVAILABLE
def_bool $(success,$(srctree)/scripts/rust_is_available.sh)
help
@@ -310,8 +317,9 @@ config KERNEL_XZ
BCJ filters which can improve compression ratio of executable
code. The size of the kernel is about 30% smaller with XZ in
comparison to gzip. On architectures for which there is a BCJ
- filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
- will create a few percent smaller kernel than plain LZMA.
+ filter (i386, x86_64, ARM, ARM64, RISC-V, big endian PowerPC,
+ and SPARC), XZ will create a few percent smaller kernel than
+ plain LZMA.
The speed is about the same as with LZMA: The decompression
speed of XZ is better than that of bzip2 but worse than gzip
@@ -986,7 +994,7 @@ config MEMCG_V1
going to shrink due to deprecation process. New deployments with v1
controller are highly discouraged.
- San N is unsure.
+ Say N if unsure.
config BLK_CGROUP
bool "IO controller"
@@ -1024,9 +1032,13 @@ menuconfig CGROUP_SCHED
tasks.
if CGROUP_SCHED
+config GROUP_SCHED_WEIGHT
+ def_bool n
+
config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER"
depends on CGROUP_SCHED
+ select GROUP_SCHED_WEIGHT
default CGROUP_SCHED
config CFS_BANDWIDTH
@@ -1051,6 +1063,12 @@ config RT_GROUP_SCHED
realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.rst for more information.
+config EXT_GROUP_SCHED
+ bool
+ depends on SCHED_CLASS_EXT && CGROUP_SCHED
+ select GROUP_SCHED_WEIGHT
+ default y
+
endif #CGROUP_SCHED
config SCHED_MM_CID
@@ -1924,12 +1942,14 @@ config RUST
bool "Rust support"
depends on HAVE_RUST
depends on RUST_IS_AVAILABLE
- depends on !CFI_CLANG
depends on !MODVERSIONS
- depends on !GCC_PLUGINS
+ depends on !GCC_PLUGIN_RANDSTRUCT
depends on !RANDSTRUCT
- depends on !SHADOW_CALL_STACK
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
+ depends on !CFI_CLANG || RUSTC_VERSION >= 107900 && $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
+ select CFI_ICALL_NORMALIZE_INTEGERS if CFI_CLANG
+ depends on !CALL_PADDING || RUSTC_VERSION >= 108000
+ depends on !KASAN_SW_TAGS
help
Enables Rust support in the kernel.
@@ -1946,7 +1966,9 @@ config RUST
config RUSTC_VERSION_TEXT
string
depends on RUST
- default "$(shell,$(RUSTC) --version 2>/dev/null)"
+ default "$(RUSTC_VERSION_TEXT)"
+ help
+ See `CC_VERSION_TEXT`.
config BINDGEN_VERSION_TEXT
string
diff --git a/init/init_task.c b/init/init_task.c
index 5d0399bc8d2f..136a8231355a 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -6,6 +6,7 @@
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
+#include <linux/sched/ext.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
@@ -99,6 +100,17 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
#ifdef CONFIG_CGROUP_SCHED
.sched_task_group = &root_task_group,
#endif
+#ifdef CONFIG_SCHED_CLASS_EXT
+ .scx = {
+ .dsq_list.node = LIST_HEAD_INIT(init_task.scx.dsq_list.node),
+ .sticky_cpu = -1,
+ .holding_cpu = -1,
+ .runnable_node = LIST_HEAD_INIT(init_task.scx.runnable_node),
+ .runnable_at = INITIAL_JIFFIES,
+ .ddsp_dsq_id = SCX_DSQ_INVALID,
+ .slice = SCX_SLICE_DFL,
+ },
+#endif
.ptraced = LIST_HEAD_INIT(init_task.ptraced),
.ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),
.real_parent = &init_task,
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index d43e1b5fcb36..6b1247664b35 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -177,9 +177,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
struct io_mapped_ubuf *buf = ctx->user_bufs[i];
- unsigned int len = buf->ubuf_end - buf->ubuf;
- seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
+ seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
}
if (has_lock && !xa_empty(&ctx->personalities)) {
unsigned long index;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f3570e81ecb4..feb61d68dca6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -635,6 +635,21 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
}
list_del(&ocqe->list);
kfree(ocqe);
+
+ /*
+ * For silly syzbot cases that deliberately overflow by huge
+	 * amounts, check if we need to reschedule and, if so, drop and
+	 * reacquire the locks. Nothing real would ever hit this. Ideally
+	 * we'd have a non-posting unlock for this, but it's hard to
+	 * justify for a non-real case.
+ */
+ if (need_resched()) {
+ io_cq_unlock_post(ctx);
+ mutex_unlock(&ctx->uring_lock);
+ cond_resched();
+ mutex_lock(&ctx->uring_lock);
+ io_cq_lock(ctx);
+ }
}
if (list_empty(&ctx->cq_overflow_list)) {
@@ -2164,7 +2179,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
* conditions are true (normal request), then just queue it.
*/
if (unlikely(link->head)) {
- trace_io_uring_link(req, link->head);
+ trace_io_uring_link(req, link->last);
link->last->link = req;
link->last = req;
@@ -2472,7 +2487,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
return 1;
if (unlikely(!llist_empty(&ctx->work_llist)))
return 1;
- if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
+ if (unlikely(task_work_pending(current)))
return 1;
if (unlikely(task_sigpending(current)))
return -EINTR;
@@ -2579,9 +2594,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
* If we got woken because of task_work being processed, run it
* now rather than let the caller do another wait loop.
*/
- io_run_task_work();
if (!llist_empty(&ctx->work_llist))
io_run_local_work(ctx, nr_wait);
+ io_run_task_work();
/*
* Non-local task_work will be run on exit to userspace, but
diff --git a/io_uring/register.c b/io_uring/register.c
index dab0f8024ddf..eca26d4884d9 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -542,11 +542,11 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_register_clock(ctx, arg);
break;
- case IORING_REGISTER_COPY_BUFFERS:
+ case IORING_REGISTER_CLONE_BUFFERS:
ret = -EINVAL;
if (!arg || nr_args != 1)
break;
- ret = io_register_copy_buffers(ctx, arg);
+ ret = io_register_clone_buffers(ctx, arg);
break;
default:
ret = -EINVAL;
@@ -561,7 +561,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
* true, then the registered index is used. Otherwise, the normal fd table.
* Caller must call fput() on the returned file, unless it's an ERR_PTR.
*/
-struct file *io_uring_register_get_file(int fd, bool registered)
+struct file *io_uring_register_get_file(unsigned int fd, bool registered)
{
struct file *file;
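
A hedged userspace sketch of the renamed registration op, using the raw syscall for brevity; the struct layout and the IORING_REGISTER_SRC_REGISTERED flag are taken to match include/uapi/linux/io_uring.h:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    /* dst_ring_fd, src_ring_fd: two io_uring instances (assumed open) */
    static int clone_bufs(int dst_ring_fd, int src_ring_fd)
    {
            struct io_uring_clone_buffers buf;

            memset(&buf, 0, sizeof(buf));
            buf.src_fd = src_ring_fd;
            /* set buf.flags = IORING_REGISTER_SRC_REGISTERED when src_fd
             * is a registered ring index rather than a normal fd */
            return syscall(__NR_io_uring_register, dst_ring_fd,
                           IORING_REGISTER_CLONE_BUFFERS, &buf, 1);
    }
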
diff --git a/io_uring/register.h b/io_uring/register.h
index cc69b88338fe..a5f39d5ef9e0 100644
--- a/io_uring/register.h
+++ b/io_uring/register.h
@@ -4,6 +4,6 @@
int io_eventfd_unregister(struct io_ring_ctx *ctx);
int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id);
-struct file *io_uring_register_get_file(int fd, bool registered);
+struct file *io_uring_register_get_file(unsigned int fd, bool registered);
#endif
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index a7164aa7d13e..33a3d156a85b 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -38,7 +38,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
static const struct io_mapped_ubuf dummy_ubuf = {
/* set invalid range, so io_import_fixed() fails meeting it */
.ubuf = -1UL,
- .ubuf_end = 0,
+ .len = UINT_MAX,
};
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
@@ -991,16 +991,13 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
size = iov->iov_len;
/* store original address for later verification */
imu->ubuf = (unsigned long) iov->iov_base;
- imu->ubuf_end = imu->ubuf + iov->iov_len;
+ imu->len = iov->iov_len;
imu->nr_bvecs = nr_pages;
imu->folio_shift = PAGE_SHIFT;
- imu->folio_mask = PAGE_MASK;
- if (coalesced) {
+ if (coalesced)
imu->folio_shift = data.folio_shift;
- imu->folio_mask = ~((1UL << data.folio_shift) - 1);
- }
refcount_set(&imu->refs, 1);
- off = (unsigned long) iov->iov_base & ~imu->folio_mask;
+ off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
*pimu = imu;
ret = 0;
@@ -1100,7 +1097,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
return -EFAULT;
/* not inside the mapped region */
- if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
+ if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
return -EFAULT;
/*
@@ -1143,14 +1140,14 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
iter->bvec = bvec + seg_skip;
iter->nr_segs -= seg_skip;
iter->count -= bvec->bv_len + offset;
- iter->iov_offset = offset & ~imu->folio_mask;
+ iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
}
}
return 0;
}
-static int io_copy_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
+static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
{
struct io_mapped_ubuf **user_bufs;
struct io_rsrc_data *data;
@@ -1214,9 +1211,9 @@ out_unlock:
*
* Since the memory is already accounted once, don't account it again.
*/
-int io_register_copy_buffers(struct io_ring_ctx *ctx, void __user *arg)
+int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
{
- struct io_uring_copy_buffers buf;
+ struct io_uring_clone_buffers buf;
bool registered_src;
struct file *file;
int ret;
@@ -1234,7 +1231,7 @@ int io_register_copy_buffers(struct io_ring_ctx *ctx, void __user *arg)
file = io_uring_register_get_file(buf.src_fd, registered_src);
if (IS_ERR(file))
return PTR_ERR(file);
- ret = io_copy_buffers(ctx, file->private_data);
+ ret = io_clone_buffers(ctx, file->private_data);
if (!registered_src)
fput(file);
return ret;
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 93546ab337a6..8ed588036210 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -42,12 +42,11 @@ struct io_rsrc_node {
struct io_mapped_ubuf {
u64 ubuf;
- u64 ubuf_end;
+ unsigned int len;
unsigned int nr_bvecs;
unsigned int folio_shift;
- unsigned long acct_pages;
- unsigned long folio_mask;
refcount_t refs;
+ unsigned long acct_pages;
struct bio_vec bvec[] __counted_by(nr_bvecs);
};
@@ -68,7 +67,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
struct io_mapped_ubuf *imu,
u64 buf_addr, size_t len);
-int io_register_copy_buffers(struct io_ring_ctx *ctx, void __user *arg);
+int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
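
The dropped folio_mask field was always derivable from folio_shift, so the conversion is behavior-preserving; a tiny standalone check of the identity the new code relies on:

    #include <assert.h>

    int main(void)
    {
            unsigned long shift = 12;   /* e.g. PAGE_SHIFT */
            unsigned long folio_mask = ~((1UL << shift) - 1); /* old cached field */
            unsigned long addr = 0x12345678UL;

            /* old: addr & ~folio_mask  ==  new: addr & ((1UL << shift) - 1) */
            assert((addr & ~folio_mask) == (addr & ((1UL << shift) - 1)));
            return 0;
    }
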
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 272df9d00f45..a26593979887 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -109,14 +109,14 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
struct fd f;
f = fdget(p->wq_fd);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-ENXIO);
- if (!io_is_uring_fops(f.file)) {
+ if (!io_is_uring_fops(fd_file(f))) {
fdput(f);
return ERR_PTR(-EINVAL);
}
- ctx_attach = f.file->private_data;
+ ctx_attach = fd_file(f)->private_data;
sqd = ctx_attach->sq_data;
if (!sqd) {
fdput(f);
@@ -196,9 +196,6 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
- if (io_napi(ctx))
- ret += io_napi_sqpoll_busy_poll(ctx);
-
if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
wake_up(&ctx->sqo_sq_wait);
if (creds)
@@ -323,6 +320,10 @@ static int io_sq_thread(void *data)
if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
sqt_spin = true;
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ if (io_napi(ctx))
+ io_napi_sqpoll_busy_poll(ctx);
+
if (sqt_spin || !time_after(jiffies, timeout)) {
if (sqt_spin) {
io_sq_update_worktime(sqd, &start);
@@ -419,9 +420,9 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
struct fd f;
f = fdget(p->wq_fd);
- if (!f.file)
+ if (!fd_file(f))
return -ENXIO;
- if (!io_is_uring_fops(f.file)) {
+ if (!io_is_uring_fops(fd_file(f))) {
fdput(f);
return -EINVAL;
}
@@ -461,13 +462,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
return 0;
if (p->flags & IORING_SETUP_SQ_AFF) {
- struct cpumask allowed_mask;
+ cpumask_var_t allowed_mask;
int cpu = p->sq_thread_cpu;
ret = -EINVAL;
- cpuset_cpus_allowed(current, &allowed_mask);
- if (!cpumask_test_cpu(cpu, &allowed_mask))
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+ goto err_sqpoll;
+ ret = -ENOMEM;
+ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+ goto err_sqpoll;
+ ret = -EINVAL;
+ cpuset_cpus_allowed(current, allowed_mask);
+ if (!cpumask_test_cpu(cpu, allowed_mask)) {
+ free_cpumask_var(allowed_mask);
goto err_sqpoll;
+ }
+ free_cpumask_var(allowed_mask);
sqd->sq_cpu = cpu;
} else {
sqd->sq_cpu = -1;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a7cbd69efbef..34fa0bd8bb11 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1085,20 +1085,20 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);
f = fdget(mqdes);
- if (unlikely(!f.file)) {
+ if (unlikely(!fd_file(f))) {
ret = -EBADF;
goto out;
}
- inode = file_inode(f.file);
- if (unlikely(f.file->f_op != &mqueue_file_operations)) {
+ inode = file_inode(fd_file(f));
+ if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
}
info = MQUEUE_I(inode);
- audit_file(f.file);
+ audit_file(fd_file(f));
- if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
+ if (unlikely(!(fd_file(f)->f_mode & FMODE_WRITE))) {
ret = -EBADF;
goto out_fput;
}
@@ -1138,7 +1138,7 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
}
if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
- if (f.file->f_flags & O_NONBLOCK) {
+ if (fd_file(f)->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
} else {
wait.task = current;
@@ -1199,20 +1199,20 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
audit_mq_sendrecv(mqdes, msg_len, 0, ts);
f = fdget(mqdes);
- if (unlikely(!f.file)) {
+ if (unlikely(!fd_file(f))) {
ret = -EBADF;
goto out;
}
- inode = file_inode(f.file);
- if (unlikely(f.file->f_op != &mqueue_file_operations)) {
+ inode = file_inode(fd_file(f));
+ if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
}
info = MQUEUE_I(inode);
- audit_file(f.file);
+ audit_file(fd_file(f));
- if (unlikely(!(f.file->f_mode & FMODE_READ))) {
+ if (unlikely(!(fd_file(f)->f_mode & FMODE_READ))) {
ret = -EBADF;
goto out_fput;
}
@@ -1242,7 +1242,7 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
}
if (info->attr.mq_curmsgs == 0) {
- if (f.file->f_flags & O_NONBLOCK) {
+ if (fd_file(f)->f_flags & O_NONBLOCK) {
spin_unlock(&info->lock);
ret = -EAGAIN;
} else {
@@ -1356,11 +1356,11 @@ static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
/* and attach it to the socket */
retry:
f = fdget(notification->sigev_signo);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto out;
}
- sock = netlink_getsockbyfilp(f.file);
+ sock = netlink_getsockbyfilp(fd_file(f));
fdput(f);
if (IS_ERR(sock)) {
ret = PTR_ERR(sock);
@@ -1379,13 +1379,13 @@ retry:
}
f = fdget(mqdes);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto out;
}
- inode = file_inode(f.file);
- if (unlikely(f.file->f_op != &mqueue_file_operations)) {
+ inode = file_inode(fd_file(f));
+ if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
ret = -EBADF;
goto out_fput;
}
@@ -1460,31 +1460,31 @@ static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
return -EINVAL;
f = fdget(mqdes);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (unlikely(f.file->f_op != &mqueue_file_operations)) {
+ if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
fdput(f);
return -EBADF;
}
- inode = file_inode(f.file);
+ inode = file_inode(fd_file(f));
info = MQUEUE_I(inode);
spin_lock(&info->lock);
if (old) {
*old = info->attr;
- old->mq_flags = f.file->f_flags & O_NONBLOCK;
+ old->mq_flags = fd_file(f)->f_flags & O_NONBLOCK;
}
if (new) {
audit_mq_getsetattr(mqdes, new);
- spin_lock(&f.file->f_lock);
+ spin_lock(&fd_file(f)->f_lock);
if (new->mq_flags & O_NONBLOCK)
- f.file->f_flags |= O_NONBLOCK;
+ fd_file(f)->f_flags |= O_NONBLOCK;
else
- f.file->f_flags &= ~O_NONBLOCK;
- spin_unlock(&f.file->f_lock);
+ fd_file(f)->f_flags &= ~O_NONBLOCK;
+ spin_unlock(&fd_file(f)->f_lock);
inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
}
diff --git a/ipc/shm.c b/ipc/shm.c
index 3e3071252dac..99564c870084 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1778,8 +1778,8 @@ long ksys_shmdt(char __user *shmaddr)
*/
file = vma->vm_file;
size = i_size_read(file_inode(vma->vm_file));
- do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
- NULL, false);
+ do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
+ vma->vm_end, NULL, false);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
@@ -1803,8 +1803,8 @@ long ksys_shmdt(char __user *shmaddr)
if ((vma->vm_ops == &shm_vm_ops) &&
((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
(vma->vm_file == file)) {
- do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
- NULL, false);
+ do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
+ vma->vm_end, NULL, false);
}
vma = vma_next(&vmi);
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index c2f1fd95a821..fe782cd77388 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -133,4 +133,29 @@ config SCHED_CORE
which is the likely usage by Linux distributions, there should
be no measurable impact on performance.
-
+config SCHED_CLASS_EXT
+ bool "Extensible Scheduling Class"
+ depends on BPF_SYSCALL && BPF_JIT && DEBUG_INFO_BTF
+ select STACKTRACE if STACKTRACE_SUPPORT
+ help
+ This option enables a new scheduler class sched_ext (SCX), which
+ allows scheduling policies to be implemented as BPF programs to
+ achieve the following:
+
+ - Ease of experimentation and exploration: Enabling rapid
+ iteration of new scheduling policies.
+ - Customization: Building application-specific schedulers which
+ implement policies that are not applicable to general-purpose
+ schedulers.
+ - Rapid scheduler deployments: Non-disruptive swap outs of
+ scheduling policies in production environments.
+
+	  sched_ext leverages the BPF struct_ops feature to define a structure
+ which exports function callbacks and flags to BPF programs that
+ wish to implement scheduling policies. The struct_ops structure
+ exported by sched_ext is struct sched_ext_ops, and is conceptually
+ similar to struct sched_class.
+
+ For more information:
+ Documentation/scheduler/sched-ext.rst
+ https://github.com/sched-ext/scx
diff --git a/kernel/Makefile b/kernel/Makefile
index 3c13240dfc9f..87866b037fbe 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -116,7 +116,6 @@ obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
obj-$(CONFIG_CFI_CLANG) += cfi.o
-obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERF_EVENTS) += events/
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 0291eef9ce92..9b9c151b5c82 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -52,9 +52,3 @@ obj-$(CONFIG_BPF_PRELOAD) += preload/
obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
-
-# Some source files are common to libbpf.
-vpath %.c $(srctree)/kernel/bpf:$(srctree)/tools/lib/bpf
-
-$(obj)/%.o: %.c FORCE
- $(call if_changed_rule,cc_o_c)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index feabc0193852..79660e3fca4c 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -73,6 +73,9 @@ int array_map_alloc_check(union bpf_attr *attr)
/* avoid overflow on round_up(map->value_size) */
if (attr->value_size > INT_MAX)
return -E2BIG;
+ /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
+ if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
+ return -E2BIG;
return 0;
}
@@ -494,7 +497,7 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key,
if (map->btf_key_type_id)
seq_printf(m, "%u: ", *(u32 *)key);
btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
rcu_read_unlock();
}
@@ -515,7 +518,7 @@ static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
seq_printf(m, "\tcpu%d: ", cpu);
btf_type_seq_show(map->btf, map->btf_value_type_id,
per_cpu_ptr(pptr, cpu), m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
seq_puts(m, "}\n");
@@ -600,7 +603,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
array = container_of(map, struct bpf_array, map);
index = info->index & array->index_mask;
if (info->percpu_value_buf)
- return array->pptrs[index];
+ return (void *)(uintptr_t)array->pptrs[index];
return array_map_elem_ptr(array, index);
}
@@ -619,7 +622,7 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
array = container_of(map, struct bpf_array, map);
index = info->index & array->index_mask;
if (info->percpu_value_buf)
- return array->pptrs[index];
+ return (void *)(uintptr_t)array->pptrs[index];
return array_map_elem_ptr(array, index);
}
@@ -632,7 +635,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
struct bpf_iter_meta meta;
struct bpf_prog *prog;
int off = 0, cpu = 0;
- void __percpu **pptr;
+ void __percpu *pptr;
u32 size;
meta.seq = seq;
@@ -648,7 +651,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
if (!info->percpu_value_buf) {
ctx.value = v;
} else {
- pptr = v;
+ pptr = (void __percpu *)(uintptr_t)v;
size = array->elem_size;
for_each_possible_cpu(cpu) {
copy_map_value_long(map, info->percpu_value_buf + off,
@@ -993,7 +996,7 @@ static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
prog_id = prog_fd_array_sys_lookup_elem(ptr);
btf_type_seq_show(map->btf, map->btf_value_type_id,
&prog_id, m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
}
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index b0ef45db207c..29da6d3838f6 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -78,13 +78,12 @@ void bpf_inode_storage_free(struct inode *inode)
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_local_storage_data *sdata;
- struct fd f = fdget_raw(*(int *)key);
+ CLASS(fd_raw, f)(*(int *)key);
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- sdata = inode_storage_lookup(file_inode(f.file), map, true);
- fdput(f);
+ sdata = inode_storage_lookup(file_inode(fd_file(f)), map, true);
return sdata ? sdata->data : NULL;
}
@@ -92,19 +91,16 @@ static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags)
{
struct bpf_local_storage_data *sdata;
- struct fd f = fdget_raw(*(int *)key);
+ CLASS(fd_raw, f)(*(int *)key);
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
- if (!inode_storage_ptr(file_inode(f.file))) {
- fdput(f);
+ if (!inode_storage_ptr(file_inode(fd_file(f))))
return -EBADF;
- }
- sdata = bpf_local_storage_update(file_inode(f.file),
+ sdata = bpf_local_storage_update(file_inode(fd_file(f)),
(struct bpf_local_storage_map *)map,
value, map_flags, GFP_ATOMIC);
- fdput(f);
return PTR_ERR_OR_ZERO(sdata);
}
@@ -123,15 +119,11 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
{
- struct fd f = fdget_raw(*(int *)key);
- int err;
+ CLASS(fd_raw, f)(*(int *)key);
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
-
- err = inode_storage_delete(file_inode(f.file), map);
- fdput(f);
- return err;
+ return inode_storage_delete(file_inode(fd_file(f)), map);
}
/* *gfp_flags* is a hidden argument provided by the verifier */
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 112581cf97e7..106735145948 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -283,7 +283,6 @@ static int iter_release(struct inode *inode, struct file *file)
const struct file_operations bpf_iter_fops = {
.open = iter_open,
- .llseek = no_llseek,
.read = bpf_seq_read,
.release = iter_release,
};
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 08a338e1f231..6292ac5f9bd1 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -11,7 +11,6 @@
#include <linux/lsm_hooks.h>
#include <linux/bpf_lsm.h>
#include <linux/kallsyms.h>
-#include <linux/bpf_verifier.h>
#include <net/bpf_sk_storage.h>
#include <linux/bpf_local_storage.h>
#include <linux/btf_ids.h>
@@ -36,6 +35,24 @@ BTF_SET_START(bpf_lsm_hooks)
#undef LSM_HOOK
BTF_SET_END(bpf_lsm_hooks)
+BTF_SET_START(bpf_lsm_disabled_hooks)
+BTF_ID(func, bpf_lsm_vm_enough_memory)
+BTF_ID(func, bpf_lsm_inode_need_killpriv)
+BTF_ID(func, bpf_lsm_inode_getsecurity)
+BTF_ID(func, bpf_lsm_inode_listsecurity)
+BTF_ID(func, bpf_lsm_inode_copy_up_xattr)
+BTF_ID(func, bpf_lsm_getselfattr)
+BTF_ID(func, bpf_lsm_getprocattr)
+BTF_ID(func, bpf_lsm_setprocattr)
+#ifdef CONFIG_KEYS
+BTF_ID(func, bpf_lsm_key_getsecurity)
+#endif
+#ifdef CONFIG_AUDIT
+BTF_ID(func, bpf_lsm_audit_rule_match)
+#endif
+BTF_ID(func, bpf_lsm_ismaclabel)
+BTF_SET_END(bpf_lsm_disabled_hooks)
+
/* List of LSM hooks that should operate on 'current' cgroup regardless
* of function signature.
*/
@@ -97,15 +114,24 @@ void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
+ u32 btf_id = prog->aux->attach_btf_id;
+ const char *func_name = prog->aux->attach_func_name;
+
if (!prog->gpl_compatible) {
bpf_log(vlog,
"LSM programs must have a GPL compatible license\n");
return -EINVAL;
}
- if (!btf_id_set_contains(&bpf_lsm_hooks, prog->aux->attach_btf_id)) {
+ if (btf_id_set_contains(&bpf_lsm_disabled_hooks, btf_id)) {
+ bpf_log(vlog, "attach_btf_id %u points to disabled hook %s\n",
+ btf_id, func_name);
+ return -EINVAL;
+ }
+
+ if (!btf_id_set_contains(&bpf_lsm_hooks, btf_id)) {
bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n",
- prog->aux->attach_btf_id, prog->aux->attach_func_name);
+ btf_id, func_name);
return -EINVAL;
}
@@ -390,3 +416,36 @@ const struct bpf_verifier_ops lsm_verifier_ops = {
.get_func_proto = bpf_lsm_func_proto,
.is_valid_access = btf_ctx_access,
};
+
+/* hooks return 0 or 1 */
+BTF_SET_START(bool_lsm_hooks)
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+BTF_ID(func, bpf_lsm_xfrm_state_pol_flow_match)
+#endif
+#ifdef CONFIG_AUDIT
+BTF_ID(func, bpf_lsm_audit_rule_known)
+#endif
+BTF_ID(func, bpf_lsm_inode_xattr_skipcap)
+BTF_SET_END(bool_lsm_hooks)
+
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *retval_range)
+{
+ /* no return value range for void hooks */
+ if (!prog->aux->attach_func_proto->type)
+ return -EINVAL;
+
+ if (btf_id_set_contains(&bool_lsm_hooks, prog->aux->attach_btf_id)) {
+ retval_range->minval = 0;
+ retval_range->maxval = 1;
+ } else {
+ /* All other available LSM hooks, except task_prctl, return 0
+ * on success and negative error code on failure.
+ * To keep things simple, we only allow bpf progs to return 0
+ * or negative errno for task_prctl too.
+ */
+ retval_range->minval = -MAX_ERRNO;
+ retval_range->maxval = 0;
+ }
+ return 0;
+}
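
What this buys at load time, as a libbpf-flavored sketch (vmlinux.h assumed): a program on a non-boolean hook must return a value in [-MAX_ERRNO, 0], while the boolean hooks listed above may return 0 or 1:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char LICENSE[] SEC("license") = "GPL";

    SEC("lsm/file_open")
    int BPF_PROG(allow_all, struct file *file)
    {
            /* returning 1 here would now fail verification; use 0 or a
             * negative errno such as -EPERM to deny */
            return 0;
    }
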
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 0d515ec57aa5..fda3dd2ee984 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -837,7 +837,7 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
btf_type_seq_show(st_map->btf,
map->btf_vmlinux_value_type_id,
value, m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
kfree(value);
@@ -1040,6 +1040,13 @@ void bpf_struct_ops_put(const void *kdata)
bpf_map_put(&st_map->map);
}
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+{
+ void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);
+
+ return func_ptr ? 0 : -ENOTSUPP;
+}
+
static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index ba91be08763a..75e4fe83c509 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -212,7 +212,7 @@ enum btf_kfunc_hook {
BTF_KFUNC_HOOK_TRACING,
BTF_KFUNC_HOOK_SYSCALL,
BTF_KFUNC_HOOK_FMODRET,
- BTF_KFUNC_HOOK_CGROUP_SKB,
+ BTF_KFUNC_HOOK_CGROUP,
BTF_KFUNC_HOOK_SCHED_ACT,
BTF_KFUNC_HOOK_SK_SKB,
BTF_KFUNC_HOOK_SOCKET_FILTER,
@@ -790,7 +790,7 @@ const char *btf_str_by_offset(const struct btf *btf, u32 offset)
return NULL;
}
-static bool __btf_name_valid(const struct btf *btf, u32 offset)
+static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
/* offset must be valid */
const char *src = btf_str_by_offset(btf, offset);
@@ -811,11 +811,6 @@ static bool __btf_name_valid(const struct btf *btf, u32 offset)
return !*src;
}
-static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
-{
- return __btf_name_valid(btf, offset);
-}
-
/* Allow any printable character in DATASEC names */
static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
@@ -3761,6 +3756,7 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t,
return -EINVAL;
}
+/* Callers have to ensure the life cycle of btf if it is program BTF */
static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
struct btf_field_info *info)
{
@@ -3789,7 +3785,6 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
field->kptr.dtor = NULL;
id = info->kptr.type_id;
kptr_btf = (struct btf *)btf;
- btf_get(kptr_btf);
goto found_dtor;
}
if (id < 0)
@@ -4631,7 +4626,7 @@ static s32 btf_var_check_meta(struct btf_verifier_env *env,
}
if (!t->name_off ||
- !__btf_name_valid(env->btf, t->name_off)) {
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
@@ -5519,36 +5514,72 @@ static const char *alloc_obj_fields[] = {
static struct btf_struct_metas *
btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
{
- union {
- struct btf_id_set set;
- struct {
- u32 _cnt;
- u32 _ids[ARRAY_SIZE(alloc_obj_fields)];
- } _arr;
- } aof;
struct btf_struct_metas *tab = NULL;
+ struct btf_id_set *aof;
int i, n, id, ret;
BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
- memset(&aof, 0, sizeof(aof));
+ aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN);
+ if (!aof)
+ return ERR_PTR(-ENOMEM);
+ aof->cnt = 0;
+
for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
/* Try to find whether this special type exists in user BTF, and
* if so remember its ID so we can easily find it among members
* of structs that we iterate in the next loop.
*/
+ struct btf_id_set *new_aof;
+
id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
if (id < 0)
continue;
- aof.set.ids[aof.set.cnt++] = id;
+
+ new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!new_aof) {
+ ret = -ENOMEM;
+ goto free_aof;
+ }
+ aof = new_aof;
+ aof->ids[aof->cnt++] = id;
+ }
+
+ n = btf_nr_types(btf);
+ for (i = 1; i < n; i++) {
+ /* Try to find if there are kptrs in user BTF and remember their ID */
+ struct btf_id_set *new_aof;
+ struct btf_field_info tmp;
+ const struct btf_type *t;
+
+ t = btf_type_by_id(btf, i);
+ if (!t) {
+ ret = -EINVAL;
+ goto free_aof;
+ }
+
+ ret = btf_find_kptr(btf, t, 0, 0, &tmp);
+ if (ret != BTF_FIELD_FOUND)
+ continue;
+
+ new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!new_aof) {
+ ret = -ENOMEM;
+ goto free_aof;
+ }
+ aof = new_aof;
+ aof->ids[aof->cnt++] = i;
}
- if (!aof.set.cnt)
+ if (!aof->cnt) {
+ kfree(aof);
return NULL;
- sort(&aof.set.ids, aof.set.cnt, sizeof(aof.set.ids[0]), btf_id_cmp_func, NULL);
+ }
+ sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
- n = btf_nr_types(btf);
for (i = 1; i < n; i++) {
struct btf_struct_metas *new_tab;
const struct btf_member *member;
@@ -5558,17 +5589,13 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
int j, tab_cnt;
t = btf_type_by_id(btf, i);
- if (!t) {
- ret = -EINVAL;
- goto free;
- }
if (!__btf_type_is_struct(t))
continue;
cond_resched();
for_each_member(j, t, member) {
- if (btf_id_set_contains(&aof.set, member->type))
+ if (btf_id_set_contains(aof, member->type))
goto parse;
}
continue;
@@ -5587,7 +5614,8 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
type = &tab->types[tab->cnt];
type->btf_id = i;
record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
- BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT, t->size);
+ BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
+ BPF_KPTR, t->size);
/* The record cannot be unset, treat it as an error if so */
if (IS_ERR_OR_NULL(record)) {
ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
@@ -5596,9 +5624,12 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
type->record = record;
tab->cnt++;
}
+ kfree(aof);
return tab;
free:
btf_struct_metas_free(tab);
+free_aof:
+ kfree(aof);
return ERR_PTR(ret);
}
@@ -6245,12 +6276,11 @@ static struct btf *btf_parse_module(const char *module_name, const void *data,
btf->kernel_btf = true;
snprintf(btf->name, sizeof(btf->name), "%s", module_name);
- btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN);
+ btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
if (!btf->data) {
err = -ENOMEM;
goto errout;
}
- memcpy(btf->data, data, data_size);
btf->data_size = data_size;
err = btf_parse_hdr(env);
@@ -6418,8 +6448,11 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (arg == nr_args) {
switch (prog->expected_attach_type) {
- case BPF_LSM_CGROUP:
case BPF_LSM_MAC:
+ /* mark we are accessing the return value */
+ info->is_retval = true;
+ fallthrough;
+ case BPF_LSM_CGROUP:
case BPF_TRACE_FEXIT:
/* When LSM programs are attached to void LSM hooks
* they use FEXIT trampolines and when attached to
@@ -7678,21 +7711,16 @@ int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
struct btf *btf_get_by_fd(int fd)
{
struct btf *btf;
- struct fd f;
-
- f = fdget(fd);
+ CLASS(fd, f)(fd);
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (f.file->f_op != &btf_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &btf_fops)
return ERR_PTR(-EINVAL);
- }
- btf = f.file->private_data;
+ btf = fd_file(f)->private_data;
refcount_inc(&btf->refcnt);
- fdput(f);
return btf;
}
@@ -8054,15 +8082,44 @@ BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE
+/* Validate well-formedness of iter argument type.
+ * On success, return positive BTF ID of iter state's STRUCT type.
+ * On error, negative error is returned.
+ */
+int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
+{
+ const struct btf_param *arg;
+ const struct btf_type *t;
+ const char *name;
+ int btf_id;
+
+ if (btf_type_vlen(func) <= arg_idx)
+ return -EINVAL;
+
+ arg = &btf_params(func)[arg_idx];
+ t = btf_type_skip_modifiers(btf, arg->type, NULL);
+ if (!t || !btf_type_is_ptr(t))
+ return -EINVAL;
+ t = btf_type_skip_modifiers(btf, t->type, &btf_id);
+ if (!t || !__btf_type_is_struct(t))
+ return -EINVAL;
+
+ name = btf_name_by_offset(btf, t->name_off);
+ if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
+ return -EINVAL;
+
+ return btf_id;
+}
+
static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
const struct btf_type *func, u32 func_flags)
{
u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
- const char *name, *sfx, *iter_name;
- const struct btf_param *arg;
+ const char *sfx, *iter_name;
const struct btf_type *t;
char exp_name[128];
u32 nr_args;
+ int btf_id;
/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
if (!flags || (flags & (flags - 1)))
@@ -8073,28 +8130,21 @@ static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
if (nr_args < 1)
return -EINVAL;
- arg = &btf_params(func)[0];
- t = btf_type_skip_modifiers(btf, arg->type, NULL);
- if (!t || !btf_type_is_ptr(t))
- return -EINVAL;
- t = btf_type_skip_modifiers(btf, t->type, NULL);
- if (!t || !__btf_type_is_struct(t))
- return -EINVAL;
-
- name = btf_name_by_offset(btf, t->name_off);
- if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
- return -EINVAL;
+ btf_id = btf_check_iter_arg(btf, func, 0);
+ if (btf_id < 0)
+ return btf_id;
/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
* fit nicely in stack slots
*/
+ t = btf_type_by_id(btf, btf_id);
if (t->size == 0 || (t->size % 8))
return -EINVAL;
/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
* naming pattern
*/
- iter_name = name + sizeof(ITER_PREFIX) - 1;
+ iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
if (flags & KF_ITER_NEW)
sfx = "new";
else if (flags & KF_ITER_NEXT)
@@ -8309,13 +8359,19 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
case BPF_PROG_TYPE_STRUCT_OPS:
return BTF_KFUNC_HOOK_STRUCT_OPS;
case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ case BPF_PROG_TYPE_PERF_EVENT:
case BPF_PROG_TYPE_LSM:
return BTF_KFUNC_HOOK_TRACING;
case BPF_PROG_TYPE_SYSCALL:
return BTF_KFUNC_HOOK_SYSCALL;
case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
- return BTF_KFUNC_HOOK_CGROUP_SKB;
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ return BTF_KFUNC_HOOK_CGROUP;
case BPF_PROG_TYPE_SCHED_ACT:
return BTF_KFUNC_HOOK_SCHED_ACT;
case BPF_PROG_TYPE_SK_SKB:
@@ -8891,6 +8947,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
struct bpf_core_cand_list cands = {};
struct bpf_core_relo_res targ_res;
struct bpf_core_spec *specs;
+ const struct btf_type *type;
int err;
/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
@@ -8900,6 +8957,13 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
if (!specs)
return -ENOMEM;
+ type = btf_type_by_id(ctx->btf, relo->type_id);
+ if (!type) {
+ bpf_log(ctx->log, "relo #%u: bad type id %u\n",
+ relo_idx, relo->type_id);
+ return -EINVAL;
+ }
+
if (need_cands) {
struct bpf_cand_cache *cc;
int i;
diff --git a/kernel/bpf/btf_iter.c b/kernel/bpf/btf_iter.c
new file mode 100644
index 000000000000..0e2c66a52df9
--- /dev/null
+++ b/kernel/bpf/btf_iter.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include "../../tools/lib/bpf/btf_iter.c"
diff --git a/kernel/bpf/btf_relocate.c b/kernel/bpf/btf_relocate.c
new file mode 100644
index 000000000000..c12ccbf66507
--- /dev/null
+++ b/kernel/bpf/btf_relocate.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include "../../tools/lib/bpf/btf_relocate.c"
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 8ba73042a239..e7113d700b87 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -2581,6 +2581,8 @@ cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_cgroup_classid:
return &bpf_get_cgroup_classid_curr_proto;
#endif
+ case BPF_FUNC_current_task_under_cgroup:
+ return &bpf_current_task_under_cgroup_proto;
default:
return NULL;
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7ee62e38faf0..4e07cc057d6f 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2302,6 +2302,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
{
enum bpf_prog_type prog_type = resolve_prog_type(fp);
bool ret;
+ struct bpf_prog_aux *aux = fp->aux;
if (fp->kprobe_override)
return false;
@@ -2311,7 +2312,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
* in the case of devmap and cpumap). Until device checks
* are implemented, prohibit adding dev-bound programs to program maps.
*/
- if (bpf_prog_is_dev_bound(fp->aux))
+ if (bpf_prog_is_dev_bound(aux))
return false;
spin_lock(&map->owner.lock);
@@ -2321,12 +2322,26 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
*/
map->owner.type = prog_type;
map->owner.jited = fp->jited;
- map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
+ map->owner.xdp_has_frags = aux->xdp_has_frags;
+ map->owner.attach_func_proto = aux->attach_func_proto;
ret = true;
} else {
ret = map->owner.type == prog_type &&
map->owner.jited == fp->jited &&
- map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
+ map->owner.xdp_has_frags == aux->xdp_has_frags;
+ if (ret &&
+ map->owner.attach_func_proto != aux->attach_func_proto) {
+ switch (prog_type) {
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_EXT:
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ ret = false;
+ break;
+ default:
+ break;
+ }
+ }
}
spin_unlock(&map->owner.lock);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 06115f8728e8..b14b87463ee0 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -462,6 +462,9 @@ static int htab_map_alloc_check(union bpf_attr *attr)
* kmalloc-able later in htab_map_update_elem()
*/
return -E2BIG;
+ /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
+ if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
+ return -E2BIG;
return 0;
}
@@ -1049,14 +1052,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
pptr = htab_elem_get_ptr(l_new, key_size);
} else {
/* alloc_percpu zero-fills */
- pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
- if (!pptr) {
+ void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
+
+ if (!ptr) {
bpf_mem_cache_free(&htab->ma, l_new);
l_new = ERR_PTR(-ENOMEM);
goto dec_count;
}
- l_new->ptr_to_pptr = pptr;
- pptr = *(void **)pptr;
+ l_new->ptr_to_pptr = ptr;
+ pptr = *(void __percpu **)ptr;
}
pcpu_init_value(htab, pptr, value, onallcpus);
@@ -1586,7 +1590,7 @@ static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
seq_puts(m, ": ");
btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
rcu_read_unlock();
}
@@ -2450,7 +2454,7 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
seq_printf(m, "\tcpu%d: ", cpu);
btf_type_seq_show(map->btf, map->btf_value_type_id,
per_cpu_ptr(pptr, cpu), m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
seq_puts(m, "}\n");
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index b5f0adae8293..1a43d06eab28 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -158,6 +158,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
.func = bpf_get_smp_processor_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
+ .allow_fastcall = true,
};
BPF_CALL_0(bpf_get_numa_node_id)
@@ -517,16 +518,15 @@ static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
}
BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
- long *, res)
+ s64 *, res)
{
long long _res;
int err;
+ *res = 0;
err = __bpf_strtoll(buf, buf_len, flags, &_res);
if (err < 0)
return err;
- if (_res != (long)_res)
- return -ERANGE;
*res = _res;
return err;
}
@@ -538,23 +538,23 @@ const struct bpf_func_proto bpf_strtol_proto = {
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_LONG,
+ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg4_size = sizeof(s64),
};
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
- unsigned long *, res)
+ u64 *, res)
{
unsigned long long _res;
bool is_negative;
int err;
+ *res = 0;
err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
if (err < 0)
return err;
if (is_negative)
return -EINVAL;
- if (_res != (unsigned long)_res)
- return -ERANGE;
*res = _res;
return err;
}
@@ -566,7 +566,8 @@ const struct bpf_func_proto bpf_strtoul_proto = {
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_LONG,
+ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg4_size = sizeof(u64),
};
BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
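With ARG_PTR_TO_FIXED_SIZE_MEM above, the result slot of bpf_strtol()/bpf_strtoul() is a fixed, aligned 8-byte buffer rather than an architecture-sized long, so the -ERANGE narrowing checks could be dropped. A sketch of a caller under the new contract (illustrative sysctl program and limit):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("cgroup/sysctl")
    int sysctl_limit(struct bpf_sysctl *ctx)
    {
            char buf[16] = {};
            long val = 0; /* an aligned 8-byte slot, per arg4_size above */

            if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) < 0)
                    return 1; /* not a write, allow */
            if (bpf_strtol(buf, sizeof(buf), 0, &val) < 0)
                    return 0; /* reject unparseable input */
            return val <= 100; /* allow writes of values up to 100 */
    }

    char LICENSE[] SEC("license") = "GPL";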
@@ -714,7 +715,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
if (cpu >= nr_cpu_ids)
return (unsigned long)NULL;
- return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
+ return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
}
const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
@@ -727,7 +728,7 @@ const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
- return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
+ return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
}
const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
@@ -1618,9 +1619,9 @@ void bpf_wq_cancel_and_free(void *val)
schedule_work(&work->delete_work);
}
-BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
+BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
{
- unsigned long *kptr = map_value;
+ unsigned long *kptr = dst;
/* This helper may be inlined by verifier. */
return xchg(kptr, (unsigned long)ptr);
@@ -1635,7 +1636,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.ret_btf_id = BPF_PTR_POISON,
- .arg1_type = ARG_PTR_TO_KPTR,
+ .arg1_type = ARG_KPTR_XCHG_DEST,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
.arg2_btf_id = BPF_PTR_POISON,
};
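ARG_KPTR_XCHG_DEST generalizes the first argument of bpf_kptr_xchg() from map values to any valid kptr destination, including fields of allocated objects. A hedged sketch, assuming the bpf_obj_new()/bpf_obj_drop() wrappers from the selftests' bpf_experimental.h:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    /* release kfunc for task kptrs; declaration assumed here */
    extern void bpf_task_release(struct task_struct *p) __ksym;

    struct node {
            struct task_struct __kptr *task;
    };

    SEC("tc")
    int xchg_into_alloc_obj(void *ctx)
    {
            struct node *n = bpf_obj_new(typeof(*n));
            struct task_struct *old;

            if (!n)
                    return 0;
            /* dst is an allocated object here, not a map value */
            old = bpf_kptr_xchg(&n->task, NULL);
            if (old)
                    bpf_task_release(old);
            bpf_obj_drop(n);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";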
@@ -2033,6 +2034,7 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
}
+EXPORT_SYMBOL_GPL(bpf_base_func_proto);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock)
@@ -2457,6 +2459,29 @@ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
return ret;
}
+BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct cgroup *cgrp;
+
+ if (unlikely(idx >= array->map.max_entries))
+ return -E2BIG;
+
+ cgrp = READ_ONCE(array->ptrs[idx]);
+ if (unlikely(!cgrp))
+ return -EAGAIN;
+
+ return task_under_cgroup_hierarchy(current, cgrp);
+}
+
+const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
+ .func = bpf_current_task_under_cgroup,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_ANYTHING,
+};
+
/**
* bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
* specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
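The re-added bpf_current_task_under_cgroup() tests current against a cgroup stored in a BPF_MAP_TYPE_CGROUP_ARRAY (populated from userspace with a cgroup fd). With the cgroup.c wiring above it becomes callable from cgroup program types that run in task context; a sketch (illustrative attach point and policy):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
            __uint(max_entries, 1);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, sizeof(__u32));
    } cgrp_map SEC(".maps");

    SEC("cgroup/setsockopt")
    int block_sockopt(struct bpf_sockopt *ctx)
    {
            /* 1: current is in the hierarchy, 0: not, <0: error */
            if (bpf_current_task_under_cgroup(&cgrp_map, 0) == 1)
                    return 0; /* reject setsockopt() from the watched cgroup */
            return 1; /* allow */
    }

    char LICENSE[] SEC("license") = "GPL";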
@@ -2938,6 +2963,47 @@ __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
bpf_mem_free(&bpf_global_ma, kit->bits);
}
+/**
+ * bpf_copy_from_user_str() - Copy a string from an unsafe user address
+ * @dst: Destination address, in kernel space. This buffer must be
+ * at least @dst__sz bytes long.
+ * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
+ * @unsafe_ptr__ign: Source address, in user space.
+ * @flags: The only supported flag is BPF_F_PAD_ZEROS
+ *
+ * Copies a NUL-terminated string from userspace to BPF space. If the user
+ * string is too long, this still ensures NUL termination in the @dst buffer,
+ * unless the buffer size is 0.
+ *
+ * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
+ * memset all of @dst on failure.
+ */
+__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
+{
+ int ret;
+
+ if (unlikely(flags & ~BPF_F_PAD_ZEROS))
+ return -EINVAL;
+
+ if (unlikely(!dst__sz))
+ return 0;
+
+ ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
+ if (ret < 0) {
+ if (flags & BPF_F_PAD_ZEROS)
+ memset((char *)dst, 0, dst__sz);
+
+ return ret;
+ }
+
+ if (flags & BPF_F_PAD_ZEROS)
+ memset((char *)dst + ret, 0, dst__sz - ret);
+ else
+ ((char *)dst)[ret] = '\0';
+
+ return ret + 1;
+}
+
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(generic_btf_ids)
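The NUL/padding semantics above are easiest to see in a standalone model, with strncpy_from_user() stubbed by a bounded copy and the BPF_F_PAD_ZEROS bit assumed:

    #include <string.h>

    #define BPF_F_PAD_ZEROS (1ULL << 0) /* assumed flag value */

    /* stand-in for strncpy_from_user(): returns the string length,
     * not counting the NUL; never faults in this model */
    static long copy_str(char *dst, const char *src, long count)
    {
            long i;

            for (i = 0; i < count; i++) {
                    dst[i] = src[i];
                    if (!src[i])
                            return i;
            }
            return count; /* source longer than count: truncated */
    }

    /* mirrors the kfunc's success path */
    int model_copy_from_user_str(char *dst, unsigned int dst_sz,
                                 const char *src, unsigned long long flags)
    {
            long ret;

            if (flags & ~BPF_F_PAD_ZEROS)
                    return -22; /* -EINVAL */
            if (!dst_sz)
                    return 0;
            ret = copy_str(dst, src, dst_sz - 1);
            if (flags & BPF_F_PAD_ZEROS)
                    memset(dst + ret, 0, dst_sz - ret); /* NUL plus zeroed tail */
            else
                    dst[ret] = '\0'; /* NUL only; tail left as-is */
            return ret + 1; /* copied bytes, including the NUL */
    }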
@@ -3023,6 +3089,7 @@ BTF_ID_FLAGS(func, bpf_preempt_enable)
BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
@@ -3051,6 +3118,7 @@ static int __init kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
ARRAY_SIZE(generic_dtors),
THIS_MODULE);
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index af5d2ffadd70..d8fc5eba529d 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -709,10 +709,10 @@ static void seq_print_delegate_opts(struct seq_file *m,
msk = 1ULL << e->val;
if (delegate_msk & msk) {
/* emit lower-case name without prefix */
- seq_printf(m, "%c", first ? '=' : ':');
+ seq_putc(m, first ? '=' : ':');
name += pfx_len;
while (*name) {
- seq_printf(m, "%c", tolower(*name));
+ seq_putc(m, tolower(*name));
name++;
}
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index a04f505aefe9..3969eb0382af 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -431,7 +431,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
seq_puts(m, ": ");
btf_type_seq_show(map->btf, map->btf_value_type_id,
&READ_ONCE(storage->buf)->data[0], m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
} else {
seq_puts(m, ": {\n");
for_each_possible_cpu(cpu) {
@@ -439,7 +439,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
btf_type_seq_show(map->btf, map->btf_value_type_id,
per_cpu_ptr(storage->percpu_buf, cpu),
m);
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
}
seq_puts(m, "}\n");
}
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index b4f18c85d7bc..645bd30bc9a9 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -11,24 +11,18 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
u32 inner_map_meta_size;
- struct fd f;
- int ret;
+ CLASS(fd, f)(inner_map_ufd);
- f = fdget(inner_map_ufd);
inner_map = __bpf_map_get(f);
if (IS_ERR(inner_map))
return inner_map;
/* Does not support >1 level map-in-map */
- if (inner_map->inner_map_meta) {
- ret = -EINVAL;
- goto put;
- }
+ if (inner_map->inner_map_meta)
+ return ERR_PTR(-EINVAL);
- if (!inner_map->ops->map_meta_equal) {
- ret = -ENOTSUPP;
- goto put;
- }
+ if (!inner_map->ops->map_meta_equal)
+ return ERR_PTR(-ENOTSUPP);
inner_map_meta_size = sizeof(*inner_map_meta);
/* In some cases verifier needs to access beyond just base map. */
@@ -36,10 +30,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta_size = sizeof(struct bpf_array);
inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
- if (!inner_map_meta) {
- ret = -ENOMEM;
- goto put;
- }
+ if (!inner_map_meta)
+ return ERR_PTR(-ENOMEM);
inner_map_meta->map_type = inner_map->map_type;
inner_map_meta->key_size = inner_map->key_size;
@@ -53,8 +45,9 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
* invalid/empty/valid, but ERR_PTR in case of errors. During
* equality NULL or IS_ERR is equivalent.
*/
- ret = PTR_ERR(inner_map_meta->record);
- goto free;
+ struct bpf_map *ret = ERR_CAST(inner_map_meta->record);
+ kfree(inner_map_meta);
+ return ret;
}
/* Note: We must use the same BTF, as we also used btf_record_dup above
* which relies on BTF being same for both maps, as some members like
@@ -77,14 +70,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_array_meta->elem_size = inner_array->elem_size;
inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
}
-
- fdput(f);
return inner_map_meta;
-free:
- kfree(inner_map_meta);
-put:
- fdput(f);
- return ERR_PTR(ret);
}
void bpf_map_meta_free(struct bpf_map *map_meta)
@@ -110,9 +96,8 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
int ufd)
{
struct bpf_map *inner_map, *inner_map_meta;
- struct fd f;
+ CLASS(fd, f)(ufd);
- f = fdget(ufd);
inner_map = __bpf_map_get(f);
if (IS_ERR(inner_map))
return inner_map;
@@ -123,7 +108,6 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
else
inner_map = ERR_PTR(-EINVAL);
- fdput(f);
return inner_map;
}
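These conversions (and the ones in syscall.c and token.c below) lean on the scope-based fd guard from include/linux/cleanup.h: the reference is dropped automatically when f leaves scope, which is why the error paths can simply return. A simplified standalone illustration of the mechanism (GCC/Clang cleanup attribute, not the kernel macro itself):

    #include <stdio.h>

    struct fd { int file; };

    static struct fd fdget(int ufd) { struct fd f = { ufd }; return f; }

    static void fd_release(struct fd *f)
    {
            if (f->file >= 0)
                    printf("fdput(%d)\n", f->file);
    }

    /* stand-in for CLASS(fd, name)(ufd) */
    #define SCOPED_FD(name, ufd) \
            struct fd name __attribute__((cleanup(fd_release))) = fdget(ufd)

    int lookup(int ufd)
    {
            SCOPED_FD(f, ufd);

            if (f.file < 0)
                    return -9;  /* -EBADF: the guard still releases the fd */
            return 0;           /* no explicit fdput() on any path */
    }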
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index dec892ded031..b3858a76e0b3 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -138,8 +138,8 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head)
static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
if (c->percpu_size) {
- void **obj = kmalloc_node(c->percpu_size, flags, node);
- void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
+ void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
+ void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
if (!obj || !pptr) {
free_percpu(pptr);
@@ -253,7 +253,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
static void free_one(void *obj, bool percpu)
{
if (percpu) {
- free_percpu(((void **)obj)[1]);
+ free_percpu(((void __percpu **)obj)[1]);
kfree(obj);
return;
}
@@ -509,8 +509,8 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
*/
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
- struct bpf_mem_caches *cc, __percpu *pcc;
- struct bpf_mem_cache *c, __percpu *pc;
+ struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
+ struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc;
struct obj_cgroup *objcg = NULL;
int cpu, i, unit_size, percpu_size = 0;
@@ -591,7 +591,7 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg
int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size)
{
- struct bpf_mem_caches *cc, __percpu *pcc;
+ struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
int cpu, i, unit_size, percpu_size;
struct obj_cgroup *objcg;
struct bpf_mem_cache *c;
diff --git a/kernel/bpf/relo_core.c b/kernel/bpf/relo_core.c
new file mode 100644
index 000000000000..aa822c9fcfde
--- /dev/null
+++ b/kernel/bpf/relo_core.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include "../../tools/lib/bpf/relo_core.c"
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 4b4f9670f1a9..49b8e5a0c6b4 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -308,7 +308,7 @@ put_file_unlock:
spin_unlock_bh(&reuseport_lock);
put_file:
- fput(socket->file);
+ sockfd_put(socket);
return err;
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index c99f8e5234ac..3615c06b7dfa 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -124,8 +124,24 @@ free_smap:
return ERR_PTR(err);
}
+static int fetch_build_id(struct vm_area_struct *vma, unsigned char *build_id, bool may_fault)
+{
+ return may_fault ? build_id_parse(vma, build_id, NULL)
+ : build_id_parse_nofault(vma, build_id, NULL);
+}
+
+/*
+ * Expects all id_offs[i].ip values to be set to correct initial IPs.
+ * They will subsequently be:
+ * - either adjusted in place to a file offset, if build ID fetching
+ * succeeds; in this case id_offs[i].build_id is set to correct build ID,
+ * and id_offs[i].status is set to BPF_STACK_BUILD_ID_VALID;
+ * - or kept intact as an IP, if build ID fetching failed; in this case
+ * id_offs[i].build_id is zeroed out and id_offs[i].status is set to
+ * BPF_STACK_BUILD_ID_IP.
+ */
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
- u64 *ips, u32 trace_nr, bool user)
+ u32 trace_nr, bool user, bool may_fault)
{
int i;
struct mmap_unlock_irq_work *work = NULL;
@@ -142,30 +158,28 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
/* cannot access current->mm, fall back to ips */
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
- id_offs[i].ip = ips[i];
memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
}
return;
}
for (i = 0; i < trace_nr; i++) {
- if (range_in_vma(prev_vma, ips[i], ips[i])) {
+ u64 ip = READ_ONCE(id_offs[i].ip);
+
+ if (range_in_vma(prev_vma, ip, ip)) {
vma = prev_vma;
- memcpy(id_offs[i].build_id, prev_build_id,
- BUILD_ID_SIZE_MAX);
+ memcpy(id_offs[i].build_id, prev_build_id, BUILD_ID_SIZE_MAX);
goto build_id_valid;
}
- vma = find_vma(current->mm, ips[i]);
- if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
+ vma = find_vma(current->mm, ip);
+ if (!vma || fetch_build_id(vma, id_offs[i].build_id, may_fault)) {
/* per entry fall back to ips */
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
- id_offs[i].ip = ips[i];
memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
continue;
}
build_id_valid:
- id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
- - vma->vm_start;
+ id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start;
id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
prev_vma = vma;
prev_build_id = id_offs[i].build_id;
@@ -216,7 +230,7 @@ static long __bpf_get_stackid(struct bpf_map *map,
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
- u32 hash, id, trace_nr, trace_len;
+ u32 hash, id, trace_nr, trace_len, i;
bool user = flags & BPF_F_USER_STACK;
u64 *ips;
bool hash_matches;
@@ -238,15 +252,18 @@ static long __bpf_get_stackid(struct bpf_map *map,
return id;
if (stack_map_use_build_id(map)) {
+ struct bpf_stack_build_id *id_offs;
+
/* for build_id+offset, pop a bucket before slow cmp */
new_bucket = (struct stack_map_bucket *)
pcpu_freelist_pop(&smap->freelist);
if (unlikely(!new_bucket))
return -ENOMEM;
new_bucket->nr = trace_nr;
- stack_map_get_build_id_offset(
- (struct bpf_stack_build_id *)new_bucket->data,
- ips, trace_nr, user);
+ id_offs = (struct bpf_stack_build_id *)new_bucket->data;
+ for (i = 0; i < trace_nr; i++)
+ id_offs[i].ip = ips[i];
+ stack_map_get_build_id_offset(id_offs, trace_nr, user, false /* !may_fault */);
trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
if (hash_matches && bucket->nr == trace_nr &&
memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
@@ -387,7 +404,7 @@ const struct bpf_func_proto bpf_get_stackid_proto_pe = {
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
struct perf_callchain_entry *trace_in,
- void *buf, u32 size, u64 flags)
+ void *buf, u32 size, u64 flags, bool may_fault)
{
u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
bool user_build_id = flags & BPF_F_USER_BUILD_ID;
@@ -405,8 +422,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (kernel && user_build_id)
goto clear;
- elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
- : sizeof(u64);
+ elem_size = user_build_id ? sizeof(struct bpf_stack_build_id) : sizeof(u64);
if (unlikely(size % elem_size))
goto clear;
@@ -427,6 +443,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (sysctl_perf_event_max_stack < max_depth)
max_depth = sysctl_perf_event_max_stack;
+ if (may_fault)
+ rcu_read_lock(); /* need RCU for perf's callchain below */
+
if (trace_in)
trace = trace_in;
else if (kernel && task)
@@ -434,21 +453,34 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
else
trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
crosstask, false);
- if (unlikely(!trace))
- goto err_fault;
- if (trace->nr < skip)
+ if (unlikely(!trace) || trace->nr < skip) {
+ if (may_fault)
+ rcu_read_unlock();
goto err_fault;
+ }
trace_nr = trace->nr - skip;
trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
copy_len = trace_nr * elem_size;
ips = trace->ip + skip;
- if (user && user_build_id)
- stack_map_get_build_id_offset(buf, ips, trace_nr, user);
- else
+ if (user_build_id) {
+ struct bpf_stack_build_id *id_offs = buf;
+ u32 i;
+
+ for (i = 0; i < trace_nr; i++)
+ id_offs[i].ip = ips[i];
+ } else {
memcpy(buf, ips, copy_len);
+ }
+
+ /* trace/ips should not be dereferenced after this point */
+ if (may_fault)
+ rcu_read_unlock();
+
+ if (user_build_id)
+ stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
if (size > copy_len)
memset(buf + copy_len, 0, size - copy_len);
@@ -464,7 +496,7 @@ clear:
BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
u64, flags)
{
- return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
+ return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */);
}
const struct bpf_func_proto bpf_get_stack_proto = {
@@ -477,8 +509,24 @@ const struct bpf_func_proto bpf_get_stack_proto = {
.arg4_type = ARG_ANYTHING,
};
-BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
- u32, size, u64, flags)
+BPF_CALL_4(bpf_get_stack_sleepable, struct pt_regs *, regs, void *, buf, u32, size,
+ u64, flags)
+{
+ return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, true /* may_fault */);
+}
+
+const struct bpf_func_proto bpf_get_stack_sleepable_proto = {
+ .func = bpf_get_stack_sleepable,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg4_type = ARG_ANYTHING,
+};
+
+static long __bpf_get_task_stack(struct task_struct *task, void *buf, u32 size,
+ u64 flags, bool may_fault)
{
struct pt_regs *regs;
long res = -EINVAL;
@@ -488,12 +536,18 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
regs = task_pt_regs(task);
if (regs)
- res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+ res = __bpf_get_stack(regs, task, NULL, buf, size, flags, may_fault);
put_task_stack(task);
return res;
}
+BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
+ u32, size, u64, flags)
+{
+ return __bpf_get_task_stack(task, buf, size, flags, false /* !may_fault */);
+}
+
const struct bpf_func_proto bpf_get_task_stack_proto = {
.func = bpf_get_task_stack,
.gpl_only = false,
@@ -505,6 +559,23 @@ const struct bpf_func_proto bpf_get_task_stack_proto = {
.arg4_type = ARG_ANYTHING,
};
+BPF_CALL_4(bpf_get_task_stack_sleepable, struct task_struct *, task, void *, buf,
+ u32, size, u64, flags)
+{
+ return __bpf_get_task_stack(task, buf, size, flags, true /* may_fault */);
+}
+
+const struct bpf_func_proto bpf_get_task_stack_sleepable_proto = {
+ .func = bpf_get_task_stack_sleepable,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+ .arg2_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg4_type = ARG_ANYTHING,
+};
+
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
void *, buf, u32, size, u64, flags)
{
@@ -516,7 +587,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
__u64 nr_kernel;
if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
- return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
+ return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */);
if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
BPF_F_USER_BUILD_ID)))
@@ -536,7 +607,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
__u64 nr = trace->nr;
trace->nr = nr_kernel;
- err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
+ err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */);
/* restore nr */
trace->nr = nr;
@@ -548,7 +619,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
goto clear;
flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
- err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
+ err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */);
}
return err;
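The offset computed in stack_map_get_build_id_offset() turns a sampled virtual IP into a file offset inside the mapped object. A worked example with illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint64_t vm_start = 0x7f0000002000ULL; /* start of the VMA */
            uint64_t vm_pgoff = 2;                 /* file page the VMA begins at */
            uint64_t ip       = 0x7f0000002abcULL; /* sampled user-space IP */

            /* same formula as id_offs[i].offset above */
            uint64_t off = (vm_pgoff << PAGE_SHIFT) + ip - vm_start;

            printf("file offset: 0x%llx\n", (unsigned long long)off); /* 0x2abc */
            return 0;
    }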
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index bf6c5f685ea2..a8f1808a1ca5 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -550,7 +550,8 @@ void btf_record_free(struct btf_record *rec)
case BPF_KPTR_PERCPU:
if (rec->fields[i].kptr.module)
module_put(rec->fields[i].kptr.module);
- btf_put(rec->fields[i].kptr.btf);
+ if (btf_is_kernel(rec->fields[i].kptr.btf))
+ btf_put(rec->fields[i].kptr.btf);
break;
case BPF_LIST_HEAD:
case BPF_LIST_NODE:
@@ -596,7 +597,8 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
- btf_get(fields[i].kptr.btf);
+ if (btf_is_kernel(fields[i].kptr.btf))
+ btf_get(fields[i].kptr.btf);
if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
ret = -ENXIO;
goto free;
@@ -733,15 +735,11 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
}
}
-/* called from workqueue */
-static void bpf_map_free_deferred(struct work_struct *work)
+static void bpf_map_free(struct bpf_map *map)
{
- struct bpf_map *map = container_of(work, struct bpf_map, work);
struct btf_record *rec = map->record;
struct btf *btf = map->btf;
- security_bpf_map_free(map);
- bpf_map_release_memcg(map);
/* implementation dependent freeing */
map->ops->map_free(map);
/* Delay freeing of btf_record for maps, as map_free
@@ -760,6 +758,16 @@ static void bpf_map_free_deferred(struct work_struct *work)
btf_put(btf);
}
+/* called from workqueue */
+static void bpf_map_free_deferred(struct work_struct *work)
+{
+ struct bpf_map *map = container_of(work, struct bpf_map, work);
+
+ security_bpf_map_free(map);
+ bpf_map_release_memcg(map);
+ bpf_map_free(map);
+}
+
static void bpf_map_put_uref(struct bpf_map *map)
{
if (atomic64_dec_and_test(&map->usercnt)) {
@@ -829,7 +837,7 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
- fmode_t mode = f.file->f_mode;
+ fmode_t mode = fd_file(f)->f_mode;
/* Our file permissions may have been overridden by global
* map permissions facing syscall side.
@@ -1411,28 +1419,12 @@ static int map_create(union bpf_attr *attr)
free_map_sec:
security_bpf_map_free(map);
free_map:
- btf_put(map->btf);
- map->ops->map_free(map);
+ bpf_map_free(map);
put_token:
bpf_token_put(token);
return err;
}
-/* if error is returned, fd is released.
- * On success caller should complete fd access with matching fdput()
- */
-struct bpf_map *__bpf_map_get(struct fd f)
-{
- if (!f.file)
- return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_map_fops) {
- fdput(f);
- return ERR_PTR(-EINVAL);
- }
-
- return f.file->private_data;
-}
-
void bpf_map_inc(struct bpf_map *map)
{
atomic64_inc(&map->refcnt);
@@ -1448,15 +1440,11 @@ EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
struct bpf_map *bpf_map_get(u32 ufd)
{
- struct fd f = fdget(ufd);
- struct bpf_map *map;
-
- map = __bpf_map_get(f);
- if (IS_ERR(map))
- return map;
+ CLASS(fd, f)(ufd);
+ struct bpf_map *map = __bpf_map_get(f);
- bpf_map_inc(map);
- fdput(f);
+ if (!IS_ERR(map))
+ bpf_map_inc(map);
return map;
}
@@ -1464,15 +1452,11 @@ EXPORT_SYMBOL(bpf_map_get);
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
- struct fd f = fdget(ufd);
- struct bpf_map *map;
-
- map = __bpf_map_get(f);
- if (IS_ERR(map))
- return map;
+ CLASS(fd, f)(ufd);
+ struct bpf_map *map = __bpf_map_get(f);
- bpf_map_inc_with_uref(map);
- fdput(f);
+ if (!IS_ERR(map))
+ bpf_map_inc_with_uref(map);
return map;
}
@@ -1537,11 +1521,9 @@ static int map_lookup_elem(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *uvalue = u64_to_user_ptr(attr->value);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -1550,26 +1532,20 @@ static int map_lookup_elem(union bpf_attr *attr)
if (attr->flags & ~BPF_F_LOCK)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
- err = -EPERM;
- goto err_put;
- }
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
+ return -EPERM;
if ((attr->flags & BPF_F_LOCK) &&
- !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
- err = -EINVAL;
- goto err_put;
- }
+ !btf_record_has_field(map->record, BPF_SPIN_LOCK))
+ return -EINVAL;
key = __bpf_copy_key(ukey, map->key_size);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
- goto err_put;
- }
+ if (IS_ERR(key))
+ return PTR_ERR(key);
value_size = bpf_map_value_size(map);
@@ -1600,8 +1576,6 @@ free_value:
kvfree(value);
free_key:
kvfree(key);
-err_put:
- fdput(f);
return err;
}
@@ -1612,17 +1586,15 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1651,7 +1623,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
goto free_key;
}
- err = bpf_map_update_value(map, f.file, key, value, attr->flags);
+ err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags);
if (!err)
maybe_wait_bpf_programs(map);
@@ -1660,7 +1632,6 @@ free_key:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -1669,16 +1640,14 @@ err_put:
static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
- int ufd = attr->map_fd;
struct bpf_map *map;
- struct fd f;
void *key;
int err;
if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1715,7 +1684,6 @@ out:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -1726,30 +1694,24 @@ static int map_get_next_key(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *unext_key = u64_to_user_ptr(attr->next_key);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *next_key;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
- err = -EPERM;
- goto err_put;
- }
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
+ return -EPERM;
if (ukey) {
key = __bpf_copy_key(ukey, map->key_size);
- if (IS_ERR(key)) {
- err = PTR_ERR(key);
- goto err_put;
- }
+ if (IS_ERR(key))
+ return PTR_ERR(key);
} else {
key = NULL;
}
@@ -1781,8 +1743,6 @@ free_next_key:
kvfree(next_key);
free_key:
kvfree(key);
-err_put:
- fdput(f);
return err;
}
@@ -2011,11 +1971,9 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *uvalue = u64_to_user_ptr(attr->value);
- int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
u32 value_size;
- struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
@@ -2024,7 +1982,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
if (attr->flags & ~BPF_F_LOCK)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -2094,7 +2052,6 @@ free_key:
kvfree(key);
err_put:
bpf_map_write_active_dec(map);
- fdput(f);
return err;
}
@@ -2102,27 +2059,22 @@ err_put:
static int map_freeze(const union bpf_attr *attr)
{
- int err = 0, ufd = attr->map_fd;
+ int err = 0;
struct bpf_map *map;
- struct fd f;
if (CHECK_ATTR(BPF_MAP_FREEZE))
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->map_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
- fdput(f);
+ if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record))
return -ENOTSUPP;
- }
- if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
- fdput(f);
+ if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE))
return -EPERM;
- }
mutex_lock(&map->freeze_mutex);
if (bpf_map_write_active(map)) {
@@ -2137,7 +2089,6 @@ static int map_freeze(const union bpf_attr *attr)
WRITE_ONCE(map->frozen, true);
err_put:
mutex_unlock(&map->freeze_mutex);
- fdput(f);
return err;
}
@@ -2407,18 +2358,6 @@ int bpf_prog_new_fd(struct bpf_prog *prog)
O_RDWR | O_CLOEXEC);
}
-static struct bpf_prog *____bpf_prog_get(struct fd f)
-{
- if (!f.file)
- return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_prog_fops) {
- fdput(f);
- return ERR_PTR(-EINVAL);
- }
-
- return f.file->private_data;
-}
-
void bpf_prog_add(struct bpf_prog *prog, int i)
{
atomic64_add(i, &prog->aux->refcnt);
@@ -2474,20 +2413,19 @@ bool bpf_prog_get_ok(struct bpf_prog *prog,
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
bool attach_drv)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_prog *prog;
- prog = ____bpf_prog_get(f);
- if (IS_ERR(prog))
- return prog;
- if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
- prog = ERR_PTR(-EINVAL);
- goto out;
- }
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (fd_file(f)->f_op != &bpf_prog_fops)
+ return ERR_PTR(-EINVAL);
+
+ prog = fd_file(f)->private_data;
+ if (!bpf_prog_get_ok(prog, attach_type, attach_drv))
+ return ERR_PTR(-EINVAL);
bpf_prog_inc(prog);
-out:
- fdput(f);
return prog;
}
@@ -3256,20 +3194,16 @@ int bpf_link_new_fd(struct bpf_link *link)
struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_link *link;
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_link_fops && f.file->f_op != &bpf_link_fops_poll) {
- fdput(f);
+ if (fd_file(f)->f_op != &bpf_link_fops && fd_file(f)->f_op != &bpf_link_fops_poll)
return ERR_PTR(-EINVAL);
- }
- link = f.file->private_data;
+ link = fd_file(f)->private_data;
bpf_link_inc(link);
- fdput(f);
-
return link;
}
EXPORT_SYMBOL(bpf_link_get_from_fd);
@@ -4974,33 +4908,25 @@ static int bpf_link_get_info_by_fd(struct file *file,
static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- int ufd = attr->info.bpf_fd;
- struct fd f;
- int err;
-
if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
return -EINVAL;
- f = fdget(ufd);
- if (!f.file)
+ CLASS(fd, f)(attr->info.bpf_fd);
+ if (fd_empty(f))
return -EBADFD;
- if (f.file->f_op == &bpf_prog_fops)
- err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
+ if (fd_file(f)->f_op == &bpf_prog_fops)
+ return bpf_prog_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
uattr);
- else if (f.file->f_op == &bpf_map_fops)
- err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
+ else if (fd_file(f)->f_op == &bpf_map_fops)
+ return bpf_map_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr,
uattr);
- else if (f.file->f_op == &btf_fops)
- err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
- else if (f.file->f_op == &bpf_link_fops || f.file->f_op == &bpf_link_fops_poll)
- err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
+ else if (fd_file(f)->f_op == &btf_fops)
+ return bpf_btf_get_info_by_fd(fd_file(f), fd_file(f)->private_data, attr, uattr);
+ else if (fd_file(f)->f_op == &bpf_link_fops || fd_file(f)->f_op == &bpf_link_fops_poll)
+ return bpf_link_get_info_by_fd(fd_file(f), fd_file(f)->private_data,
attr, uattr);
- else
- err = -EINVAL;
-
- fdput(f);
- return err;
+ return -EINVAL;
}
#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
@@ -5188,14 +5114,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
struct bpf_map *map;
- int err, ufd;
- struct fd f;
+ int err;
if (CHECK_ATTR(BPF_MAP_BATCH))
return -EINVAL;
- ufd = attr->batch.map_fd;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->batch.map_fd);
+
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -5215,7 +5140,7 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
else if (cmd == BPF_MAP_UPDATE_BATCH)
- BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
+ BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr);
else
BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
err_put:
@@ -5223,7 +5148,6 @@ err_put:
maybe_wait_bpf_programs(map);
bpf_map_write_active_dec(map);
}
- fdput(f);
return err;
}
@@ -5668,7 +5592,7 @@ static int token_create(union bpf_attr *attr)
return bpf_token_create(attr);
}
-static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
+static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
{
union bpf_attr attr;
int err;
@@ -5932,6 +5856,7 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
+ *res = 0;
if (flags)
return -EINVAL;
@@ -5952,7 +5877,8 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
.arg1_type = ARG_PTR_TO_MEM,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_LONG,
+ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg4_size = sizeof(u64),
};
static const struct bpf_func_proto *
diff --git a/kernel/bpf/token.c b/kernel/bpf/token.c
index d6ccf8d00eab..dcbec1a0dfb3 100644
--- a/kernel/bpf/token.c
+++ b/kernel/bpf/token.c
@@ -116,67 +116,52 @@ int bpf_token_create(union bpf_attr *attr)
struct user_namespace *userns;
struct inode *inode;
struct file *file;
+ CLASS(fd, f)(attr->token_create.bpffs_fd);
struct path path;
- struct fd f;
+ struct super_block *sb;
umode_t mode;
int err, fd;
- f = fdget(attr->token_create.bpffs_fd);
- if (!f.file)
+ if (fd_empty(f))
return -EBADF;
- path = f.file->f_path;
- path_get(&path);
- fdput(f);
+ path = fd_file(f)->f_path;
+ sb = path.dentry->d_sb;
- if (path.dentry != path.mnt->mnt_sb->s_root) {
- err = -EINVAL;
- goto out_path;
- }
- if (path.mnt->mnt_sb->s_op != &bpf_super_ops) {
- err = -EINVAL;
- goto out_path;
- }
+ if (path.dentry != sb->s_root)
+ return -EINVAL;
+ if (sb->s_op != &bpf_super_ops)
+ return -EINVAL;
err = path_permission(&path, MAY_ACCESS);
if (err)
- goto out_path;
+ return err;
- userns = path.dentry->d_sb->s_user_ns;
+ userns = sb->s_user_ns;
/*
* Enforce that creators of BPF tokens are in the same user
* namespace as the BPF FS instance. This makes reasoning about
* permissions a lot easier and we can always relax this later.
*/
- if (current_user_ns() != userns) {
- err = -EPERM;
- goto out_path;
- }
- if (!ns_capable(userns, CAP_BPF)) {
- err = -EPERM;
- goto out_path;
- }
+ if (current_user_ns() != userns)
+ return -EPERM;
+ if (!ns_capable(userns, CAP_BPF))
+ return -EPERM;
/* Creating BPF token in init_user_ns doesn't make much sense. */
- if (current_user_ns() == &init_user_ns) {
- err = -EOPNOTSUPP;
- goto out_path;
- }
+ if (current_user_ns() == &init_user_ns)
+ return -EOPNOTSUPP;
- mnt_opts = path.dentry->d_sb->s_fs_info;
+ mnt_opts = sb->s_fs_info;
if (mnt_opts->delegate_cmds == 0 &&
mnt_opts->delegate_maps == 0 &&
mnt_opts->delegate_progs == 0 &&
- mnt_opts->delegate_attachs == 0) {
- err = -ENOENT; /* no BPF token delegation is set up */
- goto out_path;
- }
+ mnt_opts->delegate_attachs == 0)
+ return -ENOENT; /* no BPF token delegation is set up */
mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
- inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto out_path;
- }
+ inode = bpf_get_inode(sb, NULL, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
inode->i_op = &bpf_token_iops;
inode->i_fop = &bpf_token_fops;
@@ -185,8 +170,7 @@ int bpf_token_create(union bpf_attr *attr)
file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
if (IS_ERR(file)) {
iput(inode);
- err = PTR_ERR(file);
- goto out_path;
+ return PTR_ERR(file);
}
token = kzalloc(sizeof(*token), GFP_USER);
@@ -218,33 +202,27 @@ int bpf_token_create(union bpf_attr *attr)
file->private_data = token;
fd_install(fd, file);
- path_put(&path);
return fd;
out_token:
bpf_token_free(token);
out_file:
fput(file);
-out_path:
- path_put(&path);
return err;
}
struct bpf_token *bpf_token_get_from_fd(u32 ufd)
{
- struct fd f = fdget(ufd);
+ CLASS(fd, f)(ufd);
struct bpf_token *token;
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-EBADF);
- if (f.file->f_op != &bpf_token_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &bpf_token_fops)
return ERR_PTR(-EINVAL);
- }
- token = f.file->private_data;
+ token = fd_file(f)->private_data;
bpf_token_inc(token);
- fdput(f);
return token;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 39d5710c68ad..9a7ed527e47e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -385,11 +385,6 @@ static void verbose_invalid_scalar(struct bpf_verifier_env *env,
verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval);
}
-static bool type_may_be_null(u32 type)
-{
- return type & PTR_MAYBE_NULL;
-}
-
static bool reg_not_null(const struct bpf_reg_state *reg)
{
enum bpf_reg_type type;
@@ -2184,6 +2179,44 @@ static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
reg->smin_value = max_t(s64, reg->smin_value, new_smin);
reg->smax_value = min_t(s64, reg->smax_value, new_smax);
}
+
+ /* Here we would like to handle a special case after a sign-extending load,
+ * when the upper bits of the 64-bit range are all 1s or all 0s.
+ *
+ * Upper bits are all 1s when register is in a range:
+ * [0xffff_ffff_0000_0000, 0xffff_ffff_ffff_ffff]
+ * Upper bits are all 0s when register is in a range:
+ * [0x0000_0000_0000_0000, 0x0000_0000_ffff_ffff]
+ * Together this forms a continuous range:
+ * [0xffff_ffff_0000_0000, 0x0000_0000_ffff_ffff]
+ *
+ * Now, suppose that the register range is in fact tighter:
+ * [0xffff_ffff_8000_0000, 0x0000_0000_ffff_ffff] (R)
+ * Also suppose that its 32-bit range is positive,
+ * meaning that the lower 32 bits of the full 64-bit register
+ * are in the range:
+ * [0x0000_0000, 0x7fff_ffff] (W)
+ *
+ * If this happens, then any value in a range:
+ * [0xffff_ffff_0000_0000, 0xffff_ffff_7fff_ffff]
+ * is smaller than a lowest bound of the range (R):
+ * 0xffff_ffff_8000_0000
+ * which means that upper bits of the full 64-bit register
+ * can't be all 1s, when lower bits are in range (W).
+ *
+ * Note that:
+ * - 0xffff_ffff_8000_0000 == (s64)S32_MIN
+ * - 0x0000_0000_7fff_ffff == (s64)S32_MAX
+ * These relations are used in the conditions below.
+ */
+ if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) {
+ reg->smin_value = reg->s32_min_value;
+ reg->smax_value = reg->s32_max_value;
+ reg->umin_value = reg->s32_min_value;
+ reg->umax_value = reg->s32_max_value;
+ reg->var_off = tnum_intersect(reg->var_off,
+ tnum_range(reg->smin_value, reg->smax_value));
+ }
}
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
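Numerically, the new deduction collapses the mixed bounds like this (a minimal model of the condition above):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* after a sign-extending 32-bit load the verifier may know a
             * wide 64-bit range but a tight, non-negative 32-bit range */
            int64_t smin = INT32_MIN, smax = INT32_MAX;
            int32_t s32_min = 0, s32_max = 100;

            /* the condition from __reg_deduce_mixed_bounds() above */
            if (s32_min >= 0 && smin >= INT32_MIN && smax <= INT32_MAX) {
                    /* the upper 32 bits cannot be all 1s, so the 32-bit
                     * bounds are valid 64-bit bounds as well */
                    smin = s32_min;
                    smax = s32_max;
            }
            assert(smin == 0 && smax == 100);
            return 0;
    }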
@@ -2336,6 +2369,25 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
__mark_reg_unknown(env, regs + regno);
}
+static int __mark_reg_s32_range(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs,
+ u32 regno,
+ s32 s32_min,
+ s32 s32_max)
+{
+ struct bpf_reg_state *reg = regs + regno;
+
+ reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min);
+ reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max);
+
+ reg->smin_value = max_t(s64, reg->smin_value, s32_min);
+ reg->smax_value = min_t(s64, reg->smax_value, s32_max);
+
+ reg_bounds_sync(reg);
+
+ return reg_bounds_sanity_check(env, reg, "s32_range");
+}
+
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{
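__mark_reg_s32_range() only ever narrows: it intersects the existing bounds with the supplied [s32_min, s32_max] (used further below for LSM return-value ranges). A minimal model of the clamp:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t smin = -512, smax = 4096; /* current 64-bit bounds */
            int32_t lo = -4095, hi = 0;       /* e.g. an LSM hook's retval range */

            smin = smin > lo ? smin : lo;     /* max_t(s64, smin_value, s32_min) */
            smax = smax < hi ? smax : hi;     /* min_t(s64, smax_value, s32_max) */

            assert(smin == -512 && smax == 0);
            return 0;
    }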
@@ -3337,9 +3389,87 @@ static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
return env->insn_aux_data[insn_idx].jmp_point;
}
+#define LR_FRAMENO_BITS 3
+#define LR_SPI_BITS 6
+#define LR_ENTRY_BITS (LR_SPI_BITS + LR_FRAMENO_BITS + 1)
+#define LR_SIZE_BITS 4
+#define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1)
+#define LR_SPI_MASK ((1ull << LR_SPI_BITS) - 1)
+#define LR_SIZE_MASK ((1ull << LR_SIZE_BITS) - 1)
+#define LR_SPI_OFF LR_FRAMENO_BITS
+#define LR_IS_REG_OFF (LR_SPI_BITS + LR_FRAMENO_BITS)
+#define LINKED_REGS_MAX 6
+
+struct linked_reg {
+ u8 frameno;
+ union {
+ u8 spi;
+ u8 regno;
+ };
+ bool is_reg;
+};
+
+struct linked_regs {
+ int cnt;
+ struct linked_reg entries[LINKED_REGS_MAX];
+};
+
+static struct linked_reg *linked_regs_push(struct linked_regs *s)
+{
+ if (s->cnt < LINKED_REGS_MAX)
+ return &s->entries[s->cnt++];
+
+ return NULL;
+}
+
+/* Use a u64 as a vector of 6 10-bit values, with the low 4 bits tracking
+ * the number of elements currently in the stack.
+ * Pack one history entry for linked registers as 10 bits in the following format:
+ * - 3-bits frameno
+ * - 6-bits spi_or_reg
+ * - 1-bit is_reg
+ */
+static u64 linked_regs_pack(struct linked_regs *s)
+{
+ u64 val = 0;
+ int i;
+
+ for (i = 0; i < s->cnt; ++i) {
+ struct linked_reg *e = &s->entries[i];
+ u64 tmp = 0;
+
+ tmp |= e->frameno;
+ tmp |= e->spi << LR_SPI_OFF;
+ tmp |= (e->is_reg ? 1 : 0) << LR_IS_REG_OFF;
+
+ val <<= LR_ENTRY_BITS;
+ val |= tmp;
+ }
+ val <<= LR_SIZE_BITS;
+ val |= s->cnt;
+ return val;
+}
+
+static void linked_regs_unpack(u64 val, struct linked_regs *s)
+{
+ int i;
+
+ s->cnt = val & LR_SIZE_MASK;
+ val >>= LR_SIZE_BITS;
+
+ for (i = 0; i < s->cnt; ++i) {
+ struct linked_reg *e = &s->entries[i];
+
+ e->frameno = val & LR_FRAMENO_MASK;
+ e->spi = (val >> LR_SPI_OFF) & LR_SPI_MASK;
+ e->is_reg = (val >> LR_IS_REG_OFF) & 0x1;
+ val >>= LR_ENTRY_BITS;
+ }
+}
+
/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
- int insn_flags)
+ int insn_flags, u64 linked_regs)
{
u32 cnt = cur->jmp_history_cnt;
struct bpf_jmp_history_entry *p;
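The packing above fits LINKED_REGS_MAX = 6 entries of 10 bits plus a 4-bit count into one u64 (6 * 10 + 4 = 64). A standalone roundtrip check with the same constants; note the entries come back in reverse order, which is harmless since they form a set:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define LR_FRAMENO_BITS 3
    #define LR_SPI_BITS     6
    #define LR_ENTRY_BITS   (LR_SPI_BITS + LR_FRAMENO_BITS + 1)
    #define LR_SIZE_BITS    4
    #define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1)
    #define LR_SPI_MASK     ((1ull << LR_SPI_BITS) - 1)
    #define LR_SIZE_MASK    ((1ull << LR_SIZE_BITS) - 1)
    #define LR_SPI_OFF      LR_FRAMENO_BITS
    #define LR_IS_REG_OFF   (LR_SPI_BITS + LR_FRAMENO_BITS)

    struct ent { uint8_t frameno, spi; bool is_reg; };

    static uint64_t pack(const struct ent *e, int cnt)
    {
            uint64_t val = 0;

            for (int i = 0; i < cnt; i++) {
                    uint64_t tmp = e[i].frameno
                                 | ((uint64_t)e[i].spi << LR_SPI_OFF)
                                 | ((uint64_t)(e[i].is_reg ? 1 : 0) << LR_IS_REG_OFF);

                    val = (val << LR_ENTRY_BITS) | tmp;
            }
            return (val << LR_SIZE_BITS) | (uint64_t)cnt;
    }

    static int unpack(uint64_t val, struct ent *e)
    {
            int cnt = val & LR_SIZE_MASK;

            val >>= LR_SIZE_BITS;
            for (int i = 0; i < cnt; i++) {
                    e[i].frameno = val & LR_FRAMENO_MASK;
                    e[i].spi = (val >> LR_SPI_OFF) & LR_SPI_MASK;
                    e[i].is_reg = (val >> LR_IS_REG_OFF) & 1;
                    val >>= LR_ENTRY_BITS;
            }
            return cnt;
    }

    int main(void)
    {
            struct ent in[2] = { { 1, 7, true }, { 2, 40, false } };
            struct ent out[2];

            assert(unpack(pack(in, 2), out) == 2);
            assert(out[0].frameno == 2 && out[0].spi == 40 && !out[0].is_reg);
            assert(out[1].frameno == 1 && out[1].spi == 7 && out[1].is_reg);
            return 0;
    }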
@@ -3355,6 +3485,10 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st
"verifier insn history bug: insn_idx %d cur flags %x new flags %x\n",
env->insn_idx, env->cur_hist_ent->flags, insn_flags);
env->cur_hist_ent->flags |= insn_flags;
+ WARN_ONCE(env->cur_hist_ent->linked_regs != 0,
+ "verifier insn history bug: insn_idx %d linked_regs != 0: %#llx\n",
+ env->insn_idx, env->cur_hist_ent->linked_regs);
+ env->cur_hist_ent->linked_regs = linked_regs;
return 0;
}
@@ -3369,6 +3503,7 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st
p->idx = env->insn_idx;
p->prev_idx = env->prev_insn_idx;
p->flags = insn_flags;
+ p->linked_regs = linked_regs;
cur->jmp_history_cnt = cnt;
env->cur_hist_ent = p;
@@ -3534,6 +3669,11 @@ static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
return bt->reg_masks[bt->frame] & (1 << reg);
}
+static inline bool bt_is_frame_reg_set(struct backtrack_state *bt, u32 frame, u32 reg)
+{
+ return bt->reg_masks[frame] & (1 << reg);
+}
+
static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
{
return bt->stack_masks[frame] & (1ull << slot);
@@ -3578,6 +3718,42 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
}
}
+/* If any register R in hist->linked_regs is marked as precise in bt,
+ * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs.
+ */
+static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist)
+{
+ struct linked_regs linked_regs;
+ bool some_precise = false;
+ int i;
+
+ if (!hist || hist->linked_regs == 0)
+ return;
+
+ linked_regs_unpack(hist->linked_regs, &linked_regs);
+ for (i = 0; i < linked_regs.cnt; ++i) {
+ struct linked_reg *e = &linked_regs.entries[i];
+
+ if ((e->is_reg && bt_is_frame_reg_set(bt, e->frameno, e->regno)) ||
+ (!e->is_reg && bt_is_frame_slot_set(bt, e->frameno, e->spi))) {
+ some_precise = true;
+ break;
+ }
+ }
+
+ if (!some_precise)
+ return;
+
+ for (i = 0; i < linked_regs.cnt; ++i) {
+ struct linked_reg *e = &linked_regs.entries[i];
+
+ if (e->is_reg)
+ bt_set_frame_reg(bt, e->frameno, e->regno);
+ else
+ bt_set_frame_slot(bt, e->frameno, e->spi);
+ }
+}
+
static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
/* For given verifier state backtrack_insn() is called from the last insn to
@@ -3617,6 +3793,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
+ /* If there is a history record that some registers gained range at this insn,
+ * propagate precision marks to those registers, so that bt_is_reg_set()
+ * accounts for these registers.
+ */
+ bt_sync_linked_regs(bt, hist);
+
if (class == BPF_ALU || class == BPF_ALU64) {
if (!bt_is_reg_set(bt, dreg))
return 0;
@@ -3846,7 +4028,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
*/
bt_set_reg(bt, dreg);
bt_set_reg(bt, sreg);
- /* else dreg <cond> K
+ } else if (BPF_SRC(insn->code) == BPF_K) {
+ /* dreg <cond> K
* Only dreg still needs precision before
* this insn, so for the K-based conditional
* there is nothing new to be marked.
@@ -3864,6 +4047,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
/* to be analyzed */
return -ENOTSUPP;
}
+ /* Propagate precision marks to linked registers, to account for
+ * registers marked as precise in this function.
+ */
+ bt_sync_linked_regs(bt, hist);
return 0;
}
@@ -3991,96 +4178,6 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_
}
}
-static bool idset_contains(struct bpf_idset *s, u32 id)
-{
- u32 i;
-
- for (i = 0; i < s->count; ++i)
- if (s->ids[i] == (id & ~BPF_ADD_CONST))
- return true;
-
- return false;
-}
-
-static int idset_push(struct bpf_idset *s, u32 id)
-{
- if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids)))
- return -EFAULT;
- s->ids[s->count++] = id & ~BPF_ADD_CONST;
- return 0;
-}
-
-static void idset_reset(struct bpf_idset *s)
-{
- s->count = 0;
-}
-
-/* Collect a set of IDs for all registers currently marked as precise in env->bt.
- * Mark all registers with these IDs as precise.
- */
-static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
-{
- struct bpf_idset *precise_ids = &env->idset_scratch;
- struct backtrack_state *bt = &env->bt;
- struct bpf_func_state *func;
- struct bpf_reg_state *reg;
- DECLARE_BITMAP(mask, 64);
- int i, fr;
-
- idset_reset(precise_ids);
-
- for (fr = bt->frame; fr >= 0; fr--) {
- func = st->frame[fr];
-
- bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
- for_each_set_bit(i, mask, 32) {
- reg = &func->regs[i];
- if (!reg->id || reg->type != SCALAR_VALUE)
- continue;
- if (idset_push(precise_ids, reg->id))
- return -EFAULT;
- }
-
- bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
- for_each_set_bit(i, mask, 64) {
- if (i >= func->allocated_stack / BPF_REG_SIZE)
- break;
- if (!is_spilled_scalar_reg(&func->stack[i]))
- continue;
- reg = &func->stack[i].spilled_ptr;
- if (!reg->id)
- continue;
- if (idset_push(precise_ids, reg->id))
- return -EFAULT;
- }
- }
-
- for (fr = 0; fr <= st->curframe; ++fr) {
- func = st->frame[fr];
-
- for (i = BPF_REG_0; i < BPF_REG_10; ++i) {
- reg = &func->regs[i];
- if (!reg->id)
- continue;
- if (!idset_contains(precise_ids, reg->id))
- continue;
- bt_set_frame_reg(bt, fr, i);
- }
- for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) {
- if (!is_spilled_scalar_reg(&func->stack[i]))
- continue;
- reg = &func->stack[i].spilled_ptr;
- if (!reg->id)
- continue;
- if (!idset_contains(precise_ids, reg->id))
- continue;
- bt_set_frame_slot(bt, fr, i);
- }
- }
-
- return 0;
-}
-
/*
* __mark_chain_precision() backtracks BPF program instruction sequence and
* chain of verifier states making sure that register *regno* (if regno >= 0)
@@ -4213,31 +4310,6 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
bt->frame, last_idx, first_idx, subseq_idx);
}
- /* If some register with scalar ID is marked as precise,
- * make sure that all registers sharing this ID are also precise.
- * This is needed to estimate effect of find_equal_scalars().
- * Do this at the last instruction of each state,
- * bpf_reg_state::id fields are valid for these instructions.
- *
- * Allows to track precision in situation like below:
- *
- * r2 = unknown value
- * ...
- * --- state #0 ---
- * ...
- * r1 = r2 // r1 and r2 now share the same ID
- * ...
- * --- state #1 {r1.id = A, r2.id = A} ---
- * ...
- * if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1
- * ...
- * --- state #2 {r1.id = A, r2.id = A} ---
- * r3 = r10
- * r3 += r1 // need to mark both r1 and r2
- */
- if (mark_precise_scalar_ids(env, st))
- return -EFAULT;
-
if (last_idx < 0) {
/* we are at the entry into subprog, which
* is expected for global funcs, but only if
@@ -4458,7 +4530,7 @@ static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
if (!src_reg->id && !tnum_is_const(src_reg->var_off))
/* Ensure that src_reg has a valid ID that will be copied to
- * dst_reg and then will be used by find_equal_scalars() to
+ * dst_reg and then will be used by sync_linked_regs() to
* propagate min/max range.
*/
src_reg->id = ++env->id_gen;
@@ -4504,6 +4576,31 @@ static int get_reg_width(struct bpf_reg_state *reg)
return fls64(reg->umax_value);
}
+/* See comment for mark_fastcall_pattern_for_call() */
+static void check_fastcall_stack_contract(struct bpf_verifier_env *env,
+ struct bpf_func_state *state, int insn_idx, int off)
+{
+ struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno];
+ struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ int i;
+
+ if (subprog->fastcall_stack_off <= off || aux[insn_idx].fastcall_pattern)
+ return;
+ /* An access to the region [max_stack_depth .. fastcall_stack_off)
+ * from something that is not a part of the fastcall pattern
+ * disables fastcall rewrites for the current subprogram by setting
+ * fastcall_stack_off to a value smaller than any possible offset.
+ */
+ subprog->fastcall_stack_off = S16_MIN;
+ /* reset fastcall aux flags within subprogram,
+ * happens at most once per subprogram
+ */
+ for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+ aux[i].fastcall_spills_num = 0;
+ aux[i].fastcall_pattern = 0;
+ }
+}
+
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
@@ -4552,6 +4649,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
if (err)
return err;
+ check_fastcall_stack_contract(env, state, insn_idx, off);
mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
bool reg_value_fits;
@@ -4627,7 +4725,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
}
if (insn_flags)
- return push_jmp_history(env, env->cur_state, insn_flags);
+ return push_jmp_history(env, env->cur_state, insn_flags, 0);
return 0;
}
@@ -4686,6 +4784,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
return err;
}
+ check_fastcall_stack_contract(env, state, insn_idx, min_off);
/* Variable offset writes destroy any spilled pointers in range. */
for (i = min_off; i < max_off; i++) {
u8 new_type, *stype;
@@ -4824,6 +4923,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
reg = &reg_state->stack[spi].spilled_ptr;
mark_stack_slot_scratched(env, spi);
+ check_fastcall_stack_contract(env, state, env->insn_idx, off);
if (is_spilled_reg(&reg_state->stack[spi])) {
u8 spill_size = 1;
@@ -4932,7 +5032,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
insn_flags = 0; /* we are not restoring spilled register */
}
if (insn_flags)
- return push_jmp_history(env, env->cur_state, insn_flags);
+ return push_jmp_history(env, env->cur_state, insn_flags, 0);
return 0;
}
@@ -4984,6 +5084,7 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env,
min_off = reg->smin_value + off;
max_off = reg->smax_value + off;
mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
+ check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off);
return 0;
}
@@ -5589,11 +5690,13 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type,
- struct btf **btf, u32 *btf_id)
+ struct btf **btf, u32 *btf_id, bool *is_retval, bool is_ldsx)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
.log = &env->log,
+ .is_retval = false,
+ .is_ldsx = is_ldsx,
};
if (env->ops->is_valid_access &&
@@ -5606,6 +5709,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
* type of narrower access.
*/
*reg_type = info.reg_type;
+ *is_retval = info.is_retval;
if (base_type(*reg_type) == PTR_TO_BTF_ID) {
*btf = info.btf;
@@ -6694,10 +6798,20 @@ static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
struct bpf_func_state *state,
enum bpf_access_type t)
{
- int min_valid_off;
+ struct bpf_insn_aux_data *aux = &env->insn_aux_data[env->insn_idx];
+ int min_valid_off, max_bpf_stack;
+
+ /* If the accessing instruction is a spill/fill from the bpf_fastcall
+ * pattern, add room for all caller-saved registers below MAX_BPF_STACK.
+ * If the bpf_fastcall rewrite does not happen, the maximal stack depth
+ * is checked by check_max_stack_depth_subprog().
+ */
+ max_bpf_stack = MAX_BPF_STACK;
+ if (aux->fastcall_pattern)
+ max_bpf_stack += CALLER_SAVED_REGS * BPF_REG_SIZE;
if (t == BPF_WRITE || env->allow_uninit_stack)
- min_valid_off = -MAX_BPF_STACK;
+ min_valid_off = -max_bpf_stack;
else
min_valid_off = -state->allocated_stack;
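With the usual kernel constants (assuming MAX_BPF_STACK = 512, BPF_REG_SIZE = 8 and CALLER_SAVED_REGS = 6, i.e. R0-R5) this grants fastcall-pattern spills and fills 48 bytes of headroom below the normal limit:

    #include <assert.h>

    #define MAX_BPF_STACK     512
    #define BPF_REG_SIZE      8
    #define CALLER_SAVED_REGS 6 /* R0-R5 */

    int main(void)
    {
            /* headroom used only by fastcall-pattern spills/fills */
            assert(MAX_BPF_STACK + CALLER_SAVED_REGS * BPF_REG_SIZE == 560);
            return 0;
    }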
@@ -6774,6 +6888,17 @@ static int check_stack_access_within_bounds(
return grow_stack_state(env, state, -min_off /* size */);
}
+static bool get_func_retval_range(struct bpf_prog *prog,
+ struct bpf_retval_range *range)
+{
+ if (prog->type == BPF_PROG_TYPE_LSM &&
+ prog->expected_attach_type == BPF_LSM_MAC &&
+ !bpf_lsm_get_retval_range(prog, range)) {
+ return true;
+ }
+ return false;
+}
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -6878,6 +7003,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
+ bool is_retval = false;
+ struct bpf_retval_range range;
enum bpf_reg_type reg_type = SCALAR_VALUE;
struct btf *btf = NULL;
u32 btf_id = 0;
@@ -6893,7 +7020,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return err;
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
- &btf_id);
+ &btf_id, &is_retval, is_ldsx);
if (err)
verbose_linfo(env, insn_idx, "; ");
if (!err && t == BPF_READ && value_regno >= 0) {
@@ -6902,7 +7029,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
* case, we know the offset is zero.
*/
if (reg_type == SCALAR_VALUE) {
- mark_reg_unknown(env, regs, value_regno);
+ if (is_retval && get_func_retval_range(env->prog, &range)) {
+ err = __mark_reg_s32_range(env, regs, value_regno,
+ range.minval, range.maxval);
+ if (err)
+ return err;
+ } else {
+ mark_reg_unknown(env, regs, value_regno);
+ }
} else {
mark_reg_known_zero(env, regs,
value_regno);
@@ -7666,29 +7800,38 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
- struct bpf_map *map_ptr = reg->map_ptr;
struct btf_field *kptr_field;
+ struct bpf_map *map_ptr;
+ struct btf_record *rec;
u32 kptr_off;
+ if (type_is_ptr_alloc_obj(reg->type)) {
+ rec = reg_btf_record(reg);
+ } else { /* PTR_TO_MAP_VALUE */
+ map_ptr = reg->map_ptr;
+ if (!map_ptr->btf) {
+ verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
+ map_ptr->name);
+ return -EINVAL;
+ }
+ rec = map_ptr->record;
+ meta->map_ptr = map_ptr;
+ }
+
if (!tnum_is_const(reg->var_off)) {
verbose(env,
"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
regno);
return -EINVAL;
}
- if (!map_ptr->btf) {
- verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
- map_ptr->name);
- return -EINVAL;
- }
- if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
- verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
+
+ if (!btf_record_has_field(rec, BPF_KPTR)) {
+ verbose(env, "R%d has no valid kptr\n", regno);
return -EINVAL;
}
- meta->map_ptr = map_ptr;
kptr_off = reg->off + reg->var_off.value;
- kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
+ kptr_field = btf_record_find(rec, kptr_off, BPF_KPTR);
if (!kptr_field) {
verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
return -EACCES;
@@ -7833,12 +7976,17 @@ static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
return meta->kfunc_flags & KF_ITER_DESTROY;
}
-static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
+static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg_idx,
+ const struct btf_param *arg)
{
/* btf_check_iter_kfuncs() guarantees that the first argument of any iter
* kfunc is the iter state pointer
*/
- return arg == 0 && is_iter_kfunc(meta);
+ if (is_iter_kfunc(meta))
+ return arg_idx == 0;
+
+ /* iter passed as an argument to a generic kfunc */
+ return btf_param_match_suffix(meta->btf, arg, "__iter");
}
static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
@@ -7846,14 +7994,20 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
const struct btf_type *t;
- const struct btf_param *arg;
- int spi, err, i, nr_slots;
- u32 btf_id;
+ int spi, err, i, nr_slots, btf_id;
- /* btf_check_iter_kfuncs() ensures we don't need to validate anything here */
- arg = &btf_params(meta->func_proto)[0];
- t = btf_type_skip_modifiers(meta->btf, arg->type, NULL); /* PTR */
- t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id); /* STRUCT */
+ /* For iter_{new,next,destroy} functions, btf_check_iter_kfuncs()
+ * ensures the struct convention, so no BTF validation would be
+ * needed here. But since iter state can be passed as a parameter
+ * to any kfunc (when the arg has the "__iter" suffix), we need to
+ * be a bit more conservative here.
+ */
+ btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1);
+ if (btf_id < 0) {
+ verbose(env, "expected valid iter pointer as arg #%d\n", regno);
+ return -EINVAL;
+ }
+ t = btf_type_by_id(meta->btf, btf_id);
nr_slots = t->size / BPF_REG_SIZE;
if (is_iter_new_kfunc(meta)) {
@@ -7875,7 +8029,9 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
if (err)
return err;
} else {
- /* iter_next() or iter_destroy() expect initialized iter state*/
+ /* iter_next() or iter_destroy(), as well as any kfunc
+ * accepting iter argument, expect initialized iter state
+ */
err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots);
switch (err) {
case 0:
@@ -7989,6 +8145,15 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env,
return 0;
}
+static struct bpf_reg_state *get_iter_from_state(struct bpf_verifier_state *cur_st,
+ struct bpf_kfunc_call_arg_meta *meta)
+{
+ int iter_frameno = meta->iter.frameno;
+ int iter_spi = meta->iter.spi;
+
+ return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
+}
+
/* process_iter_next_call() is called when verifier gets to iterator's next
* "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
* to it as just "iter_next()" in comments below.
@@ -8073,12 +8238,10 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
struct bpf_reg_state *cur_iter, *queued_iter;
- int iter_frameno = meta->iter.frameno;
- int iter_spi = meta->iter.spi;
BTF_TYPE_EMIT(struct bpf_iter);
- cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
+ cur_iter = get_iter_from_state(cur_st, meta);
if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
@@ -8106,7 +8269,7 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
if (!queued_st)
return -ENOMEM;
- queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
+ queued_iter = get_iter_from_state(queued_st, meta);
queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
queued_iter->iter.depth++;
if (prev_st)
@@ -8130,6 +8293,12 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
type == ARG_CONST_SIZE_OR_ZERO;
}
+static bool arg_type_is_raw_mem(enum bpf_arg_type type)
+{
+ return base_type(type) == ARG_PTR_TO_MEM &&
+ type & MEM_UNINIT;
+}
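
The check relies on the verifier's flag-encoded argument types: a base type in the low bits with modifier flags OR-ed on top. A toy model of that encoding (constants are illustrative, not the kernel's actual values):

#include <stdbool.h>
#include <stdio.h>

enum { BASE_MASK = 0xff };                 /* low bits: base type */
enum { ARG_PTR_TO_MEM = 1, ARG_PTR_TO_CTX = 2 };
enum { MEM_UNINIT = 1 << 8 };              /* modifier flag */

static int base_type(int type) { return type & BASE_MASK; }

static bool is_raw_mem(int type)
{
	/* raw (uninitialized) memory: PTR_TO_MEM base plus MEM_UNINIT */
	return base_type(type) == ARG_PTR_TO_MEM && (type & MEM_UNINIT);
}

int main(void)
{
	printf("%d\n", is_raw_mem(ARG_PTR_TO_MEM | MEM_UNINIT)); /* 1 */
	printf("%d\n", is_raw_mem(ARG_PTR_TO_MEM));              /* 0 */
	return 0;
}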
+
static bool arg_type_is_release(enum bpf_arg_type type)
{
return type & OBJ_RELEASE;
@@ -8140,16 +8309,6 @@ static bool arg_type_is_dynptr(enum bpf_arg_type type)
return base_type(type) == ARG_PTR_TO_DYNPTR;
}
-static int int_ptr_type_to_size(enum bpf_arg_type type)
-{
- if (type == ARG_PTR_TO_INT)
- return sizeof(u32);
- else if (type == ARG_PTR_TO_LONG)
- return sizeof(u64);
-
- return -EINVAL;
-}
-
static int resolve_map_arg_type(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_arg_type *arg_type)
@@ -8222,16 +8381,6 @@ static const struct bpf_reg_types mem_types = {
},
};
-static const struct bpf_reg_types int_ptr_types = {
- .types = {
- PTR_TO_STACK,
- PTR_TO_PACKET,
- PTR_TO_PACKET_META,
- PTR_TO_MAP_KEY,
- PTR_TO_MAP_VALUE,
- },
-};
-
static const struct bpf_reg_types spin_lock_types = {
.types = {
PTR_TO_MAP_VALUE,
@@ -8262,7 +8411,12 @@ static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
-static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
+static const struct bpf_reg_types kptr_xchg_dest_types = {
+ .types = {
+ PTR_TO_MAP_VALUE,
+ PTR_TO_BTF_ID | MEM_ALLOC
+ }
+};
static const struct bpf_reg_types dynptr_types = {
.types = {
PTR_TO_STACK,
@@ -8287,14 +8441,12 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
[ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
[ARG_PTR_TO_MEM] = &mem_types,
[ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types,
- [ARG_PTR_TO_INT] = &int_ptr_types,
- [ARG_PTR_TO_LONG] = &int_ptr_types,
[ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
[ARG_PTR_TO_FUNC] = &func_ptr_types,
[ARG_PTR_TO_STACK] = &stack_ptr_types,
[ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
[ARG_PTR_TO_TIMER] = &timer_types,
- [ARG_PTR_TO_KPTR] = &kptr_types,
+ [ARG_KPTR_XCHG_DEST] = &kptr_xchg_dest_types,
[ARG_PTR_TO_DYNPTR] = &dynptr_types,
};
@@ -8333,7 +8485,8 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
if (base_type(arg_type) == ARG_PTR_TO_MEM)
type &= ~DYNPTR_TYPE_FLAG_MASK;
- if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) {
+ /* Local kptr types are allowed as the source argument of bpf_kptr_xchg */
+ if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) {
type &= ~MEM_ALLOC;
type &= ~MEM_PERCPU;
}
@@ -8426,7 +8579,8 @@ found:
verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
return -EFAULT;
}
- if (meta->func_id == BPF_FUNC_kptr_xchg) {
+ /* Check if local kptr in src arg matches kptr in dst arg */
+ if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) {
if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
return -EACCES;
}
@@ -8737,7 +8891,7 @@ skip_type_check:
meta->release_regno = regno;
}
- if (reg->ref_obj_id) {
+ if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,
@@ -8849,9 +9003,11 @@ skip_type_check:
*/
meta->raw_mode = arg_type & MEM_UNINIT;
if (arg_type & MEM_FIXED_SIZE) {
- err = check_helper_mem_access(env, regno,
- fn->arg_size[arg], false,
- meta);
+ err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta);
+ if (err)
+ return err;
+ if (arg_type & MEM_ALIGNED)
+ err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true);
}
break;
case ARG_CONST_SIZE:
@@ -8876,17 +9032,6 @@ skip_type_check:
if (err)
return err;
break;
- case ARG_PTR_TO_INT:
- case ARG_PTR_TO_LONG:
- {
- int size = int_ptr_type_to_size(arg_type);
-
- err = check_helper_mem_access(env, regno, size, false, meta);
- if (err)
- return err;
- err = check_ptr_alignment(env, reg, 0, size, true);
- break;
- }
case ARG_PTR_TO_CONST_STR:
{
err = check_reg_const_str(env, reg, regno);
@@ -8894,7 +9039,7 @@ skip_type_check:
return err;
break;
}
- case ARG_PTR_TO_KPTR:
+ case ARG_KPTR_XCHG_DEST:
err = process_kptr_func(env, regno, meta);
if (err)
return err;
@@ -9203,15 +9348,15 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
int count = 0;
- if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg1_type))
count++;
- if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg2_type))
count++;
- if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg3_type))
count++;
- if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg4_type))
count++;
- if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
+ if (arg_type_is_raw_mem(fn->arg5_type))
count++;
/* We only support one arg being in raw mode at the moment,
@@ -9925,9 +10070,13 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
return is_rbtree_lock_required_kfunc(kfunc_btf_id);
}
-static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
+static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg,
+ bool return_32bit)
{
- return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
+ if (return_32bit)
+ return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval;
+ else
+ return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
}
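
Why a separate 32-bit check matters: when a program only sets the low 32 bits of R0, the 64-bit bounds can be wide open while the s32 bounds are tight. A minimal user-space sketch with simplified stand-ins for the verifier's register state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { int minval, maxval; };
struct reg { int64_t smin, smax; int32_t s32_min, s32_max; };

static bool range_within(struct range r, const struct reg *reg, bool ret32)
{
	if (ret32)
		return r.minval <= reg->s32_min && reg->s32_max <= r.maxval;
	return r.minval <= reg->smin && reg->smax <= r.maxval;
}

int main(void)
{
	/* upper 32 bits unknown, lower 32 bits known to be 0 or 1 */
	struct reg r0 = { INT64_MIN, INT64_MAX, 0, 1 };
	struct range r = { 0, 1 };

	printf("64-bit check: %d\n", range_within(r, &r0, false)); /* 0 */
	printf("32-bit check: %d\n", range_within(r, &r0, true));  /* 1 */
	return 0;
}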
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
@@ -9964,8 +10113,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
if (err)
return err;
- /* enforce R0 return value range */
- if (!retval_range_within(callee->callback_ret_range, r0)) {
+ /* enforce R0 return value range, and bpf_callback_t returns 64bit */
+ if (!retval_range_within(callee->callback_ret_range, r0, false)) {
verbose_invalid_scalar(env, r0, callee->callback_ret_range,
"At callback return", "R0");
return -EINVAL;
@@ -10267,6 +10416,19 @@ static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno
state->callback_subprogno == subprogno);
}
+static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
+ const struct bpf_func_proto **ptr)
+{
+ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID)
+ return -ERANGE;
+
+ if (!env->ops->get_func_proto)
+ return -EINVAL;
+
+ *ptr = env->ops->get_func_proto(func_id, env->prog);
+ return *ptr ? 0 : -EINVAL;
+}
+
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{
@@ -10283,18 +10445,16 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
/* find function prototype */
func_id = insn->imm;
- if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
- verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
- func_id);
+ err = get_helper_proto(env, insn->imm, &fn);
+ if (err == -ERANGE) {
+ verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id);
return -EINVAL;
}
- if (env->ops->get_func_proto)
- fn = env->ops->get_func_proto(func_id, env->prog);
- if (!fn) {
+ if (err) {
verbose(env, "program of this type cannot use helper %s#%d\n",
func_id_name(func_id), func_id);
- return -EINVAL;
+ return err;
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
@@ -11230,7 +11390,7 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
return KF_ARG_PTR_TO_DYNPTR;
- if (is_kfunc_arg_iter(meta, argno))
+ if (is_kfunc_arg_iter(meta, argno, &args[argno]))
return KF_ARG_PTR_TO_ITER;
if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
@@ -11332,8 +11492,7 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
* btf_struct_ids_match() to walk the struct at the 0th offset, and
* resolve types.
*/
- if (is_kfunc_acquire(meta) ||
- (is_kfunc_release(meta) && reg->ref_obj_id) ||
+ if ((is_kfunc_release(meta) && reg->ref_obj_id) ||
btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
strict_type_match = true;
@@ -11950,7 +12109,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
switch (kf_arg_type) {
case KF_ARG_PTR_TO_CTX:
if (reg->type != PTR_TO_CTX) {
- verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
+ verbose(env, "arg#%d expected pointer to ctx, but got %s\n",
+ i, reg_type_str(env, reg->type));
return -EINVAL;
}
@@ -12673,6 +12833,17 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
regs[BPF_REG_0].btf = desc_btf;
regs[BPF_REG_0].type = PTR_TO_BTF_ID;
regs[BPF_REG_0].btf_id = ptr_type_id;
+
+ if (is_iter_next_kfunc(&meta)) {
+ struct bpf_reg_state *cur_iter;
+
+ cur_iter = get_iter_from_state(env->cur_state, &meta);
+
+ if (cur_iter->type & MEM_RCU) /* KF_RCU_PROTECTED */
+ regs[BPF_REG_0].type |= MEM_RCU;
+ else
+ regs[BPF_REG_0].type |= PTR_TRUSTED;
+ }
}
if (is_kfunc_ret_null(&meta)) {
@@ -14101,7 +14272,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
u64 val = reg_const_value(src_reg, alu32);
if ((dst_reg->id & BPF_ADD_CONST) ||
- /* prevent overflow in find_equal_scalars() later */
+ /* prevent overflow in sync_linked_regs() later */
val > (u32)S32_MAX) {
/*
* If the register already went through rX += val
@@ -14116,7 +14287,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
} else {
/*
* Make sure ID is cleared otherwise dst_reg min/max could be
- * incorrectly propagated into other registers by find_equal_scalars()
+ * incorrectly propagated into other registers by sync_linked_regs()
*/
dst_reg->id = 0;
}
@@ -14266,7 +14437,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
copy_register_state(dst_reg, src_reg);
/* Make sure ID is cleared if src_reg is not in u32
* range otherwise dst_reg min/max could be incorrectly
- * propagated into src_reg by find_equal_scalars()
+ * propagated into src_reg by sync_linked_regs()
*/
if (!is_src_reg_u32)
dst_reg->id = 0;
@@ -15089,14 +15260,66 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
return true;
}
-static void find_equal_scalars(struct bpf_verifier_state *vstate,
- struct bpf_reg_state *known_reg)
+static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_state *reg,
+ u32 id, u32 frameno, u32 spi_or_reg, bool is_reg)
+{
+ struct linked_reg *e;
+
+ if (reg->type != SCALAR_VALUE || (reg->id & ~BPF_ADD_CONST) != id)
+ return;
+
+ e = linked_regs_push(reg_set);
+ if (e) {
+ e->frameno = frameno;
+ e->is_reg = is_reg;
+ e->regno = spi_or_reg;
+ } else {
+ reg->id = 0;
+ }
+}
+
+/* For every R that is a scalar register or a spilled scalar register
+ * in the verifier state, save R in linked_regs if R->id == id.
+ * If too many Rs share the same id, reset the id for the leftover Rs.
+ */
+static void collect_linked_regs(struct bpf_verifier_state *vstate, u32 id,
+ struct linked_regs *linked_regs)
+{
+ struct bpf_func_state *func;
+ struct bpf_reg_state *reg;
+ int i, j;
+
+ id = id & ~BPF_ADD_CONST;
+ for (i = vstate->curframe; i >= 0; i--) {
+ func = vstate->frame[i];
+ for (j = 0; j < BPF_REG_FP; j++) {
+ reg = &func->regs[j];
+ __collect_linked_regs(linked_regs, reg, id, i, j, true);
+ }
+ for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
+ if (!is_spilled_reg(&func->stack[j]))
+ continue;
+ reg = &func->stack[j].spilled_ptr;
+ __collect_linked_regs(linked_regs, reg, id, i, j, false);
+ }
+ }
+}
+
+/* For all R in linked_regs, copy known_reg range into R
+ * if R->id == known_reg->id.
+ */
+static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg,
+ struct linked_regs *linked_regs)
{
struct bpf_reg_state fake_reg;
- struct bpf_func_state *state;
struct bpf_reg_state *reg;
+ struct linked_reg *e;
+ int i;
- bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+ for (i = 0; i < linked_regs->cnt; ++i) {
+ e = &linked_regs->entries[i];
+ reg = e->is_reg ? &vstate->frame[e->frameno]->regs[e->regno]
+ : &vstate->frame[e->frameno]->stack[e->spi].spilled_ptr;
if (reg->type != SCALAR_VALUE || reg == known_reg)
continue;
if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
@@ -15114,7 +15337,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
copy_register_state(reg, known_reg);
/*
* Must preserve off, id and add_const flag,
- * otherwise another find_equal_scalars() will be incorrect.
+ * otherwise another sync_linked_regs() will be incorrect.
*/
reg->off = saved_off;
@@ -15122,7 +15345,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
scalar_min_max_add(reg, &fake_reg);
reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
}
- }));
+ }
}
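
To see what collect/sync accomplish together: registers copied from one another share an id, and tightening one member of the group tightens the rest. A reduced user-space model, assuming simplified register state:

#include <stdio.h>

#define NREGS 4

struct reg { int id; long long smin, smax; };

/* After a branch tightens the compared register, copy its bounds to
 * every other scalar that shares its id. */
static void sync_linked(struct reg *regs, int n, const struct reg *known)
{
	for (int i = 0; i < n; i++) {
		if (&regs[i] == known || regs[i].id != known->id)
			continue;
		regs[i].smin = known->smin;
		regs[i].smax = known->smax;
	}
}

int main(void)
{
	/* r0 and r6 got the same id from "r6 = r0" */
	struct reg regs[NREGS] = {
		{ .id = 7, .smin = 0, .smax = 100 },   /* r0 */
		{ .id = 0 }, { .id = 0 },
		{ .id = 7, .smin = 0, .smax = 100 },   /* r6 */
	};

	/* branch "if r0 <= 10" tightens r0 ... */
	regs[0].smax = 10;
	/* ... and syncing propagates the bound to r6 as well */
	sync_linked(regs, NREGS, &regs[0]);
	printf("r6 range: [%lld, %lld]\n", regs[3].smin, regs[3].smax);
	return 0;
}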
static int check_cond_jmp_op(struct bpf_verifier_env *env,
@@ -15133,6 +15356,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
struct bpf_reg_state *eq_branch_regs;
+ struct linked_regs linked_regs = {};
u8 opcode = BPF_OP(insn->code);
bool is_jmp32;
int pred = -1;
@@ -15247,6 +15471,21 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return 0;
}
+ /* Push scalar registers sharing the same ID to the jump history,
+ * and do this before creating 'other_branch', so that both
+ * 'this_branch' and 'other_branch' share this history
+ * if a parent state is created.
+ */
+ if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id)
+ collect_linked_regs(this_branch, src_reg->id, &linked_regs);
+ if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
+ collect_linked_regs(this_branch, dst_reg->id, &linked_regs);
+ if (linked_regs.cnt > 1) {
+ err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
+ if (err)
+ return err;
+ }
+
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
false);
if (!other_branch)
@@ -15277,13 +15516,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (BPF_SRC(insn->code) == BPF_X &&
src_reg->type == SCALAR_VALUE && src_reg->id &&
!WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
- find_equal_scalars(this_branch, src_reg);
- find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
+ sync_linked_regs(this_branch, src_reg, &linked_regs);
+ sync_linked_regs(other_branch, &other_branch_regs[insn->src_reg], &linked_regs);
}
if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
!WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
- find_equal_scalars(this_branch, dst_reg);
- find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
+ sync_linked_regs(this_branch, dst_reg, &linked_regs);
+ sync_linked_regs(other_branch, &other_branch_regs[insn->dst_reg], &linked_regs);
}
/* if one pointer register is compared to another pointer
@@ -15571,6 +15810,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
const bool is_subprog = frame->subprogno;
+ bool return_32bit = false;
/* LSM and struct_ops func-ptr's return type could be "void" */
if (!is_subprog || frame->in_exception_callback_fn) {
@@ -15676,12 +15916,14 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
case BPF_PROG_TYPE_LSM:
if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
- /* Regular BPF_PROG_TYPE_LSM programs can return
- * any value.
- */
- return 0;
- }
- if (!env->prog->aux->attach_func_proto->type) {
+ /* no range found, any return value is allowed */
+ if (!get_func_retval_range(env->prog, &range))
+ return 0;
+ /* no restricted range, any return value is allowed */
+ if (range.minval == S32_MIN && range.maxval == S32_MAX)
+ return 0;
+ return_32bit = true;
+ } else if (!env->prog->aux->attach_func_proto->type) {
/* Make sure programs that attach to void
* hooks don't try to modify return value.
*/
@@ -15711,7 +15953,7 @@ enforce_retval:
if (err)
return err;
- if (!retval_range_within(range, reg)) {
+ if (!retval_range_within(range, reg, return_32bit)) {
verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name);
if (!is_subprog &&
prog->expected_attach_type == BPF_LSM_CGROUP &&
@@ -15877,6 +16119,274 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
return ret;
}
+/* Bitmask with 1s for all caller saved registers */
+#define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
+
+/* Return a bitmask specifying which caller saved registers are
+ * clobbered by a call to a helper *as if* this helper follows
+ * the bpf_fastcall contract:
+ * - includes R0 if the function is non-void;
+ * - includes R1-R5 if the corresponding parameter is described
+ *   in the function prototype.
+ */
+static u32 helper_fastcall_clobber_mask(const struct bpf_func_proto *fn)
+{
+ u32 mask;
+ int i;
+
+ mask = 0;
+ if (fn->ret_type != RET_VOID)
+ mask |= BIT(BPF_REG_0);
+ for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i)
+ if (fn->arg_type[i] != ARG_DONTCARE)
+ mask |= BIT(BPF_REG_1 + i);
+ return mask;
+}
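
As a worked example of the mask this computes: for a hypothetical non-void helper with two declared arguments, R0-R2 count as clobbered and R3-R5 stay live across the call. A standalone sketch (register numbering only; not kernel code):

#include <stdio.h>

#define BIT(n) (1u << (n))
enum { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 };

int main(void)
{
	unsigned mask = 0;

	mask |= BIT(BPF_REG_0);          /* non-void return clobbers R0 */
	mask |= BIT(BPF_REG_1);          /* first declared argument */
	mask |= BIT(BPF_REG_2);          /* second declared argument */

	/* R3-R5 stay live across the call under the fastcall contract */
	printf("clobber mask: 0x%x\n", mask); /* 0x7 */
	return 0;
}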
+
+/* True if do_misc_fixups() replaces calls to helper number 'imm';
+ * the replacement patch is presumed to follow the bpf_fastcall
+ * contract (see mark_fastcall_pattern_for_call() below).
+ */
+static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
+{
+ switch (imm) {
+#ifdef CONFIG_X86_64
+ case BPF_FUNC_get_smp_processor_id:
+ return env->prog->jit_requested && bpf_jit_supports_percpu_insn();
+#endif
+ default:
+ return false;
+ }
+}
+
+/* Same as helper_fastcall_clobber_mask() but for kfuncs, see comment above */
+static u32 kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta *meta)
+{
+ u32 vlen, i, mask;
+
+ vlen = btf_type_vlen(meta->func_proto);
+ mask = 0;
+ if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type)))
+ mask |= BIT(BPF_REG_0);
+ for (i = 0; i < vlen; ++i)
+ mask |= BIT(BPF_REG_1 + i);
+ return mask;
+}
+
+/* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */
+static bool is_fastcall_kfunc_call(struct bpf_kfunc_call_arg_meta *meta)
+{
+ if (meta->btf == btf_vmlinux)
+ return meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+ meta->func_id == special_kfunc_list[KF_bpf_rdonly_cast];
+ return false;
+}
+
+/* LLVM defines a bpf_fastcall function attribute.
+ * This attribute means that the function scratches only some of
+ * the caller saved registers defined by the ABI.
+ * For BPF the set of such registers could be defined as follows:
+ * - R0 is scratched only if the function is non-void;
+ * - R1-R5 are scratched only if the corresponding parameter type
+ *   is defined in the function prototype.
+ *
+ * The contract between kernel and clang allows simultaneous use of
+ * such functions while maintaining backwards compatibility with old
+ * kernels that don't understand bpf_fastcall calls:
+ *
+ * - for bpf_fastcall calls clang allocates registers as-if relevant r0-r5
+ * registers are not scratched by the call;
+ *
+ * - as a post-processing step, clang visits each bpf_fastcall call and adds
+ * spill/fill for every live r0-r5;
+ *
+ * - stack offsets used for the spill/fill are allocated as the
+ *   lowest stack offsets in the whole function and are not used
+ *   for any other purpose;
+ *
+ * - when the kernel loads a program, it looks for such patterns
+ *   (a bpf_fastcall function surrounded by spills/fills) and checks if
+ *   spill/fill stack offsets are used exclusively in fastcall patterns;
+ *
+ * - if so, and if the verifier or the current JIT inlines the call
+ *   to the bpf_fastcall function (e.g. a helper call), the kernel
+ *   removes the unnecessary spill/fill pairs;
+ *
+ * - when an old kernel loads a program, the presence of spill/fill
+ *   pairs keeps the BPF program valid, albeit slightly less efficient.
+ *
+ * For example:
+ *
+ * r1 = 1;
+ * r2 = 2;
+ * *(u64 *)(r10 - 8) = r1; r1 = 1;
+ * *(u64 *)(r10 - 16) = r2; r2 = 2;
+ * call %[to_be_inlined] --> call %[to_be_inlined]
+ * r2 = *(u64 *)(r10 - 16); r0 = r1;
+ * r1 = *(u64 *)(r10 - 8); r0 += r2;
+ * r0 = r1; exit;
+ * r0 += r2;
+ * exit;
+ *
+ * The purpose of mark_fastcall_pattern_for_call is to:
+ * - look for such patterns;
+ * - mark spill and fill instructions in env->insn_aux_data[*].fastcall_pattern;
+ * - set env->insn_aux_data[*].fastcall_spills_num for the call instruction;
+ * - update env->subprog_info[*]->fastcall_stack_off to find an offset
+ * at which bpf_fastcall spill/fill stack slots start;
+ * - update env->subprog_info[*]->keep_fastcall_stack.
+ *
+ * The .fastcall_pattern and .fastcall_stack_off are used by
+ * check_fastcall_stack_contract() to check if every stack access to
+ * a fastcall spill/fill stack slot originates from spill/fill
+ * instructions that are members of fastcall patterns.
+ *
+ * If this condition holds for a subprogram, fastcall patterns can
+ * be rewritten by remove_fastcall_spills_fills().
+ * Otherwise bpf_fastcall patterns are not changed in the subprogram
+ * (code, presumably, generated by an older clang version).
+ *
+ * For example, it is *not* safe to remove spill/fill below:
+ *
+ * r1 = 1;
+ * *(u64 *)(r10 - 8) = r1; r1 = 1;
+ * call %[to_be_inlined] --> call %[to_be_inlined]
+ * r1 = *(u64 *)(r10 - 8); r0 = *(u64 *)(r10 - 8); <---- wrong !!!
+ * r0 = *(u64 *)(r10 - 8); r0 += r1;
+ * r0 += r1; exit;
+ * exit;
+ */
+static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env,
+ struct bpf_subprog_info *subprog,
+ int insn_idx, s16 lowest_off)
+{
+ struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx;
+ struct bpf_insn *call = &env->prog->insnsi[insn_idx];
+ const struct bpf_func_proto *fn;
+ u32 clobbered_regs_mask = ALL_CALLER_SAVED_REGS;
+ u32 expected_regs_mask;
+ bool can_be_inlined = false;
+ s16 off;
+ int i;
+
+ if (bpf_helper_call(call)) {
+ if (get_helper_proto(env, call->imm, &fn) < 0)
+ /* error would be reported later */
+ return;
+ clobbered_regs_mask = helper_fastcall_clobber_mask(fn);
+ can_be_inlined = fn->allow_fastcall &&
+ (verifier_inlines_helper_call(env, call->imm) ||
+ bpf_jit_inlines_helper_call(call->imm));
+ }
+
+ if (bpf_pseudo_kfunc_call(call)) {
+ struct bpf_kfunc_call_arg_meta meta;
+ int err;
+
+ err = fetch_kfunc_meta(env, call, &meta, NULL);
+ if (err < 0)
+ /* error would be reported later */
+ return;
+
+ clobbered_regs_mask = kfunc_fastcall_clobber_mask(&meta);
+ can_be_inlined = is_fastcall_kfunc_call(&meta);
+ }
+
+ if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS)
+ return;
+
+ /* e.g. if helper call clobbers r{0,1}, expect r{2,3,4,5} in the pattern */
+ expected_regs_mask = ~clobbered_regs_mask & ALL_CALLER_SAVED_REGS;
+
+ /* match pairs of form:
+ *
+ * *(u64 *)(r10 - Y) = rX (where Y % 8 == 0)
+ * ...
+ * call %[to_be_inlined]
+ * ...
+ * rX = *(u64 *)(r10 - Y)
+ */
+ for (i = 1, off = lowest_off; i <= ARRAY_SIZE(caller_saved); ++i, off += BPF_REG_SIZE) {
+ if (insn_idx - i < 0 || insn_idx + i >= env->prog->len)
+ break;
+ stx = &insns[insn_idx - i];
+ ldx = &insns[insn_idx + i];
+ /* must be a stack spill/fill pair */
+ if (stx->code != (BPF_STX | BPF_MEM | BPF_DW) ||
+ ldx->code != (BPF_LDX | BPF_MEM | BPF_DW) ||
+ stx->dst_reg != BPF_REG_10 ||
+ ldx->src_reg != BPF_REG_10)
+ break;
+ /* must be a spill/fill for the same reg */
+ if (stx->src_reg != ldx->dst_reg)
+ break;
+ /* must be one of the previously unseen registers */
+ if ((BIT(stx->src_reg) & expected_regs_mask) == 0)
+ break;
+ /* must be a spill/fill for the same expected offset,
+ * no need to check offset alignment, BPF_DW stack access
+ * is always 8-byte aligned.
+ */
+ if (stx->off != off || ldx->off != off)
+ break;
+ expected_regs_mask &= ~BIT(stx->src_reg);
+ env->insn_aux_data[insn_idx - i].fastcall_pattern = 1;
+ env->insn_aux_data[insn_idx + i].fastcall_pattern = 1;
+ }
+ if (i == 1)
+ return;
+
+ /* Conditionally set 'fastcall_spills_num' to allow forward
+ * compatibility when more helper functions are marked as
+ * bpf_fastcall at compile time than the current kernel supports, e.g.:
+ *
+ * 1: *(u64 *)(r10 - 8) = r1
+ * 2: call A ;; assume A is bpf_fastcall for current kernel
+ * 3: r1 = *(u64 *)(r10 - 8)
+ * 4: *(u64 *)(r10 - 8) = r1
+ * 5: call B ;; assume B is not bpf_fastcall for current kernel
+ * 6: r1 = *(u64 *)(r10 - 8)
+ *
+ * There is no need to block the bpf_fastcall rewrite for such a program.
+ * Set 'fastcall_pattern' for both calls to keep check_fastcall_stack_contract()
+ * happy, but don't set 'fastcall_spills_num' for call B so that
+ * remove_fastcall_spills_fills() does not remove spill/fill pair {4,6}.
+ */
+ if (can_be_inlined)
+ env->insn_aux_data[insn_idx].fastcall_spills_num = i - 1;
+ else
+ subprog->keep_fastcall_stack = 1;
+ subprog->fastcall_stack_off = min(subprog->fastcall_stack_off, off);
+}
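
A reduced model of the pairing loop above, with a toy instruction type standing in for struct bpf_insn; the innermost pair sits at the lowest stack offset, exactly as the layout described earlier has clang emit it (user-space sketch, simplified encoding):

#include <stdio.h>

enum kind { ST, LD, CALL, OTHER };
struct insn { enum kind k; int reg; int off; };

/* Walk outward from the call at idx: insns[idx - i] must spill rX to a
 * slot and insns[idx + i] must fill the same rX from the same slot,
 * with offsets climbing from lowest_off. Returns the number of pairs. */
static int match_pairs(const struct insn *insns, int len, int idx,
		       int lowest_off)
{
	int i, off;

	for (i = 1, off = lowest_off; ; i++, off += 8) {
		if (idx - i < 0 || idx + i >= len)
			break;
		const struct insn *s = &insns[idx - i], *l = &insns[idx + i];

		if (s->k != ST || l->k != LD || s->reg != l->reg ||
		    s->off != off || l->off != off)
			break;
	}
	return i - 1;
}

int main(void)
{
	struct insn prog[] = {
		{ ST, 1, -8 }, { ST, 2, -16 },  /* spills, innermost lowest */
		{ CALL, 0, 0 },
		{ LD, 2, -16 }, { LD, 1, -8 },  /* matching fills */
	};

	printf("pairs: %d\n", match_pairs(prog, 5, 2, -16)); /* 2 */
	return 0;
}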
+
+static int mark_fastcall_patterns(struct bpf_verifier_env *env)
+{
+ struct bpf_subprog_info *subprog = env->subprog_info;
+ struct bpf_insn *insn;
+ s16 lowest_off;
+ int s, i;
+
+ for (s = 0; s < env->subprog_cnt; ++s, ++subprog) {
+ /* find lowest stack spill offset used in this subprog */
+ lowest_off = 0;
+ for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+ insn = env->prog->insnsi + i;
+ if (insn->code != (BPF_STX | BPF_MEM | BPF_DW) ||
+ insn->dst_reg != BPF_REG_10)
+ continue;
+ lowest_off = min(lowest_off, insn->off);
+ }
+ /* use this offset to find fastcall patterns */
+ for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+ insn = env->prog->insnsi + i;
+ if (insn->code != (BPF_JMP | BPF_CALL))
+ continue;
+ mark_fastcall_pattern_for_call(env, subprog, i, lowest_off);
+ }
+ }
+ return 0;
+}
+
/* Visits the instruction at index t and returns one of the following:
* < 0 - an error occurred
* DONE_EXPLORING - the instruction was fully explored
@@ -16772,7 +17282,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
*
* First verification path is [1-6]:
* - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7;
- * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark
+ * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark
* r7 <= X, because r6 and r7 share same id.
* Next verification path is [1-4, 6].
*
@@ -17566,7 +18076,7 @@ hit:
* the current state.
*/
if (is_jmp_point(env, env->insn_idx))
- err = err ? : push_jmp_history(env, cur, 0);
+ err = err ? : push_jmp_history(env, cur, 0, 0);
err = err ? : propagate_precision(env, &sl->state);
if (err)
return err;
@@ -17834,7 +18344,7 @@ static int do_check(struct bpf_verifier_env *env)
}
if (is_jmp_point(env, env->insn_idx)) {
- err = push_jmp_history(env, state, 0);
+ err = push_jmp_history(env, state, 0, 0);
if (err)
return err;
}
@@ -18410,6 +18920,53 @@ static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}
+/* Add the map behind fd to the used maps list, if it's not already there,
+ * and return its index. Also set *reused to true if this map was already
+ * in the list of used maps.
+ * Returns <0 on error, or the >= 0 index on success.
+ */
+static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused)
+{
+ CLASS(fd, f)(fd);
+ struct bpf_map *map;
+ int i;
+
+ map = __bpf_map_get(f);
+ if (IS_ERR(map)) {
+ verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+ return PTR_ERR(map);
+ }
+
+ /* check whether we recorded this map already */
+ for (i = 0; i < env->used_map_cnt; i++) {
+ if (env->used_maps[i] == map) {
+ *reused = true;
+ return i;
+ }
+ }
+
+ if (env->used_map_cnt >= MAX_USED_MAPS) {
+ verbose(env, "The total number of maps per program has reached the limit of %u\n",
+ MAX_USED_MAPS);
+ return -E2BIG;
+ }
+
+ if (env->prog->sleepable)
+ atomic64_inc(&map->sleepable_refcnt);
+
+ /* hold the map. If the program is rejected by verifier,
+ * the map will be released by release_maps() or it
+ * will be used by the valid program until it's unloaded
+ * and all maps are released in bpf_free_used_maps()
+ */
+ bpf_map_inc(map);
+
+ *reused = false;
+ env->used_maps[env->used_map_cnt++] = map;
+
+ return env->used_map_cnt - 1;
+}
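
The dedup-then-append shape of this helper is easy to exercise in isolation; a user-space analogue returning an index plus a reused flag (illustrative only, no refcounting or fd handling):

#include <stdbool.h>
#include <stdio.h>

#define MAX_USED 64

static const void *used[MAX_USED];
static int used_cnt;

/* Return the index of ptr in used[], appending it if new; *reused
 * tells the caller whether one-time setup can be skipped. */
static int add_used(const void *ptr, bool *reused)
{
	for (int i = 0; i < used_cnt; i++) {
		if (used[i] == ptr) {
			*reused = true;
			return i;
		}
	}
	if (used_cnt >= MAX_USED)
		return -1; /* the kernel version returns -E2BIG */
	*reused = false;
	used[used_cnt] = ptr;
	return used_cnt++;
}

int main(void)
{
	int a, b;
	bool reused;

	printf("%d ", add_used(&a, &reused)); /* 0 (new) */
	printf("%d ", add_used(&b, &reused)); /* 1 (new) */
	printf("%d ", add_used(&a, &reused)); /* 0 again */
	printf("reused=%d\n", reused);        /* reused=1 */
	return 0;
}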
+
/* find and rewrite pseudo imm in ld_imm64 instructions:
*
* 1. if it accesses map FD, replace it with actual map pointer.
@@ -18421,7 +18978,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
- int i, j, err;
+ int i, err;
err = bpf_prog_calc_tag(env->prog);
if (err)
@@ -18438,9 +18995,10 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_insn_aux_data *aux;
struct bpf_map *map;
- struct fd f;
+ int map_idx;
u64 addr;
u32 fd;
+ bool reused;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@@ -18501,20 +19059,18 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
break;
}
- f = fdget(fd);
- map = __bpf_map_get(f);
- if (IS_ERR(map)) {
- verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
- return PTR_ERR(map);
- }
+ map_idx = add_used_map_from_fd(env, fd, &reused);
+ if (map_idx < 0)
+ return map_idx;
+ map = env->used_maps[map_idx];
+
+ aux = &env->insn_aux_data[i];
+ aux->map_index = map_idx;
err = check_map_prog_compatibility(env, map, env->prog);
- if (err) {
- fdput(f);
+ if (err)
return err;
- }
- aux = &env->insn_aux_data[i];
if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
addr = (unsigned long)map;
@@ -18523,13 +19079,11 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (off >= BPF_MAX_VAR_OFF) {
verbose(env, "direct value offset of %u is not allowed\n", off);
- fdput(f);
return -EINVAL;
}
if (!map->ops->map_direct_value_addr) {
verbose(env, "no direct value access support for this map type\n");
- fdput(f);
return -EINVAL;
}
@@ -18537,7 +19091,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
if (err) {
verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
map->value_size, off);
- fdput(f);
return err;
}
@@ -18548,70 +19101,39 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;
- /* check whether we recorded this map already */
- for (j = 0; j < env->used_map_cnt; j++) {
- if (env->used_maps[j] == map) {
- aux->map_index = j;
- fdput(f);
- goto next_insn;
- }
- }
-
- if (env->used_map_cnt >= MAX_USED_MAPS) {
- verbose(env, "The total number of maps per program has reached the limit of %u\n",
- MAX_USED_MAPS);
- fdput(f);
- return -E2BIG;
- }
-
- if (env->prog->sleepable)
- atomic64_inc(&map->sleepable_refcnt);
- /* hold the map. If the program is rejected by verifier,
- * the map will be released by release_maps() or it
- * will be used by the valid program until it's unloaded
- * and all maps are released in bpf_free_used_maps()
- */
- bpf_map_inc(map);
-
- aux->map_index = env->used_map_cnt;
- env->used_maps[env->used_map_cnt++] = map;
+ /* proceed with extra checks only if it's a newly added used map */
+ if (reused)
+ goto next_insn;
if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog->aux, map)) {
verbose(env, "only one cgroup storage of each type is allowed\n");
- fdput(f);
return -EBUSY;
}
if (map->map_type == BPF_MAP_TYPE_ARENA) {
if (env->prog->aux->arena) {
verbose(env, "Only one arena per program\n");
- fdput(f);
return -EBUSY;
}
if (!env->allow_ptr_leaks || !env->bpf_capable) {
verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
- fdput(f);
return -EPERM;
}
if (!env->prog->jit_requested) {
verbose(env, "JIT is required to use arena\n");
- fdput(f);
return -EOPNOTSUPP;
}
if (!bpf_jit_supports_arena()) {
verbose(env, "JIT doesn't support arena\n");
- fdput(f);
return -EOPNOTSUPP;
}
env->prog->aux->arena = (void *)map;
if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
verbose(env, "arena's user address must be set via map_extra or mmap()\n");
- fdput(f);
return -EINVAL;
}
}
- fdput(f);
next_insn:
insn++;
i++;
@@ -18767,6 +19289,9 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
for (i = 0; i < insn_cnt; i++, insn++) {
u8 code = insn->code;
+ if (tgt_idx <= i && i < tgt_idx + delta)
+ continue;
+
if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ||
BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)
continue;
@@ -19026,9 +19551,11 @@ static int opt_remove_dead_code(struct bpf_verifier_env *env)
return 0;
}
+static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+
static int opt_remove_nops(struct bpf_verifier_env *env)
{
- const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+ const struct bpf_insn ja = NOP;
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, err;
@@ -19153,14 +19680,39 @@ apply_patch_buffer:
*/
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
+ struct bpf_subprog_info *subprogs = env->subprog_info;
const struct bpf_verifier_ops *ops = env->ops;
- int i, cnt, size, ctx_field_size, delta = 0;
+ int i, cnt, size, ctx_field_size, delta = 0, epilogue_cnt = 0;
const int insn_cnt = env->prog->len;
- struct bpf_insn insn_buf[16], *insn;
+ struct bpf_insn *epilogue_buf = env->epilogue_buf;
+ struct bpf_insn *insn_buf = env->insn_buf;
+ struct bpf_insn *insn;
u32 target_size, size_default, off;
struct bpf_prog *new_prog;
enum bpf_access_type type;
bool is_narrower_load;
+ int epilogue_idx = 0;
+
+ if (ops->gen_epilogue) {
+ epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog,
+ -(subprogs[0].stack_depth + 8));
+ if (epilogue_cnt >= INSN_BUF_SIZE) {
+ verbose(env, "bpf verifier is misconfigured\n");
+ return -EINVAL;
+ } else if (epilogue_cnt) {
+ /* Save the ARG_PTR_TO_CTX for the epilogue to use */
+ cnt = 0;
+ subprogs[0].stack_depth += 8;
+ insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1,
+ -subprogs[0].stack_depth);
+ insn_buf[cnt++] = env->prog->insnsi[0];
+ new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+ env->prog = new_prog;
+ delta += cnt - 1;
+ }
+ }
if (ops->gen_prologue || env->seen_direct_write) {
if (!ops->gen_prologue) {
@@ -19169,7 +19721,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
- if (cnt >= ARRAY_SIZE(insn_buf)) {
+ if (cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
} else if (cnt) {
@@ -19182,6 +19734,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
}
+ if (delta)
+ WARN_ON(adjust_jmp_off(env->prog, 0, delta));
+
if (bpf_prog_is_offloaded(env->prog->aux))
return 0;
@@ -19214,6 +19769,25 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code);
env->prog->aux->num_exentries++;
continue;
+ } else if (insn->code == (BPF_JMP | BPF_EXIT) &&
+ epilogue_cnt &&
+ i + delta < subprogs[1].start) {
+ /* Generate epilogue for the main prog */
+ if (epilogue_idx) {
+ /* jump back to the earlier generated epilogue */
+ insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1);
+ cnt = 1;
+ } else {
+ memcpy(insn_buf, epilogue_buf,
+ epilogue_cnt * sizeof(*epilogue_buf));
+ cnt = epilogue_cnt;
+ /* epilogue_idx cannot be 0. The prog must
+ * have at least one ctx-ptr-saving insn
+ * before the epilogue.
+ */
+ epilogue_idx = i + delta;
+ }
+ goto patch_insn_buf;
} else {
continue;
}
@@ -19316,7 +19890,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
target_size = 0;
cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
&target_size);
- if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
+ if (cnt == 0 || cnt >= INSN_BUF_SIZE ||
(ctx_field_size && !target_size)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
@@ -19325,7 +19899,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
if (is_narrower_load && size < target_size) {
u8 shift = bpf_ctx_narrow_access_offset(
off, size, size_default) * 8;
- if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
+ if (shift && cnt + 1 >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier narrow ctx load misconfigured\n");
return -EINVAL;
}
@@ -19350,6 +19924,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->dst_reg, insn->dst_reg,
size * 8, 0);
+patch_insn_buf:
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
@@ -19870,7 +20445,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
- struct bpf_insn insn_buf[16];
+ struct bpf_insn *insn_buf = env->insn_buf;
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, ret, cnt, delta = 0, cur_subprog = 0;
@@ -19913,13 +20488,46 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
/* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */
insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code);
- /* Make divide-by-zero exceptions impossible. */
+ /* Make sdiv/smod divide-by-minus-one exceptions impossible. */
+ if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) ||
+ insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) ||
+ insn->code == (BPF_ALU | BPF_MOD | BPF_K) ||
+ insn->code == (BPF_ALU | BPF_DIV | BPF_K)) &&
+ insn->off == 1 && insn->imm == -1) {
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+ bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+ struct bpf_insn *patchlet;
+ struct bpf_insn chk_and_sdiv[] = {
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_NEG | BPF_K, insn->dst_reg,
+ 0, 0, 0),
+ };
+ struct bpf_insn chk_and_smod[] = {
+ BPF_MOV32_IMM(insn->dst_reg, 0),
+ };
+
+ patchlet = isdiv ? chk_and_sdiv : chk_and_smod;
+ cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod);
+
+ new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ goto next_insn;
+ }
+
+ /* Make divide-by-zero and divide-by-minus-one exceptions impossible. */
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+ bool is_sdiv = isdiv && insn->off == 1;
+ bool is_smod = !isdiv && insn->off == 1;
struct bpf_insn *patchlet;
struct bpf_insn chk_and_div[] = {
/* [R,W]x div 0 -> 0 */
@@ -19939,10 +20547,62 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
};
+ struct bpf_insn chk_and_sdiv[] = {
+ /* [R,W]x sdiv 0 -> 0
+ * LLONG_MIN sdiv -1 -> LLONG_MIN
+ * INT_MIN sdiv -1 -> INT_MIN
+ */
+ BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_ADD | BPF_K, BPF_REG_AX,
+ 0, 0, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JGT | BPF_K, BPF_REG_AX,
+ 0, 4, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JEQ | BPF_K, BPF_REG_AX,
+ 0, 1, 0),
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_MOV | BPF_K, insn->dst_reg,
+ 0, 0, 0),
+ /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_NEG | BPF_K, insn->dst_reg,
+ 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ *insn,
+ };
+ struct bpf_insn chk_and_smod[] = {
+ /* [R,W]x mod 0 -> [R,W]x */
+ /* [R,W]x mod -1 -> 0 */
+ BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
+ BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+ BPF_ADD | BPF_K, BPF_REG_AX,
+ 0, 0, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JGT | BPF_K, BPF_REG_AX,
+ 0, 3, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JEQ | BPF_K, BPF_REG_AX,
+ 0, 3 + (is64 ? 0 : 1), 1),
+ BPF_MOV32_IMM(insn->dst_reg, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ *insn,
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
+ };
- patchlet = isdiv ? chk_and_div : chk_and_mod;
- cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
- ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+ if (is_sdiv) {
+ patchlet = chk_and_sdiv;
+ cnt = ARRAY_SIZE(chk_and_sdiv);
+ } else if (is_smod) {
+ patchlet = chk_and_smod;
+ cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0);
+ } else {
+ patchlet = isdiv ? chk_and_div : chk_and_mod;
+ cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
+ ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+ }
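
For context on why these patchlets exist: in C, LLONG_MIN / -1 is undefined and traps on x86, while BPF defines the result, per the patchlet comments above. A plain C sketch of the semantics the rewrites enforce (user-space, illustrative):

#include <limits.h>
#include <stdio.h>

/* BPF signed div/mod semantics the patchlets pin down:
 *   x / 0  -> 0,   x % 0  -> x
 *   x / -1 -> -x (so LLONG_MIN / -1 == LLONG_MIN),  x % -1 -> 0
 */
static long long bpf_sdiv(long long x, long long y)
{
	if (y == 0)
		return 0;
	if (y == -1) /* negate via unsigned to avoid UB on LLONG_MIN */
		return (long long)(0ULL - (unsigned long long)x);
	return x / y;
}

static long long bpf_smod(long long x, long long y)
{
	if (y == 0)
		return x;
	if (y == -1)
		return 0;
	return x % y;
}

int main(void)
{
	printf("%lld\n", bpf_sdiv(LLONG_MIN, -1)); /* LLONG_MIN, no trap */
	printf("%lld\n", bpf_smod(7, 0));          /* 7 */
	return 0;
}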
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
@@ -19989,7 +20649,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
(BPF_MODE(insn->code) == BPF_ABS ||
BPF_MODE(insn->code) == BPF_IND)) {
cnt = env->ops->gen_ld_abs(insn, insn_buf);
- if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+ if (cnt == 0 || cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
@@ -20282,7 +20942,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == -EOPNOTSUPP)
goto patch_map_ops_generic;
- if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+ if (cnt <= 0 || cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
@@ -20384,7 +21044,7 @@ patch_map_ops_generic:
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
/* Implement bpf_get_smp_processor_id() inline. */
if (insn->imm == BPF_FUNC_get_smp_processor_id &&
- prog->jit_requested && bpf_jit_supports_percpu_insn()) {
+ verifier_inlines_helper_call(env, insn->imm)) {
/* BPF_FUNC_get_smp_processor_id inlining is an
* optimization, so if pcpu_hot.cpu_number is ever
* changed in some incompatible and hard to support
@@ -20642,7 +21302,7 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
int position,
s32 stack_base,
u32 callback_subprogno,
- u32 *cnt)
+ u32 *total_cnt)
{
s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
@@ -20651,55 +21311,56 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
int reg_loop_cnt = BPF_REG_7;
int reg_loop_ctx = BPF_REG_8;
+ struct bpf_insn *insn_buf = env->insn_buf;
struct bpf_prog *new_prog;
u32 callback_start;
u32 call_insn_offset;
s32 callback_offset;
+ u32 cnt = 0;
/* This represents an inlined version of bpf_iter.c:bpf_loop;
* be careful to keep this code in sync.
*/
- struct bpf_insn insn_buf[] = {
- /* Return error and jump to the end of the patch if
- * expected number of iterations is too big.
- */
- BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
- BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
- BPF_JMP_IMM(BPF_JA, 0, 0, 16),
- /* spill R6, R7, R8 to use these as loop vars */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
- /* initialize loop vars */
- BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
- BPF_MOV32_IMM(reg_loop_cnt, 0),
- BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
- /* loop header,
- * if reg_loop_cnt >= reg_loop_max skip the loop body
- */
- BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
- /* callback call,
- * correct callback offset would be set after patching
- */
- BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
- BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
- BPF_CALL_REL(0),
- /* increment loop counter */
- BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
- /* jump to loop header if callback returned 0 */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
- /* return value of bpf_loop,
- * set R0 to the number of iterations
- */
- BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
- /* restore original values of R6, R7, R8 */
- BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
- BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
- };
- *cnt = ARRAY_SIZE(insn_buf);
- new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
+ /* Return error and jump to the end of the patch if
+ * the expected number of iterations is too big.
+ */
+ insn_buf[cnt++] = BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2);
+ insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG);
+ insn_buf[cnt++] = BPF_JMP_IMM(BPF_JA, 0, 0, 16);
+ /* spill R6, R7, R8 to use these as loop vars */
+ insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset);
+ insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset);
+ insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset);
+ /* initialize loop vars */
+ insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_max, BPF_REG_1);
+ insn_buf[cnt++] = BPF_MOV32_IMM(reg_loop_cnt, 0);
+ insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3);
+ /* loop header,
+ * if reg_loop_cnt >= reg_loop_max skip the loop body
+ */
+ insn_buf[cnt++] = BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5);
+ /* callback call,
+ * correct callback offset would be set after patching
+ */
+ insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt);
+ insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx);
+ insn_buf[cnt++] = BPF_CALL_REL(0);
+ /* increment loop counter */
+ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1);
+ /* jump to loop header if callback returned 0 */
+ insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6);
+ /* return value of bpf_loop,
+ * set R0 to the number of iterations
+ */
+ insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt);
+ /* restore original values of R6, R7, R8 */
+ insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset);
+ insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset);
+ insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset);
+
+ *total_cnt = cnt;
+ new_prog = bpf_patch_insn_data(env, position, insn_buf, cnt);
if (!new_prog)
return new_prog;
@@ -20774,6 +21435,40 @@ static int optimize_bpf_loop(struct bpf_verifier_env *env)
return 0;
}
+/* Remove unnecessary spill/fill pairs, members of fastcall pattern,
+ * adjust subprograms stack depth when possible.
+ */
+static int remove_fastcall_spills_fills(struct bpf_verifier_env *env)
+{
+ struct bpf_subprog_info *subprog = env->subprog_info;
+ struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ u32 spills_num;
+ bool modified = false;
+ int i, j;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ if (aux[i].fastcall_spills_num > 0) {
+ spills_num = aux[i].fastcall_spills_num;
+ /* NOPs would be removed by opt_remove_nops() */
+ for (j = 1; j <= spills_num; ++j) {
+ *(insn - j) = NOP;
+ *(insn + j) = NOP;
+ }
+ modified = true;
+ }
+ if ((subprog + 1)->start == i + 1) {
+ if (modified && !subprog->keep_fastcall_stack)
+ subprog->stack_depth = -subprog->fastcall_stack_off;
+ subprog++;
+ modified = false;
+ }
+ }
+
+ return 0;
+}
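
The rewrite itself is mechanical: spill/fill pairs around an inlinable call become NOPs that opt_remove_nops() later deletes. A compact user-space model with a simplified instruction type:

#include <stdio.h>

enum kind { ST, LD, CALL, NOP };

/* Replace 'spills' instructions on each side of the call at idx
 * with NOPs; a follow-up pass compacts them away. */
static void drop_pairs(enum kind *insns, int idx, int spills)
{
	for (int j = 1; j <= spills; j++) {
		insns[idx - j] = NOP;
		insns[idx + j] = NOP;
	}
}

int main(void)
{
	enum kind prog[] = { ST, ST, CALL, LD, LD };

	drop_pairs(prog, 2, 2);
	for (int i = 0; i < 5; i++)
		printf("%s ", prog[i] == NOP ? "nop" : "insn");
	printf("\n"); /* nop nop insn nop nop */
	return 0;
}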
+
static void free_states(struct bpf_verifier_env *env)
{
struct bpf_verifier_state_list *sl, *sln;
@@ -21047,6 +21742,7 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
u32 btf_id, member_idx;
struct btf *btf;
const char *mname;
+ int err;
if (!prog->gpl_compatible) {
verbose(env, "struct ops programs must have a GPL compatible license\n");
@@ -21094,8 +21790,15 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
return -EINVAL;
}
+ err = bpf_struct_ops_supported(st_ops, __btf_member_bit_offset(t, member) / 8);
+ if (err) {
+ verbose(env, "attach to unsupported member %s of struct %s\n",
+ mname, st_ops->name);
+ return err;
+ }
+
if (st_ops->check_member) {
- int err = st_ops->check_member(t, member, prog);
+ err = st_ops->check_member(t, member, prog);
if (err) {
verbose(env, "attach to unsupported member %s of struct %s\n",
@@ -21706,6 +22409,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret < 0)
goto skip_full_check;
+ ret = mark_fastcall_patterns(env);
+ if (ret < 0)
+ goto skip_full_check;
+
ret = do_check_main(env);
ret = ret ?: do_check_subprogs(env);
@@ -21715,6 +22422,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
skip_full_check:
kvfree(env->explored_states);
+ /* This might decrease stack depth; keep it before passes that
+ * allocate additional slots.
+ */
+ if (ret == 0)
+ ret = remove_fastcall_spills_fills(env);
+
if (ret == 0)
ret = check_max_stack_depth(env);
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 520b90dd97ec..c964dd7ff967 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -81,6 +81,8 @@ struct cgroup_file_ctx {
struct {
struct cgroup_pidlist *pidlist;
} procs1;
+
+ struct cgroup_of_peak peak;
};
/*
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 90e50d6d3cf3..5886b95c6eae 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1972,6 +1972,13 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
return -EINVAL;
}
+struct cgroup_of_peak *of_peak(struct kernfs_open_file *of)
+{
+ struct cgroup_file_ctx *ctx = of->priv;
+
+ return &ctx->peak;
+}
+
static void apply_cgroup_root_flags(unsigned int root_flags)
{
if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
@@ -4623,8 +4630,9 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
- * section. This function will return the correct next descendant as long
- * as both @pos and @root are accessible and @pos is a descendant of @root.
+ * section. Additionally, it isn't necessary to hold onto a reference to @pos.
+ * This function will return the correct next descendant as long as both @pos
+ * and @root are accessible and @pos is a descendant of @root.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
@@ -4672,8 +4680,9 @@ EXPORT_SYMBOL_GPL(css_next_descendant_pre);
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
- * section. This function will return the correct rightmost descendant as
- * long as @pos is accessible.
+ * section. Additionally, it isn't necessary to hold onto a reference to @pos.
+ * This function will return the correct rightmost descendant as long as @pos
+ * is accessible.
*/
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
@@ -4717,9 +4726,9 @@ css_leftmost_descendant(struct cgroup_subsys_state *pos)
*
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
- * section. This function will return the correct next descendant as long
- * as both @pos and @cgroup are accessible and @pos is a descendant of
- * @cgroup.
+ * section. Additionally, it isn't necessary to hold onto a reference to @pos.
+ * This function will return the correct next descendant as long as both @pos
+ * and @cgroup are accessible and @pos is a descendant of @cgroup.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
@@ -6959,10 +6968,10 @@ struct cgroup *cgroup_v1v2_get_from_fd(int fd)
{
struct cgroup *cgrp;
struct fd f = fdget_raw(fd);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- cgrp = cgroup_v1v2_get_from_file(f.file);
+ cgrp = cgroup_v1v2_get_from_file(fd_file(f));
fdput(f);
return cgrp;
}
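The new of_peak() helper exposes per-open-file-description state: each struct cgroup_file_ctx now embeds a struct cgroup_of_peak, so two openers of the same control file get independent state. A hedged sketch of how a handler might use it (the 'value' member is invented for illustration):

	static ssize_t peak_write(struct kernfs_open_file *of, char *buf,
				  size_t nbytes, loff_t off)
	{
		struct cgroup_of_peak *p = of_peak(of);

		/* p lives as long as this open file description */
		p->value = 0;	/* e.g. reset a local watermark */
		return nbytes;
	}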
diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config
index 00009f7d0835..b753695c5a8f 100644
--- a/kernel/configs/tiny.config
+++ b/kernel/configs/tiny.config
@@ -1,10 +1,4 @@
-# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_KERNEL_GZIP is not set
-# CONFIG_KERNEL_BZIP2 is not set
-# CONFIG_KERNEL_LZMA is not set
CONFIG_KERNEL_XZ=y
-# CONFIG_KERNEL_LZO is not set
-# CONFIG_KERNEL_LZ4 is not set
CONFIG_SLUB=y
CONFIG_SLUB_TINY=y
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 63cf89393c6e..c1048893f4b6 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -505,7 +505,7 @@ int crash_check_hotplug_support(void)
crash_hotplug_lock();
/* Obtain lock while reading crash information */
if (!kexec_trylock()) {
- pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+ pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
crash_hotplug_unlock();
return 0;
}
@@ -520,18 +520,25 @@ int crash_check_hotplug_support(void)
}
/*
- * To accurately reflect hot un/plug changes of cpu and memory resources
- * (including onling and offlining of those resources), the elfcorehdr
- * (which is passed to the crash kernel via the elfcorehdr= parameter)
- * must be updated with the new list of CPUs and memories.
+ * To accurately reflect hot un/plug changes of CPU and memory resources
+ * (including onlining and offlining of those resources), the relevant
+ * kexec segments must be updated with the latest CPU and memory resources.
*
- * In order to make changes to elfcorehdr, two conditions are needed:
- * First, the segment containing the elfcorehdr must be large enough
- * to permit a growing number of resources; the elfcorehdr memory size
- * is based on NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES.
- * Second, purgatory must explicitly exclude the elfcorehdr from the
- * list of segments it checks (since the elfcorehdr changes and thus
- * would require an update to purgatory itself to update the digest).
+ * Architectures must ensure two things for all segments that need
+ * updating during hotplug events:
+ *
+ * 1. Segments must be large enough to accommodate a growing number of
+ *    resources.
+ * 2. Segments must be excluded from SHA verification.
+ *
+ * For example, on most architectures, the elfcorehdr (which is passed
+ * to the crash kernel via the elfcorehdr= parameter) must include the
+ * new list of CPUs and memory. To make changes to the elfcorehdr, its
+ * segment should be large enough to permit a growing number of CPU and
+ * memory resources. One can estimate the elfcorehdr memory size based
+ * on NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES. The elfcorehdr is
+ * excluded from SHA verification by default if the architecture
+ * supports crash hotplug.
*/
static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu, void *arg)
{
@@ -540,7 +547,7 @@ static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu,
crash_hotplug_lock();
/* Obtain lock while changing crash information */
if (!kexec_trylock()) {
- pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+ pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
crash_hotplug_unlock();
return;
}
diff --git a/kernel/crash_reserve.c b/kernel/crash_reserve.c
index 64d44a52c011..a620fb4b2116 100644
--- a/kernel/crash_reserve.c
+++ b/kernel/crash_reserve.c
@@ -335,6 +335,9 @@ int __init parse_crashkernel(char *cmdline,
if (!*crash_size)
ret = -EINVAL;
+ if (*crash_size >= system_ram)
+ ret = -EINVAL;
+
return ret;
}
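A hedged example of what the added bound rejects (sizes invented): on a machine with 32 GiB of RAM, booting with

	crashkernel=33G

now makes parse_crashkernel() return -EINVAL up front, rather than letting an impossible reservation fail later.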
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index c06e56be0ca1..4c0dcd909121 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -8,8 +8,7 @@ config HAS_DMA
depends on !NO_DMA
default y
-config DMA_OPS
- depends on HAS_DMA
+config DMA_OPS_HELPERS
bool
#
@@ -109,8 +108,8 @@ config DMA_BOUNCE_UNALIGNED_KMALLOC
config DMA_NEED_SYNC
def_bool ARCH_HAS_SYNC_DMA_FOR_DEVICE || ARCH_HAS_SYNC_DMA_FOR_CPU || \
- ARCH_HAS_SYNC_DMA_FOR_CPU_ALL || DMA_API_DEBUG || DMA_OPS || \
- SWIOTLB
+ ARCH_HAS_SYNC_DMA_FOR_CPU_ALL || DMA_API_DEBUG || \
+ ARCH_HAS_DMA_OPS || SWIOTLB
config DMA_RESTRICTED_POOL
bool "DMA Restricted Pool"
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 21926e46ef4f..6977033444a3 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_HAS_DMA) += mapping.o direct.o
-obj-$(CONFIG_DMA_OPS) += ops_helpers.o
-obj-$(CONFIG_DMA_OPS) += dummy.o
+obj-$(CONFIG_DMA_OPS_HELPERS) += ops_helpers.o
+obj-$(CONFIG_ARCH_HAS_DMA_OPS) += dummy.o
obj-$(CONFIG_DMA_CMA) += contiguous.o
obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4480a3cd92e0..5b4e6d3bf7bc 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -20,7 +20,7 @@
* it for entirely different regions. In that case the arch code needs to
* override the variable below for dma-direct to work properly.
*/
-unsigned int zone_dma_bits __ro_after_init = 24;
+u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
phys_addr_t phys)
@@ -59,7 +59,7 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
* zones.
*/
*phys_limit = dma_to_phys(dev, dma_limit);
- if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
+ if (*phys_limit <= zone_dma_limit)
return GFP_DMA;
if (*phys_limit <= DMA_BIT_MASK(32))
return GFP_DMA32;
@@ -140,7 +140,7 @@ again:
if (!page)
page = alloc_pages_node(node, gfp, get_order(size));
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- dma_free_contiguous(dev, page, size);
+ __free_pages(page, get_order(size));
page = NULL;
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
@@ -580,7 +580,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
* part of the check.
*/
if (IS_ENABLED(CONFIG_ZONE_DMA))
- min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
+ min_mask = min_t(u64, min_mask, zone_dma_limit);
return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
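The switch from a bit count (zone_dma_bits) to an explicit address limit (zone_dma_limit) keeps the default behaviour identical while letting platforms whose ZONE_DMA boundary is not a power of two express it exactly. A minimal user-space sketch of the equivalence, using the standard DMA_BIT_MASK() expansion:

	#include <stdio.h>

	/* DMA_BIT_MASK(n) expands to ((1ULL << n) - 1) for n < 64 */
	#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	int main(void)
	{
		/* old default: zone_dma_bits = 24 */
		unsigned long long old_limit = DMA_BIT_MASK(24);
		/* new default: zone_dma_limit = DMA_BIT_MASK(24) */
		unsigned long long new_limit = DMA_BIT_MASK(24);

		printf("%llx %llx\n", old_limit, new_limit); /* ffffff ffffff */
		return 0;
	}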
diff --git a/kernel/dma/dummy.c b/kernel/dma/dummy.c
index b492d59ac77e..92de80e5b057 100644
--- a/kernel/dma/dummy.c
+++ b/kernel/dma/dummy.c
@@ -17,6 +17,15 @@ static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,
{
return DMA_MAPPING_ERROR;
}
+static void dma_dummy_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ /*
+ * Dummy ops don't support map_page, so unmap_page should never be
+ * called.
+ */
+ WARN_ON_ONCE(true);
+}
static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
@@ -25,6 +34,16 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
return -EINVAL;
}
+static void dma_dummy_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ /*
+ * Dummy ops don't support map_sg, so unmap_sg should never be called.
+ */
+ WARN_ON_ONCE(true);
+}
+
static int dma_dummy_supported(struct device *hwdev, u64 mask)
{
return 0;
@@ -33,6 +52,8 @@ static int dma_dummy_supported(struct device *hwdev, u64 mask)
const struct dma_map_ops dma_dummy_ops = {
.mmap = dma_dummy_mmap,
.map_page = dma_dummy_map_page,
+ .unmap_page = dma_dummy_unmap_page,
.map_sg = dma_dummy_map_sg,
+ .unmap_sg = dma_dummy_unmap_sg,
.dma_supported = dma_dummy_supported,
};
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b1c18058d55f..864a1121bf08 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -10,6 +10,7 @@
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/iommu-dma.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
@@ -17,6 +18,9 @@
#include "debug.h"
#include "direct.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/dma.h>
+
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
@@ -116,8 +120,12 @@ EXPORT_SYMBOL(dmam_alloc_attrs);
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
const struct dma_map_ops *ops)
{
+ if (use_dma_iommu(dev))
+ return false;
+
if (likely(!ops))
return true;
+
#ifdef CONFIG_DMA_OPS_BYPASS
if (dev->dma_ops_bypass)
return min_not_zero(mask, dev->bus_dma_limit) >=
@@ -159,9 +167,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
if (dma_map_direct(dev, ops) ||
arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ else if (use_dma_iommu(dev))
+ addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
else
addr = ops->map_page(dev, page, offset, size, dir, attrs);
kmsan_handle_dma(page, offset, size, dir);
+ trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,
+ attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
return addr;
@@ -177,8 +189,11 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
if (dma_map_direct(dev, ops) ||
arch_dma_unmap_page_direct(dev, addr + size))
dma_direct_unmap_page(dev, addr, size, dir, attrs);
- else if (ops->unmap_page)
+ else if (use_dma_iommu(dev))
+ iommu_dma_unmap_page(dev, addr, size, dir, attrs);
+ else
ops->unmap_page(dev, addr, size, dir, attrs);
+ trace_dma_unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
@@ -197,11 +212,14 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
if (dma_map_direct(dev, ops) ||
arch_dma_map_sg_direct(dev, sg, nents))
ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+ else if (use_dma_iommu(dev))
+ ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
else
ents = ops->map_sg(dev, sg, nents, dir, attrs);
if (ents > 0) {
kmsan_handle_dma_sg(sg, nents, dir);
+ trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
ents != -EIO && ents != -EREMOTEIO)) {
@@ -287,10 +305,13 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
+ trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
debug_dma_unmap_sg(dev, sg, nents, dir);
if (dma_map_direct(dev, ops) ||
arch_dma_unmap_sg_direct(dev, sg, nents))
dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+ else if (use_dma_iommu(dev))
+ iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
else if (ops->unmap_sg)
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
@@ -309,9 +330,12 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
if (dma_map_direct(dev, ops))
addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+ else if (use_dma_iommu(dev))
+ addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs);
else if (ops->map_resource)
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+ trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs);
debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
return addr;
}
@@ -323,8 +347,13 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (!dma_map_direct(dev, ops) && ops->unmap_resource)
+ if (dma_map_direct(dev, ops))
+ ; /* nothing to do: uncached and no swiotlb */
+ else if (use_dma_iommu(dev))
+ iommu_dma_unmap_resource(dev, addr, size, dir, attrs);
+ else if (ops->unmap_resource)
ops->unmap_resource(dev, addr, size, dir, attrs);
+ trace_dma_unmap_resource(dev, addr, size, dir, attrs);
debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);
@@ -338,8 +367,11 @@ void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops))
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+ else if (use_dma_iommu(dev))
+ iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
else if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr, size, dir);
+ trace_dma_sync_single_for_cpu(dev, addr, size, dir);
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_cpu);
@@ -352,8 +384,11 @@ void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops))
dma_direct_sync_single_for_device(dev, addr, size, dir);
+ else if (use_dma_iommu(dev))
+ iommu_dma_sync_single_for_device(dev, addr, size, dir);
else if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr, size, dir);
+ trace_dma_sync_single_for_device(dev, addr, size, dir);
debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_device);
@@ -366,8 +401,11 @@ void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops))
dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+ else if (use_dma_iommu(dev))
+ iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
else if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_cpu);
@@ -380,8 +418,11 @@ void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
BUG_ON(!valid_dma_direction(dir));
if (dma_map_direct(dev, ops))
dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+ else if (use_dma_iommu(dev))
+ iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
else if (ops->sync_sg_for_device)
ops->sync_sg_for_device(dev, sg, nelems, dir);
+ trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_device);
@@ -405,7 +446,7 @@ static void dma_setup_need_sync(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- if (dma_map_direct(dev, ops) || (ops->flags & DMA_F_CAN_SKIP_SYNC))
+ if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
/*
* dma_skip_sync will be reset to %false on first SWIOTLB buffer
* mapping, if any. During the device initialization, it's
@@ -446,6 +487,9 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
if (dma_alloc_direct(dev, ops))
return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
size, attrs);
+ if (use_dma_iommu(dev))
+ return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+ size, attrs);
if (!ops->get_sgtable)
return -ENXIO;
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
@@ -482,6 +526,8 @@ bool dma_can_mmap(struct device *dev)
if (dma_alloc_direct(dev, ops))
return dma_direct_can_mmap(dev);
+ if (use_dma_iommu(dev))
+ return true;
return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
@@ -508,6 +554,9 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
if (dma_alloc_direct(dev, ops))
return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
attrs);
+ if (use_dma_iommu(dev))
+ return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
+ attrs);
if (!ops->mmap)
return -ENXIO;
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -520,6 +569,10 @@ u64 dma_get_required_mask(struct device *dev)
if (dma_alloc_direct(dev, ops))
return dma_direct_get_required_mask(dev);
+
+ if (use_dma_iommu(dev))
+ return DMA_BIT_MASK(32);
+
if (ops->get_required_mask)
return ops->get_required_mask(dev);
@@ -559,11 +612,14 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
if (dma_alloc_direct(dev, ops))
cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+ else if (use_dma_iommu(dev))
+ cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
else if (ops->alloc)
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
else
return NULL;
+ trace_dma_alloc(dev, cpu_addr, *dma_handle, size, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
return cpu_addr;
}
@@ -588,9 +644,12 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
if (!cpu_addr)
return;
+ trace_dma_free(dev, cpu_addr, dma_handle, size, attrs);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
if (dma_alloc_direct(dev, ops))
dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+ else if (use_dma_iommu(dev))
+ iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
else if (ops->free)
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
@@ -611,6 +670,8 @@ static struct page *__dma_alloc_pages(struct device *dev, size_t size,
size = PAGE_ALIGN(size);
if (dma_alloc_direct(dev, ops))
return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
+ if (use_dma_iommu(dev))
+ return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
if (!ops->alloc_pages_op)
return NULL;
return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
@@ -621,8 +682,11 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
{
struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
- if (page)
+ if (page) {
+ trace_dma_map_page(dev, page_to_phys(page), *dma_handle, size,
+ dir, 0);
debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+ }
return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);
@@ -635,6 +699,8 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
size = PAGE_ALIGN(size);
if (dma_alloc_direct(dev, ops))
dma_direct_free_pages(dev, size, page, dma_handle, dir);
+ else if (use_dma_iommu(dev))
+ dma_common_free_pages(dev, size, page, dma_handle, dir);
else if (ops->free_pages)
ops->free_pages(dev, size, page, dma_handle, dir);
}
@@ -642,6 +708,7 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
void dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir)
{
+ trace_dma_unmap_page(dev, dma_handle, size, dir, 0);
debug_dma_unmap_page(dev, dma_handle, size, dir);
__dma_free_pages(dev, size, page, dma_handle, dir);
}
@@ -687,7 +754,6 @@ out_free_sgt:
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
struct sg_table *sgt;
if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
@@ -695,13 +761,14 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
if (WARN_ON_ONCE(gfp & __GFP_COMP))
return NULL;
- if (ops && ops->alloc_noncontiguous)
- sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
+ if (use_dma_iommu(dev))
+ sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
else
sgt = alloc_single_sgt(dev, size, dir, gfp);
if (sgt) {
sgt->nents = 1;
+ trace_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
}
return sgt;
@@ -720,11 +787,11 @@ static void free_single_sgt(struct device *dev, size_t size,
void dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
+ trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
- if (ops && ops->free_noncontiguous)
- ops->free_noncontiguous(dev, size, sgt, dir);
+
+ if (use_dma_iommu(dev))
+ iommu_dma_free_noncontiguous(dev, size, sgt, dir);
else
free_single_sgt(dev, size, sgt, dir);
}
@@ -733,37 +800,26 @@ EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- if (ops && ops->alloc_noncontiguous)
- return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+ if (use_dma_iommu(dev))
+ return iommu_dma_vmap_noncontiguous(dev, size, sgt);
+
return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops && ops->alloc_noncontiguous)
- vunmap(vaddr);
+ if (use_dma_iommu(dev))
+ iommu_dma_vunmap_noncontiguous(dev, vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
size_t size, struct sg_table *sgt)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops && ops->alloc_noncontiguous) {
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if (vma->vm_pgoff >= count ||
- vma_pages(vma) > count - vma->vm_pgoff)
- return -ENXIO;
- return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
- }
+ if (use_dma_iommu(dev))
+ return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
@@ -772,32 +828,37 @@ static int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (use_dma_iommu(dev)) {
+ if (WARN_ON(ops))
+ return false;
+ return true;
+ }
+
/*
- * ->dma_supported sets the bypass flag, so we must always call
- * into the method here unless the device is truly direct mapped.
+ * ->dma_supported sets and clears the bypass flag, so ignore it here
+ * and always call into the method if there is one.
*/
- if (!ops)
- return dma_direct_supported(dev, mask);
- if (!ops->dma_supported)
- return 1;
- return ops->dma_supported(dev, mask);
+ if (ops) {
+ if (!ops->dma_supported)
+ return true;
+ return ops->dma_supported(dev, mask);
+ }
+
+ return dma_direct_supported(dev, mask);
}
bool dma_pci_p2pdma_supported(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- /* if ops is not set, dma direct will be used which supports P2PDMA */
- if (!ops)
- return true;
-
/*
* Note: dma_ops_bypass is not checked here because P2PDMA should
* not be used with dma mapping ops that do not have support even
* if the specific device is bypassing them.
*/
- return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
+ /* if ops is not set, dma direct and default IOMMU support P2PDMA */
+ return !ops;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
@@ -852,7 +913,7 @@ bool dma_addressing_limited(struct device *dev)
dma_get_required_mask(dev))
return true;
- if (unlikely(ops))
+ if (unlikely(ops) || use_dma_iommu(dev))
return false;
return !dma_direct_all_ram_mapped(dev);
}
@@ -865,6 +926,8 @@ size_t dma_max_mapping_size(struct device *dev)
if (dma_map_direct(dev, ops))
size = dma_direct_max_mapping_size(dev);
+ else if (use_dma_iommu(dev))
+ size = iommu_dma_max_mapping_size(dev);
else if (ops && ops->max_mapping_size)
size = ops->max_mapping_size(dev);
@@ -877,7 +940,9 @@ size_t dma_opt_mapping_size(struct device *dev)
const struct dma_map_ops *ops = get_dma_ops(dev);
size_t size = SIZE_MAX;
- if (ops && ops->opt_mapping_size)
+ if (use_dma_iommu(dev))
+ size = iommu_dma_opt_mapping_size();
+ else if (ops && ops->opt_mapping_size)
size = ops->opt_mapping_size();
return min(dma_max_mapping_size(dev), size);
@@ -888,6 +953,9 @@ unsigned long dma_get_merge_boundary(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (use_dma_iommu(dev))
+ return iommu_dma_get_merge_boundary(dev);
+
if (!ops || !ops->get_merge_boundary)
return 0; /* can't merge */
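Taken together, the mapping.c hunks establish one dispatch order for every DMA API entry point. A hedged sketch of the shape each function now follows (not a real kernel function; the names are the ones used in the patch):

	static dma_addr_t example_map(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir,
				      unsigned long attrs)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (dma_map_direct(dev, ops))	/* 1: direct mapping */
			return dma_direct_map_page(dev, page, offset, size,
						   dir, attrs);
		if (use_dma_iommu(dev))		/* 2: generic IOMMU path */
			return iommu_dma_map_page(dev, page, offset, size,
						  dir, attrs);
		/* 3: architecture/bus specific dma_map_ops */
		return ops->map_page(dev, page, offset, size, dir, attrs);
	}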
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index af4a6ef48ce0..9afd569eadb9 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -4,6 +4,7 @@
* the allocated memory contains normal pages in the direct kernel mapping.
*/
#include <linux/dma-map-ops.h>
+#include <linux/iommu-dma.h>
static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
@@ -70,8 +71,12 @@ struct page *dma_common_alloc_pages(struct device *dev, size_t size,
if (!page)
return NULL;
- *dma_handle = ops->map_page(dev, page, 0, size, dir,
- DMA_ATTR_SKIP_CPU_SYNC);
+ if (use_dma_iommu(dev))
+ *dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ else
+ *dma_handle = ops->map_page(dev, page, 0, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
if (*dma_handle == DMA_MAPPING_ERROR) {
dma_free_contiguous(dev, page, size);
return NULL;
@@ -86,7 +91,10 @@ void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- if (ops->unmap_page)
+ if (use_dma_iommu(dev))
+ iommu_dma_unmap_page(dev, dma_handle, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ else if (ops->unmap_page)
ops->unmap_page(dev, dma_handle, size, dir,
DMA_ATTR_SKIP_CPU_SYNC);
dma_free_contiguous(dev, page, size);
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index d10613eb0f63..7b04f7575796 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -70,9 +70,9 @@ static bool cma_in_zone(gfp_t gfp)
/* CMA can't cross zone boundaries, see cma_activate_area() */
end = cma_get_base(cma) + size - 1;
if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
- return end <= DMA_BIT_MASK(zone_dma_bits);
+ return end <= zone_dma_limit;
if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
- return end <= DMA_BIT_MASK(32);
+ return end <= max(DMA_BIT_MASK(32), zone_dma_limit);
return true;
}
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index 27596f3b4aef..9e2afad1c615 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -10,8 +10,10 @@ struct page **dma_common_find_pages(void *cpu_addr)
{
struct vm_struct *area = find_vm_area(cpu_addr);
- if (!area || area->flags != VM_DMA_COHERENT)
+ if (!area || !(area->flags & VM_DMA_COHERENT))
return NULL;
+ WARN(area->flags != VM_DMA_COHERENT,
+ "unexpected flags in area: %p\n", cpu_addr);
return area->pages;
}
@@ -61,7 +63,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size)
{
struct vm_struct *area = find_vm_area(cpu_addr);
- if (!area || area->flags != VM_DMA_COHERENT) {
+ if (!area || !(area->flags & VM_DMA_COHERENT)) {
WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
return;
}
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index df68d29740a0..abcf3fa63a56 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -450,9 +450,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
if (!remap)
io_tlb_default_mem.can_grow = true;
if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
- io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
+ io_tlb_default_mem.phys_limit = zone_dma_limit;
else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
- io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
+ io_tlb_default_mem.phys_limit = max(DMA_BIT_MASK(32), zone_dma_limit);
else
io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
#endif
@@ -629,7 +629,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
}
gfp &= ~GFP_ZONEMASK;
- if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
+ if (phys_limit <= zone_dma_limit)
gfp |= __GFP_DMA;
else if (phys_limit <= DMA_BIT_MASK(32))
gfp |= __GFP_DMA32;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4f03eb908e7f..e3589c4287cb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -969,10 +969,10 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct fd f = fdget(fd);
int ret = 0;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- css = css_tryget_online_from_dir(f.file->f_path.dentry,
+ css = css_tryget_online_from_dir(fd_file(f)->f_path.dentry,
&perf_event_cgrp_subsys);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
@@ -6001,10 +6001,10 @@ static const struct file_operations perf_fops;
static inline int perf_fget_light(int fd, struct fd *p)
{
struct fd f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (f.file->f_op != &perf_fops) {
+ if (fd_file(f)->f_op != &perf_fops) {
fdput(f);
return -EBADF;
}
@@ -6064,7 +6064,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
ret = perf_fget_light(arg, &output);
if (ret)
return ret;
- output_event = output.file->private_data;
+ output_event = fd_file(output)->private_data;
ret = perf_event_set_output(event, output_event);
fdput(output);
} else {
@@ -6821,7 +6821,6 @@ static int perf_fasync(int fd, struct file *filp, int on)
}
static const struct file_operations perf_fops = {
- .llseek = no_llseek,
.release = perf_release,
.read = perf_read,
.poll = perf_poll,
@@ -8964,7 +8963,7 @@ got_name:
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
if (atomic_read(&nr_build_id_events))
- build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size);
+ build_id_parse_nofault(vma, mmap_event->build_id, &mmap_event->build_id_size);
perf_iterate_sb(perf_event_mmap_output,
mmap_event,
@@ -12665,7 +12664,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr attr;
struct perf_event_context *ctx;
struct file *event_file = NULL;
- struct fd group = {NULL, 0};
+ struct fd group = EMPTY_FD;
struct task_struct *task = NULL;
struct pmu *pmu;
int event_fd;
@@ -12740,7 +12739,7 @@ SYSCALL_DEFINE5(perf_event_open,
err = perf_fget_light(group_fd, &group);
if (err)
goto err_fd;
- group_leader = group.file->private_data;
+ group_leader = fd_file(group)->private_data;
if (flags & PERF_FLAG_FD_OUTPUT)
output_event = group_leader;
if (flags & PERF_FLAG_FD_NO_GROUP)
@@ -14002,21 +14001,19 @@ static void perf_event_setup_cpumask(unsigned int cpu)
struct cpumask *pmu_cpumask;
unsigned int scope;
- cpumask_set_cpu(cpu, perf_online_mask);
-
/*
* Early boot stage, the cpumask hasn't been set yet.
* The perf_online_<domain>_masks includes the first CPU of each domain.
- * Always uncondifionally set the boot CPU for the perf_online_<domain>_masks.
+ * Always unconditionally set the boot CPU for the perf_online_<domain>_masks.
*/
- if (!topology_sibling_cpumask(cpu)) {
+ if (cpumask_empty(perf_online_mask)) {
for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
pmu_cpumask = perf_scope_cpumask(scope);
if (WARN_ON_ONCE(!pmu_cpumask))
continue;
cpumask_set_cpu(cpu, pmu_cpumask);
}
- return;
+ goto end;
}
for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
@@ -14031,6 +14028,8 @@ static void perf_event_setup_cpumask(unsigned int cpu)
cpumask_any_and(pmu_cpumask, cpumask) >= nr_cpu_ids)
cpumask_set_cpu(cpu, pmu_cpumask);
}
+end:
+ cpumask_set_cpu(cpu, perf_online_mask);
}
int perf_event_init_cpu(unsigned int cpu)
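The fd_file() conversions in this file follow the tree-wide pattern of hiding struct fd's layout behind an accessor. A hedged sketch of the resulting idiom:

	static int example_use_fd(int fd)
	{
		struct fd f = fdget(fd);

		if (!fd_file(f))	/* empty fd: nothing to put */
			return -EBADF;

		/* ... operate on fd_file(f) ... */

		fdput(f);
		return 0;
	}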
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4b7e590dc428..4b52cb2ae6d6 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -103,8 +103,7 @@ struct xol_area {
atomic_t slot_count; /* number of in-use slots */
unsigned long *bitmap; /* 0 = free slot */
- struct vm_special_mapping xol_mapping;
- struct page *pages[2];
+ struct page *page;
/*
* We keep the vma's vm_start rather than a pointer to the vma
* itself. The probed process or a naughty kernel module could make
@@ -1466,6 +1465,21 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
+static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;
+
+ vmf->page = area->page;
+ get_page(vmf->page);
+ return 0;
+}
+
+static const struct vm_special_mapping xol_mapping = {
+ .name = "[uprobes]",
+ .fault = xol_fault,
+};
+
/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
@@ -1492,7 +1506,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
- &area->xol_mapping);
+ &xol_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
@@ -1531,12 +1545,9 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
if (!area->bitmap)
goto free_area;
- area->xol_mapping.name = "[uprobes]";
- area->xol_mapping.pages = area->pages;
- area->pages[0] = alloc_page(GFP_HIGHUSER);
- if (!area->pages[0])
+ area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+ if (!area->page)
goto free_bitmap;
- area->pages[1] = NULL;
area->vaddr = vaddr;
init_waitqueue_head(&area->wq);
@@ -1544,12 +1555,12 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
set_bit(0, area->bitmap);
atomic_set(&area->slot_count, 1);
insns = arch_uprobe_trampoline(&insns_size);
- arch_uprobe_copy_ixol(area->pages[0], 0, insns, insns_size);
+ arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);
if (!xol_add_vma(mm, area))
return area;
- __free_page(area->pages[0]);
+ __free_page(area->page);
free_bitmap:
kfree(area->bitmap);
free_area:
@@ -1591,7 +1602,7 @@ void uprobe_clear_state(struct mm_struct *mm)
if (!area)
return;
- put_page(area->pages[0]);
+ put_page(area->page);
kfree(area->bitmap);
kfree(area);
}
@@ -1658,7 +1669,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
if (unlikely(!xol_vaddr))
return 0;
- arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
+ arch_uprobe_copy_ixol(area->page, xol_vaddr,
&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
return xol_vaddr;
diff --git a/kernel/exit.c b/kernel/exit.c
index 0d62a53605df..619f0014c33b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -778,6 +778,62 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
}
#ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p)
+{
+ unsigned long *n = end_of_stack(p);
+
+ do { /* Skip over canary */
+# ifdef CONFIG_STACK_GROWSUP
+ n--;
+# else
+ n++;
+# endif
+ } while (!*n);
+
+# ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long)end_of_stack(p) - (unsigned long)n;
+# else
+ return (unsigned long)n - (unsigned long)end_of_stack(p);
+# endif
+}
+
+/* Count the maximum pages reached in kernel stacks */
+static inline void kstack_histogram(unsigned long used_stack)
+{
+#ifdef CONFIG_VM_EVENT_COUNTERS
+ if (used_stack <= 1024)
+ count_vm_event(KSTACK_1K);
+#if THREAD_SIZE > 1024
+ else if (used_stack <= 2048)
+ count_vm_event(KSTACK_2K);
+#endif
+#if THREAD_SIZE > 2048
+ else if (used_stack <= 4096)
+ count_vm_event(KSTACK_4K);
+#endif
+#if THREAD_SIZE > 4096
+ else if (used_stack <= 8192)
+ count_vm_event(KSTACK_8K);
+#endif
+#if THREAD_SIZE > 8192
+ else if (used_stack <= 16384)
+ count_vm_event(KSTACK_16K);
+#endif
+#if THREAD_SIZE > 16384
+ else if (used_stack <= 32768)
+ count_vm_event(KSTACK_32K);
+#endif
+#if THREAD_SIZE > 32768
+ else if (used_stack <= 65536)
+ count_vm_event(KSTACK_64K);
+#endif
+#if THREAD_SIZE > 65536
+ else
+ count_vm_event(KSTACK_REST);
+#endif
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+}
+
static void check_stack_usage(void)
{
static DEFINE_SPINLOCK(low_water_lock);
@@ -785,6 +841,7 @@ static void check_stack_usage(void)
unsigned long free;
free = stack_not_used(current);
+ kstack_histogram(THREAD_SIZE - free);
if (free >= lowest_to_date)
return;
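A hedged worked example of the bucketing in kstack_histogram(): with THREAD_SIZE = 16384 and stack_not_used() reporting 11000 free bytes, used_stack is 5384, which lands in KSTACK_8K. A small user-space model of the same thresholds:

	#include <stdio.h>

	static const char *kstack_bucket(unsigned long used)
	{
		if (used <= 1024)	return "KSTACK_1K";
		if (used <= 2048)	return "KSTACK_2K";
		if (used <= 4096)	return "KSTACK_4K";
		if (used <= 8192)	return "KSTACK_8K";
		if (used <= 16384)	return "KSTACK_16K";
		if (used <= 32768)	return "KSTACK_32K";
		if (used <= 65536)	return "KSTACK_64K";
		return "KSTACK_REST";
	}

	int main(void)
	{
		printf("%s\n", kstack_bucket(16384 - 11000)); /* KSTACK_8K */
		return 0;
	}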
diff --git a/kernel/fork.c b/kernel/fork.c
index d4b2d543f48c..60c0b4868fd4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -23,6 +23,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
+#include <linux/sched/ext.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
@@ -832,7 +833,7 @@ static void check_mm(struct mm_struct *mm)
pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
mm_pgtables_bytes(mm));
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}
@@ -969,6 +970,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);
+ sched_ext_free(tsk);
io_uring_free(tsk);
cgroup_free(tsk);
task_numa_free(tsk, true);
@@ -997,7 +999,7 @@ void __init __weak arch_task_cache_init(void) { }
static void __init set_max_threads(unsigned int max_threads_suggested)
{
u64 threads;
- unsigned long nr_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+ unsigned long nr_pages = memblock_estimated_nr_free_pages();
/*
* The number of threads shall be limited such that the thread
@@ -1276,7 +1278,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
RCU_INIT_POINTER(mm->exe_file, NULL);
mmu_notifier_subscriptions_init(mm);
init_tlb_flush_pending(mm);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
mm->pmd_huge_pte = NULL;
#endif
mm_init_uprobes_state(mm);
@@ -2346,7 +2348,7 @@ __latent_entropy struct task_struct *copy_process(
retval = perf_event_init_task(p, clone_flags);
if (retval)
- goto bad_fork_cleanup_policy;
+ goto bad_fork_sched_cancel_fork;
retval = audit_alloc(p);
if (retval)
goto bad_fork_cleanup_perf;
@@ -2479,7 +2481,9 @@ __latent_entropy struct task_struct *copy_process(
* cgroup specific, it unconditionally needs to place the task on a
* runqueue.
*/
- sched_cgroup_fork(p, args);
+ retval = sched_cgroup_fork(p, args);
+ if (retval)
+ goto bad_fork_cancel_cgroup;
/*
* From this point on we must avoid any synchronous user-space
@@ -2525,13 +2529,13 @@ __latent_entropy struct task_struct *copy_process(
/* Don't start children in a dying pid namespace */
if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
retval = -ENOMEM;
- goto bad_fork_cancel_cgroup;
+ goto bad_fork_core_free;
}
/* Let kill terminate clone/fork in the middle */
if (fatal_signal_pending(current)) {
retval = -EINTR;
- goto bad_fork_cancel_cgroup;
+ goto bad_fork_core_free;
}
/* No more failure paths after this point. */
@@ -2605,10 +2609,11 @@ __latent_entropy struct task_struct *copy_process(
return p;
-bad_fork_cancel_cgroup:
+bad_fork_core_free:
sched_core_free(p);
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
+bad_fork_cancel_cgroup:
cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
if (clone_flags & CLONE_PIDFD) {
@@ -2647,6 +2652,8 @@ bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_perf:
perf_event_free_task(p);
+bad_fork_sched_cancel_fork:
+ sched_cancel_fork(p);
bad_fork_cleanup_policy:
lockdep_free_task(p);
#ifdef CONFIG_NUMA
diff --git a/kernel/freezer.c b/kernel/freezer.c
index f57aaf96b829..44bbd7dbd2c8 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -72,7 +72,7 @@ bool __refrigerator(bool check_kthr_stop)
bool freeze;
raw_spin_lock_irq(&current->pi_lock);
- set_current_state(TASK_FROZEN);
+ WRITE_ONCE(current->__state, TASK_FROZEN);
/* unstale saved_state so that __thaw_task() will wake us up */
current->saved_state = TASK_RUNNING;
raw_spin_unlock_irq(&current->pi_lock);
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 06a1f091be81..136768ae2637 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -34,6 +34,7 @@
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
+#include <linux/debugfs.h>
#include <linux/plist.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 1c7e5159064c..3a24d6b5f559 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -832,7 +832,7 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info)
struct irq_chip *chip = info->chip;
BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
- if (!chip->irq_set_affinity)
+ if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
chip->irq_set_affinity = msi_domain_set_affinity;
}
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 6dc76b590703..93a822d3c468 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -168,7 +168,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
jump_label_update(key);
/*
* Ensure that when static_key_fast_inc_not_disabled() or
- * static_key_slow_try_dec() observe the positive value,
+ * static_key_dec_not_one() observe the positive value,
* they must also observe all the text changes.
*/
atomic_set_release(&key->enabled, 1);
@@ -250,7 +250,7 @@ void static_key_disable(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_disable);
-static bool static_key_slow_try_dec(struct static_key *key)
+static bool static_key_dec_not_one(struct static_key *key)
{
int v;
@@ -274,6 +274,14 @@ static bool static_key_slow_try_dec(struct static_key *key)
* enabled. This suggests an ordering problem on the user side.
*/
WARN_ON_ONCE(v < 0);
+
+ /*
+ * Warn about underflow, and lie about success in an attempt to
+ * not make things worse.
+ */
+ if (WARN_ON_ONCE(v == 0))
+ return true;
+
if (v <= 1)
return false;
} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
@@ -284,15 +292,27 @@ static bool static_key_slow_try_dec(struct static_key *key)
static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
lockdep_assert_cpus_held();
+ int val;
- if (static_key_slow_try_dec(key))
+ if (static_key_dec_not_one(key))
return;
guard(mutex)(&jump_label_mutex);
- if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+ val = atomic_read(&key->enabled);
+ /*
+ * It should be impossible to observe -1 with jump_label_mutex held,
+ * see static_key_slow_inc_cpuslocked().
+ */
+ if (WARN_ON_ONCE(val == -1))
+ return;
+ /*
+ * Cannot already be 0, something went sideways.
+ */
+ if (WARN_ON_ONCE(val == 0))
+ return;
+
+ if (atomic_dec_and_test(&key->enabled))
jump_label_update(key);
- else
- WARN_ON_ONCE(!static_key_slow_try_dec(key));
}
static void __static_key_slow_dec(struct static_key *key)
@@ -329,7 +349,7 @@ void __static_key_slow_dec_deferred(struct static_key *key,
{
STATIC_KEY_CHECK_USE(key);
- if (static_key_slow_try_dec(key))
+ if (static_key_dec_not_one(key))
return;
schedule_delayed_work(work, timeout);
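A hedged illustration of the underflow the new WARN_ON_ONCE(v == 0) catches: an unbalanced extra decrement. Because static_key_dec_not_one() now "lies about success" in that case, enabled stays at 0 instead of going negative:

	DEFINE_STATIC_KEY_FALSE(example_key);

	static void unbalanced(void)
	{
		static_branch_inc(&example_key);  /* enabled: 0 -> 1 */
		static_branch_dec(&example_key);  /* enabled: 1 -> 0 */
		static_branch_dec(&example_key);  /* bug: WARNs once, enabled stays 0 */
	}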
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 2595defe8c0d..d35d9792402d 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -23,7 +23,8 @@ int kimage_is_destination_range(struct kimage *image,
extern atomic_t __kexec_lock;
static inline bool kexec_trylock(void)
{
- return atomic_cmpxchg_acquire(&__kexec_lock, 0, 1) == 0;
+ int old = 0;
+ return atomic_try_cmpxchg_acquire(&__kexec_lock, &old, 1);
}
static inline void kexec_unlock(void)
{
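The kexec_trylock() rewrite is a straight conversion to the try_cmpxchg() idiom: atomic_try_cmpxchg_acquire() returns a boolean and, on failure, writes the observed value back into @old, so no separate comparison against the return value is needed. The same idiom in isolation (illustrative):

	static inline bool example_trylock(atomic_t *lock)
	{
		int old = 0;

		return atomic_try_cmpxchg_acquire(lock, &old, 1);
	}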
diff --git a/kernel/kthread.c b/kernel/kthread.c
index f7be976ff88a..db4ceb0f503c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -845,8 +845,16 @@ repeat:
* event only cares about the address.
*/
trace_sched_kthread_work_execute_end(work, func);
- } else if (!freezing(current))
+ } else if (!freezing(current)) {
schedule();
+ } else {
+ /*
+ * Handle the case where the current task remains
+ * TASK_INTERRUPTIBLE. try_to_freeze() expects
+ * the current task to be TASK_RUNNING.
+ */
+ __set_current_state(TASK_RUNNING);
+ }
try_to_freeze();
cond_resched();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7963deac33c3..536bd471557f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -788,7 +788,7 @@ static void lockdep_print_held_locks(struct task_struct *p)
printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
else
printk("%d lock%s held by %s/%d:\n", depth,
- depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
+ str_plural(depth), p->comm, task_pid_nr(p));
/*
* It's not reliable to print a task's held locks if it's not sleeping
* and it's not the current task.
@@ -2084,6 +2084,9 @@ static noinline void print_bfs_bug(int ret)
/*
* Breadth-first-search failed, graph got corrupted?
*/
+ if (ret == BFS_EQUEUEFULL)
+ pr_warn("Increase LOCKDEP_CIRCULAR_QUEUE_BITS to avoid this warning:\n");
+
WARN(1, "lockdep bfs error:%d\n", ret);
}
@@ -6263,25 +6266,27 @@ static struct pending_free *get_pending_free(void)
static void free_zapped_rcu(struct rcu_head *cb);
/*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback. Must be called with
+ * the lockdep lock held. Returns false if either we don't have
+ * any pending free or the callback is already scheduled;
+ * otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
{
WARN_ON_ONCE(inside_selftest());
if (list_empty(&pf->zapped))
- return;
+ return false;
if (delayed_free.scheduled)
- return;
+ return false;
delayed_free.scheduled = true;
WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
delayed_free.index ^= 1;
- call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ return true;
}
/* The caller must hold the graph lock. May be called from RCU context. */
@@ -6307,6 +6312,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
{
struct pending_free *pf;
unsigned long flags;
+ bool need_callback;
if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
return;
@@ -6318,14 +6324,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
pf = delayed_free.pf + (delayed_free.index ^ 1);
__free_zapped_classes(pf);
delayed_free.scheduled = false;
+ need_callback =
+ prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+ lockdep_unlock();
+ raw_local_irq_restore(flags);
/*
- * If there's anything on the open list, close and start a new callback.
- */
- call_rcu_zapped(delayed_free.pf + delayed_free.index);
+ * If there's a pending free and its callback has not been scheduled,
+ * queue an RCU callback.
+ */
+ if (need_callback)
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
- lockdep_unlock();
- raw_local_irq_restore(flags);
}
/*
@@ -6365,6 +6375,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
{
struct pending_free *pf;
unsigned long flags;
+ bool need_callback;
init_data_structures_once();
@@ -6372,10 +6383,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
lockdep_lock();
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
- call_rcu_zapped(pf);
+ need_callback = prepare_call_rcu_zapped(pf);
lockdep_unlock();
raw_local_irq_restore(flags);
-
+ if (need_callback)
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
/*
* Wait for any possible iterators from look_up_lock_class() to pass
* before continuing to free the memory they refer to.
@@ -6469,6 +6481,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
struct pending_free *pf;
unsigned long flags;
int locked;
+ bool need_callback = false;
raw_local_irq_save(flags);
locked = graph_lock();
@@ -6477,11 +6490,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
pf = get_pending_free();
__lockdep_reset_lock(pf, lock);
- call_rcu_zapped(pf);
+ need_callback = prepare_call_rcu_zapped(pf);
graph_unlock();
out_irq:
raw_local_irq_restore(flags);
+ if (need_callback)
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
}
/*
@@ -6525,6 +6540,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
struct pending_free *pf;
unsigned long flags;
bool found = false;
+ bool need_callback = false;
might_sleep();
@@ -6545,11 +6561,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
if (found) {
pf = get_pending_free();
__lockdep_free_key_range(pf, key, 1);
- call_rcu_zapped(pf);
+ need_callback = prepare_call_rcu_zapped(pf);
}
lockdep_unlock();
raw_local_irq_restore(flags);
+ if (need_callback)
+ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
synchronize_rcu();
}
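All four call sites above now share the same shape: decide whether a callback is needed while holding the lockdep lock, then issue call_rcu() only after dropping it, so call_rcu() is never invoked under the raw lock. A hedged, simplified sketch (irq-flags handling omitted):

	static void example_zap(void)
	{
		bool need_callback;

		lockdep_lock();
		need_callback = prepare_call_rcu_zapped(get_pending_free());
		lockdep_unlock();

		if (need_callback)
			call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
	}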
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index e2bfb1db589d..6db0f43fc4df 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -424,7 +424,7 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
for (i = 0; i < offset; i++)
seq_puts(m, " ");
for (i = 0; i < length; i++)
- seq_printf(m, "%c", c);
+ seq_putc(m, c);
seq_puts(m, "\n");
}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index fba1229f1de6..ebebd0eec7f6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -347,7 +347,7 @@ static __always_inline int __waiter_prio(struct task_struct *task)
{
int prio = task->prio;
- if (!rt_prio(prio))
+ if (!rt_or_dl_prio(prio))
return DEFAULT_PRIO;
return prio;
@@ -435,7 +435,7 @@ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
* Note that RT tasks are excluded from same priority (lateral)
* steals to prevent the introduction of an unbounded latency.
*/
- if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio))
+ if (rt_or_dl_prio(waiter->tree.prio))
return false;
return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 33cac79e3994..2bbb6eca5144 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -181,12 +181,21 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
__rwsem_set_reader_owned(sem, current);
}
+#ifdef CONFIG_DEBUG_RWSEMS
+/*
+ * Return just the real task structure pointer of the owner
+ */
+static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
+{
+ return (struct task_struct *)
+ (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
+}
+
/*
* Return true if the rwsem is owned by a reader.
*/
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
-#ifdef CONFIG_DEBUG_RWSEMS
/*
* Check the count to see if it is write-locked.
*/
@@ -194,11 +203,9 @@ static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
if (count & RWSEM_WRITER_MASK)
return false;
-#endif
return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}
-#ifdef CONFIG_DEBUG_RWSEMS
/*
* With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
* is a task pointer in owner of a reader-owned rwsem, it will be the
@@ -266,15 +273,6 @@ static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
}
/*
- * Return just the real task structure pointer of the owner
- */
-static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
-{
- return (struct task_struct *)
- (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
-}
-
-/*
* Return the real task structure pointer of the owner and the embedded
* flags in the owner. pflags must be non-NULL.
*/
@@ -631,7 +629,7 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
* if it is an RT task or wait in the wait queue
* for too long.
*/
- if (has_handoff || (!rt_task(waiter->task) &&
+ if (has_handoff || (!rt_or_dl_task(waiter->task) &&
!time_after(jiffies, waiter->timeout)))
return false;
@@ -914,7 +912,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
if (owner_state != OWNER_WRITER) {
if (need_resched())
break;
- if (rt_task(current) &&
+ if (rt_or_dl_task(current) &&
(prev_owner_state != OWNER_WRITER))
break;
}
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 78719e1ef1b1..10a5736a21c2 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -697,3 +697,4 @@ module_exit(test_ww_mutex_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("API test facility for ww_mutexes");
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 3ad2cc4823e5..76d204b7d29c 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -237,7 +237,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
int a_prio = a->task->prio;
int b_prio = b->task->prio;
- if (rt_prio(a_prio) || rt_prio(b_prio)) {
+ if (rt_or_dl_prio(a_prio) || rt_or_dl_prio(b_prio)) {
if (a_prio > b_prio)
return true;
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index 4047b6d48255..7c6588148d42 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -160,6 +160,7 @@ config MODULE_UNLOAD_TAINT_TRACKING
config MODVERSIONS
bool "Module versioning support"
+ depends on !COMPILE_TEST
help
Usually, you have to use modules compiled with your kernel.
Saying Y here makes it sometimes possible to use modules
@@ -228,7 +229,7 @@ comment "Do not forget to sign required modules with scripts/sign-file"
depends on MODULE_SIG_FORCE && !MODULE_SIG_ALL
choice
- prompt "Which hash algorithm should modules be signed with?"
+ prompt "Hash algorithm to sign modules"
depends on MODULE_SIG || IMA_APPRAISE_MODSIG
help
This determines which sort of hashing algorithm will be used during
@@ -238,31 +239,31 @@ choice
the signature on that module.
config MODULE_SIG_SHA1
- bool "Sign modules with SHA-1"
+ bool "SHA-1"
select CRYPTO_SHA1
config MODULE_SIG_SHA256
- bool "Sign modules with SHA-256"
+ bool "SHA-256"
select CRYPTO_SHA256
config MODULE_SIG_SHA384
- bool "Sign modules with SHA-384"
+ bool "SHA-384"
select CRYPTO_SHA512
config MODULE_SIG_SHA512
- bool "Sign modules with SHA-512"
+ bool "SHA-512"
select CRYPTO_SHA512
config MODULE_SIG_SHA3_256
- bool "Sign modules with SHA3-256"
+ bool "SHA3-256"
select CRYPTO_SHA3
config MODULE_SIG_SHA3_384
- bool "Sign modules with SHA3-384"
+ bool "SHA3-384"
select CRYPTO_SHA3
config MODULE_SIG_SHA3_512
- bool "Sign modules with SHA3-512"
+ bool "SHA3-512"
select CRYPTO_SHA3
endchoice
@@ -278,64 +279,65 @@ config MODULE_SIG_HASH
default "sha3-384" if MODULE_SIG_SHA3_384
default "sha3-512" if MODULE_SIG_SHA3_512
-choice
- prompt "Module compression mode"
+config MODULE_COMPRESS
+ bool "Module compression"
help
- This option allows you to choose the algorithm which will be used to
- compress modules when 'make modules_install' is run. (or, you can
- choose to not compress modules at all.)
-
- External modules will also be compressed in the same way during the
- installation.
-
- For modules inside an initrd or initramfs, it's more efficient to
- compress the whole initrd or initramfs instead.
-
+ Enable module compression to reduce on-disk size of module binaries.
This is fully compatible with signed modules.
- Please note that the tool used to load modules needs to support the
- corresponding algorithm. module-init-tools MAY support gzip, and kmod
- MAY support gzip, xz and zstd.
+ The tool used to work with modules needs to support the selected
+ compression type. kmod MAY support gzip, xz and zstd. Other tools
+ might have a limited selection of the supported types.
- Your build system needs to provide the appropriate compression tool
- to compress the modules.
+ Note that for modules inside an initrd or initramfs, it's more
+ efficient to compress the whole ramdisk instead.
- If in doubt, select 'None'.
+ If unsure, say N.
-config MODULE_COMPRESS_NONE
- bool "None"
+choice
+ prompt "Module compression type"
+ depends on MODULE_COMPRESS
help
- Do not compress modules. The installed modules are suffixed
- with .ko.
+ Choose the supported algorithm for module compression.
config MODULE_COMPRESS_GZIP
bool "GZIP"
help
- Compress modules with GZIP. The installed modules are suffixed
- with .ko.gz.
+ Support modules compressed with GZIP. The installed modules are
+ suffixed with .ko.gz.
config MODULE_COMPRESS_XZ
bool "XZ"
help
- Compress modules with XZ. The installed modules are suffixed
- with .ko.xz.
+ Support modules compressed with XZ. The installed modules are
+ suffixed with .ko.xz.
config MODULE_COMPRESS_ZSTD
bool "ZSTD"
help
- Compress modules with ZSTD. The installed modules are suffixed
- with .ko.zst.
+ Support modules compressed with ZSTD. The installed modules are
+ suffixed with .ko.zst.
endchoice
+config MODULE_COMPRESS_ALL
+ bool "Automatically compress all modules"
+ default y
+ depends on MODULE_COMPRESS
+ help
+ Compress all modules during 'make modules_install'.
+
+ Your build system needs to provide the appropriate compression tool
+ for the selected compression type. External modules will also be
+ compressed in the same way during the installation.
+
config MODULE_DECOMPRESS
bool "Support in-kernel module decompression"
- depends on MODULE_COMPRESS_GZIP || MODULE_COMPRESS_XZ || MODULE_COMPRESS_ZSTD
+ depends on MODULE_COMPRESS
select ZLIB_INFLATE if MODULE_COMPRESS_GZIP
select XZ_DEC if MODULE_COMPRESS_XZ
select ZSTD_DECOMPRESS if MODULE_COMPRESS_ZSTD
help
-
Support for decompressing kernel modules by the kernel itself
instead of relying on userspace to perform this task. Useful when
load pinning security policy is enabled.
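The split above makes userspace tooling, not the kernel build, the compatibility constraint. A minimal userspace sketch of dispatching on the installed-module suffixes named in these help texts; the table and function are illustrative only, and the in-kernel decompressor (kernel/module/decompress.c) identifies the format from the file contents rather than the name:

#include <stdio.h>
#include <string.h>

static const char *pick_decompressor(const char *path)
{
	static const struct { const char *suffix, *algo; } tab[] = {
		{ ".ko.gz",  "gzip" },
		{ ".ko.xz",  "xz"   },
		{ ".ko.zst", "zstd" },
	};
	size_t len = strlen(path);

	for (size_t i = 0; i < sizeof(tab) / sizeof(tab[0]); i++) {
		size_t slen = strlen(tab[i].suffix);

		/* match the trailing ".ko.<ext>" of the installed name */
		if (len >= slen && !strcmp(path + len - slen, tab[i].suffix))
			return tab[i].algo;
	}
	return "none";	/* plain .ko, no decompression needed */
}

int main(void)
{
	printf("%s\n", pick_decompressor("nvme.ko.zst"));	/* zstd */
	return 0;
}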
diff --git a/kernel/module/debug_kmemleak.c b/kernel/module/debug_kmemleak.c
index 12a569d361e8..b4cc03842d70 100644
--- a/kernel/module/debug_kmemleak.c
+++ b/kernel/module/debug_kmemleak.c
@@ -12,19 +12,9 @@
void kmemleak_load_module(const struct module *mod,
const struct load_info *info)
{
- unsigned int i;
-
- /* only scan the sections containing data */
- kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
-
- for (i = 1; i < info->hdr->e_shnum; i++) {
- /* Scan all writable sections that's not executable */
- if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
- !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
- (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
- continue;
-
- kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
- info->sechdrs[i].sh_size, GFP_KERNEL);
+ /* only scan writable, non-executable sections */
+ for_each_mod_mem_type(type) {
+ if (type != MOD_DATA && type != MOD_INIT_DATA)
+ kmemleak_no_scan(mod->mem[type].base);
}
}
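The rewrite drops the ELF section walk in favour of the per-type module memory layout. A standalone sketch of the same filter, assuming an enum that mirrors the kernel's mod_mem_type; everything here is illustrative, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

enum mod_mem_type {
	MOD_TEXT, MOD_DATA, MOD_RODATA, MOD_RO_AFTER_INIT,
	MOD_INIT_TEXT, MOD_INIT_DATA, MOD_INIT_RODATA, MOD_MEM_NUM_TYPES,
};

/* only the writable data regions can legitimately hold heap pointers */
static bool scan_for_leaks(enum mod_mem_type type)
{
	return type == MOD_DATA || type == MOD_INIT_DATA;
}

int main(void)
{
	for (int t = 0; t < MOD_MEM_NUM_TYPES; t++)
		printf("type %d: %s\n", t,
		       scan_for_leaks(t) ? "scan" : "kmemleak_no_scan");
	return 0;
}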
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 71396e297499..49b9bca9de12 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -3234,7 +3234,7 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
return -EINVAL;
f = fdget(fd);
- err = idempotent_init_module(f.file, uargs, flags);
+ err = idempotent_init_module(fd_file(f), uargs, flags);
fdput(f);
return err;
}
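This call site, like the nsproxy and pid hunks below, switches from touching f.file directly to the fd_file() accessor, so the struct fd representation can change without churning every caller. A toy model of the pattern, with simplified stand-in types:

#include <stdio.h>

struct file { int ino; };
struct fd { struct file *file; };	/* callers should not peek inside */

static inline struct file *fd_file(struct fd f)
{
	return f.file;
}

int main(void)
{
	struct file fl = { .ino = 42 };
	struct fd f = { .file = &fl };

	printf("%d\n", fd_file(f)->ino);	/* rather than f.file->ino */
	return 0;
}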
diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c
index 26efe1305c12..456358e1fdc4 100644
--- a/kernel/module/sysfs.c
+++ b/kernel/module/sysfs.c
@@ -69,12 +69,13 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
kfree(sect_attrs);
}
-static void add_sect_attrs(struct module *mod, const struct load_info *info)
+static int add_sect_attrs(struct module *mod, const struct load_info *info)
{
unsigned int nloaded = 0, i, size[2];
struct module_sect_attrs *sect_attrs;
struct module_sect_attr *sattr;
struct bin_attribute **gattr;
+ int ret;
/* Count loaded sections and allocate structures */
for (i = 0; i < info->hdr->e_shnum; i++)
@@ -85,7 +86,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
if (!sect_attrs)
- return;
+ return -ENOMEM;
/* Setup section attributes. */
sect_attrs->grp.name = "sections";
@@ -103,8 +104,10 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
sattr->address = sec->sh_addr;
sattr->battr.attr.name =
kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
- if (!sattr->battr.attr.name)
+ if (!sattr->battr.attr.name) {
+ ret = -ENOMEM;
goto out;
+ }
sect_attrs->nsections++;
sattr->battr.read = module_sect_read;
sattr->battr.size = MODULE_SECT_READ_SIZE;
@@ -113,13 +116,15 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
}
*gattr = NULL;
- if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
+ ret = sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp);
+ if (ret)
goto out;
mod->sect_attrs = sect_attrs;
- return;
+ return 0;
out:
free_sect_attrs(sect_attrs);
+ return ret;
}
static void remove_sect_attrs(struct module *mod)
@@ -158,15 +163,12 @@ static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
kfree(notes_attrs);
}
-static void add_notes_attrs(struct module *mod, const struct load_info *info)
+static int add_notes_attrs(struct module *mod, const struct load_info *info)
{
unsigned int notes, loaded, i;
struct module_notes_attrs *notes_attrs;
struct bin_attribute *nattr;
-
- /* failed to create section attributes, so can't create notes */
- if (!mod->sect_attrs)
- return;
+ int ret;
/* Count notes sections and allocate structures. */
notes = 0;
@@ -176,12 +178,12 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
++notes;
if (notes == 0)
- return;
+ return 0;
notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
GFP_KERNEL);
if (!notes_attrs)
- return;
+ return -ENOMEM;
notes_attrs->notes = notes;
nattr = &notes_attrs->attrs[0];
@@ -201,19 +203,23 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
}
notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
- if (!notes_attrs->dir)
+ if (!notes_attrs->dir) {
+ ret = -ENOMEM;
goto out;
+ }
- for (i = 0; i < notes; ++i)
- if (sysfs_create_bin_file(notes_attrs->dir,
- &notes_attrs->attrs[i]))
+ for (i = 0; i < notes; ++i) {
+ ret = sysfs_create_bin_file(notes_attrs->dir, &notes_attrs->attrs[i]);
+ if (ret)
goto out;
+ }
mod->notes_attrs = notes_attrs;
- return;
+ return 0;
out:
free_notes_attrs(notes_attrs, i);
+ return ret;
}
static void remove_notes_attrs(struct module *mod)
@@ -223,9 +229,15 @@ static void remove_notes_attrs(struct module *mod)
}
#else /* !CONFIG_KALLSYMS */
-static inline void add_sect_attrs(struct module *mod, const struct load_info *info) { }
+static inline int add_sect_attrs(struct module *mod, const struct load_info *info)
+{
+ return 0;
+}
static inline void remove_sect_attrs(struct module *mod) { }
-static inline void add_notes_attrs(struct module *mod, const struct load_info *info) { }
+static inline int add_notes_attrs(struct module *mod, const struct load_info *info)
+{
+ return 0;
+}
static inline void remove_notes_attrs(struct module *mod) { }
#endif /* CONFIG_KALLSYMS */
@@ -385,11 +397,20 @@ int mod_sysfs_setup(struct module *mod,
if (err)
goto out_unreg_modinfo_attrs;
- add_sect_attrs(mod, info);
- add_notes_attrs(mod, info);
+ err = add_sect_attrs(mod, info);
+ if (err)
+ goto out_del_usage_links;
+
+ err = add_notes_attrs(mod, info);
+ if (err)
+ goto out_unreg_sect_attrs;
return 0;
+out_unreg_sect_attrs:
+ remove_sect_attrs(mod);
+out_del_usage_links:
+ del_usage_links(mod);
out_unreg_modinfo_attrs:
module_remove_modinfo_attrs(mod, -1);
out_unreg_param:
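The sysfs conversion follows the kernel's usual goto-unwind shape: each setup step earns a label that rolls back everything established after it, and the error code travels out through a single return. A compact standalone illustration of that shape (names are made up):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("%s %s\n", name, fail ? "failed" : "ok");
	return fail ? -1 : 0;
}

static int setup(int fail_at)
{
	int err;

	err = step("sect_attrs", fail_at == 1);
	if (err)
		goto out;
	err = step("notes_attrs", fail_at == 2);
	if (err)
		goto out_sect;
	return 0;

out_sect:
	printf("undo sect_attrs\n");	/* remove_sect_attrs() analogue */
out:
	return err;
}

int main(void)
{
	return setup(2) ? 1 : 0;
}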
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 6ec3deec68c2..dc952c3b05af 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -550,15 +550,15 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags)
struct nsset nsset = {};
int err = 0;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- if (proc_ns_file(f.file)) {
- ns = get_proc_ns(file_inode(f.file));
+ if (proc_ns_file(fd_file(f))) {
+ ns = get_proc_ns(file_inode(fd_file(f)));
if (flags && (ns->ops->type != flags))
err = -EINVAL;
flags = ns->ops->type;
- } else if (!IS_ERR(pidfd_pid(f.file))) {
+ } else if (!IS_ERR(pidfd_pid(fd_file(f)))) {
err = check_setns_flags(flags);
} else {
err = -EINVAL;
@@ -570,10 +570,10 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags)
if (err)
goto out;
- if (proc_ns_file(f.file))
+ if (proc_ns_file(fd_file(f)))
err = validate_ns(&nsset, ns);
else
- err = validate_nsset(&nsset, pidfd_pid(f.file));
+ err = validate_nsset(&nsset, pidfd_pid(fd_file(f)));
if (!err) {
commit_nsset(&nsset);
perf_event_namespaces(current);
diff --git a/kernel/numa.c b/kernel/numa.c
deleted file mode 100644
index 67ca6b8585c0..000000000000
--- a/kernel/numa.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/printk.h>
-#include <linux/numa.h>
-
-/* Stub functions: */
-
-#ifndef memory_add_physaddr_to_nid
-int memory_add_physaddr_to_nid(u64 start)
-{
- pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-#endif
-
-#ifndef phys_to_target_node
-int phys_to_target_node(u64 start)
-{
- pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
-EXPORT_SYMBOL_GPL(phys_to_target_node);
-#endif
diff --git a/kernel/panic.c b/kernel/panic.c
index 753d12f4dc8f..fbc59b3b64d0 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -384,7 +384,7 @@ void panic(const char *fmt, ...)
panic_print_sys_info(false);
- kmsg_dump(KMSG_DUMP_PANIC);
+ kmsg_dump_desc(KMSG_DUMP_PANIC, buf);
/*
* If you doubt kdump always works fine in any situation,
diff --git a/kernel/pid.c b/kernel/pid.c
index da76ed1873f7..2715afb77eab 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -540,13 +540,13 @@ struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
struct pid *pid;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- pid = pidfd_pid(f.file);
+ pid = pidfd_pid(fd_file(f));
if (!IS_ERR(pid)) {
get_pid(pid);
- *flags = f.file->f_flags;
+ *flags = fd_file(f)->f_flags;
}
fdput(f);
@@ -755,10 +755,10 @@ SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
return -EINVAL;
f = fdget(pidfd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- pid = pidfd_pid(f.file);
+ pid = pidfd_pid(fd_file(f));
if (IS_ERR(pid))
ret = PTR_ERR(pid);
else
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 3aa41ba22129..3f9e3efb9f6e 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -447,7 +447,6 @@ static const struct file_operations snapshot_fops = {
.release = snapshot_release,
.read = snapshot_read,
.write = snapshot_write,
- .llseek = no_llseek,
.unlocked_ioctl = snapshot_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = snapshot_compat_ioctl,
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 71e4fe6f9b85..beb808f4c367 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -4668,16 +4668,21 @@ const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
/**
- * kmsg_dump - dump kernel log to kernel message dumpers.
+ * kmsg_dump_desc - dump kernel log to kernel message dumpers.
* @reason: the reason (oops, panic etc) for dumping
+ * @desc: a short string to describe what caused the panic or oops. Can be NULL
+ * if no additional description is available.
*
* Call each of the registered dumper's dump() callback, which can
* retrieve the kmsg records with kmsg_dump_get_line() or
* kmsg_dump_get_buffer().
*/
-void kmsg_dump(enum kmsg_dump_reason reason)
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
struct kmsg_dumper *dumper;
+ struct kmsg_dump_detail detail = {
+ .reason = reason,
+ .description = desc,
+ };
rcu_read_lock();
list_for_each_entry_rcu(dumper, &dump_list, list) {
@@ -4695,7 +4700,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
continue;
/* invoke dumper which will iterate over records */
- dumper->dump(dumper, reason);
+ dumper->dump(dumper, &detail);
}
rcu_read_unlock();
}
diff --git a/kernel/relay.c b/kernel/relay.c
index a8e90e98bf2c..a8ae436dc77e 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1079,7 +1079,6 @@ const struct file_operations relay_file_operations = {
.poll = relay_file_poll,
.mmap = relay_file_mmap,
.read = relay_file_read,
- .llseek = no_llseek,
.release = relay_file_release,
};
EXPORT_SYMBOL_GPL(relay_file_operations);
diff --git a/kernel/resource.c b/kernel/resource.c
index a83040fde236..b730bd28b422 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -450,8 +450,7 @@ int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
/* re-alloc */
struct resource *rams_new;
- rams_new = kvrealloc(rams, rams_size * sizeof(struct resource),
- (rams_size + 16) * sizeof(struct resource),
+ rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
GFP_KERNEL);
if (!rams_new)
goto out;
@@ -540,20 +539,62 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
size_t size, unsigned long flags,
unsigned long desc)
{
- struct resource res;
+ resource_size_t ostart, oend;
int type = 0; int other = 0;
- struct resource *p;
+ struct resource *p, *dp;
+ bool is_type, covered;
+ struct resource res;
res.start = start;
res.end = start + size - 1;
for (p = parent->child; p ; p = p->sibling) {
- bool is_type = (((p->flags & flags) == flags) &&
- ((desc == IORES_DESC_NONE) ||
- (desc == p->desc)));
-
- if (resource_overlaps(p, &res))
- is_type ? type++ : other++;
+ if (!resource_overlaps(p, &res))
+ continue;
+ is_type = (p->flags & flags) == flags &&
+ (desc == IORES_DESC_NONE || desc == p->desc);
+ if (is_type) {
+ type++;
+ continue;
+ }
+ /*
+ * Continue the search in descendant resources, since matched
+ * descendant resources may cover some ranges of 'p'.
+ *
+ * |------------- "CXL Window 0" ------------|
+ * |-- "System RAM" --|
+ *
+ * behaves similarly to the following fake resource
+ * tree when searching for "System RAM".
+ *
+ * |-- "System RAM" --||-- "CXL Window 0a" --|
+ */
+ covered = false;
+ ostart = max(res.start, p->start);
+ oend = min(res.end, p->end);
+ for_each_resource(p, dp, false) {
+ if (!resource_overlaps(dp, &res))
+ continue;
+ is_type = (dp->flags & flags) == flags &&
+ (desc == IORES_DESC_NONE || desc == dp->desc);
+ if (is_type) {
+ type++;
+ /*
+ * Range from 'ostart' to 'dp->start'
+ * isn't covered by matched resource.
+ */
+ if (dp->start > ostart)
+ break;
+ if (dp->end >= oend) {
+ covered = true;
+ break;
+ }
+ /* Remove covered range */
+ ostart = max(ostart, dp->end + 1);
+ }
+ }
+ if (!covered)
+ other++;
}
if (type == 0)
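The descendant walk reduces to one-dimensional interval subtraction: the overlap between the query and a non-matching parent only counts as 'other' if the matching descendants leave part of it uncovered. A minimal userspace sketch, assuming sorted, non-overlapping matches (the kernel walks the resource tree instead):

#include <stdbool.h>
#include <stdio.h>

struct range { long start, end; };	/* inclusive bounds */

static bool covered_by(struct range win, const struct range *m, int n)
{
	long ostart = win.start;	/* first still-uncovered address */

	for (int i = 0; i < n; i++) {
		if (m[i].end < ostart || m[i].start > win.end)
			continue;		/* no overlap with window */
		if (m[i].start > ostart)
			return false;		/* hole before this match */
		if (m[i].end >= win.end)
			return true;		/* remainder fully covered */
		ostart = m[i].end + 1;		/* trim covered prefix */
	}
	return false;
}

int main(void)
{
	struct range win = { 10, 19 };
	struct range ram[] = { { 0, 14 }, { 15, 25 } };

	printf("%s\n", covered_by(win, ram, 2) ? "covered" : "mixed");
	return 0;
}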
@@ -1818,7 +1859,11 @@ EXPORT_SYMBOL(resource_list_free);
#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING (1UL << 0)
#define GFR_REQUEST_REGION (1UL << 1)
-#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
+#ifdef PA_SECTION_SHIFT
+#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
+#else
+#define GFR_DEFAULT_ALIGN PAGE_SIZE
+#endif
static resource_size_t gfr_start(struct resource *base, resource_size_t size,
resource_size_t align, unsigned long flags)
@@ -1830,7 +1875,7 @@ static resource_size_t gfr_start(struct resource *base, resource_size_t size,
return end - size + 1;
}
- return ALIGN(base->start, align);
+ return ALIGN(max(base->start, align), align);
}
static bool gfr_continue(struct resource *base, resource_size_t addr,
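One plausible reading of the max(): the root iomem_resource starts at 0, and ALIGN(0, align) is still 0, so the ascending search could hand back address zero; clamping the start to the alignment first rules that out. A toy check, with ALIGN() reproduced from the kernel's power-of-two definition (illustrative, not the allocator itself):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long align = 1UL << 21;	/* e.g. a 2 MiB section */

	printf("%#lx\n", ALIGN(0UL, align));			/* 0x0 */
	printf("%#lx\n", ALIGN(MAX(0UL, align), align));	/* 0x200000 */
	return 0;
}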
@@ -2004,7 +2049,7 @@ struct resource *alloc_free_mem_region(struct resource *base,
return get_free_mem_region(NULL, base, size, align, name,
IORES_DESC_NONE, flags);
}
-EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
+EXPORT_SYMBOL_GPL(alloc_free_mem_region);
#endif /* CONFIG_GET_FREE_REGION */
static int __init strict_iomem(char *str)
diff --git a/kernel/resource_kunit.c b/kernel/resource_kunit.c
index 0e509985a44a..42d2d8d20f5d 100644
--- a/kernel/resource_kunit.c
+++ b/kernel/resource_kunit.c
@@ -7,6 +7,8 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/sizes.h>
+#include <linux/mm.h>
#define R0_START 0x0000
#define R0_END 0xffff
@@ -137,9 +139,150 @@ static void resource_test_intersection(struct kunit *test)
} while (++i < ARRAY_SIZE(results_for_intersection));
}
+/*
+ * The test resource tree for region_intersects() test:
+ *
+ * BASE-BASE+1M-1 : Test System RAM 0
+ * # hole 0 (BASE+1M-BASE+2M)
+ * BASE+2M-BASE+3M-1 : Test CXL Window 0
+ * BASE+3M-BASE+4M-1 : Test System RAM 1
+ * BASE+4M-BASE+7M-1 : Test CXL Window 1
+ * BASE+4M-BASE+5M-1 : Test System RAM 2
+ * BASE+4M+128K-BASE+4M+256K-1: Test Code
+ * BASE+5M-BASE+6M-1 : Test System RAM 3
+ */
+#define RES_TEST_RAM0_OFFSET 0
+#define RES_TEST_RAM0_SIZE SZ_1M
+#define RES_TEST_HOLE0_OFFSET (RES_TEST_RAM0_OFFSET + RES_TEST_RAM0_SIZE)
+#define RES_TEST_HOLE0_SIZE SZ_1M
+#define RES_TEST_WIN0_OFFSET (RES_TEST_HOLE0_OFFSET + RES_TEST_HOLE0_SIZE)
+#define RES_TEST_WIN0_SIZE SZ_1M
+#define RES_TEST_RAM1_OFFSET (RES_TEST_WIN0_OFFSET + RES_TEST_WIN0_SIZE)
+#define RES_TEST_RAM1_SIZE SZ_1M
+#define RES_TEST_WIN1_OFFSET (RES_TEST_RAM1_OFFSET + RES_TEST_RAM1_SIZE)
+#define RES_TEST_WIN1_SIZE (SZ_1M * 3)
+#define RES_TEST_RAM2_OFFSET RES_TEST_WIN1_OFFSET
+#define RES_TEST_RAM2_SIZE SZ_1M
+#define RES_TEST_CODE_OFFSET (RES_TEST_RAM2_OFFSET + SZ_128K)
+#define RES_TEST_CODE_SIZE SZ_128K
+#define RES_TEST_RAM3_OFFSET (RES_TEST_RAM2_OFFSET + RES_TEST_RAM2_SIZE)
+#define RES_TEST_RAM3_SIZE SZ_1M
+#define RES_TEST_TOTAL_SIZE (RES_TEST_WIN1_OFFSET + RES_TEST_WIN1_SIZE)
+
+static void remove_free_resource(void *ctx)
+{
+ struct resource *res = (struct resource *)ctx;
+
+ remove_resource(res);
+ kfree(res);
+}
+
+static void resource_test_request_region(struct kunit *test, struct resource *parent,
+ resource_size_t start, resource_size_t size,
+ const char *name, unsigned long flags)
+{
+ struct resource *res;
+
+ res = __request_region(parent, start, size, name, flags);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+ kunit_add_action_or_reset(test, remove_free_resource, res);
+}
+
+static void resource_test_insert_resource(struct kunit *test, struct resource *parent,
+ resource_size_t start, resource_size_t size,
+ const char *name, unsigned long flags)
+{
+ struct resource *res;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, res);
+
+ res->name = name;
+ res->start = start;
+ res->end = start + size - 1;
+ res->flags = flags;
+ if (insert_resource(parent, res)) {
+ kfree(res);
+ KUNIT_FAIL_AND_ABORT(test, "Failed to insert resource %pR\n", res);
+ }
+
+ kunit_add_action_or_reset(test, remove_free_resource, res);
+}
+
+static void resource_test_region_intersects(struct kunit *test)
+{
+ unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ struct resource *parent;
+ resource_size_t start;
+
+ /* Find an iomem_resource hole to hold test resources */
+ parent = alloc_free_mem_region(&iomem_resource, RES_TEST_TOTAL_SIZE, SZ_1M,
+ "test resources");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+ start = parent->start;
+ kunit_add_action_or_reset(test, remove_free_resource, parent);
+
+ resource_test_request_region(test, parent, start + RES_TEST_RAM0_OFFSET,
+ RES_TEST_RAM0_SIZE, "Test System RAM 0", flags);
+ resource_test_insert_resource(test, parent, start + RES_TEST_WIN0_OFFSET,
+ RES_TEST_WIN0_SIZE, "Test CXL Window 0",
+ IORESOURCE_MEM);
+ resource_test_request_region(test, parent, start + RES_TEST_RAM1_OFFSET,
+ RES_TEST_RAM1_SIZE, "Test System RAM 1", flags);
+ resource_test_insert_resource(test, parent, start + RES_TEST_WIN1_OFFSET,
+ RES_TEST_WIN1_SIZE, "Test CXL Window 1",
+ IORESOURCE_MEM);
+ resource_test_request_region(test, parent, start + RES_TEST_RAM2_OFFSET,
+ RES_TEST_RAM2_SIZE, "Test System RAM 2", flags);
+ resource_test_insert_resource(test, parent, start + RES_TEST_CODE_OFFSET,
+ RES_TEST_CODE_SIZE, "Test Code", flags);
+ resource_test_request_region(test, parent, start + RES_TEST_RAM3_OFFSET,
+ RES_TEST_RAM3_SIZE, "Test System RAM 3", flags);
+ kunit_release_action(test, remove_free_resource, parent);
+
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM0_OFFSET, PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM0_OFFSET +
+ RES_TEST_RAM0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_DISJOINT,
+ region_intersects(start + RES_TEST_HOLE0_OFFSET, PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_DISJOINT,
+ region_intersects(start + RES_TEST_HOLE0_OFFSET +
+ RES_TEST_HOLE0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_MIXED,
+ region_intersects(start + RES_TEST_WIN0_OFFSET +
+ RES_TEST_WIN0_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM1_OFFSET +
+ RES_TEST_RAM1_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM2_OFFSET +
+ RES_TEST_RAM2_SIZE - PAGE_SIZE, 2 * PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_CODE_OFFSET, PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_INTERSECTS,
+ region_intersects(start + RES_TEST_RAM2_OFFSET,
+ RES_TEST_RAM2_SIZE + PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+ KUNIT_EXPECT_EQ(test, REGION_MIXED,
+ region_intersects(start + RES_TEST_RAM3_OFFSET,
+ RES_TEST_RAM3_SIZE + PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE));
+}
+
static struct kunit_case resource_test_cases[] = {
KUNIT_CASE(resource_test_union),
KUNIT_CASE(resource_test_intersection),
+ KUNIT_CASE(resource_test_region_intersects),
{}
};
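The expectations in the new test exercise all three possible results. The final classification in __region_intersects() distills to the counts of matching and non-matching overlaps; a faithful standalone rendering of that last step:

#include <stdio.h>

enum region { REGION_DISJOINT, REGION_INTERSECTS, REGION_MIXED };

static enum region classify(int type, int other)
{
	if (type == 0)
		return REGION_DISJOINT;		/* nothing of the wanted type */
	if (other == 0)
		return REGION_INTERSECTS;	/* only the wanted type */
	return REGION_MIXED;			/* both kinds overlap */
}

int main(void)
{
	printf("%d %d %d\n", classify(0, 1), classify(2, 0), classify(1, 1));
	return 0;
}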
diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
index 39c315182b35..fae1f5c921eb 100644
--- a/kernel/sched/build_policy.c
+++ b/kernel/sched/build_policy.c
@@ -16,18 +16,25 @@
#include <linux/sched/clock.h>
#include <linux/sched/cputime.h>
#include <linux/sched/hotplug.h>
+#include <linux/sched/isolation.h>
#include <linux/sched/posix-timers.h>
#include <linux/sched/rt.h>
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
+#include <linux/kobject.h>
#include <linux/livepatch.h>
+#include <linux/pm.h>
#include <linux/psi.h>
+#include <linux/rhashtable.h>
+#include <linux/seq_buf.h>
#include <linux/seqlock_api.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/tsacct_kern.h>
#include <linux/vtime.h>
+#include <linux/sysrq.h>
+#include <linux/percpu-rwsem.h>
#include <uapi/linux/sched/types.h>
@@ -52,4 +59,8 @@
#include "cputime.c"
#include "deadline.c"
+#ifdef CONFIG_SCHED_CLASS_EXT
+# include "ext.c"
+#endif
+
#include "syscalls.c"
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1d7f5941bcdc..43e453ab7e20 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -163,13 +163,19 @@ static inline int __task_prio(const struct task_struct *p)
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;
- if (rt_prio(p->prio)) /* includes deadline */
+ if (p->dl_server)
+ return -1; /* deadline */
+
+ if (rt_or_dl_prio(p->prio))
return p->prio; /* [-1, 99] */
if (p->sched_class == &idle_sched_class)
return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
- return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
+ if (task_on_scx(p))
+ return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
+
+ return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}
/*
@@ -192,12 +198,33 @@ static inline bool prio_less(const struct task_struct *a,
if (-pb < -pa)
return false;
- if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
- return !dl_time_before(a->dl.deadline, b->dl.deadline);
+ if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
+ const struct sched_dl_entity *a_dl, *b_dl;
+
+ a_dl = &a->dl;
+ /*
+ * Since,'a' and 'b' can be CFS tasks served by DL server,
+ * __task_prio() can return -1 (for DL) even for those. In that
+ * case, get to the dl_server's DL entity.
+ */
+ if (a->dl_server)
+ a_dl = a->dl_server;
+
+ b_dl = &b->dl;
+ if (b->dl_server)
+ b_dl = b->dl_server;
+
+ return !dl_time_before(a_dl->deadline, b_dl->deadline);
+ }
if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
return cfs_prio_less(a, b, in_fi);
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
+ return scx_prio_less(a, b, in_fi);
+#endif
+
return false;
}
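Core scheduling compares tasks across classes by flattening them onto one integer scale where a smaller value wins; the hunks above give DL-server picks the deadline slot (-1) and squash SCX one notch below fair (120 vs 119). A sketch of that mapping using the constants from the comments; the task classification here is faked for illustration:

#include <stdio.h>

#define MAX_RT_PRIO	100
#define MAX_NICE	19
#define NICE_WIDTH	40

enum kind { STOP, DL_SERVER, RT_OR_DL, FAIR, EXT, IDLE };

static int task_prio(enum kind k, int rt_prio)
{
	switch (k) {
	case STOP:	return -2;			/* trumps deadline */
	case DL_SERVER:	return -1;			/* deadline */
	case RT_OR_DL:	return rt_prio;			/* [-1, 99] */
	case FAIR:	return MAX_RT_PRIO + MAX_NICE;		/* 119 */
	case EXT:	return MAX_RT_PRIO + MAX_NICE + 1;	/* 120 */
	case IDLE:	return MAX_RT_PRIO + NICE_WIDTH;	/* 140 */
	}
	return 0;
}

int main(void)
{
	printf("fair=%d ext=%d idle=%d\n",
	       task_prio(FAIR, 0), task_prio(EXT, 0), task_prio(IDLE, 0));
	return 0;
}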
@@ -240,6 +267,9 @@ static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
+ if (p->se.sched_delayed)
+ return;
+
rq->core->core_task_seq++;
if (!p->core_cookie)
@@ -250,6 +280,9 @@ void sched_core_enqueue(struct rq *rq, struct task_struct *p)
void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
+ if (p->se.sched_delayed)
+ return;
+
rq->core->core_task_seq++;
if (sched_core_enqueued(p)) {
@@ -1255,11 +1288,14 @@ bool sched_can_stop_tick(struct rq *rq)
return true;
/*
- * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
- * if there's more than one we need the tick for involuntary
- * preemption.
+ * If there are no DL, RR/FIFO tasks, there must only be CFS or SCX tasks
+ * left. For CFS, if there's more than one we need the tick for
+ * involuntary preemption. For SCX, ask.
*/
- if (rq->nr_running > 1)
+ if (scx_enabled() && !scx_can_stop_tick(rq))
+ return false;
+
+ if (rq->cfs.nr_running > 1)
return false;
/*
@@ -1269,7 +1305,7 @@ bool sched_can_stop_tick(struct rq *rq)
* dequeued by migrating while the constrained task continues to run.
* E.g. going from 2->1 without going through pick_next_task().
*/
- if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
+ if (__need_bw_check(rq, rq->curr)) {
if (cfs_task_bw_constrained(rq->curr))
return false;
}
@@ -1341,8 +1377,8 @@ void set_load_weight(struct task_struct *p, bool update_load)
* SCHED_OTHER tasks have to update their load when changing their
* weight
*/
- if (update_load && p->sched_class == &fair_sched_class)
- reweight_task(p, &lw);
+ if (update_load && p->sched_class->reweight_task)
+ p->sched_class->reweight_task(task_rq(p), p, &lw);
else
p->se.load = lw;
}
@@ -1672,6 +1708,9 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
if (unlikely(!p->sched_class->uclamp_enabled))
return;
+ if (p->se.sched_delayed)
+ return;
+
for_each_clamp_id(clamp_id)
uclamp_rq_inc_id(rq, p, clamp_id);
@@ -1696,6 +1735,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
if (unlikely(!p->sched_class->uclamp_enabled))
return;
+ if (p->se.sched_delayed)
+ return;
+
for_each_clamp_id(clamp_id)
uclamp_rq_dec_id(rq, p, clamp_id);
}
@@ -1975,14 +2017,21 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
}
- uclamp_rq_inc(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
+ /*
+ * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
+ * ->sched_delayed.
+ */
+ uclamp_rq_inc(rq, p);
if (sched_core_enabled(rq))
sched_core_enqueue(rq, p);
}
-void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+/*
+ * Must only return false when DEQUEUE_SLEEP.
+ */
+inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
if (sched_core_enabled(rq))
sched_core_dequeue(rq, p, flags);
@@ -1995,8 +2044,12 @@ void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
psi_dequeue(p, flags & DEQUEUE_SLEEP);
}
+ /*
+ * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
+ * and mark the task ->sched_delayed.
+ */
uclamp_rq_dec(rq, p);
- p->sched_class->dequeue_task(rq, p, flags);
+ return p->sched_class->dequeue_task(rq, p, flags);
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -2014,12 +2067,25 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
- WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING);
+ SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
+
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
ASSERT_EXCLUSIVE_WRITER(p->on_rq);
+ /*
+ * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
+ * dequeue_task() and cleared *after* enqueue_task().
+ */
+
dequeue_task(rq, p, flags);
}
+static void block_task(struct rq *rq, struct task_struct *p, int flags)
+{
+ if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
+ __block_task(rq, p);
+}
+
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
@@ -2032,6 +2098,17 @@ inline int task_curr(const struct task_struct *p)
}
/*
+ * ->switching_to() is called with the pi_lock and rq_lock held and must not
+ * mess with locking.
+ */
+void check_class_changing(struct rq *rq, struct task_struct *p,
+ const struct sched_class *prev_class)
+{
+ if (prev_class != p->sched_class && p->sched_class->switching_to)
+ p->sched_class->switching_to(rq, p);
+}
+
+/*
* switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
* use the balance_callback list if you want balancing.
*
@@ -2233,6 +2310,12 @@ void migrate_disable(void)
struct task_struct *p = current;
if (p->migration_disabled) {
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Warn about overflow half-way through the range.
+ */
+ WARN_ON_ONCE((s16)p->migration_disabled < 0);
+#endif
p->migration_disabled++;
return;
}
@@ -2251,14 +2334,20 @@ void migrate_enable(void)
.flags = SCA_MIGRATE_ENABLE,
};
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Check both overflow from migrate_disable() and superfluous
+ * migrate_enable().
+ */
+ if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
+ return;
+#endif
+
if (p->migration_disabled > 1) {
p->migration_disabled--;
return;
}
- if (WARN_ON_ONCE(!p->migration_disabled))
- return;
-
/*
* Ensure stop_task runs either before or after this, and that
* __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
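Both debug checks rely on the same trick: with a 16-bit counter, casting to a signed 16-bit type goes negative once the count crosses half the range, which flags runaway nesting long before an actual wrap back to zero. A standalone demonstration (the field width is assumed to match the kernel's):

#include <stdio.h>

int main(void)
{
	unsigned short migration_disabled = 0;	/* stand-in for the task field */

	for (long i = 0; i < 40000; i++) {
		if ((short)migration_disabled < 0) {
			printf("overflow warning at %u\n", migration_disabled);
			break;	/* fires at 32768, half-way through */
		}
		migration_disabled++;
	}
	return 0;
}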
@@ -2289,7 +2378,7 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
/* When not in the task's cpumask, no point in looking further. */
- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ if (!task_allowed_on_cpu(p, cpu))
return false;
/* migrate_disabled() must be allowed to finish. */
@@ -2298,7 +2387,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
/* Non kernel threads are not allowed during either online or offline. */
if (!(p->flags & PF_KTHREAD))
- return cpu_active(cpu) && task_cpu_possible(cpu, p);
+ return cpu_active(cpu);
/* KTHREAD_IS_PER_CPU is always allowed. */
if (kthread_is_per_cpu(p))
@@ -3607,8 +3696,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
rq->idle_stamp = 0;
}
#endif
-
- p->dl_server = NULL;
}
/*
@@ -3644,12 +3731,14 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
rq = __task_rq_lock(p, &rf);
if (task_on_rq_queued(p)) {
+ update_rq_clock(rq);
+ if (p->se.sched_delayed)
+ enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
if (!task_on_cpu(rq, p)) {
/*
* When on_rq && !on_cpu the task is preempted, see if
* it should preempt the task that is current now.
*/
- update_rq_clock(rq);
wakeup_preempt(rq, p, wake_flags);
}
ttwu_do_wakeup(p);
@@ -3776,6 +3865,15 @@ bool cpus_share_resources(int this_cpu, int that_cpu)
static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
{
/*
+ * The BPF scheduler may depend on select_task_rq() being invoked during
+ * wakeups. In addition, @p may end up executing on a different CPU
+ * regardless of what happens in the wakeup path, making the ttwu_queue
+ * optimization less meaningful. Skip if on SCX.
+ */
+ if (task_on_scx(p))
+ return false;
+
+ /*
* Do not complicate things with the async wake_list while the CPU is
* in hotplug state.
*/
@@ -4029,11 +4127,16 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* case the whole 'p->on_rq && ttwu_runnable()' case below
* without taking any locks.
*
+ * Specifically, given that current runs ttwu(), we must be before
+ * schedule()'s block_task(); as such, this must not observe
+ * sched_delayed.
+ *
* In particular:
* - we rely on Program-Order guarantees for all the ordering,
* - we're serialized against set_special_state() by virtue of
* it disabling IRQs (this allows not taking ->pi_lock).
*/
+ SCHED_WARN_ON(p->se.sched_delayed);
if (!ttwu_state_match(p, state, &success))
goto out;
@@ -4322,9 +4425,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.nr_migrations = 0;
p->se.vruntime = 0;
p->se.vlag = 0;
- p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
+ /* A delayed task cannot be in clone(). */
+ SCHED_WARN_ON(p->se.sched_delayed);
+
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = NULL;
#endif
@@ -4342,6 +4447,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->rt.on_rq = 0;
p->rt.on_list = 0;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ init_scx_entity(&p->scx);
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
@@ -4572,6 +4681,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->prio = p->normal_prio = p->static_prio;
set_load_weight(p, false);
+ p->se.custom_slice = 0;
+ p->se.slice = sysctl_sched_base_slice;
/*
* We don't need the reset flag anymore after the fork. It has
@@ -4582,10 +4693,18 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
if (dl_prio(p->prio))
return -EAGAIN;
- else if (rt_prio(p->prio))
+
+ scx_pre_fork(p);
+
+ if (rt_prio(p->prio)) {
p->sched_class = &rt_sched_class;
- else
+#ifdef CONFIG_SCHED_CLASS_EXT
+ } else if (task_should_scx(p)) {
+ p->sched_class = &ext_sched_class;
+#endif
+ } else {
p->sched_class = &fair_sched_class;
+ }
init_entity_runnable_average(&p->se);
@@ -4605,7 +4724,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
return 0;
}
-void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
unsigned long flags;
@@ -4632,11 +4751,19 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ return scx_fork(p);
+}
+
+void sched_cancel_fork(struct task_struct *p)
+{
+ scx_cancel_fork(p);
}
void sched_post_fork(struct task_struct *p)
{
uclamp_post_fork(p);
+ scx_post_fork(p);
}
unsigned long to_ratio(u64 period, u64 runtime)
@@ -4686,7 +4813,7 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
- activate_task(rq, p, ENQUEUE_NOCLOCK);
+ activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
trace_sched_wakeup_new(p);
wakeup_preempt(rq, p, WF_FORK);
#ifdef CONFIG_SMP
@@ -5469,6 +5596,7 @@ void sched_tick(void)
calc_global_load_tick(rq);
sched_core_tick(rq);
task_tick_mm_cid(rq, curr);
+ scx_tick(rq);
rq_unlock(rq, &rf);
@@ -5481,8 +5609,10 @@ void sched_tick(void)
wq_worker_tick(curr);
#ifdef CONFIG_SMP
- rq->idle_balance = idle_cpu(cpu);
- sched_balance_trigger(rq);
+ if (!scx_switched_all()) {
+ rq->idle_balance = idle_cpu(cpu);
+ sched_balance_trigger(rq);
+ }
#endif
}
@@ -5769,11 +5899,22 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
schedstat_inc(this_rq()->sched_count);
}
-static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
- struct rq_flags *rf)
+static void prev_balance(struct rq *rq, struct task_struct *prev,
+ struct rq_flags *rf)
{
-#ifdef CONFIG_SMP
+ const struct sched_class *start_class = prev->sched_class;
const struct sched_class *class;
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+ /*
+ * SCX requires a balance() call before every pick_next_task() including
+ * when waking up from SCHED_IDLE. If @start_class is below SCX, start
+ * from SCX instead.
+ */
+ if (scx_enabled() && sched_class_above(&ext_sched_class, start_class))
+ start_class = &ext_sched_class;
+#endif
+
/*
* We must do the balancing pass before put_prev_task(), such
* that when we release the rq->lock the task is in the same
@@ -5782,13 +5923,10 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
* We can terminate the balance pass as soon as we know there is
* a runnable task of @class priority or higher.
*/
- for_class_range(class, prev->sched_class, &idle_sched_class) {
- if (class->balance(rq, prev, rf))
+ for_active_class_range(class, start_class, &idle_sched_class) {
+ if (class->balance && class->balance(rq, prev, rf))
break;
}
-#endif
-
- put_prev_task(rq, prev);
}
/*
@@ -5800,6 +5938,11 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
const struct sched_class *class;
struct task_struct *p;
+ rq->dl_server = NULL;
+
+ if (scx_enabled())
+ goto restart;
+
/*
* Optimization: we know that if all tasks are in the fair class we can
* call that function directly, but only if the @prev task wasn't of a
@@ -5815,35 +5958,28 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
/* Assume the next prioritized class is idle_sched_class */
if (!p) {
- put_prev_task(rq, prev);
- p = pick_next_task_idle(rq);
+ p = pick_task_idle(rq);
+ put_prev_set_next_task(rq, prev, p);
}
- /*
- * This is the fast path; it cannot be a DL server pick;
- * therefore even if @p == @prev, ->dl_server must be NULL.
- */
- if (p->dl_server)
- p->dl_server = NULL;
-
return p;
}
restart:
- put_prev_task_balance(rq, prev, rf);
-
- /*
- * We've updated @prev and no longer need the server link, clear it.
- * Must be done before ->pick_next_task() because that can (re)set
- * ->dl_server.
- */
- if (prev->dl_server)
- prev->dl_server = NULL;
+ prev_balance(rq, prev, rf);
- for_each_class(class) {
- p = class->pick_next_task(rq);
- if (p)
- return p;
+ for_each_active_class(class) {
+ if (class->pick_next_task) {
+ p = class->pick_next_task(rq, prev);
+ if (p)
+ return p;
+ } else {
+ p = class->pick_task(rq);
+ if (p) {
+ put_prev_set_next_task(rq, prev, p);
+ return p;
+ }
+ }
}
BUG(); /* The idle class should always have a runnable task. */
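The loop now supports two op shapes: a class can supply a combined pick_next_task(), or only pick_task(), in which case the core does the put_prev/set_next handoff itself. A toy rendering of that dispatch, with function pointers standing in for the sched_class ops (all names illustrative):

#include <stdio.h>

struct rq;
struct task { const char *name; };

struct sched_class {
	struct task *(*pick_next_task)(struct rq *rq, struct task *prev);
	struct task *(*pick_task)(struct rq *rq);
};

static struct task idle = { "idle" };

static struct task *pick_next_task_fair(struct rq *rq, struct task *prev)
{
	return NULL;	/* pretend no fair task is runnable */
}

static struct task *pick_task_idle(struct rq *rq)
{
	return &idle;	/* idle always has something */
}

static const struct sched_class classes[] = {
	{ .pick_next_task = pick_next_task_fair },
	{ .pick_task = pick_task_idle },
};

static struct task *pick(struct rq *rq, struct task *prev)
{
	for (unsigned int i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		const struct sched_class *c = &classes[i];
		struct task *p;

		if (c->pick_next_task) {
			p = c->pick_next_task(rq, prev);
			if (p)
				return p;
		} else {
			p = c->pick_task(rq);
			if (p) {
				/* put_prev_set_next_task() analogue */
				printf("handoff %s -> %s\n", prev->name, p->name);
				return p;
			}
		}
	}
	return NULL;
}

int main(void)
{
	struct task prev = { "prev" };

	pick(NULL, &prev);
	return 0;
}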
@@ -5873,7 +6009,9 @@ static inline struct task_struct *pick_task(struct rq *rq)
const struct sched_class *class;
struct task_struct *p;
- for_each_class(class) {
+ rq->dl_server = NULL;
+
+ for_each_active_class(class) {
p = class->pick_task(rq);
if (p)
return p;
@@ -5911,6 +6049,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* another cpu during offline.
*/
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
return __pick_next_task(rq, prev, rf);
}
@@ -5929,16 +6068,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
next = rq->core_pick;
- if (next != prev) {
- put_prev_task(rq, prev);
- set_next_task(rq, next);
- }
-
+ rq->dl_server = rq->core_dl_server;
rq->core_pick = NULL;
- goto out;
+ rq->core_dl_server = NULL;
+ goto out_set_next;
}
- put_prev_task_balance(rq, prev, rf);
+ prev_balance(rq, prev, rf);
smt_mask = cpu_smt_mask(cpu);
need_sync = !!rq->core->core_cookie;
@@ -5979,6 +6115,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
next = pick_task(rq);
if (!next->core_cookie) {
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
/*
* For robustness, update the min_vruntime_fi for
* unconstrained picks as well.
@@ -6006,7 +6143,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (i != cpu && (rq_i != rq->core || !core_clock_updated))
update_rq_clock(rq_i);
- p = rq_i->core_pick = pick_task(rq_i);
+ rq_i->core_pick = p = pick_task(rq_i);
+ rq_i->core_dl_server = rq_i->dl_server;
+
if (!max || prio_less(max, p, fi_before))
max = p;
}
@@ -6030,6 +6169,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
rq_i->core_pick = p;
+ rq_i->core_dl_server = NULL;
if (p == rq_i->idle) {
if (rq_i->nr_running) {
@@ -6090,6 +6230,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (i == cpu) {
rq_i->core_pick = NULL;
+ rq_i->core_dl_server = NULL;
continue;
}
@@ -6098,6 +6239,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if (rq_i->curr == rq_i->core_pick) {
rq_i->core_pick = NULL;
+ rq_i->core_dl_server = NULL;
continue;
}
@@ -6105,8 +6247,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
out_set_next:
- set_next_task(rq, next);
-out:
+ put_prev_set_next_task(rq, prev, next);
if (rq->core->core_forceidle_count && next == rq->idle)
queue_core_balance(rq);
@@ -6342,19 +6483,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* Constants for the sched_mode argument of __schedule().
*
* The mode argument allows RT enabled kernels to differentiate a
- * preemption from blocking on an 'sleeping' spin/rwlock. Note that
- * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
- * optimize the AND operation out and just check for zero.
+ * preemption from blocking on an 'sleeping' spin/rwlock.
*/
-#define SM_NONE 0x0
-#define SM_PREEMPT 0x1
-#define SM_RTLOCK_WAIT 0x2
-
-#ifndef CONFIG_PREEMPT_RT
-# define SM_MASK_PREEMPT (~0U)
-#else
-# define SM_MASK_PREEMPT SM_PREEMPT
-#endif
+#define SM_IDLE (-1)
+#define SM_NONE 0
+#define SM_PREEMPT 1
+#define SM_RTLOCK_WAIT 2
/*
* __schedule() is the main scheduler function.
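With the bitmask gone, the two notions of "preemption" fall out of plain comparisons: debug hooks and RCU treat anything above SM_NONE as a preemption, while task-state handling counts only SM_PREEMPT. A small table generator over the new constants:

#include <stdbool.h>
#include <stdio.h>

#define SM_IDLE		(-1)
#define SM_NONE		0
#define SM_PREEMPT	1
#define SM_RTLOCK_WAIT	2

int main(void)
{
	for (int mode = SM_IDLE; mode <= SM_RTLOCK_WAIT; mode++) {
		bool rcu_preempt = mode > SM_NONE;	 /* schedule_debug()/RCU view */
		bool state_preempt = mode == SM_PREEMPT; /* task-state view */

		printf("mode %2d: rcu=%d state=%d\n",
		       mode, rcu_preempt, state_preempt);
	}
	return 0;
}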
@@ -6395,9 +6529,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
*
* WARNING: must be called with preemption disabled!
*/
-static void __sched notrace __schedule(unsigned int sched_mode)
+static void __sched notrace __schedule(int sched_mode)
{
struct task_struct *prev, *next;
+ /*
+ * On a PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
+ * as a preemption by schedule_debug() and RCU.
+ */
+ bool preempt = sched_mode > SM_NONE;
unsigned long *switch_count;
unsigned long prev_state;
struct rq_flags rf;
@@ -6408,13 +6547,13 @@ static void __sched notrace __schedule(unsigned int sched_mode)
rq = cpu_rq(cpu);
prev = rq->curr;
- schedule_debug(prev, !!sched_mode);
+ schedule_debug(prev, preempt);
if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
hrtick_clear(rq);
local_irq_disable();
- rcu_note_context_switch(!!sched_mode);
+ rcu_note_context_switch(preempt);
/*
* Make sure that signal_pending_state()->signal_pending() below
@@ -6443,22 +6582,33 @@ static void __sched notrace __schedule(unsigned int sched_mode)
switch_count = &prev->nivcsw;
+ /* Task-state handling considers only SM_PREEMPT as preemption */
+ preempt = sched_mode == SM_PREEMPT;
+
/*
* We must load prev->state once (task_struct::state is volatile), such
* that we form a control dependency vs deactivate_task() below.
*/
prev_state = READ_ONCE(prev->__state);
- if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
+ if (sched_mode == SM_IDLE) {
+ /* SCX must consult the BPF scheduler to tell if rq is empty */
+ if (!rq->nr_running && !scx_enabled()) {
+ next = prev;
+ goto picked;
+ }
+ } else if (!preempt && prev_state) {
if (signal_pending_state(prev_state, prev)) {
WRITE_ONCE(prev->__state, TASK_RUNNING);
} else {
+ int flags = DEQUEUE_NOCLOCK;
+
prev->sched_contributes_to_load =
(prev_state & TASK_UNINTERRUPTIBLE) &&
!(prev_state & TASK_NOLOAD) &&
!(prev_state & TASK_FROZEN);
- if (prev->sched_contributes_to_load)
- rq->nr_uninterruptible++;
+ if (unlikely(is_special_task_state(prev_state)))
+ flags |= DEQUEUE_SPECIAL;
/*
* __schedule() ttwu()
@@ -6471,17 +6621,13 @@ static void __sched notrace __schedule(unsigned int sched_mode)
*
* After this, schedule() must not care about p->state any more.
*/
- deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
-
- if (prev->in_iowait) {
- atomic_inc(&rq->nr_iowait);
- delayacct_blkio_start();
- }
+ block_task(rq, prev, flags);
}
switch_count = &prev->nvcsw;
}
next = pick_next_task(rq, prev, &rf);
+picked:
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
@@ -6523,7 +6669,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
psi_account_irqtime(rq, prev, next);
psi_sched_switch(prev, next, !task_on_rq_queued(prev));
- trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+ trace_sched_switch(preempt, prev, next, prev_state);
/* Also unlocks the rq: */
rq = context_switch(rq, prev, next, &rf);
@@ -6599,7 +6745,7 @@ static void sched_update_worker(struct task_struct *tsk)
}
}
-static __always_inline void __schedule_loop(unsigned int sched_mode)
+static __always_inline void __schedule_loop(int sched_mode)
{
do {
preempt_disable();
@@ -6644,7 +6790,7 @@ void __sched schedule_idle(void)
*/
WARN_ON_ONCE(current->__state);
do {
- __schedule(SM_NONE);
+ __schedule(SM_IDLE);
} while (need_resched());
}
@@ -6870,6 +7016,10 @@ void __setscheduler_prio(struct task_struct *p, int prio)
p->sched_class = &dl_sched_class;
else if (rt_prio(prio))
p->sched_class = &rt_sched_class;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ else if (task_should_scx(p))
+ p->sched_class = &ext_sched_class;
+#endif
else
p->sched_class = &fair_sched_class;
@@ -7015,6 +7165,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
}
__setscheduler_prio(p, prio);
+ check_class_changing(rq, p, prev_class);
if (queued)
enqueue_task(rq, p, queue_flag);
@@ -7405,7 +7556,7 @@ EXPORT_SYMBOL(io_schedule);
void sched_show_task(struct task_struct *p)
{
- unsigned long free = 0;
+ unsigned long free;
int ppid;
if (!try_get_task_stack(p))
@@ -7415,9 +7566,7 @@ void sched_show_task(struct task_struct *p)
if (task_is_running(p))
pr_cont(" running task ");
-#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
-#endif
ppid = 0;
rcu_read_lock();
if (pid_alive(p))
@@ -7429,6 +7578,7 @@ void sched_show_task(struct task_struct *p)
print_worker_info(KERN_INFO, p);
print_stop_info(KERN_INFO, p);
+ print_scx_info(KERN_INFO, p);
show_stack(p, NULL, KERN_INFO);
put_task_stack(p);
}
@@ -7957,6 +8107,8 @@ int sched_cpu_activate(unsigned int cpu)
cpuset_cpu_active();
}
+ scx_rq_activate(rq);
+
/*
* Put the rq online, if not already. This happens:
*
@@ -8006,6 +8158,8 @@ int sched_cpu_deactivate(unsigned int cpu)
sched_set_rq_offline(rq, cpu);
+ scx_rq_deactivate(rq);
+
/*
* When going down, decrement the number of cores with SMT present.
*/
@@ -8190,11 +8344,15 @@ void __init sched_init(void)
int i;
/* Make sure the linker didn't screw up */
- BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
- &fair_sched_class != &rt_sched_class + 1 ||
- &rt_sched_class != &dl_sched_class + 1);
#ifdef CONFIG_SMP
- BUG_ON(&dl_sched_class != &stop_sched_class + 1);
+ BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
+#endif
+ BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
+ BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
+ BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
+#ifdef CONFIG_SCHED_CLASS_EXT
+ BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
+ BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
#endif
wait_bit_init();
@@ -8218,6 +8376,9 @@ void __init sched_init(void)
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_EXT_GROUP_SCHED
+ root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
+#endif /* CONFIG_EXT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
@@ -8228,8 +8389,6 @@ void __init sched_init(void)
#endif /* CONFIG_RT_GROUP_SCHED */
}
- init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
-
#ifdef CONFIG_SMP
init_defrootdomain();
#endif
@@ -8284,8 +8443,13 @@ void __init sched_init(void)
init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
- rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * This is required for the init CPU because rt.c:__enable_runtime()
+ * starts working after scheduler_running, which is not the case
+ * yet.
+ */
+ rq->rt.rt_runtime = global_rt_runtime();
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
#ifdef CONFIG_SMP
@@ -8317,10 +8481,12 @@ void __init sched_init(void)
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
atomic_set(&rq->nr_iowait, 0);
+ fair_server_init(rq);
#ifdef CONFIG_SCHED_CORE
rq->core = rq;
rq->core_pick = NULL;
+ rq->core_dl_server = NULL;
rq->core_enabled = 0;
rq->core_tree = RB_ROOT;
rq->core_forceidle_count = 0;
@@ -8333,6 +8499,7 @@ void __init sched_init(void)
}
set_load_weight(&init_task, false);
+ init_task.se.slice = sysctl_sched_base_slice;
/*
* The boot idle thread does lazy MMU switching as well:
@@ -8363,6 +8530,7 @@ void __init sched_init(void)
balance_push_set(smp_processor_id(), false);
#endif
init_sched_fair_class();
+ init_sched_ext_class();
psi_init();
@@ -8548,7 +8716,7 @@ void normalize_rt_tasks(void)
schedstat_set(p->stats.sleep_start, 0);
schedstat_set(p->stats.block_start, 0);
- if (!dl_task(p) && !rt_task(p)) {
+ if (!rt_or_dl_task(p)) {
/*
* Renice negative nice level userspace
* tasks back to 0:
@@ -8648,6 +8816,7 @@ struct task_group *sched_create_group(struct task_group *parent)
if (!alloc_rt_sched_group(tg, parent))
goto err;
+ scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
alloc_uclamp_sched_group(tg, parent);
return tg;
@@ -8775,6 +8944,7 @@ void sched_move_task(struct task_struct *tsk)
put_prev_task(rq, tsk);
sched_change_group(tsk, group);
+ scx_move_task(tsk);
if (queued)
enqueue_task(rq, tsk, queue_flags);
@@ -8789,11 +8959,6 @@ void sched_move_task(struct task_struct *tsk)
}
}
-static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct task_group, css) : NULL;
-}
-
static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
@@ -8817,6 +8982,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
struct task_group *parent = css_tg(css->parent);
+ int ret;
+
+ ret = scx_tg_online(tg);
+ if (ret)
+ return ret;
if (parent)
sched_online_group(tg, parent);
@@ -8831,6 +9001,13 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
return 0;
}
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+
+ scx_tg_offline(tg);
+}
+
static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
@@ -8848,9 +9025,9 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
sched_unregister_group(tg);
}
-#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
+#ifdef CONFIG_RT_GROUP_SCHED
struct task_struct *task;
struct cgroup_subsys_state *css;
@@ -8858,9 +9035,9 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
}
- return 0;
-}
#endif
+ return scx_cgroup_can_attach(tset);
+}
static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
@@ -8869,6 +9046,13 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
cgroup_taskset_for_each(task, css, tset)
sched_move_task(task);
+
+ scx_cgroup_finish_attach();
+}
+
+static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
+{
+ scx_cgroup_cancel_attach(tset);
}
#ifdef CONFIG_UCLAMP_TASK_GROUP
@@ -9045,22 +9229,36 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
}
#endif /* CONFIG_UCLAMP_TASK_GROUP */
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
+static unsigned long tg_weight(struct task_group *tg)
+{
#ifdef CONFIG_FAIR_GROUP_SCHED
+ return scale_load_down(tg->shares);
+#else
+ return sched_weight_from_cgroup(tg->scx_weight);
+#endif
+}
+
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 shareval)
{
+ int ret;
+
if (shareval > scale_load_down(ULONG_MAX))
shareval = MAX_SHARES;
- return sched_group_set_shares(css_tg(css), scale_load(shareval));
+ ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
+ if (!ret)
+ scx_group_set_weight(css_tg(css),
+ sched_weight_to_cgroup(shareval));
+ return ret;
}
static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- struct task_group *tg = css_tg(css);
-
- return (u64) scale_load_down(tg->shares);
+ return tg_weight(css_tg(css));
}
+#endif /* CONFIG_GROUP_SCHED_WEIGHT */
#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);
@@ -9406,7 +9604,6 @@ static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
-#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
@@ -9434,7 +9631,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
}
#endif /* CONFIG_RT_GROUP_SCHED */
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
@@ -9444,12 +9641,17 @@ static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
struct cftype *cft, s64 idle)
{
- return sched_group_set_idle(css_tg(css), idle);
+ int ret;
+
+ ret = sched_group_set_idle(css_tg(css), idle);
+ if (!ret)
+ scx_group_set_idle(css_tg(css), idle);
+ return ret;
}
#endif
static struct cftype cpu_legacy_files[] = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
{
.name = "shares",
.read_u64 = cpu_shares_read_u64,
@@ -9559,38 +9761,35 @@ static int cpu_local_stat_show(struct seq_file *sf,
return 0;
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
+
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- struct task_group *tg = css_tg(css);
- u64 weight = scale_load_down(tg->shares);
-
- return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
+ return sched_weight_to_cgroup(tg_weight(css_tg(css)));
}
static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
- struct cftype *cft, u64 weight)
+ struct cftype *cft, u64 cgrp_weight)
{
- /*
- * cgroup weight knobs should use the common MIN, DFL and MAX
- * values which are 1, 100 and 10000 respectively. While it loses
- * a bit of range on both ends, it maps pretty well onto the shares
- * value used by scheduler and the round-trip conversions preserve
- * the original value over the entire range.
- */
- if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
+ unsigned long weight;
+ int ret;
+
+ if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
return -ERANGE;
- weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
+ weight = sched_weight_from_cgroup(cgrp_weight);
- return sched_group_set_shares(css_tg(css), scale_load(weight));
+ ret = sched_group_set_shares(css_tg(css), scale_load(weight));
+ if (!ret)
+ scx_group_set_weight(css_tg(css), cgrp_weight);
+ return ret;
}
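The new sched_weight_from_cgroup()/sched_weight_to_cgroup() helpers centralize the conversion the deleted comment described: cgroup weights run 1..10000 with a default of 100, scheduler shares sit around 1024, and round-to-closest division keeps the round trip lossless across the range. A quick check of that arithmetic (helper names here are local, not the kernel macros):

#include <stdio.h>

#define CGROUP_WEIGHT_DFL	100UL

static unsigned long div_round_closest(unsigned long long n, unsigned long d)
{
	return (n + d / 2) / d;
}

static unsigned long weight_from_cgroup(unsigned long cg)
{
	return div_round_closest((unsigned long long)cg * 1024, CGROUP_WEIGHT_DFL);
}

static unsigned long weight_to_cgroup(unsigned long w)
{
	return div_round_closest((unsigned long long)w * CGROUP_WEIGHT_DFL, 1024);
}

int main(void)
{
	for (unsigned long cg = 1; cg <= 10000; cg *= 10)
		printf("cgroup %5lu -> shares %6lu -> cgroup %5lu\n",
		       cg, weight_from_cgroup(cg),
		       weight_to_cgroup(weight_from_cgroup(cg)));
	return 0;
}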
static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- unsigned long weight = scale_load_down(css_tg(css)->shares);
+ unsigned long weight = tg_weight(css_tg(css));
int last_delta = INT_MAX;
int prio, delta;
@@ -9609,7 +9808,7 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
struct cftype *cft, s64 nice)
{
unsigned long weight;
- int idx;
+ int idx, ret;
if (nice < MIN_NICE || nice > MAX_NICE)
return -ERANGE;
@@ -9618,9 +9817,13 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
idx = array_index_nospec(idx, 40);
weight = sched_prio_to_weight[idx];
- return sched_group_set_shares(css_tg(css), scale_load(weight));
+ ret = sched_group_set_shares(css_tg(css), scale_load(weight));
+ if (!ret)
+ scx_group_set_weight(css_tg(css),
+ sched_weight_to_cgroup(weight));
+ return ret;
}
-#endif
+#endif /* CONFIG_GROUP_SCHED_WEIGHT */
static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
long period, long quota)
@@ -9680,7 +9883,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
#endif
static struct cftype cpu_files[] = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
{
.name = "weight",
.flags = CFTYPE_NOT_ON_ROOT,
@@ -9734,14 +9937,14 @@ static struct cftype cpu_files[] = {
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
.css_online = cpu_cgroup_css_online,
+ .css_offline = cpu_cgroup_css_offline,
.css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
.css_extra_stat_show = cpu_extra_stat_show,
.css_local_stat_show = cpu_local_stat_show,
-#ifdef CONFIG_RT_GROUP_SCHED
.can_attach = cpu_cgroup_can_attach,
-#endif
.attach = cpu_cgroup_attach,
+ .cancel_attach = cpu_cgroup_cancel_attach,
.legacy_cftypes = cpu_legacy_files,
.dfl_cftypes = cpu_files,
.early_init = true,
@@ -10331,3 +10534,38 @@ void sched_mm_cid_fork(struct task_struct *t)
t->mm_cid_active = 1;
}
#endif
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
+ struct sched_enq_and_set_ctx *ctx)
+{
+ struct rq *rq = task_rq(p);
+
+ lockdep_assert_rq_held(rq);
+
+ *ctx = (struct sched_enq_and_set_ctx){
+ .p = p,
+ .queue_flags = queue_flags,
+ .queued = task_on_rq_queued(p),
+ .running = task_current(rq, p),
+ };
+
+ update_rq_clock(rq);
+ if (ctx->queued)
+ dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
+ if (ctx->running)
+ put_prev_task(rq, p);
+}
+
+void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
+{
+ struct rq *rq = task_rq(ctx->p);
+
+ lockdep_assert_rq_held(rq);
+
+ if (ctx->queued)
+ enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
+ if (ctx->running)
+ set_next_task(rq, ctx->p);
+}
+#endif /* CONFIG_SCHED_CLASS_EXT */
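+
+/*
+ * A minimal usage sketch for the pair above (the caller shown is
+ * hypothetical): a property change that must take @p off its runqueue and
+ * restore it afterwards, with the rq lock held throughout:
+ *
+ *	struct sched_enq_and_set_ctx ctx;
+ *
+ *	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
+ *	... update @p's scheduling properties ...
+ *	sched_enq_and_set_task(&ctx);
+ */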
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index eece6244f9d2..c6ba15388ea7 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -197,8 +197,10 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
{
- unsigned long min, max, util = cpu_util_cfs_boost(sg_cpu->cpu);
+ unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu);
+ if (!scx_switched_all())
+ util += cpu_util_cfs_boost(sg_cpu->cpu);
util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
util = max(util, boost);
sg_cpu->bw_min = min;
@@ -325,16 +327,35 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
}
#ifdef CONFIG_NO_HZ_COMMON
-static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+static bool sugov_hold_freq(struct sugov_cpu *sg_cpu)
{
- unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
- bool ret = idle_calls == sg_cpu->saved_idle_calls;
+ unsigned long idle_calls;
+ bool ret;
+
+ /*
+ * The heuristics in this function are for the fair class. For SCX, the
+ * performance target comes directly from the BPF scheduler. Let's just
+ * follow it.
+ */
+ if (scx_switched_all())
+ return false;
+
+ /* if capped by uclamp_max, always update to be in compliance */
+ if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))
+ return false;
+
+ /*
+ * Maintain the frequency if the CPU has not been idle recently, as
+ * reduction is likely to be premature.
+ */
+ idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
+ ret = idle_calls == sg_cpu->saved_idle_calls;
sg_cpu->saved_idle_calls = idle_calls;
return ret;
}
#else
-static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
/*
@@ -382,14 +403,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
return;
next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
- /*
- * Do not reduce the frequency if the CPU has not been idle
- * recently, as the reduction is likely to be premature then.
- *
- * Except when the rq is capped by uclamp_max.
- */
- if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
- sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
+
+ if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq &&
!sg_policy->need_freq_update) {
next_f = sg_policy->next_freq;
@@ -436,14 +451,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
return;
- /*
- * Do not reduce the target performance level if the CPU has not been
- * idle recently, as the reduction is likely to be premature then.
- *
- * Except when the rq is capped by uclamp_max.
- */
- if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
- sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
+ if (sugov_hold_freq(sg_cpu) && sg_cpu->util < prev_util)
sg_cpu->util = prev_util;
cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min,
@@ -654,9 +662,9 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
*/
- .sched_runtime = 1000000,
- .sched_deadline = 10000000,
- .sched_period = 10000000,
+ .sched_runtime = NSEC_PER_MSEC,
+ .sched_deadline = 10 * NSEC_PER_MSEC,
+ .sched_period = 10 * NSEC_PER_MSEC,
};
struct cpufreq_policy *policy = sg_policy->policy;
int ret;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index f59e5c19d944..9ce93d0bf452 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -320,19 +320,12 @@ void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
__sub_running_bw(dl_se->dl_bw, dl_rq);
}
-static void dl_change_utilization(struct task_struct *p, u64 new_bw)
+static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw)
{
- struct rq *rq;
-
- WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
-
- if (task_on_rq_queued(p))
- return;
+ if (dl_se->dl_non_contending) {
+ sub_running_bw(dl_se, &rq->dl);
+ dl_se->dl_non_contending = 0;
- rq = task_rq(p);
- if (p->dl.dl_non_contending) {
- sub_running_bw(&p->dl, &rq->dl);
- p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
* timer cannot be canceled, inactive_task_timer()
@@ -340,13 +333,25 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
* will not touch the rq's active utilization,
* so we are still safe.
*/
- if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
- put_task_struct(p);
+ if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
+ if (!dl_server(dl_se))
+ put_task_struct(dl_task_of(dl_se));
+ }
}
- __sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ __sub_rq_bw(dl_se->dl_bw, &rq->dl);
__add_rq_bw(new_bw, &rq->dl);
}
+static void dl_change_utilization(struct task_struct *p, u64 new_bw)
+{
+ WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
+
+ if (task_on_rq_queued(p))
+ return;
+
+ dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);
+}
+
static void __dl_clear_params(struct sched_dl_entity *dl_se);
/*
@@ -771,6 +776,15 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
/* for non-boosted task, pi_of(dl_se) == dl_se */
dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
dl_se->runtime = pi_of(dl_se)->dl_runtime;
+
+ /*
+ * If it is a deferred reservation and the server is not
+ * handling a starvation case, defer it.
+ */
+ if (dl_se->dl_defer && !dl_se->dl_defer_running) {
+ dl_se->dl_throttled = 1;
+ dl_se->dl_defer_armed = 1;
+ }
}
/*
@@ -809,6 +823,9 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
replenish_dl_new_period(dl_se, rq);
}
+static int start_dl_timer(struct sched_dl_entity *dl_se);
+static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t);
+
/*
* Pure Earliest Deadline First (EDF) scheduling does not deal with the
* possibility of a entity lasting more than what it declared, and thus
@@ -837,9 +854,18 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
/*
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
+ *
+ * Or, it could be the case of a deferred reservation that
+ * was not able to consume its runtime in the background and
+ * reached this point with current u > U.
+ *
+ * In both cases, set a new period.
*/
- if (dl_se->dl_deadline == 0)
- replenish_dl_new_period(dl_se, rq);
+ if (dl_se->dl_deadline == 0 ||
+ (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
+ }
if (dl_se->dl_yielded && dl_se->runtime > 0)
dl_se->runtime = 0;
@@ -873,6 +899,44 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
dl_se->dl_yielded = 0;
if (dl_se->dl_throttled)
dl_se->dl_throttled = 0;
+
+ /*
+ * If this is the replenishment of a deferred reservation,
+ * clear the flag and return.
+ */
+ if (dl_se->dl_defer_armed) {
+ dl_se->dl_defer_armed = 0;
+ return;
+ }
+
+ /*
+ * At this point, if the deferred server is not armed and the deadline
+ * is in the future, and if it is not already running, throttle the server
+ * and arm the defer timer.
+ */
+ if (dl_se->dl_defer && !dl_se->dl_defer_running &&
+ dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
+ if (!is_dl_boosted(dl_se) && dl_se->server_has_tasks(dl_se)) {
+
+ /*
+ * Set dl_se->dl_defer_armed and dl_throttled variables to
+ * inform the start_dl_timer() that this is a deferred
+ * activation.
+ */
+ dl_se->dl_defer_armed = 1;
+ dl_se->dl_throttled = 1;
+ if (!start_dl_timer(dl_se)) {
+ /*
+ * If for whatever reason (delays), a previous timer was
+ * queued but not serviced, cancel it and clean the
+ * deferrable server variables intended for start_dl_timer().
+ */
+ hrtimer_try_to_cancel(&dl_se->dl_timer);
+ dl_se->dl_defer_armed = 0;
+ dl_se->dl_throttled = 0;
+ }
+ }
+ }
}
/*
@@ -1023,6 +1087,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se)
}
replenish_dl_new_period(dl_se, rq);
+ } else if (dl_server(dl_se) && dl_se->dl_defer) {
+ /*
+ * The server can still use its previous deadline, so check if
+ * it left the dl_defer_running state.
+ */
+ if (!dl_se->dl_defer_running) {
+ dl_se->dl_defer_armed = 1;
+ dl_se->dl_throttled = 1;
+ }
}
}
@@ -1055,8 +1128,21 @@ static int start_dl_timer(struct sched_dl_entity *dl_se)
* We want the timer to fire at the deadline, but considering
* that it is actually coming from rq->clock and not from
* hrtimer's time base reading.
+ *
+ * The deferred reservation will have its timer set to
+ * (deadline - runtime). At that point, the CBS rule will decide
+ * if the current deadline can be used, or if a replenishment is
+ * required to avoid adding too much pressure on the system
+ * (current u > U).
*/
- act = ns_to_ktime(dl_next_period(dl_se));
+ if (dl_se->dl_defer_armed) {
+ WARN_ON_ONCE(!dl_se->dl_throttled);
+ act = ns_to_ktime(dl_se->deadline - dl_se->runtime);
+ } else {
+ /* act = deadline - rel-deadline + period */
+ act = ns_to_ktime(dl_next_period(dl_se));
+ }
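+
+ /*
+ * For example, with the fair-server defaults set in dl_server_start()
+ * (runtime = 50ms, period = 1s), a deferred activation arms the timer
+ * 950ms into the period: the server only wakes up if the fair tasks
+ * could not consume their runtime in the background before that point.
+ */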
+
now = hrtimer_cb_get_time(timer);
delta = ktime_to_ns(now) - rq_clock(rq);
act = ktime_add_ns(act, delta);
@@ -1106,6 +1192,62 @@ static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
#endif
}
+/* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
+static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;
+
+static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
+{
+ struct rq *rq = rq_of_dl_se(dl_se);
+ u64 fw;
+
+ scoped_guard (rq_lock, rq) {
+ struct rq_flags *rf = &scope.rf;
+
+ if (!dl_se->dl_throttled || !dl_se->dl_runtime)
+ return HRTIMER_NORESTART;
+
+ sched_clock_tick();
+ update_rq_clock(rq);
+
+ if (!dl_se->dl_runtime)
+ return HRTIMER_NORESTART;
+
+ if (!dl_se->server_has_tasks(dl_se)) {
+ replenish_dl_entity(dl_se);
+ return HRTIMER_NORESTART;
+ }
+
+ if (dl_se->dl_defer_armed) {
+ /*
+ * First check if the server could consume runtime in the background.
+ * If so, it is possible to push the defer timer for this amount
+ * of time. The dl_server_min_res serves as a limit to avoid
+ * forwarding the timer by too small an amount of time.
+ */
+ if (dl_time_before(rq_clock(dl_se->rq),
+ (dl_se->deadline - dl_se->runtime - dl_server_min_res))) {
+
+ /* reset the defer timer */
+ fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;
+
+ hrtimer_forward_now(timer, ns_to_ktime(fw));
+ return HRTIMER_RESTART;
+ }
+
+ dl_se->dl_defer_running = 1;
+ }
+
+ enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
+
+ if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
+ resched_curr(rq);
+
+ __push_dl_task(rq, rf);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
/*
* This is the bandwidth enforcement timer callback. If here, we know
* a task is not on its dl_rq, since the fact that the timer was running
@@ -1128,28 +1270,8 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
struct rq_flags rf;
struct rq *rq;
- if (dl_server(dl_se)) {
- struct rq *rq = rq_of_dl_se(dl_se);
- struct rq_flags rf;
-
- rq_lock(rq, &rf);
- if (dl_se->dl_throttled) {
- sched_clock_tick();
- update_rq_clock(rq);
-
- if (dl_se->server_has_tasks(dl_se)) {
- enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
- resched_curr(rq);
- __push_dl_task(rq, &rf);
- } else {
- replenish_dl_entity(dl_se);
- }
-
- }
- rq_unlock(rq, &rf);
-
- return HRTIMER_NORESTART;
- }
+ if (dl_server(dl_se))
+ return dl_server_timer(timer, dl_se);
p = dl_task_of(dl_se);
rq = task_rq_lock(p, &rf);
@@ -1319,22 +1441,10 @@ static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
return (delta * u_act) >> BW_SHIFT;
}
-static inline void
-update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
- int flags);
-static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
+s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
s64 scaled_delta_exec;
- if (unlikely(delta_exec <= 0)) {
- if (unlikely(dl_se->dl_yielded))
- goto throttle;
- return;
- }
-
- if (dl_entity_is_special(dl_se))
- return;
-
/*
* For tasks that participate in GRUB, we implement GRUB-PA: the
* spare reclaimed bandwidth is used to clock down frequency.
@@ -1353,8 +1463,64 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
}
+ return scaled_delta_exec;
+}
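+
+/*
+ * As an illustration, for an entity that does not use GRUB reclaiming,
+ * runtime is scaled by CPU frequency and capacity: running for 10ms on a
+ * full-capacity CPU clocked at half its maximum frequency is charged
+ * roughly 10ms * 0.5 = 5ms of scaled runtime, reflecting the service the
+ * entity actually obtained.
+ */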
+
+static inline void
+update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
+ int flags);
+static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
+{
+ s64 scaled_delta_exec;
+
+ if (unlikely(delta_exec <= 0)) {
+ if (unlikely(dl_se->dl_yielded))
+ goto throttle;
+ return;
+ }
+
+ if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer)
+ return;
+
+ if (dl_entity_is_special(dl_se))
+ return;
+
+ scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
+
dl_se->runtime -= scaled_delta_exec;
+ /*
+ * The fair server can consume its runtime while throttled (not queued/
+ * running as regular CFS).
+ *
+ * If the server consumes its entire runtime in this state, the server
+ * is not required for the current period. Thus, reset the server by
+ * starting a new period, pushing the activation.
+ */
+ if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) {
+ /*
+ * If the server was previously activated because a starving condition
+ * took place, at this point the condition went away because the fair
+ * scheduler was able to get runtime in the background. So return to
+ * the initial state.
+ */
+ dl_se->dl_defer_running = 0;
+
+ hrtimer_try_to_cancel(&dl_se->dl_timer);
+
+ replenish_dl_new_period(dl_se, dl_se->rq);
+
+ /*
+ * Not being able to start the timer is problematic. If it could not
+ * be started for whatever reason, we would need to "unthrottle" the DL
+ * server and queue it right away; otherwise nothing might queue it.
+ * That's similar to what enqueue_dl_entity() does when start_dl_timer()
+ * returns 0. For now, just warn.
+ */
+ WARN_ON_ONCE(!start_dl_timer(dl_se));
+
+ return;
+ }
+
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se->dl_throttled = 1;
@@ -1382,6 +1548,14 @@ throttle:
}
/*
+ * The fair server (sole dl_server) does not account for real-time
+ * workload because it is running fair work.
+ */
+ if (dl_se == &rq->fair_server)
+ return;
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
* Because -- for now -- we share the rt bandwidth, we need to
* account our runtime there too, otherwise actual rt tasks
* would be able to exceed the shared quota.
@@ -1405,34 +1579,155 @@ throttle:
rt_rq->rt_time += delta_exec;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
+#endif
+}
+
+/*
+ * In the non-defer mode, the idle time is not accounted, as the
+ * server provides a guarantee.
+ *
+ * If the dl_server is in defer mode, the idle time is also considered
+ * as time available for the fair server, avoiding a penalty for the
+ * rt scheduler that did not consume that time.
+ */
+void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
+{
+ s64 delta_exec, scaled_delta_exec;
+
+ if (!rq->fair_server.dl_defer)
+ return;
+
+ /* no need to discount more */
+ if (rq->fair_server.runtime < 0)
+ return;
+
+ delta_exec = rq_clock_task(rq) - p->se.exec_start;
+ if (delta_exec < 0)
+ return;
+
+ scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec);
+
+ rq->fair_server.runtime -= scaled_delta_exec;
+
+ if (rq->fair_server.runtime < 0) {
+ rq->fair_server.dl_defer_running = 0;
+ rq->fair_server.runtime = 0;
+ }
+
+ p->se.exec_start = rq_clock_task(rq);
}
void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
{
- update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
+ /* 0 runtime = fair server disabled */
+ if (dl_se->dl_runtime)
+ update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
}
void dl_server_start(struct sched_dl_entity *dl_se)
{
+ struct rq *rq = dl_se->rq;
+
+ /*
+ * XXX: applying the parameters does not work correctly at the init
+ * phase for the fair server because things are not yet set up. We
+ * need to improve this before making it generic.
+ */
if (!dl_server(dl_se)) {
+ u64 runtime = 50 * NSEC_PER_MSEC;
+ u64 period = 1000 * NSEC_PER_MSEC;
+
+ dl_server_apply_params(dl_se, runtime, period, 1);
+
dl_se->dl_server = 1;
+ dl_se->dl_defer = 1;
setup_new_dl_entity(dl_se);
}
+
+ if (!dl_se->dl_runtime)
+ return;
+
enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
+ if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
+ resched_curr(dl_se->rq);
}
void dl_server_stop(struct sched_dl_entity *dl_se)
{
+ if (!dl_se->dl_runtime)
+ return;
+
dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
+ hrtimer_try_to_cancel(&dl_se->dl_timer);
+ dl_se->dl_defer_armed = 0;
+ dl_se->dl_throttled = 0;
}
void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
dl_server_has_tasks_f has_tasks,
- dl_server_pick_f pick)
+ dl_server_pick_f pick_task)
{
dl_se->rq = rq;
dl_se->server_has_tasks = has_tasks;
- dl_se->server_pick = pick;
+ dl_se->server_pick_task = pick_task;
+}
+
+void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
+{
+ u64 new_bw = dl_se->dl_bw;
+ int cpu = cpu_of(rq);
+ struct dl_bw *dl_b;
+
+ dl_b = dl_bw_of(cpu_of(rq));
+ guard(raw_spinlock)(&dl_b->lock);
+
+ if (!dl_bw_cpus(cpu))
+ return;
+
+ __dl_add(dl_b, new_bw, dl_bw_cpus(cpu));
+}
+
+int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init)
+{
+ u64 old_bw = init ? 0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+ u64 new_bw = to_ratio(period, runtime);
+ struct rq *rq = dl_se->rq;
+ int cpu = cpu_of(rq);
+ struct dl_bw *dl_b;
+ unsigned long cap;
+ int retval = 0;
+ int cpus;
+
+ dl_b = dl_bw_of(cpu);
+ guard(raw_spinlock)(&dl_b->lock);
+
+ cpus = dl_bw_cpus(cpu);
+ cap = dl_bw_capacity(cpu);
+
+ if (__dl_overflow(dl_b, cap, old_bw, new_bw))
+ return -EBUSY;
+
+ if (init) {
+ __add_rq_bw(new_bw, &rq->dl);
+ __dl_add(dl_b, new_bw, cpus);
+ } else {
+ __dl_sub(dl_b, dl_se->dl_bw, cpus);
+ __dl_add(dl_b, new_bw, cpus);
+
+ dl_rq_change_utilization(rq, dl_se, new_bw);
+ }
+
+ dl_se->dl_runtime = runtime;
+ dl_se->dl_deadline = period;
+ dl_se->dl_period = period;
+
+ dl_se->runtime = 0;
+ dl_se->deadline = 0;
+
+ dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+ dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
+
+ return retval;
}
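+
+/*
+ * For example, the fair-server defaults of runtime = 50ms and period = 1s
+ * (see dl_server_start()) request new_bw = to_ratio(1s, 50ms), i.e. 5% of
+ * the CPU bandwidth in BW_SHIFT fixed point; __dl_overflow() fails the
+ * update with -EBUSY if that reservation can no longer be admitted.
+ */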
/*
@@ -1599,46 +1894,40 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
}
-static inline struct sched_statistics *
+static __always_inline struct sched_statistics *
__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
{
+ if (!schedstat_enabled())
+ return NULL;
+
+ if (dl_server(dl_se))
+ return NULL;
+
return &dl_task_of(dl_se)->stats;
}
static inline void
update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
- struct sched_statistics *stats;
-
- if (!schedstat_enabled())
- return;
-
- stats = __schedstats_from_dl_se(dl_se);
- __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+ struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+ if (stats)
+ __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
- struct sched_statistics *stats;
-
- if (!schedstat_enabled())
- return;
-
- stats = __schedstats_from_dl_se(dl_se);
- __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+ struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+ if (stats)
+ __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
- struct sched_statistics *stats;
-
- if (!schedstat_enabled())
- return;
-
- stats = __schedstats_from_dl_se(dl_se);
- __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+ struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
+ if (stats)
+ __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}
static inline void
@@ -1735,7 +2024,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
* be counted in the active utilization; hence, we need to call
* add_running_bw().
*/
- if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
+ if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
if (flags & ENQUEUE_WAKEUP)
task_contending(dl_se, flags);
@@ -1757,6 +2046,25 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
setup_new_dl_entity(dl_se);
}
+ /*
+ * If the reservation is still throttled, e.g., it got replenished but is a
+ * deferred task and still has to wait, don't enqueue.
+ */
+ if (dl_se->dl_throttled && start_dl_timer(dl_se))
+ return;
+
+ /*
+ * We're about to enqueue, make sure we're not ->dl_throttled!
+ * In case the timer was not started, say because the defer time
+ * has passed, mark as not throttled and mark unarmed.
+ * Also cancel earlier timers, since letting those run is pointless.
+ */
+ if (dl_se->dl_throttled) {
+ hrtimer_try_to_cancel(&dl_se->dl_timer);
+ dl_se->dl_defer_armed = 0;
+ dl_se->dl_throttled = 0;
+ }
+
__enqueue_dl_entity(dl_se);
}
@@ -1846,7 +2154,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
enqueue_pushable_dl_task(rq, p);
}
-static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
@@ -1856,6 +2164,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
dequeue_dl_entity(&p->dl, flags);
if (!p->dl.dl_throttled && !dl_server(&p->dl))
dequeue_pushable_dl_task(rq, p);
+
+ return true;
}
/*
@@ -2074,6 +2384,9 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
deadline_queue_push_tasks(rq);
+
+ if (hrtick_enabled(rq))
+ start_hrtick_dl(rq, &p->dl);
}
static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
@@ -2086,7 +2399,11 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
return __node_2_dle(left);
}
-static struct task_struct *pick_task_dl(struct rq *rq)
+/*
+ * __pick_task_dl - Helper to pick the next -deadline task to run.
+ * @rq: The runqueue to pick the next task from.
+ */
+static struct task_struct *__pick_task_dl(struct rq *rq)
{
struct sched_dl_entity *dl_se;
struct dl_rq *dl_rq = &rq->dl;
@@ -2100,14 +2417,13 @@ again:
WARN_ON_ONCE(!dl_se);
if (dl_server(dl_se)) {
- p = dl_se->server_pick(dl_se);
+ p = dl_se->server_pick_task(dl_se);
if (!p) {
- WARN_ON_ONCE(1);
dl_se->dl_yielded = 1;
update_curr_dl_se(rq, dl_se, 0);
goto again;
}
- p->dl_server = dl_se;
+ rq->dl_server = dl_se;
} else {
p = dl_task_of(dl_se);
}
@@ -2115,24 +2431,12 @@ again:
return p;
}
-static struct task_struct *pick_next_task_dl(struct rq *rq)
+static struct task_struct *pick_task_dl(struct rq *rq)
{
- struct task_struct *p;
-
- p = pick_task_dl(rq);
- if (!p)
- return p;
-
- if (!p->dl_server)
- set_next_task_dl(rq, p, true);
-
- if (hrtick_enabled(rq))
- start_hrtick_dl(rq, &p->dl);
-
- return p;
+ return __pick_task_dl(rq);
}
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
{
struct sched_dl_entity *dl_se = &p->dl;
struct dl_rq *dl_rq = &rq->dl;
@@ -2824,13 +3128,12 @@ DEFINE_SCHED_CLASS(dl) = {
.wakeup_preempt = wakeup_preempt_dl,
- .pick_next_task = pick_next_task_dl,
+ .pick_task = pick_task_dl,
.put_prev_task = put_prev_task_dl,
.set_next_task = set_next_task_dl,
#ifdef CONFIG_SMP
.balance = balance_dl,
- .pick_task = pick_task_dl,
.select_task_rq = select_task_rq_dl,
.migrate_task_rq = migrate_task_rq_dl,
.set_cpus_allowed = set_cpus_allowed_dl,
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index c1eb9a1afd13..f4035c7a0fa1 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -333,8 +333,165 @@ static const struct file_operations sched_debug_fops = {
.release = seq_release,
};
+enum dl_param {
+ DL_RUNTIME = 0,
+ DL_PERIOD,
+};
+
+static unsigned long fair_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */
+static unsigned long fair_server_period_min = (100) * NSEC_PER_USEC; /* 100 us */
+
+static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, enum dl_param param)
+{
+ long cpu = (long) ((struct seq_file *) filp->private_data)->private;
+ struct rq *rq = cpu_rq(cpu);
+ u64 runtime, period;
+ int err;
+ int retval;
+ u64 value;
+
+ err = kstrtoull_from_user(ubuf, cnt, 10, &value);
+ if (err)
+ return err;
+
+ scoped_guard (rq_lock_irqsave, rq) {
+ runtime = rq->fair_server.dl_runtime;
+ period = rq->fair_server.dl_period;
+
+ switch (param) {
+ case DL_RUNTIME:
+ if (runtime == value)
+ break;
+ runtime = value;
+ break;
+ case DL_PERIOD:
+ if (value == period)
+ break;
+ period = value;
+ break;
+ }
+
+ if (runtime > period ||
+ period > fair_server_period_max ||
+ period < fair_server_period_min) {
+ return -EINVAL;
+ }
+
+ if (rq->cfs.h_nr_running) {
+ update_rq_clock(rq);
+ dl_server_stop(&rq->fair_server);
+ }
+
+ retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0);
+ if (retval)
+ cnt = retval;
+
+ if (!runtime)
+ printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
+ cpu_of(rq));
+
+ if (rq->cfs.h_nr_running)
+ dl_server_start(&rq->fair_server);
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static int sched_fair_server_show(struct seq_file *m, void *v, enum dl_param param)
+{
+ unsigned long cpu = (unsigned long) m->private;
+ struct rq *rq = cpu_rq(cpu);
+ u64 value;
+
+ switch (param) {
+ case DL_RUNTIME:
+ value = rq->fair_server.dl_runtime;
+ break;
+ case DL_PERIOD:
+ value = rq->fair_server.dl_period;
+ break;
+ }
+
+ seq_printf(m, "%llu\n", value);
+ return 0;
+}
+
+static ssize_t
+sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_RUNTIME);
+}
+
+static int sched_fair_server_runtime_show(struct seq_file *m, void *v)
+{
+ return sched_fair_server_show(m, v, DL_RUNTIME);
+}
+
+static int sched_fair_server_runtime_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, sched_fair_server_runtime_show, inode->i_private);
+}
+
+static const struct file_operations fair_server_runtime_fops = {
+ .open = sched_fair_server_runtime_open,
+ .write = sched_fair_server_runtime_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t
+sched_fair_server_period_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_PERIOD);
+}
+
+static int sched_fair_server_period_show(struct seq_file *m, void *v)
+{
+ return sched_fair_server_show(m, v, DL_PERIOD);
+}
+
+static int sched_fair_server_period_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, sched_fair_server_period_show, inode->i_private);
+}
+
+static const struct file_operations fair_server_period_fops = {
+ .open = sched_fair_server_period_open,
+ .write = sched_fair_server_period_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static struct dentry *debugfs_sched;
+static void debugfs_fair_server_init(void)
+{
+ struct dentry *d_fair;
+ unsigned long cpu;
+
+ d_fair = debugfs_create_dir("fair_server", debugfs_sched);
+ if (!d_fair)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct dentry *d_cpu;
+ char buf[32];
+
+ snprintf(buf, sizeof(buf), "cpu%lu", cpu);
+ d_cpu = debugfs_create_dir(buf, d_fair);
+
+ debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
+ debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
+ }
+}
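+
+/*
+ * Resulting layout, assuming debugfs is mounted in the usual place and that
+ * these directories hang off the main "sched" debugfs directory:
+ *
+ *	/sys/kernel/debug/sched/fair_server/cpu<N>/runtime	(ns)
+ *	/sys/kernel/debug/sched/fair_server/cpu<N>/period	(ns)
+ *
+ * e.g. "echo 20000000 > .../cpu0/runtime" requests a 20ms runtime on CPU 0,
+ * subject to the runtime <= period and period-range checks in
+ * sched_fair_server_write().
+ */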
+
static __init int sched_init_debug(void)
{
struct dentry __maybe_unused *numa;
@@ -374,6 +531,8 @@ static __init int sched_init_debug(void)
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
+ debugfs_fair_server_init();
+
return 0;
}
late_initcall(sched_init_debug);
@@ -580,27 +739,27 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
else
SEQ_printf(m, " %c", task_state_to_char(p));
- SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
+ SEQ_printf(m, " %15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
p->comm, task_pid_nr(p),
SPLIT_NS(p->se.vruntime),
entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
SPLIT_NS(p->se.deadline),
+ p->se.custom_slice ? 'S' : ' ',
SPLIT_NS(p->se.slice),
SPLIT_NS(p->se.sum_exec_runtime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
- SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
+ SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
- SPLIT_NS(p->se.sum_exec_runtime),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
#ifdef CONFIG_NUMA_BALANCING
- SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
+ SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
- SEQ_printf_task_group_path(m, task_group(p), " %s")
+ SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif
SEQ_printf(m, "\n");
@@ -612,10 +771,26 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
SEQ_printf(m, "\n");
SEQ_printf(m, "runnable tasks:\n");
- SEQ_printf(m, " S task PID tree-key switches prio"
- " wait-time sum-exec sum-sleep\n");
+ SEQ_printf(m, " S task PID vruntime eligible "
+ "deadline slice sum-exec switches "
+ "prio wait-time sum-sleep sum-block"
+#ifdef CONFIG_NUMA_BALANCING
+ " node group-id"
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+ " group-path"
+#endif
+ "\n");
SEQ_printf(m, "-------------------------------------------------------"
- "------------------------------------------------------\n");
+ "------------------------------------------------------"
+ "------------------------------------------------------"
+#ifdef CONFIG_NUMA_BALANCING
+ "--------------"
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+ "--------------"
+#endif
+ "\n");
rcu_read_lock();
for_each_process_thread(g, p) {
@@ -641,8 +816,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, "\n");
SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
- SPLIT_NS(cfs_rq->exec_clock));
raw_spin_rq_lock_irqsave(rq, flags);
root = __pick_root_entity(cfs_rq);
@@ -669,8 +842,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SPLIT_NS(right_vruntime));
spread = right_vruntime - left_vruntime;
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
- SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
- cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
@@ -730,9 +901,12 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
PU(rt_nr_running);
+
+#ifdef CONFIG_RT_GROUP_SCHED
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
+#endif
#undef PN
#undef PU
@@ -1090,6 +1264,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(dl.runtime);
P(dl.deadline);
}
+#ifdef CONFIG_SCHED_CLASS_EXT
+ __PS("ext.enabled", task_on_scx(p));
+#endif
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
new file mode 100644
index 000000000000..3cd7c50a51c5
--- /dev/null
+++ b/kernel/sched/ext.c
@@ -0,0 +1,7215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
+
+enum scx_consts {
+ SCX_SLICE_BYPASS = SCX_SLICE_DFL / 4,
+ SCX_DSP_DFL_MAX_BATCH = 32,
+ SCX_DSP_MAX_LOOPS = 32,
+ SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
+
+ SCX_EXIT_BT_LEN = 64,
+ SCX_EXIT_MSG_LEN = 1024,
+ SCX_EXIT_DUMP_DFL_LEN = 32768,
+
+ SCX_CPUPERF_ONE = SCHED_CAPACITY_SCALE,
+};
+
+enum scx_exit_kind {
+ SCX_EXIT_NONE,
+ SCX_EXIT_DONE,
+
+ SCX_EXIT_UNREG = 64, /* user-space initiated unregistration */
+ SCX_EXIT_UNREG_BPF, /* BPF-initiated unregistration */
+ SCX_EXIT_UNREG_KERN, /* kernel-initiated unregistration */
+ SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */
+
+ SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */
+ SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */
+ SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */
+};
+
+/*
+ * An exit code can be specified when exiting with scx_bpf_exit() or
+ * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
+ * respectively. The codes are 64-bit values of the format:
+ *
+ * Bits: [63 .. 48 47 .. 32 31 .. 0]
+ * [ SYS ACT ] [ SYS RSN ] [ USR ]
+ *
+ * SYS ACT: System-defined exit actions
+ * SYS RSN: System-defined exit reasons
+ * USR : User-defined exit codes and reasons
+ *
+ * Using the above, users may communicate intention and context by ORing system
+ * actions and/or system reasons with a user-defined exit code.
+ */
+enum scx_exit_code {
+ /* Reasons */
+ SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
+
+ /* Actions */
+ SCX_ECODE_ACT_RESTART = 1LLU << 48,
+};
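+
+/*
+ * For example, a scheduler exiting in response to a hotplug event and asking
+ * user space to restart it could pass (the low 32 bits shown are a
+ * hypothetical user-defined code):
+ *
+ *	SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | 1
+ */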
+
+/*
+ * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
+ * being disabled.
+ */
+struct scx_exit_info {
+ /* %SCX_EXIT_* - broad category of the exit reason */
+ enum scx_exit_kind kind;
+
+ /* exit code if gracefully exiting */
+ s64 exit_code;
+
+ /* textual representation of the above */
+ const char *reason;
+
+ /* backtrace if exiting due to an error */
+ unsigned long *bt;
+ u32 bt_len;
+
+ /* informational message */
+ char *msg;
+
+ /* debug dump */
+ char *dump;
+};
+
+/* sched_ext_ops.flags */
+enum scx_ops_flags {
+ /*
+ * Keep built-in idle tracking even if ops.update_idle() is implemented.
+ */
+ SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
+
+ /*
+ * By default, if there are no other tasks to run on the CPU, the ext core
+ * keeps running the current task even after its slice expires. If this
+ * flag is specified, such tasks are passed to ops.enqueue() with
+ * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
+ */
+ SCX_OPS_ENQ_LAST = 1LLU << 1,
+
+ /*
+ * An exiting task may schedule after PF_EXITING is set. In such cases,
+ * bpf_task_from_pid() may not be able to find the task and if the BPF
+ * scheduler depends on pid lookup for dispatching, the task will be
+ * lost leading to various issues including RCU grace period stalls.
+ *
+ * To mask this problem, by default, unhashed tasks are automatically
+ * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
+ * depend on pid lookups and wants to handle these tasks directly, the
+ * following flag can be used.
+ */
+ SCX_OPS_ENQ_EXITING = 1LLU << 2,
+
+ /*
+ * If set, only tasks with policy set to SCHED_EXT are attached to
+ * sched_ext. If clear, SCHED_NORMAL tasks are also included.
+ */
+ SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
+
+ /*
+ * CPU cgroup support flags
+ */
+ SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
+
+ SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
+ SCX_OPS_ENQ_LAST |
+ SCX_OPS_ENQ_EXITING |
+ SCX_OPS_SWITCH_PARTIAL |
+ SCX_OPS_HAS_CGROUP_WEIGHT,
+};
+
+/* argument container for ops.init_task() */
+struct scx_init_task_args {
+ /*
+ * Set if ops.init_task() is being invoked on the fork path, as opposed
+ * to the scheduler transition path.
+ */
+ bool fork;
+#ifdef CONFIG_EXT_GROUP_SCHED
+ /* the cgroup the task is joining */
+ struct cgroup *cgroup;
+#endif
+};
+
+/* argument container for ops.exit_task() */
+struct scx_exit_task_args {
+ /* Whether the task exited before running on sched_ext. */
+ bool cancelled;
+};
+
+/* argument container for ops->cgroup_init() */
+struct scx_cgroup_init_args {
+ /* the weight of the cgroup [1..10000] */
+ u32 weight;
+};
+
+enum scx_cpu_preempt_reason {
+ /* next task is being scheduled by &sched_class_rt */
+ SCX_CPU_PREEMPT_RT,
+ /* next task is being scheduled by &sched_class_dl */
+ SCX_CPU_PREEMPT_DL,
+ /* next task is being scheduled by &sched_class_stop */
+ SCX_CPU_PREEMPT_STOP,
+ /* unknown reason for SCX being preempted */
+ SCX_CPU_PREEMPT_UNKNOWN,
+};
+
+/*
+ * Argument container for ops->cpu_acquire(). Currently empty, but may be
+ * expanded in the future.
+ */
+struct scx_cpu_acquire_args {};
+
+/* argument container for ops->cpu_release() */
+struct scx_cpu_release_args {
+ /* the reason the CPU was preempted */
+ enum scx_cpu_preempt_reason reason;
+
+ /* the task that's going to be scheduled on the CPU */
+ struct task_struct *task;
+};
+
+/*
+ * Informational context provided to dump operations.
+ */
+struct scx_dump_ctx {
+ enum scx_exit_kind kind;
+ s64 exit_code;
+ const char *reason;
+ u64 at_ns;
+ u64 at_jiffies;
+};
+
+/**
+ * struct sched_ext_ops - Operation table for BPF scheduler implementation
+ *
+ * Userland can implement an arbitrary scheduling policy by implementing and
+ * loading operations in this table.
+ */
+struct sched_ext_ops {
+ /**
+ * select_cpu - Pick the target CPU for a task which is being woken up
+ * @p: task being woken up
+ * @prev_cpu: the cpu @p was on before sleeping
+ * @wake_flags: SCX_WAKE_*
+ *
+ * Decision made here isn't final. @p may be moved to any CPU while it
+ * is getting dispatched for execution later. However, as @p is not on
+ * the rq at this point, getting the eventual execution CPU right here
+ * saves a small bit of overhead down the line.
+ *
+ * If an idle CPU is returned, the CPU is kicked and will try to
+ * dispatch. While an explicit custom mechanism can be added,
+ * select_cpu() serves as the default way to wake up idle CPUs.
+ *
+ * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
+ * is dispatched, the ops.enqueue() callback will be skipped. Finally,
+ * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
+ * local DSQ of whatever CPU is returned by this callback.
+ */
+ s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
+
+ /**
+ * enqueue - Enqueue a task on the BPF scheduler
+ * @p: task being enqueued
+ * @enq_flags: %SCX_ENQ_*
+ *
+ * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
+ * or enqueue on the BPF scheduler. If not directly dispatched, the bpf
+ * scheduler owns @p and if it fails to dispatch @p, the task will
+ * stall.
+ *
+ * If @p was dispatched from ops.select_cpu(), this callback is
+ * skipped.
+ */
+ void (*enqueue)(struct task_struct *p, u64 enq_flags);
+
+ /**
+ * dequeue - Remove a task from the BPF scheduler
+ * @p: task being dequeued
+ * @deq_flags: %SCX_DEQ_*
+ *
+ * Remove @p from the BPF scheduler. This is usually called to isolate
+ * the task while updating its scheduling properties (e.g. priority).
+ *
+ * The ext core keeps track of whether the BPF side owns a given task or
+ * not and can gracefully ignore spurious dispatches from BPF side,
+ * which makes it safe to not implement this method. However, depending
+ * on the scheduling logic, this can lead to confusing behaviors - e.g.
+ * scheduling position not being updated across a priority change.
+ */
+ void (*dequeue)(struct task_struct *p, u64 deq_flags);
+
+ /**
+ * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs
+ * @cpu: CPU to dispatch tasks for
+ * @prev: previous task being switched out
+ *
+ * Called when a CPU's local DSQ is empty. The operation should dispatch
+ * one or more tasks from the BPF scheduler into the DSQs using
+ * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
+ * scx_bpf_consume().
+ *
+ * The maximum number of times scx_bpf_dispatch() can be called without
+ * an intervening scx_bpf_consume() is specified by
+ * ops.dispatch_max_batch. See the comments on top of the two functions
+ * for more details.
+ *
+ * When not %NULL, @prev is an SCX task with its slice depleted. If
+ * @prev is still runnable as indicated by set %SCX_TASK_QUEUED in
+ * @prev->scx.flags, it is not enqueued yet and will be enqueued after
+ * ops.dispatch() returns. To keep executing @prev, return without
+ * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST.
+ */
+ void (*dispatch)(s32 cpu, struct task_struct *prev);
+
+ /**
+ * tick - Periodic tick
+ * @p: task running currently
+ *
+ * This operation is called every 1/HZ seconds on CPUs which are
+ * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
+ * immediate dispatch cycle on the CPU.
+ */
+ void (*tick)(struct task_struct *p);
+
+ /**
+ * runnable - A task is becoming runnable on its associated CPU
+ * @p: task becoming runnable
+ * @enq_flags: %SCX_ENQ_*
+ *
+ * This and the following three functions can be used to track a task's
+ * execution state transitions. A task becomes ->runnable() on a CPU,
+ * and then goes through one or more ->running() and ->stopping() pairs
+ * as it runs on the CPU, and eventually becomes ->quiescent() when it's
+ * done running on the CPU.
+ *
+ * @p is becoming runnable on the CPU because it's
+ *
+ * - waking up (%SCX_ENQ_WAKEUP)
+ * - being moved from another CPU
+ * - being restored after temporarily taken off the queue for an
+ * attribute change.
+ *
+ * This and ->enqueue() are related but not coupled. This operation
+ * notifies @p's state transition and may not be followed by ->enqueue()
+ * e.g. when @p is being dispatched to a remote CPU, or when @p is
+ * being enqueued on a CPU experiencing a hotplug event. Likewise, a
+ * task may be ->enqueue()'d without being preceded by this operation
+ * e.g. after exhausting its slice.
+ */
+ void (*runnable)(struct task_struct *p, u64 enq_flags);
+
+ /**
+ * running - A task is starting to run on its associated CPU
+ * @p: task starting to run
+ *
+ * See ->runnable() for explanation on the task state notifiers.
+ */
+ void (*running)(struct task_struct *p);
+
+ /**
+ * stopping - A task is stopping execution
+ * @p: task that is stopping
+ * @runnable: is task @p still runnable?
+ *
+ * See ->runnable() for explanation on the task state notifiers. If
+ * !@runnable, ->quiescent() will be invoked after this operation
+ * returns.
+ */
+ void (*stopping)(struct task_struct *p, bool runnable);
+
+ /**
+ * quiescent - A task is becoming not runnable on its associated CPU
+ * @p: task becoming not runnable
+ * @deq_flags: %SCX_DEQ_*
+ *
+ * See ->runnable() for explanation on the task state notifiers.
+ *
+ * @p is becoming quiescent on the CPU because it's
+ *
+ * - sleeping (%SCX_DEQ_SLEEP)
+ * - being moved to another CPU
+ * - being temporarily taken off the queue for an attribute change
+ * (%SCX_DEQ_SAVE)
+ *
+ * This and ->dequeue() are related but not coupled. This operation
+ * notifies @p's state transition and may not be preceded by ->dequeue()
+ * e.g. when @p is being dispatched to a remote CPU.
+ */
+ void (*quiescent)(struct task_struct *p, u64 deq_flags);
+
+ /**
+ * yield - Yield CPU
+ * @from: yielding task
+ * @to: optional yield target task
+ *
+ * If @to is NULL, @from is yielding the CPU to other runnable tasks.
+ * The BPF scheduler should ensure that other available tasks are
+ * dispatched before the yielding task. Return value is ignored in this
+ * case.
+ *
+ * If @to is non-NULL, @from wants to yield the CPU to @to. If the bpf
+ * scheduler can implement the request, return %true; otherwise, %false.
+ */
+ bool (*yield)(struct task_struct *from, struct task_struct *to);
+
+ /**
+ * core_sched_before - Task ordering for core-sched
+ * @a: task A
+ * @b: task B
+ *
+ * Used by core-sched to determine the ordering between two tasks. See
+ * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
+ * core-sched.
+ *
+ * Both @a and @b are runnable and may or may not currently be queued on
+ * the BPF scheduler. Should return %true if @a should run before @b.
+ * %false if there's no required ordering or @b should run before @a.
+ *
+ * If not specified, the default is ordering them according to when they
+ * became runnable.
+ */
+ bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
+
+ /**
+ * set_weight - Set task weight
+ * @p: task to set weight for
+ * @weight: new weight [1..10000]
+ *
+ * Update @p's weight to @weight.
+ */
+ void (*set_weight)(struct task_struct *p, u32 weight);
+
+ /**
+ * set_cpumask - Set CPU affinity
+ * @p: task to set CPU affinity for
+ * @cpumask: cpumask of cpus that @p can run on
+ *
+ * Update @p's CPU affinity to @cpumask.
+ */
+ void (*set_cpumask)(struct task_struct *p,
+ const struct cpumask *cpumask);
+
+ /**
+ * update_idle - Update the idle state of a CPU
+ * @cpu: CPU to update the idle state for
+ * @idle: whether entering or exiting the idle state
+ *
+ * This operation is called when @cpu enters or leaves the idle
+ * state. By default, implementing this operation disables the built-in
+ * idle CPU tracking and the following helpers become unavailable:
+ *
+ * - scx_bpf_select_cpu_dfl()
+ * - scx_bpf_test_and_clear_cpu_idle()
+ * - scx_bpf_pick_idle_cpu()
+ *
+ * The user also must implement ops.select_cpu() as the default
+ * implementation relies on scx_bpf_select_cpu_dfl().
+ *
+ * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
+ * tracking.
+ */
+ void (*update_idle)(s32 cpu, bool idle);
+
+ /**
+ * cpu_acquire - A CPU is becoming available to the BPF scheduler
+ * @cpu: The CPU being acquired by the BPF scheduler.
+ * @args: Acquire arguments, see the struct definition.
+ *
+ * A CPU that was previously released from the BPF scheduler is now once
+ * again under its control.
+ */
+ void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
+
+ /**
+ * cpu_release - A CPU is taken away from the BPF scheduler
+ * @cpu: The CPU being released by the BPF scheduler.
+ * @args: Release arguments, see the struct definition.
+ *
+ * The specified CPU is no longer under the control of the BPF
+ * scheduler. This could be because it was preempted by a higher
+ * priority sched_class, though there may be other reasons as well. The
+ * caller should consult @args->reason to determine the cause.
+ */
+ void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
+
+ /**
+ * init_task - Initialize a task to run in a BPF scheduler
+ * @p: task to initialize for BPF scheduling
+ * @args: init arguments, see the struct definition
+ *
+ * Either we're loading a BPF scheduler or a new task is being forked.
+ * Initialize @p for BPF scheduling. This operation may block and can
+ * be used for allocations, and is called exactly once for a task.
+ *
+ * Return 0 for success, -errno for failure. An error return while
+ * loading will abort loading of the BPF scheduler. During a fork, it
+ * will abort that specific fork.
+ */
+ s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
+
+ /**
+ * exit_task - Exit a previously-running task from the system
+ * @p: task to exit
+ *
+ * @p is exiting or the BPF scheduler is being unloaded. Perform any
+ * necessary cleanup for @p.
+ */
+ void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
+
+ /**
+ * enable - Enable BPF scheduling for a task
+ * @p: task to enable BPF scheduling for
+ *
+ * Enable @p for BPF scheduling. enable() is called on @p any time it
+ * enters SCX, and is always paired with a matching disable().
+ */
+ void (*enable)(struct task_struct *p);
+
+ /**
+ * disable - Disable BPF scheduling for a task
+ * @p: task to disable BPF scheduling for
+ *
+ * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
+ * Disable BPF scheduling for @p. A disable() call is always matched
+ * with a prior enable() call.
+ */
+ void (*disable)(struct task_struct *p);
+
+ /**
+ * dump - Dump BPF scheduler state on error
+ * @ctx: debug dump context
+ *
+ * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
+ */
+ void (*dump)(struct scx_dump_ctx *ctx);
+
+ /**
+ * dump_cpu - Dump BPF scheduler state for a CPU on error
+ * @ctx: debug dump context
+ * @cpu: CPU to generate debug dump for
+ * @idle: @cpu is currently idle without any runnable tasks
+ *
+ * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
+ * @cpu. If @idle is %true and this operation doesn't produce any
+ * output, @cpu is skipped for dump.
+ */
+ void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
+
+ /**
+ * dump_task - Dump BPF scheduler state for a runnable task on error
+ * @ctx: debug dump context
+ * @p: runnable task to generate debug dump for
+ *
+ * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
+ * @p.
+ */
+ void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
+
+#ifdef CONFIG_EXT_GROUP_SCHED
+ /**
+ * cgroup_init - Initialize a cgroup
+ * @cgrp: cgroup being initialized
+ * @args: init arguments, see the struct definition
+ *
+ * Either the BPF scheduler is being loaded or @cgrp created, initialize
+ * @cgrp for sched_ext. This operation may block.
+ *
+ * Return 0 for success, -errno for failure. An error return while
+ * loading will abort loading of the BPF scheduler. During cgroup
+ * creation, it will abort the specific cgroup creation.
+ */
+ s32 (*cgroup_init)(struct cgroup *cgrp,
+ struct scx_cgroup_init_args *args);
+
+ /**
+ * cgroup_exit - Exit a cgroup
+ * @cgrp: cgroup being exited
+ *
+ * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
+ * @cgrp for sched_ext. This operation may block.
+ */
+ void (*cgroup_exit)(struct cgroup *cgrp);
+
+ /**
+ * cgroup_prep_move - Prepare a task to be moved to a different cgroup
+ * @p: task being moved
+ * @from: cgroup @p is being moved from
+ * @to: cgroup @p is being moved to
+ *
+ * Prepare @p for move from cgroup @from to @to. This operation may
+ * block and can be used for allocations.
+ *
+ * Return 0 for success, -errno for failure. An error return aborts the
+ * migration.
+ */
+ s32 (*cgroup_prep_move)(struct task_struct *p,
+ struct cgroup *from, struct cgroup *to);
+
+ /**
+ * cgroup_move - Commit cgroup move
+ * @p: task being moved
+ * @from: cgroup @p is being moved from
+ * @to: cgroup @p is being moved to
+ *
+ * Commit the move. @p is dequeued during this operation.
+ */
+ void (*cgroup_move)(struct task_struct *p,
+ struct cgroup *from, struct cgroup *to);
+
+ /**
+ * cgroup_cancel_move - Cancel cgroup move
+ * @p: task whose cgroup move is being canceled
+ * @from: cgroup @p was being moved from
+ * @to: cgroup @p was being moved to
+ *
+ * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
+ * Undo the preparation.
+ */
+ void (*cgroup_cancel_move)(struct task_struct *p,
+ struct cgroup *from, struct cgroup *to);
+
+ /**
+ * cgroup_set_weight - A cgroup's weight is being changed
+ * @cgrp: cgroup whose weight is being updated
+ * @weight: new weight [1..10000]
+ *
+ * Update @cgrp's weight to @weight.
+ */
+ void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
+#endif /* CONFIG_EXT_GROUP_SCHED */
+
+ /*
+ * All online ops must come before ops.cpu_online().
+ */
+
+ /**
+ * cpu_online - A CPU became online
+ * @cpu: CPU which just came up
+ *
+ * @cpu just came online. @cpu will not call ops.enqueue() or
+ * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
+ */
+ void (*cpu_online)(s32 cpu);
+
+ /**
+ * cpu_offline - A CPU is going offline
+ * @cpu: CPU which is going offline
+ *
+ * @cpu is going offline. @cpu will not call ops.enqueue() or
+ * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
+ */
+ void (*cpu_offline)(s32 cpu);
+
+ /*
+ * All CPU hotplug ops must come before ops.init().
+ */
+
+ /**
+ * init - Initialize the BPF scheduler
+ */
+ s32 (*init)(void);
+
+ /**
+ * exit - Clean up after the BPF scheduler
+ * @info: Exit info
+ */
+ void (*exit)(struct scx_exit_info *info);
+
+ /**
+ * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
+ */
+ u32 dispatch_max_batch;
+
+ /**
+ * flags - %SCX_OPS_* flags
+ */
+ u64 flags;
+
+ /**
+ * timeout_ms - The maximum amount of time, in milliseconds, that a
+ * runnable task should be able to wait before being scheduled. The
+ * maximum timeout may not exceed the default timeout of 30 seconds.
+ *
+ * Defaults to the maximum allowed timeout value of 30 seconds.
+ */
+ u32 timeout_ms;
+
+ /**
+ * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
+ * value of 32768 is used.
+ */
+ u32 exit_dump_len;
+
+ /**
+ * hotplug_seq - A sequence number that may be set by the scheduler to
+ * detect when a hotplug event has occurred during the loading process.
+ * If 0, no detection occurs. Otherwise, the scheduler will fail to
+ * load if the sequence number does not match @scx_hotplug_seq on the
+ * enable path.
+ */
+ u64 hotplug_seq;
+
+ /**
+ * name - BPF scheduler's name
+ *
+ * Must be a non-zero valid BPF object name including only isalnum(),
+ * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
+ * BPF scheduler is enabled.
+ */
+ char name[SCX_OPS_NAME_LEN];
+};
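+
+/*
+ * A minimal scheduler needs only a small subset of this table; a sketch of
+ * the BPF-side registration (callback and scheduler names are placeholders):
+ *
+ *	SEC(".struct_ops.link")
+ *	struct sched_ext_ops simple_ops = {
+ *		.select_cpu	= (void *)simple_select_cpu,
+ *		.enqueue	= (void *)simple_enqueue,
+ *		.dispatch	= (void *)simple_dispatch,
+ *		.name		= "simple",
+ *	};
+ *
+ * Callbacks left NULL fall back to the built-in default behaviors.
+ */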
+
+enum scx_opi {
+ SCX_OPI_BEGIN = 0,
+ SCX_OPI_NORMAL_BEGIN = 0,
+ SCX_OPI_NORMAL_END = SCX_OP_IDX(cpu_online),
+ SCX_OPI_CPU_HOTPLUG_BEGIN = SCX_OP_IDX(cpu_online),
+ SCX_OPI_CPU_HOTPLUG_END = SCX_OP_IDX(init),
+ SCX_OPI_END = SCX_OP_IDX(init),
+};
+
+enum scx_wake_flags {
+ /* expose select WF_* flags as enums */
+ SCX_WAKE_FORK = WF_FORK,
+ SCX_WAKE_TTWU = WF_TTWU,
+ SCX_WAKE_SYNC = WF_SYNC,
+};
+
+enum scx_enq_flags {
+ /* expose select ENQUEUE_* flags as enums */
+ SCX_ENQ_WAKEUP = ENQUEUE_WAKEUP,
+ SCX_ENQ_HEAD = ENQUEUE_HEAD,
+
+ /* high 32bits are SCX specific */
+
+ /*
+ * Set the following to trigger preemption when calling
+ * scx_bpf_dispatch() with a local dsq as the target. The slice of the
+ * current task is cleared to zero and the CPU is kicked into the
+ * scheduling path. Implies %SCX_ENQ_HEAD.
+ */
+ SCX_ENQ_PREEMPT = 1LLU << 32,
+
+ /*
+ * The task being enqueued was previously enqueued on the current CPU's
+ * %SCX_DSQ_LOCAL, but was removed from it in a call to the
+ * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
+ * invoked in a ->cpu_release() callback, and the task is again
+ * dispatched back to %SCX_LOCAL_DSQ by this current ->enqueue(), the
+ * task will not be scheduled on the CPU until at least the next invocation
+ * of the ->cpu_acquire() callback.
+ */
+ SCX_ENQ_REENQ = 1LLU << 40,
+
+ /*
+ * The task being enqueued is the only task available for the CPU. By
+ * default, the ext core keeps executing such tasks, but when
+ * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
+ * %SCX_ENQ_LAST flag set.
+ *
+ * The BPF scheduler is responsible for triggering a follow-up
+ * scheduling event. Otherwise, execution may stall.
+ */
+ SCX_ENQ_LAST = 1LLU << 41,
+
+ /* high 8 bits are internal */
+ __SCX_ENQ_INTERNAL_MASK = 0xffLLU << 56,
+
+ SCX_ENQ_CLEAR_OPSS = 1LLU << 56,
+ SCX_ENQ_DSQ_PRIQ = 1LLU << 57,
+};
+
+enum scx_deq_flags {
+ /* expose select DEQUEUE_* flags as enums */
+ SCX_DEQ_SLEEP = DEQUEUE_SLEEP,
+
+ /* high 32bits are SCX specific */
+
+ /*
+ * The generic core-sched layer decided to execute the task even though
+ * it hasn't been dispatched yet. Dequeue from the BPF side.
+ */
+ SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32,
+};
+
+enum scx_pick_idle_cpu_flags {
+ SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
+};
+
+enum scx_kick_flags {
+ /*
+ * Kick the target CPU if idle. Guarantees that the target CPU goes
+ * through at least one full scheduling cycle before going idle. If the
+ * target CPU can be determined to be currently not idle and about to go
+ * through a scheduling cycle before going idle, this is a noop.
+ */
+ SCX_KICK_IDLE = 1LLU << 0,
+
+ /*
+ * Preempt the current task and execute the dispatch path. If the
+ * current task of the target CPU is an SCX task, its ->scx.slice is
+ * cleared to zero before the scheduling path is invoked so that the
+ * task expires and the dispatch path is invoked.
+ */
+ SCX_KICK_PREEMPT = 1LLU << 1,
+
+ /*
+ * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
+ * return after the target CPU finishes picking the next task.
+ */
+ SCX_KICK_WAIT = 1LLU << 2,
+};
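+
+/*
+ * For illustration (editorial sketch): after dispatching to a remote CPU's
+ * local DSQ, a hypothetical BPF scheduler could make sure an idle target
+ * notices the new task with:
+ *
+ *	scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+ *
+ * scx_bpf_kick_cpu() is the kfunc declared further down in this file.
+ */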
+
+enum scx_tg_flags {
+ SCX_TG_ONLINE = 1U << 0,
+ SCX_TG_INITED = 1U << 1,
+};
+
+enum scx_ops_enable_state {
+ SCX_OPS_ENABLING,
+ SCX_OPS_ENABLED,
+ SCX_OPS_DISABLING,
+ SCX_OPS_DISABLED,
+};
+
+static const char *scx_ops_enable_state_str[] = {
+ [SCX_OPS_ENABLING] = "enabling",
+ [SCX_OPS_ENABLED] = "enabled",
+ [SCX_OPS_DISABLING] = "disabling",
+ [SCX_OPS_DISABLED] = "disabled",
+};
+
+/*
+ * sched_ext_entity->ops_state
+ *
+ * Used to track task ownership between the SCX core and the BPF scheduler.
+ * State transitions look as follows:
+ *
+ * NONE -> QUEUEING -> QUEUED -> DISPATCHING
+ * ^ | |
+ * | v v
+ * \-------------------------------/
+ *
+ * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
+ * sites for explanations on the conditions being waited upon and why they are
+ * safe. Transitions out of them into NONE or QUEUED must store_release and the
+ * waiters should load_acquire.
+ *
+ * Tracking scx_ops_state enables the sched_ext core to reliably determine at
+ * all times whether any given task can be dispatched by the BPF scheduler and thus
+ * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
+ * to try to dispatch any task anytime regardless of its state as the SCX core
+ * can safely reject invalid dispatches.
+ */
+enum scx_ops_state {
+ SCX_OPSS_NONE, /* owned by the SCX core */
+ SCX_OPSS_QUEUEING, /* in transit to the BPF scheduler */
+ SCX_OPSS_QUEUED, /* owned by the BPF scheduler */
+ SCX_OPSS_DISPATCHING, /* in transit back to the SCX core */
+
+ /*
+ * QSEQ brands each QUEUED instance so that, when dispatch races
+ * dequeue/requeue, the dispatcher can tell whether it still has a claim
+ * on the task being dispatched.
+ *
+ * As some 32bit archs can't do 64bit store_release/load_acquire,
+ * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
+ * 32bit machines. The dispatch race window QSEQ protects is very narrow
+ * and runs with IRQ disabled. 30 bits should be sufficient.
+ */
+ SCX_OPSS_QSEQ_SHIFT = 2,
+};
+
+/* Use macros to ensure that the type is unsigned long for the masks */
+#define SCX_OPSS_STATE_MASK ((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
+#define SCX_OPSS_QSEQ_MASK (~SCX_OPSS_STATE_MASK)
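+
+/*
+ * For example (editorial note), do_enqueue_task() below packs both fields as
+ * SCX_OPSS_QUEUEING | (rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT); readers
+ * recover the state with (ops_state & SCX_OPSS_STATE_MASK) and compare their
+ * claim using (ops_state & SCX_OPSS_QSEQ_MASK).
+ */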
+
+/*
+ * During exit, a task may schedule after losing its PIDs. When disabling the
+ * BPF scheduler, we need to be able to iterate tasks in every state to
+ * guarantee system safety. Maintain a dedicated task list which contains every
+ * task between its fork and eventual free.
+ */
+static DEFINE_SPINLOCK(scx_tasks_lock);
+static LIST_HEAD(scx_tasks);
+
+/* ops enable/disable */
+static struct kthread_worker *scx_ops_helper;
+static DEFINE_MUTEX(scx_ops_enable_mutex);
+DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
+DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
+static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
+static atomic_t scx_ops_bypass_depth = ATOMIC_INIT(0);
+static bool scx_ops_init_task_enabled;
+static bool scx_switching_all;
+DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
+
+static struct sched_ext_ops scx_ops;
+static bool scx_warned_zero_slice;
+
+static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
+static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
+static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+
+static struct static_key_false scx_has_op[SCX_OPI_END] =
+ { [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
+
+static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
+static struct scx_exit_info *scx_exit_info;
+
+static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
+static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
+
+/*
+ * A monotonically increasing sequence number that is incremented every time a
+ * scheduler is enabled. This can be used to check whether any custom sched_ext
+ * scheduler has ever been used in the system.
+ */
+static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
+
+/*
+ * The maximum amount of time in jiffies that a task may be runnable without
+ * being scheduled on a CPU. If this timeout is exceeded, it will trigger
+ * scx_ops_error().
+ */
+static unsigned long scx_watchdog_timeout;
+
+/*
+ * The last time the delayed work was run. This delayed work relies on
+ * ksoftirqd being able to run to service timer interrupts, so it's possible
+ * that this work itself could get wedged. To account for this, we check that
+ * it's not stalled in the timer tick, and trigger an error if it is.
+ */
+static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
+
+static struct delayed_work scx_watchdog_work;
+
+/* idle tracking */
+#ifdef CONFIG_SMP
+#ifdef CONFIG_CPUMASK_OFFSTACK
+#define CL_ALIGNED_IF_ONSTACK
+#else
+#define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
+#endif
+
+static struct {
+ cpumask_var_t cpu;
+ cpumask_var_t smt;
+} idle_masks CL_ALIGNED_IF_ONSTACK;
+
+#endif /* CONFIG_SMP */
+
+/* for %SCX_KICK_WAIT */
+static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
+
+/*
+ * Direct dispatch marker.
+ *
+ * Non-NULL values are used for direct dispatch from enqueue path. A valid
+ * pointer points to the task currently being enqueued. An ERR_PTR value is used
+ * to indicate that direct dispatch has already happened.
+ */
+static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
+
+/*
+ * Dispatch queues.
+ *
+ * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
+ * to avoid live-locking in bypass mode where all tasks are dispatched to
+ * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
+ * sufficient, it can be further split.
+ */
+static struct scx_dispatch_q **global_dsqs;
+
+static const struct rhashtable_params dsq_hash_params = {
+ .key_len = 8,
+ .key_offset = offsetof(struct scx_dispatch_q, id),
+ .head_offset = offsetof(struct scx_dispatch_q, hash_node),
+};
+
+static struct rhashtable dsq_hash;
+static LLIST_HEAD(dsqs_to_free);
+
+/* dispatch buf */
+struct scx_dsp_buf_ent {
+ struct task_struct *task;
+ unsigned long qseq;
+ u64 dsq_id;
+ u64 enq_flags;
+};
+
+static u32 scx_dsp_max_batch;
+
+struct scx_dsp_ctx {
+ struct rq *rq;
+ u32 cursor;
+ u32 nr_tasks;
+ struct scx_dsp_buf_ent buf[];
+};
+
+static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
+
+/* string formatting from BPF */
+struct scx_bstr_buf {
+ u64 data[MAX_BPRINTF_VARARGS];
+ char line[SCX_EXIT_MSG_LEN];
+};
+
+static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
+static struct scx_bstr_buf scx_exit_bstr_buf;
+
+/* ops debug dump */
+struct scx_dump_data {
+ s32 cpu;
+ bool first;
+ s32 cursor;
+ struct seq_buf *s;
+ const char *prefix;
+ struct scx_bstr_buf buf;
+};
+
+static struct scx_dump_data scx_dump_data = {
+ .cpu = -1,
+};
+
+/* /sys/kernel/sched_ext interface */
+static struct kset *scx_kset;
+static struct kobject *scx_root_kobj;
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched_ext.h>
+
+static void process_ddsp_deferred_locals(struct rq *rq);
+static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
+static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
+ s64 exit_code,
+ const char *fmt, ...);
+
+#define scx_ops_error_kind(err, fmt, args...) \
+ scx_ops_exit_kind((err), 0, fmt, ##args)
+
+#define scx_ops_exit(code, fmt, args...) \
+ scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
+
+#define scx_ops_error(fmt, args...) \
+ scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
+
+#define SCX_HAS_OP(op) static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
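+
+/*
+ * E.g. the enqueue path below guards the call into the BPF scheduler with
+ * SCX_HAS_OP(enqueue) so that the static branch stays a no-op while no
+ * scheduler implementing ops.enqueue() is loaded.
+ */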
+
+static long jiffies_delta_msecs(unsigned long at, unsigned long now)
+{
+ if (time_after(at, now))
+ return jiffies_to_msecs(at - now);
+ else
+ return -(long)jiffies_to_msecs(now - at);
+}
+
+/* if the highest set bit is N, return a mask with bits [N+1, 31] set */
+static u32 higher_bits(u32 flags)
+{
+ /* use a u64 shift so that fls() == 32 (bit 31 set) stays well-defined */
+ return ~(u32)(((u64)1 << fls(flags)) - 1);
+}
+
+/* return the mask with only the highest bit set */
+static u32 highest_bit(u32 flags)
+{
+ int bit = fls(flags);
+ return ((u64)1 << bit) >> 1;
+}
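+
+/*
+ * Example (editorial): for flags == 0x6, fls() returns 3, so higher_bits()
+ * yields 0xfffffff8 (bits [3, 31]) and highest_bit() yields 0x4.
+ */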
+
+static bool u32_before(u32 a, u32 b)
+{
+ return (s32)(a - b) < 0;
+}
+
+static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
+{
+ return global_dsqs[cpu_to_node(task_cpu(p))];
+}
+
+static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
+{
+ return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
+}
+
+/*
+ * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
+ * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
+ * the allowed kfuncs, and those kfuncs should use scx_kf_allowed() to check
+ * whether they're running from an allowed context.
+ *
+ * @mask is constant, always inline to cull the mask calculations.
+ */
+static __always_inline void scx_kf_allow(u32 mask)
+{
+ /* nesting is allowed only in increasing scx_kf_mask order */
+ WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
+ "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
+ current->scx.kf_mask, mask);
+ current->scx.kf_mask |= mask;
+ barrier();
+}
+
+static void scx_kf_disallow(u32 mask)
+{
+ barrier();
+ current->scx.kf_mask &= ~mask;
+}
+
+#define SCX_CALL_OP(mask, op, args...) \
+do { \
+ if (mask) { \
+ scx_kf_allow(mask); \
+ scx_ops.op(args); \
+ scx_kf_disallow(mask); \
+ } else { \
+ scx_ops.op(args); \
+ } \
+} while (0)
+
+#define SCX_CALL_OP_RET(mask, op, args...) \
+({ \
+ __typeof__(scx_ops.op(args)) __ret; \
+ if (mask) { \
+ scx_kf_allow(mask); \
+ __ret = scx_ops.op(args); \
+ scx_kf_disallow(mask); \
+ } else { \
+ __ret = scx_ops.op(args); \
+ } \
+ __ret; \
+})
+
+/*
+ * Some kfuncs are allowed only on the tasks that are subjects of the
+ * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
+ * restrictions, the following SCX_CALL_OP_*() variants should be used when
+ * invoking scx_ops operations that take task arguments. These can only be used
+ * for non-nesting operations due to the way the tasks are tracked.
+ *
+ * kfuncs which can only operate on such tasks can in turn use
+ * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
+ * the specific task.
+ */
+#define SCX_CALL_OP_TASK(mask, op, task, args...) \
+do { \
+ BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
+ current->scx.kf_tasks[0] = task; \
+ SCX_CALL_OP(mask, op, task, ##args); \
+ current->scx.kf_tasks[0] = NULL; \
+} while (0)
+
+#define SCX_CALL_OP_TASK_RET(mask, op, task, args...) \
+({ \
+ __typeof__(scx_ops.op(task, ##args)) __ret; \
+ BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
+ current->scx.kf_tasks[0] = task; \
+ __ret = SCX_CALL_OP_RET(mask, op, task, ##args); \
+ current->scx.kf_tasks[0] = NULL; \
+ __ret; \
+})
+
+#define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...) \
+({ \
+ __typeof__(scx_ops.op(task0, task1, ##args)) __ret; \
+ BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
+ current->scx.kf_tasks[0] = task0; \
+ current->scx.kf_tasks[1] = task1; \
+ __ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args); \
+ current->scx.kf_tasks[0] = NULL; \
+ current->scx.kf_tasks[1] = NULL; \
+ __ret; \
+})
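+
+/*
+ * Example usage from this file (editorial note): the enqueue path invokes
+ * ops.enqueue() with the ENQUEUE mask while tracking @p as the subject task:
+ *
+ *	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
+ */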
+
+/* @mask is constant, always inline to cull unnecessary branches */
+static __always_inline bool scx_kf_allowed(u32 mask)
+{
+ if (unlikely(!(current->scx.kf_mask & mask))) {
+ scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
+ mask, current->scx.kf_mask);
+ return false;
+ }
+
+ /*
+ * Enforce nesting boundaries. e.g. A kfunc which can be called from
+ * DISPATCH must not be called if we're running DEQUEUE which is nested
+ * inside ops.dispatch(). We don't need to check boundaries for any
+ * blocking kfuncs as the verifier ensures they're only called from
+ * sleepable progs.
+ */
+ if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
+ (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
+ scx_ops_error("cpu_release kfunc called from a nested operation");
+ return false;
+ }
+
+ if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
+ (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
+ scx_ops_error("dispatch kfunc called from a nested operation");
+ return false;
+ }
+
+ return true;
+}
+
+/* see SCX_CALL_OP_TASK() */
+static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
+ struct task_struct *p)
+{
+ if (!scx_kf_allowed(mask))
+ return false;
+
+ if (unlikely((p != current->scx.kf_tasks[0] &&
+ p != current->scx.kf_tasks[1]))) {
+ scx_ops_error("called on a task not being operated on");
+ return false;
+ }
+
+ return true;
+}
+
+static bool scx_kf_allowed_if_unlocked(void)
+{
+ return !current->scx.kf_mask;
+}
+
+/**
+ * nldsq_next_task - Iterate to the next task in a non-local DSQ
+ * @dsq: user dsq being iterated
+ * @cur: current position, %NULL to start iteration
+ * @rev: walk backwards
+ *
+ * Returns %NULL when iteration is finished.
+ */
+static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
+ struct task_struct *cur, bool rev)
+{
+ struct list_head *list_node;
+ struct scx_dsq_list_node *dsq_lnode;
+
+ lockdep_assert_held(&dsq->lock);
+
+ if (cur)
+ list_node = &cur->scx.dsq_list.node;
+ else
+ list_node = &dsq->list;
+
+ /* find the next task, need to skip BPF iteration cursors */
+ do {
+ if (rev)
+ list_node = list_node->prev;
+ else
+ list_node = list_node->next;
+
+ if (list_node == &dsq->list)
+ return NULL;
+
+ dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
+ node);
+ } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
+
+ return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
+}
+
+#define nldsq_for_each_task(p, dsq) \
+ for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
+ (p) = nldsq_next_task((dsq), (p), false))
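+
+/*
+ * E.g. consume_dispatch_q() below walks a locked DSQ with
+ * nldsq_for_each_task(p, dsq), transparently skipping any BPF iteration
+ * cursors linked on the same list.
+ */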
+
+
+/*
+ * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
+ * dispatch order. The BPF-visible iterator is opaque and oversized to allow
+ * future changes without breaking backward compatibility. Can be used with
+ * bpf_for_each(). See bpf_iter_scx_dsq_*().
+ */
+enum scx_dsq_iter_flags {
+ /* iterate in the reverse dispatch order */
+ SCX_DSQ_ITER_REV = 1U << 16,
+
+ __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
+ __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
+
+ __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
+ __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
+ __SCX_DSQ_ITER_HAS_SLICE |
+ __SCX_DSQ_ITER_HAS_VTIME,
+};
+
+struct bpf_iter_scx_dsq_kern {
+ struct scx_dsq_list_node cursor;
+ struct scx_dispatch_q *dsq;
+ u64 slice;
+ u64 vtime;
+} __attribute__((aligned(8)));
+
+struct bpf_iter_scx_dsq {
+ u64 __opaque[6];
+} __attribute__((aligned(8)));
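+
+/*
+ * For illustration (editorial sketch) of the BPF-side usage mentioned above,
+ * a scheduler can walk a DSQ in reverse dispatch order with:
+ *
+ *	struct task_struct *p;
+ *
+ *	bpf_for_each(scx_dsq, p, dsq_id, SCX_DSQ_ITER_REV) {
+ *		... inspect @p ...
+ *	}
+ */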
+
+
+/*
+ * SCX task iterator.
+ */
+struct scx_task_iter {
+ struct sched_ext_entity cursor;
+ struct task_struct *locked;
+ struct rq *rq;
+ struct rq_flags rf;
+};
+
+/**
+ * scx_task_iter_init - Initialize a task iterator
+ * @iter: iterator to init
+ *
+ * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized,
+ * @iter must eventually be exited with scx_task_iter_exit().
+ *
+ * scx_tasks_lock may be released between this and the first next() call or
+ * between any two next() calls. If scx_tasks_lock is released between two
+ * next() calls, the caller is responsible for ensuring that the task being
+ * iterated remains accessible, either by holding the RCU read lock or by
+ * acquiring a reference count.
+ *
+ * All tasks which existed when the iteration started are guaranteed to be
+ * visited as long as they still exist.
+ */
+static void scx_task_iter_init(struct scx_task_iter *iter)
+{
+ lockdep_assert_held(&scx_tasks_lock);
+
+ BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
+ ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
+
+ iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
+ list_add(&iter->cursor.tasks_node, &scx_tasks);
+ iter->locked = NULL;
+}
+
+/**
+ * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator
+ * @iter: iterator to unlock rq for
+ *
+ * If @iter is in the middle of a locked iteration, it may be locking the rq of
+ * the task currently being visited. Unlock the rq if so. This function can be
+ * safely called anytime during an iteration.
+ *
+ * Returns %true if the rq which @iter was locking has been unlocked, %false
+ * if @iter was not locking an rq.
+ */
+static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter)
+{
+ if (iter->locked) {
+ task_rq_unlock(iter->rq, iter->locked, &iter->rf);
+ iter->locked = NULL;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/**
+ * scx_task_iter_exit - Exit a task iterator
+ * @iter: iterator to exit
+ *
+ * Exit a previously initialized @iter. Must be called with scx_tasks_lock held.
+ * If the iterator holds a task's rq lock, that rq lock is released. See
+ * scx_task_iter_init() for details.
+ */
+static void scx_task_iter_exit(struct scx_task_iter *iter)
+{
+ lockdep_assert_held(&scx_tasks_lock);
+
+ scx_task_iter_rq_unlock(iter);
+ list_del_init(&iter->cursor.tasks_node);
+}
+
+/**
+ * scx_task_iter_next - Next task
+ * @iter: iterator to walk
+ *
+ * Visit the next task. See scx_task_iter_init() for details.
+ */
+static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
+{
+ struct list_head *cursor = &iter->cursor.tasks_node;
+ struct sched_ext_entity *pos;
+
+ lockdep_assert_held(&scx_tasks_lock);
+
+ list_for_each_entry(pos, cursor, tasks_node) {
+ if (&pos->tasks_node == &scx_tasks)
+ return NULL;
+ if (!(pos->flags & SCX_TASK_CURSOR)) {
+ list_move(cursor, &pos->tasks_node);
+ return container_of(pos, struct task_struct, scx);
+ }
+ }
+
+ /* can't happen, should always terminate at scx_tasks above */
+ BUG();
+}
+
+/**
+ * scx_task_iter_next_locked - Next non-idle task with its rq locked
+ * @iter: iterator to walk
+ *
+ * Visit the next non-idle task with its rq lock held. See
+ * scx_task_iter_init() for details.
+ */
+static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
+{
+ struct task_struct *p;
+
+ scx_task_iter_rq_unlock(iter);
+
+ while ((p = scx_task_iter_next(iter))) {
+ /*
+ * scx_task_iter is used to prepare and move tasks into SCX
+ * while loading the BPF scheduler and vice-versa while
+ * unloading. The init_tasks ("swappers") should be excluded
+ * from the iteration because:
+ *
+ * - It's unsafe to use __setscheduler_prio() on an init_task to
+ * determine the sched_class to use as it won't preserve its
+ * idle_sched_class.
+ *
+ * - ops.init/exit_task() can easily be confused if called with
+ * init_tasks as they, e.g., share PID 0.
+ *
+ * As init_tasks are never scheduled through SCX, they can be
+ * skipped safely. Note that is_idle_task() which tests %PF_IDLE
+ * doesn't work here:
+ *
+ * - %PF_IDLE may not be set for an init_task whose CPU hasn't
+ * yet been onlined.
+ *
+ * - %PF_IDLE can be set on tasks that are not init_tasks. See
+ * play_idle_precise() used by CONFIG_IDLE_INJECT.
+ *
+ * Test for idle_sched_class as only init_tasks are on it.
+ */
+ if (p->sched_class != &idle_sched_class)
+ break;
+ }
+ if (!p)
+ return NULL;
+
+ iter->rq = task_rq_lock(p, &iter->rf);
+ iter->locked = p;
+
+ return p;
+}
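+
+/*
+ * Putting the iterator together (editorial sketch of the pattern used by the
+ * enable/disable paths later in this file):
+ *
+ *	struct scx_task_iter sti;
+ *	struct task_struct *p;
+ *
+ *	spin_lock_irq(&scx_tasks_lock);
+ *	scx_task_iter_init(&sti);
+ *	while ((p = scx_task_iter_next_locked(&sti))) {
+ *		... @p's rq is locked here ...
+ *	}
+ *	scx_task_iter_exit(&sti);
+ *	spin_unlock_irq(&scx_tasks_lock);
+ */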
+
+static enum scx_ops_enable_state scx_ops_enable_state(void)
+{
+ return atomic_read(&scx_ops_enable_state_var);
+}
+
+static enum scx_ops_enable_state
+scx_ops_set_enable_state(enum scx_ops_enable_state to)
+{
+ return atomic_xchg(&scx_ops_enable_state_var, to);
+}
+
+static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
+ enum scx_ops_enable_state from)
+{
+ int from_v = from;
+
+ return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
+}
+
+static bool scx_rq_bypassing(struct rq *rq)
+{
+ return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
+}
+
+/**
+ * wait_ops_state - Busy-wait the specified ops state to end
+ * @p: target task
+ * @opss: state to wait the end of
+ *
+ * Busy-wait for @p to transition out of @opss. This can only be used when the
+ * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This
+ * function also has load_acquire semantics to ensure that the caller can see
+ * the updates made in the enqueueing and dispatching paths.
+ */
+static void wait_ops_state(struct task_struct *p, unsigned long opss)
+{
+ do {
+ cpu_relax();
+ } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
+}
+
+/**
+ * ops_cpu_valid - Verify a cpu number
+ * @cpu: cpu number which came from a BPF ops
+ * @where: extra information reported on error
+ *
+ * @cpu is a cpu number which came from the BPF scheduler and can be any value.
+ * Verify that it is in range and one of the possible cpus. If invalid, trigger
+ * an ops error.
+ */
+static bool ops_cpu_valid(s32 cpu, const char *where)
+{
+ if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
+ return true;
+ } else {
+ scx_ops_error("invalid CPU %d%s%s", cpu,
+ where ? " " : "", where ?: "");
+ return false;
+ }
+}
+
+/**
+ * ops_sanitize_err - Sanitize a -errno value
+ * @ops_name: operation to blame on failure
+ * @err: -errno value to sanitize
+ *
+ * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
+ * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
+ * cause misbehaviors. For example, a large negative return from
+ * ops.init_task() triggers an oops when passed up the call chain because the
+ * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
+ * handled as a pointer.
+ */
+static int ops_sanitize_err(const char *ops_name, s32 err)
+{
+ if (err < 0 && err >= -MAX_ERRNO)
+ return err;
+
+ scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
+ return -EPROTO;
+}
+
+static void run_deferred(struct rq *rq)
+{
+ process_ddsp_deferred_locals(rq);
+}
+
+#ifdef CONFIG_SMP
+static void deferred_bal_cb_workfn(struct rq *rq)
+{
+ run_deferred(rq);
+}
+#endif
+
+static void deferred_irq_workfn(struct irq_work *irq_work)
+{
+ struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
+
+ raw_spin_rq_lock(rq);
+ run_deferred(rq);
+ raw_spin_rq_unlock(rq);
+}
+
+/**
+ * schedule_deferred - Schedule execution of deferred actions on an rq
+ * @rq: target rq
+ *
+ * Schedule execution of deferred actions on @rq. Must be called with @rq
+ * locked. Deferred actions are executed with @rq locked but unpinned, and thus
+ * can unlock @rq to e.g. migrate tasks to other rqs.
+ */
+static void schedule_deferred(struct rq *rq)
+{
+ lockdep_assert_rq_held(rq);
+
+#ifdef CONFIG_SMP
+ /*
+ * If in the middle of waking up a task, task_woken_scx() will be called
+ * afterwards which will then run the deferred actions, no need to
+ * schedule anything.
+ */
+ if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
+ return;
+
+ /*
+ * If in balance, the balance callbacks will be called before rq lock is
+ * released. Schedule one.
+ */
+ if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
+ queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
+ deferred_bal_cb_workfn);
+ return;
+ }
+#endif
+ /*
+ * No scheduler hooks available. Queue an irq work instead; it is executed
+ * on IRQ re-enable, which may take a bit longer than the scheduler hooks.
+ * The above WAKEUP and BALANCE paths should cover most of the cases and
+ * the time to IRQ re-enable shouldn't be long.
+ */
+ irq_work_queue(&rq->scx.deferred_irq_work);
+}
+
+/**
+ * touch_core_sched - Update timestamp used for core-sched task ordering
+ * @rq: rq to read clock from, must be locked
+ * @p: task to update the timestamp for
+ *
+ * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
+ * implement global or local-DSQ FIFO ordering for core-sched. Should be called
+ * when a task becomes runnable and its turn on the CPU ends (e.g. slice
+ * exhaustion).
+ */
+static void touch_core_sched(struct rq *rq, struct task_struct *p)
+{
+ lockdep_assert_rq_held(rq);
+
+#ifdef CONFIG_SCHED_CORE
+ /*
+ * It's okay to update the timestamp spuriously. Use
+ * sched_core_disabled() which is cheaper than enabled().
+ *
+ * As this is used to determine ordering between tasks of sibling CPUs,
+ * it may be better to use per-core dispatch sequence instead.
+ */
+ if (!sched_core_disabled())
+ p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
+#endif
+}
+
+/**
+ * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
+ * @rq: rq to read clock from, must be locked
+ * @p: task being dispatched
+ *
+ * If the BPF scheduler implements custom core-sched ordering via
+ * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
+ * ordering within each local DSQ. This function is called from dispatch paths
+ * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
+ */
+static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
+{
+ lockdep_assert_rq_held(rq);
+
+#ifdef CONFIG_SCHED_CORE
+ if (SCX_HAS_OP(core_sched_before))
+ touch_core_sched(rq, p);
+#endif
+}
+
+static void update_curr_scx(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ s64 delta_exec;
+
+ delta_exec = update_curr_common(rq);
+ if (unlikely(delta_exec <= 0))
+ return;
+
+ if (curr->scx.slice != SCX_SLICE_INF) {
+ curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
+ if (!curr->scx.slice)
+ touch_core_sched(rq, curr);
+ }
+}
+
+static bool scx_dsq_priq_less(struct rb_node *node_a,
+ const struct rb_node *node_b)
+{
+ const struct task_struct *a =
+ container_of(node_a, struct task_struct, scx.dsq_priq);
+ const struct task_struct *b =
+ container_of(node_b, struct task_struct, scx.dsq_priq);
+
+ return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
+}
+
+static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
+{
+ /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
+ WRITE_ONCE(dsq->nr, dsq->nr + delta);
+}
+
+static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
+ u64 enq_flags)
+{
+ bool is_local = dsq->id == SCX_DSQ_LOCAL;
+
+ WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
+ WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
+ !RB_EMPTY_NODE(&p->scx.dsq_priq));
+
+ if (!is_local) {
+ raw_spin_lock(&dsq->lock);
+ if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
+ scx_ops_error("attempting to dispatch to a destroyed dsq");
+ /* fall back to the global dsq */
+ raw_spin_unlock(&dsq->lock);
+ dsq = find_global_dsq(p);
+ raw_spin_lock(&dsq->lock);
+ }
+ }
+
+ if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
+ (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
+ /*
+ * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
+ * their FIFO queues. To avoid confusion, and to keep
+ * FIFO-dispatched tasks from accidentally starving
+ * vtime-dispatched ones, we disallow any internal DSQ from
+ * doing vtime ordering of tasks.
+ */
+ scx_ops_error("cannot use vtime ordering for built-in DSQs");
+ enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
+ }
+
+ if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
+ struct rb_node *rbp;
+
+ /*
+ * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
+ * linked to both the rbtree and list on PRIQs, this can only be
+ * tested easily when adding the first task.
+ */
+ if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
+ nldsq_next_task(dsq, NULL, false)))
+ scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
+ dsq->id);
+
+ p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
+ rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
+
+ /*
+ * Find the previous task and insert after it on the list so
+ * that @dsq->list is vtime ordered.
+ */
+ rbp = rb_prev(&p->scx.dsq_priq);
+ if (rbp) {
+ struct task_struct *prev =
+ container_of(rbp, struct task_struct,
+ scx.dsq_priq);
+ list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
+ } else {
+ list_add(&p->scx.dsq_list.node, &dsq->list);
+ }
+ } else {
+ /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
+ if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
+ scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
+ dsq->id);
+
+ if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
+ list_add(&p->scx.dsq_list.node, &dsq->list);
+ else
+ list_add_tail(&p->scx.dsq_list.node, &dsq->list);
+ }
+
+ /* seq records the order tasks are queued, used by BPF DSQ iterator */
+ dsq->seq++;
+ p->scx.dsq_seq = dsq->seq;
+
+ dsq_mod_nr(dsq, 1);
+ p->scx.dsq = dsq;
+
+ /*
+ * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
+ * direct dispatch path, but we clear them here because the direct
+ * dispatch verdict may be overridden on the enqueue path during e.g.
+ * bypass.
+ */
+ p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
+ p->scx.ddsp_enq_flags = 0;
+
+ /*
+ * We're transitioning out of QUEUEING or DISPATCHING. store_release to
+ * match waiters' load_acquire.
+ */
+ if (enq_flags & SCX_ENQ_CLEAR_OPSS)
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+
+ if (is_local) {
+ struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+ bool preempt = false;
+
+ if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+ rq->curr->sched_class == &ext_sched_class) {
+ rq->curr->scx.slice = 0;
+ preempt = true;
+ }
+
+ if (preempt || sched_class_above(&ext_sched_class,
+ rq->curr->sched_class))
+ resched_curr(rq);
+ } else {
+ raw_spin_unlock(&dsq->lock);
+ }
+}
+
+static void task_unlink_from_dsq(struct task_struct *p,
+ struct scx_dispatch_q *dsq)
+{
+ WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
+
+ if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
+ rb_erase(&p->scx.dsq_priq, &dsq->priq);
+ RB_CLEAR_NODE(&p->scx.dsq_priq);
+ p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
+ }
+
+ list_del_init(&p->scx.dsq_list.node);
+ dsq_mod_nr(dsq, -1);
+}
+
+static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
+{
+ struct scx_dispatch_q *dsq = p->scx.dsq;
+ bool is_local = dsq == &rq->scx.local_dsq;
+
+ if (!dsq) {
+ /*
+ * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
+ * Unlinking is all that's needed to cancel.
+ */
+ if (unlikely(!list_empty(&p->scx.dsq_list.node)))
+ list_del_init(&p->scx.dsq_list.node);
+
+ /*
+ * When dispatching directly from the BPF scheduler to a local
+ * DSQ, the task isn't associated with any DSQ but
+ * @p->scx.holding_cpu may be set under the protection of
+ * %SCX_OPSS_DISPATCHING.
+ */
+ if (p->scx.holding_cpu >= 0)
+ p->scx.holding_cpu = -1;
+
+ return;
+ }
+
+ if (!is_local)
+ raw_spin_lock(&dsq->lock);
+
+ /*
+ * Now that we hold @dsq->lock, @p->scx.holding_cpu and @p->scx.dsq_* can't
+ * change underneath us.
+ */
+ if (p->scx.holding_cpu < 0) {
+ /* @p must still be on @dsq, dequeue */
+ task_unlink_from_dsq(p, dsq);
+ } else {
+ /*
+ * We're racing against dispatch_to_local_dsq() which already
+ * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
+ * holding_cpu which tells dispatch_to_local_dsq() that it lost
+ * the race.
+ */
+ WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
+ p->scx.holding_cpu = -1;
+ }
+ p->scx.dsq = NULL;
+
+ if (!is_local)
+ raw_spin_unlock(&dsq->lock);
+}
+
+static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
+ struct task_struct *p)
+{
+ struct scx_dispatch_q *dsq;
+
+ if (dsq_id == SCX_DSQ_LOCAL)
+ return &rq->scx.local_dsq;
+
+ if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
+ s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
+
+ if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
+ return find_global_dsq(p);
+
+ return &cpu_rq(cpu)->scx.local_dsq;
+ }
+
+ if (dsq_id == SCX_DSQ_GLOBAL)
+ dsq = find_global_dsq(p);
+ else
+ dsq = find_user_dsq(dsq_id);
+
+ if (unlikely(!dsq)) {
+ scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
+ dsq_id, p->comm, p->pid);
+ return find_global_dsq(p);
+ }
+
+ return dsq;
+}
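+
+/*
+ * E.g. a %SCX_DSQ_LOCAL_ON verdict targeting CPU 3's local DSQ is encoded as
+ * (SCX_DSQ_LOCAL_ON | 3), with the CPU number carried in the bits covered by
+ * %SCX_DSQ_LOCAL_CPU_MASK.
+ */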
+
+static void mark_direct_dispatch(struct task_struct *ddsp_task,
+ struct task_struct *p, u64 dsq_id,
+ u64 enq_flags)
+{
+ /*
+ * Mark that dispatch already happened from ops.select_cpu() or
+ * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
+ * which can never match a valid task pointer.
+ */
+ __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
+
+ /* @p must match the task on the enqueue path */
+ if (unlikely(p != ddsp_task)) {
+ if (IS_ERR(ddsp_task))
+ scx_ops_error("%s[%d] already direct-dispatched",
+ p->comm, p->pid);
+ else
+ scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
+ ddsp_task->comm, ddsp_task->pid,
+ p->comm, p->pid);
+ return;
+ }
+
+ WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
+ WARN_ON_ONCE(p->scx.ddsp_enq_flags);
+
+ p->scx.ddsp_dsq_id = dsq_id;
+ p->scx.ddsp_enq_flags = enq_flags;
+}
+
+static void direct_dispatch(struct task_struct *p, u64 enq_flags)
+{
+ struct rq *rq = task_rq(p);
+ struct scx_dispatch_q *dsq =
+ find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
+
+ touch_core_sched_dispatch(rq, p);
+
+ p->scx.ddsp_enq_flags |= enq_flags;
+
+ /*
+ * We are in the enqueue path with @rq locked and pinned, and thus can't
+ * double lock a remote rq and enqueue to its local DSQ. For
+ * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
+ * the enqueue so that it's executed when @rq can be unlocked.
+ */
+ if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
+ unsigned long opss;
+
+ opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
+
+ switch (opss) {
+ case SCX_OPSS_NONE:
+ break;
+ case SCX_OPSS_QUEUEING:
+ /*
+ * As @p was never passed to the BPF side, _release is
+ * not strictly necessary. Still do it for consistency.
+ */
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+ break;
+ default:
+ WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
+ p->comm, p->pid, opss);
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+ break;
+ }
+
+ WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
+ list_add_tail(&p->scx.dsq_list.node,
+ &rq->scx.ddsp_deferred_locals);
+ schedule_deferred(rq);
+ return;
+ }
+
+ dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
+}
+
+static bool scx_rq_online(struct rq *rq)
+{
+ /*
+ * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
+ * the online state as seen from the BPF scheduler. cpu_active() test
+ * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
+ * stay set until the current scheduling operation is complete even if
+ * we aren't locking @rq.
+ */
+ return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
+}
+
+static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
+ int sticky_cpu)
+{
+ bool bypassing = scx_rq_bypassing(rq);
+ struct task_struct **ddsp_taskp;
+ unsigned long qseq;
+
+ WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
+
+ /* rq migration */
+ if (sticky_cpu == cpu_of(rq))
+ goto local_norefill;
+
+ /*
+ * If !scx_rq_online(), we already told the BPF scheduler that the CPU
+ * is offline and are just running the hotplug path. Don't bother the
+ * BPF scheduler.
+ */
+ if (!scx_rq_online(rq))
+ goto local;
+
+ if (bypassing)
+ goto global;
+
+ if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
+ goto direct;
+
+ /* see %SCX_OPS_ENQ_EXITING */
+ if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
+ unlikely(p->flags & PF_EXITING))
+ goto local;
+
+ if (!SCX_HAS_OP(enqueue))
+ goto global;
+
+ /* no direct dispatch or bypass triggered, enqueue on the BPF scheduler */
+ qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
+
+ WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
+ atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
+
+ ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
+ WARN_ON_ONCE(*ddsp_taskp);
+ *ddsp_taskp = p;
+
+ SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);
+
+ *ddsp_taskp = NULL;
+ if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
+ goto direct;
+
+ /*
+ * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
+ * dequeue may be waiting. The store_release matches their load_acquire.
+ */
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
+ return;
+
+direct:
+ direct_dispatch(p, enq_flags);
+ return;
+
+local:
+ /*
+ * For task-ordering, slice refill must be treated as implying the end
+ * of the current slice. Otherwise, the longer @p stays on the CPU, the
+ * higher priority it becomes from scx_prio_less()'s POV.
+ */
+ touch_core_sched(rq, p);
+ p->scx.slice = SCX_SLICE_DFL;
+local_norefill:
+ dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
+ return;
+
+global:
+ touch_core_sched(rq, p); /* see the comment in local: */
+ p->scx.slice = bypassing ? SCX_SLICE_BYPASS : SCX_SLICE_DFL;
+ dispatch_enqueue(find_global_dsq(p), p, enq_flags);
+}
+
+static bool task_runnable(const struct task_struct *p)
+{
+ return !list_empty(&p->scx.runnable_node);
+}
+
+static void set_task_runnable(struct rq *rq, struct task_struct *p)
+{
+ lockdep_assert_rq_held(rq);
+
+ if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
+ p->scx.runnable_at = jiffies;
+ p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
+ }
+
+ /*
+ * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
+ * appended to the runnable_list.
+ */
+ list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
+}
+
+static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
+{
+ list_del_init(&p->scx.runnable_node);
+ if (reset_runnable_at)
+ p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
+}
+
+static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
+{
+ int sticky_cpu = p->scx.sticky_cpu;
+
+ if (enq_flags & ENQUEUE_WAKEUP)
+ rq->scx.flags |= SCX_RQ_IN_WAKEUP;
+
+ enq_flags |= rq->scx.extra_enq_flags;
+
+ if (sticky_cpu >= 0)
+ p->scx.sticky_cpu = -1;
+
+ /*
+ * Restoring a running task will be immediately followed by
+ * set_next_task_scx() which expects the task to not be on the BPF
+ * scheduler as tasks can only start running through local DSQs. Force
+ * direct-dispatch into the local DSQ by setting the sticky_cpu.
+ */
+ if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
+ sticky_cpu = cpu_of(rq);
+
+ if (p->scx.flags & SCX_TASK_QUEUED) {
+ WARN_ON_ONCE(!task_runnable(p));
+ goto out;
+ }
+
+ set_task_runnable(rq, p);
+ p->scx.flags |= SCX_TASK_QUEUED;
+ rq->scx.nr_running++;
+ add_nr_running(rq, 1);
+
+ if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
+ SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);
+
+ if (enq_flags & SCX_ENQ_WAKEUP)
+ touch_core_sched(rq, p);
+
+ do_enqueue_task(rq, p, enq_flags, sticky_cpu);
+out:
+ rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
+}
+
+static void ops_dequeue(struct task_struct *p, u64 deq_flags)
+{
+ unsigned long opss;
+
+ /* dequeue is always temporary, don't reset runnable_at */
+ clr_task_runnable(p, false);
+
+ /* acquire ensures that we see the preceding updates on QUEUED */
+ opss = atomic_long_read_acquire(&p->scx.ops_state);
+
+ switch (opss & SCX_OPSS_STATE_MASK) {
+ case SCX_OPSS_NONE:
+ break;
+ case SCX_OPSS_QUEUEING:
+ /*
+ * QUEUEING is started and finished while holding @p's rq lock.
+ * As we're holding the rq lock now, we shouldn't see QUEUEING.
+ */
+ BUG();
+ case SCX_OPSS_QUEUED:
+ if (SCX_HAS_OP(dequeue))
+ SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);
+
+ if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
+ SCX_OPSS_NONE))
+ break;
+ fallthrough;
+ case SCX_OPSS_DISPATCHING:
+ /*
+ * If @p is being dispatched from the BPF scheduler to a DSQ,
+ * wait for the transfer to complete so that @p doesn't get
+ * added to its DSQ after dequeueing is complete.
+ *
+ * As we're waiting on DISPATCHING with the rq locked, the
+ * dispatching side shouldn't try to lock the rq while
+ * DISPATCHING is set. See dispatch_to_local_dsq().
+ *
+ * DISPATCHING shouldn't have qseq set and control can reach
+ * here with NONE @opss from the above QUEUED case block.
+ * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
+ */
+ wait_ops_state(p, SCX_OPSS_DISPATCHING);
+ BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
+ break;
+ }
+}
+
+static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
+{
+ if (!(p->scx.flags & SCX_TASK_QUEUED)) {
+ WARN_ON_ONCE(task_runnable(p));
+ return true;
+ }
+
+ ops_dequeue(p, deq_flags);
+
+ /*
+ * A currently running task which is going off @rq first gets dequeued
+ * and then stops running. As we want running <-> stopping transitions
+ * to be contained within runnable <-> quiescent transitions, trigger
+ * ->stopping() early here instead of in put_prev_task_scx().
+ *
+ * @p may go through multiple stopping <-> running transitions between
+ * here and put_prev_task_scx() if task attribute changes occur while
+ * balance_scx() leaves @rq unlocked. However, they don't contain any
+ * information meaningful to the BPF scheduler and can be suppressed by
+ * skipping the callbacks if the task is !QUEUED.
+ */
+ if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
+ update_curr_scx(rq);
+ SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
+ }
+
+ if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
+ SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);
+
+ if (deq_flags & SCX_DEQ_SLEEP)
+ p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
+ else
+ p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
+
+ p->scx.flags &= ~SCX_TASK_QUEUED;
+ rq->scx.nr_running--;
+ sub_nr_running(rq, 1);
+
+ dispatch_dequeue(rq, p);
+ return true;
+}
+
+static void yield_task_scx(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ if (SCX_HAS_OP(yield))
+ SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
+ else
+ p->scx.slice = 0;
+}
+
+static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
+{
+ struct task_struct *from = rq->curr;
+
+ if (SCX_HAS_OP(yield))
+ return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
+ else
+ return false;
+}
+
+static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+ struct scx_dispatch_q *src_dsq,
+ struct rq *dst_rq)
+{
+ struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
+
+ /* @src_dsq is locked and @p is on @dst_rq */
+ lockdep_assert_held(&src_dsq->lock);
+ lockdep_assert_rq_held(dst_rq);
+
+ WARN_ON_ONCE(p->scx.holding_cpu >= 0);
+
+ if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
+ list_add(&p->scx.dsq_list.node, &dst_dsq->list);
+ else
+ list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
+
+ dsq_mod_nr(dst_dsq, 1);
+ p->scx.dsq = dst_dsq;
+}
+
+#ifdef CONFIG_SMP
+/**
+ * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
+ * @p: task to move
+ * @enq_flags: %SCX_ENQ_*
+ * @src_rq: rq to move the task from, locked on entry, released on return
+ * @dst_rq: rq to move the task into, locked on return
+ *
+ * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
+ */
+static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+ struct rq *src_rq, struct rq *dst_rq)
+{
+ lockdep_assert_rq_held(src_rq);
+
+ /* the following marks @p MIGRATING which excludes dequeue */
+ deactivate_task(src_rq, p, 0);
+ set_task_cpu(p, cpu_of(dst_rq));
+ p->scx.sticky_cpu = cpu_of(dst_rq);
+
+ raw_spin_rq_unlock(src_rq);
+ raw_spin_rq_lock(dst_rq);
+
+ /*
+ * We want to pass scx-specific enq_flags but activate_task() will
+ * truncate the upper 32 bits. As we own @dst_rq, we can pass them through
+ * @dst_rq->scx.extra_enq_flags instead.
+ */
+ WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
+ WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
+ dst_rq->scx.extra_enq_flags = enq_flags;
+ activate_task(dst_rq, p, 0);
+ dst_rq->scx.extra_enq_flags = 0;
+}
+
+/*
+ * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
+ * differences:
+ *
+ * - is_cpu_allowed() asks "Can this task run on this CPU?" while
+ * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
+ * this CPU?".
+ *
+ * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
+ * must be allowed to finish on the CPU that it's currently on regardless of
+ * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
+ * BPF scheduler shouldn't attempt to migrate a task which has migration
+ * disabled.
+ *
+ * - The BPF scheduler is bypassed while the rq is offline and we can always say
+ * no to the BPF scheduler initiated migrations while offline.
+ */
+static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
+ bool trigger_error)
+{
+ int cpu = cpu_of(rq);
+
+ /*
+ * We don't require the BPF scheduler to avoid dispatching to offline
+ * CPUs mostly for convenience but also because CPUs can go offline
+ * between scx_bpf_dispatch() calls and here. Trigger error iff the
+ * picked CPU is outside the allowed mask.
+ */
+ if (!task_allowed_on_cpu(p, cpu)) {
+ if (trigger_error)
+ scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
+ cpu_of(rq), p->comm, p->pid);
+ return false;
+ }
+
+ if (unlikely(is_migration_disabled(p)))
+ return false;
+
+ if (!scx_rq_online(rq))
+ return false;
+
+ return true;
+}
+
+/**
+ * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
+ * @p: target task
+ * @dsq: locked DSQ @p is currently on
+ * @src_rq: rq @p is currently on, stable with @dsq locked
+ *
+ * Called with @dsq locked but no rq's locked. We want to move @p to a different
+ * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
+ * required when transferring into a local DSQ. Even when transferring into a
+ * non-local DSQ, it's better to use the same mechanism to protect against
+ * dequeues and maintain the invariant that @p->scx.dsq can only change while
+ * @src_rq is locked, which e.g. scx_dump_task() depends on.
+ *
+ * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
+ * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
+ * this may race with dequeue, which can't drop the rq lock or fail, do a little
+ * dancing from our side.
+ *
+ * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
+ * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
+ * would be cleared to -1. While other CPUs may have updated it to different
+ * values afterwards, as this operation can't be preempted or recurse, the
+ * holding_cpu can never become this CPU again before we're done. Thus, we can
+ * tell whether we lost to dequeue by testing whether the holding_cpu still
+ * points to this CPU. See dispatch_dequeue() for the counterpart.
+ *
+ * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
+ * still valid. %false if lost to dequeue.
+ */
+static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
+ struct scx_dispatch_q *dsq,
+ struct rq *src_rq)
+{
+ s32 cpu = raw_smp_processor_id();
+
+ lockdep_assert_held(&dsq->lock);
+
+ WARN_ON_ONCE(p->scx.holding_cpu >= 0);
+ task_unlink_from_dsq(p, dsq);
+ p->scx.holding_cpu = cpu;
+
+ raw_spin_unlock(&dsq->lock);
+ raw_spin_rq_lock(src_rq);
+
+ /* task_rq couldn't have changed if we're still the holding cpu */
+ return likely(p->scx.holding_cpu == cpu) &&
+ !WARN_ON_ONCE(src_rq != task_rq(p));
+}
+
+static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
+ struct scx_dispatch_q *dsq, struct rq *src_rq)
+{
+ raw_spin_rq_unlock(this_rq);
+
+ if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
+ move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
+ return true;
+ } else {
+ raw_spin_rq_unlock(src_rq);
+ raw_spin_rq_lock(this_rq);
+ return false;
+ }
+}
+#else /* CONFIG_SMP */
+static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
+static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
+static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
+#endif /* CONFIG_SMP */
+
+static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
+{
+ struct task_struct *p;
+retry:
+ /*
+ * The caller can't expect to successfully consume a task if the task's
+ * addition to @dsq isn't guaranteed to be visible somehow. Test
+ * @dsq->list without locking and skip if it seems empty.
+ */
+ if (list_empty(&dsq->list))
+ return false;
+
+ raw_spin_lock(&dsq->lock);
+
+ nldsq_for_each_task(p, dsq) {
+ struct rq *task_rq = task_rq(p);
+
+ if (rq == task_rq) {
+ task_unlink_from_dsq(p, dsq);
+ move_local_task_to_local_dsq(p, 0, dsq, rq);
+ raw_spin_unlock(&dsq->lock);
+ return true;
+ }
+
+ if (task_can_run_on_remote_rq(p, rq, false)) {
+ if (likely(consume_remote_task(rq, p, dsq, task_rq)))
+ return true;
+ goto retry;
+ }
+ }
+
+ raw_spin_unlock(&dsq->lock);
+ return false;
+}
+
+static bool consume_global_dsq(struct rq *rq)
+{
+ int node = cpu_to_node(cpu_of(rq));
+
+ return consume_dispatch_q(rq, global_dsqs[node]);
+}
+
+/**
+ * dispatch_to_local_dsq - Dispatch a task to a local dsq
+ * @rq: current rq which is locked
+ * @dst_dsq: destination DSQ
+ * @p: task to dispatch
+ * @enq_flags: %SCX_ENQ_*
+ *
+ * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
+ * DSQ. This function performs all the synchronization dancing needed because
+ * local DSQs are protected with rq locks.
+ *
+ * The caller must have exclusive ownership of @p (e.g. through
+ * %SCX_OPSS_DISPATCHING).
+ */
+static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ struct task_struct *p, u64 enq_flags)
+{
+ struct rq *src_rq = task_rq(p);
+ struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
+
+ /*
+ * We're synchronized against dequeue through DISPATCHING. As @p can't
+ * be dequeued, its task_rq and cpus_allowed are stable too.
+ *
+ * If dispatching to @rq that @p is already on, no lock dancing needed.
+ */
+ if (rq == src_rq && rq == dst_rq) {
+ dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+ return;
+ }
+
+#ifdef CONFIG_SMP
+ if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
+ dispatch_enqueue(find_global_dsq(p), p,
+ enq_flags | SCX_ENQ_CLEAR_OPSS);
+ return;
+ }
+
+ /*
+ * @p is on a possibly remote @src_rq which we need to lock to move the
+ * task. If dequeue is in progress, it'd be locking @src_rq and waiting
+ * on DISPATCHING, so we can't grab @src_rq lock while holding
+ * DISPATCHING.
+ *
+ * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
+ * we're moving from a DSQ and use the same mechanism - mark the task
+ * under transfer with holding_cpu, release DISPATCHING and then follow
+ * the same protocol. See unlink_dsq_and_lock_src_rq().
+ */
+ p->scx.holding_cpu = raw_smp_processor_id();
+
+ /* store_release ensures that dequeue sees the above */
+ atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+
+ /* switch to @src_rq lock */
+ if (rq != src_rq) {
+ raw_spin_rq_unlock(rq);
+ raw_spin_rq_lock(src_rq);
+ }
+
+ /* task_rq couldn't have changed if we're still the holding cpu */
+ if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
+ !WARN_ON_ONCE(src_rq != task_rq(p))) {
+ /*
+ * If @p is staying on the same rq, there's no need to go
+ * through the full deactivate/activate cycle. Optimize by
+ * abbreviating move_remote_task_to_local_dsq().
+ */
+ if (src_rq == dst_rq) {
+ p->scx.holding_cpu = -1;
+ dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
+ } else {
+ move_remote_task_to_local_dsq(p, enq_flags,
+ src_rq, dst_rq);
+ }
+
+ /* resched if @p preempts the destination CPU's current task, e.g. when idle */
+ if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
+ resched_curr(dst_rq);
+ }
+
+ /* switch back to @rq lock */
+ if (rq != dst_rq) {
+ raw_spin_rq_unlock(dst_rq);
+ raw_spin_rq_lock(rq);
+ }
+#else /* CONFIG_SMP */
+ BUG(); /* control cannot reach here on UP */
+#endif /* CONFIG_SMP */
+}
+
+/**
+ * finish_dispatch - Asynchronously finish dispatching a task
+ * @rq: current rq which is locked
+ * @p: task to finish dispatching
+ * @qseq_at_dispatch: qseq when @p started getting dispatched
+ * @dsq_id: destination DSQ ID
+ * @enq_flags: %SCX_ENQ_*
+ *
+ * Dispatching to local DSQs may need to wait for queueing to complete or
+ * require rq lock dancing. As we don't want to do either while inside
+ * ops.dispatch() to avoid locking order inversion, we split dispatching into
+ * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the
+ * task and its qseq. Once ops.dispatch() returns, this function is called to
+ * finish up.
+ *
+ * There is no guarantee that @p is still valid for dispatching or even that it
+ * was valid in the first place. Make sure that the task is still owned by the
+ * BPF scheduler and claim the ownership before dispatching.
+ */
+static void finish_dispatch(struct rq *rq, struct task_struct *p,
+ unsigned long qseq_at_dispatch,
+ u64 dsq_id, u64 enq_flags)
+{
+ struct scx_dispatch_q *dsq;
+ unsigned long opss;
+
+ touch_core_sched_dispatch(rq, p);
+retry:
+ /*
+ * No need for _acquire here. @p is accessed only after a successful
+ * try_cmpxchg to DISPATCHING.
+ */
+ opss = atomic_long_read(&p->scx.ops_state);
+
+ switch (opss & SCX_OPSS_STATE_MASK) {
+ case SCX_OPSS_DISPATCHING:
+ case SCX_OPSS_NONE:
+ /* someone else already got to it */
+ return;
+ case SCX_OPSS_QUEUED:
+ /*
+ * If qseq doesn't match, @p has gone through at least one
+ * dispatch/dequeue and re-enqueue cycle between
+ * scx_bpf_dispatch() and here and we have no claim on it.
+ */
+ if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
+ return;
+
+ /*
+ * While we know @p is accessible, we don't yet have a claim on
+ * it - the BPF scheduler is allowed to dispatch tasks
+ * spuriously and there can be a racing dequeue attempt. Let's
+ * claim @p by atomically transitioning it from QUEUED to
+ * DISPATCHING.
+ */
+ if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
+ SCX_OPSS_DISPATCHING)))
+ break;
+ goto retry;
+ case SCX_OPSS_QUEUEING:
+ /*
+ * do_enqueue_task() is in the process of transferring the task
+ * to the BPF scheduler while holding @p's rq lock. As we aren't
+ * holding any kernel or BPF resource that the enqueue path may
+ * depend upon, it's safe to wait.
+ */
+ wait_ops_state(p, opss);
+ goto retry;
+ }
+
+ BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
+
+ dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);
+
+ if (dsq->id == SCX_DSQ_LOCAL)
+ dispatch_to_local_dsq(rq, dsq, p, enq_flags);
+ else
+ dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+}
+
+static void flush_dispatch_buf(struct rq *rq)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ u32 u;
+
+ for (u = 0; u < dspc->cursor; u++) {
+ struct scx_dsp_buf_ent *ent = &dspc->buf[u];
+
+ finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
+ ent->enq_flags);
+ }
+
+ dspc->nr_tasks += dspc->cursor;
+ dspc->cursor = 0;
+}
+
+static int balance_one(struct rq *rq, struct task_struct *prev)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ bool prev_on_scx = prev->sched_class == &ext_sched_class;
+ int nr_loops = SCX_DSP_MAX_LOOPS;
+
+ lockdep_assert_rq_held(rq);
+ rq->scx.flags |= SCX_RQ_IN_BALANCE;
+ rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
+
+ if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
+ unlikely(rq->scx.cpu_released)) {
+ /*
+ * If the previous sched_class for the current CPU was not SCX,
+ * notify the BPF scheduler that it again has control of the
+ * core. This callback complements ->cpu_release(), which is
+ * emitted in scx_next_task_picked().
+ */
+ if (SCX_HAS_OP(cpu_acquire))
+ SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
+ rq->scx.cpu_released = false;
+ }
+
+ if (prev_on_scx) {
+ update_curr_scx(rq);
+
+ /*
+ * If @prev is runnable & has slice left, it has priority and
+ * fetching more just increases latency for the fetched tasks.
+ * Tell pick_task_scx() to keep running @prev. If the BPF
+ * scheduler wants to handle this explicitly, it should
+ * implement ->cpu_release().
+ *
+ * See scx_ops_disable_workfn() for the explanation on the
+ * bypassing test.
+ */
+ if ((prev->scx.flags & SCX_TASK_QUEUED) &&
+ prev->scx.slice && !scx_rq_bypassing(rq)) {
+ rq->scx.flags |= SCX_RQ_BAL_KEEP;
+ goto has_tasks;
+ }
+ }
+
+ /* if there already are tasks to run, nothing to do */
+ if (rq->scx.local_dsq.nr)
+ goto has_tasks;
+
+ if (consume_global_dsq(rq))
+ goto has_tasks;
+
+ if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
+ goto no_tasks;
+
+ dspc->rq = rq;
+
+ /*
+ * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
+ * the local DSQ might still end up empty after a successful
+ * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
+ * produced some tasks, retry. The BPF scheduler may depend on this
+ * looping behavior to simplify its implementation.
+ */
+ do {
+ dspc->nr_tasks = 0;
+
+ SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
+ prev_on_scx ? prev : NULL);
+
+ flush_dispatch_buf(rq);
+
+ if (rq->scx.local_dsq.nr)
+ goto has_tasks;
+ if (consume_global_dsq(rq))
+ goto has_tasks;
+
+ /*
+ * ops.dispatch() can trap us in this loop by repeatedly
+ * dispatching ineligible tasks. Break out once in a while to
+ * allow the watchdog to run. As IRQ can't be enabled in
+ * balance(), we want to complete this scheduling cycle and then
+ * start a new one. IOW, we want to call resched_curr() on the
+ * next, most likely idle, task, not the current one. Use
+ * scx_bpf_kick_cpu() for deferred kicking.
+ */
+ if (unlikely(!--nr_loops)) {
+ scx_bpf_kick_cpu(cpu_of(rq), 0);
+ break;
+ }
+ } while (dspc->nr_tasks);
+
+no_tasks:
+ /*
+ * Didn't find another task to run. Keep running @prev unless
+ * %SCX_OPS_ENQ_LAST is in effect.
+ */
+ if ((prev->scx.flags & SCX_TASK_QUEUED) &&
+ (!static_branch_unlikely(&scx_ops_enq_last) ||
+ scx_rq_bypassing(rq))) {
+ rq->scx.flags |= SCX_RQ_BAL_KEEP;
+ goto has_tasks;
+ }
+ rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
+ return false;
+
+has_tasks:
+ rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
+ return true;
+}
+
+static int balance_scx(struct rq *rq, struct task_struct *prev,
+ struct rq_flags *rf)
+{
+ int ret;
+
+ rq_unpin_lock(rq, rf);
+
+ ret = balance_one(rq, prev);
+
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When core-sched is enabled, this ops.balance() call will be followed
+ * by pick_task_scx() on this CPU and the SMT siblings. Balance the
+ * siblings too.
+ */
+ if (sched_core_enabled(rq)) {
+ const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
+ int scpu;
+
+ for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
+ struct rq *srq = cpu_rq(scpu);
+ struct task_struct *sprev = srq->curr;
+
+ WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
+ update_rq_clock(srq);
+ balance_one(srq, sprev);
+ }
+ }
+#endif
+ rq_repin_lock(rq, rf);
+
+ return ret;
+}
+
+static void process_ddsp_deferred_locals(struct rq *rq)
+{
+ struct task_struct *p;
+
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * Now that @rq can be unlocked, execute the deferred enqueueing of
+ * tasks directly dispatched to the local DSQs of other CPUs. See
+ * direct_dispatch(). Keep popping from the head instead of using
+ * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
+ * temporarily.
+ */
+ while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
+ struct task_struct, scx.dsq_list.node))) {
+ struct scx_dispatch_q *dsq;
+
+ list_del_init(&p->scx.dsq_list.node);
+
+ dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
+ if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
+ dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
+ }
+}
+
+static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
+{
+ if (p->scx.flags & SCX_TASK_QUEUED) {
+ /*
+ * Core-sched might decide to execute @p before it is
+ * dispatched. Call ops_dequeue() to notify the BPF scheduler.
+ */
+ ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
+ dispatch_dequeue(rq, p);
+ }
+
+ p->se.exec_start = rq_clock_task(rq);
+
+ /* see dequeue_task_scx() on why we skip when !QUEUED */
+ if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
+ SCX_CALL_OP_TASK(SCX_KF_REST, running, p);
+
+ clr_task_runnable(p, true);
+
+ /*
+ * @p is getting newly scheduled or got kicked after someone updated its
+ * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
+ */
+ if ((p->scx.slice == SCX_SLICE_INF) !=
+ (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
+ if (p->scx.slice == SCX_SLICE_INF)
+ rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
+ else
+ rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
+
+ sched_update_tick_dependency(rq);
+
+ /*
+ * For now, let's refresh the load_avgs just when transitioning
+ * in and out of nohz. In the future, we might want to add a
+ * mechanism which calls the following periodically on
+ * tick-stopped CPUs.
+ */
+ update_other_load_avgs(rq);
+ }
+}
+
+static enum scx_cpu_preempt_reason
+preempt_reason_from_class(const struct sched_class *class)
+{
+#ifdef CONFIG_SMP
+ if (class == &stop_sched_class)
+ return SCX_CPU_PREEMPT_STOP;
+#endif
+ if (class == &dl_sched_class)
+ return SCX_CPU_PREEMPT_DL;
+ if (class == &rt_sched_class)
+ return SCX_CPU_PREEMPT_RT;
+ return SCX_CPU_PREEMPT_UNKNOWN;
+}
+
+static void switch_class(struct rq *rq, struct task_struct *next)
+{
+ const struct sched_class *next_class = next->sched_class;
+
+#ifdef CONFIG_SMP
+ /*
+ * Pairs with the smp_load_acquire() issued by a CPU in
+ * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
+ * resched.
+ */
+ smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
+#endif
+ if (!static_branch_unlikely(&scx_ops_cpu_preempt))
+ return;
+
+ /*
+ * The callback is conceptually meant to convey that the CPU is no
+ * longer under the control of SCX. Therefore, don't invoke the callback
+ * if the next class is below SCX (in which case the BPF scheduler has
+ * actively decided not to schedule any tasks on the CPU).
+ */
+ if (sched_class_above(&ext_sched_class, next_class))
+ return;
+
+ /*
+ * At this point we know that SCX was preempted by a higher priority
+ * sched_class, so invoke the ->cpu_release() callback if we have not
+ * done so already. We only send the callback once between SCX being
+ * preempted, and it regaining control of the CPU.
+ *
+ * ->cpu_release() complements ->cpu_acquire(), which is emitted the
+ * next time that balance_scx() is invoked.
+ */
+ if (!rq->scx.cpu_released) {
+ if (SCX_HAS_OP(cpu_release)) {
+ struct scx_cpu_release_args args = {
+ .reason = preempt_reason_from_class(next_class),
+ .task = next,
+ };
+
+ SCX_CALL_OP(SCX_KF_CPU_RELEASE,
+ cpu_release, cpu_of(rq), &args);
+ }
+ rq->scx.cpu_released = true;
+ }
+}
+
+static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
+ struct task_struct *next)
+{
+ update_curr_scx(rq);
+
+ /* see dequeue_task_scx() on why we skip when !QUEUED */
+ if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
+ SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);
+
+ if (p->scx.flags & SCX_TASK_QUEUED) {
+ set_task_runnable(rq, p);
+
+ /*
+ * If @p has slice left and is being put, @p is getting
+ * preempted by a higher priority scheduler class or core-sched
+ * forcing a different task. Leave it at the head of the local
+ * DSQ.
+ */
+ if (p->scx.slice && !scx_rq_bypassing(rq)) {
+ dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
+ return;
+ }
+
+ /*
+ * If @p is runnable but we're about to enter a lower
+ * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
+ * ops.enqueue() that @p is the only one available for this cpu,
+ * which should trigger an explicit follow-up scheduling event.
+ */
+ if (sched_class_above(&ext_sched_class, next->sched_class)) {
+ WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
+ do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
+ } else {
+ do_enqueue_task(rq, p, 0, -1);
+ }
+ }
+
+ if (next && next->sched_class != &ext_sched_class)
+ switch_class(rq, next);
+}
+
+static struct task_struct *first_local_task(struct rq *rq)
+{
+ return list_first_entry_or_null(&rq->scx.local_dsq.list,
+ struct task_struct, scx.dsq_list.node);
+}
+
+static struct task_struct *pick_task_scx(struct rq *rq)
+{
+ struct task_struct *prev = rq->curr;
+ struct task_struct *p;
+
+ /*
+ * If balance_scx() is telling us to keep running @prev, replenish slice
+ * if necessary and keep running @prev. Otherwise, pop the first one
+ * from the local DSQ.
+ *
+ * WORKAROUND:
+ *
+ * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
+ * have gone through balance_scx(). Unfortunately, there currently is a
+ * bug where fair could say yes on balance() but no on pick_task(),
+ * which then ends up calling pick_task_scx() without preceding
+ * balance_scx().
+ *
+ * For now, ignore cases where @prev is not on SCX. This isn't great and
+ * can theoretically lead to stalls. However, for switch_all cases, this
+ * happens only while a BPF scheduler is being loaded or unloaded, and,
+ * for partial cases, fair will likely keep triggering this CPU.
+ *
+ * Once fair is fixed, restore WARN_ON_ONCE().
+ */
+ if ((rq->scx.flags & SCX_RQ_BAL_KEEP) &&
+ prev->sched_class == &ext_sched_class) {
+ p = prev;
+ if (!p->scx.slice)
+ p->scx.slice = SCX_SLICE_DFL;
+ } else {
+ p = first_local_task(rq);
+ if (!p)
+ return NULL;
+
+ if (unlikely(!p->scx.slice)) {
+ if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
+ printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in pick_task_scx()\n",
+ p->comm, p->pid);
+ scx_warned_zero_slice = true;
+ }
+ p->scx.slice = SCX_SLICE_DFL;
+ }
+ }
+
+ return p;
+}
+
+#ifdef CONFIG_SCHED_CORE
+/**
+ * scx_prio_less - Task ordering for core-sched
+ * @a: task A
+ * @b: task B
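+ * @in_fi: whether the core is currently in forced idle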
+ *
+ * Core-sched is implemented as an additional scheduling layer on top of the
+ * usual sched_class'es and needs to find out the expected task ordering. For
+ * SCX, core-sched calls this function to interrogate the task ordering.
+ *
+ * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
+ * to implement the default task ordering. The older the timestamp, the higher
+ * priority the task - the global FIFO ordering matching the default scheduling
+ * behavior.
+ *
+ * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
+ * implement FIFO ordering within each local DSQ. See pick_task_scx().
+ */
+bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool in_fi)
+{
+ /*
+ * The const qualifiers are dropped from task_struct pointers when
+ * calling ops.core_sched_before(). Accesses are controlled by the
+ * verifier.
+ */
+ if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
+ return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
+ (struct task_struct *)a,
+ (struct task_struct *)b);
+ else
+ return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
+}
+#endif /* CONFIG_SCHED_CORE */
+
+#ifdef CONFIG_SMP
+
+static bool test_and_clear_cpu_idle(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * SMT mask should be cleared whether we can claim @cpu or not. The SMT
+ * cluster is not wholly idle either way. This also prevents
+ * scx_pick_idle_cpu() from getting caught in an infinite loop.
+ */
+ if (sched_smt_active()) {
+ const struct cpumask *smt = cpu_smt_mask(cpu);
+
+ /*
+ * If offline, @cpu is not its own sibling and
+ * scx_pick_idle_cpu() can get caught in an infinite loop as
+ * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
+ * is eventually cleared.
+ */
+ if (cpumask_intersects(smt, idle_masks.smt))
+ cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
+ else if (cpumask_test_cpu(cpu, idle_masks.smt))
+ __cpumask_clear_cpu(cpu, idle_masks.smt);
+ }
+#endif
+ return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
+}
+
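+/*
+ * Pick an idle CPU from @cpus_allowed, preferring wholly idle SMT cores. If
+ * %SCX_PICK_IDLE_CORE is set in @flags, only wholly idle cores are eligible.
+ * Returns -EBUSY if no matching idle CPU can be claimed.
+ */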
+static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
+{
+ int cpu;
+
+retry:
+ if (sched_smt_active()) {
+ cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
+ if (cpu < nr_cpu_ids)
+ goto found;
+
+ if (flags & SCX_PICK_IDLE_CORE)
+ return -EBUSY;
+ }
+
+ cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
+ if (cpu >= nr_cpu_ids)
+ return -EBUSY;
+
+found:
+ if (test_and_clear_cpu_idle(cpu))
+ return cpu;
+ else
+ goto retry;
+}
+
+static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ u64 wake_flags, bool *found)
+{
+ s32 cpu;
+
+ *found = false;
+
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return prev_cpu;
+ }
+
+ /*
+ * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
+ * underutilized, wake up @p to the local DSQ of the waker. Checking
+ * only for an empty local DSQ is insufficient as it could give the
+ * wakee an unfair advantage when the system is oversaturated.
+ * Checking only for the presence of idle CPUs is also insufficient as
+ * the local DSQ of the waker could have tasks piled up on it even if
+ * there is an idle core elsewhere on the system.
+ */
+ cpu = smp_processor_id();
+ if ((wake_flags & SCX_WAKE_SYNC) &&
+ !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) &&
+ cpu_rq(cpu)->scx.local_dsq.nr == 0) {
+ if (cpumask_test_cpu(cpu, p->cpus_ptr))
+ goto cpu_found;
+ }
+
+ /*
+ * If CPU has SMT, any wholly idle CPU is likely a better pick than
+ * partially idle @prev_cpu.
+ */
+ if (sched_smt_active()) {
+ if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
+ test_and_clear_cpu_idle(prev_cpu)) {
+ cpu = prev_cpu;
+ goto cpu_found;
+ }
+
+ cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
+ if (cpu >= 0)
+ goto cpu_found;
+ }
+
+ if (test_and_clear_cpu_idle(prev_cpu)) {
+ cpu = prev_cpu;
+ goto cpu_found;
+ }
+
+ cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
+ if (cpu >= 0)
+ goto cpu_found;
+
+ return prev_cpu;
+
+cpu_found:
+ *found = true;
+ return cpu;
+}
+
+static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
+{
+ /*
+ * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
+ * can be a good migration opportunity with low cache and memory
+ * footprint. Returning a CPU different than @prev_cpu triggers
+ * immediate rq migration. However, for SCX, as the current rq
+ * association doesn't dictate where the task is going to run, this
+ * doesn't fit well. If necessary, we can later add a dedicated method
+ * which can decide to preempt self to force it through the regular
+ * scheduling path.
+ */
+ if (unlikely(wake_flags & WF_EXEC))
+ return prev_cpu;
+
+ if (SCX_HAS_OP(select_cpu)) {
+ s32 cpu;
+ struct task_struct **ddsp_taskp;
+
+ ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
+ WARN_ON_ONCE(*ddsp_taskp);
+ *ddsp_taskp = p;
+
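+ /*
+ * With @p recorded in direct_dispatch_task, scx_bpf_dispatch()
+ * calls made from ops.select_cpu() can dispatch @p directly
+ * without going through ops.enqueue().
+ */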
+ cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
+ select_cpu, p, prev_cpu, wake_flags);
+ *ddsp_taskp = NULL;
+ if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
+ return cpu;
+ else
+ return prev_cpu;
+ } else {
+ bool found;
+ s32 cpu;
+
+ cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
+ if (found) {
+ p->scx.slice = SCX_SLICE_DFL;
+ p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
+ }
+ return cpu;
+ }
+}
+
+static void task_woken_scx(struct rq *rq, struct task_struct *p)
+{
+ run_deferred(rq);
+}
+
+static void set_cpus_allowed_scx(struct task_struct *p,
+ struct affinity_context *ac)
+{
+ set_cpus_allowed_common(p, ac);
+
+ /*
+ * The effective cpumask is stored in @p->cpus_ptr which may temporarily
+ * differ from the configured one in @p->cpus_mask. Always tell the BPF
+ * scheduler the effective one.
+ *
+ * Fine-grained memory write control is enforced by BPF making the const
+ * designation pointless. Cast it away when calling the operation.
+ */
+ if (SCX_HAS_OP(set_cpumask))
+ SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
+ (struct cpumask *)p->cpus_ptr);
+}
+
+static void reset_idle_masks(void)
+{
+ /*
+ * Consider all online cpus idle. Should converge to the actual state
+ * quickly.
+ */
+ cpumask_copy(idle_masks.cpu, cpu_online_mask);
+ cpumask_copy(idle_masks.smt, cpu_online_mask);
+}
+
+void __scx_update_idle(struct rq *rq, bool idle)
+{
+ int cpu = cpu_of(rq);
+
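+ /*
+ * Notify the BPF scheduler if it implements ops.update_idle(). When
+ * built-in idle tracking is disabled, the BPF scheduler fully owns
+ * idle tracking and the idle masks below are left alone.
+ */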
+ if (SCX_HAS_OP(update_idle)) {
+ SCX_CALL_OP(SCX_KF_REST, update_idle, cpu, idle);
+ if (!static_branch_unlikely(&scx_builtin_idle_enabled))
+ return;
+ }
+
+ if (idle)
+ cpumask_set_cpu(cpu, idle_masks.cpu);
+ else
+ cpumask_clear_cpu(cpu, idle_masks.cpu);
+
+#ifdef CONFIG_SCHED_SMT
+ if (sched_smt_active()) {
+ const struct cpumask *smt = cpu_smt_mask(cpu);
+
+ if (idle) {
+ /*
+ * idle_masks.smt handling is racy but that's fine as
+ * it's only for optimization and self-correcting.
+ */
+ for_each_cpu(cpu, smt) {
+ if (!cpumask_test_cpu(cpu, idle_masks.cpu))
+ return;
+ }
+ cpumask_or(idle_masks.smt, idle_masks.smt, smt);
+ } else {
+ cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
+ }
+ }
+#endif
+}
+
+static void handle_hotplug(struct rq *rq, bool online)
+{
+ int cpu = cpu_of(rq);
+
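+ /*
+ * @scx_hotplug_seq lets a loading scheduler detect hotplug events
+ * which raced its initialization - see check_hotplug_seq(). If the
+ * scheduler doesn't implement the matching callback, exit it and ask
+ * user space to restart.
+ */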
+ atomic_long_inc(&scx_hotplug_seq);
+
+ if (online && SCX_HAS_OP(cpu_online))
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
+ else if (!online && SCX_HAS_OP(cpu_offline))
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
+ else
+ scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
+ "cpu %d going %s, exiting scheduler", cpu,
+ online ? "online" : "offline");
+}
+
+void scx_rq_activate(struct rq *rq)
+{
+ handle_hotplug(rq, true);
+}
+
+void scx_rq_deactivate(struct rq *rq)
+{
+ handle_hotplug(rq, false);
+}
+
+static void rq_online_scx(struct rq *rq)
+{
+ rq->scx.flags |= SCX_RQ_ONLINE;
+}
+
+static void rq_offline_scx(struct rq *rq)
+{
+ rq->scx.flags &= ~SCX_RQ_ONLINE;
+}
+
+#else /* CONFIG_SMP */
+
+static bool test_and_clear_cpu_idle(int cpu) { return false; }
+static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
+static void reset_idle_masks(void) {}
+
+#endif /* CONFIG_SMP */
+
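+/*
+ * Returns %true if any task on @rq's runnable_list has been runnable for
+ * longer than scx_watchdog_timeout, erroring out the BPF scheduler in that
+ * case.
+ */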
+static bool check_rq_for_timeouts(struct rq *rq)
+{
+ struct task_struct *p;
+ struct rq_flags rf;
+ bool timed_out = false;
+
+ rq_lock_irqsave(rq, &rf);
+ list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
+ unsigned long last_runnable = p->scx.runnable_at;
+
+ if (unlikely(time_after(jiffies,
+ last_runnable + scx_watchdog_timeout))) {
+ u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
+
+ scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
+ "%s[%d] failed to run for %u.%03us",
+ p->comm, p->pid,
+ dur_ms / 1000, dur_ms % 1000);
+ timed_out = true;
+ break;
+ }
+ }
+ rq_unlock_irqrestore(rq, &rf);
+
+ return timed_out;
+}
+
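+/*
+ * Watchdog work function. Scans all online CPUs for stalled runnable tasks
+ * and re-arms itself at half the timeout interval.
+ */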
+static void scx_watchdog_workfn(struct work_struct *work)
+{
+ int cpu;
+
+ WRITE_ONCE(scx_watchdog_timestamp, jiffies);
+
+ for_each_online_cpu(cpu) {
+ if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
+ break;
+
+ cond_resched();
+ }
+ queue_delayed_work(system_unbound_wq, to_delayed_work(work),
+ scx_watchdog_timeout / 2);
+}
+
+void scx_tick(struct rq *rq)
+{
+ unsigned long last_check;
+
+ if (!scx_enabled())
+ return;
+
+ last_check = READ_ONCE(scx_watchdog_timestamp);
+ if (unlikely(time_after(jiffies,
+ last_check + READ_ONCE(scx_watchdog_timeout)))) {
+ u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
+
+ scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
+ "watchdog failed to check in for %u.%03us",
+ dur_ms / 1000, dur_ms % 1000);
+ }
+
+ update_other_load_avgs(rq);
+}
+
+static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
+{
+ update_curr_scx(rq);
+
+ /*
+ * While disabling, always resched and refresh core-sched timestamp as
+ * we can't trust the slice management or ops.core_sched_before().
+ */
+ if (scx_rq_bypassing(rq)) {
+ curr->scx.slice = 0;
+ touch_core_sched(rq, curr);
+ } else if (SCX_HAS_OP(tick)) {
+ SCX_CALL_OP(SCX_KF_REST, tick, curr);
+ }
+
+ if (!curr->scx.slice)
+ resched_curr(rq);
+}
+
+#ifdef CONFIG_EXT_GROUP_SCHED
+static struct cgroup *tg_cgrp(struct task_group *tg)
+{
+ /*
+ * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
+ * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
+ * root cgroup.
+ */
+ if (tg && tg->css.cgroup)
+ return tg->css.cgroup;
+ else
+ return &cgrp_dfl_root.cgrp;
+}
+
+#define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
+
+#else /* CONFIG_EXT_GROUP_SCHED */
+
+#define SCX_INIT_TASK_ARGS_CGROUP(tg)
+
+#endif /* CONFIG_EXT_GROUP_SCHED */
+
+static enum scx_task_state scx_get_task_state(const struct task_struct *p)
+{
+ return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
+}
+
+static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
+{
+ enum scx_task_state prev_state = scx_get_task_state(p);
+ bool warn = false;
+
+ BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
+
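+ /*
+ * Valid transitions are NONE -> INIT -> READY <-> ENABLED, plus any
+ * state back to NONE when a task exits. Everything else is warned
+ * about.
+ */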
+ switch (state) {
+ case SCX_TASK_NONE:
+ break;
+ case SCX_TASK_INIT:
+ warn = prev_state != SCX_TASK_NONE;
+ break;
+ case SCX_TASK_READY:
+ warn = prev_state == SCX_TASK_NONE;
+ break;
+ case SCX_TASK_ENABLED:
+ warn = prev_state != SCX_TASK_READY;
+ break;
+ default:
+ /* invalid target state; warn here as the early return skips WARN_ONCE() below */
+ WARN_ONCE(true, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
+ prev_state, state, p->comm, p->pid);
+ return;
+ }
+
+ WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
+ prev_state, state, p->comm, p->pid);
+
+ p->scx.flags &= ~SCX_TASK_STATE_MASK;
+ p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
+}
+
+static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
+{
+ int ret;
+
+ p->scx.disallow = false;
+
+ if (SCX_HAS_OP(init_task)) {
+ struct scx_init_task_args args = {
+ SCX_INIT_TASK_ARGS_CGROUP(tg)
+ .fork = fork,
+ };
+
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
+ if (unlikely(ret)) {
+ ret = ops_sanitize_err("init_task", ret);
+ return ret;
+ }
+ }
+
+ scx_set_task_state(p, SCX_TASK_INIT);
+
+ if (p->scx.disallow) {
+ if (!fork) {
+ struct rq *rq;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+
+ /*
+ * We're in the load path and @p->policy will be applied
+ * right after. Reverting @p->policy here and rejecting
+ * %SCHED_EXT transitions from scx_check_setscheduler()
+ * guarantees that if ops.init_task() sets @p->disallow,
+ * @p can never be in SCX.
+ */
+ if (p->policy == SCHED_EXT) {
+ p->policy = SCHED_NORMAL;
+ atomic_long_inc(&scx_nr_rejected);
+ }
+
+ task_rq_unlock(rq, p, &rf);
+ } else if (p->policy == SCHED_EXT) {
+ scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
+ p->comm, p->pid);
+ }
+ }
+
+ p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
+ return 0;
+}
+
+static void scx_ops_enable_task(struct task_struct *p)
+{
+ u32 weight;
+
+ lockdep_assert_rq_held(task_rq(p));
+
+ /*
+ * Set the weight before calling ops.enable() so that the scheduler
+ * doesn't see a stale value if they inspect the task struct.
+ */
+ if (task_has_idle_policy(p))
+ weight = WEIGHT_IDLEPRIO;
+ else
+ weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
+
+ p->scx.weight = sched_weight_to_cgroup(weight);
+
+ if (SCX_HAS_OP(enable))
+ SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
+ scx_set_task_state(p, SCX_TASK_ENABLED);
+
+ if (SCX_HAS_OP(set_weight))
+ SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
+}
+
+static void scx_ops_disable_task(struct task_struct *p)
+{
+ lockdep_assert_rq_held(task_rq(p));
+ WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
+
+ if (SCX_HAS_OP(disable))
+ SCX_CALL_OP(SCX_KF_REST, disable, p);
+ scx_set_task_state(p, SCX_TASK_READY);
+}
+
+static void scx_ops_exit_task(struct task_struct *p)
+{
+ struct scx_exit_task_args args = {
+ .cancelled = false,
+ };
+
+ lockdep_assert_rq_held(task_rq(p));
+
+ switch (scx_get_task_state(p)) {
+ case SCX_TASK_NONE:
+ return;
+ case SCX_TASK_INIT:
+ args.cancelled = true;
+ break;
+ case SCX_TASK_READY:
+ break;
+ case SCX_TASK_ENABLED:
+ scx_ops_disable_task(p);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return;
+ }
+
+ if (SCX_HAS_OP(exit_task))
+ SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
+ scx_set_task_state(p, SCX_TASK_NONE);
+}
+
+void init_scx_entity(struct sched_ext_entity *scx)
+{
+ /*
+ * init_idle() calls this function again after fork sequence is
+ * complete. Don't touch ->tasks_node as it's already linked.
+ */
+ memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));
+
+ INIT_LIST_HEAD(&scx->dsq_list.node);
+ RB_CLEAR_NODE(&scx->dsq_priq);
+ scx->sticky_cpu = -1;
+ scx->holding_cpu = -1;
+ INIT_LIST_HEAD(&scx->runnable_node);
+ scx->runnable_at = jiffies;
+ scx->ddsp_dsq_id = SCX_DSQ_INVALID;
+ scx->slice = SCX_SLICE_DFL;
+}
+
+void scx_pre_fork(struct task_struct *p)
+{
+ /*
+ * BPF scheduler enable/disable paths want to be able to iterate and
+ * update all tasks which can become complex when racing forks. As
+ * enable/disable are very cold paths, let's use a percpu_rwsem to
+ * exclude forks.
+ */
+ percpu_down_read(&scx_fork_rwsem);
+}
+
+int scx_fork(struct task_struct *p)
+{
+ percpu_rwsem_assert_held(&scx_fork_rwsem);
+
+ if (scx_ops_init_task_enabled)
+ return scx_ops_init_task(p, task_group(p), true);
+ else
+ return 0;
+}
+
+void scx_post_fork(struct task_struct *p)
+{
+ if (scx_ops_init_task_enabled) {
+ scx_set_task_state(p, SCX_TASK_READY);
+
+ /*
+ * Enable the task immediately if it's running on sched_ext.
+ * Otherwise, it'll be enabled in switching_to_scx() if and
+ * when it's ever configured to run with a SCHED_EXT policy.
+ */
+ if (p->sched_class == &ext_sched_class) {
+ struct rq_flags rf;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &rf);
+ scx_ops_enable_task(p);
+ task_rq_unlock(rq, p, &rf);
+ }
+ }
+
+ spin_lock_irq(&scx_tasks_lock);
+ list_add_tail(&p->scx.tasks_node, &scx_tasks);
+ spin_unlock_irq(&scx_tasks_lock);
+
+ percpu_up_read(&scx_fork_rwsem);
+}
+
+void scx_cancel_fork(struct task_struct *p)
+{
+ if (scx_enabled()) {
+ struct rq *rq;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+ WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
+ scx_ops_exit_task(p);
+ task_rq_unlock(rq, p, &rf);
+ }
+
+ percpu_up_read(&scx_fork_rwsem);
+}
+
+void sched_ext_free(struct task_struct *p)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scx_tasks_lock, flags);
+ list_del_init(&p->scx.tasks_node);
+ spin_unlock_irqrestore(&scx_tasks_lock, flags);
+
+ /*
+ * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
+ * ENABLED transitions can't race us. Disable ops for @p.
+ */
+ if (scx_get_task_state(p) != SCX_TASK_NONE) {
+ struct rq_flags rf;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &rf);
+ scx_ops_exit_task(p);
+ task_rq_unlock(rq, p, &rf);
+ }
+}
+
+static void reweight_task_scx(struct rq *rq, struct task_struct *p,
+ const struct load_weight *lw)
+{
+ lockdep_assert_rq_held(task_rq(p));
+
+ p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
+ if (SCX_HAS_OP(set_weight))
+ SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
+}
+
+static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
+{
+}
+
+static void switching_to_scx(struct rq *rq, struct task_struct *p)
+{
+ scx_ops_enable_task(p);
+
+ /*
+ * set_cpus_allowed_scx() is not called while @p is associated with a
+ * different scheduler class. Keep the BPF scheduler up-to-date.
+ */
+ if (SCX_HAS_OP(set_cpumask))
+ SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
+ (struct cpumask *)p->cpus_ptr);
+}
+
+static void switched_from_scx(struct rq *rq, struct task_struct *p)
+{
+ scx_ops_disable_task(p);
+}
+
+static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
+static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
+
+int scx_check_setscheduler(struct task_struct *p, int policy)
+{
+ lockdep_assert_rq_held(task_rq(p));
+
+ /* if disallow, reject transitioning into SCX */
+ if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
+ p->policy != policy && policy == SCHED_EXT)
+ return -EACCES;
+
+ return 0;
+}
+
+#ifdef CONFIG_NO_HZ_FULL
+bool scx_can_stop_tick(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ if (scx_rq_bypassing(rq))
+ return false;
+
+ if (p->sched_class != &ext_sched_class)
+ return true;
+
+ /*
+ * @rq can dispatch from different DSQs, so we can't tell whether it
+ * needs the tick or not by looking at nr_running. Allow stopping ticks
+ * iff the BPF scheduler indicated so. See set_next_task_scx().
+ */
+ return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
+}
+#endif
+
+#ifdef CONFIG_EXT_GROUP_SCHED
+
+DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
+static bool scx_cgroup_enabled;
+static bool cgroup_warned_missing_weight;
+static bool cgroup_warned_missing_idle;
+
+static void scx_cgroup_warn_missing_weight(struct task_group *tg)
+{
+ if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
+ cgroup_warned_missing_weight)
+ return;
+
+ if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
+ return;
+
+ pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
+ scx_ops.name);
+ cgroup_warned_missing_weight = true;
+}
+
+static void scx_cgroup_warn_missing_idle(struct task_group *tg)
+{
+ if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
+ return;
+
+ if (!tg->idle)
+ return;
+
+ pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
+ scx_ops.name);
+ cgroup_warned_missing_idle = true;
+}
+
+int scx_tg_online(struct task_group *tg)
+{
+ int ret = 0;
+
+ WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
+
+ percpu_down_read(&scx_cgroup_rwsem);
+
+ scx_cgroup_warn_missing_weight(tg);
+
+ if (scx_cgroup_enabled) {
+ if (SCX_HAS_OP(cgroup_init)) {
+ struct scx_cgroup_init_args args =
+ { .weight = tg->scx_weight };
+
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
+ tg->css.cgroup, &args);
+ if (ret)
+ ret = ops_sanitize_err("cgroup_init", ret);
+ }
+ if (ret == 0)
+ tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
+ } else {
+ tg->scx_flags |= SCX_TG_ONLINE;
+ }
+
+ percpu_up_read(&scx_cgroup_rwsem);
+ return ret;
+}
+
+void scx_tg_offline(struct task_group *tg)
+{
+ WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
+
+ percpu_down_read(&scx_cgroup_rwsem);
+
+ if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
+ tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
+
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+int scx_cgroup_can_attach(struct cgroup_taskset *tset)
+{
+ struct cgroup_subsys_state *css;
+ struct task_struct *p;
+ int ret;
+
+ /* released in scx_cgroup_finish/cancel_attach() */
+ percpu_down_read(&scx_cgroup_rwsem);
+
+ if (!scx_cgroup_enabled)
+ return 0;
+
+ cgroup_taskset_for_each(p, css, tset) {
+ struct cgroup *from = tg_cgrp(task_group(p));
+ struct cgroup *to = tg_cgrp(css_tg(css));
+
+ WARN_ON_ONCE(p->scx.cgrp_moving_from);
+
+ /*
+ * sched_move_task() omits identity migrations. Let's match the
+ * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
+ * always match one-to-one.
+ */
+ if (from == to)
+ continue;
+
+ if (SCX_HAS_OP(cgroup_prep_move)) {
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
+ p, from, css->cgroup);
+ if (ret)
+ goto err;
+ }
+
+ p->scx.cgrp_moving_from = from;
+ }
+
+ return 0;
+
+err:
+ cgroup_taskset_for_each(p, css, tset) {
+ if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
+ p->scx.cgrp_moving_from, css->cgroup);
+ p->scx.cgrp_moving_from = NULL;
+ }
+
+ percpu_up_read(&scx_cgroup_rwsem);
+ return ops_sanitize_err("cgroup_prep_move", ret);
+}
+
+void scx_move_task(struct task_struct *p)
+{
+ if (!scx_cgroup_enabled)
+ return;
+
+ /*
+ * We're called from sched_move_task() which handles both cgroup and
+ * autogroup moves. Ignore the latter.
+ *
+ * Also ignore exiting tasks, because in the exit path tasks transition
+ * from the autogroup to the root group, so task_group_is_autogroup()
+ * alone isn't able to catch exiting autogroup tasks. This is safe for
+ * cgroup_move(), because cgroup migrations never happen for PF_EXITING
+ * tasks.
+ */
+ if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
+ return;
+
+ /*
+ * @p must have ops.cgroup_prep_move() called on it and thus
+ * cgrp_moving_from set.
+ */
+ if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
+ SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
+ p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
+ p->scx.cgrp_moving_from = NULL;
+}
+
+void scx_cgroup_finish_attach(void)
+{
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
+{
+ struct cgroup_subsys_state *css;
+ struct task_struct *p;
+
+ if (!scx_cgroup_enabled)
+ goto out_unlock;
+
+ cgroup_taskset_for_each(p, css, tset) {
+ if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
+ p->scx.cgrp_moving_from, css->cgroup);
+ p->scx.cgrp_moving_from = NULL;
+ }
+out_unlock:
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_group_set_weight(struct task_group *tg, unsigned long weight)
+{
+ percpu_down_read(&scx_cgroup_rwsem);
+
+ if (scx_cgroup_enabled && tg->scx_weight != weight) {
+ if (SCX_HAS_OP(cgroup_set_weight))
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
+ tg_cgrp(tg), weight);
+ tg->scx_weight = weight;
+ }
+
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+void scx_group_set_idle(struct task_group *tg, bool idle)
+{
+ percpu_down_read(&scx_cgroup_rwsem);
+ scx_cgroup_warn_missing_idle(tg);
+ percpu_up_read(&scx_cgroup_rwsem);
+}
+
+static void scx_cgroup_lock(void)
+{
+ percpu_down_write(&scx_cgroup_rwsem);
+}
+
+static void scx_cgroup_unlock(void)
+{
+ percpu_up_write(&scx_cgroup_rwsem);
+}
+
+#else /* CONFIG_EXT_GROUP_SCHED */
+
+static inline void scx_cgroup_lock(void) {}
+static inline void scx_cgroup_unlock(void) {}
+
+#endif /* CONFIG_EXT_GROUP_SCHED */
+
+/*
+ * Omitted operations:
+ *
+ * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
+ * isn't tied to the CPU at that point. Preemption is implemented by resetting
+ * the victim task's slice to 0 and triggering reschedule on the target CPU.
+ *
+ * - migrate_task_rq: Unnecessary as the task-to-CPU mapping is transient.
+ *
+ * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
+ * their current sched_class. Call them directly from sched core instead.
+ */
+DEFINE_SCHED_CLASS(ext) = {
+ .enqueue_task = enqueue_task_scx,
+ .dequeue_task = dequeue_task_scx,
+ .yield_task = yield_task_scx,
+ .yield_to_task = yield_to_task_scx,
+
+ .wakeup_preempt = wakeup_preempt_scx,
+
+ .balance = balance_scx,
+ .pick_task = pick_task_scx,
+
+ .put_prev_task = put_prev_task_scx,
+ .set_next_task = set_next_task_scx,
+
+#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_scx,
+ .task_woken = task_woken_scx,
+ .set_cpus_allowed = set_cpus_allowed_scx,
+
+ .rq_online = rq_online_scx,
+ .rq_offline = rq_offline_scx,
+#endif
+
+ .task_tick = task_tick_scx,
+
+ .switching_to = switching_to_scx,
+ .switched_from = switched_from_scx,
+ .switched_to = switched_to_scx,
+ .reweight_task = reweight_task_scx,
+ .prio_changed = prio_changed_scx,
+
+ .update_curr = update_curr_scx,
+
+#ifdef CONFIG_UCLAMP_TASK
+ .uclamp_enabled = 1,
+#endif
+};
+
+static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
+{
+ memset(dsq, 0, sizeof(*dsq));
+
+ raw_spin_lock_init(&dsq->lock);
+ INIT_LIST_HEAD(&dsq->list);
+ dsq->id = dsq_id;
+}
+
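+/*
+ * Allocate, initialize and hash a user DSQ. DSQ IDs with
+ * %SCX_DSQ_FLAG_BUILTIN set are reserved and rejected with -EINVAL.
+ */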
+static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
+{
+ struct scx_dispatch_q *dsq;
+ int ret;
+
+ if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
+ return ERR_PTR(-EINVAL);
+
+ dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
+ if (!dsq)
+ return ERR_PTR(-ENOMEM);
+
+ init_dsq(dsq, dsq_id);
+
+ ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
+ dsq_hash_params);
+ if (ret) {
+ kfree(dsq);
+ return ERR_PTR(ret);
+ }
+ return dsq;
+}
+
+static void free_dsq_irq_workfn(struct irq_work *irq_work)
+{
+ struct llist_node *to_free = llist_del_all(&dsqs_to_free);
+ struct scx_dispatch_q *dsq, *tmp_dsq;
+
+ llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
+ kfree_rcu(dsq, rcu);
+}
+
+static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
+
+static void destroy_dsq(u64 dsq_id)
+{
+ struct scx_dispatch_q *dsq;
+ unsigned long flags;
+
+ rcu_read_lock();
+
+ dsq = find_user_dsq(dsq_id);
+ if (!dsq)
+ goto out_unlock_rcu;
+
+ raw_spin_lock_irqsave(&dsq->lock, flags);
+
+ if (dsq->nr) {
+ scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
+ dsq->id, dsq->nr);
+ goto out_unlock_dsq;
+ }
+
+ if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
+ goto out_unlock_dsq;
+
+ /*
+ * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
+ * queueing more tasks. As this function can be called from anywhere,
+ * freeing is bounced through an irq work to avoid nesting RCU
+ * operations inside scheduler locks.
+ */
+ dsq->id = SCX_DSQ_INVALID;
+ llist_add(&dsq->free_node, &dsqs_to_free);
+ irq_work_queue(&free_dsq_irq_work);
+
+out_unlock_dsq:
+ raw_spin_unlock_irqrestore(&dsq->lock, flags);
+out_unlock_rcu:
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_EXT_GROUP_SCHED
+static void scx_cgroup_exit(void)
+{
+ struct cgroup_subsys_state *css;
+
+ percpu_rwsem_assert_held(&scx_cgroup_rwsem);
+
+ WARN_ON_ONCE(!scx_cgroup_enabled);
+ scx_cgroup_enabled = false;
+
+ /*
+ * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
+ * cgroups and exit all the inited ones, all online cgroups are exited.
+ */
+ rcu_read_lock();
+ css_for_each_descendant_post(css, &root_task_group.css) {
+ struct task_group *tg = css_tg(css);
+
+ if (!(tg->scx_flags & SCX_TG_INITED))
+ continue;
+ tg->scx_flags &= ~SCX_TG_INITED;
+
+ if (!scx_ops.cgroup_exit)
+ continue;
+
+ if (WARN_ON_ONCE(!css_tryget(css)))
+ continue;
+ rcu_read_unlock();
+
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
+
+ rcu_read_lock();
+ css_put(css);
+ }
+ rcu_read_unlock();
+}
+
+static int scx_cgroup_init(void)
+{
+ struct cgroup_subsys_state *css;
+ int ret;
+
+ percpu_rwsem_assert_held(&scx_cgroup_rwsem);
+
+ cgroup_warned_missing_weight = false;
+ cgroup_warned_missing_idle = false;
+
+ /*
+ * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
+ * cgroups and init all the online ones, all online cgroups are initialized.
+ */
+ rcu_read_lock();
+ css_for_each_descendant_pre(css, &root_task_group.css) {
+ struct task_group *tg = css_tg(css);
+ struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
+
+ scx_cgroup_warn_missing_weight(tg);
+ scx_cgroup_warn_missing_idle(tg);
+
+ if ((tg->scx_flags &
+ (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
+ continue;
+
+ if (!scx_ops.cgroup_init) {
+ tg->scx_flags |= SCX_TG_INITED;
+ continue;
+ }
+
+ if (WARN_ON_ONCE(!css_tryget(css)))
+ continue;
+ rcu_read_unlock();
+
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
+ css->cgroup, &args);
+ if (ret) {
+ css_put(css);
+ return ret;
+ }
+ tg->scx_flags |= SCX_TG_INITED;
+
+ rcu_read_lock();
+ css_put(css);
+ }
+ rcu_read_unlock();
+
+ WARN_ON_ONCE(scx_cgroup_enabled);
+ scx_cgroup_enabled = true;
+
+ return 0;
+}
+
+#else
+static void scx_cgroup_exit(void) {}
+static int scx_cgroup_init(void) { return 0; }
+#endif
+
+
+/********************************************************************************
+ * Sysfs interface and ops enable/disable.
+ */
+
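+/* define a read-only (0444) kobj_attribute backed by scx_attr_<name>_show() */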
+#define SCX_ATTR(_name) \
+ static struct kobj_attribute scx_attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = scx_attr_##_name##_show, \
+ }
+
+static ssize_t scx_attr_state_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ scx_ops_enable_state_str[scx_ops_enable_state()]);
+}
+SCX_ATTR(state);
+
+static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
+}
+SCX_ATTR(switch_all);
+
+static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
+}
+SCX_ATTR(nr_rejected);
+
+static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
+}
+SCX_ATTR(hotplug_seq);
+
+static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
+}
+SCX_ATTR(enable_seq);
+
+static struct attribute *scx_global_attrs[] = {
+ &scx_attr_state.attr,
+ &scx_attr_switch_all.attr,
+ &scx_attr_nr_rejected.attr,
+ &scx_attr_hotplug_seq.attr,
+ &scx_attr_enable_seq.attr,
+ NULL,
+};
+
+static const struct attribute_group scx_global_attr_group = {
+ .attrs = scx_global_attrs,
+};
+
+static void scx_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static ssize_t scx_attr_ops_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", scx_ops.name);
+}
+SCX_ATTR(ops);
+
+static struct attribute *scx_sched_attrs[] = {
+ &scx_attr_ops.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scx_sched);
+
+static const struct kobj_type scx_ktype = {
+ .release = scx_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = scx_sched_groups,
+};
+
+static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
+}
+
+static const struct kset_uevent_ops scx_uevent_ops = {
+ .uevent = scx_uevent,
+};
+
+/*
+ * Used by sched_fork() and __setscheduler_prio() to pick the matching
+ * sched_class. dl/rt are already handled.
+ */
+bool task_should_scx(struct task_struct *p)
+{
+ if (!scx_enabled() ||
+ unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
+ return false;
+ if (READ_ONCE(scx_switching_all))
+ return true;
+ return p->policy == SCHED_EXT;
+}
+
+/**
+ * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
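+ * @bypass: %true to enter the bypass mode, %false to leave it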
+ *
+ * Bypassing guarantees that all runnable tasks make forward progress without
+ * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
+ * be held by tasks that the BPF scheduler is forgetting to run, which
+ * unfortunately also excludes toggling the static branches.
+ *
+ * Let's work around this by overriding a couple of ops and modifying behaviors
+ * based on the bypassing state, and then cycling the queued tasks through
+ * dequeue/enqueue to force global FIFO scheduling.
+ *
+ * a. ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
+ * %SCX_OPS_ENQ_LAST is also ignored.
+ *
+ * b. ops.dispatch() is ignored.
+ *
+ * c. balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
+ * can't be trusted. Whenever a tick triggers, the running task is rotated to
+ * the tail of the queue with core_sched_at touched.
+ *
+ * d. pick_task_scx() suppresses the zero slice warning.
+ *
+ * e. scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
+ * operations.
+ *
+ * f. scx_prio_less() reverts to the default core_sched_at order.
+ */
+static void scx_ops_bypass(bool bypass)
+{
+ int depth, cpu;
+
+ if (bypass) {
+ depth = atomic_inc_return(&scx_ops_bypass_depth);
+ WARN_ON_ONCE(depth <= 0);
+ if (depth != 1)
+ return;
+ } else {
+ depth = atomic_dec_return(&scx_ops_bypass_depth);
+ WARN_ON_ONCE(depth < 0);
+ if (depth != 0)
+ return;
+ }
+
+ /*
+ * No task property is changing. We just need to make sure all currently
+ * queued tasks are re-queued according to the new scx_rq_bypassing()
+ * state. As an optimization, walk each rq's runnable_list instead of
+ * the scx_tasks list.
+ *
+ * This function can't trust the scheduler and thus can't use
+ * cpus_read_lock(). Walk all possible CPUs instead of online.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+ struct task_struct *p, *n;
+
+ rq_lock_irqsave(rq, &rf);
+
+ if (bypass) {
+ WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
+ rq->scx.flags |= SCX_RQ_BYPASSING;
+ } else {
+ WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
+ rq->scx.flags &= ~SCX_RQ_BYPASSING;
+ }
+
+ /*
+ * We need to guarantee that no tasks are on the BPF scheduler
+ * while bypassing. Either we see enabled or the enable path
+ * sees scx_rq_bypassing() before moving tasks to SCX.
+ */
+ if (!scx_enabled()) {
+ rq_unlock_irqrestore(rq, &rf);
+ continue;
+ }
+
+ /*
+ * The use of list_for_each_entry_safe_reverse() is required
+ * because each task is going to be removed from and added back
+ * to the runnable_list during iteration. Because they're added
+ * to the tail of the list, safe reverse iteration can still
+ * visit all nodes.
+ */
+ list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
+ scx.runnable_node) {
+ struct sched_enq_and_set_ctx ctx;
+
+ /* cycling deq/enq is enough, see the function comment */
+ sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
+ sched_enq_and_set_task(&ctx);
+ }
+
+ rq_unlock_irqrestore(rq, &rf);
+
+ /* kick to restore ticks */
+ resched_cpu(cpu);
+ }
+}
+
+static void free_exit_info(struct scx_exit_info *ei)
+{
+ kfree(ei->dump);
+ kfree(ei->msg);
+ kfree(ei->bt);
+ kfree(ei);
+}
+
+static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
+{
+ struct scx_exit_info *ei;
+
+ ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+ if (!ei)
+ return NULL;
+
+ ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
+ ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
+ ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
+
+ if (!ei->bt || !ei->msg || !ei->dump) {
+ free_exit_info(ei);
+ return NULL;
+ }
+
+ return ei;
+}
+
+static const char *scx_exit_reason(enum scx_exit_kind kind)
+{
+ switch (kind) {
+ case SCX_EXIT_UNREG:
+ return "unregistered from user space";
+ case SCX_EXIT_UNREG_BPF:
+ return "unregistered from BPF";
+ case SCX_EXIT_UNREG_KERN:
+ return "unregistered from the main kernel";
+ case SCX_EXIT_SYSRQ:
+ return "disabled by sysrq-S";
+ case SCX_EXIT_ERROR:
+ return "runtime error";
+ case SCX_EXIT_ERROR_BPF:
+ return "scx_bpf_error";
+ case SCX_EXIT_ERROR_STALL:
+ return "runnable task stall";
+ default:
+ return "<UNKNOWN>";
+ }
+}
+
+static void scx_ops_disable_workfn(struct kthread_work *work)
+{
+ struct scx_exit_info *ei = scx_exit_info;
+ struct scx_task_iter sti;
+ struct task_struct *p;
+ struct rhashtable_iter rht_iter;
+ struct scx_dispatch_q *dsq;
+ int i, kind;
+
+ kind = atomic_read(&scx_exit_kind);
+ while (true) {
+ /*
+ * NONE indicates that a new scx_ops has been registered since
+ * disable was scheduled - don't kill the new ops. DONE
+ * indicates that the ops has already been disabled.
+ */
+ if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
+ return;
+ if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
+ break;
+ }
+ ei->kind = kind;
+ ei->reason = scx_exit_reason(ei->kind);
+
+ /* guarantee forward progress by bypassing scx_ops */
+ scx_ops_bypass(true);
+
+ switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
+ case SCX_OPS_DISABLING:
+ WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
+ break;
+ case SCX_OPS_DISABLED:
+ pr_warn("sched_ext: ops error detected without ops (%s)\n",
+ scx_exit_info->msg);
+ WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
+ SCX_OPS_DISABLING);
+ goto done;
+ default:
+ break;
+ }
+
+ /*
+ * Here, every runnable task is guaranteed to make forward progress and
+ * we can safely use blocking synchronization constructs. Actually
+ * disable ops.
+ */
+ mutex_lock(&scx_ops_enable_mutex);
+
+ static_branch_disable(&__scx_switched_all);
+ WRITE_ONCE(scx_switching_all, false);
+
+ /*
+ * Shut down cgroup support before tasks so that the cgroup attach path
+ * doesn't race against scx_ops_exit_task().
+ */
+ scx_cgroup_lock();
+ scx_cgroup_exit();
+ scx_cgroup_unlock();
+
+ /*
+ * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
+ * must be switched out and exited synchronously.
+ */
+ percpu_down_write(&scx_fork_rwsem);
+
+ scx_ops_init_task_enabled = false;
+
+ spin_lock_irq(&scx_tasks_lock);
+ scx_task_iter_init(&sti);
+ while ((p = scx_task_iter_next_locked(&sti))) {
+ const struct sched_class *old_class = p->sched_class;
+ struct sched_enq_and_set_ctx ctx;
+
+ sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
+
+ p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL);
+ __setscheduler_prio(p, p->prio);
+ check_class_changing(task_rq(p), p, old_class);
+
+ sched_enq_and_set_task(&ctx);
+
+ check_class_changed(task_rq(p), p, old_class, p->prio);
+ scx_ops_exit_task(p);
+ }
+ scx_task_iter_exit(&sti);
+ spin_unlock_irq(&scx_tasks_lock);
+ percpu_up_write(&scx_fork_rwsem);
+
+ /* no task is on scx, turn off all the switches and flush in-progress calls */
+ static_branch_disable(&__scx_ops_enabled);
+ for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
+ static_branch_disable(&scx_has_op[i]);
+ static_branch_disable(&scx_ops_enq_last);
+ static_branch_disable(&scx_ops_enq_exiting);
+ static_branch_disable(&scx_ops_cpu_preempt);
+ static_branch_disable(&scx_builtin_idle_enabled);
+ synchronize_rcu();
+
+ if (ei->kind >= SCX_EXIT_ERROR) {
+ pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
+ scx_ops.name, ei->reason);
+
+ if (ei->msg[0] != '\0')
+ pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
+#ifdef CONFIG_STACKTRACE
+ stack_trace_print(ei->bt, ei->bt_len, 2);
+#endif
+ } else {
+ pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
+ scx_ops.name, ei->reason);
+ }
+
+ if (scx_ops.exit)
+ SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
+
+ cancel_delayed_work_sync(&scx_watchdog_work);
+
+ /*
+ * Delete the kobject from the hierarchy eagerly in addition to just
+ * dropping a reference. Otherwise, if the object is deleted
+ * asynchronously, sysfs could observe an object of the same name still
+ * in the hierarchy when another scheduler is loaded.
+ */
+ kobject_del(scx_root_kobj);
+ kobject_put(scx_root_kobj);
+ scx_root_kobj = NULL;
+
+ memset(&scx_ops, 0, sizeof(scx_ops));
+
+ rhashtable_walk_enter(&dsq_hash, &rht_iter);
+ do {
+ rhashtable_walk_start(&rht_iter);
+
+ while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
+ destroy_dsq(dsq->id);
+
+ rhashtable_walk_stop(&rht_iter);
+ } while (dsq == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&rht_iter);
+
+ free_percpu(scx_dsp_ctx);
+ scx_dsp_ctx = NULL;
+ scx_dsp_max_batch = 0;
+
+ free_exit_info(scx_exit_info);
+ scx_exit_info = NULL;
+
+ mutex_unlock(&scx_ops_enable_mutex);
+
+ WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
+ SCX_OPS_DISABLING);
+done:
+ scx_ops_bypass(false);
+}
+
+static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
+
+static void schedule_scx_ops_disable_work(void)
+{
+ struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
+
+ /*
+ * We may be called spuriously before the first bpf_sched_ext_reg(). If
+ * scx_ops_helper isn't set up yet, there's nothing to do.
+ */
+ if (helper)
+ kthread_queue_work(helper, &scx_ops_disable_work);
+}
+
+static void scx_ops_disable(enum scx_exit_kind kind)
+{
+ int none = SCX_EXIT_NONE;
+
+ if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
+ kind = SCX_EXIT_ERROR;
+
+ atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
+
+ schedule_scx_ops_disable_work();
+}
+
+static void dump_newline(struct seq_buf *s)
+{
+ trace_sched_ext_dump("");
+
+ /* @s may be zero sized and seq_buf triggers WARN if so */
+ if (s->size)
+ seq_buf_putc(s, '\n');
+}
+
+static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
+{
+ va_list args;
+
+#ifdef CONFIG_TRACEPOINTS
+ if (trace_sched_ext_dump_enabled()) {
+ /* protected by scx_dump_state()::dump_lock */
+ static char line_buf[SCX_EXIT_MSG_LEN];
+
+ va_start(args, fmt);
+ vscnprintf(line_buf, sizeof(line_buf), fmt, args);
+ va_end(args);
+
+ trace_sched_ext_dump(line_buf);
+ }
+#endif
+ /* @s may be zero sized and seq_buf triggers WARN if so */
+ if (s->size) {
+ va_start(args, fmt);
+ seq_buf_vprintf(s, fmt, args);
+ va_end(args);
+
+ seq_buf_putc(s, '\n');
+ }
+}
+
+static void dump_stack_trace(struct seq_buf *s, const char *prefix,
+ const unsigned long *bt, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ dump_line(s, "%s%pS", prefix, (void *)bt[i]);
+}
+
+static void ops_dump_init(struct seq_buf *s, const char *prefix)
+{
+ struct scx_dump_data *dd = &scx_dump_data;
+
+ lockdep_assert_irqs_disabled();
+
+ dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
+ dd->first = true;
+ dd->cursor = 0;
+ dd->s = s;
+ dd->prefix = prefix;
+}
+
+static void ops_dump_flush(void)
+{
+ struct scx_dump_data *dd = &scx_dump_data;
+ char *line = dd->buf.line;
+
+ if (!dd->cursor)
+ return;
+
+ /*
+ * There's something to flush and this is the first line. Insert a blank
+ * line to distinguish the ops dump.
+ */
+ if (dd->first) {
+ dump_newline(dd->s);
+ dd->first = false;
+ }
+
+ /*
+ * There may be multiple lines in $line. Scan and emit each line
+ * separately.
+ */
+ while (true) {
+ char *end = line;
+ char c;
+
+ while (*end != '\n' && *end != '\0')
+ end++;
+
+ /*
+ * If $line overflowed, it may not have newline at the end.
+ * Always emit with a newline.
+ */
+ c = *end;
+ *end = '\0';
+ dump_line(dd->s, "%s%s", dd->prefix, line);
+ if (c == '\0')
+ break;
+
+ /* move to the next line */
+ end++;
+ if (*end == '\0')
+ break;
+ line = end;
+ }
+
+ dd->cursor = 0;
+}
+
+static void ops_dump_exit(void)
+{
+ ops_dump_flush();
+ scx_dump_data.cpu = -1;
+}
+
+static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
+ struct task_struct *p, char marker)
+{
+ static unsigned long bt[SCX_EXIT_BT_LEN];
+ char dsq_id_buf[19] = "(n/a)";
+ unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
+ unsigned int bt_len = 0;
+
+ if (p->scx.dsq)
+ scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
+ (unsigned long long)p->scx.dsq->id);
+
+ dump_newline(s);
+ dump_line(s, " %c%c %s[%d] %+ldms",
+ marker, task_state_to_char(p), p->comm, p->pid,
+ jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
+ dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
+ scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
+ p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
+ ops_state >> SCX_OPSS_QSEQ_SHIFT);
+ dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
+ p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
+ p->scx.dsq_vtime);
+ dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
+
+ if (SCX_HAS_OP(dump_task)) {
+ ops_dump_init(s, " ");
+ SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
+ ops_dump_exit();
+ }
+
+#ifdef CONFIG_STACKTRACE
+ bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
+#endif
+ if (bt_len) {
+ dump_newline(s);
+ dump_stack_trace(s, " ", bt, bt_len);
+ }
+}
+
+static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
+{
+ static DEFINE_SPINLOCK(dump_lock);
+ static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
+ struct scx_dump_ctx dctx = {
+ .kind = ei->kind,
+ .exit_code = ei->exit_code,
+ .reason = ei->reason,
+ .at_ns = ktime_get_ns(),
+ .at_jiffies = jiffies,
+ };
+ struct seq_buf s;
+ unsigned long flags;
+ char *buf;
+ int cpu;
+
+ spin_lock_irqsave(&dump_lock, flags);
+
+ seq_buf_init(&s, ei->dump, dump_len);
+
+ if (ei->kind == SCX_EXIT_NONE) {
+ dump_line(&s, "Debug dump triggered by %s", ei->reason);
+ } else {
+ dump_line(&s, "%s[%d] triggered exit kind %d:",
+ current->comm, current->pid, ei->kind);
+ dump_line(&s, " %s (%s)", ei->reason, ei->msg);
+ dump_newline(&s);
+ dump_line(&s, "Backtrace:");
+ dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
+ }
+
+ if (SCX_HAS_OP(dump)) {
+ ops_dump_init(&s, "");
+ SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
+ ops_dump_exit();
+ }
+
+ dump_newline(&s);
+ dump_line(&s, "CPU states");
+ dump_line(&s, "----------");
+
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+ struct task_struct *p;
+ struct seq_buf ns;
+ size_t avail, used;
+ bool idle;
+
+ rq_lock(rq, &rf);
+
+ idle = list_empty(&rq->scx.runnable_list) &&
+ rq->curr->sched_class == &idle_sched_class;
+
+ if (idle && !SCX_HAS_OP(dump_cpu))
+ goto next;
+
+ /*
+ * We don't yet know whether ops.dump_cpu() will produce output
+ * and we may want to skip the default CPU dump if it doesn't.
+ * Use a nested seq_buf to generate the standard dump so that we
+ * can decide whether to commit later.
+ */
+ avail = seq_buf_get_buf(&s, &buf);
+ seq_buf_init(&ns, buf, avail);
+
+ dump_newline(&ns);
+ dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
+ cpu, rq->scx.nr_running, rq->scx.flags,
+ rq->scx.cpu_released, rq->scx.ops_qseq,
+ rq->scx.pnt_seq);
+ dump_line(&ns, " curr=%s[%d] class=%ps",
+ rq->curr->comm, rq->curr->pid,
+ rq->curr->sched_class);
+ if (!cpumask_empty(rq->scx.cpus_to_kick))
+ dump_line(&ns, " cpus_to_kick : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_kick));
+ if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
+ dump_line(&ns, " idle_to_kick : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
+ if (!cpumask_empty(rq->scx.cpus_to_preempt))
+ dump_line(&ns, " cpus_to_preempt: %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_preempt));
+ if (!cpumask_empty(rq->scx.cpus_to_wait))
+ dump_line(&ns, " cpus_to_wait : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_wait));
+
+ used = seq_buf_used(&ns);
+ if (SCX_HAS_OP(dump_cpu)) {
+ ops_dump_init(&ns, " ");
+ SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
+ ops_dump_exit();
+ }
+
+ /*
+ * If idle && nothing generated by ops.dump_cpu(), there's
+ * nothing interesting. Skip.
+ */
+ if (idle && used == seq_buf_used(&ns))
+ goto next;
+
+ /*
+ * $s may already have overflowed when $ns was created. If so,
+ * calling commit on it will trigger a BUG().
+ */
+ if (avail) {
+ seq_buf_commit(&s, seq_buf_used(&ns));
+ if (seq_buf_has_overflowed(&ns))
+ seq_buf_set_overflow(&s);
+ }
+
+ if (rq->curr->sched_class == &ext_sched_class)
+ scx_dump_task(&s, &dctx, rq->curr, '*');
+
+ list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
+ scx_dump_task(&s, &dctx, p, ' ');
+ next:
+ rq_unlock(rq, &rf);
+ }
+
+ if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
+ memcpy(ei->dump + dump_len - sizeof(trunc_marker),
+ trunc_marker, sizeof(trunc_marker));
+
+ spin_unlock_irqrestore(&dump_lock, flags);
+}
+
+static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
+{
+ struct scx_exit_info *ei = scx_exit_info;
+
+ if (ei->kind >= SCX_EXIT_ERROR)
+ scx_dump_state(ei, scx_ops.exit_dump_len);
+
+ schedule_scx_ops_disable_work();
+}
+
+static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
+
+static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
+ s64 exit_code,
+ const char *fmt, ...)
+{
+ struct scx_exit_info *ei = scx_exit_info;
+ int none = SCX_EXIT_NONE;
+ va_list args;
+
+ if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
+ return;
+
+ ei->exit_code = exit_code;
+#ifdef CONFIG_STACKTRACE
+ if (kind >= SCX_EXIT_ERROR)
+ ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
+#endif
+ va_start(args, fmt);
+ vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
+ va_end(args);
+
+ /*
+ * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
+ * in scx_ops_disable_workfn().
+ */
+ ei->kind = kind;
+ ei->reason = scx_exit_reason(ei->kind);
+
+ irq_work_queue(&scx_ops_error_irq_work);
+}
+
+static struct kthread_worker *scx_create_rt_helper(const char *name)
+{
+ struct kthread_worker *helper;
+
+ helper = kthread_create_worker(0, name);
+ if (helper)
+ sched_set_fifo(helper->task);
+ return helper;
+}
+
+static void check_hotplug_seq(const struct sched_ext_ops *ops)
+{
+ unsigned long long global_hotplug_seq;
+
+ /*
+ * If a hotplug event has occurred between when a scheduler was
+ * initialized and when we were able to attach, exit and notify user
+ * space about it.
+ */
+ if (ops->hotplug_seq) {
+ global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
+ if (ops->hotplug_seq != global_hotplug_seq) {
+ scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
+ "expected hotplug seq %llu did not match actual %llu",
+ ops->hotplug_seq, global_hotplug_seq);
+ }
+ }
+}
+
+static int validate_ops(const struct sched_ext_ops *ops)
+{
+ /*
+ * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
+ * ops.enqueue() callback isn't implemented.
+ */
+ if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
+ scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+{
+ struct scx_task_iter sti;
+ struct task_struct *p;
+ unsigned long timeout;
+ int i, cpu, node, ret;
+
+ if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
+ cpu_possible_mask)) {
+ pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation");
+ return -EINVAL;
+ }
+
+ mutex_lock(&scx_ops_enable_mutex);
+
+ if (!scx_ops_helper) {
+ WRITE_ONCE(scx_ops_helper,
+ scx_create_rt_helper("sched_ext_ops_helper"));
+ if (!scx_ops_helper) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+ }
+
+ if (!global_dsqs) {
+ struct scx_dispatch_q **dsqs;
+
+ dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
+ if (!dsqs) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ for_each_node_state(node, N_POSSIBLE) {
+ struct scx_dispatch_q *dsq;
+
+ dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
+ if (!dsq) {
+ for_each_node_state(node, N_POSSIBLE)
+ kfree(dsqs[node]);
+ kfree(dsqs);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ init_dsq(dsq, SCX_DSQ_GLOBAL);
+ dsqs[node] = dsq;
+ }
+
+ global_dsqs = dsqs;
+ }
+
+ if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
+ if (!scx_root_kobj) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ scx_root_kobj->kset = scx_kset;
+ ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
+ if (ret < 0)
+ goto err;
+
+ scx_exit_info = alloc_exit_info(ops->exit_dump_len);
+ if (!scx_exit_info) {
+ ret = -ENOMEM;
+ goto err_del;
+ }
+
+ /*
+ * Set scx_ops, transition to ENABLING and clear exit info to arm the
+ * disable path. Failure triggers full disabling from here on.
+ */
+ scx_ops = *ops;
+
+ WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
+ SCX_OPS_DISABLED);
+
+ atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
+ scx_warned_zero_slice = false;
+
+ atomic_long_set(&scx_nr_rejected, 0);
+
+ for_each_possible_cpu(cpu)
+ cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
+
+ /*
+ * Keep CPUs stable during enable so that the BPF scheduler can track
+ * online CPUs by watching ->on/offline_cpu() after ->init().
+ */
+ cpus_read_lock();
+
+ if (scx_ops.init) {
+ ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
+ if (ret) {
+ ret = ops_sanitize_err("init", ret);
+ cpus_read_unlock();
+ goto err_disable;
+ }
+ }
+
+ for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
+ if (((void (**)(void))ops)[i])
+ static_branch_enable_cpuslocked(&scx_has_op[i]);
+
+ check_hotplug_seq(ops);
+ cpus_read_unlock();
+
+ ret = validate_ops(ops);
+ if (ret)
+ goto err_disable;
+
+ WARN_ON_ONCE(scx_dsp_ctx);
+ scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
+ scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
+ scx_dsp_max_batch),
+ __alignof__(struct scx_dsp_ctx));
+ if (!scx_dsp_ctx) {
+ ret = -ENOMEM;
+ goto err_disable;
+ }
+
+ if (ops->timeout_ms)
+ timeout = msecs_to_jiffies(ops->timeout_ms);
+ else
+ timeout = SCX_WATCHDOG_MAX_TIMEOUT;
+
+ WRITE_ONCE(scx_watchdog_timeout, timeout);
+ WRITE_ONCE(scx_watchdog_timestamp, jiffies);
+ queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
+ scx_watchdog_timeout / 2);
+
+ /*
+ * Once __scx_ops_enabled is set, %current can be switched to SCX
+ * anytime. This can lead to stalls as some BPF schedulers (e.g.
+ * userspace scheduling) may not function correctly before all tasks are
+ * switched. Init in bypass mode to guarantee forward progress.
+ */
+ scx_ops_bypass(true);
+
+ for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
+ if (((void (**)(void))ops)[i])
+ static_branch_enable(&scx_has_op[i]);
+
+ if (ops->flags & SCX_OPS_ENQ_LAST)
+ static_branch_enable(&scx_ops_enq_last);
+
+ if (ops->flags & SCX_OPS_ENQ_EXITING)
+ static_branch_enable(&scx_ops_enq_exiting);
+ if (scx_ops.cpu_acquire || scx_ops.cpu_release)
+ static_branch_enable(&scx_ops_cpu_preempt);
+
+ if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
+ reset_idle_masks();
+ static_branch_enable(&scx_builtin_idle_enabled);
+ } else {
+ static_branch_disable(&scx_builtin_idle_enabled);
+ }
+
+ /*
+ * Lock out forks, cgroup on/offlining and moves before opening the
+ * floodgate so that they don't wander into the operations prematurely.
+ */
+ percpu_down_write(&scx_fork_rwsem);
+
+ WARN_ON_ONCE(scx_ops_init_task_enabled);
+ scx_ops_init_task_enabled = true;
+
+ /*
+ * Enable ops for every task. Fork is excluded by scx_fork_rwsem
+ * preventing new tasks from being added. No need to exclude tasks
+ * leaving as sched_ext_free() can handle both prepped and enabled
+ * tasks. Prep all tasks first and then enable them with preemption
+ * disabled.
+ *
+ * All cgroups should be initialized before scx_ops_init_task() so that
+ * the BPF scheduler can reliably track each task's cgroup membership
+ * from scx_ops_init_task(). Lock out cgroup on/offlining and task
+ * migrations while tasks are being initialized so that
+ * scx_cgroup_can_attach() never sees uninitialized tasks.
+ */
+ scx_cgroup_lock();
+ ret = scx_cgroup_init();
+ if (ret)
+ goto err_disable_unlock_all;
+
+ spin_lock_irq(&scx_tasks_lock);
+ scx_task_iter_init(&sti);
+ while ((p = scx_task_iter_next_locked(&sti))) {
+ /*
+ * @p may already be dead, have lost all its usage counts and
+ * be waiting for RCU grace period before being freed. @p can't
+ * be initialized for SCX in such cases and should be ignored.
+ */
+ if (!tryget_task_struct(p))
+ continue;
+
+ scx_task_iter_rq_unlock(&sti);
+ spin_unlock_irq(&scx_tasks_lock);
+
+ ret = scx_ops_init_task(p, task_group(p), false);
+ if (ret) {
+ put_task_struct(p);
+ spin_lock_irq(&scx_tasks_lock);
+ scx_task_iter_exit(&sti);
+ spin_unlock_irq(&scx_tasks_lock);
+ pr_err("sched_ext: ops.init_task() failed (%d) for %s[%d] while loading\n",
+ ret, p->comm, p->pid);
+ goto err_disable_unlock_all;
+ }
+
+ scx_set_task_state(p, SCX_TASK_READY);
+
+ put_task_struct(p);
+ spin_lock_irq(&scx_tasks_lock);
+ }
+ scx_task_iter_exit(&sti);
+ spin_unlock_irq(&scx_tasks_lock);
+ scx_cgroup_unlock();
+ percpu_up_write(&scx_fork_rwsem);
+
+ /*
+ * All tasks are READY. It's safe to turn on scx_enabled() and switch
+ * all eligible tasks.
+ */
+ WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
+ static_branch_enable(&__scx_ops_enabled);
+
+ /*
+ * We're fully committed and can't fail. The task READY -> ENABLED
+ * transitions here are synchronized against sched_ext_free() through
+ * scx_tasks_lock.
+ */
+ percpu_down_write(&scx_fork_rwsem);
+ spin_lock_irq(&scx_tasks_lock);
+ scx_task_iter_init(&sti);
+ while ((p = scx_task_iter_next_locked(&sti))) {
+ const struct sched_class *old_class = p->sched_class;
+ struct sched_enq_and_set_ctx ctx;
+
+ sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
+
+ __setscheduler_prio(p, p->prio);
+ check_class_changing(task_rq(p), p, old_class);
+
+ sched_enq_and_set_task(&ctx);
+
+ check_class_changed(task_rq(p), p, old_class, p->prio);
+ }
+ scx_task_iter_exit(&sti);
+ spin_unlock_irq(&scx_tasks_lock);
+ percpu_up_write(&scx_fork_rwsem);
+
+ scx_ops_bypass(false);
+
+ /*
+ * Returning an error code here would lose the recorded error
+ * information. Exit indicating success so that the error is notified
+ * through ops.exit() with all the details.
+ */
+ if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
+ WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
+ ret = 0;
+ goto err_disable;
+ }
+
+ if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
+ static_branch_enable(&__scx_switched_all);
+
+ pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
+ scx_ops.name, scx_switched_all() ? "" : " (partial)");
+ kobject_uevent(scx_root_kobj, KOBJ_ADD);
+ mutex_unlock(&scx_ops_enable_mutex);
+
+ atomic_long_inc(&scx_enable_seq);
+
+ return 0;
+
+err_del:
+ kobject_del(scx_root_kobj);
+err:
+ kobject_put(scx_root_kobj);
+ scx_root_kobj = NULL;
+ if (scx_exit_info) {
+ free_exit_info(scx_exit_info);
+ scx_exit_info = NULL;
+ }
+err_unlock:
+ mutex_unlock(&scx_ops_enable_mutex);
+ return ret;
+
+err_disable_unlock_all:
+ scx_cgroup_unlock();
+ percpu_up_write(&scx_fork_rwsem);
+ scx_ops_bypass(false);
+err_disable:
+ mutex_unlock(&scx_ops_enable_mutex);
+ /* must be fully disabled before returning */
+ scx_ops_disable(SCX_EXIT_ERROR);
+ kthread_flush_work(&scx_ops_disable_work);
+ return ret;
+}
+
+
+/********************************************************************************
+ * bpf_struct_ops plumbing.
+ */
+#include <linux/bpf_verifier.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+
+extern struct btf *btf_vmlinux;
+static const struct btf_type *task_struct_type;
+static u32 task_struct_type_id;
+
+static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ struct btf *btf = bpf_get_btf_vmlinux();
+ const struct bpf_struct_ops_desc *st_ops_desc;
+ const struct btf_member *member;
+ const struct btf_type *t;
+ u32 btf_id, member_idx;
+ const char *mname;
+
+ /* struct_ops op args are all sequential, 64-bit numbers */
+ if (off != arg_n * sizeof(__u64))
+ return false;
+
+ /* btf_id should be the type id of struct sched_ext_ops */
+ btf_id = prog->aux->attach_btf_id;
+ st_ops_desc = bpf_struct_ops_find(btf, btf_id);
+ if (!st_ops_desc)
+ return false;
+
+ /* BTF type of struct sched_ext_ops */
+ t = st_ops_desc->type;
+
+ member_idx = prog->expected_attach_type;
+ if (member_idx >= btf_type_vlen(t))
+ return false;
+
+ /*
+ * Get the member name of this struct_ops program, which corresponds to
+ * a field in struct sched_ext_ops. For example, the member name of the
+ * dispatch struct_ops program (callback) is "dispatch".
+ */
+ member = &btf_type_member(t)[member_idx];
+ mname = btf_name_by_offset(btf_vmlinux, member->name_off);
+
+ if (!strcmp(mname, op)) {
+ /*
+ * The value is a pointer to a type (struct task_struct) given
+ * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED);
+ * however, it can be NULL (PTR_MAYBE_NULL). The BPF program
+ * should check the pointer to make sure it is not NULL before
+ * using it, or the verifier will reject the program.
+ *
+ * Longer term, this is something that should be addressed by
+ * BTF, and be fully contained within the verifier.
+ */
+ info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED;
+ info->btf = btf_vmlinux;
+ info->btf_id = task_struct_type_id;
+
+ return true;
+ }
+
+ return false;
+}
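+
+/*
+ * Illustrative sketch, not part of the kernel build: because the above marks
+ * these args PTR_MAYBE_NULL, a BPF scheduler must NULL-check them before any
+ * dereference or the verifier rejects the program. BPF_STRUCT_OPS() is
+ * assumed to come from the example schedulers' common.bpf.h.
+ *
+ *    void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
+ *    {
+ *        if (prev)
+ *            bpf_printk("prev: %s", prev->comm);
+ *    }
+ */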
+
+static bool bpf_scx_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (type != BPF_READ)
+ return false;
+ if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) ||
+ set_arg_maybe_null("yield", 1, off, size, type, prog, info))
+ return true;
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (off % size != 0)
+ return false;
+
+ return btf_ctx_access(off, size, type, prog, info);
+}
+
+static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg, int off,
+ int size)
+{
+ const struct btf_type *t;
+
+ t = btf_type_by_id(reg->btf, reg->btf_id);
+ if (t == task_struct_type) {
+ if (off >= offsetof(struct task_struct, scx.slice) &&
+ off + size <= offsetofend(struct task_struct, scx.slice))
+ return SCALAR_VALUE;
+ if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
+ off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
+ return SCALAR_VALUE;
+ if (off >= offsetof(struct task_struct, scx.disallow) &&
+ off + size <= offsetofend(struct task_struct, scx.disallow))
+ return SCALAR_VALUE;
+ }
+
+ return -EACCES;
+}
+
+static const struct bpf_func_proto *
+bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_task_storage_get:
+ return &bpf_task_storage_get_proto;
+ case BPF_FUNC_task_storage_delete:
+ return &bpf_task_storage_delete_proto;
+ default:
+ return bpf_base_func_proto(func_id, prog);
+ }
+}
+
+static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
+ .get_func_proto = bpf_scx_get_func_proto,
+ .is_valid_access = bpf_scx_is_valid_access,
+ .btf_struct_access = bpf_scx_btf_struct_access,
+};
+
+static int bpf_scx_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ const struct sched_ext_ops *uops = udata;
+ struct sched_ext_ops *ops = kdata;
+ u32 moff = __btf_member_bit_offset(t, member) / 8;
+ int ret;
+
+ switch (moff) {
+ case offsetof(struct sched_ext_ops, dispatch_max_batch):
+ if (*(u32 *)(udata + moff) > INT_MAX)
+ return -E2BIG;
+ ops->dispatch_max_batch = *(u32 *)(udata + moff);
+ return 1;
+ case offsetof(struct sched_ext_ops, flags):
+ if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
+ return -EINVAL;
+ ops->flags = *(u64 *)(udata + moff);
+ return 1;
+ case offsetof(struct sched_ext_ops, name):
+ ret = bpf_obj_name_cpy(ops->name, uops->name,
+ sizeof(ops->name));
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -EINVAL;
+ return 1;
+ case offsetof(struct sched_ext_ops, timeout_ms):
+ if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
+ SCX_WATCHDOG_MAX_TIMEOUT)
+ return -E2BIG;
+ ops->timeout_ms = *(u32 *)(udata + moff);
+ return 1;
+ case offsetof(struct sched_ext_ops, exit_dump_len):
+ ops->exit_dump_len =
+ *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
+ return 1;
+ case offsetof(struct sched_ext_ops, hotplug_seq):
+ ops->hotplug_seq = *(u64 *)(udata + moff);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int bpf_scx_check_member(const struct btf_type *t,
+ const struct btf_member *member,
+ const struct bpf_prog *prog)
+{
+ u32 moff = __btf_member_bit_offset(t, member) / 8;
+
+ switch (moff) {
+ case offsetof(struct sched_ext_ops, init_task):
+#ifdef CONFIG_EXT_GROUP_SCHED
+ case offsetof(struct sched_ext_ops, cgroup_init):
+ case offsetof(struct sched_ext_ops, cgroup_exit):
+ case offsetof(struct sched_ext_ops, cgroup_prep_move):
+#endif
+ case offsetof(struct sched_ext_ops, cpu_online):
+ case offsetof(struct sched_ext_ops, cpu_offline):
+ case offsetof(struct sched_ext_ops, init):
+ case offsetof(struct sched_ext_ops, exit):
+ break;
+ default:
+ if (prog->sleepable)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bpf_scx_reg(void *kdata, struct bpf_link *link)
+{
+ return scx_ops_enable(kdata, link);
+}
+
+static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
+{
+ scx_ops_disable(SCX_EXIT_UNREG);
+ kthread_flush_work(&scx_ops_disable_work);
+}
+
+static int bpf_scx_init(struct btf *btf)
+{
+ s32 type_id;
+
+ type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ task_struct_type = btf_type_by_id(btf, type_id);
+ task_struct_type_id = type_id;
+
+ return 0;
+}
+
+static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
+{
+ /*
+ * sched_ext does not support updating the actively-loaded BPF
+ * scheduler, as registering a BPF scheduler can always fail if the
+ * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
+ * etc. Similarly, we can always race with unregistration happening
+ * elsewhere, such as with sysrq.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int bpf_scx_validate(void *kdata)
+{
+ return 0;
+}
+
+static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
+static void enqueue_stub(struct task_struct *p, u64 enq_flags) {}
+static void dequeue_stub(struct task_struct *p, u64 enq_flags) {}
+static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {}
+static void tick_stub(struct task_struct *p) {}
+static void runnable_stub(struct task_struct *p, u64 enq_flags) {}
+static void running_stub(struct task_struct *p) {}
+static void stopping_stub(struct task_struct *p, bool runnable) {}
+static void quiescent_stub(struct task_struct *p, u64 deq_flags) {}
+static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; }
+static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; }
+static void set_weight_stub(struct task_struct *p, u32 weight) {}
+static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {}
+static void update_idle_stub(s32 cpu, bool idle) {}
+static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {}
+static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {}
+static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
+static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {}
+static void enable_stub(struct task_struct *p) {}
+static void disable_stub(struct task_struct *p) {}
+#ifdef CONFIG_EXT_GROUP_SCHED
+static s32 cgroup_init_stub(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
+static void cgroup_exit_stub(struct cgroup *cgrp) {}
+static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
+static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
+static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
+static void cgroup_set_weight_stub(struct cgroup *cgrp, u32 weight) {}
+#endif
+static void cpu_online_stub(s32 cpu) {}
+static void cpu_offline_stub(s32 cpu) {}
+static s32 init_stub(void) { return -EINVAL; }
+static void exit_stub(struct scx_exit_info *info) {}
+static void dump_stub(struct scx_dump_ctx *ctx) {}
+static void dump_cpu_stub(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
+static void dump_task_stub(struct scx_dump_ctx *ctx, struct task_struct *p) {}
+
+static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
+ .select_cpu = select_cpu_stub,
+ .enqueue = enqueue_stub,
+ .dequeue = dequeue_stub,
+ .dispatch = dispatch_stub,
+ .tick = tick_stub,
+ .runnable = runnable_stub,
+ .running = running_stub,
+ .stopping = stopping_stub,
+ .quiescent = quiescent_stub,
+ .yield = yield_stub,
+ .core_sched_before = core_sched_before_stub,
+ .set_weight = set_weight_stub,
+ .set_cpumask = set_cpumask_stub,
+ .update_idle = update_idle_stub,
+ .cpu_acquire = cpu_acquire_stub,
+ .cpu_release = cpu_release_stub,
+ .init_task = init_task_stub,
+ .exit_task = exit_task_stub,
+ .enable = enable_stub,
+ .disable = disable_stub,
+#ifdef CONFIG_EXT_GROUP_SCHED
+ .cgroup_init = cgroup_init_stub,
+ .cgroup_exit = cgroup_exit_stub,
+ .cgroup_prep_move = cgroup_prep_move_stub,
+ .cgroup_move = cgroup_move_stub,
+ .cgroup_cancel_move = cgroup_cancel_move_stub,
+ .cgroup_set_weight = cgroup_set_weight_stub,
+#endif
+ .cpu_online = cpu_online_stub,
+ .cpu_offline = cpu_offline_stub,
+ .init = init_stub,
+ .exit = exit_stub,
+ .dump = dump_stub,
+ .dump_cpu = dump_cpu_stub,
+ .dump_task = dump_task_stub,
+};
+
+static struct bpf_struct_ops bpf_sched_ext_ops = {
+ .verifier_ops = &bpf_scx_verifier_ops,
+ .reg = bpf_scx_reg,
+ .unreg = bpf_scx_unreg,
+ .check_member = bpf_scx_check_member,
+ .init_member = bpf_scx_init_member,
+ .init = bpf_scx_init,
+ .update = bpf_scx_update,
+ .validate = bpf_scx_validate,
+ .name = "sched_ext_ops",
+ .owner = THIS_MODULE,
+ .cfi_stubs = &__bpf_ops_sched_ext_ops
+};
+
+
+/********************************************************************************
+ * System integration and init.
+ */
+
+static void sysrq_handle_sched_ext_reset(u8 key)
+{
+ if (scx_ops_helper)
+ scx_ops_disable(SCX_EXIT_SYSRQ);
+ else
+ pr_info("sched_ext: BPF scheduler not yet used\n");
+}
+
+static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
+ .handler = sysrq_handle_sched_ext_reset,
+ .help_msg = "reset-sched-ext(S)",
+ .action_msg = "Disable sched_ext and revert all tasks to CFS",
+ .enable_mask = SYSRQ_ENABLE_RTNICE,
+};
+
+static void sysrq_handle_sched_ext_dump(u8 key)
+{
+ struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
+
+ if (scx_enabled())
+ scx_dump_state(&ei, 0);
+}
+
+static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
+ .handler = sysrq_handle_sched_ext_dump,
+ .help_msg = "dump-sched-ext(D)",
+ .action_msg = "Trigger sched_ext debug dump",
+ .enable_mask = SYSRQ_ENABLE_RTNICE,
+};
+
+static bool can_skip_idle_kick(struct rq *rq)
+{
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * We can skip idle kicking if @rq is going to go through at least one
+ * full SCX scheduling cycle before going idle. Just checking whether
+ * curr is not idle is insufficient because we could be racing
+ * balance_one() trying to pull the next task from a remote rq, which
+ * may fail, and @rq may become idle afterwards.
+ *
+ * The race window is small and we neither do nor can guarantee that @rq
+ * is kicked only while idle anyway. Skip only when sure.
+ */
+ return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
+}
+
+static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
+{
+ struct rq *rq = cpu_rq(cpu);
+ struct scx_rq *this_scx = &this_rq->scx;
+ bool should_wait = false;
+ unsigned long flags;
+
+ raw_spin_rq_lock_irqsave(rq, flags);
+
+ /*
+ * During CPU hotplug, a CPU may depend on kicking itself to make
+ * forward progress. Allow kicking self regardless of online state.
+ */
+ if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
+ if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
+ if (rq->curr->sched_class == &ext_sched_class)
+ rq->curr->scx.slice = 0;
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
+ }
+
+ if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
+ pseqs[cpu] = rq->scx.pnt_seq;
+ should_wait = true;
+ }
+
+ resched_curr(rq);
+ } else {
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
+ }
+
+ raw_spin_rq_unlock_irqrestore(rq, flags);
+
+ return should_wait;
+}
+
+static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ raw_spin_rq_lock_irqsave(rq, flags);
+
+ if (!can_skip_idle_kick(rq) &&
+ (cpu_online(cpu) || cpu == cpu_of(this_rq)))
+ resched_curr(rq);
+
+ raw_spin_rq_unlock_irqrestore(rq, flags);
+}
+
+static void kick_cpus_irq_workfn(struct irq_work *irq_work)
+{
+ struct rq *this_rq = this_rq();
+ struct scx_rq *this_scx = &this_rq->scx;
+ unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
+ bool should_wait = false;
+ s32 cpu;
+
+ for_each_cpu(cpu, this_scx->cpus_to_kick) {
+ should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
+ }
+
+ for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
+ kick_one_cpu_if_idle(cpu, this_rq);
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
+ }
+
+ if (!should_wait)
+ return;
+
+ for_each_cpu(cpu, this_scx->cpus_to_wait) {
+ unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
+
+ if (cpu != cpu_of(this_rq)) {
+ /*
+ * Pairs with smp_store_release() issued by this CPU in
+ * scx_next_task_picked() on the resched path.
+ *
+ * We busy-wait here to guarantee that no other task can
+ * be scheduled on our core before the target CPU has
+ * entered the resched path.
+ */
+ while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
+ cpu_relax();
+ }
+
+ cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
+ }
+}
+
+/**
+ * print_scx_info - print out sched_ext scheduler state
+ * @log_lvl: the log level to use when printing
+ * @p: target task
+ *
+ * If a sched_ext scheduler is enabled, print the name and state of the
+ * scheduler. If @p is on sched_ext, print further information about the task.
+ *
+ * This function can be safely called on any task as long as the task_struct
+ * itself is accessible. While safe, this function isn't synchronized and may
+ * print out mixed-up or garbage values of limited length.
+ */
+void print_scx_info(const char *log_lvl, struct task_struct *p)
+{
+ enum scx_ops_enable_state state = scx_ops_enable_state();
+ const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
+ char runnable_at_buf[22] = "?";
+ struct sched_class *class;
+ unsigned long runnable_at;
+
+ if (state == SCX_OPS_DISABLED)
+ return;
+
+ /*
+ * Carefully check whether the task was running on sched_ext, and then
+ * carefully copy how long it has been runnable, and its state.
+ */
+ if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
+ class != &ext_sched_class) {
+ printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
+ scx_ops_enable_state_str[state], all);
+ return;
+ }
+
+ if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
+ sizeof(runnable_at)))
+ scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
+ jiffies_delta_msecs(runnable_at, jiffies));
+
+ /* print everything onto one line to conserve console space */
+ printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
+ log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
+ runnable_at_buf);
+}
+
+static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ /*
+ * SCX schedulers often have userspace components which are sometimes
+ * involved in critical scheduling paths. PM operations involve freezing
+ * userspace which can lead to scheduling misbehaviors including stalls.
+ * Let's bypass while PM operations are in progress.
+ */
+ switch (event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
+ scx_ops_bypass(true);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ case PM_POST_RESTORE:
+ scx_ops_bypass(false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block scx_pm_notifier = {
+ .notifier_call = scx_pm_handler,
+};
+
+void __init init_sched_ext_class(void)
+{
+ s32 cpu, v;
+
+ /*
+ * The following is to prevent the compiler from optimizing out the enum
+ * definitions so that BPF scheduler implementations can use them
+ * through the generated vmlinux.h.
+ */
+ WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
+ SCX_TG_ONLINE);
+
+ BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
+#ifdef CONFIG_SMP
+ BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
+ BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
+#endif
+ scx_kick_cpus_pnt_seqs =
+ __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
+ __alignof__(scx_kick_cpus_pnt_seqs[0]));
+ BUG_ON(!scx_kick_cpus_pnt_seqs);
+
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+
+ init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
+ INIT_LIST_HEAD(&rq->scx.runnable_list);
+ INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
+
+ BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
+ init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
+ init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
+
+ if (cpu_online(cpu))
+ cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
+ }
+
+ register_sysrq_key('S', &sysrq_sched_ext_reset_op);
+ register_sysrq_key('D', &sysrq_sched_ext_dump_op);
+ INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
+}
+
+
+/********************************************************************************
+ * Helpers that can be called from the BPF scheduler.
+ */
+#include <linux/btf_ids.h>
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
+ * @p: task_struct to select a CPU for
+ * @prev_cpu: CPU @p was on previously
+ * @wake_flags: %SCX_WAKE_* flags
+ * @is_idle: out parameter indicating whether the returned CPU is idle
+ *
+ * Can only be called from ops.select_cpu() if the built-in CPU selection is
+ * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
+ * @p, @prev_cpu and @wake_flags match ops.select_cpu().
+ *
+ * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
+ * currently idle and thus a good candidate for direct dispatching.
+ */
+__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ u64 wake_flags, bool *is_idle)
+{
+ if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) {
+ *is_idle = false;
+ return prev_cpu;
+ }
+#ifdef CONFIG_SMP
+ return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
+#else
+ *is_idle = false;
+ return prev_cpu;
+#endif
+}
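+
+/*
+ * Illustrative sketch, not part of the kernel build: a minimal
+ * ops.select_cpu() that defers to the built-in selection and directly
+ * dispatches when an idle CPU was found, in the style of the example
+ * schedulers. BPF_STRUCT_OPS() is assumed to come from their common.bpf.h.
+ *
+ *    s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
+ *                       s32 prev_cpu, u64 wake_flags)
+ *    {
+ *        bool is_idle;
+ *        s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
+ *
+ *        if (is_idle)
+ *            scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+ *        return cpu;
+ *    }
+ */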
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
+BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_select_cpu,
+};
+
+static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
+{
+ if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
+ return false;
+
+ lockdep_assert_irqs_disabled();
+
+ if (unlikely(!p)) {
+ scx_ops_error("called with NULL task");
+ return false;
+ }
+
+ if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
+ scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
+ return false;
+ }
+
+ return true;
+}
+
+static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ struct task_struct *ddsp_task;
+
+ ddsp_task = __this_cpu_read(direct_dispatch_task);
+ if (ddsp_task) {
+ mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
+ return;
+ }
+
+ if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
+ scx_ops_error("dispatch buffer overflow");
+ return;
+ }
+
+ dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
+ .task = p,
+ .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
+ .dsq_id = dsq_id,
+ .enq_flags = enq_flags,
+ };
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ
+ * @p: task_struct to dispatch
+ * @dsq_id: DSQ to dispatch to
+ * @slice: duration @p can run for in nsecs, 0 to keep the current value
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe
+ * to call this function spuriously. Can be called from ops.enqueue(),
+ * ops.select_cpu(), and ops.dispatch().
+ *
+ * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
+ * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
+ * used to target the local DSQ of a CPU other than the enqueueing one. Use
+ * ops.select_cpu() to be on the target CPU in the first place.
+ *
+ * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
+ * will be directly dispatched to the corresponding dispatch queue after
+ * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be
+ * dispatched to the local DSQ of the CPU returned by ops.select_cpu().
+ * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
+ * task is dispatched.
+ *
+ * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
+ * and this function can be called up to ops.dispatch_max_batch times to dispatch
+ * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
+ * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
+ *
+ * This function doesn't have any locking restrictions and may be called under
+ * BPF locks (in the future when BPF introduces more flexible locking).
+ *
+ * @p is allowed to run for @slice. The scheduling path is triggered on slice
+ * exhaustion. If zero, the current residual slice is maintained. If
+ * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
+ * scx_bpf_kick_cpu() to trigger scheduling.
+ */
+__bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
+ u64 enq_flags)
+{
+ if (!scx_dispatch_preamble(p, enq_flags))
+ return;
+
+ if (slice)
+ p->scx.slice = slice;
+ else
+ p->scx.slice = p->scx.slice ?: 1;
+
+ scx_dispatch_commit(p, dsq_id, enq_flags);
+}
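+
+/*
+ * Illustrative sketch, not part of the kernel build: an ops.enqueue() that
+ * queues every task on one shared user DSQ. SHARED_DSQ is an assumed DSQ ID
+ * created beforehand with scx_bpf_create_dsq().
+ *
+ *    void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
+ *    {
+ *        scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
+ *    }
+ */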
+
+/**
+ * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ
+ * @p: task_struct to dispatch
+ * @dsq_id: DSQ to dispatch to
+ * @slice: duration @p can run for in nsecs, 0 to keep the current value
+ * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id.
+ * Tasks queued into the priority queue are ordered by @vtime and always
+ * consumed after the tasks in the FIFO queue. All other aspects are identical
+ * to scx_bpf_dispatch().
+ *
+ * @vtime ordering is according to time_before64() which considers wrapping. A
+ * numerically larger vtime may indicate an earlier position in the ordering and
+ * vice-versa.
+ */
+__bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
+ u64 slice, u64 vtime, u64 enq_flags)
+{
+ if (!scx_dispatch_preamble(p, enq_flags))
+ return;
+
+ if (slice)
+ p->scx.slice = slice;
+ else
+ p->scx.slice = p->scx.slice ?: 1;
+
+ p->scx.dsq_vtime = vtime;
+
+ scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
+}
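+
+/*
+ * Illustrative sketch, not part of the kernel build: weighted vtime queueing
+ * in the style of the in-tree scx_simple example. vtime_now is an assumed
+ * global tracking the highest vtime seen and vtime_before() an assumed helper
+ * with time_before64() semantics; clamping the budget to one slice keeps
+ * long-idle tasks from monopolizing the CPU after waking up.
+ *
+ *    u64 vtime = p->scx.dsq_vtime;
+ *
+ *    if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
+ *        vtime = vtime_now - SCX_SLICE_DFL;
+ *    scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime, enq_flags);
+ */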
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
+BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_enqueue_dispatch,
+};
+
+static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
+ struct task_struct *p, u64 dsq_id,
+ u64 enq_flags)
+{
+ struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
+ struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
+ bool dispatched = false;
+ bool in_balance;
+ unsigned long flags;
+
+ if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
+ return false;
+
+ /*
+ * Can be called from either ops.dispatch() locking this_rq() or any
+ * context where no rq lock is held. If latter, lock @p's task_rq which
+ * we'll likely need anyway.
+ */
+ src_rq = task_rq(p);
+
+ local_irq_save(flags);
+ this_rq = this_rq();
+ in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
+
+ if (in_balance) {
+ if (this_rq != src_rq) {
+ raw_spin_rq_unlock(this_rq);
+ raw_spin_rq_lock(src_rq);
+ }
+ } else {
+ raw_spin_rq_lock(src_rq);
+ }
+
+ locked_rq = src_rq;
+ raw_spin_lock(&src_dsq->lock);
+
+ /*
+ * Did someone else get to it? @p could have already left $src_dsq, got
+ * re-enqueued, or be in the process of being consumed by someone else.
+ */
+ if (unlikely(p->scx.dsq != src_dsq ||
+ u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
+ p->scx.holding_cpu >= 0) ||
+ WARN_ON_ONCE(src_rq != task_rq(p))) {
+ raw_spin_unlock(&src_dsq->lock);
+ goto out;
+ }
+
+ /* @p is still on $src_dsq and stable, determine the destination */
+ dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
+
+ if (dst_dsq->id == SCX_DSQ_LOCAL) {
+ dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
+ if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
+ dst_dsq = find_global_dsq(p);
+ dst_rq = src_rq;
+ }
+ } else {
+ /* no need to migrate if destination is a non-local DSQ */
+ dst_rq = src_rq;
+ }
+
+ /*
+ * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
+ * CPU, @p will be migrated.
+ */
+ if (dst_dsq->id == SCX_DSQ_LOCAL) {
+ /* @p is going from a non-local DSQ to a local DSQ */
+ if (src_rq == dst_rq) {
+ task_unlink_from_dsq(p, src_dsq);
+ move_local_task_to_local_dsq(p, enq_flags,
+ src_dsq, dst_rq);
+ raw_spin_unlock(&src_dsq->lock);
+ } else {
+ raw_spin_unlock(&src_dsq->lock);
+ move_remote_task_to_local_dsq(p, enq_flags,
+ src_rq, dst_rq);
+ locked_rq = dst_rq;
+ }
+ } else {
+ /*
+ * @p is going from a non-local DSQ to a non-local DSQ. As
+ * $src_dsq is already locked, do an abbreviated dequeue.
+ */
+ task_unlink_from_dsq(p, src_dsq);
+ p->scx.dsq = NULL;
+ raw_spin_unlock(&src_dsq->lock);
+
+ if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
+ p->scx.dsq_vtime = kit->vtime;
+ dispatch_enqueue(dst_dsq, p, enq_flags);
+ }
+
+ if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
+ p->scx.slice = kit->slice;
+
+ dispatched = true;
+out:
+ if (in_balance) {
+ if (this_rq != locked_rq) {
+ raw_spin_rq_unlock(locked_rq);
+ raw_spin_rq_lock(this_rq);
+ }
+ } else {
+ raw_spin_rq_unlock_irqrestore(locked_rq, flags);
+ }
+
+ kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
+ __SCX_DSQ_ITER_HAS_VTIME);
+ return dispatched;
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
+ *
+ * Can only be called from ops.dispatch().
+ */
+__bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
+{
+ if (!scx_kf_allowed(SCX_KF_DISPATCH))
+ return 0;
+
+ return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
+}
+
+/**
+ * scx_bpf_dispatch_cancel - Cancel the latest dispatch
+ *
+ * Cancel the latest dispatch. Can be called multiple times to cancel further
+ * dispatches. Can only be called from ops.dispatch().
+ */
+__bpf_kfunc void scx_bpf_dispatch_cancel(void)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+
+ if (!scx_kf_allowed(SCX_KF_DISPATCH))
+ return;
+
+ if (dspc->cursor > 0)
+ dspc->cursor--;
+ else
+ scx_ops_error("dispatch buffer underflow");
+}
+
+/**
+ * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ
+ * @dsq_id: DSQ to consume
+ *
+ * Consume a task from the non-local DSQ identified by @dsq_id and transfer it
+ * to the current CPU's local DSQ for execution. Can only be called from
+ * ops.dispatch().
+ *
+ * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
+ * trying to consume the specified DSQ. It may also grab rq locks and thus can't
+ * be called under any BPF locks.
+ *
+ * Returns %true if a task has been consumed, %false if there isn't any task to
+ * consume.
+ */
+__bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
+{
+ struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+ struct scx_dispatch_q *dsq;
+
+ if (!scx_kf_allowed(SCX_KF_DISPATCH))
+ return false;
+
+ flush_dispatch_buf(dspc->rq);
+
+ dsq = find_user_dsq(dsq_id);
+ if (unlikely(!dsq)) {
+ scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
+ return false;
+ }
+
+ if (consume_dispatch_q(dspc->rq, dsq)) {
+ /*
+ * A successfully consumed task can be dequeued before it starts
+ * running while the CPU is trying to migrate other dispatched
+ * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+ * local DSQ.
+ */
+ dspc->nr_tasks++;
+ return true;
+ } else {
+ return false;
+ }
+}
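+
+/*
+ * Illustrative sketch, not part of the kernel build: an ops.dispatch() that
+ * refills the local DSQ from an assumed shared user DSQ whenever the local
+ * DSQ runs dry.
+ *
+ *    void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
+ *    {
+ *        scx_bpf_consume(SHARED_DSQ);
+ *    }
+ */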
+
+/**
+ * scx_bpf_dispatch_from_dsq_set_slice - Override slice when dispatching from DSQ
+ * @it__iter: DSQ iterator in progress
+ * @slice: duration the dispatched task can run for in nsecs
+ *
+ * Override the slice of the next task that will be dispatched from @it__iter
+ * using scx_bpf_dispatch_from_dsq[_vtime](). If this function is not called,
+ * the previous slice duration is kept.
+ */
+__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
+ struct bpf_iter_scx_dsq *it__iter, u64 slice)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
+
+ kit->slice = slice;
+ kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
+}
+
+/**
+ * scx_bpf_dispatch_from_dsq_set_vtime - Override vtime when dispatching from DSQ
+ * @it__iter: DSQ iterator in progress
+ * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
+ *
+ * Override the vtime of the next task that will be dispatched from @it__iter
+ * using scx_bpf_dispatch_vtime_from_dsq(). If this function is not called, the
+ * previous vtime is kept. If scx_bpf_dispatch_from_dsq() is used to
+ * dispatch the next task, the override is ignored and cleared.
+ */
+__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
+ struct bpf_iter_scx_dsq *it__iter, u64 vtime)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
+
+ kit->vtime = vtime;
+ kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
+}
+
+/**
+ * scx_bpf_dispatch_from_dsq - Move a task from DSQ iteration to a DSQ
+ * @it__iter: DSQ iterator in progress
+ * @p: task to transfer
+ * @dsq_id: DSQ to move @p to
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
+ * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
+ * be the destination.
+ *
+ * For the transfer to be successful, @p must still be on the DSQ and have been
+ * queued before the DSQ iteration started. This function doesn't care whether
+ * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
+ * been queued before the iteration started.
+ *
+ * @p's slice is kept by default. Use scx_bpf_dispatch_from_dsq_set_slice() to
+ * update.
+ *
+ * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
+ * lock (e.g. BPF timers or SYSCALL programs).
+ *
+ * Returns %true if @p has been consumed, %false if @p had already been consumed
+ * or dequeued.
+ */
+__bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
+ struct task_struct *p, u64 dsq_id,
+ u64 enq_flags)
+{
+ return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
+ p, dsq_id, enq_flags);
+}
+
+/**
+ * scx_bpf_dispatch_vtime_from_dsq - Move a task from DSQ iteration to a PRIQ DSQ
+ * @it__iter: DSQ iterator in progress
+ * @p: task to transfer
+ * @dsq_id: DSQ to move @p to
+ * @enq_flags: SCX_ENQ_*
+ *
+ * Transfer @p which is on the DSQ currently iterated by @it__iter to the
+ * priority queue of the DSQ specified by @dsq_id. The destination must be a
+ * user DSQ as only user DSQs support priority queue.
+ *
+ * @p's slice and vtime are kept by default. Use
+ * scx_bpf_dispatch_from_dsq_set_slice() and
+ * scx_bpf_dispatch_from_dsq_set_vtime() to update.
+ *
+ * All other aspects are identical to scx_bpf_dispatch_from_dsq(). See
+ * scx_bpf_dispatch_vtime() for more information on @vtime.
+ */
+__bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
+ struct task_struct *p, u64 dsq_id,
+ u64 enq_flags)
+{
+ return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
+ p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
+}
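+
+/*
+ * Illustrative sketch, not part of the kernel build: walk an assumed user DSQ
+ * and move a matching task to the local DSQ of an assumed target CPU.
+ * bpf_for_each() and BPF_FOR_EACH_ITER are assumed to come from the example
+ * schedulers' common.bpf.h and wrap the bpf_iter_scx_dsq_*() kfuncs below.
+ *
+ *    struct task_struct *p;
+ *
+ *    bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
+ *        if (p->pid == target_pid) {
+ *            scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
+ *                                      SCX_DSQ_LOCAL_ON | target_cpu,
+ *                                      SCX_ENQ_PREEMPT);
+ *            break;
+ *        }
+ *    }
+ */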
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
+BTF_ID_FLAGS(func, scx_bpf_consume)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_dispatch,
+};
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
+ *
+ * Iterate over all of the tasks currently enqueued on the local DSQ of the
+ * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
+ * processed tasks. Can only be called from ops.cpu_release().
+ */
+__bpf_kfunc u32 scx_bpf_reenqueue_local(void)
+{
+ LIST_HEAD(tasks);
+ u32 nr_enqueued = 0;
+ struct rq *rq;
+ struct task_struct *p, *n;
+
+ if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
+ return 0;
+
+ rq = cpu_rq(smp_processor_id());
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * The BPF scheduler may choose to dispatch tasks back to
+ * @rq->scx.local_dsq. Move all candidate tasks off to a private list
+ * first to avoid processing the same tasks repeatedly.
+ */
+ list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
+ scx.dsq_list.node) {
+ /*
+ * If @p is being migrated, @p's current CPU may not agree with
+ * its allowed CPUs and the migration_cpu_stop is about to
+ * deactivate and re-activate @p anyway. Skip re-enqueueing.
+ *
+ * While racing sched property changes may also dequeue and
+ * re-enqueue a migrating task while its current CPU and allowed
+ * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
+ * the current local DSQ for running tasks and thus are not
+ * visible to the BPF scheduler.
+ */
+ if (p->migration_pending)
+ continue;
+
+ dispatch_dequeue(rq, p);
+ list_add_tail(&p->scx.dsq_list.node, &tasks);
+ }
+
+ list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
+ list_del_init(&p->scx.dsq_list.node);
+ do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
+ nr_enqueued++;
+ }
+
+ return nr_enqueued;
+}
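+
+/*
+ * Illustrative sketch, not part of the kernel build: when a higher-priority
+ * sched class takes over a CPU, hand the tasks already queued on its local
+ * DSQ back to the BPF scheduler so they can be placed elsewhere.
+ *
+ *    void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
+ *                        struct scx_cpu_release_args *args)
+ *    {
+ *        scx_bpf_reenqueue_local();
+ *    }
+ */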
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
+BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
+BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_cpu_release,
+};
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_create_dsq - Create a custom DSQ
+ * @dsq_id: DSQ to create
+ * @node: NUMA node to allocate from
+ *
+ * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
+ * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
+ */
+__bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
+{
+ if (unlikely(node >= (int)nr_node_ids ||
+ (node < 0 && node != NUMA_NO_NODE)))
+ return -EINVAL;
+ return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
+}
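+
+/*
+ * Illustrative sketch, not part of the kernel build: user DSQs are typically
+ * created from the sleepable ops.init() callback. SHARED_DSQ is an assumed
+ * ID; -1 (NUMA_NO_NODE) lets the kernel pick the allocation node.
+ *
+ *    s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
+ *    {
+ *        return scx_bpf_create_dsq(SHARED_DSQ, -1);
+ *    }
+ */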
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
+BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_unlocked,
+};
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_kick_cpu - Trigger reschedule on a CPU
+ * @cpu: cpu to kick
+ * @flags: %SCX_KICK_* flags
+ *
+ * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
+ * trigger rescheduling on a busy CPU. This can be called from any online
+ * scx_ops operation and the actual kicking is performed asynchronously through
+ * an irq work.
+ */
+__bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
+{
+ struct rq *this_rq;
+ unsigned long irq_flags;
+
+ if (!ops_cpu_valid(cpu, NULL))
+ return;
+
+ local_irq_save(irq_flags);
+
+ this_rq = this_rq();
+
+ /*
+ * While bypassing for PM ops, IRQ handling may not be online which can
+ * lead to irq_work_queue() malfunction such as infinite busy wait for
+ * IRQ status update. Suppress kicking.
+ */
+ if (scx_rq_bypassing(this_rq))
+ goto out;
+
+ /*
+ * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
+ * rq locks. We can probably be smarter and avoid bouncing if called
+ * from ops which don't hold a rq lock.
+ */
+ if (flags & SCX_KICK_IDLE) {
+ struct rq *target_rq = cpu_rq(cpu);
+
+ if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
+ scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
+
+ if (raw_spin_rq_trylock(target_rq)) {
+ if (can_skip_idle_kick(target_rq)) {
+ raw_spin_rq_unlock(target_rq);
+ goto out;
+ }
+ raw_spin_rq_unlock(target_rq);
+ }
+ cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
+ } else {
+ cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
+
+ if (flags & SCX_KICK_PREEMPT)
+ cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
+ if (flags & SCX_KICK_WAIT)
+ cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
+ }
+
+ irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
+out:
+ local_irq_restore(irq_flags);
+}
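+
+/*
+ * Illustrative sketch, not part of the kernel build: after queueing a task on
+ * a shared DSQ, wake an idle CPU so the task doesn't have to wait for the
+ * next scheduling event. %SCX_KICK_IDLE makes the kick a no-op unless @cpu is
+ * idle.
+ *
+ *    s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
+ *
+ *    if (cpu >= 0)
+ *        scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+ */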
+
+/**
+ * scx_bpf_dsq_nr_queued - Return the number of queued tasks
+ * @dsq_id: id of the DSQ
+ *
+ * Return the number of tasks in the DSQ matching @dsq_id. If not found,
+ * -%ENOENT is returned.
+ */
+__bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
+{
+ struct scx_dispatch_q *dsq;
+ s32 ret;
+
+ preempt_disable();
+
+ if (dsq_id == SCX_DSQ_LOCAL) {
+ ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
+ goto out;
+ } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
+ s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
+
+ if (ops_cpu_valid(cpu, NULL)) {
+ ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
+ goto out;
+ }
+ } else {
+ dsq = find_user_dsq(dsq_id);
+ if (dsq) {
+ ret = READ_ONCE(dsq->nr);
+ goto out;
+ }
+ }
+ ret = -ENOENT;
+out:
+ preempt_enable();
+ return ret;
+}
+
+/**
+ * scx_bpf_destroy_dsq - Destroy a custom DSQ
+ * @dsq_id: DSQ to destroy
+ *
+ * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
+ * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
+ * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
+ * which doesn't exist. Can be called from any online scx_ops operation.
+ */
+__bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
+{
+ destroy_dsq(dsq_id);
+}
+
+/**
+ * bpf_iter_scx_dsq_new - Create a DSQ iterator
+ * @it: iterator to initialize
+ * @dsq_id: DSQ to iterate
+ * @flags: %SCX_DSQ_ITER_*
+ *
+ * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
+ * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
+ * tasks which are already queued when this function is invoked.
+ */
+__bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
+ u64 flags)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+
+ BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
+ sizeof(struct bpf_iter_scx_dsq));
+ BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
+ __alignof__(struct bpf_iter_scx_dsq));
+
+ if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
+ return -EINVAL;
+
+ kit->dsq = find_user_dsq(dsq_id);
+ if (!kit->dsq)
+ return -ENOENT;
+
+ INIT_LIST_HEAD(&kit->cursor.node);
+ kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
+ kit->cursor.priv = READ_ONCE(kit->dsq->seq);
+
+ return 0;
+}
+
+/**
+ * bpf_iter_scx_dsq_next - Progress a DSQ iterator
+ * @it: iterator to progress
+ *
+ * Return the next task. See bpf_iter_scx_dsq_new().
+ */
+__bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+ bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
+ struct task_struct *p;
+ unsigned long flags;
+
+ if (!kit->dsq)
+ return NULL;
+
+ raw_spin_lock_irqsave(&kit->dsq->lock, flags);
+
+ if (list_empty(&kit->cursor.node))
+ p = NULL;
+ else
+ p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
+
+ /*
+ * Only tasks which were queued before the iteration started are
+ * visible. This bounds BPF iterations and guarantees that vtime never
+ * jumps in the other direction while iterating.
+ */
+ do {
+ p = nldsq_next_task(kit->dsq, p, rev);
+ } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
+
+ if (p) {
+ if (rev)
+ list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
+ else
+ list_move(&kit->cursor.node, &p->scx.dsq_list.node);
+ } else {
+ list_del_init(&kit->cursor.node);
+ }
+
+ raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
+
+ return p;
+}
+
+/**
+ * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
+ * @it: iterator to destroy
+ *
+ * Undo bpf_iter_scx_dsq_new().
+ */
+__bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
+{
+ struct bpf_iter_scx_dsq_kern *kit = (void *)it;
+
+ if (!kit->dsq)
+ return;
+
+ if (!list_empty(&kit->cursor.node)) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&kit->dsq->lock, flags);
+ list_del_init(&kit->cursor.node);
+ raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
+ }
+ kit->dsq = NULL;
+}
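+
+/*
+ * Illustrative sketch, not part of the kernel build: the open-coded loop that
+ * the bpf_for_each(scx_dsq, ...) convenience macro roughly expands to, shown
+ * against an assumed SHARED_DSQ.
+ *
+ *    struct bpf_iter_scx_dsq it;
+ *    struct task_struct *p;
+ *
+ *    bpf_iter_scx_dsq_new(&it, SHARED_DSQ, 0);
+ *    while ((p = bpf_iter_scx_dsq_next(&it)))
+ *        bpf_printk("queued: %s", p->comm);
+ *    bpf_iter_scx_dsq_destroy(&it);
+ */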
+
+__bpf_kfunc_end_defs();
+
+static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
+ char *fmt, unsigned long long *data, u32 data__sz)
+{
+ struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
+ s32 ret;
+
+ if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
+ (data__sz && !data)) {
+ scx_ops_error("invalid data=%p and data__sz=%u",
+ (void *)data, data__sz);
+ return -EINVAL;
+ }
+
+ ret = copy_from_kernel_nofault(data_buf, data, data__sz);
+ if (ret < 0) {
+ scx_ops_error("failed to read data fields (%d)", ret);
+ return ret;
+ }
+
+ ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
+ &bprintf_data);
+ if (ret < 0) {
+ scx_ops_error("format preparation failed (%d)", ret);
+ return ret;
+ }
+
+ ret = bstr_printf(line_buf, line_size, fmt,
+ bprintf_data.bin_args);
+ bpf_bprintf_cleanup(&bprintf_data);
+ if (ret < 0) {
+ scx_ops_error("(\"%s\", %p, %u) failed to format",
+ fmt, data, data__sz);
+ return ret;
+ }
+
+ return ret;
+}
+
+static s32 bstr_format(struct scx_bstr_buf *buf,
+ char *fmt, unsigned long long *data, u32 data__sz)
+{
+ return __bstr_format(buf->data, buf->line, sizeof(buf->line),
+ fmt, data, data__sz);
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
+ * @exit_code: Exit value to pass to user space via struct scx_exit_info.
+ * @fmt: error message format string
+ * @data: format string parameters packaged using ___bpf_fill() macro
+ * @data__sz: @data len, must end in '__sz' for the verifier
+ *
+ * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
+ * disabling.
+ */
+__bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
+ unsigned long long *data, u32 data__sz)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
+ if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
+ scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
+ scx_exit_bstr_buf.line);
+ raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
+}
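+
+/*
+ * Illustrative sketch: BPF schedulers typically don't call this kfunc
+ * directly. A wrapper macro packages the varargs into the u64 array that
+ * @data expects, roughly along these lines (the macro name and buffer
+ * size are assumptions):
+ *
+ *	static u64 exit_data[8];
+ *
+ *	#define scx_bpf_exit(code, fmt, args...) do {			\
+ *		___bpf_fill(exit_data, args);				\
+ *		scx_bpf_exit_bstr((code), (fmt), exit_data,		\
+ *				  sizeof(exit_data));			\
+ *	} while (0)
+ */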
+
+/**
+ * scx_bpf_error_bstr - Indicate fatal error
+ * @fmt: error message format string
+ * @data: format string parameters packaged using ___bpf_fill() macro
+ * @data__sz: @data len, must end in '__sz' for the verifier
+ *
+ * Indicate that the BPF scheduler encountered a fatal error and initiate ops
+ * disabling.
+ */
+__bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
+ u32 data__sz)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
+ if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
+ scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
+ scx_exit_bstr_buf.line);
+ raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
+}
+
+/**
+ * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
+ * @fmt: format string
+ * @data: format string parameters packaged using ___bpf_fill() macro
+ * @data__sz: @data len, must end in '__sz' for the verifier
+ *
+ * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
+ * dump_task() to generate extra debug dump specific to the BPF scheduler.
+ *
+ * The extra dump may be multiple lines. A single line may be split over
+ * multiple calls. The last line is automatically terminated.
+ */
+__bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
+ u32 data__sz)
+{
+ struct scx_dump_data *dd = &scx_dump_data;
+ struct scx_bstr_buf *buf = &dd->buf;
+ s32 ret;
+
+ if (raw_smp_processor_id() != dd->cpu) {
+ scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
+ return;
+ }
+
+ /* append the formatted string to the line buf */
+ ret = __bstr_format(buf->data, buf->line + dd->cursor,
+ sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
+ if (ret < 0) {
+ dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
+ dd->prefix, fmt, data, data__sz, ret);
+ return;
+ }
+
+ dd->cursor += ret;
+ dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
+
+ if (!dd->cursor)
+ return;
+
+ /*
+ * If the line buf overflowed or ends in a newline, flush it into the
+ * dump. This is to allow the caller to generate a single line over
+ * multiple calls. As ops_dump_flush() can also handle multiple lines in
+ * the line buf, the only case which can lead to an unexpected
+	 * truncation is when the caller repeatedly generates newlines in the
+	 * middle of a line rather than at its end. Don't do that.
+ */
+ if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
+ ops_dump_flush();
+}
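+
+/*
+ * Illustrative sketch: from the BPF side this is reached through a
+ * scx_bpf_dump() wrapper macro, e.g. in an ops.dump() implementation
+ * (BPF_STRUCT_OPS and the counter name are assumptions):
+ *
+ *	void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
+ *	{
+ *		scx_bpf_dump("nr_enqueued=%llu\n", nr_enqueued);
+ *	}
+ */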
+
+/**
+ * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
+ * @cpu: CPU of interest
+ *
+ * Return the maximum relative capacity of @cpu in relation to the most
+ * performant CPU in the system. The return value is in the range [1,
+ * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
+ */
+__bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
+{
+ if (ops_cpu_valid(cpu, NULL))
+ return arch_scale_cpu_capacity(cpu);
+ else
+ return SCX_CPUPERF_ONE;
+}
+
+/**
+ * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
+ * @cpu: CPU of interest
+ *
+ * Return the current relative performance of @cpu in relation to its maximum.
+ * The return value is in the range [1, %SCX_CPUPERF_ONE].
+ *
+ * The current performance level of a CPU in relation to the maximum performance
+ * available in the system can be calculated as follows:
+ *
+ * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
+ *
+ * The result is in the range [1, %SCX_CPUPERF_ONE].
+ */
+__bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
+{
+ if (ops_cpu_valid(cpu, NULL))
+ return arch_scale_freq_capacity(cpu);
+ else
+ return SCX_CPUPERF_ONE;
+}
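+
+/*
+ * Illustrative sketch: combining the two queries above per the formula in
+ * the comment gives the absolute performance level of a CPU:
+ *
+ *	u32 abs_perf = scx_bpf_cpuperf_cap(cpu) * scx_bpf_cpuperf_cur(cpu) /
+ *		       SCX_CPUPERF_ONE;
+ */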
+
+/**
+ * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
+ * @cpu: CPU of interest
+ * @perf: target performance level [0, %SCX_CPUPERF_ONE]
+ *
+ * Set the target performance level of @cpu to @perf. @perf is in linear
+ * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
+ * schedutil cpufreq governor chooses the target frequency.
+ *
+ * The actual performance level chosen, CPU grouping, and the overhead and
+ * latency of the operations are dependent on the hardware and cpufreq driver in
+ * use. Consult hardware and cpufreq documentation for more information. The
+ * current performance level can be monitored using scx_bpf_cpuperf_cur().
+ */
+__bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
+{
+ if (unlikely(perf > SCX_CPUPERF_ONE)) {
+ scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
+ return;
+ }
+
+ if (ops_cpu_valid(cpu, NULL)) {
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->scx.cpuperf_target = perf;
+
+ rcu_read_lock_sched_notrace();
+ cpufreq_update_util(cpu_rq(cpu), 0);
+ rcu_read_unlock_sched_notrace();
+ }
+}
+
+/**
+ * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
+ *
+ * All valid CPU IDs in the system are smaller than the returned value.
+ */
+__bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
+{
+ return nr_cpu_ids;
+}
+
+/**
+ * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
+{
+ return cpu_possible_mask;
+}
+
+/**
+ * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
+{
+ return cpu_online_mask;
+}
+
+/**
+ * scx_bpf_put_cpumask - Release a possible/online cpumask
+ * @cpumask: cpumask to release
+ */
+__bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
+{
+ /*
+ * Empty function body because we aren't actually acquiring or releasing
+ * a reference to a global cpumask, which is read-only in the caller and
+ * is never released. The acquire / release semantics here are just used
+	 * to make the cpumask a trusted pointer in the caller.
+ */
+}
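+
+/*
+ * Illustrative sketch: the acquire / release pair is used from BPF like
+ * this (bpf_cpumask_first() is the generic BPF cpumask kfunc):
+ *
+ *	const struct cpumask *online = scx_bpf_get_online_cpumask();
+ *	u32 first = bpf_cpumask_first(online);
+ *
+ *	scx_bpf_put_cpumask(online);
+ */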
+
+/**
+ * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
+ * per-CPU cpumask.
+ *
+ * Returns an empty cpumask if idle tracking is not enabled, or running on a
+ * UP kernel.
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return cpu_none_mask;
+ }
+
+#ifdef CONFIG_SMP
+ return idle_masks.cpu;
+#else
+ return cpu_none_mask;
+#endif
+}
+
+/**
+ * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
+ * per-physical-core cpumask. Can be used to determine if an entire physical
+ * core is free.
+ *
+ * Returns an empty cpumask if idle tracking is not enabled, or running on a
+ * UP kernel.
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return cpu_none_mask;
+ }
+
+#ifdef CONFIG_SMP
+ if (sched_smt_active())
+ return idle_masks.smt;
+ else
+ return idle_masks.cpu;
+#else
+ return cpu_none_mask;
+#endif
+}
+
+/**
+ * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
+ * either the per-CPU or SMT idle-tracking cpumask.
+ */
+__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
+{
+ /*
+ * Empty function body because we aren't actually acquiring or releasing
+ * a reference to a global idle cpumask, which is read-only in the
+ * caller and is never released. The acquire / release semantics here
+ * are just used to make the cpumask a trusted pointer in the caller.
+ */
+}
+
+/**
+ * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
+ * @cpu: cpu to test and clear idle for
+ *
+ * Returns %true if @cpu was idle and its idle state was successfully cleared.
+ * %false otherwise.
+ *
+ * Unavailable if ops.update_idle() is implemented and
+ * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
+ */
+__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return false;
+ }
+
+ if (ops_cpu_valid(cpu, NULL))
+ return test_and_clear_cpu_idle(cpu);
+ else
+ return false;
+}
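+
+/*
+ * Illustrative sketch: a common use is in ops.select_cpu() to try to keep
+ * a waking task on its previous CPU:
+ *
+ *	if (scx_bpf_test_and_clear_cpu_idle(prev_cpu))
+ *		return prev_cpu;
+ */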
+
+/**
+ * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
+ * @cpus_allowed: Allowed cpumask
+ * @flags: %SCX_PICK_IDLE_CPU_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
+ * number on success. -%EBUSY if no matching cpu was found.
+ *
+ * Idle CPU tracking may race against CPU scheduling state transitions. For
+ * example, this function may return -%EBUSY as CPUs are transitioning into the
+ * idle state. If the caller then assumes that there will be dispatch events on
+ * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
+ * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
+ * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
+ * event in the near future.
+ *
+ * Unavailable if ops.update_idle() is implemented and
+ * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
+ */
+__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
+ u64 flags)
+{
+ if (!static_branch_likely(&scx_builtin_idle_enabled)) {
+ scx_ops_error("built-in idle tracking is disabled");
+ return -EBUSY;
+ }
+
+ return scx_pick_idle_cpu(cpus_allowed, flags);
+}
+
+/**
+ * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
+ * @cpus_allowed: Allowed cpumask
+ * @flags: %SCX_PICK_IDLE_CPU_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
+ * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
+ * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
+ * empty.
+ *
+ * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
+ * set, this function can't tell which CPUs are idle and will always pick any
+ * CPU.
+ */
+__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
+ u64 flags)
+{
+ s32 cpu;
+
+ if (static_branch_likely(&scx_builtin_idle_enabled)) {
+ cpu = scx_pick_idle_cpu(cpus_allowed, flags);
+ if (cpu >= 0)
+ return cpu;
+ }
+
+ cpu = cpumask_any_distribute(cpus_allowed);
+ if (cpu < nr_cpu_ids)
+ return cpu;
+ else
+ return -EBUSY;
+}
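+
+/*
+ * Illustrative sketch of the pattern recommended above for guaranteeing a
+ * dispatch event, e.g. from an enqueue path (SHARED_DSQ and the
+ * scx_bpf_dispatch() call are assumptions based on the other kfuncs in
+ * this file):
+ *
+ *	s32 cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
+ *
+ *	scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, 0);
+ *	if (cpu >= 0)
+ *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+ */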
+
+/**
+ * scx_bpf_task_running - Is task currently running?
+ * @p: task of interest
+ */
+__bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
+{
+ return task_rq(p)->curr == p;
+}
+
+/**
+ * scx_bpf_task_cpu - CPU a task is currently associated with
+ * @p: task of interest
+ */
+__bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
+{
+ return task_cpu(p);
+}
+
+/**
+ * scx_bpf_cpu_rq - Fetch the rq of a CPU
+ * @cpu: CPU of the rq
+ */
+__bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
+{
+ if (!ops_cpu_valid(cpu, NULL))
+ return NULL;
+
+ return cpu_rq(cpu);
+}
+
+/**
+ * scx_bpf_task_cgroup - Return the sched cgroup of a task
+ * @p: task of interest
+ *
+ * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
+ * from the scheduler's POV. SCX operations should use this function to
+ * determine @p's current cgroup as, unlike following @p->cgroups,
+ * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
+ * rq-locked operations. Can be called on the parameter tasks of rq-locked
+ * operations. The restriction guarantees that @p's rq is locked by the caller.
+ */
+#ifdef CONFIG_CGROUP_SCHED
+__bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
+{
+ struct task_group *tg = p->sched_task_group;
+ struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
+
+ if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
+ goto out;
+
+ /*
+ * A task_group may either be a cgroup or an autogroup. In the latter
+ * case, @tg->css.cgroup is %NULL. A task_group can't become the other
+ * kind once created.
+ */
+ if (tg && tg->css.cgroup)
+ cgrp = tg->css.cgroup;
+ else
+ cgrp = &cgrp_dfl_root.cgrp;
+out:
+ cgroup_get(cgrp);
+ return cgrp;
+}
+#endif
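+
+/*
+ * Illustrative sketch: because scx_bpf_task_cgroup() is an acquire kfunc,
+ * BPF callers must release the returned cgroup, e.g.:
+ *
+ *	struct cgroup *cgrp = scx_bpf_task_cgroup(p);
+ *	u64 cgid = cgrp->kn->id;
+ *
+ *	bpf_cgroup_release(cgrp);
+ */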
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_any)
+BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
+BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
+BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
+BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
+BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
+BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
+BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
+BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
+BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
+#ifdef CONFIG_CGROUP_SCHED
+BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
+#endif
+BTF_KFUNCS_END(scx_kfunc_ids_any)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_any = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_any,
+};
+
+static int __init scx_init(void)
+{
+ int ret;
+
+ /*
+ * kfunc registration can't be done from init_sched_ext_class() as
+ * register_btf_kfunc_id_set() needs most of the system to be up.
+ *
+ * Some kfuncs are context-sensitive and can only be called from
+ * specific SCX ops. They are grouped into BTF sets accordingly.
+ * Unfortunately, BPF currently doesn't have a way of enforcing such
+ * restrictions. Eventually, the verifier should be able to enforce
+	 * them. For now, register them all the same way and make each kfunc
+	 * explicitly check its calling context using scx_kf_allowed().
+ */
+ if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_select_cpu)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_enqueue_dispatch)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_dispatch)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_cpu_release)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_unlocked)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
+ &scx_kfunc_set_unlocked)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &scx_kfunc_set_any)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
+ &scx_kfunc_set_any)) ||
+ (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
+ &scx_kfunc_set_any))) {
+ pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
+ return ret;
+ }
+
+ ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
+ if (ret) {
+ pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
+ return ret;
+ }
+
+ ret = register_pm_notifier(&scx_pm_notifier);
+ if (ret) {
+ pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
+ return ret;
+ }
+
+ scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
+ if (!scx_kset) {
+ pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
+ return -ENOMEM;
+ }
+
+ ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
+ if (ret < 0) {
+ pr_err("sched_ext: Failed to add global attributes\n");
+ return ret;
+ }
+
+ return 0;
+}
+__initcall(scx_init);
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
new file mode 100644
index 000000000000..246019519231
--- /dev/null
+++ b/kernel/sched/ext.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifdef CONFIG_SCHED_CLASS_EXT
+
+void scx_tick(struct rq *rq);
+void init_scx_entity(struct sched_ext_entity *scx);
+void scx_pre_fork(struct task_struct *p);
+int scx_fork(struct task_struct *p);
+void scx_post_fork(struct task_struct *p);
+void scx_cancel_fork(struct task_struct *p);
+bool scx_can_stop_tick(struct rq *rq);
+void scx_rq_activate(struct rq *rq);
+void scx_rq_deactivate(struct rq *rq);
+int scx_check_setscheduler(struct task_struct *p, int policy);
+bool task_should_scx(struct task_struct *p);
+void init_sched_ext_class(void);
+
+static inline u32 scx_cpuperf_target(s32 cpu)
+{
+ if (scx_enabled())
+ return cpu_rq(cpu)->scx.cpuperf_target;
+ else
+ return 0;
+}
+
+static inline bool task_on_scx(const struct task_struct *p)
+{
+ return scx_enabled() && p->sched_class == &ext_sched_class;
+}
+
+#ifdef CONFIG_SCHED_CORE
+bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool in_fi);
+#endif
+
+#else /* CONFIG_SCHED_CLASS_EXT */
+
+static inline void scx_tick(struct rq *rq) {}
+static inline void scx_pre_fork(struct task_struct *p) {}
+static inline int scx_fork(struct task_struct *p) { return 0; }
+static inline void scx_post_fork(struct task_struct *p) {}
+static inline void scx_cancel_fork(struct task_struct *p) {}
+static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
+static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
+static inline void scx_rq_activate(struct rq *rq) {}
+static inline void scx_rq_deactivate(struct rq *rq) {}
+static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
+static inline bool task_on_scx(const struct task_struct *p) { return false; }
+static inline void init_sched_ext_class(void) {}
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
+#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
+void __scx_update_idle(struct rq *rq, bool idle);
+
+static inline void scx_update_idle(struct rq *rq, bool idle)
+{
+ if (scx_enabled())
+ __scx_update_idle(rq, idle);
+}
+#else
+static inline void scx_update_idle(struct rq *rq, bool idle) {}
+#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+#ifdef CONFIG_EXT_GROUP_SCHED
+int scx_tg_online(struct task_group *tg);
+void scx_tg_offline(struct task_group *tg);
+int scx_cgroup_can_attach(struct cgroup_taskset *tset);
+void scx_move_task(struct task_struct *p);
+void scx_cgroup_finish_attach(void);
+void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
+void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
+void scx_group_set_idle(struct task_group *tg, bool idle);
+#else /* CONFIG_EXT_GROUP_SCHED */
+static inline int scx_tg_online(struct task_group *tg) { return 0; }
+static inline void scx_tg_offline(struct task_group *tg) {}
+static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
+static inline void scx_move_task(struct task_struct *p) {}
+static inline void scx_cgroup_finish_attach(void) {}
+static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
+static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
+static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
+#endif /* CONFIG_EXT_GROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8dc9385f6da4..225b31aaee55 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -511,7 +511,7 @@ static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
static int se_is_idle(struct sched_entity *se)
{
- return 0;
+ return task_has_idle_policy(task_of(se));
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -779,8 +779,22 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
}
/* ensure we never gain time by being placed backwards. */
- u64_u32_store(cfs_rq->min_vruntime,
- __update_min_vruntime(cfs_rq, vruntime));
+ cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime);
+}
+
+static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+{
+ struct sched_entity *root = __pick_root_entity(cfs_rq);
+ struct sched_entity *curr = cfs_rq->curr;
+ u64 min_slice = ~0ULL;
+
+ if (curr && curr->on_rq)
+ min_slice = curr->slice;
+
+ if (root)
+ min_slice = min(min_slice, root->min_slice);
+
+ return min_slice;
}
static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
@@ -799,19 +813,34 @@ static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node
}
}
+static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node)
+{
+ if (node) {
+ struct sched_entity *rse = __node_2_se(node);
+ if (rse->min_slice < se->min_slice)
+ se->min_slice = rse->min_slice;
+ }
+}
+
/*
* se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
*/
static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
{
u64 old_min_vruntime = se->min_vruntime;
+ u64 old_min_slice = se->min_slice;
struct rb_node *node = &se->run_node;
se->min_vruntime = se->vruntime;
__min_vruntime_update(se, node->rb_right);
__min_vruntime_update(se, node->rb_left);
- return se->min_vruntime == old_min_vruntime;
+ se->min_slice = se->slice;
+ __min_slice_update(se, node->rb_right);
+ __min_slice_update(se, node->rb_left);
+
+ return se->min_vruntime == old_min_vruntime &&
+ se->min_slice == old_min_slice;
}
RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
@@ -824,6 +853,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
avg_vruntime_add(cfs_rq, se);
se->min_vruntime = se->vruntime;
+ se->min_slice = se->slice;
rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
__entity_less, &min_vruntime_cb);
}
@@ -974,17 +1004,18 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
* XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
* this is probably good enough.
*/
-static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if ((s64)(se->vruntime - se->deadline) < 0)
- return;
+ return false;
/*
* For EEVDF the virtual time slope is determined by w_i (iow.
* nice) while the request time r_i is determined by
* sysctl_sched_base_slice.
*/
- se->slice = sysctl_sched_base_slice;
+ if (!se->custom_slice)
+ se->slice = sysctl_sched_base_slice;
/*
* EEVDF: vd_i = ve_i + r_i / w_i
@@ -994,10 +1025,7 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* The task has consumed its request, reschedule.
*/
- if (cfs_rq->nr_running > 1) {
- resched_curr(rq_of(cfs_rq));
- clear_buddies(cfs_rq, se);
- }
+ return true;
}
#include "pelt.h"
@@ -1135,6 +1163,38 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
dl_server_update(p->dl_server, delta_exec);
}
+static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
+ if (!sched_feat(PREEMPT_SHORT))
+ return false;
+
+ if (curr->vlag == curr->deadline)
+ return false;
+
+ return !entity_eligible(cfs_rq, curr);
+}
+
+static inline bool do_preempt_short(struct cfs_rq *cfs_rq,
+ struct sched_entity *pse, struct sched_entity *se)
+{
+ if (!sched_feat(PREEMPT_SHORT))
+ return false;
+
+ if (pse->slice >= se->slice)
+ return false;
+
+ if (!entity_eligible(cfs_rq, pse))
+ return false;
+
+ if (entity_before(pse, se))
+ return true;
+
+ if (!entity_eligible(cfs_rq, se))
+ return true;
+
+ return false;
+}
+
/*
* Used by other classes to account runtime.
*/
@@ -1156,23 +1216,44 @@ s64 update_curr_common(struct rq *rq)
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
+ struct rq *rq = rq_of(cfs_rq);
s64 delta_exec;
+ bool resched;
if (unlikely(!curr))
return;
- delta_exec = update_curr_se(rq_of(cfs_rq), curr);
+ delta_exec = update_curr_se(rq, curr);
if (unlikely(delta_exec <= 0))
return;
curr->vruntime += calc_delta_fair(delta_exec, curr);
- update_deadline(cfs_rq, curr);
+ resched = update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
- if (entity_is_task(curr))
- update_curr_task(task_of(curr), delta_exec);
+ if (entity_is_task(curr)) {
+ struct task_struct *p = task_of(curr);
+
+ update_curr_task(p, delta_exec);
+
+ /*
+ * Any fair task that runs outside of fair_server should
+ * account against fair_server such that it can account for
+ * this time and possibly avoid running this period.
+ */
+ if (p->dl_server != &rq->fair_server)
+ dl_server_update(&rq->fair_server, delta_exec);
+ }
account_cfs_rq_runtime(cfs_rq, delta_exec);
+
+ if (rq->nr_running == 1)
+ return;
+
+ if (resched || did_preempt_short(cfs_rq, curr)) {
+ resched_curr(rq);
+ clear_buddies(cfs_rq, curr);
+ }
}
static void update_curr_fair(struct rq *rq)
@@ -1742,7 +1823,7 @@ static bool pgdat_free_space_enough(struct pglist_data *pgdat)
continue;
if (zone_watermark_ok(zone, 0,
- wmark_pages(zone, WMARK_PROMO) + enough_wmark,
+ promo_wmark_pages(zone) + enough_wmark,
ZONE_MOVABLE, 0))
return true;
}
@@ -1840,8 +1921,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
* The pages in slow memory node should be migrated according
* to hot/cold instead of private/shared.
*/
- if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
- !node_is_toptier(src_nid)) {
+ if (folio_use_access_time(folio)) {
struct pglist_data *pgdat;
unsigned long rate_limit;
unsigned int latency, th, def_th;
@@ -3188,6 +3268,15 @@ static bool vma_is_accessed(struct mm_struct *mm, struct vm_area_struct *vma)
return true;
}
+ /*
+	 * This vma has not been accessed for a while. If the number of
+	 * threads in the same process is low, no other threads can help
+	 * scan this vma, so force a vma scan.
+ */
+ if (READ_ONCE(mm->numa_scan_seq) >
+ (vma->numab_state->prev_scan_seq + get_nr_threads(current)))
+ return true;
+
return false;
}
@@ -3835,7 +3924,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
}
}
-void reweight_task(struct task_struct *p, const struct load_weight *lw)
+static void reweight_task_fair(struct rq *rq, struct task_struct *p,
+ const struct load_weight *lw)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -5178,7 +5268,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
u64 vslice, vruntime = avg_vruntime(cfs_rq);
s64 lag = 0;
- se->slice = sysctl_sched_base_slice;
+ if (!se->custom_slice)
+ se->slice = sysctl_sched_base_slice;
vslice = calc_delta_fair(se->slice, se);
/*
@@ -5259,6 +5350,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->vruntime = vruntime - lag;
+ if (sched_feat(PLACE_REL_DEADLINE) && se->rel_deadline) {
+ se->deadline += se->vruntime;
+ se->rel_deadline = 0;
+ return;
+ }
+
/*
* When joining the competition; the existing tasks will be,
* on average, halfway through their slice, as such start tasks
@@ -5279,6 +5376,9 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
static inline bool cfs_bandwidth_used(void);
static void
+requeue_delayed_entity(struct sched_entity *se);
+
+static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
bool curr = cfs_rq->curr == se;
@@ -5365,20 +5465,48 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-static void
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+ se->sched_delayed = 0;
+ if (sched_feat(DELAY_ZERO) && se->vlag > 0)
+ se->vlag = 0;
+}
+
+static bool
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- int action = UPDATE_TG;
+ bool sleep = flags & DEQUEUE_SLEEP;
+ update_curr(cfs_rq);
+
+ if (flags & DEQUEUE_DELAYED) {
+ SCHED_WARN_ON(!se->sched_delayed);
+ } else {
+ bool delay = sleep;
+ /*
+		 * DELAY_DEQUEUE relies on spurious wakeups; special task
+		 * states must not suffer spurious wakeups, so exempt them.
+ */
+ if (flags & DEQUEUE_SPECIAL)
+ delay = false;
+
+ SCHED_WARN_ON(delay && se->sched_delayed);
+
+ if (sched_feat(DELAY_DEQUEUE) && delay &&
+ !entity_eligible(cfs_rq, se)) {
+ if (cfs_rq->next == se)
+ cfs_rq->next = NULL;
+ update_load_avg(cfs_rq, se, 0);
+ se->sched_delayed = 1;
+ return false;
+ }
+ }
+
+ int action = UPDATE_TG;
if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
action |= DO_DETACH;
/*
- * Update run-time statistics of the 'current'.
- */
- update_curr(cfs_rq);
-
- /*
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
@@ -5395,6 +5523,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
clear_buddies(cfs_rq, se);
update_entity_lag(cfs_rq, se);
+ if (sched_feat(PLACE_REL_DEADLINE) && !sleep) {
+ se->deadline -= se->vruntime;
+ se->rel_deadline = 1;
+ }
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
@@ -5414,8 +5547,13 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
+ if (flags & DEQUEUE_DELAYED)
+ finish_delayed_dequeue_entity(se);
+
if (cfs_rq->nr_running == 0)
update_idle_cfs_rq_clock_pelt(cfs_rq);
+
+ return true;
}
static void
@@ -5441,6 +5579,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
update_stats_curr_start(cfs_rq, se);
+ SCHED_WARN_ON(cfs_rq->curr);
cfs_rq->curr = se;
/*
@@ -5461,6 +5600,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
+static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
+
/*
* Pick the next process, keeping these things in mind, in this order:
* 1) keep things fair between processes/task groups
@@ -5469,16 +5610,26 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* 4) do not run the "skip" process, if something else is available
*/
static struct sched_entity *
-pick_next_entity(struct cfs_rq *cfs_rq)
+pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
{
/*
* Enabling NEXT_BUDDY will affect latency but not fairness.
*/
if (sched_feat(NEXT_BUDDY) &&
- cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
+ cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) {
+ /* ->next will never be delayed */
+ SCHED_WARN_ON(cfs_rq->next->sched_delayed);
return cfs_rq->next;
+ }
- return pick_eevdf(cfs_rq);
+ struct sched_entity *se = pick_eevdf(cfs_rq);
+ if (se->sched_delayed) {
+ dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+ SCHED_WARN_ON(se->sched_delayed);
+ SCHED_WARN_ON(se->on_rq);
+ return NULL;
+ }
+ return se;
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -5502,6 +5653,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
/* in !on_rq case, update occurred at dequeue */
update_load_avg(cfs_rq, prev, 0);
}
+ SCHED_WARN_ON(cfs_rq->curr != prev);
cfs_rq->curr = NULL;
}
@@ -5765,6 +5917,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, idle_task_delta, dequeue = 1;
+ long rq_h_nr_running = rq->cfs.h_nr_running;
raw_spin_lock(&cfs_b->lock);
/* This will start the period timer if necessary */
@@ -5798,11 +5951,21 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
idle_task_delta = cfs_rq->idle_h_nr_running;
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
+ int flags;
+
/* throttled entity or throttle-on-deactivate */
if (!se->on_rq)
goto done;
- dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
+ /*
+ * Abuse SPECIAL to avoid delayed dequeue in this instance.
+ * This avoids teaching dequeue_entities() about throttled
+ * entities and keeps things relatively simple.
+ */
+ flags = DEQUEUE_SLEEP | DEQUEUE_SPECIAL;
+ if (se->sched_delayed)
+ flags |= DEQUEUE_DELAYED;
+ dequeue_entity(qcfs_rq, se, flags);
if (cfs_rq_is_idle(group_cfs_rq(se)))
idle_task_delta = cfs_rq->h_nr_running;
@@ -5836,6 +5999,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
/* At this point se is NULL and we are at root level*/
sub_nr_running(rq, task_delta);
+ /* Stop the fair server if throttling resulted in no runnable tasks */
+ if (rq_h_nr_running && !rq->cfs.h_nr_running)
+ dl_server_stop(&rq->fair_server);
done:
/*
* Note: distribution will already see us throttled via the
@@ -5854,6 +6020,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, idle_task_delta;
+ long rq_h_nr_running = rq->cfs.h_nr_running;
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -5891,8 +6058,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
for_each_sched_entity(se) {
struct cfs_rq *qcfs_rq = cfs_rq_of(se);
- if (se->on_rq)
+ if (se->on_rq) {
+ SCHED_WARN_ON(se->sched_delayed);
break;
+ }
enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
if (cfs_rq_is_idle(group_cfs_rq(se)))
@@ -5923,6 +6092,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
goto unthrottle_throttle;
}
+ /* Start the fair server if un-throttling resulted in new runnable tasks */
+ if (!rq_h_nr_running && rq->cfs.h_nr_running)
+ dl_server_start(&rq->fair_server);
+
/* At this point se is NULL and we are at root level*/
add_nr_running(rq, task_delta);
@@ -6555,7 +6728,7 @@ static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
{
int cpu = cpu_of(rq);
- if (!sched_feat(HZ_BW) || !cfs_bandwidth_used())
+ if (!cfs_bandwidth_used())
return;
if (!tick_nohz_full_cpu(cpu))
@@ -6738,6 +6911,37 @@ static int sched_idle_cpu(int cpu)
}
#endif
+static void
+requeue_delayed_entity(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ /*
+ * se->sched_delayed should imply: se->on_rq == 1.
+ * Because a delayed entity is one that is still on
+	 * the runqueue competing until eligibility.
+ */
+ SCHED_WARN_ON(!se->sched_delayed);
+ SCHED_WARN_ON(!se->on_rq);
+
+ if (sched_feat(DELAY_ZERO)) {
+ update_entity_lag(cfs_rq, se);
+ if (se->vlag > 0) {
+ cfs_rq->nr_running--;
+ if (se != cfs_rq->curr)
+ __dequeue_entity(cfs_rq, se);
+ se->vlag = 0;
+ place_entity(cfs_rq, se, 0);
+ if (se != cfs_rq->curr)
+ __enqueue_entity(cfs_rq, se);
+ cfs_rq->nr_running++;
+ }
+ }
+
+ update_load_avg(cfs_rq, se, 0);
+ se->sched_delayed = 0;
+}
+
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
@@ -6750,6 +6954,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct sched_entity *se = &p->se;
int idle_h_nr_running = task_has_idle_policy(p);
int task_new = !(flags & ENQUEUE_WAKEUP);
+ int rq_h_nr_running = rq->cfs.h_nr_running;
+ u64 slice = 0;
/*
* The code below (indirectly) updates schedutil which looks at
@@ -6757,7 +6963,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* Let's add the task's estimated utilization to the cfs_rq's
* estimated utilization, before we update schedutil.
*/
- util_est_enqueue(&rq->cfs, p);
+ if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE))))
+ util_est_enqueue(&rq->cfs, p);
+
+ if (flags & ENQUEUE_DELAYED) {
+ requeue_delayed_entity(se);
+ return;
+ }
/*
* If in_iowait is set, the code below may not trigger any cpufreq
@@ -6768,10 +6980,24 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
for_each_sched_entity(se) {
- if (se->on_rq)
+ if (se->on_rq) {
+ if (se->sched_delayed)
+ requeue_delayed_entity(se);
break;
+ }
cfs_rq = cfs_rq_of(se);
+
+ /*
+		 * Basically set the slice of group entities to the min_slice of
+ * their respective cfs_rq. This ensures the group can service
+ * its entities in the desired time-frame.
+ */
+ if (slice) {
+ se->slice = slice;
+ se->custom_slice = 1;
+ }
enqueue_entity(cfs_rq, se, flags);
+ slice = cfs_rq_min_slice(cfs_rq);
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
@@ -6793,6 +7019,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
se_update_runnable(se);
update_cfs_group(se);
+ se->slice = slice;
+ slice = cfs_rq_min_slice(cfs_rq);
+
cfs_rq->h_nr_running++;
cfs_rq->idle_h_nr_running += idle_h_nr_running;
@@ -6804,6 +7033,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
goto enqueue_throttle;
}
+ if (!rq_h_nr_running && rq->cfs.h_nr_running) {
+ /* Account for idle runtime */
+ if (!rq->nr_running)
+ dl_server_update_idle_time(rq, rq->curr);
+ dl_server_start(&rq->fair_server);
+ }
+
/* At this point se is NULL and we are at root level*/
add_nr_running(rq, 1);
@@ -6833,36 +7069,59 @@ enqueue_throttle:
static void set_next_buddy(struct sched_entity *se);
/*
- * The dequeue_task method is called before nr_running is
- * decreased. We remove the task from the rbtree and
- * update the fair scheduling stats:
+ * Basically dequeue_task_fair(), except it can deal with dequeue_entity()
+ * failing half-way through and resume the dequeue later.
+ *
+ * Returns:
+ * -1 - dequeue delayed
+ * 0 - dequeue throttled
+ * 1 - dequeue complete
*/
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
{
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- int task_sleep = flags & DEQUEUE_SLEEP;
- int idle_h_nr_running = task_has_idle_policy(p);
bool was_sched_idle = sched_idle_rq(rq);
+ int rq_h_nr_running = rq->cfs.h_nr_running;
+ bool task_sleep = flags & DEQUEUE_SLEEP;
+ bool task_delayed = flags & DEQUEUE_DELAYED;
+ struct task_struct *p = NULL;
+ int idle_h_nr_running = 0;
+ int h_nr_running = 0;
+ struct cfs_rq *cfs_rq;
+ u64 slice = 0;
- util_est_dequeue(&rq->cfs, p);
+ if (entity_is_task(se)) {
+ p = task_of(se);
+ h_nr_running = 1;
+ idle_h_nr_running = task_has_idle_policy(p);
+ } else {
+ cfs_rq = group_cfs_rq(se);
+ slice = cfs_rq_min_slice(cfs_rq);
+ }
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- dequeue_entity(cfs_rq, se, flags);
- cfs_rq->h_nr_running--;
+ if (!dequeue_entity(cfs_rq, se, flags)) {
+ if (p && &p->se == se)
+ return -1;
+
+ break;
+ }
+
+ cfs_rq->h_nr_running -= h_nr_running;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
+ idle_h_nr_running = h_nr_running;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
- goto dequeue_throttle;
+ return 0;
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
+ slice = cfs_rq_min_slice(cfs_rq);
+
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
/*
@@ -6874,6 +7133,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
break;
}
flags |= DEQUEUE_SLEEP;
+ flags &= ~(DEQUEUE_DELAYED | DEQUEUE_SPECIAL);
}
for_each_sched_entity(se) {
@@ -6883,28 +7143,61 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
se_update_runnable(se);
update_cfs_group(se);
- cfs_rq->h_nr_running--;
+ se->slice = slice;
+ slice = cfs_rq_min_slice(cfs_rq);
+
+ cfs_rq->h_nr_running -= h_nr_running;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
+ idle_h_nr_running = h_nr_running;
/* end evaluation on encountering a throttled cfs_rq */
if (cfs_rq_throttled(cfs_rq))
- goto dequeue_throttle;
-
+ return 0;
}
- /* At this point se is NULL and we are at root level*/
- sub_nr_running(rq, 1);
+ sub_nr_running(rq, h_nr_running);
+
+ if (rq_h_nr_running && !rq->cfs.h_nr_running)
+ dl_server_stop(&rq->fair_server);
/* balance early to pull high priority tasks */
if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
rq->next_balance = jiffies;
-dequeue_throttle:
- util_est_update(&rq->cfs, p, task_sleep);
+ if (p && task_delayed) {
+ SCHED_WARN_ON(!task_sleep);
+ SCHED_WARN_ON(p->on_rq != 1);
+
+ /* Fix-up what dequeue_task_fair() skipped */
+ hrtick_update(rq);
+
+ /* Fix-up what block_task() skipped. */
+ __block_task(rq, p);
+ }
+
+ return 1;
+}
+
+/*
+ * The dequeue_task method is called before nr_running is
+ * decreased. We remove the task from the rbtree and
+ * update the fair scheduling stats:
+ */
+static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+{
+ if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE))))
+ util_est_dequeue(&rq->cfs, p);
+
+ if (dequeue_entities(rq, &p->se, flags) < 0) {
+ util_est_update(&rq->cfs, p, DEQUEUE_SLEEP);
+ return false;
+ }
+
+ util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
hrtick_update(rq);
+ return true;
}
#ifdef CONFIG_SMP
@@ -7803,6 +8096,105 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
}
/*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+ *
+ * The scheduler tracks the following metrics:
+ *
+ * cpu_util_{cfs,rt,dl,irq}()
+ * cpu_bw_dl()
+ *
+ * Where the cfs,rt and dl util numbers are tracked with the same metric and
+ * synchronized windows and are thus directly comparable.
+ *
+ * The cfs,rt,dl utilization are the running times measured with rq->clock_task
+ * which excludes things like IRQ and steal-time. These latter are then accrued
+ * in the IRQ utilization.
+ *
+ * The DL bandwidth number OTOH is not a measured metric but a value computed
+ * based on the task model parameters and gives the minimal utilization
+ * required to meet deadlines.
+ */
+unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
+ unsigned long *min,
+ unsigned long *max)
+{
+ unsigned long util, irq, scale;
+ struct rq *rq = cpu_rq(cpu);
+
+ scale = arch_scale_cpu_capacity(cpu);
+
+ /*
+ * Early check to see if IRQ/steal time saturates the CPU, can be
+ * because of inaccuracies in how we track these -- see
+ * update_irq_load_avg().
+ */
+ irq = cpu_util_irq(rq);
+ if (unlikely(irq >= scale)) {
+ if (min)
+ *min = scale;
+ if (max)
+ *max = scale;
+ return scale;
+ }
+
+ if (min) {
+ /*
+ * The minimum utilization returns the highest level between:
+ * - the computed DL bandwidth needed with the IRQ pressure which
+		 *   steals time from the deadline task.
+ * - The minimum performance requirement for CFS and/or RT.
+ */
+ *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
+
+ /*
+ * When an RT task is runnable and uclamp is not used, we must
+ * ensure that the task will run at maximum compute capacity.
+ */
+ if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
+ *min = max(*min, scale);
+ }
+
+ /*
+	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
+ * CFS tasks and we use the same metric to track the effective
+ * utilization (PELT windows are synchronized) we can directly add them
+ * to obtain the CPU's actual utilization.
+ */
+ util = util_cfs + cpu_util_rt(rq);
+ util += cpu_util_dl(rq);
+
+ /*
+ * The maximum hint is a soft bandwidth requirement, which can be lower
+ * than the actual utilization because of uclamp_max requirements.
+ */
+ if (max)
+ *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
+
+ if (util >= scale)
+ return scale;
+
+ /*
+ * There is still idle time; further improve the number by using the
+ * IRQ metric. Because IRQ/steal time is hidden from the task clock we
+ * need to scale the task numbers:
+ *
+ * max - irq
+ * U' = irq + --------- * U
+ * max
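+	 *
+	 * Worked example with assumed numbers: for max = 1024, irq = 256
+	 * and U = 512, U' = 256 + (768/1024) * 512 = 640.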
+ */
+ util = scale_irq_capacity(util, irq, scale);
+ util += irq;
+
+ return min(scale, util);
+}
+
+unsigned long sched_cpu_util(int cpu)
+{
+ return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
+}
+
+/*
* energy_env - Utilization landscape for energy estimation.
* @task_busy_time: Utilization contribution by the task for which we test the
* placement. Given by eenv_task_busy_time().
@@ -8286,7 +8678,21 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
static void task_dead_fair(struct task_struct *p)
{
- remove_entity_load_avg(&p->se);
+ struct sched_entity *se = &p->se;
+
+ if (se->sched_delayed) {
+ struct rq_flags rf;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &rf);
+ if (se->sched_delayed) {
+ update_rq_clock(rq);
+ dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+ }
+ task_rq_unlock(rq, p, &rf);
+ }
+
+ remove_entity_load_avg(se);
}
/*
@@ -8322,7 +8728,7 @@ static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context
static int
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
- if (rq->nr_running)
+ if (sched_fair_runnable(rq))
return 1;
return sched_balance_newidle(rq, rf) != 0;
@@ -8381,16 +8787,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
if (test_tsk_need_resched(curr))
return;
- /* Idle tasks are by definition preempted by non-idle tasks. */
- if (unlikely(task_has_idle_policy(curr)) &&
- likely(!task_has_idle_policy(p)))
- goto preempt;
-
- /*
- * Batch and idle tasks do not preempt non-idle tasks (their preemption
- * is driven by the tick):
- */
- if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
+ if (!sched_feat(WAKEUP_PREEMPTION))
return;
find_matching_se(&se, &pse);
@@ -8400,7 +8797,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
pse_is_idle = se_is_idle(pse);
/*
- * Preempt an idle group in favor of a non-idle group (and don't preempt
+ * Preempt an idle entity in favor of a non-idle entity (and don't preempt
* in the inverse case).
*/
if (cse_is_idle && !pse_is_idle)
@@ -8408,11 +8805,26 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
if (cse_is_idle != pse_is_idle)
return;
+ /*
+ * BATCH and IDLE tasks do not preempt others.
+ */
+ if (unlikely(!normal_policy(p->policy)))
+ return;
+
cfs_rq = cfs_rq_of(se);
update_curr(cfs_rq);
+ /*
+ * If @p has a shorter slice than current and @p is eligible, override
+ * current's slice protection in order to allow preemption.
+ *
+ * Note that even if @p does not turn out to be the most eligible
+ * task at this moment, current's slice protection will be lost.
+ */
+ if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline)
+ se->vlag = se->deadline + 1;
/*
- * XXX pick_eevdf(cfs_rq) != se ?
+ * If @p has become the most eligible task, force preemption.
*/
if (pick_eevdf(cfs_rq) == pse)
goto preempt;
@@ -8423,7 +8835,6 @@ preempt:
resched_curr(rq);
}
-#ifdef CONFIG_SMP
static struct task_struct *pick_task_fair(struct rq *rq)
{
struct sched_entity *se;
@@ -8435,95 +8846,58 @@ again:
return NULL;
do {
- struct sched_entity *curr = cfs_rq->curr;
-
- /* When we pick for a remote RQ, we'll not have done put_prev_entity() */
- if (curr) {
- if (curr->on_rq)
- update_curr(cfs_rq);
- else
- curr = NULL;
+ /* Might not have done put_prev_entity() */
+ if (cfs_rq->curr && cfs_rq->curr->on_rq)
+ update_curr(cfs_rq);
- if (unlikely(check_cfs_rq_runtime(cfs_rq)))
- goto again;
- }
+ if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+ goto again;
- se = pick_next_entity(cfs_rq);
+ se = pick_next_entity(rq, cfs_rq);
+ if (!se)
+ goto again;
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
return task_of(se);
}
-#endif
+
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
- struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
struct task_struct *p;
int new_tasks;
again:
- if (!sched_fair_runnable(rq))
+ p = pick_task_fair(rq);
+ if (!p)
goto idle;
+ se = &p->se;
#ifdef CONFIG_FAIR_GROUP_SCHED
- if (!prev || prev->sched_class != &fair_sched_class)
+ if (prev->sched_class != &fair_sched_class)
goto simple;
+ __put_prev_set_next_dl_server(rq, prev, p);
+
/*
* Because of the set_next_buddy() in dequeue_task_fair() it is rather
* likely that a next task is from the same cgroup as the current.
*
* Therefore attempt to avoid putting and setting the entire cgroup
* hierarchy, only change the part that actually changes.
- */
-
- do {
- struct sched_entity *curr = cfs_rq->curr;
-
- /*
- * Since we got here without doing put_prev_entity() we also
- * have to consider cfs_rq->curr. If it is still a runnable
- * entity, update_curr() will update its vruntime, otherwise
- * forget we've ever seen it.
- */
- if (curr) {
- if (curr->on_rq)
- update_curr(cfs_rq);
- else
- curr = NULL;
-
- /*
- * This call to check_cfs_rq_runtime() will do the
- * throttle and dequeue its entity in the parent(s).
- * Therefore the nr_running test will indeed
- * be correct.
- */
- if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
- cfs_rq = &rq->cfs;
-
- if (!cfs_rq->nr_running)
- goto idle;
-
- goto simple;
- }
- }
-
- se = pick_next_entity(cfs_rq);
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
-
- p = task_of(se);
-
- /*
+ *
* Since we haven't yet done put_prev_entity and if the selected task
* is a different task than we started out with, try and touch the
* least amount of cfs_rqs.
*/
if (prev != p) {
struct sched_entity *pse = &prev->se;
+ struct cfs_rq *cfs_rq;
while (!(cfs_rq = is_same_group(se, pse))) {
int se_depth = se->depth;
@@ -8541,38 +8915,15 @@ again:
put_prev_entity(cfs_rq, pse);
set_next_entity(cfs_rq, se);
- }
-
- goto done;
-simple:
-#endif
- if (prev)
- put_prev_task(rq, prev);
- do {
- se = pick_next_entity(cfs_rq);
- set_next_entity(cfs_rq, se);
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
+ __set_next_task_fair(rq, p, true);
+ }
- p = task_of(se);
+ return p;
-done: __maybe_unused;
-#ifdef CONFIG_SMP
- /*
- * Move the next running task to the front of
- * the list, so our cfs_tasks list becomes MRU
- * one.
- */
- list_move(&p->se.group_node, &rq->cfs_tasks);
+simple:
#endif
-
- if (hrtick_enabled_fair(rq))
- hrtick_start_fair(rq, p);
-
- update_misfit_status(p, rq);
- sched_fair_update_stop_tick(rq, p);
-
+ put_prev_set_next_task(rq, prev, p);
return p;
idle:
@@ -8601,15 +8952,34 @@ idle:
return NULL;
}
-static struct task_struct *__pick_next_task_fair(struct rq *rq)
+static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
+{
+ return pick_next_task_fair(rq, prev, NULL);
+}
+
+static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
{
- return pick_next_task_fair(rq, NULL, NULL);
+ return !!dl_se->rq->cfs.nr_running;
+}
+
+static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
+{
+ return pick_task_fair(dl_se->rq);
+}
+
+void fair_server_init(struct rq *rq)
+{
+ struct sched_dl_entity *dl_se = &rq->fair_server;
+
+ init_dl_entity(dl_se);
+
+ dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_task);
}
/*
* Account for a descheduled task:
*/
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
struct sched_entity *se = &prev->se;
struct cfs_rq *cfs_rq;
@@ -9347,28 +9717,18 @@ static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {
static bool __update_blocked_others(struct rq *rq, bool *done)
{
- const struct sched_class *curr_class;
- u64 now = rq_clock_pelt(rq);
- unsigned long hw_pressure;
- bool decayed;
+ bool updated;
/*
* update_load_avg() can call cpufreq_update_util(). Make sure that RT,
* DL and IRQ signals have been updated before updating CFS.
*/
- curr_class = rq->curr->sched_class;
-
- hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
-
- decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
- update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
- update_hw_load_avg(now, rq, hw_pressure) |
- update_irq_load_avg(rq, 0);
+ updated = update_other_load_avgs(rq);
if (others_have_blocked(rq))
*done = false;
- return decayed;
+ return updated;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -12702,22 +13062,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
*/
static void task_fork_fair(struct task_struct *p)
{
- struct sched_entity *se = &p->se, *curr;
- struct cfs_rq *cfs_rq;
- struct rq *rq = this_rq();
- struct rq_flags rf;
-
- rq_lock(rq, &rf);
- update_rq_clock(rq);
-
set_task_max_allowed_capacity(p);
-
- cfs_rq = task_cfs_rq(current);
- curr = cfs_rq->curr;
- if (curr)
- update_curr(cfs_rq);
- place_entity(cfs_rq, se, ENQUEUE_INITIAL);
- rq_unlock(rq, &rf);
}
/*
@@ -12829,10 +13174,28 @@ static void attach_task_cfs_rq(struct task_struct *p)
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
detach_task_cfs_rq(p);
+ /*
+ * Since this is called after changing class, this is a little weird
+ * and we cannot use DEQUEUE_DELAYED.
+ */
+ if (p->se.sched_delayed) {
+ /* First, dequeue it from its new class' structures */
+ dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP);
+ /*
+ * Now, clean up the fair_sched_class side of things
+ * related to sched_delayed being true and that wasn't done
+ * due to the generic dequeue not using DEQUEUE_DELAYED.
+ */
+ finish_delayed_dequeue_entity(&p->se);
+ p->se.rel_deadline = 0;
+ __block_task(rq, p);
+ }
}
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
+ SCHED_WARN_ON(p->se.sched_delayed);
+
attach_task_cfs_rq(p);
set_task_max_allowed_capacity(p);
@@ -12850,12 +13213,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
}
}
-/* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
-static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
{
struct sched_entity *se = &p->se;
@@ -12868,6 +13226,27 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
list_move(&se->group_node, &rq->cfs_tasks);
}
#endif
+ if (!first)
+ return;
+
+ SCHED_WARN_ON(se->sched_delayed);
+
+ if (hrtick_enabled_fair(rq))
+ hrtick_start_fair(rq, p);
+
+ update_misfit_status(p, rq);
+ sched_fair_update_stop_tick(rq, p);
+}
+
+/*
+ * Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set cfs_rq->curr field when a task
+ * migrates between groups/classes.
+ */
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+{
+ struct sched_entity *se = &p->se;
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -12876,12 +13255,14 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
/* ensure bandwidth has been allocated on our new cfs_rq */
account_cfs_rq_runtime(cfs_rq, 0);
}
+
+ __set_next_task_fair(rq, p, first);
}
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
- u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
+ cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifdef CONFIG_SMP
raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
@@ -12983,28 +13364,35 @@ void online_fair_sched_group(struct task_group *tg)
void unregister_fair_sched_group(struct task_group *tg)
{
- unsigned long flags;
- struct rq *rq;
int cpu;
destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(cpu) {
- if (tg->se[cpu])
- remove_entity_load_avg(tg->se[cpu]);
+ struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
+ struct sched_entity *se = tg->se[cpu];
+ struct rq *rq = cpu_rq(cpu);
+
+ if (se) {
+ if (se->sched_delayed) {
+ guard(rq_lock_irqsave)(rq);
+ if (se->sched_delayed) {
+ update_rq_clock(rq);
+ dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+ }
+ list_del_leaf_cfs_rq(cfs_rq);
+ }
+ remove_entity_load_avg(se);
+ }
/*
* Only empty task groups can be destroyed; so we can speculatively
* check on_list without danger of it being re-added.
*/
- if (!tg->cfs_rq[cpu]->on_list)
- continue;
-
- rq = cpu_rq(cpu);
-
- raw_spin_rq_lock_irqsave(rq, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_rq_unlock_irqrestore(rq, flags);
+ if (cfs_rq->on_list) {
+ guard(rq_lock_irqsave)(rq);
+ list_del_leaf_cfs_rq(cfs_rq);
+ }
}
}
@@ -13194,13 +13582,13 @@ DEFINE_SCHED_CLASS(fair) = {
.wakeup_preempt = check_preempt_wakeup_fair,
+ .pick_task = pick_task_fair,
.pick_next_task = __pick_next_task_fair,
.put_prev_task = put_prev_task_fair,
.set_next_task = set_next_task_fair,
#ifdef CONFIG_SMP
.balance = balance_fair,
- .pick_task = pick_task_fair,
.select_task_rq = select_task_rq_fair,
.migrate_task_rq = migrate_task_rq_fair,
@@ -13214,6 +13602,7 @@ DEFINE_SCHED_CLASS(fair) = {
.task_tick = task_tick_fair,
.task_fork = task_fork_fair,
+ .reweight_task = reweight_task_fair,
.prio_changed = prio_changed_fair,
.switched_from = switched_from_fair,
.switched_to = switched_to_fair,
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 143f55df890b..290874079f60 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -5,8 +5,24 @@
* sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
*/
SCHED_FEAT(PLACE_LAG, true)
+/*
+ * Give new tasks half a slice to ease into the competition.
+ */
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+/*
+ * Preserve relative virtual deadline on 'migration'.
+ */
+SCHED_FEAT(PLACE_REL_DEADLINE, true)
+/*
+ * Inhibit (wakeup) preemption until the current task has either matched the
+ * 0-lag point or until it has exhausted its slice.
+ */
SCHED_FEAT(RUN_TO_PARITY, true)
+/*
+ * Allow wakeup of tasks with a shorter slice to cancel RUN_TO_PARITY for
+ * current.
+ */
+SCHED_FEAT(PREEMPT_SHORT, true)
/*
* Prefer to schedule the task we woke last (assuming it failed
@@ -22,6 +38,18 @@ SCHED_FEAT(NEXT_BUDDY, false)
SCHED_FEAT(CACHE_HOT_BUDDY, true)
/*
+ * Delay dequeueing tasks until they get selected or woken.
+ *
+ * By delaying the dequeue for non-eligible tasks, they remain in the
+ * competition and can burn off their negative lag. When they get selected
+ * they'll have positive lag by definition.
+ *
+ * DELAY_ZERO clips the lag on dequeue (or wakeup) to 0.
+ */
+SCHED_FEAT(DELAY_DEQUEUE, true)
+SCHED_FEAT(DELAY_ZERO, true)
+
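As a rough sketch of the semantics (the real logic lives in dequeue_entity() and differs in detail), a dequeue guarded by these features could look like the following; entity_eligible() and sched_feat() are existing fair-class helpers, and se->sched_delayed is the flag introduced by this series:

    /* Hedged sketch only -- not the literal fair.c change. */
    if (sched_feat(DELAY_DEQUEUE) && (flags & DEQUEUE_SLEEP) &&
        !entity_eligible(cfs_rq, se)) {
            se->sched_delayed = 1;  /* stay queued; keep burning off negative lag */
            return false;           /* report the dequeue as deferred */
    }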
+/*
* Allow wakeup-time preemption of the current task:
*/
SCHED_FEAT(WAKEUP_PREEMPTION, true)
@@ -85,5 +113,3 @@ SCHED_FEAT(WA_BIAS, true)
SCHED_FEAT(UTIL_EST, true)
SCHED_FEAT(LATENCY_WARN, false)
-
-SCHED_FEAT(HZ_BW, true)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 6e78d071beb5..d2f096bb274c 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -450,43 +450,37 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
resched_curr(rq);
}
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
+ dl_server_update_idle_time(rq, prev);
+ scx_update_idle(rq, false);
}
static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
update_idle_core(rq);
+ scx_update_idle(rq, true);
schedstat_inc(rq->sched_goidle);
+ next->se.exec_start = rq_clock_task(rq);
}
-#ifdef CONFIG_SMP
-static struct task_struct *pick_task_idle(struct rq *rq)
+struct task_struct *pick_task_idle(struct rq *rq)
{
return rq->idle;
}
-#endif
-
-struct task_struct *pick_next_task_idle(struct rq *rq)
-{
- struct task_struct *next = rq->idle;
-
- set_next_task_idle(rq, next, true);
-
- return next;
-}
/*
* It is not legal to sleep in the idle task - print a warning
* message if some code attempts to do it:
*/
-static void
+static bool
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
raw_spin_rq_unlock_irq(rq);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
raw_spin_rq_lock_irq(rq);
+ return true;
}
/*
@@ -528,13 +522,12 @@ DEFINE_SCHED_CLASS(idle) = {
.wakeup_preempt = wakeup_preempt_idle,
- .pick_next_task = pick_next_task_idle,
+ .pick_task = pick_task_idle,
.put_prev_task = put_prev_task_idle,
.set_next_task = set_next_task_idle,
#ifdef CONFIG_SMP
.balance = balance_idle,
- .pick_task = pick_task_idle,
.select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index fa52906a4478..a9c65d97b3ca 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -467,3 +467,23 @@ int update_irq_load_avg(struct rq *rq, u64 running)
return ret;
}
#endif
+
+/*
+ * Load avg and utilization metrics need to be updated periodically and before
+ * consumption. This function updates the metrics for all subsystems except for
+ * the fair class. @rq must be locked and have its clock updated.
+ */
+bool update_other_load_avgs(struct rq *rq)
+{
+ u64 now = rq_clock_pelt(rq);
+ const struct sched_class *curr_class = rq->curr->sched_class;
+ unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
+
+ lockdep_assert_rq_held(rq);
+
+ /* hw_pressure doesn't care about invariance */
+ return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
+ update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
+ update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
+ update_irq_load_avg(rq, 0);
+}
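A hedged usage sketch of the new helper (an assumed caller, e.g. a sched_ext balance path; the guard() form matches the one used in unregister_fair_sched_group() above, and the cpufreq follow-up is an assumption):

    guard(rq_lock_irqsave)(rq);
    update_rq_clock(rq);                 /* satisfy the "clock updated" contract */
    if (update_other_load_avgs(rq))
            cpufreq_update_util(rq, 0);  /* assumed follow-up on any decay */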
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 2150062949d4..f4f6a0875c66 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -6,6 +6,7 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+bool update_other_load_avgs(struct rq *rq);
#ifdef CONFIG_SCHED_HW_PRESSURE
int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 310523c1b9e3..172c588de542 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -8,10 +8,6 @@ int sched_rr_timeslice = RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
-
-struct rt_bandwidth def_rt_bandwidth;
-
/*
* period over which we measure -rt task CPU usage in us.
* default: 1s
@@ -66,6 +62,40 @@ static int __init sched_rt_sysctl_init(void)
late_initcall(sched_rt_sysctl_init);
#endif
+void init_rt_rq(struct rt_rq *rt_rq)
+{
+ struct rt_prio_array *array;
+ int i;
+
+ array = &rt_rq->active;
+ for (i = 0; i < MAX_RT_PRIO; i++) {
+ INIT_LIST_HEAD(array->queue + i);
+ __clear_bit(i, array->bitmap);
+ }
+ /* delimiter for bitsearch: */
+ __set_bit(MAX_RT_PRIO, array->bitmap);
+
+#if defined CONFIG_SMP
+ rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
+ rt_rq->highest_prio.next = MAX_RT_PRIO-1;
+ rt_rq->overloaded = 0;
+ plist_head_init(&rt_rq->pushable_tasks);
+#endif /* CONFIG_SMP */
+ /* We start in dequeued state, because no RT tasks are queued */
+ rt_rq->rt_queued = 0;
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ rt_rq->rt_time = 0;
+ rt_rq->rt_throttled = 0;
+ rt_rq->rt_runtime = 0;
+ raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+#endif
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
+
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
@@ -130,35 +160,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
do_start_rt_bandwidth(rt_b);
}
-void init_rt_rq(struct rt_rq *rt_rq)
-{
- struct rt_prio_array *array;
- int i;
-
- array = &rt_rq->active;
- for (i = 0; i < MAX_RT_PRIO; i++) {
- INIT_LIST_HEAD(array->queue + i);
- __clear_bit(i, array->bitmap);
- }
- /* delimiter for bit-search: */
- __set_bit(MAX_RT_PRIO, array->bitmap);
-
-#if defined CONFIG_SMP
- rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
- rt_rq->highest_prio.next = MAX_RT_PRIO-1;
- rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks);
-#endif /* CONFIG_SMP */
- /* We start is dequeued state, because no RT tasks are queued */
- rt_rq->rt_queued = 0;
-
- rt_rq->rt_time = 0;
- rt_rq->rt_throttled = 0;
- rt_rq->rt_runtime = 0;
- raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-}
-
-#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
@@ -195,7 +196,6 @@ void unregister_rt_sched_group(struct task_group *tg)
{
if (tg->rt_se)
destroy_rt_bandwidth(&tg->rt_bandwidth);
-
}
void free_rt_sched_group(struct task_group *tg)
@@ -253,8 +253,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
if (!tg->rt_se)
goto err;
- init_rt_bandwidth(&tg->rt_bandwidth,
- ktime_to_ns(def_rt_bandwidth.rt_period), 0);
+ init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0);
for_each_possible_cpu(i) {
rt_rq = kzalloc_node(sizeof(struct rt_rq),
@@ -604,70 +603,6 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
return &rt_rq->tg->rt_bandwidth;
}
-#else /* !CONFIG_RT_GROUP_SCHED */
-
-static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
-{
- return rt_rq->rt_runtime;
-}
-
-static inline u64 sched_rt_period(struct rt_rq *rt_rq)
-{
- return ktime_to_ns(def_rt_bandwidth.rt_period);
-}
-
-typedef struct rt_rq *rt_rq_iter_t;
-
-#define for_each_rt_rq(rt_rq, iter, rq) \
- for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
-
-#define for_each_sched_rt_entity(rt_se) \
- for (; rt_se; rt_se = NULL)
-
-static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
-{
- return NULL;
-}
-
-static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
-{
- struct rq *rq = rq_of_rt_rq(rt_rq);
-
- if (!rt_rq->rt_nr_running)
- return;
-
- enqueue_top_rt_rq(rt_rq);
- resched_curr(rq);
-}
-
-static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
-{
- dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
-}
-
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
- return rt_rq->rt_throttled;
-}
-
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
- return cpu_online_mask;
-}
-
-static inline
-struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
-{
- return &cpu_rq(cpu)->rt;
-}
-
-static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
-{
- return &def_rt_bandwidth;
-}
-
-#endif /* CONFIG_RT_GROUP_SCHED */
-
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
@@ -859,7 +794,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
const struct cpumask *span;
span = sched_rt_period_mask();
-#ifdef CONFIG_RT_GROUP_SCHED
+
/*
* FIXME: isolated CPUs should really leave the root task group,
* whether they are isolcpus or were isolated via cpusets, lest
@@ -871,7 +806,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
*/
if (rt_b == &root_task_group.rt_bandwidth)
span = cpu_online_mask;
-#endif
+
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
@@ -938,18 +873,6 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
return idle;
}
-static inline int rt_se_prio(struct sched_rt_entity *rt_se)
-{
-#ifdef CONFIG_RT_GROUP_SCHED
- struct rt_rq *rt_rq = group_rt_rq(rt_se);
-
- if (rt_rq)
- return rt_rq->highest_prio.curr;
-#endif
-
- return rt_task_of(rt_se)->prio;
-}
-
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
u64 runtime = sched_rt_runtime(rt_rq);
@@ -993,6 +916,72 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
return 0;
}
+#else /* !CONFIG_RT_GROUP_SCHED */
+
+typedef struct rt_rq *rt_rq_iter_t;
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+ for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
+
+#define for_each_sched_rt_entity(rt_se) \
+ for (; rt_se; rt_se = NULL)
+
+static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
+{
+ return NULL;
+}
+
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
+{
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
+ if (!rt_rq->rt_nr_running)
+ return;
+
+ enqueue_top_rt_rq(rt_rq);
+ resched_curr(rq);
+}
+
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
+{
+ dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
+}
+
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+ return false;
+}
+
+static inline const struct cpumask *sched_rt_period_mask(void)
+{
+ return cpu_online_mask;
+}
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+ return &cpu_rq(cpu)->rt;
+}
+
+#ifdef CONFIG_SMP
+static void __enable_runtime(struct rq *rq) { }
+static void __disable_runtime(struct rq *rq) { }
+#endif
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline int rt_se_prio(struct sched_rt_entity *rt_se)
+{
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+ if (rt_rq)
+ return rt_rq->highest_prio.curr;
+#endif
+
+ return rt_task_of(rt_se)->prio;
+}
+
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -1000,7 +989,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
- struct sched_rt_entity *rt_se = &curr->rt;
s64 delta_exec;
if (curr->sched_class != &rt_sched_class)
@@ -1010,6 +998,9 @@ static void update_curr_rt(struct rq *rq)
if (unlikely(delta_exec <= 0))
return;
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct sched_rt_entity *rt_se = &curr->rt;
+
if (!rt_bandwidth_enabled())
return;
@@ -1028,6 +1019,7 @@ static void update_curr_rt(struct rq *rq)
do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
}
}
+#endif
}
static void
@@ -1184,7 +1176,6 @@ dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
- start_rt_bandwidth(&def_rt_bandwidth);
}
static inline
@@ -1492,7 +1483,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
enqueue_pushable_task(rq, p);
}
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
+static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
@@ -1500,6 +1491,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
dequeue_rt_entity(rt_se, flags);
dequeue_pushable_task(rq, p);
+
+ return true;
}
/*
@@ -1755,17 +1748,7 @@ static struct task_struct *pick_task_rt(struct rq *rq)
return p;
}
-static struct task_struct *pick_next_task_rt(struct rq *rq)
-{
- struct task_struct *p = pick_task_rt(rq);
-
- if (p)
- set_next_task_rt(rq, p, true);
-
- return p;
-}
-
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq = &rq->rt;
@@ -2652,13 +2635,12 @@ DEFINE_SCHED_CLASS(rt) = {
.wakeup_preempt = wakeup_preempt_rt,
- .pick_next_task = pick_next_task_rt,
+ .pick_task = pick_task_rt,
.put_prev_task = put_prev_task_rt,
.set_next_task = set_next_task_rt,
#ifdef CONFIG_SMP
.balance = balance_rt,
- .pick_task = pick_task_rt,
.select_task_rq = select_task_rq_rt,
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
@@ -2912,19 +2894,6 @@ int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
#ifdef CONFIG_SYSCTL
static int sched_rt_global_constraints(void)
{
- unsigned long flags;
- int i;
-
- raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
- for_each_possible_cpu(i) {
- struct rt_rq *rt_rq = &cpu_rq(i)->rt;
-
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_runtime = global_rt_runtime();
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- }
- raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
-
return 0;
}
#endif /* CONFIG_SYSCTL */
@@ -2944,12 +2913,6 @@ static int sched_rt_global_validate(void)
static void sched_rt_do_global(void)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
- def_rt_bandwidth.rt_runtime = global_rt_runtime();
- def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
- raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}
static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4c36cc680361..b1c3588a8f00 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -68,6 +68,7 @@
#include <linux/wait_api.h>
#include <linux/wait_bit.h>
#include <linux/workqueue_api.h>
+#include <linux/delayacct.h>
#include <trace/events/power.h>
#include <trace/events/sched.h>
@@ -192,9 +193,18 @@ static inline int idle_policy(int policy)
return policy == SCHED_IDLE;
}
+static inline int normal_policy(int policy)
+{
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (policy == SCHED_EXT)
+ return true;
+#endif
+ return policy == SCHED_NORMAL;
+}
+
static inline int fair_policy(int policy)
{
- return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+ return normal_policy(policy) || policy == SCHED_BATCH;
}
static inline int rt_policy(int policy)
@@ -245,6 +255,24 @@ static inline void update_avg(u64 *avg, u64 sample)
(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
/*
+ * cgroup weight knobs should use the common MIN, DFL and MAX values which are
+ * 1, 100 and 10000 respectively. While it loses a bit of range on both ends, it
+ * maps pretty well onto the shares value used by the scheduler, and the round-trip
+ * conversions preserve the original value over the entire range.
+ */
+static inline unsigned long sched_weight_from_cgroup(unsigned long cgrp_weight)
+{
+ return DIV_ROUND_CLOSEST_ULL(cgrp_weight * 1024, CGROUP_WEIGHT_DFL);
+}
+
+static inline unsigned long sched_weight_to_cgroup(unsigned long weight)
+{
+ return clamp_t(unsigned long,
+ DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024),
+ CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
+}
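The round-trip claim is easy to check outside the kernel; a minimal userspace model of the two conversions (DIV_ROUND_CLOSEST_ULL re-implemented by hand) prints nothing if every cgroup weight in [1, 10000] survives:

    #include <stdio.h>

    #define CGROUP_WEIGHT_DFL 100ULL

    /* Userspace stand-in for DIV_ROUND_CLOSEST_ULL(). */
    static unsigned long long div_closest(unsigned long long x, unsigned long long d)
    {
            return (x + d / 2) / d;
    }

    int main(void)
    {
            for (unsigned long long w = 1; w <= 10000; w++) {
                    unsigned long long shares = div_closest(w * 1024, CGROUP_WEIGHT_DFL);
                    unsigned long long back = div_closest(shares * CGROUP_WEIGHT_DFL, 1024);

                    if (back != w)
                            printf("weight %llu comes back as %llu\n", w, back);
            }
            return 0;
    }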
+
+/*
* !! For sched_setattr_nocheck() (kernel) only !!
*
* This is actually gross. :(
@@ -335,7 +363,7 @@ extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int dl_bw_check_overflow(int cpu);
-
+extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
/*
* SCHED_DEADLINE supports servers (nested scheduling) with the following
* interface:
@@ -361,7 +389,14 @@ extern void dl_server_start(struct sched_dl_entity *dl_se);
extern void dl_server_stop(struct sched_dl_entity *dl_se);
extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
dl_server_has_tasks_f has_tasks,
- dl_server_pick_f pick);
+ dl_server_pick_f pick_task);
+
+extern void dl_server_update_idle_time(struct rq *rq,
+ struct task_struct *p);
+extern void fair_server_init(struct rq *rq);
+extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq);
+extern int dl_server_apply_params(struct sched_dl_entity *dl_se,
+ u64 runtime, u64 period, bool init);
#ifdef CONFIG_CGROUP_SCHED
@@ -397,16 +432,17 @@ struct cfs_bandwidth {
struct task_group {
struct cgroup_subsys_state css;
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
+ /* A positive value indicates that this is a SCHED_IDLE group. */
+ int idle;
+#endif
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each CPU */
struct sched_entity **se;
/* runqueue "owned" by this group on each CPU */
struct cfs_rq **cfs_rq;
unsigned long shares;
-
- /* A positive value indicates that this is a SCHED_IDLE group. */
- int idle;
-
#ifdef CONFIG_SMP
/*
* load_avg can be heavily contended at clock tick time, so put
@@ -424,6 +460,11 @@ struct task_group {
struct rt_bandwidth rt_bandwidth;
#endif
+#ifdef CONFIG_EXT_GROUP_SCHED
+ u32 scx_flags; /* SCX_TG_* */
+ u32 scx_weight;
+#endif
+
struct rcu_head rcu;
struct list_head list;
@@ -448,7 +489,7 @@ struct task_group {
};
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
/*
@@ -479,6 +520,11 @@ static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
return walk_tg_tree_from(&root_task_group, down, up, data);
}
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct task_group, css) : NULL;
+}
+
extern int tg_nop(struct task_group *tg, void *data);
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -535,6 +581,9 @@ extern void set_task_rq_fair(struct sched_entity *se,
static inline void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
+static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; }
+static inline int sched_group_set_idle(struct task_group *tg, long idle) { return 0; }
#endif /* CONFIG_FAIR_GROUP_SCHED */
#else /* CONFIG_CGROUP_SCHED */
@@ -588,6 +637,11 @@ do { \
# define u64_u32_load(var) u64_u32_load_copy(var, var##_copy)
# define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
+struct balance_callback {
+ struct balance_callback *next;
+ void (*func)(struct rq *rq);
+};
+
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
@@ -599,17 +653,12 @@ struct cfs_rq {
s64 avg_vruntime;
u64 avg_load;
- u64 exec_clock;
u64 min_vruntime;
#ifdef CONFIG_SCHED_CORE
unsigned int forceidle_seq;
u64 min_vruntime_fi;
#endif
-#ifndef CONFIG_64BIT
- u64 min_vruntime_copy;
-#endif
-
struct rb_root_cached tasks_timeline;
/*
@@ -619,10 +668,6 @@ struct cfs_rq {
struct sched_entity *curr;
struct sched_entity *next;
-#ifdef CONFIG_SCHED_DEBUG
- unsigned int nr_spread_over;
-#endif
-
#ifdef CONFIG_SMP
/*
* CFS load tracking
@@ -696,6 +741,44 @@ struct cfs_rq {
#endif /* CONFIG_FAIR_GROUP_SCHED */
};
+#ifdef CONFIG_SCHED_CLASS_EXT
+/* scx_rq->flags, protected by the rq lock */
+enum scx_rq_flags {
+ /*
+ * A hotplugged CPU starts scheduling before rq_online_scx(). Track
+ * ops.cpu_on/offline() state so that ops.enqueue/dispatch() are called
+ * only while the BPF scheduler considers the CPU to be online.
+ */
+ SCX_RQ_ONLINE = 1 << 0,
+ SCX_RQ_CAN_STOP_TICK = 1 << 1,
+ SCX_RQ_BAL_KEEP = 1 << 2, /* balance decided to keep current */
+ SCX_RQ_BYPASSING = 1 << 3,
+
+ SCX_RQ_IN_WAKEUP = 1 << 16,
+ SCX_RQ_IN_BALANCE = 1 << 17,
+};
+
+struct scx_rq {
+ struct scx_dispatch_q local_dsq;
+ struct list_head runnable_list; /* runnable tasks on this rq */
+ struct list_head ddsp_deferred_locals; /* deferred ddsps from enq */
+ unsigned long ops_qseq;
+ u64 extra_enq_flags; /* see move_task_to_local_dsq() */
+ u32 nr_running;
+ u32 flags;
+ u32 cpuperf_target; /* [0, SCHED_CAPACITY_SCALE] */
+ bool cpu_released;
+ cpumask_var_t cpus_to_kick;
+ cpumask_var_t cpus_to_kick_if_idle;
+ cpumask_var_t cpus_to_preempt;
+ cpumask_var_t cpus_to_wait;
+ unsigned long pnt_seq;
+ struct balance_callback deferred_bal_cb;
+ struct irq_work deferred_irq_work;
+ struct irq_work kick_cpus_irq_work;
+};
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
static inline int rt_bandwidth_enabled(void)
{
return sysctl_sched_rt_runtime >= 0;
@@ -726,13 +809,13 @@ struct rt_rq {
#endif /* CONFIG_SMP */
int rt_queued;
+#ifdef CONFIG_RT_GROUP_SCHED
int rt_throttled;
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
-#ifdef CONFIG_RT_GROUP_SCHED
unsigned int rt_nr_boosted;
struct rq *rq;
@@ -820,6 +903,9 @@ static inline void se_update_runnable(struct sched_entity *se)
static inline long se_runnable(struct sched_entity *se)
{
+ if (se->sched_delayed)
+ return false;
+
if (entity_is_task(se))
return !!se->on_rq;
else
@@ -834,6 +920,9 @@ static inline void se_update_runnable(struct sched_entity *se) { }
static inline long se_runnable(struct sched_entity *se)
{
+ if (se->sched_delayed)
+ return false;
+
return !!se->on_rq;
}
@@ -996,11 +1085,6 @@ struct uclamp_rq {
DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */
-struct balance_callback {
- struct balance_callback *next;
- void (*func)(struct rq *rq);
-};
-
/*
* This is the main, per-CPU runqueue data structure.
*
@@ -1043,6 +1127,11 @@ struct rq {
struct cfs_rq cfs;
struct rt_rq rt;
struct dl_rq dl;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ struct scx_rq scx;
+#endif
+
+ struct sched_dl_entity fair_server;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this CPU: */
@@ -1059,6 +1148,7 @@ struct rq {
unsigned int nr_uninterruptible;
struct task_struct __rcu *curr;
+ struct sched_dl_entity *dl_server;
struct task_struct *idle;
struct task_struct *stop;
unsigned long next_balance;
@@ -1158,7 +1248,6 @@ struct rq {
/* latency stats */
struct sched_info rq_sched_info;
unsigned long long rq_cpu_time;
- /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
/* sys_sched_yield() stats */
unsigned int yld_count;
@@ -1187,6 +1276,7 @@ struct rq {
/* per rq */
struct rq *core;
struct task_struct *core_pick;
+ struct sched_dl_entity *core_dl_server;
unsigned int core_enabled;
unsigned int core_sched_seq;
struct rb_root core_tree;
@@ -2247,11 +2337,13 @@ extern const u32 sched_prio_to_wmult[40];
*
*/
-#define DEQUEUE_SLEEP 0x01
+#define DEQUEUE_SLEEP 0x01 /* Matches ENQUEUE_WAKEUP */
#define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */
+#define DEQUEUE_SPECIAL 0x10
#define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */
+#define DEQUEUE_DELAYED 0x200 /* Matches ENQUEUE_DELAYED */
#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
@@ -2267,6 +2359,7 @@ extern const u32 sched_prio_to_wmult[40];
#endif
#define ENQUEUE_INITIAL 0x80
#define ENQUEUE_MIGRATING 0x100
+#define ENQUEUE_DELAYED 0x200
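The new pair is used asymmetrically: a deferred dequeue is completed with DEQUEUE_DELAYED (as unregister_fair_sched_group() above already does), while a wakeup that finds a still-delayed task re-queues it instead. A hedged sketch of the two sides (the concrete call sites are in core.c/fair.c):

    /* Finish a deferred dequeue: */
    dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);

    /* A wakeup that finds p->se.sched_delayed set re-queues instead: */
    enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);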
#define RETRY_TASK ((void *)-1UL)
@@ -2285,23 +2378,31 @@ struct sched_class {
#endif
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
- void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
+ bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*yield_task) (struct rq *rq);
bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
- struct task_struct *(*pick_next_task)(struct rq *rq);
+ int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
+ struct task_struct *(*pick_task)(struct rq *rq);
+ /*
+ * Optional! When implemented, pick_next_task() should be equivalent to:
+ *
+ * next = pick_task();
+ * if (next) {
+ * put_prev_task(prev);
+ * set_next_task_first(next);
+ * }
+ */
+ struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
- void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+ void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next);
void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
#ifdef CONFIG_SMP
- int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
- struct task_struct * (*pick_task)(struct rq *rq);
-
void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
@@ -2323,8 +2424,11 @@ struct sched_class {
* cannot assume the switched_from/switched_to pair is serialized by
* rq->lock. They are however serialized by p->pi_lock.
*/
+ void (*switching_to) (struct rq *this_rq, struct task_struct *task);
void (*switched_from)(struct rq *this_rq, struct task_struct *task);
void (*switched_to) (struct rq *this_rq, struct task_struct *task);
+ void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
+ const struct load_weight *lw);
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio);
@@ -2345,7 +2449,7 @@ struct sched_class {
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
WARN_ON_ONCE(rq->curr != prev);
- prev->sched_class->put_prev_task(rq, prev);
+ prev->sched_class->put_prev_task(rq, prev, NULL);
}
static inline void set_next_task(struct rq *rq, struct task_struct *next)
@@ -2353,6 +2457,30 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
next->sched_class->set_next_task(rq, next, false);
}
+static inline void
+__put_prev_set_next_dl_server(struct rq *rq,
+ struct task_struct *prev,
+ struct task_struct *next)
+{
+ prev->dl_server = NULL;
+ next->dl_server = rq->dl_server;
+ rq->dl_server = NULL;
+}
+
+static inline void put_prev_set_next_task(struct rq *rq,
+ struct task_struct *prev,
+ struct task_struct *next)
+{
+ WARN_ON_ONCE(rq->curr != prev);
+
+ __put_prev_set_next_dl_server(rq, prev, next);
+
+ if (next == prev)
+ return;
+
+ prev->sched_class->put_prev_task(rq, prev, next);
+ next->sched_class->set_next_task(rq, next, true);
+}
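With this helper, the optional per-class fast path described above reduces to a few lines; a hedged sketch of what such an implementation might look like (names are illustrative):

    static struct task_struct *
    pick_next_task_example(struct rq *rq, struct task_struct *prev)
    {
            struct task_struct *next = pick_task_example(rq);   /* the class's ->pick_task() */

            if (next)
                    put_prev_set_next_task(rq, prev, next);
            return next;
    }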
/*
* Helper to define a sched_class instance; each one is placed in a separate
@@ -2373,19 +2501,54 @@ const struct sched_class name##_sched_class \
extern struct sched_class __sched_class_highest[];
extern struct sched_class __sched_class_lowest[];
+extern const struct sched_class stop_sched_class;
+extern const struct sched_class dl_sched_class;
+extern const struct sched_class rt_sched_class;
+extern const struct sched_class fair_sched_class;
+extern const struct sched_class idle_sched_class;
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+extern const struct sched_class ext_sched_class;
+
+DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */
+DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */
+
+#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
+#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
+#else /* !CONFIG_SCHED_CLASS_EXT */
+#define scx_enabled() false
+#define scx_switched_all() false
+#endif /* !CONFIG_SCHED_CLASS_EXT */
+
+/*
+ * Iterate only active classes. SCX can take over all fair tasks or be
+ * completely disabled. If the former, skip fair. If the latter, skip SCX.
+ */
+static inline const struct sched_class *next_active_class(const struct sched_class *class)
+{
+ class++;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (scx_switched_all() && class == &fair_sched_class)
+ class++;
+ if (!scx_enabled() && class == &ext_sched_class)
+ class++;
+#endif
+ return class;
+}
+
#define for_class_range(class, _from, _to) \
for (class = (_from); class < (_to); class++)
#define for_each_class(class) \
for_class_range(class, __sched_class_highest, __sched_class_lowest)
-#define sched_class_above(_a, _b) ((_a) < (_b))
+#define for_active_class_range(class, _from, _to) \
+ for (class = (_from); class != (_to); class = next_active_class(class))
-extern const struct sched_class stop_sched_class;
-extern const struct sched_class dl_sched_class;
-extern const struct sched_class rt_sched_class;
-extern const struct sched_class fair_sched_class;
-extern const struct sched_class idle_sched_class;
+#define for_each_active_class(class) \
+ for_active_class_range(class, __sched_class_highest, __sched_class_lowest)
+
+#define sched_class_above(_a, _b) ((_a) < (_b))
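A hedged sketch of how the core pick loop is expected to use the active-class iterator (mirroring, not quoting, __pick_next_task() in core.c):

    const struct sched_class *class;
    struct task_struct *p;

    for_each_active_class(class) {
            p = class->pick_task(rq);
            if (p)
                    break;  /* highest active class with a runnable task wins */
    }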
static inline bool sched_stop_runnable(struct rq *rq)
{
@@ -2408,7 +2571,7 @@ static inline bool sched_fair_runnable(struct rq *rq)
}
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-extern struct task_struct *pick_next_task_idle(struct rq *rq);
+extern struct task_struct *pick_task_idle(struct rq *rq);
#define SCA_CHECK 0x01
#define SCA_MIGRATE_DISABLE 0x02
@@ -2424,6 +2587,19 @@ extern void sched_balance_trigger(struct rq *rq);
extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx);
extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
+static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu)
+{
+ /* When not in the task's cpumask, no point in looking further. */
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ return false;
+
+ /* Can @cpu run a user thread? */
+ if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p))
+ return false;
+
+ return true;
+}
+
static inline cpumask_t *alloc_user_cpus_ptr(int node)
{
/*
@@ -2457,6 +2633,11 @@ extern int push_cpu_stop(void *arg);
#else /* !CONFIG_SMP: */
+static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu)
+{
+ return true;
+}
+
static inline int __set_cpus_allowed_ptr(struct task_struct *p,
struct affinity_context *ctx)
{
@@ -2510,12 +2691,9 @@ extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
-extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
-
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
-extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -2586,6 +2764,19 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
sched_update_tick_dependency(rq);
}
+static inline void __block_task(struct rq *rq, struct task_struct *p)
+{
+ WRITE_ONCE(p->on_rq, 0);
+ ASSERT_EXCLUSIVE_WRITER(p->on_rq);
+ if (p->sched_contributes_to_load)
+ rq->nr_uninterruptible++;
+
+ if (p->in_iowait) {
+ atomic_inc(&rq->nr_iowait);
+ delayacct_blkio_start();
+ }
+}
+
extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
@@ -3099,6 +3290,8 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
return READ_ONCE(rq->avg_rt.util_avg);
}
+#else /* !CONFIG_SMP */
+static inline bool update_other_load_avgs(struct rq *rq) { return false; }
#endif /* CONFIG_SMP */
#ifdef CONFIG_UCLAMP_TASK
@@ -3607,8 +3800,10 @@ extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *c
extern void __setscheduler_prio(struct task_struct *p, int prio);
extern void set_load_weight(struct task_struct *p, bool update_load);
extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
-extern void dequeue_task(struct rq *rq, struct task_struct *p, int flags);
+extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
+extern void check_class_changing(struct rq *rq, struct task_struct *p,
+ const struct sched_class *prev_class);
extern void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
int oldprio);
@@ -3629,4 +3824,24 @@ static inline void balance_callbacks(struct rq *rq, struct balance_callback *hea
#endif
+#ifdef CONFIG_SCHED_CLASS_EXT
+/*
+ * Used by SCX in the enable/disable paths to move tasks between sched_classes
+ * and establish invariants.
+ */
+struct sched_enq_and_set_ctx {
+ struct task_struct *p;
+ int queue_flags;
+ bool queued;
+ bool running;
+};
+
+void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
+ struct sched_enq_and_set_ctx *ctx);
+void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
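A hedged sketch of the intended call pattern (the flags and the class switch are assumptions; the real users live in ext.c):

    struct sched_enq_and_set_ctx ctx;

    sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
    p->sched_class = new_class;     /* mutate p while it is off the runqueue */
    sched_enq_and_set_task(&ctx);   /* restore the queued/running state */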
+
+#include "ext.h"
+
#endif /* _KERNEL_SCHED_SCHED_H */
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index b1b8fe61c532..058dd42e3d9b 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -41,26 +41,17 @@ static struct task_struct *pick_task_stop(struct rq *rq)
return rq->stop;
}
-static struct task_struct *pick_next_task_stop(struct rq *rq)
-{
- struct task_struct *p = pick_task_stop(rq);
-
- if (p)
- set_next_task_stop(rq, p, true);
-
- return p;
-}
-
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
add_nr_running(rq, 1);
}
-static void
+static bool
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
sub_nr_running(rq, 1);
+ return true;
}
static void yield_task_stop(struct rq *rq)
@@ -68,7 +59,7 @@ static void yield_task_stop(struct rq *rq)
BUG(); /* the stop task should never yield, its pointless. */
}
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
update_curr_common(rq);
}
@@ -111,13 +102,12 @@ DEFINE_SCHED_CLASS(stop) = {
.wakeup_preempt = wakeup_preempt_stop,
- .pick_next_task = pick_next_task_stop,
+ .pick_task = pick_task_stop,
.put_prev_task = put_prev_task_stop,
.set_next_task = set_next_task_stop,
#ifdef CONFIG_SMP
.balance = balance_stop,
- .pick_task = pick_task_stop,
.select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common,
#endif
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 195d2f2834a9..aa70beee9895 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -57,7 +57,7 @@ static int effective_prio(struct task_struct *p)
* keep the priority unchanged. Otherwise, update priority
* to the normal priority:
*/
- if (!rt_prio(p->prio))
+ if (!rt_or_dl_prio(p->prio))
return p->normal_prio;
return p->prio;
}
@@ -258,107 +258,6 @@ int sched_core_idle_cpu(int cpu)
#endif
-#ifdef CONFIG_SMP
-/*
- * This function computes an effective utilization for the given CPU, to be
- * used for frequency selection given the linear relation: f = u * f_max.
- *
- * The scheduler tracks the following metrics:
- *
- * cpu_util_{cfs,rt,dl,irq}()
- * cpu_bw_dl()
- *
- * Where the cfs,rt and dl util numbers are tracked with the same metric and
- * synchronized windows and are thus directly comparable.
- *
- * The cfs,rt,dl utilization are the running times measured with rq->clock_task
- * which excludes things like IRQ and steal-time. These latter are then accrued
- * in the IRQ utilization.
- *
- * The DL bandwidth number OTOH is not a measured metric but a value computed
- * based on the task model parameters and gives the minimal utilization
- * required to meet deadlines.
- */
-unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
- unsigned long *min,
- unsigned long *max)
-{
- unsigned long util, irq, scale;
- struct rq *rq = cpu_rq(cpu);
-
- scale = arch_scale_cpu_capacity(cpu);
-
- /*
- * Early check to see if IRQ/steal time saturates the CPU, can be
- * because of inaccuracies in how we track these -- see
- * update_irq_load_avg().
- */
- irq = cpu_util_irq(rq);
- if (unlikely(irq >= scale)) {
- if (min)
- *min = scale;
- if (max)
- *max = scale;
- return scale;
- }
-
- if (min) {
- /*
- * The minimum utilization returns the highest level between:
- * - the computed DL bandwidth needed with the IRQ pressure which
- * steals time to the deadline task.
- * - The minimum performance requirement for CFS and/or RT.
- */
- *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
-
- /*
- * When an RT task is runnable and uclamp is not used, we must
- * ensure that the task will run at maximum compute capacity.
- */
- if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
- *min = max(*min, scale);
- }
-
- /*
- * Because the time spend on RT/DL tasks is visible as 'lost' time to
- * CFS tasks and we use the same metric to track the effective
- * utilization (PELT windows are synchronized) we can directly add them
- * to obtain the CPU's actual utilization.
- */
- util = util_cfs + cpu_util_rt(rq);
- util += cpu_util_dl(rq);
-
- /*
- * The maximum hint is a soft bandwidth requirement, which can be lower
- * than the actual utilization because of uclamp_max requirements.
- */
- if (max)
- *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
-
- if (util >= scale)
- return scale;
-
- /*
- * There is still idle time; further improve the number by using the
- * IRQ metric. Because IRQ/steal time is hidden from the task clock we
- * need to scale the task numbers:
- *
- * max - irq
- * U' = irq + --------- * U
- * max
- */
- util = scale_irq_capacity(util, irq, scale);
- util += irq;
-
- return min(scale, util);
-}
-
-unsigned long sched_cpu_util(int cpu)
-{
- return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
-}
-#endif /* CONFIG_SMP */
-
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
@@ -401,13 +300,23 @@ static void __setscheduler_params(struct task_struct *p,
p->policy = policy;
- if (dl_policy(policy))
+ if (dl_policy(policy)) {
__setparam_dl(p, attr);
- else if (fair_policy(policy))
+ } else if (fair_policy(policy)) {
p->static_prio = NICE_TO_PRIO(attr->sched_nice);
+ if (attr->sched_runtime) {
+ p->se.custom_slice = 1;
+ p->se.slice = clamp_t(u64, attr->sched_runtime,
+ NSEC_PER_MSEC/10, /* HZ=1000 * 10 */
+ NSEC_PER_MSEC*100); /* HZ=100 / 10 */
+ } else {
+ p->se.custom_slice = 0;
+ p->se.slice = sysctl_sched_base_slice;
+ }
+ }
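From userspace this surfaces through sched_setattr(): for a fair-policy task, sched_attr::sched_runtime is reinterpreted as a slice request, clamped to [0.1ms, 100ms]. A minimal hedged example (error handling elided; assumes reasonably recent uapi headers):

    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/sched.h>         /* SCHED_NORMAL */
    #include <linux/sched/types.h>   /* struct sched_attr (uapi) */

    int main(void)
    {
            struct sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.sched_policy = SCHED_NORMAL;
            attr.sched_runtime = 3 * 1000 * 1000;   /* ask for a 3ms slice */

            return syscall(SYS_sched_setattr, 0, &attr, 0);
    }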
/* rt-policy tasks do not have a timerslack */
- if (task_is_realtime(p)) {
+ if (rt_or_dl_task_policy(p)) {
p->timer_slack_ns = 0;
} else if (p->timer_slack_ns == 0) {
/* when switching back to non-rt policy, restore timerslack */
@@ -703,12 +612,18 @@ recheck:
goto unlock;
}
+ retval = scx_check_setscheduler(p, policy);
+ if (retval)
+ goto unlock;
+
/*
* If not changing anything there's no need to proceed further,
* but store a possible modification of reset_on_fork.
*/
if (unlikely(policy == p->policy)) {
- if (fair_policy(policy) && attr->sched_nice != task_nice(p))
+ if (fair_policy(policy) &&
+ (attr->sched_nice != task_nice(p) ||
+ (attr->sched_runtime != p->se.slice)))
goto change;
if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
goto change;
@@ -805,6 +720,7 @@ change:
__setscheduler_prio(p, newprio);
}
__setscheduler_uclamp(p, attr);
+ check_class_changing(rq, p, prev_class);
if (queued) {
/*
@@ -854,6 +770,9 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
.sched_nice = PRIO_TO_NICE(p->static_prio),
};
+ if (p->se.custom_slice)
+ attr.sched_runtime = p->se.slice;
+
/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
@@ -1020,12 +939,14 @@ err_size:
static void get_params(struct task_struct *p, struct sched_attr *attr)
{
- if (task_has_dl_policy(p))
+ if (task_has_dl_policy(p)) {
__getparam_dl(p, attr);
- else if (task_has_rt_policy(p))
+ } else if (task_has_rt_policy(p)) {
attr->sched_priority = p->rt_priority;
- else
+ } else {
attr->sched_nice = task_nice(p);
+ attr->sched_runtime = p->se.slice;
+ }
}
/**
@@ -1610,6 +1531,7 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
+ case SCHED_EXT:
ret = 0;
break;
}
@@ -1637,6 +1559,7 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
+ case SCHED_EXT:
ret = 0;
}
return ret;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 76504b776d03..9748a4c8d668 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -516,6 +516,14 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
+ /*
+ * Because the rq is not a task, dl_add_task_root_domain() did not
+ * move the fair server bw to the rd if it already started.
+ * Add it now.
+ */
+ if (rq->fair_server.dl_server)
+ __dl_server_attach_root(&rq->fair_server, rq);
+
rq_unlock_irqrestore(rq, &rf);
if (old_rd)
diff --git a/kernel/signal.c b/kernel/signal.c
index 6fe29715105b..4344860ffcac 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2888,8 +2888,6 @@ relock:
current->flags |= PF_SIGNALED;
if (sig_kernel_coredump(signr)) {
- int ret;
-
if (print_fatal_signals)
print_fatal_signal(signr);
proc_coredump_connector(current);
@@ -2901,24 +2899,7 @@ relock:
* first and our do_group_exit call below will use
* that value and ignore the one we pass it.
*/
- ret = do_coredump(&ksig->info);
- if (ret)
- coredump_report_failure("coredump has not been created, error %d",
- ret);
- else if (!IS_ENABLED(CONFIG_COREDUMP)) {
- /*
- * Coredumps are not available, can't fail collecting
- * the coredump.
- *
- * Leave a note though that the coredump is going to be
- * not created. This is not an error or a warning as disabling
- * support in the kernel for coredumps isn't commonplace, and
- * the user must've built the kernel with the custom config so
- * let them know all works as desired.
- */
- coredump_report("no coredump collected as "
- "that is disabled in the kernel configuration");
- }
+ do_coredump(&ksig->info);
}
/*
@@ -3941,11 +3922,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
return -EINVAL;
f = fdget(pidfd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
/* Is this a pidfd? */
- pid = pidfd_to_pid(f.file);
+ pid = pidfd_to_pid(fd_file(f));
if (IS_ERR(pid)) {
ret = PTR_ERR(pid);
goto err;
@@ -3958,7 +3939,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
switch (flags) {
case 0:
/* Infer scope from the type of pidfd. */
- if (f.file->f_flags & PIDFD_THREAD)
+ if (fd_file(f)->f_flags & PIDFD_THREAD)
type = PIDTYPE_PID;
else
type = PIDTYPE_TGID;
diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
index 639397b5491c..5259cda486d0 100644
--- a/kernel/static_call_inline.c
+++ b/kernel/static_call_inline.c
@@ -411,6 +411,17 @@ static void static_call_del_module(struct module *mod)
for (site = start; site < stop; site++) {
key = static_call_key(site);
+
+ /*
+ * If the key was not updated due to a memory allocation
+ * failure in __static_call_init() then treating key::sites
+ * as key::mods in the code below would cause random memory
+ * access and #GP. In that case all subsequent sites have
+ * not been touched either, so stop iterating.
+ */
+ if (!static_call_key_has_mods(key))
+ break;
+
if (key == prev_key)
continue;
@@ -442,7 +453,7 @@ static int static_call_module_notify(struct notifier_block *nb,
case MODULE_STATE_COMING:
ret = static_call_add_module(mod);
if (ret) {
- WARN(1, "Failed to allocate memory for static calls");
+ pr_warn("Failed to allocate memory for static calls\n");
static_call_del_module(mod);
}
break;
diff --git a/kernel/sys.c b/kernel/sys.c
index e3c4cffb520c..4da31f28fda8 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1916,10 +1916,10 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
int err;
exe = fdget(fd);
- if (!exe.file)
+ if (!fd_file(exe))
return -EBADF;
- inode = file_inode(exe.file);
+ inode = file_inode(fd_file(exe));
/*
* Because the original mm->exe_file points to executable file, make
@@ -1927,14 +1927,14 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
* overall picture.
*/
err = -EACCES;
- if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
+ if (!S_ISREG(inode->i_mode) || path_noexec(&fd_file(exe)->f_path))
goto exit;
- err = file_permission(exe.file, MAY_EXEC);
+ err = file_permission(fd_file(exe), MAY_EXEC);
if (err)
goto exit;
- err = replace_mm_exe_file(mm, exe.file);
+ err = replace_mm_exe_file(mm, fd_file(exe));
exit:
fdput(exe);
return err;
@@ -2557,7 +2557,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = current->timer_slack_ns;
break;
case PR_SET_TIMERSLACK:
- if (task_is_realtime(current))
+ if (rt_or_dl_task_policy(current))
break;
if (arg2 <= 0)
current->timer_slack_ns =
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4354ea231fab..0700f40c53ac 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -419,7 +419,7 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return 0;
size = nla_total_size(sizeof(struct cgroupstats));
@@ -440,7 +440,7 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
stats = nla_data(na);
memset(stats, 0, sizeof(*stats));
- rc = cgroupstats_build(stats, f.file->f_path.dentry);
+ rc = cgroupstats_build(stats, fd_file(f)->f_path.dentry);
if (rc < 0) {
nlmsg_free(rep_skb);
goto err;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 12eb40d6290e..cddcd08ea827 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1977,7 +1977,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
* expiry.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
- if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
+ if (rt_or_dl_task_policy(current) && !(mode & HRTIMER_MODE_SOFT))
mode |= HRTIMER_MODE_HARD;
}
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 4782edcbe7b9..c2f3d0c490d5 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -168,7 +168,6 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
static const struct file_operations posix_clock_file_operations = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = posix_clock_read,
.poll = posix_clock_poll,
.unlocked_ioctl = posix_clock_ioctl,
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ac0a01cc8634..a582cd25ca87 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -24,7 +24,6 @@
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>
-#include <linux/fileattr.h>
#include <net/bpf_sk_storage.h>
@@ -798,29 +797,6 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = {
.ret_btf_id = &bpf_task_pt_regs_ids[0],
};
-BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
-{
- struct bpf_array *array = container_of(map, struct bpf_array, map);
- struct cgroup *cgrp;
-
- if (unlikely(idx >= array->map.max_entries))
- return -E2BIG;
-
- cgrp = READ_ONCE(array->ptrs[idx]);
- if (unlikely(!cgrp))
- return -EAGAIN;
-
- return task_under_cgroup_hierarchy(current, cgrp);
-}
-
-static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
- .func = bpf_current_task_under_cgroup,
- .gpl_only = false,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_ANYTHING,
-};
-
struct send_signal_irq_work {
struct irq_work irq_work;
struct task_struct *task;
@@ -1226,7 +1202,8 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_LONG,
+ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg3_size = sizeof(u64),
};
BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
@@ -1242,7 +1219,8 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = {
.func = get_func_ret,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_LONG,
+ .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg2_size = sizeof(u64),
};
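What the stricter argument type buys: the verifier now checks that the pointer really covers a writable, properly aligned u64. A hedged BPF-side sketch of a conforming caller (the attach point is made up):

    #include "vmlinux.h"            /* assumed available in the build */
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("fexit/do_nanosleep")
    int BPF_PROG(sample)
    {
            u64 ret = 0;

            /* must be an 8-byte, 8-byte-aligned writable slot */
            bpf_get_func_ret(ctx, &ret);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";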
BPF_CALL_1(get_func_arg_cnt, void *, ctx)
@@ -1439,73 +1417,6 @@ static int __init bpf_key_sig_kfuncs_init(void)
late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */
-/* filesystem kfuncs */
-__bpf_kfunc_start_defs();
-
-/**
- * bpf_get_file_xattr - get xattr of a file
- * @file: file to get xattr from
- * @name__str: name of the xattr
- * @value_p: output buffer of the xattr value
- *
- * Get xattr *name__str* of *file* and store the output in *value_ptr*.
- *
- * For security reasons, only *name__str* with prefix "user." is allowed.
- *
- * Return: 0 on success, a negative value on error.
- */
-__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
- struct bpf_dynptr *value_p)
-{
- struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
- struct dentry *dentry;
- u32 value_len;
- void *value;
- int ret;
-
- if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
- return -EPERM;
-
- value_len = __bpf_dynptr_size(value_ptr);
- value = __bpf_dynptr_data_rw(value_ptr, value_len);
- if (!value)
- return -EINVAL;
-
- dentry = file_dentry(file);
- ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
- if (ret)
- return ret;
- return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
-}
-
-__bpf_kfunc_end_defs();
-
-BTF_KFUNCS_START(fs_kfunc_set_ids)
-BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
-BTF_KFUNCS_END(fs_kfunc_set_ids)
-
-static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
-{
- if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
- return 0;
-
- /* Only allow to attach from LSM hooks, to avoid recursion */
- return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
-}
-
-static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
- .owner = THIS_MODULE,
- .set = &fs_kfunc_set_ids,
- .filter = bpf_get_file_xattr_filter,
-};
-
-static int __init bpf_fs_kfuncs_init(void)
-{
- return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
-}
-
-late_initcall(bpf_fs_kfuncs_init);
-
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1548,8 +1459,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_numa_node_id_proto;
case BPF_FUNC_perf_event_read:
return &bpf_perf_event_read_proto;
- case BPF_FUNC_current_task_under_cgroup:
- return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_probe_write_user:
@@ -1578,6 +1487,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_cgrp_storage_get_proto;
case BPF_FUNC_cgrp_storage_delete:
return &bpf_cgrp_storage_delete_proto;
+ case BPF_FUNC_current_task_under_cgroup:
+ return &bpf_current_task_under_cgroup_proto;
#endif
case BPF_FUNC_send_signal:
return &bpf_send_signal_proto;
@@ -1598,7 +1509,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_jiffies64:
return &bpf_jiffies64_proto;
case BPF_FUNC_get_task_stack:
- return &bpf_get_task_stack_proto;
+ return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
+ : &bpf_get_task_stack_proto;
case BPF_FUNC_copy_from_user:
return &bpf_copy_from_user_proto;
case BPF_FUNC_copy_from_user_task:
@@ -1654,7 +1566,7 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_stackid:
return &bpf_get_stackid_proto;
case BPF_FUNC_get_stack:
- return &bpf_get_stack_proto;
+ return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
case BPF_FUNC_override_return:
return &bpf_override_return_proto;
@@ -3299,7 +3211,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
struct bpf_run_ctx *old_run_ctx;
int err = 0;
- if (link->task && current->mm != link->task->mm)
+ if (link->task && !same_thread_group(current, link->task))
return 0;
if (sleepable)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cebd879a30cb..77dc0b25140e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -32,6 +32,8 @@
#include <asm/local64.h>
#include <asm/local.h>
+#include "trace.h"
+
/*
* The "absolute" timestamp in the buffer is only 59 bits.
* If a clock has the 5 MSBs set, it needs to be saved and
@@ -42,6 +44,21 @@
static void update_pages_handler(struct work_struct *work);
+#define RING_BUFFER_META_MAGIC 0xBADFEED
+
+struct ring_buffer_meta {
+ int magic;
+ int struct_size;
+ unsigned long text_addr;
+ unsigned long data_addr;
+ unsigned long first_buffer;
+ unsigned long head_buffer;
+ unsigned long commit_buffer;
+ __u32 subbuf_size;
+ __u32 nr_subbufs;
+ int buffers[];
+};
+
/*
* The ring buffer header is special. We must manually up keep it.
*/
@@ -342,7 +359,8 @@ struct buffer_page {
local_t entries; /* entries on this page */
unsigned long real_end; /* real end of data */
unsigned order; /* order of the page */
- u32 id; /* ID for external mapping */
+ u32 id:30; /* ID for external mapping */
+ u32 range:1; /* Mapped via a range */
struct buffer_data_page *page; /* Actual data page */
};
@@ -373,7 +391,9 @@ static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
static void free_buffer_page(struct buffer_page *bpage)
{
- free_pages((unsigned long)bpage->page, bpage->order);
+ /* Range pages are not to be freed */
+ if (!bpage->range)
+ free_pages((unsigned long)bpage->page, bpage->order);
kfree(bpage);
}
@@ -491,9 +511,11 @@ struct ring_buffer_per_cpu {
unsigned long pages_removed;
unsigned int mapped;
+ unsigned int user_mapped; /* user space mapping */
struct mutex mapping_lock;
unsigned long *subbuf_ids; /* ID to subbuf VA */
struct trace_buffer_meta *meta_page;
+ struct ring_buffer_meta *ring_meta;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
@@ -523,6 +545,12 @@ struct trace_buffer {
struct rb_irq_work irq_work;
bool time_stamp_abs;
+ unsigned long range_addr_start;
+ unsigned long range_addr_end;
+
+ long last_text_delta;
+ long last_data_delta;
+
unsigned int subbuf_size;
unsigned int subbuf_order;
unsigned int max_data_size;
@@ -1239,6 +1267,11 @@ static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
* Set the previous list pointer to have the HEAD flag.
*/
rb_set_list_to_head(head->list.prev);
+
+ if (cpu_buffer->ring_meta) {
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ meta->head_buffer = (unsigned long)head->page;
+ }
}
static void rb_list_head_clear(struct list_head *list)
@@ -1478,9 +1511,484 @@ static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
}
}
+/*
+ * Take an address, add the meta data size as well as the array of
+ * subbuffer indexes, then align it to a subbuffer size.
+ *
+ * This is used to help find the next per cpu subbuffer within a mapped range.
+ */
+static unsigned long
+rb_range_align_subbuf(unsigned long addr, int subbuf_size, int nr_subbufs)
+{
+ addr += sizeof(struct ring_buffer_meta) +
+ sizeof(int) * nr_subbufs;
+ return ALIGN(addr, subbuf_size);
+}
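To make the alignment arithmetic concrete, here is a minimal user-space sketch; the numbers are illustrative and 64 bytes merely stands in for sizeof(struct ring_buffer_meta):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long addr = 0x1000;    /* hypothetical chunk start */
            int subbuf_size = 4096, nr_subbufs = 8;

            /* meta struct plus one int index per sub-buffer... */
            addr += 64 + sizeof(int) * nr_subbufs;
            /* ...rounded up to the next sub-buffer boundary */
            addr = ALIGN(addr, subbuf_size);
            printf("first sub-buffer at %#lx\n", addr);  /* prints 0x2000 */
            return 0;
    }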
+
+/*
+ * Return the ring_buffer_meta for a given @cpu.
+ */
+static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu)
+{
+ int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
+ unsigned long ptr = buffer->range_addr_start;
+ struct ring_buffer_meta *meta;
+ int nr_subbufs;
+
+ if (!ptr)
+ return NULL;
+
+ /* When nr_pages passed in is zero, the first meta has already been initialized */
+ if (!nr_pages) {
+ meta = (struct ring_buffer_meta *)ptr;
+ nr_subbufs = meta->nr_subbufs;
+ } else {
+ meta = NULL;
+ /* Include the reader page */
+ nr_subbufs = nr_pages + 1;
+ }
+
+ /*
+ * The first chunk may not be subbuffer aligned, whereas
+ * the rest of the chunks are.
+ */
+ if (cpu) {
+ ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
+ ptr += subbuf_size * nr_subbufs;
+
+ /* We can use multiplication to find chunks greater than 1 */
+ if (cpu > 1) {
+ unsigned long size;
+ unsigned long p;
+
+ /* Save the beginning of this CPU chunk */
+ p = ptr;
+ ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
+ ptr += subbuf_size * nr_subbufs;
+
+ /* Now all chunks after this are the same size */
+ size = ptr - p;
+ ptr += size * (cpu - 2);
+ }
+ }
+ return (void *)ptr;
+}
+
+/* Return the start of subbufs given the meta pointer */
+static void *rb_subbufs_from_meta(struct ring_buffer_meta *meta)
+{
+ int subbuf_size = meta->subbuf_size;
+ unsigned long ptr;
+
+ ptr = (unsigned long)meta;
+ ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs);
+
+ return (void *)ptr;
+}
+
+/*
+ * Return a specific sub-buffer for a given @cpu defined by @idx.
+ */
+static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
+{
+ struct ring_buffer_meta *meta;
+ unsigned long ptr;
+ int subbuf_size;
+
+ meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu);
+ if (!meta)
+ return NULL;
+
+ if (WARN_ON_ONCE(idx >= meta->nr_subbufs))
+ return NULL;
+
+ subbuf_size = meta->subbuf_size;
+
+ /* Map this buffer to the order that's in meta->buffers[] */
+ idx = meta->buffers[idx];
+
+ ptr = (unsigned long)rb_subbufs_from_meta(meta);
+
+ ptr += subbuf_size * idx;
+ if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end)
+ return NULL;
+
+ return (void *)ptr;
+}
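To see the indirection through meta->buffers[], a small sketch with illustrative values: with nr_subbufs = 3 and meta->buffers[] = {2, 0, 1}, logical sub-buffer 1 lives in physical slot 0, right at the start of the area returned by rb_subbufs_from_meta().

    /* Hypothetical helper mirroring the lookup above */
    static unsigned long subbuf_addr(unsigned long subbufs_start,
                                     const int *buffers, int subbuf_size,
                                     int idx)
    {
            return subbufs_start + (unsigned long)buffers[idx] * subbuf_size;
    }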
+
+/*
+ * See if the existing memory contains valid ring buffer data.
+ * As the previous kernel must be the same as this kernel, all
+ * the calculations (size of buffers and number of buffers)
+ * must be the same.
+ */
+static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+ struct trace_buffer *buffer, int nr_pages)
+{
+ int subbuf_size = PAGE_SIZE;
+ struct buffer_data_page *subbuf;
+ unsigned long buffers_start;
+ unsigned long buffers_end;
+ int i;
+
+ /* Check the meta magic and meta struct size */
+ if (meta->magic != RING_BUFFER_META_MAGIC ||
+ meta->struct_size != sizeof(*meta)) {
+ pr_info("Ring buffer boot meta[%d] mismatch of magic or struct size\n", cpu);
+ return false;
+ }
+
+ /* The subbuffer's size and number of subbuffers must match */
+ if (meta->subbuf_size != subbuf_size ||
+ meta->nr_subbufs != nr_pages + 1) {
+ pr_info("Ring buffer boot meta [%d] mismatch of subbuf_size/nr_pages\n", cpu);
+ return false;
+ }
+
+ buffers_start = meta->first_buffer;
+ buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs);
+
+ /* Are the head and commit buffers within the range of buffers? */
+ if (meta->head_buffer < buffers_start ||
+ meta->head_buffer >= buffers_end) {
+ pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu);
+ return false;
+ }
+
+ if (meta->commit_buffer < buffers_start ||
+ meta->commit_buffer >= buffers_end) {
+ pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu);
+ return false;
+ }
+
+ subbuf = rb_subbufs_from_meta(meta);
+
+ /* Do the meta buffers and the subbufs themselves have correct data? */
+ for (i = 0; i < meta->nr_subbufs; i++) {
+ if (meta->buffers[i] < 0 ||
+ meta->buffers[i] >= meta->nr_subbufs) {
+ pr_info("Ring buffer boot meta [%d] array out of range\n", cpu);
+ return false;
+ }
+
+ if ((unsigned)local_read(&subbuf->commit) > subbuf_size) {
+ pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu);
+ return false;
+ }
+
+ subbuf = (void *)subbuf + subbuf_size;
+ }
+
+ return true;
+}
+
+static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf);
+
+static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,
+ unsigned long long *timestamp, u64 *delta_ptr)
+{
+ struct ring_buffer_event *event;
+ u64 ts, delta;
+ int events = 0;
+ int e;
+
+ *delta_ptr = 0;
+ *timestamp = 0;
+
+ ts = dpage->time_stamp;
+
+ for (e = 0; e < tail; e += rb_event_length(event)) {
+
+ event = (struct ring_buffer_event *)(dpage->data + e);
+
+ switch (event->type_len) {
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ delta = rb_event_time_stamp(event);
+ ts += delta;
+ break;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ delta = rb_event_time_stamp(event);
+ delta = rb_fix_abs_ts(delta, ts);
+ if (delta < ts) {
+ *delta_ptr = delta;
+ *timestamp = ts;
+ return -1;
+ }
+ ts = delta;
+ break;
+
+ case RINGBUF_TYPE_PADDING:
+ if (event->time_delta == 1)
+ break;
+ fallthrough;
+ case RINGBUF_TYPE_DATA:
+ events++;
+ ts += event->time_delta;
+ break;
+
+ default:
+ return -1;
+ }
+ }
+ *timestamp = ts;
+ return events;
+}
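Note the calling convention established here: a negative return flags a page that cannot be trusted (with *timestamp and *delta_ptr reporting the offending values in the backwards-timestamp case), while a non-negative return is the number of data events on the page, which rb_meta_validate_events() below accumulates into the entries counters.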
+
+static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
+{
+ unsigned long long ts;
+ u64 delta;
+ int tail;
+
+ tail = local_read(&dpage->commit);
+ return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
+}
+
+/* If the meta data has been validated, now validate the events */
+static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ struct buffer_page *head_page;
+ unsigned long entry_bytes = 0;
+ unsigned long entries = 0;
+ int ret;
+ int i;
+
+ if (!meta || !meta->head_buffer)
+ return;
+
+ /* Do the reader page first */
+ ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
+ if (ret < 0) {
+ pr_info("Ring buffer reader page is invalid\n");
+ goto invalid;
+ }
+ entries += ret;
+ entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
+ local_set(&cpu_buffer->reader_page->entries, ret);
+
+ head_page = cpu_buffer->head_page;
+
+ /* If both the head and commit are on the reader_page then we are done. */
+ if (head_page == cpu_buffer->reader_page &&
+ head_page == cpu_buffer->commit_page)
+ goto done;
+
+ /* Iterate until finding the commit page */
+ for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) {
+
+ /* Reader page has already been done */
+ if (head_page == cpu_buffer->reader_page)
+ continue;
+
+ ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
+ if (ret < 0) {
+ pr_info("Ring buffer meta [%d] invalid buffer page\n",
+ cpu_buffer->cpu);
+ goto invalid;
+ }
+ entries += ret;
+ entry_bytes += local_read(&head_page->page->commit);
+ local_set(&cpu_buffer->head_page->entries, ret);
+
+ if (head_page == cpu_buffer->commit_page)
+ break;
+ }
+
+ if (head_page != cpu_buffer->commit_page) {
+ pr_info("Ring buffer meta [%d] commit page not found\n",
+ cpu_buffer->cpu);
+ goto invalid;
+ }
+ done:
+ local_set(&cpu_buffer->entries, entries);
+ local_set(&cpu_buffer->entries_bytes, entry_bytes);
+
+ pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu);
+ return;
+
+ invalid:
+ /* The content of the buffers are invalid, reset the meta data */
+ meta->head_buffer = 0;
+ meta->commit_buffer = 0;
+
+ /* Reset the reader page */
+ local_set(&cpu_buffer->reader_page->entries, 0);
+ local_set(&cpu_buffer->reader_page->page->commit, 0);
+
+ /* Reset all the subbuffers */
+ for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) {
+ local_set(&head_page->entries, 0);
+ local_set(&head_page->page->commit, 0);
+ }
+}
+
+/* Used to calculate data delta */
+static char rb_data_ptr[] = "";
+
+#define THIS_TEXT_PTR ((unsigned long)rb_meta_init_text_addr)
+#define THIS_DATA_PTR ((unsigned long)rb_data_ptr)
+
+static void rb_meta_init_text_addr(struct ring_buffer_meta *meta)
+{
+ meta->text_addr = THIS_TEXT_PTR;
+ meta->data_addr = THIS_DATA_PTR;
+}
+
+static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+{
+ struct ring_buffer_meta *meta;
+ unsigned long delta;
+ void *subbuf;
+ int cpu;
+ int i;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ void *next_meta;
+
+ meta = rb_range_meta(buffer, nr_pages, cpu);
+
+ if (rb_meta_valid(meta, cpu, buffer, nr_pages)) {
+ /* Make the mappings match the current address */
+ subbuf = rb_subbufs_from_meta(meta);
+ delta = (unsigned long)subbuf - meta->first_buffer;
+ meta->first_buffer += delta;
+ meta->head_buffer += delta;
+ meta->commit_buffer += delta;
+ buffer->last_text_delta = THIS_TEXT_PTR - meta->text_addr;
+ buffer->last_data_delta = THIS_DATA_PTR - meta->data_addr;
+ continue;
+ }
+
+ if (cpu < nr_cpu_ids - 1)
+ next_meta = rb_range_meta(buffer, nr_pages, cpu + 1);
+ else
+ next_meta = (void *)buffer->range_addr_end;
+
+ memset(meta, 0, next_meta - (void *)meta);
+
+ meta->magic = RING_BUFFER_META_MAGIC;
+ meta->struct_size = sizeof(*meta);
+
+ meta->nr_subbufs = nr_pages + 1;
+ meta->subbuf_size = PAGE_SIZE;
+
+ subbuf = rb_subbufs_from_meta(meta);
+
+ meta->first_buffer = (unsigned long)subbuf;
+ rb_meta_init_text_addr(meta);
+
+ /*
+ * The buffers[] array holds the order of the sub-buffers
+ * that are after the meta data. The sub-buffers may
+ * be swapped out when read and inserted into a different
+ * location of the ring buffer. Although their addresses
+ * remain the same, the buffers[] array contains the
+ * index into the sub-buffers holding their actual order.
+ */
+ for (i = 0; i < meta->nr_subbufs; i++) {
+ meta->buffers[i] = i;
+ rb_init_page(subbuf);
+ subbuf += meta->subbuf_size;
+ }
+ }
+}
+
+static void *rbm_start(struct seq_file *m, loff_t *pos)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = m->private;
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ unsigned long val;
+
+ if (!meta)
+ return NULL;
+
+ if (*pos > meta->nr_subbufs)
+ return NULL;
+
+ val = *pos;
+ val++;
+
+ return (void *)val;
+}
+
+static void *rbm_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+
+ return rbm_start(m, pos);
+}
+
+static int rbm_show(struct seq_file *m, void *v)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = m->private;
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ unsigned long val = (unsigned long)v;
+
+ if (val == 1) {
+ seq_printf(m, "head_buffer: %d\n",
+ rb_meta_subbuf_idx(meta, (void *)meta->head_buffer));
+ seq_printf(m, "commit_buffer: %d\n",
+ rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer));
+ seq_printf(m, "subbuf_size: %d\n", meta->subbuf_size);
+ seq_printf(m, "nr_subbufs: %d\n", meta->nr_subbufs);
+ return 0;
+ }
+
+ val -= 2;
+ seq_printf(m, "buffer[%ld]: %d\n", val, meta->buffers[val]);
+
+ return 0;
+}
+
+static void rbm_stop(struct seq_file *m, void *p)
+{
+}
+
+static const struct seq_operations rb_meta_seq_ops = {
+ .start = rbm_start,
+ .next = rbm_next,
+ .show = rbm_show,
+ .stop = rbm_stop,
+};
+
+int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu)
+{
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file, &rb_meta_seq_ops);
+ if (ret)
+ return ret;
+
+ m = file->private_data;
+ m->private = buffer->buffers[cpu];
+
+ return 0;
+}
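Once wired to the per-CPU buffer_meta file created later in trace.c, this seq_file produces output along the following lines (all values hypothetical); record 1 is the summary and each further record maps a logical slot to its physical sub-buffer index:

    head_buffer: 4
    commit_buffer: 4
    subbuf_size: 4096
    nr_subbufs: 8
    buffer[0]: 2
    buffer[1]: 0
    ...
    buffer[7]: 7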
+
+/* Map the buffer_pages to the previous head and commit pages */
+static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *bpage)
+{
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+
+ if (meta->head_buffer == (unsigned long)bpage->page)
+ cpu_buffer->head_page = bpage;
+
+ if (meta->commit_buffer == (unsigned long)bpage->page) {
+ cpu_buffer->commit_page = bpage;
+ cpu_buffer->tail_page = bpage;
+ }
+}
+
static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
long nr_pages, struct list_head *pages)
{
+ struct trace_buffer *buffer = cpu_buffer->buffer;
+ struct ring_buffer_meta *meta = NULL;
struct buffer_page *bpage, *tmp;
bool user_thread = current->mm != NULL;
gfp_t mflags;
@@ -1515,6 +2023,10 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
*/
if (user_thread)
set_current_oom_origin();
+
+ if (buffer->range_addr_start)
+ meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu);
+
for (i = 0; i < nr_pages; i++) {
struct page *page;
@@ -1525,16 +2037,32 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_check_bpage(cpu_buffer, bpage);
- list_add(&bpage->list, pages);
-
- page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
- mflags | __GFP_COMP | __GFP_ZERO,
- cpu_buffer->buffer->subbuf_order);
- if (!page)
- goto free_pages;
- bpage->page = page_address(page);
+ /*
+ * Append the pages, as mapped buffers need to keep
+ * their order.
+ */
+ list_add_tail(&bpage->list, pages);
+
+ if (meta) {
+ /* A range was given. Use that for the buffer page */
+ bpage->page = rb_range_buffer(cpu_buffer, i + 1);
+ if (!bpage->page)
+ goto free_pages;
+ /* If this is valid from a previous boot */
+ if (meta->head_buffer)
+ rb_meta_buffer_update(cpu_buffer, bpage);
+ bpage->range = 1;
+ bpage->id = i + 1;
+ } else {
+ page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+ mflags | __GFP_COMP | __GFP_ZERO,
+ cpu_buffer->buffer->subbuf_order);
+ if (!page)
+ goto free_pages;
+ bpage->page = page_address(page);
+ rb_init_page(bpage->page);
+ }
bpage->order = cpu_buffer->buffer->subbuf_order;
- rb_init_page(bpage->page);
if (user_thread && fatal_signal_pending(current))
goto free_pages;
@@ -1584,6 +2112,7 @@ static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_meta *meta;
struct buffer_page *bpage;
struct page *page;
int ret;
@@ -1614,12 +2143,28 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
cpu_buffer->reader_page = bpage;
- page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
- cpu_buffer->buffer->subbuf_order);
- if (!page)
- goto fail_free_reader;
- bpage->page = page_address(page);
- rb_init_page(bpage->page);
+ if (buffer->range_addr_start) {
+ /*
+ * Range mapped buffers have the same restrictions as memory
+ * mapped ones do.
+ */
+ cpu_buffer->mapped = 1;
+ cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu);
+ bpage->page = rb_range_buffer(cpu_buffer, 0);
+ if (!bpage->page)
+ goto fail_free_reader;
+ if (cpu_buffer->ring_meta->head_buffer)
+ rb_meta_buffer_update(cpu_buffer, bpage);
+ bpage->range = 1;
+ } else {
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
+ cpu_buffer->buffer->subbuf_order);
+ if (!page)
+ goto fail_free_reader;
+ bpage->page = page_address(page);
+ rb_init_page(bpage->page);
+ }
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
@@ -1628,11 +2173,35 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
if (ret < 0)
goto fail_free_reader;
- cpu_buffer->head_page
- = list_entry(cpu_buffer->pages, struct buffer_page, list);
- cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
+ rb_meta_validate_events(cpu_buffer);
+
+ /* If the boot meta was valid then this has already been updated */
+ meta = cpu_buffer->ring_meta;
+ if (!meta || !meta->head_buffer ||
+ !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) {
+ if (meta && meta->head_buffer &&
+ (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) {
+ pr_warn("Ring buffer meta buffers not all mapped\n");
+ if (!cpu_buffer->head_page)
+ pr_warn(" Missing head_page\n");
+ if (!cpu_buffer->commit_page)
+ pr_warn(" Missing commit_page\n");
+ if (!cpu_buffer->tail_page)
+ pr_warn(" Missing tail_page\n");
+ }
- rb_head_page_activate(cpu_buffer);
+ cpu_buffer->head_page
+ = list_entry(cpu_buffer->pages, struct buffer_page, list);
+ cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
+
+ rb_head_page_activate(cpu_buffer);
+
+ if (cpu_buffer->ring_meta)
+ meta->commit_buffer = meta->head_buffer;
+ } else {
+ /* The valid meta buffer still needs to activate the head page */
+ rb_head_page_activate(cpu_buffer);
+ }
return cpu_buffer;
@@ -1669,22 +2238,14 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
kfree(cpu_buffer);
}
-/**
- * __ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes per cpu that is needed.
- * @flags: attributes to set for the ring buffer.
- * @key: ring buffer reader_lock_key.
- *
- * Currently the only flag that is available is the RB_FL_OVERWRITE
- * flag. This flag means that the buffer will overwrite old data
- * when the buffer wraps. If this flag is not set, the buffer will
- * drop data when the tail hits the head.
- */
-struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
- struct lock_class_key *key)
+static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long end,
+ struct lock_class_key *key)
{
struct trace_buffer *buffer;
long nr_pages;
+ int subbuf_size;
int bsize;
int cpu;
int ret;
@@ -1698,14 +2259,13 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
goto fail_free_buffer;
- /* Default buffer page size - one system page */
- buffer->subbuf_order = 0;
- buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
+ buffer->subbuf_order = order;
+ subbuf_size = (PAGE_SIZE << order);
+ buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE;
/* Max payload is buffer page size - header (8bytes) */
buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
- nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
buffer->flags = flags;
buffer->clock = trace_clock_local;
buffer->reader_lock_key = key;
@@ -1713,10 +2273,6 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
init_waitqueue_head(&buffer->irq_work.waiters);
- /* need at least two pages */
- if (nr_pages < 2)
- nr_pages = 2;
-
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
@@ -1725,6 +2281,56 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!buffer->buffers)
goto fail_free_cpumask;
+ /* If start/end are specified, then that overrides size */
+ if (start && end) {
+ unsigned long ptr;
+ int n;
+
+ size = end - start;
+ size = size / nr_cpu_ids;
+
+ /*
+ * The number of sub-buffers (nr_pages) is determined by the
+ * per CPU size computed above, minus the meta data size,
+ * divided by the sub-buffer size plus the size of the
+ * integer array index that is appended to the meta data.
+ */
+ nr_pages = (size - sizeof(struct ring_buffer_meta)) /
+ (subbuf_size + sizeof(int));
+ /* Need at least two pages plus the reader page */
+ if (nr_pages < 3)
+ goto fail_free_buffers;
+
+ again:
+ /* Make sure that the size fits aligned */
+ for (n = 0, ptr = start; n < nr_cpu_ids; n++) {
+ ptr += sizeof(struct ring_buffer_meta) +
+ sizeof(int) * nr_pages;
+ ptr = ALIGN(ptr, subbuf_size);
+ ptr += subbuf_size * nr_pages;
+ }
+ if (ptr > end) {
+ if (nr_pages <= 3)
+ goto fail_free_buffers;
+ nr_pages--;
+ goto again;
+ }
+
+ /* nr_pages should not count the reader page */
+ nr_pages--;
+ buffer->range_addr_start = start;
+ buffer->range_addr_end = end;
+
+ rb_range_meta_init(buffer, nr_pages);
+ } else {
+
+ /* need at least two pages */
+ nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
+ if (nr_pages < 2)
+ nr_pages = 2;
+ }
+
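A worked example of the sizing loop, as a stand-alone sketch: assume a 1 MiB range shared by 2 CPUs, 4 KiB sub-buffers, and 56 bytes for the meta struct (an assumption for a 64-bit build). The initial estimate of 127 sub-buffers per CPU already fits, so the loop never shrinks it:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long start = 0x10000000UL;
            unsigned long end = start + (1UL << 20);        /* 1 MiB */
            unsigned long subbuf_size = 4096, meta_size = 56;
            int nr_cpus = 2;
            long nr_pages = ((end - start) / nr_cpus - meta_size) /
                            (subbuf_size + sizeof(int));

            for (;; nr_pages--) {
                    unsigned long ptr = start;

                    for (int n = 0; n < nr_cpus; n++) {
                            ptr = ALIGN(ptr + meta_size +
                                        sizeof(int) * nr_pages, subbuf_size);
                            ptr += subbuf_size * nr_pages;
                    }
                    if (ptr <= end)
                            break;
            }
            /* The kernel then drops one more for the reader page */
            printf("%ld sub-buffers per CPU\n", nr_pages);  /* 127 here */
            return 0;
    }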
cpu = raw_smp_processor_id();
cpumask_set_cpu(cpu, buffer->cpumask);
buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
@@ -1753,9 +2359,73 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
kfree(buffer);
return NULL;
}
+
+/**
+ * __ring_buffer_alloc - allocate a new ring_buffer
+ * @size: the size in bytes per cpu that is needed.
+ * @flags: attributes to set for the ring buffer.
+ * @key: ring buffer reader_lock_key.
+ *
+ * Currently the only flag that is available is the RB_FL_OVERWRITE
+ * flag. This flag means that the buffer will overwrite old data
+ * when the buffer wraps. If this flag is not set, the buffer will
+ * drop data when the tail hits the head.
+ */
+struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+ struct lock_class_key *key)
+{
+ /* Default buffer page size - one system page */
+ return alloc_buffer(size, flags, 0, 0, 0, key);
+}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
/**
+ * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory
+ * @size: the size in bytes per cpu that is needed.
+ * @flags: attributes to set for the ring buffer.
+ * @order: sub-buffer order
+ * @start: start of allocated range
+ * @range_size: size of allocated range
+ * @key: ring buffer reader_lock_key.
+ *
+ * Currently the only flag that is available is the RB_FL_OVERWRITE
+ * flag. This flag means that the buffer will overwrite old data
+ * when the buffer wraps. If this flag is not set, the buffer will
+ * drop data when the tail hits the head.
+ */
+struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long range_size,
+ struct lock_class_key *key)
+{
+ return alloc_buffer(size, flags, order, start, start + range_size, key);
+}
+
+/**
+ * ring_buffer_last_boot_delta - return the delta offset from last boot
+ * @buffer: The buffer to return the delta from
+ * @text: Return text delta
+ * @data: Return data delta
+ *
+ * Returns: true if the delta is non-zero
+ */
+bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text,
+ long *data)
+{
+ if (!buffer)
+ return false;
+
+ if (!buffer->last_text_delta)
+ return false;
+
+ *text = buffer->last_text_delta;
+ *data = buffer->last_data_delta;
+
+ return true;
+}
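A sketch of how a consumer of a boot mapped buffer is expected to apply the deltas; recorded_ip and recorded_str stand in for values read out of the previous boot's ring buffer, and the helper name is illustrative:

    static void fixup_last_boot(struct trace_buffer *buffer,
                                unsigned long recorded_ip,
                                const char *recorded_str)
    {
            long text_delta, data_delta;

            if (!ring_buffer_last_boot_delta(buffer, &text_delta, &data_delta))
                    return; /* buffer is from this boot; nothing to adjust */

            /* Shift into this kernel's address space before use */
            pr_info("last boot ip:  %pS\n", (void *)(recorded_ip + text_delta));
            pr_info("last boot str: %s\n", recorded_str + data_delta);
    }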
+
+/**
* ring_buffer_free - free a ring buffer.
* @buffer: the buffer to free.
*/
@@ -2364,6 +3034,52 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
iter->next_event = 0;
}
+/* Return the index into the sub-buffers for a given sub-buffer */
+static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf)
+{
+ void *subbuf_array;
+
+ subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs;
+ subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size);
+ return (subbuf - subbuf_array) / meta->subbuf_size;
+}
+
+static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *next_page)
+{
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ unsigned long old_head = (unsigned long)next_page->page;
+ unsigned long new_head;
+
+ rb_inc_page(&next_page);
+ new_head = (unsigned long)next_page->page;
+
+ /*
+ * Only move it forward once, if something else came in and
+ * moved it forward, then we don't want to touch it.
+ */
+ (void)cmpxchg(&meta->head_buffer, old_head, new_head);
+}
+
+static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *reader)
+{
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ void *old_reader = cpu_buffer->reader_page->page;
+ void *new_reader = reader->page;
+ int id;
+
+ id = reader->id;
+ cpu_buffer->reader_page->id = id;
+ reader->id = 0;
+
+ meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader);
+ meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader);
+
+ /* The head pointer is the one after the reader */
+ rb_update_meta_head(cpu_buffer, reader);
+}
+
/*
* rb_handle_head_page - writer hit the head page
*
@@ -2413,6 +3129,8 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
local_inc(&cpu_buffer->pages_lost);
+ if (cpu_buffer->ring_meta)
+ rb_update_meta_head(cpu_buffer, next_page);
/*
* The entries will be zeroed out when we move the
* tail page.
@@ -2974,6 +3692,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->commit_page->page->commit,
rb_page_write(cpu_buffer->commit_page));
rb_inc_page(&cpu_buffer->commit_page);
+ if (cpu_buffer->ring_meta) {
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page;
+ }
/* add barrier to keep gcc from optimizing too much */
barrier();
}
@@ -3420,11 +4142,10 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
struct rb_event_info *info,
unsigned long tail)
{
- struct ring_buffer_event *event;
struct buffer_data_page *bpage;
u64 ts, delta;
bool full = false;
- int e;
+ int ret;
bpage = info->tail_page->page;
@@ -3450,39 +4171,12 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
goto out;
- ts = bpage->time_stamp;
-
- for (e = 0; e < tail; e += rb_event_length(event)) {
-
- event = (struct ring_buffer_event *)(bpage->data + e);
-
- switch (event->type_len) {
-
- case RINGBUF_TYPE_TIME_EXTEND:
- delta = rb_event_time_stamp(event);
- ts += delta;
- break;
-
- case RINGBUF_TYPE_TIME_STAMP:
- delta = rb_event_time_stamp(event);
- delta = rb_fix_abs_ts(delta, ts);
- if (delta < ts) {
- buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
- cpu_buffer->cpu, ts, delta);
- }
- ts = delta;
- break;
-
- case RINGBUF_TYPE_PADDING:
- if (event->time_delta == 1)
- break;
- fallthrough;
- case RINGBUF_TYPE_DATA:
- ts += event->time_delta;
- break;
-
- default:
- RB_WARN_ON(cpu_buffer, 1);
+ ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
+ if (ret < 0) {
+ if (delta < ts) {
+ buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
+ cpu_buffer->cpu, ts, delta);
+ goto out;
}
}
if ((full && ts > info->ts) ||
@@ -4591,6 +5285,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
if (!ret)
goto spin;
+ if (cpu_buffer->ring_meta)
+ rb_update_meta_reader(cpu_buffer, reader);
+
/*
* Yay! We succeeded in replacing the page.
*
@@ -5212,6 +5909,9 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct trace_buffer_meta *meta = cpu_buffer->meta_page;
+ if (!meta)
+ return;
+
meta->reader.read = cpu_buffer->reader_page->read;
meta->reader.id = cpu_buffer->reader_page->id;
meta->reader.lost_events = cpu_buffer->lost_events;
@@ -5268,11 +5968,16 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->lost_events = 0;
cpu_buffer->last_overrun = 0;
- if (cpu_buffer->mapped)
- rb_update_meta_page(cpu_buffer);
-
rb_head_page_activate(cpu_buffer);
cpu_buffer->pages_removed = 0;
+
+ if (cpu_buffer->mapped) {
+ rb_update_meta_page(cpu_buffer);
+ if (cpu_buffer->ring_meta) {
+ struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+ meta->commit_buffer = meta->head_buffer;
+ }
+ }
}
/* Must have disabled the cpu buffer then done a synchronize_rcu */
@@ -5303,6 +6008,7 @@ static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ struct ring_buffer_meta *meta;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
@@ -5321,6 +6027,11 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->resize_disabled);
+ /* Make sure persistent meta now uses this buffer's addresses */
+ meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
+ if (meta)
+ rb_meta_init_text_addr(meta);
+
mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
@@ -5335,6 +6046,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_meta *meta;
int cpu;
/* prevent another thread from changing buffer sizes */
@@ -5362,6 +6074,11 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
reset_disabled_cpu_buffer(cpu_buffer);
+ /* Make sure persistent meta now uses this buffer's addresses */
+ meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
+ if (meta)
+ rb_meta_init_text_addr(meta);
+
atomic_dec(&cpu_buffer->record_disabled);
atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
}
@@ -6135,10 +6852,10 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
/* install subbuf ID to kern VA translation */
cpu_buffer->subbuf_ids = subbuf_ids;
- meta->meta_page_size = PAGE_SIZE;
meta->meta_struct_len = sizeof(*meta);
meta->nr_subbufs = nr_subbufs;
meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
+ meta->meta_page_size = meta->subbuf_size;
rb_update_meta_page(cpu_buffer);
}
@@ -6155,7 +6872,7 @@ rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
mutex_lock(&cpu_buffer->mapping_lock);
- if (!cpu_buffer->mapped) {
+ if (!cpu_buffer->user_mapped) {
mutex_unlock(&cpu_buffer->mapping_lock);
return ERR_PTR(-ENODEV);
}
@@ -6179,19 +6896,26 @@ static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
lockdep_assert_held(&cpu_buffer->mapping_lock);
+ /* mapped is always greater than or equal to user_mapped */
+ if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
+ return -EINVAL;
+
if (inc && cpu_buffer->mapped == UINT_MAX)
return -EBUSY;
- if (WARN_ON(!inc && cpu_buffer->mapped == 0))
+ if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
return -EINVAL;
mutex_lock(&cpu_buffer->buffer->mutex);
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- if (inc)
+ if (inc) {
+ cpu_buffer->user_mapped++;
cpu_buffer->mapped++;
- else
+ } else {
+ cpu_buffer->user_mapped--;
cpu_buffer->mapped--;
+ }
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
mutex_unlock(&cpu_buffer->buffer->mutex);
@@ -6214,7 +6938,7 @@ static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
struct vm_area_struct *vma)
{
- unsigned long nr_subbufs, nr_pages, vma_pages, pgoff = vma->vm_pgoff;
+ unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff;
unsigned int subbuf_pages, subbuf_order;
struct page **pages;
int p = 0, s = 0;
@@ -6225,6 +6949,12 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
!(vma->vm_flags & VM_MAYSHARE))
return -EPERM;
+ subbuf_order = cpu_buffer->buffer->subbuf_order;
+ subbuf_pages = 1 << subbuf_order;
+
+ if (subbuf_order && pgoff % subbuf_pages)
+ return -EINVAL;
+
/*
* Make sure the mapping cannot become writable later. Also tell the VM
* to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
@@ -6234,37 +6964,38 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
lockdep_assert_held(&cpu_buffer->mapping_lock);
- subbuf_order = cpu_buffer->buffer->subbuf_order;
- subbuf_pages = 1 << subbuf_order;
-
nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
- nr_pages = ((nr_subbufs) << subbuf_order) - pgoff + 1; /* + meta-page */
+ nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff; /* + meta-page */
- vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- if (!vma_pages || vma_pages > nr_pages)
+ nr_vma_pages = vma_pages(vma);
+ if (!nr_vma_pages || nr_vma_pages > nr_pages)
return -EINVAL;
- nr_pages = vma_pages;
+ nr_pages = nr_vma_pages;
pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
if (!pgoff) {
+ unsigned long meta_page_padding;
+
pages[p++] = virt_to_page(cpu_buffer->meta_page);
/*
- * TODO: Align sub-buffers on their size, once
- * vm_insert_pages() supports the zero-page.
+ * Pad with the zero-page to align the meta-page with the
+ * sub-buffers.
*/
- } else {
- /* Skip the meta-page */
- pgoff--;
+ meta_page_padding = subbuf_pages - 1;
+ while (meta_page_padding-- && p < nr_pages) {
+ unsigned long __maybe_unused zero_addr =
+ vma->vm_start + (PAGE_SIZE * p);
- if (pgoff % subbuf_pages) {
- err = -EINVAL;
- goto out;
+ pages[p++] = ZERO_PAGE(zero_addr);
}
+ } else {
+ /* Skip the meta-page */
+ pgoff -= subbuf_pages;
s += pgoff / subbuf_pages;
}
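As an example of the padding, assuming subbuf_order = 1 (8 KiB sub-buffers) and pgoff = 0, the VMA is populated as:

    page 0:    meta-page
    page 1:    zero page (padding)
    pages 2-3: reader sub-buffer
    pages 4-5: sub-buffer 1
    ...

so every sub-buffer starts on a sub-buffer-size boundary within the mapping, the same invariant the earlier pgoff % subbuf_pages check enforces for non-zero offsets.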
@@ -6316,7 +7047,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
mutex_lock(&cpu_buffer->mapping_lock);
- if (cpu_buffer->mapped) {
+ if (cpu_buffer->user_mapped) {
err = __rb_map_vma(cpu_buffer, vma);
if (!err)
err = __rb_inc_dec_mapped(cpu_buffer, true);
@@ -6347,12 +7078,15 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
*/
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
+
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
err = __rb_map_vma(cpu_buffer, vma);
if (!err) {
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- cpu_buffer->mapped = 1;
+ /* This is the first time it is mapped by user */
+ cpu_buffer->mapped++;
+ cpu_buffer->user_mapped = 1;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
} else {
kfree(cpu_buffer->subbuf_ids);
@@ -6380,10 +7114,10 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
mutex_lock(&cpu_buffer->mapping_lock);
- if (!cpu_buffer->mapped) {
+ if (!cpu_buffer->user_mapped) {
err = -ENODEV;
goto out;
- } else if (cpu_buffer->mapped > 1) {
+ } else if (cpu_buffer->user_mapped > 1) {
__rb_inc_dec_mapped(cpu_buffer, false);
goto out;
}
@@ -6391,7 +7125,10 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
mutex_lock(&buffer->mutex);
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- cpu_buffer->mapped = 0;
+ /* This is the last user space mapping */
+ if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
+ cpu_buffer->mapped--;
+ cpu_buffer->user_mapped = 0;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index df0745a42a3f..dc819aec43e8 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -306,7 +306,6 @@ static ssize_t monitor_enable_write_data(struct file *filp, const char __user *u
static const struct file_operations interface_enable_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = monitor_enable_write_data,
.read = monitor_enable_read_data,
};
@@ -329,7 +328,6 @@ static ssize_t monitor_desc_read_data(struct file *filp, char __user *user_buf,
static const struct file_operations interface_desc_fops = {
.open = simple_open,
- .llseek = no_llseek,
.read = monitor_desc_read_data,
};
@@ -674,7 +672,6 @@ static ssize_t monitoring_on_write_data(struct file *filp, const char __user *us
static const struct file_operations monitoring_on_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = monitoring_on_write_data,
.read = monitoring_on_read_data,
};
diff --git a/kernel/trace/rv/rv_reactors.c b/kernel/trace/rv/rv_reactors.c
index 6aae106695b6..7b49cbe388d4 100644
--- a/kernel/trace/rv/rv_reactors.c
+++ b/kernel/trace/rv/rv_reactors.c
@@ -426,7 +426,6 @@ static ssize_t reacting_on_write_data(struct file *filp, const char __user *user
static const struct file_operations reacting_on_fops = {
.open = simple_open,
- .llseek = no_llseek,
.write = reacting_on_write_data,
.read = reacting_on_read_data,
};
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c3b2c7dfadef..c01375adc471 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
- TRACE_ITER_HASH_PTR)
+ TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
@@ -490,7 +490,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
- (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
+ (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
/*
* The global_trace is the descriptor that holds the top-level tracing
@@ -500,6 +500,29 @@ static struct trace_array global_trace = {
.trace_flags = TRACE_DEFAULT_FLAGS,
};
+static struct trace_array *printk_trace = &global_trace;
+
+static __always_inline bool printk_binsafe(struct trace_array *tr)
+{
+ /*
+ * The binary format of traceprintk can cause a crash if used
+ * by a buffer from another boot. Force the use of the
+ * non binary version of trace_printk if the trace_printk
+ * buffer is a boot mapped ring buffer.
+ */
+ return !(tr->flags & TRACE_ARRAY_FL_BOOT);
+}
+
+static void update_printk_trace(struct trace_array *tr)
+{
+ if (printk_trace == tr)
+ return;
+
+ printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
+ printk_trace = tr;
+ tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
+}
+
void trace_set_ring_buffer_expanded(struct trace_array *tr)
{
if (!tr)
@@ -1117,7 +1140,7 @@ EXPORT_SYMBOL_GPL(__trace_array_puts);
*/
int __trace_puts(unsigned long ip, const char *str, int size)
{
- return __trace_array_puts(&global_trace, ip, str, size);
+ return __trace_array_puts(printk_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);
@@ -1128,6 +1151,7 @@ EXPORT_SYMBOL_GPL(__trace_puts);
*/
int __trace_bputs(unsigned long ip, const char *str)
{
+ struct trace_array *tr = READ_ONCE(printk_trace);
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct bputs_entry *entry;
@@ -1135,14 +1159,17 @@ int __trace_bputs(unsigned long ip, const char *str)
int size = sizeof(struct bputs_entry);
int ret = 0;
- if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ if (!printk_binsafe(tr))
+ return __trace_puts(ip, str, strlen(str));
+
+ if (!(tr->trace_flags & TRACE_ITER_PRINTK))
return 0;
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
trace_ctx = tracing_gen_ctx();
- buffer = global_trace.array_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
ring_buffer_nest_start(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
@@ -1155,7 +1182,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry->str = str;
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
ret = 1;
out:
@@ -3021,7 +3048,7 @@ void trace_dump_stack(int skip)
/* Skip 1 to skip this function. */
skip++;
#endif
- __ftrace_trace_stack(global_trace.array_buffer.buffer,
+ __ftrace_trace_stack(printk_trace->array_buffer.buffer,
tracing_gen_ctx(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
@@ -3240,12 +3267,15 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
struct trace_event_call *call = &event_bprint;
struct ring_buffer_event *event;
struct trace_buffer *buffer;
- struct trace_array *tr = &global_trace;
+ struct trace_array *tr = READ_ONCE(printk_trace);
struct bprint_entry *entry;
unsigned int trace_ctx;
char *tbuffer;
int len = 0, size;
+ if (!printk_binsafe(tr))
+ return trace_vprintk(ip, fmt, args);
+
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
@@ -3338,7 +3368,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
+ ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
}
out:
@@ -3434,7 +3464,7 @@ int trace_array_printk_buf(struct trace_buffer *buffer,
int ret;
va_list ap;
- if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
return 0;
va_start(ap, fmt);
@@ -3446,7 +3476,7 @@ int trace_array_printk_buf(struct trace_buffer *buffer,
__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
- return trace_array_vprintk(&global_trace, ip, fmt, args);
+ return trace_array_vprintk(printk_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
@@ -3667,8 +3697,11 @@ static void test_can_verify(void)
void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
va_list ap)
{
+ long text_delta = iter->tr->text_delta;
+ long data_delta = iter->tr->data_delta;
const char *p = fmt;
const char *str;
+ bool good;
int i, j;
if (WARN_ON_ONCE(!fmt))
@@ -3687,7 +3720,10 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
j = 0;
- /* We only care about %s and variants */
+ /*
+ * We only care about %s and variants
+ * as well as %p[sS] if delta is non-zero
+ */
for (i = 0; p[i]; i++) {
if (i + 1 >= iter->fmt_size) {
/*
@@ -3716,6 +3752,11 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
}
if (p[i+j] == 's')
break;
+
+ if (text_delta && p[i+1] == 'p' &&
+ ((p[i+2] == 's' || p[i+2] == 'S')))
+ break;
+
star = false;
}
j = 0;
@@ -3729,6 +3770,24 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
iter->fmt[i] = '\0';
trace_seq_vprintf(&iter->seq, iter->fmt, ap);
+ /* Add delta to %pS pointers */
+ if (p[i+1] == 'p') {
+ unsigned long addr;
+ char fmt[4];
+
+ fmt[0] = '%';
+ fmt[1] = 'p';
+ fmt[2] = p[i+2]; /* Either %ps or %pS */
+ fmt[3] = '\0';
+
+ addr = va_arg(ap, unsigned long);
+ addr += text_delta;
+ trace_seq_printf(&iter->seq, fmt, (void *)addr);
+
+ p += i + 3;
+ continue;
+ }
+
/*
* If iter->seq is full, the above call no longer guarantees
* that ap is in sync with fmt processing, and further calls
@@ -3747,6 +3806,14 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
/* The ap now points to the string data of the %s */
str = va_arg(ap, const char *);
+ good = trace_safe_str(iter, str, star, len);
+
+ /* Could be from the last boot */
+ if (data_delta && !good) {
+ str += data_delta;
+ good = trace_safe_str(iter, str, star, len);
+ }
+
/*
* If you hit this warning, it is likely that the
* trace event in question used %s on a string that
@@ -3756,8 +3823,7 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
* instead. See samples/trace_events/trace-events-sample.h
* for reference.
*/
- if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
- "fmt: '%s' current_buffer: '%s'",
+ if (WARN_ONCE(!good, "fmt: '%s' current_buffer: '%s'",
fmt, seq_buf_str(&iter->seq.seq))) {
int ret;
@@ -4919,6 +4985,11 @@ static int tracing_open(struct inode *inode, struct file *file)
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
+#ifdef CONFIG_TRACER_SNAPSHOT
+ /* arrays with mapped buffer range do not have snapshots */
+ if (tr->range_addr_start && t->use_max_tr)
+ return false;
+#endif
return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}
@@ -5011,7 +5082,7 @@ static int show_traces_open(struct inode *inode, struct file *file)
return 0;
}
-static int show_traces_release(struct inode *inode, struct file *file)
+static int tracing_seq_release(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -5052,7 +5123,7 @@ static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = show_traces_release,
+ .release = tracing_seq_release,
};
static ssize_t
@@ -5237,7 +5308,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
if ((mask == TRACE_ITER_RECORD_TGID) ||
- (mask == TRACE_ITER_RECORD_CMD))
+ (mask == TRACE_ITER_RECORD_CMD) ||
+ (mask == TRACE_ITER_TRACE_PRINTK))
lockdep_assert_held(&event_mutex);
/* do nothing if flag is already set */
@@ -5249,6 +5321,25 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
if (tr->current_trace->flag_changed(tr, mask, !!enabled))
return -EINVAL;
+ if (mask == TRACE_ITER_TRACE_PRINTK) {
+ if (enabled) {
+ update_printk_trace(tr);
+ } else {
+ /*
+ * The global_trace cannot clear this itself;
+ * its flag only gets cleared when another instance sets it.
+ */
+ if (printk_trace == &global_trace)
+ return -EINVAL;
+ /*
+ * An instance must always have it set;
+ * by default, that's the global_trace instance.
+ */
+ if (printk_trace == tr)
+ update_printk_trace(&global_trace);
+ }
+ }
+
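In practice this flag is toggled through the per-instance options/trace_printk_dest file (the option name is added in trace.h below); setting it on one instance routes trace_printk() output there and clears it on the previous holder via update_printk_trace(), so exactly one trace array owns the trace_printk() destination at any time.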
if (enabled)
tr->trace_flags |= mask;
else
@@ -6034,6 +6125,18 @@ out:
return ret;
}
+static void update_last_data(struct trace_array *tr)
+{
+ if (!tr->text_delta && !tr->data_delta)
+ return;
+
+ /* Clear old data */
+ tracing_reset_online_cpus(&tr->array_buffer);
+
+ /* Using current data now */
+ tr->text_delta = 0;
+ tr->data_delta = 0;
+}
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
@@ -6051,6 +6154,9 @@ int tracing_update_buffers(struct trace_array *tr)
int ret = 0;
mutex_lock(&trace_types_lock);
+
+ update_last_data(tr);
+
if (!tr->ring_buffer_expanded)
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
@@ -6106,6 +6212,8 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
mutex_lock(&trace_types_lock);
+ update_last_data(tr);
+
if (!tr->ring_buffer_expanded) {
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
@@ -6854,6 +6962,37 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
}
static ssize_t
+tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_array *tr = filp->private_data;
+ struct seq_buf seq;
+ char buf[64];
+
+ seq_buf_init(&seq, buf, 64);
+
+ seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta);
+ seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq));
+}
+
+static int tracing_buffer_meta_open(struct inode *inode, struct file *filp)
+{
+ struct trace_array *tr = inode->i_private;
+ int cpu = tracing_get_cpu(inode);
+ int ret;
+
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
+
+ ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
+ if (ret < 0)
+ __trace_array_put(tr);
+ return ret;
+}
+
+static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -7418,7 +7557,6 @@ static const struct file_operations tracing_pipe_fops = {
.read = tracing_read_pipe,
.splice_read = tracing_splice_read_pipe,
.release = tracing_release_pipe,
- .llseek = no_llseek,
};
static const struct file_operations tracing_entries_fops = {
@@ -7429,6 +7567,13 @@ static const struct file_operations tracing_entries_fops = {
.release = tracing_release_generic_tr,
};
+static const struct file_operations tracing_buffer_meta_fops = {
+ .open = tracing_buffer_meta_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tracing_seq_release,
+};
+
static const struct file_operations tracing_total_entries_fops = {
.open = tracing_open_generic_tr,
.read = tracing_total_entries_read,
@@ -7469,6 +7614,13 @@ static const struct file_operations trace_time_stamp_mode_fops = {
.release = tracing_single_release_tr,
};
+static const struct file_operations last_boot_fops = {
+ .open = tracing_open_generic_tr,
+ .read = tracing_last_boot_read,
+ .llseek = generic_file_llseek,
+ .release = tracing_release_generic_tr,
+};
+
#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
.open = tracing_snapshot_open,
@@ -7483,7 +7635,6 @@ static const struct file_operations snapshot_raw_fops = {
.read = tracing_buffers_read,
.release = tracing_buffers_release,
.splice_read = tracing_buffers_splice_read,
- .llseek = no_llseek,
};
#endif /* CONFIG_TRACER_SNAPSHOT */
@@ -8313,7 +8464,6 @@ static const struct file_operations tracing_buffers_fops = {
.flush = tracing_buffers_flush,
.splice_read = tracing_buffers_splice_read,
.unlocked_ioctl = tracing_buffers_ioctl,
- .llseek = no_llseek,
.mmap = tracing_buffers_mmap,
};
@@ -8661,12 +8811,17 @@ tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
tr, cpu, &tracing_entries_fops);
+ if (tr->range_addr_start)
+ trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
+ tr, cpu, &tracing_buffer_meta_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
- tr, cpu, &snapshot_fops);
+ if (!tr->range_addr_start) {
+ trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
+ tr, cpu, &snapshot_fops);
- trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
- tr, cpu, &snapshot_raw_fops);
+ trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
+ tr, cpu, &snapshot_raw_fops);
+ }
#endif
}
@@ -9203,7 +9358,21 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
buf->tr = tr;
- buf->buffer = ring_buffer_alloc(size, rb_flags);
+ if (tr->range_addr_start && tr->range_addr_size) {
+ buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
+ tr->range_addr_start,
+ tr->range_addr_size);
+
+ ring_buffer_last_boot_delta(buf->buffer,
+ &tr->text_delta, &tr->data_delta);
+ /*
+ * This is basically the same as a mapped buffer,
+ * with the same restrictions.
+ */
+ tr->mapped++;
+ } else {
+ buf->buffer = ring_buffer_alloc(size, rb_flags);
+ }
if (!buf->buffer)
return -ENOMEM;
@@ -9240,6 +9409,10 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
return ret;
#ifdef CONFIG_TRACER_MAX_TRACE
+ /* Boot mapped buffer trace arrays do not have snapshot buffers */
+ if (tr->range_addr_start)
+ return 0;
+
ret = allocate_trace_buffer(tr, &tr->max_buffer,
allocate_snapshot ? size : 1);
if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
@@ -9340,7 +9513,9 @@ static int trace_array_create_dir(struct trace_array *tr)
}
static struct trace_array *
-trace_array_create_systems(const char *name, const char *systems)
+trace_array_create_systems(const char *name, const char *systems,
+ unsigned long range_addr_start,
+ unsigned long range_addr_size)
{
struct trace_array *tr;
int ret;
@@ -9366,6 +9541,10 @@ trace_array_create_systems(const char *name, const char *systems)
goto out_free_tr;
}
+ /* Only for boot up memory mapped ring buffers */
+ tr->range_addr_start = range_addr_start;
+ tr->range_addr_size = range_addr_size;
+
tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
@@ -9423,7 +9602,7 @@ trace_array_create_systems(const char *name, const char *systems)
static struct trace_array *trace_array_create(const char *name)
{
- return trace_array_create_systems(name, NULL);
+ return trace_array_create_systems(name, NULL, 0, 0);
}
static int instance_mkdir(const char *name)
@@ -9448,6 +9627,31 @@ out_unlock:
return ret;
}
+static u64 map_pages(u64 start, u64 size)
+{
+ struct page **pages;
+ phys_addr_t page_start;
+ unsigned int page_count;
+ unsigned int i;
+ void *vaddr;
+
+ page_count = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ page_start = start;
+ pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return 0;
+
+ for (i = 0; i < page_count; i++) {
+ phys_addr_t addr = page_start + i * PAGE_SIZE;
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ }
+ vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+
+ return (u64)(unsigned long)vaddr;
+}
+
/**
* trace_array_get_by_name - Create/Lookup a trace array, given its name.
* @name: The name of the trace array to be looked up/created.
@@ -9477,7 +9681,7 @@ struct trace_array *trace_array_get_by_name(const char *name, const char *system
goto out_unlock;
}
- tr = trace_array_create_systems(name, systems);
+ tr = trace_array_create_systems(name, systems, 0, 0);
if (IS_ERR(tr))
tr = NULL;
@@ -9507,6 +9711,9 @@ static int __remove_instance(struct trace_array *tr)
set_tracer_flag(tr, 1 << i, 0);
}
+ if (printk_trace == tr)
+ update_printk_trace(&global_trace);
+
tracing_set_nop(tr);
clear_ftrace_function_probes(tr);
event_trace_del_tracer(tr);
@@ -9669,10 +9876,15 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
if (ftrace_create_function_files(tr, d_tracer))
MEM_FAIL(1, "Could not allocate function filter files");
+ if (tr->range_addr_start) {
+ trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
+ tr, &last_boot_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
- tr, &snapshot_fops);
+ } else {
+ trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
+ tr, &snapshot_fops);
#endif
+ }
trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
tr, &tracing_err_log_fops);
@@ -10292,6 +10504,7 @@ __init static void enable_instances(void)
{
struct trace_array *tr;
char *curr_str;
+ char *name;
char *str;
char *tok;
@@ -10300,19 +10513,107 @@ __init static void enable_instances(void)
str = boot_instance_info;
while ((curr_str = strsep(&str, "\t"))) {
+ phys_addr_t start = 0;
+ phys_addr_t size = 0;
+ unsigned long addr = 0;
+ bool traceprintk = false;
+ bool traceoff = false;
+ char *flag_delim;
+ char *addr_delim;
tok = strsep(&curr_str, ",");
- if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
- do_allocate_snapshot(tok);
+ flag_delim = strchr(tok, '^');
+ addr_delim = strchr(tok, '@');
- tr = trace_array_get_by_name(tok, NULL);
- if (!tr) {
- pr_warn("Failed to create instance buffer %s\n", curr_str);
+ if (addr_delim)
+ *addr_delim++ = '\0';
+
+ if (flag_delim)
+ *flag_delim++ = '\0';
+
+ name = tok;
+
+ if (flag_delim) {
+ char *flag;
+
+ while ((flag = strsep(&flag_delim, "^"))) {
+ if (strcmp(flag, "traceoff") == 0) {
+ traceoff = true;
+ } else if ((strcmp(flag, "printk") == 0) ||
+ (strcmp(flag, "traceprintk") == 0) ||
+ (strcmp(flag, "trace_printk") == 0)) {
+ traceprintk = true;
+ } else {
+ pr_info("Tracing: Invalid instance flag '%s' for %s\n",
+ flag, name);
+ }
+ }
+ }
+
+ tok = addr_delim;
+ if (tok && isdigit(*tok)) {
+ start = memparse(tok, &tok);
+ if (!start) {
+ pr_warn("Tracing: Invalid boot instance address for %s\n",
+ name);
+ continue;
+ }
+ if (*tok != ':') {
+ pr_warn("Tracing: No size specified for instance %s\n", name);
+ continue;
+ }
+ tok++;
+ size = memparse(tok, &tok);
+ if (!size) {
+ pr_warn("Tracing: Invalid boot instance size for %s\n",
+ name);
+ continue;
+ }
+ } else if (tok) {
+ if (!reserve_mem_find_by_name(tok, &start, &size)) {
+ start = 0;
+ pr_warn("Failed to map boot instance %s to %s\n", name, tok);
+ continue;
+ }
+ }
+
+ if (start) {
+ addr = map_pages(start, size);
+ if (addr) {
+ pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
+ name, &start, (unsigned long)size);
+ } else {
+ pr_warn("Tracing: Failed to map boot instance %s\n", name);
+ continue;
+ }
+ } else {
+ /* Only non mapped buffers have snapshot buffers */
+ if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
+ do_allocate_snapshot(name);
+ }
+
+ tr = trace_array_create_systems(name, NULL, addr, size);
+ if (IS_ERR(tr)) {
+ pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str);
continue;
}
- /* Allow user space to delete it */
- trace_array_put(tr);
+
+ if (traceoff)
+ tracer_tracing_off(tr);
+
+ if (traceprintk)
+ update_printk_trace(tr);
+
+ /*
+ * If start is set, then this is a mapped buffer, and
+ * cannot be deleted by user space, so keep the reference
+ * to it.
+ */
+ if (start)
+ tr->flags |= TRACE_ARRAY_FL_BOOT;
+ else
+ trace_array_put(tr);
while ((tok = strsep(&curr_str, ","))) {
early_enable_events(tr, tok, true);
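Putting the parsing together, the boot parameter accepts forms like the following (names, addresses, and sizes are hypothetical):

    trace_instance=boot_map@0x285400000:12M
    trace_instance=boot_map@my_region
    trace_instance=foo^traceoff^traceprintk,sched:sched_switch

The first maps an instance to an explicit physical address and size, the second to a region registered via the reserve_mem boot parameter, and the third creates an ordinary (non-mapped) instance that starts off disabled, takes over trace_printk() output, and enables the sched_switch event.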
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index bd3e3069300e..c866991b9c78 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -336,7 +336,6 @@ struct trace_array {
bool allocated_snapshot;
spinlock_t snapshot_trigger_lock;
unsigned int snapshot;
- unsigned int mapped;
unsigned long max_latency;
#ifdef CONFIG_FSNOTIFY
struct dentry *d_max_latency;
@@ -344,6 +343,13 @@ struct trace_array {
struct irq_work fsnotify_irqwork;
#endif
#endif
+ /* The below is for memory mapped ring buffer */
+ unsigned int mapped;
+ unsigned long range_addr_start;
+ unsigned long range_addr_size;
+ long text_delta;
+ long data_delta;
+
struct trace_pid_list __rcu *filtered_pids;
struct trace_pid_list __rcu *filtered_no_pids;
/*
@@ -423,7 +429,8 @@ struct trace_array {
};
enum {
- TRACE_ARRAY_FL_GLOBAL = (1 << 0)
+ TRACE_ARRAY_FL_GLOBAL = BIT(0),
+ TRACE_ARRAY_FL_BOOT = BIT(1),
};
extern struct list_head ftrace_trace_arrays;
@@ -644,6 +651,8 @@ trace_buffer_lock_reserve(struct trace_buffer *buffer,
unsigned long len,
unsigned int trace_ctx);
+int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);
+
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -1312,6 +1321,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
C(IRQ_INFO, "irq-info"), \
C(MARKERS, "markers"), \
C(EVENT_FORK, "event-fork"), \
+ C(TRACE_PRINTK, "trace_printk_dest"), \
C(PAUSE_ON_TRACE, "pause-on-trace"), \
C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \
FUNCTION_FLAGS \
diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
index 62e6a8f4aae9..a079abd8955b 100644
--- a/kernel/trace/trace_fprobe.c
+++ b/kernel/trace/trace_fprobe.c
@@ -21,6 +21,7 @@
#define FPROBE_EVENT_SYSTEM "fprobes"
#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
#define RETHOOK_MAXACTIVE_MAX 4096
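+/* Placeholder for a tracepoint whose module has not been loaded yet */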
+#define TRACEPOINT_STUB ERR_PTR(-ENOENT)
static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
@@ -385,6 +386,7 @@ static struct trace_fprobe *alloc_trace_fprobe(const char *group,
const char *event,
const char *symbol,
struct tracepoint *tpoint,
+ struct module *mod,
int maxactive,
int nargs, bool is_return)
{
@@ -405,6 +407,7 @@ static struct trace_fprobe *alloc_trace_fprobe(const char *group,
tf->fp.entry_handler = fentry_dispatcher;
tf->tpoint = tpoint;
+ tf->mod = mod;
tf->fp.nr_maxactive = maxactive;
ret = trace_probe_init(&tf->tp, event, group, false, nargs);
@@ -672,6 +675,24 @@ static int unregister_fprobe_event(struct trace_fprobe *tf)
return trace_probe_unregister_event_call(&tf->tp);
}
+static int __register_tracepoint_fprobe(struct trace_fprobe *tf)
+{
+ struct tracepoint *tpoint = tf->tpoint;
+ unsigned long ip = (unsigned long)tpoint->probestub;
+ int ret;
+
+ /*
+ * Enabling an fprobe on a tracepoint takes two steps:
+ * first register the __probestub_##TP function on the
+ * tracepoint, then attach an fprobe to that stub function.
+ */
+ ret = tracepoint_probe_register_prio_may_exist(tpoint,
+ tpoint->probestub, NULL, 0);
+ if (ret < 0)
+ return ret;
+ return register_fprobe_ips(&tf->fp, &ip, 1);
+}
+
/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
@@ -698,18 +719,12 @@ static int __register_trace_fprobe(struct trace_fprobe *tf)
tf->fp.flags |= FPROBE_FL_DISABLED;
if (trace_fprobe_is_tracepoint(tf)) {
- struct tracepoint *tpoint = tf->tpoint;
- unsigned long ip = (unsigned long)tpoint->probestub;
- /*
- * Here, we do 2 steps to enable fprobe on a tracepoint.
- * At first, put __probestub_##TP function on the tracepoint
- * and put a fprobe on the stub function.
- */
- ret = tracepoint_probe_register_prio_may_exist(tpoint,
- tpoint->probestub, NULL, 0);
- if (ret < 0)
- return ret;
- return register_fprobe_ips(&tf->fp, &ip, 1);
+
+ /* This tracepoint is not loaded yet */
+ if (tf->tpoint == TRACEPOINT_STUB)
+ return 0;
+
+ return __register_tracepoint_fprobe(tf);
}
/* TODO: handle filter, nofilter or symbol list */
@@ -862,20 +877,106 @@ end:
return ret;
}
+struct __find_tracepoint_cb_data {
+ const char *tp_name;
+ struct tracepoint *tpoint;
+ struct module *mod;
+};
+
+static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
+{
+ struct __find_tracepoint_cb_data *data = priv;
+
+ if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
+ data->tpoint = tp;
+ if (!data->mod) {
+ data->mod = mod;
+ if (!try_module_get(data->mod)) {
+ data->tpoint = NULL;
+ data->mod = NULL;
+ }
+ }
+ }
+}
+
+static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
+{
+ struct __find_tracepoint_cb_data *data = priv;
+
+ if (!data->tpoint && !strcmp(data->tp_name, tp->name))
+ data->tpoint = tp;
+}
+
+/*
+ * Find a tracepoint in the kernel or in a loaded module. If the tracepoint
+ * is in a module, this increments the module refcount to prevent it from
+ * being unloaded until the trace_fprobe is registered on the trace_fprobe
+ * list. Once it is registered there, the refcount is dropped again, since
+ * tracepoint_probe_module_cb takes over handling of the module.
+ */
+static struct tracepoint *find_tracepoint(const char *tp_name,
+ struct module **tp_mod)
+{
+ struct __find_tracepoint_cb_data data = {
+ .tp_name = tp_name,
+ .mod = NULL,
+ };
+
+ for_each_kernel_tracepoint(__find_tracepoint_cb, &data);
+
+ if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
+ for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
+ *tp_mod = data.mod;
+ }
+
+ return data.tpoint;
+}
+
#ifdef CONFIG_MODULES
+static void reenable_trace_fprobe(struct trace_fprobe *tf)
+{
+ struct trace_probe *tp = &tf->tp;
+
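+ /* re-enable every trace_fprobe that shares this trace_probe */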
+ list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
+ __enable_trace_fprobe(tf);
+ }
+}
+
+static struct tracepoint *find_tracepoint_in_module(struct module *mod,
+ const char *tp_name)
+{
+ struct __find_tracepoint_cb_data data = {
+ .tp_name = tp_name,
+ .mod = mod,
+ };
+
+ for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
+ return data.tpoint;
+}
+
static int __tracepoint_probe_module_cb(struct notifier_block *self,
unsigned long val, void *data)
{
struct tp_module *tp_mod = data;
+ struct tracepoint *tpoint;
struct trace_fprobe *tf;
struct dyn_event *pos;
- if (val != MODULE_STATE_GOING)
+ if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
return NOTIFY_DONE;
mutex_lock(&event_mutex);
for_each_trace_fprobe(tf, pos) {
- if (tp_mod->mod == tf->mod) {
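+ /*
+ * On MODULE_STATE_COMING, resolve any stub tracepoint that
+ * was created before its module was loaded.
+ */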
+ if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) {
+ tpoint = find_tracepoint_in_module(tp_mod->mod, tf->symbol);
+ if (tpoint) {
+ tf->tpoint = tpoint;
+ tf->mod = tp_mod->mod;
+ if (!WARN_ON_ONCE(__register_tracepoint_fprobe(tf)) &&
+ trace_probe_is_enabled(&tf->tp))
+ reenable_trace_fprobe(tf);
+ }
+ } else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
tracepoint_probe_unregister(tf->tpoint,
tf->tpoint->probestub, NULL);
tf->tpoint = NULL;
@@ -892,30 +993,6 @@ static struct notifier_block tracepoint_module_nb = {
};
#endif /* CONFIG_MODULES */
-struct __find_tracepoint_cb_data {
- const char *tp_name;
- struct tracepoint *tpoint;
-};
-
-static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
-{
- struct __find_tracepoint_cb_data *data = priv;
-
- if (!data->tpoint && !strcmp(data->tp_name, tp->name))
- data->tpoint = tp;
-}
-
-static struct tracepoint *find_tracepoint(const char *tp_name)
-{
- struct __find_tracepoint_cb_data data = {
- .tp_name = tp_name,
- };
-
- for_each_kernel_tracepoint(__find_tracepoint_cb, &data);
-
- return data.tpoint;
-}
-
static int parse_symbol_and_return(int argc, const char *argv[],
char **symbol, bool *is_return,
bool is_tracepoint)
@@ -996,6 +1073,7 @@ static int __trace_fprobe_create(int argc, const char *argv[])
char abuf[MAX_BTF_ARGS_LEN];
char *dbuf = NULL;
bool is_tracepoint = false;
+ struct module *tp_mod = NULL;
struct tracepoint *tpoint = NULL;
struct traceprobe_parse_context ctx = {
.flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
@@ -1080,15 +1158,20 @@ static int __trace_fprobe_create(int argc, const char *argv[])
if (is_tracepoint) {
ctx.flags |= TPARG_FL_TPOINT;
- tpoint = find_tracepoint(symbol);
- if (!tpoint) {
+ tpoint = find_tracepoint(symbol, &tp_mod);
+ if (tpoint) {
+ ctx.funcname = kallsyms_lookup(
+ (unsigned long)tpoint->probestub,
+ NULL, NULL, NULL, sbuf);
+ } else if (IS_ENABLED(CONFIG_MODULES)) {
+ /* This *may* be loaded afterwards */
+ tpoint = TRACEPOINT_STUB;
+ ctx.funcname = symbol;
+ } else {
trace_probe_log_set_index(1);
trace_probe_log_err(0, NO_TRACEPOINT);
goto parse_error;
}
- ctx.funcname = kallsyms_lookup(
- (unsigned long)tpoint->probestub,
- NULL, NULL, NULL, sbuf);
} else
ctx.funcname = symbol;
@@ -1110,8 +1193,8 @@ static int __trace_fprobe_create(int argc, const char *argv[])
goto out;
/* setup a probe */
- tf = alloc_trace_fprobe(group, event, symbol, tpoint, maxactive,
- argc, is_return);
+ tf = alloc_trace_fprobe(group, event, symbol, tpoint, tp_mod,
+ maxactive, argc, is_return);
if (IS_ERR(tf)) {
ret = PTR_ERR(tf);
/* This must return -ENOMEM, else there is a bug */
@@ -1119,10 +1202,6 @@ static int __trace_fprobe_create(int argc, const char *argv[])
goto out; /* We know tf is not allocated */
}
- if (is_tracepoint)
- tf->mod = __module_text_address(
- (unsigned long)tf->tpoint->probestub);
-
/* parse arguments */
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
trace_probe_log_set_index(i + 2);
@@ -1155,6 +1234,8 @@ static int __trace_fprobe_create(int argc, const char *argv[])
}
out:
+ if (tp_mod)
+ module_put(tp_mod);
traceprobe_finish_parse(&ctx);
trace_probe_log_clear();
kfree(new_argv);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 13d0387ac6a6..a569daaac4c4 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -544,6 +544,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
struct trace_seq *s = &iter->seq;
struct trace_entry *ent = iter->ent;
+ addr += iter->tr->text_delta;
+
if (addr < (unsigned long)__irqentry_text_start ||
addr >= (unsigned long)__irqentry_text_end)
return;
@@ -710,6 +712,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
struct ftrace_graph_ret *graph_ret;
struct ftrace_graph_ent *call;
unsigned long long duration;
+ unsigned long func;
int cpu = iter->cpu;
int i;
@@ -717,6 +720,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
call = &entry->graph_ent;
duration = graph_ret->rettime - graph_ret->calltime;
+ func = call->func + iter->tr->text_delta;
+
if (data) {
struct fgraph_cpu_data *cpu_data;
@@ -747,10 +752,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
* enabled.
*/
if (flags & __TRACE_GRAPH_PRINT_RETVAL)
- print_graph_retval(s, graph_ret->retval, true, (void *)call->func,
+ print_graph_retval(s, graph_ret->retval, true, (void *)func,
!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
else
- trace_seq_printf(s, "%ps();\n", (void *)call->func);
+ trace_seq_printf(s, "%ps();\n", (void *)func);
print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
cpu, iter->ent->pid, flags);
@@ -766,6 +771,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
struct ftrace_graph_ent *call = &entry->graph_ent;
struct fgraph_data *data = iter->private;
struct trace_array *tr = iter->tr;
+ unsigned long func;
int i;
if (data) {
@@ -788,7 +794,9 @@ print_graph_entry_nested(struct trace_iterator *iter,
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
trace_seq_putc(s, ' ');
- trace_seq_printf(s, "%ps() {\n", (void *)call->func);
+ func = call->func + iter->tr->text_delta;
+
+ trace_seq_printf(s, "%ps() {\n", (void *)func);
if (trace_seq_has_overflowed(s))
return TRACE_TYPE_PARTIAL_LINE;
@@ -863,6 +871,8 @@ check_irq_entry(struct trace_iterator *iter, u32 flags,
int *depth_irq;
struct fgraph_data *data = iter->private;
+ addr += iter->tr->text_delta;
+
/*
* If we are either displaying irqs, or we got called as
* a graph event and private data does not exist,
@@ -990,11 +1000,14 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
unsigned long long duration = trace->rettime - trace->calltime;
struct fgraph_data *data = iter->private;
struct trace_array *tr = iter->tr;
+ unsigned long func;
pid_t pid = ent->pid;
int cpu = iter->cpu;
int func_match = 1;
int i;
+ func = trace->func + iter->tr->text_delta;
+
if (check_irq_return(iter, flags, trace->depth))
return TRACE_TYPE_HANDLED;
@@ -1033,7 +1046,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
* function-retval option is enabled.
*/
if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
- print_graph_retval(s, trace->retval, false, (void *)trace->func,
+ print_graph_retval(s, trace->retval, false, (void *)func,
!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
} else {
/*
@@ -1046,7 +1059,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
trace_seq_puts(s, "}\n");
else
- trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
+ trace_seq_printf(s, "} /* %ps */\n", (void *)func);
}
/* Overrun */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d8b302d01083..868f2f912f28 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -990,8 +990,11 @@ enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
}
static void print_fn_trace(struct trace_seq *s, unsigned long ip,
- unsigned long parent_ip, int flags)
+ unsigned long parent_ip, long delta, int flags)
{
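+ /*
+ * 'delta' is the trace array's text_delta: non-zero for a
+ * memory-mapped boot buffer recorded by a kernel loaded at a
+ * different text address, so rebase before symbol lookup.
+ */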
+ ip += delta;
+ parent_ip += delta;
+
seq_print_ip_sym(s, ip, flags);
if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
@@ -1009,7 +1012,7 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
trace_assign_type(field, iter->ent);
- print_fn_trace(s, field->ip, field->parent_ip, flags);
+ print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, flags);
trace_seq_putc(s, '\n');
return trace_handle_return(s);
@@ -1230,6 +1233,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
struct trace_seq *s = &iter->seq;
unsigned long *p;
unsigned long *end;
+ long delta = iter->tr->text_delta;
trace_assign_type(field, iter->ent);
end = (unsigned long *)((long)iter->ent + iter->ent_size);
@@ -1242,7 +1246,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
break;
trace_seq_puts(s, " => ");
- seq_print_ip_sym(s, *p, flags);
+ seq_print_ip_sym(s, (*p) + delta, flags);
trace_seq_putc(s, '\n');
}
@@ -1587,10 +1591,13 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
{
struct print_entry *field;
struct trace_seq *s = &iter->seq;
+ unsigned long ip;
trace_assign_type(field, iter->ent);
- seq_print_ip_sym(s, field->ip, flags);
+ ip = field->ip + iter->tr->text_delta;
+
+ seq_print_ip_sym(s, ip, flags);
trace_seq_printf(s, ": %s", field->buf);
return trace_handle_return(s);
@@ -1674,7 +1681,7 @@ trace_func_repeats_print(struct trace_iterator *iter, int flags,
trace_assign_type(field, iter->ent);
- print_fn_trace(s, field->ip, field->parent_ip, flags);
+ print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, flags);
trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
trace_print_time(s, iter,
iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 130ca7e7787e..ae2ace5e515a 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -547,7 +547,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
* - wakeup_dl handles tasks belonging to sched_dl class only.
*/
if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
- (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
+ (wakeup_rt && !rt_or_dl_task(p)) ||
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9c581d6da843..785733245ead 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -564,6 +564,7 @@ static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *re
BUILD_BUG_ON(sizeof(param.ent) < sizeof(void *));
/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
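+ /*
+ * 'regs' is now the scratch pt_regs handed in by the caller (from
+ * perf_trace_buf_alloc()); populate it with the current register
+ * state before passing it to the bpf program.
+ */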
+ perf_fetch_caller_regs(regs);
*(struct pt_regs **)&param = regs;
param.syscall_nr = rec->nr;
for (i = 0; i < sys_data->nb_args; i++)
@@ -575,6 +576,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
struct syscall_metadata *sys_data;
struct syscall_trace_enter *rec;
+ struct pt_regs *fake_regs;
struct hlist_head *head;
unsigned long args[6];
bool valid_prog_array;
@@ -602,7 +604,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
size = ALIGN(size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- rec = perf_trace_buf_alloc(size, NULL, &rctx);
+ rec = perf_trace_buf_alloc(size, &fake_regs, &rctx);
if (!rec)
return;
@@ -611,7 +613,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
if ((valid_prog_array &&
- !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
+ !perf_call_bpf_enter(sys_data->enter_event, fake_regs, sys_data, rec)) ||
hlist_empty(head)) {
perf_swevent_put_recursion_context(rctx);
return;
@@ -666,6 +668,7 @@ static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *reg
} __aligned(8) param;
/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */
+ perf_fetch_caller_regs(regs);
*(struct pt_regs **)&param = regs;
param.syscall_nr = rec->nr;
param.ret = rec->ret;
@@ -676,6 +679,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
struct syscall_metadata *sys_data;
struct syscall_trace_exit *rec;
+ struct pt_regs *fake_regs;
struct hlist_head *head;
bool valid_prog_array;
int syscall_nr;
@@ -701,7 +705,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- rec = perf_trace_buf_alloc(size, NULL, &rctx);
+ rec = perf_trace_buf_alloc(size, &fake_regs, &rctx);
if (!rec)
return;
@@ -709,7 +713,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
rec->ret = syscall_get_return_value(current, regs);
if ((valid_prog_array &&
- !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
+ !perf_call_bpf_exit(sys_data->exit_event, fake_regs, rec)) ||
hlist_empty(head)) {
perf_swevent_put_recursion_context(rctx);
return;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index f7443e996b1b..c40531d2cbad 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/filter.h>
+#include <linux/percpu.h>
#include "trace_dynevent.h"
#include "trace_probe.h"
@@ -62,7 +63,7 @@ struct trace_uprobe {
struct uprobe *uprobe;
unsigned long offset;
unsigned long ref_ctr_offset;
- unsigned long nhit;
+ unsigned long __percpu *nhits;
struct trace_probe tp;
};
@@ -337,6 +338,12 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
if (!tu)
return ERR_PTR(-ENOMEM);
+ tu->nhits = alloc_percpu(unsigned long);
+ if (!tu->nhits) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
ret = trace_probe_init(&tu->tp, event, group, true, nargs);
if (ret < 0)
goto error;
@@ -349,6 +356,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
return tu;
error:
+ free_percpu(tu->nhits);
kfree(tu);
return ERR_PTR(ret);
@@ -362,6 +370,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
path_put(&tu->path);
trace_probe_cleanup(&tu->tp);
kfree(tu->filename);
+ free_percpu(tu->nhits);
kfree(tu);
}
@@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
{
struct dyn_event *ev = v;
struct trace_uprobe *tu;
+ unsigned long nhits;
+ int cpu;
if (!is_trace_uprobe(ev))
return 0;
tu = to_trace_uprobe(ev);
+
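+ /* hit counts are per-CPU now; sum them for display */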
+ nhits = 0;
+ for_each_possible_cpu(cpu) {
+ nhits += per_cpu(*tu->nhits, cpu);
+ }
+
seq_printf(m, " %s %-44s %15lu\n", tu->filename,
- trace_probe_name(&tu->tp), tu->nhit);
+ trace_probe_name(&tu->tp), nhits);
return 0;
}
@@ -1508,7 +1525,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
int ret = 0;
tu = container_of(con, struct trace_uprobe, consumer);
- tu->nhit++;
+
+ this_cpu_inc(*tu->nhits);
udd.tu = tu;
udd.bp_addr = instruction_pointer(regs);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 8d1507dd0724..8879da16ef4d 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -735,6 +735,48 @@ static __init int init_tracepoints(void)
return ret;
}
__initcall(init_tracepoints);
+
+/**
+ * for_each_tracepoint_in_module - iterate over all tracepoints in a module
+ * @mod: module
+ * @fct: callback
+ * @priv: private data
+ */
+void for_each_tracepoint_in_module(struct module *mod,
+ void (*fct)(struct tracepoint *tp,
+ struct module *mod, void *priv),
+ void *priv)
+{
+ tracepoint_ptr_t *begin, *end, *iter;
+
+ lockdep_assert_held(&tracepoint_module_list_mutex);
+
+ if (!mod)
+ return;
+
+ begin = mod->tracepoints_ptrs;
+ end = mod->tracepoints_ptrs + mod->num_tracepoints;
+
+ for (iter = begin; iter < end; iter++)
+ fct(tracepoint_ptr_deref(iter), mod, priv);
+}
+
+/**
+ * for_each_module_tracepoint - iterate over all tracepoints in all modules
+ * @fct: callback
+ * @priv: private data
+ */
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp,
+ struct module *mod, void *priv),
+ void *priv)
+{
+ struct tp_module *tp_mod;
+
+ mutex_lock(&tracepoint_module_list_mutex);
+ list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+ for_each_tracepoint_in_module(tp_mod->mod, fct, priv);
+ mutex_unlock(&tracepoint_module_list_mutex);
+}
#endif /* CONFIG_MODULES */
/**
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 0b0b95418b16..aa0b2e47f2f2 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -853,9 +853,8 @@ static int sort_idmaps(struct uid_gid_map *map)
cmp_extents_forward, NULL);
/* Only copy the memory from forward we actually need. */
- map->reverse = kmemdup(map->forward,
- map->nr_extents * sizeof(struct uid_gid_extent),
- GFP_KERNEL);
+ map->reverse = kmemdup_array(map->forward, map->nr_extents,
+ sizeof(struct uid_gid_extent), GFP_KERNEL);
if (!map->reverse)
return -ENOMEM;
diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c
index 8b4f8cc2e0ec..1fec61603ef3 100644
--- a/kernel/vmcore_info.c
+++ b/kernel/vmcore_info.c
@@ -198,17 +198,17 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_private);
VMCOREINFO_NUMBER(PG_swapcache);
VMCOREINFO_NUMBER(PG_swapbacked);
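+/*
+ * Page types now live in the upper byte of page_type, hence the
+ * PGTY_* << 24 encodings below (replacing the old ~PG_* values).
+ */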
-#define PAGE_SLAB_MAPCOUNT_VALUE (~PG_slab)
+#define PAGE_SLAB_MAPCOUNT_VALUE (PGTY_slab << 24)
VMCOREINFO_NUMBER(PAGE_SLAB_MAPCOUNT_VALUE);
#ifdef CONFIG_MEMORY_FAILURE
VMCOREINFO_NUMBER(PG_hwpoison);
#endif
VMCOREINFO_NUMBER(PG_head_mask);
-#define PAGE_BUDDY_MAPCOUNT_VALUE (~PG_buddy)
+#define PAGE_BUDDY_MAPCOUNT_VALUE (PGTY_buddy << 24)
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
-#define PAGE_HUGETLB_MAPCOUNT_VALUE (~PG_hugetlb)
+#define PAGE_HUGETLB_MAPCOUNT_VALUE (PGTY_hugetlb << 24)
VMCOREINFO_NUMBER(PAGE_HUGETLB_MAPCOUNT_VALUE);
-#define PAGE_OFFLINE_MAPCOUNT_VALUE (~PG_offline)
+#define PAGE_OFFLINE_MAPCOUNT_VALUE (PGTY_offline << 24)
VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
#ifdef CONFIG_KALLSYMS
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 03b90d7d2175..d36242fd4936 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -666,8 +666,8 @@ struct watch_queue *get_watch_queue(int fd)
struct fd f;
f = fdget(fd);
- if (f.file) {
- pipe = get_pipe_info(f.file, false);
+ if (fd_file(f)) {
+ pipe = get_pipe_info(fd_file(f), false);
if (pipe && pipe->watch_queue) {
wqueue = pipe->watch_queue;
kref_get(&wqueue->usage);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 830a83895493..262691ba62b7 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -1203,7 +1203,10 @@ static void __init lockup_detector_delay_init(struct work_struct *work)
ret = watchdog_hardlockup_probe();
if (ret) {
- pr_info("Delayed init of the lockup detector failed: %d\n", ret);
+ if (ret == -ENODEV)
+ pr_info("NMI not fully supported\n");
+ else
+ pr_info("Delayed init of the lockup detector failed: %d\n", ret);
pr_info("Hard watchdog permanently disabled\n");
return;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 38e58f74fd2b..7315f643817a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -379,13 +379,15 @@ config DEBUG_INFO_BTF
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
- depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
+ depends on PAHOLE_VERSION >= 116
+ depends on DEBUG_INFO_DWARF4 || PAHOLE_VERSION >= 121
# pahole uses elfutils, which does not have support for Hexagon relocations
depends on !HEXAGON
help
Generate deduplicated BTF type information from DWARF debug info.
- Turning this on expects presence of pahole tool, which will convert
- DWARF type info into equivalent deduplicated BTF type info.
+ Turning this on requires pahole v1.16 or later (v1.21 or later to
+ support DWARF 5), which will convert DWARF type info into equivalent
+ deduplicated BTF type info.
config PAHOLE_HAS_SPLIT_BTF
def_bool PAHOLE_VERSION >= 119
@@ -571,6 +573,21 @@ config VMLINUX_MAP
pieces of code get eliminated with
CONFIG_LD_DEAD_CODE_DATA_ELIMINATION.
+config BUILTIN_MODULE_RANGES
+ bool "Generate address range information for builtin modules"
+ depends on !LTO
+ depends on VMLINUX_MAP
+ help
+ When modules are built into the kernel, /proc/kallsyms does not
+ associate a module name with their symbols. Tracers may want to
+ identify symbols by module name and symbol name regardless of
+ whether the module is configured as loadable or not.
+
+ This option generates modules.builtin.ranges in the build tree,
+ recording the offset ranges (per ELF section) of each builtin
+ module, together with an anchor symbol used to determine the load
+ address of each section.
+
config DEBUG_FORCE_WEAK_PER_CPU
bool "Force weak per-cpu definitions"
depends on DEBUG_KERNEL
@@ -1515,7 +1532,7 @@ config LOCKDEP_BITS
config LOCKDEP_CHAINS_BITS
int "Bitsize for MAX_LOCKDEP_CHAINS"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 21
default 16
help
Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
@@ -2289,6 +2306,16 @@ config TEST_DIV64
If unsure, say N.
+config TEST_MULDIV64
+ tristate "mul_u64_u64_div_u64() test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on the 'mul_u64_u64_div_u64()' function test.
+ This test is executed only once during system boot (so affects
+ only boot time), or at module load time.
+
+ If unsure, say N.
+
config TEST_IOV_ITER
tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2625,6 +2652,7 @@ config RESOURCE_KUNIT_TEST
tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
+ select GET_FREE_REGION
help
This builds the resource API unit test.
Tests the logic of API provided by resource.c and ioport.h.
diff --git a/lib/Makefile b/lib/Makefile
index 2f1c5a9277af..773adf88af41 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -14,6 +14,7 @@ KCOV_INSTRUMENT_list_debug.o := n
KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
KCOV_INSTRUMENT_fault-inject.o := n
+KCOV_INSTRUMENT_find_bit.o := n
# string.o implements standard library functions like memset/memcpy etc.
# Use -ffreestanding to ensure that the compiler does not try to "optimize"
diff --git a/lib/bcd.c b/lib/bcd.c
index 7e4750b6e801..c5e79ba9cd7b 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -10,6 +10,8 @@ EXPORT_SYMBOL(_bcd2bin);
unsigned char _bin2bcd(unsigned val)
{
- return ((val / 10) << 4) + val % 10;
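+ /*
+ * t = val / 10 via multiply-shift: 103/1024 approximates 1/10 and
+ * is exact for the BCD input range 0-99 (in fact up to 178).
+ */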
+ const unsigned int t = (val * 103) >> 10;
+
+ return (t << 4) | (val - t * 10);
}
EXPORT_SYMBOL(_bin2bcd);
diff --git a/lib/buildid.c b/lib/buildid.c
index e02b5507418b..290641d92ac1 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -8,154 +8,302 @@
#define BUILD_ID 3
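+
+/* upper bound on the number of program headers we are willing to scan */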
+#define MAX_PHDR_CNT 256
+
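+/*
+ * freader provides a simple linear-read interface over either a
+ * page-cache-backed file (fetched folio by folio, optionally allowed to
+ * fault) or a plain memory buffer.
+ */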
+struct freader {
+ void *buf;
+ u32 buf_sz;
+ int err;
+ union {
+ struct {
+ struct file *file;
+ struct folio *folio;
+ void *addr;
+ loff_t folio_off;
+ bool may_fault;
+ };
+ struct {
+ const char *data;
+ u64 data_sz;
+ };
+ };
+};
+
+static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
+ struct file *file, bool may_fault)
+{
+ memset(r, 0, sizeof(*r));
+ r->buf = buf;
+ r->buf_sz = buf_sz;
+ r->file = file;
+ r->may_fault = may_fault;
+}
+
+static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz)
+{
+ memset(r, 0, sizeof(*r));
+ r->data = data;
+ r->data_sz = data_sz;
+}
+
+static void freader_put_folio(struct freader *r)
+{
+ if (!r->folio)
+ return;
+ kunmap_local(r->addr);
+ folio_put(r->folio);
+ r->folio = NULL;
+}
+
+static int freader_get_folio(struct freader *r, loff_t file_off)
+{
+ /* check if we can just reuse current folio */
+ if (r->folio && file_off >= r->folio_off &&
+ file_off < r->folio_off + folio_size(r->folio))
+ return 0;
+
+ freader_put_folio(r);
+
+ r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
+
+ /* if sleeping is allowed, wait for the page, if necessary */
+ if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
+ filemap_invalidate_lock_shared(r->file->f_mapping);
+ r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
+ NULL, r->file);
+ filemap_invalidate_unlock_shared(r->file->f_mapping);
+ }
+
+ if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
+ if (!IS_ERR(r->folio))
+ folio_put(r->folio);
+ r->folio = NULL;
+ return -EFAULT;
+ }
+
+ r->folio_off = folio_pos(r->folio);
+ r->addr = kmap_local_folio(r->folio, 0);
+
+ return 0;
+}
+
+static const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
+{
+ size_t folio_sz;
+
+ /* provided internal temporary buffer should be sized correctly */
+ if (WARN_ON(r->buf && sz > r->buf_sz)) {
+ r->err = -E2BIG;
+ return NULL;
+ }
+
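+ /* guard against file_off + sz wrapping around */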
+ if (unlikely(file_off + sz < file_off)) {
+ r->err = -EOVERFLOW;
+ return NULL;
+ }
+
+ /* working with memory buffer is much more straightforward */
+ if (!r->buf) {
+ if (file_off + sz > r->data_sz) {
+ r->err = -ERANGE;
+ return NULL;
+ }
+ return r->data + file_off;
+ }
+
+ /* fetch or reuse folio for given file offset */
+ r->err = freader_get_folio(r, file_off);
+ if (r->err)
+ return NULL;
+
+ /* if the requested data crosses folio boundaries, we have to copy
+ * everything into our local buffer to keep a simple linear memory
+ * access interface
+ */
+ folio_sz = folio_size(r->folio);
+ if (file_off + sz > r->folio_off + folio_sz) {
+ int part_sz = r->folio_off + folio_sz - file_off;
+
+ /* copy the part that resides in the current folio */
+ memcpy(r->buf, r->addr + (file_off - r->folio_off), part_sz);
+
+ /* fetch next folio */
+ r->err = freader_get_folio(r, r->folio_off + folio_sz);
+ if (r->err)
+ return NULL;
+
+ /* copy the rest of requested data */
+ memcpy(r->buf + part_sz, r->addr, sz - part_sz);
+
+ return r->buf;
+ }
+
+ /* if data fits in a single folio, just return direct pointer */
+ return r->addr + (file_off - r->folio_off);
+}
+
+static void freader_cleanup(struct freader *r)
+{
+ if (!r->buf)
+ return; /* non-file-backed mode */
+
+ freader_put_folio(r);
+}
+
/*
* Parse build id from the note segment. This logic can be shared between
* 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
* identical.
*/
-static int parse_build_id_buf(unsigned char *build_id,
- __u32 *size,
- const void *note_start,
- Elf32_Word note_size)
+static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size,
+ loff_t note_off, Elf32_Word note_size)
{
- Elf32_Word note_offs = 0, new_offs;
+ const char note_name[] = "GNU";
+ const size_t note_name_sz = sizeof(note_name);
+ u32 build_id_off, new_off, note_end, name_sz, desc_sz;
+ const Elf32_Nhdr *nhdr;
+ const char *data;
+
+ if (check_add_overflow(note_off, note_size, &note_end))
+ return -EINVAL;
- while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
- Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
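+ /*
+ * Each note is an Elf32_Nhdr followed by the name and desc blobs,
+ * each padded to 4-byte alignment.
+ */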
+ while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) {
+ nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz);
+ if (!nhdr)
+ return r->err;
+
+ name_sz = READ_ONCE(nhdr->n_namesz);
+ desc_sz = READ_ONCE(nhdr->n_descsz);
+
+ new_off = note_off + sizeof(Elf32_Nhdr);
+ if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
+ check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
+ new_off > note_end)
+ break;
if (nhdr->n_type == BUILD_ID &&
- nhdr->n_namesz == sizeof("GNU") &&
- !strcmp((char *)(nhdr + 1), "GNU") &&
- nhdr->n_descsz > 0 &&
- nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
- memcpy(build_id,
- note_start + note_offs +
- ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
- nhdr->n_descsz);
- memset(build_id + nhdr->n_descsz, 0,
- BUILD_ID_SIZE_MAX - nhdr->n_descsz);
+ name_sz == note_name_sz &&
+ memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
+ desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
+ build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);
+
+ /* freader_fetch() will invalidate nhdr pointer */
+ data = freader_fetch(r, build_id_off, desc_sz);
+ if (!data)
+ return r->err;
+
+ memcpy(build_id, data, desc_sz);
+ memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
if (size)
- *size = nhdr->n_descsz;
+ *size = desc_sz;
return 0;
}
- new_offs = note_offs + sizeof(Elf32_Nhdr) +
- ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
- if (new_offs <= note_offs) /* overflow */
- break;
- note_offs = new_offs;
+
+ note_off = new_off;
}
return -EINVAL;
}
-static inline int parse_build_id(const void *page_addr,
- unsigned char *build_id,
- __u32 *size,
- const void *note_start,
- Elf32_Word note_size)
+/* Parse build ID from 32-bit ELF */
+static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size)
{
- /* check for overflow */
- if (note_start < page_addr || note_start + note_size < note_start)
- return -EINVAL;
+ const Elf32_Ehdr *ehdr;
+ const Elf32_Phdr *phdr;
+ __u32 phnum, phoff, i;
- /* only supports note that fits in the first page */
- if (note_start + note_size > page_addr + PAGE_SIZE)
- return -EINVAL;
+ ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr));
+ if (!ehdr)
+ return r->err;
- return parse_build_id_buf(build_id, size, note_start, note_size);
-}
+ /* subsequent freader_fetch() calls invalidate pointers, so remember locally */
+ phnum = READ_ONCE(ehdr->e_phnum);
+ phoff = READ_ONCE(ehdr->e_phoff);
-/* Parse build ID from 32-bit ELF */
-static int get_build_id_32(const void *page_addr, unsigned char *build_id,
- __u32 *size)
-{
- Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
- Elf32_Phdr *phdr;
- int i;
-
- /*
- * FIXME
- * Neither ELF spec nor ELF loader require that program headers
- * start immediately after ELF header.
- */
- if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
- return -EINVAL;
- /* only supports phdr that fits in one page */
- if (ehdr->e_phnum >
- (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ /* set an upper bound on the number of segments (phdrs) we iterate */
+ if (phnum > MAX_PHDR_CNT)
+ phnum = MAX_PHDR_CNT;
+
+ /* check that phoff is not large enough to cause an overflow */
+ if (phoff + phnum * sizeof(Elf32_Phdr) < phoff)
return -EINVAL;
- phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+ for (i = 0; i < phnum; ++i) {
+ phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr));
+ if (!phdr)
+ return r->err;
- for (i = 0; i < ehdr->e_phnum; ++i) {
- if (phdr[i].p_type == PT_NOTE &&
- !parse_build_id(page_addr, build_id, size,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz))
+ if (phdr->p_type == PT_NOTE &&
+ !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
+ READ_ONCE(phdr->p_filesz)))
return 0;
}
return -EINVAL;
}
/* Parse build ID from 64-bit ELF */
-static int get_build_id_64(const void *page_addr, unsigned char *build_id,
- __u32 *size)
+static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size)
{
- Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
- Elf64_Phdr *phdr;
- int i;
-
- /*
- * FIXME
- * Neither ELF spec nor ELF loader require that program headers
- * start immediately after ELF header.
- */
- if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
- return -EINVAL;
- /* only supports phdr that fits in one page */
- if (ehdr->e_phnum >
- (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ const Elf64_Ehdr *ehdr;
+ const Elf64_Phdr *phdr;
+ __u32 phnum, i;
+ __u64 phoff;
+
+ ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr));
+ if (!ehdr)
+ return r->err;
+
+ /* subsequent freader_fetch() calls invalidate pointers, so remember locally */
+ phnum = READ_ONCE(ehdr->e_phnum);
+ phoff = READ_ONCE(ehdr->e_phoff);
+
+ /* set an upper bound on the number of segments (phdrs) we iterate */
+ if (phnum > MAX_PHDR_CNT)
+ phnum = MAX_PHDR_CNT;
+
+ /* check that phoff is not large enough to cause an overflow */
+ if (phoff + phnum * sizeof(Elf64_Phdr) < phoff)
return -EINVAL;
- phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+ for (i = 0; i < phnum; ++i) {
+ phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr));
+ if (!phdr)
+ return r->err;
- for (i = 0; i < ehdr->e_phnum; ++i) {
- if (phdr[i].p_type == PT_NOTE &&
- !parse_build_id(page_addr, build_id, size,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz))
+ if (phdr->p_type == PT_NOTE &&
+ !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
+ READ_ONCE(phdr->p_filesz)))
return 0;
}
+
return -EINVAL;
}
-/*
- * Parse build ID of ELF file mapped to vma
- * @vma: vma object
- * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
- * @size: returns actual build id size in case of success
- *
- * Return: 0 on success, -EINVAL otherwise
- */
-int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
- __u32 *size)
+/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
+#define MAX_FREADER_BUF_SZ 64
+
+static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size, bool may_fault)
{
- Elf32_Ehdr *ehdr;
- struct page *page;
- void *page_addr;
+ const Elf32_Ehdr *ehdr;
+ struct freader r;
+ char buf[MAX_FREADER_BUF_SZ];
int ret;
/* only works for page backed storage */
if (!vma->vm_file)
return -EINVAL;
- page = find_get_page(vma->vm_file->f_mapping, 0);
- if (!page)
- return -EFAULT; /* page not mapped */
+ freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);
+
+ /* fetch first 18 bytes of ELF header for checks */
+ ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
+ if (!ehdr) {
+ ret = r.err;
+ goto out;
+ }
ret = -EINVAL;
- page_addr = kmap_local_page(page);
- ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
@@ -166,15 +314,46 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
goto out;
if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
- ret = get_build_id_32(page_addr, build_id, size);
+ ret = get_build_id_32(&r, build_id, size);
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
- ret = get_build_id_64(page_addr, build_id, size);
+ ret = get_build_id_64(&r, build_id, size);
out:
- kunmap_local(page_addr);
- put_page(page);
+ freader_cleanup(&r);
return ret;
}
+/*
+ * Parse build ID of ELF file mapped to vma
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Assumes no page fault can be taken, so if the relevant portions of the
+ * ELF file are not already paged in, fetching the build ID fails.
+ *
+ * Return: 0 on success; negative error otherwise
+ */
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
+{
+ return __build_id_parse(vma, build_id, size, false /* !may_fault */);
+}
+
+/*
+ * Parse build ID of ELF file mapped to VMA
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Assumes a faultable context and may take page faults to bring file data
+ * into the page cache.
+ *
+ * Return: 0 on success; negative error otherwise
+ */
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
+{
+ return __build_id_parse(vma, build_id, size, true /* may_fault */);
+}
+
/**
* build_id_parse_buf - Get build ID from a buffer
* @buf: ELF note section(s) to parse
@@ -185,7 +364,15 @@ out:
*/
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
{
- return parse_build_id_buf(build_id, NULL, buf, buf_size);
+ struct freader r;
+ int err;
+
+ freader_init_from_mem(&r, buf, buf_size);
+
+ err = parse_build_id(&r, build_id, NULL, 0, buf_size);
+
+ freader_cleanup(&r);
+ return err;
}
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 4e4d081a1d3b..be04aa42125c 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -468,12 +468,9 @@ static __wsum to_wsum(u32 x)
static void assert_setup_correct(struct kunit *test)
{
- CHECK_EQ(sizeof(random_buf) / sizeof(random_buf[0]), MAX_LEN);
- CHECK_EQ(sizeof(expected_results) / sizeof(expected_results[0]),
- MAX_LEN);
- CHECK_EQ(sizeof(init_sums_no_overflow) /
- sizeof(init_sums_no_overflow[0]),
- MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(random_buf), MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(expected_results), MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(init_sums_no_overflow), MAX_LEN);
}
/*
diff --git a/lib/closure.c b/lib/closure.c
index 116afae2eed9..2bfe7d2a0048 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -278,7 +278,7 @@ static int debug_show(struct seq_file *f, void *data)
seq_printf(f, " W %pS\n",
(void *) cl->waiting_on);
- seq_puts(f, "\n");
+ seq_putc(f, '\n');
}
spin_unlock_irq(&closure_list_lock);
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 842894158944..32138bb8ef77 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
/*
@@ -103,12 +102,11 @@
#ifdef STATIC
# define XZ_PREBOOT
#else
-#include <linux/decompress/unxz.h>
+# include <linux/decompress/unxz.h>
#endif
#ifdef __KERNEL__
# include <linux/decompress/mm.h>
#endif
-#define XZ_EXTERN STATIC
#ifndef XZ_PREBOOT
# include <linux/slab.h>
@@ -127,11 +125,21 @@
#ifdef CONFIG_X86
# define XZ_DEC_X86
#endif
-#ifdef CONFIG_PPC
+#if defined(CONFIG_PPC) && defined(CONFIG_CPU_BIG_ENDIAN)
# define XZ_DEC_POWERPC
#endif
#ifdef CONFIG_ARM
-# define XZ_DEC_ARM
+# ifdef CONFIG_THUMB2_KERNEL
+# define XZ_DEC_ARMTHUMB
+# else
+# define XZ_DEC_ARM
+# endif
+#endif
+#ifdef CONFIG_ARM64
+# define XZ_DEC_ARM64
+#endif
+#ifdef CONFIG_RISCV
+# define XZ_DEC_RISCV
#endif
#ifdef CONFIG_SPARC
# define XZ_DEC_SPARC
@@ -220,7 +228,7 @@ void *memmove(void *dest, const void *src, size_t size)
#endif
/*
- * Since we need memmove anyway, would use it as memcpy too.
+ * Since we need memmove anyway, we could use it as memcpy too.
* Commented out for now to avoid breaking things.
*/
/*
@@ -390,17 +398,17 @@ error_alloc_state:
}
/*
- * This macro is used by architecture-specific files to decompress
+ * This function is used by architecture-specific files to decompress
* the kernel image.
*/
#ifdef XZ_PREBOOT
-STATIC int INIT __decompress(unsigned char *buf, long len,
- long (*fill)(void*, unsigned long),
- long (*flush)(void*, unsigned long),
- unsigned char *out_buf, long olen,
- long *pos,
- void (*error)(char *x))
+STATIC int INIT __decompress(unsigned char *in, long in_size,
+ long (*fill)(void *dest, unsigned long size),
+ long (*flush)(void *src, unsigned long size),
+ unsigned char *out, long out_size,
+ long *in_used,
+ void (*error)(char *x))
{
- return unxz(buf, len, fill, flush, out_buf, pos, error);
+ return unxz(in, in_size, fill, flush, out, in_used, error);
}
#endif
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
index c4cc4026c451..5b9bfaac7ac1 100644
--- a/lib/dim/Makefile
+++ b/lib/dim/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_DIMLIB) += dimlib.o
-dimlib-objs := dim.o net_dim.o rdma_dim.o
+dimlib-y := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 1a996fbbf50a..388da1aea14a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -73,6 +73,7 @@ void dump_stack_print_info(const char *log_lvl)
print_worker_info(log_lvl, current);
print_stop_info(log_lvl, current);
+ print_scx_info(log_lvl, current);
}
/**
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index f2c5e7910bb1..5a007952f7f2 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -1147,7 +1147,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
iter->table->mod_name, dp->function,
ddebug_describe_flags(dp->flags, &flags));
seq_escape_str(m, dp->format, ESCAPE_SPACE, "\t\r\n\"");
- seq_puts(m, "\"");
+ seq_putc(m, '"');
if (dp->class_id != _DPRINTK_CLASS_DFLT) {
class = ddebug_class_name(iter, dp);
@@ -1156,7 +1156,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
else
seq_printf(m, " class unknown, _id:%d", dp->class_id);
}
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
return 0;
}
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index d608f9b48c10..52eb6ba29698 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -2,6 +2,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
+#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/types.h>
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index f9ad60a9c7bd..ecb638d4cde1 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -306,8 +306,7 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
orig = kvmalloc(prev_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
checker(((expected_pages) * PAGE_SIZE) * 2, \
- kvrealloc(orig, prev_size, \
- ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
+ kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
kvfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index fa692c86f069..79e067b51488 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -5,99 +5,31 @@
#include <linux/gfp.h>
#include <linux/kmemleak.h>
-#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
-#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
-
-struct genradix_node {
- union {
- /* Interior node: */
- struct genradix_node *children[GENRADIX_ARY];
-
- /* Leaf: */
- u8 data[GENRADIX_NODE_SIZE];
- };
-};
-
-static inline int genradix_depth_shift(unsigned depth)
-{
- return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
-}
-
-/*
- * Returns size (of data, in bytes) that a tree of a given depth holds:
- */
-static inline size_t genradix_depth_size(unsigned depth)
-{
- return 1UL << genradix_depth_shift(depth);
-}
-
-/* depth that's needed for a genradix that can address up to ULONG_MAX: */
-#define GENRADIX_MAX_DEPTH \
- DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
-
-#define GENRADIX_DEPTH_MASK \
- ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
-
-static inline unsigned genradix_root_to_depth(struct genradix_root *r)
-{
- return (unsigned long) r & GENRADIX_DEPTH_MASK;
-}
-
-static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
-{
- return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
-}
-
/*
* Returns pointer to the specified byte @offset within @radix, or NULL if not
* allocated
*/
void *__genradix_ptr(struct __genradix *radix, size_t offset)
{
- struct genradix_root *r = READ_ONCE(radix->root);
- struct genradix_node *n = genradix_root_to_node(r);
- unsigned level = genradix_root_to_depth(r);
-
- if (ilog2(offset) >= genradix_depth_shift(level))
- return NULL;
-
- while (1) {
- if (!n)
- return NULL;
- if (!level)
- break;
-
- level--;
-
- n = n->children[offset >> genradix_depth_shift(level)];
- offset &= genradix_depth_size(level) - 1;
- }
-
- return &n->data[offset];
+ return __genradix_ptr_inlined(radix, offset);
}
EXPORT_SYMBOL(__genradix_ptr);
-static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
-{
- return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
-}
-
-static inline void genradix_free_node(struct genradix_node *node)
-{
- kfree(node);
-}
-
/*
* Returns pointer to the specified byte @offset within @radix, allocating it if
* necessary - newly allocated slots are always zeroed out:
*/
void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ struct genradix_node **preallocated,
gfp_t gfp_mask)
{
struct genradix_root *v = READ_ONCE(radix->root);
struct genradix_node *n, *new_node = NULL;
unsigned level;
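+ /* take ownership of the caller's preallocated node, if any */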
+ if (preallocated)
+ swap(new_node, *preallocated);
+
/* Increase tree depth if necessary: */
while (1) {
struct genradix_root *r = v, *new_root;
@@ -281,7 +213,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size,
size_t offset;
for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE)
- if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
+ if (!__genradix_ptr_alloc(radix, offset, NULL, gfp_mask))
return -ENOMEM;
return 0;
diff --git a/lib/glob.c b/lib/glob.c
index 15b73f490720..aa57900d2062 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -68,6 +68,8 @@ bool __pure glob_match(char const *pat, char const *str)
back_str = --str; /* Allow zero-length match */
break;
case '[': { /* Character class */
+ if (c == '\0') /* No possible match */
+ return false;
bool match = false, inverted = (*pat == '!');
char const *class = pat + inverted;
unsigned char a = *class++;
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 30f6bbf04a4a..5aa51978e456 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -9,7 +9,8 @@ kunit-objs += test.o \
try-catch.o \
executor.o \
attributes.o \
- device.o
+ device.o \
+ platform.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
@@ -19,6 +20,7 @@ endif
obj-y += hooks.o
obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
+obj-$(CONFIG_KUNIT_TEST) += platform-test.o
# string-stream-test compiles built-in only.
ifeq ($(CONFIG_KUNIT_TEST),y)
diff --git a/lib/kunit/platform-test.c b/lib/kunit/platform-test.c
new file mode 100644
index 000000000000..e3debb8fbcef
--- /dev/null
+++ b/lib/kunit/platform-test.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for KUnit platform driver infrastructure.
+ */
+
+#include <linux/platform_device.h>
+
+#include <kunit/platform_device.h>
+#include <kunit/test.h>
+
+/*
+ * Test that kunit_platform_device_alloc() creates a platform device.
+ */
+static void kunit_platform_device_alloc_test(struct kunit *test)
+{
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ kunit_platform_device_alloc(test, "kunit-platform", 1));
+}
+
+/*
+ * Test that kunit_platform_device_add() registers a platform device on the
+ * platform bus with the proper name and id.
+ */
+static void kunit_platform_device_add_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-add";
+ const int id = -1;
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ KUNIT_EXPECT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ KUNIT_EXPECT_TRUE(test, dev_is_platform(&pdev->dev));
+ KUNIT_EXPECT_STREQ(test, pdev->name, name);
+ KUNIT_EXPECT_EQ(test, pdev->id, id);
+}
+
+/*
+ * Test that kunit_platform_device_add() called twice with the same device name
+ * and id fails the second time and properly cleans up.
+ */
+static void kunit_platform_device_add_twice_fails_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-add-2";
+ const int id = -1;
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ KUNIT_EXPECT_NE(test, 0, kunit_platform_device_add(test, pdev));
+}
+
+static int kunit_platform_device_find_by_name(struct device *dev, const void *data)
+{
+ return strcmp(dev_name(dev), data) == 0;
+}
+
+/*
+ * Test that kunit_platform_device_add() cleans up by removing the platform
+ * device when the test finishes.
+ */
+static void kunit_platform_device_add_cleans_up(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-clean";
+ const int id = -1;
+ struct kunit fake;
+ struct device *dev;
+
+ kunit_init_test(&fake, "kunit_platform_device_add_fake_test", NULL);
+ KUNIT_ASSERT_EQ(test, fake.status, KUNIT_SUCCESS);
+
+ pdev = kunit_platform_device_alloc(&fake, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(&fake, pdev));
+ dev = bus_find_device(&platform_bus_type, NULL, name,
+ kunit_platform_device_find_by_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ put_device(dev);
+
+ /* Remove pdev */
+ kunit_cleanup(&fake);
+
+ /*
+ * Failing to migrate the kunit_resource would lead to an extra
+ * put_device() call on the platform device. The best we can do here is
+ * make sure the device no longer exists on the bus, but if something
+ * is wrong we'll see a refcount underflow here. We can't test for a
+ * refcount underflow because the kref matches the lifetime of the
+ * device which should already be freed and could be used by something
+ * else.
+ */
+ dev = bus_find_device(&platform_bus_type, NULL, name,
+ kunit_platform_device_find_by_name);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, dev);
+ put_device(dev);
+}
+
+/*
+ * Test suite for struct platform_device kunit APIs
+ */
+static struct kunit_case kunit_platform_device_test_cases[] = {
+ KUNIT_CASE(kunit_platform_device_alloc_test),
+ KUNIT_CASE(kunit_platform_device_add_test),
+ KUNIT_CASE(kunit_platform_device_add_twice_fails_test),
+ KUNIT_CASE(kunit_platform_device_add_cleans_up),
+ {}
+};
+
+static struct kunit_suite kunit_platform_device_suite = {
+ .name = "kunit_platform_device",
+ .test_cases = kunit_platform_device_test_cases,
+};
+
+struct kunit_platform_driver_test_context {
+ struct platform_driver pdrv;
+ const char *data;
+};
+
+static const char * const test_data = "test data";
+
+static inline struct kunit_platform_driver_test_context *
+to_test_context(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct kunit_platform_driver_test_context,
+ pdrv);
+}
+
+static int kunit_platform_driver_probe(struct platform_device *pdev)
+{
+ struct kunit_platform_driver_test_context *ctx;
+
+ ctx = to_test_context(pdev);
+ ctx->data = test_data;
+
+ return 0;
+}
+
+/* Test that kunit_platform_driver_register() registers a driver that probes. */
+static void kunit_platform_driver_register_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct kunit_platform_driver_test_context *ctx;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ const char *name = "kunit-platform-register";
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ pdev = kunit_platform_device_alloc(test, name, -1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ ctx->pdrv.probe = kunit_platform_driver_probe;
+ ctx->pdrv.driver.name = name;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+
+ KUNIT_EXPECT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+ KUNIT_EXPECT_STREQ(test, ctx->data, test_data);
+}
+
+/*
+ * Test that kunit_platform_device_prepare_wait_for_probe() completes the completion
+ * when the device is already probed.
+ */
+static void kunit_platform_device_prepare_wait_for_probe_completes_when_already_probed(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct kunit_platform_driver_test_context *ctx;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ const char *name = "kunit-platform-wait";
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ pdev = kunit_platform_device_alloc(test, name, -1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ ctx->pdrv.probe = kunit_platform_driver_probe;
+ ctx->pdrv.driver.name = name;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ /* Make sure driver has actually probed */
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+
+ reinit_completion(&comp);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+
+ KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, HZ));
+}
+
+static struct kunit_case kunit_platform_driver_test_cases[] = {
+ KUNIT_CASE(kunit_platform_driver_register_test),
+ KUNIT_CASE(kunit_platform_device_prepare_wait_for_probe_completes_when_already_probed),
+ {}
+};
+
+/*
+ * Test suite for struct platform_driver kunit APIs
+ */
+static struct kunit_suite kunit_platform_driver_suite = {
+ .name = "kunit_platform_driver",
+ .test_cases = kunit_platform_driver_test_cases,
+};
+
+kunit_test_suites(
+ &kunit_platform_device_suite,
+ &kunit_platform_driver_suite,
+);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for KUnit platform driver infrastructure");
diff --git a/lib/kunit/platform.c b/lib/kunit/platform.c
new file mode 100644
index 000000000000..0b518de26065
--- /dev/null
+++ b/lib/kunit/platform.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test managed platform driver
+ */
+
+#include <linux/completion.h>
+#include <linux/device/bus.h>
+#include <linux/device/driver.h>
+#include <linux/platform_device.h>
+
+#include <kunit/platform_device.h>
+#include <kunit/resource.h>
+
+struct kunit_platform_device_alloc_params {
+ const char *name;
+ int id;
+};
+
+static int kunit_platform_device_alloc_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_platform_device_alloc_params *params = context;
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc(params->name, params->id);
+ if (!pdev)
+ return -ENOMEM;
+
+ res->data = pdev;
+
+ return 0;
+}
+
+static void kunit_platform_device_alloc_exit(struct kunit_resource *res)
+{
+ struct platform_device *pdev = res->data;
+
+ platform_device_put(pdev);
+}
+
+/**
+ * kunit_platform_device_alloc() - Allocate a KUnit test managed platform device
+ * @test: test context
+ * @name: device name of the platform device to allocate
+ * @id: identifier of the platform device to allocate
+ *
+ * Allocate a test managed platform device. The device is put when the test completes.
+ *
+ * Return: Allocated platform device on success, NULL on failure.
+ */
+struct platform_device *
+kunit_platform_device_alloc(struct kunit *test, const char *name, int id)
+{
+ struct kunit_platform_device_alloc_params params = {
+ .name = name,
+ .id = id,
+ };
+
+ return kunit_alloc_resource(test,
+ kunit_platform_device_alloc_init,
+ kunit_platform_device_alloc_exit,
+ GFP_KERNEL, &params);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_alloc);
+
+static void kunit_platform_device_add_exit(struct kunit_resource *res)
+{
+ struct platform_device *pdev = res->data;
+
+ platform_device_unregister(pdev);
+}
+
+static bool
+kunit_platform_device_alloc_match(struct kunit *test,
+ struct kunit_resource *res, void *match_data)
+{
+ struct platform_device *pdev = match_data;
+
+ return res->data == pdev && res->free == kunit_platform_device_alloc_exit;
+}
+
+KUNIT_DEFINE_ACTION_WRAPPER(platform_device_unregister_wrapper,
+ platform_device_unregister, struct platform_device *);
+/**
+ * kunit_platform_device_add() - Register a KUnit test managed platform device
+ * @test: test context
+ * @pdev: platform device to add
+ *
+ * Register a test managed platform device. The device is unregistered when the
+ * test completes.
+ *
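+ * Example (a minimal sketch; the device and function names are illustrative):
+ *
+ * .. code-block:: c
+ *
+ * static void kunit_platform_device_example_test(struct kunit *test)
+ * {
+ * struct platform_device *pdev;
+ *
+ * pdev = kunit_platform_device_alloc(test, "kunit-device", -1);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ * }
+ *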
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_device_add(struct kunit *test, struct platform_device *pdev)
+{
+ struct kunit_resource *res;
+ int ret;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ return ret;
+
+ res = kunit_find_resource(test, kunit_platform_device_alloc_match, pdev);
+ if (res) {
+ /*
+ * Transfer the reference count of the platform device if it
+ * was allocated with kunit_platform_device_alloc(). In this
+ * case, calling platform_device_put() when the test exits from
+ * kunit_platform_device_alloc_exit() would lead to reference
+ * count underflow because platform_device_unregister_wrapper()
+ * calls platform_device_unregister() which also calls
+ * platform_device_put().
+ *
+ * Usually callers transfer the refcount initialized in
+ * platform_device_alloc() to platform_device_add() by calling
+ * platform_device_unregister() when platform_device_add()
+ * succeeds or platform_device_put() when it fails. KUnit has to
+ * keep this straight by redirecting the free routine for the
+ * resource to the right function. Luckily this only has to
+ * account for the success scenario.
+ */
+ res->free = kunit_platform_device_add_exit;
+ kunit_put_resource(res);
+ } else {
+ ret = kunit_add_action_or_reset(test, platform_device_unregister_wrapper, pdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_add);
+
+struct kunit_platform_device_probe_nb {
+ struct completion *x;
+ struct device *dev;
+ struct notifier_block nb;
+};
+
+static int kunit_platform_device_probe_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct kunit_platform_device_probe_nb *knb;
+ struct device *dev = data;
+
+ knb = container_of(nb, struct kunit_platform_device_probe_nb, nb);
+ if (event != BUS_NOTIFY_BOUND_DRIVER || knb->dev != dev)
+ return NOTIFY_DONE;
+
+ complete(knb->x);
+
+ return NOTIFY_OK;
+}
+
+static void kunit_platform_device_probe_nb_remove(void *nb)
+{
+ bus_unregister_notifier(&platform_bus_type, nb);
+}
+
+/**
+ * kunit_platform_device_prepare_wait_for_probe() - Prepare a completion
+ * variable to wait for a platform device to probe
+ * @test: test context
+ * @pdev: platform device whose probe will be waited for
+ * @x: completion variable completed when @pdev has probed
+ *
+ * Prepare a completion variable @x to wait for @pdev to probe. Waiting on the
+ * completion forces a preemption, allowing the platform driver to probe.
+ *
+ * Example
+ *
+ * .. code-block:: c
+ *
+ * static int kunit_platform_driver_probe(struct platform_device *pdev)
+ * {
+ * return 0;
+ * }
+ *
+ * static void kunit_platform_driver_test(struct kunit *test)
+ * {
+ * struct platform_device *pdev;
+ * struct platform_driver *pdrv;
+ * DECLARE_COMPLETION_ONSTACK(comp);
+ *
+ * pdev = kunit_platform_device_alloc(test, "kunit-platform", -1);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ *
+ * pdrv = kunit_kzalloc(test, sizeof(*pdrv), GFP_KERNEL);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdrv);
+ *
+ * pdrv->probe = kunit_platform_driver_probe;
+ * pdrv->driver.name = "kunit-platform";
+ * pdrv->driver.owner = THIS_MODULE;
+ *
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, pdrv));
+ *
+ * KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_device_prepare_wait_for_probe(struct kunit *test,
+ struct platform_device *pdev,
+ struct completion *x)
+{
+ struct device *dev = &pdev->dev;
+ struct kunit_platform_device_probe_nb *knb;
+ bool bound;
+
+ knb = kunit_kzalloc(test, sizeof(*knb), GFP_KERNEL);
+ if (!knb)
+ return -ENOMEM;
+
+ knb->nb.notifier_call = kunit_platform_device_probe_notify;
+ knb->dev = dev;
+ knb->x = x;
+
+ device_lock(dev);
+ bound = device_is_bound(dev);
+ if (bound) {
+ device_unlock(dev);
+ complete(x);
+ kunit_kfree(test, knb);
+ return 0;
+ }
+
+ bus_register_notifier(&platform_bus_type, &knb->nb);
+ device_unlock(dev);
+
+ return kunit_add_action_or_reset(test, kunit_platform_device_probe_nb_remove, &knb->nb);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_prepare_wait_for_probe);
+
+KUNIT_DEFINE_ACTION_WRAPPER(platform_driver_unregister_wrapper,
+ platform_driver_unregister, struct platform_driver *);
+/**
+ * kunit_platform_driver_register() - Register a KUnit test managed platform driver
+ * @test: test context
+ * @drv: platform driver to register
+ *
+ * Register a test managed platform driver. This allows callers to embed the
+ * @drv in a container structure and use container_of() in the probe function
+ * to pass information to KUnit tests.
+ *
+ * Example
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_test_context {
+ * struct platform_driver pdrv;
+ * const char *data;
+ * };
+ *
+ * static inline struct kunit_test_context *
+ * to_test_context(struct platform_device *pdev)
+ * {
+ * return container_of(to_platform_driver(pdev->dev.driver),
+ * struct kunit_test_context,
+ * pdrv);
+ * }
+ *
+ * static int kunit_platform_driver_probe(struct platform_device *pdev)
+ * {
+ * struct kunit_test_context *ctx;
+ *
+ * ctx = to_test_context(pdev);
+ * ctx->data = "test data";
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_platform_driver_test(struct kunit *test)
+ * {
+ * struct kunit_test_context *ctx;
+ *
+ * ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ *
+ * ctx->pdrv.probe = kunit_platform_driver_probe;
+ * ctx->pdrv.driver.name = "kunit-platform";
+ * ctx->pdrv.driver.owner = THIS_MODULE;
+ *
+ * KUNIT_EXPECT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ * <... wait for driver to probe ...>
+ * KUNIT_EXPECT_STREQ(test, ctx->data, "test data");
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_driver_register(struct kunit *test,
+ struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, platform_driver_unregister_wrapper, drv);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_driver_register);
diff --git a/lib/list-test.c b/lib/list-test.c
index 37cbc33e9fdb..e207c4c98d70 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -102,6 +102,8 @@ static void list_test_list_replace(struct kunit *test)
/* now: [list] -> a_new -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.prev, &list);
}
static void list_test_list_replace_init(struct kunit *test)
@@ -118,6 +120,8 @@ static void list_test_list_replace_init(struct kunit *test)
/* now: [list] -> a_new -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.prev, &list);
/* check a_old is empty (initialized) */
KUNIT_EXPECT_TRUE(test, list_empty_careful(&a_old));
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index b3d9187611de..9e0d469c7658 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -243,7 +243,7 @@ static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
BUG_ON(!lc);
BUG_ON(!lc->nr_elements);
- hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
+ hlist_for_each_entry(e, lc_hash_slot(lc, enr), collision) {
/* "about to be changed" elements, pending transaction commit,
* are hashed by their "new number". "Normal" elements have
* lc_number == lc_new_number. */
@@ -303,7 +303,7 @@ void lc_del(struct lru_cache *lc, struct lc_element *e)
BUG_ON(e->refcnt);
e->lc_number = e->lc_new_number = LC_FREE;
- hlist_del_init(&e->colision);
+ hlist_del_init(&e->collision);
list_move(&e->list, &lc->free);
RETURN();
}
@@ -324,9 +324,9 @@ static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned n
PARANOIA_LC_ELEMENT(lc, e);
e->lc_new_number = new_number;
- if (!hlist_unhashed(&e->colision))
- __hlist_del(&e->colision);
- hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
+ if (!hlist_unhashed(&e->collision))
+ __hlist_del(&e->collision);
+ hlist_add_head(&e->collision, lc_hash_slot(lc, new_number));
list_move(&e->list, &lc->to_be_changed);
return e;
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index e7ac8694b797..bc45594ad2a8 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -621,6 +621,7 @@ void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
LZ4_streamHCPtr->internal_donotuse.base = NULL;
LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
}
+EXPORT_SYMBOL(LZ4_resetStreamHC);
int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
const char *dictionary,
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 6df3a8b95808..20990ecba2dd 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -348,17 +348,17 @@ static inline void *mte_safe_root(const struct maple_enode *node)
return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}
-static inline void *mte_set_full(const struct maple_enode *node)
+static inline void __maybe_unused *mte_set_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}
-static inline void *mte_clear_full(const struct maple_enode *node)
+static inline void __maybe_unused *mte_clear_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}
-static inline bool mte_has_null(const struct maple_enode *node)
+static inline bool __maybe_unused mte_has_null(const struct maple_enode *node)
{
return (unsigned long)node & MAPLE_ENODE_NULL;
}
@@ -474,6 +474,7 @@ enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
/*
* mas_set_parent() - Set the parent node and encode the slot
+ * @mas: The maple state
* @enode: The encoded maple node.
* @parent: The encoded maple node that is the parent of @enode.
* @slot: The slot that @enode resides in @parent.
@@ -534,7 +535,7 @@ unsigned int mte_parent_slot(const struct maple_enode *enode)
/*
* mte_parent() - Get the parent of @node.
- * @node: The encoded maple node.
+ * @enode: The encoded maple node.
*
* Return: The parent maple node.
*/
@@ -641,8 +642,8 @@ static inline unsigned int mas_alloc_req(const struct ma_state *mas)
/*
* ma_pivots() - Get a pointer to the maple node pivots.
- * @node - the maple node
- * @type - the node type
+ * @node: the maple node
+ * @type: the node type
*
* In the event of a dead node, this array may be %NULL
*
@@ -665,8 +666,8 @@ static inline unsigned long *ma_pivots(struct maple_node *node,
/*
* ma_gaps() - Get a pointer to the maple node gaps.
- * @node - the maple node
- * @type - the node type
+ * @node: the maple node
+ * @type: the node type
*
* Return: A pointer to the maple node gaps
*/
@@ -880,8 +881,6 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
* @mt: The maple tree
* @mn: The maple node
* @type: The maple node type
- * @offset: The offset of the highest sub-gap in this node.
- * @end: The end of the data in this node.
*/
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
enum maple_type type)
@@ -939,7 +938,7 @@ static inline unsigned char ma_meta_gap(struct maple_node *mn)
/*
* ma_set_meta_gap() - Set the largest gap location in a nodes metadata
* @mn: The maple node
- * @mn: The maple node type
+ * @mt: The maple node type
* @offset: The location of the largest gap.
*/
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
@@ -953,8 +952,8 @@ static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
/*
* mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
- * @mat - the ma_topiary, a linked list of dead nodes.
- * @dead_enode - the node to be marked as dead and added to the tail of the list
+ * @mat: the ma_topiary, a linked list of dead nodes.
+ * @dead_enode: the node to be marked as dead and added to the tail of the list
*
* Add the @dead_enode to the linked list in @mat.
*/
@@ -977,8 +976,8 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
bool free);
/*
* mas_mat_destroy() - Free all nodes and subtrees in a dead list.
- * @mas - the maple state
- * @mat - the ma_topiary linked list of dead nodes to free.
+ * @mas: the maple state
+ * @mat: the ma_topiary linked list of dead nodes to free.
*
* Destroy walk a dead list.
*/
@@ -999,7 +998,7 @@ static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
}
/*
* mas_descend() - Descend into the slot stored in the ma_state.
- * @mas - the maple state.
+ * @mas: the maple state.
*
* Note: Not RCU safe, only use in write side or debug code.
*/
@@ -1346,8 +1345,8 @@ static void mas_node_count(struct ma_state *mas, int count)
* Return:
* - If mas->node is an error or not mas_start, return NULL.
* - If it's an empty tree: NULL & mas->status == ma_none
- * - If it's a single entry: The entry & mas->status == mas_root
- * - If it's a tree: NULL & mas->status == safe root node.
+ * - If it's a single entry: The entry & mas->status == ma_root
+ * - If it's a tree: NULL & mas->status == ma_active
*/
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
@@ -1372,9 +1371,9 @@ retry:
return NULL;
}
+ mas->node = NULL;
/* empty tree */
if (unlikely(!root)) {
- mas->node = NULL;
mas->status = ma_none;
mas->offset = MAPLE_NODE_SLOTS;
return NULL;
@@ -1462,7 +1461,7 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
/*
* mas_leaf_max_gap() - Returns the largest gap in a leaf node
- * @mas - the maple state
+ * @mas: the maple state
*
* Return: The maximum gap in the leaf.
*/
@@ -1544,7 +1543,7 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas)
* @node: The maple node
* @gaps: The pointer to the gaps
* @mt: The maple node type
- * @*off: Pointer to store the offset location of the gap.
+ * @off: Pointer to store the offset location of the gap.
*
* Uses the metadata data end to scan backwards across set gaps.
*
@@ -1651,7 +1650,7 @@ ascend:
/*
* mas_update_gap() - Update a nodes gaps and propagate up if necessary.
- * @mas - the maple state.
+ * @mas: the maple state.
*/
static inline void mas_update_gap(struct ma_state *mas)
{
@@ -1678,8 +1677,8 @@ static inline void mas_update_gap(struct ma_state *mas)
/*
* mas_adopt_children() - Set the parent pointer of all nodes in @parent to
* @parent with the slot encoded.
- * @mas - the maple state (for the tree)
- * @parent - the maple encoded node containing the children.
+ * @mas: the maple state (for the tree)
+ * @parent: the maple encoded node containing the children.
*/
static inline void mas_adopt_children(struct ma_state *mas,
struct maple_enode *parent)
@@ -1701,8 +1700,8 @@ static inline void mas_adopt_children(struct ma_state *mas,
/*
* mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
* node as dead.
- * @mas - the maple state with the new node
- * @old_enode - The old maple encoded node to replace.
+ * @mas: the maple state with the new node
+ * @old_enode: The old maple encoded node to replace.
*/
static inline void mas_put_in_tree(struct ma_state *mas,
struct maple_enode *old_enode)
@@ -1730,8 +1729,8 @@ static inline void mas_put_in_tree(struct ma_state *mas,
* mas_replace_node() - Replace a node by putting it in the tree, marking it
* dead, and freeing it.
* the parent encoding to locate the maple node in the tree.
- * @mas - the ma_state with @mas->node pointing to the new node.
- * @old_enode - The old maple encoded node.
+ * @mas: the ma_state with @mas->node pointing to the new node.
+ * @old_enode: The old maple encoded node.
*/
static inline void mas_replace_node(struct ma_state *mas,
struct maple_enode *old_enode)
@@ -1796,7 +1795,6 @@ static inline void mab_shift_right(struct maple_big_node *b_node,
/*
* mab_middle_node() - Check if a middle node is needed (unlikely)
* @b_node: the maple_big_node that contains the data.
- * @size: the amount of data in the b_node
* @split: the potential split location
* @slot_count: the size that can be stored in a single node being considered.
*
@@ -1844,6 +1842,7 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
/*
* mab_calc_split() - Calculate the split location and if there needs to be two
* splits.
+ * @mas: The maple state
* @bn: The maple_big_node with the data
* @mid_split: The second split, if required. 0 otherwise.
*
@@ -2177,7 +2176,8 @@ static inline bool mas_next_sibling(struct ma_state *mas)
}
/*
- * mte_node_or_none() - Set the enode and state.
+ * mas_node_or_none() - Set the enode and state.
+ * @mas: the maple state
* @enode: The encoded maple node.
*
* Set the node to the enode and the status.
@@ -2228,7 +2228,6 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
/*
* mast_rebalance_next() - Rebalance against the next node
* @mast: The maple subtree state
- * @old_r: The encoded maple node to the right (next node).
*/
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
@@ -2242,7 +2241,6 @@ static inline void mast_rebalance_next(struct maple_subtree_state *mast)
/*
* mast_rebalance_prev() - Rebalance against the previous node
* @mast: The maple subtree state
- * @old_l: The encoded maple node to the left (previous node)
*/
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
@@ -2393,9 +2391,9 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
/*
* mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
* pointer.
- * @b_node - the big node to add the entry
- * @mas - the maple state to get the pivot (mas->max)
- * @entry - the entry to add, if NULL nothing happens.
+ * @b_node: the big node to add the entry
+ * @mas: the maple state to get the pivot (mas->max)
+ * @entry: the entry to add, if NULL nothing happens.
*/
static inline void mab_set_b_end(struct maple_big_node *b_node,
struct ma_state *mas,
@@ -2414,11 +2412,11 @@ static inline void mab_set_b_end(struct maple_big_node *b_node,
* mas_set_split_parent() - combine_then_separate helper function. Sets the parent
* of @mas->node to either @left or @right, depending on @slot and @split
*
- * @mas - the maple state with the node that needs a parent
- * @left - possible parent 1
- * @right - possible parent 2
- * @slot - the slot the mas->node was placed
- * @split - the split location between @left and @right
+ * @mas: the maple state with the node that needs a parent
+ * @left: possible parent 1
+ * @right: possible parent 2
+ * @slot: the slot the mas->node was placed
+ * @split: the split location between @left and @right
*/
static inline void mas_set_split_parent(struct ma_state *mas,
struct maple_enode *left,
@@ -2438,11 +2436,11 @@ static inline void mas_set_split_parent(struct ma_state *mas,
/*
* mte_mid_split_check() - Check if the next node passes the mid-split
- * @**l: Pointer to left encoded maple node.
- * @**m: Pointer to middle encoded maple node.
- * @**r: Pointer to right encoded maple node.
+ * @l: Pointer to left encoded maple node.
+ * @m: Pointer to middle encoded maple node.
+ * @r: Pointer to right encoded maple node.
* @slot: The offset
- * @*split: The split location.
+ * @split: The split location.
* @mid_split: The middle split.
*/
static inline void mte_mid_split_check(struct maple_enode **l,
@@ -2466,10 +2464,10 @@ static inline void mte_mid_split_check(struct maple_enode **l,
/*
* mast_set_split_parents() - Helper function to set three nodes parents. Slot
* is taken from @mast->l.
- * @mast - the maple subtree state
- * @left - the left node
- * @right - the right node
- * @split - the split location.
+ * @mast: the maple subtree state
+ * @left: the left node
+ * @right: the right node
+ * @split: the split location.
*/
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
struct maple_enode *left,
@@ -2503,7 +2501,6 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
/*
* mas_topiary_node() - Dispose of a single node
* @mas: The maple state for pushing nodes
- * @enode: The encoded maple node
* @in_rcu: If the tree is in rcu mode
*
* The node will either be RCU freed or pushed back on the maple state.
@@ -2635,7 +2632,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
/*
* mas_wmb_replace() - Write memory barrier and replace
* @mas: The maple state
- * @old: The old maple encoded node that is being replaced.
+ * @old_enode: The old maple encoded node that is being replaced.
*
* Updates gap as necessary.
*/
@@ -2823,10 +2820,8 @@ dead_node:
* orig_l_mas->last is used in mas_consume to find the slots that will need to
* be either freed or destroyed. orig_l_mas->depth keeps track of the height of
* the new sub-tree in case the sub-tree becomes the full tree.
- *
- * Return: the number of elements in b_node during the last loop.
*/
-static int mas_spanning_rebalance(struct ma_state *mas,
+static void mas_spanning_rebalance(struct ma_state *mas,
struct maple_subtree_state *mast, unsigned char count)
{
unsigned char split, mid_split;
@@ -2942,7 +2937,7 @@ new_root:
mas->offset = l_mas.offset;
mas_wmb_replace(mas, old_enode);
mtree_range_walk(mas);
- return mast->bn->b_end;
+ return;
}
/*
@@ -2952,10 +2947,8 @@ new_root:
*
* Rebalance two nodes into a single node or two new nodes that are sufficient.
* Continue upwards until tree is sufficient.
- *
- * Return: the number of elements in b_node during the last loop.
*/
-static inline int mas_rebalance(struct ma_state *mas,
+static inline void mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
char empty_count = mas_mt_height(mas);
@@ -2976,9 +2969,6 @@ static inline int mas_rebalance(struct ma_state *mas,
* tries to combine the data in the same way. If one node contains the
* entire range of the tree, then that node is used as a new root node.
*/
- mas_node_count(mas, empty_count * 2 - 1);
- if (mas_is_err(mas))
- return 0;
mast.orig_l = &l_mas;
mast.orig_r = &r_mas;
@@ -3029,11 +3019,6 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
/* set up node. */
if (in_rcu) {
- /* Allocate for both left and right as well as parent. */
- mas_node_count(mas, 3);
- if (mas_is_err(mas))
- return;
-
newnode = mas_pop_node(mas);
} else {
newnode = &reuse;
@@ -3308,9 +3293,8 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
* mas_split() - Split data that is too big for one node into two.
* @mas: The maple state
* @b_node: The maple big node
- * Return: 1 on success, 0 on failure.
*/
-static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
+static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
struct maple_subtree_state mast;
int height = 0;
@@ -3341,10 +3325,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
trace_ma_op(__func__, mas);
mas->depth = mas_mt_height(mas);
- /* Allocation failures will happen early. */
- mas_node_count(mas, 1 + mas->depth * 2);
- if (mas_is_err(mas))
- return 0;
mast.l = &l_mas;
mast.r = &r_mas;
@@ -3392,75 +3372,25 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
mas->node = l_mas.node;
mas_wmb_replace(mas, old);
mtree_range_walk(mas);
- return 1;
-}
-
-/*
- * mas_reuse_node() - Reuse the node to store the data.
- * @wr_mas: The maple write state
- * @bn: The maple big node
- * @end: The end of the data.
- *
- * Will always return false in RCU mode.
- *
- * Return: True if node was reused, false otherwise.
- */
-static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
- struct maple_big_node *bn, unsigned char end)
-{
- /* Need to be rcu safe. */
- if (mt_in_rcu(wr_mas->mas->tree))
- return false;
-
- if (end > bn->b_end) {
- int clear = mt_slots[wr_mas->type] - bn->b_end;
-
- memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
- memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
- }
- mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
- return true;
+ return;
}
/*
* mas_commit_b_node() - Commit the big node into the tree.
* @wr_mas: The maple write state
* @b_node: The maple big node
- * @end: The end of the data.
*/
-static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
- struct maple_big_node *b_node, unsigned char end)
+static noinline_for_kasan void mas_commit_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node)
{
- struct maple_node *node;
- struct maple_enode *old_enode;
- unsigned char b_end = b_node->b_end;
- enum maple_type b_type = b_node->type;
-
- old_enode = wr_mas->mas->node;
- if ((b_end < mt_min_slots[b_type]) &&
- (!mte_is_root(old_enode)) &&
- (mas_mt_height(wr_mas->mas) > 1))
- return mas_rebalance(wr_mas->mas, b_node);
+ enum store_type type = wr_mas->mas->store_type;
- if (b_end >= mt_slots[b_type])
- return mas_split(wr_mas->mas, b_node);
+ WARN_ON_ONCE(type != wr_rebalance && type != wr_split_store);
- if (mas_reuse_node(wr_mas, b_node, end))
- goto reuse_node;
-
- mas_node_count(wr_mas->mas, 1);
- if (mas_is_err(wr_mas->mas))
- return 0;
+ if (type == wr_rebalance)
+ return mas_rebalance(wr_mas->mas, b_node);
- node = mas_pop_node(wr_mas->mas);
- node->parent = mas_mn(wr_mas->mas)->parent;
- wr_mas->mas->node = mt_mk_node(node, b_type);
- mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
- mas_replace_node(wr_mas->mas, old_enode);
-reuse_node:
- mas_update_gap(wr_mas->mas);
- wr_mas->mas->end = b_end;
- return 1;
+ return mas_split(wr_mas->mas, b_node);
}
/*
@@ -3477,10 +3407,6 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
unsigned long *pivots;
int slot = 0;
- mas_node_count(mas, 1);
- if (unlikely(mas_is_err(mas)))
- return 0;
-
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
@@ -3526,10 +3452,7 @@ static inline void mas_store_root(struct ma_state *mas, void *entry)
/*
* mas_is_span_wr() - Check if the write needs to be treated as a write that
* spans the node.
- * @mas: The maple state
- * @piv: The pivot value being written
- * @type: The maple node type
- * @entry: The data to write
+ * @wr_mas: The maple write state
*
* Spanning writes are writes that start in one node and end in another OR if
* the write of a %NULL will cause the node to end with a %NULL.
@@ -3730,10 +3653,8 @@ static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
* @entry: The entry to store.
*
* Only valid when the index == 0 and the last == ULONG_MAX
- *
- * Return 0 on error, 1 on success.
*/
-static inline int mas_new_root(struct ma_state *mas, void *entry)
+static inline void mas_new_root(struct ma_state *mas, void *entry)
{
struct maple_enode *root = mas_root_locked(mas);
enum maple_type type = maple_leaf_64;
@@ -3749,10 +3670,6 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
goto done;
}
- mas_node_count(mas, 1);
- if (mas_is_err(mas))
- return 0;
-
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
@@ -3769,7 +3686,7 @@ done:
if (xa_is_node(root))
mte_destroy_walk(root, mas->tree);
- return 1;
+ return;
}
/*
* mas_wr_spanning_store() - Create a subtree with the store operation completed
@@ -3777,10 +3694,8 @@ done:
* Note that mas is expected to point to the node which caused the store to
* span.
* @wr_mas: The maple write state
- *
- * Return: 0 on error, positive on success.
*/
-static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
struct maple_subtree_state mast;
struct maple_big_node b_node;
@@ -3815,9 +3730,6 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
* entries per level plus a new root.
*/
height = mas_mt_height(mas);
- mas_node_count(mas, 1 + height * 3);
- if (mas_is_err(mas))
- return 0;
/*
* Set up right side. Need to get to the next offset after the spanning
@@ -3875,10 +3787,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
* @wr_mas: The maple write state
*
* Attempts to reuse the node, but may allocate.
- *
- * Return: True if stored, false otherwise
*/
-static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
+static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
struct ma_state *mas = wr_mas->mas;
@@ -3889,11 +3799,6 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
- /* Check if there is enough data. The room is enough. */
- if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
- !(mas->mas_flags & MA_STATE_BULK))
- return false;
-
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
else if (unlikely(wr_mas->r_max == ULONG_MAX))
@@ -3901,10 +3806,6 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
/* set up node. */
if (in_rcu) {
- mas_node_count(mas, 1);
- if (mas_is_err(mas))
- return false;
-
newnode = mas_pop_node(mas);
} else {
memset(&reuse, 0, sizeof(struct maple_node));
@@ -3960,16 +3861,14 @@ done:
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
mas->end = new_end;
- return true;
+ return;
}
/*
* mas_wr_slot_store: Attempt to store a value in a slot.
* @wr_mas: the maple write state
- *
- * Return: True if stored, false otherwise
*/
-static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
+static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char offset = mas->offset;
@@ -4001,7 +3900,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
wr_mas->pivots[offset + 1] = mas->last;
mas->offset++; /* Keep mas accurate. */
} else {
- return false;
+ return;
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
@@ -4012,7 +3911,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
if (!wr_mas->entry || gap)
mas_update_gap(mas);
- return true;
+ return;
}
static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
@@ -4061,9 +3960,6 @@ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
else
wr_mas->end_piv = wr_mas->mas->max;
-
- if (!wr_mas->entry)
- mas_wr_extend_null(wr_mas);
}
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
@@ -4089,23 +3985,13 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
* This is currently unsafe in rcu mode since the end of the node may be cached
* by readers while the node contents may be updated which could result in
* inaccurate information.
- *
- * Return: True if appended, false otherwise
*/
-static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
+static inline void mas_wr_append(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
- struct ma_state *mas;
+ struct ma_state *mas = wr_mas->mas;
void __rcu **slots;
- unsigned char end;
-
- mas = wr_mas->mas;
- if (mt_in_rcu(mas->tree))
- return false;
-
- end = mas->end;
- if (mas->offset != end)
- return false;
+ unsigned char end = mas->end;
if (new_end < mt_pivots[wr_mas->type]) {
wr_mas->pivots[new_end] = wr_mas->pivots[end];
@@ -4139,7 +4025,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
mas->end = new_end;
trace_ma_write(__func__, mas, new_end, wr_mas->entry);
- return true;
+ return;
}
/*
@@ -4155,76 +4041,235 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
+ mas_commit_b_node(wr_mas, &b_node);
}
-static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
+/*
+ * mas_wr_store_entry() - Internal call to store a value
+ * @wr_mas: The maple write state
+ */
+static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
- unsigned char new_end;
+ unsigned char new_end = mas_wr_new_end(wr_mas);
- /* Direct replacement */
- if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
+ switch (mas->store_type) {
+ case wr_invalid:
+ MT_BUG_ON(mas->tree, 1);
+ return;
+ case wr_new_root:
+ mas_new_root(mas, wr_mas->entry);
+ break;
+ case wr_store_root:
+ mas_store_root(mas, wr_mas->entry);
+ break;
+ case wr_exact_fit:
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
if (!!wr_mas->entry ^ !!wr_mas->content)
mas_update_gap(mas);
- return;
+ break;
+ case wr_append:
+ mas_wr_append(wr_mas, new_end);
+ break;
+ case wr_slot_store:
+ mas_wr_slot_store(wr_mas);
+ break;
+ case wr_node_store:
+ mas_wr_node_store(wr_mas, new_end);
+ break;
+ case wr_spanning_store:
+ mas_wr_spanning_store(wr_mas);
+ break;
+ case wr_split_store:
+ case wr_rebalance:
+ mas_wr_bnode(wr_mas);
+ break;
+ }
+
+ return;
+}
+
+static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ if (!mas_is_active(mas)) {
+ if (mas_is_start(mas))
+ goto set_content;
+
+ if (unlikely(mas_is_paused(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_none(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_overflow(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_underflow(mas)))
+ goto reset;
}
/*
- * new_end exceeds the size of the maple node and cannot enter the fast
- * path.
+ * A less strict version of mas_is_span_wr() where we allow spanning
+ * writes within this node. This is to stop partial walks in
+ * mas_preallocate() from being reset.
*/
- new_end = mas_wr_new_end(wr_mas);
- if (new_end >= mt_slots[wr_mas->type])
- goto slow_path;
-
- /* Attempt to append */
- if (mas_wr_append(wr_mas, new_end))
- return;
+ if (mas->last > mas->max)
+ goto reset;
- if (new_end == mas->end && mas_wr_slot_store(wr_mas))
- return;
+ if (wr_mas->entry)
+ goto set_content;
- if (mas_wr_node_store(wr_mas, new_end))
- return;
+ if (mte_is_leaf(mas->node) && mas->last == mas->max)
+ goto reset;
- if (mas_is_err(mas))
- return;
+ goto set_content;
-slow_path:
- mas_wr_bnode(wr_mas);
+reset:
+ mas_reset(mas);
+set_content:
+ wr_mas->content = mas_start(mas);
}
-/*
- * mas_wr_store_entry() - Internal call to store a value
+/**
+ * mas_prealloc_calc() - Calculate number of nodes needed for a
+ * given store operation
* @mas: The maple state
- * @entry: The entry to store.
+ * @entry: The entry to store into the tree
*
- * Return: The contents that was stored at the index.
+ * Return: Number of nodes required for preallocation.
*/
-static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
+static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
+{
+ int ret = mas_mt_height(mas) * 3 + 1;
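+ /* assume the worst case (a spanning store) and refine in the switch below */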
+
+ switch (mas->store_type) {
+ case wr_invalid:
+ WARN_ON_ONCE(1);
+ break;
+ case wr_new_root:
+ ret = 1;
+ break;
+ case wr_store_root:
+ if (likely((mas->last != 0) || (mas->index != 0)))
+ ret = 1;
+ else if (((unsigned long) (entry) & 3) == 2)
+ ret = 1;
+ else
+ ret = 0;
+ break;
+ case wr_spanning_store:
+ ret = mas_mt_height(mas) * 3 + 1;
+ break;
+ case wr_split_store:
+ ret = mas_mt_height(mas) * 2 + 1;
+ break;
+ case wr_rebalance:
+ ret = mas_mt_height(mas) * 2 - 1;
+ break;
+ case wr_node_store:
+ ret = mt_in_rcu(mas->tree) ? 1 : 0;
+ break;
+ case wr_append:
+ case wr_exact_fit:
+ case wr_slot_store:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * mas_wr_store_type() - Set the store type for a given
+ * store operation.
+ * @wr_mas: The maple write state
+ */
+static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
+ unsigned char new_end;
- wr_mas->content = mas_start(mas);
- if (mas_is_none(mas) || mas_is_ptr(mas)) {
- mas_store_root(mas, wr_mas->entry);
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) {
+ mas->store_type = wr_store_root;
return;
}
if (unlikely(!mas_wr_walk(wr_mas))) {
- mas_wr_spanning_store(wr_mas);
+ mas->store_type = wr_spanning_store;
return;
}
/* At this point, we are at the leaf node that needs to be altered. */
mas_wr_end_piv(wr_mas);
- /* New root for a single pointer */
- if (unlikely(!mas->index && mas->last == ULONG_MAX))
- mas_new_root(mas, wr_mas->entry);
- else
- mas_wr_modify(wr_mas);
+ if (!wr_mas->entry)
+ mas_wr_extend_null(wr_mas);
+
+ new_end = mas_wr_new_end(wr_mas);
+ if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) {
+ mas->store_type = wr_exact_fit;
+ return;
+ }
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
+ mas->store_type = wr_new_root;
+ return;
+ }
+
+ /* Potential spanning rebalance collapsing a node */
+ if (new_end < mt_min_slots[wr_mas->type]) {
+ if (!mte_is_root(mas->node)) {
+ mas->store_type = wr_rebalance;
+ return;
+ }
+ mas->store_type = wr_node_store;
+ return;
+ }
+
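+ /* The write would exceed the node's slot capacity: the node must split */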
+ if (new_end >= mt_slots[wr_mas->type]) {
+ mas->store_type = wr_split_store;
+ return;
+ }
+
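+ /* Appending at the end of the node is safe outside of RCU mode */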
+ if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) {
+ mas->store_type = wr_append;
+ return;
+ }
+
+ if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
+ (wr_mas->offset_end - mas->offset == 1))) {
+ mas->store_type = wr_slot_store;
+ return;
+ }
+
+ if (mte_is_root(mas->node) || (new_end >= mt_min_slots[wr_mas->type]) ||
+ (mas->mas_flags & MA_STATE_BULK)) {
+ mas->store_type = wr_node_store;
+ return;
+ }
+
+ mas->store_type = wr_invalid;
+ MAS_WARN_ON(mas, 1);
+}
+
+/**
+ * mas_wr_preallocate() - Preallocate enough nodes for a store operation
+ * @wr_mas: The maple write state
+ * @entry: The entry that will be stored
+ *
+ * Set up the write state, determine the store type and preallocate the
+ * number of nodes that store type requires.
+ */
+static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
+{
+ struct ma_state *mas = wr_mas->mas;
+ int request;
+
+ mas_wr_prealloc_setup(wr_mas);
+ mas_wr_store_type(wr_mas);
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ return;
+
+ mas_node_count(mas, request);
}
/**
@@ -4257,26 +4302,24 @@ static inline void *mas_insert(struct ma_state *mas, void *entry)
if (wr_mas.content)
goto exists;
- if (mas_is_none(mas) || mas_is_ptr(mas)) {
- mas_store_root(mas, entry);
+ mas_wr_preallocate(&wr_mas, entry);
+ if (mas_is_err(mas))
return NULL;
- }
/* spanning writes always overwrite something */
- if (!mas_wr_walk(&wr_mas))
+ if (mas->store_type == wr_spanning_store)
goto exists;
/* At this point, we are at the leaf node that needs to be altered. */
- wr_mas.offset_end = mas->offset;
- wr_mas.end_piv = wr_mas.r_max;
+ if (mas->store_type != wr_new_root && mas->store_type != wr_store_root) {
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
- if (wr_mas.content || (mas->last > wr_mas.r_max))
- goto exists;
-
- if (!entry)
- return NULL;
+ if (wr_mas.content || (mas->last > wr_mas.r_max))
+ goto exists;
+ }
- mas_wr_modify(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
return wr_mas.content;
exists:
@@ -4331,6 +4374,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
if (*next == 0)
mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
+ mas_destroy(mas);
return ret;
}
EXPORT_SYMBOL(mas_alloc_cyclic);
@@ -4440,9 +4484,8 @@ no_entry:
* mas_prev_slot() - Get the entry in the previous slot
*
* @mas: The maple state
- * @max: The minimum starting range
+ * @min: The minimum starting range
* @empty: Can be empty
- * @set_underflow: Set the @mas->node to underflow state on limit.
*
* Return: The entry in the previous slot which is possibly NULL
*/
@@ -4525,6 +4568,7 @@ underflow:
/*
* mas_next_node() - Get the next node at the same level in the tree.
* @mas: The maple state
+ * @node: The maple node
* @max: The maximum pivot value to check.
*
* The next value will be mas->node[mas->offset] or the status will have
@@ -4615,8 +4659,6 @@ overflow:
* @mas: The maple state
* @max: The maximum starting range
* @empty: Can be empty
- * @set_overflow: Should @mas->node be set to overflow when the limit is
- * reached.
*
* Return: The entry in the next slot which is possibly NULL
*/
@@ -5150,9 +5192,9 @@ EXPORT_SYMBOL_GPL(mas_empty_area_rev);
/*
* mte_dead_leaves() - Mark all leaves of a node as dead.
- * @mas: The maple state
+ * @enode: the encoded node
+ * @mt: the maple tree
* @slots: Pointer to the slot array
- * @type: The maple node type
*
* Must hold the write lock.
*
@@ -5358,47 +5400,6 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
mt_destroy_walk(enode, mt, true);
}
}
-
-static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
-{
- if (!mas_is_active(wr_mas->mas)) {
- if (mas_is_start(wr_mas->mas))
- return;
-
- if (unlikely(mas_is_paused(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_none(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_overflow(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_underflow(wr_mas->mas)))
- goto reset;
- }
-
- /*
- * A less strict version of mas_is_span_wr() where we allow spanning
- * writes within this node. This is to stop partial walks in
- * mas_prealloc() from being reset.
- */
- if (wr_mas->mas->last > wr_mas->mas->max)
- goto reset;
-
- if (wr_mas->entry)
- return;
-
- if (mte_is_leaf(wr_mas->mas->node) &&
- wr_mas->mas->last == wr_mas->mas->max)
- goto reset;
-
- return;
-
-reset:
- mas_reset(wr_mas->mas);
-}
-
/* Interface */
/**
@@ -5407,13 +5408,12 @@ reset:
* @entry: The entry to store.
*
* The @mas->index and @mas->last is used to set the range for the @entry.
- * Note: The @mas should have pre-allocated entries to ensure there is memory to
- * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
*
* Return: the first entry between mas->index and mas->last or %NULL.
*/
void *mas_store(struct ma_state *mas, void *entry)
{
+ int request;
MA_WR_STATE(wr_mas, mas, entry);
trace_ma_write(__func__, mas, 0, entry);
@@ -5434,8 +5434,25 @@ void *mas_store(struct ma_state *mas, void *entry)
* want to examine what happens if a single store operation was to
* overwrite multiple entries within a self-balancing B-Tree.
*/
- mas_wr_store_setup(&wr_mas);
+ mas_wr_prealloc_setup(&wr_mas);
+ mas_wr_store_type(&wr_mas);
+ if (mas->mas_flags & MA_STATE_PREALLOC) {
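+ /* nodes were already reserved by an earlier mas_preallocate() call */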
+ mas_wr_store_entry(&wr_mas);
+ MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
+ return wr_mas.content;
+ }
+
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ goto store;
+
+ mas_node_count(mas, request);
+ if (mas_is_err(mas))
+ return NULL;
+
+store:
mas_wr_store_entry(&wr_mas);
+ mas_destroy(mas);
return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);
@@ -5451,19 +5468,28 @@ EXPORT_SYMBOL_GPL(mas_store);
*/
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
+ unsigned long index = mas->index;
+ unsigned long last = mas->last;
MA_WR_STATE(wr_mas, mas, entry);
+ int ret = 0;
- mas_wr_store_setup(&wr_mas);
- trace_ma_write(__func__, mas, 0, entry);
retry:
- mas_wr_store_entry(&wr_mas);
- if (unlikely(mas_nomem(mas, gfp)))
+ mas_wr_preallocate(&wr_mas, entry);
+ if (unlikely(mas_nomem(mas, gfp))) {
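+ /* a NULL store may have widened the range via mas_wr_extend_null() */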
+ if (!entry)
+ __mas_set_range(mas, index, last);
goto retry;
+ }
- if (unlikely(mas_is_err(mas)))
- return xa_err(mas->node);
+ if (mas_is_err(mas)) {
+ ret = xa_err(mas->node);
+ goto out;
+ }
- return 0;
+ mas_wr_store_entry(&wr_mas);
+out:
+ mas_destroy(mas);
+ return ret;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
@@ -5477,7 +5503,19 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
- mas_wr_store_setup(&wr_mas);
+ if (mas->store_type == wr_store_root) {
+ mas_wr_prealloc_setup(&wr_mas);
+ goto store;
+ }
+
+ mas_wr_walk_descend(&wr_mas);
+ if (mas->store_type != wr_spanning_store) {
+ /* set wr_mas->content to current slot */
+ wr_mas.content = mas_slot_locked(mas, wr_mas.slots, mas->offset);
+ mas_wr_end_piv(&wr_mas);
+ }
+
+store:
trace_ma_write(__func__, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
@@ -5496,70 +5534,25 @@ EXPORT_SYMBOL_GPL(mas_store_prealloc);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
- unsigned char node_size;
- int request = 1;
- int ret;
-
-
- if (unlikely(!mas->index && mas->last == ULONG_MAX))
- goto ask_now;
-
- mas_wr_store_setup(&wr_mas);
- wr_mas.content = mas_start(mas);
- /* Root expand */
- if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
- goto ask_now;
-
- if (unlikely(!mas_wr_walk(&wr_mas))) {
- /* Spanning store, use worst case for now */
- request = 1 + mas_mt_height(mas) * 3;
- goto ask_now;
- }
-
- /* At this point, we are at the leaf node that needs to be altered. */
- /* Exact fit, no nodes needed. */
- if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
- return 0;
-
- mas_wr_end_piv(&wr_mas);
- node_size = mas_wr_new_end(&wr_mas);
+ int ret = 0;
+ int request;
- /* Slot store, does not require additional nodes */
- if (node_size == mas->end) {
- /* reuse node */
- if (!mt_in_rcu(mas->tree))
- return 0;
- /* shifting boundary */
- if (wr_mas.offset_end - mas->offset == 1)
- return 0;
- }
+ mas_wr_prealloc_setup(&wr_mas);
+ mas_wr_store_type(&wr_mas);
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ return ret;
- if (node_size >= mt_slots[wr_mas.type]) {
- /* Split, worst case for now. */
- request = 1 + mas_mt_height(mas) * 2;
- goto ask_now;
+ mas_node_count_gfp(mas, request, gfp);
+ if (mas_is_err(mas)) {
+ mas_set_alloc_req(mas, 0);
+ ret = xa_err(mas->node);
+ mas_destroy(mas);
+ mas_reset(mas);
+ return ret;
}
- /* New root needs a single node */
- if (unlikely(mte_is_root(mas->node)))
- goto ask_now;
-
- /* Potential spanning rebalance collapsing a node, use worst-case */
- if (node_size - 1 <= mt_min_slots[wr_mas.type])
- request = mas_mt_height(mas) * 2 - 1;
-
- /* node store, slot store needs one node */
-ask_now:
- mas_node_count_gfp(mas, request, gfp);
mas->mas_flags |= MA_STATE_PREALLOC;
- if (likely(!mas_is_err(mas)))
- return 0;
-
- mas_set_alloc_req(mas, 0);
- ret = xa_err(mas->node);
- mas_reset(mas);
- mas_destroy(mas);
- mas_reset(mas);
return ret;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
@@ -5585,7 +5578,8 @@ void mas_destroy(struct ma_state *mas)
*/
if (mas->mas_flags & MA_STATE_REBALANCE) {
unsigned char end;
-
+ if (mas_is_err(mas))
+ mas_reset(mas);
mas_start(mas);
mtree_range_walk(mas);
end = mas->end + 1;
@@ -6245,24 +6239,32 @@ EXPORT_SYMBOL_GPL(mas_find_range_rev);
void *mas_erase(struct ma_state *mas)
{
void *entry;
+ unsigned long index = mas->index;
MA_WR_STATE(wr_mas, mas, NULL);
if (!mas_is_active(mas) || !mas_is_start(mas))
mas->status = ma_start;
- /* Retry unnecessary when holding the write lock. */
+write_retry:
entry = mas_state_walk(mas);
if (!entry)
return NULL;
-write_retry:
/* Must reset to ensure spanning writes of last slot are detected */
mas_reset(mas);
- mas_wr_store_setup(&wr_mas);
- mas_wr_store_entry(&wr_mas);
- if (mas_nomem(mas, GFP_KERNEL))
+ mas_wr_preallocate(&wr_mas, NULL);
+ if (mas_nomem(mas, GFP_KERNEL)) {
+ /* in case the range of the entry changed while unlocked */
+ mas->index = mas->last = index;
goto write_retry;
+ }
+ if (mas_is_err(mas))
+ goto out;
+
+ mas_wr_store_entry(&wr_mas);
+out:
+ mas_destroy(mas);
return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);
@@ -6277,10 +6279,8 @@ EXPORT_SYMBOL_GPL(mas_erase);
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
__must_hold(mas->tree->ma_lock)
{
- if (likely(mas->node != MA_ERROR(-ENOMEM))) {
- mas_destroy(mas);
+ if (likely(mas->node != MA_ERROR(-ENOMEM)))
return false;
- }
if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
mtree_unlock(mas->tree);
@@ -6357,7 +6357,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(mas, mt, index, last);
- MA_WR_STATE(wr_mas, &mas, entry);
+ int ret = 0;
trace_ma_write(__func__, &mas, 0, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
@@ -6367,16 +6367,10 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
return -EINVAL;
mtree_lock(mt);
-retry:
- mas_wr_store_entry(&wr_mas);
- if (mas_nomem(&mas, gfp))
- goto retry;
-
+ ret = mas_store_gfp(&mas, entry, gfp);
mtree_unlock(mt);
- if (mas_is_err(&mas))
- return xa_err(mas.node);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(mtree_store_range);
@@ -6412,6 +6406,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(ms, mt, first, last);
+ int ret = 0;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
@@ -6427,9 +6422,10 @@ retry:
mtree_unlock(mt);
if (mas_is_err(&ms))
- return xa_err(ms.node);
+ ret = xa_err(ms.node);
- return 0;
+ mas_destroy(&ms);
+ return ret;
}
EXPORT_SYMBOL(mtree_insert_range);
@@ -6484,6 +6480,7 @@ retry:
unlock:
mtree_unlock(mt);
+ mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
@@ -6565,6 +6562,7 @@ retry:
unlock:
mtree_unlock(mt);
+ mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
@@ -6997,6 +6995,19 @@ void mt_set_non_kernel(unsigned int val)
kmem_cache_set_non_kernel(maple_node_cache, val);
}
+extern void kmem_cache_set_callback(struct kmem_cache *cachep,
+ void (*callback)(void *));
+void mt_set_callback(void (*callback)(void *))
+{
+ kmem_cache_set_callback(maple_node_cache, callback);
+}
+
+extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private);
+void mt_set_private(void *private)
+{
+ kmem_cache_set_private(maple_node_cache, private);
+}
+
extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
@@ -7181,7 +7192,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
enum mt_dump_format format)
{
struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
- bool leaf = mte_is_leaf(entry);
unsigned long first = min;
int i;
@@ -7215,19 +7225,22 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
break;
if (last == 0 && i > 0)
break;
- if (leaf)
- mt_dump_entry(mt_slot(mt, node->slot, i),
- first, last, depth + 1, format);
- else if (node->slot[i])
+ if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
if (last == max)
break;
if (last > max) {
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ switch (format) {
+ case mt_dump_hex:
+ pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
- break;
+ break;
+ case mt_dump_dec:
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ }
}
first = last + 1;
}
@@ -7627,6 +7640,40 @@ void mas_dump(const struct ma_state *mas)
break;
}
+ pr_err("Store Type: ");
+ switch (mas->store_type) {
+ case wr_invalid:
+ pr_err("invalid store type\n");
+ break;
+ case wr_new_root:
+ pr_err("new_root\n");
+ break;
+ case wr_store_root:
+ pr_err("store_root\n");
+ break;
+ case wr_exact_fit:
+ pr_err("exact_fit\n");
+ break;
+ case wr_split_store:
+ pr_err("split_store\n");
+ break;
+ case wr_slot_store:
+ pr_err("slot_store\n");
+ break;
+ case wr_append:
+ pr_err("append\n");
+ break;
+ case wr_node_store:
+ pr_err("node_store\n");
+ break;
+ case wr_spanning_store:
+ pr_err("spanning_store\n");
+ break;
+ case wr_rebalance:
+ pr_err("rebalance\n");
+ break;
+ }
+
pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
mas->index, mas->last);
pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 3c1f92a7459d..3ef11305f8d2 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
+obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 191761b1b623..5faa29208bdb 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -186,55 +186,84 @@ EXPORT_SYMBOL(iter_div_u64_rem);
#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
- u64 res = 0, div, rem;
- int shift;
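+ /* ilog2() rounds down, so a sum of at most 62 guarantees a * b < 2^64 */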
+ if (ilog2(a) + ilog2(b) <= 62)
+ return div64_u64(a * b, c);
- /* can a * b overflow ? */
- if (ilog2(a) + ilog2(b) > 62) {
- /*
- * Note that the algorithm after the if block below might lose
- * some precision and the result is more exact for b > a. So
- * exchange a and b if a is bigger than b.
- *
- * For example with a = 43980465100800, b = 100000000, c = 1000000000
- * the below calculation doesn't modify b at all because div == 0
- * and then shift becomes 45 + 26 - 62 = 9 and so the result
- * becomes 4398035251080. However with a and b swapped the exact
- * result is calculated (i.e. 4398046510080).
- */
- if (a > b)
- swap(a, b);
+#if defined(__SIZEOF_INT128__)
+
+ /* native 64x64=128 bits multiplication */
+ u128 prod = (u128)a * b;
+ u64 n_lo = prod, n_hi = prod >> 64;
+
+#else
+
+ /* perform a 64x64=128 bits multiplication manually */
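+ /*
+ * Split each operand into 32-bit halves, then combine partial
+ * products: a * b = (a_hi * b_hi << 64) +
+ * ((a_hi * b_lo + a_lo * b_hi) << 32) + a_lo * b_lo
+ */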
+ u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
+ u64 x, y, z;
+
+ x = (u64)a_lo * b_lo;
+ y = (u64)a_lo * b_hi + (u32)(x >> 32);
+ z = (u64)a_hi * b_hi + (u32)(y >> 32);
+ y = (u64)a_hi * b_lo + (u32)y;
+ z += (u32)(y >> 32);
+ x = (y << 32) + (u32)x;
+
+ u64 n_lo = x, n_hi = z;
+
+#endif
+
+ /* make sure c is not zero, trigger exception otherwise */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdiv-by-zero"
+ if (unlikely(c == 0))
+ return 1/0;
+#pragma GCC diagnostic pop
+
+ int shift = __builtin_ctzll(c);
+ /* try reducing the fraction in case the dividend becomes <= 64 bits */
+ if ((n_hi >> shift) == 0) {
+ u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;
+
+ return div64_u64(n, c >> shift);
/*
- * (b * a) / c is equal to
- *
- * (b / c) * a +
- * (b % c) * a / c
- *
- * if nothing overflows. Can the 1st multiplication
- * overflow? Yes, but we do not care: this can only
- * happen if the end result can't fit in u64 anyway.
- *
- * So the code below does
- *
- * res = (b / c) * a;
- * b = b % c;
+ * The remainder value if needed would be:
+ * res = div64_u64_rem(n, c >> shift, &rem);
+ * rem = (rem << shift) + (n_lo - (n << shift));
*/
- div = div64_u64_rem(b, c, &rem);
- res = div * a;
- b = rem;
-
- shift = ilog2(a) + ilog2(b) - 62;
- if (shift > 0) {
- /* drop precision */
- b >>= shift;
- c >>= shift;
- if (!c)
- return res;
- }
}
- return res + div64_u64(a * b, c);
+ if (n_hi >= c) {
+ /* overflow: result is unrepresentable in a u64 */
+ return -1;
+ }
+
+ /* Do the full 128 by 64 bits division */
+
+ shift = __builtin_clzll(c);
+ c <<= shift;
+
+ int p = 64 + shift;
+ u64 res = 0;
+ bool carry;
+
+ do {
+ carry = n_hi >> 63;
+ shift = carry ? 1 : __builtin_clzll(n_hi);
+ if (p < shift)
+ break;
+ p -= shift;
+ n_hi <<= shift;
+ n_hi |= n_lo >> (64 - shift);
+ n_lo <<= shift;
+ if (carry || (n_hi >= c)) {
+ n_hi -= c;
+ res |= 1ULL << p;
+ }
+ } while (n_hi);
+ /* The remainder value if needed would be n_hi << p */
+
+ return res;
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
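
The rewritten helper computes floor(a * b / c) with full 128-bit intermediate precision, returning -1 (all bits set) when the quotient cannot fit in a u64. A minimal userspace cross-check, assuming a compiler with __int128 support; the function name and the sample entry (taken from the test table added below) are illustrative, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Reference for mul_u64_u64_div_u64() using native 128-bit math. */
static uint64_t ref_mul_div(uint64_t a, uint64_t b, uint64_t c)
{
	unsigned __int128 q = (unsigned __int128)a * b / c;

	/* saturate like the kernel's overflow path, which returns -1 */
	return q > UINT64_MAX ? UINT64_MAX : (uint64_t)q;
}

int main(void)
{
	/* (2^64 - 1) * (2^63 + 1) / (2^64 - 1) = 2^63 + 1 */
	printf("%llx\n", (unsigned long long)
	       ref_mul_div(0xffffffffffffffffULL, 0x8000000000000001ULL,
			   0xffffffffffffffffULL));	/* 8000000000000001 */
	return 0;
}
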
diff --git a/lib/math/test_mul_u64_u64_div_u64.c b/lib/math/test_mul_u64_u64_div_u64.c
new file mode 100644
index 000000000000..58d058de4e73
--- /dev/null
+++ b/lib/math/test_mul_u64_u64_div_u64.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 BayLibre SAS
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/math64.h>
+
+typedef struct { u64 a; u64 b; u64 c; u64 result; } test_params;
+
+static test_params test_values[] = {
+/* this contains many edge values followed by a couple of random values */
+{ 0xb, 0x7, 0x3, 0x19 },
+{ 0xffff0000, 0xffff0000, 0xf, 0x1110eeef00000000 },
+{ 0xffffffff, 0xffffffff, 0x1, 0xfffffffe00000001 },
+{ 0xffffffff, 0xffffffff, 0x2, 0x7fffffff00000000 },
+{ 0x1ffffffff, 0xffffffff, 0x2, 0xfffffffe80000000 },
+{ 0x1ffffffff, 0xffffffff, 0x3, 0xaaaaaaa9aaaaaaab },
+{ 0x1ffffffff, 0x1ffffffff, 0x4, 0xffffffff00000000 },
+{ 0xffff000000000000, 0xffff000000000000, 0xffff000000000001, 0xfffeffffffffffff },
+{ 0x3333333333333333, 0x3333333333333333, 0x5555555555555555, 0x1eb851eb851eb851 },
+{ 0x7fffffffffffffff, 0x2, 0x3, 0x5555555555555554 },
+{ 0xffffffffffffffff, 0x2, 0x8000000000000000, 0x3 },
+{ 0xffffffffffffffff, 0x2, 0xc000000000000000, 0x2 },
+{ 0xffffffffffffffff, 0x4000000000000004, 0x8000000000000000, 0x8000000000000007 },
+{ 0xffffffffffffffff, 0x4000000000000001, 0x8000000000000000, 0x8000000000000001 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000001 },
+{ 0xfffffffffffffffe, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000000 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffe, 0x8000000000000001 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffd, 0x8000000000000002 },
+{ 0x7fffffffffffffff, 0xffffffffffffffff, 0xc000000000000000, 0xaaaaaaaaaaaaaaa8 },
+{ 0xffffffffffffffff, 0x7fffffffffffffff, 0xa000000000000000, 0xccccccccccccccca },
+{ 0xffffffffffffffff, 0x7fffffffffffffff, 0x9000000000000000, 0xe38e38e38e38e38b },
+{ 0x7fffffffffffffff, 0x7fffffffffffffff, 0x5000000000000000, 0xccccccccccccccc9 },
+{ 0xffffffffffffffff, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfffffffffffffffe },
+{ 0xe6102d256d7ea3ae, 0x70a77d0be4c31201, 0xd63ec35ab3220357, 0x78f8bf8cc86c6e18 },
+{ 0xf53bae05cb86c6e1, 0x3847b32d2f8d32e0, 0xcfd4f55a647f403c, 0x42687f79d8998d35 },
+{ 0x9951c5498f941092, 0x1f8c8bfdf287a251, 0xa3c8dc5f81ea3fe2, 0x1d887cb25900091f },
+{ 0x374fee9daa1bb2bb, 0x0d0bfbff7b8ae3ef, 0xc169337bd42d5179, 0x03bb2dbaffcbb961 },
+{ 0xeac0d03ac10eeaf0, 0x89be05dfa162ed9b, 0x92bb1679a41f0e4b, 0xdc5f5cc9e270d216 },
+};
+
+/*
+ * The above table can be verified with the following shell script:
+ *
+ * #!/bin/sh
+ * sed -ne 's/^{ \+\(.*\), \+\(.*\), \+\(.*\), \+\(.*\) },$/\1 \2 \3 \4/p' \
+ * lib/math/test_mul_u64_u64_div_u64.c |
+ * while read a b c r; do
+ * expected=$( printf "obase=16; ibase=16; %X * %X / %X\n" $a $b $c | bc )
+ * given=$( printf "%X\n" $r )
+ * if [ "$expected" = "$given" ]; then
+ * echo "$a * $b / $c = $r OK"
+ * else
+ * echo "$a * $b / $c = $r is wrong" >&2
+ * echo "should be equivalent to 0x$expected" >&2
+ * exit 1
+ * fi
+ * done
+ */
+
+static int __init test_init(void)
+{
+ int i;
+
+ pr_info("Starting mul_u64_u64_div_u64() test\n");
+
+ for (i = 0; i < ARRAY_SIZE(test_values); i++) {
+ u64 a = test_values[i].a;
+ u64 b = test_values[i].b;
+ u64 c = test_values[i].c;
+ u64 expected_result = test_values[i].result;
+ u64 result = mul_u64_u64_div_u64(a, b, c);
+
+ if (result != expected_result) {
+ pr_err("ERROR: 0x%016llx * 0x%016llx / 0x%016llx\n", a, b, c);
+ pr_err("ERROR: expected result: %016llx\n", expected_result);
+ pr_err("ERROR: obtained result: %016llx\n", result);
+ }
+ }
+
+ pr_info("Completed mul_u64_u64_div_u64() test\n");
+ return 0;
+}
+
+static void __exit test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("mul_u64_u64_div_u64() test module");
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 51bc5246986d..2891f94a11c6 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -209,7 +209,7 @@ int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
INIT_LIST_HEAD(&fbc[i].list);
#endif
fbc[i].count = amount;
- fbc[i].counters = (void *)counters + (i * counter_size);
+ fbc[i].counters = (void __percpu *)counters + i * counter_size;
debug_percpu_counter_activate(&fbc[i]);
}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index dbbed19f8fff..6c902639728b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -189,7 +189,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
size = nbuckets;
- if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
+ if (tbl == NULL && !gfpflags_allow_blocking(gfp)) {
tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
nbuckets = 0;
}
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5e2e93307f0d..d3412984170c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -65,7 +65,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
{
unsigned long mask, word_mask;
- guard(spinlock_irqsave)(&map->swap_lock);
+ guard(raw_spinlock_irqsave)(&map->swap_lock);
if (!map->cleared) {
if (depth == 0)
@@ -136,7 +136,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
}
for (i = 0; i < sb->map_nr; i++)
- spin_lock_init(&sb->map[i].swap_lock);
+ raw_spin_lock_init(&sb->map[i].swap_lock);
return 0;
}
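
The conversion above keeps the scope-based guard() pattern from linux/cleanup.h while switching to a raw spinlock. A hedged illustration of what the guard buys; the function body is simplified and is not the actual sbitmap logic:

/*
 * guard(raw_spinlock_irqsave) releases the lock automatically when it
 * goes out of scope, so early returns need no explicit unlock.
 */
static bool word_has_cleared_bits(struct sbitmap_word *map)
{
	guard(raw_spinlock_irqsave)(&map->swap_lock);

	if (!map->cleared)
		return false;	/* unlocked automatically */

	return true;		/* unlocked automatically here too */
}
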
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 6432b8c3e431..989a12a67872 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -120,6 +120,15 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (unlikely(count <= 0))
return 0;
+ if (can_do_masked_user_access()) {
+ long retval;
+
+ src = masked_user_access_begin(src);
+ retval = do_strncpy_from_user(dst, src, count, count);
+ user_read_access_end();
+ return retval;
+ }
+
max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
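
The fast path added above (and the matching one in strnlen_user() below) skips the explicit range check by clamping the pointer itself. A hedged conceptual sketch of address masking; the real masked_user_access_begin() is architecture-specific, and the boundary constant here is illustrative:

#include <stdint.h>

#define GUARD_ADDR 0x0000800000000000ULL	/* illustrative faulting boundary */

/*
 * Conceptual model only: a pointer beyond the user range is clamped to
 * an address guaranteed to fault, so the subsequent access is safe even
 * under speculation, without branching on a separate range check.
 */
static const char *mask_user_ptr(const char *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;

	return (const char *)(addr > GUARD_ADDR ? GUARD_ADDR : addr);
}
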
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index feeb935a2299..6e489f9e90f1 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -96,6 +96,15 @@ long strnlen_user(const char __user *str, long count)
if (unlikely(count <= 0))
return 0;
+ if (can_do_masked_user_access()) {
+ long retval;
+
+ str = masked_user_access_begin(str);
+ retval = do_strnlen_user(str, count, count);
+ user_read_access_end();
+ return retval;
+ }
+
max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {
diff --git a/lib/test_bits.c b/lib/test_bits.c
index 01313980f175..c7b38d91e1f1 100644
--- a/lib/test_bits.c
+++ b/lib/test_bits.c
@@ -39,6 +39,36 @@ static void genmask_ull_test(struct kunit *test)
#endif
}
+static void genmask_u128_test(struct kunit *test)
+{
+#ifdef CONFIG_ARCH_SUPPORTS_INT128
+ /* Below 64 bit masks */
+ KUNIT_EXPECT_EQ(test, 0x0000000000000001ull, GENMASK_U128(0, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000003ull, GENMASK_U128(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000006ull, GENMASK_U128(2, 1));
+ KUNIT_EXPECT_EQ(test, 0x00000000ffffffffull, GENMASK_U128(31, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_U128(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(63, 0));
+
+ /* Above 64 bit masks - only a 64-bit portion can be validated at a time */
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(64, 0) >> 1);
+ KUNIT_EXPECT_EQ(test, 0x00000000ffffffffull, GENMASK_U128(81, 50) >> 50);
+ KUNIT_EXPECT_EQ(test, 0x0000000000ffffffull, GENMASK_U128(87, 64) >> 64);
+ KUNIT_EXPECT_EQ(test, 0x0000000000ff0000ull, GENMASK_U128(87, 80) >> 64);
+
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(127, 0) >> 64);
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, (u64)GENMASK_U128(127, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000003ull, GENMASK_U128(127, 126) >> 126);
+ KUNIT_EXPECT_EQ(test, 0x0000000000000001ull, GENMASK_U128(127, 127) >> 127);
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_U128(0, 1);
+ GENMASK_U128(0, 10);
+ GENMASK_U128(9, 10);
+#endif /* TEST_GENMASK_FAILURES */
+#endif /* CONFIG_ARCH_SUPPORTS_INT128 */
+}
+
static void genmask_input_check_test(struct kunit *test)
{
unsigned int x, y;
@@ -56,12 +86,16 @@ static void genmask_input_check_test(struct kunit *test)
/* Valid input */
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(100, 80));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(110, 65));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(127, 0));
}
static struct kunit_case bits_test_cases[] = {
KUNIT_CASE(genmask_test),
KUNIT_CASE(genmask_ull_test),
+ KUNIT_CASE(genmask_u128_test),
KUNIT_CASE(genmask_input_check_test),
{}
};
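
For reference, GENMASK_U128(h, l) yields a 128-bit value with bits l through h set. A hedged sketch of the same construction, assuming __int128 support; the real macro additionally rejects h < l at compile time, as the TEST_GENMASK_FAILURES cases above exercise:

#include <assert.h>

typedef unsigned __int128 u128;

/* Set bits l..h (inclusive); requires l <= h < 128. */
static u128 genmask_u128(unsigned int h, unsigned int l)
{
	u128 ones = ~(u128)0;

	return (ones >> (127 - h)) & (ones << l);
}

int main(void)
{
	/* mirrors two of the KUnit expectations above */
	assert((unsigned long long)(genmask_u128(87, 64) >> 64) == 0xffffff);
	assert((unsigned long long)(genmask_u128(127, 126) >> 126) == 0x3);
	return 0;
}
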
diff --git a/lib/test_fpu_glue.c b/lib/test_fpu_glue.c
index 074f30301f29..c0596426370a 100644
--- a/lib/test_fpu_glue.c
+++ b/lib/test_fpu_glue.c
@@ -42,7 +42,7 @@ static int __init test_fpu_init(void)
return -EINVAL;
selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
- if (!selftest_dir)
+ if (IS_ERR(selftest_dir))
return -ENOMEM;
debugfs_create_file_unsafe("test_fpu", 0444, selftest_dir, NULL,
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index ee20e1f9bae9..056f2e411d7b 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -799,10 +799,7 @@ static int dmirror_exclusive(struct dmirror *dmirror,
unsigned long mapped = 0;
int i;
- if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
- next = end;
- else
- next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
+ next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT));
ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
/*
diff --git a/lib/test_objpool.c b/lib/test_objpool.c
index bfdb81599832..5a3f6961a70f 100644
--- a/lib/test_objpool.c
+++ b/lib/test_objpool.c
@@ -687,4 +687,5 @@ static void __exit ot_mod_exit(void)
module_init(ot_mod_init);
module_exit(ot_mod_exit);
-MODULE_LICENSE("GPL"); \ No newline at end of file
+MODULE_DESCRIPTION("Test module for lockless object pool");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 965cb6f28527..8448b6d02bd9 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -641,26 +641,12 @@ page_flags_test(int section, int node, int zone, int last_cpupid,
test(cmp_buf, "%pGp", &flags);
}
-static void __init page_type_test(unsigned int page_type, const char *name,
- char *cmp_buf)
-{
- unsigned long size;
-
- size = scnprintf(cmp_buf, BUF_SIZE, "%#x(", page_type);
- if (page_type_has_type(page_type))
- size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
-
- snprintf(cmp_buf + size, BUF_SIZE - size, ")");
- test(cmp_buf, "%pGt", &page_type);
-}
-
static void __init
flags(void)
{
unsigned long flags;
char *cmp_buffer;
gfp_t gfp;
- unsigned int page_type;
cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!cmp_buffer)
@@ -700,18 +686,6 @@ flags(void)
gfp |= __GFP_HIGH;
test(cmp_buffer, "%pGg", &gfp);
- page_type = ~0;
- page_type_test(page_type, "", cmp_buffer);
-
- page_type = 10;
- page_type_test(page_type, "", cmp_buffer);
-
- page_type = ~PG_buddy;
- page_type_test(page_type, "buddy", cmp_buffer);
-
- page_type = ~(PG_table | PG_buddy);
- page_type_test(page_type, "table|buddy", cmp_buffer);
-
kfree(cmp_buffer);
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 2d71b1115916..09f022ba1c05 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2054,25 +2054,6 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
return buf;
}
-static
-char *format_page_type(char *buf, char *end, unsigned int page_type)
-{
- buf = number(buf, end, page_type, default_flag_spec);
-
- if (buf < end)
- *buf = '(';
- buf++;
-
- if (page_type_has_type(page_type))
- buf = format_flags(buf, end, ~page_type, pagetype_names);
-
- if (buf < end)
- *buf = ')';
- buf++;
-
- return buf;
-}
-
static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
struct printf_spec spec, const char *fmt)
@@ -2086,8 +2067,6 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
switch (fmt[1]) {
case 'p':
return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
- case 't':
- return format_page_type(buf, end, *(unsigned int *)flags_ptr);
case 'v':
flags = *(unsigned long *)flags_ptr;
names = vmaflag_names;
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index aef086a6bf2f..20aa459bfb3e 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -5,7 +5,8 @@ config XZ_DEC
help
LZMA2 compression algorithm and BCJ filters are supported using
the .xz file format as the container. For integrity checking,
- CRC32 is supported. See Documentation/staging/xz.rst for more information.
+ CRC32 is supported. See Documentation/staging/xz.rst for more
+ information.
if XZ_DEC
@@ -29,11 +30,21 @@ config XZ_DEC_ARMTHUMB
default y
select XZ_DEC_BCJ
+config XZ_DEC_ARM64
+ bool "ARM64 BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
config XZ_DEC_SPARC
bool "SPARC BCJ filter decoder" if EXPERT
default y
select XZ_DEC_BCJ
+config XZ_DEC_RISCV
+ bool "RISC-V BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
config XZ_DEC_MICROLZMA
bool "MicroLZMA decoder"
default n
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 88a2c35e1b59..6a7906a328ba 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* CRC32 using the polynomial from IEEE-802.3
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
/*
@@ -27,9 +26,9 @@
STATIC_RW_DATA uint32_t xz_crc32_table[256];
-XZ_EXTERN void xz_crc32_init(void)
+void xz_crc32_init(void)
{
- const uint32_t poly = CRC32_POLY_LE;
+ const uint32_t poly = 0xEDB88320;
uint32_t i;
uint32_t j;
@@ -46,7 +45,7 @@ XZ_EXTERN void xz_crc32_init(void)
return;
}
-XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
crc = ~crc;
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index ef449e97d1a1..8237db17eee3 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* Branch/Call/Jump (BCJ) filter decoders
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -24,7 +23,9 @@ struct xz_dec_bcj {
BCJ_IA64 = 6, /* Big or little endian */
BCJ_ARM = 7, /* Little endian only */
BCJ_ARMTHUMB = 8, /* Little endian only */
- BCJ_SPARC = 9 /* Big or little endian */
+ BCJ_SPARC = 9, /* Big or little endian */
+ BCJ_ARM64 = 10, /* AArch64 */
+ BCJ_RISCV = 11 /* RV32GQC_Zfh, RV64GQC_Zfh */
} type;
/*
@@ -162,7 +163,9 @@ static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t instr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
instr = get_unaligned_be32(buf + i);
if ((instr & 0xFC000003) == 0x48000001) {
instr &= 0x03FFFFFC;
@@ -219,7 +222,9 @@ static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
/* Instruction normalized with bit_res for easier manipulation */
uint64_t norm;
- for (i = 0; i + 16 <= size; i += 16) {
+ size &= ~(size_t)15;
+
+ for (i = 0; i < size; i += 16) {
mask = branch_table[buf[i] & 0x1F];
for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
if (((mask >> slot) & 1) == 0)
@@ -267,7 +272,9 @@ static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t addr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
if (buf[i + 3] == 0xEB) {
addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
| ((uint32_t)buf[i + 2] << 16);
@@ -290,7 +297,12 @@ static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t addr;
- for (i = 0; i + 4 <= size; i += 2) {
+ if (size < 4)
+ return 0;
+
+ size -= 4;
+
+ for (i = 0; i <= size; i += 2) {
if ((buf[i + 1] & 0xF8) == 0xF0
&& (buf[i + 3] & 0xF8) == 0xF8) {
addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
@@ -318,7 +330,9 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t instr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
instr = get_unaligned_be32(buf + i);
if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
instr <<= 2;
@@ -334,6 +348,140 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
}
#endif
+#ifdef XZ_DEC_ARM64
+static size_t bcj_arm64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t instr;
+ uint32_t addr;
+
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
+ instr = get_unaligned_le32(buf + i);
+
+ if ((instr >> 26) == 0x25) {
+ /* BL instruction */
+ addr = instr - ((s->pos + (uint32_t)i) >> 2);
+ instr = 0x94000000 | (addr & 0x03FFFFFF);
+ put_unaligned_le32(instr, buf + i);
+
+ } else if ((instr & 0x9F000000) == 0x90000000) {
+ /* ADRP instruction */
+ addr = ((instr >> 29) & 3) | ((instr >> 3) & 0x1FFFFC);
+
+ /* Only convert values in the range +/-512 MiB. */
+ if ((addr + 0x020000) & 0x1C0000)
+ continue;
+
+ addr -= (s->pos + (uint32_t)i) >> 12;
+
+ instr &= 0x9000001F;
+ instr |= (addr & 3) << 29;
+ instr |= (addr & 0x03FFFC) << 3;
+ instr |= (0U - (addr & 0x020000)) & 0xE00000;
+
+ put_unaligned_le32(instr, buf + i);
+ }
+ }
+
+ return i;
+}
+#endif
+
+#ifdef XZ_DEC_RISCV
+static size_t bcj_riscv(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t b1;
+ uint32_t b2;
+ uint32_t b3;
+ uint32_t instr;
+ uint32_t instr2;
+ uint32_t instr2_rs1;
+ uint32_t addr;
+
+ if (size < 8)
+ return 0;
+
+ size -= 8;
+
+ for (i = 0; i <= size; i += 2) {
+ instr = buf[i];
+
+ if (instr == 0xEF) {
+ /* JAL */
+ b1 = buf[i + 1];
+ if ((b1 & 0x0D) != 0)
+ continue;
+
+ b2 = buf[i + 2];
+ b3 = buf[i + 3];
+
+ addr = ((b1 & 0xF0) << 13) | (b2 << 9) | (b3 << 1);
+ addr -= s->pos + (uint32_t)i;
+
+ buf[i + 1] = (uint8_t)((b1 & 0x0F)
+ | ((addr >> 8) & 0xF0));
+
+ buf[i + 2] = (uint8_t)(((addr >> 16) & 0x0F)
+ | ((addr >> 7) & 0x10)
+ | ((addr << 4) & 0xE0));
+
+ buf[i + 3] = (uint8_t)(((addr >> 4) & 0x7F)
+ | ((addr >> 13) & 0x80));
+
+ i += 4 - 2;
+
+ } else if ((instr & 0x7F) == 0x17) {
+ /* AUIPC */
+ instr |= (uint32_t)buf[i + 1] << 8;
+ instr |= (uint32_t)buf[i + 2] << 16;
+ instr |= (uint32_t)buf[i + 3] << 24;
+
+ if (instr & 0xE80) {
+ /* AUIPC's rd doesn't equal x0 or x2. */
+ instr2 = get_unaligned_le32(buf + i + 4);
+
+ if (((instr << 8) ^ (instr2 - 3)) & 0xF8003) {
+ i += 6 - 2;
+ continue;
+ }
+
+ addr = (instr & 0xFFFFF000) + (instr2 >> 20);
+
+ instr = 0x17 | (2 << 7) | (instr2 << 12);
+ instr2 = addr;
+ } else {
+ /* AUIPC's rd equals x0 or x2. */
+ instr2_rs1 = instr >> 27;
+
+ if ((uint32_t)((instr - 0x3117) << 18)
+ >= (instr2_rs1 & 0x1D)) {
+ i += 4 - 2;
+ continue;
+ }
+
+ addr = get_unaligned_be32(buf + i + 4);
+ addr -= s->pos + (uint32_t)i;
+
+ instr2 = (instr >> 12) | (addr << 20);
+
+ instr = 0x17 | (instr2_rs1 << 7)
+ | ((addr + 0x800) & 0xFFFFF000);
+ }
+
+ put_unaligned_le32(instr, buf + i);
+ put_unaligned_le32(instr2, buf + i + 4);
+
+ i += 8 - 2;
+ }
+ }
+
+ return i;
+}
+#endif
+
/*
* Apply the selected BCJ filter. Update *pos and s->pos to match the amount
* of data that got filtered.
@@ -381,6 +529,16 @@ static void bcj_apply(struct xz_dec_bcj *s,
filtered = bcj_sparc(s, buf, size);
break;
#endif
+#ifdef XZ_DEC_ARM64
+ case BCJ_ARM64:
+ filtered = bcj_arm64(s, buf, size);
+ break;
+#endif
+#ifdef XZ_DEC_RISCV
+ case BCJ_RISCV:
+ filtered = bcj_riscv(s, buf, size);
+ break;
+#endif
default:
/* Never reached but silence compiler warnings. */
filtered = 0;
@@ -414,9 +572,8 @@ static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
* data in chunks of 1-16 bytes. To hide this issue, this function does
* some buffering.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
- struct xz_dec_lzma2 *lzma2,
- struct xz_buf *b)
+enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
+ struct xz_buf *b)
{
size_t out_start;
@@ -524,7 +681,7 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
return s->ret;
}
-XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
+struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
{
struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s != NULL)
@@ -533,7 +690,7 @@ XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
return s;
}
-XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
+enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
{
switch (id) {
#ifdef XZ_DEC_X86
@@ -554,6 +711,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
#ifdef XZ_DEC_SPARC
case BCJ_SPARC:
#endif
+#ifdef XZ_DEC_ARM64
+ case BCJ_ARM64:
+#endif
+#ifdef XZ_DEC_RISCV
+ case BCJ_RISCV:
+#endif
break;
default:
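
The BL handling in bcj_arm64() above is a position-dependent rewrite: at compression time the filter turned the 26-bit relative word offset into an absolute word address, and the decoder subtracts the position back out. A hedged round-trip sketch with illustrative values:

#include <assert.h>
#include <stdint.h>

/* Encoder direction: make the 26-bit word offset absolute. */
static uint32_t bl_encode(uint32_t instr, uint32_t pos_words)
{
	return 0x94000000 | ((instr + pos_words) & 0x03FFFFFF);
}

/* Decoder direction, mirroring bcj_arm64(): make it relative again. */
static uint32_t bl_decode(uint32_t instr, uint32_t pos_words)
{
	return 0x94000000 | ((instr - pos_words) & 0x03FFFFFF);
}

int main(void)
{
	uint32_t bl = 0x94000000 | 0x1234;	/* BL, word offset 0x1234 */
	uint32_t pos = 0x400 >> 2;		/* instruction at byte 0x400 */

	assert(bl_decode(bl_encode(bl, pos), pos) == bl);
	return 0;
}
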
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 27ce34520e78..83bb66b6016d 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* LZMA2 decoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -961,8 +960,7 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
* Take care of the LZMA2 control layer, and forward the job of actual LZMA
* decoding or copying of uncompressed chunks to other functions.
*/
-XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
- struct xz_buf *b)
+enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
uint32_t tmp;
@@ -1138,8 +1136,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
return XZ_OK;
}
-XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
- uint32_t dict_max)
+struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
@@ -1162,7 +1159,7 @@ XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
return s;
}
-XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
+enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
{
/* This limits dictionary size to 3 GiB to keep parsing simpler. */
if (props > 39)
@@ -1198,7 +1195,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
return XZ_OK;
}
-XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
+void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
{
if (DEC_IS_MULTI(s->dict.mode))
vfree(s->dict.buf);
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index 683570b93a8c..f9d003684d56 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* .xz Stream decoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -747,7 +746,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
* actually succeeds (that's the price to pay of using the output buffer as
* the workspace).
*/
-XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
+enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
{
size_t in_start;
size_t out_start;
@@ -783,7 +782,7 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
return ret;
}
-XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
@@ -813,7 +812,7 @@ error_bcj:
return NULL;
}
-XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+void xz_dec_reset(struct xz_dec *s)
{
s->sequence = SEQ_STREAM_HEADER;
s->allow_buf_error = false;
@@ -825,7 +824,7 @@ XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
s->temp.size = STREAM_HEADER_SIZE;
}
-XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+void xz_dec_end(struct xz_dec *s)
{
if (s != NULL) {
xz_dec_lzma2_end(s->lzma2);
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
index 61098c67a413..f40817d65897 100644
--- a/lib/xz/xz_dec_syms.c
+++ b/lib/xz/xz_dec_syms.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* XZ decoder module information
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include <linux/module.h>
@@ -23,11 +22,6 @@ EXPORT_SYMBOL(xz_dec_microlzma_end);
#endif
MODULE_DESCRIPTION("XZ decompressor");
-MODULE_VERSION("1.1");
+MODULE_VERSION("1.2");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
-
-/*
- * This code is in the public domain, but in Linux it's simplest to just
- * say it's GPL and consider the authors as the copyright holders.
- */
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c
index da28a19d6c98..53d3600f2ddb 100644
--- a/lib/xz/xz_dec_test.c
+++ b/lib/xz/xz_dec_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* XZ decoder tester
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include <linux/kernel.h>
@@ -212,9 +211,4 @@ module_exit(xz_dec_test_exit);
MODULE_DESCRIPTION("XZ decompressor tester");
MODULE_VERSION("1.0");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>");
-
-/*
- * This code is in the public domain, but in Linux it's simplest to just
- * say it's GPL and consider the authors as the copyright holders.
- */
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
index 92d852d4f87a..d2632b7dfb9c 100644
--- a/lib/xz/xz_lzma2.h
+++ b/lib/xz/xz_lzma2.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* LZMA2 definitions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_LZMA2_H
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index bf1e94ec7873..5f1294a1408c 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Private includes and definitions
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_PRIVATE_H
@@ -37,6 +36,12 @@
# ifdef CONFIG_XZ_DEC_SPARC
# define XZ_DEC_SPARC
# endif
+# ifdef CONFIG_XZ_DEC_ARM64
+# define XZ_DEC_ARM64
+# endif
+# ifdef CONFIG_XZ_DEC_RISCV
+# define XZ_DEC_RISCV
+# endif
# ifdef CONFIG_XZ_DEC_MICROLZMA
# define XZ_DEC_MICROLZMA
# endif
@@ -98,23 +103,19 @@
*/
#ifndef XZ_DEC_BCJ
# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
- || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
+ || defined(XZ_DEC_IA64) \
|| defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \
- || defined(XZ_DEC_SPARC)
+ || defined(XZ_DEC_SPARC) || defined(XZ_DEC_ARM64) \
+ || defined(XZ_DEC_RISCV)
# define XZ_DEC_BCJ
# endif
#endif
-#ifndef CRC32_POLY_LE
-#define CRC32_POLY_LE 0xedb88320
-#endif
-
/*
* Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
* before calling xz_dec_lzma2_run().
*/
-XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
- uint32_t dict_max);
+struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max);
/*
* Decode the LZMA2 properties (one byte) and reset the decoder. Return
@@ -122,22 +123,20 @@ XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
* big enough, and XZ_OPTIONS_ERROR if props indicates something that this
* decoder doesn't support.
*/
-XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
- uint8_t props);
+enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props);
/* Decode raw LZMA2 stream from b->in to b->out. */
-XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
- struct xz_buf *b);
+enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b);
/* Free the memory allocated for the LZMA2 decoder. */
-XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
+void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
#ifdef XZ_DEC_BCJ
/*
* Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
* calling xz_dec_bcj_run().
*/
-XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
+struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
/*
* Decode the Filter ID of a BCJ filter. This implementation doesn't
@@ -145,16 +144,15 @@ XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
* is needed. Returns XZ_OK if the given Filter ID is supported.
* Otherwise XZ_OPTIONS_ERROR is returned.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
+enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
/*
* Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
* a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
* must be called directly.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
- struct xz_dec_lzma2 *lzma2,
- struct xz_buf *b);
+enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
+ struct xz_buf *b);
/* Free the memory allocated for the BCJ filters. */
#define xz_dec_bcj_end(s) kfree(s)
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
index 430bb3a0d195..55f9f6f94b78 100644
--- a/lib/xz/xz_stream.h
+++ b/lib/xz/xz_stream.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Definitions for handling the .xz file format
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_STREAM_H
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
index f620cafca633..16bb995bc6c4 100644
--- a/lib/zstd/compress/zstd_compress.c
+++ b/lib/zstd/compress/zstd_compress.c
@@ -4810,6 +4810,8 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
dictLoadMethod, cctxParams.cParams,
cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
customMem);
+ if (!cdict)
+ return NULL;
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
index 04e1b5c01d9b..bd8784449b31 100644
--- a/lib/zstd/zstd_compress_module.c
+++ b/lib/zstd/zstd_compress_module.c
@@ -66,6 +66,12 @@ int zstd_max_clevel(void)
}
EXPORT_SYMBOL(zstd_max_clevel);
+int zstd_default_clevel(void)
+{
+ return ZSTD_defaultCLevel();
+}
+EXPORT_SYMBOL(zstd_default_clevel);
+
size_t zstd_compress_bound(size_t src_size)
{
return ZSTD_compressBound(src_size);
@@ -79,6 +85,13 @@ zstd_parameters zstd_get_params(int level,
}
EXPORT_SYMBOL(zstd_get_params);
+zstd_compression_parameters zstd_get_cparams(int level,
+ unsigned long long estimated_src_size, size_t dict_size)
+{
+ return ZSTD_getCParams(level, estimated_src_size, dict_size);
+}
+EXPORT_SYMBOL(zstd_get_cparams);
+
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCCtxSize_usingCParams(*cparams);
@@ -93,6 +106,33 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
}
EXPORT_SYMBOL(zstd_init_cctx);
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem)
+{
+ return ZSTD_createCCtx_advanced(custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_cctx_advanced);
+
+size_t zstd_free_cctx(zstd_cctx *cctx)
+{
+ return ZSTD_freeCCtx(cctx);
+}
+EXPORT_SYMBOL(zstd_free_cctx);
+
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+ zstd_compression_parameters cparams,
+ zstd_custom_mem custom_mem)
+{
+ return ZSTD_createCDict_advanced(dict, dict_size, ZSTD_dlm_byRef,
+ ZSTD_dct_auto, cparams, custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_cdict_byreference);
+
+size_t zstd_free_cdict(zstd_cdict *cdict)
+{
+ return ZSTD_freeCDict(cdict);
+}
+EXPORT_SYMBOL(zstd_free_cdict);
+
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters)
{
@@ -101,6 +141,15 @@ size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
}
EXPORT_SYMBOL(zstd_compress_cctx);
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+ size_t dst_capacity, const void *src, size_t src_size,
+ const ZSTD_CDict *cdict)
+{
+ return ZSTD_compress_usingCDict(cctx, dst, dst_capacity,
+ src, src_size, cdict);
+}
+EXPORT_SYMBOL(zstd_compress_using_cdict);
+
size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCStreamSize_usingCParams(*cparams);
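
Together, the wrappers above expose by-reference dictionary compression to kernel code. A hedged usage sketch in kernel style; the caller-supplied allocator and buffers are assumed to exist, and error handling is abbreviated:

/* Sketch only: compress src with a shared dictionary kept by reference. */
static int compress_with_cdict(zstd_custom_mem mem,
			       const void *dict, size_t dict_size,
			       const void *src, size_t src_size,
			       void *dst, size_t dst_capacity)
{
	zstd_compression_parameters cparams;
	zstd_cctx *cctx;
	zstd_cdict *cdict;
	size_t ret;

	cparams = zstd_get_cparams(zstd_default_clevel(), src_size, dict_size);

	cctx = zstd_create_cctx_advanced(mem);
	if (!cctx)
		return -ENOMEM;

	cdict = zstd_create_cdict_byreference(dict, dict_size, cparams, mem);
	if (!cdict) {
		zstd_free_cctx(cctx);
		return -ENOMEM;
	}

	ret = zstd_compress_using_cdict(cctx, dst, dst_capacity,
					src, src_size, cdict);

	zstd_free_cdict(cdict);
	zstd_free_cctx(cctx);

	return zstd_is_error(ret) ? -EINVAL : (int)ret;
}

The decompression side added below mirrors this shape with zstd_create_ddict_byreference() and zstd_decompress_using_ddict().
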
diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
index f4ed952ed485..469fc3059be0 100644
--- a/lib/zstd/zstd_decompress_module.c
+++ b/lib/zstd/zstd_decompress_module.c
@@ -44,6 +44,33 @@ size_t zstd_dctx_workspace_bound(void)
}
EXPORT_SYMBOL(zstd_dctx_workspace_bound);
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem)
+{
+ return ZSTD_createDCtx_advanced(custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_dctx_advanced);
+
+size_t zstd_free_dctx(zstd_dctx *dctx)
+{
+ return ZSTD_freeDCtx(dctx);
+}
+EXPORT_SYMBOL(zstd_free_dctx);
+
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+ zstd_custom_mem custom_mem)
+{
+ return ZSTD_createDDict_advanced(dict, dict_size, ZSTD_dlm_byRef,
+ ZSTD_dct_auto, custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_ddict_byreference);
+
+size_t zstd_free_ddict(zstd_ddict *ddict)
+{
+ return ZSTD_freeDDict(ddict);
+}
+EXPORT_SYMBOL(zstd_free_ddict);
+
zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
{
if (workspace == NULL)
@@ -59,6 +86,15 @@ size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
}
EXPORT_SYMBOL(zstd_decompress_dctx);
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+ void *dst, size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_ddict *ddict)
+{
+ return ZSTD_decompress_usingDDict(dctx, dst, dst_capacity, src,
+ src_size, ddict);
+}
+EXPORT_SYMBOL(zstd_decompress_using_ddict);
+
size_t zstd_dstream_workspace_bound(size_t max_window_size)
{
return ZSTD_estimateDStreamSize(max_window_size);
diff --git a/mm/Kconfig b/mm/Kconfig
index b72e7d040f78..4c9f5ea13271 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,7 +128,7 @@ config ZSWAP_COMPRESSOR_DEFAULT
choice
prompt "Default allocator"
depends on ZSWAP
- default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if HAVE_ZSMALLOC
+ default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
default ZSWAP_ZPOOL_DEFAULT_ZBUD
help
Selects the default allocator for the compressed cache for
@@ -146,15 +146,17 @@ config ZSWAP_ZPOOL_DEFAULT_ZBUD
help
Use the zbud allocator as the default allocator.
-config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
- bool "z3fold"
- select Z3FOLD
+config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
+ bool "z3foldi (DEPRECATED)"
+ select Z3FOLD_DEPRECATED
help
Use the z3fold allocator as the default allocator.
+ Deprecated and scheduled for removal in a few cycles,
+ see CONFIG_Z3FOLD_DEPRECATED.
+
config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
bool "zsmalloc"
- depends on HAVE_ZSMALLOC
select ZSMALLOC
help
Use the zsmalloc allocator as the default allocator.
@@ -164,7 +166,7 @@ config ZSWAP_ZPOOL_DEFAULT
string
depends on ZSWAP
default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
- default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+ default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
default ""
@@ -178,24 +180,29 @@ config ZBUD
deterministic reclaim properties that make it preferable to a higher
density approach when reclaim will be used.
-config Z3FOLD
- tristate "3:1 compression allocator (z3fold)"
+config Z3FOLD_DEPRECATED
+ tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
depends on ZSWAP
help
+ Deprecated and scheduled for removal in a few cycles. If you have
+ a good reason for using Z3FOLD over ZSMALLOC, please contact
+ linux-mm@kvack.org and the zswap maintainers.
+
A special purpose allocator for storing compressed pages.
It is designed to store up to three compressed pages per physical
page. It is a ZBUD derivative so the simplicity and determinism are
still there.
-config HAVE_ZSMALLOC
- def_bool y
- depends on MMU
- depends on PAGE_SIZE_LESS_THAN_256KB # we want <= 64 KiB
+config Z3FOLD
+ tristate
+ default y if Z3FOLD_DEPRECATED=y
+ default m if Z3FOLD_DEPRECATED=m
+ depends on Z3FOLD_DEPRECATED
config ZSMALLOC
tristate
- prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
- depends on HAVE_ZSMALLOC
+ prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM)
+ depends on MMU
help
zsmalloc is a slab-based memory allocator designed to store
pages of various compression levels efficiently. It achieves
@@ -585,17 +592,22 @@ config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
# at the same time (e.g. copy_page_range()).
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
-config SPLIT_PTLOCK_CPUS
- int
- default "999999" if !MMU
- default "999999" if ARM && !CPU_CACHE_VIPT
- default "999999" if PARISC && !PA20
- default "999999" if SPARC32
- default "4"
+config SPLIT_PTE_PTLOCKS
+ def_bool y
+ depends on MMU
+ depends on SMP
+ depends on NR_CPUS >= 4
+ depends on !ARM || CPU_CACHE_VIPT
+ depends on !PARISC || PA20
+ depends on !SPARC32
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
bool
+config SPLIT_PMD_PTLOCKS
+ def_bool y
+ depends on SPLIT_PTE_PTLOCKS && ARCH_ENABLE_SPLIT_PMD_PTLOCK
+
#
# support for memory balloon
config MEMORY_BALLOON
@@ -877,6 +889,19 @@ endif # TRANSPARENT_HUGEPAGE
config PGTABLE_HAS_HUGE_LEAVES
def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE
+# TODO: Allow to be enabled without THP
+config ARCH_SUPPORTS_HUGE_PFNMAP
+ def_bool n
+ depends on TRANSPARENT_HUGEPAGE
+
+config ARCH_SUPPORTS_PMD_PFNMAP
+ def_bool y
+ depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE
+
+config ARCH_SUPPORTS_PUD_PFNMAP
+ def_bool y
+ depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+
#
# UP and nommu archs use km based percpu allocator
#
@@ -1081,13 +1106,10 @@ config ARCH_USES_HIGH_VMA_FLAGS
config ARCH_HAS_PKEYS
bool
-config ARCH_USES_PG_ARCH_X
+config ARCH_USES_PG_ARCH_2
+ bool
+config ARCH_USES_PG_ARCH_3
bool
- help
- Enable the definition of PG_arch_x page flags with x > 1. Only
- suitable for 64-bit architectures with CONFIG_FLATMEM or
- CONFIG_SPARSEMEM_VMEMMAP enabled, otherwise there may not be
- enough room for additional bits in page->flags.
config VM_EVENT_COUNTERS
default y
@@ -1263,6 +1285,17 @@ config IOMMU_MM_DATA
config EXECMEM
bool
+config NUMA_MEMBLKS
+ bool
+
+config NUMA_EMU
+ bool "NUMA emulation"
+ depends on NUMA_MEMBLKS
+ help
+ Enable NUMA emulation. A flat machine will be split
+ into virtual nodes when booted with "numa=fake=N", where N is the
+ number of nodes. This is only useful for debugging.
+
source "mm/damon/Kconfig"
endmenu
diff --git a/mm/Makefile b/mm/Makefile
index d2915f8c9dc0..d5639b036166 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -37,7 +37,7 @@ mmu-y := nommu.o
mmu-$(CONFIG_MMU) := highmem.o memory.o mincore.o \
mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
msync.o page_vma_mapped.o pagewalk.o \
- pgtable-generic.o rmap.o vmalloc.o
+ pgtable-generic.o rmap.o vmalloc.o vma.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
@@ -53,7 +53,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
readahead.o swap.o truncate.o vmscan.o shrinker.o \
shmem.o util.o mmzone.o vmstat.o backing-dev.o \
mm_init.o percpu.o slab_common.o \
- compaction.o show_mem.o shmem_quota.o\
+ compaction.o show_mem.o \
interval_tree.o list_lru.o workingset.o \
debug.o gup.o mmap_lock.o $(mmu-y)
@@ -117,6 +117,9 @@ obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
obj-$(CONFIG_Z3FOLD) += z3fold.o
obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
obj-$(CONFIG_CMA) += cma.o
+obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_NUMA_MEMBLKS) += numa_memblks.o
+obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
obj-$(CONFIG_PAGE_TABLE_CHECK) += page_table_check.o
@@ -141,3 +144,4 @@ obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o
obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o
obj-$(CONFIG_EXECMEM) += execmem.o
+obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
diff --git a/mm/cma.c b/mm/cma.c
index 3e9724716bad..2d9fae939283 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -202,7 +202,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
cma->order_per_bit = order_per_bit;
*res_cma = cma;
cma_area_count++;
- totalcma_pages += (size / PAGE_SIZE);
+ totalcma_pages += cma->count;
return 0;
}
@@ -403,18 +403,8 @@ static void cma_debug_show_areas(struct cma *cma)
spin_unlock_irq(&cma->lock);
}
-/**
- * cma_alloc() - allocate pages from contiguous area
- * @cma: Contiguous memory region for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @no_warn: Avoid printing message about failed allocation
- *
- * This function allocates part of contiguous memory on specific
- * contiguous memory area.
- */
-struct page *cma_alloc(struct cma *cma, unsigned long count,
- unsigned int align, bool no_warn)
+static struct page *__cma_alloc(struct cma *cma, unsigned long count,
+ unsigned int align, gfp_t gfp)
{
unsigned long mask, offset;
unsigned long pfn = -1;
@@ -463,8 +453,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
mutex_lock(&cma_mutex);
- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
- GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
mutex_unlock(&cma_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
@@ -494,7 +483,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
page_kasan_tag_reset(nth_page(page, i));
}
- if (ret && !no_warn) {
+ if (ret && !(gfp & __GFP_NOWARN)) {
pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
__func__, cma->name, count, ret);
cma_debug_show_areas(cma);
@@ -513,6 +502,34 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
return page;
}
+/**
+ * cma_alloc() - allocate pages from contiguous area
+ * @cma: Contiguous memory region for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @no_warn: Avoid printing message about failed allocation
+ *
+ * This function allocates part of contiguous memory on specific
+ * contiguous memory area.
+ */
+struct page *cma_alloc(struct cma *cma, unsigned long count,
+ unsigned int align, bool no_warn)
+{
+ return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+}
+
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+ struct page *page;
+
+ if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ return NULL;
+
+ page = __cma_alloc(cma, 1 << order, order, gfp);
+
+ return page ? page_folio(page) : NULL;
+}
+
bool cma_pages_valid(struct cma *cma, const struct page *pages,
unsigned long count)
{
@@ -564,6 +581,14 @@ bool cma_release(struct cma *cma, const struct page *pages,
return true;
}
+bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+ if (WARN_ON(!folio_test_large(folio)))
+ return false;
+
+ return cma_release(cma, &folio->page, folio_nr_pages(folio));
+}
+
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
int i;
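
The new folio helpers wrap the existing page-based CMA API. A hedged usage sketch; my_cma stands for a struct cma * obtained at init time and is illustrative:

/* Sketch only: order must be nonzero and the gfp mask must carry __GFP_COMP. */
static struct folio *grab_cma_folio(struct cma *my_cma, int order)
{
	return cma_alloc_folio(my_cma, order, GFP_KERNEL | __GFP_COMP);
}

static void drop_cma_folio(struct cma *my_cma, struct folio *folio)
{
	if (!cma_free_folio(my_cma, folio))
		pr_warn("folio did not belong to this CMA area\n");
}
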
diff --git a/mm/compaction.c b/mm/compaction.c
index eb95e9b435d0..a2b16b08cbbf 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -23,6 +23,7 @@
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
+#include <linux/cpuset.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -86,33 +87,6 @@ static struct page *mark_allocated_noprof(struct page *page, unsigned int order,
}
#define mark_allocated(...) alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
-static void split_map_pages(struct list_head *freepages)
-{
- unsigned int i, order;
- struct page *page, *next;
- LIST_HEAD(tmp_list);
-
- for (order = 0; order < NR_PAGE_ORDERS; order++) {
- list_for_each_entry_safe(page, next, &freepages[order], lru) {
- unsigned int nr_pages;
-
- list_del(&page->lru);
-
- nr_pages = 1 << order;
-
- mark_allocated(page, order, __GFP_MOVABLE);
- if (order)
- split_page(page, order);
-
- for (i = 0; i < nr_pages; i++) {
- list_add(&page->lru, &tmp_list);
- page++;
- }
- }
- list_splice_init(&tmp_list, &freepages[0]);
- }
-}
-
static unsigned long release_free_list(struct list_head *freepages)
{
int order;
@@ -742,11 +716,11 @@ isolate_fail:
*
* Non-free pages, invalid PFNs, or zone boundaries within the
* [start_pfn, end_pfn) range are considered errors, cause function to
- * undo its actions and return zero.
+ * undo its actions and return zero. cc->freepages[] is left empty.
*
* Otherwise, function returns one-past-the-last PFN of isolated page
* (which may be greater then end_pfn if end fell in a middle of
- * a free page).
+ * a free page). cc->freepages[] contains the isolated free pages.
*/
unsigned long
isolate_freepages_range(struct compact_control *cc,
@@ -754,10 +728,9 @@ isolate_freepages_range(struct compact_control *cc,
{
unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
int order;
- struct list_head tmp_freepages[NR_PAGE_ORDERS];
for (order = 0; order < NR_PAGE_ORDERS; order++)
- INIT_LIST_HEAD(&tmp_freepages[order]);
+ INIT_LIST_HEAD(&cc->freepages[order]);
pfn = start_pfn;
block_start_pfn = pageblock_start_pfn(pfn);
@@ -788,7 +761,7 @@ isolate_freepages_range(struct compact_control *cc,
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, tmp_freepages, 0, true);
+ block_end_pfn, cc->freepages, 0, true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
@@ -807,13 +780,10 @@ isolate_freepages_range(struct compact_control *cc,
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
- release_free_list(tmp_freepages);
+ release_free_list(cc->freepages);
return 0;
}
- /* __isolate_free_page() does not map the pages */
- split_map_pages(tmp_freepages);
-
/* We don't use freelists for anything. */
return pfn;
}
@@ -2853,6 +2823,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
ac->highest_zoneidx, ac->nodemask) {
enum compact_result status;
+ if (cpusets_enabled() &&
+ (alloc_flags & ALLOC_CPUSET) &&
+ !__cpuset_zone_allowed(zone, gfp_mask))
+ continue;
+
if (prio > MIN_COMPACT_PRIORITY
&& compaction_deferred(zone, order)) {
rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index fecb8172410c..35b72f88983a 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -9,7 +9,7 @@ config DAMON
access frequency of each memory region. The information can be useful
for performance-centric DRAM level memory management.
- See https://damonitor.github.io/doc/html/latest-damon/index.html for
+ See https://www.kernel.org/doc/html/latest/mm/damon/index.html for
more information.
config DAMON_KUNIT_TEST
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 7a87628b76ab..a83f3b736d51 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -552,7 +552,13 @@ static unsigned int damon_accesses_bp_to_nr_accesses(
return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}
-/* convert nr_accesses to access ratio in bp (per 10,000) */
+/*
+ * Convert nr_accesses to access ratio in bp (per 10,000).
+ *
+ * Callers should ensure attrs.aggr_interval is not zero, like
+ * damon_update_monitoring_results() does. Otherwise, divide-by-zero would
+ * happen.
+ */
static unsigned int damon_nr_accesses_to_accesses_bp(
unsigned int nr_accesses, struct damon_attrs *attrs)
{
@@ -1582,13 +1588,16 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
return;
/* Fill up the score histogram */
- memset(quota->histogram, 0, sizeof(quota->histogram));
+ memset(c->regions_score_histogram, 0,
+ sizeof(*c->regions_score_histogram) *
+ (DAMOS_MAX_SCORE + 1));
damon_for_each_target(t, c) {
damon_for_each_region(r, t) {
if (!__damos_valid_target(r, s))
continue;
score = c->ops.get_scheme_score(c, t, r, s);
- quota->histogram[score] += damon_sz_region(r);
+ c->regions_score_histogram[score] +=
+ damon_sz_region(r);
if (score > max_score)
max_score = score;
}
@@ -1596,7 +1605,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
/* Set the min score limit */
for (cumulated_sz = 0, score = max_score; ; score--) {
- cumulated_sz += quota->histogram[score];
+ cumulated_sz += c->regions_score_histogram[score];
if (cumulated_sz >= quota->esz || !score)
break;
}
@@ -1957,6 +1966,10 @@ static int kdamond_fn(void *data)
ctx->ops.init(ctx);
if (ctx->callback.before_start && ctx->callback.before_start(ctx))
goto done;
+ ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
+ sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
+ if (!ctx->regions_score_histogram)
+ goto done;
sz_limit = damon_region_sz_limit(ctx);
@@ -2034,6 +2047,7 @@ done:
ctx->callback.before_terminate(ctx);
if (ctx->ops.cleanup)
ctx->ops.cleanup(ctx);
+ kfree(ctx->regions_score_histogram);
pr_debug("kdamond (%d) finishes\n", current->pid);
mutex_lock(&ctx->kdamond_lock);
@@ -2205,4 +2219,4 @@ static int __init damon_init(void)
subsys_initcall(damon_init);
-#include "core-test.h"
+#include "tests/core-kunit.h"
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 51a6f1cac385..b4213bc47e44 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -1145,4 +1145,4 @@ out:
module_init(damon_dbgfs_init);
-#include "dbgfs-test.h"
+#include "tests/dbgfs-kunit.h"
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index cffc755e7775..58145d59881d 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1882,4 +1882,4 @@ out:
}
subsys_initcall(damon_sysfs_init);
-#include "sysfs-test.h"
+#include "tests/sysfs-kunit.h"
diff --git a/mm/damon/tests/.kunitconfig b/mm/damon/tests/.kunitconfig
new file mode 100644
index 000000000000..a73be044fc9b
--- /dev/null
+++ b/mm/damon/tests/.kunitconfig
@@ -0,0 +1,22 @@
+# for DAMON core
+CONFIG_KUNIT=y
+CONFIG_DAMON=y
+CONFIG_DAMON_KUNIT_TEST=y
+
+# for DAMON vaddr ops
+CONFIG_MMU=y
+CONFIG_PAGE_IDLE_FLAG=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_VADDR_KUNIT_TEST=y
+
+# for DAMON sysfs interface
+CONFIG_SYSFS=y
+CONFIG_DAMON_SYSFS=y
+CONFIG_DAMON_SYSFS_KUNIT_TEST=y
+
+# for DAMON debugfs interface
+CONFIG_DEBUG_FS=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DAMON_DBGFS_DEPRECATED=y
+CONFIG_DAMON_DBGFS=y
+CONFIG_DAMON_DBGFS_KUNIT_TEST=y
diff --git a/mm/damon/core-test.h b/mm/damon/tests/core-kunit.h
index 0cee634f3544..cf22e09a3507 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/tests/core-kunit.h
@@ -246,16 +246,20 @@ static void damon_test_split_regions_of(struct kunit *test)
static void damon_test_ops_registration(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
- struct damon_operations ops, bak;
+ struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
+ bool need_cleanup = false;
+
+ /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
+ if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
+ bak.id = DAMON_OPS_VADDR;
+ KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
+ need_cleanup = true;
+ }
- /* DAMON_OPS_{V,P}ADDR are registered on subsys_initcall */
+ /* DAMON_OPS_VADDR is ensured to be registered */
KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);
- KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_PADDR), 0);
/* Double-registration is prohibited */
- ops.id = DAMON_OPS_VADDR;
- KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
- ops.id = DAMON_OPS_PADDR;
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
/* Unknown ops id cannot be registered */
@@ -278,6 +282,13 @@ static void damon_test_ops_registration(struct kunit *test)
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
damon_destroy_ctx(c);
+
+ if (need_cleanup) {
+ mutex_lock(&damon_ops_lock);
+ damon_registered_ops[DAMON_OPS_VADDR] =
+ (struct damon_operations){};
+ mutex_unlock(&damon_ops_lock);
+ }
}
static void damon_test_set_regions(struct kunit *test)
@@ -309,6 +320,18 @@ static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
};
+ /*
+ * In some cases, such as 32bit architectures where UINT_MAX is
+ * ULONG_MAX, attrs.aggr_interval becomes zero. Calling
+ * damon_nr_accesses_to_accesses_bp() in that case would cause a
+ * divide-by-zero. Such a case is prohibited in normal execution, as the
+ * caveat is documented in the comment for the function, and
+ * damon_update_monitoring_results() does the check. Skip the test in
+ * that case.
+ */
+ if (!attrs.aggr_interval)
+ kunit_skip(test, "aggr_interval is zero.");
+
KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/tests/dbgfs-kunit.h
index 2d85217f5ba4..d2ecfcc8db86 100644
--- a/mm/damon/dbgfs-test.h
+++ b/mm/damon/tests/dbgfs-kunit.h
@@ -73,6 +73,11 @@ static void damon_dbgfs_test_set_targets(struct kunit *test)
struct damon_ctx *ctx = dbgfs_new_ctx();
char buf[64];
+ if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
+ dbgfs_destroy_ctx(ctx);
+ kunit_skip(test, "PADDR not registered");
+ }
+
/* Make DAMON consider target has no pid */
damon_select_ops(ctx, DAMON_OPS_PADDR);
@@ -111,6 +116,11 @@ static void damon_dbgfs_test_set_init_regions(struct kunit *test)
int i, rc;
char buf[256];
+ if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
+ damon_destroy_ctx(ctx);
+ kunit_skip(test, "PADDR not registered");
+ }
+
damon_select_ops(ctx, DAMON_OPS_PADDR);
dbgfs_set_targets(ctx, 3, NULL);
diff --git a/mm/damon/sysfs-test.h b/mm/damon/tests/sysfs-kunit.h
index 1c9b596057a7..1c9b596057a7 100644
--- a/mm/damon/sysfs-test.h
+++ b/mm/damon/tests/sysfs-kunit.h
diff --git a/mm/damon/vaddr-test.h b/mm/damon/tests/vaddr-kunit.h
index 83626483f82b..a339d117150f 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -77,7 +77,7 @@ static void damon_test_three_regions_in_vmas(struct kunit *test)
(struct vm_area_struct) {.vm_start = 307, .vm_end = 330},
};
- mt_init_flags(&mm.mm_mt, MM_MT_FLAGS);
+ mt_init_flags(&mm.mm_mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas)))
kunit_skip(test, "Failed to create VMA tree");
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 58829baf8b5d..08cfd22b5249 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -126,6 +126,7 @@ static int __damon_va_three_regions(struct mm_struct *mm,
* If this is too slow, it can be optimised to examine the maple
* tree gaps.
*/
+ rcu_read_lock();
for_each_vma(vmi, vma) {
unsigned long gap;
@@ -146,6 +147,7 @@ static int __damon_va_three_regions(struct mm_struct *mm,
next:
prev = vma;
}
+ rcu_read_unlock();
if (!sz_range(&second_gap) || !sz_range(&first_gap))
return -EINVAL;
@@ -730,4 +732,4 @@ static int __init damon_va_initcall(void)
subsys_initcall(damon_va_initcall);
-#include "vaddr-test.h"
+#include "tests/vaddr-kunit.h"
diff --git a/mm/debug.c b/mm/debug.c
index 69e524c3e601..aa57d3ffd4ed 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -36,11 +36,6 @@ const struct trace_print_flags pageflag_names[] = {
{0, NULL}
};
-const struct trace_print_flags pagetype_names[] = {
- __def_pagetype_names,
- {0, NULL}
-};
-
const struct trace_print_flags gfpflag_names[] = {
__def_gfpflag_names,
{0, NULL}
@@ -51,6 +46,27 @@ const struct trace_print_flags vmaflag_names[] = {
{0, NULL}
};
+#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)
+
+static const char *page_type_names[] = {
+ DEF_PAGETYPE_NAME(slab),
+ DEF_PAGETYPE_NAME(hugetlb),
+ DEF_PAGETYPE_NAME(offline),
+ DEF_PAGETYPE_NAME(guard),
+ DEF_PAGETYPE_NAME(table),
+ DEF_PAGETYPE_NAME(buddy),
+ DEF_PAGETYPE_NAME(unaccepted),
+};
+
+static const char *page_type_name(unsigned int page_type)
+{
+ unsigned i = (page_type >> 24) - 0xf0;
+
+ if (i >= ARRAY_SIZE(page_type_names))
+ return "unknown";
+ return page_type_names[i];
+}
+
static void __dump_folio(struct folio *folio, struct page *page,
unsigned long pfn, unsigned long idx)
{
@@ -58,7 +74,7 @@ static void __dump_folio(struct folio *folio, struct page *page,
int mapcount = atomic_read(&page->_mapcount);
char *type = "";
- mapcount = page_type_has_type(mapcount) ? 0 : mapcount + 1;
+ mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
folio_ref_count(folio), mapcount, mapping,
folio->index + idx, pfn);
@@ -92,7 +108,8 @@ static void __dump_folio(struct folio *folio, struct page *page,
pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
if (page_has_type(&folio->page))
- pr_warn("page_type: %pGt\n", &folio->page.page_type);
+ pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
+ page_type_name(folio->page.page_type));
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
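
The new page_type_name() helper decodes the type tag stored in the top byte of page->page_type, where values from 0xf0 upward index the name table. A self-contained userspace sketch of that decoding; the table contents mirror the kernel's list, while name() and names[] are illustrative identifiers of our own:

	#include <stdio.h>

	static const char *names[] = {
		"slab", "hugetlb", "offline", "guard", "table", "buddy", "unaccepted",
	};

	static const char *name(unsigned int page_type)
	{
		unsigned int i = (page_type >> 24) - 0xf0;

		/* unsigned wrap-around makes this bound check also catch values below 0xf0 */
		return i < sizeof(names) / sizeof(names[0]) ? names[i] : "unknown";
	}

	int main(void)
	{
		printf("%s\n", name(0xf0000000));	/* slab */
		printf("%s\n", name(0xf5000000));	/* buddy */
		printf("%s\n", name(0x12000000));	/* unknown (not a typed page) */
		return 0;
	}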
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index e4969fb54da3..bc748f700a9e 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -231,10 +231,10 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
flush_dcache_page(page);
pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(pmd_write(pmd));
pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!pmd_none(pmd));
pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
@@ -245,10 +245,10 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
pmd = pmd_mkwrite(pmd, args->vma);
pmd = pmd_mkdirty(pmd);
pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!pmd_none(pmd));
pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
@@ -256,7 +256,7 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
flush_dcache_page(page);
pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(pmd_young(pmd));
/* Clear the pte entries */
@@ -357,12 +357,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
set_pud_at(args->mm, vaddr, args->pudp, pud);
flush_dcache_page(page);
pudp_set_wrprotect(args->mm, vaddr, args->pudp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(pud_write(pud));
#ifndef __PAGETABLE_PMD_FOLDED
pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
pud = pfn_pud(args->pud_pfn, args->page_prot);
@@ -374,12 +374,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
pud = pud_mkwrite(pud);
pud = pud_mkdirty(pud);
pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!(pud_write(pud) && pud_dirty(pud)));
#ifndef __PAGETABLE_PMD_FOLDED
pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -389,7 +389,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
set_pud_at(args->mm, vaddr, args->pudp, pud);
flush_dcache_page(page);
pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(pud_young(pud));
pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
@@ -441,7 +441,7 @@ static void __init pmd_huge_tests(struct pgtable_debug_args *args)
WRITE_ONCE(*args->pmdp, __pmd(0));
WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
WARN_ON(!pmd_clear_huge(args->pmdp));
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!pmd_none(pmd));
}
@@ -461,7 +461,7 @@ static void __init pud_huge_tests(struct pgtable_debug_args *args)
WRITE_ONCE(*args->pudp, __pud(0));
WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
WARN_ON(!pud_clear_huge(args->pudp));
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -490,7 +490,7 @@ static void __init pgd_basic_tests(struct pgtable_debug_args *args)
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
- pud_t pud = READ_ONCE(*args->pudp);
+ pud_t pud = pudp_get(args->pudp);
if (mm_pmd_folded(args->mm))
return;
@@ -498,7 +498,7 @@ static void __init pud_clear_tests(struct pgtable_debug_args *args)
pr_debug("Validating PUD clear\n");
WARN_ON(pud_none(pud));
pud_clear(args->pudp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(!pud_none(pud));
}
@@ -515,7 +515,7 @@ static void __init pud_populate_tests(struct pgtable_debug_args *args)
* Hence this must not qualify as pud_bad().
*/
pud_populate(args->mm, args->pudp, args->start_pmdp);
- pud = READ_ONCE(*args->pudp);
+ pud = pudp_get(args->pudp);
WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
@@ -526,7 +526,7 @@ static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
- p4d_t p4d = READ_ONCE(*args->p4dp);
+ p4d_t p4d = p4dp_get(args->p4dp);
if (mm_pud_folded(args->mm))
return;
@@ -534,7 +534,7 @@ static void __init p4d_clear_tests(struct pgtable_debug_args *args)
pr_debug("Validating P4D clear\n");
WARN_ON(p4d_none(p4d));
p4d_clear(args->p4dp);
- p4d = READ_ONCE(*args->p4dp);
+ p4d = p4dp_get(args->p4dp);
WARN_ON(!p4d_none(p4d));
}
@@ -553,13 +553,13 @@ static void __init p4d_populate_tests(struct pgtable_debug_args *args)
pud_clear(args->pudp);
p4d_clear(args->p4dp);
p4d_populate(args->mm, args->p4dp, args->start_pudp);
- p4d = READ_ONCE(*args->p4dp);
+ p4d = p4dp_get(args->p4dp);
WARN_ON(p4d_bad(p4d));
}
static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
- pgd_t pgd = READ_ONCE(*(args->pgdp));
+ pgd_t pgd = pgdp_get(args->pgdp);
if (mm_p4d_folded(args->mm))
return;
@@ -567,7 +567,7 @@ static void __init pgd_clear_tests(struct pgtable_debug_args *args)
pr_debug("Validating PGD clear\n");
WARN_ON(pgd_none(pgd));
pgd_clear(args->pgdp);
- pgd = READ_ONCE(*args->pgdp);
+ pgd = pgdp_get(args->pgdp);
WARN_ON(!pgd_none(pgd));
}
@@ -586,7 +586,7 @@ static void __init pgd_populate_tests(struct pgtable_debug_args *args)
p4d_clear(args->p4dp);
pgd_clear(args->pgdp);
pgd_populate(args->mm, args->pgdp, args->start_p4dp);
- pgd = READ_ONCE(*args->pgdp);
+ pgd = pgdp_get(args->pgdp);
WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
@@ -627,12 +627,12 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args)
static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
- pmd_t pmd = READ_ONCE(*args->pmdp);
+ pmd_t pmd = pmdp_get(args->pmdp);
pr_debug("Validating PMD clear\n");
WARN_ON(pmd_none(pmd));
pmd_clear(args->pmdp);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(!pmd_none(pmd));
}
@@ -646,7 +646,7 @@ static void __init pmd_populate_tests(struct pgtable_debug_args *args)
* Hence this must not qualify as pmd_bad().
*/
pmd_populate(args->mm, args->pmdp, args->start_ptep);
- pmd = READ_ONCE(*args->pmdp);
+ pmd = pmdp_get(args->pmdp);
WARN_ON(pmd_bad(pmd));
}
@@ -1251,7 +1251,7 @@ static int __init init_args(struct pgtable_debug_args *args)
ret = -ENOMEM;
goto error;
}
- args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
+ args->start_ptep = pmd_pgtable(pmdp_get(args->pmdp));
WARN_ON(!args->start_ptep);
init_fixed_pfns(args);
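
The READ_ONCE(*pmdp)-to-pmdp_get() conversions above are behavior-preserving on most architectures: unless an architecture overrides the helper, the generic fallback in include/linux/pgtable.h is essentially the same single-copy read:

	#ifndef pmdp_get
	static inline pmd_t pmdp_get(pmd_t *pmdp)
	{
		return READ_ONCE(*pmdp);
	}
	#endif

Going through the accessor lets an architecture that needs more than a plain load (for example, to fold in bits held elsewhere) hook every reader in one place.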
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 6c39d42f16dc..532dee205c6e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -193,10 +193,10 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
struct fd f = fdget(fd);
int ret;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
- ret = vfs_fadvise(f.file, offset, len, advice);
+ ret = vfs_fadvise(fd_file(f), offset, len, advice);
fdput(f);
return ret;
diff --git a/mm/fail_page_alloc.c b/mm/fail_page_alloc.c
index 532851ce5132..7647096170e9 100644
--- a/mm/fail_page_alloc.c
+++ b/mm/fail_page_alloc.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/error-injection.h>
#include <linux/mm.h>
diff --git a/mm/failslab.c b/mm/failslab.c
index af16c2ed578f..c3901b136498 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/fault-inject.h>
#include <linux/error-injection.h>
+#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "slab.h"
diff --git a/mm/filemap.c b/mm/filemap.c
index 60a9cc593e9b..36d22968be9a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -46,6 +46,7 @@
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <linux/rcupdate_wait.h>
+#include <linux/sched/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"
@@ -112,8 +113,8 @@
* ->swap_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->i_pages lock (try_to_unmap_one)
- * ->lruvec->lru_lock (follow_page->mark_page_accessed)
- * ->lruvec->lru_lock (check_pte_range->isolate_lru_page)
+ * ->lruvec->lru_lock (follow_page_mask->mark_page_accessed)
+ * ->lruvec->lru_lock (check_pte_range->folio_isolate_lru)
* ->private_lock (folio_remove_rmap_pte->set_page_dirty)
* ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
* bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
@@ -530,7 +531,6 @@ static void __filemap_fdatawait_range(struct address_space *mapping,
struct folio *folio = fbatch.folios[i];
folio_wait_writeback(folio);
- folio_clear_error(folio);
}
folio_batch_release(&fbatch);
cond_resched();
@@ -859,6 +859,8 @@ noinline int __filemap_add_folio(struct address_space *mapping,
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
+ VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
+ folio);
mapping_set_update(&xas, mapping);
VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
@@ -1919,8 +1921,10 @@ repeat:
folio_wait_stable(folio);
no_page:
if (!folio && (fgp_flags & FGP_CREAT)) {
- unsigned order = FGF_GET_ORDER(fgp_flags);
+ unsigned int min_order = mapping_min_folio_order(mapping);
+ unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
int err;
+ index = mapping_align_index(mapping, index);
if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
gfp |= __GFP_WRITE;
@@ -1933,10 +1937,8 @@ no_page:
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
- if (!mapping_large_folio_support(mapping))
- order = 0;
- if (order > MAX_PAGECACHE_ORDER)
- order = MAX_PAGECACHE_ORDER;
+ if (order > mapping_max_folio_order(mapping))
+ order = mapping_max_folio_order(mapping);
/* If we're not aligned, allocate a smaller folio */
if (index & ((1UL << order) - 1))
order = __ffs(index);
@@ -1945,7 +1947,7 @@ no_page:
gfp_t alloc_gfp = gfp;
err = -ENOMEM;
- if (order > 0)
+ if (order > min_order)
alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
folio = filemap_alloc_folio(alloc_gfp, order);
if (!folio)
@@ -1960,7 +1962,7 @@ no_page:
break;
folio_put(folio);
folio = NULL;
- } while (order-- > 0);
+ } while (order-- > min_order);
if (err == -EEXIST)
goto repeat;
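
With min-order support, the allocation order is clamped into the mapping's [min, max] window, and the fallback loop now retries down to min_order instead of 0. A userspace sketch of the order selection; pick_order() and its parameters are illustrative, not kernel API:

	#include <stdio.h>

	/* Sketch: clamp the requested folio order into [min_order, max_order],
	 * then shrink it to the alignment of index if index is unaligned. */
	static unsigned int pick_order(unsigned long index, unsigned int requested,
				       unsigned int min_order, unsigned int max_order)
	{
		unsigned int order = requested > min_order ? requested : min_order;

		if (order > max_order)
			order = max_order;
		if (index & ((1UL << order) - 1))
			order = __builtin_ctzl(index);	/* the kernel uses __ffs() */
		return order;
	}

	int main(void)
	{
		/* index 12 is only aligned to 4 pages, so an order-4 request drops to 2 */
		printf("%u\n", pick_order(12, 4, 0, 9));
		return 0;
	}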
@@ -2047,17 +2049,20 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
if (!folio_batch_add(fbatch, folio))
break;
}
- rcu_read_unlock();
if (folio_batch_count(fbatch)) {
- unsigned long nr = 1;
+ unsigned long nr;
int idx = folio_batch_count(fbatch) - 1;
folio = fbatch->folios[idx];
if (!xa_is_value(folio))
nr = folio_nr_pages(folio);
- *start = indices[idx] + nr;
+ else
+ nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
+ *start = round_down(indices[idx] + nr, nr);
}
+ rcu_read_unlock();
+
return folio_batch_count(fbatch);
}
@@ -2089,10 +2094,17 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
rcu_read_lock();
while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
+ unsigned long base;
+ unsigned long nr;
+
if (!xa_is_value(folio)) {
- if (folio->index < *start)
+ nr = folio_nr_pages(folio);
+ base = folio->index;
+ /* Omit large folio which begins before the start */
+ if (base < *start)
goto put;
- if (folio_next_index(folio) - 1 > end)
+ /* Omit large folio which extends beyond the end */
+ if (base + nr - 1 > end)
goto put;
if (!folio_trylock(folio))
goto put;
@@ -2101,7 +2113,19 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
goto unlock;
VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
folio);
+ } else {
+ nr = 1 << xas_get_order(&xas);
+ base = xas.xa_index & ~(nr - 1);
+ /* Omit order>0 value which begins before the start */
+ if (base < *start)
+ continue;
+ /* Omit order>0 value which extends beyond the end */
+ if (base + nr - 1 > end)
+ break;
}
+
+ /* Update start now so that last update is correct on return */
+ *start = base + nr;
indices[fbatch->nr] = xas.xa_index;
if (!folio_batch_add(fbatch, folio))
break;
@@ -2113,15 +2137,6 @@ put:
}
rcu_read_unlock();
- if (folio_batch_count(fbatch)) {
- unsigned long nr = 1;
- int idx = folio_batch_count(fbatch) - 1;
-
- folio = fbatch->folios[idx];
- if (!xa_is_value(folio))
- nr = folio_nr_pages(folio);
- *start = indices[idx] + nr;
- }
return folio_batch_count(fbatch);
}
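
Both batch lookups now size a multi-order entry the same way: an entry of order k covers nr = 1 << k indices starting at the lookup index rounded down to an nr-aligned boundary, and *start advances past the whole entry. The arithmetic as a standalone sketch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int order = 4;
		unsigned long index = 0x1234;
		unsigned long nr = 1UL << order;	/* entry spans 16 indices */
		unsigned long base = index & ~(nr - 1);	/* aligned start of the entry */

		/* base = 0x1230; the next search position is base + nr = 0x1240 */
		printf("base=%#lx next=%#lx\n", base, base + nr);
		return 0;
	}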
@@ -2181,6 +2196,10 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
if (xa_is_value(folio))
goto update_start;
+ /* If we landed in the middle of a THP, continue at its end. */
+ if (xa_is_sibling(folio))
+ goto update_start;
+
if (!folio_try_get(folio))
goto retry;
@@ -2342,13 +2361,6 @@ static int filemap_read_folio(struct file *file, filler_t filler,
unsigned long pflags;
int error;
- /*
- * A previous I/O error may have been due to temporary failures,
- * eg. multipath errors. PG_error will be set again if read_folio
- * fails.
- */
- folio_clear_error(folio);
-
/* Start the actual read. The read will unlock the page. */
if (unlikely(workingset))
psi_memstall_enter(&pflags);
@@ -2449,13 +2461,15 @@ unlock_mapping:
}
static int filemap_create_folio(struct file *file,
- struct address_space *mapping, pgoff_t index,
+ struct address_space *mapping, loff_t pos,
struct folio_batch *fbatch)
{
struct folio *folio;
int error;
+ unsigned int min_order = mapping_min_folio_order(mapping);
+ pgoff_t index;
- folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
+ folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
if (!folio)
return -ENOMEM;
@@ -2473,6 +2487,7 @@ static int filemap_create_folio(struct file *file,
* well to keep locking rules simple.
*/
filemap_invalidate_lock_shared(mapping);
+ index = (pos >> (PAGE_SHIFT + min_order)) << min_order;
error = filemap_add_folio(mapping, folio, index,
mapping_gfp_constraint(mapping, GFP_KERNEL));
if (error == -EEXIST)
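
Passing the byte position instead of a page index lets filemap_create_folio() derive an index aligned to the minimum folio order: shifting pos right by PAGE_SHIFT + min_order and back left by min_order clears the low index bits. A worked sketch, with example values of our choosing:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned int min_order = 2;	/* example: 16 KiB minimum folio */
		long long pos = 0x5000;		/* byte offset into the file */
		unsigned long index = (pos >> (PAGE_SHIFT + min_order)) << min_order;

		/* page index 5 rounds down to index 4, the folio's aligned start */
		printf("index = %lu\n", index);
		return 0;
	}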
@@ -2514,6 +2529,7 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count,
pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
pgoff_t last_index;
struct folio *folio;
+ unsigned int flags;
int err = 0;
/* "last_index" is the index of the page beyond the end of the read */
@@ -2526,15 +2542,18 @@ retry:
if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & IOCB_NOIO)
return -EAGAIN;
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ flags = memalloc_noio_save();
page_cache_sync_readahead(mapping, ra, filp, index,
last_index - index);
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ memalloc_noio_restore(flags);
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
}
if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
return -EAGAIN;
- err = filemap_create_folio(filp, mapping,
- iocb->ki_pos >> PAGE_SHIFT, fbatch);
+ err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch);
if (err == AOP_TRUNCATED_PAGE)
goto retry;
return err;
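
The IOCB_NOWAIT bracket above follows the scoped-NOIO pattern from <linux/sched/mm.h> (hence the new include at the top of this file): between save and restore, the task's allocations implicitly behave as GFP_NOIO, so reclaim entered from the readahead allocations cannot issue and wait on I/O. The pattern in isolation, as a kernel-context sketch:

	unsigned int flags;

	flags = memalloc_noio_save();	/* this task's allocations now imply GFP_NOIO */
	/* ... allocate and kick off readahead; reclaim won't block on I/O ... */
	memalloc_noio_restore(flags);	/* restores the previous, possibly nested, state */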
@@ -2556,6 +2575,7 @@ retry:
goto err;
}
+ trace_mm_filemap_get_pages(mapping, index, last_index - 1);
return 0;
err:
if (err < 0)
@@ -2996,7 +3016,7 @@ unlock:
static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
{
if (xa_is_value(folio))
- return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
+ return PAGE_SIZE << xas_get_order(xas);
return folio_size(folio);
}
@@ -3294,6 +3314,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
if (unlikely(index >= max_idx))
return VM_FAULT_SIGBUS;
+ trace_mm_filemap_fault(mapping, index);
+
/*
* Do we have something in the page cache already?
*/
@@ -3611,7 +3633,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
struct address_space *mapping = file->f_mapping;
- pgoff_t last_pgoff = start_pgoff;
+ pgoff_t file_end, last_pgoff = start_pgoff;
unsigned long addr;
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct folio *folio;
@@ -3637,6 +3659,10 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
goto out;
}
+ file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
+ if (end_pgoff > file_end)
+ end_pgoff = file_end;
+
folio_type = mm_counter_file(folio);
do {
unsigned long end;
@@ -3660,6 +3686,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
add_mm_counter(vma->vm_mm, folio_type, rss);
pte_unmap_unlock(vmf->pte, vmf->ptl);
+ trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
out:
rcu_read_unlock();
@@ -3757,9 +3784,11 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
repeat:
folio = filemap_get_folio(mapping, index);
if (IS_ERR(folio)) {
- folio = filemap_alloc_folio(gfp, 0);
+ folio = filemap_alloc_folio(gfp,
+ mapping_min_folio_order(mapping));
if (!folio)
return ERR_PTR(-ENOMEM);
+ index = mapping_align_index(mapping, index);
err = filemap_add_folio(mapping, folio, index, gfp);
if (unlikely(err)) {
folio_put(folio);
@@ -4287,7 +4316,7 @@ static void filemap_cachestat(struct address_space *mapping,
if (xas_retry(&xas, folio))
continue;
- order = xa_get_order(xas.xa, xas.xa_index);
+ order = xas_get_order(&xas);
nr_pages = 1 << order;
folio_first_index = round_down(xas.xa_index, 1 << order);
folio_last_index = folio_first_index + nr_pages - 1;
@@ -4398,7 +4427,7 @@ SYSCALL_DEFINE4(cachestat, unsigned int, fd,
struct cachestat cs;
pgoff_t first_index, last_index;
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
if (copy_from_user(&csr, cstat_range,
@@ -4408,7 +4437,7 @@ SYSCALL_DEFINE4(cachestat, unsigned int, fd,
}
/* hugetlbfs is not supported */
- if (is_file_hugepages(f.file)) {
+ if (is_file_hugepages(fd_file(f))) {
fdput(f);
return -EOPNOTSUPP;
}
@@ -4422,7 +4451,7 @@ SYSCALL_DEFINE4(cachestat, unsigned int, fd,
last_index =
csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
memset(&cs, 0, sizeof(struct cachestat));
- mapping = f.file->f_mapping;
+ mapping = fd_file(f)->f_mapping;
filemap_cachestat(mapping, first_index, last_index, &cs);
fdput(f);
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index f05906006b3c..80746182e9e8 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -92,15 +92,3 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
-
-bool isolate_lru_page(struct page *page)
-{
- if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
- return false;
- return folio_isolate_lru((struct folio *)page);
-}
-
-void putback_lru_page(struct page *page)
-{
- folio_putback_lru(page_folio(page));
-}
diff --git a/mm/gup.c b/mm/gup.c
index 02c46ae33028..a82890b46a36 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -832,6 +832,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
struct dev_pagemap **pgmap)
{
struct mm_struct *mm = vma->vm_mm;
+ struct folio *folio;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
@@ -889,6 +890,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
goto out;
}
}
+ folio = page_folio(page);
if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
page = ERR_PTR(-EMLINK);
@@ -899,7 +901,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
!PageAnonExclusive(page), page);
/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
- ret = try_grab_folio(page_folio(page), 1, flags);
+ ret = try_grab_folio(folio, 1, flags);
if (unlikely(ret)) {
page = ERR_PTR(ret);
goto out;
@@ -911,7 +913,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
* Documentation/core-api/pin_user_pages.rst for details.
*/
if (flags & FOLL_PIN) {
- ret = arch_make_page_accessible(page);
+ ret = arch_make_folio_accessible(folio);
if (ret) {
unpin_user_page(page);
page = ERR_PTR(ret);
@@ -1083,28 +1085,6 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
return page;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags)
-{
- struct follow_page_context ctx = { NULL };
- struct page *page;
-
- if (vma_is_secretmem(vma))
- return NULL;
-
- if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
- return NULL;
-
- /*
- * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect
- * to fail on PROT_NONE-mapped pages.
- */
- page = follow_page_mask(vma, address, foll_flags, &ctx);
- if (ctx.pgmap)
- put_dev_pagemap(ctx.pgmap);
- return page;
-}
-
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
@@ -1166,19 +1146,19 @@ unmap:
* to 0 and -EBUSY returned.
*/
static int faultin_page(struct vm_area_struct *vma,
- unsigned long address, unsigned int *flags, bool unshare,
+ unsigned long address, unsigned int flags, bool unshare,
int *locked)
{
unsigned int fault_flags = 0;
vm_fault_t ret;
- if (*flags & FOLL_NOFAULT)
+ if (flags & FOLL_NOFAULT)
return -EFAULT;
- if (*flags & FOLL_WRITE)
+ if (flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
- if (*flags & FOLL_REMOTE)
+ if (flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
- if (*flags & FOLL_UNLOCKABLE) {
+ if (flags & FOLL_UNLOCKABLE) {
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
/*
* FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
@@ -1186,12 +1166,12 @@ static int faultin_page(struct vm_area_struct *vma,
* That's because some callers may not be prepared to
* handle early exits caused by non-fatal signals.
*/
- if (*flags & FOLL_INTERRUPTIBLE)
+ if (flags & FOLL_INTERRUPTIBLE)
fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
}
- if (*flags & FOLL_NOWAIT)
+ if (flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
- if (*flags & FOLL_TRIED) {
+ if (flags & FOLL_TRIED) {
/*
* Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
* can co-exist
@@ -1225,7 +1205,7 @@ static int faultin_page(struct vm_area_struct *vma,
}
if (ret & VM_FAULT_ERROR) {
- int err = vm_fault_to_errno(ret, *flags);
+ int err = vm_fault_to_errno(ret, flags);
if (err)
return err;
@@ -1450,7 +1430,6 @@ static long __get_user_pages(struct mm_struct *mm,
do {
struct page *page;
- unsigned int foll_flags = gup_flags;
unsigned int page_increm;
/* first iteration or cross vma bound */
@@ -1501,9 +1480,9 @@ retry:
}
cond_resched();
- page = follow_page_mask(vma, start, foll_flags, &ctx);
+ page = follow_page_mask(vma, start, gup_flags, &ctx);
if (!page || PTR_ERR(page) == -EMLINK) {
- ret = faultin_page(vma, start, &foll_flags,
+ ret = faultin_page(vma, start, gup_flags,
PTR_ERR(page) == -EMLINK, locked);
switch (ret) {
case 0:
@@ -1560,13 +1539,12 @@ next_page:
* large folio, this should never fail.
*/
if (try_grab_folio(folio, page_increm - 1,
- foll_flags)) {
+ gup_flags)) {
/*
* Release the 1st page ref if the
* folio is problematic, fail hard.
*/
- gup_put_folio(folio, 1,
- foll_flags);
+ gup_put_folio(folio, 1, gup_flags);
ret = -EFAULT;
goto out;
}
@@ -2370,7 +2348,7 @@ static int migrate_longterm_unpinnable_folios(
folio_get(folio);
gup_put_folio(folio, 1, FOLL_PIN);
- if (migrate_device_coherent_page(&folio->page)) {
+ if (migrate_device_coherent_folio(folio)) {
ret = -EBUSY;
goto err;
}
@@ -2532,7 +2510,7 @@ static bool is_valid_gup_args(struct page **pages, int *locked,
* These flags not allowed to be specified externally to the gup
* interfaces:
* - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
- * - FOLL_REMOTE is internal only and used on follow_page()
+ * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote()
* - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
*/
if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
@@ -2934,7 +2912,7 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
* details.
*/
if (flags & FOLL_PIN) {
- ret = arch_make_page_accessible(page);
+ ret = arch_make_folio_accessible(folio);
if (ret) {
gup_put_folio(folio, 1, flags);
goto pte_unmap;
@@ -3073,6 +3051,9 @@ static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
return 0;
+ if (pmd_special(orig))
+ return 0;
+
if (pmd_devmap(orig)) {
if (unlikely(flags & FOLL_LONGTERM))
return 0;
@@ -3117,6 +3098,9 @@ static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
if (!pud_access_permitted(orig, flags & FOLL_WRITE))
return 0;
+ if (pud_special(orig))
+ return 0;
+
if (pud_devmap(orig)) {
if (unlikely(flags & FOLL_LONGTERM))
return 0;
@@ -3716,6 +3700,7 @@ long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
ret = PTR_ERR(folio);
if (ret != -EEXIST)
goto err;
+ folio = NULL;
}
}
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 67c86a5d64a6..3ca89e0279a7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -40,6 +40,7 @@
#include <linux/memory-tiers.h>
#include <linux/compat.h>
#include <linux/pgalloc_tag.h>
+#include <linux/pagewalk.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -73,6 +74,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc);
+static bool split_underused_thp = true;
static atomic_t huge_zero_refcount;
struct folio *huge_zero_folio __read_mostly;
@@ -80,6 +82,7 @@ unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;
+static bool anon_orders_configured __initdata;
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
unsigned long vm_flags,
@@ -94,8 +97,8 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
/* Check the intersection of requested and supported orders. */
if (vma_is_anonymous(vma))
supported_orders = THP_ORDERS_ALL_ANON;
- else if (vma_is_dax(vma))
- supported_orders = THP_ORDERS_ALL_FILE_DAX;
+ else if (vma_is_special_huge(vma))
+ supported_orders = THP_ORDERS_ALL_SPECIAL;
else
supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
@@ -159,15 +162,10 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
* Must be done before hugepage flags check since shmem has its
* own flags.
*/
- if (!in_pf && shmem_file(vma->vm_file)) {
- bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
- !enforce_sysfs, vma->vm_mm, vm_flags);
-
- if (!vma_is_anon_shmem(vma))
- return global_huge ? orders : 0;
+ if (!in_pf && shmem_file(vma->vm_file))
return shmem_allowable_huge_orders(file_inode(vma->vm_file),
- vma, vma->vm_pgoff, global_huge);
- }
+ vma, vma->vm_pgoff, 0,
+ !enforce_sysfs);
if (!vma_is_anonymous(vma)) {
/*
@@ -220,6 +218,8 @@ retry:
count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
return false;
}
+ /* Ensure zero folio won't have large_rmappable flag set. */
+ folio_clear_large_rmappable(zero_folio);
preempt_disable();
if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
preempt_enable();
@@ -443,6 +443,27 @@ static ssize_t hpage_pmd_size_show(struct kobject *kobj,
static struct kobj_attribute hpage_pmd_size_attr =
__ATTR_RO(hpage_pmd_size);
+static ssize_t split_underused_thp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", split_underused_thp);
+}
+
+static ssize_t split_underused_thp_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err = kstrtobool(buf, &split_underused_thp);
+
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static struct kobj_attribute split_underused_thp_attr = __ATTR(
+ shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
+
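
This adds a global sysfs toggle for the underused-THP shrinking introduced further down; writing a boolean enables or disables splitting of underused huge pages, for example (assuming the usual transparent_hugepage sysfs root):

	echo 0 > /sys/kernel/mm/transparent_hugepage/shrink_underused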
static struct attribute *hugepage_attr[] = {
&enabled_attr.attr,
&defrag_attr.attr,
@@ -451,6 +472,7 @@ static struct attribute *hugepage_attr[] = {
#ifdef CONFIG_SHMEM
&shmem_enabled_attr.attr,
#endif
+ &split_underused_thp_attr.attr,
NULL,
};
@@ -463,8 +485,8 @@ static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);
-static ssize_t thpsize_enabled_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t anon_enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
int order = to_thpsize(kobj)->order;
const char *output;
@@ -481,9 +503,9 @@ static ssize_t thpsize_enabled_show(struct kobject *kobj,
return sysfs_emit(buf, "%s\n", output);
}
-static ssize_t thpsize_enabled_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t anon_enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int order = to_thpsize(kobj)->order;
ssize_t ret = count;
@@ -525,19 +547,35 @@ static ssize_t thpsize_enabled_store(struct kobject *kobj,
return ret;
}
-static struct kobj_attribute thpsize_enabled_attr =
- __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
+static struct kobj_attribute anon_enabled_attr =
+ __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
-static struct attribute *thpsize_attrs[] = {
- &thpsize_enabled_attr.attr,
+static struct attribute *anon_ctrl_attrs[] = {
+ &anon_enabled_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group anon_ctrl_attr_grp = {
+ .attrs = anon_ctrl_attrs,
+};
+
+static struct attribute *file_ctrl_attrs[] = {
#ifdef CONFIG_SHMEM
&thpsize_shmem_enabled_attr.attr,
#endif
NULL,
};
-static const struct attribute_group thpsize_attr_group = {
- .attrs = thpsize_attrs,
+static const struct attribute_group file_ctrl_attr_grp = {
+ .attrs = file_ctrl_attrs,
+};
+
+static struct attribute *any_ctrl_attrs[] = {
+ NULL,
+};
+
+static const struct attribute_group any_ctrl_attr_grp = {
+ .attrs = any_ctrl_attrs,
};
static const struct kobj_type thpsize_ktype = {
@@ -576,64 +614,136 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
+#ifdef CONFIG_SHMEM
DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
+#endif
DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
+DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
+DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
-static struct attribute *stats_attrs[] = {
+static struct attribute *anon_stats_attrs[] = {
&anon_fault_alloc_attr.attr,
&anon_fault_fallback_attr.attr,
&anon_fault_fallback_charge_attr.attr,
+#ifndef CONFIG_SHMEM
&swpout_attr.attr,
&swpout_fallback_attr.attr,
+#endif
+ &split_deferred_attr.attr,
+ &nr_anon_attr.attr,
+ &nr_anon_partially_mapped_attr.attr,
+ NULL,
+};
+
+static struct attribute_group anon_stats_attr_grp = {
+ .name = "stats",
+ .attrs = anon_stats_attrs,
+};
+
+static struct attribute *file_stats_attrs[] = {
+#ifdef CONFIG_SHMEM
&shmem_alloc_attr.attr,
&shmem_fallback_attr.attr,
&shmem_fallback_charge_attr.attr,
+#endif
+ NULL,
+};
+
+static struct attribute_group file_stats_attr_grp = {
+ .name = "stats",
+ .attrs = file_stats_attrs,
+};
+
+static struct attribute *any_stats_attrs[] = {
+#ifdef CONFIG_SHMEM
+ &swpout_attr.attr,
+ &swpout_fallback_attr.attr,
+#endif
&split_attr.attr,
&split_failed_attr.attr,
- &split_deferred_attr.attr,
NULL,
};
-static struct attribute_group stats_attr_group = {
+static struct attribute_group any_stats_attr_grp = {
.name = "stats",
- .attrs = stats_attrs,
+ .attrs = any_stats_attrs,
};
+static int sysfs_add_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ int ret = -ENOENT;
+
+ /*
+ * If the group is named, try to merge first, assuming the subdirectory
+ * was already created. This avoids the warning emitted by
+ * sysfs_create_group() if the directory already exists.
+ */
+ if (grp->name)
+ ret = sysfs_merge_group(kobj, grp);
+ if (ret)
+ ret = sysfs_create_group(kobj, grp);
+
+ return ret;
+}
+
static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
unsigned long size = (PAGE_SIZE << order) / SZ_1K;
struct thpsize *thpsize;
- int ret;
+ int ret = -ENOMEM;
thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
if (!thpsize)
- return ERR_PTR(-ENOMEM);
+ goto err;
+
+ thpsize->order = order;
ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
"hugepages-%lukB", size);
if (ret) {
kfree(thpsize);
- return ERR_PTR(ret);
+ goto err;
}
- ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
- if (ret) {
- kobject_put(&thpsize->kobj);
- return ERR_PTR(ret);
+
+ ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
+ if (ret)
+ goto err_put;
+
+ if (BIT(order) & THP_ORDERS_ALL_ANON) {
+ ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
+ if (ret)
+ goto err_put;
}
- ret = sysfs_create_group(&thpsize->kobj, &stats_attr_group);
- if (ret) {
- kobject_put(&thpsize->kobj);
- return ERR_PTR(ret);
+ if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
+ ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
+ if (ret)
+ goto err_put;
+
+ ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
+ if (ret)
+ goto err_put;
}
- thpsize->order = order;
return thpsize;
+err_put:
+ kobject_put(&thpsize->kobj);
+err:
+ return ERR_PTR(ret);
}
static void thpsize_release(struct kobject *kobj)
@@ -653,7 +763,8 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
* disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
* constant so we have to do this here.
*/
- huge_anon_orders_inherit = BIT(PMD_ORDER);
+ if (!anon_orders_configured)
+ huge_anon_orders_inherit = BIT(PMD_ORDER);
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!*hugepage_kobj)) {
@@ -673,7 +784,7 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
goto remove_hp_group;
}
- orders = THP_ORDERS_ALL_ANON;
+ orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
order = highest_order(orders);
while (orders) {
thpsize = thpsize_create(order, *hugepage_kobj);
@@ -838,6 +949,100 @@ out:
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
+static inline int get_order_from_str(const char *size_str)
+{
+ unsigned long size;
+ char *endptr;
+ int order;
+
+ size = memparse(size_str, &endptr);
+
+ if (!is_power_of_2(size))
+ goto err;
+ order = get_order(size);
+ if (BIT(order) & ~THP_ORDERS_ALL_ANON)
+ goto err;
+
+ return order;
+err:
+ pr_err("invalid size %s in thp_anon boot parameter\n", size_str);
+ return -EINVAL;
+}
+
+static char str_dup[PAGE_SIZE] __initdata;
+static int __init setup_thp_anon(char *str)
+{
+ char *token, *range, *policy, *subtoken;
+ unsigned long always, inherit, madvise;
+ char *start_size, *end_size;
+ int start, end, nr;
+ char *p;
+
+ if (!str || strlen(str) + 1 > PAGE_SIZE)
+ goto err;
+ strcpy(str_dup, str);
+
+ always = huge_anon_orders_always;
+ madvise = huge_anon_orders_madvise;
+ inherit = huge_anon_orders_inherit;
+ p = str_dup;
+ while ((token = strsep(&p, ";")) != NULL) {
+ range = strsep(&token, ":");
+ policy = token;
+
+ if (!policy)
+ goto err;
+
+ while ((subtoken = strsep(&range, ",")) != NULL) {
+ if (strchr(subtoken, '-')) {
+ start_size = strsep(&subtoken, "-");
+ end_size = subtoken;
+
+ start = get_order_from_str(start_size);
+ end = get_order_from_str(end_size);
+ } else {
+ start = end = get_order_from_str(subtoken);
+ }
+
+ if (start < 0 || end < 0 || start > end)
+ goto err;
+
+ nr = end - start + 1;
+ if (!strcmp(policy, "always")) {
+ bitmap_set(&always, start, nr);
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ } else if (!strcmp(policy, "madvise")) {
+ bitmap_set(&madvise, start, nr);
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else if (!strcmp(policy, "inherit")) {
+ bitmap_set(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else if (!strcmp(policy, "never")) {
+ bitmap_clear(&inherit, start, nr);
+ bitmap_clear(&madvise, start, nr);
+ bitmap_clear(&always, start, nr);
+ } else {
+ pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
+ goto err;
+ }
+ }
+ }
+
+ huge_anon_orders_always = always;
+ huge_anon_orders_madvise = madvise;
+ huge_anon_orders_inherit = inherit;
+ anon_orders_configured = true;
+ return 1;
+
+err:
+ pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
+ return 0;
+}
+__setup("thp_anon=", setup_thp_anon);
+
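
The parser accepts semicolon-separated <range>:<policy> groups, where each range is a comma-separated list of sizes or size spans parsed by memparse(), and the policy is one of always, madvise, inherit, or never. A command line of the following shape should be accepted by the code above (exact sizes depend on which anonymous THP orders the architecture supports):

	thp_anon=16K-64K:always;128K,512K:inherit;2M:never

Later groups win for overlapping sizes, since each policy sets its own bits for the range and clears the same range in the other bitmaps.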
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
@@ -1007,6 +1212,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
mm_inc_nr_ptes(vma->vm_mm);
+ deferred_split_folio(folio, false);
spin_unlock(vmf->ptl);
count_vm_event(THP_FAULT_ALLOC);
count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
@@ -1166,6 +1372,8 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pmd_mkdevmap(entry);
+ else
+ entry = pmd_mkspecial(entry);
if (write) {
entry = pmd_mkyoung(pmd_mkdirty(entry));
entry = maybe_pmd_mkwrite(entry, vma);
@@ -1249,10 +1457,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
ptl = pud_lock(mm, pud);
if (!pud_none(*pud)) {
if (write) {
- if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
- WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+ if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn)))
goto out_unlock;
- }
entry = pud_mkyoung(*pud);
entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
if (pudp_set_access_flags(vma, addr, pud, entry, 1))
@@ -1264,6 +1470,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
+ else
+ entry = pud_mkspecial(entry);
if (write) {
entry = pud_mkyoung(pud_mkdirty(entry));
entry = maybe_pud_mkwrite(entry, vma);
@@ -1377,6 +1585,24 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pgtable_t pgtable = NULL;
int ret = -ENOMEM;
+ pmd = pmdp_get_lockless(src_pmd);
+ if (unlikely(pmd_special(pmd))) {
+ dst_ptl = pmd_lock(dst_mm, dst_pmd);
+ src_ptl = pmd_lockptr(src_mm, src_pmd);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ /*
+ * No need to recheck the pmd, it can't change with write
+ * mmap lock held here.
+ *
+		 * Meanwhile, make sure this is not a CoW VMA with a writable
+		 * mapping; otherwise either an anonymous page wrongly had the
+		 * special bit applied, or we wrongly made a PRIVATE mapping
+		 * writable to the backing MMIO.
+ */
+ VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
+ goto set_pmd;
+ }
+
/* Skip if can be re-fill on fault */
if (!vma_is_anonymous(dst_vma))
return 0;
@@ -1458,7 +1684,9 @@ out_zero_page:
pmdp_set_wrprotect(src_mm, addr, src_pmd);
if (!userfaultfd_wp(dst_vma))
pmd = pmd_clear_uffd_wp(pmd);
- pmd = pmd_mkold(pmd_wrprotect(pmd));
+ pmd = pmd_wrprotect(pmd);
+set_pmd:
+ pmd = pmd_mkold(pmd);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
ret = 0;
@@ -1501,20 +1729,14 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
goto out_unlock;
/*
- * When page table lock is held, the huge zero pud should not be
- * under splitting since we don't split the page itself, only pud to
- * a page table.
- */
- if (is_huge_zero_pud(pud)) {
- /* No huge zero pud yet */
- }
-
- /*
* TODO: once we support anonymous pages, use
* folio_try_dup_anon_rmap_*() and split if duplicating fails.
*/
- pudp_set_wrprotect(src_mm, addr, src_pud);
- pud = pud_mkold(pud_wrprotect(pud));
+ if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
+ pudp_set_wrprotect(src_mm, addr, src_pud);
+ pud = pud_wrprotect(pud);
+ }
+ pud = pud_mkold(pud);
set_pud_at(dst_mm, addr, dst_pud, pud);
ret = 0;
@@ -1673,22 +1895,23 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- pmd_t oldpmd = vmf->orig_pmd;
- pmd_t pmd;
struct folio *folio;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int nid = NUMA_NO_NODE;
- int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+ int target_nid, last_cpupid;
+ pmd_t pmd, old_pmd;
bool writable = false;
int flags = 0;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ old_pmd = pmdp_get(vmf->pmd);
+
+ if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
spin_unlock(vmf->ptl);
return 0;
}
- pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_modify(old_pmd, vma->vm_page_prot);
/*
* Detect now whether the PMD could be writable; this information
@@ -1703,18 +1926,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
if (!folio)
goto out_map;
- /* See similar comment in do_numa_page for explanation */
- if (!writable)
- flags |= TNF_NO_GROUP;
-
nid = folio_nid(folio);
- /*
- * For memory tiering mode, cpupid of slow memory page is used
- * to record page access time. So use default value.
- */
- if (node_is_toptier(nid))
- last_cpupid = folio_last_cpupid(folio);
- target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
+
+ target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
+ &last_cpupid);
if (target_nid == NUMA_NO_NODE)
goto out_map;
if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
@@ -1734,13 +1949,13 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
flags |= TNF_MIGRATE_FAIL;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
spin_unlock(vmf->ptl);
return 0;
}
out_map:
/* Restore the PMD */
- pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
pmd = pmd_mkyoung(pmd);
if (writable)
pmd = pmd_mkwrite(pmd, vma);
@@ -2062,8 +2277,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
toptier)
goto unlock;
- if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
- !toptier)
+ if (folio_use_access_time(folio))
folio_xchg_access_time(folio,
jiffies_to_msecs(jiffies));
}
@@ -2116,6 +2330,53 @@ unlock:
return ret;
}
+/*
+ * Returns:
+ *
+ * - 0: if pud leaf changed from under us
+ * - 1: if pud can be skipped
+ * - HPAGE_PUD_NR: if pud was successfully processed
+ */
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pud_t oldpud, entry;
+ spinlock_t *ptl;
+
+ tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
+
+ /* NUMA balancing doesn't apply to dax */
+ if (cp_flags & MM_CP_PROT_NUMA)
+ return 1;
+
+ /*
+	 * Huge userfault-wp entries only work with anonymous memory, and we
+	 * don't have anonymous PUDs yet.
+ */
+ if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
+ return 1;
+
+ ptl = __pud_trans_huge_lock(pudp, vma);
+ if (!ptl)
+ return 0;
+
+ /*
+ * Can't clear PUD or it can race with concurrent zapping. See
+ * change_huge_pmd().
+ */
+ oldpud = pudp_invalidate(vma, addr, pudp);
+ entry = pud_modify(oldpud, newprot);
+ set_pud_at(mm, addr, pudp, entry);
+ tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
+
+ spin_unlock(ptl);
+ return HPAGE_PUD_NR;
+}
+#endif
+
#ifdef CONFIG_USERFAULTFD
/*
* The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
@@ -2295,12 +2556,14 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
pud_t *pud, unsigned long addr)
{
spinlock_t *ptl;
+ pud_t orig_pud;
ptl = __pud_trans_huge_lock(pud, vma);
if (!ptl)
return 0;
- pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
+ orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
+ arch_check_zapped_pud(vma, orig_pud);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
if (vma_is_special_huge(vma)) {
spin_unlock(ptl);
@@ -2344,6 +2607,11 @@ out:
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(&range);
}
+#else
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
@@ -2778,7 +3046,7 @@ bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
return false;
}
-static void remap_page(struct folio *folio, unsigned long nr)
+static void remap_page(struct folio *folio, unsigned long nr, int flags)
{
int i = 0;
@@ -2786,7 +3054,7 @@ static void remap_page(struct folio *folio, unsigned long nr)
if (!folio_test_anon(folio))
return;
for (;;) {
- remove_migration_ptes(folio, folio, true);
+ remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
i += folio_nr_pages(folio);
if (i >= nr)
break;
@@ -2794,25 +3062,25 @@ static void remap_page(struct folio *folio, unsigned long nr)
}
}
-static void lru_add_page_tail(struct page *head, struct page *tail,
+static void lru_add_page_tail(struct folio *folio, struct page *tail,
struct lruvec *lruvec, struct list_head *list)
{
- VM_BUG_ON_PAGE(!PageHead(head), head);
- VM_BUG_ON_PAGE(PageLRU(tail), head);
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ VM_BUG_ON_FOLIO(PageLRU(tail), folio);
lockdep_assert_held(&lruvec->lru_lock);
if (list) {
/* page reclaim is reclaiming a huge page */
- VM_WARN_ON(PageLRU(head));
+ VM_WARN_ON(folio_test_lru(folio));
get_page(tail);
list_add_tail(&tail->lru, list);
} else {
/* head is still on lru (and we have it frozen) */
- VM_WARN_ON(!PageLRU(head));
- if (PageUnevictable(tail))
+ VM_WARN_ON(!folio_test_lru(folio));
+ if (folio_test_unevictable(folio))
tail->mlock_count = 0;
else
- list_add_tail(&tail->lru, &head->lru);
+ list_add_tail(&tail->lru, &folio->lru);
SetPageLRU(tail);
}
}
@@ -2855,8 +3123,10 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
(1L << PG_workingset) |
(1L << PG_locked) |
(1L << PG_unevictable) |
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
(1L << PG_arch_2) |
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
(1L << PG_arch_3) |
#endif
(1L << PG_dirty) |
@@ -2911,7 +3181,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
* pages to show after the currently processed elements - e.g.
* migrate_pages
*/
- lru_add_page_tail(head, page_tail, lruvec, list);
+ lru_add_page_tail(folio, page_tail, lruvec, list);
}
static void __split_huge_page(struct page *page, struct list_head *list,
@@ -2974,7 +3244,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
/* Caller disabled irqs, so they are still disabled here */
split_page_owner(head, order, new_order);
- pgalloc_tag_split(head, 1 << order);
+ pgalloc_tag_split(folio, order, new_order);
/* See comment in __split_huge_page_tail() */
if (folio_test_anon(folio)) {
@@ -2994,7 +3264,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
if (nr_dropped)
shmem_uncharge(folio->mapping->host, nr_dropped);
- remap_page(folio, nr);
+ remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);
/*
* set page to its compound_head when split to non order-0 pages, so
@@ -3023,7 +3293,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
}
/* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int *pextra_pins)
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
int extra_pins;
@@ -3035,7 +3305,8 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
extra_pins = folio_nr_pages(folio);
if (pextra_pins)
*pextra_pins = extra_pins;
- return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
+ return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
+ caller_pins;
}
/*
@@ -3079,6 +3350,9 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
* released, or if some unexpected race happened (e.g., anon VMA disappeared,
* truncation).
*
+ * For non-anonymous folios, callers should ensure that the new order
+ * respects the mapping's min-order, if one is set.
+ *
* Returns -EINVAL when trying to split to an order that is incompatible
* with the folio. Splitting to order 0 is compatible with all folios.
*/
@@ -3089,8 +3363,9 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
struct deferred_split *ds_queue = get_deferred_split_queue(folio);
/* reset xarray order to new order after split */
XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
- struct anon_vma *anon_vma = NULL;
+ bool is_anon = folio_test_anon(folio);
struct address_space *mapping = NULL;
+ struct anon_vma *anon_vma = NULL;
int order = folio_order(folio);
int extra_pins, ret;
pgoff_t end;
@@ -3102,7 +3377,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (new_order >= folio_order(folio))
return -EINVAL;
- if (folio_test_anon(folio)) {
+ if (is_anon) {
/* order-1 is not supported for anonymous THP. */
if (new_order == 1) {
VM_WARN_ONCE(1, "Cannot split to order-1 folio");
@@ -3142,7 +3417,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (folio_test_writeback(folio))
return -EBUSY;
- if (folio_test_anon(folio)) {
+ if (is_anon) {
/*
* The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma disappearing so we first we take a
@@ -3160,6 +3435,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
mapping = NULL;
anon_vma_lock_write(anon_vma);
} else {
+ unsigned int min_order;
gfp_t gfp;
mapping = folio->mapping;
@@ -3170,6 +3446,14 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
goto out;
}
+ min_order = mapping_min_folio_order(folio->mapping);
+ if (new_order < min_order) {
+ VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
+ min_order);
+ ret = -EINVAL;
+ goto out;
+ }
+
gfp = current_gfp_context(mapping_gfp_mask(mapping) &
GFP_RECLAIM_MASK);
@@ -3203,7 +3487,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
* Racy check if we can split the page, before unmap_folio() will
* split PMDs
*/
- if (!can_split_folio(folio, &extra_pins)) {
+ if (!can_split_folio(folio, 1, &extra_pins)) {
ret = -EAGAIN;
goto out_unlock;
}
@@ -3229,6 +3513,11 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (folio_order(folio) > 1 &&
!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
+ if (folio_test_partially_mapped(folio)) {
+ __folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
/*
* Reinitialize page_deferred_list after removing the
* page from the split_queue, otherwise a subsequent
@@ -3255,6 +3544,10 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
}
}
+ if (is_anon) {
+ mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+ mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order));
+ }
__split_huge_page(page, list, end, new_order);
ret = 0;
} else {
@@ -3263,7 +3556,7 @@ fail:
if (mapping)
xas_unlock(&xas);
local_irq_enable();
- remap_page(folio, folio_nr_pages(folio));
+ remap_page(folio, folio_nr_pages(folio), 0);
ret = -EAGAIN;
}
@@ -3282,6 +3575,30 @@ out:
return ret;
}
+int min_order_for_split(struct folio *folio)
+{
+ if (folio_test_anon(folio))
+ return 0;
+
+ if (!folio->mapping) {
+ if (folio_test_pmd_mappable(folio))
+ count_vm_event(THP_SPLIT_PAGE_FAILED);
+ return -EBUSY;
+ }
+
+ return mapping_min_folio_order(folio->mapping);
+}
+
+int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ int ret = min_order_for_split(folio);
+
+ if (ret < 0)
+ return ret;
+
+ return split_huge_page_to_list_to_order(&folio->page, list, ret);
+}
+
void __folio_undo_large_rmappable(struct folio *folio)
{
struct deferred_split *ds_queue;
@@ -3291,12 +3608,18 @@ void __folio_undo_large_rmappable(struct folio *folio)
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
+ if (folio_test_partially_mapped(folio)) {
+ __folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
list_del_init(&folio->_deferred_list);
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
-void deferred_split_folio(struct folio *folio)
+/* partially_mapped=false won't clear PG_partially_mapped folio flag */
+void deferred_split_folio(struct folio *folio, bool partially_mapped)
{
struct deferred_split *ds_queue = get_deferred_split_queue(folio);
#ifdef CONFIG_MEMCG
@@ -3311,6 +3634,9 @@ void deferred_split_folio(struct folio *folio)
if (folio_order(folio) <= 1)
return;
+ if (!partially_mapped && !split_underused_thp)
+ return;
+
/*
* The try_to_unmap() in page reclaim path might reach here too,
* this may cause a race condition to corrupt deferred split queue.
@@ -3324,14 +3650,21 @@ void deferred_split_folio(struct folio *folio)
if (folio_test_swapcache(folio))
return;
- if (!list_empty(&folio->_deferred_list))
- return;
-
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ if (partially_mapped) {
+ if (!folio_test_partially_mapped(folio)) {
+ __folio_set_partially_mapped(folio);
+ if (folio_test_pmd_mappable(folio))
+ count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+ count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
+
+ }
+ } else {
+ /* partially mapped folios cannot become non-partially mapped */
+ VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
+ }
if (list_empty(&folio->_deferred_list)) {
- if (folio_test_pmd_mappable(folio))
- count_vm_event(THP_DEFERRED_SPLIT_PAGE);
- count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
@@ -3356,6 +3689,39 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
return READ_ONCE(ds_queue->split_queue_len);
}
+static bool thp_underused(struct folio *folio)
+{
+ int num_zero_pages = 0, num_filled_pages = 0;
+ void *kaddr;
+ int i;
+
+ if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
+ return false;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
+ if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
+ num_zero_pages++;
+ if (num_zero_pages > khugepaged_max_ptes_none) {
+ kunmap_local(kaddr);
+ return true;
+ }
+ } else {
+ /*
+			 * Another early-exit path: stop once the number
+			 * of non-zero-filled pages exceeds the threshold.
+ */
+ num_filled_pages++;
+ if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
+ kunmap_local(kaddr);
+ return false;
+ }
+ }
+ kunmap_local(kaddr);
+ }
+ return false;
+}
+
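As a rough userspace analogue of the scan above (a sketch assuming 4K pages; page_is_zero() stands in for the kernel's memchr_inv(kaddr, 0, PAGE_SIZE) test):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Returns 1 when every byte of the page is zero, mirroring the
 * per-subpage check thp_underused() performs. */
static int page_is_zero(const unsigned char *p)
{
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++)
		if (p[i])
			return 0;
	return 1;
}

int main(void)
{
	unsigned char *page = calloc(1, PAGE_SIZE);

	printf("fresh page zero? %d\n", page_is_zero(page));	/* 1 */
	page[123] = 0xff;
	printf("after a write?  %d\n", page_is_zero(page));	/* 0 */
	free(page);
	return 0;
}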
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -3379,6 +3745,11 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
list_move(&folio->_deferred_list, &list);
} else {
/* We lost race with folio_put() */
+ if (folio_test_partially_mapped(folio)) {
+ __folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
list_del_init(&folio->_deferred_list);
ds_queue->split_queue_len--;
}
@@ -3388,13 +3759,35 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
list_for_each_entry_safe(folio, next, &list, _deferred_list) {
+ bool did_split = false;
+ bool underused = false;
+
+ if (!folio_test_partially_mapped(folio)) {
+ underused = thp_underused(folio);
+ if (!underused)
+ goto next;
+ }
if (!folio_trylock(folio))
goto next;
- /* split_huge_page() removes page from list on success */
- if (!split_folio(folio))
+ if (!split_folio(folio)) {
+ did_split = true;
+ if (underused)
+ count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
split++;
+ }
folio_unlock(folio);
next:
+ /*
+ * split_folio() removes folio from list on success.
+ * Only add back to the queue if folio is partially mapped.
+	 * If thp_underused() returns false, or if split_folio() fails
+	 * on an underused folio, consider the folio used and don't
+	 * add it back to the split_queue.
+ */
+ if (!did_split && !folio_test_partially_mapped(folio)) {
+ list_del_init(&folio->_deferred_list);
+ ds_queue->split_queue_len--;
+ }
folio_put(folio);
}
@@ -3480,16 +3873,11 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
vaddr_start &= PAGE_MASK;
vaddr_end &= PAGE_MASK;
- /* Find the task_struct from pid */
- rcu_read_lock();
- task = find_task_by_vpid(pid);
+ task = find_get_task_by_vpid(pid);
if (!task) {
- rcu_read_unlock();
ret = -ESRCH;
goto out;
}
- get_task_struct(task);
- rcu_read_unlock();
/* Find the mm_struct */
mm = get_task_mm(task);
@@ -3510,8 +3898,10 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
*/
for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
struct vm_area_struct *vma = vma_lookup(mm, addr);
- struct page *page;
+ struct folio_walk fw;
struct folio *folio;
+ struct address_space *mapping;
+ unsigned int target_order = new_order;
if (!vma)
break;
@@ -3522,17 +3912,20 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
continue;
}
- /* FOLL_DUMP to ignore special (like zero) pages */
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
- if (IS_ERR_OR_NULL(page))
+ folio = folio_walk_start(&fw, vma, addr, 0);
+ if (!folio)
continue;
- folio = page_folio(page);
if (!is_transparent_hugepage(folio))
goto next;
- if (new_order >= folio_order(folio))
+ if (!folio_test_anon(folio)) {
+ mapping = folio->mapping;
+ target_order = max(new_order,
+ mapping_min_folio_order(mapping));
+ }
+
+ if (target_order >= folio_order(folio))
goto next;
total++;
@@ -3542,18 +3935,29 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
* can be split or not. So skip the check here.
*/
if (!folio_test_private(folio) &&
- !can_split_folio(folio, NULL))
+ !can_split_folio(folio, 0, NULL))
goto next;
if (!folio_trylock(folio))
goto next;
+ folio_get(folio);
+ folio_walk_end(&fw, vma);
- if (!split_folio_to_order(folio, new_order))
+ if (!folio_test_anon(folio) && folio->mapping != mapping)
+ goto unlock;
+
+ if (!split_folio_to_order(folio, target_order))
split++;
+unlock:
+
folio_unlock(folio);
-next:
folio_put(folio);
+
+ cond_resched();
+ continue;
+next:
+ folio_walk_end(&fw, vma);
cond_resched();
}
mmap_read_unlock(mm);
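A hedged distillation of the order clamp introduced above: the debugfs interface now refuses to split pagecache folios below the mapping's floor. With new_order == 0 and a mapping whose minimum folio order is 2 (for instance a filesystem using 16K folios on 4K pages), the effective target becomes order 2. The helper name is hypothetical.

/* Hypothetical helper; mirrors the target_order computation above. */
static unsigned int clamp_split_order(struct folio *folio, unsigned int new_order)
{
	if (folio_test_anon(folio))
		return new_order;
	return max(new_order, mapping_min_folio_order(folio->mapping));
}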
@@ -3575,6 +3979,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
pgoff_t index;
int nr_pages = 1;
unsigned long total = 0, split = 0;
+ unsigned int min_order;
+ unsigned int target_order;
file = getname_kernel(file_path);
if (IS_ERR(file))
@@ -3588,6 +3994,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
file_path, off_start, off_end);
mapping = candidate->f_mapping;
+ min_order = mapping_min_folio_order(mapping);
+ target_order = max(new_order, min_order);
for (index = off_start; index < off_end; index += nr_pages) {
struct folio *folio = filemap_get_folio(mapping, index);
@@ -3602,15 +4010,19 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
total++;
nr_pages = folio_nr_pages(folio);
- if (new_order >= folio_order(folio))
+ if (target_order >= folio_order(folio))
goto next;
if (!folio_trylock(folio))
goto next;
- if (!split_folio_to_order(folio, new_order))
+ if (folio->mapping != mapping)
+ goto unlock;
+
+ if (!split_folio_to_order(folio, target_order))
split++;
+unlock:
folio_unlock(folio);
next:
folio_put(folio);
@@ -3703,7 +4115,6 @@ out:
static const struct file_operations split_huge_pages_fops = {
.owner = THIS_MODULE,
.write = split_huge_pages_write,
- .llseek = no_llseek,
};
static int __init split_huge_pages_debugfs(void)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aaf508be0a2b..190fa05635f4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -56,16 +56,6 @@ struct hstate hstates[HUGE_MAX_HSTATE];
#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
-static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
-{
- return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
- 1 << order);
-}
-#else
-static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
-{
- return false;
-}
#endif
static unsigned long hugetlb_cma_size __initdata;
@@ -82,14 +72,14 @@ static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
* Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
* free_huge_pages, and surplus_huge_pages.
*/
-DEFINE_SPINLOCK(hugetlb_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
/*
* Serializes faults on the same logical page. This is used to
* prevent spurious OOMs when the hugepage pool is fully utilized.
*/
-static int num_fault_mutexes;
-struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
+static int num_fault_mutexes __ro_after_init;
+struct mutex *hugetlb_fault_mutex_table __ro_after_init;
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
@@ -100,6 +90,17 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
+static void hugetlb_free_folio(struct folio *folio)
+{
+#ifdef CONFIG_CMA
+ int nid = folio_nid(folio);
+
+ if (cma_free_folio(hugetlb_cma[nid], folio))
+ return;
+#endif
+ folio_put(folio);
+}
+
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
if (spool->count)
@@ -1512,95 +1513,54 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
((node = hstate_next_node_to_free(hs, mask)) || 1); \
nr_nodes--)
-/* used to demote non-gigantic_huge pages as well */
-static void __destroy_compound_gigantic_folio(struct folio *folio,
- unsigned int order, bool demote)
-{
- int i;
- int nr_pages = 1 << order;
- struct page *p;
-
- atomic_set(&folio->_entire_mapcount, 0);
- atomic_set(&folio->_large_mapcount, 0);
- atomic_set(&folio->_pincount, 0);
-
- for (i = 1; i < nr_pages; i++) {
- p = folio_page(folio, i);
- p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
- p->mapping = NULL;
- clear_compound_head(p);
- if (!demote)
- set_page_refcounted(p);
- }
-
- __folio_clear_head(folio);
-}
-
-static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
- unsigned int order)
-{
- __destroy_compound_gigantic_folio(folio, order, true);
-}
-
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static void destroy_compound_gigantic_folio(struct folio *folio,
- unsigned int order)
-{
- __destroy_compound_gigantic_folio(folio, order, false);
-}
-
-static void free_gigantic_folio(struct folio *folio, unsigned int order)
-{
- /*
- * If the page isn't allocated using the cma allocator,
- * cma_release() returns false.
- */
-#ifdef CONFIG_CMA
- int nid = folio_nid(folio);
-
- if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
- return;
-#endif
-
- free_contig_range(folio_pfn(folio), 1 << order);
-}
-
#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
- struct page *page;
- unsigned long nr_pages = pages_per_huge_page(h);
+ struct folio *folio;
+ int order = huge_page_order(h);
+ bool retried = false;
+
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
-
+retry:
+ folio = NULL;
#ifdef CONFIG_CMA
{
int node;
- if (hugetlb_cma[nid]) {
- page = cma_alloc(hugetlb_cma[nid], nr_pages,
- huge_page_order(h), true);
- if (page)
- return page_folio(page);
- }
+ if (hugetlb_cma[nid])
+ folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
- if (!(gfp_mask & __GFP_THISNODE)) {
+ if (!folio && !(gfp_mask & __GFP_THISNODE)) {
for_each_node_mask(node, *nodemask) {
if (node == nid || !hugetlb_cma[node])
continue;
- page = cma_alloc(hugetlb_cma[node], nr_pages,
- huge_page_order(h), true);
- if (page)
- return page_folio(page);
+ folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
+ if (folio)
+ break;
}
}
}
#endif
+ if (!folio) {
+ folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
+ if (!folio)
+ return NULL;
+ }
- page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
- return page ? page_folio(page) : NULL;
+ if (folio_ref_freeze(folio, 1))
+ return folio;
+
+ pr_warn("HugeTLB: unexpected refcount on PFN %lu\n", folio_pfn(folio));
+ hugetlb_free_folio(folio);
+ if (!retried) {
+ retried = true;
+ goto retry;
+ }
+ return NULL;
}
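A hedged sketch of the freeze-or-retry pattern used above: a fresh gigantic folio should have refcount 1, but a speculative reference (a PFN-based walker, say) can make folio_ref_freeze() fail; that failure is tolerated exactly once before giving up. The helper and its callback are hypothetical.

/* Hypothetical distillation of the retry logic above. */
static struct folio *freeze_new_folio(struct folio *(*alloc)(void))
{
	bool retried = false;
	struct folio *folio;

	while ((folio = alloc())) {
		if (folio_ref_freeze(folio, 1))
			return folio;	/* refcount now 0; folio is ours */
		hugetlb_free_folio(folio);	/* CMA-aware free, see above */
		if (retried)
			break;
		retried = true;
	}
	return NULL;
}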
#else /* !CONFIG_CONTIG_ALLOC */
@@ -1617,10 +1577,6 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
{
return NULL;
}
-static inline void free_gigantic_folio(struct folio *folio,
- unsigned int order) { }
-static inline void destroy_compound_gigantic_folio(struct folio *folio,
- unsigned int order) { }
#endif
/*
@@ -1748,18 +1704,8 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
folio_ref_unfreeze(folio, 1);
- /*
- * Non-gigantic pages demoted from CMA allocated gigantic pages
- * need to be given back to CMA in free_gigantic_folio.
- */
- if (hstate_is_gigantic(h) ||
- hugetlb_cma_folio(folio, huge_page_order(h))) {
- destroy_compound_gigantic_folio(folio, huge_page_order(h));
- free_gigantic_folio(folio, huge_page_order(h));
- } else {
- INIT_LIST_HEAD(&folio->_deferred_list);
- folio_put(folio);
- }
+ INIT_LIST_HEAD(&folio->_deferred_list);
+ hugetlb_free_folio(folio);
}
/*
@@ -2032,95 +1978,6 @@ static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int ni
spin_unlock_irq(&hugetlb_lock);
}
-static bool __prep_compound_gigantic_folio(struct folio *folio,
- unsigned int order, bool demote)
-{
- int i, j;
- int nr_pages = 1 << order;
- struct page *p;
-
- __folio_clear_reserved(folio);
- for (i = 0; i < nr_pages; i++) {
- p = folio_page(folio, i);
-
- /*
- * For gigantic hugepages allocated through bootmem at
- * boot, it's safer to be consistent with the not-gigantic
- * hugepages and clear the PG_reserved bit from all tail pages
- * too. Otherwise drivers using get_user_pages() to access tail
- * pages may get the reference counting wrong if they see
- * PG_reserved set on a tail page (despite the head page not
- * having PG_reserved set). Enforcing this consistency between
- * head and tail pages allows drivers to optimize away a check
- * on the head page when they need know if put_page() is needed
- * after get_user_pages().
- */
- if (i != 0) /* head page cleared above */
- __ClearPageReserved(p);
- /*
- * Subtle and very unlikely
- *
- * Gigantic 'page allocators' such as memblock or cma will
- * return a set of pages with each page ref counted. We need
- * to turn this set of pages into a compound page with tail
- * page ref counts set to zero. Code such as speculative page
- * cache adding could take a ref on a 'to be' tail page.
- * We need to respect any increased ref count, and only set
- * the ref count to zero if count is currently 1. If count
- * is not 1, we return an error. An error return indicates
- * the set of pages can not be converted to a gigantic page.
- * The caller who allocated the pages should then discard the
- * pages using the appropriate free interface.
- *
- * In the case of demote, the ref count will be zero.
- */
- if (!demote) {
- if (!page_ref_freeze(p, 1)) {
- pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
- goto out_error;
- }
- } else {
- VM_BUG_ON_PAGE(page_count(p), p);
- }
- if (i != 0)
- set_compound_head(p, &folio->page);
- }
- __folio_set_head(folio);
- /* we rely on prep_new_hugetlb_folio to set the hugetlb flag */
- folio_set_order(folio, order);
- atomic_set(&folio->_entire_mapcount, -1);
- atomic_set(&folio->_large_mapcount, -1);
- atomic_set(&folio->_pincount, 0);
- return true;
-
-out_error:
- /* undo page modifications made above */
- for (j = 0; j < i; j++) {
- p = folio_page(folio, j);
- if (j != 0)
- clear_compound_head(p);
- set_page_refcounted(p);
- }
- /* need to clear PG_reserved on remaining tail pages */
- for (; j < nr_pages; j++) {
- p = folio_page(folio, j);
- __ClearPageReserved(p);
- }
- return false;
-}
-
-static bool prep_compound_gigantic_folio(struct folio *folio,
- unsigned int order)
-{
- return __prep_compound_gigantic_folio(folio, order, false);
-}
-
-static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
- unsigned int order)
-{
- return __prep_compound_gigantic_folio(folio, order, true);
-}
-
/*
* Find and lock address space (mapping) in write mode.
*
@@ -2159,7 +2016,6 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
*/
if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
alloc_try_hard = false;
- gfp_mask |= __GFP_COMP|__GFP_NOWARN;
if (alloc_try_hard)
gfp_mask |= __GFP_RETRY_MAYFAIL;
if (nid == NUMA_NO_NODE)
@@ -2206,48 +2062,16 @@ retry:
return folio;
}
-static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
- gfp_t gfp_mask, int nid, nodemask_t *nmask,
- nodemask_t *node_alloc_noretry)
-{
- struct folio *folio;
- bool retry = false;
-
-retry:
- if (hstate_is_gigantic(h))
- folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
- else
- folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
- nid, nmask, node_alloc_noretry);
- if (!folio)
- return NULL;
-
- if (hstate_is_gigantic(h)) {
- if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
- /*
- * Rare failure to convert pages to compound page.
- * Free pages and try again - ONCE!
- */
- free_gigantic_folio(folio, huge_page_order(h));
- if (!retry) {
- retry = true;
- goto retry;
- }
- return NULL;
- }
- }
-
- return folio;
-}
-
static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask,
nodemask_t *node_alloc_noretry)
{
struct folio *folio;
- folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
- node_alloc_noretry);
+ if (hstate_is_gigantic(h))
+ folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+ else
+ folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry);
if (folio)
init_new_hugetlb_folio(h, folio);
return folio;
@@ -2265,7 +2089,10 @@ static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
{
struct folio *folio;
- folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
+ if (hstate_is_gigantic(h))
+ folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
+ else
+ folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
if (!folio)
return NULL;
@@ -2549,9 +2376,8 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
if (mpol_is_preferred_many(mpol)) {
- gfp_t gfp = gfp_mask | __GFP_NOWARN;
+ gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
- gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
/* Fallback to all nodes if page==NULL */
@@ -2564,6 +2390,23 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
return folio;
}
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
+{
+ struct folio *folio;
+
+ spin_lock_irq(&hugetlb_lock);
+ folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
+ nmask);
+ if (folio) {
+ VM_BUG_ON(!h->resv_huge_pages);
+ h->resv_huge_pages--;
+ }
+
+ spin_unlock_irq(&hugetlb_lock);
+ return folio;
+}
+
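A hedged usage sketch for the new reserve-consuming allocator (the caller below is hypothetical): it dequeues an already-reserved folio, preferring @nid but falling back across @nmask, and only decrements resv_huge_pages; no new pages are allocated.

/* Hypothetical caller of alloc_hugetlb_folio_reserve(). */
static struct folio *grab_reserved(struct hstate *h, int nid, nodemask_t *nmask)
{
	return alloc_hugetlb_folio_reserve(h, nid, nmask, htlb_alloc_mask(h));
}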
/* folio migration callback function */
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
@@ -3333,6 +3176,7 @@ static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
struct page *page = pfn_to_page(pfn);
+ __ClearPageReserved(folio_page(folio, pfn - head_pfn));
__init_single_page(page, pfn, zone, nid);
prep_compound_tail((struct page *)folio, pfn - head_pfn);
ret = page_ref_freeze(page, 1);
@@ -3921,101 +3765,120 @@ out:
return 0;
}
-static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
+static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
+ struct list_head *src_list)
{
- int i, nid = folio_nid(folio);
- struct hstate *target_hstate;
- struct page *subpage;
- struct folio *inner_folio;
- int rc = 0;
-
- target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
-
- remove_hugetlb_folio(h, folio, false);
- spin_unlock_irq(&hugetlb_lock);
-
- /*
- * If vmemmap already existed for folio, the remove routine above would
- * have cleared the hugetlb folio flag. Hence the folio is technically
- * no longer a hugetlb folio. hugetlb_vmemmap_restore_folio can only be
- * passed hugetlb folios and will BUG otherwise.
- */
- if (folio_test_hugetlb(folio)) {
- rc = hugetlb_vmemmap_restore_folio(h, folio);
- if (rc) {
- /* Allocation of vmemmmap failed, we can not demote folio */
- spin_lock_irq(&hugetlb_lock);
- add_hugetlb_folio(h, folio, false);
- return rc;
- }
- }
+ long rc;
+ struct folio *folio, *next;
+ LIST_HEAD(dst_list);
+ LIST_HEAD(ret_list);
- /*
- * Use destroy_compound_hugetlb_folio_for_demote for all huge page
- * sizes as it will not ref count folios.
- */
- destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
+ rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
+ list_splice_init(&ret_list, src_list);
/*
* Taking target hstate mutex synchronizes with set_max_huge_pages.
* Without the mutex, pages added to target hstate could be marked
* as surplus.
*
- * Note that we already hold h->resize_lock. To prevent deadlock,
+ * Note that we already hold src->resize_lock. To prevent deadlock,
* use the convention of always taking larger size hstate mutex first.
*/
- mutex_lock(&target_hstate->resize_lock);
- for (i = 0; i < pages_per_huge_page(h);
- i += pages_per_huge_page(target_hstate)) {
- subpage = folio_page(folio, i);
- inner_folio = page_folio(subpage);
- if (hstate_is_gigantic(target_hstate))
- prep_compound_gigantic_folio_for_demote(inner_folio,
- target_hstate->order);
- else
- prep_compound_page(subpage, target_hstate->order);
- folio_change_private(inner_folio, NULL);
- prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
- free_huge_folio(inner_folio);
+ mutex_lock(&dst->resize_lock);
+
+ list_for_each_entry_safe(folio, next, src_list, lru) {
+ int i;
+
+ if (folio_test_hugetlb_vmemmap_optimized(folio))
+ continue;
+
+ list_del(&folio->lru);
+
+ split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
+ pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));
+
+ for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
+ struct page *page = folio_page(folio, i);
+
+ page->mapping = NULL;
+ clear_compound_head(page);
+ prep_compound_page(page, dst->order);
+
+ init_new_hugetlb_folio(dst, page_folio(page));
+ list_add(&page->lru, &dst_list);
+ }
}
- mutex_unlock(&target_hstate->resize_lock);
- spin_lock_irq(&hugetlb_lock);
+ prep_and_add_allocated_folios(dst, &dst_list);
- /*
- * Not absolutely necessary, but for consistency update max_huge_pages
- * based on pool changes for the demoted page.
- */
- h->max_huge_pages--;
- target_hstate->max_huge_pages +=
- pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
+ mutex_unlock(&dst->resize_lock);
return rc;
}
-static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
+ unsigned long nr_to_demote)
__must_hold(&hugetlb_lock)
{
int nr_nodes, node;
- struct folio *folio;
+ struct hstate *dst;
+ long rc = 0;
+ long nr_demoted = 0;
lockdep_assert_held(&hugetlb_lock);
/* We should never get here if no demote order */
- if (!h->demote_order) {
+ if (!src->demote_order) {
pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
return -EINVAL; /* internal error */
}
+ dst = size_to_hstate(PAGE_SIZE << src->demote_order);
- for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
- list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
+ for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
+ LIST_HEAD(list);
+ struct folio *folio, *next;
+
+ list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
if (folio_test_hwpoison(folio))
continue;
- return demote_free_hugetlb_folio(h, folio);
+
+ remove_hugetlb_folio(src, folio, false);
+ list_add(&folio->lru, &list);
+
+ if (++nr_demoted == nr_to_demote)
+ break;
}
+
+ spin_unlock_irq(&hugetlb_lock);
+
+ rc = demote_free_hugetlb_folios(src, dst, &list);
+
+ spin_lock_irq(&hugetlb_lock);
+
+ list_for_each_entry_safe(folio, next, &list, lru) {
+ list_del(&folio->lru);
+ add_hugetlb_folio(src, folio, false);
+
+ nr_demoted--;
+ }
+
+ if (rc < 0 || nr_demoted == nr_to_demote)
+ break;
}
/*
+ * Not absolutely necessary, but for consistency update max_huge_pages
+	 * based on pool changes for the demoted pages.
+ */
+ src->max_huge_pages -= nr_demoted;
+ dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));
+
+ if (rc < 0)
+ return rc;
+
+ if (nr_demoted)
+ return nr_demoted;
+ /*
* Only way to get here is if all pages on free lists are poisoned.
* Return -EBUSY so that caller will not retry.
*/
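A hedged numeric check of the accounting above: demoting two 1G folios (order 18 with a 4K base page) to 2M (order 9) moves 2 << (18 - 9) == 1024 folios into the destination pool.

/* Hypothetical restatement of the max_huge_pages adjustment. */
static unsigned long demoted_dst_count(unsigned long nr_demoted,
				       unsigned int src_order, unsigned int dst_order)
{
	return nr_demoted << (src_order - dst_order);
}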
@@ -4249,6 +4112,8 @@ static ssize_t demote_store(struct kobject *kobj,
spin_lock_irq(&hugetlb_lock);
while (nr_demote) {
+ long rc;
+
/*
		 * Check for available pages to demote each time through the
* loop as demote_pool_huge_page will drop hugetlb_lock.
@@ -4261,11 +4126,13 @@ static ssize_t demote_store(struct kobject *kobj,
if (!nr_available)
break;
- err = demote_pool_huge_page(h, n_mask);
- if (err)
+ rc = demote_pool_huge_page(h, n_mask, nr_demote);
+ if (rc < 0) {
+ err = rc;
break;
+ }
- nr_demote--;
+ nr_demote -= rc;
}
spin_unlock_irq(&hugetlb_lock);
@@ -6048,7 +5915,7 @@ retry_avoidcopy:
* When the original hugepage is shared one, it does not have
* anon_vma prepared.
*/
- ret = vmf_anon_prepare(vmf);
+ ret = __vmf_anon_prepare(vmf);
if (unlikely(ret))
goto out_release_all;
@@ -6247,7 +6114,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
}
if (!(vma->vm_flags & VM_MAYSHARE)) {
- ret = vmf_anon_prepare(vmf);
+ ret = __vmf_anon_prepare(vmf);
if (unlikely(ret))
goto out;
}
@@ -6378,6 +6245,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
folio_unlock(folio);
out:
hugetlb_vma_unlock_read(vma);
+
+ /*
+ * We must check to release the per-VMA lock. __vmf_anon_prepare() is
+ * the only way ret can be set to VM_FAULT_RETRY.
+ */
+ if (unlikely(ret & VM_FAULT_RETRY))
+ vma_end_read(vma);
+
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
return ret;
@@ -6599,6 +6474,14 @@ out_ptl:
}
out_mutex:
hugetlb_vma_unlock_read(vma);
+
+ /*
+ * We must check to release the per-VMA lock. __vmf_anon_prepare() in
+ * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
+ */
+ if (unlikely(ret & VM_FAULT_RETRY))
+ vma_end_read(vma);
+
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
/*
* Generally it's safe to hold refcount during waiting page lock. But
@@ -7211,7 +7094,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
return 0;
}
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static unsigned long page_table_shareable(struct vm_area_struct *svma,
struct vm_area_struct *vma,
unsigned long addr, pgoff_t idx)
@@ -7373,7 +7256,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
-#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud)
@@ -7396,7 +7279,7 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
return false;
}
-#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -7494,7 +7377,7 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
/* See description above. Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
if (huge_page_size(h) == PMD_SIZE)
return PUD_SIZE - PMD_SIZE;
#endif
@@ -7503,10 +7386,6 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-/*
- * These functions are overwritable if your architecture needs its own
- * behavior.
- */
bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
bool ret = true;
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 4ff238ba1250..e716c4671a15 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -114,10 +114,10 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
}
page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
idx),
- fault_parent);
+ fault_parent, false);
page_counter_init(
hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
- rsvd_parent);
+ rsvd_parent, false);
limit = round_down(PAGE_COUNTER_MAX,
pages_per_huge_page(&hstates[idx]));
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 0c3f56b3578e..57b7f591eee8 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -43,6 +43,8 @@ struct vmemmap_remap_walk {
#define VMEMMAP_SPLIT_NO_TLB_FLUSH BIT(0)
/* Skip the TLB flush when we remap the PTE */
#define VMEMMAP_REMAP_NO_TLB_FLUSH BIT(1)
+/* synchronize_rcu() to avoid writes from page_ref_add_unless() */
+#define VMEMMAP_SYNCHRONIZE_RCU BIT(2)
unsigned long flags;
};
@@ -457,6 +459,9 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
if (!folio_test_hugetlb_vmemmap_optimized(folio))
return 0;
+ if (flags & VMEMMAP_SYNCHRONIZE_RCU)
+ synchronize_rcu();
+
vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
vmemmap_reuse = vmemmap_start;
vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;
@@ -489,10 +494,7 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
*/
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
- /* avoid writes from page_ref_add_unless() while unfolding vmemmap */
- synchronize_rcu();
-
- return __hugetlb_vmemmap_restore_folio(h, folio, 0);
+ return __hugetlb_vmemmap_restore_folio(h, folio, VMEMMAP_SYNCHRONIZE_RCU);
}
/**
@@ -515,14 +517,14 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct folio *folio, *t_folio;
long restored = 0;
long ret = 0;
-
- /* avoid writes from page_ref_add_unless() while unfolding vmemmap */
- synchronize_rcu();
+ unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
if (folio_test_hugetlb_vmemmap_optimized(folio)) {
- ret = __hugetlb_vmemmap_restore_folio(h, folio,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ ret = __hugetlb_vmemmap_restore_folio(h, folio, flags);
+ /* only need to synchronize_rcu() once for each batch */
+ flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
+
if (ret)
break;
restored++;
@@ -570,6 +572,9 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
return ret;
static_branch_inc(&hugetlb_optimize_vmemmap_key);
+
+ if (flags & VMEMMAP_SYNCHRONIZE_RCU)
+ synchronize_rcu();
/*
* Very Subtle
* If VMEMMAP_REMAP_NO_TLB_FLUSH is set, TLB flushing is not performed
@@ -617,10 +622,7 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
LIST_HEAD(vmemmap_pages);
- /* avoid writes from page_ref_add_unless() while folding vmemmap */
- synchronize_rcu();
-
- __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 0);
+ __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, VMEMMAP_SYNCHRONIZE_RCU);
free_vmemmap_page_list(&vmemmap_pages);
}
@@ -647,6 +649,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
{
struct folio *folio;
LIST_HEAD(vmemmap_pages);
+ unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
list_for_each_entry(folio, folio_list, lru) {
int ret = hugetlb_vmemmap_split_folio(h, folio);
@@ -663,14 +666,12 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
- /* avoid writes from page_ref_add_unless() while folding vmemmap */
- synchronize_rcu();
-
list_for_each_entry(folio, folio_list, lru) {
int ret;
- ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
+ /* only need to synchronize_rcu() once for each batch */
+ flags &= ~VMEMMAP_SYNCHRONIZE_RCU;
/*
* Pages to be freed may have been accumulated. If we
@@ -684,8 +685,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
INIT_LIST_HEAD(&vmemmap_pages);
- __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags);
}
}
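A hedged distillation of the once-per-batch pattern above: only the first folio in a batch carries VMEMMAP_SYNCHRONIZE_RCU and pays for the grace period; clearing the bit makes the remaining iterations cheap.

/* Hypothetical sketch; error handling elided. */
static void optimize_batch(const struct hstate *h, struct list_head *folio_list,
			   struct list_head *vmemmap_pages)
{
	unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
	struct folio *folio;

	list_for_each_entry(folio, folio_list, lru) {
		__hugetlb_vmemmap_optimize_folio(h, folio, vmemmap_pages, flags);
		flags &= ~VMEMMAP_SYNCHRONIZE_RCU;	/* one grace period per batch */
	}
}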
diff --git a/mm/internal.h b/mm/internal.h
index b4d86436565b..93083bbeeefa 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -8,13 +8,19 @@
#define __MM_INTERNAL_H
#include <linux/fs.h>
+#include <linux/khugepaged.h>
#include <linux/mm.h>
+#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>
+/* Internal core VMA manipulation functions. */
+#include "vma.h"
+
struct folio_batch;
/*
@@ -270,18 +276,22 @@ static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
pte_t expected_pte = pte_next_swp_offset(pte);
const pte_t *end_ptep = start_ptep + max_nr;
+ swp_entry_t entry = pte_to_swp_entry(pte);
pte_t *ptep = start_ptep + 1;
+ unsigned short cgroup_id;
VM_WARN_ON(max_nr < 1);
VM_WARN_ON(!is_swap_pte(pte));
- VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));
+ VM_WARN_ON(non_swap_entry(entry));
+ cgroup_id = lookup_swap_cgroup_id(entry);
while (ptep < end_ptep) {
pte = ptep_get(ptep);
if (!pte_same(pte, expected_pte))
break;
-
+ if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
+ break;
expected_pte = pte_next_swp_offset(expected_pte);
ptep++;
}
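The new stop condition keeps a swap PTE batch within a single memcg charge. A userspace toy of the same run-length rule (hypothetical, with ids standing in for swap cgroup ids):

#include <stdio.h>
#include <stddef.h>

/* Length of the leading run whose group id matches the first entry. */
static size_t batch_len(const unsigned short *ids, size_t max)
{
	size_t n = 1;

	while (n < max && ids[n] == ids[0])
		n++;
	return n;
}

int main(void)
{
	unsigned short ids[] = { 7, 7, 7, 9, 7 };

	printf("batch: %zu\n", batch_len(ids, 5));	/* 3 */
	return 0;
}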
@@ -310,7 +320,16 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
wake_up(wqh);
}
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
+static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+{
+ vm_fault_t ret = __vmf_anon_prepare(vmf);
+
+ if (unlikely(ret & VM_FAULT_RETRY))
+ vma_end_read(vmf->vma);
+ return ret;
+}
+
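A hedged sketch of the calling convention the comments above describe: ordinary fault paths use the wrapper, which has already dropped the per-VMA lock when VM_FAULT_RETRY comes back; hugetlb keeps the lock across the fault mutex and therefore calls the __ variant, doing vma_end_read() itself (see mm/hugetlb.c above). The caller below is hypothetical.

/* Hypothetical non-hugetlb caller: nothing extra to unlock on retry. */
static vm_fault_t prepare_anon_or_bail(struct vm_fault *vmf)
{
	vm_fault_t ret = vmf_anon_prepare(vmf);

	if (unlikely(ret))
		return ret;	/* VM_FAULT_RETRY already released the VMA lock */
	/* ... proceed to install the anonymous page ... */
	return 0;
}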
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
@@ -406,9 +425,7 @@ extern unsigned long highest_memmap_pfn;
/*
* in mm/vmscan.c:
*/
-bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
-void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
@@ -778,37 +795,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
return list_empty(&area->free_list[migratetype]);
}
-/*
- * These three helpers classifies VMAs for virtual memory accounting.
- */
-
-/*
- * Executable code area - executable, not writable, not stack
- */
-static inline bool is_exec_mapping(vm_flags_t flags)
-{
- return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
-}
-
-/*
- * Stack area (including shadow stacks)
- *
- * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
- * do_mmap() forbids all other combinations.
- */
-static inline bool is_stack_mapping(vm_flags_t flags)
-{
- return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
-}
-
-/*
- * Data area - private, writable, not stack
- */
-static inline bool is_data_mapping(vm_flags_t flags)
-{
- return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
-}
-
/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);
@@ -1069,6 +1055,8 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
/*
* mm/memory-failure.c
*/
+#ifdef CONFIG_MEMORY_FAILURE
+void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);
@@ -1089,6 +1077,12 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
unsigned long ksm_addr);
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+#else
+static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+}
+#endif
+
extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -1165,7 +1159,6 @@ static inline void flush_tlb_batched_pending(struct mm_struct *mm)
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
extern const struct trace_print_flags pageflag_names[];
-extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];
@@ -1217,11 +1210,12 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
void __vunmap_range_noflush(unsigned long start, unsigned long end);
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
- unsigned long addr, int page_nid, int *flags);
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+ unsigned long addr, int *flags, bool writable,
+ int *last_cpupid);
void free_zone_device_folio(struct folio *folio);
-int migrate_device_coherent_page(struct page *page);
+int migrate_device_coherent_folio(struct folio *folio);
/*
* mm/gup.c
@@ -1237,13 +1231,6 @@ void touch_pud(struct vm_area_struct *vma, unsigned long addr,
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, bool write);
-/*
- * mm/mmap.c
- */
-struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
- struct vm_area_struct *vma,
- unsigned long delta);
-
enum {
/* mark page accessed */
FOLL_TOUCH = 1 << 16,
@@ -1370,117 +1357,6 @@ static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte
return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}
-static inline void vma_iter_config(struct vma_iterator *vmi,
- unsigned long index, unsigned long last)
-{
- __mas_set_range(&vmi->mas, index, last - 1);
-}
-
-static inline void vma_iter_reset(struct vma_iterator *vmi)
-{
- mas_reset(&vmi->mas);
-}
-
-static inline
-struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
-{
- return mas_prev_range(&vmi->mas, min);
-}
-
-static inline
-struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
-{
- return mas_next_range(&vmi->mas, max);
-}
-
-static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
- unsigned long max, unsigned long size)
-{
- return mas_empty_area(&vmi->mas, min, max - 1, size);
-}
-
-static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
- unsigned long max, unsigned long size)
-{
- return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
-}
-
-/*
- * VMA Iterator functions shared between nommu and mmap
- */
-static inline int vma_iter_prealloc(struct vma_iterator *vmi,
- struct vm_area_struct *vma)
-{
- return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
-}
-
-static inline void vma_iter_clear(struct vma_iterator *vmi)
-{
- mas_store_prealloc(&vmi->mas, NULL);
-}
-
-static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
-{
- return mas_walk(&vmi->mas);
-}
-
-/* Store a VMA with preallocated memory */
-static inline void vma_iter_store(struct vma_iterator *vmi,
- struct vm_area_struct *vma)
-{
-
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
- if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
- vmi->mas.index > vma->vm_start)) {
- pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
- vmi->mas.index, vma->vm_start, vma->vm_start,
- vma->vm_end, vmi->mas.index, vmi->mas.last);
- }
- if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
- vmi->mas.last < vma->vm_start)) {
- pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
- vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
- vmi->mas.index, vmi->mas.last);
- }
-#endif
-
- if (vmi->mas.status != ma_start &&
- ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
- vma_iter_invalidate(vmi);
-
- __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
- mas_store_prealloc(&vmi->mas, vma);
-}
-
-static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
- struct vm_area_struct *vma, gfp_t gfp)
-{
- if (vmi->mas.status != ma_start &&
- ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
- vma_iter_invalidate(vmi);
-
- __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
- mas_store_gfp(&vmi->mas, vma, gfp);
- if (unlikely(mas_is_err(&vmi->mas)))
- return -ENOMEM;
-
- return 0;
-}
-
-/*
- * VMA lock generalization
- */
-struct vma_prepare {
- struct vm_area_struct *vma;
- struct vm_area_struct *adj_next;
- struct file *file;
- struct address_space *mapping;
- struct anon_vma *anon_vma;
- struct vm_area_struct *insert;
- struct vm_area_struct *remove;
- struct vm_area_struct *remove2;
-};
-
void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long zone, int nid);
@@ -1497,27 +1373,11 @@ static inline int can_do_mseal(unsigned long flags)
return 0;
}
-bool can_modify_mm(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
- unsigned long end, int behavior);
#else
static inline int can_do_mseal(unsigned long flags)
{
return -EPERM;
}
-
-static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
- unsigned long end)
-{
- return true;
-}
-
-static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
- unsigned long end, int behavior)
-{
- return true;
-}
#endif
#ifdef CONFIG_SHRINKER_DEBUG
@@ -1569,13 +1429,18 @@ static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
-struct unlink_vma_file_batch {
- int count;
- struct vm_area_struct *vmas[8];
-};
+/* mremap.c */
+unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+ unsigned long new_addr, unsigned long len,
+ bool need_rmap_locks, bool for_stack);
-void unlink_file_vma_batch_init(struct unlink_vma_file_batch *);
-void unlink_file_vma_batch_add(struct unlink_vma_file_batch *, struct vm_area_struct *);
-void unlink_file_vma_batch_final(struct unlink_vma_file_batch *);
+#ifdef CONFIG_UNACCEPTED_MEMORY
+void accept_page(struct page *page);
+#else /* CONFIG_UNACCEPTED_MEMORY */
+static inline void accept_page(struct page *page)
+{
+}
+#endif /* CONFIG_UNACCEPTED_MEMORY */
#endif /* __MM_INTERNAL_H */
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 7634dd2a6128..b88543e5c0cc 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -44,7 +44,8 @@ ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
CFLAGS_KASAN_TEST += -fno-builtin
endif
-CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST)
+CFLAGS_kasan_test_c.o := $(CFLAGS_KASAN_TEST)
+RUSTFLAGS_kasan_test_rust.o := $(RUSTFLAGS_KASAN)
CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)
obj-y := common.o report.o
@@ -52,5 +53,10 @@ obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quaran
obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o
obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o
+kasan_test-objs := kasan_test_c.o
+ifdef CONFIG_RUST
+ kasan_test-objs += kasan_test_rust.o
+endif
+
obj-$(CONFIG_KASAN_KUNIT_TEST) += kasan_test.o
obj-$(CONFIG_KASAN_MODULE_TEST) += kasan_test_module.o
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb2b9ac0659a..f438a6cdc964 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -555,6 +555,12 @@ static inline bool kasan_arch_is_ready(void) { return true; }
void kasan_kunit_test_suite_start(void);
void kasan_kunit_test_suite_end(void);
+#ifdef CONFIG_RUST
+char kasan_test_rust_uaf(void);
+#else
+static inline char kasan_test_rust_uaf(void) { return '\0'; }
+#endif
+
#else /* CONFIG_KASAN_KUNIT_TEST */
static inline void kasan_kunit_test_suite_start(void) { }
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test_c.c
index 567d33b493e2..a181e4780d9d 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test_c.c
@@ -1944,6 +1944,16 @@ static void match_all_mem_tag(struct kunit *test)
kfree(ptr);
}
+/*
+ * Check that Rust performing a use-after-free using `unsafe` is detected.
+ * This is a smoke test to make sure that Rust is being sanitized properly.
+ */
+static void rust_uaf(struct kunit *test)
+{
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
+}
+
static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
@@ -2017,6 +2027,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(match_all_not_assigned),
KUNIT_CASE(match_all_ptr_tag),
KUNIT_CASE(match_all_mem_tag),
+ KUNIT_CASE(rust_uaf),
{}
};
diff --git a/mm/kasan/kasan_test_rust.rs b/mm/kasan/kasan_test_rust.rs
new file mode 100644
index 000000000000..caa7175964ef
--- /dev/null
+++ b/mm/kasan/kasan_test_rust.rs
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Helper crate for KASAN testing.
+//!
+//! Provides behavior to check the sanitization of Rust code.
+
+use core::ptr::addr_of_mut;
+use kernel::prelude::*;
+
+/// Trivial UAF - allocate a big vector, grab a pointer partway through,
+/// drop the vector, and touch it.
+#[no_mangle]
+pub extern "C" fn kasan_test_rust_uaf() -> u8 {
+ let mut v: Vec<u8> = Vec::new();
+ for _ in 0..4096 {
+ v.push(0x42, GFP_KERNEL).unwrap();
+ }
+ let ptr: *mut u8 = addr_of_mut!(v[2048]);
+ drop(v);
+ unsafe { *ptr }
+}
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index c5cb54fc696d..67fc321db79b 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -99,6 +99,10 @@ module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_inte
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
+/* Allocation burst count: number of excess KFENCE allocations per sample. */
+static unsigned int kfence_burst __read_mostly;
+module_param_named(burst, kfence_burst, uint, 0644);
+
/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);
@@ -269,6 +273,13 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
return pageaddr;
}
+static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
+{
+ enum kfence_object_state state = READ_ONCE(meta->state);
+
+ return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
+}
+
/*
* Update the object's metadata state, including updating the alloc/free stacks
* depending on the state transition.
@@ -278,10 +289,14 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex
unsigned long *stack_entries, size_t num_stack_entries)
{
struct kfence_track *track =
- next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
+ next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;
lockdep_assert_held(&meta->lock);
+	/* Stack was already saved when scheduling the RCU callback; skip. */
+ if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
+ goto out;
+
if (stack_entries) {
memcpy(track->stack_entries, stack_entries,
num_stack_entries * sizeof(stack_entries[0]));
@@ -297,6 +312,7 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex
track->cpu = raw_smp_processor_id();
track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
+out:
/*
* Pairs with READ_ONCE() in
* kfence_shutdown_cache(),
@@ -502,7 +518,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
raw_spin_lock_irqsave(&meta->lock, flags);
- if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
+ if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) {
/* Invalid or double-free, bail out. */
atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
kfence_report_error((unsigned long)addr, false, NULL, meta,
@@ -780,7 +796,7 @@ static void kfence_check_all_canary(void)
for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
struct kfence_metadata *meta = &kfence_metadata[i];
- if (meta->state == KFENCE_OBJECT_ALLOCATED)
+ if (kfence_obj_allocated(meta))
check_canary(meta);
}
}
@@ -827,12 +843,12 @@ static void toggle_allocation_gate(struct work_struct *work)
if (!READ_ONCE(kfence_enabled))
return;
- atomic_set(&kfence_allocation_gate, 0);
+ atomic_set(&kfence_allocation_gate, -kfence_burst);
#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Enable static key, and await allocation to happen. */
static_branch_enable(&kfence_allocation_key);
- wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
+ wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate) > 0);
/* Disable static key and reset timer. */
static_branch_disable(&kfence_allocation_key);
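A userspace toy of the gate arithmetic (a sketch assuming burst == 2): the gate starts at -kfence_burst, each attempt increments it, and only results above 1 bail out, so each sample interval admits burst + 1 KFENCE allocations.

#include <stdio.h>

int main(void)
{
	int gate = -2;	/* atomic_set(&kfence_allocation_gate, -kfence_burst) */

	for (int attempt = 1; attempt <= 5; attempt++)
		printf("attempt %d: %s\n", attempt,
		       ++gate > 1 ? "skipped" : "kfence-allocated");
	return 0;	/* attempts 1-3 allocate, 4-5 skip */
}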
@@ -1006,12 +1022,11 @@ void kfence_shutdown_cache(struct kmem_cache *s)
* the lock will not help, as different critical section
* serialization will have the same outcome.
*/
- if (READ_ONCE(meta->cache) != s ||
- READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
+ if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
continue;
raw_spin_lock_irqsave(&meta->lock, flags);
- in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
+ in_use = meta->cache == s && kfence_obj_allocated(meta);
raw_spin_unlock_irqrestore(&meta->lock, flags);
if (in_use) {
@@ -1052,6 +1067,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
unsigned long stack_entries[KFENCE_STACK_DEPTH];
size_t num_stack_entries;
u32 alloc_stack_hash;
+ int allocation_gate;
/*
* Perform size check before switching kfence_allocation_gate, so that
@@ -1080,14 +1096,15 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
if (s->flags & SLAB_SKIP_KFENCE)
return NULL;
- if (atomic_inc_return(&kfence_allocation_gate) > 1)
+ allocation_gate = atomic_inc_return(&kfence_allocation_gate);
+ if (allocation_gate > 1)
return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
/*
* waitqueue_active() is fully ordered after the update of
* kfence_allocation_gate per atomic_inc_return().
*/
- if (waitqueue_active(&allocation_wait)) {
+ if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
/*
* Calling wake_up() here may deadlock when allocations happen
* from within timer code. Use an irq_work to defer it.
@@ -1154,11 +1171,19 @@ void __kfence_free(void *addr)
* the object, as the object page may be recycled for other-typed
* objects once it has been freed. meta->cache may be NULL if the cache
* was destroyed.
+ * Save the stack trace here so that reports show where the user freed
+ * the object.
*/
- if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
+ if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&meta->lock, flags);
+ metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0);
+ raw_spin_unlock_irqrestore(&meta->lock, flags);
call_rcu(&meta->rcu_head, rcu_guarded_free);
- else
+ } else {
kfence_guarded_free(addr, meta, false);
+ }
}
bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
@@ -1182,14 +1207,14 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
int distance = 0;
meta = addr_to_metadata(addr - PAGE_SIZE);
- if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
+ if (meta && kfence_obj_allocated(meta)) {
to_report = meta;
/* Data race ok; distance calculation approximate. */
distance = addr - data_race(meta->addr + meta->size);
}
meta = addr_to_metadata(addr + PAGE_SIZE);
- if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
+ if (meta && kfence_obj_allocated(meta)) {
/* Data race ok; distance calculation approximate. */
if (!to_report || distance > data_race(meta->addr) - addr)
to_report = meta;
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index db87a05047bd..dfba5ea06b01 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -38,6 +38,7 @@
enum kfence_object_state {
KFENCE_OBJECT_UNUSED, /* Object is unused. */
KFENCE_OBJECT_ALLOCATED, /* Object is currently allocated. */
+	KFENCE_OBJECT_RCU_FREEING, /* Object was allocated and is being freed via RCU. */
KFENCE_OBJECT_FREED, /* Object was allocated, and then freed. */
};
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index c509aed326ce..6370c5207d1a 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -16,6 +16,7 @@
#include <linux/sprintf.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
+#include <linux/sched/clock.h>
#include <trace/events/error_report.h>
#include <asm/kfence.h>
@@ -108,11 +109,15 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat
const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
u64 ts_sec = track->ts_nsec;
unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
+ u64 interval_nsec = local_clock() - track->ts_nsec;
+ unsigned long rem_interval_nsec = do_div(interval_nsec, NSEC_PER_SEC);
/* Timestamp matches printk timestamp format. */
- seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus:\n",
- show_alloc ? "allocated" : "freed", track->pid,
- track->cpu, (unsigned long)ts_sec, rem_nsec / 1000);
+ seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus (%lu.%06lus ago):\n",
+ show_alloc ? "allocated" : meta->state == KFENCE_OBJECT_RCU_FREEING ?
+ "rcu freeing" : "freed", track->pid,
+ track->cpu, (unsigned long)ts_sec, rem_nsec / 1000,
+ (unsigned long)interval_nsec, rem_interval_nsec / 1000);
if (track->num_stack_entries) {
/* Skip allocation/free internals stack. */
@@ -145,7 +150,7 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met
kfence_print_stack(seq, meta, true);
- if (meta->state == KFENCE_OBJECT_FREED) {
+ if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) {
seq_con_printf(seq, "\n");
kfence_print_stack(seq, meta, false);
}
@@ -314,7 +319,7 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
kpp->kp_slab_cache = meta->cache;
kpp->kp_objp = (void *)meta->addr;
kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
- if (meta->state == KFENCE_OBJECT_FREED)
+ if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING)
kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
/* get_stack_skipnr() ensures the first entry is outside allocator. */
kpp->kp_ret = kpp->kp_stack[0];
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index cdd1d8655a76..f9c39898eaff 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -85,7 +85,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
*
* Note that these are only respected if collapse was initiated by khugepaged.
*/
-static unsigned int khugepaged_max_ptes_none __read_mostly;
+unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
@@ -546,12 +546,14 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
static bool is_refcount_suitable(struct folio *folio)
{
- int expected_refcount;
+ int expected_refcount = folio_mapcount(folio);
- expected_refcount = folio_mapcount(folio);
- if (folio_test_swapcache(folio))
+ if (!folio_test_anon(folio) || folio_test_swapcache(folio))
expected_refcount += folio_nr_pages(folio);
+ if (folio_test_private(folio))
+ expected_refcount++;
+
return folio_ref_count(folio) == expected_refcount;
}
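A hedged restatement of the expectation computed above, with a worked example: a 16-page file folio mapped once with buffers attached (PG_private) should hold 1 + 16 + 1 == 18 references.

/* Hypothetical helper mirroring is_refcount_suitable()'s arithmetic. */
static int expected_refs(bool anon, bool swapcache, bool has_private,
			 int mapcount, int nr_pages)
{
	int refs = mapcount;

	if (!anon || swapcache)
		refs += nr_pages;	/* page cache (or swap cache) refs */
	if (has_private)
		refs++;			/* e.g. buffer heads */
	return refs;
}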
@@ -625,8 +627,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
}
/*
- * We can do it before isolate_lru_page because the
- * page can't be freed from under us. NOTE: PG_lock
+ * We can do it before folio_isolate_lru because the
+ * folio can't be freed from under us. NOTE: PG_lock
* is needed to serialize against split_huge_page
* when invoked from the VM.
*/
@@ -1235,6 +1237,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
+ deferred_split_folio(folio, false);
spin_unlock(pmd_ptl);
folio = NULL;
@@ -1841,7 +1844,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
} while (1);
- for (index = start; index < end; index++) {
+ for (index = start; index < end;) {
xas_set(&xas, index);
folio = xas_load(&xas);
@@ -1860,18 +1863,19 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
}
nr_none++;
+ index++;
continue;
}
if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
xas_unlock_irq(&xas);
/* swap in or instantiate fallocated page */
- if (shmem_get_folio(mapping->host, index,
+ if (shmem_get_folio(mapping->host, index, 0,
&folio, SGP_NOALLOC)) {
result = SCAN_FAIL;
goto xa_unlocked;
}
- /* drain lru cache to help isolate_lru_page() */
+ /* drain lru cache to help folio_isolate_lru() */
lru_add_drain();
} else if (folio_trylock(folio)) {
folio_get(folio);
@@ -1886,7 +1890,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
end - index);
- /* drain lru cache to help isolate_lru_page() */
+ /* drain lru cache to help folio_isolate_lru() */
lru_add_drain();
folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
@@ -1941,12 +1945,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* we locked the first folio, then a THP might be there already.
* This will be discovered on the first iteration.
*/
- if (folio_test_large(folio)) {
- result = folio_order(folio) == HPAGE_PMD_ORDER &&
- folio->index == start
- /* Maybe PMD-mapped */
- ? SCAN_PTE_MAPPED_HUGEPAGE
- : SCAN_PAGE_COMPOUND;
+ if (folio_order(folio) == HPAGE_PMD_ORDER &&
+ folio->index == start) {
+ /* Maybe PMD-mapped */
+ result = SCAN_PTE_MAPPED_HUGEPAGE;
goto out_unlock;
}
@@ -1986,9 +1988,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
/*
- * We control three references to the folio:
+ * We control 2 + nr_pages references to the folio:
* - we hold a pin on it;
- * - one reference from page cache;
+ * - nr_pages reference from page cache;
* - one from lru_isolate_folio;
* If those are the only references, then any new usage
* of the folio will have to fetch it from the page
@@ -1996,7 +1998,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* truncate, so any new usage will be blocked until we
* unlock folio after collapse/during rollback.
*/
- if (folio_ref_count(folio) != 3) {
+ if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) {
result = SCAN_PAGE_COUNT;
xas_unlock_irq(&xas);
folio_putback_lru(folio);
@@ -2007,6 +2009,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* Accumulate the folios that are being collapsed.
*/
list_add_tail(&folio->lru, &pagelist);
+ index += folio_nr_pages(folio);
continue;
out_unlock:
folio_unlock(folio);
@@ -2054,17 +2057,22 @@ xa_unlocked:
index = start;
dst = folio_page(new_folio, 0);
list_for_each_entry(folio, &pagelist, lru) {
+ int i, nr_pages = folio_nr_pages(folio);
+
while (index < folio->index) {
clear_highpage(dst);
index++;
dst++;
}
- if (copy_mc_highpage(dst, folio_page(folio, 0)) > 0) {
- result = SCAN_COPY_MC;
- goto rollback;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) {
+ result = SCAN_COPY_MC;
+ goto rollback;
+ }
+ index++;
+ dst++;
}
- index++;
- dst++;
}
while (index < end) {
clear_highpage(dst);
@@ -2179,7 +2187,7 @@ immap_locked:
folio_clear_active(folio);
folio_clear_unevictable(folio);
folio_unlock(folio);
- folio_put_refs(folio, 3);
+ folio_put_refs(folio, 2 + folio_nr_pages(folio));
}
goto out;
@@ -2254,16 +2262,10 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
continue;
}
- /*
- * TODO: khugepaged should compact smaller compound pages
- * into a PMD sized page
- */
- if (folio_test_large(folio)) {
- result = folio_order(folio) == HPAGE_PMD_ORDER &&
- folio->index == start
- /* Maybe PMD-mapped */
- ? SCAN_PTE_MAPPED_HUGEPAGE
- : SCAN_PAGE_COMPOUND;
+ if (folio_order(folio) == HPAGE_PMD_ORDER &&
+ folio->index == start) {
+ /* Maybe PMD-mapped */
+ result = SCAN_PTE_MAPPED_HUGEPAGE;
/*
* For SCAN_PTE_MAPPED_HUGEPAGE, further processing
* by the caller won't touch the page cache, and so
@@ -2285,8 +2287,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
break;
}
- if (folio_ref_count(folio) !=
- 1 + folio_mapcount(folio) + folio_test_private(folio)) {
+ if (!is_refcount_suitable(folio)) {
result = SCAN_PAGE_COUNT;
break;
}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 764b08100570..0400f5e8ac60 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -224,6 +224,10 @@ static int kmemleak_error;
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
+/* minimum and maximum addresses that may be valid per-CPU pointers */
+static unsigned long min_percpu_addr = ULONG_MAX;
+static unsigned long max_percpu_addr;
+
static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
@@ -294,13 +298,20 @@ static void hex_dump_object(struct seq_file *seq,
const u8 *ptr = (const u8 *)object->pointer;
size_t len;
- if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
+ if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
return;
+ if (object->flags & OBJECT_PERCPU)
+ ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);
+
/* limit the number of lines to HEX_MAX_LINES */
len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
- warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
+ if (object->flags & OBJECT_PERCPU)
+ warn_or_seq_printf(seq, " hex dump (first %zu bytes on cpu %d):\n",
+ len, raw_smp_processor_id());
+ else
+ warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
kasan_disable_current();
warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
@@ -695,10 +706,14 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
/*
- * Only update min_addr and max_addr with object
- * storing virtual address.
+ * Only update min_addr and max_addr with objects storing virtual
+ * addresses, and update min_percpu_addr and max_percpu_addr for
+ * per-CPU objects.
*/
- if (!(objflags & (OBJECT_PHYS | OBJECT_PERCPU))) {
+ if (objflags & OBJECT_PERCPU) {
+ min_percpu_addr = min(min_percpu_addr, untagged_ptr);
+ max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
+ } else if (!(objflags & OBJECT_PHYS)) {
min_addr = min(min_addr, untagged_ptr);
max_addr = max(max_addr, untagged_ptr + size);
}
@@ -1055,12 +1070,8 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
{
pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
- /*
- * Percpu allocations are only scanned and not reported as leaks
- * (min_count is set to 0).
- */
- if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- create_object_percpu((unsigned long)ptr, size, 0, gfp);
+ if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
+ create_object_percpu((__force unsigned long)ptr, size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -1134,8 +1145,8 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
pr_debug("%s(0x%px)\n", __func__, ptr);
- if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
- delete_object_full((unsigned long)ptr, OBJECT_PERCPU);
+ if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
+ delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ -1304,12 +1315,23 @@ static bool update_checksum(struct kmemleak_object *object)
{
u32 old_csum = object->checksum;
- if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
+ if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
return false;
kasan_disable_current();
kcsan_disable_current();
- object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
+ if (object->flags & OBJECT_PERCPU) {
+ unsigned int cpu;
+
+ object->checksum = 0;
+ for_each_possible_cpu(cpu) {
+ void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);
+
+ object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
+ }
+ } else {
+ object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
+ }
kasan_enable_current();
kcsan_enable_current();
@@ -1340,6 +1362,64 @@ static void update_refs(struct kmemleak_object *object)
}
}
+static void pointer_update_refs(struct kmemleak_object *scanned,
+ unsigned long pointer, unsigned int objflags)
+{
+ struct kmemleak_object *object;
+ unsigned long untagged_ptr;
+ unsigned long excess_ref;
+
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+ if (objflags & OBJECT_PERCPU) {
+ if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
+ return;
+ } else {
+ if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
+ return;
+ }
+
+ /*
+ * No need for get_object() here since we hold kmemleak_lock.
+ * object->use_count cannot be dropped to 0 while the object
+ * is still present in object_tree_root and object_list
+ * (with updates protected by kmemleak_lock).
+ */
+ object = __lookup_object(pointer, 1, objflags);
+ if (!object)
+ return;
+ if (object == scanned)
+ /* self referenced, ignore */
+ return;
+
+ /*
+ * Avoid the lockdep recursive warning on object->lock being
+ * previously acquired in scan_object(). These locks are
+ * enclosed by scan_mutex.
+ */
+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ /* only pass surplus references (object already gray) */
+ if (color_gray(object)) {
+ excess_ref = object->excess_ref;
+ /* no need for update_refs() if object already gray */
+ } else {
+ excess_ref = 0;
+ update_refs(object);
+ }
+ raw_spin_unlock(&object->lock);
+
+ if (excess_ref) {
+ object = lookup_object(excess_ref, 0);
+ if (!object)
+ return;
+ if (object == scanned)
+ /* circular reference, ignore */
+ return;
+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ update_refs(object);
+ raw_spin_unlock(&object->lock);
+ }
+}
+
/*
* Memory scanning is a long process and it needs to be interruptible. This
* function checks whether such interrupt condition occurred.
@@ -1372,13 +1452,10 @@ static void scan_block(void *_start, void *_end,
unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
unsigned long *end = _end - (BYTES_PER_POINTER - 1);
unsigned long flags;
- unsigned long untagged_ptr;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
for (ptr = start; ptr < end; ptr++) {
- struct kmemleak_object *object;
unsigned long pointer;
- unsigned long excess_ref;
if (scan_should_stop())
break;
@@ -1387,50 +1464,8 @@ static void scan_block(void *_start, void *_end,
pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
kasan_enable_current();
- untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
- if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
- continue;
-
- /*
- * No need for get_object() here since we hold kmemleak_lock.
- * object->use_count cannot be dropped to 0 while the object
- * is still present in object_tree_root and object_list
- * (with updates protected by kmemleak_lock).
- */
- object = lookup_object(pointer, 1);
- if (!object)
- continue;
- if (object == scanned)
- /* self referenced, ignore */
- continue;
-
- /*
- * Avoid the lockdep recursive warning on object->lock being
- * previously acquired in scan_object(). These locks are
- * enclosed by scan_mutex.
- */
- raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
- /* only pass surplus references (object already gray) */
- if (color_gray(object)) {
- excess_ref = object->excess_ref;
- /* no need for update_refs() if object already gray */
- } else {
- excess_ref = 0;
- update_refs(object);
- }
- raw_spin_unlock(&object->lock);
-
- if (excess_ref) {
- object = lookup_object(excess_ref, 0);
- if (!object)
- continue;
- if (object == scanned)
- /* circular reference, ignore */
- continue;
- raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
- update_refs(object);
- raw_spin_unlock(&object->lock);
- }
+ pointer_update_refs(scanned, pointer, 0);
+ pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
}
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}
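
/*
 * Sketch of the XOR-combined per-CPU checksum idea from update_checksum()
 * above: kmemleak now checksums a per-CPU object by XOR-ing the CRC of each
 * CPU's copy, so a modification on any one CPU perturbs the combined value
 * and defers a leak report. The arrays below stand in for per_cpu_ptr()
 * areas and toy_crc32() is a placeholder for the kernel's crc32(); only the
 * combining scheme is the point.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4

static uint32_t toy_crc32(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t h = 0;

	while (len--)
		h = h * 31 + *p++; /* placeholder hash, not real CRC */
	return h;
}

int main(void)
{
	uint8_t percpu[NR_CPUS][16];
	uint32_t csum = 0, csum2 = 0;
	int cpu;

	memset(percpu, 0, sizeof(percpu));
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		csum ^= toy_crc32(percpu[cpu], sizeof(percpu[cpu]));

	percpu[2][0] = 0xff; /* one CPU's copy changes... */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		csum2 ^= toy_crc32(percpu[cpu], sizeof(percpu[cpu]));

	/* ...so the combined checksum changes as well. */
	printf("%08x -> %08x\n", csum, csum2);
	return 0;
}
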
diff --git a/mm/ksm.c b/mm/ksm.c
index 14d9e53b1ec2..a2e2a521df0a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -608,47 +608,6 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
return atomic_read(&mm->mm_users) == 0;
}
-static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
- struct mm_walk *walk)
-{
- struct page *page = NULL;
- spinlock_t *ptl;
- pte_t *pte;
- pte_t ptent;
- int ret;
-
- pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- if (!pte)
- return 0;
- ptent = ptep_get(pte);
- if (pte_present(ptent)) {
- page = vm_normal_page(walk->vma, addr, ptent);
- } else if (!pte_none(ptent)) {
- swp_entry_t entry = pte_to_swp_entry(ptent);
-
- /*
- * As KSM pages remain KSM pages until freed, no need to wait
- * here for migration to end.
- */
- if (is_migration_entry(entry))
- page = pfn_swap_entry_to_page(entry);
- }
- /* return 1 if the page is an normal ksm page or KSM-placed zero page */
- ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
- pte_unmap_unlock(pte, ptl);
- return ret;
-}
-
-static const struct mm_walk_ops break_ksm_ops = {
- .pmd_entry = break_ksm_pmd_entry,
- .walk_lock = PGWALK_RDLOCK,
-};
-
-static const struct mm_walk_ops break_ksm_lock_vma_ops = {
- .pmd_entry = break_ksm_pmd_entry,
- .walk_lock = PGWALK_WRLOCK,
-};
-
/*
* We use break_ksm to break COW on a ksm page by triggering unsharing,
* such that the ksm page will get replaced by an exclusive anonymous page.
@@ -665,16 +624,26 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
vm_fault_t ret = 0;
- const struct mm_walk_ops *ops = lock_vma ?
- &break_ksm_lock_vma_ops : &break_ksm_ops;
+
+ if (lock_vma)
+ vma_start_write(vma);
do {
- int ksm_page;
+ bool ksm_page = false;
+ struct folio_walk fw;
+ struct folio *folio;
cond_resched();
- ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
- if (WARN_ON_ONCE(ksm_page < 0))
- return ksm_page;
+ folio = folio_walk_start(&fw, vma, addr,
+ FW_MIGRATION | FW_ZEROPAGE);
+ if (folio) {
+ /* Small folio implies FW_LEVEL_PTE. */
+ if (!folio_test_large(folio) &&
+ (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
+ ksm_page = true;
+ folio_walk_end(&fw, vma);
+ }
+
if (!ksm_page)
return 0;
ret = handle_mm_fault(vma, addr,
@@ -767,26 +736,28 @@ static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
struct mm_struct *mm = rmap_item->mm;
unsigned long addr = rmap_item->address;
struct vm_area_struct *vma;
- struct page *page;
+ struct page *page = NULL;
+ struct folio_walk fw;
+ struct folio *folio;
mmap_read_lock(mm);
vma = find_mergeable_vma(mm, addr);
if (!vma)
goto out;
- page = follow_page(vma, addr, FOLL_GET);
- if (IS_ERR_OR_NULL(page))
- goto out;
- if (is_zone_device_page(page))
- goto out_putpage;
- if (PageAnon(page)) {
+ folio = folio_walk_start(&fw, vma, addr, 0);
+ if (folio) {
+ if (!folio_is_zone_device(folio) &&
+ folio_test_anon(folio)) {
+ folio_get(folio);
+ page = fw.page;
+ }
+ folio_walk_end(&fw, vma);
+ }
+out:
+ if (page) {
flush_anon_page(vma, page, addr);
flush_dcache_page(page);
- } else {
-out_putpage:
- put_page(page);
-out:
- page = NULL;
}
mmap_read_unlock(mm);
return page;
@@ -938,12 +909,13 @@ again:
*/
while (!folio_try_get(folio)) {
/*
- * Another check for page->mapping != expected_mapping would
- * work here too. We have chosen the !PageSwapCache test to
- * optimize the common case, when the page is or is about to
- * be freed: PageSwapCache is cleared (under spin_lock_irq)
- * in the ref_freeze section of __remove_mapping(); but Anon
- * folio->mapping reset to NULL later, in free_pages_prepare().
+ * Another check for folio->mapping != expected_mapping
+ * would work here too. We have chosen to test the
+ * swapcache flag to optimize the common case, when the
+ * folio is or is about to be freed: the swapcache flag
+ * is cleared (under spin_lock_irq) in the ref_freeze
+ * section of __remove_mapping(); but anon folio->mapping
+ * is reset to NULL later, in free_pages_prepare().
*/
if (!folio_test_swapcache(folio))
goto stale;
@@ -974,7 +946,7 @@ again:
stale:
/*
- * We come here from above when page->mapping or !PageSwapCache
+ * We come here from above when folio->mapping or the swapcache flag
* suggests that the node is stale; but it might be under migration.
* We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
* before checking whether node->kpfn has been changed.
@@ -1481,7 +1453,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
goto out;
/*
- * We need the page lock to read a stable PageSwapCache in
+ * We need the folio lock to read a stable swapcache flag in
* write_protect_page(). We use trylock_page() instead of
* lock_page() because we don't want to wait here - we
* prefer to continue scanning and merging different pages,
@@ -2562,36 +2534,46 @@ next_mm:
ksm_scan.address = vma->vm_end;
while (ksm_scan.address < vma->vm_end) {
+ struct page *tmp_page = NULL;
+ struct folio_walk fw;
+ struct folio *folio;
+
if (ksm_test_exit(mm))
break;
- *page = follow_page(vma, ksm_scan.address, FOLL_GET);
- if (IS_ERR_OR_NULL(*page)) {
- ksm_scan.address += PAGE_SIZE;
- cond_resched();
- continue;
+
+ folio = folio_walk_start(&fw, vma, ksm_scan.address, 0);
+ if (folio) {
+ if (!folio_is_zone_device(folio) &&
+ folio_test_anon(folio)) {
+ folio_get(folio);
+ tmp_page = fw.page;
+ }
+ folio_walk_end(&fw, vma);
}
- if (is_zone_device_page(*page))
- goto next_page;
- if (PageAnon(*page)) {
- flush_anon_page(vma, *page, ksm_scan.address);
- flush_dcache_page(*page);
+
+ if (tmp_page) {
+ flush_anon_page(vma, tmp_page, ksm_scan.address);
+ flush_dcache_page(tmp_page);
rmap_item = get_next_rmap_item(mm_slot,
ksm_scan.rmap_list, ksm_scan.address);
if (rmap_item) {
ksm_scan.rmap_list =
&rmap_item->rmap_list;
- if (should_skip_rmap_item(*page, rmap_item))
+ if (should_skip_rmap_item(tmp_page, rmap_item)) {
+ folio_put(folio);
goto next_page;
+ }
ksm_scan.address += PAGE_SIZE;
- } else
- put_page(*page);
+ *page = tmp_page;
+ } else {
+ folio_put(folio);
+ }
mmap_read_unlock(mm);
return rmap_item;
}
next_page:
- put_page(*page);
ksm_scan.address += PAGE_SIZE;
cond_resched();
}
@@ -3142,7 +3124,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
* newfolio->mapping was set in advance; now we need smp_wmb()
* to make sure that the new stable_node->kpfn is visible
* to ksm_get_folio() before it can see that folio->mapping
- * has gone stale (or that folio_test_swapcache has been cleared).
+ * has gone stale (or that the swapcache flag has been cleared).
*/
smp_wmb();
folio_set_stable_node(folio, NULL);
diff --git a/mm/madvise.c b/mm/madvise.c
index 89089d84f8df..ff139e57cca2 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1031,6 +1031,9 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
struct anon_vma_name *anon_name;
unsigned long new_flags = vma->vm_flags;
+ if (unlikely(!can_modify_vma_madv(vma, behavior)))
+ return -EPERM;
+
switch (behavior) {
case MADV_REMOVE:
return madvise_remove(vma, prev, start, end);
@@ -1448,15 +1451,6 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
start = untagged_addr_remote(mm, start);
end = start + len;
- /*
- * Check if the address range is sealed for do_madvise().
- * can_modify_mm_madv assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm_madv(mm, start, end, behavior))) {
- error = -EPERM;
- goto out;
- }
-
blk_start_plug(&plug);
switch (behavior) {
case MADV_POPULATE_READ:
@@ -1470,7 +1464,6 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
}
blk_finish_plug(&plug);
-out:
if (write)
mmap_write_unlock(mm);
else
@@ -1527,7 +1520,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
* Require CAP_SYS_NICE for influencing process performance. Note that
* only non-destructive hints are currently supported.
*/
- if (!capable(CAP_SYS_NICE)) {
+ if (mm != current->mm && !capable(CAP_SYS_NICE)) {
ret = -EPERM;
goto release_mm;
}
diff --git a/mm/memblock.c b/mm/memblock.c
index 3b9dc2d89b8a..0389ce5cd281 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1500,7 +1500,7 @@ done:
*
* Accept the memory of the allocated buffer.
*/
- accept_memory(found, found + size);
+ accept_memory(found, size);
return found;
}
@@ -1731,6 +1731,23 @@ phys_addr_t __init_memblock memblock_reserved_size(void)
return memblock.reserved.total_size;
}
+/**
+ * memblock_estimated_nr_free_pages - return estimated number of free pages
+ * from memblock's point of view
+ *
+ * During bootup, subsystems might need a rough estimate of the number of free
+ * pages in the whole system, before precise numbers are available from the
+ * buddy allocator. Especially with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the
+ * numbers obtained from the buddy allocator can be very imprecise during
+ * bootup.
+ *
+ * Return:
+ * An estimated number of free pages from memblock's point of view.
+ */
+unsigned long __init memblock_estimated_nr_free_pages(void)
+{
+ return PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
+}
+
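
/*
 * The estimate above is plain arithmetic: (total physical memory - reserved)
 * bytes, converted to page frames. A quick standalone check of the math,
 * with illustrative byte counts and PAGE_SHIFT assumed to be 12:
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

int main(void)
{
	unsigned long long phys_mem = 8ULL << 30;   /* 8 GiB, hypothetical */
	unsigned long long reserved = 512ULL << 20; /* 512 MiB, hypothetical */

	printf("estimated free pages: %lu\n",
	       PHYS_PFN(phys_mem - reserved)); /* 1966080 */
	return 0;
}
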
/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 417c96f2da28..81d8819f13cd 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -742,6 +742,9 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
return folio_file_page(folio, index);
}
+static void memcg1_check_events(struct mem_cgroup *memcg, int nid);
+static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
+
/**
* mem_cgroup_move_account - move account of the folio
* @folio: The folio.
@@ -853,9 +856,9 @@ static int mem_cgroup_move_account(struct folio *folio,
nid = folio_nid(folio);
local_irq_disable();
- mem_cgroup_charge_statistics(to, nr_pages);
+ memcg1_charge_statistics(to, nr_pages);
memcg1_check_events(to, nid);
- mem_cgroup_charge_statistics(from, -nr_pages);
+ memcg1_charge_statistics(from, -nr_pages);
memcg1_check_events(from, nid);
local_irq_enable();
out:
@@ -1439,21 +1442,68 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
}
}
+/* Cgroup1: threshold notifications & softlimit tree updates */
+struct memcg1_events_percpu {
+ unsigned long nr_page_events;
+ unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
+{
+ /* pagein of a big page is an event. So, ignore page size */
+ if (nr_pages > 0)
+ __count_memcg_events(memcg, PGPGIN, 1);
+ else {
+ __count_memcg_events(memcg, PGPGOUT, 1);
+ nr_pages = -nr_pages; /* for event */
+ }
+
+ __this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
+}
+
+#define THRESHOLDS_EVENTS_TARGET 128
+#define SOFTLIMIT_EVENTS_TARGET 1024
+
+static bool memcg1_event_ratelimit(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_target target)
+{
+ unsigned long val, next;
+
+ val = __this_cpu_read(memcg->events_percpu->nr_page_events);
+ next = __this_cpu_read(memcg->events_percpu->targets[target]);
+ /* from time_after() in jiffies.h */
+ if ((long)(next - val) < 0) {
+ switch (target) {
+ case MEM_CGROUP_TARGET_THRESH:
+ next = val + THRESHOLDS_EVENTS_TARGET;
+ break;
+ case MEM_CGROUP_TARGET_SOFTLIMIT:
+ next = val + SOFTLIMIT_EVENTS_TARGET;
+ break;
+ default:
+ break;
+ }
+ __this_cpu_write(memcg->events_percpu->targets[target], next);
+ return true;
+ }
+ return false;
+}
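
/*
 * The "(long)(next - val) < 0" test in memcg1_event_ratelimit() above is the
 * time_after() idiom the comment refers to: subtract in unsigned arithmetic,
 * then compare as signed, so the threshold check stays correct even when the
 * per-CPU event counter wraps around. A small userspace demonstration:
 */
#include <limits.h>
#include <stdio.h>

static int target_passed(unsigned long val, unsigned long next)
{
	return (long)(next - val) < 0;
}

int main(void)
{
	/* Normal case: counter has passed the target. */
	printf("%d\n", target_passed(1000, 872));          /* 1 */
	/* Counter wrapped past 0; a naive "val > next" would fail here. */
	printf("%d\n", target_passed(5, ULONG_MAX - 100)); /* 1 */
	/* Target still ahead. */
	printf("%d\n", target_passed(100, 228));           /* 0 */
	return 0;
}
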
+
/*
* Check events in order.
*
*/
-void memcg1_check_events(struct mem_cgroup *memcg, int nid)
+static void memcg1_check_events(struct mem_cgroup *memcg, int nid)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
return;
/* threshold event is triggered in finer grain than soft limit */
- if (unlikely(mem_cgroup_event_ratelimit(memcg,
+ if (unlikely(memcg1_event_ratelimit(memcg,
MEM_CGROUP_TARGET_THRESH))) {
bool do_softlimit;
- do_softlimit = mem_cgroup_event_ratelimit(memcg,
+ do_softlimit = memcg1_event_ratelimit(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT);
mem_cgroup_threshold(memcg);
if (unlikely(do_softlimit))
@@ -1461,6 +1511,43 @@ void memcg1_check_events(struct mem_cgroup *memcg, int nid)
}
}
+void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ memcg1_charge_statistics(memcg, folio_nr_pages(folio));
+ memcg1_check_events(memcg, folio_nid(folio));
+ local_irq_restore(flags);
+}
+
+void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg)
+{
+ /*
+ * Interrupts should be disabled here because the caller holds the
+ * i_pages lock which is taken with interrupts-off. It is
+ * important here to have the interrupts disabled because it is the
+ * only synchronisation we have for updating the per-CPU variables.
+ */
+ preempt_disable_nested();
+ VM_WARN_ON_IRQS_ENABLED();
+ memcg1_charge_statistics(memcg, -folio_nr_pages(folio));
+ preempt_enable_nested();
+ memcg1_check_events(memcg, folio_nid(folio));
+}
+
+void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+ unsigned long nr_memory, int nid)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __count_memcg_events(memcg, PGPGOUT, pgpgout);
+ __this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory);
+ memcg1_check_events(memcg, nid);
+ local_irq_restore(flags);
+}
+
static int compare_thresholds(const void *a, const void *b)
{
const struct mem_cgroup_threshold *_a = a;
@@ -1860,26 +1947,26 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
INIT_WORK(&event->remove, memcg_event_remove);
efile = fdget(efd);
- if (!efile.file) {
+ if (!fd_file(efile)) {
ret = -EBADF;
goto out_kfree;
}
- event->eventfd = eventfd_ctx_fileget(efile.file);
+ event->eventfd = eventfd_ctx_fileget(fd_file(efile));
if (IS_ERR(event->eventfd)) {
ret = PTR_ERR(event->eventfd);
goto out_put_efile;
}
cfile = fdget(cfd);
- if (!cfile.file) {
+ if (!fd_file(cfile)) {
ret = -EBADF;
goto out_put_eventfd;
}
/* the process need read permission on control file */
/* AV: shouldn't we check that it's been opened for read instead? */
- ret = file_permission(cfile.file, MAY_READ);
+ ret = file_permission(fd_file(cfile), MAY_READ);
if (ret < 0)
goto out_put_cfile;
@@ -1887,7 +1974,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
* The control file must be a regular cgroup1 file. As a regular cgroup
* file can't be renamed, it's safe to access its name afterwards.
*/
- cdentry = cfile.file->f_path.dentry;
+ cdentry = fd_file(cfile)->f_path.dentry;
if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
ret = -EINVAL;
goto out_put_cfile;
@@ -1907,9 +1994,15 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
event->register_event = mem_cgroup_usage_register_event;
event->unregister_event = mem_cgroup_usage_unregister_event;
} else if (!strcmp(name, "memory.oom_control")) {
+ pr_warn_once("oom_control is deprecated and will be removed. "
+ "Please report your usecase to linux-mm-@kvack.org"
+ " if you depend on this functionality. \n");
event->register_event = mem_cgroup_oom_register_event;
event->unregister_event = mem_cgroup_oom_unregister_event;
} else if (!strcmp(name, "memory.pressure_level")) {
+ pr_warn_once("pressure_level is deprecated and will be removed. "
+ "Please report your usecase to linux-mm-@kvack.org "
+ "if you depend on this functionality. \n");
event->register_event = vmpressure_register_event;
event->unregister_event = vmpressure_unregister_event;
} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
@@ -1939,7 +2032,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
if (ret)
goto out_put_css;
- vfs_poll(efile.file, &event->pt);
+ vfs_poll(fd_file(efile), &event->pt);
spin_lock_irq(&memcg->event_list_lock);
list_add(&event->list, &memcg->event_list);
@@ -2447,6 +2540,9 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
ret = 0;
break;
case _TCP:
+ pr_warn_once("kmem.tcp.limit_in_bytes is deprecated and will be removed. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
ret = memcg_update_tcp_max(memcg, nr_pages);
break;
}
@@ -2455,6 +2551,9 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
ret = -EOPNOTSUPP;
} else {
+ pr_warn_once("soft_limit_in_bytes is deprecated and will be removed. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
WRITE_ONCE(memcg->soft_limit, nr_pages);
ret = 0;
}
@@ -2748,6 +2847,10 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ pr_warn_once("oom_control is deprecated and will be removed. "
+ "Please report your usecase to linux-mm-@kvack.org if you "
+ "depend on this functionality. \n");
+
/* cannot set to root cgroup and only 0 and 1 are allowed */
if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
return -EINVAL;
@@ -2952,6 +3055,19 @@ bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
return false;
}
+bool memcg1_alloc_events(struct mem_cgroup *memcg)
+{
+ memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu,
+ GFP_KERNEL_ACCOUNT);
+ return !!memcg->events_percpu;
+}
+
+void memcg1_free_events(struct mem_cgroup *memcg)
+{
+ if (memcg->events_percpu)
+ free_percpu(memcg->events_percpu);
+}
+
static int __init memcg1_init(void)
{
int node;
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index 56d7eaa98274..c0672e25bcdb 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -7,7 +7,6 @@
/* Cgroup v1 and v2 common declarations */
-void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned int nr_pages);
@@ -56,8 +55,6 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
- enum mem_cgroup_events_target target);
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
void drain_all_stock(struct mem_cgroup *root_memcg);
@@ -71,6 +68,10 @@ int memory_stat_show(struct seq_file *m, void *v);
/* Cgroup v1-specific declarations */
#ifdef CONFIG_MEMCG_V1
+
+bool memcg1_alloc_events(struct mem_cgroup *memcg);
+void memcg1_free_events(struct mem_cgroup *memcg);
+
void memcg1_memcg_init(struct mem_cgroup *memcg);
void memcg1_remove_from_trees(struct mem_cgroup *memcg);
@@ -99,7 +100,10 @@ bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
void memcg1_oom_recover(struct mem_cgroup *memcg);
-void memcg1_check_events(struct mem_cgroup *memcg, int nid);
+void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
+void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg);
+void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+ unsigned long nr_memory, int nid);
void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
@@ -120,6 +124,9 @@ extern struct cftype mem_cgroup_legacy_files[];
#else /* CONFIG_MEMCG_V1 */
+static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
+static inline void memcg1_free_events(struct mem_cgroup *memcg) {}
+
static inline void memcg1_memcg_init(struct mem_cgroup *memcg) {}
static inline void memcg1_remove_from_trees(struct mem_cgroup *memcg) {}
static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg) {}
@@ -130,7 +137,14 @@ static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) {
static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
-static inline void memcg1_check_events(struct mem_cgroup *memcg, int nid) {}
+static inline void memcg1_commit_charge(struct folio *folio,
+ struct mem_cgroup *memcg) {}
+
+static inline void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) {}
+
+static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
+ unsigned long pgpgout,
+ unsigned long nr_memory, int nid) {}
static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}
@@ -140,8 +154,6 @@ static inline bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr
gfp_t gfp_mask) { return true; }
static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) {}
-extern struct cftype memsw_files[];
-extern struct cftype mem_cgroup_legacy_files[];
#endif /* CONFIG_MEMCG_V1 */
#endif /* __MM_MEMCONTROL_V1_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d563fb515766..7845c64a2c57 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -25,6 +25,7 @@
* Copyright (C) 2020 Alibaba, Inc, Alex Shi
*/
+#include <linux/cgroup-defs.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
@@ -41,6 +42,7 @@
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
+#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
@@ -93,9 +95,6 @@ static bool cgroup_memory_nobpf __ro_after_init;
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif
-#define THRESHOLDS_EVENTS_TARGET 128
-#define SOFTLIMIT_EVENTS_TARGET 1024
-
static inline bool task_is_dying(void)
{
return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
@@ -305,6 +304,12 @@ static const unsigned int memcg_node_stat_items[] = {
#ifdef CONFIG_SWAP
NR_SWAPCACHE,
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ PGPROMOTE_SUCCESS,
+#endif
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
+ PGDEMOTE_KHUGEPAGED,
};
static const unsigned int memcg_stat_items[] = {
@@ -320,24 +325,27 @@ static const unsigned int memcg_stat_items[] = {
#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
ARRAY_SIZE(memcg_stat_items))
-static int8_t mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
+#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
+static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
static void init_memcg_stats(void)
{
- int8_t i, j = 0;
+ u8 i, j = 0;
+
+ BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
- BUILD_BUG_ON(MEMCG_NR_STAT >= S8_MAX);
+ memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
- for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i)
- mem_cgroup_stats_index[memcg_node_stat_items[i]] = ++j;
+ for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
+ mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
- for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i)
- mem_cgroup_stats_index[memcg_stat_items[i]] = ++j;
+ for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
+ mem_cgroup_stats_index[memcg_stat_items[i]] = j;
}
static inline int memcg_stats_index(int idx)
{
- return mem_cgroup_stats_index[idx] - 1;
+ return mem_cgroup_stats_index[idx];
}
struct lruvec_stats_percpu {
@@ -369,7 +377,7 @@ unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
return node_page_state(lruvec_pgdat(lruvec), idx);
i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return 0;
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
@@ -392,7 +400,7 @@ unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return 0;
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
@@ -406,8 +414,10 @@ unsigned long lruvec_page_state_local(struct lruvec *lruvec,
/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
+#ifdef CONFIG_MEMCG_V1
PGPGIN,
PGPGOUT,
+#endif
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
@@ -432,24 +442,32 @@ static const unsigned int memcg_vm_event_stat[] = {
THP_SWPOUT,
THP_SWPOUT_FALLBACK,
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ NUMA_PAGE_MIGRATE,
+ NUMA_PTE_UPDATES,
+ NUMA_HINT_FAULTS,
+#endif
};
#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
-static int8_t mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
+static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
static void init_memcg_events(void)
{
- int8_t i;
+ u8 i;
- BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= S8_MAX);
+ BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
+
+ memset(mem_cgroup_events_index, U8_MAX,
+ sizeof(mem_cgroup_events_index));
for (i = 0; i < NR_MEMCG_EVENTS; ++i)
- mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
+ mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}
static inline int memcg_events_index(enum vm_event_item idx)
{
- return mem_cgroup_events_index[idx] - 1;
+ return mem_cgroup_events_index[idx];
}
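
/*
 * Sketch of the sentinel-based index translation the hunks above switch to:
 * a sparse enum space is mapped onto a dense array, with U8_MAX marking
 * untracked items instead of the previous store-index-plus-one dance with
 * int8_t and -1. Names and numbers here are illustrative, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_ITEMS 64
#define TRACKED_COUNT 3
#define BAD_IDX(i) ((uint32_t)(i) >= UINT8_MAX)

static const unsigned int tracked[TRACKED_COUNT] = { 4, 17, 42 };
static uint8_t index_map[NR_ITEMS];

static void init_map(void)
{
	uint8_t i;

	memset(index_map, UINT8_MAX, sizeof(index_map)); /* all "missing" */
	for (i = 0; i < TRACKED_COUNT; i++)
		index_map[tracked[i]] = i; /* dense slot, no +1/-1 offset */
}

int main(void)
{
	init_map();
	printf("item 17 -> slot %d\n", index_map[17]);          /* 1 */
	printf("item 5 missing: %d\n", BAD_IDX(index_map[5]));  /* 1 */
	return 0;
}
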
struct memcg_vmstats_percpu {
@@ -469,10 +487,6 @@ struct memcg_vmstats_percpu {
/* Delta calculation for lockless upward propagation */
long state_prev[MEMCG_VMSTAT_SIZE];
unsigned long events_prev[NR_MEMCG_EVENTS];
-
- /* Cgroup1: threshold notifications & softlimit tree updates */
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
} ____cacheline_aligned;
struct memcg_vmstats {
@@ -621,7 +635,7 @@ unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
long x;
int i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return 0;
x = READ_ONCE(memcg->vmstats->state[i]);
@@ -662,7 +676,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
if (mem_cgroup_disabled())
return;
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return;
__this_cpu_add(memcg->vmstats_percpu->state[i], val);
@@ -675,7 +689,7 @@ unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
long x;
int i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return 0;
x = READ_ONCE(memcg->vmstats->state_local[i]);
@@ -694,7 +708,7 @@ static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
struct mem_cgroup *memcg;
int i = memcg_stats_index(idx);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return;
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
@@ -810,7 +824,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
if (mem_cgroup_disabled())
return;
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
return;
memcg_stats_lock();
@@ -823,7 +837,7 @@ unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
int i = memcg_events_index(event);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
return 0;
return READ_ONCE(memcg->vmstats->events[i]);
@@ -833,50 +847,12 @@ unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
int i = memcg_events_index(event);
- if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event))
+ if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
return 0;
return READ_ONCE(memcg->vmstats->events_local[i]);
}
-void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
-{
- /* pagein of a big page is an event. So, ignore page size */
- if (nr_pages > 0)
- __count_memcg_events(memcg, PGPGIN, 1);
- else {
- __count_memcg_events(memcg, PGPGOUT, 1);
- nr_pages = -nr_pages; /* for event */
- }
-
- __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
-}
-
-bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
- enum mem_cgroup_events_target target)
-{
- unsigned long val, next;
-
- val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
- next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
- /* from time_after() in jiffies.h */
- if ((long)(next - val) < 0) {
- switch (target) {
- case MEM_CGROUP_TARGET_THRESH:
- next = val + THRESHOLDS_EVENTS_TARGET;
- break;
- case MEM_CGROUP_TARGET_SOFTLIMIT:
- next = val + SOFTLIMIT_EVENTS_TARGET;
- break;
- default:
- break;
- }
- __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
- return true;
- }
- return false;
-}
-
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
/*
@@ -971,6 +947,24 @@ again:
}
/**
+ * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
+ * @folio: folio from which memcg should be extracted.
+ */
+struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ if (mem_cgroup_disabled())
+ return NULL;
+
+ rcu_read_lock();
+ if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
+ memcg = root_mem_cgroup;
+ rcu_read_unlock();
+ return memcg;
+}
+
+/**
* mem_cgroup_iter - iterate over memory cgroup hierarchy
* @root: hierarchy root
* @prev: previously returned memcg, NULL on first invocation
@@ -992,9 +986,9 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup_reclaim_cookie *reclaim)
{
struct mem_cgroup_reclaim_iter *iter;
- struct cgroup_subsys_state *css = NULL;
- struct mem_cgroup *memcg = NULL;
- struct mem_cgroup *pos = NULL;
+ struct cgroup_subsys_state *css;
+ struct mem_cgroup *pos;
+ struct mem_cgroup *next;
if (mem_cgroup_disabled())
return NULL;
@@ -1003,81 +997,67 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
root = root_mem_cgroup;
rcu_read_lock();
+restart:
+ next = NULL;
if (reclaim) {
- struct mem_cgroup_per_node *mz;
+ int gen;
+ int nid = reclaim->pgdat->node_id;
- mz = root->nodeinfo[reclaim->pgdat->node_id];
- iter = &mz->iter;
+ iter = &root->nodeinfo[nid]->iter;
+ gen = atomic_read(&iter->generation);
/*
* On start, join the current reclaim iteration cycle.
* Exit when a concurrent walker completes it.
*/
if (!prev)
- reclaim->generation = iter->generation;
- else if (reclaim->generation != iter->generation)
+ reclaim->generation = gen;
+ else if (reclaim->generation != gen)
goto out_unlock;
- while (1) {
- pos = READ_ONCE(iter->position);
- if (!pos || css_tryget(&pos->css))
- break;
- /*
- * css reference reached zero, so iter->position will
- * be cleared by ->css_released. However, we should not
- * rely on this happening soon, because ->css_released
- * is called from a work queue, and by busy-waiting we
- * might block it. So we clear iter->position right
- * away.
- */
- (void)cmpxchg(&iter->position, pos, NULL);
- }
- } else if (prev) {
+ pos = READ_ONCE(iter->position);
+ } else
pos = prev;
- }
- if (pos)
- css = &pos->css;
-
- for (;;) {
- css = css_next_descendant_pre(css, &root->css);
- if (!css) {
- /*
- * Reclaimers share the hierarchy walk, and a
- * new one might jump in right at the end of
- * the hierarchy - make sure they see at least
- * one group and restart from the beginning.
- */
- if (!prev)
- continue;
- break;
- }
+ css = pos ? &pos->css : NULL;
+ while ((css = css_next_descendant_pre(css, &root->css))) {
/*
* Verify the css and acquire a reference. The root
* is provided by the caller, so we know it's alive
* and kicking, and don't take an extra reference.
*/
- if (css == &root->css || css_tryget(css)) {
- memcg = mem_cgroup_from_css(css);
+ if (css == &root->css || css_tryget(css))
break;
- }
}
+ next = mem_cgroup_from_css(css);
+
if (reclaim) {
/*
* The position could have already been updated by a competing
* thread, so check that the value hasn't changed since we read
* it to avoid reclaiming from the same cgroup twice.
*/
- (void)cmpxchg(&iter->position, pos, memcg);
+ if (cmpxchg(&iter->position, pos, next) != pos) {
+ if (css && css != &root->css)
+ css_put(css);
+ goto restart;
+ }
- if (pos)
- css_put(&pos->css);
+ if (!next) {
+ atomic_inc(&iter->generation);
- if (!memcg)
- iter->generation++;
+ /*
+ * Reclaimers share the hierarchy walk, and a
+ * new one might jump in right at the end of
+ * the hierarchy - make sure they see at least
+ * one group and restart from the beginning.
+ */
+ if (!prev)
+ goto restart;
+ }
}
out_unlock:
@@ -1085,7 +1065,7 @@ out_unlock:
if (prev && prev != root)
css_put(&prev->css);
- return memcg;
+ return next;
}
/**
@@ -1375,6 +1355,13 @@ static const struct memory_stat memory_stats[] = {
{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
+
+ { "pgdemote_kswapd", PGDEMOTE_KSWAPD },
+ { "pgdemote_direct", PGDEMOTE_DIRECT },
+ { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
+#ifdef CONFIG_NUMA_BALANCING
+ { "pgpromote_success", PGPROMOTE_SUCCESS },
+#endif
};
/* The actual unit of the state item, not the same as the output unit */
@@ -1399,6 +1386,9 @@ static int memcg_page_state_output_unit(int item)
/*
* Workingset state is actually in pages, but we export it to userspace
* as a scalar count of events, so special case it here.
+ *
+ * Demotion and promotion activities are exported in pages, consistent
+ * with their global counterparts.
*/
switch (item) {
case WORKINGSET_REFAULT_ANON:
@@ -1408,6 +1398,12 @@ static int memcg_page_state_output_unit(int item)
case WORKINGSET_RESTORE_ANON:
case WORKINGSET_RESTORE_FILE:
case WORKINGSET_NODERECLAIM:
+ case PGDEMOTE_KSWAPD:
+ case PGDEMOTE_DIRECT:
+ case PGDEMOTE_KHUGEPAGED:
+#ifdef CONFIG_NUMA_BALANCING
+ case PGPROMOTE_SUCCESS:
+#endif
return 1;
default:
return memcg_page_state_unit(item);
@@ -1466,10 +1462,11 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
memcg_events(memcg, PGSTEAL_KHUGEPAGED));
for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
+#ifdef CONFIG_MEMCG_V1
if (memcg_vm_event_stat[i] == PGPGIN ||
memcg_vm_event_stat[i] == PGPGOUT)
continue;
-
+#endif
seq_buf_printf(s, "%s %lu\n",
vm_event_name(memcg_vm_event_stat[i]),
memcg_events(memcg, memcg_vm_event_stat[i]));
@@ -2366,7 +2363,7 @@ void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
- VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
+ VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
/*
* Any of the following ensures page's memcg stability:
*
@@ -2388,11 +2385,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
css_get(&memcg->css);
commit_charge(folio, memcg);
-
- local_irq_disable();
- mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
- memcg1_check_events(memcg, folio_nid(folio));
- local_irq_enable();
+ memcg1_commit_charge(folio, memcg);
}
static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
@@ -2446,37 +2439,7 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
/*
* Returns a pointer to the memory cgroup to which the kernel object is charged.
- *
- * A passed kernel object can be a slab object, vmalloc object or a generic
- * kernel page, so different mechanisms for getting the memory cgroup pointer
- * should be used.
- *
- * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
- * can not know for sure how the kernel object is implemented.
- * mem_cgroup_from_obj() can be safely used in such cases.
- *
- * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
- * cgroup_mutex, etc.
- */
-struct mem_cgroup *mem_cgroup_from_obj(void *p)
-{
- struct folio *folio;
-
- if (mem_cgroup_disabled())
- return NULL;
-
- if (unlikely(is_vmalloc_addr(p)))
- folio = page_folio(vmalloc_to_page(p));
- else
- folio = virt_to_folio(p);
-
- return mem_cgroup_from_obj_folio(folio, p);
-}
-
-/*
- * Returns a pointer to the memory cgroup to which the kernel object is charged.
- * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects,
- * allocated using vmalloc().
+ * It is not suitable for objects allocated using vmalloc().
*
* A passed kernel object must be a slab object or a generic kernel page.
*
@@ -3057,12 +3020,11 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void split_page_memcg(struct page *head, int old_order, int new_order)
{
struct folio *folio = page_folio(head);
- struct mem_cgroup *memcg = folio_memcg(folio);
int i;
unsigned int old_nr = 1 << old_order;
unsigned int new_nr = 1 << new_order;
- if (mem_cgroup_disabled() || !memcg)
+ if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
return;
for (i = new_nr; i < old_nr; i += new_nr)
@@ -3071,7 +3033,7 @@ void split_page_memcg(struct page *head, int old_order, int new_order)
if (folio_memcg_kmem(folio))
obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
else
- css_get_many(&memcg->css, old_nr / new_nr - 1);
+ css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
}
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
@@ -3385,29 +3347,12 @@ static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
*/
#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
-static DEFINE_IDR(mem_cgroup_idr);
-static DEFINE_SPINLOCK(memcg_idr_lock);
-
-static int mem_cgroup_alloc_id(void)
-{
- int ret;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&memcg_idr_lock);
- ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
- GFP_NOWAIT);
- spin_unlock(&memcg_idr_lock);
- idr_preload_end();
- return ret;
-}
+static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
if (memcg->id.id > 0) {
- spin_lock(&memcg_idr_lock);
- idr_remove(&mem_cgroup_idr, memcg->id.id);
- spin_unlock(&memcg_idr_lock);
-
+ xa_erase(&mem_cgroup_ids, memcg->id.id);
memcg->id.id = 0;
}
}
@@ -3442,7 +3387,7 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
WARN_ON_ONCE(!rcu_read_lock_held());
- return idr_find(&mem_cgroup_idr, id);
+ return xa_load(&mem_cgroup_ids, id);
}
#ifdef CONFIG_SHRINKER_DEBUG
@@ -3517,6 +3462,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
+ memcg1_free_events(memcg);
kfree(memcg->vmstats);
free_percpu(memcg->vmstats_percpu);
kfree(memcg);
@@ -3535,17 +3481,17 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
struct mem_cgroup *memcg;
int node, cpu;
int __maybe_unused i;
- long error = -ENOMEM;
+ long error;
memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
if (!memcg)
- return ERR_PTR(error);
+ return ERR_PTR(-ENOMEM);
- memcg->id.id = mem_cgroup_alloc_id();
- if (memcg->id.id < 0) {
- error = memcg->id.id;
+ error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
+ XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
+ if (error)
goto fail;
- }
+ error = -ENOMEM;
memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
GFP_KERNEL_ACCOUNT);
@@ -3557,6 +3503,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
if (!memcg->vmstats_percpu)
goto fail;
+ if (!memcg1_alloc_events(memcg))
+ goto fail;
+
for_each_possible_cpu(cpu) {
if (parent)
pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
@@ -3574,6 +3523,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
INIT_WORK(&memcg->high_work, high_work_func);
vmpressure_init(&memcg->vmpressure);
+ INIT_LIST_HEAD(&memcg->memory_peaks);
+ INIT_LIST_HEAD(&memcg->swap_peaks);
+ spin_lock_init(&memcg->peaks_lock);
memcg->socket_pressure = jiffies;
memcg1_memcg_init(memcg);
memcg->kmemcg_id = -1;
@@ -3619,21 +3571,21 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (parent) {
WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
- page_counter_init(&memcg->memory, &parent->memory);
- page_counter_init(&memcg->swap, &parent->swap);
+ page_counter_init(&memcg->memory, &parent->memory, true);
+ page_counter_init(&memcg->swap, &parent->swap, false);
#ifdef CONFIG_MEMCG_V1
WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
- page_counter_init(&memcg->kmem, &parent->kmem);
- page_counter_init(&memcg->tcpmem, &parent->tcpmem);
+ page_counter_init(&memcg->kmem, &parent->kmem, false);
+ page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
#endif
} else {
init_memcg_stats();
init_memcg_events();
- page_counter_init(&memcg->memory, NULL);
- page_counter_init(&memcg->swap, NULL);
+ page_counter_init(&memcg->memory, NULL, true);
+ page_counter_init(&memcg->swap, NULL, false);
#ifdef CONFIG_MEMCG_V1
- page_counter_init(&memcg->kmem, NULL);
- page_counter_init(&memcg->tcpmem, NULL);
+ page_counter_init(&memcg->kmem, NULL, false);
+ page_counter_init(&memcg->tcpmem, NULL, false);
#endif
root_mem_cgroup = memcg;
return &memcg->css;
@@ -3682,9 +3634,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
* publish it here at the end of onlining. This matches the
* regular ID destruction during offlining.
*/
- spin_lock(&memcg_idr_lock);
- idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
- spin_unlock(&memcg_idr_lock);
+ xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
return 0;
offline_kmem:
@@ -3967,14 +3917,91 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
}
-static u64 memory_peak_read(struct cgroup_subsys_state *css,
- struct cftype *cft)
+#define OFP_PEAK_UNSET (-1UL)
+
+static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct cgroup_of_peak *ofp = of_peak(sf->private);
+ u64 fd_peak = READ_ONCE(ofp->value), peak;
+
+ /* User wants global or local peak? */
+ if (fd_peak == OFP_PEAK_UNSET)
+ peak = pc->watermark;
+ else
+ peak = max(fd_peak, READ_ONCE(pc->local_watermark));
+
+ seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
+ return 0;
+}
+
+static int memory_peak_show(struct seq_file *sf, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
- return (u64)memcg->memory.watermark * PAGE_SIZE;
+ return peak_show(sf, v, &memcg->memory);
}
+static int peak_open(struct kernfs_open_file *of)
+{
+ struct cgroup_of_peak *ofp = of_peak(of);
+
+ ofp->value = OFP_PEAK_UNSET;
+ return 0;
+}
+
+static void peak_release(struct kernfs_open_file *of)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ struct cgroup_of_peak *ofp = of_peak(of);
+
+ if (ofp->value == OFP_PEAK_UNSET) {
+ /* fast path (no writes on this fd) */
+ return;
+ }
+ spin_lock(&memcg->peaks_lock);
+ list_del(&ofp->list);
+ spin_unlock(&memcg->peaks_lock);
+}
+
+static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
+ loff_t off, struct page_counter *pc,
+ struct list_head *watchers)
+{
+ unsigned long usage;
+ struct cgroup_of_peak *peer_ctx;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ struct cgroup_of_peak *ofp = of_peak(of);
+
+ spin_lock(&memcg->peaks_lock);
+
+ usage = page_counter_read(pc);
+ WRITE_ONCE(pc->local_watermark, usage);
+
+ list_for_each_entry(peer_ctx, watchers, list)
+ if (usage > peer_ctx->value)
+ WRITE_ONCE(peer_ctx->value, usage);
+
+ /* initial write, register watcher */
+ if (ofp->value == OFP_PEAK_UNSET)
+ list_add(&ofp->list, watchers);
+
+ WRITE_ONCE(ofp->value, usage);
+ spin_unlock(&memcg->peaks_lock);
+
+ return nbytes;
+}
+
+static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+
+ return peak_write(of, buf, nbytes, off, &memcg->memory,
+ &memcg->memory_peaks);
+}
+
+#undef OFP_PEAK_UNSET
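
/*
 * Hedged usage sketch for the per-fd memory.peak semantics added above:
 * after a write on an open fd, reads on that same fd report the high
 * watermark since the write rather than the cgroup-lifetime peak. The
 * cgroup path below is hypothetical, and treating an arbitrary short
 * string as the "reset" payload is an assumption based on peak_write()
 * not inspecting buf.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/fs/cgroup/test/memory.peak", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "reset\n", 6) < 0) /* arm this fd's local watermark */
		perror("write");

	/* ... workload runs ... */

	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1); /* peak since the write above */
	if (n > 0) {
		buf[n] = '\0';
		printf("local peak: %s", buf);
	}
	close(fd);
	return 0;
}
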
+
static int memory_min_show(struct seq_file *m, void *v)
{
return seq_puts_memcg_tunable(m,
@@ -4324,7 +4351,10 @@ static struct cftype memory_files[] = {
{
.name = "peak",
.flags = CFTYPE_NOT_ON_ROOT,
- .read_u64 = memory_peak_read,
+ .open = peak_open,
+ .release = peak_release,
+ .seq_show = memory_peak_show,
+ .write = memory_peak_write,
},
{
.name = "min",
@@ -4528,14 +4558,15 @@ int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
/*
* mem_cgroup_swapin_uncharge_swap - uncharge swap slot
- * @entry: swap entry for which the page is charged
+ * @entry: the first swap entry for which the pages are charged
+ * @nr_pages: number of pages which will be uncharged
*
* Call this function after successfully adding the charged page to swapcache.
*
* Note: This function assumes the page for which swap slot is being uncharged
* is order 0 page.
*/
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
/*
* Cgroup1's unified memory+swap counter has been charged with the
@@ -4555,7 +4586,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
* let's not wait for it. The page already received a
* memory+swap charge, drop the swap entry duplicate.
*/
- mem_cgroup_uncharge_swap(entry, 1);
+ mem_cgroup_uncharge_swap(entry, nr_pages);
}
}
@@ -4574,8 +4605,6 @@ static inline void uncharge_gather_clear(struct uncharge_gather *ug)
static void uncharge_batch(const struct uncharge_gather *ug)
{
- unsigned long flags;
-
if (ug->nr_memory) {
page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
if (do_memsw_account())
@@ -4587,11 +4616,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
memcg1_oom_recover(ug->memcg);
}
- local_irq_save(flags);
- __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
- __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
- memcg1_check_events(ug->memcg, ug->nid);
- local_irq_restore(flags);
+ memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
/* drop reference from uncharge_folio */
css_put(&ug->memcg->css);
@@ -4606,7 +4631,8 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
!folio_test_hugetlb(folio) &&
- !list_empty(&folio->_deferred_list), folio);
+ !list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio), folio);
/*
* Nobody should be changing or seriously looking at
@@ -4664,7 +4690,7 @@ void __mem_cgroup_uncharge(struct folio *folio)
struct uncharge_gather ug;
/* Don't touch folio->lru of any random page, pre-check: */
- if (!folio_memcg(folio))
+ if (!folio_memcg_charged(folio))
return;
uncharge_gather_clear(&ug);
@@ -4698,7 +4724,6 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
{
struct mem_cgroup *memcg;
long nr_pages = folio_nr_pages(new);
- unsigned long flags;
VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
@@ -4709,7 +4734,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
return;
/* Page cache replacement: new folio already charged? */
- if (folio_memcg(new))
+ if (folio_memcg_charged(new))
return;
memcg = folio_memcg(old);
@@ -4726,11 +4751,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
css_get(&memcg->css);
commit_charge(new, memcg);
-
- local_irq_save(flags);
- mem_cgroup_charge_statistics(memcg, nr_pages);
- memcg1_check_events(memcg, folio_nid(new));
- local_irq_restore(flags);
+ memcg1_commit_charge(new, memcg);
}
/**
@@ -4966,17 +4987,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
page_counter_uncharge(&memcg->memsw, nr_entries);
}
- /*
- * Interrupts should be disabled here because the caller holds the
- * i_pages lock which is taken with interrupts-off. It is
- * important here to have the interrupts disabled because it is the
- * only synchronisation we have for updating the per-CPU variables.
- */
- memcg_stats_lock();
- mem_cgroup_charge_statistics(memcg, -nr_entries);
- memcg_stats_unlock();
- memcg1_check_events(memcg, folio_nid(folio));
-
+ memcg1_swapout(folio, memcg);
css_put(&memcg->css);
}
@@ -5116,12 +5127,20 @@ static u64 swap_current_read(struct cgroup_subsys_state *css,
return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}
-static u64 swap_peak_read(struct cgroup_subsys_state *css,
- struct cftype *cft)
+static int swap_peak_show(struct seq_file *sf, void *v)
{
- struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
+
+ return peak_show(sf, v, &memcg->swap);
+}
+
+static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
- return (u64)memcg->swap.watermark * PAGE_SIZE;
+ return peak_write(of, buf, nbytes, off, &memcg->swap,
+ &memcg->swap_peaks);
}
static int swap_high_show(struct seq_file *m, void *v)
@@ -5205,7 +5224,10 @@ static struct cftype swap_files[] = {
{
.name = "swap.peak",
.flags = CFTYPE_NOT_ON_ROOT,
- .read_u64 = swap_peak_read,
+ .open = peak_open,
+ .release = peak_release,
+ .seq_show = swap_peak_show,
+ .write = swap_peak_write,
},
{
.name = "swap.events",
diff --git a/mm/memfd.c b/mm/memfd.c
index e7b7c5294d59..c17c3ea701a1 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -79,23 +79,25 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
* alloc from. Also, the folio will be pinned for an indefinite
* amount of time, so it is not expected to be migrated away.
*/
- gfp_mask = htlb_alloc_mask(hstate_file(memfd));
+ struct hstate *h = hstate_file(memfd);
+
+ gfp_mask = htlb_alloc_mask(h);
gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
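+ /* The hugetlb page cache is indexed in huge-page units, not base pages. */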
+ idx >>= huge_page_order(h);
- folio = alloc_hugetlb_folio_nodemask(hstate_file(memfd),
- numa_node_id(),
- NULL,
- gfp_mask,
- false);
- if (folio && folio_try_get(folio)) {
+ folio = alloc_hugetlb_folio_reserve(h,
+ numa_node_id(),
+ NULL,
+ gfp_mask);
+ if (folio) {
err = hugetlb_add_to_page_cache(folio,
memfd->f_mapping,
idx);
if (err) {
folio_put(folio);
- free_huge_folio(folio);
return ERR_PTR(err);
}
+ folio_unlock(folio);
return folio;
}
return ERR_PTR(-ENOMEM);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7066fc84f351..96ce31e5a203 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1554,6 +1554,32 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
return ret;
}
+void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+ if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
+ struct address_space *mapping;
+
+ /*
+ * For hugetlb folios in shared mappings, try_to_unmap
+ * could potentially call huge_pmd_unshare. Because of
+ * this, take semaphore in write mode here and set
+ * TTU_RMAP_LOCKED to indicate we have taken the lock
+ * at this higher level.
+ */
+ mapping = hugetlb_folio_mapping_lock_write(folio);
+ if (!mapping) {
+ pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
+ folio_pfn(folio));
+ return;
+ }
+
+ try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
+ i_mmap_unlock_write(mapping);
+ } else {
+ try_to_unmap(folio, ttu);
+ }
+}
+
/*
* Do all that is necessary to remove user space mappings. Unmap
* the pages and send SIGBUS to the processes if the data was dirty.
@@ -1615,23 +1641,7 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
*/
collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
- if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
- /*
- * For hugetlb pages in shared mappings, try_to_unmap
- * could potentially call huge_pmd_unshare. Because of
- * this, take semaphore in write mode here and set
- * TTU_RMAP_LOCKED to indicate we have taken the lock
- * at this higher level.
- */
- mapping = hugetlb_folio_mapping_lock_write(folio);
- if (mapping) {
- try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
- i_mmap_unlock_write(mapping);
- } else
- pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
- } else {
- try_to_unmap(folio, ttu);
- }
+ unmap_poisoned_folio(folio, ttu);
unmap_success = !folio_mapped(folio);
if (!unmap_success)
@@ -2643,40 +2653,6 @@ EXPORT_SYMBOL(unpoison_memory);
#undef pr_fmt
#define pr_fmt(fmt) "Soft offline: " fmt
-static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
-{
- bool isolated = false;
-
- if (folio_test_hugetlb(folio)) {
- isolated = isolate_hugetlb(folio, pagelist);
- } else {
- bool lru = !__folio_test_movable(folio);
-
- if (lru)
- isolated = folio_isolate_lru(folio);
- else
- isolated = isolate_movable_page(&folio->page,
- ISOLATE_UNEVICTABLE);
-
- if (isolated) {
- list_add(&folio->lru, pagelist);
- if (lru)
- node_stat_add_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio));
- }
- }
-
- /*
- * If we succeed to isolate the folio, we grabbed another refcount on
- * the folio, so we can safely drop the one we got from get_any_page().
- * If we failed to isolate the folio, it means that we cannot go further
- * and we will return an error, so drop the reference we got from
- * get_any_page() as well.
- */
- folio_put(folio);
- return isolated;
-}
-
/*
* soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
* If the page is a non-dirty unmapped page-cache page, it simply invalidates.
@@ -2689,6 +2665,7 @@ static int soft_offline_in_use_page(struct page *page)
struct folio *folio = page_folio(page);
char const *msg_page[] = {"page", "hugepage"};
bool huge = folio_test_hugetlb(folio);
+ bool isolated;
LIST_HEAD(pagelist);
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
@@ -2728,7 +2705,18 @@ static int soft_offline_in_use_page(struct page *page)
return 0;
}
- if (mf_isolate_folio(folio, &pagelist)) {
+ isolated = isolate_folio_to_list(folio, &pagelist);
+
+ /*
+ * If we succeed to isolate the folio, we grabbed another refcount on
+ * the folio, so we can safely drop the one we got from get_any_page().
+ * If we failed to isolate the folio, it means that we cannot go further
+ * and we will return an error, so drop the reference we got from
+ * get_any_page() as well.
+ */
+ folio_put(folio);
+
+ if (isolated) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 4775b3a3dabe..fc14fe53e9b7 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -6,6 +6,7 @@
#include <linux/memory.h>
#include <linux/memory-tiers.h>
#include <linux/notifier.h>
+#include <linux/sched/sysctl.h>
#include "internal.h"
@@ -50,6 +51,24 @@ static const struct bus_type memory_tier_subsys = {
.dev_name = "memory_tier",
};
+#ifdef CONFIG_NUMA_BALANCING
+/**
+ * folio_use_access_time - check if a folio reuses cpupid for page access time
+ * @folio: folio to check
+ *
+ * A folio's _last_cpupid field is repurposed by memory tiering: in memory
+ * tiering mode, the cpupid of a slow memory folio (not toptier memory) is
+ * used to record the page access time instead.
+ *
+ * Return: true if the folio's _last_cpupid records page access time,
+ * false otherwise.
+ */
+bool folio_use_access_time(struct folio *folio)
+{
+ return (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
+ !node_is_toptier(folio_nid(folio));
+}
+#endif
+
#ifdef CONFIG_MIGRATION
static int top_tier_adistance;
/*
@@ -749,10 +768,10 @@ int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
pr_info(
"memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
"DRAM node %d.\n", nid, default_dram_perf_ref_nid);
- pr_info(" performance of reference DRAM node %d:\n",
- default_dram_perf_ref_nid);
+ pr_info(" performance of reference DRAM node %d from %s:\n",
+ default_dram_perf_ref_nid, default_dram_perf_ref_source);
dump_hmem_attrs(&default_dram_perf, " ");
- pr_info(" performance of DRAM node %d:\n", nid);
+ pr_info(" performance of DRAM node %d from %s:\n", nid, source);
dump_hmem_attrs(perf, " ");
pr_info(
" disable default DRAM node performance based abstract distance algorithm.\n");
@@ -895,13 +914,14 @@ static int __init memory_tier_init(void)
WARN_ON(!node_demotion);
#endif
- guard(mutex)(&memory_tier_lock);
+ mutex_lock(&memory_tier_lock);
/*
* For now we can have 4 faster memory tiers with smaller adistance
* than default DRAM tier.
*/
default_dram_type = mt_find_alloc_memory_type(MEMTIER_ADISTANCE_DRAM,
&default_memory_types);
+ mutex_unlock(&memory_tier_lock);
if (IS_ERR(default_dram_type))
panic("%s() failed to allocate default DRAM tier\n", __func__);
@@ -921,8 +941,7 @@ bool numa_demotion_enabled = false;
static ssize_t demotion_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%s\n",
- numa_demotion_enabled ? "true" : "false");
+ return sysfs_emit(buf, "%s\n", str_true_false(numa_demotion_enabled));
}
static ssize_t demotion_enabled_store(struct kobject *kobj,
diff --git a/mm/memory.c b/mm/memory.c
index ebfc9768f801..2366578015ad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -666,17 +666,16 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
return NULL;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd)
{
unsigned long pfn = pmd_pfn(pmd);
- /*
- * There is no pmd_special() but there may be special pmds, e.g.
- * in a direct-access (dax) mapping, so let's just replicate the
- * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
- */
+ /* Currently it's only used for huge pfnmaps */
+ if (unlikely(pmd_special(pmd)))
+ return NULL;
+
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
if (!pfn_valid(pfn))
@@ -927,8 +926,11 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
* We have a prealloc page, all good! Take it
* over and copy the page & arm it.
*/
+
+ if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
+ return -EHWPOISON;
+
*prealloc = NULL;
- copy_user_highpage(&new_folio->page, page, addr, src_vma);
__folio_mark_uptodate(new_folio);
folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(new_folio, dst_vma);
@@ -1167,8 +1169,9 @@ again:
/*
* If we need a pre-allocated page for this pte, drop the
* locks, allocate, and try again.
+ * If the copy failed due to hwpoison in the source page, break out.
*/
- if (unlikely(ret == -EAGAIN))
+ if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
break;
if (unlikely(prealloc)) {
/*
@@ -1198,7 +1201,7 @@ again:
goto out;
}
entry.val = 0;
- } else if (ret == -EBUSY) {
+ } else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) {
goto out;
} else if (ret == -EAGAIN) {
prealloc = folio_prealloc(src_mm, src_vma, addr, false);
@@ -3276,7 +3279,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
}
/**
- * vmf_anon_prepare - Prepare to handle an anonymous fault.
+ * __vmf_anon_prepare - Prepare to handle an anonymous fault.
* @vmf: The vm_fault descriptor passed from the fault handler.
*
* When preparing to insert an anonymous page into a VMA from a
@@ -3290,7 +3293,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
* Return: 0 if fault handling can proceed. Any other value should be
* returned to the caller.
*/
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
vm_fault_t ret = 0;
@@ -3298,10 +3301,8 @@ vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
if (likely(vma->anon_vma))
return 0;
if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
- if (!mmap_read_trylock(vma->vm_mm)) {
- vma_end_read(vma);
+ if (!mmap_read_trylock(vma->vm_mm))
return VM_FAULT_RETRY;
- }
}
if (__anon_vma_prepare(vma))
ret = VM_FAULT_OOM;
@@ -4003,6 +4004,194 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+static struct folio *__alloc_swap_folio(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
+ swp_entry_t entry;
+
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+ vmf->address, false);
+ if (!folio)
+ return NULL;
+
+ entry = pte_to_swp_entry(vmf->orig_pte);
+ if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
+ GFP_KERNEL, entry)) {
+ folio_put(folio);
+ return NULL;
+ }
+
+ return folio;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
+{
+ struct swap_info_struct *si = swp_swap_info(entry);
+ pgoff_t offset = swp_offset(entry);
+ int i;
+
+ /*
+ * We only get here when allocating a large folio for a direct
+ * swap_read_folio(), i.e. the faulting PTE has no swapcache entry. We
+ * must ensure none of the other PTEs in the range has one either;
+ * otherwise we might read from the swap device while the content is
+ * actually in the swapcache.
+ */
+ for (i = 0; i < max_nr; i++) {
+ if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
+ return i;
+ }
+
+ return i;
+}
+
+/*
+ * Check if the PTEs within a range are contiguous swap entries
+ * and have consistent swapcache, zeromap.
+ */
+static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
+{
+ unsigned long addr;
+ swp_entry_t entry;
+ int idx;
+ pte_t pte;
+
+ addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
+ idx = (vmf->address - addr) / PAGE_SIZE;
+ pte = ptep_get(ptep);
+
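+ /*
+ * The first PTE of the range must equal the faulting entry shifted
+ * back by idx; the whole range must then form one contiguous swap
+ * entry batch.
+ */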
+ if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx)))
+ return false;
+ entry = pte_to_swp_entry(pte);
+ if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages)
+ return false;
+
+ /*
+ * swap_read_folio() can't handle a large folio backed partly by
+ * different backends, and such hybrid folios are likely rare corner
+ * cases anyway. Similar checks may be needed once zswap supports
+ * large folios.
+ */
+ if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages))
+ return false;
+ if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages))
+ return false;
+
+ return true;
+}
+
+static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset,
+ unsigned long addr,
+ unsigned long orders)
+{
+ int order, nr;
+
+ order = highest_order(orders);
+
+ /*
+ * To swap in a THP with nr pages, we require that its first swap_offset
+ * is aligned with that number, as it was when the THP was swapped out.
+ * This helps filter out most invalid entries.
+ */
+ while (orders) {
+ nr = 1 << order;
+ if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr)
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
+}
+
+static struct folio *alloc_swap_folio(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long orders;
+ struct folio *folio;
+ unsigned long addr;
+ swp_entry_t entry;
+ spinlock_t *ptl;
+ pte_t *pte;
+ gfp_t gfp;
+ int order;
+
+ /*
+ * If uffd is active for the vma we need per-page fault fidelity to
+ * maintain the uffd semantics.
+ */
+ if (unlikely(userfaultfd_armed(vma)))
+ goto fallback;
+
+ /*
+ * A large swapped out folio could be partially or fully in zswap. We
+ * lack handling for such cases, so fall back to swapping in an
+ * order-0 folio.
+ */
+ if (!zswap_never_enabled())
+ goto fallback;
+
+ entry = pte_to_swp_entry(vmf->orig_pte);
+ /*
+ * Get a list of all the (large) orders below PMD_ORDER that are enabled
+ * and suitable for swapping THP.
+ */
+ orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+ TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
+ orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+ orders = thp_swap_suitable_orders(swp_offset(entry),
+ vmf->address, orders);
+
+ if (!orders)
+ goto fallback;
+
+ pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+ vmf->address & PMD_MASK, &ptl);
+ if (unlikely(!pte))
+ goto fallback;
+
+ /*
+ * For do_swap_page, find the highest order where the aligned range
+ * consists entirely of swap entries with contiguous swap offsets.
+ */
+ order = highest_order(orders);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ pte_unmap_unlock(pte, ptl);
+
+ /* Try allocating the highest of the remaining orders. */
+ gfp = vma_thp_gfp_mask(vma);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ folio = vma_alloc_folio(gfp, order, vma, addr, true);
+ if (folio) {
+ if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
+ gfp, entry))
+ return folio;
+ folio_put(folio);
+ }
+ order = next_order(&orders, order);
+ }
+
+fallback:
+ return __alloc_swap_folio(vmf);
+}
+#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
+static inline bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages)
+{
+ return false;
+}
+
+static struct folio *alloc_swap_folio(struct vm_fault *vmf)
+{
+ return __alloc_swap_folio(vmf);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
@@ -4091,35 +4280,34 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (!folio) {
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
- /*
- * Prevent parallel swapin from proceeding with
- * the cache flag. Otherwise, another thread may
- * finish swapin first, free the entry, and swapout
- * reusing the same entry. It's undetectable as
- * pte_same() returns true due to entry reuse.
- */
- if (swapcache_prepare(entry)) {
- /* Relax a bit to prevent rapid repeated page faults */
- schedule_timeout_uninterruptible(1);
- goto out;
- }
- need_clear_cache = true;
-
/* skip swapcache */
- folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
- vma, vmf->address, false);
- page = &folio->page;
+ folio = alloc_swap_folio(vmf);
if (folio) {
__folio_set_locked(folio);
__folio_set_swapbacked(folio);
- if (mem_cgroup_swapin_charge_folio(folio,
- vma->vm_mm, GFP_KERNEL,
- entry)) {
- ret = VM_FAULT_OOM;
+ nr_pages = folio_nr_pages(folio);
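+ /*
+ * A large folio spans a naturally aligned range of swap
+ * entries; operate on the aligned first entry.
+ */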
+ if (folio_test_large(folio))
+ entry.val = ALIGN_DOWN(entry.val, nr_pages);
+ /*
+ * Prevent parallel swapin from proceeding with
+ * the cache flag. Otherwise, another thread
+ * may finish swapin first, free the entry, and
+ * swapout reusing the same entry. It's
+ * undetectable as pte_same() returns true due
+ * to entry reuse.
+ */
+ if (swapcache_prepare(entry, nr_pages)) {
+ /*
+ * Relax a bit to prevent rapid
+ * repeated page faults.
+ */
+ schedule_timeout_uninterruptible(1);
goto out_page;
}
- mem_cgroup_swapin_uncharge_swap(entry);
+ need_clear_cache = true;
+
+ mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
shadow = get_shadow_from_swap_cache(entry);
if (shadow)
@@ -4133,10 +4321,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio->private = NULL;
}
} else {
- page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+ folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
vmf);
- if (page)
- folio = page_folio(page);
swapcache = folio;
}
@@ -4157,6 +4343,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
+ page = folio_file_page(folio, swp_offset(entry));
} else if (PageHWPoison(page)) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
@@ -4226,6 +4413,24 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out_nomap;
}
+ /* allocated large folios for SWP_SYNCHRONOUS_IO */
+ if (folio_test_large(folio) && !folio_test_swapcache(folio)) {
+ unsigned long nr = folio_nr_pages(folio);
+ unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
+ unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE;
+ pte_t *folio_ptep = vmf->pte - idx;
+ pte_t folio_pte = ptep_get(folio_ptep);
+
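+ /*
+ * The PTEs backing the whole folio must still form one
+ * contiguous batch of the expected swap entries; if not,
+ * bail out and let the fault be retried.
+ */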
+ if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
+ swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
+ goto out_nomap;
+
+ page_idx = idx;
+ address = folio_start;
+ ptep = folio_ptep;
+ goto check_folio;
+ }
+
nr_pages = 1;
page_idx = 0;
address = vmf->address;
@@ -4357,11 +4562,12 @@ check_folio:
folio_add_lru_vma(folio, vma);
} else if (!folio_test_anon(folio)) {
/*
- * We currently only expect small !anon folios, which are either
- * fully exclusive or fully shared. If we ever get large folios
- * here, we have to be careful.
+ * We currently only expect small !anon folios which are either
+ * fully exclusive or fully shared, or newly allocated large
+ * folios which are fully exclusive. If we ever get large
+ * folios within swapcache here, we have to be careful.
*/
- VM_WARN_ON_ONCE(folio_test_large(folio));
+ VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio));
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
folio_add_new_anon_rmap(folio, vma, address, rmap_flags);
} else {
@@ -4404,7 +4610,7 @@ unlock:
out:
/* Clear the swap cache pin for direct swapin after PTL unlock */
if (need_clear_cache)
- swapcache_clear(si, entry);
+ swapcache_clear(si, entry, nr_pages);
if (si)
put_swap_device(si);
return ret;
@@ -4420,7 +4626,7 @@ out_release:
folio_put(swapcache);
}
if (need_clear_cache)
- swapcache_clear(si, entry);
+ swapcache_clear(si, entry, nr_pages);
if (si)
put_swap_device(si);
return ret;
@@ -4614,9 +4820,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
folio_ref_add(folio, nr_pages - 1);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
-#endif
folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);
setpte:
@@ -5111,10 +5315,14 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
if (ret & VM_FAULT_DONE_COW)
return ret;
- copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+ if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) {
+ ret = VM_FAULT_HWPOISON;
+ goto unlock;
+ }
__folio_mark_uptodate(folio);
ret |= finish_fault(vmf);
+unlock:
unlock_page(vmf->page);
put_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
@@ -5219,16 +5427,46 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
return ret;
}
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
- unsigned long addr, int page_nid, int *flags)
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+ unsigned long addr, int *flags,
+ bool writable, int *last_cpupid)
{
struct vm_area_struct *vma = vmf->vma;
+ /*
+ * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+ * much anyway since they can be in shared cache state. This misses
+ * the case where a mapping is writable but the process never writes
+ * to it, yet pte_write gets cleared during protection updates and
+ * pte_dirty has unpredictable behaviour between PTE scan updates,
+ * background writeback, dirty balancing and application behaviour.
+ */
+ if (!writable)
+ *flags |= TNF_NO_GROUP;
+
+ /*
+ * Flag if the folio is shared between multiple address spaces. This
+ * is later used when determining whether to group tasks together.
+ */
+ if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
+ *flags |= TNF_SHARED;
+ /*
+ * In memory tiering mode, the cpupid of a slow memory page is
+ * used to record the page access time, so use the default value.
+ */
+ if (folio_use_access_time(folio))
+ *last_cpupid = (-1 & LAST_CPUPID_MASK);
+ else
+ *last_cpupid = folio_last_cpupid(folio);
+
/* Record the current PID accessing the VMA */
vma_set_access_pid_bit(vma);
count_vm_numa_event(NUMA_HINT_FAULTS);
- if (page_nid == numa_node_id()) {
+#ifdef CONFIG_NUMA_BALANCING
+ count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
+#endif
+ if (folio_nid(folio) == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
*flags |= TNF_FAULT_LOCAL;
}
@@ -5330,36 +5568,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (!folio || folio_is_zone_device(folio))
goto out_map;
- /*
- * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
- * much anyway since they can be in shared cache state. This misses
- * the case where a mapping is writable but the process never writes
- * to it but pte_write gets cleared during protection updates and
- * pte_dirty has unpredictable behaviour between PTE scan updates,
- * background writeback, dirty balancing and application behaviour.
- */
- if (!writable)
- flags |= TNF_NO_GROUP;
-
- /*
- * Flag if the folio is shared between multiple address spaces. This
- * is later used when determining whether to group tasks together
- */
- if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
- flags |= TNF_SHARED;
-
nid = folio_nid(folio);
nr_pages = folio_nr_pages(folio);
- /*
- * For memory tiering mode, cpupid of slow memory page is used
- * to record page access time. So use default value.
- */
- if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
- !node_is_toptier(nid))
- last_cpupid = (-1 & LAST_CPUPID_MASK);
- else
- last_cpupid = folio_last_cpupid(folio);
- target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
+
+ target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
+ writable, &last_cpupid);
if (target_nid == NUMA_NO_NODE)
goto out_map;
if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
@@ -6015,10 +6228,6 @@ retry:
if (!vma_start_read(vma))
goto inval;
- /* Check since vm_start/vm_end might change before we lock the VMA */
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
- goto inval_end_read;
-
/* Check if the VMA got isolated after we found it */
if (vma->detached) {
vma_end_read(vma);
@@ -6026,6 +6235,16 @@ retry:
/* The area was replaced with another one */
goto retry;
}
+ /*
+ * At this point, we have a stable reference to a VMA: The VMA is
+ * locked and we know it hasn't already been isolated.
+ * From here on, we can access the VMA without worrying about which
+ * fields are accessible for RCU readers.
+ */
+
+ /* Check since vm_start/vm_end might change before we lock the VMA */
+ if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+ goto inval_end_read;
rcu_read_unlock();
return vma;
@@ -6110,78 +6329,155 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
}
#endif /* __PAGETABLE_PMD_FOLDED */
+static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
+ spinlock_t *lock, pte_t *ptep,
+ pgprot_t pgprot, unsigned long pfn_base,
+ unsigned long addr_mask, bool writable,
+ bool special)
+{
+ args->lock = lock;
+ args->ptep = ptep;
+ args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
+ args->pgprot = pgprot;
+ args->writable = writable;
+ args->special = special;
+}
+
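+/*
+ * Assert that the caller holds a lock that keeps the VMA's page tables
+ * stable: the mmap lock, or i_mmap_rwsem for file-backed pfnmaps.
+ */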
+static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma)
+{
+#ifdef CONFIG_LOCKDEP
+ struct address_space *mapping = vma->vm_file->f_mapping;
+
+ if (mapping)
+ lockdep_assert(lockdep_is_held(&vma->vm_file->f_mapping->i_mmap_rwsem) ||
+ lockdep_is_held(&vma->vm_mm->mmap_lock));
+ else
+ lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
+#endif
+}
+
/**
- * follow_pte - look up PTE at a user virtual address
- * @vma: the memory mapping
- * @address: user virtual address
- * @ptepp: location to store found PTE
- * @ptlp: location to store the lock for the PTE
+ * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
+ * @args: Pointer to struct @follow_pfnmap_args
+ *
+ * The caller needs to setup args->vma and args->address to point to the
+ * virtual address as the target of such lookup. On a successful return,
+ * the results will be put into other output fields.
*
- * On a successful return, the pointer to the PTE is stored in @ptepp;
- * the corresponding lock is taken and its location is stored in @ptlp.
+ * After the caller has finished using the fields, it must invoke
+ * follow_pfnmap_end() to properly release the locks and resources
+ * held by the lookup.
*
- * The contents of the PTE are only stable until @ptlp is released using
- * pte_unmap_unlock(). This function will fail if the PTE is non-present.
- * Present PTEs may include PTEs that map refcounted pages, such as
- * anonymous folios in COW mappings.
+ * Between the start() and end() calls, the results in @args remain valid
+ * because the proper locks are held. After end() is called, all fields in
+ * @follow_pfnmap_args become invalid and must not be accessed further.
+ * Any use of the information after end() requires the caller to
+ * synchronize against page table updates itself; otherwise it can create
+ * a security bug.
*
- * Callers must be careful when relying on PTE content after
- * pte_unmap_unlock(). Especially if the PTE maps a refcounted page,
- * callers must protect against invalidation with MMU notifiers; otherwise
- * access to the PFN at a later point in time can trigger use-after-free.
+ * If the PTE maps a refcounted page, callers are responsible to protect
+ * against invalidation with MMU notifiers; otherwise access to the PFN at
+ * a later point in time can trigger use-after-free.
*
* Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
- * should be taken for read.
+ * should be taken for read, and the mmap semaphore cannot be released
+ * before the end() is invoked.
*
* This function must not be used to modify PTE content.
*
- * Return: zero on success, -ve otherwise.
+ * Return: zero on success, negative otherwise.
*/
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp)
+int follow_pfnmap_start(struct follow_pfnmap_args *args)
{
+ struct vm_area_struct *vma = args->vma;
+ unsigned long address = args->address;
struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep;
+ spinlock_t *lock;
+ pgd_t *pgdp;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+
+ pfnmap_lockdep_assert(vma);
- mmap_assert_locked(mm);
if (unlikely(address < vma->vm_start || address >= vma->vm_end))
goto out;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
goto out;
-
- pgd = pgd_offset(mm, address);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
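+ /*
+ * Walk the page table locklessly with READ_ONCE snapshots; on a leaf
+ * entry, take the matching page table lock and re-validate before
+ * publishing the result. Restart from the top if an entry changed.
+ */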
+retry:
+ pgdp = pgd_offset(mm, address);
+ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
goto out;
- p4d = p4d_offset(pgd, address);
- if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+ p4dp = p4d_offset(pgdp, address);
+ p4d = READ_ONCE(*p4dp);
+ if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
goto out;
- pud = pud_offset(p4d, address);
- if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+ pudp = pud_offset(p4dp, address);
+ pud = READ_ONCE(*pudp);
+ if (pud_none(pud))
goto out;
+ if (pud_leaf(pud)) {
+ lock = pud_lock(mm, pudp);
+ /* Re-read under the PTL so the leaf recheck sees the current entry. */
+ pud = READ_ONCE(*pudp);
+ if (unlikely(!pud_leaf(pud))) {
+ spin_unlock(lock);
+ goto retry;
+ }
+ pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
+ pud_pfn(pud), PUD_MASK, pud_write(pud),
+ pud_special(pud));
+ return 0;
+ }
- pmd = pmd_offset(pud, address);
- VM_BUG_ON(pmd_trans_huge(*pmd));
+ pmdp = pmd_offset(pudp, address);
+ pmd = pmdp_get_lockless(pmdp);
+ if (pmd_leaf(pmd)) {
+ lock = pmd_lock(mm, pmdp);
+ /* Re-read under the PTL so the leaf recheck sees the current entry. */
+ pmd = pmdp_get_lockless(pmdp);
+ if (unlikely(!pmd_leaf(pmd))) {
+ spin_unlock(lock);
+ goto retry;
+ }
+ pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
+ pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
+ pmd_special(pmd));
+ return 0;
+ }
- ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
+ ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
if (!ptep)
goto out;
- if (!pte_present(ptep_get(ptep)))
+ pte = ptep_get(ptep);
+ if (!pte_present(pte))
goto unlock;
- *ptepp = ptep;
+ pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
+ pte_pfn(pte), PAGE_MASK, pte_write(pte),
+ pte_special(pte));
return 0;
unlock:
- pte_unmap_unlock(ptep, *ptlp);
+ pte_unmap_unlock(ptep, lock);
out:
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(follow_pte);
+EXPORT_SYMBOL_GPL(follow_pfnmap_start);
+
+/**
+ * follow_pfnmap_end(): End a follow_pfnmap_start() process
+ * @args: Pointer to struct @follow_pfnmap_args
+ *
+ * Must be paired with follow_pfnmap_start(). See the start() function
+ * above for more information.
+ */
+void follow_pfnmap_end(struct follow_pfnmap_args *args)
+{
+ if (args->lock)
+ spin_unlock(args->lock);
+ if (args->ptep)
+ pte_unmap(args->ptep);
+}
+EXPORT_SYMBOL_GPL(follow_pfnmap_end);
#ifdef CONFIG_HAVE_IOREMAP_PROT
/**
@@ -6202,34 +6498,34 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
resource_size_t phys_addr;
unsigned long prot = 0;
void __iomem *maddr;
- pte_t *ptep, pte;
- spinlock_t *ptl;
int offset = offset_in_page(addr);
int ret = -EINVAL;
+ bool writable;
+ struct follow_pfnmap_args args = { .vma = vma, .address = addr };
retry:
- if (follow_pte(vma, addr, &ptep, &ptl))
+ if (follow_pfnmap_start(&args))
return -EINVAL;
- pte = ptep_get(ptep);
- pte_unmap_unlock(ptep, ptl);
-
- prot = pgprot_val(pte_pgprot(pte));
- phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+ prot = pgprot_val(args.pgprot);
+ phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
+ writable = args.writable;
+ follow_pfnmap_end(&args);
- if ((write & FOLL_WRITE) && !pte_write(pte))
+ if ((write & FOLL_WRITE) && !writable)
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (!maddr)
return -ENOMEM;
- if (follow_pte(vma, addr, &ptep, &ptl))
+ if (follow_pfnmap_start(&args))
goto out_unmap;
- if (!pte_same(pte, ptep_get(ptep))) {
- pte_unmap_unlock(ptep, ptl);
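+ /* Re-validate; if the mapping changed while unmapped, retry. */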
+ if ((prot != pgprot_val(args.pgprot)) ||
+ (phys_addr != (args.pfn << PAGE_SHIFT)) ||
+ (writable != args.writable)) {
+ follow_pfnmap_end(&args);
iounmap(maddr);
-
goto retry;
}
@@ -6238,7 +6534,7 @@ retry:
else
memcpy_fromio(buf, maddr + offset, len);
ret = len;
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
out_unmap:
iounmap(maddr);
@@ -6589,7 +6885,7 @@ long copy_folio_from_user(struct folio *dst_folio,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
static struct kmem_cache *page_ptl_cachep;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 951878ab627a..621ae1015106 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -366,7 +366,7 @@ struct page *pfn_to_online_page(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);
-int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
+int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
struct mhp_params *params)
{
const unsigned long end_pfn = pfn + nr_pages;
@@ -524,7 +524,7 @@ static void update_pgdat_span(struct pglist_data *pgdat)
pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
-void __ref remove_pfn_range_from_zone(struct zone *zone,
+void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages)
{
@@ -629,7 +629,7 @@ int restore_online_page_callback(online_page_callback_t callback)
EXPORT_SYMBOL_GPL(restore_online_page_callback);
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
-void __ref generic_online_page(struct page *page, unsigned int order)
+void generic_online_page(struct page *page, unsigned int order)
{
__free_pages_core(page, order, MEMINIT_HOTPLUG);
}
@@ -741,7 +741,7 @@ static inline void section_taint_zone_device(unsigned long pfn)
* (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
* zone stats (e.g., nr_isolate_pageblock) are touched.
*/
-void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
+void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages,
struct vmem_altmap *altmap, int migratetype)
{
@@ -1143,7 +1143,7 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
/*
* Must be called with mem_hotplug_lock in write mode.
*/
-int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+int online_pages(unsigned long pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
unsigned long flags;
@@ -1233,7 +1233,7 @@ failed_addition:
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
-static pg_data_t __ref *hotadd_init_pgdat(int nid)
+static pg_data_t *hotadd_init_pgdat(int nid)
{
struct pglist_data *pgdat;
@@ -1386,7 +1386,7 @@ bool mhp_supports_memmap_on_memory(void)
}
EXPORT_SYMBOL_GPL(mhp_supports_memmap_on_memory);
-static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size)
+static void remove_memory_blocks_and_altmaps(u64 start, u64 size)
{
unsigned long memblock_size = memory_block_size_bytes();
u64 cur_start;
@@ -1473,7 +1473,7 @@ out:
*
* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
*/
-int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
+int add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
enum memblock_flags memblock_flags = MEMBLOCK_NONE;
@@ -1580,7 +1580,7 @@ error_mem_hotplug_end:
}
/* requires device_hotplug_lock, see add_memory_resource() */
-int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
+int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
struct resource *res;
int ret;
@@ -1772,67 +1772,59 @@ found:
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
+ struct folio *folio;
unsigned long pfn;
- struct page *page, *head;
LIST_HEAD(source);
static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
- struct folio *folio;
- bool isolated;
+ struct page *page;
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
folio = page_folio(page);
- head = &folio->page;
- if (PageHuge(page)) {
- pfn = page_to_pfn(head) + compound_nr(head) - 1;
- isolate_hugetlb(folio, &source);
- continue;
- } else if (PageTransHuge(page))
- pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
+ /*
+ * No reference or lock is held on the folio, so it might
+ * be modified concurrently (e.g. split). As such,
+ * folio_nr_pages() may read garbage. This is fine as the outer
+ * loop will revisit the split folio later.
+ */
+ if (folio_test_large(folio))
+ pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
/*
* HWPoison pages have elevated reference counts so the migration would
* fail on them. It also doesn't make any sense to migrate them in the
* first place. Still try to unmap such a page in case it is still mapped
- * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
- * the unmap as the catch all safety net).
+ * (keep the unmap as the catch all safety net).
*/
- if (PageHWPoison(page)) {
+ if (folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
if (WARN_ON(folio_test_lru(folio)))
folio_isolate_lru(folio);
if (folio_mapped(folio))
- try_to_unmap(folio, TTU_IGNORE_MLOCK);
+ unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK);
continue;
}
- if (!get_page_unless_zero(page))
+ if (!folio_try_get(folio))
continue;
- /*
- * We can skip free pages. And we can deal with pages on
- * LRU and non-lru movable pages.
- */
- if (PageLRU(page))
- isolated = isolate_lru_page(page);
- else
- isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
- if (isolated) {
- list_add_tail(&page->lru, &source);
- if (!__PageMovable(page))
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_lru(page));
- } else {
+ if (unlikely(page_folio(page) != folio))
+ goto put_folio;
+
+ if (!isolate_folio_to_list(folio, &source)) {
if (__ratelimit(&migrate_rs)) {
- pr_warn("failed to isolate pfn %lx\n", pfn);
+ pr_warn("failed to isolate pfn %lx\n",
+ page_to_pfn(page));
dump_page(page, "isolation failed");
}
}
- put_page(page);
+put_folio:
+ folio_put(folio);
}
if (!list_empty(&source)) {
nodemask_t nmask = node_states[N_MEMORY];
@@ -1847,7 +1839,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* We have checked that migration range is on a single zone so
* we can use the nid of the first page to all the others.
*/
- mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+ mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
/*
* try to allocate from a different node but reuse this node
@@ -1860,11 +1852,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
ret = migrate_pages(&source, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
if (ret) {
- list_for_each_entry(page, &source, lru) {
+ list_for_each_entry(folio, &source, lru) {
if (__ratelimit(&migrate_rs)) {
pr_warn("migrating pfn %lx failed ret:%d\n",
- page_to_pfn(page), ret);
- dump_page(page, "migration failure");
+ folio_pfn(folio), ret);
+ dump_page(&folio->page,
+ "migration failure");
}
}
putback_movable_pages(&source);
@@ -1939,7 +1932,7 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
/*
* Must be called with mem_hotplug_lock in write mode.
*/
-int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
const unsigned long end_pfn = start_pfn + nr_pages;
@@ -2240,7 +2233,7 @@ static int memory_blocks_have_altmaps(u64 start, u64 size)
return 1;
}
-static int __ref try_remove_memory(u64 start, u64 size)
+static int try_remove_memory(u64 start, u64 size)
{
int rc, nid = NUMA_NO_NODE;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b858e22b259d..b646fab3e45e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -676,8 +676,10 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
tlb_gather_mmu(&tlb, vma->vm_mm);
nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
- if (nr_updated > 0)
+ if (nr_updated > 0) {
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
+ count_memcg_events_mm(vma->vm_mm, NUMA_PTE_UPDATES, nr_updated);
+ }
tlb_finish_mmu(&tlb);
@@ -1951,7 +1953,7 @@ unsigned int mempolicy_slab_node(void)
zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, highest_zoneidx,
&policy->nodes);
- return z->zone ? zone_to_nid(z->zone) : node;
+ return zonelist_zone(z) ? zonelist_node_idx(z) : node;
}
case MPOL_LOCAL:
return node;
@@ -2809,7 +2811,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
node_zonelist(thisnid, GFP_HIGHUSER),
gfp_zone(GFP_HIGHUSER),
&pol->nodes);
- polnid = zone_to_nid(z->zone);
+ polnid = zonelist_node_idx(z);
break;
default:
diff --git a/mm/migrate.c b/mm/migrate.c
index 923ea80ba744..df91248755e4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -20,7 +20,6 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
-#include <linux/nsproxy.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
@@ -35,21 +34,16 @@
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
-#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
-#include <linux/memremap.h>
-#include <linux/userfaultfd_k.h>
-#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
-#include <linux/oom.h>
#include <linux/memory.h>
-#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
+#include <linux/pagewalk.h>
#include <asm/tlbflush.h>
@@ -177,13 +171,83 @@ void putback_movable_pages(struct list_head *l)
}
}
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ bool isolated, lru;
+
+ if (folio_test_hugetlb(folio))
+ return isolate_hugetlb(folio, list);
+
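+ /* __folio_test_movable() marks non-LRU movable folios (e.g. balloon). */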
+ lru = !__folio_test_movable(folio);
+ if (lru)
+ isolated = folio_isolate_lru(folio);
+ else
+ isolated = isolate_movable_page(&folio->page,
+ ISOLATE_UNEVICTABLE);
+
+ if (!isolated)
+ return false;
+
+ list_add(&folio->lru, list);
+ if (lru)
+ node_stat_add_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
+
+ return true;
+}
+
+static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
+ struct folio *folio,
+ unsigned long idx)
+{
+ struct page *page = folio_page(folio, idx);
+ bool contains_data;
+ pte_t newpte;
+ void *addr;
+
+ VM_BUG_ON_PAGE(PageCompound(page), page);
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
+
+ if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
+ mm_forbids_zeropage(pvmw->vma->vm_mm))
+ return false;
+
+ /*
+ * The pmd entry mapping the old THP was flushed and the pte mapping
+ * this subpage is now non-present. If the subpage contains only zeros,
+ * map it to the shared zeropage.
+ */
+ addr = kmap_local_page(page);
+ contains_data = memchr_inv(addr, 0, PAGE_SIZE);
+ kunmap_local(addr);
+
+ if (contains_data)
+ return false;
+
+ newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
+ pvmw->vma->vm_page_prot));
+ set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
+
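+ /* The zeropage PTE is special; drop this PTE's anon-page accounting. */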
+ dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
+ return true;
+}
+
+struct rmap_walk_arg {
+ struct folio *folio;
+ bool map_unused_to_zeropage;
+};
+
/*
* Restore a potential migration pte to a working pte entry
*/
static bool remove_migration_pte(struct folio *folio,
- struct vm_area_struct *vma, unsigned long addr, void *old)
+ struct vm_area_struct *vma, unsigned long addr, void *arg)
{
- DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
+ struct rmap_walk_arg *rmap_walk_arg = arg;
+ DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
while (page_vma_mapped_walk(&pvmw)) {
rmap_t rmap_flags = RMAP_NONE;
@@ -207,6 +271,9 @@ static bool remove_migration_pte(struct folio *folio,
continue;
}
#endif
+ if (rmap_walk_arg->map_unused_to_zeropage &&
+ try_to_map_unused_to_zeropage(&pvmw, folio, idx))
+ continue;
folio_get(folio);
pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
@@ -285,14 +352,21 @@ static bool remove_migration_pte(struct folio *folio,
* Get rid of all migration entries and replace them by
* references to the indicated page.
*/
-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
+void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
{
+ struct rmap_walk_arg rmap_walk_arg = {
+ .folio = src,
+ .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
+ };
+
struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte,
- .arg = src,
+ .arg = &rmap_walk_arg,
};
- if (locked)
+ VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
+
+ if (flags & RMP_LOCKED)
rmap_walk_locked(dst, &rwc);
else
rmap_walk(dst, &rwc);
@@ -422,6 +496,8 @@ static int __folio_migrate_mapping(struct address_space *mapping,
/* No turning back from here */
newfolio->index = folio->index;
newfolio->mapping = folio->mapping;
+ if (folio_test_anon(folio) && folio_test_large(folio))
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
if (folio_test_swapbacked(folio))
__folio_set_swapbacked(newfolio);
@@ -446,6 +522,8 @@ static int __folio_migrate_mapping(struct address_space *mapping,
*/
newfolio->index = folio->index;
newfolio->mapping = folio->mapping;
+ if (folio_test_anon(folio) && folio_test_large(folio))
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
folio_ref_add(newfolio, nr); /* add cache reference */
if (folio_test_swapbacked(folio)) {
__folio_set_swapbacked(newfolio);
@@ -585,8 +663,6 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
int cpupid;
- if (folio_test_error(folio))
- folio_set_error(newfolio);
if (folio_test_referenced(folio))
folio_set_referenced(newfolio);
if (folio_test_uptodate(folio))
@@ -640,7 +716,8 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
folio_migrate_ksm(newfolio, folio);
/*
* Please do not reorder this without considering how mm/ksm.c's
- * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
+ * ksm_get_folio() depends upon ksm_migrate_page() and the
+ * swapcache flag.
*/
if (folio_test_swapcache(folio))
folio_clear_swapcache(folio);
@@ -666,6 +743,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
folio_set_readahead(newfolio);
folio_copy_owner(newfolio, folio);
+ pgalloc_tag_copy(newfolio, folio);
mem_cgroup_migrate(folio, newfolio);
}
@@ -904,7 +982,7 @@ static int writeout(struct address_space *mapping, struct folio *folio)
* At this point we know that the migration attempt cannot
* be successful.
*/
- remove_migration_ptes(folio, folio, false);
+ remove_migration_ptes(folio, folio, 0);
rc = mapping->a_ops->writepage(&folio->page, &wbc);
@@ -1068,7 +1146,7 @@ static void migrate_folio_undo_src(struct folio *src,
struct list_head *ret)
{
if (page_was_mapped)
- remove_migration_ptes(src, src, false);
+ remove_migration_ptes(src, src, 0);
/* Drop an anon_vma reference if we took one */
if (anon_vma)
put_anon_vma(anon_vma);
@@ -1118,7 +1196,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
int rc = -EAGAIN;
int old_page_state = 0;
struct anon_vma *anon_vma = NULL;
- bool is_lru = !__folio_test_movable(src);
+ bool is_lru = data_race(!__folio_test_movable(src));
bool locked = false;
bool dst_locked = false;
@@ -1306,7 +1384,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
lru_add_drain();
if (old_page_state & PAGE_WAS_MAPPED)
- remove_migration_ptes(src, dst, false);
+ remove_migration_ptes(src, dst, 0);
out_unlock_both:
folio_unlock(dst);
@@ -1444,7 +1522,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
if (page_was_mapped)
remove_migration_ptes(src,
- rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
+ rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
unlock_put_anon:
folio_unlock(dst);
@@ -1682,7 +1760,8 @@ static int migrate_pages_batch(struct list_head *from,
* use _deferred_list.
*/
if (nr_pages > 2 &&
- !list_empty(&folio->_deferred_list)) {
+ !list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio)) {
if (!try_split_folio(folio, split_folios, mode)) {
nr_failed++;
stats->nr_thp_failed += is_thp;
@@ -2111,76 +2190,66 @@ static int do_move_pages_to_node(struct list_head *pagelist, int node)
return err;
}
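+
+/*
+ * Isolate @folio for migration to @node: returns 1 when the folio was
+ * queued on @pagelist, 0 when it is already on the target node, and
+ * -errno on failure.
+ */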
+static int __add_folio_for_migration(struct folio *folio, int node,
+ struct list_head *pagelist, bool migrate_all)
+{
+ if (is_zero_folio(folio) || is_huge_zero_folio(folio))
+ return -EFAULT;
+
+ if (folio_is_zone_device(folio))
+ return -ENOENT;
+
+ if (folio_nid(folio) == node)
+ return 0;
+
+ if (folio_likely_mapped_shared(folio) && !migrate_all)
+ return -EACCES;
+
+ if (folio_test_hugetlb(folio)) {
+ if (isolate_hugetlb(folio, pagelist))
+ return 1;
+ } else if (folio_isolate_lru(folio)) {
+ list_add_tail(&folio->lru, pagelist);
+ node_stat_mod_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ folio_nr_pages(folio));
+ return 1;
+ }
+ return -EBUSY;
+}
+
/*
- * Resolves the given address to a struct page, isolates it from the LRU and
+ * Resolves the given address to a struct folio, isolates it from the LRU and
* puts it to the given pagelist.
* Returns:
- * errno - if the page cannot be found/isolated
+ * errno - if the folio cannot be found/isolated
* 0 - when it doesn't have to be migrated because it is already on the
* target node
* 1 - when it has been queued
*/
-static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
+static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
int node, struct list_head *pagelist, bool migrate_all)
{
struct vm_area_struct *vma;
- unsigned long addr;
- struct page *page;
+ struct folio_walk fw;
struct folio *folio;
- int err;
+ unsigned long addr;
+ int err = -EFAULT;
mmap_read_lock(mm);
addr = (unsigned long)untagged_addr_remote(mm, p);
- err = -EFAULT;
vma = vma_lookup(mm, addr);
- if (!vma || !vma_migratable(vma))
- goto out;
-
- /* FOLL_DUMP to ignore special (like zero) pages */
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto out;
-
- err = -ENOENT;
- if (!page)
- goto out;
-
- folio = page_folio(page);
- if (folio_is_zone_device(folio))
- goto out_putfolio;
-
- err = 0;
- if (folio_nid(folio) == node)
- goto out_putfolio;
-
- err = -EACCES;
- if (folio_likely_mapped_shared(folio) && !migrate_all)
- goto out_putfolio;
-
- err = -EBUSY;
- if (folio_test_hugetlb(folio)) {
- if (isolate_hugetlb(folio, pagelist))
- err = 1;
- } else {
- if (!folio_isolate_lru(folio))
- goto out_putfolio;
-
- err = 1;
- list_add_tail(&folio->lru, pagelist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ if (vma && vma_migratable(vma)) {
+ folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
+ if (folio) {
+ err = __add_folio_for_migration(folio, node, pagelist,
+ migrate_all);
+ folio_walk_end(&fw, vma);
+ } else {
+ err = -ENOENT;
+ }
}
-out_putfolio:
- /*
- * Either remove the duplicate refcount from folio_isolate_lru()
- * or drop the folio ref if it was not isolated.
- */
- folio_put(folio);
-out:
mmap_read_unlock(mm);
return err;
}
@@ -2274,8 +2343,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
* Errors in the page lookup or isolation are not fatal and we simply
* report them via status
*/
- err = add_page_for_migration(mm, p, current_node, &pagelist,
- flags & MPOL_MF_MOVE_ALL);
+ err = add_folio_for_migration(mm, p, current_node, &pagelist,
+ flags & MPOL_MF_MOVE_ALL);
if (err > 0) {
/* The page is successfully queued for migration */
@@ -2331,28 +2400,26 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
for (i = 0; i < nr_pages; i++) {
unsigned long addr = (unsigned long)(*pages);
struct vm_area_struct *vma;
- struct page *page;
+ struct folio_walk fw;
+ struct folio *folio;
int err = -EFAULT;
vma = vma_lookup(mm, addr);
if (!vma)
goto set_status;
- /* FOLL_DUMP to ignore special (like zero) pages */
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
- err = PTR_ERR(page);
- if (IS_ERR(page))
- goto set_status;
-
- err = -ENOENT;
- if (!page)
- goto set_status;
-
- if (!is_zone_device_page(page))
- err = page_to_nid(page);
-
- put_page(page);
+ folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
+ if (folio) {
+ if (is_zero_folio(folio) || is_huge_zero_folio(folio))
+ err = -EFAULT;
+ else if (folio_is_zone_device(folio))
+ err = -ENOENT;
+ else
+ err = folio_nid(folio);
+ folio_walk_end(&fw, vma);
+ } else {
+ err = -ENOENT;
+ }
set_status:
*status = err;
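Both conversions in this file follow the same folio_walk pattern replacing follow_page(FOLL_GET | FOLL_DUMP): the walk returns the folio with the page table lock held and takes no extra reference, and because FW_ZEROPAGE hands zero folios back instead of skipping them, the old FOLL_DUMP behaviour is recreated by the explicit is_zero_folio()/is_huge_zero_folio() checks. A sketch of the contract:

	struct folio_walk fw;
	struct folio *folio;

	folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
	if (folio) {
		/* folio is stable here: page table lock held, no ref taken */
		/* ... inspect folio_nid(), isolate, etc. ... */
		folio_walk_end(&fw, vma);	/* unlocks the page table */
	}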
@@ -2432,25 +2499,19 @@ static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
return current->mm;
}
- /* Find the mm_struct */
- rcu_read_lock();
- task = find_task_by_vpid(pid);
+ task = find_get_task_by_vpid(pid);
if (!task) {
- rcu_read_unlock();
return ERR_PTR(-ESRCH);
}
- get_task_struct(task);
/*
* Check if this process has the right to modify the specified
* process. Use the regular "ptrace_may_access()" checks.
*/
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
- rcu_read_unlock();
mm = ERR_PTR(-EPERM);
goto out;
}
- rcu_read_unlock();
mm = ERR_PTR(security_task_movememory(task));
if (IS_ERR(mm))
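find_get_task_by_vpid() folds together the RCU lookup and the reference grab that the deleted lines open-coded; its kernel/pid.c implementation is essentially:

	struct task_struct *find_get_task_by_vpid(pid_t nr)
	{
		struct task_struct *task;

		rcu_read_lock();
		task = find_task_by_vpid(nr);
		if (task)
			get_task_struct(task);
		rcu_read_unlock();

		return task;
	}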
@@ -2526,7 +2587,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
if (!zone_watermark_ok(zone, 0,
high_wmark_pages(zone) +
nr_migrate_pages,
- ZONE_MOVABLE, 0))
+ ZONE_MOVABLE, ALLOC_CMA))
continue;
return true;
}
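Passing ALLOC_CMA rather than 0 stops the watermark check from deducting free CMA pages from the zone's free count, so a target node whose headroom sits in CMA pageblocks is no longer wrongly judged unbalanced for NUMA-balancing migration.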
@@ -2627,6 +2688,8 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
int nr_remaining;
unsigned int nr_succeeded;
LIST_HEAD(migratepages);
+ struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
list_add(&folio->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
@@ -2636,10 +2699,13 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
putback_movable_pages(&migratepages);
if (nr_succeeded) {
count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
- if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
- mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
- nr_succeeded);
+ count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
+ if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
+ && !node_is_toptier(folio_nid(folio))
+ && node_is_toptier(node))
+ mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
}
+ mem_cgroup_put(memcg);
BUG_ON(!list_empty(&migratepages));
return nr_remaining ? -EAGAIN : 0;
}
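The memcg reference pins the cgroup, not the folio: it is taken before migrate_pages(), which may sleep, and the matching mem_cgroup_put() comes only after the per-memcg counters are updated, so the lruvec captured up front stays valid for the accounting. The pairing in isolation:

	struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);	/* css ref */
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

	/* ... migrate_pages() may sleep here ... */
	mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
	mem_cgroup_put(memcg);						/* release */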
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 6d66dc1c6ffa..9cf26592ac93 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -328,8 +328,8 @@ static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
/*
* One extra ref because caller holds an extra reference, either from
- * isolate_lru_page() for a regular page, or migrate_vma_collect() for
- * a device page.
+ * folio_isolate_lru() for a regular folio, or migrate_vma_collect() for
+ * a device folio.
*/
int extra = 1 + (page == fault_page);
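So extra is 1 for an ordinary folio (the single isolation or collect reference described in the comment) and 2 for the fault page, which the fault path pins with one additional reference.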
@@ -379,33 +379,33 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
continue;
}
- /* ZONE_DEVICE pages are not on LRU */
- if (!is_zone_device_page(page)) {
- if (!PageLRU(page) && allow_drain) {
+ folio = page_folio(page);
+ /* ZONE_DEVICE folios are not on LRU */
+ if (!folio_is_zone_device(folio)) {
+ if (!folio_test_lru(folio) && allow_drain) {
/* Drain CPU's lru cache */
lru_add_drain_all();
allow_drain = false;
}
- if (!isolate_lru_page(page)) {
+ if (!folio_isolate_lru(folio)) {
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
restore++;
continue;
}
/* Drop the reference we took in collect */
- put_page(page);
+ folio_put(folio);
}
- folio = page_folio(page);
if (folio_mapped(folio))
try_to_migrate(folio, 0);
- if (page_mapped(page) ||
+ if (folio_mapped(folio) ||
!migrate_vma_check_page(page, fault_page)) {
- if (!is_zone_device_page(page)) {
- get_page(page);
- putback_lru_page(page);
+ if (!folio_is_zone_device(folio)) {
+ folio_get(folio);
+ folio_putback_lru(folio);
}
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
@@ -424,7 +424,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
continue;
folio = page_folio(page);
- remove_migration_ptes(folio, folio, false);
+ remove_migration_ptes(folio, folio, 0);
src_pfns[i] = 0;
folio_unlock(folio);
@@ -708,7 +708,7 @@ static void __migrate_device_pages(unsigned long *src_pfns,
/*
* The only time there is no vma is when called from
- * migrate_device_coherent_page(). However this isn't
+ * migrate_device_coherent_folio(). However, this isn't
* called if the page could not be unmapped.
*/
VM_BUG_ON(!migrate);
@@ -815,42 +815,45 @@ void migrate_device_finalize(unsigned long *src_pfns,
unsigned long i;
for (i = 0; i < npages; i++) {
- struct folio *dst, *src;
+ struct folio *dst = NULL, *src = NULL;
struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
struct page *page = migrate_pfn_to_page(src_pfns[i]);
+ if (newpage)
+ dst = page_folio(newpage);
+
if (!page) {
- if (newpage) {
- unlock_page(newpage);
- put_page(newpage);
+ if (dst) {
+ folio_unlock(dst);
+ folio_put(dst);
}
continue;
}
- if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
- if (newpage) {
- unlock_page(newpage);
- put_page(newpage);
+ src = page_folio(page);
+
+ if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !dst) {
+ if (dst) {
+ folio_unlock(dst);
+ folio_put(dst);
}
- newpage = page;
+ dst = src;
}
- src = page_folio(page);
- dst = page_folio(newpage);
- remove_migration_ptes(src, dst, false);
+ remove_migration_ptes(src, dst, 0);
folio_unlock(src);
- if (is_zone_device_page(page))
- put_page(page);
+ if (folio_is_zone_device(src))
+ folio_put(src);
else
- putback_lru_page(page);
+ folio_putback_lru(src);
- if (newpage != page) {
- unlock_page(newpage);
- if (is_zone_device_page(newpage))
- put_page(newpage);
+ if (dst != src) {
+ folio_unlock(dst);
+ if (folio_is_zone_device(dst))
+ folio_put(dst);
else
- putback_lru_page(newpage);
+ folio_putback_lru(dst);
}
}
}
@@ -898,16 +901,17 @@ int migrate_device_range(unsigned long *src_pfns, unsigned long start,
unsigned long i, pfn;
for (pfn = start, i = 0; i < npages; pfn++, i++) {
- struct page *page = pfn_to_page(pfn);
+ struct folio *folio;
- if (!get_page_unless_zero(page)) {
+ folio = folio_get_nontail_page(pfn_to_page(pfn));
+ if (!folio) {
src_pfns[i] = 0;
continue;
}
- if (!trylock_page(page)) {
+ if (!folio_trylock(folio)) {
src_pfns[i] = 0;
- put_page(page);
+ folio_put(folio);
continue;
}
@@ -921,38 +925,38 @@ int migrate_device_range(unsigned long *src_pfns, unsigned long start,
EXPORT_SYMBOL(migrate_device_range);
/*
- * Migrate a device coherent page back to normal memory. The caller should have
- * a reference on page which will be copied to the new page if migration is
+ * Migrate a device coherent folio back to normal memory. The caller should have
+ * a reference on the folio which will be copied to the new folio if migration is
* successful or dropped on failure.
*/
-int migrate_device_coherent_page(struct page *page)
+int migrate_device_coherent_folio(struct folio *folio)
{
unsigned long src_pfn, dst_pfn = 0;
- struct page *dpage;
+ struct folio *dfolio;
- WARN_ON_ONCE(PageCompound(page));
+ WARN_ON_ONCE(folio_test_large(folio));
- lock_page(page);
- src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
+ folio_lock(folio);
+ src_pfn = migrate_pfn(folio_pfn(folio)) | MIGRATE_PFN_MIGRATE;
/*
* We don't have a VMA and don't need to walk the page tables to find
- * the source page. So call migrate_vma_unmap() directly to unmap the
- * page as migrate_vma_setup() will fail if args.vma == NULL.
+ * the source folio. So call migrate_vma_unmap() directly to unmap the
+ * folio as migrate_vma_setup() will fail if args.vma == NULL.
*/
migrate_device_unmap(&src_pfn, 1, NULL);
if (!(src_pfn & MIGRATE_PFN_MIGRATE))
return -EBUSY;
- dpage = alloc_page(GFP_USER | __GFP_NOWARN);
- if (dpage) {
- lock_page(dpage);
- dst_pfn = migrate_pfn(page_to_pfn(dpage));
+ dfolio = folio_alloc(GFP_USER | __GFP_NOWARN, 0);
+ if (dfolio) {
+ folio_lock(dfolio);
+ dst_pfn = migrate_pfn(folio_pfn(dfolio));
}
migrate_device_pages(&src_pfn, &dst_pfn, 1);
if (src_pfn & MIGRATE_PFN_MIGRATE)
- copy_highpage(dpage, page);
+ folio_copy(dfolio, folio);
migrate_device_finalize(&src_pfn, &dst_pfn, 1);
if (src_pfn & MIGRATE_PFN_MIGRATE)
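Since large folios are rejected by the WARN_ON_ONCE() above, folio_copy() here reduces to a single-page copy; conceptually:

	/* order-0 case only, as enforced above */
	copy_highpage(folio_page(dfolio, 0), folio_page(folio, 0));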
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 51960079875b..4ba5607aaf19 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1835,14 +1835,8 @@ void __init free_area_init(unsigned long *max_zone_pfn)
for_each_node(nid) {
pg_data_t *pgdat;
- if (!node_online(nid)) {
- /* Allocator not initialized yet */
- pgdat = arch_alloc_nodedata(nid);
- if (!pgdat)
- panic("Cannot allocate %zuB for node %d.\n",
- sizeof(*pgdat), nid);
- arch_refresh_nodedata(nid, pgdat);
- }
+ if (!node_online(nid))
+ alloc_offline_node_data(nid);
pgdat = NODE_DATA(nid);
free_area_init_node(nid);
@@ -1939,7 +1933,7 @@ static void __init deferred_free_pages(unsigned long pfn,
}
/* Accept chunks smaller than MAX_PAGE_ORDER upfront */
- accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
+ accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE);
for (i = 0; i < nr_pages; i++, page++, pfn++) {
if (pageblock_aligned(pfn))
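The converted call covers the same physical range as before: PFN_PHYS(n) is n << PAGE_SHIFT, so the old exclusive end PFN_PHYS(pfn + nr_pages) equals PFN_PHYS(pfn) + nr_pages * PAGE_SIZE; only the second parameter's meaning changes, from end address to size.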
diff --git a/mm/mmap.c b/mm/mmap.c
index 6ddb278a5ee8..dd4b35a25aeb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -76,16 +76,6 @@ int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
-static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
- struct vm_area_struct *vma, struct vm_area_struct *prev,
- struct vm_area_struct *next, unsigned long start,
- unsigned long end, unsigned long tree_end, bool mm_wr_locked);
-
-static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
-{
- return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
-}
-
/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
@@ -102,100 +92,6 @@ void vma_set_page_prot(struct vm_area_struct *vma)
}
/*
- * Requires inode->i_mapping->i_mmap_rwsem
- */
-static void __remove_shared_vm_struct(struct vm_area_struct *vma,
- struct address_space *mapping)
-{
- if (vma_is_shared_maywrite(vma))
- mapping_unmap_writable(mapping);
-
- flush_dcache_mmap_lock(mapping);
- vma_interval_tree_remove(vma, &mapping->i_mmap);
- flush_dcache_mmap_unlock(mapping);
-}
-
-/*
- * Unlink a file-based vm structure from its interval tree, to hide
- * vma from rmap and vmtruncate before freeing its page tables.
- */
-void unlink_file_vma(struct vm_area_struct *vma)
-{
- struct file *file = vma->vm_file;
-
- if (file) {
- struct address_space *mapping = file->f_mapping;
- i_mmap_lock_write(mapping);
- __remove_shared_vm_struct(vma, mapping);
- i_mmap_unlock_write(mapping);
- }
-}
-
-void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
-{
- vb->count = 0;
-}
-
-static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
-{
- struct address_space *mapping;
- int i;
-
- mapping = vb->vmas[0]->vm_file->f_mapping;
- i_mmap_lock_write(mapping);
- for (i = 0; i < vb->count; i++) {
- VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
- __remove_shared_vm_struct(vb->vmas[i], mapping);
- }
- i_mmap_unlock_write(mapping);
-
- unlink_file_vma_batch_init(vb);
-}
-
-void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
- struct vm_area_struct *vma)
-{
- if (vma->vm_file == NULL)
- return;
-
- if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
- vb->count == ARRAY_SIZE(vb->vmas))
- unlink_file_vma_batch_process(vb);
-
- vb->vmas[vb->count] = vma;
- vb->count++;
-}
-
-void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
-{
- if (vb->count > 0)
- unlink_file_vma_batch_process(vb);
-}
-
-/*
- * Close a vm structure and free it.
- */
-static void remove_vma(struct vm_area_struct *vma, bool unreachable)
-{
- might_sleep();
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
- if (vma->vm_file)
- fput(vma->vm_file);
- mpol_put(vma_policy(vma));
- if (unreachable)
- __vm_area_free(vma);
- else
- vm_area_free(vma);
-}
-
-static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
- unsigned long min)
-{
- return mas_prev(&vmi->mas, min);
-}
-
-/*
* check_brk_limits() - Use platform specific check of range & verify mlock
* limits.
* @addr: The address to check
@@ -273,11 +169,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out; /* mapping intersects with an existing non-brk vma. */
/*
* mm->brk must be protected by write mmap_lock.
- * do_vma_munmap() will drop the lock on success, so update it
- * before calling do_vma_munmap().
+ * do_vmi_align_munmap() will drop the lock on success, so
+ * update it before calling do_vmi_align_munmap().
*/
mm->brk = brk;
- if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
+ if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
+ /* unlock = */ true))
goto out;
goto success_unlocked;
@@ -318,875 +215,6 @@ out:
return origbrk;
}
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
-static void validate_mm(struct mm_struct *mm)
-{
- int bug = 0;
- int i = 0;
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, mm, 0);
-
- mt_validate(&mm->mm_mt);
- for_each_vma(vmi, vma) {
-#ifdef CONFIG_DEBUG_VM_RB
- struct anon_vma *anon_vma = vma->anon_vma;
- struct anon_vma_chain *avc;
-#endif
- unsigned long vmi_start, vmi_end;
- bool warn = 0;
-
- vmi_start = vma_iter_addr(&vmi);
- vmi_end = vma_iter_end(&vmi);
- if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
- warn = 1;
-
- if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
- warn = 1;
-
- if (warn) {
- pr_emerg("issue in %s\n", current->comm);
- dump_stack();
- dump_vma(vma);
- pr_emerg("tree range: %px start %lx end %lx\n", vma,
- vmi_start, vmi_end - 1);
- vma_iter_dump_tree(&vmi);
- }
-
-#ifdef CONFIG_DEBUG_VM_RB
- if (anon_vma) {
- anon_vma_lock_read(anon_vma);
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_verify(avc);
- anon_vma_unlock_read(anon_vma);
- }
-#endif
- i++;
- }
- if (i != mm->map_count) {
- pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
- bug = 1;
- }
- VM_BUG_ON_MM(bug, mm);
-}
-
-#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
-#define validate_mm(mm) do { } while (0)
-#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
-
-/*
- * vma has some anon_vma assigned, and is already inserted on that
- * anon_vma's interval trees.
- *
- * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
- * vma must be removed from the anon_vma's interval trees using
- * anon_vma_interval_tree_pre_update_vma().
- *
- * After the update, the vma will be reinserted using
- * anon_vma_interval_tree_post_update_vma().
- *
- * The entire update must be protected by exclusive mmap_lock and by
- * the root anon_vma's mutex.
- */
-static inline void
-anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
-{
- struct anon_vma_chain *avc;
-
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
-}
-
-static inline void
-anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
-{
- struct anon_vma_chain *avc;
-
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
-}
-
-static unsigned long count_vma_pages_range(struct mm_struct *mm,
- unsigned long addr, unsigned long end)
-{
- VMA_ITERATOR(vmi, mm, addr);
- struct vm_area_struct *vma;
- unsigned long nr_pages = 0;
-
- for_each_vma_range(vmi, vma, end) {
- unsigned long vm_start = max(addr, vma->vm_start);
- unsigned long vm_end = min(end, vma->vm_end);
-
- nr_pages += PHYS_PFN(vm_end - vm_start);
- }
-
- return nr_pages;
-}
-
-static void __vma_link_file(struct vm_area_struct *vma,
- struct address_space *mapping)
-{
- if (vma_is_shared_maywrite(vma))
- mapping_allow_writable(mapping);
-
- flush_dcache_mmap_lock(mapping);
- vma_interval_tree_insert(vma, &mapping->i_mmap);
- flush_dcache_mmap_unlock(mapping);
-}
-
-static void vma_link_file(struct vm_area_struct *vma)
-{
- struct file *file = vma->vm_file;
- struct address_space *mapping;
-
- if (file) {
- mapping = file->f_mapping;
- i_mmap_lock_write(mapping);
- __vma_link_file(vma, mapping);
- i_mmap_unlock_write(mapping);
- }
-}
-
-static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
-{
- VMA_ITERATOR(vmi, mm, 0);
-
- vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
- if (vma_iter_prealloc(&vmi, vma))
- return -ENOMEM;
-
- vma_start_write(vma);
- vma_iter_store(&vmi, vma);
- vma_link_file(vma);
- mm->map_count++;
- validate_mm(mm);
- return 0;
-}
-
-/*
- * init_multi_vma_prep() - Initializer for struct vma_prepare
- * @vp: The vma_prepare struct
- * @vma: The vma that will be altered once locked
- * @next: The next vma if it is to be adjusted
- * @remove: The first vma to be removed
- * @remove2: The second vma to be removed
- */
-static inline void init_multi_vma_prep(struct vma_prepare *vp,
- struct vm_area_struct *vma, struct vm_area_struct *next,
- struct vm_area_struct *remove, struct vm_area_struct *remove2)
-{
- memset(vp, 0, sizeof(struct vma_prepare));
- vp->vma = vma;
- vp->anon_vma = vma->anon_vma;
- vp->remove = remove;
- vp->remove2 = remove2;
- vp->adj_next = next;
- if (!vp->anon_vma && next)
- vp->anon_vma = next->anon_vma;
-
- vp->file = vma->vm_file;
- if (vp->file)
- vp->mapping = vma->vm_file->f_mapping;
-
-}
-
-/*
- * init_vma_prep() - Initializer wrapper for vma_prepare struct
- * @vp: The vma_prepare struct
- * @vma: The vma that will be altered once locked
- */
-static inline void init_vma_prep(struct vma_prepare *vp,
- struct vm_area_struct *vma)
-{
- init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
-}
-
-
-/*
- * vma_prepare() - Helper function for handling locking VMAs prior to altering
- * @vp: The initialized vma_prepare struct
- */
-static inline void vma_prepare(struct vma_prepare *vp)
-{
- if (vp->file) {
- uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
-
- if (vp->adj_next)
- uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
- vp->adj_next->vm_end);
-
- i_mmap_lock_write(vp->mapping);
- if (vp->insert && vp->insert->vm_file) {
- /*
- * Put into interval tree now, so instantiated pages
- * are visible to arm/parisc __flush_dcache_page
- * throughout; but we cannot insert into address
- * space until vma start or end is updated.
- */
- __vma_link_file(vp->insert,
- vp->insert->vm_file->f_mapping);
- }
- }
-
- if (vp->anon_vma) {
- anon_vma_lock_write(vp->anon_vma);
- anon_vma_interval_tree_pre_update_vma(vp->vma);
- if (vp->adj_next)
- anon_vma_interval_tree_pre_update_vma(vp->adj_next);
- }
-
- if (vp->file) {
- flush_dcache_mmap_lock(vp->mapping);
- vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
- if (vp->adj_next)
- vma_interval_tree_remove(vp->adj_next,
- &vp->mapping->i_mmap);
- }
-
-}
-
-/*
- * vma_complete- Helper function for handling the unlocking after altering VMAs,
- * or for inserting a VMA.
- *
- * @vp: The vma_prepare struct
- * @vmi: The vma iterator
- * @mm: The mm_struct
- */
-static inline void vma_complete(struct vma_prepare *vp,
- struct vma_iterator *vmi, struct mm_struct *mm)
-{
- if (vp->file) {
- if (vp->adj_next)
- vma_interval_tree_insert(vp->adj_next,
- &vp->mapping->i_mmap);
- vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
- flush_dcache_mmap_unlock(vp->mapping);
- }
-
- if (vp->remove && vp->file) {
- __remove_shared_vm_struct(vp->remove, vp->mapping);
- if (vp->remove2)
- __remove_shared_vm_struct(vp->remove2, vp->mapping);
- } else if (vp->insert) {
- /*
- * split_vma has split insert from vma, and needs
- * us to insert it before dropping the locks
- * (it may either follow vma or precede it).
- */
- vma_iter_store(vmi, vp->insert);
- mm->map_count++;
- }
-
- if (vp->anon_vma) {
- anon_vma_interval_tree_post_update_vma(vp->vma);
- if (vp->adj_next)
- anon_vma_interval_tree_post_update_vma(vp->adj_next);
- anon_vma_unlock_write(vp->anon_vma);
- }
-
- if (vp->file) {
- i_mmap_unlock_write(vp->mapping);
- uprobe_mmap(vp->vma);
-
- if (vp->adj_next)
- uprobe_mmap(vp->adj_next);
- }
-
- if (vp->remove) {
-again:
- vma_mark_detached(vp->remove, true);
- if (vp->file) {
- uprobe_munmap(vp->remove, vp->remove->vm_start,
- vp->remove->vm_end);
- fput(vp->file);
- }
- if (vp->remove->anon_vma)
- anon_vma_merge(vp->vma, vp->remove);
- mm->map_count--;
- mpol_put(vma_policy(vp->remove));
- if (!vp->remove2)
- WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
- vm_area_free(vp->remove);
-
- /*
- * In mprotect's case 6 (see comments on vma_merge),
- * we are removing both mid and next vmas
- */
- if (vp->remove2) {
- vp->remove = vp->remove2;
- vp->remove2 = NULL;
- goto again;
- }
- }
- if (vp->insert && vp->file)
- uprobe_mmap(vp->insert);
- validate_mm(mm);
-}
-
-/*
- * dup_anon_vma() - Helper function to duplicate anon_vma
- * @dst: The destination VMA
- * @src: The source VMA
- * @dup: Pointer to the destination VMA when successful.
- *
- * Returns: 0 on success.
- */
-static inline int dup_anon_vma(struct vm_area_struct *dst,
- struct vm_area_struct *src, struct vm_area_struct **dup)
-{
- /*
- * Easily overlooked: when mprotect shifts the boundary, make sure the
- * expanding vma has anon_vma set if the shrinking vma had, to cover any
- * anon pages imported.
- */
- if (src->anon_vma && !dst->anon_vma) {
- int ret;
-
- vma_assert_write_locked(dst);
- dst->anon_vma = src->anon_vma;
- ret = anon_vma_clone(dst, src);
- if (ret)
- return ret;
-
- *dup = dst;
- }
-
- return 0;
-}
-
-/*
- * vma_expand - Expand an existing VMA
- *
- * @vmi: The vma iterator
- * @vma: The vma to expand
- * @start: The start of the vma
- * @end: The exclusive end of the vma
- * @pgoff: The page offset of vma
- * @next: The current of next vma.
- *
- * Expand @vma to @start and @end. Can expand off the start and end. Will
- * expand over @next if it's different from @vma and @end == @next->vm_end.
- * Checking if the @vma can expand and merge with @next needs to be handled by
- * the caller.
- *
- * Returns: 0 on success
- */
-int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *next)
-{
- struct vm_area_struct *anon_dup = NULL;
- bool remove_next = false;
- struct vma_prepare vp;
-
- vma_start_write(vma);
- if (next && (vma != next) && (end == next->vm_end)) {
- int ret;
-
- remove_next = true;
- vma_start_write(next);
- ret = dup_anon_vma(vma, next, &anon_dup);
- if (ret)
- return ret;
- }
-
- init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
- /* Not merging but overwriting any part of next is not handled. */
- VM_WARN_ON(next && !vp.remove &&
- next != vma && end > next->vm_start);
- /* Only handles expanding */
- VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
-
- /* Note: vma iterator must be pointing to 'start' */
- vma_iter_config(vmi, start, end);
- if (vma_iter_prealloc(vmi, vma))
- goto nomem;
-
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, start, end, 0);
- vma_set_range(vma, start, end, pgoff);
- vma_iter_store(vmi, vma);
-
- vma_complete(&vp, vmi, vma->vm_mm);
- return 0;
-
-nomem:
- if (anon_dup)
- unlink_anon_vmas(anon_dup);
- return -ENOMEM;
-}
-
-/*
- * vma_shrink() - Reduce an existing VMAs memory area
- * @vmi: The vma iterator
- * @vma: The VMA to modify
- * @start: The new start
- * @end: The new end
- *
- * Returns: 0 on success, -ENOMEM otherwise
- */
-int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff)
-{
- struct vma_prepare vp;
-
- WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
-
- if (vma->vm_start < start)
- vma_iter_config(vmi, vma->vm_start, start);
- else
- vma_iter_config(vmi, end, vma->vm_end);
-
- if (vma_iter_prealloc(vmi, NULL))
- return -ENOMEM;
-
- vma_start_write(vma);
-
- init_vma_prep(&vp, vma);
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, start, end, 0);
-
- vma_iter_clear(vmi);
- vma_set_range(vma, start, end, pgoff);
- vma_complete(&vp, vmi, vma->vm_mm);
- return 0;
-}
-
-/*
- * If the vma has a ->close operation then the driver probably needs to release
- * per-vma resources, so we don't attempt to merge those if the caller indicates
- * the current vma may be removed as part of the merge.
- */
-static inline bool is_mergeable_vma(struct vm_area_struct *vma,
- struct file *file, unsigned long vm_flags,
- struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name, bool may_remove_vma)
-{
- /*
- * VM_SOFTDIRTY should not prevent from VMA merging, if we
- * match the flags but dirty bit -- the caller should mark
- * merged VMA as dirty. If dirty bit won't be excluded from
- * comparison, we increase pressure on the memory system forcing
- * the kernel to generate new VMAs when old one could be
- * extended instead.
- */
- if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
- return false;
- if (vma->vm_file != file)
- return false;
- if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
- return false;
- if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
- return false;
- if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
- return false;
- return true;
-}
-
-static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
- struct anon_vma *anon_vma2, struct vm_area_struct *vma)
-{
- /*
- * The list_is_singular() test is to avoid merging VMA cloned from
- * parents. This can improve scalability caused by anon_vma lock.
- */
- if ((!anon_vma1 || !anon_vma2) && (!vma ||
- list_is_singular(&vma->anon_vma_chain)))
- return true;
- return anon_vma1 == anon_vma2;
-}
-
-/*
- * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
- * in front of (at a lower virtual address and file offset than) the vma.
- *
- * We cannot merge two vmas if they have differently assigned (non-NULL)
- * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
- *
- * We don't check here for the merged mmap wrapping around the end of pagecache
- * indices (16TB on ia32) because do_mmap() does not permit mmap's which
- * wrap, nor mmaps which cover the final page at index -1UL.
- *
- * We assume the vma may be removed as part of the merge.
- */
-static bool
-can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
- struct anon_vma *anon_vma, struct file *file,
- pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name)
-{
- if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
- is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
- if (vma->vm_pgoff == vm_pgoff)
- return true;
- }
- return false;
-}
-
-/*
- * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
- * beyond (at a higher virtual address and file offset than) the vma.
- *
- * We cannot merge two vmas if they have differently assigned (non-NULL)
- * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
- *
- * We assume that vma is not removed as part of the merge.
- */
-static bool
-can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
- struct anon_vma *anon_vma, struct file *file,
- pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name)
-{
- if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
- is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
- pgoff_t vm_pglen;
- vm_pglen = vma_pages(vma);
- if (vma->vm_pgoff + vm_pglen == vm_pgoff)
- return true;
- }
- return false;
-}
-
-/*
- * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
- * figure out whether that can be merged with its predecessor or its
- * successor. Or both (it neatly fills a hole).
- *
- * In most cases - when called for mmap, brk or mremap - [addr,end) is
- * certain not to be mapped by the time vma_merge is called; but when
- * called for mprotect, it is certain to be already mapped (either at
- * an offset within prev, or at the start of next), and the flags of
- * this area are about to be changed to vm_flags - and the no-change
- * case has already been eliminated.
- *
- * The following mprotect cases have to be considered, where **** is
- * the area passed down from mprotect_fixup, never extending beyond one
- * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
- * at the same address as **** and is of the same or larger span, and
- * NNNN the next vma after ****:
- *
- * **** **** ****
- * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC
- * cannot merge might become might become
- * PPNNNNNNNNNN PPPPPPPPPPCC
- * mmap, brk or case 4 below case 5 below
- * mremap move:
- * **** ****
- * PPPP NNNN PPPPCCCCNNNN
- * might become might become
- * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
- * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or
- * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8
- *
- * It is important for case 8 that the vma CCCC overlapping the
- * region **** is never going to extended over NNNN. Instead NNNN must
- * be extended in region **** and CCCC must be removed. This way in
- * all cases where vma_merge succeeds, the moment vma_merge drops the
- * rmap_locks, the properties of the merged vma will be already
- * correct for the whole merged range. Some of those properties like
- * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
- * be correct for the whole merged range immediately after the
- * rmap_locks are released. Otherwise if NNNN would be removed and
- * CCCC would be extended over the NNNN range, remove_migration_ptes
- * or other rmap walkers (if working on addresses beyond the "end"
- * parameter) may establish ptes with the wrong permissions of CCCC
- * instead of the right permissions of NNNN.
- *
- * In the code below:
- * PPPP is represented by *prev
- * CCCC is represented by *curr or not represented at all (NULL)
- * NNNN is represented by *next or not represented at all (NULL)
- * **** is not represented - it will be merged and the vma containing the
- * area is returned, or the function will return NULL
- */
-static struct vm_area_struct
-*vma_merge(struct vma_iterator *vmi, struct vm_area_struct *prev,
- struct vm_area_struct *src, unsigned long addr, unsigned long end,
- unsigned long vm_flags, pgoff_t pgoff, struct mempolicy *policy,
- struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- struct anon_vma_name *anon_name)
-{
- struct mm_struct *mm = src->vm_mm;
- struct anon_vma *anon_vma = src->anon_vma;
- struct file *file = src->vm_file;
- struct vm_area_struct *curr, *next, *res;
- struct vm_area_struct *vma, *adjust, *remove, *remove2;
- struct vm_area_struct *anon_dup = NULL;
- struct vma_prepare vp;
- pgoff_t vma_pgoff;
- int err = 0;
- bool merge_prev = false;
- bool merge_next = false;
- bool vma_expanded = false;
- unsigned long vma_start = addr;
- unsigned long vma_end = end;
- pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
- long adj_start = 0;
-
- /*
- * We later require that vma->vm_flags == vm_flags,
- * so this tests vma->vm_flags & VM_SPECIAL, too.
- */
- if (vm_flags & VM_SPECIAL)
- return NULL;
-
- /* Does the input range span an existing VMA? (cases 5 - 8) */
- curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
-
- if (!curr || /* cases 1 - 4 */
- end == curr->vm_end) /* cases 6 - 8, adjacent VMA */
- next = vma_lookup(mm, end);
- else
- next = NULL; /* case 5 */
-
- if (prev) {
- vma_start = prev->vm_start;
- vma_pgoff = prev->vm_pgoff;
-
- /* Can we merge the predecessor? */
- if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
- && can_vma_merge_after(prev, vm_flags, anon_vma, file,
- pgoff, vm_userfaultfd_ctx, anon_name)) {
- merge_prev = true;
- vma_prev(vmi);
- }
- }
-
- /* Can we merge the successor? */
- if (next && mpol_equal(policy, vma_policy(next)) &&
- can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
- vm_userfaultfd_ctx, anon_name)) {
- merge_next = true;
- }
-
- /* Verify some invariant that must be enforced by the caller. */
- VM_WARN_ON(prev && addr <= prev->vm_start);
- VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
- VM_WARN_ON(addr >= end);
-
- if (!merge_prev && !merge_next)
- return NULL; /* Not mergeable. */
-
- if (merge_prev)
- vma_start_write(prev);
-
- res = vma = prev;
- remove = remove2 = adjust = NULL;
-
- /* Can we merge both the predecessor and the successor? */
- if (merge_prev && merge_next &&
- is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
- vma_start_write(next);
- remove = next; /* case 1 */
- vma_end = next->vm_end;
- err = dup_anon_vma(prev, next, &anon_dup);
- if (curr) { /* case 6 */
- vma_start_write(curr);
- remove = curr;
- remove2 = next;
- /*
- * Note that the dup_anon_vma below cannot overwrite err
- * since the first caller would do nothing unless next
- * has an anon_vma.
- */
- if (!next->anon_vma)
- err = dup_anon_vma(prev, curr, &anon_dup);
- }
- } else if (merge_prev) { /* case 2 */
- if (curr) {
- vma_start_write(curr);
- if (end == curr->vm_end) { /* case 7 */
- /*
- * can_vma_merge_after() assumed we would not be
- * removing prev vma, so it skipped the check
- * for vm_ops->close, but we are removing curr
- */
- if (curr->vm_ops && curr->vm_ops->close)
- err = -EINVAL;
- remove = curr;
- } else { /* case 5 */
- adjust = curr;
- adj_start = (end - curr->vm_start);
- }
- if (!err)
- err = dup_anon_vma(prev, curr, &anon_dup);
- }
- } else { /* merge_next */
- vma_start_write(next);
- res = next;
- if (prev && addr < prev->vm_end) { /* case 4 */
- vma_start_write(prev);
- vma_end = addr;
- adjust = next;
- adj_start = -(prev->vm_end - addr);
- err = dup_anon_vma(next, prev, &anon_dup);
- } else {
- /*
- * Note that cases 3 and 8 are the ONLY ones where prev
- * is permitted to be (but is not necessarily) NULL.
- */
- vma = next; /* case 3 */
- vma_start = addr;
- vma_end = next->vm_end;
- vma_pgoff = next->vm_pgoff - pglen;
- if (curr) { /* case 8 */
- vma_pgoff = curr->vm_pgoff;
- vma_start_write(curr);
- remove = curr;
- err = dup_anon_vma(next, curr, &anon_dup);
- }
- }
- }
-
- /* Error in anon_vma clone. */
- if (err)
- goto anon_vma_fail;
-
- if (vma_start < vma->vm_start || vma_end > vma->vm_end)
- vma_expanded = true;
-
- if (vma_expanded) {
- vma_iter_config(vmi, vma_start, vma_end);
- } else {
- vma_iter_config(vmi, adjust->vm_start + adj_start,
- adjust->vm_end);
- }
-
- if (vma_iter_prealloc(vmi, vma))
- goto prealloc_fail;
-
- init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
- VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
- vp.anon_vma != adjust->anon_vma);
-
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
- vma_set_range(vma, vma_start, vma_end, vma_pgoff);
-
- if (vma_expanded)
- vma_iter_store(vmi, vma);
-
- if (adj_start) {
- adjust->vm_start += adj_start;
- adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
- if (adj_start < 0) {
- WARN_ON(vma_expanded);
- vma_iter_store(vmi, next);
- }
- }
-
- vma_complete(&vp, vmi, mm);
- khugepaged_enter_vma(res, vm_flags);
- return res;
-
-prealloc_fail:
- if (anon_dup)
- unlink_anon_vmas(anon_dup);
-
-anon_vma_fail:
- vma_iter_set(vmi, addr);
- vma_iter_load(vmi);
- return NULL;
-}
-
-/*
- * Rough compatibility check to quickly see if it's even worth looking
- * at sharing an anon_vma.
- *
- * They need to have the same vm_file, and the flags can only differ
- * in things that mprotect may change.
- *
- * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
- * we can merge the two vma's. For example, we refuse to merge a vma if
- * there is a vm_ops->close() function, because that indicates that the
- * driver is doing some kind of reference counting. But that doesn't
- * really matter for the anon_vma sharing case.
- */
-static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
-{
- return a->vm_end == b->vm_start &&
- mpol_equal(vma_policy(a), vma_policy(b)) &&
- a->vm_file == b->vm_file &&
- !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
- b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
-}
-
-/*
- * Do some basic sanity checking to see if we can re-use the anon_vma
- * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
- * the same as 'old', the other will be the new one that is trying
- * to share the anon_vma.
- *
- * NOTE! This runs with mmap_lock held for reading, so it is possible that
- * the anon_vma of 'old' is concurrently in the process of being set up
- * by another page fault trying to merge _that_. But that's ok: if it
- * is being set up, that automatically means that it will be a singleton
- * acceptable for merging, so we can do all of this optimistically. But
- * we do that READ_ONCE() to make sure that we never re-load the pointer.
- *
- * IOW: that the "list_is_singular()" test on the anon_vma_chain only
- * matters for the 'stable anon_vma' case (ie the thing we want to avoid
- * is to return an anon_vma that is "complex" due to having gone through
- * a fork).
- *
- * We also make sure that the two vma's are compatible (adjacent,
- * and with the same memory policies). That's all stable, even with just
- * a read lock on the mmap_lock.
- */
-static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
-{
- if (anon_vma_compatible(a, b)) {
- struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
-
- if (anon_vma && list_is_singular(&old->anon_vma_chain))
- return anon_vma;
- }
- return NULL;
-}
-
-/*
- * find_mergeable_anon_vma is used by anon_vma_prepare, to check
- * neighbouring vmas for a suitable anon_vma, before it goes off
- * to allocate a new anon_vma. It checks because a repetitive
- * sequence of mprotects and faults may otherwise lead to distinct
- * anon_vmas being allocated, preventing vma merge in subsequent
- * mprotect.
- */
-struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = NULL;
- struct vm_area_struct *prev, *next;
- VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
-
- /* Try next first. */
- next = vma_iter_load(&vmi);
- if (next) {
- anon_vma = reusable_anon_vma(next, vma, next);
- if (anon_vma)
- return anon_vma;
- }
-
- prev = vma_prev(&vmi);
- VM_BUG_ON_VMA(prev != vma, vma);
- prev = vma_prev(&vmi);
- /* Try prev next. */
- if (prev)
- anon_vma = reusable_anon_vma(prev, prev, vma);
-
- /*
- * We might reach here with anon_vma == NULL if we can't find
- * any reusable anon_vma.
- * There's no absolute need to look only at touching neighbours:
- * we could search further afield for "compatible" anon_vmas.
- * But it would probably just be a waste of time searching,
- * or lead to too many vmas hanging off the same anon_vma.
- * We're trying to allow mprotect remerging later on,
- * not trying to minimize memory used for anon_vmas.
- */
- return anon_vma;
-}
-
/*
* If a hint addr is less than mmap_min_addr change hint to be as
* low as possible but still greater than mmap_min_addr
@@ -1549,85 +577,6 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
-static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
-{
- return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
-}
-
-static bool vma_is_shared_writable(struct vm_area_struct *vma)
-{
- return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
- (VM_WRITE | VM_SHARED);
-}
-
-static bool vma_fs_can_writeback(struct vm_area_struct *vma)
-{
- /* No managed pages to writeback. */
- if (vma->vm_flags & VM_PFNMAP)
- return false;
-
- return vma->vm_file && vma->vm_file->f_mapping &&
- mapping_can_writeback(vma->vm_file->f_mapping);
-}
-
-/*
- * Does this VMA require the underlying folios to have their dirty state
- * tracked?
- */
-bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
-{
- /* Only shared, writable VMAs require dirty tracking. */
- if (!vma_is_shared_writable(vma))
- return false;
-
- /* Does the filesystem need to be notified? */
- if (vm_ops_needs_writenotify(vma->vm_ops))
- return true;
-
- /*
- * Even if the filesystem doesn't indicate a need for writenotify, if it
- * can writeback, dirty tracking is still required.
- */
- return vma_fs_can_writeback(vma);
-}
-
-/*
- * Some shared mappings will want the pages marked read-only
- * to track write events. If so, we'll downgrade vm_page_prot
- * to the private version (using protection_map[] without the
- * VM_SHARED bit).
- */
-bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
-{
- /* If it was private or non-writable, the write bit is already clear */
- if (!vma_is_shared_writable(vma))
- return false;
-
- /* The backer wishes to know when pages are first written to? */
- if (vm_ops_needs_writenotify(vma->vm_ops))
- return true;
-
- /* The open routine did something to the protections that pgprot_modify
- * won't preserve? */
- if (pgprot_val(vm_page_prot) !=
- pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
- return false;
-
- /*
- * Do we need to track softdirty? hugetlb does not support softdirty
- * tracking yet.
- */
- if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
- return true;
-
- /* Do we need write faults for uffd-wp tracking? */
- if (userfaultfd_wp(vma))
- return true;
-
- /* Can the mapping track the dirty pages? */
- return vma_fs_can_writeback(vma);
-}
-
/*
* We account for memory if it's a private writeable mapping,
* not hugepages and VM_NORESERVE wasn't set.
@@ -1754,6 +703,18 @@ retry:
}
/*
+ * Determine if the allocation needs to ensure that there is no
+ * existing mapping within it's guard gaps, for use as start_gap.
+ */
+static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
+{
+ if (vm_flags & VM_SHADOW_STACK)
+ return PAGE_SIZE;
+
+ return 0;
+}
+
+/*
* Search for an unmapped address range.
*
* We are looking for a range that:
@@ -1789,7 +750,7 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
@@ -1814,6 +775,7 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = mmap_end;
+ info.start_gap = stack_guard_placement(vm_flags);
return vm_unmapped_area(&info);
}
@@ -1821,9 +783,10 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
- return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
+ vm_flags);
}
#endif
@@ -1834,7 +797,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
@@ -1862,6 +825,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
+ info.start_gap = stack_guard_placement(vm_flags);
addr = vm_unmapped_area(&info);
/*
@@ -1885,26 +849,10 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
- return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
-}
-#endif
-
-#ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS
-unsigned long
-arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
-{
- return arch_get_unmapped_area(filp, addr, len, pgoff, flags);
-}
-
-unsigned long
-arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t vm_flags)
-{
- return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+ return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
+ vm_flags);
}
#endif
@@ -1914,9 +862,9 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *fi
vm_flags_t vm_flags)
{
if (test_bit(MMF_TOPDOWN, &mm->flags))
- return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff,
- flags, vm_flags);
- return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags);
+ return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
+ flags, vm_flags);
+ return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
}
unsigned long
@@ -1978,8 +926,8 @@ mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
unsigned long pgoff, unsigned long flags)
{
if (test_bit(MMF_TOPDOWN, &mm->flags))
- return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags);
- return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+ return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0);
+ return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
EXPORT_SYMBOL(mm_get_unmapped_area);
@@ -2393,443 +1341,6 @@ success:
return vma;
}
-/*
- * Ok - we have the memory areas we should free on a maple tree so release them,
- * and do the vma updates.
- *
- * Called with the mm semaphore held.
- */
-static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-{
- unsigned long nr_accounted = 0;
- struct vm_area_struct *vma;
-
- /* Update high watermark before we lower total_vm */
- update_hiwater_vm(mm);
- mas_for_each(mas, vma, ULONG_MAX) {
- long nrpages = vma_pages(vma);
-
- if (vma->vm_flags & VM_ACCOUNT)
- nr_accounted += nrpages;
- vm_stat_account(mm, vma->vm_flags, -nrpages);
- remove_vma(vma, false);
- }
- vm_unacct_memory(nr_accounted);
-}
-
-/*
- * Get rid of page table information in the indicated region.
- *
- * Called with the mm semaphore held.
- */
-static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
- struct vm_area_struct *vma, struct vm_area_struct *prev,
- struct vm_area_struct *next, unsigned long start,
- unsigned long end, unsigned long tree_end, bool mm_wr_locked)
-{
- struct mmu_gather tlb;
- unsigned long mt_start = mas->index;
-
- lru_add_drain();
- tlb_gather_mmu(&tlb, mm);
- update_hiwater_rss(mm);
- unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
- mas_set(mas, mt_start);
- free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
- next ? next->vm_start : USER_PGTABLES_CEILING,
- mm_wr_locked);
- tlb_finish_mmu(&tlb);
-}
-
-/*
- * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
- * has already been checked or doesn't make sense to fail.
- * VMA Iterator will point to the end VMA.
- */
-static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long addr, int new_below)
-{
- struct vma_prepare vp;
- struct vm_area_struct *new;
- int err;
-
- WARN_ON(vma->vm_start >= addr);
- WARN_ON(vma->vm_end <= addr);
-
- if (vma->vm_ops && vma->vm_ops->may_split) {
- err = vma->vm_ops->may_split(vma, addr);
- if (err)
- return err;
- }
-
- new = vm_area_dup(vma);
- if (!new)
- return -ENOMEM;
-
- if (new_below) {
- new->vm_end = addr;
- } else {
- new->vm_start = addr;
- new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
- }
-
- err = -ENOMEM;
- vma_iter_config(vmi, new->vm_start, new->vm_end);
- if (vma_iter_prealloc(vmi, new))
- goto out_free_vma;
-
- err = vma_dup_policy(vma, new);
- if (err)
- goto out_free_vmi;
-
- err = anon_vma_clone(new, vma);
- if (err)
- goto out_free_mpol;
-
- if (new->vm_file)
- get_file(new->vm_file);
-
- if (new->vm_ops && new->vm_ops->open)
- new->vm_ops->open(new);
-
- vma_start_write(vma);
- vma_start_write(new);
-
- init_vma_prep(&vp, vma);
- vp.insert = new;
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
-
- if (new_below) {
- vma->vm_start = addr;
- vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
- } else {
- vma->vm_end = addr;
- }
-
- /* vma_complete stores the new vma */
- vma_complete(&vp, vmi, vma->vm_mm);
-
- /* Success. */
- if (new_below)
- vma_next(vmi);
- return 0;
-
-out_free_mpol:
- mpol_put(vma_policy(new));
-out_free_vmi:
- vma_iter_free(vmi);
-out_free_vma:
- vm_area_free(new);
- return err;
-}
-
-/*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
- */
-static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long addr, int new_below)
-{
- if (vma->vm_mm->map_count >= sysctl_max_map_count)
- return -ENOMEM;
-
- return __split_vma(vmi, vma, addr, new_below);
-}
-
-/*
- * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
- * context and anonymous VMA name within the range [start, end).
- *
- * As a result, we might be able to merge the newly modified VMA range with an
- * adjacent VMA with identical properties.
- *
- * If no merge is possible and the range does not span the entirety of the VMA,
- * we then need to split the VMA to accommodate the change.
- *
- * The function returns either the merged VMA, the original VMA if a split was
- * required instead, or an error if the split failed.
- */
-struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long vm_flags,
- struct mempolicy *policy,
- struct vm_userfaultfd_ctx uffd_ctx,
- struct anon_vma_name *anon_name)
-{
- pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
- struct vm_area_struct *merged;
-
- merged = vma_merge(vmi, prev, vma, start, end, vm_flags,
- pgoff, policy, uffd_ctx, anon_name);
- if (merged)
- return merged;
-
- if (vma->vm_start < start) {
- int err = split_vma(vmi, vma, start, 1);
-
- if (err)
- return ERR_PTR(err);
- }
-
- if (vma->vm_end > end) {
- int err = split_vma(vmi, vma, end, 0);
-
- if (err)
- return ERR_PTR(err);
- }
-
- return vma;
-}
-
-/*
- * Attempt to merge a newly mapped VMA with those adjacent to it. The caller
- * must ensure that [start, end) does not overlap any existing VMA.
- */
-static struct vm_area_struct
-*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff)
-{
- return vma_merge(vmi, prev, vma, start, end, vma->vm_flags, pgoff,
- vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-}
-
-/*
- * Expand vma by delta bytes, potentially merging with an immediately adjacent
- * VMA with identical properties.
- */
-struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
- struct vm_area_struct *vma,
- unsigned long delta)
-{
- pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma);
-
- /* vma is specified as prev, so case 1 or 2 will apply. */
- return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta,
- vma->vm_flags, pgoff, vma_policy(vma),
- vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-}
-
-/*
- * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
- * @vmi: The vma iterator
- * @vma: The starting vm_area_struct
- * @mm: The mm_struct
- * @start: The aligned start address to munmap.
- * @end: The aligned end address to munmap.
- * @uf: The userfaultfd list_head
- * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
- * success.
- *
- * Return: 0 on success and drops the lock if so directed, error and leaves the
- * lock held otherwise.
- */
-static int
-do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- struct mm_struct *mm, unsigned long start,
- unsigned long end, struct list_head *uf, bool unlock)
-{
- struct vm_area_struct *prev, *next = NULL;
- struct maple_tree mt_detach;
- int count = 0;
- int error = -ENOMEM;
- unsigned long locked_vm = 0;
- MA_STATE(mas_detach, &mt_detach, 0, 0);
- mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
- mt_on_stack(mt_detach);
-
- /*
- * If we need to split any vma, do it now to save pain later.
- *
- * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
- * unmapped vm_area_struct will remain in use: so lower split_vma
- * places tmp vma above, and higher split_vma places tmp vma below.
- */
-
- /* Does it split the first one? */
- if (start > vma->vm_start) {
-
- /*
- * Make sure that map_count on return from munmap() will
- * not exceed its limit; but let map_count go just above
- * its limit temporarily, to help free resources as expected.
- */
- if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
- goto map_count_exceeded;
-
- error = __split_vma(vmi, vma, start, 1);
- if (error)
- goto start_split_failed;
- }
-
- /*
- * Detach a range of VMAs from the mm. Using next as a temp variable as
- * it is always overwritten.
- */
- next = vma;
- do {
- /* Does it split the end? */
- if (next->vm_end > end) {
- error = __split_vma(vmi, next, end, 0);
- if (error)
- goto end_split_failed;
- }
- vma_start_write(next);
- mas_set(&mas_detach, count);
- error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
- if (error)
- goto munmap_gather_failed;
- vma_mark_detached(next, true);
- if (next->vm_flags & VM_LOCKED)
- locked_vm += vma_pages(next);
-
- count++;
- if (unlikely(uf)) {
- /*
- * If userfaultfd_unmap_prep returns an error the vmas
- * will remain split, but userland will get a
- * highly unexpected error anyway. This is no
- * different than the case where the first of the two
- * __split_vma fails, but we don't undo the first
- * split, despite we could. This is unlikely enough
- * failure that it's not worth optimizing it for.
- */
- error = userfaultfd_unmap_prep(next, start, end, uf);
-
- if (error)
- goto userfaultfd_error;
- }
-#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
- BUG_ON(next->vm_start < start);
- BUG_ON(next->vm_start > end);
-#endif
- } for_each_vma_range(*vmi, next, end);
-
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
- /* Make sure no VMAs are about to be lost. */
- {
- MA_STATE(test, &mt_detach, 0, 0);
- struct vm_area_struct *vma_mas, *vma_test;
- int test_count = 0;
-
- vma_iter_set(vmi, start);
- rcu_read_lock();
- vma_test = mas_find(&test, count - 1);
- for_each_vma_range(*vmi, vma_mas, end) {
- BUG_ON(vma_mas != vma_test);
- test_count++;
- vma_test = mas_next(&test, count - 1);
- }
- rcu_read_unlock();
- BUG_ON(count != test_count);
- }
-#endif
-
- while (vma_iter_addr(vmi) > start)
- vma_iter_prev_range(vmi);
-
- error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
- if (error)
- goto clear_tree_failed;
-
- /* Point of no return */
- mm->locked_vm -= locked_vm;
- mm->map_count -= count;
- if (unlock)
- mmap_write_downgrade(mm);
-
- prev = vma_iter_prev_range(vmi);
- next = vma_next(vmi);
- if (next)
- vma_iter_prev_range(vmi);
-
- /*
- * We can free page tables without write-locking mmap_lock because VMAs
- * were isolated before we downgraded mmap_lock.
- */
- mas_set(&mas_detach, 1);
- unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
- !unlock);
- /* Statistics and freeing VMAs */
- mas_set(&mas_detach, 0);
- remove_mt(mm, &mas_detach);
- validate_mm(mm);
- if (unlock)
- mmap_read_unlock(mm);
-
- __mt_destroy(&mt_detach);
- return 0;
-
-clear_tree_failed:
-userfaultfd_error:
-munmap_gather_failed:
-end_split_failed:
- mas_set(&mas_detach, 0);
- mas_for_each(&mas_detach, next, end)
- vma_mark_detached(next, false);
-
- __mt_destroy(&mt_detach);
-start_split_failed:
-map_count_exceeded:
- validate_mm(mm);
- return error;
-}
-
-/*
- * do_vmi_munmap() - munmap a given range.
- * @vmi: The vma iterator
- * @mm: The mm_struct
- * @start: The start address to munmap
- * @len: The length of the range to munmap
- * @uf: The userfaultfd list_head
- * @unlock: set to true if the user wants to drop the mmap_lock on success
- *
- * This function takes a @mas that is either pointing to the previous VMA or set
- * to MA_START and sets it up to remove the mapping(s). The @len will be
- * aligned and any arch_unmap work will be preformed.
- *
- * Return: 0 on success and drops the lock if so directed, error and leaves the
- * lock held otherwise.
- */
-int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
- unsigned long start, size_t len, struct list_head *uf,
- bool unlock)
-{
- unsigned long end;
- struct vm_area_struct *vma;
-
- if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
- return -EINVAL;
-
- end = start + PAGE_ALIGN(len);
- if (end == start)
- return -EINVAL;
-
- /*
- * Check if memory is sealed before arch_unmap.
- * Prevent unmapping a sealed VMA.
- * can_modify_mm assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm(mm, start, end)))
- return -EPERM;
-
- /* arch_unmap() might do unmaps itself. */
- arch_unmap(mm, start, end);
-
- /* Find the first overlapping VMA */
- vma = vma_find(vmi, end);
- if (!vma) {
- if (unlock)
- mmap_write_unlock(mm);
- return 0;
- }
-
- return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
-}
-
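
The -EINVAL checks at the top of do_vmi_munmap() are pure arithmetic; as a
minimal sketch, the same validation in self-contained user-space C (the
PAGE_SIZE and TASK_SIZE values are illustrative assumptions, not the kernel's):

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define TASK_SIZE     (1UL << 47)  /* illustrative; arch-specific in reality */

    /* Mirrors the checks above: unaligned start, range outside the task's
     * address space, or a length that rounds up to an empty range. */
    static bool munmap_range_valid(unsigned long start, size_t len)
    {
            if ((start & (PAGE_SIZE - 1)) || start > TASK_SIZE ||
                len > TASK_SIZE - start)
                    return false;
            return PAGE_ALIGN(len) != 0;
    }
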
/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
* @mm: The mm_struct
* @start: The start address to munmap
@@ -2852,100 +1363,67 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
- struct vm_area_struct *next, *prev, *merge;
- pgoff_t pglen = len >> PAGE_SHIFT;
+ pgoff_t pglen = PHYS_PFN(len);
+ struct vm_area_struct *merge;
unsigned long charged = 0;
+ struct vma_munmap_struct vms;
+ struct ma_state mas_detach;
+ struct maple_tree mt_detach;
unsigned long end = addr + len;
- unsigned long merge_start = addr, merge_end = end;
bool writable_file_mapping = false;
- pgoff_t vm_pgoff;
- int error;
+ int error = -ENOMEM;
VMA_ITERATOR(vmi, mm, addr);
+ VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff);
- /* Check against address space limit. */
- if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
- unsigned long nr_pages;
-
- /*
- * MAP_FIXED may remove pages of mappings that intersects with
- * requested mapping. Account for the pages it would unmap.
- */
- nr_pages = count_vma_pages_range(mm, addr, end);
+ vmg.file = file;
+ /* Find the first overlapping VMA */
+ vma = vma_find(&vmi, end);
+ init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
+ if (vma) {
+ mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ mt_on_stack(mt_detach);
+ mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
+ /* Prepare to unmap any existing mapping in the area */
+ error = vms_gather_munmap_vmas(&vms, &mas_detach);
+ if (error)
+ goto gather_failed;
- if (!may_expand_vm(mm, vm_flags,
- (len >> PAGE_SHIFT) - nr_pages))
- return -ENOMEM;
+ vmg.next = vms.next;
+ vmg.prev = vms.prev;
+ vma = NULL;
+ } else {
+ vmg.next = vma_iter_next_rewind(&vmi, &vmg.prev);
}
- /* Unmap any existing mapping in the area */
- error = do_vmi_munmap(&vmi, mm, addr, len, uf, false);
- if (error == -EPERM)
- return error;
- else if (error)
- return -ENOMEM;
+ /* Check against address space limit. */
+ if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages))
+ goto abort_munmap;
/*
* Private writable mapping: check memory availability
*/
if (accountable_mapping(file, vm_flags)) {
- charged = len >> PAGE_SHIFT;
- if (security_vm_enough_memory_mm(mm, charged))
- return -ENOMEM;
- vm_flags |= VM_ACCOUNT;
- }
-
- next = vma_next(&vmi);
- prev = vma_prev(&vmi);
- if (vm_flags & VM_SPECIAL) {
- if (prev)
- vma_iter_next_range(&vmi);
- goto cannot_expand;
- }
-
- /* Attempt to expand an old mapping */
- /* Check next */
- if (next && next->vm_start == end && !vma_policy(next) &&
- can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
- NULL_VM_UFFD_CTX, NULL)) {
- merge_end = next->vm_end;
- vma = next;
- vm_pgoff = next->vm_pgoff - pglen;
- }
+ charged = pglen;
+ charged -= vms.nr_accounted;
+ if (charged && security_vm_enough_memory_mm(mm, charged))
+ goto abort_munmap;
- /* Check prev */
- if (prev && prev->vm_end == addr && !vma_policy(prev) &&
- (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
- pgoff, vma->vm_userfaultfd_ctx, NULL) :
- can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
- NULL_VM_UFFD_CTX, NULL))) {
- merge_start = prev->vm_start;
- vma = prev;
- vm_pgoff = prev->vm_pgoff;
- } else if (prev) {
- vma_iter_next_range(&vmi);
+ vms.nr_accounted = 0;
+ vm_flags |= VM_ACCOUNT;
+ vmg.flags = vm_flags;
}
- /* Actually expand, if possible */
- if (vma &&
- !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
- khugepaged_enter_vma(vma, vm_flags);
+ vma = vma_merge_new_range(&vmg);
+ if (vma)
goto expanded;
- }
-
- if (vma == prev)
- vma_iter_set(&vmi, addr);
-cannot_expand:
-
/*
* Determine the object being mapped and call the appropriate
* specific mapper. The address has already been validated but not
* unmapped, though the maps have been removed from the list.
*/
vma = vm_area_alloc(mm);
- if (!vma) {
- error = -ENOMEM;
+ if (!vma)
goto unacct_error;
- }
vma_iter_config(&vmi, addr, end);
vma_set_range(vma, addr, end, pgoff);
@@ -2954,6 +1432,11 @@ cannot_expand:
if (file) {
vma->vm_file = get_file(file);
+ /*
+ * call_mmap() may map PTE, so ensure there are no existing PTEs
+ * and call the vm_ops close function if one exists.
+ */
+ vms_clean_up_area(&vms, &mas_detach);
error = call_mmap(file, vma);
if (error)
goto unmap_and_free_vma;
@@ -2979,10 +1462,11 @@ cannot_expand:
* If vm_flags changed after call_mmap(), we should try merge
* vma again as we may succeed this time.
*/
- if (unlikely(vm_flags != vma->vm_flags && prev)) {
- merge = vma_merge_new_vma(&vmi, prev, vma,
- vma->vm_start, vma->vm_end,
- vma->vm_pgoff);
+ if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) {
+ vmg.flags = vma->vm_flags;
+ /* If this fails, state is reset ready for a reattempt. */
+ merge = vma_merge_new_range(&vmg);
+
if (merge) {
/*
* ->mmap() can change vma->vm_file and fput
@@ -2998,6 +1482,7 @@ cannot_expand:
vm_flags = vma->vm_flags;
goto unmap_writable;
}
+ vma_iter_config(&vmi, addr, end);
}
vm_flags = vma->vm_flags;
@@ -3030,7 +1515,7 @@ cannot_expand:
vma_link_file(vma);
/*
- * vma_merge() calls khugepaged_enter_vma() either, the below
+ * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
* call covers the non-merge case.
*/
khugepaged_enter_vma(vma, vma->vm_flags);
@@ -3044,14 +1529,17 @@ unmap_writable:
expanded:
perf_event_mmap(vma);
- vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+ /* Unmap any existing mapping in the area */
+ vms_complete_munmap_vmas(&vms, &mas_detach);
+
+ vm_stat_account(mm, vm_flags, pglen);
if (vm_flags & VM_LOCKED) {
if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm))
vm_flags_clear(vma, VM_LOCKED_MASK);
else
- mm->locked_vm += (len >> PAGE_SHIFT);
+ mm->locked_vm += pglen;
}
if (file)
@@ -3072,7 +1560,7 @@ expanded:
return addr;
close_and_free_vma:
- if (file && vma->vm_ops && vma->vm_ops->close)
+ if (file && !vms.closed_vm_ops && vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (file || vma->vm_file) {
@@ -3082,8 +1570,7 @@ unmap_and_free_vma:
vma_iter_set(&vmi, vma->vm_end);
/* Undo any partial mapping done by a device driver. */
- unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
- vma->vm_end, vma->vm_end, true);
+ unmap_region(&vmi.mas, vma, vmg.prev, vmg.next);
}
if (writable_file_mapping)
mapping_unmap_writable(file->f_mapping);
@@ -3092,6 +1579,10 @@ free_vma:
unacct_error:
if (charged)
vm_unacct_memory(charged);
+
+abort_munmap:
+ vms_abort_munmap_vmas(&vms, &mas_detach);
+gather_failed:
validate_mm(mm);
return error;
}
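
The reworked mmap_region() above turns the old up-front do_vmi_munmap() call
into a two-phase munmap: overlapping VMAs are gathered and detached first,
while their page tables and accounting are only torn down once the new mapping
can no longer fail. A condensed sketch of that control flow, with the merge
and accounting details elided:

    /* Sketch only; failure paths other than the gather error are elided. */
    vma = vma_find(&vmi, end);
    init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
    if (vma) {
            /* Phase 1: detach the overlap into mas_detach; no teardown yet. */
            error = vms_gather_munmap_vmas(&vms, &mas_detach);
            if (error)
                    goto gather_failed;     /* nothing to undo */
    }
    /* ... build the new VMA; failures call vms_abort_munmap_vmas() ... */
    /* Phase 2: past the point of no return, free the old VMAs for real. */
    vms_complete_munmap_vmas(&vms, &mas_detach);
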
@@ -3198,8 +1689,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
flags |= MAP_LOCKED;
file = get_file(vma->vm_file);
+ ret = security_mmap_file(vma->vm_file, prot, flags);
+ if (ret)
+ goto out_fput;
ret = do_mmap(vma->vm_file, start, size,
prot, flags, 0, pgoff, &populate, NULL);
+out_fput:
fput(file);
out:
mmap_write_unlock(mm);
@@ -3211,39 +1706,6 @@ out:
}
/*
- * do_vma_munmap() - Unmap a full or partial vma.
- * @vmi: The vma iterator pointing at the vma
- * @vma: The first vma to be munmapped
- * @start: the start of the address to unmap
- * @end: The end of the address to unmap
- * @uf: The userfaultfd list_head
- * @unlock: Drop the lock on success
- *
- * unmaps a VMA mapping when the vma iterator is already in position.
- * Does not handle alignment.
- *
- * Return: 0 on success and drops the lock if so directed; on failure, an
- * error is returned and the lock is still held.
- */
-int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, struct list_head *uf,
- bool unlock)
-{
- struct mm_struct *mm = vma->vm_mm;
-
- /*
- * Check if memory is sealed before arch_unmap.
- * Prevent unmapping a sealed VMA.
- * can_modify_mm assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm(mm, start, end)))
- return -EPERM;
-
- arch_unmap(mm, start, end);
- return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
-}
-
-/*
* do_brk_flags() - Increase the brk vma if the flags match.
* @vmi: The vma iterator
* @addr: The start address
@@ -3259,7 +1721,6 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long addr, unsigned long len, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vma_prepare vp;
/*
* Check against address space limits by the changed size
@@ -3279,25 +1740,16 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
* Expand the existing vma if possible; Note that singular lists do not
* occur after forking, so the expand will only happen on new VMAs.
*/
- if (vma && vma->vm_end == addr && !vma_policy(vma) &&
- can_vma_merge_after(vma, flags, NULL, NULL,
- addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
- vma_iter_config(vmi, vma->vm_start, addr + len);
- if (vma_iter_prealloc(vmi, vma))
- goto unacct_fail;
+ if (vma && vma->vm_end == addr) {
+ VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
- vma_start_write(vma);
-
- init_vma_prep(&vp, vma);
- vma_prepare(&vp);
- vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
- vma->vm_end = addr + len;
- vm_flags_set(vma, VM_SOFTDIRTY);
- vma_iter_store(vmi, vma);
+ vmg.prev = vma;
+ vma_iter_next_range(vmi);
- vma_complete(&vp, vmi, mm);
- khugepaged_enter_vma(vma, flags);
- goto out;
+ if (vma_merge_new_range(&vmg))
+ goto out;
+ else if (vmg_nomem(&vmg))
+ goto unacct_fail;
}
if (vma)
@@ -3433,7 +1885,7 @@ void exit_mmap(struct mm_struct *mm)
do {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
- remove_vma(vma, true);
+ remove_vma(vma, /* unreachable = */ true, /* closed = */ false);
count++;
cond_resched();
vma = vma_next(&vmi);
@@ -3491,92 +1943,6 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
}
/*
- * Copy the vma structure to a new location in the same mm,
- * prior to moving page table entries, to effect an mremap move.
- */
-struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
- unsigned long addr, unsigned long len, pgoff_t pgoff,
- bool *need_rmap_locks)
-{
- struct vm_area_struct *vma = *vmap;
- unsigned long vma_start = vma->vm_start;
- struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *new_vma, *prev;
- bool faulted_in_anon_vma = true;
- VMA_ITERATOR(vmi, mm, addr);
-
- /*
- * If anonymous vma has not yet been faulted, update new pgoff
- * to match new location, to increase its chance of merging.
- */
- if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
- pgoff = addr >> PAGE_SHIFT;
- faulted_in_anon_vma = false;
- }
-
- new_vma = find_vma_prev(mm, addr, &prev);
- if (new_vma && new_vma->vm_start < addr + len)
- return NULL; /* should never get here */
-
- new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
- if (new_vma) {
- /*
- * Source vma may have been merged into new_vma
- */
- if (unlikely(vma_start >= new_vma->vm_start &&
- vma_start < new_vma->vm_end)) {
- /*
- * The only way we can get a vma_merge with
- * self during an mremap is if the vma hasn't
- * been faulted in yet and we were allowed to
- * reset the dst vma->vm_pgoff to the
- * destination address of the mremap to allow
- * the merge to happen. mremap must change the
- * vm_pgoff linearity between src and dst vmas
- * (in turn preventing a vma_merge) to be
- * safe. It is only safe to keep the vm_pgoff
- * linear if there are no pages mapped yet.
- */
- VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
- *vmap = vma = new_vma;
- }
- *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
- } else {
- new_vma = vm_area_dup(vma);
- if (!new_vma)
- goto out;
- vma_set_range(new_vma, addr, addr + len, pgoff);
- if (vma_dup_policy(vma, new_vma))
- goto out_free_vma;
- if (anon_vma_clone(new_vma, vma))
- goto out_free_mempol;
- if (new_vma->vm_file)
- get_file(new_vma->vm_file);
- if (new_vma->vm_ops && new_vma->vm_ops->open)
- new_vma->vm_ops->open(new_vma);
- if (vma_link(mm, new_vma))
- goto out_vma_link;
- *need_rmap_locks = false;
- }
- return new_vma;
-
-out_vma_link:
- if (new_vma->vm_ops && new_vma->vm_ops->close)
- new_vma->vm_ops->close(new_vma);
-
- if (new_vma->vm_file)
- fput(new_vma->vm_file);
-
- unlink_anon_vmas(new_vma);
-out_free_mempol:
- mpol_put(vma_policy(new_vma));
-out_free_vma:
- vm_area_free(new_vma);
-out:
- return NULL;
-}
-
-/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
*/
@@ -3620,10 +1986,16 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
/*
+ * Close hook, called for unmap() and on the old vma for mremap().
+ *
* Having a close hook prevents vma merging regardless of flags.
*/
static void special_mapping_close(struct vm_area_struct *vma)
{
+ const struct vm_special_mapping *sm = vma->vm_private_data;
+
+ if (sm->close)
+ sm->close(sm, vma);
}
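
With the hook dispatched through vm_private_data, the owner of a special
mapping can now observe teardown. A hypothetical sketch (vvar_close and
vvar_fault are illustrative names, not symbols added by this patch):

    static void vvar_close(const struct vm_special_mapping *sm,
                           struct vm_area_struct *vma)
    {
            /* e.g. drop a per-mm reference taken at install time */
    }

    static const struct vm_special_mapping vvar_mapping = {
            .name   = "[vvar]",
            .fault  = vvar_fault,   /* assumed to be defined elsewhere */
            .close  = vvar_close,   /* called from special_mapping_close() */
    };
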
static const char *special_mapping_name(struct vm_area_struct *vma)
@@ -3665,27 +2037,17 @@ static const struct vm_operations_struct special_mapping_vmops = {
.may_split = special_mapping_split,
};
-static const struct vm_operations_struct legacy_special_mapping_vmops = {
- .close = special_mapping_close,
- .fault = special_mapping_fault,
-};
-
static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgoff_t pgoff;
struct page **pages;
+ struct vm_special_mapping *sm = vma->vm_private_data;
- if (vma->vm_ops == &legacy_special_mapping_vmops) {
- pages = vma->vm_private_data;
- } else {
- struct vm_special_mapping *sm = vma->vm_private_data;
-
- if (sm->fault)
- return sm->fault(sm, vmf->vma, vmf);
+ if (sm->fault)
+ return sm->fault(sm, vmf->vma, vmf);
- pages = sm->pages;
- }
+ pages = sm->pages;
for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
pgoff--;
@@ -3740,8 +2102,7 @@ bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm)
{
return vma->vm_private_data == sm &&
- (vma->vm_ops == &special_mapping_vmops ||
- vma->vm_ops == &legacy_special_mapping_vmops);
+ vma->vm_ops == &special_mapping_vmops;
}
/*
@@ -3762,214 +2123,6 @@ struct vm_area_struct *_install_special_mapping(
&special_mapping_vmops);
}
-int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long vm_flags, struct page **pages)
-{
- struct vm_area_struct *vma = __install_special_mapping(
- mm, addr, len, vm_flags, (void *)pages,
- &legacy_special_mapping_vmops);
-
- return PTR_ERR_OR_ZERO(vma);
-}
-
-static DEFINE_MUTEX(mm_all_locks_mutex);
-
-static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
-{
- if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
- /*
- * The LSB of head.next can't change from under us
- * because we hold the mm_all_locks_mutex.
- */
- down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
- /*
- * We can safely modify head.next after taking the
- * anon_vma->root->rwsem. If some other vma in this mm shares
- * the same anon_vma we won't take it again.
- *
- * No need of atomic instructions here, head.next
- * can't change from under us thanks to the
- * anon_vma->root->rwsem.
- */
- if (__test_and_set_bit(0, (unsigned long *)
- &anon_vma->root->rb_root.rb_root.rb_node))
- BUG();
- }
-}
-
-static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
-{
- if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
- /*
- * AS_MM_ALL_LOCKS can't change from under us because
- * we hold the mm_all_locks_mutex.
- *
- * Operations on ->flags have to be atomic because
- * even if AS_MM_ALL_LOCKS is stable thanks to the
- * mm_all_locks_mutex, there may be other cpus
- * changing other bitflags in parallel to us.
- */
- if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
- BUG();
- down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
- }
-}
-
-/*
- * This operation locks against the VM for all pte/vma/mm related
- * operations that could ever happen on a certain mm. This includes
- * vmtruncate, try_to_unmap, and all page faults.
- *
- * The caller must take the mmap_lock in write mode before calling
- * mm_take_all_locks(). The caller isn't allowed to release the
- * mmap_lock until mm_drop_all_locks() returns.
- *
- * mmap_lock in write mode is required in order to block all operations
- * that could modify pagetables and free pages without need of
- * altering the vma layout. It's also needed in write mode to avoid new
- * anon_vmas to be associated with existing vmas.
- *
- * A single task can't take more than one mm_take_all_locks() in a row
- * or it would deadlock.
- *
- * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
- * mapping->flags avoid taking the same lock twice, if more than one
- * vma in this mm is backed by the same anon_vma or address_space.
- *
- * We take locks in the following order, according to the comment at the
- * beginning of mm/rmap.c:
- * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
- * hugetlb mapping);
- * - all vmas marked locked
- * - all i_mmap_rwsem locks;
- * - all anon_vma->rwsems
- *
- * We can take all locks within these types randomly because the VM code
- * doesn't nest them and we are protected from parallel mm_take_all_locks() by
- * mm_all_locks_mutex.
- *
- * mm_take_all_locks() and mm_drop_all_locks are expensive operations
- * that may have to take thousands of locks.
- *
- * mm_take_all_locks() can fail if it's interrupted by signals.
- */
-int mm_take_all_locks(struct mm_struct *mm)
-{
- struct vm_area_struct *vma;
- struct anon_vma_chain *avc;
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_assert_write_locked(mm);
-
- mutex_lock(&mm_all_locks_mutex);
-
- /*
- * vma_start_write() does not have a complement in mm_drop_all_locks()
- * because vma_start_write() is always asymmetrical; it marks a VMA as
- * being written to until mmap_write_unlock() or mmap_write_downgrade()
- * is reached.
- */
- for_each_vma(vmi, vma) {
- if (signal_pending(current))
- goto out_unlock;
- vma_start_write(vma);
- }
-
- vma_iter_init(&vmi, mm, 0);
- for_each_vma(vmi, vma) {
- if (signal_pending(current))
- goto out_unlock;
- if (vma->vm_file && vma->vm_file->f_mapping &&
- is_vm_hugetlb_page(vma))
- vm_lock_mapping(mm, vma->vm_file->f_mapping);
- }
-
- vma_iter_init(&vmi, mm, 0);
- for_each_vma(vmi, vma) {
- if (signal_pending(current))
- goto out_unlock;
- if (vma->vm_file && vma->vm_file->f_mapping &&
- !is_vm_hugetlb_page(vma))
- vm_lock_mapping(mm, vma->vm_file->f_mapping);
- }
-
- vma_iter_init(&vmi, mm, 0);
- for_each_vma(vmi, vma) {
- if (signal_pending(current))
- goto out_unlock;
- if (vma->anon_vma)
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- vm_lock_anon_vma(mm, avc->anon_vma);
- }
-
- return 0;
-
-out_unlock:
- mm_drop_all_locks(mm);
- return -EINTR;
-}
-
-static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
-{
- if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
- /*
- * The LSB of head.next can't change to 0 from under
- * us because we hold the mm_all_locks_mutex.
- *
- * We must however clear the bitflag before unlocking
- * the vma so the users using the anon_vma->rb_root will
- * never see our bitflag.
- *
- * No need of atomic instructions here, head.next
- * can't change from under us until we release the
- * anon_vma->root->rwsem.
- */
- if (!__test_and_clear_bit(0, (unsigned long *)
- &anon_vma->root->rb_root.rb_root.rb_node))
- BUG();
- anon_vma_unlock_write(anon_vma);
- }
-}
-
-static void vm_unlock_mapping(struct address_space *mapping)
-{
- if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
- /*
- * AS_MM_ALL_LOCKS can't change to 0 from under us
- * because we hold the mm_all_locks_mutex.
- */
- i_mmap_unlock_write(mapping);
- if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
- &mapping->flags))
- BUG();
- }
-}
-
-/*
- * The mmap_lock cannot be released by the caller until
- * mm_drop_all_locks() returns.
- */
-void mm_drop_all_locks(struct mm_struct *mm)
-{
- struct vm_area_struct *vma;
- struct anon_vma_chain *avc;
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_assert_write_locked(mm);
- BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
-
- for_each_vma(vmi, vma) {
- if (vma->anon_vma)
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- vm_unlock_anon_vma(avc->anon_vma);
- if (vma->vm_file && vma->vm_file->f_mapping)
- vm_unlock_mapping(vma->vm_file->f_mapping);
- }
-
- mutex_unlock(&mm_all_locks_mutex);
-}
-
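
The comment block above documents a strict protocol; seen from the caller's
side, it reduces to the following minimal sketch:

    /* Write-lock the mm first and keep it until the locks are dropped. */
    mmap_write_lock(mm);
    if (mm_take_all_locks(mm)) {    /* returns -EINTR on a pending signal */
            mmap_write_unlock(mm);
            return -EINTR;
    }
    /* ... every vma, anon_vma and i_mmap lock is now held ... */
    mm_drop_all_locks(mm);
    mmap_write_unlock(mm);
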
/*
* initialise the percpu counter for VM
*/
@@ -4088,3 +2241,86 @@ static int __meminit init_reserve_notifier(void)
return 0;
}
subsys_initcall(init_reserve_notifier);
+
+/*
+ * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
+ * this VMA and its relocated range, which will now reside at [vma->vm_start -
+ * shift, vma->vm_end - shift).
+ *
+ * This function is almost certainly NOT what you want for anything other than
+ * early executable temporary stack relocation.
+ */
+int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
+{
+ /*
+ * The process proceeds as follows:
+ *
+ * 1) Use shift to calculate the new vma endpoints.
+ * 2) Extend vma to cover both the old and new ranges. This ensures the
+ * arguments passed to subsequent functions are consistent.
+ * 3) Move vma's page tables to the new range.
+ * 4) Free up any cleared pgd range.
+ * 5) Shrink the vma to cover only the new range.
+ */
+
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long old_start = vma->vm_start;
+ unsigned long old_end = vma->vm_end;
+ unsigned long length = old_end - old_start;
+ unsigned long new_start = old_start - shift;
+ unsigned long new_end = old_end - shift;
+ VMA_ITERATOR(vmi, mm, new_start);
+ VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
+ struct vm_area_struct *next;
+ struct mmu_gather tlb;
+
+ BUG_ON(new_start > new_end);
+
+ /*
+ * ensure there are no vmas between where we want to go
+ * and where we are
+ */
+ if (vma != vma_next(&vmi))
+ return -EFAULT;
+
+ vma_iter_prev_range(&vmi);
+ /*
+ * cover the whole range: [new_start, old_end)
+ */
+ vmg.vma = vma;
+ if (vma_expand(&vmg))
+ return -ENOMEM;
+
+ /*
+ * move the page tables downwards, on failure we rely on
+ * process cleanup to remove whatever mess we made.
+ */
+ if (length != move_page_tables(vma, old_start,
+ vma, new_start, length, false, true))
+ return -ENOMEM;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm);
+ next = vma_next(&vmi);
+ if (new_end > old_start) {
+ /*
+ * when the old and new regions overlap clear from new_end.
+ */
+ free_pgd_range(&tlb, new_end, old_end, new_end,
+ next ? next->vm_start : USER_PGTABLES_CEILING);
+ } else {
+ /*
+ * otherwise, clean from old_start; this is done to avoid touching
+ * the address space in [new_end, old_start), as some architectures
+ * have constraints on va-space that make this illegal (IA64) -
+ * for the others it's just a little faster.
+ */
+ free_pgd_range(&tlb, old_start, old_end, new_end,
+ next ? next->vm_start : USER_PGTABLES_CEILING);
+ }
+ tlb_finish_mmu(&tlb);
+
+ vma_prev(&vmi);
+ /* Shrink the vma to just the new range */
+ return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
+}
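
The overlap test above decides how much of the pgd range gets cleared; a
worked instance with illustrative addresses:

    /*
     * old = [0x101000, 0x105000), shift = 0x1000
     * new = [0x100000, 0x104000)
     *
     * new_end (0x104000) > old_start (0x101000): the ranges overlap, so
     * free_pgd_range() clears only [0x104000, 0x105000). Had shift been
     * >= the VMA length, the ranges would be disjoint and the whole of
     * [old_start, old_end) would be cleared instead.
     */
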
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 8982e6139d07..fc18fe274505 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -19,6 +19,8 @@
#include <linux/sched/mm.h>
#include <linux/slab.h>
+#include "vma.h"
+
/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);
diff --git a/mm/mmzone.c b/mm/mmzone.c
index c01896eca736..f9baa8882fbf 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -66,7 +66,7 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
z++;
else
while (zonelist_zone_idx(z) > highest_zoneidx ||
- (z->zone && !zref_in_nodemask(z, nodes)))
+ (zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
z++;
return z;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 222ab434da54..0c5d6d06107d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -161,8 +161,7 @@ static long change_pte_range(struct mmu_gather *tlb,
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
toptier)
continue;
- if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
- !toptier)
+ if (folio_use_access_time(folio))
folio_xchg_access_time(folio,
jiffies_to_msecs(jiffies));
}
@@ -303,8 +302,9 @@ pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
/*
* pte markers only reside at the pte level; if we need pte markers,
- * we need to split. We cannot wr-protect shmem thp because file
- * thp is handled differently when split by erasing the pmd so far.
+ * we need to split. For example, we cannot wr-protect a file thp
+ * (e.g. 2M shmem) because file thp is handled differently when
+ * split by erasing the pmd so far.
*/
return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}
@@ -364,9 +364,6 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
unsigned long next;
long pages = 0;
unsigned long nr_huge_updates = 0;
- struct mmu_notifier_range range;
-
- range.start = 0;
pmd = pmd_offset(pud, addr);
do {
@@ -384,14 +381,6 @@ again:
if (pmd_none(*pmd))
goto next;
- /* invoke the mmu notifier if the pmd is populated */
- if (!range.start) {
- mmu_notifier_range_init(&range,
- MMU_NOTIFY_PROTECTION_VMA, 0,
- vma->vm_mm, addr, end);
- mmu_notifier_invalidate_range_start(&range);
- }
-
_pmd = pmdp_get_lockless(pmd);
if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
if ((next - addr != HPAGE_PMD_SIZE) ||
@@ -432,9 +421,6 @@ next:
cond_resched();
} while (pmd++, addr = next, addr != end);
- if (range.start)
- mmu_notifier_invalidate_range_end(&range);
-
if (nr_huge_updates)
count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
return pages;
@@ -444,21 +430,57 @@ static inline long change_pud_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
- pud_t *pud;
+ struct mmu_notifier_range range;
+ pud_t *pudp, pud;
unsigned long next;
long pages = 0, ret;
- pud = pud_offset(p4d, addr);
+ range.start = 0;
+
+ pudp = pud_offset(p4d, addr);
do {
+again:
next = pud_addr_end(addr, end);
- ret = change_prepare(vma, pud, pmd, addr, cp_flags);
- if (ret)
- return ret;
- if (pud_none_or_clear_bad(pud))
+ ret = change_prepare(vma, pudp, pmd, addr, cp_flags);
+ if (ret) {
+ pages = ret;
+ break;
+ }
+
+ pud = READ_ONCE(*pudp);
+ if (pud_none(pud))
continue;
- pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
+
+ if (!range.start) {
+ mmu_notifier_range_init(&range,
+ MMU_NOTIFY_PROTECTION_VMA, 0,
+ vma->vm_mm, addr, end);
+ mmu_notifier_invalidate_range_start(&range);
+ }
+
+ if (pud_leaf(pud)) {
+ if ((next - addr != PUD_SIZE) ||
+ pgtable_split_needed(vma, cp_flags)) {
+ __split_huge_pud(vma, pudp, addr);
+ goto again;
+ } else {
+ ret = change_huge_pud(tlb, vma, pudp,
+ addr, newprot, cp_flags);
+ if (ret == 0)
+ goto again;
+ /* huge pud was handled */
+ if (ret == HPAGE_PUD_NR)
+ pages += HPAGE_PUD_NR;
+ continue;
+ }
+ }
+
+ pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot,
cp_flags);
- } while (pud++, addr = next, addr != end);
+ } while (pudp++, addr = next, addr != end);
+
+ if (range.start)
+ mmu_notifier_invalidate_range_end(&range);
return pages;
}
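
The hunk above moves the lazy mmu-notifier bracket from the pmd loop up to the
pud loop, but the pattern itself is unchanged: range.start == 0 acts as a
"not yet started" sentinel, the invalidation begins on the first entry with
real work, and it is closed once after the loop. Distilled, with
for_each_entry() and nothing_to_do() as placeholders:

    struct mmu_notifier_range range;

    range.start = 0;                /* sentinel: no notification started */
    for_each_entry(...) {
            if (nothing_to_do(entry))
                    continue;
            if (!range.start) {     /* first entry with real work */
                    mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
                                            0, vma->vm_mm, addr, end);
                    mmu_notifier_invalidate_range_start(&range);
            }
            /* ... change protections ... */
    }
    if (range.start)                /* only if one was actually started */
            mmu_notifier_invalidate_range_end(&range);
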
@@ -589,6 +611,9 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
unsigned long charged = 0;
int error;
+ if (!can_modify_vma(vma))
+ return -EPERM;
+
if (newflags == oldflags) {
*pprev = vma;
return 0;
@@ -747,15 +772,6 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
}
}
- /*
- * checking if memory is sealed.
- * can_modify_mm assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm(current->mm, start, end))) {
- error = -EPERM;
- goto out;
- }
-
prev = vma_prev(&vmi);
if (start > vma->vm_start)
prev = vma;
diff --git a/mm/mremap.c b/mm/mremap.c
index e7ae140fc640..24712f8dbb6b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -902,19 +902,6 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
return -ENOMEM;
- /*
- * In mremap_to().
- * Move a VMA to another location, check if src addr is sealed.
- *
- * Place can_modify_mm here because mremap_to()
- * does its own checking for address range, and we only
- * check the sealing after passing those checks.
- *
- * can_modify_mm assumes we have acquired the lock on MM.
- */
- if (unlikely(!can_modify_mm(mm, addr, addr + old_len)))
- return -EPERM;
-
if (flags & MREMAP_FIXED) {
/*
* In mremap_to().
@@ -1052,6 +1039,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
goto out;
}
+ /* Don't allow remapping vmas when they have already been sealed */
+ if (!can_modify_vma(vma)) {
+ ret = -EPERM;
+ goto out;
+ }
+
if (is_vm_hugetlb_page(vma)) {
struct hstate *h __maybe_unused = hstate_vma(vma);
@@ -1080,19 +1073,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
}
/*
- * Below is shrink/expand case (not mremap_to())
- * Check if src address is sealed, if so, reject.
- * In other words, prevent shrinking or expanding a sealed VMA.
- *
- * Place can_modify_mm here so we can keep the logic related to
- * shrink/expand together.
- */
- if (unlikely(!can_modify_mm(mm, addr, addr + old_len))) {
- ret = -EPERM;
- goto out;
- }
-
- /*
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
* do_vmi_munmap does all the needed commit accounting, and
diff --git a/mm/mseal.c b/mm/mseal.c
index c8787cc6ba55..ece977bd21e1 100644
--- a/mm/mseal.c
+++ b/mm/mseal.c
@@ -16,28 +16,11 @@
#include <linux/sched.h>
#include "internal.h"
-static inline bool vma_is_sealed(struct vm_area_struct *vma)
-{
- return (vma->vm_flags & VM_SEALED);
-}
-
static inline void set_vma_sealed(struct vm_area_struct *vma)
{
vm_flags_set(vma, VM_SEALED);
}
-/*
- * check if a vma is sealed for modification.
- * return true, if modification is allowed.
- */
-static bool can_modify_vma(struct vm_area_struct *vma)
-{
- if (unlikely(vma_is_sealed(vma)))
- return false;
-
- return true;
-}
-
static bool is_madv_discard(int behavior)
{
switch (behavior) {
@@ -71,45 +54,15 @@ static bool is_ro_anon(struct vm_area_struct *vma)
}
/*
- * Check if the vmas of a memory range are allowed to be modified.
- * The memory range can have a gap (unallocated memory).
- * Return true if modification is allowed.
- */
-bool can_modify_mm(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- struct vm_area_struct *vma;
-
- VMA_ITERATOR(vmi, mm, start);
-
- /* going through each vma to check. */
- for_each_vma_range(vmi, vma, end) {
- if (unlikely(!can_modify_vma(vma)))
- return false;
- }
-
- /* Allow by default. */
- return true;
-}
-
-/*
- * Check if the vmas of a memory range are allowed to be modified by madvise.
- * The memory range can have a gap (unallocated memory).
- * Return true if modification is allowed.
+ * Check if a vma is allowed to be modified by madvise.
*/
-bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start, unsigned long end,
- int behavior)
+bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
- struct vm_area_struct *vma;
-
- VMA_ITERATOR(vmi, mm, start);
-
if (!is_madv_discard(behavior))
return true;
- /* going through each vma to check. */
- for_each_vma_range(vmi, vma, end)
- if (unlikely(is_ro_anon(vma) && !can_modify_vma(vma)))
- return false;
+ if (unlikely(!can_modify_vma(vma) && is_ro_anon(vma)))
+ return false;
/* Allow by default. */
return true;
diff --git a/mm/nommu.c b/mm/nommu.c
index 7296e775e04e..385b0c15add8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -126,6 +126,11 @@ void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
}
EXPORT_SYMBOL(__vmalloc_noprof);
+void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+{
+ return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
+}
+
void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
@@ -1573,12 +1578,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
return ret;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags)
-{
- return NULL;
-}
-
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
diff --git a/mm/numa.c b/mm/numa.c
new file mode 100644
index 000000000000..e2eec07707d1
--- /dev/null
+++ b/mm/numa.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/memblock.h>
+#include <linux/printk.h>
+#include <linux/numa.h>
+#include <linux/numa_memblks.h>
+
+struct pglist_data *node_data[MAX_NUMNODES];
+EXPORT_SYMBOL(node_data);
+
+/* Allocate NODE_DATA for a node on the local memory */
+void __init alloc_node_data(int nid)
+{
+ const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
+ u64 nd_pa;
+ void *nd;
+ int tnid;
+
+ /* Allocate node data. Try node-local memory and then any node. */
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, nid);
+ nd = __va(nd_pa);
+
+ /* report and initialize */
+ pr_info("NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
+ nd_pa, nd_pa + nd_size - 1);
+ tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+ if (tnid != nid)
+ pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);
+
+ node_data[nid] = nd;
+ memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+}
+
+void __init alloc_offline_node_data(int nid)
+{
+ pg_data_t *pgdat;
+
+ pgdat = memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);
+ if (!pgdat)
+ panic("Cannot allocate %zuB for node %d.\n",
+ sizeof(*pgdat), nid);
+
+ node_data[nid] = pgdat;
+}
+
+/* Stub functions: */
+
+#ifndef memory_add_physaddr_to_nid
+int memory_add_physaddr_to_nid(u64 start)
+{
+ pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#ifndef phys_to_target_node
+int phys_to_target_node(u64 start)
+{
+ pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phys_to_target_node);
+#endif
diff --git a/arch/x86/mm/numa_emulation.c b/mm/numa_emulation.c
index 9a9305367fdd..031fb9961bf7 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/mm/numa_emulation.c
@@ -6,9 +6,11 @@
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/memblock.h>
-#include <asm/dma.h>
+#include <linux/numa_memblks.h>
+#include <asm/numa.h>
-#include "numa_internal.h"
+#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
+#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
static int emu_nid_to_phys[MAX_NUMNODES];
static char *emu_cmdline __initdata;
@@ -125,7 +127,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
*/
while (!nodes_empty(physnode_mask)) {
for_each_node_mask(i, physnode_mask) {
- u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
+ u64 dma32_end = numa_emu_dma_end();
u64 start, limit, end;
int phys_blk;
@@ -272,7 +274,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
*/
while (!nodes_empty(physnode_mask)) {
for_each_node_mask(i, physnode_mask) {
- u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
+ u64 dma32_end = numa_emu_dma_end();
u64 start, limit, end;
int phys_blk;
@@ -445,15 +447,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
/* copy the physical distance table */
if (numa_dist_cnt) {
- u64 phys;
-
- phys = memblock_phys_alloc_range(phys_size, PAGE_SIZE, 0,
- PFN_PHYS(max_pfn_mapped));
- if (!phys) {
+ phys_dist = memblock_alloc(phys_size, PAGE_SIZE);
+ if (!phys_dist) {
pr_warn("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
goto no_emu;
}
- phys_dist = __va(phys);
for (i = 0; i < numa_dist_cnt; i++)
for (j = 0; j < numa_dist_cnt; j++)
@@ -477,19 +475,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
ei.blk[i].nid != NUMA_NO_NODE)
node_set(ei.blk[i].nid, numa_nodes_parsed);
- /*
- * Transform __apicid_to_node table to use emulated nids by
- * reverse-mapping phys_nid. The maps should always exist but fall
- * back to zero just in case.
- */
- for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
- if (__apicid_to_node[i] == NUMA_NO_NODE)
- continue;
- for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
- if (__apicid_to_node[i] == emu_nid_to_phys[j])
- break;
- __apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
- }
+ numa_emu_update_cpu_to_node(emu_nid_to_phys, ARRAY_SIZE(emu_nid_to_phys));
/* make sure all emulated nodes are mapped to a physical node */
for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
@@ -527,7 +513,7 @@ no_emu:
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
-void numa_add_cpu(int cpu)
+void numa_add_cpu(unsigned int cpu)
{
int physnid, nid;
@@ -545,7 +531,7 @@ void numa_add_cpu(int cpu)
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}
-void numa_remove_cpu(int cpu)
+void numa_remove_cpu(unsigned int cpu)
{
int i;
@@ -553,7 +539,7 @@ void numa_remove_cpu(int cpu)
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void numa_set_cpumask(int cpu, bool enable)
+static void numa_set_cpumask(unsigned int cpu, bool enable)
{
int nid, physnid;
@@ -573,12 +559,12 @@ static void numa_set_cpumask(int cpu, bool enable)
}
}
-void numa_add_cpu(int cpu)
+void numa_add_cpu(unsigned int cpu)
{
numa_set_cpumask(cpu, true);
}
-void numa_remove_cpu(int cpu)
+void numa_remove_cpu(unsigned int cpu)
{
numa_set_cpumask(cpu, false);
}
diff --git a/mm/numa_memblks.c b/mm/numa_memblks.c
new file mode 100644
index 000000000000..be52b93a9c58
--- /dev/null
+++ b/mm/numa_memblks.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/sort.h>
+#include <linux/printk.h>
+#include <linux/memblock.h>
+#include <linux/numa.h>
+#include <linux/numa_memblks.h>
+
+static int numa_distance_cnt;
+static u8 *numa_distance;
+
+nodemask_t numa_nodes_parsed __initdata;
+
+static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
+static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;
+
+/*
+ * Set nodes, which have memory in @mi, in *@nodemask.
+ */
+static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
+ const struct numa_meminfo *mi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
+ if (mi->blk[i].start != mi->blk[i].end &&
+ mi->blk[i].nid != NUMA_NO_NODE)
+ node_set(mi->blk[i].nid, *nodemask);
+}
+
+/**
+ * numa_reset_distance - Reset NUMA distance table
+ *
+ * The current table is freed. The next numa_set_distance() call will
+ * create a new one.
+ */
+void __init numa_reset_distance(void)
+{
+ size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
+
+ /* numa_distance could be 1LU marking allocation failure, test cnt */
+ if (numa_distance_cnt)
+ memblock_free(numa_distance, size);
+ numa_distance_cnt = 0;
+ numa_distance = NULL; /* enable table creation */
+}
+
+static int __init numa_alloc_distance(void)
+{
+ nodemask_t nodes_parsed;
+ size_t size;
+ int i, j, cnt = 0;
+
+ /* size the new table and allocate it */
+ nodes_parsed = numa_nodes_parsed;
+ numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
+
+ for_each_node_mask(i, nodes_parsed)
+ cnt = i;
+ cnt++;
+ size = cnt * cnt * sizeof(numa_distance[0]);
+
+ numa_distance = memblock_alloc(size, PAGE_SIZE);
+ if (!numa_distance) {
+ pr_warn("Warning: can't allocate distance table!\n");
+ /* don't retry until explicitly reset */
+ numa_distance = (void *)1LU;
+ return -ENOMEM;
+ }
+
+ numa_distance_cnt = cnt;
+
+ /* fill with the default distances */
+ for (i = 0; i < cnt; i++)
+ for (j = 0; j < cnt; j++)
+ numa_distance[i * cnt + j] = i == j ?
+ LOCAL_DISTANCE : REMOTE_DISTANCE;
+ printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
+
+ return 0;
+}
+
+/**
+ * numa_set_distance - Set NUMA distance from one NUMA to another
+ * @from: the 'from' node to set distance
+ * @to: the 'to' node to set distance
+ * @distance: NUMA distance
+ *
+ * Set the distance from node @from to @to to @distance. If the distance table
+ * doesn't exist, one which is large enough to accommodate all the currently
+ * known nodes will be created.
+ *
+ * If such a table cannot be allocated, a warning is printed and further
+ * calls are ignored until the distance table is reset with
+ * numa_reset_distance().
+ *
+ * If @from or @to is higher than the highest known node or lower than zero
+ * at the time of table creation or @distance doesn't make sense, the call
+ * is ignored.
+ * This is to allow simplification of specific NUMA config implementations.
+ */
+void __init numa_set_distance(int from, int to, int distance)
+{
+ if (!numa_distance && numa_alloc_distance() < 0)
+ return;
+
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
+ from < 0 || to < 0) {
+ pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ if ((u8)distance != distance ||
+ (from == to && distance != LOCAL_DISTANCE)) {
+ pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ numa_distance[from * numa_distance_cnt + to] = distance;
+}
+
+int __node_distance(int from, int to)
+{
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt)
+ return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+ return numa_distance[from * numa_distance_cnt + to];
+}
+EXPORT_SYMBOL(__node_distance);
+
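
A usage sketch of the distance API; the node ids and the value 20 are
illustrative:

    /* During early NUMA init, e.g. while parsing a SLIT: */
    numa_set_distance(0, 0, LOCAL_DISTANCE);
    numa_set_distance(0, 1, 20);
    numa_set_distance(1, 0, 20);

    /* Reads fall back to defaults for ids outside the table: */
    __node_distance(0, 1);      /* 20, from the table */
    __node_distance(0, 99);     /* REMOTE_DISTANCE: id beyond the table */
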
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+ struct numa_meminfo *mi)
+{
+ /* ignore zero length blks */
+ if (start == end)
+ return 0;
+
+ /* whine about and ignore invalid blks */
+ if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+ pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
+ return 0;
+ }
+
+ if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+ pr_err("too many memblk ranges\n");
+ return -EINVAL;
+ }
+
+ mi->blk[mi->nr_blks].start = start;
+ mi->blk[mi->nr_blks].end = end;
+ mi->blk[mi->nr_blks].nid = nid;
+ mi->nr_blks++;
+ return 0;
+}
+
+/**
+ * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
+ * @idx: Index of memblk to remove
+ * @mi: numa_meminfo to remove memblk from
+ *
+ * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
+ * decrementing @mi->nr_blks.
+ */
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
+{
+ mi->nr_blks--;
+ memmove(&mi->blk[idx], &mi->blk[idx + 1],
+ (mi->nr_blks - idx) * sizeof(mi->blk[0]));
+}
+
+/**
+ * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
+ * @dst: numa_meminfo to append block to
+ * @idx: Index of memblk to remove
+ * @src: numa_meminfo to remove memblk from
+ */
+static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
+ struct numa_meminfo *src)
+{
+ dst->blk[dst->nr_blks++] = src->blk[idx];
+ numa_remove_memblk_from(idx, src);
+}
+
+/**
+ * numa_add_memblk - Add one numa_memblk to numa_meminfo
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * Add a new memblk to the default numa_meminfo.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+ return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
+
+/**
+ * numa_cleanup_meminfo - Cleanup a numa_meminfo
+ * @mi: numa_meminfo to clean up
+ *
+ * Sanitize @mi by merging and removing unnecessary memblks. Also check for
+ * conflicts and clear unused memblks.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
+{
+ const u64 low = memblock_start_of_DRAM();
+ const u64 high = memblock_end_of_DRAM();
+ int i, j, k;
+
+ /* first, trim all entries */
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
+
+ /* move / save reserved memory ranges */
+ if (!memblock_overlaps_region(&memblock.memory,
+ bi->start, bi->end - bi->start)) {
+ numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
+ continue;
+ }
+
+ /* make sure all non-reserved blocks are inside the limits */
+ bi->start = max(bi->start, low);
+
+ /* preserve info for non-RAM areas above 'max_pfn': */
+ if (bi->end > high) {
+ numa_add_memblk_to(bi->nid, high, bi->end,
+ &numa_reserved_meminfo);
+ bi->end = high;
+ }
+
+ /* and there's no empty block */
+ if (bi->start >= bi->end)
+ numa_remove_memblk_from(i--, mi);
+ }
+
+ /* merge neighboring / overlapping entries */
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
+
+ for (j = i + 1; j < mi->nr_blks; j++) {
+ struct numa_memblk *bj = &mi->blk[j];
+ u64 start, end;
+
+ /*
+ * See whether there are overlapping blocks. Whine
+ * about but allow overlaps of the same nid. They
+ * will be merged below.
+ */
+ if (bi->end > bj->start && bi->start < bj->end) {
+ if (bi->nid != bj->nid) {
+ pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1,
+ bj->nid, bj->start, bj->end - 1);
+ return -EINVAL;
+ }
+ pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1,
+ bj->start, bj->end - 1);
+ }
+
+ /*
+ * Join together blocks on the same node, holes
+ * between which don't overlap with memory on other
+ * nodes.
+ */
+ if (bi->nid != bj->nid)
+ continue;
+ start = min(bi->start, bj->start);
+ end = max(bi->end, bj->end);
+ for (k = 0; k < mi->nr_blks; k++) {
+ struct numa_memblk *bk = &mi->blk[k];
+
+ if (bi->nid == bk->nid)
+ continue;
+ if (start < bk->end && end > bk->start)
+ break;
+ }
+ if (k < mi->nr_blks)
+ continue;
+ pr_info("NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1, bj->start,
+ bj->end - 1, start, end - 1);
+ bi->start = start;
+ bi->end = end;
+ numa_remove_memblk_from(j--, mi);
+ }
+ }
+
+ /* clear unused ones */
+ for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
+ mi->blk[i].start = mi->blk[i].end = 0;
+ mi->blk[i].nid = NUMA_NO_NODE;
+ }
+
+ return 0;
+}
+
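
A worked example of the hole-merging rule in numa_cleanup_meminfo(), with
illustrative addresses:

    /*
     * A: nid 0, [0x0000, 0x4000)      B: nid 0, [0x8000, 0xc000)
     *
     * Merging A and B would span [0x0000, 0xc000). If some block C on
     * nid 1 covers [0x4000, 0x8000), the inner k-loop sees the conflict
     * and the merge is skipped; with no such C, A grows to
     * [0x0000, 0xc000) and B is removed.
     */
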
+/*
+ * Mark all currently memblock-reserved physical memory (which covers the
+ * kernel's own memory ranges) as hot-unswappable.
+ */
+static void __init numa_clear_kernel_node_hotplug(void)
+{
+ nodemask_t reserved_nodemask = NODE_MASK_NONE;
+ struct memblock_region *mb_region;
+ int i;
+
+ /*
+ * We have to do some preprocessing of memblock regions, to
+ * make them suitable for reservation.
+ *
+ * At this time, all memory regions reserved by memblock are
+ * used by the kernel, but those regions are not split up
+ * along node boundaries yet, and don't necessarily have their
+ * node ID set yet either.
+ *
+ * So iterate over all parsed memory blocks and use those ranges to
+ * set the nid in memblock.reserved. This will split up the
+ * memblock regions along node boundaries and will set the node IDs
+ * as well.
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = numa_meminfo.blk + i;
+ int ret;
+
+ ret = memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.reserved, mb->nid);
+ WARN_ON_ONCE(ret);
+ }
+
+ /*
+ * Now go over all reserved memblock regions, to construct a
+ * node mask of all kernel reserved memory areas.
+ *
+ * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
+ * numa_meminfo might not include all memblock.reserved
+ * memory ranges, because quirks such as trim_snb_memory()
+ * reserve specific pages for Sandy Bridge graphics. ]
+ */
+ for_each_reserved_mem_region(mb_region) {
+ int nid = memblock_get_region_node(mb_region);
+
+ if (nid != MAX_NUMNODES)
+ node_set(nid, reserved_nodemask);
+ }
+
+ /*
+ * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
+ * belonging to the reserved node mask.
+ *
+ * Note that this will include memory regions that reside
+ * on nodes that contain kernel memory - entire nodes
+ * become hot-unpluggable:
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = numa_meminfo.blk + i;
+
+ if (!node_isset(mb->nid, reserved_nodemask))
+ continue;
+
+ memblock_clear_hotplug(mb->start, mb->end - mb->start);
+ }
+}
+
+static int __init numa_register_meminfo(struct numa_meminfo *mi)
+{
+ int i;
+
+ /* Account for nodes with cpus and no memory */
+ node_possible_map = numa_nodes_parsed;
+ numa_nodemask_from_meminfo(&node_possible_map, mi);
+ if (WARN_ON(nodes_empty(node_possible_map)))
+ return -EINVAL;
+
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *mb = &mi->blk[i];
+
+ memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.memory, mb->nid);
+ }
+
+ /*
+ * Very early on, the kernel has to use some memory, e.g. for
+ * loading the kernel image. We cannot prevent this anyway, so any
+ * node the kernel resides in must be un-hotpluggable.
+ *
+ * And by the time we get here, allocating node data won't fail.
+ */
+ numa_clear_kernel_node_hotplug();
+
+ /*
+ * If the sections array is going to be used for pfn -> nid mapping, check
+ * whether its granularity is fine enough.
+ */
+ if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
+ unsigned long pfn_align = node_map_pfn_alignment();
+
+ if (pfn_align && pfn_align < PAGES_PER_SECTION) {
+ unsigned long node_align_mb = PFN_PHYS(pfn_align) >> 20;
+ unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20;
+
+ pr_warn("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
+ node_align_mb, sect_align_mb);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int __init numa_memblks_init(int (*init_func)(void),
+ bool memblock_force_top_down)
+{
+ phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
+ int ret;
+
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+ WARN_ON(memblock_set_node(0, max_addr, &memblock.memory, NUMA_NO_NODE));
+ WARN_ON(memblock_set_node(0, max_addr, &memblock.reserved,
+ NUMA_NO_NODE));
+ /* In case that parsing SRAT failed. */
+ WARN_ON(memblock_clear_hotplug(0, max_addr));
+ numa_reset_distance();
+
+ ret = init_func();
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We reset memblock back to the top-down direction
+ * here because if we configured ACPI_NUMA, we have
+ * parsed SRAT in init_func(). It is ok to have the
+ * reset here even if we didn't configure ACPI_NUMA
+ * or acpi numa init fails and falls back to dummy
+ * numa init.
+ */
+ if (memblock_force_top_down)
+ memblock_set_bottom_up(false);
+
+ ret = numa_cleanup_meminfo(&numa_meminfo);
+ if (ret < 0)
+ return ret;
+
+ numa_emulation(&numa_meminfo, numa_distance_cnt);
+
+ return numa_register_meminfo(&numa_meminfo);
+}
+
+static int __init cmp_memblk(const void *a, const void *b)
+{
+ const struct numa_memblk *ma = *(const struct numa_memblk **)a;
+ const struct numa_memblk *mb = *(const struct numa_memblk **)b;
+
+ return (ma->start > mb->start) - (ma->start < mb->start);
+}
+
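
cmp_memblk() uses the (a > b) - (a < b) idiom rather than returning a
subtraction, which would be truncated to the comparator's int return type and
could mis-order 64-bit start addresses. The idiom in isolation:

    /* Always evaluates to -1, 0 or 1; safe for the full u64 range. */
    static int cmp_u64(u64 a, u64 b)
    {
            return (a > b) - (a < b);
    }
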
+static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
+
+/**
+ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
+ * @start: address to begin fill
+ * @end: address to end fill
+ *
+ * Find and extend numa_meminfo memblks to cover the physical
+ * address range @start-@end
+ *
+ * RETURNS:
+ * 0 : Success
+ * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
+ */
+int __init numa_fill_memblks(u64 start, u64 end)
+{
+ struct numa_memblk **blk = &numa_memblk_list[0];
+ struct numa_meminfo *mi = &numa_meminfo;
+ int count = 0;
+ u64 prev_end;
+
+ /*
+ * Create a list of pointers to numa_meminfo memblks that
+ * overlap start, end. The list is used to make in-place
+ * changes that fill out the numa_meminfo memblks.
+ */
+ for (int i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
+
+ if (memblock_addrs_overlap(start, end - start, bi->start,
+ bi->end - bi->start)) {
+ blk[count] = &mi->blk[i];
+ count++;
+ }
+ }
+ if (!count)
+ return NUMA_NO_MEMBLK;
+
+ /* Sort the list of pointers in memblk->start order */
+ sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
+
+ /* Make sure the first/last memblks include start/end */
+ blk[0]->start = min(blk[0]->start, start);
+ blk[count - 1]->end = max(blk[count - 1]->end, end);
+
+ /*
+ * Fill any gaps by tracking the previous memblk's
+ * end address and backfilling to it if needed.
+ */
+ prev_end = blk[0]->end;
+ for (int i = 1; i < count; i++) {
+ struct numa_memblk *curr = blk[i];
+
+ if (prev_end >= curr->start) {
+ if (prev_end < curr->end)
+ prev_end = curr->end;
+ } else {
+ curr->start = prev_end;
+ prev_end = curr->end;
+ }
+ }
+ return 0;
+}
+
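
A worked pass of numa_fill_memblks(), with illustrative addresses:

    /*
     * request: start = 0x0000, end = 0x10000
     * overlapping memblks: [0x1000, 0x4000) and [0x8000, 0xc000)
     *
     * After sorting and extending the first and last entries:
     *   blk[0] = [0x0000, 0x4000),  blk[1] = [0x8000, 0x10000)
     * The backfill loop then pulls blk[1]->start back to prev_end,
     * leaving [0x0000, 0x4000) + [0x4000, 0x10000) with no gap.
     */
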
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
+{
+ int i;
+
+ for (i = 0; i < mi->nr_blks; i++)
+ if (mi->blk[i].start <= start && mi->blk[i].end > start)
+ return mi->blk[i].nid;
+ return NUMA_NO_NODE;
+}
+
+int phys_to_target_node(u64 start)
+{
+ int nid = meminfo_to_nid(&numa_meminfo, start);
+
+ /*
+ * Prefer online nodes, but if reserved memory might be
+ * hot-added continue the search with reserved ranges.
+ */
+ if (nid != NUMA_NO_NODE)
+ return nid;
+
+ return meminfo_to_nid(&numa_reserved_meminfo, start);
+}
+EXPORT_SYMBOL_GPL(phys_to_target_node);
+
+int memory_add_physaddr_to_nid(u64 start)
+{
+ int nid = meminfo_to_nid(&numa_meminfo, start);
+
+ if (nid == NUMA_NO_NODE)
+ nid = numa_meminfo.blk[0].nid;
+ return nid;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+
+#endif /* CONFIG_NUMA_KEEP_MEMINFO */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4430ac68e4c4..fcd4c1439cb9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -418,7 +418,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
tsk = current;
- if (rt_task(tsk)) {
+ if (rt_or_dl_task(tsk)) {
bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
}
@@ -477,7 +477,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
else
dirty = vm_dirty_ratio * node_memory / 100;
- if (rt_task(tsk))
+ if (rt_or_dl_task(tsk))
dirty += dirty / 4;
/*
@@ -2612,7 +2612,7 @@ struct folio *writeback_iter(struct address_space *mapping,
done:
if (wbc->range_cyclic)
- mapping->writeback_index = folio->index + folio_nr_pages(folio);
+ mapping->writeback_index = folio_next_index(folio);
folio_batch_release(&wbc->fbatch);
return NULL;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 91ace8ca97e2..8afab64814dc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,9 +286,7 @@ EXPORT_SYMBOL(nr_online_nodes);
#endif
static bool page_contains_unaccepted(struct page *page, unsigned int order);
-static void accept_page(struct page *page, unsigned int order);
static bool cond_accept_memory(struct zone *zone, unsigned int order);
-static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);
int page_group_by_mobility_disabled __read_mostly;
@@ -322,6 +320,11 @@ static inline bool deferred_pages_enabled(void)
{
return false;
}
+
+static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
+{
+ return false;
+}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/* Return a pointer to the bitmap storing bits affecting a block of pages */
@@ -958,8 +961,9 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
break;
case 2:
/* the second tail page: deferred_list overlaps ->mapping */
- if (unlikely(!list_empty(&folio->_deferred_list))) {
- bad_page(page, "on deferred list");
+ if (unlikely(!list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio))) {
+ bad_page(page, "partially mapped folio on deferred list");
goto out;
}
break;
@@ -1087,8 +1091,11 @@ __always_inline bool free_pages_prepare(struct page *page,
(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
}
}
- if (PageMappingFlags(page))
+ if (PageMappingFlags(page)) {
+ if (PageAnon(page))
+ mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
page->mapping = NULL;
+ }
if (is_check_pages_enabled()) {
if (free_page_is_bad(page))
bad++;
@@ -1199,17 +1206,39 @@ static void free_pcppages_bulk(struct zone *zone, int count,
spin_unlock_irqrestore(&zone->lock, flags);
}
+/* Split a multi-block free page into its individual pageblocks. */
+static void split_large_buddy(struct zone *zone, struct page *page,
+ unsigned long pfn, int order, fpi_t fpi)
+{
+ unsigned long end = pfn + (1 << order);
+
+ VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
+ /* Caller removed page from freelist, buddy info cleared! */
+ VM_WARN_ON_ONCE(PageBuddy(page));
+
+ if (order > pageblock_order)
+ order = pageblock_order;
+
+ while (pfn != end) {
+ int mt = get_pfnblock_migratetype(page, pfn);
+
+ __free_one_page(page, pfn, zone, order, mt, fpi);
+ pfn += 1 << order;
+ page = pfn_to_page(pfn);
+ }
+}
+
static void free_one_page(struct zone *zone, struct page *page,
unsigned long pfn, unsigned int order,
fpi_t fpi_flags)
{
unsigned long flags;
- int migratetype;
spin_lock_irqsave(&zone->lock, flags);
- migratetype = get_pfnblock_migratetype(page, pfn);
- __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
+ split_large_buddy(zone, page, pfn, order, fpi_flags);
spin_unlock_irqrestore(&zone->lock, flags);
+
+ __count_vm_events(PGFREE, 1 << order);
}
static void __free_pages_ok(struct page *page, unsigned int order,
@@ -1218,12 +1247,8 @@ static void __free_pages_ok(struct page *page, unsigned int order,
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
- if (!free_pages_prepare(page, order))
- return;
-
- free_one_page(zone, page, pfn, order, fpi_flags);
-
- __count_vm_events(PGFREE, 1 << order);
+ if (free_pages_prepare(page, order))
+ free_one_page(zone, page, pfn, order, fpi_flags);
}
void __meminit __free_pages_core(struct page *page, unsigned int order,
@@ -1270,7 +1295,7 @@ void __meminit __free_pages_core(struct page *page, unsigned int order,
if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
return;
- accept_page(page, order);
+ accept_memory(page_to_phys(page), PAGE_SIZE << order);
}
/*
@@ -1346,11 +1371,11 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
*
* -- nyc
*/
-static inline void expand(struct zone *zone, struct page *page,
- int low, int high, int migratetype)
+static inline unsigned int expand(struct zone *zone, struct page *page, int low,
+ int high, int migratetype)
{
- unsigned long size = 1 << high;
- unsigned long nr_added = 0;
+ unsigned int size = 1 << high;
+ unsigned int nr_added = 0;
while (high > low) {
high--;
@@ -1370,7 +1395,19 @@ static inline void expand(struct zone *zone, struct page *page,
set_buddy_order(&page[size], high);
nr_added += size;
}
- account_freepages(zone, nr_added, migratetype);
+
+ return nr_added;
+}
+
+static __always_inline void page_del_and_expand(struct zone *zone,
+ struct page *page, int low,
+ int high, int migratetype)
+{
+ int nr_pages = 1 << high;
+
+ __del_page_from_free_list(page, zone, high, migratetype);
+ nr_pages -= expand(zone, page, low, high, migratetype);
+ account_freepages(zone, -nr_pages, migratetype);
}
static void check_new_page_bad(struct page *page)
@@ -1540,8 +1577,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
page = get_page_from_free_area(area, migratetype);
if (!page)
continue;
- del_page_from_free_list(page, zone, current_order, migratetype);
- expand(zone, page, order, current_order, migratetype);
+
+ page_del_and_expand(zone, page, order, current_order,
+ migratetype);
trace_mm_page_alloc_zone_locked(page, order, migratetype,
pcp_allowed_order(order) &&
migratetype < MIGRATE_PCPTYPES);
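
The reworked expand() now returns how many pages it handed back to the
freelists, so page_del_and_expand() can issue a single account_freepages() call
for the net change. A standalone sketch of that arithmetic, assuming the usual
buddy halving; pages_consumed is a hypothetical name:

    /* Net pages consumed when an order-'high' block is split to order-'low'. */
    static unsigned int pages_consumed(unsigned int low, unsigned int high)
    {
            unsigned int nr = 1u << high;       /* taken off the freelist */
            unsigned int returned = 0;          /* what expand() reports */

            while (high > low)
                    returned += 1u << --high;   /* one buddy handed back per level */

            return nr - returned;               /* always equals 1u << low */
    }
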
@@ -1700,27 +1738,6 @@ static unsigned long find_large_buddy(unsigned long start_pfn)
return start_pfn;
}
-/* Split a multi-block free page into its individual pageblocks */
-static void split_large_buddy(struct zone *zone, struct page *page,
- unsigned long pfn, int order)
-{
- unsigned long end_pfn = pfn + (1 << order);
-
- VM_WARN_ON_ONCE(order <= pageblock_order);
- VM_WARN_ON_ONCE(pfn & (pageblock_nr_pages - 1));
-
- /* Caller removed page from freelist, buddy info cleared! */
- VM_WARN_ON_ONCE(PageBuddy(page));
-
- while (pfn != end_pfn) {
- int mt = get_pfnblock_migratetype(page, pfn);
-
- __free_one_page(page, pfn, zone, pageblock_order, mt, FPI_NONE);
- pfn += pageblock_nr_pages;
- page = pfn_to_page(pfn);
- }
-}
-
/**
* move_freepages_block_isolate - move free pages in block for page isolation
* @zone: the zone
@@ -1761,7 +1778,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(buddy, zone, order,
get_pfnblock_migratetype(buddy, pfn));
set_pageblock_migratetype(page, migratetype);
- split_large_buddy(zone, buddy, pfn, order);
+ split_large_buddy(zone, buddy, pfn, order, FPI_NONE);
return true;
}
@@ -1772,7 +1789,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
del_page_from_free_list(page, zone, order,
get_pfnblock_migratetype(page, pfn));
set_pageblock_migratetype(page, migratetype);
- split_large_buddy(zone, page, pfn, order);
+ split_large_buddy(zone, page, pfn, order, FPI_NONE);
return true;
}
move:
@@ -1892,9 +1909,12 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
/* Take ownership for orders >= pageblock_order */
if (current_order >= pageblock_order) {
+ unsigned int nr_added;
+
del_page_from_free_list(page, zone, current_order, block_type);
change_pageblock_range(page, current_order, start_type);
- expand(zone, page, order, current_order, start_type);
+ nr_added = expand(zone, page, order, current_order, start_type);
+ account_freepages(zone, nr_added, start_type);
return page;
}
@@ -1947,8 +1967,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
}
single_page:
- del_page_from_free_list(page, zone, current_order, block_type);
- expand(zone, page, order, current_order, block_type);
+ page_del_and_expand(zone, page, order, current_order, block_type);
return page;
}
@@ -2764,7 +2783,7 @@ void split_page(struct page *page, unsigned int order)
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
split_page_owner(page, order, 0);
- pgalloc_tag_split(page, 1 << order);
+ pgalloc_tag_split(page_folio(page), order, 0);
split_page_memcg(page, order, 0);
}
EXPORT_SYMBOL_GPL(split_page);
@@ -3033,12 +3052,6 @@ struct page *rmqueue(struct zone *preferred_zone,
{
struct page *page;
- /*
- * We most definitely don't want callers attempting to
- * allocate greater than order-1 page units with __GFP_NOFAIL.
- */
- WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-
if (likely(pcp_allowed_order(order))) {
page = rmqueue_pcplist(preferred_zone, zone, order,
migratetype, alloc_flags);
@@ -3357,7 +3370,7 @@ retry:
}
if (no_fallback && nr_online_nodes > 1 &&
- zone != ac->preferred_zoneref->zone) {
+ zone != zonelist_zone(ac->preferred_zoneref)) {
int local_nid;
/*
@@ -3365,7 +3378,7 @@ retry:
* fragmenting fallbacks. Locality is more important
* than fragmentation avoidance.
*/
- local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+ local_nid = zonelist_node_idx(ac->preferred_zoneref);
if (zone_to_nid(zone) != local_nid) {
alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
@@ -3402,7 +3415,6 @@ check_alloc_wmark:
if (cond_accept_memory(zone, order))
goto try_this_zone;
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
* Watermark failed for this zone, but see if we can
* grow this zone if it contains deferred pages.
@@ -3411,14 +3423,13 @@ check_alloc_wmark:
if (_deferred_grow_zone(zone, order))
goto try_this_zone;
}
-#endif
/* Checked here to keep the fast path fast */
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
if (alloc_flags & ALLOC_NO_WATERMARKS)
goto try_this_zone;
if (!node_reclaim_enabled() ||
- !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
+ !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
continue;
ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
@@ -3440,7 +3451,7 @@ check_alloc_wmark:
}
try_this_zone:
- page = rmqueue(ac->preferred_zoneref->zone, zone, order,
+ page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
gfp_mask, alloc_flags, ac->migratetype);
if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -3457,13 +3468,11 @@ try_this_zone:
if (cond_accept_memory(zone, order))
goto try_this_zone;
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/* Try again if zone has deferred pages */
if (deferred_pages_enabled()) {
if (_deferred_grow_zone(zone, order))
goto try_this_zone;
}
-#endif
}
}
@@ -4004,7 +4013,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
*/
if (alloc_flags & ALLOC_MIN_RESERVE)
alloc_flags &= ~ALLOC_CPUSET;
- } else if (unlikely(rt_task(current)) && in_task())
+ } else if (unlikely(rt_or_dl_task(current)) && in_task())
alloc_flags |= ALLOC_MIN_RESERVE;
alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
@@ -4100,6 +4109,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
unsigned long min_wmark = min_wmark_pages(zone);
bool wmark;
+ if (cpusets_enabled() &&
+ (alloc_flags & ALLOC_CPUSET) &&
+ !__cpuset_zone_allowed(zone, gfp_mask))
+ continue;
+
available = reclaimable = zone_reclaimable_pages(zone);
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
@@ -4175,6 +4189,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
{
bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
bool can_compact = gfp_compaction_allowed(gfp_mask);
+ bool nofail = gfp_mask & __GFP_NOFAIL;
const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
struct page *page = NULL;
unsigned int alloc_flags;
@@ -4187,6 +4202,25 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned int zonelist_iter_cookie;
int reserve_flags;
+ if (unlikely(nofail)) {
+ /*
+ * We most definitely don't want callers attempting to
+ * allocate greater than order-1 page units with __GFP_NOFAIL.
+ */
+ WARN_ON_ONCE(order > 1);
+ /*
+		 * We also don't support __GFP_NOFAIL without __GFP_DIRECT_RECLAIM;
+		 * otherwise we may end up in a lockup.
+ */
+ WARN_ON_ONCE(!can_direct_reclaim);
+ /*
+ * PF_MEMALLOC request from this context is rather bizarre
+		 * because we cannot reclaim anything and can only loop waiting
+		 * for somebody to do work for us.
+ */
+ WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+ }
+
restart:
compaction_retries = 0;
no_progress_loops = 0;
@@ -4209,7 +4243,7 @@ restart:
*/
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx, ac->nodemask);
- if (!ac->preferred_zoneref->zone)
+ if (!zonelist_zone(ac->preferred_zoneref))
goto nopage;
/*
@@ -4221,7 +4255,7 @@ restart:
struct zoneref *z = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx,
&cpuset_current_mems_allowed);
- if (!z->zone)
+ if (!zonelist_zone(z))
goto nopage;
}
@@ -4404,30 +4438,16 @@ nopage:
* Make sure that __GFP_NOFAIL request doesn't leak out and make sure
* we always retry
*/
- if (gfp_mask & __GFP_NOFAIL) {
+ if (unlikely(nofail)) {
/*
- * All existing users of the __GFP_NOFAIL are blockable, so warn
- * of any new users that actually require GFP_NOWAIT
+		 * Lacking direct reclaim, we can't do anything to reclaim memory,
+		 * so we disregard these unreasonable nofail requests and still
+		 * return NULL.
*/
- if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
+ if (!can_direct_reclaim)
goto fail;
/*
- * PF_MEMALLOC request from this context is rather bizarre
- * because we cannot reclaim anything and only can loop waiting
- * for somebody to do a work for us
- */
- WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
-
- /*
- * non failing costly orders are a hard requirement which we
- * are not prepared for much so let's warn about these users
- * so that we can identify them and convert them to something
- * else.
- */
- WARN_ON_ONCE_GFP(costly_order, gfp_mask);
-
- /*
* Help non-failing allocations by giving some access to memory
* reserves normally used for high priority non-blocking
* allocations but do not use ALLOC_NO_WATERMARKS because this
@@ -4578,17 +4598,28 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
continue;
}
- if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
- zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+ if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+ zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
goto failed;
}
+ cond_accept_memory(zone, 0);
+retry_this_zone:
mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
if (zone_watermark_fast(zone, 0, mark,
zonelist_zone_idx(ac.preferred_zoneref),
alloc_flags, gfp)) {
break;
}
+
+ if (cond_accept_memory(zone, 0))
+ goto retry_this_zone;
+
+ /* Try again if zone has deferred pages */
+ if (deferred_pages_enabled()) {
+ if (_deferred_grow_zone(zone, 0))
+ goto retry_this_zone;
+ }
}
/*
@@ -4638,7 +4669,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
pcp_trylock_finish(UP_flags);
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
- zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+ zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
out:
return nr_populated;
@@ -4696,7 +4727,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
* Forbid the first pass from falling back to types that fragment
* memory until all local zones are considered.
*/
- alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+ alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
/* First allocation attempt */
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -4950,7 +4981,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
struct page *last = page + nr;
split_page_owner(page, order, 0);
- pgalloc_tag_split(page, 1 << order);
+ pgalloc_tag_split(page_folio(page), order, 0);
split_page_memcg(page, order, 0);
while (page < --last)
set_page_refcounted(last);
@@ -5301,7 +5332,7 @@ int local_memory_node(int node)
z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
gfp_zone(GFP_KERNEL),
NULL);
- return zone_to_nid(z->zone);
+ return zonelist_node_idx(z);
}
#endif
@@ -6433,6 +6464,31 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
return (ret < 0) ? ret : 0;
}
+static void split_free_pages(struct list_head *list)
+{
+ int order;
+
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct page *page, *next;
+ int nr_pages = 1 << order;
+
+ list_for_each_entry_safe(page, next, &list[order], lru) {
+ int i;
+
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ if (!order)
+ continue;
+
+ split_page(page, order);
+
+ /* Add all subpages to the order-0 head, in sequence. */
+ list_del(&page->lru);
+ for (i = 0; i < nr_pages; i++)
+ list_add_tail(&page[i].lru, &list[0]);
+ }
+ }
+}
+
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
@@ -6545,12 +6601,25 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
goto done;
}
- /* Free head and tail (if any) */
- if (start != outer_start)
- free_contig_range(outer_start, start - outer_start);
- if (end != outer_end)
- free_contig_range(end, outer_end - end);
+ if (!(gfp_mask & __GFP_COMP)) {
+ split_free_pages(cc.freepages);
+ /* Free head and tail (if any) */
+ if (start != outer_start)
+ free_contig_range(outer_start, start - outer_start);
+ if (end != outer_end)
+ free_contig_range(end, outer_end - end);
+ } else if (start == outer_start && end == outer_end && is_power_of_2(end - start)) {
+ struct page *head = pfn_to_page(start);
+ int order = ilog2(end - start);
+
+ check_new_pages(head, order);
+ prep_new_page(head, order, gfp_mask, 0);
+ } else {
+ ret = -EINVAL;
+ WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
+ start, end, outer_start, outer_end);
+ }
done:
undo_isolate_page_range(start, end, migratetype);
return ret;
@@ -6659,6 +6728,18 @@ struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
unsigned long count = 0;
+ struct folio *folio = pfn_folio(pfn);
+
+ if (folio_test_large(folio)) {
+ int expected = folio_nr_pages(folio);
+
+ if (nr_pages == expected)
+ folio_put(folio);
+ else
+ WARN(true, "PFN %lu: nr_pages %lu != expected %d\n",
+ pfn, nr_pages, expected);
+ return;
+ }
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
@@ -6927,23 +7008,50 @@ early_param("accept_memory", accept_memory_parse);
static bool page_contains_unaccepted(struct page *page, unsigned int order)
{
phys_addr_t start = page_to_phys(page);
- phys_addr_t end = start + (PAGE_SIZE << order);
- return range_contains_unaccepted_memory(start, end);
+ return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
}
-static void accept_page(struct page *page, unsigned int order)
+static void __accept_page(struct zone *zone, unsigned long *flags,
+ struct page *page)
{
- phys_addr_t start = page_to_phys(page);
+ bool last;
+
+ list_del(&page->lru);
+ last = list_empty(&zone->unaccepted_pages);
+
+ account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+ __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+ __ClearPageUnaccepted(page);
+ spin_unlock_irqrestore(&zone->lock, *flags);
+
+ accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
+
+ __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
- accept_memory(start, start + (PAGE_SIZE << order));
+ if (last)
+ static_branch_dec(&zones_with_unaccepted_pages);
+}
+
+void accept_page(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ if (!PageUnaccepted(page)) {
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return;
+ }
+
+ /* Unlocks zone->lock */
+ __accept_page(zone, &flags, page);
}
static bool try_to_accept_memory_one(struct zone *zone)
{
unsigned long flags;
struct page *page;
- bool last;
spin_lock_irqsave(&zone->lock, flags);
page = list_first_entry_or_null(&zone->unaccepted_pages,
@@ -6953,23 +7061,17 @@ static bool try_to_accept_memory_one(struct zone *zone)
return false;
}
- list_del(&page->lru);
- last = list_empty(&zone->unaccepted_pages);
-
- account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
- __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
- spin_unlock_irqrestore(&zone->lock, flags);
-
- accept_page(page, MAX_PAGE_ORDER);
-
- __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
-
- if (last)
- static_branch_dec(&zones_with_unaccepted_pages);
+ /* Unlocks zone->lock */
+ __accept_page(zone, &flags, page);
return true;
}
+static inline bool has_unaccepted_memory(void)
+{
+ return static_branch_unlikely(&zones_with_unaccepted_pages);
+}
+
static bool cond_accept_memory(struct zone *zone, unsigned int order)
{
long to_accept;
@@ -6981,8 +7083,8 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order)
if (list_empty(&zone->unaccepted_pages))
return false;
- /* How much to accept to get to high watermark? */
- to_accept = high_wmark_pages(zone) -
+ /* How much to accept to get to promo watermark? */
+ to_accept = promo_wmark_pages(zone) -
(zone_page_state(zone, NR_FREE_PAGES) -
__zone_watermark_unusable_free(zone, order, 0) -
zone_page_state(zone, NR_UNACCEPTED));
@@ -6997,11 +7099,6 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order)
return ret;
}
-static inline bool has_unaccepted_memory(void)
-{
- return static_branch_unlikely(&zones_with_unaccepted_pages);
-}
-
static bool __free_unaccepted(struct page *page)
{
struct zone *zone = page_zone(page);
@@ -7016,6 +7113,7 @@ static bool __free_unaccepted(struct page *page)
list_add_tail(&page->lru, &zone->unaccepted_pages);
account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
+ __SetPageUnaccepted(page);
spin_unlock_irqrestore(&zone->lock, flags);
if (first)
@@ -7031,20 +7129,11 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
return false;
}
-static void accept_page(struct page *page, unsigned int order)
-{
-}
-
static bool cond_accept_memory(struct zone *zone, unsigned int order)
{
return false;
}
-static inline bool has_unaccepted_memory(void)
-{
- return false;
-}
-
static bool __free_unaccepted(struct page *page)
{
BUILD_BUG();
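
With the __GFP_NOFAIL sanity checks moved out of rmqueue() and into the
slowpath (where nofail is computed once), the caller-visible contract is
unchanged. A hedged sketch of what remains acceptable; must_get_page is a
hypothetical caller:

    /* __GFP_NOFAIL requests must be order <= 1 and blockable, matching
     * the WARN_ON_ONCE() checks the slowpath now performs. */
    static struct page *must_get_page(void)
    {
            /* order 0 and GFP_KERNEL (direct reclaim allowed): passes both */
            return alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 0);
    }
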
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 0153f5bb3161..b249d15af9dd 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -13,6 +13,11 @@
#include <linux/bug.h>
#include <asm/page.h>
+static bool track_protection(struct page_counter *c)
+{
+ return c->protection_support;
+}
+
static void propagate_protected_usage(struct page_counter *c,
unsigned long usage)
{
@@ -57,7 +62,8 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
new = 0;
atomic_long_set(&counter->usage, new);
}
- propagate_protected_usage(counter, new);
+ if (track_protection(counter))
+ propagate_protected_usage(counter, new);
}
/**
@@ -70,18 +76,33 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;
+ bool protection = track_protection(counter);
for (c = counter; c; c = c->parent) {
long new;
new = atomic_long_add_return(nr_pages, &c->usage);
- propagate_protected_usage(c, new);
+ if (protection)
+ propagate_protected_usage(c, new);
/*
* This is indeed racy, but we can live with some
* inaccuracy in the watermark.
+ *
+ * Notably, we have two watermarks to allow for both a globally
+ * visible peak and one that can be reset at a smaller scope.
+ *
+ * Since we reset both watermarks when the global reset occurs,
+ * we can guarantee that watermark >= local_watermark, so we
+ * don't need to do both comparisons every time.
+ *
+ * On systems with branch predictors, the inner condition should
+ * be almost free.
*/
- if (new > READ_ONCE(c->watermark))
- WRITE_ONCE(c->watermark, new);
+ if (new > READ_ONCE(c->local_watermark)) {
+ WRITE_ONCE(c->local_watermark, new);
+ if (new > READ_ONCE(c->watermark))
+ WRITE_ONCE(c->watermark, new);
+ }
}
}
@@ -99,6 +120,7 @@ bool page_counter_try_charge(struct page_counter *counter,
struct page_counter **fail)
{
struct page_counter *c;
+ bool protection = track_protection(counter);
for (c = counter; c; c = c->parent) {
long new;
@@ -128,13 +150,15 @@ bool page_counter_try_charge(struct page_counter *counter,
*fail = c;
goto failed;
}
- propagate_protected_usage(c, new);
- /*
- * Just like with failcnt, we can live with some
- * inaccuracy in the watermark.
- */
- if (new > READ_ONCE(c->watermark))
- WRITE_ONCE(c->watermark, new);
+ if (protection)
+ propagate_protected_usage(c, new);
+
+ /* see comment on page_counter_charge */
+ if (new > READ_ONCE(c->local_watermark)) {
+ WRITE_ONCE(c->local_watermark, new);
+ if (new > READ_ONCE(c->watermark))
+ WRITE_ONCE(c->watermark, new);
+ }
}
return true;
@@ -264,6 +288,7 @@ int page_counter_memparse(const char *buf, const char *max,
}
+#ifdef CONFIG_MEMCG
/*
* This function calculates an individual page counter's effective
* protection which is derived from its own memory.min/low, its
@@ -435,3 +460,4 @@ void page_counter_calculate_protection(struct page_counter *root,
atomic_long_read(&parent->children_low_usage),
recursive_protection));
}
+#endif /* CONFIG_MEMCG */
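
The comment block added to page_counter_charge() explains the invariant; a
compact sketch of the same update pattern, assuming a global reset always
re-establishes watermark >= local_watermark:

    /* Dual-watermark update, mirroring the charge paths above. */
    static void note_usage(struct page_counter *c, long new)
    {
            if (new > READ_ONCE(c->local_watermark)) {
                    WRITE_ONCE(c->local_watermark, new);
                    /* the outer mark can only be >= the local one */
                    if (new > READ_ONCE(c->watermark))
                            WRITE_ONCE(c->watermark, new);
            }
    }
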
diff --git a/mm/page_io.c b/mm/page_io.c
index ff8c99ee3af7..78bc88acee79 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -172,6 +172,60 @@ bad_bmap:
goto out;
}
+static bool is_folio_zero_filled(struct folio *folio)
+{
+ unsigned int pos, last_pos;
+ unsigned long *data;
+ unsigned int i;
+
+ last_pos = PAGE_SIZE / sizeof(*data) - 1;
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ data = kmap_local_folio(folio, i * PAGE_SIZE);
+ /*
+		 * Check last word first, in case the page is zero-filled at
+ * the start and has non-zero data at the end, which is common
+ * in real-world workloads.
+ */
+ if (data[last_pos]) {
+ kunmap_local(data);
+ return false;
+ }
+ for (pos = 0; pos < last_pos; pos++) {
+ if (data[pos]) {
+ kunmap_local(data);
+ return false;
+ }
+ }
+ kunmap_local(data);
+ }
+
+ return true;
+}
+
+static void swap_zeromap_folio_set(struct folio *folio)
+{
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ swp_entry_t entry;
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ entry = page_swap_entry(folio_page(folio, i));
+ set_bit(swp_offset(entry), sis->zeromap);
+ }
+}
+
+static void swap_zeromap_folio_clear(struct folio *folio)
+{
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ swp_entry_t entry;
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ entry = page_swap_entry(folio_page(folio, i));
+ clear_bit(swp_offset(entry), sis->zeromap);
+ }
+}
+
/*
* We may have stale swap cache pages in memory: notice
* them here and get rid of the unnecessary final write.
@@ -195,6 +249,25 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
folio_unlock(folio);
return ret;
}
+
+ /*
+ * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
+ * The bits in zeromap are protected by the locked swapcache folio
+ * and atomic updates are used to protect against read-modify-write
+ * corruption due to other zero swap entries seeing concurrent updates.
+ */
+ if (is_folio_zero_filled(folio)) {
+ swap_zeromap_folio_set(folio);
+ folio_unlock(folio);
+ return 0;
+ } else {
+ /*
+ * Clear bits this folio occupies in the zeromap to prevent
+ * zero data being read in from any previous zero writes that
+ * occupied the same swap entries.
+ */
+ swap_zeromap_folio_clear(folio);
+ }
if (zswap_store(folio)) {
folio_unlock(folio);
return 0;
@@ -273,9 +346,7 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
* memory for allocating transmit buffers.
* Mark the page dirty and avoid
* folio_rotate_reclaimable but rate-limit the
- * messages but do not flag PageError like
- * the normal direct-to-bio case as it could
- * be temporary.
+ * messages.
*/
pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
ret, swap_dev_pos(page_swap_entry(page)));
@@ -429,6 +500,28 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
mempool_free(sio, sio_pool);
}
+static bool swap_read_folio_zeromap(struct folio *folio)
+{
+ int nr_pages = folio_nr_pages(folio);
+ bool is_zeromap;
+
+ /*
+ * Swapping in a large folio that is partially in the zeromap is not
+ * currently handled. Return true without marking the folio uptodate so
+ * that an IO error is emitted (e.g. do_swap_page() will sigbus).
+ */
+ if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
+ &is_zeromap) != nr_pages))
+ return true;
+
+ if (!is_zeromap)
+ return false;
+
+ folio_zero_range(folio, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
+ return true;
+}
+
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
struct swap_info_struct *sis = swp_swap_info(folio->swap);
@@ -519,9 +612,18 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
}
delayacct_swapin_start();
- if (zswap_load(folio)) {
+ if (swap_read_folio_zeromap(folio)) {
+ folio_unlock(folio);
+ goto finish;
+ } else if (zswap_load(folio)) {
folio_unlock(folio);
- } else if (data_race(sis->flags & SWP_FS_OPS)) {
+ goto finish;
+ }
+
+ /* We have to read from slower devices. Increase zswap protection. */
+ zswap_folio_swapin(folio);
+
+ if (data_race(sis->flags & SWP_FS_OPS)) {
swap_read_folio_fs(folio, plug);
} else if (synchronous) {
swap_read_folio_bdev_sync(folio, sis);
@@ -529,6 +631,7 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
swap_read_folio_bdev_async(folio, sis);
}
+finish:
if (workingset) {
delayacct_thrashing_end(&in_thrashing);
psi_memstall_leave(&pflags);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 042937d5abe4..7e04047977cf 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -152,6 +152,9 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
unsigned long flags;
unsigned long check_unmovable_start, check_unmovable_end;
+ if (PageUnaccepted(page))
+ accept_page(page);
+
spin_lock_irqsave(&zone->lock, flags);
/*
@@ -367,6 +370,11 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
VM_BUG_ON(!page);
pfn = page_to_pfn(page);
+ if (PageUnaccepted(page)) {
+ pfn += MAX_ORDER_NR_PAGES;
+ continue;
+ }
+
if (PageBuddy(page)) {
int order = buddy_order(page);
@@ -395,30 +403,8 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
unsigned long head_pfn = page_to_pfn(head);
unsigned long nr_pages = compound_nr(head);
- if (head_pfn + nr_pages <= boundary_pfn) {
- pfn = head_pfn + nr_pages;
- continue;
- }
-
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
- if (PageHuge(page)) {
- int page_mt = get_pageblock_migratetype(page);
- struct compact_control cc = {
- .nr_migratepages = 0,
- .order = -1,
- .zone = page_zone(pfn_to_page(head_pfn)),
- .mode = MIGRATE_SYNC,
- .ignore_skip_hint = true,
- .no_set_skip_hint = true,
- .gfp_mask = gfp_flags,
- .alloc_contig = true,
- };
- INIT_LIST_HEAD(&cc.migratepages);
-
- ret = __alloc_contig_migrate_range(&cc, head_pfn,
- head_pfn + nr_pages, page_mt);
- if (ret)
- goto failed;
+ if (head_pfn + nr_pages <= boundary_pfn ||
+ PageHuge(page)) {
pfn = head_pfn + nr_pages;
continue;
}
@@ -432,7 +418,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
*/
VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);
-#endif
+
goto failed;
}
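
Both hunks hinge on the new PageUnaccepted test: set_migratetype_isolate()
accepts the block up front, while isolate_single_pageblock() simply steps over
still-unaccepted MAX_ORDER-sized chunks. A hedged sketch of the skip logic,
with a hypothetical helper name:

    /* Hypothetical scan step: unaccepted chunks are MAX_ORDER sized. */
    static unsigned long next_scan_pfn(struct page *page, unsigned long pfn)
    {
            if (PageUnaccepted(page))
                    return pfn + MAX_ORDER_NR_PAGES; /* skip the whole chunk */
            return pfn + 1;
    }
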
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index ae2f08ce991b..461ea3bbd8d9 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -3,6 +3,8 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
/*
 * We want to know the real level where an entry is located ignoring any
@@ -654,3 +656,203 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
return err;
}
+
+/**
+ * folio_walk_start - walk the page tables to a folio
+ * @fw: filled with information on success.
+ * @vma: the VMA.
+ * @addr: the virtual address to use for the page table walk.
+ * @flags: flags modifying which folios to walk to.
+ *
+ * Walk the page tables using @addr in a given @vma to a mapped folio and
+ * return the folio, making sure that the page table entry referenced by
+ * @addr cannot change until folio_walk_end() was called.
+ *
+ * By default, this function returns only folios that are not special (e.g., not
+ * the zeropage) and never returns folios that are supposed to be ignored by the
+ * VM as documented by vm_normal_page(). If requested, zeropages will be
+ * returned as well.
+ *
+ * By default, this function only considers present page table entries.
+ * If requested, it will also consider migration entries.
+ *
+ * If this function returns NULL it might either indicate "there is nothing" or
+ * "there is nothing suitable".
+ *
+ * On success, @fw is filled and the function returns the folio while the PTL
+ * is still held and folio_walk_end() must be called to clean up,
+ * releasing any held locks. The returned folio must *not* be used after the
+ * call to folio_walk_end(), unless a short-term folio reference is taken before
+ * that call.
+ *
+ * @fw->page will correspond to the page that is effectively referenced by
+ * @addr. However, for migration entries and shared zeropages @fw->page is
+ * set to NULL. Note that large folios might be mapped by multiple page table
+ * entries, and this function will always only lookup a single entry as
+ * specified by @addr, which might or might not cover more than a single page of
+ * the returned folio.
+ *
+ * This function must *not* be used as a naive replacement for
+ * get_user_pages() / pin_user_pages(), especially not to perform DMA or
+ * to carelessly modify page content. This function may *only* be used to grab
+ * short-term folio references, never to grab long-term folio references.
+ *
+ * Using the page table entry pointers in @fw for reading or modifying the
+ * entry should be avoided where possible: however, there might be valid
+ * use cases.
+ *
+ * WARNING: Modifying page table entries in hugetlb VMAs requires a lot of care.
+ * For example, PMD page table sharing might require prior unsharing. Also,
+ * logical hugetlb entries might span multiple physical page table entries,
+ * which *must* be modified in a single operation (set_huge_pte_at(),
+ * huge_ptep_set_*, ...). Note that the page table entry stored in @fw might
+ * not correspond to the first physical entry of a logical hugetlb entry.
+ *
+ * The mmap lock must be held in read mode.
+ *
+ * Return: folio pointer on success, otherwise NULL.
+ */
+struct folio *folio_walk_start(struct folio_walk *fw,
+ struct vm_area_struct *vma, unsigned long addr,
+ folio_walk_flags_t flags)
+{
+ unsigned long entry_size;
+ bool expose_page = true;
+ struct page *page;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+ spinlock_t *ptl;
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+
+ mmap_assert_locked(vma->vm_mm);
+ vma_pgtable_walk_begin(vma);
+
+ if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))
+ goto not_found;
+
+ pgdp = pgd_offset(vma->vm_mm, addr);
+ if (pgd_none_or_clear_bad(pgdp))
+ goto not_found;
+
+ p4dp = p4d_offset(pgdp, addr);
+ if (p4d_none_or_clear_bad(p4dp))
+ goto not_found;
+
+ pudp = pud_offset(p4dp, addr);
+ pud = pudp_get(pudp);
+ if (pud_none(pud))
+ goto not_found;
+ if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) && pud_leaf(pud)) {
+ ptl = pud_lock(vma->vm_mm, pudp);
+ pud = pudp_get(pudp);
+
+ entry_size = PUD_SIZE;
+ fw->level = FW_LEVEL_PUD;
+ fw->pudp = pudp;
+ fw->pud = pud;
+
+ if (!pud_present(pud) || pud_devmap(pud) || pud_special(pud)) {
+ spin_unlock(ptl);
+ goto not_found;
+ } else if (!pud_leaf(pud)) {
+ spin_unlock(ptl);
+ goto pmd_table;
+ }
+ /*
+ * TODO: vm_normal_page_pud() will be handy once we want to
+ * support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
+ */
+ page = pud_page(pud);
+ goto found;
+ }
+
+pmd_table:
+ VM_WARN_ON_ONCE(pud_leaf(*pudp));
+ pmdp = pmd_offset(pudp, addr);
+ pmd = pmdp_get_lockless(pmdp);
+ if (pmd_none(pmd))
+ goto not_found;
+ if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) && pmd_leaf(pmd)) {
+ ptl = pmd_lock(vma->vm_mm, pmdp);
+ pmd = pmdp_get(pmdp);
+
+ entry_size = PMD_SIZE;
+ fw->level = FW_LEVEL_PMD;
+ fw->pmdp = pmdp;
+ fw->pmd = pmd;
+
+ if (pmd_none(pmd)) {
+ spin_unlock(ptl);
+ goto not_found;
+ } else if (!pmd_leaf(pmd)) {
+ spin_unlock(ptl);
+ goto pte_table;
+ } else if (pmd_present(pmd)) {
+ page = vm_normal_page_pmd(vma, addr, pmd);
+ if (page) {
+ goto found;
+ } else if ((flags & FW_ZEROPAGE) &&
+ is_huge_zero_pmd(pmd)) {
+ page = pfn_to_page(pmd_pfn(pmd));
+ expose_page = false;
+ goto found;
+ }
+ } else if ((flags & FW_MIGRATION) &&
+ is_pmd_migration_entry(pmd)) {
+ swp_entry_t entry = pmd_to_swp_entry(pmd);
+
+ page = pfn_swap_entry_to_page(entry);
+ expose_page = false;
+ goto found;
+ }
+ spin_unlock(ptl);
+ goto not_found;
+ }
+
+pte_table:
+ VM_WARN_ON_ONCE(pmd_leaf(pmdp_get_lockless(pmdp)));
+ ptep = pte_offset_map_lock(vma->vm_mm, pmdp, addr, &ptl);
+ if (!ptep)
+ goto not_found;
+ pte = ptep_get(ptep);
+
+ entry_size = PAGE_SIZE;
+ fw->level = FW_LEVEL_PTE;
+ fw->ptep = ptep;
+ fw->pte = pte;
+
+ if (pte_present(pte)) {
+ page = vm_normal_page(vma, addr, pte);
+ if (page)
+ goto found;
+ if ((flags & FW_ZEROPAGE) &&
+ is_zero_pfn(pte_pfn(pte))) {
+ page = pfn_to_page(pte_pfn(pte));
+ expose_page = false;
+ goto found;
+ }
+ } else if (!pte_none(pte)) {
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ if ((flags & FW_MIGRATION) &&
+ is_migration_entry(entry)) {
+ page = pfn_swap_entry_to_page(entry);
+ expose_page = false;
+ goto found;
+ }
+ }
+ pte_unmap_unlock(ptep, ptl);
+not_found:
+ vma_pgtable_walk_end(vma);
+ return NULL;
+found:
+ if (expose_page)
+ /* Note: Offset from the mapped page, not the folio start. */
+ fw->page = nth_page(page, (addr & (entry_size - 1)) >> PAGE_SHIFT);
+ else
+ fw->page = NULL;
+ fw->ptl = ptl;
+ return page_folio(page);
+}
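
The kerneldoc above stresses short-term references only; a usage sketch under
that constraint. grab_mapped_folio is a hypothetical name, and folio_walk_end()
is assumed to take the walk state and the VMA and to drop the held PTL:

    /* Take a short-term reference on the folio mapped at @addr. */
    static struct folio *grab_mapped_folio(struct vm_area_struct *vma,
                                           unsigned long addr)
    {
            struct folio_walk fw;
            struct folio *folio;

            folio = folio_walk_start(&fw, vma, addr, 0);
            if (!folio)
                    return NULL;            /* nothing (suitable) mapped */

            folio_get(folio);               /* reference before unlocking */
            folio_walk_end(&fw, vma);       /* releases the page table lock */
            return folio;
    }
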
diff --git a/mm/percpu.c b/mm/percpu.c
index 20d91af8c033..da21680ff294 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2217,37 +2217,6 @@ static void pcpu_balance_workfn(struct work_struct *work)
}
/**
- * pcpu_alloc_size - the size of the dynamic percpu area
- * @ptr: pointer to the dynamic percpu area
- *
- * Returns the size of the @ptr allocation. This is undefined for statically
- * defined percpu variables as there is no corresponding chunk->bound_map.
- *
- * RETURNS:
- * The size of the dynamic percpu area.
- *
- * CONTEXT:
- * Can be called from atomic context.
- */
-size_t pcpu_alloc_size(void __percpu *ptr)
-{
- struct pcpu_chunk *chunk;
- unsigned long bit_off, end;
- void *addr;
-
- if (!ptr)
- return 0;
-
- addr = __pcpu_ptr_to_addr(ptr);
- /* No pcpu_lock here: ptr has not been freed, so chunk is still alive */
- chunk = pcpu_chunk_addr_search(addr);
- bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE;
- end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
- bit_off + 1);
- return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;
-}
-
-/**
* free_percpu - free percpu area
* @ptr: pointer to area to free
*
diff --git a/mm/readahead.c b/mm/readahead.c
index 517c0be7ce66..3dc6c7a128dd 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -206,9 +206,10 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
unsigned long nr_to_read, unsigned long lookahead_size)
{
struct address_space *mapping = ractl->mapping;
- unsigned long index = readahead_index(ractl);
+ unsigned long ra_folio_index, index = readahead_index(ractl);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
- unsigned long i;
+ unsigned long mark, i = 0;
+ unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
/*
* Partway through the readahead operation, we will have added
@@ -223,10 +224,24 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
unsigned int nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
+ index = mapping_align_index(mapping, index);
+
+ /*
+ * As iterator `i` is aligned to min_nrpages, round_up the
+ * difference between nr_to_read and lookahead_size to mark the
+ * index that only has lookahead or "async_region" to set the
+ * readahead flag.
+ */
+ ra_folio_index = round_up(readahead_index(ractl) + nr_to_read - lookahead_size,
+ min_nrpages);
+ mark = ra_folio_index - index;
+ nr_to_read += readahead_index(ractl) - index;
+ ractl->_index = index;
+
/*
* Preallocate as many pages as we will need.
*/
- for (i = 0; i < nr_to_read; i++) {
+ while (i < nr_to_read) {
struct folio *folio = xa_load(&mapping->i_pages, index + i);
int ret;
@@ -240,12 +255,13 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
* not worth getting one just for that.
*/
read_pages(ractl);
- ractl->_index++;
- i = ractl->_index + ractl->_nr_pages - index - 1;
+ ractl->_index += min_nrpages;
+ i = ractl->_index + ractl->_nr_pages - index;
continue;
}
- folio = filemap_alloc_folio(gfp_mask, 0);
+ folio = filemap_alloc_folio(gfp_mask,
+ mapping_min_folio_order(mapping));
if (!folio)
break;
@@ -255,14 +271,15 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
if (ret == -ENOMEM)
break;
read_pages(ractl);
- ractl->_index++;
- i = ractl->_index + ractl->_nr_pages - index - 1;
+ ractl->_index += min_nrpages;
+ i = ractl->_index + ractl->_nr_pages - index;
continue;
}
- if (i == nr_to_read - lookahead_size)
+ if (i == mark)
folio_set_readahead(folio);
ractl->_workingset |= folio_test_workingset(folio);
- ractl->_nr_pages++;
+ ractl->_nr_pages += min_nrpages;
+ i += min_nrpages;
}
/*
@@ -438,26 +455,41 @@ void page_cache_ra_order(struct readahead_control *ractl,
struct address_space *mapping = ractl->mapping;
pgoff_t start = readahead_index(ractl);
pgoff_t index = start;
+ unsigned int min_order = mapping_min_folio_order(mapping);
pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
pgoff_t mark = index + ra->size - ra->async_size;
unsigned int nofs;
int err = 0;
gfp_t gfp = readahead_gfp_mask(mapping);
+ unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping));
- if (!mapping_large_folio_support(mapping) || ra->size < 4)
+ /*
+	 * Fall back when size < min_nrpages, as each folio should be
+ * at least min_nrpages anyway.
+ */
+ if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size)
goto fallback;
limit = min(limit, index + ra->size - 1);
- if (new_order < MAX_PAGECACHE_ORDER)
+ if (new_order < mapping_max_folio_order(mapping))
new_order += 2;
- new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
+ new_order = min(mapping_max_folio_order(mapping), new_order);
new_order = min_t(unsigned int, new_order, ilog2(ra->size));
+ new_order = max(new_order, min_order);
/* See comment in page_cache_ra_unbounded() */
nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
+ /*
+ * If the new_order is greater than min_order and index is
+	 * already aligned to new_order, then this will be a no-op, as an index
+	 * aligned to new_order is also aligned to min_order.
+ */
+ ractl->_index = mapping_align_index(mapping, index);
+ index = readahead_index(ractl);
+
while (index <= limit) {
unsigned int order = new_order;
@@ -465,7 +497,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
if (index & ((1UL << order) - 1))
order = __ffs(index);
/* Don't allocate pages past EOF */
- while (index + (1UL << order) - 1 > limit)
+ while (order > min_order && index + (1UL << order) - 1 > limit)
order--;
err = ra_alloc_folio(ractl, index, mark, order, gfp);
if (err)
@@ -646,7 +678,7 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
ret = -EBADF;
f = fdget(fd);
- if (!f.file || !(f.file->f_mode & FMODE_READ))
+ if (!fd_file(f) || !(fd_file(f)->f_mode & FMODE_READ))
goto out;
/*
@@ -655,12 +687,12 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
* on this file, then we must return -EINVAL.
*/
ret = -EINVAL;
- if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
- (!S_ISREG(file_inode(f.file)->i_mode) &&
- !S_ISBLK(file_inode(f.file)->i_mode)))
+ if (!fd_file(f)->f_mapping || !fd_file(f)->f_mapping->a_ops ||
+ (!S_ISREG(file_inode(fd_file(f))->i_mode) &&
+ !S_ISBLK(file_inode(fd_file(f))->i_mode)))
goto out;
- ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+ ret = vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);
out:
fdput(f);
return ret;
@@ -703,8 +735,15 @@ void readahead_expand(struct readahead_control *ractl,
struct file_ra_state *ra = ractl->ra;
pgoff_t new_index, new_nr_pages;
gfp_t gfp_mask = readahead_gfp_mask(mapping);
+ unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
+ unsigned int min_order = mapping_min_folio_order(mapping);
new_index = new_start / PAGE_SIZE;
+ /*
+ * Readahead code should have aligned the ractl->_index to
+ * min_nrpages before calling readahead aops.
+ */
+ VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
/* Expand the leading edge downwards */
while (ractl->_index > new_index) {
@@ -714,9 +753,11 @@ void readahead_expand(struct readahead_control *ractl,
if (folio && !xa_is_value(folio))
return; /* Folio apparently present */
- folio = filemap_alloc_folio(gfp_mask, 0);
+ folio = filemap_alloc_folio(gfp_mask, min_order);
if (!folio)
return;
+
+ index = mapping_align_index(mapping, index);
if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
folio_put(folio);
return;
@@ -726,7 +767,7 @@ void readahead_expand(struct readahead_control *ractl,
ractl->_workingset = true;
psi_memstall_enter(&ractl->_pflags);
}
- ractl->_nr_pages++;
+ ractl->_nr_pages += min_nrpages;
ractl->_index = folio->index;
}
@@ -741,9 +782,11 @@ void readahead_expand(struct readahead_control *ractl,
if (folio && !xa_is_value(folio))
return; /* Folio apparently present */
- folio = filemap_alloc_folio(gfp_mask, 0);
+ folio = filemap_alloc_folio(gfp_mask, min_order);
if (!folio)
return;
+
+ index = mapping_align_index(mapping, index);
if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
folio_put(folio);
return;
@@ -753,10 +796,10 @@ void readahead_expand(struct readahead_control *ractl,
ractl->_workingset = true;
psi_memstall_enter(&ractl->_pflags);
}
- ractl->_nr_pages++;
+ ractl->_nr_pages += min_nrpages;
if (ra) {
- ra->size++;
- ra->async_size++;
+ ra->size += min_nrpages;
+ ra->async_size += min_nrpages;
}
}
}
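
Several hunks above rely on mapping_align_index() to keep ractl->_index aligned
to the mapping's minimum folio order. A standalone sketch of that alignment,
assuming it simply rounds the index down to the minimum folio boundary:

    /* Assumed semantics of the index alignment used throughout above. */
    static unsigned long align_down_to_order(unsigned long index,
                                             unsigned int min_order)
    {
            return index & ~((1UL << min_order) - 1);
    }
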
diff --git a/mm/rmap.c b/mm/rmap.c
index 2490e727e2dc..a8797d1b3d49 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -75,6 +75,7 @@
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>
+#include <linux/oom.h>
#include <asm/tlbflush.h>
@@ -870,6 +871,20 @@ static bool folio_referenced_one(struct folio *folio,
continue;
}
+ /*
+ * Skip the non-shared swapbacked folio mapped solely by
+ * the exiting or OOM-reaped process. This avoids redundant
+ * swap-out followed by an immediate unmap.
+ */
+ if ((!atomic_read(&vma->vm_mm->mm_users) ||
+ check_stable_address_space(vma->vm_mm)) &&
+ folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_likely_mapped_shared(folio)) {
+ pra->referenced = -1;
+ page_vma_mapped_walk_done(&pvmw);
+ return false;
+ }
+
if (pvmw.pte) {
if (lru_gen_enabled() &&
pte_young(ptep_get(pvmw.pte))) {
@@ -1143,25 +1158,25 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
{
atomic_t *mapped = &folio->_nr_pages_mapped;
const int orig_nr_pages = nr_pages;
- int first, nr = 0;
+ int first = 0, nr = 0;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
switch (level) {
case RMAP_LEVEL_PTE:
if (!folio_test_large(folio)) {
- nr = atomic_inc_and_test(&page->_mapcount);
+ nr = atomic_inc_and_test(&folio->_mapcount);
break;
}
do {
- first = atomic_inc_and_test(&page->_mapcount);
- if (first) {
- first = atomic_inc_return_relaxed(mapped);
- if (first < ENTIRELY_MAPPED)
- nr++;
- }
+ first += atomic_inc_and_test(&page->_mapcount);
} while (page++, --nr_pages > 0);
+
+ if (first &&
+ atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
+ nr = first;
+
atomic_add(orig_nr_pages, &folio->_large_mapcount);
break;
case RMAP_LEVEL_PMD:
@@ -1452,6 +1467,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
}
__folio_mod_stat(folio, nr, nr_pmdmapped);
+ mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
}
static __always_inline void __folio_add_file_rmap(struct folio *folio,
@@ -1512,7 +1528,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
enum rmap_level level)
{
atomic_t *mapped = &folio->_nr_pages_mapped;
- int last, nr = 0, nr_pmdmapped = 0;
+ int last = 0, nr = 0, nr_pmdmapped = 0;
bool partially_mapped = false;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
@@ -1520,20 +1536,19 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
switch (level) {
case RMAP_LEVEL_PTE:
if (!folio_test_large(folio)) {
- nr = atomic_add_negative(-1, &page->_mapcount);
+ nr = atomic_add_negative(-1, &folio->_mapcount);
break;
}
atomic_sub(nr_pages, &folio->_large_mapcount);
do {
- last = atomic_add_negative(-1, &page->_mapcount);
- if (last) {
- last = atomic_dec_return_relaxed(mapped);
- if (last < ENTIRELY_MAPPED)
- nr++;
- }
+ last += atomic_add_negative(-1, &page->_mapcount);
} while (page++, --nr_pages > 0);
+ if (last &&
+ atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED)
+ nr = last;
+
partially_mapped = nr && atomic_read(mapped);
break;
case RMAP_LEVEL_PMD:
@@ -1553,22 +1568,20 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
}
}
- partially_mapped = nr < nr_pmdmapped;
+ partially_mapped = nr && nr < nr_pmdmapped;
break;
}
- if (nr) {
- /*
- * Queue anon large folio for deferred split if at least one
- * page of the folio is unmapped and at least one page
- * is still mapped.
- *
- * Check partially_mapped first to ensure it is a large folio.
- */
- if (folio_test_anon(folio) && partially_mapped &&
- list_empty(&folio->_deferred_list))
- deferred_split_folio(folio);
- }
+ /*
+ * Queue anon large folio for deferred split if at least one page of
+ * the folio is unmapped and at least one page is still mapped.
+ *
+ * Check partially_mapped first to ensure it is a large folio.
+ */
+ if (partially_mapped && folio_test_anon(folio) &&
+ !folio_test_partially_mapped(folio))
+ deferred_split_folio(folio, true);
+
__folio_mod_stat(folio, -nr, -nr_pmdmapped);
/*
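
The batching rework above accumulates first/last mapcount transitions across
the whole PTE range and then publishes them with one atomic update of
_nr_pages_mapped, instead of one per page. A minimal sketch of the add-side
pattern, ignoring folio locking; add_ptes_batched is a hypothetical name:

    /* Count 0 -> 1 mapcount transitions, then publish them in one op. */
    static int add_ptes_batched(struct page *page, int nr_pages,
                                atomic_t *mapped)
    {
            int first = 0, nr = 0;

            do {
                    first += atomic_inc_and_test(&page->_mapcount);
            } while (page++, --nr_pages > 0);

            if (first &&
                atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
                    nr = first;     /* newly-mapped pages to account */
            return nr;
    }
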
diff --git a/mm/shmem.c b/mm/shmem.c
index b875852df51f..4f11b5506363 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -155,7 +155,7 @@ static unsigned long shmem_default_max_inodes(void)
static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
- struct mm_struct *fault_mm, vm_fault_t *fault_type);
+ struct vm_area_struct *vma, vm_fault_t *fault_type);
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
@@ -502,8 +502,8 @@ static int shmem_replace_entry(struct address_space *mapping,
* Sometimes, before we decide whether to proceed or to fail, we must check
* that an entry was not already brought back from swap by a racing thread.
*
- * Checking page is not enough: by the time a SwapCache page is locked, it
- * might be reused, and again be SwapCache, using the same swap as before.
+ * Checking folio is not enough: by the time a swapcache folio is locked, it
+ * might be reused, and again be swapcache, using the same swap as before.
*/
static bool shmem_confirm_swap(struct address_space *mapping,
pgoff_t index, swp_entry_t swap)
@@ -548,10 +548,12 @@ static bool shmem_confirm_swap(struct address_space *mapping,
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
-static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
- bool shmem_huge_force, struct mm_struct *mm,
- unsigned long vm_flags)
+static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
+ struct mm_struct *mm = vma ? vma->vm_mm : NULL;
loff_t i_size;
if (!S_ISREG(inode->i_mode))
@@ -568,7 +570,8 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
return true;
case SHMEM_HUGE_WITHIN_SIZE:
index = round_up(index + 1, HPAGE_PMD_NR);
- i_size = round_up(i_size_read(inode), PAGE_SIZE);
+ i_size = max(write_end, i_size_read(inode));
+ i_size = round_up(i_size, PAGE_SIZE);
if (i_size >> PAGE_SHIFT >= index)
return true;
fallthrough;
@@ -581,14 +584,15 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
}
}
-bool shmem_is_huge(struct inode *inode, pgoff_t index,
- bool shmem_huge_force, struct mm_struct *mm,
- unsigned long vm_flags)
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma, unsigned long vm_flags)
{
if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
return false;
- return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
+ return __shmem_huge_global_enabled(inode, index, write_end,
+ shmem_huge_force, vma, vm_flags);
}
#if defined(CONFIG_SYSFS)
@@ -634,15 +638,14 @@ static const char *shmem_format_huge(int huge)
#endif
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
- struct shrink_control *sc, unsigned long nr_to_split)
+ struct shrink_control *sc, unsigned long nr_to_free)
{
LIST_HEAD(list), *pos, *next;
- LIST_HEAD(to_remove);
struct inode *inode;
struct shmem_inode_info *info;
struct folio *folio;
unsigned long batch = sc ? sc->nr_to_scan : 128;
- int split = 0;
+ unsigned long split = 0, freed = 0;
if (list_empty(&sbinfo->shrinklist))
return SHRINK_STOP;
@@ -660,13 +663,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
goto next;
}
- /* Check if there's anything to gain */
- if (round_up(inode->i_size, PAGE_SIZE) ==
- round_up(inode->i_size, HPAGE_PMD_SIZE)) {
- list_move(&info->shrinklist, &to_remove);
- goto next;
- }
-
list_move(&info->shrinklist, &list);
next:
sbinfo->shrinklist_len--;
@@ -675,34 +671,36 @@ next:
}
spin_unlock(&sbinfo->shrinklist_lock);
- list_for_each_safe(pos, next, &to_remove) {
- info = list_entry(pos, struct shmem_inode_info, shrinklist);
- inode = &info->vfs_inode;
- list_del_init(&info->shrinklist);
- iput(inode);
- }
-
list_for_each_safe(pos, next, &list) {
+ pgoff_t next, end;
+ loff_t i_size;
int ret;
- pgoff_t index;
info = list_entry(pos, struct shmem_inode_info, shrinklist);
inode = &info->vfs_inode;
- if (nr_to_split && split >= nr_to_split)
+ if (nr_to_free && freed >= nr_to_free)
goto move_back;
- index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
- folio = filemap_get_folio(inode->i_mapping, index);
- if (IS_ERR(folio))
+ i_size = i_size_read(inode);
+ folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
+ if (!folio || xa_is_value(folio))
goto drop;
- /* No huge page at the end of the file: nothing to split */
+ /* No large folio at the end of the file: nothing to split */
if (!folio_test_large(folio)) {
folio_put(folio);
goto drop;
}
+ /* Check if there is anything to gain from splitting */
+ next = folio_next_index(folio);
+ end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
+ if (end <= folio->index || end >= next) {
+ folio_put(folio);
+ goto drop;
+ }
+
/*
* Move the inode on the list back to shrinklist if we failed
* to lock the page at this time.
@@ -723,6 +721,7 @@ next:
if (ret)
goto move_back;
+ freed += next - end;
split++;
drop:
list_del_init(&info->shrinklist);
@@ -767,10 +766,17 @@ static long shmem_unused_huge_count(struct super_block *sb,
#define shmem_huge SHMEM_HUGE_DENY
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
- struct shrink_control *sc, unsigned long nr_to_split)
+ struct shrink_control *sc, unsigned long nr_to_free)
{
return 0;
}
+
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma, unsigned long vm_flags)
+{
+ return false;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
@@ -786,7 +792,6 @@ static int shmem_add_to_page_cache(struct folio *folio,
VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
- VM_BUG_ON(expected && folio_test_large(folio));
folio_ref_add(folio, nr);
folio->mapping = mapping;
@@ -842,23 +847,27 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
xa_unlock_irq(&mapping->i_pages);
- folio_put(folio);
+ folio_put_refs(folio, nr);
BUG_ON(error);
}
/*
- * Remove swap entry from page cache, free the swap and its page cache.
+ * Remove swap entry from page cache, free the swap and its page cache. Returns
+ * the number of pages freed; 0 means the entry was not found in the XArray
+ * (no pages freed).
*/
-static int shmem_free_swap(struct address_space *mapping,
- pgoff_t index, void *radswap)
+static long shmem_free_swap(struct address_space *mapping,
+ pgoff_t index, void *radswap)
{
+ int order = xa_get_order(&mapping->i_pages, index);
void *old;
old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
if (old != radswap)
- return -ENOENT;
- free_swap_and_cache(radix_to_swp_entry(radswap));
- return 0;
+ return 0;
+ free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
+
+ return 1 << order;
}
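
shmem_free_swap() now reports how many pages it released instead of an errno,
and callers translate a zero return into "entry replaced, retry this index". A
hedged caller sketch; drop_swap_entry is a hypothetical wrapper:

    /* Hypothetical wrapper over the new return convention. */
    static long drop_swap_entry(struct address_space *mapping,
                                pgoff_t index, void *radswap)
    {
            long freed = shmem_free_swap(mapping, index, radswap);

            if (!freed)
                    return -EAGAIN; /* swap was replaced by a page: retry */
            return freed;           /* 1 << order pages were freed */
    }
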
/*
@@ -881,7 +890,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
if (xas_retry(&xas, page))
continue;
if (xa_is_value(page))
- swapped++;
+ swapped += 1 << xas_get_order(&xas);
if (xas.xa_index == max)
break;
if (need_resched()) {
@@ -971,7 +980,7 @@ static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
* (although in some cases this is just a waste of time).
*/
folio = NULL;
- shmem_get_folio(inode, index, &folio, SGP_READ);
+ shmem_get_folio(inode, index, 0, &folio, SGP_READ);
return folio;
}
@@ -1010,7 +1019,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (xa_is_value(folio)) {
if (unfalloc)
continue;
- nr_swaps_freed += !shmem_free_swap(mapping,
+ nr_swaps_freed += shmem_free_swap(mapping,
indices[i], folio);
continue;
}
@@ -1077,14 +1086,17 @@ whole_folios:
folio = fbatch.folios[i];
if (xa_is_value(folio)) {
+ long swaps_freed;
+
if (unfalloc)
continue;
- if (shmem_free_swap(mapping, indices[i], folio)) {
+ swaps_freed = shmem_free_swap(mapping, indices[i], folio);
+ if (!swaps_freed) {
/* Swap was replaced by page: retry */
index = indices[i];
break;
}
- nr_swaps_freed++;
+ nr_swaps_freed += swaps_freed;
continue;
}
@@ -1156,7 +1168,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
STATX_ATTR_NODUMP);
generic_fillattr(idmap, request_mask, inode, stat);
- if (shmem_is_huge(inode, 0, false, NULL, 0))
+ if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
stat->blksize = HPAGE_PMD_SIZE;
if (request_mask & STATX_BTIME) {
@@ -1443,6 +1455,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
swp_entry_t swap;
pgoff_t index;
+ int nr_pages;
+ bool split = false;
/*
* Our capabilities prevent regular writeback or sync from ever calling
@@ -1461,20 +1475,33 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
goto redirty;
/*
- * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
- * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
- * and its shmem_writeback() needs them to be split when swapping.
+ * If CONFIG_THP_SWAP is not enabled, the large folio should be
+ * split when swapping.
+ *
+ * And since shrinkage of pages beyond i_size does not split swap,
+ * swapout of a large folio crossing i_size needs to be split too
+ * (unless fallocate has been used to preallocate beyond EOF).
*/
if (folio_test_large(folio)) {
+ index = shmem_fallocend(inode,
+ DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
+ if ((index > folio->index && index < folio_next_index(folio)) ||
+ !IS_ENABLED(CONFIG_THP_SWAP))
+ split = true;
+ }
+
+ if (split) {
+try_split:
/* Ensure the subpages are still dirty */
folio_test_set_dirty(folio);
- if (split_huge_page(page) < 0)
+ if (split_huge_page_to_list_to_order(page, wbc->list, 0))
goto redirty;
folio = page_folio(page);
folio_clear_dirty(folio);
}
index = folio->index;
+ nr_pages = folio_nr_pages(folio);
/*
* This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
@@ -1509,8 +1536,12 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
}
swap = folio_alloc_swap(folio);
- if (!swap.val)
+ if (!swap.val) {
+ if (nr_pages > 1)
+ goto try_split;
+
goto redirty;
+ }
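The failure path above encodes a simple fallback: if contiguous swap space for the whole large folio cannot be allocated, split the folio and retry the writeout at order 0 before redirtying. A rough userspace model of that control flow (the allocator stub is an assumption, not the kernel's folio_alloc_swap()):

#include <stdbool.h>
#include <stdio.h>

/* Pretend contiguous swap allocation fails for anything over a page. */
static bool alloc_swap(int nr_pages) { return nr_pages == 1; }

static int swapout(int nr_pages)
{
	while (!alloc_swap(nr_pages)) {
		if (nr_pages == 1)
			return -1;	/* redirty: no swap space at all */
		nr_pages = 1;		/* split the large folio, retry */
	}
	return 0;
}

int main(void)
{
	printf("order-9 folio: %s\n", swapout(512) ? "redirty" : "swapped");
	return 0;
}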
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
@@ -1527,8 +1558,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (add_to_swap_cache(folio, swap,
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
NULL) == 0) {
- shmem_recalc_inode(inode, 0, 1);
- swap_shmem_alloc(swap);
+ shmem_recalc_inode(inode, 0, nr_pages);
+ swap_shmem_alloc(swap, nr_pages);
shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
mutex_unlock(&shmem_swaplist_mutex);
@@ -1624,22 +1655,33 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
- bool global_huge)
+ loff_t write_end, bool shmem_huge_force)
{
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
- unsigned long vm_flags = vma->vm_flags;
+ unsigned long vm_flags = vma ? vma->vm_flags : 0;
+ bool global_huge;
loff_t i_size;
int order;
- if ((vm_flags & VM_NOHUGEPAGE) ||
- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ if (vma && ((vm_flags & VM_NOHUGEPAGE) ||
+ test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
return 0;
/* If the hardware/firmware marked hugepage support disabled. */
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
return 0;
+ global_huge = shmem_huge_global_enabled(inode, index, write_end,
+ shmem_huge_force, vma, vm_flags);
+ if (!vma || !vma_is_anon_shmem(vma)) {
+ /*
+ * For tmpfs, we currently only support PMD-sized THP if huge
+ * pages are enabled; otherwise, fall back to order 0.
+ */
+ return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+ }
+
/*
* Following the 'deny' semantics of the top level, force the huge
* option off from all mounts.
@@ -1680,20 +1722,30 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
struct address_space *mapping, pgoff_t index,
unsigned long orders)
{
- struct vm_area_struct *vma = vmf->vma;
+ struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
pgoff_t aligned_index;
unsigned long pages;
int order;
- orders = thp_vma_suitable_orders(vma, vmf->address, orders);
- if (!orders)
- return 0;
+ if (vma) {
+ orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+ if (!orders)
+ return 0;
+ }
/* Find the highest order that can be added to the page cache */
order = highest_order(orders);
while (orders) {
pages = 1UL << order;
aligned_index = round_down(index, pages);
+ /*
+ * Check for conflict before waiting on a huge allocation.
+ * Conflict might be that a huge page has just been allocated
+ * and added to page cache by a racing thread, or that there
+ * is already at least one small page in the huge extent.
+ * Be careful to retry when appropriate, but not forever!
+ * Elsewhere -EEXIST would be the right code, but not here.
+ */
if (!xa_find(&mapping->i_pages, &aligned_index,
aligned_index + pages - 1, XA_PRESENT))
break;
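shmem_suitable_orders() walks the allowed orders from highest to lowest, skipping any order whose naturally aligned extent already holds a page-cache entry. A standalone sketch of that descending bitmask iteration (the helpers below are stand-ins modeled on the kernel's highest_order()/next_order(), not the real implementations):

#include <stdio.h>

static int highest_order(unsigned long orders)
{
	/* assumes a GCC/Clang builtin and 64-bit unsigned long */
	return orders ? 63 - __builtin_clzl(orders) : -1;
}

static int next_order(unsigned long *orders, int order)
{
	*orders &= ~(1UL << order);	/* drop the order just tried */
	return highest_order(*orders);
}

int main(void)
{
	unsigned long orders = (1UL << 9) | (1UL << 4) | (1UL << 2);
	int order = highest_order(orders);

	while (orders) {
		printf("trying order %d (%lu pages)\n", order, 1UL << order);
		order = next_order(&orders, order);
	}
	return 0;
}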
@@ -1731,7 +1783,6 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
- struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
unsigned long suitable_orders = 0;
struct folio *folio = NULL;
long pages;
@@ -1741,26 +1792,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
orders = 0;
if (orders > 0) {
- if (vma && vma_is_anon_shmem(vma)) {
- suitable_orders = shmem_suitable_orders(inode, vmf,
+ suitable_orders = shmem_suitable_orders(inode, vmf,
mapping, index, orders);
- } else if (orders & BIT(HPAGE_PMD_ORDER)) {
- pages = HPAGE_PMD_NR;
- suitable_orders = BIT(HPAGE_PMD_ORDER);
- index = round_down(index, HPAGE_PMD_NR);
-
- /*
- * Check for conflict before waiting on a huge allocation.
- * Conflict might be that a huge page has just been allocated
- * and added to page cache by a racing thread, or that there
- * is already at least one small page in the huge extent.
- * Be careful to retry when appropriate, but not forever!
- * Elsewhere -EEXIST would be the right code, but not here.
- */
- if (xa_find(&mapping->i_pages, &index,
- index + HPAGE_PMD_NR - 1, XA_PRESENT))
- return ERR_PTR(-E2BIG);
- }
order = highest_order(suitable_orders);
while (suitable_orders) {
@@ -1772,9 +1805,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
if (pages == HPAGE_PMD_NR)
count_vm_event(THP_FILE_FALLBACK);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
-#endif
order = next_order(&suitable_orders, order);
}
} else {
@@ -1799,10 +1830,8 @@ allocated:
count_vm_event(THP_FILE_FALLBACK);
count_vm_event(THP_FILE_FALLBACK_CHARGE);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
-#endif
}
goto unlock;
}
@@ -1819,7 +1848,7 @@ allocated:
* Try to reclaim some space by splitting a few
* large folios beyond i_size on the filesystem.
*/
- shmem_unused_huge_shrink(sbinfo, NULL, 2);
+ shmem_unused_huge_shrink(sbinfo, NULL, pages);
/*
* And do a shmem_recalc_inode() to account for freed pages:
* except our folio is there in cache, so not quite balanced.
@@ -1867,30 +1896,35 @@ static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
}
static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
+ struct shmem_inode_info *info, pgoff_t index,
+ struct vm_area_struct *vma)
{
- struct folio *old, *new;
- struct address_space *swap_mapping;
- swp_entry_t entry;
- pgoff_t swap_index;
- int error;
-
- old = *foliop;
- entry = old->swap;
- swap_index = swap_cache_index(entry);
- swap_mapping = swap_address_space(entry);
+ struct folio *new, *old = *foliop;
+ swp_entry_t entry = old->swap;
+ struct address_space *swap_mapping = swap_address_space(entry);
+ pgoff_t swap_index = swap_cache_index(entry);
+ XA_STATE(xas, &swap_mapping->i_pages, swap_index);
+ int nr_pages = folio_nr_pages(old);
+ int error = 0, i;
/*
* We have arrived here because our zones are constrained, so don't
* limit chance of success by further cpuset and node constraints.
*/
gfp &= ~GFP_CONSTRAINT_MASK;
- VM_BUG_ON_FOLIO(folio_test_large(old), old);
- new = shmem_alloc_folio(gfp, 0, info, index);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (nr_pages > 1) {
+ gfp_t huge_gfp = vma_thp_gfp_mask(vma);
+
+ gfp = limit_gfp_mask(huge_gfp, gfp);
+ }
+#endif
+
+ new = shmem_alloc_folio(gfp, folio_order(old), info, index);
if (!new)
return -ENOMEM;
- folio_get(new);
+ folio_ref_add(new, nr_pages);
folio_copy(new, old);
flush_dcache_folio(new);
@@ -1900,26 +1934,34 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
new->swap = entry;
folio_set_swapcache(new);
- /*
- * Our caller will very soon move newpage out of swapcache, but it's
- * a nice clean interface for us to replace oldpage by newpage there.
- */
+ /* Swap cache still stores N entries instead of a high-order entry */
xa_lock_irq(&swap_mapping->i_pages);
- error = shmem_replace_entry(swap_mapping, swap_index, old, new);
+ for (i = 0; i < nr_pages; i++) {
+ void *item = xas_load(&xas);
+
+ if (item != old) {
+ error = -ENOENT;
+ break;
+ }
+
+ xas_store(&xas, new);
+ xas_next(&xas);
+ }
if (!error) {
mem_cgroup_replace_folio(old, new);
- __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
- __lruvec_stat_mod_folio(new, NR_SHMEM, 1);
- __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
- __lruvec_stat_mod_folio(old, NR_SHMEM, -1);
+ __lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
+ __lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
+ __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
+ __lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
}
xa_unlock_irq(&swap_mapping->i_pages);
if (unlikely(error)) {
/*
- * Is this possible? I think not, now that our callers check
- * both PageSwapCache and page_private after getting page lock;
- * but be defensive. Reverse old to newpage for clear and free.
+ * Is this possible? I think not, now that our callers
+ * check both the swapcache flag and folio->private
+ * after getting the folio lock; but be defensive.
+ * Reverse old to newpage for clear and free.
*/
old = new;
} else {
@@ -1931,7 +1973,12 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
old->private = NULL;
folio_unlock(old);
- folio_put_refs(old, 2);
+ /*
+ * The old folio has been removed from the swap cache: drop its
+ * 'nr_pages' references, as well as the one temporary reference
+ * taken by the swap cache.
+ */
+ folio_put_refs(old, nr_pages + 1);
return error;
}
@@ -1941,6 +1988,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
swp_entry_t swapin_error;
void *old;
+ int nr_pages;
swapin_error = make_poisoned_swp_entry();
old = xa_cmpxchg_irq(&mapping->i_pages, index,
@@ -1949,6 +1997,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
if (old != swp_to_radix_entry(swap))
return;
+ nr_pages = folio_nr_pages(folio);
folio_wait_writeback(folio);
delete_from_swap_cache(folio);
/*
@@ -1956,8 +2005,86 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
* won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
* in shmem_evict_inode().
*/
- shmem_recalc_inode(inode, -1, -1);
- swap_free(swap);
+ shmem_recalc_inode(inode, -nr_pages, -nr_pages);
+ swap_free_nr(swap, nr_pages);
+}
+
+static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
+ swp_entry_t swap, gfp_t gfp)
+{
+ struct address_space *mapping = inode->i_mapping;
+ XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
+ void *alloced_shadow = NULL;
+ int alloced_order = 0, i;
+
+ /* Convert user data gfp flags to xarray node gfp flags */
+ gfp &= GFP_RECLAIM_MASK;
+
+ for (;;) {
+ int order = -1, split_order = 0;
+ void *old = NULL;
+
+ xas_lock_irq(&xas);
+ old = xas_load(&xas);
+ if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
+ xas_set_err(&xas, -EEXIST);
+ goto unlock;
+ }
+
+ order = xas_get_order(&xas);
+
+ /* Swap entry may have changed before we re-acquire the lock */
+ if (alloced_order &&
+ (old != alloced_shadow || order != alloced_order)) {
+ xas_destroy(&xas);
+ alloced_order = 0;
+ }
+
+ /* Try to split large swap entry in pagecache */
+ if (order > 0) {
+ if (!alloced_order) {
+ split_order = order;
+ goto unlock;
+ }
+ xas_split(&xas, old, order);
+
+ /*
+ * Re-set the swap entries after splitting; the swap offsets
+ * within the original large entry are contiguous, so each new
+ * entry can be derived from the base offset.
+ */
+ for (i = 0; i < 1 << order; i++) {
+ pgoff_t aligned_index = round_down(index, 1 << order);
+ swp_entry_t tmp;
+
+ tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
+ __xa_store(&mapping->i_pages, aligned_index + i,
+ swp_to_radix_entry(tmp), 0);
+ }
+ }
+
+unlock:
+ xas_unlock_irq(&xas);
+
+ /* Split needed: allocate here and retry. */
+ if (split_order) {
+ xas_split_alloc(&xas, old, split_order, gfp);
+ if (xas_error(&xas))
+ goto error;
+ alloced_shadow = old;
+ alloced_order = split_order;
+ xas_reset(&xas);
+ continue;
+ }
+
+ if (!xas_nomem(&xas, gfp))
+ break;
+ }
+
+error:
+ if (xas_error(&xas))
+ return xas_error(&xas);
+
+ return alloced_order;
}
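Since the swap offsets inside a large entry are contiguous, a faulting index maps onto the split order-0 entries by its offset within the old order-aligned range, which is exactly what the swp_offset(swap) + offset arithmetic later in this patch computes. A worked userspace model of that translation:

#include <stdio.h>

/* A swap entry is modeled as just an offset into the swap device. */
static unsigned long entry_for(unsigned long base_entry,
			       unsigned long index, int old_order)
{
	unsigned long aligned = index & ~((1UL << old_order) - 1);

	return base_entry + (index - aligned);
}

int main(void)
{
	/* Index 0x213 inside an old order-4 (16-slot) entry based at 0x800. */
	printf("entry = 0x%lx\n", entry_for(0x800, 0x213, 4));	/* 0x803 */
	return 0;
}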
/*
@@ -1968,15 +2095,16 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
*/
static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct folio **foliop, enum sgp_type sgp,
- gfp_t gfp, struct mm_struct *fault_mm,
+ gfp_t gfp, struct vm_area_struct *vma,
vm_fault_t *fault_type)
{
struct address_space *mapping = inode->i_mapping;
+ struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
struct shmem_inode_info *info = SHMEM_I(inode);
struct swap_info_struct *si;
struct folio *folio = NULL;
swp_entry_t swap;
- int error;
+ int error, nr_pages;
VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
swap = radix_to_swp_entry(*foliop);
@@ -1996,12 +2124,37 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
/* Look it up and read it in.. */
folio = swap_cache_get_folio(swap, NULL, 0);
if (!folio) {
+ int split_order;
+
/* Or update major stats only when swapin succeeds?? */
if (fault_type) {
*fault_type |= VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(fault_mm, PGMAJFAULT);
}
+
+ /*
+ * For now the swap device can only swap in order-0 folios, so
+ * we should split the large swap entry stored in the page cache
+ * if necessary.
+ */
+ split_order = shmem_split_large_entry(inode, index, swap, gfp);
+ if (split_order < 0) {
+ error = split_order;
+ goto failed;
+ }
+
+ /*
+ * If the large swap entry has already been split, recalculate
+ * the new swap entry from the offset within the old
+ * order-aligned range.
+ */
+ if (split_order > 0) {
+ pgoff_t offset = index - round_down(index, 1 << split_order);
+
+ swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+ }
+
/* Here we actually start the io */
folio = shmem_swapin_cluster(swap, gfp, info, index);
if (!folio) {
@@ -2023,6 +2176,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
goto failed;
}
folio_wait_writeback(folio);
+ nr_pages = folio_nr_pages(folio);
/*
* Some architectures may have to restore extra metadata to the
@@ -2031,24 +2185,25 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
arch_swap_restore(folio_swap(swap, folio), folio);
if (shmem_should_replace_folio(folio, gfp)) {
- error = shmem_replace_folio(&folio, gfp, info, index);
+ error = shmem_replace_folio(&folio, gfp, info, index, vma);
if (error)
goto failed;
}
- error = shmem_add_to_page_cache(folio, mapping, index,
+ error = shmem_add_to_page_cache(folio, mapping,
+ round_down(index, nr_pages),
swp_to_radix_entry(swap), gfp);
if (error)
goto failed;
- shmem_recalc_inode(inode, 0, -1);
+ shmem_recalc_inode(inode, 0, -nr_pages);
if (sgp == SGP_WRITE)
folio_mark_accessed(folio);
delete_from_swap_cache(folio);
folio_mark_dirty(folio);
- swap_free(swap);
+ swap_free_nr(swap, nr_pages);
put_swap_device(si);
*foliop = folio;
@@ -2078,14 +2233,14 @@ unlock:
* vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
*/
static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
- struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
- struct vm_fault *vmf, vm_fault_t *fault_type)
+ loff_t write_end, struct folio **foliop, enum sgp_type sgp,
+ gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
{
struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
struct mm_struct *fault_mm;
struct folio *folio;
int error;
- bool alloced, huge;
+ bool alloced;
unsigned long orders = 0;
if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
@@ -2111,7 +2266,7 @@ repeat:
if (xa_is_value(folio)) {
error = shmem_swapin_folio(inode, index, &folio,
- sgp, gfp, fault_mm, fault_type);
+ sgp, gfp, vma, fault_type);
if (error == -EEXIST)
goto repeat;
@@ -2158,14 +2313,8 @@ repeat:
return 0;
}
- huge = shmem_is_huge(inode, index, false, fault_mm,
- vma ? vma->vm_flags : 0);
- /* Find hugepage orders that are allowed for anonymous shmem. */
- if (vma && vma_is_anon_shmem(vma))
- orders = shmem_allowable_huge_orders(inode, vma, index, huge);
- else if (huge)
- orders = BIT(HPAGE_PMD_ORDER);
-
+ /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
+ orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
if (orders > 0) {
gfp_t huge_gfp;
@@ -2176,9 +2325,7 @@ repeat:
if (!IS_ERR(folio)) {
if (folio_test_pmd_mappable(folio))
count_vm_event(THP_FILE_ALLOC);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
-#endif
goto alloced;
}
if (PTR_ERR(folio) == -EEXIST)
@@ -2198,7 +2345,7 @@ alloced:
alloced = true;
if (folio_test_large(folio) &&
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
- folio_next_index(folio) - 1) {
+ folio_next_index(folio)) {
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
struct shmem_inode_info *info = SHMEM_I(inode);
/*
@@ -2268,6 +2415,7 @@ unlock:
* shmem_get_folio - find, and lock a shmem folio.
* @inode: inode to search
* @index: the page index.
+ * @write_end: end of a write; may extend the inode size
* @foliop: pointer to the folio if found
* @sgp: SGP_* flags to control behavior
*
@@ -2287,10 +2435,10 @@ unlock:
* Context: May sleep.
* Return: 0 if successful, else a negative error code.
*/
-int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
- enum sgp_type sgp)
+int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
+ struct folio **foliop, enum sgp_type sgp)
{
- return shmem_get_folio_gfp(inode, index, foliop, sgp,
+ return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}
EXPORT_SYMBOL_GPL(shmem_get_folio);
@@ -2385,7 +2533,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
}
WARN_ON_ONCE(vmf->page != NULL);
- err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
+ err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
gfp, vmf, &ret);
if (err)
return vmf_error(err);
@@ -2895,7 +3043,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
return -EPERM;
}
- ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
+ ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
if (ret)
return ret;
@@ -2965,7 +3113,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
break;
}
- error = shmem_get_folio(inode, index, &folio, SGP_READ);
+ error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
if (error) {
if (error == -EINVAL)
error = 0;
@@ -3141,7 +3289,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
if (*ppos >= i_size_read(inode))
break;
- error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
+ error = shmem_get_folio(inode, *ppos / PAGE_SIZE, 0, &folio,
SGP_READ);
if (error) {
if (error == -EINVAL)
@@ -3331,8 +3479,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
error = -ENOMEM;
else
- error = shmem_get_folio(inode, index, &folio,
- SGP_FALLOC);
+ error = shmem_get_folio(inode, index, offset + len,
+ &folio, SGP_FALLOC);
if (error) {
info->fallocend = undo_fallocend;
/* Remove the !uptodate folios we added */
@@ -3683,7 +3831,7 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
} else {
inode_nohighmem(inode);
inode->i_mapping->a_ops = &shmem_aops;
- error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
+ error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
if (error)
goto out_remove_offset;
inode->i_op = &shmem_symlink_inode_operations;
@@ -3729,7 +3877,7 @@ static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
return ERR_PTR(-ECHILD);
}
} else {
- error = shmem_get_folio(inode, 0, &folio, SGP_READ);
+ error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
if (error)
return ERR_PTR(error);
if (!folio)
@@ -4813,11 +4961,7 @@ void __init shmem_init(void)
shmem_init_inodecache();
#ifdef CONFIG_TMPFS_QUOTA
- error = register_quota_format(&shmem_quota_format);
- if (error < 0) {
- pr_err("Could not register quota format\n");
- goto out3;
- }
+ register_quota_format(&shmem_quota_format);
#endif
error = register_filesystem(&shmem_fs_type);
@@ -4852,7 +4996,6 @@ out1:
out2:
#ifdef CONFIG_TMPFS_QUOTA
unregister_quota_format(&shmem_quota_format);
-out3:
#endif
shmem_destroy_inodecache();
shm_mnt = ERR_PTR(error);
@@ -5197,7 +5340,7 @@ struct folio *shmem_read_folio_gfp(struct address_space *mapping,
struct folio *folio;
int error;
- error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
+ error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
gfp, NULL, NULL);
if (error)
return ERR_PTR(error);
diff --git a/mm/shmem_quota.c b/mm/shmem_quota.c
index ce514e700d2f..d1e32ac01407 100644
--- a/mm/shmem_quota.c
+++ b/mm/shmem_quota.c
@@ -34,8 +34,6 @@
#include <linux/quotaops.h>
#include <linux/quota.h>
-#ifdef CONFIG_TMPFS_QUOTA
-
/*
* The following constants define the amount of time given a user
* before the soft limits are treated as hard limits (usually resulting
@@ -351,4 +349,3 @@ const struct dquot_operations shmem_quota_operations = {
.mark_dirty = shmem_mark_dquot_dirty,
.get_next_id = shmem_get_next_id,
};
-#endif /* CONFIG_TMPFS_QUOTA */
diff --git a/mm/show_mem.c b/mm/show_mem.c
index bdb439551eef..ec885a398fa0 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -435,15 +435,18 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
struct codetag *ct = tags[i].ct;
struct alloc_tag *tag = ct_to_alloc_tag(ct);
struct alloc_tag_counters counter = alloc_tag_read(tag);
+ char bytes[10];
+
+ string_get_size(counter.bytes, 1, STRING_UNITS_2, bytes, sizeof(bytes));
/* Same as alloc_tag_to_text() but w/o intermediate buffer */
if (ct->modname)
- pr_notice("%12lli %8llu %s:%u [%s] func:%s\n",
- counter.bytes, counter.calls, ct->filename,
+ pr_notice("%12s %8llu %s:%u [%s] func:%s\n",
+ bytes, counter.calls, ct->filename,
ct->lineno, ct->modname, ct->function);
else
- pr_notice("%12lli %8llu %s:%u func:%s\n",
- counter.bytes, counter.calls, ct->filename,
+ pr_notice("%12s %8llu %s:%u func:%s\n",
+ bytes, counter.calls, ct->filename,
ct->lineno, ct->function);
}
}
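string_get_size() is used above to replace the raw byte count with a human-readable figure. A hedged usage sketch of the helper (the exact rendering for STRING_UNITS_2, e.g. "3.00 MiB", is from memory and may differ in detail):

	char buf[10];

	/* 3 MiB, block size 1, binary units: buf ends up like "3.00 MiB" */
	string_get_size(3 << 20, 1, STRING_UNITS_2, buf, sizeof(buf));
	pr_notice("%12s\n", buf);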
diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 12ea5486a3e9..4a85b94d12ce 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -114,7 +114,7 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
int nid;
char kbuf[72];
- read_len = size < (sizeof(kbuf) - 1) ? size : (sizeof(kbuf) - 1);
+ read_len = min(size, sizeof(kbuf) - 1);
if (copy_from_user(kbuf, buf, read_len))
return -EFAULT;
kbuf[read_len] = '\0';
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 61f32420230a..744324465615 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1205,6 +1205,13 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
/* If the object still fits, repoison it precisely. */
if (ks >= new_size) {
+ /* Zero out spare memory. */
+ if (want_init_on_alloc(flags)) {
+ kasan_disable_current();
+ memset((void *)p + new_size, 0, ks - new_size);
+ kasan_enable_current();
+ }
+
p = kasan_krealloc((void *)p, new_size, flags);
return (void *)p;
}
@@ -1226,11 +1233,27 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
* @new_size: how many bytes of memory are required.
* @flags: the type of memory to allocate.
*
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
* If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
* is 0 and @p is not a %NULL pointer, the object pointed to is freed.
*
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * This is the case, since krealloc() only knows about the bucket size of an
+ * allocation (but not the exact size it was allocated with) and hence
+ * implements the following semantics for shrinking and growing buffers with
+ * __GFP_ZERO.
+ *
+ *         new            bucket
+ * 0       size            size
+ * |--------|----------------|
+ * |  keep  |      zero      |
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
+ *
* Return: pointer to the allocated memory or %NULL in case of error
*/
void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
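A hedged usage sketch of the contract documented above: the spare bucket space is only guaranteed to be zeroed when every call in the allocation's lifetime passes __GFP_ZERO.

	u8 *p, *tmp;

	p = kzalloc(32, GFP_KERNEL);			/* [0, 32) zeroed */
	if (!p)
		return -ENOMEM;
	tmp = krealloc(p, 64, GFP_KERNEL | __GFP_ZERO);	/* [32, 64) zeroed */
	if (tmp)
		p = tmp;
	/*
	 * Had the first call been a plain kmalloc(), stale bucket bytes
	 * could survive into the grown object, as the diagram above shows.
	 */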
diff --git a/mm/swap.c b/mm/swap.c
index 9caf6b017cf0..835bdf324b76 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,31 +47,27 @@
int page_cluster;
const int page_cluster_max = 31;
-/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
-struct lru_rotate {
- local_lock_t lock;
- struct folio_batch fbatch;
-};
-static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
- .lock = INIT_LOCAL_LOCK(lock),
-};
-
-/*
- * The following folio batches are grouped together because they are protected
- * by disabling preemption (and interrupts remain enabled).
- */
struct cpu_fbatches {
+ /*
+ * The following folio batches are grouped together because they are protected
+ * by disabling preemption (and interrupts remain enabled).
+ */
local_lock_t lock;
struct folio_batch lru_add;
struct folio_batch lru_deactivate_file;
struct folio_batch lru_deactivate;
struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
- struct folio_batch activate;
+ struct folio_batch lru_activate;
#endif
+ /* Protects the following batches, which require disabling interrupts */
+ local_lock_t lock_irq;
+ struct folio_batch lru_move_tail;
};
+
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
.lock = INIT_LOCAL_LOCK(lock),
+ .lock_irq = INIT_LOCAL_LOCK(lock_irq),
};
static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
@@ -117,7 +113,9 @@ void __folio_put(struct folio *folio)
if (unlikely(folio_is_zone_device(folio))) {
free_zone_device_folio(folio);
return;
- } else if (folio_test_hugetlb(folio)) {
+ }
+
+ if (folio_test_hugetlb(folio)) {
free_huge_folio(folio);
return;
}
@@ -162,7 +160,7 @@ EXPORT_SYMBOL(put_pages_list);
typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
-static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_add(struct lruvec *lruvec, struct folio *folio)
{
int was_unevictable = folio_test_clear_unevictable(folio);
long nr_pages = folio_nr_pages(folio);
@@ -222,23 +220,50 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
folios_put(fbatch);
}
-static void folio_batch_add_and_move(struct folio_batch *fbatch,
- struct folio *folio, move_fn_t move_fn)
+static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
+ struct folio *folio, move_fn_t move_fn,
+ bool on_lru, bool disable_irq)
{
- if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
- !lru_cache_disabled())
+ unsigned long flags;
+
+ if (on_lru && !folio_test_clear_lru(folio))
return;
- folio_batch_move_lru(fbatch, move_fn);
+
+ folio_get(folio);
+
+ if (disable_irq)
+ local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
+ else
+ local_lock(&cpu_fbatches.lock);
+
+ if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
+ lru_cache_disabled())
+ folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
+
+ if (disable_irq)
+ local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
+ else
+ local_unlock(&cpu_fbatches.lock);
}
-static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
+#define folio_batch_add_and_move(folio, op, on_lru) \
+ __folio_batch_add_and_move( \
+ &cpu_fbatches.op, \
+ folio, \
+ op, \
+ on_lru, \
+ offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
+ )
+
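The folio_batch_add_and_move() macro above picks the right lock at compile time from struct layout alone: any batch member declared at or after lock_irq is treated as irq-protected. A self-contained illustration of the offsetof() comparison (the struct members here are invented):

#include <stddef.h>
#include <stdio.h>

struct batches {
	int lock;		/* preempt-style lock */
	int lru_add;
	int lock_irq;		/* irq-safe lock */
	int lru_move_tail;	/* declared after lock_irq: needs irqs off */
};

#define NEEDS_IRQ(member) \
	(offsetof(struct batches, member) >= offsetof(struct batches, lock_irq))

int main(void)
{
	printf("lru_add needs irq lock: %d\n", NEEDS_IRQ(lru_add));		  /* 0 */
	printf("lru_move_tail needs irq lock: %d\n", NEEDS_IRQ(lru_move_tail));  /* 1 */
	return 0;
}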
+static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_unevictable(folio)) {
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- lruvec_add_folio_tail(lruvec, folio);
- __count_vm_events(PGROTATED, folio_nr_pages(folio));
- }
+ if (folio_test_unevictable(folio))
+ return;
+
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ lruvec_add_folio_tail(lruvec, folio);
+ __count_vm_events(PGROTATED, folio_nr_pages(folio));
}
/*
@@ -250,22 +275,11 @@ static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
*/
void folio_rotate_reclaimable(struct folio *folio)
{
- if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
- !folio_test_unevictable(folio)) {
- struct folio_batch *fbatch;
- unsigned long flags;
-
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
+ if (folio_test_locked(folio) || folio_test_dirty(folio) ||
+ folio_test_unevictable(folio))
+ return;
- local_lock_irqsave(&lru_rotate.lock, flags);
- fbatch = this_cpu_ptr(&lru_rotate.fbatch);
- folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
- local_unlock_irqrestore(&lru_rotate.lock, flags);
- }
+ folio_batch_add_and_move(folio, lru_move_tail, true);
}
void lru_note_cost(struct lruvec *lruvec, bool file,
@@ -326,47 +340,38 @@ void lru_note_cost_refault(struct folio *folio)
folio_nr_pages(folio), 0);
}
-static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_activate(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_set_active(folio);
- lruvec_add_folio(lruvec, folio);
- trace_mm_lru_activate(folio);
+ if (folio_test_active(folio) || folio_test_unevictable(folio))
+ return;
- __count_vm_events(PGACTIVATE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
- nr_pages);
- }
+
+ lruvec_del_folio(lruvec, folio);
+ folio_set_active(folio);
+ lruvec_add_folio(lruvec, folio);
+ trace_mm_lru_activate(folio);
+
+ __count_vm_events(PGACTIVATE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
}
#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
- struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);
+ struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, folio_activate_fn);
+ folio_batch_move_lru(fbatch, lru_activate);
}
void folio_activate(struct folio *folio)
{
- if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
- struct folio_batch *fbatch;
-
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
+ if (folio_test_active(folio) || folio_test_unevictable(folio))
+ return;
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.activate);
- folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ folio_batch_add_and_move(folio, lru_activate, true);
}
#else
@@ -378,12 +383,13 @@ void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
- if (folio_test_clear_lru(folio)) {
- lruvec = folio_lruvec_lock_irq(folio);
- folio_activate_fn(lruvec, folio);
- unlock_page_lruvec_irq(lruvec);
- folio_set_lru(folio);
- }
+ if (!folio_test_clear_lru(folio))
+ return;
+
+ lruvec = folio_lruvec_lock_irq(folio);
+ lru_activate(lruvec, folio);
+ unlock_page_lruvec_irq(lruvec);
+ folio_set_lru(folio);
}
#endif
@@ -482,7 +488,7 @@ void folio_mark_accessed(struct folio *folio)
} else if (!folio_test_active(folio)) {
/*
* If the folio is on the LRU, queue it for activation via
- * cpu_fbatches.activate. Otherwise, assume the folio is in a
+ * cpu_fbatches.lru_activate. Otherwise, assume the folio is in a
* folio_batch, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
@@ -509,8 +515,6 @@ EXPORT_SYMBOL(folio_mark_accessed);
*/
void folio_add_lru(struct folio *folio)
{
- struct folio_batch *fbatch;
-
VM_BUG_ON_FOLIO(folio_test_active(folio) &&
folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
@@ -520,11 +524,7 @@ void folio_add_lru(struct folio *folio)
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
folio_set_active(folio);
- folio_get(folio);
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
- folio_batch_add_and_move(fbatch, folio, lru_add_fn);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_add, false);
}
EXPORT_SYMBOL(folio_add_lru);
@@ -567,7 +567,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
* written out by flusher threads as this is much more efficient
* than the single-page writeout from reclaim.
*/
-static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
{
bool active = folio_test_active(folio);
long nr_pages = folio_nr_pages(folio);
@@ -608,43 +608,43 @@ static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
}
}
-static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- folio_clear_referenced(folio);
- lruvec_add_folio(lruvec, folio);
+ if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
+ return;
- __count_vm_events(PGDEACTIVATE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
- nr_pages);
- }
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ folio_clear_referenced(folio);
+ lruvec_add_folio(lruvec, folio);
+
+ __count_vm_events(PGDEACTIVATE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}
-static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
{
- if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- folio_clear_referenced(folio);
- /*
- * Lazyfree folios are clean anonymous folios. They have
- * the swapbacked flag cleared, to distinguish them from normal
- * anonymous folios
- */
- folio_clear_swapbacked(folio);
- lruvec_add_folio(lruvec, folio);
+ if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+ folio_test_swapcache(folio) || folio_test_unevictable(folio))
+ return;
- __count_vm_events(PGLAZYFREE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
- nr_pages);
- }
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ folio_clear_referenced(folio);
+ /*
+ * Lazyfree folios are clean anonymous folios. They have
+ * the swapbacked flag cleared, to distinguish them from normal
+ * anonymous folios
+ */
+ folio_clear_swapbacked(folio);
+ lruvec_add_folio(lruvec, folio);
+
+ __count_vm_events(PGLAZYFREE, nr_pages);
+ __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
}
/*
@@ -658,30 +658,30 @@ void lru_add_drain_cpu(int cpu)
struct folio_batch *fbatch = &fbatches->lru_add;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_add_fn);
+ folio_batch_move_lru(fbatch, lru_add);
- fbatch = &per_cpu(lru_rotate.fbatch, cpu);
+ fbatch = &fbatches->lru_move_tail;
/* Disabling interrupts below acts as a compiler barrier. */
if (data_race(folio_batch_count(fbatch))) {
unsigned long flags;
/* No harm done if a racing interrupt already did this */
- local_lock_irqsave(&lru_rotate.lock, flags);
- folio_batch_move_lru(fbatch, lru_move_tail_fn);
- local_unlock_irqrestore(&lru_rotate.lock, flags);
+ local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
+ folio_batch_move_lru(fbatch, lru_move_tail);
+ local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
}
fbatch = &fbatches->lru_deactivate_file;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
+ folio_batch_move_lru(fbatch, lru_deactivate_file);
fbatch = &fbatches->lru_deactivate;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_deactivate_fn);
+ folio_batch_move_lru(fbatch, lru_deactivate);
fbatch = &fbatches->lru_lazyfree;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_lazyfree_fn);
+ folio_batch_move_lru(fbatch, lru_lazyfree);
folio_activate_drain(cpu);
}
@@ -698,22 +698,11 @@ void lru_add_drain_cpu(int cpu)
*/
void deactivate_file_folio(struct folio *folio)
{
- struct folio_batch *fbatch;
-
/* Deactivating an unevictable folio will not accelerate reclaim */
if (folio_test_unevictable(folio))
return;
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
-
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
- folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_deactivate_file, true);
}
/*
@@ -726,21 +715,10 @@ void deactivate_file_folio(struct folio *folio)
*/
void folio_deactivate(struct folio *folio)
{
- if (!folio_test_unevictable(folio) && (folio_test_active(folio) ||
- lru_gen_enabled())) {
- struct folio_batch *fbatch;
-
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
+ if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
+ return;
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
- folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ folio_batch_add_and_move(folio, lru_deactivate, true);
}
/**
@@ -752,21 +730,11 @@ void folio_deactivate(struct folio *folio)
*/
void folio_mark_lazyfree(struct folio *folio)
{
- if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
- struct folio_batch *fbatch;
-
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
+ if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+ folio_test_swapcache(folio) || folio_test_unevictable(folio))
+ return;
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
- folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ folio_batch_add_and_move(folio, lru_lazyfree, true);
}
void lru_add_drain(void)
@@ -816,11 +784,11 @@ static bool cpu_needs_drain(unsigned int cpu)
/* Check these in order of likelihood that they're not zero */
return folio_batch_count(&fbatches->lru_add) ||
- data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
+ folio_batch_count(&fbatches->lru_move_tail) ||
folio_batch_count(&fbatches->lru_deactivate_file) ||
folio_batch_count(&fbatches->lru_deactivate) ||
folio_batch_count(&fbatches->lru_lazyfree) ||
- folio_batch_count(&fbatches->activate) ||
+ folio_batch_count(&fbatches->lru_activate) ||
need_mlock_drain(cpu) ||
has_bh_in_lru(cpu, NULL);
}
@@ -938,8 +906,8 @@ atomic_t lru_disable_count = ATOMIC_INIT(0);
/*
* lru_cache_disable() needs to be called before we start compiling
- * a list of pages to be migrated using isolate_lru_page().
- * It drains pages on LRU cache and then disable on all cpus until
+ * a list of folios to be migrated using folio_isolate_lru().
+ * It drains folios in the LRU caches and then disables them on all cpus until
* lru_cache_enable is called.
*
* Must be paired with a call to lru_cache_enable().
diff --git a/mm/swap.h b/mm/swap.h
index baa1fa946b34..ad2f121de970 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -59,7 +59,7 @@ void __delete_from_swap_cache(struct folio *folio,
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end);
-void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr);
struct folio *filemap_get_incore_folio(struct address_space *mapping,
@@ -73,13 +73,39 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
bool skip_if_exists);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct mempolicy *mpol, pgoff_t ilx);
-struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
+ struct vm_fault *vmf);
static inline unsigned int folio_swap_flags(struct folio *folio)
{
return swp_swap_info(folio->swap)->flags;
}
+
+/*
+ * Return the count of contiguous swap entries that share the same
+ * zeromap status as the starting entry. If is_zeromap is not NULL,
+ * the zeromap status of the starting entry is returned through it.
+ */
+static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
+ bool *is_zeromap)
+{
+ struct swap_info_struct *sis = swp_swap_info(entry);
+ unsigned long start = swp_offset(entry);
+ unsigned long end = start + max_nr;
+ bool first_bit;
+
+ first_bit = test_bit(start, sis->zeromap);
+ if (is_zeromap)
+ *is_zeromap = first_bit;
+
+ if (max_nr <= 1)
+ return max_nr;
+ if (first_bit)
+ return find_next_zero_bit(sis->zeromap, end, start) - start;
+ else
+ return find_next_bit(sis->zeromap, end, start) - start;
+}
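Callers of swap_zeromap_batch() are expected to consume swap entries in runs that share a zeromap status. A hypothetical caller loop (process_range() is invented here purely for illustration):

	swp_entry_t entry = start;
	int left = nr, batch;
	bool zeromap;

	while (left > 0) {
		batch = swap_zeromap_batch(entry, left, &zeromap);
		/* All 'batch' entries share 'zeromap': handle them in one go */
		process_range(entry, batch, zeromap);
		entry.val += batch;
		left -= batch;
	}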
+
#else /* CONFIG_SWAP */
struct swap_iocb;
static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
@@ -109,7 +135,7 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
return NULL;
}
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
struct vm_fault *vmf)
{
return NULL;
@@ -120,7 +146,7 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
return 0;
}
-static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
}
@@ -171,5 +197,13 @@ static inline unsigned int folio_swap_flags(struct folio *folio)
{
return 0;
}
+
+static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
+ bool *has_zeromap)
+{
+ return 0;
+}
+
#endif /* CONFIG_SWAP */
+
#endif /* _MM_SWAP_H */
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index db6c4a26cf59..da1278f0563b 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -161,6 +161,8 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
*/
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
+ if (mem_cgroup_disabled())
+ return 0;
return lookup_swap_cgroup(ent, NULL)->id;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index a1726e49a5eb..4669f29cf555 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -435,6 +435,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
{
struct swap_info_struct *si;
struct folio *folio;
+ struct folio *new_folio = NULL;
+ struct folio *result = NULL;
void *shadow = NULL;
*new_page_allocated = false;
@@ -463,27 +465,28 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* else swap_off will be aborted if we return NULL.
*/
if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
- goto fail_put_swap;
+ goto put_and_return;
/*
- * Get a new folio to read into from swap. Allocate it now,
- * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
- * cause any racers to loop around until we add it to cache.
+ * Get a new folio to read into from swap. Allocate it now if
+ * new_folio does not yet exist, before marking swap_map
+ * SWAP_HAS_CACHE, when -EEXIST will cause any racers to loop
+ * around until we add it to the cache.
*/
- folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
- if (!folio)
- goto fail_put_swap;
+ if (!new_folio) {
+ new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
+ if (!new_folio)
+ goto put_and_return;
+ }
/*
* Swap entry may have been freed since our caller observed it.
*/
- err = swapcache_prepare(entry);
+ err = swapcache_prepare(entry, 1);
if (!err)
break;
-
- folio_put(folio);
- if (err != -EEXIST)
- goto fail_put_swap;
+ else if (err != -EEXIST)
+ goto put_and_return;
/*
* Protect against a recursive call to __read_swap_cache_async()
@@ -494,7 +497,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* __read_swap_cache_async() in the writeback path.
*/
if (skip_if_exists)
- goto fail_put_swap;
+ goto put_and_return;
/*
* We might race against __delete_from_swap_cache(), and
@@ -509,36 +512,37 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/*
* The swap entry is ours to swap in. Prepare the new folio.
*/
+ __folio_set_locked(new_folio);
+ __folio_set_swapbacked(new_folio);
- __folio_set_locked(folio);
- __folio_set_swapbacked(folio);
-
- if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
+ if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
goto fail_unlock;
/* May fail (-ENOMEM) if XArray node allocation failed. */
- if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+ if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
goto fail_unlock;
- mem_cgroup_swapin_uncharge_swap(entry);
+ mem_cgroup_swapin_uncharge_swap(entry, 1);
if (shadow)
- workingset_refault(folio, shadow);
+ workingset_refault(new_folio, shadow);
- /* Caller will initiate read into locked folio */
- folio_add_lru(folio);
+ /* Caller will initiate read into locked new_folio */
+ folio_add_lru(new_folio);
*new_page_allocated = true;
+ folio = new_folio;
got_folio:
- put_swap_device(si);
- return folio;
+ result = folio;
+ goto put_and_return;
fail_unlock:
- put_swap_folio(folio, entry);
- folio_unlock(folio);
- folio_put(folio);
-fail_put_swap:
+ put_swap_folio(new_folio, entry);
+ folio_unlock(new_folio);
+put_and_return:
put_swap_device(si);
- return NULL;
+ if (!(*new_page_allocated) && new_folio)
+ folio_put(new_folio);
+ return result;
}
/*
@@ -698,10 +702,8 @@ skip:
/* The page was likely read above, so no need for plugging here */
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
- if (unlikely(page_allocated)) {
- zswap_folio_swapin(folio);
+ if (unlikely(page_allocated))
swap_read_folio(folio, NULL);
- }
return folio;
}
@@ -850,10 +852,8 @@ skip:
/* The folio was likely read above, so no need for plugging here */
folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
&page_allocated, false);
- if (unlikely(page_allocated)) {
- zswap_folio_swapin(folio);
+ if (unlikely(page_allocated))
swap_read_folio(folio, NULL);
- }
return folio;
}
@@ -863,13 +863,13 @@ skip:
* @gfp_mask: memory allocation flags
* @vmf: fault information
*
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
*
* It is the main entry function for swap readahead. Depending on the
* configuration, it reads ahead blocks with cluster-based (i.e. physical
* disk based) or vma-based (i.e. virtual address based on the faulting
* address) readahead.
*/
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct mempolicy *mpol;
@@ -882,9 +882,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
mpol_cond_put(mpol);
- if (!folio)
- return NULL;
- return folio_file_page(folio, swp_offset(entry));
+ return folio;
}
#ifdef CONFIG_SYSFS
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 38bdc439651a..0cded32414a1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -53,6 +53,15 @@
static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
+static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
+ unsigned int nr_pages);
+static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
+ unsigned int nr_entries);
+static bool folio_swapcache_freeable(struct folio *folio);
+static struct swap_cluster_info *lock_cluster_or_swap_info(
+ struct swap_info_struct *si, unsigned long offset);
+static void unlock_cluster_or_swap_info(struct swap_info_struct *si,
+ struct swap_cluster_info *ci);
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
@@ -127,8 +136,44 @@ static inline unsigned char swap_count(unsigned char ent)
* corresponding page
*/
#define TTRS_UNMAPPED 0x2
-/* Reclaim the swap entry if swap is getting full*/
+/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL 0x4
+/* Reclaim directly, bypass the slot cache and don't touch device lock */
+#define TTRS_DIRECT 0x8
+
+static bool swap_is_has_cache(struct swap_info_struct *si,
+ unsigned long offset, int nr_pages)
+{
+ unsigned char *map = si->swap_map + offset;
+ unsigned char *map_end = map + nr_pages;
+
+ do {
+ VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
+ if (*map != SWAP_HAS_CACHE)
+ return false;
+ } while (++map < map_end);
+
+ return true;
+}
+
+static bool swap_is_last_map(struct swap_info_struct *si,
+ unsigned long offset, int nr_pages, bool *has_cache)
+{
+ unsigned char *map = si->swap_map + offset;
+ unsigned char *map_end = map + nr_pages;
+ unsigned char count = *map;
+
+ if (swap_count(count) != 1)
+ return false;
+
+ while (++map < map_end) {
+ if (*map != count)
+ return false;
+ }
+
+ *has_cache = !!(count & SWAP_HAS_CACHE);
+ return true;
+}
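swap_is_last_map() answers, in one pass over swap_map, whether the folio holds the only remaining reference to each of its slots. A standalone model of the same scan (SWAP_HAS_CACHE mirrored from the kernel headers as an assumption):

#include <stdbool.h>
#include <stdio.h>

#define SWAP_HAS_CACHE 0x40	/* assumed value of the kernel flag bit */

static bool is_last_map(const unsigned char *map, int nr, bool *has_cache)
{
	unsigned char first = map[0];
	int i;

	if ((first & ~SWAP_HAS_CACHE) != 1)	/* count must be exactly 1 */
		return false;
	for (i = 1; i < nr; i++)
		if (map[i] != first)		/* all slots must match */
			return false;
	*has_cache = first & SWAP_HAS_CACHE;
	return true;
}

int main(void)
{
	unsigned char map[4] = { 0x41, 0x41, 0x41, 0x41 };
	bool has_cache;

	printf("last map: %d\n", is_last_map(map, 4, &has_cache));	/* 1 */
	return 0;
}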
/*
* returns number of pages in the folio that backs the swap entry. If positive,
@@ -139,12 +184,22 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
unsigned long offset, unsigned long flags)
{
swp_entry_t entry = swp_entry(si->type, offset);
+ struct address_space *address_space = swap_address_space(entry);
+ struct swap_cluster_info *ci;
struct folio *folio;
- int ret = 0;
+ int ret, nr_pages;
+ bool need_reclaim;
- folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
+ folio = filemap_get_folio(address_space, swap_cache_index(entry));
if (IS_ERR(folio))
return 0;
+
+ /* offset could point to the middle of a large folio */
+ entry = folio->swap;
+ offset = swp_offset(entry);
+ nr_pages = folio_nr_pages(folio);
+ ret = -nr_pages;
+
/*
* When this function is called from scan_swap_map_slots() and it's
* called by vmscan.c at reclaiming folios. So we hold a folio lock
@@ -152,14 +207,50 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
* case and you should use folio_free_swap() with explicit folio_lock()
* in usual operations.
*/
- if (folio_trylock(folio)) {
- if ((flags & TTRS_ANYWAY) ||
- ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
- ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
- ret = folio_free_swap(folio);
- folio_unlock(folio);
+ if (!folio_trylock(folio))
+ goto out;
+
+ need_reclaim = ((flags & TTRS_ANYWAY) ||
+ ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
+ ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
+ if (!need_reclaim || !folio_swapcache_freeable(folio))
+ goto out_unlock;
+
+ /*
+ * It's safe to delete the folio from swap cache only if the folio's
+ * swap_map is HAS_CACHE only, which means the slots have no page table
+ * reference or pending writeback, and can't be allocated to others.
+ */
+ ci = lock_cluster_or_swap_info(si, offset);
+ need_reclaim = swap_is_has_cache(si, offset, nr_pages);
+ unlock_cluster_or_swap_info(si, ci);
+ if (!need_reclaim)
+ goto out_unlock;
+
+ if (!(flags & TTRS_DIRECT)) {
+ /* Free through slot cache */
+ delete_from_swap_cache(folio);
+ folio_set_dirty(folio);
+ ret = nr_pages;
+ goto out_unlock;
}
- ret = ret ? folio_nr_pages(folio) : -folio_nr_pages(folio);
+
+ xa_lock_irq(&address_space->i_pages);
+ __delete_from_swap_cache(folio, entry, NULL);
+ xa_unlock_irq(&address_space->i_pages);
+ folio_ref_sub(folio, nr_pages);
+ folio_set_dirty(folio);
+
+ spin_lock(&si->lock);
+ /* Only single-page folios can be backed by zswap */
+ if (nr_pages == 1)
+ zswap_invalidate(entry);
+ swap_entry_range_free(si, entry, nr_pages);
+ spin_unlock(&si->lock);
+ ret = nr_pages;
+out_unlock:
+ folio_unlock(folio);
+out:
folio_put(folio);
return ret;
}
@@ -290,62 +381,21 @@ static void discard_swap_cluster(struct swap_info_struct *si,
#endif
#define LATENCY_LIMIT 256
-static inline void cluster_set_flag(struct swap_cluster_info *info,
- unsigned int flag)
-{
- info->flags = flag;
-}
-
-static inline unsigned int cluster_count(struct swap_cluster_info *info)
-{
- return info->data;
-}
-
-static inline void cluster_set_count(struct swap_cluster_info *info,
- unsigned int c)
-{
- info->data = c;
-}
-
-static inline void cluster_set_count_flag(struct swap_cluster_info *info,
- unsigned int c, unsigned int f)
-{
- info->flags = f;
- info->data = c;
-}
-
-static inline unsigned int cluster_next(struct swap_cluster_info *info)
-{
- return info->data;
-}
-
-static inline void cluster_set_next(struct swap_cluster_info *info,
- unsigned int n)
-{
- info->data = n;
-}
-
-static inline void cluster_set_next_flag(struct swap_cluster_info *info,
- unsigned int n, unsigned int f)
-{
- info->flags = f;
- info->data = n;
-}
-
static inline bool cluster_is_free(struct swap_cluster_info *info)
{
return info->flags & CLUSTER_FLAG_FREE;
}
-static inline bool cluster_is_null(struct swap_cluster_info *info)
+static inline unsigned int cluster_index(struct swap_info_struct *si,
+ struct swap_cluster_info *ci)
{
- return info->flags & CLUSTER_FLAG_NEXT_NULL;
+ return ci - si->cluster_info;
}
-static inline void cluster_set_null(struct swap_cluster_info *info)
+static inline unsigned int cluster_offset(struct swap_info_struct *si,
+ struct swap_cluster_info *ci)
{
- info->flags = CLUSTER_FLAG_NEXT_NULL;
- info->data = 0;
+ return cluster_index(si, ci) * SWAPFILE_CLUSTER;
}
static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
@@ -394,65 +444,11 @@ static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
spin_unlock(&si->lock);
}
-static inline bool cluster_list_empty(struct swap_cluster_list *list)
-{
- return cluster_is_null(&list->head);
-}
-
-static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
-{
- return cluster_next(&list->head);
-}
-
-static void cluster_list_init(struct swap_cluster_list *list)
-{
- cluster_set_null(&list->head);
- cluster_set_null(&list->tail);
-}
-
-static void cluster_list_add_tail(struct swap_cluster_list *list,
- struct swap_cluster_info *ci,
- unsigned int idx)
-{
- if (cluster_list_empty(list)) {
- cluster_set_next_flag(&list->head, idx, 0);
- cluster_set_next_flag(&list->tail, idx, 0);
- } else {
- struct swap_cluster_info *ci_tail;
- unsigned int tail = cluster_next(&list->tail);
-
- /*
- * Nested cluster lock, but both cluster locks are
- * only acquired when we held swap_info_struct->lock
- */
- ci_tail = ci + tail;
- spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
- cluster_set_next(ci_tail, idx);
- spin_unlock(&ci_tail->lock);
- cluster_set_next_flag(&list->tail, idx, 0);
- }
-}
-
-static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
- struct swap_cluster_info *ci)
-{
- unsigned int idx;
-
- idx = cluster_next(&list->head);
- if (cluster_next(&list->tail) == idx) {
- cluster_set_null(&list->head);
- cluster_set_null(&list->tail);
- } else
- cluster_set_next_flag(&list->head,
- cluster_next(&ci[idx]), 0);
-
- return idx;
-}
-
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
- unsigned int idx)
+ struct swap_cluster_info *ci)
{
+ unsigned int idx = cluster_index(si, ci);
/*
* If scan_swap_map_slots() can't find a free cluster, it will check
* si->swap_map directly. To make sure the discarding cluster isn't
@@ -462,17 +458,23 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
SWAP_MAP_BAD, SWAPFILE_CLUSTER);
- cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
-
+ VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
+ list_move_tail(&ci->list, &si->discard_clusters);
+ ci->flags = 0;
schedule_work(&si->discard_work);
}
-static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
+static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
- struct swap_cluster_info *ci = si->cluster_info;
+ lockdep_assert_held(&si->lock);
+ lockdep_assert_held(&ci->lock);
- cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
- cluster_list_add_tail(&si->free_clusters, ci, idx);
+ if (ci->flags)
+ list_move_tail(&ci->list, &si->free_clusters);
+ else
+ list_add_tail(&ci->list, &si->free_clusters);
+ ci->flags = CLUSTER_FLAG_FREE;
+ ci->order = 0;
}
/*
@@ -481,24 +483,24 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
*/
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
- struct swap_cluster_info *info, *ci;
+ struct swap_cluster_info *ci;
unsigned int idx;
- info = si->cluster_info;
-
- while (!cluster_list_empty(&si->discard_clusters)) {
- idx = cluster_list_del_first(&si->discard_clusters, info);
+ while (!list_empty(&si->discard_clusters)) {
+ ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
+ list_del(&ci->list);
+ idx = cluster_index(si, ci);
spin_unlock(&si->lock);
discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
SWAPFILE_CLUSTER);
spin_lock(&si->lock);
- ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
- __free_cluster(si, idx);
+ spin_lock(&ci->lock);
+ __free_cluster(si, ci);
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
0, SWAPFILE_CLUSTER);
- unlock_cluster(ci);
+ spin_unlock(&ci->lock);
}
}
@@ -521,20 +523,15 @@ static void swap_users_ref_free(struct percpu_ref *ref)
complete(&si->comp);
}
-static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
+static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
- struct swap_cluster_info *ci = si->cluster_info;
-
- VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
- cluster_list_del_first(&si->free_clusters, ci);
- cluster_set_count_flag(ci + idx, 0, 0);
-}
+ VM_BUG_ON(ci->count != 0);
+ lockdep_assert_held(&si->lock);
+ lockdep_assert_held(&ci->lock);
-static void free_cluster(struct swap_info_struct *si, unsigned long idx)
-{
- struct swap_cluster_info *ci = si->cluster_info + idx;
+ if (ci->flags & CLUSTER_FLAG_FRAG)
+ si->frag_cluster_nr[ci->order]--;
- VM_BUG_ON(cluster_count(ci) != 0);
/*
* If the swap is discardable, prepare discard the cluster
* instead of free it immediately. The cluster will be freed
@@ -542,175 +539,371 @@ static void free_cluster(struct swap_info_struct *si, unsigned long idx)
*/
if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
(SWP_WRITEOK | SWP_PAGE_DISCARD)) {
- swap_cluster_schedule_discard(si, idx);
+ swap_cluster_schedule_discard(si, ci);
return;
}
- __free_cluster(si, idx);
+ __free_cluster(si, ci);
}
/*
- * The cluster corresponding to page_nr will be used. The cluster will be
- * removed from free cluster list and its usage counter will be increased by
- * count.
+ * The cluster corresponding to page_nr will be used. The cluster will not be
+ * added to the free cluster list and its usage counter will be increased by 1.
+ * Only used for initialization.
*/
-static void add_cluster_info_page(struct swap_info_struct *p,
- struct swap_cluster_info *cluster_info, unsigned long page_nr,
- unsigned long count)
+static void inc_cluster_info_page(struct swap_info_struct *si,
+ struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
unsigned long idx = page_nr / SWAPFILE_CLUSTER;
+ struct swap_cluster_info *ci;
if (!cluster_info)
return;
- if (cluster_is_free(&cluster_info[idx]))
- alloc_cluster(p, idx);
- VM_BUG_ON(cluster_count(&cluster_info[idx]) + count > SWAPFILE_CLUSTER);
- cluster_set_count(&cluster_info[idx],
- cluster_count(&cluster_info[idx]) + count);
-}
+ ci = cluster_info + idx;
+ ci->count++;
-/*
- * The cluster corresponding to page_nr will be used. The cluster will be
- * removed from free cluster list and its usage counter will be increased by 1.
- */
-static void inc_cluster_info_page(struct swap_info_struct *p,
- struct swap_cluster_info *cluster_info, unsigned long page_nr)
-{
- add_cluster_info_page(p, cluster_info, page_nr, 1);
+ VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
+ VM_BUG_ON(ci->flags);
}
/*
- * The cluster corresponding to page_nr decreases one usage. If the usage
- * counter becomes 0, which means no page in the cluster is in using, we can
- * optionally discard the cluster and add it to free cluster list.
+ * Decrease the usage counter of cluster @ci by @nr_pages. If the counter
+ * reaches 0, meaning no page in the cluster is in use, we can optionally
+ * discard the cluster and add it to the free cluster list.
*/
-static void dec_cluster_info_page(struct swap_info_struct *p,
- struct swap_cluster_info *cluster_info, unsigned long page_nr)
+static void dec_cluster_info_page(struct swap_info_struct *si,
+ struct swap_cluster_info *ci, int nr_pages)
{
- unsigned long idx = page_nr / SWAPFILE_CLUSTER;
-
- if (!cluster_info)
+ if (!si->cluster_info)
return;
- VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
- cluster_set_count(&cluster_info[idx],
- cluster_count(&cluster_info[idx]) - 1);
+ VM_BUG_ON(ci->count < nr_pages);
+ VM_BUG_ON(cluster_is_free(ci));
+ lockdep_assert_held(&si->lock);
+ lockdep_assert_held(&ci->lock);
+ ci->count -= nr_pages;
- if (cluster_count(&cluster_info[idx]) == 0)
- free_cluster(p, idx);
+ if (!ci->count) {
+ free_cluster(si, ci);
+ return;
+ }
+
+ if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
+ VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
+ if (ci->flags & CLUSTER_FLAG_FRAG)
+ si->frag_cluster_nr[ci->order]--;
+ list_move_tail(&ci->list, &si->nonfull_clusters[ci->order]);
+ ci->flags = CLUSTER_FLAG_NONFULL;
+ }
}
-/*
- * It's possible scan_swap_map_slots() uses a free cluster in the middle of free
- * cluster list. Avoiding such abuse to avoid list corruption.
- */
-static bool
-scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
- unsigned long offset, int order)
+static bool cluster_reclaim_range(struct swap_info_struct *si,
+ struct swap_cluster_info *ci,
+ unsigned long start, unsigned long end)
{
- struct percpu_cluster *percpu_cluster;
- bool conflict;
+ unsigned char *map = si->swap_map;
+ unsigned long offset;
- offset /= SWAPFILE_CLUSTER;
- conflict = !cluster_list_empty(&si->free_clusters) &&
- offset != cluster_list_first(&si->free_clusters) &&
- cluster_is_free(&si->cluster_info[offset]);
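+	/* Reclaim may block, so drop both locks while scanning the range. */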
+ spin_unlock(&ci->lock);
+ spin_unlock(&si->lock);
- if (!conflict)
- return false;
+ for (offset = start; offset < end; offset++) {
+ switch (READ_ONCE(map[offset])) {
+ case 0:
+ continue;
+ case SWAP_HAS_CACHE:
+ if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0)
+ continue;
+ goto out;
+ default:
+ goto out;
+ }
+ }
+out:
+ spin_lock(&si->lock);
+ spin_lock(&ci->lock);
+
+ /*
+	 * Recheck the range whether or not reclaim succeeded; a slot
+	 * could have been freed while we were not holding the lock.
+ */
+ for (offset = start; offset < end; offset++)
+ if (READ_ONCE(map[offset]))
+ return false;
- percpu_cluster = this_cpu_ptr(si->percpu_cluster);
- percpu_cluster->next[order] = SWAP_NEXT_INVALID;
return true;
}
-static inline bool swap_range_empty(char *swap_map, unsigned int start,
- unsigned int nr_pages)
+static bool cluster_scan_range(struct swap_info_struct *si,
+ struct swap_cluster_info *ci,
+ unsigned long start, unsigned int nr_pages)
{
- unsigned int i;
+ unsigned long offset, end = start + nr_pages;
+ unsigned char *map = si->swap_map;
+ bool need_reclaim = false;
- for (i = 0; i < nr_pages; i++) {
- if (swap_map[start + i])
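+	/* The range is usable only if every slot is free or holds a reclaimable cache-only entry. */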
+ for (offset = start; offset < end; offset++) {
+ switch (READ_ONCE(map[offset])) {
+ case 0:
+ continue;
+ case SWAP_HAS_CACHE:
+ if (!vm_swap_full())
+ return false;
+ need_reclaim = true;
+ continue;
+ default:
return false;
+ }
}
+ if (need_reclaim)
+ return cluster_reclaim_range(si, ci, start, end);
+
return true;
}
+static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
+ unsigned int start, unsigned char usage,
+ unsigned int order)
+{
+ unsigned int nr_pages = 1 << order;
+
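+	/* A free cluster that is only partially filled by this allocation becomes nonfull for the order. */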
+ if (cluster_is_free(ci)) {
+ if (nr_pages < SWAPFILE_CLUSTER) {
+ list_move_tail(&ci->list, &si->nonfull_clusters[order]);
+ ci->flags = CLUSTER_FLAG_NONFULL;
+ }
+ ci->order = order;
+ }
+
+ memset(si->swap_map + start, usage, nr_pages);
+ swap_range_alloc(si, start, nr_pages);
+ ci->count += nr_pages;
+
+ if (ci->count == SWAPFILE_CLUSTER) {
+ VM_BUG_ON(!(ci->flags &
+ (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG)));
+ if (ci->flags & CLUSTER_FLAG_FRAG)
+ si->frag_cluster_nr[ci->order]--;
+ list_move_tail(&ci->list, &si->full_clusters);
+ ci->flags = CLUSTER_FLAG_FULL;
+ }
+}
+
+static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset,
+ unsigned int *foundp, unsigned int order,
+ unsigned char usage)
+{
+ unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1);
+ unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
+ unsigned int nr_pages = 1 << order;
+ struct swap_cluster_info *ci;
+
+ if (end < nr_pages)
+ return SWAP_NEXT_INVALID;
+ end -= nr_pages;
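+	/* end is now the last offset at which an nr_pages-sized range still fits. */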
+
+ ci = lock_cluster(si, offset);
+ if (ci->count + nr_pages > SWAPFILE_CLUSTER) {
+ offset = SWAP_NEXT_INVALID;
+ goto done;
+ }
+
+ while (offset <= end) {
+ if (cluster_scan_range(si, ci, offset, nr_pages)) {
+ cluster_alloc_range(si, ci, offset, usage, order);
+ *foundp = offset;
+ if (ci->count == SWAPFILE_CLUSTER) {
+ offset = SWAP_NEXT_INVALID;
+ goto done;
+ }
+ offset += nr_pages;
+ break;
+ }
+ offset += nr_pages;
+ }
+ if (offset > end)
+ offset = SWAP_NEXT_INVALID;
+done:
+ unlock_cluster(ci);
+ return offset;
+}
+
+static void swap_reclaim_full_clusters(struct swap_info_struct *si)
+{
+ long to_scan = 1;
+ unsigned long offset, end;
+ struct swap_cluster_info *ci;
+ unsigned char *map = si->swap_map;
+ int nr_reclaim, total_reclaimed = 0;
+
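+	/* Scan more aggressively when global free swap is nearly exhausted. */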
+ if (atomic_long_read(&nr_swap_pages) <= SWAPFILE_CLUSTER)
+ to_scan = si->inuse_pages / SWAPFILE_CLUSTER;
+
+ while (!list_empty(&si->full_clusters)) {
+ ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list);
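+		/* Rotate the entry to the tail so successive scans walk the whole list. */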
+ list_move_tail(&ci->list, &si->full_clusters);
+ offset = cluster_offset(si, ci);
+ end = min(si->max, offset + SWAPFILE_CLUSTER);
+ to_scan--;
+
+ while (offset < end) {
+ if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
+ spin_unlock(&si->lock);
+ nr_reclaim = __try_to_reclaim_swap(si, offset,
+ TTRS_ANYWAY | TTRS_DIRECT);
+ spin_lock(&si->lock);
+ if (nr_reclaim > 0) {
+ offset += nr_reclaim;
+ total_reclaimed += nr_reclaim;
+ continue;
+ } else if (nr_reclaim < 0) {
+ offset += -nr_reclaim;
+ continue;
+ }
+ }
+ offset++;
+ }
+ if (to_scan <= 0 || total_reclaimed)
+ break;
+ }
+}
+
/*
* Try to get swap entries with specified order from current cpu's swap entry
* pool (a cluster). This might involve allocating a new cluster for current CPU
* too.
*/
-static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
- unsigned long *offset, unsigned long *scan_base, int order)
+static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
+ unsigned char usage)
{
- unsigned int nr_pages = 1 << order;
struct percpu_cluster *cluster;
struct swap_cluster_info *ci;
- unsigned int tmp, max;
+ unsigned int offset, found = 0;
new_cluster:
+ lockdep_assert_held(&si->lock);
cluster = this_cpu_ptr(si->percpu_cluster);
- tmp = cluster->next[order];
- if (tmp == SWAP_NEXT_INVALID) {
- if (!cluster_list_empty(&si->free_clusters)) {
- tmp = cluster_next(&si->free_clusters.head) *
- SWAPFILE_CLUSTER;
- } else if (!cluster_list_empty(&si->discard_clusters)) {
- /*
- * we don't have free cluster but have some clusters in
- * discarding, do discard now and reclaim them, then
- * reread cluster_next_cpu since we dropped si->lock
- */
- swap_do_scheduled_discard(si);
- *scan_base = this_cpu_read(*si->cluster_next_cpu);
- *offset = *scan_base;
- goto new_cluster;
- } else
- return false;
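+	/* Fast path: resume from this CPU's cached cluster offset for this order. */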
+ offset = cluster->next[order];
+ if (offset) {
+ offset = alloc_swap_scan_cluster(si, offset, &found, order, usage);
+ if (found)
+ goto done;
}
- /*
- * Other CPUs can use our cluster if they can't find a free cluster,
- * check if there is still free entry in the cluster, maintaining
- * natural alignment.
- */
- max = min_t(unsigned long, si->max, ALIGN(tmp + 1, SWAPFILE_CLUSTER));
- if (tmp < max) {
- ci = lock_cluster(si, tmp);
- while (tmp < max) {
- if (swap_range_empty(si->swap_map, tmp, nr_pages))
+ if (!list_empty(&si->free_clusters)) {
+ ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage);
+ VM_BUG_ON(!found);
+ goto done;
+ }
+
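+	/* No free cluster left: for orders below PMD_ORDER, try nonfull clusters, then the fragment list. */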
+ if (order < PMD_ORDER) {
+ unsigned int frags = 0;
+
+ while (!list_empty(&si->nonfull_clusters[order])) {
+ ci = list_first_entry(&si->nonfull_clusters[order],
+ struct swap_cluster_info, list);
+ list_move_tail(&ci->list, &si->frag_clusters[order]);
+ ci->flags = CLUSTER_FLAG_FRAG;
+ si->frag_cluster_nr[order]++;
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+ &found, order, usage);
+ frags++;
+ if (found)
break;
- tmp += nr_pages;
}
- unlock_cluster(ci);
+
+ if (!found) {
+ /*
+			 * Nonfull clusters were moved to the frag list tail if we reached
+			 * here; count them as well so we don't over-scan the frag list.
+ */
+ while (frags < si->frag_cluster_nr[order]) {
+ ci = list_first_entry(&si->frag_clusters[order],
+ struct swap_cluster_info, list);
+ /*
+				 * Rotate the frag list to iterate: these clusters either failed
+				 * a high-order allocation or were moved here by per-CPU usage;
+				 * rotating helps keep usable clusters at the front.
+ */
+ list_move_tail(&ci->list, &si->frag_clusters[order]);
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+ &found, order, usage);
+ frags++;
+ if (found)
+ break;
+ }
+ }
}
- if (tmp >= max) {
- cluster->next[order] = SWAP_NEXT_INVALID;
+
+ if (found)
+ goto done;
+
+ if (!list_empty(&si->discard_clusters)) {
+ /*
+		 * We don't have a free cluster, but some clusters are pending
+		 * discard; do the discard now and reclaim them, then restart
+		 * the allocation since we dropped si->lock.
+ */
+ swap_do_scheduled_discard(si);
goto new_cluster;
}
- *offset = tmp;
- *scan_base = tmp;
- tmp += nr_pages;
- cluster->next[order] = tmp < max ? tmp : SWAP_NEXT_INVALID;
- return true;
+
+ if (order)
+ goto done;
+
+ /* Order 0 stealing from higher order */
+ for (int o = 1; o < SWAP_NR_ORDERS; o++) {
+ /*
+		 * Clusters here have at least one usable slot and can't fail an order 0
+ * allocation, but reclaim may drop si->lock and race with another user.
+ */
+ while (!list_empty(&si->frag_clusters[o])) {
+ ci = list_first_entry(&si->frag_clusters[o],
+ struct swap_cluster_info, list);
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+ &found, 0, usage);
+ if (found)
+ goto done;
+ }
+
+ while (!list_empty(&si->nonfull_clusters[o])) {
+ ci = list_first_entry(&si->nonfull_clusters[o],
+ struct swap_cluster_info, list);
+ offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
+ &found, 0, usage);
+ if (found)
+ goto done;
+ }
+ }
+
+done:
+	/* Try reclaiming from full clusters if the device is nearly full */
+ if (vm_swap_full() && (!found || (si->pages - si->inuse_pages) < SWAPFILE_CLUSTER)) {
+ swap_reclaim_full_clusters(si);
+ if (!found && !order && si->pages != si->inuse_pages)
+ goto new_cluster;
+ }
+
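+	/* Remember where this CPU should resume allocating at this order. */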
+ cluster->next[order] = offset;
+ return found;
}
-static void __del_from_avail_list(struct swap_info_struct *p)
+static void __del_from_avail_list(struct swap_info_struct *si)
{
int nid;
- assert_spin_locked(&p->lock);
+ assert_spin_locked(&si->lock);
for_each_node(nid)
- plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
+ plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
}
-static void del_from_avail_list(struct swap_info_struct *p)
+static void del_from_avail_list(struct swap_info_struct *si)
{
spin_lock(&swap_avail_lock);
- __del_from_avail_list(p);
+ __del_from_avail_list(si);
spin_unlock(&swap_avail_lock);
}
@@ -731,13 +924,13 @@ static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
}
}
-static void add_to_avail_list(struct swap_info_struct *p)
+static void add_to_avail_list(struct swap_info_struct *si)
{
int nid;
spin_lock(&swap_avail_lock);
for_each_node(nid)
- plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
+ plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
spin_unlock(&swap_avail_lock);
}
@@ -747,6 +940,14 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
unsigned long begin = offset;
unsigned long end = offset + nr_entries - 1;
void (*swap_slot_free_notify)(struct block_device *, unsigned long);
+ unsigned int i;
+
+ /*
+	 * Use atomic clear_bit operations on zeromap, rather than non-atomic
+	 * bitmap_clear, to prevent concurrent writes from corrupting adjacent bits.
+ */
+ for (i = 0; i < nr_entries; i++)
+ clear_bit(offset + i, si->zeromap);
if (offset < si->lowest_bit)
si->lowest_bit = offset;
@@ -822,11 +1023,29 @@ static bool swap_offset_available_and_locked(struct swap_info_struct *si,
return false;
}
+static int cluster_alloc_swap(struct swap_info_struct *si,
+ unsigned char usage, int nr,
+ swp_entry_t slots[], int order)
+{
+ int n_ret = 0;
+
+ VM_BUG_ON(!si->cluster_info);
+
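+	/* Allocate one entry at a time until nr are gathered or space runs out. */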
+ while (n_ret < nr) {
+ unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
+
+ if (!offset)
+ break;
+ slots[n_ret++] = swp_entry(si->type, offset);
+ }
+
+ return n_ret;
+}
+
static int scan_swap_map_slots(struct swap_info_struct *si,
unsigned char usage, int nr,
swp_entry_t slots[], int order)
{
- struct swap_cluster_info *ci;
unsigned long offset;
unsigned long scan_base;
unsigned long last_in_cluster = 0;
@@ -865,26 +1084,16 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
return 0;
}
+ if (si->cluster_info)
+ return cluster_alloc_swap(si, usage, nr, slots, order);
+
si->flags += SWP_SCANNING;
- /*
- * Use percpu scan base for SSD to reduce lock contention on
- * cluster and swap cache. For HDD, sequential access is more
- * important.
- */
- if (si->flags & SWP_SOLIDSTATE)
- scan_base = this_cpu_read(*si->cluster_next_cpu);
- else
- scan_base = si->cluster_next;
+
+ /* For HDD, sequential access is more important. */
+ scan_base = si->cluster_next;
offset = scan_base;
- /* SSD algorithm */
- if (si->cluster_info) {
- if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order)) {
- if (order > 0)
- goto no_page;
- goto scan;
- }
- } else if (unlikely(!si->cluster_nr--)) {
+ if (unlikely(!si->cluster_nr--)) {
if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
si->cluster_nr = SWAPFILE_CLUSTER - 1;
goto checks;
@@ -895,8 +1104,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
/*
* If seek is expensive, start searching for new cluster from
* start of partition, to minimize the span of allocated swap.
- * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
- * case, just handled by scan_swap_map_try_ssd_cluster() above.
*/
scan_base = offset = si->lowest_bit;
last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
@@ -924,19 +1131,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
}
checks:
- if (si->cluster_info) {
- while (scan_swap_map_ssd_cluster_conflict(si, offset, order)) {
- /* take a break if we already got some slots */
- if (n_ret)
- goto done;
- if (!scan_swap_map_try_ssd_cluster(si, &offset,
- &scan_base, order)) {
- if (order > 0)
- goto no_page;
- goto scan;
- }
- }
- }
if (!(si->flags & SWP_WRITEOK))
goto no_page;
if (!si->highest_bit)
@@ -944,13 +1138,11 @@ checks:
if (offset > si->highest_bit)
scan_base = offset = si->lowest_bit;
- ci = lock_cluster(si, offset);
/* reuse swap entry of cache-only swap if not busy. */
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
- unlock_cluster(ci);
spin_unlock(&si->lock);
- swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
+ swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
spin_lock(&si->lock);
/* entry was freed successfully, try to use this again */
if (swap_was_freed > 0)
@@ -959,15 +1151,12 @@ checks:
}
if (si->swap_map[offset]) {
- unlock_cluster(ci);
if (!n_ret)
goto scan;
else
goto done;
}
memset(si->swap_map + offset, usage, nr_pages);
- add_cluster_info_page(si, si->cluster_info, offset, nr_pages);
- unlock_cluster(ci);
swap_range_alloc(si, offset, nr_pages);
slots[n_ret++] = swp_entry(si->type, offset);
@@ -988,13 +1177,7 @@ checks:
latency_ration = LATENCY_LIMIT;
}
- /* try to get more slots in cluster */
- if (si->cluster_info) {
- if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order))
- goto checks;
- if (order > 0)
- goto done;
- } else if (si->cluster_nr && !si->swap_map[++offset]) {
+ if (si->cluster_nr && !si->swap_map[++offset]) {
/* non-ssd case, still more slots in cluster? */
--si->cluster_nr;
goto checks;
@@ -1055,19 +1238,6 @@ no_page:
return n_ret;
}
-static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
-{
- unsigned long offset = idx * SWAPFILE_CLUSTER;
- struct swap_cluster_info *ci;
-
- ci = lock_cluster(si, offset);
- memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
- cluster_set_count_flag(ci, 0, 0);
- free_cluster(si, idx);
- unlock_cluster(ci);
- swap_range_free(si, offset, SWAPFILE_CLUSTER);
-}
-
int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
{
int order = swap_entry_order(entry_order);
@@ -1148,22 +1318,22 @@ noswap:
static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
- struct swap_info_struct *p;
+ struct swap_info_struct *si;
unsigned long offset;
if (!entry.val)
goto out;
- p = swp_swap_info(entry);
- if (!p)
+ si = swp_swap_info(entry);
+ if (!si)
goto bad_nofile;
- if (data_race(!(p->flags & SWP_USED)))
+ if (data_race(!(si->flags & SWP_USED)))
goto bad_device;
offset = swp_offset(entry);
- if (offset >= p->max)
+ if (offset >= si->max)
goto bad_offset;
- if (data_race(!p->swap_map[swp_offset(entry)]))
+ if (data_race(!si->swap_map[swp_offset(entry)]))
goto bad_free;
- return p;
+ return si;
bad_free:
pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
@@ -1196,14 +1366,14 @@ static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
return p;
}
-static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
unsigned long offset,
unsigned char usage)
{
unsigned char count;
unsigned char has_cache;
- count = p->swap_map[offset];
+ count = si->swap_map[offset];
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
@@ -1219,7 +1389,7 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
count = 0;
} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
if (count == COUNT_CONTINUED) {
- if (swap_count_continued(p, offset, count))
+ if (swap_count_continued(si, offset, count))
count = SWAP_MAP_MAX | COUNT_CONTINUED;
else
count = SWAP_MAP_MAX;
@@ -1229,9 +1399,9 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
usage = count | has_cache;
if (usage)
- WRITE_ONCE(p->swap_map[offset], usage);
+ WRITE_ONCE(si->swap_map[offset], usage);
else
- WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
+ WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
return usage;
}
@@ -1310,66 +1480,121 @@ put_out:
return NULL;
}
-static unsigned char __swap_entry_free(struct swap_info_struct *p,
+static unsigned char __swap_entry_free(struct swap_info_struct *si,
swp_entry_t entry)
{
struct swap_cluster_info *ci;
unsigned long offset = swp_offset(entry);
unsigned char usage;
- ci = lock_cluster_or_swap_info(p, offset);
- usage = __swap_entry_free_locked(p, offset, 1);
- unlock_cluster_or_swap_info(p, ci);
+ ci = lock_cluster_or_swap_info(si, offset);
+ usage = __swap_entry_free_locked(si, offset, 1);
+ unlock_cluster_or_swap_info(si, ci);
if (!usage)
free_swap_slot(entry);
return usage;
}
-static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
+static bool __swap_entries_free(struct swap_info_struct *si,
+ swp_entry_t entry, int nr)
{
- struct swap_cluster_info *ci;
unsigned long offset = swp_offset(entry);
+ unsigned int type = swp_type(entry);
+ struct swap_cluster_info *ci;
+ bool has_cache = false;
unsigned char count;
+ int i;
+
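+	/* Try batch freeing: only for multi-entry ranges of count-1 entries within one cluster. */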
+ if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1)
+ goto fallback;
+	/* Bail out if the range crosses into another cluster */
+ if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
+ goto fallback;
+
+ ci = lock_cluster_or_swap_info(si, offset);
+ if (!swap_is_last_map(si, offset, nr, &has_cache)) {
+ unlock_cluster_or_swap_info(si, ci);
+ goto fallback;
+ }
+ for (i = 0; i < nr; i++)
+ WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
+ unlock_cluster_or_swap_info(si, ci);
+
+ if (!has_cache) {
+ for (i = 0; i < nr; i++)
+ zswap_invalidate(swp_entry(si->type, offset + i));
+ spin_lock(&si->lock);
+ swap_entry_range_free(si, entry, nr);
+ spin_unlock(&si->lock);
+ }
+ return has_cache;
+
+fallback:
+ for (i = 0; i < nr; i++) {
+ if (data_race(si->swap_map[offset + i])) {
+ count = __swap_entry_free(si, swp_entry(type, offset + i));
+ if (count == SWAP_HAS_CACHE)
+ has_cache = true;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ }
+ return has_cache;
+}
- ci = lock_cluster(p, offset);
- count = p->swap_map[offset];
- VM_BUG_ON(count != SWAP_HAS_CACHE);
- p->swap_map[offset] = 0;
- dec_cluster_info_page(p, p->cluster_info, offset);
+/*
+ * Drop the last HAS_CACHE flag of the swap entries; the caller has to
+ * ensure all entries belong to the same cgroup.
+ */
+static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
+ unsigned int nr_pages)
+{
+ unsigned long offset = swp_offset(entry);
+ unsigned char *map = si->swap_map + offset;
+ unsigned char *map_end = map + nr_pages;
+ struct swap_cluster_info *ci;
+
+ ci = lock_cluster(si, offset);
+ do {
+ VM_BUG_ON(*map != SWAP_HAS_CACHE);
+ *map = 0;
+ } while (++map < map_end);
+ dec_cluster_info_page(si, ci, nr_pages);
unlock_cluster(ci);
- mem_cgroup_uncharge_swap(entry, 1);
- swap_range_free(p, offset, 1);
+ mem_cgroup_uncharge_swap(entry, nr_pages);
+ swap_range_free(si, offset, nr_pages);
}
-static void cluster_swap_free_nr(struct swap_info_struct *sis,
- unsigned long offset, int nr_pages)
+static void cluster_swap_free_nr(struct swap_info_struct *si,
+ unsigned long offset, int nr_pages,
+ unsigned char usage)
{
struct swap_cluster_info *ci;
DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 };
int i, nr;
- ci = lock_cluster_or_swap_info(sis, offset);
+ ci = lock_cluster_or_swap_info(si, offset);
while (nr_pages) {
nr = min(BITS_PER_LONG, nr_pages);
for (i = 0; i < nr; i++) {
- if (!__swap_entry_free_locked(sis, offset + i, 1))
+ if (!__swap_entry_free_locked(si, offset + i, usage))
bitmap_set(to_free, i, 1);
}
if (!bitmap_empty(to_free, BITS_PER_LONG)) {
- unlock_cluster_or_swap_info(sis, ci);
+ unlock_cluster_or_swap_info(si, ci);
for_each_set_bit(i, to_free, BITS_PER_LONG)
- free_swap_slot(swp_entry(sis->type, offset + i));
+ free_swap_slot(swp_entry(si->type, offset + i));
if (nr == nr_pages)
return;
bitmap_clear(to_free, 0, BITS_PER_LONG);
- ci = lock_cluster_or_swap_info(sis, offset);
+ ci = lock_cluster_or_swap_info(si, offset);
}
offset += nr;
nr_pages -= nr;
}
- unlock_cluster_or_swap_info(sis, ci);
+ unlock_cluster_or_swap_info(si, ci);
}
/*
@@ -1388,7 +1613,7 @@ void swap_free_nr(swp_entry_t entry, int nr_pages)
while (nr_pages) {
nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
- cluster_swap_free_nr(sis, offset, nr);
+ cluster_swap_free_nr(sis, offset, nr, 1);
offset += nr;
nr_pages -= nr;
}
@@ -1400,12 +1625,8 @@ void swap_free_nr(swp_entry_t entry, int nr_pages)
void put_swap_folio(struct folio *folio, swp_entry_t entry)
{
unsigned long offset = swp_offset(entry);
- unsigned long idx = offset / SWAPFILE_CLUSTER;
struct swap_cluster_info *ci;
struct swap_info_struct *si;
- unsigned char *map;
- unsigned int i, free_entries = 0;
- unsigned char val;
int size = 1 << swap_entry_order(folio_order(folio));
si = _swap_info_get(entry);
@@ -1413,24 +1634,14 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
return;
ci = lock_cluster_or_swap_info(si, offset);
- if (size == SWAPFILE_CLUSTER) {
- map = si->swap_map + offset;
- for (i = 0; i < SWAPFILE_CLUSTER; i++) {
- val = map[i];
- VM_BUG_ON(!(val & SWAP_HAS_CACHE));
- if (val == SWAP_HAS_CACHE)
- free_entries++;
- }
- if (free_entries == SWAPFILE_CLUSTER) {
- unlock_cluster_or_swap_info(si, ci);
- spin_lock(&si->lock);
- mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
- swap_free_cluster(si, idx);
- spin_unlock(&si->lock);
- return;
- }
+ if (size > 1 && swap_is_has_cache(si, offset, size)) {
+ unlock_cluster_or_swap_info(si, ci);
+ spin_lock(&si->lock);
+ swap_entry_range_free(si, entry, size);
+ spin_unlock(&si->lock);
+ return;
}
- for (i = 0; i < size; i++, entry.val++) {
+ for (int i = 0; i < size; i++, entry.val++) {
if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
unlock_cluster_or_swap_info(si, ci);
free_swap_slot(entry);
@@ -1470,7 +1681,7 @@ void swapcache_free_entries(swp_entry_t *entries, int n)
for (i = 0; i < n; ++i) {
p = swap_info_get_cont(entries[i], prev);
if (p)
- swap_entry_free(p, entries[i]);
+ swap_entry_range_free(p, entries[i], 1);
prev = p;
}
if (p)
@@ -1509,28 +1720,28 @@ int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
int swp_swapcount(swp_entry_t entry)
{
int count, tmp_count, n;
- struct swap_info_struct *p;
+ struct swap_info_struct *si;
struct swap_cluster_info *ci;
struct page *page;
pgoff_t offset;
unsigned char *map;
- p = _swap_info_get(entry);
- if (!p)
+ si = _swap_info_get(entry);
+ if (!si)
return 0;
offset = swp_offset(entry);
- ci = lock_cluster_or_swap_info(p, offset);
+ ci = lock_cluster_or_swap_info(si, offset);
- count = swap_count(p->swap_map[offset]);
+ count = swap_count(si->swap_map[offset]);
if (!(count & COUNT_CONTINUED))
goto out;
count &= ~COUNT_CONTINUED;
n = SWAP_MAP_MAX + 1;
- page = vmalloc_to_page(p->swap_map + offset);
+ page = vmalloc_to_page(si->swap_map + offset);
offset &= ~PAGE_MASK;
VM_BUG_ON(page_private(page) != SWP_CONTINUED);
@@ -1544,7 +1755,7 @@ int swp_swapcount(swp_entry_t entry)
n *= (SWAP_CONT_MAX + 1);
} while (tmp_count & COUNT_CONTINUED);
out:
- unlock_cluster_or_swap_info(p, ci);
+ unlock_cluster_or_swap_info(si, ci);
return count;
}
@@ -1590,16 +1801,7 @@ static bool folio_swapped(struct folio *folio)
return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
}
-/**
- * folio_free_swap() - Free the swap space used for this folio.
- * @folio: The folio to remove.
- *
- * If swap is getting full, or if there are no more mappings of this folio,
- * then call folio_free_swap to free its swap space.
- *
- * Return: true if we were able to release the swap space.
- */
-bool folio_free_swap(struct folio *folio)
+static bool folio_swapcache_freeable(struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -1607,8 +1809,6 @@ bool folio_free_swap(struct folio *folio)
return false;
if (folio_test_writeback(folio))
return false;
- if (folio_swapped(folio))
- return false;
/*
* Once hibernation has begun to create its image of memory,
@@ -1628,6 +1828,25 @@ bool folio_free_swap(struct folio *folio)
if (pm_suspended_storage())
return false;
+ return true;
+}
+
+/**
+ * folio_free_swap() - Free the swap space used for this folio.
+ * @folio: The folio to remove.
+ *
+ * If swap is getting full, or if there are no more mappings of this folio,
+ * then call folio_free_swap to free its swap space.
+ *
+ * Return: true if we were able to release the swap space.
+ */
+bool folio_free_swap(struct folio *folio)
+{
+ if (!folio_swapcache_freeable(folio))
+ return false;
+ if (folio_swapped(folio))
+ return false;
+
delete_from_swap_cache(folio);
folio_set_dirty(folio);
return true;
@@ -1647,11 +1866,9 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
const unsigned long start_offset = swp_offset(entry);
const unsigned long end_offset = start_offset + nr;
- unsigned int type = swp_type(entry);
struct swap_info_struct *si;
bool any_only_cache = false;
unsigned long offset;
- unsigned char count;
if (non_swap_entry(entry))
return;
@@ -1666,15 +1883,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
/*
* First free all entries in the range.
*/
- for (offset = start_offset; offset < end_offset; offset++) {
- if (data_race(si->swap_map[offset])) {
- count = __swap_entry_free(si, swp_entry(type, offset));
- if (count == SWAP_HAS_CACHE)
- any_only_cache = true;
- } else {
- WARN_ON_ONCE(1);
- }
- }
+ any_only_cache = __swap_entries_free(si, entry, nr);
/*
* Short-circuit the below loop if none of the entries had their
@@ -1704,7 +1913,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
* to the next boundary.
*/
nr = __try_to_reclaim_swap(si, offset,
- TTRS_UNMAPPED | TTRS_FULL);
+ TTRS_UNMAPPED | TTRS_FULL);
if (nr == 0)
nr = 1;
else if (nr < 0)
@@ -1979,7 +2188,6 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
folio = swap_cache_get_folio(entry, vma, addr);
if (!folio) {
- struct page *page;
struct vm_fault vmf = {
.vma = vma,
.address = addr,
@@ -1987,10 +2195,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
.pmd = pmd,
};
- page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+ folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
&vmf);
- if (page)
- folio = page_folio(page);
}
if (!folio) {
swp_count = READ_ONCE(si->swap_map[offset]);
@@ -2397,52 +2603,54 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
return generic_swapfile_activate(sis, swap_file, span);
}
-static int swap_node(struct swap_info_struct *p)
+static int swap_node(struct swap_info_struct *si)
{
struct block_device *bdev;
- if (p->bdev)
- bdev = p->bdev;
+ if (si->bdev)
+ bdev = si->bdev;
else
- bdev = p->swap_file->f_inode->i_sb->s_bdev;
+ bdev = si->swap_file->f_inode->i_sb->s_bdev;
return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
}
-static void setup_swap_info(struct swap_info_struct *p, int prio,
+static void setup_swap_info(struct swap_info_struct *si, int prio,
unsigned char *swap_map,
- struct swap_cluster_info *cluster_info)
+ struct swap_cluster_info *cluster_info,
+ unsigned long *zeromap)
{
int i;
if (prio >= 0)
- p->prio = prio;
+ si->prio = prio;
else
- p->prio = --least_priority;
+ si->prio = --least_priority;
/*
* the plist prio is negated because plist ordering is
* low-to-high, while swap ordering is high-to-low
*/
- p->list.prio = -p->prio;
+ si->list.prio = -si->prio;
for_each_node(i) {
- if (p->prio >= 0)
- p->avail_lists[i].prio = -p->prio;
+ if (si->prio >= 0)
+ si->avail_lists[i].prio = -si->prio;
else {
- if (swap_node(p) == i)
- p->avail_lists[i].prio = 1;
+ if (swap_node(si) == i)
+ si->avail_lists[i].prio = 1;
else
- p->avail_lists[i].prio = -p->prio;
+ si->avail_lists[i].prio = -si->prio;
}
}
- p->swap_map = swap_map;
- p->cluster_info = cluster_info;
+ si->swap_map = swap_map;
+ si->cluster_info = cluster_info;
+ si->zeromap = zeromap;
}
-static void _enable_swap_info(struct swap_info_struct *p)
+static void _enable_swap_info(struct swap_info_struct *si)
{
- p->flags |= SWP_WRITEOK;
- atomic_long_add(p->pages, &nr_swap_pages);
- total_swap_pages += p->pages;
+ si->flags |= SWP_WRITEOK;
+ atomic_long_add(si->pages, &nr_swap_pages);
+ total_swap_pages += si->pages;
assert_spin_locked(&swap_lock);
/*
@@ -2455,40 +2663,41 @@ static void _enable_swap_info(struct swap_info_struct *p)
* which allocates swap pages from the highest available priority
* swap_info_struct.
*/
- plist_add(&p->list, &swap_active_head);
+ plist_add(&si->list, &swap_active_head);
/* add to available list iff swap device is not full */
- if (p->highest_bit)
- add_to_avail_list(p);
+ if (si->highest_bit)
+ add_to_avail_list(si);
}
-static void enable_swap_info(struct swap_info_struct *p, int prio,
+static void enable_swap_info(struct swap_info_struct *si, int prio,
unsigned char *swap_map,
- struct swap_cluster_info *cluster_info)
+ struct swap_cluster_info *cluster_info,
+ unsigned long *zeromap)
{
spin_lock(&swap_lock);
- spin_lock(&p->lock);
- setup_swap_info(p, prio, swap_map, cluster_info);
- spin_unlock(&p->lock);
+ spin_lock(&si->lock);
+ setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
+ spin_unlock(&si->lock);
spin_unlock(&swap_lock);
/*
* Finished initializing swap device, now it's safe to reference it.
*/
- percpu_ref_resurrect(&p->users);
+ percpu_ref_resurrect(&si->users);
spin_lock(&swap_lock);
- spin_lock(&p->lock);
- _enable_swap_info(p);
- spin_unlock(&p->lock);
+ spin_lock(&si->lock);
+ _enable_swap_info(si);
+ spin_unlock(&si->lock);
spin_unlock(&swap_lock);
}
-static void reinsert_swap_info(struct swap_info_struct *p)
+static void reinsert_swap_info(struct swap_info_struct *si)
{
spin_lock(&swap_lock);
- spin_lock(&p->lock);
- setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
- _enable_swap_info(p);
- spin_unlock(&p->lock);
+ spin_lock(&si->lock);
+ setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
+ _enable_swap_info(si);
+ spin_unlock(&si->lock);
spin_unlock(&swap_lock);
}
@@ -2511,6 +2720,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
unsigned char *swap_map;
+ unsigned long *zeromap;
struct swap_cluster_info *cluster_info;
struct file *swap_file, *victim;
struct address_space *mapping;
@@ -2633,6 +2843,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
p->max = 0;
swap_map = p->swap_map;
p->swap_map = NULL;
+ zeromap = p->zeromap;
+ p->zeromap = NULL;
cluster_info = p->cluster_info;
p->cluster_info = NULL;
spin_unlock(&p->lock);
@@ -2645,6 +2857,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
free_percpu(p->cluster_next_cpu);
p->cluster_next_cpu = NULL;
vfree(swap_map);
+ kvfree(zeromap);
kvfree(cluster_info);
/* Destroy swap account information */
swap_cgroup_swapoff(p->type);
@@ -2874,20 +3087,20 @@ static struct swap_info_struct *alloc_swap_info(void)
return p;
}
-static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
+static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
{
if (S_ISBLK(inode->i_mode)) {
- p->bdev = I_BDEV(inode);
+ si->bdev = I_BDEV(inode);
/*
* Zoned block devices contain zones that have a sequential
* write only restriction. Hence zoned block devices are not
* suitable for swapping. Disallow them here.
*/
- if (bdev_is_zoned(p->bdev))
+ if (bdev_is_zoned(si->bdev))
return -EINVAL;
- p->flags |= SWP_BLKDEV;
+ si->flags |= SWP_BLKDEV;
} else if (S_ISREG(inode->i_mode)) {
- p->bdev = inode->i_sb->s_bdev;
+ si->bdev = inode->i_sb->s_bdev;
}
return 0;
@@ -2922,7 +3135,7 @@ __weak unsigned long arch_max_swapfile_size(void)
return generic_max_swapfile_size();
}
-static unsigned long read_swap_header(struct swap_info_struct *p,
+static unsigned long read_swap_header(struct swap_info_struct *si,
union swap_header *swap_header,
struct inode *inode)
{
@@ -2953,9 +3166,9 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
return 0;
}
- p->lowest_bit = 1;
- p->cluster_next = 1;
- p->cluster_nr = 0;
+ si->lowest_bit = 1;
+ si->cluster_next = 1;
+ si->cluster_nr = 0;
maxpages = swapfile_maximum_size;
last_page = swap_header->info.last_page;
@@ -2973,7 +3186,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
}
- p->highest_bit = maxpages - 1;
+ si->highest_bit = maxpages - 1;
if (!maxpages)
return 0;
@@ -2997,25 +3210,18 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
#define SWAP_CLUSTER_COLS \
max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
-static int setup_swap_map_and_extents(struct swap_info_struct *p,
+static int setup_swap_map_and_extents(struct swap_info_struct *si,
union swap_header *swap_header,
unsigned char *swap_map,
- struct swap_cluster_info *cluster_info,
unsigned long maxpages,
sector_t *span)
{
- unsigned int j, k;
unsigned int nr_good_pages;
+ unsigned long i;
int nr_extents;
- unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
- unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
- unsigned long i, idx;
nr_good_pages = maxpages - 1; /* omit header page */
- cluster_list_init(&p->free_clusters);
- cluster_list_init(&p->discard_clusters);
-
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr == 0 || page_nr > swap_header->info.last_page)
@@ -3023,40 +3229,87 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
if (page_nr < maxpages) {
swap_map[page_nr] = SWAP_MAP_BAD;
nr_good_pages--;
- /*
- * Haven't marked the cluster free yet, no list
- * operation involved
- */
- inc_cluster_info_page(p, cluster_info, page_nr);
}
}
- /* Haven't marked the cluster free yet, no list operation involved */
- for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
- inc_cluster_info_page(p, cluster_info, i);
-
if (nr_good_pages) {
swap_map[0] = SWAP_MAP_BAD;
- /*
- * Not mark the cluster free yet, no list
- * operation involved
- */
- inc_cluster_info_page(p, cluster_info, 0);
- p->max = maxpages;
- p->pages = nr_good_pages;
- nr_extents = setup_swap_extents(p, span);
+ si->max = maxpages;
+ si->pages = nr_good_pages;
+ nr_extents = setup_swap_extents(si, span);
if (nr_extents < 0)
return nr_extents;
- nr_good_pages = p->pages;
+ nr_good_pages = si->pages;
}
if (!nr_good_pages) {
pr_warn("Empty swap-file\n");
return -EINVAL;
}
+ return nr_extents;
+}
+
+static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
+ union swap_header *swap_header,
+ unsigned long maxpages)
+{
+ unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
+ unsigned long col = si->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
+ struct swap_cluster_info *cluster_info;
+ unsigned long i, j, k, idx;
+ int cpu, err = -ENOMEM;
+
+ cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
if (!cluster_info)
- return nr_extents;
+ goto err;
+
+ for (i = 0; i < nr_clusters; i++)
+ spin_lock_init(&cluster_info[i].lock);
+
+ si->cluster_next_cpu = alloc_percpu(unsigned int);
+ if (!si->cluster_next_cpu)
+ goto err_free;
+
+ /* Random start position to help with wear leveling */
+ for_each_possible_cpu(cpu)
+ per_cpu(*si->cluster_next_cpu, cpu) =
+ get_random_u32_inclusive(1, si->highest_bit);
+
+ si->percpu_cluster = alloc_percpu(struct percpu_cluster);
+ if (!si->percpu_cluster)
+ goto err_free;
+
+ for_each_possible_cpu(cpu) {
+ struct percpu_cluster *cluster;
+
+ cluster = per_cpu_ptr(si->percpu_cluster, cpu);
+ for (i = 0; i < SWAP_NR_ORDERS; i++)
+ cluster->next[i] = SWAP_NEXT_INVALID;
+ }
+
+ /*
+ * Mark unusable pages as unavailable. The clusters aren't
+ * marked free yet, so no list operations are involved yet.
+ *
+ * See setup_swap_map_and_extents(): header page, bad pages,
+ * and the EOF part of the last cluster.
+ */
+ inc_cluster_info_page(si, cluster_info, 0);
+ for (i = 0; i < swap_header->info.nr_badpages; i++)
+ inc_cluster_info_page(si, cluster_info,
+ swap_header->info.badpages[i]);
+ for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
+ inc_cluster_info_page(si, cluster_info, i);
+
+ INIT_LIST_HEAD(&si->free_clusters);
+ INIT_LIST_HEAD(&si->full_clusters);
+ INIT_LIST_HEAD(&si->discard_clusters);
+ for (i = 0; i < SWAP_NR_ORDERS; i++) {
+ INIT_LIST_HEAD(&si->nonfull_clusters[i]);
+ INIT_LIST_HEAD(&si->frag_clusters[i]);
+ si->frag_cluster_nr[i] = 0;
+ }
/*
* Reduce false cache line sharing between cluster_info and
@@ -3065,22 +3318,32 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
j = (k + col) % SWAP_CLUSTER_COLS;
for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
+ struct swap_cluster_info *ci;
idx = i * SWAP_CLUSTER_COLS + j;
+ ci = cluster_info + idx;
if (idx >= nr_clusters)
continue;
- if (cluster_count(&cluster_info[idx]))
+ if (ci->count) {
+ ci->flags = CLUSTER_FLAG_NONFULL;
+ list_add_tail(&ci->list, &si->nonfull_clusters[0]);
continue;
- cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
- cluster_list_add_tail(&p->free_clusters, cluster_info,
- idx);
+ }
+ ci->flags = CLUSTER_FLAG_FREE;
+ list_add_tail(&ci->list, &si->free_clusters);
}
}
- return nr_extents;
+
+ return cluster_info;
+
+err_free:
+ kvfree(cluster_info);
+err:
+ return ERR_PTR(err);
}
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
- struct swap_info_struct *p;
+ struct swap_info_struct *si;
struct filename *name;
struct file *swap_file = NULL;
struct address_space *mapping;
@@ -3092,8 +3355,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
sector_t span;
unsigned long maxpages;
unsigned char *swap_map = NULL;
+ unsigned long *zeromap = NULL;
struct swap_cluster_info *cluster_info = NULL;
- struct page *page = NULL;
+ struct folio *folio = NULL;
struct inode *inode = NULL;
bool inced_nr_rotate_swap = false;
@@ -3106,11 +3370,11 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (!swap_avail_heads)
return -ENOMEM;
- p = alloc_swap_info();
- if (IS_ERR(p))
- return PTR_ERR(p);
+ si = alloc_swap_info();
+ if (IS_ERR(si))
+ return PTR_ERR(si);
- INIT_WORK(&p->discard_work, swap_discard_work);
+ INIT_WORK(&si->discard_work, swap_discard_work);
name = getname(specialfile);
if (IS_ERR(name)) {
@@ -3125,12 +3389,12 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap;
}
- p->swap_file = swap_file;
+ si->swap_file = swap_file;
mapping = swap_file->f_mapping;
dentry = swap_file->f_path.dentry;
inode = mapping->host;
- error = claim_swapfile(p, inode);
+ error = claim_swapfile(si, inode);
if (unlikely(error))
goto bad_swap;
@@ -3151,14 +3415,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -EINVAL;
goto bad_swap_unlock_inode;
}
- page = read_mapping_page(mapping, 0, swap_file);
- if (IS_ERR(page)) {
- error = PTR_ERR(page);
+ folio = read_mapping_folio(mapping, 0, swap_file);
+ if (IS_ERR(folio)) {
+ error = PTR_ERR(folio);
goto bad_swap_unlock_inode;
}
- swap_header = kmap(page);
+ swap_header = kmap_local_folio(folio, 0);
- maxpages = read_swap_header(p, swap_header, inode);
+ maxpages = read_swap_header(si, swap_header, inode);
if (unlikely(!maxpages)) {
error = -EINVAL;
goto bad_swap_unlock_inode;
@@ -3171,79 +3435,57 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap_unlock_inode;
}
- if (p->bdev && bdev_stable_writes(p->bdev))
- p->flags |= SWP_STABLE_WRITES;
+ error = swap_cgroup_swapon(si->type, maxpages);
+ if (error)
+ goto bad_swap_unlock_inode;
- if (p->bdev && bdev_synchronous(p->bdev))
- p->flags |= SWP_SYNCHRONOUS_IO;
+ nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map,
+ maxpages, &span);
+ if (unlikely(nr_extents < 0)) {
+ error = nr_extents;
+ goto bad_swap_unlock_inode;
+ }
- if (p->bdev && bdev_nonrot(p->bdev)) {
- int cpu, i;
- unsigned long ci, nr_cluster;
+ /*
+	 * Use kvmalloc_array instead of bitmap_zalloc as the allocation order
+	 * might be above MAX_PAGE_ORDER in case of a large swap file.
+ */
+ zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!zeromap) {
+ error = -ENOMEM;
+ goto bad_swap_unlock_inode;
+ }
- p->flags |= SWP_SOLIDSTATE;
- p->cluster_next_cpu = alloc_percpu(unsigned int);
- if (!p->cluster_next_cpu) {
- error = -ENOMEM;
- goto bad_swap_unlock_inode;
- }
- /*
- * select a random position to start with to help wear leveling
- * SSD
- */
- for_each_possible_cpu(cpu) {
- per_cpu(*p->cluster_next_cpu, cpu) =
- get_random_u32_inclusive(1, p->highest_bit);
- }
- nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
+ if (si->bdev && bdev_stable_writes(si->bdev))
+ si->flags |= SWP_STABLE_WRITES;
- cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
- GFP_KERNEL);
- if (!cluster_info) {
- error = -ENOMEM;
- goto bad_swap_unlock_inode;
- }
+ if (si->bdev && bdev_synchronous(si->bdev))
+ si->flags |= SWP_SYNCHRONOUS_IO;
- for (ci = 0; ci < nr_cluster; ci++)
- spin_lock_init(&((cluster_info + ci)->lock));
+ if (si->bdev && bdev_nonrot(si->bdev)) {
+ si->flags |= SWP_SOLIDSTATE;
- p->percpu_cluster = alloc_percpu(struct percpu_cluster);
- if (!p->percpu_cluster) {
- error = -ENOMEM;
+ cluster_info = setup_clusters(si, swap_header, maxpages);
+ if (IS_ERR(cluster_info)) {
+ error = PTR_ERR(cluster_info);
+ cluster_info = NULL;
goto bad_swap_unlock_inode;
}
- for_each_possible_cpu(cpu) {
- struct percpu_cluster *cluster;
-
- cluster = per_cpu_ptr(p->percpu_cluster, cpu);
- for (i = 0; i < SWAP_NR_ORDERS; i++)
- cluster->next[i] = SWAP_NEXT_INVALID;
- }
} else {
atomic_inc(&nr_rotate_swap);
inced_nr_rotate_swap = true;
}
- error = swap_cgroup_swapon(p->type, maxpages);
- if (error)
- goto bad_swap_unlock_inode;
-
- nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
- cluster_info, maxpages, &span);
- if (unlikely(nr_extents < 0)) {
- error = nr_extents;
- goto bad_swap_unlock_inode;
- }
-
if ((swap_flags & SWAP_FLAG_DISCARD) &&
- p->bdev && bdev_max_discard_sectors(p->bdev)) {
+ si->bdev && bdev_max_discard_sectors(si->bdev)) {
/*
* When discard is enabled for swap with no particular
* policy flagged, we set all swap discard flags here in
* order to sustain backward compatibility with older
* swapon(8) releases.
*/
- p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
+ si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
SWP_PAGE_DISCARD);
/*
@@ -3253,24 +3495,24 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
* Now it's time to adjust the p->flags accordingly.
*/
if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
- p->flags &= ~SWP_PAGE_DISCARD;
+ si->flags &= ~SWP_PAGE_DISCARD;
else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
- p->flags &= ~SWP_AREA_DISCARD;
+ si->flags &= ~SWP_AREA_DISCARD;
/* issue a swapon-time discard if it's still required */
- if (p->flags & SWP_AREA_DISCARD) {
- int err = discard_swap(p);
+ if (si->flags & SWP_AREA_DISCARD) {
+ int err = discard_swap(si);
if (unlikely(err))
pr_err("swapon: discard_swap(%p): %d\n",
- p, err);
+ si, err);
}
}
- error = init_swap_address_space(p->type, maxpages);
+ error = init_swap_address_space(si->type, maxpages);
if (error)
goto bad_swap_unlock_inode;
- error = zswap_swapon(p->type, maxpages);
+ error = zswap_swapon(si->type, maxpages);
if (error)
goto free_swap_address_space;
@@ -3290,15 +3532,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (swap_flags & SWAP_FLAG_PREFER)
prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
- enable_swap_info(p, prio, swap_map, cluster_info);
+ enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
- K(p->pages), name->name, p->prio, nr_extents,
+ K(si->pages), name->name, si->prio, nr_extents,
K((unsigned long long)span),
- (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
- (p->flags & SWP_DISCARDABLE) ? "D" : "",
- (p->flags & SWP_AREA_DISCARD) ? "s" : "",
- (p->flags & SWP_PAGE_DISCARD) ? "c" : "");
+ (si->flags & SWP_SOLIDSTATE) ? "SS" : "",
+ (si->flags & SWP_DISCARDABLE) ? "D" : "",
+ (si->flags & SWP_AREA_DISCARD) ? "s" : "",
+ (si->flags & SWP_PAGE_DISCARD) ? "c" : "");
mutex_unlock(&swapon_mutex);
atomic_inc(&proc_poll_event);
@@ -3307,34 +3549,33 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = 0;
goto out;
free_swap_zswap:
- zswap_swapoff(p->type);
+ zswap_swapoff(si->type);
free_swap_address_space:
- exit_swap_address_space(p->type);
+ exit_swap_address_space(si->type);
bad_swap_unlock_inode:
inode_unlock(inode);
bad_swap:
- free_percpu(p->percpu_cluster);
- p->percpu_cluster = NULL;
- free_percpu(p->cluster_next_cpu);
- p->cluster_next_cpu = NULL;
+ free_percpu(si->percpu_cluster);
+ si->percpu_cluster = NULL;
+ free_percpu(si->cluster_next_cpu);
+ si->cluster_next_cpu = NULL;
inode = NULL;
- destroy_swap_extents(p);
- swap_cgroup_swapoff(p->type);
+ destroy_swap_extents(si);
+ swap_cgroup_swapoff(si->type);
spin_lock(&swap_lock);
- p->swap_file = NULL;
- p->flags = 0;
+ si->swap_file = NULL;
+ si->flags = 0;
spin_unlock(&swap_lock);
vfree(swap_map);
+ kvfree(zeromap);
kvfree(cluster_info);
if (inced_nr_rotate_swap)
atomic_dec(&nr_rotate_swap);
if (swap_file)
filp_close(swap_file, NULL);
out:
- if (page && !IS_ERR(page)) {
- kunmap(page);
- put_page(page);
- }
+ if (!IS_ERR_OR_NULL(folio))
+ folio_release_kmap(folio, swap_header);
if (name)
putname(name);
if (inode)
@@ -3362,7 +3603,7 @@ void si_swapinfo(struct sysinfo *val)
}
/*
- * Verify that a swap entry is valid and increment its swap map count.
+ * Verify that nr swap entries are valid and increment their swap map counts.
*
* Returns error code in following case.
* - success -> 0
@@ -3372,63 +3613,76 @@ void si_swapinfo(struct sysinfo *val)
* - swap-cache reference is requested but the entry is not used. -> ENOENT
* - swap-mapped reference requested but needs continued swap count. -> ENOMEM
*/
-static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
+static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
{
- struct swap_info_struct *p;
+ struct swap_info_struct *si;
struct swap_cluster_info *ci;
unsigned long offset;
unsigned char count;
unsigned char has_cache;
- int err;
+ int err, i;
- p = swp_swap_info(entry);
+ si = swp_swap_info(entry);
offset = swp_offset(entry);
- ci = lock_cluster_or_swap_info(p, offset);
-
- count = p->swap_map[offset];
-
- /*
- * swapin_readahead() doesn't check if a swap entry is valid, so the
- * swap entry could be SWAP_MAP_BAD. Check here with lock held.
- */
- if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
- err = -ENOENT;
- goto unlock_out;
- }
+ VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
+ VM_WARN_ON(usage == 1 && nr > 1);
+ ci = lock_cluster_or_swap_info(si, offset);
- has_cache = count & SWAP_HAS_CACHE;
- count &= ~SWAP_HAS_CACHE;
err = 0;
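+	/* First pass: validate every entry before modifying any of them. */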
+ for (i = 0; i < nr; i++) {
+ count = si->swap_map[offset + i];
- if (usage == SWAP_HAS_CACHE) {
+ /*
+ * swapin_readahead() doesn't check if a swap entry is valid, so the
+ * swap entry could be SWAP_MAP_BAD. Check here with lock held.
+ */
+ if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
+ err = -ENOENT;
+ goto unlock_out;
+ }
- /* set SWAP_HAS_CACHE if there is no cache and entry is used */
- if (!has_cache && count)
- has_cache = SWAP_HAS_CACHE;
- else if (has_cache) /* someone else added cache */
- err = -EEXIST;
- else /* no users remaining */
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
+
+ if (!count && !has_cache) {
err = -ENOENT;
+ } else if (usage == SWAP_HAS_CACHE) {
+ if (has_cache)
+ err = -EEXIST;
+ } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
+ err = -EINVAL;
+ }
+
+ if (err)
+ goto unlock_out;
+ }
- } else if (count || has_cache) {
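+	/* Second pass: all entries validated, commit the new counts. */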
+ for (i = 0; i < nr; i++) {
+ count = si->swap_map[offset + i];
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
- if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
+ if (usage == SWAP_HAS_CACHE)
+ has_cache = SWAP_HAS_CACHE;
+ else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
count += usage;
- else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
- err = -EINVAL;
- else if (swap_count_continued(p, offset, count))
+ else if (swap_count_continued(si, offset + i, count))
count = COUNT_CONTINUED;
- else
+ else {
+ /*
+			 * No need to roll back earlier changes, because if
+			 * usage == 1, then nr must be 1.
+ */
err = -ENOMEM;
- } else
- err = -ENOENT; /* unused swap entry */
+ goto unlock_out;
+ }
- if (!err)
- WRITE_ONCE(p->swap_map[offset], count | has_cache);
+ WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
+ }
unlock_out:
- unlock_cluster_or_swap_info(p, ci);
+ unlock_cluster_or_swap_info(si, ci);
return err;
}
@@ -3436,9 +3690,9 @@ unlock_out:
* Help swapoff by noting that swap entry belongs to shmem/tmpfs
* (in which case its reference count is never incremented).
*/
-void swap_shmem_alloc(swp_entry_t entry)
+void swap_shmem_alloc(swp_entry_t entry, int nr)
{
- __swap_duplicate(entry, SWAP_MAP_SHMEM);
+ __swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
}
/*
@@ -3452,35 +3706,29 @@ int swap_duplicate(swp_entry_t entry)
{
int err = 0;
- while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
+ while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
err = add_swap_count_continuation(entry, GFP_ATOMIC);
return err;
}
/*
- * @entry: swap entry for which we allocate swap cache.
+ * @entry: the first of the nr swap entries for which we allocate swap cache.
*
- * Called when allocating swap cache for existing swap entry,
+ * Called when allocating swap cache for existing swap entries.
* This can return error codes. Returns 0 at success.
* -EEXIST means there is a swap cache.
* Note: return code is different from swap_duplicate().
*/
-int swapcache_prepare(swp_entry_t entry)
+int swapcache_prepare(swp_entry_t entry, int nr)
{
- return __swap_duplicate(entry, SWAP_HAS_CACHE);
+ return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
}
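+
+/*
+ * Illustrative sketch (not lifted from an in-tree caller): with the new nr
+ * argument, a caller installing a large folio that spans nr_pages swap
+ * entries can reserve all of its swap cache slots in one call:
+ *
+ *	err = swapcache_prepare(entry, nr_pages);
+ *	if (err == -EEXIST)
+ *		... another task owns (part of) the range, back off ...
+ */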
-void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
- struct swap_cluster_info *ci;
unsigned long offset = swp_offset(entry);
- unsigned char usage;
- ci = lock_cluster_or_swap_info(si, offset);
- usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
- unlock_cluster_or_swap_info(si, ci);
- if (!usage)
- free_swap_slot(entry);
+ cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
}
struct swap_info_struct *swp_swap_info(swp_entry_t entry)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index acc56c75ba99..ce13c4062647 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -391,7 +391,7 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
struct page *page;
int ret;
- ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
+ ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
/* Our caller expects us to return -EFAULT if we failed to find folio */
if (ret == -ENOENT)
ret = -EFAULT;
@@ -1763,3 +1763,171 @@ out:
VM_WARN_ON(!moved && !err);
return moved ? moved : err;
}
+
+static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
+
+ vm_flags_reset(vma, flags);
+ /*
+ * For shared mappings, we want to enable writenotify while
+ * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
+ * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
+ */
+ if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
+ vma_set_page_prot(vma);
+}
+
+static void userfaultfd_set_ctx(struct vm_area_struct *vma,
+ struct userfaultfd_ctx *ctx,
+ unsigned long flags)
+{
+ vma_start_write(vma);
+ vma->vm_userfaultfd_ctx = (struct vm_userfaultfd_ctx){ctx};
+ userfaultfd_set_vm_flags(vma,
+ (vma->vm_flags & ~__VM_UFFD_FLAGS) | flags);
+}
+
+void userfaultfd_reset_ctx(struct vm_area_struct *vma)
+{
+ userfaultfd_set_ctx(vma, NULL, 0);
+}
+
+struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ struct vm_area_struct *ret;
+
+ /* Reset ptes for the whole vma range if wr-protected */
+ if (userfaultfd_wp(vma))
+ uffd_wp_range(vma, start, end - start, false);
+
+ ret = vma_modify_flags_uffd(vmi, prev, vma, start, end,
+ vma->vm_flags & ~__VM_UFFD_FLAGS,
+ NULL_VM_UFFD_CTX);
+
+ /*
+ * In the vma_merge() successful mprotect-like case 8:
+ * the next vma was merged into the current one and
+ * the current one has not been updated yet.
+ */
+ if (!IS_ERR(ret))
+ userfaultfd_reset_ctx(ret);
+
+ return ret;
+}
+
+/* Assumes mmap write lock taken, and mm_struct pinned. */
+int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags,
+ unsigned long start, unsigned long end,
+ bool wp_async)
+{
+ VMA_ITERATOR(vmi, ctx->mm, start);
+ struct vm_area_struct *prev = vma_prev(&vmi);
+ unsigned long vma_end;
+ unsigned long new_flags;
+
+ if (vma->vm_start < start)
+ prev = vma;
+
+ for_each_vma_range(vmi, vma, end) {
+ cond_resched();
+
+ BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
+ BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
+ vma->vm_userfaultfd_ctx.ctx != ctx);
+ WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
+ /*
+ * Nothing to do: this vma is already registered into this
+ * userfaultfd and with the right tracking mode too.
+ */
+ if (vma->vm_userfaultfd_ctx.ctx == ctx &&
+ (vma->vm_flags & vm_flags) == vm_flags)
+ goto skip;
+
+ if (vma->vm_start > start)
+ start = vma->vm_start;
+ vma_end = min(end, vma->vm_end);
+
+ new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
+ vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
+ new_flags,
+ (struct vm_userfaultfd_ctx){ctx});
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ /*
+ * In the vma_merge() successful mprotect-like case 8:
+ * the next vma was merged into the current one and
+ * the current one has not been updated yet.
+ */
+ userfaultfd_set_ctx(vma, ctx, vm_flags);
+
+ if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
+ hugetlb_unshare_all_pmds(vma);
+
+skip:
+ prev = vma;
+ start = vma->vm_end;
+ }
+
+ return 0;
+}
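+
+/*
+ * Illustrative sketch of the calling convention (a simplified version of the
+ * UFFDIO_REGISTER path; validation checks elided):
+ *
+ *	mmap_write_lock(mm);
+ *	vma = find_vma_intersection(mm, start, end);
+ *	if (vma)
+ *		ret = userfaultfd_register_range(ctx, vma, vm_flags,
+ *						 start, end, wp_async);
+ *	mmap_write_unlock(mm);
+ */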
+
+void userfaultfd_release_new(struct userfaultfd_ctx *ctx)
+{
+ struct mm_struct *mm = ctx->mm;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+
+	/* the various vma->vm_userfaultfd_ctx fields still point to it */
+ mmap_write_lock(mm);
+ for_each_vma(vmi, vma) {
+ if (vma->vm_userfaultfd_ctx.ctx == ctx)
+ userfaultfd_reset_ctx(vma);
+ }
+ mmap_write_unlock(mm);
+}
+
+void userfaultfd_release_all(struct mm_struct *mm,
+ struct userfaultfd_ctx *ctx)
+{
+ struct vm_area_struct *vma, *prev;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ if (!mmget_not_zero(mm))
+ return;
+
+ /*
+ * Flush page faults out of all CPUs. NOTE: all page faults
+ * must be retried without returning VM_FAULT_SIGBUS if
+	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
+ * changes while handle_userfault released the mmap_lock. So
+ * it's critical that released is set to true (above), before
+ * taking the mmap_lock for writing.
+ */
+ mmap_write_lock(mm);
+ prev = NULL;
+ for_each_vma(vmi, vma) {
+ cond_resched();
+ BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
+ !!(vma->vm_flags & __VM_UFFD_FLAGS));
+ if (vma->vm_userfaultfd_ctx.ctx != ctx) {
+ prev = vma;
+ continue;
+ }
+
+ vma = userfaultfd_clear_vma(&vmi, prev, vma,
+ vma->vm_start, vma->vm_end);
+ prev = vma;
+ }
+ mmap_write_unlock(mm);
+ mmput(mm);
+}
diff --git a/mm/util.c b/mm/util.c
index bd283e2132e0..4f1275023eb7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -463,7 +463,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
if (gap + pad > gap)
gap += pad;
- if (gap < MIN_GAP)
+ if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
gap = MIN_GAP;
else if (gap > MAX_GAP)
gap = MAX_GAP;
@@ -608,6 +608,28 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
}
EXPORT_SYMBOL(vm_mmap);
+static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
+{
+ /*
+ * We want to attempt a large physically contiguous block first because
+ * it is less likely to fragment multiple larger blocks and therefore
+ * contribute to a long term fragmentation less than vmalloc fallback.
+ * However make sure that larger requests are not too disruptive - no
+ * OOM killer and no allocation failure warnings as we have a fallback.
+ */
+ if (size > PAGE_SIZE) {
+ flags |= __GFP_NOWARN;
+
+ if (!(flags & __GFP_RETRY_MAYFAIL))
+ flags |= __GFP_NORETRY;
+
+ /* nofail semantic is implemented by the vmalloc fallback */
+ flags &= ~__GFP_NOFAIL;
+ }
+
+ return flags;
+}
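+
+/*
+ * For example (illustrative only): a 64KiB request made with
+ * GFP_KERNEL | __GFP_NOFAIL is issued to kmalloc as
+ * GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, since the nofail guarantee is
+ * provided by the vmalloc fallback rather than by the page allocator.
+ */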
+
/**
* __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
* failure, fall back to non-contiguous (vmalloc) allocation.
@@ -627,32 +649,15 @@ EXPORT_SYMBOL(vm_mmap);
*/
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
- gfp_t kmalloc_flags = flags;
void *ret;
/*
- * We want to attempt a large physically contiguous block first because
- * it is less likely to fragment multiple larger blocks and therefore
- * contribute to a long term fragmentation less than vmalloc fallback.
- * However make sure that larger requests are not too disruptive - no
- * OOM killer and no allocation failure warnings as we have a fallback.
- */
- if (size > PAGE_SIZE) {
- kmalloc_flags |= __GFP_NOWARN;
-
- if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
- kmalloc_flags |= __GFP_NORETRY;
-
- /* nofail semantic is implemented by the vmalloc fallback */
- kmalloc_flags &= ~__GFP_NOFAIL;
- }
-
- ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b), kmalloc_flags, node);
-
- /*
* It doesn't really make sense to fallback to vmalloc for sub page
* requests
*/
+ ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
+ kmalloc_gfp_adjust(flags, size),
+ node);
if (ret || size <= PAGE_SIZE)
return ret;
@@ -715,18 +720,53 @@ void kvfree_sensitive(const void *addr, size_t len)
}
EXPORT_SYMBOL(kvfree_sensitive);
-void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+/**
+ * kvrealloc - reallocate memory; contents remain unchanged
+ * @p: object to reallocate memory for
+ * @size: the size to reallocate
+ * @flags: the flags for the page level allocator
+ *
+ * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
+ * and @p is not a %NULL pointer, the object pointed to is freed.
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
+ *
+ * This function must not be called concurrently with itself or kvfree() for the
+ * same memory allocation.
+ *
+ * Return: pointer to the allocated memory or %NULL in case of error
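+ *
+ * An illustrative usage sketch (hypothetical caller), growing a buffer that
+ * may live in either slab or vmalloc space while preserving its contents:
+ *
+ *	new = kvrealloc(buf, new_count * sizeof(*buf), GFP_KERNEL);
+ *	if (!new)
+ *		return -ENOMEM;	(buf remains valid and owned by the caller)
+ *	buf = new;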
+ */
+void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
- void *newp;
+ void *n;
- if (oldsize >= newsize)
- return (void *)p;
- newp = kvmalloc_noprof(newsize, flags);
- if (!newp)
- return NULL;
- memcpy(newp, p, oldsize);
- kvfree(p);
- return newp;
+ if (is_vmalloc_addr(p))
+ return vrealloc_noprof(p, size, flags);
+
+ n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
+ if (!n) {
+ /* We failed to krealloc(), fall back to kvmalloc(). */
+ n = kvmalloc_noprof(size, flags);
+ if (!n)
+ return NULL;
+
+ if (p) {
+ /* We already know that `p` is not a vmalloc address. */
+ kasan_disable_current();
+ memcpy(n, kasan_reset_tag(p), ksize(p));
+ kasan_enable_current();
+
+ kfree(p);
+ }
+ }
+
+ return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);
diff --git a/mm/vma.c b/mm/vma.c
new file mode 100644
index 000000000000..4737afcb064c
--- /dev/null
+++ b/mm/vma.c
@@ -0,0 +1,2068 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * VMA-specific functions.
+ */
+
+#include "vma_internal.h"
+#include "vma.h"
+
+static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
+{
+ struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
+
+ if (!mpol_equal(vmg->policy, vma_policy(vma)))
+ return false;
+ /*
+	 * VM_SOFTDIRTY should not prevent VMA merging: if the flags match
+	 * except for the dirty bit, the caller should mark the merged VMA
+	 * as dirty. If the dirty bit were not excluded from the comparison,
+	 * we would increase pressure on the memory system by forcing the
+	 * kernel to generate new VMAs where an old one could be extended.
+ */
+ if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
+ return false;
+ if (vma->vm_file != vmg->file)
+ return false;
+ if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
+ return false;
+ if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
+ return false;
+ return true;
+}
+
+static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
+ struct anon_vma *anon_vma2, struct vm_area_struct *vma)
+{
+ /*
+	 * The list_is_singular() test avoids merging VMAs cloned from
+	 * parents; this improves scalability by reducing anon_vma lock
+	 * contention.
+ */
+ if ((!anon_vma1 || !anon_vma2) && (!vma ||
+ list_is_singular(&vma->anon_vma_chain)))
+ return true;
+ return anon_vma1 == anon_vma2;
+}
+
+/* Are the anon_vma's belonging to each VMA compatible with one another? */
+static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
+ struct vm_area_struct *vma2)
+{
+ return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
+}
+
+/*
+ * init_multi_vma_prep() - Initializer for struct vma_prepare
+ * @vp: The vma_prepare struct
+ * @vma: The vma that will be altered once locked
+ * @next: The next vma if it is to be adjusted
+ * @remove: The first vma to be removed
+ * @remove2: The second vma to be removed
+ */
+static void init_multi_vma_prep(struct vma_prepare *vp,
+ struct vm_area_struct *vma,
+ struct vm_area_struct *next,
+ struct vm_area_struct *remove,
+ struct vm_area_struct *remove2)
+{
+ memset(vp, 0, sizeof(struct vma_prepare));
+ vp->vma = vma;
+ vp->anon_vma = vma->anon_vma;
+ vp->remove = remove;
+ vp->remove2 = remove2;
+ vp->adj_next = next;
+ if (!vp->anon_vma && next)
+ vp->anon_vma = next->anon_vma;
+
+ vp->file = vma->vm_file;
+ if (vp->file)
+ vp->mapping = vma->vm_file->f_mapping;
+}
+
+/*
+ * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
+ * in front of (at a lower virtual address and file offset than) the vma.
+ *
+ * We cannot merge two vmas if they have differently assigned (non-NULL)
+ * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
+ *
+ * We don't check here for the merged mmap wrapping around the end of pagecache
+ * indices (16TB on ia32) because do_mmap() does not permit mmap's which
+ * wrap, nor mmaps which cover the final page at index -1UL.
+ *
+ * We assume the vma may be removed as part of the merge.
+ */
+static bool can_vma_merge_before(struct vma_merge_struct *vmg)
+{
+ pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
+
+ if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
+ is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
+ if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
+ * beyond (at a higher virtual address and file offset than) the vma.
+ *
+ * We cannot merge two vmas if they have differently assigned (non-NULL)
+ * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
+ *
+ * We assume that vma is not removed as part of the merge.
+ */
+static bool can_vma_merge_after(struct vma_merge_struct *vmg)
+{
+ if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
+ is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
+ if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
+ return true;
+ }
+ return false;
+}
+
+static void __vma_link_file(struct vm_area_struct *vma,
+ struct address_space *mapping)
+{
+ if (vma_is_shared_maywrite(vma))
+ mapping_allow_writable(mapping);
+
+ flush_dcache_mmap_lock(mapping);
+ vma_interval_tree_insert(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+}
+
+/*
+ * Requires inode->i_mapping->i_mmap_rwsem
+ */
+static void __remove_shared_vm_struct(struct vm_area_struct *vma,
+ struct address_space *mapping)
+{
+ if (vma_is_shared_maywrite(vma))
+ mapping_unmap_writable(mapping);
+
+ flush_dcache_mmap_lock(mapping);
+ vma_interval_tree_remove(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+}
+
+/*
+ * vma_prepare() - Helper function for handling locking VMAs prior to altering
+ * @vp: The initialized vma_prepare struct
+ */
+static void vma_prepare(struct vma_prepare *vp)
+{
+ if (vp->file) {
+ uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
+
+ if (vp->adj_next)
+ uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
+ vp->adj_next->vm_end);
+
+ i_mmap_lock_write(vp->mapping);
+ if (vp->insert && vp->insert->vm_file) {
+ /*
+ * Put into interval tree now, so instantiated pages
+ * are visible to arm/parisc __flush_dcache_page
+ * throughout; but we cannot insert into address
+ * space until vma start or end is updated.
+ */
+ __vma_link_file(vp->insert,
+ vp->insert->vm_file->f_mapping);
+ }
+ }
+
+ if (vp->anon_vma) {
+ anon_vma_lock_write(vp->anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vp->vma);
+ if (vp->adj_next)
+ anon_vma_interval_tree_pre_update_vma(vp->adj_next);
+ }
+
+ if (vp->file) {
+ flush_dcache_mmap_lock(vp->mapping);
+ vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
+ if (vp->adj_next)
+ vma_interval_tree_remove(vp->adj_next,
+ &vp->mapping->i_mmap);
+ }
+}
+
+/*
+ * vma_complete() - Helper function for handling the unlocking after altering VMAs,
+ * or for inserting a VMA.
+ *
+ * @vp: The vma_prepare struct
+ * @vmi: The vma iterator
+ * @mm: The mm_struct
+ */
+static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
+ struct mm_struct *mm)
+{
+ if (vp->file) {
+ if (vp->adj_next)
+ vma_interval_tree_insert(vp->adj_next,
+ &vp->mapping->i_mmap);
+ vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
+ flush_dcache_mmap_unlock(vp->mapping);
+ }
+
+ if (vp->remove && vp->file) {
+ __remove_shared_vm_struct(vp->remove, vp->mapping);
+ if (vp->remove2)
+ __remove_shared_vm_struct(vp->remove2, vp->mapping);
+ } else if (vp->insert) {
+ /*
+ * split_vma has split insert from vma, and needs
+ * us to insert it before dropping the locks
+ * (it may either follow vma or precede it).
+ */
+ vma_iter_store(vmi, vp->insert);
+ mm->map_count++;
+ }
+
+ if (vp->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vp->vma);
+ if (vp->adj_next)
+ anon_vma_interval_tree_post_update_vma(vp->adj_next);
+ anon_vma_unlock_write(vp->anon_vma);
+ }
+
+ if (vp->file) {
+ i_mmap_unlock_write(vp->mapping);
+ uprobe_mmap(vp->vma);
+
+ if (vp->adj_next)
+ uprobe_mmap(vp->adj_next);
+ }
+
+ if (vp->remove) {
+again:
+ vma_mark_detached(vp->remove, true);
+ if (vp->file) {
+ uprobe_munmap(vp->remove, vp->remove->vm_start,
+ vp->remove->vm_end);
+ fput(vp->file);
+ }
+ if (vp->remove->anon_vma)
+ anon_vma_merge(vp->vma, vp->remove);
+ mm->map_count--;
+ mpol_put(vma_policy(vp->remove));
+ if (!vp->remove2)
+ WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
+ vm_area_free(vp->remove);
+
+ /*
+ * In mprotect's case 6 (see comments on vma_merge),
+ * we are removing both mid and next vmas
+ */
+ if (vp->remove2) {
+ vp->remove = vp->remove2;
+ vp->remove2 = NULL;
+ goto again;
+ }
+ }
+ if (vp->insert && vp->file)
+ uprobe_mmap(vp->insert);
+}
+
+/*
+ * init_vma_prep() - Initializer wrapper for vma_prepare struct
+ * @vp: The vma_prepare struct
+ * @vma: The vma that will be altered once locked
+ */
+static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
+{
+ init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
+}
+
+/*
+ * Can the proposed VMA be merged with the left (previous) VMA taking into
+ * account the start position of the proposed range.
+ */
+static bool can_vma_merge_left(struct vma_merge_struct *vmg)
+{
+ return vmg->prev && vmg->prev->vm_end == vmg->start &&
+ can_vma_merge_after(vmg);
+}
+
+/*
+ * Can the proposed VMA be merged with the right (next) VMA taking into
+ * account the end position of the proposed range.
+ *
+ * In addition, if we can merge with the left VMA, ensure that left and right
+ * anon_vma's are also compatible.
+ */
+static bool can_vma_merge_right(struct vma_merge_struct *vmg,
+ bool can_merge_left)
+{
+ if (!vmg->next || vmg->end != vmg->next->vm_start ||
+ !can_vma_merge_before(vmg))
+ return false;
+
+ if (!can_merge_left)
+ return true;
+
+ /*
+ * If we can merge with prev (left) and next (right), indicating that
+ * each VMA's anon_vma is compatible with the proposed anon_vma, this
+ * does not mean prev and next are compatible with EACH OTHER.
+ *
+ * We therefore check this in addition to mergeability to either side.
+ */
+ return are_anon_vmas_compatible(vmg->prev, vmg->next);
+}
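+
+/*
+ * A concrete (illustrative) case for the check above: prev has anon_vma A,
+ * next has anon_vma B and the proposed range has none. Each side merge is
+ * individually fine (a NULL anon_vma is compatible with either), but merging
+ * all three requires A == B, which are_anon_vmas_compatible() rejects.
+ */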
+
+/*
+ * Close a vm structure and free it.
+ */
+void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
+{
+ might_sleep();
+ if (!closed && vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ mpol_put(vma_policy(vma));
+ if (unreachable)
+ __vm_area_free(vma);
+ else
+ vm_area_free(vma);
+}
+
+/*
+ * Get rid of page table information in the indicated region.
+ *
+ * Called with the mm semaphore held.
+ */
+void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct vm_area_struct *next)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct mmu_gather tlb;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm);
+ update_hiwater_rss(mm);
+ unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
+ /* mm_wr_locked = */ true);
+ mas_set(mas, vma->vm_end);
+ free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+ next ? next->vm_start : USER_PGTABLES_CEILING,
+ /* mm_wr_locked = */ true);
+ tlb_finish_mmu(&tlb);
+}
+
+/*
+ * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
+ * has already been checked or doesn't make sense to fail.
+ * VMA Iterator will point to the original VMA.
+ */
+static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+{
+ struct vma_prepare vp;
+ struct vm_area_struct *new;
+ int err;
+
+ WARN_ON(vma->vm_start >= addr);
+ WARN_ON(vma->vm_end <= addr);
+
+ if (vma->vm_ops && vma->vm_ops->may_split) {
+ err = vma->vm_ops->may_split(vma, addr);
+ if (err)
+ return err;
+ }
+
+ new = vm_area_dup(vma);
+ if (!new)
+ return -ENOMEM;
+
+ if (new_below) {
+ new->vm_end = addr;
+ } else {
+ new->vm_start = addr;
+ new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+ }
+
+ err = -ENOMEM;
+ vma_iter_config(vmi, new->vm_start, new->vm_end);
+ if (vma_iter_prealloc(vmi, new))
+ goto out_free_vma;
+
+ err = vma_dup_policy(vma, new);
+ if (err)
+ goto out_free_vmi;
+
+ err = anon_vma_clone(new, vma);
+ if (err)
+ goto out_free_mpol;
+
+ if (new->vm_file)
+ get_file(new->vm_file);
+
+ if (new->vm_ops && new->vm_ops->open)
+ new->vm_ops->open(new);
+
+ vma_start_write(vma);
+ vma_start_write(new);
+
+ init_vma_prep(&vp, vma);
+ vp.insert = new;
+ vma_prepare(&vp);
+ vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+
+ if (new_below) {
+ vma->vm_start = addr;
+ vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
+ } else {
+ vma->vm_end = addr;
+ }
+
+ /* vma_complete stores the new vma */
+ vma_complete(&vp, vmi, vma->vm_mm);
+ validate_mm(vma->vm_mm);
+
+ /* Success. */
+ if (new_below)
+ vma_next(vmi);
+ else
+ vma_prev(vmi);
+
+ return 0;
+
+out_free_mpol:
+ mpol_put(vma_policy(new));
+out_free_vmi:
+ vma_iter_free(vmi);
+out_free_vma:
+ vm_area_free(new);
+ return err;
+}
+
+/*
+ * Split a vma into two pieces at address 'addr', a new vma is allocated
+ * either for the first part or the tail.
+ */
+static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+{
+ if (vma->vm_mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ return __split_vma(vmi, vma, addr, new_below);
+}
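+
+/*
+ * Illustrative note: @new_below selects which half becomes the new VMA. With
+ * new_below == 1 the new VMA covers [vm_start, addr) and the original VMA is
+ * trimmed to start at @addr; with new_below == 0 the new VMA covers
+ * [addr, vm_end) instead. vma_modify() below uses both forms to carve a
+ * modifiable range out of the middle of a VMA.
+ */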
+
+/*
+ * vma has some anon_vma assigned, and is already inserted on that
+ * anon_vma's interval trees.
+ *
+ * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
+ * vma must be removed from the anon_vma's interval trees using
+ * anon_vma_interval_tree_pre_update_vma().
+ *
+ * After the update, the vma will be reinserted using
+ * anon_vma_interval_tree_post_update_vma().
+ *
+ * The entire update must be protected by exclusive mmap_lock and by
+ * the root anon_vma's mutex.
+ */
+void
+anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma_chain *avc;
+
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
+}
+
+void
+anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma_chain *avc;
+
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
+}
+
+/*
+ * dup_anon_vma() - Helper function to duplicate anon_vma
+ * @dst: The destination VMA
+ * @src: The source VMA
+ * @dup: Pointer to the destination VMA when successful.
+ *
+ * Returns: 0 on success.
+ */
+static int dup_anon_vma(struct vm_area_struct *dst,
+ struct vm_area_struct *src, struct vm_area_struct **dup)
+{
+ /*
+	 * Easily overlooked: when mprotect shifts the boundary, make sure the
+	 * expanding vma has anon_vma set if the shrinking vma had one, to
+	 * cover any anon pages imported.
+ */
+ if (src->anon_vma && !dst->anon_vma) {
+ int ret;
+
+ vma_assert_write_locked(dst);
+ dst->anon_vma = src->anon_vma;
+ ret = anon_vma_clone(dst, src);
+ if (ret)
+ return ret;
+
+ *dup = dst;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+void validate_mm(struct mm_struct *mm)
+{
+ int bug = 0;
+ int i = 0;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mt_validate(&mm->mm_mt);
+ for_each_vma(vmi, vma) {
+#ifdef CONFIG_DEBUG_VM_RB
+ struct anon_vma *anon_vma = vma->anon_vma;
+ struct anon_vma_chain *avc;
+#endif
+ unsigned long vmi_start, vmi_end;
+ bool warn = 0;
+
+ vmi_start = vma_iter_addr(&vmi);
+ vmi_end = vma_iter_end(&vmi);
+ if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
+ warn = 1;
+
+ if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
+ warn = 1;
+
+ if (warn) {
+ pr_emerg("issue in %s\n", current->comm);
+ dump_stack();
+ dump_vma(vma);
+ pr_emerg("tree range: %px start %lx end %lx\n", vma,
+ vmi_start, vmi_end - 1);
+ vma_iter_dump_tree(&vmi);
+ }
+
+#ifdef CONFIG_DEBUG_VM_RB
+ if (anon_vma) {
+ anon_vma_lock_read(anon_vma);
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_verify(avc);
+ anon_vma_unlock_read(anon_vma);
+ }
+#endif
+ i++;
+ }
+ if (i != mm->map_count) {
+ pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
+ bug = 1;
+ }
+ VM_BUG_ON_MM(bug, mm);
+}
+#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
+
+/* Actually perform the VMA merge operation. */
+static int commit_merge(struct vma_merge_struct *vmg,
+ struct vm_area_struct *adjust,
+ struct vm_area_struct *remove,
+ struct vm_area_struct *remove2,
+ long adj_start,
+ bool expanded)
+{
+ struct vma_prepare vp;
+
+ init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
+
+ VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
+ vp.anon_vma != adjust->anon_vma);
+
+ if (expanded) {
+ /* Note: vma iterator must be pointing to 'start'. */
+ vma_iter_config(vmg->vmi, vmg->start, vmg->end);
+ } else {
+ vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
+ adjust->vm_end);
+ }
+
+ if (vma_iter_prealloc(vmg->vmi, vmg->vma))
+ return -ENOMEM;
+
+ vma_prepare(&vp);
+ vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
+ vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
+
+ if (expanded)
+ vma_iter_store(vmg->vmi, vmg->vma);
+
+ if (adj_start) {
+ adjust->vm_start += adj_start;
+ adjust->vm_pgoff += PHYS_PFN(adj_start);
+ if (adj_start < 0) {
+ WARN_ON(expanded);
+ vma_iter_store(vmg->vmi, adjust);
+ }
+ }
+
+ vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
+
+ return 0;
+}
+
+/* We can only remove VMAs when merging if they do not have a close hook. */
+static bool can_merge_remove_vma(struct vm_area_struct *vma)
+{
+ return !vma->vm_ops || !vma->vm_ops->close;
+}
+
+/*
+ * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
+ * attributes modified.
+ *
+ * @vmg: Describes the modifications being made to a VMA and associated
+ * metadata.
+ *
+ * When the attributes of a range within a VMA change, it might be possible
+ * for immediately adjacent VMAs to be merged into that VMA due to having
+ * identical properties.
+ *
+ * This function checks for the existence of any such mergeable VMAs and updates
+ * the maple tree describing the @vmg->vma->vm_mm address space to account for
+ * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
+ *
+ * As part of this operation, if a merge occurs, the @vmg object will have its
+ * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
+ * calls to this function should reset these fields.
+ *
+ * Returns: The merged VMA if merge succeeds, or NULL otherwise.
+ *
+ * ASSUMPTIONS:
+ * - The caller must assign the VMA to be modified to @vmg->vma.
+ * - The caller must have set @vmg->prev to the previous VMA, if there is one.
+ * - The caller must not set @vmg->next, as we determine this.
+ * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
+ * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
+ */
+static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *prev = vmg->prev;
+ struct vm_area_struct *next, *res;
+ struct vm_area_struct *anon_dup = NULL;
+ struct vm_area_struct *adjust = NULL;
+ unsigned long start = vmg->start;
+ unsigned long end = vmg->end;
+ bool left_side = vma && start == vma->vm_start;
+ bool right_side = vma && end == vma->vm_end;
+ int err = 0;
+ long adj_start = 0;
+ bool merge_will_delete_vma, merge_will_delete_next;
+ bool merge_left, merge_right, merge_both;
+ bool expanded;
+
+ mmap_assert_write_locked(vmg->mm);
+ VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
+ VM_WARN_ON(vmg->next); /* We set this. */
+ VM_WARN_ON(prev && start <= prev->vm_start);
+ VM_WARN_ON(start >= end);
+ /*
+	 * If vma == prev, then we are offset into a VMA. Otherwise, we must
+	 * span a portion of the VMA.
+ */
+ VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
+ vmg->end > vma->vm_end));
+ /* The vmi must be positioned within vmg->vma. */
+ VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
+ vma_iter_addr(vmg->vmi) < vma->vm_end));
+
+ vmg->state = VMA_MERGE_NOMERGE;
+
+ /*
+	 * If this is a special mapping, or if the range being modified does
+	 * not reach either the leftmost or rightmost edge of the VMA, we
+	 * have no chance of merging and should abort.
+ */
+ if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
+ return NULL;
+
+ if (left_side)
+ merge_left = can_vma_merge_left(vmg);
+ else
+ merge_left = false;
+
+ if (right_side) {
+ next = vmg->next = vma_iter_next_range(vmg->vmi);
+ vma_iter_prev_range(vmg->vmi);
+
+ merge_right = can_vma_merge_right(vmg, merge_left);
+ } else {
+ merge_right = false;
+ next = NULL;
+ }
+
+ if (merge_left) /* If merging prev, position iterator there. */
+ vma_prev(vmg->vmi);
+ else if (!merge_right) /* If we have nothing to merge, abort. */
+ return NULL;
+
+ merge_both = merge_left && merge_right;
+ /* If we span the entire VMA, a merge implies it will be deleted. */
+ merge_will_delete_vma = left_side && right_side;
+
+ /*
+ * If we need to remove vma in its entirety but are unable to do so,
+ * we have no sensible recourse but to abort the merge.
+ */
+ if (merge_will_delete_vma && !can_merge_remove_vma(vma))
+ return NULL;
+
+ /*
+ * If we merge both VMAs, then next is also deleted. This implies
+ * merge_will_delete_vma also.
+ */
+ merge_will_delete_next = merge_both;
+
+ /*
+ * If we cannot delete next, then we can reduce the operation to merging
+ * prev and vma (thereby deleting vma).
+ */
+ if (merge_will_delete_next && !can_merge_remove_vma(next)) {
+ merge_will_delete_next = false;
+ merge_right = false;
+ merge_both = false;
+ }
+
+ /* No matter what happens, we will be adjusting vma. */
+ vma_start_write(vma);
+
+ if (merge_left)
+ vma_start_write(prev);
+
+ if (merge_right)
+ vma_start_write(next);
+
+ if (merge_both) {
+ /*
+ * |<----->|
+ * |-------*********-------|
+ * prev vma next
+ * extend delete delete
+ */
+
+ vmg->vma = prev;
+ vmg->start = prev->vm_start;
+ vmg->end = next->vm_end;
+ vmg->pgoff = prev->vm_pgoff;
+
+ /*
+ * We already ensured anon_vma compatibility above, so now it's
+ * simply a case of, if prev has no anon_vma object, which of
+ * next or vma contains the anon_vma we must duplicate.
+ */
+ err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
+ } else if (merge_left) {
+ /*
+ * |<----->| OR
+ * |<--------->|
+ * |-------*************
+ * prev vma
+ * extend shrink/delete
+ */
+
+ vmg->vma = prev;
+ vmg->start = prev->vm_start;
+ vmg->pgoff = prev->vm_pgoff;
+
+ if (!merge_will_delete_vma) {
+ adjust = vma;
+ adj_start = vmg->end - vma->vm_start;
+ }
+
+ err = dup_anon_vma(prev, vma, &anon_dup);
+ } else { /* merge_right */
+ /*
+ * |<----->| OR
+ * |<--------->|
+ * *************-------|
+ * vma next
+ * shrink/delete extend
+ */
+
+ pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
+
+ VM_WARN_ON(!merge_right);
+ /* If we are offset into a VMA, then prev must be vma. */
+ VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);
+
+ if (merge_will_delete_vma) {
+ vmg->vma = next;
+ vmg->end = next->vm_end;
+ vmg->pgoff = next->vm_pgoff - pglen;
+ } else {
+ /*
+ * We shrink vma and expand next.
+ *
+ * IMPORTANT: This is the ONLY case where the final
+ * merged VMA is NOT vmg->vma, but rather vmg->next.
+ */
+
+ vmg->start = vma->vm_start;
+ vmg->end = start;
+ vmg->pgoff = vma->vm_pgoff;
+
+ adjust = next;
+ adj_start = -(vma->vm_end - start);
+ }
+
+ err = dup_anon_vma(next, vma, &anon_dup);
+ }
+
+ if (err)
+ goto abort;
+
+ /*
+ * In nearly all cases, we expand vmg->vma. There is one exception -
+ * merge_right where we partially span the VMA. In this case we shrink
+ * the end of vmg->vma and adjust the start of vmg->next accordingly.
+ */
+ expanded = !merge_right || merge_will_delete_vma;
+
+ if (commit_merge(vmg, adjust,
+ merge_will_delete_vma ? vma : NULL,
+ merge_will_delete_next ? next : NULL,
+ adj_start, expanded)) {
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
+
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
+ return NULL;
+ }
+
+ res = merge_left ? prev : next;
+ khugepaged_enter_vma(res, vmg->flags);
+
+ vmg->state = VMA_MERGE_SUCCESS;
+ return res;
+
+abort:
+ vma_iter_set(vmg->vmi, start);
+ vma_iter_load(vmg->vmi);
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
+ return NULL;
+}
+
+/*
+ * vma_merge_new_range - Attempt to merge a new VMA into address space
+ *
+ * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
+ * (exclusive), which we try to merge with any adjacent VMAs if possible.
+ *
+ * We are about to add a VMA to the address space starting at @vmg->start and
+ * ending at @vmg->end. There are three different possible scenarios:
+ *
+ * 1. There is a VMA with identical properties immediately adjacent to the
+ * proposed new VMA [@vmg->start, @vmg->end) either before or after it -
+ * EXPAND that VMA:
+ *
+ * Proposed: |-----| or |-----|
+ * Existing: |----| |----|
+ *
+ * 2. There are VMAs with identical properties immediately adjacent to the
+ * proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
+ * EXPAND the former and REMOVE the latter:
+ *
+ * Proposed: |-----|
+ * Existing: |----| |----|
+ *
+ * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
+ * VMAs do not have identical attributes - NO MERGE POSSIBLE.
+ *
+ * In instances where we can merge, this function returns the expanded VMA which
+ * will have its range adjusted accordingly and the underlying maple tree also
+ * adjusted.
+ *
+ * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
+ * to the VMA we expanded.
+ *
+ * This function adjusts @vmg to provide @vmg->next if not already specified,
+ * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
+ *
+ * ASSUMPTIONS:
+ * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
+ * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
+ *   other than VMAs that will be unmapped should the operation succeed.
+ * - The caller must have specified the previous vma in @vmg->prev.
+ * - The caller must have specified the next vma in @vmg->next.
+ * - The caller must have positioned the vmi at or before the gap.
+ */
+struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *prev = vmg->prev;
+ struct vm_area_struct *next = vmg->next;
+ unsigned long start = vmg->start;
+ unsigned long end = vmg->end;
+ pgoff_t pgoff = vmg->pgoff;
+ pgoff_t pglen = PHYS_PFN(end - start);
+ bool can_merge_left, can_merge_right;
+
+ mmap_assert_write_locked(vmg->mm);
+ VM_WARN_ON(vmg->vma);
+ /* vmi must point at or before the gap. */
+ VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);
+
+ vmg->state = VMA_MERGE_NOMERGE;
+
+ /* Special VMAs are unmergeable, also if no prev/next. */
+ if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
+ return NULL;
+
+ can_merge_left = can_vma_merge_left(vmg);
+ can_merge_right = can_vma_merge_right(vmg, can_merge_left);
+
+ /* If we can merge with the next VMA, adjust vmg accordingly. */
+ if (can_merge_right) {
+ vmg->end = next->vm_end;
+ vmg->vma = next;
+ vmg->pgoff = next->vm_pgoff - pglen;
+ }
+
+ /* If we can merge with the previous VMA, adjust vmg accordingly. */
+ if (can_merge_left) {
+ vmg->start = prev->vm_start;
+ vmg->vma = prev;
+ vmg->pgoff = prev->vm_pgoff;
+
+ /*
+ * If this merge would result in removal of the next VMA but we
+ * are not permitted to do so, reduce the operation to merging
+ * prev and vma.
+ */
+ if (can_merge_right && !can_merge_remove_vma(next))
+ vmg->end = end;
+
+ vma_prev(vmg->vmi); /* Equivalent to going to the previous range */
+ }
+
+ /*
+ * Now try to expand adjacent VMA(s). This takes care of removing the
+ * following VMA if we have VMAs on both sides.
+ */
+ if (vmg->vma && !vma_expand(vmg)) {
+ khugepaged_enter_vma(vmg->vma, vmg->flags);
+ vmg->state = VMA_MERGE_SUCCESS;
+ return vmg->vma;
+ }
+
+	/*
+	 * If expansion failed, reset state so we can retry the merge later.
+	 * Note: check vmg->vma against prev before clearing it, as only a
+	 * merge with prev will have repositioned the iterator.
+	 */
+	if (vmg->vma == prev)
+		vma_iter_set(vmg->vmi, start);
+	vmg->vma = NULL;
+	vmg->start = start;
+	vmg->end = end;
+	vmg->pgoff = pgoff;
+
+ return NULL;
+}
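+
+/*
+ * Illustrative caller sketch (simplified from the mmap path; the
+ * VMG_STATE()-style setup is assumed, not verbatim):
+ *
+ *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
+ *	vmg.prev = prev;
+ *	vmg.next = next;
+ *	vma = vma_merge_new_range(&vmg);
+ *	if (!vma)
+ *		... allocate and link a fresh VMA instead ...
+ */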
+
+/*
+ * vma_expand - Expand an existing VMA
+ *
+ * @vmg: Describes a VMA expansion operation.
+ *
+ * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
+ * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
+ * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with
+ * vmg->next needs to be handled by the caller.
+ *
+ * Returns: 0 on success.
+ *
+ * ASSUMPTIONS:
+ * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
+ * - The caller must have set @vmg->vma and @vmg->next.
+ */
+int vma_expand(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *anon_dup = NULL;
+ bool remove_next = false;
+ struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *next = vmg->next;
+
+ mmap_assert_write_locked(vmg->mm);
+
+ vma_start_write(vma);
+ if (next && (vma != next) && (vmg->end == next->vm_end)) {
+ int ret;
+
+ remove_next = true;
+ /* This should already have been checked by this point. */
+ VM_WARN_ON(!can_merge_remove_vma(next));
+ vma_start_write(next);
+ ret = dup_anon_vma(vma, next, &anon_dup);
+ if (ret)
+ return ret;
+ }
+
+ /* Not merging but overwriting any part of next is not handled. */
+ VM_WARN_ON(next && !remove_next &&
+ next != vma && vmg->end > next->vm_start);
+ /* Only handles expanding */
+ VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);
+
+ if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
+ goto nomem;
+
+ return 0;
+
+nomem:
+ vmg->state = VMA_MERGE_ERROR_NOMEM;
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
+ return -ENOMEM;
+}
+
+/*
+ * vma_shrink() - Reduce an existing VMAs memory area
+ * @vmi: The vma iterator
+ * @vma: The VMA to modify
+ * @start: The new start
+ * @end: The new end
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff)
+{
+ struct vma_prepare vp;
+
+ WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
+
+ if (vma->vm_start < start)
+ vma_iter_config(vmi, vma->vm_start, start);
+ else
+ vma_iter_config(vmi, end, vma->vm_end);
+
+ if (vma_iter_prealloc(vmi, NULL))
+ return -ENOMEM;
+
+ vma_start_write(vma);
+
+ init_vma_prep(&vp, vma);
+ vma_prepare(&vp);
+ vma_adjust_trans_huge(vma, start, end, 0);
+
+ vma_iter_clear(vmi);
+ vma_set_range(vma, start, end, pgoff);
+ vma_complete(&vp, vmi, vma->vm_mm);
+ validate_mm(vma->vm_mm);
+ return 0;
+}
+
+static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach, bool mm_wr_locked)
+{
+ struct mmu_gather tlb;
+
+ if (!vms->clear_ptes) /* Nothing to do */
+ return;
+
+ /*
+ * We can free page tables without write-locking mmap_lock because VMAs
+ * were isolated before we downgraded mmap_lock.
+ */
+ mas_set(mas_detach, 1);
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, vms->vma->vm_mm);
+ update_hiwater_rss(vms->vma->vm_mm);
+ unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
+ vms->vma_count, mm_wr_locked);
+
+ mas_set(mas_detach, 1);
+ /* start and end may be different if there is no prev or next vma. */
+ free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
+ vms->unmap_end, mm_wr_locked);
+ tlb_finish_mmu(&tlb);
+ vms->clear_ptes = false;
+}
+
+void vms_clean_up_area(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct vm_area_struct *vma;
+
+ if (!vms->nr_pages)
+ return;
+
+ vms_clear_ptes(vms, mas_detach, true);
+ mas_set(mas_detach, 0);
+ mas_for_each(mas_detach, vma, ULONG_MAX)
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ vms->closed_vm_ops = true;
+}
+
+/*
+ * vms_complete_munmap_vmas() - Finish the munmap() operation
+ * @vms: The vma munmap struct
+ * @mas_detach: The maple state of the detached vmas
+ *
+ * This updates the mm_struct, unmaps the region, frees the resources used for
+ * the munmap() and may downgrade the lock, if requested. It covers everything
+ * that needs to be done once the vma maple tree has been updated.
+ */
+void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+
+ mm = current->mm;
+ mm->map_count -= vms->vma_count;
+ mm->locked_vm -= vms->locked_vm;
+ if (vms->unlock)
+ mmap_write_downgrade(mm);
+
+ if (!vms->nr_pages)
+ return;
+
+ vms_clear_ptes(vms, mas_detach, !vms->unlock);
+ /* Update high watermark before we lower total_vm */
+ update_hiwater_vm(mm);
+ /* Stat accounting */
+ WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
+ /* Paranoid bookkeeping */
+ VM_WARN_ON(vms->exec_vm > mm->exec_vm);
+ VM_WARN_ON(vms->stack_vm > mm->stack_vm);
+ VM_WARN_ON(vms->data_vm > mm->data_vm);
+ mm->exec_vm -= vms->exec_vm;
+ mm->stack_vm -= vms->stack_vm;
+ mm->data_vm -= vms->data_vm;
+
+ /* Remove and clean up vmas */
+ mas_set(mas_detach, 0);
+ mas_for_each(mas_detach, vma, ULONG_MAX)
+		remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);
+
+ vm_unacct_memory(vms->nr_accounted);
+ validate_mm(mm);
+ if (vms->unlock)
+ mmap_read_unlock(mm);
+
+ __mt_destroy(mas_detach->tree);
+}
+
+/*
+ * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
+ * for removal at a later date. Handles splitting first and last if necessary
+ * and marking the vmas as isolated.
+ *
+ * @vms: The vma munmap struct
+ * @mas_detach: The maple state tracking the detached tree
+ *
+ * Return: 0 on success, error otherwise
+ */
+int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct vm_area_struct *next = NULL;
+ int error;
+
+ /*
+ * If we need to split any vma, do it now to save pain later.
+ * Does it split the first one?
+ */
+ if (vms->start > vms->vma->vm_start) {
+
+ /*
+ * Make sure that map_count on return from munmap() will
+ * not exceed its limit; but let map_count go just above
+ * its limit temporarily, to help free resources as expected.
+ */
+ if (vms->end < vms->vma->vm_end &&
+ vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
+ error = -ENOMEM;
+ goto map_count_exceeded;
+ }
+
+ /* Don't bother splitting the VMA if we can't unmap it anyway */
+ if (!can_modify_vma(vms->vma)) {
+ error = -EPERM;
+ goto start_split_failed;
+ }
+
+ error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
+ if (error)
+ goto start_split_failed;
+ }
+ vms->prev = vma_prev(vms->vmi);
+ if (vms->prev)
+ vms->unmap_start = vms->prev->vm_end;
+
+ /*
+ * Detach a range of VMAs from the mm. Using next as a temp variable as
+ * it is always overwritten.
+ */
+ for_each_vma_range(*(vms->vmi), next, vms->end) {
+ long nrpages;
+
+ if (!can_modify_vma(next)) {
+ error = -EPERM;
+ goto modify_vma_failed;
+ }
+ /* Does it split the end? */
+ if (next->vm_end > vms->end) {
+ error = __split_vma(vms->vmi, next, vms->end, 0);
+ if (error)
+ goto end_split_failed;
+ }
+ vma_start_write(next);
+ mas_set(mas_detach, vms->vma_count++);
+ error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
+ if (error)
+ goto munmap_gather_failed;
+
+ vma_mark_detached(next, true);
+ nrpages = vma_pages(next);
+
+ vms->nr_pages += nrpages;
+ if (next->vm_flags & VM_LOCKED)
+ vms->locked_vm += nrpages;
+
+ if (next->vm_flags & VM_ACCOUNT)
+ vms->nr_accounted += nrpages;
+
+ if (is_exec_mapping(next->vm_flags))
+ vms->exec_vm += nrpages;
+ else if (is_stack_mapping(next->vm_flags))
+ vms->stack_vm += nrpages;
+ else if (is_data_mapping(next->vm_flags))
+ vms->data_vm += nrpages;
+
+ if (unlikely(vms->uf)) {
+ /*
+ * If userfaultfd_unmap_prep returns an error the vmas
+ * will remain split, but userland will get a
+			 * highly unexpected error anyway. This is no
+			 * different from the case where the first of the two
+			 * __split_vma calls fails: we don't undo the first
+			 * split even though we could. This failure is
+			 * unlikely enough that it's not worth optimizing for.
+ */
+ error = userfaultfd_unmap_prep(next, vms->start,
+ vms->end, vms->uf);
+ if (error)
+ goto userfaultfd_error;
+ }
+#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+ BUG_ON(next->vm_start < vms->start);
+ BUG_ON(next->vm_start > vms->end);
+#endif
+ }
+
+ vms->next = vma_next(vms->vmi);
+ if (vms->next)
+ vms->unmap_end = vms->next->vm_start;
+
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+ /* Make sure no VMAs are about to be lost. */
+ {
+ MA_STATE(test, mas_detach->tree, 0, 0);
+ struct vm_area_struct *vma_mas, *vma_test;
+ int test_count = 0;
+
+ vma_iter_set(vms->vmi, vms->start);
+ rcu_read_lock();
+ vma_test = mas_find(&test, vms->vma_count - 1);
+ for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
+ BUG_ON(vma_mas != vma_test);
+ test_count++;
+ vma_test = mas_next(&test, vms->vma_count - 1);
+ }
+ rcu_read_unlock();
+ BUG_ON(vms->vma_count != test_count);
+ }
+#endif
+
+ while (vma_iter_addr(vms->vmi) > vms->start)
+ vma_iter_prev_range(vms->vmi);
+
+ vms->clear_ptes = true;
+ return 0;
+
+userfaultfd_error:
+munmap_gather_failed:
+end_split_failed:
+modify_vma_failed:
+ reattach_vmas(mas_detach);
+start_split_failed:
+map_count_exceeded:
+ return error;
+}
+
+/*
+ * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+ * @vmi: The vma iterator
+ * @vma: The starting vm_area_struct
+ * @mm: The mm_struct
+ * @start: The aligned start address to munmap.
+ * @end: The aligned end address to munmap.
+ * @uf: The userfaultfd list_head
+ * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
+ * success.
+ *
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
+ */
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start, unsigned long end,
+ struct list_head *uf, bool unlock)
+{
+ struct maple_tree mt_detach;
+ MA_STATE(mas_detach, &mt_detach, 0, 0);
+ mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ mt_on_stack(mt_detach);
+ struct vma_munmap_struct vms;
+ int error;
+
+ init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
+ error = vms_gather_munmap_vmas(&vms, &mas_detach);
+ if (error)
+ goto gather_failed;
+
+ error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
+ if (error)
+ goto clear_tree_failed;
+
+ /* Point of no return */
+ vms_complete_munmap_vmas(&vms, &mas_detach);
+ return 0;
+
+clear_tree_failed:
+ reattach_vmas(&mas_detach);
+gather_failed:
+ validate_mm(mm);
+ return error;
+}
+
+/*
+ * do_vmi_munmap() - munmap a given range.
+ * @vmi: The vma iterator
+ * @mm: The mm_struct
+ * @start: The start address to munmap
+ * @len: The length of the range to munmap
+ * @uf: The userfaultfd list_head
+ * @unlock: set to true if the user wants to drop the mmap_lock on success
+ *
+ * This function takes a @vmi that is either pointing to the previous VMA or
+ * set to MA_START and sets it up to remove the mapping(s). The @len will be
+ * to MA_START and sets it up to remove the mapping(s). The @len will be
+ * aligned.
+ *
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
+ */
+int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool unlock)
+{
+ unsigned long end;
+ struct vm_area_struct *vma;
+
+	if (offset_in_page(start) || start > TASK_SIZE || len > TASK_SIZE - start)
+ return -EINVAL;
+
+ end = start + PAGE_ALIGN(len);
+ if (end == start)
+ return -EINVAL;
+
+ /* Find the first overlapping VMA */
+ vma = vma_find(vmi, end);
+ if (!vma) {
+ if (unlock)
+ mmap_write_unlock(mm);
+ return 0;
+ }
+
+ return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
+}
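+
+/*
+ * Illustrative sketch of the typical entry point (cf. __vm_munmap(); error
+ * handling trimmed):
+ *
+ *	VMA_ITERATOR(vmi, mm, start);
+ *	LIST_HEAD(uf);
+ *
+ *	if (mmap_write_lock_killable(mm))
+ *		return -EINTR;
+ *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
+ *	if (ret || !unlock)
+ *		mmap_write_unlock(mm);
+ *	userfaultfd_unmap_complete(mm, &uf);
+ */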
+
+/*
+ * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
+ * context and anonymous VMA name within the range [start, end).
+ *
+ * As a result, we might be able to merge the newly modified VMA range with an
+ * adjacent VMA with identical properties.
+ *
+ * If no merge is possible and the range does not span the entirety of the VMA,
+ * we then need to split the VMA to accommodate the change.
+ *
+ * The function returns either the merged VMA, the original VMA if a split was
+ * required instead, or an error if the split failed.
+ */
+static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *merged;
+
+ /* First, try to merge. */
+ merged = vma_merge_existing_range(vmg);
+ if (merged)
+ return merged;
+
+ /* Split any preceding portion of the VMA. */
+ if (vma->vm_start < vmg->start) {
+ int err = split_vma(vmg->vmi, vma, vmg->start, 1);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ /* Split any trailing portion of the VMA. */
+ if (vma->vm_end > vmg->end) {
+ int err = split_vma(vmg->vmi, vma, vmg->end, 0);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+struct vm_area_struct *vma_modify_flags(
+ struct vma_iterator *vmi, struct vm_area_struct *prev,
+ struct vm_area_struct *vma, unsigned long start, unsigned long end,
+ unsigned long new_flags)
+{
+ VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+
+ vmg.flags = new_flags;
+
+ return vma_modify(&vmg);
+}
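+
+/*
+ * Illustrative sketch (close to what mprotect_fixup() does, not verbatim):
+ *
+ *	vma = vma_modify_flags(&vmi, prev, vma, start, end, newflags);
+ *	if (IS_ERR(vma))
+ *		return PTR_ERR(vma);
+ *
+ * On return, vma either had mergeable neighbours folded in or was split so
+ * that [start, end) can be modified in place.
+ */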
+
+struct vm_area_struct
+*vma_modify_flags_name(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long new_flags,
+ struct anon_vma_name *new_name)
+{
+ VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+
+ vmg.flags = new_flags;
+ vmg.anon_name = new_name;
+
+ return vma_modify(&vmg);
+}
+
+struct vm_area_struct
+*vma_modify_policy(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct mempolicy *new_pol)
+{
+ VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+
+ vmg.policy = new_pol;
+
+ return vma_modify(&vmg);
+}
+
+struct vm_area_struct
+*vma_modify_flags_uffd(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags,
+ struct vm_userfaultfd_ctx new_ctx)
+{
+ VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
+
+ vmg.flags = new_flags;
+ vmg.uffd_ctx = new_ctx;
+
+ return vma_modify(&vmg);
+}
+
+/*
+ * Expand vma by delta bytes, potentially merging with an immediately adjacent
+ * VMA with identical properties.
+ */
+struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
+ struct vm_area_struct *vma,
+ unsigned long delta)
+{
+ VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
+
+ vmg.next = vma_iter_next_rewind(vmi, NULL);
+ vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
+
+ return vma_merge_new_range(&vmg);
+}
+
+void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
+{
+ vb->count = 0;
+}
+
+static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
+{
+ struct address_space *mapping;
+ int i;
+
+ mapping = vb->vmas[0]->vm_file->f_mapping;
+ i_mmap_lock_write(mapping);
+ for (i = 0; i < vb->count; i++) {
+ VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
+ __remove_shared_vm_struct(vb->vmas[i], mapping);
+ }
+ i_mmap_unlock_write(mapping);
+
+ unlink_file_vma_batch_init(vb);
+}
+
+void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
+ struct vm_area_struct *vma)
+{
+ if (vma->vm_file == NULL)
+ return;
+
+ if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
+ vb->count == ARRAY_SIZE(vb->vmas))
+ unlink_file_vma_batch_process(vb);
+
+ vb->vmas[vb->count] = vma;
+ vb->count++;
+}
+
+void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
+{
+ if (vb->count > 0)
+ unlink_file_vma_batch_process(vb);
+}
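+
+/*
+ * The three functions above form a small batching protocol; an illustrative
+ * caller (cf. free_pgtables(); sketch only):
+ *
+ *	struct unlink_vma_file_batch vb;
+ *
+ *	unlink_file_vma_batch_init(&vb);
+ *	for_each_vma(vmi, vma)
+ *		unlink_file_vma_batch_add(&vb, vma);
+ *	unlink_file_vma_batch_final(&vb);
+ *
+ * This takes i_mmap_rwsem once per run of same-file VMAs rather than once
+ * per VMA.
+ */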
+
+/*
+ * Unlink a file-based vm structure from its interval tree, to hide
+ * vma from rmap and vmtruncate before freeing its page tables.
+ */
+void unlink_file_vma(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+
+ if (file) {
+ struct address_space *mapping = file->f_mapping;
+
+ i_mmap_lock_write(mapping);
+ __remove_shared_vm_struct(vma, mapping);
+ i_mmap_unlock_write(mapping);
+ }
+}
+
+void vma_link_file(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct address_space *mapping;
+
+ if (file) {
+ mapping = file->f_mapping;
+ i_mmap_lock_write(mapping);
+ __vma_link_file(vma, mapping);
+ i_mmap_unlock_write(mapping);
+ }
+}
+
+int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ VMA_ITERATOR(vmi, mm, 0);
+
+ vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
+ if (vma_iter_prealloc(&vmi, vma))
+ return -ENOMEM;
+
+ vma_start_write(vma);
+ vma_iter_store(&vmi, vma);
+ vma_link_file(vma);
+ mm->map_count++;
+ validate_mm(mm);
+ return 0;
+}
+
+/*
+ * Copy the vma structure to a new location in the same mm,
+ * prior to moving page table entries, to effect an mremap move.
+ */
+struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ unsigned long addr, unsigned long len, pgoff_t pgoff,
+ bool *need_rmap_locks)
+{
+ struct vm_area_struct *vma = *vmap;
+ unsigned long vma_start = vma->vm_start;
+ struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *new_vma;
+ bool faulted_in_anon_vma = true;
+ VMA_ITERATOR(vmi, mm, addr);
+ VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
+
+ /*
+	 * If the anonymous vma has not yet been faulted in, update the new
+	 * pgoff to match the new location, to increase its chance of merging.
+ */
+ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
+ pgoff = addr >> PAGE_SHIFT;
+ faulted_in_anon_vma = false;
+ }
+
+ new_vma = find_vma_prev(mm, addr, &vmg.prev);
+ if (new_vma && new_vma->vm_start < addr + len)
+ return NULL; /* should never get here */
+
+ vmg.vma = NULL; /* New VMA range. */
+ vmg.pgoff = pgoff;
+ vmg.next = vma_iter_next_rewind(&vmi, NULL);
+ new_vma = vma_merge_new_range(&vmg);
+
+ if (new_vma) {
+ /*
+ * Source vma may have been merged into new_vma
+ */
+ if (unlikely(vma_start >= new_vma->vm_start &&
+ vma_start < new_vma->vm_end)) {
+ /*
+ * The only way we can get a vma_merge with
+ * self during an mremap is if the vma hasn't
+ * been faulted in yet and we were allowed to
+ * reset the dst vma->vm_pgoff to the
+ * destination address of the mremap to allow
+ * the merge to happen. mremap must change the
+ * vm_pgoff linearity between src and dst vmas
+ * (in turn preventing a vma_merge) to be
+ * safe. It is only safe to keep the vm_pgoff
+ * linear if there are no pages mapped yet.
+ */
+ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
+ *vmap = vma = new_vma;
+ }
+ *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
+ } else {
+ new_vma = vm_area_dup(vma);
+ if (!new_vma)
+ goto out;
+ vma_set_range(new_vma, addr, addr + len, pgoff);
+ if (vma_dup_policy(vma, new_vma))
+ goto out_free_vma;
+ if (anon_vma_clone(new_vma, vma))
+ goto out_free_mempol;
+ if (new_vma->vm_file)
+ get_file(new_vma->vm_file);
+ if (new_vma->vm_ops && new_vma->vm_ops->open)
+ new_vma->vm_ops->open(new_vma);
+ if (vma_link(mm, new_vma))
+ goto out_vma_link;
+ *need_rmap_locks = false;
+ }
+ return new_vma;
+
+out_vma_link:
+ if (new_vma->vm_ops && new_vma->vm_ops->close)
+ new_vma->vm_ops->close(new_vma);
+
+ if (new_vma->vm_file)
+ fput(new_vma->vm_file);
+
+ unlink_anon_vmas(new_vma);
+out_free_mempol:
+ mpol_put(vma_policy(new_vma));
+out_free_vma:
+ vm_area_free(new_vma);
+out:
+ return NULL;
+}
+
+/*
+ * Rough compatibility check to quickly see if it's even worth looking
+ * at sharing an anon_vma.
+ *
+ * They need to have the same vm_file, and the flags can only differ
+ * in things that mprotect may change.
+ *
+ * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
+ * we can merge the two vmas. For example, we refuse to merge a vma if
+ * there is a vm_ops->close() function, because that indicates that the
+ * driver is doing some kind of reference counting. But that doesn't
+ * really matter for the anon_vma sharing case.
+ */
+static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
+{
+ return a->vm_end == b->vm_start &&
+ mpol_equal(vma_policy(a), vma_policy(b)) &&
+ a->vm_file == b->vm_file &&
+ !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
+ b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
+}
+
+/*
+ * Do some basic sanity checking to see if we can re-use the anon_vma
+ * from 'old'. The 'a'/'b' vmas are in VM order - one of them will be
+ * the same as 'old', the other will be the new one that is trying
+ * to share the anon_vma.
+ *
+ * NOTE! This runs with mmap_lock held for reading, so it is possible that
+ * the anon_vma of 'old' is concurrently in the process of being set up
+ * by another page fault trying to merge _that_. But that's ok: if it
+ * is being set up, that automatically means that it will be a singleton
+ * acceptable for merging, so we can do all of this optimistically. But
+ * we do that READ_ONCE() to make sure that we never re-load the pointer.
+ *
+ * IOW, the "list_is_singular()" test on the anon_vma_chain only
+ * matters for the 'stable anon_vma' case (i.e. the thing we want to
+ * avoid is returning an anon_vma that is "complex" due to having
+ * gone through a fork).
+ *
+ * We also make sure that the two vma's are compatible (adjacent,
+ * and with the same memory policies). That's all stable, even with just
+ * a read lock on the mmap_lock.
+ */
+static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
+ struct vm_area_struct *a,
+ struct vm_area_struct *b)
+{
+ if (anon_vma_compatible(a, b)) {
+ struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
+
+ if (anon_vma && list_is_singular(&old->anon_vma_chain))
+ return anon_vma;
+ }
+ return NULL;
+}
+
+/*
+ * find_mergeable_anon_vma is used by anon_vma_prepare, to check
+ * neighbouring vmas for a suitable anon_vma, before it goes off
+ * to allocate a new anon_vma. It checks because a repetitive
+ * sequence of mprotects and faults may otherwise lead to distinct
+ * anon_vmas being allocated, preventing vma merge in subsequent
+ * mprotect.
+ */
+struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma *anon_vma = NULL;
+ struct vm_area_struct *prev, *next;
+ VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
+
+ /* Try next first. */
+ next = vma_iter_load(&vmi);
+ if (next) {
+ anon_vma = reusable_anon_vma(next, vma, next);
+ if (anon_vma)
+ return anon_vma;
+ }
+
+ prev = vma_prev(&vmi);
+ VM_BUG_ON_VMA(prev != vma, vma);
+ prev = vma_prev(&vmi);
+ /* Now try prev. */
+ if (prev)
+ anon_vma = reusable_anon_vma(prev, prev, vma);
+
+ /*
+ * We might reach here with anon_vma == NULL if we can't find
+ * any reusable anon_vma.
+ * There's no absolute need to look only at touching neighbours:
+ * we could search further afield for "compatible" anon_vmas.
+ * But it would probably just be a waste of time searching,
+ * or lead to too many vmas hanging off the same anon_vma.
+ * We're trying to allow mprotect remerging later on,
+ * not trying to minimize memory used for anon_vmas.
+ */
+ return anon_vma;
+}
+
+static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
+{
+ return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
+}
+
+static bool vma_is_shared_writable(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
+ (VM_WRITE | VM_SHARED);
+}
+
+static bool vma_fs_can_writeback(struct vm_area_struct *vma)
+{
+ /* No managed pages to writeback. */
+ if (vma->vm_flags & VM_PFNMAP)
+ return false;
+
+ return vma->vm_file && vma->vm_file->f_mapping &&
+ mapping_can_writeback(vma->vm_file->f_mapping);
+}
+
+/*
+ * Does this VMA require the underlying folios to have their dirty state
+ * tracked?
+ */
+bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
+{
+ /* Only shared, writable VMAs require dirty tracking. */
+ if (!vma_is_shared_writable(vma))
+ return false;
+
+ /* Does the filesystem need to be notified? */
+ if (vm_ops_needs_writenotify(vma->vm_ops))
+ return true;
+
+ /*
+ * Even if the filesystem doesn't indicate a need for writenotify, if it
+ * can writeback, dirty tracking is still required.
+ */
+ return vma_fs_can_writeback(vma);
+}
+
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
+{
+ /* If it was private or non-writable, the write bit is already clear */
+ if (!vma_is_shared_writable(vma))
+ return false;
+
+ /* The backer wishes to know when pages are first written to? */
+ if (vm_ops_needs_writenotify(vma->vm_ops))
+ return true;
+
+ /*
+ * Did the open routine do something to the protections that
+ * pgprot_modify won't preserve?
+ */
+ if (pgprot_val(vm_page_prot) !=
+ pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
+ return false;
+
+ /*
+ * Do we need to track softdirty? hugetlb does not support softdirty
+ * tracking yet.
+ */
+ if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
+ return true;
+
+ /* Do we need write faults for uffd-wp tracking? */
+ if (userfaultfd_wp(vma))
+ return true;
+
+ /* Can the mapping track the dirty pages? */
+ return vma_fs_can_writeback(vma);
+}
+
+static DEFINE_MUTEX(mm_all_locks_mutex);
+
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
+{
+ if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
+ /*
+ * The LSB of rb_root.rb_node can't change from under us
+ * because we hold the mm_all_locks_mutex.
+ */
+ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
+ /*
+ * We can safely set the LSB of rb_root.rb_node after taking
+ * the anon_vma->root->rwsem. If some other vma in this mm
+ * shares the same anon_vma we won't take it again.
+ *
+ * No need for atomic instructions here: rb_root.rb_node
+ * can't change from under us thanks to the
+ * anon_vma->root->rwsem.
+ */
+ if (__test_and_set_bit(0, (unsigned long *)
+ &anon_vma->root->rb_root.rb_root.rb_node))
+ BUG();
+ }
+}
+
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
+{
+ if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+ /*
+ * AS_MM_ALL_LOCKS can't change from under us because
+ * we hold the mm_all_locks_mutex.
+ *
+ * Operations on ->flags have to be atomic because
+ * even if AS_MM_ALL_LOCKS is stable thanks to the
+ * mm_all_locks_mutex, there may be other cpus
+ * changing other bitflags in parallel to us.
+ */
+ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
+ BUG();
+ down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
+ }
+}
+
+/*
+ * This operation locks against the VM for all pte/vma/mm related
+ * operations that could ever happen on a certain mm. This includes
+ * vmtruncate, try_to_unmap, and all page faults.
+ *
+ * The caller must take the mmap_lock in write mode before calling
+ * mm_take_all_locks(). The caller isn't allowed to release the
+ * mmap_lock until mm_drop_all_locks() returns.
+ *
+ * mmap_lock in write mode is required in order to block all operations
+ * that could modify pagetables and free pages without need of
+ * altering the vma layout. It's also needed in write mode to avoid new
+ * anon_vmas to be associated with existing vmas.
+ *
+ * A single task can't take more than one mm_take_all_locks() in a row
+ * or it would deadlock.
+ *
+ * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
+ * mapping->flags prevent taking the same lock twice if more than one
+ * vma in this mm is backed by the same anon_vma or address_space.
+ *
+ * We take locks in the following order, according to the comment at the
+ * beginning of mm/rmap.c:
+ * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
+ *   hugetlb mappings);
+ * - all vmas marked locked;
+ * - all i_mmap_rwsem locks;
+ * - all anon_vma->rwsems.
+ *
+ * Within each of these types the locks can be taken in any order, because
+ * the VM code doesn't nest them, and we are protected from parallel
+ * mm_take_all_locks() by mm_all_locks_mutex.
+ *
+ * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
+ * that may have to take thousands of locks.
+ *
+ * mm_take_all_locks() can fail if it's interrupted by signals.
+ */
+int mm_take_all_locks(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ struct anon_vma_chain *avc;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mmap_assert_write_locked(mm);
+
+ mutex_lock(&mm_all_locks_mutex);
+
+ /*
+ * vma_start_write() does not have a complement in mm_drop_all_locks()
+ * because vma_start_write() is always asymmetrical; it marks a VMA as
+ * being written to until mmap_write_unlock() or mmap_write_downgrade()
+ * is reached.
+ */
+ for_each_vma(vmi, vma) {
+ if (signal_pending(current))
+ goto out_unlock;
+ vma_start_write(vma);
+ }
+
+ vma_iter_init(&vmi, mm, 0);
+ for_each_vma(vmi, vma) {
+ if (signal_pending(current))
+ goto out_unlock;
+ if (vma->vm_file && vma->vm_file->f_mapping &&
+ is_vm_hugetlb_page(vma))
+ vm_lock_mapping(mm, vma->vm_file->f_mapping);
+ }
+
+ vma_iter_init(&vmi, mm, 0);
+ for_each_vma(vmi, vma) {
+ if (signal_pending(current))
+ goto out_unlock;
+ if (vma->vm_file && vma->vm_file->f_mapping &&
+ !is_vm_hugetlb_page(vma))
+ vm_lock_mapping(mm, vma->vm_file->f_mapping);
+ }
+
+ vma_iter_init(&vmi, mm, 0);
+ for_each_vma(vmi, vma) {
+ if (signal_pending(current))
+ goto out_unlock;
+ if (vma->anon_vma)
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ vm_lock_anon_vma(mm, avc->anon_vma);
+ }
+
+ return 0;
+
+out_unlock:
+ mm_drop_all_locks(mm);
+ return -EINTR;
+}
+
+static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+ if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
+ /*
+ * The LSB of rb_root.rb_node can't change to 0 from under
+ * us because we hold the mm_all_locks_mutex.
+ *
+ * We must however clear the bitflag before unlocking
+ * the anon_vma so that users of the anon_vma->rb_root will
+ * never see our bitflag.
+ *
+ * No need for atomic instructions here: rb_root.rb_node
+ * can't change from under us until we release the
+ * anon_vma->root->rwsem.
+ */
+ if (!__test_and_clear_bit(0, (unsigned long *)
+ &anon_vma->root->rb_root.rb_root.rb_node))
+ BUG();
+ anon_vma_unlock_write(anon_vma);
+ }
+}
+
+static void vm_unlock_mapping(struct address_space *mapping)
+{
+ if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+ /*
+ * AS_MM_ALL_LOCKS can't change to 0 from under us
+ * because we hold the mm_all_locks_mutex.
+ */
+ i_mmap_unlock_write(mapping);
+ if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
+ &mapping->flags))
+ BUG();
+ }
+}
+
+/*
+ * The mmap_lock cannot be released by the caller until
+ * mm_drop_all_locks() returns.
+ */
+void mm_drop_all_locks(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ struct anon_vma_chain *avc;
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mmap_assert_write_locked(mm);
+ BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
+
+ for_each_vma(vmi, vma) {
+ if (vma->anon_vma)
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ vm_unlock_anon_vma(avc->anon_vma);
+ if (vma->vm_file && vma->vm_file->f_mapping)
+ vm_unlock_mapping(vma->vm_file->f_mapping);
+ }
+
+ mutex_unlock(&mm_all_locks_mutex);
+}
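+
+/*
+ * Illustrative usage sketch (not part of this change), mirroring how
+ * callers such as mmu notifier registration bracket the pair:
+ *
+ *	mmap_write_lock(mm);
+ *	if (mm_take_all_locks(mm))
+ *		goto out;	(interrupted by a signal)
+ *	... operate on every vma, anon_vma and address_space ...
+ *	mm_drop_all_locks(mm);
+ * out:
+ *	mmap_write_unlock(mm);
+ */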
diff --git a/mm/vma.h b/mm/vma.h
new file mode 100644
index 000000000000..819f994cf727
--- /dev/null
+++ b/mm/vma.h
@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * vma.h
+ *
+ * Core VMA manipulation API implemented in vma.c.
+ */
+#ifndef __MM_VMA_H
+#define __MM_VMA_H
+
+/*
+ * VMA lock generalization
+ */
+struct vma_prepare {
+ struct vm_area_struct *vma;
+ struct vm_area_struct *adj_next;
+ struct file *file;
+ struct address_space *mapping;
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *insert;
+ struct vm_area_struct *remove;
+ struct vm_area_struct *remove2;
+};
+
+struct unlink_vma_file_batch {
+ int count;
+ struct vm_area_struct *vmas[8];
+};
+
+/*
+ * vma munmap operation
+ */
+struct vma_munmap_struct {
+ struct vma_iterator *vmi;
+ struct vm_area_struct *vma; /* The first vma to munmap */
+ struct vm_area_struct *prev; /* vma before the munmap area */
+ struct vm_area_struct *next; /* vma after the munmap area */
+ struct list_head *uf; /* Userfaultfd list_head */
+ unsigned long start; /* Aligned start addr (inclusive) */
+ unsigned long end; /* Aligned end addr (exclusive) */
+ unsigned long unmap_start; /* Unmap PTE start */
+ unsigned long unmap_end; /* Unmap PTE end */
+ int vma_count; /* Number of vmas that will be removed */
+ bool unlock; /* Unlock after the munmap */
+ bool clear_ptes; /* If there are outstanding PTEs to be cleared */
+ bool closed_vm_ops; /* call_mmap() was encountered, so vmas may be closed */
+ /* 1 byte hole */
+ unsigned long nr_pages; /* Number of pages being removed */
+ unsigned long locked_vm; /* Number of locked pages */
+ unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */
+ unsigned long exec_vm;
+ unsigned long stack_vm;
+ unsigned long data_vm;
+};
+
+enum vma_merge_state {
+ VMA_MERGE_START,
+ VMA_MERGE_ERROR_NOMEM,
+ VMA_MERGE_NOMERGE,
+ VMA_MERGE_SUCCESS,
+};
+
+/* Represents a VMA merge operation. */
+struct vma_merge_struct {
+ struct mm_struct *mm;
+ struct vma_iterator *vmi;
+ pgoff_t pgoff;
+ struct vm_area_struct *prev;
+ struct vm_area_struct *next; /* Modified by vma_merge(). */
+ struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
+ unsigned long start;
+ unsigned long end;
+ unsigned long flags;
+ struct file *file;
+ struct anon_vma *anon_vma;
+ struct mempolicy *policy;
+ struct vm_userfaultfd_ctx uffd_ctx;
+ struct anon_vma_name *anon_name;
+ enum vma_merge_state state;
+};
+
+static inline bool vmg_nomem(struct vma_merge_struct *vmg)
+{
+ return vmg->state == VMA_MERGE_ERROR_NOMEM;
+}
+
+/* Assumes addr >= vma->vm_start. */
+static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
+}
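+
+/*
+ * Illustrative example (not part of this change): for
+ * addr == vma->vm_start + 2 * PAGE_SIZE this returns vma->vm_pgoff + 2.
+ */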
+
+#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_) \
+ struct vma_merge_struct name = { \
+ .mm = mm_, \
+ .vmi = vmi_, \
+ .start = start_, \
+ .end = end_, \
+ .flags = flags_, \
+ .pgoff = pgoff_, \
+ .state = VMA_MERGE_START, \
+ }
+
+#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \
+ struct vma_merge_struct name = { \
+ .mm = vma_->vm_mm, \
+ .vmi = vmi_, \
+ .prev = prev_, \
+ .next = NULL, \
+ .vma = vma_, \
+ .start = start_, \
+ .end = end_, \
+ .flags = vma_->vm_flags, \
+ .pgoff = vma_pgoff_offset(vma_, start_), \
+ .file = vma_->vm_file, \
+ .anon_vma = vma_->anon_vma, \
+ .policy = vma_policy(vma_), \
+ .uffd_ctx = vma_->vm_userfaultfd_ctx, \
+ .anon_name = anon_vma_name(vma_), \
+ .state = VMA_MERGE_START, \
+ }
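+
+/*
+ * Illustrative usage (not part of this change): callers typically declare a
+ * merge descriptor on the stack via these macros, as copy_vma() does:
+ *
+ *	VMA_ITERATOR(vmi, mm, addr);
+ *	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
+ *	...
+ *	new_vma = vma_merge_new_range(&vmg);
+ */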
+
+#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+void validate_mm(struct mm_struct *mm);
+#else
+#define validate_mm(mm) do { } while (0)
+#endif
+
+/* Required for expand_downwards(). */
+void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);
+
+/* Required for expand_downwards(). */
+void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);
+
+int vma_expand(struct vma_merge_struct *vmg);
+int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff);
+
+static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
+ struct vm_area_struct *vma, gfp_t gfp)
+{
+ if (vmi->mas.status != ma_start &&
+ ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+ vma_iter_invalidate(vmi);
+
+ __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
+ mas_store_gfp(&vmi->mas, vma, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+#ifdef CONFIG_MMU
+/*
+ * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
+ * @vms: The vma munmap struct
+ * @vmi: The vma iterator
+ * @vma: The first vm_area_struct to munmap
+ * @start: The aligned start address to munmap
+ * @end: The aligned end address to munmap
+ * @uf: The userfaultfd list_head
+ * @unlock: Unlock after the operation. Only unlocked on success
+ */
+static inline void init_vma_munmap(struct vma_munmap_struct *vms,
+ struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, struct list_head *uf,
+ bool unlock)
+{
+ vms->vmi = vmi;
+ vms->vma = vma;
+ if (vma) {
+ vms->start = start;
+ vms->end = end;
+ } else {
+ vms->start = vms->end = 0;
+ }
+ vms->unlock = unlock;
+ vms->uf = uf;
+ vms->vma_count = 0;
+ vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+ vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
+ vms->unmap_start = FIRST_USER_ADDRESS;
+ vms->unmap_end = USER_PGTABLES_CEILING;
+ vms->clear_ptes = false;
+ vms->closed_vm_ops = false;
+}
+#endif
+
+int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach);
+
+void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach);
+
+void vms_clean_up_area(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach);
+
+/*
+ * reattach_vmas() - Undo any munmap work and free resources
+ * @mas_detach: The maple state with the detached maple tree
+ *
+ * Reattach any detached vmas and free up the maple tree used to track the vmas.
+ */
+static inline void reattach_vmas(struct ma_state *mas_detach)
+{
+ struct vm_area_struct *vma;
+
+ mas_set(mas_detach, 0);
+ mas_for_each(mas_detach, vma, ULONG_MAX)
+ vma_mark_detached(vma, false);
+
+ __mt_destroy(mas_detach->tree);
+}
+
+/*
+ * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
+ * operation.
+ * @vms: The vma unmap structure
+ * @mas_detach: The maple state with the detached maple tree
+ *
+ * Reattach any detached vmas, free up the maple tree used to track the vmas.
+ * If that's not possible because the ptes are cleared (and vm_ops->close() may
+ * have been called), then a NULL is written over the vmas and the vmas are
+ * removed (munmap() completed).
+ */
+static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct ma_state *mas = &vms->vmi->mas;
+
+ if (!vms->nr_pages)
+ return;
+
+ if (vms->clear_ptes)
+ return reattach_vmas(mas_detach);
+
+ /*
+ * Aborting cannot just call the vm_ops open() because they are often
+ * not symmetrical and state data has been lost. Resort to the old
+ * failure method of leaving a gap where the MAP_FIXED mapping failed.
+ */
+ mas_set_range(mas, vms->start, vms->end - 1);
+ if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
+ pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
+ current->comm, current->pid);
+ /* Leaving vmas detached and in-tree may hamper recovery */
+ reattach_vmas(mas_detach);
+ } else {
+ /* Clean up the insertion of the unfortunate gap */
+ vms_complete_munmap_vmas(vms, mas_detach);
+ }
+}
+
+int
+do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool unlock);
+
+int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool unlock);
+
+void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);
+
+void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
+ struct vm_area_struct *prev, struct vm_area_struct *next);
+
+/* We are about to modify the VMA's flags. */
+struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
+ struct vm_area_struct *prev, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags);
+
+/* We are about to modify the VMA's flags and/or anon_name. */
+struct vm_area_struct
+*vma_modify_flags_name(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long new_flags,
+ struct anon_vma_name *new_name);
+
+/* We are about to modify the VMA's memory policy. */
+struct vm_area_struct
+*vma_modify_policy(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct mempolicy *new_pol);
+
+/* We are about to modify the VMA's flags and/or uffd context. */
+struct vm_area_struct
+*vma_modify_flags_uffd(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags,
+ struct vm_userfaultfd_ctx new_ctx);
+
+struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);
+
+struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
+ struct vm_area_struct *vma,
+ unsigned long delta);
+
+void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);
+
+void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);
+
+void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
+ struct vm_area_struct *vma);
+
+void unlink_file_vma(struct vm_area_struct *vma);
+
+void vma_link_file(struct vm_area_struct *vma);
+
+int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);
+
+struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ unsigned long addr, unsigned long len, pgoff_t pgoff,
+ bool *need_rmap_locks);
+
+struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);
+
+bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
+bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
+
+int mm_take_all_locks(struct mm_struct *mm);
+void mm_drop_all_locks(struct mm_struct *mm);
+
+static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
+{
+ /*
+ * We want to check manually if we can change individual PTEs writable
+ * if we can't do that automatically for all PTEs in a mapping. For
+ * private mappings, that's always the case when we have write
+ * permissions as we properly have to handle COW.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ return vma_wants_writenotify(vma, vma->vm_page_prot);
+ return !!(vma->vm_flags & VM_WRITE);
+}
+
+#ifdef CONFIG_MMU
+static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+{
+ return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
+}
+#endif
+
+static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
+ unsigned long min)
+{
+ return mas_prev(&vmi->mas, min);
+}
+
+/*
+ * These three helpers classify VMAs for virtual memory accounting.
+ */
+
+/*
+ * Executable code area - executable, not writable, not stack
+ */
+static inline bool is_exec_mapping(vm_flags_t flags)
+{
+ return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
+}
+
+/*
+ * Stack area (including shadow stacks)
+ *
+ * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
+ * do_mmap() forbids all other combinations.
+ */
+static inline bool is_stack_mapping(vm_flags_t flags)
+{
+ return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
+}
+
+/*
+ * Data area - private, writable, not stack
+ */
+static inline bool is_data_mapping(vm_flags_t flags)
+{
+ return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
+}
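+
+/*
+ * Illustrative examples (not part of this change): a private writable
+ * anonymous mapping (VM_READ | VM_WRITE) counts as a data mapping, a
+ * read-only PROT_EXEC file mapping (VM_READ | VM_EXEC) as an exec
+ * mapping, and a VM_GROWSDOWN stack as a stack mapping.
+ */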
+
+static inline void vma_iter_config(struct vma_iterator *vmi,
+ unsigned long index, unsigned long last)
+{
+ __mas_set_range(&vmi->mas, index, last - 1);
+}
+
+static inline void vma_iter_reset(struct vma_iterator *vmi)
+{
+ mas_reset(&vmi->mas);
+}
+
+static inline
+struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
+{
+ return mas_prev_range(&vmi->mas, min);
+}
+
+static inline
+struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_next_range(&vmi->mas, max);
+}
+
+static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ return mas_empty_area(&vmi->mas, min, max - 1, size);
+}
+
+static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
+}
+
+/*
+ * VMA Iterator functions shared between nommu and mmap
+ */
+static inline int vma_iter_prealloc(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
+}
+
+static inline void vma_iter_clear(struct vma_iterator *vmi)
+{
+ mas_store_prealloc(&vmi->mas, NULL);
+}
+
+static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
+{
+ return mas_walk(&vmi->mas);
+}
+
+/* Store a VMA with preallocated memory */
+static inline void vma_iter_store(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
+ vmi->mas.index > vma->vm_start)) {
+ pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
+ vmi->mas.index, vma->vm_start, vma->vm_start,
+ vma->vm_end, vmi->mas.index, vmi->mas.last);
+ }
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
+ vmi->mas.last < vma->vm_start)) {
+ pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
+ vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
+ vmi->mas.index, vmi->mas.last);
+ }
+#endif
+
+ if (vmi->mas.status != ma_start &&
+ ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+ vma_iter_invalidate(vmi);
+
+ __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
+ mas_store_prealloc(&vmi->mas, vma);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+ return vmi->mas.index;
+}
+
+static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
+{
+ return vmi->mas.last + 1;
+}
+
+static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
+ unsigned long count)
+{
+ return mas_expected_entries(&vmi->mas, count);
+}
+
+static inline
+struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
+{
+ return mas_prev_range(&vmi->mas, 0);
+}
+
+/*
+ * Retrieve the next VMA and rewind the iterator to the end of the previous
+ * VMA or, if there is no previous VMA, to index 0.
+ */
+static inline
+struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
+ struct vm_area_struct **pprev)
+{
+ struct vm_area_struct *next = vma_next(vmi);
+ struct vm_area_struct *prev = vma_prev(vmi);
+
+ /*
+ * Consider the case where no previous VMA exists. We advance to the
+ * next VMA, skipping any gap, then rewind to the start of the range.
+ *
+ * If we were to unconditionally advance to the next range we'd wind up
+ * at the next VMA again, so we check to ensure there is a previous VMA
+ * to skip over.
+ */
+ if (prev)
+ vma_iter_next_range(vmi);
+
+ if (pprev)
+ *pprev = prev;
+
+ return next;
+}
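+
+/*
+ * Illustrative example (not part of this change): with VMAs A at
+ * [0x1000, 0x2000) and B at [0x5000, 0x6000) and the iterator positioned
+ * in the gap at 0x3000, this returns B, sets *pprev to A, and leaves the
+ * iterator on the gap following A, ready for a store into that range.
+ */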
+
+#ifdef CONFIG_64BIT
+
+static inline bool vma_is_sealed(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_SEALED);
+}
+
+/*
+ * Check whether a vma is sealed for modification.
+ * Returns true if modification is allowed.
+ */
+static inline bool can_modify_vma(struct vm_area_struct *vma)
+{
+ if (unlikely(vma_is_sealed(vma)))
+ return false;
+
+ return true;
+}
+
+bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);
+
+#else
+
+static inline bool can_modify_vma(struct vm_area_struct *vma)
+{
+ return true;
+}
+
+static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
+{
+ return true;
+}
+
+#endif
+
+#endif /* __MM_VMA_H */
diff --git a/mm/vma_internal.h b/mm/vma_internal.h
new file mode 100644
index 000000000000..b930ab12a587
--- /dev/null
+++ b/mm/vma_internal.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * vma_internal.h
+ *
+ * Headers required by vma.c, which can be substituted accordingly when testing
+ * VMA functionality.
+ */
+
+#ifndef __MM_VMA_INTERNAL_H
+#define __MM_VMA_INTERNAL_H
+
+#include <linux/backing-dev.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/huge_mm.h>
+#include <linux/hugetlb_inline.h>
+#include <linux/kernel.h>
+#include <linux/khugepaged.h>
+#include <linux/list.h>
+#include <linux/maple_tree.h>
+#include <linux/mempolicy.h>
+#include <linux/mm.h>
+#include <linux/mm_inline.h>
+#include <linux/mm_types.h>
+#include <linux/mman.h>
+#include <linux/mmap_lock.h>
+#include <linux/mmdebug.h>
+#include <linux/mmu_context.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/pfn.h>
+#include <linux/rcupdate.h>
+#include <linux/rmap.h>
+#include <linux/rwsem.h>
+#include <linux/sched/signal.h>
+#include <linux/swap.h>
+#include <linux/uprobes.h>
+#include <linux/userfaultfd_k.h>
+
+#include <asm/current.h>
+#include <asm/tlb.h>
+
+#include "internal.h"
+
+#endif /* __MM_VMA_INTERNAL_H */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a0df1e2e155a..634162271c00 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -105,7 +105,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (!pte)
return -ENOMEM;
do {
- if (!pte_none(ptep_get(pte))) {
+ if (unlikely(!pte_none(ptep_get(pte)))) {
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
dump_page(page, "remapping already mapped page");
@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
{
vm->flags = flags;
vm->addr = (void *)va->va_start;
- vm->size = va->va_end - va->va_start;
+ vm->size = va_size(va);
vm->caller = caller;
va->vm = vm;
}
@@ -2018,7 +2018,7 @@ retry:
if (vm) {
vm->addr = (void *)va->va_start;
- vm->size = va->va_end - va->va_start;
+ vm->size = va_size(va);
va->vm = vm;
}
@@ -2131,23 +2131,18 @@ reclaim_list_global(struct list_head *head)
static void
decay_va_pool_node(struct vmap_node *vn, bool full_decay)
{
+ LIST_HEAD(decay_list);
+ struct rb_root decay_root = RB_ROOT;
struct vmap_area *va, *nva;
- struct list_head decay_list;
- struct rb_root decay_root;
unsigned long n_decay;
int i;
- decay_root = RB_ROOT;
- INIT_LIST_HEAD(&decay_list);
-
for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
- struct list_head tmp_list;
+ LIST_HEAD(tmp_list);
if (list_empty(&vn->pool[i].head))
continue;
- INIT_LIST_HEAD(&tmp_list);
-
/* Detach the pool, so no-one can access it. */
spin_lock(&vn->pool_lock);
list_replace_init(&vn->pool[i].head, &tmp_list);
@@ -2198,7 +2193,7 @@ static void purge_vmap_node(struct work_struct *work)
vn->nr_purged = 0;
list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
- unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+ unsigned long nr = va_size(va) >> PAGE_SHIFT;
unsigned long orig_start = va->va_start;
unsigned long orig_end = va->va_end;
unsigned int vn_id = decode_vn_id(va->flags);
@@ -2344,8 +2339,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
if (WARN_ON_ONCE(!list_empty(&va->list)))
return;
- nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
- PAGE_SHIFT, &vmap_lazy_nr);
+ nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT,
+ &vmap_lazy_nr);
/*
* If it was request by a certain node we would like to
@@ -2941,8 +2936,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
if (WARN_ON_ONCE(!va))
return;
- debug_check_no_locks_freed((void *)va->va_start,
- (va->va_end - va->va_start));
+ debug_check_no_locks_freed((void *)va->va_start, va_size(va));
free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
@@ -3518,8 +3512,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
unsigned int order, unsigned int nr_pages, struct page **pages)
{
unsigned int nr_allocated = 0;
- gfp_t alloc_gfp = gfp;
- bool nofail = gfp & __GFP_NOFAIL;
struct page *page;
int i;
@@ -3530,9 +3522,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* more permissive.
*/
if (!order) {
- /* bulk allocator doesn't support nofail req. officially */
- gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
-
while (nr_allocated < nr_pages) {
unsigned int nr, nr_pages_request;
@@ -3550,12 +3539,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* but mempolicy wants to alloc memory by interleaving.
*/
if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
- nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
+ nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
nr_pages_request,
pages + nr_allocated);
-
else
- nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
+ nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
nr_pages_request,
pages + nr_allocated);
@@ -3569,30 +3557,24 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
if (nr != nr_pages_request)
break;
}
- } else if (gfp & __GFP_NOFAIL) {
- /*
- * Higher order nofail allocations are really expensive and
- * potentially dangerous (pre-mature OOM, disruptive reclaim
- * and compaction etc.
- */
- alloc_gfp &= ~__GFP_NOFAIL;
}
/* High-order pages or fallback path if "bulk" fails. */
while (nr_allocated < nr_pages) {
- if (!nofail && fatal_signal_pending(current))
+ if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
break;
if (nid == NUMA_NO_NODE)
- page = alloc_pages_noprof(alloc_gfp, order);
+ page = alloc_pages_noprof(gfp, order);
else
- page = alloc_pages_node_noprof(nid, alloc_gfp, order);
+ page = alloc_pages_node_noprof(nid, gfp, order);
+
if (unlikely(!page))
break;
/*
- * Higher order allocations must be able to be treated as
- * indepdenent small pages by callers (as they can with
+ * High-order allocations must be able to be treated as
+ * independent small pages by callers (as they can with
* small-page vmallocs). Some drivers do their own refcounting
* on vmalloc_to_page() pages, some use page->mapping,
* page->lru, etc.
@@ -3653,7 +3635,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
page_order = vm_area_page_order(area);
- area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
+ /*
+ * High-order nofail allocations are really expensive and
+ * potentially dangerous (premature OOM, disruptive reclaim,
+ * compaction etc.).
+ *
+ * Please note that __vmalloc_node_range_noprof() falls back
+ * to order-0 pages if the high-order attempt is unsuccessful.
+ */
+ area->nr_pages = vm_area_alloc_pages((page_order ?
+ gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
node, page_order, nr_small_pages, area->pages);
atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
@@ -4033,6 +4024,76 @@ void *vzalloc_node_noprof(unsigned long size, int node)
}
EXPORT_SYMBOL(vzalloc_node_noprof);
+/**
+ * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
+ * @p: object to reallocate memory for
+ * @size: the size to reallocate
+ * @flags: the flags for the page level allocator
+ *
+ * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
+ * @p is not a %NULL pointer, the object pointed to is freed.
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
+ *
+ * This function must not be called concurrently with itself or vfree() for the
+ * same memory allocation.
+ *
+ * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
+ * failure
+ */
+void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+{
+ size_t old_size = 0;
+ void *n;
+
+ if (!size) {
+ vfree(p);
+ return NULL;
+ }
+
+ if (p) {
+ struct vm_struct *vm;
+
+ vm = find_vm_area(p);
+ if (unlikely(!vm)) {
+ WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
+ return NULL;
+ }
+
+ old_size = get_vm_area_size(vm);
+ }
+
+ /*
+ * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
+ * would be a good heuristic for when to shrink the vm_area?
+ */
+ if (size <= old_size) {
+ /* Zero out spare memory. */
+ if (want_init_on_alloc(flags))
+ memset((void *)p + size, 0, old_size - size);
+
+ return (void *)p;
+ }
+
+ /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
+ n = __vmalloc_noprof(size, flags);
+ if (!n)
+ return NULL;
+
+ if (p) {
+ memcpy(n, p, old_size);
+ vfree(p);
+ }
+
+ return n;
+}
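+
+/*
+ * Illustrative usage (not part of this change), assuming the usual
+ * vrealloc() wrapper around this _noprof variant:
+ *
+ *	new = vrealloc(buf, new_size, GFP_KERNEL);
+ *	if (!new)
+ *		vfree(buf);
+ *
+ * On failure the old allocation is left intact, so the caller frees it
+ * (or keeps using it), mirroring krealloc() semantics.
+ */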
+
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
@@ -4873,7 +4934,7 @@ static void show_purge_info(struct seq_file *m)
list_for_each_entry(va, &vn->lazy.head, list) {
seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
(void *)va->va_start, (void *)va->va_end,
- va->va_end - va->va_start);
+ va_size(va));
}
spin_unlock(&vn->lazy.lock);
}
@@ -4895,7 +4956,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
if (va->flags & VMAP_RAM)
seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
(void *)va->va_start, (void *)va->va_end,
- va->va_end - va->va_start);
+ va_size(va));
continue;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd489c1af228..749cdc110c74 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -628,7 +628,7 @@ typedef enum {
* Calls ->writepage().
*/
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
- struct swap_iocb **plug)
+ struct swap_iocb **plug, struct list_head *folio_list)
{
/*
* If the folio is dirty, only perform writeback if that write
@@ -676,6 +676,14 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
.swap_plug = plug,
};
+ /*
+ * A large shmem folio can be split if CONFIG_THP_SWAP is
+ * not enabled or if contiguous swap entries cannot be
+ * allocated.
+ */
+ if (shmem_mapping(mapping) && folio_test_large(folio))
+ wbc.list = folio_list;
+
folio_set_reclaim(folio);
res = mapping->a_ops->writepage(&folio->page, &wbc);
if (res < 0)
@@ -863,7 +871,12 @@ static enum folio_references folio_check_references(struct folio *folio,
if (vm_flags & VM_LOCKED)
return FOLIOREF_ACTIVATE;
- /* rmap lock contention: rotate */
+ /*
+ * There are two cases to consider.
+ * 1) Rmap lock contention: rotate.
+ * 2) Skip the non-shared swapbacked folio mapped solely by
+ * the exiting or OOM-reaped process.
+ */
if (referenced_ptes == -1)
return FOLIOREF_KEEP;
@@ -1003,9 +1016,6 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
&nr_succeeded);
- mod_node_page_state(pgdat, PGDEMOTE_KSWAPD + reclaimer_offset(),
- nr_succeeded);
-
return nr_succeeded;
}
@@ -1222,13 +1232,14 @@ retry:
goto keep_locked;
if (folio_test_large(folio)) {
/* cannot split folio, skip it */
- if (!can_split_folio(folio, NULL))
+ if (!can_split_folio(folio, 1, NULL))
goto activate_locked;
/*
* Split partially mapped folios right away.
* We can free the unmapped pages without IO.
*/
- if (data_race(!list_empty(&folio->_deferred_list)) &&
+ if (data_race(!list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio)) &&
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
@@ -1252,11 +1263,6 @@ retry:
goto activate_locked_split;
}
}
- } else if (folio_test_swapbacked(folio) &&
- folio_test_large(folio)) {
- /* Split shmem folio */
- if (split_folio_to_list(folio, folio_list))
- goto keep_locked;
}
/*
@@ -1357,12 +1363,25 @@ retry:
* starts and then write it out here.
*/
try_to_unmap_flush_dirty();
- switch (pageout(folio, mapping, &plug)) {
+ switch (pageout(folio, mapping, &plug, folio_list)) {
case PAGE_KEEP:
goto keep_locked;
case PAGE_ACTIVATE:
+ /*
+ * If the shmem folio is split during writeback to swap,
+ * the tail pages will make their own pass through
+ * this function and be accounted for then.
+ */
+ if (nr_pages > 1 && !folio_test_large(folio)) {
+ sc->nr_scanned -= (nr_pages - 1);
+ nr_pages = 1;
+ }
goto activate_locked;
case PAGE_SUCCESS:
+ if (nr_pages > 1 && !folio_test_large(folio)) {
+ sc->nr_scanned -= (nr_pages - 1);
+ nr_pages = 1;
+ }
stat->nr_pageout += nr_pages;
if (folio_test_writeback(folio))
@@ -1495,7 +1514,8 @@ keep:
/* 'folio_list' is always empty here */
/* Migrate folios selected for demotion */
- nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
+ stat->nr_demoted = demote_folio_list(&demote_folios, pgdat);
+ nr_reclaimed += stat->nr_demoted;
/* Folios that could not be demoted are still in @demote_folios */
if (!list_empty(&demote_folios)) {
/* Folios which weren't demoted go back on @folio_list */
@@ -1941,6 +1961,8 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
spin_lock_irq(&lruvec->lru_lock);
move_folios_to_lru(lruvec, &folio_list);
+ __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
+ stat.nr_demoted);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
item = PGSTEAL_KSWAPD + reclaimer_offset();
if (!cgroup_reclaim(sc))
@@ -2239,10 +2261,11 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
/*
- * Flush the memory cgroup stats, so that we read accurate per-memcg
- * lruvec stats for heuristics.
+ * Flush the memory cgroup stats in rate-limited way as we don't need
+ * most accurate stats here. We may switch to regular stats flushing
+ * in the future once it is cheap enough.
*/
- mem_cgroup_flush_stats(sc->target_mem_cgroup);
+ mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup);
/*
* Determine the scan balance between anon and file LRUs.
@@ -3456,7 +3479,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
goto next;
if (!pmd_trans_huge(pmd[i])) {
- if (should_clear_pmd_young())
+ if (!walk->force_scan && should_clear_pmd_young())
pmdp_test_and_clear_young(vma, addr, pmd + i);
goto next;
}
@@ -3543,7 +3566,7 @@ restart:
walk->mm_stats[MM_NONLEAF_TOTAL]++;
- if (should_clear_pmd_young()) {
+ if (!walk->force_scan && should_clear_pmd_young()) {
if (!pmd_young(val))
continue;
@@ -4300,7 +4323,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
}
/* ineligible */
- if (zone > sc->reclaim_idx) {
+ if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
gen = folio_inc_gen(lruvec, folio, false);
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
@@ -6644,7 +6667,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
continue;
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
- mark = wmark_pages(zone, WMARK_PROMO);
+ mark = promo_wmark_pages(zone);
else
mark = high_wmark_pages(zone);
if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
@@ -7519,7 +7542,9 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
ret = __node_reclaim(pgdat, gfp_mask, order);
clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
- if (!ret)
+ if (ret)
+ count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
+ else
count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
return ret;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e875f2a4915f..b5a4cea423e1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1314,6 +1314,7 @@ const char * const vmstat_text[] = {
"pgsteal_file",
#ifdef CONFIG_NUMA
+ "zone_reclaim_success",
"zone_reclaim_failed",
#endif
"pginodesteal",
@@ -1384,6 +1385,7 @@ const char * const vmstat_text[] = {
"thp_split_page",
"thp_split_page_failed",
"thp_deferred_split_page",
+ "thp_underused_split_page",
"thp_split_pmd",
"thp_scan_exceed_none_pte",
"thp_scan_exceed_swap_pte",
@@ -1435,6 +1437,30 @@ const char * const vmstat_text[] = {
"vma_lock_retry",
"vma_lock_miss",
#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ "kstack_1k",
+#if THREAD_SIZE > 1024
+ "kstack_2k",
+#endif
+#if THREAD_SIZE > 2048
+ "kstack_4k",
+#endif
+#if THREAD_SIZE > 4096
+ "kstack_8k",
+#endif
+#if THREAD_SIZE > 8192
+ "kstack_16k",
+#endif
+#if THREAD_SIZE > 16384
+ "kstack_32k",
+#endif
+#if THREAD_SIZE > 32768
+ "kstack_64k",
+#endif
+#if THREAD_SIZE > 65536
+ "kstack_rest",
+#endif
+#endif
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
@@ -1718,6 +1744,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n min %lu"
"\n low %lu"
"\n high %lu"
+ "\n promo %lu"
"\n spanned %lu"
"\n present %lu"
"\n managed %lu"
@@ -1727,6 +1754,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
min_wmark_pages(zone),
low_wmark_pages(zone),
high_wmark_pages(zone),
+ promo_wmark_pages(zone),
zone->spanned_pages,
zone->present_pages,
zone_managed_pages(zone),
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 2ebfed32871b..379d24b4fef9 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -144,7 +144,7 @@ struct z3fold_pool {
const char *name;
spinlock_t lock;
spinlock_t stale_lock;
- struct list_head *unbuddied;
+ struct list_head __percpu *unbuddied;
struct list_head stale;
atomic64_t pages_nr;
struct kmem_cache *c_handle;
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2d3163e4da96..16a07def09c9 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -20,7 +20,7 @@
* page->index: links together all component pages of a zspage
* For the huge page, this is always 0, so we use this field
* to store handle.
- * page->page_type: PG_zsmalloc, lower 16 bit locate the first object
+ * page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object
* offset in a subpage of a zspage
*
* Usage of struct page flags:
@@ -54,6 +54,7 @@
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
+#include <linux/sprintf.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#include <linux/debugfs.h>
@@ -293,17 +294,27 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
static int create_cache(struct zs_pool *pool)
{
- pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
- 0, 0, NULL);
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name);
+ if (!name)
+ return -ENOMEM;
+ pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE,
+ 0, 0, NULL);
+ kfree(name);
if (!pool->handle_cachep)
- return 1;
+ return -EINVAL;
- pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
- 0, 0, NULL);
+ name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name);
+ if (!name)
+ return -ENOMEM;
+ pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage),
+ 0, 0, NULL);
+ kfree(name);
if (!pool->zspage_cachep) {
kmem_cache_destroy(pool->handle_cachep);
pool->handle_cachep = NULL;
- return 1;
+ return -EINVAL;
}
return 0;
@@ -452,13 +463,7 @@ static inline struct page *get_first_page(struct zspage *zspage)
return first_page;
}
-#define FIRST_OBJ_PAGE_TYPE_MASK 0xffff
-
-static inline void reset_first_obj_offset(struct page *page)
-{
- VM_WARN_ON_ONCE(!PageZsmalloc(page));
- page->page_type |= FIRST_OBJ_PAGE_TYPE_MASK;
-}
+#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff
static inline unsigned int get_first_obj_offset(struct page *page)
{
@@ -468,8 +473,8 @@ static inline unsigned int get_first_obj_offset(struct page *page)
static inline void set_first_obj_offset(struct page *page, unsigned int offset)
{
- /* With 16 bit available, we can support offsets into 64 KiB pages. */
- BUILD_BUG_ON(PAGE_SIZE > SZ_64K);
+ /* With 24 bits available, we can support offsets into 16 MiB pages. */
+ BUILD_BUG_ON(PAGE_SIZE > SZ_16M);
VM_WARN_ON_ONCE(!PageZsmalloc(page));
VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
@@ -808,7 +813,6 @@ static void reset_page(struct page *page)
ClearPagePrivate(page);
set_page_private(page, 0);
page->index = 0;
- reset_first_obj_offset(page);
__ClearPageZsmalloc(page);
}
diff --git a/mm/zswap.c b/mm/zswap.c
index adeaf9c97fde..449914ea9919 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -44,8 +44,6 @@
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
-/* The number of same-value filled pages currently stored in zswap */
-static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
/*
* The statistics below are not protected from concurrent access for
@@ -185,8 +183,11 @@ static struct shrinker *zswap_shrinker;
*
* swpentry - associated swap entry, the offset indexes into the red-black tree
* length - the length in bytes of the compressed page data. Needed during
- * decompression. For a same value filled page length is 0, and both
- * pool and lru are invalid and must be ignored.
+ * decompression.
+ * referenced - true if the entry recently entered the zswap pool. Unset by the
+ * writeback logic. The entry is only reclaimed by the writeback
+ * logic if referenced is unset. See comments in the shrinker
+ * section for context.
* pool - the zswap_pool the entry's data is in
* handle - zpool allocation handle that stores the compressed page data
* value - value of the same-value filled pages which have same content
@@ -196,11 +197,9 @@ static struct shrinker *zswap_shrinker;
struct zswap_entry {
swp_entry_t swpentry;
unsigned int length;
+ bool referenced;
struct zswap_pool *pool;
- union {
- unsigned long handle;
- unsigned long value;
- };
+ unsigned long handle;
struct obj_cgroup *objcg;
struct list_head lru;
};
@@ -700,11 +699,8 @@ static inline int entry_to_nid(struct zswap_entry *entry)
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
- atomic_long_t *nr_zswap_protected;
- unsigned long lru_size, old, new;
int nid = entry_to_nid(entry);
struct mem_cgroup *memcg;
- struct lruvec *lruvec;
/*
* Note that it is safe to use rcu_read_lock() here, even in the face of
@@ -722,19 +718,6 @@ static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
memcg = mem_cgroup_from_entry(entry);
/* will always succeed */
list_lru_add(list_lru, &entry->lru, nid, memcg);
-
- /* Update the protection area */
- lru_size = list_lru_count_one(list_lru, nid, memcg);
- lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
- nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
- old = atomic_long_inc_return(nr_zswap_protected);
- /*
- * Decay to avoid overflow and adapt to changing workloads.
- * This is based on LRU reclaim cost decaying heuristics.
- */
- do {
- new = old > lru_size / 4 ? old / 2 : old;
- } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
rcu_read_unlock();
}
@@ -752,7 +735,7 @@ static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
void zswap_lruvec_state_init(struct lruvec *lruvec)
{
- atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
+ atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}
void zswap_folio_swapin(struct folio *folio)
@@ -761,16 +744,29 @@ void zswap_folio_swapin(struct folio *folio)
if (folio) {
lruvec = folio_lruvec(folio);
- atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
}
}
+/*
+ * This function should be called when a memcg is being offlined.
+ *
+ * Since the global shrinker shrink_worker() may hold a reference
+ * to the memcg, we must check and release the reference in
+ * zswap_next_shrink.
+ *
+ * shrink_worker() must handle the case where this function releases
+ * the reference to the memcg being shrunk.
+ */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
/* lock out zswap shrinker walking memcg tree */
spin_lock(&zswap_shrink_lock);
- if (zswap_next_shrink == memcg)
- zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ if (zswap_next_shrink == memcg) {
+ do {
+ zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ } while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
+ }
spin_unlock(&zswap_shrink_lock);
}
@@ -799,13 +795,9 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
*/
static void zswap_entry_free(struct zswap_entry *entry)
{
- if (!entry->length)
- atomic_dec(&zswap_same_filled_pages);
- else {
- zswap_lru_del(&zswap_list_lru, entry);
- zpool_free(entry->pool->zpool, entry->handle);
- zswap_pool_put(entry->pool);
- }
+ zswap_lru_del(&zswap_list_lru, entry);
+ zpool_free(entry->pool->zpool, entry->handle);
+ zswap_pool_put(entry->pool);
if (entry->objcg) {
obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
obj_cgroup_put(entry->objcg);
@@ -1082,6 +1074,28 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
/*********************************
* shrinker functions
**********************************/
+/*
+ * The dynamic shrinker is modulated by the following factors:
+ *
+ * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
+ * the entry a second chance) before rotating it in the LRU list. If the
+ * entry is considered again by the shrinker, with its referenced bit unset,
+ * it is written back. The writeback rate as a result is dynamically
+ * adjusted by the pool activities - if the pool is dominated by new entries
+ * (i.e. lots of recent zswapouts), these entries will be protected and
+ * the writeback rate will slow down. On the other hand, if the pool has a
+ * lot of stagnant entries, these entries will be reclaimed immediately,
+ * effectively increasing the writeback rate.
+ *
+ * 2. Swapins counter: if we observe swapins, it is a sign that we are
+ * overshrinking and should slow down. We maintain a swapins counter,
+ * which is consumed and subtracted from the number of eligible objects
+ * on the LRU in zswap_shrinker_count().
+ *
+ * 3. Compression ratio. The better the workload compresses, the smaller
+ * the gain we can expect from writeback. We scale down the number of
+ * objects available for reclaim by this ratio.
+ */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
spinlock_t *lock, void *arg)
{
@@ -1092,6 +1106,16 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
int writeback_result;
/*
+ * Second chance algorithm: if the entry has its referenced bit set, give it
+ * a second chance. Only clear the referenced bit and rotate it in
+ * zswap's LRU list.
+ */
+ if (entry->referenced) {
+ entry->referenced = false;
+ return LRU_ROTATE;
+ }
+
+ /*
* As soon as we drop the LRU lock, the entry can be freed by
* a concurrent invalidation. This means the following:
*
@@ -1157,8 +1181,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
- struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
- unsigned long shrink_ret, nr_protected, lru_size;
+ unsigned long shrink_ret;
bool encountered_page_in_swapcache = false;
if (!zswap_shrinker_enabled ||
@@ -1167,25 +1190,6 @@ static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
return SHRINK_STOP;
}
- nr_protected =
- atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
- lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
-
- /*
- * Abort if we are shrinking into the protected region.
- *
- * This short-circuiting is necessary because if we have too many multiple
- * concurrent reclaimers getting the freeable zswap object counts at the
- * same time (before any of them made reasonable progress), the total
- * number of reclaimed objects might be more than the number of unprotected
- * objects (i.e the reclaimers will reclaim into the protected area of the
- * zswap LRU).
- */
- if (nr_protected >= lru_size - sc->nr_to_scan) {
- sc->nr_scanned = 0;
- return SHRINK_STOP;
- }
-
shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
&encountered_page_in_swapcache);
@@ -1200,7 +1204,10 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
{
struct mem_cgroup *memcg = sc->memcg;
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
- unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
+ atomic_long_t *nr_disk_swapins =
+ &lruvec->zswap_lruvec_state.nr_disk_swapins;
+ unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
+ nr_remain;
if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
return 0;
@@ -1233,25 +1240,33 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
if (!nr_stored)
return 0;
- nr_protected =
- atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
+ if (!nr_freeable)
+ return 0;
+
/*
- * Subtract the lru size by an estimate of the number of pages
- * that should be protected.
+	 * Subtract from the lru size the number of pages that were recently
+	 * swapped in from disk. The idea is that had we protected zswap's LRU
+	 * by this number of pages, these disk swapins would not have happened.
*/
- nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
+ nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
+ do {
+ if (nr_freeable >= nr_disk_swapins_cur)
+ nr_remain = 0;
+ else
+ nr_remain = nr_disk_swapins_cur - nr_freeable;
+ } while (!atomic_long_try_cmpxchg(
+ nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));
+
+ nr_freeable -= nr_disk_swapins_cur - nr_remain;
+ if (!nr_freeable)
+ return 0;
/*
* Scale the number of freeable pages by the memory saving factor.
* This ensures that the better zswap compresses memory, the fewer
* pages we will evict to swap (as it will otherwise incur IO for
* relatively small memory saving).
- *
- * The memory saving factor calculated here takes same-filled pages into
- * account, but those are not freeable since they almost occupy no
- * space. Hence, we may scale nr_freeable down a little bit more than we
- * should if we have a lot of same-filled pages.
*/
return mult_frac(nr_freeable, nr_backing, nr_stored);
}
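Two pieces of zswap_shrinker_count() above deserve unpacking: the try_cmpxchg loop atomically consumes min(nr_freeable, counter) from the shared swapins counter, so racing shrinkers cannot each subtract the full amount, and the final mult_frac() scales the remainder by the compression ratio. A userspace sketch of both steps with C11 atomics; the names are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_disk_swapins;

/* consume up to nr_freeable swapins; return how many were consumed */
static long consume_swapins(long nr_freeable)
{
	long cur = atomic_load(&nr_disk_swapins);
	long remain;

	do {
		remain = cur > nr_freeable ? cur - nr_freeable : 0;
	} while (!atomic_compare_exchange_weak(&nr_disk_swapins, &cur, remain));

	return cur - remain;
}

/* mult_frac(x, num, den): scale without intermediate overflow */
static unsigned long scale_by_ratio(unsigned long x, unsigned long num,
				    unsigned long den)
{
	return (x / den) * num + (x % den) * num / den;
}

int main(void)
{
	atomic_store(&nr_disk_swapins, 40);

	long freeable = 100 - consume_swapins(100);	/* 100 - 40 = 60 */

	/* 3:1 ratio: 300 stored pages live in 100 backing pages */
	printf("%lu objects offered for writeback\n",
	       scale_by_ratio(freeable, 100, 300));	/* 60 * 100 / 300 = 20 */
	return 0;
}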
@@ -1274,10 +1289,10 @@ static struct shrinker *zswap_alloc_shrinker(void)
static int shrink_memcg(struct mem_cgroup *memcg)
{
- int nid, shrunk = 0;
+ int nid, shrunk = 0, scanned = 0;
if (!mem_cgroup_zswap_writeback_enabled(memcg))
- return -EINVAL;
+ return -ENOENT;
/*
* Skip zombies because their LRUs are reparented and we would be
@@ -1291,63 +1306,94 @@ static int shrink_memcg(struct mem_cgroup *memcg)
shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
&shrink_memcg_cb, NULL, &nr_to_walk);
+ scanned += 1 - nr_to_walk;
}
+
+ if (!scanned)
+ return -ENOENT;
+
return shrunk ? 0 : -EAGAIN;
}
static void shrink_worker(struct work_struct *w)
{
struct mem_cgroup *memcg;
- int ret, failures = 0;
+ int ret, failures = 0, attempts = 0;
unsigned long thr;
/* Reclaim down to the accept threshold */
thr = zswap_accept_thr_pages();
- /* global reclaim will select cgroup in a round-robin fashion. */
+ /*
+	 * Global reclaim will select a cgroup in a round-robin fashion from all
+ * online memcgs, but memcgs that have no pages in zswap and
+ * writeback-disabled memcgs (memory.zswap.writeback=0) are not
+ * candidates for shrinking.
+ *
+ * Shrinking will be aborted if we encounter the following
+ * MAX_RECLAIM_RETRIES times:
+ * - No writeback-candidate memcgs found in a memcg tree walk.
+ * - Shrinking a writeback-candidate memcg failed.
+ *
+ * We save iteration cursor memcg into zswap_next_shrink,
+ * which can be modified by the offline memcg cleaner
+ * zswap_memcg_offline_cleanup().
+ *
+ * Since the offline cleaner is called only once, we cannot leave an
+ * offline memcg reference in zswap_next_shrink.
+	 * We can rely on the cleaner only if we get an online memcg under the lock.
+ *
+ * If we get an offline memcg, we cannot determine if the cleaner has
+ * already been called or will be called later. We must put back the
+ * reference before returning from this function. Otherwise, the
+ * offline memcg left in zswap_next_shrink will hold the reference
+ * until the next run of shrink_worker().
+ */
do {
- spin_lock(&zswap_shrink_lock);
- zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
- memcg = zswap_next_shrink;
-
/*
- * We need to retry if we have gone through a full round trip, or if we
- * got an offline memcg (or else we risk undoing the effect of the
- * zswap memcg offlining cleanup callback). This is not catastrophic
- * per se, but it will keep the now offlined memcg hostage for a while.
+ * Start shrinking from the next memcg after zswap_next_shrink.
+ * When the offline cleaner has already advanced the cursor,
+ * advancing the cursor here overlooks one memcg, but this
+ * should be negligibly rare.
*
- * Note that if we got an online memcg, we will keep the extra
- * reference in case the original reference obtained by mem_cgroup_iter
- * is dropped by the zswap memcg offlining callback, ensuring that the
- * memcg is not killed when we are reclaiming.
+ * If we get an online memcg, keep the extra reference in case
+ * the original one obtained by mem_cgroup_iter() is dropped by
+ * zswap_memcg_offline_cleanup() while we are shrinking the
+ * memcg.
*/
- if (!memcg) {
- spin_unlock(&zswap_shrink_lock);
- if (++failures == MAX_RECLAIM_RETRIES)
- break;
-
- goto resched;
- }
-
- if (!mem_cgroup_tryget_online(memcg)) {
- /* drop the reference from mem_cgroup_iter() */
- mem_cgroup_iter_break(NULL, memcg);
- zswap_next_shrink = NULL;
- spin_unlock(&zswap_shrink_lock);
+ spin_lock(&zswap_shrink_lock);
+ do {
+ memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+ zswap_next_shrink = memcg;
+ } while (memcg && !mem_cgroup_tryget_online(memcg));
+ spin_unlock(&zswap_shrink_lock);
- if (++failures == MAX_RECLAIM_RETRIES)
+ if (!memcg) {
+ /*
+ * Continue shrinking without incrementing failures if
+ * we found candidate memcgs in the last tree walk.
+ */
+ if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
break;
+ attempts = 0;
goto resched;
}
- spin_unlock(&zswap_shrink_lock);
ret = shrink_memcg(memcg);
/* drop the extra reference */
mem_cgroup_put(memcg);
- if (ret == -EINVAL)
- break;
+ /*
+ * There are no writeback-candidate pages in the memcg.
+ * This is not an issue as long as we can find another memcg
+ * with pages in zswap. Skip this without incrementing attempts
+ * and failures.
+ */
+ if (ret == -ENOENT)
+ continue;
+ ++attempts;
+
if (ret && ++failures == MAX_RECLAIM_RETRIES)
break;
resched:
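Stripped of the memcg machinery, the retry accounting in shrink_worker() reduces to the skeleton below. The candidate array and its page counts are toy stand-ins; only the failures/attempts bookkeeping mirrors the hunk above:

#include <errno.h>
#include <stdio.h>

#define MAX_RECLAIM_RETRIES 16

static int pages[3] = { 5, 0, 3 };	/* toy "memcgs" with zswap pages */
static int cursor = -1;

static int next_candidate(void)
{
	if (++cursor > 2) {
		cursor = -1;
		return -1;	/* end of one full round trip */
	}
	return cursor;
}

static int shrink_one(int idx, long *reclaimed)
{
	if (!pages[idx])
		return -ENOENT;	/* no writeback-candidate pages here */
	pages[idx]--;
	(*reclaimed)++;
	return 0;
}

int main(void)
{
	long reclaimed = 0, goal = 6;
	int failures = 0, attempts = 0;

	while (reclaimed < goal) {
		int idx = next_candidate();

		if (idx < 0) {
			/* a fruitless full walk is what counts as a failure */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;
			attempts = 0;
			continue;
		}

		int ret = shrink_one(idx, &reclaimed);

		if (ret == -ENOENT)
			continue;	/* skipped: neither attempt nor failure */
		++attempts;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
	}
	printf("reclaimed %ld pages after %d failures\n", reclaimed, failures);
	return 0;
}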
@@ -1356,42 +1402,6 @@ resched:
}
/*********************************
-* same-filled functions
-**********************************/
-static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
-{
- unsigned long *data;
- unsigned long val;
- unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
- bool ret = false;
-
- data = kmap_local_folio(folio, 0);
- val = data[0];
-
- if (val != data[last_pos])
- goto out;
-
- for (pos = 1; pos < last_pos; pos++) {
- if (val != data[pos])
- goto out;
- }
-
- *value = val;
- ret = true;
-out:
- kunmap_local(data);
- return ret;
-}
-
-static void zswap_fill_folio(struct folio *folio, unsigned long value)
-{
- unsigned long *data = kmap_local_folio(folio, 0);
-
- memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
- kunmap_local(data);
-}
-
-/*********************************
* main API
**********************************/
bool zswap_store(struct folio *folio)
@@ -1402,7 +1412,6 @@ bool zswap_store(struct folio *folio)
struct zswap_entry *entry, *old;
struct obj_cgroup *objcg = NULL;
struct mem_cgroup *memcg = NULL;
- unsigned long value;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1435,13 +1444,6 @@ bool zswap_store(struct folio *folio)
goto reject;
}
- if (zswap_is_folio_same_filled(folio, &value)) {
- entry->length = 0;
- entry->value = value;
- atomic_inc(&zswap_same_filled_pages);
- goto store_entry;
- }
-
/* if entry is successfully added, it keeps the reference */
entry->pool = zswap_pool_current_get();
if (!entry->pool)
@@ -1459,9 +1461,9 @@ bool zswap_store(struct folio *folio)
if (!zswap_compress(folio, entry))
goto put_pool;
-store_entry:
entry->swpentry = swp;
entry->objcg = objcg;
+ entry->referenced = true;
old = xa_store(tree, offset, entry, GFP_KERNEL);
if (xa_is_err(old)) {
@@ -1507,13 +1509,9 @@ store_entry:
return true;
store_failed:
- if (!entry->length)
- atomic_dec(&zswap_same_filled_pages);
- else {
- zpool_free(entry->pool->zpool, entry->handle);
+ zpool_free(entry->pool->zpool, entry->handle);
put_pool:
- zswap_pool_put(entry->pool);
- }
+ zswap_pool_put(entry->pool);
freepage:
zswap_entry_cache_free(entry);
reject:
@@ -1576,10 +1574,7 @@ bool zswap_load(struct folio *folio)
if (!entry)
return false;
- if (entry->length)
- zswap_decompress(entry, folio);
- else
- zswap_fill_folio(folio, entry->value);
+ zswap_decompress(entry, folio);
count_vm_event(ZSWPIN);
if (entry->objcg)
@@ -1682,8 +1677,6 @@ static int zswap_debugfs_init(void)
zswap_debugfs_root, NULL, &total_size_fops);
debugfs_create_atomic_t("stored_pages", 0444,
zswap_debugfs_root, &zswap_stored_pages);
- debugfs_create_atomic_t("same_filled_pages", 0444,
- zswap_debugfs_root, &zswap_same_filled_pages);
return 0;
}
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index bcdab9c23b40..63f988f0c9e8 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -40,6 +40,12 @@ config NET_9P_XEN
This builds support for a transport for 9pfs between
two Xen domains.
+config NET_9P_USBG
+ bool "9P USB Gadget Transport"
+ depends on USB_GADGET=y || USB_GADGET=NET_9P
+ help
+ This builds support for a transport for 9pfs over
+	  USB gadget.
config NET_9P_RDMA
depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
diff --git a/net/9p/Makefile b/net/9p/Makefile
index 1df9b344c30b..22794a451c3f 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_NET_9P_FD) += 9pnet_fd.o
obj-$(CONFIG_NET_9P_XEN) += 9pnet_xen.o
obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
+obj-$(CONFIG_NET_9P_USBG) += 9pnet_usbg.o
9pnet-objs := \
mod.o \
@@ -23,3 +24,6 @@ obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
9pnet_rdma-objs := \
trans_rdma.o \
+
+9pnet_usbg-objs := \
+ trans_usbg.o \
diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
new file mode 100644
index 000000000000..975b76839dca
--- /dev/null
+++ b/net/9p/trans_usbg.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * trans_usbg.c - USB peripheral usb9pfs configuration driver and transport.
+ *
+ * Copyright (C) 2024 Michael Grzeschik <m.grzeschik@pengutronix.de>
+ */
+
+/* Gadget usb9pfs only needs two bulk endpoints, and will use the usb9pfs
+ * transport to mount a host-exported filesystem via the USB gadget.
+ */
+
+/* +--------------------------+ | +--------------------------+
+ * | 9PFS mounting client | | | 9PFS exporting server |
+ * SW | | | | |
+ * | (this:trans_usbg) | | |(e.g. diod or nfs-ganesha)|
+ * +-------------^------------+ | +-------------^------------+
+ * | | |
+ * ------------------|------------------------------------|-------------
+ * | | |
+ * +-------------v------------+ | +-------------v------------+
+ * | | | | |
+ * HW | USB Device Controller <---------> USB Host Controller |
+ * | | | | |
+ * +--------------------------+ | +--------------------------+
+ */
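The transport registers under the name "usbg" (see p9_usbg_trans and MODULE_ALIAS_9P further down), so the client side mounts it like any other 9p transport. A hedged usage sketch; the tag "usbexport" and the mountpoint are hypothetical and must match the configfs instance tag chosen on the gadget:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* equivalent to: mount -t 9p -o trans=usbg usbexport /mnt */
	if (mount("usbexport", "/mnt", "9p", 0, "trans=usbg")) {
		perror("mount");
		return 1;
	}
	return 0;
}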
+
+#include <linux/cleanup.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/func_utils.h>
+
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+
+#define DEFAULT_BUFLEN 16384
+
+struct f_usb9pfs {
+ struct p9_client *client;
+
+ /* 9p request lock for en/dequeue */
+ spinlock_t lock;
+
+ struct usb_request *in_req;
+ struct usb_request *out_req;
+
+ struct usb_ep *in_ep;
+ struct usb_ep *out_ep;
+
+ struct completion send;
+ struct completion received;
+
+ unsigned int buflen;
+
+ struct usb_function function;
+};
+
+static inline struct f_usb9pfs *func_to_usb9pfs(struct usb_function *f)
+{
+ return container_of(f, struct f_usb9pfs, function);
+}
+
+struct f_usb9pfs_opts {
+ struct usb_function_instance func_inst;
+ unsigned int buflen;
+
+ struct f_usb9pfs_dev *dev;
+
+ /* Read/write access to configfs attributes is handled by configfs.
+ *
+	 * The mutex protects the data from concurrent access by read/write
+	 * and symlink creation/removal.
+ */
+ struct mutex lock;
+ int refcnt;
+};
+
+struct f_usb9pfs_dev {
+ struct f_usb9pfs *usb9pfs;
+ struct f_usb9pfs_opts *opts;
+ char tag[41];
+ bool inuse;
+
+ struct list_head usb9pfs_instance;
+};
+
+static DEFINE_MUTEX(usb9pfs_lock);
+static struct list_head usbg_instance_list;
+
+static int usb9pfs_queue_tx(struct f_usb9pfs *usb9pfs, struct p9_req_t *p9_tx_req,
+ gfp_t gfp_flags)
+{
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct usb_request *req = usb9pfs->in_req;
+ int ret;
+
+ if (!(p9_tx_req->tc.size % usb9pfs->in_ep->maxpacket))
+ req->zero = 1;
+
+ req->buf = p9_tx_req->tc.sdata;
+ req->length = p9_tx_req->tc.size;
+ req->context = p9_tx_req;
+
+ dev_dbg(&cdev->gadget->dev, "%s usb9pfs send --> %d/%d, zero: %d\n",
+ usb9pfs->in_ep->name, req->actual, req->length, req->zero);
+
+ ret = usb_ep_queue(usb9pfs->in_ep, req, gfp_flags);
+ if (ret)
+ req->context = NULL;
+
+ dev_dbg(&cdev->gadget->dev, "tx submit --> %d\n", ret);
+
+ return ret;
+}
+
+static int usb9pfs_queue_rx(struct f_usb9pfs *usb9pfs, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ int ret;
+
+ ret = usb_ep_queue(usb9pfs->out_ep, req, gfp_flags);
+
+ dev_dbg(&cdev->gadget->dev, "rx submit --> %d\n", ret);
+
+ return ret;
+}
+
+static int usb9pfs_transmit(struct f_usb9pfs *usb9pfs, struct p9_req_t *p9_req)
+{
+ int ret = 0;
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ ret = usb9pfs_queue_tx(usb9pfs, p9_req, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ list_del(&p9_req->req_list);
+
+ p9_req_get(p9_req);
+
+ return ret;
+}
+
+static void usb9pfs_tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_usb9pfs *usb9pfs = ep->driver_data;
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct p9_req_t *p9_tx_req = req->context;
+ unsigned long flags;
+
+	/* reset the zero-length-packet flag */
+ req->zero = 0;
+
+ if (req->status) {
+ dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+ return;
+ }
+
+ dev_dbg(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+
+ spin_lock_irqsave(&usb9pfs->lock, flags);
+ WRITE_ONCE(p9_tx_req->status, REQ_STATUS_SENT);
+
+ p9_req_put(usb9pfs->client, p9_tx_req);
+
+ req->context = NULL;
+
+ spin_unlock_irqrestore(&usb9pfs->lock, flags);
+
+ complete(&usb9pfs->send);
+}
+
+static struct p9_req_t *usb9pfs_rx_header(struct f_usb9pfs *usb9pfs, void *buf)
+{
+ struct p9_req_t *p9_rx_req;
+ struct p9_fcall rc;
+ int ret;
+
+ /* start by reading header */
+ rc.sdata = buf;
+ rc.offset = 0;
+ rc.capacity = P9_HDRSZ;
+ rc.size = P9_HDRSZ;
+
+ p9_debug(P9_DEBUG_TRANS, "mux %p got %zu bytes\n", usb9pfs,
+ rc.capacity - rc.offset);
+
+ ret = p9_parse_header(&rc, &rc.size, NULL, NULL, 0);
+ if (ret) {
+ p9_debug(P9_DEBUG_ERROR,
+ "error parsing header: %d\n", ret);
+ return NULL;
+ }
+
+ p9_debug(P9_DEBUG_TRANS,
+ "mux %p pkt: size: %d bytes tag: %d\n",
+ usb9pfs, rc.size, rc.tag);
+
+ p9_rx_req = p9_tag_lookup(usb9pfs->client, rc.tag);
+ if (!p9_rx_req || p9_rx_req->status != REQ_STATUS_SENT) {
+ p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", rc.tag);
+ return NULL;
+ }
+
+ if (rc.size > p9_rx_req->rc.capacity) {
+ p9_debug(P9_DEBUG_ERROR,
+ "requested packet size too big: %d for tag %d with capacity %zd\n",
+ rc.size, rc.tag, p9_rx_req->rc.capacity);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+ return NULL;
+ }
+
+ if (!p9_rx_req->rc.sdata) {
+ p9_debug(P9_DEBUG_ERROR,
+ "No recv fcall for tag %d (req %p), disconnecting!\n",
+ rc.tag, p9_rx_req);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+ return NULL;
+ }
+
+ return p9_rx_req;
+}
+
+static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_usb9pfs *usb9pfs = ep->driver_data;
+ struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
+ struct p9_req_t *p9_rx_req;
+
+ if (req->status) {
+ dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
+ ep->name, req->status, req->actual, req->length);
+ return;
+ }
+
+ p9_rx_req = usb9pfs_rx_header(usb9pfs, req->buf);
+ if (!p9_rx_req)
+ return;
+
+ memcpy(p9_rx_req->rc.sdata, req->buf, req->actual);
+
+ p9_rx_req->rc.size = req->actual;
+
+ p9_client_cb(usb9pfs->client, p9_rx_req, REQ_STATUS_RCVD);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+
+ complete(&usb9pfs->received);
+}
+
+static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
+{
+ int value;
+
+ value = usb_ep_disable(ep);
+ if (value < 0)
+ dev_info(&cdev->gadget->dev,
+ "disable %s --> %d\n", ep->name, value);
+}
+
+static void disable_usb9pfs(struct f_usb9pfs *usb9pfs)
+{
+ struct usb_composite_dev *cdev =
+ usb9pfs->function.config->cdev;
+
+ if (usb9pfs->in_req) {
+ usb_ep_free_request(usb9pfs->in_ep, usb9pfs->in_req);
+ usb9pfs->in_req = NULL;
+ }
+
+ if (usb9pfs->out_req) {
+ usb_ep_free_request(usb9pfs->out_ep, usb9pfs->out_req);
+ usb9pfs->out_req = NULL;
+ }
+
+ disable_ep(cdev, usb9pfs->in_ep);
+ disable_ep(cdev, usb9pfs->out_ep);
+ dev_dbg(&cdev->gadget->dev, "%s disabled\n",
+ usb9pfs->function.name);
+}
+
+static int alloc_requests(struct usb_composite_dev *cdev,
+ struct f_usb9pfs *usb9pfs)
+{
+ int ret;
+
+ usb9pfs->in_req = usb_ep_alloc_request(usb9pfs->in_ep, GFP_ATOMIC);
+ if (!usb9pfs->in_req) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ usb9pfs->out_req = alloc_ep_req(usb9pfs->out_ep, usb9pfs->buflen);
+ if (!usb9pfs->out_req) {
+ ret = -ENOENT;
+ goto fail_in;
+ }
+
+ usb9pfs->in_req->complete = usb9pfs_tx_complete;
+ usb9pfs->out_req->complete = usb9pfs_rx_complete;
+
+	/* length will be set in the completion routine */
+ usb9pfs->in_req->context = usb9pfs;
+ usb9pfs->out_req->context = usb9pfs;
+
+ return 0;
+
+fail_in:
+ usb_ep_free_request(usb9pfs->in_ep, usb9pfs->in_req);
+fail:
+ return ret;
+}
+
+static int enable_endpoint(struct usb_composite_dev *cdev,
+ struct f_usb9pfs *usb9pfs, struct usb_ep *ep)
+{
+ int ret;
+
+ ret = config_ep_by_speed(cdev->gadget, &usb9pfs->function, ep);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(ep);
+ if (ret < 0)
+ return ret;
+
+ ep->driver_data = usb9pfs;
+
+ return 0;
+}
+
+static int
+enable_usb9pfs(struct usb_composite_dev *cdev, struct f_usb9pfs *usb9pfs)
+{
+ struct p9_client *client;
+ int ret = 0;
+
+ ret = enable_endpoint(cdev, usb9pfs, usb9pfs->in_ep);
+ if (ret)
+ goto out;
+
+ ret = enable_endpoint(cdev, usb9pfs, usb9pfs->out_ep);
+ if (ret)
+ goto disable_in;
+
+ ret = alloc_requests(cdev, usb9pfs);
+ if (ret)
+ goto disable_out;
+
+ client = usb9pfs->client;
+ if (client)
+ client->status = Connected;
+
+ dev_dbg(&cdev->gadget->dev, "%s enabled\n", usb9pfs->function.name);
+ return 0;
+
+disable_out:
+ usb_ep_disable(usb9pfs->out_ep);
+disable_in:
+ usb_ep_disable(usb9pfs->in_ep);
+out:
+ return ret;
+}
+
+static int p9_usbg_create(struct p9_client *client, const char *devname, char *args)
+{
+ struct f_usb9pfs_dev *dev;
+ struct f_usb9pfs *usb9pfs;
+ int ret = -ENOENT;
+ int found = 0;
+
+ if (!devname)
+ return -EINVAL;
+
+ guard(mutex)(&usb9pfs_lock);
+
+ list_for_each_entry(dev, &usbg_instance_list, usb9pfs_instance) {
+ if (!strncmp(devname, dev->tag, strlen(devname))) {
+ if (!dev->inuse) {
+ dev->inuse = true;
+ found = 1;
+ break;
+ }
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_err("no channels available for device %s\n", devname);
+ return ret;
+ }
+
+ usb9pfs = dev->usb9pfs;
+ if (!usb9pfs)
+ return -EINVAL;
+
+ client->trans = (void *)usb9pfs;
+ if (!usb9pfs->in_req)
+ client->status = Disconnected;
+ else
+ client->status = Connected;
+ usb9pfs->client = client;
+
+ client->trans_mod->maxsize = usb9pfs->buflen;
+
+ complete(&usb9pfs->received);
+
+ return 0;
+}
+
+static void usb9pfs_clear_tx(struct f_usb9pfs *usb9pfs)
+{
+ struct p9_req_t *req;
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ req = usb9pfs->in_req->context;
+ if (!req)
+ return;
+
+ if (!req->t_err)
+ req->t_err = -ECONNRESET;
+
+ p9_client_cb(usb9pfs->client, req, REQ_STATUS_ERROR);
+}
+
+static void p9_usbg_close(struct p9_client *client)
+{
+ struct f_usb9pfs *usb9pfs;
+ struct f_usb9pfs_dev *dev;
+ struct f_usb9pfs_opts *opts;
+
+ if (!client)
+ return;
+
+ usb9pfs = client->trans;
+ if (!usb9pfs)
+ return;
+
+ client->status = Disconnected;
+
+ usb9pfs_clear_tx(usb9pfs);
+
+ opts = container_of(usb9pfs->function.fi,
+ struct f_usb9pfs_opts, func_inst);
+
+ dev = opts->dev;
+
+ mutex_lock(&usb9pfs_lock);
+ dev->inuse = false;
+ mutex_unlock(&usb9pfs_lock);
+}
+
+static int p9_usbg_request(struct p9_client *client, struct p9_req_t *p9_req)
+{
+ struct f_usb9pfs *usb9pfs = client->trans;
+ int ret;
+
+ if (client->status != Connected)
+ return -EBUSY;
+
+ ret = wait_for_completion_killable(&usb9pfs->received);
+ if (ret)
+ return ret;
+
+ ret = usb9pfs_transmit(usb9pfs, p9_req);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_killable(&usb9pfs->send);
+ if (ret)
+ return ret;
+
+ return usb9pfs_queue_rx(usb9pfs, usb9pfs->out_req, GFP_ATOMIC);
+}
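p9_usbg_request() serializes one request at a time: it waits for the previous reply to be consumed, transmits, waits for the TX completion, then queues the next receive buffer. A loose userspace analogue of that sequencing with POSIX semaphores; the USB mechanics are elided and every name here is illustrative:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t received, sent;

/* stand-in for the UDC completions firing on the device side */
static void *completions(void *arg)
{
	(void)arg;
	sem_post(&sent);	/* usb9pfs_tx_complete() */
	sem_post(&received);	/* usb9pfs_rx_complete() */
	return NULL;
}

static void do_request(int tag)
{
	sem_wait(&received);		/* previous reply fully consumed */
	printf("tx request %d\n", tag);	/* usb9pfs_transmit() */
	sem_wait(&sent);		/* TX completion before re-queueing RX */
}

int main(void)
{
	pthread_t dev;

	sem_init(&received, 0, 1);	/* p9_usbg_create() primes this */
	sem_init(&sent, 0, 0);

	pthread_create(&dev, NULL, completions, NULL);
	do_request(1);
	pthread_join(dev, NULL);
	return 0;
}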
+
+static int p9_usbg_cancel(struct p9_client *client, struct p9_req_t *req)
+{
+ struct f_usb9pfs *usb9pfs = client->trans;
+ int ret = 1;
+
+ p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+
+ guard(spinlock_irqsave)(&usb9pfs->lock);
+
+ if (req->status == REQ_STATUS_UNSENT) {
+ list_del(&req->req_list);
+ WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
+ p9_req_put(client, req);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static struct p9_trans_module p9_usbg_trans = {
+ .name = "usbg",
+ .create = p9_usbg_create,
+ .close = p9_usbg_close,
+ .request = p9_usbg_request,
+ .cancel = p9_usbg_cancel,
+ .owner = THIS_MODULE,
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define USB_PROTOCOL_9PFS 0x09
+
+static struct usb_interface_descriptor usb9pfs_intf = {
+ .bLength = sizeof(usb9pfs_intf),
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = USB_PROTOCOL_9PFS,
+
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&fs_usb9pfs_sink_desc,
+ (struct usb_descriptor_header *)&fs_usb9pfs_source_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *hs_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&hs_usb9pfs_source_desc,
+ (struct usb_descriptor_header *)&hs_usb9pfs_sink_desc,
+ NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_usb9pfs_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_usb9pfs_source_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_endpoint_descriptor ss_usb9pfs_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_usb9pfs_sink_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *ss_usb9pfs_descs[] = {
+ (struct usb_descriptor_header *)&usb9pfs_intf,
+ (struct usb_descriptor_header *)&ss_usb9pfs_source_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_source_comp_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_sink_desc,
+ (struct usb_descriptor_header *)&ss_usb9pfs_sink_comp_desc,
+ NULL,
+};
+
+/* function-specific strings: */
+static struct usb_string strings_usb9pfs[] = {
+ [0].s = "usb9pfs input to output",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_usb9pfs = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_usb9pfs,
+};
+
+static struct usb_gadget_strings *usb9pfs_strings[] = {
+ &stringtab_usb9pfs,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int usb9pfs_func_bind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct f_usb9pfs_opts *opts;
+ struct usb_composite_dev *cdev = c->cdev;
+ int ret;
+ int id;
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ usb9pfs_intf.bInterfaceNumber = id;
+
+ id = usb_string_id(cdev);
+ if (id < 0)
+ return id;
+ strings_usb9pfs[0].id = id;
+ usb9pfs_intf.iInterface = id;
+
+ /* allocate endpoints */
+ usb9pfs->in_ep = usb_ep_autoconfig(cdev->gadget,
+ &fs_usb9pfs_source_desc);
+ if (!usb9pfs->in_ep)
+ goto autoconf_fail;
+
+ usb9pfs->out_ep = usb_ep_autoconfig(cdev->gadget,
+ &fs_usb9pfs_sink_desc);
+ if (!usb9pfs->out_ep)
+ goto autoconf_fail;
+
+ /* support high speed hardware */
+ hs_usb9pfs_source_desc.bEndpointAddress =
+ fs_usb9pfs_source_desc.bEndpointAddress;
+ hs_usb9pfs_sink_desc.bEndpointAddress =
+ fs_usb9pfs_sink_desc.bEndpointAddress;
+
+ /* support super speed hardware */
+ ss_usb9pfs_source_desc.bEndpointAddress =
+ fs_usb9pfs_source_desc.bEndpointAddress;
+ ss_usb9pfs_sink_desc.bEndpointAddress =
+ fs_usb9pfs_sink_desc.bEndpointAddress;
+
+ ret = usb_assign_descriptors(f, fs_usb9pfs_descs, hs_usb9pfs_descs,
+ ss_usb9pfs_descs, ss_usb9pfs_descs);
+ if (ret)
+ return ret;
+
+ opts = container_of(f->fi, struct f_usb9pfs_opts, func_inst);
+ opts->dev->usb9pfs = usb9pfs;
+
+ dev_dbg(&cdev->gadget->dev, "%s speed %s: IN/%s, OUT/%s\n",
+ (gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
+ f->name, usb9pfs->in_ep->name, usb9pfs->out_ep->name);
+
+ return 0;
+
+autoconf_fail:
+ ERROR(cdev, "%s: can't autoconfigure on %s\n",
+ f->name, cdev->gadget->name);
+ return -ENODEV;
+}
+
+static void usb9pfs_func_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+
+ disable_usb9pfs(usb9pfs);
+}
+
+static void usb9pfs_free_func(struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct f_usb9pfs_opts *opts;
+
+ kfree(usb9pfs);
+
+ opts = container_of(f->fi, struct f_usb9pfs_opts, func_inst);
+
+ mutex_lock(&opts->lock);
+ opts->refcnt--;
+ mutex_unlock(&opts->lock);
+
+ usb_free_all_descriptors(f);
+}
+
+static int usb9pfs_set_alt(struct usb_function *f,
+ unsigned int intf, unsigned int alt)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ return enable_usb9pfs(cdev, usb9pfs);
+}
+
+static void usb9pfs_disable(struct usb_function *f)
+{
+ struct f_usb9pfs *usb9pfs = func_to_usb9pfs(f);
+
+ usb9pfs_clear_tx(usb9pfs);
+}
+
+static struct usb_function *usb9pfs_alloc(struct usb_function_instance *fi)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts;
+ struct f_usb9pfs *usb9pfs;
+
+ usb9pfs = kzalloc(sizeof(*usb9pfs), GFP_KERNEL);
+ if (!usb9pfs)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&usb9pfs->lock);
+
+ init_completion(&usb9pfs->send);
+ init_completion(&usb9pfs->received);
+
+ usb9pfs_opts = container_of(fi, struct f_usb9pfs_opts, func_inst);
+
+ mutex_lock(&usb9pfs_opts->lock);
+ usb9pfs_opts->refcnt++;
+ mutex_unlock(&usb9pfs_opts->lock);
+
+ usb9pfs->buflen = usb9pfs_opts->buflen;
+
+ usb9pfs->function.name = "usb9pfs";
+ usb9pfs->function.bind = usb9pfs_func_bind;
+ usb9pfs->function.unbind = usb9pfs_func_unbind;
+ usb9pfs->function.set_alt = usb9pfs_set_alt;
+ usb9pfs->function.disable = usb9pfs_disable;
+ usb9pfs->function.strings = usb9pfs_strings;
+
+ usb9pfs->function.free_func = usb9pfs_free_func;
+
+ return &usb9pfs->function;
+}
+
+static inline struct f_usb9pfs_opts *to_f_usb9pfs_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_usb9pfs_opts,
+ func_inst.group);
+}
+
+static inline struct f_usb9pfs_opts *fi_to_f_usb9pfs_opts(struct usb_function_instance *fi)
+{
+ return container_of(fi, struct f_usb9pfs_opts, func_inst);
+}
+
+static void usb9pfs_attr_release(struct config_item *item)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts = to_f_usb9pfs_opts(item);
+
+ usb_put_function_instance(&usb9pfs_opts->func_inst);
+}
+
+static struct configfs_item_operations usb9pfs_item_ops = {
+ .release = usb9pfs_attr_release,
+};
+
+static ssize_t f_usb9pfs_opts_buflen_show(struct config_item *item, char *page)
+{
+ struct f_usb9pfs_opts *opts = to_f_usb9pfs_opts(item);
+ int ret;
+
+ mutex_lock(&opts->lock);
+ ret = sysfs_emit(page, "%d\n", opts->buflen);
+ mutex_unlock(&opts->lock);
+
+ return ret;
+}
+
+static ssize_t f_usb9pfs_opts_buflen_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_usb9pfs_opts *opts = to_f_usb9pfs_opts(item);
+ int ret;
+ u32 num;
+
+ guard(mutex)(&opts->lock);
+
+ if (opts->refcnt)
+ return -EBUSY;
+
+ ret = kstrtou32(page, 0, &num);
+ if (ret)
+ return ret;
+
+ opts->buflen = num;
+
+ return len;
+}
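Because of the refcnt check above, buflen can only be changed while no function is allocated, i.e. before the instance is linked into a configuration. A sketch of setting it from userspace; the gadget name "g1" and instance name "usb9pfs.usb0" are hypothetical configfs paths:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical layout: gadget "g1" with a usb9pfs.usb0 function */
	const char *attr =
		"/sys/kernel/config/usb_gadget/g1/functions/usb9pfs.usb0/buflen";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "65536", 5) != 5)	/* must precede linking the function */
		perror("write");
	close(fd);
	return 0;
}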
+
+CONFIGFS_ATTR(f_usb9pfs_opts_, buflen);
+
+static struct configfs_attribute *usb9pfs_attrs[] = {
+ &f_usb9pfs_opts_attr_buflen,
+ NULL,
+};
+
+static const struct config_item_type usb9pfs_func_type = {
+ .ct_item_ops = &usb9pfs_item_ops,
+ .ct_attrs = usb9pfs_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct f_usb9pfs_dev *_usb9pfs_do_find_dev(const char *tag)
+{
+ struct f_usb9pfs_dev *usb9pfs_dev;
+
+ if (!tag)
+ return NULL;
+
+ list_for_each_entry(usb9pfs_dev, &usbg_instance_list, usb9pfs_instance) {
+ if (strcmp(usb9pfs_dev->tag, tag) == 0)
+ return usb9pfs_dev;
+ }
+
+ return NULL;
+}
+
+static int usb9pfs_tag_instance(struct f_usb9pfs_dev *dev, const char *tag)
+{
+ struct f_usb9pfs_dev *existing;
+ int ret = 0;
+
+ guard(mutex)(&usb9pfs_lock);
+
+ existing = _usb9pfs_do_find_dev(tag);
+ if (!existing)
+ strscpy(dev->tag, tag, ARRAY_SIZE(dev->tag));
+ else if (existing != dev)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+static int usb9pfs_set_inst_tag(struct usb_function_instance *fi, const char *tag)
+{
+ if (strlen(tag) >= sizeof_field(struct f_usb9pfs_dev, tag))
+ return -ENAMETOOLONG;
+ return usb9pfs_tag_instance(fi_to_f_usb9pfs_opts(fi)->dev, tag);
+}
+
+static void usb9pfs_free_instance(struct usb_function_instance *fi)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts =
+ container_of(fi, struct f_usb9pfs_opts, func_inst);
+ struct f_usb9pfs_dev *dev = usb9pfs_opts->dev;
+
+ mutex_lock(&usb9pfs_lock);
+ list_del(&dev->usb9pfs_instance);
+ mutex_unlock(&usb9pfs_lock);
+
+ kfree(usb9pfs_opts);
+}
+
+static struct usb_function_instance *usb9pfs_alloc_instance(void)
+{
+ struct f_usb9pfs_opts *usb9pfs_opts;
+ struct f_usb9pfs_dev *dev;
+
+ usb9pfs_opts = kzalloc(sizeof(*usb9pfs_opts), GFP_KERNEL);
+ if (!usb9pfs_opts)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&usb9pfs_opts->lock);
+
+ usb9pfs_opts->func_inst.set_inst_name = usb9pfs_set_inst_tag;
+ usb9pfs_opts->func_inst.free_func_inst = usb9pfs_free_instance;
+
+ usb9pfs_opts->buflen = DEFAULT_BUFLEN;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		kfree(usb9pfs_opts);
+		return ERR_PTR(-ENOMEM);
+	}
+
+ usb9pfs_opts->dev = dev;
+ dev->opts = usb9pfs_opts;
+
+ config_group_init_type_name(&usb9pfs_opts->func_inst.group, "",
+ &usb9pfs_func_type);
+
+ mutex_lock(&usb9pfs_lock);
+ list_add_tail(&dev->usb9pfs_instance, &usbg_instance_list);
+ mutex_unlock(&usb9pfs_lock);
+
+ return &usb9pfs_opts->func_inst;
+}
+DECLARE_USB_FUNCTION(usb9pfs, usb9pfs_alloc_instance, usb9pfs_alloc);
+
+static int __init usb9pfs_modinit(void)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&usbg_instance_list);
+
+ ret = usb_function_register(&usb9pfsusb_func);
+ if (!ret)
+ v9fs_register_trans(&p9_usbg_trans);
+
+ return ret;
+}
+
+static void __exit usb9pfs_modexit(void)
+{
+ usb_function_unregister(&usb9pfsusb_func);
+ v9fs_unregister_trans(&p9_usbg_trans);
+}
+
+module_init(usb9pfs_modinit);
+module_exit(usb9pfs_modexit);
+
+MODULE_ALIAS_9P("usbg");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB gadget 9pfs transport");
+MODULE_AUTHOR("Michael Grzeschik");
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 3ea52b05adfb..f71f67c6896b 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -115,7 +115,7 @@ static int check_test_run_args(struct bpf_prog *prog, struct bpf_dummy_ops_test_
offset = btf_ctx_arg_offset(bpf_dummy_ops_btf, func_proto, arg_no);
info = find_ctx_arg_info(prog->aux, offset);
- if (info && (info->reg_type & PTR_MAYBE_NULL))
+ if (info && type_may_be_null(info->reg_type))
continue;
return -EINVAL;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3c8b78d9c4d1..d1b5705dc0c6 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1254,7 +1254,7 @@ static int ceph_dns_resolve_name(const char *name, size_t namelen,
colon_p = memchr(name, ':', namelen);
if (delim_p && colon_p)
- end = delim_p < colon_p ? delim_p : colon_p;
+ end = min(delim_p, colon_p);
else if (!delim_p && colon_p)
end = colon_p;
else {
diff --git a/net/core/filter.c b/net/core/filter.c
index 0f4d9f3b206e..cd3524cb326b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1266,8 +1266,8 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
* so we need to keep the user BPF around until the 2nd
* pass. At this time, the user BPF is stored in fp->insns.
*/
- old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
- GFP_KERNEL | __GFP_NOWARN);
+ old_prog = kmemdup_array(fp->insns, old_len, sizeof(struct sock_filter),
+ GFP_KERNEL | __GFP_NOWARN);
if (!old_prog) {
err = -ENOMEM;
goto out_err;
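kmemdup_array() folds the count-times-size multiplication into the allocator, so an overflow in old_len * sizeof(struct sock_filter) cannot silently allocate a short buffer. A userspace analogue of the guarded pattern; the helper name is made up, and __builtin_mul_overflow is the GCC/Clang builtin:

#include <stdlib.h>
#include <string.h>

/* duplicate n elements of 'size' bytes each, refusing on n * size overflow */
static void *memdup_array(const void *src, size_t n, size_t size)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;

	void *dst = malloc(bytes);

	if (dst)
		memcpy(dst, src, bytes);
	return dst;
}

int main(void)
{
	int src[4] = { 1, 2, 3, 4 };
	int *copy = memdup_array(src, 4, sizeof(*src));

	free(copy);
	return 0;
}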
@@ -6280,20 +6280,25 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
struct net_device *dev = skb->dev;
int skb_len, dev_len;
- int mtu;
+ int mtu = 0;
- if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
- return -EINVAL;
+ if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
- return -EINVAL;
+ if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) {
+ ret = -EINVAL;
+ goto out;
+ }
dev = __dev_via_ifindex(dev, ifindex);
- if (unlikely(!dev))
- return -ENODEV;
+ if (unlikely(!dev)) {
+ ret = -ENODEV;
+ goto out;
+ }
mtu = READ_ONCE(dev->mtu);
-
dev_len = mtu + dev->hard_header_len;
/* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
@@ -6311,15 +6316,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
*/
if (skb_is_gso(skb)) {
ret = BPF_MTU_CHK_RET_SUCCESS;
-
if (flags & BPF_MTU_CHK_SEGS &&
!skb_gso_validate_network_len(skb, mtu))
ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
}
out:
- /* BPF verifier guarantees valid pointer */
*mtu_len = mtu;
-
return ret;
}
@@ -6329,19 +6331,21 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
struct net_device *dev = xdp->rxq->dev;
int xdp_len = xdp->data_end - xdp->data;
int ret = BPF_MTU_CHK_RET_SUCCESS;
- int mtu, dev_len;
+ int mtu = 0, dev_len;
/* XDP variant doesn't support multi-buffer segment check (yet) */
- if (unlikely(flags))
- return -EINVAL;
+ if (unlikely(flags)) {
+ ret = -EINVAL;
+ goto out;
+ }
dev = __dev_via_ifindex(dev, ifindex);
- if (unlikely(!dev))
- return -ENODEV;
+ if (unlikely(!dev)) {
+ ret = -ENODEV;
+ goto out;
+ }
mtu = READ_ONCE(dev->mtu);
-
- /* Add L2-header as dev MTU is L3 size */
dev_len = mtu + dev->hard_header_len;
/* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
@@ -6351,10 +6355,8 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
xdp_len += len_diff; /* minus result pass check */
if (xdp_len > dev_len)
ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
-
- /* BPF verifier guarantees valid pointer */
+out:
*mtu_len = mtu;
-
return ret;
}
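Both helpers now store to *mtu_len on every exit path because the new ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT argument type (see the proto changes below) tells the verifier the helper fills the buffer, letting callers pass uninitialized stack. The contract in miniature, as a plain C sketch with illustrative names:

#include <errno.h>
#include <stdio.h>

/* every exit path stores to *out, so callers may pass uninitialized memory */
static int check_mtu(int frame_len, int mtu, int *out)
{
	int ret = 0;

	if (mtu <= 0) {
		ret = -EINVAL;
		mtu = 0;	/* still hand back a defined value */
		goto out;
	}
	if (frame_len > mtu)
		ret = 1;	/* frag needed */
out:
	*out = mtu;
	return ret;
}

int main(void)
{
	int mtu;	/* deliberately uninitialized, as MEM_UNINIT permits */
	int ret = check_mtu(1600, 1500, &mtu);

	printf("ret=%d mtu=%d\n", ret, mtu);
	return 0;
}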
@@ -6364,7 +6366,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_INT,
+ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg3_size = sizeof(u32),
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -6375,7 +6378,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_INT,
+ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+ .arg3_size = sizeof(u32),
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
@@ -8597,13 +8601,16 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (off + size > offsetofend(struct __sk_buff, cb[4]))
return false;
break;
+ case bpf_ctx_range(struct __sk_buff, data):
+ case bpf_ctx_range(struct __sk_buff, data_meta):
+ case bpf_ctx_range(struct __sk_buff, data_end):
+ if (info->is_ldsx || size != size_default)
+ return false;
+ break;
case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
- case bpf_ctx_range(struct __sk_buff, data):
- case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, data_end):
if (size != size_default)
return false;
break;
@@ -9047,6 +9054,14 @@ static bool xdp_is_valid_access(int off, int size,
}
}
return false;
+ } else {
+ switch (off) {
+ case offsetof(struct xdp_md, data_meta):
+ case offsetof(struct xdp_md, data):
+ case offsetof(struct xdp_md, data_end):
+ if (info->is_ldsx)
+ return false;
+ }
}
switch (off) {
@@ -9372,12 +9387,12 @@ static bool flow_dissector_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, data):
- if (size != size_default)
+ if (info->is_ldsx || size != size_default)
return false;
info->reg_type = PTR_TO_PACKET;
return true;
case bpf_ctx_range(struct __sk_buff, data_end):
- if (size != size_default)
+ if (info->is_ldsx || size != size_default)
return false;
info->reg_type = PTR_TO_PACKET_END;
return true;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 11e4dd4f09ed..e39479f1c9a4 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -697,11 +697,11 @@ struct net *get_net_ns_by_fd(int fd)
struct fd f = fdget(fd);
struct net *net = ERR_PTR(-EINVAL);
- if (!f.file)
+ if (!fd_file(f))
return ERR_PTR(-EBADF);
- if (proc_ns_file(f.file)) {
- struct ns_common *ns = get_proc_ns(file_inode(f.file));
+ if (proc_ns_file(fd_file(f))) {
+ struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));
if (ns->ops == &netns_operations)
net = get_net(container_of(ns, struct net, ns));
}
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 724b6856fcc3..242c91a6e3d3 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -67,46 +67,39 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
- u32 ufd = attr->target_fd;
struct bpf_map *map;
- struct fd f;
int ret;
if (attr->attach_flags || attr->replace_bpf_fd)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
mutex_lock(&sockmap_mutex);
ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type);
mutex_unlock(&sockmap_mutex);
- fdput(f);
return ret;
}
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
- u32 ufd = attr->target_fd;
struct bpf_prog *prog;
struct bpf_map *map;
- struct fd f;
int ret;
if (attr->attach_flags || attr->replace_bpf_fd)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
prog = bpf_prog_get(attr->attach_bpf_fd);
- if (IS_ERR(prog)) {
- ret = PTR_ERR(prog);
- goto put_map;
- }
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
if (prog->type != ptype) {
ret = -EINVAL;
@@ -118,8 +111,6 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
mutex_unlock(&sockmap_mutex);
put_prog:
bpf_prog_put(prog);
-put_map:
- fdput(f);
return ret;
}
@@ -1551,18 +1542,17 @@ int sock_map_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
- u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
+ u32 prog_cnt = 0, flags = 0;
struct bpf_prog **pprog;
struct bpf_prog *prog;
struct bpf_map *map;
- struct fd f;
u32 id = 0;
int ret;
if (attr->query.query_flags)
return -EINVAL;
- f = fdget(ufd);
+ CLASS(fd, f)(attr->target_fd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -1594,7 +1584,6 @@ end:
copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
ret = -EFAULT;
- fdput(f);
return ret;
}
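CLASS(fd, f)(attr->target_fd) comes from the kernel's scope-based cleanup helpers (DEFINE_CLASS and friends in linux/cleanup.h): the descriptor reference is dropped automatically when f goes out of scope, which is why the fdput() calls and the put_map label disappear. A rough userspace approximation using the compiler's cleanup attribute (GCC/Clang), with a plain file descriptor standing in for struct fd:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void close_fd(int *fd)
{
	if (*fd >= 0)
		close(*fd);
}

/* released on every return path, no goto-based unwinding needed */
#define SCOPED_FD __attribute__((cleanup(close_fd))) int

int main(void)
{
	SCOPED_FD fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0)
		return 1;	/* nothing to clean up */

	char buf[64];
	ssize_t n = read(fd, buf, sizeof(buf) - 1);

	if (n < 0)
		return 1;	/* fd still closed by the cleanup handler */

	buf[n] = '\0';
	printf("%s", buf);
	return 0;		/* fd closed here too */
}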
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 3f88d0961e5b..554804774628 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -14,10 +14,6 @@
/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
static struct bpf_struct_ops bpf_tcp_congestion_ops;
-static u32 unsupported_ops[] = {
- offsetof(struct tcp_congestion_ops, get_info),
-};
-
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
static const struct btf_type *tcp_congestion_ops_type;
@@ -45,18 +41,6 @@ static int bpf_tcp_ca_init(struct btf *btf)
return 0;
}
-static bool is_unsupported(u32 member_offset)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
- if (member_offset == unsupported_ops[i])
- return true;
- }
-
- return false;
-}
-
static bool bpf_tcp_ca_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
@@ -251,15 +235,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
return 0;
}
-static int bpf_tcp_ca_check_member(const struct btf_type *t,
- const struct btf_member *member,
- const struct bpf_prog *prog)
-{
- if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
- return -ENOTSUPP;
- return 0;
-}
-
static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)
{
return tcp_register_congestion_control(kdata);
@@ -354,7 +329,6 @@ static struct bpf_struct_ops bpf_tcp_congestion_ops = {
.reg = bpf_tcp_ca_reg,
.unreg = bpf_tcp_ca_unreg,
.update = bpf_tcp_ca_update,
- .check_member = bpf_tcp_ca_check_member,
.init_member = bpf_tcp_ca_init_member,
.init = bpf_tcp_ca_init,
.validate = bpf_tcp_ca_validate,
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 04504b2b51df..87fd945a0d27 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -239,9 +239,8 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook)
{
- struct sk_buff *nskb;
- struct iphdr *niph;
const struct tcphdr *oth;
+ struct sk_buff *nskb;
struct tcphdr _oth;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
@@ -266,14 +265,12 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
skb_reserve(nskb, LL_MAX_HEADER);
- niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
- ip4_dst_hoplimit(skb_dst(nskb)));
+ nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+ ip4_dst_hoplimit(skb_dst(nskb)));
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
goto free_nskb;
- niph = ip_hdr(nskb);
-
/* "Never happens" */
if (nskb->len > dst_mtu(skb_dst(nskb)))
goto free_nskb;
@@ -290,6 +287,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
*/
if (nf_bridge_info_exists(oldskb)) {
struct ethhdr *oeth = eth_hdr(oldskb);
+ struct iphdr *niph = ip_hdr(nskb);
struct net_device *br_indev;
br_indev = nf_bridge_get_physindev(oldskb, net);
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 08d4b7132d4c..1c9c686d9522 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -323,6 +323,7 @@ config IPV6_RPL_LWTUNNEL
bool "IPv6: RPL Source Routing Header support"
depends on IPV6
select LWTUNNEL
+ select DST_CACHE
help
Support for RFC6554 RPL Source Routing Header using the lightweight
tunnels mechanism.
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index dedee264b8f6..7db0437140bf 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -223,33 +223,23 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct tcphdr *oth, unsigned int otcplen)
{
struct tcphdr *tcph;
- int needs_ack;
skb_reset_transport_header(nskb);
- tcph = skb_put(nskb, sizeof(struct tcphdr));
+ tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
/* Truncate to length (no data) */
tcph->doff = sizeof(struct tcphdr)/4;
tcph->source = oth->dest;
tcph->dest = oth->source;
if (oth->ack) {
- needs_ack = 0;
tcph->seq = oth->ack_seq;
- tcph->ack_seq = 0;
} else {
- needs_ack = 1;
tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
otcplen - (oth->doff<<2));
- tcph->seq = 0;
+ tcph->ack = 1;
}
- /* Reset flags */
- ((u_int8_t *)tcph)[13] = 0;
tcph->rst = 1;
- tcph->ack = needs_ack;
- tcph->window = 0;
- tcph->urg_ptr = 0;
- tcph->check = 0;
/* Adjust TCP checksum */
tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
@@ -283,7 +273,6 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
const struct tcphdr *otcph;
unsigned int otcplen, hh_len;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
- struct ipv6hdr *ip6h;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
@@ -339,8 +328,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
nskb->mark = fl6.flowi6_mark;
skb_reserve(nskb, hh_len + dst->header_len);
- ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- ip6_dst_hoplimit(dst));
+ nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, ip6_dst_hoplimit(dst));
nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
nf_ct_attach(nskb, oldskb);
@@ -355,6 +343,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
*/
if (nf_bridge_info_exists(oldskb)) {
struct ethhdr *oeth = eth_hdr(oldskb);
+ struct ipv6hdr *ip6h = ipv6_hdr(nskb);
struct net_device *br_indev;
br_indev = nf_bridge_get_physindev(oldskb, net);
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 25b8a67a63a4..85149c774505 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -187,7 +187,6 @@ static const struct file_operations minstrel_ht_stat_fops = {
.open = minstrel_ht_stats_open,
.read = minstrel_stats_read,
.release = minstrel_stats_release,
- .llseek = no_llseek,
};
static char *
@@ -323,7 +322,6 @@ static const struct file_operations minstrel_ht_stat_csv_fops = {
.open = minstrel_ht_stats_csv_open,
.read = minstrel_stats_read,
.release = minstrel_stats_release,
- .llseek = no_llseek,
};
void
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d3cb53b008f5..9db3e2b0b1c3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -988,6 +988,56 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
tstamp->start = ktime_get_real_ns();
}
+/**
+ * nf_ct_match_reverse - check if ct1 and ct2 refer to identical flow
+ * @ct1: conntrack in hash table to check against
+ * @ct2: merge candidate
+ *
+ * returns true if ct1 and ct2 happen to refer to the same flow, but
+ * in opposing directions, i.e.
+ * ct1: a:b -> c:d
+ * ct2: c:d -> a:b
+ * for both directions. If so, @ct2 should not have been created
+ * as the skb should have been picked up as an ESTABLISHED flow.
+ * But ct1 was not yet committed to the hash table when the skb that
+ * created ct2 arrived.
+ *
+ * Note we don't compare netns because ct entries in different net
+ * namespaces cannot clash to begin with.
+ *
+ * @return: true if ct1 and ct2 are identical when swapping origin/reply.
+ */
+static bool
+nf_ct_match_reverse(const struct nf_conn *ct1, const struct nf_conn *ct2)
+{
+ u16 id1, id2;
+
+ if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ct2->tuplehash[IP_CT_DIR_REPLY].tuple))
+ return false;
+
+ if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
+ &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+ return false;
+
+ id1 = nf_ct_zone_id(nf_ct_zone(ct1), IP_CT_DIR_ORIGINAL);
+ id2 = nf_ct_zone_id(nf_ct_zone(ct2), IP_CT_DIR_REPLY);
+ if (id1 != id2)
+ return false;
+
+ id1 = nf_ct_zone_id(nf_ct_zone(ct1), IP_CT_DIR_REPLY);
+ id2 = nf_ct_zone_id(nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL);
+
+ return id1 == id2;
+}
+
+static int nf_ct_can_merge(const struct nf_conn *ct,
+ const struct nf_conn *loser_ct)
+{
+ return nf_ct_match(ct, loser_ct) ||
+ nf_ct_match_reverse(ct, loser_ct);
+}
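The reverse match is a cross-direction tuple comparison: ct1's original tuple against ct2's reply tuple and vice versa. A reduced sketch with toy tuples, zone ids omitted:

#include <stdbool.h>

struct tuple { unsigned int saddr, daddr; unsigned short sport, dport; };
struct conn { struct tuple dir[2]; };	/* [0] = original, [1] = reply */

static bool tuple_equal(const struct tuple *a, const struct tuple *b)
{
	return a->saddr == b->saddr && a->daddr == b->daddr &&
	       a->sport == b->sport && a->dport == b->dport;
}

/* ct2 is the same flow seen from the other side: directions are swapped */
static bool match_reverse(const struct conn *ct1, const struct conn *ct2)
{
	return tuple_equal(&ct1->dir[0], &ct2->dir[1]) &&
	       tuple_equal(&ct1->dir[1], &ct2->dir[0]);
}

int main(void)
{
	struct conn a = { .dir = { { 1, 2, 1000, 80 }, { 2, 1, 80, 1000 } } };
	struct conn b = { .dir = { { 2, 1, 80, 1000 }, { 1, 2, 1000, 80 } } };

	return !match_reverse(&a, &b);	/* exits 0: same flow, reversed */
}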
+
/* caller must hold locks to prevent concurrent changes */
static int __nf_ct_resolve_clash(struct sk_buff *skb,
struct nf_conntrack_tuple_hash *h)
@@ -999,11 +1049,7 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
loser_ct = nf_ct_get(skb, &ctinfo);
- if (nf_ct_is_dying(ct))
- return NF_DROP;
-
- if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
- nf_ct_match(ct, loser_ct)) {
+ if (nf_ct_can_merge(ct, loser_ct)) {
struct net *net = nf_ct_net(ct);
nf_conntrack_get(&ct->ct_general);
@@ -2151,80 +2197,6 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
nf_conntrack_get(skb_nfct(nskb));
}
-static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- const struct nf_nat_hook *nat_hook;
- struct nf_conntrack_tuple_hash *h;
- struct nf_conntrack_tuple tuple;
- unsigned int status;
- int dataoff;
- u16 l3num;
- u8 l4num;
-
- l3num = nf_ct_l3num(ct);
-
- dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
- if (dataoff <= 0)
- return NF_DROP;
-
- if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
- l4num, net, &tuple))
- return NF_DROP;
-
- if (ct->status & IPS_SRC_NAT) {
- memcpy(tuple.src.u3.all,
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
- sizeof(tuple.src.u3.all));
- tuple.src.u.all =
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
- }
-
- if (ct->status & IPS_DST_NAT) {
- memcpy(tuple.dst.u3.all,
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
- sizeof(tuple.dst.u3.all));
- tuple.dst.u.all =
- ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
- }
-
- h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
- if (!h)
- return NF_ACCEPT;
-
- /* Store status bits of the conntrack that is clashing to re-do NAT
- * mangling according to what it has been done already to this packet.
- */
- status = ct->status;
-
- nf_ct_put(ct);
- ct = nf_ct_tuplehash_to_ctrack(h);
- nf_ct_set(skb, ct, ctinfo);
-
- nat_hook = rcu_dereference(nf_nat_hook);
- if (!nat_hook)
- return NF_ACCEPT;
-
- if (status & IPS_SRC_NAT) {
- unsigned int verdict = nat_hook->manip_pkt(skb, ct,
- NF_NAT_MANIP_SRC,
- IP_CT_DIR_ORIGINAL);
- if (verdict != NF_ACCEPT)
- return verdict;
- }
-
- if (status & IPS_DST_NAT) {
- unsigned int verdict = nat_hook->manip_pkt(skb, ct,
- NF_NAT_MANIP_DST,
- IP_CT_DIR_ORIGINAL);
- if (verdict != NF_ACCEPT)
- return verdict;
- }
-
- return NF_ACCEPT;
-}
-
/* This packet is coming from userspace via nf_queue, complete the packet
* processing after the helper invocation in nf_confirm().
*/
@@ -2288,17 +2260,6 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
if (!ct)
return NF_ACCEPT;
- if (!nf_ct_is_confirmed(ct)) {
- int ret = __nf_conntrack_update(net, skb, ct, ctinfo);
-
- if (ret != NF_ACCEPT)
- return ret;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct)
- return NF_ACCEPT;
- }
-
return nf_confirm_cthelper(skb, ct, ctinfo);
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 123e2e933e9b..6a1239433830 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -382,7 +382,7 @@ nla_put_failure:
#define ctnetlink_dump_secctx(a, b) (0)
#endif
-#ifdef CONFIG_NF_CONNTRACK_LABELS
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
struct nf_conn_labels *labels = nf_ct_labels_find(ct);
@@ -391,6 +391,7 @@ static inline int ctnetlink_label_size(const struct nf_conn *ct)
return 0;
return nla_total_size(sizeof(labels->bits));
}
+#endif
static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
@@ -411,10 +412,6 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
return 0;
}
-#else
-#define ctnetlink_dump_labels(a, b) (0)
-#define ctnetlink_label_size(a) (0)
-#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
@@ -652,7 +649,6 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
return len + len4;
}
-#endif
static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{
@@ -690,6 +686,7 @@ static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
return 0;
#endif
}
+#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 6d8da6dddf99..4085c436e306 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -183,7 +183,35 @@ hash_by_src(const struct net *net,
return reciprocal_scale(hash, nf_nat_htable_size);
}
-/* Is this tuple already taken? (not by us) */
+/**
+ * nf_nat_used_tuple - check if proposed nat tuple clashes with existing entry
+ * @tuple: proposed NAT binding
+ * @ignored_conntrack: our (unconfirmed) conntrack entry
+ *
+ * A conntrack entry can be inserted to the connection tracking table
+ * if there is no existing entry with an identical tuple in either direction.
+ *
+ * Example:
+ * INITIATOR -> NAT/PAT -> RESPONDER
+ *
+ * INITIATOR passes through NAT/PAT ("us") and SNAT is done (saddr rewrite).
+ * Then, later, NAT/PAT itself also connects to RESPONDER.
+ *
+ * This will not work if the SNAT done earlier uses the same IP:PORT source pair.
+ *
+ * Conntrack table has:
+ * ORIGINAL: $IP_INITIATOR:$SPORT -> $IP_RESPONDER:$DPORT
+ * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
+ *
+ * and new locally originating connection wants:
+ * ORIGINAL: $IP_NAT:$SPORT -> $IP_RESPONDER:$DPORT
+ * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
+ *
+ * ... which would mean incoming packets cannot be distinguished between
+ * the existing and the newly added entry (identical IP_CT_DIR_REPLY tuple).
+ *
+ * @return: true if the proposed NAT mapping collides with an existing entry.
+ */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
@@ -200,6 +228,94 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
+static bool nf_nat_allow_clash(const struct nf_conn *ct)
+{
+ return nf_ct_l4proto_find(nf_ct_protonum(ct))->allow_clash;
+}
+
+/**
+ * nf_nat_used_tuple_new - check if a to-be-inserted conntrack collides with an existing entry
+ * @tuple: proposed NAT binding
+ * @ignored_ct: our (unconfirmed) conntrack entry
+ *
+ * Same as nf_nat_used_tuple(), but also checks for a rare clash in the
+ * reverse direction. Should be called only when @tuple has not been
+ * altered, i.e. @ignored_ct will not be subject to NAT.
+ *
+ * Return: true if the proposed NAT mapping collides with an existing entry.
+ */
+static noinline bool
+nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_ct)
+{
+ static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT;
+ const struct nf_conntrack_tuple_hash *thash;
+ const struct nf_conntrack_zone *zone;
+ struct nf_conn *ct;
+ bool taken = true;
+ struct net *net;
+
+ if (!nf_nat_used_tuple(tuple, ignored_ct))
+ return false;
+
+ if (!nf_nat_allow_clash(ignored_ct))
+ return true;
+
+ /* Initial choice clashes with existing conntrack.
+ * Check for (rare) reverse collision.
+ *
+ * This can happen when new packets are received in both directions
+ * at the exact same time on different CPUs.
+ *
+ * Without SMP, the first packet creates a new conntrack entry and the
+ * second packet is resolved as an established reply packet.
+ *
+ * With parallel processing, both packets could be picked up as
+ * new and both get their own ct entry allocated.
+ *
+ * If ignored_ct and the colliding ct are not subject to NAT then
+ * pretend the tuple is available and let later clash resolution
+ * handle this at insertion time.
+ *
+ * Without it, the 'reply' packet would have its source port rewritten
+ * by the NAT engine.
+ */
+ if (READ_ONCE(ignored_ct->status) & uses_nat)
+ return true;
+
+ net = nf_ct_net(ignored_ct);
+ zone = nf_ct_zone(ignored_ct);
+
+ thash = nf_conntrack_find_get(net, zone, tuple);
+ if (unlikely(!thash)) /* clashing entry went away */
+ return false;
+
+ ct = nf_ct_tuplehash_to_ctrack(thash);
+
+ /* NB: IP_CT_DIR_ORIGINAL should be impossible because
+ * nf_nat_used_tuple() handles origin collisions.
+ *
+ * Handle the remote chance that another CPU confirmed its ct right after.
+ */
+ if (thash->tuple.dst.dir != IP_CT_DIR_REPLY)
+ goto out;
+
+ /* clashing connection subject to NAT? Retry with new tuple. */
+ if (READ_ONCE(ct->status) & uses_nat)
+ goto out;
+
+ if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple) &&
+ nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ &ignored_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) {
+ taken = false;
+ goto out;
+ }
+out:
+ nf_ct_put(ct);
+ return taken;
+}
+
static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags)
{
static const unsigned long flags_refuse = IPS_FIXED_TIMEOUT |
@@ -611,7 +727,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
/* try the original tuple first */
if (nf_in_range(orig_tuple, range)) {
- if (!nf_nat_used_tuple(orig_tuple, ct)) {
+ if (!nf_nat_used_tuple_new(orig_tuple, ct)) {
*tuple = *orig_tuple;
return;
}
@@ -1208,7 +1324,6 @@ static const struct nf_nat_hook nat_hook = {
#ifdef CONFIG_XFRM
.decode_session = __nf_nat_decode_session,
#endif
- .manip_pkt = nf_nat_manip_pkt,
.remove_nat_bysrc = nf_nat_cleanup_conntrack,
};
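The reverse-clash test added in nf_nat_used_tuple_new() above boils down to a symmetry check: two conntrack entries describe the same flow seen from opposite ends exactly when each entry's ORIGINAL tuple equals the other's REPLY tuple. A minimal standalone sketch of that check (simplified tuple layout and helper names here are hypothetical, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct tuple {
	unsigned int src_ip, dst_ip;
	unsigned short src_port, dst_port;
};

struct entry {
	struct tuple orig;	/* IP_CT_DIR_ORIGINAL */
	struct tuple reply;	/* IP_CT_DIR_REPLY */
};

static bool tuple_equal(const struct tuple *a, const struct tuple *b)
{
	return a->src_ip == b->src_ip && a->dst_ip == b->dst_ip &&
	       a->src_port == b->src_port && a->dst_port == b->dst_port;
}

/* true when a and b are the same flow, created once from each side */
static bool reverse_clash(const struct entry *a, const struct entry *b)
{
	return tuple_equal(&a->orig, &b->reply) &&
	       tuple_equal(&a->reply, &b->orig);
}

int main(void)
{
	struct entry a = { { 1, 2, 100, 200 }, { 2, 1, 200, 100 } };
	struct entry b = { { 2, 1, 200, 100 }, { 1, 2, 100, 200 } };

	printf("reverse clash: %d\n", reverse_clash(&a, &b)); /* prints 1 */
	return 0;
}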
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 57259b5f3ef5..a24fe62650a7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1849,7 +1849,7 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
if (!hook_list)
hook_list = &basechain->hook_list;
- list_for_each_entry(hook, hook_list, list) {
+ list_for_each_entry_rcu(hook, hook_list, list) {
if (!first)
first = hook;
@@ -6684,7 +6684,7 @@ static int nft_setelem_catchall_insert(const struct net *net,
}
}
- catchall = kmalloc(sizeof(*catchall), GFP_KERNEL);
+ catchall = kmalloc(sizeof(*catchall), GFP_KERNEL_ACCOUNT);
if (!catchall)
return -ENOMEM;
@@ -9207,7 +9207,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
FLOW_BLOCK_UNBIND);
list_del_rcu(&hook->list);
- kfree(hook);
+ kfree_rcu(hook, rcu);
}
kfree(flowtable->name);
module_put(flowtable->data.type->owner);
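The kfree_rcu() conversion in nf_tables_flowtable_destroy() above matters because the hook is unlinked with list_del_rcu(): a plain kfree() could free the entry while an RCU reader is still walking the list. A minimal kernel-context sketch of the pattern, using a hypothetical struct (the real struct nft_hook gained an rcu member for this):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_hook {
	struct list_head list;
	struct rcu_head rcu;	/* storage used by kfree_rcu() */
};

static void my_hook_remove(struct my_hook *hook)
{
	/* unlink so that new RCU readers can no longer find the entry */
	list_del_rcu(&hook->list);
	/* defer the actual free until all pre-existing readers finish */
	kfree_rcu(hook, rcu);
}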
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 52cdfee17f73..7ca4f0d21fe2 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -535,7 +535,7 @@ nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
struct xt_match *m = expr->ops->data;
int ret;
- priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
+ priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL_ACCOUNT);
if (!priv->info)
return -ENOMEM;
@@ -808,7 +808,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
goto err;
}
- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto err;
@@ -898,7 +898,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
goto err;
}
- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto err;
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 5defe6e4fd98..e35588137995 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -163,7 +163,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
nla = tb[NFTA_LOG_PREFIX];
if (nla != NULL) {
- priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
+ priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL_ACCOUNT);
if (priv->prefix == NULL)
return -ENOMEM;
nla_strscpy(priv->prefix, nla, nla_len(nla) + 1);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 8c8eb14d647b..05cd1e6e6a2f 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -952,7 +952,7 @@ static int nft_secmark_obj_init(const struct nft_ctx *ctx,
if (tb[NFTA_SECMARK_CTX] == NULL)
return -EINVAL;
- priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL);
+ priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL_ACCOUNT);
if (!priv->ctx)
return -ENOMEM;
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 7d29db7c2ac0..bd058babfc82 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -66,7 +66,7 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
- priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL);
+ priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL_ACCOUNT);
if (!priv->counter)
return -ENOMEM;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index eb4c4a4ac7ac..7be342b495f5 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -663,7 +663,7 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f,
check_add_overflow(rules, extra, &rules_alloc))
return -EOVERFLOW;
- new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL);
+ new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL_ACCOUNT);
if (!new_mt)
return -ENOMEM;
@@ -936,7 +936,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
return;
}
- new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL);
+ new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
if (!new_lt)
return;
@@ -1212,7 +1212,7 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
scratch = kzalloc_node(struct_size(scratch, map,
bsize_max * 2) +
NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL, cpu_to_node(i));
+ GFP_KERNEL_ACCOUNT, cpu_to_node(i));
if (!scratch) {
/* On failure, there's no need to undo previous
* allocations: this means that some scratch maps have
@@ -1427,7 +1427,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
struct nft_pipapo_match *new;
int i;
- new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL);
+ new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL_ACCOUNT);
if (!new)
return NULL;
@@ -1457,7 +1457,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
src->bsize * sizeof(*dst->lt) +
NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!new_lt)
goto out_lt;
@@ -1470,7 +1470,8 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (src->rules > 0) {
dst->mt = kvmalloc_array(src->rules_alloc,
- sizeof(*src->mt), GFP_KERNEL);
+ sizeof(*src->mt),
+ GFP_KERNEL_ACCOUNT);
if (!dst->mt)
goto out_mt;
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 60a76e6e348e..5c6ed68cc6e0 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -509,13 +509,14 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
return err;
}
- md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
+ md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL,
+ GFP_KERNEL_ACCOUNT);
if (!md)
return -ENOMEM;
memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
- err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
+ err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL_ACCOUNT);
if (err < 0) {
metadata_dst_free(md);
return err;
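Several hunks above (nf_tables_api.c, nft_compat.c, nft_log.c, nft_meta.c, nft_numgen.c, nft_set_pipapo.c, nft_tunnel.c) switch allocations from GFP_KERNEL to GFP_KERNEL_ACCOUNT, i.e. GFP_KERNEL | __GFP_ACCOUNT, so that ruleset-driven memory is charged to the allocating task's memory cgroup. A minimal kernel-context sketch with a hypothetical object:

#include <linux/gfp.h>
#include <linux/slab.h>

struct my_expr_priv {
	char prefix[64];
};

static struct my_expr_priv *my_expr_alloc(void)
{
	/*
	 * __GFP_ACCOUNT makes the allocation count against the current
	 * task's memcg limit, so a user-controlled ruleset cannot grow
	 * kernel memory without bound.
	 */
	return kzalloc(sizeof(struct my_expr_priv), GFP_KERNEL_ACCOUNT);
}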
diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
index 41ece61eb57a..00c51cf693f3 100644
--- a/net/qrtr/af_qrtr.c
+++ b/net/qrtr/af_qrtr.c
@@ -884,7 +884,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
mutex_lock(&qrtr_node_lock);
list_for_each_entry(node, &qrtr_all_nodes, item) {
- skbn = skb_clone(skb, GFP_KERNEL);
+ skbn = pskb_copy(skb, GFP_KERNEL);
if (!skbn)
break;
skb_set_owner_w(skbn, skb->sk);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 13a5126bc36e..7d3e82e4c2fc 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1394,7 +1394,6 @@ static const struct file_operations rfkill_fops = {
.release = rfkill_fop_release,
.unlocked_ioctl = rfkill_fop_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .llseek = no_llseek,
};
#define RFKILL_NAME "rfkill"
diff --git a/net/socket.c b/net/socket.c
index 8d8b84fa404a..601ad74930ef 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -153,7 +153,6 @@ static void sock_show_fdinfo(struct seq_file *m, struct file *f)
static const struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read_iter = sock_read_iter,
.write_iter = sock_write_iter,
.poll = sock_poll,
@@ -556,10 +555,10 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
struct socket *sock;
*err = -EBADF;
- if (f.file) {
- sock = sock_from_file(f.file);
+ if (fd_file(f)) {
+ sock = sock_from_file(fd_file(f));
if (likely(sock)) {
- *fput_needed = f.flags & FDPUT_FPUT;
+ *fput_needed = f.word & FDPUT_FPUT;
return sock;
}
*err = -ENOTSOCK;
@@ -2014,8 +2013,8 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
struct fd f;
f = fdget(fd);
- if (f.file) {
- ret = __sys_accept4_file(f.file, upeer_sockaddr,
+ if (fd_file(f)) {
+ ret = __sys_accept4_file(fd_file(f), upeer_sockaddr,
upeer_addrlen, flags);
fdput(f);
}
@@ -2076,12 +2075,12 @@ int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
struct fd f;
f = fdget(fd);
- if (f.file) {
+ if (fd_file(f)) {
struct sockaddr_storage address;
ret = move_addr_to_kernel(uservaddr, addrlen, &address);
if (!ret)
- ret = __sys_connect_file(f.file, &address, addrlen, 0);
+ ret = __sys_connect_file(fd_file(f), &address, addrlen, 0);
fdput(f);
}
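The socket.c hunks above stop touching struct fd fields directly (f.file) and go through the fd_file() accessor, with the FDPUT_FPUT flag now read from f.word. A minimal kernel-context sketch of the resulting lookup shape, loosely following the converted callers (error handling simplified, function name hypothetical):

#include <linux/file.h>
#include <linux/net.h>

static int my_with_socket(int fd)
{
	struct fd f = fdget(fd);
	struct socket *sock;
	int err = 0;

	if (!fd_file(f))		/* not a valid descriptor */
		return -EBADF;

	sock = sock_from_file(fd_file(f));
	if (sock)
		; /* ... operate on sock here ... */
	else
		err = -ENOTSOCK;	/* valid file, but not a socket */

	fdput(f);	/* drops the reference only if fdget() took one */
	return err;
}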
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 95ff74706104..1bd3e531b0e0 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -731,11 +731,10 @@ static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
static void cache_revisit_request(struct cache_head *item)
{
struct cache_deferred_req *dreq;
- struct list_head pending;
struct hlist_node *tmp;
int hash = DFR_HASH(item);
+ LIST_HEAD(pending);
- INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
@@ -756,10 +755,8 @@ static void cache_revisit_request(struct cache_head *item)
void cache_clean_deferred(void *owner)
{
struct cache_deferred_req *dreq, *tmp;
- struct list_head pending;
+ LIST_HEAD(pending);
-
- INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
@@ -1085,9 +1082,8 @@ static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
struct cache_queue *cq, *tmp;
struct cache_request *cr;
- struct list_head dequeued;
+ LIST_HEAD(dequeued);
- INIT_LIST_HEAD(&dequeued);
spin_lock(&queue_lock);
list_for_each_entry_safe(cq, tmp, &detail->queue, list)
if (!cq->reader) {
@@ -1596,7 +1592,6 @@ static int cache_release_procfs(struct inode *inode, struct file *filp)
}
static const struct proc_ops cache_channel_proc_ops = {
- .proc_lseek = no_llseek,
.proc_read = cache_read_procfs,
.proc_write = cache_write_procfs,
.proc_poll = cache_poll_procfs,
@@ -1662,7 +1657,6 @@ static const struct proc_ops cache_flush_proc_ops = {
.proc_read = read_flush_procfs,
.proc_write = write_flush_procfs,
.proc_release = release_flush_procfs,
- .proc_lseek = no_llseek,
};
static void remove_cache_proc_entries(struct cache_detail *cd)
@@ -1815,7 +1809,6 @@ static int cache_release_pipefs(struct inode *inode, struct file *filp)
const struct file_operations cache_file_operations_pipefs = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = cache_read_pipefs,
.write = cache_write_pipefs,
.poll = cache_poll_pipefs,
@@ -1881,7 +1874,6 @@ const struct file_operations cache_flush_operations_pipefs = {
.read = read_flush_pipefs,
.write = write_flush_pipefs,
.release = release_flush_pipefs,
- .llseek = no_llseek,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
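The sunrpc/cache.c hunks above are a pure idiom cleanup: LIST_HEAD(name) declares and initialises an on-stack list head in one statement, replacing the separate declaration plus INIT_LIST_HEAD() call. A minimal kernel-context sketch of the equivalence:

#include <linux/list.h>

static void before_style(void)
{
	struct list_head pending;	/* declared uninitialised ... */

	INIT_LIST_HEAD(&pending);	/* ... then pointed at itself */
}

static void after_style(void)
{
	/* expands to: struct list_head pending = LIST_HEAD_INIT(pending); */
	LIST_HEAD(pending);
}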
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 09f29a95f2bc..0090162ee8c3 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -48,13 +48,8 @@
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
-/*
- * All RPC clients are linked into this list
- */
-
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
-
static void call_start(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
@@ -546,7 +541,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
.connect_timeout = args->connect_timeout,
.reconnect_timeout = args->reconnect_timeout,
};
- char servername[48];
+ char servername[RPC_MAXNETNAMELEN];
struct rpc_clnt *clnt;
int i;
@@ -1893,12 +1888,6 @@ call_allocate(struct rpc_task *task)
if (req->rq_buffer)
return;
- if (proc->p_proc != 0) {
- BUG_ON(proc->p_arglen == 0);
- if (proc->p_decode != NULL)
- BUG_ON(proc->p_replen == 0);
- }
-
/*
* Calculate the size (in quads) of the RPC call
* and reply headers, and convert both values
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 910a5d850d04..7ce3721c06ca 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -385,7 +385,6 @@ rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static const struct file_operations rpc_pipe_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = rpc_pipe_read,
.write = rpc_pipe_write,
.poll = rpc_pipe_poll,
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index d4a362c9e4b3..e3c6e3b63f0b 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -36,7 +36,11 @@ static inline int sock_is_loopback(struct sock *sk)
return loopback;
}
+struct svc_serv;
+struct svc_rqst;
int rpc_clients_notifier_register(void);
void rpc_clients_notifier_unregister(void);
void auth_domain_cleanup(void);
+void svc_sock_update_bufs(struct svc_serv *serv);
+enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
#endif /* _NET_SUNRPC_SUNRPC_H */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 88a59cfa5583..7e7f4e0390c7 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,6 +32,7 @@
#include <trace/events/sunrpc.h>
#include "fail.h"
+#include "sunrpc.h"
#define RPCDBG_FACILITY RPCDBG_SVCDSP
@@ -417,7 +418,7 @@ struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
return &serv->sv_pools[pidx % serv->sv_nrpools];
}
-int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
+static int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
int err;
@@ -429,7 +430,6 @@ int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
svc_unregister(serv, net);
return 0;
}
-EXPORT_SYMBOL_GPL(svc_rpcb_setup);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
@@ -440,10 +440,11 @@ EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
- struct svc_program *progp;
- unsigned int i;
+ unsigned int p, i;
+
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
for (i = 0; i < progp->pg_nvers; i++) {
if (progp->pg_vers[i] == NULL)
continue;
@@ -480,7 +481,7 @@ __svc_init_bc(struct svc_serv *serv)
* Create an RPC service
*/
static struct svc_serv *
-__svc_create(struct svc_program *prog, struct svc_stat *stats,
+__svc_create(struct svc_program *prog, int nprogs, struct svc_stat *stats,
unsigned int bufsize, int npools, int (*threadfn)(void *data))
{
struct svc_serv *serv;
@@ -491,7 +492,8 @@ __svc_create(struct svc_program *prog, struct svc_stat *stats,
if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
return NULL;
serv->sv_name = prog->pg_name;
- serv->sv_program = prog;
+ serv->sv_programs = prog;
+ serv->sv_nprogs = nprogs;
serv->sv_stats = stats;
if (bufsize > RPCSVC_MAXPAYLOAD)
bufsize = RPCSVC_MAXPAYLOAD;
@@ -499,17 +501,18 @@ __svc_create(struct svc_program *prog, struct svc_stat *stats,
serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
serv->sv_threadfn = threadfn;
xdrsize = 0;
- while (prog) {
- prog->pg_lovers = prog->pg_nvers-1;
- for (vers=0; vers<prog->pg_nvers ; vers++)
- if (prog->pg_vers[vers]) {
- prog->pg_hivers = vers;
- if (prog->pg_lovers > vers)
- prog->pg_lovers = vers;
- if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
- xdrsize = prog->pg_vers[vers]->vs_xdrsize;
+ for (i = 0; i < nprogs; i++) {
+ struct svc_program *progp = &prog[i];
+
+ progp->pg_lovers = progp->pg_nvers-1;
+ for (vers = 0; vers < progp->pg_nvers ; vers++)
+ if (progp->pg_vers[vers]) {
+ progp->pg_hivers = vers;
+ if (progp->pg_lovers > vers)
+ progp->pg_lovers = vers;
+ if (progp->pg_vers[vers]->vs_xdrsize > xdrsize)
+ xdrsize = progp->pg_vers[vers]->vs_xdrsize;
}
- prog = prog->pg_next;
}
serv->sv_xdrsize = xdrsize;
INIT_LIST_HEAD(&serv->sv_tempsocks);
@@ -558,13 +561,14 @@ __svc_create(struct svc_program *prog, struct svc_stat *stats,
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
int (*threadfn)(void *data))
{
- return __svc_create(prog, NULL, bufsize, 1, threadfn);
+ return __svc_create(prog, 1, NULL, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);
/**
* svc_create_pooled - Create an RPC service with pooled threads
- * @prog: the RPC program the new service will handle
+ * @prog: Array of RPC programs the new service will handle
+ * @nprogs: Number of programs in the array
* @stats: the stats struct if desired
* @bufsize: maximum message size for @prog
* @threadfn: a function to service RPC requests for @prog
@@ -572,6 +576,7 @@ EXPORT_SYMBOL_GPL(svc_create);
* Returns an instantiated struct svc_serv object or NULL.
*/
struct svc_serv *svc_create_pooled(struct svc_program *prog,
+ unsigned int nprogs,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data))
@@ -579,7 +584,7 @@ struct svc_serv *svc_create_pooled(struct svc_program *prog,
struct svc_serv *serv;
unsigned int npools = svc_pool_map_get();
- serv = __svc_create(prog, stats, bufsize, npools, threadfn);
+ serv = __svc_create(prog, nprogs, stats, bufsize, npools, threadfn);
if (!serv)
goto out_err;
serv->sv_is_pooled = true;
@@ -602,16 +607,16 @@ svc_destroy(struct svc_serv **servp)
*servp = NULL;
- dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
+ dprintk("svc: svc_destroy(%s)\n", serv->sv_programs->pg_name);
timer_shutdown_sync(&serv->sv_temptimer);
/*
* Remaining transports at this point are not expected.
*/
WARN_ONCE(!list_empty(&serv->sv_permsocks),
- "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
+ "SVC: permsocks remain for %s\n", serv->sv_programs->pg_name);
WARN_ONCE(!list_empty(&serv->sv_tempsocks),
- "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);
+ "SVC: tempsocks remain for %s\n", serv->sv_programs->pg_name);
cache_clean_deferred(serv);
@@ -664,8 +669,21 @@ svc_release_buffer(struct svc_rqst *rqstp)
put_page(rqstp->rq_pages[i]);
}
-struct svc_rqst *
-svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
+static void
+svc_rqst_free(struct svc_rqst *rqstp)
+{
+ folio_batch_release(&rqstp->rq_fbatch);
+ svc_release_buffer(rqstp);
+ if (rqstp->rq_scratch_page)
+ put_page(rqstp->rq_scratch_page);
+ kfree(rqstp->rq_resp);
+ kfree(rqstp->rq_argp);
+ kfree(rqstp->rq_auth_data);
+ kfree_rcu(rqstp, rq_rcu_head);
+}
+
+static struct svc_rqst *
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
struct svc_rqst *rqstp;
@@ -693,27 +711,10 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
goto out_enomem;
- return rqstp;
-out_enomem:
- svc_rqst_free(rqstp);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(svc_rqst_alloc);
-
-static struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
-{
- struct svc_rqst *rqstp;
+ rqstp->rq_err = -EAGAIN; /* No error yet */
- rqstp = svc_rqst_alloc(serv, pool, node);
- if (!rqstp)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_bh(&serv->sv_lock);
serv->sv_nrthreads += 1;
- spin_unlock_bh(&serv->sv_lock);
-
- atomic_inc(&pool->sp_nrthreads);
+ pool->sp_nrthreads += 1;
/* Protected by whatever lock the service uses when calling
* svc_set_num_threads()
@@ -721,6 +722,10 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
return rqstp;
+
+out_enomem:
+ svc_rqst_free(rqstp);
+ return NULL;
}
/**
@@ -768,31 +773,22 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
struct svc_pool *pool;
unsigned int i;
-retry:
pool = target_pool;
- if (pool != NULL) {
- if (atomic_inc_not_zero(&pool->sp_nrthreads))
- goto found_pool;
- return NULL;
- } else {
+ if (!pool) {
for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
- if (atomic_inc_not_zero(&pool->sp_nrthreads))
- goto found_pool;
+ if (pool->sp_nrthreads)
+ break;
}
- return NULL;
}
-found_pool:
- set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
- set_bit(SP_NEED_VICTIM, &pool->sp_flags);
- if (!atomic_dec_and_test(&pool->sp_nrthreads))
+ if (pool && pool->sp_nrthreads) {
+ set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ set_bit(SP_NEED_VICTIM, &pool->sp_flags);
return pool;
- /* Nothing left in this pool any more */
- clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
- clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
- goto retry;
+ }
+ return NULL;
}
static int
@@ -803,6 +799,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
struct svc_pool *chosen_pool;
unsigned int state = serv->sv_nrthreads-1;
int node;
+ int err;
do {
nrservs--;
@@ -810,8 +807,8 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
node = svc_pool_map_get_node(chosen_pool->sp_id);
rqstp = svc_prepare_thread(serv, chosen_pool, node);
- if (IS_ERR(rqstp))
- return PTR_ERR(rqstp);
+ if (!rqstp)
+ return -ENOMEM;
task = kthread_create_on_node(serv->sv_threadfn, rqstp,
node, "%s", serv->sv_name);
if (IS_ERR(task)) {
@@ -825,6 +822,13 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
svc_sock_update_bufs(serv);
wake_up_process(task);
+
+ wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN);
+ err = rqstp->rq_err;
+ if (err) {
+ svc_exit_thread(rqstp);
+ return err;
+ }
} while (nrservs > 0);
return 0;
@@ -871,7 +875,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
if (!pool)
nrservs -= serv->sv_nrthreads;
else
- nrservs -= atomic_read(&pool->sp_nrthreads);
+ nrservs -= pool->sp_nrthreads;
if (nrservs > 0)
return svc_start_kthreads(serv, pool, nrservs);
@@ -933,25 +937,21 @@ void svc_rqst_release_pages(struct svc_rqst *rqstp)
}
}
-/*
- * Called from a server thread as it's exiting. Caller must hold the "service
- * mutex" for the service.
+/**
+ * svc_exit_thread - finalise the termination of a sunrpc server thread
+ * @rqstp: the svc_rqst which represents the thread.
+ *
+ * When a thread started with svc_new_thread() exits, it must call
+ * svc_exit_thread() as its last act. This must be done with the
+ * service mutex held. Normally this is held by a DIFFERENT thread, the
+ * one that is calling svc_set_num_threads() and which will wait for
+ * SP_VICTIM_REMAINS to be cleared before dropping the mutex. If the
+ * thread exits for any reason other than svc_thread_should_stop()
+ * returning %true (which indicates that svc_set_num_threads() is
+ * waiting for it to exit), then it must take the service mutex itself,
+ * which can only safely be done using mutex_trylock().
*/
void
-svc_rqst_free(struct svc_rqst *rqstp)
-{
- folio_batch_release(&rqstp->rq_fbatch);
- svc_release_buffer(rqstp);
- if (rqstp->rq_scratch_page)
- put_page(rqstp->rq_scratch_page);
- kfree(rqstp->rq_resp);
- kfree(rqstp->rq_argp);
- kfree(rqstp->rq_auth_data);
- kfree_rcu(rqstp, rq_rcu_head);
-}
-EXPORT_SYMBOL_GPL(svc_rqst_free);
-
-void
svc_exit_thread(struct svc_rqst *rqstp)
{
struct svc_serv *serv = rqstp->rq_server;
@@ -959,11 +959,8 @@ svc_exit_thread(struct svc_rqst *rqstp)
list_del_rcu(&rqstp->rq_all);
- atomic_dec(&pool->sp_nrthreads);
-
- spin_lock_bh(&serv->sv_lock);
+ pool->sp_nrthreads -= 1;
serv->sv_nrthreads -= 1;
- spin_unlock_bh(&serv->sv_lock);
svc_sock_update_bufs(serv);
svc_rqst_free(rqstp);
@@ -1098,6 +1095,7 @@ static int __svc_register(struct net *net, const char *progname,
return error;
}
+static
int svc_rpcbind_set_version(struct net *net,
const struct svc_program *progp,
u32 version, int family,
@@ -1108,7 +1106,6 @@ int svc_rpcbind_set_version(struct net *net,
version, family, proto, port);
}
-EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);
int svc_generic_rpcbind_set(struct net *net,
const struct svc_program *progp,
@@ -1156,15 +1153,16 @@ int svc_register(const struct svc_serv *serv, struct net *net,
const int family, const unsigned short proto,
const unsigned short port)
{
- struct svc_program *progp;
- unsigned int i;
+ unsigned int p, i;
int error = 0;
WARN_ON_ONCE(proto == 0 && port == 0);
if (proto == 0 && port == 0)
return -EINVAL;
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
+
for (i = 0; i < progp->pg_nvers; i++) {
error = progp->pg_rpcbind_set(net, progp, i,
@@ -1216,13 +1214,14 @@ static void __svc_unregister(struct net *net, const u32 program, const u32 versi
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
struct sighand_struct *sighand;
- struct svc_program *progp;
unsigned long flags;
- unsigned int i;
+ unsigned int p, i;
clear_thread_flag(TIF_SIGPENDING);
- for (progp = serv->sv_program; progp; progp = progp->pg_next) {
+ for (p = 0; p < serv->sv_nprogs; p++) {
+ struct svc_program *progp = &serv->sv_programs[p];
+
for (i = 0; i < progp->pg_nvers; i++) {
if (progp->pg_vers[i] == NULL)
continue;
@@ -1328,7 +1327,7 @@ svc_process_common(struct svc_rqst *rqstp)
struct svc_process_info process;
enum svc_auth_status auth_res;
unsigned int aoffset;
- int rc;
+ int pr, rc;
__be32 *p;
/* Will be turned off only when NFSv4 Sessions are used */
@@ -1352,9 +1351,12 @@ svc_process_common(struct svc_rqst *rqstp)
rqstp->rq_vers = be32_to_cpup(p++);
rqstp->rq_proc = be32_to_cpup(p);
- for (progp = serv->sv_program; progp; progp = progp->pg_next)
+ for (pr = 0; pr < serv->sv_nprogs; pr++) {
+ progp = &serv->sv_programs[pr];
+
if (rqstp->rq_prog == progp->pg_prog)
break;
+ }
/*
* Decode auth data, and add verifier to reply buffer.
@@ -1526,6 +1528,14 @@ err_system_err:
goto sendit;
}
+/*
+ * Drop request
+ */
+static void svc_drop(struct svc_rqst *rqstp)
+{
+ trace_svc_drop(rqstp);
+}
+
/**
* svc_process - Execute one RPC transaction
* @rqstp: RPC transaction context
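The svc.c rework above drops the pg_next singly linked list in favour of an sv_programs array of sv_nprogs entries, so every program walk (__svc_create(), svc_register(), svc_process_common(), ...) becomes an indexed loop. A minimal standalone sketch of the before/after traversal (simplified struct; field names borrowed from the patch):

struct prog {
	unsigned int pg_prog;
	struct prog *pg_next;	/* old representation only */
};

/* old: follow the pg_next chain until a program number matches */
static struct prog *find_linked(struct prog *head, unsigned int num)
{
	struct prog *p;

	for (p = head; p; p = p->pg_next)
		if (p->pg_prog == num)
			return p;
	return 0;
}

/* new: index into a contiguous array of nprogs entries */
static struct prog *find_array(struct prog *progs, unsigned int nprogs,
			       unsigned int num)
{
	unsigned int i;

	for (i = 0; i < nprogs; i++)
		if (progs[i].pg_prog == num)
			return &progs[i];
	return 0;
}

int main(void)
{
	struct prog b = { 100005, 0 };
	struct prog a = { 100003, &b };
	struct prog arr[2] = { { 100003, 0 }, { 100005, 0 } };

	return (find_linked(&a, 100005) && find_array(arr, 2, 100005)) ? 0 : 1;
}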
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index d3735ab3e6d1..43c57124de52 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -268,7 +268,7 @@ static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
spin_unlock(&svc_xprt_class_lock);
newxprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
if (IS_ERR(newxprt)) {
- trace_svc_xprt_create_err(serv->sv_program->pg_name,
+ trace_svc_xprt_create_err(serv->sv_programs->pg_name,
xcl->xcl_name, sap, len,
newxprt);
module_put(xcl->xcl_owner);
@@ -905,15 +905,6 @@ void svc_recv(struct svc_rqst *rqstp)
}
EXPORT_SYMBOL_GPL(svc_recv);
-/*
- * Drop request
- */
-void svc_drop(struct svc_rqst *rqstp)
-{
- trace_svc_drop(rqstp);
-}
-EXPORT_SYMBOL_GPL(svc_drop);
-
/**
* svc_send - Return reply to client
* @rqstp: RPC transaction context
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 1619211f0960..55b4d2874188 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -18,6 +18,7 @@
#include <linux/sunrpc/svcauth.h>
#include <linux/err.h>
#include <linux/hash.h>
+#include <linux/user_namespace.h>
#include <trace/events/sunrpc.h>
@@ -98,7 +99,6 @@ enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp)
rqstp->rq_authop = aops;
return aops->accept(rqstp);
}
-EXPORT_SYMBOL_GPL(svc_authenticate);
/**
* svc_set_client - Assign an appropriate 'auth_domain' as the client
@@ -176,6 +176,33 @@ rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp)
}
EXPORT_SYMBOL_GPL(svc_auth_flavor);
+/**
+ * svcauth_map_clnt_to_svc_cred_local - map a generic cred to a svc_cred suitable for use in nfsd
+ * @clnt: rpc_clnt associated with nfs client
+ * @cred: generic cred associated with nfs client
+ * @svc: returned svc_cred that is suitable for use in nfsd
+ */
+void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *cred,
+ struct svc_cred *svc)
+{
+ struct user_namespace *userns = clnt->cl_cred ?
+ clnt->cl_cred->user_ns : &init_user_ns;
+
+ memset(svc, 0, sizeof(struct svc_cred));
+
+ svc->cr_uid = KUIDT_INIT(from_kuid_munged(userns, cred->fsuid));
+ svc->cr_gid = KGIDT_INIT(from_kgid_munged(userns, cred->fsgid));
+ svc->cr_flavor = clnt->cl_auth->au_flavor;
+ if (cred->group_info)
+ svc->cr_group_info = get_group_info(cred->group_info);
+ /* These aren't relevant for local (network is bypassed) */
+ svc->cr_principal = NULL;
+ svc->cr_gss_mech = NULL;
+}
+EXPORT_SYMBOL_GPL(svcauth_map_clnt_to_svc_cred_local);
+
/**************************************************
* 'auth_domains' are stored in a hash table indexed by name.
* When the last reference to an 'auth_domain' is dropped,
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 04b45588ae6f..8ca98b146ec8 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -697,7 +697,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
rqstp->rq_auth_stat = rpc_autherr_badcred;
ipm = ip_map_cached_get(xprt);
if (ipm == NULL)
- ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
+ ipm = __ip_map_lookup(sn->ip_map_cache,
+ rqstp->rq_server->sv_programs->pg_class,
&sin6->sin6_addr);
if (ipm == NULL)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 6b3f01beb294..825ec5357691 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1378,7 +1378,6 @@ void svc_sock_update_bufs(struct svc_serv *serv)
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
spin_unlock_bh(&serv->sv_lock);
}
-EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
/*
* Initialize socket for RPC use and create svc_sock struct
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index f15750cacacf..c3fbf0779d4a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -339,7 +339,6 @@ static int svc_rdma_cma_handler(struct rdma_cm_id *cma_id,
svc_xprt_enqueue(xprt);
break;
case RDMA_CM_EVENT_DISCONNECTED:
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
svc_xprt_deferred_close(xprt);
break;
default:
@@ -370,7 +369,7 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
listen_id = svc_rdma_create_listen_id(net, sa, cma_xprt);
if (IS_ERR(listen_id)) {
kfree(cma_xprt);
- return (struct svc_xprt *)listen_id;
+ return ERR_CAST(listen_id);
}
cma_xprt->sc_cm_id = listen_id;
@@ -384,6 +383,16 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
return &cma_xprt->sc_xprt;
}
+static void svc_rdma_xprt_done(struct rpcrdma_notification *rn)
+{
+ struct svcxprt_rdma *rdma = container_of(rn, struct svcxprt_rdma,
+ sc_rn);
+ struct rdma_cm_id *id = rdma->sc_cm_id;
+
+ trace_svcrdma_device_removal(id);
+ svc_xprt_close(&rdma->sc_xprt);
+}
+
/*
* This is the xpo_recvfrom function for listening endpoints. Its
* purpose is to accept incoming connections. The CMA callback handler
@@ -425,6 +434,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dev = newxprt->sc_cm_id->device;
newxprt->sc_port_num = newxprt->sc_cm_id->port_num;
+ if (rpcrdma_rn_register(dev, &newxprt->sc_rn, svc_rdma_xprt_done))
+ goto errout;
+
newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = svcrdma_max_requests;
newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
@@ -580,6 +592,7 @@ static void __svc_rdma_free(struct work_struct *work)
{
struct svcxprt_rdma *rdma =
container_of(work, struct svcxprt_rdma, sc_work);
+ struct ib_device *device = rdma->sc_cm_id->device;
/* This blocks until the Completion Queues are empty */
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -608,6 +621,7 @@ static void __svc_rdma_free(struct work_struct *work)
/* Destroy the CM ID */
rdma_destroy_id(rdma->sc_cm_id);
+ rpcrdma_rn_unregister(device, &rdma->sc_rn);
kfree(rdma);
}
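The svc_rdma change above replaces reliance on the RDMA_CM_EVENT_DEVICE_REMOVAL CM event with an rpcrdma_rn_register()'d per-device removal notification; the callback receives only the embedded rpcrdma_notification member and recovers the transport with container_of(). A minimal standalone sketch of that embedded-member callback pattern (types and names here are hypothetical):

#include <stddef.h>
#include <stdio.h>

/* container_of as used in the kernel, reduced to plain C */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notification {
	void (*done)(struct notification *rn);
};

struct transport {
	int id;
	struct notification rn;	/* embedded so callbacks can find us */
};

static void transport_removed(struct notification *rn)
{
	struct transport *t = container_of(rn, struct transport, rn);

	printf("tearing down transport %d\n", t->id);
}

int main(void)
{
	struct transport t = { .id = 7, .rn = { .done = transport_removed } };

	t.rn.done(&t.rn);	/* what the notifier would invoke */
	return 0;
}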
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index e0160da4ef43..85e423921734 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -94,6 +94,63 @@ out_rcu:
return ret;
}
+/* Caller needs to hold vsock->tx_lock on vq */
+static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
+ struct virtio_vsock *vsock)
+{
+ int ret, in_sg = 0, out_sg = 0;
+ struct scatterlist **sgs;
+
+ sgs = vsock->out_sgs;
+ sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
+ sizeof(*virtio_vsock_hdr(skb)));
+ out_sg++;
+
+ if (!skb_is_nonlinear(skb)) {
+ if (skb->len > 0) {
+ sg_init_one(sgs[out_sg], skb->data, skb->len);
+ out_sg++;
+ }
+ } else {
+ struct skb_shared_info *si;
+ int i;
+
+ /* If skb is nonlinear, then its buffer must contain
+ * only header and nothing more. Data is stored in
+ * the fragged part.
+ */
+ WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
+
+ si = skb_shinfo(skb);
+
+ for (i = 0; i < si->nr_frags; i++) {
+ skb_frag_t *skb_frag = &si->frags[i];
+ void *va;
+
+ /* We will use 'page_to_virt()' for the userspace page
+ * here, because virtio or dma-mapping layers will call
+ * 'virt_to_phys()' later to fill the buffer descriptor.
+ * We don't touch memory at "virtual" address of this page.
+ */
+ va = page_to_virt(skb_frag_page(skb_frag));
+ sg_init_one(sgs[out_sg],
+ va + skb_frag_off(skb_frag),
+ skb_frag_size(skb_frag));
+ out_sg++;
+ }
+ }
+
+ ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
+ /* Usually this means that there is no more space available in
+ * the vq
+ */
+ if (ret < 0)
+ return ret;
+
+ virtio_transport_deliver_tap_pkt(skb);
+ return 0;
+}
+
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
@@ -111,66 +168,22 @@ virtio_transport_send_pkt_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
- int ret, in_sg = 0, out_sg = 0;
- struct scatterlist **sgs;
struct sk_buff *skb;
bool reply;
+ int ret;
skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
if (!skb)
break;
reply = virtio_vsock_skb_reply(skb);
- sgs = vsock->out_sgs;
- sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
- sizeof(*virtio_vsock_hdr(skb)));
- out_sg++;
-
- if (!skb_is_nonlinear(skb)) {
- if (skb->len > 0) {
- sg_init_one(sgs[out_sg], skb->data, skb->len);
- out_sg++;
- }
- } else {
- struct skb_shared_info *si;
- int i;
-
- /* If skb is nonlinear, then its buffer must contain
- * only header and nothing more. Data is stored in
- * the fragged part.
- */
- WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
-
- si = skb_shinfo(skb);
-
- for (i = 0; i < si->nr_frags; i++) {
- skb_frag_t *skb_frag = &si->frags[i];
- void *va;
- /* We will use 'page_to_virt()' for the userspace page
- * here, because virtio or dma-mapping layers will call
- * 'virt_to_phys()' later to fill the buffer descriptor.
- * We don't touch memory at "virtual" address of this page.
- */
- va = page_to_virt(skb_frag_page(skb_frag));
- sg_init_one(sgs[out_sg],
- va + skb_frag_off(skb_frag),
- skb_frag_size(skb_frag));
- out_sg++;
- }
- }
-
- ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
- /* Usually this means that there is no more space available in
- * the vq
- */
+ ret = virtio_transport_send_skb(skb, vq, vsock);
if (ret < 0) {
virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
break;
}
- virtio_transport_deliver_tap_pkt(skb);
-
if (reply) {
struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
int val;
@@ -195,6 +208,28 @@ out:
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
+/* Caller needs to hold RCU for vsock.
+ * Returns 0 if the packet is successfully put on the vq.
+ */
+static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb)
+{
+ struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
+ int ret;
+
+ /* Inside RCU, can't sleep! */
+ ret = mutex_trylock(&vsock->tx_lock);
+ if (unlikely(ret == 0))
+ return -EBUSY;
+
+ ret = virtio_transport_send_skb(skb, vq, vsock);
+ if (ret == 0)
+ virtqueue_kick(vq);
+
+ mutex_unlock(&vsock->tx_lock);
+
+ return ret;
+}
+
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
@@ -218,11 +253,20 @@ virtio_transport_send_pkt(struct sk_buff *skb)
goto out_rcu;
}
- if (virtio_vsock_skb_reply(skb))
- atomic_inc(&vsock->queued_replies);
+ /* If send_pkt_queue is empty, we can safely bypass this queue
+ * because packet order is maintained, and try to put the packet
+ * on the virtqueue using virtio_transport_send_skb_fast_path.
+ * If this fails, we simply put the packet on the intermediate
+ * queue and schedule the worker.
+ */
+ if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) ||
+ virtio_transport_send_skb_fast_path(vsock, skb)) {
+ if (virtio_vsock_skb_reply(skb))
+ atomic_inc(&vsock->queued_replies);
- virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
- queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ }
out_rcu:
rcu_read_unlock();
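The vsock fast path above must not sleep (it runs under rcu_read_lock()), so it uses mutex_trylock() and falls back to the ordered intermediate queue whenever the lock is contended or the virtqueue is full. A minimal userspace sketch of that trylock-or-defer shape (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static int enqueue_direct(int pkt)
{
	printf("fast path: pkt %d placed on the vq\n", pkt);
	return 0;		/* pretend the vq had room */
}

static void defer_to_worker(int pkt)
{
	printf("slow path: pkt %d queued for the worker\n", pkt);
}

static void send_pkt(int pkt)
{
	/* fast path only when the lock is uncontended; never block here */
	if (pthread_mutex_trylock(&tx_lock) == 0) {
		int ret = enqueue_direct(pkt);

		pthread_mutex_unlock(&tx_lock);
		if (ret == 0)
			return;
	}
	/* contended lock or full vq: fall back to the ordered queue */
	defer_to_worker(pkt);
}

int main(void)
{
	send_pkt(1);
	return 0;
}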
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 7e16336044b2..1140b2a120ca 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1320,14 +1320,6 @@ struct xdp_umem_reg_v1 {
__u32 headroom;
};
-struct xdp_umem_reg_v2 {
- __u64 addr; /* Start of packet data area */
- __u64 len; /* Length of packet data area */
- __u32 chunk_size;
- __u32 headroom;
- __u32 flags;
-};
-
static int xsk_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -1371,10 +1363,19 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
if (optlen < sizeof(struct xdp_umem_reg_v1))
return -EINVAL;
- else if (optlen < sizeof(struct xdp_umem_reg_v2))
- mr_size = sizeof(struct xdp_umem_reg_v1);
else if (optlen < sizeof(mr))
- mr_size = sizeof(struct xdp_umem_reg_v2);
+ mr_size = sizeof(struct xdp_umem_reg_v1);
+
+ BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));
+
+ /* Make sure the last field of the struct doesn't have
+ * uninitialized padding. All padding has to be explicit
+ * and has to be set to zero by userspace to make
+ * struct xdp_umem_reg extensible in the future.
+ */
+ BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
+ sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
+ sizeof(struct xdp_umem_reg));
if (copy_from_sockptr(&mr, optval, mr_size))
return -EFAULT;
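The xsk.c hunk above sizes copy_from_sockptr() by the struct version the caller passed and uses BUILD_BUG_ON() to prove the newest field ends exactly at sizeof(), so no uninitialized tail padding can leak in and the struct stays extensible. A minimal standalone sketch of the same compile-time layout checks (struct shapes are illustrative, not the exact uAPI):

#include <assert.h>
#include <stddef.h>

struct reg_v1 {		/* historical layout: no flags field yet */
	unsigned long long addr, len;
	unsigned int chunk_size, headroom;
};

struct reg {		/* current layout, extended at the tail */
	unsigned long long addr, len;
	unsigned int chunk_size, headroom;
	unsigned int flags, tx_metadata_len;
};

/* the old layout must be a strict prefix of the new one */
static_assert(sizeof(struct reg_v1) < sizeof(struct reg),
	      "v1 must be smaller than the current layout");

/* the last field must end exactly at sizeof(): no hidden tail padding */
static_assert(offsetof(struct reg, tx_metadata_len) +
	      sizeof(((struct reg *)0)->tx_metadata_len) == sizeof(struct reg),
	      "no trailing padding allowed");

int main(void)
{
	return 0;	/* the checks are entirely compile-time */
}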
diff --git a/rust/Makefile b/rust/Makefile
index f168d2c98a15..b5e0a73b78f3 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -8,16 +8,16 @@ always-$(CONFIG_RUST) += exports_core_generated.h
# Missing prototypes are expected in the helpers since these are exported
# for Rust only, thus there is no header nor prototypes.
-obj-$(CONFIG_RUST) += helpers.o
-CFLAGS_REMOVE_helpers.o = -Wmissing-prototypes -Wmissing-declarations
+obj-$(CONFIG_RUST) += helpers/helpers.o
+CFLAGS_REMOVE_helpers/helpers.o = -Wmissing-prototypes -Wmissing-declarations
always-$(CONFIG_RUST) += libmacros.so
no-clean-files += libmacros.so
always-$(CONFIG_RUST) += bindings/bindings_generated.rs bindings/bindings_helpers_generated.rs
obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o
-always-$(CONFIG_RUST) += exports_alloc_generated.h exports_bindings_generated.h \
- exports_kernel_generated.h
+always-$(CONFIG_RUST) += exports_alloc_generated.h exports_helpers_generated.h \
+ exports_bindings_generated.h exports_kernel_generated.h
always-$(CONFIG_RUST) += uapi/uapi_generated.rs
obj-$(CONFIG_RUST) += uapi.o
@@ -63,6 +63,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
$(rustc_target_flags) -L$(objtree)/$(obj) \
+ -Zunstable-options --generate-link-to-definition \
--output $(rustdoc_output) \
--crate-name $(subst rustdoc-,,$@) \
$(if $(rustdoc_host),,--sysroot=/dev/null) \
@@ -270,7 +271,7 @@ quiet_cmd_bindgen = BINDGEN $@
cmd_bindgen = \
$(BINDGEN) $< $(bindgen_target_flags) \
--use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
- --no-debug '.*' \
+ --no-debug '.*' --enable-function-attribute-detection \
-o $@ -- $(bindgen_c_flags_final) -DMODULE \
$(bindgen_target_cflags) $(bindgen_target_extra)
@@ -299,13 +300,13 @@ $(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \
-I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
sed -Ei 's/pub fn rust_helper_([a-zA-Z0-9_]*)/#[link_name="rust_helper_\1"]\n pub fn \1/g' $@
-$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers/helpers.c FORCE
$(call if_changed_dep,bindgen)
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
$(NM) -p --defined-only $< \
- | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+ | awk '$$2~/(T|R|D|B)/ && $$3!~/__cfi/ {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(call if_changed,exports)
@@ -313,6 +314,18 @@ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE
$(call if_changed,exports)
+# Even though Rust kernel modules should never use the bindings directly,
+# symbols from the `bindings` crate and the C helpers need to be exported
+# because Rust generics and inlined functions may not get their code generated
+# in the crate where they are defined. Other helpers, called from non-inline
+# functions, may not be exported, in principle. However, in general, the Rust
+# compiler does not guarantee codegen will be performed for a non-inline
+# function either. Therefore, we export all symbols from helpers and bindings.
+# In the future, this may be revisited to reduce the number of exports after
+# the compiler is informed about the places codegen is required.
+$(obj)/exports_helpers_generated.h: $(obj)/helpers/helpers.o FORCE
+ $(call if_changed,exports)
+
$(obj)/exports_bindings_generated.h: $(obj)/bindings.o FORCE
$(call if_changed,exports)
@@ -329,9 +342,7 @@ quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
--crate-name $(patsubst lib%.so,%,$(notdir $@)) $<
# Procedural macros can only be used with the `rustc` that compiled it.
-# Therefore, to get `libmacros.so` automatically recompiled when the compiler
-# version changes, we add `core.o` as a dependency (even if it is not needed).
-$(obj)/libmacros.so: $(src)/macros/lib.rs $(obj)/core.o FORCE
+$(obj)/libmacros.so: $(src)/macros/lib.rs FORCE
+$(call if_changed_dep,rustc_procmacro)
quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@
@@ -344,7 +355,8 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
--crate-type rlib -L$(objtree)/$(obj) \
--crate-name $(patsubst %.o,%,$(notdir $@)) $< \
--sysroot=/dev/null \
- $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
+ $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@) \
+ $(cmd_objtool)
rust-analyzer:
$(Q)$(srctree)/scripts/generate_rust_analyzer.py \
@@ -366,44 +378,50 @@ ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
__ashlti3 __lshrti3
endif
+define rule_rustc_library
+ $(call cmd_and_fixdep,rustc_library)
+ $(call cmd,gen_objtooldep)
+endef
+
$(obj)/core.o: private skip_clippy = 1
$(obj)/core.o: private skip_flags = -Wunreachable_pub
$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
-$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
- +$(call if_changed_dep,rustc_library)
+$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs \
+ $(wildcard $(objtree)/include/config/RUSTC_VERSION_TEXT) FORCE
+ +$(call if_changed_rule,rustc_library)
ifneq ($(or $(CONFIG_X86_64),$(CONFIG_X86_32)),)
$(obj)/core.o: scripts/target.json
endif
$(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
$(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/alloc.o: private skip_clippy = 1
$(obj)/alloc.o: private skip_flags = -Wunreachable_pub
$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
$(obj)/alloc.o: $(RUST_LIB_SRC)/alloc/src/lib.rs $(obj)/compiler_builtins.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/bindings.o: $(src)/bindings/lib.rs \
$(obj)/compiler_builtins.o \
$(obj)/bindings/bindings_generated.rs \
$(obj)/bindings/bindings_helpers_generated.rs FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/uapi.o: $(src)/uapi/lib.rs \
$(obj)/compiler_builtins.o \
$(obj)/uapi/uapi_generated.rs FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros --extern bindings --extern uapi
$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
$(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
endif # CONFIG_RUST
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index b940a5777330..ae82e9c941af 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -7,8 +7,8 @@
*/
#include <kunit/test.h>
-#include <linux/blk_types.h>
#include <linux/blk-mq.h>
+#include <linux/blk_types.h>
#include <linux/blkdev.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
diff --git a/rust/exports.c b/rust/exports.c
index 3803c21d1403..e5695f3b45b7 100644
--- a/rust/exports.c
+++ b/rust/exports.c
@@ -17,6 +17,7 @@
#include "exports_core_generated.h"
#include "exports_alloc_generated.h"
+#include "exports_helpers_generated.h"
#include "exports_bindings_generated.h"
#include "exports_kernel_generated.h"
diff --git a/rust/helpers.c b/rust/helpers.c
deleted file mode 100644
index 92d3c03ae1bd..000000000000
--- a/rust/helpers.c
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
- * cannot be called either. This file explicitly creates functions ("helpers")
- * that wrap those so that they can be called from Rust.
- *
- * Even though Rust kernel modules should never use the bindings directly, some
- * of these helpers need to be exported because Rust generics and inlined
- * functions may not get their code generated in the crate where they are
- * defined. Other helpers, called from non-inline functions, may not be
- * exported, in principle. However, in general, the Rust compiler does not
- * guarantee codegen will be performed for a non-inline function either.
- * Therefore, this file exports all the helpers. In the future, this may be
- * revisited to reduce the number of exports after the compiler is informed
- * about the places codegen is required.
- *
- * All symbols are exported as GPL-only to guarantee no GPL-only feature is
- * accidentally exposed.
- *
- * Sorted alphabetically.
- */
-
-#include <kunit/test-bug.h>
-#include <linux/bug.h>
-#include <linux/build_bug.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/errname.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-#include <linux/mutex.h>
-#include <linux/refcount.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
-__noreturn void rust_helper_BUG(void)
-{
- BUG();
-}
-EXPORT_SYMBOL_GPL(rust_helper_BUG);
-
-unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
- unsigned long n)
-{
- return copy_from_user(to, from, n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_copy_from_user);
-
-unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
- unsigned long n)
-{
- return copy_to_user(to, from, n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_copy_to_user);
-
-void rust_helper_mutex_lock(struct mutex *lock)
-{
- mutex_lock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);
-
-void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
- __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
-#else
- spin_lock_init(lock);
-#endif
-}
-EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init);
-
-void rust_helper_spin_lock(spinlock_t *lock)
-{
- spin_lock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_spin_lock);
-
-void rust_helper_spin_unlock(spinlock_t *lock)
-{
- spin_unlock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);
-
-void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
-{
- init_wait(wq_entry);
-}
-EXPORT_SYMBOL_GPL(rust_helper_init_wait);
-
-int rust_helper_signal_pending(struct task_struct *t)
-{
- return signal_pending(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
-
-struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
-{
- return alloc_pages(gfp_mask, order);
-}
-EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
-
-void *rust_helper_kmap_local_page(struct page *page)
-{
- return kmap_local_page(page);
-}
-EXPORT_SYMBOL_GPL(rust_helper_kmap_local_page);
-
-void rust_helper_kunmap_local(const void *addr)
-{
- kunmap_local(addr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
-
-refcount_t rust_helper_REFCOUNT_INIT(int n)
-{
- return (refcount_t)REFCOUNT_INIT(n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT);
-
-void rust_helper_refcount_inc(refcount_t *r)
-{
- refcount_inc(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);
-
-bool rust_helper_refcount_dec_and_test(refcount_t *r)
-{
- return refcount_dec_and_test(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
-
-__force void *rust_helper_ERR_PTR(long err)
-{
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(rust_helper_ERR_PTR);
-
-bool rust_helper_IS_ERR(__force const void *ptr)
-{
- return IS_ERR(ptr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_IS_ERR);
-
-long rust_helper_PTR_ERR(__force const void *ptr)
-{
- return PTR_ERR(ptr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);
-
-const char *rust_helper_errname(int err)
-{
- return errname(err);
-}
-EXPORT_SYMBOL_GPL(rust_helper_errname);
-
-struct task_struct *rust_helper_get_current(void)
-{
- return current;
-}
-EXPORT_SYMBOL_GPL(rust_helper_get_current);
-
-void rust_helper_get_task_struct(struct task_struct *t)
-{
- get_task_struct(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
-
-void rust_helper_put_task_struct(struct task_struct *t)
-{
- put_task_struct(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
-
-struct kunit *rust_helper_kunit_get_current_test(void)
-{
- return kunit_get_current_test();
-}
-EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
-
-void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
- bool onstack, const char *name,
- struct lock_class_key *key)
-{
- __init_work(work, onstack);
- work->data = (atomic_long_t)WORK_DATA_INIT();
- lockdep_init_map(&work->lockdep_map, name, key, 0);
- INIT_LIST_HEAD(&work->entry);
- work->func = func;
-}
-EXPORT_SYMBOL_GPL(rust_helper_init_work_with_key);
-
-void * __must_check __realloc_size(2)
-rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
-{
- return krealloc(objp, new_size, flags);
-}
-EXPORT_SYMBOL_GPL(rust_helper_krealloc);
-
-/*
- * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
- * use it in contexts where Rust expects a `usize` like slice (array) indices.
- * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
- * pointer) but not necessarily the same as `size_t` (can hold the size of any
- * single object). Most modern platforms use the same concrete integer type for
- * both of them, but in case we find ourselves on a platform where
- * that's not true, fail early instead of risking ABI or
- * integer-overflow issues.
- *
- * If your platform fails this assertion, it means that you are in
- * danger of integer-overflow bugs (even if you attempt to add
- * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
- * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
- * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
- */
-static_assert(
- sizeof(size_t) == sizeof(uintptr_t) &&
- __alignof__(size_t) == __alignof__(uintptr_t),
- "Rust code expects C `size_t` to match Rust `usize`"
-);
-
-// This will soon be moved to a separate file, so no need to merge with above.
-#include <linux/blk-mq.h>
-#include <linux/blkdev.h>
-
-void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_to_pdu);
-
-struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu)
-{
- return blk_mq_rq_from_pdu(pdu);
-}
-EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_from_pdu);
diff --git a/rust/helpers/blk.c b/rust/helpers/blk.c
new file mode 100644
index 000000000000..cc9f4e6a2d23
--- /dev/null
+++ b/rust/helpers/blk.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blk-mq.h>
+#include <linux/blkdev.h>
+
+void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+
+struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu)
+{
+ return blk_mq_rq_from_pdu(pdu);
+}
diff --git a/rust/helpers/bug.c b/rust/helpers/bug.c
new file mode 100644
index 000000000000..e2d13babc737
--- /dev/null
+++ b/rust/helpers/bug.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+
+__noreturn void rust_helper_BUG(void)
+{
+ BUG();
+}
diff --git a/rust/helpers/build_assert.c b/rust/helpers/build_assert.c
new file mode 100644
index 000000000000..6a54b2680b14
--- /dev/null
+++ b/rust/helpers/build_assert.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/build_bug.h>
+
+/*
+ * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
+ * use it in contexts where Rust expects a `usize`, like slice (array) indices.
+ * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
+ * pointer) but not necessarily the same as `size_t` (can hold the size of any
+ * single object). Most modern platforms use the same concrete integer type for
+ * both of them, but in case we find ourselves on a platform where
+ * that's not true, fail early instead of risking ABI or
+ * integer-overflow issues.
+ *
+ * If your platform fails this assertion, it means that you are in
+ * danger of integer-overflow bugs (even if you attempt to add
+ * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
+ * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
+ * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
+ */
+static_assert(
+ sizeof(size_t) == sizeof(uintptr_t) &&
+ __alignof__(size_t) == __alignof__(uintptr_t),
+ "Rust code expects C `size_t` to match Rust `usize`"
+);
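The practical upshot of this assertion is that sizes and lengths handed over from C can be used as Rust `usize` values without casts. A minimal sketch of what that buys on the Rust side (the function below is illustrative, not part of the patch):

```rust
// `bindgen` maps C `size_t` to Rust `usize`, and the `static_assert` above
// guarantees that this mapping is ABI-sound, so a length received from C can
// index a slice directly, with no integer conversion.
fn first_n(buf: &[u8], len: usize /* a C `size_t` after bindgen */) -> &[u8] {
    // Clamp defensively so the index cannot go out of bounds.
    &buf[..len.min(buf.len())]
}
```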
diff --git a/rust/helpers/build_bug.c b/rust/helpers/build_bug.c
new file mode 100644
index 000000000000..e994f7b5928c
--- /dev/null
+++ b/rust/helpers/build_bug.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/errname.h>
+
+const char *rust_helper_errname(int err)
+{
+ return errname(err);
+}
diff --git a/rust/helpers/err.c b/rust/helpers/err.c
new file mode 100644
index 000000000000..be3d45ef78a2
--- /dev/null
+++ b/rust/helpers/err.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/err.h>
+#include <linux/export.h>
+
+__force void *rust_helper_ERR_PTR(long err)
+{
+ return ERR_PTR(err);
+}
+
+bool rust_helper_IS_ERR(__force const void *ptr)
+{
+ return IS_ERR(ptr);
+}
+
+long rust_helper_PTR_ERR(__force const void *ptr)
+{
+ return PTR_ERR(ptr);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
new file mode 100644
index 000000000000..30f40149f3a9
--- /dev/null
+++ b/rust/helpers/helpers.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
+ * cannot be called either. This file explicitly creates functions ("helpers")
+ * that wrap those so that they can be called from Rust.
+ *
+ * Sorted alphabetically.
+ */
+
+#include "blk.c"
+#include "bug.c"
+#include "build_assert.c"
+#include "build_bug.c"
+#include "err.c"
+#include "kunit.c"
+#include "mutex.c"
+#include "page.c"
+#include "rbtree.c"
+#include "refcount.c"
+#include "signal.c"
+#include "slab.c"
+#include "spinlock.c"
+#include "task.c"
+#include "uaccess.c"
+#include "wait.c"
+#include "workqueue.c"
diff --git a/rust/helpers/kunit.c b/rust/helpers/kunit.c
new file mode 100644
index 000000000000..9d725067eb3b
--- /dev/null
+++ b/rust/helpers/kunit.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test-bug.h>
+#include <linux/export.h>
+
+struct kunit *rust_helper_kunit_get_current_test(void)
+{
+ return kunit_get_current_test();
+}
diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c
new file mode 100644
index 000000000000..200db7e6279f
--- /dev/null
+++ b/rust/helpers/mutex.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+void rust_helper_mutex_lock(struct mutex *lock)
+{
+ mutex_lock(lock);
+}
diff --git a/rust/helpers/page.c b/rust/helpers/page.c
new file mode 100644
index 000000000000..b3f2b8fbf87f
--- /dev/null
+++ b/rust/helpers/page.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages(gfp_mask, order);
+}
+
+void *rust_helper_kmap_local_page(struct page *page)
+{
+ return kmap_local_page(page);
+}
+
+void rust_helper_kunmap_local(const void *addr)
+{
+ kunmap_local(addr);
+}
diff --git a/rust/helpers/rbtree.c b/rust/helpers/rbtree.c
new file mode 100644
index 000000000000..6d404b84a9b5
--- /dev/null
+++ b/rust/helpers/rbtree.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/rbtree.h>
+
+void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ rb_link_node(node, parent, rb_link);
+}
diff --git a/rust/helpers/refcount.c b/rust/helpers/refcount.c
new file mode 100644
index 000000000000..f47afc148ec3
--- /dev/null
+++ b/rust/helpers/refcount.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/refcount.h>
+
+refcount_t rust_helper_REFCOUNT_INIT(int n)
+{
+ return (refcount_t)REFCOUNT_INIT(n);
+}
+
+void rust_helper_refcount_inc(refcount_t *r)
+{
+ refcount_inc(r);
+}
+
+bool rust_helper_refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_dec_and_test(r);
+}
diff --git a/rust/helpers/signal.c b/rust/helpers/signal.c
new file mode 100644
index 000000000000..63c407f80c26
--- /dev/null
+++ b/rust/helpers/signal.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/sched/signal.h>
+
+int rust_helper_signal_pending(struct task_struct *t)
+{
+ return signal_pending(t);
+}
diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
new file mode 100644
index 000000000000..f043e087f9d6
--- /dev/null
+++ b/rust/helpers/slab.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+
+void * __must_check __realloc_size(2)
+rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
+{
+ return krealloc(objp, new_size, flags);
+}
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
new file mode 100644
index 000000000000..acc1376b833c
--- /dev/null
+++ b/rust/helpers/spinlock.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/spinlock.h>
+
+void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+#else
+ spin_lock_init(lock);
+#endif
+}
+
+void rust_helper_spin_lock(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+
+void rust_helper_spin_unlock(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
diff --git a/rust/helpers/task.c b/rust/helpers/task.c
new file mode 100644
index 000000000000..7ac789232d11
--- /dev/null
+++ b/rust/helpers/task.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/sched/task.h>
+
+struct task_struct *rust_helper_get_current(void)
+{
+ return current;
+}
+
+void rust_helper_get_task_struct(struct task_struct *t)
+{
+ get_task_struct(t);
+}
+
+void rust_helper_put_task_struct(struct task_struct *t)
+{
+ put_task_struct(t);
+}
diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c
new file mode 100644
index 000000000000..f49076f813cd
--- /dev/null
+++ b/rust/helpers/uaccess.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/uaccess.h>
+
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
diff --git a/rust/helpers/wait.c b/rust/helpers/wait.c
new file mode 100644
index 000000000000..c7336bbf2750
--- /dev/null
+++ b/rust/helpers/wait.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/wait.h>
+
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+ init_wait(wq_entry);
+}
diff --git a/rust/helpers/workqueue.c b/rust/helpers/workqueue.c
new file mode 100644
index 000000000000..f59427acc323
--- /dev/null
+++ b/rust/helpers/workqueue.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/workqueue.h>
+
+void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
+ bool onstack, const char *name,
+ struct lock_class_key *key)
+{
+ __init_work(work, onstack);
+ work->data = (atomic_long_t)WORK_DATA_INIT();
+ lockdep_init_map(&work->lockdep_map, name, key, 0);
+ INIT_LIST_HEAD(&work->entry);
+ work->func = func;
+}
diff --git a/rust/kernel/alloc/box_ext.rs b/rust/kernel/alloc/box_ext.rs
index 9f1c1c489189..7009ad78d4e0 100644
--- a/rust/kernel/alloc/box_ext.rs
+++ b/rust/kernel/alloc/box_ext.rs
@@ -4,7 +4,7 @@
use super::{AllocError, Flags};
use alloc::boxed::Box;
-use core::mem::MaybeUninit;
+use core::{mem::MaybeUninit, ptr, result::Result};
/// Extensions to [`Box`].
pub trait BoxExt<T>: Sized {
@@ -17,6 +17,24 @@ pub trait BoxExt<T>: Sized {
///
/// The allocation may fail, in which case an error is returned.
fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError>;
+
+ /// Drops the contents, but keeps the allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::alloc::{flags, box_ext::BoxExt};
+ /// let value = Box::new([0; 32], flags::GFP_KERNEL)?;
+ /// assert_eq!(*value, [0; 32]);
+ /// let mut value = Box::drop_contents(value);
+ /// // Now we can re-use `value`:
+ /// value.write([1; 32]);
+ /// // SAFETY: We just wrote to it.
+ /// let value = unsafe { value.assume_init() };
+ /// assert_eq!(*value, [1; 32]);
+ /// # Ok::<(), Error>(())
+ /// ```
+ fn drop_contents(this: Self) -> Box<MaybeUninit<T>>;
}
impl<T> BoxExt<T> for Box<T> {
@@ -55,4 +73,17 @@ impl<T> BoxExt<T> for Box<T> {
// zero-sized types, we use `NonNull::dangling`.
Ok(unsafe { Box::from_raw(ptr) })
}
+
+ fn drop_contents(this: Self) -> Box<MaybeUninit<T>> {
+ let ptr = Box::into_raw(this);
+ // SAFETY: `ptr` is valid, because it came from `Box::into_raw`.
+ unsafe { ptr::drop_in_place(ptr) };
+
+ // CAST: `MaybeUninit<T>` is a transparent wrapper of `T`.
+ let ptr = ptr.cast::<MaybeUninit<T>>();
+
+ // SAFETY: `ptr` came from `Box::into_raw`, so it is valid for reads and writes. After the
+ // cast, the pointee type is `MaybeUninit<T>`, which is valid for any byte pattern.
+ unsafe { Box::from_raw(ptr) }
+ }
}
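`drop_contents` is the allocation-reuse half of a pair: the `InPlaceWrite` trait added to `init.rs` later in this series supplies the matching write step. A hedged sketch combining the two (the `reuse` function is illustrative, not part of the patch):

```rust
use kernel::alloc::box_ext::BoxExt;
use kernel::init::{self, InPlaceWrite};
use kernel::prelude::*;

// Drop the old value but keep the heap allocation, then write a fresh,
// zeroed value in place without going back to the allocator.
fn reuse(b: Box<u64>) -> Result<Box<u64>> {
    let uninit = Box::drop_contents(b);
    Ok(uninit.write_init(init::zeroed())?)
}
```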
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 145f5c397009..6f1587a2524e 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -135,8 +135,11 @@ impl Error {
/// Returns the error encoded as a pointer.
#[allow(dead_code)]
pub(crate) fn to_ptr<T>(self) -> *mut T {
+ #[cfg_attr(target_pointer_width = "32", allow(clippy::useless_conversion))]
// SAFETY: `self.0` is a valid error due to its invariant.
- unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
+ unsafe {
+ bindings::ERR_PTR(self.0.into()) as *mut _
+ }
}
/// Returns a string representing the error, if one exists.
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 495c09ebe3a3..a17ac8762d8f 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -213,6 +213,7 @@
use crate::{
alloc::{box_ext::BoxExt, AllocError, Flags},
error::{self, Error},
+ sync::Arc,
sync::UniqueArc,
types::{Opaque, ScopeGuard},
};
@@ -742,6 +743,74 @@ macro_rules! try_init {
};
}
+/// Asserts that a field on a struct using `#[pin_data]` is marked with `#[pin]`, i.e. that it is
+/// structurally pinned.
+///
+/// # Example
+///
+/// This will succeed:
+/// ```
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct MyStruct {
+/// #[pin]
+/// some_field: u64,
+/// }
+///
+/// assert_pinned!(MyStruct, some_field, u64);
+/// ```
+///
+/// This will fail:
+// TODO: replace with `compile_fail` when supported.
+/// ```ignore
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct MyStruct {
+/// some_field: u64,
+/// }
+///
+/// assert_pinned!(MyStruct, some_field, u64);
+/// ```
+///
+/// Some uses of the macro may trigger the `can't use generic parameters from outer item` error. To
+/// work around this, you may pass the `inline` parameter to the macro. The `inline` parameter can
+/// only be used when the macro is invoked from a function body.
+/// ```
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct Foo<T> {
+/// #[pin]
+/// elem: T,
+/// }
+///
+/// impl<T> Foo<T> {
+/// fn project(self: Pin<&mut Self>) -> Pin<&mut T> {
+/// assert_pinned!(Foo<T>, elem, T, inline);
+///
+/// // SAFETY: The field is structurally pinned.
+/// unsafe { self.map_unchecked_mut(|me| &mut me.elem) }
+/// }
+/// }
+/// ```
+#[macro_export]
+macro_rules! assert_pinned {
+ ($ty:ty, $field:ident, $field_ty:ty, inline) => {
+ let _ = move |ptr: *mut $field_ty| {
+ // SAFETY: This code is unreachable.
+ let data = unsafe { <$ty as $crate::init::__internal::HasPinData>::__pin_data() };
+ let init = $crate::init::__internal::AlwaysFail::<$field_ty>::new();
+ // SAFETY: This code is unreachable.
+ unsafe { data.$field(ptr, init) }.ok();
+ };
+ };
+
+ ($ty:ty, $field:ident, $field_ty:ty) => {
+ const _: () = {
+ $crate::assert_pinned!($ty, $field, $field_ty, inline);
+ };
+ };
+}
+
/// A pin-initializer for the type `T`.
///
/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
@@ -1107,11 +1176,17 @@ unsafe impl<T, E> PinInit<T, E> for T {
/// Smart pointer that can initialize memory in-place.
pub trait InPlaceInit<T>: Sized {
+ /// Pinned version of `Self`.
+ ///
+ /// If a type already implicitly pins its pointee, `Pin<Self>` is unnecessary. In this case, use
+ /// `Self`; otherwise, use `Pin<Self>`.
+ type PinnedSelf;
+
/// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
/// type.
///
/// If `T: !Unpin` it will not be able to move afterwards.
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>;
@@ -1119,7 +1194,7 @@ pub trait InPlaceInit<T>: Sized {
/// type.
///
/// If `T: !Unpin` it will not be able to move afterwards.
- fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Pin<Self>>
+ fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self::PinnedSelf>
where
Error: From<E>,
{
@@ -1148,19 +1223,35 @@ pub trait InPlaceInit<T>: Sized {
}
}
+impl<T> InPlaceInit<T> for Arc<T> {
+ type PinnedSelf = Self;
+
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::try_pin_init(init, flags).map(|u| u.into())
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::try_init(init, flags).map(|u| u.into())
+ }
+}
+
impl<T> InPlaceInit<T> for Box<T> {
+ type PinnedSelf = Pin<Self>;
+
#[inline]
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>,
{
- let mut this = <Box<_> as BoxExt<_>>::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid and will not be moved, because we pin it later.
- unsafe { init.__pinned_init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() }.into())
+ <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_pin_init(init)
}
#[inline]
@@ -1168,29 +1259,19 @@ impl<T> InPlaceInit<T> for Box<T> {
where
E: From<AllocError>,
{
- let mut this = <Box<_> as BoxExt<_>>::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid.
- unsafe { init.__init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() })
+ <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_init(init)
}
}
impl<T> InPlaceInit<T> for UniqueArc<T> {
+ type PinnedSelf = Pin<Self>;
+
#[inline]
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>,
{
- let mut this = UniqueArc::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid and will not be moved, because we pin it later.
- unsafe { init.__pinned_init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() }.into())
+ UniqueArc::new_uninit(flags)?.write_pin_init(init)
}
#[inline]
@@ -1198,13 +1279,67 @@ impl<T> InPlaceInit<T> for UniqueArc<T> {
where
E: From<AllocError>,
{
- let mut this = UniqueArc::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
+ UniqueArc::new_uninit(flags)?.write_init(init)
+ }
+}
+
+/// Smart pointer containing uninitialized memory and that can write a value.
+pub trait InPlaceWrite<T> {
+ /// The type `Self` turns into when the contents are initialized.
+ type Initialized;
+
+ /// Use the given initializer to write a value into `self`.
+ ///
+ /// Does not drop the current value and considers it as uninitialized memory.
+ fn write_init<E>(self, init: impl Init<T, E>) -> Result<Self::Initialized, E>;
+
+ /// Use the given pin-initializer to write a value into `self`.
+ ///
+ /// Does not drop the current value and considers it as uninitialized memory.
+ fn write_pin_init<E>(self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E>;
+}
+
+impl<T> InPlaceWrite<T> for Box<MaybeUninit<T>> {
+ type Initialized = Box<T>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
// SAFETY: When init errors/panics, slot will get deallocated but not dropped,
// slot is valid.
unsafe { init.__init(slot)? };
// SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() })
+ Ok(unsafe { self.assume_init() })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() }.into())
+ }
+}
+
+impl<T> InPlaceWrite<T> for UniqueArc<MaybeUninit<T>> {
+ type Initialized = UniqueArc<T>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() }.into())
}
}
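With `InPlaceInit` now implemented for `Arc` and the `PinnedSelf` associated type in place, pin-initialising into an `Arc` yields `Arc<T>` directly rather than `Pin<Arc<T>>`, since `Arc` already pins its pointee. A small sketch under those assumptions (`make_counter` is illustrative):

```rust
use kernel::new_mutex;
use kernel::prelude::*;
use kernel::sync::{Arc, Mutex};

// `PinnedSelf = Self` for `Arc`, so no `Pin` wrapper appears in the result.
fn make_counter() -> Result<Arc<Mutex<u64>>> {
    Arc::pin_init(new_mutex!(0), GFP_KERNEL)
}
```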
diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs
index db3372619ecd..13cefd37512f 100644
--- a/rust/kernel/init/__internal.rs
+++ b/rust/kernel/init/__internal.rs
@@ -228,3 +228,32 @@ impl OnlyCallFromDrop {
Self(())
}
}
+
+/// Initializer that always fails.
+///
+/// Used by [`assert_pinned!`].
+///
+/// [`assert_pinned!`]: crate::assert_pinned
+pub struct AlwaysFail<T: ?Sized> {
+ _t: PhantomData<T>,
+}
+
+impl<T: ?Sized> AlwaysFail<T> {
+ /// Creates a new initializer that always fails.
+ pub fn new() -> Self {
+ Self { _t: PhantomData }
+ }
+}
+
+impl<T: ?Sized> Default for AlwaysFail<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+// SAFETY: `__pinned_init` always fails, which is always okay.
+unsafe impl<T: ?Sized> PinInit<T, ()> for AlwaysFail<T> {
+ unsafe fn __pinned_init(self, _slot: *mut T) -> Result<(), ()> {
+ Err(())
+ }
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 58ed400198bf..22a3bfa5a9e9 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -38,12 +38,14 @@ pub mod init;
pub mod ioctl;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
+pub mod list;
#[cfg(CONFIG_NET)]
pub mod net;
pub mod page;
pub mod prelude;
pub mod print;
+pub mod rbtree;
pub mod sizes;
mod static_assert;
#[doc(hidden)]
pub mod std_vendor;
diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
new file mode 100644
index 000000000000..5b4aec29eb67
--- /dev/null
+++ b/rust/kernel/list.rs
@@ -0,0 +1,686 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A linked list implementation.
+
+use crate::init::PinInit;
+use crate::sync::ArcBorrow;
+use crate::types::Opaque;
+use core::iter::{DoubleEndedIterator, FusedIterator};
+use core::marker::PhantomData;
+use core::ptr;
+
+mod impl_list_item_mod;
+pub use self::impl_list_item_mod::{
+ impl_has_list_links, impl_has_list_links_self_ptr, impl_list_item, HasListLinks, HasSelfPtr,
+};
+
+mod arc;
+pub use self::arc::{impl_list_arc_safe, AtomicTracker, ListArc, ListArcSafe, TryNewListArc};
+
+mod arc_field;
+pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
+
+/// A linked list.
+///
+/// All elements in this linked list will be [`ListArc`] references to the value. Since a value can
+/// only have one `ListArc` (for each pair of prev/next pointers), this ensures that the same
+/// prev/next pointers are not used for several linked lists.
+///
+/// # Invariants
+///
+/// * If the list is empty, then `first` is null. Otherwise, `first` points at the `ListLinks`
+/// field of the first element in the list.
+/// * All prev/next pointers in `ListLinks` fields of items in the list are valid and form a cycle.
+/// * For every item in the list, the list owns the associated [`ListArc`] reference and has
+/// exclusive access to the `ListLinks` field.
+pub struct List<T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ first: *mut ListLinksFields,
+ _ty: PhantomData<ListArc<T, ID>>,
+}
+
+// SAFETY: This is a container of `ListArc<T, ID>`, and access to the container allows the same
+// type of access to the `ListArc<T, ID>` elements.
+unsafe impl<T, const ID: u64> Send for List<T, ID>
+where
+ ListArc<T, ID>: Send,
+ T: ?Sized + ListItem<ID>,
+{
+}
+// SAFETY: This is a container of `ListArc<T, ID>`, and access to the container allows the same
+// type of access to the `ListArc<T, ID>` elements.
+unsafe impl<T, const ID: u64> Sync for List<T, ID>
+where
+ ListArc<T, ID>: Sync,
+ T: ?Sized + ListItem<ID>,
+{
+}
+
+/// Implemented by types where a [`ListArc<Self>`] can be inserted into a [`List`].
+///
+/// # Safety
+///
+/// Implementers must ensure that they provide the guarantees documented on methods provided by
+/// this trait.
+///
+/// [`ListArc<Self>`]: ListArc
+pub unsafe trait ListItem<const ID: u64 = 0>: ListArcSafe<ID> {
+ /// Views the [`ListLinks`] for this value.
+ ///
+ /// # Guarantees
+ ///
+ /// If there is a previous call to `prepare_to_insert` and there is no call to `post_remove`
+ /// since the most recent such call, then this returns the same pointer as the one returned by
+ /// the most recent call to `prepare_to_insert`.
+ ///
+ /// Otherwise, the returned pointer points at a read-only [`ListLinks`] with two null pointers.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid value. (It need not be in an `Arc`.)
+ unsafe fn view_links(me: *const Self) -> *mut ListLinks<ID>;
+
+ /// View the full value given its [`ListLinks`] field.
+ ///
+ /// Can only be used when the value is in a list.
+ ///
+ /// # Guarantees
+ ///
+ /// * Returns the same pointer as the one passed to the most recent call to `prepare_to_insert`.
+ /// * The returned pointer is valid until the next call to `post_remove`.
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must originate from the most recent call to `prepare_to_insert`, or
+ /// from a call to `view_links` that happened after the most recent call to
+ /// `prepare_to_insert`.
+ /// * Since the most recent call to `prepare_to_insert`, the `post_remove` method must not have
+ /// been called.
+ unsafe fn view_value(me: *mut ListLinks<ID>) -> *const Self;
+
+ /// This is called when an item is inserted into a [`List`].
+ ///
+ /// # Guarantees
+ ///
+ /// The caller is granted exclusive access to the returned [`ListLinks`] until `post_remove` is
+ /// called.
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must point at a valid value in an [`Arc`].
+ /// * Calls to `prepare_to_insert` and `post_remove` on the same value must alternate.
+ /// * The caller must own the [`ListArc`] for this value.
+ /// * The caller must not give up ownership of the [`ListArc`] unless `post_remove` has been
+ /// called after this call to `prepare_to_insert`.
+ ///
+ /// [`Arc`]: crate::sync::Arc
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut ListLinks<ID>;
+
+ /// This undoes a previous call to `prepare_to_insert`.
+ ///
+ /// # Guarantees
+ ///
+ /// The returned pointer is the pointer that was originally passed to `prepare_to_insert`.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must be the pointer returned by the most recent call to
+ /// `prepare_to_insert`.
+ unsafe fn post_remove(me: *mut ListLinks<ID>) -> *const Self;
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+struct ListLinksFields {
+ next: *mut ListLinksFields,
+ prev: *mut ListLinksFields,
+}
+
+/// The prev/next pointers for an item in a linked list.
+///
+/// # Invariants
+///
+/// The fields are null if and only if this item is not in a list.
+#[repr(transparent)]
+pub struct ListLinks<const ID: u64 = 0> {
+ // This type is `!Unpin` for aliasing reasons as the pointers are part of an intrusive linked
+ // list.
+ inner: Opaque<ListLinksFields>,
+}
+
+// SAFETY: The only way to access/modify the pointers inside of `ListLinks<ID>` is via holding the
+// associated `ListArc<T, ID>`. Since that type correctly implements `Send`, it is impossible to
+// move an instance of this type to a different thread if the pointees are `!Send`.
+unsafe impl<const ID: u64> Send for ListLinks<ID> {}
+// SAFETY: The type is opaque so immutable references to a ListLinks are useless. Therefore, it's
+// okay to have immutable access to a ListLinks from several threads at once.
+unsafe impl<const ID: u64> Sync for ListLinks<ID> {}
+
+impl<const ID: u64> ListLinks<ID> {
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ ListLinks {
+ inner: Opaque::new(ListLinksFields {
+ prev: ptr::null_mut(),
+ next: ptr::null_mut(),
+ }),
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `me` must be dereferenceable.
+ #[inline]
+ unsafe fn fields(me: *mut Self) -> *mut ListLinksFields {
+ // SAFETY: The caller promises that the pointer is valid.
+ unsafe { Opaque::raw_get(ptr::addr_of!((*me).inner)) }
+ }
+
+ /// # Safety
+ ///
+ /// `me` must be dereferenceable.
+ #[inline]
+ unsafe fn from_fields(me: *mut ListLinksFields) -> *mut Self {
+ me.cast()
+ }
+}
+
+/// Similar to [`ListLinks`], but also contains a pointer to the full value.
+///
+/// This type can be used instead of [`ListLinks`] to support lists with trait objects.
+#[repr(C)]
+pub struct ListLinksSelfPtr<T: ?Sized, const ID: u64 = 0> {
+ /// The `ListLinks` field inside this value.
+ ///
+ /// This is public so that it can be used with `impl_has_list_links!`.
+ pub inner: ListLinks<ID>,
+ // UnsafeCell is not enough here because we use `Opaque::uninit` as a dummy value, and
+ // `ptr::null()` doesn't work for `T: ?Sized`.
+ self_ptr: Opaque<*const T>,
+}
+
+// SAFETY: The fields of a ListLinksSelfPtr can be moved across thread boundaries.
+unsafe impl<T: ?Sized + Send, const ID: u64> Send for ListLinksSelfPtr<T, ID> {}
+// SAFETY: The type is opaque so immutable references to a ListLinksSelfPtr are useless. Therefore,
+// it's okay to have immutable access to a ListLinksSelfPtr from several threads at once.
+//
+// Note that `inner` being a public field does not prevent this type from being opaque, since
+// `inner` is an opaque type.
+unsafe impl<T: ?Sized + Sync, const ID: u64> Sync for ListLinksSelfPtr<T, ID> {}
+
+impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
+ /// The offset from the [`ListLinks`] to the self pointer field.
+ pub const LIST_LINKS_SELF_PTR_OFFSET: usize = core::mem::offset_of!(Self, self_ptr);
+
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ Self {
+ inner: ListLinks {
+ inner: Opaque::new(ListLinksFields {
+ prev: ptr::null_mut(),
+ next: ptr::null_mut(),
+ }),
+ },
+ self_ptr: Opaque::uninit(),
+ }
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> List<T, ID> {
+ /// Creates a new empty list.
+ pub const fn new() -> Self {
+ Self {
+ first: ptr::null_mut(),
+ _ty: PhantomData,
+ }
+ }
+
+ /// Returns whether this list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.first.is_null()
+ }
+
+ /// Add the provided item to the back of the list.
+ pub fn push_back(&mut self, item: ListArc<T, ID>) {
+ let raw_item = ListArc::into_raw(item);
+ // SAFETY:
+ // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+ // * Since we have ownership of the `ListArc`, `post_remove` must have been called after
+ // the most recent call to `prepare_to_insert`, if any.
+ // * We own the `ListArc`.
+ // * Removing items from this list is always done using `remove_internal_inner`, which
+ // calls `post_remove` before giving up ownership.
+ let list_links = unsafe { T::prepare_to_insert(raw_item) };
+ // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+ let item = unsafe { ListLinks::fields(list_links) };
+
+ if self.first.is_null() {
+ self.first = item;
+ // SAFETY: The caller just gave us ownership of these fields.
+ // INVARIANT: A linked list with one item should be cyclic.
+ unsafe {
+ (*item).next = item;
+ (*item).prev = item;
+ }
+ } else {
+ let next = self.first;
+ // SAFETY: By the type invariant, this pointer is valid or null. We just checked that
+ // it's not null, so it must be valid.
+ let prev = unsafe { (*next).prev };
+ // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+ // ownership of the fields on `item`.
+ // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+ unsafe {
+ (*item).next = next;
+ (*item).prev = prev;
+ (*prev).next = item;
+ (*next).prev = item;
+ }
+ }
+ }
+
+ /// Add the provided item to the front of the list.
+ pub fn push_front(&mut self, item: ListArc<T, ID>) {
+ let raw_item = ListArc::into_raw(item);
+ // SAFETY:
+ // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+ // * Since we have ownership of the `ListArc`, `post_remove` must have been called after
+ // the most recent call to `prepare_to_insert`, if any.
+ // * We own the `ListArc`.
+ // * Removing items from this list is always done using `remove_internal_inner`, which
+ // calls `post_remove` before giving up ownership.
+ let list_links = unsafe { T::prepare_to_insert(raw_item) };
+ // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+ let item = unsafe { ListLinks::fields(list_links) };
+
+ if self.first.is_null() {
+ // SAFETY: The caller just gave us ownership of these fields.
+ // INVARIANT: A linked list with one item should be cyclic.
+ unsafe {
+ (*item).next = item;
+ (*item).prev = item;
+ }
+ } else {
+ let next = self.first;
+ // SAFETY: We just checked that `next` is non-null.
+ let prev = unsafe { (*next).prev };
+ // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+ // ownership of the fields on `item`.
+ // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+ unsafe {
+ (*item).next = next;
+ (*item).prev = prev;
+ (*prev).next = item;
+ (*next).prev = item;
+ }
+ }
+ self.first = item;
+ }
+
+ /// Removes the last item from this list.
+ pub fn pop_back(&mut self) -> Option<ListArc<T, ID>> {
+ if self.first.is_null() {
+ return None;
+ }
+
+ // SAFETY: We just checked that the list is not empty.
+ let last = unsafe { (*self.first).prev };
+ // SAFETY: The last item of this list is in this list.
+ Some(unsafe { self.remove_internal(last) })
+ }
+
+ /// Removes the first item from this list.
+ pub fn pop_front(&mut self) -> Option<ListArc<T, ID>> {
+ if self.first.is_null() {
+ return None;
+ }
+
+ // SAFETY: The first item of this list is in this list.
+ Some(unsafe { self.remove_internal(self.first) })
+ }
+
+ /// Removes the provided item from this list and returns it.
+ ///
+ /// This returns `None` if the item is not in the list. (Note that by the safety requirements,
+ /// this means that the item is not in any list.)
+ ///
+ /// # Safety
+ ///
+ /// `item` must not be in a different linked list (with the same id).
+ pub unsafe fn remove(&mut self, item: &T) -> Option<ListArc<T, ID>> {
+ let mut item = unsafe { ListLinks::fields(T::view_links(item)) };
+ // SAFETY: The user provided a reference, and references are never dangling.
+ //
+ // As for why this is not a data race, there are two cases:
+ //
+ // * If `item` is not in any list, then these fields are read-only and null.
+ // * If `item` is in this list, then we have exclusive access to these fields since we
+ // have a mutable reference to the list.
+ //
+ // In either case, there's no race.
+ let ListLinksFields { next, prev } = unsafe { *item };
+
+ debug_assert_eq!(next.is_null(), prev.is_null());
+ if !next.is_null() {
+ // This is really a no-op, but it ensures that `item` is a raw pointer that was
+ // obtained without going through a pointer->reference->pointer conversion roundtrip,
+ // which keeps the list valid under the more restrictive strict provenance ruleset.
+ //
+ // SAFETY: We just checked that `next` is not null, and it's not dangling by the
+ // list invariants.
+ unsafe {
+ debug_assert_eq!(item, (*next).prev);
+ item = (*next).prev;
+ }
+
+ // SAFETY: We just checked that `item` is in a list, so the caller guarantees that it
+ // is in this list. The pointers are in the right order.
+ Some(unsafe { self.remove_internal_inner(item, next, prev) })
+ } else {
+ None
+ }
+ }
+
+ /// Removes the provided item from the list.
+ ///
+ /// # Safety
+ ///
+ /// `item` must point at an item in this list.
+ unsafe fn remove_internal(&mut self, item: *mut ListLinksFields) -> ListArc<T, ID> {
+ // SAFETY: The caller promises that this pointer is not dangling, and there's no data race
+ // since we have a mutable reference to the list containing `item`.
+ let ListLinksFields { next, prev } = unsafe { *item };
+ // SAFETY: The pointers are ok and in the right order.
+ unsafe { self.remove_internal_inner(item, next, prev) }
+ }
+
+ /// Removes the provided item from the list.
+ ///
+ /// # Safety
+ ///
+ /// The `item` pointer must point at an item in this list, and we must have `(*item).next ==
+ /// next` and `(*item).prev == prev`.
+ unsafe fn remove_internal_inner(
+ &mut self,
+ item: *mut ListLinksFields,
+ next: *mut ListLinksFields,
+ prev: *mut ListLinksFields,
+ ) -> ListArc<T, ID> {
+ // SAFETY: We have exclusive access to the pointers of items in the list, and the prev/next
+ // pointers are always valid for items in a list.
+ //
+ // INVARIANT: There are three cases:
+ // * If the list has at least three items, then after removing the item, `prev` and `next`
+ // will be next to each other.
+ // * If the list has two items, then the remaining item will point at itself.
+ // * If the list has one item, then `next == prev == item`, so these writes have no
+ // effect. The list remains unchanged and `item` is still in the list for now.
+ unsafe {
+ (*next).prev = prev;
+ (*prev).next = next;
+ }
+ // SAFETY: We have exclusive access to items in the list.
+ // INVARIANT: `item` is being removed, so the pointers should be null.
+ unsafe {
+ (*item).prev = ptr::null_mut();
+ (*item).next = ptr::null_mut();
+ }
+ // INVARIANT: There are three cases:
+ // * If `item` was not the first item, then `self.first` should remain unchanged.
+ // * If `item` was the first item and there is another item, then we just updated
+ // `prev->next` to `next`, which is the new first item, and setting `item->next` to null
+ // did not modify `prev->next`.
+ // * If `item` was the only item in the list, then `prev == item`, and we just set
+ // `item->next` to null, so this correctly sets `first` to null now that the list is
+ // empty.
+ if self.first == item {
+ // SAFETY: The `prev` pointer is the value that `item->prev` had when it was in this
+ // list, so it must be valid. There is no race since `prev` is still in the list and we
+ // still have exclusive access to the list.
+ self.first = unsafe { (*prev).next };
+ }
+
+ // SAFETY: `item` used to be in the list, so it is dereferenceable by the type invariants
+ // of `List`.
+ let list_links = unsafe { ListLinks::from_fields(item) };
+ // SAFETY: Any pointer in the list originates from a `prepare_to_insert` call.
+ let raw_item = unsafe { T::post_remove(list_links) };
+ // SAFETY: The above call to `post_remove` guarantees that we can recreate the `ListArc`.
+ unsafe { ListArc::from_raw(raw_item) }
+ }
+
+ /// Moves all items from `other` into `self`.
+ ///
+ /// The items of `other` are added to the back of `self`, so the last item of `other` becomes
+ /// the last item of `self`.
+ pub fn push_all_back(&mut self, other: &mut List<T, ID>) {
+ // First, we insert the elements into `self`. At the end, we make `other` empty.
+ if self.is_empty() {
+ // INVARIANT: All of the elements in `other` become elements of `self`.
+ self.first = other.first;
+ } else if !other.is_empty() {
+ let other_first = other.first;
+ // SAFETY: The other list is not empty, so this pointer is valid.
+ let other_last = unsafe { (*other_first).prev };
+ let self_first = self.first;
+ // SAFETY: The self list is not empty, so this pointer is valid.
+ let self_last = unsafe { (*self_first).prev };
+
+ // SAFETY: We have exclusive access to both lists, so we can update the pointers.
+ // INVARIANT: This correctly sets the pointers to merge both lists. We do not need to
+ // update `self.first` because the first element of `self` does not change.
+ unsafe {
+ (*self_first).prev = other_last;
+ (*other_last).next = self_first;
+ (*self_last).next = other_first;
+ (*other_first).prev = self_last;
+ }
+ }
+
+ // INVARIANT: The other list is now empty, so update its pointer.
+ other.first = ptr::null_mut();
+ }
+
+ /// Returns a cursor to the first element of the list.
+ ///
+ /// If the list is empty, this returns `None`.
+ pub fn cursor_front(&mut self) -> Option<Cursor<'_, T, ID>> {
+ if self.first.is_null() {
+ None
+ } else {
+ Some(Cursor {
+ current: self.first,
+ list: self,
+ })
+ }
+ }
+
+ /// Creates an iterator over the list.
+ pub fn iter(&self) -> Iter<'_, T, ID> {
+ // INVARIANT: If the list is empty, both pointers are null. Otherwise, both pointers point
+ // at the first element of the same list.
+ Iter {
+ current: self.first,
+ stop: self.first,
+ _ty: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Default for List<T, ID> {
+ fn default() -> Self {
+ List::new()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Drop for List<T, ID> {
+ fn drop(&mut self) {
+ while let Some(item) = self.pop_front() {
+ drop(item);
+ }
+ }
+}
+
+/// An iterator over a [`List`].
+///
+/// # Invariants
+///
+/// * There must be a [`List`] that is immutably borrowed for the duration of `'a`.
+/// * The `current` pointer is null or points at a value in that [`List`].
+/// * The `stop` pointer is equal to the `first` field of that [`List`].
+#[derive(Clone)]
+pub struct Iter<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ current: *mut ListLinksFields,
+ stop: *mut ListLinksFields,
+ _ty: PhantomData<&'a ListArc<T, ID>>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
+ type Item = ArcBorrow<'a, T>;
+
+ fn next(&mut self) -> Option<ArcBorrow<'a, T>> {
+ if self.current.is_null() {
+ return None;
+ }
+
+ let current = self.current;
+
+ // SAFETY: We just checked that `current` is not null, so it is in a list, and hence not
+ // dangling. There's no race because the iterator holds an immutable borrow to the list.
+ let next = unsafe { (*current).next };
+ // INVARIANT: If `current` was the last element of the list, then this updates it to null.
+ // Otherwise, we update it to the next element.
+ self.current = if next != self.stop {
+ next
+ } else {
+ ptr::null_mut()
+ };
+
+ // SAFETY: The `current` pointer points at a value in the list.
+ let item = unsafe { T::view_value(ListLinks::from_fields(current)) };
+ // SAFETY:
+ // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated
+ // on the returned `ArcBorrow`, because removing it from the list would require mutable
+ // access to the list. However, the `ArcBorrow` is annotated with the iterator's
+ // lifetime, and the list is immutably borrowed for that lifetime.
+ // * Values in a list never have a `UniqueArc` reference.
+ Some(unsafe { ArcBorrow::from_raw(item) })
+ }
+}
+
+/// A cursor into a [`List`].
+///
+/// # Invariants
+///
+/// The `current` pointer points at a value in `list`.
+pub struct Cursor<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ current: *mut ListLinksFields,
+ list: &'a mut List<T, ID>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Cursor<'a, T, ID> {
+ /// Access the current element of this cursor.
+ pub fn current(&self) -> ArcBorrow<'_, T> {
+ // SAFETY: The `current` pointer points at a value in the list.
+ let me = unsafe { T::view_value(ListLinks::from_fields(self.current)) };
+ // SAFETY:
+ // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated
+ // on the returned `ArcBorrow`, because removing it from the list would require mutable
+ // access to the cursor or the list. However, the `ArcBorrow` holds an immutable borrow
+ // on the cursor, which in turn holds a mutable borrow on the list, so any such
+ // mutable access requires first releasing the immutable borrow on the cursor.
+ // * Values in a list never have a `UniqueArc` reference, because the list has a `ListArc`
+ // reference, and `UniqueArc` references must be unique.
+ unsafe { ArcBorrow::from_raw(me) }
+ }
+
+ /// Move the cursor to the next element.
+ pub fn next(self) -> Option<Cursor<'a, T, ID>> {
+ // SAFETY: The `current` field is always in a list.
+ let next = unsafe { (*self.current).next };
+
+ if next == self.list.first {
+ None
+ } else {
+ // INVARIANT: Since `self.current` is in the `list`, its `next` pointer is also in the
+ // `list`.
+ Some(Cursor {
+ current: next,
+ list: self.list,
+ })
+ }
+ }
+
+ /// Move the cursor to the previous element.
+ pub fn prev(self) -> Option<Cursor<'a, T, ID>> {
+ // SAFETY: The `current` field is always in a list.
+ let prev = unsafe { (*self.current).prev };
+
+ if self.current == self.list.first {
+ None
+ } else {
+ // INVARIANT: Since `self.current` is in the `list`, its `prev` pointer is also in the
+ // `list`.
+ Some(Cursor {
+ current: prev,
+ list: self.list,
+ })
+ }
+ }
+
+ /// Remove the current element from the list.
+ pub fn remove(self) -> ListArc<T, ID> {
+ // SAFETY: The `current` pointer always points at a member of the list.
+ unsafe { self.list.remove_internal(self.current) }
+ }
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> FusedIterator for Iter<'a, T, ID> {}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> IntoIterator for &'a List<T, ID> {
+ type IntoIter = Iter<'a, T, ID>;
+ type Item = ArcBorrow<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T, ID> {
+ self.iter()
+ }
+}
+
+/// An owning iterator into a [`List`].
+pub struct IntoIter<T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ list: List<T, ID>,
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Iterator for IntoIter<T, ID> {
+ type Item = ListArc<T, ID>;
+
+ fn next(&mut self) -> Option<ListArc<T, ID>> {
+ self.list.pop_front()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> FusedIterator for IntoIter<T, ID> {}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> DoubleEndedIterator for IntoIter<T, ID> {
+ fn next_back(&mut self) -> Option<ListArc<T, ID>> {
+ self.list.pop_back()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> IntoIterator for List<T, ID> {
+ type IntoIter = IntoIter<T, ID>;
+ type Item = ListArc<T, ID>;
+
+ fn into_iter(self) -> IntoIter<T, ID> {
+ IntoIter { list: self }
+ }
+}
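Because the pieces interlock (the item macros from `impl_list_item_mod`, `ListArc` from the `arc` submodule, and `List` itself), a worked sketch may help. The macro grammar below follows the submodules introduced alongside this file but not shown in this hunk, so treat the exact invocations as assumptions:

```rust
use kernel::list::{
    impl_has_list_links, impl_list_arc_safe, impl_list_item, List, ListArc, ListLinks,
};
use kernel::prelude::*;

#[pin_data]
struct Item {
    value: u32,
    #[pin]
    links: ListLinks,
}

impl_has_list_links! {
    impl HasListLinks<0> for Item { self.links }
}
impl_list_arc_safe! {
    impl ListArcSafe<0> for Item { untracked; }
}
impl_list_item! {
    impl ListItem<0> for Item { using ListLinks; }
}

fn demo() -> Result {
    let mut list = List::<Item>::new();
    for value in 0u32..3 {
        // With `untracked` tracking, a `ListArc` can only come from a
        // `UniqueArc`, which is what `ListArc::pin_init` creates internally.
        list.push_back(ListArc::pin_init(
            try_pin_init!(Item {
                value,
                links <- ListLinks::new(),
            }),
            GFP_KERNEL,
        )?);
    }
    // Borrow the elements while the list still owns them...
    assert_eq!(list.iter().map(|item| item.value).sum::<u32>(), 3);
    // ...then drain the list from the front, recovering each `ListArc`.
    while let Some(item) = list.pop_front() {
        pr_info!("popped {}\n", item.value);
    }
    Ok(())
}
```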
diff --git a/rust/kernel/list/arc.rs b/rust/kernel/list/arc.rs
new file mode 100644
index 000000000000..d801b9dc6291
--- /dev/null
+++ b/rust/kernel/list/arc.rs
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A wrapper around `Arc` for linked lists.
+
+use crate::alloc::{AllocError, Flags};
+use crate::prelude::*;
+use crate::sync::{Arc, ArcBorrow, UniqueArc};
+use core::marker::{PhantomPinned, Unsize};
+use core::ops::Deref;
+use core::pin::Pin;
+use core::sync::atomic::{AtomicBool, Ordering};
+
+/// Declares that this type has some way to ensure that there is exactly one `ListArc` instance for
+/// this id.
+///
+/// Types that implement this trait should include some kind of logic for keeping track of whether
+/// a [`ListArc`] exists or not. We refer to this logic as "the tracking inside `T`".
+///
+/// We allow the case where the tracking inside `T` thinks that a [`ListArc`] exists, but actually,
+/// there isn't a [`ListArc`]. However, we do not allow the opposite situation where a [`ListArc`]
+/// exists, but the tracking thinks it doesn't. This is because the former can at most result in us
+/// failing to create a [`ListArc`] when the operation could succeed, whereas the latter can result
+/// in the creation of two [`ListArc`] references. Only the latter situation can lead to memory
+/// safety issues.
+///
+/// A consequence of the above is that you may implement the tracking inside `T` by not actually
+/// keeping track of anything. To do this, you always claim that a [`ListArc`] exists, even if
+/// there isn't one. This implementation is allowed by the above rule, but it means that
+/// [`ListArc`] references can only be created if you have ownership of *all* references to the
+/// refcounted object, as you otherwise have no way of knowing whether a [`ListArc`] exists.
+pub trait ListArcSafe<const ID: u64 = 0> {
+ /// Informs the tracking inside this type that it now has a [`ListArc`] reference.
+ ///
+ /// This method may be called even if the tracking inside this type thinks that a `ListArc`
+ /// reference exists. (But only if that's not actually the case.)
+ ///
+ /// # Safety
+ ///
+ /// Must not be called if a [`ListArc`] already exist for this value.
+ unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>);
+
+ /// Informs the tracking inside this type that there is no [`ListArc`] reference anymore.
+ ///
+ /// # Safety
+ ///
+ /// Must only be called if there is no [`ListArc`] reference, but the tracking thinks there is.
+ unsafe fn on_drop_list_arc(&self);
+}
+
+/// Declares that this type is able to safely attempt to create `ListArc`s at any time.
+///
+/// # Safety
+///
+/// The guarantees of `try_new_list_arc` must be upheld.
+pub unsafe trait TryNewListArc<const ID: u64 = 0>: ListArcSafe<ID> {
+ /// Attempts to convert an `Arc<Self>` into a `ListArc<Self>`. Returns `true` if the
+ /// conversion was successful.
+ ///
+ /// This method should not be called directly. Use [`ListArc::try_from_arc`] instead.
+ ///
+ /// # Guarantees
+ ///
+ /// If this call returns `true`, then there is no [`ListArc`] pointing to this value.
+ /// Additionally, this call will have transitioned the tracking inside `Self` from not thinking
+ /// that a [`ListArc`] exists, to thinking that a [`ListArc`] exists.
+ fn try_new_list_arc(&self) -> bool;
+}
+
+/// Declares that this type supports [`ListArc`].
+///
+/// This macro supports a few different strategies for implementing the tracking inside the type:
+///
+/// * The `untracked` strategy does not actually keep track of whether a [`ListArc`] exists. When
+/// using this strategy, the only way to create a [`ListArc`] is using a [`UniqueArc`].
+/// * The `tracked_by` strategy defers the tracking to a field of the struct. The user must specify
+/// which field to defer the tracking to. The field must implement [`ListArcSafe`]. If the field
+/// implements [`TryNewListArc`], then the type will also implement [`TryNewListArc`].
+///
+/// The `tracked_by` strategy is usually used by deferring to a field of type
+/// [`AtomicTracker`]. However, it is also possible to defer the tracking to another struct
+/// that also uses this macro.
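+///
+/// # Examples
+///
+/// A sketch of both strategies (the item types `Plain` and `Tracked` are illustrative):
+///
+/// ```
+/// use kernel::list::{impl_list_arc_safe, AtomicTracker, TryNewListArc};
+/// use kernel::prelude::*;
+///
+/// struct Plain(u64);
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Plain { untracked; }
+/// }
+///
+/// #[pin_data]
+/// struct Tracked {
+///     #[pin]
+///     tracker: AtomicTracker,
+/// }
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Tracked { tracked_by tracker: AtomicTracker; }
+/// }
+/// ```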
+#[macro_export]
+macro_rules! impl_list_arc_safe {
+ (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty { untracked; } $($rest:tt)*) => {
+ impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+ unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {}
+ unsafe fn on_drop_list_arc(&self) {}
+ }
+ $crate::list::impl_list_arc_safe! { $($rest)* }
+ };
+
+ (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty {
+ tracked_by $field:ident : $fty:ty;
+ } $($rest:tt)*) => {
+ impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+ unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {
+ $crate::assert_pinned!($t, $field, $fty, inline);
+
+ // SAFETY: This field is structurally pinned as per the above assertion.
+ let field = unsafe {
+ ::core::pin::Pin::map_unchecked_mut(self, |me| &mut me.$field)
+ };
+ // SAFETY: The caller promises that there is no `ListArc`.
+ unsafe {
+ <$fty as $crate::list::ListArcSafe<$num>>::on_create_list_arc_from_unique(field)
+ };
+ }
+ unsafe fn on_drop_list_arc(&self) {
+ // SAFETY: The caller promises that there is no `ListArc` reference, and also
+ // promises that the tracking thinks there is a `ListArc` reference.
+ unsafe { <$fty as $crate::list::ListArcSafe<$num>>::on_drop_list_arc(&self.$field) };
+ }
+ }
+ unsafe impl$(<$($generics)*>)? $crate::list::TryNewListArc<$num> for $t
+ where
+ $fty: TryNewListArc<$num>,
+ {
+ fn try_new_list_arc(&self) -> bool {
+ <$fty as $crate::list::TryNewListArc<$num>>::try_new_list_arc(&self.$field)
+ }
+ }
+ $crate::list::impl_list_arc_safe! { $($rest)* }
+ };
+
+ () => {};
+}
+pub use impl_list_arc_safe;
+
+/// A wrapper around [`Arc`] that's guaranteed unique for the given id.
+///
+/// The `ListArc` type can be thought of as a special reference to a refcounted object that owns the
+/// permission to manipulate the `next`/`prev` pointers stored in the refcounted object. By ensuring
+/// that each object has only one `ListArc` reference, the owner of that reference is assured
+/// exclusive access to the `next`/`prev` pointers. When a `ListArc` is inserted into a [`List`],
+/// the [`List`] takes ownership of the `ListArc` reference.
+///
+/// There are various strategies for ensuring that a value has only one `ListArc` reference. The
+/// simplest is to convert a [`UniqueArc`] into a `ListArc`. However, the refcounted object could
+/// also keep track of whether a `ListArc` exists using a boolean, which could allow for the
+/// creation of new `ListArc` references from an [`Arc`] reference. Whatever strategy is used, the
+/// relevant tracking is referred to as "the tracking inside `T`", and the [`ListArcSafe`] trait
+/// (and its subtraits) are used to update the tracking when a `ListArc` is created or destroyed.
+///
+/// Note that we allow the case where the tracking inside `T` thinks that a `ListArc` exists, but
+/// actually, there isn't a `ListArc`. However, we do not allow the opposite situation where a
+/// `ListArc` exists, but the tracking thinks it doesn't. This is because the former can at most
+/// result in us failing to create a `ListArc` when the operation could succeed, whereas the latter
+/// can result in the creation of two `ListArc` references.
+///
+/// While this `ListArc` is unique for the given id, there still might exist normal `Arc`
+/// references to the object.
+///
+/// # Invariants
+///
+/// * Each reference counted object has at most one `ListArc` for each value of `ID`.
+/// * The tracking inside `T` is aware that a `ListArc` reference exists.
+///
+/// [`List`]: crate::list::List
+#[repr(transparent)]
+pub struct ListArc<T, const ID: u64 = 0>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ arc: Arc<T>,
+}
+
+impl<T: ListArcSafe<ID>, const ID: u64> ListArc<T, ID> {
+ /// Constructs a new reference counted instance of `T`.
+ #[inline]
+ pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self::from(UniqueArc::new(contents, flags)?))
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ // We don't implement `InPlaceInit` because `ListArc` is implicitly pinned. This is similar to
+ // what we do for `Arc`.
+ #[inline]
+ pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Ok(Self::from(UniqueArc::try_pin_init(init, flags)?))
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// This is equivalent to [`ListArc<T>::pin_init`], since a [`ListArc`] is always pinned.
+ #[inline]
+ pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Ok(Self::from(UniqueArc::try_init(init, flags)?))
+ }
+}
+
+impl<T, const ID: u64> From<UniqueArc<T>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Convert a [`UniqueArc`] into a [`ListArc`].
+ #[inline]
+ fn from(unique: UniqueArc<T>) -> Self {
+ Self::from(Pin::from(unique))
+ }
+}
+
+impl<T, const ID: u64> From<Pin<UniqueArc<T>>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Convert a pinned [`UniqueArc`] into a [`ListArc`].
+ #[inline]
+ fn from(mut unique: Pin<UniqueArc<T>>) -> Self {
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { T::on_create_list_arc_from_unique(unique.as_mut()) };
+ let arc = Arc::from(unique);
+ // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`,
+ // so we can create a `ListArc`.
+ unsafe { Self::transmute_from_arc(arc) }
+ }
+}
+
+impl<T, const ID: u64> ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Creates two `ListArc`s from a [`UniqueArc`].
+ ///
+ /// The two ids must be different.
+ #[inline]
+ pub fn pair_from_unique<const ID2: u64>(unique: UniqueArc<T>) -> (Self, ListArc<T, ID2>)
+ where
+ T: ListArcSafe<ID2>,
+ {
+ Self::pair_from_pin_unique(Pin::from(unique))
+ }
+
+ /// Creates two `ListArc`s from a pinned [`UniqueArc`].
+ ///
+ /// The two ids must be different.
+ #[inline]
+ pub fn pair_from_pin_unique<const ID2: u64>(
+ mut unique: Pin<UniqueArc<T>>,
+ ) -> (Self, ListArc<T, ID2>)
+ where
+ T: ListArcSafe<ID2>,
+ {
+ build_assert!(ID != ID2);
+
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { <T as ListArcSafe<ID>>::on_create_list_arc_from_unique(unique.as_mut()) };
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { <T as ListArcSafe<ID2>>::on_create_list_arc_from_unique(unique.as_mut()) };
+
+ let arc1 = Arc::from(unique);
+ let arc2 = Arc::clone(&arc1);
+
+ // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`
+ // for both IDs (which are different), so we can create two `ListArc`s.
+ unsafe {
+ (
+ Self::transmute_from_arc(arc1),
+ ListArc::transmute_from_arc(arc2),
+ )
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// This fails if this value already has a `ListArc`.
+ pub fn try_from_arc(arc: Arc<T>) -> Result<Self, Arc<T>>
+ where
+ T: TryNewListArc<ID>,
+ {
+ if arc.try_new_list_arc() {
+ // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think
+ // that a `ListArc` exists. This lets us create a `ListArc`.
+ Ok(unsafe { Self::transmute_from_arc(arc) })
+ } else {
+ Err(arc)
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// This fails if this value already has a `ListArc`.
+ pub fn try_from_arc_borrow(arc: ArcBorrow<'_, T>) -> Option<Self>
+ where
+ T: TryNewListArc<ID>,
+ {
+ if arc.try_new_list_arc() {
+ // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think
+ // that a `ListArc` exists. This lets us create a `ListArc`.
+ Some(unsafe { Self::transmute_from_arc(Arc::from(arc)) })
+ } else {
+ None
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// If it's not possible to create a new `ListArc`, then the `Arc` is dropped. This will never
+ /// run the destructor of the value.
+ pub fn try_from_arc_or_drop(arc: Arc<T>) -> Option<Self>
+ where
+ T: TryNewListArc<ID>,
+ {
+ match Self::try_from_arc(arc) {
+ Ok(list_arc) => Some(list_arc),
+ Err(arc) => Arc::into_unique_or_drop(arc).map(Self::from),
+ }
+ }
+
+ /// Transmutes an [`Arc`] into a `ListArc` without updating the tracking inside `T`.
+ ///
+ /// # Safety
+ ///
+ /// * The value must not already have a `ListArc` reference.
+ /// * The tracking inside `T` must think that there is a `ListArc` reference.
+ #[inline]
+ unsafe fn transmute_from_arc(arc: Arc<T>) -> Self {
+ // INVARIANT: By the safety requirements, the invariants on `ListArc` are satisfied.
+ Self { arc }
+ }
+
+ /// Transmutes a `ListArc` into an [`Arc`] without updating the tracking inside `T`.
+ ///
+ /// After this call, the tracking inside `T` will still think that there is a `ListArc`
+ /// reference.
+ #[inline]
+ fn transmute_to_arc(self) -> Arc<T> {
+ // Use a transmute to skip destructor.
+ //
+ // SAFETY: ListArc is repr(transparent).
+ unsafe { core::mem::transmute(self) }
+ }
+
+ /// Convert ownership of this `ListArc` into a raw pointer.
+ ///
+ /// The returned pointer is indistinguishable from pointers returned by [`Arc::into_raw`]. The
+ /// tracking inside `T` will still think that a `ListArc` exists after this call.
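+    ///
+    /// # Examples
+    ///
+    /// A hedged round-trip sketch (`Item` is an illustrative type implementing
+    /// [`ListArcSafe`]):
+    ///
+    /// ```ignore
+    /// let ptr = list_arc.into_raw();
+    /// // ... the pointer can now cross an FFI boundary or live in an opaque field ...
+    ///
+    /// // SAFETY: `ptr` came from `ListArc::into_raw` above and ownership was not passed on in
+    /// // the meantime, so there is no other `ListArc` and the tracking still records one.
+    /// let list_arc: ListArc<Item> = unsafe { ListArc::from_raw(ptr) };
+    /// ```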
+ #[inline]
+ pub fn into_raw(self) -> *const T {
+ Arc::into_raw(Self::transmute_to_arc(self))
+ }
+
+ /// Take ownership of the `ListArc` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must satisfy the safety requirements of [`Arc::from_raw`].
+ /// * The value must not already have a `ListArc` reference.
+ /// * The tracking inside `T` must think that there is a `ListArc` reference.
+ #[inline]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // SAFETY: The pointer satisfies the safety requirements for `Arc::from_raw`.
+ let arc = unsafe { Arc::from_raw(ptr) };
+ // SAFETY: The value doesn't already have a `ListArc` reference, but the tracking thinks it
+ // does.
+ unsafe { Self::transmute_from_arc(arc) }
+ }
+
+ /// Converts the `ListArc` into an [`Arc`].
+ #[inline]
+ pub fn into_arc(self) -> Arc<T> {
+ let arc = Self::transmute_to_arc(self);
+ // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is.
+ unsafe { T::on_drop_list_arc(&arc) };
+ arc
+ }
+
+ /// Clone a `ListArc` into an [`Arc`].
+ #[inline]
+ pub fn clone_arc(&self) -> Arc<T> {
+ self.arc.clone()
+ }
+
+ /// Returns a reference to an [`Arc`] from the given [`ListArc`].
+ ///
+ /// This is useful when the argument of a function call is an [`&Arc`] (e.g., in a method
+ /// receiver), but we have a [`ListArc`] instead.
+ ///
+ /// [`&Arc`]: Arc
+ #[inline]
+ pub fn as_arc(&self) -> &Arc<T> {
+ &self.arc
+ }
+
+ /// Returns an [`ArcBorrow`] from the given [`ListArc`].
+ ///
+ /// This is useful when the argument of a function call is an [`ArcBorrow`] (e.g., in a method
+    /// receiver), but we have a [`ListArc`] instead. Getting an [`ArcBorrow`] is free when optimised.
+ #[inline]
+ pub fn as_arc_borrow(&self) -> ArcBorrow<'_, T> {
+ self.arc.as_arc_borrow()
+ }
+
+ /// Compare whether two [`ListArc`] pointers reference the same underlying object.
+ #[inline]
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ Arc::ptr_eq(&this.arc, &other.arc)
+ }
+}
+
+impl<T, const ID: u64> Deref for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.arc.deref()
+ }
+}
+
+impl<T, const ID: u64> Drop for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is by the type
+ // invariants on `Self`.
+ unsafe { T::on_drop_list_arc(&self.arc) };
+ }
+}
+
+impl<T, const ID: u64> AsRef<Arc<T>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ #[inline]
+ fn as_ref(&self) -> &Arc<T> {
+ self.as_arc()
+ }
+}
+
+// This is to allow [`ListArc`] (and variants) to be used as the type of `self`.
+impl<T, const ID: u64> core::ops::Receiver for ListArc<T, ID> where T: ListArcSafe<ID> + ?Sized {}
+
+// This is to allow coercion from `ListArc<T>` to `ListArc<U>` if `T` can be converted to the
+// dynamically-sized type (DST) `U`.
+impl<T, U, const ID: u64> core::ops::CoerceUnsized<ListArc<U, ID>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + Unsize<U> + ?Sized,
+ U: ListArcSafe<ID> + ?Sized,
+{
+}
+
+// This is to allow `ListArc<U>` to be dispatched on when `ListArc<T>` can be coerced into
+// `ListArc<U>`.
+impl<T, U, const ID: u64> core::ops::DispatchFromDyn<ListArc<U, ID>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + Unsize<U> + ?Sized,
+ U: ListArcSafe<ID> + ?Sized,
+{
+}
+
+/// A utility for tracking whether a [`ListArc`] exists using an atomic.
+///
+/// # Invariant
+///
+/// If the boolean is `false`, then there is no [`ListArc`] for this value.
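+///
+/// # Examples
+///
+/// A minimal sketch of a type that tracks its [`ListArc`] with an `AtomicTracker`. It assumes
+/// the `impl_list_arc_safe!` macro and its `tracked_by` strategy (defined earlier in this file)
+/// as well as the `try_pin_init!` macro from `kernel::init`; the `Tracked` type is purely
+/// illustrative.
+///
+/// ```ignore
+/// use kernel::alloc::flags;
+/// use kernel::list::{impl_list_arc_safe, AtomicTracker, ListArc};
+/// use kernel::prelude::*;
+///
+/// #[pin_data]
+/// struct Tracked {
+///     #[pin]
+///     tracker: AtomicTracker,
+/// }
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Tracked { tracked_by tracker: AtomicTracker; }
+/// }
+///
+/// let list_arc: ListArc<Tracked> = ListArc::pin_init(
+///     try_pin_init!(Tracked { tracker <- AtomicTracker::new() }? Error),
+///     flags::GFP_KERNEL,
+/// )?;
+///
+/// // While the `ListArc` exists, the tracker refuses to hand out a second one.
+/// let arc = list_arc.clone_arc();
+/// assert!(ListArc::<Tracked>::try_from_arc(arc.clone()).is_err());
+///
+/// // Once it is dropped, a new `ListArc` can be claimed again.
+/// drop(list_arc);
+/// assert!(ListArc::<Tracked>::try_from_arc(arc).is_ok());
+/// # Ok::<(), Error>(())
+/// ```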
+#[repr(transparent)]
+pub struct AtomicTracker<const ID: u64 = 0> {
+ inner: AtomicBool,
+ // This value needs to be pinned to justify the INVARIANT: comment in `AtomicTracker::new`.
+ _pin: PhantomPinned,
+}
+
+impl<const ID: u64> AtomicTracker<ID> {
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ Self {
+ inner: AtomicBool::new(false),
+ _pin: PhantomPinned,
+ }
+ }
+
+ fn project_inner(self: Pin<&mut Self>) -> &mut AtomicBool {
+ // SAFETY: The `inner` field is not structurally pinned, so we may obtain a mutable
+ // reference to it even if we only have a pinned reference to `self`.
+ unsafe { &mut Pin::into_inner_unchecked(self).inner }
+ }
+}
+
+impl<const ID: u64> ListArcSafe<ID> for AtomicTracker<ID> {
+ unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>) {
+ // INVARIANT: We just created a ListArc, so the boolean should be true.
+ *self.project_inner().get_mut() = true;
+ }
+
+ unsafe fn on_drop_list_arc(&self) {
+ // INVARIANT: We just dropped a ListArc, so the boolean should be false.
+ self.inner.store(false, Ordering::Release);
+ }
+}
+
+// SAFETY: If this method returns `true`, then by the type invariant there is no `ListArc` before
+// this call, so it is okay to create a new `ListArc`.
+//
+// The acquire ordering will synchronize with the release store from the destruction of any
+// previous `ListArc`, so if there was a previous `ListArc`, then the destruction of the previous
+// `ListArc` happens-before the creation of the new `ListArc`.
+unsafe impl<const ID: u64> TryNewListArc<ID> for AtomicTracker<ID> {
+ fn try_new_list_arc(&self) -> bool {
+ // INVARIANT: If this method returns true, then the boolean used to be false, and is no
+ // longer false, so it is okay for the caller to create a new [`ListArc`].
+ self.inner
+ .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ }
+}
diff --git a/rust/kernel/list/arc_field.rs b/rust/kernel/list/arc_field.rs
new file mode 100644
index 000000000000..2330f673427a
--- /dev/null
+++ b/rust/kernel/list/arc_field.rs
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A field that is exclusively owned by a [`ListArc`].
+//!
+//! This can be used to have a reference counted struct where one of the reference counted
+//! pointers has exclusive access to a field of the struct.
+//!
+//! [`ListArc`]: crate::list::ListArc
+
+use core::cell::UnsafeCell;
+
+/// A field owned by a specific [`ListArc`].
+///
+/// [`ListArc`]: crate::list::ListArc
+pub struct ListArcField<T, const ID: u64 = 0> {
+ value: UnsafeCell<T>,
+}
+
+// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArcField` to be
+// thread-safe.
+unsafe impl<T: Send + Sync, const ID: u64> Send for ListArcField<T, ID> {}
+// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArcField` to be
+// thread-safe.
+unsafe impl<T: Send + Sync, const ID: u64> Sync for ListArcField<T, ID> {}
+
+impl<T, const ID: u64> ListArcField<T, ID> {
+ /// Creates a new `ListArcField`.
+ pub fn new(value: T) -> Self {
+ Self {
+ value: UnsafeCell::new(value),
+ }
+ }
+
+ /// Access the value when we have exclusive access to the `ListArcField`.
+ ///
+    /// This allows access to the field using a `UniqueArc` instead of a `ListArc`.
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+
+ /// Unsafely assert that you have shared access to the `ListArc` for this field.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have shared access to the `ListArc<ID>` containing the struct with this
+ /// field for the duration of the returned reference.
+ pub unsafe fn assert_ref(&self) -> &T {
+ // SAFETY: The caller has shared access to the `ListArc`, so they also have shared access
+ // to this field.
+ unsafe { &*self.value.get() }
+ }
+
+ /// Unsafely assert that you have mutable access to the `ListArc` for this field.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have mutable access to the `ListArc<ID>` containing the struct with this
+ /// field for the duration of the returned reference.
+ #[allow(clippy::mut_from_ref)]
+ pub unsafe fn assert_mut(&self) -> &mut T {
+ // SAFETY: The caller has exclusive access to the `ListArc`, so they also have exclusive
+ // access to this field.
+ unsafe { &mut *self.value.get() }
+ }
+}
+
+/// Defines getters for a [`ListArcField`].
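+///
+/// A hedged usage sketch; it assumes the `impl_list_arc_safe!` macro from `kernel::list`, and
+/// the `MyItem` type with its `count` field is purely illustrative.
+///
+/// ```ignore
+/// use kernel::list::{define_list_arc_field_getter, impl_list_arc_safe, ListArcField};
+///
+/// struct MyItem {
+///     // Only the `ListArc` (or a `UniqueArc`) may touch this field.
+///     count: ListArcField<u64>,
+/// }
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for MyItem { untracked; }
+/// }
+///
+/// impl MyItem {
+///     define_list_arc_field_getter! {
+///         pub fn count(&self) -> &u64 { count }
+///         pub fn count_mut(&mut self) -> &mut u64 { count }
+///     }
+/// }
+/// ```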
+#[macro_export]
+macro_rules! define_list_arc_field_getter {
+ ($pub:vis fn $name:ident(&self $(<$id:tt>)?) -> &$typ:ty { $field:ident }
+ $($rest:tt)*
+ ) => {
+ $pub fn $name<'a>(self: &'a $crate::list::ListArc<Self $(, $id)?>) -> &'a $typ {
+ let field = &(&**self).$field;
+ // SAFETY: We have a shared reference to the `ListArc`.
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_ref(field) }
+ }
+
+ $crate::list::define_list_arc_field_getter!($($rest)*);
+ };
+
+ ($pub:vis fn $name:ident(&mut self $(<$id:tt>)?) -> &mut $typ:ty { $field:ident }
+ $($rest:tt)*
+ ) => {
+ $pub fn $name<'a>(self: &'a mut $crate::list::ListArc<Self $(, $id)?>) -> &'a mut $typ {
+ let field = &(&**self).$field;
+ // SAFETY: We have a mutable reference to the `ListArc`.
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_mut(field) }
+ }
+
+ $crate::list::define_list_arc_field_getter!($($rest)*);
+ };
+
+ () => {};
+}
+pub use define_list_arc_field_getter;
diff --git a/rust/kernel/list/impl_list_item_mod.rs b/rust/kernel/list/impl_list_item_mod.rs
new file mode 100644
index 000000000000..a0438537cee1
--- /dev/null
+++ b/rust/kernel/list/impl_list_item_mod.rs
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Helpers for implementing list traits safely.
+
+use crate::list::ListLinks;
+
+/// Declares that this type has a `ListLinks<ID>` field at a fixed offset.
+///
+/// This trait is only used to help implement `ListItem` safely. If `ListItem` is implemented
+/// manually, then this trait is not needed. Use the [`impl_has_list_links!`] macro to implement
+/// this trait.
+///
+/// # Safety
+///
+/// All values of this type must have a `ListLinks<ID>` field at the given offset.
+///
+/// The behavior of `raw_get_list_links` must not be changed.
+pub unsafe trait HasListLinks<const ID: u64 = 0> {
+ /// The offset of the `ListLinks` field.
+ const OFFSET: usize;
+
+ /// Returns a pointer to the [`ListLinks<T, ID>`] field.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid struct of type `Self`.
+ ///
+ /// [`ListLinks<T, ID>`]: ListLinks
+    // This method is not needed by users of the trait, but `impl_has_list_links!` overrides it
+    // with an implementation that only compiles if the field has the right type, which is what
+    // makes that macro correct.
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut ListLinks<ID> {
+ // SAFETY: The caller promises that the pointer is valid. The implementer promises that the
+ // `OFFSET` constant is correct.
+ unsafe { (ptr as *mut u8).add(Self::OFFSET) as *mut ListLinks<ID> }
+ }
+}
+
+/// Implements the [`HasListLinks`] trait for the given type.
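+///
+/// A hedged usage sketch (the `MyItem` type is illustrative). The `{ self.links }` part names
+/// the field holding the [`ListLinks`]:
+///
+/// ```ignore
+/// use kernel::list::{impl_has_list_links, ListLinks};
+///
+/// struct MyItem {
+///     links: ListLinks,
+/// }
+///
+/// impl_has_list_links! {
+///     impl HasListLinks<0> for MyItem { self.links }
+/// }
+/// ```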
+#[macro_export]
+macro_rules! impl_has_list_links {
+ ($(impl$(<$($implarg:ident),*>)?
+ HasListLinks$(<$id:tt>)?
+ for $self:ident $(<$($selfarg:ty),*>)?
+ { self$(.$field:ident)* }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
+ // right type.
+ //
+ // The behavior of `raw_get_list_links` is not changed since the `addr_of_mut!` macro is
+ // equivalent to the pointer offset operation in the trait definition.
+ unsafe impl$(<$($implarg),*>)? $crate::list::HasListLinks$(<$id>)? for
+ $self $(<$($selfarg),*>)?
+ {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $($field).*) as usize;
+
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // SAFETY: The caller promises that the pointer is not dangling. We know that this
+ // expression doesn't follow any pointers, as the `offset_of!` invocation above
+ // would otherwise not compile.
+ unsafe { ::core::ptr::addr_of_mut!((*ptr)$(.$field)*) }
+ }
+ }
+ )*};
+}
+pub use impl_has_list_links;
+
+/// Declares that the `ListLinks<ID>` field in this struct is inside a `ListLinksSelfPtr<T, ID>`.
+///
+/// # Safety
+///
+/// The `ListLinks<ID>` field of this struct at the offset `HasListLinks<ID>::OFFSET` must be
+/// inside a `ListLinksSelfPtr<T, ID>`.
+pub unsafe trait HasSelfPtr<T: ?Sized, const ID: u64 = 0>
+where
+ Self: HasListLinks<ID>,
+{
+}
+
+/// Implements the [`HasListLinks`] and [`HasSelfPtr`] traits for the given type.
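+///
+/// A hedged usage sketch (the `MyItem` type is illustrative). Here the links field must be a
+/// `ListLinksSelfPtr`, which is what distinguishes this macro from [`impl_has_list_links!`]:
+///
+/// ```ignore
+/// use kernel::list::{impl_has_list_links_self_ptr, ListLinksSelfPtr};
+///
+/// struct MyItem {
+///     links: ListLinksSelfPtr<MyItem>,
+/// }
+///
+/// impl_has_list_links_self_ptr! {
+///     impl HasSelfPtr<MyItem> for MyItem { self.links }
+/// }
+/// ```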
+#[macro_export]
+macro_rules! impl_has_list_links_self_ptr {
+ ($(impl$({$($implarg:tt)*})?
+ HasSelfPtr<$item_type:ty $(, $id:tt)?>
+ for $self:ident $(<$($selfarg:ty),*>)?
+ { self.$field:ident }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
+ // right type.
+ unsafe impl$(<$($implarg)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for
+ $self $(<$($selfarg),*>)?
+ {}
+
+ unsafe impl$(<$($implarg)*>)? $crate::list::HasListLinks$(<$id>)? for
+ $self $(<$($selfarg),*>)?
+ {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
+
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // SAFETY: The caller promises that the pointer is not dangling.
+ let ptr: *mut $crate::list::ListLinksSelfPtr<$item_type $(, $id)?> =
+ unsafe { ::core::ptr::addr_of_mut!((*ptr).$field) };
+ ptr.cast()
+ }
+ }
+ )*};
+}
+pub use impl_has_list_links_self_ptr;
+
+/// Implements the [`ListItem`] trait for the given type.
+///
+/// Requires that the type implements [`HasListLinks`]. Use the [`impl_has_list_links!`] macro to
+/// implement that trait.
+///
+/// [`ListItem`]: crate::list::ListItem
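+///
+/// A hedged usage sketch, continuing the illustrative `MyItem` type from the
+/// [`impl_has_list_links!`] example above:
+///
+/// ```ignore
+/// use kernel::list::impl_list_item;
+///
+/// impl_list_item! {
+///     impl ListItem<0> for MyItem { using ListLinks; }
+/// }
+/// ```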
+#[macro_export]
+macro_rules! impl_list_item {
+ (
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
+ using ListLinks;
+ })*
+ ) => {$(
+ // SAFETY: See GUARANTEES comment on each method.
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ // GUARANTEES:
+ // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
+ // is implemented in terms of `view_links`.
+ // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when
+ // this value is not in a list.
+ unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller guarantees that `me` points at a valid value of type `Self`.
+ unsafe {
+ <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut())
+ }
+ }
+
+ // GUARANTEES:
+ // * `me` originates from the most recent call to `prepare_to_insert`, which just added
+ // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
+ // `offset` from `me` so it returns the pointer originally passed to
+ // `prepare_to_insert`.
+ // * The pointer remains valid until the next call to `post_remove` because the caller
+ // of the most recent call to `prepare_to_insert` promised to retain ownership of the
+ // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
+ // be destroyed while a `ListArc` reference exists.
+ unsafe fn view_value(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
+ // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
+ // points at the field at offset `offset` in a value of type `Self`. Thus,
+ // subtracting `offset` from `me` is still in-bounds of the allocation.
+ unsafe { (me as *const u8).sub(offset) as *const Self }
+ }
+
+ // GUARANTEES:
+ // This implementation of `ListItem` will not give out exclusive access to the same
+ // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
+ // must alternate and exclusive access is given up when `post_remove` is called.
+ //
+ // Other invocations of `impl_list_item!` also cannot give out exclusive access to the
+ // same `ListLinks` because you can only implement `ListItem` once for each value of
+ // `ID`, and the `ListLinks` fields only work with the specified `ID`.
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value.
+ unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) }
+ }
+
+ // GUARANTEES:
+ // * `me` originates from the most recent call to `prepare_to_insert`, which just added
+ // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
+ // `offset` from `me` so it returns the pointer originally passed to
+ // `prepare_to_insert`.
+ unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
+ // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
+ // points at the field at offset `offset` in a value of type `Self`. Thus,
+ // subtracting `offset` from `me` is still in-bounds of the allocation.
+ unsafe { (me as *const u8).sub(offset) as *const Self }
+ }
+ }
+ )*};
+
+ (
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
+ using ListLinksSelfPtr;
+ })*
+ ) => {$(
+ // SAFETY: See GUARANTEES comment on each method.
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ // GUARANTEES:
+ // This implementation of `ListItem` will not give out exclusive access to the same
+ // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
+ // must alternate and exclusive access is given up when `post_remove` is called.
+ //
+ // Other invocations of `impl_list_item!` also cannot give out exclusive access to the
+ // same `ListLinks` because you can only implement `ListItem` once for each value of
+ // `ID`, and the `ListLinks` fields only work with the specified `ID`.
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value of type `Self`.
+ let links_field = unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) };
+
+ let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
+ // Goes via the offset as the field is private.
+ //
+ // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
+ // the pointer stays in bounds of the allocation.
+ let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
+ as *const $crate::types::Opaque<*const Self>;
+ let cell_inner = $crate::types::Opaque::raw_get(self_ptr);
+
+ // SAFETY: This value is not accessed in any other places than `prepare_to_insert`,
+ // `post_remove`, or `view_value`. By the safety requirements of those methods,
+ // none of these three methods may be called in parallel with this call to
+ // `prepare_to_insert`, so this write will not race with any other access to the
+ // value.
+ unsafe { ::core::ptr::write(cell_inner, me) };
+
+ links_field
+ }
+
+ // GUARANTEES:
+ // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
+ // returns the return value of `view_links`.
+ // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when
+ // this value is not in a list.
+ unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value of type `Self`.
+                unsafe {
+                    <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut())
+                }
+ }
+
+ // This function is also used as the implementation of `post_remove`, so the caller
+ // may choose to satisfy the safety requirements of `post_remove` instead of the safety
+ // requirements for `view_value`.
+ //
+ // GUARANTEES: (always)
+ // * This returns the same pointer as the one passed to the most recent call to
+ // `prepare_to_insert` since that call wrote that pointer to this location. The value
+ // is only modified in `prepare_to_insert`, so it has not been modified since the
+ // most recent call.
+ //
+ // GUARANTEES: (only when using the `view_value` safety requirements)
+ // * The pointer remains valid until the next call to `post_remove` because the caller
+ // of the most recent call to `prepare_to_insert` promised to retain ownership of the
+ // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
+ // be destroyed while a `ListArc` reference exists.
+ unsafe fn view_value(links_field: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
+ // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
+ // the pointer stays in bounds of the allocation.
+ let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
+ as *const ::core::cell::UnsafeCell<*const Self>;
+ let cell_inner = ::core::cell::UnsafeCell::raw_get(self_ptr);
+ // SAFETY: This is not a data race, because the only function that writes to this
+ // value is `prepare_to_insert`, but by the safety requirements the
+ // `prepare_to_insert` method may not be called in parallel with `view_value` or
+ // `post_remove`.
+ unsafe { ::core::ptr::read(cell_inner) }
+ }
+
+ // GUARANTEES:
+ // The first guarantee of `view_value` is exactly what `post_remove` guarantees.
+ unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ // SAFETY: This specific implementation of `view_value` allows the caller to
+ // promise the safety requirements of `post_remove` instead of the safety
+ // requirements for `view_value`.
+ unsafe { <Self as $crate::list::ListItem<$num>>::view_value(me) }
+ }
+ }
+ )*};
+}
+pub use impl_list_item;
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index b37a0b3180fb..4571daec0961 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -37,6 +37,6 @@ pub use super::error::{code::*, Error, Result};
pub use super::{str::CStr, ThisModule};
-pub use super::init::{InPlaceInit, Init, PinInit};
+pub use super::init::{InPlaceInit, InPlaceWrite, Init, PinInit};
pub use super::current;
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index a78aa3514a0a..508b0221256c 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -4,7 +4,7 @@
//!
//! C header: [`include/linux/printk.h`](srctree/include/linux/printk.h)
//!
-//! Reference: <https://www.kernel.org/doc/html/latest/core-api/printk-basics.html>
+//! Reference: <https://docs.kernel.org/core-api/printk-basics.html>
use core::{
ffi::{c_char, c_void},
@@ -197,7 +197,7 @@ macro_rules! print_macro (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_emerg`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_emerg
+/// [`pr_emerg`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_emerg
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -221,7 +221,7 @@ macro_rules! pr_emerg (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_alert`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_alert
+/// [`pr_alert`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_alert
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -245,7 +245,7 @@ macro_rules! pr_alert (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_crit`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_crit
+/// [`pr_crit`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_crit
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -269,7 +269,7 @@ macro_rules! pr_crit (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_err`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_err
+/// [`pr_err`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_err
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -293,7 +293,7 @@ macro_rules! pr_err (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_warn`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_warn
+/// [`pr_warn`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_warn
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -317,7 +317,7 @@ macro_rules! pr_warn (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_notice`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_notice
+/// [`pr_notice`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_notice
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -341,7 +341,7 @@ macro_rules! pr_notice (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_info`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_info
+/// [`pr_info`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_info
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -367,7 +367,7 @@ macro_rules! pr_info (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_debug`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_debug
+/// [`pr_debug`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_debug
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -395,7 +395,7 @@ macro_rules! pr_debug (
/// `alloc::format!` for information about the formatting syntax.
///
/// [`pr_info!`]: crate::pr_info!
-/// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont
+/// [`pr_cont`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_cont
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
new file mode 100644
index 000000000000..25eb36fd1cdc
--- /dev/null
+++ b/rust/kernel/rbtree.rs
@@ -0,0 +1,1278 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Red-black trees.
+//!
+//! C header: [`include/linux/rbtree.h`](srctree/include/linux/rbtree.h)
+//!
+//! Reference: <https://docs.kernel.org/core-api/rbtree.html>
+
+use crate::{alloc::Flags, bindings, container_of, error::Result, prelude::*};
+use alloc::boxed::Box;
+use core::{
+ cmp::{Ord, Ordering},
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ptr::{addr_of_mut, from_mut, NonNull},
+};
+
+/// A red-black tree with owned nodes.
+///
+/// It is backed by the kernel C red-black trees.
+///
+/// # Examples
+///
+/// In the example below, we do several operations on a tree. Note that insertions may fail if
+/// the system is out of memory.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation}};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// assert_eq!(tree.get(&10).unwrap(), &100);
+/// assert_eq!(tree.get(&20).unwrap(), &200);
+/// assert_eq!(tree.get(&30).unwrap(), &300);
+/// }
+///
+/// // Iterate over the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Print all elements.
+/// for (key, value) in &tree {
+/// pr_info!("{} = {}\n", key, value);
+/// }
+///
+/// // Replace one of the elements.
+/// tree.try_create_and_insert(10, 1000, flags::GFP_KERNEL)?;
+///
+/// // Check that the tree reflects the replacement.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Change the value of one of the elements.
+/// *tree.get_mut(&30).unwrap() = 3000;
+///
+/// // Check that the tree reflects the update.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove an element.
+/// tree.remove(&10);
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// In the example below, we first allocate a node, acquire a spinlock, then insert the node into
+/// the tree. This is useful when the insertion context does not allow sleeping, for example, when
+/// holding a spinlock.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}, sync::SpinLock};
+///
+/// fn insert_test(tree: &SpinLock<RBTree<u32, u32>>) -> Result {
+/// // Pre-allocate node. This may fail (as it allocates memory).
+/// let node = RBTreeNode::new(10, 100, flags::GFP_KERNEL)?;
+///
+/// // Insert node while holding the lock. It is guaranteed to succeed with no allocation
+/// // attempts.
+/// let mut guard = tree.lock();
+/// guard.insert(node);
+/// Ok(())
+/// }
+/// ```
+///
+/// In the example below, we reuse an existing node allocation from an element we removed.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNodeReservation}};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove a node, getting back ownership of it.
+/// let existing = tree.remove(&30).unwrap();
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Create a preallocated reservation that we can re-use later.
+/// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+///
+/// // Insert a new node into the tree, reusing the previous allocation. This is guaranteed to
+/// // succeed (no memory allocations).
+/// tree.insert(reservation.into_node(15, 150));
+///
+/// // Check that the tree reflects the new insertion.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&15, &150));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// Non-null parent/children pointers stored in instances of the `rb_node` C struct are always
+/// valid, and pointing to a field of our internal representation of a node.
+pub struct RBTree<K, V> {
+ root: bindings::rb_root,
+ _p: PhantomData<Node<K, V>>,
+}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Send condition as would be used for a struct with K and V fields.
+unsafe impl<K: Send, V: Send> Send for RBTree<K, V> {}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Sync condition as would be used for a struct with K and V fields.
+unsafe impl<K: Sync, V: Sync> Sync for RBTree<K, V> {}
+
+impl<K, V> RBTree<K, V> {
+ /// Creates a new and empty tree.
+ pub fn new() -> Self {
+ Self {
+ // INVARIANT: There are no nodes in the tree, so the invariant holds vacuously.
+ root: bindings::rb_root::default(),
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns an iterator over the tree nodes, sorted by key.
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter {
+ _tree: PhantomData,
+ // INVARIANT:
+ // - `self.root` is a valid pointer to a tree root.
+ // - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+ iter_raw: IterRaw {
+ // SAFETY: by the invariants, all pointers are valid.
+ next: unsafe { bindings::rb_first(&self.root) },
+ _phantom: PhantomData,
+ },
+ }
+ }
+
+ /// Returns a mutable iterator over the tree nodes, sorted by key.
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut {
+ _tree: PhantomData,
+ // INVARIANT:
+ // - `self.root` is a valid pointer to a tree root.
+ // - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+ iter_raw: IterRaw {
+ // SAFETY: by the invariants, all pointers are valid.
+ next: unsafe { bindings::rb_first(from_mut(&mut self.root)) },
+ _phantom: PhantomData,
+ },
+ }
+ }
+
+ /// Returns an iterator over the keys of the nodes in the tree, in sorted order.
+ pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
+ self.iter().map(|(k, _)| k)
+ }
+
+ /// Returns an iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values(&self) -> impl Iterator<Item = &'_ V> {
+ self.iter().map(|(_, v)| v)
+ }
+
+ /// Returns a mutable iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
+ self.iter_mut().map(|(_, v)| v)
+ }
+
+ /// Returns a cursor over the tree nodes, starting with the smallest key.
+ pub fn cursor_front(&mut self) -> Option<Cursor<'_, K, V>> {
+ let root = addr_of_mut!(self.root);
+ // SAFETY: `self.root` is always a valid root node
+ let current = unsafe { bindings::rb_first(root) };
+ NonNull::new(current).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+
+ /// Returns a cursor over the tree nodes, starting with the largest key.
+ pub fn cursor_back(&mut self) -> Option<Cursor<'_, K, V>> {
+ let root = addr_of_mut!(self.root);
+ // SAFETY: `self.root` is always a valid root node
+ let current = unsafe { bindings::rb_last(root) };
+ NonNull::new(current).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+}
+
+impl<K, V> RBTree<K, V>
+where
+ K: Ord,
+{
+ /// Tries to insert a new value into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// Returns an error if it cannot allocate memory for the new node.
+ pub fn try_create_and_insert(
+ &mut self,
+ key: K,
+ value: V,
+ flags: Flags,
+ ) -> Result<Option<RBTreeNode<K, V>>> {
+ Ok(self.insert(RBTreeNode::new(key, value, flags)?))
+ }
+
+ /// Inserts a new node into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// This function always succeeds.
+ pub fn insert(&mut self, node: RBTreeNode<K, V>) -> Option<RBTreeNode<K, V>> {
+ match self.raw_entry(&node.node.key) {
+ RawEntry::Occupied(entry) => Some(entry.replace(node)),
+ RawEntry::Vacant(entry) => {
+ entry.insert(node);
+ None
+ }
+ }
+ }
+
+ fn raw_entry(&mut self, key: &K) -> RawEntry<'_, K, V> {
+ let raw_self: *mut RBTree<K, V> = self;
+ // The returned `RawEntry` is used to call either `rb_link_node` or `rb_replace_node`.
+ // The parameters of `bindings::rb_link_node` are as follows:
+ // - `node`: A pointer to an uninitialized node being inserted.
+ // - `parent`: A pointer to an existing node in the tree. One of its child pointers must be
+ // null, and `node` will become a child of `parent` by replacing that child pointer
+ // with a pointer to `node`.
+ // - `rb_link`: A pointer to either the left-child or right-child field of `parent`. This
+ // specifies which child of `parent` should hold `node` after this call. The
+ // value of `*rb_link` must be null before the call to `rb_link_node`. If the
+ // red/black tree is empty, then it’s also possible for `parent` to be null. In
+ // this case, `rb_link` is a pointer to the `root` field of the red/black tree.
+ //
+ // We will traverse the tree looking for a node that has a null pointer as its child,
+ // representing an empty subtree where we can insert our new node. We need to make sure
+ // that we preserve the ordering of the nodes in the tree. In each iteration of the loop
+ // we store `parent` and `child_field_of_parent`, and the new `node` will go somewhere
+ // in the subtree of `parent` that `child_field_of_parent` points at. Once
+ // we find an empty subtree, we can insert the new node using `rb_link_node`.
+ let mut parent = core::ptr::null_mut();
+ let mut child_field_of_parent: &mut *mut bindings::rb_node =
+ // SAFETY: `raw_self` is a valid pointer to the `RBTree` (created from `self` above).
+ unsafe { &mut (*raw_self).root.rb_node };
+ while !(*child_field_of_parent).is_null() {
+ let curr = *child_field_of_parent;
+ // SAFETY: All links fields we create are in a `Node<K, V>`.
+ let node = unsafe { container_of!(curr, Node<K, V>, links) };
+
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ match key.cmp(unsafe { &(*node).key }) {
+ // SAFETY: `curr` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => child_field_of_parent = unsafe { &mut (*curr).rb_left },
+ // SAFETY: `curr` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => child_field_of_parent = unsafe { &mut (*curr).rb_right },
+ Ordering::Equal => {
+ return RawEntry::Occupied(OccupiedEntry {
+ rbtree: self,
+ node_links: curr,
+ })
+ }
+ }
+ parent = curr;
+ }
+
+ RawEntry::Vacant(RawVacantEntry {
+ rbtree: raw_self,
+ parent,
+ child_field_of_parent,
+ _phantom: PhantomData,
+ })
+ }
+
+ /// Gets the given key's corresponding entry in the map for in-place manipulation.
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+ match self.raw_entry(&key) {
+ RawEntry::Occupied(entry) => Entry::Occupied(entry),
+ RawEntry::Vacant(entry) => Entry::Vacant(VacantEntry { raw: entry, key }),
+ }
+ }
+
+ /// Used for accessing the given node, if it exists.
+ pub fn find_mut(&mut self, key: &K) -> Option<OccupiedEntry<'_, K, V>> {
+ match self.raw_entry(key) {
+ RawEntry::Occupied(entry) => Some(entry),
+ RawEntry::Vacant(_entry) => None,
+ }
+ }
+
+ /// Returns a reference to the value corresponding to the key.
+ pub fn get(&self, key: &K) -> Option<&V> {
+ let mut node = self.root.rb_node;
+ while !node.is_null() {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node, Node<K, V>, links) };
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ node = match key.cmp(unsafe { &(*this).key }) {
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => unsafe { (*node).rb_left },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => unsafe { (*node).rb_right },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Equal => return Some(unsafe { &(*this).value }),
+ }
+ }
+ None
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
+ self.find_mut(key).map(|node| node.into_mut())
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the node that was removed if one exists, or [`None`] otherwise.
+ pub fn remove_node(&mut self, key: &K) -> Option<RBTreeNode<K, V>> {
+ self.find_mut(key).map(OccupiedEntry::remove_node)
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the value that was removed if one exists, or [`None`] otherwise.
+ pub fn remove(&mut self, key: &K) -> Option<V> {
+ self.find_mut(key).map(OccupiedEntry::remove)
+ }
+
+ /// Returns a cursor over the tree nodes based on the given key.
+ ///
+ /// If the given key exists, the cursor starts there.
+ /// Otherwise it starts with the first larger key in sort order.
+ /// If there is no larger key, it returns [`None`].
+ pub fn cursor_lower_bound(&mut self, key: &K) -> Option<Cursor<'_, K, V>>
+ where
+ K: Ord,
+ {
+ let mut node = self.root.rb_node;
+ let mut best_match: Option<NonNull<Node<K, V>>> = None;
+ while !node.is_null() {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node, Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ let this_key = unsafe { &(*this).key };
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let left_child = unsafe { (*node).rb_left };
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let right_child = unsafe { (*node).rb_right };
+ match key.cmp(this_key) {
+ Ordering::Equal => {
+ best_match = NonNull::new(this);
+ break;
+ }
+ Ordering::Greater => {
+ node = right_child;
+ }
+ Ordering::Less => {
+ let is_better_match = match best_match {
+ None => true,
+ Some(best) => {
+ // SAFETY: `best` is a non-null node so it is valid by the type invariants.
+ let best_key = unsafe { &(*best.as_ptr()).key };
+ best_key > this_key
+ }
+ };
+ if is_better_match {
+ best_match = NonNull::new(this);
+ }
+ node = left_child;
+ }
+ };
+ }
+
+ let best = best_match?;
+
+ // SAFETY: `best` is a non-null node so it is valid by the type invariants.
+ let links = unsafe { addr_of_mut!((*best.as_ptr()).links) };
+
+ NonNull::new(links).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+}
+
+impl<K, V> Default for RBTree<K, V> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<K, V> Drop for RBTree<K, V> {
+ fn drop(&mut self) {
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ let mut next = unsafe { bindings::rb_first_postorder(&self.root) };
+
+ // INVARIANT: The loop invariant is that all tree nodes from `next` in postorder are valid.
+ while !next.is_null() {
+ // SAFETY: All links fields we create are in a `Node<K, V>`.
+ let this = unsafe { container_of!(next, Node<K, V>, links) };
+
+ // Find out what the next node is before disposing of the current one.
+ // SAFETY: `next` and all nodes in postorder are still valid.
+ next = unsafe { bindings::rb_next_postorder(next) };
+
+ // INVARIANT: This is the destructor, so we break the type invariant during clean-up,
+ // but it is not observable. The loop invariant is still maintained.
+
+ // SAFETY: `this` is valid per the loop invariant.
+ unsafe { drop(Box::from_raw(this.cast_mut())) };
+ }
+ }
+}
+
+/// A bidirectional cursor over the tree nodes, sorted by key.
+///
+/// # Examples
+///
+/// In the following example, we obtain a cursor to the first element in the tree.
+/// The cursor allows us to iterate bidirectionally over key/value pairs in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Get a cursor to the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Move the cursor, updating it to the 2nd element.
+/// cursor = cursor.move_next().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Peek at the next element without impacting the cursor.
+/// let next = cursor.peek_next().unwrap();
+/// assert_eq!(next, (&30, &300));
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Moving past the last element causes `move_next` to return [`None`].
+/// cursor = cursor.move_next().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+/// let cursor = cursor.move_next();
+/// assert!(cursor.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// A cursor can also be obtained at the last element in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// let mut cursor = tree.cursor_back().unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Obtaining a cursor returns [`None`] if the tree is empty.
+///
+/// ```
+/// use kernel::rbtree::RBTree;
+///
+/// let mut tree: RBTree<u16, u16> = RBTree::new();
+/// assert!(tree.cursor_front().is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// [`RBTree::cursor_lower_bound`] can be used to start at an arbitrary node in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert five elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(40, 400, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(50, 500, flags::GFP_KERNEL)?;
+///
+/// // If the provided key exists, a cursor to that key is returned.
+/// let cursor = tree.cursor_lower_bound(&20).unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // If the provided key doesn't exist, a cursor to the first larger element in sort order is returned.
+/// let cursor = tree.cursor_lower_bound(&25).unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // If there is no larger key, [`None`] is returned.
+/// let cursor = tree.cursor_lower_bound(&55);
+/// assert!(cursor.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// The cursor allows mutation of values in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Retrieve a cursor.
+/// let mut cursor = tree.cursor_front().unwrap();
+///
+/// // Get a mutable reference to the current value.
+/// let (k, v) = cursor.current_mut();
+/// *v = 1000;
+///
+/// // The updated value is reflected in the tree.
+/// let updated = tree.get(&10).unwrap();
+/// assert_eq!(updated, &1000);
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// It also allows node removal. The following examples demonstrate the behavior of removing the current node.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Remove the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+/// cursor = cursor.remove_current().0.unwrap();
+///
+/// // If a node exists after the current element, it is returned.
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Get a cursor to the last element, and remove it.
+/// cursor = tree.cursor_back().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // Since there is no next node, the previous node is returned.
+/// cursor = cursor.remove_current().0.unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Removing the last element in the tree returns [`None`].
+/// assert!(cursor.remove_current().0.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Nodes adjacent to the current node can also be removed.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Get a cursor to the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Calling `remove_prev` from the first element returns [`None`].
+/// assert!(cursor.remove_prev().is_none());
+///
+/// // Get a cursor to the last element.
+/// cursor = tree.cursor_back().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // Calling `remove_prev` removes and returns the middle element.
+/// assert_eq!(cursor.remove_prev().unwrap().to_key_value(), (20, 200));
+///
+/// // Calling `remove_next` from the last element returns [`None`].
+/// assert!(cursor.remove_next().is_none());
+///
+/// // Move to the first element
+/// cursor = cursor.move_prev().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Calling `remove_next` removes and returns the last element.
+/// assert_eq!(cursor.remove_next().unwrap().to_key_value(), (30, 300));
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+/// - `current` points to a node that is in the same [`RBTree`] as `tree`.
+pub struct Cursor<'a, K, V> {
+ tree: &'a mut RBTree<K, V>,
+ current: NonNull<bindings::rb_node>,
+}
+
+// SAFETY: The [`Cursor`] has exclusive access to both `K` and `V`, so it is sufficient to require
+// them to be `Send`. The cursor only gives out immutable references to the keys, but since it has
+// exclusive access to those same keys, `Send` is sufficient. Requiring `Sync` would also be okay,
+// but it would be more restrictive to the user.
+unsafe impl<'a, K: Send, V: Send> Send for Cursor<'a, K, V> {}
+
+// SAFETY: The [`Cursor`] gives out immutable references to K and mutable references to V,
+// so it has the same thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Cursor<'a, K, V> {}
+
+impl<'a, K, V> Cursor<'a, K, V> {
+ /// The current node
+ pub fn current(&self) -> (&K, &V) {
+ // SAFETY:
+ // - `self.current` is a valid node by the type invariants.
+ // - We have an immutable reference by the function signature.
+ unsafe { Self::to_key_value(self.current) }
+ }
+
+ /// The current node, with a mutable value
+ pub fn current_mut(&mut self) -> (&K, &mut V) {
+ // SAFETY:
+ // - `self.current` is a valid node by the type invariants.
+        // - We have a mutable reference by the function signature.
+ unsafe { Self::to_key_value_mut(self.current) }
+ }
+
+ /// Remove the current node from the tree.
+ ///
+ /// Returns a tuple where the first element is a cursor to the next node, if it exists,
+ /// else the previous node, else [`None`] (if the tree becomes empty). The second element
+ /// is the removed node.
+ pub fn remove_current(self) -> (Option<Self>, RBTreeNode<K, V>) {
+ let prev = self.get_neighbor_raw(Direction::Prev);
+ let next = self.get_neighbor_raw(Direction::Next);
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(self.current.as_ptr(), Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is valid by the type invariants as described above.
+ let node = unsafe { Box::from_raw(this) };
+ let node = RBTreeNode { node };
+ // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ unsafe { bindings::rb_erase(&mut (*this).links, addr_of_mut!(self.tree.root)) };
+
+ let current = match (prev, next) {
+ (_, Some(next)) => next,
+ (Some(prev), None) => prev,
+ (None, None) => {
+ return (None, node);
+ }
+ };
+
+ (
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ Some(Self {
+ current,
+ tree: self.tree,
+ }),
+ node,
+ )
+ }
+
+ /// Remove the previous node, returning it if it exists.
+ pub fn remove_prev(&mut self) -> Option<RBTreeNode<K, V>> {
+ self.remove_neighbor(Direction::Prev)
+ }
+
+ /// Remove the next node, returning it if it exists.
+ pub fn remove_next(&mut self) -> Option<RBTreeNode<K, V>> {
+ self.remove_neighbor(Direction::Next)
+ }
+
+ fn remove_neighbor(&mut self, direction: Direction) -> Option<RBTreeNode<K, V>> {
+ if let Some(neighbor) = self.get_neighbor_raw(direction) {
+ let neighbor = neighbor.as_ptr();
+ // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ unsafe { bindings::rb_erase(neighbor, addr_of_mut!(self.tree.root)) };
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(neighbor, Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is valid by the type invariants as described above.
+ let node = unsafe { Box::from_raw(this) };
+ return Some(RBTreeNode { node });
+ }
+ None
+ }
+
+ /// Move the cursor to the previous node, returning [`None`] if it doesn't exist.
+ pub fn move_prev(self) -> Option<Self> {
+ self.mv(Direction::Prev)
+ }
+
+ /// Move the cursor to the next node, returning [`None`] if it doesn't exist.
+ pub fn move_next(self) -> Option<Self> {
+ self.mv(Direction::Next)
+ }
+
+ fn mv(self, direction: Direction) -> Option<Self> {
+ // INVARIANT:
+ // - `neighbor` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ self.get_neighbor_raw(direction).map(|neighbor| Self {
+ tree: self.tree,
+ current: neighbor,
+ })
+ }
+
+ /// Access the previous node without moving the cursor.
+ pub fn peek_prev(&self) -> Option<(&K, &V)> {
+ self.peek(Direction::Prev)
+ }
+
+ /// Access the next node without moving the cursor.
+ pub fn peek_next(&self) -> Option<(&K, &V)> {
+ self.peek(Direction::Next)
+ }
+
+ fn peek(&self, direction: Direction) -> Option<(&K, &V)> {
+ self.get_neighbor_raw(direction).map(|neighbor| {
+ // SAFETY:
+ // - `neighbor` is a valid tree node.
+ // - By the function signature, we have an immutable reference to `self`.
+ unsafe { Self::to_key_value(neighbor) }
+ })
+ }
+
+ /// Access the previous node mutably without moving the cursor.
+ pub fn peek_prev_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.peek_mut(Direction::Prev)
+ }
+
+ /// Access the next node mutably without moving the cursor.
+ pub fn peek_next_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.peek_mut(Direction::Next)
+ }
+
+ fn peek_mut(&mut self, direction: Direction) -> Option<(&K, &mut V)> {
+ self.get_neighbor_raw(direction).map(|neighbor| {
+ // SAFETY:
+ // - `neighbor` is a valid tree node.
+ // - By the function signature, we have a mutable reference to `self`.
+ unsafe { Self::to_key_value_mut(neighbor) }
+ })
+ }
+
+ fn get_neighbor_raw(&self, direction: Direction) -> Option<NonNull<bindings::rb_node>> {
+ // SAFETY: `self.current` is valid by the type invariants.
+ let neighbor = unsafe {
+ match direction {
+ Direction::Prev => bindings::rb_prev(self.current.as_ptr()),
+ Direction::Next => bindings::rb_next(self.current.as_ptr()),
+ }
+ };
+
+ NonNull::new(neighbor)
+ }
+
+ /// SAFETY:
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has immutable access to `node` for the duration of `'b`.
+ unsafe fn to_key_value<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b V) {
+ // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+ let (k, v) = unsafe { Self::to_key_value_raw(node) };
+ // SAFETY: the caller guarantees immutable access to `node`.
+ (k, unsafe { &*v })
+ }
+
+ /// SAFETY:
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has mutable access to `node` for the duration of `'b`.
+ unsafe fn to_key_value_mut<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b mut V) {
+ // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+ let (k, v) = unsafe { Self::to_key_value_raw(node) };
+ // SAFETY: the caller guarantees mutable access to `node`.
+ (k, unsafe { &mut *v })
+ }
+
+ /// SAFETY:
+ /// - `node` must be a valid pointer to a node in an [`RBTree`].
+ /// - The caller has immutable access to the key for the duration of `'b`.
+ unsafe fn to_key_value_raw<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, *mut V) {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node.as_ptr(), Node<K, V>, links) }.cast_mut();
+ // SAFETY: The passed `node` is the current node or a non-null neighbor,
+ // thus `this` is valid by the type invariants.
+ let k = unsafe { &(*this).key };
+ // SAFETY: The passed `node` is the current node or a non-null neighbor,
+ // thus `this` is valid by the type invariants.
+ let v = unsafe { addr_of_mut!((*this).value) };
+ (k, v)
+ }
+}
+
+/// Direction for [`Cursor`] operations.
+enum Direction {
+ /// The node immediately before, in sort order.
+ Prev,
+ /// The node immediately after, in sort order.
+ Next,
+}
+
+impl<'a, K, V> IntoIterator for &'a RBTree<K, V> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+/// An iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter`].
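+///
+/// A short sketch of iteration in sorted key order (illustrative;
+/// `try_create_and_insert` is assumed from the full [`RBTree`] API):
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// let mut tree = RBTree::new();
+/// tree.try_create_and_insert(2, 20, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(1, 10, flags::GFP_KERNEL)?;
+///
+/// // Keys come out in sorted order, regardless of insertion order.
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next(), Some((&1, &10)));
+/// assert_eq!(iter.next(), Some((&2, &20)));
+/// assert_eq!(iter.next(), None);
+/// # Ok::<(), Error>(())
+/// ```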
+pub struct Iter<'a, K, V> {
+ _tree: PhantomData<&'a RBTree<K, V>>,
+ iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`Iter`] gives out immutable references to K and V, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}
+
+// SAFETY: The [`Iter`] gives out immutable references to K and V, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // SAFETY: Due to `self._tree`, `k` and `v` are valid for the lifetime of `'a`.
+ self.iter_raw.next().map(|(k, v)| unsafe { (&*k, &*v) })
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut RBTree<K, V> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+/// A mutable iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter_mut`].
+pub struct IterMut<'a, K, V> {
+ _tree: PhantomData<&'a mut RBTree<K, V>>,
+ iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
+// The iterator only gives out immutable references to the keys, but since the iterator has exclusive access
+// to those same keys, `Send` is sufficient. `Sync` would also suffice, but it is more restrictive to the user.
+unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}
+
+// SAFETY: The [`IterMut`] gives out immutable references to K and mutable references to V, so it has the same
+// thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter_raw.next().map(|(k, v)|
+ // SAFETY: Due to `&mut self`, we have exclusive access to `k` and `v`, for the lifetime of `'a`.
+ unsafe { (&*k, &mut *v) })
+ }
+}
+
+/// A raw iterator over the nodes of a [`RBTree`].
+///
+/// # Invariants
+/// - `self.next` is null, or is a valid pointer to a node stored inside a valid `RBTree`.
+struct IterRaw<K, V> {
+ next: *mut bindings::rb_node,
+ _phantom: PhantomData<fn() -> (K, V)>,
+}
+
+impl<K, V> Iterator for IterRaw<K, V> {
+ type Item = (*mut K, *mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ // SAFETY: By the type invariant of `IterRaw`, `self.next` is a valid node in an `RBTree`,
+ // and by the type invariant of `RBTree`, all nodes point to the links field of `Node<K, V>` objects.
+ let cur = unsafe { container_of!(self.next, Node<K, V>, links) }.cast_mut();
+
+ // SAFETY: `self.next` is a valid tree node by the type invariants.
+ self.next = unsafe { bindings::rb_next(self.next) };
+
+ // SAFETY: By the same reasoning as above, it is safe to dereference the node.
+ Some(unsafe { (addr_of_mut!((*cur).key), addr_of_mut!((*cur).value)) })
+ }
+}
+
+/// A memory reservation for a red-black tree node.
+///
+/// It contains the memory needed to hold a node that can be inserted into a red-black tree. One
+/// can be obtained by directly allocating it ([`RBTreeNodeReservation::new`]).
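+///
+/// A short sketch (illustrative; `RBTree::new` and `RBTree::insert` are assumed
+/// from the rest of this file and series):
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNodeReservation}};
+///
+/// // Allocate up front, e.g. before entering a context that cannot allocate.
+/// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+/// let mut tree = RBTree::new();
+/// // Turning the reservation into a node and inserting it cannot fail.
+/// tree.insert(reservation.into_node(1, 10));
+/// # Ok::<(), Error>(())
+/// ```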
+pub struct RBTreeNodeReservation<K, V> {
+ node: Box<MaybeUninit<Node<K, V>>>,
+}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Allocates memory for a node to be eventually initialised and inserted into the tree via a
+ /// call to [`RBTree::insert`].
+ pub fn new(flags: Flags) -> Result<RBTreeNodeReservation<K, V>> {
+ Ok(RBTreeNodeReservation {
+ node: <Box<_> as BoxExt<_>>::new_uninit(flags)?,
+ })
+ }
+}
+
+// SAFETY: This doesn't actually contain `K` or `V`, and is just a memory allocation. Such an
+// allocation can always be moved across threads.
+unsafe impl<K, V> Send for RBTreeNodeReservation<K, V> {}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation.
+unsafe impl<K, V> Sync for RBTreeNodeReservation<K, V> {}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Initialises a node reservation.
+ ///
+ /// It then becomes an [`RBTreeNode`] that can be inserted into a tree.
+ pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> {
+ self.node.write(Node {
+ key,
+ value,
+ links: bindings::rb_node::default(),
+ });
+ // SAFETY: We just wrote to it.
+ let node = unsafe { self.node.assume_init() };
+ RBTreeNode { node }
+ }
+}
+
+/// A red-black tree node.
+///
+/// The node is fully initialised (with key and value) and can be inserted into a tree without any
+/// extra allocations or failure paths.
+pub struct RBTreeNode<K, V> {
+ node: Box<Node<K, V>>,
+}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// Allocates and initialises a node that can be inserted into the tree via
+ /// [`RBTree::insert`].
+ pub fn new(key: K, value: V, flags: Flags) -> Result<RBTreeNode<K, V>> {
+ Ok(RBTreeNodeReservation::new(flags)?.into_node(key, value))
+ }
+
+ /// Get the key and value from inside the node.
+ pub fn to_key_value(self) -> (K, V) {
+ (self.node.key, self.node.value)
+ }
+}
+
+// SAFETY: If K and V can be sent across threads, then it's also okay to send [`RBTreeNode`] across
+// threads.
+unsafe impl<K: Send, V: Send> Send for RBTreeNode<K, V> {}
+
+// SAFETY: If K and V can be accessed without synchronization, then it's also okay to access
+// [`RBTreeNode`] without synchronization.
+unsafe impl<K: Sync, V: Sync> Sync for RBTreeNode<K, V> {}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// Drop the key and value, but keep the allocation.
+ ///
+ /// It then becomes a reservation that can be re-initialised into a different node (i.e., with
+ /// a different key and/or value).
+ ///
+ /// The existing key and value are dropped in-place as part of this operation; any memory
+ /// they own may be freed, while the allocation for the node itself is kept for reuse.
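+ ///
+ /// A reuse sketch (illustrative; `remove_node` and `insert` are assumed from
+ /// the full [`RBTree`] API):
+ ///
+ /// ```
+ /// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}};
+ ///
+ /// let mut tree = RBTree::new();
+ /// tree.insert(RBTreeNode::new(1, 10, flags::GFP_KERNEL)?);
+ ///
+ /// // Recycle the removed node's allocation for a different key/value pair.
+ /// if let Some(node) = tree.remove_node(&1) {
+ ///     let reservation = node.into_reservation();
+ ///     tree.insert(reservation.into_node(2, 20));
+ /// }
+ /// # Ok::<(), Error>(())
+ /// ```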
+ pub fn into_reservation(self) -> RBTreeNodeReservation<K, V> {
+ RBTreeNodeReservation {
+ node: Box::drop_contents(self.node),
+ }
+ }
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This enum is constructed by the [`entry`] method on [`RBTree`].
+///
+/// [`entry`]: fn@RBTree::entry
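+///
+/// A sketch of the entry pattern (illustrative; the `entry` method and the
+/// allocation flags are assumed from the rest of this series):
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{Entry, RBTree, RBTreeNodeReservation}};
+///
+/// let mut tree = RBTree::new();
+/// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+/// match tree.entry(42) {
+///     // No node with this key yet: consume the reservation to insert one.
+///     Entry::Vacant(entry) => {
+///         entry.insert(1, reservation);
+///     }
+///     // A node already exists: update its value in place; the unused
+///     // reservation is simply dropped.
+///     Entry::Occupied(mut entry) => {
+///         *entry.get_mut() += 1;
+///     }
+/// }
+/// # Ok::<(), Error>(())
+/// ```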
+pub enum Entry<'a, K, V> {
+ /// This [`RBTree`] does not have a node with this key.
+ Vacant(VacantEntry<'a, K, V>),
+ /// This [`RBTree`] already has a node with this key.
+ Occupied(OccupiedEntry<'a, K, V>),
+}
+
+/// Like [`Entry`], except that it doesn't have ownership of the key.
+enum RawEntry<'a, K, V> {
+ Vacant(RawVacantEntry<'a, K, V>),
+ Occupied(OccupiedEntry<'a, K, V>),
+}
+
+/// A view into a vacant entry in a [`RBTree`]. It is part of the [`Entry`] enum.
+pub struct VacantEntry<'a, K, V> {
+ key: K,
+ raw: RawVacantEntry<'a, K, V>,
+}
+
+/// Like [`VacantEntry`], but doesn't hold on to the key.
+///
+/// # Invariants
+/// - `parent` may be null if the new node becomes the root.
+/// - `child_field_of_parent` is a valid pointer to the left-child or right-child of `parent`. If `parent` is
+/// null, it is a pointer to the root of the [`RBTree`].
+struct RawVacantEntry<'a, K, V> {
+ rbtree: *mut RBTree<K, V>,
+ /// The node that will become the parent of the new node if we insert one.
+ parent: *mut bindings::rb_node,
+ /// This points to the left-child or right-child field of `parent`, or `root` if `parent` is
+ /// null.
+ child_field_of_parent: *mut *mut bindings::rb_node,
+ _phantom: PhantomData<&'a mut RBTree<K, V>>,
+}
+
+impl<'a, K, V> RawVacantEntry<'a, K, V> {
+ /// Inserts the given node into the [`RBTree`] at this entry.
+ ///
+ /// The `node` must have a key such that inserting it here does not break the ordering of this
+ /// [`RBTree`].
+ fn insert(self, node: RBTreeNode<K, V>) -> &'a mut V {
+ let node = Box::into_raw(node.node);
+
+ // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let node_links = unsafe { addr_of_mut!((*node).links) };
+
+ // INVARIANT: We are linking in a new node, which is valid. It remains valid because we
+ // "forgot" it with `Box::into_raw`.
+ // SAFETY: The type invariants of `RawVacantEntry` are exactly the safety requirements of `rb_link_node`.
+ unsafe { bindings::rb_link_node(node_links, self.parent, self.child_field_of_parent) };
+
+ // SAFETY: All pointers are valid. `node` has just been inserted into the tree.
+ unsafe { bindings::rb_insert_color(node_links, addr_of_mut!((*self.rbtree).root)) };
+
+ // SAFETY: The node is valid until we remove it from the tree.
+ unsafe { &mut (*node).value }
+ }
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+ /// Inserts the given node into the [`RBTree`] at this entry.
+ pub fn insert(self, value: V, reservation: RBTreeNodeReservation<K, V>) -> &'a mut V {
+ self.raw.insert(reservation.into_node(self.key, value))
+ }
+}
+
+/// A view into an occupied entry in a [`RBTree`]. It is part of the [`Entry`] enum.
+///
+/// # Invariants
+/// - `node_links` is a valid, non-null pointer to a tree node in `self.rbtree`.
+pub struct OccupiedEntry<'a, K, V> {
+ rbtree: &'a mut RBTree<K, V>,
+ /// The node that this entry corresponds to.
+ node_links: *mut bindings::rb_node,
+}
+
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the value in the entry.
+ pub fn get(&self) -> &V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - We have shared access to the underlying tree, and can thus give out a shared reference.
+ unsafe { &(*container_of!(self.node_links, Node<K, V>, links)).value }
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ pub fn get_mut(&mut self) -> &mut V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - We have exclusive access to the underlying tree, and can thus give out a mutable reference.
+ unsafe { &mut (*(container_of!(self.node_links, Node<K, V>, links).cast_mut())).value }
+ }
+
+ /// Converts the entry into a mutable reference to its value.
+ ///
+ /// If you need multiple references to the `OccupiedEntry`, see [`Self::get_mut`].
+ pub fn into_mut(self) -> &'a mut V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - This consumes the `&'a mut RBTree<K, V>`, therefore it can give out a mutable reference that lives for `'a`.
+ unsafe { &mut (*(container_of!(self.node_links, Node<K, V>, links).cast_mut())).value }
+ }
+
+ /// Remove this entry from the [`RBTree`].
+ pub fn remove_node(self) -> RBTreeNode<K, V> {
+ // SAFETY: The node is a node in the tree, so it is valid.
+ unsafe { bindings::rb_erase(self.node_links, &mut self.rbtree.root) };
+
+ // INVARIANT: The node is being returned and the caller may free it, however, it was
+ // removed from the tree. So the invariants still hold.
+ RBTreeNode {
+ // SAFETY: The node was a node in the tree, but we removed it, so we can convert it
+ // back into a box.
+ node: unsafe {
+ Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut())
+ },
+ }
+ }
+
+ /// Takes the value of the entry out of the map, and returns it.
+ pub fn remove(self) -> V {
+ self.remove_node().node.value
+ }
+
+ /// Swap the current node for the provided node.
+ ///
+ /// The keys of both nodes must be equal.
+ fn replace(self, node: RBTreeNode<K, V>) -> RBTreeNode<K, V> {
+ let node = Box::into_raw(node.node);
+
+ // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let new_node_links = unsafe { addr_of_mut!((*node).links) };
+
+ // SAFETY: This updates the pointers so that `new_node_links` is in the tree where
+ // `self.node_links` used to be.
+ unsafe {
+ bindings::rb_replace_node(self.node_links, new_node_links, &mut self.rbtree.root)
+ };
+
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - Now that we removed this entry from the tree, we can convert the node to a box.
+ let old_node =
+ unsafe { Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) };
+
+ RBTreeNode { node: old_node }
+ }
+}
+
+struct Node<K, V> {
+ links: bindings::rb_node,
+ key: K,
+ value: V,
+}
diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs
index 39679a960c1a..67bf9d37ddb5 100644
--- a/rust/kernel/std_vendor.rs
+++ b/rust/kernel/std_vendor.rs
@@ -136,7 +136,7 @@
///
/// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html
/// [`eprintln`]: https://doc.rust-lang.org/std/macro.eprintln.html
-/// [`printk`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
+/// [`printk`]: https://docs.kernel.org/core-api/printk-basics.html
/// [`pr_info`]: crate::pr_info!
/// [`pr_debug`]: crate::pr_debug!
#[macro_export]
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 3673496c2363..3021f30fd822 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -12,12 +12,13 @@
//! 2. It does not support weak references, which allows it to be half the size.
//! 3. It saturates the reference count instead of aborting when it goes over a threshold.
//! 4. It does not provide a `get_mut` method, so the ref counted object is pinned.
+//! 5. The object in [`Arc`] is pinned implicitly.
//!
//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
use crate::{
alloc::{box_ext::BoxExt, AllocError, Flags},
- error::{self, Error},
+ bindings,
init::{self, InPlaceInit, Init, PinInit},
try_init,
types::{ForeignOwnable, Opaque},
@@ -209,28 +210,6 @@ impl<T> Arc<T> {
// `Arc` object.
Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
}
-
- /// Use the given initializer to in-place initialize a `T`.
- ///
- /// If `T: !Unpin` it will not be able to move afterwards.
- #[inline]
- pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self>
- where
- Error: From<E>,
- {
- UniqueArc::pin_init(init, flags).map(|u| u.into())
- }
-
- /// Use the given initializer to in-place initialize a `T`.
- ///
- /// This is equivalent to [`Arc<T>::pin_init`], since an [`Arc`] is always pinned.
- #[inline]
- pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> error::Result<Self>
- where
- Error: From<E>,
- {
- UniqueArc::init(init, flags).map(|u| u.into())
- }
}
impl<T: ?Sized> Arc<T> {
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index bd189d646adb..9e7ca066355c 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -7,8 +7,9 @@ use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
marker::{PhantomData, PhantomPinned},
- mem::MaybeUninit,
+ mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
+ pin::Pin,
ptr::NonNull,
};
@@ -26,7 +27,10 @@ pub trait ForeignOwnable: Sized {
/// Converts a Rust-owned object to a foreign-owned one.
///
- /// The foreign representation is a pointer to void.
+ /// The foreign representation is a pointer to void. There are no guarantees for this pointer.
+ /// For example, it might be invalid, dangling, or pointing to uninitialized memory. Using it in
+ /// any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`], or
+ /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior.
fn into_foreign(self) -> *const core::ffi::c_void;
/// Borrows a foreign-owned object.
@@ -89,6 +93,32 @@ impl<T: 'static> ForeignOwnable for Box<T> {
}
}
+impl<T: 'static> ForeignOwnable for Pin<Box<T>> {
+ type Borrowed<'a> = Pin<&'a T>;
+
+ fn into_foreign(self) -> *const core::ffi::c_void {
+ // SAFETY: We are still treating the box as pinned.
+ Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Pin<&'a T> {
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements of `from_foreign` also ensure that the object remains alive for
+ // the lifetime of the returned value.
+ let r = unsafe { &*ptr.cast() };
+
+ // SAFETY: This pointer originates from a `Pin<Box<T>>`.
+ unsafe { Pin::new_unchecked(r) }
+ }
+
+ unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
+ // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+ // call to `Self::into_foreign`.
+ unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) }
+ }
+}
+
impl ForeignOwnable for () {
type Borrowed<'a> = ();
@@ -366,6 +396,35 @@ impl<T: AlwaysRefCounted> ARef<T> {
_p: PhantomData,
}
}
+
+ /// Consumes the `ARef`, returning a raw pointer.
+ ///
+ /// This function does not change the refcount. After calling this function, the caller is
+ /// responsible for the refcount previously managed by the `ARef`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::ptr::NonNull;
+ /// use kernel::types::{ARef, AlwaysRefCounted};
+ ///
+ /// struct Empty {}
+ ///
+ /// unsafe impl AlwaysRefCounted for Empty {
+ /// fn inc_ref(&self) {}
+ /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
+ /// }
+ ///
+ /// let mut data = Empty {};
+ /// let ptr = NonNull::<Empty>::new(&mut data as *mut _).unwrap();
+ /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
+ /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
+ ///
+ /// assert_eq!(ptr, raw_ptr);
+ /// ```
+ pub fn into_raw(me: Self) -> NonNull<T> {
+ ManuallyDrop::new(me).ptr
+ }
}
impl<T: AlwaysRefCounted> Clone for ARef<T> {
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 5be0cb9db3ee..a626b1145e5c 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -2,6 +2,10 @@
//! Crate for all kernel procedural macros.
+// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT`
+// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is
+// touched by Kconfig when the version string from the compiler changes.
+
#[macro_use]
mod quote;
mod concat_idents;
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 7a5b899e47b7..aef3b132f32b 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -262,6 +262,12 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(MODULE)]
#[doc(hidden)]
+ #[used]
+ #[link_section = \".init.data\"]
+ static __UNIQUE_ID___addressable_init_module: unsafe extern \"C\" fn() -> i32 = init_module;
+
+ #[cfg(MODULE)]
+ #[doc(hidden)]
#[no_mangle]
pub extern \"C\" fn cleanup_module() {{
// SAFETY:
@@ -273,6 +279,12 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
unsafe {{ __exit() }}
}}
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[used]
+ #[link_section = \".exit.data\"]
+ static __UNIQUE_ID___addressable_cleanup_module: extern \"C\" fn() = cleanup_module;
+
// Built-in modules are initialized through an initcall pointer
// and the identifiers need to be unique.
#[cfg(not(MODULE))]
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 3e003dd6bea0..7afe040cf43b 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -13,7 +13,6 @@ tprogs-y += sockex1
tprogs-y += sockex2
tprogs-y += sockex3
tprogs-y += tracex1
-tprogs-y += tracex2
tprogs-y += tracex3
tprogs-y += tracex4
tprogs-y += tracex5
@@ -63,7 +62,6 @@ sockex1-objs := sockex1_user.o
sockex2-objs := sockex2_user.o
sockex3-objs := sockex3_user.o
tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
-tracex2-objs := tracex2_user.o
tracex3-objs := tracex3_user.o
tracex4-objs := tracex4_user.o
tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
@@ -105,7 +103,6 @@ always-y += sockex1_kern.o
always-y += sockex2_kern.o
always-y += sockex3_kern.o
always-y += tracex1.bpf.o
-always-y += tracex2.bpf.o
always-y += tracex3.bpf.o
always-y += tracex4.bpf.o
always-y += tracex5.bpf.o
@@ -169,6 +166,10 @@ BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic
endif
endif
+ifeq ($(ARCH), x86)
+BPF_EXTRA_CFLAGS += -fcf-protection
+endif
+
TPROGS_CFLAGS += -Wall -O2
TPROGS_CFLAGS += -Wmissing-prototypes
TPROGS_CFLAGS += -Wstrict-prototypes
@@ -405,7 +406,7 @@ $(obj)/%.o: $(src)/%.c
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
- -fno-asynchronous-unwind-tables -fcf-protection \
+ -fno-asynchronous-unwind-tables \
-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
-O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
$(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
diff --git a/samples/bpf/tracex2.bpf.c b/samples/bpf/tracex2.bpf.c
deleted file mode 100644
index 0a5c75b367be..000000000000
--- a/samples/bpf/tracex2.bpf.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-#include "vmlinux.h"
-#include <linux/version.h>
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-#include <bpf/bpf_core_read.h>
-
-struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __type(key, long);
- __type(value, long);
- __uint(max_entries, 1024);
-} my_map SEC(".maps");
-
-/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
- * example will no longer be meaningful
- */
-SEC("kprobe/kfree_skb_reason")
-int bpf_prog2(struct pt_regs *ctx)
-{
- long loc = 0;
- long init_val = 1;
- long *value;
-
- /* read ip of kfree_skb_reason caller.
- * non-portable version of __builtin_return_address(0)
- */
- BPF_KPROBE_READ_RET_IP(loc, ctx);
-
- value = bpf_map_lookup_elem(&my_map, &loc);
- if (value)
- *value += 1;
- else
- bpf_map_update_elem(&my_map, &loc, &init_val, BPF_ANY);
- return 0;
-}
-
-static unsigned int log2(unsigned int v)
-{
- unsigned int r;
- unsigned int shift;
-
- r = (v > 0xFFFF) << 4; v >>= r;
- shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
- shift = (v > 0xF) << 2; v >>= shift; r |= shift;
- shift = (v > 0x3) << 1; v >>= shift; r |= shift;
- r |= (v >> 1);
- return r;
-}
-
-static unsigned int log2l(unsigned long v)
-{
- unsigned int hi = v >> 32;
- if (hi)
- return log2(hi) + 32;
- else
- return log2(v);
-}
-
-struct hist_key {
- char comm[16];
- u64 pid_tgid;
- u64 uid_gid;
- u64 index;
-};
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
- __uint(key_size, sizeof(struct hist_key));
- __uint(value_size, sizeof(long));
- __uint(max_entries, 1024);
-} my_hist_map SEC(".maps");
-
-SEC("ksyscall/write")
-int BPF_KSYSCALL(bpf_prog3, unsigned int fd, const char *buf, size_t count)
-{
- long init_val = 1;
- long *value;
- struct hist_key key;
-
- key.index = log2l(count);
- key.pid_tgid = bpf_get_current_pid_tgid();
- key.uid_gid = bpf_get_current_uid_gid();
- bpf_get_current_comm(&key.comm, sizeof(key.comm));
-
- value = bpf_map_lookup_elem(&my_hist_map, &key);
- if (value)
- __sync_fetch_and_add(value, 1);
- else
- bpf_map_update_elem(&my_hist_map, &key, &init_val, BPF_ANY);
- return 0;
-}
-char _license[] SEC("license") = "GPL";
-u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
deleted file mode 100644
index 2131f1648cf1..000000000000
--- a/samples/bpf/tracex2_user.c
+++ /dev/null
@@ -1,187 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <string.h>
-
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include "bpf_util.h"
-
-#define MAX_INDEX 64
-#define MAX_STARS 38
-
-/* my_map, my_hist_map */
-static int map_fd[2];
-
-static void stars(char *str, long val, long max, int width)
-{
- int i;
-
- for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
- str[i] = '*';
- if (val > max)
- str[i - 1] = '+';
- str[i] = '\0';
-}
-
-struct task {
- char comm[16];
- __u64 pid_tgid;
- __u64 uid_gid;
-};
-
-struct hist_key {
- struct task t;
- __u32 index;
-};
-
-#define SIZE sizeof(struct task)
-
-static void print_hist_for_pid(int fd, void *task)
-{
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct hist_key key = {}, next_key;
- long values[nr_cpus];
- char starstr[MAX_STARS];
- long value;
- long data[MAX_INDEX] = {};
- int max_ind = -1;
- long max_value = 0;
- int i, ind;
-
- while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
- if (memcmp(&next_key, task, SIZE)) {
- key = next_key;
- continue;
- }
- bpf_map_lookup_elem(fd, &next_key, values);
- value = 0;
- for (i = 0; i < nr_cpus; i++)
- value += values[i];
- ind = next_key.index;
- data[ind] = value;
- if (value && ind > max_ind)
- max_ind = ind;
- if (value > max_value)
- max_value = value;
- key = next_key;
- }
-
- printf(" syscall write() stats\n");
- printf(" byte_size : count distribution\n");
- for (i = 1; i <= max_ind + 1; i++) {
- stars(starstr, data[i - 1], max_value, MAX_STARS);
- printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
- (1l << i) >> 1, (1l << i) - 1, data[i - 1],
- MAX_STARS, starstr);
- }
-}
-
-static void print_hist(int fd)
-{
- struct hist_key key = {}, next_key;
- static struct task tasks[1024];
- int task_cnt = 0;
- int i;
-
- while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
- int found = 0;
-
- for (i = 0; i < task_cnt; i++)
- if (memcmp(&tasks[i], &next_key, SIZE) == 0)
- found = 1;
- if (!found)
- memcpy(&tasks[task_cnt++], &next_key, SIZE);
- key = next_key;
- }
-
- for (i = 0; i < task_cnt; i++) {
- printf("\npid %d cmd %s uid %d\n",
- (__u32) tasks[i].pid_tgid,
- tasks[i].comm,
- (__u32) tasks[i].uid_gid);
- print_hist_for_pid(fd, &tasks[i]);
- }
-
-}
-
-static void int_exit(int sig)
-{
- print_hist(map_fd[1]);
- exit(0);
-}
-
-int main(int ac, char **argv)
-{
- long key, next_key, value;
- struct bpf_link *links[2];
- struct bpf_program *prog;
- struct bpf_object *obj;
- char filename[256];
- int i, j = 0;
- FILE *f;
-
- snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
- obj = bpf_object__open_file(filename, NULL);
- if (libbpf_get_error(obj)) {
- fprintf(stderr, "ERROR: opening BPF object file failed\n");
- return 0;
- }
-
- /* load BPF program */
- if (bpf_object__load(obj)) {
- fprintf(stderr, "ERROR: loading BPF object file failed\n");
- goto cleanup;
- }
-
- map_fd[0] = bpf_object__find_map_fd_by_name(obj, "my_map");
- map_fd[1] = bpf_object__find_map_fd_by_name(obj, "my_hist_map");
- if (map_fd[0] < 0 || map_fd[1] < 0) {
- fprintf(stderr, "ERROR: finding a map in obj file failed\n");
- goto cleanup;
- }
-
- signal(SIGINT, int_exit);
- signal(SIGTERM, int_exit);
-
- /* start 'ping' in the background to have some kfree_skb_reason
- * events */
- f = popen("ping -4 -c5 localhost", "r");
- (void) f;
-
- /* start 'dd' in the background to have plenty of 'write' syscalls */
- f = popen("dd if=/dev/zero of=/dev/null count=5000000", "r");
- (void) f;
-
- bpf_object__for_each_program(prog, obj) {
- links[j] = bpf_program__attach(prog);
- if (libbpf_get_error(links[j])) {
- fprintf(stderr, "ERROR: bpf_program__attach failed\n");
- links[j] = NULL;
- goto cleanup;
- }
- j++;
- }
-
- for (i = 0; i < 5; i++) {
- key = 0;
- while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
- bpf_map_lookup_elem(map_fd[0], &next_key, &value);
- printf("location 0x%lx count %ld\n", next_key, value);
- key = next_key;
- }
- if (key)
- printf("\n");
- sleep(1);
- }
- print_hist(map_fd[1]);
-
-cleanup:
- for (j--; j >= 0; j--)
- bpf_link__destroy(links[j]);
-
- bpf_object__close(obj);
- return 0;
-}
diff --git a/samples/bpf/tracex4.bpf.c b/samples/bpf/tracex4.bpf.c
index ca826750901a..d786492fd926 100644
--- a/samples/bpf/tracex4.bpf.c
+++ b/samples/bpf/tracex4.bpf.c
@@ -33,13 +33,13 @@ int bpf_prog1(struct pt_regs *ctx)
return 0;
}
-SEC("kretprobe/kmem_cache_alloc_node")
+SEC("kretprobe/kmem_cache_alloc_node_noprof")
int bpf_prog2(struct pt_regs *ctx)
{
long ptr = PT_REGS_RC(ctx);
long ip = 0;
- /* get ip address of kmem_cache_alloc_node() caller */
+ /* get ip address of kmem_cache_alloc_node_noprof() caller */
BPF_KRETPROBE_READ_RET_IP(ip, ctx);
struct pair v = {
diff --git a/samples/kmemleak/kmemleak-test.c b/samples/kmemleak/kmemleak-test.c
index f7470ed85a79..544c36d51d56 100644
--- a/samples/kmemleak/kmemleak-test.c
+++ b/samples/kmemleak/kmemleak-test.c
@@ -79,6 +79,8 @@ static int kmemleak_test_init(void)
per_cpu(kmemleak_test_pointer, i));
}
+ pr_info("__alloc_percpu(64, 4) = %p\n", __alloc_percpu(64, 4));
+
return 0;
}
module_init(kmemleak_test_init);
diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
index e8223c3e781a..f847e832ba14 100644
--- a/samples/landlock/sandboxer.c
+++ b/samples/landlock/sandboxer.c
@@ -14,6 +14,7 @@
#include <fcntl.h>
#include <linux/landlock.h>
#include <linux/prctl.h>
+#include <linux/socket.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
@@ -22,6 +23,7 @@
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>
+#include <stdbool.h>
#ifndef landlock_create_ruleset
static inline int
@@ -55,6 +57,7 @@ static inline int landlock_restrict_self(const int ruleset_fd,
#define ENV_FS_RW_NAME "LL_FS_RW"
#define ENV_TCP_BIND_NAME "LL_TCP_BIND"
#define ENV_TCP_CONNECT_NAME "LL_TCP_CONNECT"
+#define ENV_SCOPED_NAME "LL_SCOPED"
#define ENV_DELIMITER ":"
static int parse_path(char *env_path, const char ***const path_list)
@@ -184,6 +187,55 @@ out_free_name:
return ret;
}
+/* Returns true on error, false otherwise. */
+static bool check_ruleset_scope(const char *const env_var,
+ struct landlock_ruleset_attr *ruleset_attr)
+{
+ char *env_type_scope, *env_type_scope_next, *ipc_scoping_name;
+ bool error = false;
+ bool abstract_scoping = false;
+ bool signal_scoping = false;
+
+ /* Scoping is not supported by the Landlock ABI */
+ if (!(ruleset_attr->scoped &
+ (LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET | LANDLOCK_SCOPE_SIGNAL)))
+ goto out_unset;
+
+ env_type_scope = getenv(env_var);
+ /* Scoping is not requested by the user */
+ if (!env_type_scope || strcmp("", env_type_scope) == 0)
+ goto out_unset;
+
+ env_type_scope = strdup(env_type_scope);
+ env_type_scope_next = env_type_scope;
+ while ((ipc_scoping_name =
+ strsep(&env_type_scope_next, ENV_DELIMITER))) {
+ if (strcmp("a", ipc_scoping_name) == 0 && !abstract_scoping) {
+ abstract_scoping = true;
+ } else if (strcmp("s", ipc_scoping_name) == 0 &&
+ !signal_scoping) {
+ signal_scoping = true;
+ } else {
+ fprintf(stderr, "Unknown or duplicate scope \"%s\"\n",
+ ipc_scoping_name);
+ error = true;
+ goto out_free_name;
+ }
+ }
+
+out_free_name:
+ free(env_type_scope);
+
+out_unset:
+ if (!abstract_scoping)
+ ruleset_attr->scoped &= ~LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET;
+ if (!signal_scoping)
+ ruleset_attr->scoped &= ~LANDLOCK_SCOPE_SIGNAL;
+
+ unsetenv(env_var);
+ return error;
+}
+
/* clang-format off */
#define ACCESS_FS_ROUGHLY_READ ( \
@@ -208,7 +260,7 @@ out_free_name:
/* clang-format on */
-#define LANDLOCK_ABI_LAST 5
+#define LANDLOCK_ABI_LAST 6
int main(const int argc, char *const argv[], char *const *const envp)
{
@@ -223,14 +275,16 @@ int main(const int argc, char *const argv[], char *const *const envp)
.handled_access_fs = access_fs_rw,
.handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
+ LANDLOCK_SCOPE_SIGNAL,
};
if (argc < 2) {
fprintf(stderr,
- "usage: %s=\"...\" %s=\"...\" %s=\"...\" %s=\"...\"%s "
+ "usage: %s=\"...\" %s=\"...\" %s=\"...\" %s=\"...\" %s=\"...\" %s "
"<cmd> [args]...\n\n",
ENV_FS_RO_NAME, ENV_FS_RW_NAME, ENV_TCP_BIND_NAME,
- ENV_TCP_CONNECT_NAME, argv[0]);
+ ENV_TCP_CONNECT_NAME, ENV_SCOPED_NAME, argv[0]);
fprintf(stderr,
"Execute a command in a restricted environment.\n\n");
fprintf(stderr,
@@ -251,15 +305,18 @@ int main(const int argc, char *const argv[], char *const *const envp)
fprintf(stderr,
"* %s: list of ports allowed to connect (client).\n",
ENV_TCP_CONNECT_NAME);
+ fprintf(stderr, "* %s: list of scoped IPCs.\n",
+ ENV_SCOPED_NAME);
fprintf(stderr,
"\nexample:\n"
"%s=\"${PATH}:/lib:/usr:/proc:/etc:/dev/urandom\" "
"%s=\"/dev/null:/dev/full:/dev/zero:/dev/pts:/tmp\" "
"%s=\"9418\" "
"%s=\"80:443\" "
+ "%s=\"a:s\" "
"%s bash -i\n\n",
ENV_FS_RO_NAME, ENV_FS_RW_NAME, ENV_TCP_BIND_NAME,
- ENV_TCP_CONNECT_NAME, argv[0]);
+ ENV_TCP_CONNECT_NAME, ENV_SCOPED_NAME, argv[0]);
fprintf(stderr,
"This sandboxer can use Landlock features "
"up to ABI version %d.\n",
@@ -327,6 +384,11 @@ int main(const int argc, char *const argv[], char *const *const envp)
/* Removes LANDLOCK_ACCESS_FS_IOCTL_DEV for ABI < 5 */
ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_IOCTL_DEV;
+ __attribute__((fallthrough));
+ case 5:
+ /* Removes LANDLOCK_SCOPE_* for ABI < 6 */
+ ruleset_attr.scoped &= ~(LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
+ LANDLOCK_SCOPE_SIGNAL);
fprintf(stderr,
"Hint: You should update the running kernel "
"to leverage Landlock features "
@@ -358,6 +420,9 @@ int main(const int argc, char *const argv[], char *const *const envp)
~LANDLOCK_ACCESS_NET_CONNECT_TCP;
}
+ if (check_ruleset_scope(ENV_SCOPED_NAME, &ruleset_attr))
+ return 1;
+
ruleset_fd =
landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
if (ruleset_fd < 0) {
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index b382c696c877..59eefe2fed10 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -927,7 +927,6 @@ static const struct file_operations mtty_save_fops = {
.unlocked_ioctl = mtty_precopy_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = mtty_release_migf,
- .llseek = no_llseek,
};
static void mtty_save_state(struct mdev_state *mdev_state)
@@ -1082,7 +1081,6 @@ static const struct file_operations mtty_resume_fops = {
.owner = THIS_MODULE,
.write = mtty_resume_write,
.release = mtty_release_migf,
- .llseek = no_llseek,
};
static struct mtty_migration_file *
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 3500a3d62f0d..785a491e5996 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -64,3 +64,11 @@ ld-version := $(shell,set -- $(ld-info) && echo $2)
cc-option-bit = $(if-success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null,$(1))
m32-flag := $(cc-option-bit,-m32)
m64-flag := $(cc-option-bit,-m64)
+
+# $(rustc-option,<flag>)
+# Return y if the Rust compiler supports <flag>, n otherwise
+# Calls to this should be guarded so that they are not evaluated if
+# CONFIG_RUST_IS_AVAILABLE is not set.
+# If you are testing for unstable features, consider testing RUSTC_VERSION
+# instead, as features may have different completeness while available.
+rustc-option = $(success,trap "rm -rf .tmp_$$" EXIT; mkdir .tmp_$$; $(RUSTC) $(1) --crate-type=rlib /dev/null --out-dir=.tmp_$$ -o .tmp_$$/tmp.rlib)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a5ac8ed1936f..8f423a1faf50 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -41,20 +41,6 @@ include $(srctree)/scripts/Makefile.compiler
include $(kbuild-file)
include $(srctree)/scripts/Makefile.lib
-# Do not include hostprogs rules unless needed.
-# $(sort ...) is used here to remove duplicated words and excessive spaces.
-hostprogs := $(sort $(hostprogs))
-ifneq ($(hostprogs),)
-include $(srctree)/scripts/Makefile.host
-endif
-
-# Do not include userprogs rules unless needed.
-# $(sort ...) is used here to remove duplicated words and excessive spaces.
-userprogs := $(sort $(userprogs))
-ifneq ($(userprogs),)
-include $(srctree)/scripts/Makefile.userprogs
-endif
-
ifndef obj
$(warning kbuild: Makefile.build is included improperly)
endif
@@ -71,7 +57,6 @@ endif
# subdir-builtin and subdir-modorder may contain duplications. Use $(sort ...)
subdir-builtin := $(sort $(filter %/built-in.a, $(real-obj-y)))
subdir-modorder := $(sort $(filter %/modules.order, $(obj-m)))
-subdir-dtbslist := $(sort $(filter %/dtbs-list, $(dtb-y)))
targets-for-builtin := $(extra-y)
@@ -288,10 +273,15 @@ rust_common_cmd = \
# would not match each other.
quiet_cmd_rustc_o_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
- cmd_rustc_o_rs = $(rust_common_cmd) --emit=obj=$@ $<
+ cmd_rustc_o_rs = $(rust_common_cmd) --emit=obj=$@ $< $(cmd_objtool)
+
+define rule_rustc_o_rs
+ $(call cmd_and_fixdep,rustc_o_rs)
+ $(call cmd,gen_objtooldep)
+endef
$(obj)/%.o: $(obj)/%.rs FORCE
- +$(call if_changed_dep,rustc_o_rs)
+ +$(call if_changed_rule,rustc_o_rs)
quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
cmd_rustc_rsi_rs = \
@@ -363,7 +353,7 @@ $(obj)/%.o: $(obj)/%.S FORCE
targets += $(filter-out $(subdir-builtin), $(real-obj-y))
targets += $(filter-out $(subdir-modorder), $(real-obj-m))
-targets += $(real-dtb-y) $(lib-y) $(always-y)
+targets += $(lib-y) $(always-y)
# Linker scripts preprocessor (.lds.S -> .lds)
# ---------------------------------------------------------------------------
@@ -389,7 +379,6 @@ $(obj)/%.asn1.c $(obj)/%.asn1.h: $(src)/%.asn1 $(objtree)/scripts/asn1_compiler
# To build objects in subdirs, we need to descend into the directories
$(subdir-builtin): $(obj)/%/built-in.a: $(obj)/% ;
$(subdir-modorder): $(obj)/%/modules.order: $(obj)/% ;
-$(subdir-dtbslist): $(obj)/%/dtbs-list: $(obj)/% ;
#
# Rule to compile a set of .o files into one .a file (without symbol table)
@@ -405,12 +394,8 @@ quiet_cmd_ar_builtin = AR $@
$(obj)/built-in.a: $(real-obj-y) FORCE
$(call if_changed,ar_builtin)
-#
-# Rule to create modules.order and dtbs-list
-#
-# This is a list of build artifacts (module or dtb) from the current Makefile
-# and its sub-directories. The timestamp should be updated when any of the
-# member files.
+# This is a list of build artifacts from the current Makefile and its
+# sub-directories. The timestamp should be updated when any of the member
+# files changes.
cmd_gen_order = { $(foreach m, $(real-prereqs), \
$(if $(filter %/$(notdir $@), $m), cat $m, echo $m);) :; } \
@@ -419,9 +404,6 @@ cmd_gen_order = { $(foreach m, $(real-prereqs), \
$(obj)/modules.order: $(obj-m) FORCE
$(call if_changed,gen_order)
-$(obj)/dtbs-list: $(dtb-y) FORCE
- $(call if_changed,gen_order)
-
#
# Rule to compile a set of .o files into one .a file (with symbol table)
#
@@ -450,15 +432,26 @@ intermediate_targets = $(foreach sfx, $(2), \
$(patsubst %$(strip $(1)),%$(sfx), \
$(filter %$(strip $(1)), $(targets))))
# %.asn1.o <- %.asn1.[ch] <- %.asn1
-# %.dtb.o <- %.dtb.S <- %.dtb <- %.dts
-# %.dtbo.o <- %.dtbo.S <- %.dtbo <- %.dtso
-# %.lex.o <- %.lex.c <- %.l
-# %.tab.o <- %.tab.[ch] <- %.y
-targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h) \
- $(call intermediate_targets, .dtb.o, .dtb.S .dtb) \
- $(call intermediate_targets, .dtbo.o, .dtbo.S .dtbo) \
- $(call intermediate_targets, .lex.o, .lex.c) \
- $(call intermediate_targets, .tab.o, .tab.c .tab.h)
+targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h)
+
+# Include additional build rules when necessary
+# ---------------------------------------------------------------------------
+
+# $(sort ...) is used here to remove duplicated words and excessive spaces.
+hostprogs := $(sort $(hostprogs))
+ifneq ($(hostprogs),)
+include $(srctree)/scripts/Makefile.host
+endif
+
+# $(sort ...) is used here to remove duplicated words and excessive spaces.
+userprogs := $(sort $(userprogs))
+ifneq ($(userprogs),)
+include $(srctree)/scripts/Makefile.userprogs
+endif
+
+ifneq ($(need-dtbslist)$(dtb-y)$(dtb-)$(filter %.dtb %.dtb.o %.dtbo.o,$(targets)),)
+include $(srctree)/scripts/Makefile.dtbs
+endif
# Build
# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
index 92be0c9a13ee..057305eae85c 100644
--- a/scripts/Makefile.compiler
+++ b/scripts/Makefile.compiler
@@ -72,3 +72,18 @@ clang-min-version = $(call test-ge, $(CONFIG_CLANG_VERSION), $1)
# ld-option
# Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
+
+# __rustc-option
+# Usage: MY_RUSTFLAGS += $(call __rustc-option,$(RUSTC),$(MY_RUSTFLAGS),-Cinstrument-coverage,-Zinstrument-coverage)
+__rustc-option = $(call try-run,\
+ $(1) $(2) $(3) --crate-type=rlib /dev/null --out-dir=$$TMPOUT -o "$$TMP",$(3),$(4))
+
+# rustc-option
+# Usage: rustflags-y += $(call rustc-option,-Cinstrument-coverage,-Zinstrument-coverage)
+rustc-option = $(call __rustc-option, $(RUSTC),\
+ $(KBUILD_RUSTFLAGS),$(1),$(2))
+
+# rustc-option-yn
+# Usage: flag := $(call rustc-option-yn,-Cinstrument-coverage)
+rustc-option-yn = $(call try-run,\
+ $(RUSTC) $(KBUILD_RUSTFLAGS) $(1) --crate-type=rlib /dev/null --out-dir=$$TMPOUT -o "$$TMP",y,n)
diff --git a/scripts/Makefile.dtbs b/scripts/Makefile.dtbs
new file mode 100644
index 000000000000..46009d5f1486
--- /dev/null
+++ b/scripts/Makefile.dtbs
@@ -0,0 +1,142 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# If CONFIG_OF_ALL_DTBS is enabled, all DT blobs are built
+dtb-$(CONFIG_OF_ALL_DTBS) += $(dtb-)
+
+# Composite DTB (i.e. DTB constructed by overlay)
+multi-dtb-y := $(call multi-search, $(dtb-y), .dtb, -dtbs)
+# Primitive DTB compiled from *.dts
+real-dtb-y := $(call real-search, $(dtb-y), .dtb, -dtbs)
+# Base DTB that overlay is applied onto
+base-dtb-y := $(filter %.dtb, $(call real-search, $(multi-dtb-y), .dtb, -dtbs))
+
+dtb-y := $(addprefix $(obj)/, $(dtb-y))
+multi-dtb-y := $(addprefix $(obj)/, $(multi-dtb-y))
+real-dtb-y := $(addprefix $(obj)/, $(real-dtb-y))
+
+always-y += $(dtb-y)
+targets += $(real-dtb-y)
+
+# dtbs-list
+# ---------------------------------------------------------------------------
+
+ifdef need-dtbslist
+subdir-dtbslist := $(addsuffix /dtbs-list, $(subdir-ym))
+dtb-y += $(subdir-dtbslist)
+always-y += $(obj)/dtbs-list
+endif
+
+$(subdir-dtbslist): $(obj)/%/dtbs-list: $(obj)/% ;
+
+$(obj)/dtbs-list: $(dtb-y) FORCE
+ $(call if_changed,gen_order)
+
+# Assembly file to wrap dtb(o)
+# ---------------------------------------------------------------------------
+
+# Generate an assembly file to wrap the output of the device tree compiler
+quiet_cmd_wrap_S_dtb = WRAP $@
+ cmd_wrap_S_dtb = { \
+ symbase=__$(patsubst .%,%,$(suffix $<))_$(subst -,_,$(notdir $*)); \
+ echo '\#include <asm-generic/vmlinux.lds.h>'; \
+ echo '.section .dtb.init.rodata,"a"'; \
+ echo '.balign STRUCT_ALIGNMENT'; \
+ echo ".global $${symbase}_begin"; \
+ echo "$${symbase}_begin:"; \
+ echo '.incbin "$<" '; \
+ echo ".global $${symbase}_end"; \
+ echo "$${symbase}_end:"; \
+ echo '.balign STRUCT_ALIGNMENT'; \
+ } > $@
+
+$(obj)/%.dtb.S: $(obj)/%.dtb FORCE
+ $(call if_changed,wrap_S_dtb)
+
+$(obj)/%.dtbo.S: $(obj)/%.dtbo FORCE
+ $(call if_changed,wrap_S_dtb)
+
+# Schema check
+# ---------------------------------------------------------------------------
+
+ifneq ($(CHECK_DTBS),)
+DT_CHECKER ?= dt-validate
+DT_CHECKER_FLAGS ?= $(if $(DT_SCHEMA_FILES),-l $(DT_SCHEMA_FILES),-m)
+DT_BINDING_DIR := Documentation/devicetree/bindings
+DT_TMP_SCHEMA := $(objtree)/$(DT_BINDING_DIR)/processed-schema.json
+dtb-check-enabled = $(if $(filter %.dtb, $@),y)
+endif
+
+quiet_dtb_check_tag = $(if $(dtb-check-enabled),[C], )
+cmd_dtb_check = $(if $(dtb-check-enabled),; $(DT_CHECKER) $(DT_CHECKER_FLAGS) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA) $@ || true)
+
+# Overlay
+# ---------------------------------------------------------------------------
+
+# NOTE:
+# Do not replace $(filter %.dtb %.dtbo, $^) with $(real-prereqs). When a single
+# DTB is turned into a multi-blob DTB, $^ will contain header file dependencies
+# recorded in the .*.cmd file.
+quiet_cmd_fdtoverlay = OVL $(quiet_dtb_check_tag) $@
+ cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(filter %.dtb %.dtbo, $^) $(cmd_dtb_check)
+
+$(multi-dtb-y): $(DT_TMP_SCHEMA) FORCE
+ $(call if_changed,fdtoverlay)
+$(call multi_depend, $(multi-dtb-y), .dtb, -dtbs)
+
+# DTC
+# ---------------------------------------------------------------------------
+
+DTC ?= $(objtree)/scripts/dtc/dtc
+DTC_FLAGS += -Wno-unique_unit_address
+
+# Disable noisy checks by default
+ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
+DTC_FLAGS += -Wno-unit_address_vs_reg \
+ -Wno-avoid_unnecessary_addr_size \
+ -Wno-alias_paths \
+ -Wno-graph_child_address \
+ -Wno-simple_bus_reg
+else
+DTC_FLAGS += -Wunique_unit_address_if_enabled
+endif
+
+ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),)
+DTC_FLAGS += -Wnode_name_chars_strict \
+ -Wproperty_name_chars_strict \
+ -Wunique_unit_address
+endif
+
+DTC_FLAGS += $(DTC_FLAGS_$(target-stem))
+
+# Set -@ if the target is a base DTB that overlay is applied onto
+DTC_FLAGS += $(if $(filter $(patsubst $(obj)/%,%,$@), $(base-dtb-y)), -@)
+
+DTC_INCLUDE := $(srctree)/scripts/dtc/include-prefixes
+
+dtc_cpp_flags = -Wp,-MMD,$(depfile).pre.tmp -nostdinc -I $(DTC_INCLUDE) -undef -D__DTS__
+
+dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
+
+quiet_cmd_dtc = DTC $(quiet_dtb_check_tag) $@
+ cmd_dtc = \
+ $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
+ $(DTC) -o $@ -b 0 $(addprefix -i,$(dir $<) $(DTC_INCLUDE)) \
+ $(DTC_FLAGS) -d $(depfile).dtc.tmp $(dtc-tmp) ; \
+ cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile) \
+ $(cmd_dtb_check)
+
+$(obj)/%.dtb: $(obj)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
+ $(call if_changed_dep,dtc)
+
+$(obj)/%.dtbo: $(src)/%.dtso $(DTC) FORCE
+ $(call if_changed_dep,dtc)
+
+# targets
+# ---------------------------------------------------------------------------
+
+targets += $(always-y)
+
+# %.dtb.o <- %.dtb.S <- %.dtb <- %.dts
+# %.dtbo.o <- %.dtbo.S <- %.dtbo <- %.dtso
+targets += $(call intermediate_targets, .dtb.o, .dtb.S .dtb) \
+ $(call intermediate_targets, .dtbo.o, .dtbo.S .dtbo)
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index e85be7721a48..e01c13a588dd 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -160,3 +160,8 @@ $(host-rust): $(obj)/%: $(src)/%.rs FORCE
targets += $(host-csingle) $(host-cmulti) $(host-cobjs) \
$(host-cxxmulti) $(host-cxxobjs) $(host-rust)
+
+# %.lex.o <- %.lex.c <- %.l
+# %.tab.o <- %.tab.[ch] <- %.y
+targets += $(call intermediate_targets, .lex.o, .lex.c) \
+ $(call intermediate_targets, .tab.o, .tab.c .tab.h)
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 390658a2d5b7..693dbbebebba 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -12,6 +12,11 @@ endif
KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+rustc-param = $(call rustc-option, -Cllvm-args=-$(1),)
+
+check-args = $(foreach arg,$(2),$(call $(1),$(arg)))
+
+kasan_params :=
ifdef CONFIG_KASAN_STACK
stack_enable := 1
@@ -22,57 +27,78 @@ endif
ifdef CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_INLINE
+ # When the number of memory accesses in a function is less than this
+ # call threshold number, the compiler will use inline instrumentation.
+ # 10000 is chosen offhand as a sufficiently large number to make all
+ # kernel functions be instrumented inline.
call_threshold := 10000
else
call_threshold := 0
endif
-CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
-
-# -fasan-shadow-offset fails without -fsanitize
-CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
- -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
- $(call cc-option, -fsanitize=kernel-address \
- -mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET)))
-
-ifeq ($(strip $(CFLAGS_KASAN_SHADOW)),)
- CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
-else
- # Now add all the compiler specific options that are valid standalone
- CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
- $(call cc-param,asan-globals=1) \
- $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
- $(call cc-param,asan-instrument-allocas=1)
-endif
-
-CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+# First, enable -fsanitize=kernel-address together with providing the shadow
+# mapping offset, as for GCC, -fasan-shadow-offset fails without -fsanitize
+# (GCC accepts the shadow mapping offset via -fasan-shadow-offset instead of
+# a --param like the other KASAN parameters).
+# Instead of ifdef-checking the compiler, rely on cc-option.
+CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
+ -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
+ $(call cc-option, -fsanitize=kernel-address \
+ -mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET)))
+
+# The minimum supported `rustc` version has a minimum supported LLVM
+# version late enough that we can assume support for -asan-mapping-offset.
+RUSTFLAGS_KASAN := -Zsanitizer=kernel-address \
+ -Zsanitizer-recover=kernel-address \
+ -Cllvm-args=-asan-mapping-offset=$(KASAN_SHADOW_OFFSET)
+
+# Now, add other parameters enabled similarly in GCC, Clang, and rustc.
+# As some of them are not supported by older compilers, these will be filtered
+# through `cc-param` or `rustc-param` as applicable.
+kasan_params += asan-instrumentation-with-call-threshold=$(call_threshold) \
+ asan-stack=$(stack_enable) \
+ asan-instrument-allocas=1 \
+ asan-globals=1
# Instrument memcpy/memset/memmove calls by using instrumented __asan_mem*()
# instead. With compilers that don't support this option, compiler-inserted
# memintrinsics won't be checked by KASAN on GENERIC_ENTRY architectures.
-CFLAGS_KASAN += $(call cc-param,asan-kernel-mem-intrinsic-prefix=1)
+kasan_params += asan-kernel-mem-intrinsic-prefix=1
endif # CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_SW_TAGS
+CFLAGS_KASAN := -fsanitize=kernel-hwaddress
+
+# This sets flags that will enable SW_TAGS KASAN once it is supported in Rust.
+# These do not work today; their use is guarded against in the dependencies for
+# CONFIG_RUST.
+RUSTFLAGS_KASAN := -Zsanitizer=kernel-hwaddress \
+ -Zsanitizer-recover=kernel-hwaddress
+
ifdef CONFIG_KASAN_INLINE
- instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
+ kasan_params += hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
else
- instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
+ kasan_params += hwasan-instrument-with-calls=1
endif
-CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
- $(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
- $(call cc-param,hwasan-use-short-granules=0) \
- $(call cc-param,hwasan-inline-all-checks=0) \
- $(instrumentation_flags)
+kasan_params += hwasan-instrument-stack=$(stack_enable) \
+ hwasan-use-short-granules=0 \
+ hwasan-inline-all-checks=0
# Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*().
ifeq ($(call clang-min-version, 150000)$(call gcc-min-version, 130000),y)
-CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
+ kasan_params += hwasan-kernel-mem-intrinsic-prefix=1
endif
endif # CONFIG_KASAN_SW_TAGS
-export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
+# Add all as-supported KASAN LLVM parameters requested by the configuration.
+CFLAGS_KASAN += $(call check-args, cc-param, $(kasan_params))
+
+ifdef CONFIG_RUST
+ # Avoid calling `rustc-param` unless Rust is enabled.
+ RUSTFLAGS_KASAN += $(call check-args, rustc-param, $(kasan_params))
+endif # CONFIG_RUST
+
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE RUSTFLAGS_KASAN
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 207325eaf1d1..01a9f567d5af 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -45,11 +45,6 @@ else
obj-y := $(filter-out %/, $(obj-y))
endif
-ifdef need-dtbslist
-dtb-y += $(addsuffix /dtbs-list, $(subdir-ym))
-always-y += dtbs-list
-endif
-
# Expand $(foo-objs) $(foo-y) etc. by replacing their individuals
suffix-search = $(strip $(foreach s, $3, $($(1:%$(strip $2)=%$s))))
# List composite targets that are constructed by combining other targets
@@ -80,19 +75,6 @@ always-y += $(hostprogs-always-y) $(hostprogs-always-m)
userprogs += $(userprogs-always-y) $(userprogs-always-m)
always-y += $(userprogs-always-y) $(userprogs-always-m)
-# DTB
-# If CONFIG_OF_ALL_DTBS is enabled, all DT blobs are built
-dtb-$(CONFIG_OF_ALL_DTBS) += $(dtb-)
-
-# Composite DTB (i.e. DTB constructed by overlay)
-multi-dtb-y := $(call multi-search, $(dtb-y), .dtb, -dtbs)
-# Primitive DTB compiled from *.dts
-real-dtb-y := $(call real-search, $(dtb-y), .dtb, -dtbs)
-# Base DTB that overlay is applied onto
-base-dtb-y := $(filter %.dtb, $(call real-search, $(multi-dtb-y), .dtb, -dtbs))
-
-always-y += $(dtb-y)
-
# Add subdir path
ifneq ($(obj),.)
@@ -104,9 +86,6 @@ lib-y := $(addprefix $(obj)/,$(lib-y))
real-obj-y := $(addprefix $(obj)/,$(real-obj-y))
real-obj-m := $(addprefix $(obj)/,$(real-obj-m))
multi-obj-m := $(addprefix $(obj)/, $(multi-obj-m))
-dtb-y := $(addprefix $(obj)/, $(dtb-y))
-multi-dtb-y := $(addprefix $(obj)/, $(multi-dtb-y))
-real-dtb-y := $(addprefix $(obj)/, $(real-dtb-y))
subdir-ym := $(addprefix $(obj)/,$(subdir-ym))
endif
@@ -167,6 +146,9 @@ ifneq ($(CONFIG_KASAN_HW_TAGS),y)
_c_flags += $(if $(patsubst n%,, \
$(KASAN_SANITIZE_$(target-stem).o)$(KASAN_SANITIZE)$(is-kernel-object)), \
$(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
+_rust_flags += $(if $(patsubst n%,, \
+ $(KASAN_SANITIZE_$(target-stem).o)$(KASAN_SANITIZE)$(is-kernel-object)), \
+ $(RUSTFLAGS_KASAN))
endif
endif
@@ -238,7 +220,7 @@ modkern_rustflags = \
modkern_aflags = $(if $(part-of-module), \
$(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE), \
- $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL))
+ $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL) $(modfile_flags))
c_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
-include $(srctree)/include/linux/compiler_types.h \
@@ -248,19 +230,13 @@ c_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
rust_flags = $(_rust_flags) $(modkern_rustflags) @$(objtree)/include/generated/rustc_cfg
a_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
- $(_a_flags) $(modkern_aflags)
+ $(_a_flags) $(modkern_aflags) $(modname_flags)
cpp_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(_cpp_flags)
ld_flags = $(KBUILD_LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F))
-DTC_INCLUDE := $(srctree)/scripts/dtc/include-prefixes
-
-dtc_cpp_flags = -Wp,-MMD,$(depfile).pre.tmp -nostdinc \
- $(addprefix -I,$(DTC_INCLUDE)) \
- -undef -D__DTS__
-
ifdef CONFIG_OBJTOOL
objtool := $(objtree)/tools/objtool/objtool
@@ -350,94 +326,6 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
quiet_cmd_gzip = GZIP $@
cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@
-# DTC
-# ---------------------------------------------------------------------------
-DTC ?= $(objtree)/scripts/dtc/dtc
-DTC_FLAGS += \
- -Wno-unique_unit_address
-
-# Disable noisy checks by default
-ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
-DTC_FLAGS += -Wno-unit_address_vs_reg \
- -Wno-avoid_unnecessary_addr_size \
- -Wno-alias_paths \
- -Wno-graph_child_address \
- -Wno-simple_bus_reg
-else
-DTC_FLAGS += \
- -Wunique_unit_address_if_enabled
-endif
-
-ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),)
-DTC_FLAGS += -Wnode_name_chars_strict \
- -Wproperty_name_chars_strict \
- -Wunique_unit_address
-endif
-
-DTC_FLAGS += $(DTC_FLAGS_$(target-stem))
-
-# Set -@ if the target is a base DTB that overlay is applied onto
-DTC_FLAGS += $(if $(filter $(patsubst $(obj)/%,%,$@), $(base-dtb-y)), -@)
-
-# Generate an assembly file to wrap the output of the device tree compiler
-quiet_cmd_wrap_S_dtb = WRAP $@
- cmd_wrap_S_dtb = { \
- symbase=__$(patsubst .%,%,$(suffix $<))_$(subst -,_,$(notdir $*)); \
- echo '\#include <asm-generic/vmlinux.lds.h>'; \
- echo '.section .dtb.init.rodata,"a"'; \
- echo '.balign STRUCT_ALIGNMENT'; \
- echo ".global $${symbase}_begin"; \
- echo "$${symbase}_begin:"; \
- echo '.incbin "$<" '; \
- echo ".global $${symbase}_end"; \
- echo "$${symbase}_end:"; \
- echo '.balign STRUCT_ALIGNMENT'; \
- } > $@
-
-$(obj)/%.dtb.S: $(obj)/%.dtb FORCE
- $(call if_changed,wrap_S_dtb)
-
-$(obj)/%.dtbo.S: $(obj)/%.dtbo FORCE
- $(call if_changed,wrap_S_dtb)
-
-quiet_dtb_check_tag = $(if $(dtb-check-enabled),[C], )
-cmd_dtb_check = $(if $(dtb-check-enabled),; $(DT_CHECKER) $(DT_CHECKER_FLAGS) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA) $@ || true)
-
-quiet_cmd_dtc = DTC $(quiet_dtb_check_tag) $@
-cmd_dtc = $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
- $(DTC) -o $@ -b 0 \
- $(addprefix -i,$(dir $<) $(DTC_INCLUDE)) $(DTC_FLAGS) \
- -d $(depfile).dtc.tmp $(dtc-tmp) ; \
- cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile) \
- $(cmd_dtb_check)
-
-# NOTE:
-# Do not replace $(filter %.dtb %.dtbo, $^) with $(real-prereqs). When a single
-# DTB is turned into a multi-blob DTB, $^ will contain header file dependencies
-# recorded in the .*.cmd file.
-quiet_cmd_fdtoverlay = OVL $(quiet_dtb_check_tag) $@
- cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(filter %.dtb %.dtbo, $^) $(cmd_dtb_check)
-
-$(multi-dtb-y): FORCE
- $(call if_changed,fdtoverlay)
-$(call multi_depend, $(multi-dtb-y), .dtb, -dtbs)
-
-ifneq ($(CHECK_DTBS),)
-DT_CHECKER ?= dt-validate
-DT_CHECKER_FLAGS ?= $(if $(DT_SCHEMA_FILES),-l $(DT_SCHEMA_FILES),-m)
-DT_BINDING_DIR := Documentation/devicetree/bindings
-DT_TMP_SCHEMA := $(objtree)/$(DT_BINDING_DIR)/processed-schema.json
-dtb-check-enabled = $(if $(filter %.dtb, $@),y)
-endif
-
-$(obj)/%.dtb: $(obj)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
- $(call if_changed_dep,dtc)
-
-$(obj)/%.dtbo: $(src)/%.dtso $(DTC) FORCE
- $(call if_changed_dep,dtc)
-
-dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
-
# Bzip2
# ---------------------------------------------------------------------------
@@ -530,14 +418,17 @@ quiet_cmd_fit = FIT $@
# XZ
# ---------------------------------------------------------------------------
-# Use xzkern to compress the kernel image and xzmisc to compress other things.
+# Use xzkern or xzkern_with_size to compress the kernel image and xzmisc to
+# compress other things.
#
# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage
# of the kernel decompressor. A BCJ filter is used if it is available for
-# the target architecture. xzkern also appends uncompressed size of the data
-# using size_append. The .xz format has the size information available at
-# the end of the file too, but it's in more complex format and it's good to
-# avoid changing the part of the boot code that reads the uncompressed size.
+# the target architecture.
+#
+# xzkern_with_size also appends the uncompressed size of the data using
+# size_append. The .xz format has the size information available at the end
+# of the file too, but it's in a more complex format and it's good to avoid
+# changing the part of the boot code that reads the uncompressed size.
# Note that the bytes added by size_append will make the xz tool think that
# the file is corrupt. This is expected.
#
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index 306a6bb86e4d..1482884ec3ca 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -30,8 +30,11 @@ quiet_cmd_cc_o_c = CC [M] $@
%.mod.o: %.mod.c FORCE
$(call if_changed_dep,cc_o_c)
+$(extmod_prefix).module-common.o: $(srctree)/scripts/module-common.c FORCE
+ $(call if_changed_dep,cc_o_c)
+
quiet_cmd_ld_ko_o = LD [M] $@
- cmd_ld_ko_o += \
+ cmd_ld_ko_o = \
$(LD) -r $(KBUILD_LDFLAGS) \
$(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
-T scripts/module.lds -o $@ $(filter %.o, $^)
@@ -54,13 +57,13 @@ if_changed_except = $(if $(call newer_prereqs_except,$(2))$(cmd-check), \
printf '%s\n' 'savedcmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:)
# Re-generate module BTFs if either module's .ko or vmlinux changed
-%.ko: %.o %.mod.o scripts/module.lds $(and $(CONFIG_DEBUG_INFO_BTF_MODULES),$(KBUILD_BUILTIN),vmlinux) FORCE
+%.ko: %.o %.mod.o $(extmod_prefix).module-common.o scripts/module.lds $(and $(CONFIG_DEBUG_INFO_BTF_MODULES),$(KBUILD_BUILTIN),vmlinux) FORCE
+$(call if_changed_except,ld_ko_o,vmlinux)
ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+$(if $(newer-prereqs),$(call cmd,btf_ko))
endif
-targets += $(modules:%.o=%.ko) $(modules:%.o=%.mod.o)
+targets += $(modules:%.o=%.ko) $(modules:%.o=%.mod.o) $(extmod_prefix).module-common.o
# Add FORCE to the prerequisites of a target to force it to be always rebuilt.
# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 0afd75472679..d97720943189 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -30,10 +30,12 @@ $(MODLIB)/modules.order: modules.order FORCE
quiet_cmd_install_modorder = INSTALL $@
cmd_install_modorder = sed 's:^\(.*\)\.o$$:kernel/\1.ko:' $< > $@
-# Install modules.builtin(.modinfo) even when CONFIG_MODULES is disabled.
+# Install modules.builtin(.modinfo,.ranges) even when CONFIG_MODULES is disabled.
install-y += $(addprefix $(MODLIB)/, modules.builtin modules.builtin.modinfo)
-$(addprefix $(MODLIB)/, modules.builtin modules.builtin.modinfo): $(MODLIB)/%: % FORCE
+install-$(CONFIG_BUILTIN_MODULE_RANGES) += $(MODLIB)/modules.builtin.ranges
+
+$(addprefix $(MODLIB)/, modules.builtin modules.builtin.modinfo modules.builtin.ranges): $(MODLIB)/%: % FORCE
$(call cmd,install)
endif
@@ -51,9 +53,11 @@ $(foreach x, % :, $(if $(findstring $x, $(dst)), \
$(error module installation path cannot contain '$x')))
suffix-y :=
+ifdef CONFIG_MODULE_COMPRESS_ALL
suffix-$(CONFIG_MODULE_COMPRESS_GZIP) := .gz
suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz
suffix-$(CONFIG_MODULE_COMPRESS_ZSTD) := .zst
+endif
modules := $(patsubst $(extmod_prefix)%.o, $(dst)/%.ko$(suffix-y), $(modules))
install-$(CONFIG_MODULES) += $(modules)
@@ -146,7 +150,7 @@ quiet_cmd_gzip = GZIP $@
quiet_cmd_xz = XZ $@
cmd_xz = $(XZ) --check=crc32 --lzma2=dict=1MiB -f $<
quiet_cmd_zstd = ZSTD $@
- cmd_zstd = $(ZSTD) -T0 --rm -f -q $<
+ cmd_zstd = $(ZSTD) --rm -f -q $<
$(dst)/%.ko.gz: $(dst)/%.ko FORCE
$(call cmd,gzip)
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 4a80584ec771..11d53f240a2b 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -147,8 +147,7 @@ snap-pkg:
PHONY += pacman-pkg
pacman-pkg:
@ln -srf $(srctree)/scripts/package/PKGBUILD $(objtree)/PKGBUILD
- +objtree="$(realpath $(objtree))" \
- BUILDDIR="$(realpath $(objtree))/pacman" \
+ +BUILDDIR="$(realpath $(objtree))/pacman" \
CARCH="$(UTS_MACHINE)" \
KBUILD_MAKEFLAGS="$(MAKEFLAGS)" \
KBUILD_REVISION="$(shell $(srctree)/scripts/build-version)" \
diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
index 5ceecbed31eb..1284f05555b9 100644
--- a/scripts/Makefile.vmlinux
+++ b/scripts/Makefile.vmlinux
@@ -33,6 +33,24 @@ targets += vmlinux
vmlinux: scripts/link-vmlinux.sh vmlinux.o $(KBUILD_LDS) FORCE
+$(call if_changed_dep,link_vmlinux)
+# module.builtin.ranges
+# ---------------------------------------------------------------------------
+ifdef CONFIG_BUILTIN_MODULE_RANGES
+__default: modules.builtin.ranges
+
+quiet_cmd_modules_builtin_ranges = GEN $@
+ cmd_modules_builtin_ranges = gawk -f $(real-prereqs) > $@
+
+targets += modules.builtin.ranges
+modules.builtin.ranges: $(srctree)/scripts/generate_builtin_ranges.awk \
+ modules.builtin vmlinux.map vmlinux.o.map FORCE
+ $(call if_changed,modules_builtin_ranges)
+
+vmlinux.map: vmlinux
+ @:
+
+endif
+
# Add FORCE to the prerequisites of a target to force it to be always rebuilt.
# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
index d64070b6b4bc..0b6e2ebf60dc 100644
--- a/scripts/Makefile.vmlinux_o
+++ b/scripts/Makefile.vmlinux_o
@@ -45,9 +45,12 @@ objtool-args = $(vmlinux-objtool-args-y) --link
# Link of vmlinux.o used for section mismatch analysis
# ---------------------------------------------------------------------------
+vmlinux-o-ld-args-$(CONFIG_BUILTIN_MODULE_RANGES) += -Map=$@.map
+
quiet_cmd_ld_vmlinux.o = LD $@
cmd_ld_vmlinux.o = \
$(LD) ${KBUILD_LDFLAGS} -r -o $@ \
+ $(vmlinux-o-ld-args-y) \
$(addprefix -T , $(initcalls-lds)) \
--whole-archive vmlinux.a --no-whole-archive \
--start-group $(KBUILD_VMLINUX_LIBS) --end-group \
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 84b6efa849f4..cdd5da7e009b 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -99,6 +99,8 @@
#include <stdio.h>
#include <ctype.h>
+#include <xalloc.h>
+
static void usage(void)
{
fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
@@ -131,12 +133,9 @@ static unsigned int strhash(const char *str, unsigned int sz)
static void add_to_hashtable(const char *name, int len, unsigned int hash,
struct item *hashtab[])
{
- struct item *aux = malloc(sizeof(*aux) + len);
+ struct item *aux;
- if (!aux) {
- perror("fixdep:malloc");
- exit(1);
- }
+ aux = xmalloc(sizeof(*aux) + len);
memcpy(aux->name, name, len);
aux->len = len;
aux->hash = hash;
@@ -228,11 +227,7 @@ static void *read_file(const char *filename)
perror(filename);
exit(2);
}
- buf = malloc(st.st_size + 1);
- if (!buf) {
- perror("fixdep: malloc");
- exit(2);
- }
+ buf = xmalloc(st.st_size + 1);
if (read(fd, buf, st.st_size) != st.st_size) {
perror("fixdep: read");
exit(2);
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
index df00d6619b06..50ab60c81f13 100644
--- a/scripts/coccinelle/api/stream_open.cocci
+++ b/scripts/coccinelle/api/stream_open.cocci
@@ -131,7 +131,6 @@ identifier llseek_f;
identifier fops0.fops;
@@
struct file_operations fops = {
- .llseek = no_llseek,
};
@ has_noop_llseek @
diff --git a/scripts/coccinelle/api/string_choices.cocci b/scripts/coccinelle/api/string_choices.cocci
index 5e729f187f22..375045086912 100644
--- a/scripts/coccinelle/api/string_choices.cocci
+++ b/scripts/coccinelle/api/string_choices.cocci
@@ -14,23 +14,18 @@ expression E;
- ((E == 1) ? "" : "s")
+ str_plural(E)
|
-- ((E != 1) ? "s" : "")
-+ str_plural(E)
-|
- ((E > 1) ? "s" : "")
+ str_plural(E)
)
-@str_plural_r depends on !patch exists@
+@str_plural_r depends on !patch@
expression E;
position P;
@@
(
-* ((E@P == 1) ? "" : "s")
-|
-* ((E@P != 1) ? "s" : "")
+* (E@P == 1) ? "" : "s"
|
-* ((E@P > 1) ? "s" : "")
+* (E@P > 1) ? "s" : ""
)
@script:python depends on report@
@@ -40,21 +35,17 @@ e << str_plural_r.E;
coccilib.report.print_report(p[0], "opportunity for str_plural(%s)" % e)
-@str_up_down depends on patch@
+@str_up_down depends on patch disable neg_if_exp@
expression E;
@@
-(
- ((E) ? "up" : "down")
+ str_up_down(E)
-)
-@str_up_down_r depends on !patch exists@
+@str_up_down_r depends on !patch disable neg_if_exp@
expression E;
position P;
@@
-(
-* ((E@P) ? "up" : "down")
-)
+* E@P ? "up" : "down"
@script:python depends on report@
p << str_up_down_r.P;
@@ -63,21 +54,17 @@ e << str_up_down_r.E;
coccilib.report.print_report(p[0], "opportunity for str_up_down(%s)" % e)
-@str_down_up depends on patch@
+@str_down_up depends on patch disable neg_if_exp@
expression E;
@@
-(
- ((E) ? "down" : "up")
+ str_down_up(E)
-)
-@str_down_up_r depends on !patch exists@
+@str_down_up_r depends on !patch disable neg_if_exp@
expression E;
position P;
@@
-(
-* ((E@P) ? "down" : "up")
-)
+* E@P ? "down" : "up"
@script:python depends on report@
p << str_down_up_r.P;
@@ -85,3 +72,231 @@ e << str_down_up_r.E;
@@
coccilib.report.print_report(p[0], "opportunity for str_down_up(%s)" % e)
+
+@str_true_false depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "true" : "false")
++ str_true_false(E)
+
+@str_true_false_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "true" : "false"
+
+@script:python depends on report@
+p << str_true_false_r.P;
+e << str_true_false_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_true_false(%s)" % e)
+
+@str_false_true depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "false" : "true")
++ str_false_true(E)
+
+@str_false_true_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "false" : "true"
+
+@script:python depends on report@
+p << str_false_true_r.P;
+e << str_false_true_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_false_true(%s)" % e)
+
+@str_hi_lo depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "hi" : "lo")
++ str_hi_lo(E)
+
+@str_hi_lo_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "hi" : "lo"
+
+@script:python depends on report@
+p << str_hi_lo_r.P;
+e << str_hi_lo_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_hi_lo(%s)" % e)
+
+@str_high_low depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "high" : "low")
++ str_high_low(E)
+
+@str_high_low_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "high" : "low"
+
+@script:python depends on report@
+p << str_high_low_r.P;
+e << str_high_low_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_high_low(%s)" % e)
+
+@str_lo_hi depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "lo" : "hi")
++ str_lo_hi(E)
+
+@str_lo_hi_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "lo" : "hi"
+
+@script:python depends on report@
+p << str_lo_hi_r.P;
+e << str_lo_hi_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_lo_hi(%s)" % e)
+
+@str_low_high depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "low" : "high")
++ str_low_high(E)
+
+@str_low_high_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "low" : "high"
+
+@script:python depends on report@
+p << str_low_high_r.P;
+e << str_low_high_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_low_high(%s)" % e)
+
+@str_enable_disable depends on patch@
+expression E;
+@@
+- ((E) ? "enable" : "disable")
++ str_enable_disable(E)
+
+@str_enable_disable_r depends on !patch@
+expression E;
+position P;
+@@
+* E@P ? "enable" : "disable"
+
+@script:python depends on report@
+p << str_enable_disable_r.P;
+e << str_enable_disable_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_enable_disable(%s)" % e)
+
+@str_enabled_disabled depends on patch@
+expression E;
+@@
+- ((E) ? "enabled" : "disabled")
++ str_enabled_disabled(E)
+
+@str_enabled_disabled_r depends on !patch@
+expression E;
+position P;
+@@
+* E@P ? "enabled" : "disabled"
+
+@script:python depends on report@
+p << str_enabled_disabled_r.P;
+e << str_enabled_disabled_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_enabled_disabled(%s)" % e)
+
+@str_read_write depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "read" : "write")
++ str_read_write(E)
+
+@str_read_write_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "read" : "write"
+
+@script:python depends on report@
+p << str_read_write_r.P;
+e << str_read_write_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_read_write(%s)" % e)
+
+@str_write_read depends on patch disable neg_if_exp@
+expression E;
+@@
+- ((E) ? "write" : "read")
++ str_write_read(E)
+
+@str_write_read_r depends on !patch disable neg_if_exp@
+expression E;
+position P;
+@@
+* E@P ? "write" : "read"
+
+@script:python depends on report@
+p << str_write_read_r.P;
+e << str_write_read_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_write_read(%s)" % e)
+
+@str_on_off depends on patch@
+expression E;
+@@
+- ((E) ? "on" : "off")
++ str_on_off(E)
+
+@str_on_off_r depends on !patch@
+expression E;
+position P;
+@@
+* E@P ? "on" : "off"
+
+@script:python depends on report@
+p << str_on_off_r.P;
+e << str_on_off_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_on_off(%s)" % e)
+
+@str_yes_no depends on patch@
+expression E;
+@@
+- ((E) ? "yes" : "no")
++ str_yes_no(E)
+
+@str_yes_no_r depends on !patch@
+expression E;
+position P;
+@@
+* E@P ? "yes" : "no"
+
+@script:python depends on report@
+p << str_yes_no_r.P;
+e << str_yes_no_r.E;
+@@
+
+coccilib.report.print_report(p[0], "opportunity for str_yes_no(%s)" % e)
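To make the new rules concrete, here is a C call site before and after the str_plural() conversion (the inline helper is an adapted copy of the one in include/linux/string_choices.h):

#include <stdio.h>

/* Adapted copy of the helper from <linux/string_choices.h>. */
static inline const char *str_plural(unsigned long num)
{
	return num == 1 ? "" : "s";
}

static void report(unsigned long errors)
{
	/* Before: printf("%lu error%s\n", errors, (errors == 1) ? "" : "s"); */
	printf("%lu error%s\n", errors, str_plural(errors));
}

int main(void)
{
	report(1);	/* prints "1 error"  */
	report(3);	/* prints "3 errors" */
	return 0;
}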
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index a0f50a5b4f7c..826836d264c6 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -1,11 +1,13 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# (c) 2014, Sasha Levin <sasha.levin@oracle.com>
#set -x
usage() {
echo "Usage:"
- echo " $0 -r <release> | <vmlinux> [<base path>|auto] [<modules path>]"
+ echo " $0 -r <release>"
+ echo " $0 [<vmlinux> [<base_path>|auto [<modules_path>]]]"
+ echo " $0 -h"
}
# Try to find a Rust demangler
@@ -32,7 +34,10 @@ READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX}
ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX}
NM=${UTIL_PREFIX}nm${UTIL_SUFFIX}
-if [[ $1 == "-r" ]] ; then
+if [[ $1 == "-h" ]] ; then
+ usage
+ exit 0
+elif [[ $1 == "-r" ]] ; then
vmlinux=""
basepath="auto"
modpath=""
@@ -89,31 +94,32 @@ find_module() {
fi
fi
- if [[ "$modpath" != "" ]] ; then
- for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
- if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
- echo $fn
- return
- fi
- done
- return 1
- fi
-
- modpath=$(dirname "$vmlinux")
- find_module && return
-
- if [[ $release == "" ]] ; then
+ if [ -z "$release" ] ; then
release=$(gdb -ex 'print init_uts_ns.name.release' -ex 'quit' -quiet -batch "$vmlinux" 2>/dev/null | sed -n 's/\$1 = "\(.*\)".*/\1/p')
fi
+ if [ -n "${release}" ] ; then
+ release_dirs="/usr/lib/debug/lib/modules/$release /lib/modules/$release"
+ fi
- for dn in {/usr/lib/debug,}/lib/modules/$release ; do
- if [ -e "$dn" ] ; then
- modpath="$dn"
- find_module && return
+ found_without_debug_info=false
+ for dir in "$modpath" "$(dirname "$vmlinux")" ${release_dirs}; do
+ if [ -n "${dir}" ] && [ -e "${dir}" ]; then
+ for fn in $(find "$dir" -name "${module//_/[-_]}.ko*") ; do
+ if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
+ echo $fn
+ return
+ fi
+ found_without_debug_info=true
+ done
fi
done
- modpath=""
+ if [[ ${found_without_debug_info} == true ]]; then
+ echo "WARNING! No debugging info in module ${module}, rebuild with DEBUG_KERNEL and DEBUG_INFO" >&2
+ else
+ echo "WARNING! Cannot find .ko for module ${module}, please pass a valid module path" >&2
+ fi
+
return 1
}
@@ -131,7 +137,6 @@ parse_symbol() {
else
local objfile=$(find_module)
if [[ $objfile == "" ]] ; then
- echo "WARNING! Modules path isn't set, but is needed to parse this symbol" >&2
return
fi
if [[ $aarray_support == true ]]; then
diff --git a/scripts/gdb/linux/kasan.py b/scripts/gdb/linux/kasan.py
new file mode 100644
index 000000000000..56730b3fde0b
--- /dev/null
+++ b/scripts/gdb/linux/kasan.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2024 Canonical Ltd.
+#
+# Authors:
+# Kuan-Ying Lee <kuan-ying.lee@canonical.com>
+#
+
+import gdb
+from linux import constants, mm
+
+def help():
+ t = """Usage: lx-kasan_mem_to_shadow [Hex memory addr]
+ Example:
+ lx-kasan_mem_to_shadow 0xffff000008eca008\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
+class KasanMemToShadow(gdb.Command):
+ """Translate memory address to kasan shadow address"""
+
+ p_ops = None
+
+ def __init__(self):
+ if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
+ super(KasanMemToShadow, self).__init__("lx-kasan_mem_to_shadow", gdb.COMMAND_SUPPORT)
+
+ def invoke(self, args, from_tty):
+ if not (constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS):
+ raise gdb.GdbError('CONFIG_KASAN_GENERIC or CONFIG_KASAN_SW_TAGS is not set')
+
+ argv = gdb.string_to_argv(args)
+ if len(argv) == 1:
+ if self.p_ops is None:
+ self.p_ops = mm.page_ops().ops
+ addr = int(argv[0], 16)
+ shadow_addr = self.kasan_mem_to_shadow(addr)
+ gdb.write('shadow addr: 0x%x\n' % shadow_addr)
+ else:
+ help()
+ def kasan_mem_to_shadow(self, addr):
+ return (addr >> self.p_ops.KASAN_SHADOW_SCALE_SHIFT) + self.p_ops.KASAN_SHADOW_OFFSET
+
+KasanMemToShadow()
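The translation done by kasan_mem_to_shadow() above is plain arithmetic; a standalone C sketch with assumed example values (KASAN_SHADOW_SCALE_SHIFT is 3 for generic KASAN, and KASAN_SHADOW_OFFSET is architecture- and configuration-specific):

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3	/* one shadow byte covers 8 bytes */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000ULL	/* example value only */

static uint64_t kasan_mem_to_shadow(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uint64_t addr = 0xffff000008eca008ULL;	/* address from the usage text */

	printf("shadow addr: 0x%llx\n",
	       (unsigned long long)kasan_mem_to_shadow(addr));
	return 0;
}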
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
index 43c687e7a69d..65dd1bd12964 100644
--- a/scripts/gdb/linux/proc.py
+++ b/scripts/gdb/linux/proc.py
@@ -18,6 +18,7 @@ from linux import utils
from linux import tasks
from linux import lists
from linux import vfs
+from linux import rbtree
from struct import *
@@ -172,8 +173,7 @@ values of that process namespace"""
gdb.write("{:^18} {:^15} {:>9} {} {} options\n".format(
"mount", "super_block", "devname", "pathname", "fstype"))
- for mnt in lists.list_for_each_entry(namespace['list'],
- mount_ptr_type, "mnt_list"):
+ for mnt in rbtree.rb_inorder_for_each_entry(namespace['mounts'], mount_ptr_type, "mnt_node"):
devname = mnt['mnt_devname'].string()
devname = devname if devname else "none"
diff --git a/scripts/gdb/linux/rbtree.py b/scripts/gdb/linux/rbtree.py
index fe462855eefd..fcbcc5f4153c 100644
--- a/scripts/gdb/linux/rbtree.py
+++ b/scripts/gdb/linux/rbtree.py
@@ -9,6 +9,18 @@ from linux import utils
rb_root_type = utils.CachedType("struct rb_root")
rb_node_type = utils.CachedType("struct rb_node")
+def rb_inorder_for_each(root):
+ def inorder(node):
+ if node:
+ yield from inorder(node['rb_left'])
+ yield node
+ yield from inorder(node['rb_right'])
+
+ yield from inorder(root['rb_node'])
+
+def rb_inorder_for_each_entry(root, gdbtype, member):
+ for node in rb_inorder_for_each(root):
+ yield utils.container_of(node, gdbtype, member)
def rb_first(root):
if root.type == rb_root_type.get_type():
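rb_inorder_for_each() above is a plain recursive in-order walk; the same shape in C, using a hypothetical minimal node type rather than the kernel's struct rb_node:

struct node {
	struct node *left, *right;
};

/* Left subtree, then the node itself, then the right subtree, so the
 * callback sees nodes in ascending key order. */
static void inorder(struct node *n, void (*visit)(struct node *))
{
	if (!n)
		return;
	inorder(n->left, visit);
	visit(n);
	inorder(n->right, visit);
}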
diff --git a/scripts/gdb/linux/stackdepot.py b/scripts/gdb/linux/stackdepot.py
index bb3a0f843931..37313a5a51a0 100644
--- a/scripts/gdb/linux/stackdepot.py
+++ b/scripts/gdb/linux/stackdepot.py
@@ -13,6 +13,13 @@ if constants.LX_CONFIG_STACKDEPOT:
stack_record_type = utils.CachedType('struct stack_record')
DEPOT_STACK_ALIGN = 4
+def help():
+ t = """Usage: lx-stack_depot_lookup [Hex handle value]
+ Example:
+ lx-stack_depot_lookup 0x00c80300\n"""
+ gdb.write("Unrecognized command\n")
+ raise gdb.GdbError(t)
+
def stack_depot_fetch(handle):
global DEPOT_STACK_ALIGN
global stack_record_type
@@ -57,3 +64,23 @@ def stack_depot_print(handle):
gdb.execute("x /i 0x%x" % (int(entries[i])))
except Exception as e:
gdb.write("%s\n" % e)
+
+class StackDepotLookup(gdb.Command):
+ """Search backtrace by handle"""
+
+ def __init__(self):
+ if constants.LX_CONFIG_STACKDEPOT:
+ super(StackDepotLookup, self).__init__("lx-stack_depot_lookup", gdb.COMMAND_SUPPORT)
+
+ def invoke(self, args, from_tty):
+ if not constants.LX_CONFIG_STACKDEPOT:
+ raise gdb.GdbError('CONFIG_STACKDEPOT is not set')
+
+ argv = gdb.string_to_argv(args)
+ if len(argv) == 1:
+ handle = int(argv[0], 16)
+ stack_depot_print(gdb.Value(handle).cast(utils.get_uint_type()))
+ else:
+ help()
+
+StackDepotLookup()
diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
index 64bc87191003..98445671fe83 100644
--- a/scripts/gdb/linux/timerlist.py
+++ b/scripts/gdb/linux/timerlist.py
@@ -87,21 +87,22 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
text += "\n"
if constants.LX_CONFIG_TICK_ONESHOT:
- fmts = [(" .{} : {}", 'nohz_mode'),
- (" .{} : {} nsecs", 'last_tick'),
- (" .{} : {}", 'tick_stopped'),
- (" .{} : {}", 'idle_jiffies'),
- (" .{} : {}", 'idle_calls'),
- (" .{} : {}", 'idle_sleeps'),
- (" .{} : {} nsecs", 'idle_entrytime'),
- (" .{} : {} nsecs", 'idle_waketime'),
- (" .{} : {} nsecs", 'idle_exittime'),
- (" .{} : {} nsecs", 'idle_sleeptime'),
- (" .{}: {} nsecs", 'iowait_sleeptime'),
- (" .{} : {}", 'last_jiffies'),
- (" .{} : {}", 'next_timer'),
- (" .{} : {} nsecs", 'idle_expires')]
- text += "\n".join([s.format(f, ts[f]) for s, f in fmts])
+ TS_FLAG_STOPPED = 1 << 1
+ TS_FLAG_NOHZ = 1 << 4
+ text += f" .{'nohz':15s}: {int(bool(ts['flags'] & TS_FLAG_NOHZ))}\n"
+ text += f" .{'last_tick':15s}: {ts['last_tick']}\n"
+ text += f" .{'tick_stopped':15s}: {int(bool(ts['flags'] & TS_FLAG_STOPPED))}\n"
+ text += f" .{'idle_jiffies':15s}: {ts['idle_jiffies']}\n"
+ text += f" .{'idle_calls':15s}: {ts['idle_calls']}\n"
+ text += f" .{'idle_sleeps':15s}: {ts['idle_sleeps']}\n"
+ text += f" .{'idle_entrytime':15s}: {ts['idle_entrytime']} nsecs\n"
+ text += f" .{'idle_waketime':15s}: {ts['idle_waketime']} nsecs\n"
+ text += f" .{'idle_exittime':15s}: {ts['idle_exittime']} nsecs\n"
+ text += f" .{'idle_sleeptime':15s}: {ts['idle_sleeptime']} nsecs\n"
+ text += f" .{'iowait_sleeptime':15s}: {ts['iowait_sleeptime']} nsecs\n"
+ text += f" .{'last_jiffies':15s}: {ts['last_jiffies']}\n"
+ text += f" .{'next_timer':15s}: {ts['next_timer']}\n"
+ text += f" .{'idle_expires':15s}: {ts['idle_expires']} nsecs\n"
text += "\njiffies: {}\n".format(jiffies)
text += "\n"
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
index fc53cdf286f1..d4eeed4506fd 100644
--- a/scripts/gdb/vmlinux-gdb.py
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -49,3 +49,4 @@ else:
import linux.page_owner
import linux.slab
import linux.vmalloc
+ import linux.kasan
diff --git a/scripts/generate_builtin_ranges.awk b/scripts/generate_builtin_ranges.awk
new file mode 100755
index 000000000000..b9ec761b3bef
--- /dev/null
+++ b/scripts/generate_builtin_ranges.awk
@@ -0,0 +1,508 @@
+#!/usr/bin/gawk -f
+# SPDX-License-Identifier: GPL-2.0
+# generate_builtin_ranges.awk: Generate address range data for builtin modules
+# Written by Kris Van Hees <kris.van.hees@oracle.com>
+#
+# Usage: generate_builtin_ranges.awk modules.builtin vmlinux.map \
+# vmlinux.o.map > modules.builtin.ranges
+#
+
+# Return the module name(s) (if any) associated with the given object.
+#
+# If we have seen this object before, return information from the cache.
+# Otherwise, retrieve it from the corresponding .cmd file.
+#
+function get_module_info(fn, mod, obj, s) {
+ if (fn in omod)
+ return omod[fn];
+
+ if (match(fn, /\/[^/]+$/) == 0)
+ return "";
+
+ obj = fn;
+ mod = "";
+ fn = substr(fn, 1, RSTART) "." substr(fn, RSTART + 1) ".cmd";
+ if (getline s <fn == 1) {
+ if (match(s, /DKBUILD_MODFILE=['"]+[^'"]+/) > 0) {
+ mod = substr(s, RSTART + 16, RLENGTH - 16);
+ gsub(/['"]/, "", mod);
+ } else if (match(s, /RUST_MODFILE=[^ ]+/) > 0)
+ mod = substr(s, RSTART + 13, RLENGTH - 13);
+ }
+ close(fn);
+
+ # A single module name (the common case) may also be reported for
+ # objects that are not part of any module. Some of those objects have
+ # names that match a module name (e.g. core). We check the associated
+ # module file name, and if they do not match, the object is not part
+ # of a module.
+ if (mod !~ / /) {
+ if (!(mod in mods))
+ mod = "";
+ }
+
+ gsub(/([^/ ]*\/)+/, "", mod);
+ gsub(/-/, "_", mod);
+
+ # At this point, mod is a single (valid) module name, or a list of
+ # module names (that do not need validation).
+ omod[obj] = mod;
+
+ return mod;
+}
+
+# Update the ranges entry for the given module 'mod' in section 'osect'.
+#
+# We use a modified absolute start address (soff + base) as index because we
+# may need to insert an anchor record later that must be at the start of the
+# section data, and the first module may very well start at the same address.
+# So, we use (addr << 1) + 1 to allow a possible anchor record to be placed at
+# (addr << 1). This is safe because the index is only used to sort the entries
+# before writing them out.
+#
+function update_entry(osect, mod, soff, eoff, sect, idx) {
+ sect = sect_in[osect];
+ idx = sprintf("%016x", (soff + sect_base[osect]) * 2 + 1);
+ entries[idx] = sprintf("%s %08x-%08x %s", sect, soff, eoff, mod);
+ count[sect]++;
+}
+
+# (1) Build a lookup map of built-in module names.
+#
+# The first file argument is used as input (modules.builtin).
+#
+# Lines will be like:
+# kernel/crypto/lzo-rle.ko
+# and we record the object name "crypto/lzo-rle".
+#
+ARGIND == 1 {
+ sub(/kernel\//, ""); # strip off "kernel/" prefix
+ sub(/\.ko$/, ""); # strip off .ko suffix
+
+ mods[$1] = 1;
+ next;
+}
+
+# (2) Collect address information for each section.
+#
+# The second file argument is used as input (vmlinux.map).
+#
+# We collect the base address of the section in order to convert all addresses
+# in the section into offset values.
+#
+# We collect the address of the anchor (or first symbol in the section if there
+# is no explicit anchor) to allow users of the range data to calculate address
+# ranges based on the actual load address of the section in the running kernel.
+#
+# We collect the start address of any sub-section (section included in the top
+# level section being processed). This is needed when the final linking was
+# done using vmlinux.a because then the list of objects contained in each
+# section is to be obtained from vmlinux.o.map. The offset of the sub-section
+# is recorded here, to be used as an addend when processing vmlinux.o.map
+# later.
+#
+
+# Both GNU ld and LLVM lld linker map format are supported by converting LLVM
+# lld linker map records into equivalent GNU ld linker map records.
+#
+# The first record of the vmlinux.map file provides enough information to know
+# which format we are dealing with.
+#
+ARGIND == 2 && FNR == 1 && NF == 7 && $1 == "VMA" && $7 == "Symbol" {
+ map_is_lld = 1;
+ if (dbg)
+ printf "NOTE: %s uses LLVM lld linker map format\n", FILENAME >"/dev/stderr";
+ next;
+}
+
+# (LLD) Convert a section record from lld format to ld format.
+#
+# lld: ffffffff82c00000 2c00000 2493c0 8192 .data
+# ->
+# ld: .data 0xffffffff82c00000 0x2493c0 load address 0x0000000002c00000
+#
+ARGIND == 2 && map_is_lld && NF == 5 && /[0-9] [^ ]+$/ {
+ $0 = $5 " 0x"$1 " 0x"$3 " load address 0x"$2;
+}
+
+# (LLD) Convert an anchor record from lld format to ld format.
+#
+# lld: ffffffff81000000 1000000 0 1 _text = .
+# ->
+# ld: 0xffffffff81000000 _text = .
+#
+ARGIND == 2 && map_is_lld && !anchor && NF == 7 && raw_addr == "0x"$1 && $6 == "=" && $7 == "." {
+ $0 = " 0x"$1 " " $5 " = .";
+}
+
+# (LLD) Convert an object record from lld format to ld format.
+#
+# lld: 11480 11480 1f07 16 vmlinux.a(arch/x86/events/amd/uncore.o):(.text)
+# ->
+# ld: .text 0x0000000000011480 0x1f07 arch/x86/events/amd/uncore.o
+#
+ARGIND == 2 && map_is_lld && NF == 5 && $5 ~ /:\(/ {
+ gsub(/\)/, "");
+ sub(/ vmlinux\.a\(/, " ");
+ sub(/:\(/, " ");
+ $0 = " "$6 " 0x"$1 " 0x"$3 " " $5;
+}
+
+# (LLD) Convert a symbol record from lld format to ld format.
+#
+# We only care about these while processing a section for which no anchor has
+# been determined yet.
+#
+# lld: ffffffff82a859a4 2a859a4 0 1 btf_ksym_iter_id
+# ->
+# ld: 0xffffffff82a859a4 btf_ksym_iter_id
+#
+ARGIND == 2 && map_is_lld && sect && !anchor && NF == 5 && $5 ~ /^[_A-Za-z][_A-Za-z0-9]*$/ {
+ $0 = " 0x"$1 " " $5;
+}
+
+# (LLD) We do not need any other lld linker map records.
+#
+ARGIND == 2 && map_is_lld && /^[0-9a-f]{16} / {
+ next;
+}
+
+# (LD) Section records with just the section name at the start of the line
+# need to have the next line pulled in to determine whether it is a
+# loadable section. If it is, the next line will contain a hex value
+# as its first and second items.
+#
+ARGIND == 2 && !map_is_lld && NF == 1 && /^[^ ]/ {
+ s = $0;
+ getline;
+ if ($1 !~ /^0x/ || $2 !~ /^0x/)
+ next;
+
+ $0 = s " " $0;
+}
+
+# (LD) Object records with just the section name denote records with a long
+# section name for which the remainder of the record can be found on the
+# next line.
+#
+# (This is also needed for vmlinux.o.map, when used.)
+#
+ARGIND >= 2 && !map_is_lld && NF == 1 && /^ [^ \*]/ {
+ s = $0;
+ getline;
+ $0 = s " " $0;
+}
+
+# Beginning a new section - done with the previous one (if any).
+#
+ARGIND == 2 && /^[^ ]/ {
+ sect = 0;
+}
+
+# Process a loadable section (we only care about .-sections).
+#
+# Record the section name and its base address.
+# We also record the raw (non-stripped) address of the section because it can
+# be used to identify an anchor record.
+#
+# Note:
+# Since some AWK implementations cannot handle large integers, we strip off the
+# first 4 hex digits from the address. This is safe because the kernel space
+# is not large enough for addresses to extend into those digits. The portion
+# to strip off is stored in addr_prefix as a regexp, so further clauses can
+# perform a simple substitution to do the address stripping.
+#
+ARGIND == 2 && /^\./ {
+ # Explicitly ignore a few sections that are not relevant here.
+ if ($1 ~ /^\.orc_/ || $1 ~ /_sites$/ || $1 ~ /\.percpu/)
+ next;
+
+ # Sections with a 0-address can be ignored as well.
+ if ($2 ~ /^0x0+$/)
+ next;
+
+ raw_addr = $2;
+ addr_prefix = "^" substr($2, 1, 6);
+ base = $2;
+ sub(addr_prefix, "0x", base);
+ base = strtonum(base);
+ sect = $1;
+ anchor = 0;
+ sect_base[sect] = base;
+ sect_size[sect] = strtonum($3);
+
+ if (dbg)
+ printf "[%s] BASE %016x\n", sect, base >"/dev/stderr";
+
+ next;
+}
+
+# If we are not in a section we care about, we ignore the record.
+#
+ARGIND == 2 && !sect {
+ next;
+}
+
+# Record the first anchor symbol for the current section.
+#
+# An anchor record for the section bears the same raw address as the section
+# record.
+#
+ARGIND == 2 && !anchor && NF == 4 && raw_addr == $1 && $3 == "=" && $4 == "." {
+ anchor = sprintf("%s %08x-%08x = %s", sect, 0, 0, $2);
+ sect_anchor[sect] = anchor;
+
+ if (dbg)
+ printf "[%s] ANCHOR %016x = %s (.)\n", sect, 0, $2 >"/dev/stderr";
+
+ next;
+}
+
+# If no anchor record was found for the current section, use the first symbol
+# in the section as anchor.
+#
+ARGIND == 2 && !anchor && NF == 2 && $1 ~ /^0x/ && $2 !~ /^0x/ {
+ addr = $1;
+ sub(addr_prefix, "0x", addr);
+ addr = strtonum(addr) - base;
+ anchor = sprintf("%s %08x-%08x = %s", sect, addr, addr, $2);
+ sect_anchor[sect] = anchor;
+
+ if (dbg)
+ printf "[%s] ANCHOR %016x = %s\n", sect, addr, $2 >"/dev/stderr";
+
+ next;
+}
+
+# The first occurrence of a section name in an object record establishes the
+# addend (often 0) for that section. This information is needed to handle
+# sections that get combined in the final linking of vmlinux (e.g. .head.text
+# getting included at the start of .text).
+#
+# If the section does not have a base yet, use the base of the encapsulating
+# section.
+#
+ARGIND == 2 && sect && NF == 4 && /^ [^ \*]/ && !($1 in sect_addend) {
+ if (!($1 in sect_base)) {
+ sect_base[$1] = base;
+
+ if (dbg)
+ printf "[%s] BASE %016x\n", $1, base >"/dev/stderr";
+ }
+
+ addr = $2;
+ sub(addr_prefix, "0x", addr);
+ addr = strtonum(addr);
+ sect_addend[$1] = addr - sect_base[$1];
+ sect_in[$1] = sect;
+
+ if (dbg)
+ printf "[%s] ADDEND %016x - %016x = %016x\n", $1, addr, base, sect_addend[$1] >"/dev/stderr";
+
+ # If the object is vmlinux.o then we will need vmlinux.o.map to get the
+ # actual offsets of objects.
+ if ($4 == "vmlinux.o")
+ need_o_map = 1;
+}
+
+# (3) Collect offset ranges (relative to the section base address) for built-in
+# modules.
+#
+# If the final link was done using the actual objects, vmlinux.map contains all
+# the information we need (see section (3a)).
+# If linking was done using vmlinux.a as intermediary, we will need to process
+# vmlinux.o.map (see section (3b)).
+
+# (3a) Determine offset range info using vmlinux.map.
+#
+# Since we are already processing vmlinux.map, the top level section that is
+# being processed is already known. If we do not have a base address for it,
+# we do not need to process records for it.
+#
+# Given the object name, we determine the module(s) (if any) that the current
+# object is associated with.
+#
+# If we were already processing objects for a (list of) module(s):
+# - If the current object belongs to the same module(s), update the range data
+# to include the current object.
+# - Otherwise, ensure that the end offset of the range is valid.
+#
+# If the current object does not belong to a built-in module, ignore it.
+#
+# If it does, we add a new built-in module offset range record.
+#
+ARGIND == 2 && !need_o_map && /^ [^ ]/ && NF == 4 && $3 != "0x0" {
+ if (!(sect in sect_base))
+ next;
+
+ # Turn the address into an offset from the section base.
+ soff = $2;
+ sub(addr_prefix, "0x", soff);
+ soff = strtonum(soff) - sect_base[sect];
+ eoff = soff + strtonum($3);
+
+ # Determine which (if any) built-in modules the object belongs to.
+ mod = get_module_info($4);
+
+ # If we are processing a built-in module:
+ # - If the current object is within the same module, we update its
+ # entry by extending the range and move on
+ # - Otherwise:
+ # + If we are still processing within the same main section, we
+ # validate the end offset against the start offset of the
+ # current object (e.g. .rodata.str1.[18] objects are often
+ # listed with an incorrect size in the linker map)
+ # + Otherwise, we validate the end offset against the section
+ # size
+ if (mod_name) {
+ if (mod == mod_name) {
+ mod_eoff = eoff;
+ update_entry(mod_sect, mod_name, mod_soff, eoff);
+
+ next;
+ } else if (sect == sect_in[mod_sect]) {
+ if (mod_eoff > soff)
+ update_entry(mod_sect, mod_name, mod_soff, soff);
+ } else {
+ v = sect_size[sect_in[mod_sect]];
+ if (mod_eoff > v)
+ update_entry(mod_sect, mod_name, mod_soff, v);
+ }
+ }
+
+ mod_name = mod;
+
+ # If we encountered an object that is not part of a built-in module, we
+ # do not need to record any data.
+ if (!mod)
+ next;
+
+ # At this point, we encountered the start of a new built-in module.
+ mod_name = mod;
+ mod_soff = soff;
+ mod_eoff = eoff;
+ mod_sect = $1;
+ update_entry($1, mod, soff, mod_eoff);
+
+ next;
+}
+
+# If we do not need to parse the vmlinux.o.map file, we are done.
+#
+ARGIND == 3 && !need_o_map {
+ if (dbg)
+ printf "Note: %s is not needed.\n", FILENAME >"/dev/stderr";
+ exit;
+}
+
+# (3) Collect offset ranges (relative to the section base address) for built-in
+# modules.
+#
+
+# (LLD) Convert an object record from lld format to ld format.
+#
+ARGIND == 3 && map_is_lld && NF == 5 && $5 ~ /:\(/ {
+ gsub(/\)/, "");
+ sub(/:\(/, " ");
+
+ sect = $6;
+ if (!(sect in sect_addend))
+ next;
+
+ sub(/ vmlinux\.a\(/, " ");
+ $0 = " "sect " 0x"$1 " 0x"$3 " " $5;
+}
+
+# (3b) Determine offset range info using vmlinux.o.map.
+#
+# If we do not know an addend for the object's section, we are interested in
+# anything within that section.
+#
+# Determine the top-level section that the object's section was included in
+# during the final link. This is the section name offset range data will be
+# associated with for this object.
+#
+# The remainder of the processing of the current object record follows the
+# procedure outlined in (3a).
+#
+ARGIND == 3 && /^ [^ ]/ && NF == 4 && $3 != "0x0" {
+ osect = $1;
+ if (!(osect in sect_addend))
+ next;
+
+ # We need to work with the main section.
+ sect = sect_in[osect];
+
+ # Turn the address into an offset from the section base.
+ soff = $2;
+ sub(addr_prefix, "0x", soff);
+ soff = strtonum(soff) + sect_addend[osect];
+ eoff = soff + strtonum($3);
+
+ # Determine which (if any) built-in modules the object belongs to.
+ mod = get_module_info($4);
+
+ # If we are processing a built-in module:
+ # - If the current object is within the same module, we update its
+ # entry by extending the range and move on
+ # - Otherwise:
+ # + If we are still processing within the same main section, we
+ # validate the end offset against the start offset of the
+ # current object (e.g. .rodata.str1.[18] objects are often
+ # listed with an incorrect size in the linker map)
+ # + Otherwise, we validate the end offset against the section
+ # size
+ if (mod_name) {
+ if (mod == mod_name) {
+ mod_eoff = eoff;
+ update_entry(mod_sect, mod_name, mod_soff, eoff);
+
+ next;
+ } else if (sect == sect_in[mod_sect]) {
+ if (mod_eoff > soff)
+ update_entry(mod_sect, mod_name, mod_soff, soff);
+ } else {
+ v = sect_size[sect_in[mod_sect]];
+ if (mod_eoff > v)
+ update_entry(mod_sect, mod_name, mod_soff, v);
+ }
+ }
+
+ mod_name = mod;
+
+ # If we encountered an object that is not part of a built-in module, we
+ # do not need to record any data.
+ if (!mod)
+ next;
+
+ # At this point, we encountered the start of a new built-in module.
+ mod_name = mod;
+ mod_soff = soff;
+ mod_eoff = eoff;
+ mod_sect = osect;
+ update_entry(osect, mod, soff, mod_eoff);
+
+ next;
+}
+
+# (4) Generate the output.
+#
+# Anchor records are added for each section that contains offset range data
+# records. They are added at an adjusted section base address (base << 1) to
+# ensure they come first in the sorted records (see update_entry() above for
+# more information).
+#
+# All entries are sorted by (adjusted) address to ensure that the output can be
+# parsed in strict ascending address order.
+#
+END {
+ for (sect in count) {
+ if (sect in sect_anchor) {
+ idx = sprintf("%016x", sect_base[sect] * 2);
+ entries[idx] = sect_anchor[sect];
+ }
+ }
+
+ n = asorti(entries, indices);
+ for (i = 1; i <= n; i++)
+ print entries[indices[i]];
+}
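The (addr << 1) versus (addr << 1) + 1 indexing used by update_entry() and the END block is an even/odd sort key; a C sketch of why a section anchor always sorts before a module range that starts at the same address (numbers are examples only):

#include <stdio.h>

int main(void)
{
	unsigned long long base = 0x1000000;	/* section base (example) */
	unsigned long long soff = 0;		/* module range starts at base */

	unsigned long long anchor_key = base * 2;		/* base << 1 */
	unsigned long long module_key = (base + soff) * 2 + 1;	/* (addr << 1) + 1 */

	/* The even key sorts before the odd key at the same address. */
	printf("%016llx < %016llx\n", anchor_key, module_key);
	return 0;
}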
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 404edf7587e0..0d00ac3723b5 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -20,12 +20,28 @@ enum Value {
Boolean(bool),
Number(i32),
String(String),
+ Array(Vec<Value>),
Object(Object),
}
type Object = Vec<(String, Value)>;
-/// Minimal "almost JSON" generator (e.g. no `null`s, no arrays, no escaping),
+fn comma_sep<T>(
+ seq: &[T],
+ formatter: &mut Formatter<'_>,
+ f: impl Fn(&mut Formatter<'_>, &T) -> Result,
+) -> Result {
+ if let [ref rest @ .., ref last] = seq[..] {
+ for v in rest {
+ f(formatter, v)?;
+ formatter.write_str(",")?;
+ }
+ f(formatter, last)?;
+ }
+ Ok(())
+}
+
+/// Minimal "almost JSON" generator (e.g. no `null`s, no escaping),
/// enough for this purpose.
impl Display for Value {
fn fmt(&self, formatter: &mut Formatter<'_>) -> Result {
@@ -33,59 +49,67 @@ impl Display for Value {
Value::Boolean(boolean) => write!(formatter, "{}", boolean),
Value::Number(number) => write!(formatter, "{}", number),
Value::String(string) => write!(formatter, "\"{}\"", string),
+ Value::Array(values) => {
+ formatter.write_str("[")?;
+ comma_sep(&values[..], formatter, |formatter, v| v.fmt(formatter))?;
+ formatter.write_str("]")
+ }
Value::Object(object) => {
formatter.write_str("{")?;
- if let [ref rest @ .., ref last] = object[..] {
- for (key, value) in rest {
- write!(formatter, "\"{}\": {},", key, value)?;
- }
- write!(formatter, "\"{}\": {}", last.0, last.1)?;
- }
+ comma_sep(&object[..], formatter, |formatter, v| {
+ write!(formatter, "\"{}\": {}", v.0, v.1)
+ })?;
formatter.write_str("}")
}
}
}
}
-struct TargetSpec(Object);
-
-impl TargetSpec {
- fn new() -> TargetSpec {
- TargetSpec(Vec::new())
+impl From<bool> for Value {
+ fn from(value: bool) -> Self {
+ Self::Boolean(value)
}
}
-trait Push<T> {
- fn push(&mut self, key: &str, value: T);
+impl From<i32> for Value {
+ fn from(value: i32) -> Self {
+ Self::Number(value)
+ }
}
-impl Push<bool> for TargetSpec {
- fn push(&mut self, key: &str, value: bool) {
- self.0.push((key.to_string(), Value::Boolean(value)));
+impl From<String> for Value {
+ fn from(value: String) -> Self {
+ Self::String(value)
}
}
-impl Push<i32> for TargetSpec {
- fn push(&mut self, key: &str, value: i32) {
- self.0.push((key.to_string(), Value::Number(value)));
+impl From<&str> for Value {
+ fn from(value: &str) -> Self {
+ Self::String(value.to_string())
}
}
-impl Push<String> for TargetSpec {
- fn push(&mut self, key: &str, value: String) {
- self.0.push((key.to_string(), Value::String(value)));
+impl From<Object> for Value {
+ fn from(object: Object) -> Self {
+ Self::Object(object)
}
}
-impl Push<&str> for TargetSpec {
- fn push(&mut self, key: &str, value: &str) {
- self.push(key, value.to_string());
+impl<T: Into<Value>, const N: usize> From<[T; N]> for Value {
+ fn from(i: [T; N]) -> Self {
+ Self::Array(i.into_iter().map(|v| v.into()).collect())
}
}
-impl Push<Object> for TargetSpec {
- fn push(&mut self, key: &str, value: Object) {
- self.0.push((key.to_string(), Value::Object(value)));
+struct TargetSpec(Object);
+
+impl TargetSpec {
+ fn new() -> TargetSpec {
+ TargetSpec(Vec::new())
+ }
+
+ fn push(&mut self, key: &str, value: impl Into<Value>) {
+ self.0.push((key.to_string(), value.into()));
}
}
@@ -164,10 +188,26 @@ fn main() {
);
let mut features = "-mmx,+soft-float".to_string();
if cfg.has("MITIGATION_RETPOLINE") {
+ // The kernel uses `-mretpoline-external-thunk` (for Clang), which Clang maps to the
+ // target feature of the same name plus the other two target features in
+ // `clang/lib/Driver/ToolChains/Arch/X86.cpp`. These should be eventually enabled via
+ // `-Ctarget-feature` when `rustc` starts recognizing them (or via a new dedicated
+ // flag); see https://github.com/rust-lang/rust/issues/116852.
features += ",+retpoline-external-thunk";
+ features += ",+retpoline-indirect-branches";
+ features += ",+retpoline-indirect-calls";
+ }
+ if cfg.has("MITIGATION_SLS") {
+ // The kernel uses `-mharden-sls=all`, which Clang maps to both these target features in
+ // `clang/lib/Driver/ToolChains/Arch/X86.cpp`. These should be eventually enabled via
+ // `-Ctarget-feature` when `rustc` starts recognizing them (or via a new dedicated
+ // flag); see https://github.com/rust-lang/rust/issues/116851.
+ features += ",+harden-sls-ijmp";
+ features += ",+harden-sls-ret";
}
ts.push("features", features);
ts.push("llvm-target", "x86_64-linux-gnu");
+ ts.push("supported-sanitizers", ["kcfi", "kernel-address"]);
ts.push("target-pointer-width", "64");
} else if cfg.has("X86_32") {
// This only works on UML, as i386 otherwise needs regparm support in rustc
diff --git a/scripts/include/hash.h b/scripts/include/hash.h
new file mode 100644
index 000000000000..efa904368a62
--- /dev/null
+++ b/scripts/include/hash.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef HASH_H
+#define HASH_H
+
+static inline unsigned int hash_str(const char *s)
+{
+ /* fnv32 hash */
+ unsigned int hash = 2166136261U;
+
+ for (; *s; s++)
+ hash = (hash ^ *s) * 0x01000193;
+ return hash;
+}
+
+/* simplified version of functions from include/linux/hash.h */
+#define GOLDEN_RATIO_32 0x61C88647
+
+static inline unsigned int hash_32(unsigned int val)
+{
+ return GOLDEN_RATIO_32 * val;
+}
+
+static inline unsigned int hash_ptr(const void *ptr)
+{
+ return hash_32((unsigned int)(unsigned long)ptr);
+}
+
+#endif /* HASH_H */
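A small usage sketch of the helper above, picking a hash bucket for a string key (the helper body is copied so the sketch compiles standalone; the bucket count is an arbitrary example):

#include <stdio.h>

/* Copy of hash_str() above: 32-bit FNV-1a. */
static unsigned int hash_str(const char *s)
{
	unsigned int hash = 2166136261U;	/* FNV offset basis */

	for (; *s; s++)
		hash = (hash ^ *s) * 0x01000193;	/* FNV prime */
	return hash;
}

int main(void)
{
	unsigned int buckets = 256;	/* example table size */

	printf("bucket %u\n", hash_str("crypto/lzo-rle") % buckets);
	return 0;
}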
diff --git a/scripts/include/hashtable.h b/scripts/include/hashtable.h
index a0a2c8f5f639..45abcb12bfce 100644
--- a/scripts/include/hashtable.h
+++ b/scripts/include/hashtable.h
@@ -15,6 +15,23 @@
#define hash_head(table, key) (&(table)[(key) % HASH_SIZE(table)])
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+/**
+ * hash_init - initialize a hash table
+ * @table: hashtable to be initialized
+ *
+ * This has to be a macro since HASH_SIZE() will not work on pointers: it
+ * calculates the size from the array type at compile time.
+ */
+#define hash_init(table) __hash_init(table, HASH_SIZE(table))
+
/**
* hash_add - add an object to a hashtable
* @table: hashtable to add to
@@ -25,6 +42,15 @@
hlist_add_head(node, hash_head(table, key))
/**
+ * hash_del - remove an object from a hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+
+/**
* hash_for_each - iterate over a hashtable
* @table: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
@@ -35,6 +61,18 @@
hlist_for_each_entry(obj, &table[_bkt], member)
/**
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
+ * hash entry
+ * @table: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @tmp: a &struct hlist_node used for temporary storage
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_safe(table, obj, tmp, member) \
+ for (int _bkt = 0; _bkt < HASH_SIZE(table); _bkt++) \
+ hlist_for_each_entry_safe(obj, tmp, &table[_bkt], member)
+
+/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @table: hashtable to iterate
@@ -45,4 +83,16 @@
#define hash_for_each_possible(table, obj, member, key) \
hlist_for_each_entry(obj, hash_head(table, key), member)
+/**
+ * hash_for_each_possible_safe - iterate over all possible objects hashing to the
+ * same bucket safe against removals
+ * @table: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @tmp: a &struct hlist_node used for temporary storage
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_safe(table, obj, tmp, member, key) \
+ hlist_for_each_entry_safe(obj, tmp, hash_head(table, key), member)
+
#endif /* HASHTABLE_H */
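The _safe iterators added above cache the next pointer before the loop body runs, which is what makes deleting the current entry legal. A sketch of the intended usage, assuming the scripts/include/{list,hashtable}.h helpers from this patch and a hypothetical item type:

#include <stdlib.h>

struct item {
	int key;
	struct hlist_node node;
};

static struct hlist_head table[64];	/* example bucket count */

static void drop_key(int key)
{
	struct item *it;
	struct hlist_node *tmp;

	/* tmp keeps the next node, so freeing 'it' mid-walk is safe. */
	hash_for_each_possible_safe(table, it, tmp, node, key) {
		if (it->key == key) {
			hash_del(&it->node);
			free(it);
		}
	}
}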
diff --git a/scripts/include/list.h b/scripts/include/list.h
index 409201cd495b..fea1e2b79063 100644
--- a/scripts/include/list.h
+++ b/scripts/include/list.h
@@ -268,6 +268,63 @@ static inline int list_empty(const struct list_head *head)
*/
#define HLIST_HEAD_INIT { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+/**
+ * hlist_unhashed - Has node been removed from list and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed
+ * state. For example, hlist_nulls_del_init_rcu() does leave the
+ * node in unhashed state, but hlist_nulls_del() does not.
+ */
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+/**
+ * hlist_del - Delete the specified hlist_node from its list
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in hashed state. Use
+ * hlist_del_init() or similar instead to unhash @n.
+ */
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_del_init - Delete the specified hlist_node from its list and initialize
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in unhashed state.
+ */
+static inline void hlist_del_init(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
/**
* hlist_add_head - add a new entry at the beginning of the hlist
@@ -306,4 +363,16 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
pos; \
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: a &struct hlist_node to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
+
#endif /* LIST_H */
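The hashed/unhashed distinction above matters when a node is moved between lists: hlist_del_init() leaves the node unhashed (hlist_unhashed() returns true), so it can be re-added immediately. A short sketch assuming the list.h helpers from this patch:

static void move_node(struct hlist_node *n, struct hlist_head *dst)
{
	hlist_del_init(n);	/* no-op if n is already unhashed */
	hlist_add_head(n, dst);	/* n is reusable after _init removal */
}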
diff --git a/scripts/include/xalloc.h b/scripts/include/xalloc.h
new file mode 100644
index 000000000000..cdadb07d0592
--- /dev/null
+++ b/scripts/include/xalloc.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef XALLOC_H
+#define XALLOC_H
+
+#include <stdlib.h>
+#include <string.h>
+
+static inline void *xmalloc(size_t size)
+{
+ void *p = malloc(size);
+
+ if (!p)
+ exit(1);
+ return p;
+}
+
+static inline void *xcalloc(size_t nmemb, size_t size)
+{
+ void *p = calloc(nmemb, size);
+
+ if (!p)
+ exit(1);
+ return p;
+}
+
+static inline void *xrealloc(void *p, size_t size)
+{
+ p = realloc(p, size);
+ if (!p)
+ exit(1);
+ return p;
+}
+
+static inline char *xstrdup(const char *s)
+{
+ char *p = strdup(s);
+
+ if (!p)
+ exit(1);
+ return p;
+}
+
+static inline char *xstrndup(const char *s, size_t n)
+{
+ char *p = strndup(s, n);
+
+ if (!p)
+ exit(1);
+ return p;
+}
+
+#endif /* XALLOC_H */
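The x*() wrappers exit on allocation failure, which lets call sites drop per-allocation error handling (the fixdep.c and kallsyms.c hunks below show the conversions). A minimal sketch of the pattern with a hypothetical record type:

#include <string.h>
/* Assumes scripts/include/xalloc.h from this patch. */

struct rec {
	char name[1];	/* trailing array sized at allocation, as in fixdep.c */
};

static struct rec *new_rec(const char *name)
{
	/* xmalloc() never returns NULL, so no check is needed here. */
	struct rec *r = xmalloc(sizeof(*r) + strlen(name));

	strcpy(r->name, name);
	return r;
}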
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 123dab0572f8..03852da3d249 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -27,6 +27,8 @@
#include <ctype.h>
#include <limits.h>
+#include <xalloc.h>
+
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define KSYM_NAME_LEN 512
@@ -168,12 +170,7 @@ static struct sym_entry *read_symbol(FILE *in, char **buf, size_t *buf_len)
* compressed together */
len++;
- sym = malloc(sizeof(*sym) + len + 1);
- if (!sym) {
- fprintf(stderr, "kallsyms failure: "
- "unable to allocate required amount of memory\n");
- exit(EXIT_FAILURE);
- }
+ sym = xmalloc(sizeof(*sym) + len + 1);
sym->addr = addr;
sym->len = len;
sym->sym[0] = type;
@@ -278,12 +275,7 @@ static void read_map(const char *in)
if (table_cnt >= table_size) {
table_size += 10000;
- table = realloc(table, sizeof(*table) * table_size);
- if (!table) {
- fprintf(stderr, "out of memory\n");
- fclose(fp);
- exit (1);
- }
+ table = xrealloc(table, sizeof(*table) * table_size);
}
table[table_cnt++] = sym;
@@ -300,15 +292,6 @@ static void output_label(const char *label)
printf("%s:\n", label);
}
-/* Provide proper symbols relocatability by their '_text' relativeness. */
-static void output_address(unsigned long long addr)
-{
- if (_text <= addr)
- printf("\tPTR\t_text + %#llx\n", addr - _text);
- else
- printf("\tPTR\t_text - %#llx\n", _text - addr);
-}
-
/* uncompress a compressed symbol. When this function is called, the best table
* might still be compressed itself, so the function needs to be recursive */
static int expand_symbol(const unsigned char *data, int len, char *result)
@@ -391,12 +374,7 @@ static void write_src(void)
/* table of offset markers, that give the offset in the compressed stream
* every 256 symbols */
markers_cnt = (table_cnt + 255) / 256;
- markers = malloc(sizeof(*markers) * markers_cnt);
- if (!markers) {
- fprintf(stderr, "kallsyms failure: "
- "unable to allocate required memory\n");
- exit(EXIT_FAILURE);
- }
+ markers = xmalloc(sizeof(*markers) * markers_cnt);
output_label("kallsyms_names");
off = 0;
@@ -477,17 +455,17 @@ static void write_src(void)
*/
long long offset;
- int overflow;
+ bool overflow;
if (!absolute_percpu) {
offset = table[i]->addr - relative_base;
- overflow = (offset < 0 || offset > UINT_MAX);
+ overflow = offset < 0 || offset > UINT_MAX;
} else if (symbol_absolute(table[i])) {
offset = table[i]->addr;
- overflow = (offset < 0 || offset > INT_MAX);
+ overflow = offset < 0 || offset > INT_MAX;
} else {
offset = relative_base - table[i]->addr - 1;
- overflow = (offset < INT_MIN || offset >= 0);
+ overflow = offset < INT_MIN || offset >= 0;
}
if (overflow) {
fprintf(stderr, "kallsyms failure: "
@@ -501,7 +479,11 @@ static void write_src(void)
printf("\n");
output_label("kallsyms_relative_base");
- output_address(relative_base);
+ /* Provide proper symbols relocatability by their '_text' relativeness. */
+ if (_text <= relative_base)
+ printf("\tPTR\t_text + %#llx\n", relative_base - _text);
+ else
+ printf("\tPTR\t_text - %#llx\n", _text - relative_base);
printf("\n");
sort_symbols_by_name();
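
To make the overflow checks above concrete, a worked sketch of the plain relative encoding (the addresses are made up):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values for a kernel mapped at the usual base. */
	unsigned long long relative_base = 0xffffffff81000000ULL;
	unsigned long long addr          = 0xffffffff81234567ULL;
	long long offset = addr - relative_base;	/* 0x234567 */
	bool overflow = offset < 0 || offset > UINT_MAX;

	if (!overflow)
		printf("offset %#llx fits in a 32-bit entry\n",
		       (unsigned long long)offset);
	return 0;
}
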
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 76193ce5a792..4286d5e7f95d 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -18,6 +18,7 @@
#include <time.h>
#include <unistd.h>
+#include <xalloc.h>
#include "internal.h"
#include "lkc.h"
@@ -395,6 +396,8 @@ load:
}
}
+ expr_invalidate_all();
+
while (getline_stripped(&line, &line_asize, in) != -1) {
struct menu *choice;
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index c349da7fe3f8..78738ef412de 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -9,44 +9,68 @@
#include <stdlib.h>
#include <string.h>
+#include <hash.h>
+#include <xalloc.h>
+#include "internal.h"
#include "lkc.h"
#define DEBUG_EXPR 0
+HASHTABLE_DEFINE(expr_hashtable, EXPR_HASHSIZE);
+
static struct expr *expr_eliminate_yn(struct expr *e);
-struct expr *expr_alloc_symbol(struct symbol *sym)
+/**
+ * expr_lookup - return the expression with the given type and sub-nodes
+ * @type: expression type
+ * @l: left node
+ * @r: right node
+ *
+ * This looks up an expression with the specified type and sub-nodes. If such
+ * an expression is found in the hash table, it is returned. Otherwise, a new
+ * expression node is allocated and added to the hash table.
+ *
+ * Return: the looked-up or newly allocated expression
+ */
+static struct expr *expr_lookup(enum expr_type type, void *l, void *r)
{
- struct expr *e = xcalloc(1, sizeof(*e));
- e->type = E_SYMBOL;
- e->left.sym = sym;
+ struct expr *e;
+ int hash;
+
+ hash = hash_32((unsigned int)type ^ hash_ptr(l) ^ hash_ptr(r));
+
+ hash_for_each_possible(expr_hashtable, e, node, hash) {
+ if (e->type == type && e->left._initdata == l &&
+ e->right._initdata == r)
+ return e;
+ }
+
+ e = xmalloc(sizeof(*e));
+ e->type = type;
+ e->left._initdata = l;
+ e->right._initdata = r;
+
+ hash_add(expr_hashtable, &e->node, hash);
+
return e;
}
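
In isolation, the interning pattern above looks like this (a generic sketch with a naive modulo hash; the kconfig version uses the shared hashtable helpers instead):

#include <stdint.h>
#include <xalloc.h>

struct node {
	int type;
	void *l, *r;
	struct node *next;	/* bucket chaining */
};

#define NBUCKETS 1024
static struct node *buckets[NBUCKETS];

/* Return the unique node for (type, l, r), allocating it on first use. */
static struct node *intern(int type, void *l, void *r)
{
	unsigned int h = ((uintptr_t)l ^ (uintptr_t)r ^ (unsigned int)type) % NBUCKETS;
	struct node *n;

	for (n = buckets[h]; n; n = n->next)
		if (n->type == type && n->l == l && n->r == r)
			return n;	/* equal node already exists: share it */

	n = xmalloc(sizeof(*n));
	n->type = type;
	n->l = l;
	n->r = r;
	n->next = buckets[h];
	buckets[h] = n;
	return n;
}
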
+struct expr *expr_alloc_symbol(struct symbol *sym)
+{
+ return expr_lookup(E_SYMBOL, sym, NULL);
+}
+
struct expr *expr_alloc_one(enum expr_type type, struct expr *ce)
{
- struct expr *e = xcalloc(1, sizeof(*e));
- e->type = type;
- e->left.expr = ce;
- return e;
+ return expr_lookup(type, ce, NULL);
}
struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e2)
{
- struct expr *e = xcalloc(1, sizeof(*e));
- e->type = type;
- e->left.expr = e1;
- e->right.expr = e2;
- return e;
+ return expr_lookup(type, e1, e2);
}
struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2)
{
- struct expr *e = xcalloc(1, sizeof(*e));
- e->type = type;
- e->left.sym = s1;
- e->right.sym = s2;
- return e;
+ return expr_lookup(type, s1, s2);
}
struct expr *expr_alloc_and(struct expr *e1, struct expr *e2)
@@ -63,76 +87,6 @@ struct expr *expr_alloc_or(struct expr *e1, struct expr *e2)
return e2 ? expr_alloc_two(E_OR, e1, e2) : e1;
}
-struct expr *expr_copy(const struct expr *org)
-{
- struct expr *e;
-
- if (!org)
- return NULL;
-
- e = xmalloc(sizeof(*org));
- memcpy(e, org, sizeof(*org));
- switch (org->type) {
- case E_SYMBOL:
- e->left = org->left;
- break;
- case E_NOT:
- e->left.expr = expr_copy(org->left.expr);
- break;
- case E_EQUAL:
- case E_GEQ:
- case E_GTH:
- case E_LEQ:
- case E_LTH:
- case E_UNEQUAL:
- e->left.sym = org->left.sym;
- e->right.sym = org->right.sym;
- break;
- case E_AND:
- case E_OR:
- e->left.expr = expr_copy(org->left.expr);
- e->right.expr = expr_copy(org->right.expr);
- break;
- default:
- fprintf(stderr, "can't copy type %d\n", e->type);
- free(e);
- e = NULL;
- break;
- }
-
- return e;
-}
-
-void expr_free(struct expr *e)
-{
- if (!e)
- return;
-
- switch (e->type) {
- case E_SYMBOL:
- break;
- case E_NOT:
- expr_free(e->left.expr);
- break;
- case E_EQUAL:
- case E_GEQ:
- case E_GTH:
- case E_LEQ:
- case E_LTH:
- case E_UNEQUAL:
- break;
- case E_OR:
- case E_AND:
- expr_free(e->left.expr);
- expr_free(e->right.expr);
- break;
- default:
- fprintf(stderr, "how to free type %d?\n", e->type);
- break;
- }
- free(e);
-}
-
static int trans_count;
/*
@@ -145,16 +99,24 @@ static int trans_count;
*/
static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct expr **ep2)
{
+ struct expr *l, *r;
+
/* Recurse down to leaves */
if ((*ep1)->type == type) {
- __expr_eliminate_eq(type, &(*ep1)->left.expr, ep2);
- __expr_eliminate_eq(type, &(*ep1)->right.expr, ep2);
+ l = (*ep1)->left.expr;
+ r = (*ep1)->right.expr;
+ __expr_eliminate_eq(type, &l, ep2);
+ __expr_eliminate_eq(type, &r, ep2);
+ *ep1 = expr_alloc_two(type, l, r);
return;
}
if ((*ep2)->type == type) {
- __expr_eliminate_eq(type, ep1, &(*ep2)->left.expr);
- __expr_eliminate_eq(type, ep1, &(*ep2)->right.expr);
+ l = (*ep2)->left.expr;
+ r = (*ep2)->right.expr;
+ __expr_eliminate_eq(type, ep1, &l);
+ __expr_eliminate_eq(type, ep1, &r);
+ *ep2 = expr_alloc_two(type, l, r);
return;
}
@@ -170,7 +132,6 @@ static void __expr_eliminate_eq(enum expr_type type, struct expr **ep1, struct e
/* *ep1 and *ep2 are equal leaves. Prepare them for elimination. */
trans_count++;
- expr_free(*ep1); expr_free(*ep2);
switch (type) {
case E_OR:
*ep1 = expr_alloc_symbol(&symbol_no);
@@ -242,9 +203,10 @@ void expr_eliminate_eq(struct expr **ep1, struct expr **ep2)
* equals some operand in the other (operands do not need to appear in the same
* order), recursively.
*/
-int expr_eq(struct expr *e1, struct expr *e2)
+bool expr_eq(struct expr *e1, struct expr *e2)
{
- int res, old_count;
+ int old_count;
+ bool res;
/*
* A NULL expr is taken to be yes, but there's also a different way to
@@ -254,7 +216,7 @@ int expr_eq(struct expr *e1, struct expr *e2)
return expr_is_yes(e1) && expr_is_yes(e2);
if (e1->type != e2->type)
- return 0;
+ return false;
switch (e1->type) {
case E_EQUAL:
case E_GEQ:
@@ -269,14 +231,10 @@ int expr_eq(struct expr *e1, struct expr *e2)
return expr_eq(e1->left.expr, e2->left.expr);
case E_AND:
case E_OR:
- e1 = expr_copy(e1);
- e2 = expr_copy(e2);
old_count = trans_count;
expr_eliminate_eq(&e1, &e2);
res = (e1->type == E_SYMBOL && e2->type == E_SYMBOL &&
e1->left.sym == e2->left.sym);
- expr_free(e1);
- expr_free(e2);
trans_count = old_count;
return res;
case E_RANGE:
@@ -291,11 +249,11 @@ int expr_eq(struct expr *e1, struct expr *e2)
printf(" ?\n");
}
- return 0;
+ return false;
}
/*
- * Recursively performs the following simplifications in-place (as well as the
+ * Recursively performs the following simplifications (as well as the
* corresponding simplifications with swapped operands):
*
* expr && n -> n
@@ -307,79 +265,39 @@ int expr_eq(struct expr *e1, struct expr *e2)
*/
static struct expr *expr_eliminate_yn(struct expr *e)
{
- struct expr *tmp;
+ struct expr *l, *r;
if (e) switch (e->type) {
case E_AND:
- e->left.expr = expr_eliminate_yn(e->left.expr);
- e->right.expr = expr_eliminate_yn(e->right.expr);
- if (e->left.expr->type == E_SYMBOL) {
- if (e->left.expr->left.sym == &symbol_no) {
- expr_free(e->left.expr);
- expr_free(e->right.expr);
- e->type = E_SYMBOL;
- e->left.sym = &symbol_no;
- e->right.expr = NULL;
- return e;
- } else if (e->left.expr->left.sym == &symbol_yes) {
- free(e->left.expr);
- tmp = e->right.expr;
- *e = *(e->right.expr);
- free(tmp);
- return e;
- }
+ l = expr_eliminate_yn(e->left.expr);
+ r = expr_eliminate_yn(e->right.expr);
+ if (l->type == E_SYMBOL) {
+ if (l->left.sym == &symbol_no)
+ return l;
+ else if (l->left.sym == &symbol_yes)
+ return r;
}
- if (e->right.expr->type == E_SYMBOL) {
- if (e->right.expr->left.sym == &symbol_no) {
- expr_free(e->left.expr);
- expr_free(e->right.expr);
- e->type = E_SYMBOL;
- e->left.sym = &symbol_no;
- e->right.expr = NULL;
- return e;
- } else if (e->right.expr->left.sym == &symbol_yes) {
- free(e->right.expr);
- tmp = e->left.expr;
- *e = *(e->left.expr);
- free(tmp);
- return e;
- }
+ if (r->type == E_SYMBOL) {
+ if (r->left.sym == &symbol_no)
+ return r;
+ else if (r->left.sym == &symbol_yes)
+ return l;
}
break;
case E_OR:
- e->left.expr = expr_eliminate_yn(e->left.expr);
- e->right.expr = expr_eliminate_yn(e->right.expr);
- if (e->left.expr->type == E_SYMBOL) {
- if (e->left.expr->left.sym == &symbol_no) {
- free(e->left.expr);
- tmp = e->right.expr;
- *e = *(e->right.expr);
- free(tmp);
- return e;
- } else if (e->left.expr->left.sym == &symbol_yes) {
- expr_free(e->left.expr);
- expr_free(e->right.expr);
- e->type = E_SYMBOL;
- e->left.sym = &symbol_yes;
- e->right.expr = NULL;
- return e;
- }
+ l = expr_eliminate_yn(e->left.expr);
+ r = expr_eliminate_yn(e->right.expr);
+ if (l->type == E_SYMBOL) {
+ if (l->left.sym == &symbol_no)
+ return r;
+ else if (l->left.sym == &symbol_yes)
+ return l;
}
- if (e->right.expr->type == E_SYMBOL) {
- if (e->right.expr->left.sym == &symbol_no) {
- free(e->right.expr);
- tmp = e->left.expr;
- *e = *(e->left.expr);
- free(tmp);
- return e;
- } else if (e->right.expr->left.sym == &symbol_yes) {
- expr_free(e->left.expr);
- expr_free(e->right.expr);
- e->type = E_SYMBOL;
- e->left.sym = &symbol_yes;
- e->right.expr = NULL;
- return e;
- }
+ if (r->type == E_SYMBOL) {
+ if (r->left.sym == &symbol_no)
+ return l;
+ else if (r->left.sym == &symbol_yes)
+ return r;
}
break;
default:
@@ -397,7 +315,7 @@ static struct expr *expr_join_or(struct expr *e1, struct expr *e2)
struct symbol *sym1, *sym2;
if (expr_eq(e1, e2))
- return expr_copy(e1);
+ return e1;
if (e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && e1->type != E_NOT)
return NULL;
if (e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && e2->type != E_NOT)
@@ -440,6 +358,7 @@ static struct expr *expr_join_or(struct expr *e1, struct expr *e2)
}
}
if (sym1->type == S_BOOLEAN) {
+ // a || !a -> y
if ((e1->type == E_NOT && e1->left.expr->type == E_SYMBOL && e2->type == E_SYMBOL) ||
(e2->type == E_NOT && e2->left.expr->type == E_SYMBOL && e1->type == E_SYMBOL))
return expr_alloc_symbol(&symbol_yes);
@@ -461,7 +380,7 @@ static struct expr *expr_join_and(struct expr *e1, struct expr *e2)
struct symbol *sym1, *sym2;
if (expr_eq(e1, e2))
- return expr_copy(e1);
+ return e1;
if (e1->type != E_EQUAL && e1->type != E_UNEQUAL && e1->type != E_SYMBOL && e1->type != E_NOT)
return NULL;
if (e2->type != E_EQUAL && e2->type != E_UNEQUAL && e2->type != E_SYMBOL && e2->type != E_NOT)
@@ -558,38 +477,33 @@ static struct expr *expr_join_and(struct expr *e1, struct expr *e2)
*/
static void expr_eliminate_dups1(enum expr_type type, struct expr **ep1, struct expr **ep2)
{
- struct expr *tmp;
+ struct expr *tmp, *l, *r;
/* Recurse down to leaves */
if ((*ep1)->type == type) {
- expr_eliminate_dups1(type, &(*ep1)->left.expr, ep2);
- expr_eliminate_dups1(type, &(*ep1)->right.expr, ep2);
+ l = (*ep1)->left.expr;
+ r = (*ep1)->right.expr;
+ expr_eliminate_dups1(type, &l, ep2);
+ expr_eliminate_dups1(type, &r, ep2);
+ *ep1 = expr_alloc_two(type, l, r);
return;
}
if ((*ep2)->type == type) {
- expr_eliminate_dups1(type, ep1, &(*ep2)->left.expr);
- expr_eliminate_dups1(type, ep1, &(*ep2)->right.expr);
+ l = (*ep2)->left.expr;
+ r = (*ep2)->right.expr;
+ expr_eliminate_dups1(type, ep1, &l);
+ expr_eliminate_dups1(type, ep1, &r);
+ *ep2 = expr_alloc_two(type, l, r);
return;
}
/* *ep1 and *ep2 are leaves. Compare and process them. */
- if (*ep1 == *ep2)
- return;
-
- switch ((*ep1)->type) {
- case E_OR: case E_AND:
- expr_eliminate_dups1((*ep1)->type, ep1, ep1);
- default:
- ;
- }
-
switch (type) {
case E_OR:
tmp = expr_join_or(*ep1, *ep2);
if (tmp) {
- expr_free(*ep1); expr_free(*ep2);
*ep1 = expr_alloc_symbol(&symbol_no);
*ep2 = tmp;
trans_count++;
@@ -598,7 +512,6 @@ static void expr_eliminate_dups1(enum expr_type type, struct expr **ep1, struct
case E_AND:
tmp = expr_join_and(*ep1, *ep2);
if (tmp) {
- expr_free(*ep1); expr_free(*ep2);
*ep1 = expr_alloc_symbol(&symbol_yes);
*ep2 = tmp;
trans_count++;
@@ -628,10 +541,15 @@ struct expr *expr_eliminate_dups(struct expr *e)
oldcount = trans_count;
do {
+ struct expr *l, *r;
+
trans_count = 0;
switch (e->type) {
case E_OR: case E_AND:
- expr_eliminate_dups1(e->type, &e, &e);
+ l = expr_eliminate_dups(e->left.expr);
+ r = expr_eliminate_dups(e->right.expr);
+ expr_eliminate_dups1(e->type, &l, &r);
+ e = expr_alloc_two(e->type, l, r);
default:
;
}
@@ -645,12 +563,34 @@ struct expr *expr_eliminate_dups(struct expr *e)
* Performs various simplifications involving logical operators and
* comparisons.
*
+ * For bool type:
+ * A=n -> !A
+ * A=m -> n
+ * A=y -> A
+ * A!=n -> A
+ * A!=m -> y
+ * A!=y -> !A
+ *
+ * For any type:
+ * !!A -> A
+ * !(A=B) -> A!=B
+ * !(A!=B) -> A=B
+ * !(A<=B) -> A>B
+ * !(A>=B) -> A<B
+ * !(A<B) -> A>=B
+ * !(A>B) -> A<=B
+ * !(A || B) -> !A && !B
+ * !(A && B) -> !A || !B
+ *
+ * For constant:
+ * !y -> n
+ * !m -> m
+ * !n -> y
+ *
* Allocates and returns a new expression.
*/
struct expr *expr_transform(struct expr *e)
{
- struct expr *tmp;
-
if (!e)
return NULL;
switch (e->type) {
@@ -663,8 +603,9 @@ struct expr *expr_transform(struct expr *e)
case E_SYMBOL:
break;
default:
- e->left.expr = expr_transform(e->left.expr);
- e->right.expr = expr_transform(e->right.expr);
+ e = expr_alloc_two(e->type,
+ expr_transform(e->left.expr),
+ expr_transform(e->right.expr));
}
switch (e->type) {
@@ -672,21 +613,19 @@ struct expr *expr_transform(struct expr *e)
if (e->left.sym->type != S_BOOLEAN)
break;
if (e->right.sym == &symbol_no) {
- e->type = E_NOT;
- e->left.expr = expr_alloc_symbol(e->left.sym);
- e->right.sym = NULL;
+ // A=n -> !A
+ e = expr_alloc_one(E_NOT, expr_alloc_symbol(e->left.sym));
break;
}
if (e->right.sym == &symbol_mod) {
+ // A=m -> n
printf("boolean symbol %s tested for 'm'? test forced to 'n'\n", e->left.sym->name);
- e->type = E_SYMBOL;
- e->left.sym = &symbol_no;
- e->right.sym = NULL;
+ e = expr_alloc_symbol(&symbol_no);
break;
}
if (e->right.sym == &symbol_yes) {
- e->type = E_SYMBOL;
- e->right.sym = NULL;
+ // A=y -> A
+ e = expr_alloc_symbol(e->left.sym);
break;
}
break;
@@ -694,104 +633,71 @@ struct expr *expr_transform(struct expr *e)
if (e->left.sym->type != S_BOOLEAN)
break;
if (e->right.sym == &symbol_no) {
- e->type = E_SYMBOL;
- e->right.sym = NULL;
+ // A!=n -> A
+ e = expr_alloc_symbol(e->left.sym);
break;
}
if (e->right.sym == &symbol_mod) {
+ // A!=m -> y
printf("boolean symbol %s tested for 'm'? test forced to 'y'\n", e->left.sym->name);
- e->type = E_SYMBOL;
- e->left.sym = &symbol_yes;
- e->right.sym = NULL;
+ e = expr_alloc_symbol(&symbol_yes);
break;
}
if (e->right.sym == &symbol_yes) {
- e->type = E_NOT;
- e->left.expr = expr_alloc_symbol(e->left.sym);
- e->right.sym = NULL;
+ // A!=y -> !A
+ e = expr_alloc_one(E_NOT, e->left.expr);
break;
}
break;
case E_NOT:
switch (e->left.expr->type) {
case E_NOT:
- // !!a -> a
- tmp = e->left.expr->left.expr;
- free(e->left.expr);
- free(e);
- e = tmp;
- e = expr_transform(e);
+ // !!A -> A
+ e = e->left.expr->left.expr;
break;
case E_EQUAL:
case E_UNEQUAL:
- // !a='x' -> a!='x'
- tmp = e->left.expr;
- free(e);
- e = tmp;
- e->type = e->type == E_EQUAL ? E_UNEQUAL : E_EQUAL;
+ // !(A=B) -> A!=B
+ e = expr_alloc_comp(e->left.expr->type == E_EQUAL ? E_UNEQUAL : E_EQUAL,
+ e->left.expr->left.sym,
+ e->left.expr->right.sym);
break;
case E_LEQ:
case E_GEQ:
- // !a<='x' -> a>'x'
- tmp = e->left.expr;
- free(e);
- e = tmp;
- e->type = e->type == E_LEQ ? E_GTH : E_LTH;
+ // !(A<=B) -> A>B
+ e = expr_alloc_comp(e->left.expr->type == E_LEQ ? E_GTH : E_LTH,
+ e->left.expr->left.sym,
+ e->left.expr->right.sym);
break;
case E_LTH:
case E_GTH:
- // !a<'x' -> a>='x'
- tmp = e->left.expr;
- free(e);
- e = tmp;
- e->type = e->type == E_LTH ? E_GEQ : E_LEQ;
+ // !(A<B) -> A>=B
+ e = expr_alloc_comp(e->left.expr->type == E_LTH ? E_GEQ : E_LEQ,
+ e->left.expr->left.sym,
+ e->left.expr->right.sym);
break;
case E_OR:
- // !(a || b) -> !a && !b
- tmp = e->left.expr;
- e->type = E_AND;
- e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr);
- tmp->type = E_NOT;
- tmp->right.expr = NULL;
+ // !(A || B) -> !A && !B
+ e = expr_alloc_and(expr_alloc_one(E_NOT, e->left.expr->left.expr),
+ expr_alloc_one(E_NOT, e->left.expr->right.expr));
e = expr_transform(e);
break;
case E_AND:
- // !(a && b) -> !a || !b
- tmp = e->left.expr;
- e->type = E_OR;
- e->right.expr = expr_alloc_one(E_NOT, tmp->right.expr);
- tmp->type = E_NOT;
- tmp->right.expr = NULL;
+ // !(A && B) -> !A || !B
+ e = expr_alloc_or(expr_alloc_one(E_NOT, e->left.expr->left.expr),
+ expr_alloc_one(E_NOT, e->left.expr->right.expr));
e = expr_transform(e);
break;
case E_SYMBOL:
- if (e->left.expr->left.sym == &symbol_yes) {
+ if (e->left.expr->left.sym == &symbol_yes)
// !'y' -> 'n'
- tmp = e->left.expr;
- free(e);
- e = tmp;
- e->type = E_SYMBOL;
- e->left.sym = &symbol_no;
- break;
- }
- if (e->left.expr->left.sym == &symbol_mod) {
+ e = expr_alloc_symbol(&symbol_no);
+ else if (e->left.expr->left.sym == &symbol_mod)
// !'m' -> 'm'
- tmp = e->left.expr;
- free(e);
- e = tmp;
- e->type = E_SYMBOL;
- e->left.sym = &symbol_mod;
- break;
- }
- if (e->left.expr->left.sym == &symbol_no) {
+ e = expr_alloc_symbol(&symbol_mod);
+ else if (e->left.expr->left.sym == &symbol_no)
// !'n' -> 'y'
- tmp = e->left.expr;
- free(e);
- e = tmp;
- e->type = E_SYMBOL;
- e->left.sym = &symbol_yes;
- break;
- }
+ e = expr_alloc_symbol(&symbol_yes);
break;
default:
;
@@ -803,10 +709,10 @@ struct expr *expr_transform(struct expr *e)
return e;
}
-int expr_contains_symbol(struct expr *dep, struct symbol *sym)
+bool expr_contains_symbol(struct expr *dep, struct symbol *sym)
{
if (!dep)
- return 0;
+ return false;
switch (dep->type) {
case E_AND:
@@ -828,7 +734,7 @@ int expr_contains_symbol(struct expr *dep, struct symbol *sym)
default:
;
}
- return 0;
+ return false;
}
bool expr_depends_symbol(struct expr *dep, struct symbol *sym)
@@ -915,18 +821,18 @@ struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symb
case E_EQUAL:
if (type == E_EQUAL) {
if (sym == &symbol_yes)
- return expr_copy(e);
+ return e;
if (sym == &symbol_mod)
return expr_alloc_symbol(&symbol_no);
if (sym == &symbol_no)
- return expr_alloc_one(E_NOT, expr_copy(e));
+ return expr_alloc_one(E_NOT, e);
} else {
if (sym == &symbol_yes)
- return expr_alloc_one(E_NOT, expr_copy(e));
+ return expr_alloc_one(E_NOT, e);
if (sym == &symbol_mod)
return expr_alloc_symbol(&symbol_yes);
if (sym == &symbol_no)
- return expr_copy(e);
+ return e;
}
break;
case E_SYMBOL:
@@ -981,7 +887,7 @@ static enum string_value_kind expr_parse_string(const char *str,
? kind : k_string;
}
-tristate expr_calc_value(struct expr *e)
+static tristate __expr_calc_value(struct expr *e)
{
tristate val1, val2;
const char *str1, *str2;
@@ -989,9 +895,6 @@ tristate expr_calc_value(struct expr *e)
union string_value lval = {}, rval = {};
int res;
- if (!e)
- return yes;
-
switch (e->type) {
case E_SYMBOL:
sym_calc_value(e->left.sym);
@@ -1055,6 +958,35 @@ tristate expr_calc_value(struct expr *e)
}
}
+/**
+ * expr_calc_value - return the tristate value of the given expression
+ * @e: expression
+ *
+ * Return: tristate value of the expression
+ */
+tristate expr_calc_value(struct expr *e)
+{
+ if (!e)
+ return yes;
+
+ if (!e->val_is_valid) {
+ e->val = __expr_calc_value(e);
+ e->val_is_valid = true;
+ }
+
+ return e->val;
+}
+
+/**
+ * expr_invalidate_all - invalidate all cached expression values
+ */
+void expr_invalidate_all(void)
+{
+ struct expr *e;
+
+ hash_for_each(expr_hashtable, e, node)
+ e->val_is_valid = false;
+}
+
static int expr_compare_type(enum expr_type t1, enum expr_type t2)
{
if (t1 == t2)
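
Stripped of kconfig specifics, the caching added above is plain memoization with explicit invalidation; a self-contained sketch:

#include <stdbool.h>
#include <stdio.h>

struct node {
	int val;
	bool val_is_valid;
};

/* Stand-in for the expensive recursive evaluation. */
static int compute(const struct node *n)
{
	(void)n;
	return 42;
}

static int cached_value(struct node *n)
{
	if (!n->val_is_valid) {
		n->val = compute(n);	/* evaluate once */
		n->val_is_valid = true;	/* reuse until invalidated */
	}
	return n->val;
}

int main(void)
{
	struct node n = { .val_is_valid = false };

	printf("%d %d\n", cached_value(&n), cached_value(&n));
	return 0;
}
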
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 2bc96cd28253..21578dcd4292 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -29,12 +29,26 @@ enum expr_type {
};
union expr_data {
- struct expr *expr;
- struct symbol *sym;
+ struct expr * const expr;
+ struct symbol * const sym;
+ void *_initdata;
};
+/**
+ * struct expr - expression
+ *
+ * @node: link node for the hash table
+ * @type: expression type
+ * @val: cached tristate value of the expression
+ * @val_is_valid: indicates whether @val is valid
+ * @left: left node
+ * @right: right node
+ */
struct expr {
+ struct hlist_node node;
enum expr_type type;
+ tristate val;
+ bool val_is_valid;
union expr_data left, right;
};
@@ -168,7 +182,6 @@ enum prop_type {
P_SELECT, /* select BAR */
P_IMPLY, /* imply BAR */
P_RANGE, /* range 7..100 (for a symbol) */
- P_SYMBOL, /* where a symbol is defined */
};
struct property {
@@ -276,14 +289,12 @@ struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e
struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2);
struct expr *expr_alloc_and(struct expr *e1, struct expr *e2);
struct expr *expr_alloc_or(struct expr *e1, struct expr *e2);
-struct expr *expr_copy(const struct expr *org);
-void expr_free(struct expr *e);
void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
-int expr_eq(struct expr *e1, struct expr *e2);
+bool expr_eq(struct expr *e1, struct expr *e2);
tristate expr_calc_value(struct expr *e);
struct expr *expr_eliminate_dups(struct expr *e);
struct expr *expr_transform(struct expr *e);
-int expr_contains_symbol(struct expr *dep, struct symbol *sym);
+bool expr_contains_symbol(struct expr *dep, struct symbol *sym);
bool expr_depends_symbol(struct expr *dep, struct symbol *sym);
struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym);
@@ -293,7 +304,7 @@ void expr_gstr_print(const struct expr *e, struct gstr *gs);
void expr_gstr_print_revdep(struct expr *e, struct gstr *gs,
tristate pr_type, const char *title);
-static inline int expr_is_yes(const struct expr *e)
+static inline bool expr_is_yes(const struct expr *e)
{
return !e || (e->type == E_SYMBOL && e->left.sym == &symbol_yes);
}
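
The const-qualified union members above make the shared nodes effectively write-once: ordinary code reads through the const view, and only the allocator writes, through _initdata. A minimal sketch of that trick (not kconfig code; relies on the usual union type-punning):

#include <stdio.h>

union data {
	int * const ptr;	/* readers get a const view */
	void *_initdata;	/* the allocator writes through this one */
};

int main(void)
{
	static int x = 7;
	union data d = { ._initdata = &x };

	/* d.ptr = &x; would not compile: the member is const */
	printf("%d\n", *d.ptr);
	return 0;
}
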
diff --git a/scripts/kconfig/internal.h b/scripts/kconfig/internal.h
index 02106eb7815e..d0ffce2dfbba 100644
--- a/scripts/kconfig/internal.h
+++ b/scripts/kconfig/internal.h
@@ -11,6 +11,12 @@ extern HASHTABLE_DECLARE(sym_hashtable, SYMBOL_HASHSIZE);
#define for_all_symbols(sym) \
hash_for_each(sym_hashtable, sym, node)
+#define EXPR_HASHSIZE (1U << 14)
+
+extern HASHTABLE_DECLARE(expr_hashtable, EXPR_HASHSIZE);
+
+void expr_invalidate_all(void);
+
struct menu;
extern struct menu *current_menu, *current_entry;
diff --git a/scripts/kconfig/lexer.l b/scripts/kconfig/lexer.l
index 8dd597c4710d..9c2cdfc33c6f 100644
--- a/scripts/kconfig/lexer.l
+++ b/scripts/kconfig/lexer.l
@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <string.h>
+#include <xalloc.h>
#include "lkc.h"
#include "preprocess.h"
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 401bdf36323a..b8ebc3094a23 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -51,13 +51,7 @@ static inline void xfwrite(const void *str, size_t len, size_t count, FILE *out)
}
/* util.c */
-unsigned int strhash(const char *s);
const char *file_lookup(const char *name);
-void *xmalloc(size_t size);
-void *xcalloc(size_t nmemb, size_t size);
-void *xrealloc(void *p, size_t size);
-char *xstrdup(const char *s);
-char *xstrndup(const char *s, size_t n);
/* lexer.l */
int yylex(void);
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 3887eac75289..84ea9215c0a7 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -20,6 +20,7 @@
#include <unistd.h>
#include <list.h>
+#include <xalloc.h>
#include "lkc.h"
#include "lxdialog/dialog.h"
#include "mnconf-common.h"
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 323cc0b62be6..4addd33749bb 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -9,6 +9,7 @@
#include <string.h>
#include <list.h>
+#include <xalloc.h>
#include "lkc.h"
#include "internal.h"
@@ -78,10 +79,8 @@ void menu_add_entry(struct symbol *sym)
*last_entry_ptr = menu;
last_entry_ptr = &menu->next;
current_entry = menu;
- if (sym) {
- menu_add_symbol(P_SYMBOL, sym, NULL);
+ if (sym)
list_add_tail(&menu->link, &sym->menus);
- }
}
struct menu *menu_add_menu(void)
@@ -108,12 +107,13 @@ static struct expr *rewrite_m(struct expr *e)
switch (e->type) {
case E_NOT:
- e->left.expr = rewrite_m(e->left.expr);
+ e = expr_alloc_one(E_NOT, rewrite_m(e->left.expr));
break;
case E_OR:
case E_AND:
- e->left.expr = rewrite_m(e->left.expr);
- e->right.expr = rewrite_m(e->right.expr);
+ e = expr_alloc_two(e->type,
+ rewrite_m(e->left.expr),
+ rewrite_m(e->right.expr));
break;
case E_SYMBOL:
/* change 'm' into 'm' && MODULES */
@@ -193,21 +193,11 @@ struct property *menu_add_prompt(enum prop_type type, const char *prompt,
struct menu *menu = current_entry;
while ((menu = menu->parent) != NULL) {
- struct expr *dup_expr;
if (!menu->visibility)
continue;
- /*
- * Do not add a reference to the menu's visibility
- * expression but use a copy of it. Otherwise the
- * expression reduction functions will modify
- * expressions that have multiple references which
- * can cause unwanted side effects.
- */
- dup_expr = expr_copy(menu->visibility);
-
prop->visible.expr = expr_alloc_and(prop->visible.expr,
- dup_expr);
+ menu->visibility);
}
}
@@ -323,7 +313,7 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
*/
basedep = rewrite_m(menu->dep);
basedep = expr_transform(basedep);
- basedep = expr_alloc_and(expr_copy(parent->dep), basedep);
+ basedep = expr_alloc_and(parent->dep, basedep);
basedep = expr_eliminate_dups(basedep);
menu->dep = basedep;
@@ -367,7 +357,7 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
*/
dep = rewrite_m(prop->visible.expr);
dep = expr_transform(dep);
- dep = expr_alloc_and(expr_copy(basedep), dep);
+ dep = expr_alloc_and(basedep, dep);
dep = expr_eliminate_dups(dep);
prop->visible.expr = dep;
@@ -378,11 +368,11 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
if (prop->type == P_SELECT) {
struct symbol *es = prop_get_symbol(prop);
es->rev_dep.expr = expr_alloc_or(es->rev_dep.expr,
- expr_alloc_and(expr_alloc_symbol(menu->sym), expr_copy(dep)));
+ expr_alloc_and(expr_alloc_symbol(menu->sym), dep));
} else if (prop->type == P_IMPLY) {
struct symbol *es = prop_get_symbol(prop);
es->implied.expr = expr_alloc_or(es->implied.expr,
- expr_alloc_and(expr_alloc_symbol(menu->sym), expr_copy(dep)));
+ expr_alloc_and(expr_alloc_symbol(menu->sym), dep));
}
}
}
@@ -442,22 +432,18 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
*/
dep = expr_trans_compare(dep, E_UNEQUAL, &symbol_no);
dep = expr_eliminate_dups(expr_transform(dep));
- dep2 = expr_copy(basedep);
+ dep2 = basedep;
expr_eliminate_eq(&dep, &dep2);
- expr_free(dep);
if (!expr_is_yes(dep2)) {
/* Not superset, quit */
- expr_free(dep2);
break;
}
/* Superset, put in submenu */
- expr_free(dep2);
next:
_menu_finalize(menu, false);
menu->parent = parent;
last_menu = menu;
}
- expr_free(basedep);
if (last_menu) {
parent->list = parent->next;
parent->next = last_menu->next;
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index b91ca47e9e9a..063b4f7ccbdb 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include <list.h>
+#include <xalloc.h>
#include "lkc.h"
#include "mnconf-common.h"
#include "nconf.h"
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 25a7263ef3c8..72b605efe549 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -4,6 +4,7 @@
*
* Derived from menuconfig.
*/
+#include <xalloc.h>
#include "nconf.h"
#include "lkc.h"
diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
index 61900feb4254..1ad60f9e164e 100644
--- a/scripts/kconfig/parser.y
+++ b/scripts/kconfig/parser.y
@@ -11,6 +11,7 @@
#include <string.h>
#include <stdbool.h>
+#include <xalloc.h>
#include "lkc.h"
#include "internal.h"
#include "preprocess.h"
@@ -530,14 +531,6 @@ void conf_parse(const char *name)
yydebug = 1;
yyparse();
- /*
- * FIXME:
- * cur_filename and cur_lineno are used even after yyparse();
- * menu_finalize() calls menu_add_symbol(). This should be fixed.
- */
- cur_filename = "<none>";
- cur_lineno = 0;
-
str_printf(&autoconf_cmd,
"\n"
"$(autoconfig): $(deps_config)\n"
@@ -715,10 +708,6 @@ static void print_symbol(FILE *out, const struct menu *menu)
print_quoted_string(out, prop->text);
fputc('\n', out);
break;
- case P_SYMBOL:
- fputs( " symbol ", out);
- fprintf(out, "%s\n", prop->menu->sym->name);
- break;
default:
fprintf(out, " unknown prop %d!\n", prop->type);
break;
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 67d1fb95c491..783abcaa5cc5 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -11,6 +11,7 @@
#include <array_size.h>
#include <list.h>
+#include <xalloc.h>
#include "internal.h"
#include "lkc.h"
#include "preprocess.h"
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 7d239c032b3d..97fce13e551e 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -22,6 +22,7 @@
#include <stdlib.h>
+#include <xalloc.h>
#include "lkc.h"
#include "qconf.h"
@@ -1094,7 +1095,6 @@ QString ConfigInfoView::debug_info(struct symbol *sym)
case P_RANGE:
case P_COMMENT:
case P_IMPLY:
- case P_SYMBOL:
stream << prop_get_type_name(prop->type);
stream << ": ";
expr_print(prop->expr, expr_print_help,
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 71502abd3b12..a3af93aaaf32 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -9,6 +9,8 @@
#include <string.h>
#include <regex.h>
+#include <hash.h>
+#include <xalloc.h>
#include "internal.h"
#include "lkc.h"
@@ -517,6 +519,7 @@ void sym_clear_all_valid(void)
for_all_symbols(sym)
sym->flags &= ~SYMBOL_VALID;
+ expr_invalidate_all();
conf_set_changed(true);
sym_calc_value(modules_sym);
}
@@ -892,7 +895,7 @@ struct symbol *sym_lookup(const char *name, int flags)
case 'n': return &symbol_no;
}
}
- hash = strhash(name);
+ hash = hash_str(name);
hash_for_each_possible(sym_hashtable, symbol, node, hash) {
if (symbol->name &&
@@ -935,7 +938,7 @@ struct symbol *sym_find(const char *name)
case 'n': return &symbol_no;
}
}
- hash = strhash(name);
+ hash = hash_str(name);
hash_for_each_possible(sym_hashtable, symbol, node, hash) {
if (symbol->name &&
@@ -1321,8 +1324,6 @@ const char *prop_get_type_name(enum prop_type type)
return "imply";
case P_RANGE:
return "range";
- case P_SYMBOL:
- return "symbol";
case P_UNKNOWN:
break;
}
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index 696ff477671e..5cdcee144b58 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -8,19 +8,11 @@
#include <stdlib.h>
#include <string.h>
+#include <hash.h>
#include <hashtable.h>
+#include <xalloc.h>
#include "lkc.h"
-unsigned int strhash(const char *s)
-{
- /* fnv32 hash */
- unsigned int hash = 2166136261U;
-
- for (; *s; s++)
- hash = (hash ^ *s) * 0x01000193;
- return hash;
-}
-
/* hash table of all parsed Kconfig files */
static HASHTABLE_DEFINE(file_hashtable, 1U << 11);
@@ -34,7 +26,7 @@ const char *file_lookup(const char *name)
{
struct file *file;
size_t len;
- int hash = strhash(name);
+ int hash = hash_str(name);
hash_for_each_possible(file_hashtable, file, node, hash)
if (!strcmp(name, file->name))
@@ -102,52 +94,3 @@ char *str_get(const struct gstr *gs)
{
return gs->s;
}
-
-void *xmalloc(size_t size)
-{
- void *p = malloc(size);
- if (p)
- return p;
- fprintf(stderr, "Out of memory.\n");
- exit(1);
-}
-
-void *xcalloc(size_t nmemb, size_t size)
-{
- void *p = calloc(nmemb, size);
- if (p)
- return p;
- fprintf(stderr, "Out of memory.\n");
- exit(1);
-}
-
-void *xrealloc(void *p, size_t size)
-{
- p = realloc(p, size);
- if (p)
- return p;
- fprintf(stderr, "Out of memory.\n");
- exit(1);
-}
-
-char *xstrdup(const char *s)
-{
- char *p;
-
- p = strdup(s);
- if (p)
- return p;
- fprintf(stderr, "Out of memory.\n");
- exit(1);
-}
-
-char *xstrndup(const char *s, size_t n)
-{
- char *p;
-
- p = strndup(s, n);
- if (p)
- return p;
- fprintf(stderr, "Out of memory.\n");
- exit(1);
-}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 070a319140e8..a9b3f34a78d2 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -107,20 +107,8 @@ vmlinux_link()
# ${1} - vmlinux image
gen_btf()
{
- local pahole_ver
local btf_data=${1}.btf.o
- if ! [ -x "$(command -v ${PAHOLE})" ]; then
- echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available"
- return 1
- fi
-
- pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/')
- if [ "${pahole_ver}" -lt "116" ]; then
- echo >&2 "BTF: ${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.16"
- return 1
- fi
-
info BTF "${btf_data}"
LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1}
@@ -215,7 +203,7 @@ kallsymso=
strip_debug=
if is_enabled CONFIG_KALLSYMS; then
- truncate -s0 .tmp_vmlinux.kallsyms0.syms
+ true > .tmp_vmlinux.kallsyms0.syms
kallsyms .tmp_vmlinux.kallsyms0.syms .tmp_vmlinux0.kallsyms
fi
@@ -284,7 +272,7 @@ strip_debug=
vmlinux_link vmlinux
# fill in BTF IDs
-if is_enabled CONFIG_DEBUG_INFO_BTF && is_enabled CONFIG_BPF; then
+if is_enabled CONFIG_DEBUG_INFO_BTF; then
info BTFIDS vmlinux
${RESOLVE_BTFIDS} vmlinux
fi
diff --git a/scripts/macro_checker.py b/scripts/macro_checker.py
new file mode 100755
index 000000000000..ba550982e98f
--- /dev/null
+++ b/scripts/macro_checker.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+# Author: Julian Sun <sunjunchao2870@gmail.com>
+
+""" Find macro definitions with unused parameters. """
+
+import argparse
+import os
+import re
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("path", type=str, help="The file or dir path that needs check")
+parser.add_argument("-v", "--verbose", action="store_true",
+ help="Check conditional macros, but may lead to more false positives")
+args = parser.parse_args()
+
+macro_pattern = r"#define\s+(\w+)\(([^)]*)\)"
+# The variables below are used to reduce false positives
+fp_patterns = [r"\s*do\s*\{\s*\}\s*while\s*\(\s*0\s*\)",
+ r"\(?0\)?", r"\(?1\)?"]
+correct_macros = []
+cond_compile_mark = "#if"
+cond_compile_end = "#endif"
+
+def check_macro(macro_line, report):
+ match = re.match(macro_pattern, macro_line)
+ if match:
+ macro_def = re.sub(macro_pattern, '', macro_line)
+ identifier = match.group(1)
+ content = match.group(2)
+ arguments = [item.strip() for item in content.split(',') if item.strip()]
+
+ macro_def = macro_def.strip()
+ if not macro_def:
+ return
+ # used to reduce false positives, like #define endfor_nexthops(rt) }
+ if len(macro_def) == 1:
+ return
+
+ for fp_pattern in fp_patterns:
+ if re.match(fp_pattern, macro_def):
+ return
+
+ for arg in arguments:
+ # used to reduce false positives
+ if "..." in arg:
+ return
+ for arg in arguments:
+ if arg not in macro_def and not report:
+ return
+ # if there is a correct macro with the same name, do not report it.
+ if arg not in macro_def and identifier not in correct_macros:
+ print(f"Argument {arg} is not used in function-line macro {identifier}")
+ return
+
+ correct_macros.append(identifier)
+
+
+# remove comments and surrounding whitespace
+def macro_strip(macro):
+ # match // line comments and /* ... */ block comments
+ comment_pattern1 = r"//.*"
+ comment_pattern2 = r"/\*.*?\*/"
+
+ macro = macro.strip()
+ macro = re.sub(comment_pattern1, '', macro)
+ macro = re.sub(comment_pattern2, '', macro)
+
+ return macro
+
+def file_check_macro(file_path, report):
+ # number of conditional compiling
+ cond_compile = 0
+ # only check .c and .h file
+ if not file_path.endswith(".c") and not file_path.endswith(".h"):
+ return
+
+ with open(file_path, "r") as f:
+ while True:
+ line = f.readline()
+ if not line:
+ break
+ line = line.strip()
+ if line.startswith(cond_compile_mark):
+ cond_compile += 1
+ continue
+ if line.startswith(cond_compile_end):
+ cond_compile -= 1
+ continue
+
+ macro = re.match(macro_pattern, line)
+ if macro:
+ macro = macro_strip(macro.string)
+ while macro[-1] == '\\':
+ macro = macro[0:-1]
+ macro = macro.strip()
+ macro += f.readline()
+ macro = macro_strip(macro)
+ if not args.verbose:
+ if file_path.endswith(".c") and cond_compile != 0:
+ continue
+ # 1 is for #ifdef xxx at the beginning of the header file
+ if file_path.endswith(".h") and cond_compile != 1:
+ continue
+ check_macro(macro, report)
+
+def get_correct_macros(path):
+ file_check_macro(path, False)
+
+def dir_check_macro(dir_path):
+
+ for dentry in os.listdir(dir_path):
+ path = os.path.join(dir_path, dentry)
+ if os.path.isdir(path):
+ dir_check_macro(path)
+ elif os.path.isfile(path):
+ get_correct_macros(path)
+ file_check_macro(path, True)
+
+
+def main():
+ if os.path.isfile(args.path):
+ get_correct_macros(args.path)
+ file_check_macro(args.path, True)
+ elif os.path.isdir(args.path):
+ dir_check_macro(args.path)
+ else:
+ print(f"{args.path} doesn't exit or is neither a file nor a dir")
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
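
For instance, the checker flags a function-like macro whose parameter never appears in its body (hypothetical example):

/* macro_checker.py would report:
 *   Argument bar is not used in function-line macro FOO
 */
#define FOO(bar)	do_something_else()
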
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index 518200813d4e..9c7b404defbd 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -153,6 +153,10 @@ int main(void)
DEVID_FIELD(i3c_device_id, part_id);
DEVID_FIELD(i3c_device_id, extra_info);
+ DEVID(slim_device_id);
+ DEVID_FIELD(slim_device_id, manf_id);
+ DEVID_FIELD(slim_device_id, prod_code);
+
DEVID(spi_device_id);
DEVID_FIELD(spi_device_id, name);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 5d1c61fa5a55..99dce93a4188 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -960,6 +960,16 @@ static int do_i3c_entry(const char *filename, void *symval,
return 1;
}
+static int do_slim_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD(symval, slim_device_id, manf_id);
+ DEF_FIELD(symval, slim_device_id, prod_code);
+
+ sprintf(alias, "slim:%x:%x:*", manf_id, prod_code);
+
+ return 1;
+}
+
/* Looks like: spi:S */
static int do_spi_entry(const char *filename, void *symval,
char *alias)
@@ -1555,6 +1565,7 @@ static const struct devtable devtable[] = {
{"rpmsg", SIZE_rpmsg_device_id, do_rpmsg_entry},
{"i2c", SIZE_i2c_device_id, do_i2c_entry},
{"i3c", SIZE_i3c_device_id, do_i3c_entry},
+ {"slim", SIZE_slim_device_id, do_slim_entry},
{"spi", SIZE_spi_device_id, do_spi_entry},
{"dmi", SIZE_dmi_system_id, do_dmi_entry},
{"platform", SIZE_platform_device_id, do_platform_entry},
diff --git a/scripts/mod/mk_elfconfig.c b/scripts/mod/mk_elfconfig.c
index 680eade89be1..e8cee4e4bc73 100644
--- a/scripts/mod/mk_elfconfig.c
+++ b/scripts/mod/mk_elfconfig.c
@@ -8,7 +8,6 @@ int
main(int argc, char **argv)
{
unsigned char ei[EI_NIDENT];
- union { short s; char c[2]; } endian_test;
if (fread(ei, 1, EI_NIDENT, stdin) != EI_NIDENT) {
fprintf(stderr, "Error: input truncated\n");
@@ -28,30 +27,6 @@ main(int argc, char **argv)
default:
exit(1);
}
- switch (ei[EI_DATA]) {
- case ELFDATA2LSB:
- printf("#define KERNEL_ELFDATA ELFDATA2LSB\n");
- break;
- case ELFDATA2MSB:
- printf("#define KERNEL_ELFDATA ELFDATA2MSB\n");
- break;
- default:
- exit(1);
- }
-
- if (sizeof(unsigned long) == 4) {
- printf("#define HOST_ELFCLASS ELFCLASS32\n");
- } else if (sizeof(unsigned long) == 8) {
- printf("#define HOST_ELFCLASS ELFCLASS64\n");
- }
-
- endian_test.s = 0x0102;
- if (memcmp(endian_test.c, "\x01\x02", 2) == 0)
- printf("#define HOST_ELFDATA ELFDATA2MSB\n");
- else if (memcmp(endian_test.c, "\x02\x01", 2) == 0)
- printf("#define HOST_ELFDATA ELFDATA2LSB\n");
- else
- exit(1);
return 0;
}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index d16d0ace2775..107393a8c48a 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -23,6 +23,7 @@
#include <hashtable.h>
#include <list.h>
+#include <xalloc.h>
#include "modpost.h"
#include "../../include/linux/license.h"
@@ -50,6 +51,9 @@ static bool error_occurred;
static bool extra_warn;
+bool target_is_big_endian;
+bool host_is_big_endian;
+
/*
* Cut off the warnings when there are too many. This typically occurs when
* vmlinux is missing. ('make modules' without building vmlinux.)
@@ -63,20 +67,15 @@ static unsigned int nr_unresolved;
#define MODULE_NAME_LEN (64 - sizeof(Elf_Addr))
-void modpost_log(enum loglevel loglevel, const char *fmt, ...)
+void modpost_log(bool is_error, const char *fmt, ...)
{
va_list arglist;
- switch (loglevel) {
- case LOG_WARN:
- fprintf(stderr, "WARNING: ");
- break;
- case LOG_ERROR:
+ if (is_error) {
fprintf(stderr, "ERROR: ");
error_occurred = true;
- break;
- default: /* invalid loglevel, ignore */
- break;
+ } else {
+ fprintf(stderr, "WARNING: ");
}
fprintf(stderr, "modpost: ");
@@ -94,14 +93,6 @@ static inline bool strends(const char *str, const char *postfix)
return strcmp(str + strlen(str) - strlen(postfix), postfix) == 0;
}
-void *do_nofail(void *ptr, const char *expr)
-{
- if (!ptr)
- fatal("Memory allocation failure: %s.\n", expr);
-
- return ptr;
-}
-
char *read_text_file(const char *filename)
{
struct stat st;
@@ -120,7 +111,7 @@ char *read_text_file(const char *filename)
exit(1);
}
- buf = NOFAIL(malloc(st.st_size + 1));
+ buf = xmalloc(st.st_size + 1);
nbytes = st.st_size;
@@ -178,7 +169,7 @@ static struct module *new_module(const char *name, size_t namelen)
{
struct module *mod;
- mod = NOFAIL(malloc(sizeof(*mod) + namelen + 1));
+ mod = xmalloc(sizeof(*mod) + namelen + 1);
memset(mod, 0, sizeof(*mod));
INIT_LIST_HEAD(&mod->exported_symbols);
@@ -237,7 +228,7 @@ static inline unsigned int tdb_hash(const char *name)
**/
static struct symbol *alloc_symbol(const char *name)
{
- struct symbol *s = NOFAIL(malloc(sizeof(*s) + strlen(name) + 1));
+ struct symbol *s = xmalloc(sizeof(*s) + strlen(name) + 1);
memset(s, 0, sizeof(*s));
strcpy(s->name, name);
@@ -310,8 +301,7 @@ static void add_namespace(struct list_head *head, const char *namespace)
struct namespace_list *ns_entry;
if (!contains_namespace(head, namespace)) {
- ns_entry = NOFAIL(malloc(sizeof(*ns_entry) +
- strlen(namespace) + 1));
+ ns_entry = xmalloc(sizeof(*ns_entry) + strlen(namespace) + 1);
strcpy(ns_entry->namespace, namespace);
list_add_tail(&ns_entry->list, head);
}
@@ -366,7 +356,7 @@ static struct symbol *sym_add_exported(const char *name, struct module *mod,
s = alloc_symbol(name);
s->module = mod;
s->is_gpl_only = gpl_only;
- s->namespace = NOFAIL(strdup(namespace));
+ s->namespace = xstrdup(namespace);
list_add_tail(&s->list, &mod->exported_symbols);
hash_add_symbol(s);
@@ -438,6 +428,18 @@ static int parse_elf(struct elf_info *info, const char *filename)
/* Not an ELF file - silently ignore it */
return 0;
}
+
+ switch (hdr->e_ident[EI_DATA]) {
+ case ELFDATA2LSB:
+ target_is_big_endian = false;
+ break;
+ case ELFDATA2MSB:
+ target_is_big_endian = true;
+ break;
+ default:
+ fatal("target endian is unknown\n");
+ }
+
/* Fix endianness in ELF header */
hdr->e_type = TO_NATIVE(hdr->e_type);
hdr->e_machine = TO_NATIVE(hdr->e_machine);
@@ -622,7 +624,7 @@ static void handle_symbol(struct module *mod, struct elf_info *info,
if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
break;
if (symname[0] == '.') {
- char *munged = NOFAIL(strdup(symname));
+ char *munged = xstrdup(symname);
munged[0] = '_';
munged[1] = toupper(munged[1]);
symname = munged;
@@ -690,10 +692,7 @@ static char *get_modinfo(struct elf_info *info, const char *tag)
static const char *sym_name(struct elf_info *elf, Elf_Sym *sym)
{
- if (sym)
- return elf->strtab + sym->st_name;
- else
- return "(unknown)";
+ return sym ? elf->strtab + sym->st_name : "";
}
/*
@@ -1006,6 +1005,7 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
Elf_Sym *from;
const char *tosym;
const char *fromsym;
+ char taddr_str[16];
from = find_fromsym(elf, faddr, fsecndx);
fromsym = sym_name(elf, from);
@@ -1019,10 +1019,17 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
sec_mismatch_count++;
- warn("%s: section mismatch in reference: %s+0x%x (section: %s) -> %s (section: %s)\n",
- modname, fromsym,
- (unsigned int)(faddr - (from ? from->st_value : 0)),
- fromsec, tosym, tosec);
+ if (!tosym[0])
+ snprintf(taddr_str, sizeof(taddr_str), "0x%x", (unsigned int)taddr);
+
+ /*
+ * The format for the reference source: <symbol_name>+<offset> or <address>
+ * The format for the reference destination: <symbol_name> or <address>
+ */
+ warn("%s: section mismatch in reference: %s%s0x%x (section: %s) -> %s (section: %s)\n",
+ modname, fromsym, fromsym[0] ? "+" : "",
+ (unsigned int)(faddr - (fromsym[0] ? from->st_value : 0)),
+ fromsec, tosym[0] ? tosym : taddr_str, tosec);
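
Rendered, the two shapes of the message look like this (module, symbol, offset and section names made up):

WARNING: modpost: drivers/foo/bar.o: section mismatch in reference: foo_probe+0x14 (section: .text) -> bar_table (section: .init.data)
WARNING: modpost: drivers/foo/bar.o: section mismatch in reference: 0x40 (section: .text) -> 0x1000 (section: .init.data)
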
if (mismatch->mismatch == EXTABLE_TO_NON_TEXT) {
if (match(tosec, mismatch->bad_tosec))
@@ -1662,7 +1669,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
{
if (buf->size - buf->pos < len) {
buf->size += len + SZ;
- buf->p = NOFAIL(realloc(buf->p, buf->size));
+ buf->p = xrealloc(buf->p, buf->size);
}
strncpy(buf->p + buf->pos, s, len);
buf->pos += len;
@@ -1677,7 +1684,7 @@ static void check_exports(struct module *mod)
exp = find_symbol(s->name);
if (!exp) {
if (!s->weak && nr_unresolved++ < MAX_UNRESOLVED_REPORTS)
- modpost_log(warn_unresolved ? LOG_WARN : LOG_ERROR,
+ modpost_log(!warn_unresolved,
"\"%s\" [%s.ko] undefined!\n",
s->name, mod->name);
continue;
@@ -1700,7 +1707,7 @@ static void check_exports(struct module *mod)
basename = mod->name;
if (!contains_namespace(&mod->imported_namespaces, exp->namespace)) {
- modpost_log(allow_missing_ns_imports ? LOG_WARN : LOG_ERROR,
+ modpost_log(!allow_missing_ns_imports,
"module %s uses symbol %s from namespace %s, but does not import it.\n",
basename, exp->name, exp->namespace);
add_namespace(&mod->missing_namespaces, exp->namespace);
@@ -1748,26 +1755,9 @@ static void check_modname_len(struct module *mod)
static void add_header(struct buffer *b, struct module *mod)
{
buf_printf(b, "#include <linux/module.h>\n");
- /*
- * Include build-salt.h after module.h in order to
- * inherit the definitions.
- */
- buf_printf(b, "#define INCLUDE_VERMAGIC\n");
- buf_printf(b, "#include <linux/build-salt.h>\n");
- buf_printf(b, "#include <linux/elfnote-lto.h>\n");
buf_printf(b, "#include <linux/export-internal.h>\n");
- buf_printf(b, "#include <linux/vermagic.h>\n");
buf_printf(b, "#include <linux/compiler.h>\n");
buf_printf(b, "\n");
- buf_printf(b, "#ifdef CONFIG_UNWINDER_ORC\n");
- buf_printf(b, "#include <asm/orc_header.h>\n");
- buf_printf(b, "ORC_HEADER;\n");
- buf_printf(b, "#endif\n");
- buf_printf(b, "\n");
- buf_printf(b, "BUILD_SALT;\n");
- buf_printf(b, "BUILD_LTO_INFO;\n");
- buf_printf(b, "\n");
- buf_printf(b, "MODULE_INFO(vermagic, VERMAGIC_STRING);\n");
buf_printf(b, "MODULE_INFO(name, KBUILD_MODNAME);\n");
buf_printf(b, "\n");
buf_printf(b, "__visible struct module __this_module\n");
@@ -1785,12 +1775,6 @@ static void add_header(struct buffer *b, struct module *mod)
if (!external_module)
buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
- buf_printf(b,
- "\n"
- "#ifdef CONFIG_MITIGATION_RETPOLINE\n"
- "MODULE_INFO(retpoline, \"Y\");\n"
- "#endif\n");
-
if (strstarts(mod->name, "drivers/staging"))
buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n");
@@ -1947,7 +1931,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
if (st.st_size != b->pos)
goto close_write;
- tmp = NOFAIL(malloc(b->pos));
+ tmp = xmalloc(b->pos);
if (fread(tmp, 1, b->pos, file) != b->pos)
goto free_write;
@@ -2117,6 +2101,25 @@ struct dump_list {
const char *file;
};
+static void check_host_endian(void)
+{
+ static const union {
+ short s;
+ char c[2];
+ } endian_test = { .c = {0x01, 0x02} };
+
+ switch (endian_test.s) {
+ case 0x0102:
+ host_is_big_endian = true;
+ break;
+ case 0x0201:
+ host_is_big_endian = false;
+ break;
+ default:
+ fatal("Unknown host endian\n");
+ }
+}
+
int main(int argc, char **argv)
{
struct module *mod;
@@ -2133,7 +2136,7 @@ int main(int argc, char **argv)
external_module = true;
break;
case 'i':
- dl = NOFAIL(malloc(sizeof(*dl)));
+ dl = xmalloc(sizeof(*dl));
dl->file = optarg;
list_add_tail(&dl->list, &dump_lists);
break;
@@ -2181,6 +2184,8 @@ int main(int argc, char **argv)
}
}
+ check_host_endian();
+
list_for_each_entry_safe(dl, dl2, &dump_lists, list) {
read_dump(dl->file);
list_del(&dl->list);
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 58197b34a3c8..ada3a36cc4bc 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -62,22 +62,11 @@
x); \
})
-#if KERNEL_ELFDATA != HOST_ELFDATA
-
-#define TO_NATIVE(x) (bswap(x))
-
-#else /* endianness matches */
-
-#define TO_NATIVE(x) (x)
-
-#endif
-
-#define NOFAIL(ptr) do_nofail((ptr), #ptr)
+#define TO_NATIVE(x) \
+ (target_is_big_endian == host_is_big_endian ? x : bswap(x))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-void *do_nofail(void *ptr, const char *expr);
-
struct buffer {
char *p;
int pos;
@@ -187,17 +176,14 @@ void add_moddevtable(struct buffer *buf, struct module *mod);
void get_src_version(const char *modname, char sum[], unsigned sumlen);
/* from modpost.c */
+extern bool target_is_big_endian;
+extern bool host_is_big_endian;
char *read_text_file(const char *filename);
char *get_line(char **stringp);
void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym);
-enum loglevel {
- LOG_WARN,
- LOG_ERROR,
-};
-
void __attribute__((format(printf, 2, 3)))
-modpost_log(enum loglevel loglevel, const char *fmt, ...);
+modpost_log(bool is_error, const char *fmt, ...);
/*
* warn - show the given message, then let modpost continue running, still
@@ -212,6 +198,6 @@ modpost_log(enum loglevel loglevel, const char *fmt, ...);
* fatal - show the given message, and bail out immediately. This should be
* used when there is no point to continue running modpost.
*/
-#define warn(fmt, args...) modpost_log(LOG_WARN, fmt, ##args)
-#define error(fmt, args...) modpost_log(LOG_ERROR, fmt, ##args)
+#define warn(fmt, args...) modpost_log(false, fmt, ##args)
+#define error(fmt, args...) modpost_log(true, fmt, ##args)
#define fatal(fmt, args...) do { error(fmt, ##args); exit(1); } while (1)
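
With the compile-time KERNEL_ELFDATA/HOST_ELFDATA test gone, the swap decision happens at run time; the same idea in a stand-alone sketch (using the GCC/Clang bswap builtin for brevity):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool host_is_big_endian(void)
{
	const union { uint16_t s; uint8_t c[2]; } probe = { .s = 0x0102 };

	return probe.c[0] == 0x01;
}

/* Byte-swap a 32-bit field only when target and host byte order differ. */
static uint32_t to_native32(uint32_t x, bool target_is_big_endian)
{
	return target_is_big_endian == host_is_big_endian()
		? x : __builtin_bswap32(x);
}

int main(void)
{
	printf("%#x\n", to_native32(0x12345678u, true));
	return 0;
}
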
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index dc4878502276..e7d2da45b0df 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -8,6 +8,8 @@
#include <errno.h>
#include <string.h>
#include <limits.h>
+
+#include <xalloc.h>
#include "modpost.h"
/*
@@ -305,7 +307,7 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
const char *base;
int dirlen, ret = 0, check_files = 0;
- cmd = NOFAIL(malloc(strlen(objfile) + sizeof("..cmd")));
+ cmd = xmalloc(strlen(objfile) + sizeof("..cmd"));
base = strrchr(objfile, '/');
if (base) {
@@ -316,7 +318,7 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
dirlen = 0;
sprintf(cmd, ".%s.cmd", objfile);
}
- dir = NOFAIL(malloc(dirlen + 1));
+ dir = xmalloc(dirlen + 1);
strncpy(dir, objfile, dirlen);
dir[dirlen] = '\0';
diff --git a/scripts/mod/symsearch.c b/scripts/mod/symsearch.c
index aa4ed51f9960..b9737b92f7f8 100644
--- a/scripts/mod/symsearch.c
+++ b/scripts/mod/symsearch.c
@@ -4,7 +4,7 @@
* Helper functions for finding the symbol in an ELF which is "nearest"
* to a given address.
*/
-
+#include <xalloc.h>
#include "modpost.h"
struct syminfo {
@@ -125,8 +125,8 @@ void symsearch_init(struct elf_info *elf)
{
unsigned int table_size = symbol_count(elf);
- elf->symsearch = NOFAIL(malloc(sizeof(struct symsearch) +
- sizeof(struct syminfo) * table_size));
+ elf->symsearch = xmalloc(sizeof(struct symsearch) +
+ sizeof(struct syminfo) * table_size);
elf->symsearch->table_size = table_size;
symsearch_populate(elf, elf->symsearch->table, table_size);
diff --git a/scripts/module-common.c b/scripts/module-common.c
new file mode 100644
index 000000000000..12fbc6d3aae8
--- /dev/null
+++ b/scripts/module-common.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+/*
+ * Include build-salt.h after module.h in order to
+ * inherit the definitions.
+ */
+#define INCLUDE_VERMAGIC
+#include <linux/build-salt.h>
+#include <linux/elfnote-lto.h>
+#include <linux/vermagic.h>
+
+#ifdef CONFIG_UNWINDER_ORC
+#include <asm/orc_header.h>
+ORC_HEADER;
+#endif
+
+BUILD_SALT;
+BUILD_LTO_INFO;
+
+MODULE_INFO(vermagic, VERMAGIC_STRING);
+
+#ifdef CONFIG_MITIGATION_RETPOLINE
+MODULE_INFO(retpoline, "Y");
+#endif
diff --git a/scripts/package/PKGBUILD b/scripts/package/PKGBUILD
index 663ce300dd06..f83493838cf9 100644
--- a/scripts/package/PKGBUILD
+++ b/scripts/package/PKGBUILD
@@ -3,10 +3,13 @@
# Contributor: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
pkgbase=${PACMAN_PKGBASE:-linux-upstream}
-pkgname=("${pkgbase}" "${pkgbase}-api-headers")
-if grep -q CONFIG_MODULES=y include/config/auto.conf; then
- pkgname+=("${pkgbase}-headers")
-fi
+pkgname=("${pkgbase}")
+
+_extrapackages=${PACMAN_EXTRAPACKAGES-headers api-headers debug}
+for pkg in $_extrapackages; do
+ pkgname+=("${pkgbase}-${pkg}")
+done
+
pkgver="${KERNELRELEASE//-/_}"
# The PKGBUILD is evaluated multiple times.
# Running scripts/build-version from here would introduce inconsistencies.
@@ -33,11 +36,17 @@ makedepends=(
)
options=(!debug !strip !buildflags !makeflags)
-build() {
+_prologue() {
# MAKEFLAGS from makepkg.conf override the ones inherited from kbuild.
# Bypass this override with a custom variable.
export MAKEFLAGS="${KBUILD_MAKEFLAGS}"
- cd "${objtree}"
+
+ # Kbuild works in the output directory, where this PKGBUILD is located.
+ cd "$(dirname "${BASH_SOURCE[0]}")"
+}
+
+build() {
+ _prologue
${MAKE} KERNELRELEASE="${KERNELRELEASE}" KBUILD_BUILD_VERSION="${pkgrel}"
}
@@ -45,10 +54,10 @@ build() {
_package() {
pkgdesc="The ${pkgdesc} kernel and modules"
- export MAKEFLAGS="${KBUILD_MAKEFLAGS}"
- cd "${objtree}"
local modulesdir="${pkgdir}/usr/${MODLIB}"
+ _prologue
+
echo "Installing boot image..."
# systemd expects to find the kernel here to allow hibernation
# https://github.com/systemd/systemd/commit/edda44605f06a41fb86b7ab8128dcf99161d2344
@@ -73,14 +82,17 @@ _package() {
_package-headers() {
pkgdesc="Headers and scripts for building modules for the ${pkgdesc} kernel"
- export MAKEFLAGS="${KBUILD_MAKEFLAGS}"
- cd "${objtree}"
local builddir="${pkgdir}/usr/${MODLIB}/build"
- echo "Installing build files..."
- "${srctree}/scripts/package/install-extmod-build" "${builddir}"
+ _prologue
+
+ if grep -q CONFIG_MODULES=y include/config/auto.conf; then
+ echo "Installing build files..."
+ "${srctree}/scripts/package/install-extmod-build" "${builddir}"
+ fi
echo "Installing System.map and config..."
+ mkdir -p "${builddir}"
cp System.map "${builddir}/System.map"
cp .config "${builddir}/.config"
@@ -94,12 +106,24 @@ _package-api-headers() {
provides=(linux-api-headers)
conflicts=(linux-api-headers)
- export MAKEFLAGS="${KBUILD_MAKEFLAGS}"
- cd "${objtree}"
+ _prologue
${MAKE} headers_install INSTALL_HDR_PATH="${pkgdir}/usr"
}
+_package-debug() {
+ pkgdesc="Non-stripped vmlinux file for the ${pkgdesc} kernel"
+
+ local debugdir="${pkgdir}/usr/src/debug/${pkgbase}"
+ local builddir="${pkgdir}/usr/${MODLIB}/build"
+
+ _prologue
+
+ install -Dt "${debugdir}" -m644 vmlinux
+ mkdir -p "${builddir}"
+ ln -sr "${debugdir}/vmlinux" "${builddir}/vmlinux"
+}
+
for _p in "${pkgname[@]}"; do
eval "package_$_p() {
$(declare -f "_package${_p#$pkgbase}")
diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build
index 8cc9e13403ae..d2c9cacecc0c 100755
--- a/scripts/package/install-extmod-build
+++ b/scripts/package/install-extmod-build
@@ -9,15 +9,22 @@ is_enabled() {
grep -q "^$1=y" include/config/auto.conf
}
+find_in_scripts() {
+ find scripts \
+ \( -name atomic -o -name dtc -o -name kconfig -o -name package \) -prune -o \
+ ! -name unifdef -a ! -name mk_elfconfig -a \( -type f -o -type l \) -print
+}
+
mkdir -p "${destdir}"
(
cd "${srctree}"
echo Makefile
find "arch/${SRCARCH}" -maxdepth 1 -name 'Makefile*'
- find include scripts -type f -o -type l
+ find "arch/${SRCARCH}" -name generated -prune -o -name include -type d -print
find "arch/${SRCARCH}" -name Kbuild.platforms -o -name Platform
- find "arch/${SRCARCH}" -name include -type d
+ find include \( -name config -o -name generated \) -prune -o \( -type f -o -type l \) -print
+ find_in_scripts
) | tar -c -f - -C "${srctree}" -T - | tar -xf - -C "${destdir}"
{
@@ -25,12 +32,50 @@ mkdir -p "${destdir}"
echo tools/objtool/objtool
fi
- find "arch/${SRCARCH}/include" Module.symvers include scripts -type f
+ echo Module.symvers
+ echo "arch/${SRCARCH}/include/generated"
+ echo include/config/auto.conf
+ echo include/config/kernel.release
+ echo include/generated
+ find_in_scripts
if is_enabled CONFIG_GCC_PLUGINS; then
find scripts/gcc-plugins -name '*.so'
fi
} | tar -c -f - -T - | tar -xf - -C "${destdir}"
-# copy .config manually to be where it's expected to be
-cp "${KCONFIG_CONFIG}" "${destdir}/.config"
+# When ${CC} and ${HOSTCC} differ, we are likely cross-compiling. Rebuild host
+# programs using ${CC}. This assumes CC=${CROSS_COMPILE}gcc, which is usually
+# the case for package building. It does not cross-compile when CC=clang.
+#
+# This caters to host programs that participate in Kbuild. objtool and
+# resolve_btfids are out of scope.
+if [ "${CC}" != "${HOSTCC}" ] && is_enabled CONFIG_CC_CAN_LINK; then
+ echo "Rebuilding host programs with ${CC}..."
+
+ cat <<-'EOF' > "${destdir}/Kbuild"
+ subdir-y := scripts
+ EOF
+
+ # HOSTCXX is not overridden. The C++ compiler is used to build:
+ # - scripts/kconfig/qconf, which is unneeded for external module builds
+ # - GCC plugins, which will not work on the installed system even after
+ # being rebuilt.
+ #
+ # Use the single-target build to avoid the modpost invocation, which
+ # would overwrite Module.symvers.
+ "${MAKE}" HOSTCC="${CC}" KBUILD_EXTMOD="${destdir}" scripts/
+
+ cat <<-'EOF' > "${destdir}/scripts/Kbuild"
+ subdir-y := basic
+ hostprogs-always-y := mod/modpost
+ mod/modpost-objs := $(addprefix mod/, modpost.o file2alias.o sumversion.o symsearch.o)
+ EOF
+
+ # Run once again to rebuild scripts/basic/ and scripts/mod/modpost.
+ "${MAKE}" HOSTCC="${CC}" KBUILD_EXTMOD="${destdir}" scripts/
+
+ rm -f "${destdir}/Kbuild" "${destdir}/scripts/Kbuild"
+fi
+
+find "${destdir}" \( -name '.*.cmd' -o -name '*.o' \) -delete
diff --git a/scripts/rustc-version.sh b/scripts/rustc-version.sh
new file mode 100755
index 000000000000..4e22593e2eab
--- /dev/null
+++ b/scripts/rustc-version.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Usage: $ ./rustc-version.sh rustc
+#
+# Print the Rust compiler version in a 6 or 7-digit form.
+
+# Convert the version string x.y.z to a canonical up-to-7-digit form.
+#
+# Note that this function uses one more digit (compared to other
+# instances in other version scripts) to give a bit more space to
+# `rustc` since it will reach 1.100.0 in late 2026.
+get_canonical_version()
+{
+ IFS=.
+ set -- $1
+ echo $((100000 * $1 + 100 * $2 + $3))
+}
+
+if output=$("$@" --version 2>/dev/null); then
+ set -- $output
+ get_canonical_version $2
+else
+ echo 0
+ exit 1
+fi
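The canonical form reduces version gating to integer comparison: rustc 1.78.0, for example, maps to 100000*1 + 100*78 + 0 = 107800. A hedged usage sketch (the printed value depends on the installed rustc):

    $ ./scripts/rustc-version.sh rustc
    107800
    $ # gate on rustc >= 1.78.0:
    $ [ "$(./scripts/rustc-version.sh rustc)" -ge 107800 ] && echo ok
    ok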
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index 3edb156ae52c..7070245edfc1 100644
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -27,14 +27,17 @@
#include <openssl/evp.h>
#include <openssl/pem.h>
#include <openssl/err.h>
-#include <openssl/engine.h>
-
-/*
- * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API.
- *
- * Remove this if/when that API is no longer used
- */
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#if OPENSSL_VERSION_MAJOR >= 3
+# define USE_PKCS11_PROVIDER
+# include <openssl/provider.h>
+# include <openssl/store.h>
+#else
+# if !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0)
+# define USE_PKCS11_ENGINE
+# include <openssl/engine.h>
+# endif
+#endif
+#include "ssl-common.h"
/*
* Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
@@ -83,41 +86,6 @@ void format(void)
exit(2);
}
-static void display_openssl_errors(int l)
-{
- const char *file;
- char buf[120];
- int e, line;
-
- if (ERR_peek_error() == 0)
- return;
- fprintf(stderr, "At main.c:%d:\n", l);
-
- while ((e = ERR_get_error_line(&file, &line))) {
- ERR_error_string(e, buf);
- fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line);
- }
-}
-
-static void drain_openssl_errors(void)
-{
- const char *file;
- int line;
-
- if (ERR_peek_error() == 0)
- return;
- while (ERR_get_error_line(&file, &line)) {}
-}
-
-#define ERR(cond, fmt, ...) \
- do { \
- bool __cond = (cond); \
- display_openssl_errors(__LINE__); \
- if (__cond) { \
- errx(1, fmt, ## __VA_ARGS__); \
- } \
- } while(0)
-
static const char *key_pass;
static int pem_pw_cb(char *buf, int len, int w, void *v)
@@ -139,28 +107,64 @@ static int pem_pw_cb(char *buf, int len, int w, void *v)
return pwlen;
}
-static EVP_PKEY *read_private_key(const char *private_key_name)
+static EVP_PKEY *read_private_key_pkcs11(const char *private_key_name)
{
- EVP_PKEY *private_key;
+ EVP_PKEY *private_key = NULL;
+#ifdef USE_PKCS11_PROVIDER
+ OSSL_STORE_CTX *store;
+ if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", true))
+ ERR(1, "OSSL_PROVIDER_try_load(pkcs11)");
+ if (!OSSL_PROVIDER_try_load(NULL, "default", true))
+ ERR(1, "OSSL_PROVIDER_try_load(default)");
+
+ store = OSSL_STORE_open(private_key_name, NULL, NULL, NULL, NULL);
+ ERR(!store, "OSSL_STORE_open");
+
+ while (!OSSL_STORE_eof(store)) {
+ OSSL_STORE_INFO *info = OSSL_STORE_load(store);
+
+ if (!info) {
+ drain_openssl_errors(__LINE__, 0);
+ continue;
+ }
+ if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_PKEY) {
+ private_key = OSSL_STORE_INFO_get1_PKEY(info);
+ ERR(!private_key, "OSSL_STORE_INFO_get1_PKEY");
+ }
+ OSSL_STORE_INFO_free(info);
+ if (private_key)
+ break;
+ }
+ OSSL_STORE_close(store);
+#elif defined(USE_PKCS11_ENGINE)
+ ENGINE *e;
+
+ ENGINE_load_builtin_engines();
+ drain_openssl_errors(__LINE__, 1);
+ e = ENGINE_by_id("pkcs11");
+ ERR(!e, "Load PKCS#11 ENGINE");
+ if (ENGINE_init(e))
+ drain_openssl_errors(__LINE__, 1);
+ else
+ ERR(1, "ENGINE_init");
+ if (key_pass)
+ ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
+ private_key = ENGINE_load_private_key(e, private_key_name, NULL, NULL);
+ ERR(!private_key, "%s", private_key_name);
+#else
+ fprintf(stderr, "no pkcs11 engine/provider available\n");
+ exit(1);
+#endif
+ return private_key;
+}
+
+static EVP_PKEY *read_private_key(const char *private_key_name)
+{
if (!strncmp(private_key_name, "pkcs11:", 7)) {
- ENGINE *e;
-
- ENGINE_load_builtin_engines();
- drain_openssl_errors();
- e = ENGINE_by_id("pkcs11");
- ERR(!e, "Load PKCS#11 ENGINE");
- if (ENGINE_init(e))
- drain_openssl_errors();
- else
- ERR(1, "ENGINE_init");
- if (key_pass)
- ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0),
- "Set PKCS#11 PIN");
- private_key = ENGINE_load_private_key(e, private_key_name,
- NULL, NULL);
- ERR(!private_key, "%s", private_key_name);
+ return read_private_key_pkcs11(private_key_name);
} else {
+ EVP_PKEY *private_key;
BIO *b;
b = BIO_new_file(private_key_name, "rb");
@@ -169,9 +173,9 @@ static EVP_PKEY *read_private_key(const char *private_key_name)
NULL);
ERR(!private_key, "%s", private_key_name);
BIO_free(b);
- }
- return private_key;
+ return private_key;
+ }
}
static X509 *read_x509(const char *x509_name)
@@ -306,7 +310,7 @@ int main(int argc, char **argv)
/* Digest the module data. */
OpenSSL_add_all_digests();
- display_openssl_errors(__LINE__);
+ drain_openssl_errors(__LINE__, 0);
digest_algo = EVP_get_digestbyname(hash_algo);
ERR(!digest_algo, "EVP_get_digestbyname");
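For reference, a hedged sign-file invocation using a PKCS#11 key; the token and object names are placeholders, and the PIN is supplied via the KBUILD_SIGN_PIN environment variable that populates key_pass:

    $ export KBUILD_SIGN_PIN=1234   # placeholder PIN
    $ ./scripts/sign-file sha256 \
          "pkcs11:token=mytoken;object=signing-key" \
          certs/signing_key.x509 mymodule.ko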
diff --git a/scripts/ssl-common.h b/scripts/ssl-common.h
new file mode 100644
index 000000000000..2db0e181143c
--- /dev/null
+++ b/scripts/ssl-common.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ * SSL helper functions shared by sign-file and extract-cert.
+ */
+
+static void drain_openssl_errors(int l, int silent)
+{
+ const char *file;
+ char buf[120];
+ int e, line;
+
+ if (ERR_peek_error() == 0)
+ return;
+ if (!silent)
+ fprintf(stderr, "At main.c:%d:\n", l);
+
+ while ((e = ERR_peek_error_line(&file, &line))) {
+ ERR_error_string(e, buf);
+ if (!silent)
+ fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line);
+ ERR_get_error();
+ }
+}
+
+#define ERR(cond, fmt, ...) \
+ do { \
+ bool __cond = (cond); \
+ drain_openssl_errors(__LINE__, 0); \
+ if (__cond) { \
+ errx(1, fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
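Both sign-file and extract-cert include this header and link against libcrypto. A hedged sketch of a standalone host build, approximating what kbuild's host-program rules do:

    $ gcc $(pkg-config --cflags libcrypto 2>/dev/null) \
          -o sign-file scripts/sign-file.c \
          $(pkg-config --libs libcrypto 2>/dev/null || echo -lcrypto)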
diff --git a/scripts/subarch.include b/scripts/subarch.include
index 4bd327d0ae42..c4592d59d69b 100644
--- a/scripts/subarch.include
+++ b/scripts/subarch.include
@@ -6,7 +6,7 @@
SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-e s/sun4u/sparc64/ \
- -e s/arm.*/arm/ -e s/sa110/arm/ \
+ -e /^arm64$$/!s/arm.*/arm/ -e s/sa110/arm/ \
-e s/s390x/s390/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
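The doubled $$ is make escaping for a single shell $; the new address keeps an exact arm64 string from being folded into arm while other arm* values still are. A quick check of the sed expression on its own:

    $ echo arm64 | sed -e '/^arm64$/!s/arm.*/arm/'
    arm64
    $ echo armv7l | sed -e '/^arm64$/!s/arm.*/arm/'
    arm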
diff --git a/scripts/verify_builtin_ranges.awk b/scripts/verify_builtin_ranges.awk
new file mode 100755
index 000000000000..0de7ed521601
--- /dev/null
+++ b/scripts/verify_builtin_ranges.awk
@@ -0,0 +1,370 @@
+#!/usr/bin/gawk -f
+# SPDX-License-Identifier: GPL-2.0
+# verify_builtin_ranges.awk: Verify address range data for builtin modules
+# Written by Kris Van Hees <kris.van.hees@oracle.com>
+#
+# Usage: verify_builtin_ranges.awk modules.builtin.ranges System.map \
+# modules.builtin vmlinux.map vmlinux.o.map
+#
+
+# Return the module name(s) (if any) associated with the given object.
+#
+# If we have seen this object before, return information from the cache.
+# Otherwise, retrieve it from the corresponding .cmd file.
+#
+function get_module_info(fn, mod, obj, s) {
+ if (fn in omod)
+ return omod[fn];
+
+ if (match(fn, /\/[^/]+$/) == 0)
+ return "";
+
+ obj = fn;
+ mod = "";
+ fn = substr(fn, 1, RSTART) "." substr(fn, RSTART + 1) ".cmd";
+ if (getline s <fn == 1) {
+ if (match(s, /DKBUILD_MODFILE=['"]+[^'"]+/) > 0) {
+ mod = substr(s, RSTART + 16, RLENGTH - 16);
+ gsub(/['"]/, "", mod);
+ } else if (match(s, /RUST_MODFILE=[^ ]+/) > 0)
+ mod = substr(s, RSTART + 13, RLENGTH - 13);
+ } else {
+ print "ERROR: Failed to read: " fn "\n\n" \
+ " For kernels built with O=<objdir>, cd to <objdir>\n" \
+ " and execute this script as ./source/scripts/..." \
+ >"/dev/stderr";
+ close(fn);
+ total = 0;
+ exit(1);
+ }
+ close(fn);
+
+ # A single module (common case) also reflects objects that are not part
+ # of a module. Some of those objects have names that are also a module
+ # name (e.g. core). We check the associated module file name, and if
+ # they do not match, the object is not part of a module.
+ if (mod !~ / /) {
+ if (!(mod in mods))
+ mod = "";
+ }
+
+ gsub(/([^/ ]*\/)+/, "", mod);
+ gsub(/-/, "_", mod);
+
+ # At this point, mod is a single (valid) module name, or a list of
+ # module names (that do not need validation).
+ omod[obj] = mod;
+
+ return mod;
+}
+
+# Return a representative integer value for a given hexadecimal address.
+#
+# Since all kernel addresses fall within the same memory region, we can safely
+# strip off the first 4 hex digits before performing the hex-to-dec conversion,
+# thereby avoiding integer overflows.
+#
+function addr2val(val) {
+ sub(/^0x/, "", val);
+ if (length(val) == 16)
+ val = substr(val, 5);
+ return strtonum("0x" val);
+}
+
+# Determine the kernel build directory to use (default is .).
+#
+BEGIN {
+ if (ARGC < 6) {
+ print "Syntax: verify_builtin_ranges.awk <ranges-file> <system-map>\n" \
+ " <builtin-file> <vmlinux-map> <vmlinux-o-map>\n" \
+ >"/dev/stderr";
+ total = 0;
+ exit(1);
+ }
+}
+
+# (1) Load the built-in module address range data.
+#
+ARGIND == 1 {
+ ranges[FNR] = $0;
+ rcnt++;
+ next;
+}
+
+# (2) Annotate System.map symbols with module names.
+#
+ARGIND == 2 {
+ addr = addr2val($1);
+ name = $3;
+
+ while (addr >= mod_eaddr) {
+ if (sect_symb) {
+ if (sect_symb != name)
+ next;
+
+ sect_base = addr - sect_off;
+ if (dbg)
+ printf "[%s] BASE (%s) %016x - %016x = %016x\n", sect_name, sect_symb, addr, sect_off, sect_base >"/dev/stderr";
+ sect_symb = 0;
+ }
+
+ if (++ridx > rcnt)
+ break;
+
+ $0 = ranges[ridx];
+ sub(/-/, " ");
+ if ($4 != "=") {
+ sub(/-/, " ");
+ mod_saddr = strtonum("0x" $2) + sect_base;
+ mod_eaddr = strtonum("0x" $3) + sect_base;
+ $1 = $2 = $3 = "";
+ sub(/^ +/, "");
+ mod_name = $0;
+
+ if (dbg)
+ printf "[%s] %s from %016x to %016x\n", sect_name, mod_name, mod_saddr, mod_eaddr >"/dev/stderr";
+ } else {
+ sect_name = $1;
+ sect_off = strtonum("0x" $2);
+ sect_symb = $5;
+ }
+ }
+
+ idx = addr"-"name;
+ if (addr >= mod_saddr && addr < mod_eaddr)
+ sym2mod[idx] = mod_name;
+
+ next;
+}
+
+# Once we are done annotating the System.map, we no longer need the ranges data.
+#
+FNR == 1 && ARGIND == 3 {
+ delete ranges;
+}
+
+# (3) Build a lookup map of built-in module names.
+#
+# Lines from modules.builtin will be like:
+# kernel/crypto/lzo-rle.ko
+# and we record the object name "crypto/lzo-rle".
+#
+ARGIND == 3 {
+ sub(/kernel\//, ""); # strip off "kernel/" prefix
+ sub(/\.ko$/, ""); # strip off .ko suffix
+
+ mods[$1] = 1;
+ next;
+}
+
+# (4) Get a list of symbols (per object).
+#
+# Symbols by object are read from vmlinux.map, with fallback to vmlinux.o.map
+# if vmlinux is found to have linked in vmlinux.o.
+#
+
+# If we were able to get the data we need from vmlinux.map, there is no need to
+# process vmlinux.o.map.
+#
+FNR == 1 && ARGIND == 5 && total > 0 {
+ if (dbg)
+ printf "Note: %s is not needed.\n", FILENAME >"/dev/stderr";
+ exit;
+}
+
+# First determine whether we are dealing with a GNU ld or LLVM lld linker map.
+#
+ARGIND >= 4 && FNR == 1 && NF == 7 && $1 == "VMA" && $7 == "Symbol" {
+ map_is_lld = 1;
+ next;
+}
+
+# (LLD) Convert a section record from lld format to ld format.
+#
+ARGIND >= 4 && map_is_lld && NF == 5 && /[0-9] [^ ]+$/ {
+ $0 = $5 " 0x"$1 " 0x"$3 " load address 0x"$2;
+}
+
+# (LLD) Convert an object record from lld format to ld format.
+#
+ARGIND >= 4 && map_is_lld && NF == 5 && $5 ~ /:\(/ {
+ if (/\.a\(/ && !/ vmlinux\.a\(/)
+ next;
+
+ gsub(/\)/, "");
+ sub(/:\(/, " ");
+ sub(/ vmlinux\.a\(/, " ");
+ $0 = " "$6 " 0x"$1 " 0x"$3 " " $5;
+}
+
+# (LLD) Convert a symbol record from lld format to ld format.
+#
+ARGIND >= 4 && map_is_lld && NF == 5 && $5 ~ /^[A-Za-z_][A-Za-z0-9_]*$/ {
+ $0 = " 0x" $1 " " $5;
+}
+
+# (LLD) We do not need any other lld linker map records.
+#
+ARGIND >= 4 && map_is_lld && /^[0-9a-f]{16} / {
+ next;
+}
+
+# Handle section records with long section names (spilling onto a 2nd line).
+#
+ARGIND >= 4 && !map_is_lld && NF == 1 && /^[^ ]/ {
+ s = $0;
+ getline;
+ $0 = s " " $0;
+}
+
+# Next section - previous one is done.
+#
+ARGIND >= 4 && /^[^ ]/ {
+ sect = 0;
+}
+
+# Get the (top level) section name.
+#
+ARGIND >= 4 && /^\./ {
+ # Explicitly ignore a few sections that are not relevant here.
+ if ($1 ~ /^\.orc_/ || $1 ~ /_sites$/ || $1 ~ /\.percpu/)
+ next;
+
+ # Sections with a 0-address can be ignored as well (in vmlinux.map).
+ if (ARGIND == 4 && $2 ~ /^0x0+$/)
+ next;
+
+ sect = $1;
+
+ next;
+}
+
+# If we are not currently in a section we care about, ignore records.
+#
+!sect {
+ next;
+}
+
+# Handle object records with long section names (spilling onto a 2nd line).
+#
+ARGIND >= 4 && /^ [^ \*]/ && NF == 1 {
+ # If the section name is long, the remainder of the entry is found on
+ # the next line.
+ s = $0;
+ getline;
+ $0 = s " " $0;
+}
+
+# Objects linked in from static libraries are ignored.
+# If the object is vmlinux.o, we need to consult vmlinux.o.map for per-object
+# symbol information.
+#
+ARGIND == 4 && /^ [^ ]/ && NF == 4 {
+ if ($4 ~ /\.a\(/)
+ next;
+
+ idx = sect":"$1;
+ if (!(idx in sect_addend)) {
+ sect_addend[idx] = addr2val($2);
+ if (dbg)
+ printf "ADDEND %s = %016x\n", idx, sect_addend[idx] >"/dev/stderr";
+ }
+ if ($4 == "vmlinux.o") {
+ need_o_map = 1;
+ next;
+ }
+}
+
+# If data from vmlinux.o.map is needed, we only process section and object
+# records from vmlinux.map to determine which section we need to pay attention
+# to in vmlinux.o.map. So skip everything else from vmlinux.map.
+#
+ARGIND == 4 && need_o_map {
+ next;
+}
+
+# Get module information for the current object.
+#
+ARGIND >= 4 && /^ [^ ]/ && NF == 4 {
+ msect = $1;
+ mod_name = get_module_info($4);
+ mod_eaddr = addr2val($2) + addr2val($3);
+
+ next;
+}
+
+# Process a symbol record.
+#
+# Evaluate the module information obtained from vmlinux.map (or vmlinux.o.map)
+# as follows:
+# - For all symbols in a given object:
+# - If the symbol is annotated with the same module name(s) that the object
+# belongs to, count it as a match.
+# - Otherwise:
+# - If the symbol is known to have duplicates of which at least one is
+# in a built-in module, disregard it.
+#     - If the symbol is not annotated with any module name(s) AND the
+# object belongs to built-in modules, count it as missing.
+# - Otherwise, count it as a mismatch.
+#
+ARGIND >= 4 && /^ / && NF == 2 && $1 ~ /^0x/ {
+ idx = sect":"msect;
+ if (!(idx in sect_addend))
+ next;
+
+ addr = addr2val($1);
+
+ # Handle the rare but annoying case where a 0-size symbol is placed at
+ # the byte *after* the module range. Based on vmlinux.map it will be
+ # considered part of the current object, but it falls just beyond the
+ # module address range. Unfortunately, its address could be at the
+ # start of another built-in module, so the only safe thing to do is to
+ # ignore it.
+ if (mod_name && addr == mod_eaddr)
+ next;
+
+ # If we are processing vmlinux.o.map, we need to apply the base address
+ # of the section to the relative address on the record.
+ #
+ if (ARGIND == 5)
+ addr += sect_addend[idx];
+
+ idx = addr"-"$2;
+ mod = "";
+ if (idx in sym2mod) {
+ mod = sym2mod[idx];
+ if (sym2mod[idx] == mod_name) {
+ mod_matches++;
+ matches++;
+ } else if (mod_name == "") {
+ print $2 " in " mod " (should NOT be)";
+ mismatches++;
+ } else {
+ print $2 " in " mod " (should be " mod_name ")";
+ mismatches++;
+ }
+ } else if (mod_name != "") {
+ print $2 " should be in " mod_name;
+ missing++;
+ } else
+ matches++;
+
+ total++;
+
+ next;
+}
+
+# Issue the comparison report.
+#
+END {
+ if (total) {
+ printf "Verification of %s:\n", ARGV[1];
+ printf " Correct matches: %6d (%d%% of total)\n", matches, 100 * matches / total;
+ printf " Module matches: %6d (%d%% of matches)\n", mod_matches, 100 * mod_matches / matches;
+ printf " Mismatches: %6d (%d%% of total)\n", mismatches, 100 * mismatches / total;
+ printf " Missing: %6d (%d%% of total)\n", missing, 100 * missing / total;
+
+ if (mismatches || missing)
+ exit(1);
+ }
+}
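Per the usage comment and the O=<objdir> note in get_module_info(), the verifier runs from the object tree, where the .cmd files and linker maps live. A hedged invocation with placeholder paths:

    $ cd /path/to/objdir
    $ ./source/scripts/verify_builtin_ranges.awk modules.builtin.ranges \
          System.map modules.builtin vmlinux.map vmlinux.o.map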
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
index d06baf626abe..f19369687030 100755
--- a/scripts/xz_wrap.sh
+++ b/scripts/xz_wrap.sh
@@ -1,22 +1,162 @@
#!/bin/sh
+# SPDX-License-Identifier: 0BSD
#
# This is a wrapper for xz to compress the kernel image using appropriate
# compression options depending on the architecture.
#
# Author: Lasse Collin <lasse.collin@tukaani.org>
+
+# This has specialized settings for the following archs. However,
+# an XZ-compressed kernel isn't currently supported on every listed arch.
#
-# This file has been put into the public domain.
-# You can do whatever you want with this file.
-#
+# Arch Align Notes
+# arm 2/4 ARM and ARM-Thumb2
+# arm64 4
+# csky 2
+# loongarch 4
+# mips 2/4 MicroMIPS is 2-byte aligned
+# parisc 4
+# powerpc 4 Uses its own wrapper for compressors instead of this.
+# riscv 2/4
+# s390 2
+# sh 2
+# sparc 4
+# x86 1
+
+# A few archs use 2-byte or 4-byte aligned instructions depending on
+# the kernel config. This function is used to check if the relevant
+# config option is set to "y".
+is_enabled()
+{
+ grep -q "^$1=y$" include/config/auto.conf
+}
+
+# XZ_VERSION is needed to disable features that aren't available in
+# old XZ Utils versions.
+XZ_VERSION=$($XZ --robot --version) || exit
+XZ_VERSION=$(printf '%s\n' "$XZ_VERSION" | sed -n 's/^XZ_VERSION=//p')
+# Assume that no BCJ filter is available.
BCJ=
-LZMA2OPTS=
+# Set the instruction alignment to 1, 2, or 4 bytes.
+#
+# Set the BCJ filter if one is available.
+# It must match the #ifdef usage in lib/decompress_unxz.c.
case $SRCARCH in
- x86) BCJ=--x86 ;;
- powerpc) BCJ=--powerpc ;;
- arm) BCJ=--arm ;;
- sparc) BCJ=--sparc ;;
+ arm)
+ if is_enabled CONFIG_THUMB2_KERNEL; then
+ ALIGN=2
+ BCJ=--armthumb
+ else
+ ALIGN=4
+ BCJ=--arm
+ fi
+ ;;
+
+ arm64)
+ ALIGN=4
+
+ # ARM64 filter was added in XZ Utils 5.4.0.
+ if [ "$XZ_VERSION" -ge 50040002 ]; then
+ BCJ=--arm64
+ else
+ echo "$0: Upgrading to xz >= 5.4.0" \
+ "would enable the ARM64 filter" \
+ "for better compression" >&2
+ fi
+ ;;
+
+ csky)
+ ALIGN=2
+ ;;
+
+ loongarch)
+ ALIGN=4
+ ;;
+
+ mips)
+ if is_enabled CONFIG_CPU_MICROMIPS; then
+ ALIGN=2
+ else
+ ALIGN=4
+ fi
+ ;;
+
+ parisc)
+ ALIGN=4
+ ;;
+
+ powerpc)
+ ALIGN=4
+
+ # The filter is only for big endian instruction encoding.
+ if is_enabled CONFIG_CPU_BIG_ENDIAN; then
+ BCJ=--powerpc
+ fi
+ ;;
+
+ riscv)
+ if is_enabled CONFIG_RISCV_ISA_C; then
+ ALIGN=2
+ else
+ ALIGN=4
+ fi
+
+ # RISC-V filter was added in XZ Utils 5.6.0.
+ if [ "$XZ_VERSION" -ge 50060002 ]; then
+ BCJ=--riscv
+ else
+ echo "$0: Upgrading to xz >= 5.6.0" \
+ "would enable the RISC-V filter" \
+ "for better compression" >&2
+ fi
+ ;;
+
+ s390)
+ ALIGN=2
+ ;;
+
+ sh)
+ ALIGN=2
+ ;;
+
+ sparc)
+ ALIGN=4
+ BCJ=--sparc
+ ;;
+
+ x86)
+ ALIGN=1
+ BCJ=--x86
+ ;;
+
+ *)
+ echo "$0: Arch-specific tuning is missing for '$SRCARCH'" >&2
+
+ # Guess 2-byte-aligned instructions. Guessing too low
+ # should hurt less than guessing too high.
+ ALIGN=2
+ ;;
+esac
+
+# Select the LZMA2 options matching the instruction alignment.
+case $ALIGN in
+ 1) LZMA2OPTS= ;;
+ 2) LZMA2OPTS=lp=1 ;;
+ 4) LZMA2OPTS=lp=2,lc=2 ;;
+ *) echo "$0: ALIGN wrong or missing" >&2; exit 1 ;;
esac
-exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
+# Use single-threaded mode because it compresses a little better
+# (and uses less RAM) than multithreaded mode.
+#
+# For the best compression, the dictionary size shouldn't be
+# smaller than the uncompressed kernel. 128 MiB dictionary
+# needs less than 1400 MiB of RAM in single-threaded mode.
+#
+# On the archs that use this script to compress the kernel,
+# decompression in the preboot code is done in single-call mode.
+# Thus the dictionary size doesn't affect the memory requirements
+# of the preboot decompressor at all.
+exec $XZ --check=crc32 --threads=1 $BCJ --lzma2=$LZMA2OPTS,dict=128MiB
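For instance, on arm64 with xz >= 5.4.0 the case statement above selects ALIGN=4 and BCJ=--arm64, so the exec line expands to roughly:

    xz --check=crc32 --threads=1 --arm64 --lzma2=lp=2,lc=2,dict=128MiB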
diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
index 57b9ffd53c98..3663aec7bcbd 100644
--- a/security/bpf/hooks.c
+++ b/security/bpf/hooks.c
@@ -31,7 +31,6 @@ static int __init bpf_lsm_init(void)
struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = {
.lbs_inode = sizeof(struct bpf_storage_blob),
- .lbs_task = sizeof(struct bpf_storage_blob),
};
DEFINE_LSM(bpf) = {
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 5b3394864b21..06132cf47016 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -1068,10 +1068,10 @@ void ima_kexec_cmdline(int kernel_fd, const void *buf, int size)
return;
f = fdget(kernel_fd);
- if (!f.file)
+ if (!fd_file(f))
return;
- process_buffer_measurement(file_mnt_idmap(f.file), file_inode(f.file),
+ process_buffer_measurement(file_mnt_idmap(fd_file(f)), file_inode(fd_file(f)),
buf, size, "kexec-cmdline", KEXEC_CMDLINE, 0,
NULL, false, NULL, 0);
fdput(f);
diff --git a/security/ipe/policy_tests.c b/security/ipe/policy_tests.c
index 89521f6b9994..5f1654deeb04 100644
--- a/security/ipe/policy_tests.c
+++ b/security/ipe/policy_tests.c
@@ -286,6 +286,7 @@ static void ipe_parser_widestring_test(struct kunit *test)
static struct kunit_case ipe_parser_test_cases[] = {
KUNIT_CASE_PARAM(ipe_parser_unsigned_test, ipe_policies_gen_params),
KUNIT_CASE(ipe_parser_widestring_test),
+ { }
};
static struct kunit_suite ipe_parser_test_suite = {
diff --git a/security/landlock/cred.h b/security/landlock/cred.h
index af89ab00e6d1..bf755459838a 100644
--- a/security/landlock/cred.h
+++ b/security/landlock/cred.h
@@ -26,7 +26,7 @@ landlock_cred(const struct cred *cred)
return cred->security + landlock_blob_sizes.lbs_cred;
}
-static inline const struct landlock_ruleset *landlock_get_current_domain(void)
+static inline struct landlock_ruleset *landlock_get_current_domain(void)
{
return landlock_cred(current_cred())->domain;
}
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index 0804f76a67be..7d79fc8abe21 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -1639,6 +1639,29 @@ static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
return -EACCES;
}
+static void hook_file_set_fowner(struct file *file)
+{
+ struct landlock_ruleset *new_dom, *prev_dom;
+
+ /*
+ * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
+ * file_set_fowner LSM hook inconsistencies").
+ */
+ lockdep_assert_held(&file_f_owner(file)->lock);
+ new_dom = landlock_get_current_domain();
+ landlock_get_ruleset(new_dom);
+ prev_dom = landlock_file(file)->fown_domain;
+ landlock_file(file)->fown_domain = new_dom;
+
+ /* Called in an RCU read-side critical section. */
+ landlock_put_ruleset_deferred(prev_dom);
+}
+
+static void hook_file_free_security(struct file *file)
+{
+ landlock_put_ruleset_deferred(landlock_file(file)->fown_domain);
+}
+
static struct security_hook_list landlock_hooks[] __ro_after_init = {
LSM_HOOK_INIT(inode_free_security_rcu, hook_inode_free_security_rcu),
@@ -1663,6 +1686,8 @@ static struct security_hook_list landlock_hooks[] __ro_after_init = {
LSM_HOOK_INIT(file_truncate, hook_file_truncate),
LSM_HOOK_INIT(file_ioctl, hook_file_ioctl),
LSM_HOOK_INIT(file_ioctl_compat, hook_file_ioctl_compat),
+ LSM_HOOK_INIT(file_set_fowner, hook_file_set_fowner),
+ LSM_HOOK_INIT(file_free_security, hook_file_free_security),
};
__init void landlock_add_fs_hooks(void)
diff --git a/security/landlock/fs.h b/security/landlock/fs.h
index 488e4813680a..1487e1f023a1 100644
--- a/security/landlock/fs.h
+++ b/security/landlock/fs.h
@@ -52,6 +52,13 @@ struct landlock_file_security {
* needed to authorize later operations on the open file.
*/
access_mask_t allowed_access;
+ /**
+	 * @fown_domain: Domain of the task that set the PID that may receive a
+	 * signal, e.g. SIGURG when MSG_OOB is written to the related socket.
+ * This pointer is protected by the related file->f_owner->lock, as for
+ * fown_struct's members: pid, uid, and euid.
+ */
+ struct landlock_ruleset *fown_domain;
};
/**
diff --git a/security/landlock/limits.h b/security/landlock/limits.h
index 4eb643077a2a..15f7606066c8 100644
--- a/security/landlock/limits.h
+++ b/security/landlock/limits.h
@@ -26,6 +26,9 @@
#define LANDLOCK_MASK_ACCESS_NET ((LANDLOCK_LAST_ACCESS_NET << 1) - 1)
#define LANDLOCK_NUM_ACCESS_NET __const_hweight64(LANDLOCK_MASK_ACCESS_NET)
+#define LANDLOCK_LAST_SCOPE LANDLOCK_SCOPE_SIGNAL
+#define LANDLOCK_MASK_SCOPE ((LANDLOCK_LAST_SCOPE << 1) - 1)
+#define LANDLOCK_NUM_SCOPE __const_hweight64(LANDLOCK_MASK_SCOPE)
/* clang-format on */
#endif /* _SECURITY_LANDLOCK_LIMITS_H */
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index 6ff232f58618..a93bdbf52fff 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -52,12 +52,13 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers)
struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
- const access_mask_t net_access_mask)
+ const access_mask_t net_access_mask,
+ const access_mask_t scope_mask)
{
struct landlock_ruleset *new_ruleset;
/* Informs about useless ruleset. */
- if (!fs_access_mask && !net_access_mask)
+ if (!fs_access_mask && !net_access_mask && !scope_mask)
return ERR_PTR(-ENOMSG);
new_ruleset = create_ruleset(1);
if (IS_ERR(new_ruleset))
@@ -66,6 +67,8 @@ landlock_create_ruleset(const access_mask_t fs_access_mask,
landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
if (net_access_mask)
landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
+ if (scope_mask)
+ landlock_add_scope_mask(new_ruleset, scope_mask, 0);
return new_ruleset;
}
diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h
index 0f1b5b4c8f6b..61bdbc550172 100644
--- a/security/landlock/ruleset.h
+++ b/security/landlock/ruleset.h
@@ -35,6 +35,8 @@ typedef u16 access_mask_t;
static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS);
/* Makes sure all network access rights can be stored. */
static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_NET);
+/* Makes sure all scoped rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_SCOPE);
/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */
static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
@@ -42,6 +44,7 @@ static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
struct access_masks {
access_mask_t fs : LANDLOCK_NUM_ACCESS_FS;
access_mask_t net : LANDLOCK_NUM_ACCESS_NET;
+ access_mask_t scope : LANDLOCK_NUM_SCOPE;
};
typedef u16 layer_mask_t;
@@ -233,7 +236,8 @@ struct landlock_ruleset {
struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t access_mask_fs,
- const access_mask_t access_mask_net);
+ const access_mask_t access_mask_net,
+ const access_mask_t scope_mask);
void landlock_put_ruleset(struct landlock_ruleset *const ruleset);
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset);
@@ -280,6 +284,17 @@ landlock_add_net_access_mask(struct landlock_ruleset *const ruleset,
ruleset->access_masks[layer_level].net |= net_mask;
}
+static inline void
+landlock_add_scope_mask(struct landlock_ruleset *const ruleset,
+ const access_mask_t scope_mask, const u16 layer_level)
+{
+ access_mask_t mask = scope_mask & LANDLOCK_MASK_SCOPE;
+
+ /* Should already be checked in sys_landlock_create_ruleset(). */
+ WARN_ON_ONCE(scope_mask != mask);
+ ruleset->access_masks[layer_level].scope |= mask;
+}
+
static inline access_mask_t
landlock_get_raw_fs_access_mask(const struct landlock_ruleset *const ruleset,
const u16 layer_level)
@@ -303,6 +318,13 @@ landlock_get_net_access_mask(const struct landlock_ruleset *const ruleset,
return ruleset->access_masks[layer_level].net;
}
+static inline access_mask_t
+landlock_get_scope_mask(const struct landlock_ruleset *const ruleset,
+ const u16 layer_level)
+{
+ return ruleset->access_masks[layer_level].scope;
+}
+
bool landlock_unmask_layers(const struct landlock_rule *const rule,
const access_mask_t access_request,
layer_mask_t (*const layer_masks)[],
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
index ccc8bc6c1584..f5a0e7182ec0 100644
--- a/security/landlock/syscalls.c
+++ b/security/landlock/syscalls.c
@@ -97,8 +97,9 @@ static void build_check_abi(void)
*/
ruleset_size = sizeof(ruleset_attr.handled_access_fs);
ruleset_size += sizeof(ruleset_attr.handled_access_net);
+ ruleset_size += sizeof(ruleset_attr.scoped);
BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size);
- BUILD_BUG_ON(sizeof(ruleset_attr) != 16);
+ BUILD_BUG_ON(sizeof(ruleset_attr) != 24);
path_beneath_size = sizeof(path_beneath_attr.allowed_access);
path_beneath_size += sizeof(path_beneath_attr.parent_fd);
@@ -149,7 +150,7 @@ static const struct file_operations ruleset_fops = {
.write = fop_dummy_write,
};
-#define LANDLOCK_ABI_VERSION 5
+#define LANDLOCK_ABI_VERSION 6
/**
* sys_landlock_create_ruleset - Create a new ruleset
@@ -170,8 +171,9 @@ static const struct file_operations ruleset_fops = {
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
- * - %EINVAL: unknown @flags, or unknown access, or too small @size;
- * - %E2BIG or %EFAULT: @attr or @size inconsistencies;
+ * - %EINVAL: unknown @flags, or unknown access, or unknown scope, or too small @size;
+ * - %E2BIG: @attr or @size inconsistencies;
+ * - %EFAULT: @attr or @size inconsistencies;
* - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs.
*/
SYSCALL_DEFINE3(landlock_create_ruleset,
@@ -213,9 +215,14 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
LANDLOCK_MASK_ACCESS_NET)
return -EINVAL;
+ /* Checks IPC scoping content (and 32-bits cast). */
+ if ((ruleset_attr.scoped | LANDLOCK_MASK_SCOPE) != LANDLOCK_MASK_SCOPE)
+ return -EINVAL;
+
/* Checks arguments and transforms to kernel struct. */
ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs,
- ruleset_attr.handled_access_net);
+ ruleset_attr.handled_access_net,
+ ruleset_attr.scoped);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
@@ -238,19 +245,19 @@ static struct landlock_ruleset *get_ruleset_from_fd(const int fd,
struct landlock_ruleset *ruleset;
ruleset_f = fdget(fd);
- if (!ruleset_f.file)
+ if (!fd_file(ruleset_f))
return ERR_PTR(-EBADF);
/* Checks FD type and access right. */
- if (ruleset_f.file->f_op != &ruleset_fops) {
+ if (fd_file(ruleset_f)->f_op != &ruleset_fops) {
ruleset = ERR_PTR(-EBADFD);
goto out_fdput;
}
- if (!(ruleset_f.file->f_mode & mode)) {
+ if (!(fd_file(ruleset_f)->f_mode & mode)) {
ruleset = ERR_PTR(-EPERM);
goto out_fdput;
}
- ruleset = ruleset_f.file->private_data;
+ ruleset = fd_file(ruleset_f)->private_data;
if (WARN_ON_ONCE(ruleset->num_layers != 1)) {
ruleset = ERR_PTR(-EINVAL);
goto out_fdput;
@@ -277,22 +284,22 @@ static int get_path_from_fd(const s32 fd, struct path *const path)
/* Handles O_PATH. */
f = fdget_raw(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
/*
* Forbids ruleset FDs, internal filesystems (e.g. nsfs), including
* pseudo filesystems that will never be mountable (e.g. sockfs,
* pipefs).
*/
- if ((f.file->f_op == &ruleset_fops) ||
- (f.file->f_path.mnt->mnt_flags & MNT_INTERNAL) ||
- (f.file->f_path.dentry->d_sb->s_flags & SB_NOUSER) ||
- d_is_negative(f.file->f_path.dentry) ||
- IS_PRIVATE(d_backing_inode(f.file->f_path.dentry))) {
+ if ((fd_file(f)->f_op == &ruleset_fops) ||
+ (fd_file(f)->f_path.mnt->mnt_flags & MNT_INTERNAL) ||
+ (fd_file(f)->f_path.dentry->d_sb->s_flags & SB_NOUSER) ||
+ d_is_negative(fd_file(f)->f_path.dentry) ||
+ IS_PRIVATE(d_backing_inode(fd_file(f)->f_path.dentry))) {
err = -EBADFD;
goto out_fdput;
}
- *path = f.file->f_path;
+ *path = fd_file(f)->f_path;
path_get(path);
out_fdput:
diff --git a/security/landlock/task.c b/security/landlock/task.c
index 849f5123610b..4acbd7c40eee 100644
--- a/security/landlock/task.c
+++ b/security/landlock/task.c
@@ -13,9 +13,12 @@
#include <linux/lsm_hooks.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
+#include <net/af_unix.h>
+#include <net/sock.h>
#include "common.h"
#include "cred.h"
+#include "fs.h"
#include "ruleset.h"
#include "setup.h"
#include "task.h"
@@ -108,9 +111,199 @@ static int hook_ptrace_traceme(struct task_struct *const parent)
return task_ptrace(parent, current);
}
+/**
+ * domain_is_scoped - Checks if the client domain is scoped in the same
+ * domain as the server.
+ *
+ * @client: IPC sender domain.
+ * @server: IPC receiver domain.
+ * @scope: The scope restriction criteria.
+ *
+ * Returns: True if the @client domain is scoped to access the @server,
+ * unless the @server is also scoped in the same domain as @client.
+ */
+static bool domain_is_scoped(const struct landlock_ruleset *const client,
+ const struct landlock_ruleset *const server,
+ access_mask_t scope)
+{
+ int client_layer, server_layer;
+ struct landlock_hierarchy *client_walker, *server_walker;
+
+ /* Quick return if client has no domain */
+ if (WARN_ON_ONCE(!client))
+ return false;
+
+ client_layer = client->num_layers - 1;
+ client_walker = client->hierarchy;
+ /*
+ * client_layer must be a signed integer with greater capacity
+ * than client->num_layers to ensure the following loop stops.
+ */
+ BUILD_BUG_ON(sizeof(client_layer) > sizeof(client->num_layers));
+
+ server_layer = server ? (server->num_layers - 1) : -1;
+ server_walker = server ? server->hierarchy : NULL;
+
+ /*
+ * Walks client's parent domains down to the same hierarchy level
+ * as the server's domain, and checks that none of these client's
+ * parent domains are scoped.
+ */
+ for (; client_layer > server_layer; client_layer--) {
+ if (landlock_get_scope_mask(client, client_layer) & scope)
+ return true;
+
+ client_walker = client_walker->parent;
+ }
+ /*
+ * Walks server's parent domains down to the same hierarchy level as
+ * the client's domain.
+ */
+ for (; server_layer > client_layer; server_layer--)
+ server_walker = server_walker->parent;
+
+ for (; client_layer >= 0; client_layer--) {
+ if (landlock_get_scope_mask(client, client_layer) & scope) {
+ /*
+ * Client and server are at the same level in the
+ * hierarchy. If the client is scoped, the request is
+ * only allowed if this domain is also a server's
+ * ancestor.
+ */
+ return server_walker != client_walker;
+ }
+ client_walker = client_walker->parent;
+ server_walker = server_walker->parent;
+ }
+ return false;
+}
+
+static bool sock_is_scoped(struct sock *const other,
+ const struct landlock_ruleset *const domain)
+{
+ const struct landlock_ruleset *dom_other;
+
+ /* The credentials will not change. */
+ lockdep_assert_held(&unix_sk(other)->lock);
+ dom_other = landlock_cred(other->sk_socket->file->f_cred)->domain;
+ return domain_is_scoped(domain, dom_other,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+}
+
+static bool is_abstract_socket(struct sock *const sock)
+{
+ struct unix_address *addr = unix_sk(sock)->addr;
+
+ if (!addr)
+ return false;
+
+ if (addr->len >= offsetof(struct sockaddr_un, sun_path) + 1 &&
+ addr->name->sun_path[0] == '\0')
+ return true;
+
+ return false;
+}
+
+static int hook_unix_stream_connect(struct sock *const sock,
+ struct sock *const other,
+ struct sock *const newsk)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ /* Quick return for non-landlocked tasks. */
+ if (!dom)
+ return 0;
+
+ if (is_abstract_socket(other) && sock_is_scoped(other, dom))
+ return -EPERM;
+
+ return 0;
+}
+
+static int hook_unix_may_send(struct socket *const sock,
+ struct socket *const other)
+{
+ const struct landlock_ruleset *const dom =
+ landlock_get_current_domain();
+
+ if (!dom)
+ return 0;
+
+ /*
+ * Checks if this datagram socket was already allowed to be connected
+ * to other.
+ */
+ if (unix_peer(sock->sk) == other->sk)
+ return 0;
+
+ if (is_abstract_socket(other->sk) && sock_is_scoped(other->sk, dom))
+ return -EPERM;
+
+ return 0;
+}
+
+static int hook_task_kill(struct task_struct *const p,
+ struct kernel_siginfo *const info, const int sig,
+ const struct cred *const cred)
+{
+ bool is_scoped;
+ const struct landlock_ruleset *dom;
+
+ if (cred) {
+ /* Dealing with USB IO. */
+ dom = landlock_cred(cred)->domain;
+ } else {
+ dom = landlock_get_current_domain();
+ }
+
+ /* Quick return for non-landlocked tasks. */
+ if (!dom)
+ return 0;
+
+ rcu_read_lock();
+ is_scoped = domain_is_scoped(dom, landlock_get_task_domain(p),
+ LANDLOCK_SCOPE_SIGNAL);
+ rcu_read_unlock();
+ if (is_scoped)
+ return -EPERM;
+
+ return 0;
+}
+
+static int hook_file_send_sigiotask(struct task_struct *tsk,
+ struct fown_struct *fown, int signum)
+{
+ const struct landlock_ruleset *dom;
+ bool is_scoped = false;
+
+ /* Lock already held by send_sigio() and send_sigurg(). */
+ lockdep_assert_held(&fown->lock);
+ dom = landlock_file(fown->file)->fown_domain;
+
+ /* Quick return for unowned socket. */
+ if (!dom)
+ return 0;
+
+ rcu_read_lock();
+ is_scoped = domain_is_scoped(dom, landlock_get_task_domain(tsk),
+ LANDLOCK_SCOPE_SIGNAL);
+ rcu_read_unlock();
+ if (is_scoped)
+ return -EPERM;
+
+ return 0;
+}
+
static struct security_hook_list landlock_hooks[] __ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, hook_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, hook_ptrace_traceme),
+
+ LSM_HOOK_INIT(unix_stream_connect, hook_unix_stream_connect),
+ LSM_HOOK_INIT(unix_may_send, hook_unix_may_send),
+
+ LSM_HOOK_INIT(task_kill, hook_task_kill),
+ LSM_HOOK_INIT(file_send_sigiotask, hook_file_send_sigiotask),
};
__init void landlock_add_task_hooks(void)
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 93fd4d47b334..02144ec39f43 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -296,7 +296,7 @@ static int read_trusted_verity_root_digests(unsigned int fd)
return -EPERM;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EINVAL;
data = kzalloc(SZ_4K, GFP_KERNEL);
@@ -305,7 +305,7 @@ static int read_trusted_verity_root_digests(unsigned int fd)
goto err;
}
- rc = kernel_read_file(f.file, 0, (void **)&data, SZ_4K - 1, NULL, READING_POLICY);
+ rc = kernel_read_file(fd_file(f), 0, (void **)&data, SZ_4K - 1, NULL, READING_POLICY);
if (rc < 0)
goto err;
diff --git a/security/security.c b/security/security.c
index 4564a0a1e4ef..6875eb4a59fc 100644
--- a/security/security.c
+++ b/security/security.c
@@ -5681,7 +5681,7 @@ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
return call_int_hook(bpf_token_create, token, attr, path);
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index bd3293021488..fc926d3cac6e 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -6735,7 +6735,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
#ifdef CONFIG_KEY_NOTIFICATIONS
static int selinux_watch_key(struct key *key)
{
- struct key_security_struct *ksec = key->security;
+ struct key_security_struct *ksec = selinux_key(key);
u32 sid = current_sid();
return avc_has_perm(sid, ksec->sid, SECCLASS_KEY, KEY__VIEW, NULL);
@@ -6933,7 +6933,7 @@ static void selinux_bpf_prog_free(struct bpf_prog *prog)
}
static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
struct bpf_security_struct *bpfsec;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 8069f17d4404..370fd594da12 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4629,16 +4629,9 @@ static int smack_watch_key(struct key *key)
{
struct smk_audit_info ad;
struct smack_known *tkp = smk_of_current();
+ struct smack_known **blob = smack_key(key);
int rc;
- if (key == NULL)
- return -EINVAL;
- /*
- * If the key hasn't been initialized give it access so that
- * it may do so.
- */
- if (key->security == NULL)
- return 0;
/*
* This should not occur
*/
@@ -4653,8 +4646,8 @@ static int smack_watch_key(struct key *key)
ad.a.u.key_struct.key = key->serial;
ad.a.u.key_struct.key_desc = key->description;
#endif
- rc = smk_access(tkp, key->security, MAY_READ, &ad);
- rc = smk_bu_note("key watch", tkp, key->security, MAY_READ, rc);
+ rc = smk_access(tkp, *blob, MAY_READ, &ad);
+ rc = smk_bu_note("key watch", tkp, *blob, MAY_READ, rc);
return rc;
}
#endif /* CONFIG_KEY_NOTIFICATIONS */
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index bad71b7e648d..8fd747b3653a 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -19,8 +19,8 @@
#include "smack.h"
static unsigned int smack_ip_output(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state)
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
{
struct sock *sk = skb_to_full_sk(skb);
struct socket_smack *ssp;
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index e22aad7604e8..5dd1e164f9b1 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -932,7 +932,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
}
if (rc >= 0) {
old_cat = skp->smk_netlabel.attr.mls.cat;
- skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat;
+ rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat);
skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
synchronize_rcu();
netlbl_catmap_free(old_cat);
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 1e0dd1a6d0b0..90eccc6cd464 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -13,6 +13,21 @@ config SECURITY_TOMOYO
found at <https://tomoyo.sourceforge.net/>.
If you are unsure how to answer this question, answer N.
+config SECURITY_TOMOYO_LKM
+ bool "Cut out most of TOMOYO's code to a loadable kernel module"
+ default n
+ depends on SECURITY_TOMOYO
+ depends on MODULES
+ help
+	  Say Y here if you want to include TOMOYO without bloating the
+	  vmlinux file. If you say Y, most of TOMOYO's code is split out
+	  into a loadable kernel module named tomoyo.ko. This option is
+	  useful for kernels built by Linux distributors where TOMOYO is
+	  included but not enabled by default. Note that tomoyo.ko is
+	  loaded immediately before the userspace policy loader is called;
+	  to activate TOMOYO without calling the userspace policy loader,
+	  be sure to load tomoyo.ko explicitly.
+
config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
int "Default maximal count for learning mode"
default 2048
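With SECURITY_TOMOYO_LKM=y the built-in stubs stay dormant until tomoyo.ko is loaded, which normally happens just before the userspace policy loader is invoked. A hedged manual activation, for setups that bypass the policy loader:

    # modprobe tomoyo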
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 55c67b9846a9..287a7d16fa15 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
+tomoyo-objs := audit.o common.o condition.o domain.o environ.o file.o gc.o group.o memory.o mount.o network.o proxy.o realpath.o securityfs_if.o util.o
+obj-y += init.o load_policy.o
+ifdef CONFIG_SECURITY_TOMOYO_LKM
+obj-m += tomoyo.o
+else
+obj-y += tomoyo.o
+endif
targets += builtin-policy.h
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 5c7b059a332a..c0ef014f8009 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -998,8 +998,13 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
- if (p)
+ if (p) {
domain = tomoyo_task(p)->domain_info;
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+ if (!domain)
+ domain = &tomoyo_kernel_domain;
+#endif
+ }
rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
if (tomoyo_domain_def(data + 7))
@@ -1710,8 +1715,13 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
- if (p)
+ if (p) {
domain = tomoyo_task(p)->domain_info;
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+ if (!domain)
+ domain = &tomoyo_kernel_domain;
+#endif
+ }
rcu_read_unlock();
if (!domain)
return;
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 0e8e2e959aef..4f6c52a9f478 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -978,6 +978,7 @@ int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
int tomoyo_init_request_info(struct tomoyo_request_info *r,
struct tomoyo_domain_info *domain,
const u8 index);
+int __init tomoyo_interface_init(void);
int tomoyo_mkdev_perm(const u8 operation, const struct path *path,
const unsigned int mode, unsigned int dev);
int tomoyo_mount_permission(const char *dev_name, const struct path *path,
@@ -1214,10 +1215,14 @@ static inline void tomoyo_put_group(struct tomoyo_group *group)
*
* Returns pointer to "struct tomoyo_task" for specified thread.
*/
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+extern struct tomoyo_task *tomoyo_task(struct task_struct *task);
+#else
static inline struct tomoyo_task *tomoyo_task(struct task_struct *task)
{
return task->security + tomoyo_blob_sizes.lbs_task;
}
+#endif
/**
* tomoyo_same_name_union - Check for duplicated "struct tomoyo_name_union" entry.
@@ -1284,4 +1289,71 @@ static inline struct tomoyo_policy_namespace *tomoyo_current_namespace(void)
pos = srcu_dereference((head)->next, &tomoyo_ss); \
for ( ; pos != (head); pos = srcu_dereference(pos->next, &tomoyo_ss))
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) typedef RET (NAME##_t)(__VA_ARGS__);
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+
+struct tomoyo_hooks {
+ cred_prepare_t *cred_prepare;
+ bprm_committed_creds_t *bprm_committed_creds;
+ task_alloc_t *task_alloc;
+ task_free_t *task_free;
+ bprm_check_security_t *bprm_check_security;
+ file_fcntl_t *file_fcntl;
+ file_open_t *file_open;
+ file_truncate_t *file_truncate;
+ path_truncate_t *path_truncate;
+ path_unlink_t *path_unlink;
+ path_mkdir_t *path_mkdir;
+ path_rmdir_t *path_rmdir;
+ path_symlink_t *path_symlink;
+ path_mknod_t *path_mknod;
+ path_link_t *path_link;
+ path_rename_t *path_rename;
+ inode_getattr_t *inode_getattr;
+ file_ioctl_t *file_ioctl;
+ file_ioctl_compat_t *file_ioctl_compat;
+ path_chmod_t *path_chmod;
+ path_chown_t *path_chown;
+ path_chroot_t *path_chroot;
+ sb_mount_t *sb_mount;
+ sb_umount_t *sb_umount;
+ sb_pivotroot_t *sb_pivotroot;
+ socket_bind_t *socket_bind;
+ socket_connect_t *socket_connect;
+ socket_listen_t *socket_listen;
+ socket_sendmsg_t *socket_sendmsg;
+};
+
+extern void tomoyo_register_hooks(const struct tomoyo_hooks *tomoyo_hooks);
+
+struct tomoyo_operations {
+ void (*check_profile)(void);
+ int enabled;
+};
+
+extern struct tomoyo_operations tomoyo_ops;
+
+/*
+ * Temporary hack: functions needed by tomoyo.ko. This will be removed
+ * after all functions are marked as EXPORT_SYMBOL_GPL().
+ */
+struct tomoyo_tmp_exports {
+ struct task_struct * (*find_task_by_vpid)(pid_t nr);
+ struct task_struct * (*find_task_by_pid_ns)(pid_t nr, struct pid_namespace *ns);
+ void (*put_filesystem)(struct file_system_type *fs);
+ struct file * (*get_mm_exe_file)(struct mm_struct *mm);
+ char * (*d_absolute_path)(const struct path *path, char *buf, int buflen);
+};
+extern const struct tomoyo_tmp_exports tomoyo_tmp_exports;
+#define find_task_by_vpid tomoyo_tmp_exports.find_task_by_vpid
+#define find_task_by_pid_ns tomoyo_tmp_exports.find_task_by_pid_ns
+#define put_filesystem tomoyo_tmp_exports.put_filesystem
+#define get_mm_exe_file tomoyo_tmp_exports.get_mm_exe_file
+#define d_absolute_path tomoyo_tmp_exports.d_absolute_path
+
+#endif /* defined(CONFIG_SECURITY_TOMOYO_LKM) */
+
#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 90b53500a236..aed9e3ef2c9e 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -723,10 +723,13 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
ee->r.obj = &ee->obj;
ee->obj.path1 = bprm->file->f_path;
/* Get symlink's pathname of program. */
- retval = -ENOENT;
exename.name = tomoyo_realpath_nofollow(original_name);
- if (!exename.name)
- goto out;
+ if (!exename.name) {
+ /* Fallback to realpath if symlink's pathname does not exist. */
+ exename.name = tomoyo_realpath_from_path(&bprm->file->f_path);
+ if (!exename.name)
+ goto out;
+ }
tomoyo_fill_path_info(&exename);
retry:
/* Check 'aggregator' directive. */
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
index 026e29ea3796..6eccca150839 100644
--- a/security/tomoyo/gc.c
+++ b/security/tomoyo/gc.c
@@ -9,6 +9,9 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+/* Lock for GC. */
+DEFINE_SRCU(tomoyo_ss);
+
/**
* tomoyo_memory_free - Free memory for elements.
*
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/hooks.h
index 04a92c3d65d4..58929bb71477 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/hooks.h
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * security/tomoyo/tomoyo.c
+ * security/tomoyo/hooks.h
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
-#include <linux/lsm_hooks.h>
-#include <uapi/linux/lsm.h>
#include "common.h"
/**
@@ -18,10 +16,6 @@ struct tomoyo_domain_info *tomoyo_domain(void)
{
struct tomoyo_task *s = tomoyo_task(current);
- if (s->old_domain_info && !current->in_execve) {
- atomic_dec(&s->old_domain_info->users);
- s->old_domain_info = NULL;
- }
return s->domain_info;
}
@@ -62,26 +56,6 @@ static void tomoyo_bprm_committed_creds(const struct linux_binprm *bprm)
s->old_domain_info = NULL;
}
-#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
-/**
- * tomoyo_bprm_creds_for_exec - Target for security_bprm_creds_for_exec().
- *
- * @bprm: Pointer to "struct linux_binprm".
- *
- * Returns 0.
- */
-static int tomoyo_bprm_creds_for_exec(struct linux_binprm *bprm)
-{
- /*
- * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
- * for the first time.
- */
- if (!tomoyo_policy_loaded)
- tomoyo_load_policy(bprm->filename);
- return 0;
-}
-#endif
-
/**
* tomoyo_bprm_check_security - Target for security_bprm_check().
*
@@ -501,10 +475,6 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
return tomoyo_socket_sendmsg_permission(sock, msg, size);
}
-struct lsm_blob_sizes tomoyo_blob_sizes __ro_after_init = {
- .lbs_task = sizeof(struct tomoyo_task),
-};
-
/**
* tomoyo_task_alloc - Target for security_task_alloc().
*
@@ -543,81 +513,3 @@ static void tomoyo_task_free(struct task_struct *task)
s->old_domain_info = NULL;
}
}
-
-static const struct lsm_id tomoyo_lsmid = {
- .name = "tomoyo",
- .id = LSM_ID_TOMOYO,
-};
-
-/*
- * tomoyo_security_ops is a "struct security_operations" which is used for
- * registering TOMOYO.
- */
-static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
- LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
- LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
- LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
- LSM_HOOK_INIT(task_free, tomoyo_task_free),
-#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
- LSM_HOOK_INIT(bprm_creds_for_exec, tomoyo_bprm_creds_for_exec),
-#endif
- LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
- LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
- LSM_HOOK_INIT(file_open, tomoyo_file_open),
- LSM_HOOK_INIT(file_truncate, tomoyo_file_truncate),
- LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
- LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
- LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
- LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
- LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
- LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
- LSM_HOOK_INIT(path_link, tomoyo_path_link),
- LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
- LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
- LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
- LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
- LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
- LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
- LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
- LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
- LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
- LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
- LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
- LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
- LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
- LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
-};
-
-/* Lock for GC. */
-DEFINE_SRCU(tomoyo_ss);
-
-int tomoyo_enabled __ro_after_init = 1;
-
-/**
- * tomoyo_init - Register TOMOYO Linux as a LSM module.
- *
- * Returns 0.
- */
-static int __init tomoyo_init(void)
-{
- struct tomoyo_task *s = tomoyo_task(current);
-
- /* register ourselves with the security framework */
- security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks),
- &tomoyo_lsmid);
- pr_info("TOMOYO Linux initialized\n");
- s->domain_info = &tomoyo_kernel_domain;
- atomic_inc(&tomoyo_kernel_domain.users);
- s->old_domain_info = NULL;
- tomoyo_mm_init();
-
- return 0;
-}
-
-DEFINE_LSM(tomoyo) = {
- .name = "tomoyo",
- .enabled = &tomoyo_enabled,
- .flags = LSM_FLAG_LEGACY_MAJOR,
- .blobs = &tomoyo_blob_sizes,
- .init = tomoyo_init,
-};
diff --git a/security/tomoyo/init.c b/security/tomoyo/init.c
new file mode 100644
index 000000000000..034e7db22d4e
--- /dev/null
+++ b/security/tomoyo/init.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/init.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/lsm_hooks.h>
+#include <uapi/linux/lsm.h>
+#include "common.h"
+
+#ifndef CONFIG_SECURITY_TOMOYO_LKM
+
+#include "hooks.h"
+
+#else
+
+#define DEFINE_STATIC_CALL_PROXY(NAME) \
+ static NAME##_t tomoyo_##NAME; \
+ DEFINE_STATIC_CALL_RET0(tomoyo_##NAME, tomoyo_##NAME);
+DEFINE_STATIC_CALL_PROXY(cred_prepare)
+DEFINE_STATIC_CALL_PROXY(bprm_committed_creds)
+DEFINE_STATIC_CALL_PROXY(bprm_check_security)
+DEFINE_STATIC_CALL_PROXY(inode_getattr)
+DEFINE_STATIC_CALL_PROXY(path_truncate)
+DEFINE_STATIC_CALL_PROXY(file_truncate)
+DEFINE_STATIC_CALL_PROXY(path_unlink)
+DEFINE_STATIC_CALL_PROXY(path_mkdir)
+DEFINE_STATIC_CALL_PROXY(path_rmdir)
+DEFINE_STATIC_CALL_PROXY(path_symlink)
+DEFINE_STATIC_CALL_PROXY(path_mknod)
+DEFINE_STATIC_CALL_PROXY(path_link)
+DEFINE_STATIC_CALL_PROXY(path_rename)
+DEFINE_STATIC_CALL_PROXY(file_fcntl)
+DEFINE_STATIC_CALL_PROXY(file_open)
+DEFINE_STATIC_CALL_PROXY(file_ioctl)
+DEFINE_STATIC_CALL_PROXY(path_chmod)
+DEFINE_STATIC_CALL_PROXY(path_chown)
+DEFINE_STATIC_CALL_PROXY(path_chroot)
+DEFINE_STATIC_CALL_PROXY(sb_mount)
+DEFINE_STATIC_CALL_PROXY(sb_umount)
+DEFINE_STATIC_CALL_PROXY(sb_pivotroot)
+DEFINE_STATIC_CALL_PROXY(socket_listen)
+DEFINE_STATIC_CALL_PROXY(socket_connect)
+DEFINE_STATIC_CALL_PROXY(socket_bind)
+DEFINE_STATIC_CALL_PROXY(socket_sendmsg)
+DEFINE_STATIC_CALL_PROXY(task_alloc)
+DEFINE_STATIC_CALL_PROXY(task_free)
+#undef DEFINE_STATIC_CALL_PROXY
+
+static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp)
+{
+ return static_call(tomoyo_cred_prepare)(new, old, gfp);
+}
+
+static void tomoyo_bprm_committed_creds(const struct linux_binprm *bprm)
+{
+ static_call(tomoyo_bprm_committed_creds)(bprm);
+}
+
+static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
+{
+ return static_call(tomoyo_bprm_check_security)(bprm);
+}
+
+static int tomoyo_inode_getattr(const struct path *path)
+{
+ return static_call(tomoyo_inode_getattr)(path);
+}
+
+static int tomoyo_path_truncate(const struct path *path)
+{
+ return static_call(tomoyo_path_truncate)(path);
+}
+
+static int tomoyo_file_truncate(struct file *file)
+{
+ return static_call(tomoyo_file_truncate)(file);
+}
+
+static int tomoyo_path_unlink(const struct path *parent, struct dentry *dentry)
+{
+ return static_call(tomoyo_path_unlink)(parent, dentry);
+}
+
+static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry, umode_t mode)
+{
+ return static_call(tomoyo_path_mkdir)(parent, dentry, mode);
+}
+
+static int tomoyo_path_rmdir(const struct path *parent, struct dentry *dentry)
+{
+ return static_call(tomoyo_path_rmdir)(parent, dentry);
+}
+
+static int tomoyo_path_symlink(const struct path *parent, struct dentry *dentry,
+ const char *old_name)
+{
+ return static_call(tomoyo_path_symlink)(parent, dentry, old_name);
+}
+
+static int tomoyo_path_mknod(const struct path *parent, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+{
+ return static_call(tomoyo_path_mknod)(parent, dentry, mode, dev);
+}
+
+static int tomoyo_path_link(struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry)
+{
+ return static_call(tomoyo_path_link)(old_dentry, new_dir, new_dentry);
+}
+
+static int tomoyo_path_rename(const struct path *old_parent, struct dentry *old_dentry,
+ const struct path *new_parent, struct dentry *new_dentry,
+ const unsigned int flags)
+{
+ return static_call(tomoyo_path_rename)(old_parent, old_dentry, new_parent, new_dentry, flags);
+}
+
+static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return static_call(tomoyo_file_fcntl)(file, cmd, arg);
+}
+
+static int tomoyo_file_open(struct file *f)
+{
+ return static_call(tomoyo_file_open)(f);
+}
+
+static int tomoyo_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return static_call(tomoyo_file_ioctl)(file, cmd, arg);
+}
+
+static int tomoyo_path_chmod(const struct path *path, umode_t mode)
+{
+ return static_call(tomoyo_path_chmod)(path, mode);
+}
+
+static int tomoyo_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
+{
+ return static_call(tomoyo_path_chown)(path, uid, gid);
+}
+
+static int tomoyo_path_chroot(const struct path *path)
+{
+ return static_call(tomoyo_path_chroot)(path);
+}
+
+static int tomoyo_sb_mount(const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+{
+ return static_call(tomoyo_sb_mount)(dev_name, path, type, flags, data);
+}
+
+static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
+{
+ return static_call(tomoyo_sb_umount)(mnt, flags);
+}
+
+static int tomoyo_sb_pivotroot(const struct path *old_path, const struct path *new_path)
+{
+ return static_call(tomoyo_sb_pivotroot)(old_path, new_path);
+}
+
+static int tomoyo_socket_listen(struct socket *sock, int backlog)
+{
+ return static_call(tomoyo_socket_listen)(sock, backlog);
+}
+
+static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+ return static_call(tomoyo_socket_connect)(sock, addr, addr_len);
+}
+
+static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+ return static_call(tomoyo_socket_bind)(sock, addr, addr_len);
+}
+
+static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
+{
+ return static_call(tomoyo_socket_sendmsg)(sock, msg, size);
+}
+
+static int tomoyo_task_alloc(struct task_struct *task, unsigned long clone_flags)
+{
+ return static_call(tomoyo_task_alloc)(task, clone_flags);
+}
+
+static void tomoyo_task_free(struct task_struct *task)
+{
+ static_call(tomoyo_task_free)(task);
+}
+
+void tomoyo_register_hooks(const struct tomoyo_hooks *tomoyo_hooks)
+{
+ static void *registered;
+
+ if (cmpxchg(&registered, NULL, &registered))
+ panic("%s was called twice!\n", __func__);
+ static_call_update(tomoyo_task_free, tomoyo_hooks->task_free);
+ static_call_update(tomoyo_task_alloc, tomoyo_hooks->task_alloc);
+ static_call_update(tomoyo_cred_prepare, tomoyo_hooks->cred_prepare);
+ static_call_update(tomoyo_bprm_committed_creds, tomoyo_hooks->bprm_committed_creds);
+ static_call_update(tomoyo_bprm_check_security, tomoyo_hooks->bprm_check_security);
+ static_call_update(tomoyo_inode_getattr, tomoyo_hooks->inode_getattr);
+ static_call_update(tomoyo_path_truncate, tomoyo_hooks->path_truncate);
+ static_call_update(tomoyo_file_truncate, tomoyo_hooks->file_truncate);
+ static_call_update(tomoyo_path_unlink, tomoyo_hooks->path_unlink);
+ static_call_update(tomoyo_path_mkdir, tomoyo_hooks->path_mkdir);
+ static_call_update(tomoyo_path_rmdir, tomoyo_hooks->path_rmdir);
+ static_call_update(tomoyo_path_symlink, tomoyo_hooks->path_symlink);
+ static_call_update(tomoyo_path_mknod, tomoyo_hooks->path_mknod);
+ static_call_update(tomoyo_path_link, tomoyo_hooks->path_link);
+ static_call_update(tomoyo_path_rename, tomoyo_hooks->path_rename);
+ static_call_update(tomoyo_file_fcntl, tomoyo_hooks->file_fcntl);
+ static_call_update(tomoyo_file_open, tomoyo_hooks->file_open);
+ static_call_update(tomoyo_file_ioctl, tomoyo_hooks->file_ioctl);
+ static_call_update(tomoyo_path_chmod, tomoyo_hooks->path_chmod);
+ static_call_update(tomoyo_path_chown, tomoyo_hooks->path_chown);
+ static_call_update(tomoyo_path_chroot, tomoyo_hooks->path_chroot);
+ static_call_update(tomoyo_sb_mount, tomoyo_hooks->sb_mount);
+ static_call_update(tomoyo_sb_umount, tomoyo_hooks->sb_umount);
+ static_call_update(tomoyo_sb_pivotroot, tomoyo_hooks->sb_pivotroot);
+ static_call_update(tomoyo_socket_listen, tomoyo_hooks->socket_listen);
+ static_call_update(tomoyo_socket_connect, tomoyo_hooks->socket_connect);
+ static_call_update(tomoyo_socket_bind, tomoyo_hooks->socket_bind);
+ static_call_update(tomoyo_socket_sendmsg, tomoyo_hooks->socket_sendmsg);
+}
+EXPORT_SYMBOL_GPL(tomoyo_register_hooks);
+
+/*
+ * Temporary hack: functions needed by tomoyo.ko. This hack will be removed
+ * after all functions are marked as EXPORT_SYMBOL_GPL().
+ */
+#undef find_task_by_vpid
+#undef find_task_by_pid_ns
+#undef put_filesystem
+#undef get_mm_exe_file
+#undef d_absolute_path
+const struct tomoyo_tmp_exports tomoyo_tmp_exports = {
+ .find_task_by_vpid = find_task_by_vpid,
+ .find_task_by_pid_ns = find_task_by_pid_ns,
+ .put_filesystem = put_filesystem,
+ .get_mm_exe_file = get_mm_exe_file,
+ .d_absolute_path = d_absolute_path,
+};
+EXPORT_SYMBOL_GPL(tomoyo_tmp_exports);
+
+#endif
+
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+static int tomoyo_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ /*
+ * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
+ * for the first time.
+ */
+ if (!tomoyo_policy_loaded)
+ tomoyo_load_policy(bprm->filename);
+ return 0;
+}
+#endif
+
+struct lsm_blob_sizes tomoyo_blob_sizes __ro_after_init = {
+ .lbs_task = sizeof(struct tomoyo_task),
+};
+
+static const struct lsm_id tomoyo_lsmid = {
+ .name = "tomoyo",
+ .id = LSM_ID_TOMOYO,
+};
+
+/* tomoyo_hooks is used for registering TOMOYO. */
+static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
+ LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
+ LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
+ LSM_HOOK_INIT(task_free, tomoyo_task_free),
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ LSM_HOOK_INIT(bprm_creds_for_exec, tomoyo_bprm_creds_for_exec),
+#endif
+ LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
+ LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
+ LSM_HOOK_INIT(file_open, tomoyo_file_open),
+ LSM_HOOK_INIT(file_truncate, tomoyo_file_truncate),
+ LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
+ LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
+ LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
+ LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
+ LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
+ LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
+ LSM_HOOK_INIT(path_link, tomoyo_path_link),
+ LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
+ LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
+ LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
+ LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
+ LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
+ LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
+ LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
+ LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
+ LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
+ LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
+ LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
+ LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
+};
+
+int tomoyo_enabled __ro_after_init = 1;
+
+/* Has /sbin/init started? */
+bool tomoyo_policy_loaded;
+
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+EXPORT_SYMBOL_GPL(tomoyo_blob_sizes);
+EXPORT_SYMBOL_GPL(tomoyo_policy_loaded);
+
+struct tomoyo_operations tomoyo_ops;
+EXPORT_SYMBOL_GPL(tomoyo_ops);
+
+/**
+ * tomoyo_init - Reserve hooks for TOMOYO Linux.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_init(void)
+{
+ /* register ourselves with the security framework */
+ security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks), &tomoyo_lsmid);
+ tomoyo_ops.enabled = tomoyo_enabled;
+ pr_info("Hooks for initializing TOMOYO Linux are ready\n");
+ return 0;
+}
+#else
+/**
+ * tomoyo_init - Register TOMOYO Linux as a LSM module.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_init(void)
+{
+ struct tomoyo_task *s = tomoyo_task(current);
+
+ /* register ourselves with the security framework */
+ security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks),
+ &tomoyo_lsmid);
+ pr_info("TOMOYO Linux initialized\n");
+ s->domain_info = &tomoyo_kernel_domain;
+ atomic_inc(&tomoyo_kernel_domain.users);
+ s->old_domain_info = NULL;
+ tomoyo_mm_init();
+
+ return 0;
+}
+#endif
+
+DEFINE_LSM(tomoyo) = {
+ .name = "tomoyo",
+ .enabled = &tomoyo_enabled,
+ .flags = LSM_FLAG_LEGACY_MAJOR,
+ .blobs = &tomoyo_blob_sizes,
+ .init = tomoyo_init,
+};
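
The static-call machinery in the new init.c follows a compact pattern: each hook defaults to a return-0 stub until tomoyo.ko patches in the real implementation, and a cmpxchg() guard makes registration a strict one-shot operation. A minimal sketch of that pattern, using hypothetical demo_* names rather than the TOMOYO symbols:

#include <linux/static_call.h>
#include <linux/atomic.h>
#include <linux/panic.h>

typedef int demo_hook_t(int arg);

/* Tentative declaration; only supplies the type for the static call. */
static demo_hook_t demo_hook;
DEFINE_STATIC_CALL_RET0(demo_hook, demo_hook);

/* Built-in caller: behaves as "return 0" until the module registers. */
static int call_demo_hook(int arg)
{
	return static_call(demo_hook)(arg);
}

/* Called once from the module's init; a second call is a fatal bug. */
void demo_register_hook(demo_hook_t *fn)
{
	static void *registered;

	if (cmpxchg(&registered, NULL, &registered))
		panic("%s was called twice!\n", __func__);
	static_call_update(demo_hook, fn);
}
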
diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c
index 363b65be87ab..6a2a72354a64 100644
--- a/security/tomoyo/load_policy.c
+++ b/security/tomoyo/load_policy.c
@@ -97,6 +97,14 @@ void tomoyo_load_policy(const char *filename)
if (!tomoyo_policy_loader_exists())
return;
done = true;
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+ /* Load tomoyo.ko if not yet loaded. */
+ if (!tomoyo_ops.check_profile)
+ request_module("tomoyo");
+ /* Check if tomoyo.ko was successfully loaded. */
+ if (!tomoyo_ops.check_profile)
+ panic("Failed to load tomoyo module.");
+#endif
pr_info("Calling %s to load policy. Please wait.\n", tomoyo_loader);
argv[0] = (char *) tomoyo_loader;
argv[1] = NULL;
@@ -104,7 +112,11 @@ void tomoyo_load_policy(const char *filename)
envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[2] = NULL;
call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+ tomoyo_ops.check_profile();
+#else
tomoyo_check_profile();
+#endif
}
#endif
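
The hunk above is the standard on-demand module bootstrap: probe an ops pointer, call request_module() once if it is unset, then treat a still-NULL pointer as a fatal load failure. A hedged sketch of the same idiom with hypothetical demo names:

#include <linux/kmod.h>
#include <linux/panic.h>

struct demo_ops {
	void (*check_profile)(void);
};
extern struct demo_ops demo_ops;	/* filled in by demo.ko's module_init() */

static void demo_ensure_loaded(void)
{
	/* Load demo.ko on first use. */
	if (!demo_ops.check_profile)
		request_module("demo");
	/* Still NULL means the module failed to load or register. */
	if (!demo_ops.check_profile)
		panic("Failed to load demo module.");
}
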
diff --git a/security/tomoyo/proxy.c b/security/tomoyo/proxy.c
new file mode 100644
index 000000000000..1618cc0f2af8
--- /dev/null
+++ b/security/tomoyo/proxy.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * security/tomoyo/proxy.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/security.h>
+#include "common.h"
+
+#ifdef CONFIG_SECURITY_TOMOYO_LKM
+
+struct tomoyo_task *tomoyo_task(struct task_struct *task)
+{
+ struct tomoyo_task *s = task->security + tomoyo_blob_sizes.lbs_task;
+
+ if (unlikely(!s->domain_info)) {
+ if (likely(task == current)) {
+ s->domain_info = &tomoyo_kernel_domain;
+ atomic_inc(&tomoyo_kernel_domain.users);
+ } else {
+ /* Caller handles s->domain_info == NULL case. */
+ }
+ }
+ return s;
+}
+
+#include "hooks.h"
+
+/**
+ * tomoyo_runtime_init - Register TOMOYO Linux as a loadable LSM module.
+ *
+ * Returns 0 if TOMOYO is enabled, -EINVAL otherwise.
+ */
+static int __init tomoyo_runtime_init(void)
+{
+ const struct tomoyo_hooks tomoyo_hooks = {
+ .cred_prepare = tomoyo_cred_prepare,
+ .bprm_committed_creds = tomoyo_bprm_committed_creds,
+ .task_alloc = tomoyo_task_alloc,
+ .task_free = tomoyo_task_free,
+ .bprm_check_security = tomoyo_bprm_check_security,
+ .file_fcntl = tomoyo_file_fcntl,
+ .file_open = tomoyo_file_open,
+ .file_truncate = tomoyo_file_truncate,
+ .path_truncate = tomoyo_path_truncate,
+ .path_unlink = tomoyo_path_unlink,
+ .path_mkdir = tomoyo_path_mkdir,
+ .path_rmdir = tomoyo_path_rmdir,
+ .path_symlink = tomoyo_path_symlink,
+ .path_mknod = tomoyo_path_mknod,
+ .path_link = tomoyo_path_link,
+ .path_rename = tomoyo_path_rename,
+ .inode_getattr = tomoyo_inode_getattr,
+ .file_ioctl = tomoyo_file_ioctl,
+ .file_ioctl_compat = tomoyo_file_ioctl,
+ .path_chmod = tomoyo_path_chmod,
+ .path_chown = tomoyo_path_chown,
+ .path_chroot = tomoyo_path_chroot,
+ .sb_mount = tomoyo_sb_mount,
+ .sb_umount = tomoyo_sb_umount,
+ .sb_pivotroot = tomoyo_sb_pivotroot,
+ .socket_bind = tomoyo_socket_bind,
+ .socket_connect = tomoyo_socket_connect,
+ .socket_listen = tomoyo_socket_listen,
+ .socket_sendmsg = tomoyo_socket_sendmsg,
+ };
+
+ if (!tomoyo_ops.enabled)
+ return -EINVAL;
+ tomoyo_ops.check_profile = tomoyo_check_profile;
+ pr_info("TOMOYO Linux initialized\n");
+ tomoyo_task(current);
+ tomoyo_mm_init();
+ tomoyo_interface_init();
+ tomoyo_register_hooks(&tomoyo_hooks);
+ return 0;
+}
+module_init(tomoyo_runtime_init);
+MODULE_LICENSE("GPL");
+
+#endif
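
tomoyo_task() above relies on the LSM framework's shared per-task blob: at boot the framework rewrites each LSM's lbs_task from a requested size into that LSM's byte offset inside the single task->security allocation. A simplified sketch of the addressing, not the framework's actual code:

/*
 * During LSM initialization the framework does, in effect:
 *
 *	lsm->blobs->lbs_task  = total_task_blob_size;	<- becomes the offset
 *	total_task_blob_size += requested_size;
 *
 * so at runtime each LSM finds its slice at a fixed byte offset.
 */
static inline struct tomoyo_task *demo_task_slice(struct task_struct *task)
{
	return task->security + tomoyo_blob_sizes.lbs_task;
}
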
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index a2705798476f..a3b821b7f477 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -229,17 +229,19 @@ static void __init tomoyo_create_entry(const char *name, const umode_t mode,
}
/**
- * tomoyo_initerface_init - Initialize /sys/kernel/security/tomoyo/ interface.
+ * tomoyo_interface_init - Initialize /sys/kernel/security/tomoyo/ interface.
*
* Returns 0.
*/
-static int __init tomoyo_initerface_init(void)
+int __init tomoyo_interface_init(void)
{
struct tomoyo_domain_info *domain;
struct dentry *tomoyo_dir;
+#ifndef CONFIG_SECURITY_TOMOYO_LKM
if (!tomoyo_enabled)
return 0;
+#endif
domain = tomoyo_domain();
/* Don't create securityfs entries unless registered. */
if (domain != &tomoyo_kernel_domain)
@@ -270,4 +272,6 @@ static int __init tomoyo_initerface_init(void)
return 0;
}
-fs_initcall(tomoyo_initerface_init);
+#ifndef CONFIG_SECURITY_TOMOYO_LKM
+fs_initcall(tomoyo_interface_init);
+#endif
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 6799b1122c9d..b851ff377382 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -13,9 +13,6 @@
/* Lock for protecting policy. */
DEFINE_MUTEX(tomoyo_policy_lock);
-/* Has /sbin/init started? */
-bool tomoyo_policy_loaded;
-
/*
* Mapping table from "enum tomoyo_mac_index" to
* "enum tomoyo_mac_category_index".
diff --git a/sound/core/control.c b/sound/core/control.c
index 4f55f64c42e1..2f790a7b1e90 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -2267,7 +2267,6 @@ static const struct file_operations snd_ctl_f_ops =
.read = snd_ctl_read,
.open = snd_ctl_open,
.release = snd_ctl_release,
- .llseek = no_llseek,
.poll = snd_ctl_poll,
.unlocked_ioctl = snd_ctl_ioctl,
.compat_ioctl = snd_ctl_ioctl_compat,
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 33bf9a220ada..668604d0ec9d 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -412,7 +412,6 @@ static const struct file_operations snd_mixer_oss_f_ops =
.owner = THIS_MODULE,
.open = snd_mixer_oss_open,
.release = snd_mixer_oss_release,
- .llseek = no_llseek,
.unlocked_ioctl = snd_mixer_oss_ioctl,
.compat_ioctl = snd_mixer_oss_ioctl_compat,
};
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 7386982cf40e..4683b9139c56 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -3106,7 +3106,6 @@ static const struct file_operations snd_pcm_oss_f_reg =
.write = snd_pcm_oss_write,
.open = snd_pcm_oss_open,
.release = snd_pcm_oss_release,
- .llseek = no_llseek,
.poll = snd_pcm_oss_poll,
.unlocked_ioctl = snd_pcm_oss_ioctl,
.compat_ioctl = snd_pcm_oss_ioctl_compat,
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 5e1e6006707b..5b9076829ade 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2250,12 +2250,12 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
bool nonatomic = substream->pcm->nonatomic;
CLASS(fd, f)(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADFD;
- if (!is_pcm_file(f.file))
+ if (!is_pcm_file(fd_file(f)))
return -EBADFD;
- pcm_file = f.file->private_data;
+ pcm_file = fd_file(f)->private_data;
substream1 = pcm_file->substream;
if (substream == substream1)
@@ -4115,7 +4115,6 @@ const struct file_operations snd_pcm_f_ops[2] = {
.write_iter = snd_pcm_writev,
.open = snd_pcm_playback_open,
.release = snd_pcm_release,
- .llseek = no_llseek,
.poll = snd_pcm_poll,
.unlocked_ioctl = snd_pcm_ioctl,
.compat_ioctl = snd_pcm_ioctl_compat,
@@ -4129,7 +4128,6 @@ const struct file_operations snd_pcm_f_ops[2] = {
.read_iter = snd_pcm_readv,
.open = snd_pcm_capture_open,
.release = snd_pcm_release,
- .llseek = no_llseek,
.poll = snd_pcm_poll,
.unlocked_ioctl = snd_pcm_ioctl,
.compat_ioctl = snd_pcm_ioctl_compat,
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 7accf9a1ddf4..03306be5fa02 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1784,7 +1784,6 @@ static const struct file_operations snd_rawmidi_f_ops = {
.write = snd_rawmidi_write,
.open = snd_rawmidi_open,
.release = snd_rawmidi_release,
- .llseek = no_llseek,
.poll = snd_rawmidi_poll,
.unlocked_ioctl = snd_rawmidi_ioctl,
.compat_ioctl = snd_rawmidi_ioctl_compat,
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6437193e42bf..3930e2f9082f 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -2722,7 +2722,6 @@ static const struct file_operations snd_seq_f_ops =
.write = snd_seq_write,
.open = snd_seq_open,
.release = snd_seq_release,
- .llseek = no_llseek,
.poll = snd_seq_poll,
.unlocked_ioctl = snd_seq_ioctl,
.compat_ioctl = snd_seq_ioctl_compat,
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 668c40bac318..fbada79380f9 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -2436,7 +2436,6 @@ static const struct file_operations snd_timer_f_ops =
.read = snd_timer_user_read,
.open = snd_timer_user_open,
.release = snd_timer_user_release,
- .llseek = no_llseek,
.poll = snd_timer_user_poll,
.unlocked_ioctl = snd_timer_user_ioctl,
.compat_ioctl = snd_timer_user_ioctl_compat,
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index c827d7d8d800..c72b2a754775 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -615,6 +615,22 @@ static void update_pcm_pointers(struct amdtp_stream *s,
	// The program in the user process should periodically check the status of the
	// intermediate buffer associated with the PCM substream to process PCM frames in
	// the buffer, instead of waiting for the period-elapsed notification via poll.
+ //
+ // Use another work item for period elapsed event to prevent the following AB/BA
+ // deadlock:
+ //
+ // thread 1 thread 2
+ // ================================= =================================
+ // A.work item (process) pcm ioctl (process)
+ // v v
+ // process_rx_packets() B.PCM stream lock
+ // process_tx_packets() v
+ // v callbacks in snd_pcm_ops
+ // update_pcm_pointers() v
+ // snd_pcm_elapsed() fw_iso_context_flush_completions()
+ // snd_pcm_stream_lock_irqsave() disable_work_sync()
+ // v v
+ // wait until release of B wait until A exits
if (!pcm->runtime->no_period_wakeup)
queue_work(system_highpri_wq, &s->period_work);
}
@@ -1055,8 +1071,15 @@ static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *de
static inline void cancel_stream(struct amdtp_stream *s)
{
+ struct work_struct *work = current_work();
+
s->packet_index = -1;
- if (in_softirq())
+
+	// Detect work items for any isochronous context. The work item for pcm_period_work()
+	// must be avoided, since snd_pcm_period_elapsed() can be reached via
+	// snd_pcm_ops.pointer() while the PCM stream (group) lock is held, which would
+	// deadlock at snd_pcm_stop_xrun().
+ if (work && work != &s->period_work)
amdtp_stream_pcm_abort(s);
WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
@@ -1856,12 +1879,9 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
struct amdtp_stream *irq_target = d->irq_target;
if (irq_target && amdtp_stream_running(irq_target)) {
- // use wq to prevent AB/BA deadlock competition for
- // substream lock:
- // fw_iso_context_flush_completions() acquires
- // lock by ohci_flush_iso_completions(),
- // amdtp-stream process_rx_packets() attempts to
- // acquire same lock by snd_pcm_elapsed()
+		// The work item that calls snd_pcm_period_elapsed() can reach here via
+		// snd_pcm_ops.pointer(); however, fewer packets would be available then, so
+		// the following call is only useful in user process contexts.
if (current_work() != &s->period_work)
fw_iso_context_flush_completions(irq_target->context);
}
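
The cancel_stream() change above leans on current_work() from <linux/workqueue.h>, which lets a handler identify which work item it is executing as, or NULL outside worker context. A minimal sketch of that self-identification idiom with hypothetical names:

#include <linux/workqueue.h>

static struct work_struct demo_period_work;

static bool demo_in_period_work(void)
{
	/* current_work() returns NULL when not running from a workqueue. */
	struct work_struct *work = current_work();

	return work == &demo_period_work;
}
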
diff --git a/sound/firewire/bebob/bebob_pcm.c b/sound/firewire/bebob/bebob_pcm.c
index ce49eef0fcba..360ebf3c4ca2 100644
--- a/sound/firewire/bebob/bebob_pcm.c
+++ b/sound/firewire/bebob/bebob_pcm.c
@@ -367,6 +367,7 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
goto end;
pcm->private_data = bebob;
+ pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name),
"%s PCM", bebob->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index d64366217d57..2cf2adb48f2a 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -441,6 +441,7 @@ int snd_dice_create_pcm(struct snd_dice *dice)
if (err < 0)
return err;
pcm->private_data = dice;
+ pcm->nonatomic = true;
strcpy(pcm->name, dice->card->shortname);
if (capture > 0)
diff --git a/sound/firewire/digi00x/digi00x-pcm.c b/sound/firewire/digi00x/digi00x-pcm.c
index 3bd1575c9d9c..85e65cbc00c4 100644
--- a/sound/firewire/digi00x/digi00x-pcm.c
+++ b/sound/firewire/digi00x/digi00x-pcm.c
@@ -350,6 +350,7 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
return err;
pcm->private_data = dg00x;
+ pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name),
"%s PCM", dg00x->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c
index ec915671a79b..63457d24a288 100644
--- a/sound/firewire/fireface/ff-pcm.c
+++ b/sound/firewire/fireface/ff-pcm.c
@@ -390,6 +390,7 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
return err;
pcm->private_data = ff;
+ pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name),
"%s PCM", ff->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_playback_ops);
diff --git a/sound/firewire/fireworks/fireworks_pcm.c b/sound/firewire/fireworks/fireworks_pcm.c
index c3c21860b245..eaf7778211de 100644
--- a/sound/firewire/fireworks/fireworks_pcm.c
+++ b/sound/firewire/fireworks/fireworks_pcm.c
@@ -397,6 +397,7 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
goto end;
pcm->private_data = efw;
+ pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name), "%s PCM", efw->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 806f82c9ceee..b1e059f0d473 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -454,6 +454,7 @@ static int isight_create_pcm(struct isight *isight)
if (err < 0)
return err;
pcm->private_data = isight;
+ pcm->nonatomic = true;
strcpy(pcm->name, "iSight");
isight->pcm = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
isight->pcm->ops = &ops;
diff --git a/sound/firewire/motu/motu-pcm.c b/sound/firewire/motu/motu-pcm.c
index d410c2efbde5..f3b48495acae 100644
--- a/sound/firewire/motu/motu-pcm.c
+++ b/sound/firewire/motu/motu-pcm.c
@@ -360,6 +360,7 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
if (err < 0)
return err;
pcm->private_data = motu;
+ pcm->nonatomic = true;
strcpy(pcm->name, motu->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 5f43a0b826d2..8ca9dde54ec6 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -440,6 +440,7 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
return err;
pcm->private_data = oxfw;
+ pcm->nonatomic = true;
strcpy(pcm->name, oxfw->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
if (cap > 0)
diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
index f6da571707ac..a73003ac11e6 100644
--- a/sound/firewire/tascam/tascam-pcm.c
+++ b/sound/firewire/tascam/tascam-pcm.c
@@ -279,6 +279,7 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
return err;
pcm->private_data = tscm;
+ pcm->nonatomic = true;
snprintf(pcm->name, sizeof(pcm->name),
"%s PCM", tscm->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index 4b1baf4dd50e..dea2d9b18fc9 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -381,7 +381,6 @@ static long mixer_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
static const struct file_operations mixer_fops =
{
.owner = THIS_MODULE,
- .llseek = no_llseek,
.unlocked_ioctl = mixer_unlocked_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = mixer_open,
@@ -1155,7 +1154,6 @@ static long sq_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
static const struct file_operations sq_fops =
{
.owner = THIS_MODULE,
- .llseek = no_llseek,
.write = sq_write,
.poll = sq_poll,
.unlocked_ioctl = sq_unlocked_ioctl,
@@ -1351,7 +1349,6 @@ static ssize_t state_read(struct file *file, char __user *buf, size_t count,
static const struct file_operations state_fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.read = state_read,
.open = state_open,
.release = state_release,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index edeaf3ee273c..045cd555c291 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2688,7 +2688,7 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
AZX_DCAPS_PM_RUNTIME },
/* GLENFLY */
- { PCI_DEVICE(0x6766, PCI_ANY_ID),
+ { PCI_DEVICE(PCI_VENDOR_ID_GLENFLY, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
.driver_data = AZX_DRIVER_GFHDMI | AZX_DCAPS_POSFIX_LPIB |
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 38a83c4dcc2d..97def4e53fbc 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -31,12 +31,3 @@ config SND_EP93XX_SOC_I2S_WATCHDOG
endif # if SND_EP93XX_SOC_I2S
-config SND_EP93XX_SOC_EDB93XX
- tristate "SoC Audio support for Cirrus Logic EDB93xx boards"
- depends on SND_EP93XX_SOC && (MACH_EDB9301 || MACH_EDB9302 || MACH_EDB9302A || MACH_EDB9307A || MACH_EDB9315A)
- select SND_EP93XX_SOC_I2S
- select SND_SOC_CS4271_I2C if I2C
- select SND_SOC_CS4271_SPI if SPI_MASTER
- help
- Say Y or M here if you want to add support for I2S audio on the
- Cirrus Logic EDB93xx boards.
diff --git a/sound/soc/cirrus/Makefile b/sound/soc/cirrus/Makefile
index ad606b293715..61d8cf64e859 100644
--- a/sound/soc/cirrus/Makefile
+++ b/sound/soc/cirrus/Makefile
@@ -6,7 +6,3 @@ snd-soc-ep93xx-i2s-y := ep93xx-i2s.o
obj-$(CONFIG_SND_EP93XX_SOC) += snd-soc-ep93xx.o
obj-$(CONFIG_SND_EP93XX_SOC_I2S) += snd-soc-ep93xx-i2s.o
-# EP93XX Machine Support
-snd-soc-edb93xx-y := edb93xx.o
-
-obj-$(CONFIG_SND_EP93XX_SOC_EDB93XX) += snd-soc-edb93xx.o
diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
deleted file mode 100644
index 8dac754ddb0d..000000000000
--- a/sound/soc/cirrus/edb93xx.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SoC audio for EDB93xx
- *
- * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
- *
- * This driver support CS4271 codec being master or slave, working
- * in control port mode, connected either via SPI or I2C.
- * The data format accepted is I2S or left-justified.
- * DAPM support not implemented.
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/soc/cirrus/ep93xx.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <asm/mach-types.h>
-
-static int edb93xx_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
- struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
- struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
- int err;
- unsigned int mclk_rate;
- unsigned int rate = params_rate(params);
-
- /*
- * According to CS4271 datasheet we use MCLK/LRCK=256 for
- * rates below 50kHz and 128 for higher sample rates
- */
- if (rate < 50000)
- mclk_rate = rate * 64 * 4;
- else
- mclk_rate = rate * 64 * 2;
-
- err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk_rate,
- SND_SOC_CLOCK_IN);
- if (err)
- return err;
-
- return snd_soc_dai_set_sysclk(cpu_dai, 0, mclk_rate,
- SND_SOC_CLOCK_OUT);
-}
-
-static const struct snd_soc_ops edb93xx_ops = {
- .hw_params = edb93xx_hw_params,
-};
-
-SND_SOC_DAILINK_DEFS(hifi,
- DAILINK_COMP_ARRAY(COMP_CPU("ep93xx-i2s")),
- DAILINK_COMP_ARRAY(COMP_CODEC("spi0.0", "cs4271-hifi")),
- DAILINK_COMP_ARRAY(COMP_PLATFORM("ep93xx-i2s")));
-
-static struct snd_soc_dai_link edb93xx_dai = {
- .name = "CS4271",
- .stream_name = "CS4271 HiFi",
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBC_CFC,
- .ops = &edb93xx_ops,
- SND_SOC_DAILINK_REG(hifi),
-};
-
-static struct snd_soc_card snd_soc_edb93xx = {
- .name = "EDB93XX",
- .owner = THIS_MODULE,
- .dai_link = &edb93xx_dai,
- .num_links = 1,
-};
-
-static int edb93xx_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &snd_soc_edb93xx;
- int ret;
-
- ret = ep93xx_i2s_acquire();
- if (ret)
- return ret;
-
- card->dev = &pdev->dev;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
- ret);
- ep93xx_i2s_release();
- }
-
- return ret;
-}
-
-static void edb93xx_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
- ep93xx_i2s_release();
-}
-
-static struct platform_driver edb93xx_driver = {
- .driver = {
- .name = "edb93xx-audio",
- },
- .probe = edb93xx_probe,
- .remove = edb93xx_remove,
-};
-
-module_platform_driver(edb93xx_driver);
-
-MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
-MODULE_DESCRIPTION("ALSA SoC EDB93xx");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:edb93xx-audio");
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index d45862ceb0c9..cca01c03f048 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -24,7 +24,6 @@
#include <sound/initval.h>
#include <sound/soc.h>
-#include <linux/platform_data/dma-ep93xx.h>
#include <linux/soc/cirrus/ep93xx.h>
#include "ep93xx-pcm.h"
@@ -80,19 +79,6 @@ struct ep93xx_i2s_info {
struct snd_dmaengine_dai_dma_data dma_params_tx;
};
-static struct ep93xx_dma_data ep93xx_i2s_dma_data[] = {
- [SNDRV_PCM_STREAM_PLAYBACK] = {
- .name = "i2s-pcm-out",
- .port = EP93XX_DMA_I2S1,
- .direction = DMA_MEM_TO_DEV,
- },
- [SNDRV_PCM_STREAM_CAPTURE] = {
- .name = "i2s-pcm-in",
- .port = EP93XX_DMA_I2S1,
- .direction = DMA_DEV_TO_MEM,
- },
-};
-
static inline void ep93xx_i2s_write_reg(struct ep93xx_i2s_info *info,
unsigned reg, unsigned val)
{
@@ -198,11 +184,6 @@ static int ep93xx_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
- info->dma_params_tx.filter_data =
- &ep93xx_i2s_dma_data[SNDRV_PCM_STREAM_PLAYBACK];
- info->dma_params_rx.filter_data =
- &ep93xx_i2s_dma_data[SNDRV_PCM_STREAM_CAPTURE];
-
snd_soc_dai_init_dma_data(dai, &info->dma_params_tx,
&info->dma_params_rx);
diff --git a/sound/soc/cirrus/ep93xx-pcm.c b/sound/soc/cirrus/ep93xx-pcm.c
index fa72acd8d334..5ecb4671cbba 100644
--- a/sound/soc/cirrus/ep93xx-pcm.c
+++ b/sound/soc/cirrus/ep93xx-pcm.c
@@ -18,8 +18,6 @@
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
-#include <linux/platform_data/dma-ep93xx.h>
-
#include "ep93xx-pcm.h"
static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
@@ -35,30 +33,15 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
.fifo_size = 32,
};
-static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
-{
- struct ep93xx_dma_data *data = filter_param;
-
- if (data->direction == ep93xx_dma_chan_direction(chan)) {
- chan->private = data;
- return true;
- }
-
- return false;
-}
-
static const struct snd_dmaengine_pcm_config ep93xx_dmaengine_pcm_config = {
.pcm_hardware = &ep93xx_pcm_hardware,
- .compat_filter_fn = ep93xx_pcm_dma_filter,
.prealloc_buffer_size = 131072,
};
int devm_ep93xx_pcm_platform_register(struct device *dev)
{
return devm_snd_dmaengine_pcm_register(dev,
- &ep93xx_dmaengine_pcm_config,
- SND_DMAENGINE_PCM_FLAG_NO_DT |
- SND_DMAENGINE_PCM_FLAG_COMPAT);
+ &ep93xx_dmaengine_pcm_config, 0);
}
EXPORT_SYMBOL_GPL(devm_ep93xx_pcm_platform_register);
diff --git a/sound/soc/intel/avs/debugfs.c b/sound/soc/intel/avs/debugfs.c
index 3fc2bbb63369..1767ded4d983 100644
--- a/sound/soc/intel/avs/debugfs.c
+++ b/sound/soc/intel/avs/debugfs.c
@@ -68,7 +68,6 @@ static ssize_t fw_regs_read(struct file *file, char __user *to, size_t count, lo
static const struct file_operations fw_regs_fops = {
.open = simple_open,
.read = fw_regs_read,
- .llseek = no_llseek,
};
static ssize_t debug_window_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
@@ -93,7 +92,6 @@ static ssize_t debug_window_read(struct file *file, char __user *to, size_t coun
static const struct file_operations debug_window_fops = {
.open = simple_open,
.read = debug_window_read,
- .llseek = no_llseek,
};
static ssize_t probe_points_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
@@ -170,7 +168,6 @@ static const struct file_operations probe_points_fops = {
.open = simple_open,
.read = probe_points_read,
.write = probe_points_write,
- .llseek = no_llseek,
};
static ssize_t probe_points_disconnect_write(struct file *file, const char __user *from,
diff --git a/tools/Makefile b/tools/Makefile
index 276f5d0d53a4..278d24723b74 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -28,6 +28,7 @@ help:
@echo ' pci - PCI tools'
@echo ' perf - Linux performance measurement and analysis tool'
@echo ' selftests - various kernel selftests'
+ @echo ' sched_ext - sched_ext example schedulers'
@echo ' bootconfig - boot config tool'
@echo ' spi - spi tools'
@echo ' tmon - thermal monitoring and tuning tool'
@@ -91,6 +92,9 @@ perf: FORCE
$(Q)mkdir -p $(PERF_O) .
$(Q)$(MAKE) --no-print-directory -C perf O=$(PERF_O) subdir=
+sched_ext: FORCE
+ $(call descend,sched_ext)
+
selftests: FORCE
$(call descend,testing/$@)
@@ -184,6 +188,9 @@ perf_clean:
$(Q)mkdir -p $(PERF_O) .
$(Q)$(MAKE) --no-print-directory -C perf O=$(PERF_O) subdir= clean
+sched_ext_clean:
+ $(call descend,sched_ext,clean)
+
selftests_clean:
$(call descend,testing/$(@:_clean=),clean)
@@ -213,6 +220,7 @@ clean: acpi_clean counter_clean cpupower_clean hv_clean firewire_clean \
mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean \
gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
- intel-speed-select_clean tracing_clean thermal_clean thermometer_clean thermal-engine_clean
+ intel-speed-select_clean tracing_clean thermal_clean thermometer_clean thermal-engine_clean \
+ sched_ext_clean
.PHONY: FORCE
diff --git a/tools/arch/riscv/include/asm/barrier.h b/tools/arch/riscv/include/asm/barrier.h
new file mode 100644
index 000000000000..6997f197086d
--- /dev/null
+++ b/tools/arch/riscv/include/asm/barrier.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copied from the kernel sources to tools/arch/riscv:
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2013 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _TOOLS_LINUX_ASM_RISCV_BARRIER_H
+#define _TOOLS_LINUX_ASM_RISCV_BARRIER_H
+
+#include <asm/fence.h>
+#include <linux/compiler.h>
+
+/* These barriers need to enforce ordering on both devices and memory. */
+#define mb() RISCV_FENCE(iorw, iorw)
+#define rmb() RISCV_FENCE(ir, ir)
+#define wmb() RISCV_FENCE(ow, ow)
+
+/* These barriers do not need to enforce ordering on devices, just memory. */
+#define smp_mb() RISCV_FENCE(rw, rw)
+#define smp_rmb() RISCV_FENCE(r, r)
+#define smp_wmb() RISCV_FENCE(w, w)
+
+#define smp_store_release(p, v) \
+do { \
+ RISCV_FENCE(rw, w); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ RISCV_FENCE(r, rw); \
+ ___p1; \
+})
+
+#endif /* _TOOLS_LINUX_ASM_RISCV_BARRIER_H */
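
smp_store_release()/smp_load_acquire() above implement the usual publish/consume pairing; a minimal usage sketch with hypothetical payload/ready variables:

static int payload;
static int ready;

/* Producer: the fence rw,w orders the payload store before the flag. */
static void publish(int value)
{
	payload = value;
	smp_store_release(&ready, 1);
}

/* Consumer: the load plus fence r,rw guarantees that once the flag is
 * observed, the preceding payload store is visible too. */
static int try_consume(int *out)
{
	if (!smp_load_acquire(&ready))
		return 0;
	*out = payload;
	return 1;
}

RISCV_FENCE(rw, w) itself expands, via the stringification helper in fence.h below, to the literal instruction "fence rw,w".
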
diff --git a/tools/arch/riscv/include/asm/fence.h b/tools/arch/riscv/include/asm/fence.h
new file mode 100644
index 000000000000..37860e86771d
--- /dev/null
+++ b/tools/arch/riscv/include/asm/fence.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copied from the kernel sources to tools/arch/riscv:
+ */
+
+#ifndef _ASM_RISCV_FENCE_H
+#define _ASM_RISCV_FENCE_H
+
+#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
+#define RISCV_FENCE(p, s) \
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
+
+#endif /* _ASM_RISCV_FENCE_H */
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index c768e6d4ae09..ca860fd97d8d 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -104,7 +104,7 @@ bpftool gen skeleton *FILE*
- **example__load**.
This function creates maps, loads and verifies BPF programs, initializes
- global data maps. It corresponds to libppf's **bpf_object__load**\ ()
+ global data maps. It corresponds to libbpf's **bpf_object__load**\ ()
API.
- **example__open_and_load** combines **example__open** and
@@ -172,7 +172,7 @@ bpftool gen min_core_btf *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
CO-RE based application, turning the application portable to different
kernel versions.
- Check examples bellow for more information how to use it.
+ Check examples below for more information on how to use it.
bpftool gen help
Print short help message.
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index 348812881297..a9ed8992800f 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -29,7 +29,7 @@ NET COMMANDS
| **bpftool** **net help**
|
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
-| *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** }
+| *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** | **tcx_ingress** | **tcx_egress** }
DESCRIPTION
===========
@@ -69,6 +69,8 @@ bpftool net attach *ATTACH_TYPE* *PROG* dev *NAME* [ overwrite ]
**xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb;
**xdpdrv** - Native XDP. runs earliest point in driver's receive path;
**xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
+ **tcx_ingress** - Ingress TCX. runs on ingress net traffic;
+ **tcx_egress** - Egress TCX. runs on egress net traffic;
bpftool net detach *ATTACH_TYPE* dev *NAME*
Detach bpf program attached to network interface *NAME* with type specified
@@ -178,3 +180,23 @@ EXAMPLES
::
xdp:
+
+|
+| **# bpftool net attach tcx_ingress name tc_prog dev lo**
+| **# bpftool net**
+|
+
+::
+
+ tc:
+ lo(1) tcx/ingress tc_prog prog_id 29
+
+|
+| **# bpftool net attach tcx_ingress name tc_prog dev lo**
+| **# bpftool net detach tcx_ingress dev lo**
+| **# bpftool net**
+|
+
+::
+
+ tc:
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index be99d49b8714..0c541498c301 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -1079,7 +1079,7 @@ _bpftool()
esac
;;
net)
- local ATTACH_TYPES='xdp xdpgeneric xdpdrv xdpoffload'
+ local ATTACH_TYPES='xdp xdpgeneric xdpdrv xdpoffload tcx_ingress tcx_egress'
case $command in
show|list)
[[ $prev != "$command" ]] && return 0
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 6789c7a4d5ca..7d2af1ff3c8d 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -50,6 +50,7 @@ struct sort_datum {
int type_rank;
const char *sort_name;
const char *own_name;
+ __u64 disambig_hash;
};
static const char *btf_int_enc_str(__u8 encoding)
@@ -561,9 +562,10 @@ static const char *btf_type_sort_name(const struct btf *btf, __u32 index, bool f
case BTF_KIND_ENUM64: {
int name_off = t->name_off;
- /* Use name of the first element for anonymous enums if allowed */
- if (!from_ref && !t->name_off && btf_vlen(t))
- name_off = btf_enum(t)->name_off;
+ if (!from_ref && !name_off && btf_vlen(t))
+ name_off = btf_kind(t) == BTF_KIND_ENUM64 ?
+ btf_enum64(t)->name_off :
+ btf_enum(t)->name_off;
return btf__name_by_offset(btf, name_off);
}
@@ -583,20 +585,88 @@ static const char *btf_type_sort_name(const struct btf *btf, __u32 index, bool f
return NULL;
}
+static __u64 hasher(__u64 hash, __u64 val)
+{
+ return hash * 31 + val;
+}
+
+static __u64 btf_name_hasher(__u64 hash, const struct btf *btf, __u32 name_off)
+{
+ if (!name_off)
+ return hash;
+
+ return hasher(hash, str_hash(btf__name_by_offset(btf, name_off)));
+}
+
+static __u64 btf_type_disambig_hash(const struct btf *btf, __u32 id, bool include_members)
+{
+ const struct btf_type *t = btf__type_by_id(btf, id);
+ int i;
+ size_t hash = 0;
+
+ hash = btf_name_hasher(hash, btf, t->name_off);
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ for (i = 0; i < btf_vlen(t); i++) {
+ __u32 name_off = btf_is_enum(t) ?
+ btf_enum(t)[i].name_off :
+ btf_enum64(t)[i].name_off;
+
+ hash = btf_name_hasher(hash, btf, name_off);
+ }
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ if (!include_members)
+ break;
+ for (i = 0; i < btf_vlen(t); i++) {
+ const struct btf_member *m = btf_members(t) + i;
+
+ hash = btf_name_hasher(hash, btf, m->name_off);
+ /* resolve field type's name and hash it as well */
+ hash = hasher(hash, btf_type_disambig_hash(btf, m->type, false));
+ }
+ break;
+ case BTF_KIND_TYPE_TAG:
+ case BTF_KIND_CONST:
+ case BTF_KIND_PTR:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_DECL_TAG:
+ hash = hasher(hash, btf_type_disambig_hash(btf, t->type, include_members));
+ break;
+ case BTF_KIND_ARRAY: {
+ struct btf_array *arr = btf_array(t);
+
+ hash = hasher(hash, arr->nelems);
+ hash = hasher(hash, btf_type_disambig_hash(btf, arr->type, include_members));
+ break;
+ }
+ default:
+ break;
+ }
+ return hash;
+}
+
static int btf_type_compare(const void *left, const void *right)
{
const struct sort_datum *d1 = (const struct sort_datum *)left;
const struct sort_datum *d2 = (const struct sort_datum *)right;
int r;
- if (d1->type_rank != d2->type_rank)
- return d1->type_rank < d2->type_rank ? -1 : 1;
-
- r = strcmp(d1->sort_name, d2->sort_name);
+ r = d1->type_rank - d2->type_rank;
+ r = r ?: strcmp(d1->sort_name, d2->sort_name);
+ r = r ?: strcmp(d1->own_name, d2->own_name);
if (r)
return r;
- return strcmp(d1->own_name, d2->own_name);
+ if (d1->disambig_hash != d2->disambig_hash)
+ return d1->disambig_hash < d2->disambig_hash ? -1 : 1;
+
+ return d1->index - d2->index;
}
static struct sort_datum *sort_btf_c(const struct btf *btf)
@@ -617,6 +687,7 @@ static struct sort_datum *sort_btf_c(const struct btf *btf)
d->type_rank = btf_type_rank(btf, i, false);
d->sort_name = btf_type_sort_name(btf, i, false);
d->own_name = btf__name_by_offset(btf, t->name_off);
+ d->disambig_hash = btf_type_disambig_hash(btf, i, true);
}
qsort(datums, n, sizeof(struct sort_datum), btf_type_compare);
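
The disambiguation hash added here is a plain 31-multiplier fold over names and type shape; for illustration, a standalone equivalent (str_hash() in the patch is assumed to be bpftool's existing string-hash helper, so a local stand-in is shown):

#include <stdint.h>

/* Equivalent of hasher(): fold one value into a running 64-bit hash. */
static uint64_t combine(uint64_t hash, uint64_t val)
{
	return hash * 31 + val;
}

/* A str_hash()-style fold over a name, for illustration only. */
static uint64_t name_hash(const char *s)
{
	uint64_t h = 0;

	while (*s)
		h = combine(h, (unsigned char)*s++);
	return h;
}
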
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index c754a428c8c6..4dbc4fcdf473 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -196,7 +196,7 @@ static void probe_unprivileged_disabled(void)
{
long res;
- /* No support for C-style ouptut */
+ /* No support for C-style output */
res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled");
if (json_output) {
@@ -225,7 +225,7 @@ static void probe_jit_enable(void)
{
long res;
- /* No support for C-style ouptut */
+ /* No support for C-style output */
res = read_procfs("/proc/sys/net/core/bpf_jit_enable");
if (json_output) {
@@ -255,7 +255,7 @@ static void probe_jit_harden(void)
{
long res;
- /* No support for C-style ouptut */
+ /* No support for C-style output */
res = read_procfs("/proc/sys/net/core/bpf_jit_harden");
if (json_output) {
@@ -285,7 +285,7 @@ static void probe_jit_kallsyms(void)
{
long res;
- /* No support for C-style ouptut */
+ /* No support for C-style output */
res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms");
if (json_output) {
@@ -311,7 +311,7 @@ static void probe_jit_limit(void)
{
long res;
- /* No support for C-style ouptut */
+ /* No support for C-style output */
res = read_procfs("/proc/sys/net/core/bpf_jit_limit");
if (json_output) {
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index 968714b4c3d4..d2242d9f8441 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -67,6 +67,8 @@ enum net_attach_type {
NET_ATTACH_TYPE_XDP_GENERIC,
NET_ATTACH_TYPE_XDP_DRIVER,
NET_ATTACH_TYPE_XDP_OFFLOAD,
+ NET_ATTACH_TYPE_TCX_INGRESS,
+ NET_ATTACH_TYPE_TCX_EGRESS,
};
static const char * const attach_type_strings[] = {
@@ -74,6 +76,8 @@ static const char * const attach_type_strings[] = {
[NET_ATTACH_TYPE_XDP_GENERIC] = "xdpgeneric",
[NET_ATTACH_TYPE_XDP_DRIVER] = "xdpdrv",
[NET_ATTACH_TYPE_XDP_OFFLOAD] = "xdpoffload",
+ [NET_ATTACH_TYPE_TCX_INGRESS] = "tcx_ingress",
+ [NET_ATTACH_TYPE_TCX_EGRESS] = "tcx_egress",
};
static const char * const attach_loc_strings[] = {
@@ -482,9 +486,9 @@ static void __show_dev_tc_bpf(const struct ip_devname_ifindex *dev,
if (prog_flags[i] || json_output) {
NET_START_ARRAY("prog_flags", "%s ");
for (j = 0; prog_flags[i] && j < 32; j++) {
- if (!(prog_flags[i] & (1 << j)))
+ if (!(prog_flags[i] & (1U << j)))
continue;
- NET_DUMP_UINT_ONLY(1 << j);
+ NET_DUMP_UINT_ONLY(1U << j);
}
NET_END_ARRAY("");
}
@@ -493,9 +497,9 @@ static void __show_dev_tc_bpf(const struct ip_devname_ifindex *dev,
if (link_flags[i] || json_output) {
NET_START_ARRAY("link_flags", "%s ");
for (j = 0; link_flags[i] && j < 32; j++) {
- if (!(link_flags[i] & (1 << j)))
+ if (!(link_flags[i] & (1U << j)))
continue;
- NET_DUMP_UINT_ONLY(1 << j);
+ NET_DUMP_UINT_ONLY(1U << j);
}
NET_END_ARRAY("");
}
@@ -647,6 +651,32 @@ static int do_attach_detach_xdp(int progfd, enum net_attach_type attach_type,
return bpf_xdp_attach(ifindex, progfd, flags, NULL);
}
+static int get_tcx_type(enum net_attach_type attach_type)
+{
+ switch (attach_type) {
+ case NET_ATTACH_TYPE_TCX_INGRESS:
+ return BPF_TCX_INGRESS;
+ case NET_ATTACH_TYPE_TCX_EGRESS:
+ return BPF_TCX_EGRESS;
+ default:
+ return -1;
+ }
+}
+
+static int do_attach_tcx(int progfd, enum net_attach_type attach_type, int ifindex)
+{
+ int type = get_tcx_type(attach_type);
+
+ return bpf_prog_attach(progfd, ifindex, type, 0);
+}
+
+static int do_detach_tcx(int targetfd, enum net_attach_type attach_type)
+{
+ int type = get_tcx_type(attach_type);
+
+ return bpf_prog_detach(targetfd, type);
+}
+
static int do_attach(int argc, char **argv)
{
enum net_attach_type attach_type;
@@ -684,10 +714,23 @@ static int do_attach(int argc, char **argv)
}
}
+ switch (attach_type) {
/* attach xdp prog */
- if (is_prefix("xdp", attach_type_strings[attach_type]))
- err = do_attach_detach_xdp(progfd, attach_type, ifindex,
- overwrite);
+ case NET_ATTACH_TYPE_XDP:
+ case NET_ATTACH_TYPE_XDP_GENERIC:
+ case NET_ATTACH_TYPE_XDP_DRIVER:
+ case NET_ATTACH_TYPE_XDP_OFFLOAD:
+ err = do_attach_detach_xdp(progfd, attach_type, ifindex, overwrite);
+ break;
+ /* attach tcx prog */
+ case NET_ATTACH_TYPE_TCX_INGRESS:
+ case NET_ATTACH_TYPE_TCX_EGRESS:
+ err = do_attach_tcx(progfd, attach_type, ifindex);
+ break;
+ default:
+ break;
+ }
+
if (err) {
p_err("interface %s attach failed: %s",
attach_type_strings[attach_type], strerror(-err));
@@ -721,10 +764,23 @@ static int do_detach(int argc, char **argv)
if (ifindex < 1)
return -EINVAL;
+ switch (attach_type) {
/* detach xdp prog */
- progfd = -1;
- if (is_prefix("xdp", attach_type_strings[attach_type]))
+ case NET_ATTACH_TYPE_XDP:
+ case NET_ATTACH_TYPE_XDP_GENERIC:
+ case NET_ATTACH_TYPE_XDP_DRIVER:
+ case NET_ATTACH_TYPE_XDP_OFFLOAD:
+ progfd = -1;
err = do_attach_detach_xdp(progfd, attach_type, ifindex, NULL);
+ break;
+ /* detach tcx prog */
+ case NET_ATTACH_TYPE_TCX_INGRESS:
+ case NET_ATTACH_TYPE_TCX_EGRESS:
+ err = do_detach_tcx(ifindex, attach_type);
+ break;
+ default:
+ break;
+ }
if (err < 0) {
p_err("interface %s detach failed: %s",
@@ -824,6 +880,9 @@ static void show_link_netfilter(void)
nf_link_count++;
}
+ if (!nf_link_info)
+ return;
+
qsort(nf_link_info, nf_link_count, sizeof(*nf_link_info), netfilter_link_compar);
for (id = 0; id < nf_link_count; id++) {
@@ -928,7 +987,8 @@ static int do_help(int argc, char **argv)
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_PROGRAM "\n"
- " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n"
+ " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload | tcx_ingress\n"
+ " | tcx_egress }\n"
" " HELP_SPEC_OPTIONS " }\n"
"\n"
"Note: Only xdp, tcx, tc, netkit, flow_dissector and netfilter attachments\n"
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 567f56dfd9f1..d0094345fb2b 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -349,7 +349,7 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
- printf("% 4d: ", i);
+ printf("%4u: ", i);
print_bpf_insn(&cbs, insn + i, true);
if (opcodes) {
@@ -415,7 +415,7 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
}
}
- printf("%d: ", insn_off);
+ printf("%u: ", insn_off);
print_bpf_insn(&cbs, cur, true);
if (opcodes) {
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index d8288936c912..c4f1f1735af6 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -15,6 +15,7 @@ INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)
CFLAGS += $(EXTRA_CFLAGS)
LDFLAGS += $(EXTRA_LDFLAGS)
+LDLIBS += -lelf -lz
# Try to detect best kernel BTF source
KERNEL_REL := $(shell uname -r)
@@ -51,7 +52,7 @@ clean:
libbpf_hdrs: $(BPFOBJ)
$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
- $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
$(OUTPUT)/runqslower.bpf.o | libbpf_hdrs
diff --git a/tools/build/Build b/tools/build/Build
deleted file mode 100644
index 76d1a4960973..000000000000
--- a/tools/build/Build
+++ /dev/null
@@ -1,3 +0,0 @@
-hostprogs := fixdep
-
-fixdep-y := fixdep.o
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 17cdf01e29a0..18ad131f6ea7 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -43,12 +43,5 @@ ifneq ($(wildcard $(TMP_O)),)
$(Q)$(MAKE) -C feature OUTPUT=$(TMP_O) clean >/dev/null
endif
-$(OUTPUT)fixdep-in.o: FORCE
- $(Q)$(MAKE) $(build)=fixdep
-
-$(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
- $(QUIET_LINK)$(HOSTCC) $(KBUILD_HOSTLDFLAGS) -o $@ $<
-
-FORCE:
-
-.PHONY: FORCE
+$(OUTPUT)fixdep: $(srctree)/tools/build/fixdep.c
+ $(QUIET_CC)$(HOSTCC) $(KBUILD_HOSTCFLAGS) $(KBUILD_HOSTLDFLAGS) -o $@ $<
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index e1900abd44f6..ffd117135094 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -100,7 +100,6 @@ FEATURE_TESTS_EXTRA := \
libunwind-debug-frame-aarch64 \
cxx \
llvm \
- llvm-version \
clang \
libbpf \
libbpf-btf__load_from_kernel_by_id \
@@ -136,6 +135,7 @@ FEATURE_DISPLAY ?= \
libunwind \
libdw-dwarf-unwind \
libcapstone \
+ llvm-perf \
zlib \
lzma \
get_cpuid \
diff --git a/tools/build/Makefile.include b/tools/build/Makefile.include
index 8dadaa0fbb43..0e4de83400ac 100644
--- a/tools/build/Makefile.include
+++ b/tools/build/Makefile.include
@@ -1,8 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-only
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+# More than just $(Q), we sometimes want to suppress all command output from a
+# recursive make -- even the 'up to date' printout.
+ifeq ($(V),1)
+ Q ?=
+ SILENT_MAKE = +$(Q)$(MAKE)
+else
+ Q ?= @
+ SILENT_MAKE = +$(Q)$(MAKE) --silent
+endif
+
fixdep:
- $(Q)$(MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= $(OUTPUT)fixdep
+ $(SILENT_MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= $(OUTPUT)fixdep
fixdep-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build clean
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 12796808f07a..5938cf799dc6 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -73,7 +73,7 @@ FILES= \
test-libopencsd.bin \
test-clang.bin \
test-llvm.bin \
- test-llvm-version.bin \
+ test-llvm-perf.bin \
test-libaio.bin \
test-libzstd.bin \
test-clang-bpf-co-re.bin \
@@ -388,9 +388,12 @@ $(OUTPUT)test-llvm.bin:
$(shell $(LLVM_CONFIG) --system-libs) \
> $(@:.bin=.make.output) 2>&1
-$(OUTPUT)test-llvm-version.bin:
- $(BUILDXX) -std=gnu++17 \
- -I$(shell $(LLVM_CONFIG) --includedir) \
+$(OUTPUT)test-llvm-perf.bin:
+ $(BUILDXX) -std=gnu++17 \
+ -I$(shell $(LLVM_CONFIG) --includedir) \
+ -L$(shell $(LLVM_CONFIG) --libdir) \
+ $(shell $(LLVM_CONFIG) --libs Core BPF) \
+ $(shell $(LLVM_CONFIG) --system-libs) \
> $(@:.bin=.make.output) 2>&1
$(OUTPUT)test-clang.bin:
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index dd0a18c2ef8f..6f4bf386a3b5 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -134,10 +134,6 @@
#undef main
#endif
-#define main main_test_libcapstone
-# include "test-libcapstone.c"
-#undef main
-
#define main main_test_lzma
# include "test-lzma.c"
#undef main
diff --git a/tools/build/feature/test-llvm-perf.cpp b/tools/build/feature/test-llvm-perf.cpp
new file mode 100644
index 000000000000..a8cbb67e335e
--- /dev/null
+++ b/tools/build/feature/test-llvm-perf.cpp
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/raw_ostream.h"
+
+#if LLVM_VERSION_MAJOR < 13
+# error "Perf requires llvm-devel/llvm-dev version 13 or greater"
+#endif
+
+int main()
+{
+ llvm::errs() << "Hello World!\n";
+ llvm::llvm_shutdown();
+ return 0;
+}
diff --git a/tools/iio/Makefile b/tools/iio/Makefile
index fa720f062229..3bcce0b7d10f 100644
--- a/tools/iio/Makefile
+++ b/tools/iio/Makefile
@@ -58,7 +58,7 @@ $(OUTPUT)iio_generic_buffer: $(IIO_GENERIC_BUFFER_IN)
clean:
rm -f $(ALL_PROGRAMS)
rm -rf $(OUTPUT)include/linux/iio
- find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(bindir); \
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 0d0a7a19d6f9..9ef5ee087eda 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -498,6 +498,10 @@ int main(int argc, char **argv)
return -ENOMEM;
}
trigger_name = malloc(IIO_MAX_NAME_LENGTH);
+ if (!trigger_name) {
+ ret = -ENOMEM;
+ goto error;
+ }
ret = read_sysfs_string("name", trig_dev_name, trigger_name);
free(trig_dev_name);
if (ret < 0) {
diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
index 8d378c57cb01..0c21678ac5e6 100644
--- a/tools/include/asm/barrier.h
+++ b/tools/include/asm/barrier.h
@@ -8,6 +8,8 @@
#include "../../arch/arm64/include/asm/barrier.h"
#elif defined(__powerpc__)
#include "../../arch/powerpc/include/asm/barrier.h"
+#elif defined(__riscv)
+#include "../../arch/riscv/include/asm/barrier.h"
#elif defined(__s390__)
#include "../../arch/s390/include/asm/barrier.h"
#elif defined(__sh__)
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 4366da278033..9c05a59f0184 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -128,10 +128,6 @@
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
-#ifndef __init
-# define __init
-#endif
-
#include <linux/types.h>
/*
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h
index 51ac441a37c3..89b0ac0014b0 100644
--- a/tools/include/linux/coresight-pmu.h
+++ b/tools/include/linux/coresight-pmu.h
@@ -49,12 +49,21 @@
* Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
* Used to associate a CPU with the CoreSight Trace ID.
* [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
- * [59:08] - Unused (SBZ)
- * [63:60] - Version
+ * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
+ * Added in minor version 1.
+ * [55:40] - Unused (SBZ)
+ * [59:56] - Minor Version - previously existing fields are compatible with
+ * all minor versions.
+ * [63:60] - Major Version - previously existing fields mean different things
+ * in new major versions.
*/
#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
-#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
+#define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8)
-#define CS_AUX_HW_ID_CURR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56)
+#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
+
+#define CS_AUX_HW_ID_MAJOR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION 1
#endif
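
A sketch of how a perf-side consumer might unpack the new v0.1 payload
layout (these helpers are illustrative and not part of the patch):

static inline unsigned int cs_aux_hw_id_trace_id(unsigned long long pld)
{
	/* [07:00] Trace ID */
	return pld & CS_AUX_HW_ID_TRACE_ID_MASK;
}

static inline unsigned long cs_aux_hw_id_sink_id(unsigned long long pld)
{
	/* [39:08] Sink ID, meaningful from minor version 1 onward */
	return (pld & CS_AUX_HW_ID_SINK_ID_MASK) >> 8;
}

static inline unsigned int cs_aux_hw_id_major(unsigned long long pld)
{
	/* [63:60] Major Version; fields may change meaning across majors */
	return (pld & CS_AUX_HW_ID_MAJOR_VERSION_MASK) >> 60;
}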
diff --git a/tools/testing/memblock/linux/init.h b/tools/include/linux/init.h
index 828e0ee0bc6c..51b5cde28639 100644
--- a/tools/testing/memblock/linux/init.h
+++ b/tools/include/linux/init.h
@@ -1,10 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_INIT_H
-#define _LINUX_INIT_H
+#ifndef _TOOLS_LINUX_INIT_H_
+#define _TOOLS_LINUX_INIT_H_
#include <linux/compiler.h>
-#include <asm/export.h>
-#include <linux/memory_hotplug.h>
+
+#ifndef __init
+# define __init
+#endif
+
+#ifndef __exit
+# define __exit
+#endif
#define __section(section) __attribute__((__section__(section)))
@@ -28,7 +34,10 @@ struct obs_kernel_param {
__aligned(__alignof__(struct obs_kernel_param)) = \
{ __setup_str_##unique_id, fn, early }
+#define __setup(str, fn) \
+ __setup_param(str, fn, fn, 0)
+
#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
-#endif
+#endif /* _TOOLS_LINUX_INIT_H_ */
diff --git a/tools/include/linux/linkage.h b/tools/include/linux/linkage.h
index a48ff086899c..7baaa5898ca2 100644
--- a/tools/include/linux/linkage.h
+++ b/tools/include/linux/linkage.h
@@ -1,8 +1,12 @@
#ifndef _TOOLS_INCLUDE_LINUX_LINKAGE_H
#define _TOOLS_INCLUDE_LINUX_LINKAGE_H
-#define SYM_FUNC_START(x) .globl x; x:
+#include <linux/export.h>
+#define SYM_FUNC_START(x) .globl x; x:
#define SYM_FUNC_END(x)
+#define SYM_DATA_START(x) .globl x; x:
+#define SYM_DATA_START_LOCAL(x) x:
+#define SYM_DATA_END(x)
#endif /* _TOOLS_INCLUDE_LINUX_LINKAGE_H */
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index cad4f2927983..677c37e4a18c 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -25,6 +25,12 @@ static inline void *phys_to_virt(unsigned long address)
return __va(address);
}
+#define virt_to_phys virt_to_phys
+static inline phys_addr_t virt_to_phys(volatile void *address)
+{
+ return (phys_addr_t)address;
+}
+
void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
static inline void totalram_pages_inc(void)
diff --git a/tools/include/linux/pfn.h b/tools/include/linux/pfn.h
index 7512a58189eb..f77a30d70152 100644
--- a/tools/include/linux/pfn.h
+++ b/tools/include/linux/pfn.h
@@ -7,4 +7,5 @@
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
+#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#endif
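
For illustration (not part of the patch), the new PHYS_PFN() inverts the
existing PFN_PHYS() for page-aligned addresses:

	unsigned long pfn = 3;
	phys_addr_t pa = PFN_PHYS(pfn);    /* 0x3000 when PAGE_SHIFT == 12 */
	unsigned long back = PHYS_PFN(pa); /* 3 again */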
diff --git a/tools/include/linux/ring_buffer.h b/tools/include/linux/ring_buffer.h
index 6c02617377c2..a74c397359c7 100644
--- a/tools/include/linux/ring_buffer.h
+++ b/tools/include/linux/ring_buffer.h
@@ -55,7 +55,7 @@ static inline u64 ring_buffer_read_head(struct perf_event_mmap_page *base)
* READ_ONCE() + smp_mb() pair.
*/
#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \
- defined(__ia64__) || defined(__sparc__) && defined(__arch64__)
+ defined(__ia64__) || defined(__sparc__) && defined(__arch64__) || defined(__riscv)
return smp_load_acquire(&base->data_head);
#else
u64 head = READ_ONCE(base->data_head);
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
index db5c99318c79..8499f509f03e 100644
--- a/tools/include/linux/string.h
+++ b/tools/include/linux/string.h
@@ -12,6 +12,8 @@ void argv_free(char **argv);
int strtobool(const char *s, bool *res);
+#define strscpy strcpy
+
/*
* glibc based builds needs the extern while uClibc doesn't.
* However uClibc headers also define __GLIBC__ hence the hack below
@@ -46,5 +48,8 @@ extern char * __must_check skip_spaces(const char *);
extern char *strim(char *);
+extern void remove_spaces(char *s);
+
extern void *memchr_inv(const void *start, int c, size_t bytes);
+extern unsigned long long memparse(const char *ptr, char **retptr);
#endif /* _TOOLS_LINUX_STRING_H_ */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index e05b39e39c3f..1fb3cb2636e6 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -7513,4 +7513,13 @@ struct bpf_iter_num {
__u64 __opaque[1];
} __attribute__((aligned(8)));
+/*
+ * Flags to control BPF kfunc behaviour.
+ * - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
+ * helper documentation for details.)
+ */
+enum bpf_kfunc_flags {
+ BPF_F_PAD_ZEROS = (1ULL << 0),
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index 044860ac1ed1..7f6396087b46 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -31,11 +31,7 @@ CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -fPIC
ifeq ($(DEBUG),0)
-ifeq ($(CC_NO_CLANG), 0)
CFLAGS += -O3
-else
- CFLAGS += -O6
-endif
endif
ifeq ($(DEBUG),0)
diff --git a/tools/lib/api/fs/tracing_path.c b/tools/lib/api/fs/tracing_path.c
index 30745f35d0d2..834fd64c7130 100644
--- a/tools/lib/api/fs/tracing_path.c
+++ b/tools/lib/api/fs/tracing_path.c
@@ -69,7 +69,7 @@ char *get_tracing_file(const char *name)
{
char *file;
- if (asprintf(&file, "%s/%s", tracing_path_mount(), name) < 0)
+ if (asprintf(&file, "%s%s", tracing_path_mount(), name) < 0)
return NULL;
return file;
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index 0da84cb9e66d..f02725b123b3 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -5,3 +5,4 @@ TAGS
tags
cscope.*
/bpf_helper_defs.h
+fixdep
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 2cf892774346..1b22f0f37288 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -108,6 +108,8 @@ MAKEOVERRIDES=
all:
+OUTPUT ?= ./
+OUTPUT := $(abspath $(OUTPUT))/
export srctree OUTPUT CC LD CFLAGS V
include $(srctree)/tools/build/Makefile.include
@@ -141,7 +143,10 @@ all: fixdep
all_cmd: $(CMD_TARGETS) check
-$(BPF_IN_SHARED): force $(BPF_GENERATED)
+$(SHARED_OBJDIR) $(STATIC_OBJDIR):
+ $(Q)mkdir -p $@
+
+$(BPF_IN_SHARED): force $(BPF_GENERATED) | $(SHARED_OBJDIR)
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -151,9 +156,11 @@ $(BPF_IN_SHARED): force $(BPF_GENERATED)
@(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
(diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
+ $(SILENT_MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= OUTPUT=$(SHARED_OBJDIR) $(SHARED_OBJDIR)fixdep
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
-$(BPF_IN_STATIC): force $(BPF_GENERATED)
+$(BPF_IN_STATIC): force $(BPF_GENERATED) | $(STATIC_OBJDIR)
+ $(SILENT_MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= OUTPUT=$(STATIC_OBJDIR) $(STATIC_OBJDIR)fixdep
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
@@ -263,7 +270,7 @@ install_pkgconfig: $(PC_FILE)
install: install_lib install_pkgconfig install_headers
-clean:
+clean: fixdep-clean
$(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
*~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_GENERATED) \
$(SHARED_OBJDIR) $(STATIC_OBJDIR) \
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 972e17ec0c09..a4a7b1ad1b63 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -100,7 +100,7 @@ struct bpf_prog_load_opts {
__u32 log_level;
__u32 log_size;
char *log_buf;
- /* output: actual total log contents size (including termintaing zero).
+ /* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
* If kernel doesn't support this feature, log_size is left unchanged.
@@ -129,7 +129,7 @@ struct bpf_btf_load_opts {
char *log_buf;
__u32 log_level;
__u32 log_size;
- /* output: actual total log contents size (including termintaing zero).
+ /* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
* If kernel doesn't support this feature, log_size is left unchanged.
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 305c62817dd3..80bc0242e8dc 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -341,7 +341,7 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
* I.e., it looks almost like high-level for each loop in other languages,
* supports continue/break, and is verifiable by BPF verifier.
*
- * For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
+ * For iterating integers, the difference between bpf_for_each(num, i, N, M)
* and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
* verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
* *`, not just `int`. So for integers bpf_for() is more convenient.
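
A usage sketch of the two forms contrasted in the comment above
(illustrative, for the body of a BPF program; not part of this patch):

	int i, *v, sum = 0;

	bpf_for(i, 0, 16) {		/* verifier proves 0 <= i < 16 */
		sum += i;
	}

	bpf_for_each(num, v, 0, 16) {	/* v is an `int *`, as noted above */
		sum += *v;
	}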
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index 9314fa95f04e..a8f6cd4841b0 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -163,7 +163,7 @@
struct pt_regs___s390 {
unsigned long orig_gpr2;
-};
+} __attribute__((preserve_access_index));
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
@@ -179,7 +179,7 @@ struct pt_regs___s390 {
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG gprs[7]
-#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___s390 *)(x))->__PT_PARM1_SYSCALL_REG)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
@@ -222,7 +222,7 @@ struct pt_regs___s390 {
struct pt_regs___arm64 {
unsigned long orig_x0;
-};
+} __attribute__((preserve_access_index));
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
@@ -241,7 +241,7 @@ struct pt_regs___arm64 {
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
-#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___arm64 *)(x))->__PT_PARM1_SYSCALL_REG)
#define PT_REGS_PARM1_CORE_SYSCALL(x) \
BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
@@ -351,6 +351,10 @@ struct pt_regs___arm64 {
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
*/
+struct pt_regs___riscv {
+ unsigned long orig_a0;
+} __attribute__((preserve_access_index));
+
/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG a0
@@ -362,12 +366,15 @@ struct pt_regs___arm64 {
#define __PT_PARM7_REG a6
#define __PT_PARM8_REG a7
-#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM1_SYSCALL_REG orig_a0
#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___riscv *)(x))->__PT_PARM1_SYSCALL_REG)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) \
+ BPF_CORE_READ((const struct pt_regs___riscv *)(x), __PT_PARM1_SYSCALL_REG)
#define __PT_RET_REG ra
#define __PT_FP_REG s0
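
A sketch of what the riscv orig_a0 change above enables (the program and
function names here are hypothetical):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("ksyscall/openat")
int BPF_KSYSCALL(sketch_openat, int dfd, const char *pathname)
{
	/* On riscv, syscall arg 1 is now read from orig_a0 via
	 * struct pt_regs___riscv, so dfd is seen correctly even
	 * after the kernel clobbers a0 with the return value.
	 */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";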
@@ -473,7 +480,7 @@ struct pt_regs;
#endif
/*
* Similarly, syscall-specific conventions might differ between function call
- * conventions within each architecutre. All supported architectures pass
+ * conventions within each architecture. All supported architectures pass
* either 6 or 7 syscall arguments in registers.
*
* See syscall(2) manpage for succinct table with information on each arch.
@@ -515,7 +522,7 @@ struct pt_regs;
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
-#elif defined(bpf_target_sparc)
+#elif defined(bpf_target_sparc) || defined(bpf_target_arm64)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
@@ -651,7 +658,7 @@ struct pt_regs;
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
* similar kinds of BPF programs, that accept input arguments as a single
* pointer to untyped u64 array, where each u64 can actually be a typed
- * pointer or integer of different size. Instead of requring user to write
+ * pointer or integer of different size. Instead of requiring user to write
* manual casts and work with array elements by index, BPF_PROG macro
* allows user to declare a list of named and typed input arguments in the
* same syntax as for normal C function. All the casting is hidden and
@@ -801,7 +808,7 @@ struct pt_regs;
* tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
* low-level way of getting kprobe input arguments from struct pt_regs, and
* provides a familiar typed and named function arguments syntax and
- * semantics of accessing kprobe input paremeters.
+ * semantics of accessing kprobe input parameters.
*
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output().
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 32c00db3b91b..3c131039c523 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -996,6 +996,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
btf->base_btf = base_btf;
btf->start_id = btf__type_cnt(base_btf);
btf->start_str_off = base_btf->hdr->str_len;
+ btf->swapped_endian = base_btf->swapped_endian;
}
/* +1 for empty string at offset 0 */
@@ -4191,7 +4192,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
* and canonical graphs are not compatible structurally, whole graphs are
* incompatible. If types are structurally equivalent (i.e., all information
* except referenced type IDs is exactly the same), a mapping from `canon_id` to
- * a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`).
+ * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
* If a type references other types, then those referenced types are checked
* for equivalence recursively.
*
@@ -4229,7 +4230,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id
* consists of portions of the graph that come from multiple compilation units.
* This is due to the fact that types within single compilation unit are always
* deduplicated and FWDs are already resolved, if referenced struct/union
- * definiton is available. So, if we had unresolved FWD and found corresponding
+ * definition is available. So, if we had unresolved FWD and found corresponding
* STRUCT/UNION, they will be from different compilation units. This
* consequently means that when we "link" FWD to corresponding STRUCT/UNION,
* type graph will likely have at least two different BTF types that describe
@@ -5394,6 +5395,9 @@ int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf,
new_base = btf__new_empty();
if (!new_base)
return libbpf_err(-ENOMEM);
+
+ btf__set_endianness(new_base, btf__endianness(src_btf));
+
dist.id_map = calloc(n, sizeof(*dist.id_map));
if (!dist.id_map) {
err = -ENOMEM;
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b68d216837a9..4e349ad79ee6 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -286,7 +286,7 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
struct btf_dump_emit_type_decl_opts {
- /* size of this struct, for forward/backward compatiblity */
+ /* size of this struct, for forward/backward compatibility */
size_t sz;
/* optional field name for type declaration, e.g.:
* - struct my_struct <FNAME>
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 894860111ddb..0a7327541c17 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -304,7 +304,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
* definition, in which case they have to be declared inline as part of field
* type declaration; or as a top-level anonymous enum, typically used for
* declaring global constants. It's impossible to distinguish between two
- * without knowning whether given enum type was referenced from other type:
+ * without knowing whether given enum type was referenced from other type:
* top-level anonymous enum won't be referenced by anything, while embedded
* one will.
*/
diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
index 17f8b32f94a0..4f7399d85eab 100644
--- a/tools/lib/bpf/btf_relocate.c
+++ b/tools/lib/bpf/btf_relocate.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2024, Oracle and/or its affiliates. */
#ifndef _GNU_SOURCE
diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
index c92e02394159..b5ab1cb13e5e 100644
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -28,6 +28,9 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd)
int fd, ret;
Elf *elf;
+ elf_fd->elf = NULL;
+ elf_fd->fd = -1;
+
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("elf: failed to init libelf for %s\n", binary_path);
return -LIBBPF_ERRNO__LIBELF;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a3be6f8fac09..219facd0e66e 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -496,8 +496,6 @@ struct bpf_program {
};
struct bpf_struct_ops {
- const char *tname;
- const struct btf_type *type;
struct bpf_program **progs;
__u32 *kern_func_off;
/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
@@ -988,7 +986,7 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
{
const struct btf_type *kern_type, *kern_vtype;
const struct btf_member *kern_data_member;
- struct btf *btf;
+ struct btf *btf = NULL;
__s32 kern_vtype_id, kern_type_id;
char tname[256];
__u32 i;
@@ -1083,11 +1081,14 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
continue;
for (j = 0; j < obj->nr_maps; ++j) {
+ const struct btf_type *type;
+
map = &obj->maps[j];
if (!bpf_map__is_struct_ops(map))
continue;
- vlen = btf_vlen(map->st_ops->type);
+ type = btf__type_by_id(obj->btf, map->st_ops->type_id);
+ vlen = btf_vlen(type);
for (k = 0; k < vlen; ++k) {
slot_prog = map->st_ops->progs[k];
if (prog != slot_prog)
@@ -1115,14 +1116,14 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
const struct btf *btf = obj->btf;
struct bpf_struct_ops *st_ops;
const struct btf *kern_btf;
- struct module_btf *mod_btf;
+ struct module_btf *mod_btf = NULL;
void *data, *kern_data;
const char *tname;
int err;
st_ops = map->st_ops;
- type = st_ops->type;
- tname = st_ops->tname;
+ type = btf__type_by_id(btf, st_ops->type_id);
+ tname = btf__name_by_offset(btf, type->name_off);
err = find_struct_ops_kern_types(obj, tname, &mod_btf,
&kern_type, &kern_type_id,
&kern_vtype, &kern_vtype_id,
@@ -1423,8 +1424,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
memcpy(st_ops->data,
data->d_buf + vsi->offset,
type->size);
- st_ops->tname = tname;
- st_ops->type = type;
st_ops->type_id = type_id;
pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
@@ -1849,7 +1848,7 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
sfx_len, real_name);
- /* sanitise map name to characters allowed by kernel */
+	/* sanitize map name to characters allowed by kernel */
for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
if (!isalnum(*p) && *p != '_' && *p != '.')
*p = '_';
@@ -7906,16 +7905,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
}
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ const char *obj_name,
const struct bpf_object_open_opts *opts)
{
- const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
+ const char *kconfig, *btf_tmp_path, *token_path;
struct bpf_object *obj;
- char tmp_name[64];
int err;
char *log_buf;
size_t log_size;
__u32 log_level;
+ if (obj_buf && !obj_name)
+ return ERR_PTR(-EINVAL);
+
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n",
path ? : "(mem buf)");
@@ -7925,16 +7927,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
if (!OPTS_VALID(opts, bpf_object_open_opts))
return ERR_PTR(-EINVAL);
- obj_name = OPTS_GET(opts, object_name, NULL);
+ obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
if (obj_buf) {
- if (!obj_name) {
- snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
- (unsigned long)obj_buf,
- (unsigned long)obj_buf_sz);
- obj_name = tmp_name;
- }
path = obj_name;
pr_debug("loading object '%s' from buffer\n", obj_name);
+ } else {
+ pr_debug("loading object from %s\n", path);
}
log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
@@ -8018,9 +8016,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
if (!path)
return libbpf_err_ptr(-EINVAL);
- pr_debug("loading %s\n", path);
-
- return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
+ return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
}
struct bpf_object *bpf_object__open(const char *path)
@@ -8032,10 +8028,15 @@ struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
+ char tmp_name[64];
+
if (!obj_buf || obj_buf_sz == 0)
return libbpf_err_ptr(-EINVAL);
- return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
+ /* create a (quite useless) default "name" for this memory buffer object */
+ snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
+
+ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
}
static int bpf_object_unload(struct bpf_object *obj)
@@ -8445,11 +8446,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
static void bpf_map_prepare_vdata(const struct bpf_map *map)
{
+ const struct btf_type *type;
struct bpf_struct_ops *st_ops;
__u32 i;
st_ops = map->st_ops;
- for (i = 0; i < btf_vlen(st_ops->type); i++) {
+ type = btf__type_by_id(map->obj->btf, st_ops->type_id);
+ for (i = 0; i < btf_vlen(type); i++) {
struct bpf_program *prog = st_ops->progs[i];
void *kern_data;
int prog_fd;
@@ -9056,6 +9059,11 @@ unsigned int bpf_object__kversion(const struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}
+int bpf_object__token_fd(const struct bpf_object *obj)
+{
+ return obj->token_fd ?: -1;
+}
+
struct btf *bpf_object__btf(const struct bpf_object *obj)
{
return obj ? obj->btf : NULL;
@@ -9712,6 +9720,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
Elf64_Shdr *shdr, Elf_Data *data)
{
+ const struct btf_type *type;
const struct btf_member *member;
struct bpf_struct_ops *st_ops;
struct bpf_program *prog;
@@ -9771,13 +9780,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
}
insn_idx = sym->st_value / BPF_INSN_SZ;
- member = find_member_by_offset(st_ops->type, moff * 8);
+ type = btf__type_by_id(btf, st_ops->type_id);
+ member = find_member_by_offset(type, moff * 8);
if (!member) {
pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
map->name, moff);
return -EINVAL;
}
- member_idx = member - btf_members(st_ops->type);
+ member_idx = member - btf_members(type);
name = btf__name_by_offset(btf, member->name_off);
if (!resolve_func_ptr(btf, member->type, NULL)) {
@@ -11683,7 +11693,7 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru
ret = 0;
break;
case 3:
- opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
+ opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi");
*link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
ret = libbpf_get_error(*link);
break;
@@ -13758,29 +13768,13 @@ static int populate_skeleton_progs(const struct bpf_object *obj,
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
- .object_name = s->name,
- );
struct bpf_object *obj;
int err;
- /* Attempt to preserve opts->object_name, unless overriden by user
- * explicitly. Overwriting object name for skeletons is discouraged,
- * as it breaks global data maps, because they contain object name
- * prefix as their own map name prefix. When skeleton is generated,
- * bpftool is making an assumption that this name will stay the same.
- */
- if (opts) {
- memcpy(&skel_opts, opts, sizeof(*opts));
- if (!opts->object_name)
- skel_opts.object_name = s->name;
- }
-
- obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
- err = libbpf_get_error(obj);
- if (err) {
- pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
- s->name, err);
+ obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err);
return libbpf_err(err);
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 64a6a3d323e3..91484303849c 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -152,7 +152,7 @@ struct bpf_object_open_opts {
* log_buf and log_level settings.
*
* If specified, this log buffer will be passed for:
- * - each BPF progral load (BPF_PROG_LOAD) attempt, unless overriden
+ * - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
* with bpf_program__set_log() on per-program level, to get
* BPF verifier log output.
* - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
@@ -294,6 +294,14 @@ LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);
+/**
+ * @brief **bpf_object__token_fd** is an accessor for BPF token FD associated
+ * with BPF object.
+ * @param obj Pointer to a valid BPF object
+ * @return BPF token FD or -1, if it wasn't set
+ */
+LIBBPF_API int bpf_object__token_fd(const struct bpf_object *obj);
+
struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
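
A caller-side sketch of the new accessor (illustrative, not from this
patch):

static int probe_token_fd(const char *path)
{
	struct bpf_object *obj = bpf_object__open(path);
	int fd;

	if (!obj)
		return -1;
	fd = bpf_object__token_fd(obj);	/* -1 if no token was set */
	bpf_object__close(obj);
	return fd;
}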
@@ -455,7 +463,7 @@ LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
/**
* @brief **bpf_program__attach()** is a generic function for attaching
* a BPF program based on auto-detection of program type, attach type,
- * and extra paremeters, where applicable.
+ * and extra parameters, where applicable.
*
* @param prog BPF program to attach
* @return Reference to the newly created BPF link; or NULL is returned on error,
@@ -679,7 +687,7 @@ struct bpf_uprobe_opts {
/**
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
* to the userspace function which is found by binary path and
- * offset. You can optionally specify a particular proccess to attach
+ * offset. You can optionally specify a particular process to attach
* to. You can also optionally attach the program to the function
* exit instead of entry.
*
@@ -1593,11 +1601,11 @@ LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_i
* memory region of the ring buffer.
* This ring buffer can be used to implement a custom events consumer.
* The ring buffer starts with the *struct perf_event_mmap_page*, which
- * holds the ring buffer managment fields, when accessing the header
+ * holds the ring buffer management fields; when accessing the header
* structure it's important to be SMP aware.
* You can refer to *perf_event_read_simple* for a simple example.
* @param pb the perf buffer structure
- * @param buf_idx the buffer index to retreive
+ * @param buf_idx the buffer index to retrieve
* @param buf (out) gets the base pointer of the mmap()'ed memory
* @param buf_size (out) gets the size of the mmap()'ed region
* @return 0 on success, negative error code for failure
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 8f0d9ea3b1b4..0096e483f7eb 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -423,6 +423,7 @@ LIBBPF_1.5.0 {
btf__relocate;
bpf_map__autoattach;
bpf_map__set_autoattach;
+ bpf_object__token_fd;
bpf_program__attach_sockmap;
ring__consume_n;
ring_buffer__consume_n;
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
index 1e1be467bede..60b2600be88a 100644
--- a/tools/lib/bpf/libbpf_legacy.h
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -76,7 +76,7 @@ enum libbpf_strict_mode {
* first BPF program or map creation operation. This is done only if
* kernel is too old to support memcg-based memory accounting for BPF
* subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
- * but it can be overriden with libbpf_set_memlock_rlim() API.
+ * but it can be overridden with libbpf_set_memlock_rlim() API.
* Note that libbpf_set_memlock_rlim() needs to be called before
* the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
* operation.
@@ -97,7 +97,7 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
* @brief **libbpf_get_error()** extracts the error code from the passed
* pointer
* @param ptr pointer returned from libbpf API function
- * @return error code; or 0 if no error occured
+ * @return error code; or 0 if no error occurred
*
* Note, as of libbpf 1.0 this function is not necessary and not recommended
* to be used. Libbpf doesn't return error code embedded into the pointer
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 9cd3d4109788..e0005c6ade88 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -1413,7 +1413,7 @@ recur:
return true;
case BTF_KIND_PTR:
/* just validate overall shape of the referenced type, so no
- * contents comparison for struct/union, and allowd fwd vs
+ * contents comparison for struct/union, and allowed fwd vs
* struct/union
*/
exact = false;
@@ -1962,7 +1962,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
/* If existing symbol is a strong resolved symbol, bail out,
* because we lost the resolution battle and have nothing to
- * contribute. We already checked abover that there is no
+ * contribute. We already checked above that there is no
* strong-strong conflict. We also already tightened binding
* and visibility, so nothing else to contribute at that point.
*/
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index 1e82ab06c3eb..0875452521e9 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -107,7 +107,7 @@ static inline void skel_free(const void *p)
* The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
* skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
* does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
- * is not nessary.
+ * is not necessary.
*
* For user space:
* skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h
index 76359bcdc94a..b811f754939f 100644
--- a/tools/lib/bpf/usdt.bpf.h
+++ b/tools/lib/bpf/usdt.bpf.h
@@ -39,7 +39,7 @@ enum __bpf_usdt_arg_type {
struct __bpf_usdt_arg_spec {
/* u64 scalar interpreted depending on arg_type, see below */
__u64 val_off;
- /* arg location case, see bpf_udst_arg() for details */
+ /* arg location case, see bpf_usdt_arg() for details */
enum __bpf_usdt_arg_type arg_type;
/* offset of referenced register within struct pt_regs */
short reg_off;
diff --git a/tools/lib/cmdline.c b/tools/lib/cmdline.c
new file mode 100644
index 000000000000..c85f00f43c5e
--- /dev/null
+++ b/tools/lib/cmdline.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * From lib/cmdline.c
+ */
+#include <stdlib.h>
+
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0) /* fallthrough */
+#endif
+
+unsigned long long memparse(const char *ptr, char **retptr)
+{
+ char *endptr; /* local pointer to end of parsed string */
+
+ unsigned long long ret = strtoll(ptr, &endptr, 0);
+
+ switch (*endptr) {
+ case 'E':
+ case 'e':
+ ret <<= 10;
+ fallthrough;
+ case 'P':
+ case 'p':
+ ret <<= 10;
+ fallthrough;
+ case 'T':
+ case 't':
+ ret <<= 10;
+ fallthrough;
+ case 'G':
+ case 'g':
+ ret <<= 10;
+ fallthrough;
+ case 'M':
+ case 'm':
+ ret <<= 10;
+ fallthrough;
+ case 'K':
+ case 'k':
+ ret <<= 10;
+ endptr++;
+ fallthrough;
+ default:
+ break;
+ }
+
+ if (retptr)
+ *retptr = endptr;
+
+ return ret;
+}
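
Usage sketch (illustrative): each suffix scales the value by a further
factor of 1024, so:

	char *end;
	unsigned long long v;

	v = memparse("64k", &end);	/* v == 65536, end points past 'k' */
	v = memparse("2M", NULL);	/* v == 2097152; retptr may be NULL */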
diff --git a/tools/lib/perf/.gitignore b/tools/lib/perf/.gitignore
new file mode 100644
index 000000000000..0f5b4af63f62
--- /dev/null
+++ b/tools/lib/perf/.gitignore
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+libperf.pc
+libperf.so.*
+tests-shared
+tests-static
diff --git a/tools/lib/string.c b/tools/lib/string.c
index 8b6892f959ab..3126d2cff716 100644
--- a/tools/lib/string.c
+++ b/tools/lib/string.c
@@ -153,6 +153,19 @@ char *strim(char *s)
return skip_spaces(s);
}
+/*
+ * remove_spaces - Removes space characters from @s
+ */
+void remove_spaces(char *s)
+{
+ char *d = s;
+
+ do {
+ while (*d == ' ')
+ ++d;
+ } while ((*s++ = *d++));
+}
+
/**
* strreplace - Replace all occurrences of character in string.
* @s: The string to operate on.
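
Usage sketch for remove_spaces() (illustrative); note it compacts only
ASCII space characters, not tabs or newlines:

	char buf[] = " a b  c ";

	remove_spaces(buf);	/* buf is now "abc" */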
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index b87213263a5e..8703ab487b68 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -38,10 +38,8 @@ endif
ifeq ($(DEBUG),1)
CFLAGS += -O0
-else ifeq ($(CC_NO_CLANG), 0)
- CFLAGS += -O3
else
- CFLAGS += -O6
+ CFLAGS += -O3
endif
# Treat warnings as errors unless directed not to
@@ -76,7 +74,7 @@ include $(srctree)/tools/build/Makefile.include
all: fixdep $(LIBFILE)
-$(SUBCMD_IN): FORCE
+$(SUBCMD_IN): fixdep FORCE
@$(MAKE) $(build)=libsubcmd
$(LIBFILE): $(SUBCMD_IN)
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index 4b60ec03b0bb..eb896d30545b 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -633,10 +633,11 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
const char *const subcommands[], const char *usagestr[], int flags)
{
struct parse_opt_ctx_t ctx;
- char *buf = NULL;
/* build usage string if it's not provided */
if (subcommands && !usagestr[0]) {
+ char *buf = NULL;
+
astrcatf(&buf, "%s %s [<options>] {", subcmd_config.exec_name, argv[0]);
for (int i = 0; subcommands[i]; i++) {
@@ -678,10 +679,7 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
astrcatf(&error_buf, "unknown switch `%c'", *ctx.opt);
usage_with_options(usagestr, options);
}
- if (buf) {
- usagestr[0] = NULL;
- free(buf);
- }
+
return parse_options_end(&ctx);
}
diff --git a/tools/lib/symbol/Makefile b/tools/lib/symbol/Makefile
index 13d43c6f92b4..426b845edfac 100644
--- a/tools/lib/symbol/Makefile
+++ b/tools/lib/symbol/Makefile
@@ -31,11 +31,7 @@ CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
CFLAGS += -ggdb3 -Wall -Wextra -std=gnu11 -U_FORTIFY_SOURCE -fPIC
ifeq ($(DEBUG),0)
-ifeq ($(CC_NO_CLANG), 0)
CFLAGS += -O3
-else
- CFLAGS += -O6
-endif
endif
ifeq ($(DEBUG),0)
diff --git a/tools/mm/Makefile b/tools/mm/Makefile
index 15791c1c5b28..f5725b5c23aa 100644
--- a/tools/mm/Makefile
+++ b/tools/mm/Makefile
@@ -23,7 +23,7 @@ $(LIBS):
$(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
clean:
- $(RM) page-types slabinfo page_owner_sort
+ $(RM) page-types slabinfo page_owner_sort thp_swap_allocator_test
make -C $(LIB_DIR) clean
sbindir ?= /usr/sbin
diff --git a/tools/mm/page-types.c b/tools/mm/page-types.c
index 8d5595b6c59f..fa050d5a48cd 100644
--- a/tools/mm/page-types.c
+++ b/tools/mm/page-types.c
@@ -71,12 +71,12 @@
/* [32-] kernel hacking assistances */
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
-#define KPF_MAPPEDTODISK 34
+#define KPF_OWNER_2 34
#define KPF_PRIVATE 35
#define KPF_PRIVATE_2 36
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
-#define KPF_UNCACHED 39
+#define KPF_UNCACHED 39 /* unused */
#define KPF_SOFTDIRTY 40
#define KPF_ARCH_2 41
@@ -129,12 +129,11 @@ static const char * const page_flag_names[] = {
[KPF_RESERVED] = "r:reserved",
[KPF_MLOCKED] = "m:mlocked",
- [KPF_MAPPEDTODISK] = "d:mappedtodisk",
+ [KPF_OWNER_2] = "d:owner_2",
[KPF_PRIVATE] = "P:private",
[KPF_PRIVATE_2] = "p:private_2",
[KPF_OWNER_PRIVATE] = "O:owner_private",
[KPF_ARCH] = "h:arch",
- [KPF_UNCACHED] = "c:uncached",
[KPF_SOFTDIRTY] = "f:softdirty",
[KPF_ARCH_2] = "H:arch_2",
@@ -472,9 +471,9 @@ static int bit_mask_ok(uint64_t flags)
static uint64_t expand_overloaded_flags(uint64_t flags, uint64_t pme)
{
- /* Anonymous pages overload PG_mappedtodisk */
- if ((flags & BIT(ANON)) && (flags & BIT(MAPPEDTODISK)))
- flags ^= BIT(MAPPEDTODISK) | BIT(ANON_EXCLUSIVE);
+ /* Anonymous pages use PG_owner_2 for anon_exclusive */
+ if ((flags & BIT(ANON)) && (flags & BIT(OWNER_2)))
+ flags ^= BIT(OWNER_2) | BIT(ANON_EXCLUSIVE);
/* SLUB overloads several page flags */
if (flags & BIT(SLAB)) {
diff --git a/tools/net/sunrpc/xdrgen/.gitignore b/tools/net/sunrpc/xdrgen/.gitignore
new file mode 100644
index 000000000000..d7366c2f9be8
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/.gitignore
@@ -0,0 +1,2 @@
+__pycache__
+generators/__pycache__
diff --git a/tools/net/sunrpc/xdrgen/README b/tools/net/sunrpc/xdrgen/README
new file mode 100644
index 000000000000..92f7738ad50c
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/README
@@ -0,0 +1,244 @@
+xdrgen - Linux Kernel XDR code generator
+
+Introduction
+------------
+
+SunRPC programs are typically specified using a language defined by
+RFC 4506. In fact, all IETF-published NFS specifications provide a
+description of the specified protocol using this language.
+
+Since the 1990s, user space consumers of SunRPC have had access to
+a tool that could read such XDR specifications and then generate C
+code that implements the RPC portions of that protocol. This tool is
+called rpcgen.
+
+This RPC-level code handles input directly from the
+network, and thus a high degree of memory safety and sanity checking
+is needed to help ensure proper levels of security. Bugs in this
+code can have significant impact on security and performance.
+
+However, it is code that is repetitive and tedious to write by hand.
+
+The C code generated by rpcgen makes extensive use of the facilities
+of the user space TI-RPC library and libc. Furthermore, the dialect
+of the generated code is very traditional K&R C.
+
+The Linux kernel's implementations of SunRPC-based protocols hand-roll
+their XDR code. There are two main reasons for this:
+
+1. libtirpc (and its predecessors) operate only in user space. The
+ kernel's RPC implementation and its API are significantly
+   different from libtirpc's.
+
+2. rpcgen-generated code is believed to be less efficient than code
+ that is hand-written.
+
+These days, gcc and its kin are capable of optimizing code better
+than human authors. There are only a few instances where writing
+XDR code by hand will make a measurable performance difference.
+
+In addition, the current hand-written code in the Linux kernel is
+difficult to audit and prove that it implements exactly what is in
+the protocol specification.
+
+In order to accrue the benefits of machine-generated XDR code in the
+kernel, a tool is needed that will output C code that works against
+the kernel's SunRPC implementation rather than libtirpc.
+
+Enter xdrgen.
+
+
+Dependencies
+------------
+
+These dependencies are typically packaged by Linux distributions:
+
+- python3
+- python3-lark
+- python3-jinja2
+
+These dependencies are available via PyPI:
+
+- pip install 'lark[interegular]'
+
+
+XDR Specifications
+------------------
+
+When adding a new protocol implementation to the kernel, the XDR
+specification can be derived by feeding a .txt copy of the RFC to
+the script located in tools/net/sunrpc/extract.sh.
+
+ $ extract.sh < rfc0001.txt > new2.x
+
+
+Operation
+---------
+
+Once a .x file is available, use xdrgen to generate source and
+header files containing an implementation of XDR encoding and
+decoding functions for the specified protocol.
+
+ $ ./xdrgen definitions new2.x > include/linux/sunrpc/xdrgen/new2.h
+ $ ./xdrgen declarations new2.x > new2xdr_gen.h
+
+and
+
+ $ ./xdrgen source new2.x > new2xdr_gen.c
+
+The files are ready to use for a server-side protocol implementation,
+or may be used as a guide for implementing these routines by hand.
+
+By default, the only comments added to this code are kdoc comments
+that appear directly in front of the public per-procedure APIs. For
+deeper introspection, specifying the "--annotate" flag will insert
+additional comments in the generated code to help readers match the
+generated code to specific parts of the XDR specification.
+
+Because the generated code is targeted for the Linux kernel, it
+is tagged with a GPLv2-only license.
+
+The xdrgen tool can also provide lexical and syntax checking of
+an XDR specification:
+
+ $ ./xdrgen lint xdr/new.x
+
+
+How It Works
+------------
+
+xdrgen does not use machine learning to generate source code. The
+translation is entirely deterministic.
+
+RFC 4506 Section 6 contains a BNF grammar of the XDR specification
+language. The grammar has been adapted for use by the Python Lark
+module.
+
+The xdr.ebnf file in this directory contains the grammar used to
+parse XDR specifications. xdrgen configures Lark using the grammar
+in xdr.ebnf. Lark parses the target XDR specification using this
+grammar, creating a parse tree.
+
+xdrgen then transforms the parse tree into an abstract syntax tree.
+This tree is passed to a series of code generators.
+
+The generators are implemented as Python classes residing in the
+generators/ directory. Each generator emits code created from Jinja2
+templates stored in the templates/ directory.
+
+Source code is generated in the same order in which the definitions
+appear in the specification, to ensure the generated code compiles. This
+conforms with the behavior of rpcgen.
+
+xdrgen assumes that the generated source code is further compiled by
+a compiler that can optimize in a number of ways, including:
+
+ - Unused functions are discarded (i.e., not added to the executable)
+
+ - Aggressive function inlining removes unnecessary stack frames
+
+ - Single-arm switch statements are replaced by a single conditional
+ branch
+
+And so on.
+
+
+Pragmas
+-------
+
+Pragma directives specify exceptions to the normal generation of
+encoding and decoding functions. Three directives are currently
+implemented: "exclude", "header", and "public".
+
+Pragma exclude
+------ -------
+
+ pragma exclude <RPC procedure> ;
+
+In some cases, a procedure encoder or decoder function might need
+special processing that cannot be automatically generated. The
+automatically-generated functions might conflict or interfere with
+the hand-rolled function. To avoid editing the generated source code
+by hand, a pragma can specify that the procedure's encoder and
+decoder functions are not included in the generated header and
+source.
+
+For example:
+
+ pragma exclude NFSPROC3_READDIRPLUS;
+
+This excludes the decoder function for the READDIRPLUS argument and the
+encoder function for the READDIRPLUS result.
+
+Note that because data item encoder and decoder functions are
+defined "static __maybe_unused", subsequent compilation
+automatically excludes data item encoder and decoder functions that
+are used only by the excluded procedure.
+
+Pragma header
+------ ------
+
+ pragma header <string> ;
+
+Provides a name to use for the header file. For example:
+
+ pragma header nlm4;
+
+Adds
+
+ #include "nlm4xdr_gen.h"
+
+to the generated source file.
+
+Pragma public
+------ ------
+
+ pragma public <XDR data item> ;
+
+Normally XDR encoder and decoder functions are "static". In case an
+implementer wants to call these functions from other source code,
+they can add a public pragma in the input .x file to indicate a set
+of functions that should get a prototype in the generated header,
+and the function definitions will not be declared static.
+
+For example:
+
+ pragma public nfsstat3;
+
+Adds these prototypes in the generated header:
+
+ bool xdrgen_decode_nfsstat3(struct xdr_stream *xdr, enum nfsstat3 *ptr);
+ bool xdrgen_encode_nfsstat3(struct xdr_stream *xdr, enum nfsstat3 value);
+
+And, in the generated source code, both of these functions appear
+without the "static __maybe_unused" modifiers.
+
+
+Future Work
+-----------
+
+Finish implementing XDR pointer and list types.
+
+Generate client-side procedure functions
+
+Expand the README into a user guide similar to rpcgen(1)
+
+Add more pragma directives:
+
+ * @pages -- use xdr_read/write_pages() for the specified opaque
+ field
+ * @skip -- do not decode, but rather skip, the specified argument
+ field
+
+Enable something like a #include to dynamically insert the content
+of other specification files
+
+Properly support line-by-line pass-through via the "%" decorator
+
+Build a unit test suite for verifying translation of XDR language
+into compilable code
+
+Add a command-line option to insert trace_printk call sites in the
+generated source code, for improved (temporary) observability
+
+Generate kernel Rust code as well as C code
diff --git a/tools/net/sunrpc/xdrgen/__init__.py b/tools/net/sunrpc/xdrgen/__init__.py
new file mode 100644
index 000000000000..c940e9275252
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/__init__.py
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# Just to make sphinx-apidoc document this directory
diff --git a/tools/net/sunrpc/xdrgen/generators/__init__.py b/tools/net/sunrpc/xdrgen/generators/__init__.py
new file mode 100644
index 000000000000..fd2457461274
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/__init__.py
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: GPL-2.0
+
+"""Define a base code generator class"""
+
+import sys
+from jinja2 import Environment, FileSystemLoader, Template
+
+from xdr_ast import _XdrAst, Specification, _RpcProgram, _XdrTypeSpecifier
+from xdr_ast import public_apis, pass_by_reference, get_header_name
+from xdr_parse import get_xdr_annotate
+
+
+def create_jinja2_environment(language: str, xdr_type: str) -> Environment:
+ """Open a set of templates based on output language"""
+ match language:
+ case "C":
+ environment = Environment(
+ loader=FileSystemLoader(sys.path[0] + "/templates/C/" + xdr_type + "/"),
+ trim_blocks=True,
+ lstrip_blocks=True,
+ )
+ environment.globals["annotate"] = get_xdr_annotate()
+ environment.globals["public_apis"] = public_apis
+ environment.globals["pass_by_reference"] = pass_by_reference
+ return environment
+ case _:
+ raise NotImplementedError("Language not supported")
+
+
+def get_jinja2_template(
+ environment: Environment, template_type: str, template_name: str
+) -> Template:
+ """Retrieve a Jinja2 template for emitting source code"""
+ return environment.get_template(template_type + "/" + template_name + ".j2")
+
+
+def find_xdr_program_name(root: Specification) -> str:
+ """Retrieve the RPC program name from an abstract syntax tree"""
+ raw_name = get_header_name()
+ if raw_name != "none":
+ return raw_name.lower()
+ for definition in root.definitions:
+ if isinstance(definition.value, _RpcProgram):
+ raw_name = definition.value.name
+ return raw_name.lower().removesuffix("_program").removesuffix("_prog")
+ return "noprog"
+
+
+def header_guard_infix(filename: str) -> str:
+ """Extract the header guard infix from the specification filename"""
+ basename = filename.split("/")[-1]
+ program = basename.replace(".x", "")
+ return program.upper()
+
+
+def kernel_c_type(spec: _XdrTypeSpecifier) -> str:
+ """Return name of C type"""
+ builtin_native_c_type = {
+ "bool": "bool",
+ "int": "s32",
+ "unsigned_int": "u32",
+ "long": "s32",
+ "unsigned_long": "u32",
+ "hyper": "s64",
+ "unsigned_hyper": "u64",
+ }
+ if spec.type_name in builtin_native_c_type:
+ return builtin_native_c_type[spec.type_name]
+ return spec.type_name
+
+
+class Boilerplate:
+ """Base class to generate boilerplate for source files"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ raise NotImplementedError("No language support defined")
+
+ def emit_declaration(self, filename: str, root: Specification) -> None:
+ """Emit declaration header boilerplate"""
+ raise NotImplementedError("Header boilerplate generation not supported")
+
+ def emit_definition(self, filename: str, root: Specification) -> None:
+ """Emit definition header boilerplate"""
+ raise NotImplementedError("Header boilerplate generation not supported")
+
+ def emit_source(self, filename: str, root: Specification) -> None:
+ """Emit generic source code for this XDR type"""
+ raise NotImplementedError("Source boilerplate generation not supported")
+
+
+class SourceGenerator:
+ """Base class to generate header and source code for XDR types"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ raise NotImplementedError("No language support defined")
+
+ def emit_declaration(self, node: _XdrAst) -> None:
+ """Emit one function declaration for this XDR type"""
+ raise NotImplementedError("Declaration generation not supported")
+
+ def emit_decoder(self, node: _XdrAst) -> None:
+ """Emit one decoder function for this XDR type"""
+ raise NotImplementedError("Decoder generation not supported")
+
+ def emit_definition(self, node: _XdrAst) -> None:
+ """Emit one definition for this XDR type"""
+ raise NotImplementedError("Definition generation not supported")
+
+ def emit_encoder(self, node: _XdrAst) -> None:
+ """Emit one encoder function for this XDR type"""
+ raise NotImplementedError("Encoder generation not supported")
diff --git a/tools/net/sunrpc/xdrgen/generators/constant.py b/tools/net/sunrpc/xdrgen/generators/constant.py
new file mode 100644
index 000000000000..f2339caf0953
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/constant.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate code to handle XDR constants"""
+
+from generators import SourceGenerator, create_jinja2_environment
+from xdr_ast import _XdrConstant
+
+class XdrConstantGenerator(SourceGenerator):
+ """Generate source code for XDR constants"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "constants")
+ self.peer = peer
+
+ def emit_definition(self, node: _XdrConstant) -> None:
+ """Emit one definition for a constant"""
+ template = self.environment.get_template("definition.j2")
+ print(template.render(name=node.name, value=node.value))
diff --git a/tools/net/sunrpc/xdrgen/generators/enum.py b/tools/net/sunrpc/xdrgen/generators/enum.py
new file mode 100644
index 000000000000..855e43f4ae38
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/enum.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate code to handle XDR enum types"""
+
+from generators import SourceGenerator, create_jinja2_environment
+from xdr_ast import _XdrEnum, public_apis
+
+
+class XdrEnumGenerator(SourceGenerator):
+ """Generate source code for XDR enum types"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "enum")
+ self.peer = peer
+
+ def emit_declaration(self, node: _XdrEnum) -> None:
+ """Emit one declaration pair for an XDR enum type"""
+ if node.name in public_apis:
+ template = self.environment.get_template("declaration/close.j2")
+ print(template.render(name=node.name))
+
+ def emit_definition(self, node: _XdrEnum) -> None:
+ """Emit one definition for an XDR enum type"""
+ template = self.environment.get_template("definition/open.j2")
+ print(template.render(name=node.name))
+
+ template = self.environment.get_template("definition/enumerator.j2")
+ for enumerator in node.enumerators:
+ print(template.render(name=enumerator.name, value=enumerator.value))
+
+ template = self.environment.get_template("definition/close.j2")
+ print(template.render(name=node.name))
+
+ def emit_decoder(self, node: _XdrEnum) -> None:
+ """Emit one decoder function for an XDR enum type"""
+ template = self.environment.get_template("decoder/enum.j2")
+ print(template.render(name=node.name))
+
+ def emit_encoder(self, node: _XdrEnum) -> None:
+ """Emit one encoder function for an XDR enum type"""
+ template = self.environment.get_template("encoder/enum.j2")
+ print(template.render(name=node.name))
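
Taken together with the C enum templates added later in this patch, the flow
for one invented enum looks roughly like this:

    # Illustration only: the XDR input and all names are invented.
    #
    # XDR input:  enum example_status { EX_OK = 0, EX_STALE = 70 };
    #
    # emit_definition() renders open, one enumerator per value, close:
    #
    #     enum example_status {
    #             EX_OK = 0,
    #             EX_STALE = 70,
    #     };
    #
    # emit_decoder() and emit_encoder() then render one helper apiece,
    # xdrgen_decode_example_status() / xdrgen_encode_example_status().
    gen = XdrEnumGenerator("C", "server")
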
diff --git a/tools/net/sunrpc/xdrgen/generators/header_bottom.py b/tools/net/sunrpc/xdrgen/generators/header_bottom.py
new file mode 100644
index 000000000000..4b55b282dfc0
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/header_bottom.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate header bottom boilerplate"""
+
+import os.path
+import time
+
+from generators import Boilerplate, header_guard_infix
+from generators import create_jinja2_environment, get_jinja2_template
+from xdr_ast import Specification
+
+
+class XdrHeaderBottomGenerator(Boilerplate):
+ """Generate header boilerplate"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "header_bottom")
+ self.peer = peer
+
+ def emit_declaration(self, filename: str, root: Specification) -> None:
+ """Emit the bottom header guard"""
+ template = get_jinja2_template(self.environment, "declaration", "header")
+ print(template.render(infix=header_guard_infix(filename)))
+
+ def emit_definition(self, filename: str, root: Specification) -> None:
+ """Emit the bottom header guard"""
+ template = get_jinja2_template(self.environment, "definition", "header")
+ print(template.render(infix=header_guard_infix(filename)))
+
+ def emit_source(self, filename: str, root: Specification) -> None:
+ pass
diff --git a/tools/net/sunrpc/xdrgen/generators/header_top.py b/tools/net/sunrpc/xdrgen/generators/header_top.py
new file mode 100644
index 000000000000..c6bc21c71f19
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/header_top.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate header top boilerplate"""
+
+import os.path
+import time
+
+from generators import Boilerplate, header_guard_infix
+from generators import create_jinja2_environment, get_jinja2_template
+from xdr_ast import Specification
+
+
+class XdrHeaderTopGenerator(Boilerplate):
+ """Generate header boilerplate"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "header_top")
+ self.peer = peer
+
+ def emit_declaration(self, filename: str, root: Specification) -> None:
+ """Emit the top header guard"""
+ template = get_jinja2_template(self.environment, "declaration", "header")
+ print(
+ template.render(
+ infix=header_guard_infix(filename),
+ filename=filename,
+ mtime=time.ctime(os.path.getmtime(filename)),
+ )
+ )
+
+ def emit_definition(self, filename: str, root: Specification) -> None:
+ """Emit the top header guard"""
+ template = get_jinja2_template(self.environment, "definition", "header")
+ print(
+ template.render(
+ infix=header_guard_infix(filename),
+ filename=filename,
+ mtime=time.ctime(os.path.getmtime(filename)),
+ )
+ )
+
+ def emit_source(self, filename: str, root: Specification) -> None:
+ pass
diff --git a/tools/net/sunrpc/xdrgen/generators/pointer.py b/tools/net/sunrpc/xdrgen/generators/pointer.py
new file mode 100644
index 000000000000..b0b27f1819c8
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/pointer.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate code to handle XDR pointer types"""
+
+from jinja2 import Environment
+
+from generators import SourceGenerator, kernel_c_type
+from generators import create_jinja2_environment, get_jinja2_template
+
+from xdr_ast import _XdrBasic, _XdrVariableLengthString
+from xdr_ast import _XdrFixedLengthOpaque, _XdrVariableLengthOpaque
+from xdr_ast import _XdrFixedLengthArray, _XdrVariableLengthArray
+from xdr_ast import _XdrOptionalData, _XdrPointer, _XdrDeclaration
+from xdr_ast import public_apis
+
+
+def emit_pointer_declaration(environment: Environment, node: _XdrPointer) -> None:
+ """Emit a declaration pair for an XDR pointer type"""
+ if node.name in public_apis:
+ template = get_jinja2_template(environment, "declaration", "close")
+ print(template.render(name=node.name))
+
+
+def emit_pointer_member_definition(
+ environment: Environment, field: _XdrDeclaration
+) -> None:
+ """Emit a definition for one field in an XDR struct"""
+ if isinstance(field, _XdrBasic):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=kernel_c_type(field.spec),
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(template.render(name=field.name))
+ elif isinstance(field, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(template.render(name=field.name))
+ elif isinstance(field, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=kernel_c_type(field.spec),
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=kernel_c_type(field.spec),
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrOptionalData):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=kernel_c_type(field.spec),
+ classifier=field.spec.c_classifier,
+ )
+ )
+
+
+def emit_pointer_definition(environment: Environment, node: _XdrPointer) -> None:
+ """Emit a definition for an XDR pointer type"""
+ template = get_jinja2_template(environment, "definition", "open")
+ print(template.render(name=node.name))
+
+ for field in node.fields[0:-1]:
+ emit_pointer_member_definition(environment, field)
+
+ template = get_jinja2_template(environment, "definition", "close")
+ print(template.render(name=node.name))
+
+
+def emit_pointer_member_decoder(
+ environment: Environment, field: _XdrDeclaration
+) -> None:
+ """Emit a decoder for one field in an XDR pointer"""
+ if isinstance(field, _XdrBasic):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ size=field.size,
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ maxsize=field.maxsize,
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrOptionalData):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ classifier=field.spec.c_classifier,
+ )
+ )
+
+
+def emit_pointer_decoder(environment: Environment, node: _XdrPointer) -> None:
+ """Emit one decoder function for an XDR pointer type"""
+ template = get_jinja2_template(environment, "decoder", "open")
+ print(template.render(name=node.name))
+
+ for field in node.fields[0:-1]:
+ emit_pointer_member_decoder(environment, field)
+
+ template = get_jinja2_template(environment, "decoder", "close")
+ print(template.render())
+
+
+def emit_pointer_member_encoder(
+ environment: Environment, field: _XdrDeclaration
+) -> None:
+ """Emit an encoder for one field in a XDR pointer"""
+ if isinstance(field, _XdrBasic):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrOptionalData):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ classifier=field.spec.c_classifier,
+ )
+ )
+
+
+def emit_pointer_encoder(environment: Environment, node: _XdrPointer) -> None:
+ """Emit one encoder function for an XDR pointer type"""
+ template = get_jinja2_template(environment, "encoder", "open")
+ print(template.render(name=node.name))
+
+ for field in node.fields[0:-1]:
+ emit_pointer_member_encoder(environment, field)
+
+ template = get_jinja2_template(environment, "encoder", "close")
+ print(template.render())
+
+
+class XdrPointerGenerator(SourceGenerator):
+ """Generate source code for XDR pointer"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "pointer")
+ self.peer = peer
+
+ def emit_declaration(self, node: _XdrPointer) -> None:
+ """Emit one declaration pair for an XDR pointer type"""
+ emit_pointer_declaration(self.environment, node)
+
+ def emit_definition(self, node: _XdrPointer) -> None:
+ """Emit one declaration for an XDR pointer type"""
+ emit_pointer_definition(self.environment, node)
+
+ def emit_decoder(self, node: _XdrPointer) -> None:
+ """Emit one decoder function for an XDR pointer type"""
+ emit_pointer_decoder(self.environment, node)
+
+ def emit_encoder(self, node: _XdrPointer) -> None:
+ """Emit one encoder function for an XDR pointer type"""
+ emit_pointer_encoder(self.environment, node)
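
Note that every walker above iterates node.fields[0:-1], unlike the struct
generator below, which walks all fields. My reading (not stated in the patch)
is that an XDR pointer type is the RFC 4506 linked-list idiom, so the trailing
self-referential member is deliberately left out of the emitted C:

    # Illustration only, with invented data: what fields[0:-1] drops.
    #
    #   struct entry {
    #           string name<64>;
    #           entry  *next;   /* the recursive tail */
    #   };
    fields = ["name", "next"]        # parsed field names of "entry"
    assert fields[0:-1] == ["name"]  # the tail member is not emitted
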
diff --git a/tools/net/sunrpc/xdrgen/generators/program.py b/tools/net/sunrpc/xdrgen/generators/program.py
new file mode 100644
index 000000000000..ac3cf1694b68
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/program.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate code for an RPC program's procedures"""
+
+from jinja2 import Environment
+
+from generators import SourceGenerator, create_jinja2_environment
+from xdr_ast import _RpcProgram, _RpcVersion, excluded_apis
+
+
+def emit_version_definitions(
+ environment: Environment, program: str, version: _RpcVersion
+) -> None:
+ """Emit procedure numbers for each RPC version's procedures"""
+ template = environment.get_template("definition/open.j2")
+ print(template.render(program=program.upper()))
+
+ template = environment.get_template("definition/procedure.j2")
+ for procedure in version.procedures:
+ if procedure.name not in excluded_apis:
+ print(
+ template.render(
+ name=procedure.name,
+ value=procedure.number,
+ )
+ )
+
+ template = environment.get_template("definition/close.j2")
+ print(template.render())
+
+
+def emit_version_declarations(
+ environment: Environment, program: str, version: _RpcVersion
+) -> None:
+ """Emit declarations for each RPC version's procedures"""
+ arguments = dict.fromkeys([])  # a dict acts as an insertion-ordered set
+ for procedure in version.procedures:
+ if procedure.name not in excluded_apis:
+ arguments[procedure.argument.type_name] = None
+ if len(arguments) > 0:
+ print("")
+ template = environment.get_template("declaration/argument.j2")
+ for argument in arguments:
+ print(template.render(program=program, argument=argument))
+
+ results = dict.fromkeys([])
+ for procedure in version.procedures:
+ if procedure.name not in excluded_apis:
+ results[procedure.result.type_name] = None
+ if len(results) > 0:
+ print("")
+ template = environment.get_template("declaration/result.j2")
+ for result in results:
+ print(template.render(program=program, result=result))
+
+
+def emit_version_argument_decoders(
+ environment: Environment, program: str, version: _RpcVersion
+) -> None:
+ """Emit server argument decoders for each RPC version's procedures"""
+ arguments = dict.fromkeys([])
+ for procedure in version.procedures:
+ if procedure.name not in excluded_apis:
+ arguments[procedure.argument.type_name] = None
+
+ template = environment.get_template("decoder/argument.j2")
+ for argument in arguments:
+ print(template.render(program=program, argument=argument))
+
+
+def emit_version_result_decoders(
+ environment: Environment, program: str, version: _RpcVersion
+) -> None:
+ """Emit client result decoders for each RPC version's procedures"""
+ results = dict.fromkeys([])
+ for procedure in version.procedures:
+ if procedure.name not in excluded_apis:
+ results[procedure.result.type_name] = None
+
+ template = environment.get_template("decoder/result.j2")
+ for result in results:
+ print(template.render(program=program, result=result))
+
+
+def emit_version_argument_encoders(
+ environment: Environment, program: str, version: _RpcVersion
+) -> None:
+ """Emit client argument encoders for each RPC version's procedures"""
+ arguments = dict.fromkeys([])
+ for procedure in version.procedures:
+ if procedure.name not in excluded_apis:
+ arguments[procedure.argument.type_name] = None
+
+ template = environment.get_template("encoder/argument.j2")
+ for argument in arguments:
+ print(template.render(program=program, argument=argument))
+
+
+def emit_version_result_encoders(
+ environment: Environment, program: str, version: _RpcVersion
+) -> None:
+ """Emit server result encoders for each RPC version's procedures"""
+ results = dict.fromkeys([])
+ for procedure in version.procedures:
+ if procedure.name not in excluded_apis:
+ results[procedure.result.type_name] = None
+
+ template = environment.get_template("encoder/result.j2")
+ for result in results:
+ print(template.render(program=program, result=result))
+
+
+class XdrProgramGenerator(SourceGenerator):
+ """Generate source code for an RPC program's procedures"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "program")
+ self.peer = peer
+
+ def emit_definition(self, node: _RpcProgram) -> None:
+ """Emit procedure numbers for each of an RPC programs's procedures"""
+ raw_name = node.name
+ program = raw_name.lower().removesuffix("_program").removesuffix("_prog")
+
+ for version in node.versions:
+ emit_version_definitions(self.environment, program, version)
+
+ def emit_declaration(self, node: _RpcProgram) -> None:
+ """Emit a declaration pair for each of an RPC programs's procedures"""
+ raw_name = node.name
+ program = raw_name.lower().removesuffix("_program").removesuffix("_prog")
+
+ for version in node.versions:
+ emit_version_declarations(self.environment, program, version)
+
+ def emit_decoder(self, node: _RpcProgram) -> None:
+ """Emit all decoder functions for an RPC program's procedures"""
+ raw_name = node.name
+ program = raw_name.lower().removesuffix("_program").removesuffix("_prog")
+ match self.peer:
+ case "server":
+ for version in node.versions:
+ emit_version_argument_decoders(
+ self.environment, program, version,
+ )
+ case "client":
+ for version in node.versions:
+ emit_version_result_decoders(
+ self.environment, program, version,
+ )
+
+ def emit_encoder(self, node: _RpcProgram) -> None:
+ """Emit all encoder functions for an RPC program's procedures"""
+ raw_name = node.name
+ program = raw_name.lower().removesuffix("_program").removesuffix("_prog")
+ match self.peer:
+ case "server":
+ for version in node.versions:
+ emit_version_result_encoders(
+ self.environment, program, version,
+ )
+ case "client":
+ for version in node.versions:
+ emit_version_argument_encoders(
+ self.environment, program, version,
+ )
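
Each emitter above first derives a short program name by trimming a
conventional suffix, as this minimal sketch shows (the program name is
invented; str.removesuffix() requires Python 3.9 or later):

    # Illustration only: invented RPC program name.
    raw_name = "EXAMPLE_CALLBACK_PROGRAM"
    program = raw_name.lower().removesuffix("_program").removesuffix("_prog")
    assert program == "example_callback"
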
diff --git a/tools/net/sunrpc/xdrgen/generators/source_top.py b/tools/net/sunrpc/xdrgen/generators/source_top.py
new file mode 100644
index 000000000000..bcf47d93d6f1
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/source_top.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate source code boilerplate"""
+
+import os.path
+import time
+
+from generators import Boilerplate
+from generators import find_xdr_program_name, create_jinja2_environment
+from xdr_ast import _RpcProgram, Specification, get_header_name
+
+
+class XdrSourceTopGenerator(Boilerplate):
+ """Generate source code boilerplate"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "source_top")
+ self.peer = peer
+
+ def emit_source(self, filename: str, root: Specification) -> None:
+ """Emit the top source boilerplate"""
+ name = find_xdr_program_name(root)
+ template = self.environment.get_template(self.peer + ".j2")
+ print(
+ template.render(
+ program=name,
+ filename=filename,
+ mtime=time.ctime(os.path.getmtime(filename)),
+ )
+ )
diff --git a/tools/net/sunrpc/xdrgen/generators/struct.py b/tools/net/sunrpc/xdrgen/generators/struct.py
new file mode 100644
index 000000000000..b694cd470829
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/struct.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate code to handle XDR struct types"""
+
+from jinja2 import Environment
+
+from generators import SourceGenerator, kernel_c_type
+from generators import create_jinja2_environment, get_jinja2_template
+
+from xdr_ast import _XdrBasic, _XdrVariableLengthString
+from xdr_ast import _XdrFixedLengthOpaque, _XdrVariableLengthOpaque
+from xdr_ast import _XdrFixedLengthArray, _XdrVariableLengthArray
+from xdr_ast import _XdrOptionalData, _XdrStruct, _XdrDeclaration
+from xdr_ast import public_apis
+
+
+def emit_struct_declaration(environment: Environment, node: _XdrStruct) -> None:
+ """Emit one declaration pair for an XDR struct type"""
+ if node.name in public_apis:
+ template = get_jinja2_template(environment, "declaration", "close")
+ print(template.render(name=node.name))
+
+
+def emit_struct_member_definition(
+ environment: Environment, field: _XdrDeclaration
+) -> None:
+ """Emit a definition for one field in an XDR struct"""
+ if isinstance(field, _XdrBasic):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=kernel_c_type(field.spec),
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(template.render(name=field.name))
+ elif isinstance(field, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(template.render(name=field.name))
+ elif isinstance(field, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=kernel_c_type(field.spec),
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=kernel_c_type(field.spec),
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrOptionalData):
+ template = get_jinja2_template(environment, "definition", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=kernel_c_type(field.spec),
+ classifier=field.spec.c_classifier,
+ )
+ )
+
+
+def emit_struct_definition(environment: Environment, node: _XdrStruct) -> None:
+ """Emit one definition for an XDR struct type"""
+ template = get_jinja2_template(environment, "definition", "open")
+ print(template.render(name=node.name))
+
+ for field in node.fields:
+ emit_struct_member_definition(environment, field)
+
+ template = get_jinja2_template(environment, "definition", "close")
+ print(template.render(name=node.name))
+
+
+def emit_struct_member_decoder(
+ environment: Environment, field: _XdrDeclaration
+) -> None:
+ """Emit a decoder for one field in an XDR struct"""
+ if isinstance(field, _XdrBasic):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ size=field.size,
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ maxsize=field.maxsize,
+ classifier=field.spec.c_classifier,
+ )
+ )
+ elif isinstance(field, _XdrOptionalData):
+ template = get_jinja2_template(environment, "decoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ classifier=field.spec.c_classifier,
+ )
+ )
+
+
+def emit_struct_decoder(environment: Environment, node: _XdrStruct) -> None:
+ """Emit one decoder function for an XDR struct type"""
+ template = get_jinja2_template(environment, "decoder", "open")
+ print(template.render(name=node.name))
+
+ for field in node.fields:
+ emit_struct_member_decoder(environment, field)
+
+ template = get_jinja2_template(environment, "decoder", "close")
+ print(template.render())
+
+
+def emit_struct_member_encoder(
+ environment: Environment, field: _XdrDeclaration
+) -> None:
+ """Emit an encoder for one field in an XDR struct"""
+ if isinstance(field, _XdrBasic):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ size=field.size,
+ )
+ )
+ elif isinstance(field, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ maxsize=field.maxsize,
+ )
+ )
+ elif isinstance(field, _XdrOptionalData):
+ template = get_jinja2_template(environment, "encoder", field.template)
+ print(
+ template.render(
+ name=field.name,
+ type=field.spec.type_name,
+ classifier=field.spec.c_classifier,
+ )
+ )
+
+
+def emit_struct_encoder(environment: Environment, node: _XdrStruct) -> None:
+ """Emit one encoder function for an XDR struct type"""
+ template = get_jinja2_template(environment, "encoder", "open")
+ print(template.render(name=node.name))
+
+ for field in node.fields:
+ emit_struct_member_encoder(environment, field)
+
+ template = get_jinja2_template(environment, "encoder", "close")
+ print(template.render())
+
+
+class XdrStructGenerator(SourceGenerator):
+ """Generate source code for XDR structs"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "struct")
+ self.peer = peer
+
+ def emit_declaration(self, node: _XdrStruct) -> None:
+ """Emit one declaration pair for an XDR struct type"""
+ emit_struct_declaration(self.environment, node)
+
+ def emit_definition(self, node: _XdrStruct) -> None:
+ """Emit one definition for an XDR struct type"""
+ emit_struct_definition(self.environment, node)
+
+ def emit_decoder(self, node: _XdrStruct) -> None:
+ """Emit one decoder function for an XDR struct type"""
+ emit_struct_decoder(self.environment, node)
+
+ def emit_encoder(self, node: _XdrStruct) -> None:
+ """Emit one encoder function for an XDR struct type"""
+ emit_struct_encoder(self.environment, node)
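
A sketch of how one invented struct's fields fan out across the isinstance
ladder above; each branch renders the Jinja2 template named by field.template
with only the keywords that template needs:

    # Illustration only: invented XDR and the AST class each field
    # becomes, hence the branch that handles it.
    #
    #   struct example {
    #           unsigned int id;         ->  _XdrBasic
    #           opaque       cookie[8];  ->  _XdrFixedLengthOpaque
    #           string       label<32>;  ->  _XdrVariableLengthString
    #           unsigned int acl<>;      ->  _XdrVariableLengthArray
    #           example      *next;      ->  _XdrOptionalData
    #   };
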
diff --git a/tools/net/sunrpc/xdrgen/generators/typedef.py b/tools/net/sunrpc/xdrgen/generators/typedef.py
new file mode 100644
index 000000000000..85a1b2303333
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/typedef.py
@@ -0,0 +1,255 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate code to handle XDR typedefs"""
+
+from jinja2 import Environment
+
+from generators import SourceGenerator, kernel_c_type
+from generators import create_jinja2_environment, get_jinja2_template
+
+from xdr_ast import _XdrBasic, _XdrTypedef, _XdrVariableLengthString
+from xdr_ast import _XdrFixedLengthOpaque, _XdrVariableLengthOpaque
+from xdr_ast import _XdrFixedLengthArray, _XdrVariableLengthArray
+from xdr_ast import _XdrOptionalData, _XdrVoid, _XdrDeclaration
+from xdr_ast import public_apis
+
+
+def emit_typedef_declaration(environment: Environment, node: _XdrDeclaration) -> None:
+ """Emit a declaration pair for one XDR typedef"""
+ if node.name not in public_apis:
+ return
+ if isinstance(node, _XdrBasic):
+ template = get_jinja2_template(environment, "declaration", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=kernel_c_type(node.spec),
+ classifier=node.spec.c_classifier,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "declaration", node.template)
+ print(template.render(name=node.name))
+ elif isinstance(node, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "declaration", node.template)
+ print(template.render(name=node.name, size=node.size))
+ elif isinstance(node, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "declaration", node.template)
+ print(template.render(name=node.name))
+ elif isinstance(node, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "declaration", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ size=node.size,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "declaration", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ classifier=node.spec.c_classifier,
+ )
+ )
+ elif isinstance(node, _XdrOptionalData):
+ raise NotImplementedError("<optional_data> typedef not yet implemented")
+ elif isinstance(node, _XdrVoid):
+ raise NotImplementedError("<void> typedef not yet implemented")
+ else:
+ raise NotImplementedError("typedef: type not recognized")
+
+
+def emit_typedef_definition(environment: Environment, node: _XdrDeclaration) -> None:
+ """Emit a definition for one XDR typedef"""
+ if isinstance(node, _XdrBasic):
+ template = get_jinja2_template(environment, "definition", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=kernel_c_type(node.spec),
+ classifier=node.spec.c_classifier,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "definition", node.template)
+ print(template.render(name=node.name))
+ elif isinstance(node, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "definition", node.template)
+ print(template.render(name=node.name, size=node.size))
+ elif isinstance(node, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "definition", node.template)
+ print(template.render(name=node.name))
+ elif isinstance(node, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "definition", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ size=node.size,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "definition", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ classifier=node.spec.c_classifier,
+ )
+ )
+ elif isinstance(node, _XdrOptionalData):
+ raise NotImplementedError("<optional_data> typedef not yet implemented")
+ elif isinstance(node, _XdrVoid):
+ raise NotImplementedError("<void> typedef not yet implemented")
+ else:
+ raise NotImplementedError("typedef: type not recognized")
+
+
+def emit_typedef_decoder(environment: Environment, node: _XdrDeclaration) -> None:
+ """Emit a decoder function for one XDR typedef"""
+ if isinstance(node, _XdrBasic):
+ template = get_jinja2_template(environment, "decoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "decoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ maxsize=node.maxsize,
+ )
+ )
+ elif isinstance(node, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "decoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ size=node.size,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "decoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ maxsize=node.maxsize,
+ )
+ )
+ elif isinstance(node, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "decoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ size=node.size,
+ classifier=node.spec.c_classifier,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "decoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ maxsize=node.maxsize,
+ )
+ )
+ elif isinstance(node, _XdrOptionalData):
+ raise NotImplementedError("<optional_data> typedef not yet implemented")
+ elif isinstance(node, _XdrVoid):
+ raise NotImplementedError("<void> typedef not yet implemented")
+ else:
+ raise NotImplementedError("typedef: type not recognized")
+
+
+def emit_typedef_encoder(environment: Environment, node: _XdrDeclaration) -> None:
+ """Emit an encoder function for one XDR typedef"""
+ if isinstance(node, _XdrBasic):
+ template = get_jinja2_template(environment, "encoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthString):
+ template = get_jinja2_template(environment, "encoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ maxsize=node.maxsize,
+ )
+ )
+ elif isinstance(node, _XdrFixedLengthOpaque):
+ template = get_jinja2_template(environment, "encoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ size=node.size,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthOpaque):
+ template = get_jinja2_template(environment, "encoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ maxsize=node.maxsize,
+ )
+ )
+ elif isinstance(node, _XdrFixedLengthArray):
+ template = get_jinja2_template(environment, "encoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ size=node.size,
+ )
+ )
+ elif isinstance(node, _XdrVariableLengthArray):
+ template = get_jinja2_template(environment, "encoder", node.template)
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ maxsize=node.maxsize,
+ )
+ )
+ elif isinstance(node, _XdrOptionalData):
+ raise NotImplementedError("<optional_data> typedef not yet implemented")
+ elif isinstance(node, _XdrVoid):
+ raise NotImplementedError("<void> typedef not yet implemented")
+ else:
+ raise NotImplementedError("typedef: type not recognized")
+
+
+class XdrTypedefGenerator(SourceGenerator):
+ """Generate source code for XDR typedefs"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "typedef")
+ self.peer = peer
+
+ def emit_declaration(self, node: _XdrTypedef) -> None:
+ """Emit one declaration pair for an XDR enum type"""
+ emit_typedef_declaration(self.environment, node.declaration)
+
+ def emit_definition(self, node: _XdrTypedef) -> None:
+ """Emit one definition for an XDR typedef"""
+ emit_typedef_definition(self.environment, node.declaration)
+
+ def emit_decoder(self, node: _XdrTypedef) -> None:
+ """Emit one decoder function for an XDR typedef"""
+ emit_typedef_decoder(self.environment, node.declaration)
+
+ def emit_encoder(self, node: _XdrTypedef) -> None:
+ """Emit one encoder function for an XDR typedef"""
+ emit_typedef_encoder(self.environment, node.declaration)
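
For reference, a sketch of the typedef forms the ladders above handle; the
names are invented. Optional-data and void typedefs parse, but every emit
path deliberately raises NotImplementedError for them:

    # Illustration only: invented typedefs and the AST class each
    # one maps to.
    #
    #   typedef unsigned int example_id;    ->  _XdrBasic
    #   typedef opaque       verifier[8];   ->  _XdrFixedLengthOpaque
    #   typedef opaque       data<1024>;    ->  _XdrVariableLengthOpaque
    #   typedef string       name<64>;      ->  _XdrVariableLengthString
    #   typedef example_id   table[16];     ->  _XdrFixedLengthArray
    #   typedef example_id   list<>;        ->  _XdrVariableLengthArray
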
diff --git a/tools/net/sunrpc/xdrgen/generators/union.py b/tools/net/sunrpc/xdrgen/generators/union.py
new file mode 100644
index 000000000000..7974967bbb9f
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/generators/union.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Generate code to handle XDR unions"""
+
+from jinja2 import Environment
+
+from generators import SourceGenerator
+from generators import create_jinja2_environment, get_jinja2_template
+
+from xdr_ast import _XdrBasic, _XdrUnion, _XdrVoid
+from xdr_ast import _XdrDeclaration, _XdrCaseSpec, public_apis
+
+
+def emit_union_declaration(environment: Environment, node: _XdrUnion) -> None:
+ """Emit one declaration pair for an XDR union type"""
+ if node.name in public_apis:
+ template = get_jinja2_template(environment, "declaration", "close")
+ print(template.render(name=node.name))
+
+
+def emit_union_switch_spec_definition(
+ environment: Environment, node: _XdrDeclaration
+) -> None:
+ """Emit a definition for an XDR union's discriminant"""
+ assert isinstance(node, _XdrBasic)
+ template = get_jinja2_template(environment, "definition", "switch_spec")
+ print(
+ template.render(
+ name=node.name,
+ type=node.spec.type_name,
+ classifier=node.spec.c_classifier,
+ )
+ )
+
+
+def emit_union_case_spec_definition(
+ environment: Environment, node: _XdrDeclaration
+) -> None:
+ """Emit a definition for an XDR union's case arm"""
+ if isinstance(node.arm, _XdrVoid):
+ return
+ assert isinstance(node.arm, _XdrBasic)
+ template = get_jinja2_template(environment, "definition", "case_spec")
+ print(
+ template.render(
+ name=node.arm.name,
+ type=node.arm.spec.type_name,
+ classifier=node.arm.spec.c_classifier,
+ )
+ )
+
+
+def emit_union_definition(environment: Environment, node: _XdrUnion) -> None:
+ """Emit one XDR union definition"""
+ template = get_jinja2_template(environment, "definition", "open")
+ print(template.render(name=node.name))
+
+ emit_union_switch_spec_definition(environment, node.discriminant)
+
+ for case in node.cases:
+ emit_union_case_spec_definition(environment, case)
+
+ if node.default is not None:
+ emit_union_case_spec_definition(environment, node.default)
+
+ template = get_jinja2_template(environment, "definition", "close")
+ print(template.render(name=node.name))
+
+
+def emit_union_switch_spec_decoder(
+ environment: Environment, node: _XdrDeclaration
+) -> None:
+ """Emit a decoder for an XDR union's discriminant"""
+ assert isinstance(node, _XdrBasic)
+ template = get_jinja2_template(environment, "decoder", "switch_spec")
+ print(template.render(name=node.name, type=node.spec.type_name))
+
+
+def emit_union_case_spec_decoder(environment: Environment, node: _XdrCaseSpec) -> None:
+ """Emit decoder functions for an XDR union's case arm"""
+
+ if isinstance(node.arm, _XdrVoid):
+ return
+
+ template = get_jinja2_template(environment, "decoder", "case_spec")
+ for case in node.values:
+ print(template.render(case=case))
+
+ assert isinstance(node.arm, _XdrBasic)
+ template = get_jinja2_template(environment, "decoder", node.arm.template)
+ print(
+ template.render(
+ name=node.arm.name,
+ type=node.arm.spec.type_name,
+ classifier=node.arm.spec.c_classifier,
+ )
+ )
+
+ template = get_jinja2_template(environment, "decoder", "break")
+ print(template.render())
+
+
+def emit_union_default_spec_decoder(environment: Environment, node: _XdrUnion) -> None:
+ """Emit a decoder function for an XDR union's default arm"""
+ default_case = node.default
+
+ # Avoid a gcc warning about a default case with boolean discriminant
+ if default_case is None and node.discriminant.spec.type_name == "bool":
+ return
+
+ template = get_jinja2_template(environment, "decoder", "default_spec")
+ print(template.render())
+
+ if default_case is None or isinstance(default_case.arm, _XdrVoid):
+ template = get_jinja2_template(environment, "decoder", "break")
+ print(template.render())
+ return
+
+ assert isinstance(default_case.arm, _XdrBasic)
+ template = get_jinja2_template(environment, "decoder", default_case.arm.template)
+ print(
+ template.render(
+ name=default_case.arm.name,
+ type=default_case.arm.spec.type_name,
+ classifier=default_case.arm.spec.c_classifier,
+ )
+ )
+
+
+def emit_union_decoder(environment: Environment, node: _XdrUnion) -> None:
+ """Emit one XDR union decoder"""
+ template = get_jinja2_template(environment, "decoder", "open")
+ print(template.render(name=node.name))
+
+ emit_union_switch_spec_decoder(environment, node.discriminant)
+
+ for case in node.cases:
+ emit_union_case_spec_decoder(environment, case)
+
+ emit_union_default_spec_decoder(environment, node)
+
+ template = get_jinja2_template(environment, "decoder", "close")
+ print(template.render())
+
+
+def emit_union_switch_spec_encoder(
+ environment: Environment, node: _XdrDeclaration
+) -> None:
+ """Emit an encoder for an XDR union's discriminant"""
+ assert isinstance(node, _XdrBasic)
+ template = get_jinja2_template(environment, "encoder", "switch_spec")
+ print(template.render(name=node.name, type=node.spec.type_name))
+
+
+def emit_union_case_spec_encoder(environment: Environment, node: _XdrCaseSpec) -> None:
+ """Emit encoder functions for an XDR union's case arm"""
+
+ if isinstance(node.arm, _XdrVoid):
+ return
+
+ template = get_jinja2_template(environment, "encoder", "case_spec")
+ for case in node.values:
+ print(template.render(case=case))
+
+ assert isinstance(node.arm, _XdrBasic)
+ template = get_jinja2_template(environment, "encoder", node.arm.template)
+ print(
+ template.render(
+ name=node.arm.name,
+ type=node.arm.spec.type_name,
+ )
+ )
+
+ template = get_jinja2_template(environment, "encoder", "break")
+ print(template.render())
+
+
+def emit_union_default_spec_encoder(environment: Environment, node: _XdrUnion) -> None:
+ """Emit an encoder function for an XDR union's default arm"""
+ default_case = node.default
+
+ # Avoid a gcc warning about a default case with boolean discriminant
+ if default_case is None and node.discriminant.spec.type_name == "bool":
+ return
+
+ template = get_jinja2_template(environment, "encoder", "default_spec")
+ print(template.render())
+
+ if default_case is None or isinstance(default_case.arm, _XdrVoid):
+ template = get_jinja2_template(environment, "encoder", "break")
+ print(template.render())
+ return
+
+ assert isinstance(default_case.arm, _XdrBasic)
+ template = get_jinja2_template(environment, "encoder", default_case.arm.template)
+ print(
+ template.render(
+ name=default_case.arm.name,
+ type=default_case.arm.spec.type_name,
+ )
+ )
+
+
+def emit_union_encoder(environment: Environment, node: _XdrUnion) -> None:
+ """Emit one XDR union encoder"""
+ template = get_jinja2_template(environment, "encoder", "open")
+ print(template.render(name=node.name))
+
+ emit_union_switch_spec_encoder(environment, node.discriminant)
+
+ for case in node.cases:
+ emit_union_case_spec_encoder(environment, case)
+
+ emit_union_default_spec_encoder(environment, node)
+
+ template = get_jinja2_template(environment, "encoder", "close")
+ print(template.render())
+
+
+class XdrUnionGenerator(SourceGenerator):
+ """Generate source code for XDR unions"""
+
+ def __init__(self, language: str, peer: str):
+ """Initialize an instance of this class"""
+ self.environment = create_jinja2_environment(language, "union")
+ self.peer = peer
+
+ def emit_declaration(self, node: _XdrUnion) -> None:
+ """Emit one declaration pair for an XDR union"""
+ emit_union_declaration(self.environment, node)
+
+ def emit_definition(self, node: _XdrUnion) -> None:
+ """Emit one definition for an XDR union"""
+ emit_union_definition(self.environment, node)
+
+ def emit_decoder(self, node: _XdrUnion) -> None:
+ """Emit one decoder function for an XDR union"""
+ emit_union_decoder(self.environment, node)
+
+ def emit_encoder(self, node: _XdrUnion) -> None:
+ """Emit one encoder function for an XDR union"""
+ emit_union_encoder(self.environment, node)
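
The emitted decoder follows the order of emit_union_decoder() above; a sketch
of its shape (the exact C text lives in the union templates, which fall
outside this hunk):

    # Illustration only: rough shape of one generated union decoder.
    #
    #   switch_spec   ->  decode the discriminant
    #   case_spec     ->  one "case X:" label per case value,
    #                     then the decoder for that arm
    #   break         ->  closes each arm
    #   default_spec  ->  default arm; skipped entirely when the
    #                     discriminant is bool and no default exists,
    #                     to avoid the gcc warning noted above
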
diff --git a/tools/net/sunrpc/xdrgen/grammars/xdr.lark b/tools/net/sunrpc/xdrgen/grammars/xdr.lark
new file mode 100644
index 000000000000..f3c4552e548d
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/grammars/xdr.lark
@@ -0,0 +1,119 @@
+// A Lark grammar for the XDR specification language based on
+// https://tools.ietf.org/html/rfc4506 Section 6.3
+
+declaration : "opaque" identifier "[" value "]" -> fixed_length_opaque
+ | "opaque" identifier "<" [ value ] ">" -> variable_length_opaque
+ | "string" identifier "<" [ value ] ">" -> variable_length_string
+ | type_specifier identifier "[" value "]" -> fixed_length_array
+ | type_specifier identifier "<" [ value ] ">" -> variable_length_array
+ | type_specifier "*" identifier -> optional_data
+ | type_specifier identifier -> basic
+ | "void" -> void
+
+value : decimal_constant
+ | hexadecimal_constant
+ | octal_constant
+ | identifier
+
+constant : decimal_constant | hexadecimal_constant | octal_constant
+
+type_specifier : unsigned_hyper
+ | unsigned_long
+ | unsigned_int
+ | hyper
+ | long
+ | int
+ | float
+ | double
+ | quadruple
+ | bool
+ | enum_type_spec
+ | struct_type_spec
+ | union_type_spec
+ | identifier
+
+unsigned_hyper : "unsigned" "hyper"
+unsigned_long : "unsigned" "long"
+unsigned_int : "unsigned" "int"
+hyper : "hyper"
+long : "long"
+int : "int"
+float : "float"
+double : "double"
+quadruple : "quadruple"
+bool : "bool"
+
+enum_type_spec : "enum" enum_body
+
+enum_body : "{" ( identifier "=" value ) ( "," identifier "=" value )* "}"
+
+struct_type_spec : "struct" struct_body
+
+struct_body : "{" ( declaration ";" )+ "}"
+
+union_type_spec : "union" union_body
+
+union_body : switch_spec "{" case_spec+ [ default_spec ] "}"
+
+switch_spec : "switch" "(" declaration ")"
+
+case_spec : ( "case" value ":" )+ declaration ";"
+
+default_spec : "default" ":" declaration ";"
+
+constant_def : "const" identifier "=" value ";"
+
+type_def : "typedef" declaration ";" -> typedef
+ | "enum" identifier enum_body ";" -> enum
+ | "struct" identifier struct_body ";" -> struct
+ | "union" identifier union_body ";" -> union
+
+specification : definition*
+
+definition : constant_def
+ | type_def
+ | program_def
+ | pragma_def
+
+//
+// RPC program definitions not specified in RFC 4506
+//
+
+program_def : "program" identifier "{" version_def+ "}" "=" constant ";"
+
+version_def : "version" identifier "{" procedure_def+ "}" "=" constant ";"
+
+procedure_def : type_specifier identifier "(" type_specifier ")" "=" constant ";"
+
+pragma_def : "pragma" directive identifier [ identifier ] ";"
+
+directive : exclude_directive
+ | header_directive
+ | pages_directive
+ | public_directive
+ | skip_directive
+
+exclude_directive : "exclude"
+header_directive : "header"
+pages_directive : "pages"
+public_directive : "public"
+skip_directive : "skip"
+
+//
+// XDR language primitives
+//
+
+identifier : /([a-z]|[A-Z])(_|[a-z]|[A-Z]|[0-9])*/
+
+decimal_constant : /[\+-]?(0|[1-9][0-9]*)/
+hexadecimal_constant : /0x([a-f]|[A-F]|[0-9])+/
+octal_constant : /0[0-7]+/
+
+PASSTHRU : "%" | "%" /.+/
+%ignore PASSTHRU
+
+%import common.C_COMMENT
+%ignore C_COMMENT
+
+%import common.WS
+%ignore WS
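
An invented specification that this grammar accepts; every construct below
maps onto one of the rules above:

    # Illustration only: not a real protocol definition.
    EXAMPLE_XDR = """
    const EXAMPLE_MAX = 512;

    enum example_status {
        EX_OK = 0,
        EX_STALE = 70
    };

    struct example_item {
        example_status status;
        opaque         cookie[8];
        string         label<EXAMPLE_MAX>;
        example_item   *next;
    };

    program EXAMPLE_PROGRAM {
        version EXAMPLE_VERS {
            example_item EXAMPLE_GET(example_status) = 1;
        } = 1;
    } = 400999;
    """
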
diff --git a/tools/net/sunrpc/xdrgen/subcmds/__init__.py b/tools/net/sunrpc/xdrgen/subcmds/__init__.py
new file mode 100644
index 000000000000..c940e9275252
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/subcmds/__init__.py
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# Just to make sphinx-apidoc document this directory
diff --git a/tools/net/sunrpc/xdrgen/subcmds/declarations.py b/tools/net/sunrpc/xdrgen/subcmds/declarations.py
new file mode 100644
index 000000000000..c5e8d79986ef
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/subcmds/declarations.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Translate an XDR specification into executable code that
+can be compiled for the Linux kernel."""
+
+import logging
+
+from argparse import Namespace
+from lark import logger
+from lark.exceptions import UnexpectedInput
+
+from generators.constant import XdrConstantGenerator
+from generators.enum import XdrEnumGenerator
+from generators.header_bottom import XdrHeaderBottomGenerator
+from generators.header_top import XdrHeaderTopGenerator
+from generators.pointer import XdrPointerGenerator
+from generators.program import XdrProgramGenerator
+from generators.typedef import XdrTypedefGenerator
+from generators.struct import XdrStructGenerator
+from generators.union import XdrUnionGenerator
+
+from xdr_ast import transform_parse_tree, _RpcProgram, Specification
+from xdr_ast import _XdrConstant, _XdrEnum, _XdrPointer
+from xdr_ast import _XdrTypedef, _XdrStruct, _XdrUnion
+from xdr_parse import xdr_parser, set_xdr_annotate
+
+logger.setLevel(logging.INFO)
+
+
+def emit_header_declarations(
+ root: Specification, language: str, peer: str
+) -> None:
+ """Emit header declarations"""
+ for definition in root.definitions:
+ if isinstance(definition.value, _XdrEnum):
+ gen = XdrEnumGenerator(language, peer)
+ elif isinstance(definition.value, _XdrPointer):
+ gen = XdrPointerGenerator(language, peer)
+ elif isinstance(definition.value, _XdrTypedef):
+ gen = XdrTypedefGenerator(language, peer)
+ elif isinstance(definition.value, _XdrStruct):
+ gen = XdrStructGenerator(language, peer)
+ elif isinstance(definition.value, _XdrUnion):
+ gen = XdrUnionGenerator(language, peer)
+ elif isinstance(definition.value, _RpcProgram):
+ gen = XdrProgramGenerator(language, peer)
+ else:
+ continue
+ gen.emit_declaration(definition.value)
+
+
+def handle_parse_error(e: UnexpectedInput) -> bool:
+ """Simple parse error reporting, no recovery attempted"""
+ print(e)
+ return True
+
+
+def subcmd(args: Namespace) -> int:
+ """Generate definitions and declarations"""
+
+ set_xdr_annotate(args.annotate)
+ parser = xdr_parser()
+ with open(args.filename, encoding="utf-8") as f:
+ parse_tree = parser.parse(f.read(), on_error=handle_parse_error)
+ ast = transform_parse_tree(parse_tree)
+
+ gen = XdrHeaderTopGenerator(args.language, args.peer)
+ gen.emit_declaration(args.filename, ast)
+
+ emit_header_declarations(ast, args.language, args.peer)
+
+ gen = XdrHeaderBottomGenerator(args.language, args.peer)
+ gen.emit_declaration(args.filename, ast)
+
+ return 0
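
The subcommand can also be exercised directly with a hand-built Namespace; a
hedged sketch (the real tool fills these attributes in via argparse, and the
input path here is invented):

    # Sketch only: drive the declarations subcommand by hand.
    from argparse import Namespace
    from subcmds import declarations

    args = Namespace(
        filename="example.x",  # invented input path
        language="C",
        peer="server",
        annotate=False,
    )
    declarations.subcmd(args)  # prints the header declarations
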
diff --git a/tools/net/sunrpc/xdrgen/subcmds/definitions.py b/tools/net/sunrpc/xdrgen/subcmds/definitions.py
new file mode 100644
index 000000000000..5cd13d53221f
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/subcmds/definitions.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Translate an XDR specification into executable code that
+can be compiled for the Linux kernel."""
+
+import logging
+
+from argparse import Namespace
+from lark import logger
+from lark.exceptions import UnexpectedInput
+
+from generators.constant import XdrConstantGenerator
+from generators.enum import XdrEnumGenerator
+from generators.header_bottom import XdrHeaderBottomGenerator
+from generators.header_top import XdrHeaderTopGenerator
+from generators.pointer import XdrPointerGenerator
+from generators.program import XdrProgramGenerator
+from generators.typedef import XdrTypedefGenerator
+from generators.struct import XdrStructGenerator
+from generators.union import XdrUnionGenerator
+
+from xdr_ast import transform_parse_tree, Specification
+from xdr_ast import _RpcProgram, _XdrConstant, _XdrEnum, _XdrPointer
+from xdr_ast import _XdrTypedef, _XdrStruct, _XdrUnion
+from xdr_parse import xdr_parser, set_xdr_annotate
+
+logger.setLevel(logging.INFO)
+
+
+def emit_header_definitions(
+ root: Specification, language: str, peer: str
+) -> None:
+ """Emit header definitions"""
+ for definition in root.definitions:
+ if isinstance(definition.value, _XdrConstant):
+ gen = XdrConstantGenerator(language, peer)
+ elif isinstance(definition.value, _XdrEnum):
+ gen = XdrEnumGenerator(language, peer)
+ elif isinstance(definition.value, _XdrPointer):
+ gen = XdrPointerGenerator(language, peer)
+ elif isinstance(definition.value, _RpcProgram):
+ gen = XdrProgramGenerator(language, peer)
+ elif isinstance(definition.value, _XdrTypedef):
+ gen = XdrTypedefGenerator(language, peer)
+ elif isinstance(definition.value, _XdrStruct):
+ gen = XdrStructGenerator(language, peer)
+ elif isinstance(definition.value, _XdrUnion):
+ gen = XdrUnionGenerator(language, peer)
+ else:
+ continue
+ gen.emit_definition(definition.value)
+
+
+def handle_parse_error(e: UnexpectedInput) -> bool:
+ """Simple parse error reporting, no recovery attempted"""
+ print(e)
+ return True
+
+
+def subcmd(args: Namespace) -> int:
+ """Generate definitions"""
+
+ set_xdr_annotate(args.annotate)
+ parser = xdr_parser()
+ with open(args.filename, encoding="utf-8") as f:
+ parse_tree = parser.parse(f.read(), on_error=handle_parse_error)
+ ast = transform_parse_tree(parse_tree)
+
+ gen = XdrHeaderTopGenerator(args.language, args.peer)
+ gen.emit_definition(args.filename, ast)
+
+ emit_header_definitions(ast, args.language, args.peer)
+
+ gen = XdrHeaderBottomGenerator(args.language, args.peer)
+ gen.emit_definition(args.filename, ast)
+
+ return 0
diff --git a/tools/net/sunrpc/xdrgen/subcmds/lint.py b/tools/net/sunrpc/xdrgen/subcmds/lint.py
new file mode 100644
index 000000000000..36cc43717d30
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/subcmds/lint.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Translate an XDR specification into executable code that
+can be compiled for the Linux kernel."""
+
+import logging
+
+from argparse import Namespace
+from lark import logger
+from lark.exceptions import UnexpectedInput
+
+from xdr_parse import xdr_parser
+from xdr_ast import transform_parse_tree
+
+logger.setLevel(logging.DEBUG)
+
+
+def handle_parse_error(e: UnexpectedInput) -> bool:
+ """Simple parse error reporting, no recovery attempted"""
+ print(e)
+ return True
+
+
+def subcmd(args: Namespace) -> int:
+ """Lexical and syntax check of an XDR specification"""
+
+ parser = xdr_parser()
+ with open(args.filename, encoding="utf-8") as f:
+ parse_tree = parser.parse(f.read(), on_error=handle_parse_error)
+ transform_parse_tree(parse_tree)
+
+ return 0
diff --git a/tools/net/sunrpc/xdrgen/subcmds/source.py b/tools/net/sunrpc/xdrgen/subcmds/source.py
new file mode 100644
index 000000000000..00c04ad15b89
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/subcmds/source.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Translate an XDR specification into executable code that
+can be compiled for the Linux kernel."""
+
+import logging
+
+from argparse import Namespace
+from lark import logger
+from lark.exceptions import UnexpectedInput
+
+from generators.source_top import XdrSourceTopGenerator
+from generators.enum import XdrEnumGenerator
+from generators.pointer import XdrPointerGenerator
+from generators.program import XdrProgramGenerator
+from generators.typedef import XdrTypedefGenerator
+from generators.struct import XdrStructGenerator
+from generators.union import XdrUnionGenerator
+
+from xdr_ast import transform_parse_tree, _RpcProgram, Specification
+from xdr_ast import _XdrAst, _XdrEnum, _XdrPointer
+from xdr_ast import _XdrStruct, _XdrTypedef, _XdrUnion
+
+from xdr_parse import xdr_parser, set_xdr_annotate
+
+logger.setLevel(logging.INFO)
+
+
+def emit_source_decoder(node: _XdrAst, language: str, peer: str) -> None:
+ """Emit one XDR decoder function for a source file"""
+ if isinstance(node, _XdrEnum):
+ gen = XdrEnumGenerator(language, peer)
+ elif isinstance(node, _XdrPointer):
+ gen = XdrPointerGenerator(language, peer)
+ elif isinstance(node, _XdrTypedef):
+ gen = XdrTypedefGenerator(language, peer)
+ elif isinstance(node, _XdrStruct):
+ gen = XdrStructGenerator(language, peer)
+ elif isinstance(node, _XdrUnion):
+ gen = XdrUnionGenerator(language, peer)
+ elif isinstance(node, _RpcProgram):
+ gen = XdrProgramGenerator(language, peer)
+ else:
+ return
+ gen.emit_decoder(node)
+
+
+def emit_source_encoder(node: _XdrAst, language: str, peer: str) -> None:
+ """Emit one XDR encoder function for a source file"""
+ if isinstance(node, _XdrEnum):
+ gen = XdrEnumGenerator(language, peer)
+ elif isinstance(node, _XdrPointer):
+ gen = XdrPointerGenerator(language, peer)
+ elif isinstance(node, _XdrTypedef):
+ gen = XdrTypedefGenerator(language, peer)
+ elif isinstance(node, _XdrStruct):
+ gen = XdrStructGenerator(language, peer)
+ elif isinstance(node, _XdrUnion):
+ gen = XdrUnionGenerator(language, peer)
+ elif isinstance(node, _RpcProgram):
+ gen = XdrProgramGenerator(language, peer)
+ else:
+ return
+ gen.emit_encoder(node)
+
+
+def generate_server_source(filename: str, root: Specification, language: str) -> None:
+ """Generate server-side source code"""
+
+ gen = XdrSourceTopGenerator(language, "server")
+ gen.emit_source(filename, root)
+
+ for definition in root.definitions:
+ emit_source_decoder(definition.value, language, "server")
+ for definition in root.definitions:
+ emit_source_encoder(definition.value, language, "server")
+
+
+def generate_client_source(filename: str, root: Specification, language: str) -> None:
+ """Generate server-side source code"""
+
+ gen = XdrSourceTopGenerator(language, "client")
+ gen.emit_source(filename, root)
+
+ # cel: todo: client needs XDR size macros
+
+ for definition in root.definitions:
+ emit_source_encoder(definition.value, language, "client")
+ for definition in root.definitions:
+ emit_source_decoder(definition.value, language, "client")
+
+ # cel: todo: client needs PROC macros
+
+
+def handle_parse_error(e: UnexpectedInput) -> bool:
+ """Simple parse error reporting, no recovery attempted"""
+ print(e)
+ return True
+
+
+def subcmd(args: Namespace) -> int:
+ """Generate encoder and decoder functions"""
+
+ set_xdr_annotate(args.annotate)
+ parser = xdr_parser()
+ with open(args.filename, encoding="utf-8") as f:
+ parse_tree = parser.parse(f.read(), on_error=handle_parse_error)
+ ast = transform_parse_tree(parse_tree)
+ match args.peer:
+ case "server":
+ generate_server_source(args.filename, ast, args.language)
+ case "client":
+ generate_client_source(args.filename, ast, args.language)
+ case _:
+ print("Code generation for", args.peer, "is not yet supported")
+
+ return 0
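For orientation, this is roughly the shape of the server-side source that subcmd() emits once the templates below are rendered: the source_top boilerplate first, then every decoder, then every encoder. A hedged sketch for a spec that defines a single struct; the names "foo", "bar_t", and the member "len" are illustrative, not taken from this patch:

	// SPDX-License-Identifier: GPL-2.0
	// Generated by xdrgen. Manual edits will be lost.

	#include <linux/sunrpc/svc.h>

	#include "fooxdr_gen.h"

	/* struct foo */
	static bool __maybe_unused
	xdrgen_decode_foo(struct xdr_stream *xdr, struct foo *ptr)
	{
		/* member templates expand here; bar_t is an assumed typedef */
		if (!xdrgen_decode_bar_t(xdr, &ptr->len))
			return false;
		return true;
	};

	/* struct foo */
	static bool __maybe_unused
	xdrgen_encode_foo(struct xdr_stream *xdr, const struct foo *value)
	{
		if (!xdrgen_encode_bar_t(xdr, value->len))
			return false;
		return true;
	};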
diff --git a/tools/net/sunrpc/xdrgen/templates/C/constants/definition.j2 b/tools/net/sunrpc/xdrgen/templates/C/constants/definition.j2
new file mode 100644
index 000000000000..d648ca4193f8
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/constants/definition.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+enum { {{ name }} = {{ value }} };
diff --git a/tools/net/sunrpc/xdrgen/templates/C/enum/declaration/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/enum/declaration/close.j2
new file mode 100644
index 000000000000..ab1e576c9531
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/enum/declaration/close.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, enum {{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, enum {{ name }} value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/enum/decoder/enum.j2 b/tools/net/sunrpc/xdrgen/templates/C/enum/decoder/enum.j2
new file mode 100644
index 000000000000..341d829afeda
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/enum/decoder/enum.j2
@@ -0,0 +1,19 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* enum {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, enum {{ name }} *ptr)
+{
+ u32 val;
+
+ if (xdr_stream_decode_u32(xdr, &val) < 0)
+ return false;
+ *ptr = val;
+ return true;
+}
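Rendered for a hypothetical enum named "example_stat" that does not appear in public_apis, the decoder template above expands to:

	/* enum example_stat */
	static bool __maybe_unused
	xdrgen_decode_example_stat(struct xdr_stream *xdr, enum example_stat *ptr)
	{
		u32 val;

		if (xdr_stream_decode_u32(xdr, &val) < 0)
			return false;
		*ptr = val;
		return true;
	}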
diff --git a/tools/net/sunrpc/xdrgen/templates/C/enum/definition/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/enum/definition/close.j2
new file mode 100644
index 000000000000..9e62344a976a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/enum/definition/close.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/enum/definition/enumerator.j2 b/tools/net/sunrpc/xdrgen/templates/C/enum/definition/enumerator.j2
new file mode 100644
index 000000000000..ff0b893b8b14
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/enum/definition/enumerator.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ {{ name }} = {{ value }},
diff --git a/tools/net/sunrpc/xdrgen/templates/C/enum/definition/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/enum/definition/open.j2
new file mode 100644
index 000000000000..b25335221d48
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/enum/definition/open.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+enum {{ name }} {
diff --git a/tools/net/sunrpc/xdrgen/templates/C/enum/encoder/enum.j2 b/tools/net/sunrpc/xdrgen/templates/C/enum/encoder/enum.j2
new file mode 100644
index 000000000000..bd0a770e50f2
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/enum/encoder/enum.j2
@@ -0,0 +1,14 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* enum {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, enum {{ name }} value)
+{
+ return xdr_stream_encode_u32(xdr, value) == XDR_UNIT;
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/header_bottom/declaration/header.j2 b/tools/net/sunrpc/xdrgen/templates/C/header_bottom/declaration/header.j2
new file mode 100644
index 000000000000..0bb8c6fc0c20
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/header_bottom/declaration/header.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+#endif /* _LINUX_XDRGEN_{{ infix }}_DECL_H */
diff --git a/tools/net/sunrpc/xdrgen/templates/C/header_bottom/definition/header.j2 b/tools/net/sunrpc/xdrgen/templates/C/header_bottom/definition/header.j2
new file mode 100644
index 000000000000..69069d08dc91
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/header_bottom/definition/header.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+#endif /* _LINUX_XDRGEN_{{ infix }}_DEF_H */
diff --git a/tools/net/sunrpc/xdrgen/templates/C/header_top/declaration/header.j2 b/tools/net/sunrpc/xdrgen/templates/C/header_top/declaration/header.j2
new file mode 100644
index 000000000000..ebb4e1d32f85
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/header_top/declaration/header.j2
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generated by xdrgen. Manual edits will be lost. */
+/* XDR specification file: {{ filename }} */
+/* XDR specification modification time: {{ mtime }} */
+
+#ifndef _LINUX_XDRGEN_{{ infix }}_DECL_H
+#define _LINUX_XDRGEN_{{ infix }}_DECL_H
+
+#include <linux/types.h>
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/xdrgen/_defs.h>
+#include <linux/sunrpc/xdrgen/_builtins.h>
+#include <linux/sunrpc/xdrgen/{{ infix.lower() }}.h>
diff --git a/tools/net/sunrpc/xdrgen/templates/C/header_top/definition/header.j2 b/tools/net/sunrpc/xdrgen/templates/C/header_top/definition/header.j2
new file mode 100644
index 000000000000..92f1fd4ba024
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/header_top/definition/header.j2
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generated by xdrgen. Manual edits will be lost. */
+/* XDR specification file: {{ filename }} */
+/* XDR specification modification time: {{ mtime }} */
+
+#ifndef _LINUX_XDRGEN_{{ infix }}_DEF_H
+#define _LINUX_XDRGEN_{{ infix }}_DEF_H
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdrgen/_defs.h>
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/declaration/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/declaration/close.j2
new file mode 100644
index 000000000000..816291184e8c
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/declaration/close.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, struct {{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const struct {{ name }} *value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/basic.j2
new file mode 100644
index 000000000000..cde4ab53f4be
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/basic.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (basic) */
+{% endif %}
+ if (!xdrgen_decode_{{ type }}(xdr, &ptr->{{ name }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j2
new file mode 100644
index 000000000000..5bf010665f84
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/close.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_array.j2
new file mode 100644
index 000000000000..cfd64217ad82
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_array.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (fixed-length array) */
+{% endif %}
+ for (u32 i = 0; i < {{ size }}; i++) {
+		if (!xdrgen_decode_{{ type }}(xdr, &ptr->{{ name }}[i]))
+ return false;
+ }
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_opaque.j2
new file mode 100644
index 000000000000..b4695ece1884
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/fixed_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (fixed-length opaque) */
+{% endif %}
+ if (xdr_stream_decode_opaque_fixed(xdr, ptr->{{ name }}, {{ size }}) < 0)
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/open.j2
new file mode 100644
index 000000000000..c093d9e3c9ad
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/open.j2
@@ -0,0 +1,22 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* pointer {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, struct {{ name }} *ptr)
+{
+ bool opted;
+
+{% if annotate %}
+ /* opted */
+{% endif %}
+ if (!xdrgen_decode_bool(xdr, &opted))
+ return false;
+ if (!opted)
+ return true;
+
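An XDR optional (pointer) is serialized as a boolean discriminant followed by the body only when the datum is present; this open template decodes the discriminant and returns early in the absent case, and the member templates that follow fill in the body. A hedged rendering for an invented linked-list node, where xdrgen_decode_entry_value is an assumed helper:

	/* pointer entry */
	static bool __maybe_unused
	xdrgen_decode_entry(struct xdr_stream *xdr, struct entry *ptr)
	{
		bool opted;

		/* opted */
		if (!xdrgen_decode_bool(xdr, &opted))
			return false;
		if (!opted)
			return true;	/* absent on the wire; leave *ptr as-is */

		/* member templates expand here, e.g.: */
		if (!xdrgen_decode_entry_value(xdr, &ptr->value))
			return false;
		return true;
	};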
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/optional_data.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/optional_data.j2
new file mode 100644
index 000000000000..b6834299a04b
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/optional_data.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (optional data) */
+{% endif %}
+ if (!xdrgen_decode_{{ type }}(xdr, ptr->{{ name }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_array.j2
new file mode 100644
index 000000000000..2f943909cdf7
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_array.j2
@@ -0,0 +1,13 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length array) */
+{% endif %}
+ if (xdr_stream_decode_u32(xdr, &ptr->{{ name }}.count) < 0)
+ return false;
+{% if maxsize != "0" %}
+ if (ptr->{{ name }}.count > {{ maxsize }})
+ return false;
+{% endif %}
+ for (u32 i = 0; i < ptr->{{ name }}.count; i++)
+ if (!xdrgen_decode_{{ type }}(xdr, &ptr->{{ name }}.element[i]))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_opaque.j2
new file mode 100644
index 000000000000..9a814de54ae8
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length opaque) */
+{% endif %}
+ if (!xdrgen_decode_opaque(xdr, (opaque *)ptr, {{ maxsize }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_string.j2
new file mode 100644
index 000000000000..12d20b143b43
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/decoder/variable_length_string.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length string) */
+{% endif %}
+ if (!xdrgen_decode_string(xdr, (string *)ptr, {{ maxsize }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/basic.j2
new file mode 100644
index 000000000000..b3430895f311
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/basic.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (basic) */
+{% endif %}
+ {{ classifier }}{{ type }} {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/close.j2
new file mode 100644
index 000000000000..9e62344a976a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/close.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_array.j2
new file mode 100644
index 000000000000..66be836826a0
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_array.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (fixed-length array) */
+{% endif %}
+ {{ type }} {{ name }}[{{ size }}];
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_opaque.j2
new file mode 100644
index 000000000000..0daba19aa0f0
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/fixed_length_opaque.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (fixed-length opaque) */
+{% endif %}
+ u8 {{ name }}[{{ size }}];
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/open.j2
new file mode 100644
index 000000000000..bc886b818d85
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/open.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* pointer {{ name }} */
+{% endif %}
+struct {{ name }} {
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/optional_data.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/optional_data.j2
new file mode 100644
index 000000000000..a33341f45e8f
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/optional_data.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (optional data) */
+{% endif %}
+ {{ classifier }}{{ type }} *{{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_array.j2
new file mode 100644
index 000000000000..5d767f9b3674
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_array.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (variable-length array) */
+{% endif %}
+ struct {
+ u32 count;
+ {{ classifier }}{{ type }} *element;
+ } {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_opaque.j2
new file mode 100644
index 000000000000..4d0cd84be3db
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_opaque.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (variable-length opaque) */
+{% endif %}
+ opaque {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_string.j2
new file mode 100644
index 000000000000..2de2feec77db
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/definition/variable_length_string.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (variable-length string) */
+{% endif %}
+ string {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/basic.j2
new file mode 100644
index 000000000000..a7d3695c5a6a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/basic.j2
@@ -0,0 +1,10 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (basic) */
+{% endif %}
+{% if type in pass_by_reference %}
+ if (!xdrgen_encode_{{ type }}(xdr, &value->{{ name }}))
+{% else %}
+ if (!xdrgen_encode_{{ type }}(xdr, value->{{ name }}))
+{% endif %}
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j2
new file mode 100644
index 000000000000..5bf010665f84
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/close.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_array.j2
new file mode 100644
index 000000000000..b01833a2c7a1
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_array.j2
@@ -0,0 +1,12 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (fixed-length array) */
+{% endif %}
+ for (u32 i = 0; i < {{ size }}; i++) {
+{% if type in pass_by_reference %}
+		if (!xdrgen_encode_{{ type }}(xdr, &value->{{ name }}[i]))
+{% else %}
+		if (!xdrgen_encode_{{ type }}(xdr, value->{{ name }}[i]))
+{% endif %}
+ return false;
+ }
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_opaque.j2
new file mode 100644
index 000000000000..07bc91919898
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/fixed_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (fixed-length opaque) */
+{% endif %}
+ if (xdr_stream_encode_opaque_fixed(xdr, value->{{ name }}, {{ size }}) < 0)
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/open.j2
new file mode 100644
index 000000000000..d67fae200261
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/open.j2
@@ -0,0 +1,20 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* pointer {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const struct {{ name }} *value)
+{
+{% if annotate %}
+ /* opted */
+{% endif %}
+ if (!xdrgen_encode_bool(xdr, value != NULL))
+ return false;
+ if (!value)
+ return true;
+
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/optional_data.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/optional_data.j2
new file mode 100644
index 000000000000..16fb3e09bba1
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/optional_data.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (optional data) */
+{% endif %}
+ if (!xdrgen_encode_{{ type }}(xdr, value->{{ name }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_array.j2
new file mode 100644
index 000000000000..0ec8660d621a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_array.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length array) */
+{% endif %}
+{% if maxsize != "0" %}
+	if (value->{{ name }}.count > {{ maxsize }})
+		return false;
+{% endif %}
+ if (xdr_stream_encode_u32(xdr, value->{{ name }}.count) != XDR_UNIT)
+ return false;
+ for (u32 i = 0; i < value->{{ name }}.count; i++)
+{% if type in pass_by_reference %}
+ if (!xdrgen_encode_{{ type }}(xdr, &value->{{ name }}.element[i]))
+{% else %}
+ if (!xdrgen_encode_{{ type }}(xdr, value->{{ name }}.element[i]))
+{% endif %}
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_opaque.j2
new file mode 100644
index 000000000000..1d477c2d197a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_opaque.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length opaque) */
+{% endif %}
+ if (value->{{ name }}.len > {{ maxsize }})
+ return false;
+ if (xdr_stream_encode_opaque(xdr, value->{{ name }}.data, value->{{ name }}.len) < 0)
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_string.j2
new file mode 100644
index 000000000000..cf65b71eaef3
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/pointer/encoder/variable_length_string.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length string) */
+{% endif %}
+ if (value->{{ name }}.len > {{ maxsize }})
+ return false;
+ if (xdr_stream_encode_opaque(xdr, value->{{ name }}.data, value->{{ name }}.len) < 0)
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/declaration/argument.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/declaration/argument.j2
new file mode 100644
index 000000000000..4364fed19162
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/declaration/argument.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+bool {{ program }}_svc_decode_{{ argument }}(struct svc_rqst *rqstp, struct xdr_stream *xdr);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/declaration/result.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/declaration/result.j2
new file mode 100644
index 000000000000..e0ea1e849910
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/declaration/result.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+bool {{ program }}_svc_encode_{{ result }}(struct svc_rqst *rqstp, struct xdr_stream *xdr);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/decoder/argument.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/decoder/argument.j2
new file mode 100644
index 000000000000..0b1709cca0d4
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/decoder/argument.j2
@@ -0,0 +1,21 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+/**
+ * {{ program }}_svc_decode_{{ argument }} - Decode a {{ argument }} argument
+ * @rqstp: RPC transaction context
+ * @xdr: source XDR data stream
+ *
+ * Return values:
+ * %true: procedure arguments decoded successfully
+ * %false: decode failed
+ */
+bool {{ program }}_svc_decode_{{ argument }}(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+{
+{% if argument == 'void' %}
+ return xdrgen_decode_void(xdr);
+{% else %}
+ struct {{ argument }} *argp = rqstp->rq_argp;
+
+ return xdrgen_decode_{{ argument }}(xdr, argp);
+{% endif %}
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/decoder/result.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/decoder/result.j2
new file mode 100644
index 000000000000..d304eccb5c40
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/decoder/result.j2
@@ -0,0 +1,22 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* Decode {{ result }} results */
+{% endif %}
+static int {{ program }}_xdr_dec_{{ result }}(struct rpc_rqst *req,
+ struct xdr_stream *xdr, void *data)
+{
+{% if result == 'void' %}
+ xdrgen_decode_void(xdr);
+{% else %}
+ struct {{ result }} *result = data;
+
+ if (!xdrgen_decode_{{ result }}(xdr, result))
+ return -EIO;
+ if (result->stat != nfs_ok) {
+ trace_nfs_xdr_status(xdr, (int)result->stat);
+ return {{ program }}_stat_to_errno(result->stat);
+ }
+{% endif %}
+ return 0;
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/definition/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/definition/close.j2
new file mode 100644
index 000000000000..9e62344a976a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/definition/close.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/definition/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/definition/open.j2
new file mode 100644
index 000000000000..f9a6d439f156
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/definition/open.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* procedure numbers for {{ program }} */
+{% endif %}
+enum {
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/definition/procedure.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/definition/procedure.j2
new file mode 100644
index 000000000000..ff0b893b8b14
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/definition/procedure.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ {{ name }} = {{ value }},
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/encoder/argument.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/encoder/argument.j2
new file mode 100644
index 000000000000..2fbb5bd13aec
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/encoder/argument.j2
@@ -0,0 +1,16 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* Encode {{ argument }} arguments */
+{% endif %}
+static void {{ program }}_xdr_enc_{{ argument }}(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+{% if argument == 'void' %}
+ xdrgen_encode_void(xdr);
+{% else %}
+ const struct {{ argument }} *args = data;
+
+ xdrgen_encode_{{ argument }}(xdr, args);
+{% endif %}
+}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/program/encoder/result.j2 b/tools/net/sunrpc/xdrgen/templates/C/program/encoder/result.j2
new file mode 100644
index 000000000000..6fc61a5d47b7
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/program/encoder/result.j2
@@ -0,0 +1,21 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+/**
+ * {{ program }}_svc_encode_{{ result }} - Encode a {{ result }} result
+ * @rqstp: RPC transaction context
+ * @xdr: target XDR data stream
+ *
+ * Return values:
+ * %true: procedure results encoded successfully
+ * %false: encode failed
+ */
+bool {{ program }}_svc_encode_{{ result }}(struct svc_rqst *rqstp, struct xdr_stream *xdr)
+{
+{% if result == 'void' %}
+ return xdrgen_encode_void(xdr);
+{% else %}
+ struct {{ result }} *resp = rqstp->rq_resp;
+
+ return xdrgen_encode_{{ result }}(xdr, resp);
+{% endif %}
+}
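These _svc_ wrappers take (struct svc_rqst *, struct xdr_stream *) and return bool, matching the pc_decode/pc_encode callbacks of struct svc_procedure on the server side. A hedged rendering, with the program name "foo" and result type "GETATTRres" invented for illustration:

	bool foo_svc_encode_GETATTRres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
	{
		struct GETATTRres *resp = rqstp->rq_resp;

		return xdrgen_encode_GETATTRres(xdr, resp);
	}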
diff --git a/tools/net/sunrpc/xdrgen/templates/C/source_top/client.j2 b/tools/net/sunrpc/xdrgen/templates/C/source_top/client.j2
new file mode 100644
index 000000000000..e3a802cbc4d7
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/source_top/client.j2
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Generated by xdrgen. Manual edits will be lost.
+// XDR specification file: {{ filename }}
+// XDR specification modification time: {{ mtime }}
+
+#include <linux/sunrpc/xprt.h>
+
+#include "{{ program }}xdr_gen.h"
diff --git a/tools/net/sunrpc/xdrgen/templates/C/source_top/server.j2 b/tools/net/sunrpc/xdrgen/templates/C/source_top/server.j2
new file mode 100644
index 000000000000..974e1d971e5d
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/source_top/server.j2
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Generated by xdrgen. Manual edits will be lost.
+// XDR specification file: {{ filename }}
+// XDR specification modification time: {{ mtime }}
+
+#include <linux/sunrpc/svc.h>
+
+#include "{{ program }}xdr_gen.h"
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/declaration/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/declaration/close.j2
new file mode 100644
index 000000000000..816291184e8c
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/declaration/close.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, struct {{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const struct {{ name }} *value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/basic.j2
new file mode 100644
index 000000000000..cde4ab53f4be
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/basic.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (basic) */
+{% endif %}
+ if (!xdrgen_decode_{{ type }}(xdr, &ptr->{{ name }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j2
new file mode 100644
index 000000000000..5bf010665f84
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/close.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_array.j2
new file mode 100644
index 000000000000..cfd64217ad82
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_array.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (fixed-length array) */
+{% endif %}
+ for (u32 i = 0; i < {{ size }}; i++) {
+		if (!xdrgen_decode_{{ type }}(xdr, &ptr->{{ name }}[i]))
+ return false;
+ }
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_opaque.j2
new file mode 100644
index 000000000000..b4695ece1884
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/fixed_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (fixed-length opaque) */
+{% endif %}
+ if (xdr_stream_decode_opaque_fixed(xdr, ptr->{{ name }}, {{ size }}) < 0)
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/open.j2
new file mode 100644
index 000000000000..289e67259f55
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/open.j2
@@ -0,0 +1,12 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* struct {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, struct {{ name }} *ptr)
+{
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/optional_data.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/optional_data.j2
new file mode 100644
index 000000000000..b6834299a04b
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/optional_data.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (optional data) */
+{% endif %}
+ if (!xdrgen_decode_{{ type }}(xdr, ptr->{{ name }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_array.j2
new file mode 100644
index 000000000000..2f943909cdf7
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_array.j2
@@ -0,0 +1,13 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length array) */
+{% endif %}
+ if (xdr_stream_decode_u32(xdr, &ptr->{{ name }}.count) < 0)
+ return false;
+{% if maxsize != "0" %}
+ if (ptr->{{ name }}.count > {{ maxsize }})
+ return false;
+{% endif %}
+ for (u32 i = 0; i < ptr->{{ name }}.count; i++)
+ if (!xdrgen_decode_{{ type }}(xdr, &ptr->{{ name }}.element[i]))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j2
new file mode 100644
index 000000000000..9a814de54ae8
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length opaque) */
+{% endif %}
+ if (!xdrgen_decode_opaque(xdr, (opaque *)ptr, {{ maxsize }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_string.j2
new file mode 100644
index 000000000000..12d20b143b43
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/decoder/variable_length_string.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length string) */
+{% endif %}
+ if (!xdrgen_decode_string(xdr, (string *)ptr, {{ maxsize }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/basic.j2
new file mode 100644
index 000000000000..b3430895f311
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/basic.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (basic) */
+{% endif %}
+ {{ classifier }}{{ type }} {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/close.j2
new file mode 100644
index 000000000000..9e62344a976a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/close.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_array.j2
new file mode 100644
index 000000000000..66be836826a0
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_array.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (fixed-length array) */
+{% endif %}
+ {{ type }} {{ name }}[{{ size }}];
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_opaque.j2
new file mode 100644
index 000000000000..0daba19aa0f0
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/fixed_length_opaque.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (fixed-length opaque) */
+{% endif %}
+ u8 {{ name }}[{{ size }}];
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/open.j2
new file mode 100644
index 000000000000..07cbf5424546
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/open.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* struct {{ name }} */
+{% endif %}
+struct {{ name }} {
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/optional_data.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/optional_data.j2
new file mode 100644
index 000000000000..a33341f45e8f
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/optional_data.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (optional data) */
+{% endif %}
+ {{ classifier }}{{ type }} *{{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_array.j2
new file mode 100644
index 000000000000..5d767f9b3674
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_array.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (variable-length array) */
+{% endif %}
+ struct {
+ u32 count;
+ {{ classifier }}{{ type }} *element;
+ } {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_opaque.j2
new file mode 100644
index 000000000000..4d0cd84be3db
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_opaque.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (variable-length opaque) */
+{% endif %}
+ opaque {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_string.j2
new file mode 100644
index 000000000000..2de2feec77db
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/definition/variable_length_string.j2
@@ -0,0 +1,5 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* (variable-length string) */
+{% endif %}
+ string {{ name }};
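Taken together, the open/member/close definition templates above emit an ordinary C struct. A hedged composite for an invented struct that mixes several member kinds (string, u32, and u8 come from the builtin definitions the generated header includes):

	/* struct example -- all names are illustrative */
	struct example {
		u32 id;			/* basic */
		u8 verifier[8];		/* fixed-length opaque */
		struct {
			u32 count;
			u32 *element;
		} slots;		/* variable-length array */
		string tag;		/* variable-length string */
	};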
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/basic.j2
new file mode 100644
index 000000000000..a7d3695c5a6a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/basic.j2
@@ -0,0 +1,10 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (basic) */
+{% endif %}
+{% if type in pass_by_reference %}
+ if (!xdrgen_encode_{{ type }}(xdr, &value->{{ name }}))
+{% else %}
+ if (!xdrgen_encode_{{ type }}(xdr, value->{{ name }}))
+{% endif %}
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j2
new file mode 100644
index 000000000000..5bf010665f84
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/close.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_array.j2
new file mode 100644
index 000000000000..b01833a2c7a1
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_array.j2
@@ -0,0 +1,12 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (fixed-length array) */
+{% endif %}
+ for (u32 i = 0; i < {{ size }}; i++) {
+{% if type in pass_by_reference %}
+		if (!xdrgen_encode_{{ type }}(xdr, &value->{{ name }}[i]))
+{% else %}
+		if (!xdrgen_encode_{{ type }}(xdr, value->{{ name }}[i]))
+{% endif %}
+ return false;
+ }
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_opaque.j2
new file mode 100644
index 000000000000..07bc91919898
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/fixed_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (fixed-length opaque) */
+{% endif %}
+ if (xdr_stream_encode_opaque_fixed(xdr, value->{{ name }}, {{ size }}) < 0)
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/open.j2
new file mode 100644
index 000000000000..2286a3adf82a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/open.j2
@@ -0,0 +1,12 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* struct {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const struct {{ name }} *value)
+{
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/optional_data.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/optional_data.j2
new file mode 100644
index 000000000000..16fb3e09bba1
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/optional_data.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (optional data) */
+{% endif %}
+ if (!xdrgen_encode_{{ type }}(xdr, value->{{ name }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_array.j2
new file mode 100644
index 000000000000..0ec8660d621a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_array.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length array) */
+{% endif %}
+{% if maxsize != "0" %}
+	if (value->{{ name }}.count > {{ maxsize }})
+		return false;
+{% endif %}
+ if (xdr_stream_encode_u32(xdr, value->{{ name }}.count) != XDR_UNIT)
+ return false;
+ for (u32 i = 0; i < value->{{ name }}.count; i++)
+{% if type in pass_by_reference %}
+ if (!xdrgen_encode_{{ type }}(xdr, &value->{{ name }}.element[i]))
+{% else %}
+ if (!xdrgen_encode_{{ type }}(xdr, value->{{ name }}.element[i]))
+{% endif %}
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_opaque.j2
new file mode 100644
index 000000000000..1d477c2d197a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_opaque.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length opaque) */
+{% endif %}
+ if (value->{{ name }}.len > {{ maxsize }})
+ return false;
+ if (xdr_stream_encode_opaque(xdr, value->{{ name }}.data, value->{{ name }}.len) < 0)
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_string.j2
new file mode 100644
index 000000000000..cf65b71eaef3
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/struct/encoder/variable_length_string.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length string) */
+{% endif %}
+ if (value->{{ name }}.len > {{ maxsize }})
+ return false;
+ if (xdr_stream_encode_opaque(xdr, value->{{ name }}.data, value->{{ name }}.len) < 0)
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/basic.j2
new file mode 100644
index 000000000000..455b10bd90ec
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/basic.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ name }} *ptr);
+{% if name in pass_by_reference %}
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ name }} *value);
+{%- else -%}
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ name }} value);
+{% endif %}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_array.j2
new file mode 100644
index 000000000000..3fe3ddd9f359
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_array.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_opaque.j2
new file mode 100644
index 000000000000..3fe3ddd9f359
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/fixed_length_opaque.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_array.j2
new file mode 100644
index 000000000000..3fe3ddd9f359
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_array.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_opaque.j2
new file mode 100644
index 000000000000..3fe3ddd9f359
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_opaque.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_string.j2
new file mode 100644
index 000000000000..3fe3ddd9f359
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/declaration/variable_length_string.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value);
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j2
new file mode 100644
index 000000000000..da4709403dc9
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/basic.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ name }} *ptr)
+{
+{% if annotate %}
+ /* (basic) */
+{% endif %}
+ return xdrgen_decode_{{ type }}(xdr, ptr);
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j2
new file mode 100644
index 000000000000..d7c80e472fe3
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_array.j2
@@ -0,0 +1,25 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr)
+{
+{% if annotate %}
+ /* (fixed-length array) */
+{% endif %}
+ for (u32 i = 0; i < {{ size }}; i++) {
+{%- if classifier == '' %}
+		if (!xdrgen_decode_{{ type }}(xdr, (*ptr)[i]))
+{% else %}
+		if (!xdrgen_decode_{{ type }}(xdr, &(*ptr)[i]))
+{% endif %}
+ return false;
+ }
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2
new file mode 100644
index 000000000000..8b4ff08c49e5
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr)
+{
+{% if annotate %}
+ /* (fixed-length opaque) */
+{% endif %}
+ return xdr_stream_decode_opaque_fixed(xdr, ptr, {{ size }}) >= 0;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j2
new file mode 100644
index 000000000000..e74ffdd98463
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_array.j2
@@ -0,0 +1,26 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr)
+{
+{% if annotate %}
+ /* (variable-length array) */
+{% endif %}
+ if (xdr_stream_decode_u32(xdr, &ptr->count) < 0)
+ return false;
+{% if maxsize != "0" %}
+ if (ptr->count > {{ maxsize }})
+ return false;
+{% endif %}
+ for (u32 i = 0; i < ptr->count; i++)
+ if (!xdrgen_decode_{{ type }}(xdr, &ptr->element[i]))
+ return false;
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j2
new file mode 100644
index 000000000000..f28f8b228ad5
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_opaque.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr)
+{
+{% if annotate %}
+ /* (variable-length opaque) */
+{% endif %}
+ return xdrgen_decode_opaque(xdr, ptr, {{ maxsize }});
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_string.j2
new file mode 100644
index 000000000000..56c5a17d6a70
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/variable_length_string.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr)
+{
+{% if annotate %}
+ /* (variable-length string) */
+{% endif %}
+ return xdrgen_decode_string(xdr, ptr, {{ maxsize }});
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/basic.j2
new file mode 100644
index 000000000000..1c5f28135eec
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/basic.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} (basic) */
+{% endif %}
+typedef {{ classifier }}{{ type }} {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_array.j2
new file mode 100644
index 000000000000..c3a67c952e77
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_array.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} (fixed-length array) */
+{% endif %}
+typedef {{ type }} {{ name }}[{{ size }}];
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_opaque.j2
new file mode 100644
index 000000000000..8788b02fe4f5
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/fixed_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} (fixed-length opaque) */
+{% endif %}
+typedef u8 {{ name }}[{{ size }}];
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_array.j2
new file mode 100644
index 000000000000..f03393760545
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_array.j2
@@ -0,0 +1,9 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} (variable-length array) */
+{% endif %}
+typedef struct {
+ u32 count;
+ {{ classifier }}{{ type }} *element;
+} {{ name }};
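For example, a variable-length-array typedef renders as a small counted-array struct; "id_list" is an invented name:

	/* typedef id_list (variable-length array) */
	typedef struct {
		u32 count;
		u32 *element;
	} id_list;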
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_opaque.j2
new file mode 100644
index 000000000000..162f2610af34
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} (variable-length opaque) */
+{% endif %}
+typedef opaque {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_string.j2
new file mode 100644
index 000000000000..c03c2df8e625
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/definition/variable_length_string.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} (variable-length string) */
+{% endif %}
+typedef string {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j2
new file mode 100644
index 000000000000..35effe67e4ef
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/basic.j2
@@ -0,0 +1,21 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+{% if name in pass_by_reference %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} *value)
+{% else %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value)
+{% endif %}
+{
+{% if annotate %}
+ /* (basic) */
+{% endif %}
+ return xdrgen_encode_{{ type }}(xdr, value);
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j2
new file mode 100644
index 000000000000..95202ad5ad2d
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_array.j2
@@ -0,0 +1,25 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value)
+{
+{% if annotate %}
+ /* (fixed-length array) */
+{% endif %}
+ for (u32 i = 0; i < {{ size }}; i++) {
+{% if type in pass_by_reference %}
+		if (!xdrgen_encode_{{ type }}(xdr, &value[i]))
+{% else %}
+		if (!xdrgen_encode_{{ type }}(xdr, value[i]))
+{% endif %}
+ return false;
+ }
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j2
new file mode 100644
index 000000000000..9c66a11b9912
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/fixed_length_opaque.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value)
+{
+{% if annotate %}
+ /* (fixed-length opaque) */
+{% endif %}
+ return xdr_stream_encode_opaque_fixed(xdr, value, {{ size }}) >= 0;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j2
new file mode 100644
index 000000000000..2d2384f64918
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_array.j2
@@ -0,0 +1,30 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value)
+{
+{% if annotate %}
+ /* (variable-length array) */
+{% endif %}
+{% if maxsize != "0" %}
+ if (unlikely(value.count > {{ maxsize }}))
+ return false;
+{% endif %}
+ if (xdr_stream_encode_u32(xdr, value.count) != XDR_UNIT)
+ return false;
+ for (u32 i = 0; i < value.count; i++)
+{% if type in pass_by_reference %}
+ if (!xdrgen_encode_{{ type }}(xdr, &value.element[i]))
+{% else %}
+ if (!xdrgen_encode_{{ type }}(xdr, value.element[i]))
+{% endif %}
+ return false;
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j2
new file mode 100644
index 000000000000..8508f13c95b9
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_opaque.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value)
+{
+{% if annotate %}
+ /* (variable-length opaque) */
+{% endif %}
+ return xdr_stream_encode_opaque(xdr, value.data, value.len) >= 0;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_string.j2
new file mode 100644
index 000000000000..3d490ff180d0
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/encoder/variable_length_string.j2
@@ -0,0 +1,17 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* typedef {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const {{ classifier }}{{ name }} value)
+{
+{% if annotate %}
+ /* (variable-length string) */
+{% endif %}
+ return xdr_stream_encode_opaque(xdr, value.data, value.len) >= 0;
+};
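
The typedef encoder templates above are small Jinja2 fragments that the generator fills in per declaration. A minimal sketch of rendering one of them by hand, assuming the 'jinja2' Python package and a working directory of tools/net/sunrpc/xdrgen/ (the Environment options and the sample variable values are illustrative, not the generator's actual wiring):

    from jinja2 import Environment, FileSystemLoader

    env = Environment(
        loader=FileSystemLoader("templates/C/typedef/encoder"),
        trim_blocks=True,      # drop the newlines left behind by {% if %} tags
        lstrip_blocks=True,
    )
    template = env.get_template("basic.j2")
    print(template.render(
        annotate=True,
        name="filekind_t",        # hypothetical typedef name
        type="filekind",          # the underlying XDR type
        classifier="",            # no struct/enum tag for this example
        public_apis=[],           # not public: emits static __maybe_unused
        pass_by_reference=set(),  # scalar, so passed by value
    ))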
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/basic.j2
new file mode 100644
index 000000000000..4d97cc5395eb
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/basic.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (basic) */
+{% endif %}
+ if (!xdrgen_decode_{{ type }}(xdr, &ptr->u.{{ name }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/break.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/break.j2
new file mode 100644
index 000000000000..b286d1407029
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/break.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ break;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/case_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/case_spec.j2
new file mode 100644
index 000000000000..5fa2163f0a74
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/case_spec.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ case {{ case }}:
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j2
new file mode 100644
index 000000000000..fdc2dfd1843b
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/close.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ }
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/default_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/default_spec.j2
new file mode 100644
index 000000000000..044a002d0589
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/default_spec.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ default:
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/open.j2
new file mode 100644
index 000000000000..eb9941376e49
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/open.j2
@@ -0,0 +1,12 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* union {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_decode_{{ name }}(struct xdr_stream *xdr, struct {{ name }} *ptr)
+{
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/optional_data.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/optional_data.j2
new file mode 100644
index 000000000000..e4476f5fd8d3
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/optional_data.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (optional data) */
+{% endif %}
+ if (!xdrgen_decode_{{ type }}(xdr, &ptr->u.{{ name }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/switch_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/switch_spec.j2
new file mode 100644
index 000000000000..99b3067ef617
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/switch_spec.j2
@@ -0,0 +1,7 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* discriminant {{ name }} */
+{% endif %}
+ if (!xdrgen_decode_{{ type }}(xdr, &ptr->{{ name }}))
+ return false;
+ switch (ptr->{{ name }}) {
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_array.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_array.j2
new file mode 100644
index 000000000000..51ad736d2530
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_array.j2
@@ -0,0 +1,13 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length array) */
+{% endif %}
+ if (xdr_stream_decode_u32(xdr, &count) < 0)
+ return false;
+ if (count > {{ maxsize }})
+ return false;
+ for (u32 i = 0; i < count; i++) {
+		if (!xdrgen_decode_{{ type }}(xdr, &ptr->{{ name }}.items[i]))
+ return false;
+ }
+ ptr->{{ name }}.len = count;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_opaque.j2
new file mode 100644
index 000000000000..c9d88ed29c78
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_opaque.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length opaque) */
+{% endif %}
+ if (!xdrgen_decode_opaque(xdr, (struct opaque *)ptr->u.{{ name }}, {{ maxsize }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_string.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_string.j2
new file mode 100644
index 000000000000..83b6e5a14e7f
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/variable_length_string.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (variable-length string) */
+{% endif %}
+ if (!xdrgen_decode_string(xdr, (struct string *)ptr->u.{{ name }}, {{ maxsize }}))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/decoder/void.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/void.j2
new file mode 100644
index 000000000000..65205ce37b36
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/decoder/void.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ if (!xdrgen_decode_void(xdr))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/definition/case_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/definition/case_spec.j2
new file mode 100644
index 000000000000..52f8d131b805
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/definition/case_spec.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ {{ classifier }}{{ type }} {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/definition/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/definition/close.j2
new file mode 100644
index 000000000000..01d716d0099e
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/definition/close.j2
@@ -0,0 +1,8 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ } u;
+};
+{%- if name in public_apis %}
+
+bool xdrgen_decode_{{ name }}(struct xdr_stream *xdr, struct {{ name }} *ptr);
+bool xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const struct {{ name }} *ptr);
+{%- endif -%}
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/definition/default_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/definition/default_spec.j2
new file mode 100644
index 000000000000..52f8d131b805
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/definition/default_spec.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ {{ classifier }}{{ type }} {{ name }};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/definition/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/definition/open.j2
new file mode 100644
index 000000000000..20fcfd1fc4e5
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/definition/open.j2
@@ -0,0 +1,6 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* union {{ name }} */
+{% endif %}
+struct {{ name }} {
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/definition/switch_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/definition/switch_spec.j2
new file mode 100644
index 000000000000..3e552732502c
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/definition/switch_spec.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ {{ classifier }}{{ type }} {{ name }};
+ union {
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/basic.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/basic.j2
new file mode 100644
index 000000000000..6452d75c6f9a
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/basic.j2
@@ -0,0 +1,10 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* member {{ name }} (basic) */
+{% endif %}
+{% if type in pass_by_reference %}
+ if (!xdrgen_encode_{{ type }}(xdr, &ptr->u.{{ name }}))
+{% else %}
+ if (!xdrgen_encode_{{ type }}(xdr, ptr->u.{{ name }}))
+{% endif %}
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/break.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/break.j2
new file mode 100644
index 000000000000..b286d1407029
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/break.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ break;
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/case_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/case_spec.j2
new file mode 100644
index 000000000000..5fa2163f0a74
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/case_spec.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ case {{ case }}:
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j2
new file mode 100644
index 000000000000..fdc2dfd1843b
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/close.j2
@@ -0,0 +1,4 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ }
+ return true;
+};
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/default_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/default_spec.j2
new file mode 100644
index 000000000000..044a002d0589
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/default_spec.j2
@@ -0,0 +1,2 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ default:
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/open.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/open.j2
new file mode 100644
index 000000000000..e5a206df10c6
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/open.j2
@@ -0,0 +1,12 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+
+{% if annotate %}
+/* union {{ name }} */
+{% endif %}
+{% if name in public_apis %}
+bool
+{% else %}
+static bool __maybe_unused
+{% endif %}
+xdrgen_encode_{{ name }}(struct xdr_stream *xdr, const struct {{ name }} *ptr)
+{
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/switch_spec.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/switch_spec.j2
new file mode 100644
index 000000000000..c8c3ecbe038b
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/switch_spec.j2
@@ -0,0 +1,7 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+{% if annotate %}
+ /* discriminant {{ name }} */
+{% endif %}
+ if (!xdrgen_encode_{{ type }}(xdr, ptr->{{ name }}))
+ return false;
+ switch (ptr->{{ name }}) {
diff --git a/tools/net/sunrpc/xdrgen/templates/C/union/encoder/void.j2 b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/void.j2
new file mode 100644
index 000000000000..84e7c2127d75
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/templates/C/union/encoder/void.j2
@@ -0,0 +1,3 @@
+{# SPDX-License-Identifier: GPL-2.0 #}
+ if (!xdrgen_encode_void(xdr))
+ return false;
diff --git a/tools/net/sunrpc/xdrgen/tests/test.x b/tools/net/sunrpc/xdrgen/tests/test.x
new file mode 100644
index 000000000000..90c8587f6fe5
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/tests/test.x
@@ -0,0 +1,36 @@
+/* Sample XDR specification from RFC 1832 Section 5.5 */
+
+const MAXUSERNAME = 32; /* max length of a user name */
+const MAXFILELEN = 65535; /* max length of a file */
+const MAXNAMELEN = 255; /* max length of a file name */
+
+/*
+ * Types of files:
+ */
+enum filekind {
+ TEXT = 0, /* ascii data */
+ DATA = 1, /* raw data */
+ EXEC = 2 /* executable */
+};
+
+/*
+ * File information, per kind of file:
+ */
+union filetype switch (filekind kind) {
+case TEXT:
+ void; /* no extra information */
+case DATA:
+ string creator<MAXNAMELEN>; /* data creator */
+case EXEC:
+ string interpretor<MAXNAMELEN>; /* program interpretor */
+};
+
+/*
+ * A complete file:
+ */
+struct file {
+ string filename<MAXNAMELEN>; /* name of file */
+ filetype type; /* info about file */
+ string owner<MAXUSERNAME>; /* owner of file */
+ opaque data<MAXFILELEN>; /* file data */
+};
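
The sample spec exercises constants, an enum, a discriminated union, variable-length strings, and variable-length opaque data, so it doubles as a quick smoke test for the generator. A minimal sketch of driving the lint subcommand on it from Python, assuming the working directory is tools/net/sunrpc/xdrgen/:

    import subprocess

    # "lint" parses the spec and reports problems via output/exit status.
    subprocess.run(["./xdrgen", "lint", "tests/test.x"], check=True)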
diff --git a/tools/net/sunrpc/xdrgen/xdr_ast.py b/tools/net/sunrpc/xdrgen/xdr_ast.py
new file mode 100644
index 000000000000..dbd3fcf9c957
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/xdr_ast.py
@@ -0,0 +1,510 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Define and implement the Abstract Syntax Tree for the XDR language."""
+
+import sys
+from typing import List
+from dataclasses import dataclass
+
+from lark import ast_utils, Transformer
+from lark.tree import Meta
+
+this_module = sys.modules[__name__]
+
+excluded_apis = []
+header_name = "none"
+public_apis = []
+enums = set()
+structs = set()
+pass_by_reference = set()
+
+
+@dataclass
+class _XdrAst(ast_utils.Ast):
+ """Base class for the XDR abstract syntax tree"""
+
+
+@dataclass
+class _XdrIdentifier(_XdrAst):
+ """Corresponds to 'identifier' in the XDR language grammar"""
+
+ symbol: str
+
+
+@dataclass
+class _XdrValue(_XdrAst):
+ """Corresponds to 'value' in the XDR language grammar"""
+
+ value: str
+
+
+@dataclass
+class _XdrConstantValue(_XdrAst):
+ """Corresponds to 'constant' in the XDR language grammar"""
+
+ value: int
+
+
+@dataclass
+class _XdrTypeSpecifier(_XdrAst):
+ """Corresponds to 'type_specifier' in the XDR language grammar"""
+
+ type_name: str
+ c_classifier: str
+
+
+@dataclass
+class _XdrDefinedType(_XdrTypeSpecifier):
+ """Corresponds to a type defined by the input specification"""
+
+
+@dataclass
+class _XdrBuiltInType(_XdrTypeSpecifier):
+ """Corresponds to a built-in XDR type"""
+
+
+@dataclass
+class _XdrDeclaration(_XdrAst):
+ """Base class of XDR type declarations"""
+
+
+@dataclass
+class _XdrFixedLengthOpaque(_XdrDeclaration):
+ """A fixed-length opaque declaration"""
+
+ name: str
+ size: str
+ template: str = "fixed_length_opaque"
+
+
+@dataclass
+class _XdrVariableLengthOpaque(_XdrDeclaration):
+ """A variable-length opaque declaration"""
+
+ name: str
+ maxsize: str
+ template: str = "variable_length_opaque"
+
+
+@dataclass
+class _XdrVariableLengthString(_XdrDeclaration):
+ """A (NUL-terminated) variable-length string declaration"""
+
+ name: str
+ maxsize: str
+ template: str = "variable_length_string"
+
+
+@dataclass
+class _XdrFixedLengthArray(_XdrDeclaration):
+ """A fixed-length array declaration"""
+
+ name: str
+ spec: _XdrTypeSpecifier
+ size: str
+ template: str = "fixed_length_array"
+
+
+@dataclass
+class _XdrVariableLengthArray(_XdrDeclaration):
+ """A variable-length array declaration"""
+
+ name: str
+ spec: _XdrTypeSpecifier
+ maxsize: str
+ template: str = "variable_length_array"
+
+
+@dataclass
+class _XdrOptionalData(_XdrDeclaration):
+ """An 'optional_data' declaration"""
+
+ name: str
+ spec: _XdrTypeSpecifier
+ template: str = "optional_data"
+
+
+@dataclass
+class _XdrBasic(_XdrDeclaration):
+ """A 'basic' declaration"""
+
+ name: str
+ spec: _XdrTypeSpecifier
+ template: str = "basic"
+
+
+@dataclass
+class _XdrVoid(_XdrDeclaration):
+ """A void declaration"""
+
+ template: str = "void"
+
+
+@dataclass
+class _XdrConstant(_XdrAst):
+ """Corresponds to 'constant_def' in the grammar"""
+
+ name: str
+ value: str
+
+
+@dataclass
+class _XdrEnumerator(_XdrAst):
+ """An 'identifier = value' enumerator"""
+
+ name: str
+ value: str
+
+
+@dataclass
+class _XdrEnum(_XdrAst):
+ """An XDR enum definition"""
+
+ name: str
+ minimum: int
+ maximum: int
+ enumerators: List[_XdrEnumerator]
+
+
+@dataclass
+class _XdrStruct(_XdrAst):
+ """An XDR struct definition"""
+
+ name: str
+ fields: List[_XdrDeclaration]
+
+
+@dataclass
+class _XdrPointer(_XdrAst):
+ """An XDR pointer definition"""
+
+ name: str
+ fields: List[_XdrDeclaration]
+
+
+@dataclass
+class _XdrTypedef(_XdrAst):
+ """An XDR typedef"""
+
+ declaration: _XdrDeclaration
+
+
+@dataclass
+class _XdrCaseSpec(_XdrAst):
+ """One case in an XDR union"""
+
+ values: List[str]
+ arm: _XdrDeclaration
+ template: str = "case_spec"
+
+
+@dataclass
+class _XdrDefaultSpec(_XdrAst):
+ """Default case in an XDR union"""
+
+ arm: _XdrDeclaration
+ template: str = "default_spec"
+
+
+@dataclass
+class _XdrUnion(_XdrAst):
+ """An XDR union"""
+
+ name: str
+ discriminant: _XdrDeclaration
+ cases: List[_XdrCaseSpec]
+ default: _XdrDeclaration
+
+
+@dataclass
+class _RpcProcedure(_XdrAst):
+ """RPC procedure definition"""
+
+ name: str
+ number: str
+ argument: _XdrTypeSpecifier
+ result: _XdrTypeSpecifier
+
+
+@dataclass
+class _RpcVersion(_XdrAst):
+ """RPC version definition"""
+
+ name: str
+ number: str
+ procedures: List[_RpcProcedure]
+
+
+@dataclass
+class _RpcProgram(_XdrAst):
+ """RPC program definition"""
+
+ name: str
+ number: str
+ versions: List[_RpcVersion]
+
+
+@dataclass
+class _Pragma(_XdrAst):
+ """Empty class for pragma directives"""
+
+
+@dataclass
+class Definition(_XdrAst, ast_utils.WithMeta):
+ """Corresponds to 'definition' in the grammar"""
+
+ meta: Meta
+ value: _XdrAst
+
+
+@dataclass
+class Specification(_XdrAst, ast_utils.AsList):
+ """Corresponds to 'specification' in the grammar"""
+
+ definitions: List[Definition]
+
+
+class ParseToAst(Transformer):
+ """Functions that transform productions into AST nodes"""
+
+ def identifier(self, children):
+ """Instantiate one _XdrIdentifier object"""
+ return _XdrIdentifier(children[0].value)
+
+ def value(self, children):
+ """Instantiate one _XdrValue object"""
+ if isinstance(children[0], _XdrIdentifier):
+ return _XdrValue(children[0].symbol)
+ return _XdrValue(children[0].children[0].value)
+
+ def constant(self, children):
+ """Instantiate one _XdrConstantValue object"""
+ match children[0].data:
+ case "decimal_constant":
+ value = int(children[0].children[0].value, base=10)
+ case "hexadecimal_constant":
+ value = int(children[0].children[0].value, base=16)
+ case "octal_constant":
+ value = int(children[0].children[0].value, base=8)
+ return _XdrConstantValue(value)
+
+ def type_specifier(self, children):
+ """Instantiate one type_specifier object"""
+ c_classifier = ""
+ if isinstance(children[0], _XdrIdentifier):
+ name = children[0].symbol
+ if name in enums:
+ c_classifier = "enum "
+ if name in structs:
+ c_classifier = "struct "
+ return _XdrDefinedType(
+ type_name=name,
+ c_classifier=c_classifier,
+ )
+
+ token = children[0].data
+ return _XdrBuiltInType(
+ type_name=token.value,
+ c_classifier=c_classifier,
+ )
+
+ def constant_def(self, children):
+ """Instantiate one _XdrConstant object"""
+ name = children[0].symbol
+ value = children[1].value
+ return _XdrConstant(name, value)
+
+ # cel: Python can compute a min() and max() for the enumerator values
+ # so that the generated code can perform proper range checking.
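+    # A hedged sketch, assuming decimal enumerator values (as in
+    # tests/test.x); hex/octal literals would need the same base
+    # handling that constant() applies:
+    #   values = [int(e.value, 10) for e in enumerators]
+    #   minimum, maximum = min(values), max(values)
+    #   return _XdrEnum(enum_name, minimum, maximum, enumerators)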
+ def enum(self, children):
+ """Instantiate one _XdrEnum object"""
+ enum_name = children[0].symbol
+ enums.add(enum_name)
+
+ i = 0
+ enumerators = []
+ body = children[1]
+ while i < len(body.children):
+ name = body.children[i].symbol
+ value = body.children[i + 1].value
+ enumerators.append(_XdrEnumerator(name, value))
+ i = i + 2
+
+ return _XdrEnum(enum_name, 0, 0, enumerators)
+
+ def fixed_length_opaque(self, children):
+ """Instantiate one _XdrFixedLengthOpaque declaration object"""
+ name = children[0].symbol
+ size = children[1].value
+
+ return _XdrFixedLengthOpaque(name, size)
+
+ def variable_length_opaque(self, children):
+ """Instantiate one _XdrVariableLengthOpaque declaration object"""
+ name = children[0].symbol
+ if children[1] is not None:
+ maxsize = children[1].value
+ else:
+ maxsize = "0"
+
+ return _XdrVariableLengthOpaque(name, maxsize)
+
+ def variable_length_string(self, children):
+ """Instantiate one _XdrVariableLengthString declaration object"""
+ name = children[0].symbol
+ if children[1] is not None:
+ maxsize = children[1].value
+ else:
+ maxsize = "0"
+
+ return _XdrVariableLengthString(name, maxsize)
+
+ def fixed_length_array(self, children):
+ """Instantiate one _XdrFixedLengthArray declaration object"""
+ spec = children[0]
+ name = children[1].symbol
+ size = children[2].value
+
+ return _XdrFixedLengthArray(name, spec, size)
+
+ def variable_length_array(self, children):
+ """Instantiate one _XdrVariableLengthArray declaration object"""
+ spec = children[0]
+ name = children[1].symbol
+ if children[2] is not None:
+ maxsize = children[2].value
+ else:
+ maxsize = "0"
+
+ return _XdrVariableLengthArray(name, spec, maxsize)
+
+ def optional_data(self, children):
+ """Instantiate one _XdrOptionalData declaration object"""
+ spec = children[0]
+ name = children[1].symbol
+ structs.add(name)
+ pass_by_reference.add(name)
+
+ return _XdrOptionalData(name, spec)
+
+ def basic(self, children):
+ """Instantiate one _XdrBasic object"""
+ spec = children[0]
+ name = children[1].symbol
+
+ return _XdrBasic(name, spec)
+
+ def void(self, children):
+ """Instantiate one _XdrVoid declaration object"""
+
+ return _XdrVoid()
+
+ def struct(self, children):
+ """Instantiate one _XdrStruct object"""
+ name = children[0].symbol
+ structs.add(name)
+ pass_by_reference.add(name)
+ fields = children[1].children
+
+ last_field = fields[-1]
+ if (
+ isinstance(last_field, _XdrOptionalData)
+ and name == last_field.spec.type_name
+ ):
+ return _XdrPointer(name, fields)
+
+ return _XdrStruct(name, fields)
+
+ def typedef(self, children):
+ """Instantiate one _XdrTypedef object"""
+ new_type = children[0]
+ if isinstance(new_type, _XdrBasic) and isinstance(
+ new_type.spec, _XdrDefinedType
+ ):
+ if new_type.spec.type_name in pass_by_reference:
+ pass_by_reference.add(new_type.name)
+
+ return _XdrTypedef(new_type)
+
+ def case_spec(self, children):
+ """Instantiate one _XdrCaseSpec object"""
+ values = []
+ for item in children[0:-1]:
+ values.append(item.value)
+ arm = children[-1]
+
+ return _XdrCaseSpec(values, arm)
+
+ def default_spec(self, children):
+ """Instantiate one _XdrDefaultSpec object"""
+ arm = children[0]
+
+ return _XdrDefaultSpec(arm)
+
+ def union(self, children):
+ """Instantiate one _XdrUnion object"""
+ name = children[0].symbol
+ structs.add(name)
+ pass_by_reference.add(name)
+
+ body = children[1]
+ discriminant = body.children[0].children[0]
+ cases = body.children[1:-1]
+ default = body.children[-1]
+
+ return _XdrUnion(name, discriminant, cases, default)
+
+ def procedure_def(self, children):
+ """Instantiate one _RpcProcedure object"""
+ result = children[0]
+ name = children[1].symbol
+ argument = children[2]
+ number = children[3].value
+
+ return _RpcProcedure(name, number, argument, result)
+
+ def version_def(self, children):
+ """Instantiate one _RpcVersion object"""
+ name = children[0].symbol
+ number = children[-1].value
+ procedures = children[1:-1]
+
+ return _RpcVersion(name, number, procedures)
+
+ def program_def(self, children):
+ """Instantiate one _RpcProgram object"""
+ name = children[0].symbol
+ number = children[-1].value
+ versions = children[1:-1]
+
+ return _RpcProgram(name, number, versions)
+
+ def pragma_def(self, children):
+ """Instantiate one _Pragma object"""
+ directive = children[0].children[0].data
+ match directive:
+ case "exclude_directive":
+ excluded_apis.append(children[1].symbol)
+ case "header_directive":
+ global header_name
+ header_name = children[1].symbol
+ case "public_directive":
+ public_apis.append(children[1].symbol)
+ case _:
+ raise NotImplementedError("Directive not supported")
+ return _Pragma()
+
+
+transformer = ast_utils.create_transformer(this_module, ParseToAst())
+
+
+def transform_parse_tree(parse_tree):
+ """Transform productions into an abstract syntax tree"""
+
+ return transformer.transform(parse_tree)
+
+
+def get_header_name() -> str:
+ """Return header name set by pragma header directive"""
+ return header_name
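
Since every node above is a plain dataclass, later passes can walk the tree without touching lark again. A minimal sketch of summarizing the top-level definitions, assuming this module is importable as xdr_ast (the helper name is hypothetical):

    from xdr_ast import (
        Specification, _XdrConstant, _XdrEnum, _XdrStruct, _XdrUnion,
    )

    def summarize(spec: Specification) -> None:
        """Print one line per top-level XDR definition."""
        for definition in spec.definitions:
            node = definition.value
            if isinstance(node, _XdrConstant):
                print(f"const {node.name} = {node.value}")
            elif isinstance(node, _XdrEnum):
                print(f"enum {node.name} ({len(node.enumerators)} values)")
            elif isinstance(node, (_XdrStruct, _XdrUnion)):
                print(f"compound type {node.name}")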
diff --git a/tools/net/sunrpc/xdrgen/xdr_parse.py b/tools/net/sunrpc/xdrgen/xdr_parse.py
new file mode 100644
index 000000000000..964b44e675df
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/xdr_parse.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Common parsing code for xdrgen"""
+
+from lark import Lark
+
+
+# Set to True to emit annotation comments in generated source
+annotate = False
+
+
+def set_xdr_annotate(set_it: bool) -> None:
+ """Set 'annotate' if --annotate was specified on the command line"""
+ global annotate
+ annotate = set_it
+
+
+def get_xdr_annotate() -> bool:
+ """Return True if --annotate was specified on the command line"""
+ return annotate
+
+
+def xdr_parser() -> Lark:
+ """Return a Lark parser instance configured with the XDR language grammar"""
+
+ return Lark.open(
+ "grammars/xdr.lark",
+ rel_to=__file__,
+ start="specification",
+ debug=True,
+ strict=True,
+ propagate_positions=True,
+ parser="lalr",
+ lexer="contextual",
+ )
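
Together with xdr_ast.py this forms the front half of the pipeline: Lark builds a parse tree, then the transformer collapses it into the dataclass AST. A minimal sketch, assuming the 'lark' package is installed and the snippet runs from tools/net/sunrpc/xdrgen/:

    from xdr_parse import xdr_parser, set_xdr_annotate
    from xdr_ast import transform_parse_tree, get_header_name

    set_xdr_annotate(True)  # request /* ... */ annotations in generated code
    with open("tests/test.x", encoding="utf-8") as f:
        parse_tree = xdr_parser().parse(f.read())
    ast = transform_parse_tree(parse_tree)  # a Specification instance
    print(get_header_name())  # "none" unless the spec carries a header pragma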
diff --git a/tools/net/sunrpc/xdrgen/xdrgen b/tools/net/sunrpc/xdrgen/xdrgen
new file mode 100755
index 000000000000..95f303b2861b
--- /dev/null
+++ b/tools/net/sunrpc/xdrgen/xdrgen
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+# ex: set filetype=python:
+
+"""Translate an XDR specification into executable code that
+can be compiled for the Linux kernel."""
+
+__author__ = "Chuck Lever"
+__copyright__ = "Copyright (c) 2024 Oracle and/or its affiliates."
+__license__ = "GPL-2.0 only"
+__version__ = "0.2"
+
+import sys
+import argparse
+
+from subcmds import definitions
+from subcmds import declarations
+from subcmds import lint
+from subcmds import source
+
+
+sys.path.insert(1, "@pythondir@")
+
+
+def main() -> int:
+ """Parse command-line options"""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description="Convert an XDR specification to Linux kernel source code",
+ epilog="""\
+Copyright (c) 2024 Oracle and/or its affiliates.
+
+License GPLv2: <http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt>
+This is free software. You are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.""",
+ )
+ parser.add_argument(
+ "--version",
+ help="Display the version of this tool",
+ action="version",
+ version=__version__,
+ )
+
+ subcommands = parser.add_subparsers(title="Subcommands", required=True)
+
+ definitions_parser = subcommands.add_parser(
+ "definitions", help="Generate XDR definitions"
+ )
+ definitions_parser.add_argument(
+ "--annotate",
+ action="store_true",
+ default=False,
+ help="Add annotation comments",
+ )
+    definitions_parser.add_argument(
+        "--language",
+        choices=["C",],
+        default="C",
+        help="Output language",
+        type=str,
+    )
+ definitions_parser.add_argument(
+ "--peer",
+ choices=["server", "client",],
+ default="server",
+ help="Generate header code for client or server side",
+ type=str,
+ )
+ definitions_parser.add_argument("filename", help="File containing an XDR specification")
+ definitions_parser.set_defaults(func=definitions.subcmd)
+
+ declarations_parser = subcommands.add_parser(
+ "declarations", help="Generate function declarations"
+ )
+ declarations_parser.add_argument(
+ "--annotate",
+ action="store_true",
+ default=False,
+ help="Add annotation comments",
+ )
+    declarations_parser.add_argument(
+        "--language",
+        choices=["C",],
+        default="C",
+        help="Output language",
+        type=str,
+    )
+ declarations_parser.add_argument(
+ "--peer",
+ choices=["server", "client",],
+ default="server",
+ help="Generate code for client or server side",
+ type=str,
+ )
+ declarations_parser.add_argument("filename", help="File containing an XDR specification")
+ declarations_parser.set_defaults(func=declarations.subcmd)
+
+ linter_parser = subcommands.add_parser("lint", help="Check an XDR specification")
+ linter_parser.add_argument("filename", help="File containing an XDR specification")
+ linter_parser.set_defaults(func=lint.subcmd)
+
+ source_parser = subcommands.add_parser(
+ "source", help="Generate XDR encoder and decoder source code"
+ )
+ source_parser.add_argument(
+ "--annotate",
+ action="store_true",
+ default=False,
+ help="Add annotation comments",
+ )
+    source_parser.add_argument(
+        "--language",
+        choices=["C",],
+        default="C",
+        help="Output language",
+        type=str,
+    )
+ source_parser.add_argument(
+ "--peer",
+ choices=["server", "client",],
+ default="server",
+ help="Generate code for client or server side",
+ type=str,
+ )
+ source_parser.add_argument("filename", help="File containing an XDR specification")
+ source_parser.set_defaults(func=source.subcmd)
+
+ args = parser.parse_args()
+ return args.func(args)
+
+
+try:
+    if __name__ == "__main__":
+        sys.exit(main())
+except (KeyboardInterrupt, BrokenPipeError):
+    # Do not catch SystemExit here, so that each subcommand's exit
+    # status propagates to the shell unchanged.
+    sys.exit(1)
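
The subcommand wiring above follows the stock argparse pattern: each subparser registers its handler via set_defaults(func=...), and main() dispatches with args.func(args). A self-contained sketch of the same pattern (the lint_subcmd handler here is hypothetical, not the real subcmds.lint):

    import argparse
    import sys

    def lint_subcmd(args: argparse.Namespace) -> int:
        print(f"checking {args.filename}")
        return 0

    parser = argparse.ArgumentParser(prog="xdrgen-demo")
    subcommands = parser.add_subparsers(title="Subcommands", required=True)
    linter = subcommands.add_parser("lint")
    linter.add_argument("filename")
    linter.set_defaults(func=lint_subcmd)

    args = parser.parse_args(["lint", "test.x"])
    sys.exit(args.func(args))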
diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c
index aee479d2191c..69b66994f2a1 100644
--- a/tools/objtool/arch/loongarch/decode.c
+++ b/tools/objtool/arch/loongarch/decode.c
@@ -122,7 +122,7 @@ static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst,
switch (inst.reg2i12_format.opcode) {
case addid_op:
if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) {
- /* addi.d sp,sp,si12 or addi.d fp,sp,si12 */
+ /* addi.d sp,sp,si12 or addi.d fp,sp,si12 or addi.d sp,fp,si12 */
insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
ADD_OP(op) {
op->src.type = OP_SRC_ADD;
@@ -132,6 +132,15 @@ static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst,
op->dest.reg = inst.reg2i12_format.rd;
}
}
+ if ((inst.reg2i12_format.rd == CFI_SP) && (inst.reg2i12_format.rj == CFI_FP)) {
+ /* addi.d sp,fp,si12 */
+ struct symbol *func = find_func_containing(insn->sec, insn->offset);
+
+ if (!func)
+ return false;
+
+ func->frame_pointer = true;
+ }
break;
case ldd_op:
if (inst.reg2i12_format.rj == CFI_SP) {
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 01237d167223..6604f5d038aa 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -178,6 +178,52 @@ static bool is_sibling_call(struct instruction *insn)
}
/*
+ * Checks if a string ends with another.
+ */
+static bool str_ends_with(const char *s, const char *sub)
+{
+ const int slen = strlen(s);
+ const int sublen = strlen(sub);
+
+ if (sublen > slen)
+		return false;
+
+ return !memcmp(s + slen - sublen, sub, sublen);
+}
+
+/*
+ * Checks if a function is a Rust "noreturn" one.
+ */
+static bool is_rust_noreturn(const struct symbol *func)
+{
+ /*
+ * If it does not start with "_R", then it is not a Rust symbol.
+ */
+ if (strncmp(func->name, "_R", 2))
+ return false;
+
+ /*
+ * These are just heuristics -- we do not control the precise symbol
+ * name, due to the crate disambiguators (which depend on the compiler)
+ * as well as changes to the source code itself between versions (since
+ * these come from the Rust standard library).
+ */
+ return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
+ str_ends_with(func->name, "_4core6option13unwrap_failed") ||
+ str_ends_with(func->name, "_4core6result13unwrap_failed") ||
+ str_ends_with(func->name, "_4core9panicking5panic") ||
+ str_ends_with(func->name, "_4core9panicking9panic_fmt") ||
+ str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
+ str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
+ str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
+ str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
+ str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
+ strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
+ (strstr(func->name, "_4core5slice5index24slice_") &&
+ str_ends_with(func->name, "_fail"));
+}
+
+/*
* This checks to see if the given function is a "noreturn" function.
*
* For global functions which are outside the scope of this object file, we
@@ -202,10 +248,14 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
if (!func)
return false;
- if (func->bind == STB_GLOBAL || func->bind == STB_WEAK)
+ if (func->bind == STB_GLOBAL || func->bind == STB_WEAK) {
+ if (is_rust_noreturn(func))
+ return true;
+
for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
if (!strcmp(func->name, global_noreturns[i]))
return true;
+ }
if (func->bind == STB_WEAK)
return false;
@@ -2993,10 +3043,27 @@ static int update_cfi_state(struct instruction *insn,
break;
}
- if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
+ if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
+ insn->sym->frame_pointer) {
+ /* addi.d fp,sp,imm on LoongArch */
+ if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
+ cfa->base = CFI_BP;
+ cfa->offset = 0;
+ }
+ break;
+ }
- /* lea disp(%rbp), %rsp */
- cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+ if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
+ /* addi.d sp,fp,imm on LoongArch */
+ if (cfa->base == CFI_BP && cfa->offset == 0) {
+ if (insn->sym->frame_pointer) {
+ cfa->base = CFI_SP;
+ cfa->offset = -op->src.offset;
+ }
+ } else {
+ /* lea disp(%rbp), %rsp */
+ cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+ }
break;
}
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 2b8a69de4db8..d7e815c2fd15 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -68,6 +68,7 @@ struct symbol {
u8 warned : 1;
u8 embedded_insn : 1;
u8 local_label : 1;
+ u8 frame_pointer : 1;
struct list_head pv_target;
struct reloc *relocs;
};
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index 1e8141ef1b15..e7da92489167 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -39,6 +39,8 @@ NORETURN(panic)
NORETURN(panic_smp_self_stop)
NORETURN(rest_init)
NORETURN(rewind_stack_and_make_dead)
+NORETURN(rust_begin_unwind)
+NORETURN(rust_helper_BUG)
NORETURN(sev_es_terminate)
NORETURN(snp_abort)
NORETURN(start_kernel)
diff --git a/tools/pci/Makefile b/tools/pci/Makefile
index 57744778b518..62d41f1a1e2c 100644
--- a/tools/pci/Makefile
+++ b/tools/pci/Makefile
@@ -42,7 +42,7 @@ $(OUTPUT)pcitest: $(PCITEST_IN)
clean:
rm -f $(ALL_PROGRAMS)
rm -rf $(OUTPUT)include/
- find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(bindir); \
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index 441b54234635..470258009ddc 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -16,8 +16,6 @@
#include <linux/pcitest.h>
-#define BILLION 1E9
-
static char *result[] = { "NOT OKAY", "OKAY" };
static char *irq[] = { "LEGACY", "MSI", "MSI-X" };
diff --git a/tools/perf/Build b/tools/perf/Build
index 1d4957957d75..3e486f7df94b 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -1,5 +1,6 @@
perf-bench-y += builtin-bench.o
perf-y += builtin-annotate.o
+perf-y += builtin-check.o
perf-y += builtin-config.o
perf-y += builtin-diff.o
perf-y += builtin-evlist.o
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index b95524bea021..156c5f37b051 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -165,6 +165,9 @@ include::itrace.txt[]
--type-stat::
Show stats for the data type annotation.
+--skip-empty::
+ Do not display empty (or dummy) events.
+
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-check.txt b/tools/perf/Documentation/perf-check.txt
new file mode 100644
index 000000000000..10f69fb6850b
--- /dev/null
+++ b/tools/perf/Documentation/perf-check.txt
@@ -0,0 +1,82 @@
+perf-check(1)
+=============
+
+NAME
+----
+perf-check - check if features are present in perf
+
+SYNOPSIS
+--------
+[verse]
+'perf check' [<options>]
+'perf check' {feature <feature_list>} [<options>]
+
+DESCRIPTION
+-----------
+With no subcommand given, the 'perf check' command simply prints its
+usage message on the standard output.
+
+If the subcommand 'feature' is used, the status of each named feature is
+printed on the standard output (unless '-q' is also passed), i.e. whether
+it is compiled-in/built-in or not.
+'perf check feature' also returns with exit status 0 if the feature
+is built-in, otherwise it returns with exit status 1.
+
+SUBCOMMANDS
+-----------
+
+feature::
+
+	Print whether the given feature(s) are compiled-in or not, and also
+	return with an exit status of 0 if the passed feature(s) are
+	compiled-in, else 1.
+
+	It expects a feature list as an argument. There can be a single feature
+	name/macro, or multiple features can be passed as a comma-separated
+	list, in which case the exit status will be 0 only if all of the passed
+	features are compiled-in.
+
+ The feature names/macros are case-insensitive.
+
+ Example Usage:
+ perf check feature libtraceevent
+ perf check feature HAVE_LIBTRACEEVENT
+ perf check feature libtraceevent,bpf
+
+	Supported feature names/macros:
+ aio / HAVE_AIO_SUPPORT
+ bpf / HAVE_LIBBPF_SUPPORT
+ bpf_skeletons / HAVE_BPF_SKEL
+ debuginfod / HAVE_DEBUGINFOD_SUPPORT
+ dwarf / HAVE_DWARF_SUPPORT
+ dwarf_getlocations / HAVE_DWARF_GETLOCATIONS_SUPPORT
+ dwarf-unwind / HAVE_DWARF_UNWIND_SUPPORT
+ auxtrace / HAVE_AUXTRACE_SUPPORT
+ libaudit / HAVE_LIBAUDIT_SUPPORT
+ libbfd / HAVE_LIBBFD_SUPPORT
+ libcapstone / HAVE_LIBCAPSTONE_SUPPORT
+ libcrypto / HAVE_LIBCRYPTO_SUPPORT
+ libdw-dwarf-unwind / HAVE_DWARF_SUPPORT
+ libelf / HAVE_LIBELF_SUPPORT
+ libnuma / HAVE_LIBNUMA_SUPPORT
+ libopencsd / HAVE_CSTRACE_SUPPORT
+ libperl / HAVE_LIBPERL_SUPPORT
+ libpfm4 / HAVE_LIBPFM
+ libpython / HAVE_LIBPYTHON_SUPPORT
+ libslang / HAVE_SLANG_SUPPORT
+ libtraceevent / HAVE_LIBTRACEEVENT
+ libunwind / HAVE_LIBUNWIND_SUPPORT
+ lzma / HAVE_LZMA_SUPPORT
+ numa_num_possible_cpus / HAVE_LIBNUMA_SUPPORT
+ syscall_table / HAVE_SYSCALL_TABLE_SUPPORT
+ zlib / HAVE_ZLIB_SUPPORT
+ zstd / HAVE_ZSTD_SUPPORT
+
+OPTIONS
+-------
+-q::
+--quiet::
+ Do not print any messages or warnings
+
+	This can be used along with subcommands such as 'perf check feature'
+	to hide unnecessary output in test scripts, e.g.:
+	'perf check feature --quiet libtraceevent'
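
Because the exit status mirrors feature availability, test harnesses can gate on it directly. A minimal sketch from Python, assuming 'perf' is in PATH:

    import subprocess

    have_tep = subprocess.run(
        ["perf", "check", "feature", "--quiet", "libtraceevent"],
        check=False,
    ).returncode == 0
    print("libtraceevent built in" if have_tep else "skip tracepoint tests")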
diff --git a/tools/perf/Documentation/perf-ftrace.txt b/tools/perf/Documentation/perf-ftrace.txt
index d780b93fcf87..eaec8253be68 100644
--- a/tools/perf/Documentation/perf-ftrace.txt
+++ b/tools/perf/Documentation/perf-ftrace.txt
@@ -9,7 +9,7 @@ perf-ftrace - simple wrapper for kernel's ftrace functionality
SYNOPSIS
--------
[verse]
-'perf ftrace' {trace|latency} <command>
+'perf ftrace' {trace|latency|profile} <command>
DESCRIPTION
-----------
@@ -23,6 +23,9 @@ kernel's ftrace infrastructure.
'perf ftrace latency' calculates execution latency of a given function
(optionally with BPF) and display it as a histogram.
+ 'perf ftrace profile' shows an execution profile for each function,
+ including total, average and max times and the number of calls.
+
The following options apply to perf ftrace.
COMMON OPTIONS
@@ -125,6 +128,7 @@ OPTIONS for 'perf ftrace trace'
- verbose - Show process names, PIDs, timestamps, etc.
- thresh=<n> - Setup trace duration threshold in microseconds.
- depth=<n> - Set max depth for function graph tracer to follow.
+ - tail - Print function name at the end.
OPTIONS for 'perf ftrace latency'
@@ -145,6 +149,48 @@ OPTIONS for 'perf ftrace latency'
Use nano-second instead of micro-second as a base unit of the histogram.
+OPTIONS for 'perf ftrace profile'
+---------------------------------
+
+-T::
+--trace-funcs=::
+	Set function filter on the given function (or a glob pattern).
+	Multiple functions can be given by using this option more than once.
+	The argument will be passed to 'set_ftrace_filter' in tracefs.
+
+-N::
+--notrace-funcs=::
+ Do not trace functions given by the argument. Like -T option, this
+ can be used more than once to specify multiple functions (or glob
+ patterns). It will be passed to 'set_ftrace_notrace' in tracefs.
+
+-G::
+--graph-funcs=::
+ Set graph filter on the given function (or a glob pattern). This is
+ useful to trace for functions executed from the given function. This
+ can be used more than once to specify multiple functions. It will be
+ passed to 'set_graph_function' in tracefs.
+
+-g::
+--nograph-funcs=::
+ Set graph notrace filter on the given function (or a glob pattern).
+	Like the -G option, this is useful for the function_graph tracer only and
+	disables tracing for functions executed from the given function. This
+ can be used more than once to specify multiple functions. It will be
+ passed to 'set_graph_notrace' in tracefs.
+
+-m::
+--buffer-size::
+	Set the size of the per-cpu tracing buffer; <size> is expected to
+	be a number with an appended unit character - B/K/M/G.
+
+-s::
+--sort=::
+ Sort the result by the given field. Available values are:
+ total, avg, max, count, name. Default is 'total'.
+
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-trace[1]
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index b66be66fe836..c26524d38f47 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -115,9 +115,9 @@ STAT LIVE OPTIONS
-m::
--mmap-pages=::
- Number of mmap data pages (must be a power of two) or size
- specification with appended unit character - B/K/M/G. The
- size is rounded up to have nearest pages power of two value.
+ Number of mmap data pages (must be a power of two) or size
+ specification in bytes with appended unit character - B/K/M/G.
+ The size is rounded up to the nearest power-of-two page value.
-a::
--all-cpus::
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 6bf2468f59d3..dea005410ec0 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -72,6 +72,7 @@ counted. The following modifiers exist:
W - group is weak and will fallback to non-group if not schedulable,
e - group or event are exclusive and do not share the PMU
b - use BPF aggregration (see perf stat --bpf-counters)
+ R - retire latency value of the event
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 47456b212e99..8a1bd9ff0f86 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -28,15 +28,8 @@ and kernel support is required. See linkperf:perf-arm-spe[1] for a setup guide.
Due to the statistical nature of SPE sampling, not every memory operation will
be sampled.
-OPTIONS
--------
-<command>...::
- Any command you can specify in a shell.
-
--i::
---input=<file>::
- Input file name.
-
+COMMON OPTIONS
+--------------
-f::
--force::
Don't do ownership validation
@@ -45,24 +38,9 @@ OPTIONS
--type=<type>::
Select the memory operation type: load or store (default: load,store)
--D::
---dump-raw-samples::
- Dump the raw decoded samples on the screen in a format that is easy to parse with
- one sample per line.
-
--x::
---field-separator=<separator>::
- Specify the field separator used when dump raw samples (-D option). By default,
- The separator is the space character.
-
--C::
---cpu=<cpu>::
- Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a
- comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default
- is to monitor all CPUS.
--U::
---hide-unresolved::
- Only display entries resolved to a symbol.
+-v::
+--verbose::
+ Be more verbose (show counter open errors, etc)
-p::
--phys-data::
@@ -73,6 +51,9 @@ OPTIONS
RECORD OPTIONS
--------------
+<command>...::
+ Any command you can specify in a shell.
+
-e::
--event <event>::
Event selector. Use 'perf mem record -e list' to list available events.
@@ -85,14 +66,65 @@ RECORD OPTIONS
--all-user::
Configure all used events to run in user space.
--v::
---verbose::
- Be more verbose (show counter open errors, etc)
-
--ldlat <n>::
Specify desired latency for loads event. Supported on Intel and Arm64
processors only. Ignored on other archs.
+REPORT OPTIONS
+--------------
+-i::
+--input=<file>::
+ Input file name.
+
+-C::
+--cpu=<cpu>::
+ Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a
+ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -
+	like 0-2. Default is to monitor all CPUs.
+
+-D::
+--dump-raw-samples::
+ Dump the raw decoded samples on the screen in a format that is easy to parse with
+ one sample per line.
+
+-s::
+--sort=<key>::
+ Group result by given key(s) - multiple keys can be specified
+	in CSV format. The keys specific to memory samples are:
+ symbol_daddr, symbol_iaddr, dso_daddr, locked, tlb, mem, snoop,
+ dcacheline, phys_daddr, data_page_size, blocked.
+
+ - symbol_daddr: name of data symbol being executed on at the time of sample
+ - symbol_iaddr: name of code symbol being executed on at the time of sample
+ - dso_daddr: name of library or module containing the data being executed
+ on at the time of the sample
+ - locked: whether the bus was locked at the time of the sample
+ - tlb: type of tlb access for the data at the time of the sample
+ - mem: type of memory access for the data at the time of the sample
+ - snoop: type of snoop (if any) for the data at the time of the sample
+ - dcacheline: the cacheline the data address is on at the time of the sample
+ - phys_daddr: physical address of data being executed on at the time of sample
+ - data_page_size: the data page size of data being executed on at the time of sample
+ - blocked: reason of blocked load access for the data at the time of the sample
+
+ And the default sort keys are changed to local_weight, mem, sym, dso,
+ symbol_daddr, dso_daddr, snoop, tlb, locked, blocked, local_ins_lat.
+
+-T::
+--type-profile::
+ Show data-type profile result instead of code symbols. This requires
+ the debug information and it will change the default sort keys to:
+ mem, snoop, tlb, type.
+
+-U::
+--hide-unresolved::
+ Only display entries resolved to a symbol.
+
+-x::
+--field-separator=<separator>::
+	Specify the field separator used when dumping raw samples (-D option). By
+	default, the separator is the space character.
+
In addition, for report all perf report options are valid, and for record
all perf record options.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index d6532ed97c02..242223240a08 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -273,10 +273,11 @@ OPTIONS
-m::
--mmap-pages=::
Number of mmap data pages (must be a power of two) or size
- specification with appended unit character - B/K/M/G. The
- size is rounded up to have nearest pages power of two value.
- Also, by adding a comma, the number of mmap pages for AUX
- area tracing can be specified.
+ specification in bytes with appended unit character - B/K/M/G.
+ The size is rounded up to the nearest power-of-two page value.
+ By adding a comma, an additional parameter with the same
+ semantics used for the normal mmap areas can be specified for
+	the AUX tracing area.
-g::
Enables call-graph (stack chain/backtrace) recording for both
@@ -828,6 +829,11 @@ filtered through the mask provided by -C option.
only, as of now. So the applications built without the frame
pointer might see bogus addresses.
+--setup-filter=<action>::
+	Prepare a BPF filter to be used by regular users. The action should be
+	either "pin" or "unpin". The filter can be used after it's pinned.
+
+
include::intel-hybrid.txt[]
SEE ALSO
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index d2b1593ef700..7c66d81ab978 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -614,6 +614,7 @@ include::itrace.txt[]
'Avg Cycles%' - block average sampled cycles / sum of total block average
sampled cycles
'Avg Cycles' - block average sampled cycles
+ 'Branch Counter' - block branch counter histogram (with -v showing the number)
--skip-empty::
Do not print 0 results in the --stat output.
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 84d49f9241b1..3db64954a267 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -212,6 +212,15 @@ OPTIONS for 'perf sched timehist'
--state::
Show task state when it switched out.
+--show-prio::
+ Show task priority.
+
+--prio::
+ Only show events for given task priority(ies). Multiple priorities can be
+ provided as a comma-separated list with no spaces: 0,120. Ranges of
+ priorities are specified with -: 120-129. A combination of both can also be
+ provided: 0,120-129.
+
OPTIONS for 'perf sched replay'
------------------------------
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index ff086ef05a0c..b72866ef270b 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -134,7 +134,7 @@ OPTIONS
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output,
brstackinsn, brstackinsnlen, brstackdisasm, brstackoff, callindent, insn, disasm,
insnlen, synth, phys_addr, metric, misc, srccode, ipc, data_page_size,
- code_page_size, ins_lat, machine_pid, vcpu, cgroup, retire_lat,
+ code_page_size, ins_lat, machine_pid, vcpu, cgroup, retire_lat, brcntr,
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
@@ -369,6 +369,9 @@ OPTIONS
--demangle-kernel::
Demangle kernel symbol names to human readable form (for C++ kernels).
+--addr2line=<path>::
+ Path to addr2line binary.
+
--header
Show perf.data header.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 29756a87ab6f..2bc063672486 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -498,6 +498,14 @@ To interpret the results it is usually needed to know on which
CPUs the workload runs on. If needed the CPUs can be forced using
taskset.
+--record-tpebs::
+Enable automatic sampling on Intel TPEBS retire_latency events (events with the
+:R modifier). Without this option, perf does not capture dynamic retire_latency
+at runtime; currently, a zero value is assigned to the retire_latency event when
+this option is not set. The TPEBS hardware feature is available starting with
+the Intel Granite Rapids microarchitecture. This option exists only on x86_64
+and is meaningful only on Intel platforms with the TPEBS feature.
+
--td-level::
Print the top-down statistics that equal the input level. It allows
users to print the interested top-down metrics level instead of the
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 667e5102075e..af3e4230c72f 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -83,8 +83,8 @@ Default is to monitor all CPUS.
-m <pages>::
--mmap-pages=<pages>::
Number of mmap data pages (must be a power of two) or size
- specification with appended unit character - B/K/M/G. The
- size is rounded up to have nearest pages power of two value.
+ specification in bytes with appended unit character - B/K/M/G.
+ The size is rounded up to the nearest power-of-two page value.
-p <pid>::
--pid=<pid>::
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index f0da8cf63e9a..6e0cc50bbc13 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -106,8 +106,8 @@ filter out the startup phase of the program, which is often very different.
-m::
--mmap-pages=::
Number of mmap data pages (must be a power of two) or size
- specification with appended unit character - B/K/M/G. The
- size is rounded up to have nearest pages power of two value.
+ specification in bytes with appended unit character - B/K/M/G.
+ The size is rounded up to the nearest power-of-two page value.
-C::
--cpu::
diff --git a/tools/perf/Documentation/topdown.txt b/tools/perf/Documentation/topdown.txt
index ae0aee86844f..5c17fff694ee 100644
--- a/tools/perf/Documentation/topdown.txt
+++ b/tools/perf/Documentation/topdown.txt
@@ -325,6 +325,36 @@ other four level 2 metrics by subtracting corresponding metrics as below.
Fetch_Bandwidth = Frontend_Bound - Fetch_Latency
Core_Bound = Backend_Bound - Memory_Bound
+TPEBS in TopDown
+================
+
+TPEBS (Timed PEBS) is one of the new Intel PMU features introduced with the
+Granite Rapids microarchitecture. The TPEBS feature adds a 16-bit retire_latency
+field to the Basic Info group of the PEBS record. It records the core cycles
+from the retirement of the previous instruction to the retirement of the current
+instruction. Please refer to Section 8.4.1 of "Intel® Architecture Instruction
+Set Extensions Programming Reference" for more details about this feature.
+Because this feature extends the PEBS record, sampling with the weight option
+is required to get the retire_latency value.
+
+ perf record -e event_name -W ...
+
+In the most recent release of TMA, the metrics begin to use event retire_latency
+values in some of the metrics' formulas on processors that support the TPEBS
+feature. For previous generations that do not support TPEBS, the values are
+static and predefined per processor family by the hardware architects. Due to
+the diversity of workloads in execution environments, retire_latency values
+measured at run time are more accurate. Therefore, new TMA metrics that use
+TPEBS will provide more accurate performance analysis results.
+
+To support TPEBS in TMA metrics, a new event modifier, :R, is added. Perf
+captures the retire_latency value of the required events (events with :R in
+the metric formula) with perf record. The retire_latency value is then used
+in metric calculation. Currently, this feature is supported through perf stat:
+
+ perf stat -M metric_name --record-tpebs ...
+
+
[1] https://software.intel.com/en-us/top-down-microarchitecture-analysis-method-win
[2] https://sites.google.com/site/analysismethods/yasin-pubs
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 75f3f6e0a231..816d5d84816b 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -51,8 +51,14 @@ else
override DEBUG = 0
endif
+ifeq ($(JOBS),1)
+ BUILD_TYPE := sequential
+else
+ BUILD_TYPE := parallel
+endif
+
define print_msg
- @printf ' BUILD: Doing '\''make \033[33m-j'$(JOBS)'\033[m'\'' parallel build\n'
+ @printf ' BUILD: Doing '\''make \033[33m-j'$(JOBS)'\033[m'\'' $(BUILD_TYPE) build\n'
endef
define make
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index fa679db61f62..4dcf7a0fd235 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -31,14 +31,8 @@ $(call detected_var,SRCARCH)
ifneq ($(NO_SYSCALL_TABLE),1)
NO_SYSCALL_TABLE := 1
- ifeq ($(SRCARCH),x86)
- ifeq (${IS_64_BIT}, 1)
- NO_SYSCALL_TABLE := 0
- endif
- else
- ifeq ($(SRCARCH),$(filter $(SRCARCH),powerpc arm64 s390 mips loongarch))
- NO_SYSCALL_TABLE := 0
- endif
+ ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 powerpc arm64 s390 mips loongarch))
+ NO_SYSCALL_TABLE := 0
endif
ifneq ($(NO_SYSCALL_TABLE),1)
@@ -55,8 +49,9 @@ endif
# Additional ARCH settings for x86
ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
+ CFLAGS += -I$(OUTPUT)arch/x86/include/generated
ifeq (${IS_64_BIT}, 1)
- CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -I$(OUTPUT)arch/x86/include/generated
+ CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
$(call detected,CONFIG_X86_64)
@@ -238,11 +233,7 @@ endif
ifeq ($(DEBUG),0)
CORE_CFLAGS += -DNDEBUG=1
-ifeq ($(CC_NO_CLANG), 0)
- CORE_CFLAGS += -O3
-else
- CORE_CFLAGS += -O6
-endif
+CORE_CFLAGS += -O3
else
CORE_CFLAGS += -g
CXXFLAGS += -g
@@ -710,8 +701,8 @@ ifeq ($(BUILD_BPF_SKEL),1)
BUILD_BPF_SKEL := 0
else
CLANG_VERSION := $(shell $(CLANG) --version | head -1 | sed 's/.*clang version \([[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+\).*/\1/g')
- ifeq ($(call version-lt3,$(CLANG_VERSION),12.0.1),1)
- $(warning Warning: Disabled BPF skeletons as reliable BTF generation needs at least $(CLANG) version 12.0.1)
+ ifeq ($(call version-lt3,$(CLANG_VERSION),16.0.6),1)
+    $(warning Warning: Disabled BPF skeletons as at least $(CLANG) version 16.0.6 is reported to be a working setup with the current BPF based perf features)
BUILD_BPF_SKEL := 0
endif
endif
@@ -985,6 +976,23 @@ ifdef BUILD_NONDISTRO
endif
endif
+ifndef NO_LIBLLVM
+ $(call feature_check,llvm-perf)
+ ifeq ($(feature-llvm-perf), 1)
+ CFLAGS += -DHAVE_LIBLLVM_SUPPORT
+ CFLAGS += $(shell $(LLVM_CONFIG) --cflags)
+ CXXFLAGS += -DHAVE_LIBLLVM_SUPPORT
+ CXXFLAGS += $(shell $(LLVM_CONFIG) --cxxflags)
+ LIBLLVM = $(shell $(LLVM_CONFIG) --libs all) $(shell $(LLVM_CONFIG) --system-libs)
+ EXTLIBS += -L$(shell $(LLVM_CONFIG) --libdir) $(LIBLLVM)
+ EXTLIBS += -lstdc++
+ $(call detected,CONFIG_LIBLLVM)
+ else
+    $(warning No libllvm 13+ found, source file resolution will be slower, please install llvm-devel/llvm-dev)
+ NO_LIBLLVM := 1
+ endif
+endif
+
ifndef NO_DEMANGLE
$(call feature_check,cxa-demangle)
ifeq ($(feature-cxa-demangle), 1)
@@ -1031,17 +1039,6 @@ ifndef NO_LIBZSTD
endif
endif
-ifndef NO_LIBCAP
- ifeq ($(feature-libcap), 1)
- CFLAGS += -DHAVE_LIBCAP_SUPPORT
- EXTLIBS += -lcap
- $(call detected,CONFIG_LIBCAP)
- else
- $(warning No libcap found, disables capability support, please install libcap-devel/libcap-dev)
- NO_LIBCAP := 1
- endif
-endif
-
ifndef NO_BACKTRACE
ifeq ($(feature-backtrace), 1)
CFLAGS += -DHAVE_BACKTRACE_SUPPORT
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index f8148db5fc38..9dd2e8d3f3c9 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -163,6 +163,8 @@ ifneq ($(OUTPUT),)
# for flex/bison parsers.
VPATH += $(OUTPUT)
export VPATH
+# create symlink to the original source
+SOURCE := $(shell ln -sf $(srctree)/tools/perf $(OUTPUT)/source)
endif
ifeq ($(V),1)
@@ -1140,6 +1142,8 @@ install-tests: all install-gtk
$(INSTALL) tests/shell/common/*.pl '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/common'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \
$(INSTALL) tests/shell/base_probe/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_probe'; \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
+		$(INSTALL) tests/shell/base_report/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/base_report'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' ; \
$(INSTALL) tests/shell/coresight/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight'
$(Q)$(MAKE) -C tests/shell/coresight install-tests
@@ -1277,6 +1281,8 @@ clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(
$(OUTPUT)util/intel-pt-decoder/inat-tables.c \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
$(OUTPUT)pmu-events/pmu-events.c \
+ $(OUTPUT)pmu-events/test-empty-pmu-events.c \
+ $(OUTPUT)pmu-events/empty-pmu-events.log \
$(OUTPUT)pmu-events/metric_test.log \
$(OUTPUT)$(fadvise_advice_array) \
$(OUTPUT)$(fsconfig_arrays) \
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index da6231367993..ea891d12f8f4 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -643,7 +643,8 @@ static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu)
static __u64 cs_etm_get_legacy_trace_id(struct perf_cpu cpu)
{
- return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu.cpu);
+ /* Wrap at 48 so that invalid trace IDs aren't saved into files. */
+ return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu.cpu % 48);
}
static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
@@ -654,8 +655,7 @@ static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr,
/* Get trace configuration register */
data[CS_ETMV4_TRCCONFIGR] = cs_etmv4_get_config(itr);
/* traceID set to legacy version, in case new perf running on older system */
- data[CS_ETMV4_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
- CORESIGHT_TRACE_ID_UNUSED_FLAG;
+ data[CS_ETMV4_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu);
/* Get read-only information from sysFS */
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0],
@@ -687,7 +687,7 @@ static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, st
/* Get trace configuration register */
data[CS_ETE_TRCCONFIGR] = cs_etmv4_get_config(itr);
/* traceID set to legacy version, in case new perf running on older system */
- data[CS_ETE_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
+ data[CS_ETE_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu);
/* Get read-only information from sysFS */
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR0], &data[CS_ETE_TRCIDR0]);
@@ -743,8 +743,7 @@ static void cs_etm_get_metadata(struct perf_cpu cpu, u32 *offset,
/* Get configuration register */
info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
/* traceID set to legacy value in case new perf running on old system */
- info->priv[*offset + CS_ETM_ETMTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
- CORESIGHT_TRACE_ID_UNUSED_FLAG;
+ info->priv[*offset + CS_ETM_ETMTRACEIDR] = cs_etm_get_legacy_trace_id(cpu);
/* Get read-only information from sysFS */
cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER],
&info->priv[*offset + CS_ETM_ETMCCER]);
@@ -888,7 +887,6 @@ struct auxtrace_record *cs_etm_record_init(int *err)
}
ptr->cs_etm_pmu = cs_etm_pmu;
- ptr->itr.pmu = cs_etm_pmu;
ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
ptr->itr.recording_options = cs_etm_recording_options;
ptr->itr.info_priv_size = cs_etm_info_priv_size;
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index 1c9541d01722..57dc94a6e38c 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -23,16 +23,19 @@ void perf_pmu__arch_init(struct perf_pmu *pmu)
#ifdef HAVE_AUXTRACE_SUPPORT
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
+ pmu->auxtrace = true;
pmu->selectable = true;
pmu->perf_event_attr_init_default = cs_etm_get_default_config;
#if defined(__aarch64__)
} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
+ pmu->auxtrace = true;
pmu->selectable = true;
pmu->is_uncore = false;
pmu->perf_event_attr_init_default = arm_spe_pmu_default_config;
if (strstarts(pmu->name, "arm_spe_"))
pmu->mem_events = perf_mem_events_arm;
} else if (strstarts(pmu->name, HISI_PTT_PMU_NAME)) {
+ pmu->auxtrace = true;
pmu->selectable = true;
#endif
}
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 4af0c3a0f86e..f86d9f4798bd 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -11,7 +11,8 @@ struct arm64_annotate {
static int arm64_mov__parse(struct arch *arch __maybe_unused,
struct ins_operands *ops,
- struct map_symbol *ms __maybe_unused)
+ struct map_symbol *ms __maybe_unused,
+ struct disasm_line *dl __maybe_unused)
{
char *s = strchr(ops->raw, ','), *target, *endptr;
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 0b52e67edb3b..2be99fdf997d 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
+#include <linux/string.h>
#include <linux/zalloc.h>
#include <time.h>
@@ -132,32 +133,66 @@ static __u64 arm_spe_pmu__sample_period(const struct perf_pmu *arm_spe_pmu)
return sample_period;
}
+static void arm_spe_setup_evsel(struct evsel *evsel, struct perf_cpu_map *cpus)
+{
+ u64 bit;
+
+ evsel->core.attr.freq = 0;
+ evsel->core.attr.sample_period = arm_spe_pmu__sample_period(evsel->pmu);
+ evsel->needs_auxtrace_mmap = true;
+
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace event
+ * must come first.
+ */
+ evlist__to_front(evsel->evlist, evsel);
+
+ /*
+ * In the case of per-cpu mmaps, sample CPU for AUX event;
+ * also enable the timestamp tracing for samples correlation.
+ */
+ if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
+ evsel__set_sample_bit(evsel, CPU);
+ evsel__set_config_if_unset(evsel->pmu, evsel, "ts_enable", 1);
+ }
+
+ /*
+ * Set this only so that perf report knows that SPE generates memory info. It has no effect
+ * on the opening of the event or the SPE data produced.
+ */
+ evsel__set_sample_bit(evsel, DATA_SRC);
+
+ /*
+ * The PHYS_ADDR flag does not affect the driver behaviour, it is used to
+ * inform that the resulting output's SPE samples contain physical addresses
+ * where applicable.
+ */
+ bit = perf_pmu__format_bits(evsel->pmu, "pa_enable");
+ if (evsel->core.attr.config & bit)
+ evsel__set_sample_bit(evsel, PHYS_ADDR);
+}
+
static int arm_spe_recording_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
{
struct arm_spe_recording *sper =
container_of(itr, struct arm_spe_recording, itr);
- struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
- struct evsel *evsel, *arm_spe_evsel = NULL;
+ struct evsel *evsel, *tmp;
struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
struct evsel *tracking_evsel;
int err;
- u64 bit;
sper->evlist = evlist;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->core.attr.type == arm_spe_pmu->type) {
- if (arm_spe_evsel) {
- pr_err("There may be only one " ARM_SPE_PMU_NAME "x event\n");
+ if (evsel__is_aux_event(evsel)) {
+ if (!strstarts(evsel->pmu_name, ARM_SPE_PMU_NAME)) {
+ pr_err("Found unexpected auxtrace event: %s\n",
+ evsel->pmu_name);
return -EINVAL;
}
- evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = arm_spe_pmu__sample_period(arm_spe_pmu);
- evsel->needs_auxtrace_mmap = true;
- arm_spe_evsel = evsel;
opts->full_auxtrace = true;
}
}
@@ -222,37 +257,11 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
opts->auxtrace_snapshot_size);
- /*
- * To obtain the auxtrace buffer file descriptor, the auxtrace event
- * must come first.
- */
- evlist__to_front(evlist, arm_spe_evsel);
-
- /*
- * In the case of per-cpu mmaps, sample CPU for AUX event;
- * also enable the timestamp tracing for samples correlation.
- */
- if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
- evsel__set_sample_bit(arm_spe_evsel, CPU);
- evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
- "ts_enable", 1);
+ evlist__for_each_entry_safe(evlist, tmp, evsel) {
+ if (evsel__is_aux_event(evsel))
+ arm_spe_setup_evsel(evsel, cpus);
}
- /*
- * Set this only so that perf report knows that SPE generates memory info. It has no effect
- * on the opening of the event or the SPE data produced.
- */
- evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
-
- /*
- * The PHYS_ADDR flag does not affect the driver behaviour, it is used to
- * inform that the resulting output's SPE samples contain physical addresses
- * where applicable.
- */
- bit = perf_pmu__format_bits(arm_spe_pmu, "pa_enable");
- if (arm_spe_evsel->core.attr.config & bit)
- evsel__set_sample_bit(arm_spe_evsel, PHYS_ADDR);
-
/* Add dummy event to keep tracking */
err = parse_event(evlist, "dummy:u");
if (err)
@@ -301,12 +310,16 @@ static int arm_spe_snapshot_start(struct auxtrace_record *itr)
struct arm_spe_recording *ptr =
container_of(itr, struct arm_spe_recording, itr);
struct evsel *evsel;
+ int ret = -EINVAL;
evlist__for_each_entry(ptr->evlist, evsel) {
- if (evsel->core.attr.type == ptr->arm_spe_pmu->type)
- return evsel__disable(evsel);
+ if (evsel__is_aux_event(evsel)) {
+ ret = evsel__disable(evsel);
+ if (ret < 0)
+ return ret;
+ }
}
- return -EINVAL;
+ return ret;
}
static int arm_spe_snapshot_finish(struct auxtrace_record *itr)
@@ -314,12 +327,16 @@ static int arm_spe_snapshot_finish(struct auxtrace_record *itr)
struct arm_spe_recording *ptr =
container_of(itr, struct arm_spe_recording, itr);
struct evsel *evsel;
+ int ret = -EINVAL;
evlist__for_each_entry(ptr->evlist, evsel) {
- if (evsel->core.attr.type == ptr->arm_spe_pmu->type)
- return evsel__enable(evsel);
+ if (evsel__is_aux_event(evsel)) {
+ ret = evsel__enable(evsel);
+ if (ret < 0)
+ return ret;
+ }
}
- return -EINVAL;
+ return ret;
}
static int arm_spe_alloc_wrapped_array(struct arm_spe_recording *ptr, int idx)
@@ -497,7 +514,6 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
}
sper->arm_spe_pmu = arm_spe_pmu;
- sper->itr.pmu = arm_spe_pmu;
sper->itr.snapshot_start = arm_spe_snapshot_start;
sper->itr.snapshot_finish = arm_spe_snapshot_finish;
sper->itr.find_snapshot = arm_spe_find_snapshot;
diff --git a/tools/perf/arch/arm64/util/hisi-ptt.c b/tools/perf/arch/arm64/util/hisi-ptt.c
index ba97c8a562a0..eac9739c87e6 100644
--- a/tools/perf/arch/arm64/util/hisi-ptt.c
+++ b/tools/perf/arch/arm64/util/hisi-ptt.c
@@ -174,7 +174,6 @@ struct auxtrace_record *hisi_ptt_recording_init(int *err,
}
pttr->hisi_ptt_pmu = hisi_ptt_pmu;
- pttr->itr.pmu = hisi_ptt_pmu;
pttr->itr.recording_options = hisi_ptt_recording_options;
pttr->itr.info_priv_size = hisi_ptt_info_priv_size;
pttr->itr.info_fill = hisi_ptt_info_fill;
diff --git a/tools/perf/arch/loongarch/annotate/instructions.c b/tools/perf/arch/loongarch/annotate/instructions.c
index 21cc7e4149f7..ab43b1ab51e3 100644
--- a/tools/perf/arch/loongarch/annotate/instructions.c
+++ b/tools/perf/arch/loongarch/annotate/instructions.c
@@ -5,7 +5,8 @@
* Copyright (C) 2020-2023 Loongson Technology Corporation Limited
*/
-static int loongarch_call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
+static int loongarch_call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms,
+ struct disasm_line *dl __maybe_unused)
{
char *c, *endptr, *tok, *name;
struct map *map = ms->map;
@@ -51,7 +52,8 @@ static struct ins_ops loongarch_call_ops = {
.scnprintf = call__scnprintf,
};
-static int loongarch_jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
+static int loongarch_jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms,
+ struct disasm_line *dl __maybe_unused)
{
struct map *map = ms->map;
struct symbol *sym = ms->sym;
diff --git a/tools/perf/arch/powerpc/annotate/instructions.c b/tools/perf/arch/powerpc/annotate/instructions.c
index a3f423c27cae..ede9eeade0ab 100644
--- a/tools/perf/arch/powerpc/annotate/instructions.c
+++ b/tools/perf/arch/powerpc/annotate/instructions.c
@@ -49,12 +49,266 @@ static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, con
return ops;
}
+#define PPC_OP(op) (((op) >> 26) & 0x3F)
+#define PPC_21_30(R) (((R) >> 1) & 0x3ff)
+#define PPC_22_30(R) (((R) >> 1) & 0x1ff)
+
+struct insn_offset {
+ const char *name;
+ int value;
+};
+
+/*
+ * There are memory instructions with opcode 31 which are
+ * of X Form, Example:
+ * ldx RT,RA,RB
+ * ______________________________________
+ * | 31 | RT | RA | RB | 21 |/|
+ * --------------------------------------
+ * 0 6 11 16 21 30 31
+ *
+ * But all instructions with opcode 31 are not memory.
+ * Example: add RT,RA,RB
+ *
+ * Use bits 21 to 30 to check memory insns with 31 as opcode.
+ * In ins_array below, for ldx instruction:
+ * name => OP_31_XOP_LDX
+ * value => 21
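+ *
+ * For example, "ldx r9,r3,r4" encodes as 0x7d23202a:
+ * PPC_OP() extracts 31 and PPC_21_30() extracts 21, which is
+ * then matched against OP_31_XOP_LDX with bsearch(). For that
+ * lookup to work, ins_array must stay sorted by value.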
+ */
+
+static struct insn_offset ins_array[] = {
+ { .name = "OP_31_XOP_LXSIWZX", .value = 12, },
+ { .name = "OP_31_XOP_LWARX", .value = 20, },
+ { .name = "OP_31_XOP_LDX", .value = 21, },
+ { .name = "OP_31_XOP_LWZX", .value = 23, },
+ { .name = "OP_31_XOP_LDUX", .value = 53, },
+ { .name = "OP_31_XOP_LWZUX", .value = 55, },
+ { .name = "OP_31_XOP_LXSIWAX", .value = 76, },
+ { .name = "OP_31_XOP_LDARX", .value = 84, },
+ { .name = "OP_31_XOP_LBZX", .value = 87, },
+ { .name = "OP_31_XOP_LVX", .value = 103, },
+ { .name = "OP_31_XOP_LBZUX", .value = 119, },
+ { .name = "OP_31_XOP_STXSIWX", .value = 140, },
+ { .name = "OP_31_XOP_STDX", .value = 149, },
+ { .name = "OP_31_XOP_STWX", .value = 151, },
+ { .name = "OP_31_XOP_STDUX", .value = 181, },
+ { .name = "OP_31_XOP_STWUX", .value = 183, },
+ { .name = "OP_31_XOP_STBX", .value = 215, },
+ { .name = "OP_31_XOP_STVX", .value = 231, },
+ { .name = "OP_31_XOP_STBUX", .value = 247, },
+ { .name = "OP_31_XOP_LHZX", .value = 279, },
+ { .name = "OP_31_XOP_LHZUX", .value = 311, },
+ { .name = "OP_31_XOP_LXVDSX", .value = 332, },
+ { .name = "OP_31_XOP_LWAX", .value = 341, },
+ { .name = "OP_31_XOP_LHAX", .value = 343, },
+ { .name = "OP_31_XOP_LWAUX", .value = 373, },
+ { .name = "OP_31_XOP_LHAUX", .value = 375, },
+ { .name = "OP_31_XOP_STHX", .value = 407, },
+ { .name = "OP_31_XOP_STHUX", .value = 439, },
+ { .name = "OP_31_XOP_LXSSPX", .value = 524, },
+ { .name = "OP_31_XOP_LDBRX", .value = 532, },
+ { .name = "OP_31_XOP_LSWX", .value = 533, },
+ { .name = "OP_31_XOP_LWBRX", .value = 534, },
+ { .name = "OP_31_XOP_LFSUX", .value = 567, },
+ { .name = "OP_31_XOP_LXSDX", .value = 588, },
+ { .name = "OP_31_XOP_LSWI", .value = 597, },
+ { .name = "OP_31_XOP_LFDX", .value = 599, },
+ { .name = "OP_31_XOP_LFDUX", .value = 631, },
+ { .name = "OP_31_XOP_STXSSPX", .value = 652, },
+ { .name = "OP_31_XOP_STDBRX", .value = 660, },
+ { .name = "OP_31_XOP_STXWX", .value = 661, },
+ { .name = "OP_31_XOP_STWBRX", .value = 662, },
+ { .name = "OP_31_XOP_STFSX", .value = 663, },
+ { .name = "OP_31_XOP_STFSUX", .value = 695, },
+ { .name = "OP_31_XOP_STXSDX", .value = 716, },
+ { .name = "OP_31_XOP_STSWI", .value = 725, },
+ { .name = "OP_31_XOP_STFDX", .value = 727, },
+ { .name = "OP_31_XOP_STFDUX", .value = 759, },
+ { .name = "OP_31_XOP_LXVW4X", .value = 780, },
+ { .name = "OP_31_XOP_LHBRX", .value = 790, },
+ { .name = "OP_31_XOP_LXVD2X", .value = 844, },
+ { .name = "OP_31_XOP_LFIWAX", .value = 855, },
+ { .name = "OP_31_XOP_LFIWZX", .value = 887, },
+ { .name = "OP_31_XOP_STXVW4X", .value = 908, },
+ { .name = "OP_31_XOP_STHBRX", .value = 918, },
+ { .name = "OP_31_XOP_STXVD2X", .value = 972, },
+ { .name = "OP_31_XOP_STFIWX", .value = 983, },
+};
+
+/*
+ * Arithmetic instructions which have opcode 31.
+ * These instructions are tracked to save register state
+ * changes. Example:
+ *
+ * lwz r10,264(r3)
+ * add r31, r3, r3
+ * lwz r9, 0(r31)
+ *
+ * Here instruction tracking needs to identify the "add"
+ * instruction and copy the data type of r3 to r31. If a sample
+ * then hits "lwz r9, 0(r31)", the data type of r31 can be
+ * resolved from this tracking.
+ */
+static struct insn_offset arithmetic_ins_op_31[] = {
+ { .name = "SUB_CARRY_XO_FORM", .value = 8, },
+ { .name = "MUL_HDW_XO_FORM1", .value = 9, },
+ { .name = "ADD_CARRY_XO_FORM", .value = 10, },
+ { .name = "MUL_HW_XO_FORM1", .value = 11, },
+ { .name = "SUB_XO_FORM", .value = 40, },
+ { .name = "MUL_HDW_XO_FORM", .value = 73, },
+ { .name = "MUL_HW_XO_FORM", .value = 75, },
+ { .name = "SUB_EXT_XO_FORM", .value = 136, },
+ { .name = "ADD_EXT_XO_FORM", .value = 138, },
+ { .name = "SUB_ZERO_EXT_XO_FORM", .value = 200, },
+ { .name = "ADD_ZERO_EXT_XO_FORM", .value = 202, },
+ { .name = "SUB_EXT_XO_FORM2", .value = 232, },
+ { .name = "MUL_DW_XO_FORM", .value = 233, },
+ { .name = "ADD_EXT_XO_FORM2", .value = 234, },
+ { .name = "MUL_W_XO_FORM", .value = 235, },
+ { .name = "ADD_XO_FORM", .value = 266, },
+ { .name = "DIV_DW_XO_FORM1", .value = 457, },
+ { .name = "DIV_W_XO_FORM1", .value = 459, },
+ { .name = "DIV_DW_XO_FORM", .value = 489, },
+ { .name = "DIV_W_XO_FORM", .value = 491, },
+};
+
+static struct insn_offset arithmetic_two_ops[] = {
+ { .name = "mulli", .value = 7, },
+ { .name = "subfic", .value = 8, },
+ { .name = "addic", .value = 12, },
+ { .name = "addic.", .value = 13, },
+ { .name = "addi", .value = 14, },
+ { .name = "addis", .value = 15, },
+};
+
+static int cmp_offset(const void *a, const void *b)
+{
+ const struct insn_offset *val1 = a;
+ const struct insn_offset *val2 = b;
+
+ return (val1->value - val2->value);
+}
+
+static struct ins_ops *check_ppc_insn(struct disasm_line *dl)
+{
+ int raw_insn = dl->raw.raw_insn;
+ int opcode = PPC_OP(raw_insn);
+ int mem_insn_31 = PPC_21_30(raw_insn);
+ struct insn_offset *ret;
+ struct insn_offset mem_insns_31_opcode = {
+ "OP_31_INSN",
+ mem_insn_31
+ };
+ char name_insn[32];
+
+ /*
+ * Instructions with opcode 32 to 63 are memory
+ * instructions in powerpc
+ */
+ if ((opcode & 0x20)) {
+ /*
+		 * For raw instructions, set the name to the opcode
+		 * so it can be used in insn-stat.
+ */
+ if (!strlen(dl->ins.name)) {
+ sprintf(name_insn, "%d", opcode);
+ dl->ins.name = strdup(name_insn);
+ }
+ return &load_store_ops;
+ } else if (opcode == 31) {
+ /* Check for memory instructions with opcode 31 */
+ ret = bsearch(&mem_insns_31_opcode, ins_array, ARRAY_SIZE(ins_array), sizeof(ins_array[0]), cmp_offset);
+ if (ret) {
+ if (!strlen(dl->ins.name))
+ dl->ins.name = strdup(ret->name);
+ return &load_store_ops;
+ } else {
+ mem_insns_31_opcode.value = PPC_22_30(raw_insn);
+ ret = bsearch(&mem_insns_31_opcode, arithmetic_ins_op_31, ARRAY_SIZE(arithmetic_ins_op_31),
+ sizeof(arithmetic_ins_op_31[0]), cmp_offset);
+ if (ret != NULL)
+ return &arithmetic_ops;
+			/* Bits 21 to 30 have value 444 for the "mr" insn, i.e. OR X form */
+ if (PPC_21_30(raw_insn) == 444)
+ return &arithmetic_ops;
+ }
+ } else {
+ mem_insns_31_opcode.value = opcode;
+ ret = bsearch(&mem_insns_31_opcode, arithmetic_two_ops, ARRAY_SIZE(arithmetic_two_ops),
+ sizeof(arithmetic_two_ops[0]), cmp_offset);
+ if (ret != NULL)
+ return &arithmetic_ops;
+ }
+
+ return NULL;
+}
+
+/*
+ * Instruction tracking function for register state moves.
+ * Example sequence:
+ * ld r10,264(r3)
+ * mr r31,r3
+ * <<after some sequence>
+ * ld r9,312(r31)
+ *
+ * The above sequence shows that the register state of r3 is
+ * moved to r31. update_insn_state_powerpc tracks these state
+ * changes.
+ */
+#ifdef HAVE_DWARF_SUPPORT
+static void update_insn_state_powerpc(struct type_state *state,
+		struct data_loc_info *dloc, Dwarf_Die *cu_die __maybe_unused,
+ struct disasm_line *dl)
+{
+ struct annotated_insn_loc loc;
+ struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE];
+ struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET];
+ struct type_state_reg *tsr;
+ u32 insn_offset = dl->al.offset;
+
+ if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0)
+ return;
+
+ /*
+	 * Value 444 for bits 21:30 is for the "mr"
+	 * instruction. "mr" is an extended OR, so swap
+	 * the source and destination regs to set them
+	 * correctly.
+ */
+ if (PPC_21_30(dl->raw.raw_insn) == 444) {
+ int src_reg = src->reg1;
+
+ src->reg1 = dst->reg1;
+ dst->reg1 = src_reg;
+ }
+
+ if (!has_reg_type(state, dst->reg1))
+ return;
+
+ tsr = &state->regs[dst->reg1];
+
+ if (!has_reg_type(state, src->reg1) ||
+ !state->regs[src->reg1].ok) {
+ tsr->ok = false;
+ return;
+ }
+
+ tsr->type = state->regs[src->reg1].type;
+ tsr->kind = state->regs[src->reg1].kind;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] reg%d -> reg%d",
+ insn_offset, src->reg1, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+}
+#endif /* HAVE_DWARF_SUPPORT */
+
static int powerpc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
if (!arch->initialized) {
arch->initialized = true;
arch->associate_instruction_ops = powerpc__associate_instruction_ops;
arch->objdump.comment_char = '#';
+ annotate_opts.show_asm_raw = true;
}
return 0;
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
index 0c4f4caf53ac..104c7ae5c433 100644
--- a/tools/perf/arch/powerpc/util/dwarf-regs.c
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -98,3 +98,56 @@ int regs_query_register_offset(const char *name)
return roff->ptregs_offset;
return -EINVAL;
}
+
+#define PPC_OP(op) (((op) >> 26) & 0x3F)
+#define PPC_RA(a) (((a) >> 16) & 0x1f)
+#define PPC_RT(t) (((t) >> 21) & 0x1f)
+#define PPC_RB(b) (((b) >> 11) & 0x1f)
+#define PPC_D(D) ((D) & 0xfffe)
+#define PPC_DS(DS) ((DS) & 0xfffc)
+#define OP_LD 58
+#define OP_STD 62
+
+static int get_source_reg(u32 raw_insn)
+{
+ return PPC_RA(raw_insn);
+}
+
+static int get_target_reg(u32 raw_insn)
+{
+ return PPC_RT(raw_insn);
+}
+
+static int get_offset_opcode(u32 raw_insn)
+{
+ int opcode = PPC_OP(raw_insn);
+
+	/* DS form */
+ if ((opcode == OP_LD) || (opcode == OP_STD))
+ return PPC_DS(raw_insn);
+ else
+ return PPC_D(raw_insn);
+}
+
+/*
+ * Fills the required fields for op_loc depending on whether it
+ * is a source or target.
+ * D form: ins RT,D(RA) -> src_reg1 = RA, offset = D, dst_reg1 = RT
+ * DS form: ins RT,DS(RA) -> src_reg1 = RA, offset = DS, dst_reg1 = RT
+ * X form: ins RT,RA,RB -> src_reg1 = RA, src_reg2 = RB, dst_reg1 = RT
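+ *
+ * For example, "ld r9,312(r31)" (DS form, opcode 58) gives
+ * src_reg1 = 31, offset = 312 and dst_reg1 = 9.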
+ */
+void get_powerpc_regs(u32 raw_insn, int is_source,
+ struct annotated_op_loc *op_loc)
+{
+ if (is_source)
+ op_loc->reg1 = get_source_reg(raw_insn);
+ else
+ op_loc->reg1 = get_target_reg(raw_insn);
+
+ if (op_loc->multi_regs)
+ op_loc->reg2 = PPC_RB(raw_insn);
+
+ /* TODO: Implement offset handling for X Form */
+ if ((op_loc->mem_ref) && (PPC_OP(raw_insn) != 31))
+ op_loc->offset = get_offset_opcode(raw_insn);
+}
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index da5aa3e1f04c..eeac25cca699 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -2,7 +2,7 @@
#include <linux/compiler.h>
static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
- struct map_symbol *ms)
+ struct map_symbol *ms, struct disasm_line *dl __maybe_unused)
{
char *endptr, *tok, *name;
struct map *map = ms->map;
@@ -52,7 +52,8 @@ static struct ins_ops s390_call_ops = {
static int s390_mov__parse(struct arch *arch __maybe_unused,
struct ins_operands *ops,
- struct map_symbol *ms __maybe_unused)
+ struct map_symbol *ms __maybe_unused,
+ struct disasm_line *dl __maybe_unused)
{
char *s = strchr(ops->raw, ','), *target, *endptr;
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 8952e00f9b60..67b4969a6738 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -13,6 +13,7 @@ PERF_HAVE_JITDUMP := 1
generated := $(OUTPUT)arch/x86/include/generated
out := $(generated)/asm
header := $(out)/syscalls_64.c
+header_32 := $(out)/syscalls_32.c
sys := $(srctree)/tools/perf/arch/x86/entry/syscalls
systbl := $(sys)/syscalltbl.sh
@@ -22,7 +23,10 @@ $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
$(header): $(sys)/syscall_64.tbl $(systbl)
$(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
+$(header_32): $(sys)/syscall_32.tbl $(systbl)
+ $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_32.tbl 'x86' > $@
+
clean::
$(call QUIET_CLEAN, x86) $(RM) -r $(header) $(generated)
-archheaders: $(header)
+archheaders: $(header) $(header_32)
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index 5cdf457f5cbe..5caf5a17f03d 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -206,3 +206,392 @@ static int x86__annotate_init(struct arch *arch, char *cpuid)
arch->initialized = true;
return err;
}
+
+#ifdef HAVE_DWARF_SUPPORT
+static void update_insn_state_x86(struct type_state *state,
+ struct data_loc_info *dloc, Dwarf_Die *cu_die,
+ struct disasm_line *dl)
+{
+ struct annotated_insn_loc loc;
+ struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE];
+ struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET];
+ struct type_state_reg *tsr;
+ Dwarf_Die type_die;
+ u32 insn_offset = dl->al.offset;
+ int fbreg = dloc->fbreg;
+ int fboff = 0;
+
+ if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0)
+ return;
+
+ if (ins__is_call(&dl->ins)) {
+ struct symbol *func = dl->ops.target.sym;
+
+ if (func == NULL)
+ return;
+
+ /* __fentry__ will preserve all registers */
+ if (!strcmp(func->name, "__fentry__"))
+ return;
+
+ pr_debug_dtp("call [%x] %s\n", insn_offset, func->name);
+
+ /* Otherwise invalidate caller-saved registers after call */
+ for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) {
+ if (state->regs[i].caller_saved)
+ state->regs[i].ok = false;
+ }
+
+ /* Update register with the return type (if any) */
+ if (die_find_func_rettype(cu_die, func->name, &type_die)) {
+ tsr = &state->regs[state->ret_reg];
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("call [%x] return -> reg%d",
+ insn_offset, state->ret_reg);
+ pr_debug_type_name(&type_die, tsr->kind);
+ }
+ return;
+ }
+
+ if (!strncmp(dl->ins.name, "add", 3)) {
+ u64 imm_value = -1ULL;
+ int offset;
+ const char *var_name = NULL;
+ struct map_symbol *ms = dloc->ms;
+ u64 ip = ms->sym->start + dl->al.offset;
+
+ if (!has_reg_type(state, dst->reg1))
+ return;
+
+ tsr = &state->regs[dst->reg1];
+ tsr->copied_from = -1;
+
+ if (src->imm)
+ imm_value = src->offset;
+ else if (has_reg_type(state, src->reg1) &&
+ state->regs[src->reg1].kind == TSR_KIND_CONST)
+ imm_value = state->regs[src->reg1].imm_value;
+ else if (src->reg1 == DWARF_REG_PC) {
+ u64 var_addr = annotate_calc_pcrel(dloc->ms, ip,
+ src->offset, dl);
+
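+		/*
+		 * Adding the per-cpu offset (this_cpu_off) to a constant
+		 * produces a per-cpu base address in the kernel.
+		 */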
+ if (get_global_var_info(dloc, var_addr,
+ &var_name, &offset) &&
+ !strcmp(var_name, "this_cpu_off") &&
+ tsr->kind == TSR_KIND_CONST) {
+ tsr->kind = TSR_KIND_PERCPU_BASE;
+ tsr->ok = true;
+ imm_value = tsr->imm_value;
+ }
+ }
+ else
+ return;
+
+ if (tsr->kind != TSR_KIND_PERCPU_BASE)
+ return;
+
+ if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset,
+ &type_die) && offset == 0) {
+ /*
+ * This is not a pointer type, but it should be treated
+ * as a pointer.
+ */
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_POINTER;
+ tsr->ok = true;
+
+ pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d",
+ insn_offset, imm_value, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ return;
+ }
+
+ if (strncmp(dl->ins.name, "mov", 3))
+ return;
+
+ if (dloc->fb_cfa) {
+ u64 ip = dloc->ms->sym->start + dl->al.offset;
+ u64 pc = map__rip_2objdump(dloc->ms->map, ip);
+
+ if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
+ fbreg = -1;
+ }
+
+ /* Case 1. register to register or segment:offset to register transfers */
+ if (!src->mem_ref && !dst->mem_ref) {
+ if (!has_reg_type(state, dst->reg1))
+ return;
+
+ tsr = &state->regs[dst->reg1];
+ tsr->copied_from = -1;
+
+ if (dso__kernel(map__dso(dloc->ms->map)) &&
+ src->segment == INSN_SEG_X86_GS && src->imm) {
+ u64 ip = dloc->ms->sym->start + dl->al.offset;
+ u64 var_addr;
+ int offset;
+
+ /*
+ * In kernel, %gs points to a per-cpu region for the
+ * current CPU. Access with a constant offset should
+ * be treated as a global variable access.
+ */
+ var_addr = src->offset;
+
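+			/*
+			 * On x86-64, the stack protector canary is kept at a
+			 * fixed offset of 40 bytes from %gs.
+			 */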
+ if (var_addr == 40) {
+ tsr->kind = TSR_KIND_CANARY;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] stack canary -> reg%d\n",
+ insn_offset, dst->reg1);
+ return;
+ }
+
+ if (!get_global_var_type(cu_die, dloc, ip, var_addr,
+ &offset, &type_die) ||
+ !die_get_member_type(&type_die, offset, &type_die)) {
+ tsr->ok = false;
+ return;
+ }
+
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d",
+ insn_offset, var_addr, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ return;
+ }
+
+ if (src->imm) {
+ tsr->kind = TSR_KIND_CONST;
+ tsr->imm_value = src->offset;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n",
+ insn_offset, tsr->imm_value, dst->reg1);
+ return;
+ }
+
+ if (!has_reg_type(state, src->reg1) ||
+ !state->regs[src->reg1].ok) {
+ tsr->ok = false;
+ return;
+ }
+
+ tsr->type = state->regs[src->reg1].type;
+ tsr->kind = state->regs[src->reg1].kind;
+ tsr->imm_value = state->regs[src->reg1].imm_value;
+ tsr->ok = true;
+
+ /* To copy back the variable type later (hopefully) */
+ if (tsr->kind == TSR_KIND_TYPE)
+ tsr->copied_from = src->reg1;
+
+ pr_debug_dtp("mov [%x] reg%d -> reg%d",
+ insn_offset, src->reg1, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+	/* Case 2. memory to register transfers */
+ if (src->mem_ref && !dst->mem_ref) {
+ int sreg = src->reg1;
+
+ if (!has_reg_type(state, dst->reg1))
+ return;
+
+ tsr = &state->regs[dst->reg1];
+ tsr->copied_from = -1;
+
+retry:
+ /* Check stack variables with offset */
+ if (sreg == fbreg) {
+ struct type_state_stack *stack;
+ int offset = src->offset - fboff;
+
+ stack = find_stack_state(state, offset);
+ if (stack == NULL) {
+ tsr->ok = false;
+ return;
+ } else if (!stack->compound) {
+ tsr->type = stack->type;
+ tsr->kind = stack->kind;
+ tsr->ok = true;
+ } else if (die_get_member_type(&stack->type,
+ offset - stack->offset,
+ &type_die)) {
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+ } else {
+ tsr->ok = false;
+ return;
+ }
+
+ pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d",
+ insn_offset, -offset, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /* And then dereference the pointer if it has one */
+ else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
+ state->regs[sreg].kind == TSR_KIND_TYPE &&
+ die_deref_ptr_type(&state->regs[sreg].type,
+ src->offset, &type_die)) {
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
+ insn_offset, src->offset, sreg, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /* Or check if it's a global variable */
+ else if (sreg == DWARF_REG_PC) {
+ struct map_symbol *ms = dloc->ms;
+ u64 ip = ms->sym->start + dl->al.offset;
+ u64 addr;
+ int offset;
+
+ addr = annotate_calc_pcrel(ms, ip, src->offset, dl);
+
+ if (!get_global_var_type(cu_die, dloc, ip, addr, &offset,
+ &type_die) ||
+ !die_get_member_type(&type_die, offset, &type_die)) {
+ tsr->ok = false;
+ return;
+ }
+
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d",
+ insn_offset, addr, dst->reg1);
+ pr_debug_type_name(&type_die, tsr->kind);
+ }
+ /* And check percpu access with base register */
+ else if (has_reg_type(state, sreg) &&
+ state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) {
+ u64 ip = dloc->ms->sym->start + dl->al.offset;
+ u64 var_addr = src->offset;
+ int offset;
+
+ if (src->multi_regs) {
+ int reg2 = (sreg == src->reg1) ? src->reg2 : src->reg1;
+
+ if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
+ state->regs[reg2].kind == TSR_KIND_CONST)
+ var_addr += state->regs[reg2].imm_value;
+ }
+
+ /*
+ * In kernel, %gs points to a per-cpu region for the
+ * current CPU. Access with a constant offset should
+ * be treated as a global variable access.
+ */
+ if (get_global_var_type(cu_die, dloc, ip, var_addr,
+ &offset, &type_die) &&
+ die_get_member_type(&type_die, offset, &type_die)) {
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ if (src->multi_regs) {
+ pr_debug_dtp("mov [%x] percpu %#x(reg%d,reg%d) -> reg%d",
+ insn_offset, src->offset, src->reg1,
+ src->reg2, dst->reg1);
+ } else {
+ pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d",
+ insn_offset, src->offset, sreg, dst->reg1);
+ }
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ } else {
+ tsr->ok = false;
+ }
+ }
+ /* And then dereference the calculated pointer if it has one */
+ else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
+ state->regs[sreg].kind == TSR_KIND_POINTER &&
+ die_get_member_type(&state->regs[sreg].type,
+ src->offset, &type_die)) {
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d",
+ insn_offset, src->offset, sreg, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /* Or try another register if any */
+ else if (src->multi_regs && sreg == src->reg1 &&
+ src->reg1 != src->reg2) {
+ sreg = src->reg2;
+ goto retry;
+ }
+ else {
+ int offset;
+ const char *var_name = NULL;
+
+ /* it might be per-cpu variable (in kernel) access */
+ if (src->offset < 0) {
+ if (get_global_var_info(dloc, (s64)src->offset,
+ &var_name, &offset) &&
+ !strcmp(var_name, "__per_cpu_offset")) {
+ tsr->kind = TSR_KIND_PERCPU_BASE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] percpu base reg%d\n",
+ insn_offset, dst->reg1);
+ return;
+ }
+ }
+
+ tsr->ok = false;
+ }
+ }
+ /* Case 3. register to memory transfers */
+ if (!src->mem_ref && dst->mem_ref) {
+ if (!has_reg_type(state, src->reg1) ||
+ !state->regs[src->reg1].ok)
+ return;
+
+ /* Check stack variables with offset */
+ if (dst->reg1 == fbreg) {
+ struct type_state_stack *stack;
+ int offset = dst->offset - fboff;
+
+ tsr = &state->regs[src->reg1];
+
+ stack = find_stack_state(state, offset);
+ if (stack) {
+ /*
+ * The source register is likely to hold a type
+ * of member if it's a compound type. Do not
+ * update the stack variable type since we can
+ * get the member type later by using the
+ * die_get_member_type().
+ */
+ if (!stack->compound)
+ set_stack_state(stack, offset, tsr->kind,
+ &tsr->type);
+ } else {
+ findnew_stack_state(state, offset, tsr->kind,
+ &tsr->type);
+ }
+
+ pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)",
+ insn_offset, src->reg1, -offset);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /*
+ * Ignore other transfers since it'd set a value in a struct
+ * and won't change the type.
+ */
+ }
+ /* Case 4. memory to memory transfers (not handled for now) */
+}
+#endif
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
new file mode 100644
index 000000000000..534c74b14fab
--- /dev/null
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
@@ -0,0 +1,470 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# 32-bit system call numbers and entry vectors
+#
+# The format is:
+# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]]
+#
+# The __ia32_sys and __ia32_compat_sys stubs are created on-the-fly for
+# sys_*() system calls and compat_sys_*() compat system calls if
+# IA32_EMULATION is defined, and expect struct pt_regs *regs as their only
+# parameter.
+#
+# The abi is always "i386" for this file.
+#
+0 i386 restart_syscall sys_restart_syscall
+1 i386 exit sys_exit - noreturn
+2 i386 fork sys_fork
+3 i386 read sys_read
+4 i386 write sys_write
+5 i386 open sys_open compat_sys_open
+6 i386 close sys_close
+7 i386 waitpid sys_waitpid
+8 i386 creat sys_creat
+9 i386 link sys_link
+10 i386 unlink sys_unlink
+11 i386 execve sys_execve compat_sys_execve
+12 i386 chdir sys_chdir
+13 i386 time sys_time32
+14 i386 mknod sys_mknod
+15 i386 chmod sys_chmod
+16 i386 lchown sys_lchown16
+17 i386 break
+18 i386 oldstat sys_stat
+19 i386 lseek sys_lseek compat_sys_lseek
+20 i386 getpid sys_getpid
+21 i386 mount sys_mount
+22 i386 umount sys_oldumount
+23 i386 setuid sys_setuid16
+24 i386 getuid sys_getuid16
+25 i386 stime sys_stime32
+26 i386 ptrace sys_ptrace compat_sys_ptrace
+27 i386 alarm sys_alarm
+28 i386 oldfstat sys_fstat
+29 i386 pause sys_pause
+30 i386 utime sys_utime32
+31 i386 stty
+32 i386 gtty
+33 i386 access sys_access
+34 i386 nice sys_nice
+35 i386 ftime
+36 i386 sync sys_sync
+37 i386 kill sys_kill
+38 i386 rename sys_rename
+39 i386 mkdir sys_mkdir
+40 i386 rmdir sys_rmdir
+41 i386 dup sys_dup
+42 i386 pipe sys_pipe
+43 i386 times sys_times compat_sys_times
+44 i386 prof
+45 i386 brk sys_brk
+46 i386 setgid sys_setgid16
+47 i386 getgid sys_getgid16
+48 i386 signal sys_signal
+49 i386 geteuid sys_geteuid16
+50 i386 getegid sys_getegid16
+51 i386 acct sys_acct
+52 i386 umount2 sys_umount
+53 i386 lock
+54 i386 ioctl sys_ioctl compat_sys_ioctl
+55 i386 fcntl sys_fcntl compat_sys_fcntl64
+56 i386 mpx
+57 i386 setpgid sys_setpgid
+58 i386 ulimit
+59 i386 oldolduname sys_olduname
+60 i386 umask sys_umask
+61 i386 chroot sys_chroot
+62 i386 ustat sys_ustat compat_sys_ustat
+63 i386 dup2 sys_dup2
+64 i386 getppid sys_getppid
+65 i386 getpgrp sys_getpgrp
+66 i386 setsid sys_setsid
+67 i386 sigaction sys_sigaction compat_sys_sigaction
+68 i386 sgetmask sys_sgetmask
+69 i386 ssetmask sys_ssetmask
+70 i386 setreuid sys_setreuid16
+71 i386 setregid sys_setregid16
+72 i386 sigsuspend sys_sigsuspend
+73 i386 sigpending sys_sigpending compat_sys_sigpending
+74 i386 sethostname sys_sethostname
+75 i386 setrlimit sys_setrlimit compat_sys_setrlimit
+76 i386 getrlimit sys_old_getrlimit compat_sys_old_getrlimit
+77 i386 getrusage sys_getrusage compat_sys_getrusage
+78 i386 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 i386 settimeofday sys_settimeofday compat_sys_settimeofday
+80 i386 getgroups sys_getgroups16
+81 i386 setgroups sys_setgroups16
+82 i386 select sys_old_select compat_sys_old_select
+83 i386 symlink sys_symlink
+84 i386 oldlstat sys_lstat
+85 i386 readlink sys_readlink
+86 i386 uselib sys_uselib
+87 i386 swapon sys_swapon
+88 i386 reboot sys_reboot
+89 i386 readdir sys_old_readdir compat_sys_old_readdir
+90 i386 mmap sys_old_mmap compat_sys_ia32_mmap
+91 i386 munmap sys_munmap
+92 i386 truncate sys_truncate compat_sys_truncate
+93 i386 ftruncate sys_ftruncate compat_sys_ftruncate
+94 i386 fchmod sys_fchmod
+95 i386 fchown sys_fchown16
+96 i386 getpriority sys_getpriority
+97 i386 setpriority sys_setpriority
+98 i386 profil
+99 i386 statfs sys_statfs compat_sys_statfs
+100 i386 fstatfs sys_fstatfs compat_sys_fstatfs
+101 i386 ioperm sys_ioperm
+102 i386 socketcall sys_socketcall compat_sys_socketcall
+103 i386 syslog sys_syslog
+104 i386 setitimer sys_setitimer compat_sys_setitimer
+105 i386 getitimer sys_getitimer compat_sys_getitimer
+106 i386 stat sys_newstat compat_sys_newstat
+107 i386 lstat sys_newlstat compat_sys_newlstat
+108 i386 fstat sys_newfstat compat_sys_newfstat
+109 i386 olduname sys_uname
+110 i386 iopl sys_iopl
+111 i386 vhangup sys_vhangup
+112 i386 idle
+113 i386 vm86old sys_vm86old sys_ni_syscall
+114 i386 wait4 sys_wait4 compat_sys_wait4
+115 i386 swapoff sys_swapoff
+116 i386 sysinfo sys_sysinfo compat_sys_sysinfo
+117 i386 ipc sys_ipc compat_sys_ipc
+118 i386 fsync sys_fsync
+119 i386 sigreturn sys_sigreturn compat_sys_sigreturn
+120 i386 clone sys_clone compat_sys_ia32_clone
+121 i386 setdomainname sys_setdomainname
+122 i386 uname sys_newuname
+123 i386 modify_ldt sys_modify_ldt
+124 i386 adjtimex sys_adjtimex_time32
+125 i386 mprotect sys_mprotect
+126 i386 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+127 i386 create_module
+128 i386 init_module sys_init_module
+129 i386 delete_module sys_delete_module
+130 i386 get_kernel_syms
+131 i386 quotactl sys_quotactl
+132 i386 getpgid sys_getpgid
+133 i386 fchdir sys_fchdir
+134 i386 bdflush sys_ni_syscall
+135 i386 sysfs sys_sysfs
+136 i386 personality sys_personality
+137 i386 afs_syscall
+138 i386 setfsuid sys_setfsuid16
+139 i386 setfsgid sys_setfsgid16
+140 i386 _llseek sys_llseek
+141 i386 getdents sys_getdents compat_sys_getdents
+142 i386 _newselect sys_select compat_sys_select
+143 i386 flock sys_flock
+144 i386 msync sys_msync
+145 i386 readv sys_readv
+146 i386 writev sys_writev
+147 i386 getsid sys_getsid
+148 i386 fdatasync sys_fdatasync
+149 i386 _sysctl sys_ni_syscall
+150 i386 mlock sys_mlock
+151 i386 munlock sys_munlock
+152 i386 mlockall sys_mlockall
+153 i386 munlockall sys_munlockall
+154 i386 sched_setparam sys_sched_setparam
+155 i386 sched_getparam sys_sched_getparam
+156 i386 sched_setscheduler sys_sched_setscheduler
+157 i386 sched_getscheduler sys_sched_getscheduler
+158 i386 sched_yield sys_sched_yield
+159 i386 sched_get_priority_max sys_sched_get_priority_max
+160 i386 sched_get_priority_min sys_sched_get_priority_min
+161 i386 sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 i386 nanosleep sys_nanosleep_time32
+163 i386 mremap sys_mremap
+164 i386 setresuid sys_setresuid16
+165 i386 getresuid sys_getresuid16
+166 i386 vm86 sys_vm86 sys_ni_syscall
+167 i386 query_module
+168 i386 poll sys_poll
+169 i386 nfsservctl
+170 i386 setresgid sys_setresgid16
+171 i386 getresgid sys_getresgid16
+172 i386 prctl sys_prctl
+173 i386 rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+174 i386 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+175 i386 rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+176 i386 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+177 i386 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+179 i386 rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+180 i386 pread64 sys_ia32_pread64
+181 i386 pwrite64 sys_ia32_pwrite64
+182 i386 chown sys_chown16
+183 i386 getcwd sys_getcwd
+184 i386 capget sys_capget
+185 i386 capset sys_capset
+186 i386 sigaltstack sys_sigaltstack compat_sys_sigaltstack
+187 i386 sendfile sys_sendfile compat_sys_sendfile
+188 i386 getpmsg
+189 i386 putpmsg
+190 i386 vfork sys_vfork
+191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit
+192 i386 mmap2 sys_mmap_pgoff
+193 i386 truncate64 sys_ia32_truncate64
+194 i386 ftruncate64 sys_ia32_ftruncate64
+195 i386 stat64 sys_stat64 compat_sys_ia32_stat64
+196 i386 lstat64 sys_lstat64 compat_sys_ia32_lstat64
+197 i386 fstat64 sys_fstat64 compat_sys_ia32_fstat64
+198 i386 lchown32 sys_lchown
+199 i386 getuid32 sys_getuid
+200 i386 getgid32 sys_getgid
+201 i386 geteuid32 sys_geteuid
+202 i386 getegid32 sys_getegid
+203 i386 setreuid32 sys_setreuid
+204 i386 setregid32 sys_setregid
+205 i386 getgroups32 sys_getgroups
+206 i386 setgroups32 sys_setgroups
+207 i386 fchown32 sys_fchown
+208 i386 setresuid32 sys_setresuid
+209 i386 getresuid32 sys_getresuid
+210 i386 setresgid32 sys_setresgid
+211 i386 getresgid32 sys_getresgid
+212 i386 chown32 sys_chown
+213 i386 setuid32 sys_setuid
+214 i386 setgid32 sys_setgid
+215 i386 setfsuid32 sys_setfsuid
+216 i386 setfsgid32 sys_setfsgid
+217 i386 pivot_root sys_pivot_root
+218 i386 mincore sys_mincore
+219 i386 madvise sys_madvise
+220 i386 getdents64 sys_getdents64
+221 i386 fcntl64 sys_fcntl64 compat_sys_fcntl64
+# 222 is unused
+# 223 is unused
+224 i386 gettid sys_gettid
+225 i386 readahead sys_ia32_readahead
+226 i386 setxattr sys_setxattr
+227 i386 lsetxattr sys_lsetxattr
+228 i386 fsetxattr sys_fsetxattr
+229 i386 getxattr sys_getxattr
+230 i386 lgetxattr sys_lgetxattr
+231 i386 fgetxattr sys_fgetxattr
+232 i386 listxattr sys_listxattr
+233 i386 llistxattr sys_llistxattr
+234 i386 flistxattr sys_flistxattr
+235 i386 removexattr sys_removexattr
+236 i386 lremovexattr sys_lremovexattr
+237 i386 fremovexattr sys_fremovexattr
+238 i386 tkill sys_tkill
+239 i386 sendfile64 sys_sendfile64
+240 i386 futex sys_futex_time32
+241 i386 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+242 i386 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+243 i386 set_thread_area sys_set_thread_area
+244 i386 get_thread_area sys_get_thread_area
+245 i386 io_setup sys_io_setup compat_sys_io_setup
+246 i386 io_destroy sys_io_destroy
+247 i386 io_getevents sys_io_getevents_time32
+248 i386 io_submit sys_io_submit compat_sys_io_submit
+249 i386 io_cancel sys_io_cancel
+250 i386 fadvise64 sys_ia32_fadvise64
+# 251 is available for reuse (was briefly sys_set_zone_reclaim)
+252 i386 exit_group sys_exit_group - noreturn
+253 i386 lookup_dcookie
+254 i386 epoll_create sys_epoll_create
+255 i386 epoll_ctl sys_epoll_ctl
+256 i386 epoll_wait sys_epoll_wait
+257 i386 remap_file_pages sys_remap_file_pages
+258 i386 set_tid_address sys_set_tid_address
+259 i386 timer_create sys_timer_create compat_sys_timer_create
+260 i386 timer_settime sys_timer_settime32
+261 i386 timer_gettime sys_timer_gettime32
+262 i386 timer_getoverrun sys_timer_getoverrun
+263 i386 timer_delete sys_timer_delete
+264 i386 clock_settime sys_clock_settime32
+265 i386 clock_gettime sys_clock_gettime32
+266 i386 clock_getres sys_clock_getres_time32
+267 i386 clock_nanosleep sys_clock_nanosleep_time32
+268 i386 statfs64 sys_statfs64 compat_sys_statfs64
+269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+270 i386 tgkill sys_tgkill
+271 i386 utimes sys_utimes_time32
+272 i386 fadvise64_64 sys_ia32_fadvise64_64
+273 i386 vserver
+274 i386 mbind sys_mbind
+275 i386 get_mempolicy sys_get_mempolicy
+276 i386 set_mempolicy sys_set_mempolicy
+277 i386 mq_open sys_mq_open compat_sys_mq_open
+278 i386 mq_unlink sys_mq_unlink
+279 i386 mq_timedsend sys_mq_timedsend_time32
+280 i386 mq_timedreceive sys_mq_timedreceive_time32
+281 i386 mq_notify sys_mq_notify compat_sys_mq_notify
+282 i386 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+283 i386 kexec_load sys_kexec_load compat_sys_kexec_load
+284 i386 waitid sys_waitid compat_sys_waitid
+# 285 sys_setaltroot
+286 i386 add_key sys_add_key
+287 i386 request_key sys_request_key
+288 i386 keyctl sys_keyctl compat_sys_keyctl
+289 i386 ioprio_set sys_ioprio_set
+290 i386 ioprio_get sys_ioprio_get
+291 i386 inotify_init sys_inotify_init
+292 i386 inotify_add_watch sys_inotify_add_watch
+293 i386 inotify_rm_watch sys_inotify_rm_watch
+294 i386 migrate_pages sys_migrate_pages
+295 i386 openat sys_openat compat_sys_openat
+296 i386 mkdirat sys_mkdirat
+297 i386 mknodat sys_mknodat
+298 i386 fchownat sys_fchownat
+299 i386 futimesat sys_futimesat_time32
+300 i386 fstatat64 sys_fstatat64 compat_sys_ia32_fstatat64
+301 i386 unlinkat sys_unlinkat
+302 i386 renameat sys_renameat
+303 i386 linkat sys_linkat
+304 i386 symlinkat sys_symlinkat
+305 i386 readlinkat sys_readlinkat
+306 i386 fchmodat sys_fchmodat
+307 i386 faccessat sys_faccessat
+308 i386 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+309 i386 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+310 i386 unshare sys_unshare
+311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list
+312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list
+313 i386 splice sys_splice
+314 i386 sync_file_range sys_ia32_sync_file_range
+315 i386 tee sys_tee
+316 i386 vmsplice sys_vmsplice
+317 i386 move_pages sys_move_pages
+318 i386 getcpu sys_getcpu
+319 i386 epoll_pwait sys_epoll_pwait
+320 i386 utimensat sys_utimensat_time32
+321 i386 signalfd sys_signalfd compat_sys_signalfd
+322 i386 timerfd_create sys_timerfd_create
+323 i386 eventfd sys_eventfd
+324 i386 fallocate sys_ia32_fallocate
+325 i386 timerfd_settime sys_timerfd_settime32
+326 i386 timerfd_gettime sys_timerfd_gettime32
+327 i386 signalfd4 sys_signalfd4 compat_sys_signalfd4
+328 i386 eventfd2 sys_eventfd2
+329 i386 epoll_create1 sys_epoll_create1
+330 i386 dup3 sys_dup3
+331 i386 pipe2 sys_pipe2
+332 i386 inotify_init1 sys_inotify_init1
+333 i386 preadv sys_preadv compat_sys_preadv
+334 i386 pwritev sys_pwritev compat_sys_pwritev
+335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+336 i386 perf_event_open sys_perf_event_open
+337 i386 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+338 i386 fanotify_init sys_fanotify_init
+339 i386 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+340 i386 prlimit64 sys_prlimit64
+341 i386 name_to_handle_at sys_name_to_handle_at
+342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+343 i386 clock_adjtime sys_clock_adjtime32
+344 i386 syncfs sys_syncfs
+345 i386 sendmmsg sys_sendmmsg compat_sys_sendmmsg
+346 i386 setns sys_setns
+347 i386 process_vm_readv sys_process_vm_readv
+348 i386 process_vm_writev sys_process_vm_writev
+349 i386 kcmp sys_kcmp
+350 i386 finit_module sys_finit_module
+351 i386 sched_setattr sys_sched_setattr
+352 i386 sched_getattr sys_sched_getattr
+353 i386 renameat2 sys_renameat2
+354 i386 seccomp sys_seccomp
+355 i386 getrandom sys_getrandom
+356 i386 memfd_create sys_memfd_create
+357 i386 bpf sys_bpf
+358 i386 execveat sys_execveat compat_sys_execveat
+359 i386 socket sys_socket
+360 i386 socketpair sys_socketpair
+361 i386 bind sys_bind
+362 i386 connect sys_connect
+363 i386 listen sys_listen
+364 i386 accept4 sys_accept4
+365 i386 getsockopt sys_getsockopt sys_getsockopt
+366 i386 setsockopt sys_setsockopt sys_setsockopt
+367 i386 getsockname sys_getsockname
+368 i386 getpeername sys_getpeername
+369 i386 sendto sys_sendto
+370 i386 sendmsg sys_sendmsg compat_sys_sendmsg
+371 i386 recvfrom sys_recvfrom compat_sys_recvfrom
+372 i386 recvmsg sys_recvmsg compat_sys_recvmsg
+373 i386 shutdown sys_shutdown
+374 i386 userfaultfd sys_userfaultfd
+375 i386 membarrier sys_membarrier
+376 i386 mlock2 sys_mlock2
+377 i386 copy_file_range sys_copy_file_range
+378 i386 preadv2 sys_preadv2 compat_sys_preadv2
+379 i386 pwritev2 sys_pwritev2 compat_sys_pwritev2
+380 i386 pkey_mprotect sys_pkey_mprotect
+381 i386 pkey_alloc sys_pkey_alloc
+382 i386 pkey_free sys_pkey_free
+383 i386 statx sys_statx
+384 i386 arch_prctl sys_arch_prctl compat_sys_arch_prctl
+385 i386 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+386 i386 rseq sys_rseq
+393 i386 semget sys_semget
+394 i386 semctl sys_semctl compat_sys_semctl
+395 i386 shmget sys_shmget
+396 i386 shmctl sys_shmctl compat_sys_shmctl
+397 i386 shmat sys_shmat compat_sys_shmat
+398 i386 shmdt sys_shmdt
+399 i386 msgget sys_msgget
+400 i386 msgsnd sys_msgsnd compat_sys_msgsnd
+401 i386 msgrcv sys_msgrcv compat_sys_msgrcv
+402 i386 msgctl sys_msgctl compat_sys_msgctl
+403 i386 clock_gettime64 sys_clock_gettime
+404 i386 clock_settime64 sys_clock_settime
+405 i386 clock_adjtime64 sys_clock_adjtime
+406 i386 clock_getres_time64 sys_clock_getres
+407 i386 clock_nanosleep_time64 sys_clock_nanosleep
+408 i386 timer_gettime64 sys_timer_gettime
+409 i386 timer_settime64 sys_timer_settime
+410 i386 timerfd_gettime64 sys_timerfd_gettime
+411 i386 timerfd_settime64 sys_timerfd_settime
+412 i386 utimensat_time64 sys_utimensat
+413 i386 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 i386 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 i386 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+417 i386 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 i386 mq_timedsend_time64 sys_mq_timedsend
+419 i386 mq_timedreceive_time64 sys_mq_timedreceive
+420 i386 semtimedop_time64 sys_semtimedop
+421 i386 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 i386 futex_time64 sys_futex
+423 i386 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 i386 pidfd_send_signal sys_pidfd_send_signal
+425 i386 io_uring_setup sys_io_uring_setup
+426 i386 io_uring_enter sys_io_uring_enter
+427 i386 io_uring_register sys_io_uring_register
+428 i386 open_tree sys_open_tree
+429 i386 move_mount sys_move_mount
+430 i386 fsopen sys_fsopen
+431 i386 fsconfig sys_fsconfig
+432 i386 fsmount sys_fsmount
+433 i386 fspick sys_fspick
+434 i386 pidfd_open sys_pidfd_open
+435 i386 clone3 sys_clone3
+436 i386 close_range sys_close_range
+437 i386 openat2 sys_openat2
+438 i386 pidfd_getfd sys_pidfd_getfd
+439 i386 faccessat2 sys_faccessat2
+440 i386 process_madvise sys_process_madvise
+441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 i386 mount_setattr sys_mount_setattr
+443 i386 quotactl_fd sys_quotactl_fd
+444 i386 landlock_create_ruleset sys_landlock_create_ruleset
+445 i386 landlock_add_rule sys_landlock_add_rule
+446 i386 landlock_restrict_self sys_landlock_restrict_self
+447 i386 memfd_secret sys_memfd_secret
+448 i386 process_mrelease sys_process_mrelease
+449 i386 futex_waitv sys_futex_waitv
+450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node
+451 i386 cachestat sys_cachestat
+452 i386 fchmodat2 sys_fchmodat2
+453 i386 map_shadow_stack sys_map_shadow_stack
+454 i386 futex_wake sys_futex_wake
+455 i386 futex_wait sys_futex_wait
+456 i386 futex_requeue sys_futex_requeue
+457 i386 statmount sys_statmount
+458 i386 listmount sys_listmount
+459 i386 lsm_get_self_attr sys_lsm_get_self_attr
+460 i386 lsm_set_self_attr sys_lsm_set_self_attr
+461 i386 lsm_list_modules sys_lsm_list_modules
+462 i386 mseal sys_mseal
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index e65b7dbe27fb..a0400707180c 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -15,7 +15,7 @@
#if defined(__x86_64__)
struct perf_event__synthesize_extra_kmaps_cb_args {
- struct perf_tool *tool;
+ const struct perf_tool *tool;
perf_event__handler_t process;
struct machine *machine;
union perf_event *event;
@@ -65,7 +65,7 @@ static int perf_event__synthesize_extra_kmaps_cb(struct map *map, void *data)
return 0;
}
-int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
+int perf_event__synthesize_extra_kmaps(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index b1ce0c52d88d..cebdd483149e 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -89,6 +89,12 @@ int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
return 1;
}
+ /* Retire latency events should not be the group leader. */
+ if (lhs->retire_lat && !rhs->retire_lat)
+ return 1;
+ if (!lhs->retire_lat && rhs->retire_lat)
+ return -1;
+
/* Default ordering by insertion index. */
return lhs->core.idx - rhs->core.idx;
}
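
The two added checks extend arch_evlist__cmp() so retire-latency events
always sort after other events and can never become the group leader. A
hedged, self-contained sketch of the same comparator idiom, using toy
types rather than the perf structs:

    #include <stdio.h>
    #include <stdlib.h>

    struct item {
            int idx;        /* insertion index, the default tie-breaker */
            int flagged;    /* stands in for evsel->retire_lat */
    };

    static int item_cmp(const void *pa, const void *pb)
    {
            const struct item *a = pa, *b = pb;

            if (a->flagged && !b->flagged)
                    return 1;       /* flagged items sort last */
            if (!a->flagged && b->flagged)
                    return -1;
            return a->idx - b->idx; /* fall back to insertion order */
    }

    int main(void)
    {
            struct item v[] = { { 0, 1 }, { 1, 0 }, { 2, 0 } };

            qsort(v, 3, sizeof(v[0]), item_cmp);
            for (int i = 0; i < 3; i++)
                    printf("%d%c", v[i].idx, i < 2 ? ' ' : '\n');
            return 0;       /* prints: 1 2 0 */
    }
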
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index 34696f3d3d5d..85c8186300c8 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -434,7 +434,6 @@ struct auxtrace_record *intel_bts_recording_init(int *err)
}
btsr->intel_bts_pmu = intel_bts_pmu;
- btsr->itr.pmu = intel_bts_pmu;
btsr->itr.recording_options = intel_bts_recording_options;
btsr->itr.info_priv_size = intel_bts_info_priv_size;
btsr->itr.info_fill = intel_bts_info_fill;
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 4b710e875953..ea510a7486b1 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -1197,7 +1197,6 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
}
ptr->intel_pt_pmu = intel_pt_pmu;
- ptr->itr.pmu = intel_pt_pmu;
ptr->itr.recording_options = intel_pt_recording_options;
ptr->itr.info_priv_size = intel_pt_info_priv_size;
ptr->itr.info_fill = intel_pt_info_fill;
diff --git a/tools/perf/bench/synthesize.c b/tools/perf/bench/synthesize.c
index 7401ebbac100..9b333276cbdb 100644
--- a/tools/perf/bench/synthesize.c
+++ b/tools/perf/bench/synthesize.c
@@ -49,7 +49,7 @@ static const char *const bench_usage[] = {
static atomic_t event_count;
-static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
+static int process_synthesized_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index b10b7f005658..3dc6197ef3fa 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -221,7 +221,8 @@ static int process_branch_callback(struct evsel *evsel,
if (a.map != NULL)
dso__set_hit(map__dso(a.map));
- hist__account_cycles(sample->branch_stack, al, sample, false, NULL);
+ hist__account_cycles(sample->branch_stack, al, sample, false,
+ NULL, evsel);
ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
out:
@@ -279,7 +280,7 @@ static int evsel__add_sample(struct evsel *evsel, struct perf_sample *sample,
return ret;
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -396,10 +397,10 @@ static void print_annotate_item_stat(struct list_head *head, const char *title)
printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n\n", total,
total_good, 100.0 * total_good / (total ?: 1),
total_bad, 100.0 * total_bad / (total ?: 1));
- printf(" %-10s: %5s %5s\n", "Name", "Good", "Bad");
+ printf(" %-20s: %5s %5s\n", "Name/opcode", "Good", "Bad");
printf("-----------------------------------------------------------\n");
list_for_each_entry(istat, head, list)
- printf(" %-10s: %5d %5d\n", istat->name, istat->good, istat->bad);
+ printf(" %-20s: %5d %5d\n", istat->name, istat->good, istat->bad);
printf("\n");
}
@@ -632,13 +633,23 @@ static int __cmd_annotate(struct perf_annotate *ann)
evlist__for_each_entry(session->evlist, pos) {
struct hists *hists = evsel__hists(pos);
u32 nr_samples = hists->stats.nr_samples;
+ struct ui_progress prog;
+ struct evsel *evsel;
- if (nr_samples == 0)
+ if (!symbol_conf.event_group || !evsel__is_group_leader(pos))
continue;
- if (!symbol_conf.event_group || !evsel__is_group_leader(pos))
+ for_each_group_member(evsel, pos)
+ nr_samples += evsel__hists(evsel)->stats.nr_samples;
+
+ if (nr_samples == 0)
continue;
+ ui_progress__init(&prog, nr_samples,
+ "Sorting group events for output...");
+ evsel__output_resort(pos, &prog);
+ ui_progress__finish();
+
hists__find_annotations(hists, pos, ann);
}
@@ -686,28 +697,7 @@ static const char * const annotate_usage[] = {
int cmd_annotate(int argc, const char **argv)
{
- struct perf_annotate annotate = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .namespaces = perf_event__process_namespaces,
- .attr = perf_event__process_attr,
- .build_id = perf_event__process_build_id,
-#ifdef HAVE_LIBTRACEEVENT
- .tracing_data = perf_event__process_tracing_data,
-#endif
- .id_index = perf_event__process_id_index,
- .auxtrace_info = perf_event__process_auxtrace_info,
- .auxtrace = perf_event__process_auxtrace,
- .feature = process_feature_event,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
- },
- };
+ struct perf_annotate annotate = {};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
@@ -795,6 +785,8 @@ int cmd_annotate(int argc, const char **argv)
"Show stats for the data type annotation"),
OPT_BOOLEAN(0, "insn-stat", &annotate.insn_stat,
"Show instruction stats for the data type annotation"),
+ OPT_BOOLEAN(0, "skip-empty", &symbol_conf.skip_empty,
+ "Do not display empty (or dummy) events in the output"),
OPT_END()
};
int ret;
@@ -864,6 +856,25 @@ int cmd_annotate(int argc, const char **argv)
data.path = input_name;
+ perf_tool__init(&annotate.tool, /*ordered_events=*/true);
+ annotate.tool.sample = process_sample_event;
+ annotate.tool.mmap = perf_event__process_mmap;
+ annotate.tool.mmap2 = perf_event__process_mmap2;
+ annotate.tool.comm = perf_event__process_comm;
+ annotate.tool.exit = perf_event__process_exit;
+ annotate.tool.fork = perf_event__process_fork;
+ annotate.tool.namespaces = perf_event__process_namespaces;
+ annotate.tool.attr = perf_event__process_attr;
+ annotate.tool.build_id = perf_event__process_build_id;
+#ifdef HAVE_LIBTRACEEVENT
+ annotate.tool.tracing_data = perf_event__process_tracing_data;
+#endif
+ annotate.tool.id_index = perf_event__process_id_index;
+ annotate.tool.auxtrace_info = perf_event__process_auxtrace_info;
+ annotate.tool.auxtrace = perf_event__process_auxtrace;
+ annotate.tool.feature = process_feature_event;
+ annotate.tool.ordering_requires_timestamps = true;
+
annotate.session = perf_session__new(&data, &annotate.tool);
if (IS_ERR(annotate.session))
return PTR_ERR(annotate.session);
@@ -916,11 +927,15 @@ int cmd_annotate(int argc, const char **argv)
sort_order = "dso,symbol";
/*
- * Set SORT_MODE__BRANCH so that annotate display IPC/Cycle
- * if branch info is in perf data in TUI mode.
+ * Set SORT_MODE__BRANCH so that annotate displays IPC/Cycle and
+ * branch counters, if the corresponding branch info is available
+ * in the perf data in the TUI mode.
*/
- if ((use_browser == 1 || annotate.use_stdio2) && annotate.has_br_stack)
+ if ((use_browser == 1 || annotate.use_stdio2) && annotate.has_br_stack) {
sort__mode = SORT_MODE__BRANCH;
+ if (annotate.session->evlist->nr_br_cntr > 0)
+ annotate_opts.show_br_cntr = true;
+ }
if (setup_sorting(NULL) < 0)
usage_with_options(annotate_usage, options);
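
This conversion, repeated below for builtin-buildid-list.c, builtin-c2c.c,
builtin-diff.c and builtin-evlist.c, swaps designated-initializer
struct perf_tool definitions for a runtime perf_tool__init() call plus
per-callback overrides, so any callback a command does not set gets a
shared default instead of NULL. A minimal sketch of the idea, with
invented names rather than the real perf API:

    #include <stdbool.h>
    #include <stddef.h>

    typedef int (*event_cb)(void *event);

    struct tool {
            event_cb sample;
            event_cb mmap;
            bool ordered_events;
    };

    static int ignore_event(void *event)
    {
            (void)event;            /* default: safely drop the event */
            return 0;
    }

    static void tool__init(struct tool *t, bool ordered_events)
    {
            t->sample = ignore_event;       /* every callback starts non-NULL */
            t->mmap = ignore_event;
            t->ordered_events = ordered_events;
    }

    static int my_sample(void *event)
    {
            (void)event;
            return 0;
    }

    void setup(struct tool *t)
    {
            tool__init(t, /*ordered_events=*/true);
            t->sample = my_sample;  /* override only what this tool needs */
    }

The payoff is that default behaviour lives in one constructor instead of
being copy-pasted into every command's initializer.
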
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 383d5de36ce4..52dfacaff8e3 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -89,6 +89,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
.mode = PERF_DATA_MODE_READ,
.force = force,
};
+ struct perf_tool build_id__mark_dso_hit_ops;
symbol__elf_init();
/*
@@ -97,6 +98,15 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
if (filename__fprintf_build_id(input_name, stdout) > 0)
goto out;
+ perf_tool__init(&build_id__mark_dso_hit_ops, /*ordered_events=*/true);
+ build_id__mark_dso_hit_ops.sample = build_id__mark_dso_hit;
+ build_id__mark_dso_hit_ops.mmap = perf_event__process_mmap;
+ build_id__mark_dso_hit_ops.mmap2 = perf_event__process_mmap2;
+ build_id__mark_dso_hit_ops.fork = perf_event__process_fork;
+ build_id__mark_dso_hit_ops.exit = perf_event__exit_del_thread;
+ build_id__mark_dso_hit_ops.attr = perf_event__process_attr;
+ build_id__mark_dso_hit_ops.build_id = perf_event__process_build_id;
+
session = perf_session__new(&data, &build_id__mark_dso_hit_ops);
if (IS_ERR(session))
return PTR_ERR(session);
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index c157bd31f2e5..15e1fce71c72 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -273,7 +273,7 @@ static void compute_stats(struct c2c_hist_entry *c2c_he,
update_stats(&cstats->load, weight);
}
-static int process_sample_event(struct perf_tool *tool __maybe_unused,
+static int process_sample_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -385,24 +385,6 @@ free_mi:
goto out;
}
-static struct perf_c2c c2c = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
- .attr = perf_event__process_attr,
- .auxtrace_info = perf_event__process_auxtrace_info,
- .auxtrace = perf_event__process_auxtrace,
- .auxtrace_error = perf_event__process_auxtrace_error,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
- },
-};
-
static const char * const c2c_usage[] = {
"perf c2c {record|report}",
NULL
@@ -3070,6 +3052,19 @@ static int perf_c2c__report(int argc, const char **argv)
data.path = input_name;
data.force = symbol_conf.force;
+ perf_tool__init(&c2c.tool, /*ordered_events=*/true);
+ c2c.tool.sample = process_sample_event;
+ c2c.tool.mmap = perf_event__process_mmap;
+ c2c.tool.mmap2 = perf_event__process_mmap2;
+ c2c.tool.comm = perf_event__process_comm;
+ c2c.tool.exit = perf_event__process_exit;
+ c2c.tool.fork = perf_event__process_fork;
+ c2c.tool.lost = perf_event__process_lost;
+ c2c.tool.attr = perf_event__process_attr;
+ c2c.tool.auxtrace_info = perf_event__process_auxtrace_info;
+ c2c.tool.auxtrace = perf_event__process_auxtrace;
+ c2c.tool.auxtrace_error = perf_event__process_auxtrace_error;
+ c2c.tool.ordering_requires_timestamps = true;
session = perf_session__new(&data, &c2c.tool);
if (IS_ERR(session)) {
err = PTR_ERR(session);
@@ -3266,7 +3261,7 @@ static int perf_c2c__record(int argc, const char **argv)
return -1;
}
- if (perf_pmu__mem_events_init(pmu)) {
+ if (perf_pmu__mem_events_init()) {
pr_err("failed: memory events not supported\n");
return -1;
}
@@ -3290,19 +3285,15 @@ static int perf_c2c__record(int argc, const char **argv)
* PERF_MEM_EVENTS__LOAD_STORE if it is supported.
*/
if (e->tag) {
- e->record = true;
+ perf_mem_record[PERF_MEM_EVENTS__LOAD_STORE] = true;
rec_argv[i++] = "-W";
} else {
- e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
- e->record = true;
-
- e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE);
- e->record = true;
+ perf_mem_record[PERF_MEM_EVENTS__LOAD] = true;
+ perf_mem_record[PERF_MEM_EVENTS__STORE] = true;
}
}
- e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
- if (e->record)
+ if (perf_mem_record[PERF_MEM_EVENTS__LOAD])
rec_argv[i++] = "-W";
rec_argv[i++] = "-d";
diff --git a/tools/perf/builtin-check.c b/tools/perf/builtin-check.c
new file mode 100644
index 000000000000..0b76b6e42b78
--- /dev/null
+++ b/tools/perf/builtin-check.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "builtin.h"
+#include "color.h"
+#include "util/debug.h"
+#include "util/header.h"
+#include <tools/config.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <subcmd/parse-options.h>
+
+static const char * const check_subcommands[] = { "feature", NULL };
+static struct option check_options[] = {
+ OPT_BOOLEAN('q', "quiet", &quiet, "do not show any warnings or messages"),
+ OPT_END()
+};
+static struct option check_feature_options[] = { OPT_PARENT(check_options) };
+
+static const char *check_usage[] = { NULL, NULL };
+static const char *check_feature_usage[] = {
+ "perf check feature <feature_list>",
+ NULL
+};
+
+struct feature_status supported_features[] = {
+ FEATURE_STATUS("aio", HAVE_AIO_SUPPORT),
+ FEATURE_STATUS("bpf", HAVE_LIBBPF_SUPPORT),
+ FEATURE_STATUS("bpf_skeletons", HAVE_BPF_SKEL),
+ FEATURE_STATUS("debuginfod", HAVE_DEBUGINFOD_SUPPORT),
+ FEATURE_STATUS("dwarf", HAVE_DWARF_SUPPORT),
+ FEATURE_STATUS("dwarf_getlocations", HAVE_DWARF_GETLOCATIONS_SUPPORT),
+ FEATURE_STATUS("dwarf-unwind", HAVE_DWARF_UNWIND_SUPPORT),
+ FEATURE_STATUS("auxtrace", HAVE_AUXTRACE_SUPPORT),
+ FEATURE_STATUS("libaudit", HAVE_LIBAUDIT_SUPPORT),
+ FEATURE_STATUS("libbfd", HAVE_LIBBFD_SUPPORT),
+ FEATURE_STATUS("libcapstone", HAVE_LIBCAPSTONE_SUPPORT),
+ FEATURE_STATUS("libcrypto", HAVE_LIBCRYPTO_SUPPORT),
+ FEATURE_STATUS("libdw-dwarf-unwind", HAVE_DWARF_SUPPORT),
+ FEATURE_STATUS("libelf", HAVE_LIBELF_SUPPORT),
+ FEATURE_STATUS("libnuma", HAVE_LIBNUMA_SUPPORT),
+ FEATURE_STATUS("libopencsd", HAVE_CSTRACE_SUPPORT),
+ FEATURE_STATUS("libperl", HAVE_LIBPERL_SUPPORT),
+ FEATURE_STATUS("libpfm4", HAVE_LIBPFM),
+ FEATURE_STATUS("libpython", HAVE_LIBPYTHON_SUPPORT),
+ FEATURE_STATUS("libslang", HAVE_SLANG_SUPPORT),
+ FEATURE_STATUS("libtraceevent", HAVE_LIBTRACEEVENT),
+ FEATURE_STATUS("libunwind", HAVE_LIBUNWIND_SUPPORT),
+ FEATURE_STATUS("lzma", HAVE_LZMA_SUPPORT),
+ FEATURE_STATUS("numa_num_possible_cpus", HAVE_LIBNUMA_SUPPORT),
+ FEATURE_STATUS("syscall_table", HAVE_SYSCALL_TABLE_SUPPORT),
+ FEATURE_STATUS("zlib", HAVE_ZLIB_SUPPORT),
+ FEATURE_STATUS("zstd", HAVE_ZSTD_SUPPORT),
+
+ /* this must remain at the end, to mark the end of the array */
+ FEATURE_STATUS(NULL, _)
+};
+
+static void on_off_print(const char *status)
+{
+ printf("[ ");
+
+ if (!strcmp(status, "OFF"))
+ color_fprintf(stdout, PERF_COLOR_RED, "%-3s", status);
+ else
+ color_fprintf(stdout, PERF_COLOR_GREEN, "%-3s", status);
+
+ printf(" ]");
+}
+
+/* Helper function to print status of a feature along with name/macro */
+static void status_print(const char *name, const char *macro,
+ const char *status)
+{
+ printf("%22s: ", name);
+ on_off_print(status);
+ printf(" # %s\n", macro);
+}
+
+#define STATUS(feature) \
+do { \
+ if (feature.is_builtin) \
+ status_print(feature.name, feature.macro, "on"); \
+ else \
+ status_print(feature.name, feature.macro, "OFF"); \
+} while (0)
+
+/**
+ * Check whether "feature" is built in to perf.
+ *
+ * Returns:
+ * 0: not built in, or feature not known
+ * 1: built in
+ */
+static int has_support(const char *feature)
+{
+ for (int i = 0; supported_features[i].name; ++i) {
+ if ((strcasecmp(feature, supported_features[i].name) == 0) ||
+ (strcasecmp(feature, supported_features[i].macro) == 0)) {
+ if (!quiet)
+ STATUS(supported_features[i]);
+ return supported_features[i].is_builtin;
+ }
+ }
+
+ if (!quiet)
+ pr_err("Unknown feature '%s', please use 'perf version --build-options' to see which ones are available.\n", feature);
+
+ return 0;
+}
+
+/**
+ * Usage: 'perf check feature <feature_list>'
+ *
+ * <feature_list> can be a single feature name/macro, or a comma-separated
+ * list of feature names/macros, e.g. "libtraceevent" or "libtraceevent,bpf".
+ *
+ * For a comma-separated list, feature_enabled will be 1 only if all the
+ * features in the string are supported.
+ *
+ * Note that argv will get modified.
+ */
+static int subcommand_feature(int argc, const char **argv)
+{
+ char *feature_list;
+ char *feature_name;
+ int feature_enabled;
+
+ argc = parse_options(argc, argv, check_feature_options,
+ check_feature_usage, 0);
+
+ if (!argc)
+ usage_with_options(check_feature_usage, check_feature_options);
+
+ if (argc > 1) {
+ pr_err("Too many arguments passed to 'perf check feature'\n");
+ return -1;
+ }
+
+ feature_enabled = 1;
+ /* feature_list is a non-const copy of 'argv[0]' */
+ feature_list = strdup(argv[0]);
+ if (!feature_list) {
+ pr_err("ERROR: failed to allocate memory for feature list\n");
+ return -1;
+ }
+
+ feature_name = strtok(feature_list, ",");
+
+ while (feature_name) {
+ feature_enabled &= has_support(feature_name);
+ feature_name = strtok(NULL, ",");
+ }
+
+ free(feature_list);
+
+ return !feature_enabled;
+}
+
+int cmd_check(int argc, const char **argv)
+{
+ argc = parse_options_subcommand(argc, argv, check_options,
+ check_subcommands, check_usage, 0);
+
+ if (!argc)
+ usage_with_options(check_usage, check_options);
+
+ if (strcmp(argv[0], "feature") == 0)
+ return subcommand_feature(argc, argv);
+
+ /* If no subcommand matched above, print usage help */
+ pr_err("Unknown subcommand: %s\n", argv[0]);
+ usage_with_options(check_usage, check_options);
+
+ /* free usage string allocated by parse_options_subcommand */
+ free((void *)check_usage[0]);
+
+ return 0;
+}
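
'perf check feature' walks a sentinel-terminated table with strcasecmp()
and ANDs the per-feature results across a comma-separated list, so the
command exits 0 only when every named feature is built in. A hedged,
stand-alone sketch of that lookup-and-accumulate logic over a toy table:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    struct feature { const char *name; int builtin; };

    static const struct feature features[] = {
            { "zlib", 1 },
            { "lzma", 0 },
            { NULL, 0 },    /* sentinel marks the end of the array */
    };

    static int has_support(const char *name)
    {
            for (int i = 0; features[i].name; i++)
                    if (strcasecmp(name, features[i].name) == 0)
                            return features[i].builtin;
            return 0;       /* unknown features count as unsupported */
    }

    int main(void)
    {
            /* mirrors 'perf check feature zlib,lzma' */
            int enabled = has_support("zlib") & has_support("LZMA");

            printf("%d\n", enabled);        /* 0: lzma is off here */
            return !enabled;        /* exit 0 only if all are built in */
    }
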
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
index 9a95871afc95..f0568431fbd5 100644
--- a/tools/perf/builtin-daemon.c
+++ b/tools/perf/builtin-daemon.c
@@ -1434,7 +1434,7 @@ static int __cmd_signal(struct daemon *daemon, struct option parent_options[],
}
memset(&cmd, 0, sizeof(cmd));
- cmd.signal.cmd = CMD_SIGNAL,
+ cmd.signal.cmd = CMD_SIGNAL;
cmd.signal.sig = SIGUSR2;
strncpy(cmd.signal.name, name, sizeof(cmd.signal.name) - 1);
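
The one-character fix above deserves a note: the original line ended in a
comma, so the two assignments still compiled as a single comma-operator
expression and happened to behave correctly. A small illustration:

    #include <stdio.h>

    int main(void)
    {
            int a, b;

            a = 1,  /* comma operator: both sides are evaluated, */
            b = 2;  /* so this acts like two statements here */
            printf("%d %d\n", a, b);        /* prints: 1 2 */
            return 0;
    }

It is harmless in this spot, but the pattern breaks silently once the
statements are reordered or placed under an unbraced if, hence the
cleanup to a proper semicolon.
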
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 57d300d8e570..23326dd20333 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -388,7 +388,7 @@ struct hist_entry_ops block_hist_ops = {
.free = block_hist_free,
};
-static int diff__process_sample_event(struct perf_tool *tool,
+static int diff__process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -431,8 +431,8 @@ static int diff__process_sample_event(struct perf_tool *tool,
goto out;
}
- hist__account_cycles(sample->branch_stack, &al, sample, false,
- NULL);
+ hist__account_cycles(sample->branch_stack, &al, sample,
+ false, NULL, evsel);
break;
case COMPUTE_STREAM:
@@ -467,21 +467,7 @@ out:
return ret;
}
-static struct perf_diff pdiff = {
- .tool = {
- .sample = diff__process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
- .namespaces = perf_event__process_namespaces,
- .cgroup = perf_event__process_cgroup,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
- },
-};
+static struct perf_diff pdiff;
static struct evsel *evsel_match(struct evsel *evsel,
struct evlist *evlist)
@@ -705,7 +691,7 @@ static void hists__precompute(struct hists *hists)
if (compute == COMPUTE_CYCLES) {
bh = container_of(he, struct block_hist, he);
init_block_hist(bh);
- block_info__process_sym(he, bh, NULL, 0);
+ block_info__process_sym(he, bh, NULL, 0, 0);
}
data__for_each_file_new(i, d) {
@@ -728,7 +714,7 @@ static void hists__precompute(struct hists *hists)
pair_bh = container_of(pair, struct block_hist,
he);
init_block_hist(pair_bh);
- block_info__process_sym(pair, pair_bh, NULL, 0);
+ block_info__process_sym(pair, pair_bh, NULL, 0, 0);
bh = container_of(he, struct block_hist, he);
@@ -1959,6 +1945,18 @@ int cmd_diff(int argc, const char **argv)
if (ret < 0)
return ret;
+ perf_tool__init(&pdiff.tool, /*ordered_events=*/true);
+ pdiff.tool.sample = diff__process_sample_event;
+ pdiff.tool.mmap = perf_event__process_mmap;
+ pdiff.tool.mmap2 = perf_event__process_mmap2;
+ pdiff.tool.comm = perf_event__process_comm;
+ pdiff.tool.exit = perf_event__process_exit;
+ pdiff.tool.fork = perf_event__process_fork;
+ pdiff.tool.lost = perf_event__process_lost;
+ pdiff.tool.namespaces = perf_event__process_namespaces;
+ pdiff.tool.cgroup = perf_event__process_cgroup;
+ pdiff.tool.ordering_requires_timestamps = true;
+
perf_config(diff__config, NULL);
argc = parse_options(argc, argv, options, diff_usage, 0);
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 7117656939e7..a9bd7bbef5a9 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -35,13 +35,13 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
- struct perf_tool tool = {
- /* only needed for pipe mode */
- .attr = perf_event__process_attr,
- .feature = process_header_feature,
- };
- bool has_tracepoint = false;
+ struct perf_tool tool;
+ bool has_tracepoint = false, has_group = false;
+ perf_tool__init(&tool, /*ordered_events=*/false);
+ /* only needed for pipe mode */
+ tool.attr = perf_event__process_attr;
+ tool.feature = process_header_feature;
session = perf_session__new(&data, &tool);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -54,11 +54,17 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
if (pos->core.attr.type == PERF_TYPE_TRACEPOINT)
has_tracepoint = true;
+
+ if (!evsel__is_group_leader(pos))
+ has_group = true;
}
if (has_tracepoint && !details->trace_fields)
printf("# Tip: use 'perf evlist --trace-fields' to show fields for tracepoint events\n");
+ if (has_group && !details->event_group)
+ printf("# Tip: use 'perf evlist -g' to show group information\n");
+
perf_session__delete(session);
return 0;
}
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index eb30c8eca488..abcdc49b7a98 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -13,6 +13,7 @@
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
+#include <inttypes.h>
#include <math.h>
#include <poll.h>
#include <ctype.h>
@@ -22,15 +23,18 @@
#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
+#include <api/io.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
+#include "hashmap.h"
#include "thread_map.h"
#include "strfilter.h"
#include "util/cap.h"
#include "util/config.h"
#include "util/ftrace.h"
+#include "util/stat.h"
#include "util/units.h"
#include "util/parse-sublevel-options.h"
@@ -59,6 +63,41 @@ static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
done = true;
}
+static bool check_ftrace_capable(void)
+{
+ bool used_root;
+
+ if (perf_cap__capable(CAP_PERFMON, &used_root))
+ return true;
+
+ if (!used_root && perf_cap__capable(CAP_SYS_ADMIN, &used_root))
+ return true;
+
+ pr_err("ftrace only works for %s!\n",
+ used_root ? "root"
+ : "users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
+ );
+ return false;
+}
+
+static bool is_ftrace_supported(void)
+{
+ char *file;
+ bool supported = false;
+
+ file = get_tracing_file("set_ftrace_pid");
+ if (!file) {
+ pr_debug("cannot get tracing file set_ftrace_pid\n");
+ return false;
+ }
+
+ if (!access(file, F_OK))
+ supported = true;
+
+ put_tracing_file(file);
+ return supported;
+}
+
static int __write_tracing_file(const char *name, const char *val, bool append)
{
char *file;
@@ -228,6 +267,7 @@ static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
write_tracing_option_file("funcgraph-irqs", "1");
write_tracing_option_file("funcgraph-proc", "0");
write_tracing_option_file("funcgraph-abstime", "0");
+ write_tracing_option_file("funcgraph-tail", "0");
write_tracing_option_file("latency-format", "0");
write_tracing_option_file("irq-info", "0");
}
@@ -464,6 +504,17 @@ static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
return 0;
}
+static int set_tracing_funcgraph_tail(struct perf_ftrace *ftrace)
+{
+ if (!ftrace->graph_tail)
+ return 0;
+
+ if (write_tracing_option_file("funcgraph-tail", "1") < 0)
+ return -1;
+
+ return 0;
+}
+
static int set_tracing_thresh(struct perf_ftrace *ftrace)
{
int ret;
@@ -540,6 +591,11 @@ static int set_tracing_options(struct perf_ftrace *ftrace)
return -1;
}
+ if (set_tracing_funcgraph_tail(ftrace) < 0) {
+ pr_err("failed to set tracing option funcgraph-tail\n");
+ return -1;
+ }
+
return 0;
}
@@ -569,18 +625,6 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace)
.events = POLLIN,
};
- if (!(perf_cap__capable(CAP_PERFMON) ||
- perf_cap__capable(CAP_SYS_ADMIN))) {
- pr_err("ftrace only works for %s!\n",
-#ifdef HAVE_LIBCAP_SUPPORT
- "users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
-#else
- "root"
-#endif
- );
- return -1;
- }
-
select_tracer(ftrace);
if (reset_tracing_files(ftrace) < 0) {
@@ -885,18 +929,6 @@ static int __cmd_latency(struct perf_ftrace *ftrace)
};
int buckets[NUM_BUCKET] = { };
- if (!(perf_cap__capable(CAP_PERFMON) ||
- perf_cap__capable(CAP_SYS_ADMIN))) {
- pr_err("ftrace only works for %s!\n",
-#ifdef HAVE_LIBCAP_SUPPORT
- "users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
-#else
- "root"
-#endif
- );
- return -1;
- }
-
trace_fd = prepare_func_latency(ftrace);
if (trace_fd < 0)
goto out;
@@ -950,6 +982,326 @@ out:
return (done && !workload_exec_errno) ? 0 : -1;
}
+static size_t profile_hash(long func, void *ctx __maybe_unused)
+{
+ return str_hash((char *)func);
+}
+
+static bool profile_equal(long func1, long func2, void *ctx __maybe_unused)
+{
+ return !strcmp((char *)func1, (char *)func2);
+}
+
+static int prepare_func_profile(struct perf_ftrace *ftrace)
+{
+ ftrace->tracer = "function_graph";
+ ftrace->graph_tail = 1;
+
+ ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL);
+ if (ftrace->profile_hash == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* This is saved in a hashmap keyed by the function name */
+struct ftrace_profile_data {
+ struct stats st;
+};
+
+static int add_func_duration(struct perf_ftrace *ftrace, char *func, double time_ns)
+{
+ struct ftrace_profile_data *prof = NULL;
+
+ if (!hashmap__find(ftrace->profile_hash, func, &prof)) {
+ char *key = strdup(func);
+
+ if (key == NULL)
+ return -ENOMEM;
+
+ prof = zalloc(sizeof(*prof));
+ if (prof == NULL) {
+ free(key);
+ return -ENOMEM;
+ }
+
+ init_stats(&prof->st);
+ hashmap__add(ftrace->profile_hash, key, prof);
+ }
+
+ update_stats(&prof->st, time_ns);
+ return 0;
+}
+
+/*
+ * The ftrace function_graph text output normally looks like this:
+ *
+ * CPU DURATION FUNCTION
+ *
+ * 0) | syscall_trace_enter.isra.0() {
+ * 0) | __audit_syscall_entry() {
+ * 0) | auditd_test_task() {
+ * 0) 0.271 us | __rcu_read_lock();
+ * 0) 0.275 us | __rcu_read_unlock();
+ * 0) 1.254 us | } /\* auditd_test_task *\/
+ * 0) 0.279 us | ktime_get_coarse_real_ts64();
+ * 0) 2.227 us | } /\* __audit_syscall_entry *\/
+ * 0) 2.713 us | } /\* syscall_trace_enter.isra.0 *\/
+ *
+ * Parse the line and get the duration and function name.
+ */
+static int parse_func_duration(struct perf_ftrace *ftrace, char *line, size_t len)
+{
+ char *p;
+ char *func;
+ double duration;
+
+ /* skip CPU */
+ p = strchr(line, ')');
+ if (p == NULL)
+ return 0;
+
+ /* get duration */
+ p = skip_spaces(p + 1);
+
+ /* no duration? */
+ if (p == NULL || *p == '|')
+ return 0;
+
+ /* skip markers like '*' or '!' that flag long durations */
+ if (!isdigit(*p))
+ p++;
+
+ duration = strtod(p, &p);
+
+ if (strncmp(p, " us", 3)) {
+ pr_debug("non-usec time found.. ignoring\n");
+ return 0;
+ }
+
+ /*
+ * The profile stats keep the max and min values as integers,
+ * so convert to nsec time to keep the max accurate.
+ */
+ duration *= 1000;
+
+ /* skip to the pipe */
+ while (p < line + len && *p != '|')
+ p++;
+
+ if (*p++ != '|')
+ return -EINVAL;
+
+ /* get function name */
+ func = skip_spaces(p);
+
+ /* skip the closing bracket and the start of comment */
+ if (*func == '}')
+ func += 5;
+
+ /* remove the trailing semicolon or end-of-comment marker */
+ p = line + len - 1;
+ while (!isalnum(*p) && *p != ']') {
+ *p = '\0';
+ --p;
+ }
+
+ return add_func_duration(ftrace, func, duration);
+}
+
+enum perf_ftrace_profile_sort_key {
+ PFP_SORT_TOTAL = 0,
+ PFP_SORT_AVG,
+ PFP_SORT_MAX,
+ PFP_SORT_COUNT,
+ PFP_SORT_NAME,
+};
+
+static enum perf_ftrace_profile_sort_key profile_sort = PFP_SORT_TOTAL;
+
+static int cmp_profile_data(const void *a, const void *b)
+{
+ const struct hashmap_entry *e1 = *(const struct hashmap_entry **)a;
+ const struct hashmap_entry *e2 = *(const struct hashmap_entry **)b;
+ struct ftrace_profile_data *p1 = e1->pvalue;
+ struct ftrace_profile_data *p2 = e2->pvalue;
+ double v1, v2;
+
+ switch (profile_sort) {
+ case PFP_SORT_NAME:
+ return strcmp(e1->pkey, e2->pkey);
+ case PFP_SORT_AVG:
+ v1 = p1->st.mean;
+ v2 = p2->st.mean;
+ break;
+ case PFP_SORT_MAX:
+ v1 = p1->st.max;
+ v2 = p2->st.max;
+ break;
+ case PFP_SORT_COUNT:
+ v1 = p1->st.n;
+ v2 = p2->st.n;
+ break;
+ case PFP_SORT_TOTAL:
+ default:
+ v1 = p1->st.n * p1->st.mean;
+ v2 = p2->st.n * p2->st.mean;
+ break;
+ }
+
+ if (v1 > v2)
+ return -1;
+ else
+ return 1;
+}
+
+static void print_profile_result(struct perf_ftrace *ftrace)
+{
+ struct hashmap_entry *entry, **profile;
+ size_t i, nr, bkt;
+
+ nr = hashmap__size(ftrace->profile_hash);
+ if (nr == 0)
+ return;
+
+ profile = calloc(nr, sizeof(*profile));
+ if (profile == NULL) {
+ pr_err("failed to allocate memory for the result\n");
+ return;
+ }
+
+ i = 0;
+ hashmap__for_each_entry(ftrace->profile_hash, entry, bkt)
+ profile[i++] = entry;
+
+ assert(i == nr);
+
+ qsort(profile, nr, sizeof(*profile), cmp_profile_data);
+
+ printf("# %10s %10s %10s %10s %s\n",
+ "Total (us)", "Avg (us)", "Max (us)", "Count", "Function");
+
+ for (i = 0; i < nr; i++) {
+ const char *name = profile[i]->pkey;
+ struct ftrace_profile_data *p = profile[i]->pvalue;
+
+ printf("%12.3f %10.3f %6"PRIu64".%03"PRIu64" %10.0f %s\n",
+ p->st.n * p->st.mean / 1000, p->st.mean / 1000,
+ p->st.max / 1000, p->st.max % 1000, p->st.n, name);
+ }
+
+ free(profile);
+
+ hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) {
+ free((char *)entry->pkey);
+ free(entry->pvalue);
+ }
+
+ hashmap__free(ftrace->profile_hash);
+ ftrace->profile_hash = NULL;
+}
+
+static int __cmd_profile(struct perf_ftrace *ftrace)
+{
+ char *trace_file;
+ int trace_fd;
+ char buf[4096];
+ struct io io;
+ char *line = NULL;
+ size_t line_len = 0;
+
+ if (prepare_func_profile(ftrace) < 0) {
+ pr_err("failed to prepare func profiler\n");
+ goto out;
+ }
+
+ if (reset_tracing_files(ftrace) < 0) {
+ pr_err("failed to reset ftrace\n");
+ goto out;
+ }
+
+ /* reset ftrace buffer */
+ if (write_tracing_file("trace", "0") < 0)
+ goto out;
+
+ if (set_tracing_options(ftrace) < 0)
+ return -1;
+
+ if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
+ pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
+ goto out_reset;
+ }
+
+ setup_pager();
+
+ trace_file = get_tracing_file("trace_pipe");
+ if (!trace_file) {
+ pr_err("failed to open trace_pipe\n");
+ goto out_reset;
+ }
+
+ trace_fd = open(trace_file, O_RDONLY);
+
+ put_tracing_file(trace_file);
+
+ if (trace_fd < 0) {
+ pr_err("failed to open trace_pipe\n");
+ goto out_reset;
+ }
+
+ fcntl(trace_fd, F_SETFL, O_NONBLOCK);
+
+ if (write_tracing_file("tracing_on", "1") < 0) {
+ pr_err("can't enable tracing\n");
+ goto out_close_fd;
+ }
+
+ evlist__start_workload(ftrace->evlist);
+
+ io__init(&io, trace_fd, buf, sizeof(buf));
+ io.timeout_ms = -1;
+
+ while (!done && !io.eof) {
+ if (io__getline(&io, &line, &line_len) < 0)
+ break;
+
+ if (parse_func_duration(ftrace, line, line_len) < 0)
+ break;
+ }
+
+ write_tracing_file("tracing_on", "0");
+
+ if (workload_exec_errno) {
+ const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
+ /* flush stdout first so below error msg appears at the end. */
+ fflush(stdout);
+ pr_err("workload failed: %s\n", emsg);
+ goto out_free_line;
+ }
+
+ /* read remaining buffer contents */
+ io.timeout_ms = 0;
+ while (!io.eof) {
+ if (io__getline(&io, &line, &line_len) < 0)
+ break;
+
+ if (parse_func_duration(ftrace, line, line_len) < 0)
+ break;
+ }
+
+ print_profile_result(ftrace);
+
+out_free_line:
+ free(line);
+out_close_fd:
+ close(trace_fd);
+out_reset:
+ reset_tracing_files(ftrace);
+out:
+ return (done && !workload_exec_errno) ? 0 : -1;
+}
+
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
struct perf_ftrace *ftrace = cb;
@@ -1099,6 +1451,7 @@ static int parse_graph_tracer_opts(const struct option *opt,
{ .name = "verbose", .value_ptr = &ftrace->graph_verbose },
{ .name = "thresh", .value_ptr = &ftrace->graph_thresh },
{ .name = "depth", .value_ptr = &ftrace->graph_depth },
+ { .name = "tail", .value_ptr = &ftrace->graph_tail },
{ .name = NULL, }
};
@@ -1112,10 +1465,35 @@ static int parse_graph_tracer_opts(const struct option *opt,
return 0;
}
+static int parse_sort_key(const struct option *opt, const char *str, int unset)
+{
+ enum perf_ftrace_profile_sort_key *key = (void *)opt->value;
+
+ if (unset)
+ return 0;
+
+ if (!strcmp(str, "total"))
+ *key = PFP_SORT_TOTAL;
+ else if (!strcmp(str, "avg"))
+ *key = PFP_SORT_AVG;
+ else if (!strcmp(str, "max"))
+ *key = PFP_SORT_MAX;
+ else if (!strcmp(str, "count"))
+ *key = PFP_SORT_COUNT;
+ else if (!strcmp(str, "name"))
+ *key = PFP_SORT_NAME;
+ else {
+ pr_err("Unknown sort key: %s\n", str);
+ return -1;
+ }
+ return 0;
+}
+
enum perf_ftrace_subcommand {
PERF_FTRACE_NONE,
PERF_FTRACE_TRACE,
PERF_FTRACE_LATENCY,
+ PERF_FTRACE_PROFILE,
};
int cmd_ftrace(int argc, const char **argv)
@@ -1181,13 +1559,31 @@ int cmd_ftrace(int argc, const char **argv)
"Use nano-second histogram"),
OPT_PARENT(common_options),
};
+ const struct option profile_options[] = {
+ OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
+ "Trace given functions using function tracer",
+ parse_filter_func),
+ OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
+ "Do not trace given functions", parse_filter_func),
+ OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
+ "Trace given functions using function_graph tracer",
+ parse_filter_func),
+ OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
+ "Set nograph filter on given functions", parse_filter_func),
+ OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
+ "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
+ OPT_CALLBACK('s', "sort", &profile_sort, "key",
+ "Sort result by key: total (default), avg, max, count, name.",
+ parse_sort_key),
+ OPT_PARENT(common_options),
+ };
const struct option *options = ftrace_options;
const char * const ftrace_usage[] = {
"perf ftrace [<options>] [<command>]",
"perf ftrace [<options>] -- [<command>] [<options>]",
- "perf ftrace {trace|latency} [<options>] [<command>]",
- "perf ftrace {trace|latency} [<options>] -- [<command>] [<options>]",
+ "perf ftrace {trace|latency|profile} [<options>] [<command>]",
+ "perf ftrace {trace|latency|profile} [<options>] -- [<command>] [<options>]",
NULL
};
enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE;
@@ -1202,6 +1598,14 @@ int cmd_ftrace(int argc, const char **argv)
signal(SIGCHLD, sig_handler);
signal(SIGPIPE, sig_handler);
+ if (!check_ftrace_capable())
+ return -1;
+
+ if (!is_ftrace_supported()) {
+ pr_err("ftrace is not supported on this system\n");
+ return -ENOTSUP;
+ }
+
ret = perf_config(perf_ftrace_config, &ftrace);
if (ret < 0)
return -1;
@@ -1212,6 +1616,9 @@ int cmd_ftrace(int argc, const char **argv)
} else if (!strcmp(argv[1], "latency")) {
subcmd = PERF_FTRACE_LATENCY;
options = latency_options;
+ } else if (!strcmp(argv[1], "profile")) {
+ subcmd = PERF_FTRACE_PROFILE;
+ options = profile_options;
}
if (subcmd != PERF_FTRACE_NONE) {
@@ -1247,6 +1654,9 @@ int cmd_ftrace(int argc, const char **argv)
}
cmd_func = __cmd_latency;
break;
+ case PERF_FTRACE_PROFILE:
+ cmd_func = __cmd_profile;
+ break;
case PERF_FTRACE_NONE:
default:
pr_err("Invalid subcommand\n");
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index b2a368ae295a..0854d3cd9f6a 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -417,7 +417,7 @@ static void open_html(const char *path)
static int show_html_page(const char *perf_cmd)
{
const char *page = cmd_to_page(perf_cmd);
- char *page_path; /* it leaks but we exec bellow */
+ char *page_path; /* it leaks but we exec below */
if (get_html_page_path(&page_path, page) < 0)
return -1;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index a212678d47be..d6989195a061 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -103,18 +103,24 @@ struct guest_session {
struct guest_event ev;
};
+enum build_id_rewrite_style {
+ BID_RWS__NONE = 0,
+ BID_RWS__INJECT_HEADER_LAZY,
+ BID_RWS__INJECT_HEADER_ALL,
+ BID_RWS__MMAP2_BUILDID_ALL,
+ BID_RWS__MMAP2_BUILDID_LAZY,
+};
+
struct perf_inject {
struct perf_tool tool;
struct perf_session *session;
- bool build_ids;
- bool build_id_all;
+ enum build_id_rewrite_style build_id_style;
bool sched_stat;
bool have_auxtrace;
bool strip;
bool jit_mode;
bool in_place_update;
bool in_place_update_dry_run;
- bool is_pipe;
bool copy_kcore_dir;
const char *input_name;
struct perf_data output;
@@ -126,6 +132,7 @@ struct perf_inject {
struct perf_file_section secs[HEADER_FEAT_BITS];
struct guest_session guest_session;
struct strlist *known_build_ids;
+ const struct evsel *mmap_evsel;
};
struct event_entry {
@@ -134,8 +141,23 @@ struct event_entry {
union perf_event event[];
};
-static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
- struct machine *machine, u8 cpumode, u32 flags);
+static int tool__inject_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ const struct evsel *evsel,
+ __u16 misc,
+ const char *filename,
+ struct dso *dso, u32 flags);
+static int tool__inject_mmap2_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ const struct evsel *evsel,
+ __u16 misc,
+ __u32 pid, __u32 tid,
+ __u64 start, __u64 len, __u64 pgoff,
+ struct dso *dso,
+ __u32 prot, __u32 flags,
+ const char *filename);
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
@@ -149,8 +171,9 @@ static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
return 0;
}
-static int perf_event__repipe_synth(struct perf_tool *tool,
+static int perf_event__repipe_synth(const struct perf_tool *tool,
union perf_event *event)
{
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
@@ -158,7 +181,7 @@ static int perf_event__repipe_synth(struct perf_tool *tool,
return output_bytes(inject, event, event->header.size);
}
-static int perf_event__repipe_oe_synth(struct perf_tool *tool,
+static int perf_event__repipe_oe_synth(const struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe __maybe_unused)
{
@@ -166,7 +189,7 @@ static int perf_event__repipe_oe_synth(struct perf_tool *tool,
}
#ifdef HAVE_JITDUMP
-static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
+static int perf_event__drop_oe(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe __maybe_unused)
{
@@ -188,7 +211,7 @@ static int perf_event__repipe_op4_synth(struct perf_session *session,
return perf_event__repipe_synth(session->tool, event);
}
-static int perf_event__repipe_attr(struct perf_tool *tool,
+static int perf_event__repipe_attr(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist)
{
@@ -200,13 +223,14 @@ static int perf_event__repipe_attr(struct perf_tool *tool,
if (ret)
return ret;
- if (!inject->is_pipe)
+ /* If the output isn't a pipe then the attributes will be written as part of the header. */
+ if (!inject->output.is_pipe)
return 0;
return perf_event__repipe_synth(tool, event);
}
-static int perf_event__repipe_event_update(struct perf_tool *tool,
+static int perf_event__repipe_event_update(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist __maybe_unused)
{
@@ -237,7 +261,7 @@ static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t
static s64 perf_event__repipe_auxtrace(struct perf_session *session,
union perf_event *event)
{
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
int ret;
@@ -284,7 +308,7 @@ perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
#endif
-static int perf_event__repipe(struct perf_tool *tool,
+static int perf_event__repipe(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -292,7 +316,7 @@ static int perf_event__repipe(struct perf_tool *tool,
return perf_event__repipe_synth(tool, event);
}
-static int perf_event__drop(struct perf_tool *tool __maybe_unused,
+static int perf_event__drop(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -300,7 +324,7 @@ static int perf_event__drop(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int perf_event__drop_aux(struct perf_tool *tool,
+static int perf_event__drop_aux(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
@@ -341,13 +365,13 @@ perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
return ev;
}
-typedef int (*inject_handler)(struct perf_tool *tool,
+typedef int (*inject_handler)(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
struct machine *machine);
-static int perf_event__repipe_sample(struct perf_tool *tool,
+static int perf_event__repipe_sample(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -372,46 +396,8 @@ static int perf_event__repipe_sample(struct perf_tool *tool,
return perf_event__repipe_synth(tool, event);
}
-static int perf_event__repipe_mmap(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
-{
- int err;
-
- err = perf_event__process_mmap(tool, event, sample, machine);
- perf_event__repipe(tool, event, sample, machine);
-
- return err;
-}
-
-#ifdef HAVE_JITDUMP
-static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
-{
- struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
- u64 n = 0;
- int ret;
-
- /*
- * if jit marker, then inject jit mmaps and generate ELF images
- */
- ret = jit_process(inject->session, &inject->output, machine,
- event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
- if (ret < 0)
- return ret;
- if (ret) {
- inject->bytes_written += n;
- return 0;
- }
- return perf_event__repipe_mmap(tool, event, sample, machine);
-}
-#endif
-
static struct dso *findnew_dso(int pid, int tid, const char *filename,
- struct dso_id *id, struct machine *machine)
+ const struct dso_id *id, struct machine *machine)
{
struct thread *thread;
struct nsinfo *nsi = NULL;
@@ -455,117 +441,173 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename,
return dso;
}
-static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
+/*
+ * The evsel used for the sample ID of mmap events, typically stashed while
+ * they are processed. If not stashed, search the evlist for the first event
+ * that gathers mmaps.
+ */
+static const struct evsel *inject__mmap_evsel(struct perf_inject *inject)
{
- struct dso *dso;
+ struct evsel *pos;
- dso = findnew_dso(event->mmap.pid, event->mmap.tid,
- event->mmap.filename, NULL, machine);
+ if (inject->mmap_evsel)
+ return inject->mmap_evsel;
- if (dso && !dso__hit(dso)) {
- dso__set_hit(dso);
- dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
+ evlist__for_each_entry(inject->session->evlist, pos) {
+ if (pos->core.attr.mmap) {
+ inject->mmap_evsel = pos;
+ return pos;
+ }
}
- dso__put(dso);
-
- return perf_event__repipe(tool, event, sample, machine);
+ pr_err("No mmap events found\n");
+ return NULL;
}
-static int perf_event__repipe_mmap2(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
+static int perf_event__repipe_common_mmap(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine,
+ __u32 pid, __u32 tid,
+ __u64 start, __u64 len, __u64 pgoff,
+ __u32 flags, __u32 prot,
+ const char *filename,
+ const struct dso_id *dso_id,
+ int (*perf_event_process)(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine))
{
- int err;
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ struct dso *dso = NULL;
+ bool dso_sought = false;
- err = perf_event__process_mmap2(tool, event, sample, machine);
- perf_event__repipe(tool, event, sample, machine);
+#ifdef HAVE_JITDUMP
+ if (inject->jit_mode) {
+ u64 n = 0;
+ int ret;
+ /* If jit marker, then inject jit mmaps and generate ELF images. */
+ ret = jit_process(inject->session, &inject->output, machine,
+ filename, pid, tid, &n);
+ if (ret < 0)
+ return ret;
+ if (ret) {
+ inject->bytes_written += n;
+ return 0;
+ }
+ }
+#endif
if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
- struct dso *dso;
-
- dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
- event->mmap2.filename, NULL, machine);
+ dso = findnew_dso(pid, tid, filename, dso_id, machine);
+ dso_sought = true;
if (dso) {
/* mark it not to inject build-id */
dso__set_hit(dso);
}
- dso__put(dso);
}
+ if (inject->build_id_style == BID_RWS__INJECT_HEADER_ALL) {
+ if (!dso_sought) {
+ dso = findnew_dso(pid, tid, filename, dso_id, machine);
+ dso_sought = true;
+ }
- return err;
-}
+ if (dso && !dso__hit(dso)) {
+ struct evsel *evsel = evlist__event2evsel(inject->session->evlist, event);
-#ifdef HAVE_JITDUMP
-static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
-{
- struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
- u64 n = 0;
- int ret;
+ if (evsel) {
+ dso__set_hit(dso);
+ tool__inject_build_id(tool, sample, machine, evsel,
+ /*misc=*/sample->cpumode,
+ filename, dso, flags);
+ }
+ }
+ } else {
+ int err;
- /*
- * if jit marker, then inject jit mmaps and generate ELF images
- */
- ret = jit_process(inject->session, &inject->output, machine,
- event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
- if (ret < 0)
- return ret;
- if (ret) {
- inject->bytes_written += n;
- return 0;
- }
- return perf_event__repipe_mmap2(tool, event, sample, machine);
-}
-#endif
+ /*
+ * Remember the evsel for lazy build id generation. It is used
+ * for the sample id header type.
+ */
+ if ((inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
+ inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) &&
+ !inject->mmap_evsel)
+ inject->mmap_evsel = evlist__event2evsel(inject->session->evlist, event);
-static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
-{
- struct dso_id dso_id = {
- .maj = event->mmap2.maj,
- .min = event->mmap2.min,
- .ino = event->mmap2.ino,
- .ino_generation = event->mmap2.ino_generation,
- };
- struct dso *dso;
+ /* Create the thread, map, etc. Not done for the unordered inject all case. */
+ err = perf_event_process(tool, event, sample, machine);
- if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
- /* cannot use dso_id since it'd have invalid info */
- dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
- event->mmap2.filename, NULL, machine);
- if (dso) {
- /* mark it not to inject build-id */
- dso__set_hit(dso);
+ if (err) {
+ dso__put(dso);
+ return err;
}
- dso__put(dso);
- perf_event__repipe(tool, event, sample, machine);
- return 0;
}
+ if ((inject->build_id_style == BID_RWS__MMAP2_BUILDID_ALL) &&
+ !(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
+ struct evsel *evsel = evlist__event2evsel(inject->session->evlist, event);
- dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
- event->mmap2.filename, &dso_id, machine);
-
- if (dso && !dso__hit(dso)) {
- dso__set_hit(dso);
- dso__inject_build_id(dso, tool, machine, sample->cpumode,
- event->mmap2.flags);
+ if (evsel && !dso_sought) {
+ dso = findnew_dso(pid, tid, filename, dso_id, machine);
+ dso_sought = true;
+ }
+ if (evsel && dso &&
+ !tool__inject_mmap2_build_id(tool, sample, machine, evsel,
+ sample->cpumode | PERF_RECORD_MISC_MMAP_BUILD_ID,
+ pid, tid, start, len, pgoff,
+ dso,
+ prot, flags,
+ filename)) {
+ /* Injected mmap2 so no need to repipe. */
+ dso__put(dso);
+ return 0;
+ }
}
dso__put(dso);
+ if (inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY)
+ return 0;
- perf_event__repipe(tool, event, sample, machine);
+ return perf_event__repipe(tool, event, sample, machine);
+}
- return 0;
+static int perf_event__repipe_mmap(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return perf_event__repipe_common_mmap(
+ tool, event, sample, machine,
+ event->mmap.pid, event->mmap.tid,
+ event->mmap.start, event->mmap.len, event->mmap.pgoff,
+ /*flags=*/0, PROT_EXEC,
+ event->mmap.filename, /*dso_id=*/NULL,
+ perf_event__process_mmap);
}
-static int perf_event__repipe_fork(struct perf_tool *tool,
+static int perf_event__repipe_mmap2(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct dso_id id;
+ struct dso_id *dso_id = NULL;
+
+ if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
+ id.maj = event->mmap2.maj;
+ id.min = event->mmap2.min;
+ id.ino = event->mmap2.ino;
+ id.ino_generation = event->mmap2.ino_generation;
+ dso_id = &id;
+ }
+
+ return perf_event__repipe_common_mmap(
+ tool, event, sample, machine,
+ event->mmap2.pid, event->mmap2.tid,
+ event->mmap2.start, event->mmap2.len, event->mmap2.pgoff,
+ event->mmap2.flags, event->mmap2.prot,
+ event->mmap2.filename, dso_id,
+ perf_event__process_mmap2);
+}
+
+static int perf_event__repipe_fork(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -578,7 +620,7 @@ static int perf_event__repipe_fork(struct perf_tool *tool,
return err;
}
-static int perf_event__repipe_comm(struct perf_tool *tool,
+static int perf_event__repipe_comm(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -591,7 +633,7 @@ static int perf_event__repipe_comm(struct perf_tool *tool,
return err;
}
-static int perf_event__repipe_namespaces(struct perf_tool *tool,
+static int perf_event__repipe_namespaces(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -603,7 +645,7 @@ static int perf_event__repipe_namespaces(struct perf_tool *tool,
return err;
}
-static int perf_event__repipe_exit(struct perf_tool *tool,
+static int perf_event__repipe_exit(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -712,16 +754,20 @@ static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
return false;
}
-static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
- struct machine *machine, u8 cpumode, u32 flags)
+static int tool__inject_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ const struct evsel *evsel,
+ __u16 misc,
+ const char *filename,
+ struct dso *dso, u32 flags)
{
- struct perf_inject *inject = container_of(tool, struct perf_inject,
- tool);
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
int err;
- if (is_anon_memory(dso__long_name(dso)) || flags & MAP_HUGETLB)
+ if (is_anon_memory(filename) || flags & MAP_HUGETLB)
return 0;
- if (is_no_dso_memory(dso__long_name(dso)))
+ if (is_no_dso_memory(filename))
return 0;
if (inject->known_build_ids != NULL &&
@@ -729,27 +775,158 @@ static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
return 1;
if (dso__read_build_id(dso) < 0) {
- pr_debug("no build_id found for %s\n", dso__long_name(dso));
+ pr_debug("no build_id found for %s\n", filename);
return -1;
}
- err = perf_event__synthesize_build_id(tool, dso, cpumode,
- perf_event__repipe, machine);
+ err = perf_event__synthesize_build_id(tool, sample, machine,
+ perf_event__repipe,
+ evsel, misc, dso__bid(dso),
+ filename);
if (err) {
- pr_err("Can't synthesize build_id event for %s\n", dso__long_name(dso));
+ pr_err("Can't synthesize build_id event for %s\n", filename);
return -1;
}
return 0;
}
-int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
+static int tool__inject_mmap2_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ const struct evsel *evsel,
+ __u16 misc,
+ __u32 pid, __u32 tid,
+ __u64 start, __u64 len, __u64 pgoff,
+ struct dso *dso,
+ __u32 prot, __u32 flags,
+ const char *filename)
+{
+ int err;
+
+ /* Return to repipe anonymous maps. */
+ if (is_anon_memory(filename) || flags & MAP_HUGETLB)
+ return 1;
+ if (is_no_dso_memory(filename))
+ return 1;
+
+ if (dso__read_build_id(dso)) {
+ pr_debug("no build_id found for %s\n", filename);
+ return -1;
+ }
+
+ err = perf_event__synthesize_mmap2_build_id(tool, sample, machine,
+ perf_event__repipe,
+ evsel,
+ misc, pid, tid,
+ start, len, pgoff,
+ dso__bid(dso),
+ prot, flags,
+ filename);
+ if (err) {
+ pr_err("Can't synthesize build_id event for %s\n", filename);
+ return -1;
+ }
+ return 0;
+}
+
+static int mark_dso_hit(const struct perf_inject *inject,
+ const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ const struct evsel *mmap_evsel,
+ struct map *map, bool sample_in_dso)
+{
+ struct dso *dso;
+ u16 misc = sample->cpumode;
+
+ if (!map)
+ return 0;
+
+ if (!sample_in_dso) {
+ u16 guest_mask = PERF_RECORD_MISC_GUEST_KERNEL |
+ PERF_RECORD_MISC_GUEST_USER;
+
+ if ((misc & guest_mask) != 0) {
+ misc &= PERF_RECORD_MISC_HYPERVISOR;
+ misc |= __map__is_kernel(map)
+ ? PERF_RECORD_MISC_GUEST_KERNEL
+ : PERF_RECORD_MISC_GUEST_USER;
+ } else {
+ misc &= PERF_RECORD_MISC_HYPERVISOR;
+ misc |= __map__is_kernel(map)
+ ? PERF_RECORD_MISC_KERNEL
+ : PERF_RECORD_MISC_USER;
+ }
+ }
+ dso = map__dso(map);
+ if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY) {
+ if (dso && !dso__hit(dso)) {
+ dso__set_hit(dso);
+ tool__inject_build_id(tool, sample, machine,
+ mmap_evsel, misc, dso__long_name(dso), dso,
+ map__flags(map));
+ }
+ } else if (inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
+ if (!map__hit(map)) {
+ const struct build_id null_bid = { .size = 0 };
+ const struct build_id *bid = dso ? dso__bid(dso) : &null_bid;
+ const char *filename = dso ? dso__long_name(dso) : "";
+
+ map__set_hit(map);
+ perf_event__synthesize_mmap2_build_id(tool, sample, machine,
+ perf_event__repipe,
+ mmap_evsel,
+ misc,
+ sample->pid, sample->tid,
+ map__start(map),
+ map__end(map) - map__start(map),
+ map__pgoff(map),
+ bid,
+ map__prot(map),
+ map__flags(map),
+ filename);
+ }
+ }
+ return 0;
+}
+
+struct mark_dso_hit_args {
+ const struct perf_inject *inject;
+ const struct perf_tool *tool;
+ struct perf_sample *sample;
+ struct machine *machine;
+ const struct evsel *mmap_evsel;
+};
+
+static int mark_dso_hit_callback(struct callchain_cursor_node *node, void *data)
+{
+ struct mark_dso_hit_args *args = data;
+ struct map *map = node->ms.map;
+
+ return mark_dso_hit(args->inject, args->tool, args->sample, args->machine,
+ args->mmap_evsel, map, /*sample_in_dso=*/false);
+}
+
+int perf_event__inject_buildid(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
struct machine *machine)
{
struct addr_location al;
struct thread *thread;
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ struct mark_dso_hit_args args = {
+ .inject = inject,
+ .tool = tool,
+ /*
+ * Use the parsed sample data of the sample event, which will
+ * have a later timestamp than the mmap event.
+ */
+ .sample = sample,
+ .machine = machine,
+ .mmap_evsel = inject__mmap_evsel(inject),
+ };
addr_location__init(&al);
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
@@ -760,15 +937,13 @@ int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
}
if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
- struct dso *dso = map__dso(al.map);
-
- if (!dso__hit(dso)) {
- dso__set_hit(dso);
- dso__inject_build_id(dso, tool, machine,
- sample->cpumode, map__flags(al.map));
- }
+ mark_dso_hit(inject, tool, sample, machine, args.mmap_evsel, al.map,
+ /*sample_in_dso=*/true);
}
+ sample__for_each_callchain_node(thread, evsel, sample, PERF_MAX_STACK_DEPTH,
+ /*symbols=*/false, mark_dso_hit_callback, &args);
+
thread__put(thread);
repipe:
perf_event__repipe(tool, event, sample, machine);
@@ -776,7 +951,7 @@ repipe:
return 0;
}
-static int perf_inject__sched_process_exit(struct perf_tool *tool,
+static int perf_inject__sched_process_exit(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
@@ -796,7 +971,7 @@ static int perf_inject__sched_process_exit(struct perf_tool *tool,
return 0;
}
-static int perf_inject__sched_switch(struct perf_tool *tool,
+static int perf_inject__sched_switch(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -821,7 +996,7 @@ static int perf_inject__sched_switch(struct perf_tool *tool,
}
#ifdef HAVE_LIBTRACEEVENT
-static int perf_inject__sched_stat(struct perf_tool *tool,
+static int perf_inject__sched_stat(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel,
@@ -866,7 +1041,7 @@ static int guest_session__output_bytes(struct guest_session *gs, void *buf, size
return ret < 0 ? ret : 0;
}
-static int guest_session__repipe(struct perf_tool *tool,
+static int guest_session__repipe(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -1032,7 +1207,7 @@ static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 i
return NULL;
}
-static int process_attr(struct perf_tool *tool, union perf_event *event,
+static int process_attr(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
@@ -1160,7 +1335,7 @@ static u64 evlist__first_id(struct evlist *evlist)
return 0;
}
-static int process_build_id(struct perf_tool *tool,
+static int process_build_id(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -1173,17 +1348,27 @@ static int process_build_id(struct perf_tool *tool,
static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
{
struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
- u8 cpumode = dso__is_in_kernel_space(dso) ?
- PERF_RECORD_MISC_GUEST_KERNEL :
- PERF_RECORD_MISC_GUEST_USER;
+ struct perf_sample synth_sample = {
+ .pid = -1,
+ .tid = -1,
+ .time = -1,
+ .stream_id = -1,
+ .cpu = -1,
+ .period = 1,
+ .cpumode = dso__is_in_kernel_space(dso)
+ ? PERF_RECORD_MISC_GUEST_KERNEL
+ : PERF_RECORD_MISC_GUEST_USER,
+ };
if (!machine)
return -ENOMEM;
dso__set_hit(dso);
- return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
- process_build_id, machine);
+ return perf_event__synthesize_build_id(&inject->tool, &synth_sample, machine,
+ process_build_id, inject__mmap_evsel(inject),
+ /*misc=*/synth_sample.cpumode,
+ dso__bid(dso), dso__long_name(dso));
}
static int guest_session__add_build_ids_cb(struct dso *dso, void *data)
@@ -1210,7 +1395,7 @@ static int guest_session__add_build_ids(struct guest_session *gs)
gs);
}
-static int guest_session__ksymbol_event(struct perf_tool *tool,
+static int guest_session__ksymbol_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -1574,7 +1759,7 @@ static int guest_session__flush_events(struct guest_session *gs)
return guest_session__inject_events(gs, -1);
}
-static int host__repipe(struct perf_tool *tool,
+static int host__repipe(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -1647,7 +1832,7 @@ static int host__finished_init(struct perf_session *session, union perf_event *e
* guest events up to the same time. Finally write out the FINISHED_ROUND event
* itself.
*/
-static int host__finished_round(struct perf_tool *tool,
+static int host__finished_round(const struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe)
{
@@ -1665,7 +1850,7 @@ static int host__finished_round(struct perf_tool *tool,
return perf_event__repipe_oe_synth(tool, event, oe);
}
-static int host__context_switch(struct perf_tool *tool,
+static int host__context_switch(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -1719,7 +1904,7 @@ static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *
return 0;
}
-static int drop_sample(struct perf_tool *tool __maybe_unused,
+static int drop_sample(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel __maybe_unused,
@@ -1980,12 +2165,18 @@ static int __cmd_inject(struct perf_inject *inject)
struct guest_session *gs = &inject->guest_session;
struct perf_session *session = inject->session;
int fd = output_fd(inject);
- u64 output_data_offset;
+ u64 output_data_offset = perf_session__data_offset(session->evlist);
+ /*
+ * Pipe input hasn't loaded the attributes and will handle them as
+ * events. So that the attributes don't overlap the data, write the
+ * attributes after the data.
+ */
+ bool write_attrs_after_data = !inject->output.is_pipe && inject->session->data->is_pipe;
signal(SIGINT, sig_handler);
- if (inject->build_ids || inject->sched_stat ||
- inject->itrace_synth_opts.set || inject->build_id_all) {
+ if (inject->build_id_style != BID_RWS__NONE || inject->sched_stat ||
+ inject->itrace_synth_opts.set) {
inject->tool.mmap = perf_event__repipe_mmap;
inject->tool.mmap2 = perf_event__repipe_mmap2;
inject->tool.fork = perf_event__repipe_fork;
@@ -1994,12 +2185,8 @@ static int __cmd_inject(struct perf_inject *inject)
#endif
}
- output_data_offset = perf_session__data_offset(session->evlist);
-
- if (inject->build_id_all) {
- inject->tool.mmap = perf_event__repipe_buildid_mmap;
- inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
- } else if (inject->build_ids) {
+ if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
+ inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
inject->tool.sample = perf_event__inject_buildid;
} else if (inject->sched_stat) {
struct evsel *evsel;
@@ -2069,7 +2256,7 @@ static int __cmd_inject(struct perf_inject *inject)
*/
inject->tool.finished_init = host__finished_init;
/* Obey finished round ordering */
- inject->tool.finished_round = host__finished_round,
+ inject->tool.finished_round = host__finished_round;
/* Keep track of which CPU a VCPU is running on */
inject->tool.context_switch = host__context_switch;
/*
@@ -2092,7 +2279,7 @@ static int __cmd_inject(struct perf_inject *inject)
if (!inject->itrace_synth_opts.set)
auxtrace_index__free(&session->auxtrace_index);
- if (!inject->is_pipe && !inject->in_place_update)
+ if (!inject->output.is_pipe && !inject->in_place_update)
lseek(fd, output_data_offset, SEEK_SET);
ret = perf_session__process_events(session);
@@ -2111,15 +2298,15 @@ static int __cmd_inject(struct perf_inject *inject)
}
}
- if (!inject->is_pipe && !inject->in_place_update) {
+ if (!inject->output.is_pipe && !inject->in_place_update) {
struct inject_fc inj_fc = {
.fc.copy = feat_copy_cb,
.inject = inject,
};
- if (inject->build_ids)
- perf_header__set_feat(&session->header,
- HEADER_BUILD_ID);
+ if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
+ inject->build_id_style == BID_RWS__INJECT_HEADER_ALL)
+ perf_header__set_feat(&session->header, HEADER_BUILD_ID);
/*
* Keep all buildids when there is unprocessed AUX data because
* it is not known which ones the AUX trace hits.
@@ -2141,7 +2328,8 @@ static int __cmd_inject(struct perf_inject *inject)
}
session->header.data_offset = output_data_offset;
session->header.data_size = inject->bytes_written;
- perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);
+ perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc,
+ write_attrs_after_data);
if (inject->copy_kcore_dir) {
ret = copy_kcore_dir(inject);
@@ -2165,46 +2353,6 @@ static int __cmd_inject(struct perf_inject *inject)
int cmd_inject(int argc, const char **argv)
{
struct perf_inject inject = {
- .tool = {
- .sample = perf_event__repipe_sample,
- .read = perf_event__repipe_sample,
- .mmap = perf_event__repipe,
- .mmap2 = perf_event__repipe,
- .comm = perf_event__repipe,
- .namespaces = perf_event__repipe,
- .cgroup = perf_event__repipe,
- .fork = perf_event__repipe,
- .exit = perf_event__repipe,
- .lost = perf_event__repipe,
- .lost_samples = perf_event__repipe,
- .aux = perf_event__repipe,
- .itrace_start = perf_event__repipe,
- .aux_output_hw_id = perf_event__repipe,
- .context_switch = perf_event__repipe,
- .throttle = perf_event__repipe,
- .unthrottle = perf_event__repipe,
- .ksymbol = perf_event__repipe,
- .bpf = perf_event__repipe,
- .text_poke = perf_event__repipe,
- .attr = perf_event__repipe_attr,
- .event_update = perf_event__repipe_event_update,
- .tracing_data = perf_event__repipe_op2_synth,
- .finished_round = perf_event__repipe_oe_synth,
- .build_id = perf_event__repipe_op2_synth,
- .id_index = perf_event__repipe_op2_synth,
- .auxtrace_info = perf_event__repipe_op2_synth,
- .auxtrace_error = perf_event__repipe_op2_synth,
- .time_conv = perf_event__repipe_op2_synth,
- .thread_map = perf_event__repipe_op2_synth,
- .cpu_map = perf_event__repipe_op2_synth,
- .stat_config = perf_event__repipe_op2_synth,
- .stat = perf_event__repipe_op2_synth,
- .stat_round = perf_event__repipe_op2_synth,
- .feature = perf_event__repipe_op2_synth,
- .finished_init = perf_event__repipe_op2_synth,
- .compressed = perf_event__repipe_op4_synth,
- .auxtrace = perf_event__repipe_auxtrace,
- },
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
.output = {
@@ -2218,14 +2366,21 @@ int cmd_inject(int argc, const char **argv)
.use_stdio = true,
};
int ret;
- bool repipe = true;
const char *known_build_ids = NULL;
+ bool build_ids;
+ bool build_id_all;
+ bool mmap2_build_ids;
+ bool mmap2_build_id_all;
struct option options[] = {
- OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
+ OPT_BOOLEAN('b', "build-ids", &build_ids,
"Inject build-ids into the output stream"),
- OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
+ OPT_BOOLEAN(0, "buildid-all", &build_id_all,
"Inject build-ids of all DSOs into the output stream"),
+ OPT_BOOLEAN('B', "mmap2-buildids", &mmap2_build_ids,
+ "Drop unused mmap events, make others mmap2 with build IDs"),
+ OPT_BOOLEAN(0, "mmap2-buildid-all", &mmap2_build_id_all,
+ "Rewrite all mmap events as mmap2 events with build IDs"),
OPT_STRING(0, "known-build-ids", &known_build_ids,
"buildid path [,buildid path...]",
"build-ids to use for given paths"),
@@ -2269,6 +2424,7 @@ int cmd_inject(int argc, const char **argv)
"perf inject [<options>]",
NULL
};
+ bool ordered_events;
if (!inject.itrace_synth_opts.set) {
/* Disable eager loading of kernel symbols that adds overhead to perf inject. */
@@ -2321,22 +2477,63 @@ int cmd_inject(int argc, const char **argv)
return -1;
}
}
+ if (mmap2_build_ids)
+ inject.build_id_style = BID_RWS__MMAP2_BUILDID_LAZY;
+ if (mmap2_build_id_all)
+ inject.build_id_style = BID_RWS__MMAP2_BUILDID_ALL;
+ if (build_ids)
+ inject.build_id_style = BID_RWS__INJECT_HEADER_LAZY;
+ if (build_id_all)
+ inject.build_id_style = BID_RWS__INJECT_HEADER_ALL;
data.path = inject.input_name;
- if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
- inject.is_pipe = true;
- /*
- * Do not repipe header when input is a regular file
- * since either it can rewrite the header at the end
- * or write a new pipe header.
- */
- if (strcmp(inject.input_name, "-"))
- repipe = false;
- }
- inject.session = __perf_session__new(&data, repipe,
- output_fd(&inject),
- &inject.tool);
+ ordered_events = inject.jit_mode || inject.sched_stat ||
+ inject.build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
+ inject.build_id_style == BID_RWS__MMAP2_BUILDID_LAZY;
+ perf_tool__init(&inject.tool, ordered_events);
+ inject.tool.sample = perf_event__repipe_sample;
+ inject.tool.read = perf_event__repipe_sample;
+ inject.tool.mmap = perf_event__repipe;
+ inject.tool.mmap2 = perf_event__repipe;
+ inject.tool.comm = perf_event__repipe;
+ inject.tool.namespaces = perf_event__repipe;
+ inject.tool.cgroup = perf_event__repipe;
+ inject.tool.fork = perf_event__repipe;
+ inject.tool.exit = perf_event__repipe;
+ inject.tool.lost = perf_event__repipe;
+ inject.tool.lost_samples = perf_event__repipe;
+ inject.tool.aux = perf_event__repipe;
+ inject.tool.itrace_start = perf_event__repipe;
+ inject.tool.aux_output_hw_id = perf_event__repipe;
+ inject.tool.context_switch = perf_event__repipe;
+ inject.tool.throttle = perf_event__repipe;
+ inject.tool.unthrottle = perf_event__repipe;
+ inject.tool.ksymbol = perf_event__repipe;
+ inject.tool.bpf = perf_event__repipe;
+ inject.tool.text_poke = perf_event__repipe;
+ inject.tool.attr = perf_event__repipe_attr;
+ inject.tool.event_update = perf_event__repipe_event_update;
+ inject.tool.tracing_data = perf_event__repipe_op2_synth;
+ inject.tool.finished_round = perf_event__repipe_oe_synth;
+ inject.tool.build_id = perf_event__repipe_op2_synth;
+ inject.tool.id_index = perf_event__repipe_op2_synth;
+ inject.tool.auxtrace_info = perf_event__repipe_op2_synth;
+ inject.tool.auxtrace_error = perf_event__repipe_op2_synth;
+ inject.tool.time_conv = perf_event__repipe_op2_synth;
+ inject.tool.thread_map = perf_event__repipe_op2_synth;
+ inject.tool.cpu_map = perf_event__repipe_op2_synth;
+ inject.tool.stat_config = perf_event__repipe_op2_synth;
+ inject.tool.stat = perf_event__repipe_op2_synth;
+ inject.tool.stat_round = perf_event__repipe_op2_synth;
+ inject.tool.feature = perf_event__repipe_op2_synth;
+ inject.tool.finished_init = perf_event__repipe_op2_synth;
+ inject.tool.compressed = perf_event__repipe_op4_synth;
+ inject.tool.auxtrace = perf_event__repipe_auxtrace;
+ inject.tool.dont_split_sample_group = true;
+ inject.session = __perf_session__new(&data, &inject.tool,
+ /*trace_event_repipe=*/inject.output.is_pipe);
+
if (IS_ERR(inject.session)) {
ret = PTR_ERR(inject.session);
goto out_close_output;
@@ -2350,50 +2547,52 @@ int cmd_inject(int argc, const char **argv)
if (ret)
goto out_delete;
- if (!data.is_pipe && inject.output.is_pipe) {
+ if (inject.output.is_pipe) {
ret = perf_header__write_pipe(perf_data__fd(&inject.output));
if (ret < 0) {
pr_err("Couldn't write a new pipe header.\n");
goto out_delete;
}
- ret = perf_event__synthesize_for_pipe(&inject.tool,
- inject.session,
- &inject.output,
- perf_event__repipe);
- if (ret < 0)
- goto out_delete;
+ /*
+ * If the input is already a pipe then the features and
+	 * attributes don't need synthesizing; they will be present in
+ * the input.
+ */
+ if (!data.is_pipe) {
+ ret = perf_event__synthesize_for_pipe(&inject.tool,
+ inject.session,
+ &inject.output,
+ perf_event__repipe);
+ if (ret < 0)
+ goto out_delete;
+ }
}
- if (inject.build_ids && !inject.build_id_all) {
+ if (inject.build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
+ inject.build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
/*
 * to make sure the mmap records are ordered correctly, which
 * matters especially for jitted code mmaps. We cannot generate
 * the buildid hit list and inject the jit mmaps at the same
 * time for now.
*/
- inject.tool.ordered_events = true;
inject.tool.ordering_requires_timestamps = true;
- if (known_build_ids != NULL) {
- inject.known_build_ids =
- perf_inject__parse_known_build_ids(known_build_ids);
-
- if (inject.known_build_ids == NULL) {
- pr_err("Couldn't parse known build ids.\n");
- goto out_delete;
- }
- }
}
+ if (inject.build_id_style != BID_RWS__NONE && known_build_ids != NULL) {
+ inject.known_build_ids =
+ perf_inject__parse_known_build_ids(known_build_ids);
- if (inject.sched_stat) {
- inject.tool.ordered_events = true;
+ if (inject.known_build_ids == NULL) {
+ pr_err("Couldn't parse known build ids.\n");
+ goto out_delete;
+ }
}
#ifdef HAVE_JITDUMP
if (inject.jit_mode) {
- inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
- inject.tool.mmap = perf_event__jit_repipe_mmap;
- inject.tool.ordered_events = true;
+ inject.tool.mmap2 = perf_event__repipe_mmap2;
+ inject.tool.mmap = perf_event__repipe_mmap;
inject.tool.ordering_requires_timestamps = true;
/*
* JIT MMAP injection injects all MMAP events in one go, so it
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 6fd95be5032b..a756147e2eec 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -955,7 +955,7 @@ static bool perf_kmem__skip_sample(struct perf_sample *sample)
typedef int (*tracepoint_handler)(struct evsel *evsel,
struct perf_sample *sample);
-static int process_sample_event(struct perf_tool *tool __maybe_unused,
+static int process_sample_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -986,15 +986,6 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return err;
}
-static struct perf_tool perf_kmem = {
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .namespaces = perf_event__process_namespaces,
- .ordered_events = true,
-};
-
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
if (n_alloc == 0)
@@ -1971,6 +1962,7 @@ int cmd_kmem(int argc, const char **argv)
NULL
};
struct perf_session *session;
+ struct perf_tool perf_kmem;
static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
int ret = perf_config(kmem_config, NULL);
@@ -1998,6 +1990,13 @@ int cmd_kmem(int argc, const char **argv)
data.path = input_name;
+ perf_tool__init(&perf_kmem, /*ordered_events=*/true);
+ perf_kmem.sample = process_sample_event;
+ perf_kmem.comm = perf_event__process_comm;
+ perf_kmem.mmap = perf_event__process_mmap;
+ perf_kmem.mmap2 = perf_event__process_mmap2;
+ perf_kmem.namespaces = perf_event__process_namespaces;
+
kmem_session = session = perf_session__new(&data, &perf_kmem);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -2058,7 +2057,8 @@ int cmd_kmem(int argc, const char **argv)
out_delete:
perf_session__delete(session);
+ /* free usage string allocated by parse_options_subcommand */
+ free((void *)kmem_usage[0]);
return ret;
}
-
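
Across this series, static designated-initializer `struct perf_tool` tables give way to runtime perf_tool__init() plus targeted overrides. A minimal sketch of the resulting shape, assuming perf_tool__init() installs stub/default handlers as the conversions above imply; the include paths follow the usual perf-tree layout and the sample handler is a hypothetical command-specific function:

#include <linux/err.h>

#include "util/data.h"
#include "util/event.h"
#include "util/session.h"
#include "util/tool.h"

static int run_session(const char *path,
		       int (*sample_fn)(const struct perf_tool *, union perf_event *,
					struct perf_sample *, struct evsel *,
					struct machine *))
{
	struct perf_tool tool;
	struct perf_data data = {
		.path = path,
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_session *session;
	int ret;

	/* Every handler starts as a stub; override only what this command needs. */
	perf_tool__init(&tool, /*ordered_events=*/true);
	tool.sample = sample_fn;
	tool.comm = perf_event__process_comm;

	session = perf_session__new(&data, &tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	ret = perf_session__process_events(session);
	perf_session__delete(session);
	return ret;
}
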
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 71165036e4ca..55ea17c5ff02 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1166,7 +1166,7 @@ static void print_result(struct perf_kvm_stat *kvm)
}
#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
-static int process_lost_event(struct perf_tool *tool,
+static int process_lost_event(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -1187,7 +1187,7 @@ static bool skip_sample(struct perf_kvm_stat *kvm,
return false;
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -1603,19 +1603,17 @@ static int read_events(struct perf_kvm_stat *kvm)
{
int ret;
- struct perf_tool eops = {
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .namespaces = perf_event__process_namespaces,
- .ordered_events = true,
- };
struct perf_data file = {
.path = kvm->file_name,
.mode = PERF_DATA_MODE_READ,
.force = kvm->force,
};
- kvm->tool = eops;
+ perf_tool__init(&kvm->tool, /*ordered_events=*/true);
+ kvm->tool.sample = process_sample_event;
+ kvm->tool.comm = perf_event__process_comm;
+ kvm->tool.namespaces = perf_event__process_namespaces;
+
kvm->session = perf_session__new(&file, &kvm->tool);
if (IS_ERR(kvm->session)) {
pr_err("Initializing perf session failed\n");
@@ -1919,14 +1917,13 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
/* event handling */
+ perf_tool__init(&kvm->tool, /*ordered_events=*/true);
kvm->tool.sample = process_sample_event;
kvm->tool.comm = perf_event__process_comm;
kvm->tool.exit = perf_event__process_exit;
kvm->tool.fork = perf_event__process_fork;
kvm->tool.lost = process_lost_event;
kvm->tool.namespaces = perf_event__process_namespaces;
- kvm->tool.ordered_events = true;
- perf_tool__fill_defaults(&kvm->tool);
/* set defaults */
kvm->display_time = 1;
@@ -2187,5 +2184,8 @@ int cmd_kvm(int argc, const char **argv)
else
usage_with_options(kvm_usage, kvm_options);
+ /* free usage string allocated by parse_options_subcommand */
+ free((void *)kvm_usage[0]);
+
return 0;
}
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index 56e3f3a5e03a..c1daf82c9b92 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -958,7 +958,7 @@ static int top_sched_switch_event(struct perf_kwork *kwork,
}
static struct kwork_class kwork_irq;
-static int process_irq_handler_entry_event(struct perf_tool *tool,
+static int process_irq_handler_entry_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -971,7 +971,7 @@ static int process_irq_handler_entry_event(struct perf_tool *tool,
return 0;
}
-static int process_irq_handler_exit_event(struct perf_tool *tool,
+static int process_irq_handler_exit_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1037,7 +1037,7 @@ static struct kwork_class kwork_irq = {
};
static struct kwork_class kwork_softirq;
-static int process_softirq_raise_event(struct perf_tool *tool,
+static int process_softirq_raise_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1051,7 +1051,7 @@ static int process_softirq_raise_event(struct perf_tool *tool,
return 0;
}
-static int process_softirq_entry_event(struct perf_tool *tool,
+static int process_softirq_entry_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1065,7 +1065,7 @@ static int process_softirq_entry_event(struct perf_tool *tool,
return 0;
}
-static int process_softirq_exit_event(struct perf_tool *tool,
+static int process_softirq_exit_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1167,7 +1167,7 @@ static struct kwork_class kwork_softirq = {
};
static struct kwork_class kwork_workqueue;
-static int process_workqueue_activate_work_event(struct perf_tool *tool,
+static int process_workqueue_activate_work_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1181,7 +1181,7 @@ static int process_workqueue_activate_work_event(struct perf_tool *tool,
return 0;
}
-static int process_workqueue_execute_start_event(struct perf_tool *tool,
+static int process_workqueue_execute_start_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1195,7 +1195,7 @@ static int process_workqueue_execute_start_event(struct perf_tool *tool,
return 0;
}
-static int process_workqueue_execute_end_event(struct perf_tool *tool,
+static int process_workqueue_execute_end_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1266,7 +1266,7 @@ static struct kwork_class kwork_workqueue = {
};
static struct kwork_class kwork_sched;
-static int process_sched_switch_event(struct perf_tool *tool,
+static int process_sched_switch_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1945,12 +1945,12 @@ static int perf_kwork__report(struct perf_kwork *kwork)
return 0;
}
-typedef int (*tracepoint_handler)(struct perf_tool *tool,
+typedef int (*tracepoint_handler)(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
-static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
+static int perf_kwork__process_tracepoint_sample(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel,
@@ -2322,12 +2322,6 @@ int cmd_kwork(int argc, const char **argv)
{
static struct perf_kwork kwork = {
.class_list = LIST_HEAD_INIT(kwork.class_list),
- .tool = {
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .sample = perf_kwork__process_tracepoint_sample,
- .ordered_events = true,
- },
.atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
.sort_list = LIST_HEAD_INIT(kwork.sort_list),
.cmp_id = LIST_HEAD_INIT(kwork.cmp_id),
@@ -2462,6 +2456,11 @@ int cmd_kwork(int argc, const char **argv)
"record", "report", "latency", "timehist", "top", NULL
};
+ perf_tool__init(&kwork.tool, /*ordered_events=*/true);
+ kwork.tool.mmap = perf_event__process_mmap;
+ kwork.tool.mmap2 = perf_event__process_mmap2;
+ kwork.tool.sample = perf_kwork__process_tracepoint_sample;
+
argc = parse_options_subcommand(argc, argv, kwork_options,
kwork_subcommands, kwork_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
@@ -2520,5 +2519,8 @@ int cmd_kwork(int argc, const char **argv)
} else
usage_with_options(kwork_usage, kwork_options);
+ /* free usage string allocated by parse_options_subcommand */
+ free((void *)kwork_usage[0]);
+
return 0;
}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 82cb4b1010aa..65b8cba324be 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -173,7 +173,7 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
if (pmu_name && strcmp(pmu_name, "default_core")) {
desc_len = strlen(desc);
desc_len = asprintf(&desc_with_unit,
- desc[desc_len - 1] != '.'
+ desc_len > 0 && desc[desc_len - 1] != '.'
? "%s. Unit: %s" : "%s Unit: %s",
desc, pmu_name);
}
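
For reference, the hunk above fixes an out-of-bounds read: for an empty description, desc[strlen(desc) - 1] indexes desc[-1]. Checking the length first short-circuits safely. A standalone sketch of the guarded format selection (the strings and the "cpu_core" PMU name are illustrative):

#include <stdio.h>
#include <string.h>

static const char *fmt_for(const char *desc)
{
	size_t len = strlen(desc);

	/* Append a period only when the description doesn't already end in one;
	 * the len > 0 check avoids reading desc[-1] for an empty string. */
	return (len > 0 && desc[len - 1] != '.') ? "%s. Unit: %s" : "%s Unit: %s";
}

int main(void)
{
	printf(fmt_for(""), "", "cpu_core");	/* empty desc: no OOB read */
	putchar('\n');
	printf(fmt_for("Counts loads"), "Counts loads", "cpu_core");
	putchar('\n');
	return 0;
}
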
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 0253184b3b58..062e2b56a2ab 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1501,7 +1501,7 @@ static const struct evsel_str_handler contention_tracepoints[] = {
{ "lock:contention_end", evsel__process_contention_end, },
};
-static int process_event_update(struct perf_tool *tool,
+static int process_event_update(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist)
{
@@ -1520,7 +1520,7 @@ static int process_event_update(struct perf_tool *tool,
typedef int (*tracepoint_handler)(struct evsel *evsel,
struct perf_sample *sample);
-static int process_sample_event(struct perf_tool *tool __maybe_unused,
+static int process_sample_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -1933,22 +1933,21 @@ static bool force;
static int __cmd_report(bool display_info)
{
int err = -EINVAL;
- struct perf_tool eops = {
- .attr = perf_event__process_attr,
- .event_update = process_event_update,
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .mmap = perf_event__process_mmap,
- .namespaces = perf_event__process_namespaces,
- .tracing_data = perf_event__process_tracing_data,
- .ordered_events = true,
- };
+ struct perf_tool eops;
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = force,
};
+ perf_tool__init(&eops, /*ordered_events=*/true);
+ eops.attr = perf_event__process_attr;
+ eops.event_update = process_event_update;
+ eops.sample = process_sample_event;
+ eops.comm = perf_event__process_comm;
+ eops.mmap = perf_event__process_mmap;
+ eops.namespaces = perf_event__process_namespaces;
+ eops.tracing_data = perf_event__process_tracing_data;
session = perf_session__new(&data, &eops);
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
@@ -2069,15 +2068,7 @@ static int check_lock_contention_options(const struct option *options,
static int __cmd_contention(int argc, const char **argv)
{
int err = -EINVAL;
- struct perf_tool eops = {
- .attr = perf_event__process_attr,
- .event_update = process_event_update,
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .mmap = perf_event__process_mmap,
- .tracing_data = perf_event__process_tracing_data,
- .ordered_events = true,
- };
+ struct perf_tool eops;
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
@@ -2100,6 +2091,14 @@ static int __cmd_contention(int argc, const char **argv)
con.result = &lockhash_table[0];
+ perf_tool__init(&eops, /*ordered_events=*/true);
+ eops.attr = perf_event__process_attr;
+ eops.event_update = process_event_update;
+ eops.sample = process_sample_event;
+ eops.comm = perf_event__process_comm;
+ eops.mmap = perf_event__process_mmap;
+ eops.tracing_data = perf_event__process_tracing_data;
+
session = perf_session__new(use_bpf ? NULL : &data, &eops);
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
@@ -2713,6 +2712,9 @@ int cmd_lock(int argc, const char **argv)
usage_with_options(lock_usage, lock_options);
}
+ /* free usage string allocated by parse_options_subcommand */
+ free((void *)lock_usage[0]);
+
zfree(&lockhash_table);
return rc;
}
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 863fcd735dae..651188c1d825 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -19,6 +19,7 @@
#include "util/symbol.h"
#include "util/pmus.h"
#include "util/sample.h"
+#include "util/sort.h"
#include "util/string2.h"
#include "util/util.h"
#include <linux/err.h>
@@ -28,12 +29,16 @@
struct perf_mem {
struct perf_tool tool;
- char const *input_name;
+ const char *input_name;
+ const char *sort_key;
bool hide_unresolved;
bool dump_raw;
bool force;
bool phys_addr;
bool data_page_size;
+ bool all_kernel;
+ bool all_user;
+ bool data_type;
int operation;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -42,7 +47,7 @@ struct perf_mem {
static int parse_record_events(const struct option *opt,
const char *str, int unset __maybe_unused)
{
- struct perf_mem *mem = *(struct perf_mem **)opt->value;
+ struct perf_mem *mem = (struct perf_mem *)opt->value;
struct perf_pmu *pmu;
pmu = perf_mem_events_find_pmu();
@@ -62,33 +67,19 @@ static int parse_record_events(const struct option *opt,
return 0;
}
-static const char * const __usage[] = {
- "perf mem record [<options>] [<command>]",
- "perf mem record [<options>] -- <command> [<options>]",
- NULL
-};
-
-static const char * const *record_mem_usage = __usage;
-
-static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
+static int __cmd_record(int argc, const char **argv, struct perf_mem *mem,
+ const struct option *options)
{
int rec_argc, i = 0, j;
int start, end;
const char **rec_argv;
int ret;
- bool all_user = false, all_kernel = false;
struct perf_mem_event *e;
struct perf_pmu *pmu;
- struct option options[] = {
- OPT_CALLBACK('e', "event", &mem, "event",
- "event selector. use 'perf mem record -e list' to list available events",
- parse_record_events),
- OPT_UINTEGER(0, "ldlat", &perf_mem_events__loads_ldlat, "mem-loads latency"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show counter open errors, etc)"),
- OPT_BOOLEAN('U', "all-user", &all_user, "collect only user level data"),
- OPT_BOOLEAN('K', "all-kernel", &all_kernel, "collect only kernel level data"),
- OPT_END()
+ const char * const record_usage[] = {
+ "perf mem record [<options>] [<command>]",
+ "perf mem record [<options>] -- <command> [<options>]",
+ NULL
};
pmu = perf_mem_events_find_pmu();
@@ -97,12 +88,12 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
return -1;
}
- if (perf_pmu__mem_events_init(pmu)) {
+ if (perf_pmu__mem_events_init()) {
pr_err("failed: memory events not supported\n");
return -1;
}
- argc = parse_options(argc, argv, options, record_mem_usage,
+ argc = parse_options(argc, argv, options, record_usage,
PARSE_OPT_KEEP_UNKNOWN);
/* Max number of arguments multiplied by number of PMUs that can support them. */
@@ -126,22 +117,17 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (e->tag &&
(mem->operation & MEM_OPERATION_LOAD) &&
(mem->operation & MEM_OPERATION_STORE)) {
- e->record = true;
+ perf_mem_record[PERF_MEM_EVENTS__LOAD_STORE] = true;
rec_argv[i++] = "-W";
} else {
- if (mem->operation & MEM_OPERATION_LOAD) {
- e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
- e->record = true;
- }
+ if (mem->operation & MEM_OPERATION_LOAD)
+ perf_mem_record[PERF_MEM_EVENTS__LOAD] = true;
- if (mem->operation & MEM_OPERATION_STORE) {
- e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE);
- e->record = true;
- }
+ if (mem->operation & MEM_OPERATION_STORE)
+ perf_mem_record[PERF_MEM_EVENTS__STORE] = true;
}
- e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
- if (e->record)
+ if (perf_mem_record[PERF_MEM_EVENTS__LOAD])
rec_argv[i++] = "-W";
rec_argv[i++] = "-d";
@@ -158,10 +144,10 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
goto out;
end = i;
- if (all_user)
+ if (mem->all_user)
rec_argv[i++] = "--all-user";
- if (all_kernel)
+ if (mem->all_kernel)
rec_argv[i++] = "--all-kernel";
if (mem->cpu_list) {
@@ -188,7 +174,7 @@ out:
}
static int
-dump_raw_samples(struct perf_tool *tool,
+dump_raw_samples(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -262,7 +248,7 @@ out_put:
return 0;
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
@@ -285,7 +271,23 @@ static int report_raw_events(struct perf_mem *mem)
.force = mem->force,
};
int ret;
- struct perf_session *session = perf_session__new(&data, &mem->tool);
+ struct perf_session *session;
+
+ perf_tool__init(&mem->tool, /*ordered_events=*/true);
+ mem->tool.sample = process_sample_event;
+ mem->tool.mmap = perf_event__process_mmap;
+ mem->tool.mmap2 = perf_event__process_mmap2;
+ mem->tool.comm = perf_event__process_comm;
+ mem->tool.lost = perf_event__process_lost;
+ mem->tool.fork = perf_event__process_fork;
+ mem->tool.attr = perf_event__process_attr;
+ mem->tool.build_id = perf_event__process_build_id;
+ mem->tool.namespaces = perf_event__process_namespaces;
+ mem->tool.auxtrace_info = perf_event__process_auxtrace_info;
+ mem->tool.auxtrace = perf_event__process_auxtrace;
+ mem->tool.auxtrace_error = perf_event__process_auxtrace_error;
+
+ session = perf_session__new(&data, &mem->tool);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -319,16 +321,21 @@ out_delete:
perf_session__delete(session);
return ret;
}
+
static char *get_sort_order(struct perf_mem *mem)
{
bool has_extra_options = (mem->phys_addr | mem->data_page_size) ? true : false;
char sort[128];
+ if (mem->sort_key)
+ scnprintf(sort, sizeof(sort), "--sort=%s", mem->sort_key);
+ else if (mem->data_type)
+ strcpy(sort, "--sort=mem,snoop,tlb,type");
/*
* there is no weight (cost) associated with stores, so don't print
* the column
*/
- if (!(mem->operation & MEM_OPERATION_LOAD)) {
+ else if (!(mem->operation & MEM_OPERATION_LOAD)) {
strcpy(sort, "--sort=mem,sym,dso,symbol_daddr,"
"dso_daddr,tlb,locked");
} else if (has_extra_options) {
@@ -343,14 +350,26 @@ static char *get_sort_order(struct perf_mem *mem)
if (mem->data_page_size)
strcat(sort, ",data_page_size");
+	/* make sure it has the 'type' sort key even if the -s option is used */
+ if (mem->data_type && !strstr(sort, "type"))
+ strcat(sort, ",type");
+
return strdup(sort);
}
-static int report_events(int argc, const char **argv, struct perf_mem *mem)
+static int __cmd_report(int argc, const char **argv, struct perf_mem *mem,
+ const struct option *options)
{
const char **rep_argv;
int ret, i = 0, j, rep_argc;
char *new_sort_order;
+ const char * const report_usage[] = {
+ "perf mem report [<options>]",
+ NULL
+ };
+
+ argc = parse_options(argc, argv, options, report_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
if (mem->dump_raw)
return report_raw_events(mem);
@@ -368,10 +387,11 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
if (new_sort_order)
rep_argv[i++] = new_sort_order;
- for (j = 1; j < argc; j++, i++)
+ for (j = 0; j < argc; j++, i++)
rep_argv[i] = argv[j];
ret = cmd_report(i, rep_argv);
+ free(new_sort_order);
free(rep_argv);
return ret;
}
@@ -449,47 +469,51 @@ int cmd_mem(int argc, const char **argv)
{
struct stat st;
struct perf_mem mem = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .lost = perf_event__process_lost,
- .fork = perf_event__process_fork,
- .attr = perf_event__process_attr,
- .build_id = perf_event__process_build_id,
- .namespaces = perf_event__process_namespaces,
- .auxtrace_info = perf_event__process_auxtrace_info,
- .auxtrace = perf_event__process_auxtrace,
- .auxtrace_error = perf_event__process_auxtrace_error,
- .ordered_events = true,
- },
.input_name = "perf.data",
/*
 * default to both load and store sampling
*/
.operation = MEM_OPERATION_LOAD | MEM_OPERATION_STORE,
};
+ char *sort_order_help = sort_help("sort by key(s):", SORT_MODE__MEMORY);
const struct option mem_options[] = {
OPT_CALLBACK('t', "type", &mem.operation,
"type", "memory operations(load,store) Default load,store",
parse_mem_ops),
+ OPT_STRING('C', "cpu", &mem.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_BOOLEAN('f', "force", &mem.force, "don't complain, do it"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
+ OPT_BOOLEAN('p', "phys-data", &mem.phys_addr, "Record/Report sample physical addresses"),
+ OPT_BOOLEAN(0, "data-page-size", &mem.data_page_size, "Record/Report sample data address page size"),
+ OPT_END()
+ };
+ const struct option record_options[] = {
+ OPT_CALLBACK('e', "event", &mem, "event",
+ "event selector. use 'perf mem record -e list' to list available events",
+ parse_record_events),
+ OPT_UINTEGER(0, "ldlat", &perf_mem_events__loads_ldlat, "mem-loads latency"),
+ OPT_BOOLEAN('U', "all-user", &mem.all_user, "collect only user level data"),
+ OPT_BOOLEAN('K', "all-kernel", &mem.all_kernel, "collect only kernel level data"),
+ OPT_PARENT(mem_options)
+ };
+ const struct option report_options[] = {
OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw,
"dump raw samples in ASCII"),
OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved,
"Only display entries resolved to a symbol"),
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
- OPT_STRING('C', "cpu", &mem.cpu_list, "cpu",
- "list of cpus to profile"),
OPT_STRING_NOEMPTY('x', "field-separator", &symbol_conf.field_sep,
"separator",
"separator for columns, no spaces will be added"
" between columns '.' is reserved."),
- OPT_BOOLEAN('f', "force", &mem.force, "don't complain, do it"),
- OPT_BOOLEAN('p', "phys-data", &mem.phys_addr, "Record/Report sample physical addresses"),
- OPT_BOOLEAN(0, "data-page-size", &mem.data_page_size, "Record/Report sample data address page size"),
- OPT_END()
+ OPT_STRING('s', "sort", &mem.sort_key, "key[,key2...]",
+ sort_order_help),
+ OPT_BOOLEAN('T', "type-profile", &mem.data_type,
+ "Show data-type profile result"),
+ OPT_PARENT(mem_options)
};
const char *const mem_subcommands[] = { "record", "report", NULL };
const char *mem_usage[] = {
@@ -498,7 +522,7 @@ int cmd_mem(int argc, const char **argv)
};
argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
- mem_usage, PARSE_OPT_KEEP_UNKNOWN);
+ mem_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
usage_with_options(mem_usage, mem_options);
@@ -511,11 +535,14 @@ int cmd_mem(int argc, const char **argv)
}
if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
- return __cmd_record(argc, argv, &mem);
+ return __cmd_record(argc, argv, &mem, record_options);
else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
- return report_events(argc, argv, &mem);
+ return __cmd_report(argc, argv, &mem, report_options);
else
usage_with_options(mem_usage, mem_options);
+ /* free usage string allocated by parse_options_subcommand */
+ free((void *)mem_usage[0]);
+
return 0;
}
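
The record/report option tables above compose via OPT_PARENT(), which terminates a table while chaining to a parent table (as record_options and report_options do with the shared mem_options). A minimal sketch of that mechanism in the tools/lib/subcmd parse-options API; the demo options themselves are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#include <subcmd/parse-options.h>

static bool force;
static int verbose_level;

int main(int argc, const char **argv)
{
	const struct option common[] = {
		OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
		OPT_END()
	};
	const struct option sub[] = {
		OPT_INCR('v', "verbose", &verbose_level, "be more verbose"),
		/* Ends this table and pulls in 'common', so -f parses here too. */
		OPT_PARENT(common)
	};
	const char * const usage_str[] = { "demo [<options>]", NULL };

	argc = parse_options(argc, argv, sub, usage_str, 0);
	printf("force=%d verbose=%d\n", force, verbose_level);
	return 0;
}
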
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index a94516e8c522..adbaf80b398c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -171,6 +171,7 @@ struct record {
bool timestamp_filename;
bool timestamp_boundary;
bool off_cpu;
+ const char *filter_action;
struct switch_output switch_output;
unsigned long long samples;
unsigned long output_max_size; /* = 0: unlimited */
@@ -193,6 +194,15 @@ static const char *affinity_tags[PERF_AFFINITY_MAX] = {
"SYS", "NODE", "CPU"
};
+static int build_id__process_mmap(const struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+static int build_id__process_mmap2(const struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+static int process_timestamp_boundary(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+
#ifndef HAVE_GETTID
static inline pid_t gettid(void)
{
@@ -608,7 +618,7 @@ static int record__comp_enabled(struct record *rec)
return rec->opts.comp_level > 0;
}
-static int process_synthesized_event(struct perf_tool *tool,
+static int process_synthesized_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -619,7 +629,7 @@ static int process_synthesized_event(struct perf_tool *tool,
static struct mutex synth_lock;
-static int process_locked_synthesized_event(struct perf_tool *tool,
+static int process_locked_synthesized_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -704,7 +714,7 @@ static void record__sig_exit(void)
#ifdef HAVE_AUXTRACE_SUPPORT
-static int record__process_auxtrace(struct perf_tool *tool,
+static int record__process_auxtrace(const struct perf_tool *tool,
struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
@@ -1389,7 +1399,7 @@ try_again:
"even with a suitable vmlinux or kallsyms file.\n\n");
}
- if (evlist__apply_filters(evlist, &pos)) {
+ if (evlist__apply_filters(evlist, &pos, &opts->target)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter ?: "BPF", evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
@@ -1416,7 +1426,7 @@ static void set_timestamp_boundary(struct record *rec, u64 sample_time)
rec->evlist->last_sample_time = sample_time;
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -1458,7 +1468,7 @@ static int process_buildids(struct record *rec)
* first/last samples.
*/
if (rec->buildid_all && !rec->timestamp_boundary)
- rec->tool.sample = NULL;
+ rec->tool.sample = process_event_sample_stub;
return perf_session__process_events(session);
}
@@ -2364,13 +2374,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGTERM, sig_handler);
signal(SIGSEGV, sigsegv_handler);
- if (rec->opts.record_namespaces)
- tool->namespace_events = true;
-
if (rec->opts.record_cgroup) {
-#ifdef HAVE_FILE_HANDLE
- tool->cgroup_events = true;
-#else
+#ifndef HAVE_FILE_HANDLE
pr_err("cgroup tracking is not supported\n");
return -1;
#endif
@@ -2386,6 +2391,18 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGUSR2, SIG_IGN);
}
+ perf_tool__init(tool, /*ordered_events=*/true);
+ tool->sample = process_sample_event;
+ tool->fork = perf_event__process_fork;
+ tool->exit = perf_event__process_exit;
+ tool->comm = perf_event__process_comm;
+ tool->namespaces = perf_event__process_namespaces;
+ tool->mmap = build_id__process_mmap;
+ tool->mmap2 = build_id__process_mmap2;
+ tool->itrace_start = process_timestamp_boundary;
+ tool->aux = process_timestamp_boundary;
+ tool->namespace_events = rec->opts.record_namespaces;
+ tool->cgroup_events = rec->opts.record_cgroup;
session = perf_session__new(data, tool);
if (IS_ERR(session)) {
pr_err("Perf session creation failed.\n");
@@ -3243,7 +3260,7 @@ static const char * const __record_usage[] = {
};
const char * const *record_usage = __record_usage;
-static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
+static int build_id__process_mmap(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine)
{
/*
@@ -3255,7 +3272,7 @@ static int build_id__process_mmap(struct perf_tool *tool, union perf_event *even
return perf_event__process_mmap(tool, event, sample, machine);
}
-static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
+static int build_id__process_mmap2(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine)
{
/*
@@ -3268,7 +3285,7 @@ static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *eve
return perf_event__process_mmap2(tool, event, sample, machine);
}
-static int process_timestamp_boundary(struct perf_tool *tool,
+static int process_timestamp_boundary(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
@@ -3326,18 +3343,6 @@ static struct record record = {
.ctl_fd_ack = -1,
.synth = PERF_SYNTH_ALL,
},
- .tool = {
- .sample = process_sample_event,
- .fork = perf_event__process_fork,
- .exit = perf_event__process_exit,
- .comm = perf_event__process_comm,
- .namespaces = perf_event__process_namespaces,
- .mmap = build_id__process_mmap,
- .mmap2 = build_id__process_mmap2,
- .itrace_start = process_timestamp_boundary,
- .aux = process_timestamp_boundary,
- .ordered_events = true,
- },
};
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
@@ -3557,6 +3562,8 @@ static struct option __record_options[] = {
"write collected trace data into several data files using parallel threads",
record__parse_threads),
OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
+ OPT_STRING(0, "setup-filter", &record.filter_action, "pin|unpin",
+ "BPF filter action"),
OPT_END()
};
@@ -4086,6 +4093,18 @@ int cmd_record(int argc, const char **argv)
pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
}
+ if (rec->filter_action) {
+ if (!strcmp(rec->filter_action, "pin"))
+ err = perf_bpf_filter__pin();
+ else if (!strcmp(rec->filter_action, "unpin"))
+ err = perf_bpf_filter__unpin();
+ else {
+ pr_warning("Unknown BPF filter action: %s\n", rec->filter_action);
+ err = -EINVAL;
+ }
+ goto out_opts;
+ }
+
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().
@@ -4242,13 +4261,13 @@ int cmd_record(int argc, const char **argv)
err = __cmd_record(&record, argc, argv);
out:
- evlist__delete(rec->evlist);
+ record__free_thread_masks(rec, rec->nr_threads);
+ rec->nr_threads = 0;
symbol__exit();
auxtrace_record__free(rec->itr);
out_opts:
- record__free_thread_masks(rec, rec->nr_threads);
- rec->nr_threads = 0;
evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
+ evlist__delete(rec->evlist);
return err;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 6edc0d4ce6fb..5dc17ffee27a 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -263,7 +263,7 @@ static int process_feature_event(struct perf_session *session,
return 0;
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -328,7 +328,7 @@ static int process_sample_event(struct perf_tool *tool,
if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
hist__account_cycles(sample->branch_stack, &al, sample,
rep->nonany_branch_mode,
- &rep->total_cycles);
+ &rep->total_cycles, evsel);
}
ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
@@ -339,7 +339,7 @@ out_put:
return ret;
}
-static int process_read_event(struct perf_tool *tool,
+static int process_read_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel,
@@ -565,6 +565,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c
struct hists *hists = evsel__hists(pos);
const char *evname = evsel__name(pos);
+ i++;
if (symbol_conf.event_group && !evsel__is_group_leader(pos))
continue;
@@ -574,7 +575,14 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
if (rep->total_cycles_mode) {
- report__browse_block_hists(&rep->block_reports[i++].hist,
+ char *buf;
+
+ if (!annotation_br_cntr_abbr_list(&buf, pos, true)) {
+ fprintf(stdout, "%s", buf);
+ fprintf(stdout, "#\n");
+ free(buf);
+ }
+ report__browse_block_hists(&rep->block_reports[i - 1].hist,
rep->min_percent, pos, NULL);
continue;
}
@@ -765,7 +773,7 @@ static void report__output_resort(struct report *rep)
ui_progress__finish();
}
-static int count_sample_event(struct perf_tool *tool __maybe_unused,
+static int count_sample_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel,
@@ -777,7 +785,7 @@ static int count_sample_event(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int count_lost_samples_event(struct perf_tool *tool,
+static int count_lost_samples_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
@@ -787,22 +795,28 @@ static int count_lost_samples_event(struct perf_tool *tool,
evsel = evlist__id2evsel(rep->session->evlist, sample->id);
if (evsel) {
- hists__inc_nr_lost_samples(evsel__hists(evsel),
- event->lost_samples.lost);
+ struct hists *hists = evsel__hists(evsel);
+ u32 count = event->lost_samples.lost;
+
+ if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
+ hists__inc_nr_dropped_samples(hists, count);
+ else
+ hists__inc_nr_lost_samples(hists, count);
}
return 0;
}
-static int process_attr(struct perf_tool *tool __maybe_unused,
+static int process_attr(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist);
static void stats_setup(struct report *rep)
{
- memset(&rep->tool, 0, sizeof(rep->tool));
+ perf_tool__init(&rep->tool, /*ordered_events=*/false);
rep->tool.attr = process_attr;
rep->tool.sample = count_sample_event;
rep->tool.lost_samples = count_lost_samples_event;
+ rep->tool.event_update = perf_event__process_event_update;
rep->tool.no_warn = true;
}
@@ -817,8 +831,7 @@ static int stats_print(struct report *rep)
static void tasks_setup(struct report *rep)
{
- memset(&rep->tool, 0, sizeof(rep->tool));
- rep->tool.ordered_events = true;
+ perf_tool__init(&rep->tool, /*ordered_events=*/true);
if (rep->mmaps_mode) {
rep->tool.mmap = perf_event__process_mmap;
rep->tool.mmap2 = perf_event__process_mmap2;
@@ -1119,18 +1132,23 @@ static int __cmd_report(struct report *rep)
report__output_resort(rep);
if (rep->total_cycles_mode) {
- int block_hpps[6] = {
+ int nr_hpps = 4;
+ int block_hpps[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
- PERF_HPP_REPORT__BLOCK_RANGE,
- PERF_HPP_REPORT__BLOCK_DSO,
};
+ if (session->evlist->nr_br_cntr > 0)
+ block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_BRANCH_COUNTER;
+
+ block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_RANGE;
+ block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_DSO;
+
rep->block_reports = block_info__create_report(session->evlist,
rep->total_cycles,
- block_hpps, 6,
+ block_hpps, nr_hpps,
&rep->nr_block_reports);
if (!rep->block_reports)
return -1;
@@ -1233,7 +1251,7 @@ parse_percent_limit(const struct option *opt, const char *str,
return 0;
}
-static int process_attr(struct perf_tool *tool __maybe_unused,
+static int process_attr(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
@@ -1272,37 +1290,13 @@ int cmd_report(int argc, const char **argv)
NULL
};
struct report report = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .namespaces = perf_event__process_namespaces,
- .cgroup = perf_event__process_cgroup,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
- .read = process_read_event,
- .attr = process_attr,
-#ifdef HAVE_LIBTRACEEVENT
- .tracing_data = perf_event__process_tracing_data,
-#endif
- .build_id = perf_event__process_build_id,
- .id_index = perf_event__process_id_index,
- .auxtrace_info = perf_event__process_auxtrace_info,
- .auxtrace = perf_event__process_auxtrace,
- .event_update = perf_event__process_event_update,
- .feature = process_feature_event,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
- },
.max_stack = PERF_MAX_STACK_DEPTH,
.pretty_printing_style = "normal",
.socket_filter = -1,
.skip_empty = true,
};
- char *sort_order_help = sort_help("sort by key(s):");
- char *field_order_help = sort_help("output field(s): overhead period sample ");
+ char *sort_order_help = sort_help("sort by key(s):", SORT_MODE__NORMAL);
+ char *field_order_help = sort_help("output field(s):", SORT_MODE__NORMAL);
const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
@@ -1477,6 +1471,7 @@ int cmd_report(int argc, const char **argv)
};
int ret = hists__init();
char sort_tmp[128];
+ bool ordered_events = true;
if (ret < 0)
goto exit;
@@ -1531,7 +1526,7 @@ int cmd_report(int argc, const char **argv)
report.tasks_mode = true;
if (dump_trace && report.disable_order)
- report.tool.ordered_events = false;
+ ordered_events = false;
if (quiet)
perf_quiet_option();
@@ -1562,6 +1557,29 @@ int cmd_report(int argc, const char **argv)
symbol_conf.skip_empty = report.skip_empty;
repeat:
+ perf_tool__init(&report.tool, ordered_events);
+ report.tool.sample = process_sample_event;
+ report.tool.mmap = perf_event__process_mmap;
+ report.tool.mmap2 = perf_event__process_mmap2;
+ report.tool.comm = perf_event__process_comm;
+ report.tool.namespaces = perf_event__process_namespaces;
+ report.tool.cgroup = perf_event__process_cgroup;
+ report.tool.exit = perf_event__process_exit;
+ report.tool.fork = perf_event__process_fork;
+ report.tool.lost = perf_event__process_lost;
+ report.tool.read = process_read_event;
+ report.tool.attr = process_attr;
+#ifdef HAVE_LIBTRACEEVENT
+ report.tool.tracing_data = perf_event__process_tracing_data;
+#endif
+ report.tool.build_id = perf_event__process_build_id;
+ report.tool.id_index = perf_event__process_id_index;
+ report.tool.auxtrace_info = perf_event__process_auxtrace_info;
+ report.tool.auxtrace = perf_event__process_auxtrace;
+ report.tool.event_update = perf_event__process_event_update;
+ report.tool.feature = process_feature_event;
+ report.tool.ordering_requires_timestamps = true;
+
session = perf_session__new(&data, &report.tool);
if (IS_ERR(session)) {
ret = PTR_ERR(session);
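The hunk above is one instance of a pattern applied to every builtin in this series: instead of filling struct perf_tool via a designated initializer at declaration time, each command now calls perf_tool__init() to install safe defaults and then overrides only the callbacks it needs, which in turn lets the handlers take a const struct perf_tool *. A minimal sketch of the idea, using invented names rather than the exact perf API:

    struct tool {
            int  (*sample)(const struct tool *t, const void *event);
            bool ordered_events;
    };

    static int default_sample(const struct tool *t, const void *event)
    {
            (void)t; (void)event;          /* default: consume the event silently */
            return 0;
    }

    static void tool__init(struct tool *t, bool ordered_events)
    {
            t->sample = default_sample;    /* every callback gets a safe default */
            t->ordered_events = ordered_events;
    }

With defaults installed centrally, a partially set-up tool can no longer hit a NULL callback, and the const qualifier documents that handlers must not mutate the tool.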
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 8750b5f2d49b..5981cc51abc8 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -51,6 +51,7 @@
#define COMM_LEN 20
#define SYM_LEN 129
#define MAX_PID 1024000
+#define MAX_PRIO 140
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -228,11 +229,14 @@ struct perf_sched {
bool show_next;
bool show_migrations;
bool show_state;
+ bool show_prio;
u64 skipped_samples;
const char *time_str;
struct perf_time_interval ptime;
struct perf_time_interval hist_time;
volatile bool thread_funcs_exit;
+ const char *prio_str;
+ DECLARE_BITMAP(prio_bitmap, MAX_PRIO);
};
/* per thread run time data */
@@ -258,6 +262,8 @@ struct thread_runtime {
bool comm_changed;
u64 migrations;
+
+ int prio;
};
/* per event run time data */
@@ -920,6 +926,11 @@ struct sort_dimension {
struct list_head list;
};
+static inline void init_prio(struct thread_runtime *r)
+{
+ r->prio = -1;
+}
+
/*
* handle runtime stats saved per thread
*/
@@ -932,6 +943,7 @@ static struct thread_runtime *thread__init_runtime(struct thread *thread)
return NULL;
init_stats(&r->run_stats);
+ init_prio(r);
thread__set_priv(thread, r);
return r;
@@ -1489,7 +1501,7 @@ again:
}
}
-static int process_sched_wakeup_event(struct perf_tool *tool,
+static int process_sched_wakeup_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1502,7 +1514,7 @@ static int process_sched_wakeup_event(struct perf_tool *tool,
return 0;
}
-static int process_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
+static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
struct evsel *evsel __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -1770,7 +1782,7 @@ out:
return 0;
}
-static int process_sched_switch_event(struct perf_tool *tool,
+static int process_sched_switch_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1796,7 +1808,7 @@ static int process_sched_switch_event(struct perf_tool *tool,
return err;
}
-static int process_sched_runtime_event(struct perf_tool *tool,
+static int process_sched_runtime_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1809,7 +1821,7 @@ static int process_sched_runtime_event(struct perf_tool *tool,
return 0;
}
-static int perf_sched__process_fork_event(struct perf_tool *tool,
+static int perf_sched__process_fork_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -1826,7 +1838,7 @@ static int perf_sched__process_fork_event(struct perf_tool *tool,
return 0;
}
-static int process_sched_migrate_task_event(struct perf_tool *tool,
+static int process_sched_migrate_task_event(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine)
@@ -1839,12 +1851,12 @@ static int process_sched_migrate_task_event(struct perf_tool *tool,
return 0;
}
-typedef int (*tracepoint_handler)(struct perf_tool *tool,
+typedef int (*tracepoint_handler)(const struct perf_tool *tool,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
-static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
+static int perf_sched__process_tracepoint_sample(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel,
@@ -1860,7 +1872,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
return err;
}
-static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
+static int perf_sched__process_comm(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2036,6 +2048,24 @@ static char *timehist_get_commstr(struct thread *thread)
return str;
}
+/* prio field format: xxx or xxx->yyy */
+#define MAX_PRIO_STR_LEN 8
+static char *timehist_get_priostr(struct evsel *evsel,
+ struct thread *thread,
+ struct perf_sample *sample)
+{
+ static char prio_str[16];
+ int prev_prio = (int)evsel__intval(evsel, sample, "prev_prio");
+ struct thread_runtime *tr = thread__priv(thread);
+
+ if (tr->prio != prev_prio && tr->prio != -1)
+ scnprintf(prio_str, sizeof(prio_str), "%d->%d", tr->prio, prev_prio);
+ else
+ scnprintf(prio_str, sizeof(prio_str), "%d", prev_prio);
+
+ return prio_str;
+}
+
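To make the two output forms above concrete, here is the formatting rule in isolation; a minimal standalone sketch where -1 stands for "no cached sched_in priority yet" and the helper name is invented:

    #include <stdio.h>

    static void fmt_prio(char *buf, size_t sz, int cached, int cur)
    {
            if (cached != -1 && cached != cur)
                    snprintf(buf, sz, "%d->%d", cached, cur); /* changed while running */
            else
                    snprintf(buf, sz, "%d", cur);
    }

    int main(void)
    {
            char buf[16];

            fmt_prio(buf, sizeof(buf), 120, 89);
            printf("%s\n", buf);    /* prints: 120->89 */
            return 0;
    }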
static void timehist_header(struct perf_sched *sched)
{
u32 ncpus = sched->max_cpu.cpu + 1;
@@ -2053,8 +2083,14 @@ static void timehist_header(struct perf_sched *sched)
printf(" ");
}
- printf(" %-*s %9s %9s %9s", comm_width,
- "task name", "wait time", "sch delay", "run time");
+ if (sched->show_prio) {
+ printf(" %-*s %-*s %9s %9s %9s",
+ comm_width, "task name", MAX_PRIO_STR_LEN, "prio",
+ "wait time", "sch delay", "run time");
+ } else {
+ printf(" %-*s %9s %9s %9s", comm_width,
+ "task name", "wait time", "sch delay", "run time");
+ }
if (sched->show_state)
printf(" %s", "state");
@@ -2069,8 +2105,14 @@ static void timehist_header(struct perf_sched *sched)
if (sched->show_cpu_visual)
printf(" %*s ", ncpus, "");
- printf(" %-*s %9s %9s %9s", comm_width,
- "[tid/pid]", "(msec)", "(msec)", "(msec)");
+ if (sched->show_prio) {
+ printf(" %-*s %-*s %9s %9s %9s",
+ comm_width, "[tid/pid]", MAX_PRIO_STR_LEN, "",
+ "(msec)", "(msec)", "(msec)");
+ } else {
+ printf(" %-*s %9s %9s %9s", comm_width,
+ "[tid/pid]", "(msec)", "(msec)", "(msec)");
+ }
if (sched->show_state)
printf(" %5s", "");
@@ -2085,9 +2127,15 @@ static void timehist_header(struct perf_sched *sched)
if (sched->show_cpu_visual)
printf(" %.*s ", ncpus, graph_dotted_line);
- printf(" %.*s %.9s %.9s %.9s", comm_width,
- graph_dotted_line, graph_dotted_line, graph_dotted_line,
- graph_dotted_line);
+ if (sched->show_prio) {
+ printf(" %.*s %.*s %.9s %.9s %.9s",
+ comm_width, graph_dotted_line, MAX_PRIO_STR_LEN, graph_dotted_line,
+ graph_dotted_line, graph_dotted_line, graph_dotted_line);
+ } else {
+ printf(" %.*s %.9s %.9s %.9s", comm_width,
+ graph_dotted_line, graph_dotted_line, graph_dotted_line,
+ graph_dotted_line);
+ }
if (sched->show_state)
printf(" %.5s", graph_dotted_line);
@@ -2134,6 +2182,9 @@ static void timehist_print_sample(struct perf_sched *sched,
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
+ if (sched->show_prio)
+ printf(" %-*s ", MAX_PRIO_STR_LEN, timehist_get_priostr(evsel, thread, sample));
+
wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
print_sched_time(wait_time, 6);
@@ -2301,6 +2352,7 @@ static int init_idle_thread(struct thread *thread)
if (itr == NULL)
return -ENOMEM;
+ init_prio(&itr->tr);
init_stats(&itr->tr.run_stats);
callchain_init(&itr->callchain);
callchain_cursor_reset(&itr->cursor);
@@ -2455,12 +2507,33 @@ static bool timehist_skip_sample(struct perf_sched *sched,
struct perf_sample *sample)
{
bool rc = false;
+ int prio = -1;
+ struct thread_runtime *tr = NULL;
if (thread__is_filtered(thread)) {
rc = true;
sched->skipped_samples++;
}
+ if (sched->prio_str) {
+ /*
+ * Because a task's priority may change during execution,
+ * first read the priority from the previous sched_in event
+ * for the current task. If no previous sched_in event was
+ * saved, read the priority from the current task's sched_out
+ * event instead.
+ */
+ tr = thread__get_runtime(thread);
+ if (tr && tr->prio != -1)
+ prio = tr->prio;
+ else if (evsel__name_is(evsel, "sched:sched_switch"))
+ prio = evsel__intval(evsel, sample, "prev_prio");
+
+ if (prio != -1 && !test_bit(prio, sched->prio_bitmap)) {
+ rc = true;
+ sched->skipped_samples++;
+ }
+ }
+
if (sched->idle_hist) {
if (!evsel__name_is(evsel, "sched:sched_switch"))
rc = true;
@@ -2506,7 +2579,7 @@ static void timehist_print_wakeup_event(struct perf_sched *sched,
printf("\n");
}
-static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
+static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct evsel *evsel __maybe_unused,
struct perf_sample *sample __maybe_unused,
@@ -2515,7 +2588,7 @@ static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int timehist_sched_wakeup_event(struct perf_tool *tool,
+static int timehist_sched_wakeup_event(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
@@ -2599,7 +2672,7 @@ static void timehist_print_migration_event(struct perf_sched *sched,
printf("\n");
}
-static int timehist_migrate_task_event(struct perf_tool *tool,
+static int timehist_migrate_task_event(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct evsel *evsel,
struct perf_sample *sample,
@@ -2627,7 +2700,31 @@ static int timehist_migrate_task_event(struct perf_tool *tool,
return 0;
}
-static int timehist_sched_change_event(struct perf_tool *tool,
+static void timehist_update_task_prio(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct thread_runtime *tr = NULL;
+ const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
+ const u32 next_prio = evsel__intval(evsel, sample, "next_prio");
+
+ if (next_pid == 0)
+ thread = get_idle_thread(sample->cpu);
+ else
+ thread = machine__findnew_thread(machine, -1, next_pid);
+
+ if (thread == NULL)
+ return;
+
+ tr = thread__get_runtime(thread);
+ if (tr == NULL)
+ return;
+
+ tr->prio = next_prio;
+}
+
+static int timehist_sched_change_event(const struct perf_tool *tool,
union perf_event *event,
struct evsel *evsel,
struct perf_sample *sample,
@@ -2650,6 +2747,9 @@ static int timehist_sched_change_event(struct perf_tool *tool,
goto out;
}
+ if (sched->show_prio || sched->prio_str)
+ timehist_update_task_prio(evsel, sample, machine);
+
thread = timehist_get_thread(sched, sample, machine, evsel);
if (thread == NULL) {
rc = -1;
@@ -2683,9 +2783,12 @@ static int timehist_sched_change_event(struct perf_tool *tool,
* - previous sched event is out of window - we are done
* - sample time is beyond window user cares about - reset it
* to close out stats for time window interest
+ * - If tprev is 0, i.e. the sched_in event for the current task
+ * was not recorded, we cannot determine whether the sched_in
+ * event falls within the time window of interest - ignore it
*/
if (ptime->end) {
- if (tprev > ptime->end)
+ if (!tprev || tprev > ptime->end)
goto out;
if (t > ptime->end)
@@ -2700,8 +2803,6 @@ static int timehist_sched_change_event(struct perf_tool *tool,
struct idle_thread_runtime *itr = (void *)tr;
struct thread_runtime *last_tr;
- BUG_ON(thread__tid(thread) != 0);
-
if (itr->last_thread == NULL)
goto out;
@@ -2727,10 +2828,10 @@ static int timehist_sched_change_event(struct perf_tool *tool,
itr->last_thread = NULL;
}
- }
- if (!sched->summary_only)
- timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
+ if (!sched->summary_only)
+ timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
+ }
out:
if (sched->hist_time.start == 0 && t >= ptime->start)
@@ -2758,7 +2859,7 @@ out:
return rc;
}
-static int timehist_sched_switch_event(struct perf_tool *tool,
+static int timehist_sched_switch_event(const struct perf_tool *tool,
union perf_event *event,
struct evsel *evsel,
struct perf_sample *sample,
@@ -2767,7 +2868,7 @@ static int timehist_sched_switch_event(struct perf_tool *tool,
return timehist_sched_change_event(tool, event, evsel, sample, machine);
}
-static int process_lost(struct perf_tool *tool __maybe_unused,
+static int process_lost(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
@@ -3010,13 +3111,13 @@ static void timehist_print_summary(struct perf_sched *sched,
printf(" (x %d)\n", sched->max_cpu.cpu);
}
-typedef int (*sched_handler)(struct perf_tool *tool,
+typedef int (*sched_handler)(const struct perf_tool *tool,
union perf_event *event,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
-static int perf_timehist__process_sample(struct perf_tool *tool,
+static int perf_timehist__process_sample(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -3066,6 +3167,47 @@ static int timehist_check_attr(struct perf_sched *sched,
return 0;
}
+static int timehist_parse_prio_str(struct perf_sched *sched)
+{
+ char *p;
+ unsigned long start_prio, end_prio;
+ const char *str = sched->prio_str;
+
+ if (!str)
+ return 0;
+
+ while (isdigit(*str)) {
+ p = NULL;
+ start_prio = strtoul(str, &p, 0);
+ if (start_prio >= MAX_PRIO || (*p != '\0' && *p != ',' && *p != '-'))
+ return -1;
+
+ if (*p == '-') {
+ str = ++p;
+ p = NULL;
+ end_prio = strtoul(str, &p, 0);
+
+ if (end_prio >= MAX_PRIO || (*p != '\0' && *p != ','))
+ return -1;
+
+ if (end_prio < start_prio)
+ return -1;
+ } else {
+ end_prio = start_prio;
+ }
+
+ for (; start_prio <= end_prio; start_prio++)
+ __set_bit(start_prio, sched->prio_bitmap);
+
+ if (*p)
+ ++p;
+
+ str = p;
+ }
+
+ return 0;
+}
+
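The accepted --prio grammar is comma-separated values and inclusive ranges, e.g. "0,10-14,120". A reduced standalone version of the loop above, using a plain byte array in place of the kernel-style bitmap (helper name invented):

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PRIO 140

    /* returns 0 on success, -1 on malformed input */
    static int parse_prio_list(const char *str, unsigned char *bits)
    {
            while (isdigit((unsigned char)*str)) {
                    char *p;
                    unsigned long lo = strtoul(str, &p, 0), hi = lo;

                    if (*p == '-') {
                            hi = strtoul(p + 1, &p, 0);
                            if (hi < lo)
                                    return -1;
                    }
                    if (hi >= MAX_PRIO || (*p && *p != ','))
                            return -1;
                    while (lo <= hi)
                            bits[lo++] = 1;     /* mark every prio in the range */
                    str = *p ? p + 1 : p;
            }
            return *str ? -1 : 0;
    }

    int main(void)
    {
            unsigned char bits[MAX_PRIO] = {0};

            if (parse_prio_list("0,10-14,120", bits) == 0)
                    printf("prio 12 selected: %d\n", bits[12]);  /* prints 1 */
            return 0;
    }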
static int perf_sched__timehist(struct perf_sched *sched)
{
struct evsel_str_handler handlers[] = {
@@ -3100,7 +3242,6 @@ static int perf_sched__timehist(struct perf_sched *sched)
sched->tool.tracing_data = perf_event__process_tracing_data;
sched->tool.build_id = perf_event__process_build_id;
- sched->tool.ordered_events = true;
sched->tool.ordering_requires_timestamps = true;
symbol_conf.use_callchain = sched->show_callchain;
@@ -3121,12 +3262,18 @@ static int perf_sched__timehist(struct perf_sched *sched)
if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
pr_err("Invalid time string\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
if (timehist_check_attr(sched, evlist) != 0)
goto out;
+ if (timehist_parse_prio_str(sched) != 0) {
+ pr_err("Invalid prio string\n");
+ goto out;
+ }
+
setup_pager();
/* prefer sched_waking if it is captured */
@@ -3605,14 +3752,6 @@ int cmd_sched(int argc, const char **argv)
{
static const char default_sort_order[] = "avg, max, switch, runtime";
struct perf_sched sched = {
- .tool = {
- .sample = perf_sched__process_tracepoint_sample,
- .comm = perf_sched__process_comm,
- .namespaces = perf_event__process_namespaces,
- .lost = perf_event__process_lost,
- .fork = perf_sched__process_fork_event,
- .ordered_events = true,
- },
.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
.sort_list = LIST_HEAD_INIT(sched.sort_list),
.sort_order = default_sort_order,
@@ -3691,6 +3830,9 @@ int cmd_sched(int argc, const char **argv)
OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
"analyze events only for given thread id(s)"),
OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_BOOLEAN(0, "show-prio", &sched.show_prio, "Show task priority"),
+ OPT_STRING(0, "prio", &sched.prio_str, "prio",
+ "analyze events only for given task priority(ies)"),
OPT_PARENT(sched_options)
};
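For reference, the two new switches compose in the obvious way; an illustrative session (workload invented):

    perf sched record -- sleep 10
    perf sched timehist --show-prio        # adds a prio column (xxx or xxx->yyy)
    perf sched timehist --prio 0-39,120    # keep only tasks in those priority ranges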
@@ -3733,6 +3875,13 @@ int cmd_sched(int argc, const char **argv)
};
int ret;
+ perf_tool__init(&sched.tool, /*ordered_events=*/true);
+ sched.tool.sample = perf_sched__process_tracepoint_sample;
+ sched.tool.comm = perf_sched__process_comm;
+ sched.tool.namespaces = perf_event__process_namespaces;
+ sched.tool.lost = perf_event__process_lost;
+ sched.tool.fork = perf_sched__process_fork_event;
+
argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
@@ -3805,5 +3954,8 @@ int cmd_sched(int argc, const char **argv)
usage_with_options(sched_usage, sched_options);
}
+ /* free usage string allocated by parse_options_subcommand */
+ free((void *)sched_usage[0]);
+
return 0;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index c16224b1fef3..a644787fa9e1 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -62,6 +62,7 @@
#include "util/record.h"
#include "util/util.h"
#include "util/cgroup.h"
+#include "util/annotate.h"
#include "perf.h"
#include <linux/ctype.h>
@@ -138,6 +139,7 @@ enum perf_output_field {
PERF_OUTPUT_DSOFF = 1ULL << 41,
PERF_OUTPUT_DISASM = 1ULL << 42,
PERF_OUTPUT_BRSTACKDISASM = 1ULL << 43,
+ PERF_OUTPUT_BRCNTR = 1ULL << 44,
};
struct perf_script {
@@ -213,6 +215,7 @@ struct output_option {
{.str = "cgroup", .field = PERF_OUTPUT_CGROUP},
{.str = "retire_lat", .field = PERF_OUTPUT_RETIRE_LAT},
{.str = "brstackdisasm", .field = PERF_OUTPUT_BRSTACKDISASM},
+ {.str = "brcntr", .field = PERF_OUTPUT_BRCNTR},
};
enum {
@@ -520,6 +523,12 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
"Hint: run 'perf record -b ...'\n");
return -EINVAL;
}
+ if (PRINT_FIELD(BRCNTR) &&
+ !(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_COUNTERS)) {
+ pr_err("Display of branch counter requested but it's not enabled\n"
+ "Hint: run 'perf record -j any,counter ...'\n");
+ return -EINVAL;
+ }
if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", PERF_OUTPUT_TID|PERF_OUTPUT_PID))
return -EINVAL;
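As the hint says, the new brcntr field only has data when branch counters were sampled at record time, which requires -j any,counter on an event group; an illustrative session (the event group shown is just an example):

    perf record -j any,counter -e '{branch-instructions,branch-misses}:S' -- ./workload
    perf script -F +brcntr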
@@ -789,6 +798,19 @@ static int perf_sample__fprintf_start(struct perf_script *script,
int printed = 0;
char tstr[128];
+ /*
+ * Print the branch counter's abbreviation list,
+ * if the branch counter is available.
+ */
+ if (PRINT_FIELD(BRCNTR) && !verbose) {
+ char *buf;
+
+ if (!annotation_br_cntr_abbr_list(&buf, evsel, true)) {
+ printed += fprintf(stdout, "%s", buf);
+ free(buf);
+ }
+ }
+
if (PRINT_FIELD(MACHINE_PID) && sample->machine_pid)
printed += fprintf(fp, "VM:%5d ", sample->machine_pid);
@@ -1195,7 +1217,9 @@ static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
struct perf_insn *x, u8 *inbuf, int len,
int insn, FILE *fp, int *total_cycles,
struct perf_event_attr *attr,
- struct thread *thread)
+ struct thread *thread,
+ struct evsel *evsel,
+ u64 br_cntr)
{
int ilen = 0;
int printed = fprintf(fp, "\t%016" PRIx64 "\t", ip);
@@ -1216,6 +1240,29 @@ static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
addr_location__exit(&al);
}
+ if (PRINT_FIELD(BRCNTR)) {
+ struct evsel *pos = evsel__leader(evsel);
+ unsigned int i = 0, j, num, mask, width;
+
+ perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
+ mask = (1L << width) - 1;
+ printed += fprintf(fp, "br_cntr: ");
+ evlist__for_each_entry_from(evsel->evlist, pos) {
+ if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
+ continue;
+ if (evsel__leader(pos) != evsel__leader(evsel))
+ break;
+
+ num = (br_cntr >> (i++ * width)) & mask;
+ if (!verbose) {
+ for (j = 0; j < num; j++)
+ printed += fprintf(fp, "%s", pos->abbr_name);
+ } else
+ printed += fprintf(fp, "%s %d ", pos->name, num);
+ }
+ printed += fprintf(fp, "\t");
+ }
+
printed += fprintf(fp, "#%s%s%s%s",
en->flags.predicted ? " PRED" : "",
en->flags.mispred ? " MISPRED" : "",
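The extraction above assumes each event in the sampling group owns a fixed-width slice of the packed br_cntr word, lowest bits first. A worked standalone example with an assumed width of 2 bits and three events:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical: 3 events, 2 counter bits each, packed LSB-first */
            unsigned int width = 2, mask = (1u << width) - 1;
            unsigned long br_cntr = 0x26;   /* 0b100110 */

            for (unsigned int i = 0; i < 3; i++)
                    printf("event%u: %lu\n", i, (br_cntr >> (i * width)) & mask);
            return 0;       /* prints 2, 1, 2 */
    }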
@@ -1272,6 +1319,7 @@ out:
}
static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
+ struct evsel *evsel,
struct thread *thread,
struct perf_event_attr *attr,
struct machine *machine, FILE *fp)
@@ -1285,6 +1333,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
unsigned off;
struct symbol *lastsym = NULL;
int total_cycles = 0;
+ u64 br_cntr = 0;
if (!(br && br->nr))
return 0;
@@ -1296,6 +1345,9 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
x.machine = machine;
x.cpu = sample->cpu;
+ if (PRINT_FIELD(BRCNTR) && sample->branch_stack_cntr)
+ br_cntr = sample->branch_stack_cntr[nr - 1];
+
printed += fprintf(fp, "%c", '\n');
/* Handle first from jump, of which we don't know the entry. */
@@ -1307,7 +1359,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
x.cpumode, x.cpu, &lastsym, attr, fp);
printed += ip__fprintf_jump(entries[nr - 1].from, &entries[nr - 1],
&x, buffer, len, 0, fp, &total_cycles,
- attr, thread);
+ attr, thread, evsel, br_cntr);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, entries[nr - 1].from);
}
@@ -1337,8 +1389,10 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (ip == end) {
+ if (PRINT_FIELD(BRCNTR) && sample->branch_stack_cntr)
+ br_cntr = sample->branch_stack_cntr[i];
printed += ip__fprintf_jump(ip, &entries[i], &x, buffer + off, len - off, ++insn, fp,
- &total_cycles, attr, thread);
+ &total_cycles, attr, thread, evsel, br_cntr);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, ip);
break;
@@ -1375,7 +1429,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
* Due to pipeline delays the LBRs might be missing a branch
* or two, which can result in very large or negative blocks
* between final branch and sample. When this happens just
- * continue walking after the last TO until we hit a branch.
+ * continue walking after the last TO.
*/
start = entries[0].to;
end = sample->ip;
@@ -1410,7 +1464,9 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
printed += fprintf(fp, "\n");
if (ilen == 0)
break;
- if (arch_is_branch(buffer + off, len - off, x.is64bit) && start + off != sample->ip) {
+ if ((attr->branch_sample_type == 0 || attr->branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
+ && arch_is_uncond_branch(buffer + off, len - off, x.is64bit)
+ && start + off != sample->ip) {
/*
* Hit a missing branch. Just stop.
*/
@@ -1547,6 +1603,7 @@ void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
}
static int perf_sample__fprintf_insn(struct perf_sample *sample,
+ struct evsel *evsel,
struct perf_event_attr *attr,
struct thread *thread,
struct machine *machine, FILE *fp,
@@ -1567,7 +1624,7 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
printed += sample__fprintf_insn_asm(sample, thread, machine, fp, al);
}
if (PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN) || PRINT_FIELD(BRSTACKDISASM))
- printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp);
+ printed += perf_sample__fprintf_brstackinsn(sample, evsel, thread, attr, machine, fp);
return printed;
}
@@ -1639,7 +1696,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
if (print_srcline_last)
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
- printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp, al);
+ printed += perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
printed += fprintf(fp, "\n");
if (PRINT_FIELD(SRCCODE)) {
int ret = map__fprintf_srccode(al->map, al->addr, stdout,
@@ -2297,7 +2354,7 @@ static void process_event(struct perf_script *script,
if (evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
perf_sample__fprintf_bpf_output(sample, fp);
- perf_sample__fprintf_insn(sample, attr, thread, machine, fp, al);
+ perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
if (PRINT_FIELD(PHYS_ADDR))
fprintf(fp, "%16" PRIx64, sample->phys_addr);
@@ -2399,7 +2456,7 @@ static bool filter_cpu(struct perf_sample *sample)
return false;
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -2486,7 +2543,7 @@ out_put:
// Used when scr->per_event_dump is not set
static struct evsel_script es_stdout;
-static int process_attr(struct perf_tool *tool, union perf_event *event,
+static int process_attr(const struct perf_tool *tool, union perf_event *event,
struct evlist **pevlist)
{
struct perf_script *scr = container_of(tool, struct perf_script, tool);
@@ -2552,7 +2609,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
return 0;
}
-static int print_event_with_time(struct perf_tool *tool,
+static int print_event_with_time(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine,
@@ -2588,14 +2645,14 @@ static int print_event_with_time(struct perf_tool *tool,
return 0;
}
-static int print_event(struct perf_tool *tool, union perf_event *event,
+static int print_event(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine,
pid_t pid, pid_t tid)
{
return print_event_with_time(tool, event, sample, machine, pid, tid, 0);
}
-static int process_comm_event(struct perf_tool *tool,
+static int process_comm_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2607,7 +2664,7 @@ static int process_comm_event(struct perf_tool *tool,
event->comm.tid);
}
-static int process_namespaces_event(struct perf_tool *tool,
+static int process_namespaces_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2619,7 +2676,7 @@ static int process_namespaces_event(struct perf_tool *tool,
event->namespaces.tid);
}
-static int process_cgroup_event(struct perf_tool *tool,
+static int process_cgroup_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2631,7 +2688,7 @@ static int process_cgroup_event(struct perf_tool *tool,
sample->tid);
}
-static int process_fork_event(struct perf_tool *tool,
+static int process_fork_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2643,7 +2700,7 @@ static int process_fork_event(struct perf_tool *tool,
event->fork.pid, event->fork.tid,
event->fork.time);
}
-static int process_exit_event(struct perf_tool *tool,
+static int process_exit_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2656,7 +2713,7 @@ static int process_exit_event(struct perf_tool *tool,
return perf_event__process_exit(tool, event, sample, machine);
}
-static int process_mmap_event(struct perf_tool *tool,
+static int process_mmap_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2668,7 +2725,7 @@ static int process_mmap_event(struct perf_tool *tool,
event->mmap.tid);
}
-static int process_mmap2_event(struct perf_tool *tool,
+static int process_mmap2_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2680,7 +2737,7 @@ static int process_mmap2_event(struct perf_tool *tool,
event->mmap2.tid);
}
-static int process_switch_event(struct perf_tool *tool,
+static int process_switch_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2712,7 +2769,7 @@ static int process_auxtrace_error(struct perf_session *session,
}
static int
-process_lost_event(struct perf_tool *tool,
+process_lost_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2722,7 +2779,7 @@ process_lost_event(struct perf_tool *tool,
}
static int
-process_throttle_event(struct perf_tool *tool __maybe_unused,
+process_throttle_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2733,7 +2790,7 @@ process_throttle_event(struct perf_tool *tool __maybe_unused,
}
static int
-process_finished_round_event(struct perf_tool *tool __maybe_unused,
+process_finished_round_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct ordered_events *oe __maybe_unused)
@@ -2743,7 +2800,7 @@ process_finished_round_event(struct perf_tool *tool __maybe_unused,
}
static int
-process_bpf_events(struct perf_tool *tool __maybe_unused,
+process_bpf_events(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -2755,7 +2812,7 @@ process_bpf_events(struct perf_tool *tool __maybe_unused,
sample->tid);
}
-static int process_text_poke_events(struct perf_tool *tool,
+static int process_text_poke_events(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -3757,7 +3814,7 @@ static
int process_thread_map_event(struct perf_session *session,
union perf_event *event)
{
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (dump_trace)
@@ -3779,7 +3836,7 @@ static
int process_cpu_map_event(struct perf_session *session,
union perf_event *event)
{
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (dump_trace)
@@ -3809,11 +3866,10 @@ static int process_feature_event(struct perf_session *session,
static int perf_script__process_auxtrace_info(struct perf_session *session,
union perf_event *event)
{
- struct perf_tool *tool = session->tool;
-
int ret = perf_event__process_auxtrace_info(session, event);
if (ret == 0) {
+ const struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
ret = perf_script__setup_per_event_dump(script);
@@ -3900,38 +3956,7 @@ int cmd_script(int argc, const char **argv)
const char *dlfilter_file = NULL;
const char **__argv;
int i, j, err = 0;
- struct perf_script script = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .namespaces = perf_event__process_namespaces,
- .cgroup = perf_event__process_cgroup,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .attr = process_attr,
- .event_update = perf_event__process_event_update,
-#ifdef HAVE_LIBTRACEEVENT
- .tracing_data = perf_event__process_tracing_data,
-#endif
- .feature = process_feature_event,
- .build_id = perf_event__process_build_id,
- .id_index = perf_event__process_id_index,
- .auxtrace_info = perf_script__process_auxtrace_info,
- .auxtrace = perf_event__process_auxtrace,
- .auxtrace_error = perf_event__process_auxtrace_error,
- .stat = perf_event__process_stat_event,
- .stat_round = process_stat_round_event,
- .stat_config = process_stat_config_event,
- .thread_map = process_thread_map_event,
- .cpu_map = process_cpu_map_event,
- .throttle = process_throttle_event,
- .unthrottle = process_throttle_event,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
- },
- };
+ struct perf_script script = {};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
@@ -3979,7 +4004,8 @@ int cmd_script(int argc, const char **argv)
"brstacksym,flags,data_src,weight,bpf-output,brstackinsn,"
"brstackinsnlen,brstackdisasm,brstackoff,callindent,insn,disasm,insnlen,synth,"
"phys_addr,metric,misc,srccode,ipc,tod,data_page_size,"
- "code_page_size,ins_lat,machine_pid,vcpu,cgroup,retire_lat",
+ "code_page_size,ins_lat,machine_pid,vcpu,cgroup,retire_lat,"
+ "brcntr",
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
@@ -4052,6 +4078,8 @@ int cmd_script(int argc, const char **argv)
"Enable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
+ OPT_STRING(0, "addr2line", &symbol_conf.addr2line_path, "path",
+ "addr2line binary to use for line numbers"),
OPT_STRING(0, "time", &script.time_str, "str",
"Time span of interest (start,stop)"),
OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
@@ -4103,10 +4131,8 @@ int cmd_script(int argc, const char **argv)
data.path = input_name;
data.force = symbol_conf.force;
- if (unsorted_dump) {
+ if (unsorted_dump)
dump_trace = true;
- script.tool.ordered_events = false;
- }
if (symbol__validate_sym_arguments())
return -1;
@@ -4297,6 +4323,34 @@ script_found:
use_browser = 0;
}
+ perf_tool__init(&script.tool, !unsorted_dump);
+ script.tool.sample = process_sample_event;
+ script.tool.mmap = perf_event__process_mmap;
+ script.tool.mmap2 = perf_event__process_mmap2;
+ script.tool.comm = perf_event__process_comm;
+ script.tool.namespaces = perf_event__process_namespaces;
+ script.tool.cgroup = perf_event__process_cgroup;
+ script.tool.exit = perf_event__process_exit;
+ script.tool.fork = perf_event__process_fork;
+ script.tool.attr = process_attr;
+ script.tool.event_update = perf_event__process_event_update;
+#ifdef HAVE_LIBTRACEEVENT
+ script.tool.tracing_data = perf_event__process_tracing_data;
+#endif
+ script.tool.feature = process_feature_event;
+ script.tool.build_id = perf_event__process_build_id;
+ script.tool.id_index = perf_event__process_id_index;
+ script.tool.auxtrace_info = perf_script__process_auxtrace_info;
+ script.tool.auxtrace = perf_event__process_auxtrace;
+ script.tool.auxtrace_error = perf_event__process_auxtrace_error;
+ script.tool.stat = perf_event__process_stat_event;
+ script.tool.stat_round = process_stat_round_event;
+ script.tool.stat_config = process_stat_config_event;
+ script.tool.thread_map = process_thread_map_event;
+ script.tool.cpu_map = process_cpu_map_event;
+ script.tool.throttle = process_throttle_event;
+ script.tool.unthrottle = process_throttle_event;
+ script.tool.ordering_requires_timestamps = true;
session = perf_session__new(&data, &script.tool);
if (IS_ERR(session))
return PTR_ERR(session);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 661832756a24..689a3d43c258 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -70,6 +70,7 @@
#include "util/bpf_counter.h"
#include "util/iostat.h"
#include "util/util.h"
+#include "util/intel-tpebs.h"
#include "asm/bug.h"
#include <linux/time64.h>
@@ -248,7 +249,7 @@ static void perf_stat__reset_stats(void)
perf_stat__reset_shadow_stats();
}
-static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
+static int process_synthesized_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -293,14 +294,14 @@ static int read_single_counter(struct evsel *counter, int cpu_map_idx, int threa
* terminates. Use the wait4 values in that case.
*/
if (err && cpu_map_idx == 0 &&
- (counter->tool_event == PERF_TOOL_USER_TIME ||
- counter->tool_event == PERF_TOOL_SYSTEM_TIME)) {
+ (evsel__tool_event(counter) == PERF_TOOL_USER_TIME ||
+ evsel__tool_event(counter) == PERF_TOOL_SYSTEM_TIME)) {
u64 val, *start_time;
struct perf_counts_values *count =
perf_counts(counter->counts, cpu_map_idx, thread);
start_time = xyarray__entry(counter->start_times, cpu_map_idx, thread);
- if (counter->tool_event == PERF_TOOL_USER_TIME)
+ if (evsel__tool_event(counter) == PERF_TOOL_USER_TIME)
val = ru_stats.ru_utime_usec_stat.mean;
else
val = ru_stats.ru_stime_usec_stat.mean;
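The ru_stats fallback used here comes from wait4()'s rusage accounting of the exited workload. A standalone sketch of where those numbers originate (not the perf code itself):

    #include <stdio.h>
    #include <sys/resource.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            pid_t pid = fork();

            if (pid == 0)
                    _exit(0);       /* child: stand-in for the real workload */

            struct rusage ru;
            int status;

            if (wait4(pid, &status, 0, &ru) == pid)
                    printf("utime: %ld us, stime: %ld us\n",
                           ru.ru_utime.tv_sec * 1000000L + ru.ru_utime.tv_usec,
                           ru.ru_stime.tv_sec * 1000000L + ru.ru_stime.tv_usec);
            return 0;
    }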
@@ -683,6 +684,9 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
if (child_pid != -1)
kill(child_pid, SIGTERM);
+
+ tpebs_delete();
+
return COUNTER_FATAL;
}
@@ -833,7 +837,7 @@ try_again_reset:
return -1;
}
- if (evlist__apply_filters(evsel_list, &counter)) {
+ if (evlist__apply_filters(evsel_list, &counter, &target)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
counter->filter, evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
@@ -2180,7 +2184,7 @@ static
int process_stat_config_event(struct perf_session *session,
union perf_event *event)
{
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
perf_event__read_stat_config(&stat_config, &event->stat_config);
@@ -2229,7 +2233,7 @@ static
int process_thread_map_event(struct perf_session *session,
union perf_event *event)
{
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
if (st->threads) {
@@ -2248,7 +2252,7 @@ static
int process_cpu_map_event(struct perf_session *session,
union perf_event *event)
{
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
struct perf_cpu_map *cpus;
@@ -2271,15 +2275,6 @@ static const char * const stat_report_usage[] = {
};
static struct perf_stat perf_stat = {
- .tool = {
- .attr = perf_event__process_attr,
- .event_update = perf_event__process_event_update,
- .thread_map = process_thread_map_event,
- .cpu_map = process_cpu_map_event,
- .stat_config = process_stat_config_event,
- .stat = perf_event__process_stat_event,
- .stat_round = process_stat_round_event,
- },
.aggr_mode = AGGR_UNSET,
.aggr_level = 0,
};
@@ -2322,6 +2317,15 @@ static int __cmd_report(int argc, const char **argv)
perf_stat.data.path = input_name;
perf_stat.data.mode = PERF_DATA_MODE_READ;
+ perf_tool__init(&perf_stat.tool, /*ordered_events=*/false);
+ perf_stat.tool.attr = perf_event__process_attr;
+ perf_stat.tool.event_update = perf_event__process_event_update;
+ perf_stat.tool.thread_map = process_thread_map_event;
+ perf_stat.tool.cpu_map = process_cpu_map_event;
+ perf_stat.tool.stat_config = process_stat_config_event;
+ perf_stat.tool.stat = perf_event__process_stat_event;
+ perf_stat.tool.stat_round = process_stat_round_event;
+
session = perf_session__new(&perf_stat.data, &perf_stat.tool);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -2471,6 +2475,10 @@ int cmd_stat(int argc, const char **argv)
"disable adding events for the metric threshold calculation"),
OPT_BOOLEAN(0, "topdown", &topdown_run,
"measure top-down statistics"),
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+ OPT_BOOLEAN(0, "record-tpebs", &tpebs_recording,
+ "enable recording for tpebs when retire_latency required"),
+#endif
OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
"Set the metrics level for the top-down statistics (0: max level)"),
OPT_BOOLEAN(0, "smi-cost", &smi_cost,
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 19d4542ea18a..218c8b44d7be 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -320,7 +320,7 @@ static int *cpus_cstate_state;
static u64 *cpus_pstate_start_times;
static u64 *cpus_pstate_state;
-static int process_comm_event(struct perf_tool *tool,
+static int process_comm_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -330,7 +330,7 @@ static int process_comm_event(struct perf_tool *tool,
return 0;
}
-static int process_fork_event(struct perf_tool *tool,
+static int process_fork_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -340,7 +340,7 @@ static int process_fork_event(struct perf_tool *tool,
return 0;
}
-static int process_exit_event(struct perf_tool *tool,
+static int process_exit_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -571,7 +571,7 @@ typedef int (*tracepoint_handler)(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace);
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -1606,10 +1606,16 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
.mode = PERF_DATA_MODE_READ,
.force = tchart->force,
};
-
- struct perf_session *session = perf_session__new(&data, &tchart->tool);
+ struct perf_session *session;
int ret = -EINVAL;
+ perf_tool__init(&tchart->tool, /*ordered_events=*/true);
+ tchart->tool.comm = process_comm_event;
+ tchart->tool.fork = process_fork_event;
+ tchart->tool.exit = process_exit_event;
+ tchart->tool.sample = process_sample_event;
+
+ session = perf_session__new(&data, &tchart->tool);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -1924,13 +1930,6 @@ parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
int cmd_timechart(int argc, const char **argv)
{
struct timechart tchart = {
- .tool = {
- .comm = process_comm_event,
- .fork = process_fork_event,
- .exit = process_exit_event,
- .sample = process_sample_event,
- .ordered_events = true,
- },
.proc_num = 15,
.min_time = NSEC_PER_MSEC,
.merge_dist = 1000,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e8cbbf10d361..724a79386321 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -191,7 +191,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
if (use_browser <= 0)
sleep(5);
- map__set_erange_warned(map, true);
+ map__set_erange_warned(map);
}
static void perf_top__record_precise_ip(struct perf_top *top,
@@ -735,12 +735,12 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
- !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
- NULL);
+ !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
+ NULL, evsel);
return 0;
}
-static void perf_event__process_sample(struct perf_tool *tool,
+static void perf_event__process_sample(const struct perf_tool *tool,
const union perf_event *event,
struct evsel *evsel,
struct perf_sample *sample,
@@ -1055,7 +1055,7 @@ try_again:
}
}
- if (evlist__apply_filters(evlist, &counter)) {
+ if (evlist__apply_filters(evlist, &counter, &opts->target)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
counter->filter ?: "BPF", evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 8449f2beb54d..f6e847529073 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -19,6 +19,7 @@
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include <bpf/btf.h>
#ifdef HAVE_BPF_SKEL
#include "bpf_skel/augmented_raw_syscalls.skel.h"
#endif
@@ -64,6 +65,7 @@
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"
+#include "trace_augment.h"
#include <errno.h>
#include <inttypes.h>
@@ -101,6 +103,12 @@
/*
* strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
+ *
+ * We have to explicitly mark the direction of the flow of data, i.e. whether
+ * it goes from the kernel to user space or the other way around. Since the
+ * BPF collector we have so far copies only from user to kernel space, mark
+ * the arguments that go in that direction, so that we don't end up collecting
+ * the previous contents for syscall args that go from kernel to user space.
*/
struct syscall_arg_fmt {
size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
@@ -109,7 +117,12 @@ struct syscall_arg_fmt {
void *parm;
const char *name;
u16 nr_entries; // for arrays
+ bool from_user;
bool show_zero;
+#ifdef HAVE_LIBBPF_SUPPORT
+ const struct btf_type *type;
+ int type_id; /* used in btf_dump */
+#endif
};
struct syscall_fmt {
@@ -140,6 +153,9 @@ struct trace {
#ifdef HAVE_BPF_SKEL
struct augmented_raw_syscalls_bpf *skel;
#endif
+#ifdef HAVE_LIBBPF_SUPPORT
+ struct btf *btf;
+#endif
struct record_opts opts;
struct evlist *evlist;
struct machine *host;
@@ -196,6 +212,7 @@ struct trace {
bool show_string_prefix;
bool force;
bool vfs_getname;
+ bool force_btf;
int trace_pgfaults;
char *perfconfig_events;
struct {
@@ -204,6 +221,20 @@ struct trace {
} oe;
};
+static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ if (trace->btf != NULL)
+ return;
+
+ trace->btf = btf__load_vmlinux_btf();
+ if (verbose > 0) {
+ fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" :
+ "Failed to load vmlinux BTF\n");
+ }
+#endif
+}
+
struct tp_field {
int offset;
union {
@@ -830,6 +861,15 @@ static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
#define SCA_FILENAME syscall_arg__scnprintf_filename
+// 'argname' is purely documentational at this point; it replaces the previous comment carrying that info
+#define SCA_FILENAME_FROM_USER(argname) \
+ { .scnprintf = SCA_FILENAME, \
+ .from_user = true, }
+
+static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg);
+
+#define SCA_BUF syscall_arg__scnprintf_buf
+
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -887,6 +927,177 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
+#ifdef HAVE_LIBBPF_SUPPORT
+static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
+{
+ int id;
+
+ type = strstr(type, "enum ");
+ if (type == NULL)
+ return;
+
+ type += 5; // skip "enum " to get the enumeration name
+
+ id = btf__find_by_name(btf, type);
+ if (id < 0)
+ return;
+
+ arg_fmt->type = btf__type_by_id(btf, id);
+}
+
+static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
+{
+ const struct btf_type *bt = arg->fmt->type;
+ struct btf *btf = arg->trace->btf;
+ struct btf_enum *be = btf_enum(bt);
+
+ for (int i = 0; i < btf_vlen(bt); ++i, ++be) {
+ const char *name = btf__name_by_offset(btf, be->name_off);
+ int max_len = max(size, strlen(name));
+
+ if (strncmp(name, bf, max_len) == 0) {
+ *val = be->val;
+ return true;
+ }
+ }
+
+ return false;
+}
+
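For readers unfamiliar with these libbpf calls: resolving an enum via BTF is a name lookup followed by a linear scan of its enumerators. A minimal standalone sketch, reusing the bpf_cmd enum that the strarray tables below also reference (error handling trimmed):

    #include <bpf/btf.h>
    #include <stdio.h>

    int main(void)
    {
            struct btf *btf = btf__load_vmlinux_btf();
            int id = btf ? btf__find_by_name(btf, "bpf_cmd") : -1;

            if (id > 0) {
                    const struct btf_type *t = btf__type_by_id(btf, id);
                    const struct btf_enum *e = btf_enum(t);

                    for (int i = 0; i < btf_vlen(t); i++, e++)
                            printf("%s = %d\n",
                                   btf__name_by_offset(btf, e->name_off), e->val);
            }
            btf__free(btf);
            return 0;
    }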
+static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
+{
+ const struct btf_type *bt;
+ char *type = arg->type_name;
+ struct btf *btf;
+
+ trace__load_vmlinux_btf(arg->trace);
+
+ btf = arg->trace->btf;
+ if (btf == NULL)
+ return false;
+
+ if (arg->fmt->type == NULL) {
+ // See if this is an enum
+ syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type);
+ }
+
+ // Now let's see if we have a BTF type resolved
+ bt = arg->fmt->type;
+ if (bt == NULL)
+ return false;
+
+ // If it is an enum:
+ if (btf_is_enum(arg->fmt->type))
+ return syscall_arg__strtoul_btf_enum(bf, size, arg, val);
+
+ return false;
+}
+
+static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val)
+{
+ struct btf_enum *be = btf_enum(type);
+ const int nr_entries = btf_vlen(type);
+
+ for (int i = 0; i < nr_entries; ++i, ++be) {
+ if (be->val == val) {
+ return scnprintf(bf, size, "%s",
+ btf__name_by_offset(btf, be->name_off));
+ }
+ }
+
+ return 0;
+}
+
+struct trace_btf_dump_snprintf_ctx {
+ char *bf;
+ size_t printed, size;
+};
+
+static void trace__btf_dump_snprintf(void *vctx, const char *fmt, va_list args)
+{
+ struct trace_btf_dump_snprintf_ctx *ctx = vctx;
+
+ ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args);
+}
+
+static size_t btf_struct_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, struct syscall_arg *arg)
+{
+ struct trace_btf_dump_snprintf_ctx ctx = {
+ .bf = bf,
+ .size = size,
+ };
+ struct augmented_arg *augmented_arg = arg->augmented.args;
+ int type_id = arg->fmt->type_id, consumed;
+ struct btf_dump *btf_dump;
+
+ LIBBPF_OPTS(btf_dump_opts, dump_opts);
+ LIBBPF_OPTS(btf_dump_type_data_opts, dump_data_opts);
+
+ if (arg == NULL || arg->augmented.args == NULL)
+ return 0;
+
+ dump_data_opts.compact = true;
+ dump_data_opts.skip_names = !arg->trace->show_arg_names;
+
+ btf_dump = btf_dump__new(btf, trace__btf_dump_snprintf, &ctx, &dump_opts);
+ if (btf_dump == NULL)
+ return 0;
+
+ /* pretty print the struct data here */
+ if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data_opts) == 0)
+ return 0;
+
+ consumed = sizeof(*augmented_arg) + augmented_arg->size;
+ arg->augmented.args = ((void *)arg->augmented.args) + consumed;
+ arg->augmented.size -= consumed;
+
+ btf_dump__free(btf_dump);
+
+ return ctx.printed;
+}
+
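The dump path above follows libbpf's callback-based printer: btf_dump__new() takes a vprintf-style callback, and btf_dump__dump_type_data() renders raw bytes as a typed value through it. A compressed sketch of that flow (data and type_id assumed to come from elsewhere):

    #include <stdarg.h>
    #include <stdio.h>
    #include <bpf/btf.h>

    static void dump_printf(void *ctx, const char *fmt, va_list args)
    {
            vfprintf(stdout, fmt, args);    /* route libbpf's output chunks */
    }

    static void pretty_print(struct btf *btf, int type_id,
                             const void *data, size_t data_sz)
    {
            LIBBPF_OPTS(btf_dump_type_data_opts, opts, .compact = true);
            struct btf_dump *d = btf_dump__new(btf, dump_printf, NULL, NULL);

            if (d == NULL)
                    return;
            btf_dump__dump_type_data(d, type_id, data, data_sz, &opts);
            btf_dump__free(d);
    }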
+static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf,
+ size_t size, int val, char *type)
+{
+ struct syscall_arg_fmt *arg_fmt = arg->fmt;
+
+ if (trace->btf == NULL)
+ return 0;
+
+ if (arg_fmt->type == NULL) {
+ // Check if this is an enum and if we have the BTF type for it.
+ syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type);
+ }
+
+ // Did we manage to find a BTF type for the syscall/tracepoint argument?
+ if (arg_fmt->type == NULL)
+ return 0;
+
+ if (btf_is_enum(arg_fmt->type))
+ return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val);
+ else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type))
+ return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg);
+
+ return 0;
+}
+
+#else // HAVE_LIBBPF_SUPPORT
+static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __maybe_unused,
+ char *bf __maybe_unused, size_t size __maybe_unused, int val __maybe_unused,
+ char *type __maybe_unused)
+{
+ return 0;
+}
+
+static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused,
+ struct syscall_arg *arg __maybe_unused, u64 *val __maybe_unused)
+{
+ return false;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+#define STUL_BTF_TYPE syscall_arg__strtoul_btf_type
+
#define STRARRAY(name, array) \
{ .scnprintf = SCA_STRARRAY, \
.strtoul = STUL_STRARRAY, \
@@ -921,16 +1132,17 @@ static const struct syscall_fmt syscall_fmts[] = {
[1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
{ .name = "bind",
.arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
- [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
+ [1] = SCA_SOCKADDR_FROM_USER(umyaddr),
[2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
{ .name = "bpf",
- .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
+ .arg = { [0] = STRARRAY(cmd, bpf_cmd),
+ [1] = { .from_user = true /* attr */, }, } },
{ .name = "brk", .hexret = true,
.arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
{ .name = "clock_gettime",
.arg = { [0] = STRARRAY(clk_id, clockid), }, },
{ .name = "clock_nanosleep",
- .arg = { [2] = { .scnprintf = SCA_TIMESPEC, /* rqtp */ }, }, },
+ .arg = { [2] = SCA_TIMESPEC_FROM_USER(req), }, },
{ .name = "clone", .errpid = true, .nr_args = 5,
.arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
[1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
@@ -941,7 +1153,7 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
{ .name = "connect",
.arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
- [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
+ [1] = SCA_SOCKADDR_FROM_USER(servaddr),
[2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
{ .name = "epoll_ctl",
.arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
@@ -949,11 +1161,11 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
{ .name = "faccessat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
- [1] = { .scnprintf = SCA_FILENAME, /* pathname */ },
+ [1] = SCA_FILENAME_FROM_USER(pathname),
[2] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
{ .name = "faccessat2",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
- [1] = { .scnprintf = SCA_FILENAME, /* pathname */ },
+ [1] = SCA_FILENAME_FROM_USER(pathname),
[2] = { .scnprintf = SCA_ACCMODE, /* mode */ },
[3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, },
{ .name = "fchmodat",
@@ -975,7 +1187,7 @@ static const struct syscall_fmt syscall_fmts[] = {
[2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
{ .name = "fspick",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
- [1] = { .scnprintf = SCA_FILENAME, /* path */ },
+ [1] = SCA_FILENAME_FROM_USER(path),
[2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
{ .name = "fstat", .alias = "newfstat", },
{ .name = "futex",
@@ -1039,29 +1251,29 @@ static const struct syscall_fmt syscall_fmts[] = {
.parm = &strarray__mmap_flags, },
[5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
{ .name = "mount",
- .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
+ .arg = { [0] = SCA_FILENAME_FROM_USER(devname),
[3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
.mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
{ .name = "move_mount",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
- [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
+ [1] = SCA_FILENAME_FROM_USER(pathname),
[2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
- [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
+ [3] = SCA_FILENAME_FROM_USER(pathname),
[4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
{ .name = "mprotect",
.arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
[2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, },
{ .name = "mq_unlink",
- .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
+ .arg = { [0] = SCA_FILENAME_FROM_USER(u_name), }, },
{ .name = "mremap", .hexret = true,
.arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
{ .name = "name_to_handle_at",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
{ .name = "nanosleep",
- .arg = { [0] = { .scnprintf = SCA_TIMESPEC, /* req */ }, }, },
+ .arg = { [0] = SCA_TIMESPEC_FROM_USER(req), }, },
{ .name = "newfstatat", .alias = "fstatat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ },
- [1] = { .scnprintf = SCA_FILENAME, /* pathname */ },
+ [1] = SCA_FILENAME_FROM_USER(pathname),
[3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
{ .name = "open",
.arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
@@ -1072,7 +1284,7 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
[2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
{ .name = "perf_event_open",
- .arg = { [0] = { .scnprintf = SCA_PERF_ATTR, /* attr */ },
+ .arg = { [0] = SCA_PERF_ATTR_FROM_USER(attr),
[2] = { .scnprintf = SCA_INT, /* cpu */ },
[3] = { .scnprintf = SCA_FD, /* group_fd */ },
[4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
@@ -1097,7 +1309,8 @@ static const struct syscall_fmt syscall_fmts[] = {
{ .name = "pread", .alias = "pread64", },
{ .name = "preadv", .alias = "pread", },
{ .name = "prlimit64",
- .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
+ .arg = { [1] = STRARRAY(resource, rlimit_resources),
+ [2] = { .from_user = true /* new_rlim */, }, }, },
{ .name = "pwrite", .alias = "pwrite64", },
{ .name = "readlinkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
@@ -1114,6 +1327,8 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
[2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
[4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
+ { .name = "rseq", .errpid = true,
+ .arg = { [0] = { .from_user = true /* rseq */, }, }, },
{ .name = "rt_sigaction",
.arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "rt_sigprocmask",
@@ -1135,12 +1350,15 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
{ .name = "sendto",
.arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
- [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
+ [4] = SCA_SOCKADDR_FROM_USER(addr), }, },
+ { .name = "set_robust_list", .errpid = true,
+ .arg = { [0] = { .from_user = true /* head */, }, }, },
{ .name = "set_tid_address", .errpid = true, },
{ .name = "setitimer",
.arg = { [0] = STRARRAY(which, itimers), }, },
{ .name = "setrlimit",
- .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
+ .arg = { [0] = STRARRAY(resource, rlimit_resources),
+ [1] = { .from_user = true /* rlim */, }, }, },
{ .name = "setsockopt",
.arg = { [1] = STRARRAY(level, socket_level), }, },
{ .name = "socket",
@@ -1157,9 +1375,9 @@ static const struct syscall_fmt syscall_fmts[] = {
[2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ } ,
[3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
{ .name = "swapoff",
- .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
+ .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
{ .name = "swapon",
- .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
+ .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
{ .name = "symlinkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
{ .name = "sync_file_range",
@@ -1169,11 +1387,11 @@ static const struct syscall_fmt syscall_fmts[] = {
{ .name = "tkill",
.arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "umount2", .alias = "umount",
- .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
+ .arg = { [0] = SCA_FILENAME_FROM_USER(name), }, },
{ .name = "uname", .alias = "newuname", },
{ .name = "unlinkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
- [1] = { .scnprintf = SCA_FILENAME, /* pathname */ },
+ [1] = SCA_FILENAME_FROM_USER(pathname),
[2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
{ .name = "utimensat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
@@ -1181,6 +1399,8 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
{ .name = "waitid", .errpid = true,
.arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
+ { .name = "write", .errpid = true,
+ .arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1238,6 +1458,7 @@ struct syscall {
bool is_exit;
bool is_open;
bool nonexistent;
+ bool use_btf;
struct tep_format_field *args;
const char *name;
const struct syscall_fmt *fmt;
@@ -1551,6 +1772,32 @@ static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
return 0;
}
+#define MAX_CONTROL_CHAR 31
+#define MAX_ASCII 127
+
+static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg)
+{
+ struct augmented_arg *augmented_arg = arg->augmented.args;
+ unsigned char *orig;
+ size_t printed = 0;
+ int consumed;
+
+ if (augmented_arg == NULL)
+ return 0;
+
+ orig = (unsigned char *)augmented_arg->value;
+
+ for (int j = 0; j < augmented_arg->size; ++j) {
+ bool control_char = orig[j] <= MAX_CONTROL_CHAR || orig[j] >= MAX_ASCII;
+ /* escape control characters (0-31), DEL (127) and non-ASCII bytes as \<decimal> */
+ printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]);
+ }
+
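+ /* consume this argument's augmented payload (header + data) so the next argument reads its own */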
+ consumed = sizeof(*augmented_arg) + augmented_arg->size;
+ arg->augmented.args = ((void *)arg->augmented.args) + consumed;
+ arg->augmented.size -= consumed;
+
+ return printed;
+}
+
static bool trace__filter_duration(struct trace *trace, double t)
{
return t < (trace->duration_filter * NSEC_PER_MSEC);
@@ -1637,7 +1884,7 @@ static int trace__process_event(struct trace *trace, struct machine *machine,
return ret;
}
-static int trace__tool_process(struct perf_tool *tool,
+static int trace__tool_process(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -1744,7 +1991,8 @@ static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *n
}
static struct tep_format_field *
-syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
+syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field,
+ bool *use_btf)
{
struct tep_format_field *last_field = NULL;
int len;
@@ -1757,11 +2005,15 @@ syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field
len = strlen(field->name);
+ // Heuristic: a pointer to const data is something the kernel only reads, i.e. it is copied from user space.
+ if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const "))
+ arg->from_user = true;
+
if (strcmp(field->type, "const char *") == 0 &&
((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
- strstr(field->name, "path") != NULL))
+ strstr(field->name, "path") != NULL)) {
arg->scnprintf = SCA_FILENAME;
- else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
+ } else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
arg->scnprintf = SCA_PTR;
else if (strcmp(field->type, "pid_t") == 0)
arg->scnprintf = SCA_PID;
@@ -1782,6 +2034,9 @@ syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field
* 7 unsigned long
*/
arg->scnprintf = SCA_FD;
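+ /* enum arguments get their names resolved via BTF; flag it so the caller loads vmlinux BTF */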
+ } else if (strstr(field->type, "enum") && use_btf != NULL) {
+ *use_btf = true;
+ arg->strtoul = STUL_BTF_TYPE;
} else {
const struct syscall_arg_fmt *fmt =
syscall_arg_fmt__find_by_name(field->name);
@@ -1798,7 +2053,8 @@ syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field
static int syscall__set_arg_fmts(struct syscall *sc)
{
- struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
+ struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args,
+ &sc->use_btf);
if (last_field)
sc->args_size = last_field->offset + last_field->size;
@@ -1811,6 +2067,7 @@ static int trace__read_syscall_info(struct trace *trace, int id)
char tp_name[128];
struct syscall *sc;
const char *name = syscalltbl__name(trace->sctbl, id);
+ int err;
#ifdef HAVE_SYSCALL_TABLE_SUPPORT
if (trace->syscalls.table == NULL) {
@@ -1883,15 +2140,21 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
- return syscall__set_arg_fmts(sc);
+ err = syscall__set_arg_fmts(sc);
+
+ /* after calling syscall__set_arg_fmts() we'll know whether use_btf is true */
+ if (sc->use_btf)
+ trace__load_vmlinux_btf(trace);
+
+ return err;
}
-static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
+static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf)
{
struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
if (fmt != NULL) {
- syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
+ syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields, use_btf);
return 0;
}
@@ -2050,7 +2313,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
unsigned char *args, void *augmented_args, int augmented_args_size,
struct trace *trace, struct thread *thread)
{
- size_t printed = 0;
+ size_t printed = 0, btf_printed;
unsigned long val;
u8 bit = 1;
struct syscall_arg arg = {
@@ -2066,6 +2329,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
.show_string_prefix = trace->show_string_prefix,
};
struct thread_trace *ttrace = thread__priv(thread);
+ void *default_scnprintf;
/*
* Things like fcntl will set this in its 'cmd' formatter to pick the
@@ -2093,9 +2357,13 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
/*
* Suppress this argument if its value is zero and show_zero
* property isn't set.
+ *
+ * If it has a BTF type, then override the zero suppression knob
+ * as the common case is for zero in an enum to have an associated entry.
*/
if (val == 0 && !trace->show_zeros &&
- !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero))
+ !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) &&
+ !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE))
continue;
printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
@@ -2103,6 +2371,17 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
if (trace->show_arg_names)
printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
+ default_scnprintf = sc->arg_fmt[arg.idx].scnprintf;
+
+ if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) {
+ btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed,
+ size - printed, val, field->type);
+ if (btf_printed) {
+ printed += btf_printed;
+ continue;
+ }
+ }
+
printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
bf + printed, size - printed, &arg, val);
}
@@ -2749,7 +3028,7 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
size_t size = sizeof(bf);
struct tep_format_field *field = evsel->tp_format->format.fields;
struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
- size_t printed = 0;
+ size_t printed = 0, btf_printed;
unsigned long val;
u8 bit = 1;
struct syscall_arg syscall_arg = {
@@ -2791,7 +3070,7 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
/* Suppress this argument if its value is zero and show_zero property isn't set. */
- if (val == 0 && !trace->show_zeros && !arg->show_zero)
+ if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE)
continue;
printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
@@ -2799,6 +3078,12 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
if (trace->show_arg_names)
printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
+ btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->type);
+ if (btf_printed) {
+ printed += btf_printed;
+ continue;
+ }
+
printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
}
@@ -3009,7 +3294,7 @@ static void trace__set_base_time(struct trace *trace,
trace->base_time = sample->time;
}
-static int trace__process_sample(struct perf_tool *tool,
+static int trace__process_sample(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -3276,6 +3561,23 @@ out_enomem:
}
#ifdef HAVE_BPF_SKEL
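+/* Resolve 'type' in vmlinux BTF and memoize the result; bails out if already cached or the type is unknown. */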
+static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
+{
+ int id;
+
+ if (arg_fmt->type != NULL)
+ return -1;
+
+ id = btf__find_by_name(btf, type);
+ if (id < 0)
+ return -1;
+
+ arg_fmt->type = btf__type_by_id(btf, id);
+ arg_fmt->type_id = id;
+
+ return 0;
+}
+
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
struct bpf_program *pos, *prog = NULL;
@@ -3351,6 +3653,91 @@ static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
+static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int key, unsigned int *beauty_array)
+{
+ struct tep_format_field *field;
+ struct syscall *sc = trace__syscall_info(trace, NULL, key);
+ const struct btf_type *bt;
+ char *struct_offset, *tmp, name[32];
+ bool can_augment = false;
+ int i, cnt;
+
+ if (sc == NULL)
+ return -1;
+
+ trace__load_vmlinux_btf(trace);
+ if (trace->btf == NULL)
+ return -1;
+
+ for (i = 0, field = sc->args; field; ++i, field = field->next) {
+ // XXX We're only collecting pointer payloads _from_ user space
+ if (!sc->arg_fmt[i].from_user)
+ continue;
+
+ struct_offset = strstr(field->type, "struct ");
+ if (struct_offset == NULL)
+ struct_offset = strstr(field->type, "union ");
+ else
+ struct_offset++; // skip one extra char: "struct " is one longer than "union "
+
+ if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr arg) */
+ struct_offset += 6;
+
+ /* for 'struct foo *', we only want 'foo' */
+ for (tmp = struct_offset, cnt = 0; *tmp != ' ' && *tmp != '\0'; ++tmp, ++cnt) {
+ }
+
+ if (cnt >= (int)sizeof(name)) /* truncate rather than overflow name[] on long type names */
+ cnt = sizeof(name) - 1;
+ strncpy(name, struct_offset, cnt);
+ name[cnt] = '\0';
+
+ /* cache struct's btf_type and type_id */
+ if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name))
+ continue;
+
+ bt = sc->arg_fmt[i].type;
+ beauty_array[i] = bt->size;
+ can_augment = true;
+ } else if (field->flags & TEP_FIELD_IS_POINTER && /* string */
+ strcmp(field->type, "const char *") == 0 &&
+ (strstr(field->name, "name") ||
+ strstr(field->name, "path") ||
+ strstr(field->name, "file") ||
+ strstr(field->name, "root") ||
+ strstr(field->name, "key") ||
+ strstr(field->name, "special") ||
+ strstr(field->name, "type") ||
+ strstr(field->name, "description"))) {
+ beauty_array[i] = 1;
+ can_augment = true;
+ } else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */
+ strstr(field->type, "char *") &&
+ (strstr(field->name, "buf") ||
+ strstr(field->name, "val") ||
+ strstr(field->name, "msg"))) {
+ int j;
+ struct tep_format_field *field_tmp;
+
+ /* find the size of the buffer that appears in pairs with buf */
+ for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) {
+ if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */
+ (strstr(field_tmp->name, "count") ||
+ strstr(field_tmp->name, "siz") || /* size, bufsiz */
+ (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) {
+ /* filename's got 'len' in it, we don't want that */
+ beauty_array[i] = -(j + 1);
+ can_augment = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (can_augment)
+ return 0;
+
+ return -1;
+}
+
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
struct tep_format_field *field, *candidate_field;
@@ -3455,7 +3842,9 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
+ int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter);
int err = 0;
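+ /* one slot per syscall argument (at most 6) */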
+ unsigned int beauty_array[6];
for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i);
@@ -3474,6 +3863,15 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
if (err)
break;
+
+ /* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */
+ memset(beauty_array, 0, sizeof(beauty_array));
+ err = trace__bpf_sys_enter_beauty_map(trace, key, (unsigned int *)beauty_array);
+ if (err)
+ continue;
+ err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY);
+ if (err)
+ break;
}
/*
@@ -3680,7 +4078,8 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
return __trace__deliver_event(trace, event->event);
}
-static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
+static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg,
+ char **type)
{
struct tep_format_field *field;
struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
@@ -3689,13 +4088,15 @@ static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel
return NULL;
for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
- if (strcmp(field->name, arg) == 0)
+ if (strcmp(field->name, arg) == 0) {
+ *type = field->type;
return fmt;
+ }
return NULL;
}
-static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
+static int trace__expand_filter(struct trace *trace, struct evsel *evsel)
{
char *tok, *left = evsel->filter, *new_filter = evsel->filter;
@@ -3728,14 +4129,14 @@ static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel
struct syscall_arg_fmt *fmt;
int left_size = tok - left,
right_size = right_end - right;
- char arg[128];
+ char arg[128], *type;
while (isspace(left[left_size - 1]))
--left_size;
scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
- fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
+ fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg, &type);
if (fmt == NULL) {
pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
arg, evsel->name, evsel->filter);
@@ -3748,6 +4149,9 @@ static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel
if (fmt->strtoul) {
u64 val;
struct syscall_arg syscall_arg = {
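+ /* STUL_BTF_TYPE resolves enum names via BTF, so pass the trace, the arg fmt and the type name */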
+ .trace = trace,
+ .fmt = fmt,
+ .type_name = type,
.parm = fmt->parm,
};
@@ -3959,7 +4363,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = trace__expand_filters(trace, &evsel);
if (err)
goto out_delete_evlist;
- err = evlist__apply_filters(evlist, &evsel);
+ err = evlist__apply_filters(evlist, &evsel, &trace->opts.target);
if (err < 0)
goto out_error_apply_filters;
@@ -4451,7 +4855,7 @@ static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
}
}
-static int evlist__set_syscall_tp_fields(struct evlist *evlist)
+static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf)
{
struct evsel *evsel;
@@ -4460,7 +4864,7 @@ static int evlist__set_syscall_tp_fields(struct evlist *evlist)
continue;
if (strcmp(evsel->tp_format->system, "syscalls")) {
- evsel__init_tp_arg_scnprintf(evsel);
+ evsel__init_tp_arg_scnprintf(evsel, use_btf);
continue;
}
@@ -4781,6 +5185,8 @@ int cmd_trace(int argc, const char **argv)
OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
"ms to wait before starting measurement after program "
"start"),
+ OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer"
+ "to customized ones"),
OPTS_EVSWITCH(&trace.evswitch),
OPT_END()
};
@@ -4938,11 +5344,16 @@ skip_augmentation:
}
if (trace.evlist->core.nr_entries > 0) {
+ bool use_btf = false;
+
evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
- if (evlist__set_syscall_tp_fields(trace.evlist)) {
+ if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) {
perror("failed to set syscalls:* tracepoint fields");
goto out;
}
+
+ if (use_btf)
+ trace__load_vmlinux_btf(&trace);
}
if (trace.sort_events) {
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index 398aa53e9e2e..e149d96c6dc5 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -46,45 +46,18 @@ static void status_print(const char *name, const char *macro,
printf(" # %s\n", macro);
}
-#define STATUS(__d, __m) \
-do { \
- if (IS_BUILTIN(__d)) \
- status_print(#__m, #__d, "on"); \
- else \
- status_print(#__m, #__d, "OFF"); \
+#define STATUS(feature) \
+do { \
+ if (feature.is_builtin) \
+ status_print(feature.name, feature.macro, "on"); \
+ else \
+ status_print(feature.name, feature.macro, "OFF"); \
} while (0)
static void library_status(void)
{
- STATUS(HAVE_DWARF_SUPPORT, dwarf);
- STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
-#ifndef HAVE_SYSCALL_TABLE_SUPPORT
- STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
-#endif
- STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table);
- STATUS(HAVE_LIBBFD_SUPPORT, libbfd);
- STATUS(HAVE_DEBUGINFOD_SUPPORT, debuginfod);
- STATUS(HAVE_LIBELF_SUPPORT, libelf);
- STATUS(HAVE_LIBNUMA_SUPPORT, libnuma);
- STATUS(HAVE_LIBNUMA_SUPPORT, numa_num_possible_cpus);
- STATUS(HAVE_LIBPERL_SUPPORT, libperl);
- STATUS(HAVE_LIBPYTHON_SUPPORT, libpython);
- STATUS(HAVE_SLANG_SUPPORT, libslang);
- STATUS(HAVE_LIBCRYPTO_SUPPORT, libcrypto);
- STATUS(HAVE_LIBUNWIND_SUPPORT, libunwind);
- STATUS(HAVE_DWARF_SUPPORT, libdw-dwarf-unwind);
- STATUS(HAVE_LIBCAPSTONE_SUPPORT, libcapstone);
- STATUS(HAVE_ZLIB_SUPPORT, zlib);
- STATUS(HAVE_LZMA_SUPPORT, lzma);
- STATUS(HAVE_AUXTRACE_SUPPORT, get_cpuid);
- STATUS(HAVE_LIBBPF_SUPPORT, bpf);
- STATUS(HAVE_AIO_SUPPORT, aio);
- STATUS(HAVE_ZSTD_SUPPORT, zstd);
- STATUS(HAVE_LIBPFM, libpfm4);
- STATUS(HAVE_LIBTRACEEVENT, libtraceevent);
- STATUS(HAVE_BPF_SKEL, bpf_skeletons);
- STATUS(HAVE_DWARF_UNWIND_SUPPORT, dwarf-unwind-support);
- STATUS(HAVE_CSTRACE_SUPPORT, libopencsd);
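+ /* the table ends with a sentinel entry whose .name is NULL */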
+ for (int i = 0; supported_features[i].name; ++i)
+ STATUS(supported_features[i]);
}
int cmd_version(int argc, const char **argv)
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index f4375deabfa3..94f4b3769bf7 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -2,6 +2,22 @@
#ifndef BUILTIN_H
#define BUILTIN_H
+#include <stddef.h>
+#include <linux/compiler.h>
+#include <tools/config.h>
+
+struct feature_status {
+ const char *name;
+ const char *macro;
+ int is_builtin;
+};
+
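+/* Build a feature_status entry: keep the config macro's name as a string and record whether it was built in. */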
+#define FEATURE_STATUS(name_, macro_) { \
+ .name = name_, \
+ .macro = #macro_, \
+ .is_builtin = IS_BUILTIN(macro_) }
+
+extern struct feature_status supported_features[];
struct cmdnames;
void list_common_cmds_help(void);
@@ -11,6 +27,7 @@ int cmd_annotate(int argc, const char **argv);
int cmd_bench(int argc, const char **argv);
int cmd_buildid_cache(int argc, const char **argv);
int cmd_buildid_list(int argc, const char **argv);
+int cmd_check(int argc, const char **argv);
int cmd_config(int argc, const char **argv);
int cmd_c2c(int argc, const char **argv);
int cmd_diff(int argc, const char **argv);
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 672421b858ac..714c78e5da07 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -172,6 +172,7 @@ check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B
check lib/list_sort.c '-I "^#include <linux/bug.h>"'
# diff non-symmetric files
+check_2 tools/perf/arch/x86/entry/syscalls/syscall_32.tbl arch/x86/entry/syscalls/syscall_32.tbl
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
check_2 tools/perf/arch/powerpc/entry/syscalls/syscall.tbl arch/powerpc/kernel/syscalls/syscall.tbl
check_2 tools/perf/arch/s390/entry/syscalls/syscall.tbl arch/s390/kernel/syscalls/syscall.tbl
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index bd3f80b5bb46..4def800f4089 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -52,6 +52,7 @@ static struct cmd_struct commands[] = {
{ "archive", NULL, 0 },
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
+ { "check", cmd_check, 0 },
{ "config", cmd_config, 0 },
{ "c2c", cmd_c2c, 0 },
{ "diff", cmd_diff, 0 },
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 1d18bb89402e..d941bc9d16e9 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -11,6 +11,8 @@ METRIC_TEST_PY = pmu-events/metric_test.py
EMPTY_PMU_EVENTS_C = pmu-events/empty-pmu-events.c
PMU_EVENTS_C = $(OUTPUT)pmu-events/pmu-events.c
METRIC_TEST_LOG = $(OUTPUT)pmu-events/metric_test.log
+TEST_EMPTY_PMU_EVENTS_C = $(OUTPUT)pmu-events/test-empty-pmu-events.c
+EMPTY_PMU_EVENTS_TEST_LOG = $(OUTPUT)pmu-events/empty-pmu-events.log
ifeq ($(JEVENTS_ARCH),)
JEVENTS_ARCH=$(SRCARCH)
@@ -31,7 +33,15 @@ $(METRIC_TEST_LOG): $(METRIC_TEST_PY) $(METRIC_PY)
$(call rule_mkdir)
$(Q)$(call echo-cmd,test)$(PYTHON) $< 2> $@ || (cat $@ && false)
-$(PMU_EVENTS_C): $(JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_LOG)
+$(TEST_EMPTY_PMU_EVENTS_C): $(JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_LOG)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) none none pmu-events/arch $@
+
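+# Regression test: the checked-in empty-pmu-events.c must match what jevents.py regenerates.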
+$(EMPTY_PMU_EVENTS_TEST_LOG): $(EMPTY_PMU_EVENTS_C) $(TEST_EMPTY_PMU_EVENTS_C)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,test)diff -u $^ 2> $@ || (cat $@ && false)
+
+$(PMU_EVENTS_C): $(JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_LOG) $(EMPTY_PMU_EVENTS_TEST_LOG)
$(call rule_mkdir)
$(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) $(JEVENTS_ARCH) $(JEVENTS_MODEL) pmu-events/arch $@
endif
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/instruction.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/instruction.json
index 18d1f2f76a23..9fe697d12fe0 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/instruction.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/instruction.json
@@ -78,9 +78,6 @@
"ArchStdEvent": "OP_RETIRED"
},
{
- "ArchStdEvent": "OP_SPEC"
- },
- {
"PublicDescription": "Operation speculatively executed, NOP",
"EventCode": "0x100",
"EventName": "NOP_SPEC",
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/ali_drw.json b/tools/perf/pmu-events/arch/arm64/thead/yitian710/sys/ali_drw.json
index e21c469a8ef0..e21c469a8ef0 100644
--- a/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/ali_drw.json
+++ b/tools/perf/pmu-events/arch/arm64/thead/yitian710/sys/ali_drw.json
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/thead/yitian710/sys/metrics.json
index bc865b374b6a..bc865b374b6a 100644
--- a/tools/perf/pmu-events/arch/arm64/freescale/yitian710/sys/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/thead/yitian710/sys/metrics.json
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
index 839ae26945fb..b7e0be09ff57 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
@@ -1,12 +1,22 @@
[
{
+ "EventCode": "0x1002C",
+ "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
+ "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
+ },
+ {
+ "EventCode": "0x200FD",
+ "EventName": "PM_L1_ICACHE_MISS",
+ "BriefDescription": "Demand instruction cache miss."
+ },
+ {
+ "EventCode": "0x30068",
+ "EventName": "PM_L1_ICACHE_RELOADED_PREF",
+ "BriefDescription": "Counts all instruction cache prefetch reloads (includes demand turned into prefetch)."
+ },
+ {
"EventCode": "0x300F4",
"EventName": "PM_RUN_INST_CMPL_CONC",
"BriefDescription": "PowerPC instruction completed by this thread when all threads in the core had the run-latch set."
- },
- {
- "EventCode": "0x400F6",
- "EventName": "PM_BR_MPRED_CMPL",
- "BriefDescription": "A mispredicted branch completed. Includes direction and target."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/datasource.json b/tools/perf/pmu-events/arch/powerpc/power10/datasource.json
index 0eeaaf1a95b8..a5d5be35b5e6 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/datasource.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/datasource.json
@@ -1,5 +1,15 @@
[
{
+ "EventCode": "0x1505E",
+ "EventName": "PM_LD_HIT_L1",
+ "BriefDescription": "Load finished without experiencing an L1 miss."
+ },
+ {
+ "EventCode": "0x100FC",
+ "EventName": "PM_LD_REF_L1",
+ "BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
+ },
+ {
"EventCode": "0x200FE",
"EventName": "PM_DATA_FROM_L2MISS",
"BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
@@ -10,11 +20,41 @@
"BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss."
},
{
+ "EventCode": "0x400F0",
+ "EventName": "PM_LD_DEMAND_MISS_L1_FIN",
+ "BriefDescription": "Load missed L1, counted at finish time."
+ },
+ {
"EventCode": "0x400FE",
"EventName": "PM_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
},
{
+ "EventCode": "0x0000004080",
+ "EventName": "PM_INST_FROM_L1",
+ "BriefDescription": "An instruction fetch hit in the L1. Each fetch group contains 8 instructions. The same line can hit 4 times if 32 sequential instructions are fetched."
+ },
+ {
+ "EventCode": "0x000000026080",
+ "EventName": "PM_L2_LD_MISS",
+ "BriefDescription": "All successful D-Side Load dispatches for this thread that missed in the L2. Since the event happens in a 2:1 clock domain and is time-sliced across all 4 threads, the event count should be multiplied by 2."
+ },
+ {
+ "EventCode": "0x000000026880",
+ "EventName": "PM_L2_ST_MISS",
+ "BriefDescription": "All successful D-Side Store dispatches for this thread that missed in the L2. Since the event happens in a 2:1 clock domain and is time-sliced across all 4 threads, the event count should be multiplied by 2."
+ },
+ {
+ "EventCode": "0x010000046880",
+ "EventName": "PM_L2_ST_HIT",
+ "BriefDescription": "All successful D-side store dispatches for this thread that were L2 hits. Since the event happens in a 2:1 clock domain and is time-sliced across all 4 threads, the event count should be multiplied by 2."
+ },
+ {
+ "EventCode": "0x000000036880",
+ "EventName": "PM_L2_INST_MISS",
+ "BriefDescription": "All successful instruction (demand and prefetch) dispatches for this thread that missed in the L2. Since the event happens in a 2:1 clock domain and is time-sliced across all 4 threads, the event count should be multiplied by 2."
+ },
+ {
"EventCode": "0x000300000000C040",
"EventName": "PM_INST_FROM_L2",
"BriefDescription": "The processor's instruction cache was reloaded from the local core's L2 due to a demand miss."
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
index 5977f5e64212..b6998987ab75 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
@@ -75,18 +75,48 @@
"BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
},
{
+ "EventCode": "0x44054",
+ "EventName": "PM_VECTOR_LD_CMPL",
+ "BriefDescription": "Vector load instruction completed."
+ },
+ {
"EventCode": "0x44056",
"EventName": "PM_VECTOR_ST_CMPL",
"BriefDescription": "Vector store instruction completed."
},
{
+ "EventCode": "0x4D05E",
+ "EventName": "PM_BR_CMPL",
+ "BriefDescription": "A branch completed. All branches are included."
+ },
+ {
"EventCode": "0x4E054",
"EventName": "PM_DTLB_HIT_1G",
"BriefDescription": "Data TLB hit (DERAT reload) page size 1G. Implies radix translation. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
},
{
+ "EventCode": "0x400F6",
+ "EventName": "PM_BR_MPRED_CMPL",
+ "BriefDescription": "A mispredicted branch completed. Includes direction and target."
+ },
+ {
"EventCode": "0x400FC",
"EventName": "PM_ITLB_MISS",
"BriefDescription": "Instruction TLB reload (after a miss), all page sizes. Includes only demand misses."
+ },
+ {
+ "EventCode": "0x00000048B4",
+ "EventName": "PM_BR_TKN_UNCOND_FIN",
+ "BriefDescription": "An unconditional branch finished. All unconditional branches are taken."
+ },
+ {
+ "EventCode": "0x00000040B8",
+ "EventName": "PM_PRED_BR_TKN_COND_DIR",
+ "BriefDescription": "A conditional branch finished with correctly predicted direction. Resolved taken."
+ },
+ {
+ "EventCode": "0x00000048B8",
+ "EventName": "PM_PRED_BR_NTKN_COND_DIR",
+ "BriefDescription": "A conditional branch finished with correctly predicted direction. Resolved not taken."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/locks.json b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
index b5a0d6521963..a8ea4d0def1a 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/locks.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
@@ -5,8 +5,18 @@
"BriefDescription": "Conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
},
{
+ "EventCode": "0x2E014",
+ "EventName": "PM_STCX_FIN",
+ "BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
"EventCode": "0x4E050",
"EventName": "PM_STCX_PASS_FIN",
"BriefDescription": "Conditional store instruction (STCX) passed. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "0x000000C8B8",
+ "EventName": "PM_STCX_SUCCESS_CMPL",
+ "BriefDescription": "STCX instructions that completed successfully. Specifically, counts only when a pass status is returned from the nest."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
index 885262957beb..0d7191b3f2c6 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/memory.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
@@ -70,6 +70,11 @@
"BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
},
{
+ "EventCode": "0x3F04A",
+ "EventName": "PM_LSU_ST5_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST2 port."
+ },
+ {
"EventCode": "0x3C054",
"EventName": "PM_DERAT_MISS_16M",
"BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
@@ -108,5 +113,30 @@
"EventCode": "0x4C05A",
"EventName": "PM_DTLB_MISS_1G",
"BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x000000F880",
+ "EventName": "PM_SNOOP_TLBIE_CYC",
+ "BriefDescription": "Cycles in which TLBIE snoops are executed in the LSU."
+ },
+ {
+ "EventCode": "0x000000F084",
+ "EventName": "PM_SNOOP_TLBIE_CACHE_WALK_CYC",
+ "BriefDescription": "TLBIE snoop cycles in which the data cache is being walked."
+ },
+ {
+ "EventCode": "0x000000F884",
+ "EventName": "PM_SNOOP_TLBIE_WAIT_ST_CYC",
+ "BriefDescription": "TLBIE snoop cycles in which older stores are still draining."
+ },
+ {
+ "EventCode": "0x000000F088",
+ "EventName": "PM_SNOOP_TLBIE_WAIT_LD_CYC",
+ "BriefDescription": "TLBIE snoop cycles in which older loads are still draining."
+ },
+ {
+ "EventCode": "0x000000F08C",
+ "EventName": "PM_SNOOP_TLBIE_WAIT_MMU_CYC",
+ "BriefDescription": "TLBIE snoop cycles in which the Load-Store unit is waiting for the MMU to finish invalidation."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
index fcf8a8ebe7bd..1bf802076ee0 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/others.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
@@ -1,112 +1,72 @@
[
{
- "EventCode": "0x1002C",
- "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
- "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
- },
- {
- "EventCode": "0x1505E",
- "EventName": "PM_LD_HIT_L1",
- "BriefDescription": "Load finished without experiencing an L1 miss."
- },
- {
- "EventCode": "0x1F056",
- "EventName": "PM_DISP_SS0_2_INSTR_CYC",
- "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
- },
- {
- "EventCode": "0x1F05A",
- "EventName": "PM_DISP_HELD_SYNC_CYC",
- "BriefDescription": "Cycles dispatch is held because of a synchronizing instruction that requires the ICT to be empty before dispatch."
- },
- {
"EventCode": "0x10066",
"EventName": "PM_ADJUNCT_CYC",
"BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
},
{
- "EventCode": "0x100FC",
- "EventName": "PM_LD_REF_L1",
- "BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
- },
- {
"EventCode": "0x2E010",
"EventName": "PM_ADJUNCT_INST_CMPL",
"BriefDescription": "PowerPC instruction completed while the thread was in Adjunct state."
},
{
- "EventCode": "0x2E014",
- "EventName": "PM_STCX_FIN",
- "BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
- },
- {
- "EventCode": "0x2F054",
- "EventName": "PM_DISP_SS1_2_INSTR_CYC",
- "BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
- },
- {
- "EventCode": "0x2F056",
- "EventName": "PM_DISP_SS1_4_INSTR_CYC",
- "BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
- },
- {
"EventCode": "0x200F2",
"EventName": "PM_INST_DISP",
"BriefDescription": "PowerPC instruction dispatched."
},
{
- "EventCode": "0x200FD",
- "EventName": "PM_L1_ICACHE_MISS",
- "BriefDescription": "Demand instruction cache miss."
+ "EventCode": "0x300F6",
+ "EventName": "PM_LD_DEMAND_MISS_L1",
+ "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
},
{
- "EventCode": "0x3F04A",
- "EventName": "PM_LSU_ST5_FIN",
- "BriefDescription": "LSU Finished an internal operation in ST2 port."
+ "EventCode": "0x40012",
+ "EventName": "PM_L1_ICACHE_RELOADED_ALL",
+ "BriefDescription": "Counts all instruction cache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
},
{
- "EventCode": "0x3405A",
- "EventName": "PM_PRIVILEGED_INST_CMPL",
- "BriefDescription": "PowerPC instruction completed while the thread was in Privileged state."
+ "EventCode": "0x00000038BC",
+ "EventName": "PM_ISYNC_CMPL",
+ "BriefDescription": "Isync completion count per thread."
},
{
- "EventCode": "0x3F054",
- "EventName": "PM_DISP_SS0_4_INSTR_CYC",
- "BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions."
+ "EventCode": "0x000000C088",
+ "EventName": "PM_LD0_32B_FIN",
+ "BriefDescription": "256-bit load finished in the LD0 load execution unit."
},
{
- "EventCode": "0x3F056",
- "EventName": "PM_DISP_SS0_8_INSTR_CYC",
- "BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
+ "EventCode": "0x000000C888",
+ "EventName": "PM_LD1_32B_FIN",
+ "BriefDescription": "256-bit load finished in the LD1 load execution unit."
},
{
- "EventCode": "0x30068",
- "EventName": "PM_L1_ICACHE_RELOADED_PREF",
- "BriefDescription": "Counts all instruction cache prefetch reloads (includes demand turned into prefetch)."
+ "EventCode": "0x000000C090",
+ "EventName": "PM_LD0_UNALIGNED_FIN",
+ "BriefDescription": "Load instructions in LD0 port that are either unaligned, or treated as unaligned and require an additional recycle through the pipeline using the load gather buffer. This typically adds about 10 cycles to the latency of the instruction. This includes loads that cross the 128 byte boundary, octword loads that are not aligned, and a special forward progress case of a load that does not hit in the L1 and crosses the 32 byte boundary and is launched NTC. Counted at finish time."
},
{
- "EventCode": "0x300F6",
- "EventName": "PM_LD_DEMAND_MISS_L1",
- "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
+ "EventCode": "0x000000C890",
+ "EventName": "PM_LD1_UNALIGNED_FIN",
+ "BriefDescription": "Load instructions in LD1 port that are either unaligned, or treated as unaligned and require an additional recycle through the pipeline using the load gather buffer. This typically adds about 10 cycles to the latency of the instruction. This includes loads that cross the 128 byte boundary, octword loads that are not aligned, and a special forward progress case of a load that does not hit in the L1 and crosses the 32 byte boundary and is launched NTC. Counted at finish time."
},
{
- "EventCode": "0x40012",
- "EventName": "PM_L1_ICACHE_RELOADED_ALL",
- "BriefDescription": "Counts all instruction cache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
+ "EventCode": "0x000000C0A4",
+ "EventName": "PM_ST0_UNALIGNED_FIN",
+ "BriefDescription": "Store instructions in ST0 port that are either unaligned, or treated as unaligned and require an additional recycle through the pipeline. This typically adds about 10 cycles to the latency of the instruction. This only includes stores that cross the 128 byte boundary. Counted at finish time."
},
{
- "EventCode": "0x44054",
- "EventName": "PM_VECTOR_LD_CMPL",
- "BriefDescription": "Vector load instruction completed."
+ "EventCode": "0x000000C8A4",
+ "EventName": "PM_ST1_UNALIGNED_FIN",
+ "BriefDescription": "Store instructions in ST1 port that are either unaligned, or treated as unaligned and require an additional recycle through the pipeline. This typically adds about 10 cycles to the latency of the instruction. This only includes stores that cross the 128 byte boundary. Counted at finish time."
},
{
- "EventCode": "0x4D05E",
- "EventName": "PM_BR_CMPL",
- "BriefDescription": "A branch completed. All branches are included."
+ "EventCode": "0x000000D0B4",
+ "EventName": "PM_DC_PREF_STRIDED_CONF",
+ "BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software."
},
{
- "EventCode": "0x400F0",
- "EventName": "PM_LD_DEMAND_MISS_L1_FIN",
- "BriefDescription": "Load missed L1, counted at finish time."
+ "EventCode": "0x0000004884",
+ "EventName": "PM_NO_FETCH_IBUF_FULL_CYC",
+ "BriefDescription": "Cycles in which no instructions are fetched because there is no room in the instruction buffers."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
index 21b23bb55d0d..940375d251cb 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
@@ -95,11 +95,21 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
},
{
+ "EventCode": "0x1F056",
+ "EventName": "PM_DISP_SS0_2_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
+ },
+ {
"EventCode": "0x1F058",
"EventName": "PM_DISP_HELD_CYC",
"BriefDescription": "Cycles dispatch is held."
},
{
+ "EventCode": "0x1F05A",
+ "EventName": "PM_DISP_HELD_SYNC_CYC",
+ "BriefDescription": "Cycles dispatch is held because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+ },
+ {
"EventCode": "0x10064",
"EventName": "PM_DISP_STALL_IC_L2",
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
@@ -230,6 +240,16 @@
"BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
},
{
+ "EventCode": "0x2F054",
+ "EventName": "PM_DISP_SS1_2_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
+ },
+ {
+ "EventCode": "0x2F056",
+ "EventName": "PM_DISP_SS1_4_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
+ },
+ {
"EventCode": "0x20066",
"EventName": "PM_DISP_HELD_OTHER_CYC",
"BriefDescription": "Cycles dispatch is held for any other reason."
@@ -330,6 +350,16 @@
"BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
},
{
+ "EventCode": "0x3F054",
+ "EventName": "PM_DISP_SS0_4_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions."
+ },
+ {
+ "EventCode": "0x3F056",
+ "EventName": "PM_DISP_SS0_8_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
+ },
+ {
"EventCode": "0x30060",
"EventName": "PM_DISP_HELD_XVFC_MAPPER_CYC",
"BriefDescription": "Cycles dispatch is held because the XVFC mapper/SRB was full."
@@ -458,5 +488,20 @@
"EventCode": "0x400F8",
"EventName": "PM_FLUSH",
"BriefDescription": "Flush (any type)."
+ },
+ {
+ "EventCode": "0x0B0000016080",
+ "EventName": "PM_L2_TLBIE_SLBIE_START",
+ "BriefDescription": "NCU Master received a TLBIE/SLBIEG/SLBIAG operation from the core. Event count should be multiplied by 2 since the data is coming from a 2:1 clock domain and the data is time sliced across all 4 threads."
+ },
+ {
+ "EventCode": "0x0B0000016880",
+ "EventName": "PM_L2_TLBIE_SLBIE_DELAY",
+ "BriefDescription": "Cycles when a TLBIE/SLBIEG/SLBIAG command was held in a hottemp condition by the NCU Master. Multiply this count by 1000 to obtain the total number of cycles. This can be divided by PM_L2_TLBIE_SLBIE_SENT to obtain the average time a TLBIE/SLBIEG/SLBIAG command was held. Event count should be multiplied by 2 since the data is coming from a 2:1 clock domain and the data is time sliced across all 4 threads."
+ },
+ {
+ "EventCode": "0x0B0000026880",
+ "EventName": "PM_L2_SNP_TLBIE_SLBIE_DELAY",
+ "BriefDescription": "Cycles when a TLBIE/SLBIEG/SLBIAG that targets this thread's LPAR was in flight while in a hottemp condition. Multiply this count by 1000 to obtain the total number of cycles. This can be divided by PM_L2_SNP_TLBIE_SLBIE_START to obtain the overall efficiency. Note: 'inflight' means SnpTLB has been sent to core(ie doesn't include when SnpTLB is in NCU waiting to be launched serially behind different SnpTLB). The NCU Snooper gets in a 'hottemp' delay window when it detects it is above its TLBIE/SLBIE threshold for process SnpTLBIE/SLBIE with this core. Event count should be multiplied by 2 since the data is coming from a 2:1 clock domain and the data is time sliced across all 4 threads."
}
]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
index 0e0253d0e757..6f5b0e8fde12 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
@@ -105,6 +105,11 @@
"BriefDescription": "Processor cycles gated by the run latch."
},
{
+ "EventCode": "0x200F8",
+ "EventName": "PM_EXT_INT",
+ "BriefDescription": "Cycles an external interrupt was active."
+ },
+ {
"EventCode": "0x30010",
"EventName": "PM_PMC2_OVERFLOW",
"BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
@@ -125,6 +130,11 @@
"BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
},
{
+ "EventCode": "0x3405A",
+ "EventName": "PM_PRIVILEGED_INST_CMPL",
+ "BriefDescription": "PowerPC instruction completed while the thread was in Privileged state."
+ },
+ {
"EventCode": "0x3006C",
"EventName": "PM_RUN_CYC_SMT2_MODE",
"BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
index c9596e18ec09..6347eba48810 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
@@ -4577,7 +4577,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
- "Filter": "config1=0x40233",
+ "Filter": "config1=0x4023300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4588,7 +4588,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
- "Filter": "config1=0x40433",
+ "Filter": "config1=0x4043300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4599,7 +4599,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
- "Filter": "config1=0x4b233",
+ "Filter": "config1=0x4b23300000000",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
@@ -4609,7 +4609,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
- "Filter": "config1=0x4b433",
+ "Filter": "config1=0x4b43300000000",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
@@ -4619,7 +4619,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO",
- "Filter": "config1=0x4b033",
+ "Filter": "config1=0x4b03300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4630,7 +4630,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4651,7 +4651,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
- "Filter": "config1=0x40233",
+ "Filter": "config1=0x4023300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4662,7 +4662,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
- "Filter": "config1=0x40433",
+ "Filter": "config1=0x4043300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4673,7 +4673,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
- "Filter": "config1=0x4b233",
+ "Filter": "config1=0x4b23300000000",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
@@ -4683,7 +4683,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
- "Filter": "config1=0x4b433",
+ "Filter": "config1=0x4b43300000000",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
@@ -4693,7 +4693,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO",
- "Filter": "config1=0x4b033",
+ "Filter": "config1=0x4b03300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4704,7 +4704,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4747,7 +4747,7 @@
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
"Experimental": "1",
- "Filter": "config1=0x49033",
+ "Filter": "config1=0x4903300000000",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM request is used by IIO to request a data write without first reading the data for ownership.",
"UMask": "0x24",
@@ -4759,7 +4759,7 @@
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR",
"Experimental": "1",
- "Filter": "config1=0x43C33",
+ "Filter": "config1=0x43c3300000000",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RdCur requests and miss the LLC. A RdCur request is used by IIO to read data without changing state.",
"UMask": "0x24",
@@ -4771,7 +4771,7 @@
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
"Experimental": "1",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests a cache line to be cached in E state with the intent to modify.",
"UMask": "0x24",
@@ -4999,7 +4999,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
- "Filter": "config1=0x40233",
+ "Filter": "config1=0x4023300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -5010,7 +5010,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
- "Filter": "config1=0x40433",
+ "Filter": "config1=0x4043300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -5021,7 +5021,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
- "Filter": "config1=0x4b233",
+ "Filter": "config1=0x4b23300000000",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
@@ -5031,7 +5031,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
- "Filter": "config1=0x4b433",
+ "Filter": "config1=0x4b43300000000",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
@@ -5041,7 +5041,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
- "Filter": "config1=0x4b033",
+ "Filter": "config1=0x4b03300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -5052,7 +5052,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -5073,7 +5073,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
- "Filter": "config1=0x40233",
+ "Filter": "config1=0x4023300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -5084,7 +5084,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
- "Filter": "config1=0x40433",
+ "Filter": "config1=0x4043300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -5095,7 +5095,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
- "Filter": "config1=0x4b233",
+ "Filter": "config1=0x4b23300000000",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
@@ -5105,7 +5105,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
- "Filter": "config1=0x4b433",
+ "Filter": "config1=0x4b43300000000",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
@@ -5115,7 +5115,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
- "Filter": "config1=0x4b033",
+ "Filter": "config1=0x4b03300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -5126,7 +5126,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -5171,7 +5171,7 @@
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
"Experimental": "1",
- "Filter": "config1=0x49033",
+ "Filter": "config1=0x4903300000000",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM is used by IIO to request a data write without first reading the data for ownership.",
"UMask": "0x24",
@@ -5183,7 +5183,7 @@
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR",
"Experimental": "1",
- "Filter": "config1=0x43C33",
+ "Filter": "config1=0x43c3300000000",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RdCur requests that miss the LLC. A RdCur request is used by IIO to read data without changing state.",
"UMask": "0x24",
@@ -5195,7 +5195,7 @@
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
"Experimental": "1",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests data to be cached in E state with the intent to modify.",
"UMask": "0x24",
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/metricgroups.json b/tools/perf/pmu-events/arch/x86/meteorlake/metricgroups.json
new file mode 100644
index 000000000000..b54a5fc0861f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/metricgroups.json
@@ -0,0 +1,142 @@
+{
+ "Backend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Bad": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BadSpec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BigFootprint": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BrMispredicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Branches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvBO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvCB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvFB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvIO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvML": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMP": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvOB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "BvUW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "C0Wait": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheHits": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CacheMisses": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "CodeGen": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Compute": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Cor": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DSB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DSBmiss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "DataSharing": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Fed": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FetchBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FetchLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Flops": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FpScalar": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "FpVector": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Frontend": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "HPC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "IcMiss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Ifetch": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "InsType": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "IntVector": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "L2Evicts": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "LSD": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Load_Store_Miss": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MachineClears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Machine_Clears": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemOffcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Mem_Exec": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryBW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryBound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryLat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MemoryTLB": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Memory_BW": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Memory_Lat": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "MicroSeq": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "OS": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Offcore": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "PGO": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Pipeline": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "PortsUtil": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Power": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Prefetches": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Ret": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Retire": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "SMT": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Server": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Snoop": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "SoC": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "Summary": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL1": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL2": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TmaL3mem": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "TopdownL1": "Metrics for top-down breakdown at level 1",
+ "TopdownL2": "Metrics for top-down breakdown at level 2",
+ "TopdownL3": "Metrics for top-down breakdown at level 3",
+ "TopdownL4": "Metrics for top-down breakdown at level 4",
+ "TopdownL5": "Metrics for top-down breakdown at level 5",
+ "TopdownL6": "Metrics for top-down breakdown at level 6",
+ "load_store_bound": "Grouping from Top-down Microarchitecture Analysis Metrics spreadsheet",
+ "tma_L1_group": "Metrics for top-down breakdown at level 1",
+ "tma_L2_group": "Metrics for top-down breakdown at level 2",
+ "tma_L3_group": "Metrics for top-down breakdown at level 3",
+ "tma_L4_group": "Metrics for top-down breakdown at level 4",
+ "tma_L5_group": "Metrics for top-down breakdown at level 5",
+ "tma_L6_group": "Metrics for top-down breakdown at level 6",
+ "tma_alu_op_utilization_group": "Metrics contributing to tma_alu_op_utilization category",
+ "tma_assists_group": "Metrics contributing to tma_assists category",
+ "tma_backend_bound_group": "Metrics contributing to tma_backend_bound category",
+ "tma_bad_speculation_group": "Metrics contributing to tma_bad_speculation category",
+ "tma_branch_mispredicts_group": "Metrics contributing to tma_branch_mispredicts category",
+ "tma_branch_resteers_group": "Metrics contributing to tma_branch_resteers category",
+ "tma_core_bound_group": "Metrics contributing to tma_core_bound category",
+ "tma_dram_bound_group": "Metrics contributing to tma_dram_bound category",
+ "tma_dtlb_load_group": "Metrics contributing to tma_dtlb_load category",
+ "tma_dtlb_store_group": "Metrics contributing to tma_dtlb_store category",
+ "tma_fetch_bandwidth_group": "Metrics contributing to tma_fetch_bandwidth category",
+ "tma_fetch_latency_group": "Metrics contributing to tma_fetch_latency category",
+ "tma_fp_arith_group": "Metrics contributing to tma_fp_arith category",
+ "tma_fp_vector_group": "Metrics contributing to tma_fp_vector category",
+ "tma_frontend_bound_group": "Metrics contributing to tma_frontend_bound category",
+ "tma_heavy_operations_group": "Metrics contributing to tma_heavy_operations category",
+ "tma_ifetch_bandwidth_group": "Metrics contributing to tma_ifetch_bandwidth category",
+ "tma_ifetch_latency_group": "Metrics contributing to tma_ifetch_latency category",
+ "tma_int_operations_group": "Metrics contributing to tma_int_operations category",
+ "tma_issue2P": "Metrics related by the issue $issue2P",
+ "tma_issueBM": "Metrics related by the issue $issueBM",
+ "tma_issueBW": "Metrics related by the issue $issueBW",
+ "tma_issueComp": "Metrics related by the issue $issueComp",
+ "tma_issueD0": "Metrics related by the issue $issueD0",
+ "tma_issueFB": "Metrics related by the issue $issueFB",
+ "tma_issueFL": "Metrics related by the issue $issueFL",
+ "tma_issueL1": "Metrics related by the issue $issueL1",
+ "tma_issueLat": "Metrics related by the issue $issueLat",
+ "tma_issueMC": "Metrics related by the issue $issueMC",
+ "tma_issueMS": "Metrics related by the issue $issueMS",
+ "tma_issueMV": "Metrics related by the issue $issueMV",
+ "tma_issueRFO": "Metrics related by the issue $issueRFO",
+ "tma_issueSL": "Metrics related by the issue $issueSL",
+ "tma_issueSO": "Metrics related by the issue $issueSO",
+ "tma_issueSmSt": "Metrics related by the issue $issueSmSt",
+ "tma_issueSpSt": "Metrics related by the issue $issueSpSt",
+ "tma_issueSyncxn": "Metrics related by the issue $issueSyncxn",
+ "tma_issueTLB": "Metrics related by the issue $issueTLB",
+ "tma_l1_bound_group": "Metrics contributing to tma_l1_bound category",
+ "tma_l3_bound_group": "Metrics contributing to tma_l3_bound category",
+ "tma_light_operations_group": "Metrics contributing to tma_light_operations category",
+ "tma_load_op_utilization_group": "Metrics contributing to tma_load_op_utilization category",
+ "tma_machine_clears_group": "Metrics contributing to tma_machine_clears category",
+ "tma_mem_latency_group": "Metrics contributing to tma_mem_latency category",
+ "tma_memory_bound_group": "Metrics contributing to tma_memory_bound category",
+ "tma_microcode_sequencer_group": "Metrics contributing to tma_microcode_sequencer category",
+ "tma_mite_group": "Metrics contributing to tma_mite category",
+ "tma_other_light_ops_group": "Metrics contributing to tma_other_light_ops category",
+ "tma_ports_utilization_group": "Metrics contributing to tma_ports_utilization category",
+ "tma_ports_utilized_0_group": "Metrics contributing to tma_ports_utilized_0 category",
+ "tma_ports_utilized_3m_group": "Metrics contributing to tma_ports_utilized_3m category",
+ "tma_resource_bound_group": "Metrics contributing to tma_resource_bound category",
+ "tma_retiring_group": "Metrics contributing to tma_retiring category",
+ "tma_serializing_operation_group": "Metrics contributing to tma_serializing_operation category",
+ "tma_store_bound_group": "Metrics contributing to tma_store_bound category",
+ "tma_store_op_utilization_group": "Metrics contributing to tma_store_op_utilization category"
+}
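metricgroups.json is a flat JSON object mapping every MetricGroup name referenced by the Meteor Lake metrics to a one-line description, which the pmu-events build uses to give perf a description for each group. A minimal sketch for inspecting the mapping offline, assuming a kernel-tree-relative path:

    #!/usr/bin/env python3
    # Print each Meteor Lake metric group with its description.
    import json

    path = "tools/perf/pmu-events/arch/x86/meteorlake/metricgroups.json"
    with open(path) as f:
        groups = json.load(f)

    for name, desc in sorted(groups.items()):
        print(f"{name}: {desc}")

With a perf binary built from this tree, the groups should be listable with "perf list metricgroup" and a whole group collectable with "perf stat -M TopdownL1".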
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json b/tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json
new file mode 100644
index 000000000000..1a7392f0da86
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/mtl-metrics.json
@@ -0,0 +1,2535 @@
+[
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions",
+ "MetricExpr": "tma_core_bound",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_allocation_restriction",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_branch_detect",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_branch_resteer",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_decode",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_ifetch_bandwidth",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_ifetch_latency",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@FP_FLOPS_RETIRED.ALL@",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipflop",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@FP_INST_RETIRED.128B_DP@ + cpu_atom@FP_INST_RETIRED.128B_SP@)",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_avx128",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@FP_INST_RETIRED.64B_DP@",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_scalar_dp",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@FP_INST_RETIRED.32B_SP@",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_arith_inst_mix_ipfparith_scalar_sp",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
+ "MetricExpr": "100 * (cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ + cpu_atom@LD_HEAD.PGWALK_AT_RET@) / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Ifetch",
+ "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.ANY_AT_RET@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Mem_Exec",
+ "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricName": "tma_info_br_inst_mix_ipbranch",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.NEAR_CALL@",
+ "MetricName": "tma_info_br_inst_mix_ipcall",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_INST_RETIRED.FAR_BRANCH@u",
+ "MetricName": "tma_info_br_inst_mix_ipfarbranch",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_ntaken",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.COND_TAKEN@",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_cond_taken",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.INDIRECT@",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_indirect",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired return Branch Misprediction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.RETURN@",
+ "MetricName": "tma_info_br_inst_mix_ipmisp_ret",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired Branch Misprediction",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@",
+ "MetricName": "tma_info_br_inst_mix_ipmispredict",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Ratio of all branches which mispredict",
+ "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / cpu_atom@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
+ "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / cpu_atom@BACLEARS.ANY@",
+ "MetricName": "tma_info_br_mispredict_bound_branch_mispredict_to_unknown_branch_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
+ "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.LD_BUF@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
+ "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.RSV@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
+ "MetricExpr": "100 * cpu_atom@MEM_SCHEDULER_BLOCK.ST_BUF@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_core_cpi",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "cpu_atom@FP_FLOPS_RETIRED.ALL@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_core_flopc",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricName": "tma_info_core_ipc",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL_P@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_core_upi",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_IFETCH.L2_HIT@ / cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_IFETCH.LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_IFETCH.LLC_MISS@ / cpu_atom@MEM_BOUND_STALLS_IFETCH.ALL@",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_LOAD.L2_HIT@ / cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_LOAD.LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3",
+ "MetricExpr": "100 * cpu_atom@MEM_BOUND_STALLS_LOAD.LLC_MISS@ / cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a pipeline block",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_l1_bound",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement",
+ "MetricExpr": "100 * (cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ + cpu_atom@MEM_BOUND_STALLS_LOAD.ALL@) / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_load_bound",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full",
+ "MetricExpr": "100 * (cpu_atom@MEM_SCHEDULER_BLOCK.ST_BUF@ / cpu_atom@MEM_SCHEDULER_BLOCK.ALL@) * tma_mem_scheduler",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_store_bound_store_bound",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to floating point assists",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.FP_ASSIST@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_fp_assist_pki",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to page faults",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.PAGE_FAULT@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_page_fault_pki",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to thousands of instructions retired, due to self-modifying code",
+ "MetricExpr": "1e3 * cpu_atom@MACHINE_CLEARS.SMC@ / cpu_atom@INST_RETIRED.ANY@",
+ "MetricName": "tma_info_machine_clear_bound_machine_clears_smc_pki",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
+ "MetricExpr": "100 * cpu_atom@LD_BLOCKS.ADDRESS_ALIAS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "100 * cpu_atom@LD_BLOCKS.DATA_UNKNOWN@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.L1_MISS_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.OTHER_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.PGWALK_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
+ "MetricExpr": "100 * cpu_atom@LD_HEAD.ST_ADDR_AT_RET@ / cpu_atom@LD_HEAD.ANY_AT_RET@",
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Load",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_mix_ipload",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Store",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / cpu_atom@MEM_UOPS_RETIRED.ALL_STORES@",
+ "MetricName": "tma_info_mem_mix_ipstore",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that perform one or more locks",
+ "MetricExpr": "100 * cpu_atom@MEM_UOPS_RETIRED.LOCK_LOADS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_mix_load_locks_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that are splits",
+ "MetricExpr": "100 * cpu_atom@MEM_UOPS_RETIRED.SPLIT_LOADS@ / cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@",
+ "MetricName": "tma_info_mem_mix_load_splits_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Ratio of mem load uops to all uops",
+ "MetricExpr": "1e3 * cpu_atom@MEM_UOPS_RETIRED.ALL_LOADS@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@",
+ "MetricName": "tma_info_mem_mix_memload_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
+ "MetricExpr": "100 * cpu_atom@SERIALIZATION.C01_MS_SCB@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricName": "tma_info_serialization _%_tpause_cycles",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricName": "tma_info_system_cpu_utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "cpu_atom@FP_FLOPS_RETIRED.ALL@ / (duration_time * 1e9)",
+ "MetricGroup": "Flops",
+ "MetricName": "tma_info_system_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE_P@k / cpu_atom@CPU_CLK_UNHALTED.CORE@",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_kernel_utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@ / cpu_atom@CPU_CLK_UNHALTED.REF_TSC@",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_turbo_utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are FPDiv uops",
+ "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.FPDIV@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@",
+ "MetricName": "tma_info_uop_mix_fpdiv_uop_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are IDiv uops",
+ "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.IDIV@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@",
+ "MetricName": "tma_info_uop_mix_idiv_uop_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are microcode ops",
+ "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.MS@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@",
+ "MetricName": "tma_info_uop_mix_microcode_uop_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are x87 uops",
+ "MetricExpr": "100 * cpu_atom@UOPS_RETIRED.X87@ / cpu_atom@TOPDOWN_RETIRING.ALL_P@",
+ "MetricName": "tma_info_uop_mix_x87_uop_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB_MISS@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation",
+ "MetricExpr": "tma_backend_bound - tma_core_bound",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that result in retirement slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL_P@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
+ "MetricgroupNoGroup": "TopdownL1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / (6 * cpu_atom@CPU_CLK_UNHALTED.CORE@)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_0@ + cpu_core@UOPS_DISPATCHED.PORT_1@ + cpu_core@UOPS_DISPATCHED.PORT_5_11@ + cpu_core@UOPS_DISPATCHED.PORT_6@) / (5 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.4",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "78 * cpu_core@ASSISTS.ANY@ / tma_info_thread_slots",
+ "MetricGroup": "BvIO;TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
+ "MetricExpr": "63 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_slots",
+ "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_avx_assists",
+ "MetricThreshold": "tma_avx_assists > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "cpu_core@topdown\\-be\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) + 0 * tma_info_thread_slots",
+ "MetricGroup": "BvOB;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricExpr": "cpu_core@topdown\\-br\\-mispredict@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) + 0 * tma_info_thread_slots",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.1 power-performance optimized state (Faster wakeup time; Smaller power savings).",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C01@ / tma_info_thread_clks",
+ "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
+ "MetricName": "tma_c01_wait",
+ "MetricThreshold": "tma_c01_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due staying in C0.2 power-performance optimized state (Slower wakeup time; Larger power savings).",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C02@ / tma_info_thread_clks",
+ "MetricGroup": "C0Wait;TopdownL4;tma_L4_group;tma_serializing_operation_group",
+ "MetricName": "tma_c02_wait",
+ "MetricThreshold": "tma_c02_wait > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources. Sample with: FRONTEND_RETIRED.MS_FLOWS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@R, 25 * tma_info_system_core_frequency) * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricExpr": "(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@R, 24 * tma_info_system_core_frequency) + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * min(cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@R, 24 * tma_info_system_core_frequency) * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu_core@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu_core@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & tma_fetch_bandwidth > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks",
+ "MetricGroup": "BvCB;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricExpr": "cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@ / tma_info_thread_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(cpu_core@IDQ.DSB_CYCLES_ANY@ - cpu_core@IDQ.DSB_CYCLES_OK@) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & tma_fetch_bandwidth > 0.2",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_LOADS@R, 7) / tma_info_thread_clks + tma_load_stlb_miss",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.STLB_HIT_STORES@R, 7) / tma_info_thread_clks + tma_store_stlb_miss",
+ "MetricGroup": "BvMT;MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_bottleneck_memory_data_tlbs, tma_info_bottleneck_memory_synchronization",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "28 * tma_info_system_core_frequency * cpu_core@OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM@ / tma_info_thread_clks",
+ "MetricGroup": "BvMS;DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricExpr": "cpu_core@L1D_PEND_MISS.FB_FULL@ / tma_info_thread_clks",
+ "MetricGroup": "BvMS;MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "cpu_core@topdown\\-fetch\\-lat@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "max(0, tma_heavy_operations - tma_microcode_sequencer)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists",
+ "MetricExpr": "30 * cpu_core@ASSISTS.FP@ / tma_info_thread_slots",
+ "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_fp_assists",
+ "MetricThreshold": "tma_fp_assists > 0.1",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.VECTOR@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "cpu_core@topdown\\-fe\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) - cpu_core@INT_MISC.UOP_DROPPING@ / tma_info_thread_slots",
+ "MetricGroup": "BvFB;BvIO;PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
+ "MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.MACRO_FUSED@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fused_instructions",
+ "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. CMP+JCC or DEC+JCC are common examples of legacy fusions. {([MTL] Note new MOV+OP and Load+OP fusions appear under Other_Light_Ops in MTL!)}",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "cpu_core@topdown\\-heavy\\-ops@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) + 0 * tma_info_thread_slots",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. ([ICL+] Note this may overcount due to approximation using indirect events; [ADL+] .). Sample with: UOPS_RETIRED.HEAVY",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "tma_info_bottleneck_mispredictions * tma_info_thread_slots / cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / 100",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_bad_spec_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_bottleneck_mispredictions, tma_mispredicts_resteers",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_NTAKEN@",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.COND_TAKEN@",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.INDIRECT@",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_indirect",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_indirect < 1e3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.RET@",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmisp_ret",
+ "MetricThreshold": "tma_info_bad_spec_ipmisp_ret < 500",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_bad_spec_ipmispredict",
+ "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Speculative to Retired ratio of all clears (covering mispredicts and nukes)",
+ "MetricExpr": "cpu_core@INT_MISC.CLEARS_COUNT@ / (cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ + cpu_core@MACHINE_CLEARS.COUNT@)",
+ "MetricGroup": "BrMispredicts",
+ "MetricName": "tma_info_bad_spec_spec_clears_ratio",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_botlnk_l0_core_bound_likely",
+ "MetricThreshold": "tma_info_botlnk_l0_core_bound_likely > 0.5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_frontend_bound * (tma_fetch_bandwidth / (tma_fetch_bandwidth + tma_fetch_latency)) * (tma_dsb / (tma_dsb + tma_lsd + tma_mite)))",
+ "MetricGroup": "DSB;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_bandwidth",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_bandwidth > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) hits - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_botlnk_l2_dsb_misses",
+ "MetricThreshold": "tma_info_botlnk_l2_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb, tma_lcp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_botlnk_l2_ic_misses",
+ "MetricThreshold": "tma_info_botlnk_l2_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: ",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFootprint;BvBC;Fed;Frontend;IcMiss;MemoryTLB",
+ "MetricName": "tma_info_bottleneck_big_code",
+ "MetricThreshold": "tma_info_bottleneck_big_code > 20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA",
+ "MetricExpr": "100 * ((cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Ret",
+ "MetricName": "tma_info_bottleneck_branching_overhead",
+ "MetricThreshold": "tma_info_bottleneck_branching_overhead > 5",
+ "PublicDescription": "Total pipeline cost of instructions used for program control-flow - a subset of the Retiring category in TMA. Examples include function calls; loops and alignments. (A lower bound)",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvMB;Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_bottleneck_cache_memory_bandwidth",
+ "MetricThreshold": "tma_info_bottleneck_cache_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of external Memory- or Cache-Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_system_dram_bw_use, tma_mem_bandwidth, tma_sq_full",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_memory_bound * tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_store_latency / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)) + tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_l1_hit_latency / (tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)))",
+ "MetricGroup": "BvML;Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_bottleneck_cache_memory_latency",
+ "MetricThreshold": "tma_info_bottleneck_cache_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of external Memory- or Cache-Latency related bottlenecks. Related metrics: tma_l3_hit_latency, tma_mem_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost when the execution is compute-bound - an estimation",
+ "MetricExpr": "100 * (tma_core_bound * tma_divider / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_core_bound * (tma_ports_utilization / (tma_divider + tma_ports_utilization + tma_serializing_operation)) * (tma_ports_utilized_3m / (tma_ports_utilized_0 + tma_ports_utilized_1 + tma_ports_utilized_2 + tma_ports_utilized_3m)))",
+ "MetricGroup": "BvCB;Cor;tma_issueComp",
+ "MetricName": "tma_info_bottleneck_compute_bound_est",
+ "MetricThreshold": "tma_info_bottleneck_compute_bound_est > 20",
+ "PublicDescription": "Total pipeline cost when the execution is compute-bound - an estimation. Covers Core Bound when High ILP as well as when long-latency execution units are busy. Related metrics: ",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks (when the front-end could not sustain operations delivery to the back-end)",
+ "MetricExpr": "100 * (tma_frontend_bound - (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) - (1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))) - tma_info_bottleneck_big_code",
+ "MetricGroup": "BvFB;Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_bottleneck_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_bottleneck_instruction_fetch_bw > 20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of irregular execution (e.g",
+ "MetricExpr": "100 * ((1 - cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.MS\\,cmask\\=1@) * (tma_fetch_latency * (tma_ms_switches + tma_branch_resteers * (tma_clears_resteers + tma_mispredicts_resteers * tma_other_mispredicts / tma_branch_mispredicts) / (tma_clears_resteers + tma_mispredicts_resteers + tma_unknown_branches)) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) + 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts * tma_branch_mispredicts + tma_machine_clears * tma_other_nukes / tma_other_nukes + tma_core_bound * (tma_serializing_operation + cpu_core@RS.EMPTY\\,umask\\=1@ / tma_info_thread_clks * tma_ports_utilized_0) / (tma_divider + tma_ports_utilization + tma_serializing_operation) + tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "Bad;BvIO;Cor;Ret;tma_issueMS",
+ "MetricName": "tma_info_bottleneck_irregular_overhead",
+ "MetricThreshold": "tma_info_bottleneck_irregular_overhead > 10",
+ "PublicDescription": "Total pipeline cost of irregular execution (e.g. FP-assists in HPC, Wait time with work imbalance multithreaded workloads, overhead in system services or virtualized environments). Related metrics: tma_microcode_sequencer, tma_ms_switches",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_load / (tma_dtlb_load + tma_fb_full + tma_l1_hit_latency + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_memory_bound * (tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound)) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "BvMT;Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_bottleneck_memory_data_tlbs",
+ "MetricThreshold": "tma_info_bottleneck_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_synchronization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors)",
+ "MetricExpr": "100 * (tma_memory_bound * (tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_contested_accesses + tma_data_sharing) / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * tma_false_sharing / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores - tma_store_latency)) + tma_machine_clears * (1 - tma_other_nukes / tma_other_nukes))",
+ "MetricGroup": "BvMS;Mem;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_bottleneck_memory_synchronization",
+ "MetricThreshold": "tma_info_bottleneck_memory_synchronization > 10",
+ "PublicDescription": "Total pipeline cost of Memory Synchronization related bottlenecks (data transfers and coherency updates across processors). Related metrics: tma_dtlb_load, tma_dtlb_store, tma_info_bottleneck_memory_data_tlbs",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricExpr": "100 * (1 - 10 * tma_microcode_sequencer * tma_other_mispredicts / tma_branch_mispredicts) * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;BvMP;tma_issueBM",
+ "MetricName": "tma_info_bottleneck_mispredictions",
+ "MetricThreshold": "tma_info_bottleneck_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_mispredicts_resteers",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of remaining bottlenecks in the back-end",
+ "MetricExpr": "100 - (tma_info_bottleneck_big_code + tma_info_bottleneck_instruction_fetch_bw + tma_info_bottleneck_mispredictions + tma_info_bottleneck_cache_memory_bandwidth + tma_info_bottleneck_cache_memory_latency + tma_info_bottleneck_memory_data_tlbs + tma_info_bottleneck_memory_synchronization + tma_info_bottleneck_compute_bound_est + tma_info_bottleneck_irregular_overhead + tma_info_bottleneck_branching_overhead + tma_info_bottleneck_useful_work)",
+ "MetricGroup": "BvOB;Cor;Offcore",
+ "MetricName": "tma_info_bottleneck_other_bottlenecks",
+ "MetricThreshold": "tma_info_bottleneck_other_bottlenecks > 20",
+ "PublicDescription": "Total pipeline cost of remaining bottlenecks in the back-end. Examples include data-dependencies (Core Bound when Low ILP) and other unlisted memory-related stalls.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of \"useful operations\" - the portion of Retiring category not covered by Branching_Overhead nor Irregular_Overhead.",
+ "MetricExpr": "100 * (tma_retiring - (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ + 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@INST_RETIRED.NOP@) / tma_info_thread_slots - tma_microcode_sequencer / (tma_few_uops_instructions + tma_microcode_sequencer) * (tma_assists / tma_microcode_sequencer) * tma_heavy_operations)",
+ "MetricGroup": "BvUW;Ret",
+ "MetricName": "tma_info_bottleneck_useful_work",
+ "MetricThreshold": "tma_info_bottleneck_useful_work > 20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_CALL@ + cpu_core@BR_INST_RETIRED.NEAR_RETURN@) / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_callret",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_NTAKEN@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_branches_cond_nt",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_TAKEN@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_branches_cond_tk",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(cpu_core@BR_INST_RETIRED.NEAR_TAKEN@ - cpu_core@BR_INST_RETIRED.COND_TAKEN@ - 2 * cpu_core@BR_INST_RETIRED.NEAR_CALL@) / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_jump",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - (tma_info_branches_cond_nt + tma_info_branches_cond_tk + tma_info_branches_callret + tma_info_branches_jump)",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_branches_other_branches",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(cpu_core@CPU_CLK_UNHALTED.DISTRIBUTED@ if #SMT_on else tma_info_thread_clks)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_core_clks",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_core_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_core_coreipc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "uops Executed per Cycle",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / tma_info_thread_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_core_epc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / tma_info_core_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_core_flopc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "(cpu_core@FP_ARITH_DISPATCHED.PORT_0@ + cpu_core@FP_ARITH_DISPATCHED.PORT_1@ + cpu_core@FP_ARITH_DISPATCHED.PORT_5@) / (2 * tma_info_core_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_core_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per thread (logical-processor)",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_core_ilp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "cpu_core@IDQ.DSB_UOPS@ / cpu_core@UOPS_ISSUED.ANY@",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_frontend_dsb_coverage",
+ "MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 6 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_inst_mix_iptb, tma_lcp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_frontend_dsb_switch_cost",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_frontend_fetch_upc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_frontend_icache_miss_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FRONTEND_RETIRED.ANY_DSB_MISS@",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_frontend_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_frontend_ipdsb_miss_ret < 50",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_inst_mix_instructions / cpu_core@BACLEARS.ANY@",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_frontend_ipunknown_branch",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * cpu_core@FRONTEND_RETIRED.L2_MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_frontend_l2mpki_code",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.CODE_RD_MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_frontend_l2mpki_code_all",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
+ "MetricExpr": "cpu_core@LSD.UOPS@ / cpu_core@UOPS_ISSUED.ANY@",
+ "MetricGroup": "Fed;LSD",
+ "MetricName": "tma_info_frontend_lsd_coverage",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection",
+ "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_frontend_unknown_branch_cost",
+ "PublicDescription": "Average number of cycles the front-end was delayed due to an Unknown Branch detection. See Unknown_Branches node.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_inst_mix_bptkbranch",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_inst_mix_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + cpu_core@FP_ARITH_INST_RETIRED.VECTOR@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_inst_mix_iparith",
+ "MetricThreshold": "tma_info_inst_mix_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting. Approximated prior to BDW.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_avx128",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_avx256",
+ "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_inst_mix_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). Values < 1 are possible due to intentional FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.ALL_BRANCHES@",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_inst_mix_ipbranch",
+ "MetricThreshold": "tma_info_inst_mix_ipbranch < 8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_CALL@",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_inst_mix_ipcall",
+ "MetricThreshold": "tma_info_inst_mix_ipcall < 200",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_inst_mix_ipflop",
+ "MetricThreshold": "tma_info_inst_mix_ipflop < 10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@MEM_INST_RETIRED.ALL_LOADS@",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_inst_mix_ipload",
+ "MetricThreshold": "tma_info_inst_mix_ipload < 3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per PAUSE (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_inst_mix_instructions / cpu_core@CPU_CLK_UNHALTED.PAUSE_INST@",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_inst_mix_ippause",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_inst_mix_ipstore",
+ "MetricThreshold": "tma_info_inst_mix_ipstore < 8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_inst_mix_ipswpf",
+ "MetricThreshold": "tma_info_inst_mix_ipswpf < 100",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per taken branch",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_inst_mix_iptb",
+ "MetricThreshold": "tma_info_inst_mix_iptb < 13",
+ "PublicDescription": "Instructions per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_lcp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l1d_cache_fill_bw_2t",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l2_cache_fill_bw_2t",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_memory_core_l3_cache_access_bw_2t",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_memory_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_core_l3_cache_fill_bw_2t",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "Fill Buffer (FB) hits per kilo instruction for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_fb_hpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * cpu_core@L1D.REPLACEMENT@ / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l1d_cache_fill_bw",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l1mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.ALL_DEMAND_DATA_RD@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l1mpki_load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * cpu_core@L2_LINES_IN.ALL@ / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l2_cache_fill_bw",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (cpu_core@L2_RQSTS.REFERENCES@ - cpu_core@L2_RQSTS.MISS@) / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l2hpki_all",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_HIT@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l2hpki_load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L2_MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Backend;CacheHits;Mem",
+ "MetricName": "tma_info_memory_l2mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_all",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.DEMAND_DATA_RD_MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheHits;Mem",
+ "MetricName": "tma_info_memory_l2mpki_load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Offcore requests (L2 cache miss) per kilo instruction for demand RFOs",
+ "MetricExpr": "1e3 * cpu_core@L2_RQSTS.RFO_MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "tma_info_memory_l2mpki_rfo",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * cpu_core@OFFCORE_REQUESTS.ALL_REQUESTS@ / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_memory_l3_cache_access_bw",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * cpu_core@LONGEST_LAT_CACHE.MISS@ / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_memory_l3_cache_fill_bw",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_RETIRED.L3_MISS@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Mem",
+ "MetricName": "tma_info_memory_l3mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_memory_latency_data_l2_mlp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS.DEMAND_DATA_RD@",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_latency_load_l2_miss_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_memory_latency_load_l2_mlp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD@",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_memory_latency_load_l3_miss_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / cpu_core@MEM_LOAD_COMPLETED.L1_MISS_ANY@",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_memory_load_miss_real_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "\"Bus lock\" per kilo instruction",
+ "MetricExpr": "1e3 * cpu_core@SQ_MISC.BUS_LOCK@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Mem",
+ "MetricName": "tma_info_memory_mix_bus_lock_pki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Un-cacheable retired load per kilo instruction",
+ "MetricExpr": "1e3 * cpu_core@MEM_LOAD_MISC_RETIRED.UC@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Mem",
+ "MetricName": "tma_info_memory_mix_uc_load_pki",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least one such miss)",
+ "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / cpu_core@L1D_PEND_MISS.PENDING_CYCLES@",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_memory_mlp",
+        "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least one such miss. Per-Logical Processor)",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * cpu_core@ITLB_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_code_stlb_mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * cpu_core@DTLB_LOAD_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_load_stlb_mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(cpu_core@ITLB_MISSES.WALK_PENDING@ + cpu_core@DTLB_LOAD_MISSES.WALK_PENDING@ + cpu_core@DTLB_STORE_MISSES.WALK_PENDING@) / (4 * tma_info_core_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_page_walks_utilization",
+ "MetricThreshold": "tma_info_memory_tlb_page_walks_utilization > 0.5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * cpu_core@DTLB_STORE_MISSES.WALK_COMPLETED@ / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_memory_tlb_store_stlb_mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per core",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@)",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_pipeline_execute",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from DSB per cycle",
+ "MetricExpr": "cpu_core@IDQ.DSB_UOPS@ / cpu_core@IDQ.DSB_CYCLES_ANY@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_dsb",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from LSD per cycle",
+ "MetricExpr": "cpu_core@LSD.UOPS@ / cpu_core@LSD.CYCLES_ACTIVE@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_lsd",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of uops fetched from MITE per cycle",
+ "MetricExpr": "cpu_core@IDQ.MITE_UOPS@ / cpu_core@IDQ.MITE_CYCLES_ANY@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_pipeline_fetch_mite",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "Instructions per microcode Assist invocation",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY@",
+ "MetricGroup": "MicroSeq;Pipeline;Ret;Retire",
+ "MetricName": "tma_info_pipeline_ipassist",
+ "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
+        "PublicDescription": "Instructions per microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_pipeline_retire",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+ "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "MicroSeq;Pipeline;Ret",
+ "MetricName": "tma_info_pipeline_strings_cycles",
+ "MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of cycles the processor is waiting yet unhalted; covering legacy PAUSE instruction, as well as C0.1 / C0.2 power-performance optimized states",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.C0_WAIT@ / tma_info_thread_clks",
+ "MetricGroup": "C0Wait",
+ "MetricName": "tma_info_system_c0_wait",
+ "MetricThreshold": "tma_info_system_c0_wait > 0.05",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Measured Average Core Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_system_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_system_core_frequency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization (percentage)",
+ "MetricExpr": "tma_info_system_cpus_utilized / #num_cpus_online",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_system_cpu_utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of utilized CPUs",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "tma_info_system_cpus_utilized",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_HAC_ARB_TRK_REQUESTS.ALL + UNC_HAC_ARB_COH_TRK_REQUESTS.ALL) / 1e9 / duration_time",
+ "MetricGroup": "HPC;MemOffcore;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_system_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_mem_bandwidth, tma_sq_full",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(cpu_core@FP_ARITH_INST_RETIRED.SCALAR@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * cpu_core@FP_ARITH_INST_RETIRED.4_FLOPS@ + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_system_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "Instructions per Far Branch (Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_system_ipfarbranch",
+ "MetricThreshold": "tma_info_system_ipfarbranch < 1e6",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@INST_RETIRED.ANY_P@k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_system_kernel_cpi",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD_P@k / cpu_core@CPU_CLK_UNHALTED.THREAD@",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_system_kernel_utilization",
+ "MetricThreshold": "tma_info_system_kernel_utilization > 0.05",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_system_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - cpu_core@CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE@ / cpu_core@CPU_CLK_UNHALTED.REF_DISTRIBUTED@ if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_system_smt_2t_utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_system_socket_clks",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "tma_info_thread_clks / cpu_core@CPU_CLK_UNHALTED.REF_TSC@",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_system_turbo_utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.THREAD@",
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_thread_clks",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_thread_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_thread_cpi",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_ISSUED.ANY@",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_thread_execute_per_issue",
+        "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggests high rate of \"execute\" at rename stage.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_thread_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_thread_ipc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "cpu_core@TOPDOWN.SLOTS@",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_thread_slots",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "(tma_info_thread_slots / (cpu_core@TOPDOWN.SLOTS@ / 2) if #SMT_on else 1)",
+ "MetricGroup": "SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_thread_slots_utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@INST_RETIRED.ANY@",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_thread_uoppi",
+ "MetricThreshold": "tma_info_thread_uoppi > 1.05",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_thread_slots / cpu_core@BR_INST_RETIRED.NEAR_TAKEN@",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_thread_uptb",
+ "MetricThreshold": "tma_info_thread_uptb < 9",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "This metric represents the overall fraction of select Integer (Int) operations the CPU has executed (retired)",
+ "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_int_operations",
+ "MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
+        "PublicDescription": "This metric represents the overall fraction of select Integer (Int) operations the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired",
+ "MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_128@ + cpu_core@INT_VEC_RETIRED.VNNI_128@) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
+ "MetricName": "tma_int_vector_128b",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired",
+ "MetricExpr": "(cpu_core@INT_VEC_RETIRED.ADD_256@ + cpu_core@INT_VEC_RETIRED.MUL_256@ + cpu_core@INT_VEC_RETIRED.VNNI_256@) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
+ "MetricName": "tma_int_vector_256b",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD/MUL or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@) / tma_info_thread_clks, 0)",
+ "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+        "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads that miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache",
+ "MetricExpr": "min(2 * (cpu_core@MEM_INST_RETIRED.ALL_LOADS@ - cpu_core@MEM_LOAD_RETIRED.FB_HIT@ - cpu_core@MEM_LOAD_RETIRED.L1_MISS@) * 20 / 100, max(cpu_core@CYCLE_ACTIVITY.CYCLES_MEM_ANY@ - cpu_core@MEMORY_ACTIVITY.CYCLES_L1D_MISS@, 0)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_l1_hit_latency",
+ "MetricThreshold": "tma_l1_hit_latency > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles with demand load accesses that hit the L1 cache. The short latency of the L1 data cache may be exposed in pointer-chasing memory access patterns as an example. Sample with: MEM_LOAD_RETIRED.L1_HIT",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks",
+ "MetricGroup": "BvML;CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "This metric estimates how often the CPU was stalled due to load accesses to the L3 cache or contention with a sibling Core",
+ "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks",
+ "MetricGroup": "CacheHits;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+        "PublicDescription": "This metric estimates how often the CPU was stalled due to load accesses to the L3 cache or contention with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "cpu_core@MEM_LOAD_RETIRED.L3_HIT@ * min(cpu_core@MEM_LOAD_RETIRED.L3_HIT@R, 9 * tma_info_system_core_frequency) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_bottleneck_cache_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+        "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags, or the Intel Compiler by default, will avoid this; see the Optimization Guide's section on LCP best-known methods. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_botlnk_l2_dsb_bandwidth, tma_info_botlnk_l2_dsb_misses, tma_info_frontend_dsb_coverage, tma_info_inst_mix_iptb",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "MetricgroupNoGroup": "TopdownL2",
+        "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized code running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. ([ICL+] Note this may undercount due to approximation using indirect events). Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_2_3_10@ / (3 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3_10",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses that later on hit in the second-level TLB (STLB)",
+ "MetricExpr": "max(0, tma_dtlb_load - tma_load_stlb_miss)",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ * cpu_core@MEM_INST_RETIRED.LOCK_LOADS@R / tma_info_thread_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit",
+ "MetricExpr": "(cpu_core@LSD.CYCLES_ACTIVE@ - cpu_core@LSD.CYCLES_OK@) / tma_info_core_core_clks / 2",
+ "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_lsd",
+ "MetricThreshold": "tma_lsd > 0.15 & tma_fetch_bandwidth > 0.2",
+        "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit the LSD structure well.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
+ "MetricGroup": "BadSpec;BvMS;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM)",
+ "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DATA_RD\\,cmask\\=4@) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory - DRAM ([SPR-HBM] and/or HBM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM)",
+ "MetricExpr": "min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD@) / tma_info_thread_clks - tma_mem_bandwidth",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory - DRAM ([SPR-HBM] and/or HBM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_bottleneck_cache_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricExpr": "cpu_core@topdown\\-mem\\-bound@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) + 0 * tma_info_thread_slots",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "MetricgroupNoGroup": "TopdownL2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
+ "MetricName": "tma_memory_fence",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "cpu_core@UOPS_RETIRED.MS@ / tma_info_thread_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;BvMP;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_bad_spec_branch_misprediction_cost, tma_info_bottleneck_mispredictions",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(cpu_core@IDQ.MITE_CYCLES_ANY@ - cpu_core@IDQ.MITE_CYCLES_OK@) / tma_info_core_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & tma_fetch_bandwidth > 0.2",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles)",
+ "MetricExpr": "160 * cpu_core@ASSISTS.SSE_AVX_MIX@ / tma_info_thread_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+        "PublicDescription": "This metric estimates penalty in terms of percentage of ([SKL+] injected blend uops out of all Uops Issued -- the Count Domain; [ADL+] cycles). Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimization Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (cpu_core@UOPS_RETIRED.SLOTS@ / cpu_core@UOPS_ISSUED.ANY@) / tma_info_thread_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_clears_resteers, tma_info_bottleneck_irregular_overhead, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
+ "MetricExpr": "tma_light_operations * (cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ - cpu_core@INST_RETIRED.MACRO_FUSED@) / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "Branches;BvBO;Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_non_fused_branches",
+ "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * cpu_core@INST_RETIRED.NOP@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "BvBO;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU was stalled due to other cases of misprediction (non-retired x86 branches or other types).",
+ "MetricExpr": "max(tma_branch_mispredicts * (1 - cpu_core@BR_MISP_RETIRED.ALL_BRANCHES@ / (cpu_core@INT_MISC.CLEARS_COUNT@ - cpu_core@MACHINE_CLEARS.COUNT@)), 0.0001)",
+ "MetricGroup": "BrMispredicts;BvIO;TopdownL3;tma_L3_group;tma_branch_mispredicts_group",
+ "MetricName": "tma_other_mispredicts",
+ "MetricThreshold": "tma_other_mispredicts > 0.05 & (tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Nukes (Machine Clears) not related to memory ordering.",
+ "MetricExpr": "max(tma_machine_clears * (1 - cpu_core@MACHINE_CLEARS.MEMORY_ORDERING@ / cpu_core@MACHINE_CLEARS.COUNT@), 0.0001)",
+ "MetricGroup": "BvIO;Machine_Clears;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_other_nukes",
+ "MetricThreshold": "tma_other_nukes > 0.05 & (tma_machine_clears > 0.1 & tma_bad_speculation > 0.15)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+        "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Page Faults",
+ "MetricExpr": "99 * cpu_core@ASSISTS.PAGE_FAULT@ / tma_info_thread_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_page_faults",
+ "MetricThreshold": "tma_page_faults > 0.05",
+        "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handling Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_0@ / tma_info_core_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_1@ / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU)",
+ "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_6@ / tma_info_core_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+] Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((tma_ports_utilized_0 * tma_info_thread_clks + (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / tma_info_thread_clks if cpu_core@ARITH.DIV_ACTIVE@ < cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@ else (cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ + tma_retiring * cpu_core@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / tma_info_thread_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "max((cpu_core@EXE_ACTIVITY.EXE_BOUND_0_PORTS@ + max(cpu_core@RS.EMPTY_RESOURCE@ - cpu_core@RESOURCE_STALLS.SCOREBOARD@, 0)) / tma_info_thread_clks, 1) * (cpu_core@CYCLE_ACTIVITY.STALLS_TOTAL@ - cpu_core@EXE_ACTIVITY.BOUND_ON_LOADS@) / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+        "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+        "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization (most compilers feature auto-Vectorization options today) reduces pressure on the execution ports as multiple elements are calculated with the same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks",
+ "MetricGroup": "BvCB;PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.4 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "cpu_core@topdown\\-retiring@ / (cpu_core@topdown\\-fe\\-bound@ + cpu_core@topdown\\-bad\\-spec@ + cpu_core@topdown\\-retiring@ + cpu_core@topdown\\-be\\-bound@) + 0 * tma_info_thread_slots",
+ "MetricGroup": "BvUW;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "MetricgroupNoGroup": "TopdownL1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks + tma_c02_wait",
+ "MetricGroup": "BvIO;PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group;tma_issueSO",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer)",
+ "MetricExpr": "tma_light_operations * cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)",
+ "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_other_light_ops_group",
+ "MetricName": "tma_shuffles_256b",
+ "MetricThreshold": "tma_shuffles_256b > 0.1 & (tma_other_light_ops > 0.3 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring Shuffle operations of 256-bit vector size (FP or Integer). Shuffles may incur slow cross \"vector lane\" data transfers.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_LOADS@R, tma_info_memory_load_miss_real_latency) / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ * min(cpu_core@MEM_INST_RETIRED.SPLIT_STORES@R, 1) / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(cpu_core@XQ.FULL_CYCLES@ + cpu_core@L1D_PEND_MISS.L2_STALLS@) / tma_info_thread_clks",
+ "MetricGroup": "BvMS;MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_bottleneck_cache_memory_bandwidth, tma_info_system_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricExpr": "(cpu_core@MEM_STORE_RETIRED.L2_HIT@ * 10 * (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) + (1 - cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@) * min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@)) / tma_info_thread_clks",
+ "MetricGroup": "BvML;MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_4_9@ + cpu_core@UOPS_DISPATCHED.PORT_7_8@) / (4 * tma_info_core_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "max(0, tma_dtlb_store - tma_store_stlb_miss)",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_core_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores",
+ "MetricExpr": "9 * cpu_core@OCR.STREAMING_WR.ANY_RESPONSE@ / tma_info_thread_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
+ "MetricName": "tma_streaming_stores",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks",
+ "MetricGroup": "BigFootprint;BvBC;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit) hence called Unknown Branches. Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * cpu_core@UOPS_EXECUTED.X87@ / cpu_core@UOPS_EXECUTED.THREAD@",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json
index da46a3aeb58c..4fc818626491 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json
@@ -4454,7 +4454,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
- "Filter": "config1=0x40233",
+ "Filter": "config1=0x4023300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4465,7 +4465,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
- "Filter": "config1=0x40433",
+ "Filter": "config1=0x4043300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4476,7 +4476,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
- "Filter": "config1=0x4b233",
+ "Filter": "config1=0x4b23300000000",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
@@ -4486,7 +4486,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
- "Filter": "config1=0x4b433",
+ "Filter": "config1=0x4b43300000000",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
@@ -4496,7 +4496,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO",
- "Filter": "config1=0x4b033",
+ "Filter": "config1=0x4b03300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4507,7 +4507,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4528,7 +4528,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
- "Filter": "config1=0x40233",
+ "Filter": "config1=0x4023300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4539,7 +4539,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
- "Filter": "config1=0x40433",
+ "Filter": "config1=0x4043300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4550,7 +4550,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
- "Filter": "config1=0x4b233",
+ "Filter": "config1=0x4b23300000000",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
@@ -4560,7 +4560,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
- "Filter": "config1=0x4b433",
+ "Filter": "config1=0x4b43300000000",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
@@ -4570,7 +4570,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO",
- "Filter": "config1=0x4b033",
+ "Filter": "config1=0x4b03300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4581,7 +4581,7 @@
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4624,7 +4624,7 @@
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
"Experimental": "1",
- "Filter": "config1=0x49033",
+ "Filter": "config1=0x4903300000000",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM request is used by IIO to request a data write without first reading the data for ownership.",
"UMask": "0x24",
@@ -4636,7 +4636,7 @@
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR",
"Experimental": "1",
- "Filter": "config1=0x43C33",
+ "Filter": "config1=0x43c3300000000",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RdCur requests and miss the LLC. A RdCur request is used by IIO to read data without changing state.",
"UMask": "0x24",
@@ -4648,7 +4648,7 @@
"EventCode": "0x35",
"EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
"Experimental": "1",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests a cache line to be cached in E state with the intent to modify.",
"UMask": "0x24",
@@ -4865,7 +4865,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
- "Filter": "config1=0x40233",
+ "Filter": "config1=0x4023300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4876,7 +4876,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
- "Filter": "config1=0x40433",
+ "Filter": "config1=0x4043300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4887,7 +4887,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
- "Filter": "config1=0x4b233",
+ "Filter": "config1=0x4b23300000000",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
@@ -4897,7 +4897,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
- "Filter": "config1=0x4b433",
+ "Filter": "config1=0x4b43300000000",
"PerPkg": "1",
"UMask": "0x11",
"Unit": "CHA"
@@ -4907,7 +4907,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
- "Filter": "config1=0x4b033",
+ "Filter": "config1=0x4b03300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4918,7 +4918,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x11",
@@ -4939,7 +4939,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
- "Filter": "config1=0x40233",
+ "Filter": "config1=0x4023300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4950,7 +4950,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
- "Filter": "config1=0x40433",
+ "Filter": "config1=0x4043300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4961,7 +4961,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
- "Filter": "config1=0x4b233",
+ "Filter": "config1=0x4b23300000000",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
@@ -4971,7 +4971,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
- "Filter": "config1=0x4b433",
+ "Filter": "config1=0x4b43300000000",
"PerPkg": "1",
"UMask": "0x21",
"Unit": "CHA"
@@ -4981,7 +4981,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
- "Filter": "config1=0x4b033",
+ "Filter": "config1=0x4b03300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -4992,7 +4992,7 @@
"Counter": "0",
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
"UMask": "0x21",
@@ -5037,7 +5037,7 @@
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
"Experimental": "1",
- "Filter": "config1=0x49033",
+ "Filter": "config1=0x4903300000000",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM is used by IIO to request a data write without first reading the data for ownership.",
"UMask": "0x24",
@@ -5049,7 +5049,7 @@
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR",
"Experimental": "1",
- "Filter": "config1=0x43C33",
+ "Filter": "config1=0x43c3300000000",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RdCur requests that miss the LLC. A RdCur request is used by IIO to read data without changing state.",
"UMask": "0x24",
@@ -5061,7 +5061,7 @@
"EventCode": "0x36",
"EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
"Experimental": "1",
- "Filter": "config1=0x40033",
+ "Filter": "config1=0x4003300000000",
"PerPkg": "1",
"PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests data to be cached in E state with the intent to modify.",
"UMask": "0x24",
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json
index 7551fb91a9d7..a81776deb2e6 100644
--- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json
@@ -1,62 +1,5 @@
[
{
- "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "config1=0x40040e33",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xc001fe01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "config1=0x40041e33",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xc001fe01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "config1=0x40e33",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "UMask": "0xc001fe01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "config1=0x41833",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "ScaleUnit": "64Bytes",
- "UMask": "0xc001fe01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "config1=0x41a33",
- "PerPkg": "1",
- "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
- "ScaleUnit": "64Bytes",
- "UMask": "0xc001fe01",
- "Unit": "CHA"
- },
- {
"BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
"Counter": "0,1,2,3",
"EventCode": "0x80",
diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
index 13727421d424..c592079982fb 100644
--- a/tools/perf/pmu-events/empty-pmu-events.c
+++ b/tools/perf/pmu-events/empty-pmu-events.c
@@ -1,196 +1,193 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * An empty pmu-events.c file used when there is no architecture json files in
- * arch or when the jevents.py script cannot be run.
- *
- * The test cpu/soc is provided for testing.
- */
-#include "pmu-events/pmu-events.h"
+
+/* SPDX-License-Identifier: GPL-2.0 */
+/* THIS FILE WAS AUTOGENERATED BY jevents.py arch=none model=none ! */
+
+#include <pmu-events/pmu-events.h>
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>
-static const struct pmu_event pmu_events__test_soc_cpu[] = {
- {
- .name = "l3_cache_rd",
- .event = "event=0x40",
- .desc = "L3 cache access, read",
- .topic = "cache",
- .long_desc = "Attributable Level 3 cache access, read",
- },
- {
- .name = "segment_reg_loads.any",
- .event = "event=0x6,period=200000,umask=0x80",
- .desc = "Number of segment register loads",
- .topic = "other",
- },
- {
- .name = "dispatch_blocked.any",
- .event = "event=0x9,period=200000,umask=0x20",
- .desc = "Memory cluster signals to block micro-op dispatch for any reason",
- .topic = "other",
- },
- {
- .name = "eist_trans",
- .event = "event=0x3a,period=200000,umask=0x0",
- .desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
- .topic = "other",
- },
- {
- .name = "uncore_hisi_ddrc.flux_wcmd",
- .event = "event=0x2",
- .desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
- .topic = "uncore",
- .long_desc = "DDRC write commands",
- .pmu = "hisi_sccl,ddrc",
- },
- {
- .name = "unc_cbo_xsnp_response.miss_eviction",
- .event = "event=0x22,umask=0x81",
- .desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
- .topic = "uncore",
- .long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
- .pmu = "uncore_cbox",
- },
- {
- .name = "event-hyphen",
- .event = "event=0xe0,umask=0x00",
- .desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
- .topic = "uncore",
- .long_desc = "UNC_CBO_HYPHEN",
- .pmu = "uncore_cbox",
- },
- {
- .name = "event-two-hyph",
- .event = "event=0xc0,umask=0x00",
- .desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
- .topic = "uncore",
- .long_desc = "UNC_CBO_TWO_HYPH",
- .pmu = "uncore_cbox",
- },
- {
- .name = "uncore_hisi_l3c.rd_hit_cpipe",
- .event = "event=0x7",
- .desc = "Total read hits. Unit: hisi_sccl,l3c ",
- .topic = "uncore",
- .long_desc = "Total read hits",
- .pmu = "hisi_sccl,l3c",
- },
- {
- .name = "uncore_imc_free_running.cache_miss",
- .event = "event=0x12",
- .desc = "Total cache misses. Unit: uncore_imc_free_running ",
- .topic = "uncore",
- .long_desc = "Total cache misses",
- .pmu = "uncore_imc_free_running",
- },
- {
- .name = "uncore_imc.cache_hits",
- .event = "event=0x34",
- .desc = "Total cache hits. Unit: uncore_imc ",
- .topic = "uncore",
- .long_desc = "Total cache hits",
- .pmu = "uncore_imc",
- },
- {
- .name = "bp_l1_btb_correct",
- .event = "event=0x8a",
- .desc = "L1 BTB Correction",
- .topic = "branch",
- },
- {
- .name = "bp_l2_btb_correct",
- .event = "event=0x8b",
- .desc = "L2 BTB Correction",
- .topic = "branch",
- },
- {
- .name = 0,
- .event = 0,
- .desc = 0,
- },
+struct compact_pmu_event {
+ int offset;
};
-static const struct pmu_metric pmu_metrics__test_soc_cpu[] = {
- {
- .metric_expr = "1 / IPC",
- .metric_name = "CPI",
- },
- {
- .metric_expr = "inst_retired.any / cpu_clk_unhalted.thread",
- .metric_name = "IPC",
- .metric_group = "group1",
- },
- {
- .metric_expr = "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
- "( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
- .metric_name = "Frontend_Bound_SMT",
- },
- {
- .metric_expr = "l1d\\-loads\\-misses / inst_retired.any",
- .metric_name = "dcache_miss_cpi",
- },
- {
- .metric_expr = "l1i\\-loads\\-misses / inst_retired.any",
- .metric_name = "icache_miss_cycles",
- },
- {
- .metric_expr = "(dcache_miss_cpi + icache_miss_cycles)",
- .metric_name = "cache_miss_cycles",
- .metric_group = "group1",
- },
- {
- .metric_expr = "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
- .metric_name = "DCache_L2_All_Hits",
- },
- {
- .metric_expr = "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
- "l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
- .metric_name = "DCache_L2_All_Miss",
- },
- {
- .metric_expr = "DCache_L2_All_Hits + DCache_L2_All_Miss",
- .metric_name = "DCache_L2_All",
- },
- {
- .metric_expr = "d_ratio(DCache_L2_All_Hits, DCache_L2_All)",
- .metric_name = "DCache_L2_Hits",
- },
- {
- .metric_expr = "d_ratio(DCache_L2_All_Miss, DCache_L2_All)",
- .metric_name = "DCache_L2_Misses",
- },
- {
- .metric_expr = "ipc + M2",
- .metric_name = "M1",
- },
- {
- .metric_expr = "ipc + M1",
- .metric_name = "M2",
- },
- {
- .metric_expr = "1/M3",
- .metric_name = "M3",
- },
- {
- .metric_expr = "64 * l1d.replacement / 1000000000 / duration_time",
- .metric_name = "L1D_Cache_Fill_BW",
- },
- {
- .metric_expr = 0,
- .metric_name = 0,
- },
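+/*
+ * All events (or metrics) of a single PMU: a table of offsets into
+ * big_c_string, sorted by name so that the binary search below can find
+ * entries, plus the offset of the PMU's own name.
+ */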
+struct pmu_table_entry {
+ const struct compact_pmu_event *entries;
+ uint32_t num_entries;
+ struct compact_pmu_event pmu_name;
+};
+
+static const char *const big_c_string =
+/* offset=0 */ "default_core\000"
+/* offset=13 */ "bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000"
+/* offset=72 */ "bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000"
+/* offset=131 */ "l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000Attributable Level 3 cache access, read\000"
+/* offset=226 */ "segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000"
+/* offset=325 */ "dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000"
+/* offset=455 */ "eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000"
+/* offset=570 */ "hisi_sccl,ddrc\000"
+/* offset=585 */ "uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000DDRC write commands\000"
+/* offset=671 */ "uncore_cbox\000"
+/* offset=683 */ "unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000"
+/* offset=914 */ "event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000UNC_CBO_HYPHEN\000"
+/* offset=979 */ "event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000UNC_CBO_TWO_HYPH\000"
+/* offset=1050 */ "hisi_sccl,l3c\000"
+/* offset=1064 */ "uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000Total read hits\000"
+/* offset=1144 */ "uncore_imc_free_running\000"
+/* offset=1168 */ "uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000Total cache misses\000"
+/* offset=1263 */ "uncore_imc\000"
+/* offset=1274 */ "uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000Total cache hits\000"
+/* offset=1352 */ "uncore_sys_ddr_pmu\000"
+/* offset=1371 */ "sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000"
+/* offset=1444 */ "uncore_sys_ccn_pmu\000"
+/* offset=1463 */ "sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000"
+/* offset=1537 */ "uncore_sys_cmn_pmu\000"
+/* offset=1556 */ "sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000"
+/* offset=1696 */ "CPI\000\0001 / IPC\000\000\000\000\000\000\000\00000"
+/* offset=1718 */ "IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\00000"
+/* offset=1781 */ "Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\00000"
+/* offset=1947 */ "dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000"
+/* offset=2011 */ "icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000"
+/* offset=2078 */ "cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\00000"
+/* offset=2149 */ "DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\00000"
+/* offset=2243 */ "DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\00000"
+/* offset=2377 */ "DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\00000"
+/* offset=2441 */ "DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\00000"
+/* offset=2509 */ "DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\00000"
+/* offset=2579 */ "M1\000\000ipc + M2\000\000\000\000\000\000\000\00000"
+/* offset=2601 */ "M2\000\000ipc + M1\000\000\000\000\000\000\000\00000"
+/* offset=2623 */ "M3\000\0001 / M3\000\000\000\000\000\000\000\00000"
+/* offset=2643 */ "L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\00000"
+;
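+/*
+ * Worked example: the record at offset=13 above decodes (via
+ * decompress_event() below) to
+ *   name = "bp_l1_btb_correct", topic = "branch",
+ *   desc = "L1 BTB Correction", event = "event=0x8a",
+ *   compat = NULL, deprecated = 0, perpkg = 0, unit = NULL, long_desc = NULL.
+ * Empty fields become NULL pointers; the two bare digits carry the
+ * deprecated and perpkg flags.
+ */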
+
+static const struct compact_pmu_event pmu_events__test_soc_cpu_default_core[] = {
+{ 13 }, /* bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000 */
+{ 72 }, /* bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000 */
+{ 325 }, /* dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000 */
+{ 455 }, /* eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000 */
+{ 131 }, /* l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000Attributable Level 3 cache access, read\000 */
+{ 226 }, /* segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000 */
+};
+static const struct compact_pmu_event pmu_events__test_soc_cpu_hisi_sccl_ddrc[] = {
+{ 585 }, /* uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000DDRC write commands\000 */
+};
+static const struct compact_pmu_event pmu_events__test_soc_cpu_hisi_sccl_l3c[] = {
+{ 1064 }, /* uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000Total read hits\000 */
+};
+static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_cbox[] = {
+{ 914 }, /* event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000UNC_CBO_HYPHEN\000 */
+{ 979 }, /* event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000UNC_CBO_TWO_HYPH\000 */
+{ 683 }, /* unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000 */
+};
+static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_imc[] = {
+{ 1274 }, /* uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000Total cache hits\000 */
+};
+static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_imc_free_running[] = {
+{ 1168 }, /* uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000Total cache misses\000 */
+
+};
+
+const struct pmu_table_entry pmu_events__test_soc_cpu[] = {
+{
+ .entries = pmu_events__test_soc_cpu_default_core,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_default_core),
+ .pmu_name = { 0 /* default_core\000 */ },
+},
+{
+ .entries = pmu_events__test_soc_cpu_hisi_sccl_ddrc,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_ddrc),
+ .pmu_name = { 570 /* hisi_sccl,ddrc\000 */ },
+},
+{
+ .entries = pmu_events__test_soc_cpu_hisi_sccl_l3c,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_l3c),
+ .pmu_name = { 1050 /* hisi_sccl,l3c\000 */ },
+},
+{
+ .entries = pmu_events__test_soc_cpu_uncore_cbox,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_cbox),
+ .pmu_name = { 671 /* uncore_cbox\000 */ },
+},
+{
+ .entries = pmu_events__test_soc_cpu_uncore_imc,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc),
+ .pmu_name = { 1263 /* uncore_imc\000 */ },
+},
+{
+ .entries = pmu_events__test_soc_cpu_uncore_imc_free_running,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc_free_running),
+ .pmu_name = { 1144 /* uncore_imc_free_running\000 */ },
+},
};
+static const struct compact_pmu_event pmu_metrics__test_soc_cpu_default_core[] = {
+{ 1696 }, /* CPI\000\0001 / IPC\000\000\000\000\000\000\000\00000 */
+{ 2377 }, /* DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\00000 */
+{ 2149 }, /* DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\00000 */
+{ 2243 }, /* DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\00000 */
+{ 2441 }, /* DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\00000 */
+{ 2509 }, /* DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\00000 */
+{ 1781 }, /* Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\00000 */
+{ 1718 }, /* IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\00000 */
+{ 2643 }, /* L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\00000 */
+{ 2579 }, /* M1\000\000ipc + M2\000\000\000\000\000\000\000\00000 */
+{ 2601 }, /* M2\000\000ipc + M1\000\000\000\000\000\000\000\00000 */
+{ 2623 }, /* M3\000\0001 / M3\000\000\000\000\000\000\000\00000 */
+{ 2078 }, /* cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\00000 */
+{ 1947 }, /* dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000 */
+{ 2011 }, /* icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000 */
+
+};
+
+const struct pmu_table_entry pmu_metrics__test_soc_cpu[] = {
+{
+ .entries = pmu_metrics__test_soc_cpu_default_core,
+ .num_entries = ARRAY_SIZE(pmu_metrics__test_soc_cpu_default_core),
+ .pmu_name = { 0 /* default_core\000 */ },
+},
+};
+
+static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_ccn_pmu[] = {
+{ 1463 }, /* sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000 */
+};
+static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_cmn_pmu[] = {
+{ 1556 }, /* sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000 */
+};
+static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_ddr_pmu[] = {
+{ 1371 }, /* sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000 */
+
+};
+
+const struct pmu_table_entry pmu_events__test_soc_sys[] = {
+{
+ .entries = pmu_events__test_soc_sys_uncore_sys_ccn_pmu,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_ccn_pmu),
+ .pmu_name = { 1444 /* uncore_sys_ccn_pmu\000 */ },
+},
+{
+ .entries = pmu_events__test_soc_sys_uncore_sys_cmn_pmu,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_cmn_pmu),
+ .pmu_name = { 1537 /* uncore_sys_cmn_pmu\000 */ },
+},
+{
+ .entries = pmu_events__test_soc_sys_uncore_sys_ddr_pmu,
+ .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_ddr_pmu),
+ .pmu_name = { 1352 /* uncore_sys_ddr_pmu\000 */ },
+},
+};
+
+
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
- const struct pmu_event *entries;
+ const struct pmu_table_entry *pmus;
+ uint32_t num_pmus;
};
/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
- const struct pmu_metric *entries;
+ const struct pmu_table_entry *pmus;
+ uint32_t num_pmus;
};
/*
@@ -202,92 +199,191 @@ struct pmu_metrics_table {
* The cpuid can contain any character other than the comma.
*/
struct pmu_events_map {
- const char *arch;
- const char *cpuid;
- const struct pmu_events_table event_table;
- const struct pmu_metrics_table metric_table;
+ const char *arch;
+ const char *cpuid;
+ struct pmu_events_table event_table;
+ struct pmu_metrics_table metric_table;
};
/*
* Global table mapping each known CPU for the architecture to its
* table of PMU events.
*/
-static const struct pmu_events_map pmu_events_map[] = {
- {
- .arch = "testarch",
- .cpuid = "testcpu",
- .event_table = { pmu_events__test_soc_cpu },
- .metric_table = { pmu_metrics__test_soc_cpu },
- },
- {
- .arch = 0,
- .cpuid = 0,
- .event_table = { 0 },
- .metric_table = { 0 },
- },
-};
-
-static const struct pmu_event pmu_events__test_soc_sys[] = {
- {
- .name = "sys_ddr_pmu.write_cycles",
- .event = "event=0x2b",
- .desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
- .compat = "v8",
- .topic = "uncore",
- .pmu = "uncore_sys_ddr_pmu",
- },
- {
- .name = "sys_ccn_pmu.read_cycles",
- .event = "config=0x2c",
- .desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
- .compat = "0x01",
- .topic = "uncore",
- .pmu = "uncore_sys_ccn_pmu",
- },
- {
- .name = "sys_cmn_pmu.hnf_cache_miss",
- .event = "eventid=0x1,type=0x5",
- .desc = "Counts total cache misses in first lookup result (high priority). Unit: uncore_sys_cmn_pmu ",
- .compat = "(434|436|43c|43a).*",
- .topic = "uncore",
- .pmu = "uncore_sys_cmn_pmu",
- },
- {
- .name = 0,
- .event = 0,
- .desc = 0,
- },
+const struct pmu_events_map pmu_events_map[] = {
+{
+ .arch = "testarch",
+ .cpuid = "testcpu",
+ .event_table = {
+ .pmus = pmu_events__test_soc_cpu,
+ .num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
+ },
+ .metric_table = {
+ .pmus = pmu_metrics__test_soc_cpu,
+ .num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
+ }
+},
+{
+ .arch = 0,
+ .cpuid = 0,
+ .event_table = { 0, 0 },
+ .metric_table = { 0, 0 },
+}
};
struct pmu_sys_events {
const char *name;
- const struct pmu_events_table table;
+ struct pmu_events_table event_table;
+ struct pmu_metrics_table metric_table;
};
static const struct pmu_sys_events pmu_sys_event_tables[] = {
{
- .table = { pmu_events__test_soc_sys },
+ .event_table = {
+ .pmus = pmu_events__test_soc_sys,
+ .num_pmus = ARRAY_SIZE(pmu_events__test_soc_sys)
+ },
.name = "pmu_events__test_soc_sys",
},
{
- .table = { 0 }
+ .event_table = { 0, 0 },
+ .metric_table = { 0, 0 },
},
};
-int pmu_events_table__for_each_event(const struct pmu_events_table *table, struct perf_pmu *pmu,
- pmu_event_iter_fn fn, void *data)
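+/* Expand a '\0'-separated record in big_c_string into a struct pmu_event. */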
+static void decompress_event(int offset, struct pmu_event *pe)
+{
+ const char *p = &big_c_string[offset];
+
+ pe->name = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pe->topic = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pe->desc = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pe->event = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pe->compat = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pe->deprecated = *p - '0';
+ p++;
+ pe->perpkg = *p - '0';
+ p++;
+ pe->unit = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pe->long_desc = (*p == '\0' ? NULL : p);
+}
+
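+/* Expand a '\0'-separated record in big_c_string into a struct pmu_metric. */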
+static void decompress_metric(int offset, struct pmu_metric *pm)
{
- for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
- int ret;
+ const char *p = &big_c_string[offset];
+
+ pm->metric_name = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->metric_group = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->metric_expr = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->metric_threshold = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->desc = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->long_desc = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->unit = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->compat = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->metricgroup_no_group = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->default_metricgroup_name = (*p == '\0' ? NULL : p);
+ while (*p++);
+ pm->aggr_mode = *p - '0';
+ p++;
+ pm->event_grouping = *p - '0';
+}
- if (pmu && !pmu__name_match(pmu, pe->pmu))
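+/* Walk one PMU's events, decompressing each record on the fly. */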
+static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
+ const struct pmu_table_entry *pmu,
+ pmu_event_iter_fn fn,
+ void *data)
+{
+ int ret;
+ struct pmu_event pe = {
+ .pmu = &big_c_string[pmu->pmu_name.offset],
+ };
+
+ for (uint32_t i = 0; i < pmu->num_entries; i++) {
+ decompress_event(pmu->entries[i].offset, &pe);
+ if (!pe.name)
continue;
+ ret = fn(&pe, table, data);
+ if (ret)
+ return ret;
+ }
+ return 0;
+ }
+
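+/*
+ * Binary-search one PMU's entries for 'name'. This relies on the generator
+ * emitting entries sorted case-insensitively by name, with nameless records
+ * ordering before named ones - which is what the comparisons below assume.
+ */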
+static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
+ const struct pmu_table_entry *pmu,
+ const char *name,
+ pmu_event_iter_fn fn,
+ void *data)
+{
+ struct pmu_event pe = {
+ .pmu = &big_c_string[pmu->pmu_name.offset],
+ };
+ int low = 0, high = pmu->num_entries - 1;
- ret = fn(pe, table, data);
- if (ret)
- return ret;
- }
- return 0;
+ while (low <= high) {
+ int cmp, mid = (low + high) / 2;
+
+ decompress_event(pmu->entries[mid].offset, &pe);
+
+ if (!pe.name && !name)
+ goto do_call;
+
+ if (!pe.name && name) {
+ low = mid + 1;
+ continue;
+ }
+ if (pe.name && !name) {
+ high = mid - 1;
+ continue;
+ }
+
+ cmp = strcasecmp(pe.name, name);
+ if (cmp < 0) {
+ low = mid + 1;
+ continue;
+ }
+ if (cmp > 0) {
+ high = mid - 1;
+ continue;
+ }
+ do_call:
+ return fn ? fn(&pe, table, data) : 0;
+ }
+ return PMU_EVENTS__NOT_FOUND;
+}
+
+int pmu_events_table__for_each_event(const struct pmu_events_table *table,
+ struct perf_pmu *pmu,
+ pmu_event_iter_fn fn,
+ void *data)
+{
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+ int ret;
+
+ if (pmu && !pmu__name_match(pmu, pmu_name))
+ continue;
+
+ ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
+ if (pmu || ret)
+ return ret;
+ }
+ return 0;
}
int pmu_events_table__find_event(const struct pmu_events_table *table,
@@ -296,14 +392,19 @@ int pmu_events_table__find_event(const struct pmu_events_table *table,
pmu_event_iter_fn fn,
void *data)
{
- for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
- if (pmu && !pmu__name_match(pmu, pe->pmu))
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
+ int ret;
+
+ if (!pmu__name_match(pmu, pmu_name))
continue;
- if (!strcasecmp(pe->name, name))
- return fn(pe, table, data);
- }
- return -1000;
+ ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
+ if (ret != PMU_EVENTS__NOT_FOUND)
+ return ret;
+ }
+ return PMU_EVENTS__NOT_FOUND;
}
size_t pmu_events_table__num_events(const struct pmu_events_table *table,
@@ -311,160 +412,253 @@ size_t pmu_events_table__num_events(const struct pmu_events_table *table,
{
size_t count = 0;
- for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
- if (pmu && !pmu__name_match(pmu, pe->pmu))
- continue;
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &table->pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
- count++;
- }
+ if (pmu__name_match(pmu, pmu_name))
+ count += table_pmu->num_entries;
+ }
return count;
}
-int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
- void *data)
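+/* Walk one PMU's metrics, skipping records that carry no expression. */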
+static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
+ const struct pmu_table_entry *pmu,
+ pmu_metric_iter_fn fn,
+ void *data)
+{
+ int ret;
+ struct pmu_metric pm = {
+ .pmu = &big_c_string[pmu->pmu_name.offset],
+ };
+
+ for (uint32_t i = 0; i < pmu->num_entries; i++) {
+ decompress_metric(pmu->entries[i].offset, &pm);
+ if (!pm.metric_expr)
+ continue;
+ ret = fn(&pm, table, data);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
+ pmu_metric_iter_fn fn,
+ void *data)
{
- for (const struct pmu_metric *pm = &table->entries[0]; pm->metric_expr; pm++) {
- int ret = fn(pm, table, data);
+ for (size_t i = 0; i < table->num_pmus; i++) {
+ int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
+ fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
- if (ret)
- return ret;
- }
- return 0;
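+/*
+ * Find the pmu_events_map for a PMU. Two one-entry caches are kept:
+ * last_result short-circuits repeated lookups for the same PMU, and
+ * last_map_search avoids rescanning pmu_events_map when a different PMU
+ * reports the same cpuid string.
+ */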
+static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
+{
+ static struct {
+ const struct pmu_events_map *map;
+ struct perf_pmu *pmu;
+ } last_result;
+ static struct {
+ const struct pmu_events_map *map;
+ char *cpuid;
+ } last_map_search;
+ static bool has_last_result, has_last_map_search;
+ const struct pmu_events_map *map = NULL;
+ char *cpuid = NULL;
+ size_t i;
+
+ if (has_last_result && last_result.pmu == pmu)
+ return last_result.map;
+
+ cpuid = perf_pmu__getcpuid(pmu);
+
+ /*
+ * On some platforms which uses cpus map, cpuid can be NULL for
+ * PMUs other than CORE PMUs.
+ */
+ if (!cpuid)
+ goto out_update_last_result;
+
+ if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
+ map = last_map_search.map;
+ free(cpuid);
+ } else {
+ i = 0;
+ for (;;) {
+ map = &pmu_events_map[i++];
+
+ if (!map->arch) {
+ map = NULL;
+ break;
+ }
+
+ if (!strcmp_cpuid_str(map->cpuid, cpuid))
+ break;
+ }
+ free(last_map_search.cpuid);
+ last_map_search.cpuid = cpuid;
+ last_map_search.map = map;
+ has_last_map_search = true;
+ }
+out_update_last_result:
+ last_result.pmu = pmu;
+ last_result.map = map;
+ has_last_result = true;
+ return map;
}
const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
- const struct pmu_events_table *table = NULL;
- char *cpuid = perf_pmu__getcpuid(pmu);
- int i;
+ const struct pmu_events_map *map = map_for_pmu(pmu);
- /* on some platforms which uses cpus map, cpuid can be NULL for
- * PMUs other than CORE PMUs.
- */
- if (!cpuid)
- return NULL;
+ if (!map)
+ return NULL;
- i = 0;
- for (;;) {
- const struct pmu_events_map *map = &pmu_events_map[i++];
+ if (!pmu)
+ return &map->event_table;
- if (!map->cpuid)
- break;
+ for (size_t i = 0; i < map->event_table.num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
- if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
- table = &map->event_table;
- break;
- }
- }
- free(cpuid);
- return table;
+ if (pmu__name_match(pmu, pmu_name))
+ return &map->event_table;
+ }
+ return NULL;
}
const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
- const struct pmu_metrics_table *table = NULL;
- char *cpuid = perf_pmu__getcpuid(pmu);
- int i;
+ const struct pmu_events_map *map = map_for_pmu(pmu);
- /* on some platforms which uses cpus map, cpuid can be NULL for
- * PMUs other than CORE PMUs.
- */
- if (!cpuid)
- return NULL;
+ if (!map)
+ return NULL;
- i = 0;
- for (;;) {
- const struct pmu_events_map *map = &pmu_events_map[i++];
+ if (!pmu)
+ return &map->metric_table;
- if (!map->cpuid)
- break;
+ for (size_t i = 0; i < map->metric_table.num_pmus; i++) {
+ const struct pmu_table_entry *table_pmu = &map->metric_table.pmus[i];
+ const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
- if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
- table = &map->metric_table;
- break;
- }
- }
- free(cpuid);
- return table;
+ if (pmu__name_match(pmu, pmu_name))
+ return &map->metric_table;
+ }
+ return NULL;
}
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
- for (const struct pmu_events_map *tables = &pmu_events_map[0];
- tables->arch;
- tables++) {
- if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
- return &tables->event_table;
- }
- return NULL;
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
+ return &tables->event_table;
+ }
+ return NULL;
}
const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
- for (const struct pmu_events_map *tables = &pmu_events_map[0];
- tables->arch;
- tables++) {
- if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
- return &tables->metric_table;
- }
- return NULL;
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
+ return &tables->metric_table;
+ }
+ return NULL;
}
int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
- for (const struct pmu_events_map *tables = &pmu_events_map[0]; tables->arch; tables++) {
- int ret = pmu_events_table__for_each_event(&tables->event_table,
- /*pmu=*/ NULL, fn, data);
-
- if (ret)
- return ret;
- }
- return 0;
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ int ret = pmu_events_table__for_each_event(&tables->event_table,
+ /*pmu=*/ NULL, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
}
int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
- for (const struct pmu_events_map *tables = &pmu_events_map[0];
- tables->arch;
- tables++) {
- int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
-
- if (ret)
- return ret;
- }
- return 0;
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
}
const struct pmu_events_table *find_sys_events_table(const char *name)
{
- for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
- tables->name;
- tables++) {
- if (!strcmp(tables->name, name))
- return &tables->table;
- }
- return NULL;
+ for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ tables->name;
+ tables++) {
+ if (!strcmp(tables->name, name))
+ return &tables->event_table;
+ }
+ return NULL;
}
int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
- for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
- tables->name;
- tables++) {
- int ret = pmu_events_table__for_each_event(&tables->table, /*pmu=*/ NULL, fn, data);
-
- if (ret)
- return ret;
- }
- return 0;
+ for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ tables->name;
+ tables++) {
+ int ret = pmu_events_table__for_each_event(&tables->event_table,
+ /*pmu=*/ NULL, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
}
-int pmu_for_each_sys_metric(pmu_metric_iter_fn fn __maybe_unused, void *data __maybe_unused)
+int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
{
- return 0;
+ for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ tables->name;
+ tables++) {
+ int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
}
-const char *describe_metricgroup(const char *group __maybe_unused)
+static const int metricgroups[][2] = {
+
+};
+
+const char *describe_metricgroup(const char *group)
{
- return NULL;
+ int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;
+
+ while (low <= high) {
+ int mid = (low + high) / 2;
+ const char *mgroup = &big_c_string[metricgroups[mid][0]];
+ int cmp = strcmp(mgroup, group);
+
+ if (cmp == 0) {
+ return &big_c_string[metricgroups[mid][1]];
+ } else if (cmp < 0) {
+ low = mid + 1;
+ } else {
+ high = mid - 1;
+ }
+ }
+ return NULL;
}
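
The generated describe_metricgroup() above is a textbook binary search over (group, description) offset pairs sorted by group name. A minimal Python sketch of the same lookup follows; METRICGROUPS is hypothetical in-memory data standing in for the generated offsets into big_c_string:

# A rough Python model of describe_metricgroup(); METRICGROUPS stands in for
# the generated offset pairs into big_c_string and is hypothetical data.
import bisect

METRICGROUPS = [
    ("Backend", "Grouping from backend-bound metrics"),
    ("Frontend", "Grouping from frontend-bound metrics"),
    ("Memory", "Grouping from memory subsystem metrics"),
]

def describe_metricgroup(group):
    keys = [g for g, _ in METRICGROUPS]
    i = bisect.bisect_left(keys, group)      # binary search, like the C loop
    if i < len(METRICGROUPS) and METRICGROUPS[i][0] == group:
        return METRICGROUPS[i][1]
    return None                              # unknown group -> NULL in C
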
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index ac9b7ca41856..bb0a5d92df4a 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -503,8 +503,11 @@ def print_pending_events() -> None:
first = True
last_pmu = None
+ last_name = None
pmus = set()
for event in sorted(_pending_events, key=event_cmp_key):
+ if last_pmu and last_pmu == event.pmu:
+ assert event.name != last_name, f"Duplicate event: {last_pmu}/{last_name}/ in {_pending_events_tblname}"
if event.pmu != last_pmu:
if not first:
_args.output_file.write('};\n')
@@ -516,6 +519,7 @@ def print_pending_events() -> None:
pmus.add((event.pmu, pmu_name))
_args.output_file.write(event.to_c_string(metric=False))
+ last_name = event.name
_pending_events = []
_args.output_file.write(f"""
@@ -631,14 +635,17 @@ def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
"""Process a JSON file during the main walk."""
- def is_leaf_dir(path: str) -> bool:
+ def is_leaf_dir_ignoring_sys(path: str) -> bool:
for item in os.scandir(path):
- if item.is_dir():
+ if item.is_dir() and item.name != 'sys':
return False
return True
- # model directory, reset topic
- if item.is_dir() and is_leaf_dir(item.path):
+ # Model directories are leaves (ignoring possible sys
+ # directories). The FTW will walk into the directory next. Flush
+ # pending events and metrics and update the table names for the new
+ # model directory.
+ if item.is_dir() and is_leaf_dir_ignoring_sys(item.path):
print_pending_events()
print_pending_metrics()
@@ -906,7 +913,7 @@ static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table
do_call:
return fn ? fn(&pe, table, data) : 0;
}
- return -1000;
+ return PMU_EVENTS__NOT_FOUND;
}
int pmu_events_table__for_each_event(const struct pmu_events_table *table,
@@ -944,10 +951,10 @@ int pmu_events_table__find_event(const struct pmu_events_table *table,
continue;
ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
- if (ret != -1000)
+ if (ret != PMU_EVENTS__NOT_FOUND)
return ret;
}
- return -1000;
+ return PMU_EVENTS__NOT_FOUND;
}
size_t pmu_events_table__num_events(const struct pmu_events_table *table,
@@ -1256,6 +1263,10 @@ such as "arm/cortex-a34".''',
'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
_args = ap.parse_args()
+ _args.output_file.write(f"""
+/* SPDX-License-Identifier: GPL-2.0 */
+/* THIS FILE WAS AUTOGENERATED BY jevents.py arch={_args.arch} model={_args.model} ! */
+""")
_args.output_file.write("""
#include <pmu-events/pmu-events.h>
#include "util/header.h"
@@ -1281,7 +1292,7 @@ struct pmu_table_entry {
if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
archs.append(item.name)
- if len(archs) < 2:
+ if len(archs) < 2 and _args.arch != 'none':
raise IOError(f'Missing architecture directory \'{_args.arch}\'')
archs.sort()
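
The new assert in print_pending_events() only works because _pending_events is sorted by event_cmp_key, so any duplicate (pmu, name) pair must be adjacent. A sketch of that adjacent-key check in Python, with made-up event tuples; running it trips the assert on the duplicated cpu/cycles pair, which is exactly what the generator now catches at build time:

# Duplicates in a list sorted by (pmu, name) are always adjacent, so one
# comparison against the previous key is enough; the events are made up.
events = sorted([("cpu", "cycles"), ("cpu", "instructions"), ("cpu", "cycles")])

last = None
for pmu, name in events:
    assert (pmu, name) != last, f"Duplicate event: {pmu}/{name}/"
    last = (pmu, name)
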
diff --git a/tools/perf/pmu-events/models.py b/tools/perf/pmu-events/models.py
new file mode 100755
index 000000000000..8f727d29c952
--- /dev/null
+++ b/tools/perf/pmu-events/models.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+"""List model names from mapfile.csv files."""
+import argparse
+import csv
+import os
+import re
+from typing import List
+
+def main() -> None:
+ def dir_path(path: str) -> str:
+ """Validate path is a directory for argparse."""
+ if os.path.isdir(path):
+ return path
+ raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
+
+ def find_archs(start_dir: str, arch: str) -> List[str]:
+ archs = []
+ for item in os.scandir(start_dir):
+ if not item.is_dir():
+ continue
+ if arch in (item.name, 'all'):
+ archs.append(item.name)
+
+ if len(archs) < 1:
+ raise IOError(f'Missing architecture directory \'{arch}\'')
+
+ return archs
+
+ def find_mapfiles(start_dir: str, archs: List[str]) -> List[str]:
+ result = []
+ for arch in archs:
+ for item in os.scandir(f'{start_dir}/{arch}'):
+ if item.is_dir():
+ continue
+ if item.name == 'mapfile.csv':
+ result.append(f'{start_dir}/{arch}/mapfile.csv')
+ return result
+
+ def find_cpuids(mapfiles: List[str], cpuids: str) -> List[str]:
+ result = []
+ for mapfile in mapfiles:
+ with open(mapfile, encoding='utf-8') as csvfile:
+ first = False
+ table = csv.reader(csvfile)
+ for row in table:
+ if not first or len(row) == 0 or row[0].startswith('#'):
+ first = True
+ continue
+ # Python regular expressions don't handle xdigit.
+ regex = row[0].replace('[[:xdigit:]]', '[0-9a-fA-F]')
+ for cpuid in cpuids.split(','):
+ if re.match(regex, cpuid):
+ result.append(row[2])
+ return result
+
+ ap = argparse.ArgumentParser()
+ ap.add_argument('arch', help='Architecture name like x86')
+ ap.add_argument('cpuid', default='all', help='List of cpuids to convert to model names')
+ ap.add_argument(
+ 'starting_dir',
+ type=dir_path,
+ help='Root of tree containing architecture directories containing json files'
+ )
+ args = ap.parse_args()
+
+ archs = find_archs(args.starting_dir, args.arch)
+ mapfiles = find_mapfiles(args.starting_dir, archs)
+ models = find_cpuids(mapfiles, args.cpuid)
+ print(','.join(models))
+
+if __name__ == '__main__':
+ main()
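
Because Python's re module does not support POSIX character classes such as [[:xdigit:]], find_cpuids() rewrites the class before matching. A standalone sketch of that translation, using a made-up mapfile row:

import re

# Hypothetical mapfile.csv row: cpuid regex, version, model name, type.
row = ['GenuineIntel-6-[[:xdigit:]]+', 'v1', 'skylake', 'core']

# Python regular expressions don't handle [[:xdigit:]]; rewrite it by hand,
# exactly as find_cpuids() does.
regex = row[0].replace('[[:xdigit:]]', '[0-9a-fA-F]')

for cpuid in 'GenuineIntel-6-55,AuthenticAMD-25-1'.split(','):
    if re.match(regex, cpuid):
        print(f'{cpuid} -> {row[2]}')    # only the Intel cpuid matches
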
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index f5aa96f1685c..5435ad92180c 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -70,6 +70,8 @@ struct pmu_metric {
struct pmu_events_table;
struct pmu_metrics_table;
+#define PMU_EVENTS__NOT_FOUND -1000
+
typedef int (*pmu_event_iter_fn)(const struct pmu_event *pe,
const struct pmu_events_table *table,
void *data);
@@ -82,6 +84,13 @@ int pmu_events_table__for_each_event(const struct pmu_events_table *table,
struct perf_pmu *pmu,
pmu_event_iter_fn fn,
void *data);
+/*
+ * Search for a table and entry matching with pmu__name_match. Each matching
+ * event has fn called on it. 0 implies success/continue the search, while
+ * non-zero means terminate. The special value PMU_EVENTS__NOT_FOUND indicates
+ * that no event was found in one of the tables, which doesn't terminate the
+ * search of all tables.
+ */
int pmu_events_table__find_event(const struct pmu_events_table *table,
struct perf_pmu *pmu,
const char *name,
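
The PMU_EVENTS__NOT_FOUND sentinel lets a per-PMU search distinguish "the callback produced a result" from "nothing matched here, keep scanning the remaining tables". A compact Python sketch of the same control flow, with hypothetical event tables:

# Sketch of the sentinel protocol: a per-PMU search returns NOT_FOUND to mean
# "keep scanning other tables", while any other value stops the search.
PMU_EVENTS__NOT_FOUND = -1000

def find_event_pmu(table, name, fn):
    for event in table:                      # events of a single PMU
        if event['name'].lower() == name.lower():
            return fn(event)                 # 0 or a caller-chosen result
    return PMU_EVENTS__NOT_FOUND             # nothing here, caller continues

def find_event(tables, name, fn):
    for table in tables:
        ret = find_event_pmu(table, name, fn)
        if ret != PMU_EVENTS__NOT_FOUND:     # found, or fn asked to stop
            return ret
    return PMU_EVENTS__NOT_FOUND
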
diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py
index d973c2baed1c..7aff02d84ffb 100755
--- a/tools/perf/scripts/python/arm-cs-trace-disasm.py
+++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py
@@ -192,17 +192,16 @@ def process_event(param_dict):
ip = sample["ip"]
addr = sample["addr"]
+ if (options.verbose == True):
+ print("Event type: %s" % name)
+ print_sample(sample)
+
# Initialize CPU data if it's empty, and directly return back
# if this is the first tracing event for this CPU.
if (cpu_data.get(str(cpu) + 'addr') == None):
cpu_data[str(cpu) + 'addr'] = addr
return
-
- if (options.verbose == True):
- print("Event type: %s" % name)
- print_sample(sample)
-
# If cannot find dso so cannot dump assembler, bail out
if (dso == '[unknown]'):
return
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index 6f921db33cf9..4cb7d486b5c1 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -16,6 +16,7 @@
#include "tests.h"
#include "debug.h"
#include "event.h"
+#include "parse-events.h"
#include "../perf-sys.h"
#include "cloexec.h"
@@ -50,7 +51,7 @@ static int __event(bool is_x, void *addr, struct perf_event_attr *attr)
attr->config = 0;
attr->bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
attr->bp_addr = (unsigned long) addr;
- attr->bp_len = sizeof(long);
+ attr->bp_len = is_x ? default_breakpoint_len() : sizeof(long);
attr->sample_period = 1;
attr->sample_type = PERF_SAMPLE_IP;
@@ -92,6 +93,7 @@ static int bp_accounting(int wp_cnt, int share)
attr_mod = attr;
attr_mod.bp_type = HW_BREAKPOINT_X;
attr_mod.bp_addr = (unsigned long) test_function;
+ attr_mod.bp_len = default_breakpoint_len();
ret = ioctl(fd[0], PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr_mod);
TEST_ASSERT_VAL("failed to modify wp\n", ret == 0);
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index 1f2908f02389..3faeb5b6fe0b 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -26,6 +26,7 @@
#include "tests.h"
#include "debug.h"
#include "event.h"
+#include "parse-events.h"
#include "perf-sys.h"
#include "cloexec.h"
@@ -111,7 +112,7 @@ static int __event(bool is_x, void *addr, int sig)
pe.config = 0;
pe.bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
pe.bp_addr = (unsigned long) addr;
- pe.bp_len = sizeof(long);
+ pe.bp_len = is_x ? default_breakpoint_len() : sizeof(long);
pe.sample_period = 1;
pe.sample_type = PERF_SAMPLE_IP;
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
index 4e897c2cf26b..ee560e156be6 100644
--- a/tools/perf/tests/bp_signal_overflow.c
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -25,6 +25,7 @@
#include "tests.h"
#include "debug.h"
#include "event.h"
+#include "parse-events.h"
#include "../perf-sys.h"
#include "cloexec.h"
@@ -88,7 +89,7 @@ static int test__bp_signal_overflow(struct test_suite *test __maybe_unused, int
pe.config = 0;
pe.bp_type = HW_BREAKPOINT_X;
pe.bp_addr = (unsigned long) test_function;
- pe.bp_len = sizeof(long);
+ pe.bp_len = default_breakpoint_len();
pe.sample_period = THRESHOLD;
pe.sample_type = PERF_SAMPLE_IP;
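
The bp_len changes above replace the historical sizeof(long) with default_breakpoint_len() for execution breakpoints, because the required length is architecture-defined rather than tied to the word size. A rough illustration of the idea in Python; the per-arch lengths below are assumptions for the sketch, not the values perf computes:

import platform

# Hypothetical per-arch execution-breakpoint lengths; perf derives the real
# value in default_breakpoint_len(), these numbers are illustrative only.
ARCH_BP_LEN = {'x86_64': 1, 'aarch64': 4}

def default_breakpoint_len():
    return ARCH_BP_LEN.get(platform.machine(), 8)   # fallback: sizeof(long)
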
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index c3d84b67ca8e..470a9709427d 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -152,6 +152,7 @@ static struct test_workload *workloads[] = {
&workload__sqrtloop,
&workload__brstack,
&workload__datasym,
+ &workload__landlock,
};
static int num_subtests(const struct test_suite *t)
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
index bd8e396f3e57..2f0168b2a5a9 100644
--- a/tools/perf/tests/cpumap.c
+++ b/tools/perf/tests/cpumap.c
@@ -11,7 +11,7 @@
struct machine;
-static int process_event_mask(struct perf_tool *tool __maybe_unused,
+static int process_event_mask(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -47,7 +47,7 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_event_cpus(struct perf_tool *tool __maybe_unused,
+static int process_event_cpus(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -73,7 +73,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_event_range_cpus(struct perf_tool *tool __maybe_unused,
+static int process_event_range_cpus(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
diff --git a/tools/perf/tests/dlfilter-test.c b/tools/perf/tests/dlfilter-test.c
index da3a9b50b1b1..54f59d1246bc 100644
--- a/tools/perf/tests/dlfilter-test.c
+++ b/tools/perf/tests/dlfilter-test.c
@@ -62,7 +62,7 @@ static int test_result(const char *msg, int ret)
return ret;
}
-static int process(struct perf_tool *tool, union perf_event *event,
+static int process(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index d01aa931fe81..f85d391ced98 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -37,7 +37,7 @@
#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
#endif
-static int mmap_handler(struct perf_tool *tool __maybe_unused,
+static int mmap_handler(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index d093a9b878d1..d6b4ce3ef4ee 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -12,7 +12,7 @@
#include "tests.h"
#include "debug.h"
-static int process_event_unit(struct perf_tool *tool __maybe_unused,
+static int process_event_unit(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -25,7 +25,7 @@ static int process_event_unit(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_event_scale(struct perf_tool *tool __maybe_unused,
+static int process_event_scale(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -43,7 +43,7 @@ struct event_name {
const char *name;
};
-static int process_event_name(struct perf_tool *tool,
+static int process_event_name(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -57,7 +57,7 @@ static int process_event_name(struct perf_tool *tool,
return 0;
}
-static int process_event_cpus(struct perf_tool *tool __maybe_unused,
+static int process_event_cpus(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -103,6 +103,7 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
TEST_ASSERT_VAL("failed to synthesize attr update scale",
!perf_event__synthesize_event_update_scale(NULL, evsel, process_event_scale));
+ perf_tool__init(&tmp.tool, /*ordered_events=*/false);
tmp.name = evsel__name(evsel);
TEST_ASSERT_VAL("failed to synthesize attr update name",
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index a1f8adf85367..a5040772043f 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -70,6 +70,7 @@ make_python_perf_so := $(python_perf_so)
make_debug := DEBUG=1
make_nondistro := BUILD_NONDISTRO=1
make_extra_tests := EXTRA_TESTS=1
+make_jevents_all := JEVENTS_ARCH=all
make_no_bpf_skel := BUILD_BPF_SKEL=0
make_gen_vmlinux_h := GEN_VMLINUX_H=1
make_no_libperl := NO_LIBPERL=1
@@ -92,6 +93,7 @@ make_no_libbpf := NO_LIBBPF=1
make_libbpf_dynamic := LIBBPF_DYNAMIC=1
make_no_libbpf_DEBUG := NO_LIBBPF=1 DEBUG=1
make_no_libcrypto := NO_LIBCRYPTO=1
+make_no_libllvm := NO_LIBLLVM=1
make_with_babeltrace:= LIBBABELTRACE=1
make_with_coresight := CORESIGHT=1
make_no_sdt := NO_SDT=1
@@ -140,6 +142,7 @@ run += make_python_perf_so
run += make_debug
run += make_nondistro
run += make_extra_tests
+run += make_jevents_all
run += make_no_bpf_skel
run += make_gen_vmlinux_h
run += make_no_libperl
@@ -161,6 +164,7 @@ run += make_no_auxtrace
run += make_no_libbpf
run += make_no_libbpf_DEBUG
run += make_no_libcrypto
+run += make_no_libllvm
run += make_no_sdt
run += make_no_syscall_tbl
run += make_with_babeltrace
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index edc2adcf1bae..9e3086d02150 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -262,7 +262,7 @@ static int test__checkevent_breakpoint_x(struct evlist *evlist)
TEST_ASSERT_VAL("wrong config", test_config(evsel, 0));
TEST_ASSERT_VAL("wrong bp_type",
HW_BREAKPOINT_X == evsel->core.attr.bp_type);
- TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->core.attr.bp_len);
+ TEST_ASSERT_VAL("wrong bp_len", default_breakpoint_len() == evsel->core.attr.bp_len);
return TEST_OK;
}
@@ -2500,7 +2500,7 @@ static int test_event(const struct evlist_test *e)
return TEST_FAIL;
}
parse_events_error__init(&err);
- ret = __parse_events(evlist, e->name, /*pmu_filter=*/NULL, &err, /*fake_pmu=*/NULL,
+ ret = __parse_events(evlist, e->name, /*pmu_filter=*/NULL, &err, /*fake_pmu=*/false,
/*warn_if_reordered=*/true, /*fake_tp=*/true);
if (ret) {
pr_debug("failed to parse event '%s', err %d\n", e->name, ret);
@@ -2529,7 +2529,7 @@ static int test_event_fake_pmu(const char *str)
parse_events_error__init(&err);
ret = __parse_events(evlist, str, /*pmu_filter=*/NULL, &err,
- &perf_pmu__fake, /*warn_if_reordered=*/true,
+ /*fake_pmu=*/true, /*warn_if_reordered=*/true,
/*fake_tp=*/true);
if (ret) {
pr_debug("failed to parse event '%s', err %d\n",
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index ff3e7bc0a77f..db004d26fcb0 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -819,8 +819,7 @@ static bool is_number(const char *str)
return errno == 0 && end_ptr != str;
}
-static int check_parse_id(const char *id, struct parse_events_error *error,
- struct perf_pmu *fake_pmu)
+static int check_parse_id(const char *id, struct parse_events_error *error)
{
struct evlist *evlist;
int ret;
@@ -841,7 +840,7 @@ static int check_parse_id(const char *id, struct parse_events_error *error,
for (cur = strchr(dup, '@') ; cur; cur = strchr(++cur, '@'))
*cur = '/';
- ret = __parse_events(evlist, dup, /*pmu_filter=*/NULL, error, fake_pmu,
+ ret = __parse_events(evlist, dup, /*pmu_filter=*/NULL, error, /*fake_pmu=*/true,
/*warn_if_reordered=*/true, /*fake_tp=*/false);
free(dup);
@@ -855,7 +854,7 @@ static int check_parse_fake(const char *id)
int ret;
parse_events_error__init(&error);
- ret = check_parse_id(id, &error, &perf_pmu__fake);
+ ret = check_parse_id(id, &error);
parse_events_error__exit(&error);
return ret;
}
@@ -1051,9 +1050,8 @@ static int test__parsing_fake_callback(const struct pmu_metric *pm,
}
/*
- * Parse all the metrics for current architecture,
- * or all defined cpus via the 'fake_pmu'
- * in parse_events.
+ * Parse all the metrics for current architecture, or all defined cpus via the
+ * 'fake_pmu' in parse_events.
*/
static int test__parsing_fake(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index c76f53a90a7b..be18506f6a24 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -18,9 +18,6 @@
#include <sys/stat.h>
#include <sys/types.h>
-/* Fake PMUs created in temp directory. */
-static LIST_HEAD(test_pmus);
-
/* Cleanup test PMU directory. */
static int test_pmu_put(const char *dir, struct perf_pmu *pmu)
{
@@ -461,10 +458,10 @@ static int test__name_cmp(struct test_suite *test __maybe_unused, int subtest __
*/
static int test__pmu_match(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
- struct perf_pmu test_pmu;
- test_pmu.alias_name = NULL;
+ struct perf_pmu test_pmu = {
+ .name = "pmuname",
+ };
- test_pmu.name = "pmuname";
TEST_ASSERT_EQUAL("Exact match", perf_pmu__match(&test_pmu, "pmuname"), true);
TEST_ASSERT_EQUAL("Longer token", perf_pmu__match(&test_pmu, "longertoken"), false);
TEST_ASSERT_EQUAL("Shorter token", perf_pmu__match(&test_pmu, "pmu"), false);
diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh
index b072d9b97387..2ccf4f1d46b6 100755
--- a/tools/perf/tests/shell/annotate.sh
+++ b/tools/perf/tests/shell/annotate.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf annotate basic tests
# SPDX-License-Identifier: GPL-2.0
@@ -28,6 +28,7 @@ cleanup() {
}
trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
diff --git a/tools/perf/tests/shell/base_probe/settings.sh b/tools/perf/tests/shell/base_probe/settings.sh
deleted file mode 100644
index 123621c7f95e..000000000000
--- a/tools/perf/tests/shell/base_probe/settings.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# settings.sh of perf_probe test
-# Author: Michael Petlan <mpetlan@redhat.com>
-# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
-#
-
-export TEST_NAME="perf_probe"
-
-export MY_ARCH=`arch`
-
-if [ -n "$PERFSUITE_RUN_DIR" ]; then
- # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
- # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
- # dirs will be used for that
- export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
- export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
- export MAKE_TARGET_DIR="$CURRENT_TEST_DIR/examples"
- test -d "$MAKE_TARGET_DIR" || mkdir -p "$MAKE_TARGET_DIR"
- export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
- test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
-else
- # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
- export CURRENT_TEST_DIR="."
- export LOGS_DIR="."
-fi
-
-check_kprobes_available()
-{
- test -e /sys/kernel/debug/tracing/kprobe_events
-}
-
-check_uprobes_available()
-{
- test -e /sys/kernel/debug/tracing/uprobe_events
-}
-
-clear_all_probes()
-{
- echo 0 > /sys/kernel/debug/tracing/events/enable
- check_kprobes_available && echo > /sys/kernel/debug/tracing/kprobe_events
- check_uprobes_available && echo > /sys/kernel/debug/tracing/uprobe_events
-}
-
-check_sdt_support()
-{
- $CMD_PERF list sdt | grep sdt > /dev/null 2> /dev/null
-}
diff --git a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
new file mode 100755
index 000000000000..b5dc10b2a738
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_adding_blacklisted of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# Blacklisted functions should not be added successfully as probes;
+# they must be skipped.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+# skip if not supported
+BLACKFUNC=`head -n 1 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
+if [ -z "$BLACKFUNC" ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+# remove all previously added probes
+clear_all_probes
+
+
+### adding blacklisted function
+
+# functions from blacklist should be skipped by perf probe
+! $CMD_PERF probe $BLACKFUNC > $LOGS_DIR/adding_blacklisted.log 2> $LOGS_DIR/adding_blacklisted.err
+PERF_EXIT_CODE=$?
+
+REGEX_SCOPE_FAIL="Failed to find scope of probe point"
+REGEX_SKIP_MESSAGE=" is blacklisted function, skip it\."
+REGEX_NOT_FOUND_MESSAGE="Probe point \'$BLACKFUNC\' not found."
+REGEX_ERROR_MESSAGE="Error: Failed to add events."
+REGEX_INVALID_ARGUMENT="Failed to write event: Invalid argument"
+REGEX_SYMBOL_FAIL="Failed to find symbol at $RE_ADDRESS"
+REGEX_OUT_SECTION="$BLACKFUNC is out of \.\w+, skip it"
+../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding blacklisted function $BLACKFUNC"
+(( TEST_RESULT += $? ))
+
+
+### listing not-added probe
+
+# blacklisted probes should NOT appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_blacklisted_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" < $LOGS_DIR/adding_blacklisted_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing blacklisted probe (should NOT be listed)"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
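
test_adding_blacklisted.sh picks the first entry from the kprobes blacklist and asserts that perf probe refuses it. The equivalent first step in Python, assuming debugfs is mounted at the usual path:

# Pick the first blacklisted function, mirroring the head/cut pipeline above.
# Assumes debugfs is mounted; returns None to signal "skip the test".
def first_blacklisted_function(path='/sys/kernel/debug/kprobes/blacklist'):
    try:
        with open(path, encoding='utf-8') as f:
            line = f.readline()
    except OSError:
        return None
    fields = line.split('\t')                # format: "0x...-0x...\tname"
    return fields[1].strip() if len(fields) > 1 else None
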
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
index 187dc8d4b163..d541ffd44a93 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -15,10 +15,7 @@
# include working environment
. ../common/init.sh
-. ./settings.sh
-# shellcheck disable=SC2034 # the variable is later used after the working environment is included
-THIS_TEST_NAME=`basename $0 .sh`
TEST_RESULT=0
# shellcheck source=lib/probe_vfs_getname.sh
diff --git a/tools/perf/tests/shell/base_probe/test_basic.sh b/tools/perf/tests/shell/base_probe/test_basic.sh
new file mode 100755
index 000000000000..09669ec479f2
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_basic.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_basic of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+# Description:
+#
+# This test tests the basic functionality of the perf probe command.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+if ! check_kprobes_available; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF probe --help > $LOGS_DIR/basic_helpmsg.log 2> $LOGS_DIR/basic_helpmsg.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-PROBE" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "PROBE\s+SYNTAX" "PROBE\s+ARGUMENT" "LINE\s+SYNTAX" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "LAZY\s+MATCHING" "FILTER\s+PATTERN" "EXAMPLES" "SEE\s+ALSO" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "vmlinux" "module=" "source=" "verbose" "quiet" "add=" "del=" "list.*EVENT" "line=" "vars=" "externs" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "no-inlines" "funcs.*FILTER" "filter=FILTER" "force" "dry-run" "max-probes" "exec=" "demangle-kernel" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_no_patterns_found.pl "No manual entry for" < $LOGS_DIR/basic_helpmsg.err
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### usage message
+
+# without any args perf-probe should print usage
+$CMD_PERF probe 2> $LOGS_DIR/basic_usage.log > /dev/null
+
+../common/check_all_patterns_found.pl "[Uu]sage" "perf probe" "verbose" "quiet" "add" "del" "force" "line" "vars" "externs" "range" < $LOGS_DIR/basic_usage.log
+CHECK_EXIT_CODE=$?
+
+print_results 0 $CHECK_EXIT_CODE "usage message"
+(( TEST_RESULT += $? ))
+
+
+### quiet switch
+
+# '--quiet' should mute all output
+$CMD_PERF probe --quiet --add vfs_read > $LOGS_DIR/basic_quiet01.log 2> $LOGS_DIR/basic_quiet01.err
+PERF_EXIT_CODE=$?
+$CMD_PERF probe --quiet --del vfs_read > $LOGS_DIR/basic_quiet02.log 2> $LOGS_DIR/basic_quiet02.err
+(( PERF_EXIT_CODE += $? ))
+
+test "`cat $LOGS_DIR/basic_quiet*log $LOGS_DIR/basic_quiet*err | wc -l`" -eq 0
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "quiet switch"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/tests/shell/base_probe/test_invalid_options.sh b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
new file mode 100755
index 000000000000..1fedfd8b0d0d
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_invalid_options of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks whether invalid and incompatible options are reported.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+if ! check_kprobes_available; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### missing argument
+
+# some options require an argument
+for opt in '-a' '-d' '-L' '-V'; do
+ ! $CMD_PERF probe $opt 2> $LOGS_DIR/invalid_options_missing_argument$opt.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Error: switch .* requires a value" < $LOGS_DIR/invalid_options_missing_argument$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "missing argument for $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### unnecessary argument
+
+# some options may omit the argument
+for opt in '-F' '-l'; do
+	$CMD_PERF probe $opt > /dev/null 2> $LOGS_DIR/invalid_options_unnecessary_argument$opt.err
+ PERF_EXIT_CODE=$?
+
+ test ! -s $LOGS_DIR/invalid_options_unnecessary_argument$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "unnecessary argument for $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### mutually exclusive options
+
+# some options are mutually exclusive
+test -e $LOGS_DIR/invalid_options_mutually_exclusive.log && rm -f $LOGS_DIR/invalid_options_mutually_exclusive.log
+for opt in '-a xxx -d xxx' '-a xxx -L foo' '-a xxx -V foo' '-a xxx -l' '-a xxx -F' \
+ '-d xxx -L foo' '-d xxx -V foo' '-d xxx -l' '-d xxx -F' \
+ '-L foo -V bar' '-L foo -l' '-L foo -F' '-V foo -l' '-V foo -F' '-l -F'; do
+ ! $CMD_PERF probe $opt > /dev/null 2> $LOGS_DIR/aux.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Error: switch .+ cannot be used with switch .+" < $LOGS_DIR/aux.log
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "mutually exclusive options :: $opt"
+ (( TEST_RESULT += $? ))
+
+ # gather the logs
+ cat $LOGS_DIR/aux.log | grep "Error" >> $LOGS_DIR/invalid_options_mutually_exclusive.log
+done
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
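
The mutually-exclusive checks above exercise perf's hand-rolled option parser; as a point of comparison, the same constraint expressed with Python's argparse, with hypothetical option names:

import argparse

# Hypothetical CLI expressing the same add/del/line exclusivity with argparse,
# which rejects conflicting switches automatically.
parser = argparse.ArgumentParser(prog='probe-demo')
group = parser.add_mutually_exclusive_group()
group.add_argument('-a', '--add')
group.add_argument('-d', '--del', dest='delete')
group.add_argument('-L', '--line')

# parser.parse_args(['-a', 'xxx', '-d', 'xxx']) exits with:
#   error: argument -d/--del: not allowed with argument -a/--add
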
diff --git a/tools/perf/tests/shell/base_probe/test_line_semantics.sh b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
new file mode 100755
index 000000000000..d8f4bde0f585
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_line_semantics of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks whether semantic errors in the '--line' option's
+# arguments are properly reported.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+if ! check_kprobes_available; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### acceptable --line descriptions
+
+# testing acceptance of valid patterns for the '--line' option
+VALID_PATTERNS="func func:10 func:0-10 func:2+10 func@source.c func@source.c:1 source.c:1 source.c:1+1 source.c:1-10"
+for desc in $VALID_PATTERNS; do
+ ! ( $CMD_PERF probe --line $desc 2>&1 | grep -q "Semantic error" )
+ CHECK_EXIT_CODE=$?
+
+ print_results 0 $CHECK_EXIT_CODE "acceptable descriptions :: $desc"
+ (( TEST_RESULT += $? ))
+done
+
+
+### unacceptable --line descriptions
+
+# testing handling of invalid patterns for the '--line' option
+INVALID_PATTERNS="func:foo func:1-foo func:1+foo func;lazy\*pattern"
+for desc in $INVALID_PATTERNS; do
+ $CMD_PERF probe --line $desc 2>&1 | grep -q "Semantic error"
+ CHECK_EXIT_CODE=$?
+
+ print_results 0 $CHECK_EXIT_CODE "unacceptable descriptions :: $desc"
+ (( TEST_RESULT += $? ))
+done
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
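
The valid patterns in this test follow the perf-probe LINE SYNTAX grammar (FUNC[:RLN[+NUM|-RLN2]] or SRC:ALN[+NUM|-ALN2]). A hedged Python sketch of a validator for just the forms the test exercises; it approximates the grammar and is not the real parser:

import re

# Rough validator for the --line forms this test exercises; an approximation
# of perf-probe's grammar, not the real parser.
LINE_RE = re.compile(
    r'^(?:\w+(?:@[\w./]+)?(?::\d+(?:[+-]\d+)?)?'   # func, func:10, func@src.c:1
    r'|[\w./]+\.c:\d+(?:[+-]\d+)?)$'               # source.c:1, source.c:1+1
)

for desc in ['func', 'func:0-10', 'func@source.c:1', 'source.c:1+1', 'func:foo']:
    print(desc, bool(LINE_RE.match(desc)))         # 'func:foo' is invalid
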
diff --git a/tools/perf/tests/shell/base_report/setup.sh b/tools/perf/tests/shell/base_report/setup.sh
new file mode 100755
index 000000000000..4caa496660c6
--- /dev/null
+++ b/tools/perf/tests/shell/base_report/setup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# setup.sh of perf report test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# We need some sample data for perf-report testing
+#
+#
+
+# include working environment
+. ../common/init.sh
+
+test -d "$HEADER_TAR_DIR" || mkdir -p "$HEADER_TAR_DIR"
+
+SW_EVENT="cpu-clock"
+
+$CMD_PERF record -asdg -e $SW_EVENT -o $CURRENT_TEST_DIR/perf.data -- $CMD_LONGER_SLEEP 2> $LOGS_DIR/setup.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data file"
+TEST_RESULT=$?
+
+print_overall_results $TEST_RESULT
+exit $?
diff --git a/tools/perf/tests/shell/base_report/stderr-whitelist.txt b/tools/perf/tests/shell/base_report/stderr-whitelist.txt
new file mode 100644
index 000000000000..e3341401b47c
--- /dev/null
+++ b/tools/perf/tests/shell/base_report/stderr-whitelist.txt
@@ -0,0 +1,5 @@
+no symbols found in .*, maybe install a debug package
+was updated .*is prelink enabled.+ Restart the long running apps that use it
+Warning:
+\d+ out of order events recorded.
+detected invalid bpf_prog_info
diff --git a/tools/perf/tests/shell/base_report/test_basic.sh b/tools/perf/tests/shell/base_report/test_basic.sh
new file mode 100755
index 000000000000..47677cbd4df3
--- /dev/null
+++ b/tools/perf/tests/shell/base_report/test_basic.sh
@@ -0,0 +1,190 @@
+#!/bin/bash
+
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_basic of perf_report test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests the basic functionality of the perf report command.
+#
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF report --help > $LOGS_DIR/basic_helpmsg.log 2> $LOGS_DIR/basic_helpmsg.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-REPORT" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "OVERHEAD\s+CALCULATION" "SEE ALSO" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "input" "verbose" "show-nr-samples" "show-cpu-utilization" "threads" "comms" "pid" "tid" "dsos" "symbols" "symbol-filter" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "hide-unresolved" "sort" "fields" "parent" "exclude-other" "column-widths" "field-separator" "dump-raw-trace" "children" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "call-graph" "max-stack" "inverted" "ignore-callees" "pretty" "stdio" "tui" "gtk" "vmlinux" "kallsyms" "modules" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "force" "symfs" "cpu" "disassembler-style" "source" "asm-raw" "show-total-period" "show-info" "branch-stack" "group" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "branch-history" "objdump" "demangle" "percent-limit" "percentage" "header" "itrace" "full-source-path" "show-ref-call-graph" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_no_patterns_found.pl "No manual entry for" < $LOGS_DIR/basic_helpmsg.err
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### basic execution
+
+# test that perf report is even working
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data --stdio > $LOGS_DIR/basic_basic.log 2> $LOGS_DIR/basic_basic.err
+PERF_EXIT_CODE=$?
+
+REGEX_LOST_SAMPLES_INFO="#\s*Total Lost Samples:\s+$RE_NUMBER"
+REGEX_SAMPLES_INFO="#\s*Samples:\s+(?:$RE_NUMBER)\w?\s+of\s+event\s+'$RE_EVENT_ANY'"
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+\S+\s+\[kernel\.(?:vmlinux)|(?:kallsyms)\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LOST_SAMPLES_INFO" "$REGEX_SAMPLES_INFO" "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_basic.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_basic.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution"
+(( TEST_RESULT += $? ))
+
+
+### number of samples
+
+# '--show-nr-samples' should show number of samples for each symbol
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --show-nr-samples > $LOGS_DIR/basic_nrsamples.log 2> $LOGS_DIR/basic_nrsamples.err
+PERF_EXIT_CODE=$?
+
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+Samples\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER\s+\S+\s+\[kernel\.(?:vmlinux)|(?:kallsyms)\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_nrsamples.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_nrsamples.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "number of samples"
+(( TEST_RESULT += $? ))
+
+
+### header
+
+# '--header' and '--header-only' should show perf report header
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data --stdio --header-only > $LOGS_DIR/basic_header.log
+PERF_EXIT_CODE=$?
+
+REGEX_LINE_TIMESTAMP="#\s+captured on\s*:\s*$RE_DATE_TIME"
+REGEX_LINE_HOSTNAME="#\s+hostname\s*:\s*$MY_HOSTNAME"
+REGEX_LINE_KERNEL="#\s+os release\s*:\s*${MY_KERNEL_VERSION//+/\\+}"
+REGEX_LINE_PERF="#\s+perf version\s*:\s*"
+REGEX_LINE_ARCH="#\s+arch\s*:\s*$MY_ARCH"
+REGEX_LINE_CPUS_ONLINE="#\s+nrcpus online\s*:\s*$MY_CPUS_ONLINE"
+REGEX_LINE_CPUS_AVAIL="#\s+nrcpus avail\s*:\s*$MY_CPUS_AVAILABLE"
+# disable precise check for "nrcpus avail" in BASIC runmode
+test $PERFTOOL_TESTSUITE_RUNMODE -lt $RUNMODE_STANDARD && REGEX_LINE_CPUS_AVAIL="#\s+nrcpus avail\s*:\s*$RE_NUMBER"
+../common/check_all_patterns_found.pl "$REGEX_LINE_TIMESTAMP" "$REGEX_LINE_HOSTNAME" "$REGEX_LINE_KERNEL" "$REGEX_LINE_PERF" "$REGEX_LINE_ARCH" "$REGEX_LINE_CPUS_ONLINE" "$REGEX_LINE_CPUS_AVAIL" < $LOGS_DIR/basic_header.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "header"
+(( TEST_RESULT += $? ))
+
+# '--header' and '--header-only' should use creation time
+OLD_TIMESTAMP=`$CMD_PERF report --stdio --header-only -i $CURRENT_TEST_DIR/perf.data | grep "captured on"`
+PERF_EXIT_CODE=$?
+
+( tar -C $CURRENT_TEST_DIR -c perf.data | xz > $CURRENT_TEST_DIR/perf.data.tar.xz ; xzcat $CURRENT_TEST_DIR/perf.data.tar.xz | tar x -C $HEADER_TAR_DIR )
+(( PERF_EXIT_CODE += $? ))
+
+NEW_TIMESTAMP=`$CMD_PERF report --stdio --header-only -i $HEADER_TAR_DIR/perf.data | grep "captured on"`
+(( PERF_EXIT_CODE += $? ))
+
+test "$OLD_TIMESTAMP" = "$NEW_TIMESTAMP"
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "header timestamp"
+(( TEST_RESULT += $? ))
+
+
+### show CPU utilization
+
+# '--showcpuutilization' should show percentage for both system and userspace mode
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data --stdio --showcpuutilization > $LOGS_DIR/basic_cpuut.log 2> $LOGS_DIR/basic_cpuut.err
+PERF_EXIT_CODE=$?
+
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+sys\s+usr\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER%\s+\S+\s+\[kernel\.(?:vmlinux)|(?:kallsyms)\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_cpuut.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_cpuut.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "show CPU utilization"
+(( TEST_RESULT += $? ))
+
+
+### pid
+
+# '--pid=' should limit the output for a process with the given pid only
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --pid=1 > $LOGS_DIR/basic_pid.log 2> $LOGS_DIR/basic_pid.err
+PERF_EXIT_CODE=$?
+
+grep -P -v '^#' $LOGS_DIR/basic_pid.log | grep -P '\s+[\d\.]+%' | ../common/check_all_lines_matched.pl "systemd|init"
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_pid.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "pid"
+(( TEST_RESULT += $? ))
+
+
+### non-existing symbol
+
+# '--symbols' should show only the given symbols
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --symbols=dummynonexistingsymbol > $LOGS_DIR/basic_symbols.log 2> $LOGS_DIR/basic_symbols.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/basic_symbols.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_symbols.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing symbol"
+(( TEST_RESULT += $? ))
+
+
+### symbol filter
+
+# '--symbol-filter' should filter symbols based on substrings
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --symbol-filter=map > $LOGS_DIR/basic_symbolfilter.log 2> $LOGS_DIR/basic_symbolfilter.err
+PERF_EXIT_CODE=$?
+
+grep -P -v '^#' $LOGS_DIR/basic_symbolfilter.log | grep -P '\s+[\d\.]+%' | ../common/check_all_lines_matched.pl "\[[k\.]\]\s+.*map"
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_symbolfilter.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
+(( TEST_RESULT += $? ))
+
+
+# TODO: $CMD_PERF report -n --showcpuutilization -TUxDg 2> 01.log
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/tests/shell/common/check_errors_whitelisted.pl b/tools/perf/tests/shell/common/check_errors_whitelisted.pl
new file mode 100755
index 000000000000..c57d355dd76e
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_errors_whitelisted.pl
@@ -0,0 +1,51 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+
+$whitelist_file = shift;
+
+if (defined $whitelist_file)
+{
+ open (INFILE, $whitelist_file) or die "Checker error: Unable to open the whitelist file: $whitelist_file\n";
+ @regexps = <INFILE>;
+ close INFILE or die "Checker error: Unable to close the whitelist file: $whitelist_file\n";
+}
+else
+{
+ @regexps = ();
+}
+
+$max_printed_lines = 20;
+$max_printed_lines = $ENV{TESTLOG_ERR_MSG_MAX_LINES} if (defined $ENV{TESTLOG_ERR_MSG_MAX_LINES});
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+$passed = 1;
+$lines_printed = 0;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ $line_matched = 0;
+ for $r (@regexps)
+ {
+ chomp $r;
+ if (/$r/)
+ {
+ $line_matched = 1;
+ last;
+ }
+ }
+
+ unless ($line_matched)
+ {
+ if ($lines_printed++ < $max_printed_lines)
+ {
+ print "Line did not match any pattern: \"$_\"\n" unless $quiet;
+ }
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
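
check_errors_whitelisted.pl passes only if every stderr line matches some whitelist regex. The same filter rendered in a few lines of Python, as a sketch:

import re
import sys

# Python rendering of the whitelist checker: every input line must match at
# least one whitelist regex, otherwise the filter fails.
def check(whitelist_path, stream=sys.stdin):
    with open(whitelist_path, encoding='utf-8') as f:
        patterns = [re.compile(line.rstrip('\n')) for line in f if line.strip()]
    passed = True
    for line in stream:
        line = line.rstrip('\n')
        if not any(p.search(line) for p in patterns):
            print(f'Line did not match any pattern: "{line}"')
            passed = False
    return passed
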
diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh
index aadeaf782e03..075f17623c8e 100644
--- a/tools/perf/tests/shell/common/init.sh
+++ b/tools/perf/tests/shell/common/init.sh
@@ -26,8 +26,8 @@ print_results()
PERF_RETVAL="$1"; shift
CHECK_RETVAL="$1"; shift
FAILURE_REASON=""
- TASK_COMMENT="$@"
- if [ $PERF_RETVAL -eq 0 -a $CHECK_RETVAL -eq 0 ]; then
+ TASK_COMMENT="$*"
+ if [ $PERF_RETVAL -eq 0 ] && [ $CHECK_RETVAL -eq 0 ]; then
_echo "$MPASS-- [ PASS ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT"
return 0
else
@@ -56,7 +56,7 @@ print_overall_results()
print_testcase_skipped()
{
- TASK_COMMENT="$@"
+ TASK_COMMENT="$*"
_echo "$MSKIP-- [ SKIP ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT :: testcase skipped"
return 0
}
@@ -69,7 +69,7 @@ print_overall_skipped()
print_warning()
{
- WARN_COMMENT="$@"
+ WARN_COMMENT="$*"
_echo "$MWARN-- [ WARN ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $WARN_COMMENT"
return 0
}
@@ -115,3 +115,26 @@ detect_amd()
# 1 = is not AMD or unknown
grep "vendor_id" < /proc/cpuinfo | grep -q "AMD"
}
+
+# base probe utility
+check_kprobes_available()
+{
+ test -e /sys/kernel/debug/tracing/kprobe_events
+}
+
+check_uprobes_available()
+{
+ test -e /sys/kernel/debug/tracing/uprobe_events
+}
+
+clear_all_probes()
+{
+ echo 0 > /sys/kernel/debug/tracing/events/enable
+ check_kprobes_available && echo > /sys/kernel/debug/tracing/kprobe_events
+ check_uprobes_available && echo > /sys/kernel/debug/tracing/uprobe_events
+}
+
+check_sdt_support()
+{
+ $CMD_PERF list sdt | grep sdt > /dev/null 2> /dev/null
+}
diff --git a/tools/perf/tests/shell/common/settings.sh b/tools/perf/tests/shell/common/settings.sh
index 361641dbaaad..cba1b338f96f 100644
--- a/tools/perf/tests/shell/common/settings.sh
+++ b/tools/perf/tests/shell/common/settings.sh
@@ -45,7 +45,7 @@ export TEST_IGNORE_MISSING_PMU=${TEST_IGNORE_MISSING_PMU:-n}
export LC_ALL=C
#### colors
-if [ -t 1 -o "$TESTLOG_FORCE_COLOR" = "yes" ]; then
+if [ -t 1 ] || [ "$TESTLOG_FORCE_COLOR" = "yes" ]; then
export MPASS="\e[32m"
export MALLPASS="\e[1;32m"
export MFAIL="\e[31m"
@@ -65,11 +65,37 @@ else
export MEND=""
fi
+### general info
+DIR_PATH=`dirname "$(readlink -e "$0")"`
+
+TEST_NAME=`basename $DIR_PATH | sed 's/base/perf/'`; export TEST_NAME
+MY_ARCH=`arch`; export MY_ARCH
+
+# storing logs and temporary files variables
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`; export PERFSUITE_RUN_DIR
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ export MAKE_TARGET_DIR="$CURRENT_TEST_DIR/examples"
+ export LOGS_DIR="$CURRENT_TEST_DIR/logs"
+ export HEADER_TAR_DIR="$CURRENT_TEST_DIR/header_tar"
+ test -d "$CURRENT_TEST_DIR" || mkdir -p "$CURRENT_TEST_DIR"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+ export HEADER_TAR_DIR="./header_tar"
+fi
+
#### test parametrization
if [ ! -d ./common ]; then
# set parameters based on runmode
if [ -f ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh ]; then
+ # shellcheck source=/dev/null
. ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh
fi
# if some parameters haven't been set until now, set them to default
diff --git a/tools/perf/tests/shell/ftrace.sh b/tools/perf/tests/shell/ftrace.sh
new file mode 100755
index 000000000000..a6ee740f0d7e
--- /dev/null
+++ b/tools/perf/tests/shell/ftrace.sh
@@ -0,0 +1,89 @@
+#!/bin/sh
+# perf ftrace tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# perf ftrace commands only works for root
+if [ "$(id -u)" != 0 ]; then
+ echo "perf ftrace test [Skipped: no permission]"
+ exit 2
+fi
+
+output=$(mktemp /tmp/__perf_test.ftrace.XXXXXX)
+
+cleanup() {
+ rm -f "${output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# this will be set in test_ftrace_trace()
+target_function=
+
+test_ftrace_list() {
+ echo "perf ftrace list test"
+ perf ftrace -F > "${output}"
+ # this will be used in test_ftrace_trace()
+ sleep_functions=$(grep 'sys_.*sleep$' "${output}")
+ echo "syscalls for sleep:"
+ echo "${sleep_functions}"
+ echo "perf ftrace list test [Success]"
+}
+
+test_ftrace_trace() {
+ echo "perf ftrace trace test"
+ perf ftrace trace --graph-opts depth=5 sleep 0.1 > "${output}"
+ # it should have some function name contains 'sleep'
+ grep "^#" "${output}"
+ grep -F 'sleep()' "${output}"
+ # find actual syscall function name
+ for FN in ${sleep_functions}; do
+ if grep -q "${FN}" "${output}"; then
+ target_function="${FN}"
+ echo "perf ftrace trace test [Success]"
+ return
+ fi
+ done
+
+ echo "perf ftrace trace test [Failure: sleep syscall not found]"
+ exit 1
+}
+
+test_ftrace_latency() {
+ echo "perf ftrace latency test"
+ echo "target function: ${target_function}"
+ perf ftrace latency -T "${target_function}" sleep 0.1 > "${output}"
+ grep "^#" "${output}"
+ grep "###" "${output}"
+ echo "perf ftrace latency test [Success]"
+}
+
+test_ftrace_profile() {
+ echo "perf ftrace profile test"
+ perf ftrace profile sleep 0.1 > "${output}"
+ grep ^# "${output}"
+ grep sleep "${output}"
+ grep schedule "${output}"
+ grep execve "${output}"
+ time_re="[[:space:]]+10[[:digit:]]{4}\.[[:digit:]]{3}"
+ # 100283.000 100283.000 100283.000 1 __x64_sys_clock_nanosleep
+ # Check for one *clock_nanosleep line with a Count of just 1 that takes a bit more than 0.1 seconds
+ # Strip the _x64_sys part to work with other architectures
+ grep -E "^${time_re}${time_re}${time_re}[[:space:]]+1[[:space:]]+.*clock_nanosleep" "${output}"
+ echo "perf ftrace profile test [Success]"
+}
+
+test_ftrace_list
+test_ftrace_trace
+test_ftrace_latency
+test_ftrace_profile
+
+cleanup
+exit 0
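
The profile check in ftrace.sh builds its grep pattern from three repetitions of time_re plus a count of exactly 1. The same match redone in Python, against the sample line quoted in the script's comment:

import re

# The same pattern the profile test greps for, applied to the sample line
# from the script's comment.
time_re = r'\s+10\d{4}\.\d{3}'
pattern = re.compile(rf'^{time_re}{time_re}{time_re}\s+1\s+.*clock_nanosleep')

line = '  100283.000  100283.000  100283.000  1  __x64_sys_clock_nanosleep'
print(bool(pattern.search(line)))   # True: times a bit over 100000, count == 1
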
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index a2d235252183..0b94216c9c46 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -95,7 +95,7 @@ class Validator:
indent=4)
def get_results(self, idx: int = 0):
- return self.results[idx]
+ return self.results.get(idx)
def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
"""
@@ -173,7 +173,10 @@ class Validator:
pcnt = 0
tcnt = 0
rerun = list()
- for name, val in self.get_results().items():
+ results = self.get_results()
+ if not results:
+ return
+ for name, val in results.items():
if val < 0:
negmetric[name] = val
rerun.append(name)
@@ -532,6 +535,9 @@ class Validator:
'''
if not self.collectlist:
self.parse_perf_metrics()
+ if not self.metrics:
+ print("No metric found for testing")
+ return 0
self.create_rules()
for i in range(0, len(self.workloads)):
self.wlidx = i
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index bf4c1fb71c4b..5c33ec7a5a63 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -13,7 +13,12 @@ cleanup_probe_vfs_getname() {
add_probe_vfs_getname() {
add_probe_verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
- line=$(perf probe -L getname_flags 2>&1 | grep -E 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+ result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+ if [ -z "$line" ] ; then
+ result_aname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->aname = NULL;"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_aname_re" | sed -r "s/$result_aname_re/\1/")
+ fi
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
fi
@@ -27,7 +32,7 @@ skip_if_no_debuginfo() {
# check if perf is compiled with libtraceevent support
skip_no_probe_record_support() {
if [ $had_vfs_getname -eq 1 ] ; then
- perf record --dry-run -e $1 2>&1 | grep "libtraceevent is necessary for tracepoint support" && return 2
- return 1
+ perf check feature -q libtraceevent && return 1
+ return 2
fi
}
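
add_probe_vfs_getname() now reuses one regex for both the grep and the sed, capturing the source line number where result->uptr (or, on newer kernels, result->aname) is assigned. The same capture redone in Python, with an invented excerpt of perf probe -L output:

import re

# Hypothetical excerpt of "perf probe -L getname_flags" output; the line
# numbers are invented for illustration.
listing = '''\
     75         atomic_set(&result->refcnt, 1);
     76         result->uptr = filename;
'''

result_filename_re = r'\s+(\d+)\s+result->uptr.*'
m = re.search(result_filename_re, listing)
if m:
    print(m.group(1))   # "76", used to place the probe at getname_flags:76
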
diff --git a/tools/perf/tests/shell/perftool-testsuite_report.sh b/tools/perf/tests/shell/perftool-testsuite_report.sh
new file mode 100755
index 000000000000..973012ce92a7
--- /dev/null
+++ b/tools/perf/tests/shell/perftool-testsuite_report.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# perftool-testsuite_report
+# SPDX-License-Identifier: GPL-2.0
+
+test -d "$(dirname "$0")/base_report" || exit 2
+cd "$(dirname "$0")/base_report" || exit 2
+status=0
+
+PERFSUITE_RUN_DIR=$(mktemp -d /tmp/"$(basename "$0" .sh)".XXX)
+export PERFSUITE_RUN_DIR
+
+for testcase in setup.sh test_*; do # skip setup.sh if not present or not executable
+ test -x "$testcase" || continue
+ ./"$testcase"
+ (( status += $? ))
+done
+
+if ! [ "$PERFTEST_KEEP_LOGS" = "y" ]; then
+ rm -rf "$PERFSUITE_RUN_DIR"
+fi
+
+test $status -ne 0 && exit 1
+exit 0
diff --git a/tools/perf/tests/shell/pipe_test.sh b/tools/perf/tests/shell/pipe_test.sh
index a78d35d2cff0..d4c8005ce9b9 100755
--- a/tools/perf/tests/shell/pipe_test.sh
+++ b/tools/perf/tests/shell/pipe_test.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf pipe recording and injection test
# SPDX-License-Identifier: GPL-2.0
@@ -11,31 +11,116 @@ sym="noploop"
skip_test_missing_symbol ${sym}
data=$(mktemp /tmp/perf.data.XXXXXX)
+data2=$(mktemp /tmp/perf.data2.XXXXXX)
prog="perf test -w noploop"
-task="perf"
+err=0
-if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep ${task}; then
- echo "cannot find the test file in the perf report"
- exit 1
-fi
+set -e
-if ! perf record -e task-clock:u -o - ${prog} | perf inject -b | perf report -i - | grep ${sym}; then
- echo "cannot find noploop function in pipe #1"
- exit 1
-fi
+cleanup() {
+ rm -rf "${data}"
+ rm -rf "${data}".old
+ rm -rf "${data2}"
+ rm -rf "${data2}".old
-perf record -e task-clock:u -o - ${prog} | perf inject -b -o ${data}
-if ! perf report -i ${data} | grep ${sym}; then
- echo "cannot find noploop function in pipe #2"
- exit 1
-fi
+ trap - EXIT TERM INT
+}
-perf record -e task-clock:u -o ${data} ${prog}
-if ! perf inject -b -i ${data} | perf report -i - | grep ${sym}; then
- echo "cannot find noploop function in pipe #3"
- exit 1
-fi
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+test_record_report() {
+ echo
+ echo "Record+report pipe test"
+
+ task="perf"
+ if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep -q ${task}
+ then
+ echo "Record+report pipe test [Failed - cannot find the test file in the perf report #1]"
+ err=1
+ return
+ fi
+
+ if ! perf record -g -e task-clock:u -o - ${prog} | perf report -i - --task | grep -q ${task}
+ then
+ echo "Record+report pipe test [Failed - cannot find the test file in the perf report #2]"
+ err=1
+ return
+ fi
+
+ perf record -g -e task-clock:u -o - ${prog} > ${data}
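+ # The pipe-format stream is saved to a file, so the report below reads pipe data from a regular file.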
+ if ! perf report -i ${data} --task | grep -q ${task}
+ then
+ echo "Record+report pipe test [Failed - cannot find the test file in the perf report #3]"
+ err=1
+ return
+ fi
+
+ echo "Record+report pipe test [Success]"
+}
+
+test_inject_bids() {
+ inject_opt=$1
+
+ echo
+ echo "Inject ${inject_opt} build-ids test"
+
+ if ! perf record -e task-clock:u -o - ${prog} | perf inject ${inject_opt} | perf report -i - | grep -q ${sym}
+ then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #1]"
+ err=1
+ return
+ fi
+
+ if ! perf record -g -e task-clock:u -o - ${prog} | perf inject ${inject_opt} | perf report -i - | grep -q ${sym}
+ then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #2]"
+ err=1
+ return
+ fi
+
+ perf record -e task-clock:u -o - ${prog} | perf inject ${inject_opt} -o ${data}
+ if ! perf report -i ${data} | grep -q ${sym}; then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #3]"
+ err=1
+ return
+ fi
+
+ perf record -e task-clock:u -o ${data} ${prog}
+ if ! perf inject ${inject_opt} -i ${data} | perf report -i - | grep -q ${sym}; then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #4]"
+ err=1
+ return
+ fi
+
+ perf record -e task-clock:u -o - ${prog} > ${data}
+ if ! perf inject ${inject_opt} -i ${data} | perf report -i - | grep -q ${sym}; then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #5]"
+ err=1
+ return
+ fi
+
+ perf record -e task-clock:u -o - ${prog} > ${data}
+ perf inject ${inject_opt} -i ${data} -o ${data2}
+ if ! perf report -i ${data2} | grep -q ${sym}; then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #6]"
+ err=1
+ return
+ fi
+
+ echo "Inject ${inject_opt} build-ids test [Success]"
+}
+
+test_record_report
+test_inject_bids -B
+test_inject_bids -b
+test_inject_bids --buildid-all
+test_inject_bids --mmap2-buildid-all
+
+cleanup
+exit $err
-rm -f ${data} ${data}.old
-exit 0
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 72c65570db37..f38c8ead0b03 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -63,7 +63,10 @@ trace_libc_inet_pton_backtrace() {
# Check presence of libtraceevent support to run perf record
skip_no_probe_record_support "$event_name/$eventattr/"
- [ $? -eq 2 ] && return 2
+ if [ $? -eq 2 ]; then
+ echo "WARN: Skipping test trace_libc_inet_pton_backtrace. No libtraceevent support."
+ return 2
+ fi
perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
# check if perf data file got created in above step.
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index 5eedbe29bba1..9a61928e3c9a 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -21,7 +21,10 @@ record_open_file() {
echo "Recording open file:"
# Check presence of libtraceevent support to run perf record
skip_no_probe_record_support "probe:vfs_getname*"
- [ $? -eq 2 ] && return 2
+ if [ $? -eq 2 ]; then
+ echo "WARN: Skipping test record_open_file. No libtraceevent support"
+ return 2
+ fi
perf record -o ${perfdata} -e probe:vfs_getname\* touch $file
}
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 3d1a7759a7b2..048078ee2eca 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf record tests
# SPDX-License-Identifier: GPL-2.0
@@ -21,6 +21,16 @@ testprog="perf test -w thloop"
cpu_pmu_dir="/sys/bus/event_source/devices/cpu*"
br_cntr_file="/caps/branch_counter_nr"
br_cntr_output="branch stack counters"
+br_cntr_script_output="br_cntr: A"
+
+default_fd_limit=$(ulimit -Sn)
+# With the --threads=cpu option, the number of open file descriptors should
+# equal the sum of: nmb_cpus * nmb_events (2 + dummy),
+# nmb_threads for perf.data.n (equal to nmb_cpus), and
+# 2*nmb_cpus pipes = 4*nmb_cpus descriptors (each pipe has 2 ends).
+# All together that is 8*nmb_cpus file descriptors; since some are also used
+# outside of the test, raise the limit to 16*nmb_cpus.
+min_fd_limit=$(($(getconf _NPROCESSORS_ONLN) * 16))
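+# For example, on a hypothetical 8-CPU machine min_fd_limit is 8 * 16 = 128,
+# and the soft limit is raised only if it is currently below that.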
cleanup() {
rm -rf "${perfdata}"
@@ -165,7 +175,7 @@ test_workload() {
}
test_branch_counter() {
- echo "Basic branch counter test"
+ echo "Branch counter test"
# Check if the branch counter feature is supported
for dir in $cpu_pmu_dir
do
@@ -175,26 +185,63 @@ test_branch_counter() {
return
fi
done
- if ! perf record -o "${perfdata}" -j any,counter ${testprog} 2> /dev/null
+ if ! perf record -o "${perfdata}" -e "{branches:p,instructions}" -j any,counter ${testprog} 2> /dev/null
then
- echo "Basic branch counter test [Failed record]"
+ echo "Branch counter record test [Failed record]"
err=1
return
fi
if ! perf report -i "${perfdata}" -D -q | grep -q "$br_cntr_output"
then
- echo "Basic branch record test [Failed missing output]"
+ echo "Branch counter report test [Failed missing output]"
+ err=1
+ return
+ fi
+ if ! perf script -i "${perfdata}" -F +brstackinsn,+brcntr | grep -q "$br_cntr_script_output"
+ then
+ echo " Branch counter script test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Branch counter test [Success]"
+}
+
+test_cgroup() {
+ echo "Cgroup sampling test"
+ if ! perf record -aB --synth=cgroup --all-cgroups -o "${perfdata}" ${testprog} 2> /dev/null
+ then
+ echo "Cgroup sampling [Skipped not supported]"
+ return
+ fi
+ if ! perf report -i "${perfdata}" -D | grep -q "CGROUP"
+ then
+ echo "Cgroup sampling [Failed missing output]"
+ err=1
+ return
+ fi
+ if ! perf script -i "${perfdata}" -F cgroup | grep -q -v "unknown"
+ then
+ echo "Cgroup sampling [Failed cannot resolve cgroup names]"
err=1
return
fi
- echo "Basic branch counter test [Success]"
+ echo "Cgroup sampling test [Success]"
}
+# Raise the soft file descriptor limit to the required minimum.
+if [[ $default_fd_limit -lt $min_fd_limit ]]; then
+ ulimit -Sn $min_fd_limit
+fi
+
test_per_thread
test_register_capture
test_system_wide
test_workload
test_branch_counter
+test_cgroup
+
+# restore the default value
+ulimit -Sn $default_fd_limit
cleanup
exit $err
diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
index 31c593966e8c..1b58ccc1fd88 100755
--- a/tools/perf/tests/shell/record_bpf_filter.sh
+++ b/tools/perf/tests/shell/record_bpf_filter.sh
@@ -22,15 +22,16 @@ trap trap_cleanup EXIT TERM INT
test_bpf_filter_priv() {
echo "Checking BPF-filter privilege"
- if [ "$(id -u)" != 0 ]
- then
- echo "bpf-filter test [Skipped permission]"
- err=2
- return
- fi
if ! perf record -e task-clock --filter 'period > 1' \
-o /dev/null --quiet true 2>&1
then
+ if [ "$(id -u)" != 0 ]
+ then
+ echo "try 'sudo perf record --setup-filter pin' first."
+ echo "bpf-filter test [Skipped permission]"
+ err=2
+ return
+ fi
echo "bpf-filter test [Skipped missing BPF support]"
err=2
return
@@ -67,7 +68,7 @@ test_bpf_filter_fail() {
# 'cpu' requires PERF_SAMPLE_CPU flag
if ! perf record -e task-clock --filter 'cpu > 0' \
- -o /dev/null true 2>&1 | grep PERF_SAMPLE_CPU
+ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU
then
echo "Failing bpf-filter test [Failed forbidden CPU]"
err=1
@@ -97,7 +98,7 @@ test_bpf_filter_group() {
fi
if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \
- -o /dev/null true 2>&1 | grep PERF_SAMPLE_CPU
+ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU
then
echo "Group bpf-filter test [Failed forbidden CPU]"
err=1
@@ -105,7 +106,7 @@ test_bpf_filter_group() {
fi
if ! perf record -e task-clock --filter 'period > 0 || code_pgsz > 4096' \
- -o /dev/null true 2>&1 | grep PERF_SAMPLE_CODE_PAGE_SIZE
+ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CODE_PAGE_SIZE
then
echo "Group bpf-filter test [Failed forbidden CODE_PAGE_SIZE]"
err=1
@@ -115,6 +116,65 @@ test_bpf_filter_group() {
echo "Group bpf-filter test [Success]"
}
+test_bpf_filter_multi() {
+ echo "Multiple bpf-filter test"
+
+ if ! perf record -e task-clock --filter 'period > 100000' \
+ -e page-faults --filter 'ip < 0xffffffff00000000' \
+ -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Multiple bpf-filter test [Failed record]"
+ err=1
+ return
+ fi
+
+ if ! perf script -i "${perfdata}" -F period,event | grep task-clock | \
+ awk '{ if (int($1) <= 100000) { print $0; exit(1); } }'
+ then
+ echo "Multiple bpf-filter test [Failed task-clock period]"
+ err=1
+ return
+ fi
+
+ if perf script -i "${perfdata}" -F event,ip | grep page-fault | \
+ grep 'ffffffff[0-9a-f]*'
+ then
+ echo "Multiple bpf-filter test [Failed page-faults ip]"
+ err=1
+ return
+ fi
+
+ echo "Multiple bpf-filter test [Success]"
+}
+
+test_bpf_filter_cgroup() {
+ echo "Cgroup bpf-filter test"
+
+ if ! perf record -e task-clock --filter 'cgroup == /' \
+ -a --all-cgroups --synth=cgroup -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Cgroup bpf-filter test [Skipped cgroup not supported]"
+ return
+ fi
+
+ # 'cgroup' requires PERF_SAMPLE_CGROUP flag
+ if ! perf record -e task-clock --filter 'cgroup == /' \
+ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CGROUP
+ then
+ echo "Cgroup bpf-filter test [Failed CGROUP requires --all-cgroups]"
+ err=1
+ return
+ fi
+
+ if ! perf report -i "${perfdata}" -s cgroup -q | grep -q -F '100.00%'
+ then
+ echo "Cgroup bpf-filter test [Failed root cgroup does not have 100%]"
+ err=1
+ return
+ fi
+
+ echo "Cgroup bpf-filter test [Success]"
+}
test_bpf_filter_priv
@@ -130,5 +190,13 @@ if [ $err = 0 ]; then
test_bpf_filter_group
fi
+if [ $err = 0 ]; then
+ test_bpf_filter_multi
+fi
+
+if [ $err = 0 ]; then
+ test_bpf_filter_cgroup
+fi
+
cleanup
exit $err
diff --git a/tools/perf/tests/shell/record_lbr.sh b/tools/perf/tests/shell/record_lbr.sh
new file mode 100755
index 000000000000..32314641217e
--- /dev/null
+++ b/tools/perf/tests/shell/record_lbr.sh
@@ -0,0 +1,161 @@
+#!/bin/bash
+# perf record LBR tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+if [ ! -f /sys/devices/cpu/caps/branches ] && [ ! -f /sys/devices/cpu_core/caps/branches ]
+then
+ echo "Skip: only x86 CPUs support LBR"
+ exit 2
+fi
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -rf "${perfdata}"
+ rm -rf "${perfdata}".old
+ rm -rf "${perfdata}".txt
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+
+lbr_callgraph_test() {
+ test="LBR callgraph"
+
+ echo "$test"
+ if ! perf record -e cycles --call-graph lbr -o "${perfdata}" perf test -w thloop
+ then
+ echo "$test [Failed support missing]"
+ if [ $err -eq 0 ]
+ then
+ err=2
+ fi
+ return
+ fi
+
+ if ! perf report --stitch-lbr -i "${perfdata}" > "${perfdata}".txt
+ then
+ cat "${perfdata}".txt
+ echo "$test [Failed in perf report]"
+ err=1
+ return
+ fi
+
+ echo "$test [Success]"
+}
+
+lbr_test() {
+ local branch_flags=$1
+ local test="LBR $2 test"
+ local threshold=$3
+ local out
+ local sam_nr
+ local bs_nr
+ local zero_nr
+ local r
+
+ echo "$test"
+ if ! perf record -e cycles $branch_flags -o "${perfdata}" perf test -w thloop
+ then
+ echo "$test [Failed support missing]"
+ perf record -e cycles $branch_flags -o "${perfdata}" perf test -w thloop || true
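+ # '|| true' keeps the rerun from aborting the script under 'set -e'.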
+ if [ $err -eq 0 ]
+ then
+ err=2
+ fi
+ return
+ fi
+
+ out=$(perf report -D -i "${perfdata}" 2> /dev/null | grep -A1 'PERF_RECORD_SAMPLE')
+ sam_nr=$(echo "$out" | grep -c 'PERF_RECORD_SAMPLE' || true)
+ if [ $sam_nr -eq 0 ]
+ then
+ echo "$test [Failed no samples captured]"
+ err=1
+ return
+ fi
+ echo "$test: $sam_nr samples"
+
+ bs_nr=$(echo "$out" | grep -c 'branch stack: nr:' || true)
+ if [ $sam_nr -ne $bs_nr ]
+ then
+ echo "$test [Failed samples missing branch stacks]"
+ err=1
+ return
+ fi
+
+ zero_nr=$(echo "$out" | grep -c 'branch stack: nr:0' || true)
+ r=$(($zero_nr * 100 / $bs_nr))
+ if [ $r -gt $threshold ]; then
+ echo "$test [Failed empty br stack ratio exceed $threshold%: $r%]"
+ err=1
+ return
+ fi
+
+ echo "$test [Success]"
+}
+
+parallel_lbr_test() {
+ err=0
+ perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+ lbr_test "$1" "$2" "$3"
+ cleanup
+ exit $err
+}
+
+lbr_callgraph_test
+
+# Sequential
+lbr_test "-b" "any branch" 2
+lbr_test "-j any_call" "any call" 2
+lbr_test "-j any_ret" "any ret" 2
+lbr_test "-j ind_call" "any indirect call" 2
+lbr_test "-j ind_jmp" "any indirect jump" 100
+lbr_test "-j call" "direct calls" 2
+lbr_test "-j ind_call,u" "any indirect user call" 100
+lbr_test "-a -b" "system wide any branch" 2
+lbr_test "-a -j any_call" "system wide any call" 2
+
+# Parallel
+parallel_lbr_test "-b" "parallel any branch" 100 &
+pid1=$!
+parallel_lbr_test "-j any_call" "parallel any call" 100 &
+pid2=$!
+parallel_lbr_test "-j any_ret" "parallel any ret" 100 &
+pid3=$!
+parallel_lbr_test "-j ind_call" "parallel any indirect call" 100 &
+pid4=$!
+parallel_lbr_test "-j ind_jmp" "parallel any indirect jump" 100 &
+pid5=$!
+parallel_lbr_test "-j call" "parallel direct calls" 100 &
+pid6=$!
+parallel_lbr_test "-j ind_call,u" "parallel any indirect user call" 100 &
+pid7=$!
+parallel_lbr_test "-a -b" "parallel system wide any branch" 100 &
+pid8=$!
+parallel_lbr_test "-a -j any_call" "parallel system wide any call" 100 &
+pid9=$!
+
+for pid in $pid1 $pid2 $pid3 $pid4 $pid5 $pid6 $pid7 $pid8 $pid9
+do
+ set +e
+ wait $pid
+ child_err=$?
+ set -e
+ if ([ $err -eq 2 ] && [ $child_err -eq 1 ]) || [ $err -eq 0 ]
+ then
+ err=$child_err
+ fi
+done
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/script.sh b/tools/perf/tests/shell/script.sh
index c1a603653662..d3e2958d2242 100755
--- a/tools/perf/tests/shell/script.sh
+++ b/tools/perf/tests/shell/script.sh
@@ -61,7 +61,10 @@ _end_of_file_
esac
perf record $cmd_flags -o "${perfdatafile}" true
+ # Disable LSan to avoid warnings about Python memory leaks.
+ export ASAN_OPTIONS=detect_leaks=0
perf script -i "${perfdatafile}" -s "${db_test}"
+ export ASAN_OPTIONS=
echo "DB test [Success]"
}
diff --git a/tools/perf/tests/shell/test_stat_intel_tpebs.sh b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
new file mode 100755
index 000000000000..c60b29add980
--- /dev/null
+++ b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# test Intel TPEBS counting mode
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
+
+# Use this event for testing because it should exist in all platforms
+event=cache-misses:R
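+# The :R modifier requests the event's retire latency, the value TPEBS samples.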
+
+# Without the --record-tpebs option, the default value or zero is returned.
+echo "Testing without --record-tpebs"
+result=$(perf stat -e "$event" true 2>&1)
+[[ "$result" =~ $event ]] || exit 1
+
+# On platforms that do not support TPEBS, this should execute without error.
+echo "Testing with --record-tpebs"
+result=$(perf stat -e "$event" --record-tpebs -a sleep 0.01 2>&1)
+[[ "$result" =~ "perf record" && "$result" =~ $event ]] || exit 1
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
index 92d15154ba79..7d76fc63d995 100755
--- a/tools/perf/tests/shell/test_task_analyzer.sh
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -11,6 +11,9 @@ if [ -e "$perfdir/scripts/python/Perf-Trace-Util" ]; then
export PERF_EXEC_PATH=$perfdir
fi
+# Disable LSan to avoid warnings about Python memory leaks.
+export ASAN_OPTIONS=detect_leaks=0
+
cleanup() {
rm -f perf.data
rm -f perf.data.old
@@ -52,8 +55,8 @@ find_str_or_fail() {
# check if perf is compiled with libtraceevent support
skip_no_probe_record_support() {
- perf version --build-options | grep -q " OFF .* HAVE_LIBTRACEEVENT" && return 2
- return 0
+ perf check feature -q libtraceevent && return 0
+ return 2
}
prepare_perf_data() {
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
index 82bc774a078a..33387c329f92 100755
--- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -4,6 +4,13 @@
set -e
+# Skip if there's no probe command.
+if ! perf | grep probe
+then
+ echo "Skip: probe command isn't present"
+ exit 2
+fi
+
# skip if there's no gcc
if ! [ -x "$(command -v gcc)" ]; then
echo "failed: no gcc compiler"
diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
new file mode 100755
index 000000000000..5a3b8a5a9b5c
--- /dev/null
+++ b/tools/perf/tests/shell/trace_btf_enum.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+# perf trace enum augmentation tests
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+set -e
+
+syscall="landlock_add_rule"
+non_syscall="timer:hrtimer_init,timer:hrtimer_start"
+
+TESTPROG="perf test -w landlock"
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+skip_if_no_perf_trace || exit 2
+
+check_vmlinux() {
+ echo "Checking if vmlinux exists"
+ if ! ls /sys/kernel/btf/vmlinux 1>/dev/null 2>&1
+ then
+ echo "trace+enum test [Skipped missing vmlinux BTF support]"
+ err=2
+ fi
+}
+
+trace_landlock() {
+ echo "Tracing syscall ${syscall}"
+
+ # trial run just to see if landlock_add_rule and libbpf are available
+ $TESTPROG
+
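+ # Expect ruleset_fd 11 and flags 45, the values hard-coded in the landlock workload.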
+ if perf trace -e $syscall $TESTPROG 2>&1 | \
+ grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
+ then
+ err=0
+ else
+ err=1
+ fi
+}
+
+trace_non_syscall() {
+ echo "Tracing non-syscall tracepoint ${non-syscall}"
+ if perf trace -e $non_syscall --max-events=1 2>&1 | \
+ grep -q -E '.*timer:hrtimer_.*\(.*mode: HRTIMER_MODE_.*\)$'
+ then
+ err=0
+ else
+ err=1
+ fi
+}
+
+check_vmlinux
+
+if [ $err = 0 ]; then
+ trace_landlock
+fi
+
+if [ $err = 0 ]; then
+ trace_non_syscall
+fi
+
+exit $err
diff --git a/tools/perf/tests/stat.c b/tools/perf/tests/stat.c
index 706780fb5695..6468cc0d0204 100644
--- a/tools/perf/tests/stat.c
+++ b/tools/perf/tests/stat.c
@@ -21,7 +21,7 @@ static bool has_term(struct perf_record_stat_config *config,
return false;
}
-static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
+static int process_stat_config_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -62,7 +62,7 @@ static int test__synthesize_stat_config(struct test_suite *test __maybe_unused,
return 0;
}
-static int process_stat_event(struct perf_tool *tool __maybe_unused,
+static int process_stat_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
@@ -93,7 +93,7 @@ static int test__synthesize_stat(struct test_suite *test __maybe_unused, int sub
return 0;
}
-static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
+static int process_stat_round_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c
index e2042b368269..ed114b044293 100644
--- a/tools/perf/tests/tests-scripts.c
+++ b/tools/perf/tests/tests-scripts.c
@@ -29,16 +29,45 @@
static int shell_tests__dir_fd(void)
{
- char path[PATH_MAX], *exec_path;
- static const char * const devel_dirs[] = { "./tools/perf/tests/shell", "./tests/shell", };
+ struct stat st;
+ char path[PATH_MAX], path2[PATH_MAX], *exec_path;
+ static const char * const devel_dirs[] = {
+ "./tools/perf/tests/shell",
+ "./tests/shell",
+ "./source/tests/shell"
+ };
+ int fd;
+ char *p;
+ ssize_t len;
for (size_t i = 0; i < ARRAY_SIZE(devel_dirs); ++i) {
- int fd = open(devel_dirs[i], O_PATH);
+ fd = open(devel_dirs[i], O_PATH);
if (fd >= 0)
return fd;
}
+ /* Use the directory of the executable */
+ len = readlink("/proc/self/exe", path2, sizeof(path2) - 1);
+ if (len < 0)
+ return -1;
+ path2[len] = '\0'; /* readlink() does not NUL-terminate */
+ /* Follow one more level of symlink if there is one */
+ if (lstat(path2, &st) == 0 && S_ISLNK(st.st_mode)) {
+ scnprintf(path, sizeof(path), "%s", path2);
+ len = readlink(path, path2, sizeof(path2) - 1);
+ if (len < 0)
+ return -1;
+ path2[len] = '\0';
+ }
+ /* Get directory */
+ p = strrchr(path2, '/');
+ if (p)
+ *p = 0;
+ scnprintf(path, sizeof(path), "%s/tests/shell", path2);
+ fd = open(path, O_PATH);
+ if (fd >= 0)
+ return fd;
+ scnprintf(path, sizeof(path), "%s/source/tests/shell", path2);
+ fd = open(path, O_PATH);
+ if (fd >= 0)
+ return fd;
+
/* Then installed path. */
exec_path = get_argv_exec_path();
scnprintf(path, sizeof(path), "%s/tests/shell", exec_path);
@@ -222,6 +251,8 @@ static void append_scripts_in_dir(int dir_fd,
if (!S_ISDIR(st.st_mode))
continue;
}
+ if (strncmp(ent->d_name, "base_", 5) == 0)
+ continue; /* Skip scripts that have a separate driver. */
fd = openat(dir_fd, ent->d_name, O_PATH);
append_scripts_in_dir(fd, result, result_sz);
}
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 3aa7701ee0e9..6ea2be86b7bf 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -205,6 +205,7 @@ DECLARE_WORKLOAD(leafloop);
DECLARE_WORKLOAD(sqrtloop);
DECLARE_WORKLOAD(brstack);
DECLARE_WORKLOAD(datasym);
+DECLARE_WORKLOAD(landlock);
extern const char *dso_to_test;
extern const char *test_objdump_path;
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index 74308c1368fe..1fe521466bf4 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -60,7 +60,7 @@ static int test__thread_map(struct test_suite *test __maybe_unused, int subtest
return 0;
}
-static int process_event(struct perf_tool *tool __maybe_unused,
+static int process_event(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index cd3b480d20bd..74cdbd2ce9d0 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -131,7 +131,7 @@ static int test__vmlinux_matches_kallsyms_cb1(struct map *map, void *data)
(dso__kernel(dso) ? dso__short_name(dso) : dso__name(dso)));
if (pair) {
- map__set_priv(pair, 1);
+ map__set_priv(pair);
map__put(pair);
} else {
if (!args->header_printed) {
@@ -166,7 +166,7 @@ static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
map__start(pair), map__end(pair), map__pgoff(pair));
pr_info(" %s\n", dso__name(dso));
- map__set_priv(pair, 1);
+ map__set_priv(pair);
}
map__put(pair);
return 0;
diff --git a/tools/perf/tests/workloads/Build b/tools/perf/tests/workloads/Build
index 48bf0d3b0f3d..5af17206f04d 100644
--- a/tools/perf/tests/workloads/Build
+++ b/tools/perf/tests/workloads/Build
@@ -6,6 +6,7 @@ perf-test-y += leafloop.o
perf-test-y += sqrtloop.o
perf-test-y += brstack.o
perf-test-y += datasym.o
+perf-test-y += landlock.o
CFLAGS_sqrtloop.o = -g -O0 -fno-inline -U_FORTIFY_SOURCE
CFLAGS_leafloop.o = -g -O0 -fno-inline -fno-omit-frame-pointer -U_FORTIFY_SOURCE
diff --git a/tools/perf/tests/workloads/landlock.c b/tools/perf/tests/workloads/landlock.c
new file mode 100644
index 000000000000..e2b5ef647c09
--- /dev/null
+++ b/tools/perf/tests/workloads/landlock.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <unistd.h>
+#include "../tests.h"
+
+/* This workload was initially added to test enum augmentation with BTF in perf
+ * trace because it's the only syscall that has an enum argument. Since it is
+ * a recent addition to the Linux kernel (at the time this 'perf test' workload
+ * was introduced), we just add the required types and defines here instead of
+ * including linux/landlock.h, which isn't available on older systems.
+ *
+ * We are not interested in the result of the syscall, just in intercepting
+ * its arguments.
+ */
+
+#ifndef __NR_landlock_add_rule
+#define __NR_landlock_add_rule 445
+#endif
+
+#ifndef LANDLOCK_ACCESS_FS_READ_FILE
+#define LANDLOCK_ACCESS_FS_READ_FILE (1ULL << 2)
+
+#define LANDLOCK_RULE_PATH_BENEATH 1
+
+struct landlock_path_beneath_attr {
+ __u64 allowed_access;
+ __s32 parent_fd;
+};
+#endif
+
+#ifndef LANDLOCK_ACCESS_NET_CONNECT_TCP
+#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1)
+
+#define LANDLOCK_RULE_NET_PORT 2
+
+struct landlock_net_port_attr {
+ __u64 allowed_access;
+ __u64 port;
+};
+#endif
+
+static int landlock(int argc __maybe_unused, const char **argv __maybe_unused)
+{
+ int fd = 11, flags = 45;
+
+ struct landlock_path_beneath_attr path_beneath_attr = {
+ .allowed_access = LANDLOCK_ACCESS_FS_READ_FILE,
+ .parent_fd = 14,
+ };
+
+ struct landlock_net_port_attr net_port_attr = {
+ .port = 19,
+ .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
+ };
+
+ syscall(__NR_landlock_add_rule, fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath_attr, flags);
+
+ syscall(__NR_landlock_add_rule, fd, LANDLOCK_RULE_NET_PORT,
+ &net_port_attr, flags);
+
+ return 0;
+}
+
+DEFINE_WORKLOAD(landlock);
diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c
index cc8719609b19..6c178985e37f 100644
--- a/tools/perf/tests/wp.c
+++ b/tools/perf/tests/wp.c
@@ -20,7 +20,12 @@ do { \
TEST_ASSERT_VAL(text, count == val); \
} while (0)
+#ifdef __i386__
+/* Only breakpoint lengths of less than 8 bytes have hardware support on i386. */
+volatile u32 data1;
+#else
volatile u64 data1;
+#endif
volatile u8 data2[3];
#ifndef __s390x__
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 78d10d92d351..0a07ad158f87 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -113,6 +113,7 @@ struct syscall_arg {
struct thread *thread;
struct trace *trace;
void *parm;
+ char *type_name;
u16 len;
u8 idx;
u8 mask;
@@ -228,6 +229,11 @@ size_t syscall_arg__scnprintf_renameat2_flags(char *bf, size_t size, struct sysc
size_t syscall_arg__scnprintf_sockaddr(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_SOCKADDR syscall_arg__scnprintf_sockaddr
+// 'argname' is documentation only at this point; it replaces the comment that used to carry that info
+#define SCA_SOCKADDR_FROM_USER(argname) \
+ { .scnprintf = SCA_SOCKADDR, \
+ .from_user = true, }
+
size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_SK_PROTO syscall_arg__scnprintf_socket_protocol
@@ -249,6 +255,11 @@ size_t syscall_arg__scnprintf_sync_file_range_flags(char *bf, size_t size, struc
size_t syscall_arg__scnprintf_timespec(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_TIMESPEC syscall_arg__scnprintf_timespec
+// 'argname' is documentation only at this point; it replaces the comment that used to carry that info
+#define SCA_TIMESPEC_FROM_USER(argname) \
+ { .scnprintf = SCA_TIMESPEC, \
+ .from_user = true, }
+
size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix);
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
diff --git a/tools/perf/trace/beauty/perf_event_open.c b/tools/perf/trace/beauty/perf_event_open.c
index 01ee15fe9d0c..9f1ed989c775 100644
--- a/tools/perf/trace/beauty/perf_event_open.c
+++ b/tools/perf/trace/beauty/perf_event_open.c
@@ -76,7 +76,7 @@ static size_t perf_event_attr___scnprintf(struct perf_event_attr *attr, char *bf
static size_t syscall_arg__scnprintf_augmented_perf_event_attr(struct syscall_arg *arg, char *bf, size_t size)
{
- return perf_event_attr___scnprintf((void *)arg->augmented.args, bf, size, arg->trace->show_zeros);
+ return perf_event_attr___scnprintf((void *)arg->augmented.args->value, bf, size, arg->trace->show_zeros);
}
static size_t syscall_arg__scnprintf_perf_event_attr(char *bf, size_t size, struct syscall_arg *arg)
@@ -88,3 +88,7 @@ static size_t syscall_arg__scnprintf_perf_event_attr(char *bf, size_t size, stru
}
#define SCA_PERF_ATTR syscall_arg__scnprintf_perf_event_attr
+// 'argname' is documentation only at this point; it replaces the comment that used to carry that info
+#define SCA_PERF_ATTR_FROM_USER(argname) \
+ { .scnprintf = SCA_PERF_ATTR, \
+ .from_user = true, }
diff --git a/tools/perf/trace/beauty/sockaddr.c b/tools/perf/trace/beauty/sockaddr.c
index 2e0e867c0c1b..a17a27ac2a6f 100644
--- a/tools/perf/trace/beauty/sockaddr.c
+++ b/tools/perf/trace/beauty/sockaddr.c
@@ -47,7 +47,7 @@ static size_t (*af_scnprintfs[])(struct sockaddr *sa, char *bf, size_t size) = {
static size_t syscall_arg__scnprintf_augmented_sockaddr(struct syscall_arg *arg, char *bf, size_t size)
{
- struct sockaddr *sa = (struct sockaddr *)arg->augmented.args;
+ struct sockaddr *sa = (struct sockaddr *)&arg->augmented.args->value;
char family[32];
size_t printed;
diff --git a/tools/perf/trace/beauty/timespec.c b/tools/perf/trace/beauty/timespec.c
index e1a61f092aad..b14ab72a2738 100644
--- a/tools/perf/trace/beauty/timespec.c
+++ b/tools/perf/trace/beauty/timespec.c
@@ -7,7 +7,7 @@
static size_t syscall_arg__scnprintf_augmented_timespec(struct syscall_arg *arg, char *bf, size_t size)
{
- struct timespec *ts = (struct timespec *)arg->augmented.args;
+ struct timespec *ts = (struct timespec *)arg->augmented.args->value;
return scnprintf(bf, size, "{ .tv_sec: %" PRIu64 ", .tv_nsec: %" PRIu64 " }", ts->tv_sec, ts->tv_nsec);
}
diff --git a/tools/perf/ui/browsers/annotate-data.c b/tools/perf/ui/browsers/annotate-data.c
index 8d6bf08d371d..cd562a8822b7 100644
--- a/tools/perf/ui/browsers/annotate-data.c
+++ b/tools/perf/ui/browsers/annotate-data.c
@@ -14,17 +14,26 @@
#include "util/evlist.h"
#include "util/sort.h"
-struct annotated_data_browser {
- struct ui_browser b;
- struct list_head entries;
- int nr_events;
-};
+#define FOLDED_SIGN '+'
+#define UNFOLD_SIGN '-'
+#define NOCHLD_SIGN ' '
struct browser_entry {
struct list_head node;
struct annotated_member *data;
struct type_hist_entry *hists;
- int indent;
+ struct browser_entry *parent;
+ struct list_head children;
+ int indent; /* indentation level, starts from 0 */
+ int nr_entries; /* # of visible entries: self + descendants */
+ bool folded; /* can only be false when it has children */
+};
+
+struct annotated_data_browser {
+ struct ui_browser b;
+ struct list_head entries;
+ struct browser_entry *curr;
+ int nr_events;
};
static struct annotated_data_browser *get_browser(struct ui_browser *uib)
@@ -51,23 +60,28 @@ static int get_member_overhead(struct annotated_data_type *adt,
struct evsel *evsel;
int offset = member->offset + i;
+ k = 0;
for_each_group_evsel(evsel, leader) {
+ if (symbol_conf.skip_empty &&
+ evsel__hists(evsel)->stats.nr_samples == 0)
+ continue;
+
h = adt->histograms[evsel->core.idx];
- k = evsel__group_idx(evsel);
- update_hist_entry(&entry->hists[k], &h->addr[offset]);
+ update_hist_entry(&entry->hists[k++], &h->addr[offset]);
}
}
return 0;
}
static int add_child_entries(struct annotated_data_browser *browser,
+ struct browser_entry *parent,
struct annotated_data_type *adt,
struct annotated_member *member,
struct evsel *evsel, int indent)
{
struct annotated_member *pos;
struct browser_entry *entry;
- int nr_entries = 0;
+ struct list_head *parent_list;
entry = zalloc(sizeof(*entry));
if (entry == NULL)
@@ -80,36 +94,60 @@ static int add_child_entries(struct annotated_data_browser *browser,
}
entry->data = member;
+ entry->parent = parent;
entry->indent = indent;
if (get_member_overhead(adt, entry, evsel) < 0) {
free(entry);
return -1;
}
- list_add_tail(&entry->node, &browser->entries);
- nr_entries++;
+ INIT_LIST_HEAD(&entry->children);
+ if (parent)
+ parent_list = &parent->children;
+ else
+ parent_list = &browser->entries;
- list_for_each_entry(pos, &member->children, node) {
- int nr = add_child_entries(browser, adt, pos, evsel, indent + 1);
+ list_add_tail(&entry->node, parent_list);
+ list_for_each_entry(pos, &member->children, node) {
+ int nr = add_child_entries(browser, entry, adt, pos, evsel,
+ indent + 1);
if (nr < 0)
return nr;
-
- nr_entries += nr;
}
/* add an entry for the closing bracket ("}") */
if (!list_empty(&member->children)) {
- entry = zalloc(sizeof(*entry));
- if (entry == NULL)
+ struct browser_entry *bracket;
+
+ bracket = zalloc(sizeof(*bracket));
+ if (bracket == NULL)
return -1;
- entry->indent = indent;
- list_add_tail(&entry->node, &browser->entries);
- nr_entries++;
+ bracket->indent = indent;
+ bracket->parent = entry;
+ bracket->folded = true;
+ bracket->nr_entries = 1;
+
+ INIT_LIST_HEAD(&bracket->children);
+ list_add_tail(&bracket->node, &entry->children);
}
- return nr_entries;
+ /* fold child entries by default */
+ entry->folded = true;
+ entry->nr_entries = 1;
+ return 0;
+}
+
+static u32 count_visible_entries(struct annotated_data_browser *browser)
+{
+ int nr = 0;
+ struct browser_entry *entry;
+
+ list_for_each_entry(entry, &browser->entries, node)
+ nr += entry->nr_entries;
+
+ return nr;
}
static int annotated_data_browser__collect_entries(struct annotated_data_browser *browser)
@@ -119,9 +157,12 @@ static int annotated_data_browser__collect_entries(struct annotated_data_browser
struct evsel *evsel = hists_to_evsel(he->hists);
INIT_LIST_HEAD(&browser->entries);
+
+ add_child_entries(browser, /*parent=*/NULL, adt, &adt->self, evsel,
+ /*indent=*/0);
+
browser->b.entries = &browser->entries;
- browser->b.nr_entries = add_child_entries(browser, adt, &adt->self,
- evsel, /*indent=*/0);
+ browser->b.nr_entries = count_visible_entries(browser);
return 0;
}
@@ -136,9 +177,158 @@ static void annotated_data_browser__delete_entries(struct annotated_data_browser
}
}
+static struct browser_entry *get_first_child(struct browser_entry *entry)
+{
+ if (list_empty(&entry->children))
+ return NULL;
+
+ return list_first_entry(&entry->children, struct browser_entry, node);
+}
+
+static struct browser_entry *get_last_child(struct browser_entry *entry)
+{
+ if (list_empty(&entry->children))
+ return NULL;
+
+ return list_last_entry(&entry->children, struct browser_entry, node);
+}
+
+static bool is_first_child(struct browser_entry *entry)
+{
+ /* This will be checked in a different way */
+ if (entry->parent == NULL)
+ return false;
+
+ return get_first_child(entry->parent) == entry;
+}
+
+static bool is_last_child(struct browser_entry *entry)
+{
+ /* This will be checked in a different way */
+ if (entry->parent == NULL)
+ return false;
+
+ return get_last_child(entry->parent) == entry;
+}
+
+static struct browser_entry *browser__prev_entry(struct ui_browser *uib,
+ struct browser_entry *entry)
+{
+ struct annotated_data_browser *browser = get_browser(uib);
+ struct browser_entry *first;
+
+ first = list_first_entry(&browser->entries, struct browser_entry, node);
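+ /*
+ * Walk one step back in display order: a first child moves up to its
+ * parent; otherwise step to the previous sibling and descend into its
+ * deepest unfolded child.
+ */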
+
+ while (entry != first) {
+ if (is_first_child(entry))
+ entry = entry->parent;
+ else {
+ entry = list_prev_entry(entry, node);
+ while (!entry->folded)
+ entry = get_last_child(entry);
+ }
+
+ if (!uib->filter || !uib->filter(uib, &entry->node))
+ return entry;
+ }
+ return first;
+}
+
+static struct browser_entry *browser__next_entry(struct ui_browser *uib,
+ struct browser_entry *entry)
+{
+ struct annotated_data_browser *browser = get_browser(uib);
+ struct browser_entry *last;
+
+ last = list_last_entry(&browser->entries, struct browser_entry, node);
+ while (!last->folded)
+ last = get_last_child(last);
+
+ while (entry != last) {
+ if (!entry->folded)
+ entry = get_first_child(entry);
+ else {
+ while (is_last_child(entry))
+ entry = entry->parent;
+
+ entry = list_next_entry(entry, node);
+ }
+
+ if (!uib->filter || !uib->filter(uib, &entry->node))
+ return entry;
+ }
+ return last;
+}
+
+static void browser__seek(struct ui_browser *uib, off_t offset, int whence)
+{
+ struct annotated_data_browser *browser = get_browser(uib);
+ struct browser_entry *entry;
+
+ if (uib->nr_entries == 0)
+ return;
+
+ switch (whence) {
+ case SEEK_SET:
+ entry = list_first_entry(&browser->entries, typeof(*entry), node);
+ if (uib->filter && uib->filter(uib, &entry->node))
+ entry = browser__next_entry(uib, entry);
+ break;
+ case SEEK_CUR:
+ entry = list_entry(uib->top, typeof(*entry), node);
+ break;
+ case SEEK_END:
+ entry = list_last_entry(&browser->entries, typeof(*entry), node);
+ while (!entry->folded)
+ entry = get_last_child(entry);
+ if (uib->filter && uib->filter(uib, &entry->node))
+ entry = browser__prev_entry(uib, entry);
+ break;
+ default:
+ return;
+ }
+
+ assert(entry != NULL);
+
+ if (offset > 0) {
+ while (offset-- != 0)
+ entry = browser__next_entry(uib, entry);
+ } else {
+ while (offset++ != 0)
+ entry = browser__prev_entry(uib, entry);
+ }
+
+ uib->top = &entry->node;
+}
+
static unsigned int browser__refresh(struct ui_browser *uib)
{
- return ui_browser__list_head_refresh(uib);
+ struct annotated_data_browser *browser = get_browser(uib);
+ struct browser_entry *entry, *next;
+ int row = 0;
+
+ if (uib->top == NULL || uib->top == uib->entries)
+ browser__seek(uib, 0, SEEK_SET);
+
+ entry = list_entry(uib->top, typeof(*entry), node);
+
+ while (true) {
+ if (!uib->filter || !uib->filter(uib, &entry->node)) {
+ ui_browser__gotorc(uib, row, 0);
+ uib->write(uib, entry, row);
+ if (uib->top_idx + row == uib->index)
+ browser->curr = entry;
+ if (++row == uib->rows)
+ break;
+ }
+ next = browser__next_entry(uib, entry);
+ if (next == entry)
+ break;
+
+ entry = next;
+ }
+
+ return row;
}
static int browser__show(struct ui_browser *uib)
@@ -167,7 +357,7 @@ static int browser__show(struct ui_browser *uib)
strcpy(title, "Percent");
ui_browser__printf(uib, "%*s %10s %10s %10s %s",
- 11 * (browser->nr_events - 1), "",
+ 2 + 11 * (browser->nr_events - 1), "",
title, "Offset", "Size", "Field");
ui_browser__write_nstring(uib, "", uib->width);
return 0;
@@ -203,12 +393,13 @@ static void browser__write(struct ui_browser *uib, void *entry, int row)
struct annotated_data_type *adt = he->mem_type;
struct evsel *leader = hists_to_evsel(he->hists);
struct evsel *evsel;
+ int idx = 0;
+ bool current = ui_browser__is_current_entry(uib, row);
if (member == NULL) {
- bool current = ui_browser__is_current_entry(uib, row);
-
/* print the closing bracket */
ui_browser__set_percent_color(uib, 0, current);
+ ui_browser__printf(uib, "%c ", NOCHLD_SIGN);
ui_browser__write_nstring(uib, "", 11 * browser->nr_events);
ui_browser__printf(uib, " %10s %10s %*s};",
"", "", be->indent * 4, "");
@@ -216,31 +407,113 @@ static void browser__write(struct ui_browser *uib, void *entry, int row)
return;
}
+ ui_browser__set_percent_color(uib, 0, current);
+
+ if (!list_empty(&be->children))
+ ui_browser__printf(uib, "%c ", be->folded ? FOLDED_SIGN : UNFOLD_SIGN);
+ else
+ ui_browser__printf(uib, "%c ", NOCHLD_SIGN);
+
/* print the number */
for_each_group_evsel(evsel, leader) {
struct type_hist *h = adt->histograms[evsel->core.idx];
- int idx = evsel__group_idx(evsel);
- browser__write_overhead(uib, h, &be->hists[idx], row);
+ if (symbol_conf.skip_empty &&
+ evsel__hists(evsel)->stats.nr_samples == 0)
+ continue;
+
+ browser__write_overhead(uib, h, &be->hists[idx++], row);
}
/* print type info */
if (be->indent == 0 && !member->var_name) {
- ui_browser__printf(uib, " %10d %10d %s%s",
+ ui_browser__printf(uib, " %#10x %#10x %s%s",
member->offset, member->size,
member->type_name,
- list_empty(&member->children) ? ";" : " {");
+ list_empty(&member->children) || be->folded ? ";" : " {");
} else {
- ui_browser__printf(uib, " %10d %10d %*s%s\t%s%s",
+ ui_browser__printf(uib, " %#10x %#10x %*s%s\t%s%s",
member->offset, member->size,
be->indent * 4, "", member->type_name,
member->var_name ?: "",
- list_empty(&member->children) ? ";" : " {");
+ list_empty(&member->children) || be->folded ? ";" : " {");
}
/* fill the rest */
ui_browser__write_nstring(uib, "", uib->width);
}
+static void annotated_data_browser__fold(struct annotated_data_browser *browser,
+ struct browser_entry *entry,
+ bool recursive)
+{
+ struct browser_entry *child;
+
+ if (list_empty(&entry->children))
+ return;
+ if (entry->folded && !recursive)
+ return;
+
+ if (recursive) {
+ list_for_each_entry(child, &entry->children, node)
+ annotated_data_browser__fold(browser, child, true);
+ }
+
+ entry->nr_entries = 1;
+ entry->folded = true;
+}
+
+static void annotated_data_browser__unfold(struct annotated_data_browser *browser,
+ struct browser_entry *entry,
+ bool recursive)
+{
+ struct browser_entry *child;
+ int nr_entries;
+
+ if (list_empty(&entry->children))
+ return;
+ if (!entry->folded && !recursive)
+ return;
+
+ nr_entries = 1; /* for self */
+ list_for_each_entry(child, &entry->children, node) {
+ if (recursive)
+ annotated_data_browser__unfold(browser, child, true);
+
+ nr_entries += child->nr_entries;
+ }
+
+ entry->nr_entries = nr_entries;
+ entry->folded = false;
+}
+
+static void annotated_data_browser__toggle_fold(struct annotated_data_browser *browser,
+ bool recursive)
+{
+ struct browser_entry *curr = browser->curr;
+ struct browser_entry *parent;
+
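+ /*
+ * Drop the current entry's visible count from its ancestors and the
+ * browser total, toggle the fold state (which recomputes
+ * curr->nr_entries), then add the updated count back.
+ */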
+ parent = curr->parent;
+ while (parent) {
+ parent->nr_entries -= curr->nr_entries;
+ parent = parent->parent;
+ }
+ browser->b.nr_entries -= curr->nr_entries;
+
+ if (curr->folded)
+ annotated_data_browser__unfold(browser, curr, recursive);
+ else
+ annotated_data_browser__fold(browser, curr, recursive);
+
+ parent = curr->parent;
+ while (parent) {
+ parent->nr_entries += curr->nr_entries;
+ parent = parent->parent;
+ }
+ browser->b.nr_entries += curr->nr_entries;
+
+ assert(browser->b.nr_entries == count_visible_entries(browser));
+}
+
static int annotated_data_browser__run(struct annotated_data_browser *browser,
struct evsel *evsel __maybe_unused,
struct hist_browser_timer *hbt)
@@ -265,8 +538,18 @@ static int annotated_data_browser__run(struct annotated_data_browser *browser,
"UP/DOWN/PGUP\n"
"PGDN/SPACE Navigate\n"
"</> Move to prev/next symbol\n"
+ "e Expand/Collapse current entry\n"
+ "E Expand/Collapse all children of the current\n"
"q/ESC/CTRL+C Exit\n\n");
continue;
+ case 'e':
+ annotated_data_browser__toggle_fold(browser,
+ /*recursive=*/false);
+ break;
+ case 'E':
+ annotated_data_browser__toggle_fold(browser,
+ /*recursive=*/true);
+ break;
case K_LEFT:
case '<':
case '>':
@@ -289,7 +572,7 @@ int hist_entry__annotate_data_tui(struct hist_entry *he, struct evsel *evsel,
struct annotated_data_browser browser = {
.b = {
.refresh = browser__refresh,
- .seek = ui_browser__list_head_seek,
+ .seek = browser__seek,
.write = browser__write,
.priv = he,
.extra_title_lines = 1,
@@ -300,13 +583,30 @@ int hist_entry__annotate_data_tui(struct hist_entry *he, struct evsel *evsel,
ui_helpline__push("Press ESC to exit");
- if (evsel__is_group_event(evsel))
- browser.nr_events = evsel->core.nr_members;
+ if (evsel__is_group_event(evsel)) {
+ struct evsel *pos;
+ int nr = 0;
+
+ for_each_group_evsel(pos, evsel) {
+ if (!symbol_conf.skip_empty ||
+ evsel__hists(pos)->stats.nr_samples)
+ nr++;
+ }
+ browser.nr_events = nr;
+ }
ret = annotated_data_browser__collect_entries(&browser);
- if (ret == 0)
- ret = annotated_data_browser__run(&browser, evsel, hbt);
+ if (ret < 0)
+ goto out;
+ /* To get the top and current entry */
+ browser__refresh(&browser.b);
+ /* Show the first-level child entries by default */
+ annotated_data_browser__toggle_fold(&browser, /*recursive=*/false);
+
+ ret = annotated_data_browser__run(&browser, evsel, hbt);
+
+out:
annotated_data_browser__delete_entries(&browser);
return ret;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index ea986430241e..d7e727345dab 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -156,6 +156,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
u8 pcnt_width = annotation__pcnt_width(notes);
+ u8 cntr_width = annotation__br_cntr_width();
int width;
int diff = 0;
@@ -205,13 +206,13 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
__ui_browser__line_arrow(browser,
- pcnt_width + 2 + notes->src->widths.addr + width,
+ pcnt_width + 2 + notes->src->widths.addr + width + cntr_width,
from, to);
diff = is_fused(ab, cursor);
if (diff > 0) {
ui_browser__mark_fused(browser,
- pcnt_width + 3 + notes->src->widths.addr + width,
+ pcnt_width + 3 + notes->src->widths.addr + width + cntr_width,
from - diff, diff, to > from);
}
}
@@ -714,6 +715,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
struct annotation *notes = symbol__annotation(ms->sym);
const char *help = "Press 'h' for help on key bindings";
int delay_secs = hbt ? hbt->refresh : 0;
+ char *br_cntr_text = NULL;
char title[256];
int key;
@@ -730,6 +732,8 @@ static int annotate_browser__run(struct annotate_browser *browser,
nd = browser->curr_hot;
+ annotation_br_cntr_abbr_list(&br_cntr_text, evsel, false);
+
while (1) {
key = ui_browser__run(&browser->b, delay_secs);
@@ -796,6 +800,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
"r Run available scripts\n"
"p Toggle percent type [local/global]\n"
"b Toggle percent base [period/hits]\n"
+ "B Branch counter abbr list (Optional)\n"
"? Search string backwards\n"
"f Toggle showing offsets to full address\n");
continue;
@@ -904,6 +909,14 @@ show_sup_ins:
hists__scnprintf_title(hists, title, sizeof(title));
annotate_browser__show(&browser->b, title, help);
continue;
+ case 'B':
+ if (br_cntr_text)
+ ui_browser__help_window(&browser->b, br_cntr_text);
+ else {
+ ui_browser__help_window(&browser->b,
+ "\n The branch counter is not available.\n");
+ }
+ continue;
case 'f':
annotation__toggle_full_addr(notes, ms);
continue;
@@ -923,6 +936,7 @@ show_sup_ins:
}
out:
ui_browser__hide(&browser->b);
+ free(br_cntr_text);
return key;
}
@@ -985,7 +999,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
browser.b.width = notes->src->widths.max_line_len;
browser.b.nr_entries = notes->src->nr_entries;
- browser.b.entries = &notes->src->source,
+ browser.b.entries = &notes->src->source;
browser.b.width += 18; /* Percentage */
if (annotate_opts.hide_src_code)
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index b7219df51236..49ba82bf3391 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -3684,8 +3684,10 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
struct hist_browser *browser;
int key = -1;
struct popup_action action;
+ char *br_cntr_text = NULL;
static const char help[] =
- " q Quit \n";
+ " q Quit \n"
+ " B Branch counter abbr list (Optional)\n";
browser = hist_browser__new(hists);
if (!browser)
@@ -3703,6 +3705,9 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
memset(&action, 0, sizeof(action));
+ if (!annotation_br_cntr_abbr_list(&br_cntr_text, evsel, false))
+ annotate_opts.show_br_cntr = true;
+
while (1) {
key = hist_browser__run(browser, "? - help", true, 0);
@@ -3723,6 +3728,16 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
action.ms.sym = browser->selection->sym;
do_annotate(browser, &action);
continue;
+ case 'B':
+ if (br_cntr_text) {
+ ui__question_window("Branch counter abbr list",
+ br_cntr_text, "Press any key...", 0);
+ } else {
+ ui__question_window("Branch counter abbr list",
+ "\n The branch counter is not available.\n",
+ "Press any key...", 0);
+ }
+ continue;
default:
break;
}
@@ -3730,5 +3745,6 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
out:
hist_browser__delete(browser);
+ free(br_cntr_text);
return 0;
}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 5d1f04f66a5a..e5491995adf0 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -62,7 +62,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
struct evsel *pos;
char *buf = hpp->buf;
size_t size = hpp->size;
- int i, nr_members = 1;
+ int i = 0, nr_members = 1;
struct hpp_fmt_value *values;
if (evsel__is_group_event(evsel))
@@ -72,16 +72,16 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
if (values == NULL)
return 0;
- i = 0;
- for_each_group_evsel(pos, evsel)
- values[i++].hists = evsel__hists(pos);
-
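+ /* the group leader is always first; members are filled in below */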
+ values[0].hists = evsel__hists(evsel);
values[0].val = get_field(he);
values[0].samples = he->stat.nr_events;
if (evsel__is_group_event(evsel)) {
struct hist_entry *pair;
+ for_each_group_member(pos, evsel)
+ values[++i].hists = evsel__hists(pos);
+
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
for (i = 0; i < nr_members; i++) {
if (values[i].hists != pair->hists)
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 9372e8904d22..74b2c619c56c 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -913,11 +913,11 @@ size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
continue;
if (i && total) {
- ret += fprintf(fp, "%16s events: %10d (%4.1f%%)\n",
+ ret += fprintf(fp, "%20s events: %10d (%4.1f%%)\n",
name, stats->nr_events[i],
100.0 * stats->nr_events[i] / total);
} else {
- ret += fprintf(fp, "%16s events: %10d\n",
+ ret += fprintf(fp, "%20s events: %10d\n",
name, stats->nr_events[i]);
}
}
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 0f18fe81ef0b..dc616292b2dd 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -13,6 +13,7 @@ perf-util-y += copyfile.o
perf-util-y += ctype.o
perf-util-y += db-export.o
perf-util-y += disasm.o
+perf-util-y += disasm_bpf.o
perf-util-y += env.o
perf-util-y += event.o
perf-util-y += evlist.o
@@ -65,6 +66,7 @@ perf-util-y += map.o
perf-util-y += maps.o
perf-util-y += pstack.o
perf-util-y += session.o
+perf-util-y += tool.o
perf-util-y += sample-raw.o
perf-util-y += s390-sample-raw.o
perf-util-y += amd-sample-raw.o
@@ -154,6 +156,7 @@ perf-util-y += clockid.o
perf-util-y += list_sort.o
perf-util-y += mutex.o
perf-util-y += sharded_mutex.o
+perf-util-$(CONFIG_X86_64) += intel-tpebs.o
perf-util-$(CONFIG_LIBBPF) += bpf_map.o
perf-util-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
@@ -220,12 +223,13 @@ perf-util-$(CONFIG_ZLIB) += zlib.o
perf-util-$(CONFIG_LZMA) += lzma.o
perf-util-$(CONFIG_ZSTD) += zstd.o
-perf-util-$(CONFIG_LIBCAP) += cap.o
+perf-util-y += cap.o
perf-util-$(CONFIG_CXX_DEMANGLE) += demangle-cxx.o
perf-util-y += demangle-ocaml.o
perf-util-y += demangle-java.o
perf-util-y += demangle-rust.o
+perf-util-$(CONFIG_LIBLLVM) += llvm-c-helpers.o
ifdef CONFIG_JITDUMP
perf-util-$(CONFIG_LIBELF) += jitdump.o
@@ -275,12 +279,12 @@ $(OUTPUT)util/pmu-bison.c $(OUTPUT)util/pmu-bison.h: util/pmu.y
$(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
-$(OUTPUT)util/bpf-filter-flex.c $(OUTPUT)util/bpf-filter-flex.h: util/bpf-filter.l $(OUTPUT)util/bpf-filter-bison.c
+$(OUTPUT)util/bpf-filter-flex.c $(OUTPUT)util/bpf-filter-flex.h: util/bpf-filter.l $(OUTPUT)util/bpf-filter-bison.c util/bpf-filter.h util/bpf_skel/sample-filter.h
$(call rule_mkdir)
$(Q)$(call echo-cmd,flex)$(FLEX) -o $(OUTPUT)util/bpf-filter-flex.c \
--header-file=$(OUTPUT)util/bpf-filter-flex.h $(PARSER_DEBUG_FLEX) $<
-$(OUTPUT)util/bpf-filter-bison.c $(OUTPUT)util/bpf-filter-bison.h: util/bpf-filter.y
+$(OUTPUT)util/bpf-filter-bison.c $(OUTPUT)util/bpf-filter-bison.h: util/bpf-filter.y util/bpf-filter.h util/bpf_skel/sample-filter.h
$(call rule_mkdir)
$(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/bpf-filter-bison.c -p perf_bpf_filter_
diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c
index 965da6c0b542..976abedca09e 100644
--- a/tools/perf/util/annotate-data.c
+++ b/tools/perf/util/annotate-data.c
@@ -31,15 +31,6 @@
static void delete_var_types(struct die_var_type *var_types);
-enum type_state_kind {
- TSR_KIND_INVALID = 0,
- TSR_KIND_TYPE,
- TSR_KIND_PERCPU_BASE,
- TSR_KIND_CONST,
- TSR_KIND_POINTER,
- TSR_KIND_CANARY,
-};
-
#define pr_debug_dtp(fmt, ...) \
do { \
if (debug_type_profile) \
@@ -48,7 +39,7 @@ do { \
pr_debug3(fmt, ##__VA_ARGS__); \
} while (0)
-static void pr_debug_type_name(Dwarf_Die *die, enum type_state_kind kind)
+void pr_debug_type_name(Dwarf_Die *die, enum type_state_kind kind)
{
struct strbuf sb;
char *str;
@@ -104,7 +95,7 @@ static void pr_debug_location(Dwarf_Die *die, u64 pc, int reg)
return;
while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
- if (reg != DWARF_REG_PC && end < pc)
+ if (reg != DWARF_REG_PC && end <= pc)
continue;
if (reg != DWARF_REG_PC && start > pc)
break;
@@ -140,49 +131,27 @@ static void pr_debug_location(Dwarf_Die *die, u64 pc, int reg)
}
}
-/*
- * Type information in a register, valid when @ok is true.
- * The @caller_saved registers are invalidated after a function call.
- */
-struct type_state_reg {
- Dwarf_Die type;
- u32 imm_value;
- bool ok;
- bool caller_saved;
- u8 kind;
-};
+static void pr_debug_scope(Dwarf_Die *scope_die)
+{
+ int tag;
-/* Type information in a stack location, dynamically allocated */
-struct type_state_stack {
- struct list_head list;
- Dwarf_Die type;
- int offset;
- int size;
- bool compound;
- u8 kind;
-};
+ if (!debug_type_profile && verbose < 3)
+ return;
-/* FIXME: This should be arch-dependent */
-#define TYPE_STATE_MAX_REGS 16
+ pr_info("(die:%lx) ", (long)dwarf_dieoffset(scope_die));
-/*
- * State table to maintain type info in each register and stack location.
- * It'll be updated when new variable is allocated or type info is moved
- * to a new location (register or stack). As it'd be used with the
- * shortest path of basic blocks, it only maintains a single table.
- */
-struct type_state {
- /* state of general purpose registers */
- struct type_state_reg regs[TYPE_STATE_MAX_REGS];
- /* state of stack location */
- struct list_head stack_vars;
- /* return value register */
- int ret_reg;
- /* stack pointer register */
- int stack_reg;
-};
+ tag = dwarf_tag(scope_die);
+ if (tag == DW_TAG_subprogram)
+ pr_info("[function] %s\n", dwarf_diename(scope_die));
+ else if (tag == DW_TAG_inlined_subroutine)
+ pr_info("[inlined] %s\n", dwarf_diename(scope_die));
+ else if (tag == DW_TAG_lexical_block)
+ pr_info("[block]\n");
+ else
+ pr_info("[unknown] tag=%x\n", tag);
+}
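/*
 * [editor's sketch, not part of the patch] Example debug output from
 * pr_debug_scope() for a subprogram scope; the DIE offset and the
 * function name are invented for illustration:
 *
 *	(die:2f4a1) [function] tcp_sendmsg
 */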
-static bool has_reg_type(struct type_state *state, int reg)
+bool has_reg_type(struct type_state *state, int reg)
{
return (unsigned)reg < ARRAY_SIZE(state->regs);
}
@@ -253,7 +222,7 @@ static int __add_member_cb(Dwarf_Die *die, void *arg)
struct annotated_member *parent = arg;
struct annotated_member *member;
Dwarf_Die member_type, die_mem;
- Dwarf_Word size, loc;
+ Dwarf_Word size, loc, bit_size = 0;
Dwarf_Attribute attr;
struct strbuf sb;
int tag;
@@ -268,29 +237,56 @@ static int __add_member_cb(Dwarf_Die *die, void *arg)
strbuf_init(&sb, 32);
die_get_typename(die, &sb);
- die_get_real_type(die, &member_type);
- if (dwarf_aggregate_size(&member_type, &size) < 0)
+ __die_get_real_type(die, &member_type);
+ if (dwarf_tag(&member_type) == DW_TAG_typedef)
+ die_get_real_type(&member_type, &die_mem);
+ else
+ die_mem = member_type;
+
+ if (dwarf_aggregate_size(&die_mem, &size) < 0)
size = 0;
- if (!dwarf_attr_integrate(die, DW_AT_data_member_location, &attr))
- loc = 0;
- else
+ if (dwarf_attr_integrate(die, DW_AT_data_member_location, &attr))
dwarf_formudata(&attr, &loc);
+ else {
+ /* bitfield member */
+ if (dwarf_attr_integrate(die, DW_AT_data_bit_offset, &attr) &&
+ dwarf_formudata(&attr, &loc) == 0)
+ loc /= 8;
+ else
+ loc = 0;
+
+ if (dwarf_attr_integrate(die, DW_AT_bit_size, &attr) &&
+ dwarf_formudata(&attr, &bit_size) == 0)
+ size = (bit_size + 7) / 8;
+ }
member->type_name = strbuf_detach(&sb, NULL);
/* member->var_name can be NULL */
- if (dwarf_diename(die))
- member->var_name = strdup(dwarf_diename(die));
+ if (dwarf_diename(die)) {
+ if (bit_size) {
+ if (asprintf(&member->var_name, "%s:%ld",
+ dwarf_diename(die), (long)bit_size) < 0)
+ member->var_name = NULL;
+ } else {
+ member->var_name = strdup(dwarf_diename(die));
+ }
+
+ if (member->var_name == NULL) {
+ free(member);
+ return DIE_FIND_CB_END;
+ }
+ }
member->size = size;
member->offset = loc + parent->offset;
INIT_LIST_HEAD(&member->children);
list_add_tail(&member->node, &parent->children);
- tag = dwarf_tag(&member_type);
+ tag = dwarf_tag(&die_mem);
switch (tag) {
case DW_TAG_structure_type:
case DW_TAG_union_type:
- die_find_child(&member_type, __add_member_cb, member, &die_mem);
+ die_find_child(&die_mem, __add_member_cb, member, &die_mem);
break;
default:
break;
@@ -332,6 +328,10 @@ static struct annotated_data_type *dso__findnew_data_type(struct dso *dso,
if (die_get_typename_from_type(type_die, &sb) < 0)
strbuf_add(&sb, "(unknown type)", 14);
type_name = strbuf_detach(&sb, NULL);
+
+ if (dwarf_tag(type_die) == DW_TAG_typedef)
+ die_get_real_type(type_die, type_die);
+
dwarf_aggregate_size(type_die, &size);
/* Check existing nodes in dso->data_types tree */
@@ -387,61 +387,142 @@ static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die)
return false;
}
+enum type_match_result {
+ PERF_TMR_UNKNOWN = 0,
+ PERF_TMR_OK,
+ PERF_TMR_NO_TYPE,
+ PERF_TMR_NO_POINTER,
+ PERF_TMR_NO_SIZE,
+ PERF_TMR_BAD_OFFSET,
+ PERF_TMR_BAIL_OUT,
+};
+
+static const char *match_result_str(enum type_match_result tmr)
+{
+ switch (tmr) {
+ case PERF_TMR_OK:
+ return "Good!";
+ case PERF_TMR_NO_TYPE:
+ return "no type information";
+ case PERF_TMR_NO_POINTER:
+ return "no/void pointer";
+ case PERF_TMR_NO_SIZE:
+ return "type size is unknown";
+ case PERF_TMR_BAD_OFFSET:
+ return "offset bigger than size";
+ case PERF_TMR_UNKNOWN:
+ case PERF_TMR_BAIL_OUT:
+ default:
+ return "invalid state";
+ }
+}
+
+static bool is_pointer_type(Dwarf_Die *type_die)
+{
+ int tag = dwarf_tag(type_die);
+
+ return tag == DW_TAG_pointer_type || tag == DW_TAG_array_type;
+}
+
+static bool is_compound_type(Dwarf_Die *type_die)
+{
+ int tag = dwarf_tag(type_die);
+
+ return tag == DW_TAG_structure_type || tag == DW_TAG_union_type;
+}
+
+/* returns true if Type B has better information than Type A */

+static bool is_better_type(Dwarf_Die *type_a, Dwarf_Die *type_b)
+{
+ Dwarf_Word size_a, size_b;
+ Dwarf_Die die_a, die_b;
+
+ /* pointer type is preferred */
+ if (is_pointer_type(type_a) != is_pointer_type(type_b))
+ return is_pointer_type(type_b);
+
+ if (is_pointer_type(type_b)) {
+ /*
+ * We want to compare the target type, but 'void *' can fail to
+ * get the target type.
+ */
+ if (die_get_real_type(type_a, &die_a) == NULL)
+ return true;
+ if (die_get_real_type(type_b, &die_b) == NULL)
+ return false;
+
+ type_a = &die_a;
+ type_b = &die_b;
+ }
+
+ /* bigger type is preferred */
+ if (dwarf_aggregate_size(type_a, &size_a) < 0 ||
+ dwarf_aggregate_size(type_b, &size_b) < 0)
+ return false;
+
+ if (size_a != size_b)
+ return size_a < size_b;
+
+ /* struct or union is preferred */
+ if (is_compound_type(type_a) != is_compound_type(type_b))
+ return is_compound_type(type_b);
+
+ /* typedef is preferred */
+ if (dwarf_tag(type_b) == DW_TAG_typedef)
+ return true;
+
+ return false;
+}
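/*
 * [editor's sketch, not part of the patch] The preference order above can
 * be modeled without libdw. Everything below is illustrative only; the
 * real code queries Dwarf_Die handles via dwarf_tag() and
 * dwarf_aggregate_size(), and compares pointer target types first.
 */
#include <stdbool.h>

struct toy_type {
	bool is_pointer;	/* DW_TAG_pointer_type or DW_TAG_array_type */
	bool is_compound;	/* DW_TAG_structure_type or DW_TAG_union_type */
	bool is_typedef;	/* DW_TAG_typedef */
	unsigned long size;	/* dwarf_aggregate_size() result */
};

/* returns true if type b carries better information than type a */
static bool toy_is_better_type(const struct toy_type *a, const struct toy_type *b)
{
	if (a->is_pointer != b->is_pointer)
		return b->is_pointer;		/* pointer type is preferred */
	if (a->size != b->size)
		return a->size < b->size;	/* bigger type is preferred */
	if (a->is_compound != b->is_compound)
		return b->is_compound;		/* struct or union is preferred */
	return b->is_typedef;			/* typedef is preferred */
}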
+
/* The type info will be saved in @type_die */
-static int check_variable(struct data_loc_info *dloc, Dwarf_Die *var_die,
- Dwarf_Die *type_die, int reg, int offset, bool is_fbreg)
+static enum type_match_result check_variable(struct data_loc_info *dloc,
+ Dwarf_Die *var_die,
+ Dwarf_Die *type_die, int reg,
+ int offset, bool is_fbreg)
{
Dwarf_Word size;
- bool is_pointer = true;
+ bool needs_pointer = true;
+ Dwarf_Die sized_type;
if (reg == DWARF_REG_PC)
- is_pointer = false;
+ needs_pointer = false;
else if (reg == dloc->fbreg || is_fbreg)
- is_pointer = false;
+ needs_pointer = false;
else if (arch__is(dloc->arch, "x86") && reg == X86_REG_SP)
- is_pointer = false;
+ needs_pointer = false;
/* Get the type of the variable */
- if (die_get_real_type(var_die, type_die) == NULL) {
- pr_debug_dtp("variable has no type\n");
- ann_data_stat.no_typeinfo++;
- return -1;
- }
+ if (__die_get_real_type(var_die, type_die) == NULL)
+ return PERF_TMR_NO_TYPE;
/*
* Usually it expects a pointer type for a memory access.
* Convert to a real type it points to. But global variables
* and local variables are accessed directly without a pointer.
*/
- if (is_pointer) {
- if ((dwarf_tag(type_die) != DW_TAG_pointer_type &&
- dwarf_tag(type_die) != DW_TAG_array_type) ||
- die_get_real_type(type_die, type_die) == NULL) {
- pr_debug_dtp("no pointer or no type\n");
- ann_data_stat.no_typeinfo++;
- return -1;
- }
+ if (needs_pointer) {
+ if (!is_pointer_type(type_die) ||
+ __die_get_real_type(type_die, type_die) == NULL)
+ return PERF_TMR_NO_POINTER;
}
+ if (dwarf_tag(type_die) == DW_TAG_typedef)
+ die_get_real_type(type_die, &sized_type);
+ else
+ sized_type = *type_die;
+
/* Get the size of the actual type */
- if (dwarf_aggregate_size(type_die, &size) < 0) {
- pr_debug_dtp("type size is unknown\n");
- ann_data_stat.invalid_size++;
- return -1;
- }
+ if (dwarf_aggregate_size(&sized_type, &size) < 0)
+ return PERF_TMR_NO_SIZE;
/* Minimal sanity check */
- if ((unsigned)offset >= size) {
- pr_debug_dtp("offset: %d is bigger than size: %"PRIu64"\n",
- offset, size);
- ann_data_stat.bad_offset++;
- return -1;
- }
+ if ((unsigned)offset >= size)
+ return PERF_TMR_BAD_OFFSET;
- return 0;
+ return PERF_TMR_OK;
}
-static struct type_state_stack *find_stack_state(struct type_state *state,
+struct type_state_stack *find_stack_state(struct type_state *state,
int offset)
{
struct type_state_stack *stack;
@@ -457,7 +538,7 @@ static struct type_state_stack *find_stack_state(struct type_state *state,
return NULL;
}
-static void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
+void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
Dwarf_Die *type_die)
{
int tag;
@@ -484,7 +565,7 @@ static void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
}
}
-static struct type_state_stack *findnew_stack_state(struct type_state *state,
+struct type_state_stack *findnew_stack_state(struct type_state *state,
int offset, u8 kind,
Dwarf_Die *type_die)
{
@@ -588,7 +669,7 @@ void global_var_type__tree_delete(struct rb_root *root)
}
}
-static bool get_global_var_info(struct data_loc_info *dloc, u64 addr,
+bool get_global_var_info(struct data_loc_info *dloc, u64 addr,
const char **var_name, int *var_offset)
{
struct addr_location al;
@@ -662,7 +743,7 @@ static void global_var__collect(struct data_loc_info *dloc)
}
}
-static bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
+bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
u64 ip, u64 var_addr, int *var_offset,
Dwarf_Die *type_die)
{
@@ -688,7 +769,7 @@ static bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
/* Try to get the variable by address first */
if (die_find_variable_by_addr(cu_die, var_addr, &var_die, &offset) &&
check_variable(dloc, &var_die, type_die, DWARF_REG_PC, offset,
- /*is_fbreg=*/false) == 0) {
+ /*is_fbreg=*/false) == PERF_TMR_OK) {
var_name = dwarf_diename(&var_die);
*var_offset = offset;
goto ok;
@@ -702,7 +783,7 @@ static bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
/* Try to get the name of global variable */
if (die_find_variable_at(cu_die, var_name, pc, &var_die) &&
check_variable(dloc, &var_die, type_die, DWARF_REG_PC, *var_offset,
- /*is_fbreg=*/false) == 0)
+ /*is_fbreg=*/false) == PERF_TMR_OK)
goto ok;
return false;
@@ -713,6 +794,11 @@ ok:
return true;
}
+static bool die_is_same(Dwarf_Die *die_a, Dwarf_Die *die_b)
+{
+ return (die_a->cu == die_b->cu) && (die_a->addr == die_b->addr);
+}
+
/**
* update_var_state - Update type state using given variables
* @state: type state table
@@ -744,24 +830,36 @@ static void update_var_state(struct type_state *state, struct data_loc_info *dlo
if (!dwarf_offdie(dloc->di->dbg, var->die_off, &mem_die))
continue;
- if (var->reg == DWARF_REG_FB) {
- findnew_stack_state(state, var->offset, TSR_KIND_TYPE,
- &mem_die);
+ if (var->reg == DWARF_REG_FB || var->reg == fbreg) {
+ int offset = var->offset;
+ struct type_state_stack *stack;
- pr_debug_dtp("var [%"PRIx64"] -%#x(stack)",
- insn_offset, -var->offset);
- pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
- } else if (var->reg == fbreg) {
- findnew_stack_state(state, var->offset - fb_offset,
- TSR_KIND_TYPE, &mem_die);
+ if (var->reg != DWARF_REG_FB)
+ offset -= fb_offset;
+
+ stack = find_stack_state(state, offset);
+ if (stack && stack->kind == TSR_KIND_TYPE &&
+ !is_better_type(&stack->type, &mem_die))
+ continue;
+
+ findnew_stack_state(state, offset, TSR_KIND_TYPE,
+ &mem_die);
pr_debug_dtp("var [%"PRIx64"] -%#x(stack)",
- insn_offset, -var->offset + fb_offset);
+ insn_offset, -offset);
pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
} else if (has_reg_type(state, var->reg) && var->offset == 0) {
struct type_state_reg *reg;
+ Dwarf_Die orig_type;
reg = &state->regs[var->reg];
+
+ if (reg->ok && reg->kind == TSR_KIND_TYPE &&
+ !is_better_type(&reg->type, &mem_die))
+ continue;
+
+ orig_type = reg->type;
+
reg->type = mem_die;
reg->kind = TSR_KIND_TYPE;
reg->ok = true;
@@ -769,383 +867,31 @@ static void update_var_state(struct type_state *state, struct data_loc_info *dlo
pr_debug_dtp("var [%"PRIx64"] reg%d",
insn_offset, var->reg);
pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
- }
- }
-}
-
-static void update_insn_state_x86(struct type_state *state,
- struct data_loc_info *dloc, Dwarf_Die *cu_die,
- struct disasm_line *dl)
-{
- struct annotated_insn_loc loc;
- struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE];
- struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET];
- struct type_state_reg *tsr;
- Dwarf_Die type_die;
- u32 insn_offset = dl->al.offset;
- int fbreg = dloc->fbreg;
- int fboff = 0;
-
- if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0)
- return;
-
- if (ins__is_call(&dl->ins)) {
- struct symbol *func = dl->ops.target.sym;
-
- if (func == NULL)
- return;
-
- /* __fentry__ will preserve all registers */
- if (!strcmp(func->name, "__fentry__"))
- return;
-
- pr_debug_dtp("call [%x] %s\n", insn_offset, func->name);
-
- /* Otherwise invalidate caller-saved registers after call */
- for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) {
- if (state->regs[i].caller_saved)
- state->regs[i].ok = false;
- }
-
- /* Update register with the return type (if any) */
- if (die_find_func_rettype(cu_die, func->name, &type_die)) {
- tsr = &state->regs[state->ret_reg];
- tsr->type = type_die;
- tsr->kind = TSR_KIND_TYPE;
- tsr->ok = true;
-
- pr_debug_dtp("call [%x] return -> reg%d",
- insn_offset, state->ret_reg);
- pr_debug_type_name(&type_die, tsr->kind);
- }
- return;
- }
-
- if (!strncmp(dl->ins.name, "add", 3)) {
- u64 imm_value = -1ULL;
- int offset;
- const char *var_name = NULL;
- struct map_symbol *ms = dloc->ms;
- u64 ip = ms->sym->start + dl->al.offset;
-
- if (!has_reg_type(state, dst->reg1))
- return;
-
- tsr = &state->regs[dst->reg1];
-
- if (src->imm)
- imm_value = src->offset;
- else if (has_reg_type(state, src->reg1) &&
- state->regs[src->reg1].kind == TSR_KIND_CONST)
- imm_value = state->regs[src->reg1].imm_value;
- else if (src->reg1 == DWARF_REG_PC) {
- u64 var_addr = annotate_calc_pcrel(dloc->ms, ip,
- src->offset, dl);
-
- if (get_global_var_info(dloc, var_addr,
- &var_name, &offset) &&
- !strcmp(var_name, "this_cpu_off") &&
- tsr->kind == TSR_KIND_CONST) {
- tsr->kind = TSR_KIND_PERCPU_BASE;
- imm_value = tsr->imm_value;
- }
- }
- else
- return;
-
- if (tsr->kind != TSR_KIND_PERCPU_BASE)
- return;
- if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset,
- &type_die) && offset == 0) {
/*
- * This is not a pointer type, but it should be treated
- * as a pointer.
+ * If this register is directly copied from another and it gets a
+ * better type, also update the type of the source register. This
+ * is usually the case with the container_of() macro and an offset of 0.
*/
- tsr->type = type_die;
- tsr->kind = TSR_KIND_POINTER;
- tsr->ok = true;
-
- pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d",
- insn_offset, imm_value, dst->reg1);
- pr_debug_type_name(&tsr->type, tsr->kind);
- }
- return;
- }
-
- if (strncmp(dl->ins.name, "mov", 3))
- return;
+ if (has_reg_type(state, reg->copied_from)) {
+ struct type_state_reg *copy_reg;
- if (dloc->fb_cfa) {
- u64 ip = dloc->ms->sym->start + dl->al.offset;
- u64 pc = map__rip_2objdump(dloc->ms->map, ip);
+ copy_reg = &state->regs[reg->copied_from];
- if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
- fbreg = -1;
- }
-
- /* Case 1. register to register or segment:offset to register transfers */
- if (!src->mem_ref && !dst->mem_ref) {
- if (!has_reg_type(state, dst->reg1))
- return;
-
- tsr = &state->regs[dst->reg1];
- if (dso__kernel(map__dso(dloc->ms->map)) &&
- src->segment == INSN_SEG_X86_GS && src->imm) {
- u64 ip = dloc->ms->sym->start + dl->al.offset;
- u64 var_addr;
- int offset;
-
- /*
- * In kernel, %gs points to a per-cpu region for the
- * current CPU. Access with a constant offset should
- * be treated as a global variable access.
- */
- var_addr = src->offset;
-
- if (var_addr == 40) {
- tsr->kind = TSR_KIND_CANARY;
- tsr->ok = true;
-
- pr_debug_dtp("mov [%x] stack canary -> reg%d\n",
- insn_offset, dst->reg1);
- return;
- }
-
- if (!get_global_var_type(cu_die, dloc, ip, var_addr,
- &offset, &type_die) ||
- !die_get_member_type(&type_die, offset, &type_die)) {
- tsr->ok = false;
- return;
- }
+ /* TODO: check if type is compatible or embedded */
+ if (!copy_reg->ok || (copy_reg->kind != TSR_KIND_TYPE) ||
+ !die_is_same(&copy_reg->type, &orig_type) ||
+ !is_better_type(&copy_reg->type, &mem_die))
+ continue;
- tsr->type = type_die;
- tsr->kind = TSR_KIND_TYPE;
- tsr->ok = true;
+ copy_reg->type = mem_die;
- pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d",
- insn_offset, var_addr, dst->reg1);
- pr_debug_type_name(&tsr->type, tsr->kind);
- return;
- }
-
- if (src->imm) {
- tsr->kind = TSR_KIND_CONST;
- tsr->imm_value = src->offset;
- tsr->ok = true;
-
- pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n",
- insn_offset, tsr->imm_value, dst->reg1);
- return;
- }
-
- if (!has_reg_type(state, src->reg1) ||
- !state->regs[src->reg1].ok) {
- tsr->ok = false;
- return;
- }
-
- tsr->type = state->regs[src->reg1].type;
- tsr->kind = state->regs[src->reg1].kind;
- tsr->ok = true;
-
- pr_debug_dtp("mov [%x] reg%d -> reg%d",
- insn_offset, src->reg1, dst->reg1);
- pr_debug_type_name(&tsr->type, tsr->kind);
- }
- /* Case 2. memory to register transers */
- if (src->mem_ref && !dst->mem_ref) {
- int sreg = src->reg1;
-
- if (!has_reg_type(state, dst->reg1))
- return;
-
- tsr = &state->regs[dst->reg1];
-
-retry:
- /* Check stack variables with offset */
- if (sreg == fbreg) {
- struct type_state_stack *stack;
- int offset = src->offset - fboff;
-
- stack = find_stack_state(state, offset);
- if (stack == NULL) {
- tsr->ok = false;
- return;
- } else if (!stack->compound) {
- tsr->type = stack->type;
- tsr->kind = stack->kind;
- tsr->ok = true;
- } else if (die_get_member_type(&stack->type,
- offset - stack->offset,
- &type_die)) {
- tsr->type = type_die;
- tsr->kind = TSR_KIND_TYPE;
- tsr->ok = true;
- } else {
- tsr->ok = false;
- return;
- }
-
- pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d",
- insn_offset, -offset, dst->reg1);
- pr_debug_type_name(&tsr->type, tsr->kind);
- }
- /* And then dereference the pointer if it has one */
- else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
- state->regs[sreg].kind == TSR_KIND_TYPE &&
- die_deref_ptr_type(&state->regs[sreg].type,
- src->offset, &type_die)) {
- tsr->type = type_die;
- tsr->kind = TSR_KIND_TYPE;
- tsr->ok = true;
-
- pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
- insn_offset, src->offset, sreg, dst->reg1);
- pr_debug_type_name(&tsr->type, tsr->kind);
- }
- /* Or check if it's a global variable */
- else if (sreg == DWARF_REG_PC) {
- struct map_symbol *ms = dloc->ms;
- u64 ip = ms->sym->start + dl->al.offset;
- u64 addr;
- int offset;
-
- addr = annotate_calc_pcrel(ms, ip, src->offset, dl);
-
- if (!get_global_var_type(cu_die, dloc, ip, addr, &offset,
- &type_die) ||
- !die_get_member_type(&type_die, offset, &type_die)) {
- tsr->ok = false;
- return;
- }
-
- tsr->type = type_die;
- tsr->kind = TSR_KIND_TYPE;
- tsr->ok = true;
-
- pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d",
- insn_offset, addr, dst->reg1);
- pr_debug_type_name(&type_die, tsr->kind);
- }
- /* And check percpu access with base register */
- else if (has_reg_type(state, sreg) &&
- state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) {
- u64 ip = dloc->ms->sym->start + dl->al.offset;
- u64 var_addr = src->offset;
- int offset;
-
- if (src->multi_regs) {
- int reg2 = (sreg == src->reg1) ? src->reg2 : src->reg1;
-
- if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
- state->regs[reg2].kind == TSR_KIND_CONST)
- var_addr += state->regs[reg2].imm_value;
- }
-
- /*
- * In kernel, %gs points to a per-cpu region for the
- * current CPU. Access with a constant offset should
- * be treated as a global variable access.
- */
- if (get_global_var_type(cu_die, dloc, ip, var_addr,
- &offset, &type_die) &&
- die_get_member_type(&type_die, offset, &type_die)) {
- tsr->type = type_die;
- tsr->kind = TSR_KIND_TYPE;
- tsr->ok = true;
-
- if (src->multi_regs) {
- pr_debug_dtp("mov [%x] percpu %#x(reg%d,reg%d) -> reg%d",
- insn_offset, src->offset, src->reg1,
- src->reg2, dst->reg1);
- } else {
- pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d",
- insn_offset, src->offset, sreg, dst->reg1);
- }
- pr_debug_type_name(&tsr->type, tsr->kind);
- } else {
- tsr->ok = false;
- }
- }
- /* And then dereference the calculated pointer if it has one */
- else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
- state->regs[sreg].kind == TSR_KIND_POINTER &&
- die_get_member_type(&state->regs[sreg].type,
- src->offset, &type_die)) {
- tsr->type = type_die;
- tsr->kind = TSR_KIND_TYPE;
- tsr->ok = true;
-
- pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d",
- insn_offset, src->offset, sreg, dst->reg1);
- pr_debug_type_name(&tsr->type, tsr->kind);
- }
- /* Or try another register if any */
- else if (src->multi_regs && sreg == src->reg1 &&
- src->reg1 != src->reg2) {
- sreg = src->reg2;
- goto retry;
- }
- else {
- int offset;
- const char *var_name = NULL;
-
- /* it might be per-cpu variable (in kernel) access */
- if (src->offset < 0) {
- if (get_global_var_info(dloc, (s64)src->offset,
- &var_name, &offset) &&
- !strcmp(var_name, "__per_cpu_offset")) {
- tsr->kind = TSR_KIND_PERCPU_BASE;
-
- pr_debug_dtp("mov [%x] percpu base reg%d\n",
- insn_offset, dst->reg1);
- }
- }
-
- tsr->ok = false;
- }
- }
- /* Case 3. register to memory transfers */
- if (!src->mem_ref && dst->mem_ref) {
- if (!has_reg_type(state, src->reg1) ||
- !state->regs[src->reg1].ok)
- return;
-
- /* Check stack variables with offset */
- if (dst->reg1 == fbreg) {
- struct type_state_stack *stack;
- int offset = dst->offset - fboff;
-
- tsr = &state->regs[src->reg1];
-
- stack = find_stack_state(state, offset);
- if (stack) {
- /*
- * The source register is likely to hold a type
- * of member if it's a compound type. Do not
- * update the stack variable type since we can
- * get the member type later by using the
- * die_get_member_type().
- */
- if (!stack->compound)
- set_stack_state(stack, offset, tsr->kind,
- &tsr->type);
- } else {
- findnew_stack_state(state, offset, tsr->kind,
- &tsr->type);
+ pr_debug_dtp("var [%"PRIx64"] copyback reg%d",
+ insn_offset, reg->copied_from);
+ pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
}
-
- pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)",
- insn_offset, src->reg1, -offset);
- pr_debug_type_name(&tsr->type, tsr->kind);
}
- /*
- * Ignore other transfers since it'd set a value in a struct
- * and won't change the type.
- */
}
- /* Case 4. memory to memory transfers (not handled for now) */
}
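/*
 * [editor's sketch, not part of the patch] The copyback above depends on
 * the new 'copied_from' field in struct type_state_reg: when the arch
 * handler processes a plain register-to-register move, it is expected to
 * remember the source register, roughly:
 *
 *	tsr->copied_from = src->reg1;	// on "mov %src, %dst"
 *
 * so that a better type learned later for the destination (the
 * container_of() pattern with offset 0) can be written back to the
 * source. The assignment shown here is an assumption about the
 * arch-specific handler, not code from this hunk.
 */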
/**
@@ -1166,8 +912,8 @@ retry:
static void update_insn_state(struct type_state *state, struct data_loc_info *dloc,
Dwarf_Die *cu_die, struct disasm_line *dl)
{
- if (arch__is(dloc->arch, "x86"))
- update_insn_state_x86(state, dloc, cu_die, dl);
+ if (dloc->arch->update_insn_state)
+ dloc->arch->update_insn_state(state, dloc, cu_die, dl);
}
/*
@@ -1254,75 +1000,164 @@ static void setup_stack_canary(struct data_loc_info *dloc)
/*
* It's at the target address, check if it has a matching type.
- * It returns 1 if found, 0 if not or -1 if not found but no need to
- * repeat the search. The last case is for per-cpu variables which
+ * It returns PERF_TMR_BAIL_OUT when it looks up per-cpu variables which
* are similar to global variables and no additional info is needed.
*/
-static int check_matching_type(struct type_state *state,
- struct data_loc_info *dloc,
- Dwarf_Die *cu_die, Dwarf_Die *type_die)
+static enum type_match_result check_matching_type(struct type_state *state,
+ struct data_loc_info *dloc,
+ Dwarf_Die *cu_die,
+ struct disasm_line *dl,
+ Dwarf_Die *type_die)
{
Dwarf_Word size;
- u32 insn_offset = dloc->ip - dloc->ms->sym->start;
+ u32 insn_offset = dl->al.offset;
int reg = dloc->op->reg1;
+ int offset = dloc->op->offset;
+ const char *offset_sign = "";
+ bool retry = true;
+
+ if (offset < 0) {
+ offset = -offset;
+ offset_sign = "-";
+ }
- pr_debug_dtp("chk [%x] reg%d offset=%#x ok=%d kind=%d",
- insn_offset, reg, dloc->op->offset,
+again:
+ pr_debug_dtp("chk [%x] reg%d offset=%s%#x ok=%d kind=%d ",
+ insn_offset, reg, offset_sign, offset,
state->regs[reg].ok, state->regs[reg].kind);
- if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_TYPE) {
- int tag = dwarf_tag(&state->regs[reg].type);
+ if (!state->regs[reg].ok)
+ goto check_non_register;
+
+ if (state->regs[reg].kind == TSR_KIND_TYPE) {
+ Dwarf_Die sized_type;
+ struct strbuf sb;
+
+ strbuf_init(&sb, 32);
+ die_get_typename_from_type(&state->regs[reg].type, &sb);
+ pr_debug_dtp("(%s)", sb.buf);
+ strbuf_release(&sb);
/*
* Normal registers should hold a pointer (or array) to
* dereference a memory location.
*/
- if (tag != DW_TAG_pointer_type && tag != DW_TAG_array_type) {
+ if (!is_pointer_type(&state->regs[reg].type)) {
if (dloc->op->offset < 0 && reg != state->stack_reg)
goto check_kernel;
- pr_debug_dtp("\n");
- return -1;
+ return PERF_TMR_NO_POINTER;
}
- pr_debug_dtp("\n");
-
/* Remove the pointer and get the target type */
- if (die_get_real_type(&state->regs[reg].type, type_die) == NULL)
- return -1;
+ if (__die_get_real_type(&state->regs[reg].type, type_die) == NULL)
+ return PERF_TMR_NO_POINTER;
+
+ dloc->type_offset = dloc->op->offset;
+
+ if (dwarf_tag(type_die) == DW_TAG_typedef)
+ die_get_real_type(type_die, &sized_type);
+ else
+ sized_type = *type_die;
+
+ /* Get the size of the actual type */
+ if (dwarf_aggregate_size(&sized_type, &size) < 0 ||
+ (unsigned)dloc->type_offset >= size)
+ return PERF_TMR_BAD_OFFSET;
+
+ return PERF_TMR_OK;
+ }
+
+ if (state->regs[reg].kind == TSR_KIND_POINTER) {
+ pr_debug_dtp("percpu ptr");
+
+ /*
+	 * It's actually a pointer but the address was calculated using
+ * some arithmetic. So it points to the actual type already.
+ */
+ *type_die = state->regs[reg].type;
dloc->type_offset = dloc->op->offset;
/* Get the size of the actual type */
if (dwarf_aggregate_size(type_die, &size) < 0 ||
(unsigned)dloc->type_offset >= size)
- return -1;
+ return PERF_TMR_BAIL_OUT;
- return 1;
+ return PERF_TMR_OK;
}
+ if (state->regs[reg].kind == TSR_KIND_CANARY) {
+ pr_debug_dtp("stack canary");
+
+ /*
+ * This is a saved value of the stack canary which will be handled
+ * in the outer logic when it returns failure here. Pretend it's
+ * from the stack canary directly.
+ */
+ setup_stack_canary(dloc);
+
+ return PERF_TMR_BAIL_OUT;
+ }
+
+ if (state->regs[reg].kind == TSR_KIND_PERCPU_BASE) {
+ u64 var_addr = dloc->op->offset;
+ int var_offset;
+
+ pr_debug_dtp("percpu var");
+
+ if (dloc->op->multi_regs) {
+ int reg2 = dloc->op->reg2;
+
+ if (dloc->op->reg2 == reg)
+ reg2 = dloc->op->reg1;
+
+ if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
+ state->regs[reg2].kind == TSR_KIND_CONST)
+ var_addr += state->regs[reg2].imm_value;
+ }
+
+ if (get_global_var_type(cu_die, dloc, dloc->ip, var_addr,
+ &var_offset, type_die)) {
+ dloc->type_offset = var_offset;
+ return PERF_TMR_OK;
+ }
+ /* No need to retry per-cpu (global) variables */
+ return PERF_TMR_BAIL_OUT;
+ }
+
+check_non_register:
if (reg == dloc->fbreg) {
struct type_state_stack *stack;
- pr_debug_dtp(" fbreg\n");
+ pr_debug_dtp("fbreg");
stack = find_stack_state(state, dloc->type_offset);
- if (stack == NULL)
- return 0;
+ if (stack == NULL) {
+ if (retry) {
+ pr_debug_dtp(" : retry\n");
+ retry = false;
+
+				/* update type info if it's the first store to the stack */
+ update_insn_state(state, dloc, cu_die, dl);
+ goto again;
+ }
+ return PERF_TMR_NO_TYPE;
+ }
if (stack->kind == TSR_KIND_CANARY) {
setup_stack_canary(dloc);
- return -1;
+ return PERF_TMR_BAIL_OUT;
}
if (stack->kind != TSR_KIND_TYPE)
- return 0;
+ return PERF_TMR_NO_TYPE;
*type_die = stack->type;
/* Update the type offset from the start of slot */
dloc->type_offset -= stack->offset;
- return 1;
+ return PERF_TMR_OK;
}
if (dloc->fb_cfa) {
@@ -1330,109 +1165,59 @@ static int check_matching_type(struct type_state *state,
u64 pc = map__rip_2objdump(dloc->ms->map, dloc->ip);
int fbreg, fboff;
- pr_debug_dtp(" cfa\n");
+ pr_debug_dtp("cfa");
if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
fbreg = -1;
if (reg != fbreg)
- return 0;
+ return PERF_TMR_NO_TYPE;
stack = find_stack_state(state, dloc->type_offset - fboff);
- if (stack == NULL)
- return 0;
+ if (stack == NULL) {
+ if (retry) {
+ pr_debug_dtp(" : retry\n");
+ retry = false;
+
+				/* update type info if it's the first store to the stack */
+ update_insn_state(state, dloc, cu_die, dl);
+ goto again;
+ }
+ return PERF_TMR_NO_TYPE;
+ }
if (stack->kind == TSR_KIND_CANARY) {
setup_stack_canary(dloc);
- return -1;
+ return PERF_TMR_BAIL_OUT;
}
if (stack->kind != TSR_KIND_TYPE)
- return 0;
+ return PERF_TMR_NO_TYPE;
*type_die = stack->type;
/* Update the type offset from the start of slot */
dloc->type_offset -= fboff + stack->offset;
- return 1;
- }
-
- if (state->regs[reg].kind == TSR_KIND_PERCPU_BASE) {
- u64 var_addr = dloc->op->offset;
- int var_offset;
-
- pr_debug_dtp(" percpu var\n");
-
- if (dloc->op->multi_regs) {
- int reg2 = dloc->op->reg2;
-
- if (dloc->op->reg2 == reg)
- reg2 = dloc->op->reg1;
-
- if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
- state->regs[reg2].kind == TSR_KIND_CONST)
- var_addr += state->regs[reg2].imm_value;
- }
-
- if (get_global_var_type(cu_die, dloc, dloc->ip, var_addr,
- &var_offset, type_die)) {
- dloc->type_offset = var_offset;
- return 1;
- }
- /* No need to retry per-cpu (global) variables */
- return -1;
- }
-
- if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_POINTER) {
- pr_debug_dtp(" percpu ptr\n");
-
- /*
- * It's actaully pointer but the address was calculated using
- * some arithmetic. So it points to the actual type already.
- */
- *type_die = state->regs[reg].type;
-
- dloc->type_offset = dloc->op->offset;
-
- /* Get the size of the actual type */
- if (dwarf_aggregate_size(type_die, &size) < 0 ||
- (unsigned)dloc->type_offset >= size)
- return -1;
-
- return 1;
- }
-
- if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_CANARY) {
- pr_debug_dtp(" stack canary\n");
-
- /*
- * This is a saved value of the stack canary which will be handled
- * in the outer logic when it returns failure here. Pretend it's
- * from the stack canary directly.
- */
- setup_stack_canary(dloc);
-
- return -1;
+ return PERF_TMR_OK;
}
check_kernel:
if (dso__kernel(map__dso(dloc->ms->map))) {
u64 addr;
- int offset;
/* Direct this-cpu access like "%gs:0x34740" */
if (dloc->op->segment == INSN_SEG_X86_GS && dloc->op->imm &&
arch__is(dloc->arch, "x86")) {
- pr_debug_dtp(" this-cpu var\n");
+ pr_debug_dtp("this-cpu var");
addr = dloc->op->offset;
if (get_global_var_type(cu_die, dloc, dloc->ip, addr,
&offset, type_die)) {
dloc->type_offset = offset;
- return 1;
+ return PERF_TMR_OK;
}
- return -1;
+ return PERF_TMR_BAIL_OUT;
}
/* Access to global variable like "-0x7dcf0500(,%rdx,8)" */
@@ -1441,31 +1226,30 @@ check_kernel:
if (get_global_var_type(cu_die, dloc, dloc->ip, addr,
&offset, type_die)) {
- pr_debug_dtp(" global var\n");
+ pr_debug_dtp("global var");
dloc->type_offset = offset;
- return 1;
+ return PERF_TMR_OK;
}
- pr_debug_dtp(" negative offset\n");
- return -1;
+ return PERF_TMR_BAIL_OUT;
}
}
- pr_debug_dtp("\n");
- return 0;
+ return PERF_TMR_UNKNOWN;
}
/* Iterate instructions in basic blocks and update type table */
-static int find_data_type_insn(struct data_loc_info *dloc,
- struct list_head *basic_blocks,
- struct die_var_type *var_types,
- Dwarf_Die *cu_die, Dwarf_Die *type_die)
+static enum type_match_result find_data_type_insn(struct data_loc_info *dloc,
+ struct list_head *basic_blocks,
+ struct die_var_type *var_types,
+ Dwarf_Die *cu_die,
+ Dwarf_Die *type_die)
{
struct type_state state;
struct symbol *sym = dloc->ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct annotated_basic_block *bb;
- int ret = 0;
+ enum type_match_result ret = PERF_TMR_UNKNOWN;
init_type_state(&state, dloc->arch);
@@ -1490,7 +1274,8 @@ static int find_data_type_insn(struct data_loc_info *dloc,
if (this_ip == dloc->ip) {
ret = check_matching_type(&state, dloc,
- cu_die, type_die);
+ cu_die, dl, type_die);
+ pr_debug_dtp(" : %s\n", match_result_str(ret));
goto out;
}
@@ -1506,34 +1291,43 @@ out:
return ret;
}
+static int arch_supports_insn_tracking(struct data_loc_info *dloc)
+{
+	if (arch__is(dloc->arch, "x86") || arch__is(dloc->arch, "powerpc"))
+ return 1;
+ return 0;
+}
+
/*
* Construct a list of basic blocks for each scope with variables and try to find
* the data type by updating a type state table through instructions.
*/
-static int find_data_type_block(struct data_loc_info *dloc,
- Dwarf_Die *cu_die, Dwarf_Die *scopes,
- int nr_scopes, Dwarf_Die *type_die)
+static enum type_match_result find_data_type_block(struct data_loc_info *dloc,
+ Dwarf_Die *cu_die,
+ Dwarf_Die *scopes,
+ int nr_scopes,
+ Dwarf_Die *type_die)
{
LIST_HEAD(basic_blocks);
struct die_var_type *var_types = NULL;
u64 src_ip, dst_ip, prev_dst_ip;
- int ret = -1;
+ enum type_match_result ret = PERF_TMR_UNKNOWN;
/* TODO: other architecture support */
- if (!arch__is(dloc->arch, "x86"))
- return -1;
+ if (!arch_supports_insn_tracking(dloc))
+ return PERF_TMR_BAIL_OUT;
prev_dst_ip = dst_ip = dloc->ip;
for (int i = nr_scopes - 1; i >= 0; i--) {
Dwarf_Addr base, start, end;
LIST_HEAD(this_blocks);
- int found;
if (dwarf_ranges(&scopes[i], 0, &base, &start, &end) < 0)
break;
- pr_debug_dtp("scope: [%d/%d] (die:%lx)\n",
- i + 1, nr_scopes, (long)dwarf_dieoffset(&scopes[i]));
+ pr_debug_dtp("scope: [%d/%d] ", i + 1, nr_scopes);
+ pr_debug_scope(&scopes[i]);
+
src_ip = map__objdump_2rip(dloc->ms->map, start);
again:
@@ -1558,10 +1352,17 @@ again:
fixup_var_address(var_types, start);
/* Find from start of this scope to the target instruction */
- found = find_data_type_insn(dloc, &basic_blocks, var_types,
+ ret = find_data_type_insn(dloc, &basic_blocks, var_types,
cu_die, type_die);
- if (found > 0) {
+ if (ret == PERF_TMR_OK) {
char buf[64];
+ int offset = dloc->op->offset;
+ const char *offset_sign = "";
+
+ if (offset < 0) {
+ offset = -offset;
+ offset_sign = "-";
+ }
if (dloc->op->multi_regs)
snprintf(buf, sizeof(buf), "reg%d, reg%d",
@@ -1569,14 +1370,12 @@ again:
else
snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1);
- pr_debug_dtp("found by insn track: %#x(%s) type-offset=%#x\n",
- dloc->op->offset, buf, dloc->type_offset);
- pr_debug_type_name(type_die, TSR_KIND_TYPE);
- ret = 0;
+ pr_debug_dtp("found by insn track: %s%#x(%s) type-offset=%#x\n",
+ offset_sign, offset, buf, dloc->type_offset);
break;
}
- if (found < 0)
+ if (ret == PERF_TMR_BAIL_OUT)
break;
/* Go up to the next scope and find blocks to the start */
@@ -1595,14 +1394,17 @@ static int find_data_type_die(struct data_loc_info *dloc, Dwarf_Die *type_die)
struct annotated_op_loc *loc = dloc->op;
Dwarf_Die cu_die, var_die;
Dwarf_Die *scopes = NULL;
- int reg, offset;
+ int reg, offset = loc->offset;
int ret = -1;
int i, nr_scopes;
int fbreg = -1;
int fb_offset = 0;
bool is_fbreg = false;
+ bool found = false;
u64 pc;
char buf[64];
+ enum type_match_result result = PERF_TMR_UNKNOWN;
+ const char *offset_sign = "";
if (dloc->op->multi_regs)
snprintf(buf, sizeof(buf), "reg%d, reg%d", dloc->op->reg1, dloc->op->reg2);
@@ -1611,10 +1413,15 @@ static int find_data_type_die(struct data_loc_info *dloc, Dwarf_Die *type_die)
else
snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1);
+ if (offset < 0) {
+ offset = -offset;
+ offset_sign = "-";
+ }
+
pr_debug_dtp("-----------------------------------------------------------\n");
- pr_debug_dtp("find data type for %#x(%s) at %s+%#"PRIx64"\n",
- dloc->op->offset, buf, dloc->ms->sym->name,
- dloc->ip - dloc->ms->sym->start);
+ pr_debug_dtp("find data type for %s%#x(%s) at %s+%#"PRIx64"\n",
+ offset_sign, offset, buf,
+ dloc->ms->sym->name, dloc->ip - dloc->ms->sym->start);
/*
* IP is a relative instruction address from the start of the map, as
@@ -1644,7 +1451,7 @@ static int find_data_type_die(struct data_loc_info *dloc, Dwarf_Die *type_die)
pr_debug_dtp("found by addr=%#"PRIx64" type_offset=%#x\n",
dloc->var_addr, offset);
pr_debug_type_name(type_die, TSR_KIND_TYPE);
- ret = 0;
+ found = true;
goto out;
}
}
@@ -1685,65 +1492,95 @@ retry:
/* Search from the inner-most scope to the outer */
for (i = nr_scopes - 1; i >= 0; i--) {
+ Dwarf_Die mem_die;
+ int type_offset = offset;
+
if (reg == DWARF_REG_PC) {
if (!die_find_variable_by_addr(&scopes[i], dloc->var_addr,
- &var_die, &offset))
+ &var_die, &type_offset))
continue;
} else {
/* Look up variables/parameters in this scope */
if (!die_find_variable_by_reg(&scopes[i], pc, reg,
- &offset, is_fbreg, &var_die))
+ &type_offset, is_fbreg, &var_die))
continue;
}
+ pr_debug_dtp("found \"%s\" (die: %#lx) in scope=%d/%d (die: %#lx) ",
+ dwarf_diename(&var_die), (long)dwarf_dieoffset(&var_die),
+ i+1, nr_scopes, (long)dwarf_dieoffset(&scopes[i]));
+
/* Found a variable, see if it's correct */
- ret = check_variable(dloc, &var_die, type_die, reg, offset, is_fbreg);
- if (ret == 0) {
- pr_debug_dtp("found \"%s\" in scope=%d/%d (die: %#lx) ",
- dwarf_diename(&var_die), i+1, nr_scopes,
- (long)dwarf_dieoffset(&scopes[i]));
+ result = check_variable(dloc, &var_die, &mem_die, reg, type_offset, is_fbreg);
+ if (result == PERF_TMR_OK) {
if (reg == DWARF_REG_PC) {
pr_debug_dtp("addr=%#"PRIx64" type_offset=%#x\n",
- dloc->var_addr, offset);
+ dloc->var_addr, type_offset);
} else if (reg == DWARF_REG_FB || is_fbreg) {
pr_debug_dtp("stack_offset=%#x type_offset=%#x\n",
- fb_offset, offset);
+ fb_offset, type_offset);
} else {
- pr_debug_dtp("type_offset=%#x\n", offset);
+ pr_debug_dtp("type_offset=%#x\n", type_offset);
+ }
+
+ if (!found || is_better_type(type_die, &mem_die)) {
+ *type_die = mem_die;
+ dloc->type_offset = type_offset;
+ found = true;
}
- pr_debug_location(&var_die, pc, reg);
- pr_debug_type_name(type_die, TSR_KIND_TYPE);
} else {
- pr_debug_dtp("check variable \"%s\" failed (die: %#lx)\n",
- dwarf_diename(&var_die),
- (long)dwarf_dieoffset(&var_die));
- pr_debug_location(&var_die, pc, reg);
- pr_debug_type_name(type_die, TSR_KIND_TYPE);
+ pr_debug_dtp("failed: %s\n", match_result_str(result));
}
- dloc->type_offset = offset;
- goto out;
+
+ pr_debug_location(&var_die, pc, reg);
+ pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
}
- if (loc->multi_regs && reg == loc->reg1 && loc->reg1 != loc->reg2) {
+ if (!found && loc->multi_regs && reg == loc->reg1 && loc->reg1 != loc->reg2) {
reg = loc->reg2;
goto retry;
}
- if (reg != DWARF_REG_PC) {
- ret = find_data_type_block(dloc, &cu_die, scopes,
- nr_scopes, type_die);
- if (ret == 0) {
+ if (!found && reg != DWARF_REG_PC) {
+ result = find_data_type_block(dloc, &cu_die, scopes,
+ nr_scopes, type_die);
+ if (result == PERF_TMR_OK) {
ann_data_stat.insn_track++;
- goto out;
+ found = true;
}
}
- if (ret < 0) {
- pr_debug_dtp("no variable found\n");
- ann_data_stat.no_var++;
+out:
+ pr_debug_dtp("final result: ");
+ if (found) {
+ pr_debug_type_name(type_die, TSR_KIND_TYPE);
+ ret = 0;
+ } else {
+ switch (result) {
+ case PERF_TMR_NO_TYPE:
+ case PERF_TMR_NO_POINTER:
+ pr_debug_dtp("%s\n", match_result_str(result));
+ ann_data_stat.no_typeinfo++;
+ break;
+ case PERF_TMR_NO_SIZE:
+ pr_debug_dtp("%s\n", match_result_str(result));
+ ann_data_stat.invalid_size++;
+ break;
+ case PERF_TMR_BAD_OFFSET:
+ pr_debug_dtp("%s\n", match_result_str(result));
+ ann_data_stat.bad_offset++;
+ break;
+ case PERF_TMR_UNKNOWN:
+ case PERF_TMR_BAIL_OUT:
+ case PERF_TMR_OK: /* should not reach here */
+ default:
+ pr_debug_dtp("no variable found\n");
+ ann_data_stat.no_var++;
+ break;
+ }
+ ret = -1;
}
-out:
free(scopes);
return ret;
}
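/*
 * [editor's sketch, not part of the patch] The control flow of the
 * reworked find_data_type_die() above, condensed to pseudo-C:
 *
 *	for each scope, innermost first:
 *		if a variable matches (reg, offset) and
 *		   check_variable() == PERF_TMR_OK:
 *			keep it, unless a previous hit was better
 *			(is_better_type);
 *	if nothing found and the operand has two registers:
 *		retry with the second register;
 *	if still nothing and reg != DWARF_REG_PC:
 *		fall back to instruction tracking (find_data_type_block);
 *	map the final type_match_result onto ann_data_stat counters.
 */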
@@ -1764,16 +1601,9 @@ out:
*/
struct annotated_data_type *find_data_type(struct data_loc_info *dloc)
{
- struct annotated_data_type *result = NULL;
struct dso *dso = map__dso(dloc->ms->map);
Dwarf_Die type_die;
- dloc->di = debuginfo__new(dso__long_name(dso));
- if (dloc->di == NULL) {
- pr_debug_dtp("cannot get the debug info\n");
- return NULL;
- }
-
/*
* The type offset is the same as instruction offset by default.
* But when finding a global variable, the offset won't be valid.
@@ -1783,13 +1613,9 @@ struct annotated_data_type *find_data_type(struct data_loc_info *dloc)
dloc->fbreg = -1;
if (find_data_type_die(dloc, &type_die) < 0)
- goto out;
-
- result = dso__findnew_data_type(dso, &type_die);
+ return NULL;
-out:
- debuginfo__delete(dloc->di);
- return result;
+ return dso__findnew_data_type(dso, &type_die);
}
static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_entries)
@@ -1911,10 +1737,15 @@ static void print_annotated_data_header(struct hist_entry *he, struct evsel *evs
struct evsel *pos;
int i = 0;
- for_each_group_evsel(pos, evsel)
- printf(" event[%d] = %s\n", i++, pos->name);
+ nr_members = 0;
+ for_each_group_evsel(pos, evsel) {
+ if (symbol_conf.skip_empty &&
+ evsel__hists(pos)->stats.nr_samples == 0)
+ continue;
- nr_members = evsel->core.nr_members;
+ printf(" event[%d] = %s\n", i++, pos->name);
+ nr_members++;
+ }
}
if (symbol_conf.show_total_period) {
@@ -1949,34 +1780,29 @@ static void print_annotated_data_type(struct annotated_data_type *mem_type,
{
struct annotated_member *child;
struct type_hist *h = mem_type->histograms[evsel->core.idx];
- int i, nr_events = 1, samples = 0;
+ int i, nr_events = 0, samples = 0;
u64 period = 0;
int width = symbol_conf.show_total_period ? 11 : 7;
+ struct evsel *pos;
- for (i = 0; i < member->size; i++) {
- samples += h->addr[member->offset + i].nr_samples;
- period += h->addr[member->offset + i].period;
- }
- print_annotated_data_value(h, period, samples);
+ for_each_group_evsel(pos, evsel) {
+ h = mem_type->histograms[pos->core.idx];
- if (evsel__is_group_event(evsel)) {
- struct evsel *pos;
-
- for_each_group_member(pos, evsel) {
- h = mem_type->histograms[pos->core.idx];
+ if (symbol_conf.skip_empty &&
+ evsel__hists(pos)->stats.nr_samples == 0)
+ continue;
- samples = 0;
- period = 0;
- for (i = 0; i < member->size; i++) {
- samples += h->addr[member->offset + i].nr_samples;
- period += h->addr[member->offset + i].period;
- }
- print_annotated_data_value(h, period, samples);
+ samples = 0;
+ period = 0;
+ for (i = 0; i < member->size; i++) {
+ samples += h->addr[member->offset + i].nr_samples;
+ period += h->addr[member->offset + i].period;
}
- nr_events = evsel->core.nr_members;
+ print_annotated_data_value(h, period, samples);
+ nr_events++;
}
- printf(" %10d %10d %*s%s\t%s",
+ printf(" %#10x %#10x %*s%s\t%s",
member->offset, member->size, indent, "", member->type_name,
member->var_name ?: "");
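A side note on the group-event handling above: both the header and the per-member output now count only events that actually have samples when skip_empty is set. A minimal standalone sketch of that counting follows; the names are invented, and the real code walks evsel groups via for_each_group_evsel():

#include <stdio.h>

struct toy_evsel {
	const char *name;
	unsigned long nr_samples;
};

/* print and count only the group members that got samples */
static int print_group_events(const struct toy_evsel *evs, int n, int skip_empty)
{
	int i, nr_members = 0;

	for (i = 0; i < n; i++) {
		if (skip_empty && evs[i].nr_samples == 0)
			continue;
		printf("  event[%d] = %s\n", nr_members++, evs[i].name);
	}
	return nr_members;
}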
diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h
index 0a57d9f5ee78..8ac0fd94a0ba 100644
--- a/tools/perf/util/annotate-data.h
+++ b/tools/perf/util/annotate-data.h
@@ -6,6 +6,12 @@
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/types.h>
+#include "dwarf-regs.h"
+#include "annotate.h"
+
+#ifdef HAVE_DWARF_SUPPORT
+#include "debuginfo.h"
+#endif
struct annotated_op_loc;
struct debuginfo;
@@ -15,6 +21,23 @@ struct hist_entry;
struct map_symbol;
struct thread;
+#define pr_debug_dtp(fmt, ...) \
+do { \
+ if (debug_type_profile) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug3(fmt, ##__VA_ARGS__); \
+} while (0)
+
+enum type_state_kind {
+ TSR_KIND_INVALID = 0,
+ TSR_KIND_TYPE,
+ TSR_KIND_PERCPU_BASE,
+ TSR_KIND_CONST,
+ TSR_KIND_POINTER,
+ TSR_KIND_CANARY,
+};
+
/**
* struct annotated_member - Type of member field
* @node: List entry in the parent list
@@ -100,9 +123,9 @@ struct data_loc_info {
u64 var_addr;
u8 cpumode;
struct annotated_op_loc *op;
+ struct debuginfo *di;
/* These are used internally */
- struct debuginfo *di;
int fbreg;
bool fb_cfa;
@@ -143,6 +166,52 @@ struct annotated_data_stat {
extern struct annotated_data_stat ann_data_stat;
#ifdef HAVE_DWARF_SUPPORT
+/*
+ * Type information in a register, valid when @ok is true.
+ * The @caller_saved registers are invalidated after a function call.
+ */
+struct type_state_reg {
+ Dwarf_Die type;
+ u32 imm_value;
+ bool ok;
+ bool caller_saved;
+ u8 kind;
+ u8 copied_from;
+};
+
+/* Type information in a stack location, dynamically allocated */
+struct type_state_stack {
+ struct list_head list;
+ Dwarf_Die type;
+ int offset;
+ int size;
+ bool compound;
+ u8 kind;
+};
+
+/* FIXME: This should be arch-dependent */
+#ifdef __powerpc__
+#define TYPE_STATE_MAX_REGS 32
+#else
+#define TYPE_STATE_MAX_REGS 16
+#endif
+
+/*
+ * State table to maintain type info in each register and stack location.
+ * It'll be updated when a new variable is allocated or type info is moved
+ * to a new location (register or stack). As it'd be used with the
+ * shortest path of basic blocks, it only maintains a single table.
+ */
+struct type_state {
+ /* state of general purpose registers */
+ struct type_state_reg regs[TYPE_STATE_MAX_REGS];
+ /* state of stack location */
+ struct list_head stack_vars;
+ /* return value register */
+ int ret_reg;
+ /* stack pointer register */
+ int stack_reg;
+};
/* Returns data type at the location (ip, reg, offset) */
struct annotated_data_type *find_data_type(struct data_loc_info *dloc);
@@ -160,6 +229,21 @@ void global_var_type__tree_delete(struct rb_root *root);
int hist_entry__annotate_data_tty(struct hist_entry *he, struct evsel *evsel);
+bool has_reg_type(struct type_state *state, int reg);
+struct type_state_stack *findnew_stack_state(struct type_state *state,
+ int offset, u8 kind,
+ Dwarf_Die *type_die);
+void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
+ Dwarf_Die *type_die);
+struct type_state_stack *find_stack_state(struct type_state *state,
+ int offset);
+bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
+ u64 ip, u64 var_addr, int *var_offset,
+ Dwarf_Die *type_die);
+bool get_global_var_info(struct data_loc_info *dloc, u64 addr,
+ const char **var_name, int *var_offset);
+void pr_debug_type_name(Dwarf_Die *die, enum type_state_kind kind);
+
#else /* HAVE_DWARF_SUPPORT */
static inline struct annotated_data_type *
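One structural point worth spelling out from the header changes above: update_insn_state() now dispatches through an arch callback instead of hard-coding x86. The field itself is not visible in this hunk; judging from the call site dloc->arch->update_insn_state(state, dloc, cu_die, dl), the hook presumably looks something like this (signature inferred, not confirmed by this diff):

struct arch {
	/* ... existing disasm fields ... */
	void (*update_insn_state)(struct type_state *state,
				  struct data_loc_info *dloc,
				  Dwarf_Die *cu_die,
				  struct disasm_line *dl);
};

Each architecture that supports instruction tracking (x86 and powerpc, per arch_supports_insn_tracking()) would install its own handler here.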
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 1451caf25e77..37ce43c4eb8f 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -25,6 +25,7 @@
#include "srcline.h"
#include "units.h"
#include "debug.h"
+#include "debuginfo.h"
#include "annotate.h"
#include "annotate-data.h"
#include "evsel.h"
@@ -40,6 +41,7 @@
#include "namespaces.h"
#include "thread.h"
#include "hashmap.h"
+#include "strbuf.h"
#include <regex.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
@@ -47,6 +49,7 @@
#include <linux/zalloc.h>
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
+#include <math.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
@@ -265,22 +268,30 @@ struct annotated_branch *annotation__get_branch(struct annotation *notes)
return notes->branch;
}
-static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
+static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym,
+ unsigned int br_cntr_nr)
{
struct annotation *notes = symbol__annotation(sym);
struct annotated_branch *branch;
+ const size_t size = symbol__size(sym);
branch = annotation__get_branch(notes);
if (branch == NULL)
return NULL;
if (branch->cycles_hist == NULL) {
- const size_t size = symbol__size(sym);
-
branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
+ if (!branch->cycles_hist)
+ return NULL;
+ }
+
+ if (br_cntr_nr && branch->br_cntr == NULL) {
+ branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64));
+ if (!branch->br_cntr)
+ return NULL;
}
- return branch->cycles_hist;
+ return branch;
}
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
@@ -315,16 +326,45 @@ static int symbol__inc_addr_samples(struct map_symbol *ms,
return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
}
-static int symbol__account_cycles(u64 addr, u64 start,
- struct symbol *sym, unsigned cycles)
+static int symbol__account_br_cntr(struct annotated_branch *branch,
+ struct evsel *evsel,
+ unsigned offset,
+ u64 br_cntr)
+{
+ unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr;
+ unsigned int base = evsel__leader(evsel)->br_cntr_idx;
+ unsigned int off = offset * evsel->evlist->nr_br_cntr;
+ u64 *branch_br_cntr = branch->br_cntr;
+ unsigned int i, mask, width;
+
+ if (!br_cntr || !branch_br_cntr)
+ return 0;
+
+ perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
+ mask = (1L << width) - 1;
+ for (i = 0; i < br_cntr_nr; i++) {
+ u64 cntr = (br_cntr >> i * width) & mask;
+
+ branch_br_cntr[off + i + base] += cntr;
+ if (cntr == mask)
+ branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG;
+ }
+
+ return 0;
+}
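/*
 * [editor's sketch, not part of the patch] The loop above unpacks one
 * saturating counter per event from the PERF_SAMPLE_BRANCH_COUNTERS
 * payload: each counter takes 'width' bits. A standalone model of the
 * extraction; with width == 2 (mask == 0x3), a packed value of 0xd
 * yields 1 for event 0 and 3 (== mask, i.e. saturated) for event 1:
 */
#include <stdint.h>

static inline uint64_t br_cntr_field(uint64_t packed, unsigned int idx,
				     unsigned int width)
{
	uint64_t mask = (1ULL << width) - 1;

	return (packed >> (idx * width)) & mask;
}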
+
+static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym,
+ unsigned cycles, struct evsel *evsel,
+ u64 br_cntr)
{
- struct cyc_hist *cycles_hist;
+ struct annotated_branch *branch;
unsigned offset;
+ int ret;
if (sym == NULL)
return 0;
- cycles_hist = symbol__cycles_hist(sym);
- if (cycles_hist == NULL)
+ branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr);
+ if (!branch)
return -ENOMEM;
if (addr < sym->start || addr >= sym->end)
return -ERANGE;
@@ -336,15 +376,22 @@ static int symbol__account_cycles(u64 addr, u64 start,
start = 0;
}
offset = addr - sym->start;
- return __symbol__account_cycles(cycles_hist,
+ ret = __symbol__account_cycles(branch->cycles_hist,
start ? start - sym->start : 0,
offset, cycles,
!!start);
+
+ if (ret)
+ return ret;
+
+ return symbol__account_br_cntr(branch, evsel, offset, br_cntr);
}
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
struct addr_map_symbol *start,
- unsigned cycles)
+ unsigned cycles,
+ struct evsel *evsel,
+ u64 br_cntr)
{
u64 saddr = 0;
int err;
@@ -370,7 +417,7 @@ int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
start ? start->addr : 0,
ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
saddr);
- err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
+ err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr);
if (err)
pr_debug2("account_cycles failed %d\n", err);
return err;
@@ -411,6 +458,7 @@ static void annotated_branch__delete(struct annotated_branch *branch)
{
if (branch) {
zfree(&branch->cycles_hist);
+ free(branch->br_cntr);
free(branch);
}
}
@@ -454,8 +502,10 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
}
}
-static int annotation__compute_ipc(struct annotation *notes, size_t size)
+static int annotation__compute_ipc(struct annotation *notes, size_t size,
+ struct evsel *evsel)
{
+ unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr;
int err = 0;
s64 offset;
@@ -490,6 +540,20 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size)
al->cycles->max = ch->cycles_max;
al->cycles->min = ch->cycles_min;
}
+ if (al && notes->branch->br_cntr) {
+ if (!al->br_cntr) {
+ al->br_cntr = calloc(br_cntr_nr, sizeof(u64));
+ if (!al->br_cntr) {
+ err = ENOMEM;
+ break;
+ }
+ }
+ al->num_aggr = ch->num_aggr;
+ al->br_cntr_nr = br_cntr_nr;
+ al->evsel = evsel;
+ memcpy(al->br_cntr, &notes->branch->br_cntr[offset * br_cntr_nr],
+ br_cntr_nr * sizeof(u64));
+ }
}
}
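/*
 * [editor's sketch, not part of the patch] branch->br_cntr is allocated
 * as a flat [symbol size][nr_br_cntr] array (see the calloc in
 * symbol__find_branch_hist above), so the per-line slice copied here
 * starts at offset * br_cntr_nr:
 *
 *	u64 *line_cntrs = &notes->branch->br_cntr[offset * br_cntr_nr];
 */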
@@ -501,8 +565,10 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size)
struct annotation_line *al;
al = annotated_source__get_line(notes->src, offset);
- if (al)
+ if (al) {
zfree(&al->cycles);
+ zfree(&al->br_cntr);
+ }
}
}
}
@@ -699,13 +765,13 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
int percent_type)
{
struct disasm_line *dl = container_of(al, struct disasm_line, al);
+ struct annotation *notes = symbol__annotation(sym);
static const char *prev_line;
if (al->offset != -1) {
double max_percent = 0.0;
int i, nr_percent = 1;
const char *color;
- struct annotation *notes = symbol__annotation(sym);
for (i = 0; i < al->data_nr; i++) {
double percent;
@@ -775,14 +841,11 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
} else if (max_lines && printed >= max_lines)
return 1;
else {
- int width = symbol_conf.show_total_period ? 12 : 8;
+ int width = annotation__pcnt_width(notes);
if (queue)
return -1;
- if (evsel__is_group_event(evsel))
- width *= evsel->core.nr_members;
-
if (!*al->line)
printf(" %*s:\n", width, " ");
else
@@ -851,6 +914,10 @@ static void annotation__calc_percent(struct annotation *notes,
BUG_ON(i >= al->data_nr);
+ if (symbol_conf.skip_empty &&
+ evsel__hists(evsel)->stats.nr_samples == 0)
+ continue;
+
data = &al->data[i++];
calc_percent(notes, evsel, data, al->offset, end);
@@ -904,7 +971,7 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
.options = &annotate_opts,
};
struct arch *arch = NULL;
- int err;
+ int err, nr;
err = evsel__get_arch(evsel, &arch);
if (err < 0)
@@ -925,6 +992,19 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
return -1;
}
+ nr = 0;
+ if (evsel__is_group_event(evsel)) {
+ struct evsel *pos;
+
+ for_each_group_evsel(pos, evsel) {
+ if (symbol_conf.skip_empty &&
+ evsel__hists(pos)->stats.nr_samples == 0)
+ continue;
+ nr++;
+ }
+ }
+ notes->src->nr_events = nr ? nr : 1;
+
if (annotate_opts.full_addr)
notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
else
@@ -1106,7 +1186,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
int more = 0;
bool context = opts->context;
u64 len;
- int width = symbol_conf.show_total_period ? 12 : 8;
+ int width = annotation__pcnt_width(notes);
int graph_dotted_len;
char buf[512];
@@ -1122,7 +1202,6 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
len = symbol__size(sym);
if (evsel__is_group_event(evsel)) {
- width *= evsel->core.nr_members;
evsel__group_desc(evsel, buf, sizeof(buf));
evsel_name = buf;
}
@@ -1594,13 +1673,12 @@ bool ui__has_annotation(void)
static double annotation_line__max_percent(struct annotation_line *al,
- struct annotation *notes,
unsigned int percent_type)
{
double percent_max = 0.0;
int i;
- for (i = 0; i < notes->src->nr_events; i++) {
+ for (i = 0; i < al->data_nr; i++) {
double percent;
percent = annotation_data__percent(&al->data[i],
@@ -1662,6 +1740,149 @@ static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
ipc, coverage);
}
+int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header)
+{
+ struct evsel *pos;
+ struct strbuf sb;
+
+ if (evsel->evlist->nr_br_cntr <= 0)
+ return -ENOTSUP;
+
+ strbuf_init(&sb, /*hint=*/ 0);
+
+ if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n"))
+ goto err;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
+ continue;
+ if (header && strbuf_addf(&sb, "#"))
+ goto err;
+
+ if (strbuf_addf(&sb, " %s = %s\n", pos->name, pos->abbr_name))
+ goto err;
+ }
+
+ if (header && strbuf_addf(&sb, "#"))
+ goto err;
+ if (strbuf_addf(&sb, " '-' No event occurs\n"))
+ goto err;
+
+ if (header && strbuf_addf(&sb, "#"))
+ goto err;
+ if (strbuf_addf(&sb, " '+' Event occurrences may be lost due to branch counter saturated\n"))
+ goto err;
+
+ *str = strbuf_detach(&sb, NULL);
+
+ return 0;
+err:
+ strbuf_release(&sb);
+ return -ENOMEM;
+}
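/*
 * [editor's sketch, not part of the patch] With header == true and two
 * counter events (names and abbreviations invented for illustration),
 * the function above produces:
 *
 *	# Branch counter abbr list:
 *	# branch-instructions = A
 *	# branch-misses = B
 *	# '-' No event occurs
 *	# '+' Event occurrences may be lost due to branch counter saturated
 */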
+
+/* Assume the branch counter saturates at 3 */
+#define ANNOTATION_BR_CNTR_SATURATION 3
+
+int annotation_br_cntr_entry(char **str, int br_cntr_nr,
+ u64 *br_cntr, int num_aggr,
+ struct evsel *evsel)
+{
+ struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL;
+ bool saturated = false;
+ int i, j, avg, used;
+ struct strbuf sb;
+
+ strbuf_init(&sb, /*hint=*/ 0);
+ for (i = 0; i < br_cntr_nr; i++) {
+ used = 0;
+ avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) /
+ (double)num_aggr);
+
+ /*
+ * A histogram with the abbr name is displayed by default.
+		 * With -v, the exact branch counter values are displayed.
+ */
+ if (verbose) {
+ evlist__for_each_entry_from(evsel->evlist, pos) {
+ if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
+ (pos->br_cntr_idx == i))
+ break;
+ }
+ if (strbuf_addstr(&sb, pos->abbr_name))
+ goto err;
+
+ if (!br_cntr[i]) {
+ if (strbuf_addstr(&sb, "=-"))
+ goto err;
+ } else {
+ if (strbuf_addf(&sb, "=%d", avg))
+ goto err;
+ }
+ if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) {
+ if (strbuf_addch(&sb, '+'))
+ goto err;
+ } else {
+ if (strbuf_addch(&sb, ' '))
+ goto err;
+ }
+
+ if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ','))
+ goto err;
+ continue;
+ }
+
+ if (strbuf_addch(&sb, '|'))
+ goto err;
+
+ if (!br_cntr[i]) {
+ if (strbuf_addch(&sb, '-'))
+ goto err;
+ used++;
+ } else {
+ evlist__for_each_entry_from(evsel->evlist, pos) {
+ if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
+ (pos->br_cntr_idx == i))
+ break;
+ }
+ if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG)
+ saturated = true;
+
+ for (j = 0; j < avg; j++, used++) {
+ /* Print + if the number of logged events > 3 */
+ if (j >= ANNOTATION_BR_CNTR_SATURATION) {
+ saturated = true;
+ break;
+ }
+ if (strbuf_addstr(&sb, pos->abbr_name))
+ goto err;
+ }
+
+ if (saturated) {
+ if (strbuf_addch(&sb, '+'))
+ goto err;
+ used++;
+ }
+ pos = list_next_entry(pos, core.node);
+ }
+
+ for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) {
+ if (strbuf_addch(&sb, ' '))
+ goto err;
+ }
+ }
+
+ if (!verbose && strbuf_addch(&sb, br_cntr_nr ? '|' : ' '))
+ goto err;
+
+ *str = strbuf_detach(&sb, NULL);
+
+ return 0;
+err:
+ strbuf_release(&sb);
+ return -ENOMEM;
+}
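/*
 * [editor's sketch, not part of the patch] In the default mode the
 * function above renders one fixed-width cell per counter: '|', then the
 * abbr name once per averaged occurrence (capped at 3, then '+'), or '-'
 * when the counter is zero. With counter A averaging 2 and counter B
 * saturated, the entry comes out as:
 *
 *	|AA  |B+  |
 *
 * With -v the exact values are printed instead, e.g. "A=2 ,B=5+".
 */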
+
static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
bool first_line, bool current_entry, bool change_color, int width,
void *obj, unsigned int percent_type,
@@ -1672,7 +1893,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
void (*obj__write_graph)(void *obj, int graph))
{
- double percent_max = annotation_line__max_percent(al, notes, percent_type);
+ double percent_max = annotation_line__max_percent(al, percent_type);
int pcnt_width = annotation__pcnt_width(notes),
cycles_width = annotation__cycles_width(notes);
bool show_title = false;
@@ -1690,7 +1911,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
if (al->offset != -1 && percent_max != 0.0) {
int i;
- for (i = 0; i < notes->src->nr_events; i++) {
+ for (i = 0; i < al->data_nr; i++) {
double percent;
percent = annotation_data__percent(&al->data[i], percent_type);
@@ -1699,10 +1920,10 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
if (symbol_conf.show_total_period) {
obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
} else if (symbol_conf.show_nr_samples) {
- obj__printf(obj, "%6" PRIu64 " ",
+ obj__printf(obj, "%7" PRIu64 " ",
al->data[i].he.nr_samples);
} else {
- obj__printf(obj, "%6.2f ", percent);
+ obj__printf(obj, "%7.2f ", percent);
}
}
} else {
@@ -1758,6 +1979,22 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
"Cycle(min/max)");
}
+ if (annotate_opts.show_br_cntr) {
+ if (show_title) {
+ obj__printf(obj, "%*s ",
+ ANNOTATION__BR_CNTR_WIDTH,
+ "Branch Counter");
+ } else {
+ char *buf;
+
+ if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
+ al->num_aggr, al->evsel)) {
+ obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
+ free(buf);
+ }
+ }
+ }
+
if (show_title && !*al->line) {
ipc_coverage_string(bf, sizeof(bf), notes);
obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
@@ -1843,10 +2080,7 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
size_t size = symbol__size(sym);
- int nr_pcnt = 1, err;
-
- if (evsel__is_group_event(evsel))
- nr_pcnt = evsel->core.nr_members;
+ int err;
err = symbol__annotate(ms, evsel, parch);
if (err)
@@ -1857,13 +2091,11 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
annotation__set_index(notes);
annotation__mark_jump_targets(notes, sym);
- err = annotation__compute_ipc(notes, size);
+ err = annotation__compute_ipc(notes, size, evsel);
if (err)
return err;
annotation__init_column_widths(notes, sym);
- notes->src->nr_events = nr_pcnt;
-
annotation__update_column_widths(notes);
sym->annotate2 = 1;
@@ -2123,20 +2355,33 @@ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
for_each_insn_op_loc(loc, i, op_loc) {
const char *insn_str = ops->source.raw;
bool multi_regs = ops->source.multi_regs;
+ bool mem_ref = ops->source.mem_ref;
if (i == INSN_OP_TARGET) {
insn_str = ops->target.raw;
multi_regs = ops->target.multi_regs;
+ mem_ref = ops->target.mem_ref;
}
/* Invalidate the register by default */
op_loc->reg1 = -1;
op_loc->reg2 = -1;
- if (insn_str == NULL)
- continue;
+ if (insn_str == NULL) {
+ if (!arch__is(arch, "powerpc"))
+ continue;
+ }
- if (strchr(insn_str, arch->objdump.memory_ref_char)) {
+ /*
+ * For powerpc, call the get_powerpc_regs() function, which extracts
+ * the required fields for op_loc, i.e. reg1, reg2 and offset, from
+ * the raw instruction.
+ */
+ if (arch__is(arch, "powerpc")) {
+ op_loc->mem_ref = mem_ref;
+ op_loc->multi_regs = multi_regs;
+ get_powerpc_regs(dl->raw.raw_insn, !i, op_loc);
+ } else if (strchr(insn_str, arch->objdump.memory_ref_char)) {
op_loc->mem_ref = true;
op_loc->multi_regs = multi_regs;
extract_reg_offset(arch, insn_str, op_loc);
@@ -2216,7 +2461,7 @@ static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
return NULL;
istat->name = strdup(name);
- if (istat->name == NULL) {
+ if ((istat->name == NULL) || (!strlen(istat->name))) {
free(istat);
return NULL;
}
@@ -2230,6 +2475,7 @@ static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
if (arch__is(arch, "x86")) {
if (!strncmp(dl->ins.name, "push", 4) ||
!strncmp(dl->ins.name, "pop", 3) ||
+ !strncmp(dl->ins.name, "call", 4) ||
!strncmp(dl->ins.name, "ret", 3))
return true;
}
@@ -2313,6 +2559,20 @@ u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
return map__rip_2objdump(ms->map, addr);
}
+static struct debuginfo_cache {
+ struct dso *dso;
+ struct debuginfo *dbg;
+} di_cache;
+
+void debuginfo_cache__delete(void)
+{
+ dso__put(di_cache.dso);
+ di_cache.dso = NULL;
+
+ debuginfo__delete(di_cache.dbg);
+ di_cache.dbg = NULL;
+}
+
/**
* hist_entry__get_data_type - find data type for given hist entry
* @he: hist entry
@@ -2347,6 +2607,27 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
return NULL;
}
+ /*
+ * di_cache holds a pair of values, but code below assumes
+ * di_cache.dso can be compared/updated and di_cache.dbg can be
+ * read/updated independently from each other. That assumption only
+ * holds in single threaded code.
+ */
+ assert(perf_singlethreaded);
+
+ if (map__dso(ms->map) != di_cache.dso) {
+ dso__put(di_cache.dso);
+ di_cache.dso = dso__get(map__dso(ms->map));
+
+ debuginfo__delete(di_cache.dbg);
+ di_cache.dbg = debuginfo__new(dso__long_name(di_cache.dso));
+ }
+
+ if (di_cache.dbg == NULL) {
+ ann_data_stat.no_dbginfo++;
+ return NULL;
+ }
+
/* Make sure it has the disasm of the function */
if (symbol__annotate(ms, evsel, &arch) < 0) {
ann_data_stat.no_insn++;
@@ -2391,6 +2672,7 @@ retry:
.ip = ms->sym->start + dl->al.offset,
.cpumode = he->cpumode,
.op = op_loc,
+ .di = di_cache.dbg,
};
if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
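
As an aside on the histogram format produced by annotation_br_cntr_entry() above: each counter cell repeats the event's abbr once per occurrence up to the saturation limit, appends '+' once saturated, and prints '-' when nothing was counted. A minimal standalone sketch of that rendering (the helper and its names are hypothetical, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SATURATION	3
#define SATURATED_FLAG	(1ULL << 63)

static void render_cell(char abbr, uint64_t cnt, char *buf, size_t sz)
{
	uint64_t n = cnt & ~SATURATED_FLAG;
	bool saturated = cnt & SATURATED_FLAG;
	size_t pos = 0;

	if (n == 0) {
		snprintf(buf, sz, "-");
		return;
	}
	if (n > SATURATION) {
		n = SATURATION;		/* cap the histogram width */
		saturated = true;
	}
	while (n-- && pos + 1 < sz)
		buf[pos++] = abbr;
	if (saturated && pos + 1 < sz)
		buf[pos++] = '+';	/* occurrences may have been lost */
	buf[pos] = '\0';
}

int main(void)
{
	char buf[8];

	render_cell('A', 2, buf, sizeof(buf));			/* "AA"   */
	puts(buf);
	render_cell('B', 5 | SATURATED_FLAG, buf, sizeof(buf));	/* "BBB+" */
	puts(buf);
	render_cell('C', 0, buf, sizeof(buf));			/* "-"    */
	puts(buf);
	return 0;
}
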
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index d5c821c22f79..8b9e05a1932f 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -14,6 +14,7 @@
#include "spark.h"
#include "hashmap.h"
#include "disasm.h"
+#include "branch.h"
struct hist_browser_timer;
struct hist_entry;
@@ -30,6 +31,7 @@ struct annotated_data_type;
#define ANNOTATION__CYCLES_WIDTH 6
#define ANNOTATION__MINMAX_CYCLES_WIDTH 19
#define ANNOTATION__AVG_IPC_WIDTH 36
+#define ANNOTATION__BR_CNTR_WIDTH 30
#define ANNOTATION_DUMMY_LEN 256
struct annotation_options {
@@ -43,6 +45,7 @@ struct annotation_options {
show_nr_jumps,
show_minmax_cycle,
show_asm_raw,
+ show_br_cntr,
annotate_src,
full_addr;
u8 offset_level;
@@ -103,6 +106,10 @@ struct annotation_line {
char *fileloc;
char *path;
struct cycles_info *cycles;
+ int num_aggr;
+ int br_cntr_nr;
+ u64 *br_cntr;
+ struct evsel *evsel;
int jump_sources;
u32 idx;
int idx_asm;
@@ -113,7 +120,10 @@ struct annotation_line {
struct disasm_line {
struct ins ins;
struct ins_operands ops;
-
+ union {
+ u8 bytes[4];
+ u32 raw_insn;
+ } raw;
/* This needs to be at the end. */
struct annotation_line al;
};
@@ -285,6 +295,9 @@ struct annotated_source {
struct annotation_line *annotated_source__get_line(struct annotated_source *src,
s64 offset);
+/* Flag set once a branch counter has saturated */
+#define ANNOTATION__BR_CNTR_SATURATED_FLAG (1ULL << 63)
+
/**
* struct annotated_branch - basic block and IPC information for a symbol.
*
@@ -294,6 +307,7 @@ struct annotation_line *annotated_source__get_line(struct annotated_source *src,
* @cover_insn: Number of distinct, actually executed instructions.
* @cycles_hist: Array of cyc_hist for each instruction.
* @max_coverage: Maximum number of covered basic block (used for block-range).
+ * @br_cntr: Array of the occurrences of events (branch counters) during a block.
*
* This struct is used by two different codes when the sample has branch stack
* and cycles information. annotation__compute_ipc() calculates average IPC
@@ -310,6 +324,7 @@ struct annotated_branch {
unsigned int cover_insn;
struct cyc_hist *cycles_hist;
u64 max_coverage;
+ u64 *br_cntr;
};
struct LOCKABLE annotation {
@@ -336,7 +351,7 @@ static inline int annotation__cycles_width(struct annotation *notes)
static inline int annotation__pcnt_width(struct annotation *notes)
{
- return (symbol_conf.show_total_period ? 12 : 7) * notes->src->nr_events;
+ return (symbol_conf.show_total_period ? 12 : 8) * notes->src->nr_events;
}
static inline bool annotation_line__filter(struct annotation_line *al)
@@ -344,6 +359,11 @@ static inline bool annotation_line__filter(struct annotation_line *al)
return annotate_opts.hide_src_code && al->offset == -1;
}
+static inline u8 annotation__br_cntr_width(void)
+{
+ return annotate_opts.show_br_cntr ? ANNOTATION__BR_CNTR_WIDTH : 0;
+}
+
void annotation__update_column_widths(struct annotation *notes);
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms);
@@ -380,7 +400,9 @@ struct annotated_branch *annotation__get_branch(struct annotation *notes);
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
struct addr_map_symbol *start,
- unsigned cycles);
+ unsigned cycles,
+ struct evsel *evsel,
+ u64 br_cntr);
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
struct evsel *evsel, u64 addr);
@@ -540,4 +562,9 @@ struct annotated_basic_block {
int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
struct list_head *head);
+void debuginfo_cache__delete(void);
+
+int annotation_br_cntr_entry(char **str, int br_cntr_nr, u64 *br_cntr,
+ int num_aggr, struct evsel *evsel);
+int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header);
#endif /* __PERF_ANNOTATE_H */
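
A note on the raw union added to struct disasm_line above: it overlays the 32-bit instruction word with its individual bytes, so the byte view follows host endianness. A self-contained sketch (the opcode value is arbitrary):

#include <stdint.h>
#include <stdio.h>

union raw_insn {
	uint8_t bytes[4];
	uint32_t word;
};

int main(void)
{
	union raw_insn r = { .word = 0x7c0802a6 };

	/* prints a6 02 08 7c on a little-endian host */
	printf("%02x %02x %02x %02x\n",
	       r.bytes[0], r.bytes[1], r.bytes[2], r.bytes[3]);
	return 0;
}
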
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index afbd5869f6bf..138ffc71b32d 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -899,7 +899,7 @@ static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
static int arm_spe_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
int err = 0;
u64 timestamp;
@@ -947,7 +947,7 @@ static int arm_spe_process_event(struct perf_session *session,
static int arm_spe_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
@@ -985,7 +985,7 @@ static int arm_spe_process_auxtrace_event(struct perf_session *session,
}
static int arm_spe_flush(struct perf_session *session __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
@@ -1073,35 +1073,6 @@ static void arm_spe_print_info(__u64 *arr)
fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
}
-struct arm_spe_synth {
- struct perf_tool dummy_tool;
- struct perf_session *session;
-};
-
-static int arm_spe_event_synth(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- struct arm_spe_synth *arm_spe_synth =
- container_of(tool, struct arm_spe_synth, dummy_tool);
-
- return perf_session__deliver_synth_event(arm_spe_synth->session,
- event, NULL);
-}
-
-static int arm_spe_synth_event(struct perf_session *session,
- struct perf_event_attr *attr, u64 id)
-{
- struct arm_spe_synth arm_spe_synth;
-
- memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth));
- arm_spe_synth.session = session;
-
- return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1,
- &id, arm_spe_event_synth);
-}
-
static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
const char *name)
{
@@ -1172,7 +1143,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_flc = true;
/* Level 1 data cache miss */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->l1d_miss_id = id;
@@ -1180,7 +1151,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
/* Level 1 data cache access */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->l1d_access_id = id;
@@ -1192,7 +1163,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_llc = true;
/* Last level cache miss */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->llc_miss_id = id;
@@ -1200,7 +1171,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
/* Last level cache access */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->llc_access_id = id;
@@ -1212,7 +1183,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_tlb = true;
/* TLB miss */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->tlb_miss_id = id;
@@ -1220,7 +1191,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
/* TLB access */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->tlb_access_id = id;
@@ -1232,7 +1203,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_branch = true;
/* Branch miss */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->branch_miss_id = id;
@@ -1244,7 +1215,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
spe->sample_remote_access = true;
/* Remote access */
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->remote_access_id = id;
@@ -1255,7 +1226,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
if (spe->synth_opts.mem) {
spe->sample_memory = true;
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->memory_id = id;
@@ -1276,7 +1247,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
attr.sample_period = spe->synth_opts.period;
spe->instructions_sample_period = attr.sample_period;
- err = arm_spe_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
spe->instructions_id = id;
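
The deleted arm_spe_event_synth() was an instance of the container_of() trampoline idiom, which perf_session__deliver_synth_attr_event() now hides: embed a callback struct inside a larger one, then recover the outer object inside the callback. A generic, self-contained sketch of the idiom (all names hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	int (*process)(struct tool *tool);
};

struct synth {
	struct tool dummy_tool;	/* embedded, handed to the callback */
	int session_data;
};

static int synth_cb(struct tool *tool)
{
	/* recover the outer object from the embedded member */
	struct synth *s = container_of(tool, struct synth, dummy_tool);

	printf("session_data=%d\n", s->session_data);
	return 0;
}

int main(void)
{
	struct synth s = { .dummy_tool = { .process = synth_cb },
			   .session_data = 42 };

	return s.dummy_tool.process(&s.dummy_tool);
}
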
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index e2f317063eec..ca8682966fae 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -671,11 +671,11 @@ int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
struct evsel *evsel;
- if (!itr->evlist || !itr->pmu)
+ if (!itr->evlist)
return -EINVAL;
evlist__for_each_entry(itr->evlist, evsel) {
- if (evsel->core.attr.type == itr->pmu->type) {
+ if (evsel__is_aux_event(evsel)) {
if (evsel->disabled)
return 0;
return evlist__enable_event_idx(itr->evlist, evsel, idx);
@@ -1240,7 +1240,7 @@ void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int
}
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
- struct perf_tool *tool,
+ const struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process)
{
@@ -1831,7 +1831,7 @@ int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
static int __auxtrace_mmap__read(struct mmap *map,
struct auxtrace_record *itr,
- struct perf_tool *tool, process_auxtrace_t fn,
+ const struct perf_tool *tool, process_auxtrace_t fn,
bool snapshot, size_t snapshot_size)
{
struct auxtrace_mmap *mm = &map->auxtrace_mmap;
@@ -1942,14 +1942,14 @@ static int __auxtrace_mmap__read(struct mmap *map,
}
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
- struct perf_tool *tool, process_auxtrace_t fn)
+ const struct perf_tool *tool, process_auxtrace_t fn)
{
return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}
int auxtrace_mmap__read_snapshot(struct mmap *map,
struct auxtrace_record *itr,
- struct perf_tool *tool, process_auxtrace_t fn,
+ const struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size)
{
return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
@@ -2829,7 +2829,7 @@ int auxtrace_parse_filters(struct evlist *evlist)
}
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
- struct perf_sample *sample, struct perf_tool *tool)
+ struct perf_sample *sample, const struct perf_tool *tool)
{
if (!session->auxtrace)
return 0;
@@ -2847,7 +2847,7 @@ void auxtrace__dump_auxtrace_sample(struct perf_session *session,
session->auxtrace->dump_auxtrace_sample(session, sample);
}
-int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
+int auxtrace__flush_events(struct perf_session *session, const struct perf_tool *tool)
{
if (!session->auxtrace)
return 0;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 8a6ec9565835..a1895a4f530b 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -208,17 +208,17 @@ struct auxtrace {
int (*process_event)(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool);
+ const struct perf_tool *tool);
int (*process_auxtrace_event)(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool);
+ const struct perf_tool *tool);
int (*queue_data)(struct perf_session *session,
struct perf_sample *sample, union perf_event *event,
u64 data_offset);
void (*dump_auxtrace_sample)(struct perf_session *session,
struct perf_sample *sample);
int (*flush_events)(struct perf_session *session,
- struct perf_tool *tool);
+ const struct perf_tool *tool);
void (*free_events)(struct perf_session *session);
void (*free)(struct perf_session *session);
bool (*evsel_is_auxtrace)(struct perf_session *session,
@@ -411,7 +411,6 @@ struct auxtrace_record {
int (*read_finish)(struct auxtrace_record *itr, int idx);
unsigned int alignment;
unsigned int default_aux_sample_size;
- struct perf_pmu *pmu;
struct evlist *evlist;
};
@@ -508,17 +507,17 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
struct evlist *evlist,
struct evsel *evsel, int idx);
-typedef int (*process_auxtrace_t)(struct perf_tool *tool,
+typedef int (*process_auxtrace_t)(const struct perf_tool *tool,
struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2);
int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
- struct perf_tool *tool, process_auxtrace_t fn);
+ const struct perf_tool *tool, process_auxtrace_t fn);
int auxtrace_mmap__read_snapshot(struct mmap *map,
struct auxtrace_record *itr,
- struct perf_tool *tool, process_auxtrace_t fn,
+ const struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size);
int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues);
@@ -639,10 +638,10 @@ int addr_filters__parse_bare_filter(struct addr_filters *filts,
int auxtrace_parse_filters(struct evlist *evlist);
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
- struct perf_sample *sample, struct perf_tool *tool);
+ struct perf_sample *sample, const struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
struct perf_sample *sample);
-int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
+int auxtrace__flush_events(struct perf_session *session, const struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
@@ -809,7 +808,7 @@ static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
return 0;
}
@@ -822,7 +821,7 @@ void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
return 0;
}
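
The const-qualification of struct perf_tool pointers above must propagate through every callback typedef and stub at once, since a pointer-to-const cannot be passed where a function type expects a mutable pointer. The rule in miniature (types hypothetical):

#include <stdio.h>

struct tool {
	int verbose;
};

/* const-qualified callback type, in the spirit of process_auxtrace_t */
typedef int (*process_fn)(const struct tool *tool, const char *buf, int len);

static int process(const struct tool *tool, const char *buf, int len)
{
	if (tool->verbose)		/* reads are fine ...           */
		printf("%.*s\n", len, buf);
	return 0;			/* ... writes would not compile */
}

int main(void)
{
	struct tool t = { .verbose = 1 };
	process_fn fn = process;

	return fn(&t, "hello", 5);
}
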
diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c
index 04068d48683f..649392bee7ed 100644
--- a/tools/perf/util/block-info.c
+++ b/tools/perf/util/block-info.c
@@ -40,16 +40,32 @@ static struct block_header_column {
[PERF_HPP_REPORT__BLOCK_DSO] = {
.name = "Shared Object",
.width = 20,
+ },
+ [PERF_HPP_REPORT__BLOCK_BRANCH_COUNTER] = {
+ .name = "Branch Counter",
+ .width = 30,
}
};
-struct block_info *block_info__new(void)
+static struct block_info *block_info__new(unsigned int br_cntr_nr)
{
- return zalloc(sizeof(struct block_info));
+ struct block_info *bi = zalloc(sizeof(struct block_info));
+
+ if (bi && br_cntr_nr) {
+ bi->br_cntr = calloc(br_cntr_nr, sizeof(u64));
+ if (!bi->br_cntr) {
+ free(bi);
+ return NULL;
+ }
+ }
+
+ return bi;
}
void block_info__delete(struct block_info *bi)
{
+ if (bi)
+ free(bi->br_cntr);
free(bi);
}
@@ -86,7 +102,8 @@ int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
static void init_block_info(struct block_info *bi, struct symbol *sym,
struct cyc_hist *ch, int offset,
- u64 total_cycles)
+ u64 total_cycles, unsigned int br_cntr_nr,
+ u64 *br_cntr, struct evsel *evsel)
{
bi->sym = sym;
bi->start = ch->start;
@@ -99,10 +116,18 @@ static void init_block_info(struct block_info *bi, struct symbol *sym,
memcpy(bi->cycles_spark, ch->cycles_spark,
NUM_SPARKS * sizeof(u64));
+
+ if (br_cntr && br_cntr_nr) {
+ bi->br_cntr_nr = br_cntr_nr;
+ memcpy(bi->br_cntr, &br_cntr[offset * br_cntr_nr],
+ br_cntr_nr * sizeof(u64));
+ }
+ bi->evsel = evsel;
}
int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
- u64 *block_cycles_aggr, u64 total_cycles)
+ u64 *block_cycles_aggr, u64 total_cycles,
+ unsigned int br_cntr_nr)
{
struct annotation *notes;
struct cyc_hist *ch;
@@ -125,12 +150,14 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
struct block_info *bi;
struct hist_entry *he_block;
- bi = block_info__new();
+ bi = block_info__new(br_cntr_nr);
if (!bi)
return -1;
init_block_info(bi, he->ms.sym, &ch[i], i,
- total_cycles);
+ total_cycles, br_cntr_nr,
+ notes->branch->br_cntr,
+ hists_to_evsel(he->hists));
cycles += bi->cycles_aggr / bi->num_aggr;
he_block = hists__add_entry_block(&bh->block_hists,
@@ -327,6 +354,24 @@ static void init_block_header(struct block_fmt *block_fmt)
fmt->width = block_column_width;
}
+static int block_branch_counter_entry(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
+ struct block_info *bi = he->block_info;
+ char *buf;
+ int ret;
+
+ if (annotation_br_cntr_entry(&buf, bi->br_cntr_nr, bi->br_cntr,
+ bi->num_aggr, bi->evsel))
+ return 0;
+
+ ret = scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
+ free(buf);
+ return ret;
+}
+
static void hpp_register(struct block_fmt *block_fmt, int idx,
struct perf_hpp_list *hpp_list)
{
@@ -357,6 +402,9 @@ static void hpp_register(struct block_fmt *block_fmt, int idx,
case PERF_HPP_REPORT__BLOCK_DSO:
fmt->entry = block_dso_entry;
break;
+ case PERF_HPP_REPORT__BLOCK_BRANCH_COUNTER:
+ fmt->entry = block_branch_counter_entry;
+ break;
default:
return;
}
@@ -390,7 +438,7 @@ static void init_block_hist(struct block_hist *bh, struct block_fmt *block_fmts,
static int process_block_report(struct hists *hists,
struct block_report *block_report,
u64 total_cycles, int *block_hpps,
- int nr_hpps)
+ int nr_hpps, unsigned int br_cntr_nr)
{
struct rb_node *next = rb_first_cached(&hists->entries);
struct block_hist *bh = &block_report->hist;
@@ -405,7 +453,7 @@ static int process_block_report(struct hists *hists,
while (next) {
he = rb_entry(next, struct hist_entry, rb_node);
block_info__process_sym(he, bh, &block_report->cycles,
- total_cycles);
+ total_cycles, br_cntr_nr);
next = rb_next(&he->rb_node);
}
@@ -435,7 +483,7 @@ struct block_report *block_info__create_report(struct evlist *evlist,
struct hists *hists = evsel__hists(pos);
process_block_report(hists, &block_reports[i], total_cycles,
- block_hpps, nr_hpps);
+ block_hpps, nr_hpps, evlist->nr_br_cntr);
i++;
}
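
init_block_info() above copies one row out of a flattened [offset][counter] array with &br_cntr[offset * br_cntr_nr]. The indexing in isolation, with made-up sizes:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	enum { NR_OFFSETS = 4, NR_CNTRS = 2 };
	/* row-major: counters for offset i start at i * NR_CNTRS */
	uint64_t all[NR_OFFSETS * NR_CNTRS] = { 0, 1, 10, 11, 20, 21, 30, 31 };
	uint64_t block[NR_CNTRS];
	int offset = 2;

	memcpy(block, &all[offset * NR_CNTRS], sizeof(block));
	printf("%" PRIu64 " %" PRIu64 "\n", block[0], block[1]);	/* 20 21 */
	return 0;
}
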
diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h
index 0b9e1aad4c55..b9329dc3ab59 100644
--- a/tools/perf/util/block-info.h
+++ b/tools/perf/util/block-info.h
@@ -18,6 +18,9 @@ struct block_info {
u64 total_cycles;
int num;
int num_aggr;
+ int br_cntr_nr;
+ u64 *br_cntr;
+ struct evsel *evsel;
};
struct block_fmt {
@@ -36,6 +39,7 @@ enum {
PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
PERF_HPP_REPORT__BLOCK_RANGE,
PERF_HPP_REPORT__BLOCK_DSO,
+ PERF_HPP_REPORT__BLOCK_BRANCH_COUNTER,
PERF_HPP_REPORT__BLOCK_MAX_INDEX
};
@@ -46,7 +50,6 @@ struct block_report {
int nr_fmts;
};
-struct block_info *block_info__new(void);
void block_info__delete(struct block_info *bi);
int64_t __block_info__cmp(struct hist_entry *left, struct hist_entry *right);
@@ -55,7 +58,8 @@ int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right);
int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
- u64 *block_cycles_aggr, u64 total_cycles);
+ u64 *block_cycles_aggr, u64 total_cycles,
+ unsigned int br_cntr_nr);
struct block_report *block_info__create_report(struct evlist *evlist,
u64 total_cycles,
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 827695cd0408..13608237c50e 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -170,7 +170,7 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
{
struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
struct perf_record_bpf_event *bpf_event = &event->bpf;
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct bpf_prog_info_node *info_node;
struct perf_bpil *info_linear;
struct bpf_prog_info *info;
@@ -310,7 +310,7 @@ struct kallsyms_parse {
union perf_event *event;
perf_event__handler_t process;
struct machine *machine;
- struct perf_tool *tool;
+ const struct perf_tool *tool;
};
static int
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index 04f98b6bb291..e87b6789eb9e 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -1,12 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Generic event filter for sampling events in BPF.
+ *
+ * The BPF program is fixed; it just reads filter expressions from the 'filters'
+ * map and compares the sample data in order to reject samples that don't match.
+ * Each filter expression contains a sample flag (term) to compare, an operation
+ * (==, >=, and so on) and a value.
+ *
+ * Note that each entry has an array of filter expressions and it only succeeds
+ * when all of the expressions are satisfied. Logical OR is supported through a
+ * GROUP operation, which is satisfied when any of its member expressions
+ * evaluates to true. Nested GROUP operations are not allowed for now.
+ *
+ * To support non-root users, the filters map can be loaded and pinned in the BPF
+ * filesystem by root (perf record --setup-filter pin). Each user then gets a
+ * new entry in the shared filters map to fill with filter expressions, and the
+ * BPF program finds the filter using (task-id, event-id) as a key.
+ *
+ * The pinned BPF object (shared for regular users) has:
+ *
+ * event_hash |
+ * | | |
+ * event->id ---> | id | ---+ idx_hash | filters
+ * | | | | | | | |
+ * | .... | +-> | idx | --+--> | exprs | ---> perf_bpf_filter_entry[]
+ * | | | | | | .op
+ * task id (tgid) --------------+ | .... | | | ... | .term (+ part)
+ * | .value
+ * |
+ * ======= (root would skip this part) ======== (compares it in a loop)
+ *
+ * This is used for per-task use cases, while system-wide profiling (normally by
+ * the root user) uses a separate copy of the program and maps of its own so that
+ * it can proceed even if many non-root users are using the filters at the same
+ * time. In that case the filters map has a single entry and there is no need to
+ * use the hash maps to get the index (key) of the filters map (IOW it's always 0).
+ *
+ * The BPF program returns 1 to accept the sample or 0 to drop it.
+ * The 'dropped' map keeps track of how many samples were dropped by the filter;
+ * they will be reported as lost samples.
+ */
#include <stdlib.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
#include <bpf/bpf.h>
#include <linux/err.h>
+#include <linux/list.h>
+#include <api/fs/fs.h>
#include <internal/xyarray.h>
+#include <perf/threadmap.h>
#include "util/debug.h"
#include "util/evsel.h"
+#include "util/target.h"
#include "util/bpf-filter.h"
#include <util/bpf-filter-flex.h>
@@ -20,6 +68,16 @@
#define __PERF_SAMPLE_TYPE(tt, st, opt) { tt, #st, opt }
#define PERF_SAMPLE_TYPE(_st, opt) __PERF_SAMPLE_TYPE(PBF_TERM_##_st, PERF_SAMPLE_##_st, opt)
+/* Index in the pinned 'filters' map. Should be released after use. */
+struct pinned_filter_idx {
+ struct list_head list;
+ struct evsel *evsel;
+ u64 event_id;
+ int hash_idx;
+};
+
+static LIST_HEAD(pinned_filters);
+
static const struct perf_sample_info {
enum perf_bpf_filter_term type;
const char *name;
@@ -42,8 +100,11 @@ static const struct perf_sample_info {
PERF_SAMPLE_TYPE(TRANSACTION, "--transaction"),
PERF_SAMPLE_TYPE(CODE_PAGE_SIZE, "--code-page-size"),
PERF_SAMPLE_TYPE(DATA_PAGE_SIZE, "--data-page-size"),
+ PERF_SAMPLE_TYPE(CGROUP, "--all-cgroups"),
};
+static int get_pinned_fd(const char *name);
+
static const struct perf_sample_info *get_sample_info(enum perf_bpf_filter_term type)
{
size_t i;
@@ -91,92 +152,452 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
return -1;
}
-int perf_bpf_filter__prepare(struct evsel *evsel)
+static int get_filter_entries(struct evsel *evsel, struct perf_bpf_filter_entry *entry)
{
- int i, x, y, fd;
- struct sample_filter_bpf *skel;
- struct bpf_program *prog;
- struct bpf_link *link;
+ int i = 0;
struct perf_bpf_filter_expr *expr;
- skel = sample_filter_bpf__open_and_load();
- if (!skel) {
- pr_err("Failed to load perf sample-filter BPF skeleton\n");
- return -1;
- }
-
- i = 0;
- fd = bpf_map__fd(skel->maps.filters);
list_for_each_entry(expr, &evsel->bpf_filters, list) {
- struct perf_bpf_filter_entry entry = {
- .op = expr->op,
- .part = expr->part,
- .term = expr->term,
- .value = expr->val,
- };
-
if (check_sample_flags(evsel, expr) < 0)
- return -1;
+ return -EINVAL;
+
+ if (i == MAX_FILTERS)
+ return -E2BIG;
- bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
+ entry[i].op = expr->op;
+ entry[i].part = expr->part;
+ entry[i].term = expr->term;
+ entry[i].value = expr->val;
i++;
if (expr->op == PBF_OP_GROUP_BEGIN) {
struct perf_bpf_filter_expr *group;
list_for_each_entry(group, &expr->groups, list) {
- struct perf_bpf_filter_entry group_entry = {
- .op = group->op,
- .part = group->part,
- .term = group->term,
- .value = group->val,
- };
- bpf_map_update_elem(fd, &i, &group_entry, BPF_ANY);
+ if (i == MAX_FILTERS)
+ return -E2BIG;
+
+ entry[i].op = group->op;
+ entry[i].part = group->part;
+ entry[i].term = group->term;
+ entry[i].value = group->val;
i++;
}
- memset(&entry, 0, sizeof(entry));
- entry.op = PBF_OP_GROUP_END;
- bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
+ if (i == MAX_FILTERS)
+ return -E2BIG;
+
+ entry[i].op = PBF_OP_GROUP_END;
i++;
}
}
- if (i > MAX_FILTERS) {
- pr_err("Too many filters: %d (max = %d)\n", i, MAX_FILTERS);
+ if (i < MAX_FILTERS) {
+ /* to terminate the loop early */
+ entry[i].op = PBF_OP_DONE;
+ i++;
+ }
+ return 0;
+}
+
+static int convert_to_tgid(int tid)
+{
+ char path[128];
+ char *buf, *p, *q;
+ int tgid;
+ size_t len;
+
+ scnprintf(path, sizeof(path), "%d/status", tid);
+ if (procfs__read_str(path, &buf, &len) < 0)
+ return -1;
+
+ p = strstr(buf, "Tgid:");
+ if (p == NULL) {
+ free(buf);
+ return -1;
+ }
+
+ tgid = strtol(p + 6, &q, 0);
+ free(buf);
+ if (*q != '\n')
+ return -1;
+
+ return tgid;
+}
+
+/*
+ * The event might be closed already so we cannot get the list of ids using an
+ * FD like in create_event_hash() below; iterate the event_hash map instead and
+ * delete all entries that have the event id as their value.
+ */
+static void destroy_event_hash(u64 event_id)
+{
+ int fd;
+ u64 key, *prev_key = NULL;
+ int num = 0, alloced = 32;
+ u64 *ids = calloc(alloced, sizeof(*ids));
+
+ if (ids == NULL)
+ return;
+
+ fd = get_pinned_fd("event_hash");
+ if (fd < 0) {
+ pr_debug("cannot get fd for 'event_hash' map\n");
+ free(ids);
+ return;
+ }
+
+ /* Iterate the whole map to collect keys for the event id. */
+ while (!bpf_map_get_next_key(fd, prev_key, &key)) {
+ u64 id;
+
+ if (bpf_map_lookup_elem(fd, &key, &id) == 0 && id == event_id) {
+ if (num == alloced) {
+ void *tmp;
+
+ alloced *= 2;
+ tmp = realloc(ids, alloced * sizeof(*ids));
+ if (tmp == NULL)
+ break;
+
+ ids = tmp;
+ }
+ ids[num++] = key;
+ }
+
+ prev_key = &key;
+ }
+
+ for (int i = 0; i < num; i++)
+ bpf_map_delete_elem(fd, &ids[i]);
+
+ free(ids);
+ close(fd);
+}
+
+/*
+ * Return a representative id if ok, or 0 for failures.
+ *
+ * The perf_event->id is good for this, but an evsel would have multiple
+ * instances for CPUs and tasks. So pick the first id and set up a hash
+ * from the id of each instance to the representative id (the first one).
+ */
+static u64 create_event_hash(struct evsel *evsel)
+{
+ int x, y, fd;
+ u64 the_id = 0, id;
+
+ fd = get_pinned_fd("event_hash");
+ if (fd < 0) {
+ pr_err("cannot get fd for 'event_hash' map\n");
+ return 0;
+ }
+
+ for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
+ for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
+ int ret = ioctl(FD(evsel, x, y), PERF_EVENT_IOC_ID, &id);
+
+ if (ret < 0) {
+ pr_err("Failed to get the event id\n");
+ if (the_id)
+ destroy_event_hash(the_id);
+ return 0;
+ }
+
+ if (the_id == 0)
+ the_id = id;
+
+ bpf_map_update_elem(fd, &id, &the_id, BPF_ANY);
+ }
+ }
+
+ close(fd);
+ return the_id;
+}
+
+static void destroy_idx_hash(struct pinned_filter_idx *pfi)
+{
+ int fd, nr;
+ struct perf_thread_map *threads;
+
+ fd = get_pinned_fd("filters");
+ bpf_map_delete_elem(fd, &pfi->hash_idx);
+ close(fd);
+
+ if (pfi->event_id)
+ destroy_event_hash(pfi->event_id);
+
+ threads = perf_evsel__threads(&pfi->evsel->core);
+ if (threads == NULL)
+ return;
+
+ fd = get_pinned_fd("idx_hash");
+ nr = perf_thread_map__nr(threads);
+ for (int i = 0; i < nr; i++) {
+ /* The target task might be dead already, just try the pid */
+ struct idx_hash_key key = {
+ .evt_id = pfi->event_id,
+ .tgid = perf_thread_map__pid(threads, i),
+ };
+
+ bpf_map_delete_elem(fd, &key);
+ }
+ close(fd);
+}
+
+/* Maintain a hashmap from (tgid, event-id) to filter index */
+static int create_idx_hash(struct evsel *evsel, struct perf_bpf_filter_entry *entry)
+{
+ int filter_idx;
+ int fd, nr, last;
+ u64 event_id = 0;
+ struct pinned_filter_idx *pfi = NULL;
+ struct perf_thread_map *threads;
+
+ fd = get_pinned_fd("filters");
+ if (fd < 0) {
+ pr_err("cannot get fd for 'filters' map\n");
+ return fd;
+ }
+
+ /* Find the first available entry in the filters map */
+ for (filter_idx = 0; filter_idx < MAX_FILTERS; filter_idx++) {
+ if (bpf_map_update_elem(fd, &filter_idx, entry, BPF_NOEXIST) == 0)
+ break;
+ }
+ close(fd);
+
+ if (filter_idx == MAX_FILTERS) {
+ pr_err("Too many users for the filter map\n");
+ return -EBUSY;
+ }
+
+ pfi = zalloc(sizeof(*pfi));
+ if (pfi == NULL) {
+ pr_err("Cannot save pinned filter index\n");
+ goto err;
+ }
+
+ pfi->evsel = evsel;
+ pfi->hash_idx = filter_idx;
+
+ event_id = create_event_hash(evsel);
+ if (event_id == 0) {
+ pr_err("Cannot update the event hash\n");
+ goto err;
+ }
+
+ pfi->event_id = event_id;
+
+ threads = perf_evsel__threads(&evsel->core);
+ if (threads == NULL) {
+ pr_err("Cannot get the thread list of the event\n");
+ goto err;
+ }
+
+ /* save the index to a hash map */
+ fd = get_pinned_fd("idx_hash");
+ if (fd < 0) {
+ pr_err("cannot get fd for 'idx_hash' map\n");
+ goto err;
+ }
+
+ last = -1;
+ nr = perf_thread_map__nr(threads);
+ for (int i = 0; i < nr; i++) {
+ int pid = perf_thread_map__pid(threads, i);
+ int tgid;
+ struct idx_hash_key key = {
+ .evt_id = event_id,
+ };
+
+ /* it actually needs tgid, let's get tgid from /proc. */
+ tgid = convert_to_tgid(pid);
+ if (tgid < 0) {
+ /* the thread may be dead, ignore. */
+ continue;
+ }
+
+ if (tgid == last)
+ continue;
+ last = tgid;
+ key.tgid = tgid;
+
+ if (bpf_map_update_elem(fd, &key, &filter_idx, BPF_ANY) < 0) {
+ pr_err("Failed to update the idx_hash\n");
+ close(fd);
+ goto err;
+ }
+ pr_debug("bpf-filter: idx_hash (task=%d,%s) -> %d\n",
+ tgid, evsel__name(evsel), filter_idx);
+ }
+
+ list_add(&pfi->list, &pinned_filters);
+ close(fd);
+ return filter_idx;
+
+err:
+ destroy_idx_hash(pfi);
+ free(pfi);
+ return -1;
+}
+
+int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
+{
+ int i, x, y, fd, ret;
+ struct sample_filter_bpf *skel = NULL;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ struct perf_bpf_filter_entry *entry;
+ bool needs_idx_hash = !target__has_cpu(target) && !target->uid_str;
+
+ entry = calloc(MAX_FILTERS, sizeof(*entry));
+ if (entry == NULL)
return -1;
+
+ ret = get_filter_entries(evsel, entry);
+ if (ret < 0) {
+ pr_err("Failed to process filter entries\n");
+ goto err;
}
+
+ if (needs_idx_hash && geteuid() != 0) {
+ int zero = 0;
+
+ /* The filters map is shared among other processes */
+ ret = create_idx_hash(evsel, entry);
+ if (ret < 0)
+ goto err;
+
+ fd = get_pinned_fd("dropped");
+ if (fd < 0) {
+ ret = fd;
+ goto err;
+ }
+
+ /* Reset the lost count */
+ bpf_map_update_elem(fd, &ret, &zero, BPF_ANY);
+ close(fd);
+
+ fd = get_pinned_fd("perf_sample_filter");
+ if (fd < 0) {
+ ret = fd;
+ goto err;
+ }
+
+ for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
+ for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
+ ret = ioctl(FD(evsel, x, y), PERF_EVENT_IOC_SET_BPF, fd);
+ if (ret < 0) {
+ pr_err("Failed to attach perf sample-filter\n");
+ close(fd);
+ goto err;
+ }
+ }
+ }
+
+ close(fd);
+ free(entry);
+ return 0;
+ }
+
+ skel = sample_filter_bpf__open_and_load();
+ if (!skel) {
+ ret = -errno;
+ pr_err("Failed to load perf sample-filter BPF skeleton\n");
+ goto err;
+ }
+
+ i = 0;
+ fd = bpf_map__fd(skel->maps.filters);
+
+ /* The filters map has only one entry in this case */
+ if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
+ ret = -errno;
+ pr_err("Failed to update the filter map\n");
+ goto err;
+ }
+
prog = skel->progs.perf_sample_filter;
for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
link = bpf_program__attach_perf_event(prog, FD(evsel, x, y));
if (IS_ERR(link)) {
pr_err("Failed to attach perf sample-filter program\n");
- return PTR_ERR(link);
+ ret = PTR_ERR(link);
+ goto err;
}
}
}
+ free(entry);
evsel->bpf_skel = skel;
return 0;
+
+err:
+ free(entry);
+ if (!list_empty(&pinned_filters)) {
+ struct pinned_filter_idx *pfi, *tmp;
+
+ list_for_each_entry_safe(pfi, tmp, &pinned_filters, list) {
+ destroy_idx_hash(pfi);
+ list_del(&pfi->list);
+ free(pfi);
+ }
+ }
+ sample_filter_bpf__destroy(skel);
+ return ret;
}
int perf_bpf_filter__destroy(struct evsel *evsel)
{
struct perf_bpf_filter_expr *expr, *tmp;
+ struct pinned_filter_idx *pfi, *pos;
list_for_each_entry_safe(expr, tmp, &evsel->bpf_filters, list) {
list_del(&expr->list);
free(expr);
}
sample_filter_bpf__destroy(evsel->bpf_skel);
+
+ list_for_each_entry_safe(pfi, pos, &pinned_filters, list) {
+ destroy_idx_hash(pfi);
+ list_del(&pfi->list);
+ free(pfi);
+ }
return 0;
}
u64 perf_bpf_filter__lost_count(struct evsel *evsel)
{
- struct sample_filter_bpf *skel = evsel->bpf_skel;
+ int count = 0;
+
+ if (list_empty(&evsel->bpf_filters))
+ return 0;
+
+ if (!list_empty(&pinned_filters)) {
+ int fd = get_pinned_fd("dropped");
+ struct pinned_filter_idx *pfi;
+
+ if (fd < 0)
+ return 0;
- return skel ? skel->bss->dropped : 0;
+ list_for_each_entry(pfi, &pinned_filters, list) {
+ if (pfi->evsel != evsel)
+ continue;
+
+ bpf_map_lookup_elem(fd, &pfi->hash_idx, &count);
+ break;
+ }
+ close(fd);
+ } else if (evsel->bpf_skel) {
+ struct sample_filter_bpf *skel = evsel->bpf_skel;
+ int fd = bpf_map__fd(skel->maps.dropped);
+ int idx = 0;
+
+ bpf_map_lookup_elem(fd, &idx, &count);
+ }
+
+ return count;
}
struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
@@ -212,3 +633,139 @@ int perf_bpf_filter__parse(struct list_head *expr_head, const char *str)
return ret;
}
+
+int perf_bpf_filter__pin(void)
+{
+ struct sample_filter_bpf *skel;
+ char *path = NULL;
+ int dir_fd, ret = -1;
+
+ skel = sample_filter_bpf__open();
+ if (!skel) {
+ ret = -errno;
+ pr_err("Failed to open perf sample-filter BPF skeleton\n");
+ goto err;
+ }
+
+ /* pinned program will use pid-hash */
+ bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
+ bpf_map__set_max_entries(skel->maps.event_hash, MAX_EVT_HASH);
+ bpf_map__set_max_entries(skel->maps.idx_hash, MAX_IDX_HASH);
+ bpf_map__set_max_entries(skel->maps.dropped, MAX_FILTERS);
+ skel->rodata->use_idx_hash = 1;
+
+ if (sample_filter_bpf__load(skel) < 0) {
+ ret = -errno;
+ pr_err("Failed to load perf sample-filter BPF skeleton\n");
+ goto err;
+ }
+
+ if (asprintf(&path, "%s/fs/bpf/%s", sysfs__mountpoint(),
+ PERF_BPF_FILTER_PIN_PATH) < 0) {
+ ret = -errno;
+ pr_err("Failed to allocate pathname in the BPF-fs\n");
+ goto err;
+ }
+
+ ret = bpf_object__pin(skel->obj, path);
+ if (ret < 0) {
+ pr_err("Failed to pin BPF filter objects\n");
+ goto err;
+ }
+
+ /* setup access permissions for the pinned objects */
+ dir_fd = open(path, O_PATH);
+ if (dir_fd < 0) {
+ bpf_object__unpin(skel->obj, path);
+ ret = dir_fd;
+ goto err;
+ }
+
+ /* BPF-fs root has the sticky bit */
+ if (fchmodat(dir_fd, "..", 01755, 0) < 0) {
+ pr_debug("chmod for BPF-fs failed\n");
+ ret = -errno;
+ goto err_close;
+ }
+
+ /* perf_filter directory */
+ if (fchmodat(dir_fd, ".", 0755, 0) < 0) {
+ pr_debug("chmod for perf_filter directory failed?\n");
+ ret = -errno;
+ goto err_close;
+ }
+
+ /* programs need write permission for some reason */
+ if (fchmodat(dir_fd, "perf_sample_filter", 0777, 0) < 0) {
+ pr_debug("chmod for perf_sample_filter failed\n");
+ ret = -errno;
+ }
+ /* maps */
+ if (fchmodat(dir_fd, "filters", 0666, 0) < 0) {
+ pr_debug("chmod for filters failed\n");
+ ret = -errno;
+ }
+ if (fchmodat(dir_fd, "event_hash", 0666, 0) < 0) {
+ pr_debug("chmod for event_hash failed\n");
+ ret = -errno;
+ }
+ if (fchmodat(dir_fd, "idx_hash", 0666, 0) < 0) {
+ pr_debug("chmod for idx_hash failed\n");
+ ret = -errno;
+ }
+ if (fchmodat(dir_fd, "dropped", 0666, 0) < 0) {
+ pr_debug("chmod for dropped failed\n");
+ ret = -errno;
+ }
+
+err_close:
+ close(dir_fd);
+
+err:
+ free(path);
+ sample_filter_bpf__destroy(skel);
+ return ret;
+}
+
+int perf_bpf_filter__unpin(void)
+{
+ struct sample_filter_bpf *skel;
+ char *path = NULL;
+ int ret = -1;
+
+ skel = sample_filter_bpf__open_and_load();
+ if (!skel) {
+ ret = -errno;
+ pr_err("Failed to open perf sample-filter BPF skeleton\n");
+ goto err;
+ }
+
+ if (asprintf(&path, "%s/fs/bpf/%s", sysfs__mountpoint(),
+ PERF_BPF_FILTER_PIN_PATH) < 0) {
+ ret = -errno;
+ pr_err("Failed to allocate pathname in the BPF-fs\n");
+ goto err;
+ }
+
+ ret = bpf_object__unpin(skel->obj, path);
+
+err:
+ free(path);
+ sample_filter_bpf__destroy(skel);
+ return ret;
+}
+
+static int get_pinned_fd(const char *name)
+{
+ char *path = NULL;
+ int fd;
+
+ if (asprintf(&path, "%s/fs/bpf/%s/%s", sysfs__mountpoint(),
+ PERF_BPF_FILTER_PIN_PATH, name) < 0)
+ return -1;
+
+ fd = bpf_obj_get(path);
+
+ free(path);
+ return fd;
+}
diff --git a/tools/perf/util/bpf-filter.h b/tools/perf/util/bpf-filter.h
index cd6764442c16..916ed7770b73 100644
--- a/tools/perf/util/bpf-filter.h
+++ b/tools/perf/util/bpf-filter.h
@@ -16,6 +16,10 @@ struct perf_bpf_filter_expr {
};
struct evsel;
+struct target;
+
+/* path in BPF-fs for the pinned program and maps */
+#define PERF_BPF_FILTER_PIN_PATH "perf_filter"
#ifdef HAVE_BPF_SKEL
struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
@@ -23,9 +27,11 @@ struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term
enum perf_bpf_filter_op op,
unsigned long val);
int perf_bpf_filter__parse(struct list_head *expr_head, const char *str);
-int perf_bpf_filter__prepare(struct evsel *evsel);
+int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target);
int perf_bpf_filter__destroy(struct evsel *evsel);
u64 perf_bpf_filter__lost_count(struct evsel *evsel);
+int perf_bpf_filter__pin(void);
+int perf_bpf_filter__unpin(void);
#else /* !HAVE_BPF_SKEL */
@@ -34,7 +40,8 @@ static inline int perf_bpf_filter__parse(struct list_head *expr_head __maybe_unu
{
return -EOPNOTSUPP;
}
-static inline int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
+static inline int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused,
+ struct target *target __maybe_unused)
{
return -EOPNOTSUPP;
}
@@ -46,5 +53,13 @@ static inline u64 perf_bpf_filter__lost_count(struct evsel *evsel __maybe_unused
{
return 0;
}
+static inline int perf_bpf_filter__pin(void)
+{
+ return -EOPNOTSUPP;
+}
+static inline int perf_bpf_filter__unpin(void)
+{
+ return -EOPNOTSUPP;
+}
#endif /* HAVE_BPF_SKEL*/
#endif /* PERF_UTIL_BPF_FILTER_H */
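
The #else branch above follows the usual compile-out pattern: inline stubs keep callers building without HAVE_BPF_SKEL and report -EOPNOTSUPP at runtime. Reduced to its shape (the feature macro is hypothetical):

#include <errno.h>
#include <stdio.h>

#ifdef HAVE_FEATURE
static int feature_pin(void) { return 0; }
#else
static inline int feature_pin(void) { return -EOPNOTSUPP; }
#endif

int main(void)
{
	printf("feature_pin() = %d\n", feature_pin());	/* -95 when compiled out */
	return 0;
}
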
diff --git a/tools/perf/util/bpf-filter.l b/tools/perf/util/bpf-filter.l
index 2a7c839f3fae..f313404f95a9 100644
--- a/tools/perf/util/bpf-filter.l
+++ b/tools/perf/util/bpf-filter.l
@@ -9,8 +9,11 @@
#include "bpf-filter.h"
#include "bpf-filter-bison.h"
+extern int perf_bpf_filter_needs_path;
+
static int sample(enum perf_bpf_filter_term term)
{
+ perf_bpf_filter_needs_path = 0;
perf_bpf_filter_lval.sample.term = term;
perf_bpf_filter_lval.sample.part = 0;
return BFT_SAMPLE;
@@ -18,11 +21,20 @@ static int sample(enum perf_bpf_filter_term term)
static int sample_part(enum perf_bpf_filter_term term, int part)
{
+ perf_bpf_filter_needs_path = 0;
perf_bpf_filter_lval.sample.term = term;
perf_bpf_filter_lval.sample.part = part;
return BFT_SAMPLE;
}
+static int sample_path(enum perf_bpf_filter_term term)
+{
+ perf_bpf_filter_needs_path = 1;
+ perf_bpf_filter_lval.sample.term = term;
+ perf_bpf_filter_lval.sample.part = 0;
+ return BFT_SAMPLE_PATH;
+}
+
static int operator(enum perf_bpf_filter_op op)
{
perf_bpf_filter_lval.op = op;
@@ -48,10 +60,15 @@ static int constant(int val)
return BFT_NUM;
}
-static int error(const char *str)
+static int path_or_error(void)
{
- printf("perf_bpf_filter: Unexpected filter %s: %s\n", str, perf_bpf_filter_text);
- return BFT_ERROR;
+ if (!perf_bpf_filter_needs_path) {
+ printf("perf_bpf_filter: Error: Unexpected item: %s\n",
+ perf_bpf_filter_text);
+ return BFT_ERROR;
+ }
+ perf_bpf_filter_lval.path = perf_bpf_filter_text;
+ return BFT_PATH;
}
%}
@@ -59,6 +76,7 @@ static int error(const char *str)
num_dec [0-9]+
num_hex 0[Xx][0-9a-fA-F]+
space [ \t]+
+path [^ \t\n]+
ident [_a-zA-Z][_a-zA-Z0-9]+
%%
@@ -97,6 +115,7 @@ mem_blk { return sample_part(PBF_TERM_DATA_SRC, 7); }
mem_hops { return sample_part(PBF_TERM_DATA_SRC, 8); }
uid { return sample(PBF_TERM_UID); }
gid { return sample(PBF_TERM_GID); }
+cgroup { return sample_path(PBF_TERM_CGROUP); }
"==" { return operator(PBF_OP_EQ); }
"!=" { return operator(PBF_OP_NEQ); }
@@ -155,7 +174,6 @@ hops3 { return constant(PERF_MEM_HOPS_3); }
"," { return ','; }
"||" { return BFT_LOGICAL_OR; }
-{ident} { return error("ident"); }
-. { return error("input"); }
+{path} { return path_or_error(); }
%%
diff --git a/tools/perf/util/bpf-filter.y b/tools/perf/util/bpf-filter.y
index 0c56fccb8874..5a79a8e7a45b 100644
--- a/tools/perf/util/bpf-filter.y
+++ b/tools/perf/util/bpf-filter.y
@@ -12,9 +12,13 @@
#include <linux/compiler.h>
#include <linux/list.h>
#include "bpf-filter.h"
+#include "cgroup.h"
int perf_bpf_filter_lex(void);
+/* To indicate if the current term needs a pathname or not */
+int perf_bpf_filter_needs_path;
+
static void perf_bpf_filter_error(struct list_head *expr __maybe_unused,
char const *msg)
{
@@ -26,6 +30,7 @@ static void perf_bpf_filter_error(struct list_head *expr __maybe_unused,
%union
{
unsigned long num;
+ char *path;
struct {
enum perf_bpf_filter_term term;
int part;
@@ -34,12 +39,13 @@ static void perf_bpf_filter_error(struct list_head *expr __maybe_unused,
struct perf_bpf_filter_expr *expr;
}
-%token BFT_SAMPLE BFT_OP BFT_ERROR BFT_NUM BFT_LOGICAL_OR
+%token BFT_SAMPLE BFT_SAMPLE_PATH BFT_OP BFT_ERROR BFT_NUM BFT_LOGICAL_OR BFT_PATH
%type <expr> filter_term filter_expr
%destructor { free ($$); } <expr>
-%type <sample> BFT_SAMPLE
+%type <sample> BFT_SAMPLE BFT_SAMPLE_PATH
%type <op> BFT_OP
%type <num> BFT_NUM
+%type <path> BFT_PATH
%%
@@ -81,5 +87,23 @@ BFT_SAMPLE BFT_OP BFT_NUM
{
$$ = perf_bpf_filter_expr__new($1.term, $1.part, $2, $3);
}
+|
+BFT_SAMPLE_PATH BFT_OP BFT_PATH
+{
+ struct cgroup *cgrp;
+ unsigned long cgroup_id = 0;
+
+ if ($2 != PBF_OP_EQ && $2 != PBF_OP_NEQ) {
+ printf("perf_bpf_filter: cgroup accepts '==' or '!=' only\n");
+ YYERROR;
+ }
+
+ cgrp = cgroup__new($3, /*do_open=*/false);
+ if (cgrp && read_cgroup_id(cgrp) == 0)
+ cgroup_id = cgrp->id;
+
+ $$ = perf_bpf_filter_expr__new($1.term, $1.part, $2, cgroup_id);
+ cgroup__put(cgrp);
+}
%%
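
The new grammar rule above resolves a cgroup path to the 64-bit id that the BPF program compares. read_cgroup_id() obtains that id from the cgroup's kernel file handle, roughly as in this Linux-only sketch (the mount path is an example):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct {
		struct file_handle fh;
		uint64_t cgroup_id;	/* cgroup v2 encodes the id here */
	} handle;
	int mount_id;

	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
	if (name_to_handle_at(AT_FDCWD, "/sys/fs/cgroup", &handle.fh,
			      &mount_id, 0) < 0) {
		perror("name_to_handle_at");
		return 1;
	}
	printf("cgroup id: %llu\n", (unsigned long long)handle.cgroup_id);
	return 0;
}
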
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index ea29c372f339..6ff42619de12 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -61,6 +61,9 @@ static int bperf_load_program(struct evlist *evlist)
skel->rodata->num_cpus = total_cpus;
skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
+ if (cgroup_is_v2("perf_event") > 0)
+ skel->rodata->use_cgroup_v2 = 1;
+
BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
/* we need one copy of events per cpu for reading */
@@ -82,9 +85,6 @@ static int bperf_load_program(struct evlist *evlist)
goto out;
}
- if (cgroup_is_v2("perf_event") > 0)
- skel->bss->use_cgroup_v2 = 1;
-
err = -1;
cgrp_switch = evsel__new(&cgrp_switch_attr);
diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c
index 7a4297d8fd2c..06d1c4018407 100644
--- a/tools/perf/util/bpf_ftrace.c
+++ b/tools/perf/util/bpf_ftrace.c
@@ -40,13 +40,17 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
if (ftrace->target.cpu_list) {
ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
+ skel->rodata->has_cpu = 1;
}
if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+ skel->rodata->has_task = 1;
}
+ skel->rodata->use_nsec = ftrace->use_nsec;
+
set_max_rlimit();
err = func_latency_bpf__load(skel);
@@ -59,7 +63,6 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
u32 cpu;
u8 val = 1;
- skel->bss->has_cpu = 1;
fd = bpf_map__fd(skel->maps.cpu_filter);
for (i = 0; i < ncpus; i++) {
@@ -72,7 +75,6 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
u32 pid;
u8 val = 1;
- skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
for (i = 0; i < ntasks; i++) {
@@ -81,8 +83,6 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
}
}
- skel->bss->use_nsec = ftrace->use_nsec;
-
skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
false, func->name);
if (IS_ERR(skel->links.func_begin)) {
diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c
index 44f0f708a15d..6c7126b7670d 100644
--- a/tools/perf/util/bpf_kwork.c
+++ b/tools/perf/util/bpf_kwork.c
@@ -176,8 +176,6 @@ static int setup_filters(struct perf_kwork *kwork)
bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
}
perf_cpu_map__put(map);
-
- skel->bss->has_cpu_filter = 1;
}
if (kwork->profile_name != NULL) {
@@ -197,8 +195,6 @@ static int setup_filters(struct perf_kwork *kwork)
key = 0;
bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);
-
- skel->bss->has_name_filter = 1;
}
return 0;
@@ -239,6 +235,11 @@ int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
class_bpf->load_prepare(kwork);
}
+ if (kwork->cpu_list != NULL)
+ skel->rodata->has_cpu_filter = 1;
+ if (kwork->profile_name != NULL)
+ skel->rodata->has_name_filter = 1;
+
if (kwork_trace_bpf__load(skel)) {
pr_debug("Failed to load kwork trace skeleton\n");
goto out;
diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c
index 22a3b00a1e23..7261cad43468 100644
--- a/tools/perf/util/bpf_kwork_top.c
+++ b/tools/perf/util/bpf_kwork_top.c
@@ -151,14 +151,12 @@ static int setup_filters(struct perf_kwork *kwork)
bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
}
perf_cpu_map__put(map);
-
- skel->bss->has_cpu_filter = 1;
}
return 0;
}
-int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
+int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork)
{
struct bpf_program *prog;
struct kwork_class *class;
@@ -193,6 +191,9 @@ int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
class_bpf->load_prepare();
}
+ if (kwork->cpu_list)
+ skel->rodata->has_cpu_filter = 1;
+
if (kwork_top_bpf__load(skel)) {
pr_debug("Failed to load kwork top skeleton\n");
goto out;
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index bc4e92c0c08b..41a1ad087895 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -46,14 +46,22 @@ int lock_contention_prepare(struct lock_contention *con)
else
bpf_map__set_max_entries(skel->maps.stacks, 1);
- if (target__has_cpu(target))
+ if (target__has_cpu(target)) {
+ skel->rodata->has_cpu = 1;
ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
- if (target__has_task(target))
+ }
+ if (target__has_task(target)) {
+ skel->rodata->has_task = 1;
ntasks = perf_thread_map__nr(evlist->core.threads);
- if (con->filters->nr_types)
+ }
+ if (con->filters->nr_types) {
+ skel->rodata->has_type = 1;
ntypes = con->filters->nr_types;
- if (con->filters->nr_cgrps)
+ }
+ if (con->filters->nr_cgrps) {
+ skel->rodata->has_cgroup = 1;
ncgrps = con->filters->nr_cgrps;
+ }
/* resolve lock name filters to addr */
if (con->filters->nr_syms) {
@@ -82,6 +90,7 @@ int lock_contention_prepare(struct lock_contention *con)
con->filters->addrs = addrs;
}
naddrs = con->filters->nr_addrs;
+ skel->rodata->has_addr = 1;
}
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
@@ -90,6 +99,16 @@ int lock_contention_prepare(struct lock_contention *con)
bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
+ skel->rodata->stack_skip = con->stack_skip;
+ skel->rodata->aggr_mode = con->aggr_mode;
+ skel->rodata->needs_callstack = con->save_callstack;
+ skel->rodata->lock_owner = con->owner;
+
+ if (con->aggr_mode == LOCK_AGGR_CGROUP || con->filters->nr_cgrps) {
+ if (cgroup_is_v2("perf_event"))
+ skel->rodata->use_cgroup_v2 = 1;
+ }
+
if (lock_contention_bpf__load(skel) < 0) {
pr_err("Failed to load lock-contention BPF skeleton\n");
return -1;
@@ -99,7 +118,6 @@ int lock_contention_prepare(struct lock_contention *con)
u32 cpu;
u8 val = 1;
- skel->bss->has_cpu = 1;
fd = bpf_map__fd(skel->maps.cpu_filter);
for (i = 0; i < ncpus; i++) {
@@ -112,7 +130,6 @@ int lock_contention_prepare(struct lock_contention *con)
u32 pid;
u8 val = 1;
- skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
for (i = 0; i < ntasks; i++) {
@@ -125,7 +142,6 @@ int lock_contention_prepare(struct lock_contention *con)
u32 pid = evlist->workload.pid;
u8 val = 1;
- skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
}
@@ -133,7 +149,6 @@ int lock_contention_prepare(struct lock_contention *con)
if (con->filters->nr_types) {
u8 val = 1;
- skel->bss->has_type = 1;
fd = bpf_map__fd(skel->maps.type_filter);
for (i = 0; i < con->filters->nr_types; i++)
@@ -143,7 +158,6 @@ int lock_contention_prepare(struct lock_contention *con)
if (con->filters->nr_addrs) {
u8 val = 1;
- skel->bss->has_addr = 1;
fd = bpf_map__fd(skel->maps.addr_filter);
for (i = 0; i < con->filters->nr_addrs; i++)
@@ -153,25 +167,14 @@ int lock_contention_prepare(struct lock_contention *con)
if (con->filters->nr_cgrps) {
u8 val = 1;
- skel->bss->has_cgroup = 1;
fd = bpf_map__fd(skel->maps.cgroup_filter);
for (i = 0; i < con->filters->nr_cgrps; i++)
bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
}
- /* these don't work well if in the rodata section */
- skel->bss->stack_skip = con->stack_skip;
- skel->bss->aggr_mode = con->aggr_mode;
- skel->bss->needs_callstack = con->save_callstack;
- skel->bss->lock_owner = con->owner;
-
- if (con->aggr_mode == LOCK_AGGR_CGROUP) {
- if (cgroup_is_v2("perf_event"))
- skel->bss->use_cgroup_v2 = 1;
-
+ if (con->aggr_mode == LOCK_AGGR_CGROUP)
read_all_cgroups(&con->cgroups);
- }
bpf_program__set_autoload(skel->progs.collect_lock_syms, false);
diff --git a/tools/perf/util/bpf_map.c b/tools/perf/util/bpf_map.c
index c863ae0c5cb5..578f27d2d6b4 100644
--- a/tools/perf/util/bpf_map.c
+++ b/tools/perf/util/bpf_map.c
@@ -35,9 +35,6 @@ int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
if (fd < 0)
return fd;
- if (!map)
- return PTR_ERR(map);
-
err = -ENOMEM;
key = malloc(bpf_map__key_size(map));
if (key == NULL)
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index 6af36142dc5a..a590a8ac1f9d 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -73,14 +73,12 @@ static void off_cpu_start(void *arg)
struct evlist *evlist = arg;
/* update task filter for the given workload */
- if (!skel->bss->has_cpu && !skel->bss->has_task &&
+ if (skel->rodata->has_task && skel->rodata->uses_tgid &&
perf_thread_map__pid(evlist->core.threads, 0) != -1) {
int fd;
u32 pid;
u8 val = 1;
- skel->bss->has_task = 1;
- skel->bss->uses_tgid = 1;
fd = bpf_map__fd(skel->maps.task_filter);
pid = perf_thread_map__pid(evlist->core.threads, 0);
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
@@ -148,6 +146,7 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
if (target->cpu_list) {
ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
+ skel->rodata->has_cpu = 1;
}
if (target->pid) {
@@ -173,11 +172,16 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
ntasks = MAX_PROC;
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+ skel->rodata->has_task = 1;
+ skel->rodata->uses_tgid = 1;
} else if (target__has_task(target)) {
ntasks = perf_thread_map__nr(evlist->core.threads);
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+ skel->rodata->has_task = 1;
} else if (target__none(target)) {
bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
+ skel->rodata->has_task = 1;
+ skel->rodata->uses_tgid = 1;
}
if (evlist__first(evlist)->cgrp) {
@@ -186,6 +190,7 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
if (!cgroup_is_v2("perf_event"))
skel->rodata->uses_cgroup_v1 = true;
+ skel->rodata->has_cgroup = 1;
}
if (opts->record_cgroup) {
@@ -208,7 +213,6 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
u32 cpu;
u8 val = 1;
- skel->bss->has_cpu = 1;
fd = bpf_map__fd(skel->maps.cpu_filter);
for (i = 0; i < ncpus; i++) {
@@ -220,8 +224,6 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
if (target->pid) {
u8 val = 1;
- skel->bss->has_task = 1;
- skel->bss->uses_tgid = 1;
fd = bpf_map__fd(skel->maps.task_filter);
strlist__for_each_entry(pos, pid_slist) {
@@ -240,7 +242,6 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
u32 pid;
u8 val = 1;
- skel->bss->has_task = 1;
fd = bpf_map__fd(skel->maps.task_filter);
for (i = 0; i < ntasks; i++) {
@@ -253,7 +254,6 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
struct evsel *evsel;
u8 val = 1;
- skel->bss->has_cgroup = 1;
fd = bpf_map__fd(skel->maps.cgroup_filter);
evlist__for_each_entry(evlist, evsel) {
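
With the flags moved to .rodata, off_cpu_start() can no longer flip has_task once the workload pid is known; off_cpu_prepare() now pre-sets the flags before load, and start time reduces to a plain map update. Roughly:

    /* prepare (before load): the workload case reserves the flags up front */
    skel->rodata->has_task  = 1;
    skel->rodata->uses_tgid = 1;

    /* start (pid now known): only the filter map entry is added */
    u32 pid = perf_thread_map__pid(evlist->core.threads, 0);
    u8 val = 1;
    bpf_map_update_elem(bpf_map__fd(skel->maps.task_filter), &pid, &val, BPF_ANY);
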
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 0acbd74e8c76..b2f17cca014b 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -7,9 +7,14 @@
*/
#include "vmlinux.h"
+#include "../trace_augment.h"
+
#include <bpf/bpf_helpers.h>
#include <linux/limits.h>
+#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
+
/**
* is_power_of_2() - check if a value is a power of two
* @n: the value to check
@@ -66,19 +71,6 @@ struct syscall_exit_args {
long ret;
};
-struct augmented_arg {
- unsigned int size;
- int err;
- char value[PATH_MAX];
-};
-
-struct pids_filtered {
- __uint(type, BPF_MAP_TYPE_HASH);
- __type(key, pid_t);
- __type(value, bool);
- __uint(max_entries, 64);
-} pids_filtered SEC(".maps");
-
/*
* Desired design of maximum size and alignment (see RFC2553)
*/
@@ -105,17 +97,27 @@ struct sockaddr_storage {
};
};
-struct augmented_args_payload {
- struct syscall_enter_args args;
- union {
- struct {
- struct augmented_arg arg, arg2;
- };
+struct augmented_arg {
+ unsigned int size;
+ int err;
+ union {
+ char value[PATH_MAX];
struct sockaddr_storage saddr;
- char __data[sizeof(struct augmented_arg)];
};
};
+struct pids_filtered {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, pid_t);
+ __type(value, bool);
+ __uint(max_entries, 64);
+} pids_filtered SEC(".maps");
+
+struct augmented_args_payload {
+ struct syscall_enter_args args;
+ struct augmented_arg arg, arg2; // We have to reserve space for two arguments (rename, etc)
+};
+
// We need more tmp space than the BPF stack can give us
struct augmented_args_tmp {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
@@ -124,6 +126,25 @@ struct augmented_args_tmp {
__uint(max_entries, 1);
} augmented_args_tmp SEC(".maps");
+struct beauty_map_enter {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, __u32[6]);
+ __uint(max_entries, 512);
+} beauty_map_enter SEC(".maps");
+
+struct beauty_payload_enter {
+ struct syscall_enter_args args;
+ struct augmented_arg aug_args[6];
+};
+
+struct beauty_payload_enter_map {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, int);
+ __type(value, struct beauty_payload_enter);
+ __uint(max_entries, 1);
+} beauty_payload_enter_map SEC(".maps");
+
static inline struct augmented_args_payload *augmented_args_payload(void)
{
int key = 0;
@@ -136,6 +157,11 @@ static inline int augmented__output(void *ctx, struct augmented_args_payload *ar
return bpf_perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, args, len);
}
+static inline int augmented__beauty_output(void *ctx, void *data, int len)
+{
+ return bpf_perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, data, len);
+}
+
static inline
unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
{
@@ -182,15 +208,17 @@ int sys_enter_connect(struct syscall_enter_args *args)
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *sockaddr_arg = (const void *)args->args[1];
unsigned int socklen = args->args[2];
- unsigned int len = sizeof(augmented_args->args);
+ unsigned int len = sizeof(u64) + sizeof(augmented_args->args); // the size + err in all 'augmented_arg' structs
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
- _Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two");
- socklen &= sizeof(augmented_args->saddr) - 1;
+ _Static_assert(is_power_of_2(sizeof(augmented_args->arg.saddr)), "sizeof(augmented_args->arg.saddr) needs to be a power of two");
+ socklen &= sizeof(augmented_args->arg.saddr) - 1;
- bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
+ bpf_probe_read_user(&augmented_args->arg.saddr, socklen, sockaddr_arg);
+ augmented_args->arg.size = socklen;
+ augmented_args->arg.err = 0;
return augmented__output(args, augmented_args, len + socklen);
}
@@ -201,14 +229,14 @@ int sys_enter_sendto(struct syscall_enter_args *args)
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *sockaddr_arg = (const void *)args->args[4];
unsigned int socklen = args->args[5];
- unsigned int len = sizeof(augmented_args->args);
+ unsigned int len = sizeof(u64) + sizeof(augmented_args->args); // the size + err in all 'augmented_arg' structs
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
- socklen &= sizeof(augmented_args->saddr) - 1;
+ socklen &= sizeof(augmented_args->arg.saddr) - 1;
- bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
+ bpf_probe_read_user(&augmented_args->arg.saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
@@ -249,30 +277,50 @@ int sys_enter_rename(struct syscall_enter_args *args)
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *oldpath_arg = (const void *)args->args[0],
*newpath_arg = (const void *)args->args[1];
- unsigned int len = sizeof(augmented_args->args), oldpath_len;
+ unsigned int len = sizeof(augmented_args->args), oldpath_len, newpath_len;
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
+ len += 2 * sizeof(u64); // The overhead of size and err, just before the payload...
+
oldpath_len = augmented_arg__read_str(&augmented_args->arg, oldpath_arg, sizeof(augmented_args->arg.value));
- len += oldpath_len + augmented_arg__read_str((void *)(&augmented_args->arg) + oldpath_len, newpath_arg, sizeof(augmented_args->arg.value));
+ augmented_args->arg.size = PERF_ALIGN(oldpath_len + 1, sizeof(u64));
+ len += augmented_args->arg.size;
+
+ struct augmented_arg *arg2 = (void *)&augmented_args->arg.value + augmented_args->arg.size;
+
+ newpath_len = augmented_arg__read_str(arg2, newpath_arg, sizeof(augmented_args->arg.value));
+ arg2->size = newpath_len;
+
+ len += newpath_len;
return augmented__output(args, augmented_args, len);
}
-SEC("tp/syscalls/sys_enter_renameat")
-int sys_enter_renameat(struct syscall_enter_args *args)
+SEC("tp/syscalls/sys_enter_renameat2")
+int sys_enter_renameat2(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *oldpath_arg = (const void *)args->args[1],
*newpath_arg = (const void *)args->args[3];
- unsigned int len = sizeof(augmented_args->args), oldpath_len;
+ unsigned int len = sizeof(augmented_args->args), oldpath_len, newpath_len;
if (augmented_args == NULL)
return 1; /* Failure: don't filter */
+ len += 2 * sizeof(u64); // The overhead of size and err, just before the payload...
+
oldpath_len = augmented_arg__read_str(&augmented_args->arg, oldpath_arg, sizeof(augmented_args->arg.value));
- len += oldpath_len + augmented_arg__read_str((void *)(&augmented_args->arg) + oldpath_len, newpath_arg, sizeof(augmented_args->arg.value));
+ augmented_args->arg.size = PERF_ALIGN(oldpath_len + 1, sizeof(u64));
+ len += augmented_args->arg.size;
+
+ struct augmented_arg *arg2 = (void *)&augmented_args->arg.value + augmented_args->arg.size;
+
+ newpath_len = augmented_arg__read_str(arg2, newpath_arg, sizeof(augmented_args->arg.value));
+ arg2->size = newpath_len;
+
+ len += newpath_len;
return augmented__output(args, augmented_args, len);
}
@@ -293,26 +341,26 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const struct perf_event_attr_size *attr = (const struct perf_event_attr_size *)args->args[0], *attr_read;
- unsigned int len = sizeof(augmented_args->args);
+ unsigned int len = sizeof(u64) + sizeof(augmented_args->args); // the size + err in all 'augmented_arg' structs
if (augmented_args == NULL)
goto failure;
- if (bpf_probe_read_user(&augmented_args->__data, sizeof(*attr), attr) < 0)
+ if (bpf_probe_read_user(&augmented_args->arg.value, sizeof(*attr), attr) < 0)
goto failure;
- attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
+ attr_read = (const struct perf_event_attr_size *)augmented_args->arg.value;
__u32 size = attr_read->size;
if (!size)
size = PERF_ATTR_SIZE_VER0;
- if (size > sizeof(augmented_args->__data))
+ if (size > sizeof(augmented_args->arg.value))
goto failure;
// Now that we read attr->size and tested it against the size limits, read it completely
- if (bpf_probe_read_user(&augmented_args->__data, size, attr) < 0)
+ if (bpf_probe_read_user(&augmented_args->arg.value, size, attr) < 0)
goto failure;
return augmented__output(args, augmented_args, len + size);
@@ -325,16 +373,16 @@ int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
{
struct augmented_args_payload *augmented_args = augmented_args_payload();
const void *rqtp_arg = (const void *)args->args[2];
- unsigned int len = sizeof(augmented_args->args);
+ unsigned int len = sizeof(u64) + sizeof(augmented_args->args); // the size + err in all 'augmented_arg' structs
__u32 size = sizeof(struct timespec64);
if (augmented_args == NULL)
goto failure;
- if (size > sizeof(augmented_args->__data))
+ if (size > sizeof(augmented_args->arg.value))
goto failure;
- bpf_probe_read_user(&augmented_args->__data, size, rqtp_arg);
+ bpf_probe_read_user(&augmented_args->arg.value, size, rqtp_arg);
return augmented__output(args, augmented_args, len + size);
failure:
@@ -352,10 +400,10 @@ int sys_enter_nanosleep(struct syscall_enter_args *args)
if (augmented_args == NULL)
goto failure;
- if (size > sizeof(augmented_args->__data))
+ if (size > sizeof(augmented_args->arg.value))
goto failure;
- bpf_probe_read_user(&augmented_args->__data, size, req_arg);
+ bpf_probe_read_user(&augmented_args->arg.value, size, req_arg);
return augmented__output(args, augmented_args, len + size);
failure:
@@ -372,6 +420,91 @@ static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
return bpf_map_lookup_elem(pids, &pid) != NULL;
}
+static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
+{
+ bool augmented, do_output = false;
+ int zero = 0, size, aug_size, index, output = 0,
+ value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
+ unsigned int nr, *beauty_map;
+ struct beauty_payload_enter *payload;
+ void *arg, *payload_offset;
+
+	/* fall back to the predefined tail call */
+ if (args == NULL)
+ return 1;
+
+ /* use syscall number to get beauty_map entry */
+ nr = (__u32)args->syscall_nr;
+ beauty_map = bpf_map_lookup_elem(&beauty_map_enter, &nr);
+
+ /* set up payload for output */
+ payload = bpf_map_lookup_elem(&beauty_payload_enter_map, &zero);
+ payload_offset = (void *)&payload->aug_args;
+
+ if (beauty_map == NULL || payload == NULL)
+ return 1;
+
+ /* copy the sys_enter header, which has the syscall_nr */
+ __builtin_memcpy(&payload->args, args, sizeof(struct syscall_enter_args));
+
+ /*
+ * Determine what type of argument and how many bytes to read from user space, using the
+ * value in the beauty_map. This is the relation of parameter type and its corresponding
+ * value in the beauty map, and how many bytes we read eventually:
+ *
+ * string: 1 -> size of string
+ * struct: size of struct -> size of struct
+	 * buffer: -1 * (1-based index of paired len) -> value of paired len (maximum: TRACE_AUG_MAX_BUF)
+ */
+ for (int i = 0; i < 6; i++) {
+ arg = (void *)args->args[i];
+ augmented = false;
+ size = beauty_map[i];
+ aug_size = size; /* size of the augmented data read from user space */
+
+ if (size == 0 || arg == NULL)
+ continue;
+
+ if (size == 1) { /* string */
+ aug_size = bpf_probe_read_user_str(((struct augmented_arg *)payload_offset)->value, value_size, arg);
+			/* clamp negative error returns to 0 to satisfy the verifier */
+ if (aug_size < 0)
+ aug_size = 0;
+
+ augmented = true;
+ } else if (size > 0 && size <= value_size) { /* struct */
+ if (!bpf_probe_read_user(((struct augmented_arg *)payload_offset)->value, size, arg))
+ augmented = true;
+ } else if (size < 0 && size >= -6) { /* buffer */
+ index = -(size + 1);
+ aug_size = args->args[index];
+
+ if (aug_size > TRACE_AUG_MAX_BUF)
+ aug_size = TRACE_AUG_MAX_BUF;
+
+ if (aug_size > 0) {
+ if (!bpf_probe_read_user(((struct augmented_arg *)payload_offset)->value, aug_size, arg))
+ augmented = true;
+ }
+ }
+
+ /* write data to payload */
+ if (augmented) {
+ int written = offsetof(struct augmented_arg, value) + aug_size;
+
+ ((struct augmented_arg *)payload_offset)->size = aug_size;
+ output += written;
+ payload_offset += written;
+ do_output = true;
+ }
+ }
+
+ if (!do_output)
+ return 1;
+
+ return augmented__beauty_output(ctx, payload, sizeof(struct syscall_enter_args) + output);
+}
+
SEC("tp/raw_syscalls/sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
@@ -400,7 +533,8 @@ int sys_enter(struct syscall_enter_args *args)
* "!raw_syscalls:unaugmented" that will just return 1 to return the
* unaugmented tracepoint payload.
*/
- bpf_tail_call(args, &syscalls_sys_enter, augmented_args->args.syscall_nr);
+ if (augment_sys_enter(args, &augmented_args->args))
+ bpf_tail_call(args, &syscalls_sys_enter, augmented_args->args.syscall_nr);
// If not found on the PROG_ARRAY syscalls map, then we're filtering it:
return 0;
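
A worked example helps with the new packing, since two variable-length strings now share one buffer: each augmented_arg carries a u64-sized size+err header, and the first string is padded so the second header lands u64-aligned. With oldpath "abc" (oldpath_len == 3):

    /* PERF_ALIGN(x, a) == ((x + a - 1) & ~(a - 1)), i.e. round x up to a  */
    arg.size = PERF_ALIGN(3 + 1, sizeof(u64));   /* (4 + 7) & ~7 == 8      */

    len = sizeof(args)        /* common sys_enter header                   */
        + 2 * sizeof(u64)     /* size+err headers of arg and arg2          */
        + 8                   /* aligned oldpath bytes                     */
        + newpath_len;        /* trailing string needs no padding          */

    arg2 = (void *)&arg.value + arg.size;        /* u64-aligned start      */
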
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 6a438e0102c5..57cab7647a9a 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -57,9 +57,9 @@ struct cgroup___old {
const volatile __u32 num_events = 1;
const volatile __u32 num_cpus = 1;
+const volatile int use_cgroup_v2 = 0;
int enabled = 0;
-int use_cgroup_v2 = 0;
int perf_subsys_id = -1;
static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
diff --git a/tools/perf/util/bpf_skel/func_latency.bpf.c b/tools/perf/util/bpf_skel/func_latency.bpf.c
index 9d01e3af7479..f613dc9cb123 100644
--- a/tools/perf/util/bpf_skel/func_latency.bpf.c
+++ b/tools/perf/util/bpf_skel/func_latency.bpf.c
@@ -37,9 +37,10 @@ struct {
int enabled = 0;
-int has_cpu = 0;
-int has_task = 0;
-int use_nsec = 0;
+
+const volatile int has_cpu = 0;
+const volatile int has_task = 0;
+const volatile int use_nsec = 0;
SEC("kprobe/func")
int BPF_PROG(func_begin)
diff --git a/tools/perf/util/bpf_skel/kwork_top.bpf.c b/tools/perf/util/bpf_skel/kwork_top.bpf.c
index 84c15ccbab44..594da91965a2 100644
--- a/tools/perf/util/bpf_skel/kwork_top.bpf.c
+++ b/tools/perf/util/bpf_skel/kwork_top.bpf.c
@@ -84,7 +84,7 @@ struct {
int enabled = 0;
-int has_cpu_filter = 0;
+const volatile int has_cpu_filter = 0;
__u64 from_timestamp = 0;
__u64 to_timestamp = 0;
diff --git a/tools/perf/util/bpf_skel/kwork_trace.bpf.c b/tools/perf/util/bpf_skel/kwork_trace.bpf.c
index 063c124e0999..cbd79bc4b330 100644
--- a/tools/perf/util/bpf_skel/kwork_trace.bpf.c
+++ b/tools/perf/util/bpf_skel/kwork_trace.bpf.c
@@ -68,8 +68,9 @@ struct {
} perf_kwork_name_filter SEC(".maps");
int enabled = 0;
-int has_cpu_filter = 0;
-int has_name_filter = 0;
+
+const volatile int has_cpu_filter = 0;
+const volatile int has_name_filter = 0;
static __always_inline int local_strncmp(const char *s1,
unsigned int sz, const char *s2)
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index d931a898c434..1069bda5d733 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -117,21 +117,22 @@ struct mm_struct___new {
} __attribute__((preserve_access_index));
/* control flags */
-int enabled;
-int has_cpu;
-int has_task;
-int has_type;
-int has_addr;
-int has_cgroup;
-int needs_callstack;
-int stack_skip;
-int lock_owner;
-
-int use_cgroup_v2;
-int perf_subsys_id = -1;
+const volatile int has_cpu;
+const volatile int has_task;
+const volatile int has_type;
+const volatile int has_addr;
+const volatile int has_cgroup;
+const volatile int needs_callstack;
+const volatile int stack_skip;
+const volatile int lock_owner;
+const volatile int use_cgroup_v2;
/* determine the key of lock stat */
-int aggr_mode;
+const volatile int aggr_mode;
+
+int enabled;
+
+int perf_subsys_id = -1;
__u64 end_ts;
@@ -323,8 +324,7 @@ static inline struct tstamp_data *get_tstamp_elem(__u32 flags)
struct tstamp_data *pelem;
/* Use per-cpu array map for spinlock and rwlock */
- if (flags == (LCB_F_SPIN | LCB_F_READ) || flags == LCB_F_SPIN ||
- flags == (LCB_F_SPIN | LCB_F_WRITE)) {
+ if ((flags & (LCB_F_SPIN | LCB_F_MUTEX)) == LCB_F_SPIN) {
__u32 idx = 0;
pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
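
The rewritten test is a mask check rather than an enumeration; it accepts the same spin-family combinations the old three comparisons listed while still excluding anything mutex-flagged:

    /* (flags & (LCB_F_SPIN | LCB_F_MUTEX)) == LCB_F_SPIN                  */
    /*   LCB_F_SPIN                -> true  (spinlock)                     */
    /*   LCB_F_SPIN | LCB_F_READ   -> true  (rwlock, read side)            */
    /*   LCB_F_SPIN | LCB_F_WRITE  -> true  (rwlock, write side)           */
    /*   anything with LCB_F_MUTEX -> false (may sleep, keep per-task map) */
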
@@ -439,11 +439,8 @@ int contention_end(u64 *ctx)
duration = bpf_ktime_get_ns() - pelem->timestamp;
if ((__s64)duration < 0) {
- pelem->lock = 0;
- if (need_delete)
- bpf_map_delete_elem(&tstamp, &pid);
__sync_fetch_and_add(&time_fail, 1);
- return 0;
+ goto out;
}
switch (aggr_mode) {
@@ -477,11 +474,8 @@ int contention_end(u64 *ctx)
data = bpf_map_lookup_elem(&lock_stat, &key);
if (!data) {
if (data_map_full) {
- pelem->lock = 0;
- if (need_delete)
- bpf_map_delete_elem(&tstamp, &pid);
__sync_fetch_and_add(&data_fail, 1);
- return 0;
+ goto out;
}
struct contention_data first = {
@@ -498,16 +492,20 @@ int contention_end(u64 *ctx)
err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
if (err < 0) {
+ if (err == -EEXIST) {
+ /* it lost the race, try to get it again */
+ data = bpf_map_lookup_elem(&lock_stat, &key);
+ if (data != NULL)
+ goto found;
+ }
if (err == -E2BIG)
data_map_full = 1;
__sync_fetch_and_add(&data_fail, 1);
}
- pelem->lock = 0;
- if (need_delete)
- bpf_map_delete_elem(&tstamp, &pid);
- return 0;
+ goto out;
}
+found:
__sync_fetch_and_add(&data->total_time, duration);
__sync_fetch_and_add(&data->count, 1);
@@ -517,6 +515,7 @@ int contention_end(u64 *ctx)
if (data->min_time > duration)
data->min_time = duration;
+out:
pelem->lock = 0;
if (need_delete)
bpf_map_delete_elem(&tstamp, &pid);
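
The -EEXIST branch closes a counting race: two CPUs can both miss the lookup and then race to create the same lock_stat entry, and BPF_NOEXIST makes the loser fail rather than clobber. The recovery pattern, in isolation:

    data = bpf_map_lookup_elem(&lock_stat, &key);
    if (!data) {
        err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
        if (err == -EEXIST)                               /* lost the race */
            data = bpf_map_lookup_elem(&lock_stat, &key); /* winner's entry */
    }
    if (data)
        __sync_fetch_and_add(&data->count, 1);            /* nothing is lost */
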
diff --git a/tools/perf/util/bpf_skel/lock_data.h b/tools/perf/util/bpf_skel/lock_data.h
index 36af11faad03..de12892f992f 100644
--- a/tools/perf/util/bpf_skel/lock_data.h
+++ b/tools/perf/util/bpf_skel/lock_data.h
@@ -7,11 +7,11 @@ struct tstamp_data {
u64 timestamp;
u64 lock;
u32 flags;
- u32 stack_id;
+ s32 stack_id;
};
struct contention_key {
- u32 stack_id;
+ s32 stack_id;
u32 pid;
u64 lock_addr_or_cgroup;
};
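
The signedness change matters because stack ids come from bpf_get_stackid(), which returns a negative errno on failure; stored as u32, those errors looked like huge valid indexes. A sketch of the check this enables:

    s32 stack_id = bpf_get_stackid(ctx, &stacks, flags);

    if (stack_id < 0)   /* with u32 this test could never be true */
        return 0;
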
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index d877a0a9731f..c152116df72f 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -85,10 +85,11 @@ struct task_struct___old {
} __attribute__((preserve_access_index));
int enabled = 0;
-int has_cpu = 0;
-int has_task = 0;
-int has_cgroup = 0;
-int uses_tgid = 0;
+
+const volatile int has_cpu = 0;
+const volatile int has_task = 0;
+const volatile int has_cgroup = 0;
+const volatile int uses_tgid = 0;
const volatile bool has_prev_state = false;
const volatile bool needs_cgroup = false;
diff --git a/tools/perf/util/bpf_skel/sample-filter.h b/tools/perf/util/bpf_skel/sample-filter.h
index 350efa121026..683fec85e71e 100644
--- a/tools/perf/util/bpf_skel/sample-filter.h
+++ b/tools/perf/util/bpf_skel/sample-filter.h
@@ -1,7 +1,9 @@
#ifndef PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H
#define PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H
-#define MAX_FILTERS 64
+#define MAX_FILTERS 64
+#define MAX_IDX_HASH (16 * 1024)
+#define MAX_EVT_HASH (1024 * 1024)
/* supported filter operations */
enum perf_bpf_filter_op {
@@ -14,6 +16,7 @@ enum perf_bpf_filter_op {
PBF_OP_AND,
PBF_OP_GROUP_BEGIN,
PBF_OP_GROUP_END,
+ PBF_OP_DONE,
};
enum perf_bpf_filter_term {
@@ -42,7 +45,7 @@ enum perf_bpf_filter_term {
__PBF_UNUSED_TERM18 = PBF_TERM_SAMPLE_START + 18, /* SAMPLE_REGS_INTR = 1U << 18 */
PBF_TERM_PHYS_ADDR = PBF_TERM_SAMPLE_START + 19, /* SAMPLE_PHYS_ADDR = 1U << 19 */
__PBF_UNUSED_TERM20 = PBF_TERM_SAMPLE_START + 20, /* SAMPLE_AUX = 1U << 20 */
- __PBF_UNUSED_TERM21 = PBF_TERM_SAMPLE_START + 21, /* SAMPLE_CGROUP = 1U << 21 */
+ PBF_TERM_CGROUP = PBF_TERM_SAMPLE_START + 21, /* SAMPLE_CGROUP = 1U << 21 */
PBF_TERM_DATA_PAGE_SIZE = PBF_TERM_SAMPLE_START + 22, /* SAMPLE_DATA_PAGE_SIZE = 1U << 22 */
PBF_TERM_CODE_PAGE_SIZE = PBF_TERM_SAMPLE_START + 23, /* SAMPLE_CODE_PAGE_SIZE = 1U << 23 */
PBF_TERM_WEIGHT_STRUCT = PBF_TERM_SAMPLE_START + 24, /* SAMPLE_WEIGHT_STRUCT = 1U << 24 */
@@ -60,4 +63,10 @@ struct perf_bpf_filter_entry {
__u64 value;
};
+struct idx_hash_key {
+ __u64 evt_id;
+ __u32 tgid;
+ __u32 reserved;
+};
+
#endif /* PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H */
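
For context, this key ties one filter slot to a (process, event) pair; evt_id is the representative id resolved through the event_hash map, so every per-CPU instance of an evsel shares one slot. A sketch of user space installing a slot (leader_id, target_tgid and slot are illustrative names, not from this patch):

    struct idx_hash_key key = {
        .evt_id = leader_id,    /* representative id stored in event_hash */
        .tgid   = target_tgid,
    };                          /* .reserved stays 0 so hashing is stable */
    int idx = slot;

    bpf_map_update_elem(bpf_map__fd(skel->maps.idx_hash), &key, &idx, BPF_ANY);
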
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
index f59985101973..b195e6efeb8b 100644
--- a/tools/perf/util/bpf_skel/sample_filter.bpf.c
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -9,13 +9,41 @@
/* BPF map that will be filled by user space */
struct filters {
- __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
- __type(value, struct perf_bpf_filter_entry);
- __uint(max_entries, MAX_FILTERS);
+ __type(value, struct perf_bpf_filter_entry[MAX_FILTERS]);
+ __uint(max_entries, 1);
} filters SEC(".maps");
-int dropped;
+/*
+ * An evsel has multiple instances for each CPU or task but we need a single
+ * id to be used as a key for the idx_hash. This hashmap would translate the
+ * instance's ID to a representative ID.
+ */
+struct event_hash {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u64);
+ __type(value, __u64);
+ __uint(max_entries, 1);
+} event_hash SEC(".maps");
+
+/* tgid/evtid to filter index */
+struct idx_hash {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct idx_hash_key);
+ __type(value, int);
+ __uint(max_entries, 1);
+} idx_hash SEC(".maps");
+
+/* number of dropped samples, per filter index */
+struct lost_count {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+} dropped SEC(".maps");
+
+volatile const int use_idx_hash;
void *bpf_cast_to_kern_ctx(void *) __ksym;
@@ -65,6 +93,7 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
BUILD_CHECK_SAMPLE(DATA_SRC);
BUILD_CHECK_SAMPLE(TRANSACTION);
BUILD_CHECK_SAMPLE(PHYS_ADDR);
+ BUILD_CHECK_SAMPLE(CGROUP);
BUILD_CHECK_SAMPLE(DATA_PAGE_SIZE);
BUILD_CHECK_SAMPLE(CODE_PAGE_SIZE);
BUILD_CHECK_SAMPLE(WEIGHT_STRUCT);
@@ -107,6 +136,8 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
return kctx->data->weight.full;
case PBF_TERM_PHYS_ADDR:
return kctx->data->phys_addr;
+ case PBF_TERM_CGROUP:
+ return kctx->data->cgroup;
case PBF_TERM_CODE_PAGE_SIZE:
return kctx->data->code_page_size;
case PBF_TERM_DATA_PAGE_SIZE:
@@ -155,7 +186,6 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
case __PBF_UNUSED_TERM16:
case __PBF_UNUSED_TERM18:
case __PBF_UNUSED_TERM20:
- case __PBF_UNUSED_TERM21:
default:
break;
}
@@ -179,39 +209,66 @@ int perf_sample_filter(void *ctx)
__u64 sample_data;
int in_group = 0;
int group_result = 0;
- int i;
+ int i, k;
+ int *losts;
kctx = bpf_cast_to_kern_ctx(ctx);
- for (i = 0; i < MAX_FILTERS; i++) {
- int key = i; /* needed for verifier :( */
+ k = 0;
- entry = bpf_map_lookup_elem(&filters, &key);
- if (entry == NULL)
- break;
- sample_data = perf_get_sample(kctx, entry);
+ if (use_idx_hash) {
+ struct idx_hash_key key = {
+ .tgid = bpf_get_current_pid_tgid() >> 32,
+ };
+ __u64 eid = kctx->event->id;
+ __u64 *key_id;
+ int *idx;
+
+ /* get primary_event_id */
+ if (kctx->event->parent)
+ eid = kctx->event->parent->id;
- switch (entry->op) {
+ key_id = bpf_map_lookup_elem(&event_hash, &eid);
+ if (key_id == NULL)
+ goto drop;
+
+ key.evt_id = *key_id;
+
+ idx = bpf_map_lookup_elem(&idx_hash, &key);
+ if (idx)
+ k = *idx;
+ else
+ goto drop;
+ }
+
+ entry = bpf_map_lookup_elem(&filters, &k);
+ if (entry == NULL)
+ goto drop;
+
+ for (i = 0; i < MAX_FILTERS; i++) {
+ sample_data = perf_get_sample(kctx, &entry[i]);
+
+ switch (entry[i].op) {
case PBF_OP_EQ:
- CHECK_RESULT(sample_data, ==, entry->value)
+ CHECK_RESULT(sample_data, ==, entry[i].value)
break;
case PBF_OP_NEQ:
- CHECK_RESULT(sample_data, !=, entry->value)
+ CHECK_RESULT(sample_data, !=, entry[i].value)
break;
case PBF_OP_GT:
- CHECK_RESULT(sample_data, >, entry->value)
+ CHECK_RESULT(sample_data, >, entry[i].value)
break;
case PBF_OP_GE:
- CHECK_RESULT(sample_data, >=, entry->value)
+ CHECK_RESULT(sample_data, >=, entry[i].value)
break;
case PBF_OP_LT:
- CHECK_RESULT(sample_data, <, entry->value)
+ CHECK_RESULT(sample_data, <, entry[i].value)
break;
case PBF_OP_LE:
- CHECK_RESULT(sample_data, <=, entry->value)
+ CHECK_RESULT(sample_data, <=, entry[i].value)
break;
case PBF_OP_AND:
- CHECK_RESULT(sample_data, &, entry->value)
+ CHECK_RESULT(sample_data, &, entry[i].value)
break;
case PBF_OP_GROUP_BEGIN:
in_group = 1;
@@ -222,13 +279,19 @@ int perf_sample_filter(void *ctx)
goto drop;
in_group = 0;
break;
+ case PBF_OP_DONE:
+ /* no failures so far, accept it */
+ return 1;
}
}
/* generate sample data */
return 1;
drop:
- __sync_fetch_and_add(&dropped, 1);
+ losts = bpf_map_lookup_elem(&dropped, &k);
+ if (losts != NULL)
+ __sync_fetch_and_add(losts, 1);
+
return 0;
}
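
Since the filters map value is now a fixed MAX_FILTERS-sized array rather than one entry per index, a missing element can no longer terminate the loop; PBF_OP_DONE is the explicit terminator, and reaching it means every preceding check passed. User space only has to cap the list:

    /* sketch: after writing n real ops into the entry array */
    entries[n].op = PBF_OP_DONE;   /* term/value fields are ignored past here */
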
diff --git a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
index e9028235d771..4dcad7b682bd 100644
--- a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
+++ b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
@@ -15,6 +15,7 @@
typedef __u8 u8;
typedef __u32 u32;
+typedef __s32 s32;
typedef __u64 u64;
typedef __s64 s64;
@@ -170,10 +171,16 @@ struct perf_sample_data {
u32 cpu;
} cpu_entry;
u64 phys_addr;
+ u64 cgroup;
u64 data_page_size;
u64 code_page_size;
} __attribute__((__aligned__(64))) __attribute__((preserve_access_index));
+struct perf_event {
+ struct perf_event *parent;
+ u64 id;
+} __attribute__((preserve_access_index));
+
struct bpf_perf_event_data_kern {
struct perf_sample_data *data;
struct perf_event *event;
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 87704d713ff6..b80c12c74bbb 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -34,6 +34,7 @@ struct branch_info {
struct addr_map_symbol from;
struct addr_map_symbol to;
struct branch_flags flags;
+ u64 branch_stack_cntr;
char *srcline_from;
char *srcline_to;
};
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 83a1581e8cf1..8982f68e7230 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -42,7 +42,7 @@
static bool no_buildid_cache;
-int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
+int build_id__mark_dso_hit(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
@@ -67,38 +67,6 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_sample *sample
- __maybe_unused,
- struct machine *machine)
-{
- struct thread *thread = machine__findnew_thread(machine,
- event->fork.pid,
- event->fork.tid);
-
- dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
- event->fork.ppid, event->fork.ptid);
-
- if (thread) {
- machine__remove_thread(machine, thread);
- thread__put(thread);
- }
-
- return 0;
-}
-
-struct perf_tool build_id__mark_dso_hit_ops = {
- .sample = build_id__mark_dso_hit,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .fork = perf_event__process_fork,
- .exit = perf_event__exit_del_thread,
- .attr = perf_event__process_attr,
- .build_id = perf_event__process_build_id,
- .ordered_events = true,
-};
-
int build_id__sprintf(const struct build_id *build_id, char *bf)
{
char *bid = bf;
@@ -309,8 +277,8 @@ static int write_buildid(const char *name, size_t name_len, struct build_id *bid
struct perf_record_header_build_id b;
size_t len;
- len = name_len + 1;
- len = PERF_ALIGN(len, NAME_ALIGN);
+ len = sizeof(b) + name_len + 1;
+ len = PERF_ALIGN(len, sizeof(u64));
memset(&b, 0, sizeof(b));
memcpy(&b.data, bid->data, bid->size);
@@ -318,7 +286,7 @@ static int write_buildid(const char *name, size_t name_len, struct build_id *bid
misc |= PERF_RECORD_MISC_BUILD_ID_SIZE;
b.pid = pid;
b.header.misc = misc;
- b.header.size = sizeof(b) + len;
+ b.header.size = len;
err = do_write(fd, &b, sizeof(b));
if (err < 0)
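
A quick arithmetic check of the new record sizing, taking name_len == 5 and NAME_ALIGN as defined in this file (64):

    /* old: size = sizeof(b) + PERF_ALIGN(5 + 1, 64)
     *            = sizeof(b) + 64         -> up to 63 bytes of padding    */
    /* new: size = PERF_ALIGN(sizeof(b) + 5 + 1, sizeof(u64))
     *            -> at most 7 padding bytes, and it is the whole record,
     *               header included, that ends up u64-aligned             */
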
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 3fa8bffb07ca..a212497bfdb0 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -16,11 +16,9 @@ struct build_id {
size_t size;
};
-struct nsinfo;
-
-extern struct perf_tool build_id__mark_dso_hit_ops;
struct dso;
struct feat_fd;
+struct nsinfo;
void build_id__init(struct build_id *bid, const u8 *data, size_t size);
int build_id__sprintf(const struct build_id *build_id, char *bf);
@@ -35,11 +33,11 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size,
char *__dso__build_id_filename(const struct dso *dso, char *bf, size_t size,
bool is_debug, bool is_kallsyms);
-int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
+int build_id__mark_dso_hit(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
struct machine *machine);
-int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
+int perf_event__inject_buildid(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
struct machine *machine);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 6d075648d2cc..0c7564747a14 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1797,3 +1797,38 @@ s64 callchain_avg_cycles(struct callchain_node *cnode)
return cycles;
}
+
+int sample__for_each_callchain_node(struct thread *thread, struct evsel *evsel,
+ struct perf_sample *sample, int max_stack,
+ bool symbols, callchain_iter_fn cb, void *data)
+{
+ struct callchain_cursor *cursor = get_tls_callchain_cursor();
+ int ret;
+
+ if (!cursor)
+ return -ENOMEM;
+
+ /* Fill in the callchain. */
+ ret = __thread__resolve_callchain(thread, cursor, evsel, sample,
+ /*parent=*/NULL, /*root_al=*/NULL,
+ max_stack, symbols);
+ if (ret)
+ return ret;
+
+ /* Switch from writing the callchain to reading it. */
+ callchain_cursor_commit(cursor);
+
+ while (1) {
+ struct callchain_cursor_node *node = callchain_cursor_current(cursor);
+
+ if (!node)
+ break;
+
+ ret = cb(node, data);
+ if (ret)
+ return ret;
+
+ callchain_cursor_advance(cursor);
+ }
+ return 0;
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index d5c66345ae31..86ed9e4d04f9 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -311,4 +311,10 @@ u64 callchain_total_hits(struct hists *hists);
s64 callchain_avg_cycles(struct callchain_node *cnode);
+typedef int (*callchain_iter_fn)(struct callchain_cursor_node *node, void *data);
+
+int sample__for_each_callchain_node(struct thread *thread, struct evsel *evsel,
+ struct perf_sample *sample, int max_stack,
+ bool symbols, callchain_iter_fn cb, void *data);
+
#endif /* __PERF_CALLCHAIN_H */
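
A sketch of how the new iterator is meant to be driven; count_node here is illustrative, not part of the patch:

    static int count_node(struct callchain_cursor_node *node __maybe_unused,
                          void *data)
    {
        int *n = data;

        (*n)++;
        return 0;   /* non-zero stops the walk and is returned to the caller */
    }

    int n = 0;
    int err = sample__for_each_callchain_node(thread, evsel, sample,
                                              PERF_MAX_STACK_DEPTH,
                                              /*symbols=*/true,
                                              count_node, &n);
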
diff --git a/tools/perf/util/cap.c b/tools/perf/util/cap.c
index c3ba841bbf37..7574a67651bc 100644
--- a/tools/perf/util/cap.c
+++ b/tools/perf/util/cap.c
@@ -3,27 +3,52 @@
* Capability utilities
*/
-#ifdef HAVE_LIBCAP_SUPPORT
-
#include "cap.h"
-#include <stdbool.h>
-#include <sys/capability.h>
-
-bool perf_cap__capable(cap_value_t cap)
-{
- cap_flag_value_t val;
- cap_t caps = cap_get_proc();
+#include "debug.h"
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/capability.h>
+#include <sys/syscall.h>
- if (!caps)
- return false;
+#ifndef SYS_capget
+#define SYS_capget 90
+#endif
- if (cap_get_flag(caps, cap, CAP_EFFECTIVE, &val) != 0)
- val = CAP_CLEAR;
+#define MAX_LINUX_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
- if (cap_free(caps) != 0)
- return false;
-
- return val == CAP_SET;
+bool perf_cap__capable(int cap, bool *used_root)
+{
+ struct __user_cap_header_struct header = {
+ .version = _LINUX_CAPABILITY_VERSION_3,
+ .pid = getpid(),
+ };
+ struct __user_cap_data_struct data[MAX_LINUX_CAPABILITY_U32S];
+ __u32 cap_val;
+
+ *used_root = false;
+ while (syscall(SYS_capget, &header, &data[0]) == -1) {
+ /* Retry, first attempt has set the header.version correctly. */
+ if (errno == EINVAL && header.version != _LINUX_CAPABILITY_VERSION_3 &&
+ header.version == _LINUX_CAPABILITY_VERSION_1)
+ continue;
+
+ pr_debug2("capget syscall failed (%s - %d) fall back on root check\n",
+ strerror(errno), errno);
+ *used_root = true;
+ return geteuid() == 0;
+ }
+
+ /* Extract the relevant capability bit. */
+ if (cap >= 32) {
+ if (header.version == _LINUX_CAPABILITY_VERSION_3) {
+ cap_val = data[1].effective;
+ } else {
+ /* Capability beyond 32 is requested but only 32 are supported. */
+ return false;
+ }
+ } else {
+ cap_val = data[0].effective;
+ }
+ return (cap_val & (1 << (cap & 0x1f))) != 0;
}
-
-#endif /* HAVE_LIBCAP_SUPPORT */
diff --git a/tools/perf/util/cap.h b/tools/perf/util/cap.h
index ae52878c0b2e..0c6a1ff55f07 100644
--- a/tools/perf/util/cap.h
+++ b/tools/perf/util/cap.h
@@ -3,26 +3,6 @@
#define __PERF_CAP_H
#include <stdbool.h>
-#include <linux/capability.h>
-#include <linux/compiler.h>
-
-#ifdef HAVE_LIBCAP_SUPPORT
-
-#include <sys/capability.h>
-
-bool perf_cap__capable(cap_value_t cap);
-
-#else
-
-#include <unistd.h>
-#include <sys/types.h>
-
-static inline bool perf_cap__capable(int cap __maybe_unused)
-{
- return geteuid() == 0;
-}
-
-#endif /* HAVE_LIBCAP_SUPPORT */
/* For older systems */
#ifndef CAP_SYSLOG
@@ -33,4 +13,7 @@ static inline bool perf_cap__capable(int cap __maybe_unused)
#define CAP_PERFMON 38
#endif
+/* Query whether the current process has a capability; used_root is set if the euid == 0 fallback was used. */
+bool perf_cap__capable(int cap, bool *used_root);
+
#endif /* __PERF_CAP_H */
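
Callers can now distinguish a real capability check from the euid fallback. A minimal sketch of the new interface:

    bool used_root;

    if (!perf_cap__capable(CAP_PERFMON, &used_root)) {
        if (used_root)   /* capget failed; the answer came from geteuid() == 0 */
            pr_debug("no CAP_PERFMON and not root\n");
        return -EPERM;
    }
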
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index e917985bbbe6..b78ef0262135 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -41,7 +41,7 @@ const u32 INSTR_PER_NS = 10;
struct cs_etm_decoder {
void *data;
- void (*packet_printer)(const char *msg);
+ void (*packet_printer)(const char *msg, void *data);
bool suppress_printing;
dcd_tree_handle_t dcd_tree;
cs_etm_mem_cb_type mem_access;
@@ -202,7 +202,7 @@ static void cs_etm_decoder__print_str_cb(const void *p_context,
const struct cs_etm_decoder *decoder = p_context;
if (p_context && str_len && !decoder->suppress_printing)
- decoder->packet_printer(msg);
+ decoder->packet_printer(msg, decoder->data);
}
static int
@@ -388,7 +388,8 @@ cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
+cs_etm_decoder__buffer_packet(struct cs_etm_queue *etmq,
+ struct cs_etm_packet_queue *packet_queue,
const u8 trace_chan_id,
enum cs_etm_sample_type sample_type)
{
@@ -398,7 +399,7 @@ cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
return OCSD_RESP_FATAL_SYS_ERR;
- if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
+ if (cs_etm__get_cpu(etmq, trace_chan_id, &cpu) < 0)
return OCSD_RESP_FATAL_SYS_ERR;
et = packet_queue->tail;
@@ -436,7 +437,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
int ret = 0;
struct cs_etm_packet *packet;
- ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
+ ret = cs_etm_decoder__buffer_packet(etmq, packet_queue, trace_chan_id,
CS_ETM_RANGE);
if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
return ret;
@@ -496,7 +497,8 @@ out:
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
+cs_etm_decoder__buffer_discontinuity(struct cs_etm_queue *etmq,
+ struct cs_etm_packet_queue *queue,
const uint8_t trace_chan_id)
{
/*
@@ -504,18 +506,19 @@ cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
* reset time statistics.
*/
cs_etm_decoder__reset_timestamp(queue);
- return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
+ return cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
CS_ETM_DISCONTINUITY);
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
+cs_etm_decoder__buffer_exception(struct cs_etm_queue *etmq,
+ struct cs_etm_packet_queue *queue,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{ int ret = 0;
struct cs_etm_packet *packet;
- ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
+ ret = cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
CS_ETM_EXCEPTION);
if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
return ret;
@@ -527,10 +530,11 @@ cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
+cs_etm_decoder__buffer_exception_ret(struct cs_etm_queue *etmq,
+ struct cs_etm_packet_queue *queue,
const uint8_t trace_chan_id)
{
- return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
+ return cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
CS_ETM_EXCEPTION_RET);
}
@@ -599,7 +603,7 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
case OCSD_GEN_TRC_ELEM_EO_TRACE:
case OCSD_GEN_TRC_ELEM_NO_SYNC:
case OCSD_GEN_TRC_ELEM_TRACE_ON:
- resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
+ resp = cs_etm_decoder__buffer_discontinuity(etmq, packet_queue,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
@@ -607,11 +611,11 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
- resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
+ resp = cs_etm_decoder__buffer_exception(etmq, packet_queue, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
- resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
+ resp = cs_etm_decoder__buffer_exception_ret(etmq, packet_queue,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_TIMESTAMP:
@@ -680,10 +684,6 @@ cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
return -1;
}
- /* if the CPU has no trace ID associated, no decoder needed */
- if (csid == CORESIGHT_TRACE_ID_UNUSED_VAL)
- return 0;
-
if (d_params->operation == CS_ETM_OPERATION_DECODE) {
if (ocsd_dt_create_decoder(decoder->dcd_tree,
decoder->decoder_name,
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index 272c2efe78ee..12c782fa6db2 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -60,7 +60,7 @@ struct cs_etm_trace_params {
struct cs_etm_decoder_params {
int operation;
- void (*packet_printer)(const char *msg);
+ void (*packet_printer)(const char *msg, void *data);
cs_etm_mem_cb_type mem_acc_cb;
bool formatted;
bool fsyncs;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 5e9fbcfad7d4..90f32f327b9b 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -97,28 +97,43 @@ struct cs_etm_traceid_queue {
struct cs_etm_packet_queue packet_queue;
};
+enum cs_etm_format {
+ UNSET,
+ FORMATTED,
+ UNFORMATTED
+};
+
struct cs_etm_queue {
struct cs_etm_auxtrace *etm;
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
unsigned int queue_nr;
u8 pending_timestamp_chan_id;
+ enum cs_etm_format format;
u64 offset;
const unsigned char *buf;
size_t buf_len, buf_used;
/* Conversion between traceID and index in traceid_queues array */
struct intlist *traceid_queues_list;
struct cs_etm_traceid_queue **traceid_queues;
+ /* Conversion between traceID and metadata pointers */
+ struct intlist *traceid_list;
+ /*
+ * Same as traceid_list, but traceid_list may be a reference to another
+ * queue's which has a matching sink ID.
+ */
+ struct intlist *own_traceid_list;
+ u32 sink_id;
};
-/* RB tree for quick conversion between traceID and metadata pointers */
-static struct intlist *traceid_list;
-
static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
pid_t tid);
static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
+static int cs_etm__metadata_get_trace_id(u8 *trace_chan_id, u64 *cpu_metadata);
+static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu);
+static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata);
/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300
@@ -133,6 +148,7 @@ static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
(queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
+#define SINK_UNSET ((u32) -1)
static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
@@ -144,12 +160,12 @@ static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
return CS_ETM_PROTO_ETMV3;
}
-static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
+static int cs_etm__get_magic(struct cs_etm_queue *etmq, u8 trace_chan_id, u64 *magic)
{
struct int_node *inode;
u64 *metadata;
- inode = intlist__find(traceid_list, trace_chan_id);
+ inode = intlist__find(etmq->traceid_list, trace_chan_id);
if (!inode)
return -EINVAL;
@@ -158,12 +174,12 @@ static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
return 0;
}
-int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
+int cs_etm__get_cpu(struct cs_etm_queue *etmq, u8 trace_chan_id, int *cpu)
{
struct int_node *inode;
u64 *metadata;
- inode = intlist__find(traceid_list, trace_chan_id);
+ inode = intlist__find(etmq->traceid_list, trace_chan_id);
if (!inode)
return -EINVAL;
@@ -215,26 +231,171 @@ enum cs_etm_pid_fmt cs_etm__get_pid_fmt(struct cs_etm_queue *etmq)
return etmq->etm->pid_fmt;
}
-static int cs_etm__map_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
+static int cs_etm__insert_trace_id_node(struct cs_etm_queue *etmq,
+ u8 trace_chan_id, u64 *cpu_metadata)
{
- struct int_node *inode;
-
/* Get an RB node for this CPU */
- inode = intlist__findnew(traceid_list, trace_chan_id);
+ struct int_node *inode = intlist__findnew(etmq->traceid_list, trace_chan_id);
/* Something went wrong, no need to continue */
if (!inode)
return -ENOMEM;
+ /* Disallow re-mapping a different traceID to metadata pair. */
+ if (inode->priv) {
+ u64 *curr_cpu_data = inode->priv;
+ u8 curr_chan_id;
+ int err;
+
+ if (curr_cpu_data[CS_ETM_CPU] != cpu_metadata[CS_ETM_CPU]) {
+ /*
+ * With > CORESIGHT_TRACE_IDS_MAX ETMs, overlapping IDs
+ * are expected (but not supported) in per-thread mode,
+ * rather than signifying an error.
+ */
+ if (etmq->etm->per_thread_decoding)
+ pr_err("CS_ETM: overlapping Trace IDs aren't currently supported in per-thread mode\n");
+ else
+ pr_err("CS_ETM: map mismatch between HW_ID packet CPU and Trace ID\n");
+
+ return -EINVAL;
+ }
+
+ /* check that the mapped ID matches */
+ err = cs_etm__metadata_get_trace_id(&curr_chan_id, curr_cpu_data);
+ if (err)
+ return err;
+
+ if (curr_chan_id != trace_chan_id) {
+ pr_err("CS_ETM: mismatch between CPU trace ID and HW_ID packet ID\n");
+ return -EINVAL;
+ }
+
+ /* Skip re-adding the same mappings if everything matched */
+ return 0;
+ }
+
+ /* Not one we've seen before, associate the traceID with the metadata pointer */
+ inode->priv = cpu_metadata;
+
+ return 0;
+}
+
+static struct cs_etm_queue *cs_etm__get_queue(struct cs_etm_auxtrace *etm, int cpu)
+{
+ if (etm->per_thread_decoding)
+ return etm->queues.queue_array[0].priv;
+ else
+ return etm->queues.queue_array[cpu].priv;
+}
+
+static int cs_etm__map_trace_id_v0(struct cs_etm_auxtrace *etm, u8 trace_chan_id,
+ u64 *cpu_metadata)
+{
+ struct cs_etm_queue *etmq;
+
+ /*
+ * If the queue is unformatted then only save one mapping in the
+ * queue associated with that CPU so only one decoder is made.
+ */
+ etmq = cs_etm__get_queue(etm, cpu_metadata[CS_ETM_CPU]);
+ if (etmq->format == UNFORMATTED)
+ return cs_etm__insert_trace_id_node(etmq, trace_chan_id,
+ cpu_metadata);
+
/*
- * The node for that CPU should not be taken.
- * Back out if that's the case.
+ * Otherwise, version 0 trace IDs are global so save them into every
+ * queue.
*/
- if (inode->priv)
+ for (unsigned int i = 0; i < etm->queues.nr_queues; ++i) {
+ int ret;
+
+ etmq = etm->queues.queue_array[i].priv;
+ ret = cs_etm__insert_trace_id_node(etmq, trace_chan_id,
+ cpu_metadata);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs_etm__process_trace_id_v0(struct cs_etm_auxtrace *etm, int cpu,
+ u64 hw_id)
+{
+ int err;
+ u64 *cpu_data;
+ u8 trace_chan_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
+
+ cpu_data = get_cpu_data(etm, cpu);
+ if (cpu_data == NULL)
return -EINVAL;
- /* All good, associate the traceID with the metadata pointer */
- inode->priv = cpu_metadata;
+ err = cs_etm__map_trace_id_v0(etm, trace_chan_id, cpu_data);
+ if (err)
+ return err;
+
+ /*
+ * if we are picking up the association from the packet, need to plug
+ * the correct trace ID into the metadata for setting up decoders later.
+ */
+ return cs_etm__metadata_set_trace_id(trace_chan_id, cpu_data);
+}
+
+static int cs_etm__process_trace_id_v0_1(struct cs_etm_auxtrace *etm, int cpu,
+ u64 hw_id)
+{
+ struct cs_etm_queue *etmq = cs_etm__get_queue(etm, cpu);
+ int ret;
+ u64 *cpu_data;
+ u32 sink_id = FIELD_GET(CS_AUX_HW_ID_SINK_ID_MASK, hw_id);
+ u8 trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
+
+ /*
+ * Check sink id hasn't changed in per-cpu mode. In per-thread mode,
+ * let it pass for now until an actual overlapping trace ID is hit. In
+ * most cases IDs won't overlap even if the sink changes.
+ */
+ if (!etmq->etm->per_thread_decoding && etmq->sink_id != SINK_UNSET &&
+ etmq->sink_id != sink_id) {
+ pr_err("CS_ETM: mismatch between sink IDs\n");
+ return -EINVAL;
+ }
+
+ etmq->sink_id = sink_id;
+
+ /* Find which other queues use this sink and link their ID maps */
+ for (unsigned int i = 0; i < etm->queues.nr_queues; ++i) {
+ struct cs_etm_queue *other_etmq = etm->queues.queue_array[i].priv;
+
+ /* Different sinks, skip */
+ if (other_etmq->sink_id != etmq->sink_id)
+ continue;
+
+ /* Already linked, skip */
+ if (other_etmq->traceid_list == etmq->traceid_list)
+ continue;
+
+ /* At the point of first linking, this one should be empty */
+ if (!intlist__empty(etmq->traceid_list)) {
+ pr_err("CS_ETM: Can't link populated trace ID lists\n");
+ return -EINVAL;
+ }
+
+ etmq->own_traceid_list = NULL;
+ intlist__delete(etmq->traceid_list);
+ etmq->traceid_list = other_etmq->traceid_list;
+ break;
+ }
+
+ cpu_data = get_cpu_data(etm, cpu);
+ ret = cs_etm__insert_trace_id_node(etmq, trace_id, cpu_data);
+ if (ret)
+ return ret;
+
+ ret = cs_etm__metadata_set_trace_id(trace_id, cpu_data);
+ if (ret)
+ return ret;
return 0;
}
@@ -261,7 +422,6 @@ static int cs_etm__metadata_get_trace_id(u8 *trace_chan_id, u64 *cpu_metadata)
/*
* update metadata trace ID from the value found in the AUX_HW_INFO packet.
- * This will also clear the CORESIGHT_TRACE_ID_UNUSED_FLAG flag if present.
*/
static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
{
@@ -322,20 +482,16 @@ static int cs_etm__process_aux_output_hw_id(struct perf_session *session,
{
struct cs_etm_auxtrace *etm;
struct perf_sample sample;
- struct int_node *inode;
struct evsel *evsel;
- u64 *cpu_data;
u64 hw_id;
int cpu, version, err;
- u8 trace_chan_id, curr_chan_id;
/* extract and parse the HW ID */
hw_id = event->aux_output_hw_id.hw_id;
- version = FIELD_GET(CS_AUX_HW_ID_VERSION_MASK, hw_id);
- trace_chan_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
+ version = FIELD_GET(CS_AUX_HW_ID_MAJOR_VERSION_MASK, hw_id);
/* check that we can handle this version */
- if (version > CS_AUX_HW_ID_CURR_VERSION) {
+ if (version > CS_AUX_HW_ID_MAJOR_VERSION) {
pr_err("CS ETM Trace: PERF_RECORD_AUX_OUTPUT_HW_ID version %d not supported. Please update Perf.\n",
version);
return -EINVAL;
@@ -360,43 +516,10 @@ static int cs_etm__process_aux_output_hw_id(struct perf_session *session,
return -EINVAL;
}
- /* See if the ID is mapped to a CPU, and it matches the current CPU */
- inode = intlist__find(traceid_list, trace_chan_id);
- if (inode) {
- cpu_data = inode->priv;
- if ((int)cpu_data[CS_ETM_CPU] != cpu) {
- pr_err("CS_ETM: map mismatch between HW_ID packet CPU and Trace ID\n");
- return -EINVAL;
- }
-
- /* check that the mapped ID matches */
- err = cs_etm__metadata_get_trace_id(&curr_chan_id, cpu_data);
- if (err)
- return err;
- if (curr_chan_id != trace_chan_id) {
- pr_err("CS_ETM: mismatch between CPU trace ID and HW_ID packet ID\n");
- return -EINVAL;
- }
-
- /* mapped and matched - return OK */
- return 0;
- }
-
- cpu_data = get_cpu_data(etm, cpu);
- if (cpu_data == NULL)
- return err;
-
- /* not one we've seen before - lets map it */
- err = cs_etm__map_trace_id(trace_chan_id, cpu_data);
- if (err)
- return err;
+ if (FIELD_GET(CS_AUX_HW_ID_MINOR_VERSION_MASK, hw_id) == 0)
+ return cs_etm__process_trace_id_v0(etm, cpu, hw_id);
- /*
- * if we are picking up the association from the packet, need to plug
- * the correct trace ID into the metadata for setting up decoders later.
- */
- err = cs_etm__metadata_set_trace_id(trace_chan_id, cpu_data);
- return err;
+ return cs_etm__process_trace_id_v0_1(etm, cpu, hw_id);
}
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
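
The dispatch above splits on the packed HW_ID word: the major version gates compatibility, while the minor version selects the decoding scheme (v0 trace IDs are global; v0.1 adds a sink ID, so IDs are only unique per sink). Roughly:

    u64 hw_id = event->aux_output_hw_id.hw_id;

    if (FIELD_GET(CS_AUX_HW_ID_MAJOR_VERSION_MASK, hw_id) > CS_AUX_HW_ID_MAJOR_VERSION)
        return -EINVAL;                              /* written by a newer perf */

    if (FIELD_GET(CS_AUX_HW_ID_MINOR_VERSION_MASK, hw_id) == 0)
        return cs_etm__process_trace_id_v0(etm, cpu, hw_id);   /* global IDs   */

    return cs_etm__process_trace_id_v0_1(etm, cpu, hw_id);     /* per-sink IDs */
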
@@ -639,94 +762,79 @@ static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
}
}
-static void cs_etm__packet_dump(const char *pkt_string)
+static void cs_etm__packet_dump(const char *pkt_string, void *data)
{
const char *color = PERF_COLOR_BLUE;
int len = strlen(pkt_string);
+ struct cs_etm_queue *etmq = data;
+ char queue_nr[64];
+
+ if (verbose)
+ snprintf(queue_nr, sizeof(queue_nr), "Qnr:%d; ", etmq->queue_nr);
+ else
+ queue_nr[0] = '\0';
if (len && (pkt_string[len-1] == '\n'))
- color_fprintf(stdout, color, " %s", pkt_string);
+ color_fprintf(stdout, color, " %s%s", queue_nr, pkt_string);
else
- color_fprintf(stdout, color, " %s\n", pkt_string);
+ color_fprintf(stdout, color, " %s%s\n", queue_nr, pkt_string);
fflush(stdout);
}
static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int t_idx,
- int m_idx, u32 etmidr)
+ u64 *metadata, u32 etmidr)
{
- u64 **metadata = etm->metadata;
-
- t_params[t_idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
- t_params[t_idx].etmv3.reg_ctrl = metadata[m_idx][CS_ETM_ETMCR];
- t_params[t_idx].etmv3.reg_trc_id = metadata[m_idx][CS_ETM_ETMTRACEIDR];
+ t_params->protocol = cs_etm__get_v7_protocol_version(etmidr);
+ t_params->etmv3.reg_ctrl = metadata[CS_ETM_ETMCR];
+ t_params->etmv3.reg_trc_id = metadata[CS_ETM_ETMTRACEIDR];
}
static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int t_idx,
- int m_idx)
+ u64 *metadata)
{
- u64 **metadata = etm->metadata;
-
- t_params[t_idx].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[t_idx].etmv4.reg_idr0 = metadata[m_idx][CS_ETMV4_TRCIDR0];
- t_params[t_idx].etmv4.reg_idr1 = metadata[m_idx][CS_ETMV4_TRCIDR1];
- t_params[t_idx].etmv4.reg_idr2 = metadata[m_idx][CS_ETMV4_TRCIDR2];
- t_params[t_idx].etmv4.reg_idr8 = metadata[m_idx][CS_ETMV4_TRCIDR8];
- t_params[t_idx].etmv4.reg_configr = metadata[m_idx][CS_ETMV4_TRCCONFIGR];
- t_params[t_idx].etmv4.reg_traceidr = metadata[m_idx][CS_ETMV4_TRCTRACEIDR];
+ t_params->protocol = CS_ETM_PROTO_ETMV4i;
+ t_params->etmv4.reg_idr0 = metadata[CS_ETMV4_TRCIDR0];
+ t_params->etmv4.reg_idr1 = metadata[CS_ETMV4_TRCIDR1];
+ t_params->etmv4.reg_idr2 = metadata[CS_ETMV4_TRCIDR2];
+ t_params->etmv4.reg_idr8 = metadata[CS_ETMV4_TRCIDR8];
+ t_params->etmv4.reg_configr = metadata[CS_ETMV4_TRCCONFIGR];
+ t_params->etmv4.reg_traceidr = metadata[CS_ETMV4_TRCTRACEIDR];
}
static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int t_idx,
- int m_idx)
+ u64 *metadata)
{
- u64 **metadata = etm->metadata;
-
- t_params[t_idx].protocol = CS_ETM_PROTO_ETE;
- t_params[t_idx].ete.reg_idr0 = metadata[m_idx][CS_ETE_TRCIDR0];
- t_params[t_idx].ete.reg_idr1 = metadata[m_idx][CS_ETE_TRCIDR1];
- t_params[t_idx].ete.reg_idr2 = metadata[m_idx][CS_ETE_TRCIDR2];
- t_params[t_idx].ete.reg_idr8 = metadata[m_idx][CS_ETE_TRCIDR8];
- t_params[t_idx].ete.reg_configr = metadata[m_idx][CS_ETE_TRCCONFIGR];
- t_params[t_idx].ete.reg_traceidr = metadata[m_idx][CS_ETE_TRCTRACEIDR];
- t_params[t_idx].ete.reg_devarch = metadata[m_idx][CS_ETE_TRCDEVARCH];
+ t_params->protocol = CS_ETM_PROTO_ETE;
+ t_params->ete.reg_idr0 = metadata[CS_ETE_TRCIDR0];
+ t_params->ete.reg_idr1 = metadata[CS_ETE_TRCIDR1];
+ t_params->ete.reg_idr2 = metadata[CS_ETE_TRCIDR2];
+ t_params->ete.reg_idr8 = metadata[CS_ETE_TRCIDR8];
+ t_params->ete.reg_configr = metadata[CS_ETE_TRCCONFIGR];
+ t_params->ete.reg_traceidr = metadata[CS_ETE_TRCTRACEIDR];
+ t_params->ete.reg_devarch = metadata[CS_ETE_TRCDEVARCH];
}
static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm,
- bool formatted,
- int sample_cpu,
- int decoders)
-{
- int t_idx, m_idx;
- u32 etmidr;
- u64 architecture;
-
- for (t_idx = 0; t_idx < decoders; t_idx++) {
- if (formatted)
- m_idx = t_idx;
- else {
- m_idx = get_cpu_data_idx(etm, sample_cpu);
- if (m_idx == -1) {
- pr_warning("CS_ETM: unknown CPU, falling back to first metadata\n");
- m_idx = 0;
- }
- }
+ struct cs_etm_queue *etmq)
+{
+ struct int_node *inode;
- architecture = etm->metadata[m_idx][CS_ETM_MAGIC];
+ intlist__for_each_entry(inode, etmq->traceid_list) {
+ u64 *metadata = inode->priv;
+ u64 architecture = metadata[CS_ETM_MAGIC];
+ u32 etmidr;
switch (architecture) {
case __perf_cs_etmv3_magic:
- etmidr = etm->metadata[m_idx][CS_ETM_ETMIDR];
- cs_etm__set_trace_param_etmv3(t_params, etm, t_idx, m_idx, etmidr);
+ etmidr = metadata[CS_ETM_ETMIDR];
+ cs_etm__set_trace_param_etmv3(t_params++, metadata, etmidr);
break;
case __perf_cs_etmv4_magic:
- cs_etm__set_trace_param_etmv4(t_params, etm, t_idx, m_idx);
+ cs_etm__set_trace_param_etmv4(t_params++, metadata);
break;
case __perf_cs_ete_magic:
- cs_etm__set_trace_param_ete(t_params, etm, t_idx, m_idx);
+ cs_etm__set_trace_param_ete(t_params++, metadata);
break;
default:
return -EINVAL;
@@ -738,8 +846,7 @@ static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
struct cs_etm_queue *etmq,
- enum cs_etm_decoder_operation mode,
- bool formatted)
+ enum cs_etm_decoder_operation mode)
{
int ret = -EINVAL;
@@ -749,7 +856,7 @@ static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
d_params->packet_printer = cs_etm__packet_dump;
d_params->operation = mode;
d_params->data = etmq;
- d_params->formatted = formatted;
+ d_params->formatted = etmq->format == FORMATTED;
d_params->fsyncs = false;
d_params->hsyncs = false;
d_params->frame_aligned = true;
@@ -788,7 +895,7 @@ static void cs_etm__dump_event(struct cs_etm_queue *etmq,
}
static int cs_etm__flush_events(struct perf_session *session,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
@@ -850,6 +957,7 @@ static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
static void cs_etm__free_queue(void *priv)
{
+ struct int_node *inode, *tmp;
struct cs_etm_queue *etmq = priv;
if (!etmq)
@@ -857,6 +965,16 @@ static void cs_etm__free_queue(void *priv)
cs_etm_decoder__free(etmq->decoder);
cs_etm__free_traceid_queues(etmq);
+
+ if (etmq->own_traceid_list) {
+ /* First remove all traceID/metadata nodes for the RB tree */
+ intlist__for_each_entry_safe(inode, tmp, etmq->own_traceid_list)
+ intlist__remove(etmq->own_traceid_list, inode);
+
+ /* Then the RB tree itself */
+ intlist__delete(etmq->own_traceid_list);
+ }
+
free(etmq);
}
@@ -879,19 +997,12 @@ static void cs_etm__free_events(struct perf_session *session)
static void cs_etm__free(struct perf_session *session)
{
int i;
- struct int_node *inode, *tmp;
struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
cs_etm__free_events(session);
session->auxtrace = NULL;
- /* First remove all traceID/metadata nodes for the RB tree */
- intlist__for_each_entry_safe(inode, tmp, traceid_list)
- intlist__remove(traceid_list, inode);
- /* Then the RB tree itself */
- intlist__delete(traceid_list);
-
for (i = 0; i < aux->num_cpu; i++)
zfree(&aux->metadata[i]);
@@ -1041,19 +1152,9 @@ out:
return ret;
}
-static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
- bool formatted, int sample_cpu)
+static struct cs_etm_queue *cs_etm__alloc_queue(void)
{
- struct cs_etm_decoder_params d_params;
- struct cs_etm_trace_params *t_params = NULL;
- struct cs_etm_queue *etmq;
- /*
- * Each queue can only contain data from one CPU when unformatted, so only one decoder is
- * needed.
- */
- int decoders = formatted ? etm->num_cpu : 1;
-
- etmq = zalloc(sizeof(*etmq));
+ struct cs_etm_queue *etmq = zalloc(sizeof(*etmq));
if (!etmq)
return NULL;
@@ -1061,42 +1162,17 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
if (!etmq->traceid_queues_list)
goto out_free;
- /* Use metadata to fill in trace parameters for trace decoder */
- t_params = zalloc(sizeof(*t_params) * decoders);
-
- if (!t_params)
- goto out_free;
-
- if (cs_etm__init_trace_params(t_params, etm, formatted, sample_cpu, decoders))
- goto out_free;
-
- /* Set decoder parameters to decode trace packets */
- if (cs_etm__init_decoder_params(&d_params, etmq,
- dump_trace ? CS_ETM_OPERATION_PRINT :
- CS_ETM_OPERATION_DECODE,
- formatted))
- goto out_free;
-
- etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
- t_params);
-
- if (!etmq->decoder)
- goto out_free;
-
/*
- * Register a function to handle all memory accesses required by
- * the trace decoder library.
+ * Create an RB tree for traceID-metadata tuples. Since the lookup
+ * has to be made for each packet that gets decoded, optimizing access
+ * with something faster than a sequential array search is worth doing.
*/
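+ /*
+ * own_traceid_list tracks the allocation so that cs_etm__free_queue()
+ * can free it exactly once; traceid_list is the pointer used for the
+ * actual lookups.
+ */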
- if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
- 0x0L, ((u64) -1L),
- cs_etm__mem_access))
- goto out_free_decoder;
+ etmq->traceid_list = etmq->own_traceid_list = intlist__new(NULL);
+ if (!etmq->traceid_list)
+ goto out_free;
- zfree(&t_params);
return etmq;
-out_free_decoder:
- cs_etm_decoder__free(etmq->decoder);
out_free:
intlist__delete(etmq->traceid_queues_list);
free(etmq);
@@ -1106,16 +1182,14 @@ out_free:
static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
- unsigned int queue_nr,
- bool formatted,
- int sample_cpu)
+ unsigned int queue_nr)
{
struct cs_etm_queue *etmq = queue->priv;
- if (list_empty(&queue->head) || etmq)
+ if (etmq)
return 0;
- etmq = cs_etm__alloc_queue(etm, formatted, sample_cpu);
+ etmq = cs_etm__alloc_queue();
if (!etmq)
return -ENOMEM;
@@ -1123,7 +1197,9 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
queue->priv = etmq;
etmq->etm = etm;
etmq->queue_nr = queue_nr;
+ queue->cpu = queue_nr; /* Placeholder, may be reset to -1 in per-thread mode */
etmq->offset = 0;
+ etmq->sink_id = SINK_UNSET;
return 0;
}
@@ -1267,8 +1343,12 @@ static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
- /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
- if (packet->sample_type == CS_ETM_DISCONTINUITY)
+ /*
+ * Return 0 for packets that have no addresses so that CS_ETM_INVAL_ADDR doesn't
+ * appear in samples.
+ */
+ if (packet->sample_type == CS_ETM_DISCONTINUITY ||
+ packet->sample_type == CS_ETM_EXCEPTION)
return 0;
return packet->start_addr;
@@ -1595,35 +1675,6 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
return ret;
}
-struct cs_etm_synth {
- struct perf_tool dummy_tool;
- struct perf_session *session;
-};
-
-static int cs_etm__event_synth(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- struct cs_etm_synth *cs_etm_synth =
- container_of(tool, struct cs_etm_synth, dummy_tool);
-
- return perf_session__deliver_synth_event(cs_etm_synth->session,
- event, NULL);
-}
-
-static int cs_etm__synth_event(struct perf_session *session,
- struct perf_event_attr *attr, u64 id)
-{
- struct cs_etm_synth cs_etm_synth;
-
- memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
- cs_etm_synth.session = session;
-
- return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
- &id, cs_etm__event_synth);
-}
-
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
struct perf_session *session)
{
@@ -1675,7 +1726,7 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
- err = cs_etm__synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
etm->branches_sample_type = attr.sample_type;
@@ -1698,7 +1749,7 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
attr.sample_period = etm->synth_opts.period;
etm->instructions_sample_period = attr.sample_period;
- err = cs_etm__synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err)
return err;
etm->instructions_sample_type = attr.sample_type;
@@ -2252,7 +2303,7 @@ static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
PERF_IP_FLAG_TRACE_END;
break;
case CS_ETM_EXCEPTION:
- ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
+ ret = cs_etm__get_magic(etmq, packet->trace_chan_id, &magic);
if (ret)
return ret;
@@ -2740,7 +2791,7 @@ static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
static int cs_etm__process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
@@ -2810,7 +2861,7 @@ static void dump_queued_data(struct cs_etm_auxtrace *etm,
static int cs_etm__process_auxtrace_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
@@ -2836,17 +2887,6 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
if (err)
return err;
- /*
- * Knowing if the trace is formatted or not requires a lookup of
- * the aux record so only works in non-piped mode where data is
- * queued in cs_etm__queue_aux_records(). Always assume
- * formatted in piped mode (true).
- */
- err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
- idx, true, -1);
- if (err)
- return err;
-
if (dump_trace)
if (auxtrace_buffer__get_data(buffer, fd)) {
cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
@@ -2963,8 +3003,7 @@ static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_o
struct perf_record_auxtrace *auxtrace_event;
union perf_event auxtrace_fragment;
__u64 aux_offset, aux_size;
- __u32 idx;
- bool formatted;
+ enum cs_etm_format format;
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
@@ -3030,6 +3069,8 @@ static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_o
if (aux_offset >= auxtrace_event->offset &&
aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
+ struct cs_etm_queue *etmq = etm->queues.queue_array[auxtrace_event->idx].priv;
+
/*
* If this AUX event was inside this buffer somewhere, create a new auxtrace event
* based on the sizes of the aux event, and queue that fragment.
@@ -3046,10 +3087,14 @@ static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_o
if (err)
return err;
- idx = auxtrace_event->idx;
- formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
- return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
- idx, formatted, sample->cpu);
+ format = (aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW) ?
+ UNFORMATTED : FORMATTED;
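+ /*
+ * The format is a property of the queue, so each AUX fragment queued
+ * here must agree with the format seen on earlier fragments.
+ */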
+ if (etmq->format != UNSET && format != etmq->format) {
+ pr_err("CS_ETM: mixed formatted and unformatted trace not supported\n");
+ return -EINVAL;
+ }
+ etmq->format = format;
+ return 0;
}
/* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
@@ -3175,7 +3220,8 @@ static bool cs_etm__has_virtual_ts(u64 **metadata, int num_cpu)
}
/* map trace ids to correct metadata block, from information in metadata */
-static int cs_etm__map_trace_ids_metadata(int num_cpu, u64 **metadata)
+static int cs_etm__map_trace_ids_metadata(struct cs_etm_auxtrace *etm, int num_cpu,
+ u64 **metadata)
{
u64 cs_etm_magic;
u8 trace_chan_id;
@@ -3197,7 +3243,7 @@ static int cs_etm__map_trace_ids_metadata(int num_cpu, u64 **metadata)
/* unknown magic number */
return -EINVAL;
}
- err = cs_etm__map_trace_id(trace_chan_id, metadata[i]);
+ err = cs_etm__map_trace_id_v0(etm, trace_chan_id, metadata[i]);
if (err)
return err;
}
@@ -3205,30 +3251,85 @@ static int cs_etm__map_trace_ids_metadata(int num_cpu, u64 **metadata)
}
/*
- * If we found AUX_HW_ID packets, then set any metadata marked as unused to the
- * unused value to reduce the number of unneeded decoders created.
+ * Use the data gathered by the peeks for HW_ID (trace ID mappings) and AUX
+ * (formatted or not) packets to create the decoders.
*/
-static int cs_etm__clear_unused_trace_ids_metadata(int num_cpu, u64 **metadata)
+static int cs_etm__create_queue_decoders(struct cs_etm_queue *etmq)
{
- u64 cs_etm_magic;
- int i;
+ struct cs_etm_decoder_params d_params;
+ struct cs_etm_trace_params *t_params;
+ int decoders = intlist__nr_entries(etmq->traceid_list);
- for (i = 0; i < num_cpu; i++) {
- cs_etm_magic = metadata[i][CS_ETM_MAGIC];
- switch (cs_etm_magic) {
- case __perf_cs_etmv3_magic:
- if (metadata[i][CS_ETM_ETMTRACEIDR] & CORESIGHT_TRACE_ID_UNUSED_FLAG)
- metadata[i][CS_ETM_ETMTRACEIDR] = CORESIGHT_TRACE_ID_UNUSED_VAL;
- break;
- case __perf_cs_etmv4_magic:
- case __perf_cs_ete_magic:
- if (metadata[i][CS_ETMV4_TRCTRACEIDR] & CORESIGHT_TRACE_ID_UNUSED_FLAG)
- metadata[i][CS_ETMV4_TRCTRACEIDR] = CORESIGHT_TRACE_ID_UNUSED_VAL;
- break;
- default:
- /* unknown magic number */
- return -EINVAL;
- }
+ if (decoders == 0)
+ return 0;
+
+ /*
+ * Each queue can only contain data from one CPU when unformatted, so only one decoder is
+ * needed.
+ */
+ if (etmq->format == UNFORMATTED)
+ assert(decoders == 1);
+
+ /* Use metadata to fill in trace parameters for trace decoder */
+ t_params = zalloc(sizeof(*t_params) * decoders);
+
+ if (!t_params)
+ goto out_free;
+
+ if (cs_etm__init_trace_params(t_params, etmq))
+ goto out_free;
+
+ /* Set decoder parameters to decode trace packets */
+ if (cs_etm__init_decoder_params(&d_params, etmq,
+ dump_trace ? CS_ETM_OPERATION_PRINT :
+ CS_ETM_OPERATION_DECODE))
+ goto out_free;
+
+ etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
+ t_params);
+
+ if (!etmq->decoder)
+ goto out_free;
+
+ /*
+ * Register a function to handle all memory accesses required by
+ * the trace decoder library.
+ */
+ if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
+ 0x0L, ((u64) -1L),
+ cs_etm__mem_access))
+ goto out_free_decoder;
+
+ zfree(&t_params);
+ return 0;
+
+out_free_decoder:
+ cs_etm_decoder__free(etmq->decoder);
+out_free:
+ zfree(&t_params);
+ return -EINVAL;
+}
+
+static int cs_etm__create_decoders(struct cs_etm_auxtrace *etm)
+{
+ struct auxtrace_queues *queues = &etm->queues;
+
+ for (unsigned int i = 0; i < queues->nr_queues; i++) {
+ bool empty = list_empty(&queues->queue_array[i].head);
+ struct cs_etm_queue *etmq = queues->queue_array[i].priv;
+ int ret;
+
+ /*
+ * Don't create decoders for empty queues, mainly because
+ * etmq->format is unknown for empty queues.
+ */
+ assert(empty == (etmq->format == UNSET));
+ if (empty)
+ continue;
+
+ ret = cs_etm__create_queue_decoders(etmq);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -3242,30 +3343,19 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
int event_header_size = sizeof(struct perf_event_header);
int total_size = auxtrace_info->header.size;
int priv_size = 0;
- int num_cpu;
+ int num_cpu, max_cpu = 0;
int err = 0;
int aux_hw_id_found;
- int i, j;
+ int i;
u64 *ptr = NULL;
u64 **metadata = NULL;
- /*
- * Create an RB tree for traceID-metadata tuple. Since the conversion
- * has to be made for each packet that gets decoded, optimizing access
- * in anything other than a sequential array is worth doing.
- */
- traceid_list = intlist__new(NULL);
- if (!traceid_list)
- return -ENOMEM;
-
/* First the global part */
ptr = (u64 *) auxtrace_info->priv;
num_cpu = ptr[CS_PMU_TYPE_CPUS] & 0xffffffff;
metadata = zalloc(sizeof(*metadata) * num_cpu);
- if (!metadata) {
- err = -ENOMEM;
- goto err_free_traceid_list;
- }
+ if (!metadata)
+ return -ENOMEM;
/* Start parsing after the common part of the header */
i = CS_HEADER_VERSION_MAX;
@@ -3276,7 +3366,7 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
* required by the trace decoder to properly decode the trace due
* to its highly compressed nature.
*/
- for (j = 0; j < num_cpu; j++) {
+ for (int j = 0; j < num_cpu; j++) {
if (ptr[i] == __perf_cs_etmv3_magic) {
metadata[j] =
cs_etm__create_meta_blk(ptr, &i,
@@ -3300,6 +3390,9 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
err = -ENOMEM;
goto err_free_metadata;
}
+
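+ /* Track the highest CPU number; one queue per CPU is allocated later. */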
+ if ((int) metadata[j][CS_ETM_CPU] > max_cpu)
+ max_cpu = metadata[j][CS_ETM_CPU];
}
/*
@@ -3329,10 +3422,16 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
*/
etm->pid_fmt = cs_etm__init_pid_fmt(metadata[0]);
- err = auxtrace_queues__init(&etm->queues);
+ err = auxtrace_queues__init_nr(&etm->queues, max_cpu + 1);
if (err)
goto err_free_etm;
+ for (unsigned int j = 0; j < etm->queues.nr_queues; ++j) {
+ err = cs_etm__setup_queue(etm, &etm->queues.queue_array[j], j);
+ if (err)
+ goto err_free_queues;
+ }
+
if (session->itrace_synth_opts->set) {
etm->synth_opts = *session->itrace_synth_opts;
} else {
@@ -3396,12 +3495,16 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
if (err)
goto err_free_queues;
+ err = cs_etm__queue_aux_records(session);
+ if (err)
+ goto err_free_queues;
+
/*
* Map Trace ID values to CPU metadata.
*
- * Trace metadata will always contain Trace ID values from the legacy algorithm. If the
- * files has been recorded by a "new" perf updated to handle AUX_HW_ID then the metadata
- * ID value will also have the CORESIGHT_TRACE_ID_UNUSED_FLAG set.
+ * Trace metadata will always contain Trace ID values from the legacy algorithm
+ * in case it's read by a version of Perf that doesn't know about HW_ID packets
+ * or the kernel doesn't emit them.
*
* The updated kernel drivers that use AUX_HW_ID to send Trace IDs will attempt to use
* the same IDs as the old algorithm as far as is possible, unless there are clashes
@@ -3410,15 +3513,14 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
*
* For a perf able to interpret AUX_HW_ID packets we first check for the presence of
* those packets. If they are there then the values will be mapped and plugged into
- * the metadata. We then set any remaining metadata values with the used flag to a
- * value CORESIGHT_TRACE_ID_UNUSED_VAL - which indicates no decoder is required.
+ * the metadata and decoders are only created for each mapping received.
*
* If no AUX_HW_ID packets are present - which means a file recorded on an old kernel
- * then we map Trace ID values to CPU directly from the metadata - clearing any unused
- * flags if present.
+ * then we map Trace ID values to CPU directly from the metadata and create decoders
+ * for all mappings.
*/
- /* first scan for AUX_OUTPUT_HW_ID records to map trace ID values to CPU metadata */
+ /* Scan for AUX_OUTPUT_HW_ID records to map trace ID values to CPU metadata */
aux_hw_id_found = 0;
err = perf_session__peek_events(session, session->header.data_offset,
session->header.data_size,
@@ -3426,17 +3528,14 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
if (err)
goto err_free_queues;
- /* if HW ID found then clear any unused metadata ID values */
- if (aux_hw_id_found)
- err = cs_etm__clear_unused_trace_ids_metadata(num_cpu, metadata);
- /* otherwise, this is a file with metadata values only, map from metadata */
- else
- err = cs_etm__map_trace_ids_metadata(num_cpu, metadata);
-
- if (err)
- goto err_free_queues;
+ /* if no HW ID found this is a file with metadata values only, map from metadata */
+ if (!aux_hw_id_found) {
+ err = cs_etm__map_trace_ids_metadata(etm, num_cpu, metadata);
+ if (err)
+ goto err_free_queues;
+ }
- err = cs_etm__queue_aux_records(session);
+ err = cs_etm__create_decoders(etm);
if (err)
goto err_free_queues;
@@ -3450,10 +3549,8 @@ err_free_etm:
zfree(&etm);
err_free_metadata:
/* No need to check @metadata[j], free(NULL) is supported */
- for (j = 0; j < num_cpu; j++)
+ for (int j = 0; j < num_cpu; j++)
zfree(&metadata[j]);
zfree(&metadata);
-err_free_traceid_list:
- intlist__delete(traceid_list);
return err;
}
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 4696267a32f0..a8caeea720aa 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -230,16 +230,6 @@ struct cs_etm_packet_queue {
/* CoreSight trace ID is currently the bottom 7 bits of the value */
#define CORESIGHT_TRACE_ID_VAL_MASK GENMASK(6, 0)
-/*
- * perf record will set the legacy meta data values as unused initially.
- * This allows perf report to manage the decoders created when dynamic
- * allocation in operation.
- */
-#define CORESIGHT_TRACE_ID_UNUSED_FLAG BIT(31)
-
-/* Value to set for unused trace ID values */
-#define CORESIGHT_TRACE_ID_UNUSED_VAL 0x7F
-
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
void cs_etm_get_default_config(const struct perf_pmu *pmu, struct perf_event_attr *attr);
@@ -252,7 +242,7 @@ enum cs_etm_pid_fmt {
#ifdef HAVE_CSTRACE_SUPPORT
#include <opencsd/ocsd_if_types.h>
-int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
+int cs_etm__get_cpu(struct cs_etm_queue *etmq, u8 trace_chan_id, int *cpu);
enum cs_etm_pid_fmt cs_etm__get_pid_fmt(struct cs_etm_queue *etmq);
int cs_etm__etmq_set_tid_el(struct cs_etm_queue *etmq, pid_t tid,
u8 trace_chan_id, ocsd_ex_level el);
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 2b732bccabad..021e9b1d5cc5 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -792,7 +792,7 @@ static bool is_flush_needed(struct ctf_stream *cs)
return cs->count >= STREAM_FLUSH_COUNT;
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *_event,
struct perf_sample *sample,
struct evsel *evsel,
@@ -871,7 +871,7 @@ do { \
} while(0)
#define __FUNC_PROCESS_NON_SAMPLE(_name, body) \
-static int process_##_name##_event(struct perf_tool *tool, \
+static int process_##_name##_event(const struct perf_tool *tool, \
union perf_event *_event, \
struct perf_sample *sample, \
struct machine *machine) \
@@ -1607,25 +1607,23 @@ int bt_convert__perf2ctf(const char *input, const char *path,
.mode = PERF_DATA_MODE_READ,
.force = opts->force,
};
- struct convert c = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
- .tracing_data = perf_event__process_tracing_data,
- .build_id = perf_event__process_build_id,
- .namespaces = perf_event__process_namespaces,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
- },
- };
+ struct convert c = {};
struct ctf_writer *cw = &c.writer;
int err;
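+ /* Start from the perf_tool__init() defaults, then set only the callbacks used here. */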
+ perf_tool__init(&c.tool, /*ordered_events=*/true);
+ c.tool.sample = process_sample_event;
+ c.tool.mmap = perf_event__process_mmap;
+ c.tool.mmap2 = perf_event__process_mmap2;
+ c.tool.comm = perf_event__process_comm;
+ c.tool.exit = perf_event__process_exit;
+ c.tool.fork = perf_event__process_fork;
+ c.tool.lost = perf_event__process_lost;
+ c.tool.tracing_data = perf_event__process_tracing_data;
+ c.tool.build_id = perf_event__process_build_id;
+ c.tool.namespaces = perf_event__process_namespaces;
+ c.tool.ordering_requires_timestamps = true;
+
if (opts->all) {
c.tool.comm = process_comm_event;
c.tool.exit = process_exit_event;
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
index 3cf64f5b23ee..20bfb0884e9e 100644
--- a/tools/perf/util/data-convert-json.c
+++ b/tools/perf/util/data-convert-json.c
@@ -118,7 +118,7 @@ static void output_json_key_format(FILE *out, bool comma, int depth,
va_end(args);
}
-static void output_sample_callchain_entry(struct perf_tool *tool,
+static void output_sample_callchain_entry(const struct perf_tool *tool,
u64 ip, struct addr_location *al)
{
struct convert_json *c = container_of(tool, struct convert_json, tool);
@@ -146,7 +146,7 @@ static void output_sample_callchain_entry(struct perf_tool *tool,
output_json_format(out, false, 4, "}");
}
-static int process_sample_event(struct perf_tool *tool,
+static int process_sample_event(const struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct evsel *evsel __maybe_unused,
@@ -316,39 +316,36 @@ int bt_convert__perf2json(const char *input_name, const char *output_name,
struct perf_session *session;
int fd;
int ret = -1;
-
struct convert_json c = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .namespaces = perf_event__process_namespaces,
- .cgroup = perf_event__process_cgroup,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
-#ifdef HAVE_LIBTRACEEVENT
- .tracing_data = perf_event__process_tracing_data,
-#endif
- .build_id = perf_event__process_build_id,
- .id_index = perf_event__process_id_index,
- .auxtrace_info = perf_event__process_auxtrace_info,
- .auxtrace = perf_event__process_auxtrace,
- .event_update = perf_event__process_event_update,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
- },
.first = true,
.events_count = 0,
};
-
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
.path = input_name,
.force = opts->force,
};
+ perf_tool__init(&c.tool, /*ordered_events=*/true);
+ c.tool.sample = process_sample_event;
+ c.tool.mmap = perf_event__process_mmap;
+ c.tool.mmap2 = perf_event__process_mmap2;
+ c.tool.comm = perf_event__process_comm;
+ c.tool.namespaces = perf_event__process_namespaces;
+ c.tool.cgroup = perf_event__process_cgroup;
+ c.tool.exit = perf_event__process_exit;
+ c.tool.fork = perf_event__process_fork;
+ c.tool.lost = perf_event__process_lost;
+#ifdef HAVE_LIBTRACEEVENT
+ c.tool.tracing_data = perf_event__process_tracing_data;
+#endif
+ c.tool.build_id = perf_event__process_build_id;
+ c.tool.id_index = perf_event__process_id_index;
+ c.tool.auxtrace_info = perf_event__process_auxtrace_info;
+ c.tool.auxtrace = perf_event__process_auxtrace;
+ c.tool.event_update = perf_event__process_event_update;
+ c.tool.ordering_requires_timestamps = true;
+
if (opts->all) {
pr_err("--all is currently unsupported for JSON output.\n");
goto err;
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 08c4bfbd817f..98661ede2a73 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -204,7 +204,12 @@ static bool check_pipe(struct perf_data *data)
data->file.fd = fd;
data->use_stdio = false;
}
- } else {
+
+ /*
+ * When the data is a pipe and data->file.fd is already set, use the
+ * given fd instead of STDIN_FILENO or STDOUT_FILENO.
+ */
+ } else if (data->file.fd <= 0) {
data->file.fd = fd;
}
}
diff --git a/tools/perf/util/debuginfo.h b/tools/perf/util/debuginfo.h
index 4d65b8c605fc..ad6422c3f8ca 100644
--- a/tools/perf/util/debuginfo.h
+++ b/tools/perf/util/debuginfo.h
@@ -40,6 +40,8 @@ static inline void debuginfo__delete(struct debuginfo *dbg __maybe_unused)
{
}
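+/* Dummy Dwarf_Addr so the stub prototypes below build when dwarf support is off. */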
+typedef void Dwarf_Addr;
+
static inline int debuginfo__get_text_offset(struct debuginfo *dbg __maybe_unused,
Dwarf_Addr *offs __maybe_unused,
bool adjust_offset __maybe_unused)
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
index e10558b79504..f05ba7739c1e 100644
--- a/tools/perf/util/disasm.c
+++ b/tools/perf/util/disasm.c
@@ -12,9 +12,11 @@
#include <subcmd/run-command.h>
#include "annotate.h"
+#include "annotate-data.h"
#include "build-id.h"
#include "debug.h"
#include "disasm.h"
+#include "disasm_bpf.h"
#include "dso.h"
#include "env.h"
#include "evsel.h"
@@ -35,6 +37,8 @@ static struct ins_ops mov_ops;
static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;
+static struct ins_ops load_store_ops;
+static struct ins_ops arithmetic_ops;
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
@@ -43,6 +47,8 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);
+static int disasm_line__parse_powerpc(struct disasm_line *dl);
+static char *expand_tabs(char *line, char **storage, size_t *storage_len);
static __attribute__((constructor)) void symbol__init_regexpr(void)
{
@@ -145,10 +151,16 @@ static struct arch architectures[] = {
.memory_ref_char = '(',
.imm_char = '$',
},
+#ifdef HAVE_DWARF_SUPPORT
+ .update_insn_state = update_insn_state_x86,
+#endif
},
{
.name = "powerpc",
.init = powerpc__annotate_init,
+#ifdef HAVE_DWARF_SUPPORT
+ .update_insn_state = update_insn_state_powerpc,
+#endif
},
{
.name = "riscv64",
@@ -250,7 +262,8 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
return arch->ins_is_fused(arch, ins1, ins2);
}
-static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
+static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms,
+ struct disasm_line *dl __maybe_unused)
{
char *endptr, *tok, *name;
struct map *map = ms->map;
@@ -345,7 +358,8 @@ static inline const char *validate_comma(const char *c, struct ins_operands *ops
return c;
}
-static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
+static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms,
+ struct disasm_line *dl __maybe_unused)
{
struct map *map = ms->map;
struct symbol *sym = ms->sym;
@@ -504,7 +518,8 @@ static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
return 0;
}
-static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
+static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms,
+ struct disasm_line *dl __maybe_unused)
{
ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
if (ops->locked.ops == NULL)
@@ -513,13 +528,13 @@ static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_s
if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
goto out_free_ops;
- ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
+ ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name, 0);
if (ops->locked.ins.ops == NULL)
goto out_free_ops;
if (ops->locked.ins.ops->parse &&
- ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0)
+ ops->locked.ins.ops->parse(arch, ops->locked.ops, ms, NULL) < 0)
goto out_free_ops;
return 0;
@@ -552,6 +567,7 @@ static void lock__delete(struct ins_operands *ops)
ins_ops__delete(ops->locked.ops);
zfree(&ops->locked.ops);
+ zfree(&ops->locked.ins.name);
zfree(&ops->target.raw);
zfree(&ops->target.name);
}
@@ -590,7 +606,8 @@ static bool check_multi_regs(struct arch *arch, const char *op)
return count > 1;
}
-static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
+static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused,
+ struct disasm_line *dl __maybe_unused)
{
char *s = strchr(ops->raw, ','), *target, *comment, prev;
@@ -668,7 +685,92 @@ static struct ins_ops mov_ops = {
.scnprintf = mov__scnprintf,
};
-static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
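+/*
+ * PowerPC opcode helpers: PPC_OP() extracts the primary opcode (the top six
+ * bits of the instruction word) and PPC_22_30() the XO-form extended opcode.
+ * The four values below are the extended opcodes of addme/subfme/addze/subfze.
+ */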
+#define PPC_OP(op) (((op) >> 26) & 0x3F)
+#define PPC_22_30(R) (((R) >> 1) & 0x1ff)
+#define MINUS_EXT_XO_FORM 234
+#define SUB_EXT_XO_FORM 232
+#define ADD_ZERO_EXT_XO_FORM 202
+#define SUB_ZERO_EXT_XO_FORM 200
+
+static int arithmetic__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
+ ops->raw);
+}
+
+/*
+ * Sets the fields: multi_regs and mem_ref.
+ * mem_ref is set for ops->source and is later used to fill the
+ * objdump->memory_ref_char field. These ops are currently used by powerpc,
+ * and since the binary instruction code is used to extract the opcode,
+ * registers and offset, no other parsing is needed here.
+ *
+ * Don't set multi_regs for the following four cases, since each has only
+ * one source operand:
+ * - Add to Minus One Extended XO-form (e.g. addme, addmeo)
+ * - Subtract From Minus One Extended XO-form (e.g. subfme)
+ * - Add to Zero Extended XO-form (e.g. addze, addzeo)
+ * - Subtract From Zero Extended XO-form (e.g. subfze)
+ */
+static int arithmetic__parse(struct arch *arch __maybe_unused, struct ins_operands *ops,
+ struct map_symbol *ms __maybe_unused, struct disasm_line *dl)
+{
+ int opcode = PPC_OP(dl->raw.raw_insn);
+
+ ops->source.mem_ref = false;
+ if (opcode == 31) {
+ int ext_opcode = PPC_22_30(dl->raw.raw_insn);
+
+ if ((ext_opcode != MINUS_EXT_XO_FORM) && (ext_opcode != SUB_EXT_XO_FORM) &&
+ (ext_opcode != ADD_ZERO_EXT_XO_FORM) && (ext_opcode != SUB_ZERO_EXT_XO_FORM))
+ ops->source.multi_regs = true;
+ }
+
+ ops->target.mem_ref = false;
+ ops->target.multi_regs = false;
+
+ return 0;
+}
+
+static struct ins_ops arithmetic_ops = {
+ .parse = arithmetic__parse,
+ .scnprintf = arithmetic__scnprintf,
+};
+
+static int load_store__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
+ ops->raw);
+}
+
+/*
+ * Sets the fields: multi_regs and mem_ref.
+ * mem_ref is set for ops->source and is later used to fill the
+ * objdump->memory_ref_char field. These ops are currently used by powerpc,
+ * and since the binary instruction code is used to extract the opcode,
+ * registers and offset, no other parsing is needed here.
+ */
+static int load_store__parse(struct arch *arch __maybe_unused, struct ins_operands *ops,
+ struct map_symbol *ms __maybe_unused, struct disasm_line *dl)
+{
+ ops->source.mem_ref = true;
+ ops->source.multi_regs = false;
+ /* Opcode 31 is X-form: the address comes from two registers (RA + RB) */
+ if (PPC_OP(dl->raw.raw_insn) == 31)
+ ops->source.multi_regs = true;
+
+ ops->target.mem_ref = false;
+ ops->target.multi_regs = false;
+
+ return 0;
+}
+
+static struct ins_ops load_store_ops = {
+ .parse = load_store__parse,
+ .scnprintf = load_store__scnprintf,
+};
+
+static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused,
+ struct disasm_line *dl __maybe_unused)
{
char *target, *comment, *s, prev;
@@ -758,11 +860,23 @@ static void ins__sort(struct arch *arch)
qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
}
-static struct ins_ops *__ins__find(struct arch *arch, const char *name)
+static struct ins_ops *__ins__find(struct arch *arch, const char *name, struct disasm_line *dl)
{
struct ins *ins;
const int nmemb = arch->nr_instructions;
+ if (arch__is(arch, "powerpc")) {
+ /*
+ * For powerpc, identify the instruction ops
+ * from the opcode using raw_insn.
+ */
+ struct ins_ops *ops;
+
+ ops = check_ppc_insn(dl);
+ if (ops)
+ return ops;
+ }
+
if (!arch->sorted_instructions) {
ins__sort(arch);
arch->sorted_instructions = true;
@@ -792,9 +906,9 @@ static struct ins_ops *__ins__find(struct arch *arch, const char *name)
return ins ? ins->ops : NULL;
}
-struct ins_ops *ins__find(struct arch *arch, const char *name)
+struct ins_ops *ins__find(struct arch *arch, const char *name, struct disasm_line *dl)
{
- struct ins_ops *ops = __ins__find(arch, name);
+ struct ins_ops *ops = __ins__find(arch, name, dl);
if (!ops && arch->associate_instruction_ops)
ops = arch->associate_instruction_ops(arch, name);
@@ -804,12 +918,12 @@ struct ins_ops *ins__find(struct arch *arch, const char *name)
static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
{
- dl->ins.ops = ins__find(arch, dl->ins.name);
+ dl->ins.ops = ins__find(arch, dl->ins.name, dl);
if (!dl->ins.ops)
return;
- if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
+ if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms, dl) < 0)
dl->ins.ops = NULL;
}
@@ -841,6 +955,51 @@ out:
return -1;
}
+/*
+ * Parses the result captured from symbol__disassemble_*.
+ * Example of a line read from a DSO file on powerpc:
+ * line: 38 01 81 e8
+ * opcode: fetched from the arch-specific get_opcode_insn
+ * raw_insn: e8810138
+ *
+ * raw_insn is used later to extract the register/offset fields
+ */
+#define RAW_BYTES 11
+
+static int disasm_line__parse_powerpc(struct disasm_line *dl)
+{
+ char *line = dl->al.line;
+ const char **namep = &dl->ins.name;
+ char **rawp = &dl->ops.raw;
+ char *tmp_raw_insn, *name_raw_insn = skip_spaces(line);
+ char *name = skip_spaces(name_raw_insn + RAW_BYTES);
+ int objdump = 0;
+
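+ /*
+ * Lines longer than the RAW_BYTES hex dump came from objdump and carry
+ * a mnemonic after the raw bytes; bare raw lines have no mnemonic.
+ */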
+ if (strlen(line) > RAW_BYTES)
+ objdump = 1;
+
+ if (name_raw_insn[0] == '\0')
+ return -1;
+
+ if (objdump)
+ disasm_line__parse(name, namep, rawp);
+ else
+ *namep = "";
+
+ tmp_raw_insn = strndup(name_raw_insn, RAW_BYTES);
+ if (tmp_raw_insn == NULL)
+ return -1;
+
+ remove_spaces(tmp_raw_insn);
+
+ sscanf(tmp_raw_insn, "%x", &dl->raw.raw_insn);
+ free(tmp_raw_insn);
+ if (objdump)
+ dl->raw.raw_insn = be32_to_cpu(dl->raw.raw_insn);
+
+ return 0;
+}
+
static void annotation_line__init(struct annotation_line *al,
struct annotate_args *args,
int nr)
@@ -857,6 +1016,7 @@ static void annotation_line__exit(struct annotation_line *al)
zfree_srcline(&al->path);
zfree(&al->line);
zfree(&al->cycles);
+ zfree(&al->br_cntr);
}
static size_t disasm_line_size(int nr)
@@ -880,10 +1040,8 @@ static size_t disasm_line_size(int nr)
struct disasm_line *disasm_line__new(struct annotate_args *args)
{
struct disasm_line *dl = NULL;
- int nr = 1;
-
- if (evsel__is_group_event(args->evsel))
- nr = args->evsel->core.nr_members;
+ struct annotation *notes = symbol__annotation(args->ms.sym);
+ int nr = notes->src->nr_events;
dl = zalloc(disasm_line_size(nr));
if (!dl)
@@ -894,7 +1052,10 @@ struct disasm_line *disasm_line__new(struct annotate_args *args)
goto out_delete;
if (args->offset != -1) {
- if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
+ if (arch__is(args->arch, "powerpc")) {
+ if (disasm_line__parse_powerpc(dl) < 0)
+ goto out_free_line;
+ } else if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
goto out_free_line;
disasm_line__init_ins(dl, args->arch, &args->ms);
@@ -1164,195 +1325,11 @@ fallback:
return 0;
}
-#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-#define PACKAGE "perf"
-#include <bfd.h>
-#include <dis-asm.h>
-#include <bpf/bpf.h>
-#include <bpf/btf.h>
-#include <bpf/libbpf.h>
-#include <linux/btf.h>
-#include <tools/dis-asm-compat.h>
-
-#include "bpf-event.h"
-#include "bpf-utils.h"
-
-static int symbol__disassemble_bpf(struct symbol *sym,
- struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct bpf_prog_linfo *prog_linfo = NULL;
- struct bpf_prog_info_node *info_node;
- int len = sym->end - sym->start;
- disassembler_ftype disassemble;
- struct map *map = args->ms.map;
- struct perf_bpil *info_linear;
- struct disassemble_info info;
- struct dso *dso = map__dso(map);
- int pc = 0, count, sub_id;
- struct btf *btf = NULL;
- char tpath[PATH_MAX];
- size_t buf_size;
- int nr_skip = 0;
- char *buf;
- bfd *bfdf;
- int ret;
- FILE *s;
-
- if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO)
- return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
-
- pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
- sym->name, sym->start, sym->end - sym->start);
-
- memset(tpath, 0, sizeof(tpath));
- perf_exe(tpath, sizeof(tpath));
-
- bfdf = bfd_openr(tpath, NULL);
- if (bfdf == NULL)
- abort();
-
- if (!bfd_check_format(bfdf, bfd_object))
- abort();
-
- s = open_memstream(&buf, &buf_size);
- if (!s) {
- ret = errno;
- goto out;
- }
- init_disassemble_info_compat(&info, s,
- (fprintf_ftype) fprintf,
- fprintf_styled);
- info.arch = bfd_get_arch(bfdf);
- info.mach = bfd_get_mach(bfdf);
-
- info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
- dso__bpf_prog(dso)->id);
- if (!info_node) {
- ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
- goto out;
- }
- info_linear = info_node->info_linear;
- sub_id = dso__bpf_prog(dso)->sub_id;
-
- info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
- info.buffer_length = info_linear->info.jited_prog_len;
-
- if (info_linear->info.nr_line_info)
- prog_linfo = bpf_prog_linfo__new(&info_linear->info);
-
- if (info_linear->info.btf_id) {
- struct btf_node *node;
-
- node = perf_env__find_btf(dso__bpf_prog(dso)->env,
- info_linear->info.btf_id);
- if (node)
- btf = btf__new((__u8 *)(node->data),
- node->data_size);
- }
-
- disassemble_init_for_target(&info);
-
-#ifdef DISASM_FOUR_ARGS_SIGNATURE
- disassemble = disassembler(info.arch,
- bfd_big_endian(bfdf),
- info.mach,
- bfdf);
-#else
- disassemble = disassembler(bfdf);
-#endif
- if (disassemble == NULL)
- abort();
-
- fflush(s);
- do {
- const struct bpf_line_info *linfo = NULL;
- struct disasm_line *dl;
- size_t prev_buf_size;
- const char *srcline;
- u64 addr;
-
- addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
- count = disassemble(pc, &info);
-
- if (prog_linfo)
- linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
- addr, sub_id,
- nr_skip);
-
- if (linfo && btf) {
- srcline = btf__name_by_offset(btf, linfo->line_off);
- nr_skip++;
- } else
- srcline = NULL;
-
- fprintf(s, "\n");
- prev_buf_size = buf_size;
- fflush(s);
-
- if (!annotate_opts.hide_src_code && srcline) {
- args->offset = -1;
- args->line = strdup(srcline);
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
- dl = disasm_line__new(args);
- if (dl) {
- annotation_line__add(&dl->al,
- &notes->src->source);
- }
- }
-
- args->offset = pc;
- args->line = buf + prev_buf_size;
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
- dl = disasm_line__new(args);
- if (dl)
- annotation_line__add(&dl->al, &notes->src->source);
-
- pc += count;
- } while (count > 0 && pc < len);
-
- ret = 0;
-out:
- free(prog_linfo);
- btf__free(btf);
- fclose(s);
- bfd_close(bfdf);
- return ret;
-}
-#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
- struct annotate_args *args __maybe_unused)
-{
- return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
-}
-#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-
-static int
-symbol__disassemble_bpf_image(struct symbol *sym,
- struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *dl;
-
- args->offset = -1;
- args->line = strdup("to be implemented");
- args->line_nr = 0;
- args->fileloc = NULL;
- dl = disasm_line__new(args);
- if (dl)
- annotation_line__add(&dl->al, &notes->src->source);
-
- zfree(&args->line);
- return 0;
-}
-
#ifdef HAVE_LIBCAPSTONE_SUPPORT
#include <capstone/capstone.h>
+int capstone_init(struct machine *machine, csh *cs_handle, bool is64, bool disassembler_style);
+
static int open_capstone_handle(struct annotate_args *args, bool is_64bit,
csh *handle)
{
@@ -1378,7 +1355,9 @@ static int open_capstone_handle(struct annotate_args *args, bool is_64bit,
return 0;
}
+#endif
+#if defined(HAVE_LIBCAPSTONE_SUPPORT) || defined(HAVE_LIBLLVM_SUPPORT)
struct find_file_offset_data {
u64 ip;
u64 offset;
@@ -1396,6 +1375,55 @@ static int find_file_offset(u64 start, u64 len, u64 pgoff, void *arg)
return 0;
}
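+
+/*
+ * Read the bytes backing @sym from @filename, entering the DSO's mount
+ * namespace if needed. On success a malloc()'ed buffer of *len bytes is
+ * returned and *is_64bit is set from the ELF class; NULL on error.
+ */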
+static u8 *
+read_symbol(const char *filename, struct map *map, struct symbol *sym,
+ u64 *len, bool *is_64bit)
+{
+ struct dso *dso = map__dso(map);
+ struct nscookie nsc;
+ u64 start = map__rip_2objdump(map, sym->start);
+ u64 end = map__rip_2objdump(map, sym->end);
+ int fd, count;
+ u8 *buf = NULL;
+ struct find_file_offset_data data = {
+ .ip = start,
+ };
+
+ *is_64bit = false;
+
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
+ fd = open(filename, O_RDONLY);
+ nsinfo__mountns_exit(&nsc);
+ if (fd < 0)
+ return NULL;
+
+ if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data,
+ is_64bit) == 0)
+ goto err;
+
+ *len = end - start;
+ buf = malloc(*len);
+ if (buf == NULL)
+ goto err;
+
+ count = pread(fd, buf, *len, data.offset);
+ close(fd);
+ fd = -1;
+
+ if ((u64)count != *len)
+ goto err;
+
+ return buf;
+
+err:
+ if (fd >= 0)
+ close(fd);
+ free(buf);
+ return NULL;
+}
+#endif
+
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
static void print_capstone_detail(cs_insn *insn, char *buf, size_t len,
struct annotate_args *args, u64 addr)
{
@@ -1453,7 +1481,7 @@ static void print_capstone_detail(cs_insn *insn, char *buf, size_t len,
}
}
-static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
+static int symbol__disassemble_capstone_powerpc(char *filename, struct symbol *sym,
struct annotate_args *args)
{
struct annotation *notes = symbol__annotation(sym);
@@ -1472,9 +1500,10 @@ static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
.ip = start,
};
csh handle;
- cs_insn *insn;
char disasm_buf[512];
struct disasm_line *dl;
+ u32 *line;
+ bool disassembler_style = false;
if (args->options->objdump_path)
return -1;
@@ -1489,7 +1518,11 @@ static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
&is_64bit) == 0)
goto err;
- if (open_capstone_handle(args, is_64bit, &handle) < 0)
+ if (!args->options->disassembler_style ||
+ !strcmp(args->options->disassembler_style, "att"))
+ disassembler_style = true;
+
+ if (capstone_init(maps__machine(args->ms.maps), &handle, is_64bit, disassembler_style) < 0)
goto err;
needs_cs_close = true;
@@ -1505,6 +1538,8 @@ static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
if ((u64)count != len)
goto err;
+ line = (u32 *)buf;
+
/* add the function address and name */
scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
start, sym->name);
@@ -1521,6 +1556,114 @@ static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
annotation_line__add(&dl->al, &notes->src->source);
+ /*
+ * TODO: enable disassembly for powerpc
+ * count = cs_disasm(handle, buf, len, start, len, &insn);
+ *
+ * For now, only the binary code is saved in the disassembled line
+ * to be used with the "type" and "typeoff" sort keys. Each raw code
+ * is a 32-bit instruction, so use "len/4" to get the number of
+ * entries.
+ */
+ count = len/4;
+
+ for (i = 0, offset = 0; i < count; i++) {
+ args->offset = offset;
+ sprintf(args->line, "%x", line[i]);
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ offset += 4;
+ }
+
+ /* It failed in the middle */
+ if (offset != len) {
+ struct list_head *list = &notes->src->source;
+
+ /* Discard all lines and fallback to objdump */
+ while (!list_empty(list)) {
+ dl = list_first_entry(list, struct disasm_line, al.node);
+
+ list_del_init(&dl->al.node);
+ disasm_line__free(dl);
+ }
+ count = -1;
+ }
+
+out:
+ if (needs_cs_close)
+ cs_close(&handle);
+ free(buf);
+ return count < 0 ? count : 0;
+
+err:
+ if (fd >= 0)
+ close(fd);
+ if (needs_cs_close) {
+ struct disasm_line *tmp;
+
+ /*
+ * It probably failed in the middle of the above loop.
+ * Release any resources it may have added.
+ */
+ list_for_each_entry_safe(dl, tmp, &notes->src->source, al.node) {
+ list_del(&dl->al.node);
+ free(dl);
+ }
+ }
+ count = -1;
+ goto out;
+}
+
+static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct map *map = args->ms.map;
+ u64 start = map__rip_2objdump(map, sym->start);
+ u64 len;
+ u64 offset;
+ int i, count;
+ bool is_64bit = false;
+ bool needs_cs_close = false;
+ u8 *buf = NULL;
+ csh handle;
+ cs_insn *insn;
+ char disasm_buf[512];
+ struct disasm_line *dl;
+
+ if (args->options->objdump_path)
+ return -1;
+
+ buf = read_symbol(filename, map, sym, &len, &is_64bit);
+ if (buf == NULL)
+ return -1;
+
+ /* add the function address and name */
+ scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
+ start, sym->name);
+
+ args->offset = -1;
+ args->line = disasm_buf;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ if (open_capstone_handle(args, is_64bit, &handle) < 0)
+ goto err;
+
+ needs_cs_close = true;
+
count = cs_disasm(handle, buf, len, start, len, &insn);
for (i = 0, offset = 0; i < count; i++) {
int printed;
@@ -1565,8 +1708,6 @@ out:
return count < 0 ? count : 0;
err:
- if (fd >= 0)
- close(fd);
if (needs_cs_close) {
struct disasm_line *tmp;
@@ -1584,6 +1725,274 @@ err:
}
#endif
+static int symbol__disassemble_raw(char *filename, struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct map *map = args->ms.map;
+ struct dso *dso = map__dso(map);
+ u64 start = map__rip_2objdump(map, sym->start);
+ u64 end = map__rip_2objdump(map, sym->end);
+ u64 len = end - start;
+ u64 offset;
+ int i, count;
+ u8 *buf = NULL;
+ char disasm_buf[512];
+ struct disasm_line *dl;
+ u32 *line;
+
+ /* Return if objdump is specified explicitly */
+ if (args->options->objdump_path)
+ return -1;
+
+ pr_debug("Reading raw instruction from : %s using dso__data_read_offset\n", filename);
+
+ buf = malloc(len);
+ if (buf == NULL)
+ goto err;
+
+ count = dso__data_read_offset(dso, NULL, sym->start, buf, len);
+
+ line = (u32 *)buf;
+
+ if ((u64)count != len)
+ goto err;
+
+ /* add the function address and name */
+ scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
+ start, sym->name);
+
+ args->offset = -1;
+ args->line = disasm_buf;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ /* Each raw instruction is 4 bytes */
+ count = len/4;
+
+ for (i = 0, offset = 0; i < count; i++) {
+ args->offset = offset;
+ sprintf(args->line, "%x", line[i]);
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+ offset += 4;
+ }
+
+ /* It failed in the middle */
+ if (offset != len) {
+ struct list_head *list = &notes->src->source;
+
+ /* Discard all lines and fallback to objdump */
+ while (!list_empty(list)) {
+ dl = list_first_entry(list, struct disasm_line, al.node);
+
+ list_del_init(&dl->al.node);
+ disasm_line__free(dl);
+ }
+ count = -1;
+ }
+
+out:
+ free(buf);
+ return count < 0 ? count : 0;
+
+err:
+ count = -1;
+ goto out;
+}
+
+#ifdef HAVE_LIBLLVM_SUPPORT
+#include <llvm-c/Disassembler.h>
+#include <llvm-c/Target.h>
+#include "util/llvm-c-helpers.h"
+
+struct symbol_lookup_storage {
+ u64 branch_addr;
+ u64 pcrel_load_addr;
+};
+
+/*
+ * Whenever LLVM wants to resolve an address into a symbol, it calls this
+ * callback. We don't ever actually _return_ anything (in particular, because
+ * it puts quotation marks around what we return), but we use this as a hint
+ * that there is a branch or PC-relative address in the expression that we
+ * should add some textual annotation for after the instruction. The caller
+ * will use this information to add the actual annotation.
+ */
+static const char *
+symbol_lookup_callback(void *disinfo, uint64_t value,
+ uint64_t *ref_type,
+ uint64_t address __maybe_unused,
+ const char **ref __maybe_unused)
+{
+ struct symbol_lookup_storage *storage = disinfo;
+
+ if (*ref_type == LLVMDisassembler_ReferenceType_In_Branch)
+ storage->branch_addr = value;
+ else if (*ref_type == LLVMDisassembler_ReferenceType_In_PCrel_Load)
+ storage->pcrel_load_addr = value;
+ *ref_type = LLVMDisassembler_ReferenceType_InOut_None;
+ return NULL;
+}
+
+static int symbol__disassemble_llvm(char *filename, struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct map *map = args->ms.map;
+ struct dso *dso = map__dso(map);
+ u64 start = map__rip_2objdump(map, sym->start);
+ u8 *buf;
+ u64 len;
+ u64 pc;
+ bool is_64bit;
+ char triplet[64];
+ char disasm_buf[2048];
+ size_t disasm_len;
+ struct disasm_line *dl;
+ LLVMDisasmContextRef disasm = NULL;
+ struct symbol_lookup_storage storage;
+ char *line_storage = NULL;
+ size_t line_storage_len = 0;
+ int ret = -1;
+
+ if (args->options->objdump_path)
+ return -1;
+
+ LLVMInitializeAllTargetInfos();
+ LLVMInitializeAllTargetMCs();
+ LLVMInitializeAllDisassemblers();
+
+ buf = read_symbol(filename, map, sym, &len, &is_64bit);
+ if (buf == NULL)
+ return -1;
+
+ if (arch__is(args->arch, "x86")) {
+ if (is_64bit)
+ scnprintf(triplet, sizeof(triplet), "x86_64-pc-linux");
+ else
+ scnprintf(triplet, sizeof(triplet), "i686-pc-linux");
+ } else {
+ scnprintf(triplet, sizeof(triplet), "%s-linux-gnu",
+ args->arch->name);
+ }
+
+ disasm = LLVMCreateDisasm(triplet, &storage, 0, NULL,
+ symbol_lookup_callback);
+ if (disasm == NULL)
+ goto err;
+
+ if (args->options->disassembler_style &&
+ !strcmp(args->options->disassembler_style, "intel"))
+ LLVMSetDisasmOptions(disasm,
+ LLVMDisassembler_Option_AsmPrinterVariant);
+
+ /*
+ * This needs to be set after AsmPrinterVariant, due to a bug in LLVM;
+ * setting AsmPrinterVariant makes a new instruction printer, making it
+ * forget about the PrintImmHex flag (which is applied before if both
+ * are given to the same call).
+ */
+ LLVMSetDisasmOptions(disasm, LLVMDisassembler_Option_PrintImmHex);
+
+ /* add the function address and name */
+ scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
+ start, sym->name);
+
+ args->offset = -1;
+ args->line = disasm_buf;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc = start;
+ for (u64 offset = 0; offset < len; ) {
+ unsigned int ins_len;
+
+ storage.branch_addr = 0;
+ storage.pcrel_load_addr = 0;
+
+ ins_len = LLVMDisasmInstruction(disasm, buf + offset,
+ len - offset, pc,
+ disasm_buf, sizeof(disasm_buf));
+ if (ins_len == 0)
+ goto err;
+ disasm_len = strlen(disasm_buf);
+
+ if (storage.branch_addr != 0) {
+ char *name = llvm_name_for_code(dso, filename,
+ storage.branch_addr);
+ if (name != NULL) {
+ disasm_len += scnprintf(disasm_buf + disasm_len,
+ sizeof(disasm_buf) -
+ disasm_len,
+ " <%s>", name);
+ free(name);
+ }
+ }
+ if (storage.pcrel_load_addr != 0) {
+ char *name = llvm_name_for_data(dso, filename,
+ storage.pcrel_load_addr);
+ disasm_len += scnprintf(disasm_buf + disasm_len,
+ sizeof(disasm_buf) - disasm_len,
+ " # %#"PRIx64,
+ storage.pcrel_load_addr);
+ if (name) {
+ disasm_len += scnprintf(disasm_buf + disasm_len,
+ sizeof(disasm_buf) -
+ disasm_len,
+ " <%s>", name);
+ free(name);
+ }
+ }
+
+ args->offset = offset;
+ args->line = expand_tabs(disasm_buf, &line_storage,
+ &line_storage_len);
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ llvm_addr2line(filename, pc, &args->fileloc,
+ (unsigned int *)&args->line_nr, false, NULL);
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ free(args->fileloc);
+ pc += ins_len;
+ offset += ins_len;
+ }
+
+ ret = 0;
+
+err:
+ LLVMDisasmDispose(disasm);
+ free(buf);
+ free(line_storage);
+ return ret;
+}
+#endif
+
/*
* Possibly create a new version of line with tabs expanded. Returns the
* existing or new line, storage is updated if a new line is allocated. If
@@ -1708,6 +2117,33 @@ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
strcpy(symfs_filename, tmp);
}
+ /*
+ * For powerpc data type profiling, use dso__data_read_offset to
+ * read the raw instructions directly and interpret the binary code
+ * to extract the instruction and register fields. For the "type"
+ * and "typeoff" sort keys, disassembling to mnemonic notation is
+ * not required on powerpc.
+ */
+ if (arch__is(args->arch, "powerpc")) {
+ extern const char *sort_order;
+
+ if (sort_order && !strstr(sort_order, "sym")) {
+ err = symbol__disassemble_raw(symfs_filename, sym, args);
+ if (err == 0)
+ goto out_remove_tmp;
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+ err = symbol__disassemble_capstone_powerpc(symfs_filename, sym, args);
+ if (err == 0)
+ goto out_remove_tmp;
+#endif
+ }
+ }
+
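+ /* Prefer the in-process disassemblers when built in: LLVM first, then capstone. */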
+#ifdef HAVE_LIBLLVM_SUPPORT
+ err = symbol__disassemble_llvm(symfs_filename, sym, args);
+ if (err == 0)
+ goto out_remove_tmp;
+#endif
#ifdef HAVE_LIBCAPSTONE_SUPPORT
err = symbol__disassemble_capstone(symfs_filename, sym, args);
if (err == 0)
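Taken together with the pre-existing paths, symbol__disassemble() now tries a priority-ordered chain of backends and returns at the first success. A schematic of the dispatch, with the final objdump fallback named as an assumption from the surrounding perf code rather than shown in this hunk:

/* Schematic only, not literal perf code. */
static int disassemble_with_fallbacks(const char *filename, struct symbol *sym,
				      struct annotate_args *args)
{
	int err;

	/* powerpc raw/capstone paths run first, for the type sort keys only */
#ifdef HAVE_LIBLLVM_SUPPORT
	err = symbol__disassemble_llvm(filename, sym, args);
	if (err == 0)
		return 0;
#endif
#ifdef HAVE_LIBCAPSTONE_SUPPORT
	err = symbol__disassemble_capstone(filename, sym, args);
	if (err == 0)
		return 0;
#endif
	/* last resort: spawn objdump and parse its output (assumed name) */
	return symbol__disassemble_objdump(filename, sym, args);
}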
diff --git a/tools/perf/util/disasm.h b/tools/perf/util/disasm.h
index 3d381a043520..f56beedeb9da 100644
--- a/tools/perf/util/disasm.h
+++ b/tools/perf/util/disasm.h
@@ -4,11 +4,18 @@
#include "map_symbol.h"
+#ifdef HAVE_DWARF_SUPPORT
+#include "dwarf-aux.h"
+#endif
+
struct annotation_options;
struct disasm_line;
struct ins;
struct evsel;
struct symbol;
+struct data_loc_info;
+struct type_state;
struct arch {
const char *name;
@@ -32,6 +39,11 @@ struct arch {
char memory_ref_char;
char imm_char;
} objdump;
+#ifdef HAVE_DWARF_SUPPORT
+ void (*update_insn_state)(struct type_state *state,
+ struct data_loc_info *dloc, Dwarf_Die *cu_die,
+ struct disasm_line *dl);
+#endif
};
struct ins {
@@ -50,6 +62,7 @@ struct ins_operands {
bool offset_avail;
bool outside;
bool multi_regs;
+ bool mem_ref;
} target;
union {
struct {
@@ -57,6 +70,7 @@ struct ins_operands {
char *name;
u64 addr;
bool multi_regs;
+ bool mem_ref;
} source;
struct {
struct ins ins;
@@ -71,7 +85,8 @@ struct ins_operands {
struct ins_ops {
void (*free)(struct ins_operands *ops);
- int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
+ int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms,
+ struct disasm_line *dl);
int (*scnprintf)(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
};
@@ -90,7 +105,7 @@ struct annotate_args {
struct arch *arch__find(const char *name);
bool arch__is(struct arch *arch, const char *name);
-struct ins_ops *ins__find(struct arch *arch, const char *name);
+struct ins_ops *ins__find(struct arch *arch, const char *name, struct disasm_line *dl);
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops, int max_ins_name);
diff --git a/tools/perf/util/disasm_bpf.c b/tools/perf/util/disasm_bpf.c
new file mode 100644
index 000000000000..1fee71c79b62
--- /dev/null
+++ b/tools/perf/util/disasm_bpf.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "util/annotate.h"
+#include "util/disasm_bpf.h"
+#include "util/symbol.h"
+#include <linux/zalloc.h>
+#include <string.h>
+
+#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+#define PACKAGE "perf"
+#include <bfd.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <dis-asm.h>
+#include <errno.h>
+#include <linux/btf.h>
+#include <tools/dis-asm-compat.h>
+
+#include "util/bpf-event.h"
+#include "util/bpf-utils.h"
+#include "util/debug.h"
+#include "util/dso.h"
+#include "util/map.h"
+#include "util/env.h"
+#include "util/util.h"
+
+int symbol__disassemble_bpf(struct symbol *sym, struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ struct bpf_prog_info_node *info_node;
+ int len = sym->end - sym->start;
+ disassembler_ftype disassemble;
+ struct map *map = args->ms.map;
+ struct perf_bpil *info_linear;
+ struct disassemble_info info;
+ struct dso *dso = map__dso(map);
+ int pc = 0, count, sub_id;
+ struct btf *btf = NULL;
+ char tpath[PATH_MAX];
+ size_t buf_size;
+ int nr_skip = 0;
+ char *buf;
+ bfd *bfdf;
+ int ret;
+ FILE *s;
+
+ if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
+
+ pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
+ sym->name, sym->start, sym->end - sym->start);
+
+ memset(tpath, 0, sizeof(tpath));
+ perf_exe(tpath, sizeof(tpath));
+
+ bfdf = bfd_openr(tpath, NULL);
+ if (bfdf == NULL)
+ abort();
+
+ if (!bfd_check_format(bfdf, bfd_object))
+ abort();
+
+ s = open_memstream(&buf, &buf_size);
+ if (!s) {
+ ret = errno;
+ goto out;
+ }
+ init_disassemble_info_compat(&info, s,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
+ info.arch = bfd_get_arch(bfdf);
+ info.mach = bfd_get_mach(bfdf);
+
+ info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
+ dso__bpf_prog(dso)->id);
+ if (!info_node) {
+ ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
+ goto out;
+ }
+ info_linear = info_node->info_linear;
+ sub_id = dso__bpf_prog(dso)->sub_id;
+
+ info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
+ info.buffer_length = info_linear->info.jited_prog_len;
+
+ if (info_linear->info.nr_line_info)
+ prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+ if (info_linear->info.btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(dso__bpf_prog(dso)->env,
+ info_linear->info.btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
+ disassemble = disassembler(bfdf);
+#endif
+ if (disassemble == NULL)
+ abort();
+
+ fflush(s);
+ do {
+ const struct bpf_line_info *linfo = NULL;
+ struct disasm_line *dl;
+ size_t prev_buf_size;
+ const char *srcline;
+ u64 addr;
+
+ addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
+ count = disassemble(pc, &info);
+
+ if (prog_linfo)
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ addr, sub_id,
+ nr_skip);
+
+ if (linfo && btf) {
+ srcline = btf__name_by_offset(btf, linfo->line_off);
+ nr_skip++;
+ } else
+ srcline = NULL;
+
+ fprintf(s, "\n");
+ prev_buf_size = buf_size;
+ fflush(s);
+
+ if (!annotate_opts.hide_src_code && srcline) {
+ args->offset = -1;
+ args->line = strdup(srcline);
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl) {
+ annotation_line__add(&dl->al,
+ &notes->src->source);
+ }
+ }
+
+ args->offset = pc;
+ args->line = buf + prev_buf_size;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc += count;
+ } while (count > 0 && pc < len);
+
+ ret = 0;
+out:
+ free(prog_linfo);
+ btf__free(btf);
+ fclose(s);
+ bfd_close(bfdf);
+ return ret;
+}
+#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+int symbol__disassemble_bpf(struct symbol *sym __maybe_unused, struct annotate_args *args __maybe_unused)
+{
+ return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+
+int symbol__disassemble_bpf_image(struct symbol *sym, struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct disasm_line *dl;
+
+ args->offset = -1;
+ args->line = strdup("to be implemented");
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ zfree(&args->line);
+ return 0;
+}
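The libopcodes path above funnels the disassembler's fprintf()-style output through a POSIX memstream and slices one line per instruction out of the growing buffer by remembering the size at the previous flush. A self-contained sketch of just that capture technique:

/* Sketch: capture printf-style output into a growing buffer and slice
 * it per line, as the memstream logic above does. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = NULL;
	size_t buf_size = 0;
	FILE *s = open_memstream(&buf, &buf_size);

	if (!s)
		return 1;
	for (int i = 0; i < 3; i++) {
		size_t prev = buf_size;		/* size as of the last flush */
		fprintf(s, "insn %d\n", i);	/* producer writes one line */
		fflush(s);			/* publish into buf/buf_size */
		fwrite(buf + prev, 1, buf_size - prev, stdout);
	}
	fclose(s);
	free(buf);
	return 0;
}

Note that buf may be reallocated by any write, so slices must be recomputed from buf after each flush; that is why the perf code indexes buf + prev_buf_size instead of caching pointers into the buffer.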
diff --git a/tools/perf/util/disasm_bpf.h b/tools/perf/util/disasm_bpf.h
new file mode 100644
index 000000000000..2ecb19545388
--- /dev/null
+++ b/tools/perf/util/disasm_bpf.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#ifndef __PERF_DISASM_BPF_H
+#define __PERF_DISASM_BPF_H
+
+struct symbol;
+struct annotate_args;
+
+int symbol__disassemble_bpf(struct symbol *sym, struct annotate_args *args);
+int symbol__disassemble_bpf_image(struct symbol *sym, struct annotate_args *args);
+
+#endif /* __PERF_DISASM_BPF_H */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 67414944f245..5c6e85fdae0d 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1327,7 +1327,7 @@ bool dso_id__empty(const struct dso_id *id)
return !id->maj && !id->min && !id->ino && !id->ino_generation;
}
-void __dso__inject_id(struct dso *dso, struct dso_id *id)
+void __dso__inject_id(struct dso *dso, const struct dso_id *id)
{
struct dsos *dsos = dso__dsos(dso);
struct dso_id *dso_id = dso__id(dso);
@@ -1417,7 +1417,7 @@ void dso__set_sorted_by_name(struct dso *dso)
RC_CHK_ACCESS(dso)->sorted_by_name = true;
}
-struct dso *dso__new_id(const char *name, struct dso_id *id)
+struct dso *dso__new_id(const char *name, const struct dso_id *id)
{
RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1);
struct dso *res;
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index ed0068251c65..bb8e8f444054 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -640,14 +640,14 @@ static inline void dso__set_text_offset(struct dso *dso, u64 val)
int dso_id__cmp(const struct dso_id *a, const struct dso_id *b);
bool dso_id__empty(const struct dso_id *id);
-struct dso *dso__new_id(const char *name, struct dso_id *id);
+struct dso *dso__new_id(const char *name, const struct dso_id *id);
struct dso *dso__new(const char *name);
void dso__delete(struct dso *dso);
int dso__cmp_id(struct dso *a, struct dso *b);
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
-void __dso__inject_id(struct dso *dso, struct dso_id *id);
+void __dso__inject_id(struct dso *dso, const struct dso_id *id);
int dso__name_len(const struct dso *dso);
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index d4acdb37f046..e0998e2a7c4e 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -155,7 +155,7 @@ static int dsos__cmp_key_long_name_id(const void *vkey, const void *vdso)
*/
static struct dso *__dsos__find_by_longname_id(struct dsos *dsos,
const char *name,
- struct dso_id *id,
+ const struct dso_id *id,
bool write_locked)
{
struct dsos__key key = {
@@ -244,7 +244,7 @@ int dsos__add(struct dsos *dsos, struct dso *dso)
struct dsos__find_id_cb_args {
const char *name;
- struct dso_id *id;
+ const struct dso_id *id;
struct dso *res;
};
@@ -260,7 +260,7 @@ static int dsos__find_id_cb(struct dso *dso, void *data)
}
-static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id,
+static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, const struct dso_id *id,
bool cmp_short, bool write_locked)
{
struct dso *res;
@@ -321,7 +321,7 @@ static void dso__set_basename(struct dso *dso)
dso__set_short_name(dso, base, true);
}
-static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
+static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, const struct dso_id *id)
{
struct dso *dso = dso__new_id(name, id);
@@ -337,7 +337,7 @@ static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, struct
return dso;
}
-static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
+static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, const struct dso_id *id)
{
struct dso *dso = __dsos__find_id(dsos, name, id, false, /*write_locked=*/true);
@@ -347,7 +347,7 @@ static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struc
return dso ? dso : __dsos__addnew_id(dsos, name, id);
}
-struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
+struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, const struct dso_id *id)
{
struct dso *dso;
down_write(&dsos->lock);
diff --git a/tools/perf/util/dsos.h b/tools/perf/util/dsos.h
index 6c13b65648bc..a26774950866 100644
--- a/tools/perf/util/dsos.h
+++ b/tools/perf/util/dsos.h
@@ -32,7 +32,7 @@ int __dsos__add(struct dsos *dsos, struct dso *dso);
int dsos__add(struct dsos *dsos, struct dso *dso);
struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
-struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id);
+struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, const struct dso_id *id);
bool dsos__read_build_ids(struct dsos *dsos, bool with_hits);
diff --git a/tools/perf/util/dump-insn.c b/tools/perf/util/dump-insn.c
index 2bd8585db93c..c1cc0ade48d0 100644
--- a/tools/perf/util/dump-insn.c
+++ b/tools/perf/util/dump-insn.c
@@ -15,7 +15,7 @@ const char *dump_insn(struct perf_insn *x __maybe_unused,
}
__weak
-int arch_is_branch(const unsigned char *buf __maybe_unused,
+int arch_is_uncond_branch(const unsigned char *buf __maybe_unused,
size_t len __maybe_unused,
int x86_64 __maybe_unused)
{
diff --git a/tools/perf/util/dump-insn.h b/tools/perf/util/dump-insn.h
index 4a7797dd6d09..20d4d7bb5275 100644
--- a/tools/perf/util/dump-insn.h
+++ b/tools/perf/util/dump-insn.h
@@ -21,6 +21,6 @@ struct perf_insn {
const char *dump_insn(struct perf_insn *x, u64 ip,
u8 *inbuf, int inlen, int *lenp);
-int arch_is_branch(const unsigned char *buf, size_t len, int x86_64);
+int arch_is_uncond_branch(const unsigned char *buf, size_t len, int x86_64);
#endif
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 44ef968a7ad3..92eb9c8dc3e5 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -267,7 +267,7 @@ Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
}
/* Get a type die, but skip qualifiers */
-static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
{
int tag;
@@ -1444,7 +1444,7 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
/* Assuming the location list is sorted by address */
- if (end < data->pc)
+ if (end <= data->pc)
continue;
if (start > data->pc)
break;
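DWARF location-list entries cover the half-open interval [start, end), so an address equal to end lies outside the entry; end <= data->pc is therefore the correct skip condition, where the previous end < data->pc wrongly matched pc == end.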
@@ -1598,6 +1598,9 @@ static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg)
if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0)
return DIE_FIND_CB_SIBLING;
+ if (!check_allowed_ops(ops, nops))
+ return DIE_FIND_CB_SIBLING;
+
if (die_get_real_type(die_mem, &type_die) == NULL)
return DIE_FIND_CB_SIBLING;
@@ -1974,8 +1977,15 @@ static int __die_find_member_offset_cb(Dwarf_Die *die_mem, void *arg)
return DIE_FIND_CB_SIBLING;
/* Unions might not have location */
- if (die_get_data_member_location(die_mem, &loc) < 0)
- loc = 0;
+ if (die_get_data_member_location(die_mem, &loc) < 0) {
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr_integrate(die_mem, DW_AT_data_bit_offset, &attr) &&
+ dwarf_formudata(&attr, &loc) == 0)
+ loc /= 8;
+ else
+ loc = 0;
+ }
if (offset == loc)
return DIE_FIND_CB_END;
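For illustration: with DWARF 5 a bitfield member may carry DW_AT_data_bit_offset instead of DW_AT_data_member_location, measured in bits from the start of the containing type, so dividing by 8 recovers the byte offset compared above. A hypothetical example:

/* DWARF 5 may describe 'b' with DW_AT_data_bit_offset = 32 and no
 * DW_AT_data_member_location; 32 / 8 = byte offset 4. */
struct s {
	int a;		/* byte offset 0 */
	int b : 5;	/* bit offset 32 -> byte offset 4 */
};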
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 24446412b869..336a3a183a78 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -56,6 +56,8 @@ const char *die_get_decl_file(Dwarf_Die *dw_die);
/* Get type die */
Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
+/* Get a type die, but skip qualifiers */
+Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
/* Get a type die, but skip qualifiers and typedef */
Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index a459374d0a1a..1edbccfc3281 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -624,3 +624,18 @@ out:
free(cap_eq);
return NULL;
}
+
+void perf_env__find_br_cntr_info(struct perf_env *env,
+ unsigned int *nr,
+ unsigned int *width)
+{
+ if (nr) {
+ *nr = env->cpu_pmu_caps ? env->br_cntr_nr :
+ env->pmu_caps->br_cntr_nr;
+ }
+
+ if (width) {
+ *width = env->cpu_pmu_caps ? env->br_cntr_width :
+ env->pmu_caps->br_cntr_width;
+ }
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 2a2c37cc40b7..51b36c36019b 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -192,4 +192,7 @@ char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
const char *cap);
bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
+void perf_env__find_br_cntr_info(struct perf_env *env,
+ unsigned int *nr,
+ unsigned int *width);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index f32f9abf6344..aac96d5d1917 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -216,7 +216,7 @@ size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp)
event->cgroup.id, event->cgroup.path);
}
-int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
+int perf_event__process_comm(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -224,7 +224,7 @@ int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
return machine__process_comm_event(machine, event, sample);
}
-int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
+int perf_event__process_namespaces(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -232,7 +232,7 @@ int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
return machine__process_namespaces_event(machine, event, sample);
}
-int perf_event__process_cgroup(struct perf_tool *tool __maybe_unused,
+int perf_event__process_cgroup(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -240,7 +240,7 @@ int perf_event__process_cgroup(struct perf_tool *tool __maybe_unused,
return machine__process_cgroup_event(machine, event, sample);
}
-int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
+int perf_event__process_lost(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -248,7 +248,7 @@ int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
return machine__process_lost_event(machine, event, sample);
}
-int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
+int perf_event__process_aux(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
@@ -256,7 +256,7 @@ int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
return machine__process_aux_event(machine, event);
}
-int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
+int perf_event__process_itrace_start(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
@@ -264,7 +264,7 @@ int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
return machine__process_itrace_start_event(machine, event);
}
-int perf_event__process_aux_output_hw_id(struct perf_tool *tool __maybe_unused,
+int perf_event__process_aux_output_hw_id(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
@@ -272,7 +272,7 @@ int perf_event__process_aux_output_hw_id(struct perf_tool *tool __maybe_unused,
return machine__process_aux_output_hw_id_event(machine, event);
}
-int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
+int perf_event__process_lost_samples(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -280,7 +280,7 @@ int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
return machine__process_lost_samples_event(machine, event, sample);
}
-int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
+int perf_event__process_switch(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
@@ -288,7 +288,7 @@ int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
return machine__process_switch_event(machine, event);
}
-int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
+int perf_event__process_ksymbol(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
@@ -296,7 +296,7 @@ int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
return machine__process_ksymbol(machine, event, sample);
}
-int perf_event__process_bpf(struct perf_tool *tool __maybe_unused,
+int perf_event__process_bpf(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -304,7 +304,7 @@ int perf_event__process_bpf(struct perf_tool *tool __maybe_unused,
return machine__process_bpf(machine, event, sample);
}
-int perf_event__process_text_poke(struct perf_tool *tool __maybe_unused,
+int perf_event__process_text_poke(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -387,7 +387,7 @@ size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
return ret;
}
-int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
+int perf_event__process_mmap(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -395,7 +395,7 @@ int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
return machine__process_mmap_event(machine, event, sample);
}
-int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
+int perf_event__process_mmap2(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -410,7 +410,7 @@ size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
event->fork.ppid, event->fork.ptid);
}
-int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
+int perf_event__process_fork(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -418,7 +418,7 @@ int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
return machine__process_fork_event(machine, event, sample);
}
-int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
+int perf_event__process_exit(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -426,6 +426,26 @@ int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
return machine__process_exit_event(machine, event, sample);
}
+int perf_event__exit_del_thread(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ struct thread *thread = machine__findnew_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
+
+ dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+
+ if (thread) {
+ machine__remove_thread(machine, thread);
+ thread__put(thread);
+ }
+
+ return 0;
+}
+
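A tool that has no use for dead threads can plug this in as its exit handler; a sketch, where perf_tool__init() is assumed from the surrounding perf code:

static struct perf_tool tool;

static void setup_tool(void)
{
	perf_tool__init(&tool, /*ordered_events=*/true);	/* assumed helper */
	tool.exit = perf_event__exit_del_thread;	/* free threads on PERF_RECORD_EXIT */
}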
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
return fprintf(fp, " offset: %#"PRI_lx64" size: %#"PRI_lx64" flags: %#"PRI_lx64" [%s%s%s]\n",
@@ -587,7 +607,7 @@ size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FIL
return ret;
}
-int perf_event__process(struct perf_tool *tool __maybe_unused,
+int perf_event__process(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index d8bcee2e9b93..f8742e6230a5 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -267,71 +267,75 @@ struct perf_tool;
void perf_event__read_stat_config(struct perf_stat_config *config,
struct perf_record_stat_config *event);
-int perf_event__process_comm(struct perf_tool *tool,
+int perf_event__process_comm(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_lost(struct perf_tool *tool,
+int perf_event__process_lost(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_lost_samples(struct perf_tool *tool,
+int perf_event__process_lost_samples(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_aux(struct perf_tool *tool,
+int perf_event__process_aux(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_itrace_start(struct perf_tool *tool,
+int perf_event__process_itrace_start(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_aux_output_hw_id(struct perf_tool *tool,
+int perf_event__process_aux_output_hw_id(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_switch(struct perf_tool *tool,
+int perf_event__process_switch(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_namespaces(struct perf_tool *tool,
+int perf_event__process_namespaces(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_cgroup(struct perf_tool *tool,
+int perf_event__process_cgroup(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_mmap(struct perf_tool *tool,
+int perf_event__process_mmap(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_mmap2(struct perf_tool *tool,
+int perf_event__process_mmap2(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_fork(struct perf_tool *tool,
+int perf_event__process_fork(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_exit(struct perf_tool *tool,
+int perf_event__process_exit(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_ksymbol(struct perf_tool *tool,
+int perf_event__exit_del_thread(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_bpf(struct perf_tool *tool,
+int perf_event__process_ksymbol(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_bpf(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process_text_poke(struct perf_tool *tool,
+int perf_event__process_text_poke(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_event__process(struct perf_tool *tool,
+int perf_event__process(const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
diff --git a/tools/perf/util/events_stats.h b/tools/perf/util/events_stats.h
index f43e5b1a366a..eabd7913c309 100644
--- a/tools/perf/util/events_stats.h
+++ b/tools/perf/util/events_stats.h
@@ -18,7 +18,18 @@
* PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
* in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
* exactly how many samples the kernel in fact dropped, i.e. it is the sum of
- * all struct perf_record_lost_samples.lost fields reported.
+ * all struct perf_record_lost_samples.lost fields reported without setting the
+ * misc field in the header.
+ *
+ * The BPF program can discard samples according to the filter expressions given
+ * by the user. This number is kept in a BPF map and dumped at the end of perf
+ * record in a PERF_RECORD_LOST_SAMPLES event. To differentiate it from other
+ * lost samples, perf tools sets PERF_RECORD_MISC_LOST_SAMPLES_BPF flag in the
+ * header.misc field. The number of dropped-samples events is stored in
+ * .nr_events[PERF_RECORD_LOST_SAMPLES] while total_dropped_samples tells
+ * exactly how many samples the BPF program in fact dropped, i.e. it is the sum
+ * of all struct perf_record_lost_samples.lost fields reported with the misc
+ * field set in the header.
*
* The total_period is needed because by default auto-freq is used, so
* multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
@@ -28,6 +39,7 @@
struct events_stats {
u64 total_lost;
u64 total_lost_samples;
+ u64 total_dropped_samples;
u64 total_aux_lost;
u64 total_aux_partial;
u64 total_aux_collision;
@@ -48,6 +60,7 @@ struct hists_stats {
u32 nr_samples;
u32 nr_non_filtered_samples;
u32 nr_lost_samples;
+ u32 nr_dropped_samples;
};
void events_stats__inc(struct events_stats *stats, u32 type);
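A sketch of the two-bucket accounting the comment describes; the flag value below is an assumption for illustration (perf defines PERF_RECORD_MISC_LOST_SAMPLES_BPF in its own headers):

#include <stdint.h>

#define PERF_RECORD_MISC_LOST_SAMPLES_BPF (1 << 15)	/* assumed value */

struct lost_samples_event {
	uint16_t misc;		/* copied from perf_event_header.misc */
	uint64_t lost;
};

struct stats {
	uint64_t total_lost_samples;	/* ring-buffer losses */
	uint64_t total_dropped_samples;	/* BPF filter drops */
};

static void account_lost(struct stats *st, const struct lost_samples_event *ev)
{
	if (ev->misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
		st->total_dropped_samples += ev->lost;
	else
		st->total_lost_samples += ev->lost;
}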
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 3a719edafc7a..f14b7e6ff1dc 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -33,6 +33,8 @@
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
+#include "util/env.h"
+#include "util/intel-tpebs.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
@@ -78,6 +80,7 @@ void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
evlist->ctl_fd.fd = -1;
evlist->ctl_fd.ack = -1;
evlist->ctl_fd.pos = -1;
+ evlist->nr_br_cntr = -1;
}
struct evlist *evlist__new(void)
@@ -179,6 +182,7 @@ void evlist__delete(struct evlist *evlist)
if (evlist == NULL)
return;
+ tpebs_delete();
evlist__free_stats(evlist);
evlist__munmap(evlist);
evlist__close(evlist);
@@ -1063,7 +1067,7 @@ int evlist__create_maps(struct evlist *evlist, struct target *target)
if (!threads)
return -1;
- if (target__uses_dummy_map(target))
+ if (target__uses_dummy_map(target) && !evlist__has_bpf_output(evlist))
cpus = perf_cpu_map__new_any_cpu();
else
cpus = perf_cpu_map__new(target->cpu_list);
@@ -1086,7 +1090,8 @@ out_delete_threads:
return -1;
}
-int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
+int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
+ struct target *target)
{
struct evsel *evsel;
int err = 0;
@@ -1108,7 +1113,7 @@ int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
* non-tracepoint events can have BPF filters.
*/
if (!list_empty(&evsel->bpf_filters)) {
- err = perf_bpf_filter__prepare(evsel);
+ err = perf_bpf_filter__prepare(evsel, target);
if (err) {
*err_evsel = evsel;
break;
@@ -1261,6 +1266,72 @@ u64 evlist__combined_branch_type(struct evlist *evlist)
return branch_type;
}
+static struct evsel *
+evlist__find_dup_event_from_prev(struct evlist *evlist, struct evsel *event)
+{
+ struct evsel *pos;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (event == pos)
+ break;
+ if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
+ !strcmp(pos->name, event->name))
+ return pos;
+ }
+ return NULL;
+}
+
+#define MAX_NR_ABBR_NAME (26 * 11)
+
+/*
+ * The abbr name runs from A to Z9. If the number of events
+ * that require a branch counter exceeds MAX_NR_ABBR_NAME,
+ * NA is returned.
+ */
+static void evlist__new_abbr_name(char *name)
+{
+ static int idx;
+ int i = idx / 26;
+
+ if (idx >= MAX_NR_ABBR_NAME) {
+ name[0] = 'N';
+ name[1] = 'A';
+ name[2] = '\0';
+ return;
+ }
+
+ name[0] = 'A' + (idx % 26);
+
+ if (!i)
+ name[1] = '\0';
+ else {
+ name[1] = '0' + i - 1;
+ name[2] = '\0';
+ }
+
+ idx++;
+}
+
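The scheme yields 26 single-letter names (A..Z) and then ten batches of 26 two-character names (A0..Z9), 286 names in total, after which everything becomes NA. A standalone reimplementation, for illustration only:

#include <stdio.h>

static void abbr_name(int idx, char name[3])
{
	int i = idx / 26;

	if (idx >= 26 * 11) {			/* out of names */
		name[0] = 'N'; name[1] = 'A'; name[2] = '\0';
		return;
	}
	name[0] = 'A' + (idx % 26);
	if (!i)
		name[1] = '\0';			/* "A" .. "Z" */
	else {
		name[1] = '0' + i - 1;		/* "A0" .. "Z9" */
		name[2] = '\0';
	}
}

int main(void)
{
	int samples[] = { 0, 25, 26, 51, 285, 286 };
	char name[3];

	for (unsigned int k = 0; k < sizeof(samples) / sizeof(samples[0]); k++) {
		abbr_name(samples[k], name);
		printf("%3d -> %s\n", samples[k], name);	/* A Z A0 Z0 Z9 NA */
	}
	return 0;
}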
+void evlist__update_br_cntr(struct evlist *evlist)
+{
+ struct evsel *evsel, *dup;
+ int i = 0;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) {
+ evsel->br_cntr_idx = i++;
+ evsel__leader(evsel)->br_cntr_nr++;
+
+ dup = evlist__find_dup_event_from_prev(evlist, evsel);
+ if (dup)
+ memcpy(evsel->abbr_name, dup->abbr_name, 3 * sizeof(char));
+ else
+ evlist__new_abbr_name(evsel->abbr_name);
+ }
+ }
+ evlist->nr_br_cntr = i;
+}
+
bool evlist__valid_read_format(struct evlist *evlist)
{
struct evsel *first = evlist__first(evlist), *pos = first;
@@ -2556,3 +2627,15 @@ void evlist__uniquify_name(struct evlist *evlist)
}
}
}
+
+bool evlist__has_bpf_output(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_bpf_output(evsel))
+ return true;
+ }
+
+ return false;
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index cb91dc9117a2..bcc1c6984bb5 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -20,6 +20,7 @@ struct pollfd;
struct thread_map;
struct perf_cpu_map;
struct record_opts;
+struct target;
/*
* State machine of bkw_mmap_state:
@@ -56,6 +57,7 @@ struct evlist {
bool enabled;
int id_pos;
int is_pos;
+ int nr_br_cntr;
u64 combined_sample_type;
enum bkw_mmap_state bkw_mmap_state;
struct {
@@ -212,11 +214,13 @@ void evlist__enable_non_dummy(struct evlist *evlist);
void evlist__set_selected(struct evlist *evlist, struct evsel *evsel);
int evlist__create_maps(struct evlist *evlist, struct target *target);
-int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
+int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
+ struct target *target);
u64 __evlist__combined_sample_type(struct evlist *evlist);
u64 evlist__combined_sample_type(struct evlist *evlist);
u64 evlist__combined_branch_type(struct evlist *evlist);
+void evlist__update_br_cntr(struct evlist *evlist);
bool evlist__sample_id_all(struct evlist *evlist);
u16 evlist__id_hdr_size(struct evlist *evlist);
@@ -443,5 +447,6 @@ int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
void evlist__check_mem_load_aux(struct evlist *evlist);
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
void evlist__uniquify_name(struct evlist *evlist);
+bool evlist__has_bpf_output(struct evlist *evlist);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index bc603193c477..dbf9c8cee3c5 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -59,6 +59,7 @@
#include <internal/xyarray.h>
#include <internal/lib.h>
#include <internal/threadmap.h>
+#include "util/intel-tpebs.h"
#include <linux/ctype.h>
@@ -772,7 +773,7 @@ const char *evsel__name(struct evsel *evsel)
case PERF_TYPE_SOFTWARE:
if (evsel__is_tool(evsel))
- evsel__tool_name(evsel->tool_event, bf, sizeof(bf));
+ evsel__tool_name(evsel__tool_event(evsel), bf, sizeof(bf));
else
evsel__sw_name(evsel, bf, sizeof(bf));
break;
@@ -810,7 +811,7 @@ const char *evsel__metric_id(const struct evsel *evsel)
return evsel->metric_id;
if (evsel__is_tool(evsel))
- return perf_tool_event__to_str(evsel->tool_event);
+ return perf_tool_event__to_str(evsel__tool_event(evsel));
return "unknown";
}
@@ -1502,8 +1503,8 @@ void evsel__exit(struct evsel *evsel)
evsel->per_pkg_mask = NULL;
zfree(&evsel->metric_events);
perf_evsel__object.fini(evsel);
- if (evsel->tool_event == PERF_TOOL_SYSTEM_TIME ||
- evsel->tool_event == PERF_TOOL_USER_TIME)
+ if (evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME ||
+ evsel__tool_event(evsel) == PERF_TOOL_USER_TIME)
xyarray__delete(evsel->start_times);
}
@@ -1539,6 +1540,11 @@ static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
}
+static int evsel__read_retire_lat(struct evsel *evsel, int cpu_map_idx, int thread)
+{
+ return tpebs_set_evsel(evsel, cpu_map_idx, thread);
+}
+
static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
u64 val, u64 ena, u64 run, u64 lost)
{
@@ -1546,6 +1552,12 @@ static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
count = perf_counts(counter->counts, cpu_map_idx, thread);
+ if (counter->retire_lat) {
+ evsel__read_retire_lat(counter, cpu_map_idx, thread);
+ perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
+ return;
+ }
+
count->val = val;
count->ena = ena;
count->run = run;
@@ -1554,6 +1566,60 @@ static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}
+static bool evsel__group_has_tpebs(struct evsel *leader)
+{
+ struct evsel *evsel;
+
+ for_each_group_evsel(evsel, leader) {
+ if (evsel__is_retire_lat(evsel))
+ return true;
+ }
+ return false;
+}
+
+static u64 evsel__group_read_nr_members(struct evsel *leader)
+{
+ u64 nr = leader->core.nr_members;
+ struct evsel *evsel;
+
+ for_each_group_evsel(evsel, leader) {
+ if (evsel__is_retire_lat(evsel))
+ nr--;
+ }
+ return nr;
+}
+
+static u64 evsel__group_read_size(struct evsel *leader)
+{
+ u64 read_format = leader->core.attr.read_format;
+ int entry = sizeof(u64); /* value */
+ int size = 0;
+ int nr = 1;
+
+ if (!evsel__group_has_tpebs(leader))
+ return perf_evsel__read_size(&leader->core);
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ size += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ size += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_ID)
+ entry += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_GROUP) {
+ nr = evsel__group_read_nr_members(leader);
+ size += sizeof(u64);
+ }
+
+ size += entry * nr;
+ return size;
+}
+
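A worked example of the arithmetic: with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED and a four-member group containing one retire_lat (TPEBS) event, nr drops to 3, so the expected size is 8 (nr) + 8 (time_enabled) + 3 * (8 value + 8 id) = 64 bytes, matching what the kernel actually returns for the three real counters.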
static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
{
u64 read_format = leader->core.attr.read_format;
@@ -1562,7 +1628,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int
nr = *data++;
- if (nr != (u64) leader->core.nr_members)
+ if (nr != evsel__group_read_nr_members(leader))
return -EINVAL;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
@@ -1592,7 +1658,7 @@ static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
{
struct perf_stat_evsel *ps = leader->stats;
u64 read_format = leader->core.attr.read_format;
- int size = perf_evsel__read_size(&leader->core);
+ int size = evsel__group_read_size(leader);
u64 *data = ps->group_data;
if (!(read_format & PERF_FORMAT_ID))
@@ -1719,7 +1785,7 @@ static int evsel__read_tool(struct evsel *evsel, int cpu_map_idx, int thread)
count = perf_counts(evsel->counts, cpu_map_idx, thread);
- switch (evsel->tool_event) {
+ switch (evsel__tool_event(evsel)) {
case PERF_TOOL_DURATION_TIME:
/*
* Pretend duration_time is only on the first CPU and thread, or
@@ -1734,7 +1800,7 @@ static int evsel__read_tool(struct evsel *evsel, int cpu_map_idx, int thread)
break;
case PERF_TOOL_USER_TIME:
case PERF_TOOL_SYSTEM_TIME: {
- bool system = evsel->tool_event == PERF_TOOL_SYSTEM_TIME;
+ bool system = evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME;
start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
fd = FD(evsel, cpu_map_idx, thread);
@@ -1784,6 +1850,9 @@ int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
if (evsel__is_tool(evsel))
return evsel__read_tool(evsel, cpu_map_idx, thread);
+ if (evsel__is_retire_lat(evsel))
+ return evsel__read_retire_lat(evsel, cpu_map_idx, thread);
+
if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
return evsel__read_group(evsel, cpu_map_idx, thread);
@@ -2003,8 +2072,8 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
return -ENOMEM;
- if ((evsel->tool_event == PERF_TOOL_SYSTEM_TIME ||
- evsel->tool_event == PERF_TOOL_USER_TIME) &&
+ if ((evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME ||
+ evsel__tool_event(evsel) == PERF_TOOL_USER_TIME) &&
!evsel->start_times) {
evsel->start_times = xyarray__new(perf_cpu_map__nr(cpus), nthreads, sizeof(__u64));
if (!evsel->start_times)
@@ -2193,13 +2262,16 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
int pid = -1, err, old_errno;
enum rlimit_action set_rlimit = NO_CHANGE;
- if (evsel->tool_event == PERF_TOOL_DURATION_TIME) {
+ if (evsel__tool_event(evsel) == PERF_TOOL_DURATION_TIME) {
if (evsel->core.attr.sample_period) /* no sampling */
return -EINVAL;
evsel->start_time = rdclock();
return 0;
}
+ if (evsel__is_retire_lat(evsel))
+ return tpebs_start(evsel->evlist);
+
err = __evsel__prepare_open(evsel, cpus, threads);
if (err)
return err;
@@ -2232,9 +2304,9 @@ retry_open:
if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);
- if (evsel->tool_event == PERF_TOOL_USER_TIME ||
- evsel->tool_event == PERF_TOOL_SYSTEM_TIME) {
- bool system = evsel->tool_event == PERF_TOOL_SYSTEM_TIME;
+ if (evsel__tool_event(evsel) == PERF_TOOL_USER_TIME ||
+ evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME) {
+ bool system = evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME;
__u64 *start_time = NULL;
if (evsel->core.attr.sample_period) {
@@ -2392,6 +2464,8 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
void evsel__close(struct evsel *evsel)
{
+ if (evsel__is_retire_lat(evsel))
+ tpebs_delete();
perf_evsel__close(&evsel->core);
perf_evsel__free_id(&evsel->core);
}
@@ -2562,17 +2636,18 @@ u64 evsel__bitfield_swap_branch_flags(u64 value)
static inline bool evsel__has_branch_counters(const struct evsel *evsel)
{
- struct evsel *cur, *leader = evsel__leader(evsel);
+ struct evsel *leader = evsel__leader(evsel);
/* The branch counters feature only supports group */
if (!leader || !evsel->evlist)
return false;
- evlist__for_each_entry(evsel->evlist, cur) {
- if ((leader == evsel__leader(cur)) &&
- (cur->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
- return true;
- }
+ if (evsel->evlist->nr_br_cntr < 0)
+ evlist__update_br_cntr(evsel->evlist);
+
+ if (leader->br_cntr_nr > 0)
+ return true;
+
return false;
}
@@ -2810,8 +2885,6 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array = (void *)array + sz;
if (evsel__has_branch_counters(evsel)) {
- OVERFLOW_CHECK_u64(array);
-
data->branch_stack_cntr = (u64 *)array;
sz = data->branch_stack->nr * sizeof(u64);
@@ -2975,7 +3048,7 @@ int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
return 0;
}
-u16 evsel__id_hdr_size(struct evsel *evsel)
+u16 evsel__id_hdr_size(const struct evsel *evsel)
{
u64 sample_type = evsel->core.attr.sample_type;
u16 size = 0;
@@ -3357,6 +3430,9 @@ static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
{
int cpu_map_idx, thread;
+ if (evsel__is_retire_lat(evsel))
+ return 0;
+
for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
thread++) {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 80b5f6dd868e..15e745a9a798 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -98,6 +98,7 @@ struct evsel {
bool bpf_counter;
bool use_config_name;
bool skippable;
+ bool retire_lat;
int bpf_fd;
struct bpf_object *bpf_obj;
struct list_head config_terms;
@@ -148,6 +149,20 @@ struct evsel {
__u64 synth_sample_type;
/*
+ * Store the branch counter related information.
+ * br_cntr_idx: The idx of the branch counter event in the evlist
+ * br_cntr_nr: The number of the branch counter event in the group
+ * (Only available for the leader event)
+	 * abbr_name: The abbreviation name assigned to an event that is
+	 *            logged by the branch counter.
+	 *            The abbr name runs from A to Z9; NA is used when
+	 *            the index is out of range.
+ */
+ int br_cntr_idx;
+ int br_cntr_nr;
+ char abbr_name[3];
+
+ /*
* bpf_counter_ops serves two use cases:
	 * 1. perf-stat -b counting events used by BPF programs
* 2. perf-stat --use-bpf use BPF programs to aggregate counts
@@ -310,6 +325,16 @@ static inline bool evsel__is_tool(const struct evsel *evsel)
return evsel->tool_event != PERF_TOOL_NONE;
}
+static inline bool evsel__is_retire_lat(const struct evsel *evsel)
+{
+ return evsel->retire_lat;
+}
+
+static inline enum perf_tool_event evsel__tool_event(const struct evsel *evsel)
+{
+ return evsel->tool_event;
+}
+
const char *evsel__group_name(struct evsel *evsel);
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);
@@ -422,7 +447,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
u64 *timestamp);
-u16 evsel__id_hdr_size(struct evsel *evsel);
+u16 evsel__id_hdr_size(const struct evsel *evsel);
static inline struct evsel *evsel__next(struct evsel *evsel)
{
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 8719b3cb5646..c2c0500d5da9 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -107,7 +107,6 @@ out:
return ++printed;
}
-#ifndef PYTHON_PERF
int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
unsigned int print_opts, struct callchain_cursor *cursor,
struct strlist *bt_stop_list, FILE *fp)
@@ -248,4 +247,3 @@ int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
return printed;
}
-#endif /* PYTHON_PERF */
diff --git a/tools/perf/util/ftrace.h b/tools/perf/util/ftrace.h
index 558efcb98d25..bae649ef50e8 100644
--- a/tools/perf/util/ftrace.h
+++ b/tools/perf/util/ftrace.h
@@ -6,6 +6,7 @@
#include "target.h"
struct evlist;
+struct hashmap;
struct perf_ftrace {
struct evlist *evlist;
@@ -15,6 +16,7 @@ struct perf_ftrace {
struct list_head notrace;
struct list_head graph_funcs;
struct list_head nograph_funcs;
+ struct hashmap *profile_hash;
unsigned long percpu_buffer_size;
bool inherit;
bool use_nsec;
@@ -25,6 +27,7 @@ struct perf_ftrace {
int graph_noirqs;
int graph_verbose;
int graph_thresh;
+ int graph_tail;
};
struct filter_entry {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 55e9553861d0..a6386d12afd7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3676,32 +3676,50 @@ int perf_header__write_pipe(int fd)
static int perf_session__do_write_header(struct perf_session *session,
struct evlist *evlist,
int fd, bool at_exit,
- struct feat_copier *fc)
+ struct feat_copier *fc,
+ bool write_attrs_after_data)
{
struct perf_file_header f_header;
- struct perf_file_attr f_attr;
struct perf_header *header = &session->header;
struct evsel *evsel;
struct feat_fd ff = {
.fd = fd,
};
- u64 attr_offset;
+ u64 attr_offset = sizeof(f_header), attr_size = 0;
int err;
- lseek(fd, sizeof(f_header), SEEK_SET);
+ if (write_attrs_after_data && at_exit) {
+ /*
+ * Write features at the end of the file first so that
+ * attributes may come after them.
+ */
+ if (!header->data_offset && header->data_size) {
+ pr_err("File contains data but offset unknown\n");
+ err = -1;
+ goto err_out;
+ }
+ header->feat_offset = header->data_offset + header->data_size;
+ err = perf_header__adds_write(header, evlist, fd, fc);
+ if (err < 0)
+ goto err_out;
+ attr_offset = lseek(fd, 0, SEEK_CUR);
+ } else {
+ lseek(fd, attr_offset, SEEK_SET);
+ }
evlist__for_each_entry(session->evlist, evsel) {
- evsel->id_offset = lseek(fd, 0, SEEK_CUR);
- err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
- if (err < 0) {
- pr_debug("failed to write perf header\n");
- free(ff.buf);
- return err;
+ evsel->id_offset = attr_offset;
+ /* Avoid writing at the end of the file until the session is exiting. */
+ if (!write_attrs_after_data || at_exit) {
+ err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ goto err_out;
+ }
}
+ attr_offset += evsel->core.ids * sizeof(u64);
}
- attr_offset = lseek(ff.fd, 0, SEEK_CUR);
-
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
/*
@@ -3711,40 +3729,46 @@ static int perf_session__do_write_header(struct perf_session *session,
*/
evsel->core.attr.size = sizeof(evsel->core.attr);
}
- f_attr = (struct perf_file_attr){
- .attr = evsel->core.attr,
- .ids = {
- .offset = evsel->id_offset,
- .size = evsel->core.ids * sizeof(u64),
+ /* Avoid writing at the end of the file until the session is exiting. */
+ if (!write_attrs_after_data || at_exit) {
+ struct perf_file_attr f_attr = {
+ .attr = evsel->core.attr,
+ .ids = {
+ .offset = evsel->id_offset,
+ .size = evsel->core.ids * sizeof(u64),
+ }
+ };
+ err = do_write(&ff, &f_attr, sizeof(f_attr));
+ if (err < 0) {
+ pr_debug("failed to write perf header attribute\n");
+ goto err_out;
}
- };
- err = do_write(&ff, &f_attr, sizeof(f_attr));
- if (err < 0) {
- pr_debug("failed to write perf header attribute\n");
- free(ff.buf);
- return err;
}
+ attr_size += sizeof(struct perf_file_attr);
}
- if (!header->data_offset)
- header->data_offset = lseek(fd, 0, SEEK_CUR);
+ if (!header->data_offset) {
+ if (write_attrs_after_data)
+ header->data_offset = sizeof(f_header);
+ else
+ header->data_offset = attr_offset + attr_size;
+ }
header->feat_offset = header->data_offset + header->data_size;
- if (at_exit) {
+ if (!write_attrs_after_data && at_exit) {
+		/* Write features now that feat_offset is known. */
err = perf_header__adds_write(header, evlist, fd, fc);
- if (err < 0) {
- free(ff.buf);
- return err;
- }
+ if (err < 0)
+ goto err_out;
}
f_header = (struct perf_file_header){
.magic = PERF_MAGIC,
.size = sizeof(f_header),
- .attr_size = sizeof(f_attr),
+ .attr_size = sizeof(struct perf_file_attr),
.attrs = {
.offset = attr_offset,
- .size = evlist->core.nr_entries * sizeof(f_attr),
+ .size = attr_size,
},
.data = {
.offset = header->data_offset,
@@ -3757,21 +3781,24 @@ static int perf_session__do_write_header(struct perf_session *session,
lseek(fd, 0, SEEK_SET);
err = do_write(&ff, &f_header, sizeof(f_header));
- free(ff.buf);
if (err < 0) {
pr_debug("failed to write perf header\n");
- return err;
+ goto err_out;
+ } else {
+ lseek(fd, 0, SEEK_END);
+ err = 0;
}
- lseek(fd, header->data_offset + header->data_size, SEEK_SET);
-
- return 0;
+err_out:
+ free(ff.buf);
+ return err;
}
int perf_session__write_header(struct perf_session *session,
struct evlist *evlist,
int fd, bool at_exit)
{
- return perf_session__do_write_header(session, evlist, fd, at_exit, NULL);
+ return perf_session__do_write_header(session, evlist, fd, at_exit, /*fc=*/NULL,
+ /*write_attrs_after_data=*/false);
}
size_t perf_session__data_offset(const struct evlist *evlist)
@@ -3791,9 +3818,11 @@ size_t perf_session__data_offset(const struct evlist *evlist)
int perf_session__inject_header(struct perf_session *session,
struct evlist *evlist,
int fd,
- struct feat_copier *fc)
+ struct feat_copier *fc,
+ bool write_attrs_after_data)
{
- return perf_session__do_write_header(session, evlist, fd, true, fc);
+ return perf_session__do_write_header(session, evlist, fd, true, fc,
+ write_attrs_after_data);
}
static int perf_header__getbuffer64(struct perf_header *header,
@@ -3986,6 +4015,24 @@ int perf_file_header__read(struct perf_file_header *header,
adds_features));
}
+ if (header->size > header->attrs.offset) {
+ pr_err("Perf file header corrupt: header overlaps attrs\n");
+ return -1;
+ }
+
+ if (header->size > header->data.offset) {
+ pr_err("Perf file header corrupt: header overlaps data\n");
+ return -1;
+ }
+
+ if ((header->attrs.offset <= header->data.offset &&
+ header->attrs.offset + header->attrs.size > header->data.offset) ||
+ (header->attrs.offset > header->data.offset &&
+ header->data.offset + header->data.size > header->attrs.offset)) {
+ pr_err("Perf file header corrupt: Attributes and data overlap\n");
+ return -1;
+ }
+
if (header->size != sizeof(*header)) {
/* Support the previous format */
if (header->size == offsetof(typeof(*header), adds_features))
@@ -4066,13 +4113,8 @@ static int perf_file_section__process(struct perf_file_section *section,
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
struct perf_header *ph,
- struct perf_data* data,
- bool repipe, int repipe_fd)
+ struct perf_data *data)
{
- struct feat_fd ff = {
- .fd = repipe_fd,
- .ph = ph,
- };
ssize_t ret;
ret = perf_data__read(data, header, sizeof(*header));
@@ -4087,19 +4129,15 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
if (ph->needs_swap)
header->size = bswap_64(header->size);
- if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
- return -1;
-
return 0;
}
-static int perf_header__read_pipe(struct perf_session *session, int repipe_fd)
+static int perf_header__read_pipe(struct perf_session *session)
{
struct perf_header *header = &session->header;
struct perf_pipe_file_header f_header;
- if (perf_file_header__read_pipe(&f_header, header, session->data,
- session->repipe, repipe_fd) < 0) {
+ if (perf_file_header__read_pipe(&f_header, header, session->data) < 0) {
pr_debug("incompatible file format\n");
return -EINVAL;
}
@@ -4199,7 +4237,7 @@ static int evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_h
}
#endif
-int perf_session__read_header(struct perf_session *session, int repipe_fd)
+int perf_session__read_header(struct perf_session *session)
{
struct perf_data *data = session->data;
struct perf_header *header = &session->header;
@@ -4220,7 +4258,7 @@ int perf_session__read_header(struct perf_session *session, int repipe_fd)
* We can read 'pipe' data event from regular file,
* check for the pipe header regardless of source.
*/
- err = perf_header__read_pipe(session, repipe_fd);
+ err = perf_header__read_pipe(session);
if (!err || perf_data__is_pipe(data)) {
data->is_pipe = true;
return err;
@@ -4326,7 +4364,7 @@ out_delete_evlist:
int perf_event__process_feature(struct perf_session *session,
union perf_event *event)
{
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct feat_fd ff = { .fd = 0 };
struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
int type = fe->header.type;
@@ -4405,7 +4443,7 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
return ret;
}
-int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
@@ -4444,7 +4482,7 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
return 0;
}
-int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
+int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
{
@@ -4514,15 +4552,14 @@ int perf_event__process_tracing_data(struct perf_session *session,
SEEK_SET);
}
- size_read = trace_report(fd, &session->tevent,
- session->repipe);
+ size_read = trace_report(fd, &session->tevent, session->trace_event_repipe);
padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
if (readn(fd, buf, padding) < 0) {
pr_err("%s: reading input file", __func__);
return -1;
}
- if (session->repipe) {
+ if (session->trace_event_repipe) {
int retw = write(STDOUT_FILENO, buf, padding);
if (retw <= 0 || retw != padding) {
pr_err("%s: repiping tracing data padding", __func__);
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 7c16a250e738..a63a361f20f4 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -61,14 +61,28 @@ struct perf_file_section {
u64 size;
};
+/**
+ * struct perf_file_header: Header representation on disk.
+ */
struct perf_file_header {
+ /** @magic: Holds "PERFILE2". */
u64 magic;
+	/** @size: Size of this header, i.e. sizeof(struct perf_file_header). */
u64 size;
+ /**
+	 * @attr_size: Size of each attrs entry, i.e. sizeof(struct perf_event_attr) +
+	 * sizeof(struct perf_file_section).
+ */
u64 attr_size;
+ /** @attrs: Offset and size of file section holding attributes. */
struct perf_file_section attrs;
+ /** @data: Offset and size of file section holding regular event data. */
struct perf_file_section data;
- /* event_types is ignored */
+ /** @event_types: Ignored. */
struct perf_file_section event_types;
+ /**
+ * @adds_features: Bitmap of features. The features are immediately after the data section.
+ */
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
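A minimal sketch of reading and sanity-checking this header from a file, mirroring the overlap checks added in header.c above; the struct is restated locally under the assumption that HEADER_FEAT_BITS is 256:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct file_section { uint64_t offset, size; };

struct file_header {
	uint64_t magic;				/* "PERFILE2" */
	uint64_t size;
	uint64_t attr_size;
	struct file_section attrs, data, event_types;
	uint64_t adds_features[256 / 64];	/* HEADER_FEAT_BITS assumed 256 */
};

int main(int argc, char **argv)
{
	struct file_header h;
	FILE *fp = argc > 1 ? fopen(argv[1], "rb") : NULL;

	if (!fp || fread(&h, sizeof(h), 1, fp) != 1)
		return 1;
	if (memcmp(&h.magic, "PERFILE2", 8) != 0)
		return 1;			/* not a native-endian v2 file */
	if (h.size > h.attrs.offset || h.size > h.data.offset)
		return 1;			/* header overlaps a section */
	printf("attrs at %#llx, data at %#llx\n",
	       (unsigned long long)h.attrs.offset,
	       (unsigned long long)h.data.offset);
	fclose(fp);
	return 0;
}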
@@ -117,7 +131,7 @@ union perf_event;
extern const char perf_version_string[];
-int perf_session__read_header(struct perf_session *session, int repipe_fd);
+int perf_session__read_header(struct perf_session *session);
int perf_session__write_header(struct perf_session *session,
struct evlist *evlist,
int fd, bool at_exit);
@@ -136,7 +150,8 @@ struct feat_copier {
int perf_session__inject_header(struct perf_session *session,
struct evlist *evlist,
int fd,
- struct feat_copier *fc);
+ struct feat_copier *fc,
+ bool write_attrs_after_data);
size_t perf_session__data_offset(const struct evlist *evlist);
@@ -156,9 +171,9 @@ int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
int perf_event__process_feature(struct perf_session *session,
union perf_event *event);
-int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
+int perf_event__process_attr(const struct perf_tool *tool, union perf_event *event,
struct evlist **pevlist);
-int perf_event__process_event_update(struct perf_tool *tool,
+int perf_event__process_event_update(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist);
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
index 37ea987017f6..e4cc4785f744 100644
--- a/tools/perf/util/hisi-ptt.c
+++ b/tools/perf/util/hisi-ptt.c
@@ -79,14 +79,14 @@ static void hisi_ptt_dump_event(struct hisi_ptt *ptt, unsigned char *buf,
static int hisi_ptt_process_event(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
return 0;
}
static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct hisi_ptt *ptt = container_of(session->auxtrace, struct hisi_ptt,
auxtrace);
@@ -123,7 +123,7 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
}
static int hisi_ptt_flush(struct perf_session *session __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
return 0;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index f028f113c4fd..f387e85a0087 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -472,10 +472,18 @@ static int hist_entry__init(struct hist_entry *he,
memcpy(he->branch_info, template->branch_info,
sizeof(*he->branch_info));
+ he->branch_info->from.ms.maps = maps__get(he->branch_info->from.ms.maps);
he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
+ he->branch_info->to.ms.maps = maps__get(he->branch_info->to.ms.maps);
he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
}
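+	/* The memcpy of the template left mem_info shared; clone it so this entry owns a copy. */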
+ if (he->mem_info) {
+ he->mem_info = mem_info__clone(template->mem_info);
+ if (he->mem_info == NULL)
+ goto err_infos;
+ }
+
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
callchain_init(he->callchain);
@@ -620,12 +628,6 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (symbol_conf.cumulate_callchain)
he_stat__add_period(he->stat_acc, period);
- /*
- * This mem info was allocated from sample__resolve_mem
- * and will not be used anymore.
- */
- mem_info__zput(entry->mem_info);
-
block_info__delete(entry->block_info);
kvm_info__zput(entry->kvm_info);
@@ -636,7 +638,12 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
* mis-adjust symbol addresses when computing
* the history counter to increment.
*/
- if (he->ms.map != entry->ms.map) {
+ if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
+ if (he->ms.sym) {
+ u64 addr = he->ms.sym->start;
+ he->ms.sym = map__find_symbol(entry->ms.map, addr);
+ }
+
map__put(he->ms.map);
he->ms.map = map__get(entry->ms.map);
}
@@ -739,7 +746,7 @@ __hists__add_entry(struct hists *hists,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
.hists = hists,
.branch_info = bi,
- .mem_info = mem_info__get(mi),
+ .mem_info = mi,
.kvm_info = ki,
.block_info = block_info,
.transaction = sample->transaction,
@@ -970,10 +977,21 @@ out:
return err;
}
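+/* Release the map_symbol references and srclines held by a branch_info. */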
+static void branch_info__exit(struct branch_info *bi)
+{
+ map_symbol__exit(&bi->from.ms);
+ map_symbol__exit(&bi->to.ms);
+ zfree_srcline(&bi->srcline_from);
+ zfree_srcline(&bi->srcline_to);
+}
+
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
+ for (int i = 0; i < iter->total; i++)
+ branch_info__exit(&iter->bi[i]);
+
zfree(&iter->bi);
iter->he = NULL;
@@ -1319,10 +1337,7 @@ void hist_entry__delete(struct hist_entry *he)
map_symbol__exit(&he->ms);
if (he->branch_info) {
- map_symbol__exit(&he->branch_info->from.ms);
- map_symbol__exit(&he->branch_info->to.ms);
- zfree_srcline(&he->branch_info->srcline_from);
- zfree_srcline(&he->branch_info->srcline_to);
+ branch_info__exit(he->branch_info);
zfree(&he->branch_info);
}
@@ -2370,6 +2385,11 @@ void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
hists->stats.nr_lost_samples += lost;
}
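+/* Account for samples reported lost by the BPF sample filter. */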
+void hists__inc_nr_dropped_samples(struct hists *hists, u32 lost)
+{
+ hists->stats.nr_dropped_samples += lost;
+}
+
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *pair)
{
@@ -2667,7 +2687,7 @@ int hists__unlink(struct hists *hists)
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
struct perf_sample *sample, bool nonany_branch_mode,
- u64 *total_cycles)
+ u64 *total_cycles, struct evsel *evsel)
{
struct branch_info *bi;
struct branch_entry *entries = perf_sample__branch_entries(sample);
@@ -2691,7 +2711,8 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
for (int i = bs->nr - 1; i >= 0; i--) {
addr_map_symbol__account_cycles(&bi[i].from,
nonany_branch_mode ? NULL : prev,
- bi[i].flags.cycles);
+ bi[i].flags.cycles, evsel,
+ bi[i].branch_stack_cntr);
prev = &bi[i].to;
if (total_cycles)
@@ -2713,18 +2734,24 @@ size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
+ u64 total_samples = hists->stats.nr_samples;
+
+ total_samples += hists->stats.nr_lost_samples;
+ total_samples += hists->stats.nr_dropped_samples;
- if (symbol_conf.skip_empty && !hists->stats.nr_samples &&
- !hists->stats.nr_lost_samples)
+ if (symbol_conf.skip_empty && total_samples == 0)
continue;
ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
if (hists->stats.nr_samples)
- ret += fprintf(fp, "%16s events: %10d\n",
+ ret += fprintf(fp, "%20s events: %10d\n",
"SAMPLE", hists->stats.nr_samples);
if (hists->stats.nr_lost_samples)
- ret += fprintf(fp, "%16s events: %10d\n",
+ ret += fprintf(fp, "%20s events: %10d\n",
"LOST_SAMPLES", hists->stats.nr_lost_samples);
+ if (hists->stats.nr_dropped_samples)
+ ret += fprintf(fp, "%20s events: %10d\n",
+ "LOST_SAMPLES (BPF)", hists->stats.nr_dropped_samples);
}
return ret;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 5273f5c37050..7d7ae94b4b31 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -86,6 +86,7 @@ enum hist_column {
HISTC_TYPE,
HISTC_TYPE_OFFSET,
HISTC_SYMBOL_OFFSET,
+ HISTC_TYPE_CACHELINE,
HISTC_NR_COLS, /* Last entry */
};
@@ -371,6 +372,7 @@ void hists__inc_stats(struct hists *hists, struct hist_entry *h);
void hists__inc_nr_events(struct hists *hists);
void hists__inc_nr_samples(struct hists *hists, bool filtered);
void hists__inc_nr_lost_samples(struct hists *hists, u32 lost);
+void hists__inc_nr_dropped_samples(struct hists *hists, u32 lost);
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
@@ -742,7 +744,7 @@ unsigned int hists__overhead_width(struct hists *hists);
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
struct perf_sample *sample, bool nonany_branch_mode,
- u64 *total_cycles);
+ u64 *total_cycles, struct evsel *evsel);
struct option;
int parse_filter_percentage(const struct option *opt, const char *arg, int unset);
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
index 01fb25a1150a..75b28dcc8317 100644
--- a/tools/perf/util/include/dwarf-regs.h
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PERF_DWARF_REGS_H_
#define _PERF_DWARF_REGS_H_
+#include "annotate.h"
#define DWARF_REG_PC 0xd3af9c /* random number */
#define DWARF_REG_FB 0xd3affb /* random number */
@@ -31,6 +32,16 @@ static inline int get_dwarf_regnum(const char *name __maybe_unused,
}
#endif
+#if !defined(__powerpc__) || !defined(HAVE_DWARF_SUPPORT)
+static inline void get_powerpc_regs(u32 raw_insn __maybe_unused, int is_source __maybe_unused,
+ struct annotated_op_loc *op_loc __maybe_unused)
+{
+ return;
+}
+#else
+void get_powerpc_regs(u32 raw_insn, int is_source, struct annotated_op_loc *op_loc);
+#endif
+
#ifdef HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
/*
* Arch should support fetching the offset of a register in pt_regs
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ec1b3bd9f530..27d9b5c9fec8 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -591,7 +591,7 @@ static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
static int intel_bts_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
@@ -634,7 +634,7 @@ static int intel_bts_process_event(struct perf_session *session,
static int intel_bts_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
@@ -675,7 +675,7 @@ static int intel_bts_process_auxtrace_event(struct perf_session *session,
}
static int intel_bts_flush(struct perf_session *session,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
auxtrace);
@@ -737,35 +737,6 @@ static bool intel_bts_evsel_is_auxtrace(struct perf_session *session,
return evsel->core.attr.type == bts->pmu_type;
}
-struct intel_bts_synth {
- struct perf_tool dummy_tool;
- struct perf_session *session;
-};
-
-static int intel_bts_event_synth(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- struct intel_bts_synth *intel_bts_synth =
- container_of(tool, struct intel_bts_synth, dummy_tool);
-
- return perf_session__deliver_synth_event(intel_bts_synth->session,
- event, NULL);
-}
-
-static int intel_bts_synth_event(struct perf_session *session,
- struct perf_event_attr *attr, u64 id)
-{
- struct intel_bts_synth intel_bts_synth;
-
- memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
- intel_bts_synth.session = session;
-
- return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
- &id, intel_bts_event_synth);
-}
-
static int intel_bts_synth_events(struct intel_bts *bts,
struct perf_session *session)
{
@@ -814,7 +785,7 @@ static int intel_bts_synth_events(struct intel_bts *bts,
attr.sample_type |= PERF_SAMPLE_ADDR;
pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
id, (u64)attr.sample_type);
- err = intel_bts_synth_event(session, &attr, id);
+ err = perf_session__deliver_synth_attr_event(session, &attr, id);
if (err) {
pr_err("%s: failed to synthesize 'branches' event type\n",
__func__);
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 4407130d91f8..47cf35799a4d 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -209,12 +209,13 @@ int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
return 0;
}
-int arch_is_branch(const unsigned char *buf, size_t len, int x86_64)
+int arch_is_uncond_branch(const unsigned char *buf, size_t len, int x86_64)
{
struct intel_pt_insn in;
if (intel_pt_get_insn(buf, len, x86_64, &in) < 0)
return -1;
- return in.branch != INTEL_PT_BR_NO_BRANCH;
+ return in.branch == INTEL_PT_BR_UNCONDITIONAL ||
+ in.branch == INTEL_PT_BR_INDIRECT;
}
const char *dump_insn(struct perf_insn *x, uint64_t ip __maybe_unused,
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index d6d7b7512505..fd2597613f3d 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -3449,7 +3449,7 @@ out:
static int intel_pt_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
@@ -3533,7 +3533,7 @@ static int intel_pt_process_event(struct perf_session *session,
return err;
}
-static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
+static int intel_pt_flush(struct perf_session *session, const struct perf_tool *tool)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
@@ -3600,7 +3600,7 @@ static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
static int intel_pt_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
@@ -3659,37 +3659,15 @@ static int intel_pt_queue_data(struct perf_session *session,
data_offset, timestamp);
}
-struct intel_pt_synth {
- struct perf_tool dummy_tool;
- struct perf_session *session;
-};
-
-static int intel_pt_event_synth(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- struct intel_pt_synth *intel_pt_synth =
- container_of(tool, struct intel_pt_synth, dummy_tool);
-
- return perf_session__deliver_synth_event(intel_pt_synth->session, event,
- NULL);
-}
-
static int intel_pt_synth_event(struct perf_session *session, const char *name,
struct perf_event_attr *attr, u64 id)
{
- struct intel_pt_synth intel_pt_synth;
int err;
pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
name, id, (u64)attr->sample_type);
- memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
- intel_pt_synth.session = session;
-
- err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
- &id, intel_pt_event_synth);
+ err = perf_session__deliver_synth_attr_event(session, attr, id);
if (err)
pr_err("%s: failed to synthesize '%s' event type\n",
__func__, name);
diff --git a/tools/perf/util/intel-tpebs.c b/tools/perf/util/intel-tpebs.c
new file mode 100644
index 000000000000..50a3c3e07160
--- /dev/null
+++ b/tools/perf/util/intel-tpebs.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel_tpebs.c: Intel TPEBS support
+ */
+
+#include <sys/param.h>
+#include <subcmd/run-command.h>
+#include <thread.h>
+#include "intel-tpebs.h"
+#include <linux/list.h>
+#include <linux/zalloc.h>
+#include <linux/err.h>
+#include "sample.h"
+#include "debug.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "session.h"
+#include "tool.h"
+#include "cpumap.h"
+#include "metricgroup.h"
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <poll.h>
+#include <math.h>
+
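+/* "-" makes the forked perf record write to stdout, which is read back through a pipe. */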
+#define PERF_DATA "-"
+
+bool tpebs_recording;
+static pid_t tpebs_pid = -1;
+static size_t tpebs_event_size;
+static LIST_HEAD(tpebs_results);
+static pthread_t tpebs_reader_thread;
+static struct child_process *tpebs_cmd;
+
+struct tpebs_retire_lat {
+ struct list_head nd;
+ /* Event name */
+ const char *name;
+ /* Event name with the TPEBS modifier R */
+ const char *tpebs_name;
+ /* Count of retire_latency values found in sample data */
+ size_t count;
+ /* Sum of all the retire_latency values in sample data */
+ int sum;
+ /* Average of retire_latency, val = sum / count */
+ double val;
+};
+
+static int get_perf_record_args(const char **record_argv, char buf[],
+ const char *cpumap_buf)
+{
+ struct tpebs_retire_lat *e;
+ int i = 0;
+
+ pr_debug("tpebs: Prepare perf record for retire_latency\n");
+
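+	/* Builds: perf record -W --synth=no --control=fd:<ctl>,<ack> [-C <cpus>] -e <event>... -o - */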
+ record_argv[i++] = "perf";
+ record_argv[i++] = "record";
+ record_argv[i++] = "-W";
+ record_argv[i++] = "--synth=no";
+ record_argv[i++] = buf;
+
+ if (!cpumap_buf) {
+		pr_err("tpebs: A cpumap list is required to run sampling\n");
+ return -ECANCELED;
+ }
+ /* Use -C when cpumap_buf is not "-1" */
+ if (strcmp(cpumap_buf, "-1")) {
+ record_argv[i++] = "-C";
+ record_argv[i++] = cpumap_buf;
+ }
+
+ list_for_each_entry(e, &tpebs_results, nd) {
+ record_argv[i++] = "-e";
+ record_argv[i++] = e->name;
+ }
+
+ record_argv[i++] = "-o";
+ record_argv[i++] = PERF_DATA;
+
+ return 0;
+}
+
+static int prepare_run_command(const char **argv)
+{
+ tpebs_cmd = zalloc(sizeof(struct child_process));
+ if (!tpebs_cmd)
+ return -ENOMEM;
+ tpebs_cmd->argv = argv;
+ tpebs_cmd->out = -1;
+ return 0;
+}
+
+static int start_perf_record(int control_fd[], int ack_fd[],
+ const char *cpumap_buf)
+{
+ const char **record_argv;
+ int ret;
+ char buf[32];
+
+ scnprintf(buf, sizeof(buf), "--control=fd:%d,%d", control_fd[0], ack_fd[1]);
+
+ record_argv = calloc(12 + 2 * tpebs_event_size, sizeof(char *));
+ if (!record_argv)
+ return -ENOMEM;
+
+ ret = get_perf_record_args(record_argv, buf, cpumap_buf);
+ if (ret)
+ goto out;
+
+ ret = prepare_run_command(record_argv);
+ if (ret)
+ goto out;
+ ret = start_command(tpebs_cmd);
+out:
+ free(record_argv);
+ return ret;
+}
+
+static int process_sample_event(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine __maybe_unused)
+{
+ int ret = 0;
+ const char *evname;
+ struct tpebs_retire_lat *t;
+
+ evname = evsel__name(evsel);
+
+ /*
+	 * Per-core results may need handling in the future; for now assume the
+	 * average retire latency value will be used. Save the number of samples
+	 * and the sum of the retire latency values for each event.
+ */
+ list_for_each_entry(t, &tpebs_results, nd) {
+ if (!strcmp(evname, t->name)) {
+ t->count += 1;
+ t->sum += sample->retire_lat;
+ t->val = (double) t->sum / t->count;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int process_feature_event(struct perf_session *session,
+ union perf_event *event)
+{
+ if (event->feat.feat_id < HEADER_LAST_FEATURE)
+ return perf_event__process_feature(session, event);
+ return 0;
+}
+
+static void *__sample_reader(void *arg)
+{
+ struct child_process *child = arg;
+ struct perf_session *session;
+ struct perf_data data = {
+ .mode = PERF_DATA_MODE_READ,
+ .path = PERF_DATA,
+ .file.fd = child->out,
+ };
+ struct perf_tool tool;
+
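+	/* Only sample, feature and attr events are needed to accumulate retire latency. */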
+ perf_tool__init(&tool, /*ordered_events=*/false);
+ tool.sample = process_sample_event;
+ tool.feature = process_feature_event;
+ tool.attr = perf_event__process_attr;
+
+ session = perf_session__new(&data, &tool);
+ if (IS_ERR(session))
+ return NULL;
+ perf_session__process_events(session);
+ perf_session__delete(session);
+
+ return NULL;
+}
+
+/*
+ * tpebs_stop - stop the sample data read thread and the perf record process.
+ */
+static int tpebs_stop(void)
+{
+ int ret = 0;
+
+	/* Like tpebs_start, we should only run tpebs_stop once. */
+ if (tpebs_pid != -1) {
+ kill(tpebs_cmd->pid, SIGTERM);
+ tpebs_pid = -1;
+ pthread_join(tpebs_reader_thread, NULL);
+ close(tpebs_cmd->out);
+ ret = finish_command(tpebs_cmd);
+ if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL)
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * tpebs_start - start tpebs execution.
+ * @evsel_list: retire_latency evsels in this list will be selected and sampled
+ * to get the average retire_latency value.
+ *
+ * This function will later be called at the evlist level, once evlist__open()
+ * is called consistently.
+ */
+int tpebs_start(struct evlist *evsel_list)
+{
+ int ret = 0;
+ struct evsel *evsel;
+ char cpumap_buf[50];
+
+ /*
+	 * We should only run tpebs_start when tpebs_recording is enabled,
+	 * and only once, with all the required events.
+ */
+ if (tpebs_pid != -1 || !tpebs_recording)
+ return 0;
+
+ cpu_map__snprint(evsel_list->core.user_requested_cpus, cpumap_buf, sizeof(cpumap_buf));
+	/*
+	 * Prepare the perf record command for sampling retire_latency before
+	 * forking and preparing the workload.
+	 */
+ evlist__for_each_entry(evsel_list, evsel) {
+ int i;
+ char *name;
+ struct tpebs_retire_lat *new;
+
+ if (!evsel->retire_lat)
+ continue;
+
+ pr_debug("tpebs: Retire_latency of event %s is required\n", evsel->name);
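+		/* Scan backwards for the trailing 'R' (retire_latency) modifier. */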
+ for (i = strlen(evsel->name) - 1; i > 0; i--) {
+ if (evsel->name[i] == 'R')
+ break;
+ }
+ if (i <= 0 || evsel->name[i] != 'R') {
+ ret = -1;
+ goto err;
+ }
+
+ name = strdup(evsel->name);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err;
+ }
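+		/* Replace 'R' with the precise modifier 'p' for the forked perf record. */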
+ name[i] = 'p';
+
+ new = zalloc(sizeof(*new));
+ if (!new) {
+ ret = -1;
+			zfree(&name);
+ goto err;
+ }
+ new->name = name;
+ new->tpebs_name = evsel->name;
+ list_add_tail(&new->nd, &tpebs_results);
+ tpebs_event_size += 1;
+ }
+
+ if (tpebs_event_size > 0) {
+ struct pollfd pollfd = { .events = POLLIN, };
+ int control_fd[2], ack_fd[2], len;
+ char ack_buf[8];
+
+		/* Create the control and ack fds for --control */
+ if (pipe(control_fd) < 0) {
+ pr_err("tpebs: Failed to create control fifo");
+ ret = -1;
+ goto out;
+ }
+ if (pipe(ack_fd) < 0) {
+			pr_err("tpebs: Failed to create ack fifo");
+ ret = -1;
+ goto out;
+ }
+
+ ret = start_perf_record(control_fd, ack_fd, cpumap_buf);
+ if (ret)
+ goto out;
+ tpebs_pid = tpebs_cmd->pid;
+ if (pthread_create(&tpebs_reader_thread, NULL, __sample_reader, tpebs_cmd)) {
+ kill(tpebs_cmd->pid, SIGTERM);
+ close(tpebs_cmd->out);
+ pr_err("Could not create thread to process sample data.\n");
+ ret = -1;
+ goto out;
+ }
+		/* Wait for perf record initialization. */
+ len = strlen(EVLIST_CTL_CMD_ENABLE_TAG);
+ ret = write(control_fd[1], EVLIST_CTL_CMD_ENABLE_TAG, len);
+ if (ret != len) {
+			pr_err("tpebs: failed to write control message to perf record\n");
+ goto out;
+ }
+
+ /* wait for an ack */
+ pollfd.fd = ack_fd[0];
+
+ /*
+		 * We need this poll to ensure the ack_fd pipe will not hang
+		 * when perf record fails for any reason. The timeout value of
+		 * 3000ms was chosen empirically.
+ */
+ if (!poll(&pollfd, 1, 3000)) {
+ pr_err("tpebs failed: perf record ack timeout\n");
+ ret = -1;
+ goto out;
+ }
+
+ if (!(pollfd.revents & POLLIN)) {
+			pr_err("tpebs failed: did not receive an ack\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = read(ack_fd[0], ack_buf, sizeof(ack_buf));
+ if (ret > 0)
+ ret = strcmp(ack_buf, EVLIST_CTL_CMD_ACK_TAG);
+ else {
+ pr_err("tpebs: perf record control ack failed\n");
+ goto out;
+ }
+out:
+ close(control_fd[0]);
+ close(control_fd[1]);
+ close(ack_fd[0]);
+ close(ack_fd[1]);
+ }
+err:
+ if (ret)
+ tpebs_delete();
+ return ret;
+}
+
+int tpebs_set_evsel(struct evsel *evsel, int cpu_map_idx, int thread)
+{
+ __u64 val;
+ bool found = false;
+ struct tpebs_retire_lat *t;
+ struct perf_counts_values *count;
+
+	/* A non-retire_latency evsel should never enter this function. */
+ if (!evsel__is_retire_lat(evsel))
+ return -1;
+
+	/*
+	 * Stop the forked perf record so the sampled data in the pipe is
+	 * processed and a non-zero retire_lat value is obtained for hybrid.
+	 */
+ tpebs_stop();
+ count = perf_counts(evsel->counts, cpu_map_idx, thread);
+
+ list_for_each_entry(t, &tpebs_results, nd) {
+ if (t->tpebs_name == evsel->name ||
+ (evsel->metric_id && !strcmp(t->tpebs_name, evsel->metric_id))) {
+ found = true;
+ break;
+ }
+ }
+
+ /* Set ena and run to non-zero */
+ count->ena = count->run = 1;
+ count->lost = 0;
+
+ if (!found) {
+		/*
+		 * Default to 0 when no retire_latency was found in the
+		 * sampling data for this event (tpebs recording not enabled,
+		 * or no samples recorded).
+		 */
+ count->val = 0;
+ return 0;
+ }
+
+ /*
+ * Only set retire_latency value to the first CPU and thread.
+ */
+ if (cpu_map_idx == 0 && thread == 0)
+ val = rint(t->val);
+ else
+ val = 0;
+
+ count->val = val;
+ return 0;
+}
+
+static void tpebs_retire_lat__delete(struct tpebs_retire_lat *r)
+{
+ zfree(&r->name);
+ free(r);
+}
+
+/*
+ * tpebs_delete - delete tpebs related data and stop the created thread and
+ * process by calling tpebs_stop().
+ *
+ * This function is called from evlist_delete() and also from builtin-stat
+ * stat_handle_error(). If tpebs_start() is called from places other than perf
+ * stat, callers need to ensure tpebs_delete() is also called to safely free
+ * memory and close the data read thread and the forked perf record process.
+ *
+ * This function is also called in evsel__close() to be symmetric with
+ * tpebs_start() being called in evsel__open(). We will update this call site
+ * when moving tpebs_start() to the evlist level.
+ */
+void tpebs_delete(void)
+{
+ struct tpebs_retire_lat *r, *rtmp;
+
+ if (tpebs_pid == -1)
+ return;
+
+ tpebs_stop();
+
+ list_for_each_entry_safe(r, rtmp, &tpebs_results, nd) {
+ list_del_init(&r->nd);
+ tpebs_retire_lat__delete(r);
+ }
+
+ if (tpebs_cmd) {
+ free(tpebs_cmd);
+ tpebs_cmd = NULL;
+ }
+}
diff --git a/tools/perf/util/intel-tpebs.h b/tools/perf/util/intel-tpebs.h
new file mode 100644
index 000000000000..766b3fbd79f1
--- /dev/null
+++ b/tools/perf/util/intel-tpebs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel_tpebs.h: Intel TPEBS support
+ */
+#ifndef INCLUDE__PERF_INTEL_TPEBS_H__
+#define INCLUDE__PERF_INTEL_TPEBS_H__
+
+#include "stat.h"
+#include "evsel.h"
+
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+
+extern bool tpebs_recording;
+int tpebs_start(struct evlist *evsel_list);
+void tpebs_delete(void);
+int tpebs_set_evsel(struct evsel *evsel, int cpu_map_idx, int thread);
+
+#else
+
+static inline int tpebs_start(struct evlist *evsel_list __maybe_unused)
+{
+ return 0;
+}
+
+static inline void tpebs_delete(void) {}
+
+static inline int tpebs_set_evsel(struct evsel *evsel __maybe_unused,
+ int cpu_map_idx __maybe_unused,
+ int thread __maybe_unused)
+{
+ return 0;
+}
+
+#endif
+#endif
diff --git a/tools/perf/util/jit.h b/tools/perf/util/jit.h
index fb810e1b2de7..f4037203e9ec 100644
--- a/tools/perf/util/jit.h
+++ b/tools/perf/util/jit.h
@@ -5,7 +5,8 @@
#include <data.h>
int jit_process(struct perf_session *session, struct perf_data *output,
- struct machine *machine, char *filename, pid_t pid, pid_t tid, u64 *nbytes);
+ struct machine *machine, const char *filename,
+ pid_t pid, pid_t tid, u64 *nbytes);
int jit_inject_record(const char *filename);
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 1f657ef8975f..346513e5e9b7 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -424,7 +424,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
{
struct perf_sample sample;
union perf_event *event;
- struct perf_tool *tool = jd->session->tool;
+ const struct perf_tool *tool = jd->session->tool;
uint64_t code, addr;
uintptr_t uaddr;
char *filename;
@@ -543,7 +543,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
{
struct perf_sample sample;
union perf_event *event;
- struct perf_tool *tool = jd->session->tool;
+ const struct perf_tool *tool = jd->session->tool;
char *filename;
size_t size;
struct stat st;
@@ -710,7 +710,7 @@ jit_process_dump(struct jit_buf_desc *jd)
}
static int
-jit_inject(struct jit_buf_desc *jd, char *path)
+jit_inject(struct jit_buf_desc *jd, const char *path)
{
int ret;
@@ -737,7 +737,7 @@ jit_inject(struct jit_buf_desc *jd, char *path)
* as captured in the RECORD_MMAP record
*/
static int
-jit_detect(char *mmap_name, pid_t pid, struct nsinfo *nsi)
+jit_detect(const char *mmap_name, pid_t pid, struct nsinfo *nsi)
{
char *p;
char *end = NULL;
@@ -821,7 +821,7 @@ int
jit_process(struct perf_session *session,
struct perf_data *output,
struct machine *machine,
- char *filename,
+ const char *filename,
pid_t pid,
pid_t tid,
u64 *nbytes)
diff --git a/tools/perf/util/llvm-c-helpers.cpp b/tools/perf/util/llvm-c-helpers.cpp
new file mode 100644
index 000000000000..663bcaba2041
--- /dev/null
+++ b/tools/perf/util/llvm-c-helpers.cpp
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Must come before the linux/compiler.h include, which defines several
+ * macros (e.g. noinline) that conflict with compiler builtins used
+ * by LLVM.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter" /* Needed for LLVM <= 15 */
+#include <llvm/DebugInfo/Symbolize/Symbolize.h>
+#include <llvm/Support/TargetSelect.h>
+#pragma GCC diagnostic pop
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <linux/compiler.h>
+extern "C" {
+#include <linux/zalloc.h>
+}
+#include "symbol_conf.h"
+#include "llvm-c-helpers.h"
+
+extern "C"
+char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name);
+
+using namespace llvm;
+using llvm::symbolize::LLVMSymbolizer;
+
+/*
+ * Allocate a static LLVMSymbolizer, which will live to the end of the program.
+ * Unlike the bfd paths, LLVMSymbolizer has its own cache, so we do not need
+ * to store anything in the dso struct.
+ */
+static LLVMSymbolizer *get_symbolizer()
+{
+ static LLVMSymbolizer *instance = nullptr;
+ if (instance == nullptr) {
+ LLVMSymbolizer::Options opts;
+ /*
+		 * LLVM sometimes demangles slightly differently from the rest
+		 * of the code, and this mismatch can cause new_inline_sym()
+		 * to get confused and mark a non-inline symbol as inlined
+ * (since the name does not properly match up with base_sym).
+ * Thus, disable the demangling and let the rest of the code
+ * handle it.
+ */
+ opts.Demangle = false;
+ instance = new LLVMSymbolizer(opts);
+ }
+ return instance;
+}
+
+/* Returns 0 on error, 1 on success. */
+static int extract_file_and_line(const DILineInfo &line_info, char **file,
+ unsigned int *line)
+{
+ if (file) {
+ if (line_info.FileName == "<invalid>") {
+ /* Match the convention of libbfd. */
+ *file = nullptr;
+ } else {
+ /* The caller expects to get something it can free(). */
+ *file = strdup(line_info.FileName.c_str());
+ if (*file == nullptr)
+ return 0;
+ }
+ }
+ if (line)
+ *line = line_info.Line;
+ return 1;
+}
+
+extern "C"
+int llvm_addr2line(const char *dso_name, u64 addr,
+ char **file, unsigned int *line,
+ bool unwind_inlines,
+ llvm_a2l_frame **inline_frames)
+{
+ LLVMSymbolizer *symbolizer = get_symbolizer();
+ object::SectionedAddress sectioned_addr = {
+ addr,
+ object::SectionedAddress::UndefSection
+ };
+
+ if (unwind_inlines) {
+ Expected<DIInliningInfo> res_or_err =
+ symbolizer->symbolizeInlinedCode(dso_name,
+ sectioned_addr);
+ if (!res_or_err)
+ return 0;
+ unsigned num_frames = res_or_err->getNumberOfFrames();
+ if (num_frames == 0)
+ return 0;
+
+ if (extract_file_and_line(res_or_err->getFrame(0),
+ file, line) == 0)
+ return 0;
+
+ *inline_frames = (llvm_a2l_frame *)calloc(
+ num_frames, sizeof(**inline_frames));
+ if (*inline_frames == nullptr)
+ return 0;
+
+ for (unsigned i = 0; i < num_frames; ++i) {
+ const DILineInfo &src = res_or_err->getFrame(i);
+
+ llvm_a2l_frame &dst = (*inline_frames)[i];
+ if (src.FileName == "<invalid>")
+ /* Match the convention of libbfd. */
+ dst.filename = nullptr;
+ else
+ dst.filename = strdup(src.FileName.c_str());
+ dst.funcname = strdup(src.FunctionName.c_str());
+ dst.line = src.Line;
+
+ if (dst.filename == nullptr ||
+ dst.funcname == nullptr) {
+ for (unsigned j = 0; j <= i; ++j) {
+ zfree(&(*inline_frames)[j].filename);
+ zfree(&(*inline_frames)[j].funcname);
+ }
+ zfree(inline_frames);
+ return 0;
+ }
+ }
+
+ return num_frames;
+ } else {
+ if (inline_frames)
+ *inline_frames = nullptr;
+
+ Expected<DILineInfo> res_or_err =
+ symbolizer->symbolizeCode(dso_name, sectioned_addr);
+ if (!res_or_err)
+ return 0;
+ return extract_file_and_line(*res_or_err, file, line);
+ }
+}
+
+static char *
+make_symbol_relative_string(struct dso *dso, const char *sym_name,
+ u64 addr, u64 base_addr)
+{
+ if (!strcmp(sym_name, "<invalid>"))
+ return NULL;
+
+ char *demangled = dso__demangle_sym(dso, 0, sym_name);
+ if (base_addr && base_addr != addr) {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "%s+0x%" PRIx64,
+ demangled ? demangled : sym_name, addr - base_addr);
+ free(demangled);
+ return strdup(buf);
+ } else {
+ if (demangled)
+ return demangled;
+ else
+ return strdup(sym_name);
+ }
+}
+
+extern "C"
+char *llvm_name_for_code(struct dso *dso, const char *dso_name, u64 addr)
+{
+ LLVMSymbolizer *symbolizer = get_symbolizer();
+ object::SectionedAddress sectioned_addr = {
+ addr,
+ object::SectionedAddress::UndefSection
+ };
+ Expected<DILineInfo> res_or_err =
+ symbolizer->symbolizeCode(dso_name, sectioned_addr);
+ if (!res_or_err) {
+ return NULL;
+ }
+ return make_symbol_relative_string(
+ dso, res_or_err->FunctionName.c_str(),
+ addr, res_or_err->StartAddress ? *res_or_err->StartAddress : 0);
+}
+
+extern "C"
+char *llvm_name_for_data(struct dso *dso, const char *dso_name, u64 addr)
+{
+ LLVMSymbolizer *symbolizer = get_symbolizer();
+ object::SectionedAddress sectioned_addr = {
+ addr,
+ object::SectionedAddress::UndefSection
+ };
+ Expected<DIGlobal> res_or_err =
+ symbolizer->symbolizeData(dso_name, sectioned_addr);
+ if (!res_or_err) {
+ return NULL;
+ }
+ return make_symbol_relative_string(
+ dso, res_or_err->Name.c_str(),
+ addr, res_or_err->Start);
+}
diff --git a/tools/perf/util/llvm-c-helpers.h b/tools/perf/util/llvm-c-helpers.h
new file mode 100644
index 000000000000..d2b99637a28a
--- /dev/null
+++ b/tools/perf/util/llvm-c-helpers.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_LLVM_C_HELPERS
+#define __PERF_LLVM_C_HELPERS 1
+
+/*
+ * Helpers to call into LLVM C++ code from C, for the parts that do not have
+ * C APIs.
+ */
+
+#include <linux/compiler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct dso;
+
+struct llvm_a2l_frame {
+	char *filename;
+	char *funcname;
+ unsigned int line;
+};
+
+/*
+ * Implement addr2line() using libLLVM. LLVM is a C++ API, and
+ * many of the linux/ headers cannot be included in a C++ compile unit,
+ * so we need to make a little bridge code here. llvm_addr2line() will
+ * convert the inline frame information from LLVM's internal structures
+ * and put them into a flat array given in inline_frames. The caller
+ * is then responsible for taking that array and convert it into perf's
+ * regular inline frame structures (which depend on e.g. struct list_head).
+ *
+ * If the address could not be resolved, or an error occurred (e.g. OOM),
+ * returns 0. Otherwise, returns the number of inline frames (which means 1
+ * if the address was not part of an inlined function). If unwind_inlines
+ * is set and the return code is nonzero, inline_frames will be set to
+ * a newly allocated array with that length. The caller is then responsible
+ * for freeing both the strings and the array itself.
+ */
+int llvm_addr2line(const char *dso_name,
+		   u64 addr,
+		   char **file,
+		   unsigned int *line,
+		   bool unwind_inlines,
+		   struct llvm_a2l_frame **inline_frames);
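+/*
+ * Illustrative sketch (path, address and result handling are hypothetical):
+ *
+ *	struct llvm_a2l_frame *frames = NULL;
+ *	char *file = NULL;
+ *	unsigned int line = 0;
+ *	int n = llvm_addr2line("/usr/lib/libfoo.so", 0x12345, &file, &line,
+ *			       true, &frames);
+ *
+ *	for (int i = 0; i < n; i++) {
+ *		free(frames[i].filename);
+ *		free(frames[i].funcname);
+ *	}
+ *	free(frames);
+ *	free(file);
+ */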
+
+/*
+ * Simple symbolizers for addresses; will convert something like
+ * 0x12345 to "func+0x123". Will return NULL if no symbol was found.
+ *
+ * The returned value must be freed by the caller, with free().
+ */
+char *llvm_name_for_code(struct dso *dso, const char *dso_name, u64 addr);
+char *llvm_name_for_data(struct dso *dso, const char *dso_name, u64 addr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __PERF_LLVM_C_HELPERS */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8477edefc299..fad227b625d1 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -642,8 +642,9 @@ int machine__process_lost_event(struct machine *machine __maybe_unused,
int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
union perf_event *event, struct perf_sample *sample)
{
- dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
- sample->id, event->lost_samples.lost);
+ dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "%s\n",
+ sample->id, event->lost_samples.lost,
+ event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : "");
return 0;
}
@@ -2059,7 +2060,8 @@ static int add_callchain_ip(struct thread *thread,
bool branch,
struct branch_flags *flags,
struct iterations *iter,
- u64 branch_from)
+ u64 branch_from,
+ bool symbols)
{
struct map_symbol ms = {};
struct addr_location al;
@@ -2098,7 +2100,8 @@ static int add_callchain_ip(struct thread *thread,
}
goto out;
}
- thread__find_symbol(thread, *cpumode, ip, &al);
+ if (symbols)
+ thread__find_symbol(thread, *cpumode, ip, &al);
}
if (al.sym != NULL) {
@@ -2141,6 +2144,7 @@ struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
unsigned int i;
const struct branch_stack *bs = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
+ u64 *branch_stack_cntr = sample->branch_stack_cntr;
struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
if (!bi)
@@ -2150,6 +2154,8 @@ struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
bi[i].flags = entries[i].flags;
+ if (branch_stack_cntr)
+ bi[i].branch_stack_cntr = branch_stack_cntr[i];
}
return bi;
}
@@ -2224,7 +2230,8 @@ static int lbr_callchain_add_kernel_ip(struct thread *thread,
struct symbol **parent,
struct addr_location *root_al,
u64 branch_from,
- bool callee, int end)
+ bool callee, int end,
+ bool symbols)
{
struct ip_callchain *chain = sample->callchain;
u8 cpumode = PERF_RECORD_MISC_USER;
@@ -2234,7 +2241,8 @@ static int lbr_callchain_add_kernel_ip(struct thread *thread,
for (i = 0; i < end + 1; i++) {
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, chain->ips[i],
- false, NULL, NULL, branch_from);
+ false, NULL, NULL, branch_from,
+ symbols);
if (err)
return err;
}
@@ -2244,7 +2252,8 @@ static int lbr_callchain_add_kernel_ip(struct thread *thread,
for (i = end; i >= 0; i--) {
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, chain->ips[i],
- false, NULL, NULL, branch_from);
+ false, NULL, NULL, branch_from,
+ symbols);
if (err)
return err;
}
@@ -2270,8 +2279,12 @@ static void save_lbr_cursor_node(struct thread *thread,
cursor->curr = cursor->first;
else
cursor->curr = cursor->curr->next;
+
+ map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
sizeof(struct callchain_cursor_node));
+ lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
+ lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
lbr_stitch->prev_lbr_cursor[idx].valid = true;
cursor->pos++;
@@ -2283,7 +2296,8 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
struct symbol **parent,
struct addr_location *root_al,
u64 *branch_from,
- bool callee)
+ bool callee,
+ bool symbols)
{
struct branch_stack *lbr_stack = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
@@ -2316,7 +2330,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
true, flags, NULL,
- *branch_from);
+ *branch_from, symbols);
if (err)
return err;
@@ -2341,7 +2355,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
true, flags, NULL,
- *branch_from);
+ *branch_from, symbols);
if (err)
return err;
save_lbr_cursor_node(thread, cursor, i);
@@ -2356,7 +2370,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
true, flags, NULL,
- *branch_from);
+ *branch_from, symbols);
if (err)
return err;
save_lbr_cursor_node(thread, cursor, i);
@@ -2370,7 +2384,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
true, flags, NULL,
- *branch_from);
+ *branch_from, symbols);
if (err)
return err;
}
@@ -2482,6 +2496,9 @@ static bool has_stitched_lbr(struct thread *thread,
memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
sizeof(struct callchain_cursor_node));
+ stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
+ stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
+
if (callee)
list_add(&stitch_node->node, &lbr_stitch->lists);
else
@@ -2505,6 +2522,8 @@ static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
goto free_lbr_stitch;
+ thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
+
INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
@@ -2532,7 +2551,8 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
struct symbol **parent,
struct addr_location *root_al,
int max_stack,
- unsigned int max_lbr)
+ unsigned int max_lbr,
+ bool symbols)
{
bool callee = (callchain_param.order == ORDER_CALLEE);
struct ip_callchain *chain = sample->callchain;
@@ -2560,8 +2580,12 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
max_lbr, callee);
if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
- list_replace_init(&lbr_stitch->lists,
- &lbr_stitch->free_lists);
+ struct stitch_list *stitch_node;
+
+ list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
+ map_symbol__exit(&stitch_node->cursor.ms);
+
+ list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
}
memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
}
@@ -2570,12 +2594,12 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
/* Add kernel ip */
err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
parent, root_al, branch_from,
- true, i);
+ true, i, symbols);
if (err)
goto error;
err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
- root_al, &branch_from, true);
+ root_al, &branch_from, true, symbols);
if (err)
goto error;
@@ -2592,14 +2616,14 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
goto error;
}
err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
- root_al, &branch_from, false);
+ root_al, &branch_from, false, symbols);
if (err)
goto error;
/* Add kernel ip */
err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
parent, root_al, branch_from,
- false, i);
+ false, i, symbols);
if (err)
goto error;
}
@@ -2613,7 +2637,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
struct callchain_cursor *cursor,
struct symbol **parent,
struct addr_location *root_al,
- u8 *cpumode, int ent)
+ u8 *cpumode, int ent, bool symbols)
{
int err = 0;
@@ -2623,7 +2647,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
if (ip >= PERF_CONTEXT_MAX) {
err = add_callchain_ip(thread, cursor, parent,
root_al, cpumode, ip,
- false, NULL, NULL, 0);
+ false, NULL, NULL, 0, symbols);
break;
}
}
@@ -2645,7 +2669,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
- int max_stack)
+ int max_stack,
+ bool symbols)
{
struct branch_stack *branch = sample->branch_stack;
struct branch_entry *entries = perf_sample__branch_entries(sample);
@@ -2665,7 +2690,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
root_al, max_stack,
- !env ? 0 : env->max_branches);
+ !env ? 0 : env->max_branches,
+ symbols);
if (err)
return (err < 0) ? err : 0;
}
@@ -2730,13 +2756,14 @@ static int thread__resolve_callchain_sample(struct thread *thread,
root_al,
NULL, be[i].to,
true, &be[i].flags,
- NULL, be[i].from);
+ NULL, be[i].from, symbols);
- if (!err)
+ if (!err) {
err = add_callchain_ip(thread, cursor, parent, root_al,
NULL, be[i].from,
true, &be[i].flags,
- &iter[i], 0);
+ &iter[i], 0, symbols);
+ }
if (err == -EINVAL)
break;
if (err)
@@ -2752,7 +2779,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
check_calls:
if (chain && callchain_param.order != ORDER_CALLEE) {
err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
- &cpumode, chain->nr - first_call);
+ &cpumode, chain->nr - first_call, symbols);
if (err)
return (err < 0) ? err : 0;
}
@@ -2774,7 +2801,7 @@ check_calls:
++nr_entries;
else if (callchain_param.order != ORDER_CALLEE) {
err = find_prev_cpumode(chain, thread, cursor, parent,
- root_al, &cpumode, j);
+ root_al, &cpumode, j, symbols);
if (err)
return (err < 0) ? err : 0;
continue;
@@ -2801,8 +2828,8 @@ check_calls:
if (leaf_frame_caller && leaf_frame_caller != ip) {
err = add_callchain_ip(thread, cursor, parent,
- root_al, &cpumode, leaf_frame_caller,
- false, NULL, NULL, 0);
+ root_al, &cpumode, leaf_frame_caller,
+ false, NULL, NULL, 0, symbols);
if (err)
return (err < 0) ? err : 0;
}
@@ -2810,7 +2837,7 @@ check_calls:
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip,
- false, NULL, NULL, 0);
+ false, NULL, NULL, 0, symbols);
if (err)
return (err < 0) ? err : 0;
@@ -2890,7 +2917,7 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
struct callchain_cursor *cursor,
struct evsel *evsel,
struct perf_sample *sample,
- int max_stack)
+ int max_stack, bool symbols)
{
/* Can we do dwarf post unwind? */
if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
@@ -2902,17 +2929,21 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
(!sample->user_stack.size))
return 0;
+ if (!symbols)
+ pr_debug("Not resolving symbols with an unwinder isn't currently supported\n");
+
return unwind__get_entries(unwind_entry, cursor,
thread, sample, max_stack, false);
}
-int thread__resolve_callchain(struct thread *thread,
- struct callchain_cursor *cursor,
- struct evsel *evsel,
- struct perf_sample *sample,
- struct symbol **parent,
- struct addr_location *root_al,
- int max_stack)
+int __thread__resolve_callchain(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack,
+ bool symbols)
{
int ret = 0;
@@ -2925,22 +2956,22 @@ int thread__resolve_callchain(struct thread *thread,
ret = thread__resolve_callchain_sample(thread, cursor,
evsel, sample,
parent, root_al,
- max_stack);
+ max_stack, symbols);
if (ret)
return ret;
ret = thread__resolve_callchain_unwind(thread, cursor,
evsel, sample,
- max_stack);
+ max_stack, symbols);
} else {
ret = thread__resolve_callchain_unwind(thread, cursor,
evsel, sample,
- max_stack);
+ max_stack, symbols);
if (ret)
return ret;
ret = thread__resolve_callchain_sample(thread, cursor,
evsel, sample,
parent, root_al,
- max_stack);
+ max_stack, symbols);
}
return ret;
@@ -3112,7 +3143,8 @@ out:
return addr_cpumode;
}
-struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
+struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename,
+ const struct dso_id *id)
{
return dsos__findnew_id(&machine->dsos, filename, id);
}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 82a47bac8023..2e5a4cb342d8 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -178,13 +178,32 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
struct callchain_cursor;
-int thread__resolve_callchain(struct thread *thread,
- struct callchain_cursor *cursor,
- struct evsel *evsel,
- struct perf_sample *sample,
- struct symbol **parent,
- struct addr_location *root_al,
- int max_stack);
+int __thread__resolve_callchain(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack,
+ bool symbols);
+
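+/* As __thread__resolve_callchain() but with symbol resolution always enabled. */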
+static inline int thread__resolve_callchain(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack)
+{
+ return __thread__resolve_callchain(thread,
+ cursor,
+ evsel,
+ sample,
+ parent,
+ root_al,
+ max_stack,
+ /*symbols=*/true);
+}
/*
* Default guest kernel is defined by parameter --guestkallsyms
@@ -207,7 +226,8 @@ int machine__nr_cpus_avail(struct machine *machine);
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
-struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id);
+struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename,
+ const struct dso_id *id);
struct dso *machine__findnew_dso(struct machine *machine, const char *filename);
size_t machine__fprintf(struct machine *machine, FILE *fp);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index e1d14936a60d..d729438b7d65 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -102,16 +102,21 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
return false;
}
-void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
+static void map__init(struct map *map, u64 start, u64 end, u64 pgoff,
+ struct dso *dso, u32 prot, u32 flags)
{
map__set_start(map, start);
map__set_end(map, end);
map__set_pgoff(map, pgoff);
- map__set_reloc(map, 0);
+ assert(map__reloc(map) == 0);
map__set_dso(map, dso__get(dso));
- map__set_mapping_type(map, MAPPING_TYPE__DSO);
- map__set_erange_warned(map, false);
refcount_set(map__refcnt(map), 1);
+ RC_CHK_ACCESS(map)->prot = prot;
+ RC_CHK_ACCESS(map)->flags = flags;
+ map__set_mapping_type(map, MAPPING_TYPE__DSO);
+ assert(map__erange_warned(map) == false);
+ assert(map__priv(map) == false);
+ assert(map__hit(map) == false);
}
struct map *map__new(struct machine *machine, u64 start, u64 len,
@@ -124,7 +129,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
struct nsinfo *nsi = NULL;
struct nsinfo *nnsi;
- map = malloc(sizeof(*map));
+ map = zalloc(sizeof(*map));
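+	/* zalloc so map__init() can assert that fields it no longer sets are zero. */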
if (ADD_RC_CHK(result, map)) {
char newfilename[PATH_MAX];
struct dso *dso, *header_bid_dso;
@@ -134,8 +139,6 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
anon = is_anon_memory(filename) || flags & MAP_HUGETLB;
vdso = is_vdso_map(filename);
no_dso = is_no_dso_memory(filename);
- map->prot = prot;
- map->flags = flags;
nsi = nsinfo__get(thread__nsinfo(thread));
if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
@@ -169,7 +172,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
goto out_delete;
assert(!dso__kernel(dso));
- map__init(result, start, start + len, pgoff, dso);
+ map__init(result, start, start + len, pgoff, dso, prot, flags);
if (anon || no_dso) {
map->mapping_type = MAPPING_TYPE__IDENTITY;
@@ -223,10 +226,8 @@ struct map *map__new2(u64 start, struct dso *dso)
map = calloc(1, sizeof(*map) + (dso__kernel(dso) ? sizeof(struct kmap) : 0));
if (ADD_RC_CHK(result, map)) {
- /*
- * ->end will be filled after we load all the symbols
- */
- map__init(result, start, 0, 0, dso);
+ /* ->end will be filled after we load all the symbols. */
+ map__init(result, start, /*end=*/0, /*pgoff=*/0, dso, /*prot=*/0, /*flags=*/0);
}
return result;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 65e2609fa1b1..4262f5a143be 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -35,6 +35,7 @@ DECLARE_RC_STRUCT(map) {
enum mapping_type mapping_type:8;
bool erange_warned;
bool priv;
+ bool hit;
};
struct kmap;
@@ -83,6 +84,11 @@ static inline bool map__priv(const struct map *map)
return RC_CHK_ACCESS(map)->priv;
}
+static inline bool map__hit(const struct map *map)
+{
+ return RC_CHK_ACCESS(map)->hit;
+}
+
static inline refcount_t *map__refcnt(struct map *map)
{
return &RC_CHK_ACCESS(map)->refcnt;
@@ -166,9 +172,6 @@ struct thread;
#define map__for_each_symbol_by_name(map, sym_name, pos, idx) \
__map__for_each_symbol_by_name(map, sym_name, (pos), idx)
-void map__init(struct map *map,
- u64 start, u64 end, u64 pgoff, struct dso *dso);
-
struct dso_id;
struct build_id;
@@ -285,14 +288,19 @@ static inline void map__set_reloc(struct map *map, u64 reloc)
RC_CHK_ACCESS(map)->reloc = reloc;
}
-static inline void map__set_priv(struct map *map, int priv)
+static inline void map__set_priv(struct map *map)
+{
+ RC_CHK_ACCESS(map)->priv = true;
+}
+
+static inline void map__set_hit(struct map *map)
{
- RC_CHK_ACCESS(map)->priv = priv;
+ RC_CHK_ACCESS(map)->hit = true;
}
-static inline void map__set_erange_warned(struct map *map, bool erange_warned)
+static inline void map__set_erange_warned(struct map *map)
{
- RC_CHK_ACCESS(map)->erange_warned = erange_warned;
+ RC_CHK_ACCESS(map)->erange_warned = true;
}
static inline void map__set_dso(struct map *map, struct dso *dso)
diff --git a/tools/perf/util/map_symbol.c b/tools/perf/util/map_symbol.c
index bef5079f2403..6ad2960bc289 100644
--- a/tools/perf/util/map_symbol.c
+++ b/tools/perf/util/map_symbol.c
@@ -13,3 +13,21 @@ void addr_map_symbol__exit(struct addr_map_symbol *ams)
{
map_symbol__exit(&ams->ms);
}
+
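+/* Copy a map_symbol, taking references on its maps and map for the copy. */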
+void map_symbol__copy(struct map_symbol *dst, struct map_symbol *src)
+{
+ dst->maps = maps__get(src->maps);
+ dst->map = map__get(src->map);
+ dst->sym = src->sym;
+}
+
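+/* Copy an addr_map_symbol, including its reference-counted map_symbol. */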
+void addr_map_symbol__copy(struct addr_map_symbol *dst, struct addr_map_symbol *src)
+{
+ map_symbol__copy(&dst->ms, &src->ms);
+
+ dst->addr = src->addr;
+ dst->al_addr = src->al_addr;
+ dst->al_level = src->al_level;
+ dst->phys_addr = src->phys_addr;
+ dst->data_page_size = src->data_page_size;
+}
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
index 72d5ed938ed6..e370bb32ed47 100644
--- a/tools/perf/util/map_symbol.h
+++ b/tools/perf/util/map_symbol.h
@@ -26,4 +26,7 @@ struct addr_map_symbol {
void map_symbol__exit(struct map_symbol *ms);
void addr_map_symbol__exit(struct addr_map_symbol *ams);
+void map_symbol__copy(struct map_symbol *dst, struct map_symbol *src);
+void addr_map_symbol__copy(struct addr_map_symbol *dst, struct addr_map_symbol *src);
+
#endif // __PERF_MAP_SYMBOL
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index be048bd02f36..051feb93ed8d 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -29,6 +29,8 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
};
#undef E
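+/* True when the corresponding mem event was requested for recording. */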
+bool perf_mem_record[PERF_MEM_EVENTS__MAX] = { 0 };
+
static char mem_loads_name[100];
static char mem_stores_name[100];
@@ -163,7 +165,7 @@ int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
continue;
if (strstr(e->tag, tok))
- e->record = found = true;
+ perf_mem_record[j] = found = true;
}
tok = strtok_r(NULL, ",", &saveptr);
@@ -192,7 +194,7 @@ static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu
return !stat(path, &st);
}
-int perf_pmu__mem_events_init(struct perf_pmu *pmu)
+static int __perf_pmu__mem_events_init(struct perf_pmu *pmu)
{
const char *mnt = sysfs__mount();
bool found = false;
@@ -219,6 +221,18 @@ int perf_pmu__mem_events_init(struct perf_pmu *pmu)
return found ? 0 : -ENOENT;
}
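+/* Initialize mem events on every memory-events-capable PMU. */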
+int perf_pmu__mem_events_init(void)
+{
+ struct perf_pmu *pmu = NULL;
+
+ while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
+ if (__perf_pmu__mem_events_init(pmu))
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
void perf_pmu__mem_events_list(struct perf_pmu *pmu)
{
int j;
@@ -249,7 +263,7 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
e = perf_pmu__mem_events_ptr(pmu, j);
- if (!e->record)
+ if (!perf_mem_record[j])
continue;
if (!e->supported) {
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index ca31014d7934..8dc27db9fd52 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
struct perf_mem_event {
- bool record;
bool supported;
bool ldlat;
u32 aux_event;
@@ -28,9 +27,10 @@ struct perf_pmu;
extern unsigned int perf_mem_events__loads_ldlat;
extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];
+extern bool perf_mem_record[PERF_MEM_EVENTS__MAX];
int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str);
-int perf_pmu__mem_events_init(struct perf_pmu *pmu);
+int perf_pmu__mem_events_init(void);
struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i);
struct perf_pmu *perf_mem_events_find_pmu(void);
diff --git a/tools/perf/util/mem-info.c b/tools/perf/util/mem-info.c
index 27d67721a695..d3efa9c139f2 100644
--- a/tools/perf/util/mem-info.c
+++ b/tools/perf/util/mem-info.c
@@ -33,3 +33,16 @@ struct mem_info *mem_info__new(void)
return result;
}
+
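+/* Clone a mem_info, copying the resolved iaddr/daddr and the data_src value. */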
+struct mem_info *mem_info__clone(struct mem_info *mi)
+{
+ struct mem_info *result = mem_info__new();
+
+ if (result) {
+ addr_map_symbol__copy(mem_info__iaddr(result), mem_info__iaddr(mi));
+ addr_map_symbol__copy(mem_info__daddr(result), mem_info__daddr(mi));
+ mem_info__data_src(result)->val = mem_info__data_src(mi)->val;
+ }
+
+ return result;
+}
diff --git a/tools/perf/util/mem-info.h b/tools/perf/util/mem-info.h
index 0f68e29f311b..df75e94ed3d0 100644
--- a/tools/perf/util/mem-info.h
+++ b/tools/perf/util/mem-info.h
@@ -15,6 +15,7 @@ DECLARE_RC_STRUCT(mem_info) {
};
struct mem_info *mem_info__new(void);
+struct mem_info *mem_info__clone(struct mem_info *mi);
struct mem_info *mem_info__get(struct mem_info *mi);
void mem_info__put(struct mem_info *mi);
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 69f6a46402c3..4dff3e925a47 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -1436,7 +1436,7 @@ err_out:
 * parse_ids - Build the event string for the ids and parse them, creating an
* evlist. The encoded metric_ids are decoded.
* @metric_no_merge: is metric sharing explicitly disabled.
- * @fake_pmu: used when testing metrics not supported by the current CPU.
+ * @fake_pmu: use a fake PMU when testing metrics not supported by the current CPU.
* @ids: the event identifiers parsed from a metric.
* @modifier: any modifiers added to the events.
* @group_events: should events be placed in a weak group.
@@ -1444,7 +1444,7 @@ err_out:
* the overall list of metrics.
* @out_evlist: the created list of events.
*/
-static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
+static int parse_ids(bool metric_no_merge, bool fake_pmu,
struct expr_parse_ctx *ids, const char *modifier,
bool group_events, const bool tool_events[PERF_TOOL_MAX],
struct evlist **out_evlist)
@@ -1528,7 +1528,7 @@ static int parse_groups(struct evlist *perf_evlist,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
- struct perf_pmu *fake_pmu,
+ bool fake_pmu,
struct rblist *metric_events_list,
const struct pmu_metrics_table *table)
{
@@ -1703,7 +1703,7 @@ int metricgroup__parse_groups(struct evlist *perf_evlist,
return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
metric_no_threshold, user_requested_cpu_list, system_wide,
- /*fake_pmu=*/NULL, metric_events, table);
+ /*fake_pmu=*/false, metric_events, table);
}
int metricgroup__parse_groups_test(struct evlist *evlist,
@@ -1717,7 +1717,7 @@ int metricgroup__parse_groups_test(struct evlist *evlist,
/*metric_no_threshold=*/false,
/*user_requested_cpu_list=*/NULL,
/*system_wide=*/false,
- &perf_pmu__fake, metric_events, table);
+ /*fake_pmu=*/true, metric_events, table);
}
struct metricgroup__has_metric_data {
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 122ee198a86e..43b02293f1d2 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -230,9 +230,7 @@ void mmap__munmap(struct mmap *map)
{
bitmap_free(map->affinity_mask.bits);
-#ifndef PYTHON_PERF
zstd_fini(&map->zstd_data);
-#endif
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
@@ -295,12 +293,10 @@ int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu
map->core.flush = mp->flush;
-#ifndef PYTHON_PERF
if (zstd_init(&map->zstd_data, mp->comp_level)) {
pr_debug2("failed to init mmap compressor, error %d\n", errno);
return -1;
}
-#endif
if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 321586fb5556..9a8be1e46d67 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -8,6 +8,7 @@
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
+#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
@@ -227,12 +228,12 @@ __add_event(struct list_head *list, int *idx,
bool init_attr,
const char *name, const char *metric_id, struct perf_pmu *pmu,
struct list_head *config_terms, bool auto_merge_stats,
- const char *cpu_list)
+ struct perf_cpu_map *cpu_list)
{
struct evsel *evsel;
- struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
- cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
+ struct perf_cpu_map *cpus = perf_cpu_map__is_empty(cpu_list) && pmu ? pmu->cpus : cpu_list;
+ cpus = perf_cpu_map__get(cpus);
if (pmu)
perf_pmu__warn_invalid_formats(pmu);
@@ -305,16 +306,17 @@ static int add_event_tool(struct list_head *list, int *idx,
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
};
- const char *cpu_list = NULL;
+ struct perf_cpu_map *cpu_list = NULL;
if (tool_event == PERF_TOOL_DURATION_TIME) {
/* Duration time is gathered globally, pretend it is only on CPU0. */
- cpu_list = "0";
+ cpu_list = perf_cpu_map__new("0");
}
evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, /*pmu=*/NULL,
/*config_terms=*/NULL, /*auto_merge_stats=*/false,
cpu_list);
+ perf_cpu_map__put(cpu_list);
if (!evsel)
return -ENOMEM;
evsel->tool_event = tool_event;
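
`__add_event()` now takes a `struct perf_cpu_map *` rather than a CPU list string and grabs its own reference on the map it keeps, so callers such as `add_event_tool()` create the map, pass it in, and drop their own reference afterwards. A minimal ownership sketch against the libperf API; the function name is illustrative:

```c
#include <perf/cpumap.h>

static void demo_cpu_map_ownership(void)
{
	struct perf_cpu_map *map = perf_cpu_map__new("0");

	/* ... hand map to __add_event(), which perf_cpu_map__get()s it ... */

	perf_cpu_map__put(map);	/* drop the creator's reference */
}
```
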
@@ -670,6 +672,26 @@ static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
}
#endif /* HAVE_LIBTRACEEVENT */
+size_t default_breakpoint_len(void)
+{
+#if defined(__i386__)
+ static int len;
+
+ if (len == 0) {
+ struct perf_env env = {};
+
+ perf_env__init(&env);
+ len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
+ perf_env__exit(&env);
+ }
+ return len;
+#elif defined(__aarch64__)
+ return 4;
+#else
+ return sizeof(long);
+#endif
+}
+
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
@@ -728,7 +750,7 @@ int parse_events_add_breakpoint(struct parse_events_state *parse_state,
/* Provide some defaults if len is not specified */
if (!len) {
if (attr.bp_type == HW_BREAKPOINT_X)
- len = sizeof(long);
+ len = default_breakpoint_len();
else
len = HW_BREAKPOINT_LEN_4;
}
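
A sketch of wiring the new helper into an execution-breakpoint attr: on i386 it probes whether the running kernel is 64-bit, on arm64 it is 4 bytes, and elsewhere it falls back to sizeof(long). `init_exec_bp()` is a hypothetical helper:

```c
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include "util/parse-events.h"

static void init_exec_bp(struct perf_event_attr *attr, __u64 addr)
{
	attr->type = PERF_TYPE_BREAKPOINT;
	attr->bp_type = HW_BREAKPOINT_X;
	attr->bp_addr = addr;
	attr->bp_len = default_breakpoint_len();
}
```
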
@@ -1478,8 +1500,8 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
}
/* Look for event names in the terms and rewrite into format based terms. */
- if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms,
- &info, &alias_rewrote_terms, err)) {
+ if (perf_pmu__check_alias(pmu, &parsed_terms,
+ &info, &alias_rewrote_terms, err)) {
parse_events_terms__exit(&parsed_terms);
return -EINVAL;
}
@@ -1515,8 +1537,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
return -ENOMEM;
}
- if (!parse_state->fake_pmu &&
- perf_pmu__config(pmu, &attr, &parsed_terms, parse_state->error)) {
+ if (perf_pmu__config(pmu, &attr, &parsed_terms, parse_state->error)) {
free_config_terms(&config_terms);
parse_events_terms__exit(&parsed_terms);
return -EINVAL;
@@ -1536,11 +1557,6 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel->percore = config_term_percore(&evsel->config_terms);
- if (parse_state->fake_pmu) {
- parse_events_terms__exit(&parsed_terms);
- return 0;
- }
-
parse_events_terms__exit(&parsed_terms);
free((char *)evsel->unit);
evsel->unit = strdup(info.unit);
@@ -1616,13 +1632,13 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
}
if (parse_state->fake_pmu) {
- if (!parse_events_add_pmu(parse_state, list, parse_state->fake_pmu, &parsed_terms,
+ if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
/*auto_merge_stats=*/true)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
parse_events_terms__to_strbuf(&parsed_terms, &sb);
- pr_debug("%s -> %s/%s/\n", event_name, "fake_pmu", sb.buf);
+ pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
strbuf_release(&sb);
ok++;
}
@@ -1656,11 +1672,18 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
INIT_LIST_HEAD(*listp);
/* Attempt to add to list assuming event_or_pmu is a PMU name. */
- pmu = parse_state->fake_pmu ?: perf_pmus__find(event_or_pmu);
+ pmu = perf_pmus__find(event_or_pmu);
if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
/*auto_merge_stats=*/false))
return 0;
+ if (parse_state->fake_pmu) {
+ if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
+ const_parsed_terms,
+ /*auto_merge_stats=*/false))
+ return 0;
+ }
+
pmu = NULL;
/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
@@ -1811,6 +1834,8 @@ static int parse_events__modifier_list(struct parse_events_state *parse_state,
evsel->weak_group = true;
if (mod.bpf)
evsel->bpf_counter = true;
+ if (mod.retire_lat)
+ evsel->retire_lat = true;
}
return 0;
}
@@ -1959,8 +1984,8 @@ static int evsel__compute_group_pmu_name(struct evsel *evsel,
}
}
}
- /* Assign the actual name taking care that the fake PMU lacks a name. */
- evsel->group_pmu_name = strdup(group_pmu_name ?: "fake");
+ /* Record computed name. */
+ evsel->group_pmu_name = strdup(group_pmu_name);
return evsel->group_pmu_name ? 0 : -ENOMEM;
}
@@ -2122,7 +2147,7 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
}
int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
- struct parse_events_error *err, struct perf_pmu *fake_pmu,
+ struct parse_events_error *err, bool fake_pmu,
bool warn_if_reordered, bool fake_tp)
{
struct parse_events_state parse_state = {
@@ -2341,7 +2366,7 @@ int parse_events_option(const struct option *opt, const char *str,
parse_events_error__init(&err);
ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
- /*fake_pmu=*/NULL, /*warn_if_reordered=*/true,
+ /*fake_pmu=*/false, /*warn_if_reordered=*/true,
/*fake_tp=*/false);
if (ret) {
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index e13de2c8b706..10cc9c433116 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -31,14 +31,14 @@ int parse_events_option(const struct option *opt, const char *str, int unset);
int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset);
__attribute__((nonnull(1, 2, 4)))
int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
- struct parse_events_error *error, struct perf_pmu *fake_pmu,
+ struct parse_events_error *error, bool fake_pmu,
bool warn_if_reordered, bool fake_tp);
__attribute__((nonnull(1, 2, 3)))
static inline int parse_events(struct evlist *evlist, const char *str,
struct parse_events_error *err)
{
- return __parse_events(evlist, str, /*pmu_filter=*/NULL, err, /*fake_pmu=*/NULL,
+ return __parse_events(evlist, str, /*pmu_filter=*/NULL, err, /*fake_pmu=*/false,
/*warn_if_reordered=*/true, /*fake_tp=*/false);
}
@@ -150,8 +150,8 @@ struct parse_events_state {
struct parse_events_terms *terms;
/* Start token. */
int stoken;
- /* Special fake PMU marker for testing. */
- struct perf_pmu *fake_pmu;
+ /* Use the fake PMU marker for testing. */
+ bool fake_pmu;
/* Skip actual tracepoint processing for testing. */
bool fake_tp;
/* If non-null, when wildcard matching only match the given PMU. */
@@ -203,6 +203,7 @@ struct parse_events_modifier {
bool hypervisor : 1; /* 'h' */
bool guest : 1; /* 'G' */
bool host : 1; /* 'H' */
+ bool retire_lat : 1; /* 'R' */
};
int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
@@ -285,4 +286,6 @@ static inline bool is_sdt_event(char *str __maybe_unused)
}
#endif /* HAVE_LIBELF_SUPPORT */
+size_t default_breakpoint_len(void);
+
#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 16045c383ada..5a0bcd7f166a 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -209,6 +209,7 @@ static int modifiers(struct parse_events_state *parse_state, yyscan_t scanner)
CASE('W', weak);
CASE('e', exclusive);
CASE('b', bpf);
+ CASE('R', retire_lat);
default:
return PE_ERROR;
}
@@ -250,7 +251,7 @@ drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
* If you add a modifier you need to update check_modifier().
* Also, the letters in modifier_event must not be in modifier_bp.
*/
-modifier_event [ukhpPGHSDIWeb]{1,15}
+modifier_event [ukhpPGHSDIWebR]{1,16}
modifier_bp [rwx]{1,3}
lc_type (L1-dcache|l1-d|l1d|L1-data|L1-icache|l1-i|l1i|L1-instruction|LLC|L2|dTLB|d-tlb|Data-TLB|iTLB|i-tlb|Instruction-TLB|branch|branches|bpu|btb|bpc|node)
lc_op_result (load|loads|read|store|stores|write|prefetch|prefetches|speculative-read|speculative-load|refs|Reference|ops|access|misses|miss)
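
An illustrative use of the new 'R' (retire latency) modifier, assuming the `parse_events()` inline from parse-events.h; the modifier sets `evsel->retire_lat` on each parsed event. The event string itself is only an example:

```c
#include "util/evlist.h"
#include "util/parse-events.h"

static int add_retire_lat_event(struct evlist *evlist)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	/* 'R' combines with existing modifiers such as 'u'. */
	ret = parse_events(evlist, "cycles:Ru", &err);
	parse_events_error__exit(&err);
	return ret;
}
```
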
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 986166bc7c78..61bdda01a05a 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -30,10 +30,6 @@
#include "util/evsel_config.h"
#include <regex.h>
-struct perf_pmu perf_pmu__fake = {
- .name = "fake",
-};
-
#define UNIT_MAX_LEN 31 /* max length for event unit name */
enum event_source {
@@ -367,8 +363,8 @@ error:
return -1;
}
-static int
-perf_pmu__parse_per_pkg(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
+static bool perf_pmu__parse_event_source_bool(const char *pmu_name, const char *event_name,
+ const char *suffix)
{
char path[PATH_MAX];
size_t len;
@@ -376,37 +372,36 @@ perf_pmu__parse_per_pkg(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
- return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/events/%s.per-pkg", pmu->name, alias->name);
+ return false;
+
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.%s", pmu_name, event_name, suffix);
fd = open(path, O_RDONLY);
if (fd == -1)
- return -1;
+ return false;
- close(fd);
+#ifndef NDEBUG
+ {
+ char buf[8];
- alias->per_pkg = true;
- return 0;
+ len = read(fd, buf, sizeof(buf));
+ assert(len == 1 || len == 2);
+ assert(buf[0] == '1');
+ }
+#endif
+
+ close(fd);
+ return true;
}
-static int perf_pmu__parse_snapshot(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
+static void perf_pmu__parse_per_pkg(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
{
- char path[PATH_MAX];
- size_t len;
- int fd;
-
- len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
- if (!len)
- return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/events/%s.snapshot", pmu->name, alias->name);
-
- fd = open(path, O_RDONLY);
- if (fd == -1)
- return -1;
+ alias->per_pkg = perf_pmu__parse_event_source_bool(pmu->name, alias->name, "per-pkg");
+}
- alias->snapshot = true;
- close(fd);
- return 0;
+static void perf_pmu__parse_snapshot(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
+{
+ alias->snapshot = perf_pmu__parse_event_source_bool(pmu->name, alias->name, "snapshot");
}
/* Delete an alias entry. */
@@ -1173,6 +1168,11 @@ struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pm
return pmu;
}
+static bool perf_pmu__is_fake(const struct perf_pmu *pmu)
+{
+ return pmu->type == PERF_PMU_TYPE_FAKE;
+}
+
void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
{
struct perf_pmu_format *format;
@@ -1183,7 +1183,7 @@ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
pmu->formats_checked = true;
/* fake pmu doesn't have format list */
- if (pmu == &perf_pmu__fake)
+ if (perf_pmu__is_fake(pmu))
return;
list_for_each_entry(format, &pmu->format, list) {
@@ -1199,8 +1199,12 @@ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
bool evsel__is_aux_event(const struct evsel *evsel)
{
- struct perf_pmu *pmu = evsel__find_pmu(evsel);
+ struct perf_pmu *pmu;
+ if (evsel->needs_auxtrace_mmap)
+ return true;
+
+ pmu = evsel__find_pmu(evsel);
return pmu && pmu->auxtrace;
}
@@ -1507,6 +1511,10 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
{
bool zero = !!pmu->perf_event_attr_init_default;
+	/* Fake PMU doesn't have proper terms, so nothing to configure in attr. */
+ if (perf_pmu__is_fake(pmu))
+ return 0;
+
return perf_pmu__config_terms(pmu, attr, head_terms, zero, err);
}
@@ -1615,6 +1623,10 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_
info->scale = 0.0;
info->snapshot = false;
+ /* Fake PMU doesn't rewrite terms. */
+ if (perf_pmu__is_fake(pmu))
+ goto out;
+
list_for_each_entry_safe(term, h, &head_terms->terms, list) {
alias = pmu_find_alias(pmu, term);
if (!alias)
@@ -1637,7 +1649,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_
list_del_init(&term->list);
parse_events_term__delete(term);
}
-
+out:
/*
* if no unit or scale found in aliases, then
* set defaults as for evsel
@@ -1844,6 +1856,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
struct perf_pmu_alias *event;
struct pmu_event_info info = {
.pmu = pmu,
+ .event_type_desc = "Kernel PMU event",
};
int ret = 0;
struct strbuf sb;
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index b2d3fd291f02..4397c48ad569 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -36,6 +36,10 @@ struct perf_pmu_caps {
struct list_head list;
};
+enum {
+ PERF_PMU_TYPE_FAKE = 0xFFFFFFFF,
+};
+
/**
* struct perf_pmu
*/
@@ -173,9 +177,6 @@ struct perf_pmu {
struct perf_mem_event *mem_events;
};
-/** @perf_pmu__fake: A special global PMU used for testing. */
-extern struct perf_pmu perf_pmu__fake;
-
struct perf_pmu_info {
const char *unit;
double scale;
@@ -193,6 +194,7 @@ struct pmu_event_info {
const char *encoding_desc;
const char *topic;
const char *pmu_name;
+ const char *event_type_desc;
const char *str;
bool deprecated;
};
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index 3fcabfd8fca1..52109af5f2f1 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -69,7 +69,7 @@ size_t pmu_name_len_no_suffix(const char *str)
int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
{
- unsigned long lhs_num = 0, rhs_num = 0;
+ unsigned long long lhs_num = 0, rhs_num = 0;
size_t lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name);
size_t rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name);
int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
@@ -79,9 +79,9 @@ int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
return ret;
if (lhs_pmu_name_len + 1 < strlen(lhs_pmu_name))
- lhs_num = strtoul(&lhs_pmu_name[lhs_pmu_name_len + 1], NULL, 16);
+ lhs_num = strtoull(&lhs_pmu_name[lhs_pmu_name_len + 1], NULL, 16);
if (rhs_pmu_name_len + 1 < strlen(rhs_pmu_name))
- rhs_num = strtoul(&rhs_pmu_name[rhs_pmu_name_len + 1], NULL, 16);
+ rhs_num = strtoull(&rhs_pmu_name[rhs_pmu_name_len + 1], NULL, 16);
return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
}
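
A standalone illustration of the overflow the strtoull() switch avoids: with a 32-bit unsigned long, both 16-digit hex suffixes below would saturate strtoul() and compare equal. The values are hypothetical:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long long a = strtoull("0000000100000000", NULL, 16);
	unsigned long long b = strtoull("0000000200000000", NULL, 16);

	printf("%llx < %llx -> %d\n", a, b, a < b);	/* prints 1: ordered */
	return 0;
}
```
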
@@ -371,6 +371,7 @@ struct sevent {
const char *encoding_desc;
const char *topic;
const char *pmu_name;
+ const char *event_type_desc;
bool deprecated;
};
@@ -444,6 +445,7 @@ static int perf_pmus__print_pmu_events__callback(void *vstate,
COPY_STR(encoding_desc);
COPY_STR(topic);
COPY_STR(pmu_name);
+ COPY_STR(event_type_desc);
#undef COPY_STR
s->deprecated = info->deprecated;
state->index++;
@@ -498,7 +500,7 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
aliases[j].alias,
aliases[j].scale_unit,
aliases[j].deprecated,
- "Kernel PMU event",
+ aliases[j].event_type_desc,
aliases[j].desc,
aliases[j].long_desc,
aliases[j].encoding_desc);
@@ -511,6 +513,7 @@ free:
zfree(&aliases[j].encoding_desc);
zfree(&aliases[j].topic);
zfree(&aliases[j].pmu_name);
+ zfree(&aliases[j].event_type_desc);
}
if (printed && pager_in_use())
printf("\n");
@@ -720,3 +723,14 @@ struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
*/
return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
}
+
+struct perf_pmu *perf_pmus__fake_pmu(void)
+{
+ static struct perf_pmu fake = {
+ .name = "fake",
+ .type = PERF_PMU_TYPE_FAKE,
+ .format = LIST_HEAD_INIT(fake.format),
+ };
+
+ return &fake;
+}
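
A sketch of the new arrangement: the fake PMU is obtained on demand and recognized by its reserved type value instead of by comparing against a global object:

```c
#include <assert.h>
#include "util/pmu.h"
#include "util/pmus.h"

static void check_fake_pmu(void)
{
	struct perf_pmu *fake = perf_pmus__fake_pmu();

	assert(fake->type == PERF_PMU_TYPE_FAKE);	/* test-only PMU */
}
```
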
diff --git a/tools/perf/util/pmus.h b/tools/perf/util/pmus.h
index bdbff02324bb..e1742b56eec7 100644
--- a/tools/perf/util/pmus.h
+++ b/tools/perf/util/pmus.h
@@ -30,5 +30,6 @@ bool perf_pmus__supports_extended_type(void);
char *perf_pmus__default_pmu_name(void);
struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name);
+struct perf_pmu *perf_pmus__fake_pmu(void);
#endif /* __PMUS_H */
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index 3f38c27f0157..81e0135cddf0 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -68,11 +68,12 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus
struct dirent **sys_namelist = NULL;
int sys_items;
- put_tracing_file(events_path);
if (events_fd < 0) {
pr_err("Error: failed to open tracing events directory\n");
+ pr_err("%s: %s\n", events_path, strerror(errno));
return;
}
+ put_tracing_file(events_path);
sys_items = tracing_events__scandir_alphasort(&sys_namelist);
diff --git a/tools/perf/util/print_insn.c b/tools/perf/util/print_insn.c
index a950e9157d2d..a33a7726422d 100644
--- a/tools/perf/util/print_insn.c
+++ b/tools/perf/util/print_insn.c
@@ -32,7 +32,9 @@ size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp)
#ifdef HAVE_LIBCAPSTONE_SUPPORT
#include <capstone/capstone.h>
-static int capstone_init(struct machine *machine, csh *cs_handle, bool is64)
+int capstone_init(struct machine *machine, csh *cs_handle, bool is64, bool disassembler_style);
+
+int capstone_init(struct machine *machine, csh *cs_handle, bool is64, bool disassembler_style)
{
cs_arch arch;
cs_mode mode;
@@ -62,7 +64,13 @@ static int capstone_init(struct machine *machine, csh *cs_handle, bool is64)
}
if (machine__normalized_is(machine, "x86")) {
- cs_option(*cs_handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);
+		/*
+		 * When capstone_init() is used from symbol__disassemble(),
+		 * setting CS_OPT_SYNTAX_ATT depends on whether a disassembler
+		 * style was requested via the annotation options.
+		 */
+ if (disassembler_style)
+ cs_option(*cs_handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);
/*
* Resolving address operands to symbols is implemented
* on x86 by investigating instruction details.
@@ -122,7 +130,7 @@ ssize_t fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpum
int ret;
/* TODO: Try to initiate capstone only once but need a proper place. */
- ret = capstone_init(machine, &cs_handle, is64bit);
+ ret = capstone_init(machine, &cs_handle, is64bit, true);
if (ret < 0)
return ret;
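
A sketch of the widened contract; the prototype is repeated here only for illustration. Passing false keeps capstone's default (Intel) syntax on x86, while true selects AT&T syntax as before:

```c
#include <stdbool.h>
#include <capstone/capstone.h>
#include "util/machine.h"

int capstone_init(struct machine *machine, csh *cs_handle, bool is64,
		  bool disassembler_style);

static int open_default_syntax_handle(struct machine *machine, csh *handle)
{
	return capstone_init(machine, handle, /*is64=*/true,
			     /*disassembler_style=*/false);
}
```
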
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 6fe478b0b61b..73846b73d0cf 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -912,7 +912,7 @@ static int
s390_cpumsf_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool)
+ const struct perf_tool *tool)
{
struct s390_cpumsf *sf = container_of(session->auxtrace,
struct s390_cpumsf,
@@ -952,15 +952,10 @@ s390_cpumsf_process_event(struct perf_session *session,
return err;
}
-struct s390_cpumsf_synth {
- struct perf_tool cpumsf_tool;
- struct perf_session *session;
-};
-
static int
s390_cpumsf_process_auxtrace_event(struct perf_session *session,
union perf_event *event __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
struct s390_cpumsf *sf = container_of(session->auxtrace,
struct s390_cpumsf,
@@ -1003,7 +998,7 @@ static void s390_cpumsf_free_events(struct perf_session *session __maybe_unused)
}
static int s390_cpumsf_flush(struct perf_session *session __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+ const struct perf_tool *tool __maybe_unused)
{
return 0;
}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index fb00f3ad6815..d7183134b669 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -762,6 +762,8 @@ static void regs_map(struct regs_dump *regs, uint64_t mask, const char *arch, ch
}
}
+#define MAX_REG_SIZE 128
+
static int set_regs_in_dict(PyObject *dict,
struct perf_sample *sample,
struct evsel *evsel)
@@ -769,14 +771,7 @@ static int set_regs_in_dict(PyObject *dict,
struct perf_event_attr *attr = &evsel->core.attr;
const char *arch = perf_env__arch(evsel__env(evsel));
- /*
- * Here value 28 is a constant size which can be used to print
- * one register value and its corresponds to:
- * 16 chars is to specify 64 bit register in hexadecimal.
- * 2 chars is for appending "0x" to the hexadecimal value and
- * 10 chars is for register name.
- */
- int size = __sw_hweight64(attr->sample_regs_intr) * 28;
+ int size = (__sw_hweight64(attr->sample_regs_intr) * MAX_REG_SIZE) + 1;
char *bf = malloc(size);
if (!bf)
return -1;
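
A back-of-the-envelope for the new sizing, assuming one formatted "name:0x<value>" chunk per sampled register must fit in MAX_REG_SIZE bytes; the old fixed 28-byte estimate could be overrun by long register names. `regs_buf_size()` is illustrative:

```c
#include <linux/bitops.h>	/* __sw_hweight64() in tools/include */

#define MAX_REG_SIZE 128

static int regs_buf_size(unsigned long long sample_regs_mask)
{
	/* one slot per set mask bit, plus the NUL terminator */
	return __sw_hweight64(sample_regs_mask) * MAX_REG_SIZE + 1;
}
```
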
@@ -888,6 +883,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
set_sample_read_in_dict(dict_sample, sample, evsel);
pydict_set_item_string_decref(dict_sample, "weight",
PyLong_FromUnsignedLongLong(sample->weight));
+ pydict_set_item_string_decref(dict_sample, "ins_lat",
+ PyLong_FromUnsignedLong(sample->ins_lat));
pydict_set_item_string_decref(dict_sample, "transaction",
PyLong_FromUnsignedLongLong(sample->transaction));
set_sample_datasrc_in_dict(dict_sample, sample);
@@ -1317,7 +1314,7 @@ static void python_export_sample_table(struct db_export *dbe,
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
- t = tuple_new(27);
+ t = tuple_new(28);
tuple_set_d64(t, 0, es->db_id);
tuple_set_d64(t, 1, es->evsel->db_id);
@@ -1346,6 +1343,7 @@ static void python_export_sample_table(struct db_export *dbe,
tuple_set_s32(t, 24, es->sample->flags);
tuple_set_d64(t, 25, es->sample->id);
tuple_set_d64(t, 26, es->sample->stream_id);
+ tuple_set_u32(t, 27, es->sample->ins_lat);
call_object(tables->sample_handler, t, "sample_table");
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5596bed1b8c8..dbaf07bf6c5f 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -36,81 +36,20 @@
#include "util.h"
#include "arch/common.h"
#include "units.h"
+#include "annotate.h"
#include <internal/lib.h>
-#ifdef HAVE_ZSTD_SUPPORT
-static int perf_session__process_compressed_event(struct perf_session *session,
- union perf_event *event, u64 file_offset,
- const char *file_path)
-{
- void *src;
- size_t decomp_size, src_size;
- u64 decomp_last_rem = 0;
- size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
- struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;
-
- if (decomp_last) {
- decomp_last_rem = decomp_last->size - decomp_last->head;
- decomp_len += decomp_last_rem;
- }
-
- mmap_len = sizeof(struct decomp) + decomp_len;
- decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
- MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
- if (decomp == MAP_FAILED) {
- pr_err("Couldn't allocate memory for decompression\n");
- return -1;
- }
-
- decomp->file_pos = file_offset;
- decomp->file_path = file_path;
- decomp->mmap_len = mmap_len;
- decomp->head = 0;
-
- if (decomp_last_rem) {
- memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
- decomp->size = decomp_last_rem;
- }
-
- src = (void *)event + sizeof(struct perf_record_compressed);
- src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
-
- decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
- &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
- if (!decomp_size) {
- munmap(decomp, mmap_len);
- pr_err("Couldn't decompress data\n");
- return -1;
- }
-
- decomp->size += decomp_size;
-
- if (session->active_decomp->decomp == NULL)
- session->active_decomp->decomp = decomp;
- else
- session->active_decomp->decomp_last->next = decomp;
-
- session->active_decomp->decomp_last = decomp;
-
- pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
-
- return 0;
-}
-#else /* !HAVE_ZSTD_SUPPORT */
-#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
-#endif
-
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool,
+ const struct perf_tool *tool,
u64 file_offset,
const char *file_path);
-static int perf_session__open(struct perf_session *session, int repipe_fd)
+static int perf_session__open(struct perf_session *session)
{
struct perf_data *data = session->data;
- if (perf_session__read_header(session, repipe_fd) < 0) {
+ if (perf_session__read_header(session) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)\n");
return -1;
}
@@ -196,8 +135,8 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
}
struct perf_session *__perf_session__new(struct perf_data *data,
- bool repipe, int repipe_fd,
- struct perf_tool *tool)
+ struct perf_tool *tool,
+ bool trace_event_repipe)
{
int ret = -ENOMEM;
struct perf_session *session = zalloc(sizeof(*session));
@@ -205,7 +144,7 @@ struct perf_session *__perf_session__new(struct perf_data *data,
if (!session)
goto out;
- session->repipe = repipe;
+ session->trace_event_repipe = trace_event_repipe;
session->tool = tool;
session->decomp_data.zstd_decomp = &session->zstd_data;
session->active_decomp = &session->decomp_data;
@@ -223,7 +162,7 @@ struct perf_session *__perf_session__new(struct perf_data *data,
session->data = data;
if (perf_data__is_read(data)) {
- ret = perf_session__open(session, repipe_fd);
+ ret = perf_session__open(session);
if (ret < 0)
goto out_delete;
@@ -304,6 +243,7 @@ void perf_session__delete(struct perf_session *session)
return;
auxtrace__free(session);
auxtrace_index__free(&session->auxtrace_index);
+ debuginfo_cache__delete();
perf_session__destroy_kernel_maps(session);
perf_decomp__release_events(session->decomp_data.decomp);
perf_env__exit(&session->header.env);
@@ -319,251 +259,6 @@ void perf_session__delete(struct perf_session *session)
free(session);
}
-static int process_event_synth_tracing_data_stub(struct perf_session *session
- __maybe_unused,
- union perf_event *event
- __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct evlist **pevlist
- __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct evlist **pevlist
- __maybe_unused)
-{
- if (dump_trace)
- perf_event__fprintf_event_update(event, stdout);
-
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_sample *sample __maybe_unused,
- struct evsel *evsel __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_event_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct ordered_events *oe __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int skipn(int fd, off_t n)
-{
- char buf[4096];
- ssize_t ret;
-
- while (n > 0) {
- ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
- if (ret <= 0)
- return ret;
- n -= ret;
- }
-
- return 0;
-}
-
-static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
- union perf_event *event)
-{
- dump_printf(": unhandled!\n");
- if (perf_data__is_pipe(session->data))
- skipn(perf_data__fd(session->data), event->auxtrace.size);
- return event->auxtrace.size;
-}
-
-static int process_event_op2_stub(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-
-static
-int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- if (dump_trace)
- perf_event__fprintf_thread_map(event, stdout);
-
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static
-int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- if (dump_trace)
- perf_event__fprintf_cpu_map(event, stdout);
-
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static
-int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- if (dump_trace)
- perf_event__fprintf_stat_config(event, stdout);
-
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
- union perf_event *event)
-{
- if (dump_trace)
- perf_event__fprintf_stat(event, stdout);
-
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
- union perf_event *event)
-{
- if (dump_trace)
- perf_event__fprintf_stat_round(event, stdout);
-
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
- union perf_event *event)
-{
- if (dump_trace)
- perf_event__fprintf_time_conv(event, stdout);
-
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused,
- u64 file_offset __maybe_unused,
- const char *file_path __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-void perf_tool__fill_defaults(struct perf_tool *tool)
-{
- if (tool->sample == NULL)
- tool->sample = process_event_sample_stub;
- if (tool->mmap == NULL)
- tool->mmap = process_event_stub;
- if (tool->mmap2 == NULL)
- tool->mmap2 = process_event_stub;
- if (tool->comm == NULL)
- tool->comm = process_event_stub;
- if (tool->namespaces == NULL)
- tool->namespaces = process_event_stub;
- if (tool->cgroup == NULL)
- tool->cgroup = process_event_stub;
- if (tool->fork == NULL)
- tool->fork = process_event_stub;
- if (tool->exit == NULL)
- tool->exit = process_event_stub;
- if (tool->lost == NULL)
- tool->lost = perf_event__process_lost;
- if (tool->lost_samples == NULL)
- tool->lost_samples = perf_event__process_lost_samples;
- if (tool->aux == NULL)
- tool->aux = perf_event__process_aux;
- if (tool->itrace_start == NULL)
- tool->itrace_start = perf_event__process_itrace_start;
- if (tool->context_switch == NULL)
- tool->context_switch = perf_event__process_switch;
- if (tool->ksymbol == NULL)
- tool->ksymbol = perf_event__process_ksymbol;
- if (tool->bpf == NULL)
- tool->bpf = perf_event__process_bpf;
- if (tool->text_poke == NULL)
- tool->text_poke = perf_event__process_text_poke;
- if (tool->aux_output_hw_id == NULL)
- tool->aux_output_hw_id = perf_event__process_aux_output_hw_id;
- if (tool->read == NULL)
- tool->read = process_event_sample_stub;
- if (tool->throttle == NULL)
- tool->throttle = process_event_stub;
- if (tool->unthrottle == NULL)
- tool->unthrottle = process_event_stub;
- if (tool->attr == NULL)
- tool->attr = process_event_synth_attr_stub;
- if (tool->event_update == NULL)
- tool->event_update = process_event_synth_event_update_stub;
- if (tool->tracing_data == NULL)
- tool->tracing_data = process_event_synth_tracing_data_stub;
- if (tool->build_id == NULL)
- tool->build_id = process_event_op2_stub;
- if (tool->finished_round == NULL) {
- if (tool->ordered_events)
- tool->finished_round = perf_event__process_finished_round;
- else
- tool->finished_round = process_finished_round_stub;
- }
- if (tool->id_index == NULL)
- tool->id_index = process_event_op2_stub;
- if (tool->auxtrace_info == NULL)
- tool->auxtrace_info = process_event_op2_stub;
- if (tool->auxtrace == NULL)
- tool->auxtrace = process_event_auxtrace_stub;
- if (tool->auxtrace_error == NULL)
- tool->auxtrace_error = process_event_op2_stub;
- if (tool->thread_map == NULL)
- tool->thread_map = process_event_thread_map_stub;
- if (tool->cpu_map == NULL)
- tool->cpu_map = process_event_cpu_map_stub;
- if (tool->stat_config == NULL)
- tool->stat_config = process_event_stat_config_stub;
- if (tool->stat == NULL)
- tool->stat = process_stat_stub;
- if (tool->stat_round == NULL)
- tool->stat_round = process_stat_round_stub;
- if (tool->time_conv == NULL)
- tool->time_conv = process_event_time_conv_stub;
- if (tool->feature == NULL)
- tool->feature = process_event_op2_stub;
- if (tool->compressed == NULL)
- tool->compressed = perf_session__process_compressed_event;
- if (tool->finished_init == NULL)
- tool->finished_init = process_event_op2_stub;
-}
-
static void swap_sample_id_all(union perf_event *event, void *data)
{
void *end = (void *) event + event->header.size;
@@ -1076,7 +771,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
* Flush every events below timestamp 7
* etc...
*/
-int perf_event__process_finished_round(struct perf_tool *tool __maybe_unused,
+int perf_event__process_finished_round(const struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe)
{
@@ -1161,7 +856,6 @@ static void branch_stack__printf(struct perf_sample *sample,
struct branch_entry *entries = perf_sample__branch_entries(sample);
bool callstack = evsel__has_branch_callstack(evsel);
u64 *branch_stack_cntr = sample->branch_stack_cntr;
- struct perf_env *env = evsel__env(evsel);
uint64_t i;
if (!callstack) {
@@ -1205,8 +899,11 @@ static void branch_stack__printf(struct perf_sample *sample,
}
if (branch_stack_cntr) {
+ unsigned int br_cntr_width, br_cntr_nr;
+
+ perf_env__find_br_cntr_info(evsel__env(evsel), &br_cntr_nr, &br_cntr_width);
printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n",
- sample->branch_stack->nr, env->br_cntr_width, env->br_cntr_nr);
+ sample->branch_stack->nr, br_cntr_width, br_cntr_nr);
for (i = 0; i < sample->branch_stack->nr; i++)
printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]);
}
@@ -1470,7 +1167,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
}
static int deliver_sample_value(struct evlist *evlist,
- struct perf_tool *tool,
+ const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct sample_read_value *v,
@@ -1502,7 +1199,7 @@ static int deliver_sample_value(struct evlist *evlist,
}
static int deliver_sample_group(struct evlist *evlist,
- struct perf_tool *tool,
+ const struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine,
@@ -1511,6 +1208,9 @@ static int deliver_sample_group(struct evlist *evlist,
int ret = -EINVAL;
struct sample_read_value *v = sample->read.group.values;
+ if (tool->dont_split_sample_group)
+ return deliver_sample_value(evlist, tool, event, sample, v, machine);
+
sample_read_group__for_each(v, sample->read.group.nr, read_format) {
ret = deliver_sample_value(evlist, tool, event, sample, v,
machine);
@@ -1521,7 +1221,7 @@ static int deliver_sample_group(struct evlist *evlist,
return ret;
}
-static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
+static int evlist__deliver_sample(struct evlist *evlist, const struct perf_tool *tool,
union perf_event *event, struct perf_sample *sample,
struct evsel *evsel, struct machine *machine)
{
@@ -1546,7 +1246,7 @@ static int machines__deliver_event(struct machines *machines,
struct evlist *evlist,
union perf_event *event,
struct perf_sample *sample,
- struct perf_tool *tool, u64 file_offset,
+ const struct perf_tool *tool, u64 file_offset,
const char *file_path)
{
struct evsel *evsel;
@@ -1592,8 +1292,9 @@ static int machines__deliver_event(struct machines *machines,
evlist->stats.total_lost += event->lost.lost;
return tool->lost(tool, event, sample, machine);
case PERF_RECORD_LOST_SAMPLES:
- if (tool->lost_samples == perf_event__process_lost_samples &&
- !(event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF))
+ if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
+ evlist->stats.total_dropped_samples += event->lost_samples.lost;
+ else if (tool->lost_samples == perf_event__process_lost_samples)
evlist->stats.total_lost_samples += event->lost_samples.lost;
return tool->lost_samples(tool, event, sample, machine);
case PERF_RECORD_READ:
@@ -1634,7 +1335,7 @@ static int machines__deliver_event(struct machines *machines,
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
- struct perf_tool *tool,
+ const struct perf_tool *tool,
u64 file_offset,
const char *file_path)
{
@@ -1667,13 +1368,12 @@ static s64 perf_session__process_user_event(struct perf_session *session,
const char *file_path)
{
struct ordered_events *oe = &session->ordered_events;
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct perf_sample sample = { .time = 0, };
int fd = perf_data__fd(session->data);
int err;
- if (event->header.type != PERF_RECORD_COMPRESSED ||
- tool->compressed == perf_session__process_compressed_event_stub)
+ if (event->header.type != PERF_RECORD_COMPRESSED || perf_tool__compressed_is_stub(tool))
dump_event(session->evlist, event, file_offset, &sample, file_path);
/* These events are processed right away */
@@ -1754,7 +1454,7 @@ int perf_session__deliver_synth_event(struct perf_session *session,
struct perf_sample *sample)
{
struct evlist *evlist = session->evlist;
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
events_stats__inc(&evlist->stats, event->header.type);
@@ -1764,6 +1464,30 @@ int perf_session__deliver_synth_event(struct perf_session *session,
return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
}
+int perf_session__deliver_synth_attr_event(struct perf_session *session,
+ const struct perf_event_attr *attr,
+ u64 id)
+{
+ union {
+ struct {
+ struct perf_record_header_attr attr;
+ u64 ids[1];
+ } attr_id;
+ union perf_event ev;
+ } ev = {
+ .attr_id.attr.header.type = PERF_RECORD_HEADER_ATTR,
+ .attr_id.attr.header.size = sizeof(ev.attr_id),
+ .attr_id.ids[0] = id,
+ };
+
+ if (attr->size != sizeof(ev.attr_id.attr.attr)) {
+ pr_debug("Unexpected perf_event_attr size\n");
+ return -EINVAL;
+ }
+ ev.attr_id.attr.attr = *attr;
+ return perf_session__deliver_synth_event(session, &ev.ev, NULL);
+}
+
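
An illustrative caller of the new helper, assuming an evsel with at least one allocated sample id; `inject_attr()` is hypothetical:

```c
#include "util/evsel.h"
#include "util/session.h"

static int inject_attr(struct perf_session *session, struct evsel *evsel)
{
	u64 id = evsel->core.id[0];	/* first sample id of the evsel */

	return perf_session__deliver_synth_attr_event(session,
						      &evsel->core.attr, id);
}
```
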
static void event_swap(union perf_event *event, bool sample_id_all)
{
perf_event__swap_op swap;
@@ -1862,7 +1586,7 @@ static s64 perf_session__process_event(struct perf_session *session,
const char *file_path)
{
struct evlist *evlist = session->evlist;
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
int ret;
if (session->header.needs_swap)
@@ -2049,7 +1773,7 @@ static int __perf_session__process_decomp_events(struct perf_session *session);
static int __perf_session__process_pipe_events(struct perf_session *session)
{
struct ordered_events *oe = &session->ordered_events;
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct ui_progress prog;
union perf_event *event;
uint32_t size, cur_size = 0;
@@ -2060,8 +1784,6 @@ static int __perf_session__process_pipe_events(struct perf_session *session)
void *p;
bool update_prog = false;
- perf_tool__fill_defaults(tool);
-
/*
* If it's from a file saving pipe data (by redirection), it would have
* a file name other than "-". Then we can get the total size and show
@@ -2481,12 +2203,10 @@ static int __perf_session__process_events(struct perf_session *session)
.in_place_update = session->data->in_place_update,
};
struct ordered_events *oe = &session->ordered_events;
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
struct ui_progress prog;
int err;
- perf_tool__fill_defaults(tool);
-
if (rd.data_size == 0)
return -1;
@@ -2533,14 +2253,12 @@ out_err:
static int __perf_session__process_dir_events(struct perf_session *session)
{
struct perf_data *data = session->data;
- struct perf_tool *tool = session->tool;
+ const struct perf_tool *tool = session->tool;
int i, ret, readers, nr_readers;
struct ui_progress prog;
u64 total_size = perf_data__size(session->data);
struct reader *rd;
- perf_tool__fill_defaults(tool);
-
ui_progress__init_size(&prog, total_size, "Processing events...");
nr_readers = 1;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 4c29dc86956f..bcf1bcf06959 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -26,26 +26,68 @@ struct decomp_data {
struct zstd_data *zstd_decomp;
};
+/**
+ * struct perf_session - A perf session holds the main state when the program is
+ * working with live perf events or reading data from an input file.
+ *
+ * The rough organization of a perf_session is:
+ * ```
+ * +--------------+ +-----------+ +------------+
+ * | Session |1..* ----->| Machine |1..* ----->| Thread |
+ * +--------------+ +-----------+ +------------+
+ * ```
+ */
struct perf_session {
+ /**
+	 * @header: The perf_file_header read from an input file, or global
+	 * information captured from a live session.
+ */
struct perf_header header;
+	/** @machines: Machines within the session: a host and 0 or more guests. */
struct machines machines;
+ /** @evlist: List of evsels/events of the session. */
struct evlist *evlist;
- struct auxtrace *auxtrace;
+ /** @auxtrace: callbacks to allow AUX area data decoding. */
+ const struct auxtrace *auxtrace;
+ /** @itrace_synth_opts: AUX area tracing synthesis options. */
struct itrace_synth_opts *itrace_synth_opts;
+ /** @auxtrace_index: index of AUX area tracing events within a perf.data file. */
struct list_head auxtrace_index;
#ifdef HAVE_LIBTRACEEVENT
+ /** @tevent: handles for libtraceevent and plugins. */
struct trace_event tevent;
#endif
+ /** @time_conv: Holds contents of last PERF_RECORD_TIME_CONV event. */
struct perf_record_time_conv time_conv;
- bool repipe;
+	/** @trace_event_repipe: When set, read trace events are written to stdout. */
+ bool trace_event_repipe;
+ /**
+ * @one_mmap: The reader will use a single mmap by default. There may be
+ * multiple data files in particular for aux events. If this is true
+	 * multiple data files, in particular for aux events. If this is true
+ */
bool one_mmap;
+ /** @one_mmap_addr: Address of initial perf data file reader mmap. */
void *one_mmap_addr;
+ /** @one_mmap_offset: File offset in perf.data file when mapped. */
u64 one_mmap_offset;
+ /** @ordered_events: Used to turn unordered events into ordered ones. */
struct ordered_events ordered_events;
+ /** @data: Optional perf data file being read from. */
struct perf_data *data;
- struct perf_tool *tool;
+ /** @tool: callbacks for event handling. */
+ const struct perf_tool *tool;
+ /**
+ * @bytes_transferred: Used by perf record to count written bytes before
+ * compression.
+ */
u64 bytes_transferred;
+ /**
+ * @bytes_compressed: Used by perf record to count written bytes after
+ * compression.
+ */
u64 bytes_compressed;
+ /** @zstd_data: Owner of global compression state, buffers, etc. */
struct zstd_data zstd_data;
struct decomp_data decomp_data;
struct decomp_data *active_decomp;
@@ -64,13 +106,13 @@ struct decomp {
struct perf_tool;
struct perf_session *__perf_session__new(struct perf_data *data,
- bool repipe, int repipe_fd,
- struct perf_tool *tool);
+ struct perf_tool *tool,
+ bool trace_event_repipe);
static inline struct perf_session *perf_session__new(struct perf_data *data,
struct perf_tool *tool)
{
- return __perf_session__new(data, false, -1, tool);
+ return __perf_session__new(data, tool, /*trace_event_repipe=*/false);
}
void perf_session__delete(struct perf_session *session);
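
A minimal sketch of opening a perf.data file under the new constructor signature; `trace_event_repipe` replaces the old repipe/repipe_fd pair, and error handling is trimmed for brevity:

```c
#include "util/data.h"
#include "util/session.h"
#include "util/tool.h"

static struct perf_session *open_for_report(struct perf_tool *tool)
{
	struct perf_data data = {
		.path = "perf.data",
		.mode = PERF_DATA_MODE_READ,
	};

	return __perf_session__new(&data, tool,
				   /*trace_event_repipe=*/false);
}
```
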
@@ -92,8 +134,6 @@ int perf_session__process_events(struct perf_session *session);
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
u64 timestamp, u64 file_offset, const char *file_path);
-void perf_tool__fill_defaults(struct perf_tool *tool);
-
int perf_session__resolve_callchain(struct perf_session *session,
struct evsel *evsel,
struct thread *thread,
@@ -154,13 +194,16 @@ extern volatile int session_done;
int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample);
+int perf_session__deliver_synth_attr_event(struct perf_session *session,
+ const struct perf_event_attr *attr,
+ u64 id);
int perf_session__dsos_hit_all(struct perf_session *session);
int perf_event__process_id_index(struct perf_session *session,
union perf_event *event);
-int perf_event__process_finished_round(struct perf_tool *tool,
+int perf_event__process_finished_round(const struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe);
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 142e9d447ce7..649550e9b7aa 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -17,7 +17,7 @@ src_feature_tests = getenv('srctree') + '/tools/build/feature'
def clang_has_option(option):
cc_output = Popen([cc, cc_options + option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
- return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
+ return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o) or (b"unknown warning option" in o))] == [ ]
if cc_is_clang:
from sysconfig import get_config_vars
@@ -63,6 +63,8 @@ cflags = getenv('CFLAGS', '').split()
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
if cc_is_clang:
cflags += ["-Wno-unused-command-line-argument" ]
+ if clang_has_option("-Wno-cast-function-type-mismatch"):
+ cflags += ["-Wno-cast-function-type-mismatch" ]
else:
cflags += ['-Wno-cast-function-type' ]
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index ab7c7ff35f9b..013020f33ece 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2312,7 +2312,7 @@ static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
he->mem_type_off, true);
buf[4095] = '\0';
- return repsep_snprintf(bf, size, "%s %+d (%s)", he_type->self.type_name,
+ return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
he->mem_type_off, buf);
}
@@ -2326,6 +2326,57 @@ struct sort_entry sort_type_offset = {
.se_width_idx = HISTC_TYPE_OFFSET,
};
+/* --sort typecln */
+
+/* TODO: use actual value in the system */
+#define TYPE_CACHELINE_SIZE 64
+
+static int64_t
+sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ struct annotated_data_type *left_type = left->mem_type;
+ struct annotated_data_type *right_type = right->mem_type;
+ int64_t left_cln, right_cln;
+ int64_t ret;
+
+ if (!left_type) {
+ sort__type_init(left);
+ left_type = left->mem_type;
+ }
+
+ if (!right_type) {
+ sort__type_init(right);
+ right_type = right->mem_type;
+ }
+
+ ret = strcmp(left_type->self.type_name, right_type->self.type_name);
+ if (ret)
+ return ret;
+
+ left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
+ right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
+ return left_cln - right_cln;
+}
+
+static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width __maybe_unused)
+{
+ struct annotated_data_type *he_type = he->mem_type;
+
+ return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
+ he->mem_type_off / TYPE_CACHELINE_SIZE);
+}
+
+struct sort_entry sort_type_cacheline = {
+ .se_header = "Data Type Cacheline",
+ .se_cmp = sort__type_cmp,
+ .se_collapse = sort__typecln_sort,
+ .se_sort = sort__typecln_sort,
+ .se_init = sort__type_init,
+ .se_snprintf = hist_entry__typecln_snprintf,
+ .se_width_idx = HISTC_TYPE_CACHELINE,
+};
+
struct sort_dimension {
const char *name;
@@ -2384,6 +2435,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
+ DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
};
#undef DIM
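
A worked example of the typecln bucketing rule with the fixed 64-byte TYPE_CACHELINE_SIZE: two offsets land in the same bucket iff integer division by 64 agrees:

```c
#include <stdio.h>

#define TYPE_CACHELINE_SIZE 64

int main(void)
{
	int offs[] = { 0, 60, 64, 127, 128 };

	for (unsigned int i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
		printf("offset %3d -> cache-line %d\n",
		       offs[i], offs[i] / TYPE_CACHELINE_SIZE);
	/* 0 and 60 share line 0; 64 and 127 share line 1; 128 is line 2 */
	return 0;
}
```
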
@@ -3960,7 +4012,7 @@ static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int
add_key(sb, s[i].name, llen);
}
-char *sort_help(const char *prefix)
+char *sort_help(const char *prefix, enum sort_mode mode)
{
struct strbuf sb;
char *s;
@@ -3972,10 +4024,12 @@ char *sort_help(const char *prefix)
ARRAY_SIZE(hpp_sort_dimensions), &len);
add_sort_string(&sb, common_sort_dimensions,
ARRAY_SIZE(common_sort_dimensions), &len);
- add_sort_string(&sb, bstack_sort_dimensions,
- ARRAY_SIZE(bstack_sort_dimensions), &len);
- add_sort_string(&sb, memory_sort_dimensions,
- ARRAY_SIZE(memory_sort_dimensions), &len);
+ if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
+ add_sort_string(&sb, bstack_sort_dimensions,
+ ARRAY_SIZE(bstack_sort_dimensions), &len);
+ if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
+ add_sort_string(&sb, memory_sort_dimensions,
+ ARRAY_SIZE(memory_sort_dimensions), &len);
s = strbuf_detach(&sb, NULL);
strbuf_release(&sb);
return s;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 0bd0ee3ae76b..9ff68c6786e7 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -71,6 +71,7 @@ enum sort_type {
SORT_ANNOTATE_DATA_TYPE,
SORT_ANNOTATE_DATA_TYPE_OFFSET,
SORT_SYM_OFFSET,
+ SORT_ANNOTATE_DATA_TYPE_CACHELINE,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -130,7 +131,7 @@ void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
-char *sort_help(const char *prefix);
+char *sort_help(const char *prefix, enum sort_mode mode);
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 760742fd4a7d..f32d0d4f4bc9 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -6,6 +6,7 @@
#include <string.h>
#include <sys/types.h>
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
@@ -16,6 +17,9 @@
#include "util/debug.h"
#include "util/callchain.h"
#include "util/symbol_conf.h"
+#ifdef HAVE_LIBLLVM_SUPPORT
+#include "util/llvm-c-helpers.h"
+#endif
#include "srcline.h"
#include "string2.h"
#include "symbol.h"
@@ -130,7 +134,60 @@ static struct symbol *new_inline_sym(struct dso *dso,
#define MAX_INLINE_NEST 1024
-#ifdef HAVE_LIBBFD_SUPPORT
+#ifdef HAVE_LIBLLVM_SUPPORT
+
+static void free_llvm_inline_frames(struct llvm_a2l_frame *inline_frames,
+ int num_frames)
+{
+ if (inline_frames != NULL) {
+ for (int i = 0; i < num_frames; ++i) {
+ zfree(&inline_frames[i].filename);
+ zfree(&inline_frames[i].funcname);
+ }
+ zfree(&inline_frames);
+ }
+}
+
+static int addr2line(const char *dso_name, u64 addr,
+ char **file, unsigned int *line, struct dso *dso,
+ bool unwind_inlines, struct inline_node *node,
+ struct symbol *sym)
+{
+ struct llvm_a2l_frame *inline_frames = NULL;
+ int num_frames = llvm_addr2line(dso_name, addr, file, line,
+ node && unwind_inlines, &inline_frames);
+
+ if (num_frames == 0 || !inline_frames) {
+ /* Error, or we didn't want inlines. */
+ return num_frames;
+ }
+
+ for (int i = 0; i < num_frames; ++i) {
+ struct symbol *inline_sym =
+ new_inline_sym(dso, sym, inline_frames[i].funcname);
+ char *srcline = NULL;
+
+ if (inline_frames[i].filename) {
+ srcline =
+ srcline_from_fileline(inline_frames[i].filename,
+ inline_frames[i].line);
+ }
+ if (inline_list__append(inline_sym, srcline, node) != 0) {
+ free_llvm_inline_frames(inline_frames, num_frames);
+ return 0;
+ }
+ }
+ free_llvm_inline_frames(inline_frames, num_frames);
+
+ return num_frames;
+}
+
+void dso__free_a2l(struct dso *dso __maybe_unused)
+{
+ /* Nothing to free. */
+}
+
+#elif defined(HAVE_LIBBFD_SUPPORT)
/*
* Implement addr2line using libbfd.
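
A sketch of the LLVM-backed lookup added above, assuming the `llvm_addr2line()` helper and `struct llvm_a2l_frame` from util/llvm-c-helpers.h; string and array cleanup is elided here (see free_llvm_inline_frames() in the patch):

```c
#include <linux/types.h>
#include "util/debug.h"
#include "util/llvm-c-helpers.h"

static void dump_inline_chain(const char *dso_name, u64 addr)
{
	char *file = NULL;
	unsigned int line = 0;
	struct llvm_a2l_frame *frames = NULL;
	int n = llvm_addr2line(dso_name, addr, &file, &line,
			       /*unwind_inlines=*/true, &frames);

	if (n <= 0 || !frames)
		return;		/* error, or no inline information */

	for (int i = 0; i < n; i++)
		pr_debug("%s:%d (%s)\n", frames[i].filename,
			 frames[i].line, frames[i].funcname);
}
```
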
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index c38bcb6f4c78..ea96e4ebad8c 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -1237,7 +1237,8 @@ static void print_metric_headers(struct perf_stat_config *config,
/* Print metrics headers only */
evlist__for_each_entry(evlist, counter) {
- if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter)
+ if (!config->iostat_run &&
+ config->aggr_mode != AGGR_NONE && counter->metric_leader != counter)
continue;
os.evsel = counter;
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 6bb975e46de3..99376c12dd8e 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -380,7 +380,7 @@ static int prepare_metric(const struct metric_expr *mexp,
struct stats *stats;
double scale;
- switch (metric_events[i]->tool_event) {
+ switch (evsel__tool_event(metric_events[i])) {
case PERF_TOOL_DURATION_TIME:
stats = &walltime_nsecs_stats;
scale = 1e-9;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 19eb623e0826..a18927d792af 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2425,14 +2425,14 @@ static bool symbol__read_kptr_restrict(void)
{
bool value = false;
FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
+ bool used_root;
+ bool cap_syslog = perf_cap__capable(CAP_SYSLOG, &used_root);
if (fp != NULL) {
char line[8];
if (fgets(line, sizeof(line), fp) != NULL)
- value = perf_cap__capable(CAP_SYSLOG) ?
- (atoi(line) >= 2) :
- (atoi(line) != 0);
+ value = cap_syslog ? (atoi(line) >= 2) : (atoi(line) != 0);
fclose(fp);
}
@@ -2440,7 +2440,7 @@ static bool symbol__read_kptr_restrict(void)
/* Per kernel/kallsyms.c:
* we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
*/
- if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
+ if (perf_event_paranoid() > 1 && !cap_syslog)
value = true;
return value;
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index 657cfa5af43c..a9c51acc722f 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -64,7 +64,7 @@ struct symbol_conf {
*sym_list_str,
*col_width_list_str,
*bt_stop_list_str;
- char *addr2line_path;
+ const char *addr2line_path;
unsigned long time_quantum;
struct strlist *dso_list,
*comm_list,
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 5498048f56ea..a58444c4aed1 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0-only
#include "util/cgroup.h"
#include "util/data.h"
@@ -47,7 +47,7 @@
unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
-int perf_tool__process_synth_event(struct perf_tool *tool,
+int perf_tool__process_synth_event(const struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
perf_event__handler_t process)
@@ -187,7 +187,7 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t ti
return 0;
}
-pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+pid_t perf_event__synthesize_comm(const struct perf_tool *tool,
union perf_event *event, pid_t pid,
perf_event__handler_t process,
struct machine *machine)
@@ -218,7 +218,7 @@ static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
}
}
-int perf_event__synthesize_namespaces(struct perf_tool *tool,
+int perf_event__synthesize_namespaces(const struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
@@ -257,7 +257,7 @@ int perf_event__synthesize_namespaces(struct perf_tool *tool,
return 0;
}
-static int perf_event__synthesize_fork(struct perf_tool *tool,
+static int perf_event__synthesize_fork(const struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid, pid_t ppid,
perf_event__handler_t process,
@@ -418,7 +418,7 @@ out:
dso__put(dso);
}
-int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+int perf_event__synthesize_mmap_events(const struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
@@ -542,7 +542,7 @@ out:
}
#ifdef HAVE_FILE_HANDLE
-static int perf_event__synthesize_cgroup(struct perf_tool *tool,
+static int perf_event__synthesize_cgroup(const struct perf_tool *tool,
union perf_event *event,
char *path, size_t mount_len,
perf_event__handler_t process,
@@ -582,7 +582,7 @@ static int perf_event__synthesize_cgroup(struct perf_tool *tool,
return 0;
}
-static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
+static int perf_event__walk_cgroup_tree(const struct perf_tool *tool,
union perf_event *event,
char *path, size_t mount_len,
perf_event__handler_t process,
@@ -630,7 +630,7 @@ static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
return ret;
}
-int perf_event__synthesize_cgroups(struct perf_tool *tool,
+int perf_event__synthesize_cgroups(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
@@ -657,7 +657,7 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool,
return 0;
}
#else
-int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
+int perf_event__synthesize_cgroups(const struct perf_tool *tool __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused)
{
@@ -666,7 +666,7 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
#endif
struct perf_event__synthesize_modules_maps_cb_args {
- struct perf_tool *tool;
+ const struct perf_tool *tool;
perf_event__handler_t process;
struct machine *machine;
union perf_event *event;
@@ -717,7 +717,7 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
return 0;
}
-int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
+int perf_event__synthesize_modules(const struct perf_tool *tool, perf_event__handler_t process,
struct machine *machine)
{
int rc;
@@ -763,7 +763,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *fork_event,
union perf_event *namespaces_event,
pid_t pid, int full, perf_event__handler_t process,
- struct perf_tool *tool, struct machine *machine,
+ const struct perf_tool *tool, struct machine *machine,
bool needs_mmap, bool mmap_data)
{
char filename[PATH_MAX];
@@ -852,7 +852,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
return rc;
}
-int perf_event__synthesize_thread_map(struct perf_tool *tool,
+int perf_event__synthesize_thread_map(const struct perf_tool *tool,
struct perf_thread_map *threads,
perf_event__handler_t process,
struct machine *machine,
@@ -929,7 +929,7 @@ out:
return err;
}
-static int __perf_event__synthesize_threads(struct perf_tool *tool,
+static int __perf_event__synthesize_threads(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
bool needs_mmap,
@@ -993,7 +993,7 @@ out:
}
struct synthesize_threads_arg {
- struct perf_tool *tool;
+ const struct perf_tool *tool;
perf_event__handler_t process;
struct machine *machine;
bool needs_mmap;
@@ -1015,7 +1015,7 @@ static void *synthesize_threads_worker(void *arg)
return NULL;
}
-int perf_event__synthesize_threads(struct perf_tool *tool,
+int perf_event__synthesize_threads(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
bool needs_mmap, bool mmap_data,
@@ -1104,14 +1104,14 @@ free_dirent:
return err;
}
-int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
+int __weak perf_event__synthesize_extra_kmaps(const struct perf_tool *tool __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused)
{
return 0;
}
-static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+static int __perf_event__synthesize_kernel_mmap(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
@@ -1183,7 +1183,7 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
return err;
}
-int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+int perf_event__synthesize_kernel_mmap(const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
@@ -1196,7 +1196,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
return perf_event__synthesize_extra_kmaps(tool, process, machine);
}
-int perf_event__synthesize_thread_map2(struct perf_tool *tool,
+int perf_event__synthesize_thread_map2(const struct perf_tool *tool,
struct perf_thread_map *threads,
perf_event__handler_t process,
struct machine *machine)
@@ -1346,7 +1346,7 @@ static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map
}
-int perf_event__synthesize_cpu_map(struct perf_tool *tool,
+int perf_event__synthesize_cpu_map(const struct perf_tool *tool,
const struct perf_cpu_map *map,
perf_event__handler_t process,
struct machine *machine)
@@ -1364,7 +1364,7 @@ int perf_event__synthesize_cpu_map(struct perf_tool *tool,
return err;
}
-int perf_event__synthesize_stat_config(struct perf_tool *tool,
+int perf_event__synthesize_stat_config(const struct perf_tool *tool,
struct perf_stat_config *config,
perf_event__handler_t process,
struct machine *machine)
@@ -1403,7 +1403,7 @@ int perf_event__synthesize_stat_config(struct perf_tool *tool,
return err;
}
-int perf_event__synthesize_stat(struct perf_tool *tool,
+int perf_event__synthesize_stat(const struct perf_tool *tool,
struct perf_cpu cpu, u32 thread, u64 id,
struct perf_counts_values *count,
perf_event__handler_t process,
@@ -1425,7 +1425,7 @@ int perf_event__synthesize_stat(struct perf_tool *tool,
return process(tool, (union perf_event *) &event, NULL, machine);
}
-int perf_event__synthesize_stat_round(struct perf_tool *tool,
+int perf_event__synthesize_stat_round(const struct perf_tool *tool,
u64 evtime, u64 type,
perf_event__handler_t process,
struct machine *machine)
@@ -1826,7 +1826,7 @@ int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_s
return (void *)array - (void *)start;
}
-int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
+int __perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process,
struct evlist *evlist, struct machine *machine, size_t from)
{
union perf_event *ev;
@@ -1918,13 +1918,13 @@ out_err:
return err;
}
-int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
+int perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process,
struct evlist *evlist, struct machine *machine)
{
return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
}
-int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+int __machine__synthesize_threads(struct machine *machine, const struct perf_tool *tool,
struct target *target, struct perf_thread_map *threads,
perf_event__handler_t process, bool needs_mmap,
bool data_mmap, unsigned int nr_threads_synthesize)
@@ -1985,7 +1985,7 @@ static struct perf_record_event_update *event_update_event__new(size_t size, u64
return ev;
}
-int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
+int perf_event__synthesize_event_update_unit(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
size_t size = strlen(evsel->unit);
@@ -2002,7 +2002,7 @@ int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evse
return err;
}
-int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
+int perf_event__synthesize_event_update_scale(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
struct perf_record_event_update *ev;
@@ -2019,7 +2019,7 @@ int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evs
return err;
}
-int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
+int perf_event__synthesize_event_update_name(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
struct perf_record_event_update *ev;
@@ -2036,7 +2036,7 @@ int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evse
return err;
}
-int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
+int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel,
perf_event__handler_t process)
{
struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
@@ -2059,7 +2059,7 @@ int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evse
return err;
}
-int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
+int perf_event__synthesize_attrs(const struct perf_tool *tool, struct evlist *evlist,
perf_event__handler_t process)
{
struct evsel *evsel;
@@ -2087,7 +2087,7 @@ static bool has_scale(struct evsel *evsel)
return evsel->scale != 1;
}
-int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
+int perf_event__synthesize_extra_attr(const struct perf_tool *tool, struct evlist *evsel_list,
perf_event__handler_t process, bool is_pipe)
{
struct evsel *evsel;
@@ -2143,7 +2143,7 @@ int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evs
return 0;
}
-int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
+int perf_event__synthesize_attr(const struct perf_tool *tool, struct perf_event_attr *attr,
u32 ids, u64 *id, perf_event__handler_t process)
{
union perf_event *ev;
@@ -2177,7 +2177,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *
}
#ifdef HAVE_LIBTRACEEVENT
-int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
+int perf_event__synthesize_tracing_data(const struct perf_tool *tool, int fd, struct evlist *evlist,
perf_event__handler_t process)
{
union perf_event ev;
@@ -2200,7 +2200,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct e
if (!tdata)
return -1;
- memset(&ev, 0, sizeof(ev));
+ memset(&ev, 0, sizeof(ev.tracing_data));
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
size = tdata->size;
@@ -2225,31 +2225,108 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct e
}
#endif
-int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
- perf_event__handler_t process, struct machine *machine)
+int perf_event__synthesize_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ perf_event__handler_t process,
+ const struct evsel *evsel,
+ __u16 misc,
+ const struct build_id *bid,
+ const char *filename)
{
union perf_event ev;
size_t len;
- if (!dso__hit(pos))
- return 0;
+ len = sizeof(ev.build_id) + strlen(filename) + 1;
+ len = PERF_ALIGN(len, sizeof(u64));
- memset(&ev, 0, sizeof(ev));
+ memset(&ev, 0, len);
- len = dso__long_name_len(pos) + 1;
- len = PERF_ALIGN(len, NAME_ALIGN);
- ev.build_id.size = min(dso__bid(pos)->size, sizeof(dso__bid(pos)->data));
- memcpy(&ev.build_id.build_id, dso__bid(pos)->data, ev.build_id.size);
+ ev.build_id.size = min(bid->size, sizeof(ev.build_id.build_id));
+ memcpy(ev.build_id.build_id, bid->data, ev.build_id.size);
ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE;
ev.build_id.pid = machine->pid;
- ev.build_id.header.size = sizeof(ev.build_id) + len;
- memcpy(&ev.build_id.filename, dso__long_name(pos), dso__long_name_len(pos));
+ ev.build_id.header.size = len;
+ strcpy(ev.build_id.filename, filename);
+
+ if (evsel) {
+ void *array = &ev;
+ int ret;
+
+ array += ev.header.size;
+ ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
+ if (ret < 0)
+ return ret;
+
+ if (ret & 7) {
+ pr_err("Bad id sample size %d\n", ret);
+ return -EINVAL;
+ }
+
+ ev.header.size += ret;
+ }
+
+ return process(tool, &ev, sample, machine);
+}
+
+int perf_event__synthesize_mmap2_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ perf_event__handler_t process,
+ const struct evsel *evsel,
+ __u16 misc,
+ __u32 pid, __u32 tid,
+ __u64 start, __u64 len, __u64 pgoff,
+ const struct build_id *bid,
+ __u32 prot, __u32 flags,
+ const char *filename)
+{
+ union perf_event ev;
+ size_t ev_len;
+ void *array;
+ int ret;
+
+ ev_len = sizeof(ev.mmap2) - sizeof(ev.mmap2.filename) + strlen(filename) + 1;
+ ev_len = PERF_ALIGN(ev_len, sizeof(u64));
+
+ memset(&ev, 0, ev_len);
+
+ ev.mmap2.header.type = PERF_RECORD_MMAP2;
+ ev.mmap2.header.misc = misc | PERF_RECORD_MISC_MMAP_BUILD_ID;
+ ev.mmap2.header.size = ev_len;
+
+ ev.mmap2.pid = pid;
+ ev.mmap2.tid = tid;
+ ev.mmap2.start = start;
+ ev.mmap2.len = len;
+ ev.mmap2.pgoff = pgoff;
+
+ ev.mmap2.build_id_size = min(bid->size, sizeof(ev.mmap2.build_id));
+ memcpy(ev.mmap2.build_id, bid->data, ev.mmap2.build_id_size);
+
+ ev.mmap2.prot = prot;
+ ev.mmap2.flags = flags;
+
+	memcpy(ev.mmap2.filename, filename, min(strlen(filename), sizeof(ev.mmap2.filename)));
+
+ array = &ev;
+ array += ev.header.size;
+ ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
+ if (ret < 0)
+ return ret;
+
+ if (ret & 7) {
+ pr_err("Bad id sample size %d\n", ret);
+ return -EINVAL;
+ }
+
+ ev.header.size += ret;
- return process(tool, &ev, NULL, machine);
+ return process(tool, &ev, sample, machine);
}
-int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
+int perf_event__synthesize_stat_events(struct perf_stat_config *config, const struct perf_tool *tool,
struct evlist *evlist, perf_event__handler_t process, bool attrs)
{
int err;
@@ -2286,7 +2363,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
-int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
+int perf_event__synthesize_features(const struct perf_tool *tool, struct perf_session *session,
struct evlist *evlist, perf_event__handler_t process)
{
struct perf_header *header = &session->header;
@@ -2349,7 +2426,7 @@ int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session
return ret;
}
-int perf_event__synthesize_for_pipe(struct perf_tool *tool,
+int perf_event__synthesize_for_pipe(const struct perf_tool *tool,
struct perf_session *session,
struct perf_data *data,
perf_event__handler_t process)
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
index 53737d1619a4..b9c936b5cfeb 100644
--- a/tools/perf/util/synthetic-events.h
+++ b/tools/perf/util/synthetic-events.h
@@ -9,6 +9,7 @@
#include <perf/cpumap.h>
struct auxtrace_record;
+struct build_id;
struct dso;
struct evlist;
struct evsel;
@@ -40,45 +41,63 @@ enum perf_record_synth {
int parse_synth_opt(char *str);
-typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *event,
+typedef int (*perf_event__handler_t)(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine);
-int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process);
-int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process);
-int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_cpu_map(struct perf_tool *tool, const struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
-int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
-int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
-int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
-int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list, perf_event__handler_t process, bool is_pipe);
-int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session, struct evlist *evlist, perf_event__handler_t process);
-int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine);
-int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine, size_t from);
+int perf_event__synthesize_attrs(const struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synthesize_attr(const struct perf_tool *tool, struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process);
+int perf_event__synthesize_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ perf_event__handler_t process,
+ const struct evsel *evsel,
+ __u16 misc,
+ const struct build_id *bid,
+ const char *filename);
+int perf_event__synthesize_mmap2_build_id(const struct perf_tool *tool,
+ struct perf_sample *sample,
+ struct machine *machine,
+ perf_event__handler_t process,
+ const struct evsel *evsel,
+ __u16 misc,
+ __u32 pid, __u32 tid,
+ __u64 start, __u64 len, __u64 pgoff,
+ const struct build_id *bid,
+ __u32 prot, __u32 flags,
+ const char *filename);
+int perf_event__synthesize_cpu_map(const struct perf_tool *tool, const struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_name(const struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_scale(const struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_unit(const struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_extra_attr(const struct perf_tool *tool, struct evlist *evsel_list, perf_event__handler_t process, bool is_pipe);
+int perf_event__synthesize_extra_kmaps(const struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_features(const struct perf_tool *tool, struct perf_session *session, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine);
+int __perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine, size_t from);
int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample);
-int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data);
-int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_namespaces(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_cgroups(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_kernel_mmap(const struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_mmap_events(const struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data);
+int perf_event__synthesize_modules(const struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_namespaces(const struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_cgroups(const struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, const struct perf_sample *sample);
-int perf_event__synthesize_stat_config(struct perf_tool *tool, struct perf_stat_config *config, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process, bool attrs);
-int perf_event__synthesize_stat_round(struct perf_tool *tool, u64 time, u64 type, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_stat(struct perf_tool *tool, struct perf_cpu cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data);
-int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data, unsigned int nr_threads_synthesize);
-int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist, perf_event__handler_t process);
-int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
-pid_t perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine);
-
-int perf_tool__process_synth_event(struct perf_tool *tool, union perf_event *event, struct machine *machine, perf_event__handler_t process);
+int perf_event__synthesize_stat_config(const struct perf_tool *tool, struct perf_stat_config *config, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_stat_events(struct perf_stat_config *config, const struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process, bool attrs);
+int perf_event__synthesize_stat_round(const struct perf_tool *tool, u64 time, u64 type, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_stat(const struct perf_tool *tool, struct perf_cpu cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_thread_map2(const struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_thread_map(const struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data);
+int perf_event__synthesize_threads(const struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool needs_mmap, bool mmap_data, unsigned int nr_threads_synthesize);
+int perf_event__synthesize_tracing_data(const struct perf_tool *tool, int fd, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, const struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+pid_t perf_event__synthesize_comm(const struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine);
+
+int perf_tool__process_synth_event(const struct perf_tool *tool, union perf_event *event, struct machine *machine, perf_event__handler_t process);
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format);
-int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+int __machine__synthesize_threads(struct machine *machine, const struct perf_tool *tool,
struct target *target, struct perf_thread_map *threads,
perf_event__handler_t process, bool needs_mmap, bool data_mmap,
unsigned int nr_threads_synthesize);
@@ -87,7 +106,7 @@ int machine__synthesize_threads(struct machine *machine, struct target *target,
unsigned int nr_threads_synthesize);
#ifdef HAVE_AUXTRACE_SUPPORT
-int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, struct perf_tool *tool,
+int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, const struct perf_tool *tool,
struct perf_session *session, perf_event__handler_t process);
#else // HAVE_AUXTRACE_SUPPORT
@@ -96,7 +115,7 @@ int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, struct per
static inline int
perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
- struct perf_tool *tool __maybe_unused,
+ const struct perf_tool *tool __maybe_unused,
struct perf_session *session __maybe_unused,
perf_event__handler_t process __maybe_unused)
{
@@ -117,7 +136,7 @@ static inline int perf_event__synthesize_bpf_events(struct perf_session *session
}
#endif // HAVE_LIBBPF_SUPPORT
-int perf_event__synthesize_for_pipe(struct perf_tool *tool,
+int perf_event__synthesize_for_pipe(const struct perf_tool *tool,
struct perf_session *session,
struct perf_data *data,
perf_event__handler_t process);
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 0dd26b991b3f..7c15dec6900d 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -18,6 +18,10 @@
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_x86_64;
+#elif defined(__i386__)
+#include <asm/syscalls_32.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_x86_MAX_ID;
+static const char *const *syscalltbl_native = syscalltbl_x86;
#elif defined(__s390x__)
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID;
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 87c59aa9fe38..0ffdd52d86d7 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -476,6 +476,7 @@ void thread__free_stitch_list(struct thread *thread)
return;
list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
+ map_symbol__exit(&pos->cursor.ms);
list_del_init(&pos->node);
free(pos);
}
@@ -485,6 +486,9 @@ void thread__free_stitch_list(struct thread *thread)
free(pos);
}
+ for (unsigned int i = 0 ; i < lbr_stitch->prev_lbr_cursor_size; i++)
+ map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);
+
zfree(&lbr_stitch->prev_lbr_cursor);
free(thread__lbr_stitch(thread));
thread__set_lbr_stitch(thread, NULL);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 8b4a3c69bad1..6cbf6eb2812e 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -26,6 +26,7 @@ struct lbr_stitch {
struct list_head free_lists;
struct perf_sample prev_sample;
struct callchain_cursor_node *prev_lbr_cursor;
+ unsigned int prev_lbr_cursor_size;
};
DECLARE_RC_STRUCT(thread) {
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 302443921681..1b91ccd4d523 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -20,7 +20,7 @@ int parse_nsec_time(const char *str, u64 *ptime)
u64 time_sec, time_nsec;
char *end;
- time_sec = strtoul(str, &end, 10);
+ time_sec = strtoull(str, &end, 10);
if (*end != '.' && *end != '\0')
return -1;
@@ -38,7 +38,7 @@ int parse_nsec_time(const char *str, u64 *ptime)
for (i = strlen(nsec_buf); i < 9; i++)
nsec_buf[i] = '0';
- time_nsec = strtoul(nsec_buf, &end, 10);
+ time_nsec = strtoull(nsec_buf, &end, 10);
if (*end != '\0')
return -1;
} else
diff --git a/tools/perf/util/tool.c b/tools/perf/util/tool.c
new file mode 100644
index 000000000000..3b7f390f26eb
--- /dev/null
+++ b/tools/perf/util/tool.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "data.h"
+#include "debug.h"
+#include "header.h"
+#include "session.h"
+#include "stat.h"
+#include "tool.h"
+#include "tsc.h"
+#include <sys/mman.h>
+#include <unistd.h>
+
+#ifdef HAVE_ZSTD_SUPPORT
+static int perf_session__process_compressed_event(struct perf_session *session,
+ union perf_event *event, u64 file_offset,
+ const char *file_path)
+{
+ void *src;
+ size_t decomp_size, src_size;
+ u64 decomp_last_rem = 0;
+ size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
+ struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;
+
+ if (decomp_last) {
+ decomp_last_rem = decomp_last->size - decomp_last->head;
+ decomp_len += decomp_last_rem;
+ }
+
+ mmap_len = sizeof(struct decomp) + decomp_len;
+ decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ if (decomp == MAP_FAILED) {
+ pr_err("Couldn't allocate memory for decompression\n");
+ return -1;
+ }
+
+ decomp->file_pos = file_offset;
+ decomp->file_path = file_path;
+ decomp->mmap_len = mmap_len;
+ decomp->head = 0;
+
+ if (decomp_last_rem) {
+ memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
+ decomp->size = decomp_last_rem;
+ }
+
+ src = (void *)event + sizeof(struct perf_record_compressed);
+ src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
+
+ decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
+ &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
+ if (!decomp_size) {
+ munmap(decomp, mmap_len);
+ pr_err("Couldn't decompress data\n");
+ return -1;
+ }
+
+ decomp->size += decomp_size;
+
+ if (session->active_decomp->decomp == NULL)
+ session->active_decomp->decomp = decomp;
+ else
+ session->active_decomp->decomp_last->next = decomp;
+
+ session->active_decomp->decomp_last = decomp;
+
+ pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
+
+ return 0;
+}
+#endif
+
+static int process_event_synth_tracing_data_stub(struct perf_session *session
+ __maybe_unused,
+ union perf_event *event
+ __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_synth_attr_stub(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct evlist **pevlist
+ __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_synth_event_update_stub(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct evlist **pevlist
+ __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_event_update(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+int process_event_sample_stub(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_stub(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_finished_round_stub(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct ordered_events *oe __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int skipn(int fd, off_t n)
+{
+ char buf[4096];
+ ssize_t ret;
+
+ while (n > 0) {
+ ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
+ if (ret <= 0)
+ return ret;
+ n -= ret;
+ }
+
+ return 0;
+}
+
+static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event)
+{
+ dump_printf(": unhandled!\n");
+ if (perf_data__is_pipe(session->data))
+ skipn(perf_data__fd(session->data), event->auxtrace.size);
+ return event->auxtrace.size;
+}
+
+static int process_event_op2_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+
+static
+int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_thread_map(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static
+int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_cpu_map(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static
+int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_stat_config(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_stat(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_stat_round(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_time_conv(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused,
+ u64 file_offset __maybe_unused,
+ const char *file_path __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+void perf_tool__init(struct perf_tool *tool, bool ordered_events)
+{
+ tool->ordered_events = ordered_events;
+ tool->ordering_requires_timestamps = false;
+ tool->namespace_events = false;
+ tool->cgroup_events = false;
+ tool->no_warn = false;
+ tool->show_feat_hdr = SHOW_FEAT_NO_HEADER;
+
+ tool->sample = process_event_sample_stub;
+ tool->mmap = process_event_stub;
+ tool->mmap2 = process_event_stub;
+ tool->comm = process_event_stub;
+ tool->namespaces = process_event_stub;
+ tool->cgroup = process_event_stub;
+ tool->fork = process_event_stub;
+ tool->exit = process_event_stub;
+ tool->lost = perf_event__process_lost;
+ tool->lost_samples = perf_event__process_lost_samples;
+ tool->aux = perf_event__process_aux;
+ tool->itrace_start = perf_event__process_itrace_start;
+ tool->context_switch = perf_event__process_switch;
+ tool->ksymbol = perf_event__process_ksymbol;
+ tool->bpf = perf_event__process_bpf;
+ tool->text_poke = perf_event__process_text_poke;
+ tool->aux_output_hw_id = perf_event__process_aux_output_hw_id;
+ tool->read = process_event_sample_stub;
+ tool->throttle = process_event_stub;
+ tool->unthrottle = process_event_stub;
+ tool->attr = process_event_synth_attr_stub;
+ tool->event_update = process_event_synth_event_update_stub;
+ tool->tracing_data = process_event_synth_tracing_data_stub;
+ tool->build_id = process_event_op2_stub;
+
+ if (ordered_events)
+ tool->finished_round = perf_event__process_finished_round;
+ else
+ tool->finished_round = process_finished_round_stub;
+
+ tool->id_index = process_event_op2_stub;
+ tool->auxtrace_info = process_event_op2_stub;
+ tool->auxtrace = process_event_auxtrace_stub;
+ tool->auxtrace_error = process_event_op2_stub;
+ tool->thread_map = process_event_thread_map_stub;
+ tool->cpu_map = process_event_cpu_map_stub;
+ tool->stat_config = process_event_stat_config_stub;
+ tool->stat = process_stat_stub;
+ tool->stat_round = process_stat_round_stub;
+ tool->time_conv = process_event_time_conv_stub;
+ tool->feature = process_event_op2_stub;
+#ifdef HAVE_ZSTD_SUPPORT
+ tool->compressed = perf_session__process_compressed_event;
+#else
+ tool->compressed = perf_session__process_compressed_event_stub;
+#endif
+ tool->finished_init = process_event_op2_stub;
+}
+
+bool perf_tool__compressed_is_stub(const struct perf_tool *tool)
+{
+ return tool->compressed == perf_session__process_compressed_event_stub;
+}
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index c957fb849ac6..db1c7642b0d1 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -15,14 +15,14 @@ struct perf_tool;
struct machine;
struct ordered_events;
-typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
+typedef int (*event_sample)(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel, struct machine *machine);
-typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
+typedef int (*event_op)(const struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine);
-typedef int (*event_attr_op)(struct perf_tool *tool,
+typedef int (*event_attr_op)(const struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist);
@@ -31,7 +31,7 @@ typedef s64 (*event_op3)(struct perf_session *session, union perf_event *event);
typedef int (*event_op4)(struct perf_session *session, union perf_event *event, u64 data,
const char *str);
-typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
+typedef int (*event_oe)(const struct perf_tool *tool, union perf_event *event,
struct ordered_events *oe);
enum show_feature_header {
@@ -85,7 +85,18 @@ struct perf_tool {
bool namespace_events;
bool cgroup_events;
bool no_warn;
+ bool dont_split_sample_group;
enum show_feature_header show_feat_hdr;
};
+void perf_tool__init(struct perf_tool *tool, bool ordered_events);
+
+bool perf_tool__compressed_is_stub(const struct perf_tool *tool);
+
+int process_event_sample_stub(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine);
+
#endif /* __PERF_TOOL_H */
diff --git a/tools/perf/util/trace_augment.h b/tools/perf/util/trace_augment.h
new file mode 100644
index 000000000000..57a3e5045937
--- /dev/null
+++ b/tools/perf/util/trace_augment.h
@@ -0,0 +1,6 @@
+#ifndef TRACE_AUGMENT_H
+#define TRACE_AUGMENT_H
+
+#define TRACE_AUG_MAX_BUF 32 /* for buffer augmentation in perf trace */
+
+#endif
diff --git a/tools/perf/util/tsc.c b/tools/perf/util/tsc.c
index f19791d46e99..2e33a20e1e1b 100644
--- a/tools/perf/util/tsc.c
+++ b/tools/perf/util/tsc.c
@@ -72,7 +72,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
}
int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
- struct perf_tool *tool,
+ const struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 4f561e5e4162..9d55a13787ce 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -325,9 +325,15 @@ int perf_event_paranoid(void)
bool perf_event_paranoid_check(int max_level)
{
- return perf_cap__capable(CAP_SYS_ADMIN) ||
- perf_cap__capable(CAP_PERFMON) ||
- perf_event_paranoid() <= max_level;
+ bool used_root;
+
+ if (perf_cap__capable(CAP_SYS_ADMIN, &used_root))
+ return true;
+
+ if (!used_root && perf_cap__capable(CAP_PERFMON, &used_root))
+ return true;
+
+ return perf_event_paranoid() <= max_level;
}
static int
diff --git a/tools/power/cpupower/bindings/python/.gitignore b/tools/power/cpupower/bindings/python/.gitignore
index 5c9a1f0212dd..51cbb8799c44 100644
--- a/tools/power/cpupower/bindings/python/.gitignore
+++ b/tools/power/cpupower/bindings/python/.gitignore
@@ -1,8 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
__pycache__/
raw_pylibcpupower_wrap.c
*.o
*.so
*.py
!test_raw_pylibcpupower.py
-# git keeps ignoring this file, use git add -f raw_libcpupower.i
-!raw_pylibcpupower.i
diff --git a/tools/power/cpupower/bindings/python/Makefile b/tools/power/cpupower/bindings/python/Makefile
index dc09c5b66ead..e1ebb1d60cd4 100644
--- a/tools/power/cpupower/bindings/python/Makefile
+++ b/tools/power/cpupower/bindings/python/Makefile
@@ -20,13 +20,13 @@ _raw_pylibcpupower.so: raw_pylibcpupower_wrap.o
raw_pylibcpupower_wrap.o: raw_pylibcpupower_wrap.c
$(CC) -fPIC -c raw_pylibcpupower_wrap.c $(PY_INCLUDE)
-raw_pylibcpupower_wrap.c: raw_pylibcpupower.i
+raw_pylibcpupower_wrap.c: raw_pylibcpupower.swg
ifeq ($(HAVE_SWIG),0)
$(error "swig was not found. Make sure you have it installed and in the PATH to generate the bindings.")
else ifeq ($(HAVE_PYCONFIG),0)
$(error "python-config was not found. Make sure you have it installed and in the PATH to generate the bindings.")
endif
- swig -python raw_pylibcpupower.i
+ swig -python raw_pylibcpupower.swg
# Will only clean the bindings folder; will not clean the actual cpupower folder
clean:
diff --git a/tools/power/cpupower/bindings/python/raw_pylibcpupower.i b/tools/power/cpupower/bindings/python/raw_pylibcpupower.swg
index 96556d87a745..96556d87a745 100644
--- a/tools/power/cpupower/bindings/python/raw_pylibcpupower.i
+++ b/tools/power/cpupower/bindings/python/raw_pylibcpupower.swg
diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore
new file mode 100644
index 000000000000..d6264fe1c8cd
--- /dev/null
+++ b/tools/sched_ext/.gitignore
@@ -0,0 +1,2 @@
+tools/
+build/
diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile
new file mode 100644
index 000000000000..ca3815e572d8
--- /dev/null
+++ b/tools/sched_ext/Makefile
@@ -0,0 +1,246 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+include ../build/Build.include
+include ../scripts/Makefile.arch
+include ../scripts/Makefile.include
+
+all: all_targets
+
+ifneq ($(LLVM),)
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
+CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
+CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl
+CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu
+CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu
+CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu
+CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
+CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
+CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
+CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
+
+ifeq ($(CROSS_COMPILE),)
+ifeq ($(CLANG_TARGET_FLAGS),)
+$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk)
+else
+CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS)
+endif # CLANG_TARGET_FLAGS
+else
+CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif # CROSS_COMPILE
+
+CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
+else
+CC := $(CROSS_COMPILE)gcc
+endif # LLVM
+
+CURDIR := $(abspath .)
+TOOLSDIR := $(abspath ..)
+LIBDIR := $(TOOLSDIR)/lib
+BPFDIR := $(LIBDIR)/bpf
+TOOLSINCDIR := $(TOOLSDIR)/include
+BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool
+APIDIR := $(TOOLSINCDIR)/uapi
+GENDIR := $(abspath ../../include/generated)
+GENHDR := $(GENDIR)/autoconf.h
+
+ifeq ($(O),)
+OUTPUT_DIR := $(CURDIR)/build
+else
+OUTPUT_DIR := $(O)/build
+endif # O
+OBJ_DIR := $(OUTPUT_DIR)/obj
+INCLUDE_DIR := $(OUTPUT_DIR)/include
+BPFOBJ_DIR := $(OBJ_DIR)/libbpf
+SCXOBJ_DIR := $(OBJ_DIR)/sched_ext
+BINDIR := $(OUTPUT_DIR)/bin
+BPFOBJ := $(BPFOBJ_DIR)/libbpf.a
+ifneq ($(CROSS_COMPILE),)
+HOST_BUILD_DIR := $(OBJ_DIR)/host
+HOST_OUTPUT_DIR := host-tools
+HOST_INCLUDE_DIR := $(HOST_OUTPUT_DIR)/include
+else
+HOST_BUILD_DIR := $(OBJ_DIR)
+HOST_OUTPUT_DIR := $(OUTPUT_DIR)
+HOST_INCLUDE_DIR := $(INCLUDE_DIR)
+endif
+HOST_BPFOBJ := $(HOST_BUILD_DIR)/libbpf/libbpf.a
+RESOLVE_BTFIDS := $(HOST_BUILD_DIR)/resolve_btfids/resolve_btfids
+DEFAULT_BPFTOOL := $(HOST_OUTPUT_DIR)/sbin/bpftool
+
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
+
+BPFTOOL ?= $(DEFAULT_BPFTOOL)
+
+ifneq ($(wildcard $(GENHDR)),)
+ GENFLAGS := -DHAVE_GENHDR
+endif
+
+CFLAGS += -g -O2 -rdynamic -pthread -Wall -Werror $(GENFLAGS) \
+ -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
+ -I$(TOOLSINCDIR) -I$(APIDIR) -I$(CURDIR)/include
+
+# Silence some warnings when compiled with clang
+ifneq ($(LLVM),)
+CFLAGS += -Wno-unused-command-line-argument
+endif
+
+LDFLAGS = -lelf -lz -lpthread
+
+IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
+ grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
+
+# Get Clang's default includes on this system, as opposed to those seen by
+# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
+#
+# Use '-idirafter': Don't interfere with include mechanics except where the
+# build would have failed anyways.
+define get_sys_includes
+$(shell $(1) -v -E - </dev/null 2>&1 \
+ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
+$(shell $(1) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
+endef
+
+BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \
+ $(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) \
+ -I$(CURDIR)/include -I$(CURDIR)/include/bpf-compat \
+ -I$(INCLUDE_DIR) -I$(APIDIR) \
+ -I../../include \
+ $(call get_sys_includes,$(CLANG)) \
+ -Wall -Wno-compare-distinct-pointer-types \
+ -O2 -mcpu=v3
+
+# sort removes libbpf duplicates when not cross-building
+MAKE_DIRS := $(sort $(OBJ_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
+ $(HOST_BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/resolve_btfids \
+ $(INCLUDE_DIR) $(SCXOBJ_DIR) $(BINDIR))
+
+$(MAKE_DIRS):
+ $(call msg,MKDIR,,$@)
+ $(Q)mkdir -p $@
+
+$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
+ $(APIDIR)/linux/bpf.h \
+ | $(OBJ_DIR)/libbpf
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \
+ EXTRA_CFLAGS='-g -O0 -fPIC' \
+ DESTDIR=$(OUTPUT_DIR) prefix= all install_headers
+
+$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
+ $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
+ ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
+ EXTRA_CFLAGS='-g -O0' \
+ OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
+ LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
+ LIBBPF_DESTDIR=$(HOST_OUTPUT_DIR)/ \
+ prefix= DESTDIR=$(HOST_OUTPUT_DIR)/ install-bin
+
+$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
+ifeq ($(VMLINUX_H),)
+ $(call msg,GEN,,$@)
+ $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+else
+ $(call msg,CP,,$@)
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(SCXOBJ_DIR)/%.bpf.o: %.bpf.c $(INCLUDE_DIR)/vmlinux.h include/scx/*.h \
+ | $(BPFOBJ) $(SCXOBJ_DIR)
+ $(call msg,CLNG-BPF,,$(notdir $@))
+ $(Q)$(CLANG) $(BPF_CFLAGS) -target bpf -c $< -o $@
+
+$(INCLUDE_DIR)/%.bpf.skel.h: $(SCXOBJ_DIR)/%.bpf.o $(INCLUDE_DIR)/vmlinux.h $(BPFTOOL)
+ $(eval sched=$(notdir $@))
+ $(call msg,GEN-SKEL,,$(sched))
+ $(Q)$(BPFTOOL) gen object $(<:.o=.linked1.o) $<
+ $(Q)$(BPFTOOL) gen object $(<:.o=.linked2.o) $(<:.o=.linked1.o)
+ $(Q)$(BPFTOOL) gen object $(<:.o=.linked3.o) $(<:.o=.linked2.o)
+ $(Q)diff $(<:.o=.linked2.o) $(<:.o=.linked3.o)
+ $(Q)$(BPFTOOL) gen skeleton $(<:.o=.linked3.o) name $(subst .bpf.skel.h,,$(sched)) > $@
+ $(Q)$(BPFTOOL) gen subskeleton $(<:.o=.linked3.o) name $(subst .bpf.skel.h,,$(sched)) > $(@:.skel.h=.subskel.h)
+
+SCX_COMMON_DEPS := include/scx/common.h include/scx/user_exit_info.h | $(BINDIR)
+
+c-sched-targets = scx_simple scx_qmap scx_central scx_flatcg
+
+$(addprefix $(BINDIR)/,$(c-sched-targets)): \
+ $(BINDIR)/%: \
+ $(filter-out %.bpf.c,%.c) \
+ $(INCLUDE_DIR)/%.bpf.skel.h \
+ $(SCX_COMMON_DEPS)
+ $(eval sched=$(notdir $@))
+ $(CC) $(CFLAGS) -c $(sched).c -o $(SCXOBJ_DIR)/$(sched).o
+ $(CC) -o $@ $(SCXOBJ_DIR)/$(sched).o $(HOST_BPFOBJ) $(LDFLAGS)
+
+$(c-sched-targets): %: $(BINDIR)/%
+
+install: all
+ $(Q)mkdir -p $(DESTDIR)/usr/local/bin/
+ $(Q)cp $(BINDIR)/* $(DESTDIR)/usr/local/bin/
+
+clean:
+ rm -rf $(OUTPUT_DIR) $(HOST_OUTPUT_DIR)
+ rm -f *.o *.bpf.o *.bpf.skel.h *.bpf.subskel.h
+ rm -f $(c-sched-targets)
+
+help:
+ @echo 'Building targets'
+ @echo '================'
+ @echo ''
+ @echo ' all - Compile all schedulers'
+ @echo ''
+ @echo 'Alternatively, you may compile individual schedulers:'
+ @echo ''
+ @printf ' %s\n' $(c-sched-targets)
+ @echo ''
+ @echo 'For any scheduler build target, you may specify an alternative'
+ @echo 'build output path with the O= environment variable. For example:'
+ @echo ''
+ @echo ' O=/tmp/sched_ext make all'
+ @echo ''
+ @echo 'will compile all schedulers, and emit the build artifacts to'
+ @echo '/tmp/sched_ext/build.'
+ @echo ''
+ @echo ''
+ @echo 'Installing targets'
+ @echo '=================='
+ @echo ''
+	@echo '  install		- Compile and install all schedulers to /usr/local/bin.'
+	@echo '			  You may specify the DESTDIR= environment variable'
+	@echo '			  to indicate a prefix for /usr/local/bin. For example:'
+ @echo ''
+ @echo ' DESTDIR=/tmp/sched_ext make install'
+ @echo ''
+ @echo ' will build the schedulers in CWD/build, and'
+	@echo '	   install the schedulers to /tmp/sched_ext/usr/local/bin.'
+ @echo ''
+ @echo ''
+ @echo 'Cleaning targets'
+ @echo '================'
+ @echo ''
+ @echo ' clean - Remove all generated files'
+
+all_targets: $(c-sched-targets)
+
+.PHONY: all all_targets $(c-sched-targets) clean help
+
+# delete failed targets
+.DELETE_ON_ERROR:
+
+# keep intermediate (.bpf.skel.h, .bpf.o, etc) targets
+.SECONDARY:
diff --git a/tools/sched_ext/README.md b/tools/sched_ext/README.md
new file mode 100644
index 000000000000..16a42e4060f6
--- /dev/null
+++ b/tools/sched_ext/README.md
@@ -0,0 +1,270 @@
+SCHED_EXT EXAMPLE SCHEDULERS
+============================
+
+# Introduction
+
+This directory contains a number of example sched_ext schedulers. These
+schedulers are meant to provide examples of different types of schedulers
+that can be built using sched_ext, and illustrate how various features of
+sched_ext can be used.
+
+Some of the examples are performant, production-ready schedulers. That is, for
+the correct workload and with the correct tuning, they may be deployed in a
+production environment with acceptable or possibly even improved performance.
+Others are just examples that, in practice, would not provide acceptable
+performance (though they could be improved to get there).
+
+This README describes these example schedulers, including the types of
+workloads or scenarios they're designed to accommodate, and whether or
+not they're production ready. For more details on any of these schedulers,
+please see the header comment in their .bpf.c file.
+
+
+# Compiling the examples
+
+There are a few toolchain dependencies for compiling the example schedulers.
+
+## Toolchain dependencies
+
+1. clang >= 16.0.0
+
+The schedulers are BPF programs, and therefore must be compiled with clang. gcc
+is actively working on adding a BPF backend compiler as well, but it is still
+missing some features, such as the BTF type tags that are necessary for using
+kptrs.
+
+2. pahole >= 1.25
+
+You may need pahole in order to generate BTF from DWARF.
+
+3. rust >= 1.70.0
+
+Rust schedulers use features present in the Rust toolchain >= 1.70.0. You
+should be able to use the stable build from rustup, but if that doesn't
+work, try using the rustup nightly build.
+
+There are other requirements as well, such as make, but these are the main /
+non-trivial ones.
+
+## Compiling the kernel
+
+In order to run a sched_ext scheduler, you'll have to run a kernel compiled
+with the patches in this repository, and with a minimum set of necessary
+Kconfig options:
+
+```
+CONFIG_BPF=y
+CONFIG_SCHED_CLASS_EXT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_DEBUG_INFO_BTF=y
+```
+
+It's also recommended that you include the following Kconfig options:
+
+```
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+CONFIG_PAHOLE_HAS_SPLIT_BTF=y
+CONFIG_PAHOLE_HAS_BTF_TAG=y
+```
+
+There is a `Kconfig` file in this directory whose contents you can append to
+your local `.config` file, as long as there are no conflicts with any existing
+options in the file.
+
+## Getting a vmlinux.h file
+
+You may notice that most of the example schedulers include a "vmlinux.h" file.
+This is a large, auto-generated header file that contains all of the types
+defined in some vmlinux binary that was compiled with
+[BTF](https://docs.kernel.org/bpf/btf.html) (i.e. with the BTF-related Kconfig
+options specified above).
+
+The header file is created using `bpftool`, by passing it a vmlinux binary
+compiled with BTF as follows:
+
+```bash
+$ bpftool btf dump file /path/to/vmlinux format c > vmlinux.h
+```
+
+`bpftool` analyzes all of the BTF encodings in the binary, and produces a
+header file that can be included by BPF programs to access those types. For
+example, using vmlinux.h allows a scheduler to access fields defined directly
+in vmlinux as follows:
+
+```c
+#include "vmlinux.h"
+// vmlinux.h is also implicitly included by common.bpf.h.
+#include <scx/common.bpf.h>
+
+/*
+ * vmlinux.h provides definitions for struct task_struct and
+ * struct scx_enable_args.
+ */
+void BPF_STRUCT_OPS(example_enable, struct task_struct *p,
+ struct scx_enable_args *args)
+{
+ bpf_printk("Task %s enabled in example scheduler", p->comm);
+}
+
+// vmlinux.h provides the definition for struct sched_ext_ops.
+SEC(".struct_ops.link")
+struct sched_ext_ops example_ops = {
+	.enable	= (void *)example_enable,
+	.name	= "example",
+};
+```
+
+The build system generates this vmlinux.h file as part of the scheduler
+build pipeline. It looks for a vmlinux file in the following
+dependency order:
+
+1. If the O= environment variable is defined, at `$O/vmlinux`
+2. If the KBUILD_OUTPUT= environment variable is defined, at
+ `$KBUILD_OUTPUT/vmlinux`
+3. At `../../vmlinux` (i.e. at the root of the kernel tree where you're
+ compiling the schedulers)
+4. `/sys/kernel/btf/vmlinux`
+5. `/boot/vmlinux-$(uname -r)`
+
+In other words, if you have compiled a kernel in your local repo, its vmlinux
+file will be used to generate vmlinux.h. Otherwise, it will be the vmlinux of
+the kernel you're currently running on. This means that if you're running on a
+kernel with sched_ext support, you may not need to compile a local kernel at
+all.
+
+### Aside on CO-RE
+
+One of the cooler features of BPF is that it supports
+[CO-RE](https://nakryiko.com/posts/bpf-core-reference-guide/) (Compile Once Run
+Everywhere). This feature allows you to reference fields inside of structs with
+types defined internal to the kernel, and not have to recompile if you load the
+BPF program on a different kernel with the field at a different offset. In our
+example above, we print out a task name with `p->comm`. CO-RE would perform
+relocations for that access when the program is loaded to ensure that it's
+referencing the correct offset for the currently running kernel.
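+
+As a rough sketch (not from the kernel tree), the same field access could be
+written with an explicit CO-RE accessor from libbpf's `bpf_core_read.h`; the
+`print_task_comm()` helper below is hypothetical:
+
+```c
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+/* Read p->comm through an explicitly CO-RE-relocated string read. */
+static inline void print_task_comm(struct task_struct *p)
+{
+	char comm[16];
+
+	/* Records a relocation so the comm offset is resolved at load time. */
+	BPF_CORE_READ_STR_INTO(&comm, p, comm);
+	bpf_printk("comm=%s", comm);
+}
+```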
+
+## Compiling the schedulers
+
+Once you have your toolchain setup, and a vmlinux that can be used to generate
+a full vmlinux.h file, you can compile the schedulers using `make`:
+
+```bash
+$ make -j$(nproc)
+```
+
+# Example schedulers
+
+This directory contains the following example schedulers. These schedulers are
+for testing and demonstrating different aspects of sched_ext. While some may be
+useful in limited scenarios, they are not intended to be practical.
+
+For more scheduler implementations, tools and documentation, visit
+https://github.com/sched-ext/scx.
+
+## scx_simple
+
+A simple scheduler that provides an example of a minimal sched_ext scheduler.
+scx_simple can be run in either global weighted vtime mode, or FIFO mode.
+
+Though very simple, in limited scenarios, this scheduler can perform reasonably
+well on single-socket systems with a unified L3 cache.
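+
+As a hedged illustration of what "minimal" means here, a scheduler of this
+shape needs little more than an enqueue callback and a name (hypothetical
+sketch, not the actual scx_simple source):
+
+```c
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+void BPF_STRUCT_OPS(minimal_enqueue, struct task_struct *p, u64 enq_flags)
+{
+	/* Everything goes to the shared global DSQ; idle CPUs consume it. */
+	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops minimal_ops = {
+	.enqueue	= (void *)minimal_enqueue,
+	.name		= "minimal",
+};
+```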
+
+## scx_qmap
+
+Another simple scheduler, slightly more complex than scx_simple, providing an
+example of a basic weighted FIFO queuing policy. It also provides examples of
+some common useful BPF features, such as sleepable per-task storage allocation
+in the `ops.prep_enable()` callback, and using the `BPF_MAP_TYPE_QUEUE` map
+type to enqueue tasks. It also illustrates how core-sched support could be
+implemented. See the queue-map sketch below.
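+
+For reference, the `BPF_MAP_TYPE_QUEUE` pattern scx_qmap builds on looks
+roughly like the following (illustrative names, not the scheduler's own;
+assumes the common.bpf.h/vmlinux.h environment described above):
+
+```c
+struct {
+	__uint(type, BPF_MAP_TYPE_QUEUE);
+	__uint(max_entries, 4096);
+	__type(value, s32);
+} pid_q SEC(".maps");
+
+/* Push a pid at enqueue time; a dispatch path pops it later. */
+static int queue_task_pid(struct task_struct *p)
+{
+	s32 pid = p->pid;
+
+	/* Returns a negative errno if the queue is full. */
+	return bpf_map_push_elem(&pid_q, &pid, 0);
+}
+```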
+
+## scx_central
+
+A "central" scheduler where scheduling decisions are made from a single CPU.
+This scheduler illustrates how scheduling decisions can be dispatched from a
+single CPU, allowing other cores to run with infinite slices, without timer
+ticks, and without having to incur the overhead of making scheduling decisions.
+
+The approach demonstrated by this scheduler may be useful for any workload that
+benefits from minimizing scheduling overhead and timer ticks. An example of
+where this could be particularly useful is running VMs, where running with
+infinite slices and no timer ticks allows the VM to avoid unnecessary expensive
+vmexits.
+
+## scx_flatcg
+
+A flattened cgroup hierarchy scheduler. This scheduler implements hierarchical
+weight-based cgroup CPU control by flattening the cgroup hierarchy into a single
+layer, by compounding the active weight share at each level. The effect of this
+is a much more performant CPU controller, which does not need to descend down
+cgroup trees in order to properly compute a cgroup's share.
+
+Similar to scx_simple, in limited scenarios, this scheduler can perform
+reasonably well on single-socket systems with a unified L3 cache and show
+significantly lowered hierarchical scheduling overhead.
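+
+The compounding itself is simple arithmetic. A standalone sketch (plain C with
+an assumed fixed-point scale, not taken from the scheduler source):
+
+```c
+/*
+ * weight[i] is a cgroup's weight at level i and sibling_sum[i] is the total
+ * active weight competing at that level.
+ */
+static unsigned long long compound_share(const unsigned long long *weight,
+					 const unsigned long long *sibling_sum,
+					 int levels)
+{
+	unsigned long long hweight = 1ULL << 16;	/* fixed-point 1.0 */
+	int i;
+
+	for (i = 0; i < levels; i++)
+		hweight = hweight * weight[i] / sibling_sum[i];
+	return hweight;
+}
+```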
+
+
+# Troubleshooting
+
+There are a number of common issues that you may run into when building the
+schedulers. We'll go over some of the common ones here.
+
+## Build Failures
+
+### Old version of clang
+
+```
+error: static assertion failed due to requirement 'SCX_DSQ_FLAG_BUILTIN': bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole
+ _Static_assert(SCX_DSQ_FLAG_BUILTIN,
+ ^~~~~~~~~~~~~~~~~~~~
+1 error generated.
+```
+
+This means you built the kernel or the schedulers with an older version of
+clang than what's supported (i.e. older than 16.0.0). To remediate this:
+
+1. `which clang` to make sure you're using a sufficiently new version of clang.
+
+2. `make fullclean` in the root path of the repository to remove stale build
+   artifacts.
+
+3. Rebuild the kernel, and then your example schedulers.
+
+The schedulers are also cleaned if you invoke `make mrproper` in the root
+directory of the tree.
+
+### Stale kernel build / incomplete vmlinux.h file
+
+As described above, you'll need a `vmlinux.h` file that was generated from a
+vmlinux built with BTF, and with sched_ext support enabled. If you don't,
+you'll see errors such as the following which indicate that a type being
+referenced in a scheduler is unknown:
+
+```
+/path/to/sched_ext/tools/sched_ext/user_exit_info.h:25:23: note: forward declaration of 'struct scx_exit_info'
+
+const struct scx_exit_info *ei)
+
+^
+```
+
+To resolve this, please follow the steps above in
+[Getting a vmlinux.h file](#getting-a-vmlinuxh-file) to ensure your schedulers
+are using a vmlinux.h file that includes the requisite types.
+
+## Misc
+
+### llvm: [OFF]
+
+You may see the following output when building the schedulers:
+
+```
+Auto-detecting system features:
+... clang-bpf-co-re: [ on ]
+... llvm: [ OFF ]
+... libcap: [ on ]
+... libbfd: [ on ]
+```
+
+Seeing `llvm: [ OFF ]` here is not an issue. You can safely ignore it.
diff --git a/tools/sched_ext/include/bpf-compat/gnu/stubs.h b/tools/sched_ext/include/bpf-compat/gnu/stubs.h
new file mode 100644
index 000000000000..ad7d139ce907
--- /dev/null
+++ b/tools/sched_ext/include/bpf-compat/gnu/stubs.h
@@ -0,0 +1,11 @@
+/*
+ * Dummy gnu/stubs.h. clang can end up including /usr/include/gnu/stubs.h when
+ * compiling BPF files although its content doesn't play any role. The file in
+ * turn includes stubs-64.h or stubs-32.h depending on whether __x86_64__ is
+ * defined. When compiling a BPF source, __x86_64__ isn't set and thus
+ * stubs-32.h is selected. However, the file is not there if the system doesn't
+ * have 32bit glibc devel package installed leading to a build failure.
+ *
+ * The problem is worked around by making this file available in the include
+ * search paths before the system one when building BPF.
+ */
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
new file mode 100644
index 000000000000..225f61f9bfca
--- /dev/null
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifndef __SCX_COMMON_BPF_H
+#define __SCX_COMMON_BPF_H
+
+#ifdef LSP
+#define __bpf__
+#include "../vmlinux/vmlinux.h"
+#else
+#include "vmlinux.h"
+#endif
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <asm-generic/errno.h>
+#include "user_exit_info.h"
+
+#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
+#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+#define PF_EXITING 0x00000004
+#define CLOCK_MONOTONIC 1
+
+/*
+ * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
+ * lead to really confusing misbehaviors. Let's trigger a build failure.
+ */
+static inline void ___vmlinux_h_sanity_check___(void)
+{
+ _Static_assert(SCX_DSQ_FLAG_BUILTIN,
+ "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole");
+}
+
+s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
+s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
+void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym;
+void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym;
+u32 scx_bpf_dispatch_nr_slots(void) __ksym;
+void scx_bpf_dispatch_cancel(void) __ksym;
+bool scx_bpf_consume(u64 dsq_id) __ksym;
+void scx_bpf_dispatch_from_dsq_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym;
+void scx_bpf_dispatch_from_dsq_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym;
+bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
+bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
+u32 scx_bpf_reenqueue_local(void) __ksym;
+void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
+s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
+void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
+int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
+struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
+void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
+void scx_bpf_exit_bstr(s64 exit_code, char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
+void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym;
+void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym __weak;
+u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
+u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
+void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
+u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
+const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
+const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
+void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
+const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
+const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
+void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
+bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
+s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
+s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
+bool scx_bpf_task_running(const struct task_struct *p) __ksym;
+s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
+struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
+struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym;
+
+/*
+ * Use the following as @it__iter when calling
+ * scx_bpf_dispatch[_vtime]_from_dsq() from within bpf_for_each() loops.
+ */
+#define BPF_FOR_EACH_ITER (&___it)
+
+static inline __attribute__((format(printf, 1, 2)))
+void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
+
+/*
+ * Helper macro for initializing the fmt and variadic argument inputs to both
+ * bstr exit kfuncs. Users of this macro should use ___fmt and ___param to
+ * refer to the initialized list of inputs to the bstr kfunc.
+ */
+#define scx_bpf_bstr_preamble(fmt, args...) \
+ static char ___fmt[] = fmt; \
+ /* \
+	 * Note that ___param[] must have at least one		\
+ * element to keep the verifier happy. \
+ */ \
+ unsigned long long ___param[___bpf_narg(args) ?: 1] = {}; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+
+/*
+ * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
+ * instead of an array of u64. Using this macro will cause the scheduler to
+ * exit cleanly with the specified exit code being passed to user space.
+ */
+#define scx_bpf_exit(code, fmt, args...) \
+({ \
+ scx_bpf_bstr_preamble(fmt, args) \
+ scx_bpf_exit_bstr(code, ___fmt, ___param, sizeof(___param)); \
+ ___scx_bpf_bstr_format_checker(fmt, ##args); \
+})
+
+/*
+ * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
+ * instead of an array of u64. Invoking this macro will cause the scheduler to
+ * exit in an erroneous state, with diagnostic information being passed to the
+ * user.
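+ *
+ * Example (illustrative): scx_bpf_error("cpu_ctx lookup failed");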
+ */
+#define scx_bpf_error(fmt, args...) \
+({ \
+ scx_bpf_bstr_preamble(fmt, args) \
+ scx_bpf_error_bstr(___fmt, ___param, sizeof(___param)); \
+ ___scx_bpf_bstr_format_checker(fmt, ##args); \
+})
+
+/*
+ * scx_bpf_dump() wraps the scx_bpf_dump_bstr() kfunc with variadic arguments
+ * instead of an array of u64. To be used from ops.dump() and friends.
+ */
+#define scx_bpf_dump(fmt, args...) \
+({ \
+ scx_bpf_bstr_preamble(fmt, args) \
+ scx_bpf_dump_bstr(___fmt, ___param, sizeof(___param)); \
+ ___scx_bpf_bstr_format_checker(fmt, ##args); \
+})
+
+#define BPF_STRUCT_OPS(name, args...) \
+SEC("struct_ops/"#name) \
+BPF_PROG(name, ##args)
+
+#define BPF_STRUCT_OPS_SLEEPABLE(name, args...) \
+SEC("struct_ops.s/"#name) \
+BPF_PROG(name, ##args)
+
+/**
+ * RESIZABLE_ARRAY - Generates annotations for an array that may be resized
+ * @elfsec: the data section of the BPF program in which to place the array
+ * @arr: the name of the array
+ *
+ * libbpf has an API for setting map value sizes. Since data sections (i.e.
+ * bss, data, rodata) themselves are maps, a data section can be resized. If
+ * a data section has an array as its last element, the BTF info for that
+ * array will be adjusted so that length of the array is extended to meet the
+ * new length of the data section. This macro annotates an array to have an
+ * element count of one with the assumption that this array can be resized
+ * within the userspace program. It also annotates the section specifier so
+ * this array exists in a custom sub data section which can be resized
+ * independently.
+ *
+ * See RESIZE_ARRAY() for the userspace convenience macro for resizing an
+ * array declared with RESIZABLE_ARRAY().
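+ *
+ * Example (illustrative) of declaring a u64 array in the .data section that
+ * userspace later resizes to one element per CPU:
+ *
+ *	u64 RESIZABLE_ARRAY(data, cpu_started_at);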
+ */
+#define RESIZABLE_ARRAY(elfsec, arr) arr[1] SEC("."#elfsec"."#arr)
+
+/**
+ * MEMBER_VPTR - Obtain the verified pointer to a struct or array member
+ * @base: struct or array to index
+ * @member: dereferenced member (e.g. .field, [idx0][idx1], .field[idx0] ...)
+ *
+ * The verifier often gets confused by the instruction sequence the compiler
+ * generates for indexing struct fields or arrays. This macro forces the
+ * compiler to generate a code sequence which first calculates the byte offset,
+ * checks it against the struct or array size, and adds that byte offset to
+ * generate the pointer to the member to help the verifier.
+ *
+ * Ideally, we want to abort if the calculated offset is out-of-bounds. However,
+ * BPF currently doesn't support abort, so evaluate to %NULL instead. The caller
+ * must check for %NULL and take appropriate action to appease the verifier. To
+ * avoid confusing the verifier, it's best to check for %NULL and dereference
+ * immediately.
+ *
+ * vptr = MEMBER_VPTR(my_array, [i][j]);
+ * if (!vptr)
+ * return error;
+ * *vptr = new_value;
+ *
+ * sizeof(@base) should encompass the memory area to be accessed and thus can't
+ * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
+ * `MEMBER_VPTR(ptr, ->member)`.
+ */
+#define MEMBER_VPTR(base, member) (typeof((base) member) *) \
+({ \
+ u64 __base = (u64)&(base); \
+ u64 __addr = (u64)&((base) member) - __base; \
+ _Static_assert(sizeof(base) >= sizeof((base) member), \
+ "@base is smaller than @member, is @base a pointer?"); \
+ asm volatile ( \
+ "if %0 <= %[max] goto +2\n" \
+ "%0 = 0\n" \
+ "goto +1\n" \
+ "%0 += %1\n" \
+ : "+r"(__addr) \
+ : "r"(__base), \
+ [max]"i"(sizeof(base) - sizeof((base) member))); \
+ __addr; \
+})
+
+/**
+ * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
+ * @arr: array to index into
+ * @i: array index
+ * @n: number of elements in array
+ *
+ * Similar to MEMBER_VPTR() but is intended for use with arrays where the
+ * element count needs to be explicit.
+ * It can be used in cases where a global array is defined with an initial
+ * size but is intended to be resized before loading the BPF program.
+ * Without this version of the macro, MEMBER_VPTR() will use the compile time
+ * size of the array to compute the max, which will result in rejection by
+ * the verifier.
+ */
+#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *) \
+({ \
+ u64 __base = (u64)arr; \
+ u64 __addr = (u64)&(arr[i]) - __base; \
+ asm volatile ( \
+ "if %0 <= %[max] goto +2\n" \
+ "%0 = 0\n" \
+ "goto +1\n" \
+ "%0 += %1\n" \
+ : "+r"(__addr) \
+ : "r"(__base), \
+ [max]"r"(sizeof(arr[0]) * ((n) - 1))); \
+ __addr; \
+})
+
+
+/*
+ * BPF declarations and helpers
+ */
+
+/* list and rbtree */
+#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+
+void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
+void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
+
+#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
+#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
+
+void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
+struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
+struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
+ struct bpf_rb_node *node) __ksym;
+int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
+ bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
+ void *meta, __u64 off) __ksym;
+#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)
+
+struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
+
+void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
+#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
+
+/* task */
+struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+/* cgroup */
+struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+
+/* css iteration */
+struct bpf_iter_css;
+struct cgroup_subsys_state;
+extern int bpf_iter_css_new(struct bpf_iter_css *it,
+ struct cgroup_subsys_state *start,
+ unsigned int flags) __weak __ksym;
+extern struct cgroup_subsys_state *
+bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
+extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
+
+/* cpumask */
+struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
+struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
+void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
+u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
+u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
+void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
+bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
+bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
+bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
+void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
+void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
+bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1,
+ const struct cpumask *src2) __ksym;
+void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1,
+ const struct cpumask *src2) __ksym;
+void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1,
+ const struct cpumask *src2) __ksym;
+bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
+bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
+bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
+void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
+u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
+u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
+ const struct cpumask *src2) __ksym;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
+
+/*
+ * Access a cpumask in read-only mode (typically to check bits).
+ */
+static inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
+{
+ return (const struct cpumask *)mask;
+}
+
+/* rcu */
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
+
+
+/*
+ * Other helpers
+ */
+
+/* useful compiler attributes */
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define __maybe_unused __attribute__((__unused__))
+
+/*
+ * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They
+ * prevent compiler from caching, redoing or reordering reads or writes.
+ */
+typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
+typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
+typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
+typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
+ case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
+ case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
+ case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)res, (const void *)p, size);
+ barrier();
+ }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
+ case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
+ case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
+ case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)p, (const void *)res, size);
+ barrier();
+ }
+}
+
+#define READ_ONCE(x) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ __read_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
+#define WRITE_ONCE(x, val) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (val) }; \
+ __write_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
+/*
+ * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
+ * @v: The value for which we're computing the base 2 logarithm.
+ */
+static inline u32 log2_u32(u32 v)
+{
+ u32 r;
+ u32 shift;
+
+ r = (v > 0xFFFF) << 4; v >>= r;
+ shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+ shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+ shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+ r |= (v >> 1);
+ return r;
+}
+
+/*
+ * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
+ * @v: The value for which we're computing the base 2 logarithm.
+ */
+static inline u32 log2_u64(u64 v)
+{
+ u32 hi = v >> 32;
+ if (hi)
+ return log2_u32(hi) + 32 + 1;
+ else
+ return log2_u32(v) + 1;
+}
+
+#include "compat.bpf.h"
+
+#endif /* __SCX_COMMON_BPF_H */
diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h
new file mode 100644
index 000000000000..5b0f90152152
--- /dev/null
+++ b/tools/sched_ext/include/scx/common.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ */
+#ifndef __SCHED_EXT_COMMON_H
+#define __SCHED_EXT_COMMON_H
+
+#ifdef __KERNEL__
+#error "Should not be included by BPF programs"
+#endif
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <errno.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+#define SCX_BUG(__fmt, ...) \
+ do { \
+ fprintf(stderr, "[SCX_BUG] %s:%d", __FILE__, __LINE__); \
+ if (errno) \
+ fprintf(stderr, " (%s)\n", strerror(errno)); \
+ else \
+ fprintf(stderr, "\n"); \
+ fprintf(stderr, __fmt __VA_OPT__(,) __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ \
+ exit(EXIT_FAILURE); \
+ } while (0)
+
+#define SCX_BUG_ON(__cond, __fmt, ...) \
+ do { \
+ if (__cond) \
+ SCX_BUG((__fmt) __VA_OPT__(,) __VA_ARGS__); \
+ } while (0)
+
+/**
+ * RESIZE_ARRAY - Convenience macro for resizing a BPF array
+ * @__skel: the skeleton containing the array
+ * @elfsec: the data section of the BPF program in which the array exists
+ * @arr: the name of the array
+ * @n: the desired array element count
+ *
+ * For BPF arrays declared with RESIZABLE_ARRAY(), this macro performs two
+ * operations. It resizes the map which corresponds to the custom data
+ * section that contains the target array. As a side effect, the BTF info for
+ * the array is adjusted so that the array length is sized to cover the new
+ * data section size. The second operation is reassigning the skeleton pointer
+ * for that custom data section so that it points to the newly memory mapped
+ * region.
+ */
+#define RESIZE_ARRAY(__skel, elfsec, arr, n) \
+ do { \
+ size_t __sz; \
+ bpf_map__set_value_size((__skel)->maps.elfsec##_##arr, \
+ sizeof((__skel)->elfsec##_##arr->arr[0]) * (n)); \
+ (__skel)->elfsec##_##arr = \
+ bpf_map__initial_value((__skel)->maps.elfsec##_##arr, &__sz); \
+ } while (0)
+
+#include "user_exit_info.h"
+#include "compat.h"
+
+#endif /* __SCHED_EXT_COMMON_H */
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
new file mode 100644
index 000000000000..e5afe9efd3f3
--- /dev/null
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#ifndef __SCX_COMPAT_BPF_H
+#define __SCX_COMPAT_BPF_H
+
+#define __COMPAT_ENUM_OR_ZERO(__type, __ent) \
+({ \
+ __type __ret = 0; \
+ if (bpf_core_enum_value_exists(__type, __ent)) \
+ __ret = __ent; \
+ __ret; \
+})
+
+/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
+#define __COMPAT_scx_bpf_task_cgroup(p) \
+ (bpf_ksym_exists(scx_bpf_task_cgroup) ? \
+ scx_bpf_task_cgroup((p)) : NULL)
+
+/* v6.12: 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()") */
+#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it, slice) \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice) ? \
+ scx_bpf_dispatch_from_dsq_set_slice((it), (slice)) : (void)0)
+#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it, vtime) \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime) ? \
+ scx_bpf_dispatch_from_dsq_set_vtime((it), (vtime)) : (void)0)
+#define __COMPAT_scx_bpf_dispatch_from_dsq(it, p, dsq_id, enq_flags) \
+ (bpf_ksym_exists(scx_bpf_dispatch_from_dsq) ? \
+ scx_bpf_dispatch_from_dsq((it), (p), (dsq_id), (enq_flags)) : false)
+#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it, p, dsq_id, enq_flags) \
+ (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq) ? \
+ scx_bpf_dispatch_vtime_from_dsq((it), (p), (dsq_id), (enq_flags)) : false)
+
+/*
+ * Define sched_ext_ops. This may be expanded to define multiple variants for
+ * backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
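+ *
+ * Example (illustrative, assuming a simple_enqueue prog exists):
+ *
+ *	SCX_OPS_DEFINE(simple_ops,
+ *		       .enqueue	= (void *)simple_enqueue,
+ *		       .name	= "simple");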
+ */
+#define SCX_OPS_DEFINE(__name, ...) \
+ SEC(".struct_ops.link") \
+ struct sched_ext_ops __name = { \
+ __VA_ARGS__, \
+ };
+
+#endif /* __SCX_COMPAT_BPF_H */
diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h
new file mode 100644
index 000000000000..cc56ff9aa252
--- /dev/null
+++ b/tools/sched_ext/include/scx/compat.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#ifndef __SCX_COMPAT_H
+#define __SCX_COMPAT_H
+
+#include <bpf/btf.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+struct btf *__COMPAT_vmlinux_btf __attribute__((weak));
+
+static inline void __COMPAT_load_vmlinux_btf(void)
+{
+ if (!__COMPAT_vmlinux_btf) {
+ __COMPAT_vmlinux_btf = btf__load_vmlinux_btf();
+ SCX_BUG_ON(!__COMPAT_vmlinux_btf, "btf__load_vmlinux_btf()");
+ }
+}
+
+static inline bool __COMPAT_read_enum(const char *type, const char *name, u64 *v)
+{
+ const struct btf_type *t;
+ const char *n;
+ s32 tid;
+ int i;
+
+ __COMPAT_load_vmlinux_btf();
+
+ tid = btf__find_by_name(__COMPAT_vmlinux_btf, type);
+ if (tid < 0)
+ return false;
+
+ t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
+ SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);
+
+ if (btf_is_enum(t)) {
+ struct btf_enum *e = btf_enum(t);
+
+ for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
+ n = btf__name_by_offset(__COMPAT_vmlinux_btf, e[i].name_off);
+ SCX_BUG_ON(!n, "btf__name_by_offset()");
+ if (!strcmp(n, name)) {
+ *v = e[i].val;
+ return true;
+ }
+ }
+ } else if (btf_is_enum64(t)) {
+ struct btf_enum64 *e = btf_enum64(t);
+
+ for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
+ n = btf__name_by_offset(__COMPAT_vmlinux_btf, e[i].name_off);
+ SCX_BUG_ON(!n, "btf__name_by_offset()");
+ if (!strcmp(n, name)) {
+ *v = btf_enum64_value(&e[i]);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+#define __COMPAT_ENUM_OR_ZERO(__type, __ent) \
+({ \
+ u64 __val = 0; \
+ __COMPAT_read_enum(__type, __ent, &__val); \
+ __val; \
+})
+
+static inline bool __COMPAT_has_ksym(const char *ksym)
+{
+ __COMPAT_load_vmlinux_btf();
+ return btf__find_by_name(__COMPAT_vmlinux_btf, ksym) >= 0;
+}
+
+static inline bool __COMPAT_struct_has_field(const char *type, const char *field)
+{
+ const struct btf_type *t;
+ const struct btf_member *m;
+ const char *n;
+ s32 tid;
+ int i;
+
+ __COMPAT_load_vmlinux_btf();
+ tid = btf__find_by_name_kind(__COMPAT_vmlinux_btf, type, BTF_KIND_STRUCT);
+ if (tid < 0)
+ return false;
+
+ t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
+ SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);
+
+ m = btf_members(t);
+
+ for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
+ n = btf__name_by_offset(__COMPAT_vmlinux_btf, m[i].name_off);
+ SCX_BUG_ON(!n, "btf__name_by_offset()");
+ if (!strcmp(n, field))
+ return true;
+ }
+
+ return false;
+}
+
+#define SCX_OPS_SWITCH_PARTIAL \
+ __COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL")
+
+static inline long scx_hotplug_seq(void)
+{
+ int fd;
+ char buf[32];
+ ssize_t len;
+ long val;
+
+ fd = open("/sys/kernel/sched_ext/hotplug_seq", O_RDONLY);
+ if (fd < 0)
+ return -ENOENT;
+
+ len = read(fd, buf, sizeof(buf) - 1);
+ SCX_BUG_ON(len <= 0, "read failed (%ld)", len);
+ buf[len] = 0;
+ close(fd);
+
+ val = strtoul(buf, NULL, 10);
+	SCX_BUG_ON(val < 0, "invalid num hotplug events: %ld", val);
+
+ return val;
+}
+
+/*
+ * struct sched_ext_ops can change over time. If compat.bpf.h::SCX_OPS_DEFINE()
+ * is used to define ops and compat.h::SCX_OPS_LOAD/ATTACH() are used to load
+ * and attach it, backward compatibility is automatically maintained where
+ * reasonable.
+ *
+ * ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is
+ * the current minimum required kernel version.
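+ *
+ * Typical usage from a scheduler's main(), mirroring scx_central.c below
+ * (illustrative):
+ *
+ *	skel = SCX_OPS_OPEN(central_ops, scx_central);
+ *	... set rodata, resize arrays, parse options ...
+ *	SCX_OPS_LOAD(skel, central_ops, scx_central, uei);
+ *	link = SCX_OPS_ATTACH(skel, central_ops, scx_central);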
+ */
+#define SCX_OPS_OPEN(__ops_name, __scx_name) ({ \
+ struct __scx_name *__skel; \
+ \
+ SCX_BUG_ON(!__COMPAT_struct_has_field("sched_ext_ops", "dump"), \
+ "sched_ext_ops.dump() missing, kernel too old?"); \
+ \
+ __skel = __scx_name##__open(); \
+ SCX_BUG_ON(!__skel, "Could not open " #__scx_name); \
+ __skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq(); \
+ __skel; \
+})
+
+#define SCX_OPS_LOAD(__skel, __ops_name, __scx_name, __uei_name) ({ \
+ UEI_SET_SIZE(__skel, __ops_name, __uei_name); \
+ SCX_BUG_ON(__scx_name##__load((__skel)), "Failed to load skel"); \
+})
+
+/*
+ * New versions of bpftool now emit additional link placeholders for BPF maps,
+ * and set up BPF skeleton in such a way that libbpf will auto-attach BPF maps
+ * automatically, assuming libbpf is recent enough (v1.5+). Old libbpf will do
+ * nothing with those links and won't attempt to auto-attach maps.
+ *
+ * To maintain compatibility with older libbpf while avoiding trying to attach
+ * twice, disable the autoattach feature on newer libbpf.
+ */
+#if LIBBPF_MAJOR_VERSION > 1 || \
+ (LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION >= 5)
+#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name) \
+ bpf_map__set_autoattach((__skel)->maps.__ops_name, false)
+#else
+#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name) do {} while (0)
+#endif
+
+#define SCX_OPS_ATTACH(__skel, __ops_name, __scx_name) ({ \
+ struct bpf_link *__link; \
+ __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name); \
+ SCX_BUG_ON(__scx_name##__attach((__skel)), "Failed to attach skel"); \
+ __link = bpf_map__attach_struct_ops((__skel)->maps.__ops_name); \
+ SCX_BUG_ON(!__link, "Failed to attach struct_ops"); \
+ __link; \
+})
+
+#endif /* __SCX_COMPAT_H */
diff --git a/tools/sched_ext/include/scx/user_exit_info.h b/tools/sched_ext/include/scx/user_exit_info.h
new file mode 100644
index 000000000000..8ce2734402e1
--- /dev/null
+++ b/tools/sched_ext/include/scx/user_exit_info.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Define struct user_exit_info which is shared between BPF and userspace parts
+ * to communicate exit status and other information.
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifndef __USER_EXIT_INFO_H
+#define __USER_EXIT_INFO_H
+
+enum uei_sizes {
+ UEI_REASON_LEN = 128,
+ UEI_MSG_LEN = 1024,
+ UEI_DUMP_DFL_LEN = 32768,
+};
+
+struct user_exit_info {
+ int kind;
+ s64 exit_code;
+ char reason[UEI_REASON_LEN];
+ char msg[UEI_MSG_LEN];
+};
+
+#ifdef __bpf__
+
+#ifdef LSP
+#include "../vmlinux/vmlinux.h"
+#else
+#include "vmlinux.h"
+#endif
+#include <bpf/bpf_core_read.h>
+
+#define UEI_DEFINE(__name) \
+ char RESIZABLE_ARRAY(data, __name##_dump); \
+ const volatile u32 __name##_dump_len; \
+ struct user_exit_info __name SEC(".data")
+
+#define UEI_RECORD(__uei_name, __ei) ({ \
+ bpf_probe_read_kernel_str(__uei_name.reason, \
+ sizeof(__uei_name.reason), (__ei)->reason); \
+ bpf_probe_read_kernel_str(__uei_name.msg, \
+ sizeof(__uei_name.msg), (__ei)->msg); \
+ bpf_probe_read_kernel_str(__uei_name##_dump, \
+ __uei_name##_dump_len, (__ei)->dump); \
+ if (bpf_core_field_exists((__ei)->exit_code)) \
+ __uei_name.exit_code = (__ei)->exit_code; \
+ /* use __sync to force memory barrier */ \
+ __sync_val_compare_and_swap(&__uei_name.kind, __uei_name.kind, \
+ (__ei)->kind); \
+})
+
+#else /* !__bpf__ */
+
+#include <stdio.h>
+#include <stdbool.h>
+
+/* no need to call the following explicitly if SCX_OPS_LOAD() is used */
+#define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({ \
+ u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN; \
+ (__skel)->rodata->__uei_name##_dump_len = __len; \
+ RESIZE_ARRAY((__skel), data, __uei_name##_dump, __len); \
+})
+
+#define UEI_EXITED(__skel, __uei_name) ({ \
+ /* use __sync to force memory barrier */ \
+ __sync_val_compare_and_swap(&(__skel)->data->__uei_name.kind, -1, -1); \
+})
+
+#define UEI_REPORT(__skel, __uei_name) ({ \
+ struct user_exit_info *__uei = &(__skel)->data->__uei_name; \
+ char *__uei_dump = (__skel)->data_##__uei_name##_dump->__uei_name##_dump; \
+ if (__uei_dump[0] != '\0') { \
+ fputs("\nDEBUG DUMP\n", stderr); \
+ fputs("================================================================================\n\n", stderr); \
+ fputs(__uei_dump, stderr); \
+ fputs("\n================================================================================\n\n", stderr); \
+ } \
+ fprintf(stderr, "EXIT: %s", __uei->reason); \
+ if (__uei->msg[0] != '\0') \
+ fprintf(stderr, " (%s)", __uei->msg); \
+ fputs("\n", stderr); \
+ __uei->exit_code; \
+})
+
+/*
+ * We can't import vmlinux.h while compiling user C code. Let's duplicate
+ * the scx_exit_code definition.
+ */
+enum scx_exit_code {
+ /* Reasons */
+ SCX_ECODE_RSN_HOTPLUG = 1LLU << 32,
+
+ /* Actions */
+ SCX_ECODE_ACT_RESTART = 1LLU << 48,
+};
+
+enum uei_ecode_mask {
+ UEI_ECODE_USER_MASK = ((1LLU << 32) - 1),
+ UEI_ECODE_SYS_RSN_MASK = ((1LLU << 16) - 1) << 32,
+ UEI_ECODE_SYS_ACT_MASK = ((1LLU << 16) - 1) << 48,
+};
+
+/*
+ * These macros interpret the ecode returned from UEI_REPORT().
+ */
+#define UEI_ECODE_USER(__ecode) ((__ecode) & UEI_ECODE_USER_MASK)
+#define UEI_ECODE_SYS_RSN(__ecode) ((__ecode) & UEI_ECODE_SYS_RSN_MASK)
+#define UEI_ECODE_SYS_ACT(__ecode) ((__ecode) & UEI_ECODE_SYS_ACT_MASK)
+
+#define UEI_ECODE_RESTART(__ecode) (UEI_ECODE_SYS_ACT((__ecode)) == SCX_ECODE_ACT_RESTART)
+
+#endif /* __bpf__ */
+#endif /* __USER_EXIT_INFO_H */
diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c
new file mode 100644
index 000000000000..8dd8eb73b6b8
--- /dev/null
+++ b/tools/sched_ext/scx_central.bpf.c
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A central FIFO sched_ext scheduler which demonstrates the following:
+ *
+ * a. Making all scheduling decisions from one CPU:
+ *
+ * The central CPU is the only one making scheduling decisions. All other
+ * CPUs kick the central CPU when they run out of tasks to run.
+ *
+ * There is one global BPF queue and the central CPU schedules all CPUs by
+ * dispatching from the global queue to each CPU's local dsq from dispatch().
+ * This isn't the most straightforward design; e.g. it'd be easier to bounce
+ * through per-CPU BPF queues. The current design is chosen to maximally
+ * utilize and verify various SCX mechanisms such as LOCAL_ON dispatching.
+ *
+ * b. Tickless operation
+ *
+ * All tasks are dispatched with the infinite slice which allows stopping the
+ * ticks on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full
+ * parameter. The tickless operation can be observed through
+ * /proc/interrupts.
+ *
+ * Periodic switching is enforced by a periodic timer checking all CPUs and
+ * preempting them as necessary. Unfortunately, BPF timer currently doesn't
+ * have a way to pin to a specific CPU, so the periodic timer isn't pinned to
+ * the central CPU.
+ *
+ * c. Preemption
+ *
+ * Kthreads are unconditionally queued to the head of a matching local dsq
+ * and dispatched with SCX_DSQ_PREEMPT. This ensures that a kthread is always
+ * prioritized over user threads, which is required for ensuring forward
+ * progress as e.g. the periodic timer may run on a ksoftirqd, and if the
+ * ksoftirqd gets starved by a user thread, there may not be anything else
+ * that can preempt that user thread.
+ *
+ * SCX_KICK_PREEMPT is used to trigger scheduling and CPUs to move to the
+ * next tasks.
+ *
+ * This scheduler is designed to maximize usage of various SCX mechanisms. A
+ * more practical implementation would likely put the scheduling loop outside
+ * the central CPU's dispatch() path and add some form of priority mechanism.
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+enum {
+ FALLBACK_DSQ_ID = 0,
+ MS_TO_NS = 1000LLU * 1000,
+ TIMER_INTERVAL_NS = 1 * MS_TO_NS,
+};
+
+const volatile s32 central_cpu;
+const volatile u32 nr_cpu_ids = 1; /* !0 for veristat, set during init */
+const volatile u64 slice_ns = SCX_SLICE_DFL;
+
+bool timer_pinned = true;
+u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
+u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
+u64 nr_overflows;
+
+UEI_DEFINE(uei);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 4096);
+ __type(value, s32);
+} central_q SEC(".maps");
+
+/* can't use percpu map due to bad lookups */
+bool RESIZABLE_ARRAY(data, cpu_gimme_task);
+u64 RESIZABLE_ARRAY(data, cpu_started_at);
+
+struct central_timer {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, u32);
+ __type(value, struct central_timer);
+} central_timer SEC(".maps");
+
+static bool vtime_before(u64 a, u64 b)
+{
+ return (s64)(a - b) < 0;
+}
+
+s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ /*
+ * Steer wakeups to the central CPU as much as possible to avoid
+ * disturbing other CPUs. It's safe to blindly return the central cpu as
+ * select_cpu() is a hint and if @p can't be on it, the kernel will
+ * automatically pick a fallback CPU.
+ */
+ return central_cpu;
+}
+
+void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ s32 pid = p->pid;
+
+ __sync_fetch_and_add(&nr_total, 1);
+
+ /*
+ * Push per-cpu kthreads at the head of local dsq's and preempt the
+ * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked
+ * behind other threads which is necessary for forward progress
+ * guarantee as we depend on the BPF timer which may run from ksoftirqd.
+ */
+ if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
+ __sync_fetch_and_add(&nr_locals, 1);
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
+ enq_flags | SCX_ENQ_PREEMPT);
+ return;
+ }
+
+ if (bpf_map_push_elem(&central_q, &pid, 0)) {
+ __sync_fetch_and_add(&nr_overflows, 1);
+ scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
+ return;
+ }
+
+ __sync_fetch_and_add(&nr_queued, 1);
+
+ if (!scx_bpf_task_running(p))
+ scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
+}
+
+static bool dispatch_to_cpu(s32 cpu)
+{
+ struct task_struct *p;
+ s32 pid;
+
+ bpf_repeat(BPF_MAX_LOOPS) {
+ if (bpf_map_pop_elem(&central_q, &pid))
+ break;
+
+ __sync_fetch_and_sub(&nr_queued, 1);
+
+ p = bpf_task_from_pid(pid);
+ if (!p) {
+ __sync_fetch_and_add(&nr_lost_pids, 1);
+ continue;
+ }
+
+ /*
+ * If we can't run the task at the top, do the dumb thing and
+ * bounce it to the fallback dsq.
+ */
+ if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
+ __sync_fetch_and_add(&nr_mismatches, 1);
+ scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
+ bpf_task_release(p);
+ /*
+ * We might run out of dispatch buffer slots if we continue dispatching
+ * to the fallback DSQ, without dispatching to the local DSQ of the
+			 * target CPU. In such a case, break the loop now as the next
+			 * dispatch operation would otherwise fail.
+ */
+ if (!scx_bpf_dispatch_nr_slots())
+ break;
+ continue;
+ }
+
+ /* dispatch to local and mark that @cpu doesn't need more */
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);
+
+ if (cpu != central_cpu)
+ scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+
+ bpf_task_release(p);
+ return true;
+ }
+
+ return false;
+}
+
+void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
+{
+ if (cpu == central_cpu) {
+ /* dispatch for all other CPUs first */
+ __sync_fetch_and_add(&nr_dispatches, 1);
+
+ bpf_for(cpu, 0, nr_cpu_ids) {
+ bool *gimme;
+
+ if (!scx_bpf_dispatch_nr_slots())
+ break;
+
+ /* central's gimme is never set */
+ gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
+ if (!gimme || !*gimme)
+ continue;
+
+ if (dispatch_to_cpu(cpu))
+ *gimme = false;
+ }
+
+ /*
+ * Retry if we ran out of dispatch buffer slots as we might have
+ * skipped some CPUs and also need to dispatch for self. The ext
+ * core automatically retries if the local dsq is empty but we
+ * can't rely on that as we're dispatching for other CPUs too.
+ * Kick self explicitly to retry.
+ */
+ if (!scx_bpf_dispatch_nr_slots()) {
+ __sync_fetch_and_add(&nr_retries, 1);
+ scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
+ return;
+ }
+
+ /* look for a task to run on the central CPU */
+ if (scx_bpf_consume(FALLBACK_DSQ_ID))
+ return;
+ dispatch_to_cpu(central_cpu);
+ } else {
+ bool *gimme;
+
+ if (scx_bpf_consume(FALLBACK_DSQ_ID))
+ return;
+
+ gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
+ if (gimme)
+ *gimme = true;
+
+ /*
+ * Force dispatch on the scheduling CPU so that it finds a task
+ * to run for us.
+ */
+ scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
+ }
+}
+
+void BPF_STRUCT_OPS(central_running, struct task_struct *p)
+{
+ s32 cpu = scx_bpf_task_cpu(p);
+ u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
+ if (started_at)
+ *started_at = bpf_ktime_get_ns() ?: 1; /* 0 indicates idle */
+}
+
+void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
+{
+ s32 cpu = scx_bpf_task_cpu(p);
+ u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
+ if (started_at)
+ *started_at = 0;
+}
+
+static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
+{
+ u64 now = bpf_ktime_get_ns();
+ u64 nr_to_kick = nr_queued;
+ s32 i, curr_cpu;
+
+ curr_cpu = bpf_get_smp_processor_id();
+ if (timer_pinned && (curr_cpu != central_cpu)) {
+ scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
+ curr_cpu, central_cpu);
+ return 0;
+ }
+
+ bpf_for(i, 0, nr_cpu_ids) {
+ s32 cpu = (nr_timers + i) % nr_cpu_ids;
+ u64 *started_at;
+
+ if (cpu == central_cpu)
+ continue;
+
+ /* kick iff the current one exhausted its slice */
+ started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
+ if (started_at && *started_at &&
+ vtime_before(now, *started_at + slice_ns))
+ continue;
+
+ /* and there's something pending */
+ if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
+ scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
+ ;
+ else if (nr_to_kick)
+ nr_to_kick--;
+ else
+ continue;
+
+ scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
+ }
+
+ bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
+ __sync_fetch_and_add(&nr_timers, 1);
+ return 0;
+}
+
+int BPF_STRUCT_OPS_SLEEPABLE(central_init)
+{
+ u32 key = 0;
+ struct bpf_timer *timer;
+ int ret;
+
+ ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
+ if (ret)
+ return ret;
+
+ timer = bpf_map_lookup_elem(&central_timer, &key);
+ if (!timer)
+ return -ESRCH;
+
+ if (bpf_get_smp_processor_id() != central_cpu) {
+ scx_bpf_error("init from non-central CPU");
+ return -EINVAL;
+ }
+
+ bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(timer, central_timerfn);
+
+ ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
+ /*
+ * BPF_F_TIMER_CPU_PIN is pretty new (>=6.7). If we're running in a
+ * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
+ * Retry without the PIN. This would be the perfect use case for
+ * bpf_core_enum_value_exists() but the enum type doesn't have a name
+ * and can't be used with bpf_core_enum_value_exists(). Oh well...
+ */
+ if (ret == -EINVAL) {
+ timer_pinned = false;
+ ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
+ }
+ if (ret)
+ scx_bpf_error("bpf_timer_start failed (%d)", ret);
+ return ret;
+}
+
+void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SCX_OPS_DEFINE(central_ops,
+ /*
+ * We are offloading all scheduling decisions to the central CPU
+ * and thus being the last task on a given CPU doesn't mean
+ * anything special. Enqueue the last tasks like any other tasks.
+ */
+ .flags = SCX_OPS_ENQ_LAST,
+
+ .select_cpu = (void *)central_select_cpu,
+ .enqueue = (void *)central_enqueue,
+ .dispatch = (void *)central_dispatch,
+ .running = (void *)central_running,
+ .stopping = (void *)central_stopping,
+ .init = (void *)central_init,
+ .exit = (void *)central_exit,
+ .name = "central");
diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c
new file mode 100644
index 000000000000..21deea320bd7
--- /dev/null
+++ b/tools/sched_ext/scx_central.c
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include "scx_central.bpf.skel.h"
+
+const char help_fmt[] =
+"A central FIFO sched_ext scheduler.\n"
+"\n"
+"See the top-level comment in .bpf.c for more details.\n"
+"\n"
+"Usage: %s [-s SLICE_US] [-c CPU]\n"
+"\n"
+" -s SLICE_US Override slice duration\n"
+" -c CPU Override the central CPU (default: 0)\n"
+" -v Print libbpf debug messages\n"
+" -h Display this help and exit\n";
+
+static bool verbose;
+static volatile int exit_req;
+
+static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !verbose)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+static void sigint_handler(int dummy)
+{
+ exit_req = 1;
+}
+
+int main(int argc, char **argv)
+{
+ struct scx_central *skel;
+ struct bpf_link *link;
+ __u64 seq = 0, ecode;
+ __s32 opt;
+ cpu_set_t *cpuset;
+
+ libbpf_set_print(libbpf_print_fn);
+ signal(SIGINT, sigint_handler);
+ signal(SIGTERM, sigint_handler);
+restart:
+ skel = SCX_OPS_OPEN(central_ops, scx_central);
+
+ skel->rodata->central_cpu = 0;
+ skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
+
+	while ((opt = getopt(argc, argv, "s:c:vh")) != -1) {
+ switch (opt) {
+ case 's':
+ skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
+ break;
+ case 'c':
+ skel->rodata->central_cpu = strtoul(optarg, NULL, 0);
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ default:
+ fprintf(stderr, help_fmt, basename(argv[0]));
+ return opt != 'h';
+ }
+ }
+
+ /* Resize arrays so their element count is equal to cpu count. */
+ RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids);
+ RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids);
+
+ SCX_OPS_LOAD(skel, central_ops, scx_central, uei);
+
+ /*
+ * Affinitize the loading thread to the central CPU, as:
+ * - That's where the BPF timer is first invoked in the BPF program.
+ * - We probably don't want this user space component to take up a core
+ * from a task that would benefit from avoiding preemption on one of
+ * the tickless cores.
+ *
+ * Until BPF supports pinning the timer, it's not guaranteed that it
+ * will always be invoked on the central CPU. In practice, this
+ * suffices the majority of the time.
+ */
+ cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids);
+ SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
+ CPU_ZERO(cpuset);
+ CPU_SET(skel->rodata->central_cpu, cpuset);
+	SCX_BUG_ON(sched_setaffinity(0, CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids),
+				     cpuset),
+ "Failed to affinitize to central CPU %d (max %d)",
+ skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);
+ CPU_FREE(cpuset);
+
+ link = SCX_OPS_ATTACH(skel, central_ops, scx_central);
+
+ if (!skel->data->timer_pinned)
+ printf("WARNING : BPF_F_TIMER_CPU_PIN not available, timer not pinned to central\n");
+
+ while (!exit_req && !UEI_EXITED(skel, uei)) {
+ printf("[SEQ %llu]\n", seq++);
+ printf("total :%10" PRIu64 " local:%10" PRIu64 " queued:%10" PRIu64 " lost:%10" PRIu64 "\n",
+ skel->bss->nr_total,
+ skel->bss->nr_locals,
+ skel->bss->nr_queued,
+ skel->bss->nr_lost_pids);
+ printf("timer :%10" PRIu64 " dispatch:%10" PRIu64 " mismatch:%10" PRIu64 " retry:%10" PRIu64 "\n",
+ skel->bss->nr_timers,
+ skel->bss->nr_dispatches,
+ skel->bss->nr_mismatches,
+ skel->bss->nr_retries);
+ printf("overflow:%10" PRIu64 "\n",
+ skel->bss->nr_overflows);
+ fflush(stdout);
+ sleep(1);
+ }
+
+ bpf_link__destroy(link);
+ ecode = UEI_REPORT(skel, uei);
+ scx_central__destroy(skel);
+
+ if (UEI_ECODE_RESTART(ecode))
+ goto restart;
+ return 0;
+}
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
new file mode 100644
index 000000000000..b722baf6da4b
--- /dev/null
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -0,0 +1,957 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A demo sched_ext flattened cgroup hierarchy scheduler. It implements
+ * hierarchical weight-based cgroup CPU control by flattening the cgroup
+ * hierarchy into a single layer by compounding the active weight share at each
+ * level. Consider the following hierarchy with weights in parentheses:
+ *
+ * R + A (100) + B (100)
+ * | \ C (100)
+ * \ D (200)
+ *
+ * Ignoring the root and threaded cgroups, only B, C and D can contain tasks.
+ * Let's say all three have runnable tasks. The total share that each of these
+ * three cgroups is entitled to can be calculated by compounding its share at
+ * each level.
+ *
+ * For example, B is competing against C and in that competition its share is
+ * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's
+ * share in that competition is 100/(200+100) == 1/3. B's eventual share in the
+ * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6. C's
+ * eventual share is the same at 1/6. D is only competing at the top level and
+ * its share is 200/(100+200) == 2/3.
+ *
+ * So, instead of hierarchically scheduling level-by-level, we can consider it
+ * as B, C and D competing each other with respective share of 1/6, 1/6 and 2/3
+ * and keep updating the eventual shares as the cgroups' runnable states change.
+ *
+ * This flattening of hierarchy can bring a substantial performance gain when
+ * the cgroup hierarchy is nested multiple levels. In a simple benchmark using
+ * wrk[8] on apache serving a CGI script calculating sha1sum of a small file, it
+ * outperforms CFS by ~3% with CPU controller disabled and by ~10% with two
+ * apache instances competing with 2:1 weight ratio nested four level deep.
+ *
+ * However, the gain comes at the cost of not being able to properly handle
+ * thundering herd of cgroups. For example, if many cgroups which are nested
+ * behind a low priority parent cgroup wake up around the same time, they may be
+ * able to consume more CPU cycles than they are entitled to. In many use cases,
+ * this isn't a real concern especially given the performance gain. Also, there
+ * are ways to mitigate the problem further by e.g. introducing an extra
+ * scheduling layer on cgroup delegation boundaries.
+ *
+ * The scheduler first picks the cgroup to run and then schedules the tasks
+ * within it using nested weighted vtime scheduling by default. The
+ * cgroup-internal scheduling can be switched to FIFO with the -f option.
+ */
+#include <scx/common.bpf.h>
+#include "scx_flatcg.h"
+
+/*
+ * Maximum number of retries to find a valid cgroup.
+ */
+enum {
+ FALLBACK_DSQ = 0,
+ CGROUP_MAX_RETRIES = 1024,
+};
+
+char _license[] SEC("license") = "GPL";
+
+const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */
+const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL;
+const volatile bool fifo_sched;
+
+u64 cvtime_now;
+UEI_DEFINE(uei);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, FCG_NR_STATS);
+} stats SEC(".maps");
+
+static void stat_inc(enum fcg_stat_idx idx)
+{
+ u32 idx_v = idx;
+
+ u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v);
+ if (cnt_p)
+ (*cnt_p)++;
+}
+
+struct fcg_cpu_ctx {
+ u64 cur_cgid;
+ u64 cur_at;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct fcg_cpu_ctx);
+ __uint(max_entries, 1);
+} cpu_ctx SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct fcg_cgrp_ctx);
+} cgrp_ctx SEC(".maps");
+
+struct cgv_node {
+ struct bpf_rb_node rb_node;
+ __u64 cvtime;
+ __u64 cgid;
+};
+
+private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
+private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node);
+
+struct cgv_node_stash {
+ struct cgv_node __kptr *node;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 16384);
+ __type(key, __u64);
+ __type(value, struct cgv_node_stash);
+} cgv_node_stash SEC(".maps");
+
+struct fcg_task_ctx {
+ u64 bypassed_at;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct fcg_task_ctx);
+} task_ctx SEC(".maps");
+
+/* gets inc'd on weight tree changes to expire the cached hweights */
+u64 hweight_gen = 1;
+
+static u64 div_round_up(u64 dividend, u64 divisor)
+{
+ return (dividend + divisor - 1) / divisor;
+}
+
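+/*
+ * Wraparound-safe comparison: the subtraction is evaluated in s64, so the
+ * result stays correct across u64 overflow. E.g. with a == 2 and
+ * b == 2^64 - 4, (s64)(a - b) == 6 > 0 and a is correctly treated as coming
+ * after b even though a < b numerically.
+ */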
+static bool vtime_before(u64 a, u64 b)
+{
+ return (s64)(a - b) < 0;
+}
+
+static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+ struct cgv_node *cgc_a, *cgc_b;
+
+ cgc_a = container_of(a, struct cgv_node, rb_node);
+ cgc_b = container_of(b, struct cgv_node, rb_node);
+
+ return cgc_a->cvtime < cgc_b->cvtime;
+}
+
+static struct fcg_cpu_ctx *find_cpu_ctx(void)
+{
+ struct fcg_cpu_ctx *cpuc;
+ u32 idx = 0;
+
+ cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx);
+ if (!cpuc) {
+ scx_bpf_error("cpu_ctx lookup failed");
+ return NULL;
+ }
+ return cpuc;
+}
+
+static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp)
+{
+ struct fcg_cgrp_ctx *cgc;
+
+ cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
+ if (!cgc) {
+ scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id);
+ return NULL;
+ }
+ return cgc;
+}
+
+static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level)
+{
+ struct fcg_cgrp_ctx *cgc;
+
+ cgrp = bpf_cgroup_ancestor(cgrp, level);
+ if (!cgrp) {
+ scx_bpf_error("ancestor cgroup lookup failed");
+ return NULL;
+ }
+
+ cgc = find_cgrp_ctx(cgrp);
+ if (!cgc)
+ scx_bpf_error("ancestor cgrp_ctx lookup failed");
+ bpf_cgroup_release(cgrp);
+ return cgc;
+}
+
+static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
+{
+ int level;
+
+ if (!cgc->nr_active) {
+ stat_inc(FCG_STAT_HWT_SKIP);
+ return;
+ }
+
+ if (cgc->hweight_gen == hweight_gen) {
+ stat_inc(FCG_STAT_HWT_CACHE);
+ return;
+ }
+
+ stat_inc(FCG_STAT_HWT_UPDATES);
+ bpf_for(level, 0, cgrp->level + 1) {
+ struct fcg_cgrp_ctx *cgc;
+ bool is_active;
+
+ cgc = find_ancestor_cgrp_ctx(cgrp, level);
+ if (!cgc)
+ break;
+
+ if (!level) {
+ cgc->hweight = FCG_HWEIGHT_ONE;
+ cgc->hweight_gen = hweight_gen;
+ } else {
+ struct fcg_cgrp_ctx *pcgc;
+
+ pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
+ if (!pcgc)
+ break;
+
+ /*
+ * We can be opportunistic here and not grab the
+ * cgv_tree_lock and deal with the occasional races.
+ * However, hweight updates are already cached and
+ * relatively low-frequency. Let's just do the
+ * straightforward thing.
+ */
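+ /*
+ * E.g. for B in the example at the top of the file: A's hweight is
+ * FCG_HWEIGHT_ONE / 3 and B's share of A's child_weight_sum is
+ * 100 / 200, compounding to an hweight of FCG_HWEIGHT_ONE / 6 for B.
+ */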
+ bpf_spin_lock(&cgv_tree_lock);
+ is_active = cgc->nr_active;
+ if (is_active) {
+ cgc->hweight_gen = pcgc->hweight_gen;
+ cgc->hweight =
+ div_round_up(pcgc->hweight * cgc->weight,
+ pcgc->child_weight_sum);
+ }
+ bpf_spin_unlock(&cgv_tree_lock);
+
+ if (!is_active) {
+ stat_inc(FCG_STAT_HWT_RACE);
+ break;
+ }
+ }
+ }
+}
+
+static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
+{
+ u64 delta, cvtime, max_budget;
+
+ /*
+ * A node which is on the rbtree can't be pointed to from elsewhere yet
+ * and thus can't be updated and repositioned. Instead, we collect the
+ * vtime deltas separately and apply them asynchronously here.
+ */
+ delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
+ cvtime = cgv_node->cvtime + delta;
+
+ /*
+ * Allow a cgroup to carry the maximum budget proportional to its
+ * hweight such that a full-hweight cgroup can immediately take up at
+ * most half of the CPUs while staying at the front of the rbtree.
+ */
+ max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
+ (2 * FCG_HWEIGHT_ONE);
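+ /*
+ * E.g. with the default nr_cpus of 32 and assuming SCX_SLICE_DFL is
+ * 20ms, a full-hweight cgroup (hweight == FCG_HWEIGHT_ONE) can carry
+ * up to 20ms * 32 / 2 == 320ms of budget.
+ */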
+ if (vtime_before(cvtime, cvtime_now - max_budget))
+ cvtime = cvtime_now - max_budget;
+
+ cgv_node->cvtime = cvtime;
+}
+
+static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
+{
+ struct cgv_node_stash *stash;
+ struct cgv_node *cgv_node;
+ u64 cgid = cgrp->kn->id;
+
+ /* paired with cmpxchg in try_pick_next_cgroup() */
+ if (__sync_val_compare_and_swap(&cgc->queued, 0, 1)) {
+ stat_inc(FCG_STAT_ENQ_SKIP);
+ return;
+ }
+
+ stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
+ if (!stash) {
+ scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid);
+ return;
+ }
+
+ /* NULL if the node is already on the rbtree */
+ cgv_node = bpf_kptr_xchg(&stash->node, NULL);
+ if (!cgv_node) {
+ stat_inc(FCG_STAT_ENQ_RACE);
+ return;
+ }
+
+ bpf_spin_lock(&cgv_tree_lock);
+ cgrp_cap_budget(cgv_node, cgc);
+ bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
+ bpf_spin_unlock(&cgv_tree_lock);
+}
+
+static void set_bypassed_at(struct task_struct *p, struct fcg_task_ctx *taskc)
+{
+ /*
+ * Tell fcg_stopping() that this bypassed the regular scheduling path
+ * and should be force charged to the cgroup. 0 is used to indicate that
+ * the task isn't bypassing, so if the current runtime is 0, go back by
+ * one nanosecond.
+ */
+ taskc->bypassed_at = p->se.sum_exec_runtime ?: (u64)-1;
+}
+
+s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+ struct fcg_task_ctx *taskc;
+ bool is_idle = false;
+ s32 cpu;
+
+ cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
+
+ taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
+ if (!taskc) {
+ scx_bpf_error("task_ctx lookup failed");
+ return cpu;
+ }
+
+ /*
+ * If select_cpu_dfl() is recommending local enqueue, the target CPU is
+ * idle. Follow it and charge the cgroup later in fcg_stopping() after
+ * the fact.
+ */
+ if (is_idle) {
+ set_bypassed_at(p, taskc);
+ stat_inc(FCG_STAT_LOCAL);
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+ }
+
+ return cpu;
+}
+
+void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ struct fcg_task_ctx *taskc;
+ struct cgroup *cgrp;
+ struct fcg_cgrp_ctx *cgc;
+
+ taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
+ if (!taskc) {
+ scx_bpf_error("task_ctx lookup failed");
+ return;
+ }
+
+ /*
+ * Use the direct dispatching and force charging to deal with tasks with
+ * custom affinities so that we don't have to worry about per-cgroup
+ * dq's containing tasks that can't be executed from some CPUs.
+ */
+ if (p->nr_cpus_allowed != nr_cpus) {
+ set_bypassed_at(p, taskc);
+
+ /*
+ * The global dq is deprioritized as we don't want tasks to be able to
+ * boost themselves by constraining their cpumask. The deprioritization
+ * is rather severe, so let's not apply it to per-cpu kernel threads.
+ * This is ham-fisted. We probably want to implement per-cgroup
+ * fallback dq's instead so that we have more control over when tasks
+ * with custom cpumasks get issued.
+ */
+ if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) {
+ stat_inc(FCG_STAT_LOCAL);
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
+ } else {
+ stat_inc(FCG_STAT_GLOBAL);
+ scx_bpf_dispatch(p, FALLBACK_DSQ, SCX_SLICE_DFL, enq_flags);
+ }
+ return;
+ }
+
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgc = find_cgrp_ctx(cgrp);
+ if (!cgc)
+ goto out_release;
+
+ if (fifo_sched) {
+ scx_bpf_dispatch(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags);
+ } else {
+ u64 tvtime = p->scx.dsq_vtime;
+
+ /*
+ * Limit the amount of budget that an idling task can accumulate
+ * to one slice.
+ */
+ if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
+ tvtime = cgc->tvtime_now - SCX_SLICE_DFL;
+
+ scx_bpf_dispatch_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
+ tvtime, enq_flags);
+ }
+
+ cgrp_enqueued(cgrp, cgc);
+out_release:
+ bpf_cgroup_release(cgrp);
+}
+
+/*
+ * Walk the cgroup tree to update the active weight sums as tasks wake up and
+ * sleep. The weight sums are used as the base when calculating the proportion a
+ * given cgroup or task is entitled to at each level.
+ */
+static void update_active_weight_sums(struct cgroup *cgrp, bool runnable)
+{
+ struct fcg_cgrp_ctx *cgc;
+ bool updated = false;
+ int idx;
+
+ cgc = find_cgrp_ctx(cgrp);
+ if (!cgc)
+ return;
+
+ /*
+ * In most cases, a hot cgroup would have multiple threads going to
+ * sleep and waking up while the whole cgroup stays active. In leaf
+ * cgroups, ->nr_runnable, which is updated with __sync operations,
+ * gates ->nr_active updates, so that we don't have to grab the
+ * cgv_tree_lock repeatedly for a busy cgroup which is staying active.
+ */
+ if (runnable) {
+ if (__sync_fetch_and_add(&cgc->nr_runnable, 1))
+ return;
+ stat_inc(FCG_STAT_ACT);
+ } else {
+ if (__sync_sub_and_fetch(&cgc->nr_runnable, 1))
+ return;
+ stat_inc(FCG_STAT_DEACT);
+ }
+
+ /*
+ * If @cgrp is becoming runnable, its hweight should be refreshed after
+ * it's added to the weight tree so that enqueue has the up-to-date
+ * value. If @cgrp is becoming quiescent, the hweight should be
+ * refreshed before it's removed from the weight tree so that the usage
+ * charging which happens afterwards has access to the latest value.
+ */
+ if (!runnable)
+ cgrp_refresh_hweight(cgrp, cgc);
+
+ /* propagate upwards */
+ bpf_for(idx, 0, cgrp->level) {
+ int level = cgrp->level - idx;
+ struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
+ bool propagate = false;
+
+ cgc = find_ancestor_cgrp_ctx(cgrp, level);
+ if (!cgc)
+ break;
+ if (level) {
+ pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
+ if (!pcgc)
+ break;
+ }
+
+ /*
+ * We need the propagation protected by a lock to synchronize
+ * against weight changes. There's no reason to drop the lock at
+ * each level but bpf_spin_lock() doesn't want any function
+ * calls while locked.
+ */
+ bpf_spin_lock(&cgv_tree_lock);
+
+ if (runnable) {
+ if (!cgc->nr_active++) {
+ updated = true;
+ if (pcgc) {
+ propagate = true;
+ pcgc->child_weight_sum += cgc->weight;
+ }
+ }
+ } else {
+ if (!--cgc->nr_active) {
+ updated = true;
+ if (pcgc) {
+ propagate = true;
+ pcgc->child_weight_sum -= cgc->weight;
+ }
+ }
+ }
+
+ bpf_spin_unlock(&cgv_tree_lock);
+
+ if (!propagate)
+ break;
+ }
+
+ if (updated)
+ __sync_fetch_and_add(&hweight_gen, 1);
+
+ if (runnable)
+ cgrp_refresh_hweight(cgrp, cgc);
+}
+
+void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
+{
+ struct cgroup *cgrp;
+
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ update_active_weight_sums(cgrp, true);
+ bpf_cgroup_release(cgrp);
+}
+
+void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
+{
+ struct cgroup *cgrp;
+ struct fcg_cgrp_ctx *cgc;
+
+ if (fifo_sched)
+ return;
+
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgc = find_cgrp_ctx(cgrp);
+ if (cgc) {
+ /*
+ * @cgc->tvtime_now always progresses forward as tasks start
+ * executing. The test and update can be performed concurrently
+ * from multiple CPUs and thus racy. Any error should be
+ * contained and temporary. Let's just live with it.
+ */
+ if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
+ cgc->tvtime_now = p->scx.dsq_vtime;
+ }
+ bpf_cgroup_release(cgrp);
+}
+
+void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
+{
+ struct fcg_task_ctx *taskc;
+ struct cgroup *cgrp;
+ struct fcg_cgrp_ctx *cgc;
+
+ /*
+ * Scale the execution time by the inverse of the weight and charge.
+ *
+ * Note that the default yield implementation yields by setting
+ * @p->scx.slice to zero and the following would treat the yielding task
+ * as if it has consumed all its slice. If this penalizes yielding tasks
+ * too much, determine the execution time by taking explicit timestamps
+ * instead of depending on @p->scx.slice.
+ */
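+ /*
+ * E.g. at the default scx weight of 100 the charge equals the consumed
+ * slice time, while a task with weight 200 is charged only half of it.
+ */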
+ if (!fifo_sched)
+ p->scx.dsq_vtime +=
+ (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+
+ taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
+ if (!taskc) {
+ scx_bpf_error("task_ctx lookup failed");
+ return;
+ }
+
+ if (!taskc->bypassed_at)
+ return;
+
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ cgc = find_cgrp_ctx(cgrp);
+ if (cgc) {
+ __sync_fetch_and_add(&cgc->cvtime_delta,
+ p->se.sum_exec_runtime - taskc->bypassed_at);
+ taskc->bypassed_at = 0;
+ }
+ bpf_cgroup_release(cgrp);
+}
+
+void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
+{
+ struct cgroup *cgrp;
+
+ cgrp = __COMPAT_scx_bpf_task_cgroup(p);
+ update_active_weight_sums(cgrp, false);
+ bpf_cgroup_release(cgrp);
+}
+
+void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
+{
+ struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
+
+ cgc = find_cgrp_ctx(cgrp);
+ if (!cgc)
+ return;
+
+ if (cgrp->level) {
+ pcgc = find_ancestor_cgrp_ctx(cgrp, cgrp->level - 1);
+ if (!pcgc)
+ return;
+ }
+
+ bpf_spin_lock(&cgv_tree_lock);
+ if (pcgc && cgc->nr_active)
+ pcgc->child_weight_sum += (s64)weight - cgc->weight;
+ cgc->weight = weight;
+ bpf_spin_unlock(&cgv_tree_lock);
+}
+
+static bool try_pick_next_cgroup(u64 *cgidp)
+{
+ struct bpf_rb_node *rb_node;
+ struct cgv_node_stash *stash;
+ struct cgv_node *cgv_node;
+ struct fcg_cgrp_ctx *cgc;
+ struct cgroup *cgrp;
+ u64 cgid;
+
+ /* pop the front cgroup and wind cvtime_now accordingly */
+ bpf_spin_lock(&cgv_tree_lock);
+
+ rb_node = bpf_rbtree_first(&cgv_tree);
+ if (!rb_node) {
+ bpf_spin_unlock(&cgv_tree_lock);
+ stat_inc(FCG_STAT_PNC_NO_CGRP);
+ *cgidp = 0;
+ return true;
+ }
+
+ rb_node = bpf_rbtree_remove(&cgv_tree, rb_node);
+ bpf_spin_unlock(&cgv_tree_lock);
+
+ if (!rb_node) {
+ /*
+ * This should never happen. bpf_rbtree_first() was called
+ * above while the tree lock was held, so the node should
+ * always be present.
+ */
+ scx_bpf_error("node could not be removed");
+ return true;
+ }
+
+ cgv_node = container_of(rb_node, struct cgv_node, rb_node);
+ cgid = cgv_node->cgid;
+
+ if (vtime_before(cvtime_now, cgv_node->cvtime))
+ cvtime_now = cgv_node->cvtime;
+
+ /*
+ * If lookup fails, the cgroup's gone. Free and move on. See
+ * fcg_cgroup_exit().
+ */
+ cgrp = bpf_cgroup_from_id(cgid);
+ if (!cgrp) {
+ stat_inc(FCG_STAT_PNC_GONE);
+ goto out_free;
+ }
+
+ cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
+ if (!cgc) {
+ bpf_cgroup_release(cgrp);
+ stat_inc(FCG_STAT_PNC_GONE);
+ goto out_free;
+ }
+
+ if (!scx_bpf_consume(cgid)) {
+ bpf_cgroup_release(cgrp);
+ stat_inc(FCG_STAT_PNC_EMPTY);
+ goto out_stash;
+ }
+
+ /*
+ * Successfully consumed from the cgroup. This will be our current
+ * cgroup for the new slice. Refresh its hweight.
+ */
+ cgrp_refresh_hweight(cgrp, cgc);
+
+ bpf_cgroup_release(cgrp);
+
+ /*
+ * As the cgroup may have more tasks, add it back to the rbtree. Note
+ * that here we charge the full slice upfront and then exact later
+ * according to the actual consumption. This prevents lowpri thundering
+ * herd from saturating the machine.
+ */
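+ /*
+ * E.g. a cgroup at half hweight (FCG_HWEIGHT_ONE / 2) is charged two
+ * cgrp_slice_ns worth of cvtime per slice consumed, halving how often
+ * it's picked relative to a full-hweight cgroup.
+ */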
+ bpf_spin_lock(&cgv_tree_lock);
+ cgv_node->cvtime += cgrp_slice_ns * FCG_HWEIGHT_ONE / (cgc->hweight ?: 1);
+ cgrp_cap_budget(cgv_node, cgc);
+ bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
+ bpf_spin_unlock(&cgv_tree_lock);
+
+ *cgidp = cgid;
+ stat_inc(FCG_STAT_PNC_NEXT);
+ return true;
+
+out_stash:
+ stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
+ if (!stash) {
+ stat_inc(FCG_STAT_PNC_GONE);
+ goto out_free;
+ }
+
+ /*
+ * Paired with cmpxchg in cgrp_enqueued(). If they see the following
+ * transition, they'll enqueue the cgroup. If they are earlier, we'll
+ * see their task in the dq below and requeue the cgroup.
+ */
+ __sync_val_compare_and_swap(&cgc->queued, 1, 0);
+
+ if (scx_bpf_dsq_nr_queued(cgid)) {
+ bpf_spin_lock(&cgv_tree_lock);
+ bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
+ bpf_spin_unlock(&cgv_tree_lock);
+ stat_inc(FCG_STAT_PNC_RACE);
+ } else {
+ cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
+ if (cgv_node) {
+ scx_bpf_error("unexpected !NULL cgv_node stash");
+ goto out_free;
+ }
+ }
+
+ return false;
+
+out_free:
+ bpf_obj_drop(cgv_node);
+ return false;
+}
+
+void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
+{
+ struct fcg_cpu_ctx *cpuc;
+ struct fcg_cgrp_ctx *cgc;
+ struct cgroup *cgrp;
+ u64 now = bpf_ktime_get_ns();
+ bool picked_next = false;
+
+ cpuc = find_cpu_ctx();
+ if (!cpuc)
+ return;
+
+ if (!cpuc->cur_cgid)
+ goto pick_next_cgroup;
+
+ if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
+ if (scx_bpf_consume(cpuc->cur_cgid)) {
+ stat_inc(FCG_STAT_CNS_KEEP);
+ return;
+ }
+ stat_inc(FCG_STAT_CNS_EMPTY);
+ } else {
+ stat_inc(FCG_STAT_CNS_EXPIRE);
+ }
+
+ /*
+ * The current cgroup is expiring. It was already charged a full slice.
+ * Calculate the actual usage and accumulate the delta.
+ */
+ cgrp = bpf_cgroup_from_id(cpuc->cur_cgid);
+ if (!cgrp) {
+ stat_inc(FCG_STAT_CNS_GONE);
+ goto pick_next_cgroup;
+ }
+
+ cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
+ if (cgc) {
+ /*
+ * We want to update the vtime delta and then look for the next
+ * cgroup to execute but the latter needs to be done in a loop
+ * and we can't keep the lock held. Oh well...
+ */
+ bpf_spin_lock(&cgv_tree_lock);
+ __sync_fetch_and_add(&cgc->cvtime_delta,
+ (cpuc->cur_at + cgrp_slice_ns - now) *
+ FCG_HWEIGHT_ONE / (cgc->hweight ?: 1));
+ bpf_spin_unlock(&cgv_tree_lock);
+ } else {
+ stat_inc(FCG_STAT_CNS_GONE);
+ }
+
+ bpf_cgroup_release(cgrp);
+
+pick_next_cgroup:
+ cpuc->cur_at = now;
+
+ if (scx_bpf_consume(FALLBACK_DSQ)) {
+ cpuc->cur_cgid = 0;
+ return;
+ }
+
+ bpf_repeat(CGROUP_MAX_RETRIES) {
+ if (try_pick_next_cgroup(&cpuc->cur_cgid)) {
+ picked_next = true;
+ break;
+ }
+ }
+
+ /*
+ * This only happens if try_pick_next_cgroup() races against the enqueue
+ * path more than CGROUP_MAX_RETRIES times, which is extremely unlikely
+ * and, if it happens, likely indicates an underlying bug. There
+ * shouldn't be any stall risk as the race is against enqueue.
+ */
+ if (!picked_next)
+ stat_inc(FCG_STAT_PNC_FAIL);
+}
+
+s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
+ struct scx_init_task_args *args)
+{
+ struct fcg_task_ctx *taskc;
+ struct fcg_cgrp_ctx *cgc;
+
+ /*
+ * @p is new. Let's ensure that its task_ctx is available. We can sleep
+ * in this function and the following will automatically use GFP_KERNEL.
+ */
+ taskc = bpf_task_storage_get(&task_ctx, p, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!taskc)
+ return -ENOMEM;
+
+ taskc->bypassed_at = 0;
+
+ if (!(cgc = find_cgrp_ctx(args->cgroup)))
+ return -ENOENT;
+
+ p->scx.dsq_vtime = cgc->tvtime_now;
+
+ return 0;
+}
+
+int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp,
+ struct scx_cgroup_init_args *args)
+{
+ struct fcg_cgrp_ctx *cgc;
+ struct cgv_node *cgv_node;
+ struct cgv_node_stash empty_stash = {}, *stash;
+ u64 cgid = cgrp->kn->id;
+ int ret;
+
+ /*
+ * Technically incorrect as cgroup ID is a full 64bit value while dsq ID
+ * is 63bit. This should not be a problem in practice and would be easy
+ * to spot in the unlikely case that it breaks.
+ */
+ ret = scx_bpf_create_dsq(cgid, -1);
+ if (ret)
+ return ret;
+
+ cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!cgc) {
+ ret = -ENOMEM;
+ goto err_destroy_dsq;
+ }
+
+ cgc->weight = args->weight;
+ cgc->hweight = FCG_HWEIGHT_ONE;
+
+ ret = bpf_map_update_elem(&cgv_node_stash, &cgid, &empty_stash,
+ BPF_NOEXIST);
+ if (ret) {
+ if (ret != -ENOMEM)
+ scx_bpf_error("unexpected stash creation error (%d)",
+ ret);
+ goto err_destroy_dsq;
+ }
+
+ stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
+ if (!stash) {
+ scx_bpf_error("unexpected cgv_node stash lookup failure");
+ ret = -ENOENT;
+ goto err_destroy_dsq;
+ }
+
+ cgv_node = bpf_obj_new(struct cgv_node);
+ if (!cgv_node) {
+ ret = -ENOMEM;
+ goto err_del_cgv_node;
+ }
+
+ cgv_node->cgid = cgid;
+ cgv_node->cvtime = cvtime_now;
+
+ cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
+ if (cgv_node) {
+ scx_bpf_error("unexpected !NULL cgv_node stash");
+ ret = -EBUSY;
+ goto err_drop;
+ }
+
+ return 0;
+
+err_drop:
+ bpf_obj_drop(cgv_node);
+err_del_cgv_node:
+ bpf_map_delete_elem(&cgv_node_stash, &cgid);
+err_destroy_dsq:
+ scx_bpf_destroy_dsq(cgid);
+ return ret;
+}
+
+void BPF_STRUCT_OPS(fcg_cgroup_exit, struct cgroup *cgrp)
+{
+ u64 cgid = cgrp->kn->id;
+
+ /*
+ * For now, there's no way to find and remove the cgv_node if it's on the
+ * cgv_tree. Let's drain them in the dispatch path as they get popped
+ * off the front of the tree.
+ */
+ bpf_map_delete_elem(&cgv_node_stash, &cgid);
+ scx_bpf_destroy_dsq(cgid);
+}
+
+void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
+ struct cgroup *from, struct cgroup *to)
+{
+ struct fcg_cgrp_ctx *from_cgc, *to_cgc;
+ s64 vtime_delta;
+
+ /* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
+ if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
+ return;
+
+ vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
+ p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
+{
+ return scx_bpf_create_dsq(FALLBACK_DSQ, -1);
+}
+
+void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SCX_OPS_DEFINE(flatcg_ops,
+ .select_cpu = (void *)fcg_select_cpu,
+ .enqueue = (void *)fcg_enqueue,
+ .dispatch = (void *)fcg_dispatch,
+ .runnable = (void *)fcg_runnable,
+ .running = (void *)fcg_running,
+ .stopping = (void *)fcg_stopping,
+ .quiescent = (void *)fcg_quiescent,
+ .init_task = (void *)fcg_init_task,
+ .cgroup_set_weight = (void *)fcg_cgroup_set_weight,
+ .cgroup_init = (void *)fcg_cgroup_init,
+ .cgroup_exit = (void *)fcg_cgroup_exit,
+ .cgroup_move = (void *)fcg_cgroup_move,
+ .init = (void *)fcg_init,
+ .exit = (void *)fcg_exit,
+ .flags = SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
+ .name = "flatcg");
diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c
new file mode 100644
index 000000000000..5d24ca9c29d9
--- /dev/null
+++ b/tools/sched_ext/scx_flatcg.c
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ */
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <limits.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <time.h>
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include "scx_flatcg.h"
+#include "scx_flatcg.bpf.skel.h"
+
+#ifndef FILEID_KERNFS
+#define FILEID_KERNFS 0xfe
+#endif
+
+const char help_fmt[] =
+"A flattened cgroup hierarchy sched_ext scheduler.\n"
+"\n"
+"See the top-level comment in .bpf.c for more details.\n"
+"\n"
+"Usage: %s [-s SLICE_US] [-i INTERVAL] [-f] [-v]\n"
+"\n"
+" -s SLICE_US Override slice duration\n"
+" -i INTERVAL Report interval\n"
+" -f Use FIFO scheduling instead of weighted vtime scheduling\n"
+" -v Print libbpf debug messages\n"
+" -h Display this help and exit\n";
+
+static bool verbose;
+static volatile int exit_req;
+
+static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !verbose)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+static void sigint_handler(int dummy)
+{
+ exit_req = 1;
+}
+
+static float read_cpu_util(__u64 *last_sum, __u64 *last_idle)
+{
+ FILE *fp;
+ char buf[4096];
+ char *line, *cur = NULL, *tok;
+ __u64 sum = 0, idle = 0;
+ __u64 delta_sum, delta_idle;
+ int idx;
+
+ fp = fopen("/proc/stat", "r");
+ if (!fp) {
+ perror("fopen(\"/proc/stat\")");
+ return 0.0;
+ }
+
+ if (!fgets(buf, sizeof(buf), fp)) {
+ perror("fgets(\"/proc/stat\")");
+ fclose(fp);
+ return 0.0;
+ }
+ fclose(fp);
+
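+ /*
+ * The first line of /proc/stat reads "cpu user nice system idle ...";
+ * token 0 is the "cpu" label, so the 4th numeric field (idx == 4) is
+ * the cumulative idle time.
+ */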
+ line = buf;
+ for (idx = 0; (tok = strtok_r(line, " \n", &cur)); idx++) {
+ char *endp = NULL;
+ __u64 v;
+
+ if (idx == 0) {
+ line = NULL;
+ continue;
+ }
+ v = strtoull(tok, &endp, 0);
+ if (!endp || *endp != '\0') {
+ fprintf(stderr, "failed to parse %dth field of /proc/stat (\"%s\")\n",
+ idx, tok);
+ continue;
+ }
+ sum += v;
+ if (idx == 4)
+ idle = v;
+ }
+
+ delta_sum = sum - *last_sum;
+ delta_idle = idle - *last_idle;
+ *last_sum = sum;
+ *last_idle = idle;
+
+ return delta_sum ? (float)(delta_sum - delta_idle) / delta_sum : 0.0;
+}
+
+static void fcg_read_stats(struct scx_flatcg *skel, __u64 *stats)
+{
+ __u64 cnts[FCG_NR_STATS][skel->rodata->nr_cpus];
+ __u32 idx;
+
+ memset(stats, 0, sizeof(stats[0]) * FCG_NR_STATS);
+
+ for (idx = 0; idx < FCG_NR_STATS; idx++) {
+ int ret, cpu;
+
+ ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats),
+ &idx, cnts[idx]);
+ if (ret < 0)
+ continue;
+ for (cpu = 0; cpu < skel->rodata->nr_cpus; cpu++)
+ stats[idx] += cnts[idx][cpu];
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct scx_flatcg *skel;
+ struct bpf_link *link;
+ struct timespec intv_ts = { .tv_sec = 2, .tv_nsec = 0 };
+ bool dump_cgrps = false;
+ __u64 last_cpu_sum = 0, last_cpu_idle = 0;
+ __u64 last_stats[FCG_NR_STATS] = {};
+ unsigned long seq = 0;
+ __s32 opt;
+ __u64 ecode;
+
+ libbpf_set_print(libbpf_print_fn);
+ signal(SIGINT, sigint_handler);
+ signal(SIGTERM, sigint_handler);
+restart:
+ skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg);
+
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+ while ((opt = getopt(argc, argv, "s:i:dfvh")) != -1) {
+ double v;
+
+ switch (opt) {
+ case 's':
+ v = strtod(optarg, NULL);
+ skel->rodata->cgrp_slice_ns = v * 1000;
+ break;
+ case 'i':
+ v = strtod(optarg, NULL);
+ intv_ts.tv_sec = v;
+ intv_ts.tv_nsec = (v - (float)intv_ts.tv_sec) * 1000000000;
+ break;
+ case 'd':
+ dump_cgrps = true;
+ break;
+ case 'f':
+ skel->rodata->fifo_sched = true;
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ case 'h':
+ default:
+ fprintf(stderr, help_fmt, basename(argv[0]));
+ return opt != 'h';
+ }
+ }
+
+ printf("slice=%.1lfms intv=%.1lfs dump_cgrps=%d",
+ (double)skel->rodata->cgrp_slice_ns / 1000000.0,
+ (double)intv_ts.tv_sec + (double)intv_ts.tv_nsec / 1000000000.0,
+ dump_cgrps);
+
+ SCX_OPS_LOAD(skel, flatcg_ops, scx_flatcg, uei);
+ link = SCX_OPS_ATTACH(skel, flatcg_ops, scx_flatcg);
+
+ while (!exit_req && !UEI_EXITED(skel, uei)) {
+ __u64 acc_stats[FCG_NR_STATS];
+ __u64 stats[FCG_NR_STATS];
+ float cpu_util;
+ int i;
+
+ cpu_util = read_cpu_util(&last_cpu_sum, &last_cpu_idle);
+
+ fcg_read_stats(skel, acc_stats);
+ for (i = 0; i < FCG_NR_STATS; i++)
+ stats[i] = acc_stats[i] - last_stats[i];
+
+ memcpy(last_stats, acc_stats, sizeof(acc_stats));
+
+ printf("\n[SEQ %6lu cpu=%5.1lf hweight_gen=%" PRIu64 "]\n",
+ seq++, cpu_util * 100.0, skel->data->hweight_gen);
+ printf(" act:%6llu deact:%6llu global:%6llu local:%6llu\n",
+ stats[FCG_STAT_ACT],
+ stats[FCG_STAT_DEACT],
+ stats[FCG_STAT_GLOBAL],
+ stats[FCG_STAT_LOCAL]);
+ printf("HWT cache:%6llu update:%6llu skip:%6llu race:%6llu\n",
+ stats[FCG_STAT_HWT_CACHE],
+ stats[FCG_STAT_HWT_UPDATES],
+ stats[FCG_STAT_HWT_SKIP],
+ stats[FCG_STAT_HWT_RACE]);
+ printf("ENQ skip:%6llu race:%6llu\n",
+ stats[FCG_STAT_ENQ_SKIP],
+ stats[FCG_STAT_ENQ_RACE]);
+ printf("CNS keep:%6llu expire:%6llu empty:%6llu gone:%6llu\n",
+ stats[FCG_STAT_CNS_KEEP],
+ stats[FCG_STAT_CNS_EXPIRE],
+ stats[FCG_STAT_CNS_EMPTY],
+ stats[FCG_STAT_CNS_GONE]);
+ printf("PNC next:%6llu empty:%6llu nocgrp:%6llu gone:%6llu race:%6llu fail:%6llu\n",
+ stats[FCG_STAT_PNC_NEXT],
+ stats[FCG_STAT_PNC_EMPTY],
+ stats[FCG_STAT_PNC_NO_CGRP],
+ stats[FCG_STAT_PNC_GONE],
+ stats[FCG_STAT_PNC_RACE],
+ stats[FCG_STAT_PNC_FAIL]);
+ printf("BAD remove:%6llu\n",
+ acc_stats[FCG_STAT_BAD_REMOVAL]);
+ fflush(stdout);
+
+ nanosleep(&intv_ts, NULL);
+ }
+
+ bpf_link__destroy(link);
+ ecode = UEI_REPORT(skel, uei);
+ scx_flatcg__destroy(skel);
+
+ if (UEI_ECODE_RESTART(ecode))
+ goto restart;
+ return 0;
+}
diff --git a/tools/sched_ext/scx_flatcg.h b/tools/sched_ext/scx_flatcg.h
new file mode 100644
index 000000000000..6f2ea50acb1c
--- /dev/null
+++ b/tools/sched_ext/scx_flatcg.h
@@ -0,0 +1,51 @@
+#ifndef __SCX_EXAMPLE_FLATCG_H
+#define __SCX_EXAMPLE_FLATCG_H
+
+enum {
+ FCG_HWEIGHT_ONE = 1LLU << 16,
+};
+
+enum fcg_stat_idx {
+ FCG_STAT_ACT,
+ FCG_STAT_DEACT,
+ FCG_STAT_LOCAL,
+ FCG_STAT_GLOBAL,
+
+ FCG_STAT_HWT_UPDATES,
+ FCG_STAT_HWT_CACHE,
+ FCG_STAT_HWT_SKIP,
+ FCG_STAT_HWT_RACE,
+
+ FCG_STAT_ENQ_SKIP,
+ FCG_STAT_ENQ_RACE,
+
+ FCG_STAT_CNS_KEEP,
+ FCG_STAT_CNS_EXPIRE,
+ FCG_STAT_CNS_EMPTY,
+ FCG_STAT_CNS_GONE,
+
+ FCG_STAT_PNC_NO_CGRP,
+ FCG_STAT_PNC_NEXT,
+ FCG_STAT_PNC_EMPTY,
+ FCG_STAT_PNC_GONE,
+ FCG_STAT_PNC_RACE,
+ FCG_STAT_PNC_FAIL,
+
+ FCG_STAT_BAD_REMOVAL,
+
+ FCG_NR_STATS,
+};
+
+struct fcg_cgrp_ctx {
+ u32 nr_active;
+ u32 nr_runnable;
+ u32 queued;
+ u32 weight;
+ u32 hweight;
+ u64 child_weight_sum;
+ u64 hweight_gen;
+ s64 cvtime_delta;
+ u64 tvtime_now;
+};
+
+#endif /* __SCX_EXAMPLE_FLATCG_H */
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
new file mode 100644
index 000000000000..67e2a7968cc9
--- /dev/null
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -0,0 +1,827 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A simple five-level FIFO queue scheduler.
+ *
+ * There are five FIFOs implemented using BPF_MAP_TYPE_QUEUE. A task gets
+ * assigned to one depending on its compound weight. Each CPU round robins
+ * through the FIFOs and dispatches more from FIFOs with higher indices - 1 from
+ * queue0, 2 from queue1, 4 from queue2 and so on.
+ *
+ * This scheduler demonstrates:
+ *
+ * - BPF-side queueing using PIDs.
+ * - Sleepable per-task storage allocation using ops.init_task().
+ * - Using ops.cpu_release() to handle a higher priority scheduling class taking
+ * the CPU away.
+ * - Core-sched support.
+ *
+ * This scheduler is primarily for demonstration and testing of sched_ext
+ * features and unlikely to be useful for actual workloads.
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#include <scx/common.bpf.h>
+
+enum consts {
+ ONE_SEC_IN_NS = 1000000000,
+ SHARED_DSQ = 0,
+ HIGHPRI_DSQ = 1,
+ HIGHPRI_WEIGHT = 8668, /* this is what -20 maps to */
+};
+
+char _license[] SEC("license") = "GPL";
+
+const volatile u64 slice_ns = SCX_SLICE_DFL;
+const volatile u32 stall_user_nth;
+const volatile u32 stall_kernel_nth;
+const volatile u32 dsp_inf_loop_after;
+const volatile u32 dsp_batch;
+const volatile bool highpri_boosting;
+const volatile bool print_shared_dsq;
+const volatile s32 disallow_tgid;
+const volatile bool suppress_dump;
+
+u64 nr_highpri_queued;
+u32 test_error_cnt;
+
+UEI_DEFINE(uei);
+
+struct qmap {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 4096);
+ __type(value, u32);
+} queue0 SEC(".maps"),
+ queue1 SEC(".maps"),
+ queue2 SEC(".maps"),
+ queue3 SEC(".maps"),
+ queue4 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 5);
+ __type(key, int);
+ __array(values, struct qmap);
+} queue_arr SEC(".maps") = {
+ .values = {
+ [0] = &queue0,
+ [1] = &queue1,
+ [2] = &queue2,
+ [3] = &queue3,
+ [4] = &queue4,
+ },
+};
+
+/*
+ * If enabled, the CPU performance target is set according to the queue
+ * index using the following table.
+ */
+static const u32 qidx_to_cpuperf_target[] = {
+ [0] = SCX_CPUPERF_ONE * 0 / 4,
+ [1] = SCX_CPUPERF_ONE * 1 / 4,
+ [2] = SCX_CPUPERF_ONE * 2 / 4,
+ [3] = SCX_CPUPERF_ONE * 3 / 4,
+ [4] = SCX_CPUPERF_ONE * 4 / 4,
+};
+
+/*
+ * Per-queue sequence numbers to implement core-sched ordering.
+ *
+ * Tail seq is assigned to each queued task and incremented. Head seq tracks the
+ * sequence number of the latest dispatched task. The distance between a task's
+ * seq and the associated queue's head seq is called the queue distance and is
+ * used when comparing two tasks for ordering. See qmap_core_sched_before().
+ */
+static u64 core_sched_head_seqs[5];
+static u64 core_sched_tail_seqs[5];
+
+/* Per-task scheduling context */
+struct task_ctx {
+ bool force_local; /* Dispatch directly to local_dsq */
+ bool highpri;
+ u64 core_sched_seq;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct task_ctx);
+} task_ctx_stor SEC(".maps");
+
+struct cpu_ctx {
+ u64 dsp_idx; /* dispatch index */
+ u64 dsp_cnt; /* remaining count */
+ u32 avg_weight;
+ u32 cpuperf_target;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, u32);
+ __type(value, struct cpu_ctx);
+} cpu_ctx_stor SEC(".maps");
+
+/* Statistics */
+u64 nr_enqueued, nr_dispatched, nr_reenqueued, nr_dequeued, nr_ddsp_from_enq;
+u64 nr_core_sched_execed;
+u64 nr_expedited_local, nr_expedited_remote, nr_expedited_lost, nr_expedited_from_timer;
+u32 cpuperf_min, cpuperf_avg, cpuperf_max;
+u32 cpuperf_target_min, cpuperf_target_avg, cpuperf_target_max;
+
+static s32 pick_direct_dispatch_cpu(struct task_struct *p, s32 prev_cpu)
+{
+ s32 cpu;
+
+ if (p->nr_cpus_allowed == 1 ||
+ scx_bpf_test_and_clear_cpu_idle(prev_cpu))
+ return prev_cpu;
+
+ cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
+ if (cpu >= 0)
+ return cpu;
+
+ return -1;
+}
+
+static struct task_ctx *lookup_task_ctx(struct task_struct *p)
+{
+ struct task_ctx *tctx;
+
+ if (!(tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0))) {
+ scx_bpf_error("task_ctx lookup failed");
+ return NULL;
+ }
+ return tctx;
+}
+
+s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ struct task_ctx *tctx;
+ s32 cpu;
+
+ if (!(tctx = lookup_task_ctx(p)))
+ return -ESRCH;
+
+ cpu = pick_direct_dispatch_cpu(p, prev_cpu);
+
+ if (cpu >= 0) {
+ tctx->force_local = true;
+ return cpu;
+ } else {
+ return prev_cpu;
+ }
+}
+
+static int weight_to_idx(u32 weight)
+{
+ /* Coarsely map the compound weight to a FIFO. */
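+ /*
+ * E.g. the default scx weight of 100 maps to FIFO 2 while
+ * HIGHPRI_WEIGHT (8668, i.e. nice -20) maps to FIFO 4.
+ */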
+ if (weight <= 25)
+ return 0;
+ else if (weight <= 50)
+ return 1;
+ else if (weight < 200)
+ return 2;
+ else if (weight < 400)
+ return 3;
+ else
+ return 4;
+}
+
+void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ static u32 user_cnt, kernel_cnt;
+ struct task_ctx *tctx;
+ u32 pid = p->pid;
+ int idx = weight_to_idx(p->scx.weight);
+ void *ring;
+ s32 cpu;
+
+ if (p->flags & PF_KTHREAD) {
+ if (stall_kernel_nth && !(++kernel_cnt % stall_kernel_nth))
+ return;
+ } else {
+ if (stall_user_nth && !(++user_cnt % stall_user_nth))
+ return;
+ }
+
+ if (test_error_cnt && !--test_error_cnt)
+ scx_bpf_error("test triggering error");
+
+ if (!(tctx = lookup_task_ctx(p)))
+ return;
+
+ /*
+ * All enqueued tasks must have their core_sched_seq updated for correct
+ * core-sched ordering. Also, take a look at the end of qmap_dispatch().
+ */
+ tctx->core_sched_seq = core_sched_tail_seqs[idx]++;
+
+ /*
+ * If qmap_select_cpu() is telling us to or this is the last runnable
+ * task on the CPU, enqueue locally.
+ */
+ if (tctx->force_local) {
+ tctx->force_local = false;
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, enq_flags);
+ return;
+ }
+
+ /* if !WAKEUP, select_cpu() wasn't called, try direct dispatch */
+ if (!(enq_flags & SCX_ENQ_WAKEUP) &&
+ (cpu = pick_direct_dispatch_cpu(p, scx_bpf_task_cpu(p))) >= 0) {
+ __sync_fetch_and_add(&nr_ddsp_from_enq, 1);
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, slice_ns, enq_flags);
+ return;
+ }
+
+ /*
+ * If the task was re-enqueued due to the CPU being preempted by a
+ * higher priority scheduling class, just re-enqueue the task directly
+ * on the global DSQ. As we want another CPU to pick it up, find and
+ * kick an idle CPU.
+ */
+ if (enq_flags & SCX_ENQ_REENQ) {
+ s32 cpu;
+
+ scx_bpf_dispatch(p, SHARED_DSQ, 0, enq_flags);
+ cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
+ if (cpu >= 0)
+ scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+ return;
+ }
+
+ ring = bpf_map_lookup_elem(&queue_arr, &idx);
+ if (!ring) {
+ scx_bpf_error("failed to find ring %d", idx);
+ return;
+ }
+
+ /* Queue on the selected FIFO. If the FIFO overflows, punt to global. */
+ if (bpf_map_push_elem(ring, &pid, 0)) {
+ scx_bpf_dispatch(p, SHARED_DSQ, slice_ns, enq_flags);
+ return;
+ }
+
+ if (highpri_boosting && p->scx.weight >= HIGHPRI_WEIGHT) {
+ tctx->highpri = true;
+ __sync_fetch_and_add(&nr_highpri_queued, 1);
+ }
+ __sync_fetch_and_add(&nr_enqueued, 1);
+}
+
+/*
+ * The BPF queue map doesn't support removal and sched_ext can handle spurious
+ * dispatches. qmap_dequeue() is only used to collect statistics.
+ */
+void BPF_STRUCT_OPS(qmap_dequeue, struct task_struct *p, u64 deq_flags)
+{
+ __sync_fetch_and_add(&nr_dequeued, 1);
+ if (deq_flags & SCX_DEQ_CORE_SCHED_EXEC)
+ __sync_fetch_and_add(&nr_core_sched_execed, 1);
+}
+
+static void update_core_sched_head_seq(struct task_struct *p)
+{
+ int idx = weight_to_idx(p->scx.weight);
+ struct task_ctx *tctx;
+
+ if ((tctx = lookup_task_ctx(p)))
+ core_sched_head_seqs[idx] = tctx->core_sched_seq;
+}
+
+/*
+ * To demonstrate the use of scx_bpf_dispatch_from_dsq(), implement a silly
+ * selective priority boosting mechanism by scanning SHARED_DSQ looking for
+ * highpri tasks, moving them to HIGHPRI_DSQ and then consuming them first.
+ * This makes a minor difference, and only when dsp_batch is larger than 1.
+ *
+ * scx_bpf_dispatch[_vtime]_from_dsq() are allowed both from ops.dispatch() and
+ * non-rq-lock holding BPF programs. As demonstration, this function is called
+ * from qmap_dispatch() and monitor_timerfn().
+ */
+static bool dispatch_highpri(bool from_timer)
+{
+ struct task_struct *p;
+ s32 this_cpu = bpf_get_smp_processor_id();
+
+ /* scan SHARED_DSQ and move highpri tasks to HIGHPRI_DSQ */
+ bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
+ static u64 highpri_seq;
+ struct task_ctx *tctx;
+
+ if (!(tctx = lookup_task_ctx(p)))
+ return false;
+
+ if (tctx->highpri) {
+ /* exercise the set_*() and vtime interface too */
+ __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(
+ BPF_FOR_EACH_ITER, slice_ns * 2);
+ __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(
+ BPF_FOR_EACH_ITER, highpri_seq++);
+ __COMPAT_scx_bpf_dispatch_vtime_from_dsq(
+ BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
+ }
+ }
+
+ /*
+ * Scan HIGHPRI_DSQ and dispatch until a task that can run on this CPU
+ * is found.
+ */
+ bpf_for_each(scx_dsq, p, HIGHPRI_DSQ, 0) {
+ bool dispatched = false;
+ s32 cpu;
+
+ if (bpf_cpumask_test_cpu(this_cpu, p->cpus_ptr))
+ cpu = this_cpu;
+ else
+ cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
+
+ if (__COMPAT_scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
+ SCX_DSQ_LOCAL_ON | cpu,
+ SCX_ENQ_PREEMPT)) {
+ if (cpu == this_cpu) {
+ dispatched = true;
+ __sync_fetch_and_add(&nr_expedited_local, 1);
+ } else {
+ __sync_fetch_and_add(&nr_expedited_remote, 1);
+ }
+ if (from_timer)
+ __sync_fetch_and_add(&nr_expedited_from_timer, 1);
+ } else {
+ __sync_fetch_and_add(&nr_expedited_lost, 1);
+ }
+
+ if (dispatched)
+ return true;
+ }
+
+ return false;
+}
+
+void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev)
+{
+ struct task_struct *p;
+ struct cpu_ctx *cpuc;
+ struct task_ctx *tctx;
+ u32 zero = 0, batch = dsp_batch ?: 1;
+ void *fifo;
+ s32 i, pid;
+
+ if (dispatch_highpri(false))
+ return;
+
+ if (!nr_highpri_queued && scx_bpf_consume(SHARED_DSQ))
+ return;
+
+ if (dsp_inf_loop_after && nr_dispatched > dsp_inf_loop_after) {
+ /*
+ * PID 2 should be kthreadd which should mostly be idle and off
+ * the scheduler. Let's keep dispatching it to force the kernel
+ * to call this function over and over again.
+ */
+ p = bpf_task_from_pid(2);
+ if (p) {
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, 0);
+ bpf_task_release(p);
+ return;
+ }
+ }
+
+ if (!(cpuc = bpf_map_lookup_elem(&cpu_ctx_stor, &zero))) {
+ scx_bpf_error("failed to look up cpu_ctx");
+ return;
+ }
+
+ for (i = 0; i < 5; i++) {
+ /* Advance the dispatch cursor and pick the fifo. */
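+ /*
+ * dsp_idx cycles through 0-4 and queue i is given 1 << i dispatch
+ * attempts per visit, i.e. 1, 2, 4, 8 and 16 respectively.
+ */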
+ if (!cpuc->dsp_cnt) {
+ cpuc->dsp_idx = (cpuc->dsp_idx + 1) % 5;
+ cpuc->dsp_cnt = 1 << cpuc->dsp_idx;
+ }
+
+ fifo = bpf_map_lookup_elem(&queue_arr, &cpuc->dsp_idx);
+ if (!fifo) {
+ scx_bpf_error("failed to find ring %llu", cpuc->dsp_idx);
+ return;
+ }
+
+ /* Dispatch or advance. */
+ bpf_repeat(BPF_MAX_LOOPS) {
+ struct task_ctx *tctx;
+
+ if (bpf_map_pop_elem(fifo, &pid))
+ break;
+
+ p = bpf_task_from_pid(pid);
+ if (!p)
+ continue;
+
+ if (!(tctx = lookup_task_ctx(p))) {
+ bpf_task_release(p);
+ return;
+ }
+
+ if (tctx->highpri)
+ __sync_fetch_and_sub(&nr_highpri_queued, 1);
+
+ update_core_sched_head_seq(p);
+ __sync_fetch_and_add(&nr_dispatched, 1);
+
+ scx_bpf_dispatch(p, SHARED_DSQ, slice_ns, 0);
+ bpf_task_release(p);
+
+ batch--;
+ cpuc->dsp_cnt--;
+ if (!batch || !scx_bpf_dispatch_nr_slots()) {
+ if (dispatch_highpri(false))
+ return;
+ scx_bpf_consume(SHARED_DSQ);
+ return;
+ }
+ if (!cpuc->dsp_cnt)
+ break;
+ }
+
+ cpuc->dsp_cnt = 0;
+ }
+
+ /*
+ * No other tasks. @prev will keep running. Update its core_sched_seq as
+ * if the task were enqueued and dispatched immediately.
+ */
+ if (prev) {
+ tctx = bpf_task_storage_get(&task_ctx_stor, prev, 0, 0);
+ if (!tctx) {
+ scx_bpf_error("task_ctx lookup failed");
+ return;
+ }
+
+ tctx->core_sched_seq =
+ core_sched_tail_seqs[weight_to_idx(prev->scx.weight)]++;
+ }
+}
+
+void BPF_STRUCT_OPS(qmap_tick, struct task_struct *p)
+{
+ struct cpu_ctx *cpuc;
+ u32 zero = 0;
+ int idx;
+
+ if (!(cpuc = bpf_map_lookup_elem(&cpu_ctx_stor, &zero))) {
+ scx_bpf_error("failed to look up cpu_ctx");
+ return;
+ }
+
+ /*
+ * Use the running avg of weights to select the target cpuperf level.
+ * This is a demonstration of the cpuperf feature rather than a
+ * practical strategy to regulate CPU frequency.
+ */
+ cpuc->avg_weight = cpuc->avg_weight * 3 / 4 + p->scx.weight / 4;
+ idx = weight_to_idx(cpuc->avg_weight);
+ cpuc->cpuperf_target = qidx_to_cpuperf_target[idx];
+
+ scx_bpf_cpuperf_set(scx_bpf_task_cpu(p), cpuc->cpuperf_target);
+}
+
+/*
+ * The distance from the head of the queue scaled by the weight of the queue.
+ * The lower the number, the older the task and the higher the priority.
+ */
+static s64 task_qdist(struct task_struct *p)
+{
+ int idx = weight_to_idx(p->scx.weight);
+ struct task_ctx *tctx;
+ s64 qdist;
+
+ tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
+ if (!tctx) {
+ scx_bpf_error("task_ctx lookup failed");
+ return 0;
+ }
+
+ qdist = tctx->core_sched_seq - core_sched_head_seqs[idx];
+
+ /*
+ * As the queue index increments, the priority doubles. The queue with index 3
+ * is dispatched twice as frequently as the one with index 2. Reflect the
+ * difference by scaling qdists accordingly. Note that the shift amount needs
+ * to be flipped depending on the sign to avoid flipping the priority direction.
+ */
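+ /*
+ * E.g. a positive qdist of 8 becomes 128 (8 << 4) in queue 0 but stays
+ * 8 (8 << 0) in queue 4; the factor halves as the queue index rises,
+ * mirroring the doubling dispatch frequency.
+ */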
+ if (qdist >= 0)
+ return qdist << (4 - idx);
+ else
+ return qdist << idx;
+}
+
+/*
+ * This is called to determine the task ordering when core-sched is picking
+ * tasks to execute on SMT siblings and should encode about the same ordering as
+ * the regular scheduling path. Use the priority-scaled distances from the head
+ * of the queues to compare the two tasks which should be consistent with the
+ * dispatch path behavior.
+ */
+bool BPF_STRUCT_OPS(qmap_core_sched_before,
+ struct task_struct *a, struct task_struct *b)
+{
+ return task_qdist(a) > task_qdist(b);
+}
+
+void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
+{
+ u32 cnt;
+
+ /*
+ * Called when @cpu is taken by a higher priority scheduling class. This
+ * makes @cpu no longer available for executing sched_ext tasks. As we
+ * don't want the tasks in @cpu's local dsq to sit there until @cpu
+ * becomes available again, re-enqueue them into the global dsq. See
+ * %SCX_ENQ_REENQ handling in qmap_enqueue().
+ */
+ cnt = scx_bpf_reenqueue_local();
+ if (cnt)
+ __sync_fetch_and_add(&nr_reenqueued, cnt);
+}
+
+s32 BPF_STRUCT_OPS(qmap_init_task, struct task_struct *p,
+ struct scx_init_task_args *args)
+{
+ if (p->tgid == disallow_tgid)
+ p->scx.disallow = true;
+
+ /*
+ * @p is new. Let's ensure that its task_ctx is available. We can sleep
+ * in this function and the following will automatically use GFP_KERNEL.
+ */
+ if (bpf_task_storage_get(&task_ctx_stor, p, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE))
+ return 0;
+ else
+ return -ENOMEM;
+}
+
+void BPF_STRUCT_OPS(qmap_dump, struct scx_dump_ctx *dctx)
+{
+ s32 i, pid;
+
+ if (suppress_dump)
+ return;
+
+ bpf_for(i, 0, 5) {
+ void *fifo;
+
+ if (!(fifo = bpf_map_lookup_elem(&queue_arr, &i)))
+ return;
+
+ scx_bpf_dump("QMAP FIFO[%d]:", i);
+ bpf_repeat(4096) {
+ if (bpf_map_pop_elem(fifo, &pid))
+ break;
+ scx_bpf_dump(" %d", pid);
+ }
+ scx_bpf_dump("\n");
+ }
+}
+
+void BPF_STRUCT_OPS(qmap_dump_cpu, struct scx_dump_ctx *dctx, s32 cpu, bool idle)
+{
+ u32 zero = 0;
+ struct cpu_ctx *cpuc;
+
+ if (suppress_dump || idle)
+ return;
+ if (!(cpuc = bpf_map_lookup_percpu_elem(&cpu_ctx_stor, &zero, cpu)))
+ return;
+
+ scx_bpf_dump("QMAP: dsp_idx=%llu dsp_cnt=%llu avg_weight=%u cpuperf_target=%u",
+ cpuc->dsp_idx, cpuc->dsp_cnt, cpuc->avg_weight,
+ cpuc->cpuperf_target);
+}
+
+void BPF_STRUCT_OPS(qmap_dump_task, struct scx_dump_ctx *dctx, struct task_struct *p)
+{
+ struct task_ctx *taskc;
+
+ if (suppress_dump)
+ return;
+ if (!(taskc = bpf_task_storage_get(&task_ctx_stor, p, 0, 0)))
+ return;
+
+ scx_bpf_dump("QMAP: force_local=%d core_sched_seq=%llu",
+ taskc->force_local, taskc->core_sched_seq);
+}
+
+/*
+ * Print out the online and possible CPU map using bpf_printk() as a
+ * demonstration of using the cpumask kfuncs and ops.cpu_on/offline().
+ */
+static void print_cpus(void)
+{
+ const struct cpumask *possible, *online;
+ s32 cpu;
+ char buf[128] = "", *p;
+ int idx;
+
+ possible = scx_bpf_get_possible_cpumask();
+ online = scx_bpf_get_online_cpumask();
+
+ idx = 0;
+ bpf_for(cpu, 0, scx_bpf_nr_cpu_ids()) {
+ if (!(p = MEMBER_VPTR(buf, [idx++])))
+ break;
+ if (bpf_cpumask_test_cpu(cpu, online))
+ *p++ = 'O';
+ else if (bpf_cpumask_test_cpu(cpu, possible))
+ *p++ = 'X';
+ else
+ *p++ = ' ';
+
+ if ((cpu & 7) == 7) {
+ if (!(p = MEMBER_VPTR(buf, [idx++])))
+ break;
+ *p++ = '|';
+ }
+ }
+ buf[sizeof(buf) - 1] = '\0';
+
+ scx_bpf_put_cpumask(online);
+ scx_bpf_put_cpumask(possible);
+
+ bpf_printk("CPUS: |%s", buf);
+}
+
+void BPF_STRUCT_OPS(qmap_cpu_online, s32 cpu)
+{
+ bpf_printk("CPU %d coming online", cpu);
+ /* @cpu is already online at this point */
+ print_cpus();
+}
+
+void BPF_STRUCT_OPS(qmap_cpu_offline, s32 cpu)
+{
+ bpf_printk("CPU %d going offline", cpu);
+ /* @cpu is still online at this point */
+ print_cpus();
+}
+
+struct monitor_timer {
+ struct bpf_timer timer;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, u32);
+ __type(value, struct monitor_timer);
+} monitor_timer SEC(".maps");
+
+/*
+ * Print out the min, avg and max performance levels of CPUs every second to
+ * demonstrate the cpuperf interface.
+ */
+static void monitor_cpuperf(void)
+{
+ u32 zero = 0, nr_cpu_ids;
+ u64 cap_sum = 0, cur_sum = 0, cur_min = SCX_CPUPERF_ONE, cur_max = 0;
+ u64 target_sum = 0, target_min = SCX_CPUPERF_ONE, target_max = 0;
+ const struct cpumask *online;
+ int i, nr_online_cpus = 0;
+
+ nr_cpu_ids = scx_bpf_nr_cpu_ids();
+ online = scx_bpf_get_online_cpumask();
+
+ bpf_for(i, 0, nr_cpu_ids) {
+ struct cpu_ctx *cpuc;
+ u32 cap, cur;
+
+ if (!bpf_cpumask_test_cpu(i, online))
+ continue;
+ nr_online_cpus++;
+
+ /* collect the capacity and current cpuperf */
+ cap = scx_bpf_cpuperf_cap(i);
+ cur = scx_bpf_cpuperf_cur(i);
+
+ cur_min = cur < cur_min ? cur : cur_min;
+ cur_max = cur > cur_max ? cur : cur_max;
+
+ /*
+ * $cur is relative to $cap. Scale it down accordingly so that
+ * it's in the same scale as other CPUs and $cur_sum/$cap_sum
+ * makes sense.
+ */
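+ /*
+ * E.g. a CPU with cap == SCX_CPUPERF_ONE / 2 running flat out
+ * (cur == SCX_CPUPERF_ONE) contributes SCX_CPUPERF_ONE / 2 to
+ * cur_sum, matching its cap_sum contribution.
+ */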
+ cur_sum += cur * cap / SCX_CPUPERF_ONE;
+ cap_sum += cap;
+
+ if (!(cpuc = bpf_map_lookup_percpu_elem(&cpu_ctx_stor, &zero, i))) {
+ scx_bpf_error("failed to look up cpu_ctx");
+ goto out;
+ }
+
+ /* collect target */
+ cur = cpuc->cpuperf_target;
+ target_sum += cur;
+ target_min = cur < target_min ? cur : target_min;
+ target_max = cur > target_max ? cur : target_max;
+ }
+
+ cpuperf_min = cur_min;
+ cpuperf_avg = cur_sum * SCX_CPUPERF_ONE / cap_sum;
+ cpuperf_max = cur_max;
+
+ cpuperf_target_min = target_min;
+ cpuperf_target_avg = target_sum / nr_online_cpus;
+ cpuperf_target_max = target_max;
+out:
+ scx_bpf_put_cpumask(online);
+}
+
+/*
+ * Dump the currently queued tasks in the shared DSQ to demonstrate the usage of
+ * scx_bpf_dsq_nr_queued() and the DSQ iterator. Raise the dispatch batch count
+ * see meaningful dumps in the trace pipe.
+ */
+static void dump_shared_dsq(void)
+{
+ struct task_struct *p;
+ s32 nr;
+
+ if (!(nr = scx_bpf_dsq_nr_queued(SHARED_DSQ)))
+ return;
+
+ bpf_printk("Dumping %d tasks in SHARED_DSQ in reverse order", nr);
+
+ bpf_rcu_read_lock();
+ bpf_for_each(scx_dsq, p, SHARED_DSQ, SCX_DSQ_ITER_REV)
+ bpf_printk("%s[%d]", p->comm, p->pid);
+ bpf_rcu_read_unlock();
+}
+
+static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
+{
+ bpf_rcu_read_lock();
+ dispatch_highpri(true);
+ bpf_rcu_read_unlock();
+
+ monitor_cpuperf();
+
+ if (print_shared_dsq)
+ dump_shared_dsq();
+
+ bpf_timer_start(timer, ONE_SEC_IN_NS, 0);
+ return 0;
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(qmap_init)
+{
+ u32 key = 0;
+ struct bpf_timer *timer;
+ s32 ret;
+
+ print_cpus();
+
+ ret = scx_bpf_create_dsq(SHARED_DSQ, -1);
+ if (ret)
+ return ret;
+
+ ret = scx_bpf_create_dsq(HIGHPRI_DSQ, -1);
+ if (ret)
+ return ret;
+
+ timer = bpf_map_lookup_elem(&monitor_timer, &key);
+ if (!timer)
+ return -ESRCH;
+
+ bpf_timer_init(timer, &monitor_timer, CLOCK_MONOTONIC);
+ bpf_timer_set_callback(timer, monitor_timerfn);
+
+ return bpf_timer_start(timer, ONE_SEC_IN_NS, 0);
+}
+
+void BPF_STRUCT_OPS(qmap_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SCX_OPS_DEFINE(qmap_ops,
+ .select_cpu = (void *)qmap_select_cpu,
+ .enqueue = (void *)qmap_enqueue,
+ .dequeue = (void *)qmap_dequeue,
+ .dispatch = (void *)qmap_dispatch,
+ .tick = (void *)qmap_tick,
+ .core_sched_before = (void *)qmap_core_sched_before,
+ .cpu_release = (void *)qmap_cpu_release,
+ .init_task = (void *)qmap_init_task,
+ .dump = (void *)qmap_dump,
+ .dump_cpu = (void *)qmap_dump_cpu,
+ .dump_task = (void *)qmap_dump_task,
+ .cpu_online = (void *)qmap_cpu_online,
+ .cpu_offline = (void *)qmap_cpu_offline,
+ .init = (void *)qmap_init,
+ .exit = (void *)qmap_exit,
+ .timeout_ms = 5000U,
+ .name = "qmap");
diff --git a/tools/sched_ext/scx_qmap.c b/tools/sched_ext/scx_qmap.c
new file mode 100644
index 000000000000..ac45a02b4055
--- /dev/null
+++ b/tools/sched_ext/scx_qmap.c
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include "scx_qmap.bpf.skel.h"
+
+const char help_fmt[] =
+"A simple five-level FIFO queue sched_ext scheduler.\n"
+"\n"
+"See the top-level comment in .bpf.c for more details.\n"
+"\n"
+"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-b COUNT]\n"
+" [-P] [-d PID] [-D LEN] [-p] [-v]\n"
+"\n"
+" -s SLICE_US Override slice duration\n"
+" -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n"
+" -t COUNT Stall every COUNT'th user thread\n"
+" -T COUNT Stall every COUNT'th kernel thread\n"
+" -l COUNT Trigger dispatch infinite looping after COUNT dispatches\n"
+" -b COUNT Dispatch upto COUNT tasks together\n"
+" -P Print out DSQ content to trace_pipe every second, use with -b\n"
+" -H Boost nice -20 tasks in SHARED_DSQ, use with -b\n"
+" -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n"
+" -D LEN Set scx_exit_info.dump buffer length\n"
+" -S Suppress qmap-specific debug dump\n"
+" -p Switch only tasks on SCHED_EXT policy instead of all\n"
+" -v Print libbpf debug messages\n"
+" -h Display this help and exit\n";
+
+static bool verbose;
+static volatile int exit_req;
+
+static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !verbose)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+static void sigint_handler(int dummy)
+{
+ exit_req = 1;
+}
+
+int main(int argc, char **argv)
+{
+ struct scx_qmap *skel;
+ struct bpf_link *link;
+ int opt;
+
+ libbpf_set_print(libbpf_print_fn);
+ signal(SIGINT, sigint_handler);
+ signal(SIGTERM, sigint_handler);
+
+ skel = SCX_OPS_OPEN(qmap_ops, scx_qmap);
+
+ while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PHd:D:Spvh")) != -1) {
+ switch (opt) {
+ case 's':
+ skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
+ break;
+ case 'e':
+ skel->bss->test_error_cnt = strtoul(optarg, NULL, 0);
+ break;
+ case 't':
+ skel->rodata->stall_user_nth = strtoul(optarg, NULL, 0);
+ break;
+ case 'T':
+ skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0);
+ break;
+ case 'l':
+ skel->rodata->dsp_inf_loop_after = strtoul(optarg, NULL, 0);
+ break;
+ case 'b':
+ skel->rodata->dsp_batch = strtoul(optarg, NULL, 0);
+ break;
+ case 'P':
+ skel->rodata->print_shared_dsq = true;
+ break;
+ case 'H':
+ skel->rodata->highpri_boosting = true;
+ break;
+ case 'd':
+ skel->rodata->disallow_tgid = strtol(optarg, NULL, 0);
+ if (skel->rodata->disallow_tgid < 0)
+ skel->rodata->disallow_tgid = getpid();
+ break;
+ case 'D':
+ skel->struct_ops.qmap_ops->exit_dump_len = strtoul(optarg, NULL, 0);
+ break;
+ case 'S':
+ skel->rodata->suppress_dump = true;
+ break;
+ case 'p':
+ skel->struct_ops.qmap_ops->flags |= SCX_OPS_SWITCH_PARTIAL;
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ default:
+ fprintf(stderr, help_fmt, basename(argv[0]));
+ return opt != 'h';
+ }
+ }
+
+ SCX_OPS_LOAD(skel, qmap_ops, scx_qmap, uei);
+ link = SCX_OPS_ATTACH(skel, qmap_ops, scx_qmap);
+
+ while (!exit_req && !UEI_EXITED(skel, uei)) {
+ long nr_enqueued = skel->bss->nr_enqueued;
+ long nr_dispatched = skel->bss->nr_dispatched;
+
+ printf("stats : enq=%lu dsp=%lu delta=%ld reenq=%"PRIu64" deq=%"PRIu64" core=%"PRIu64" enq_ddsp=%"PRIu64"\n",
+ nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched,
+ skel->bss->nr_reenqueued, skel->bss->nr_dequeued,
+ skel->bss->nr_core_sched_execed,
+ skel->bss->nr_ddsp_from_enq);
+ printf(" exp_local=%"PRIu64" exp_remote=%"PRIu64" exp_timer=%"PRIu64" exp_lost=%"PRIu64"\n",
+ skel->bss->nr_expedited_local,
+ skel->bss->nr_expedited_remote,
+ skel->bss->nr_expedited_from_timer,
+ skel->bss->nr_expedited_lost);
+ if (__COMPAT_has_ksym("scx_bpf_cpuperf_cur"))
+ printf("cpuperf: cur min/avg/max=%u/%u/%u target min/avg/max=%u/%u/%u\n",
+ skel->bss->cpuperf_min,
+ skel->bss->cpuperf_avg,
+ skel->bss->cpuperf_max,
+ skel->bss->cpuperf_target_min,
+ skel->bss->cpuperf_target_avg,
+ skel->bss->cpuperf_target_max);
+ fflush(stdout);
+ sleep(1);
+ }
+
+ bpf_link__destroy(link);
+ UEI_REPORT(skel, uei);
+ scx_qmap__destroy(skel);
+ /*
+ * scx_qmap implements ops.cpu_on/offline() and doesn't need to restart
+ * on CPU hotplug events.
+ */
+ return 0;
+}
diff --git a/tools/sched_ext/scx_show_state.py b/tools/sched_ext/scx_show_state.py
new file mode 100644
index 000000000000..8bc626ede1c4
--- /dev/null
+++ b/tools/sched_ext/scx_show_state.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env drgn
+#
+# Copyright (C) 2024 Tejun Heo <tj@kernel.org>
+# Copyright (C) 2024 Meta Platforms, Inc. and affiliates.
+
+desc = """
+This is a drgn script to show the current sched_ext state.
+For more info on drgn, visit https://github.com/osandov/drgn.
+"""
+
+import drgn
+import sys
+
+def err(s):
+ print(s, file=sys.stderr, flush=True)
+ sys.exit(1)
+
+def read_int(name):
+ return int(prog[name].value_())
+
+def read_atomic(name):
+ return prog[name].counter.value_()
+
+def read_static_key(name):
+ return prog[name].key.enabled.counter.value_()
+
+def ops_state_str(state):
+ return prog['scx_ops_enable_state_str'][state].string_().decode()
+
+ops = prog['scx_ops']
+enable_state = read_atomic("scx_ops_enable_state_var")
+
+print(f'ops : {ops.name.string_().decode()}')
+print(f'enabled : {read_static_key("__scx_ops_enabled")}')
+print(f'switching_all : {read_int("scx_switching_all")}')
+print(f'switched_all : {read_static_key("__scx_switched_all")}')
+print(f'enable_state : {ops_state_str(enable_state)} ({enable_state})')
+print(f'bypass_depth : {read_atomic("scx_ops_bypass_depth")}')
+print(f'nr_rejected : {read_atomic("scx_nr_rejected")}')
+print(f'enable_seq : {read_atomic("scx_enable_seq")}')
diff --git a/tools/sched_ext/scx_simple.bpf.c b/tools/sched_ext/scx_simple.bpf.c
new file mode 100644
index 000000000000..ed7e8d535fc5
--- /dev/null
+++ b/tools/sched_ext/scx_simple.bpf.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A simple scheduler.
+ *
+ * By default, it operates as a simple global weighted vtime scheduler and can
+ * be switched to FIFO scheduling. It also demonstrates the following niceties.
+ *
+ * - Statistics tracking how many tasks are queued to local and global dsq's.
+ * - Termination notification for userspace.
+ *
+ * While very simple, this scheduler should work reasonably well on CPUs with a
+ * uniform L3 cache topology. While preemption is not implemented, the fact that
+ * the scheduling queue is shared across all CPUs means that whatever is at the
+ * front of the queue is likely to be executed fairly quickly given a
+ * sufficient number of CPUs. The FIFO scheduling mode may be beneficial to
+ * some workloads but comes with the usual problems of FIFO scheduling, where
+ * saturating threads can easily drown out interactive ones.
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+const volatile bool fifo_sched;
+
+static u64 vtime_now;
+UEI_DEFINE(uei);
+
+/*
+ * Built-in DSQs such as SCX_DSQ_GLOBAL cannot be used as priority queues
+ * (meaning, cannot be dispatched to with scx_bpf_dispatch_vtime()). We
+ * therefore create a separate DSQ with ID 0 that we dispatch to and consume
+ * from. If scx_simple only supported global FIFO scheduling, then we could
+ * just use SCX_DSQ_GLOBAL.
+ */
+#define SHARED_DSQ 0
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u64));
+ __uint(max_entries, 2); /* [local, global] */
+} stats SEC(".maps");
+
+static void stat_inc(u32 idx)
+{
+ u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx);
+ if (cnt_p)
+ (*cnt_p)++;
+}
+
+static inline bool vtime_before(u64 a, u64 b)
+{
+ return (s64)(a - b) < 0;
+}
+
+s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+ bool is_idle = false;
+ s32 cpu;
+
+ cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
+ if (is_idle) {
+ stat_inc(0); /* count local queueing */
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+ }
+
+ return cpu;
+}
+
+void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ stat_inc(1); /* count global queueing */
+
+ if (fifo_sched) {
+ scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
+ } else {
+ u64 vtime = p->scx.dsq_vtime;
+
+ /*
+ * Limit the amount of budget that an idling task can accumulate
+ * to one slice.
+ */
+ if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
+ vtime = vtime_now - SCX_SLICE_DFL;
+
+ scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
+ enq_flags);
+ }
+}
+
+void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)
+{
+ scx_bpf_consume(SHARED_DSQ);
+}
+
+void BPF_STRUCT_OPS(simple_running, struct task_struct *p)
+{
+ if (fifo_sched)
+ return;
+
+ /*
+ * Global vtime always progresses forward as tasks start executing. The
+ * test and update can be performed concurrently from multiple CPUs and
+ * thus racy. Any error should be contained and temporary. Let's just
+ * live with it.
+ */
+ if (vtime_before(vtime_now, p->scx.dsq_vtime))
+ vtime_now = p->scx.dsq_vtime;
+}
+
+void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable)
+{
+ if (fifo_sched)
+ return;
+
+ /*
+ * Scale the execution time by the inverse of the weight and charge.
+ *
+ * Note that the default yield implementation yields by setting
+ * @p->scx.slice to zero and the following would treat the yielding task
+ * as if it had consumed all of its slice. If this penalizes yielding tasks
+ * too much, determine the execution time by taking explicit timestamps
+ * instead of depending on @p->scx.slice.
+ */
+ p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+}
+
+void BPF_STRUCT_OPS(simple_enable, struct task_struct *p)
+{
+ p->scx.dsq_vtime = vtime_now;
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
+{
+ return scx_bpf_create_dsq(SHARED_DSQ, -1);
+}
+
+void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SCX_OPS_DEFINE(simple_ops,
+ .select_cpu = (void *)simple_select_cpu,
+ .enqueue = (void *)simple_enqueue,
+ .dispatch = (void *)simple_dispatch,
+ .running = (void *)simple_running,
+ .stopping = (void *)simple_stopping,
+ .enable = (void *)simple_enable,
+ .init = (void *)simple_init,
+ .exit = (void *)simple_exit,
+ .name = "simple");
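
Two pieces of arithmetic above are worth making concrete: the wraparound-safe comparison in vtime_before() and the weight-scaled charge in simple_stopping(). A standalone userspace sketch, assuming SCX_SLICE_DFL is 20ms (expressed in nanoseconds) and the nice-0 baseline weight of 100:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* vtime_before(): the signed subtraction keeps the comparison
	 * correct even after the u64 vtime clock wraps around. */
	uint64_t a = UINT64_MAX, b = 2;	/* a just before wrap, b just after */
	assert((int64_t)(a - b) < 0);	/* a still counts as "before" b */

	/* simple_stopping(): charge = (slice - slice_left) * 100 / weight.
	 * A task with twice the nice-0 weight that consumed its whole
	 * slice accrues vtime at half the rate. */
	uint64_t slice_dfl = 20ULL * 1000 * 1000;	/* assumed slice, ns */
	uint64_t charge = (slice_dfl - 0) * 100 / 200;
	assert(charge == slice_dfl / 2);
	return 0;
}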
diff --git a/tools/sched_ext/scx_simple.c b/tools/sched_ext/scx_simple.c
new file mode 100644
index 000000000000..76d83199545c
--- /dev/null
+++ b/tools/sched_ext/scx_simple.c
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include "scx_simple.bpf.skel.h"
+
+const char help_fmt[] =
+"A simple sched_ext scheduler.\n"
+"\n"
+"See the top-level comment in .bpf.c for more details.\n"
+"\n"
+"Usage: %s [-f] [-v]\n"
+"\n"
+" -f Use FIFO scheduling instead of weighted vtime scheduling\n"
+" -v Print libbpf debug messages\n"
+" -h Display this help and exit\n";
+
+static bool verbose;
+static volatile int exit_req;
+
+static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !verbose)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+static void sigint_handler(int simple)
+{
+ exit_req = 1;
+}
+
+static void read_stats(struct scx_simple *skel, __u64 *stats)
+{
+ int nr_cpus = libbpf_num_possible_cpus();
+ __u64 cnts[2][nr_cpus];
+ __u32 idx;
+
+ memset(stats, 0, sizeof(stats[0]) * 2);
+
+ for (idx = 0; idx < 2; idx++) {
+ int ret, cpu;
+
+ ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats),
+ &idx, cnts[idx]);
+ if (ret < 0)
+ continue;
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ stats[idx] += cnts[idx][cpu];
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct scx_simple *skel;
+ struct bpf_link *link;
+ __u32 opt;
+ __u64 ecode;
+
+ libbpf_set_print(libbpf_print_fn);
+ signal(SIGINT, sigint_handler);
+ signal(SIGTERM, sigint_handler);
+restart:
+ skel = SCX_OPS_OPEN(simple_ops, scx_simple);
+
+ while ((opt = getopt(argc, argv, "fvh")) != -1) {
+ switch (opt) {
+ case 'f':
+ skel->rodata->fifo_sched = true;
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ default:
+ fprintf(stderr, help_fmt, basename(argv[0]));
+ return opt != 'h';
+ }
+ }
+
+ SCX_OPS_LOAD(skel, simple_ops, scx_simple, uei);
+ link = SCX_OPS_ATTACH(skel, simple_ops, scx_simple);
+
+ while (!exit_req && !UEI_EXITED(skel, uei)) {
+ __u64 stats[2];
+
+ read_stats(skel, stats);
+ printf("local=%llu global=%llu\n", stats[0], stats[1]);
+ fflush(stdout);
+ sleep(1);
+ }
+
+ bpf_link__destroy(link);
+ ecode = UEI_REPORT(skel, uei);
+ scx_simple__destroy(skel);
+
+ if (UEI_ECODE_RESTART(ecode))
+ goto restart;
+ return 0;
+}
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 3d1ca9e38b1f..b1256fee3567 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -14,7 +14,7 @@ ldflags-y += --wrap=cxl_dvsec_rr_decode
ldflags-y += --wrap=devm_cxl_add_rch_dport
ldflags-y += --wrap=cxl_rcd_component_reg_phys
ldflags-y += --wrap=cxl_endpoint_parse_cdat
-ldflags-y += --wrap=cxl_setup_parent_dport
+ldflags-y += --wrap=cxl_dport_init_ras_reporting
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
diff --git a/tools/testing/cxl/mock_acpi.c b/tools/testing/cxl/mock_acpi.c
index 55813de26d46..8da94378ccec 100644
--- a/tools/testing/cxl/mock_acpi.c
+++ b/tools/testing/cxl/mock_acpi.c
@@ -18,7 +18,7 @@ struct acpi_device *to_cxl_host_bridge(struct device *host, struct device *dev)
goto out;
}
- if (dev->bus == &platform_bus_type)
+ if (dev_is_platform(dev))
goto out;
adev = to_acpi_device(dev);
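
dev_is_platform() is the stock helper for the open-coded bus comparison it replaces here; roughly, from include/linux/platform_device.h:

static inline bool dev_is_platform(const struct device *dev)
{
	return dev->bus == &platform_bus_type;
}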
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 129f179b0ac5..ccdd6a504222 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/bits.h>
+#include <cxl/mailbox.h>
#include <asm/unaligned.h>
#include <crypto/sha2.h>
#include <cxlmem.h>
@@ -534,6 +535,7 @@ static int mock_gsl(struct cxl_mbox_cmd *cmd)
static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
struct cxl_mbox_get_log *gl = cmd->payload_in;
u32 offset = le32_to_cpu(gl->offset);
u32 length = le32_to_cpu(gl->length);
@@ -542,7 +544,7 @@ static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
if (cmd->size_in < sizeof(*gl))
return -EINVAL;
- if (length > mds->payload_size)
+ if (length > cxl_mbox->payload_size)
return -EINVAL;
if (offset + length > sizeof(mock_cel))
return -EINVAL;
@@ -617,12 +619,13 @@ void cxl_mockmem_sanitize_work(struct work_struct *work)
{
struct cxl_memdev_state *mds =
container_of(work, typeof(*mds), security.poll_dwork.work);
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_node)
sysfs_notify_dirent(mds->security.sanitize_node);
mds->security.sanitize_active = false;
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
@@ -631,6 +634,7 @@ static int mock_sanitize(struct cxl_mockmem_data *mdata,
struct cxl_mbox_cmd *cmd)
{
struct cxl_memdev_state *mds = mdata->mds;
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
int rc = 0;
if (cmd->size_in != 0)
@@ -648,14 +652,14 @@ static int mock_sanitize(struct cxl_mockmem_data *mdata,
return -ENXIO;
}
- mutex_lock(&mds->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
if (schedule_delayed_work(&mds->security.poll_dwork,
msecs_to_jiffies(mdata->sanitize_timeout))) {
mds->security.sanitize_active = true;
dev_dbg(mds->cxlds.dev, "sanitize issued\n");
} else
rc = -EBUSY;
- mutex_unlock(&mds->mbox_mutex);
+ mutex_unlock(&cxl_mbox->mbox_mutex);
return rc;
}
@@ -1333,12 +1337,13 @@ static int mock_activate_fw(struct cxl_mockmem_data *mdata,
return -EINVAL;
}
-static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
+static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
struct cxl_mbox_cmd *cmd)
{
- struct cxl_dev_state *cxlds = &mds->cxlds;
- struct device *dev = cxlds->dev;
+ struct device *dev = cxl_mbox->host;
struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ struct cxl_memdev_state *mds = mdata->mds;
+ struct cxl_dev_state *cxlds = &mds->cxlds;
int rc = -EIO;
switch (cmd->opcode) {
@@ -1453,6 +1458,17 @@ static ssize_t event_trigger_store(struct device *dev,
}
static DEVICE_ATTR_WO(event_trigger);
+static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds)
+{
+ int rc;
+
+ rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1460,6 +1476,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
struct cxl_memdev_state *mds;
struct cxl_dev_state *cxlds;
struct cxl_mockmem_data *mdata;
+ struct cxl_mailbox *cxl_mbox;
int rc;
mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
@@ -1487,13 +1504,18 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (IS_ERR(mds))
return PTR_ERR(mds);
+ cxlds = &mds->cxlds;
+ rc = cxl_mock_mailbox_create(cxlds);
+ if (rc)
+ return rc;
+
+ cxl_mbox = &mds->cxlds.cxl_mbox;
mdata->mds = mds;
- mds->mbox_send = cxl_mock_mbox_send;
- mds->payload_size = SZ_4K;
+ cxl_mbox->mbox_send = cxl_mock_mbox_send;
+ cxl_mbox->payload_size = SZ_4K;
mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
- cxlds = &mds->cxlds;
cxlds->serial = pdev->id;
if (is_rcd(pdev))
cxlds->rcd = true;
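
The thread running through this hunk: mailbox state that used to live in struct cxl_memdev_state now lives in cxlds->cxl_mbox, so every provider, this mock included, follows the same init sequence. Condensed from the probe changes above (a sketch, not extra patch content):

	struct cxl_mailbox *mbox = &cxlds->cxl_mbox;

	rc = cxl_mailbox_init(mbox, cxlds->dev);	/* host + lock setup */
	if (rc)
		return rc;
	mbox->mbox_send = cxl_mock_mbox_send;	/* op now takes the mailbox */
	mbox->payload_size = SZ_4K;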
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index d619672faa49..f4ce96cc11d4 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -228,7 +228,7 @@ int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, CXL);
-int __wrap_cxl_dvsec_rr_decode(struct device *dev, int dvsec,
+int __wrap_cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info)
{
int rc = 0, index;
@@ -237,7 +237,7 @@ int __wrap_cxl_dvsec_rr_decode(struct device *dev, int dvsec,
if (ops && ops->is_mock_dev(dev))
rc = 0;
else
- rc = cxl_dvsec_rr_decode(dev, dvsec, info);
+ rc = cxl_dvsec_rr_decode(dev, port, info);
put_cxl_mock_ops(index);
return rc;
@@ -299,17 +299,17 @@ void __wrap_cxl_endpoint_parse_cdat(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_endpoint_parse_cdat, CXL);
-void __wrap_cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+void __wrap_cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
{
int index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (!ops || !ops->is_mock_port(dport->dport_dev))
- cxl_setup_parent_dport(host, dport);
+ cxl_dport_init_ras_reporting(dport, host);
put_cxl_mock_ops(index);
}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_setup_parent_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dport_init_ras_reporting, CXL);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
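
The __wrap_ symbols in this file pair with the ldflags-y += --wrap=... lines updated in the Kbuild hunk earlier: with ld --wrap=foo, every call site resolves to __wrap_foo(), and the wrapper can still reach the original through __real_foo(). A hypothetical minimal example of the linker mechanism (foo and is_mocked are illustrative names):

int __real_foo(int x);		/* the untouched implementation */

int __wrap_foo(int x)
{
	if (is_mocked(x))	/* hypothetical mock predicate */
		return 0;	/* canned result for the test */
	return __real_foo(x);	/* otherwise defer to the real symbol */
}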
diff --git a/tools/testing/fault-injection/failcmd.sh b/tools/testing/fault-injection/failcmd.sh
index 78dac34264be..c4f2432750f4 100644..100755
--- a/tools/testing/fault-injection/failcmd.sh
+++ b/tools/testing/fault-injection/failcmd.sh
@@ -64,6 +64,14 @@ ENVIRONMENT
EOF
}
+exit_if_not_hex() {
+ local value="$1"
+ if ! [[ $value =~ ^0x[0-9a-fA-F]+$ ]]; then
+ echo "Error: The provided value '$value' is not a valid hexadecimal number." >&2
+ exit 1
+ fi
+}
+
if [ $UID != 0 ]; then
echo must be run as root >&2
exit 1
@@ -160,18 +168,22 @@ while true; do
shift 2
;;
--require-start)
+ exit_if_not_hex "$2"
echo $2 > $FAULTATTR/require-start
shift 2
;;
--require-end)
+ exit_if_not_hex "$2"
echo $2 > $FAULTATTR/require-end
shift 2
;;
--reject-start)
+ exit_if_not_hex "$2"
echo $2 > $FAULTATTR/reject-start
shift 2
;;
--reject-end)
+ exit_if_not_hex "$2"
echo $2 > $FAULTATTR/reject-end
shift 2
;;
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index eb31cd9c977b..dacad94e2be4 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -222,6 +222,8 @@ my $install_time;
my $reboot_time;
my $test_time;
+my $warning_found = 0;
+
my $pwd;
my $dirname = $FindBin::Bin;
@@ -729,11 +731,18 @@ sub print_times {
show_time($test_time);
doprint "\n";
}
+ if ($warning_found) {
+ doprint "\n*** WARNING";
+ doprint "S" if ($warning_found > 1);
+ doprint " found in build: $warning_found ***\n\n";
+ }
+
# reset for iterations like bisect
$build_time = 0;
$install_time = 0;
$reboot_time = 0;
$test_time = 0;
+ $warning_found = 0;
}
sub get_mandatory_configs {
@@ -2047,7 +2056,7 @@ sub get_grub_index {
} elsif ($reboot_type eq "grub2") {
$command = "cat $grub_file";
$target = '^\s*menuentry.*' . $grub_menu_qt;
- $skip = '^\s*menuentry';
+ $skip = '^\s*menuentry\s';
$submenu = '^\s*submenu\s';
} elsif ($reboot_type eq "grub2bls") {
$command = $grub_bls_get;
@@ -2460,8 +2469,6 @@ sub process_warning_line {
# Returns 1 if OK
# 0 otherwise
sub check_buildlog {
- return 1 if (!defined $warnings_file);
-
my %warnings_list;
# Failed builds should not reboot the target
@@ -2482,18 +2489,21 @@ sub check_buildlog {
close(IN);
}
- # If warnings file didn't exist, and WARNINGS_FILE exist,
- # then we fail on any warning!
-
open(IN, $buildlog) or dodie "Can't open $buildlog";
while (<IN>) {
if (/$check_build_re/) {
my $warning = process_warning_line $_;
if (!defined $warnings_list{$warning}) {
- fail "New warning found (not in $warnings_file)\n$_\n";
- $no_reboot = $save_no_reboot;
- return 0;
+ $warning_found++;
+
+ # If the warnings file didn't exist and WARNINGS_FILE exists,
+ # then we fail on any warning!
+ if (defined $warnings_file) {
+ fail "New warning found (not in $warnings_file)\n$_\n";
+ $no_reboot = $save_no_reboot;
+ return 0;
+ }
}
}
}
diff --git a/tools/testing/memblock/Makefile b/tools/testing/memblock/Makefile
index 7a1ca694a982..d80982ccdc20 100644
--- a/tools/testing/memblock/Makefile
+++ b/tools/testing/memblock/Makefile
@@ -8,7 +8,7 @@ LDFLAGS += -fsanitize=address -fsanitize=undefined
TARGETS = main
TEST_OFILES = tests/alloc_nid_api.o tests/alloc_helpers_api.o tests/alloc_api.o \
tests/basic_api.o tests/common.o tests/alloc_exact_nid_api.o
-DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o
+DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o cmdline.o
OFILES = main.o $(DEP_OFILES) $(TEST_OFILES)
EXTR_SRC = ../../../mm/memblock.c
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index f6c6e5474c3a..1cf82acb2a3e 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -20,7 +20,7 @@ void memblock_free_pages(struct page *page, unsigned long pfn,
{
}
-static inline void accept_memory(phys_addr_t start, phys_addr_t end)
+static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
diff --git a/tools/testing/memblock/linux/kernel.h b/tools/testing/memblock/linux/kernel.h
index d2f148bd8902..4d1012d5be6e 100644
--- a/tools/testing/memblock/linux/kernel.h
+++ b/tools/testing/memblock/linux/kernel.h
@@ -8,5 +8,7 @@
#include <linux/printk.h>
#include <linux/linkage.h>
#include <linux/kconfig.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
#endif
diff --git a/tools/testing/memblock/linux/mmzone.h b/tools/testing/memblock/linux/mmzone.h
index 71546e15bdd3..bb682659a12d 100644
--- a/tools/testing/memblock/linux/mmzone.h
+++ b/tools/testing/memblock/linux/mmzone.h
@@ -3,6 +3,7 @@
#define _TOOLS_MMZONE_H
#include <linux/atomic.h>
+#include <linux/memory_hotplug.h>
struct pglist_data *first_online_pgdat(void);
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index 49bccb90c35b..ce167a761981 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+generated/autoconf.h
generated/bit-length.h
generated/map-shift.h
idr.c
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index d1acd7d58850..8b3591a51e1f 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -1,77 +1,29 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -I. -I../../include -I../../../lib -g -Og -Wall \
- -D_LGPL_SOURCE -fsanitize=address -fsanitize=undefined
-LDFLAGS += -fsanitize=address -fsanitize=undefined
-LDLIBS+= -lpthread -lurcu
-TARGETS = main idr-test multiorder xarray maple
-LIBS := slab.o find_bit.o bitmap.o hweight.o vsprintf.o
-CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o maple.o $(LIBS)
-OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
- regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
- iteration_check_2.o benchmark.o
-
-ifndef SHIFT
- SHIFT=3
-endif
+.PHONY: clean
-ifeq ($(BUILD), 32)
- CFLAGS += -m32
- LDFLAGS += -m32
-LONG_BIT := 32
-endif
-
-ifndef LONG_BIT
-LONG_BIT := $(shell getconf LONG_BIT)
-endif
+TARGETS = main idr-test multiorder xarray maple
+CORE_OFILES = $(SHARED_OFILES) xarray.o maple.o test.o
+OFILES = main.o $(CORE_OFILES) regression1.o regression2.o \
+ regression3.o regression4.o tag_check.o multiorder.o idr-test.o \
+ iteration_check.o iteration_check_2.o benchmark.o
targets: generated/map-shift.h generated/bit-length.h $(TARGETS)
+include ../shared/shared.mk
+
main: $(OFILES)
idr-test.o: ../../../lib/test_ida.c
idr-test: idr-test.o $(CORE_OFILES)
-xarray: $(CORE_OFILES)
+xarray: $(CORE_OFILES) xarray.o
-maple: $(CORE_OFILES)
+maple: $(CORE_OFILES) maple.o
multiorder: multiorder.o $(CORE_OFILES)
clean:
- $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h generated/bit-length.h
-
-vpath %.c ../../lib
-
-$(OFILES): Makefile *.h */*.h generated/map-shift.h generated/bit-length.h \
- ../../include/linux/*.h \
- ../../include/asm/*.h \
- ../../../include/linux/xarray.h \
- ../../../include/linux/maple_tree.h \
- ../../../include/linux/radix-tree.h \
- ../../../lib/radix-tree.h \
- ../../../include/linux/idr.h
-
-radix-tree.c: ../../../lib/radix-tree.c
- sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
-
-idr.c: ../../../lib/idr.c
- sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
-
-xarray.o: ../../../lib/xarray.c ../../../lib/test_xarray.c
-
-maple.o: ../../../lib/maple_tree.c ../../../lib/test_maple_tree.c
-
-generated/map-shift.h:
- @if ! grep -qws $(SHIFT) generated/map-shift.h; then \
- echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
- generated/map-shift.h; \
- fi
-
-generated/bit-length.h: FORCE
- @if ! grep -qws CONFIG_$(LONG_BIT)BIT generated/bit-length.h; then \
- echo "Generating $@"; \
- echo "#define CONFIG_$(LONG_BIT)BIT 1" > $@; \
- fi
+ $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/*
-FORCE: ;
+$(OFILES): $(SHARED_DEPS) *.h
diff --git a/tools/testing/radix-tree/linux/init.h b/tools/testing/radix-tree/linux/init.h
deleted file mode 100644
index 81563c3dfce7..000000000000
--- a/tools/testing/radix-tree/linux/init.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define __init
-#define __exit
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index cd1cf05503b4..1873ddbe16cc 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -14,7 +14,7 @@
#include "test.h"
#include <stdlib.h>
#include <time.h>
-#include "linux/init.h"
+#include <linux/init.h>
#define module_init(x)
#define module_exit(x)
@@ -120,7 +120,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
mas_push_node(&mas, mn);
mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL); /* free */
+ mas_destroy(&mas);
mtree_unlock(mt);
@@ -144,7 +144,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
mn->parent = ma_parent_ptr(mn);
ma_free_rcu(mn);
mas.status = ma_start;
- mas_nomem(&mas, GFP_KERNEL);
+ mas_destroy(&mas);
/* Allocate 3 nodes, will fail. */
mas_node_count(&mas, 3);
/* Drop the lock and allocate 3 nodes. */
@@ -161,7 +161,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
MT_BUG_ON(mt, mas_allocated(&mas) != 3);
/* Free. */
mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL);
+ mas_destroy(&mas);
/* Set allocation request to 1. */
mas_set_alloc_req(&mas, 1);
@@ -277,6 +277,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
}
mas_reset(&mas);
MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
+ mas_destroy(&mas);
}
@@ -299,7 +300,7 @@ static noinline void __init check_new_node(struct maple_tree *mt)
}
MT_BUG_ON(mt, mas_allocated(&mas) != total);
mas_reset(&mas);
- mas_nomem(&mas, GFP_KERNEL); /* Free. */
+ mas_destroy(&mas); /* Free. */
MT_BUG_ON(mt, mas_allocated(&mas) != 0);
for (i = 1; i < 128; i++) {
@@ -35847,6 +35848,7 @@ static noinline void __init check_nomem(struct maple_tree *mt)
mas_store(&ms, &ms); /* insert 1 -> &ms */
mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
mtree_unlock(mt);
+ mas_destroy(&ms);
mtree_destroy(mt);
}
@@ -36224,6 +36226,97 @@ static noinline void __init check_mtree_dup(struct maple_tree *mt)
extern void test_kmem_cache_bulk(void);
+/* callback function used for check_nomem_writer_race() */
+static void writer2(void *maple_tree)
+{
+ struct maple_tree *mt = (struct maple_tree *)maple_tree;
+ MA_STATE(mas, mt, 6, 10);
+
+ mtree_lock(mas.tree);
+ mas_store(&mas, xa_mk_value(0xC));
+ mas_destroy(&mas);
+ mtree_unlock(mas.tree);
+}
+
+/*
+ * check_nomem_writer_race() - test a possible race in the mas_nomem() path
+ * @mt: The tree to build.
+ *
+ * There is a possible race condition in low memory conditions when mas_nomem()
+ * gives up its lock. A second writer can change the entry that the primary
+ * writer executing the mas_nomem() path is modifying. This test recreates this
+ * scenario to ensure we are handling it correctly.
+ */
+static void check_nomem_writer_race(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 5);
+
+ mt_set_non_kernel(0);
+ /* setup root with 2 values with NULL in between */
+ mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
+ mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
+ mtree_store_range(mt, 11, 15, xa_mk_value(0xB), GFP_KERNEL);
+
+ /* setup writer 2 that will trigger the race condition */
+ mt_set_private(mt);
+ mt_set_callback(writer2);
+
+ mtree_lock(mt);
+ /* erase 0-5 */
+ mas_erase(&mas);
+
+ /* index 6-10 should retain the value from writer 2 */
+ check_load(mt, 6, xa_mk_value(0xC));
+ mtree_unlock(mt);
+
+ /* test for the same race but with mas_store_gfp() */
+ mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL);
+ mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL);
+
+ mas_set_range(&mas, 0, 5);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+
+ /* ensure write made by writer 2 is retained */
+ check_load(mt, 6, xa_mk_value(0xC));
+
+ mt_set_private(NULL);
+ mt_set_callback(NULL);
+ mtree_unlock(mt);
+}
+
+/*
+ * Test to simulate expanding a vma from [0x7fffffffe000, 0x7ffffffff000)
+ * to [0x7ffde4ca1000, 0x7ffffffff000) and then shrinking the vma to
+ * [0x7ffde4ca1000, 0x7ffde4ca2000).
+ */
+static inline int check_vma_modification(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+
+ mtree_lock(mt);
+ /* vma with old start and old end */
+ __mas_set_range(&mas, 0x7fffffffe000, 0x7ffffffff000 - 1);
+ mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL);
+ mas_store_prealloc(&mas, xa_mk_value(1));
+
+ /* next write occurs partly in previous range [0, 0x7fffffffe000)*/
+ mas_prev_range(&mas, 0);
+ /* expand vma to {0x7ffde4ca1000, 0x7ffffffff000) */
+ __mas_set_range(&mas, 0x7ffde4ca1000, 0x7ffffffff000 - 1);
+ mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL);
+ mas_store_prealloc(&mas, xa_mk_value(1));
+
+ /* shrink vma to [0x7ffde4ca1000, 7ffde4ca2000) */
+ __mas_set_range(&mas, 0x7ffde4ca2000, 0x7ffffffff000 - 1);
+ mas_preallocate(&mas, NULL, GFP_KERNEL);
+ mas_store_prealloc(&mas, NULL);
+ mt_dump(mt, mt_dump_hex);
+
+ mas_destroy(&mas);
+ mtree_unlock(mt);
+ return 0;
+}
+
void farmer_tests(void)
{
struct maple_node *node;
@@ -36231,6 +36324,10 @@ void farmer_tests(void)
mt_dump(&tree, mt_dump_dec);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | MT_FLAGS_USE_RCU);
+ check_vma_modification(&tree);
+ mtree_destroy(&tree);
+
tree.ma_root = xa_mk_value(0);
mt_dump(&tree, mt_dump_dec);
@@ -36257,6 +36354,10 @@ void farmer_tests(void)
check_dfs_preorder(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
+ check_nomem_writer_race(&tree);
+ mtree_destroy(&tree);
+
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_prealloc(&tree);
mtree_destroy(&tree);
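
The maple-tree hunks above consistently replace mas_nomem() calls that were only being used as a free path with explicit mas_destroy() calls, and add mas_destroy() after stores that may have preallocated. The resulting contract, sketched with hypothetical mt/ptr (mirroring check_nomem() above):

	mtree_lock(mt);
	mas_store(&mas, ptr);		/* may fail with -ENOMEM */
	mas_nomem(&mas, GFP_KERNEL);	/* allocates; caller retries store */
	mtree_unlock(mt);
	mas_destroy(&mas);		/* release leftover node allocations */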
diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c
index d0e53bff1eb6..253208a8541b 100644
--- a/tools/testing/radix-tree/xarray.c
+++ b/tools/testing/radix-tree/xarray.c
@@ -4,17 +4,9 @@
* Copyright (c) 2018 Matthew Wilcox <willy@infradead.org>
*/
-#define XA_DEBUG
+#include "xarray-shared.h"
#include "test.h"
-#define module_init(x)
-#define module_exit(x)
-#define MODULE_AUTHOR(x)
-#define MODULE_DESCRIPTION(X)
-#define MODULE_LICENSE(x)
-#define dump_stack() assert(0)
-
-#include "../../../lib/xarray.c"
#undef XA_DEBUG
#include "../../../lib/test_xarray.c"
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 5025401323af..e6533b3400de 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -8,8 +8,8 @@ test_lru_map
test_lpm_map
test_tag
FEATURE-DUMP.libbpf
+FEATURE-DUMP.selftests
fixdep
-test_dev_cgroup
/test_progs
/test_progs-no_alu32
/test_progs-bpf_gcc
@@ -20,9 +20,6 @@ test_sock
urandom_read
test_sockmap
test_lirc_mode2_user
-get_cgroup_id_user
-test_skb_cgroup_id_user
-test_cgroup_storage
test_flow_dissector
flow_dissector_load
test_tcpnotify_user
@@ -31,6 +28,7 @@ test_tcp_check_syncookie_user
test_sysctl
xdping
test_cpp
+*.d
*.subskel.h
*.skel.h
*.lskel.h
diff --git a/tools/testing/selftests/bpf/DENYLIST.riscv64 b/tools/testing/selftests/bpf/DENYLIST.riscv64
new file mode 100644
index 000000000000..4fc4dfdde293
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST.riscv64
@@ -0,0 +1,3 @@
+# riscv64 deny list for BPF CI and local vmtest
+exceptions # JIT does not support exceptions
+tailcalls/tailcall_bpf2bpf* # JIT does not support mixing bpf2bpf and tailcalls
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 81d4757ecd4c..f04af11df8eb 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -33,6 +33,13 @@ OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0)
LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null)
LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
CFLAGS += -g $(OPT_FLAGS) -rdynamic \
-Wall -Werror -fno-omit-frame-pointer \
$(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \
@@ -41,6 +48,11 @@ CFLAGS += -g $(OPT_FLAGS) -rdynamic \
LDFLAGS += $(SAN_LDFLAGS)
LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread
+PCAP_CFLAGS := $(shell $(PKG_CONFIG) --cflags libpcap 2>/dev/null && echo "-DTRAFFIC_MONITOR=1")
+PCAP_LIBS := $(shell $(PKG_CONFIG) --libs libpcap 2>/dev/null)
+LDLIBS += $(PCAP_LIBS)
+CFLAGS += $(PCAP_CFLAGS)
+
# The following tests perform type punning and they may break strict
# aliasing rules, which are exploited by both GCC and clang by default
# while optimizing. This can lead to broken programs.
@@ -54,6 +66,10 @@ progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing
progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing
progs/timer_crash.c-CFLAGS := -fno-strict-aliasing
progs/test_global_func9.c-CFLAGS := -fno-strict-aliasing
+progs/verifier_nocsr.c-CFLAGS := -fno-strict-aliasing
+
+# Some utility functions use LLVM libraries
+jit_disasm_helpers.c-CFLAGS = $(LLVM_CFLAGS)
ifneq ($(LLVM),)
# Silence some warnings when compiled with clang
@@ -67,9 +83,7 @@ endif
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
- test_dev_cgroup \
- test_sock test_sockmap get_cgroup_id_user \
- test_cgroup_storage \
+ test_sock test_sockmap \
test_tcpnotify_user test_sysctl \
test_progs-no_alu32
TEST_INST_SUBDIRS := no_alu32
@@ -115,7 +129,6 @@ TEST_PROGS := test_kmod.sh \
test_xdp_redirect.sh \
test_xdp_redirect_multi.sh \
test_xdp_meta.sh \
- test_xdp_veth.sh \
test_tunnel.sh \
test_lwt_seg6local.sh \
test_lirc_mode2.sh \
@@ -140,7 +153,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \
test_xdp_vlan.sh test_bpftool.py
# Compile but not part of 'make run_tests'
-TEST_GEN_PROGS_EXTENDED = test_skb_cgroup_id_user \
+TEST_GEN_PROGS_EXTENDED = \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
@@ -166,6 +179,35 @@ endef
include ../lib.mk
+NON_CHECK_FEAT_TARGETS := clean docs-clean
+CHECK_FEAT := $(filter-out $(NON_CHECK_FEAT_TARGETS),$(or $(MAKECMDGOALS), "none"))
+ifneq ($(CHECK_FEAT),)
+FEATURE_USER := .selftests
+FEATURE_TESTS := llvm
+FEATURE_DISPLAY := $(FEATURE_TESTS)
+
+# Makefile.feature expects OUTPUT to end with a slash
+ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1)
+$(let OUTPUT,$(OUTPUT)/,\
+ $(eval include ../../../build/Makefile.feature))
+else
+OUTPUT := $(OUTPUT)/
+$(eval include ../../../build/Makefile.feature)
+OUTPUT := $(patsubst %/,%,$(OUTPUT))
+endif
+endif
+
+ifeq ($(feature-llvm),1)
+ LLVM_CFLAGS += -DHAVE_LLVM_SUPPORT
+ LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets
+ # both llvm-config and lib.mk add -D_GNU_SOURCE, which ends up as conflict
+ LLVM_CFLAGS += $(filter-out -D_GNU_SOURCE,$(shell $(LLVM_CONFIG) --cflags))
+ LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --system-libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ LLVM_LDLIBS += -lstdc++
+ LLVM_LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags)
+endif
+
SCRATCH_DIR := $(OUTPUT)/tools
BUILD_DIR := $(SCRATCH_DIR)/build
INCLUDE_DIR := $(SCRATCH_DIR)/include
@@ -293,13 +335,9 @@ JSON_WRITER := $(OUTPUT)/json_writer.o
CAP_HELPERS := $(OUTPUT)/cap_helpers.o
NETWORK_HELPERS := $(OUTPUT)/network_helpers.o
-$(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS)
-$(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS)
-$(OUTPUT)/get_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS)
-$(OUTPUT)/test_cgroup_storage: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_tag: $(TESTING_HELPERS)
@@ -365,10 +403,14 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
endif
+# vmlinux.h is first dumped to a temporary file and then compared to
+# the previous version. This helps to avoid unnecessary re-builds of
+# $(TRUNNER_BPF_OBJS)
$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
ifeq ($(VMLINUX_H),)
$(call msg,GEN,,$@)
- $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+ $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $(INCLUDE_DIR)/.vmlinux.h.tmp
+ $(Q)cmp -s $(INCLUDE_DIR)/.vmlinux.h.tmp $@ || mv $(INCLUDE_DIR)/.vmlinux.h.tmp $@
else
$(call msg,CP,,$@)
$(Q)cp "$(VMLINUX_H)" $@
@@ -396,7 +438,8 @@ define get_sys_includes
$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}') \
-$(shell $(1) $(2) -dM -E - </dev/null | grep '__loongarch_grlen ' | awk '{printf("-D__BITS_PER_LONG=%d", $$3)}')
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__loongarch_grlen ' | awk '{printf("-D__BITS_PER_LONG=%d", $$3)}') \
+$(shell $(1) $(2) -dM -E - </dev/null | grep -E 'MIPS(EL|EB)|_MIPS_SZ(PTR|LONG) |_MIPS_SIM |_ABI(O32|N32|64) ' | awk '{printf("-D%s=%s ", $$2, $$3)}')
endef
# Determine target endianness.
@@ -427,23 +470,24 @@ $(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h
# $1 - input .c file
# $2 - output .o file
# $3 - CFLAGS
+# $4 - binary name
define CLANG_BPF_BUILD_RULE
- $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
+ $(call msg,CLNG-BPF,$4,$2)
$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v3 -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
- $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
+ $(call msg,CLNG-BPF,$4,$2)
$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4
define CLANG_CPUV4_BPF_BUILD_RULE
- $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
+ $(call msg,CLNG-BPF,$4,$2)
$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2
endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
- $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2)
+ $(call msg,GCC-BPF,$4,$2)
$(Q)$(BPF_GCC) $3 -DBPF_NO_PRESERVE_ACCESS_INDEX -Wno-attributes -O2 -c $1 -o $2
endef
@@ -477,7 +521,14 @@ xsk_xdp_progs.skel.h-deps := xsk_xdp_progs.bpf.o
xdp_hw_metadata.skel.h-deps := xdp_hw_metadata.bpf.o
xdp_features.skel.h-deps := xdp_features.bpf.o
-LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
+LINKED_BPF_OBJS := $(foreach skel,$(LINKED_SKELS),$($(skel)-deps))
+LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(LINKED_BPF_OBJS))
+
+HEADERS_FOR_BPF_OBJS := $(wildcard $(BPFDIR)/*.bpf.h) \
+ $(addprefix $(BPFDIR)/, bpf_core_read.h \
+ bpf_endian.h \
+ bpf_helpers.h \
+ bpf_tracing.h)
# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on
# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
@@ -529,13 +580,12 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \
$(TRUNNER_BPF_PROGS_DIR)/%.c \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
$$(INCLUDE_DIR)/vmlinux.h \
- $(wildcard $(BPFDIR)/bpf_*.h) \
- $(wildcard $(BPFDIR)/*.bpf.h) \
+ $(HEADERS_FOR_BPF_OBJS) \
| $(TRUNNER_OUTPUT) $$(BPFOBJ)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS) \
$$($$<-CFLAGS) \
- $$($$<-$2-CFLAGS))
+ $$($$<-$2-CFLAGS),$(TRUNNER_BINARY))
$(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
@@ -556,7 +606,11 @@ $(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@
$(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
-$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
+$(LINKED_BPF_OBJS): %: $(TRUNNER_OUTPUT)/%
+
+# .SECONDEXPANSION here allows %-deps variables to be correctly expanded as prerequisites
+.SECONDEXPANSION:
+$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_OUTPUT)/%: $$$$(%-deps) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.bpf.o))
$(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps))
$(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked1.o)
@@ -566,6 +620,14 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h)
$(Q)rm -f $$(@:.skel.h=.linked1.o) $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
+
+# When the compiler generates a %.d file, only skel basenames (not
+# full paths) are specified as prerequisites for the corresponding %.o
+# file. This target makes each %.skel.h basename depend on the full path,
+# linking the generated %.d dependencies with the actual %.skel.h files.
+$(notdir %.skel.h): $(TRUNNER_OUTPUT)/%.skel.h
+ @true
+
endif
# ensure we set up tests.h header generation rule just once
@@ -583,14 +645,25 @@ endif
# Note: we cd into output directory to ensure embedded BPF object is found
$(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_TESTS_DIR)/%.c \
- $(TRUNNER_EXTRA_HDRS) \
- $(TRUNNER_BPF_OBJS) \
- $(TRUNNER_BPF_SKELS) \
- $(TRUNNER_BPF_LSKELS) \
- $(TRUNNER_BPF_SKELS_LINKED) \
- $$(BPFOBJ) | $(TRUNNER_OUTPUT)
+ | $(TRUNNER_OUTPUT)/%.test.d
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
- $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
+ $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -MMD -MT $$@ -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
+
+$(TRUNNER_TEST_OBJS:.o=.d): $(TRUNNER_OUTPUT)/%.test.d: \
+ $(TRUNNER_TESTS_DIR)/%.c \
+ $(TRUNNER_EXTRA_HDRS) \
+ $(TRUNNER_BPF_SKELS) \
+ $(TRUNNER_BPF_LSKELS) \
+ $(TRUNNER_BPF_SKELS_LINKED) \
+ $$(BPFOBJ) | $(TRUNNER_OUTPUT)
+
+ifeq ($(filter clean docs-clean,$(MAKECMDGOALS)),)
+include $(wildcard $(TRUNNER_TEST_OBJS:.o=.d))
+endif
+
+# add per extra obj CFLAGS definitions
+$(foreach N,$(patsubst $(TRUNNER_OUTPUT)/%.o,%,$(TRUNNER_EXTRA_OBJS)), \
+ $(eval $(TRUNNER_OUTPUT)/$(N).o: CFLAGS += $($(N).c-CFLAGS)))
$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
%.c \
@@ -608,13 +681,19 @@ ifneq ($2:$(OUTPUT),:$(shell pwd))
$(Q)rsync -aq $$^ $(TRUNNER_OUTPUT)/
endif
+$(OUTPUT)/$(TRUNNER_BINARY): LDLIBS += $$(LLVM_LDLIBS)
+$(OUTPUT)/$(TRUNNER_BINARY): LDFLAGS += $$(LLVM_LDFLAGS)
+
+# some X.test.o files have runtime dependencies on Y.bpf.o files
+$(OUTPUT)/$(TRUNNER_BINARY): | $(TRUNNER_BPF_OBJS)
+
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
$(RESOLVE_BTFIDS) \
$(TRUNNER_BPFTOOL) \
| $(TRUNNER_BINARY)-extras
$$(call msg,BINARY,,$$@)
- $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
+ $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) $$(LDFLAGS) -o $$@
$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@
$(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \
$(OUTPUT)/$(if $2,$2/)bpftool
@@ -633,9 +712,11 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
cap_helpers.c \
unpriv_helpers.c \
netlink_helpers.c \
+ jit_disasm_helpers.c \
test_loader.c \
xsk.c \
disasm.c \
+ disasm_helpers.c \
json_writer.c \
flow_dissector_load.h \
ip_check_defrag_frags.h
@@ -762,17 +843,21 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
-$(OUTPUT)/uprobe_multi: uprobe_multi.c
+# Linking uprobe_multi can fail due to relocation overflows on mips.
+$(OUTPUT)/uprobe_multi: CFLAGS += $(if $(filter mips, $(ARCH)),-mxgot)
+$(OUTPUT)/uprobe_multi: uprobe_multi.c uprobe_multi.ld
$(call msg,BINARY,,$@)
- $(Q)$(CC) $(CFLAGS) -O0 $(LDFLAGS) $^ $(LDLIBS) -o $@
+ $(Q)$(CC) $(CFLAGS) -Wl,-T,uprobe_multi.ld -O0 $(LDFLAGS) \
+ $(filter-out %.ld,$^) $(LDLIBS) -o $@
EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
- feature bpftool \
- $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \
+ feature bpftool \
+ $(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \
no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
bpf_test_no_cfi.ko \
- liburandom_read.so)
+ liburandom_read.so) \
+ $(OUTPUT)/FEATURE-DUMP.selftests
.PHONY: docs docs-clean
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index 9b974e425af3..776fbe3cb8f9 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -85,7 +85,37 @@ In case of linker errors when running selftests, try using static linking:
If you want to change pahole and llvm, you can change `PATH` environment
variable in the beginning of script.
-.. note:: The script currently only supports x86_64 and s390x architectures.
+Running vmtest on RV64
+======================
+To speed up testing and avoid various dependency issues, it is recommended to
+run vmtest in a Docker container. Before running vmtest, we need to prepare a
+Docker container and a local rootfs image. The overall steps are as follows:
+
+1. Create a Docker container as shown in link [0].
+
+2. Use the mkrootfs_debian.sh script [1] to build a local rootfs image:
+
+.. code-block:: console
+
+ $ sudo ./mkrootfs_debian.sh --arch riscv64 --distro noble
+
+3. Start the Docker container [0] and run vmtest inside it:
+
+.. code-block:: console
+
+ $ PLATFORM=riscv64 CROSS_COMPILE=riscv64-linux-gnu- \
+ tools/testing/selftests/bpf/vmtest.sh \
+ -l <path of local rootfs image> -- \
+ ./test_progs -d \
+ \"$(cat tools/testing/selftests/bpf/DENYLIST.riscv64 \
+ | cut -d'#' -f1 \
+ | sed -e 's/^[[:space:]]*//' \
+ -e 's/[[:space:]]*$//' \
+ | tr -s '\n' ',' \
+ )\"
+
+Link: https://github.com/pulehui/riscv-bpf-vmtest.git [0]
+Link: https://github.com/libbpf/ci/blob/main/rootfs/mkrootfs_debian.sh [1]
Additional information about selftest failures is
documented here.
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 627b74ae041b..1bd403a5ef7b 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -10,6 +10,7 @@
#include <sys/sysinfo.h>
#include <signal.h>
#include "bench.h"
+#include "bpf_util.h"
#include "testing_helpers.h"
struct env env = {
@@ -519,6 +520,12 @@ extern const struct bench bench_trig_uprobe_push;
extern const struct bench bench_trig_uretprobe_push;
extern const struct bench bench_trig_uprobe_ret;
extern const struct bench bench_trig_uretprobe_ret;
+extern const struct bench bench_trig_uprobe_multi_nop;
+extern const struct bench bench_trig_uretprobe_multi_nop;
+extern const struct bench bench_trig_uprobe_multi_push;
+extern const struct bench bench_trig_uretprobe_multi_push;
+extern const struct bench bench_trig_uprobe_multi_ret;
+extern const struct bench bench_trig_uretprobe_multi_ret;
extern const struct bench bench_rb_libbpf;
extern const struct bench bench_rb_custom;
@@ -573,6 +580,12 @@ static const struct bench *benchs[] = {
&bench_trig_uretprobe_push,
&bench_trig_uprobe_ret,
&bench_trig_uretprobe_ret,
+ &bench_trig_uprobe_multi_nop,
+ &bench_trig_uretprobe_multi_nop,
+ &bench_trig_uprobe_multi_push,
+ &bench_trig_uretprobe_multi_push,
+ &bench_trig_uprobe_multi_ret,
+ &bench_trig_uretprobe_multi_ret,
/* ringbuf/perfbuf benchmarks */
&bench_rb_libbpf,
&bench_rb_custom,
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index 68180d8f8558..005c401b3e22 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -10,6 +10,7 @@
#include <math.h>
#include <time.h>
#include <sys/syscall.h>
+#include <limits.h>
struct cpu_set {
bool *cpus;
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index 4b05539f167d..2ed0ef6f21ee 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -276,7 +276,7 @@ static void trigger_rawtp_setup(void)
* instructions. So use two different targets, one of which starts with nop
* and another doesn't.
*
- * GCC doesn't generate stack setup preample for these functions due to them
+ * GCC doesn't generate stack setup preamble for these functions due to them
* having no input arguments and doing nothing in the body.
*/
__nocf_check __weak void uprobe_target_nop(void)
@@ -332,7 +332,7 @@ static void *uprobe_producer_ret(void *input)
return NULL;
}
-static void usetup(bool use_retprobe, void *target_addr)
+static void usetup(bool use_retprobe, bool use_multi, void *target_addr)
{
size_t uprobe_offset;
struct bpf_link *link;
@@ -346,7 +346,10 @@ static void usetup(bool use_retprobe, void *target_addr)
exit(1);
}
- bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe, true);
+ if (use_multi)
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe_multi, true);
+ else
+ bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe, true);
err = trigger_bench__load(ctx.skel);
if (err) {
@@ -355,16 +358,28 @@ static void usetup(bool use_retprobe, void *target_addr)
}
uprobe_offset = get_uprobe_offset(target_addr);
- link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
- use_retprobe,
- -1 /* all PIDs */,
- "/proc/self/exe",
- uprobe_offset);
+ if (use_multi) {
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+ .retprobe = use_retprobe,
+ .cnt = 1,
+ .offsets = &uprobe_offset,
+ );
+ link = bpf_program__attach_uprobe_multi(
+ ctx.skel->progs.bench_trigger_uprobe_multi,
+ -1 /* all PIDs */, "/proc/self/exe", NULL, &opts);
+ ctx.skel->links.bench_trigger_uprobe_multi = link;
+ } else {
+ link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
+ use_retprobe,
+ -1 /* all PIDs */,
+ "/proc/self/exe",
+ uprobe_offset);
+ ctx.skel->links.bench_trigger_uprobe = link;
+ }
if (!link) {
- fprintf(stderr, "failed to attach uprobe!\n");
+ fprintf(stderr, "failed to attach %s!\n", use_multi ? "multi-uprobe" : "uprobe");
exit(1);
}
- ctx.skel->links.bench_trigger_uprobe = link;
}
static void usermode_count_setup(void)
@@ -374,32 +389,62 @@ static void usermode_count_setup(void)
static void uprobe_nop_setup(void)
{
- usetup(false, &uprobe_target_nop);
+ usetup(false, false /* !use_multi */, &uprobe_target_nop);
}
static void uretprobe_nop_setup(void)
{
- usetup(true, &uprobe_target_nop);
+ usetup(true, false /* !use_multi */, &uprobe_target_nop);
}
static void uprobe_push_setup(void)
{
- usetup(false, &uprobe_target_push);
+ usetup(false, false /* !use_multi */, &uprobe_target_push);
}
static void uretprobe_push_setup(void)
{
- usetup(true, &uprobe_target_push);
+ usetup(true, false /* !use_multi */, &uprobe_target_push);
}
static void uprobe_ret_setup(void)
{
- usetup(false, &uprobe_target_ret);
+ usetup(false, false /* !use_multi */, &uprobe_target_ret);
}
static void uretprobe_ret_setup(void)
{
- usetup(true, &uprobe_target_ret);
+ usetup(true, false /* !use_multi */, &uprobe_target_ret);
+}
+
+static void uprobe_multi_nop_setup(void)
+{
+ usetup(false, true /* use_multi */, &uprobe_target_nop);
+}
+
+static void uretprobe_multi_nop_setup(void)
+{
+ usetup(true, true /* use_multi */, &uprobe_target_nop);
+}
+
+static void uprobe_multi_push_setup(void)
+{
+ usetup(false, true /* use_multi */, &uprobe_target_push);
+}
+
+static void uretprobe_multi_push_setup(void)
+{
+ usetup(true, true /* use_multi */, &uprobe_target_push);
+}
+
+static void uprobe_multi_ret_setup(void)
+{
+ usetup(false, true /* use_multi */, &uprobe_target_ret);
+}
+
+static void uretprobe_multi_ret_setup(void)
+{
+ usetup(true, true /* use_multi */, &uprobe_target_ret);
}
const struct bench bench_trig_syscall_count = {
@@ -454,3 +499,9 @@ BENCH_TRIG_USERMODE(uprobe_ret, ret, "uprobe-ret");
BENCH_TRIG_USERMODE(uretprobe_nop, nop, "uretprobe-nop");
BENCH_TRIG_USERMODE(uretprobe_push, push, "uretprobe-push");
BENCH_TRIG_USERMODE(uretprobe_ret, ret, "uretprobe-ret");
+BENCH_TRIG_USERMODE(uprobe_multi_nop, nop, "uprobe-multi-nop");
+BENCH_TRIG_USERMODE(uprobe_multi_push, push, "uprobe-multi-push");
+BENCH_TRIG_USERMODE(uprobe_multi_ret, ret, "uprobe-multi-ret");
+BENCH_TRIG_USERMODE(uretprobe_multi_nop, nop, "uretprobe-multi-nop");
+BENCH_TRIG_USERMODE(uretprobe_multi_push, push, "uretprobe-multi-push");
+BENCH_TRIG_USERMODE(uretprobe_multi_ret, ret, "uretprobe-multi-ret");
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 828556cdc2f0..b0668f29f7b3 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -195,6 +195,32 @@ extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
*/
extern void bpf_throw(u64 cookie) __ksym;
+/* Description
+ * Acquire a reference on the exe_file member field belonging to the
+ * mm_struct that is nested within the supplied task_struct. The supplied
+ * task_struct must be trusted/referenced.
+ * Returns
+ * A referenced file pointer pointing to the exe_file member field of the
+ * mm_struct nested in the supplied task_struct, or NULL.
+ */
+extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;
+
+/* Description
+ * Release a reference on the supplied file. The supplied file must be
+ * acquired.
+ */
+extern void bpf_put_file(struct file *file) __ksym;
+
+/* Description
+ * Resolve a pathname for the supplied path and store it in the supplied
+ * buffer. The supplied path must be trusted/referenced.
+ * Returns
+ * A positive integer corresponding to the length of the resolved pathname,
+ * including the NULL termination character, stored in the supplied
+ * buffer. On error, a negative integer is returned.
+ */
+extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym;
+
/* This macro must be used to mark the exception callback corresponding to the
* main program. For example:
*
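
A hedged sketch of how the three kfuncs documented above compose in a sleepable BPF LSM program; the section name, hook, and buffer size are illustrative, and error handling is trimmed:

SEC("lsm.s/file_open")
int BPF_PROG(log_exe_path, struct file *file)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct file *exe_file;
	char buf[256];
	int len;

	exe_file = bpf_get_task_exe_file(task);	/* referenced, or NULL */
	if (!exe_file)
		return 0;

	len = bpf_path_d_path(&exe_file->f_path, buf, sizeof(buf));
	if (len > 0)
		bpf_printk("exe: %s", buf);

	bpf_put_file(exe_file);		/* drop the acquired reference */
	return 0;
}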
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index 3b6675ab4086..2eb3483f2fb0 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -45,7 +45,7 @@ extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clo
/* Description
* Modify the address of a AF_UNIX sockaddr.
- * Returns__bpf_kfunc
+ * Returns
* -EINVAL if the address size is too big or, 0 if the sockaddr was successfully modified.
*/
extern int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
@@ -78,4 +78,13 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
extern bool bpf_session_is_return(void) __ksym __weak;
extern __u64 *bpf_session_cookie(void) __ksym __weak;
+
+struct dentry;
+/* Description
+ * Returns xattr of a dentry
+ * Returns
+ * Error code
+ */
+extern int bpf_get_dentry_xattr(struct dentry *dentry, const char *name,
+ struct bpf_dynptr *value_ptr) __ksym __weak;
#endif
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 22807fd78fe6..8835761d9a12 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -17,6 +17,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
+#include <linux/filter.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
@@ -141,13 +142,12 @@ bpf_testmod_test_mod_kfunc(int i)
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
- if (cnt < 0) {
- it->cnt = 0;
+ it->cnt = cnt;
+
+ if (cnt < 0)
return -EINVAL;
- }
it->value = value;
- it->cnt = cnt;
return 0;
}
@@ -162,6 +162,14 @@ __bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
return &it->value;
}
+__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq* it__iter)
+{
+ if (it__iter->cnt < 0)
+ return 0;
+
+ return val + it__iter->value;
+}
+
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
it->cnt = 0;
@@ -176,6 +184,36 @@ __bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
{
}
+__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
+{
+ return NULL;
+}
+
+__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
+{
+ return NULL;
+}
+
+__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
+{
+}
+
+__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
+{
+}
+
__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
@@ -534,8 +572,16 @@ BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
+BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
+BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
+BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
@@ -923,6 +969,51 @@ out:
return err;
}
+static DEFINE_MUTEX(st_ops_mutex);
+static struct bpf_testmod_st_ops *st_ops;
+
+__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
+{
+ int ret = -1;
+
+ mutex_lock(&st_ops_mutex);
+ if (st_ops && st_ops->test_prologue)
+ ret = st_ops->test_prologue(args);
+ mutex_unlock(&st_ops_mutex);
+
+ return ret;
+}
+
+__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
+{
+ int ret = -1;
+
+ mutex_lock(&st_ops_mutex);
+ if (st_ops && st_ops->test_epilogue)
+ ret = st_ops->test_epilogue(args);
+ mutex_unlock(&st_ops_mutex);
+
+ return ret;
+}
+
+__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
+{
+ int ret = -1;
+
+ mutex_lock(&st_ops_mutex);
+ if (st_ops && st_ops->test_pro_epilogue)
+ ret = st_ops->test_pro_epilogue(args);
+ mutex_unlock(&st_ops_mutex);
+
+ return ret;
+}
+
+__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
+{
+ args->a += 10;
+ return args->a;
+}
+
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -959,6 +1050,10 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
static int bpf_testmod_ops_init(struct btf *btf)
@@ -1027,6 +1122,11 @@ static void bpf_testmod_test_2(int a, int b)
{
}
+static int bpf_testmod_tramp(int value)
+{
+ return 0;
+}
+
static int bpf_testmod_ops__test_maybe_null(int dummy,
struct task_struct *task__nullable)
{
@@ -1073,6 +1173,144 @@ struct bpf_struct_ops bpf_testmod_ops2 = {
.owner = THIS_MODULE,
};
+static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
+{
+ return 0;
+}
+
+static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
+{
+ return 0;
+}
+
+static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
+{
+ return 0;
+}
+
+static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
+ const struct bpf_prog *prog)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
+ strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
+ return 0;
+
+ /* r6 = r1[0]; // r6 will be "struct st_ops_args *args". r1 is "u64 *ctx".
+ * r7 = r6->a;
+ * r7 += 1000;
+ * r6->a = r7;
+ */
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
+ *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
+ *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
+ *insn++ = prog->insnsi[0];
+
+ return insn - insn_buf;
+}
+
+static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
+ s16 ctx_stack_off)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
+ strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
+ return 0;
+
+ /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
+ * r1 = r1[0]; // r1 will be "struct st_ops_args *args"
+ * r6 = r1->a;
+ * r6 += 10000;
+ * r1->a = r6;
+ * r0 = r6;
+ * r0 *= 2;
+ * BPF_EXIT;
+ */
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
+ *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
+ *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
+ *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
+ *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
+ *insn++ = BPF_EXIT_INSN();
+
+ return insn - insn_buf;
+}
+
+static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size)
+{
+ if (off < 0 || off + size > sizeof(struct st_ops_args))
+ return -EACCES;
+ return 0;
+}
+
+static const struct bpf_verifier_ops st_ops_verifier_ops = {
+ .is_valid_access = bpf_testmod_ops_is_valid_access,
+ .btf_struct_access = st_ops_btf_struct_access,
+ .gen_prologue = st_ops_gen_prologue,
+ .gen_epilogue = st_ops_gen_epilogue,
+ .get_func_proto = bpf_base_func_proto,
+};
+
+static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
+ .test_prologue = bpf_test_mod_st_ops__test_prologue,
+ .test_epilogue = bpf_test_mod_st_ops__test_epilogue,
+ .test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
+};
+
+static int st_ops_reg(void *kdata, struct bpf_link *link)
+{
+ int err = 0;
+
+ mutex_lock(&st_ops_mutex);
+ if (st_ops) {
+ pr_err("st_ops has already been registered\n");
+ err = -EEXIST;
+ goto unlock;
+ }
+ st_ops = kdata;
+
+unlock:
+ mutex_unlock(&st_ops_mutex);
+ return err;
+}
+
+static void st_ops_unreg(void *kdata, struct bpf_link *link)
+{
+ mutex_lock(&st_ops_mutex);
+ st_ops = NULL;
+ mutex_unlock(&st_ops_mutex);
+}
+
+static int st_ops_init(struct btf *btf)
+{
+ return 0;
+}
+
+static int st_ops_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ return 0;
+}
+
+static struct bpf_struct_ops testmod_st_ops = {
+ .verifier_ops = &st_ops_verifier_ops,
+ .init = st_ops_init,
+ .init_member = st_ops_init_member,
+ .reg = st_ops_reg,
+ .unreg = st_ops_unreg,
+ .cfi_stubs = &st_ops_cfi_stubs,
+ .name = "bpf_testmod_st_ops",
+ .owner = THIS_MODULE,
+};
+
extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
@@ -1083,14 +1321,17 @@ static int bpf_testmod_init(void)
.kfunc_btf_id = bpf_testmod_dtor_ids[1]
},
};
+ void **tramp;
int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
+ ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
ARRAY_SIZE(bpf_testmod_dtors),
THIS_MODULE);
@@ -1106,6 +1347,14 @@ static int bpf_testmod_init(void)
ret = register_bpf_testmod_uprobe();
if (ret < 0)
return ret;
+
+ /* Ensure nothing is between tramp_1..tramp_40 */
+ BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
+ offsetofend(struct bpf_testmod_ops, tramp_40));
+ tramp = (void **)&__bpf_testmod_ops.tramp_1;
+ while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
+ *tramp++ = bpf_testmod_tramp;
+
return 0;
}
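
For orientation, the injected prologue/epilogue above can be traced with a small worked example. Below is a hypothetical struct_ops implementation of test_pro_epilogue (a sketch, not part of this patch) with the resulting value flow, assuming args->a starts at 1:

    SEC("struct_ops/test_pro_epilogue")
    int BPF_PROG(test_pro_epilogue, struct st_ops_args *args)
    {
        /* The verifier injects st_ops_gen_prologue() before this body and
         * st_ops_gen_epilogue() at its exit, so with args->a == 1:
         *   prologue: args->a += 1000   -> a == 1001
         *   body:     returns 0         -> a unchanged
         *   epilogue: args->a += 10000  -> a == 11001
         *             r0 = a * 2        -> caller sees 22002
         */
        return 0;
    }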
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
index 23fa1872ee67..fb7dff47597a 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
@@ -35,6 +35,7 @@ struct bpf_testmod_ops {
void (*test_2)(int a, int b);
/* Used to test nullable arguments. */
int (*test_maybe_null)(int dummy, struct task_struct *task);
+ int (*unsupported_ops)(void);
/* The following fields are used to test shadow copies. */
char onebyte;
@@ -93,4 +94,15 @@ struct bpf_testmod_ops2 {
int (*test_1)(void);
};
+struct st_ops_args {
+ u64 a;
+};
+
+struct bpf_testmod_st_ops {
+ int (*test_prologue)(struct st_ops_args *args);
+ int (*test_epilogue)(struct st_ops_args *args);
+ int (*test_pro_epilogue)(struct st_ops_args *args);
+ struct module *owner;
+};
+
#endif /* _BPF_TESTMOD_H */
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
index e587a79f2239..b58817938deb 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
@@ -144,4 +144,19 @@ void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nulla
struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym;
void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym;
+struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr) __ksym;
+struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr) __ksym;
+void bpf_kfunc_nested_release_test(struct sk_buff *ptr) __ksym;
+
+struct st_ops_args;
+int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym;
+int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) __ksym;
+int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) __ksym;
+int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) __ksym;
+
+void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr) __ksym;
+void bpf_kfunc_trusted_task_test(struct task_struct *ptr) __ksym;
+void bpf_kfunc_trusted_num_test(int *ptr) __ksym;
+void bpf_kfunc_rcu_task_test(struct task_struct *ptr) __ksym;
+
#endif /* _BPF_TESTMOD_KFUNC_H */
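
As a hypothetical illustration of how a selftest BPF program might exercise the trusted-args declarations above (the tracepoint and program name are assumptions, not part of the patch; KF_TRUSTED_ARGS requires a trusted pointer such as a tp_btf argument):

    SEC("tp_btf/task_newtask")
    int BPF_PROG(trusted_task_usage, struct task_struct *task, u64 clone_flags)
    {
        /* task is a trusted pointer in tp_btf programs, so this should verify */
        bpf_kfunc_trusted_task_test(task);
        return 0;
    }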
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index 23bb9a9e6a7d..e4535451322e 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -644,7 +644,7 @@ unsigned long long get_classid_cgroup_id(void)
/**
* get_cgroup1_hierarchy_id - Retrieves the ID of a cgroup1 hierarchy from the cgroup1 subsys name.
* @subsys_name: The cgroup1 subsys name, which can be retrieved from /proc/self/cgroup. It can be
- * a named cgroup like "name=systemd", a controller name like "net_cls", or multi-contollers like
+ * a named cgroup like "name=systemd", a controller name like "net_cls", or multi-controllers like
* "net_cls,net_prio".
*/
int get_cgroup1_hierarchy_id(const char *subsys_name)
diff --git a/tools/testing/selftests/bpf/config.riscv64 b/tools/testing/selftests/bpf/config.riscv64
new file mode 100644
index 000000000000..bb7043a80e1a
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.riscv64
@@ -0,0 +1,84 @@
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CPUSETS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_FS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FRAME_POINTER=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_INET=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KPROBES=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=y
+CONFIG_NONPORTABLE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=256
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_RISCV_ISA_C=y
+CONFIG_RISCV_PMU=y
+CONFIG_RISCV_PMU_SBI=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SMP=y
+CONFIG_SOC_VIRT=y
+CONFIG_SYSVIPC=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/disasm_helpers.c b/tools/testing/selftests/bpf/disasm_helpers.c
new file mode 100644
index 000000000000..f529f1c8c171
--- /dev/null
+++ b/tools/testing/selftests/bpf/disasm_helpers.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include <bpf/bpf.h>
+#include "disasm.h"
+
+struct print_insn_context {
+ char scratch[16];
+ char *buf;
+ size_t sz;
+};
+
+static void print_insn_cb(void *private_data, const char *fmt, ...)
+{
+ struct print_insn_context *ctx = private_data;
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(ctx->buf, ctx->sz, fmt, args);
+ va_end(args);
+}
+
+static const char *print_call_cb(void *private_data, const struct bpf_insn *insn)
+{
+ struct print_insn_context *ctx = private_data;
+
+ /* For pseudo calls, verifier.c:jit_subprogs() moves the original
+ * imm into insn->off and changes insn->imm to be an index of
+ * the subprog instead.
+ */
+ if (insn->src_reg == BPF_PSEUDO_CALL) {
+ snprintf(ctx->scratch, sizeof(ctx->scratch), "%+d", insn->off);
+ return ctx->scratch;
+ }
+
+ return NULL;
+}
+
+struct bpf_insn *disasm_insn(struct bpf_insn *insn, char *buf, size_t buf_sz)
+{
+ struct print_insn_context ctx = {
+ .buf = buf,
+ .sz = buf_sz,
+ };
+ struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn_cb,
+ .cb_call = print_call_cb,
+ .private_data = &ctx,
+ };
+ char *tmp, *pfx_end, *sfx_start;
+ bool double_insn;
+ int len;
+
+ print_bpf_insn(&cbs, insn, true);
+ /* We share code with the kernel BPF disassembler, which adds a '(FF) '
+ * prefix to each instruction (FF stands for the instruction `code` byte).
+ * Remove the prefix in place, and also simplify call instructions.
+ * E.g.: "(85) call foo#10" -> "call foo".
+ * Also remove the trailing newline (the 'max(strlen(buf) - 1, 0)' thing).
+ */
+ pfx_end = buf + 5;
+ sfx_start = buf + max((int)strlen(buf) - 1, 0);
+ if (strncmp(pfx_end, "call ", 5) == 0 && (tmp = strrchr(buf, '#')))
+ sfx_start = tmp;
+ len = sfx_start - pfx_end;
+ memmove(buf, pfx_end, len);
+ buf[len] = 0;
+ double_insn = insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+ return insn + (double_insn ? 2 : 1);
+}
diff --git a/tools/testing/selftests/bpf/disasm_helpers.h b/tools/testing/selftests/bpf/disasm_helpers.h
new file mode 100644
index 000000000000..7b26cab70099
--- /dev/null
+++ b/tools/testing/selftests/bpf/disasm_helpers.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+#ifndef __DISASM_HELPERS_H
+#define __DISASM_HELPERS_H
+
+#include <stdlib.h>
+
+struct bpf_insn;
+
+struct bpf_insn *disasm_insn(struct bpf_insn *insn, char *buf, size_t buf_sz);
+
+#endif /* __DISASM_HELPERS_H */
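
A plausible usage sketch for disasm_insn() (insns and insn_cnt are assumed inputs; note the return value already skips the second half of ld_imm64 instructions):

    char buf[64];
    struct bpf_insn *insn = insns;
    struct bpf_insn *end = insns + insn_cnt;

    while (insn < end) {
        insn = disasm_insn(insn, buf, sizeof(buf));
        printf("%s\n", buf);
    }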
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c
deleted file mode 100644
index aefd83ebdcd7..000000000000
--- a/tools/testing/selftests/bpf/get_cgroup_id_user.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <syscall.h>
-#include <unistd.h>
-#include <linux/perf_event.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#include <linux/bpf.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "cgroup_helpers.h"
-#include "testing_helpers.h"
-
-#define CHECK(condition, tag, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- printf("%s:FAIL:%s ", __func__, tag); \
- printf(format); \
- } else { \
- printf("%s:PASS:%s\n", __func__, tag); \
- } \
- __ret; \
-})
-
-static int bpf_find_map(const char *test, struct bpf_object *obj,
- const char *name)
-{
- struct bpf_map *map;
-
- map = bpf_object__find_map_by_name(obj, name);
- if (!map)
- return -1;
- return bpf_map__fd(map);
-}
-
-#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
-
-int main(int argc, char **argv)
-{
- const char *probe_name = "syscalls/sys_enter_nanosleep";
- const char *file = "get_cgroup_id_kern.bpf.o";
- int err, bytes, efd, prog_fd, pmu_fd;
- int cgroup_fd, cgidmap_fd, pidmap_fd;
- struct perf_event_attr attr = {};
- struct bpf_object *obj;
- __u64 kcgid = 0, ucgid;
- __u32 key = 0, pid;
- int exit_code = 1;
- char buf[256];
- const struct timespec req = {
- .tv_sec = 1,
- .tv_nsec = 0,
- };
-
- cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
- if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
- return 1;
-
- /* Use libbpf 1.0 API mode */
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
-
- err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
- goto cleanup_cgroup_env;
-
- cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
- if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
- cgidmap_fd, errno))
- goto close_prog;
-
- pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
- if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
- pidmap_fd, errno))
- goto close_prog;
-
- pid = getpid();
- bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
-
- if (access("/sys/kernel/tracing/trace", F_OK) == 0) {
- snprintf(buf, sizeof(buf),
- "/sys/kernel/tracing/events/%s/id", probe_name);
- } else {
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/%s/id", probe_name);
- }
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
- "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
-
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
-
- /* attach to this pid so the all bpf invocations will be in the
- * cgroup associated with this pid.
- */
- pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
- errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- /* trigger some syscalls */
- syscall(__NR_nanosleep, &req, NULL);
-
- err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
- if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
- goto close_pmu;
-
- ucgid = get_cgroup_id(TEST_CGROUP);
- if (CHECK(kcgid != ucgid, "compare_cgroup_id",
- "kern cgid %llx user cgid %llx", kcgid, ucgid))
- goto close_pmu;
-
- exit_code = 0;
- printf("%s:PASS\n", argv[0]);
-
-close_pmu:
- close(pmu_fd);
-close_prog:
- bpf_object__close(obj);
-cleanup_cgroup_env:
- cleanup_cgroup_environment();
- return exit_code;
-}
diff --git a/tools/testing/selftests/bpf/jit_disasm_helpers.c b/tools/testing/selftests/bpf/jit_disasm_helpers.c
new file mode 100644
index 000000000000..febd6b12e372
--- /dev/null
+++ b/tools/testing/selftests/bpf/jit_disasm_helpers.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <test_progs.h>
+
+#ifdef HAVE_LLVM_SUPPORT
+
+#include <llvm-c/Core.h>
+#include <llvm-c/Disassembler.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/TargetMachine.h>
+
+/* The intent is to use get_jited_program_text() for small test
+ * programs written in BPF assembly, so assume that 32 local labels
+ * are sufficient.
+ */
+#define MAX_LOCAL_LABELS 32
+
+/* Local labels are encoded as 'L42'; this requires 4 bytes of storage:
+ * 3 characters + a zero byte
+ */
+#define LOCAL_LABEL_LEN 4
+
+static bool llvm_initialized;
+
+struct local_labels {
+ bool print_phase;
+ __u32 prog_len;
+ __u32 cnt;
+ __u32 pcs[MAX_LOCAL_LABELS];
+ char names[MAX_LOCAL_LABELS][LOCAL_LABEL_LEN];
+};
+
+static const char *lookup_symbol(void *data, uint64_t ref_value, uint64_t *ref_type,
+ uint64_t ref_pc, const char **ref_name)
+{
+ struct local_labels *labels = data;
+ uint64_t type = *ref_type;
+ int i;
+
+ *ref_type = LLVMDisassembler_ReferenceType_InOut_None;
+ *ref_name = NULL;
+ if (type != LLVMDisassembler_ReferenceType_In_Branch)
+ return NULL;
+ /* Depending on labels->print_phase, either discover local labels or
+ * return the name assigned to a local jump target:
+ * - if print_phase is true and ref_value is in labels->pcs,
+ * return the corresponding labels->names entry;
+ * - if print_phase is false, save program-local jump targets
+ * in labels->pcs.
+ */
+ if (labels->print_phase) {
+ for (i = 0; i < labels->cnt; ++i)
+ if (labels->pcs[i] == ref_value)
+ return labels->names[i];
+ } else {
+ if (labels->cnt < MAX_LOCAL_LABELS && ref_value < labels->prog_len)
+ labels->pcs[labels->cnt++] = ref_value;
+ }
+ return NULL;
+}
+
+static int disasm_insn(LLVMDisasmContextRef ctx, uint8_t *image, __u32 len, __u32 pc,
+ char *buf, __u32 buf_sz)
+{
+ int i, cnt;
+
+ cnt = LLVMDisasmInstruction(ctx, image + pc, len - pc, pc,
+ buf, buf_sz);
+ if (cnt > 0)
+ return cnt;
+ PRINT_FAIL("Can't disasm instruction at offset %d:", pc);
+ for (i = 0; i < 16 && pc + i < len; ++i)
+ printf(" %02x", image[pc + i]);
+ printf("\n");
+ return -EINVAL;
+}
+
+static int cmp_u32(const void *_a, const void *_b)
+{
+ __u32 a = *(__u32 *)_a;
+ __u32 b = *(__u32 *)_b;
+
+ if (a < b)
+ return -1;
+ if (a > b)
+ return 1;
+ return 0;
+}
+
+static int disasm_one_func(FILE *text_out, uint8_t *image, __u32 len)
+{
+ char *label, *colon, *triple = NULL;
+ LLVMDisasmContextRef ctx = NULL;
+ struct local_labels labels = {};
+ __u32 *label_pc, pc;
+ int i, cnt, err = 0;
+ char buf[64];
+
+ triple = LLVMGetDefaultTargetTriple();
+ ctx = LLVMCreateDisasm(triple, &labels, 0, NULL, lookup_symbol);
+ if (!ASSERT_OK_PTR(ctx, "LLVMCreateDisasm")) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ cnt = LLVMSetDisasmOptions(ctx, LLVMDisassembler_Option_PrintImmHex);
+ if (!ASSERT_EQ(cnt, 1, "LLVMSetDisasmOptions")) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* discover labels */
+ labels.prog_len = len;
+ pc = 0;
+ while (pc < len) {
+ cnt = disasm_insn(ctx, image, len, pc, buf, 1);
+ if (cnt < 0) {
+ err = cnt;
+ goto out;
+ }
+ pc += cnt;
+ }
+ qsort(labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32);
+ for (i = 0; i < labels.cnt; ++i)
+ /* gcc is unable to infer an upper bound for labels.cnt and assumes
+ * it to be U32_MAX. U32_MAX takes 10 decimal digits.
+ * snprintf below prints into labels.names[*],
+ * which has space only for two digits and a letter.
+ * To avoid a truncation warning, use (i % MAX_LOCAL_LABELS),
+ * which informs gcc of the printed value's upper bound.
+ */
+ snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i % MAX_LOCAL_LABELS);
+
+ /* now print with labels */
+ labels.print_phase = true;
+ pc = 0;
+ while (pc < len) {
+ cnt = disasm_insn(ctx, image, len, pc, buf, sizeof(buf));
+ if (cnt < 0) {
+ err = cnt;
+ goto out;
+ }
+ label_pc = bsearch(&pc, labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32);
+ label = "";
+ colon = "";
+ if (label_pc) {
+ label = labels.names[label_pc - labels.pcs];
+ colon = ":";
+ }
+ fprintf(text_out, "%x:\t", pc);
+ for (i = 0; i < cnt; ++i)
+ fprintf(text_out, "%02x ", image[pc + i]);
+ for (i = cnt * 3; i < 12 * 3; ++i)
+ fputc(' ', text_out);
+ fprintf(text_out, "%s%s%s\n", label, colon, buf);
+ pc += cnt;
+ }
+
+out:
+ if (triple)
+ LLVMDisposeMessage(triple);
+ if (ctx)
+ LLVMDisasmDispose(ctx);
+ return err;
+}
+
+int get_jited_program_text(int fd, char *text, size_t text_sz)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 jited_funcs, len, pc;
+ __u32 *func_lens = NULL;
+ FILE *text_out = NULL;
+ uint8_t *image = NULL;
+ int i, err = 0;
+
+ if (!llvm_initialized) {
+ LLVMInitializeAllTargetInfos();
+ LLVMInitializeAllTargetMCs();
+ LLVMInitializeAllDisassemblers();
+ llvm_initialized = 1;
+ }
+
+ text_out = fmemopen(text, text_sz, "w");
+ if (!ASSERT_OK_PTR(text_out, "fmemopen")) {
+ err = -errno;
+ goto out;
+ }
+
+ /* first call is to find out jited program len */
+ err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd #1"))
+ goto out;
+
+ len = info.jited_prog_len;
+ image = malloc(len);
+ if (!ASSERT_OK_PTR(image, "malloc(info.jited_prog_len)")) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ jited_funcs = info.nr_jited_func_lens;
+ func_lens = malloc(jited_funcs * sizeof(__u32));
+ if (!ASSERT_OK_PTR(func_lens, "malloc(info.nr_jited_func_lens)")) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.jited_prog_insns = (__u64)image;
+ info.jited_prog_len = len;
+ info.jited_func_lens = (__u64)func_lens;
+ info.nr_jited_func_lens = jited_funcs;
+ err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd #2"))
+ goto out;
+
+ for (pc = 0, i = 0; i < jited_funcs; ++i) {
+ fprintf(text_out, "func #%d:\n", i);
+ disasm_one_func(text_out, image + pc, func_lens[i]);
+ fprintf(text_out, "\n");
+ pc += func_lens[i];
+ }
+
+out:
+ if (text_out)
+ fclose(text_out);
+ if (image)
+ free(image);
+ if (func_lens)
+ free(func_lens);
+ return err;
+}
+
+#else /* HAVE_LLVM_SUPPORT */
+
+int get_jited_program_text(int fd, char *text, size_t text_sz)
+{
+ if (env.verbosity >= VERBOSE_VERY)
+ printf("compiled w/o llvm development libraries, can't dis-assembly binary code");
+ return -EOPNOTSUPP;
+}
+
+#endif /* HAVE_LLVM_SUPPORT */
diff --git a/tools/testing/selftests/bpf/jit_disasm_helpers.h b/tools/testing/selftests/bpf/jit_disasm_helpers.h
new file mode 100644
index 000000000000..e6924fd65ecf
--- /dev/null
+++ b/tools/testing/selftests/bpf/jit_disasm_helpers.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+#ifndef __JIT_DISASM_HELPERS_H
+#define __JIT_DISASM_HELPERS_H
+
+#include <stddef.h>
+
+int get_jited_program_text(int fd, char *text, size_t text_sz);
+
+#endif /* __JIT_DISASM_HELPERS_H */
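
A minimal usage sketch, assuming a loaded skeleton program and a buffer large enough for the JITed text:

    char text[16 * 1024];

    if (get_jited_program_text(bpf_program__fd(prog), text, sizeof(text)) == 0)
        printf("%s", text);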
diff --git a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
index 1230ccf90128..5da493b94ae2 100644
--- a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
+++ b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
@@ -197,7 +197,7 @@ void __test_map_lookup_and_delete_batch(bool is_pcpu)
CHECK(total != max_entries, "delete with steps",
"total = %u, max_entries = %u\n", total, max_entries);
- /* check map is empty, errono == ENOENT */
+ /* check map is empty, errno == ENOENT */
err = bpf_map_get_next_key(map_fd, NULL, &key);
CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
"error: %s\n", strerror(errno));
diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c
index b66d56ddb7ef..fe3e19f96244 100644
--- a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c
+++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c
@@ -135,7 +135,7 @@ void test_lpm_trie_map_batch_ops(void)
CHECK(total != max_entries, "delete with steps",
"total = %u, max_entries = %u\n", total, max_entries);
- /* check map is empty, errono == ENOENT */
+ /* check map is empty, errno == ENOENT */
err = bpf_map_get_next_key(map_fd, NULL, &key);
CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()",
"error: %s\n", strerror(errno));
diff --git a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
index 2ea36408816b..1c7c04288eff 100644
--- a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
+++ b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
@@ -17,6 +17,7 @@
#define MAX_ENTRIES_HASH_OF_MAPS 64
#define N_THREADS 8
#define MAX_MAP_KEY_SIZE 4
+#define PCPU_MIN_UNIT_SIZE 32768
static void map_info(int map_fd, struct bpf_map_info *info)
{
@@ -456,6 +457,22 @@ static void map_percpu_stats_hash_of_maps(void)
printf("test_%s:PASS\n", __func__);
}
+static void map_percpu_stats_map_value_size(void)
+{
+ int fd;
+ int value_sz = PCPU_MIN_UNIT_SIZE + 1;
+ struct bpf_map_create_opts opts = { .sz = sizeof(opts) };
+ enum bpf_map_type map_types[] = { BPF_MAP_TYPE_PERCPU_ARRAY,
+ BPF_MAP_TYPE_PERCPU_HASH,
+ BPF_MAP_TYPE_LRU_PERCPU_HASH };
+ for (int i = 0; i < ARRAY_SIZE(map_types); i++) {
+ fd = bpf_map_create(map_types[i], NULL, sizeof(__u32), value_sz, 1, &opts);
+ CHECK(fd < 0 && errno != E2BIG, "percpu map value size",
+ "error: %s\n", strerror(errno));
+ }
+ printf("test_%s:PASS\n", __func__);
+}
+
void test_map_percpu_stats(void)
{
map_percpu_stats_hash();
@@ -467,4 +484,5 @@ void test_map_percpu_stats(void)
map_percpu_stats_percpu_lru_hash();
map_percpu_stats_percpu_lru_hash_no_common();
map_percpu_stats_hash_of_maps();
+ map_percpu_stats_map_value_size();
}
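
PCPU_MIN_UNIT_SIZE here appears to mirror the kernel's smallest per-CPU allocation unit (32 KiB), so a per-CPU value of 32768 + 1 = 32769 bytes is expected to be rejected at map creation; a minimal sketch of that expectation (map type picked arbitrarily):

    int fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL,
                            sizeof(__u32), 32768 + 1, 1, NULL);
    /* expected: fd < 0 with errno == E2BIG */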
diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
index 18405c3b7cee..af10c309359a 100644
--- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
@@ -412,7 +412,7 @@ static void test_sk_storage_map_stress_free(void)
rlim_new.rlim_max = rlim_new.rlim_cur + 128;
err = setrlimit(RLIMIT_NOFILE, &rlim_new);
CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
- rlim_new.rlim_cur, errno);
+ (unsigned long) rlim_new.rlim_cur, errno);
}
err = do_sk_storage_map_stress_free();
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index e0cba4178e41..27784946b01b 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -11,17 +11,31 @@
#include <arpa/inet.h>
#include <sys/mount.h>
#include <sys/stat.h>
+#include <sys/types.h>
#include <sys/un.h>
+#include <sys/eventfd.h>
#include <linux/err.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/limits.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <netinet/tcp.h>
+#include <net/if.h>
+
#include "bpf_util.h"
#include "network_helpers.h"
#include "test_progs.h"
+#ifdef TRAFFIC_MONITOR
+/* Prevent pcap.h from including pcap/bpf.h and causing conflicts */
+#define PCAP_DONT_INCLUDE_PCAP_BPF_H 1
+#include <pcap/pcap.h>
+#include <pcap/dlt.h>
+#endif
+
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
@@ -80,12 +94,15 @@ int settimeo(int fd, int timeout_ms)
#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; })
-static int __start_server(int type, const struct sockaddr *addr, socklen_t addrlen,
- const struct network_helper_opts *opts)
+int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t addrlen,
+ const struct network_helper_opts *opts)
{
int fd;
- fd = socket(addr->sa_family, type, opts->proto);
+ if (!opts)
+ opts = &default_opts;
+
+ fd = socket(addr->ss_family, type, opts->proto);
if (fd < 0) {
log_err("Failed to create server socket");
return -1;
@@ -100,7 +117,7 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl
goto error_close;
}
- if (bind(fd, addr, addrlen) < 0) {
+ if (bind(fd, (struct sockaddr *)addr, addrlen) < 0) {
log_err("Failed to bind socket");
goto error_close;
}
@@ -131,7 +148,7 @@ int start_server_str(int family, int type, const char *addr_str, __u16 port,
if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
return -1;
- return __start_server(type, (struct sockaddr *)&addr, addrlen, opts);
+ return start_server_addr(type, &addr, addrlen, opts);
}
int start_server(int family, int type, const char *addr_str, __u16 port,
@@ -173,7 +190,7 @@ int *start_reuseport_server(int family, int type, const char *addr_str,
if (!fds)
return NULL;
- fds[0] = __start_server(type, (struct sockaddr *)&addr, addrlen, &opts);
+ fds[0] = start_server_addr(type, &addr, addrlen, &opts);
if (fds[0] == -1)
goto close_fds;
nr_fds = 1;
@@ -182,7 +199,7 @@ int *start_reuseport_server(int family, int type, const char *addr_str,
goto close_fds;
for (; nr_fds < nr_listens; nr_fds++) {
- fds[nr_fds] = __start_server(type, (struct sockaddr *)&addr, addrlen, &opts);
+ fds[nr_fds] = start_server_addr(type, &addr, addrlen, &opts);
if (fds[nr_fds] == -1)
goto close_fds;
}
@@ -194,15 +211,6 @@ close_fds:
return NULL;
}
-int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t len,
- const struct network_helper_opts *opts)
-{
- if (!opts)
- opts = &default_opts;
-
- return __start_server(type, (struct sockaddr *)addr, len, opts);
-}
-
void free_fds(int *fds, unsigned int nr_close_fds)
{
if (fds) {
@@ -277,33 +285,6 @@ error_close:
return -1;
}
-static int connect_fd_to_addr(int fd,
- const struct sockaddr_storage *addr,
- socklen_t addrlen, const bool must_fail)
-{
- int ret;
-
- errno = 0;
- ret = connect(fd, (const struct sockaddr *)addr, addrlen);
- if (must_fail) {
- if (!ret) {
- log_err("Unexpected success to connect to server");
- return -1;
- }
- if (errno != EPERM) {
- log_err("Unexpected error from connect to server");
- return -1;
- }
- } else {
- if (ret) {
- log_err("Failed to connect to server");
- return -1;
- }
- }
-
- return 0;
-}
-
int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t addrlen,
const struct network_helper_opts *opts)
{
@@ -318,17 +299,17 @@ int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t add
return -1;
}
- if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail))
- goto error_close;
+ if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
+ log_err("Failed to connect to server");
+ save_errno_close(fd);
+ return -1;
+ }
return fd;
-
-error_close:
- save_errno_close(fd);
- return -1;
}
-int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts)
+int connect_to_addr_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts)
{
struct sockaddr_storage addr;
socklen_t addrlen;
@@ -336,6 +317,27 @@ int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts
if (!opts)
opts = &default_opts;
+ if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
+ return -1;
+
+ return connect_to_addr(type, &addr, addrlen, opts);
+}
+
+int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
+{
+ struct sockaddr_storage addr;
+ socklen_t addrlen, optlen;
+ int type;
+
+ if (!opts)
+ opts = &default_opts;
+
+ optlen = sizeof(type);
+ if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
+ log_err("getsockopt(SOL_TYPE)");
+ return -1;
+ }
+
addrlen = sizeof(addr);
if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
log_err("Failed to get server addr");
@@ -350,14 +352,8 @@ int connect_to_fd(int server_fd, int timeout_ms)
struct network_helper_opts opts = {
.timeout_ms = timeout_ms,
};
- int type, protocol;
socklen_t optlen;
-
- optlen = sizeof(type);
- if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
- log_err("getsockopt(SOL_TYPE)");
- return -1;
- }
+ int protocol;
optlen = sizeof(protocol);
if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) {
@@ -366,7 +362,7 @@ int connect_to_fd(int server_fd, int timeout_ms)
}
opts.proto = protocol;
- return connect_to_fd_opts(server_fd, type, &opts);
+ return connect_to_fd_opts(server_fd, &opts);
}
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
@@ -382,8 +378,10 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
return -1;
}
- if (connect_fd_to_addr(client_fd, &addr, len, false))
+ if (connect(client_fd, (const struct sockaddr *)&addr, len)) {
+ log_err("Failed to connect to server");
return -1;
+ }
return 0;
}
@@ -448,6 +446,52 @@ char *ping_command(int family)
return "ping";
}
+int remove_netns(const char *name)
+{
+ char *cmd;
+ int r;
+
+ r = asprintf(&cmd, "ip netns del %s >/dev/null 2>&1", name);
+ if (r < 0) {
+ log_err("Failed to malloc cmd");
+ return -1;
+ }
+
+ r = system(cmd);
+ free(cmd);
+ return r;
+}
+
+int make_netns(const char *name)
+{
+ char *cmd;
+ int r;
+
+ r = asprintf(&cmd, "ip netns add %s", name);
+ if (r < 0) {
+ log_err("Failed to malloc cmd");
+ return -1;
+ }
+
+ r = system(cmd);
+ free(cmd);
+
+ if (r)
+ return r;
+
+ r = asprintf(&cmd, "ip -n %s link set lo up", name);
+ if (r < 0) {
+ log_err("Failed to malloc cmd for setting up lo");
+ remove_netns(name);
+ return -1;
+ }
+
+ r = system(cmd);
+ free(cmd);
+
+ return r;
+}
+
struct nstoken {
int orig_netns_fd;
};
@@ -676,3 +720,443 @@ int send_recv_data(int lfd, int fd, uint32_t total_bytes)
return err;
}
+
+#ifdef TRAFFIC_MONITOR
+struct tmonitor_ctx {
+ pcap_t *pcap;
+ pcap_dumper_t *dumper;
+ pthread_t thread;
+ int wake_fd;
+
+ volatile bool done;
+ char pkt_fname[PATH_MAX];
+ int pcap_fd;
+};
+
+/* Is this packet captured with an Ethernet protocol type? */
+static bool is_ethernet(const u_char *packet)
+{
+ u16 arphdr_type;
+
+ memcpy(&arphdr_type, packet + 8, 2);
+ arphdr_type = ntohs(arphdr_type);
+
+ /* Except for the following cases, the protocol type field contains the
+ * Ethernet protocol type of the packet.
+ *
+ * https://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL2.html
+ */
+ switch (arphdr_type) {
+ case 770: /* ARPHRD_FRAD */
+ case 778: /* ARPHRD_IPGRE */
+ case 803: /* ARPHRD_IEEE80211_RADIOTAP */
+ printf("Packet captured: arphdr_type=%d\n", arphdr_type);
+ return false;
+ }
+ return true;
+}
+
+static const char * const pkt_types[] = {
+ "In",
+ "B", /* Broadcast */
+ "M", /* Multicast */
+ "C", /* Captured with the promiscuous mode */
+ "Out",
+};
+
+static const char *pkt_type_str(u16 pkt_type)
+{
+ if (pkt_type < ARRAY_SIZE(pkt_types))
+ return pkt_types[pkt_type];
+ return "Unknown";
+}
+
+/* Show the information of the transport layer in the packet */
+static void show_transport(const u_char *packet, u16 len, u32 ifindex,
+ const char *src_addr, const char *dst_addr,
+ u16 proto, bool ipv6, u8 pkt_type)
+{
+ char *ifname, _ifname[IF_NAMESIZE];
+ const char *transport_str;
+ u16 src_port, dst_port;
+ struct udphdr *udp;
+ struct tcphdr *tcp;
+
+ ifname = if_indextoname(ifindex, _ifname);
+ if (!ifname) {
+ snprintf(_ifname, sizeof(_ifname), "unknown(%d)", ifindex);
+ ifname = _ifname;
+ }
+
+ if (proto == IPPROTO_UDP) {
+ udp = (struct udphdr *)packet;
+ src_port = ntohs(udp->source);
+ dst_port = ntohs(udp->dest);
+ transport_str = "UDP";
+ } else if (proto == IPPROTO_TCP) {
+ tcp = (struct tcphdr *)packet;
+ src_port = ntohs(tcp->source);
+ dst_port = ntohs(tcp->dest);
+ transport_str = "TCP";
+ } else if (proto == IPPROTO_ICMP) {
+ printf("%-7s %-3s IPv4 %s > %s: ICMP, length %d, type %d, code %d\n",
+ ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len,
+ packet[0], packet[1]);
+ return;
+ } else if (proto == IPPROTO_ICMPV6) {
+ printf("%-7s %-3s IPv6 %s > %s: ICMPv6, length %d, type %d, code %d\n",
+ ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len,
+ packet[0], packet[1]);
+ return;
+ } else {
+ printf("%-7s %-3s %s %s > %s: protocol %d\n",
+ ifname, pkt_type_str(pkt_type), ipv6 ? "IPv6" : "IPv4",
+ src_addr, dst_addr, proto);
+ return;
+ }
+
+ /* TCP or UDP */
+
+ flockfile(stdout);
+ if (ipv6)
+ printf("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d",
+ ifname, pkt_type_str(pkt_type), src_addr, src_port,
+ dst_addr, dst_port, transport_str, len);
+ else
+ printf("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d",
+ ifname, pkt_type_str(pkt_type), src_addr, src_port,
+ dst_addr, dst_port, transport_str, len);
+
+ if (proto == IPPROTO_TCP) {
+ if (tcp->fin)
+ printf(", FIN");
+ if (tcp->syn)
+ printf(", SYN");
+ if (tcp->rst)
+ printf(", RST");
+ if (tcp->ack)
+ printf(", ACK");
+ }
+
+ printf("\n");
+ funlockfile(stdout);
+}
+
+static void show_ipv6_packet(const u_char *packet, u32 ifindex, u8 pkt_type)
+{
+ char src_buf[INET6_ADDRSTRLEN], dst_buf[INET6_ADDRSTRLEN];
+ struct ipv6hdr *pkt = (struct ipv6hdr *)packet;
+ const char *src, *dst;
+ u_char proto;
+
+ src = inet_ntop(AF_INET6, &pkt->saddr, src_buf, sizeof(src_buf));
+ if (!src)
+ src = "<invalid>";
+ dst = inet_ntop(AF_INET6, &pkt->daddr, dst_buf, sizeof(dst_buf));
+ if (!dst)
+ dst = "<invalid>";
+ proto = pkt->nexthdr;
+ show_transport(packet + sizeof(struct ipv6hdr),
+ ntohs(pkt->payload_len),
+ ifindex, src, dst, proto, true, pkt_type);
+}
+
+static void show_ipv4_packet(const u_char *packet, u32 ifindex, u8 pkt_type)
+{
+ char src_buf[INET_ADDRSTRLEN], dst_buf[INET_ADDRSTRLEN];
+ struct iphdr *pkt = (struct iphdr *)packet;
+ const char *src, *dst;
+ u_char proto;
+
+ src = inet_ntop(AF_INET, &pkt->saddr, src_buf, sizeof(src_buf));
+ if (!src)
+ src = "<invalid>";
+ dst = inet_ntop(AF_INET, &pkt->daddr, dst_buf, sizeof(dst_buf));
+ if (!dst)
+ dst = "<invalid>";
+ proto = pkt->protocol;
+ show_transport(packet + sizeof(struct iphdr),
+ ntohs(pkt->tot_len),
+ ifindex, src, dst, proto, false, pkt_type);
+}
+
+static void *traffic_monitor_thread(void *arg)
+{
+ char *ifname, _ifname[IF_NAMESIZE];
+ const u_char *packet, *payload;
+ struct tmonitor_ctx *ctx = arg;
+ pcap_dumper_t *dumper = ctx->dumper;
+ int fd = ctx->pcap_fd, nfds, r;
+ int wake_fd = ctx->wake_fd;
+ struct pcap_pkthdr header;
+ pcap_t *pcap = ctx->pcap;
+ u32 ifindex;
+ fd_set fds;
+ u16 proto;
+ u8 ptype;
+
+ nfds = (fd > wake_fd ? fd : wake_fd) + 1;
+ FD_ZERO(&fds);
+
+ while (!ctx->done) {
+ FD_SET(fd, &fds);
+ FD_SET(wake_fd, &fds);
+ r = select(nfds, &fds, NULL, NULL, NULL);
+ if (!r)
+ continue;
+ if (r < 0) {
+ if (errno == EINTR)
+ continue;
+ log_err("Fail to select on pcap fd and wake fd");
+ break;
+ }
+
+ /* This instance of pcap is non-blocking */
+ packet = pcap_next(pcap, &header);
+ if (!packet)
+ continue;
+
+ /* According to the man page of pcap_dump(), the first argument
+ * is the pcap_dumper_t pointer even though its declared type is
+ * u_char *.
+ */
+ pcap_dump((u_char *)dumper, &header, packet);
+
+ /* Not sure what other types of packets look like. Here, we
+ * parse only Ethernet and compatible packets.
+ */
+ if (!is_ethernet(packet))
+ continue;
+
+ /* Skip SLL2 header
+ * https://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL2.html
+ *
+ * Although the document doesn't say so explicitly, the payload
+ * doesn't include the Ethernet header. The payload starts
+ * from the first byte of the network layer header.
+ */
+ payload = packet + 20;
+
+ memcpy(&proto, packet, 2);
+ proto = ntohs(proto);
+ memcpy(&ifindex, packet + 4, 4);
+ ifindex = ntohl(ifindex);
+ ptype = packet[10];
+
+ if (proto == ETH_P_IPV6) {
+ show_ipv6_packet(payload, ifindex, ptype);
+ } else if (proto == ETH_P_IP) {
+ show_ipv4_packet(payload, ifindex, ptype);
+ } else {
+ ifname = if_indextoname(ifindex, _ifname);
+ if (!ifname) {
+ snprintf(_ifname, sizeof(_ifname), "unknown(%d)", ifindex);
+ ifname = _ifname;
+ }
+
+ printf("%-7s %-3s Unknown network protocol type 0x%x\n",
+ ifname, pkt_type_str(ptype), proto);
+ }
+ }
+
+ return NULL;
+}
+
+/* Prepare the pcap handle to capture packets.
+ *
+ * This pcap is non-blocking and immediate mode is enabled to receive
+ * captured packets as soon as possible. The snaplen is set to 1024 bytes
+ * to limit the size of captured content. The format of the link-layer
+ * header is set to DLT_LINUX_SLL2 to enable handling various link-layer
+ * technologies.
+ */
+static pcap_t *traffic_monitor_prepare_pcap(void)
+{
+ char errbuf[PCAP_ERRBUF_SIZE];
+ pcap_t *pcap;
+ int r;
+
+ /* Listen on all NICs in the namespace */
+ pcap = pcap_create("any", errbuf);
+ if (!pcap) {
+ log_err("Failed to open pcap: %s", errbuf);
+ return NULL;
+ }
+ /* Limit the size of the packet (first N bytes) */
+ r = pcap_set_snaplen(pcap, 1024);
+ if (r) {
+ log_err("Failed to set snaplen: %s", pcap_geterr(pcap));
+ goto error;
+ }
+ /* To receive packets as fast as possible */
+ r = pcap_set_immediate_mode(pcap, 1);
+ if (r) {
+ log_err("Failed to set immediate mode: %s", pcap_geterr(pcap));
+ goto error;
+ }
+ r = pcap_setnonblock(pcap, 1, errbuf);
+ if (r) {
+ log_err("Failed to set nonblock: %s", errbuf);
+ goto error;
+ }
+ r = pcap_activate(pcap);
+ if (r) {
+ log_err("Failed to activate pcap: %s", pcap_geterr(pcap));
+ goto error;
+ }
+ /* Determine the format of the link-layer header */
+ r = pcap_set_datalink(pcap, DLT_LINUX_SLL2);
+ if (r) {
+ log_err("Failed to set datalink: %s", pcap_geterr(pcap));
+ goto error;
+ }
+
+ return pcap;
+error:
+ pcap_close(pcap);
+ return NULL;
+}
+
+static void encode_test_name(char *buf, size_t len, const char *test_name, const char *subtest_name)
+{
+ char *p;
+
+ if (subtest_name)
+ snprintf(buf, len, "%s__%s", test_name, subtest_name);
+ else
+ snprintf(buf, len, "%s", test_name);
+ while ((p = strchr(buf, '/')))
+ *p = '_';
+ while ((p = strchr(buf, ' ')))
+ *p = '_';
+}
+
+#define PCAP_DIR "/tmp/tmon_pcap"
+
+/* Start to monitor the network traffic in the given network namespace.
+ *
+ * netns: the name of the network namespace to monitor. If NULL, the
+ * current network namespace is monitored.
+ * test_name: the name of the running test.
+ * subtest_name: the name of the running subtest, if there is one. It
+ * should be NULL if it is not a subtest.
+ *
+ * This function will start a thread to capture packets going through NICs
+ * in the given network namespace.
+ */
+struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name,
+ const char *subtest_name)
+{
+ struct nstoken *nstoken = NULL;
+ struct tmonitor_ctx *ctx;
+ char test_name_buf[64];
+ static int tmon_seq;
+ int r;
+
+ if (netns) {
+ nstoken = open_netns(netns);
+ if (!nstoken)
+ return NULL;
+ }
+ ctx = malloc(sizeof(*ctx));
+ if (!ctx) {
+ log_err("Failed to malloc ctx");
+ goto fail_ctx;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+
+ encode_test_name(test_name_buf, sizeof(test_name_buf), test_name, subtest_name);
+ snprintf(ctx->pkt_fname, sizeof(ctx->pkt_fname),
+ PCAP_DIR "/packets-%d-%d-%s-%s.log", getpid(), tmon_seq++,
+ test_name_buf, netns ? netns : "unknown");
+
+ r = mkdir(PCAP_DIR, 0755);
+ if (r && errno != EEXIST) {
+ log_err("Failed to create " PCAP_DIR);
+ goto fail_pcap;
+ }
+
+ ctx->pcap = traffic_monitor_prepare_pcap();
+ if (!ctx->pcap)
+ goto fail_pcap;
+ ctx->pcap_fd = pcap_get_selectable_fd(ctx->pcap);
+ if (ctx->pcap_fd < 0) {
+ log_err("Failed to get pcap fd");
+ goto fail_dumper;
+ }
+
+ /* Create a packet file */
+ ctx->dumper = pcap_dump_open(ctx->pcap, ctx->pkt_fname);
+ if (!ctx->dumper) {
+ log_err("Failed to open pcap dump: %s", ctx->pkt_fname);
+ goto fail_dumper;
+ }
+
+ /* Create an eventfd to wake up the monitor thread */
+ ctx->wake_fd = eventfd(0, 0);
+ if (ctx->wake_fd < 0) {
+ log_err("Failed to create eventfd");
+ goto fail_eventfd;
+ }
+
+ r = pthread_create(&ctx->thread, NULL, traffic_monitor_thread, ctx);
+ if (r) {
+ log_err("Failed to create thread");
+ goto fail;
+ }
+
+ close_netns(nstoken);
+
+ return ctx;
+
+fail:
+ close(ctx->wake_fd);
+
+fail_eventfd:
+ pcap_dump_close(ctx->dumper);
+ unlink(ctx->pkt_fname);
+
+fail_dumper:
+ pcap_close(ctx->pcap);
+
+fail_pcap:
+ free(ctx);
+
+fail_ctx:
+ close_netns(nstoken);
+
+ return NULL;
+}
+
+static void traffic_monitor_release(struct tmonitor_ctx *ctx)
+{
+ pcap_close(ctx->pcap);
+ pcap_dump_close(ctx->dumper);
+
+ close(ctx->wake_fd);
+
+ free(ctx);
+}
+
+/* Stop the network traffic monitor.
+ *
+ * ctx: the context returned by traffic_monitor_start()
+ */
+void traffic_monitor_stop(struct tmonitor_ctx *ctx)
+{
+ __u64 w = 1;
+
+ if (!ctx)
+ return;
+
+ /* Stop the monitor thread */
+ ctx->done = true;
+ /* Wake up the background thread. */
+ write(ctx->wake_fd, &w, sizeof(w));
+ pthread_join(ctx->thread, NULL);
+
+ printf("Packet file: %s\n", strrchr(ctx->pkt_fname, '/') + 1);
+
+ traffic_monitor_release(ctx);
+}
+#endif /* TRAFFIC_MONITOR */
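
For reference, the fixed offsets parsed above (protocol at byte 0, ifindex at 4, ARPHRD_ type at 8, packet type at 10, payload at 20) match the 20-byte LINKTYPE_LINUX_SLL2 pseudo-header; a sketch of that layout as a struct (field names are illustrative, multi-byte fields are big-endian on the wire):

    struct sll2_hdr {
        __be16 protocol;    /* offset  0: EtherType, e.g. ETH_P_IP */
        __be16 reserved;    /* offset  2: must be zero */
        __be32 ifindex;     /* offset  4: capture interface */
        __be16 arphrd_type; /* offset  8: e.g. ARPHRD_ETHER */
        __u8   pkt_type;    /* offset 10: In/Out/Broadcast/... */
        __u8   addr_len;    /* offset 11 */
        __u8   addr[8];     /* offset 12: link-layer address */
    };                      /* payload begins at offset 20 */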
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index aac5b94d6379..c72c16e1aff8 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -23,7 +23,6 @@ typedef __u16 __sum16;
struct network_helper_opts {
int timeout_ms;
- bool must_fail;
int proto;
/* +ve: Passed to listen() as-is.
* 0: Default when the test does not set
@@ -70,8 +69,10 @@ int client_socket(int family, int type,
const struct network_helper_opts *opts);
int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t len,
const struct network_helper_opts *opts);
+int connect_to_addr_str(int family, int type, const char *addr_str, __u16 port,
+ const struct network_helper_opts *opts);
int connect_to_fd(int server_fd, int timeout_ms);
-int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts);
+int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts);
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
int timeout_ms);
@@ -92,6 +93,8 @@ struct nstoken;
struct nstoken *open_netns(const char *name);
void close_netns(struct nstoken *token);
int send_recv_data(int lfd, int fd, uint32_t total_bytes);
+int make_netns(const char *name);
+int remove_netns(const char *name);
static __u16 csum_fold(__u32 csum)
{
@@ -135,4 +138,22 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold((__u32)s);
}
+struct tmonitor_ctx;
+
+#ifdef TRAFFIC_MONITOR
+struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name,
+ const char *subtest_name);
+void traffic_monitor_stop(struct tmonitor_ctx *ctx);
+#else
+static inline struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name,
+ const char *subtest_name)
+{
+ return NULL;
+}
+
+static inline void traffic_monitor_stop(struct tmonitor_ctx *ctx)
+{
+}
+#endif
+
#endif
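
A hypothetical flow tying the new netns and traffic-monitor helpers together (the namespace and test names are illustrative):

    struct tmonitor_ctx *tmon;

    if (make_netns("tmon_ns"))
        return;
    tmon = traffic_monitor_start("tmon_ns", "my_test", NULL);
    /* ... generate traffic inside "tmon_ns" ... */
    traffic_monitor_stop(tmon);
    remove_netns("tmon_ns");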
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index 7175af39134f..329c7862b52d 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -283,9 +283,11 @@ static void test_uprobe_sleepable(struct test_attach_probe *skel)
trigger_func3();
ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
- ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res");
- ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res");
- ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res");
+ ASSERT_EQ(skel->bss->uprobe_byname3_str_sleepable_res, 10, "check_uprobe_byname3_str_sleepable_res");
+ ASSERT_EQ(skel->bss->uprobe_byname3_res, 11, "check_uprobe_byname3_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 12, "check_uretprobe_byname3_sleepable_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_str_sleepable_res, 13, "check_uretprobe_byname3_str_sleepable_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_res, 14, "check_uretprobe_byname3_res");
}
void test_attach_probe(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 618af9dfae9b..52e6f7570475 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -1218,7 +1218,7 @@ out:
bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
-static void test_bpf_sk_stoarge_map_iter_fd(void)
+static void test_bpf_sk_storage_map_iter_fd(void)
{
struct bpf_iter_bpf_sk_storage_map *skel;
@@ -1693,7 +1693,7 @@ void test_bpf_iter(void)
if (test__start_subtest("bpf_sk_storage_map"))
test_bpf_sk_storage_map();
if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
- test_bpf_sk_stoarge_map_iter_fd();
+ test_bpf_sk_storage_map_iter_fd();
if (test__start_subtest("bpf_sk_storage_delete"))
test_bpf_sk_storage_delete();
if (test__start_subtest("bpf_sk_storage_get"))
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
index b52ff8ce34db..16bed9dd8e6a 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
@@ -95,7 +95,7 @@ static unsigned short get_local_port(int fd)
struct sockaddr_in6 addr;
socklen_t addrlen = sizeof(addr);
- if (!getsockname(fd, &addr, &addrlen))
+ if (!getsockname(fd, (struct sockaddr *)&addr, &addrlen))
return ntohs(addr.sin6_port);
return 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 63422f4f3896..409a06975823 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -49,7 +49,7 @@ static bool start_test(char *addr_str,
goto err;
/* connect to server */
- *cli_fd = connect_to_fd_opts(*srv_fd, SOCK_STREAM, cli_opts);
+ *cli_fd = connect_to_fd_opts(*srv_fd, cli_opts);
if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts"))
goto err;
@@ -285,7 +285,7 @@ static void test_dctcp_fallback(void)
dctcp_skel = bpf_dctcp__open();
if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
return;
- strcpy(dctcp_skel->rodata->fallback, "cubic");
+ strcpy(dctcp_skel->rodata->fallback_cc, "cubic");
if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load"))
goto done;
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 61de88cf4ad0..e63d74ce046f 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -5020,7 +5020,7 @@ struct pprint_mapv_int128 {
static struct btf_raw_test pprint_test_template[] = {
{
.raw_types = {
- /* unsighed char */ /* [1] */
+ /* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
@@ -5087,7 +5087,7 @@ static struct btf_raw_test pprint_test_template[] = {
* be encoded with kind_flag set.
*/
.raw_types = {
- /* unsighed char */ /* [1] */
+ /* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
@@ -5154,7 +5154,7 @@ static struct btf_raw_test pprint_test_template[] = {
* will have both int and enum types.
*/
.raw_types = {
- /* unsighed char */ /* [1] */
+ /* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
/* unsigned short */ /* [2] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
index bfbe795823a2..ca84726d5ac1 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_distill.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
@@ -535,6 +535,72 @@ cleanup:
btf__free(vmlinux_btf);
}
+/* Split and new base BTFs should inherit endianness from source BTF. */
+static void test_distilled_endianness(void)
+{
+ struct btf *base = NULL, *split = NULL, *new_base = NULL, *new_split = NULL;
+ struct btf *new_base1 = NULL, *new_split1 = NULL;
+ enum btf_endianness inverse_endianness;
+ const void *raw_data;
+ __u32 size;
+
+ base = btf__new_empty();
+ if (!ASSERT_OK_PTR(base, "empty_main_btf"))
+ return;
+ inverse_endianness = btf__endianness(base) == BTF_LITTLE_ENDIAN ? BTF_BIG_ENDIAN
+ : BTF_LITTLE_ENDIAN;
+ btf__set_endianness(base, inverse_endianness);
+ btf__add_int(base, "int", 4, BTF_INT_SIGNED); /* [1] int */
+ VALIDATE_RAW_BTF(
+ base,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ split = btf__new_empty_split(base);
+ if (!ASSERT_OK_PTR(split, "empty_split_btf"))
+ goto cleanup;
+ btf__add_ptr(split, 1);
+ VALIDATE_RAW_BTF(
+ split,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+ if (!ASSERT_EQ(0, btf__distill_base(split, &new_base, &new_split),
+ "distilled_base") ||
+ !ASSERT_OK_PTR(new_base, "distilled_base") ||
+ !ASSERT_OK_PTR(new_split, "distilled_split") ||
+ !ASSERT_EQ(2, btf__type_cnt(new_base), "distilled_base_type_cnt"))
+ goto cleanup;
+ VALIDATE_RAW_BTF(
+ new_split,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+
+ raw_data = btf__raw_data(new_base, &size);
+ if (!ASSERT_OK_PTR(raw_data, "btf__raw_data #1"))
+ goto cleanup;
+ new_base1 = btf__new(raw_data, size);
+ if (!ASSERT_OK_PTR(new_base1, "new_base1 = btf__new()"))
+ goto cleanup;
+ raw_data = btf__raw_data(new_split, &size);
+ if (!ASSERT_OK_PTR(raw_data, "btf__raw_data #2"))
+ goto cleanup;
+ new_split1 = btf__new_split(raw_data, size, new_base1);
+ if (!ASSERT_OK_PTR(new_split1, "new_split1 = btf__new_split()"))
+ goto cleanup;
+
+ ASSERT_EQ(btf__endianness(new_base1), inverse_endianness, "new_base1 endianness");
+ ASSERT_EQ(btf__endianness(new_split1), inverse_endianness, "new_split1 endianness");
+ VALIDATE_RAW_BTF(
+ new_split1,
+ "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[2] PTR '(anon)' type_id=1");
+cleanup:
+ btf__free(new_split1);
+ btf__free(new_base1);
+ btf__free(new_split);
+ btf__free(new_base);
+ btf__free(split);
+ btf__free(base);
+}
+
void test_btf_distill(void)
{
if (test__start_subtest("distilled_base"))
@@ -549,4 +615,6 @@ void test_btf_distill(void)
test_distilled_base_multi_err2();
if (test__start_subtest("distilled_base_vmlinux"))
test_distilled_base_vmlinux();
+ if (test__start_subtest("distilled_endianness"))
+ test_distilled_endianness();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 09a8e6f9b379..b293b8501fd6 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -805,8 +805,8 @@ static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d,
TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT,
"int cpu_number = (int)100", 100);
#endif
- TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_profile_flip", int, BTF_F_COMPACT,
- "static int cpu_profile_flip = (int)2", 2);
+ TEST_BTF_DUMP_VAR(btf, d, NULL, str, "bpf_cgrp_storage_busy", int, BTF_F_COMPACT,
+ "static int bpf_cgrp_storage_busy = (int)2", 2);
}
static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str,
diff --git a/tools/testing/selftests/bpf/prog_tests/build_id.c b/tools/testing/selftests/bpf/prog_tests/build_id.c
new file mode 100644
index 000000000000..aec9c8d6bc96
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/build_id.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#include "test_build_id.skel.h"
+
+static char build_id[BPF_BUILD_ID_SIZE];
+static int build_id_sz;
+
+static void print_stack(struct bpf_stack_build_id *stack, int frame_cnt)
+{
+ int i, j;
+
+ for (i = 0; i < frame_cnt; i++) {
+ printf("FRAME #%02d: ", i);
+ switch (stack[i].status) {
+ case BPF_STACK_BUILD_ID_EMPTY:
+ printf("<EMPTY>\n");
+ break;
+ case BPF_STACK_BUILD_ID_VALID:
+ printf("BUILD ID = ");
+ for (j = 0; j < BPF_BUILD_ID_SIZE; j++)
+ printf("%02hhx", (unsigned)stack[i].build_id[j]);
+ printf(" OFFSET = %llx", (unsigned long long)stack[i].offset);
+ break;
+ case BPF_STACK_BUILD_ID_IP:
+ printf("IP = %llx", (unsigned long long)stack[i].ip);
+ break;
+ default:
+ printf("UNEXPECTED STATUS %d ", stack[i].status);
+ break;
+ }
+ printf("\n");
+ }
+}
+
+static void subtest_nofault(bool build_id_resident)
+{
+ struct test_build_id *skel;
+ struct bpf_stack_build_id *stack;
+ int frame_cnt;
+
+ skel = test_build_id__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->links.uprobe_nofault = bpf_program__attach(skel->progs.uprobe_nofault);
+ if (!ASSERT_OK_PTR(skel->links.uprobe_nofault, "link"))
+ goto cleanup;
+
+ if (build_id_resident)
+ ASSERT_OK(system("./uprobe_multi uprobe-paged-in"), "trigger_uprobe");
+ else
+ ASSERT_OK(system("./uprobe_multi uprobe-paged-out"), "trigger_uprobe");
+
+ if (!ASSERT_GT(skel->bss->res_nofault, 0, "res"))
+ goto cleanup;
+
+ stack = skel->bss->stack_nofault;
+ frame_cnt = skel->bss->res_nofault / sizeof(struct bpf_stack_build_id);
+ if (env.verbosity >= VERBOSE_NORMAL)
+ print_stack(stack, frame_cnt);
+
+ if (build_id_resident) {
+ ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status");
+ ASSERT_EQ(memcmp(stack[0].build_id, build_id, build_id_sz), 0, "build_id_match");
+ } else {
+ ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_IP, "build_id_status");
+ }
+
+cleanup:
+ test_build_id__destroy(skel);
+}
+
+static void subtest_sleepable(void)
+{
+ struct test_build_id *skel;
+ struct bpf_stack_build_id *stack;
+ int frame_cnt;
+
+ skel = test_build_id__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->links.uprobe_sleepable = bpf_program__attach(skel->progs.uprobe_sleepable);
+ if (!ASSERT_OK_PTR(skel->links.uprobe_sleepable, "link"))
+ goto cleanup;
+
+ /* force build ID to not be paged in */
+ ASSERT_OK(system("./uprobe_multi uprobe-paged-out"), "trigger_uprobe");
+
+ if (!ASSERT_GT(skel->bss->res_sleepable, 0, "res"))
+ goto cleanup;
+
+ stack = skel->bss->stack_sleepable;
+ frame_cnt = skel->bss->res_sleepable / sizeof(struct bpf_stack_build_id);
+ if (env.verbosity >= VERBOSE_NORMAL)
+ print_stack(stack, frame_cnt);
+
+ ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status");
+ ASSERT_EQ(memcmp(stack[0].build_id, build_id, build_id_sz), 0, "build_id_match");
+
+cleanup:
+ test_build_id__destroy(skel);
+}
+
+void serial_test_build_id(void)
+{
+ build_id_sz = read_build_id("uprobe_multi", build_id, sizeof(build_id));
+ ASSERT_EQ(build_id_sz, BPF_BUILD_ID_SIZE, "parse_build_id");
+
+ if (test__start_subtest("nofault-paged-out"))
+ subtest_nofault(false /* not resident */);
+ if (test__start_subtest("nofault-paged-in"))
+ subtest_nofault(true /* resident */);
+ if (test__start_subtest("sleepable"))
+ subtest_sleepable();
+}
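The userspace half above consumes stacks captured by the BPF side (progs/test_build_id.c, not part of this hunk). As a hedged sketch of the helper call it revolves around — the global names match the skeleton fields read above, but the body is illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define MAX_FRAMES 16

struct bpf_stack_build_id stack_nofault[MAX_FRAMES];
long res_nofault;

SEC("uprobe")
int uprobe_nofault(struct pt_regs *ctx)
{
	/* BPF_F_USER_BUILD_ID asks for build-id+offset frames instead of
	 * raw IPs; a frame degrades to BPF_STACK_BUILD_ID_IP when the
	 * page holding the build ID is not resident, which is exactly
	 * what the paged-out subtest asserts.
	 */
	res_nofault = bpf_get_stack(ctx, stack_nofault, sizeof(stack_nofault),
				    BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";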
diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
index 63ee892bc757..10224f845568 100644
--- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
@@ -214,7 +214,7 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
/* Attach to parent and child cgroup, trigger packet from child.
* Assert that there are six additional runs, parent cgroup egresses and
* ingress, child cgroup egresses and ingress.
- * Assert that egree and ingress storages are separate.
+ * Assert that egress and ingress storages are separate.
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
new file mode 100644
index 000000000000..9250a1e9f9af
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "cgroup_helpers.h"
+#include "cgroup_ancestor.skel.h"
+
+#define CGROUP_PATH "/skb_cgroup_test"
+#define TEST_NS "cgroup_ancestor_ns"
+#define NUM_CGROUP_LEVELS 4
+#define WAIT_AUTO_IP_MAX_ATTEMPT 10
+#define DST_ADDR "::1"
+#define DST_PORT 1234
+#define MAX_ASSERT_NAME 32
+
+struct test_data {
+ struct cgroup_ancestor *skel;
+ struct bpf_tc_hook qdisc;
+ struct bpf_tc_opts tc_attach;
+ struct nstoken *ns;
+};
+
+static int send_datagram(void)
+{
+ unsigned char buf[] = "some random test data";
+ struct sockaddr_in6 addr = { .sin6_family = AF_INET6,
+ .sin6_port = htons(DST_PORT), };
+ int sock, n;
+
+ if (!ASSERT_EQ(inet_pton(AF_INET6, DST_ADDR, &addr.sin6_addr), 1,
+ "inet_pton"))
+ return -1;
+
+ sock = socket(AF_INET6, SOCK_DGRAM, 0);
+ if (!ASSERT_OK_FD(sock, "create socket"))
+ return sock;
+
+ if (!ASSERT_OK(connect(sock, (struct sockaddr *)&addr, sizeof(addr)), "connect")) {
+ close(sock);
+ return -1;
+ }
+
+ n = sendto(sock, buf, sizeof(buf), 0, (const struct sockaddr *)&addr,
+ sizeof(addr));
+ close(sock);
+ return ASSERT_EQ(n, sizeof(buf), "send data") ? 0 : -1;
+}
+
+static int setup_network(struct test_data *t)
+{
+ SYS(fail, "ip netns add %s", TEST_NS);
+ t->ns = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(t->ns, "open netns"))
+ goto cleanup_ns;
+
+ SYS(close_ns, "ip link set lo up");
+
+ memset(&t->qdisc, 0, sizeof(t->qdisc));
+ t->qdisc.sz = sizeof(t->qdisc);
+ t->qdisc.attach_point = BPF_TC_EGRESS;
+ t->qdisc.ifindex = if_nametoindex("lo");
+ if (!ASSERT_NEQ(t->qdisc.ifindex, 0, "if_nametoindex"))
+ goto close_ns;
+ if (!ASSERT_OK(bpf_tc_hook_create(&t->qdisc), "qdisc add"))
+ goto close_ns;
+
+ memset(&t->tc_attach, 0, sizeof(t->tc_attach));
+ t->tc_attach.sz = sizeof(t->tc_attach);
+ t->tc_attach.prog_fd = bpf_program__fd(t->skel->progs.log_cgroup_id);
+ if (!ASSERT_OK(bpf_tc_attach(&t->qdisc, &t->tc_attach), "filter add"))
+ goto cleanup_qdisc;
+
+ return 0;
+
+cleanup_qdisc:
+ bpf_tc_hook_destroy(&t->qdisc);
+close_ns:
+ close_netns(t->ns);
+cleanup_ns:
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+fail:
+ return 1;
+}
+
+static void cleanup_network(struct test_data *t)
+{
+ bpf_tc_detach(&t->qdisc, &t->tc_attach);
+ bpf_tc_hook_destroy(&t->qdisc);
+ close_netns(t->ns);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+}
+
+static void check_ancestors_ids(struct test_data *t)
+{
+ __u64 expected_ids[NUM_CGROUP_LEVELS];
+ char assert_name[MAX_ASSERT_NAME];
+ __u32 level;
+
+ expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */
+ expected_ids[1] = get_cgroup_id("");
+ expected_ids[2] = get_cgroup_id(CGROUP_PATH);
+ expected_ids[3] = 0; /* non-existent cgroup */
+
+ for (level = 0; level < NUM_CGROUP_LEVELS; level++) {
+ snprintf(assert_name, MAX_ASSERT_NAME,
+ "ancestor id at level %d", level);
+ ASSERT_EQ(t->skel->bss->cgroup_ids[level], expected_ids[level],
+ assert_name);
+ }
+}
+
+void test_cgroup_ancestor(void)
+{
+ struct test_data t;
+ int cgroup_fd;
+
+ t.skel = cgroup_ancestor__open_and_load();
+ if (!ASSERT_OK_PTR(t.skel, "open and load"))
+ return;
+
+ t.skel->bss->dport = htons(DST_PORT);
+ cgroup_fd = cgroup_setup_and_join(CGROUP_PATH);
+ if (cgroup_fd < 0)
+ goto cleanup_progs;
+
+ if (setup_network(&t))
+ goto cleanup_cgroups;
+
+ if (send_datagram())
+ goto cleanup_network;
+
+ check_ancestors_ids(&t);
+
+cleanup_network:
+ cleanup_network(&t);
+cleanup_cgroups:
+ close(cgroup_fd);
+ cleanup_cgroup_environment();
+cleanup_progs:
+ cgroup_ancestor__destroy(t.skel);
+}
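The tc program attached by setup_network() lives in progs/cgroup_ancestor.c (not shown here); its core is the bpf_skb_ancestor_cgroup_id() helper. A hedged sketch — the real program additionally matches the UDP destination port stored in dport:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define NUM_CGROUP_LEVELS 4

__u64 cgroup_ids[NUM_CGROUP_LEVELS];

SEC("tc")
int log_cgroup_id(struct __sk_buff *skb)
{
	int level;

	/* Record the id of the socket's cgroup ancestor at each level,
	 * starting from the root; the helper returns 0 for levels that
	 * do not exist, matching the expected_ids[3] == 0 check above.
	 */
	for (level = 0; level < NUM_CGROUP_LEVELS; level++)
		cgroup_ids[level] = bpf_skb_ancestor_cgroup_id(skb, level);
	return 0; /* TC_ACT_OK */
}

char LICENSE[] SEC("license") = "GPL";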
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c b/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c
new file mode 100644
index 000000000000..5ab7547e38c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <errno.h>
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "dev_cgroup.skel.h"
+
+#define TEST_CGROUP "/test-bpf-based-device-cgroup/"
+#define TEST_BUFFER_SIZE 64
+
+static void test_mknod(const char *path, mode_t mode, int dev_major,
+ int dev_minor, int expected_ret, int expected_errno)
+{
+ int ret;
+
+ unlink(path);
+ ret = mknod(path, mode, makedev(dev_major, dev_minor));
+ ASSERT_EQ(ret, expected_ret, "mknod");
+ if (expected_ret)
+ ASSERT_EQ(errno, expected_errno, "mknod errno");
+ else
+ unlink(path);
+}
+
+static void test_read(const char *path, char *buf, int buf_size,
+ int expected_ret, int expected_errno)
+{
+ int ret, fd;
+
+ fd = open(path, O_RDONLY);
+
+ /* A bare open on an unauthorized device should fail */
+ if (expected_ret < 0) {
+ ASSERT_EQ(fd, expected_ret, "open ret for read");
+ ASSERT_EQ(errno, expected_errno, "open errno for read");
+ if (fd >= 0)
+ close(fd);
+ return;
+ }
+
+ if (!ASSERT_OK_FD(fd, "open ret for read"))
+ return;
+
+ ret = read(fd, buf, buf_size);
+ ASSERT_EQ(ret, expected_ret, "read");
+
+ close(fd);
+}
+
+static void test_write(const char *path, char *buf, int buf_size,
+ int expected_ret, int expected_errno)
+{
+ int ret, fd;
+
+ fd = open(path, O_WRONLY);
+
+ /* A bare open on an unauthorized device should fail */
+ if (expected_ret < 0) {
+ ASSERT_EQ(fd, expected_ret, "open ret for write");
+ ASSERT_EQ(errno, expected_errno, "open errno for write");
+ if (fd >= 0)
+ close(fd);
+ return;
+ }
+
+ if (!ASSERT_OK_FD(fd, "open ret for write"))
+ return;
+
+ ret = write(fd, buf, buf_size);
+ ASSERT_EQ(ret, expected_ret, "write");
+
+ close(fd);
+}
+
+void test_cgroup_dev(void)
+{
+ char buf[TEST_BUFFER_SIZE] = "some random test data";
+ struct dev_cgroup *skel;
+ int cgroup_fd;
+
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+ if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch"))
+ return;
+
+ skel = dev_cgroup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "load program"))
+ goto cleanup_cgroup;
+
+ skel->links.bpf_prog1 =
+ bpf_program__attach_cgroup(skel->progs.bpf_prog1, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.bpf_prog1, "attach_program"))
+ goto cleanup_progs;
+
+ if (test__start_subtest("allow-mknod"))
+ test_mknod("/dev/test_dev_cgroup_null", S_IFCHR, 1, 3, 0, 0);
+
+ if (test__start_subtest("allow-read"))
+ test_read("/dev/urandom", buf, TEST_BUFFER_SIZE,
+ TEST_BUFFER_SIZE, 0);
+
+ if (test__start_subtest("allow-write"))
+ test_write("/dev/null", buf, TEST_BUFFER_SIZE,
+ TEST_BUFFER_SIZE, 0);
+
+ if (test__start_subtest("deny-mknod"))
+ test_mknod("/dev/test_dev_cgroup_zero", S_IFCHR, 1, 5, -1,
+ EPERM);
+
+ if (test__start_subtest("deny-read"))
+ test_read("/dev/random", buf, TEST_BUFFER_SIZE, -1, EPERM);
+
+ if (test__start_subtest("deny-write"))
+ test_write("/dev/zero", buf, TEST_BUFFER_SIZE, -1, EPERM);
+
+ if (test__start_subtest("deny-mknod-wrong-type"))
+ test_mknod("/dev/test_dev_cgroup_block", S_IFBLK, 1, 3, -1,
+ EPERM);
+
+cleanup_progs:
+ dev_cgroup__destroy(skel);
+cleanup_cgroup:
+ cleanup_cgroup_environment();
+}
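The allow/deny matrix in these subtests assumes the policy from progs/dev_cgroup.c: char devices 1:3 (/dev/null) and 1:9 (/dev/urandom) are permitted, everything else is refused. A hedged sketch of such a BPF_PROG_TYPE_CGROUP_DEVICE program (illustrative, not the verbatim source):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("cgroup/dev")
int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
{
	/* access_type packs the device type (char/block) into the low
	 * 16 bits and the access mask (mknod/read/write) into the high
	 * 16 bits.
	 */
	short type = ctx->access_type & 0xFFFF;

	if (type != BPF_DEVCG_DEV_CHAR)
		return 0;	/* deny: covers the S_IFBLK subtest */
	if (ctx->major == 1 && (ctx->minor == 3 || ctx->minor == 9))
		return 1;	/* allow /dev/null and /dev/urandom */
	return 0;		/* deny 1:5, 1:8 and everything else */
}

char LICENSE[] SEC("license") = "GPL";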
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c b/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c
new file mode 100644
index 000000000000..7a1643b03bf3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "get_cgroup_id_kern.skel.h"
+
+#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
+
+void test_cgroup_get_current_cgroup_id(void)
+{
+ struct get_cgroup_id_kern *skel;
+ const struct timespec req = {
+ .tv_sec = 0,
+ .tv_nsec = 1,
+ };
+ int cgroup_fd;
+ __u64 ucgid;
+
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+ if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch"))
+ return;
+
+ skel = get_cgroup_id_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "load program"))
+ goto cleanup_cgroup;
+
+ if (!ASSERT_OK(get_cgroup_id_kern__attach(skel), "attach bpf program"))
+ goto cleanup_progs;
+
+ skel->bss->expected_pid = getpid();
+ /* trigger the syscall that the tested prog is attached to */
+ if (!ASSERT_OK(syscall(__NR_nanosleep, &req, NULL), "nanosleep"))
+ goto cleanup_progs;
+
+ ucgid = get_cgroup_id(TEST_CGROUP);
+
+ ASSERT_EQ(skel->bss->cg_id, ucgid, "compare cgroup ids");
+
+cleanup_progs:
+ get_cgroup_id_kern__destroy(skel);
+cleanup_cgroup:
+ close(cgroup_fd);
+ cleanup_cgroup_environment();
+}
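The skeleton attached here (progs/get_cgroup_id_kern.c) hooks the nanosleep syscall tracepoint, which is why the test triggers __NR_nanosleep. A hedged sketch of the pattern, using the same global names the test reads back:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

__u64 cg_id;
__u32 expected_pid;

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int trace(void *ctx)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;

	/* Record only for the test process, so unrelated nanosleep
	 * callers cannot clobber the result before the comparison.
	 */
	if (pid == expected_pid)
		cg_id = bpf_get_current_cgroup_id();
	return 0;
}

char LICENSE[] SEC("license") = "GPL";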
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c b/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c
new file mode 100644
index 000000000000..cf395715ced4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "cgroup_storage.skel.h"
+
+#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/"
+#define TEST_NS "cgroup_storage_ns"
+#define PING_CMD "ping localhost -c 1 -W 1 -q"
+
+static int setup_network(struct nstoken **token)
+{
+ SYS(fail, "ip netns add %s", TEST_NS);
+ *token = open_netns(TEST_NS);
+ if (!ASSERT_OK_PTR(*token, "open netns"))
+ goto cleanup_ns;
+ SYS(cleanup_ns, "ip link set lo up");
+
+ return 0;
+
+cleanup_ns:
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+fail:
+ return -1;
+}
+
+static void cleanup_network(struct nstoken *ns)
+{
+ close_netns(ns);
+ SYS_NOFAIL("ip netns del %s", TEST_NS);
+}
+
+void test_cgroup_storage(void)
+{
+ struct bpf_cgroup_storage_key key;
+ struct cgroup_storage *skel;
+ struct nstoken *ns = NULL;
+ unsigned long long value;
+ int cgroup_fd;
+ int err;
+
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+ if (!ASSERT_OK_FD(cgroup_fd, "create cgroup"))
+ return;
+
+ if (!ASSERT_OK(setup_network(&ns), "setup network"))
+ goto cleanup_cgroup;
+
+ skel = cgroup_storage__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "load program"))
+ goto cleanup_network;
+
+ skel->links.bpf_prog =
+ bpf_program__attach_cgroup(skel->progs.bpf_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.bpf_prog, "attach program"))
+ goto cleanup_progs;
+
+ /* Check that one out of every two packets is dropped */
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_OK(err, "first ping");
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_NEQ(err, 0, "second ping");
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_OK(err, "third ping");
+
+ err = bpf_map__get_next_key(skel->maps.cgroup_storage, NULL, &key,
+ sizeof(key));
+ if (!ASSERT_OK(err, "get first key"))
+ goto cleanup_progs;
+ err = bpf_map__lookup_elem(skel->maps.cgroup_storage, &key, sizeof(key),
+ &value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "first packet count read"))
+ goto cleanup_progs;
+
+ /* Add one to the packet counter, then check packet filtering again */
+ value++;
+ err = bpf_map__update_elem(skel->maps.cgroup_storage, &key, sizeof(key),
+ &value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "increment packet counter"))
+ goto cleanup_progs;
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_OK(err, "fourth ping");
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_NEQ(err, 0, "fifth ping");
+ err = SYS_NOFAIL(PING_CMD);
+ ASSERT_OK(err, "sixth ping");
+
+cleanup_progs:
+ cgroup_storage__destroy(skel);
+cleanup_network:
+ cleanup_network(ns);
+cleanup_cgroup:
+ close(cgroup_fd);
+ cleanup_cgroup_environment();
+}
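The pass/drop/pass ping pattern asserted above follows from the counter scheme in progs/cgroup_storage.c: every egress packet bumps a per-cgroup counter and only odd counts pass. A hedged sketch consistent with those expectations (illustrative, not the verbatim source):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} cgroup_storage SEC(".maps");

SEC("cgroup_skb/egress")
int bpf_prog(struct __sk_buff *skb)
{
	__u64 *counter = bpf_get_local_storage(&cgroup_storage, 0);

	__sync_fetch_and_add(counter, 1);
	/* Odd count: allow (1); even count: drop (0). The userspace
	 * bpf_map__update_elem() above shifts the sequence by one,
	 * which is what the fourth/fifth/sixth pings then verify.
	 */
	return (*counter & 1) ? 1 : 0;
}

char LICENSE[] SEC("license") = "GPL";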
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
index 9709c8db7275..64abba72ac10 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
@@ -9,9 +9,6 @@
static int run_test(int cgroup_fd, int server_fd, bool classid)
{
- struct network_helper_opts opts = {
- .must_fail = true,
- };
struct connect4_dropper *skel;
int fd, err = 0;
@@ -32,11 +29,16 @@ static int run_test(int cgroup_fd, int server_fd, bool classid)
goto out;
}
- fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts);
- if (fd < 0)
+ errno = 0;
+ fd = connect_to_fd_opts(server_fd, NULL);
+ if (fd >= 0) {
+ log_err("Unexpected success to connect to server");
err = -1;
- else
close(fd);
+ } else if (errno != EPERM) {
+ log_err("Unexpected errno from connect to server");
+ err = -1;
+ }
out:
connect4_dropper__destroy(skel);
return err;
@@ -52,7 +54,7 @@ void test_cgroup_v1v2(void)
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
if (!ASSERT_GE(server_fd, 0, "server_fd"))
return;
- client_fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts);
+ client_fd = connect_to_fd_opts(server_fd, &opts);
if (!ASSERT_GE(client_fd, 0, "client_fd")) {
close(server_fd);
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 47f42e680105..26019313e1fc 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include "progs/core_reloc_types.h"
#include "bpf_testmod/bpf_testmod.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c b/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c
new file mode 100644
index 000000000000..a18d3680fb16
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Test cases that can't load programs using libbpf and need direct
+ * BPF syscall access
+ */
+
+#include <sys/syscall.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+
+#include "test_progs.h"
+#include "test_btf.h"
+#include "bpf/libbpf_internal.h"
+
+static char log[16 * 1024];
+
+/* Check that verifier rejects BPF program containing relocation
+ * pointing to non-existent BTF type.
+ */
+static void test_bad_local_id(void)
+{
+ struct test_btf {
+ struct btf_header hdr;
+ __u32 types[15];
+ char strings[128];
+ } raw_btf = {
+ .hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_off = 0,
+ .type_len = sizeof(raw_btf.types),
+ .str_off = offsetof(struct test_btf, strings) -
+ offsetof(struct test_btf, types),
+ .str_len = sizeof(raw_btf.strings),
+ },
+ .types = {
+ BTF_PTR_ENC(0), /* [1] void* */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [2] int */
+ BTF_FUNC_PROTO_ENC(2, 1), /* [3] int (*)(void*) */
+ BTF_FUNC_PROTO_ARG_ENC(8, 1),
+ BTF_FUNC_ENC(8, 3) /* [4] FUNC 'foo' type_id=3 */
+ },
+ .strings = "\0int\0 0\0foo\0"
+ };
+ __u32 log_level = 1 | 2 | 4;
+ LIBBPF_OPTS(bpf_btf_load_opts, opts,
+ .log_buf = log,
+ .log_size = sizeof(log),
+ .log_level = log_level,
+ );
+ struct bpf_insn insns[] = {
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ struct bpf_func_info funcs[] = {
+ {
+ .insn_off = 0,
+ .type_id = 4,
+ }
+ };
+ struct bpf_core_relo relos[] = {
+ {
+ .insn_off = 0, /* patch first instruction (r0 = 0) */
+ .type_id = 100500, /* !!! this type id does not exist */
+ .access_str_off = 6, /* offset of "0" */
+ .kind = BPF_CORE_TYPE_ID_LOCAL,
+ }
+ };
+ union bpf_attr attr;
+ int saved_errno;
+ int prog_fd = -1;
+ int btf_fd = -1;
+
+ btf_fd = bpf_btf_load(&raw_btf, sizeof(raw_btf), &opts);
+ saved_errno = errno;
+ if (btf_fd < 0 || env.verbosity > VERBOSE_NORMAL) {
+ printf("-------- BTF load log start --------\n");
+ printf("%s", log);
+ printf("-------- BTF load log end ----------\n");
+ }
+ if (btf_fd < 0) {
+ PRINT_FAIL("bpf_btf_load() failed, errno=%d\n", saved_errno);
+ return;
+ }
+
+ log[0] = 0;
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_btf_fd = btf_fd;
+ attr.prog_type = BPF_TRACE_RAW_TP;
+ attr.license = (__u64)"GPL";
+ attr.insns = (__u64)&insns;
+ attr.insn_cnt = sizeof(insns) / sizeof(*insns);
+ attr.log_buf = (__u64)log;
+ attr.log_size = sizeof(log);
+ attr.log_level = log_level;
+ attr.func_info = (__u64)funcs;
+ attr.func_info_cnt = sizeof(funcs) / sizeof(*funcs);
+ attr.func_info_rec_size = sizeof(*funcs);
+ attr.core_relos = (__u64)relos;
+ attr.core_relo_cnt = sizeof(relos) / sizeof(*relos);
+ attr.core_relo_rec_size = sizeof(*relos);
+ prog_fd = sys_bpf_prog_load(&attr, sizeof(attr), 1);
+ saved_errno = errno;
+ if (prog_fd < 0 || env.verbosity > VERBOSE_NORMAL) {
+ printf("-------- program load log start --------\n");
+ printf("%s", log);
+ printf("-------- program load log end ----------\n");
+ }
+ if (prog_fd >= 0) {
+ PRINT_FAIL("sys_bpf_prog_load() expected to fail\n");
+ goto out;
+ }
+ ASSERT_HAS_SUBSTR(log, "relo #0: bad type id 100500", "program load log");
+
+out:
+ close(prog_fd);
+ close(btf_fd);
+}
+
+void test_core_reloc_raw(void)
+{
+ if (test__start_subtest("bad_local_id"))
+ test_bad_local_id();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
index b1a3a49a822a..42bd07f7218d 100644
--- a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
+++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c
@@ -4,7 +4,6 @@
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
-#include <linux/in6.h>
#include <linux/if_alg.h>
#include "test_progs.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
index 08b6391f2f56..dd75ccb03770 100644
--- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
+++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
@@ -10,7 +10,8 @@
#include "bpf/btf.h"
#include "bpf_util.h"
#include "linux/filter.h"
-#include "disasm.h"
+#include "linux/kernel.h"
+#include "disasm_helpers.h"
#define MAX_PROG_TEXT_SZ (32 * 1024)
@@ -628,63 +629,6 @@ err:
return false;
}
-static void print_insn(void *private_data, const char *fmt, ...)
-{
- va_list args;
-
- va_start(args, fmt);
- vfprintf((FILE *)private_data, fmt, args);
- va_end(args);
-}
-
-/* Disassemble instructions to a stream */
-static void print_xlated(FILE *out, struct bpf_insn *insn, __u32 len)
-{
- const struct bpf_insn_cbs cbs = {
- .cb_print = print_insn,
- .cb_call = NULL,
- .cb_imm = NULL,
- .private_data = out,
- };
- bool double_insn = false;
- int i;
-
- for (i = 0; i < len; i++) {
- if (double_insn) {
- double_insn = false;
- continue;
- }
-
- double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
- print_bpf_insn(&cbs, insn + i, true);
- }
-}
-
-/* We share code with kernel BPF disassembler, it adds '(FF) ' prefix
- * for each instruction (FF stands for instruction `code` byte).
- * This function removes the prefix inplace for each line in `str`.
- */
-static void remove_insn_prefix(char *str, int size)
-{
- const int prefix_size = 5;
-
- int write_pos = 0, read_pos = prefix_size;
- int len = strlen(str);
- char c;
-
- size = min(size, len);
-
- while (read_pos < size) {
- c = str[read_pos++];
- if (c == 0)
- break;
- str[write_pos++] = c;
- if (c == '\n')
- read_pos += prefix_size;
- }
- str[write_pos] = 0;
-}
-
struct prog_info {
char *prog_kind;
enum bpf_prog_type prog_type;
@@ -699,9 +643,10 @@ static void match_program(struct btf *btf,
char *reg_map[][2],
bool skip_first_insn)
{
- struct bpf_insn *buf = NULL;
+ struct bpf_insn *buf = NULL, *insn, *insn_end;
int err = 0, prog_fd = 0;
FILE *prog_out = NULL;
+ char insn_buf[64];
char *text = NULL;
__u32 cnt = 0;
@@ -739,12 +684,13 @@ static void match_program(struct btf *btf,
PRINT_FAIL("Can't open memory stream\n");
goto out;
}
- if (skip_first_insn)
- print_xlated(prog_out, buf + 1, cnt - 1);
- else
- print_xlated(prog_out, buf, cnt);
+ insn_end = buf + cnt;
+ insn = buf + (skip_first_insn ? 1 : 0);
+ while (insn < insn_end) {
+ insn = disasm_insn(insn, insn_buf, sizeof(insn_buf));
+ fprintf(prog_out, "%s\n", insn_buf);
+ }
fclose(prog_out);
- remove_insn_prefix(text, MAX_PROG_TEXT_SZ);
ASSERT_TRUE(match_pattern(btf, pattern, text, reg_map),
pinfo->prog_kind);
diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
index dcb9e5070cc3..d79f398ec6b7 100644
--- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
@@ -4,7 +4,6 @@
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
-#include <linux/in6.h>
#include "test_progs.h"
#include "network_helpers.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 49b1ffc9af1f..14c91b6f1e83 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include "bpf_util.h"
void serial_test_fexit_stress(void)
{
@@ -36,7 +37,7 @@ void serial_test_fexit_stress(void)
for (i = 0; i < bpf_max_tramp_links; i++) {
fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
trace_program,
- sizeof(trace_program) / sizeof(struct bpf_insn),
+ ARRAY_SIZE(trace_program),
&trace_opts);
if (!ASSERT_GE(fexit_fd[i], 0, "fexit load"))
goto out;
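ARRAY_SIZE comes from the selftests' bpf_util.h included above; it is essentially the classic element-count macro, which is why the open-coded sizeof divisions in this patch can be replaced one-for-one:

/* Roughly as defined in tools/testing/selftests/bpf/bpf_util.h: */
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif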
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index 6b3078dd5645..cfcc90cb7ffb 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include <network_helpers.h>
-#include <error.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
diff --git a/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c b/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c
index 37056ba73847..5a0b51157451 100644
--- a/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c
+++ b/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c
@@ -16,6 +16,7 @@ static void test_xattr(void)
{
struct test_get_xattr *skel = NULL;
int fd = -1, err;
+ int v[32];
fd = open(testfile, O_CREAT | O_RDONLY, 0644);
if (!ASSERT_GE(fd, 0, "create_file"))
@@ -50,7 +51,13 @@ static void test_xattr(void)
if (!ASSERT_GE(fd, 0, "open_file"))
goto out;
- ASSERT_EQ(skel->bss->found_xattr, 1, "found_xattr");
+ ASSERT_EQ(skel->bss->found_xattr_from_file, 1, "found_xattr_from_file");
+
+ /* Trigger security_inode_getxattr */
+ err = getxattr(testfile, "user.kfuncs", v, sizeof(v));
+ ASSERT_EQ(err, -1, "getxattr_return");
+ ASSERT_EQ(errno, EINVAL, "getxattr_errno");
+ ASSERT_EQ(skel->bss->found_xattr_from_dentry, 1, "found_xattr_from_dentry");
out:
close(fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c
index 3c440370c1f0..89ff23c4a8bc 100644
--- a/tools/testing/selftests/bpf/prog_tests/iters.c
+++ b/tools/testing/selftests/bpf/prog_tests/iters.c
@@ -14,6 +14,7 @@
#include "iters_state_safety.skel.h"
#include "iters_looping.skel.h"
#include "iters_num.skel.h"
+#include "iters_testmod.skel.h"
#include "iters_testmod_seq.skel.h"
#include "iters_task_vma.skel.h"
#include "iters_task.skel.h"
@@ -297,8 +298,10 @@ void test_iters(void)
RUN_TESTS(iters);
RUN_TESTS(iters_css_task);
- if (env.has_testmod)
+ if (env.has_testmod) {
+ RUN_TESTS(iters_testmod);
RUN_TESTS(iters_testmod_seq);
+ }
if (test__start_subtest("num"))
subtest_num_iters();
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
index c07991544a78..34f8822fd221 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include <network_helpers.h>
#include "kfree_skb.skel.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index 5b743212292f..f79c8e53cb3e 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -68,6 +68,7 @@ static struct kfunc_test_params kfunc_tests[] = {
TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"),
TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"),
TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"),
+ TC_FAIL(kfunc_call_test_pointer_arg_type_mismatch, 0, "arg#0 expected pointer to ctx, but got scalar"),
/* success cases */
TC_TEST(kfunc_call_test1, 12),
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index 0f7ea4d7d9f6..27676a04d0b6 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_buf.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -5,6 +5,7 @@
#include <bpf/btf.h>
#include "test_log_buf.skel.h"
+#include "bpf_util.h"
static size_t libbpf_log_pos;
static char libbpf_log_buf[1024 * 1024];
@@ -143,11 +144,11 @@ static void bpf_prog_load_log_buf(void)
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- const size_t good_prog_insn_cnt = sizeof(good_prog_insns) / sizeof(struct bpf_insn);
+ const size_t good_prog_insn_cnt = ARRAY_SIZE(good_prog_insns);
const struct bpf_insn bad_prog_insns[] = {
BPF_EXIT_INSN(),
};
- size_t bad_prog_insn_cnt = sizeof(bad_prog_insns) / sizeof(struct bpf_insn);
+ size_t bad_prog_insn_cnt = ARRAY_SIZE(bad_prog_insns);
LIBBPF_OPTS(bpf_prog_load_opts, opts);
const size_t log_buf_sz = 1024 * 1024;
char *log_buf;
@@ -159,7 +160,7 @@ static void bpf_prog_load_log_buf(void)
opts.log_buf = log_buf;
opts.log_size = log_buf_sz;
- /* with log_level == 0 log_buf shoud stay empty for good prog */
+ /* with log_level == 0 log_buf should stay empty for good prog */
log_buf[0] = '\0';
opts.log_level = 0;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
@@ -221,7 +222,7 @@ static void bpf_btf_load_log_buf(void)
opts.log_buf = log_buf;
opts.log_size = log_buf_sz;
- /* with log_level == 0 log_buf shoud stay empty for good BTF */
+ /* with log_level == 0 log_buf should stay empty for good BTF */
log_buf[0] = '\0';
opts.log_level = 0;
fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
index 835a1d756c16..b6e8d822e8e9 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
@@ -47,7 +47,6 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_tun.h>
-#include <linux/icmp.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <errno.h>
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
index 03825d2b45a8..6c50c0f63f43 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
@@ -49,6 +49,7 @@
* is not crashed, it is considered successful.
*/
#define NETNS "ns_lwt_reroute"
+#include <netinet/in.h>
#include "lwt_helpers.h"
#include "network_helpers.h"
#include <linux/net_tstamp.h>
diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
index aa9f67eb1c95..bea05f78de5f 100644
--- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
@@ -4,6 +4,7 @@
#include <bpf/btf.h>
#include "bpf/libbpf_internal.h"
#include "cgroup_helpers.h"
+#include "bpf_util.h"
static const char *module_name = "bpf_testmod";
static const char *symbol_name = "bpf_fentry_shadow_test";
@@ -100,7 +101,7 @@ void test_module_fentry_shadow(void)
load_opts.attach_btf_obj_fd = btf_fd[i];
prog_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
trace_program,
- sizeof(trace_program) / sizeof(struct bpf_insn),
+ ARRAY_SIZE(trace_program),
&load_opts);
if (!ASSERT_GE(prog_fd[i], 0, "bpf_prog_load"))
goto out;
diff --git a/tools/testing/selftests/bpf/prog_tests/nested_trust.c b/tools/testing/selftests/bpf/prog_tests/nested_trust.c
index 39886f58924e..54a112ad5f9c 100644
--- a/tools/testing/selftests/bpf/prog_tests/nested_trust.c
+++ b/tools/testing/selftests/bpf/prog_tests/nested_trust.c
@@ -4,9 +4,13 @@
#include <test_progs.h>
#include "nested_trust_failure.skel.h"
#include "nested_trust_success.skel.h"
+#include "nested_acquire.skel.h"
void test_nested_trust(void)
{
RUN_TESTS(nested_trust_success);
RUN_TESTS(nested_trust_failure);
+
+ if (env.has_testmod)
+ RUN_TESTS(nested_acquire);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
index e72d75d6baa7..c29787e092d6 100644
--- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
@@ -11,7 +11,7 @@
#include <sched.h>
#include <sys/wait.h>
#include <sys/mount.h>
-#include <sys/fcntl.h>
+#include <fcntl.h>
#include "network_helpers.h"
#define STACK_SIZE (1024 * 1024)
diff --git a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
index daa952711d8f..e9c07d561ded 100644
--- a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
+++ b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include <network_helpers.h>
#include "test_parse_tcp_hdr_opt.skel.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
new file mode 100644
index 000000000000..509883e6823a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "pro_epilogue.skel.h"
+#include "epilogue_tailcall.skel.h"
+#include "pro_epilogue_goto_start.skel.h"
+#include "epilogue_exit.skel.h"
+
+struct st_ops_args {
+ __u64 a;
+};
+
+static void test_tailcall(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ struct epilogue_tailcall *skel;
+ struct st_ops_args args;
+ int err, prog_fd;
+
+ skel = epilogue_tailcall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "epilogue_tailcall__open_and_load"))
+ return;
+
+ topts.ctx_in = &args;
+ topts.ctx_size_in = sizeof(args);
+
+ skel->links.epilogue_tailcall =
+ bpf_map__attach_struct_ops(skel->maps.epilogue_tailcall);
+ if (!ASSERT_OK_PTR(skel->links.epilogue_tailcall, "attach_struct_ops"))
+ goto done;
+
+ /* Both test_epilogue_tailcall and test_epilogue_subprog are
+ * patched with an epilogue. When syscall_epilogue_tailcall()
+ * is run, test_epilogue_tailcall() is triggered.
+ * It executes a tail call and control is transferred to
+ * test_epilogue_subprog(). Only test_epilogue_subprog()
+ * does args->a += 1, so a final args.a value of 10001
+ * guarantees that only the epilogue of
+ * test_epilogue_subprog is executed.
+ */
+ memset(&args, 0, sizeof(args));
+ prog_fd = bpf_program__fd(skel->progs.syscall_epilogue_tailcall);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
+ ASSERT_EQ(args.a, 10001, "args.a");
+ ASSERT_EQ(topts.retval, 10001 * 2, "topts.retval");
+
+done:
+ epilogue_tailcall__destroy(skel);
+}
+
+void test_pro_epilogue(void)
+{
+ RUN_TESTS(pro_epilogue);
+ RUN_TESTS(pro_epilogue_goto_start);
+ RUN_TESTS(epilogue_exit);
+ if (test__start_subtest("tailcall"))
+ test_tailcall();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
index e2f1445b0e10..216b0dfac0fe 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
@@ -2,6 +2,7 @@
#include <test_progs.h>
#include <linux/nbd.h>
+#include "bpf_util.h"
void test_raw_tp_writable_reject_nbd_invalid(void)
{
@@ -25,7 +26,7 @@ void test_raw_tp_writable_reject_nbd_invalid(void)
);
bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2",
- program, sizeof(program) / sizeof(struct bpf_insn),
+ program, ARRAY_SIZE(program),
&opts);
if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load",
"failed: %d errno %d\n", bpf_fd, errno))
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
index f4aa7dab4766..e3668058b7bb 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
@@ -2,6 +2,7 @@
#include <test_progs.h>
#include <linux/nbd.h>
+#include "bpf_util.h"
/* NOTE: conflict with other tests. */
void serial_test_raw_tp_writable_test_run(void)
@@ -24,7 +25,7 @@ void serial_test_raw_tp_writable_test_run(void)
);
int bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2",
- trace_program, sizeof(trace_program) / sizeof(struct bpf_insn),
+ trace_program, ARRAY_SIZE(trace_program),
&trace_opts);
if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded",
"failed: %d errno %d\n", bpf_fd, errno))
@@ -41,7 +42,7 @@ void serial_test_raw_tp_writable_test_run(void)
);
int filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL v2",
- skb_program, sizeof(skb_program) / sizeof(struct bpf_insn),
+ skb_program, ARRAY_SIZE(skb_program),
&skb_opts);
if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
filter_fd, errno))
diff --git a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
index 3405923fe4e6..c7b9ba8b1d06 100644
--- a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
+++ b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
@@ -23,6 +23,7 @@ struct read_ret_desc {
{ .name = "probe_read_user_str", .ret = -EFAULT },
{ .name = "copy_from_user", .ret = -EFAULT },
{ .name = "copy_from_user_task", .ret = -EFAULT },
+ { .name = "copy_from_user_str", .ret = -EFAULT },
};
void test_read_vsyscall(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index eb74363f9f70..39d42271cc46 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -433,6 +433,19 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
y_cast = range_cast(y_t, x_t, y);
+ /* If we know that
+ * - *x* is within the signed 32-bit value range, and
+ * - the *y_cast* range is 32-bit signed and non-negative,
+ * then the *x* range can be improved with *y_cast* such that the *x*
+ * range is 32-bit signed and non-negative. Otherwise, if the new range
+ * for *x* allowed an upper 32 bits of 0xffffffff, the eventual new
+ * range for *x* would fall outside the signed 32-bit range, violating
+ * the original *x* range.
+ */
+ if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
+ (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
+ return range_improve(x_t, x, y_cast);
+
/* the case when new range knowledge, *y*, is a 32-bit subregister
* range, while previous range knowledge, *x*, is a full register
* 64-bit range, needs special treatment to take into account upper 32
@@ -490,7 +503,7 @@ static const char *op_str(enum op op)
/* Can register with range [x.a, x.b] *EVER* satisfy
* OP (<, <=, >, >=, ==, !=) relation to
- * a regsiter with range [y.a, y.b]
+ * a register with range [y.a, y.b]
* _in *num_t* domain_
*/
static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op)
@@ -519,7 +532,7 @@ static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op
/* Does register with range [x.a, x.b] *ALWAYS* satisfy
* OP (<, <=, >, >=, ==, !=) relation to
- * a regsiter with range [y.a, y.b]
+ * a register with range [y.a, y.b]
* _in *num_t* domain_
*/
static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op)
@@ -530,7 +543,7 @@ static bool range_always_op(enum num_t t, struct range x, struct range y, enum o
/* Does register with range [x.a, x.b] *NEVER* satisfy
* OP (<, <=, >, >=, ==, !=) relation to
- * a regsiter with range [y.a, y.b]
+ * a register with range [y.a, y.b]
* _in *num_t* domain_
*/
static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op)
@@ -1005,11 +1018,11 @@ static int parse_reg_state(const char *s, struct reg_state *reg)
* - umin=%llu, if missing, assumed 0;
* - umax=%llu, if missing, assumed U64_MAX;
* - smin=%lld, if missing, assumed S64_MIN;
- * - smax=%lld, if missing, assummed S64_MAX;
+ * - smax=%lld, if missing, assumed S64_MAX;
* - umin32=%d, if missing, assumed 0;
* - umax32=%d, if missing, assumed U32_MAX;
* - smin32=%d, if missing, assumed S32_MIN;
- * - smax32=%d, if missing, assummed S32_MAX;
+ * - smax32=%d, if missing, assumed S32_MAX;
* - var_off=(%#llx; %#llx), tnum part, we don't care about it.
*
* If some of the values are equal, they will be grouped (but min/max
@@ -1474,7 +1487,7 @@ static int verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t
u64 elapsed_ns = get_time_ns() - ctx->start_ns;
double remain_ns = elapsed_ns / progress * (1 - progress);
- fprintf(env.stderr, "PROGRESS (%s): %d/%d (%.2lf%%), "
+ fprintf(env.stderr_saved, "PROGRESS (%s): %d/%d (%.2lf%%), "
"elapsed %llu mins (%.2lf hrs), "
"ETA %.0lf mins (%.2lf hrs)\n",
ctx->progress_ctx,
@@ -1871,7 +1884,7 @@ cleanup:
* envvar is not set, this test is skipped during test_progs testing.
*
* We split this up into smaller subsets based on initialization and
- * conditiona numeric domains to get an easy parallelization with test_progs'
+ * conditional numeric domains to get an easy parallelization with test_progs'
* -j argument.
*/
@@ -1925,7 +1938,7 @@ static u64 rand_u64()
{
/* RAND_MAX is guaranteed to be at least 1<<15, but in practice it
* seems to be 1<<31, so we need to call it thrice to get full u64;
- * we'll use rougly equal split: 22 + 21 + 21 bits
+ * we'll use roughly equal split: 22 + 21 + 21 bits
*/
return ((u64)random() << 42) |
(((u64)random() & RAND_21BIT_MASK) << 21) |
@@ -2108,6 +2121,9 @@ static struct subtest_case crafted_cases[] = {
{S32, U32, {(u32)S32_MIN, 0}, {0, 0}},
{S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}},
{S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}},
+ {S64, U32, {0x0, 0x1f}, {0xffffffff80000000ULL, 0x000000007fffffffULL}},
+ {S64, U32, {0x0, 0x1f}, {0xffffffffffff8000ULL, 0x0000000000007fffULL}},
+ {S64, U32, {0x0, 0x1f}, {0xffffffffffffff80ULL, 0x000000000000007fULL}},
};
/* Go over crafted hard-coded cases. This is fast, so we do it as part of
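To make the refinement described in the new comment concrete, here is a hypothetical standalone helper mirroring its precondition (names are illustrative and the arithmetic is simplified to signed 64-bit values):

#include <stdbool.h>
#include <stdint.h>

/* x is a signed 64-bit range; y is a signed 32-bit range already cast
 * to s64. If y_cast is non-negative and x already fits in the signed
 * 32-bit range, x can safely be tightened with y_cast without the
 * result wrapping past the 32-bit bounds.
 * E.g. x = [INT32_MIN, INT32_MAX], y_cast = [0, 0x1f] -> x tightens
 * to [0, 0x1f].
 */
static bool can_tighten_s64_with_s32(int64_t xa, int64_t xb,
				     int64_t ya, int64_t yb)
{
	return ya >= 0 && yb <= INT32_MAX &&	   /* y_cast: non-negative s32 */
	       xa >= INT32_MIN && xb <= INT32_MAX; /* x: within s32 range */
}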
diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
index f81d08d429a2..51544372f52e 100644
--- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
+++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
@@ -103,7 +103,7 @@ static int resolve_symbols(void)
btf = btf__parse_elf("btf_data.bpf.o", NULL);
if (CHECK(libbpf_get_error(btf), "resolve",
- "Failed to load BTF from btf_data.o\n"))
+ "Failed to load BTF from btf_data.bpf.o\n"))
return -1;
nr = btf__type_cnt(btf);
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 64c5f5eb2994..036d4760d2c1 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -37,9 +37,7 @@ static int sk_fds[REUSEPORT_ARRAY_SIZE];
static int reuseport_array = -1, outer_map = -1;
static enum bpf_map_type inner_map_type;
static int select_by_skb_data_prog;
-static int saved_tcp_syncookie = -1;
static struct bpf_object *obj;
-static int saved_tcp_fo = -1;
static __u32 index_zero;
static int epfd;
@@ -193,14 +191,6 @@ static int write_int_sysctl(const char *sysctl, int v)
return 0;
}
-static void restore_sysctls(void)
-{
- if (saved_tcp_fo != -1)
- write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo);
- if (saved_tcp_syncookie != -1)
- write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie);
-}
-
static int enable_fastopen(void)
{
int fo;
@@ -793,6 +783,7 @@ static void test_config(int sotype, sa_family_t family, bool inany)
TEST_INIT(test_pass_on_err),
TEST_INIT(test_detach_bpf),
};
+ struct netns_obj *netns;
char s[MAX_TEST_NAME];
const struct test *t;
@@ -808,9 +799,21 @@ static void test_config(int sotype, sa_family_t family, bool inany)
if (!test__start_subtest(s))
continue;
+ netns = netns_new("select_reuseport", true);
+ if (!ASSERT_OK_PTR(netns, "netns_new"))
+ continue;
+
+ if (CHECK_FAIL(enable_fastopen()))
+ goto out;
+ if (CHECK_FAIL(disable_syncookie()))
+ goto out;
+
setup_per_test(sotype, family, inany, t->no_inner_map);
t->fn(sotype, family);
cleanup_per_test(t->no_inner_map);
+
+out:
+ netns_free(netns);
}
}
@@ -850,21 +853,7 @@ out:
void serial_test_select_reuseport(void)
{
- saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
- if (saved_tcp_fo < 0)
- goto out;
- saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL);
- if (saved_tcp_syncookie < 0)
- goto out;
-
- if (enable_fastopen())
- goto out;
- if (disable_syncookie())
- goto out;
-
test_map_type(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
test_map_type(BPF_MAP_TYPE_SOCKMAP);
test_map_type(BPF_MAP_TYPE_SOCKHASH);
-out:
- restore_sysctls();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
index ae87c00867ba..023c31bde229 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
@@ -18,7 +18,6 @@
#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
-#include <error.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
@@ -47,8 +46,6 @@
#define INT_IP6 "fd00::2"
#define INT_PORT 8008
-#define IO_TIMEOUT_SEC 3
-
enum server {
SERVER_A = 0,
SERVER_B = 1,
@@ -108,46 +105,6 @@ static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog)
return 0;
}
-static socklen_t inetaddr_len(const struct sockaddr_storage *addr)
-{
- return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) :
- addr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0);
-}
-
-static int make_socket(int sotype, const char *ip, int port,
- struct sockaddr_storage *addr)
-{
- struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC };
- int err, family, fd;
-
- family = is_ipv6(ip) ? AF_INET6 : AF_INET;
- err = make_sockaddr(family, ip, port, addr, NULL);
- if (CHECK(err, "make_address", "failed\n"))
- return -1;
-
- fd = socket(addr->ss_family, sotype, 0);
- if (CHECK(fd < 0, "socket", "failed\n")) {
- log_err("failed to make socket");
- return -1;
- }
-
- err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
- if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) {
- log_err("failed to set SNDTIMEO");
- close(fd);
- return -1;
- }
-
- err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
- if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) {
- log_err("failed to set RCVTIMEO");
- close(fd);
- return -1;
- }
-
- return fd;
-}
-
static int setsockopts(int fd, void *opts)
{
struct cb_opts *co = (struct cb_opts *)opts;
@@ -229,27 +186,6 @@ fail:
return -1;
}
-static int make_client(int sotype, const char *ip, int port)
-{
- struct sockaddr_storage addr = {0};
- int err, fd;
-
- fd = make_socket(sotype, ip, port, &addr);
- if (fd < 0)
- return -1;
-
- err = connect(fd, (void *)&addr, inetaddr_len(&addr));
- if (CHECK(err, "make_client", "connect")) {
- log_err("failed to connect client socket");
- goto fail;
- }
-
- return fd;
-fail:
- close(fd);
- return -1;
-}
-
static __u64 socket_cookie(int fd)
{
__u64 cookie;
@@ -646,8 +582,9 @@ static void run_lookup_prog(const struct test *t)
goto close;
}
- client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port);
- if (client_fd < 0)
+ client_fd = connect_to_addr_str(is_ipv6(t->connect_to.ip) ? AF_INET6 : AF_INET,
+ t->sotype, t->connect_to.ip, t->connect_to.port, NULL);
+ if (!ASSERT_OK_FD(client_fd, "connect_to_addr_str"))
goto close;
if (t->sotype == SOCK_STREAM)
@@ -862,9 +799,11 @@ static void test_redirect_lookup(struct test_sk_lookup *skel)
static void drop_on_lookup(const struct test *t)
{
+ int family = is_ipv6(t->connect_to.ip) ? AF_INET6 : AF_INET;
struct sockaddr_storage dst = {};
int client_fd, server_fd, err;
struct bpf_link *lookup_link;
+ socklen_t len;
ssize_t n;
lookup_link = attach_lookup_prog(t->lookup_prog);
@@ -876,12 +815,14 @@ static void drop_on_lookup(const struct test *t)
if (server_fd < 0)
goto detach;
- client_fd = make_socket(t->sotype, t->connect_to.ip,
- t->connect_to.port, &dst);
- if (client_fd < 0)
+ client_fd = client_socket(family, t->sotype, NULL);
+ if (!ASSERT_OK_FD(client_fd, "client_socket"))
goto close_srv;
- err = connect(client_fd, (void *)&dst, inetaddr_len(&dst));
+ err = make_sockaddr(family, t->connect_to.ip, t->connect_to.port, &dst, &len);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto close_all;
+ err = connect(client_fd, (void *)&dst, len);
if (t->sotype == SOCK_DGRAM) {
err = send_byte(client_fd);
if (err)
@@ -976,9 +917,11 @@ static void test_drop_on_lookup(struct test_sk_lookup *skel)
static void drop_on_reuseport(const struct test *t)
{
+ int family = is_ipv6(t->connect_to.ip) ? AF_INET6 : AF_INET;
struct sockaddr_storage dst = { 0 };
int client, server1, server2, err;
struct bpf_link *lookup_link;
+ socklen_t len;
ssize_t n;
lookup_link = attach_lookup_prog(t->lookup_prog);
@@ -1000,12 +943,14 @@ static void drop_on_reuseport(const struct test *t)
if (server2 < 0)
goto close_srv1;
- client = make_socket(t->sotype, t->connect_to.ip,
- t->connect_to.port, &dst);
- if (client < 0)
+ client = client_socket(family, t->sotype, NULL);
+ if (!ASSERT_OK_FD(client, "client_socket"))
goto close_srv2;
- err = connect(client, (void *)&dst, inetaddr_len(&dst));
+ err = make_sockaddr(family, t->connect_to.ip, t->connect_to.port, &dst, &len);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto close_all;
+ err = connect(client, (void *)&dst, len);
if (t->sotype == SOCK_DGRAM) {
err = send_byte(client);
if (err)
@@ -1152,8 +1097,8 @@ static void run_sk_assign_connected(struct test_sk_lookup *skel,
if (server_fd < 0)
return;
- connected_fd = make_client(sotype, EXT_IP4, EXT_PORT);
- if (connected_fd < 0)
+ connected_fd = connect_to_addr_str(AF_INET, sotype, EXT_IP4, EXT_PORT, NULL);
+ if (!ASSERT_OK_FD(connected_fd, "connect_to_addr_str"))
goto out_close_server;
/* Put a connected socket in redirect map */
@@ -1166,8 +1111,8 @@ static void run_sk_assign_connected(struct test_sk_lookup *skel,
goto out_close_connected;
/* Try to redirect TCP SYN / UDP packet to a connected socket */
- client_fd = make_client(sotype, EXT_IP4, EXT_PORT);
- if (client_fd < 0)
+ client_fd = connect_to_addr_str(AF_INET, sotype, EXT_IP4, EXT_PORT, NULL);
+ if (!ASSERT_OK_FD(client_fd, "connect_to_addr_str"))
goto out_unlink_prog;
if (sotype == SOCK_DGRAM) {
send_byte(client_fd);
@@ -1219,6 +1164,7 @@ static void run_multi_prog_lookup(const struct test_multi_prog *t)
int map_fd, server_fd, client_fd;
struct bpf_link *link1, *link2;
int prog_idx, done, err;
+ socklen_t len;
map_fd = bpf_map__fd(t->run_map);
@@ -1248,11 +1194,14 @@ static void run_multi_prog_lookup(const struct test_multi_prog *t)
if (err)
goto out_close_server;
- client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst);
- if (client_fd < 0)
+ client_fd = client_socket(AF_INET, SOCK_STREAM, NULL);
+ if (!ASSERT_OK_FD(client_fd, "client_socket"))
goto out_close_server;
- err = connect(client_fd, (void *)&dst, inetaddr_len(&dst));
+ err = make_sockaddr(AF_INET, EXT_IP4, EXT_PORT, &dst, &len);
+ if (!ASSERT_OK(err, "make_sockaddr"))
+ goto out_close_client;
+ err = connect(client_fd, (void *)&dst, len);
if (CHECK(err && !t->expect_errno, "connect",
"unexpected error %d\n", errno))
goto out_close_client;
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_addr.c b/tools/testing/selftests/bpf/prog_tests/sock_addr.c
index b880c564a204..a6ee7f8d4f79 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_addr.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_addr.c
@@ -2642,6 +2642,7 @@ void test_sock_addr(void)
break;
default:
ASSERT_TRUE(false, "Unknown sock addr test type");
+ err = -EINVAL;
break;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index da5a6fb03b69..4ee1148d22be 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -1836,6 +1836,7 @@ static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map
int family)
{
const char *family_name, *map_name;
+ struct netns_obj *netns;
char s[MAX_TEST_NAME];
family_name = family_str(family);
@@ -1843,8 +1844,15 @@ static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map
snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__);
if (!test__start_subtest(s))
return;
+
+ netns = netns_new("sockmap_listen", true);
+ if (!ASSERT_OK_PTR(netns, "netns_new"))
+ return;
+
inet_unix_skb_redir_to_connected(skel, map, family);
unix_inet_skb_redir_to_connected(skel, map, family);
+
+ netns_free(netns);
}
static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 59993fc9c0d7..21c5a37846ad 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -3,7 +3,10 @@
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"
-
+#include "tailcall_bpf2bpf_hierarchy2.skel.h"
+#include "tailcall_bpf2bpf_hierarchy3.skel.h"
+#include "tailcall_freplace.skel.h"
+#include "tc_bpf2bpf.skel.h"
/* test_tailcall_1 checks basic functionality by patching multiple locations
* in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -1187,6 +1190,372 @@ out:
tailcall_poke__destroy(call);
}
+static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
+ bool test_fexit,
+ bool test_fentry_entry)
+{
+ int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
+ struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
+ struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
+ struct bpf_program *prog, *fentry_prog;
+ struct bpf_map *prog_array, *data_map;
+ int fentry_prog_fd;
+ char buff[128] = {};
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &prog_fd);
+ if (!ASSERT_OK(err, "load obj"))
+ return;
+
+ prog = bpf_object__find_program_by_name(obj, "entry");
+ if (!ASSERT_OK_PTR(prog, "find entry prog"))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+ goto out;
+
+ if (test_fentry_entry) {
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ fentry_prog = bpf_object__find_program_by_name(fentry_obj,
+ "fentry");
+ if (!ASSERT_OK_PTR(fentry_prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(fentry_prog, prog_fd,
+ "entry");
+ if (!ASSERT_OK(err, "set_attach_target entry"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(fentry_prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+
+ fentry_prog_fd = bpf_program__fd(fentry_prog);
+ if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
+ if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (!ASSERT_GE(map_fd, 0, "map_fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find data_map"))
+ goto out;
+
+ } else {
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (!ASSERT_GE(map_fd, 0, "map_fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
+ data_map = bpf_object__find_map_by_name(obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find data_map"))
+ goto out;
+ }
+
+ if (test_fentry) {
+ fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
+ if (!ASSERT_OK_PTR(prog, "find fentry prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fentry_obj);
+ if (!ASSERT_OK(err, "load fentry_obj"))
+ goto out;
+
+ fentry_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
+ goto out;
+ }
+
+ if (test_fexit) {
+ fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
+ NULL);
+ if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
+ goto out;
+
+ prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
+ if (!ASSERT_OK_PTR(prog, "find fexit prog"))
+ goto out;
+
+ err = bpf_program__set_attach_target(prog, prog_fd,
+ "subprog_tail");
+ if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
+ goto out;
+
+ err = bpf_object__load(fexit_obj);
+ if (!ASSERT_OK(err, "load fexit_obj"))
+ goto out;
+
+ fexit_link = bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
+ goto out;
+ }
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
+
+ main_data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
+ goto out;
+
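+ /* The entry prog runs once and then tail-calls itself up to the tail
+ * call limit (MAX_TAIL_CALL_CNT, i.e. 33), so the main counter is
+ * expected to read 1 + 33 = 34; the fentry/fexit progs trace
+ * subprog_tail, which is called twice per entry run, hence 68 below.
+ */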
+ i = 0;
+ err = bpf_map_lookup_elem(main_data_fd, &i, &val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 34, "tailcall count");
+
+ if (test_fentry) {
+ data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fentry.bss map"))
+ goto out;
+
+ fentry_data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_GE(fentry_data_fd, 0,
+ "find tailcall_bpf2bpf_fentry.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 68, "fentry count");
+ }
+
+ if (test_fexit) {
+ data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
+ if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
+ "find tailcall_bpf2bpf_fexit.bss map"))
+ goto out;
+
+ fexit_data_fd = bpf_map__fd(data_map);
+ if (!ASSERT_GE(fexit_data_fd, 0,
+ "find tailcall_bpf2bpf_fexit.bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
+ ASSERT_OK(err, "fexit count");
+ ASSERT_EQ(val, 68, "fexit count");
+ }
+
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "tailcall");
+ ASSERT_EQ(topts.retval, 1, "tailcall retval");
+
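+ /* With the jmp_table entry removed, the second run executes entry
+ * exactly once with no tail calls, so the counters are expected to
+ * advance by 1 (main) and by 2 (fentry/fexit).
+ */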
+ i = 0;
+ err = bpf_map_lookup_elem(main_data_fd, &i, &val);
+ ASSERT_OK(err, "tailcall count");
+ ASSERT_EQ(val, 35, "tailcall count");
+
+ if (test_fentry) {
+ i = 0;
+ err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
+ ASSERT_OK(err, "fentry count");
+ ASSERT_EQ(val, 70, "fentry count");
+ }
+
+ if (test_fexit) {
+ i = 0;
+ err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
+ ASSERT_OK(err, "fexit count");
+ ASSERT_EQ(val, 70, "fexit count");
+ }
+
+out:
+ bpf_link__destroy(fentry_link);
+ bpf_link__destroy(fexit_link);
+ bpf_object__close(fentry_obj);
+ bpf_object__close(fexit_obj);
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
+ * call limit enforcement matches expectations when tail calls are preceded
+ * by two bpf2bpf calls.
+ *
+ *         subprog --tailcall-> entry
+ * entry <
+ *         subprog --tailcall-> entry
+ */
+static void test_tailcall_bpf2bpf_hierarchy_1(void)
+{
+ test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
+ false, false, false);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
+ * tail call limit enforcement matches expectations when tail calls are
+ * preceded by two bpf2bpf calls, and the two subprogs are traced by fentry.
+ */
+static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
+{
+ test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
+ true, false, false);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the
+ * tail call limit enforcement matches expectations when tail calls are
+ * preceded by two bpf2bpf calls, and the two subprogs are traced by fexit.
+ */
+static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
+{
+ test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
+ false, true, false);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of
+ * the tail call limit enforcement matches expectations when tail calls are
+ * preceded by two bpf2bpf calls, and the two subprogs are traced by both
+ * fentry and fexit.
+ */
+static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
+{
+ test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
+ true, true, false);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of
+ * the tail call limit enforcement matches expectations when tail calls are
+ * preceded by two bpf2bpf calls in fentry.
+ */
+static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
+{
+ test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
+ * call limit enforcement matches expectations:
+ *
+ *         subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
+ * entry <
+ *         subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
+ */
+static void test_tailcall_bpf2bpf_hierarchy_2(void)
+{
+ RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
+}
+
+/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
+ * call limit enforcement matches expectations:
+ *
+ *                                   subprog with jmp_table0 to classifier_0
+ * entry --tailcall-> classifier_0 <
+ *                                   subprog with jmp_table1 to classifier_0
+ */
+static void test_tailcall_bpf2bpf_hierarchy_3(void)
+{
+ RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
+}
+
+/* test_tailcall_freplace checks that the attached freplace prog can be
+ * inserted into the prog_array map and then be tail-called.
+ */
+static void test_tailcall_freplace(void)
+{
+ struct tailcall_freplace *freplace_skel = NULL;
+ struct bpf_link *freplace_link = NULL;
+ struct bpf_program *freplace_prog;
+ struct tc_bpf2bpf *tc_skel = NULL;
+ int prog_fd, map_fd;
+ char buff[128] = {};
+ int err, key;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = buff,
+ .data_size_in = sizeof(buff),
+ .repeat = 1,
+ );
+
+ freplace_skel = tailcall_freplace__open();
+ if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
+ return;
+
+ tc_skel = tc_bpf2bpf__open_and_load();
+ if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
+ goto out;
+
+ prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+ freplace_prog = freplace_skel->progs.entry_freplace;
+ err = bpf_program__set_attach_target(freplace_prog, prog_fd, "subprog");
+ if (!ASSERT_OK(err, "set_attach_target"))
+ goto out;
+
+ err = tailcall_freplace__load(freplace_skel);
+ if (!ASSERT_OK(err, "tailcall_freplace__load"))
+ goto out;
+
+ freplace_link = bpf_program__attach_freplace(freplace_prog, prog_fd,
+ "subprog");
+ if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+ goto out;
+
+ map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+ prog_fd = bpf_program__fd(freplace_prog);
+ key = 0;
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "update jmp_table"))
+ goto out;
+
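+ /* The freplace prog increments a counter and tail-calls itself via
+ * jmp_table until the tail call limit is hit, so the run is expected
+ * to return 34 (1 initial run plus 33 tail calls).
+ */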
+ prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 34, "test_run retval");
+
+out:
+ bpf_link__destroy(freplace_link);
+ tc_bpf2bpf__destroy(tc_skel);
+ tailcall_freplace__destroy(freplace_skel);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -1223,4 +1592,18 @@ void test_tailcalls(void)
test_tailcall_bpf2bpf_fentry_entry();
if (test__start_subtest("tailcall_poke"))
test_tailcall_poke();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
+ test_tailcall_bpf2bpf_hierarchy_1();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
+ test_tailcall_bpf2bpf_hierarchy_fentry();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
+ test_tailcall_bpf2bpf_hierarchy_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
+ test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
+ if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
+ test_tailcall_bpf2bpf_hierarchy_fentry_entry();
+ test_tailcall_bpf2bpf_hierarchy_2();
+ test_tailcall_bpf2bpf_hierarchy_3();
+ if (test__start_subtest("tailcall_freplace"))
+ test_tailcall_freplace();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
index 196abf223465..f77f604389aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
@@ -2384,7 +2384,7 @@ static int generate_dummy_prog(void)
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
+ const size_t prog_insn_cnt = ARRAY_SIZE(prog_insns);
LIBBPF_OPTS(bpf_prog_load_opts, opts);
const size_t log_buf_sz = 256;
char log_buf[log_buf_sz];
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 327d51f59142..c85798966aec 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -68,6 +68,7 @@
__FILE__, __LINE__, strerror(errno), ##__VA_ARGS__)
static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL};
+static struct netns_obj *netns_objs[3];
static int write_file(const char *path, const char *newval)
{
@@ -87,27 +88,41 @@ static int write_file(const char *path, const char *newval)
static int netns_setup_namespaces(const char *verb)
{
+ struct netns_obj **ns_obj = netns_objs;
const char * const *ns = namespaces;
- char cmd[128];
while (*ns) {
- snprintf(cmd, sizeof(cmd), "ip netns %s %s", verb, *ns);
- if (!ASSERT_OK(system(cmd), cmd))
- return -1;
+ if (strcmp(verb, "add") == 0) {
+ *ns_obj = netns_new(*ns, false);
+ if (!ASSERT_OK_PTR(*ns_obj, "netns_new"))
+ return -1;
+ } else {
+ if (!ASSERT_OK_PTR(*ns_obj, "netns_obj is NULL"))
+ return -1;
+ netns_free(*ns_obj);
+ *ns_obj = NULL;
+ }
ns++;
+ ns_obj++;
}
return 0;
}
static void netns_setup_namespaces_nofail(const char *verb)
{
+ struct netns_obj **ns_obj = netns_objs;
const char * const *ns = namespaces;
- char cmd[128];
while (*ns) {
- snprintf(cmd, sizeof(cmd), "ip netns %s %s > /dev/null 2>&1", verb, *ns);
- system(cmd);
+ if (strcmp(verb, "add") == 0) {
+ *ns_obj = netns_new(*ns, false);
+ } else {
+ if (*ns_obj)
+ netns_free(*ns_obj);
+ *ns_obj = NULL;
+ }
ns++;
+ ns_obj++;
}
}
@@ -471,7 +486,7 @@ static int set_forwarding(bool enable)
static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
{
- struct __kernel_timespec pkt_ts = {};
+ struct timespec pkt_ts = {};
char ctl[CMSG_SPACE(sizeof(pkt_ts))];
struct timespec now_ts;
struct msghdr msg = {};
@@ -495,7 +510,7 @@ static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
- cmsg->cmsg_type == SO_TIMESTAMPNS_NEW)
+ cmsg->cmsg_type == SO_TIMESTAMPNS)
memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec;
@@ -537,9 +552,9 @@ static int wait_netstamp_needed_key(void)
if (!ASSERT_GE(srv_fd, 0, "start_server"))
goto done;
- err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
+ err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS,
&opt, sizeof(opt));
- if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
+ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)"))
goto done;
cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS);
@@ -621,9 +636,9 @@ static void test_inet_dtime(int family, int type, const char *addr, __u16 port)
return;
/* Ensure the kernel puts the (rcv) timestamp for all skb */
- err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
+ err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS,
&opt, sizeof(opt));
- if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
+ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)"))
goto done;
if (type == SOCK_STREAM) {
@@ -857,7 +872,7 @@ static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
test_inet_dtime(family, SOCK_STREAM, addr, 50000 + t);
/* fwdns_prio100 prog does not read delivery_time_type, so
- * kernel puts the (rcv) timetamp in __sk_buff->tstamp
+ * kernel puts the (rcv) timestamp in __sk_buff->tstamp
*/
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P100));
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
index f2b99d95d916..c38784c1c066 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
index 2900c5e9a016..1750c29b94f8 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c
@@ -38,11 +38,7 @@ void test_bpf_syscall_macro(void)
/* check whether args of syscall are copied correctly */
prctl(exp_arg1, exp_arg2, exp_arg3, exp_arg4, exp_arg5);
-#if defined(__aarch64__) || defined(__s390__)
- ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1");
-#else
ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1");
-#endif
ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2");
ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3");
/* it cannot copy arg4 when uses PT_REGS_PARM4 on x86_64 */
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c
index a0054019e677..9c0200c132d9 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c
@@ -51,7 +51,7 @@ static int run_set_secureexec(int map_fd, int secureexec)
exit(ret);
/* If the binary is executed with securexec=1, the dynamic
- * loader ingores and unsets certain variables like LD_PRELOAD,
+ * loader ignores and unsets certain variables like LD_PRELOAD,
* TMPDIR etc. TMPDIR is used here to simplify the example, as
* LD_PRELOAD requires a real .so file.
*
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
index 16175d579bc7..2a27f3714f5c 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include "lsm.skel.h"
+#include "lsm_tailcall.skel.h"
char *CMD_ARGS[] = {"true", NULL};
@@ -95,7 +96,7 @@ static int test_lsm(struct lsm *skel)
return 0;
}
-void test_test_lsm(void)
+static void test_lsm_basic(void)
{
struct lsm *skel = NULL;
int err;
@@ -114,3 +115,46 @@ void test_test_lsm(void)
close_prog:
lsm__destroy(skel);
}
+
+static void test_lsm_tailcall(void)
+{
+ struct lsm_tailcall *skel = NULL;
+ int map_fd, prog_fd;
+ int err, key;
+
+ skel = lsm_tailcall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "lsm_tailcall__skel_load"))
+ goto close_prog;
+
+ map_fd = bpf_map__fd(skel->maps.jmp_table);
+ if (CHECK_FAIL(map_fd < 0))
+ goto close_prog;
+
+ prog_fd = bpf_program__fd(skel->progs.lsm_file_permission_prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto close_prog;
+
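+ /* This update is expected to fail (note CHECK_FAIL(!err)): tail calls
+ * between LSM progs attached to different hooks are rejected.
+ */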
+ key = 0;
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(!err))
+ goto close_prog;
+
+ prog_fd = bpf_program__fd(skel->progs.lsm_file_alloc_security_prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto close_prog;
+
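+ /* Updating the map with the file_alloc_security prog is expected to
+ * succeed.
+ */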
+ err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto close_prog;
+
+close_prog:
+ lsm_tailcall__destroy(skel);
+}
+
+void test_test_lsm(void)
+{
+ if (test__start_subtest("lsm_basic"))
+ test_lsm_basic();
+ if (test__start_subtest("lsm_tailcall"))
+ test_lsm_tailcall();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c b/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c
new file mode 100644
index 000000000000..ce745776ed18
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+#include "mmap_inner_array.skel.h"
+
+void test_mmap_inner_array(void)
+{
+ const long page_size = sysconf(_SC_PAGE_SIZE);
+ struct mmap_inner_array *skel;
+ int inner_array_fd, err;
+ void *tmp;
+ __u64 *val;
+
+ skel = mmap_inner_array__open_and_load();
+
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ inner_array_fd = bpf_map__fd(skel->maps.inner_array);
+ tmp = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, inner_array_fd, 0);
+ if (!ASSERT_OK_PTR(tmp, "inner array mmap"))
+ goto out;
+ val = (void *)tmp;
+
+ err = mmap_inner_array__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto out_unmap;
+
+ skel->bss->pid = getpid();
+ usleep(1);
+
+ /* pid is set, pid_match == true and outer_map_match == false */
+ ASSERT_TRUE(skel->bss->pid_match, "pid match 1");
+ ASSERT_FALSE(skel->bss->outer_map_match, "outer map match 1");
+ ASSERT_FALSE(skel->bss->done, "done 1");
+ ASSERT_EQ(*val, 0, "value match 1");
+
+ err = bpf_map__update_elem(skel->maps.outer_map,
+ &skel->bss->pid, sizeof(skel->bss->pid),
+ &inner_array_fd, sizeof(inner_array_fd),
+ BPF_ANY);
+ if (!ASSERT_OK(err, "update elem"))
+ goto out_unmap;
+ usleep(1);
+
+ /* outer map key is set, outer_map_match == true */
+ ASSERT_TRUE(skel->bss->pid_match, "pid match 2");
+ ASSERT_TRUE(skel->bss->outer_map_match, "outer map match 2");
+ ASSERT_TRUE(skel->bss->done, "done 2");
+ ASSERT_EQ(*val, skel->data->match_value, "value match 2");
+
+out_unmap:
+ munmap(tmp, page_size);
+out:
+ mmap_inner_array__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
index 7ddd6615b7e7..baceb0de9d49 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
@@ -72,7 +72,7 @@ static void test_strncmp_ret(void)
got = trigger_strncmp(skel);
ASSERT_EQ(got, 0, "strncmp: same str");
- /* Not-null-termainted string */
+ /* Not-null-terminated string */
memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str));
skel->bss->str[sizeof(skel->bss->str) - 1] = 'A';
got = trigger_strncmp(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
index bbcf12696a6b..75a0dea511b3 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -9,6 +9,7 @@
#include "struct_ops_nulled_out_cb.skel.h"
#include "struct_ops_forgotten_cb.skel.h"
#include "struct_ops_detach.skel.h"
+#include "unsupported_ops.skel.h"
static void check_map_info(struct bpf_map_info *info)
{
@@ -311,5 +312,6 @@ void serial_test_struct_ops_module(void)
test_struct_ops_forgotten_cb();
if (test__start_subtest("test_detach_link"))
test_detach_link();
+ RUN_TESTS(unsupported_ops);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c
new file mode 100644
index 000000000000..8d75424fe6bc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Create 3 namespaces with 3 veth peers, and forward packets in-between using
+ * native XDP
+ *
+ * XDP_TX
+ * NS1(veth11) NS2(veth22) NS3(veth33)
+ * | | |
+ * | | |
+ * (veth1, (veth2, (veth3,
+ * id:111) id:122) id:133)
+ * ^ | ^ | ^ |
+ * | | XDP_REDIRECT | | XDP_REDIRECT | |
+ * | ------------------ ------------------ |
+ * -----------------------------------------
+ * XDP_REDIRECT
+ */
+
+#define _GNU_SOURCE
+#include <net/if.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "xdp_dummy.skel.h"
+#include "xdp_redirect_map.skel.h"
+#include "xdp_tx.skel.h"
+
+#define VETH_PAIRS_COUNT 3
+#define NS_SUFFIX_LEN 6
+#define VETH_NAME_MAX_LEN 16
+#define IP_SRC "10.1.1.11"
+#define IP_DST "10.1.1.33"
+#define IP_CMD_MAX_LEN 128
+
+struct skeletons {
+ struct xdp_dummy *xdp_dummy;
+ struct xdp_tx *xdp_tx;
+ struct xdp_redirect_map *xdp_redirect_maps;
+};
+
+struct veth_configuration {
+ char local_veth[VETH_NAME_MAX_LEN]; /* Interface in main namespace */
+ char remote_veth[VETH_NAME_MAX_LEN]; /* Peer interface in dedicated namespace */
+ const char *namespace; /* Namespace for the remote veth */
+ char next_veth[VETH_NAME_MAX_LEN]; /* Local interface to redirect traffic to */
+ char *remote_addr; /* IP address of the remote veth */
+};
+
+static struct veth_configuration config[VETH_PAIRS_COUNT] = {
+ {
+ .local_veth = "veth1",
+ .remote_veth = "veth11",
+ .next_veth = "veth2",
+ .remote_addr = IP_SRC,
+ .namespace = "ns-veth11"
+ },
+ {
+ .local_veth = "veth2",
+ .remote_veth = "veth22",
+ .next_veth = "veth3",
+ .remote_addr = NULL,
+ .namespace = "ns-veth22"
+ },
+ {
+ .local_veth = "veth3",
+ .remote_veth = "veth33",
+ .next_veth = "veth1",
+ .remote_addr = IP_DST,
+ .namespace = "ns-veth33"
+ }
+};
+
+static int attach_programs_to_veth_pair(struct skeletons *skeletons, int index)
+{
+ struct bpf_program *local_prog, *remote_prog;
+ struct bpf_link **local_link, **remote_link;
+ struct nstoken *nstoken;
+ struct bpf_link *link;
+ int interface;
+
+ switch (index) {
+ case 0:
+ local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_0;
+ local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_0;
+ remote_prog = skeletons->xdp_dummy->progs.xdp_dummy_prog;
+ remote_link = &skeletons->xdp_dummy->links.xdp_dummy_prog;
+ break;
+ case 1:
+ local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_1;
+ local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_1;
+ remote_prog = skeletons->xdp_tx->progs.xdp_tx;
+ remote_link = &skeletons->xdp_tx->links.xdp_tx;
+ break;
+ case 2:
+ local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_2;
+ local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_2;
+ remote_prog = skeletons->xdp_dummy->progs.xdp_dummy_prog;
+ remote_link = &skeletons->xdp_dummy->links.xdp_dummy_prog;
+ break;
+ default:
+ return -1;
+ }
+ interface = if_nametoindex(config[index].local_veth);
+ if (!ASSERT_NEQ(interface, 0, "non zero interface index"))
+ return -1;
+ link = bpf_program__attach_xdp(local_prog, interface);
+ if (!ASSERT_OK_PTR(link, "attach xdp program to local veth"))
+ return -1;
+ *local_link = link;
+ nstoken = open_netns(config[index].namespace);
+ if (!ASSERT_OK_PTR(nstoken, "switch to remote veth namespace"))
+ return -1;
+ interface = if_nametoindex(config[index].remote_veth);
+ if (!ASSERT_NEQ(interface, 0, "non zero interface index")) {
+ close_netns(nstoken);
+ return -1;
+ }
+ link = bpf_program__attach_xdp(remote_prog, interface);
+ *remote_link = link;
+ close_netns(nstoken);
+ if (!ASSERT_OK_PTR(link, "attach xdp program to remote veth"))
+ return -1;
+
+ return 0;
+}
+
+static int configure_network(struct skeletons *skeletons)
+{
+ int interface_id;
+ int map_fd;
+ int err;
+ int i = 0;
+
+ /* First create and configure all interfaces */
+ for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+ SYS(fail, "ip netns add %s", config[i].namespace);
+ SYS(fail, "ip link add %s type veth peer name %s netns %s",
+ config[i].local_veth, config[i].remote_veth, config[i].namespace);
+ SYS(fail, "ip link set dev %s up", config[i].local_veth);
+ if (config[i].remote_addr)
+ SYS(fail, "ip -n %s addr add %s/24 dev %s", config[i].namespace,
+ config[i].remote_addr, config[i].remote_veth);
+ SYS(fail, "ip -n %s link set dev %s up", config[i].namespace,
+ config[i].remote_veth);
+ }
+
+ /* Then configure the redirect map and attach programs to interfaces */
+ map_fd = bpf_map__fd(skeletons->xdp_redirect_maps->maps.tx_port);
+ if (!ASSERT_GE(map_fd, 0, "open redirect map"))
+ goto fail;
+ for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+ interface_id = if_nametoindex(config[i].next_veth);
+ if (!ASSERT_NEQ(interface_id, 0, "non zero interface index"))
+ goto fail;
+ err = bpf_map_update_elem(map_fd, &i, &interface_id, BPF_ANY);
+ if (!ASSERT_OK(err, "configure interface redirection through map"))
+ goto fail;
+ if (attach_programs_to_veth_pair(skeletons, i))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return -1;
+}
+
+static void cleanup_network(void)
+{
+ int i;
+
+ /* Deleting the namespaces automatically removes the veth pairs as well */
+ for (i = 0; i < VETH_PAIRS_COUNT; i++)
+ SYS_NOFAIL("ip netns del %s", config[i].namespace);
+}
+
+static int check_ping(struct skeletons *skeletons)
+{
+ /* Test: if all interfaces are properly configured, we must be able to ping
+ * veth33 from veth11
+ */
+ return SYS_NOFAIL("ip netns exec %s ping -c 1 -W 1 %s > /dev/null",
+ config[0].namespace, IP_DST);
+}
+
+void test_xdp_veth_redirect(void)
+{
+ struct skeletons skeletons = {};
+
+ skeletons.xdp_dummy = xdp_dummy__open_and_load();
+ if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load"))
+ return;
+
+ skeletons.xdp_tx = xdp_tx__open_and_load();
+ if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load"))
+ goto destroy_xdp_dummy;
+
+ skeletons.xdp_redirect_maps = xdp_redirect_map__open_and_load();
+ if (!ASSERT_OK_PTR(skeletons.xdp_redirect_maps, "xdp_redirect_map__open_and_load"))
+ goto destroy_xdp_tx;
+
+ if (configure_network(&skeletons))
+ goto destroy_xdp_redirect_map;
+
+ ASSERT_OK(check_ping(&skeletons), "ping");
+
+destroy_xdp_redirect_map:
+ xdp_redirect_map__destroy(skeletons.xdp_redirect_maps);
+destroy_xdp_tx:
+ xdp_tx__destroy(skeletons.xdp_tx);
+destroy_xdp_dummy:
+ xdp_dummy__destroy(skeletons.xdp_dummy);
+
+ cleanup_network();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
index fc4a175d8d76..fe86e4fdb89c 100644
--- a/tools/testing/selftests/bpf/prog_tests/token.c
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
@@ -867,7 +867,7 @@ static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel
}
unsetenv(TOKEN_ENVVAR);
- /* now the same struct_ops skeleton should succeed thanks to libppf
+ /* now the same struct_ops skeleton should succeed thanks to libbpf
* creating BPF token from /sys/fs/bpf mount point
*/
skel = dummy_st_ops_success__open_and_load();
@@ -929,7 +929,7 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l
if (!ASSERT_OK(err, "setenv_token_path"))
goto err_out;
- /* now the same struct_ops skeleton should succeed thanks to libppf
+ /* now the same struct_ops skeleton should succeed thanks to libbpf
* creating BPF token from custom mount point
*/
skel = dummy_st_ops_success__open_and_load();
diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
index 0adf8d9475cb..472f4f9fa95f 100644
--- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
+++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
@@ -7,6 +7,7 @@
#include "test_unpriv_bpf_disabled.skel.h"
#include "cap_helpers.h"
+#include "bpf_util.h"
/* Using CAP_LAST_CAP is risky here, since it can get pulled in from
* an old /usr/include/linux/capability.h and be < CAP_BPF; as a result
@@ -146,7 +147,7 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
+ const size_t prog_insn_cnt = ARRAY_SIZE(prog_insns);
LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
struct bpf_map_info map_info = {};
__u32 map_info_len = sizeof(map_info);
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index bf6ca8e3eb13..844f6fc8487b 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -6,6 +6,8 @@
#include "uprobe_multi.skel.h"
#include "uprobe_multi_bench.skel.h"
#include "uprobe_multi_usdt.skel.h"
+#include "uprobe_multi_consumers.skel.h"
+#include "uprobe_multi_pid_filter.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
#include "../sdt.h"
@@ -38,6 +40,7 @@ struct child {
int pid;
int tid;
pthread_t thread;
+ char stack[65536];
};
static void release_child(struct child *child)
@@ -67,41 +70,54 @@ static void kick_child(struct child *child)
fflush(NULL);
}
-static struct child *spawn_child(void)
+static int child_func(void *arg)
{
- static struct child child;
- int err;
- int c;
-
- /* pipe to notify child to execute the trigger functions */
- if (pipe(child.go))
- return NULL;
+ struct child *child = arg;
+ int err, c;
- child.pid = child.tid = fork();
- if (child.pid < 0) {
- release_child(&child);
- errno = EINVAL;
- return NULL;
- }
+ close(child->go[1]);
- /* child */
- if (child.pid == 0) {
- close(child.go[1]);
+ /* wait for parent's kick */
+ err = read(child->go[0], &c, 1);
+ if (err != 1)
+ exit(err);
- /* wait for parent's kick */
- err = read(child.go[0], &c, 1);
- if (err != 1)
- exit(err);
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+ usdt_trigger();
- uprobe_multi_func_1();
- uprobe_multi_func_2();
- uprobe_multi_func_3();
- usdt_trigger();
+ exit(errno);
+}
- exit(errno);
+static int spawn_child_flag(struct child *child, bool clone_vm)
+{
+ /* pipe to notify child to execute the trigger functions */
+ if (pipe(child->go))
+ return -1;
+
+ if (clone_vm) {
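+ /* Point clone() at the middle of the buffer so the child stays
+ * within it whichever direction the stack grows on this arch.
+ */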
+ child->pid = child->tid = clone(child_func, child->stack + sizeof(child->stack)/2,
+ CLONE_VM|SIGCHLD, child);
+ } else {
+ child->pid = child->tid = fork();
+ }
+ if (child->pid < 0) {
+ release_child(child);
+ errno = EINVAL;
+ return -1;
}
- return &child;
+ /* fork-ed child */
+ if (!clone_vm && child->pid == 0)
+ child_func(child);
+
+ return 0;
+}
+
+static int spawn_child(struct child *child)
+{
+ return spawn_child_flag(child, false);
}
static void *child_thread(void *ctx)
@@ -130,39 +146,38 @@ static void *child_thread(void *ctx)
pthread_exit(&err);
}
-static struct child *spawn_thread(void)
+static int spawn_thread(struct child *child)
{
- static struct child child;
int c, err;
/* pipe to notify child to execute the trigger functions */
- if (pipe(child.go))
- return NULL;
+ if (pipe(child->go))
+ return -1;
/* pipe to notify parent that child thread is ready */
- if (pipe(child.c2p)) {
- close(child.go[0]);
- close(child.go[1]);
- return NULL;
+ if (pipe(child->c2p)) {
+ close(child->go[0]);
+ close(child->go[1]);
+ return -1;
}
- child.pid = getpid();
+ child->pid = getpid();
- err = pthread_create(&child.thread, NULL, child_thread, &child);
+ err = pthread_create(&child->thread, NULL, child_thread, child);
if (err) {
err = -errno;
- close(child.go[0]);
- close(child.go[1]);
- close(child.c2p[0]);
- close(child.c2p[1]);
+ close(child->go[0]);
+ close(child->go[1]);
+ close(child->c2p[0]);
+ close(child->c2p[1]);
errno = -err;
- return NULL;
+ return -1;
}
- err = read(child.c2p[0], &c, 1);
+ err = read(child->c2p[0], &c, 1);
if (!ASSERT_EQ(err, 1, "child_thread_ready"))
- return NULL;
+ return -1;
- return &child;
+ return 0;
}
static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
@@ -198,7 +213,7 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
/*
* There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123]
- * function and each slepable probe (6) increments uprobe_multi_sleep_result.
+ * function and each sleepable probe (6) increments uprobe_multi_sleep_result.
*/
ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
@@ -303,24 +318,22 @@ cleanup:
static void
test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
{
- struct child *child;
+ static struct child child;
/* no pid filter */
__test_attach_api(binary, pattern, opts, NULL);
/* pid filter */
- child = spawn_child();
- if (!ASSERT_OK_PTR(child, "spawn_child"))
+ if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
return;
- __test_attach_api(binary, pattern, opts, child);
+ __test_attach_api(binary, pattern, opts, &child);
/* pid filter (thread) */
- child = spawn_thread();
- if (!ASSERT_OK_PTR(child, "spawn_thread"))
+ if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
return;
- __test_attach_api(binary, pattern, opts, child);
+ __test_attach_api(binary, pattern, opts, &child);
}
static void test_attach_api_pattern(void)
@@ -516,6 +529,122 @@ cleanup:
uprobe_multi__destroy(skel);
}
+#ifdef __x86_64__
+noinline void uprobe_multi_error_func(void)
+{
+ /*
+ * If --fcf-protection=branch is enabled, gcc emits endbr as the
+ * first instruction, so mark the exact address of the int3 with a
+ * symbol to be used in the attach_uprobe_fail_trap test below.
+ */
+ asm volatile (
+ ".globl uprobe_multi_error_func_int3; \n"
+ "uprobe_multi_error_func_int3: \n"
+ "int3 \n"
+ );
+}
+
+/*
+ * Attaching uprobe on uprobe_multi_error_func results in error
+ * because it already starts with int3 instruction.
+ */
+static void attach_uprobe_fail_trap(struct uprobe_multi *skel)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ const char *syms[4] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ "uprobe_multi_func_3",
+ "uprobe_multi_error_func_int3",
+ };
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+
+ skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
+ "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_ERR_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) {
+ bpf_link__destroy(skel->links.uprobe);
+ skel->links.uprobe = NULL;
+ }
+}
+#else
+static void attach_uprobe_fail_trap(struct uprobe_multi *skel) { }
+#endif
+
+short sema_1 __used, sema_2 __used;
+
+static void attach_uprobe_fail_refctr(struct uprobe_multi *skel)
+{
+ unsigned long *tmp_offsets = NULL, *tmp_ref_ctr_offsets = NULL;
+ unsigned long offsets[3], ref_ctr_offsets[3];
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ const char *path = "/proc/self/exe";
+ const char *syms[3] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ };
+ const char *sema[3] = {
+ "sema_1",
+ "sema_2",
+ };
+ int prog_fd, link_fd, err;
+
+ prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &syms,
+ &tmp_offsets, STT_FUNC);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
+ return;
+
+ err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &sema,
+ &tmp_ref_ctr_offsets, STT_OBJECT);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
+ goto cleanup;
+
+ /*
+ * We attach 3 uprobes on 2 functions, so 2 uprobes share a single
+ * function, but with different ref_ctr_offsets, which is not allowed
+ * and results in failure.
+ */
+ offsets[0] = tmp_offsets[0]; /* uprobe_multi_func_1 */
+ offsets[1] = tmp_offsets[1]; /* uprobe_multi_func_2 */
+ offsets[2] = tmp_offsets[1]; /* uprobe_multi_func_2 */
+
+ ref_ctr_offsets[0] = tmp_ref_ctr_offsets[0]; /* sema_1 */
+ ref_ctr_offsets[1] = tmp_ref_ctr_offsets[1]; /* sema_2 */
+ ref_ctr_offsets[2] = tmp_ref_ctr_offsets[0]; /* sema_1, error */
+
+ opts.uprobe_multi.path = path;
+ opts.uprobe_multi.offsets = (const unsigned long *) &offsets;
+ opts.uprobe_multi.ref_ctr_offsets = (const unsigned long *) &ref_ctr_offsets;
+ opts.uprobe_multi.cnt = 3;
+
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_ERR(link_fd, "link_fd"))
+ close(link_fd);
+
+cleanup:
+ free(tmp_ref_ctr_offsets);
+ free(tmp_offsets);
+}
+
+static void test_attach_uprobe_fails(void)
+{
+ struct uprobe_multi *skel = NULL;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ return;
+
+ /* attach fails due to adding uprobe on trap instruction, x86_64 only */
+ attach_uprobe_fail_trap(skel);
+
+ /* attach fails due to a wrong ref_ctr_offset on one of the uprobes */
+ attach_uprobe_fail_refctr(skel);
+
+ uprobe_multi__destroy(skel);
+}
+
static void __test_link_api(struct child *child)
{
int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
@@ -595,24 +724,296 @@ cleanup:
static void test_link_api(void)
{
- struct child *child;
+ static struct child child;
/* no pid filter */
__test_link_api(NULL);
/* pid filter */
- child = spawn_child();
- if (!ASSERT_OK_PTR(child, "spawn_child"))
+ if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
return;
- __test_link_api(child);
+ __test_link_api(&child);
/* pid filter (thread) */
- child = spawn_thread();
- if (!ASSERT_OK_PTR(child, "spawn_thread"))
+ if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
+ return;
+
+ __test_link_api(&child);
+}
+
+static struct bpf_program *
+get_program(struct uprobe_multi_consumers *skel, int prog)
+{
+ switch (prog) {
+ case 0:
+ return skel->progs.uprobe_0;
+ case 1:
+ return skel->progs.uprobe_1;
+ case 2:
+ return skel->progs.uprobe_2;
+ case 3:
+ return skel->progs.uprobe_3;
+ default:
+ ASSERT_FAIL("get_program");
+ return NULL;
+ }
+}
+
+static struct bpf_link **
+get_link(struct uprobe_multi_consumers *skel, int link)
+{
+ switch (link) {
+ case 0:
+ return &skel->links.uprobe_0;
+ case 1:
+ return &skel->links.uprobe_1;
+ case 2:
+ return &skel->links.uprobe_2;
+ case 3:
+ return &skel->links.uprobe_3;
+ default:
+ ASSERT_FAIL("get_link");
+ return NULL;
+ }
+}
+
+static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx)
+{
+ struct bpf_program *prog = get_program(skel, idx);
+ struct bpf_link **link = get_link(skel, idx);
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+
+ if (!prog || !link)
+ return -1;
+
+ /*
+ * bit/prog: 0,1 uprobe entry
+ * bit/prog: 2,3 uprobe return
+ */
+ opts.retprobe = idx == 2 || idx == 3;
+
+ *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe",
+ "uprobe_consumer_test",
+ &opts);
+ if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi"))
+ return -1;
+ return 0;
+}
+
+static void uprobe_detach(struct uprobe_multi_consumers *skel, int idx)
+{
+ struct bpf_link **link = get_link(skel, idx);
+
+ bpf_link__destroy(*link);
+ *link = NULL;
+}
+
+static bool test_bit(int bit, unsigned long val)
+{
+ return val & (1 << bit);
+}
+
+noinline int
+uprobe_consumer_test(struct uprobe_multi_consumers *skel,
+ unsigned long before, unsigned long after)
+{
+ int idx;
+
+ /* detach uprobes for progs set in 'before' but cleared in 'after' ... */
+ for (idx = 0; idx < 4; idx++) {
+ if (test_bit(idx, before) && !test_bit(idx, after))
+ uprobe_detach(skel, idx);
+ }
+
+ /* ... and attach all new programs in 'after' state */
+ for (idx = 0; idx < 4; idx++) {
+ if (!test_bit(idx, before) && test_bit(idx, after)) {
+ if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_after"))
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void consumer_test(struct uprobe_multi_consumers *skel,
+ unsigned long before, unsigned long after)
+{
+ int err, idx;
+
+ printf("consumer_test before %lu after %lu\n", before, after);
+
+ /* for each idx set in 'before', attach the corresponding uprobe */
+ for (idx = 0; idx < 4; idx++) {
+ if (test_bit(idx, before)) {
+ if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_before"))
+ goto cleanup;
+ }
+ }
+
+ err = uprobe_consumer_test(skel, before, after);
+ if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
+ goto cleanup;
+
+ for (idx = 0; idx < 4; idx++) {
+ const char *fmt = "BUG";
+ __u64 val = 0;
+
+ if (idx < 2) {
+ /*
+ * uprobe entry
+ * +1 if attached in 'before'
+ */
+ if (test_bit(idx, before))
+ val++;
+ fmt = "prog 0/1: uprobe";
+ } else {
+ /*
+ * uprobe return is tricky ;-)
+ *
+ * to trigger the uretprobe consumer, the uretprobe needs to be
+ * installed, which means one of the 'return' uprobes (idx 2/3) was
+ * attached when the probed function was entered.
+ *
+ * in addition, if the 'after' state removes everything that was
+ * installed in 'before', the uprobe kernel object goes away and the
+ * return uprobe is not installed, so we won't hit it even if it's
+ * set in 'after'.
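+ *
+ * for example, with before = 0b0100 and after = 0b1000 the only
+ * consumer that kept the uprobe installed goes away, so the kernel
+ * uprobe object is removed and the uretprobe set in 'after' never
+ * fires (its counter stays 0).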
+ */
+ unsigned long had_uretprobes = before & 0b1100; /* any uretprobe in 'before' */
+ unsigned long probe_preserved = before & after; /* did the uprobe survive */
+
+ if (had_uretprobes && probe_preserved && test_bit(idx, after))
+ val++;
+ fmt = "idx 2/3: uretprobe";
+ }
+
+ ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt);
+ skel->bss->uprobe_result[idx] = 0;
+ }
+
+cleanup:
+ for (idx = 0; idx < 4; idx++)
+ uprobe_detach(skel, idx);
+}
+
+static void test_consumers(void)
+{
+ struct uprobe_multi_consumers *skel;
+ int before, after;
+
+ skel = uprobe_multi_consumers__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load"))
+ return;
+
+ /*
+ * The idea of this test is to try all possible combinations of
+ * uprobe consumers attached on a single function.
+ *
+ * - 2 uprobe entry consumers
+ * - 2 uprobe exit consumers
+ *
+ * The test uses 4 uprobes attached on a single function, but that
+ * translates into a single uprobe with 4 consumers in the kernel.
+ *
+ * The before/after values represent the state of attached consumers
+ * before and after the probed function runs:
+ *
+ * bit/prog 0,1 : uprobe entry
+ * bit/prog 2,3 : uprobe return
+ *
+ * For example for:
+ *
+ * before = 0b0101
+ * after = 0b0110
+ *
+ * it means that before we call 'uprobe_consumer_test' we attach
+ * uprobes defined in 'before' value:
+ *
+ * - bit/prog 0: uprobe entry
+ * - bit/prog 2: uprobe return
+ *
+ * uprobe_consumer_test is called and inside it we attach and detach
+ * uprobes based on 'after' value:
+ *
+ * - bit/prog 0: stays untouched
+ * - bit/prog 2: uprobe return is detached
+ *
+ * uprobe_consumer_test returns and we check that the counter values
+ * increased by the bpf programs on each uprobe match the expected
+ * counts based on the before/after bits.
+ */
+
+ for (before = 0; before < 16; before++) {
+ for (after = 0; after < 16; after++)
+ consumer_test(skel, before, after);
+ }
+
+ uprobe_multi_consumers__destroy(skel);
+}
+
+static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx)
+{
+ switch (idx) {
+ case 0: return skel->progs.uprobe_multi_0;
+ case 1: return skel->progs.uprobe_multi_1;
+ case 2: return skel->progs.uprobe_multi_2;
+ }
+ return NULL;
+}
+
+#define TASKS 3
+
+static void run_pid_filter(struct uprobe_multi_pid_filter *skel, bool clone_vm, bool retprobe)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .retprobe = retprobe);
+ struct bpf_link *link[TASKS] = {};
+ struct child child[TASKS] = {};
+ int i;
+
+ memset(skel->bss->test, 0, sizeof(skel->bss->test));
+
+ for (i = 0; i < TASKS; i++) {
+ if (!ASSERT_OK(spawn_child_flag(&child[i], clone_vm), "spawn_child"))
+ goto cleanup;
+ skel->bss->pids[i] = child[i].pid;
+ }
+
+ for (i = 0; i < TASKS; i++) {
+ link[i] = bpf_program__attach_uprobe_multi(uprobe_multi_program(skel, i),
+ child[i].pid, "/proc/self/exe",
+ "uprobe_multi_func_1", &opts);
+ if (!ASSERT_OK_PTR(link[i], "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+ }
+
+ for (i = 0; i < TASKS; i++)
+ kick_child(&child[i]);
+
+ for (i = 0; i < TASKS; i++) {
+ ASSERT_EQ(skel->bss->test[i][0], 1, "pid");
+ ASSERT_EQ(skel->bss->test[i][1], 0, "unknown");
+ }
+
+cleanup:
+ for (i = 0; i < TASKS; i++)
+ bpf_link__destroy(link[i]);
+ for (i = 0; i < TASKS; i++)
+ release_child(&child[i]);
+}
+
+static void test_pid_filter_process(bool clone_vm)
+{
+ struct uprobe_multi_pid_filter *skel;
+
+ skel = uprobe_multi_pid_filter__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_pid_filter__open_and_load"))
return;
- __test_link_api(child);
+ run_pid_filter(skel, clone_vm, false);
+ run_pid_filter(skel, clone_vm, true);
+
+ uprobe_multi_pid_filter__destroy(skel);
}
static void test_bench_attach_uprobe(void)
@@ -703,4 +1104,12 @@ void test_uprobe_multi_test(void)
test_bench_attach_usdt();
if (test__start_subtest("attach_api_fails"))
test_attach_api_fails();
+ if (test__start_subtest("attach_uprobe_fails"))
+ test_attach_uprobe_fails();
+ if (test__start_subtest("consumers"))
+ test_consumers();
+ if (test__start_subtest("filter_fork"))
+ test_pid_filter_process(false);
+ if (test__start_subtest("filter_clone_vm"))
+ test_pid_filter_process(true);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
index e51721df14fc..d424e7ecbd12 100644
--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
@@ -4,6 +4,7 @@
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <linux/ring_buffer.h>
+#include <linux/build_bug.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
@@ -642,7 +643,7 @@ static void test_user_ringbuf_blocking_reserve(void)
if (!ASSERT_EQ(err, 0, "deferred_kick_thread\n"))
goto cleanup;
- /* After spawning another thread that asychronously kicks the kernel to
+ /* After spawning another thread that asynchronously kicks the kernel to
* drain the messages, we're able to block and successfully get a
* sample once we receive an event notification.
*/
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 9dc3687bc406..e26b5150fc43 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -21,6 +21,7 @@
#include "verifier_cgroup_inv_retcode.skel.h"
#include "verifier_cgroup_skb.skel.h"
#include "verifier_cgroup_storage.skel.h"
+#include "verifier_const.skel.h"
#include "verifier_const_or.skel.h"
#include "verifier_ctx.skel.h"
#include "verifier_ctx_sk_msg.skel.h"
@@ -39,6 +40,7 @@
#include "verifier_int_ptr.skel.h"
#include "verifier_iterating_callbacks.skel.h"
#include "verifier_jeq_infer_not_null.skel.h"
+#include "verifier_jit_convergence.skel.h"
#include "verifier_ld_ind.skel.h"
#include "verifier_ldsx.skel.h"
#include "verifier_leak_ptr.skel.h"
@@ -53,6 +55,7 @@
#include "verifier_movsx.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
+#include "verifier_bpf_fastcall.skel.h"
#include "verifier_or_jmp32_k.skel.h"
#include "verifier_precision.skel.h"
#include "verifier_prevent_map_lookup.skel.h"
@@ -74,6 +77,7 @@
#include "verifier_stack_ptr.skel.h"
#include "verifier_subprog_precision.skel.h"
#include "verifier_subreg.skel.h"
+#include "verifier_tailcall_jit.skel.h"
#include "verifier_typedef.skel.h"
#include "verifier_uninit.skel.h"
#include "verifier_unpriv.skel.h"
@@ -84,10 +88,13 @@
#include "verifier_value_or_null.skel.h"
#include "verifier_value_ptr_arith.skel.h"
#include "verifier_var_off.skel.h"
+#include "verifier_vfs_accept.skel.h"
+#include "verifier_vfs_reject.skel.h"
#include "verifier_xadd.skel.h"
#include "verifier_xdp.skel.h"
#include "verifier_xdp_direct_packet_access.skel.h"
#include "verifier_bits_iter.skel.h"
+#include "verifier_lsm.skel.h"
#define MAX_ENTRIES 11
@@ -140,6 +147,7 @@ void test_verifier_cfg(void) { RUN(verifier_cfg); }
void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); }
+void test_verifier_const(void) { RUN(verifier_const); }
void test_verifier_const_or(void) { RUN(verifier_const_or); }
void test_verifier_ctx(void) { RUN(verifier_ctx); }
void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); }
@@ -158,6 +166,7 @@ void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access
void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); }
void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
+void test_verifier_jit_convergence(void) { RUN(verifier_jit_convergence); }
void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
@@ -172,6 +181,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
void test_verifier_movsx(void) { RUN(verifier_movsx); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_bpf_fastcall(void) { RUN(verifier_bpf_fastcall); }
void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); }
void test_verifier_precision(void) { RUN(verifier_precision); }
void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
@@ -193,6 +203,7 @@ void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
+void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); }
void test_verifier_typedef(void) { RUN(verifier_typedef); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
@@ -202,10 +213,13 @@ void test_verifier_value(void) { RUN(verifier_value); }
void test_verifier_value_illegal_alu(void) { RUN(verifier_value_illegal_alu); }
void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); }
void test_verifier_var_off(void) { RUN(verifier_var_off); }
+void test_verifier_vfs_accept(void) { RUN(verifier_vfs_accept); }
+void test_verifier_vfs_reject(void) { RUN(verifier_vfs_reject); }
void test_verifier_xadd(void) { RUN(verifier_xadd); }
void test_verifier_xdp(void) { RUN(verifier_xdp); }
void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
+void test_verifier_lsm(void) { RUN(verifier_lsm); }
static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
index bb0acd79d28a..40dd57fca5cc 100644
--- a/tools/testing/selftests/bpf/progs/arena_atomics.c
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -4,6 +4,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
+#include <stdatomic.h>
#include "bpf_arena_common.h"
struct {
@@ -77,8 +78,13 @@ int sub(const void *ctx)
return 0;
}
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
+_Atomic __u32 __arena_global and32_value = 0x110;
+#else
__u64 __arena_global and64_value = (0x110ull << 32);
__u32 __arena_global and32_value = 0x110;
+#endif
SEC("raw_tp/sys_enter")
int and(const void *ctx)
@@ -86,16 +92,25 @@ int and(const void *ctx)
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
-
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
+#else
__sync_fetch_and_and(&and64_value, 0x011ull << 32);
__sync_fetch_and_and(&and32_value, 0x011);
#endif
+#endif
return 0;
}
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u32 __arena_global or32_value = 0x110;
+_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
+#else
__u32 __arena_global or32_value = 0x110;
__u64 __arena_global or64_value = (0x110ull << 32);
+#endif
SEC("raw_tp/sys_enter")
int or(const void *ctx)
@@ -103,15 +118,25 @@ int or(const void *ctx)
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
+#else
__sync_fetch_and_or(&or64_value, 0x011ull << 32);
__sync_fetch_and_or(&or32_value, 0x011);
#endif
+#endif
return 0;
}
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
+_Atomic __u32 __arena_global xor32_value = 0x110;
+#else
__u64 __arena_global xor64_value = (0x110ull << 32);
__u32 __arena_global xor32_value = 0x110;
+#endif
SEC("raw_tp/sys_enter")
int xor(const void *ctx)
@@ -119,9 +144,14 @@ int xor(const void *ctx)
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
#ifdef ENABLE_ATOMICS_TESTS
+#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
+ __c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
+ __c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
+#else
__sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
__sync_fetch_and_xor(&xor32_value, 0x011);
#endif
+#endif
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index d665b8a15cc4..f089faa97ae6 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* WARNING: This implemenation is not necessarily the same
+/* WARNING: This implementation is not necessarily the same
* as the tcp_cubic.c. The purpose is mainly for testing
* the kernel BPF logic.
*
@@ -314,7 +314,7 @@ static void bictcp_update(struct bpf_bictcp *ca, __u32 cwnd, __u32 acked)
* (so time^3 is done by using 64 bit)
* and without the support of division of 64bit numbers
* (so all divisions are done by using 32 bit)
- * also NOTE the unit of those veriables
+ * also NOTE the unit of those variables
* time = (t - K) / 2^bictcp_HZ
* c = bic_scale >> 10
* rtt = (srtt >> 3) / HZ
@@ -507,7 +507,7 @@ void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
__u32 delay;
bpf_cubic_acked_called = 1;
- /* Some calls are for duplicates without timetamps */
+ /* Some calls are for duplicates without timestamps */
if (sample->rtt_us < 0)
return;
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 02f552e7fd4d..7cd73e75f52a 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -26,7 +26,7 @@ static bool before(__u32 seq1, __u32 seq2)
char _license[] SEC("license") = "GPL";
-volatile const char fallback[TCP_CA_NAME_MAX];
+volatile const char fallback_cc[TCP_CA_NAME_MAX];
const char bpf_dctcp[] = "bpf_dctcp";
const char tcp_cdg[] = "cdg";
char cc_res[TCP_CA_NAME_MAX];
@@ -71,10 +71,10 @@ void BPF_PROG(bpf_dctcp_init, struct sock *sk)
struct bpf_dctcp *ca = inet_csk_ca(sk);
int *stg;
- if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) {
+ if (!(tp->ecn_flags & TCP_ECN_OK) && fallback_cc[0]) {
/* Switch to fallback */
if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
- (void *)fallback, sizeof(fallback)) == -EBUSY)
+ (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY)
ebusy_cnt++;
/* Switch back to myself and the recurred bpf_dctcp_init()
@@ -87,7 +87,7 @@ void BPF_PROG(bpf_dctcp_init, struct sock *sk)
/* Switch back to fallback */
if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
- (void *)fallback, sizeof(fallback)) == -EBUSY)
+ (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY)
ebusy_cnt++;
/* Expecting -ENOTSUPP for tcp_cdg_res */
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index 81097a3f15eb..eccaf955e394 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -2,6 +2,9 @@
#ifndef __BPF_MISC_H__
#define __BPF_MISC_H__
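+/* Stringify helpers. Used below to embed __COUNTER__ into the decl tags,
+ * so duplicate __msg/__xlated/__jited patterns remain distinct BTF decl
+ * tags and keep their declaration order.
+ */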
+#define XSTR(s) STR(s)
+#define STR(s) #s
+
/* This set of attributes controls behavior of the
* test_loader.c:test_loader__run_subtests().
*
@@ -22,10 +25,49 @@
*
* __msg Message expected to be found in the verifier log.
* Multiple __msg attributes could be specified.
+ * To match a regular expression, use "{{" "}}" brackets,
+ * e.g. "foo{{[0-9]+}}" matches strings like "foo007".
+ * Extended POSIX regular expression syntax is allowed
+ * inside the brackets.
* __msg_unpriv Same as __msg but for unprivileged mode.
*
- * __regex Same as __msg, but using a regular expression.
- * __regex_unpriv Same as __msg_unpriv but using a regular expression.
+ * __xlated Expect a line in a disassembly log after verifier applies rewrites.
+ * Multiple __xlated attributes could be specified.
+ * Regular expressions can be specified the same way as in __msg.
+ * __xlated_unpriv Same as __xlated but for unprivileged mode.
+ *
+ * __jited Match a line in a disassembly of the jited BPF program.
+ * Has to be used after __arch_* macro.
+ * For example:
+ *
+ * __arch_x86_64
+ * __jited(" endbr64")
+ * __jited(" nopl (%rax,%rax)")
+ * __jited(" xorq %rax, %rax")
+ * ...
+ * __naked void some_test(void)
+ * {
+ * asm volatile (... ::: __clobber_all);
+ * }
+ *
+ * Regular expressions can be included in patterns the same way
+ * as in __msg.
+ *
+ * By default, each pattern is expected to match on the
+ * next consecutive line of the disassembly, e.g.:
+ *
+ * __jited(" endbr64") # matched on line N
+ * __jited(" nopl (%rax,%rax)") # matched on line N+1
+ *
+ * If a match occurs on the wrong line, an error is reported.
+ * To override this behaviour, use the literal "...", e.g.:
+ *
+ * __jited(" endbr64") # matched on line N
+ * __jited("...") # not matched
+ * __jited(" nopl (%rax,%rax)") # matched on any line >= N
+ *
+ * __jited_unpriv Same as __jited but for unprivileged mode.
+ *
*
* __success Expect program load success in privileged mode.
* __success_unpriv Expect program load success in unprivileged mode.
@@ -60,14 +102,20 @@
* __auxiliary Annotated program is not a separate test, but used as auxiliary
* for some other test cases and should always be loaded.
* __auxiliary_unpriv Same, but load program in unprivileged mode.
+ *
+ * __arch_* Specify on which architecture the test case should be tested.
+ * Several __arch_* annotations could be specified at once.
+ * When a test case is not run on the current arch, it is marked as skipped.
*/
-#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
-#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex)))
+#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg)))
+#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg)))
+#define __jited(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
#define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
-#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg)))
-#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex)))
+#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg)))
+#define __jited_unpriv(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
@@ -77,6 +125,10 @@
#define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary")))
#define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv")))
#define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path)))
+#define __arch(arch) __attribute__((btf_decl_tag("comment:test_arch=" arch)))
+#define __arch_x86_64 __arch("X86_64")
+#define __arch_arm64 __arch("ARM64")
+#define __arch_riscv64 __arch("RISCV64")
/* Convenience macro for use with 'asm volatile' blocks */
#define __naked __attribute__((naked))
diff --git a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
index 1a476d8ed354..9e7d9674ce2a 100644
--- a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
+++ b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
@@ -43,9 +43,7 @@ int BPF_KPROBE(handle_sys_prctl)
/* test for PT_REGS_PARM */
-#if !defined(bpf_target_arm64) && !defined(bpf_target_s390)
bpf_probe_read_kernel(&tmp, sizeof(tmp), &PT_REGS_PARM1_SYSCALL(real_regs));
-#endif
arg1 = tmp;
bpf_probe_read_kernel(&arg2, sizeof(arg2), &PT_REGS_PARM2_SYSCALL(real_regs));
bpf_probe_read_kernel(&arg3, sizeof(arg3), &PT_REGS_PARM3_SYSCALL(real_regs));
diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
index a0778fe7857a..41d59f0ee606 100644
--- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h
+++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
@@ -3,8 +3,6 @@
#ifndef __PROGS_CG_STORAGE_MULTI_H
#define __PROGS_CG_STORAGE_MULTI_H
-#include <asm/types.h>
-
struct cgroup_value {
__u32 egress_pkts;
__u32 ingress_pkts;
diff --git a/tools/testing/selftests/bpf/progs/cgroup_ancestor.c b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c
new file mode 100644
index 000000000000..8c2deb4fc493
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_tracing_net.h"
+#define NUM_CGROUP_LEVELS 4
+
+__u64 cgroup_ids[NUM_CGROUP_LEVELS];
+__u16 dport;
+
+static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
+{
+ /* Passing &level to an external function that may change it is
+ * incompatible with loop unrolling.
+ */
+ cgroup_ids[level] = bpf_skb_ancestor_cgroup_id(skb, level);
+}
+
+SEC("tc")
+int log_cgroup_id(struct __sk_buff *skb)
+{
+ struct sock *sk = (void *)skb->sk;
+
+ if (!sk)
+ return TC_ACT_OK;
+
+ sk = bpf_core_cast(sk, struct sock);
+ if (sk->sk_protocol == IPPROTO_UDP && sk->sk_dport == dport) {
+ log_nth_level(skb, 0);
+ log_nth_level(skb, 1);
+ log_nth_level(skb, 2);
+ log_nth_level(skb, 3);
+ }
+
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cgroup_storage.c b/tools/testing/selftests/bpf/progs/cgroup_storage.c
new file mode 100644
index 000000000000..db1e4d2d3281
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgroup_storage.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, __u64);
+} cgroup_storage SEC(".maps");
+
+SEC("cgroup_skb/egress")
+int bpf_prog(struct __sk_buff *skb)
+{
+ __u64 *counter;
+
+ counter = bpf_get_local_storage(&cgroup_storage, 0);
+ __sync_fetch_and_add(counter, 1);
+
+ /* Drop one out of every two packets */
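+ /* For cgroup_skb programs, returning 1 allows the packet and 0 drops it. */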
+ return (*counter & 1);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c
index 79b54a4fa244..c1dfbd2b56fc 100644
--- a/tools/testing/selftests/bpf/progs/dev_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c
@@ -41,14 +41,14 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
bpf_trace_printk(fmt, sizeof(fmt), ctx->major, ctx->minor);
#endif
- /* Allow access to /dev/zero and /dev/random.
+ /* Allow access to /dev/null and /dev/urandom.
* Forbid everything else.
*/
if (ctx->major != 1 || type != BPF_DEVCG_DEV_CHAR)
return 0;
switch (ctx->minor) {
- case 5: /* 1:5 /dev/zero */
+ case 3: /* 1:3 /dev/null */
case 9: /* 1:9 /dev/urandom */
return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index c3bc186af21e..8f36c9de7591 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -965,7 +965,7 @@ int dynptr_invalidate_slice_reinit(void *ctx)
* mem_or_null pointers.
*/
SEC("?raw_tp")
-__failure __regex("R[0-9]+ type=scalar expected=percpu_ptr_")
+__failure __msg("R{{[0-9]+}} type=scalar expected=percpu_ptr_")
int dynptr_invalidate_slice_or_null(void *ctx)
{
struct bpf_dynptr ptr;
@@ -983,7 +983,7 @@ int dynptr_invalidate_slice_or_null(void *ctx)
/* Destruction of dynptr should also invalidate any slices obtained from it */
SEC("?raw_tp")
-__failure __regex("R[0-9]+ invalid mem access 'scalar'")
+__failure __msg("R{{[0-9]+}} invalid mem access 'scalar'")
int dynptr_invalidate_slice_failure(void *ctx)
{
struct bpf_dynptr ptr1;
@@ -1070,7 +1070,7 @@ int dynptr_read_into_slot(void *ctx)
/* bpf_dynptr_slice()s are read-only and cannot be written to */
SEC("?tc")
-__failure __regex("R[0-9]+ cannot write into rdonly_mem")
+__failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
int skb_invalid_slice_write(struct __sk_buff *skb)
{
struct bpf_dynptr ptr;
diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c
new file mode 100644
index 000000000000..33d3a57bee90
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/epilogue_exit.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: r1 = *(u64 *)(r1 +0)")
+__xlated("2: r2 = *(u64 *)(r1 +0)")
+__xlated("3: r3 = 0")
+__xlated("4: r4 = 1")
+__xlated("5: if r2 == 0x0 goto pc+10")
+__xlated("6: r0 = 0")
+__xlated("7: *(u64 *)(r1 +0) = r3")
+/* epilogue */
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+__xlated("9: r1 = *(u64 *)(r1 +0)")
+__xlated("10: r6 = *(u64 *)(r1 +0)")
+__xlated("11: r6 += 10000")
+__xlated("12: *(u64 *)(r1 +0) = r6")
+__xlated("13: r0 = r6")
+__xlated("14: r0 *= 2")
+__xlated("15: exit")
+/* 2nd part of the main prog after the first exit */
+__xlated("16: *(u64 *)(r1 +0) = r4")
+__xlated("17: r0 = 1")
+/* Clear r1 so that an off-by-one jump target would be caught:
+ * the gotol must land exactly at the beginning of the epilogue,
+ * which re-initializes r1 with the ctx ptr.
+ */
+__xlated("18: r1 = 0")
+__xlated("19: gotol pc-12")
+SEC("struct_ops/test_epilogue_exit")
+__naked int test_epilogue_exit(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r2 = *(u64 *)(r1 +0);"
+ "r3 = 0;"
+ "r4 = 1;"
+ "if r2 == 0 goto +3;"
+ "r0 = 0;"
+ "*(u64 *)(r1 + 0) = r3;"
+ "exit;"
+ "*(u64 *)(r1 + 0) = r4;"
+ "r0 = 1;"
+ "r1 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_exit = {
+ .test_epilogue = (void *)test_epilogue_exit,
+};
+
+SEC("syscall")
+__retval(20000)
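+/* a = 1 path: the prog zeroes args->a, so the epilogue returns (0 + EPILOGUE_A [10000]) * 2 */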
+int syscall_epilogue_exit0(void *ctx)
+{
+ struct st_ops_args args = { .a = 1 };
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(20002)
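+/* a = 0 path: the prog sets args->a to 1, so the epilogue returns (1 + EPILOGUE_A [10000]) * 2 */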
+int syscall_epilogue_exit1(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
diff --git a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
new file mode 100644
index 000000000000..7275dd594de0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+ args->a += 1;
+ return args->a;
+}
+
+SEC("struct_ops/test_epilogue_subprog")
+int BPF_PROG(test_epilogue_subprog, struct st_ops_args *args)
+{
+ subprog(args);
+ return args->a;
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __array(values, void (void));
+} epilogue_map SEC(".maps") = {
+ .values = {
+ [0] = (void *)&test_epilogue_subprog,
+ }
+};
+
+SEC("struct_ops/test_epilogue_tailcall")
+int test_epilogue_tailcall(unsigned long long *ctx)
+{
+ bpf_tail_call(ctx, &epilogue_map, 0);
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_tailcall = {
+ .test_epilogue = (void *)test_epilogue_tailcall,
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_subprog = {
+ .test_epilogue = (void *)test_epilogue_subprog,
+};
+
+SEC("syscall")
+int syscall_epilogue_tailcall(struct st_ops_args *args)
+{
+ return bpf_kfunc_st_ops_test_epilogue(args);
+}
diff --git a/tools/testing/selftests/bpf/progs/err.h b/tools/testing/selftests/bpf/progs/err.h
index d66d283d9e59..38529779a236 100644
--- a/tools/testing/selftests/bpf/progs/err.h
+++ b/tools/testing/selftests/bpf/progs/err.h
@@ -5,6 +5,16 @@
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) (unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO
+#define __STR(x) #x
+
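+/*
+ * Overwrite @x with @y unless @x already holds zero or a valid negative
+ * errno (-4095..-1). Written in inline asm so the verifier keeps an exact
+ * view of @x's range. Typical use (see test_sig_in_xattr.c below):
+ *     set_if_not_errno_or_zero(ret, -EFAULT);
+ */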
+#define set_if_not_errno_or_zero(x, y) \
+({ \
+ asm volatile ("if %0 s< -4095 goto +1\n" \
+ "if %0 s<= 0 goto +1\n" \
+ "%0 = " __STR(y) "\n" \
+ : "+r"(x)); \
+})
+
static inline int IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
diff --git a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
index 68587b1de34e..30fd504856c7 100644
--- a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
@@ -4,34 +4,16 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
- __type(key, __u32);
- __type(value, __u64);
-} cg_ids SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, 1);
- __type(key, __u32);
- __type(value, __u32);
-} pidmap SEC(".maps");
+__u64 cg_id;
+__u64 expected_pid;
SEC("tracepoint/syscalls/sys_enter_nanosleep")
int trace(void *ctx)
{
__u32 pid = bpf_get_current_pid_tgid();
- __u32 key = 0, *expected_pid;
- __u64 *val;
-
- expected_pid = bpf_map_lookup_elem(&pidmap, &key);
- if (!expected_pid || *expected_pid != pid)
- return 0;
- val = bpf_map_lookup_elem(&cg_ids, &key);
- if (val)
- *val = bpf_get_current_cgroup_id();
+ if (expected_pid == pid)
+ cg_id = bpf_get_current_cgroup_id();
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c
new file mode 100644
index 000000000000..df1d3db60b1b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_testmod.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include "bpf_experimental.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("raw_tp/sys_enter")
+__success
+int iter_next_trusted(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task_vma vma_it;
+ struct vm_area_struct *vma_ptr;
+
+ bpf_iter_task_vma_new(&vma_it, cur_task, 0);
+
+ vma_ptr = bpf_iter_task_vma_next(&vma_it);
+ if (vma_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_vma_test(vma_ptr);
+out:
+ bpf_iter_task_vma_destroy(&vma_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int iter_next_trusted_or_null(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task_vma vma_it;
+ struct vm_area_struct *vma_ptr;
+
+ bpf_iter_task_vma_new(&vma_it, cur_task, 0);
+
+ vma_ptr = bpf_iter_task_vma_next(&vma_it);
+
+ bpf_kfunc_trusted_vma_test(vma_ptr);
+
+ bpf_iter_task_vma_destroy(&vma_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__success
+int iter_next_rcu(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+ if (task_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_rcu_task_test(task_ptr);
+out:
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int iter_next_rcu_or_null(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+
+ bpf_kfunc_rcu_task_test(task_ptr);
+
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("R1 must be referenced or trusted")
+int iter_next_rcu_not_trusted(const void *ctx)
+{
+ struct task_struct *cur_task = bpf_get_current_task_btf();
+ struct bpf_iter_task task_it;
+ struct task_struct *task_ptr;
+
+ bpf_iter_task_new(&task_it, cur_task, 0);
+
+ task_ptr = bpf_iter_task_next(&task_it);
+ if (task_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_task_test(task_ptr);
+out:
+ bpf_iter_task_destroy(&task_it);
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+__failure __msg("R1 cannot write into rdonly_mem")
+/* Message should not be 'R1 cannot write into rdonly_trusted_mem' */
+int iter_next_ptr_mem_not_trusted(const void *ctx)
+{
+ struct bpf_iter_num num_it;
+ int *num_ptr;
+
+ bpf_iter_num_new(&num_it, 0, 10);
+
+ num_ptr = bpf_iter_num_next(&num_it);
+ if (num_ptr == NULL)
+ goto out;
+
+ bpf_kfunc_trusted_num_test(num_ptr);
+out:
+ bpf_iter_num_destroy(&num_it);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
index 3873fb6c292a..4a176e6aede8 100644
--- a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
+++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c
@@ -12,6 +12,7 @@ struct bpf_iter_testmod_seq {
extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym;
extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym;
+extern s64 bpf_iter_testmod_seq_value(int blah, struct bpf_iter_testmod_seq *it) __ksym;
extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym;
const volatile __s64 exp_empty = 0 + 1;
@@ -76,4 +77,53 @@ int testmod_seq_truncated(const void *ctx)
return 0;
}
+SEC("?raw_tp")
+__failure
+__msg("expected an initialized iter_testmod_seq as arg #2")
+int testmod_seq_getter_before_bad(const void *ctx)
+{
+ struct bpf_iter_testmod_seq it;
+
+ return bpf_iter_testmod_seq_value(0, &it);
+}
+
+SEC("?raw_tp")
+__failure
+__msg("expected an initialized iter_testmod_seq as arg #2")
+int testmod_seq_getter_after_bad(const void *ctx)
+{
+ struct bpf_iter_testmod_seq it;
+ s64 sum = 0, *v;
+
+ bpf_iter_testmod_seq_new(&it, 100, 100);
+
+ while ((v = bpf_iter_testmod_seq_next(&it))) {
+ sum += *v;
+ }
+
+ bpf_iter_testmod_seq_destroy(&it);
+
+ return sum + bpf_iter_testmod_seq_value(0, &it);
+}
+
+SEC("?socket")
+__success __retval(1000000)
+int testmod_seq_getter_good(const void *ctx)
+{
+ struct bpf_iter_testmod_seq it;
+ s64 sum = 0, *v;
+
+ bpf_iter_testmod_seq_new(&it, 100, 100);
+
+ while ((v = bpf_iter_testmod_seq_next(&it))) {
+ sum += *v;
+ }
+
+ sum *= bpf_iter_testmod_seq_value(0, &it);
+
+ bpf_iter_testmod_seq_destroy(&it);
+
+ return sum;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
index 4b0b7b79cdfb..08fae306539c 100644
--- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
+++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c
@@ -150,4 +150,11 @@ int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb)
return ret;
}
+SEC("?tc")
+int kfunc_call_test_pointer_arg_type_mismatch(struct __sk_buff *skb)
+{
+ bpf_kfunc_call_test_pass_ctx((void *)10);
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
index 75043ffc5dad..b092a72b2c9d 100644
--- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
@@ -8,9 +8,12 @@
#include "../bpf_experimental.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
+struct plain_local;
+
struct node_data {
long key;
long data;
+ struct plain_local __kptr * stashed_in_local_kptr;
struct bpf_rb_node node;
};
@@ -85,6 +88,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
static int create_and_stash(int idx, int val)
{
+ struct plain_local *inner_local_kptr;
struct map_value *mapval;
struct node_data *res;
@@ -92,11 +96,25 @@ static int create_and_stash(int idx, int val)
if (!mapval)
return 1;
+ inner_local_kptr = bpf_obj_new(typeof(*inner_local_kptr));
+ if (!inner_local_kptr)
+ return 2;
+
res = bpf_obj_new(typeof(*res));
- if (!res)
- return 1;
+ if (!res) {
+ bpf_obj_drop(inner_local_kptr);
+ return 3;
+ }
res->key = val;
+ inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr);
+ if (inner_local_kptr) {
+ /* Should never happen, we just obj_new'd res */
+ bpf_obj_drop(inner_local_kptr);
+ bpf_obj_drop(res);
+ return 4;
+ }
+
res = bpf_kptr_xchg(&mapval->node, res);
if (res)
bpf_obj_drop(res);
@@ -169,6 +187,7 @@ long stash_local_with_root(void *ctx)
SEC("tc")
long unstash_rb_node(void *ctx)
{
+ struct plain_local *inner_local_kptr = NULL;
struct map_value *mapval;
struct node_data *res;
long retval;
@@ -180,6 +199,13 @@ long unstash_rb_node(void *ctx)
res = bpf_kptr_xchg(&mapval->node, NULL);
if (res) {
+ inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr);
+ if (!inner_local_kptr) {
+ bpf_obj_drop(res);
+ return 1;
+ }
+ bpf_obj_drop(inner_local_kptr);
+
retval = res->key;
bpf_obj_drop(res);
return retval;
diff --git a/tools/testing/selftests/bpf/progs/lsm_tailcall.c b/tools/testing/selftests/bpf/progs/lsm_tailcall.c
new file mode 100644
index 000000000000..49c075ce2d4c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_tailcall.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Huawei Technologies Co., Ltd */
+
+#include "vmlinux.h"
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("lsm/file_permission")
+int lsm_file_permission_prog(void *ctx)
+{
+ return 0;
+}
+
+SEC("lsm/file_alloc_security")
+int lsm_file_alloc_security_prog(void *ctx)
+{
+ return 0;
+}
+
+SEC("lsm/file_alloc_security")
+int lsm_file_alloc_security_entry(void *ctx)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/mmap_inner_array.c b/tools/testing/selftests/bpf/progs/mmap_inner_array.c
new file mode 100644
index 000000000000..90aacbc2938a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mmap_inner_array.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct inner_array_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 1);
+} inner_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+ __array(values, struct inner_array_type);
+} outer_map SEC(".maps");
+
+int pid = 0;
+__u64 match_value = 0x13572468;
+bool done = false;
+bool pid_match = false;
+bool outer_map_match = false;
+
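+/* Once the target pid enters nanosleep, write match_value into the
+ * mmap-able inner array; the user-space side of the test (not shown in
+ * this diff) presumably reads it back through the mmap'ed region.
+ */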
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_inner_array(void *ctx)
+{
+ __u32 curr_pid, zero = 0;
+ struct bpf_map *map;
+ __u64 *value;
+
+ curr_pid = (u32)bpf_get_current_pid_tgid();
+ if (done || curr_pid != pid)
+ return 0;
+
+ pid_match = true;
+ map = bpf_map_lookup_elem(&outer_map, &curr_pid);
+ if (!map)
+ return 0;
+
+ outer_map_match = true;
+ value = bpf_map_lookup_elem(map, &zero);
+ if (!value)
+ return 0;
+
+ *value = match_value;
+ done = true;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/nested_acquire.c b/tools/testing/selftests/bpf/progs/nested_acquire.c
new file mode 100644
index 000000000000..8e521a21d995
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/nested_acquire.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("tp_btf/tcp_probe")
+__success
+int BPF_PROG(test_nested_acquire_nonzero, struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *ptr;
+
+ ptr = bpf_kfunc_nested_acquire_nonzero_offset_test(&sk->sk_write_queue);
+
+ bpf_kfunc_nested_release_test(ptr);
+ return 0;
+}
+
+SEC("tp_btf/tcp_probe")
+__success
+int BPF_PROG(test_nested_acquire_zero, struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *ptr;
+
+ ptr = bpf_kfunc_nested_acquire_zero_offset_test(&sk->__sk_common);
+
+ bpf_kfunc_nested_release_test(ptr);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue.c b/tools/testing/selftests/bpf/progs/pro_epilogue.c
new file mode 100644
index 000000000000..44bc3f06b4b6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+void __kfunc_btf_root(void)
+{
+ bpf_kfunc_st_ops_inc10(NULL);
+}
+
+static __noinline __used int subprog(struct st_ops_args *args)
+{
+ args->a += 1;
+ return args->a;
+}
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* main prog */
+__xlated("4: r1 = *(u64 *)(r1 +0)")
+__xlated("5: r6 = r1")
+__xlated("6: call kernel-function")
+__xlated("7: r1 = r6")
+__xlated("8: call pc+1")
+__xlated("9: exit")
+SEC("struct_ops/test_prologue")
+__naked int test_prologue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: r1 = *(u64 *)(r1 +0)")
+__xlated("2: r6 = r1")
+__xlated("3: call kernel-function")
+__xlated("4: r1 = r6")
+__xlated("5: call pc+")
+/* epilogue */
+__xlated("6: r1 = *(u64 *)(r10 -8)")
+__xlated("7: r1 = *(u64 *)(r1 +0)")
+__xlated("8: r6 = *(u64 *)(r1 +0)")
+__xlated("9: r6 += 10000")
+__xlated("10: *(u64 *)(r1 +0) = r6")
+__xlated("11: r0 = r6")
+__xlated("12: r0 *= 2")
+__xlated("13: exit")
+SEC("struct_ops/test_epilogue")
+__naked int test_epilogue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* save __u64 *ctx to stack */
+__xlated("4: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("5: r1 = *(u64 *)(r1 +0)")
+__xlated("6: r6 = r1")
+__xlated("7: call kernel-function")
+__xlated("8: r1 = r6")
+__xlated("9: call pc+")
+/* epilogue */
+__xlated("10: r1 = *(u64 *)(r10 -8)")
+__xlated("11: r1 = *(u64 *)(r1 +0)")
+__xlated("12: r6 = *(u64 *)(r1 +0)")
+__xlated("13: r6 += 10000")
+__xlated("14: *(u64 *)(r1 +0) = r6")
+__xlated("15: r0 = r6")
+__xlated("16: r0 *= 2")
+__xlated("17: exit")
+SEC("struct_ops/test_pro_epilogue")
+__naked int test_pro_epilogue(void)
+{
+ asm volatile (
+ "r1 = *(u64 *)(r1 +0);"
+ "r6 = r1;"
+ "call %[bpf_kfunc_st_ops_inc10];"
+ "r1 = r6;"
+ "call subprog;"
+ "exit;"
+ :
+ : __imm(bpf_kfunc_st_ops_inc10)
+ : __clobber_all);
+}
+
+SEC("syscall")
+__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */
+int syscall_prologue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_prologue(&args);
+}
+
+SEC("syscall")
+__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_epilogue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue = {
+ .test_prologue = (void *)test_prologue,
+ .test_epilogue = (void *)test_epilogue,
+ .test_pro_epilogue = (void *)test_pro_epilogue,
+};
diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
new file mode 100644
index 000000000000..3529e53be355
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* main prog */
+__xlated("4: if r1 == 0x0 goto pc+5")
+__xlated("5: if r1 == 0x1 goto pc+2")
+__xlated("6: r1 = 1")
+__xlated("7: goto pc-3")
+__xlated("8: r1 = 0")
+__xlated("9: goto pc-6")
+__xlated("10: r0 = 0")
+__xlated("11: exit")
+SEC("struct_ops/test_prologue_goto_start")
+__naked int test_prologue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: if r1 == 0x0 goto pc+5")
+__xlated("2: if r1 == 0x1 goto pc+2")
+__xlated("3: r1 = 1")
+__xlated("4: goto pc-3")
+__xlated("5: r1 = 0")
+__xlated("6: goto pc-6")
+__xlated("7: r0 = 0")
+/* epilogue */
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+__xlated("9: r1 = *(u64 *)(r1 +0)")
+__xlated("10: r6 = *(u64 *)(r1 +0)")
+__xlated("11: r6 += 10000")
+__xlated("12: *(u64 *)(r1 +0) = r6")
+__xlated("13: r0 = r6")
+__xlated("14: r0 *= 2")
+__xlated("15: exit")
+SEC("struct_ops/test_epilogue_goto_start")
+__naked int test_epilogue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+__success
+/* prologue */
+__xlated("0: r6 = *(u64 *)(r1 +0)")
+__xlated("1: r7 = *(u64 *)(r6 +0)")
+__xlated("2: r7 += 1000")
+__xlated("3: *(u64 *)(r6 +0) = r7")
+/* save __u64 *ctx to stack */
+__xlated("4: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("5: if r1 == 0x0 goto pc+5")
+__xlated("6: if r1 == 0x1 goto pc+2")
+__xlated("7: r1 = 1")
+__xlated("8: goto pc-3")
+__xlated("9: r1 = 0")
+__xlated("10: goto pc-6")
+__xlated("11: r0 = 0")
+/* epilogue */
+__xlated("12: r1 = *(u64 *)(r10 -8)")
+__xlated("13: r1 = *(u64 *)(r1 +0)")
+__xlated("14: r6 = *(u64 *)(r1 +0)")
+__xlated("15: r6 += 10000")
+__xlated("16: *(u64 *)(r1 +0) = r6")
+__xlated("17: r0 = r6")
+__xlated("18: r0 *= 2")
+__xlated("19: exit")
+SEC("struct_ops/test_pro_epilogue_goto_start")
+__naked int test_pro_epilogue_goto_start(void)
+{
+ asm volatile (
+ "if r1 == 0 goto +5;"
+ "if r1 == 1 goto +2;"
+ "r1 = 1;"
+ "goto -3;"
+ "r1 = 0;"
+ "goto -6;"
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_goto_start = {
+ .test_prologue = (void *)test_prologue_goto_start,
+ .test_epilogue = (void *)test_epilogue_goto_start,
+ .test_pro_epilogue = (void *)test_pro_epilogue_goto_start,
+};
+
+SEC("syscall")
+__retval(0)
+int syscall_prologue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_prologue(&args);
+}
+
+SEC("syscall")
+__retval(20000) /* (EPILOGUE_A [10000]) * 2 */
+int syscall_epilogue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(22000) /* (PROLOGUE_A [1000] + EPILOGUE_A [10000]) * 2 */
+int syscall_pro_epilogue_goto_start(void *ctx)
+{
+ struct st_ops_args args = {};
+
+ return bpf_kfunc_st_ops_test_pro_epilogue(&args);
+}
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
index b722a1e1ddef..dbd5eee8e25e 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_fail.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx)
}
SEC("?tc")
-__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+")
+__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
long rbtree_api_remove_no_drop(void *ctx)
{
struct bpf_rb_node *res;
diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c
index 986f96687ae1..39ebef430059 100644
--- a/tools/testing/selftests/bpf/progs/read_vsyscall.c
+++ b/tools/testing/selftests/bpf/progs/read_vsyscall.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include "vmlinux.h"
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
@@ -7,10 +8,15 @@
int target_pid = 0;
void *user_ptr = 0;
-int read_ret[8];
+int read_ret[9];
char _license[] SEC("license") = "GPL";
+/*
+ * bpf_copy_from_user_str() is the only kfunc used here; the other reads use helpers
+ */
+int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym;
+
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int do_probe_read(void *ctx)
{
@@ -40,6 +46,7 @@ int do_copy_from_user(void *ctx)
read_ret[6] = bpf_copy_from_user(buf, sizeof(buf), user_ptr);
read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr,
bpf_get_current_task_btf(), 0);
+ read_ret[8] = bpf_copy_from_user_str((char *)buf, sizeof(buf), user_ptr, 0);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index f8d4b7cfcd68..836c8ab7b908 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
}
SEC("?tc")
-__failure __regex("Unreleased reference id=4 alloc_insn=[0-9]+")
+__failure __msg("Unreleased reference id=4 alloc_insn={{[0-9]+}}")
long rbtree_refcounted_node_ref_escapes(void *ctx)
{
struct node_acquire *n, *m;
@@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx)
}
SEC("?tc")
-__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+")
+__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
{
struct node_acquire *n, *m;
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index f74459eead26..a5c74d31a244 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -373,7 +373,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr);
/*
* if bpf_probe_read_user_str returns error (<0), due to casting to
- * unsinged int, it will become big number, so next check is
+ * unsigned int, it will become big number, so next check is
* sufficient to check for errors AND prove to BPF verifier, that
* bpf_probe_read_user_str won't return anything bigger than
* STROBE_MAX_STR_LEN
@@ -557,7 +557,7 @@ static void *read_strobe_meta(struct task_struct *task,
return NULL;
payload_off = ctx.payload_off;
- /* this should not really happen, here only to satisfy verifer */
+ /* this should not really happen, here only to satisfy verifier */
if (payload_off > sizeof(data->payload))
payload_off = sizeof(data->payload);
#else
diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c
index 3d3cafdebe72..0f4dfb770c32 100644
--- a/tools/testing/selftests/bpf/progs/syscall.c
+++ b/tools/testing/selftests/bpf/progs/syscall.c
@@ -8,6 +8,7 @@
#include <linux/btf.h>
#include <string.h>
#include <errno.h>
+#include "bpf_misc.h"
char _license[] SEC("license") = "GPL";
@@ -119,7 +120,7 @@ int load_prog(struct args *ctx)
static __u64 value = 34;
static union bpf_attr prog_load_attr = {
.prog_type = BPF_PROG_TYPE_XDP,
- .insn_cnt = sizeof(insns) / sizeof(insns[0]),
+ .insn_cnt = ARRAY_SIZE(insns),
};
int ret;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
new file mode 100644
index 000000000000..327ca395e860
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return 0;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ int ret = 1;
+
+ count++;
+ subprog_tail(skb);
+ subprog_tail(skb);
+
+ return ret;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
new file mode 100644
index 000000000000..72fd0d577506
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+int classifier_0(struct __sk_buff *skb);
+int classifier_1(struct __sk_buff *skb);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ [1] = (void *) &classifier_1,
+ },
+};
+
+int count0 = 0;
+int count1 = 0;
+
+static __noinline
+int subprog_tail0(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
+{
+ count0++;
+ subprog_tail0(skb);
+ return 0;
+}
+
+static __noinline
+int subprog_tail1(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 1);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_1(struct __sk_buff *skb)
+{
+ count1++;
+ subprog_tail1(skb);
+ return 0;
+}
+
+__success
+__retval(33)
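+/* the shared tail-call budget (MAX_TAIL_CALL_CNT = 33) is spent re-entering
+ * classifier_0, so count0 ends at 33 and classifier_1 never runs */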
+SEC("tc")
+int tailcall_bpf2bpf_hierarchy_2(struct __sk_buff *skb)
+{
+ int ret = 0;
+
+ subprog_tail0(skb);
+ subprog_tail1(skb);
+
+ __sink(ret);
+ return (count1 << 16) | count0;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
new file mode 100644
index 000000000000..a7fb91cb05b7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+int classifier_0(struct __sk_buff *skb);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table0 SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table1 SEC(".maps") = {
+ .values = {
+ [0] = (void *) &classifier_0,
+ },
+};
+
+int count = 0;
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb, void *jmp_table)
+{
+ bpf_tail_call_static(skb, jmp_table, 0);
+ return 0;
+}
+
+__auxiliary
+SEC("tc")
+int classifier_0(struct __sk_buff *skb)
+{
+ count++;
+ subprog_tail(skb, &jmp_table0);
+ subprog_tail(skb, &jmp_table1);
+ return count;
+}
+
+__success
+__retval(33)
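+/* same shared budget: all 33 tail calls re-enter classifier_0, so count ends at 33 */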
+SEC("tc")
+int tailcall_bpf2bpf_hierarchy_3(struct __sk_buff *skb)
+{
+ int ret = 0;
+
+ bpf_tail_call_static(skb, &jmp_table0, 0);
+
+ __sink(ret);
+ return ret;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
new file mode 100644
index 000000000000..c87f9ca982d3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Leon Hwang */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+static __noinline
+int subprog_tail(void *ctx)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("fentry/dummy")
+int BPF_PROG(fentry, struct sk_buff *skb)
+{
+ count++;
+ subprog_tail(ctx);
+ subprog_tail(ctx);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tailcall_freplace.c b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
new file mode 100644
index 000000000000..6713b809df44
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+SEC("freplace")
+int entry_freplace(struct __sk_buff *skb)
+{
+ count++;
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return count;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
index 70df695312dc..a55149015063 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
@@ -5,6 +5,7 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
+#include "../bpf_experimental.h"
#include "task_kfunc_common.h"
char _license[] SEC("license") = "GPL";
@@ -142,8 +143,9 @@ int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
- struct task_struct *kptr;
- struct __tasks_kfunc_map_value *v;
+ struct task_struct *kptr, *acquired;
+ struct __tasks_kfunc_map_value *v, *local;
+ int refcnt, refcnt_after_drop;
long status;
if (!is_test_kfunc_task())
@@ -167,6 +169,56 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
return 0;
}
+ local = bpf_obj_new(typeof(*local));
+ if (!local) {
+ err = 4;
+ bpf_task_release(kptr);
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&local->task, kptr);
+ if (kptr) {
+ err = 5;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ return 0;
+ }
+
+ kptr = bpf_kptr_xchg(&local->task, NULL);
+ if (!kptr) {
+ err = 6;
+ bpf_obj_drop(local);
+ return 0;
+ }
+
+ /* Stash a copy into local kptr and check if it is released recursively */
+ acquired = bpf_task_acquire(kptr);
+ if (!acquired) {
+ err = 7;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ return 0;
+ }
+ bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users);
+
+ acquired = bpf_kptr_xchg(&local->task, acquired);
+ if (acquired) {
+ err = 8;
+ bpf_obj_drop(local);
+ bpf_task_release(kptr);
+ bpf_task_release(acquired);
+ return 0;
+ }
+
+ bpf_obj_drop(local);
+
+ bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users);
+ if (refcnt != refcnt_after_drop + 1) {
+ err = 9;
+ bpf_task_release(kptr);
+ return 0;
+ }
+
bpf_task_release(kptr);
return 0;
diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
new file mode 100644
index 000000000000..8a0632c37839
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
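+/* subprog() is kept out-of-line so it can serve as an freplace attach
+ * target (presumably for entry_freplace in tailcall_freplace.c above;
+ * the prog_tests side is not part of this diff).
+ */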
+__noinline
+int subprog(struct __sk_buff *skb)
+{
+ int ret = 1;
+
+ __sink(ret);
+ return ret;
+}
+
+SEC("tc")
+int entry_tc(struct __sk_buff *skb)
+{
+ return subprog(skb);
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tc_dummy.c b/tools/testing/selftests/bpf/progs/tc_dummy.c
new file mode 100644
index 000000000000..69a3d0dc8787
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tc_dummy.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ return 1;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index 68466a6ad18c..fb79e6cab932 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -5,8 +5,10 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#include <errno.h>
#include "bpf_misc.h"
+u32 dynamic_sz = 1;
int kprobe2_res = 0;
int kretprobe2_res = 0;
int uprobe_byname_res = 0;
@@ -14,11 +16,15 @@ int uretprobe_byname_res = 0;
int uprobe_byname2_res = 0;
int uretprobe_byname2_res = 0;
int uprobe_byname3_sleepable_res = 0;
+int uprobe_byname3_str_sleepable_res = 0;
int uprobe_byname3_res = 0;
int uretprobe_byname3_sleepable_res = 0;
+int uretprobe_byname3_str_sleepable_res = 0;
int uretprobe_byname3_res = 0;
void *user_ptr = 0;
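+/* kfunc; declared __weak so the object still loads on kernels that lack it */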
+int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym;
+
SEC("ksyscall/nanosleep")
int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem)
{
@@ -87,11 +93,61 @@ static __always_inline bool verify_sleepable_user_copy(void)
return bpf_strncmp(data, sizeof(data), "test_data") == 0;
}
+static __always_inline bool verify_sleepable_user_copy_str(void)
+{
+ int ret;
+ char data_long[20];
+ char data_long_pad[20];
+ char data_long_err[20];
+ char data_short[4];
+ char data_short_pad[4];
+
+ ret = bpf_copy_from_user_str(data_short, sizeof(data_short), user_ptr, 0);
+
+ if (bpf_strncmp(data_short, 4, "tes\0") != 0 || ret != 4)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_short_pad, sizeof(data_short_pad), user_ptr, BPF_F_PAD_ZEROS);
+
+	if (bpf_strncmp(data_short_pad, 4, "tes\0") != 0 || ret != 4)
+ return false;
+
+ /* Make sure this passes the verifier */
+ ret = bpf_copy_from_user_str(data_long, dynamic_sz & sizeof(data_long), user_ptr, 0);
+
+ if (ret != 0)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 0);
+
+ if (bpf_strncmp(data_long, 10, "test_data\0") != 0 || ret != 10)
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long_pad, sizeof(data_long_pad), user_ptr, BPF_F_PAD_ZEROS);
+
+ if (bpf_strncmp(data_long_pad, 10, "test_data\0") != 0 || ret != 10 || data_long_pad[19] != '\0')
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long_err, sizeof(data_long_err), (void *)data_long, BPF_F_PAD_ZEROS);
+
+ if (ret > 0 || data_long_err[19] != '\0')
+ return false;
+
+ ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 2);
+
+ if (ret != -EINVAL)
+ return false;
+
+ return true;
+}
+
SEC("uprobe.s//proc/self/exe:trigger_func3")
int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
{
if (verify_sleepable_user_copy())
uprobe_byname3_sleepable_res = 9;
+ if (verify_sleepable_user_copy_str())
+ uprobe_byname3_str_sleepable_res = 10;
return 0;
}
@@ -102,7 +158,7 @@ int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
SEC("uprobe//proc/self/exe:trigger_func3")
int handle_uprobe_byname3(struct pt_regs *ctx)
{
- uprobe_byname3_res = 10;
+ uprobe_byname3_res = 11;
return 0;
}
@@ -110,14 +166,16 @@ SEC("uretprobe.s//proc/self/exe:trigger_func3")
int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx)
{
if (verify_sleepable_user_copy())
- uretprobe_byname3_sleepable_res = 11;
+ uretprobe_byname3_sleepable_res = 12;
+ if (verify_sleepable_user_copy_str())
+ uretprobe_byname3_str_sleepable_res = 13;
return 0;
}
SEC("uretprobe//proc/self/exe:trigger_func3")
int handle_uretprobe_byname3(struct pt_regs *ctx)
{
- uretprobe_byname3_res = 12;
+ uretprobe_byname3_res = 14;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_build_id.c b/tools/testing/selftests/bpf/progs/test_build_id.c
new file mode 100644
index 000000000000..32ce59f9aa27
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_build_id.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct bpf_stack_build_id stack_sleepable[128];
+int res_sleepable;
+
+struct bpf_stack_build_id stack_nofault[128];
+int res_nofault;
+
+SEC("uprobe.multi/./uprobe_multi:uprobe")
+int uprobe_nofault(struct pt_regs *ctx)
+{
+ res_nofault = bpf_get_stack(ctx, stack_nofault, sizeof(stack_nofault),
+ BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+
+ return 0;
+}
+
+SEC("uprobe.multi.s/./uprobe_multi:uprobe")
+int uprobe_sleepable(struct pt_regs *ctx)
+{
+ res_sleepable = bpf_get_stack(ctx, stack_sleepable, sizeof(stack_sleepable),
+ BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
index da54c09e9a15..464515b824b9 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -503,7 +503,7 @@ static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_header
*
* fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
*
- * clang will substitue a costant for sizeof, which allows the verifier
+ * clang will substitute a constant for sizeof, which allows the verifier
* to track its value. Based on this, it can figure out the constant
* return value, and calling code works while still being "generic" to
* IPv4 and IPv6.
diff --git a/tools/testing/selftests/bpf/progs/test_core_read_macros.c b/tools/testing/selftests/bpf/progs/test_core_read_macros.c
index fd54caa17319..873d85a4739b 100644
--- a/tools/testing/selftests/bpf/progs/test_core_read_macros.c
+++ b/tools/testing/selftests/bpf/progs/test_core_read_macros.c
@@ -36,7 +36,7 @@ int handler(void *ctx)
return 0;
/* next pointers for kernel address space have to be initialized from
- * BPF side, user-space mmaped addresses are stil user-space addresses
+ * BPF side, user-space mmaped addresses are still user-space addresses
*/
k_probe_in.next = &k_probe_in;
__builtin_preserve_access_index(({k_core_in.next = &k_core_in;}));
diff --git a/tools/testing/selftests/bpf/progs/test_get_xattr.c b/tools/testing/selftests/bpf/progs/test_get_xattr.c
index 7eb2a4e5a3e5..66e737720f7c 100644
--- a/tools/testing/selftests/bpf/progs/test_get_xattr.c
+++ b/tools/testing/selftests/bpf/progs/test_get_xattr.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
+#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_kfuncs.h"
@@ -9,10 +10,12 @@
char _license[] SEC("license") = "GPL";
__u32 monitored_pid;
-__u32 found_xattr;
+__u32 found_xattr_from_file;
+__u32 found_xattr_from_dentry;
static const char expected_value[] = "hello";
-char value[32];
+char value1[32];
+char value2[32];
SEC("lsm.s/file_open")
int BPF_PROG(test_file_open, struct file *f)
@@ -25,13 +28,37 @@ int BPF_PROG(test_file_open, struct file *f)
if (pid != monitored_pid)
return 0;
- bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr);
+ bpf_dynptr_from_mem(value1, sizeof(value1), 0, &value_ptr);
ret = bpf_get_file_xattr(f, "user.kfuncs", &value_ptr);
if (ret != sizeof(expected_value))
return 0;
- if (bpf_strncmp(value, ret, expected_value))
+ if (bpf_strncmp(value1, ret, expected_value))
return 0;
- found_xattr = 1;
+ found_xattr_from_file = 1;
return 0;
}
+
+SEC("lsm.s/inode_getxattr")
+int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name)
+{
+ struct bpf_dynptr value_ptr;
+ __u32 pid;
+ int ret;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+ if (pid != monitored_pid)
+ return 0;
+
+ bpf_dynptr_from_mem(value2, sizeof(value2), 0, &value_ptr);
+
+ ret = bpf_get_dentry_xattr(dentry, "user.kfuncs", &value_ptr);
+ if (ret != sizeof(expected_value))
+ return 0;
+ if (bpf_strncmp(value2, ret, expected_value))
+ return 0;
+ found_xattr_from_dentry = 1;
+
+ /* return non-zero to fail getxattr from user space */
+ return -EINVAL;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c
index b4e089d6981d..201cc000b3f4 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func15.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func15.c
@@ -44,7 +44,7 @@ __naked int global_func15_tricky_pruning(void)
* case we have a valid 1 stored in R0 register, but in
* a branch case we assign some random value to R0. So if
* there is something wrong with precision tracking for R0 at
- * program exit, we might erronenously prune branch case,
+ * program exit, we might erroneously prune branch case,
* because R0 in fallthrough case is imprecise (and thus any
* value is valid from POV of verifier is_state_equal() logic)
*/
diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
index 1fbb73d3e5d5..a3f220ba7025 100644
--- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
@@ -3,6 +3,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
@@ -15,7 +16,7 @@ const volatile size_t data_array_len;
int sum = 0;
int array[1];
-/* custom data secton */
+/* custom data section */
int my_array[1] SEC(".data.custom");
/* custom data section which should NOT be resizable,
@@ -60,3 +61,18 @@ int data_array_sum(void *ctx)
return 0;
}
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ return 0;
+}
+
+struct bpf_testmod_ops {
+ int (*test_1)(void);
+};
+
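+/* Likely regression coverage: resizing the datasec maps above rewrites
+ * the object's BTF, and loading this struct_ops map from the same
+ * object checks that its BTF references survive the rewrite.
+ */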
+SEC(".struct_ops.link")
+struct bpf_testmod_ops st_ops_resize = {
+ .test_1 = (void *)test_1
+};
diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
index f5ac5f3e8919..568816307f71 100644
--- a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
+++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
@@ -31,6 +31,7 @@ int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
if (fmode & FMODE_WRITE)
return -EACCES;
+ barrier();
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
index fc8e8a34a3db..7035fb4d4165 100644
--- a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -4,6 +4,7 @@
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
const struct {
unsigned a[4];
@@ -64,7 +65,7 @@ int full_loop(struct pt_regs *ctx)
{
/* prevent compiler to optimize everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
- int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
+ int i = ARRAY_SIZE(rdonly_values.a);
unsigned iters = 0, sum = 0;
/* validate verifier can allow full loop as well */
diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
index 2f0eb1334d65..8ef6b39335b6 100644
--- a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
+++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
@@ -6,6 +6,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_kfuncs.h"
+#include "err.h"
char _license[] SEC("license") = "GPL";
@@ -79,5 +80,8 @@ int BPF_PROG(test_file_open, struct file *f)
ret = bpf_verify_pkcs7_signature(&digest_ptr, &sig_ptr, trusted_keyring);
bpf_key_put(trusted_keyring);
+
+ set_if_not_errno_or_zero(ret, -EFAULT);
+
return ret;
}
diff --git a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
deleted file mode 100644
index 37aacc66cd68..000000000000
--- a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-
-#include <linux/bpf.h>
-#include <linux/pkt_cls.h>
-
-#include <string.h>
-
-#include <bpf/bpf_helpers.h>
-
-#define NUM_CGROUP_LEVELS 4
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __type(key, __u32);
- __type(value, __u64);
- __uint(max_entries, NUM_CGROUP_LEVELS);
-} cgroup_ids SEC(".maps");
-
-static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level)
-{
- __u64 id;
-
- /* [1] &level passed to external function that may change it, it's
- * incompatible with loop unroll.
- */
- id = bpf_skb_ancestor_cgroup_id(skb, level);
- bpf_map_update_elem(&cgroup_ids, &level, &id, 0);
-}
-
-SEC("cgroup_id_logger")
-int log_cgroup_id(struct __sk_buff *skb)
-{
- /* Loop unroll can't be used here due to [1]. Unrolling manually.
- * Number of calls should be in sync with NUM_CGROUP_LEVELS.
- */
- log_nth_level(skb, 0);
- log_nth_level(skb, 1);
- log_nth_level(skb, 2);
- log_nth_level(skb, 3);
-
- return TC_ACT_OK;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index 3f5abcf3ff13..32127f1cd687 100644
--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
@@ -26,6 +26,18 @@
*/
#define ASSIGNED_ADDR_VETH1 0xac1001c8
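+
+/* The ___local suffix marks CO-RE "flavor" definitions: libbpf ignores
+ * everything from the triple underscore onwards when matching against
+ * kernel BTF, so field accesses (via preserve_access_index) and
+ * bpf_core_enum_value() below relocate against the kernel's real
+ * bpf_fou_encap and bpf_fou_encap_type definitions.
+ */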
+struct bpf_fou_encap___local {
+ __be16 sport;
+ __be16 dport;
+} __attribute__((preserve_access_index));
+
+enum bpf_fou_encap_type___local {
+ FOU_BPF_ENCAP_FOU___local,
+ FOU_BPF_ENCAP_GUE___local,
+};
+
+struct bpf_fou_encap;
+
int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx,
struct bpf_fou_encap *encap, int type) __ksym;
int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
@@ -745,7 +757,7 @@ SEC("tc")
int ipip_gue_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
- struct bpf_fou_encap encap = {};
+ struct bpf_fou_encap___local encap = {};
void *data = (void *)(long)skb->data;
struct iphdr *iph = data;
void *data_end = (void *)(long)skb->data_end;
@@ -769,7 +781,9 @@ int ipip_gue_set_tunnel(struct __sk_buff *skb)
encap.sport = 0;
encap.dport = bpf_htons(5555);
- ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_GUE);
+ ret = bpf_skb_set_fou_encap(skb, (struct bpf_fou_encap *)&encap,
+ bpf_core_enum_value(enum bpf_fou_encap_type___local,
+ FOU_BPF_ENCAP_GUE___local));
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
@@ -782,7 +796,7 @@ SEC("tc")
int ipip_fou_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
- struct bpf_fou_encap encap = {};
+ struct bpf_fou_encap___local encap = {};
void *data = (void *)(long)skb->data;
struct iphdr *iph = data;
void *data_end = (void *)(long)skb->data_end;
@@ -806,7 +820,8 @@ int ipip_fou_set_tunnel(struct __sk_buff *skb)
encap.sport = 0;
encap.dport = bpf_htons(5555);
- ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_FOU);
+ ret = bpf_skb_set_fou_encap(skb, (struct bpf_fou_encap *)&encap,
+ FOU_BPF_ENCAP_FOU___local);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
@@ -820,7 +835,7 @@ int ipip_encap_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key = {};
- struct bpf_fou_encap encap = {};
+ struct bpf_fou_encap___local encap = {};
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
@@ -828,7 +843,7 @@ int ipip_encap_get_tunnel(struct __sk_buff *skb)
return TC_ACT_SHOT;
}
- ret = bpf_skb_get_fou_encap(skb, &encap);
+ ret = bpf_skb_get_fou_encap(skb, (struct bpf_fou_encap *)&encap);
if (ret < 0) {
log_err(ret);
return TC_ACT_SHOT;
diff --git a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
index f42e9f3831a1..12034a73ee2d 100644
--- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
+++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c
@@ -11,6 +11,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_kfuncs.h"
+#include "err.h"
#define MAX_DATA_SIZE (1024 * 1024)
#define MAX_SIG_SIZE 1024
@@ -55,12 +56,12 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
ret = bpf_probe_read_kernel(&value, sizeof(value), &attr->value);
if (ret)
- return ret;
+ goto out;
ret = bpf_copy_from_user(data_val, sizeof(struct data),
(void *)(unsigned long)value);
if (ret)
- return ret;
+ goto out;
if (data_val->data_len > sizeof(data_val->data))
return -EINVAL;
@@ -84,5 +85,8 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
bpf_key_put(trusted_keyring);
+out:
+ set_if_not_errno_or_zero(ret, -EFAULT);
+
return ret;
}
diff --git a/tools/testing/selftests/bpf/progs/token_lsm.c b/tools/testing/selftests/bpf/progs/token_lsm.c
index e4d59b6ba743..a6002d073b1b 100644
--- a/tools/testing/selftests/bpf/progs/token_lsm.c
+++ b/tools/testing/selftests/bpf/progs/token_lsm.c
@@ -8,8 +8,8 @@
char _license[] SEC("license") = "GPL";
int my_pid;
-bool reject_capable;
-bool reject_cmd;
+int reject_capable;
+int reject_cmd;
SEC("lsm/bpf_token_capable")
int BPF_PROG(token_capable, struct bpf_token *token, int cap)
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 2619ed193c65..044a6d78923e 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -32,6 +32,13 @@ int bench_trigger_uprobe(void *ctx)
return 0;
}
+SEC("?uprobe.multi")
+int bench_trigger_uprobe_multi(void *ctx)
+{
+ inc_counter();
+ return 0;
+}
+
const volatile int batch_iters = 0;
SEC("?raw_tp")
diff --git a/tools/testing/selftests/bpf/progs/unsupported_ops.c b/tools/testing/selftests/bpf/progs/unsupported_ops.c
new file mode 100644
index 000000000000..9180365a3568
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/unsupported_ops.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/unsupported_ops")
+__failure
+__msg("attach to unsupported member unsupported_ops of struct bpf_testmod_ops")
+int BPF_PROG(unsupported_ops)
+{
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod = {
+ .unsupported_ops = (void *)unsupported_ops,
+};
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
new file mode 100644
index 000000000000..7e0fdcbbd242
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_result[4];
+
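+/* One result counter per consumer program: user space is expected to
+ * attach different subsets of uprobe_0..uprobe_3 and verify which
+ * counters advanced.
+ */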
+SEC("uprobe.multi")
+int uprobe_0(struct pt_regs *ctx)
+{
+ uprobe_result[0]++;
+ return 0;
+}
+
+SEC("uprobe.multi")
+int uprobe_1(struct pt_regs *ctx)
+{
+ uprobe_result[1]++;
+ return 0;
+}
+
+SEC("uprobe.multi")
+int uprobe_2(struct pt_regs *ctx)
+{
+ uprobe_result[2]++;
+ return 0;
+}
+
+SEC("uprobe.multi")
+int uprobe_3(struct pt_regs *ctx)
+{
+ uprobe_result[3]++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c b/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c
new file mode 100644
index 000000000000..67fcbad36661
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 pids[3];
+__u32 test[3][2];
+
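+/* For each tracked pid, test[idx][0] counts triggers coming from
+ * pids[idx] and test[idx][1] counts triggers from any other pid,
+ * which lets user space verify uprobe-multi pid filtering.
+ */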
+static void update_pid(int idx)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid == pids[idx])
+ test[idx][0]++;
+ else
+ test[idx][1]++;
+}
+
+SEC("uprobe.multi")
+int uprobe_multi_0(struct pt_regs *ctx)
+{
+ update_pid(0);
+ return 0;
+}
+
+SEC("uprobe.multi")
+int uprobe_multi_1(struct pt_regs *ctx)
+{
+ update_pid(1);
+ return 0;
+}
+
+SEC("uprobe.multi")
+int uprobe_multi_2(struct pt_regs *ctx)
+{
+ update_pid(2);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
index 716113c2bce2..f4da4d508ddb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
@@ -87,7 +87,7 @@ int bits_memalloc(void)
int *bit;
__builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */
- bpf_for_each(bits, bit, &data[0], sizeof(data) / sizeof(u64))
+ bpf_for_each(bits, bit, &data[0], ARRAY_SIZE(data))
nr++;
return nr;
}
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
new file mode 100644
index 000000000000..9da97d2efcd9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "../../../include/linux/filter.h"
+#include "bpf_misc.h"
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+
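+/* The bpf_fastcall pattern: stack spills placed immediately before a
+ * call to a fastcall-eligible helper, with matching fills immediately
+ * after it. Such helpers preserve R1-R5, so the verifier (or jit) can
+ * remove the spill/fill pairs; e.g. in simple() below all five pairs
+ * around bpf_get_smp_processor_id() are expected to be removed from
+ * the xlated program.
+ */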
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 8")
+__xlated("4: r5 = 5")
+__xlated("5: w0 = ")
+__xlated("6: r0 = &(void __percpu *)(r0)")
+__xlated("7: r0 = *(u32 *)(r0 +0)")
+__xlated("8: exit")
+__success
+__naked void simple(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ "r3 = 3;"
+ "r4 = 4;"
+ "r5 = 5;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "*(u64 *)(r10 - 24) = r2;"
+ "*(u64 *)(r10 - 32) = r3;"
+ "*(u64 *)(r10 - 40) = r4;"
+ "*(u64 *)(r10 - 48) = r5;"
+ "call %[bpf_get_smp_processor_id];"
+ "r5 = *(u64 *)(r10 - 48);"
+ "r4 = *(u64 *)(r10 - 40);"
+ "r3 = *(u64 *)(r10 - 32);"
+ "r2 = *(u64 *)(r10 - 24);"
+ "r1 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+/* The logic for detecting and verifying bpf_fastcall pattern is the same for
+ * any arch; however, x86 differs from arm64 or riscv64 in the way
+ * bpf_get_smp_processor_id is rewritten:
+ * - on x86 it is done by verifier
+ * - on arm64 and riscv64 it is done by jit
+ *
+ * Which leads to different xlated patterns for different archs:
+ * - on x86 the call is expanded as 3 instructions
+ * - on arm64 and riscv64 the call remains as is
+ * (but spills/fills are still removed)
+ *
+ * It is really desirable to check instruction indexes in the xlated
+ * patterns, so add this canary test to verify that the function rewrite
+ * done by the jit is correctly processed by the bpf_fastcall logic, and
+ * keep the rest of the tests x86-specific.
+ */
+SEC("raw_tp")
+__arch_arm64
+__arch_riscv64
+__xlated("0: r1 = 1")
+__xlated("1: call bpf_get_smp_processor_id")
+__xlated("2: exit")
+__success
+__naked void canary_arm64_riscv64(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("3: exit")
+__success
+__naked void canary_zero_spills(void)
+{
+ asm volatile (
+ "call %[bpf_get_smp_processor_id];"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 16")
+__xlated("1: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r2 = *(u64 *)(r10 -16)")
+__success
+__naked void wrong_reg_in_pattern1(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -16) = r6")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r6 = *(u64 *)(r10 -16)")
+__success
+__naked void wrong_reg_in_pattern2(void)
+{
+ asm volatile (
+ "r6 = 1;"
+ "*(u64 *)(r10 - 16) = r6;"
+ "call %[bpf_get_smp_processor_id];"
+ "r6 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -16) = r0")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r0 = *(u64 *)(r10 -16)")
+__success
+__naked void wrong_reg_in_pattern3(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "*(u64 *)(r10 - 16) = r0;"
+ "call %[bpf_get_smp_processor_id];"
+ "r0 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("2: *(u64 *)(r2 -16) = r1")
+__xlated("...")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("6: r1 = *(u64 *)(r10 -16)")
+__success
+__naked void wrong_base_in_pattern(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = r10;"
+ "*(u64 *)(r2 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r2 = 1")
+__success
+__naked void wrong_insn_in_pattern(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = 1;"
+ "r1 = *(u64 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("2: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("6: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void wrong_off_in_pattern1(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u32 *)(r10 -4) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u32 *)(r10 -4)")
+__success
+__naked void wrong_off_in_pattern2(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u32 *)(r10 - 4) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u32 *)(r10 - 4);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u32 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u32 *)(r10 -16)")
+__success
+__naked void wrong_size_in_pattern(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u32 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u32 *)(r10 - 16);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("2: *(u32 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("6: r1 = *(u32 *)(r10 -8)")
+__success
+__naked void partial_pattern(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ "*(u32 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r2;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = *(u64 *)(r10 - 16);"
+ "r1 = *(u32 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("0: r1 = 1")
+__xlated("1: r2 = 2")
+/* not patched, spills for -8, -16 not removed */
+__xlated("2: *(u64 *)(r10 -8) = r1")
+__xlated("3: *(u64 *)(r10 -16) = r2")
+__xlated("...")
+__xlated("5: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("7: r2 = *(u64 *)(r10 -16)")
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+/* patched, spills for -24, -32 removed */
+__xlated("...")
+__xlated("10: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("12: exit")
+__success
+__naked void min_stack_offset(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "r2 = 2;"
+ /* this call won't be patched */
+ "*(u64 *)(r10 - 8) = r1;"
+ "*(u64 *)(r10 - 16) = r2;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = *(u64 *)(r10 - 16);"
+ "r1 = *(u64 *)(r10 - 8);"
+	/* this call will be patched */
+ "*(u64 *)(r10 - 24) = r1;"
+ "*(u64 *)(r10 - 32) = r2;"
+ "call %[bpf_get_smp_processor_id];"
+ "r2 = *(u64 *)(r10 - 32);"
+ "r1 = *(u64 *)(r10 - 24);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void bad_fixed_read(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r1 = *(u64 *)(r1 - 0);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void bad_fixed_write(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "*(u64 *)(r1 - 0) = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("10: r1 = *(u64 *)(r10 -16)")
+__success
+__naked void bad_varying_read(void)
+{
+ asm volatile (
+ "r6 = *(u64 *)(r1 + 0);" /* random scalar value */
+ "r6 &= 0x7;" /* r6 range [0..7] */
+ "r6 += 0x2;" /* r6 range [2..9] */
+ "r7 = 0;"
+ "r7 -= r6;" /* r7 range [-9..-2] */
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "r1 = r10;"
+ "r1 += r7;"
+ "r1 = *(u8 *)(r1 - 0);" /* touches slot [-16..-9] where spills are stored */
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("...")
+__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("10: r1 = *(u64 *)(r10 -16)")
+__success
+__naked void bad_varying_write(void)
+{
+ asm volatile (
+ "r6 = *(u64 *)(r1 + 0);" /* random scalar value */
+ "r6 &= 0x7;" /* r6 range [0..7] */
+ "r6 += 0x2;" /* r6 range [2..9] */
+ "r7 = 0;"
+ "r7 -= r6;" /* r7 range [-9..-2] */
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "r1 = r10;"
+ "r1 += r7;"
+ "*(u8 *)(r1 - 0) = r7;" /* touches slot [-16..-9] where spills are stored */
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void bad_write_in_subprog(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "call bad_write_in_subprog_aux;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+__used
+__naked static void bad_write_in_subprog_aux(void)
+{
+ asm volatile (
+ "r0 = 1;"
+ "*(u64 *)(r1 - 0) = r0;" /* invalidates bpf_fastcall contract for caller: */
+ "exit;" /* caller stack at -8 used outside of the pattern */
+ ::: __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__success
+__naked void bad_helper_write(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ /* bpf_fastcall pattern with stack offset -8 */
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = 1;"
+ "r3 = 42;"
+ /* read dst is fp[-8], thus bpf_fastcall rewrite not applied */
+ "call %[bpf_probe_read_kernel];"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm(bpf_probe_read_kernel)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+/* main, not patched */
+__xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("5: r1 = *(u64 *)(r10 -8)")
+__xlated("...")
+__xlated("9: call pc+1")
+__xlated("...")
+__xlated("10: exit")
+/* subprogram, patched */
+__xlated("11: r1 = 1")
+__xlated("...")
+__xlated("13: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("15: exit")
+__success
+__naked void invalidate_one_subprog(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "r1 = r10;"
+ "r1 += -8;"
+ "r1 = *(u64 *)(r1 - 0);"
+ "call invalidate_one_subprog_aux;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+__used
+__naked static void invalidate_one_subprog_aux(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+/* main */
+__xlated("0: r1 = 1")
+__xlated("...")
+__xlated("2: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("4: call pc+1")
+__xlated("5: exit")
+/* subprogram */
+__xlated("6: r1 = 1")
+__xlated("...")
+__xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+__xlated("10: *(u64 *)(r10 -16) = r1")
+__xlated("11: exit")
+__success
+__naked void subprogs_use_independent_offsets(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "call subprogs_use_independent_offsets_aux;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+__used
+__naked static void subprogs_use_independent_offsets_aux(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 24) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 24);"
+ "*(u64 *)(r10 - 16) = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 8")
+__xlated("2: r0 = &(void __percpu *)(r0)")
+__success
+__naked void helper_call_does_not_prevent_bpf_fastcall(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 16")
+/* may_goto counter at -16 */
+__xlated("0: *(u64 *)(r10 -16) =")
+__xlated("1: r1 = 1")
+__xlated("...")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+/* may_goto expansion starts */
+__xlated("5: r11 = *(u64 *)(r10 -16)")
+__xlated("6: if r11 == 0x0 goto pc+3")
+__xlated("7: r11 -= 1")
+__xlated("8: *(u64 *)(r10 -16) = r11")
+/* may_goto expansion ends */
+__xlated("9: *(u64 *)(r10 -8) = r1")
+__xlated("10: exit")
+__success
+__naked void may_goto_interaction(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ ".8byte %[may_goto];"
+ /* just touch some stack at -8 */
+ "*(u64 *)(r10 - 8) = r1;"
+ "exit;"
+ :
+ : __imm(bpf_get_smp_processor_id),
+ __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0))
+ : __clobber_all);
+}
+
+__used
+__naked static void dummy_loop_callback(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 32+0")
+__xlated("2: r1 = 1")
+__xlated("3: w0 =")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("5: r0 = *(u32 *)(r0 +0)")
+/* bpf_loop params setup */
+__xlated("6: r2 =")
+__xlated("7: r3 = 0")
+__xlated("8: r4 = 0")
+__xlated("...")
+/* ... part of the inlined bpf_loop */
+__xlated("12: *(u64 *)(r10 -32) = r6")
+__xlated("13: *(u64 *)(r10 -24) = r7")
+__xlated("14: *(u64 *)(r10 -16) = r8")
+__xlated("...")
+__xlated("21: call pc+8") /* dummy_loop_callback */
+/* ... last insns of the bpf_loop_interaction1 */
+__xlated("...")
+__xlated("28: r0 = 0")
+__xlated("29: exit")
+/* dummy_loop_callback */
+__xlated("30: r0 = 0")
+__xlated("31: exit")
+__success
+__naked int bpf_loop_interaction1(void)
+{
+ asm volatile (
+ "r1 = 1;"
+ /* bpf_fastcall stack region at -16, but could be removed */
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "r2 = %[dummy_loop_callback];"
+ "r3 = 0;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(dummy_loop_callback),
+ __imm(bpf_get_smp_processor_id),
+ __imm(bpf_loop)
+ : __clobber_common
+ );
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4) __msg("stack depth 40+0")
+/* call bpf_get_smp_processor_id */
+__xlated("2: r1 = 42")
+__xlated("3: w0 =")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("5: r0 = *(u32 *)(r0 +0)")
+/* call bpf_get_prandom_u32 */
+__xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("7: call")
+__xlated("8: r1 = *(u64 *)(r10 -16)")
+__xlated("...")
+/* ... part of the inlined bpf_loop */
+__xlated("15: *(u64 *)(r10 -40) = r6")
+__xlated("16: *(u64 *)(r10 -32) = r7")
+__xlated("17: *(u64 *)(r10 -24) = r8")
+__success
+__naked int bpf_loop_interaction2(void)
+{
+ asm volatile (
+ "r1 = 42;"
+ /* bpf_fastcall stack region at -16, cannot be removed */
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "*(u64 *)(r10 - 16) = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = *(u64 *)(r10 - 16);"
+ "r2 = %[dummy_loop_callback];"
+ "r3 = 0;"
+ "r4 = 0;"
+ "call %[bpf_loop];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(dummy_loop_callback),
+ __imm(bpf_get_smp_processor_id),
+ __imm(bpf_get_prandom_u32),
+ __imm(bpf_loop)
+ : __clobber_common
+ );
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4)
+__msg("stack depth 512+0")
+/* just to print xlated version when debugging */
+__xlated("r0 = &(void __percpu *)(r0)")
+__success
+/* cumulative_stack_depth() uses MAX_BPF_STACK bytes of stack, and the
+ * called subprogram uses an additional slot for a bpf_fastcall spill/fill;
+ * since the bpf_fastcall spill/fill can be removed, the program still fits
+ * in MAX_BPF_STACK and should be accepted.
+ */
+__naked int cumulative_stack_depth(void)
+{
+ asm volatile(
+ "r1 = 42;"
+ "*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
+ "call cumulative_stack_depth_subprog;"
+ "exit;"
+ :
+ : __imm_const(max_bpf_stack, MAX_BPF_STACK)
+ : __clobber_all
+ );
+}
+
+__used
+__naked static void cumulative_stack_depth_subprog(void)
+{
+ asm volatile (
+ "*(u64 *)(r10 - 8) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - 8);"
+ "exit;"
+ :: __imm(bpf_get_smp_processor_id) : __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4)
+__msg("stack depth 512")
+__xlated("0: r1 = 42")
+__xlated("1: *(u64 *)(r10 -512) = r1")
+__xlated("2: w0 = ")
+__xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("4: r0 = *(u32 *)(r0 +0)")
+__xlated("5: exit")
+__success
+__naked int bpf_fastcall_max_stack_ok(void)
+{
+ asm volatile(
+ "r1 = 42;"
+ "*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
+ "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
+ "exit;"
+ :
+ : __imm_const(max_bpf_stack, MAX_BPF_STACK),
+ __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
+ __imm(bpf_get_smp_processor_id)
+ : __clobber_all
+ );
+}
+
+SEC("raw_tp")
+__arch_x86_64
+__log_level(4)
+__msg("stack depth 520")
+__failure
+__naked int bpf_fastcall_max_stack_fail(void)
+{
+ asm volatile(
+ "r1 = 42;"
+ "*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
+ "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
+ "call %[bpf_get_smp_processor_id];"
+ "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
+ /* call to prandom blocks bpf_fastcall rewrite */
+ "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
+ "exit;"
+ :
+ : __imm_const(max_bpf_stack, MAX_BPF_STACK),
+ __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
+ __imm(bpf_get_smp_processor_id),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+SEC("cgroup/getsockname_unix")
+__xlated("0: r2 = 1")
+/* bpf_cast_to_kern_ctx is replaced by a single assignment */
+__xlated("1: r0 = r1")
+__xlated("2: r0 = r2")
+__xlated("3: exit")
+__success
+__naked void kfunc_bpf_cast_to_kern_ctx(void)
+{
+ asm volatile (
+ "r2 = 1;"
+ "*(u64 *)(r10 - 32) = r2;"
+ "call %[bpf_cast_to_kern_ctx];"
+ "r2 = *(u64 *)(r10 - 32);"
+ "r0 = r2;"
+ "exit;"
+ :
+ : __imm(bpf_cast_to_kern_ctx)
+ : __clobber_all);
+}
+
+SEC("raw_tp")
+__xlated("3: r3 = 1")
+/* bpf_rdonly_cast is replaced by a single assignment */
+__xlated("4: r0 = r1")
+__xlated("5: r0 = r3")
+void kfunc_bpf_rdonly_cast(void)
+{
+ asm volatile (
+ "r2 = %[btf_id];"
+ "r3 = 1;"
+ "*(u64 *)(r10 - 32) = r3;"
+ "call %[bpf_rdonly_cast];"
+ "r3 = *(u64 *)(r10 - 32);"
+ "r0 = r3;"
+ :
+ : __imm(bpf_rdonly_cast),
+ [btf_id]"r"(bpf_core_type_id_kernel(union bpf_attr))
+ : __clobber_common);
+}
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void kfunc_root(void)
+{
+ bpf_cast_to_kern_ctx(0);
+ bpf_rdonly_cast(0, 0);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_const.c b/tools/testing/selftests/bpf/progs/verifier_const.c
new file mode 100644
index 000000000000..2e533d7eec2f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_const.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Isovalent */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+const volatile long foo = 42;
+long bar;
+long bart = 96;
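+/* foo is const volatile and therefore backed by the read-only .rodata
+ * map; bar (zero-initialized) lands in .bss and bart in .data. Only
+ * writes through the .rodata-backed pointer are expected to be
+ * rejected below.
+ */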
+
+SEC("tc/ingress")
+__description("rodata/strtol: write rejected")
+__failure __msg("write into map forbidden")
+int tcx1(struct __sk_buff *skb)
+{
+ char buff[] = { '8', '4', '\0' };
+ bpf_strtol(buff, sizeof(buff), 0, (long *)&foo);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("bss/strtol: write accepted")
+__success
+int tcx2(struct __sk_buff *skb)
+{
+ char buff[] = { '8', '4', '\0' };
+ bpf_strtol(buff, sizeof(buff), 0, &bar);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("data/strtol: write accepted")
+__success
+int tcx3(struct __sk_buff *skb)
+{
+ char buff[] = { '8', '4', '\0' };
+ bpf_strtol(buff, sizeof(buff), 0, &bart);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("rodata/mtu: write rejected")
+__failure __msg("write into map forbidden")
+int tcx4(struct __sk_buff *skb)
+{
+ bpf_check_mtu(skb, skb->ifindex, (__u32 *)&foo, 0, 0);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("bss/mtu: write accepted")
+__success
+int tcx5(struct __sk_buff *skb)
+{
+ bpf_check_mtu(skb, skb->ifindex, (__u32 *)&bar, 0, 0);
+ return TCX_PASS;
+}
+
+SEC("tc/ingress")
+__description("data/mtu: write accepted")
+__success
+int tcx6(struct __sk_buff *skb)
+{
+ bpf_check_mtu(skb, skb->ifindex, (__u32 *)&bart, 0, 0);
+ return TCX_PASS;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
index a9fc30ed4d73..20904cd2baa2 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
@@ -7,6 +7,7 @@
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"
+#include "err.h"
/* The compiler may be able to detect the access to uninitialized
memory in the routines performing out of bound memory accesses and
@@ -331,7 +332,11 @@ SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
- return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+ int ret;
+
+ ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+ set_if_not_errno_or_zero(ret, -1);
+ return ret;
}
SEC("?struct_ops/test_1")
diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
index 9fc3fae5cd83..5f2efb895edb 100644
--- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
+++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
@@ -6,9 +6,8 @@
#include "bpf_misc.h"
SEC("socket")
-__description("ARG_PTR_TO_LONG uninitialized")
+__description("arg pointer to long uninitialized")
__success
-__failure_unpriv __msg_unpriv("invalid indirect read from stack R4 off -16+0 size 8")
__naked void arg_ptr_to_long_uninitialized(void)
{
asm volatile (" \
@@ -35,10 +34,8 @@ __naked void arg_ptr_to_long_uninitialized(void)
}
SEC("socket")
-__description("ARG_PTR_TO_LONG half-uninitialized")
-/* in privileged mode reads from uninitialized stack locations are permitted */
-__success __failure_unpriv
-__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8")
+__description("arg pointer to long half-uninitialized")
+__success
__retval(0)
__naked void ptr_to_long_half_uninitialized(void)
{
@@ -67,7 +64,7 @@ __naked void ptr_to_long_half_uninitialized(void)
}
SEC("cgroup/sysctl")
-__description("ARG_PTR_TO_LONG misaligned")
+__description("arg pointer to long misaligned")
__failure __msg("misaligned stack access off 0+-20+0 size 8")
__naked void arg_ptr_to_long_misaligned(void)
{
@@ -98,7 +95,7 @@ __naked void arg_ptr_to_long_misaligned(void)
}
SEC("cgroup/sysctl")
-__description("ARG_PTR_TO_LONG size < sizeof(long)")
+__description("arg pointer to long size < sizeof(long)")
__failure __msg("invalid indirect access to stack R4 off=-4 size=8")
__naked void to_long_size_sizeof_long(void)
{
@@ -127,7 +124,7 @@ __naked void to_long_size_sizeof_long(void)
}
SEC("cgroup/sysctl")
-__description("ARG_PTR_TO_LONG initialized")
+__description("arg pointer to long initialized")
__success
__naked void arg_ptr_to_long_initialized(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c b/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c
new file mode 100644
index 000000000000..9f3f2b7db450
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct value_t {
+ long long a[32];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, struct value_t);
+} map_hash SEC(".maps");
+
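+/* Branch-heavy program stressing the x86-64 JIT's image-size
+ * convergence: conditional jumps whose targets move between passes can
+ * flip between short and long encodings (je <-> jmp), and the JIT must
+ * still converge to a stable image.
+ */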
+SEC("socket")
+__description("bpf_jit_convergence je <-> jmp")
+__success __retval(0)
+__arch_x86_64
+__jited(" pushq %rbp")
+__naked void btf_jit_convergence_je_jmp(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "if r0 == 0 goto l20_%=;"
+ "if r0 == 1 goto l21_%=;"
+ "if r0 == 2 goto l22_%=;"
+ "if r0 == 3 goto l23_%=;"
+ "if r0 == 4 goto l24_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "call %[bpf_get_prandom_u32];"
+"l20_%=:"
+"l21_%=:"
+"l22_%=:"
+"l23_%=:"
+"l24_%=:"
+ "r1 = 0;"
+ "*(u64 *)(r10 - 8) = r1;"
+ "r2 = r10;"
+ "r2 += -8;"
+ "r1 = %[map_hash] ll;"
+ "call %[bpf_map_lookup_elem];"
+ "if r0 == 0 goto l1_%=;"
+ "r6 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r5 = r6;"
+ "if r0 != 0x0 goto l12_%=;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = r0;"
+ "r2 = r6;"
+ "if r1 == 0x0 goto l0_%=;"
+"l9_%=:"
+ "r2 = *(u64 *)(r6 + 0x0);"
+ "r2 += 0x1;"
+ "*(u64 *)(r6 + 0x0) = r2;"
+ "goto l1_%=;"
+"l12_%=:"
+ "r1 = r7;"
+ "r1 += 0x98;"
+ "r2 = r5;"
+ "r2 += 0x90;"
+ "r2 = *(u32 *)(r2 + 0x0);"
+ "r3 = r7;"
+ "r3 &= 0x1;"
+ "r2 *= 0xa8;"
+ "if r3 == 0x0 goto l2_%=;"
+ "r1 += r2;"
+ "r1 -= r7;"
+ "r1 += 0x8;"
+ "if r1 <= 0xb20 goto l3_%=;"
+ "r1 = 0x0;"
+ "goto l4_%=;"
+"l3_%=:"
+ "r1 += r7;"
+"l4_%=:"
+ "if r1 == 0x0 goto l8_%=;"
+ "goto l9_%=;"
+"l2_%=:"
+ "r1 += r2;"
+ "r1 -= r7;"
+ "r1 += 0x10;"
+ "if r1 <= 0xb20 goto l6_%=;"
+ "r1 = 0x0;"
+ "goto l7_%=;"
+"l6_%=:"
+ "r1 += r7;"
+"l7_%=:"
+ "if r1 == 0x0 goto l8_%=;"
+ "goto l9_%=;"
+"l0_%=:"
+ "r1 = 0x3;"
+ "*(u64 *)(r10 - 0x10) = r1;"
+ "r2 = r1;"
+ "goto l1_%=;"
+"l8_%=:"
+ "r1 = r5;"
+ "r1 += 0x4;"
+ "r1 = *(u32 *)(r1 + 0x0);"
+ "*(u64 *)(r10 - 0x8) = r1;"
+"l1_%=:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c b/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c
index cb32b0cfc84b..a509cad97e69 100644
--- a/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c
+++ b/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c
@@ -47,6 +47,22 @@ int BPF_PROG(task_kfunc_syscall)
return 0;
}
+SEC("tracepoint")
+__success
+int BPF_PROG(task_kfunc_tracepoint)
+{
+ task_kfunc_load_test();
+ return 0;
+}
+
+SEC("perf_event")
+__success
+int BPF_PROG(task_kfunc_perf_event)
+{
+ task_kfunc_load_test();
+ return 0;
+}
+
/*****************
* cgroup kfuncs *
*****************/
@@ -85,6 +101,22 @@ int BPF_PROG(cgrp_kfunc_syscall)
return 0;
}
+SEC("tracepoint")
+__success
+int BPF_PROG(cgrp_kfunc_tracepoint)
+{
+ cgrp_kfunc_load_test();
+ return 0;
+}
+
+SEC("perf_event")
+__success
+int BPF_PROG(cgrp_kfunc_perf_event)
+{
+ cgrp_kfunc_load_test();
+ return 0;
+}
+
/******************
* cpumask kfuncs *
******************/
@@ -120,3 +152,19 @@ int BPF_PROG(cpumask_kfunc_syscall)
cpumask_kfunc_load_test();
return 0;
}
+
+SEC("tracepoint")
+__success
+int BPF_PROG(cpumask_kfunc_tracepoint)
+{
+ cpumask_kfunc_load_test();
+ return 0;
+}
+
+SEC("perf_event")
+__success
+int BPF_PROG(cpumask_kfunc_perf_event)
+{
+ cpumask_kfunc_load_test();
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
index d4427d8e1217..52edee41caf6 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -144,6 +144,118 @@ __naked void ldsx_s32_range(void)
: __clobber_all);
}
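+
+/* Sign-extending (BPF_MEMSX) loads from pointer-typed context fields
+ * such as data/data_end/data_meta are not supported by the ctx access
+ * conversion, so each LDSX below must be rejected with
+ * "invalid bpf_context access".
+ */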
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_1(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[xdp_md_data]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(xdp_md_data, offsetof(struct xdp_md, data))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_2(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[xdp_md_data_end]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("LDSX, xdp s32 xdp_md->data_meta")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_3(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
+ : __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_4(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
+ : __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_5(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
+SEC("tcx/ingress")
+__description("LDSX, tcx s32 __sk_buff->data_meta")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_6(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
+ : __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("LDSX, flow_dissector s32 __sk_buff->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_7(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
+ : __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("LDSX, flow_dissector s32 __sk_buff->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_8(void)
+{
+ asm volatile (
+ "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+ : __clobber_all);
+}
+
#else
SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_lsm.c b/tools/testing/selftests/bpf/progs/verifier_lsm.c
new file mode 100644
index 000000000000..32e5e779cb96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_lsm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
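+/* LSM return-value ranges enforced by the verifier: hooks that return
+ * an errno must stay within [-4095, 0], bool hooks within [0, 1], void
+ * hooks may return anything, and some hooks are disabled for BPF
+ * entirely.
+ */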
+SEC("lsm/file_alloc_security")
+__description("lsm bpf prog with -4095~0 retval. test 1")
+__success
+__naked int errno_zero_retval_test1(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_alloc_security")
+__description("lsm bpf prog with -4095~0 retval. test 2")
+__success
+__naked int errno_zero_retval_test2(void *ctx)
+{
+ asm volatile (
+ "r0 = -4095;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 4")
+__failure __msg("R0 has smin=-4096 smax=-4096 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test4(void *ctx)
+{
+ asm volatile (
+ "r0 = -4096;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 5")
+__failure __msg("R0 has smin=4096 smax=4096 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test5(void *ctx)
+{
+ asm volatile (
+ "r0 = 4096;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 6")
+__failure __msg("R0 has smin=1 smax=1 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test6(void *ctx)
+{
+ asm volatile (
+ "r0 = 1;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 1")
+__success
+__naked int bool_retval_test1(void *ctx)
+{
+ asm volatile (
+ "r0 = 1;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 2")
+__success
+__naked int bool_retval_test2(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 3")
+__failure __msg("R0 has smin=-1 smax=-1 should have been in [0, 1]")
+__naked int bool_retval_test3(void *ctx)
+{
+ asm volatile (
+ "r0 = -1;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 4")
+__failure __msg("R0 has smin=2 smax=2 should have been in [0, 1]")
+__naked int bool_retval_test4(void *ctx)
+{
+ asm volatile (
+ "r0 = 2;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_free_security")
+__success
+__description("lsm bpf prog with void retval. test 1")
+__naked int void_retval_test1(void *ctx)
+{
+ asm volatile (
+ "r0 = -4096;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/file_free_security")
+__success
+__description("lsm bpf prog with void retval. test 2")
+__naked int void_retval_test2(void *ctx)
+{
+ asm volatile (
+ "r0 = 4096;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/getprocattr")
+__description("lsm disabled hook: getprocattr")
+__failure __msg("points to disabled hook")
+__naked int disabled_hook_test1(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/setprocattr")
+__description("lsm disabled hook: setprocattr")
+__failure __msg("points to disabled hook")
+__naked int disabled_hook_test2(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+SEC("lsm/ismaclabel")
+__description("lsm disabled hook: ismaclabel")
+__failure __msg("points to disabled hook")
+__naked int disabled_hook_test3(void *ctx)
+{
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
index 13b29a7faa71..2ecf77b623e0 100644
--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -5,18 +5,27 @@
#include "bpf_misc.h"
/* Check that precision marks propagate through scalar IDs.
- * Registers r{0,1,2} have the same scalar ID at the moment when r0 is
- * marked to be precise, this mark is immediately propagated to r{1,2}.
+ * Registers r{0,1,2} have the same scalar ID.
+ * Range information is propagated for scalars sharing the same ID.
+ * Check that a precision mark for r0 causes precision marks for r{1,2}
+ * when range information is propagated for an 'if <reg> <op> <const>' insn.
*/
SEC("socket")
__success __log_level(2)
-__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10")
+/* first 'if' branch */
+__msg("6: (0f) r3 += r0")
+__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
-__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
-__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
-__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
+/* second 'if' branch */
+__msg("from 4 to 5: ")
+__msg("6: (0f) r3 += r0")
+__msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10")
+__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
+/* parent state already has r{0,1,2} as precise */
+__msg("frame0: parent state regs= stack=:")
__flag(BPF_F_TEST_STATE_FREQ)
-__naked void precision_same_state(void)
+__naked void linked_regs_bpf_k(void)
{
asm volatile (
/* r0 = random number up to 0xff */
@@ -25,7 +34,8 @@ __naked void precision_same_state(void)
/* tie r0.id == r1.id == r2.id */
"r1 = r0;"
"r2 = r0;"
- /* force r0 to be precise, this immediately marks r1 and r2 as
+ "if r1 > 7 goto +0;"
+ /* force r0 to be precise, this eventually marks r1 and r2 as
* precise as well because of shared IDs
*/
"r3 = r10;"
@@ -37,22 +47,17 @@ __naked void precision_same_state(void)
: __clobber_all);
}
-/* Same as precision_same_state, but mark propagates through state /
- * parent state boundary.
+/* Registers r{0,1,2} share the same ID when the 'if r1 > ...' insn is
+ * processed; check that the verifier marks r{1,2} as precise while
+ * backtracking 'if r1 > ...' with r0 already marked.
*/
SEC("socket")
__success __log_level(2)
-__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1")
-__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10")
-__msg("frame0: parent state regs=r0,r1,r2 stack=:")
-__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
-__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
-__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
-__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
-__msg("frame0: parent state regs=r0 stack=:")
-__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
-__naked void precision_cross_state(void)
+__msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
+__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
+__naked void linked_regs_bpf_x_src(void)
{
asm volatile (
/* r0 = random number up to 0xff */
@@ -61,13 +66,13 @@ __naked void precision_cross_state(void)
/* tie r0.id == r1.id == r2.id */
"r1 = r0;"
"r2 = r0;"
- /* force checkpoint */
- "goto +0;"
- /* force r0 to be precise, this immediately marks r1 and r2 as
+ "r3 = 7;"
+ "if r1 > r3 goto +0;"
+ /* force r0 to be precise, this eventually marks r1 and r2 as
* precise as well because of shared IDs
*/
- "r3 = r10;"
- "r3 += r0;"
+ "r4 = r10;"
+ "r4 += r0;"
"r0 = 0;"
"exit;"
:
@@ -75,19 +80,17 @@ __naked void precision_cross_state(void)
: __clobber_all);
}
-/* Same as precision_same_state, but break one of the
- * links, note that r1 is absent from regs=... in __msg below.
+/* Registers r{0,1,2} share the same ID when the 'if r1 > r3' insn is
+ * processed; check that the verifier marks r{0,1,2} as precise while
+ * backtracking 'if r1 > r3' with r3 already marked.
*/
SEC("socket")
__success __log_level(2)
-__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10")
-__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0")
-__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0")
-__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0")
-__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
-__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
-__naked void precision_same_state_broken_link(void)
+__msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
+__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
+__naked void linked_regs_bpf_x_dst(void)
{
asm volatile (
/* r0 = random number up to 0xff */
@@ -96,15 +99,13 @@ __naked void precision_same_state_broken_link(void)
/* tie r0.id == r1.id == r2.id */
"r1 = r0;"
"r2 = r0;"
- /* break link for r1, this is the only line that differs
- * compared to the previous test
- */
- "r1 = 0;"
- /* force r0 to be precise, this immediately marks r1 and r2 as
+ "r3 = 7;"
+ "if r1 > r3 goto +0;"
+ /* force r0 to be precise, this eventually marks r1 and r2 as
* precise as well because of shared IDs
*/
- "r3 = r10;"
- "r3 += r0;"
+ "r4 = r10;"
+ "r4 += r3;"
"r0 = 0;"
"exit;"
:
@@ -112,22 +113,18 @@ __naked void precision_same_state_broken_link(void)
: __clobber_all);
}
-/* Same as precision_same_state_broken_link, but with state /
- * parent state boundary.
+/* Same as linked_regs_bpf_k, but break one of the
+ * links, note that r1 is absent from regs=... in __msg below.
*/
SEC("socket")
__success __log_level(2)
-__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10")
-__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0")
-__msg("frame0: parent state regs=r0,r2 stack=:")
-__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
-__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
-__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
-__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
+__msg("7: (0f) r3 += r0")
+__msg("frame0: regs=r0 stack= before 6: (bf) r3 = r10")
__msg("frame0: parent state regs=r0 stack=:")
-__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
+__msg("frame0: regs=r0 stack= before 5: (25) if r0 > 0x7 goto pc+0")
+__msg("frame0: parent state regs=r0,r2 stack=:")
__flag(BPF_F_TEST_STATE_FREQ)
-__naked void precision_cross_state_broken_link(void)
+__naked void linked_regs_broken_link(void)
{
asm volatile (
/* r0 = random number up to 0xff */
@@ -136,18 +133,13 @@ __naked void precision_cross_state_broken_link(void)
/* tie r0.id == r1.id == r2.id */
"r1 = r0;"
"r2 = r0;"
- /* force checkpoint, although link between r1 and r{0,2} is
- * broken by the next statement current precision tracking
- * algorithm can't react to it and propagates mark for r1 to
- * the parent state.
- */
- "goto +0;"
/* break link for r1, this is the only line that differs
- * compared to precision_cross_state()
+ * compared to the previous test
*/
"r1 = 0;"
- /* force r0 to be precise, this immediately marks r1 and r2 as
- * precise as well because of shared IDs
+ "if r0 > 7 goto +0;"
+ /* force r0 to be precise,
+ * this eventually marks r2 as precise because of shared IDs
*/
"r3 = r10;"
"r3 += r0;"
@@ -164,10 +156,16 @@ __naked void precision_cross_state_broken_link(void)
*/
SEC("socket")
__success __log_level(2)
-__msg("11: (0f) r2 += r1")
+__msg("12: (0f) r2 += r1")
/* Current state */
-__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1")
-__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10")
+__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ")
+__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10")
+__msg("frame2: parent state regs=r1 stack=")
+__msg("frame1: parent state regs= stack=")
+__msg("frame0: parent state regs= stack=")
+/* Parent state */
+__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ")
+__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
* looks for all registers with frame2.r1.id in the current state
@@ -192,7 +190,7 @@ __msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
-__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0")
+__msg("frame0: regs=r1,r6 stack= before 3: (bf) r6 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
@@ -230,7 +228,8 @@ static __naked __noinline __used
void precision_many_frames__bar(void)
{
asm volatile (
- /* force r1 to be precise, this immediately marks:
+ "if r1 > 7 goto +0;"
+ /* force r1 to be precise, this eventually marks:
* - bar frame r1
* - foo frame r{1,6,7}
* - main frame r{1,6}
@@ -247,14 +246,16 @@ void precision_many_frames__bar(void)
*/
SEC("socket")
__success __log_level(2)
+__msg("11: (0f) r2 += r1")
/* foo frame */
-__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10")
+__msg("frame1: regs=r1 stack= before 10: (bf) r2 = r10")
+__msg("frame1: regs=r1 stack= before 9: (25) if r1 > 0x7 goto pc+0")
__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
/* main frame */
-__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
-__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
+__msg("frame0: regs=r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
+__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_stack(void)
@@ -283,7 +284,8 @@ void precision_stack__foo(void)
*/
"*(u64*)(r10 - 8) = r1;"
"*(u64*)(r10 - 16) = r1;"
- /* force r1 to be precise, this immediately marks:
+ "if r1 > 7 goto +0;"
+ /* force r1 to be precise, this eventually marks:
* - foo frame r1,fp{-8,-16}
* - main frame r1,fp{-8}
*/
@@ -299,15 +301,17 @@ void precision_stack__foo(void)
SEC("socket")
__success __log_level(2)
/* r{6,7} */
-__msg("11: (0f) r3 += r7")
-__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10")
+__msg("12: (0f) r3 += r7")
+__msg("frame0: regs=r7 stack= before 11: (bf) r3 = r10")
+__msg("frame0: regs=r7 stack= before 9: (25) if r7 > 0x7 goto pc+0")
/* ... skip some insns ... */
__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
/* r{8,9} */
-__msg("12: (0f) r3 += r9")
-__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7")
+__msg("13: (0f) r3 += r9")
+__msg("frame0: regs=r9 stack= before 12: (0f) r3 += r7")
/* ... skip some insns ... */
+__msg("frame0: regs=r9 stack= before 10: (25) if r9 > 0x7 goto pc+0")
__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
__flag(BPF_F_TEST_STATE_FREQ)
@@ -328,8 +332,9 @@ __naked void precision_two_ids(void)
"r9 = r0;"
/* clear r0 id */
"r0 = 0;"
- /* force checkpoint */
- "goto +0;"
+ /* propagate equal scalars precision */
+ "if r7 > 7 goto +0;"
+ "if r9 > 7 goto +0;"
"r3 = r10;"
/* force r7 to be precise, this also marks r6 */
"r3 += r7;"
@@ -341,6 +346,105 @@ __naked void precision_two_ids(void)
: __clobber_all);
}
+SEC("socket")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+/* check that r0 and r6 have different IDs after 'if',
+ * collect_linked_regs() can't tie more than 6 registers for a single insn.
+ */
+__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
+__msg("9: (bf) r6 = r6 ; R6_w=scalar(id=2")
+/* check that r{0-5} are marked precise after 'if' */
+__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
+__naked void linked_regs_too_many_regs(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r{0-6} IDs */
+ "r1 = r0;"
+ "r2 = r0;"
+ "r3 = r0;"
+ "r4 = r0;"
+ "r5 = r0;"
+ "r6 = r0;"
+ /* propagate range for r{0-6} */
+ "if r0 > 7 goto +0;"
+ /* make r6 appear in the log */
+ "r6 = r6;"
+ /* force r0 to be precise,
+ * this would cause r{0-4} to be precise because of shared IDs
+ */
+ "r7 = r10;"
+ "r7 += r0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("regs=r7 stack= before 5: (3d) if r8 >= r0")
+__msg("parent state regs=r0,r7,r8")
+__msg("regs=r0,r7,r8 stack= before 4: (25) if r0 > 0x1")
+__msg("div by zero")
+__naked void linked_regs_broken_link_2(void)
+{
+ asm volatile (
+ "call %[bpf_get_prandom_u32];"
+ "r7 = r0;"
+ "r8 = r0;"
+ "call %[bpf_get_prandom_u32];"
+ "if r0 > 1 goto +0;"
+ /* r7.id == r8.id,
+ * thus r7 precision implies r8 precision,
+ * which implies r0 precision because of the conditional below.
+ */
+ "if r8 >= r0 goto 1f;"
+ /* break id relation between r7 and r8 */
+ "r8 += r8;"
+ /* make r7 precise */
+ "if r7 == 0 goto 1f;"
+ "r0 /= 0;"
+"1:"
+ "r0 = 42;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* Check that mark_chain_precision() for one of the conditional jump
+ * operands does not trigger equal scalars precision propagation.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("3: (25) if r1 > 0x100 goto pc+0")
+__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
+__naked void cjmp_no_linked_regs_trigger(void)
+{
+ asm volatile (
+ /* r0 = random number up to 0xff */
+ "call %[bpf_ktime_get_ns];"
+ "r0 &= 0xff;"
+ /* tie r0.id == r1.id */
+ "r1 = r0;"
+ /* the jump below will be predicted, thus r1 will be marked precise;
+ * this should not imply a precision mark for r0
+ */
+ "if r1 > 256 goto +0;"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
/* Verify that check_ids() is used by regsafe() for scalars.
*
* r9 = ... some pointer with range X ...
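For context on the scalar-ID tests above: when the verifier copies a scalar
register it gives source and destination the same ID, and a later conditional
jump on any register carrying that ID lets sync_linked_regs() propagate the
learned range (and precision marks) to every other register still holding
that ID. A minimal sketch in the tests' own embedded-assembly style
(illustrative only, not part of the patch):

    "call %[bpf_ktime_get_ns];"  /* r0 = unknown scalar, assigned id=N     */
    "r1 = r0;"                   /* r1 inherits id=N, r0 and r1 are linked */
    "if r0 > 7 goto +0;"         /* range [0,7] learned for id=N is also   */
                                 /* applied to r1 by sync_linked_regs()    */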
diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
index 2a2271cf0294..148d2299e5b4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
+#include <limits.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
@@ -770,6 +771,444 @@ __naked void smod64_zero_divisor(void)
" ::: __clobber_all);
}
+SEC("socket")
+__description("SDIV64, overflow r/r, LLONG_MIN/-1")
+__success __retval(1)
+__arch_x86_64
+__xlated("0: r2 = 0x8000000000000000")
+__xlated("2: r3 = -1")
+__xlated("3: r4 = r2")
+__xlated("4: r11 = r3")
+__xlated("5: r11 += 1")
+__xlated("6: if r11 > 0x1 goto pc+4")
+__xlated("7: if r11 == 0x0 goto pc+1")
+__xlated("8: r2 = 0")
+__xlated("9: r2 = -r2")
+__xlated("10: goto pc+1")
+__xlated("11: r2 s/= r3")
+__xlated("12: r0 = 0")
+__xlated("13: if r2 != r4 goto pc+1")
+__xlated("14: r0 = 1")
+__xlated("15: exit")
+__naked void sdiv64_overflow_rr(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r3 = -1; \
+ r4 = r2; \
+ r2 s/= r3; \
+ r0 = 0; \
+ if r2 != r4 goto +1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, r/r, small_val/-1")
+__success __retval(-5)
+__arch_x86_64
+__xlated("0: r2 = 5")
+__xlated("1: r3 = -1")
+__xlated("2: r11 = r3")
+__xlated("3: r11 += 1")
+__xlated("4: if r11 > 0x1 goto pc+4")
+__xlated("5: if r11 == 0x0 goto pc+1")
+__xlated("6: r2 = 0")
+__xlated("7: r2 = -r2")
+__xlated("8: goto pc+1")
+__xlated("9: r2 s/= r3")
+__xlated("10: r0 = r2")
+__xlated("11: exit")
+__naked void sdiv64_rr_divisor_neg_1(void)
+{
+ asm volatile (" \
+ r2 = 5; \
+ r3 = -1; \
+ r2 s/= r3; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, overflow r/i, LLONG_MIN/-1")
+__success __retval(1)
+__arch_x86_64
+__xlated("0: r2 = 0x8000000000000000")
+__xlated("2: r4 = r2")
+__xlated("3: r2 = -r2")
+__xlated("4: r0 = 0")
+__xlated("5: if r2 != r4 goto pc+1")
+__xlated("6: r0 = 1")
+__xlated("7: exit")
+__naked void sdiv64_overflow_ri(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r4 = r2; \
+ r2 s/= -1; \
+ r0 = 0; \
+ if r2 != r4 goto +1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, r/i, small_val/-1")
+__success __retval(-5)
+__arch_x86_64
+__xlated("0: r2 = 5")
+__xlated("1: r4 = r2")
+__xlated("2: r2 = -r2")
+__xlated("3: r0 = r2")
+__xlated("4: exit")
+__naked void sdiv64_ri_divisor_neg_1(void)
+{
+ asm volatile (" \
+ r2 = 5; \
+ r4 = r2; \
+ r2 s/= -1; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, overflow r/r, INT_MIN/-1")
+__success __retval(1)
+__arch_x86_64
+__xlated("0: w2 = -2147483648")
+__xlated("1: w3 = -1")
+__xlated("2: w4 = w2")
+__xlated("3: r11 = r3")
+__xlated("4: w11 += 1")
+__xlated("5: if w11 > 0x1 goto pc+4")
+__xlated("6: if w11 == 0x0 goto pc+1")
+__xlated("7: w2 = 0")
+__xlated("8: w2 = -w2")
+__xlated("9: goto pc+1")
+__xlated("10: w2 s/= w3")
+__xlated("11: r0 = 0")
+__xlated("12: if w2 != w4 goto pc+1")
+__xlated("13: r0 = 1")
+__xlated("14: exit")
+__naked void sdiv32_overflow_rr(void)
+{
+ asm volatile (" \
+ w2 = %[int_min]; \
+ w3 = -1; \
+ w4 = w2; \
+ w2 s/= w3; \
+ r0 = 0; \
+ if w2 != w4 goto +1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, r/r, small_val/-1")
+__success __retval(5)
+__arch_x86_64
+__xlated("0: w2 = -5")
+__xlated("1: w3 = -1")
+__xlated("2: w4 = w2")
+__xlated("3: r11 = r3")
+__xlated("4: w11 += 1")
+__xlated("5: if w11 > 0x1 goto pc+4")
+__xlated("6: if w11 == 0x0 goto pc+1")
+__xlated("7: w2 = 0")
+__xlated("8: w2 = -w2")
+__xlated("9: goto pc+1")
+__xlated("10: w2 s/= w3")
+__xlated("11: w0 = w2")
+__xlated("12: exit")
+__naked void sdiv32_rr_divisor_neg_1(void)
+{
+ asm volatile (" \
+ w2 = -5; \
+ w3 = -1; \
+ w4 = w2; \
+ w2 s/= w3; \
+ w0 = w2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, overflow r/i, INT_MIN/-1")
+__success __retval(1)
+__arch_x86_64
+__xlated("0: w2 = -2147483648")
+__xlated("1: w4 = w2")
+__xlated("2: w2 = -w2")
+__xlated("3: r0 = 0")
+__xlated("4: if w2 != w4 goto pc+1")
+__xlated("5: r0 = 1")
+__xlated("6: exit")
+__naked void sdiv32_overflow_ri(void)
+{
+ asm volatile (" \
+ w2 = %[int_min]; \
+ w4 = w2; \
+ w2 s/= -1; \
+ r0 = 0; \
+ if w2 != w4 goto +1; \
+ r0 = 1; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, r/i, small_val/-1")
+__success __retval(-5)
+__arch_x86_64
+__xlated("0: w2 = 5")
+__xlated("1: w4 = w2")
+__xlated("2: w2 = -w2")
+__xlated("3: w0 = w2")
+__xlated("4: exit")
+__naked void sdiv32_ri_divisor_neg_1(void)
+{
+ asm volatile (" \
+ w2 = 5; \
+ w4 = w2; \
+ w2 s/= -1; \
+ w0 = w2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, overflow r/r, LLONG_MIN/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: r2 = 0x8000000000000000")
+__xlated("2: r3 = -1")
+__xlated("3: r4 = r2")
+__xlated("4: r11 = r3")
+__xlated("5: r11 += 1")
+__xlated("6: if r11 > 0x1 goto pc+3")
+__xlated("7: if r11 == 0x1 goto pc+3")
+__xlated("8: w2 = 0")
+__xlated("9: goto pc+1")
+__xlated("10: r2 s%= r3")
+__xlated("11: r0 = r2")
+__xlated("12: exit")
+__naked void smod64_overflow_rr(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r3 = -1; \
+ r4 = r2; \
+ r2 s%%= r3; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, r/r, small_val/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: r2 = 5")
+__xlated("1: r3 = -1")
+__xlated("2: r4 = r2")
+__xlated("3: r11 = r3")
+__xlated("4: r11 += 1")
+__xlated("5: if r11 > 0x1 goto pc+3")
+__xlated("6: if r11 == 0x1 goto pc+3")
+__xlated("7: w2 = 0")
+__xlated("8: goto pc+1")
+__xlated("9: r2 s%= r3")
+__xlated("10: r0 = r2")
+__xlated("11: exit")
+__naked void smod64_rr_divisor_neg_1(void)
+{
+ asm volatile (" \
+ r2 = 5; \
+ r3 = -1; \
+ r4 = r2; \
+ r2 s%%= r3; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, overflow r/i, LLONG_MIN/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: r2 = 0x8000000000000000")
+__xlated("2: r4 = r2")
+__xlated("3: w2 = 0")
+__xlated("4: r0 = r2")
+__xlated("5: exit")
+__naked void smod64_overflow_ri(void)
+{
+ asm volatile (" \
+ r2 = %[llong_min] ll; \
+ r4 = r2; \
+ r2 s%%= -1; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_const(llong_min, LLONG_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, r/i, small_val/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: r2 = 5")
+__xlated("1: r4 = r2")
+__xlated("2: w2 = 0")
+__xlated("3: r0 = r2")
+__xlated("4: exit")
+__naked void smod64_ri_divisor_neg_1(void)
+{
+ asm volatile (" \
+ r2 = 5; \
+ r4 = r2; \
+ r2 s%%= -1; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, overflow r/r, INT_MIN/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: w2 = -2147483648")
+__xlated("1: w3 = -1")
+__xlated("2: w4 = w2")
+__xlated("3: r11 = r3")
+__xlated("4: w11 += 1")
+__xlated("5: if w11 > 0x1 goto pc+3")
+__xlated("6: if w11 == 0x1 goto pc+4")
+__xlated("7: w2 = 0")
+__xlated("8: goto pc+1")
+__xlated("9: w2 s%= w3")
+__xlated("10: goto pc+1")
+__xlated("11: w2 = w2")
+__xlated("12: r0 = r2")
+__xlated("13: exit")
+__naked void smod32_overflow_rr(void)
+{
+ asm volatile (" \
+ w2 = %[int_min]; \
+ w3 = -1; \
+ w4 = w2; \
+ w2 s%%= w3; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, r/r, small_val/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: w2 = -5")
+__xlated("1: w3 = -1")
+__xlated("2: w4 = w2")
+__xlated("3: r11 = r3")
+__xlated("4: w11 += 1")
+__xlated("5: if w11 > 0x1 goto pc+3")
+__xlated("6: if w11 == 0x1 goto pc+4")
+__xlated("7: w2 = 0")
+__xlated("8: goto pc+1")
+__xlated("9: w2 s%= w3")
+__xlated("10: goto pc+1")
+__xlated("11: w2 = w2")
+__xlated("12: r0 = r2")
+__xlated("13: exit")
+__naked void smod32_rr_divisor_neg_1(void)
+{
+ asm volatile (" \
+ w2 = -5; \
+ w3 = -1; \
+ w4 = w2; \
+ w2 s%%= w3; \
+ r0 = r2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, overflow r/i, INT_MIN/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: w2 = -2147483648")
+__xlated("1: w4 = w2")
+__xlated("2: w2 = 0")
+__xlated("3: r0 = r2")
+__xlated("4: exit")
+__naked void smod32_overflow_ri(void)
+{
+ asm volatile (" \
+ w2 = %[int_min]; \
+ w4 = w2; \
+ w2 s%%= -1; \
+ r0 = r2; \
+ exit; \
+" :
+ : __imm_const(int_min, INT_MIN)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, r/i, small_val/-1")
+__success __retval(0)
+__arch_x86_64
+__xlated("0: w2 = 5")
+__xlated("1: w4 = w2")
+__xlated("2: w2 = 0")
+__xlated("3: w0 = w2")
+__xlated("4: exit")
+__naked void smod32_ri_divisor_neg_1(void)
+{
+ asm volatile (" \
+ w2 = 5; \
+ w4 = w2; \
+ w2 s%%= -1; \
+ w0 = w2; \
+ exit; \
+" :
+ :
+ : __clobber_all);
+}
+
#else
SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 85e48069c9e6..671d9f415dbf 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -402,7 +402,7 @@ __naked void spill_32bit_of_64bit_fail(void)
*(u32*)(r10 - 8) = r1; \
/* 32-bit fill r2 from stack. */ \
r2 = *(u32*)(r10 - 8); \
- /* Compare r2 with another register to trigger find_equal_scalars.\
+ /* Compare r2 with another register to trigger sync_linked_regs.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. If the ID was mistakenly preserved on spill, this would\
* cause the verifier to think that r1 is also equal to zero in one of\
@@ -441,7 +441,7 @@ __naked void spill_16bit_of_32bit_fail(void)
*(u16*)(r10 - 8) = r1; \
/* 16-bit fill r2 from stack. */ \
r2 = *(u16*)(r10 - 8); \
- /* Compare r2 with another register to trigger find_equal_scalars.\
+ /* Compare r2 with another register to trigger sync_linked_regs.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. If the ID was mistakenly preserved on spill, this would\
* cause the verifier to think that r1 is also equal to zero in one of\
@@ -833,7 +833,7 @@ __naked void spill_64bit_of_64bit_ok(void)
*(u64*)(r10 - 8) = r0; \
/* 64-bit fill r1 from stack - should preserve the ID. */\
r1 = *(u64*)(r10 - 8); \
- /* Compare r1 with another register to trigger find_equal_scalars.\
+ /* Compare r1 with another register to trigger sync_linked_regs.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. \
*/ \
@@ -866,7 +866,7 @@ __naked void spill_32bit_of_32bit_ok(void)
*(u32*)(r10 - 8) = r0; \
/* 32-bit fill r1 from stack - should preserve the ID. */\
r1 = *(u32*)(r10 - 8); \
- /* Compare r1 with another register to trigger find_equal_scalars.\
+ /* Compare r1 with another register to trigger sync_linked_regs.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. \
*/ \
@@ -899,7 +899,7 @@ __naked void spill_16bit_of_16bit_ok(void)
*(u16*)(r10 - 8) = r0; \
/* 16-bit fill r1 from stack - should preserve the ID. */\
r1 = *(u16*)(r10 - 8); \
- /* Compare r1 with another register to trigger find_equal_scalars.\
+ /* Compare r1 with another register to trigger sync_linked_regs.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. \
*/ \
@@ -932,7 +932,7 @@ __naked void spill_8bit_of_8bit_ok(void)
*(u8*)(r10 - 8) = r0; \
/* 8-bit fill r1 from stack - should preserve the ID. */\
r1 = *(u8*)(r10 - 8); \
- /* Compare r1 with another register to trigger find_equal_scalars.\
+ /* Compare r1 with another register to trigger sync_linked_regs.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. \
*/ \
@@ -1029,7 +1029,7 @@ __naked void fill_32bit_after_spill_64bit_preserve_id(void)
"r1 = *(u32*)(r10 - 4);"
#endif
" \
- /* Compare r1 with another register to trigger find_equal_scalars. */\
+ /* Compare r1 with another register to trigger sync_linked_regs. */\
r2 = 0; \
if r1 != r2 goto l0_%=; \
/* The result of this comparison is predefined. */\
@@ -1070,7 +1070,7 @@ __naked void fill_32bit_after_spill_64bit_clear_id(void)
"r2 = *(u32*)(r10 - 4);"
#endif
" \
- /* Compare r2 with another register to trigger find_equal_scalars.\
+ /* Compare r2 with another register to trigger sync_linked_regs.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. If the ID was mistakenly preserved on fill, this would\
* cause the verifier to think that r1 is also equal to zero in one of\
@@ -1213,10 +1213,10 @@ __success __log_level(2)
* - once for path entry - label 2;
* - once for path entry - label 1 - label 2.
*/
-__msg("r1 = *(u64 *)(r10 -8)")
-__msg("exit")
-__msg("r1 = *(u64 *)(r10 -8)")
-__msg("exit")
+__msg("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg("9: (95) exit")
+__msg("from 2 to 7")
+__msg("8: safe")
__msg("processed 11 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void old_stack_misc_vs_cur_ctx_ptr(void)
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
index 6a6fad625f7e..9d415f7ce599 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -278,7 +278,7 @@ __msg("mark_precise: frame0: last_idx 14 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
-__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
+__msg("mark_precise: frame0: regs=r0,r6 stack= before 10: (bf) r6 = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
/* State entering callback body popped from states stack */
__msg("from 9 to 17: frame1:")
diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c b/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c
new file mode 100644
index 000000000000..8d60c634a114
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+int main(void);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __array(values, void (void));
+} jmp_table SEC(".maps") = {
+ .values = {
+ [0] = (void *) &main,
+ },
+};
+
+__noinline __auxiliary
+static __naked int sub(void)
+{
+ asm volatile (
+ "r2 = %[jmp_table] ll;"
+ "r3 = 0;"
+ "call 12;"
+ "exit;"
+ :
+ : __imm_addr(jmp_table)
+ : __clobber_all);
+}
+
+__success
+__arch_x86_64
+/* program entry for main(), regular function prologue */
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" xorq %rax, %rax")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+/* tail call prologue for program:
+ * - establish memory location for tail call counter at &rbp[-8];
+ * - spill tail_call_cnt_ptr at &rbp[-16];
+ * - expect tail call counter to be passed in rax;
+ * - for entry program rax is a raw counter, value < 33;
+ * - for tail called program rax is tail_call_cnt_ptr (value > 33).
+ */
+__jited(" endbr64")
+__jited(" cmpq $0x21, %rax")
+__jited(" ja L0")
+__jited(" pushq %rax")
+__jited(" movq %rsp, %rax")
+__jited(" jmp L1")
+__jited("L0: pushq %rax") /* rbp[-8] = rax */
+__jited("L1: pushq %rax") /* rbp[-16] = rax */
+/* on subprogram call restore rax to be tail_call_cnt_ptr from rbp[-16]
+ * (because the original rax might be clobbered by this point)
+ */
+__jited(" movq -0x10(%rbp), %rax")
+__jited(" callq 0x{{.*}}") /* call to sub() */
+__jited(" xorl %eax, %eax")
+__jited(" leave")
+__jited(" {{(retq|jmp 0x)}}") /* return or jump to rethunk */
+__jited("...")
+/* subprogram entry for sub(), regular function prologue */
+__jited(" endbr64")
+__jited(" nopl (%rax,%rax)")
+__jited(" nopl (%rax)")
+__jited(" pushq %rbp")
+__jited(" movq %rsp, %rbp")
+/* tail call prologue for subprogram: address of tail call counter
+ * is stored at rbp[-16].
+ */
+__jited(" endbr64")
+__jited(" pushq %rax") /* rbp[-8] = rax */
+__jited(" pushq %rax") /* rbp[-16] = rax */
+__jited(" movabsq ${{.*}}, %rsi") /* r2 = &jmp_table */
+__jited(" xorl %edx, %edx") /* r3 = 0 */
+/* bpf_tail_call implementation:
+ * - load tail_call_cnt_ptr from rbp[-16];
+ * - if *tail_call_cnt_ptr < 33, increment it and jump to target;
+ * - otherwise do nothing.
+ */
+__jited(" movq -0x10(%rbp), %rax")
+__jited(" cmpq $0x21, (%rax)")
+__jited(" jae L0")
+__jited(" nopl (%rax,%rax)")
+__jited(" addq $0x1, (%rax)") /* *tail_call_cnt_ptr += 1 */
+__jited(" popq %rax")
+__jited(" popq %rax")
+__jited(" jmp {{.*}}") /* jump to tail call tgt */
+__jited("L0: leave")
+__jited(" {{(retq|jmp 0x)}}") /* return or jump to rethunk */
+SEC("tc")
+__naked int main(void)
+{
+ asm volatile (
+ "call %[sub];"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm(sub)
+ : __clobber_all);
+}
+
+char __license[] SEC("license") = "GPL";
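A rough C model of the counter hand-off that the __jited comments above
describe (names and layout are illustrative; the real code is emitted by the
x86 JIT):

    typedef unsigned long long u64;

    struct frame {
            u64 cnt;      /* rbp[-8]:  counter storage for entry programs */
            u64 cnt_ptr;  /* rbp[-16]: spilled tail_call_cnt_ptr          */
    };

    static u64 *prologue(u64 rax, struct frame *fp)
    {
            u64 *cnt_ptr;

            if (rax <= 33) {                /* entry program: raw counter */
                    fp->cnt = rax;          /* counter lives in this frame */
                    cnt_ptr = &fp->cnt;
            } else {                        /* tail-called: already a pointer */
                    cnt_ptr = (u64 *)rax;
            }
            fp->cnt_ptr = (u64)cnt_ptr;     /* spilled for bpf_tail_call */
            return cnt_ptr;
    }

    static int tail_call_allowed(struct frame *fp)
    {
            u64 *cnt_ptr = (u64 *)fp->cnt_ptr;

            if (*cnt_ptr >= 33)
                    return 0;               /* limit reached: fall through */
            *cnt_ptr += 1;
            return 1;                       /* jump to the tail call target */
    }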
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
new file mode 100644
index 000000000000..a7c0a553aa50
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+static char buf[64];
+
+SEC("lsm.s/file_open")
+__success
+int BPF_PROG(get_task_exe_file_and_put_kfunc_from_current_sleepable)
+{
+ struct file *acquired;
+
+ acquired = bpf_get_task_exe_file(bpf_get_current_task_btf());
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm/file_open")
+__success
+int BPF_PROG(get_task_exe_file_and_put_kfunc_from_current_non_sleepable, struct file *file)
+{
+ struct file *acquired;
+
+ acquired = bpf_get_task_exe_file(bpf_get_current_task_btf());
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/task_alloc")
+__success
+int BPF_PROG(get_task_exe_file_and_put_kfunc_from_argument,
+ struct task_struct *task)
+{
+ struct file *acquired;
+
+ acquired = bpf_get_task_exe_file(task);
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/inode_getattr")
+__success
+int BPF_PROG(path_d_path_from_path_argument, struct path *path)
+{
+ int ret;
+
+ ret = bpf_path_d_path(path, buf, sizeof(buf));
+ __sink(ret);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__success
+int BPF_PROG(path_d_path_from_file_argument, struct file *file)
+{
+ int ret;
+ struct path *path;
+
+ /* The f_path member is a path which is embedded directly within a
+ * file. Therefore, a pointer to such an embedded member is still
+ * recognized by the BPF verifier as PTR_TRUSTED, since it's
+ * essentially PTR_TRUSTED w/ a non-zero fixed offset.
+ */
+ path = &file->f_path;
+ ret = bpf_path_d_path(path, buf, sizeof(buf));
+ __sink(ret);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
new file mode 100644
index 000000000000..d6d3f4fcb24c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google LLC. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <linux/limits.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+static char buf[PATH_MAX];
+
+SEC("lsm.s/file_open")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(get_task_exe_file_kfunc_null)
+{
+ struct file *acquired;
+
+ /* Can't pass a NULL pointer to bpf_get_task_exe_file(). */
+ acquired = bpf_get_task_exe_file(NULL);
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/inode_getxattr")
+__failure __msg("arg#0 pointer type STRUCT task_struct must point to scalar, or struct with scalar")
+int BPF_PROG(get_task_exe_file_kfunc_fp)
+{
+ u64 x;
+ struct file *acquired;
+ struct task_struct *task;
+
+ task = (struct task_struct *)&x;
+ /* Can't pass random frame pointer to bpf_get_task_exe_file(). */
+ acquired = bpf_get_task_exe_file(task);
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("R1 must be referenced or trusted")
+int BPF_PROG(get_task_exe_file_kfunc_untrusted)
+{
+ struct file *acquired;
+ struct task_struct *parent;
+
+ /* Walking a trusted struct task_struct returned from
+ * bpf_get_current_task_btf() yields an untrusted pointer.
+ */
+ parent = bpf_get_current_task_btf()->parent;
+ /* Can't pass untrusted pointer to bpf_get_task_exe_file(). */
+ acquired = bpf_get_task_exe_file(parent);
+ if (!acquired)
+ return 0;
+
+ bpf_put_file(acquired);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("Unreleased reference")
+int BPF_PROG(get_task_exe_file_kfunc_unreleased)
+{
+ struct file *acquired;
+
+ acquired = bpf_get_task_exe_file(bpf_get_current_task_btf());
+ if (!acquired)
+ return 0;
+
+ /* Acquired but never released. */
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("release kernel function bpf_put_file expects")
+int BPF_PROG(put_file_kfunc_unacquired, struct file *file)
+{
+ /* Can't release an unacquired pointer. */
+ bpf_put_file(file);
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(path_d_path_kfunc_null)
+{
+ /* Can't pass NULL value to bpf_path_d_path() kfunc. */
+ bpf_path_d_path(NULL, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/task_alloc")
+__failure __msg("R1 must be referenced or trusted")
+int BPF_PROG(path_d_path_kfunc_untrusted_from_argument, struct task_struct *task)
+{
+ struct path *root;
+
+ /* Walking a trusted argument typically yields an untrusted
+ * pointer. This is one example of that.
+ */
+ root = &task->fs->root;
+ bpf_path_d_path(root, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("R1 must be referenced or trusted")
+int BPF_PROG(path_d_path_kfunc_untrusted_from_current)
+{
+ struct path *pwd;
+ struct task_struct *current;
+
+ current = bpf_get_current_task_btf();
+ /* Walking a trusted pointer returned from bpf_get_current_task_btf()
+ * yields an untrusted pointer.
+ */
+ pwd = &current->fs->pwd;
+ bpf_path_d_path(pwd, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("kernel function bpf_path_d_path args#0 expected pointer to STRUCT path but R1 has a pointer to STRUCT file")
+int BPF_PROG(path_d_path_kfunc_type_mismatch, struct file *file)
+{
+ bpf_path_d_path((struct path *)&file->f_task_work, buf, sizeof(buf));
+ return 0;
+}
+
+SEC("lsm.s/file_open")
+__failure __msg("invalid access to map value, value_size=4096 off=0 size=8192")
+int BPF_PROG(path_d_path_kfunc_invalid_buf_sz, struct file *file)
+{
+ /* bpf_path_d_path() enforces a constraint on the buffer size supplied
+ * by the BPF LSM program via the __sz annotation. buf here is set to
+ * PATH_MAX, so let's ensure that the BPF verifier rejects BPF_PROG_LOAD
+ * attempts when the supplied size does not match the actual size of
+ * the buffer.
+ */
+ bpf_path_d_path(&file->f_path, buf, PATH_MAX * 2);
+ return 0;
+}
+
+SEC("fentry/vfs_open")
+__failure __msg("calling kernel function bpf_path_d_path is not allowed")
+int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f)
+{
+ /* Calling bpf_path_d_path() from a non-LSM BPF program isn't permitted.
+ */
+ bpf_path_d_path(path, buf, sizeof(buf));
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c
index d037262c8937..682dda8dabbc 100644
--- a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c
+++ b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c
@@ -10,19 +10,19 @@ struct {
__uint(value_size, sizeof(int));
} tx_port SEC(".maps");
-SEC("redirect_map_0")
+SEC("xdp")
int xdp_redirect_map_0(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 0, 0);
}
-SEC("redirect_map_1")
+SEC("xdp")
int xdp_redirect_map_1(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 1, 0);
}
-SEC("redirect_map_2")
+SEC("xdp")
int xdp_redirect_map_2(struct xdp_md *xdp)
{
return bpf_redirect_map(&tx_port, 2, 0);
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
deleted file mode 100644
index 0861ea60dcdd..000000000000
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <assert.h>
-#include <bpf/bpf.h>
-#include <linux/filter.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/sysinfo.h>
-
-#include "bpf_util.h"
-#include "cgroup_helpers.h"
-#include "testing_helpers.h"
-
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
-
-#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/"
-
-int main(int argc, char **argv)
-{
- struct bpf_insn prog[] = {
- BPF_LD_MAP_FD(BPF_REG_1, 0), /* percpu map fd */
- BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
-
- BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
- BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- };
- size_t insns_cnt = ARRAY_SIZE(prog);
- int error = EXIT_FAILURE;
- int map_fd, percpu_map_fd, prog_fd, cgroup_fd;
- struct bpf_cgroup_storage_key key;
- unsigned long long value;
- unsigned long long *percpu_value;
- int cpu, nproc;
-
- nproc = bpf_num_possible_cpus();
- percpu_value = malloc(sizeof(*percpu_value) * nproc);
- if (!percpu_value) {
- printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
- goto err;
- }
-
- /* Use libbpf 1.0 API mode */
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
-
- map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key),
- sizeof(value), 0, NULL);
- if (map_fd < 0) {
- printf("Failed to create map: %s\n", strerror(errno));
- goto out;
- }
-
- percpu_map_fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL,
- sizeof(key), sizeof(value), 0, NULL);
- if (percpu_map_fd < 0) {
- printf("Failed to create map: %s\n", strerror(errno));
- goto out;
- }
-
- prog[0].imm = percpu_map_fd;
- prog[7].imm = map_fd;
- prog_fd = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
- prog, insns_cnt, "GPL", 0,
- bpf_log_buf, BPF_LOG_BUF_SIZE);
- if (prog_fd < 0) {
- printf("Failed to load bpf program: %s\n", bpf_log_buf);
- goto out;
- }
-
- cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
-
- /* Attach the bpf program */
- if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
- printf("Failed to attach bpf program\n");
- goto err;
- }
-
- if (bpf_map_get_next_key(map_fd, NULL, &key)) {
- printf("Failed to get the first key in cgroup storage\n");
- goto err;
- }
-
- if (bpf_map_lookup_elem(map_fd, &key, &value)) {
- printf("Failed to lookup cgroup storage 0\n");
- goto err;
- }
-
- for (cpu = 0; cpu < nproc; cpu++)
- percpu_value[cpu] = 1000;
-
- if (bpf_map_update_elem(percpu_map_fd, &key, percpu_value, 0)) {
- printf("Failed to update the data in the cgroup storage\n");
- goto err;
- }
-
- /* Every second packet should be dropped */
- assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
- assert(system("ping localhost -c 1 -W 1 -q > /dev/null"));
- assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
-
- /* Check the counter in the cgroup local storage */
- if (bpf_map_lookup_elem(map_fd, &key, &value)) {
- printf("Failed to lookup cgroup storage\n");
- goto err;
- }
-
- if (value != 3) {
- printf("Unexpected data in the cgroup storage: %llu\n", value);
- goto err;
- }
-
- /* Bump the counter in the cgroup local storage */
- value++;
- if (bpf_map_update_elem(map_fd, &key, &value, 0)) {
- printf("Failed to update the data in the cgroup storage\n");
- goto err;
- }
-
- /* Every second packet should be dropped */
- assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
- assert(system("ping localhost -c 1 -W 1 -q > /dev/null"));
- assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0);
-
- /* Check the final value of the counter in the cgroup local storage */
- if (bpf_map_lookup_elem(map_fd, &key, &value)) {
- printf("Failed to lookup the cgroup storage\n");
- goto err;
- }
-
- if (value != 7) {
- printf("Unexpected data in the cgroup storage: %llu\n", value);
- goto err;
- }
-
- /* Check the final value of the counter in the percpu local storage */
-
- for (cpu = 0; cpu < nproc; cpu++)
- percpu_value[cpu] = 0;
-
- if (bpf_map_lookup_elem(percpu_map_fd, &key, percpu_value)) {
- printf("Failed to lookup the per-cpu cgroup storage\n");
- goto err;
- }
-
- value = 0;
- for (cpu = 0; cpu < nproc; cpu++)
- value += percpu_value[cpu];
-
- if (value != nproc * 1000 + 6) {
- printf("Unexpected data in the per-cpu cgroup storage\n");
- goto err;
- }
-
- error = 0;
- printf("test_cgroup_storage:PASS\n");
-
-err:
- cleanup_cgroup_environment();
- free(percpu_value);
-
-out:
- return error;
-}
diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp
index dde0bb16e782..abc2a56ab261 100644
--- a/tools/testing/selftests/bpf/test_cpp.cpp
+++ b/tools/testing/selftests/bpf/test_cpp.cpp
@@ -6,6 +6,10 @@
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
+
+#ifndef _Bool
+#define _Bool bool
+#endif
#include "test_core_extern.skel.h"
#include "struct_ops_module.skel.h"
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
deleted file mode 100644
index adeaf63cb6fa..000000000000
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2017 Facebook
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <assert.h>
-#include <sys/time.h>
-
-#include <linux/bpf.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "cgroup_helpers.h"
-#include "testing_helpers.h"
-
-#define DEV_CGROUP_PROG "./dev_cgroup.bpf.o"
-
-#define TEST_CGROUP "/test-bpf-based-device-cgroup/"
-
-int main(int argc, char **argv)
-{
- struct bpf_object *obj;
- int error = EXIT_FAILURE;
- int prog_fd, cgroup_fd;
- __u32 prog_cnt;
-
- /* Use libbpf 1.0 API mode */
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
-
- if (bpf_prog_test_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
- &obj, &prog_fd)) {
- printf("Failed to load DEV_CGROUP program\n");
- goto out;
- }
-
- cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
- if (cgroup_fd < 0) {
- printf("Failed to create test cgroup\n");
- goto out;
- }
-
- /* Attach bpf program */
- if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE, 0)) {
- printf("Failed to attach DEV_CGROUP program");
- goto err;
- }
-
- if (bpf_prog_query(cgroup_fd, BPF_CGROUP_DEVICE, 0, NULL, NULL,
- &prog_cnt)) {
- printf("Failed to query attached programs");
- goto err;
- }
-
- /* All operations with /dev/zero and and /dev/urandom are allowed,
- * everything else is forbidden.
- */
- assert(system("rm -f /tmp/test_dev_cgroup_null") == 0);
- assert(system("mknod /tmp/test_dev_cgroup_null c 1 3"));
- assert(system("rm -f /tmp/test_dev_cgroup_null") == 0);
-
- /* /dev/zero is whitelisted */
- assert(system("rm -f /tmp/test_dev_cgroup_zero") == 0);
- assert(system("mknod /tmp/test_dev_cgroup_zero c 1 5") == 0);
- assert(system("rm -f /tmp/test_dev_cgroup_zero") == 0);
-
- assert(system("dd if=/dev/urandom of=/dev/zero count=64") == 0);
-
- /* src is allowed, target is forbidden */
- assert(system("dd if=/dev/urandom of=/dev/full count=64"));
-
- /* src is forbidden, target is allowed */
- assert(system("dd if=/dev/random of=/dev/zero count=64"));
-
- error = 0;
- printf("test_dev_cgroup:PASS\n");
-
-err:
- cleanup_cgroup_environment();
-
-out:
- return error;
-}
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index f14e10b0de96..3e9b009580d4 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -7,8 +7,10 @@
#include <bpf/btf.h>
#include "autoconf_helper.h"
+#include "disasm_helpers.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"
+#include "jit_disasm_helpers.h"
#define str_has_pfx(str, pfx) \
(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
@@ -18,11 +20,11 @@
#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
-#define TEST_TAG_EXPECT_REGEX_PFX "comment:test_expect_regex="
+#define TEST_TAG_EXPECT_XLATED_PFX "comment:test_expect_xlated="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
-#define TEST_TAG_EXPECT_REGEX_PFX_UNPRIV "comment:test_expect_regex_unpriv="
+#define TEST_TAG_EXPECT_XLATED_PFX_UNPRIV "comment:test_expect_xlated_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
@@ -31,6 +33,9 @@
#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"
#define TEST_BTF_PATH "comment:test_btf_path="
+#define TEST_TAG_ARCH "comment:test_arch="
+#define TEST_TAG_JITED_PFX "comment:test_jited="
+#define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv="
/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xcafe4all
@@ -51,15 +56,22 @@ enum mode {
struct expect_msg {
const char *substr; /* substring match */
- const char *regex_str; /* regex-based match */
regex_t regex;
+ bool is_regex;
+ bool on_next_line;
+};
+
+struct expected_msgs {
+ struct expect_msg *patterns;
+ size_t cnt;
};
struct test_subspec {
char *name;
bool expect_failure;
- struct expect_msg *expect_msgs;
- size_t expect_msg_cnt;
+ struct expected_msgs expect_msgs;
+ struct expected_msgs expect_xlated;
+ struct expected_msgs jited;
int retval;
bool execute;
};
@@ -72,6 +84,7 @@ struct test_spec {
int log_level;
int prog_flags;
int mode_mask;
+ int arch_mask;
bool auxiliary;
bool valid;
};
@@ -96,61 +109,152 @@ void test_loader_fini(struct test_loader *tester)
free(tester->log_buf);
}
-static void free_test_spec(struct test_spec *spec)
+static void free_msgs(struct expected_msgs *msgs)
{
int i;
+ for (i = 0; i < msgs->cnt; i++)
+ if (msgs->patterns[i].is_regex)
+ regfree(&msgs->patterns[i].regex);
+ free(msgs->patterns);
+ msgs->patterns = NULL;
+ msgs->cnt = 0;
+}
+
+static void free_test_spec(struct test_spec *spec)
+{
/* Deallocate expect_msgs arrays. */
- for (i = 0; i < spec->priv.expect_msg_cnt; i++)
- if (spec->priv.expect_msgs[i].regex_str)
- regfree(&spec->priv.expect_msgs[i].regex);
- for (i = 0; i < spec->unpriv.expect_msg_cnt; i++)
- if (spec->unpriv.expect_msgs[i].regex_str)
- regfree(&spec->unpriv.expect_msgs[i].regex);
+ free_msgs(&spec->priv.expect_msgs);
+ free_msgs(&spec->unpriv.expect_msgs);
+ free_msgs(&spec->priv.expect_xlated);
+ free_msgs(&spec->unpriv.expect_xlated);
+ free_msgs(&spec->priv.jited);
+ free_msgs(&spec->unpriv.jited);
free(spec->priv.name);
free(spec->unpriv.name);
- free(spec->priv.expect_msgs);
- free(spec->unpriv.expect_msgs);
-
spec->priv.name = NULL;
spec->unpriv.name = NULL;
- spec->priv.expect_msgs = NULL;
- spec->unpriv.expect_msgs = NULL;
}
-static int push_msg(const char *substr, const char *regex_str, struct test_subspec *subspec)
+/* Compiles a matching pattern into a regular expression.
+ * Pattern has a special syntax:
+ *
+ * pattern := (<verbatim text> | regex)*
+ * regex := "{{" <posix extended regular expression> "}}"
+ *
+ * In other words, pattern is a verbatim text with inclusion
+ * of regular expressions enclosed in "{{" "}}" pairs.
+ * For example, pattern "foo{{[0-9]+}}" matches strings like
+ * "foo0", "foo007", etc.
+ */
+static int compile_regex(const char *pattern, regex_t *regex)
+{
+ char err_buf[256], buf[256] = {}, *ptr, *buf_end;
+ const char *original_pattern = pattern;
+ bool in_regex = false;
+ int err;
+
+ buf_end = buf + sizeof(buf);
+ ptr = buf;
+ while (*pattern && ptr < buf_end - 2) {
+ if (!in_regex && str_has_pfx(pattern, "{{")) {
+ in_regex = true;
+ pattern += 2;
+ continue;
+ }
+ if (in_regex && str_has_pfx(pattern, "}}")) {
+ in_regex = false;
+ pattern += 2;
+ continue;
+ }
+ if (in_regex) {
+ *ptr++ = *pattern++;
+ continue;
+ }
+ /* list of characters that need escaping for extended posix regex */
+ if (strchr(".[]\\()*+?{}|^$", *pattern)) {
+ *ptr++ = '\\';
+ *ptr++ = *pattern++;
+ continue;
+ }
+ *ptr++ = *pattern++;
+ }
+ if (*pattern) {
+ PRINT_FAIL("Regexp too long: '%s'\n", original_pattern);
+ return -EINVAL;
+ }
+ if (in_regex) {
+ PRINT_FAIL("Regexp has open '{{' but no closing '}}': '%s'\n", original_pattern);
+ return -EINVAL;
+ }
+ err = regcomp(regex, buf, REG_EXTENDED | REG_NEWLINE);
+ if (err != 0) {
+ regerror(err, regex, err_buf, sizeof(err_buf));
+ PRINT_FAIL("Regexp compilation error in '%s': '%s'\n", buf, err_buf);
+ return -EINVAL;
+ }
+ return 0;
+}
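A usage sketch for the syntax described above (illustrative, not part of the
patch): verbatim text is escaped, "{{...}}" spans are kept as POSIX ERE.

    #include <regex.h>

    regex_t re;

    if (compile_regex("foo{{[0-9]+}}", &re) == 0) {
            /* matches "foo0", "foo007", ... */
            int ok = regexec(&re, "foo007", 0, NULL, 0) == 0;
            regfree(&re);
    }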
+
+static int __push_msg(const char *pattern, bool on_next_line, struct expected_msgs *msgs)
{
- void *tmp;
- int regcomp_res;
- char error_msg[100];
struct expect_msg *msg;
+ void *tmp;
+ int err;
- tmp = realloc(subspec->expect_msgs,
- (1 + subspec->expect_msg_cnt) * sizeof(struct expect_msg));
+ tmp = realloc(msgs->patterns,
+ (1 + msgs->cnt) * sizeof(struct expect_msg));
if (!tmp) {
ASSERT_FAIL("failed to realloc memory for messages\n");
return -ENOMEM;
}
- subspec->expect_msgs = tmp;
- msg = &subspec->expect_msgs[subspec->expect_msg_cnt];
+ msgs->patterns = tmp;
+ msg = &msgs->patterns[msgs->cnt];
+ msg->on_next_line = on_next_line;
+ msg->substr = pattern;
+ msg->is_regex = false;
+ if (strstr(pattern, "{{")) {
+ err = compile_regex(pattern, &msg->regex);
+ if (err)
+ return err;
+ msg->is_regex = true;
+ }
+ msgs->cnt += 1;
+ return 0;
+}
- if (substr) {
- msg->substr = substr;
- msg->regex_str = NULL;
- } else {
- msg->regex_str = regex_str;
- msg->substr = NULL;
- regcomp_res = regcomp(&msg->regex, regex_str, REG_EXTENDED|REG_NEWLINE);
- if (regcomp_res != 0) {
- regerror(regcomp_res, &msg->regex, error_msg, sizeof(error_msg));
- PRINT_FAIL("Regexp compilation error in '%s': '%s'\n",
- regex_str, error_msg);
- return -EINVAL;
- }
+static int clone_msgs(struct expected_msgs *from, struct expected_msgs *to)
+{
+ struct expect_msg *msg;
+ int i, err;
+
+ for (i = 0; i < from->cnt; i++) {
+ msg = &from->patterns[i];
+ err = __push_msg(msg->substr, msg->on_next_line, to);
+ if (err)
+ return err;
}
+ return 0;
+}
+
+static int push_msg(const char *substr, struct expected_msgs *msgs)
+{
+ return __push_msg(substr, false, msgs);
+}
- subspec->expect_msg_cnt += 1;
+static int push_disasm_msg(const char *regex_str, bool *on_next_line, struct expected_msgs *msgs)
+{
+ int err;
+
+ if (strcmp(regex_str, "...") == 0) {
+ *on_next_line = false;
+ return 0;
+ }
+ err = __push_msg(regex_str, *on_next_line, msgs);
+ if (err)
+ return err;
+ *on_next_line = true;
return 0;
}
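In test annotations this gives __xlated/__jited expectations line-accurate
matching with explicit gaps; a usage sketch (assuming the bpf_misc.h macros):

    __xlated("0: r2 = 5")    /* anchors the match                          */
    __xlated("1: r3 = -1")   /* must match the very next disassembly line  */
    __xlated("...")          /* gap: next pattern may match any later line */
    __xlated("11: exit")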
@@ -202,6 +306,54 @@ static void update_flags(int *flags, int flag, bool clear)
*flags |= flag;
}
+/* Matches a string of form '<pfx>[^=]=.*' and returns its suffix.
+ * Used to parse btf_decl_tag values.
+ * Such values require a unique prefix because the compiler does not add
+ * the same __attribute__((btf_decl_tag(...))) twice.
+ * Test suite uses two-component tags for such cases:
+ *
+ * <pfx> __COUNTER__ '='
+ *
+ * For example, two consecutive __msg tags '__msg("foo") __msg("foo")'
+ * would be encoded as:
+ *
+ * [18] DECL_TAG 'comment:test_expect_msg=0=foo' type_id=15 component_idx=-1
+ * [19] DECL_TAG 'comment:test_expect_msg=1=foo' type_id=15 component_idx=-1
+ *
+ * And the purpose of this function is to extract 'foo' from the above.
+ */
+static const char *skip_dynamic_pfx(const char *s, const char *pfx)
+{
+ const char *msg;
+
+ if (strncmp(s, pfx, strlen(pfx)) != 0)
+ return NULL;
+ msg = s + strlen(pfx);
+ msg = strchr(msg, '=');
+ if (!msg)
+ return NULL;
+ return msg + 1;
+}
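For example (an illustrative call, mirroring the DECL_TAG example above):

    const char *msg = skip_dynamic_pfx("comment:test_expect_msg=0=foo",
                                       "comment:test_expect_msg=");
    /* msg now points at "foo"; NULL if the prefix or '=' is missing */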
+
+enum arch {
+ ARCH_UNKNOWN = 0x1,
+ ARCH_X86_64 = 0x2,
+ ARCH_ARM64 = 0x4,
+ ARCH_RISCV64 = 0x8,
+};
+
+static int get_current_arch(void)
+{
+#if defined(__x86_64__)
+ return ARCH_X86_64;
+#elif defined(__aarch64__)
+ return ARCH_ARM64;
+#elif defined(__riscv) && __riscv_xlen == 64
+ return ARCH_RISCV64;
+#endif
+ return ARCH_UNKNOWN;
+}
+
/* Uses btf_decl_tag attributes to describe the expected test
* behavior, see bpf_misc.h for detailed description of each attribute
* and attribute combinations.
@@ -214,8 +366,15 @@ static int parse_test_spec(struct test_loader *tester,
const char *description = NULL;
bool has_unpriv_result = false;
bool has_unpriv_retval = false;
+ bool unpriv_xlated_on_next_line = true;
+ bool xlated_on_next_line = true;
+ bool unpriv_jit_on_next_line;
+ bool jit_on_next_line;
+ bool collect_jit = false;
int func_id, i, err = 0;
+ u32 arch_mask = 0;
struct btf *btf;
+ enum arch arch;
memset(spec, 0, sizeof(*spec));
@@ -270,27 +429,49 @@ static int parse_test_spec(struct test_loader *tester,
} else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) {
spec->auxiliary = true;
spec->mode_mask |= UNPRIV;
- } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
- msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
- err = push_msg(msg, NULL, &spec->priv);
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX))) {
+ err = push_msg(msg, &spec->priv.expect_msgs);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
- } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
- msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
- err = push_msg(msg, NULL, &spec->unpriv);
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV))) {
+ err = push_msg(msg, &spec->unpriv.expect_msgs);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
- } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX)) {
- msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX) - 1;
- err = push_msg(NULL, msg, &spec->priv);
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX))) {
+ if (arch_mask == 0) {
+ PRINT_FAIL("__jited used before __arch_*");
+ goto cleanup;
+ }
+ if (collect_jit) {
+ err = push_disasm_msg(msg, &jit_on_next_line,
+ &spec->priv.jited);
+ if (err)
+ goto cleanup;
+ spec->mode_mask |= PRIV;
+ }
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX_UNPRIV))) {
+ if (arch_mask == 0) {
+ PRINT_FAIL("__unpriv_jited used before __arch_*");
+ goto cleanup;
+ }
+ if (collect_jit) {
+ err = push_disasm_msg(msg, &unpriv_jit_on_next_line,
+ &spec->unpriv.jited);
+ if (err)
+ goto cleanup;
+ spec->mode_mask |= UNPRIV;
+ }
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) {
+ err = push_disasm_msg(msg, &xlated_on_next_line,
+ &spec->priv.expect_xlated);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
- } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV)) {
- msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX_UNPRIV) - 1;
- err = push_msg(NULL, msg, &spec->unpriv);
+ } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) {
+ err = push_disasm_msg(msg, &unpriv_xlated_on_next_line,
+ &spec->unpriv.expect_xlated);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;
@@ -341,11 +522,30 @@ static int parse_test_spec(struct test_loader *tester,
goto cleanup;
update_flags(&spec->prog_flags, flags, clear);
}
+ } else if (str_has_pfx(s, TEST_TAG_ARCH)) {
+ val = s + sizeof(TEST_TAG_ARCH) - 1;
+ if (strcmp(val, "X86_64") == 0) {
+ arch = ARCH_X86_64;
+ } else if (strcmp(val, "ARM64") == 0) {
+ arch = ARCH_ARM64;
+ } else if (strcmp(val, "RISCV64") == 0) {
+ arch = ARCH_RISCV64;
+ } else {
+ PRINT_FAIL("bad arch spec: '%s'", val);
+ err = -EINVAL;
+ goto cleanup;
+ }
+ arch_mask |= arch;
+ collect_jit = get_current_arch() == arch;
+ unpriv_jit_on_next_line = true;
+ jit_on_next_line = true;
} else if (str_has_pfx(s, TEST_BTF_PATH)) {
spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1;
}
}
+ spec->arch_mask = arch_mask ?: -1;
+
if (spec->mode_mask == 0)
spec->mode_mask = PRIV;
@@ -387,15 +587,12 @@ static int parse_test_spec(struct test_loader *tester,
spec->unpriv.execute = spec->priv.execute;
}
- if (!spec->unpriv.expect_msgs) {
- for (i = 0; i < spec->priv.expect_msg_cnt; i++) {
- struct expect_msg *msg = &spec->priv.expect_msgs[i];
-
- err = push_msg(msg->substr, msg->regex_str, &spec->unpriv);
- if (err)
- goto cleanup;
- }
- }
+ if (spec->unpriv.expect_msgs.cnt == 0)
+ clone_msgs(&spec->priv.expect_msgs, &spec->unpriv.expect_msgs);
+ if (spec->unpriv.expect_xlated.cnt == 0)
+ clone_msgs(&spec->priv.expect_xlated, &spec->unpriv.expect_xlated);
+ if (spec->unpriv.jited.cnt == 0)
+ clone_msgs(&spec->priv.jited, &spec->unpriv.jited);
}
spec->valid = true;
@@ -434,7 +631,6 @@ static void prepare_case(struct test_loader *tester,
bpf_program__set_flags(prog, prog_flags | spec->prog_flags);
tester->log_buf[0] = '\0';
- tester->next_match_pos = 0;
}
static void emit_verifier_log(const char *log_buf, bool force)
@@ -444,46 +640,84 @@ static void emit_verifier_log(const char *log_buf, bool force)
fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
}
-static void validate_case(struct test_loader *tester,
- struct test_subspec *subspec,
- struct bpf_object *obj,
- struct bpf_program *prog,
- int load_err)
+static void emit_xlated(const char *xlated, bool force)
{
- int i, j, err;
- char *match;
+ if (!force && env.verbosity == VERBOSE_NONE)
+ return;
+ fprintf(stdout, "XLATED:\n=============\n%s=============\n", xlated);
+}
+
+static void emit_jited(const char *jited, bool force)
+{
+ if (!force && env.verbosity == VERBOSE_NONE)
+ return;
+ fprintf(stdout, "JITED:\n=============\n%s=============\n", jited);
+}
+
+static void validate_msgs(char *log_buf, struct expected_msgs *msgs,
+ void (*emit_fn)(const char *buf, bool force))
+{
+ const char *log = log_buf, *prev_match;
regmatch_t reg_match[1];
+ int prev_match_line;
+ int match_line;
+ int i, j, err;
- for (i = 0; i < subspec->expect_msg_cnt; i++) {
- struct expect_msg *msg = &subspec->expect_msgs[i];
+ prev_match_line = -1;
+ match_line = 0;
+ prev_match = log;
+ for (i = 0; i < msgs->cnt; i++) {
+ struct expect_msg *msg = &msgs->patterns[i];
+ const char *match = NULL, *pat_status;
+ bool wrong_line = false;
- if (msg->substr) {
- match = strstr(tester->log_buf + tester->next_match_pos, msg->substr);
+ if (!msg->is_regex) {
+ match = strstr(log, msg->substr);
if (match)
- tester->next_match_pos = match - tester->log_buf + strlen(msg->substr);
+ log = match + strlen(msg->substr);
} else {
- err = regexec(&msg->regex,
- tester->log_buf + tester->next_match_pos, 1, reg_match, 0);
+ err = regexec(&msg->regex, log, 1, reg_match, 0);
if (err == 0) {
- match = tester->log_buf + tester->next_match_pos + reg_match[0].rm_so;
- tester->next_match_pos += reg_match[0].rm_eo;
- } else {
- match = NULL;
+ match = log + reg_match[0].rm_so;
+ log += reg_match[0].rm_eo;
}
}
- if (!ASSERT_OK_PTR(match, "expect_msg")) {
+ if (match) {
+ for (; prev_match < match; ++prev_match)
+ if (*prev_match == '\n')
+ ++match_line;
+ wrong_line = msg->on_next_line && prev_match_line >= 0 &&
+ prev_match_line + 1 != match_line;
+ }
+
+ if (!match || wrong_line) {
+ PRINT_FAIL("expect_msg\n");
if (env.verbosity == VERBOSE_NONE)
- emit_verifier_log(tester->log_buf, true /*force*/);
+ emit_fn(log_buf, true /*force*/);
for (j = 0; j <= i; j++) {
- msg = &subspec->expect_msgs[j];
+ msg = &msgs->patterns[j];
+ if (j < i)
+ pat_status = "MATCHED ";
+ else if (wrong_line)
+ pat_status = "WRONG LINE";
+ else
+ pat_status = "EXPECTED ";
fprintf(stderr, "%s %s: '%s'\n",
- j < i ? "MATCHED " : "EXPECTED",
- msg->substr ? "SUBSTR" : " REGEX",
- msg->substr ?: msg->regex_str);
+ pat_status,
+ msg->is_regex ? " REGEX" : "SUBSTR",
+ msg->substr);
}
- return;
+ if (wrong_line) {
+ fprintf(stderr,
+ "expecting match at line %d, actual match is at line %d\n",
+ prev_match_line + 1, match_line);
+ }
+ break;
}
+
+ prev_match_line = match_line;
}
}
@@ -611,6 +845,37 @@ static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subs
return true;
}
+/* Get a disassembly of a BPF program after the verifier applies all rewrites */
+static int get_xlated_program_text(int prog_fd, char *text, size_t text_sz)
+{
+ struct bpf_insn *insn_start = NULL, *insn, *insn_end;
+ __u32 insns_cnt = 0, i;
+ char buf[64];
+ FILE *out = NULL;
+ int err;
+
+ err = get_xlated_program(prog_fd, &insn_start, &insns_cnt);
+ if (!ASSERT_OK(err, "get_xlated_program"))
+ goto out;
+ out = fmemopen(text, text_sz, "w");
+ if (!ASSERT_OK_PTR(out, "fmemopen"))
+ goto out;
+ insn_end = insn_start + insns_cnt;
+ insn = insn_start;
+ while (insn < insn_end) {
+ i = insn - insn_start;
+ insn = disasm_insn(insn, buf, sizeof(buf));
+ fprintf(out, "%d: %s\n", i, buf);
+ }
+ fflush(out);
+
+out:
+ free(insn_start);
+ if (out)
+ fclose(out);
+ return err;
+}
+
/* this function is forced noinline and has short generic name to look better
* in test_progs output (in case of a failure)
*/
@@ -625,16 +890,23 @@ void run_subtest(struct test_loader *tester,
{
struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
struct bpf_program *tprog = NULL, *tprog_iter;
+ struct bpf_link *link, *links[32] = {};
struct test_spec *spec_iter;
struct cap_state caps = {};
struct bpf_object *tobj;
struct bpf_map *map;
int retval, err, i;
+ int links_cnt = 0;
bool should_load;
if (!test__start_subtest(subspec->name))
return;
+ if ((get_current_arch() & spec->arch_mask) == 0) {
+ test__skip();
+ return;
+ }
+
if (unpriv) {
if (!can_execute_unpriv(tester, spec)) {
test__skip();
@@ -695,9 +967,32 @@ void run_subtest(struct test_loader *tester,
goto tobj_cleanup;
}
}
-
emit_verifier_log(tester->log_buf, false /*force*/);
- validate_case(tester, subspec, tobj, tprog, err);
+ validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log);
+
+ if (subspec->expect_xlated.cnt) {
+ err = get_xlated_program_text(bpf_program__fd(tprog),
+ tester->log_buf, tester->log_buf_sz);
+ if (err)
+ goto tobj_cleanup;
+ emit_xlated(tester->log_buf, false /*force*/);
+ validate_msgs(tester->log_buf, &subspec->expect_xlated, emit_xlated);
+ }
+
+ if (subspec->jited.cnt) {
+ err = get_jited_program_text(bpf_program__fd(tprog),
+ tester->log_buf, tester->log_buf_sz);
+ if (err == -EOPNOTSUPP) {
+ printf("%s:SKIP: jited programs disassembly is not supported,\n", __func__);
+ printf("%s:SKIP: tests are built w/o LLVM development libs\n", __func__);
+ test__skip();
+ goto tobj_cleanup;
+ }
+ if (!ASSERT_EQ(err, 0, "get_jited_program_text"))
+ goto tobj_cleanup;
+ emit_jited(tester->log_buf, false /*force*/);
+ validate_msgs(tester->log_buf, &subspec->jited, emit_jited);
+ }
if (should_do_test_run(spec, subspec)) {
/* For some reason test_verifier executes programs
@@ -706,6 +1001,26 @@ void run_subtest(struct test_loader *tester,
if (restore_capabilities(&caps))
goto tobj_cleanup;
+ /* Do bpf_map__attach_struct_ops() for each struct_ops map.
+ * This should trigger the bpf_struct_ops->reg callback on the kernel side.
+ */
+ bpf_object__for_each_map(map, tobj) {
+ if (!bpf_map__autocreate(map) ||
+ bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+ if (links_cnt >= ARRAY_SIZE(links)) {
+ PRINT_FAIL("too many struct_ops maps");
+ goto tobj_cleanup;
+ }
+ link = bpf_map__attach_struct_ops(map);
+ if (!link) {
+ PRINT_FAIL("bpf_map__attach_struct_ops failed for map %s: err=%d\n",
+ bpf_map__name(map), -errno);
+ goto tobj_cleanup;
+ }
+ links[links_cnt++] = link;
+ }
+
if (tester->pre_execution_cb) {
err = tester->pre_execution_cb(tobj);
if (err) {
@@ -720,9 +1035,14 @@ void run_subtest(struct test_loader *tester,
PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
goto tobj_cleanup;
}
+ /* destroy links so the next run re-attaches the struct_ops maps */
+ while (links_cnt > 0)
+ bpf_link__destroy(links[--links_cnt]);
}
tobj_cleanup:
+ while (links_cnt > 0)
+ bpf_link__destroy(links[--links_cnt]);
bpf_object__close(tobj);
subtest_cleanup:
test__end_subtest();
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 4d0650cfb5cd..fda7589c5023 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -126,7 +126,8 @@ static int sched_next_online(int pid, int *next_to_try)
while (next < nr_cpus) {
CPU_ZERO(&cpuset);
- CPU_SET(next++, &cpuset);
+ CPU_SET(next, &cpuset);
+ next++;
if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
ret = 0;
break;
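
The split looks cosmetic, but CPU_SET() is a macro and on some libcs (musl's bounds-checked variant, for one) it expands its cpu argument more than once, so `CPU_SET(next++, ...)` can both set the wrong bit and advance `next` twice. A contrived sketch of the failure mode, with a hypothetical macro standing in for such an implementation:

    #include <stdio.h>

    /* Hypothetical macro that, like some CPU_SET() implementations,
     * evaluates its first argument more than once. */
    #define SET_BIT(i, mask) ((i) < 64 ? (*(mask) |= 1UL << (i)) : 0)

    int main(void)
    {
    	unsigned long mask = 0;
    	int next = 0;

    	SET_BIT(next++, &mask); /* 'next++' runs twice: sets bit 1, not bit 0 */
    	printf("mask=%#lx next=%d\n", mask, next); /* mask=0x2 next=2 */
    	return 0;
    }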
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index dfbab214f4d1..905d5981ace1 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1515,7 +1515,7 @@ again:
value == key);
}
- /* Now let's delete all elemenets in parallel. */
+ /* Now let's delete all elements in parallel. */
data[1] = DO_DELETE;
run_parallel(TASKS, test_update_delete, data);
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 89ff704e9dad..c7a70e1a1085 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -10,7 +10,6 @@
#include <sched.h>
#include <signal.h>
#include <string.h>
-#include <execinfo.h> /* backtrace */
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
@@ -19,6 +18,25 @@
#include <bpf/btf.h>
#include "json_writer.h"
+#include "network_helpers.h"
+
+#ifdef __GLIBC__
+#include <execinfo.h> /* backtrace */
+#endif
+
+/* Default backtrace funcs if missing at link */
+__weak int backtrace(void **buffer, int size)
+{
+ return 0;
+}
+
+__weak void backtrace_symbols_fd(void *const *buffer, int size, int fd)
+{
+ dprintf(fd, "<backtrace not supported>\n");
+}
+
+int env_verbosity = 0;
+
static bool verbose(void)
{
return env.verbosity > VERBOSE_NONE;
@@ -37,15 +55,15 @@ static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
stdout = open_memstream(log_buf, log_cnt);
if (!stdout) {
- stdout = env.stdout;
+ stdout = env.stdout_saved;
perror("open_memstream");
return;
}
if (env.subtest_state)
- env.subtest_state->stdout = stdout;
+ env.subtest_state->stdout_saved = stdout;
else
- env.test_state->stdout = stdout;
+ env.test_state->stdout_saved = stdout;
stderr = stdout;
#endif
@@ -59,8 +77,8 @@ static void stdio_hijack(char **log_buf, size_t *log_cnt)
return;
}
- env.stdout = stdout;
- env.stderr = stderr;
+ env.stdout_saved = stdout;
+ env.stderr_saved = stderr;
stdio_hijack_init(log_buf, log_cnt);
#endif
@@ -77,13 +95,13 @@ static void stdio_restore_cleanup(void)
fflush(stdout);
if (env.subtest_state) {
- fclose(env.subtest_state->stdout);
- env.subtest_state->stdout = NULL;
- stdout = env.test_state->stdout;
- stderr = env.test_state->stdout;
+ fclose(env.subtest_state->stdout_saved);
+ env.subtest_state->stdout_saved = NULL;
+ stdout = env.test_state->stdout_saved;
+ stderr = env.test_state->stdout_saved;
} else {
- fclose(env.test_state->stdout);
- env.test_state->stdout = NULL;
+ fclose(env.test_state->stdout_saved);
+ env.test_state->stdout_saved = NULL;
}
#endif
}
@@ -96,13 +114,13 @@ static void stdio_restore(void)
return;
}
- if (stdout == env.stdout)
+ if (stdout == env.stdout_saved)
return;
stdio_restore_cleanup();
- stdout = env.stdout;
- stderr = env.stderr;
+ stdout = env.stdout_saved;
+ stderr = env.stderr_saved;
#endif
}
@@ -141,6 +159,7 @@ struct prog_test_def {
void (*run_serial_test)(void);
bool should_run;
bool need_cgroup_cleanup;
+ bool should_tmon;
};
/* Override C runtime library's usleep() implementation to ensure nanosleep()
@@ -178,46 +197,59 @@ static bool should_run(struct test_selector *sel, int num, const char *name)
return num < sel->num_set_len && sel->num_set[num];
}
-static bool should_run_subtest(struct test_selector *sel,
- struct test_selector *subtest_sel,
- int subtest_num,
- const char *test_name,
- const char *subtest_name)
+static bool match_subtest(struct test_filter_set *filter,
+ const char *test_name,
+ const char *subtest_name)
{
int i, j;
- for (i = 0; i < sel->blacklist.cnt; i++) {
- if (glob_match(test_name, sel->blacklist.tests[i].name)) {
- if (!sel->blacklist.tests[i].subtest_cnt)
- return false;
-
- for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
- if (glob_match(subtest_name,
- sel->blacklist.tests[i].subtests[j]))
- return false;
- }
- }
- }
-
- for (i = 0; i < sel->whitelist.cnt; i++) {
- if (glob_match(test_name, sel->whitelist.tests[i].name)) {
- if (!sel->whitelist.tests[i].subtest_cnt)
+ for (i = 0; i < filter->cnt; i++) {
+ if (glob_match(test_name, filter->tests[i].name)) {
+ if (!filter->tests[i].subtest_cnt)
return true;
- for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
+ for (j = 0; j < filter->tests[i].subtest_cnt; j++) {
if (glob_match(subtest_name,
- sel->whitelist.tests[i].subtests[j]))
+ filter->tests[i].subtests[j]))
return true;
}
}
}
+ return false;
+}
+
+static bool should_run_subtest(struct test_selector *sel,
+ struct test_selector *subtest_sel,
+ int subtest_num,
+ const char *test_name,
+ const char *subtest_name)
+{
+ if (match_subtest(&sel->blacklist, test_name, subtest_name))
+ return false;
+
+ if (match_subtest(&sel->whitelist, test_name, subtest_name))
+ return true;
+
if (!sel->whitelist.cnt && !subtest_sel->num_set)
return true;
return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}
+static bool should_tmon(struct test_selector *sel, const char *name)
+{
+ int i;
+
+ for (i = 0; i < sel->whitelist.cnt; i++) {
+ if (glob_match(name, sel->whitelist.tests[i].name) &&
+ !sel->whitelist.tests[i].subtest_cnt)
+ return true;
+ }
+
+ return false;
+}
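
match_subtest() treats each filter entry as a test-name glob with an optional list of subtest globs: a bare test pattern covers every subtest of that test, otherwise both levels must match. A self-contained approximation, substituting fnmatch() for the harness's own glob_match() and simplified stand-in types for test_filter_set:

    #include <fnmatch.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for test_progs' filter structures. */
    struct filter_entry {
    	const char *test_glob;
    	const char **subtest_globs; /* NULL-terminated, may be NULL */
    };

    static bool entry_matches(const struct filter_entry *e,
    			  const char *test, const char *subtest)
    {
    	int i;

    	if (fnmatch(e->test_glob, test, 0) != 0)
    		return false;
    	if (!e->subtest_globs) /* bare test pattern: all subtests match */
    		return true;
    	for (i = 0; e->subtest_globs[i]; i++)
    		if (fnmatch(e->subtest_globs[i], subtest, 0) == 0)
    			return true;
    	return false;
    }

    int main(void)
    {
    	const char *subs[] = { "lock_*", NULL };
    	struct filter_entry e = { "spin_lock", subs };

    	printf("%d\n", entry_matches(&e, "spin_lock", "lock_basic")); /* 1 */
    	printf("%d\n", entry_matches(&e, "spin_lock", "unlock"));     /* 0 */
    	return 0;
    }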
+
static char *test_result(bool failed, bool skipped)
{
return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
@@ -230,25 +262,25 @@ static void print_test_result(const struct prog_test_def *test, const struct tes
int skipped_cnt = test_state->skip_cnt;
int subtests_cnt = test_state->subtest_num;
- fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
+ fprintf(env.stdout_saved, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
if (test_state->error_cnt)
- fprintf(env.stdout, "FAIL");
+ fprintf(env.stdout_saved, "FAIL");
else if (!skipped_cnt)
- fprintf(env.stdout, "OK");
+ fprintf(env.stdout_saved, "OK");
else if (skipped_cnt == subtests_cnt || !subtests_cnt)
- fprintf(env.stdout, "SKIP");
+ fprintf(env.stdout_saved, "SKIP");
else
- fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
+ fprintf(env.stdout_saved, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
- fprintf(env.stdout, "\n");
+ fprintf(env.stdout_saved, "\n");
}
static void print_test_log(char *log_buf, size_t log_cnt)
{
log_buf[log_cnt] = '\0';
- fprintf(env.stdout, "%s", log_buf);
+ fprintf(env.stdout_saved, "%s", log_buf);
if (log_buf[log_cnt - 1] != '\n')
- fprintf(env.stdout, "\n");
+ fprintf(env.stdout_saved, "\n");
}
static void print_subtest_name(int test_num, int subtest_num,
@@ -259,14 +291,14 @@ static void print_subtest_name(int test_num, int subtest_num,
snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
- fprintf(env.stdout, "#%-*s %s/%s",
+ fprintf(env.stdout_saved, "#%-*s %s/%s",
TEST_NUM_WIDTH, test_num_str,
test_name, subtest_name);
if (result)
- fprintf(env.stdout, ":%s", result);
+ fprintf(env.stdout_saved, ":%s", result);
- fprintf(env.stdout, "\n");
+ fprintf(env.stdout_saved, "\n");
}
static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
@@ -451,7 +483,7 @@ bool test__start_subtest(const char *subtest_name)
memset(subtest_state, 0, sub_state_size);
if (!subtest_name || !subtest_name[0]) {
- fprintf(env.stderr,
+ fprintf(env.stderr_saved,
"Subtest #%d didn't provide sub-test name!\n",
state->subtest_num);
return false;
@@ -459,7 +491,7 @@ bool test__start_subtest(const char *subtest_name)
subtest_state->name = strdup(subtest_name);
if (!subtest_state->name) {
- fprintf(env.stderr,
+ fprintf(env.stderr_saved,
"Subtest #%d: failed to copy subtest name!\n",
state->subtest_num);
return false;
@@ -474,6 +506,10 @@ bool test__start_subtest(const char *subtest_name)
return false;
}
+ subtest_state->should_tmon = match_subtest(&env.tmon_selector.whitelist,
+ test->test_name,
+ subtest_name);
+
env.subtest_state = subtest_state;
stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
@@ -610,6 +646,92 @@ out:
return err;
}
+struct netns_obj {
+ char *nsname;
+ struct tmonitor_ctx *tmon;
+ struct nstoken *nstoken;
+};
+
+/* Create a new network namespace with the given name.
+ *
+ * Create a new network namespace and, if the argument "open" is true,
+ * switch the current process into it. This function should be paired
+ * with netns_free() to release the resource and delete the namespace.
+ *
+ * It also implements the functionality of the option "-m" by starting a
+ * traffic monitor in the background to capture the packets in this network
+ * namespace if the current test or subtest matches the pattern.
+ *
+ * nsname: the name of the network namespace to create.
+ * open: open the network namespace if true.
+ *
+ * Return: the network namespace object on success, NULL on failure.
+ */
+struct netns_obj *netns_new(const char *nsname, bool open)
+{
+ struct netns_obj *netns_obj = malloc(sizeof(*netns_obj));
+ const char *test_name, *subtest_name;
+ int r;
+
+ if (!netns_obj)
+ return NULL;
+ memset(netns_obj, 0, sizeof(*netns_obj));
+
+ netns_obj->nsname = strdup(nsname);
+ if (!netns_obj->nsname)
+ goto fail;
+
+ /* Create the network namespace */
+ r = make_netns(nsname);
+ if (r)
+ goto fail;
+
+ /* Start traffic monitor */
+ if (env.test->should_tmon ||
+ (env.subtest_state && env.subtest_state->should_tmon)) {
+ test_name = env.test->test_name;
+ subtest_name = env.subtest_state ? env.subtest_state->name : NULL;
+ netns_obj->tmon = traffic_monitor_start(nsname, test_name, subtest_name);
+ if (!netns_obj->tmon) {
+ fprintf(stderr, "Failed to start traffic monitor for %s\n", nsname);
+ goto fail;
+ }
+ } else {
+ netns_obj->tmon = NULL;
+ }
+
+ if (open) {
+ netns_obj->nstoken = open_netns(nsname);
+ if (!netns_obj->nstoken)
+ goto fail;
+ }
+
+ return netns_obj;
+fail:
+ traffic_monitor_stop(netns_obj->tmon);
+ remove_netns(nsname);
+ free(netns_obj->nsname);
+ free(netns_obj);
+ return NULL;
+}
+
+/* Delete the network namespace.
+ *
+ * This function should be paired with netns_new() to delete the namespace
+ * created by netns_new().
+ */
+void netns_free(struct netns_obj *netns_obj)
+{
+ if (!netns_obj)
+ return;
+ traffic_monitor_stop(netns_obj->tmon);
+ close_netns(netns_obj->nstoken);
+ remove_netns(netns_obj->nsname);
+ free(netns_obj->nsname);
+ free(netns_obj);
+}
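
A minimal usage sketch of the pairing, assuming a test body inside test_progs (the helper names come from the diff above; the test itself and its namespace name are hypothetical):

    /* Hypothetical test body: create an isolated netns, enter it, and let
     * netns_free() tear down the monitor, the token, and the namespace. */
    void test_netns_example(void)
    {
    	struct netns_obj *ns;

    	ns = netns_new("test_ns_example", true /* enter the namespace */);
    	if (!ASSERT_OK_PTR(ns, "netns_new"))
    		return;

    	/* ... run socket/XDP/tc logic confined to "test_ns_example" ... */

    	netns_free(ns); /* stops the traffic monitor, exits and deletes the ns */
    }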
+
/* extern declarations for test funcs */
#define DEFINE_TEST(name) \
extern void test_##name(void) __weak; \
@@ -653,7 +775,8 @@ enum ARG_KEYS {
ARG_TEST_NAME_GLOB_DENYLIST = 'd',
ARG_NUM_WORKERS = 'j',
ARG_DEBUG = -1,
- ARG_JSON_SUMMARY = 'J'
+ ARG_JSON_SUMMARY = 'J',
+ ARG_TRAFFIC_MONITOR = 'm',
};
static const struct argp_option opts[] = {
@@ -680,6 +803,10 @@ static const struct argp_option opts[] = {
{ "debug", ARG_DEBUG, NULL, 0,
"print extra debug information for test_progs." },
{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
+#ifdef TRAFFIC_MONITOR
+ { "traffic-monitor", ARG_TRAFFIC_MONITOR, "NAMES", 0,
+ "Monitor network traffic of tests with name matching the pattern (supports '*' wildcard)." },
+#endif
{},
};
@@ -848,6 +975,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return -EINVAL;
}
}
+ env_verbosity = env->verbosity;
if (verbose()) {
if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
@@ -891,6 +1019,18 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
break;
case ARGP_KEY_END:
break;
+#ifdef TRAFFIC_MONITOR
+ case ARG_TRAFFIC_MONITOR:
+ if (arg[0] == '@')
+ err = parse_test_list_file(arg + 1,
+ &env->tmon_selector.whitelist,
+ true);
+ else
+ err = parse_test_list(arg,
+ &env->tmon_selector.whitelist,
+ true);
+ break;
+#endif
default:
return ARGP_ERR_UNKNOWN;
}
@@ -1029,7 +1169,7 @@ void crash_handler(int signum)
sz = backtrace(bt, ARRAY_SIZE(bt));
- if (env.stdout)
+ if (env.stdout_saved)
stdio_restore();
if (env.test) {
env.test_state->error_cnt++;
@@ -1345,7 +1485,7 @@ static void calculate_summary_and_print_errors(struct test_env *env)
if (env->json) {
w = jsonw_new(env->json);
if (!w)
- fprintf(env->stderr, "Failed to create new JSON stream.");
+ fprintf(env->stderr_saved, "Failed to create new JSON stream.");
}
if (w) {
@@ -1360,7 +1500,7 @@ static void calculate_summary_and_print_errors(struct test_env *env)
/*
* We only print error logs summary when there are failed tests and
- * verbose mode is not enabled. Otherwise, results may be incosistent.
+ * verbose mode is not enabled. Otherwise, results may be inconsistent.
*
*/
if (!verbose() && fail_cnt) {
@@ -1694,8 +1834,8 @@ int main(int argc, char **argv)
return -1;
}
- env.stdout = stdout;
- env.stderr = stderr;
+ env.stdout_saved = stdout;
+ env.stderr_saved = stderr;
env.has_testmod = true;
if (!env.list_test_names) {
@@ -1703,7 +1843,7 @@ int main(int argc, char **argv)
unload_bpf_testmod(verbose());
if (load_bpf_testmod(verbose())) {
- fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
+ fprintf(env.stderr_saved, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
env.has_testmod = false;
}
}
@@ -1722,6 +1862,8 @@ int main(int argc, char **argv)
test->test_num, test->test_name, test->test_name, test->test_name);
exit(EXIT_ERR_SETUP_INFRA);
}
+ if (test->should_run)
+ test->should_tmon = should_tmon(&env.tmon_selector, test->test_name);
}
/* ignore workers if we are just listing */
@@ -1731,7 +1873,7 @@ int main(int argc, char **argv)
/* launch workers if requested */
env.worker_id = -1; /* main process */
if (env.workers) {
- env.worker_pids = calloc(sizeof(__pid_t), env.workers);
+ env.worker_pids = calloc(sizeof(pid_t), env.workers);
env.worker_socks = calloc(sizeof(int), env.workers);
if (env.debug)
fprintf(stdout, "Launching %d workers.\n", env.workers);
@@ -1781,7 +1923,7 @@ int main(int argc, char **argv)
}
if (env.list_test_names) {
- fprintf(env.stdout, "%s\n", test->test_name);
+ fprintf(env.stdout_saved, "%s\n", test->test_name);
env.succ_cnt++;
continue;
}
@@ -1806,6 +1948,7 @@ out:
free_test_selector(&env.test_selector);
free_test_selector(&env.subtest_selector);
+ free_test_selector(&env.tmon_selector);
free_test_states();
if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 51341d50213b..7767d9a825ae 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -74,8 +74,9 @@ struct subtest_state {
int error_cnt;
bool skipped;
bool filtered;
+ bool should_tmon;
- FILE *stdout;
+ FILE *stdout_saved;
};
struct test_state {
@@ -92,12 +93,15 @@ struct test_state {
size_t log_cnt;
char *log_buf;
- FILE *stdout;
+ FILE *stdout_saved;
};
+extern int env_verbosity;
+
struct test_env {
struct test_selector test_selector;
struct test_selector subtest_selector;
+ struct test_selector tmon_selector;
bool verifier_stats;
bool debug;
enum verbosity verbosity;
@@ -111,8 +115,8 @@ struct test_env {
struct test_state *test_state; /* current running test state */
struct subtest_state *subtest_state; /* current running subtest state */
- FILE *stdout;
- FILE *stderr;
+ FILE *stdout_saved;
+ FILE *stderr_saved;
int nr_cpus;
FILE *json;
@@ -428,6 +432,10 @@ int write_sysctl(const char *sysctl, const char *value);
int get_bpf_max_tramp_links_from(struct btf *btf);
int get_bpf_max_tramp_links(void);
+struct netns_obj;
+struct netns_obj *netns_new(const char *name, bool open);
+void netns_free(struct netns_obj *netns);
+
#ifdef __x86_64__
#define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
#elif defined(__s390x__)
@@ -447,7 +455,6 @@ typedef int (*pre_execution_cb)(struct bpf_object *obj);
struct test_loader {
char *log_buf;
size_t log_buf_sz;
- size_t next_match_pos;
pre_execution_cb pre_execution_cb;
struct bpf_object *obj;
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh
deleted file mode 100755
index 515c2eafc97f..000000000000
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2018 Facebook
-
-set -eu
-
-wait_for_ip()
-{
- local _i
- echo -n "Wait for testing link-local IP to become available "
- for _i in $(seq ${MAX_PING_TRIES}); do
- echo -n "."
- if $PING6 -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then
- echo " OK"
- return
- fi
- sleep 1
- done
- echo 1>&2 "ERROR: Timeout waiting for test IP to become available."
- exit 1
-}
-
-setup()
-{
- # Create testing interfaces not to interfere with current environment.
- ip link add dev ${TEST_IF} type veth peer name ${TEST_IF_PEER}
- ip link set ${TEST_IF} up
- ip link set ${TEST_IF_PEER} up
-
- wait_for_ip
-
- tc qdisc add dev ${TEST_IF} clsact
- tc filter add dev ${TEST_IF} egress bpf obj ${BPF_PROG_OBJ} \
- sec ${BPF_PROG_SECTION} da
-
- BPF_PROG_ID=$(tc filter show dev ${TEST_IF} egress | \
- awk '/ id / {sub(/.* id /, "", $0); print($1)}')
-}
-
-cleanup()
-{
- ip link del ${TEST_IF} 2>/dev/null || :
- ip link del ${TEST_IF_PEER} 2>/dev/null || :
-}
-
-main()
-{
- trap cleanup EXIT 2 3 6 15
- setup
- ${PROG} ${TEST_IF} ${BPF_PROG_ID}
-}
-
-DIR=$(dirname $0)
-TEST_IF="test_cgid_1"
-TEST_IF_PEER="test_cgid_2"
-MAX_PING_TRIES=5
-BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.bpf.o"
-BPF_PROG_SECTION="cgroup_id_logger"
-BPF_PROG_ID=0
-PROG="${DIR}/test_skb_cgroup_id_user"
-type ping6 >/dev/null 2>&1 && PING6="ping6" || PING6="ping -6"
-
-main
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
deleted file mode 100644
index ed518d075d1d..000000000000
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <arpa/inet.h>
-#include <net/if.h>
-#include <netinet/in.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "cgroup_helpers.h"
-
-#define CGROUP_PATH "/skb_cgroup_test"
-#define NUM_CGROUP_LEVELS 4
-
-/* RFC 4291, Section 2.7.1 */
-#define LINKLOCAL_MULTICAST "ff02::1"
-
-static int mk_dst_addr(const char *ip, const char *iface,
- struct sockaddr_in6 *dst)
-{
- memset(dst, 0, sizeof(*dst));
-
- dst->sin6_family = AF_INET6;
- dst->sin6_port = htons(1025);
-
- if (inet_pton(AF_INET6, ip, &dst->sin6_addr) != 1) {
- log_err("Invalid IPv6: %s", ip);
- return -1;
- }
-
- dst->sin6_scope_id = if_nametoindex(iface);
- if (!dst->sin6_scope_id) {
- log_err("Failed to get index of iface: %s", iface);
- return -1;
- }
-
- return 0;
-}
-
-static int send_packet(const char *iface)
-{
- struct sockaddr_in6 dst;
- char msg[] = "msg";
- int err = 0;
- int fd = -1;
-
- if (mk_dst_addr(LINKLOCAL_MULTICAST, iface, &dst))
- goto err;
-
- fd = socket(AF_INET6, SOCK_DGRAM, 0);
- if (fd == -1) {
- log_err("Failed to create UDP socket");
- goto err;
- }
-
- if (sendto(fd, &msg, sizeof(msg), 0, (const struct sockaddr *)&dst,
- sizeof(dst)) == -1) {
- log_err("Failed to send datagram");
- goto err;
- }
-
- goto out;
-err:
- err = -1;
-out:
- if (fd >= 0)
- close(fd);
- return err;
-}
-
-int get_map_fd_by_prog_id(int prog_id)
-{
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- __u32 map_ids[1];
- int prog_fd = -1;
- int map_fd = -1;
-
- prog_fd = bpf_prog_get_fd_by_id(prog_id);
- if (prog_fd < 0) {
- log_err("Failed to get fd by prog id %d", prog_id);
- goto err;
- }
-
- info.nr_map_ids = 1;
- info.map_ids = (__u64) (unsigned long) map_ids;
-
- if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
- log_err("Failed to get info by prog fd %d", prog_fd);
- goto err;
- }
-
- if (!info.nr_map_ids) {
- log_err("No maps found for prog fd %d", prog_fd);
- goto err;
- }
-
- map_fd = bpf_map_get_fd_by_id(map_ids[0]);
- if (map_fd < 0)
- log_err("Failed to get fd by map id %d", map_ids[0]);
-err:
- if (prog_fd >= 0)
- close(prog_fd);
- return map_fd;
-}
-
-int check_ancestor_cgroup_ids(int prog_id)
-{
- __u64 actual_ids[NUM_CGROUP_LEVELS], expected_ids[NUM_CGROUP_LEVELS];
- __u32 level;
- int err = 0;
- int map_fd;
-
- expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */
- expected_ids[1] = get_cgroup_id("");
- expected_ids[2] = get_cgroup_id(CGROUP_PATH);
- expected_ids[3] = 0; /* non-existent cgroup */
-
- map_fd = get_map_fd_by_prog_id(prog_id);
- if (map_fd < 0)
- goto err;
-
- for (level = 0; level < NUM_CGROUP_LEVELS; ++level) {
- if (bpf_map_lookup_elem(map_fd, &level, &actual_ids[level])) {
- log_err("Failed to lookup key %d", level);
- goto err;
- }
- if (actual_ids[level] != expected_ids[level]) {
- log_err("%llx (actual) != %llx (expected), level: %u\n",
- actual_ids[level], expected_ids[level], level);
- goto err;
- }
- }
-
- goto out;
-err:
- err = -1;
-out:
- if (map_fd >= 0)
- close(map_fd);
- return err;
-}
-
-int main(int argc, char **argv)
-{
- int cgfd = -1;
- int err = 0;
-
- if (argc < 3) {
- fprintf(stderr, "Usage: %s iface prog_id\n", argv[0]);
- exit(EXIT_FAILURE);
- }
-
- /* Use libbpf 1.0 API mode */
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
-
- cgfd = cgroup_setup_and_join(CGROUP_PATH);
- if (cgfd < 0)
- goto err;
-
- if (send_packet(argv[1]))
- goto err;
-
- if (check_ancestor_cgroup_ids(atoi(argv[2])))
- goto err;
-
- goto out;
-err:
- err = -1;
-out:
- close(cgfd);
- cleanup_cgroup_environment();
- printf("[%s]\n", err ? "FAIL" : "PASS");
- return err;
-}
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
deleted file mode 100755
index 5211ca9a0239..000000000000
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Create 3 namespaces with 3 veth peers, and
-# forward packets in-between using native XDP
-#
-# XDP_TX
-# NS1(veth11) NS2(veth22) NS3(veth33)
-# | | |
-# | | |
-# (veth1, (veth2, (veth3,
-# id:111) id:122) id:133)
-# ^ | ^ | ^ |
-# | | XDP_REDIRECT | | XDP_REDIRECT | |
-# | ------------------ ------------------ |
-# -----------------------------------------
-# XDP_REDIRECT
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-TESTNAME=xdp_veth
-BPF_FS=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
-BPF_DIR=$BPF_FS/test_$TESTNAME
-readonly NS1="ns1-$(mktemp -u XXXXXX)"
-readonly NS2="ns2-$(mktemp -u XXXXXX)"
-readonly NS3="ns3-$(mktemp -u XXXXXX)"
-
-_cleanup()
-{
- set +e
- ip link del veth1 2> /dev/null
- ip link del veth2 2> /dev/null
- ip link del veth3 2> /dev/null
- ip netns del ${NS1} 2> /dev/null
- ip netns del ${NS2} 2> /dev/null
- ip netns del ${NS3} 2> /dev/null
- rm -rf $BPF_DIR 2> /dev/null
-}
-
-cleanup_skip()
-{
- echo "selftests: $TESTNAME [SKIP]"
- _cleanup
-
- exit $ksft_skip
-}
-
-cleanup()
-{
- if [ "$?" = 0 ]; then
- echo "selftests: $TESTNAME [PASS]"
- else
- echo "selftests: $TESTNAME [FAILED]"
- fi
- _cleanup
-}
-
-if [ $(id -u) -ne 0 ]; then
- echo "selftests: $TESTNAME [SKIP] Need root privileges"
- exit $ksft_skip
-fi
-
-if ! ip link set dev lo xdp off > /dev/null 2>&1; then
- echo "selftests: $TESTNAME [SKIP] Could not run test without the ip xdp support"
- exit $ksft_skip
-fi
-
-if [ -z "$BPF_FS" ]; then
- echo "selftests: $TESTNAME [SKIP] Could not run test without bpffs mounted"
- exit $ksft_skip
-fi
-
-if ! bpftool version > /dev/null 2>&1; then
- echo "selftests: $TESTNAME [SKIP] Could not run test without bpftool"
- exit $ksft_skip
-fi
-
-set -e
-
-trap cleanup_skip EXIT
-
-ip netns add ${NS1}
-ip netns add ${NS2}
-ip netns add ${NS3}
-
-ip link add veth1 index 111 type veth peer name veth11 netns ${NS1}
-ip link add veth2 index 122 type veth peer name veth22 netns ${NS2}
-ip link add veth3 index 133 type veth peer name veth33 netns ${NS3}
-
-ip link set veth1 up
-ip link set veth2 up
-ip link set veth3 up
-
-ip -n ${NS1} addr add 10.1.1.11/24 dev veth11
-ip -n ${NS3} addr add 10.1.1.33/24 dev veth33
-
-ip -n ${NS1} link set dev veth11 up
-ip -n ${NS2} link set dev veth22 up
-ip -n ${NS3} link set dev veth33 up
-
-mkdir $BPF_DIR
-bpftool prog loadall \
- xdp_redirect_map.bpf.o $BPF_DIR/progs type xdp \
- pinmaps $BPF_DIR/maps
-bpftool map update pinned $BPF_DIR/maps/tx_port key 0 0 0 0 value 122 0 0 0
-bpftool map update pinned $BPF_DIR/maps/tx_port key 1 0 0 0 value 133 0 0 0
-bpftool map update pinned $BPF_DIR/maps/tx_port key 2 0 0 0 value 111 0 0 0
-ip link set dev veth1 xdp pinned $BPF_DIR/progs/xdp_redirect_map_0
-ip link set dev veth2 xdp pinned $BPF_DIR/progs/xdp_redirect_map_1
-ip link set dev veth3 xdp pinned $BPF_DIR/progs/xdp_redirect_map_2
-
-ip -n ${NS1} link set dev veth11 xdp obj xdp_dummy.bpf.o sec xdp
-ip -n ${NS2} link set dev veth22 xdp obj xdp_tx.bpf.o sec xdp
-ip -n ${NS3} link set dev veth33 xdp obj xdp_dummy.bpf.o sec xdp
-
-trap cleanup EXIT
-
-ip netns exec ${NS1} ping -c 1 -W 1 10.1.1.33
-
-exit 0
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index d5379a0e6da8..d3c3c3a24150 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -7,6 +7,7 @@
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include "disasm.h"
#include "test_progs.h"
#include "testing_helpers.h"
#include <linux/membarrier.h>
@@ -220,13 +221,13 @@ int parse_test_list(const char *s,
bool is_glob_pattern)
{
char *input, *state = NULL, *test_spec;
- int err = 0;
+ int err = 0, cnt = 0;
input = strdup(s);
if (!input)
return -ENOMEM;
- while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) {
+ while ((test_spec = strtok_r(cnt++ ? NULL : input, ",", &state))) {
err = insert_test(set, test_spec, is_glob_pattern);
if (err)
break;
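
The `state ? NULL : input` idiom this hunk removes is subtly broken: POSIX does not promise that the saveptr stays non-NULL between calls, and musl in particular resets it to NULL once the final token runs to the end of the string, so parsing a single-token list like "foo" would feed `input` to strtok_r() again and loop forever. Counting iterations makes the first-call condition explicit. A sketch of the fixed pattern (plain libc, illustrative input):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
    	char *input = strdup("foo,bar,baz");
    	char *state = NULL, *tok;
    	int cnt = 0;

    	/* pass the buffer only on the first call; afterwards always NULL,
    	 * regardless of what strtok_r() left in 'state' */
    	while ((tok = strtok_r(cnt++ ? NULL : input, ",", &state)))
    		printf("token: %s\n", tok);

    	free(input);
    	return 0;
    }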
@@ -451,7 +452,7 @@ int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
*cnt = xlated_prog_len / buf_element_size;
*buf = calloc(*cnt, buf_element_size);
- if (!buf) {
+ if (!*buf) {
perror("can't allocate xlated program buffer");
return -ENOMEM;
}
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 465d196c7165..2d742fdac6b9 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -10,6 +10,8 @@
#include <pthread.h>
#include <unistd.h>
#include <linux/perf_event.h>
+#include <linux/fs.h>
+#include <sys/ioctl.h>
#include <sys/mman.h>
#include "trace_helpers.h"
#include <linux/limits.h>
@@ -244,29 +246,91 @@ out:
return err;
}
+#ifdef PROCMAP_QUERY
+int env_verbosity __weak = 0;
+
+static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *start, size_t *offset, int *flags)
+{
+ char path_buf[PATH_MAX], build_id_buf[20];
+ struct procmap_query q;
+ int err;
+
+ memset(&q, 0, sizeof(q));
+ q.size = sizeof(q);
+ q.query_flags = query_flags;
+ q.query_addr = (__u64)addr;
+ q.vma_name_addr = (__u64)path_buf;
+ q.vma_name_size = sizeof(path_buf);
+ q.build_id_addr = (__u64)build_id_buf;
+ q.build_id_size = sizeof(build_id_buf);
+
+ err = ioctl(fd, PROCMAP_QUERY, &q);
+ if (err < 0) {
+ err = -errno;
+ if (err == -ENOTTY)
+ return -EOPNOTSUPP; /* ioctl() not implemented yet */
+ if (err == -ENOENT)
+ return -ESRCH; /* vma not found */
+ return err;
+ }
+
+ if (env_verbosity >= 1) {
+ printf("VMA FOUND (addr %08lx): %08lx-%08lx %c%c%c%c %08lx %02x:%02x %ld %s (build ID: %s, %d bytes)\n",
+ (long)addr, (long)q.vma_start, (long)q.vma_end,
+ (q.vma_flags & PROCMAP_QUERY_VMA_READABLE) ? 'r' : '-',
+ (q.vma_flags & PROCMAP_QUERY_VMA_WRITABLE) ? 'w' : '-',
+ (q.vma_flags & PROCMAP_QUERY_VMA_EXECUTABLE) ? 'x' : '-',
+ (q.vma_flags & PROCMAP_QUERY_VMA_SHARED) ? 's' : 'p',
+ (long)q.vma_offset, q.dev_major, q.dev_minor, (long)q.inode,
+ q.vma_name_size ? path_buf : "",
+ q.build_id_size ? "YES" : "NO",
+ q.build_id_size);
+ }
+
+ *start = q.vma_start;
+ *offset = q.vma_offset;
+ *flags = q.vma_flags;
+ return 0;
+}
+#else
+static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *start, size_t *offset, int *flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
ssize_t get_uprobe_offset(const void *addr)
{
- size_t start, end, base;
- char buf[256];
- bool found = false;
+ size_t start, base, end;
FILE *f;
+ char buf[256];
+ int err, flags;
f = fopen("/proc/self/maps", "r");
if (!f)
return -errno;
- while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) {
- if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) {
- found = true;
- break;
+ /* request an executable VMA only */
+ err = procmap_query(fileno(f), addr, PROCMAP_QUERY_VMA_EXECUTABLE, &start, &base, &flags);
+ if (err == -EOPNOTSUPP) {
+ bool found = false;
+
+ while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) {
+ if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ fclose(f);
+ return -ESRCH;
}
+ } else if (err) {
+ fclose(f);
+ return err;
}
-
fclose(f);
- if (!found)
- return -ESRCH;
-
#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2
#define OP_RT_RA_MASK 0xffff0000UL
@@ -307,15 +371,25 @@ ssize_t get_rel_offset(uintptr_t addr)
size_t start, end, offset;
char buf[256];
FILE *f;
+ int err, flags;
f = fopen("/proc/self/maps", "r");
if (!f)
return -errno;
- while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &offset) == 4) {
- if (addr >= start && addr < end) {
- fclose(f);
- return (size_t)addr - start + offset;
+ err = procmap_query(fileno(f), (const void *)addr, 0, &start, &offset, &flags);
+ if (err == 0) {
+ fclose(f);
+ return (size_t)addr - start + offset;
+ } else if (err != -EOPNOTSUPP) {
+ fclose(f);
+ return err;
+ } else {
+ while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &offset) == 4) {
+ if (addr >= start && addr < end) {
+ fclose(f);
+ return (size_t)addr - start + offset;
+ }
}
}
diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c
index b6d016461fb0..220f6a963813 100644
--- a/tools/testing/selftests/bpf/unpriv_helpers.c
+++ b/tools/testing/selftests/bpf/unpriv_helpers.c
@@ -2,7 +2,6 @@
#include <stdbool.h>
#include <stdlib.h>
-#include <error.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c
index 7ffa563ffeba..c7828b13e5ff 100644
--- a/tools/testing/selftests/bpf/uprobe_multi.c
+++ b/tools/testing/selftests/bpf/uprobe_multi.c
@@ -2,8 +2,21 @@
#include <stdio.h>
#include <string.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <unistd.h>
#include <sdt.h>
+#ifndef MADV_POPULATE_READ
+#define MADV_POPULATE_READ 22
+#endif
+
+int __attribute__((weak)) uprobe(void)
+{
+ return 0;
+}
+
#define __PASTE(a, b) a##b
#define PASTE(a, b) __PASTE(a, b)
@@ -75,6 +88,30 @@ static int usdt(void)
return 0;
}
+extern char build_id_start[];
+extern char build_id_end[];
+
+int __attribute__((weak)) trigger_uprobe(bool build_id_resident)
+{
+ int page_sz = sysconf(_SC_PAGESIZE);
+ void *addr;
+
+ /* page-align build ID start */
+ addr = (void *)((uintptr_t)&build_id_start & ~(page_sz - 1));
+
+ /* to guarantee MADV_PAGEOUT works reliably, we need to ensure that the
+ * memory range is mapped into the current process, so we unconditionally
+ * do MADV_POPULATE_READ first, and then MADV_PAGEOUT, if necessary
+ */
+ madvise(addr, page_sz, MADV_POPULATE_READ);
+ if (!build_id_resident)
+ madvise(addr, page_sz, MADV_PAGEOUT);
+
+ (void)uprobe();
+
+ return 0;
+}
+
int main(int argc, char **argv)
{
if (argc != 2)
@@ -84,6 +121,10 @@ int main(int argc, char **argv)
return bench();
if (!strcmp("usdt", argv[1]))
return usdt();
+ if (!strcmp("uprobe-paged-out", argv[1]))
+ return trigger_uprobe(false /* page-out build ID */);
+ if (!strcmp("uprobe-paged-in", argv[1]))
+ return trigger_uprobe(true /* page-in build ID */);
error:
fprintf(stderr, "usage: %s <bench|usdt>\n", argv[0]);
diff --git a/tools/testing/selftests/bpf/uprobe_multi.ld b/tools/testing/selftests/bpf/uprobe_multi.ld
new file mode 100644
index 000000000000..a2e94828bc8c
--- /dev/null
+++ b/tools/testing/selftests/bpf/uprobe_multi.ld
@@ -0,0 +1,11 @@
+SECTIONS
+{
+ . = ALIGN(4096);
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ . = ALIGN(4096);
+}
+INSERT AFTER .text;
+
+build_id_start = ADDR(.note.gnu.build-id);
+build_id_end = ADDR(.note.gnu.build-id) + SIZEOF(.note.gnu.build-id);
+
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index d0cdd156cd55..7afc2619ab14 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -76,7 +76,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "arg#0 expected pointer to ctx, but got PTR",
+ .errstr = "arg#0 expected pointer to ctx, but got fp",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_pass_ctx", 2 },
},
diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c
index d25c3e9605f1..f420c0312aa0 100644
--- a/tools/testing/selftests/bpf/verifier/map_kptr.c
+++ b/tools/testing/selftests/bpf/verifier/map_kptr.c
@@ -153,7 +153,7 @@
.result = REJECT,
.errstr = "variable untrusted_ptr_ access var_off=(0x0; 0x7) disallowed",
},
-/* Tests for unreferened PTR_TO_BTF_ID */
+/* Tests for unreferenced PTR_TO_BTF_ID */
{
"map_kptr: unref: reject btf_struct_ids_match == false",
.insns = {
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 90643ccc221d..59a020c35647 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -39,11 +39,11 @@
.result = VERBOSE_ACCEPT,
.errstr =
"mark_precise: frame0: last_idx 26 first_idx 20\
- mark_precise: frame0: regs=r2,r9 stack= before 25\
- mark_precise: frame0: regs=r2,r9 stack= before 24\
- mark_precise: frame0: regs=r2,r9 stack= before 23\
- mark_precise: frame0: regs=r2,r9 stack= before 22\
- mark_precise: frame0: regs=r2,r9 stack= before 20\
+ mark_precise: frame0: regs=r2 stack= before 25\
+ mark_precise: frame0: regs=r2 stack= before 24\
+ mark_precise: frame0: regs=r2 stack= before 23\
+ mark_precise: frame0: regs=r2 stack= before 22\
+ mark_precise: frame0: regs=r2 stack= before 20\
mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: last_idx 19 first_idx 10\
mark_precise: frame0: regs=r2,r9 stack= before 19\
@@ -100,13 +100,13 @@
.errstr =
"26: (85) call bpf_probe_read_kernel#113\
mark_precise: frame0: last_idx 26 first_idx 22\
- mark_precise: frame0: regs=r2,r9 stack= before 25\
- mark_precise: frame0: regs=r2,r9 stack= before 24\
- mark_precise: frame0: regs=r2,r9 stack= before 23\
- mark_precise: frame0: regs=r2,r9 stack= before 22\
- mark_precise: frame0: parent state regs=r2,r9 stack=:\
+ mark_precise: frame0: regs=r2 stack= before 25\
+ mark_precise: frame0: regs=r2 stack= before 24\
+ mark_precise: frame0: regs=r2 stack= before 23\
+ mark_precise: frame0: regs=r2 stack= before 22\
+ mark_precise: frame0: parent state regs=r2 stack=:\
mark_precise: frame0: last_idx 20 first_idx 20\
- mark_precise: frame0: regs=r2,r9 stack= before 20\
+ mark_precise: frame0: regs=r2 stack= before 20\
mark_precise: frame0: parent state regs=r2,r9 stack=:\
mark_precise: frame0: last_idx 19 first_idx 17\
mark_precise: frame0: regs=r2,r9 stack= before 19\
@@ -183,10 +183,10 @@
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
.errstr = "mark_precise: frame0: last_idx 7 first_idx 7\
- mark_precise: frame0: parent state regs=r4 stack=-8:\
+ mark_precise: frame0: parent state regs=r4 stack=:\
mark_precise: frame0: last_idx 6 first_idx 4\
- mark_precise: frame0: regs=r4 stack=-8 before 6: (b7) r0 = -1\
- mark_precise: frame0: regs=r4 stack=-8 before 5: (79) r4 = *(u64 *)(r10 -8)\
+ mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\
+ mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\
mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\
mark_precise: frame0: parent state regs=r0 stack=:\
mark_precise: frame0: last_idx 3 first_idx 3\
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index b2854238d4a0..1ec5c4c47235 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#define _GNU_SOURCE
#include <argp.h>
+#include <libgen.h>
#include <string.h>
#include <stdlib.h>
#include <sched.h>
@@ -784,13 +785,13 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs)
static int parse_stats(const char *stats_str, struct stat_specs *specs)
{
char *input, *state = NULL, *next;
- int err;
+ int err, cnt = 0;
input = strdup(stats_str);
if (!input)
return -ENOMEM;
- while ((next = strtok_r(state ? NULL : input, ",", &state))) {
+ while ((next = strtok_r(cnt++ ? NULL : input, ",", &state))) {
err = parse_stat(next, specs);
if (err) {
free(input);
@@ -988,8 +989,8 @@ skip_freplace_fixup:
static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
{
+ const char *base_filename = basename(strdupa(filename));
const char *prog_name = bpf_program__name(prog);
- const char *base_filename = basename(filename);
char *buf;
int buf_sz, log_level;
struct verif_stats *stats;
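
With <libgen.h> now included, basename() is the POSIX variant, which takes a non-const pointer and may modify the string in place (and may return a pointer into it), so calling it directly on the caller's filename is unsafe; strdupa() hands it a disposable stack copy instead. strdupa() is a GNU extension, which is fine here since the file already defines _GNU_SOURCE. A sketch under those assumptions:

    #define _GNU_SOURCE
    #include <libgen.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	const char *path = "progs/verifier_and.bpf.o";
    	/* POSIX basename() may scribble on its argument, so give it a
    	 * throwaway copy; strdupa() allocates that copy on the stack. */
    	const char *base = basename(strdupa(path));

    	printf("%s\n", base); /* verifier_and.bpf.o */
    	return 0;
    }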
@@ -1056,13 +1057,14 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
static int process_obj(const char *filename)
{
+ const char *base_filename = basename(strdupa(filename));
struct bpf_object *obj = NULL, *tobj;
struct bpf_program *prog, *tprog, *lprog;
libbpf_print_fn_t old_libbpf_print_fn;
LIBBPF_OPTS(bpf_object_open_opts, opts);
int err = 0, prog_cnt = 0;
- if (!should_process_file_prog(basename(filename), NULL)) {
+ if (!should_process_file_prog(base_filename, NULL)) {
if (env.verbose)
printf("Skipping '%s' due to filters...\n", filename);
env.files_skipped++;
@@ -1076,7 +1078,7 @@ static int process_obj(const char *filename)
}
if (!env.quiet && env.out_fmt == RESFMT_TABLE)
- printf("Processing '%s'...\n", basename(filename));
+ printf("Processing '%s'...\n", base_filename);
old_libbpf_print_fn = libbpf_set_print(libbpf_print_fn);
obj = bpf_object__open_file(filename, &opts);
@@ -1493,7 +1495,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs,
while (fgets(line, sizeof(line), f)) {
char *input = line, *state = NULL, *next;
struct verif_stats *st = NULL;
- int col = 0;
+ int col = 0, cnt = 0;
if (!header) {
void *tmp;
@@ -1511,7 +1513,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs,
*stat_cntp += 1;
}
- while ((next = strtok_r(state ? NULL : input, ",\n", &state))) {
+ while ((next = strtok_r(cnt++ ? NULL : input, ",\n", &state))) {
if (header) {
/* for the first line, set up spec stats */
err = parse_stat(next, specs);
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index 65d14f3bbe30..79505d294c44 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -1,31 +1,47 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-set -u
set -e
-# This script currently only works for x86_64 and s390x, as
-# it is based on the VM image used by the BPF CI, which is
-# available only for these architectures.
-ARCH="$(uname -m)"
-case "${ARCH}" in
+# This script currently only works for the following platforms,
+# as it is based on the VM image used by the BPF CI, which is
+# available only for these architectures. A local rootfs image generated
+# by the following script can also be supplied via the -l option:
+# https://github.com/libbpf/ci/blob/main/rootfs/mkrootfs_debian.sh
+PLATFORM="${PLATFORM:-$(uname -m)}"
+case "${PLATFORM}" in
s390x)
QEMU_BINARY=qemu-system-s390x
QEMU_CONSOLE="ttyS1"
- QEMU_FLAGS=(-smp 2)
+ HOST_FLAGS=(-smp 2 -enable-kvm)
+ CROSS_FLAGS=(-smp 2)
BZIMAGE="arch/s390/boot/vmlinux"
+ ARCH="s390"
;;
x86_64)
QEMU_BINARY=qemu-system-x86_64
QEMU_CONSOLE="ttyS0,115200"
- QEMU_FLAGS=(-cpu host -smp 8)
+ HOST_FLAGS=(-cpu host -enable-kvm -smp 8)
+ CROSS_FLAGS=(-smp 8)
BZIMAGE="arch/x86/boot/bzImage"
+ ARCH="x86"
;;
aarch64)
QEMU_BINARY=qemu-system-aarch64
QEMU_CONSOLE="ttyAMA0,115200"
- QEMU_FLAGS=(-M virt,gic-version=3 -cpu host -smp 8)
+ HOST_FLAGS=(-M virt,gic-version=3 -cpu host -enable-kvm -smp 8)
+ CROSS_FLAGS=(-M virt,gic-version=3 -cpu cortex-a76 -smp 8)
BZIMAGE="arch/arm64/boot/Image"
+ ARCH="arm64"
+ ;;
+riscv64)
+ # requires qemu version v7.2.0 or newer
+ QEMU_BINARY=qemu-system-riscv64
+ QEMU_CONSOLE="ttyS0,115200"
+ HOST_FLAGS=(-M virt -cpu host -enable-kvm -smp 8)
+ CROSS_FLAGS=(-M virt -cpu rv64,sscofpmf=true -smp 8)
+ BZIMAGE="arch/riscv/boot/Image"
+ ARCH="riscv"
;;
*)
echo "Unsupported architecture"
@@ -34,11 +50,12 @@ aarch64)
esac
DEFAULT_COMMAND="./test_progs"
MOUNT_DIR="mnt"
+LOCAL_ROOTFS_IMAGE=""
ROOTFS_IMAGE="root.img"
OUTPUT_DIR="$HOME/.bpf_selftests"
KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config"
"tools/testing/selftests/bpf/config.vm"
- "tools/testing/selftests/bpf/config.${ARCH}")
+ "tools/testing/selftests/bpf/config.${PLATFORM}")
INDEX_URL="https://raw.githubusercontent.com/libbpf/ci/master/INDEX"
NUM_COMPILE_JOBS="$(nproc)"
LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")"
@@ -58,6 +75,10 @@ tools/testing/selftests/bpf. e.g:
If no command is specified and a debug shell (-s) is not requested,
"${DEFAULT_COMMAND}" will be run by default.
+Using PLATFORM= and CROSS_COMPILE= options will enable cross-platform testing:
+
+ PLATFORM=<platform> CROSS_COMPILE=<toolchain> $0 -- ./test_progs -t test_lsm
+
If you build your kernel using KBUILD_OUTPUT= or O= options, these
can be passed as environment variables to the script:
@@ -69,6 +90,7 @@ or
Options:
+ -l) Specify the path to the local rootfs image.
-i) Update the rootfs image with a newer version.
-d) Update the output directory (default: ${OUTPUT_DIR})
-j) Number of jobs for compilation, similar to -j in make
@@ -92,24 +114,11 @@ populate_url_map()
fi
}
-download()
-{
- local file="$1"
-
- if [[ ! -v URLS[$file] ]]; then
- echo "$file not found" >&2
- return 1
- fi
-
- echo "Downloading $file..." >&2
- curl -Lsf "${URLS[$file]}" "${@:2}"
-}
-
newest_rootfs_version()
{
{
for file in "${!URLS[@]}"; do
- if [[ $file =~ ^"${ARCH}"/libbpf-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then
+ if [[ $file =~ ^"${PLATFORM}"/libbpf-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then
echo "${BASH_REMATCH[1]}"
fi
done
@@ -118,16 +127,34 @@ newest_rootfs_version()
download_rootfs()
{
- local rootfsversion="$1"
- local dir="$2"
+ populate_url_map
+
+ local rootfsversion="$(newest_rootfs_version)"
+ local file="${PLATFORM}/libbpf-vmtest-rootfs-$rootfsversion.tar.zst"
+
+ if [[ ! -v URLS[$file] ]]; then
+ echo "$file not found" >&2
+ return 1
+ fi
+
+ echo "Downloading $file..." >&2
+ curl -Lsf "${URLS[$file]}" "${@:2}"
+}
+
+load_rootfs()
+{
+ local dir="$1"
if ! which zstd &> /dev/null; then
echo 'Could not find "zstd" on the system, please install zstd'
exit 1
fi
- download "${ARCH}/libbpf-vmtest-rootfs-$rootfsversion.tar.zst" |
- zstd -d | sudo tar -C "$dir" -x
+ if [[ -n "${LOCAL_ROOTFS_IMAGE}" ]]; then
+ cat "${LOCAL_ROOTFS_IMAGE}" | zstd -d | sudo tar -C "$dir" -x
+ else
+ download_rootfs | zstd -d | sudo tar -C "$dir" -x
+ fi
}
recompile_kernel()
@@ -227,7 +254,7 @@ create_vm_image()
mkfs.ext4 -q "${rootfs_img}"
mount_image
- download_rootfs "$(newest_rootfs_version)" "${mount_dir}"
+ load_rootfs "${mount_dir}"
unmount_image
}
@@ -244,12 +271,17 @@ EOF
exit 1
fi
+ if [[ "${PLATFORM}" != "$(uname -m)" ]]; then
+ QEMU_FLAGS=("${CROSS_FLAGS[@]}")
+ else
+ QEMU_FLAGS=("${HOST_FLAGS[@]}")
+ fi
+
${QEMU_BINARY} \
-nodefaults \
-display none \
-serial mon:stdio \
"${QEMU_FLAGS[@]}" \
- -enable-kvm \
-m 4G \
-drive file="${rootfs_img}",format=raw,index=1,media=disk,if=virtio,cache=none \
-kernel "${kernel_bzimage}" \
@@ -341,8 +373,11 @@ main()
local exit_command="poweroff -f"
local debug_shell="no"
- while getopts ':hskid:j:' opt; do
+ while getopts ':hskl:id:j:' opt; do
case ${opt} in
+ l)
+ LOCAL_ROOTFS_IMAGE="$OPTARG"
+ ;;
i)
update_image="yes"
;;
@@ -377,6 +412,11 @@ main()
trap 'catch "$?"' EXIT
+ if [[ "${PLATFORM}" != "$(uname -m)" ]] && [[ -z "${CROSS_COMPILE}" ]]; then
+ echo "Cross-platform testing needs to specify CROSS_COMPILE"
+ exit 1
+ fi
+
if [[ $# -eq 0 && "${debug_shell}" == "no" ]]; then
echo "No command specified, will run ${DEFAULT_COMMAND} in the vm"
else
@@ -384,7 +424,8 @@ main()
fi
local kconfig_file="${OUTPUT_DIR}/latest.config"
- local make_command="make -j ${NUM_COMPILE_JOBS} KCONFIG_CONFIG=${kconfig_file}"
+ local make_command="make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} \
+ -j ${NUM_COMPILE_JOBS} KCONFIG_CONFIG=${kconfig_file}"
# Figure out where the kernel is being built.
# O takes precedence over KBUILD_OUTPUT.
@@ -402,8 +443,6 @@ main()
make_command="${make_command} KBUILD_OUTPUT=${KBUILD_OUTPUT}"
fi
- populate_url_map
-
local rootfs_img="${OUTPUT_DIR}/${ROOTFS_IMAGE}"
local mount_dir="${OUTPUT_DIR}/${MOUNT_DIR}"
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 1ee0ef114f9d..11f047b8af75 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -90,6 +90,7 @@
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <libgen.h>
#include <string.h>
#include <stddef.h>
#include <sys/mman.h>
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 432db923bced..1e2d46636a0c 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -141,6 +141,16 @@ long cg_read_long(const char *cgroup, const char *control)
return atol(buf);
}
+long cg_read_long_fd(int fd)
+{
+ char buf[128];
+
+ if (pread(fd, buf, sizeof(buf), 0) <= 0)
+ return -1;
+
+ return atol(buf);
+}
+
long cg_read_key_long(const char *cgroup, const char *control, const char *key)
{
char buf[PAGE_SIZE];
@@ -183,6 +193,18 @@ int cg_write(const char *cgroup, const char *control, char *buf)
return ret == len ? 0 : ret;
}
+/*
+ * Returns fd on success, or -1 on failure.
+ * (fd should be closed with close() as usual)
+ */
+int cg_open(const char *cgroup, const char *control, int flags)
+{
+ char path[PATH_MAX];
+
+ snprintf(path, sizeof(path), "%s/%s", cgroup, control);
+ return open(path, flags);
+}
+
int cg_write_numeric(const char *cgroup, const char *control, long value)
{
char buf[64];
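
These helpers exist because memory.peak grew fd-local semantics: writing any non-empty string through an open fd arms "local peak" tracking for that fd, after which pread()s via the same fd report the peak since that reset, while independent reads of the file keep returning the global watermark. A sketch of the intended call pattern, assuming cgroup_util.h plus <fcntl.h> and <unistd.h>; the cgroup path and function name are hypothetical:

    /* Hedged sketch of the fd-local memory.peak protocol that
     * test_memcontrol.c exercises below. */
    static long local_peak_since_reset(const char *memcg)
    {
    	static const char reset[] = "reset\n"; /* any non-empty string works */
    	long peak = -1;
    	int fd;

    	fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
    	if (fd < 0)
    		return -1;
    	if (write(fd, reset, sizeof(reset)) == sizeof(reset)) {
    		/* ... run the workload being measured ... */
    		peak = cg_read_long_fd(fd); /* peak since *this* fd's reset */
    	}
    	close(fd);
    	return peak;
    }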
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index e8d04ac9e3d2..19b131ee7707 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -34,9 +34,11 @@ extern int cg_read_strcmp(const char *cgroup, const char *control,
extern int cg_read_strstr(const char *cgroup, const char *control,
const char *needle);
extern long cg_read_long(const char *cgroup, const char *control);
+extern long cg_read_long_fd(int fd);
long cg_read_key_long(const char *cgroup, const char *control, const char *key);
extern long cg_read_lc(const char *cgroup, const char *control);
extern int cg_write(const char *cgroup, const char *control, char *buf);
+extern int cg_open(const char *cgroup, const char *control, int flags);
int cg_write_numeric(const char *cgroup, const char *control, long value);
extern int cg_run(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index 41ae8047b889..16f5d74ae762 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -161,13 +161,16 @@ cleanup:
/*
* This test creates a memory cgroup, allocates
* some anonymous memory and some pagecache
- * and check memory.current and some memory.stat values.
+ * and checks memory.current, memory.peak, and some memory.stat values.
*/
-static int test_memcg_current(const char *root)
+static int test_memcg_current_peak(const char *root)
{
int ret = KSFT_FAIL;
- long current;
+ long current, peak, peak_reset;
char *memcg;
+ bool fd2_closed = false, fd3_closed = false, fd4_closed = false;
+ int peak_fd = -1, peak_fd2 = -1, peak_fd3 = -1, peak_fd4 = -1;
+ struct stat ss;
memcg = cg_name(root, "memcg_test");
if (!memcg)
@@ -180,15 +183,124 @@ static int test_memcg_current(const char *root)
if (current != 0)
goto cleanup;
+ peak = cg_read_long(memcg, "memory.peak");
+ if (peak != 0)
+ goto cleanup;
+
if (cg_run(memcg, alloc_anon_50M_check, NULL))
goto cleanup;
+ peak = cg_read_long(memcg, "memory.peak");
+ if (peak < MB(50))
+ goto cleanup;
+
+ /*
+ * We'll open a few FDs for the same memory.peak file to exercise the free path.
+ * We need at least three to be closed in a different order than the writes
+ * occurred, to test the linked-list handling.
+ */
+ peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
+
+ if (peak_fd == -1) {
+ if (errno == ENOENT)
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
+ /*
+ * Before we try to use memory.peak's fd, try to figure out whether
+ * this kernel supports writing to that file in the first place. (by
+ * checking the writable bit on the file's st_mode)
+ */
+ if (fstat(peak_fd, &ss))
+ goto cleanup;
+
+ if ((ss.st_mode & S_IWUSR) == 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
+ peak_fd2 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
+
+ if (peak_fd2 == -1)
+ goto cleanup;
+
+ peak_fd3 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
+
+ if (peak_fd3 == -1)
+ goto cleanup;
+
+ /* any non-empty string resets, but use a descriptive one for clarity */
+ static const char reset_string[] = "reset\n";
+
+ peak_reset = write(peak_fd, reset_string, sizeof(reset_string));
+ if (peak_reset != sizeof(reset_string))
+ goto cleanup;
+
+ peak_reset = write(peak_fd2, reset_string, sizeof(reset_string));
+ if (peak_reset != sizeof(reset_string))
+ goto cleanup;
+
+ peak_reset = write(peak_fd3, reset_string, sizeof(reset_string));
+ if (peak_reset != sizeof(reset_string))
+ goto cleanup;
+
+ /* Make sure a completely independent read isn't affected by our FD-local reset above */
+ peak = cg_read_long(memcg, "memory.peak");
+ if (peak < MB(50))
+ goto cleanup;
+
+ fd2_closed = true;
+ if (close(peak_fd2))
+ goto cleanup;
+
+ peak_fd4 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
+
+ if (peak_fd4 == -1)
+ goto cleanup;
+
+ peak_reset = write(peak_fd4, reset_string, sizeof(reset_string));
+ if (peak_reset != sizeof(reset_string))
+ goto cleanup;
+
+ peak = cg_read_long_fd(peak_fd);
+ if (peak > MB(30) || peak < 0)
+ goto cleanup;
+
if (cg_run(memcg, alloc_pagecache_50M_check, NULL))
goto cleanup;
+ peak = cg_read_long(memcg, "memory.peak");
+ if (peak < MB(50))
+ goto cleanup;
+
+ /* Make sure everything is back to normal */
+ peak = cg_read_long_fd(peak_fd);
+ if (peak < MB(50))
+ goto cleanup;
+
+ peak = cg_read_long_fd(peak_fd4);
+ if (peak < MB(50))
+ goto cleanup;
+
+ fd3_closed = true;
+ if (close(peak_fd3))
+ goto cleanup;
+
+ fd4_closed = true;
+ if (close(peak_fd4))
+ goto cleanup;
+
ret = KSFT_PASS;
cleanup:
+ close(peak_fd);
+ if (!fd2_closed)
+ close(peak_fd2);
+ if (!fd3_closed)
+ close(peak_fd3);
+ if (!fd4_closed)
+ close(peak_fd4);
cg_destroy(memcg);
free(memcg);
@@ -817,13 +929,19 @@ cleanup:
/*
* This test checks that memory.swap.max limits the amount of
- * anonymous memory which can be swapped out.
+ * anonymous memory which can be swapped out. Additionally, it verifies that
+ * memory.swap.peak reflects the high watermark and can be reset.
*/
-static int test_memcg_swap_max(const char *root)
+static int test_memcg_swap_max_peak(const char *root)
{
int ret = KSFT_FAIL;
char *memcg;
- long max;
+ long max, peak;
+ struct stat ss;
+ int swap_peak_fd = -1, mem_peak_fd = -1;
+
+ /* any non-empty string resets */
+ static const char reset_string[] = "foobarbaz";
if (!is_swap_enabled())
return KSFT_SKIP;
@@ -840,6 +958,61 @@ static int test_memcg_swap_max(const char *root)
goto cleanup;
}
+ swap_peak_fd = cg_open(memcg, "memory.swap.peak",
+ O_RDWR | O_APPEND | O_CLOEXEC);
+
+ if (swap_peak_fd == -1) {
+ if (errno == ENOENT)
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
+ /*
+ * Before we try to use memory.swap.peak's fd, try to figure out
+ * whether this kernel supports writing to that file in the first
+ * place. (by checking the writable bit on the file's st_mode)
+ */
+ if (fstat(swap_peak_fd, &ss))
+ goto cleanup;
+
+ if ((ss.st_mode & S_IWUSR) == 0) {
+ ret = KSFT_SKIP;
+ goto cleanup;
+ }
+
+ mem_peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
+
+ if (mem_peak_fd == -1)
+ goto cleanup;
+
+ if (cg_read_long(memcg, "memory.swap.peak"))
+ goto cleanup;
+
+ if (cg_read_long_fd(swap_peak_fd))
+ goto cleanup;
+
+ /* switch the swap and mem fds into local-peak tracking mode */
+ int peak_reset = write(swap_peak_fd, reset_string, sizeof(reset_string));
+
+ if (peak_reset != sizeof(reset_string))
+ goto cleanup;
+
+ if (cg_read_long_fd(swap_peak_fd))
+ goto cleanup;
+
+ if (cg_read_long(memcg, "memory.peak"))
+ goto cleanup;
+
+ if (cg_read_long_fd(mem_peak_fd))
+ goto cleanup;
+
+ peak_reset = write(mem_peak_fd, reset_string, sizeof(reset_string));
+ if (peak_reset != sizeof(reset_string))
+ goto cleanup;
+
+ if (cg_read_long_fd(mem_peak_fd))
+ goto cleanup;
+
if (cg_read_strcmp(memcg, "memory.max", "max\n"))
goto cleanup;
@@ -862,6 +1035,61 @@ static int test_memcg_swap_max(const char *root)
if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
goto cleanup;
+ peak = cg_read_long(memcg, "memory.peak");
+ if (peak < MB(29))
+ goto cleanup;
+
+ peak = cg_read_long(memcg, "memory.swap.peak");
+ if (peak < MB(29))
+ goto cleanup;
+
+ peak = cg_read_long_fd(mem_peak_fd);
+ if (peak < MB(29))
+ goto cleanup;
+
+ peak = cg_read_long_fd(swap_peak_fd);
+ if (peak < MB(29))
+ goto cleanup;
+
+ /*
+ * Open, reset, and close the swap peak on another FD to make sure
+ * multiple extant FDs don't corrupt the linked list.
+ */
+ peak_reset = cg_write(memcg, "memory.swap.peak", (char *)reset_string);
+ if (peak_reset)
+ goto cleanup;
+
+ peak_reset = cg_write(memcg, "memory.peak", (char *)reset_string);
+ if (peak_reset)
+ goto cleanup;
+
+ /* actually reset on the fds */
+ peak_reset = write(swap_peak_fd, reset_string, sizeof(reset_string));
+ if (peak_reset != sizeof(reset_string))
+ goto cleanup;
+
+ peak_reset = write(mem_peak_fd, reset_string, sizeof(reset_string));
+ if (peak_reset != sizeof(reset_string))
+ goto cleanup;
+
+ peak = cg_read_long_fd(swap_peak_fd);
+ if (peak > MB(10))
+ goto cleanup;
+
+ /*
+ * The cgroup is now empty, but there may be a page or two associated
+ * with the open FD accounted to it.
+ */
+ peak = cg_read_long_fd(mem_peak_fd);
+ if (peak > MB(1))
+ goto cleanup;
+
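+ /* Reads through fresh FDs still report the cgroup-wide (global) watermarks */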
+ if (cg_read_long(memcg, "memory.peak") < MB(29))
+ goto cleanup;
+
+ if (cg_read_long(memcg, "memory.swap.peak") < MB(29))
+ goto cleanup;
+
if (cg_run(memcg, alloc_anon_50M_check_swap, (void *)MB(30)))
goto cleanup;
@@ -869,9 +1097,29 @@ static int test_memcg_swap_max(const char *root)
if (max <= 0)
goto cleanup;
+ peak = cg_read_long(memcg, "memory.peak");
+ if (peak < MB(29))
+ goto cleanup;
+
+ peak = cg_read_long(memcg, "memory.swap.peak");
+ if (peak < MB(29))
+ goto cleanup;
+
+ peak = cg_read_long_fd(mem_peak_fd);
+ if (peak < MB(29))
+ goto cleanup;
+
+ peak = cg_read_long_fd(swap_peak_fd);
+ if (peak < MB(19))
+ goto cleanup;
+
ret = KSFT_PASS;
cleanup:
+ if (mem_peak_fd != -1 && close(mem_peak_fd))
+ ret = KSFT_FAIL;
+ if (swap_peak_fd != -1 && close(swap_peak_fd))
+ ret = KSFT_FAIL;
cg_destroy(memcg);
free(memcg);
@@ -1295,7 +1543,7 @@ struct memcg_test {
const char *name;
} tests[] = {
T(test_memcg_subtree_control),
- T(test_memcg_current),
+ T(test_memcg_current_peak),
T(test_memcg_min),
T(test_memcg_low),
T(test_memcg_high),
@@ -1303,7 +1551,7 @@ struct memcg_test {
T(test_memcg_max),
T(test_memcg_reclaim),
T(test_memcg_oom_events),
- T(test_memcg_swap_max),
+ T(test_memcg_swap_max_peak),
T(test_memcg_sock),
T(test_memcg_oom_group_leaf_events),
T(test_memcg_oom_group_parent_events),
diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
index 190096017f80..40de679248b8 100644
--- a/tools/testing/selftests/cgroup/test_zswap.c
+++ b/tools/testing/selftests/cgroup/test_zswap.c
@@ -263,15 +263,13 @@ out:
static int attempt_writeback(const char *cgroup, void *arg)
{
long pagesize = sysconf(_SC_PAGESIZE);
- char *test_group = arg;
size_t memsize = MB(4);
char buf[pagesize];
long zswap_usage;
- bool wb_enabled;
+ bool wb_enabled = *(bool *) arg;
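+ /* arg now carries the expected writeback setting; the cgroup arrives via @cgroup */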
int ret = -1;
char *mem;
- wb_enabled = cg_read_long(test_group, "memory.zswap.writeback");
mem = (char *)malloc(memsize);
if (!mem)
return ret;
@@ -288,12 +286,12 @@ static int attempt_writeback(const char *cgroup, void *arg)
memcpy(&mem[i], buf, pagesize);
/* Try and reclaim allocated memory */
- if (cg_write_numeric(test_group, "memory.reclaim", memsize)) {
+ if (cg_write_numeric(cgroup, "memory.reclaim", memsize)) {
ksft_print_msg("Failed to reclaim all of the requested memory\n");
goto out;
}
- zswap_usage = cg_read_long(test_group, "memory.zswap.current");
+ zswap_usage = cg_read_long(cgroup, "memory.zswap.current");
/* zswpin */
for (int i = 0; i < memsize; i += pagesize) {
@@ -303,7 +301,7 @@ static int attempt_writeback(const char *cgroup, void *arg)
}
}
- if (cg_write_numeric(test_group, "memory.zswap.max", zswap_usage/2))
+ if (cg_write_numeric(cgroup, "memory.zswap.max", zswap_usage/2))
goto out;
/*
@@ -312,7 +310,7 @@ static int attempt_writeback(const char *cgroup, void *arg)
* If writeback is disabled, memory reclaim will fail as zswap is limited and
* it can't writeback to swap.
*/
- ret = cg_write_numeric(test_group, "memory.reclaim", memsize);
+ ret = cg_write_numeric(cgroup, "memory.reclaim", memsize);
if (!wb_enabled)
ret = (ret == -EAGAIN) ? 0 : -1;
@@ -321,12 +319,41 @@ out:
return ret;
}
+static int test_zswap_writeback_one(const char *cgroup, bool wb)
+{
+ long zswpwb_before, zswpwb_after;
+
+ zswpwb_before = get_cg_wb_count(cgroup);
+ if (zswpwb_before != 0) {
+ ksft_print_msg("zswpwb_before = %ld instead of 0\n", zswpwb_before);
+ return -1;
+ }
+
+ if (cg_run(cgroup, attempt_writeback, (void *) &wb))
+ return -1;
+
+ /* Verify that zswap writeback occurred only if writeback was enabled */
+ zswpwb_after = get_cg_wb_count(cgroup);
+ if (zswpwb_after < 0)
+ return -1;
+
+ if (wb != !!zswpwb_after) {
+ ksft_print_msg("zswpwb_after is %ld while wb is %s",
+ zswpwb_after, wb ? "enabled" : "disabled");
+ return -1;
+ }
+
+ return 0;
+}
+
/* Test to verify the zswap writeback path */
static int test_zswap_writeback(const char *root, bool wb)
{
- long zswpwb_before, zswpwb_after;
int ret = KSFT_FAIL;
- char *test_group;
+ char *test_group, *test_group_child = NULL;
+
+ if (cg_read_strcmp(root, "memory.zswap.writeback", "1"))
+ return KSFT_SKIP;
test_group = cg_name(root, "zswap_writeback_test");
if (!test_group)
@@ -336,29 +363,35 @@ static int test_zswap_writeback(const char *root, bool wb)
if (cg_write(test_group, "memory.zswap.writeback", wb ? "1" : "0"))
goto out;
- zswpwb_before = get_cg_wb_count(test_group);
- if (zswpwb_before != 0) {
- ksft_print_msg("zswpwb_before = %ld instead of 0\n", zswpwb_before);
+ if (test_zswap_writeback_one(test_group, wb))
goto out;
- }
- if (cg_run(test_group, attempt_writeback, (void *) test_group))
+ /*
+ * Reset memory.zswap.max to "max" (attempt_writeback modified it), then
+ * set up a child cgroup whose memory.zswap.writeback is hardcoded to 1.
+ * The parent's setting is then what is in effect.
+ */
+ if (cg_write(test_group, "memory.zswap.max", "max"))
+ goto out;
+ if (cg_write(test_group, "cgroup.subtree_control", "+memory"))
goto out;
- /* Verify that zswap writeback occurred only if writeback was enabled */
- zswpwb_after = get_cg_wb_count(test_group);
- if (zswpwb_after < 0)
+ test_group_child = cg_name(test_group, "zswap_writeback_test_child");
+ if (!test_group_child)
+ goto out;
+ if (cg_create(test_group_child))
+ goto out;
+ if (cg_write(test_group_child, "memory.zswap.writeback", "1"))
goto out;
- if (wb != !!zswpwb_after) {
- ksft_print_msg("zswpwb_after is %ld while wb is %s",
- zswpwb_after, wb ? "enabled" : "disabled");
+ if (test_zswap_writeback_one(test_group_child, wb))
goto out;
- }
ret = KSFT_PASS;
out:
+ if (test_group_child) {
+ cg_destroy(test_group_child);
+ free(test_group_child);
+ }
cg_destroy(test_group);
free(test_group);
return ret;
diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore
index e65ef9d9cedc..2ab675fecb6b 100644
--- a/tools/testing/selftests/damon/.gitignore
+++ b/tools/testing/selftests/damon/.gitignore
@@ -3,3 +3,4 @@ huge_count_read_write
debugfs_target_ids_read_before_terminate_race
debugfs_target_ids_pid_leak
access_memory
+access_memory_even
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index 1e2e98cc809d..5b2a6a5dd1af 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -25,4 +25,6 @@ TEST_PROGS += debugfs_target_ids_pid_leak.sh
TEST_PROGS += sysfs_update_removed_scheme_dir.sh
TEST_PROGS += sysfs_update_schemes_tried_regions_hang.py
+EXTRA_CLEAN = __pycache__
+
include ../lib.mk
diff --git a/tools/testing/selftests/damon/damon_nr_regions.py b/tools/testing/selftests/damon/damon_nr_regions.py
index 2e8a74aff543..2e8a74aff543 100644..100755
--- a/tools/testing/selftests/damon/damon_nr_regions.py
+++ b/tools/testing/selftests/damon/damon_nr_regions.py
diff --git a/tools/testing/selftests/damon/damos_apply_interval.py b/tools/testing/selftests/damon/damos_apply_interval.py
index f04d43702481..f04d43702481 100644..100755
--- a/tools/testing/selftests/damon/damos_apply_interval.py
+++ b/tools/testing/selftests/damon/damos_apply_interval.py
diff --git a/tools/testing/selftests/damon/damos_quota.py b/tools/testing/selftests/damon/damos_quota.py
index 7d4c6bb2e3cd..7d4c6bb2e3cd 100644..100755
--- a/tools/testing/selftests/damon/damos_quota.py
+++ b/tools/testing/selftests/damon/damos_quota.py
diff --git a/tools/testing/selftests/damon/damos_quota_goal.py b/tools/testing/selftests/damon/damos_quota_goal.py
index 18246f3b62f7..18246f3b62f7 100644..100755
--- a/tools/testing/selftests/damon/damos_quota_goal.py
+++ b/tools/testing/selftests/damon/damos_quota_goal.py
diff --git a/tools/testing/selftests/damon/damos_tried_regions.py b/tools/testing/selftests/damon/damos_tried_regions.py
index 3b347eb28bd2..3b347eb28bd2 100644..100755
--- a/tools/testing/selftests/damon/damos_tried_regions.py
+++ b/tools/testing/selftests/damon/damos_tried_regions.py
diff --git a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh b/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh
index 31fe33c2b032..31fe33c2b032 100644..100755
--- a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh
+++ b/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh
diff --git a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh b/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh
index fc793c4c9aea..fc793c4c9aea 100644..100755
--- a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh
+++ b/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh
diff --git a/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py b/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py
index 28c887a0108f..28c887a0108f 100644..100755
--- a/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py
+++ b/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_hang.py
diff --git a/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py b/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py
index 90ad7409a7a6..90ad7409a7a6 100644..100755
--- a/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py
+++ b/tools/testing/selftests/damon/sysfs_update_schemes_tried_regions_wss_estimation.py
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
index 5f362c0fd890..319567f0fae1 100644
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -65,6 +65,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
static const char * const binder_features[] = {
"oneway_spam_detection",
"extended_error",
+ "freeze_notification",
};
change_mountns(_metadata);
diff --git a/tools/testing/selftests/ftrace/config b/tools/testing/selftests/ftrace/config
index 048a312abf40..544de0db5f58 100644
--- a/tools/testing/selftests/ftrace/config
+++ b/tools/testing/selftests/ftrace/config
@@ -20,6 +20,7 @@ CONFIG_PREEMPT_TRACER=y
CONFIG_PROBE_EVENTS_BTF_ARGS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_TRACE_EVENTS=m
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc
new file mode 100644
index 000000000000..d319d5ed4226
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe_module.tc
@@ -0,0 +1,61 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - add/remove tracepoint probe events on module
+# requires: dynamic_events "t[:[<group>/][<event>]] <tracepoint> [<args>]":README
+
+rmmod trace-events-sample ||:
+if ! modprobe trace-events-sample ; then
+ echo "No trace-events sample module - please make CONFIG_SAMPLE_TRACE_EVENTS=m"
+ exit_unresolved;
+fi
+trap "rmmod trace-events-sample" EXIT
+
+echo 0 > events/enable
+echo > dynamic_events
+
+TRACEPOINT1=foo_bar
+TRACEPOINT2=foo_bar_with_cond
+
+echo "t:myevent1 $TRACEPOINT1" >> dynamic_events
+echo "t:myevent2 $TRACEPOINT2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+grep -q myevent2 dynamic_events
+test -d events/tracepoints/myevent1
+test -d events/tracepoints/myevent2
+
+echo "-:myevent2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+! grep -q myevent2 dynamic_events
+
+echo > dynamic_events
+
+clear_trace
+
+:;: "Try to put a probe on a tracepoint in a non-loaded module" ;:
+rmmod trace-events-sample
+
+echo "t:myevent1 $TRACEPOINT1" >> dynamic_events
+echo "t:myevent2 $TRACEPOINT2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+grep -q myevent2 dynamic_events
+test -d events/tracepoints/myevent1
+test -d events/tracepoints/myevent2
+
+echo 1 > events/tracepoints/enable
+
+modprobe trace-events-sample
+
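+# Give the sample module's tracepoints time to fire before checking the trace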
+sleep 2
+
+grep -q "myevent1" trace
+grep -q "myevent2" trace
+
+rmmod trace-events-sample
+trap "" EXIT
+
+echo 0 > events/tracepoints/enable
+echo > dynamic_events
+clear_trace
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc
index da117b8f1d12..ffe8ffef4027 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc
@@ -9,7 +9,6 @@ check_error() { # command-with-error-pos-by-^
check_error 't^100 kfree' # BAD_MAXACT_TYPE
-check_error 't ^non_exist_tracepoint' # NO_TRACEPOINT
check_error 't:^/bar kfree' # NO_GROUP_NAME
check_error 't:^12345678901234567890123456789012345678901234567890123456789012345/bar kfree' # GROUP_TOO_LONG
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 6d9381d60172..7f57abf936e7 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -5,3 +5,7 @@
!*.h
!*.S
!*.sh
+!.gitignore
+!config
+!settings
+!Makefile
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0c4b254ab56b..960cf6a77198 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -130,6 +130,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
TEST_GEN_PROGS_x86_64 += x86_64/recalc_apic_map_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
+TEST_GEN_PROGS_x86_64 += coalesced_io_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
@@ -167,6 +168,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
TEST_GEN_PROGS_aarch64 += aarch64/no-vgic-v3
TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
TEST_GEN_PROGS_aarch64 += arch_timer
+TEST_GEN_PROGS_aarch64 += coalesced_io_test
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
@@ -188,6 +190,7 @@ TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += s390x/debug_test
TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
+TEST_GEN_PROGS_s390x += s390x/ucontrol_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += guest_print_test
@@ -200,6 +203,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
TEST_GEN_PROGS_riscv += riscv/ebreak_test
TEST_GEN_PROGS_riscv += arch_timer
+TEST_GEN_PROGS_riscv += coalesced_io_test
TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
TEST_GEN_PROGS_riscv += get-reg-list
diff --git a/tools/testing/selftests/kvm/coalesced_io_test.c b/tools/testing/selftests/kvm/coalesced_io_test.c
new file mode 100644
index 000000000000..60cb25454899
--- /dev/null
+++ b/tools/testing/selftests/kvm/coalesced_io_test.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/sizes.h>
+
+#include <kvm_util.h>
+#include <processor.h>
+
+#include "ucall_common.h"
+
+struct kvm_coalesced_io {
+ struct kvm_coalesced_mmio_ring *ring;
+ uint32_t ring_size;
+ uint64_t mmio_gpa;
+ uint64_t *mmio;
+
+ /*
+ * x86-only, but define pio_port for all architectures to minimize the
+ * amount of #ifdeffery and complexity, without having to sacrifice
+ * verbose error messages.
+ */
+ uint8_t pio_port;
+};
+
+static struct kvm_coalesced_io kvm_builtin_io_ring;
+
+#ifdef __x86_64__
+static const int has_pio = 1;
+#else
+static const int has_pio = 0;
+#endif
+
+static void guest_code(struct kvm_coalesced_io *io)
+{
+ int i, j;
+
+ for (;;) {
+ for (j = 0; j < 1 + has_pio; j++) {
+ /*
+ * KVM always leaves one free entry, i.e. exits to
+ * userspace before the last entry is filled.
+ */
+ for (i = 0; i < io->ring_size - 1; i++) {
+#ifdef __x86_64__
+ if (i & 1)
+ outl(io->pio_port, io->pio_port + i);
+ else
+#endif
+ WRITE_ONCE(*io->mmio, io->mmio_gpa + i);
+ }
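+ /* Only one free entry remains; the next write forces an exit to userspace */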
+#ifdef __x86_64__
+ if (j & 1)
+ outl(io->pio_port, io->pio_port + i);
+ else
+#endif
+ WRITE_ONCE(*io->mmio, io->mmio_gpa + i);
+ }
+ GUEST_SYNC(0);
+
+ WRITE_ONCE(*io->mmio, io->mmio_gpa + i);
+#ifdef __x86_64__
+ outl(io->pio_port, io->pio_port + i);
+#endif
+ }
+}
+
+static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
+ struct kvm_coalesced_io *io,
+ uint32_t ring_start,
+ uint32_t expected_exit)
+{
+ const bool want_pio = expected_exit == KVM_EXIT_IO;
+ struct kvm_coalesced_mmio_ring *ring = io->ring;
+ struct kvm_run *run = vcpu->run;
+ uint32_t pio_value;
+
+ WRITE_ONCE(ring->first, ring_start);
+ WRITE_ONCE(ring->last, ring_start);
+
+ vcpu_run(vcpu);
+
+ /*
+ * Annoyingly, reading PIO data is safe only for PIO exits, otherwise
+ * data_offset is garbage, e.g. an MMIO gpa.
+ */
+ if (run->exit_reason == KVM_EXIT_IO)
+ pio_value = *(uint32_t *)((void *)run + run->io.data_offset);
+ else
+ pio_value = 0;
+
+ TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write &&
+ run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 &&
+ *(uint64_t *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) ||
+ (want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port &&
+ run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 &&
+ pio_value == io->pio_port + io->ring_size - 1)),
+ "For start = %u, expected exit on %u-byte %s write 0x%llx = %lx, got exit_reason = %u (%s)\n "
+ "(MMIO addr = 0x%llx, write = %u, len = %u, data = %lx)\n "
+ "(PIO port = 0x%x, write = %u, len = %u, count = %u, data = %x",
+ ring_start, want_pio ? 4 : 8, want_pio ? "PIO" : "MMIO",
+ want_pio ? (unsigned long long)io->pio_port : io->mmio_gpa,
+ (want_pio ? io->pio_port : io->mmio_gpa) + io->ring_size - 1, run->exit_reason,
+ run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? "PIO" : "other",
+ run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(uint64_t *)run->mmio.data,
+ run->io.port, run->io.direction, run->io.size, run->io.count, pio_value);
+}
+
+static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
+ struct kvm_coalesced_io *io,
+ uint32_t ring_start,
+ uint32_t expected_exit)
+{
+ struct kvm_coalesced_mmio_ring *ring = io->ring;
+ int i;
+
+ vcpu_run_and_verify_io_exit(vcpu, io, ring_start, expected_exit);
+
+ TEST_ASSERT((ring->last + 1) % io->ring_size == ring->first,
+ "Expected ring to be full (minus 1), first = %u, last = %u, max = %u, start = %u",
+ ring->first, ring->last, io->ring_size, ring_start);
+
+ for (i = 0; i < io->ring_size - 1; i++) {
+ uint32_t idx = (ring->first + i) % io->ring_size;
+ struct kvm_coalesced_mmio *entry = &ring->coalesced_mmio[idx];
+
+#ifdef __x86_64__
+ if (i & 1)
+ TEST_ASSERT(entry->phys_addr == io->pio_port &&
+ entry->len == 4 && entry->pio &&
+ *(uint32_t *)entry->data == io->pio_port + i,
+ "Wanted 4-byte port I/O 0x%x = 0x%x in entry %u, got %u-byte %s 0x%llx = 0x%x",
+ io->pio_port, io->pio_port + i, i,
+ entry->len, entry->pio ? "PIO" : "MMIO",
+ entry->phys_addr, *(uint32_t *)entry->data);
+ else
+#endif
+ TEST_ASSERT(entry->phys_addr == io->mmio_gpa &&
+ entry->len == 8 && !entry->pio,
+ "Wanted 8-byte MMIO to 0x%lx = %lx in entry %u, got %u-byte %s 0x%llx = 0x%lx",
+ io->mmio_gpa, io->mmio_gpa + i, i,
+ entry->len, entry->pio ? "PIO" : "MMIO",
+ entry->phys_addr, *(uint64_t *)entry->data);
+ }
+}
+
+static void test_coalesced_io(struct kvm_vcpu *vcpu,
+ struct kvm_coalesced_io *io, uint32_t ring_start)
+{
+ struct kvm_coalesced_mmio_ring *ring = io->ring;
+
+ kvm_vm_register_coalesced_io(vcpu->vm, io->mmio_gpa, 8, false /* pio */);
+#ifdef __x86_64__
+ kvm_vm_register_coalesced_io(vcpu->vm, io->pio_port, 8, true /* pio */);
+#endif
+
+ vcpu_run_and_verify_coalesced_io(vcpu, io, ring_start, KVM_EXIT_MMIO);
+#ifdef __x86_64__
+ vcpu_run_and_verify_coalesced_io(vcpu, io, ring_start, KVM_EXIT_IO);
+#endif
+
+ /*
+ * Verify that ucall, which may use non-coalesced MMIO or PIO, generates
+ * an immediate exit.
+ */
+ WRITE_ONCE(ring->first, ring_start);
+ WRITE_ONCE(ring->last, ring_start);
+ vcpu_run(vcpu);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+ TEST_ASSERT_EQ(ring->first, ring_start);
+ TEST_ASSERT_EQ(ring->last, ring_start);
+
+ /* Verify that non-coalesced MMIO/PIO generates an exit to userspace. */
+ kvm_vm_unregister_coalesced_io(vcpu->vm, io->mmio_gpa, 8, false /* pio */);
+ vcpu_run_and_verify_io_exit(vcpu, io, ring_start, KVM_EXIT_MMIO);
+
+#ifdef __x86_64__
+ kvm_vm_unregister_coalesced_io(vcpu->vm, io->pio_port, 8, true /* pio */);
+ vcpu_run_and_verify_io_exit(vcpu, io, ring_start, KVM_EXIT_IO);
+#endif
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int i;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_COALESCED_MMIO));
+
+#ifdef __x86_64__
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_COALESCED_PIO));
+#endif
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ kvm_builtin_io_ring = (struct kvm_coalesced_io) {
+ /*
+ * The I/O ring is a kernel-allocated page whose address is
+ * relative to each vCPU's run page, with the page offset
+ * provided by KVM in the return of KVM_CAP_COALESCED_MMIO.
+ */
+ .ring = (void *)vcpu->run +
+ (kvm_check_cap(KVM_CAP_COALESCED_MMIO) * getpagesize()),
+
+ /*
+ * The size of the I/O ring is fixed, but KVM defines the size
+ * based on the kernel's PAGE_SIZE. Thus, userspace must query
+ * the host's page size at runtime to compute the ring size.
+ */
+ .ring_size = (getpagesize() - sizeof(struct kvm_coalesced_mmio_ring)) /
+ sizeof(struct kvm_coalesced_mmio),
+
+ /*
+ * Arbitrary address+port (MMIO mustn't overlap memslots), with
+ * the MMIO GPA identity mapped in the guest.
+ */
+ .mmio_gpa = 4ull * SZ_1G,
+ .mmio = (uint64_t *)(4ull * SZ_1G),
+ .pio_port = 0x80,
+ };
+
+ virt_map(vm, (uint64_t)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1);
+
+ sync_global_to_guest(vm, kvm_builtin_io_ring);
+ vcpu_args_set(vcpu, 1, &kvm_builtin_io_ring);
+
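+ /* Test every possible ring start index so that index wraparound is exercised */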
+ for (i = 0; i < kvm_builtin_io_ring.ring_size; i++)
+ test_coalesced_io(vcpu, &kvm_builtin_io_ring, i);
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c
index 8092c2d0f5d6..bcf582852db9 100644
--- a/tools/testing/selftests/kvm/guest_print_test.c
+++ b/tools/testing/selftests/kvm/guest_print_test.c
@@ -107,6 +107,21 @@ static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
expected_assert_msg, &assert_msg[offset]);
}
+/*
+ * Open code vcpu_run(), sans the UCALL_ABORT handling, so that intentional
+ * guest asserts can be verified instead of being reported as failures.
+ */
+static void do_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ int r;
+
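+ /* Retry KVM_RUN if it is interrupted by a signal */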
+ do {
+ r = __vcpu_run(vcpu);
+ } while (r == -1 && errno == EINTR);
+
+ TEST_ASSERT(!r, KVM_IOCTL_ERROR(KVM_RUN, r));
+}
+
static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
const char *expected_assert)
{
@@ -114,7 +129,7 @@ static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
struct ucall uc;
while (1) {
- vcpu_run(vcpu);
+ do_vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
"Unexpected exit reason: %u (%s),",
@@ -159,7 +174,7 @@ static void test_limits(void)
vm = vm_create_with_one_vcpu(&vcpu, guest_code_limits);
run = vcpu->run;
- vcpu_run(vcpu);
+ do_vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
"Unexpected exit reason: %u (%s),",
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 63c2aaae51f3..bc7c242480d6 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -428,8 +428,6 @@ const char *vm_guest_mode_string(uint32_t i);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
-int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
- size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);
@@ -460,6 +458,32 @@ static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}
+static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
+ uint64_t address,
+ uint64_t size, bool pio)
+{
+ struct kvm_coalesced_mmio_zone zone = {
+ .addr = address,
+ .size = size,
+ .pio = pio,
+ };
+
+ vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
+}
+
+static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
+ uint64_t address,
+ uint64_t size, bool pio)
+{
+ struct kvm_coalesced_mmio_zone zone = {
+ .addr = address,
+ .size = size,
+ .pio = pio,
+ };
+
+ vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+}
+
static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
diff --git a/tools/testing/selftests/kvm/include/s390x/debug_print.h b/tools/testing/selftests/kvm/include/s390x/debug_print.h
new file mode 100644
index 000000000000..1bf275631cc6
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/s390x/debug_print.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Definition for kernel virtual machines on s390x
+ *
+ * Copyright IBM Corp. 2024
+ *
+ * Authors:
+ * Christoph Schlameuss <schlameuss@linux.ibm.com>
+ */
+
+#ifndef SELFTEST_KVM_DEBUG_PRINT_H
+#define SELFTEST_KVM_DEBUG_PRINT_H
+
+#include "asm/ptrace.h"
+#include "kvm_util.h"
+#include "sie.h"
+
+static inline void print_hex_bytes(const char *name, u64 addr, size_t len)
+{
+ u64 pos;
+
+ pr_debug("%s (%p)\n", name, (void *)addr);
+ pr_debug(" 0/0x00---------|");
+ if (len > 8)
+ pr_debug(" 8/0x08---------|");
+ if (len > 16)
+ pr_debug(" 16/0x10--------|");
+ if (len > 24)
+ pr_debug(" 24/0x18--------|");
+ for (pos = 0; pos < len; pos += 8) {
+ if ((pos % 32) == 0)
+ pr_debug("\n %3lu 0x%.3lx ", pos, pos);
+ pr_debug(" %16lx", *((u64 *)(addr + pos)));
+ }
+ pr_debug("\n");
+}
+
+static inline void print_hex(const char *name, u64 addr)
+{
+ print_hex_bytes(name, addr, 512);
+}
+
+static inline void print_psw(struct kvm_run *run, struct kvm_s390_sie_block *sie_block)
+{
+ pr_debug("flags:0x%x psw:0x%.16llx:0x%.16llx exit:%u %s\n",
+ run->flags,
+ run->psw_mask, run->psw_addr,
+ run->exit_reason, exit_reason_str(run->exit_reason));
+ pr_debug("sie_block psw:0x%.16llx:0x%.16llx\n",
+ sie_block->psw_mask, sie_block->psw_addr);
+}
+
+static inline void print_run(struct kvm_run *run, struct kvm_s390_sie_block *sie_block)
+{
+ print_hex_bytes("run", (u64)run, 0x150);
+ print_hex("sie_block", (u64)sie_block);
+ print_psw(run, sie_block);
+}
+
+static inline void print_regs(struct kvm_run *run)
+{
+ struct kvm_sync_regs *sync_regs = &run->s.regs;
+
+ print_hex_bytes("GPRS", (u64)sync_regs->gprs, 8 * NUM_GPRS);
+ print_hex_bytes("ACRS", (u64)sync_regs->acrs, 4 * NUM_ACRS);
+ print_hex_bytes("CRS", (u64)sync_regs->crs, 8 * NUM_CRS);
+}
+
+#endif /* SELFTEST_KVM_DEBUG_PRINT_H */
diff --git a/tools/testing/selftests/kvm/include/s390x/processor.h b/tools/testing/selftests/kvm/include/s390x/processor.h
index 255c9b990f4c..481bd2fd6a32 100644
--- a/tools/testing/selftests/kvm/include/s390x/processor.h
+++ b/tools/testing/selftests/kvm/include/s390x/processor.h
@@ -21,6 +21,11 @@
#define PAGE_PROTECT 0x200 /* HW read-only bit */
#define PAGE_NOEXEC 0x100 /* HW no-execute bit */
+/* Page size definitions */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE BIT_ULL(PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
/* Is there a portable way to do this? */
static inline void cpu_relax(void)
{
diff --git a/tools/testing/selftests/kvm/include/s390x/sie.h b/tools/testing/selftests/kvm/include/s390x/sie.h
new file mode 100644
index 000000000000..160acd4a1db9
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/s390x/sie.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definition for kernel virtual machines on s390.
+ *
+ * Adapted copy of struct definition kvm_s390_sie_block from
+ * arch/s390/include/asm/kvm_host.h for use in userspace selftest programs.
+ *
+ * Copyright IBM Corp. 2008, 2024
+ *
+ * Authors:
+ * Christoph Schlameuss <schlameuss@linux.ibm.com>
+ * Carsten Otte <cotte@de.ibm.com>
+ */
+
+#ifndef SELFTEST_KVM_SIE_H
+#define SELFTEST_KVM_SIE_H
+
+#include <linux/types.h>
+
+struct kvm_s390_sie_block {
+#define CPUSTAT_STOPPED 0x80000000
+#define CPUSTAT_WAIT 0x10000000
+#define CPUSTAT_ECALL_PEND 0x08000000
+#define CPUSTAT_STOP_INT 0x04000000
+#define CPUSTAT_IO_INT 0x02000000
+#define CPUSTAT_EXT_INT 0x01000000
+#define CPUSTAT_RUNNING 0x00800000
+#define CPUSTAT_RETAINED 0x00400000
+#define CPUSTAT_TIMING_SUB 0x00020000
+#define CPUSTAT_SIE_SUB 0x00010000
+#define CPUSTAT_RRF 0x00008000
+#define CPUSTAT_SLSV 0x00004000
+#define CPUSTAT_SLSR 0x00002000
+#define CPUSTAT_ZARCH 0x00000800
+#define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_KSS 0x00000200
+#define CPUSTAT_SM 0x00000080
+#define CPUSTAT_IBS 0x00000040
+#define CPUSTAT_GED2 0x00000010
+#define CPUSTAT_G 0x00000008
+#define CPUSTAT_GED 0x00000004
+#define CPUSTAT_J 0x00000002
+#define CPUSTAT_P 0x00000001
+ __u32 cpuflags; /* 0x0000 */
+ __u32: 1; /* 0x0004 */
+ __u32 prefix : 18;
+ __u32: 1;
+ __u32 ibc : 12;
+ __u8 reserved08[4]; /* 0x0008 */
+#define PROG_IN_SIE BIT(0)
+ __u32 prog0c; /* 0x000c */
+ union {
+ __u8 reserved10[16]; /* 0x0010 */
+ struct {
+ __u64 pv_handle_cpu;
+ __u64 pv_handle_config;
+ };
+ };
+#define PROG_BLOCK_SIE BIT(0)
+#define PROG_REQUEST BIT(1)
+ __u32 prog20; /* 0x0020 */
+ __u8 reserved24[4]; /* 0x0024 */
+ __u64 cputm; /* 0x0028 */
+ __u64 ckc; /* 0x0030 */
+ __u64 epoch; /* 0x0038 */
+ __u32 svcc; /* 0x0040 */
+#define LCTL_CR0 0x8000
+#define LCTL_CR6 0x0200
+#define LCTL_CR9 0x0040
+#define LCTL_CR10 0x0020
+#define LCTL_CR11 0x0010
+#define LCTL_CR14 0x0002
+ __u16 lctl; /* 0x0044 */
+ __s16 icpua; /* 0x0046 */
+#define ICTL_OPEREXC 0x80000000
+#define ICTL_PINT 0x20000000
+#define ICTL_LPSW 0x00400000
+#define ICTL_STCTL 0x00040000
+#define ICTL_ISKE 0x00004000
+#define ICTL_SSKE 0x00002000
+#define ICTL_RRBE 0x00001000
+#define ICTL_TPROT 0x00000200
+ __u32 ictl; /* 0x0048 */
+#define ECA_CEI 0x80000000
+#define ECA_IB 0x40000000
+#define ECA_SIGPI 0x10000000
+#define ECA_MVPGI 0x01000000
+#define ECA_AIV 0x00200000
+#define ECA_VX 0x00020000
+#define ECA_PROTEXCI 0x00002000
+#define ECA_APIE 0x00000008
+#define ECA_SII 0x00000001
+ __u32 eca; /* 0x004c */
+#define ICPT_INST 0x04
+#define ICPT_PROGI 0x08
+#define ICPT_INSTPROGI 0x0C
+#define ICPT_EXTREQ 0x10
+#define ICPT_EXTINT 0x14
+#define ICPT_IOREQ 0x18
+#define ICPT_WAIT 0x1c
+#define ICPT_VALIDITY 0x20
+#define ICPT_STOP 0x28
+#define ICPT_OPEREXC 0x2C
+#define ICPT_PARTEXEC 0x38
+#define ICPT_IOINST 0x40
+#define ICPT_KSS 0x5c
+#define ICPT_MCHKREQ 0x60
+#define ICPT_INT_ENABLE 0x64
+#define ICPT_PV_INSTR 0x68
+#define ICPT_PV_NOTIFY 0x6c
+#define ICPT_PV_PREF 0x70
+ __u8 icptcode; /* 0x0050 */
+ __u8 icptstatus; /* 0x0051 */
+ __u16 ihcpu; /* 0x0052 */
+ __u8 reserved54; /* 0x0054 */
+#define IICTL_CODE_NONE 0x00
+#define IICTL_CODE_MCHK 0x01
+#define IICTL_CODE_EXT 0x02
+#define IICTL_CODE_IO 0x03
+#define IICTL_CODE_RESTART 0x04
+#define IICTL_CODE_SPECIFICATION 0x10
+#define IICTL_CODE_OPERAND 0x11
+ __u8 iictl; /* 0x0055 */
+ __u16 ipa; /* 0x0056 */
+ __u32 ipb; /* 0x0058 */
+ __u32 scaoh; /* 0x005c */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
+#define ECB_GS 0x40
+#define ECB_TE 0x10
+#define ECB_SPECI 0x08
+#define ECB_SRSI 0x04
+#define ECB_HOSTPROTINT 0x02
+#define ECB_PTF 0x01
+ __u8 ecb; /* 0x0061 */
+#define ECB2_CMMA 0x80
+#define ECB2_IEP 0x20
+#define ECB2_PFMFI 0x08
+#define ECB2_ESCA 0x04
+#define ECB2_ZPCI_LSI 0x02
+ __u8 ecb2; /* 0x0062 */
+#define ECB3_AISI 0x20
+#define ECB3_AISII 0x10
+#define ECB3_DEA 0x08
+#define ECB3_AES 0x04
+#define ECB3_RI 0x01
+ __u8 ecb3; /* 0x0063 */
+#define ESCA_SCAOL_MASK ~0x3fU
+ __u32 scaol; /* 0x0064 */
+ __u8 sdf; /* 0x0068 */
+ __u8 epdx; /* 0x0069 */
+ __u8 cpnc; /* 0x006a */
+ __u8 reserved6b; /* 0x006b */
+ __u32 todpr; /* 0x006c */
+#define GISA_FORMAT1 0x00000001
+ __u32 gd; /* 0x0070 */
+ __u8 reserved74[12]; /* 0x0074 */
+ __u64 mso; /* 0x0080 */
+ __u64 msl; /* 0x0088 */
+ __u64 psw_mask; /* 0x0090 */
+ __u64 psw_addr; /* 0x0098 */
+ __u64 gg14; /* 0x00a0 */
+ __u64 gg15; /* 0x00a8 */
+ __u8 reservedb0[8]; /* 0x00b0 */
+#define HPID_KVM 0x4
+#define HPID_VSIE 0x5
+ __u8 hpid; /* 0x00b8 */
+ __u8 reservedb9[7]; /* 0x00b9 */
+ union {
+ struct {
+ __u32 eiparams; /* 0x00c0 */
+ __u16 extcpuaddr; /* 0x00c4 */
+ __u16 eic; /* 0x00c6 */
+ };
+ __u64 mcic; /* 0x00c0 */
+ } __packed;
+ __u32 reservedc8; /* 0x00c8 */
+ union {
+ struct {
+ __u16 pgmilc; /* 0x00cc */
+ __u16 iprcc; /* 0x00ce */
+ };
+ __u32 edc; /* 0x00cc */
+ } __packed;
+ union {
+ struct {
+ __u32 dxc; /* 0x00d0 */
+ __u16 mcn; /* 0x00d4 */
+ __u8 perc; /* 0x00d6 */
+ __u8 peratmid; /* 0x00d7 */
+ };
+ __u64 faddr; /* 0x00d0 */
+ } __packed;
+ __u64 peraddr; /* 0x00d8 */
+ __u8 eai; /* 0x00e0 */
+ __u8 peraid; /* 0x00e1 */
+ __u8 oai; /* 0x00e2 */
+ __u8 armid; /* 0x00e3 */
+ __u8 reservede4[4]; /* 0x00e4 */
+ union {
+ __u64 tecmc; /* 0x00e8 */
+ struct {
+ __u16 subchannel_id; /* 0x00e8 */
+ __u16 subchannel_nr; /* 0x00ea */
+ __u32 io_int_parm; /* 0x00ec */
+ __u32 io_int_word; /* 0x00f0 */
+ };
+ } __packed;
+ __u8 reservedf4[8]; /* 0x00f4 */
+#define CRYCB_FORMAT_MASK 0x00000003
+#define CRYCB_FORMAT0 0x00000000
+#define CRYCB_FORMAT1 0x00000001
+#define CRYCB_FORMAT2 0x00000003
+ __u32 crycbd; /* 0x00fc */
+ __u64 gcr[16]; /* 0x0100 */
+ union {
+ __u64 gbea; /* 0x0180 */
+ __u64 sidad;
+ };
+ __u8 reserved188[8]; /* 0x0188 */
+ __u64 sdnxo; /* 0x0190 */
+ __u8 reserved198[8]; /* 0x0198 */
+ __u32 fac; /* 0x01a0 */
+ __u8 reserved1a4[20]; /* 0x01a4 */
+ __u64 cbrlo; /* 0x01b8 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+#define ECD_HOSTREGMGMT 0x20000000
+#define ECD_MEF 0x08000000
+#define ECD_ETOKENF 0x02000000
+#define ECD_ECC 0x00200000
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
+ __u64 pp; /* 0x01de */
+ __u8 reserved1e6[2]; /* 0x01e6 */
+ __u64 itdba; /* 0x01e8 */
+ __u64 riccbd; /* 0x01f0 */
+ __u64 gvrd; /* 0x01f8 */
+} __packed __aligned(512);
+
+#endif /* SELFTEST_KVM_SIE_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/apic.h b/tools/testing/selftests/kvm/include/x86_64/apic.h
index 0f268b55fa06..51990094effd 100644
--- a/tools/testing/selftests/kvm/include/x86_64/apic.h
+++ b/tools/testing/selftests/kvm/include/x86_64/apic.h
@@ -11,6 +11,7 @@
#include <stdint.h>
#include "processor.h"
+#include "ucall_common.h"
#define APIC_DEFAULT_GPA 0xfee00000ULL
@@ -93,9 +94,27 @@ static inline uint64_t x2apic_read_reg(unsigned int reg)
return rdmsr(APIC_BASE_MSR + (reg >> 4));
}
+static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value)
+{
+ return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value);
+}
+
static inline void x2apic_write_reg(unsigned int reg, uint64_t value)
{
- wrmsr(APIC_BASE_MSR + (reg >> 4), value);
+ uint8_t fault = x2apic_write_reg_safe(reg, value);
+
+ __GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n",
+ fault, APIC_BASE_MSR + (reg >> 4), value);
}
+static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value)
+{
+ uint8_t fault = x2apic_write_reg_safe(reg, value);
+
+ __GUEST_ASSERT(fault == GP_VECTOR,
+ "Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n",
+ APIC_BASE_MSR + (reg >> 4), value, fault);
+}
+
#endif /* SELFTEST_KVM_APIC_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/hyperv.h b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
index fa65b908b13e..6849e2552f1b 100644
--- a/tools/testing/selftests/kvm/include/x86_64/hyperv.h
+++ b/tools/testing/selftests/kvm/include/x86_64/hyperv.h
@@ -186,6 +186,18 @@
#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED \
KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EAX, 14)
+/* HYPERV_CPUID_NESTED_FEATURES.EAX */
+#define HV_X64_NESTED_DIRECT_FLUSH \
+ KVM_X86_CPU_FEATURE(HYPERV_CPUID_NESTED_FEATURES, 0, EAX, 17)
+#define HV_X64_NESTED_GUEST_MAPPING_FLUSH \
+ KVM_X86_CPU_FEATURE(HYPERV_CPUID_NESTED_FEATURES, 0, EAX, 18)
+#define HV_X64_NESTED_MSR_BITMAP \
+ KVM_X86_CPU_FEATURE(HYPERV_CPUID_NESTED_FEATURES, 0, EAX, 19)
+
+/* HYPERV_CPUID_NESTED_FEATURES.EBX */
+#define HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL \
+ KVM_X86_CPU_FEATURE(HYPERV_CPUID_NESTED_FEATURES, 0, EBX, 0)
+
/* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING \
KVM_X86_CPU_FEATURE(HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0, EAX, 1)
@@ -343,4 +355,10 @@ struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
#define HV_INVARIANT_TSC_EXPOSED BIT_ULL(0)
+const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
+const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
+
+bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature);
+
#endif /* !SELFTEST_KVM_HYPERV_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index a0c1440017bb..e247f99e0473 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -25,6 +25,10 @@ extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
extern uint64_t guest_tsc_khz;
+#ifndef MAX_NR_CPUID_ENTRIES
+#define MAX_NR_CPUID_ENTRIES 100
+#endif
+
/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
@@ -908,8 +912,6 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
-const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
static inline uint32_t kvm_cpu_fms(void)
{
@@ -1009,7 +1011,6 @@ static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
}
void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
-void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
uint32_t function,
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 56b170b725b3..a2b7df5f1d39 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -712,16 +712,13 @@ void kvm_vm_release(struct kvm_vm *vmp)
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
- struct userspace_mem_region *region,
- bool unlink)
+ struct userspace_mem_region *region)
{
int ret;
- if (unlink) {
- rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
- rb_erase(&region->hva_node, &vm->regions.hva_tree);
- hash_del(&region->slot_node);
- }
+ rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
+ rb_erase(&region->hva_node, &vm->regions.hva_tree);
+ hash_del(&region->slot_node);
region->region.memory_size = 0;
vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
@@ -762,7 +759,7 @@ void kvm_vm_free(struct kvm_vm *vmp)
/* Free userspace_mem_regions. */
hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
- __vm_mem_region_delete(vmp, region, false);
+ __vm_mem_region_delete(vmp, region);
/* Free sparsebit arrays. */
sparsebit_free(&vmp->vpages_valid);
@@ -794,76 +791,6 @@ int kvm_memfd_alloc(size_t size, bool hugepages)
return fd;
}
-/*
- * Memory Compare, host virtual to guest virtual
- *
- * Input Args:
- * hva - Starting host virtual address
- * vm - Virtual Machine
- * gva - Starting guest virtual address
- * len - number of bytes to compare
- *
- * Output Args: None
- *
- * Input/Output Args: None
- *
- * Return:
- * Returns 0 if the bytes starting at hva for a length of len
- * are equal the guest virtual bytes starting at gva. Returns
- * a value < 0, if bytes at hva are less than those at gva.
- * Otherwise a value > 0 is returned.
- *
- * Compares the bytes starting at the host virtual address hva, for
- * a length of len, to the guest bytes starting at the guest virtual
- * address given by gva.
- */
-int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
-{
- size_t amt;
-
- /*
- * Compare a batch of bytes until either a match is found
- * or all the bytes have been compared.
- */
- for (uintptr_t offset = 0; offset < len; offset += amt) {
- uintptr_t ptr1 = (uintptr_t)hva + offset;
-
- /*
- * Determine host address for guest virtual address
- * at offset.
- */
- uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
-
- /*
- * Determine amount to compare on this pass.
- * Don't allow the comparsion to cross a page boundary.
- */
- amt = len - offset;
- if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
- amt = vm->page_size - (ptr1 % vm->page_size);
- if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
- amt = vm->page_size - (ptr2 % vm->page_size);
-
- assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
- assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
-
- /*
- * Perform the comparison. If there is a difference
- * return that result to the caller, otherwise need
- * to continue on looking for a mismatch.
- */
- int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
- if (ret != 0)
- return ret;
- }
-
- /*
- * No mismatch found. Let the caller know the two memory
- * areas are equal.
- */
- return 0;
-}
-
static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
struct userspace_mem_region *region)
{
@@ -1270,7 +1197,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
*/
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
- __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
+ __vm_mem_region_delete(vm, memslot2region(vm, slot));
}
void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index 4ad4492eea1d..20cfe970e3e3 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -14,7 +14,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
vm_paddr_t paddr;
- TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
+ TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
if (vm->pgd_created)
@@ -79,7 +79,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
}
/* Fill in page table entry */
- idx = (gva >> 12) & 0x0ffu; /* page index */
+ idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */
if (!(entry[idx] & PAGE_INVALID))
fprintf(stderr,
"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
@@ -91,7 +91,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
int ri, idx;
uint64_t *entry;
- TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
+ TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
entry = addr_gpa2hva(vm, vm->pgd);
@@ -103,7 +103,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
}
- idx = (gva >> 12) & 0x0ffu; /* page index */
+ idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */
TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
"No page mapping for vm virtual address 0x%lx", gva);
@@ -168,7 +168,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
struct kvm_sregs sregs;
struct kvm_vcpu *vcpu;
- TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
+ TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
diff --git a/tools/testing/selftests/kvm/lib/x86_64/hyperv.c b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
index efb7e7a1354d..15bc8cd583aa 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c
@@ -8,6 +8,73 @@
#include "processor.h"
#include "hyperv.h"
+const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
+{
+ static struct kvm_cpuid2 *cpuid;
+ int kvm_fd;
+
+ if (cpuid)
+ return cpuid;
+
+ cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
+ kvm_fd = open_kvm_dev_path_or_exit();
+
+ kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+ close(kvm_fd);
+ return cpuid;
+}
+
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
+{
+ static struct kvm_cpuid2 *cpuid_full;
+ const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
+ int i, nent = 0;
+
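+ /* Cache the merged CPUID list; it is identical for every vCPU */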
+ if (!cpuid_full) {
+ cpuid_sys = kvm_get_supported_cpuid();
+ cpuid_hv = kvm_get_supported_hv_cpuid();
+
+ cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
+ if (!cpuid_full) {
+ perror("malloc");
+ abort();
+ }
+
+ /* Need to skip KVM CPUID leaves 0x400000xx */
+ for (i = 0; i < cpuid_sys->nent; i++) {
+ if (cpuid_sys->entries[i].function >= 0x40000000 &&
+ cpuid_sys->entries[i].function < 0x40000100)
+ continue;
+ cpuid_full->entries[nent] = cpuid_sys->entries[i];
+ nent++;
+ }
+
+ memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
+ cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
+ cpuid_full->nent = nent + cpuid_hv->nent;
+ }
+
+ vcpu_init_cpuid(vcpu, cpuid_full);
+}
+
+const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
+
+ vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+ return cpuid;
+}
+
+bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature)
+{
+ if (!kvm_has_cap(KVM_CAP_SYS_HYPERV_CPUID))
+ return false;
+
+ return kvm_cpuid_has(kvm_get_supported_hv_cpuid(), feature);
+}
+
struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
vm_vaddr_t *p_hv_pages_gva)
{
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 153739f2e201..974bcd2df6d7 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -19,8 +19,6 @@
#define KERNEL_DS 0x10
#define KERNEL_TSS 0x18
-#define MAX_NR_CPUID_ENTRIES 100
-
vm_vaddr_t exception_handlers;
bool host_cpu_is_amd;
bool host_cpu_is_intel;
@@ -566,10 +564,8 @@ void route_exception(struct ex_regs *regs)
if (kvm_fixup_exception(regs))
return;
- ucall_assert(UCALL_UNHANDLED,
- "Unhandled exception in guest", __FILE__, __LINE__,
- "Unhandled exception '0x%lx' at guest RIP '0x%lx'",
- regs->vector, regs->rip);
+ GUEST_FAIL("Unhandled exception '0x%lx' at guest RIP '0x%lx'",
+ regs->vector, regs->rip);
}
static void vm_init_descriptor_tables(struct kvm_vm *vm)
@@ -611,7 +607,7 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED)
+ if (get_ucall(vcpu, &uc) == UCALL_ABORT)
REPORT_GUEST_ASSERT(uc);
}
@@ -1195,65 +1191,6 @@ void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
}
-const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
-{
- static struct kvm_cpuid2 *cpuid;
- int kvm_fd;
-
- if (cpuid)
- return cpuid;
-
- cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
- kvm_fd = open_kvm_dev_path_or_exit();
-
- kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
-
- close(kvm_fd);
- return cpuid;
-}
-
-void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
-{
- static struct kvm_cpuid2 *cpuid_full;
- const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
- int i, nent = 0;
-
- if (!cpuid_full) {
- cpuid_sys = kvm_get_supported_cpuid();
- cpuid_hv = kvm_get_supported_hv_cpuid();
-
- cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
- if (!cpuid_full) {
- perror("malloc");
- abort();
- }
-
- /* Need to skip KVM CPUID leaves 0x400000xx */
- for (i = 0; i < cpuid_sys->nent; i++) {
- if (cpuid_sys->entries[i].function >= 0x40000000 &&
- cpuid_sys->entries[i].function < 0x40000100)
- continue;
- cpuid_full->entries[nent] = cpuid_sys->entries[i];
- nent++;
- }
-
- memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
- cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
- cpuid_full->nent = nent + cpuid_hv->nent;
- }
-
- vcpu_init_cpuid(vcpu, cpuid_full);
-}
-
-const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
-
- vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
-
- return cpuid;
-}
-
unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 49f162573126..e3343f0df9e1 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -79,6 +79,7 @@ struct test_params {
useconds_t delay;
uint64_t nr_iterations;
bool partition_vcpu_memory_access;
+ bool disable_slot_zap_quirk;
};
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -89,6 +90,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
VM_MEM_SRC_ANONYMOUS,
p->partition_vcpu_memory_access);
+#ifdef __x86_64__
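+ /*
+ * With KVM_X86_QUIRK_SLOT_ZAP_ALL disabled, deleting a memslot zaps
+ * only that slot's mappings instead of all of guest memory.
+ */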
+ if (p->disable_slot_zap_quirk)
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+
+ pr_info("Memslot zap quirk %s\n", p->disable_slot_zap_quirk ?
+ "disabled" : "enabled");
+#endif
pr_info("Finished creating vCPUs\n");
@@ -107,11 +115,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
static void help(char *name)
{
puts("");
- printf("usage: %s [-h] [-m mode] [-d delay_usec]\n"
+ printf("usage: %s [-h] [-m mode] [-d delay_usec] [-q]\n"
" [-b memory] [-v vcpus] [-o] [-i iterations]\n", name);
guest_modes_help();
printf(" -d: add a delay between each iteration of adding and\n"
" deleting a memslot in usec.\n");
+ printf(" -q: Disable memslot zap quirk.\n");
printf(" -b: specify the size of the memory region which should be\n"
" accessed by each vCPU. e.g. 10M or 3G.\n"
" Default: 1G\n");
@@ -137,7 +146,7 @@ int main(int argc, char *argv[])
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:d:b:v:oi:")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:d:qb:v:oi:")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
@@ -160,6 +169,12 @@ int main(int argc, char *argv[])
case 'i':
p.nr_iterations = atoi_positive("Number of iterations", optarg);
break;
+ case 'q':
+ p.disable_slot_zap_quirk = true;
+
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) &
+ KVM_X86_QUIRK_SLOT_ZAP_ALL);
+ break;
case 'h':
default:
help(argv[0]);
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 579a64f97333..893366982f77 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -113,6 +113,7 @@ static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");
static sem_t vcpu_ready;
static bool map_unmap_verify;
+static bool disable_slot_zap_quirk;
static bool verbose;
#define pr_info_v(...) \
@@ -578,6 +579,9 @@ static bool test_memslot_move_prepare(struct vm_data *data,
uint32_t guest_page_size = data->vm->page_size;
uint64_t movesrcgpa, movetestgpa;
+ if (disable_slot_zap_quirk)
+ vm_enable_cap(data->vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+
movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
if (isactive) {
@@ -896,6 +900,7 @@ static void help(char *name, struct test_args *targs)
pr_info(" -h: print this help screen.\n");
pr_info(" -v: enable verbose mode (not for benchmarking).\n");
pr_info(" -d: enable extra debug checks.\n");
+ pr_info(" -q: Disable memslot zap quirk during memslot move.\n");
pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
targs->nslots);
pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
@@ -954,7 +959,7 @@ static bool parse_args(int argc, char *argv[],
uint32_t max_mem_slots;
int opt;
- while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
+ while ((opt = getopt(argc, argv, "hvdqs:f:e:l:r:")) != -1) {
switch (opt) {
case 'h':
default:
@@ -966,6 +971,11 @@ static bool parse_args(int argc, char *argv[],
case 'd':
map_unmap_verify = true;
break;
+ case 'q':
+ disable_slot_zap_quirk = true;
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) &
+ KVM_X86_QUIRK_SLOT_ZAP_ALL);
+ break;
case 's':
targs->nslots = atoi_paranoid(optarg);
if (targs->nslots <= 1 && targs->nslots != -1) {
diff --git a/tools/testing/selftests/kvm/s390x/cmma_test.c b/tools/testing/selftests/kvm/s390x/cmma_test.c
index b39033844756..e32dd59703a0 100644
--- a/tools/testing/selftests/kvm/s390x/cmma_test.c
+++ b/tools/testing/selftests/kvm/s390x/cmma_test.c
@@ -17,16 +17,17 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
+#include "processor.h"
#define MAIN_PAGE_COUNT 512
#define TEST_DATA_PAGE_COUNT 512
#define TEST_DATA_MEMSLOT 1
-#define TEST_DATA_START_GFN 4096
+#define TEST_DATA_START_GFN PAGE_SIZE
#define TEST_DATA_TWO_PAGE_COUNT 256
#define TEST_DATA_TWO_MEMSLOT 2
-#define TEST_DATA_TWO_START_GFN 8192
+#define TEST_DATA_TWO_START_GFN (2 * PAGE_SIZE)
static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];
@@ -66,7 +67,7 @@ static void guest_dirty_test_data(void)
" lghi 5,%[page_count]\n"
/* r5 += r1 */
"2: agfr 5,1\n"
- /* r2 = r1 << 12 */
+ /* r2 = r1 << PAGE_SHIFT */
"1: sllg 2,1,12(0)\n"
/* essa(r4, r2, SET_STABLE) */
" .insn rrf,0xb9ab0000,4,2,1,0\n"
diff --git a/tools/testing/selftests/kvm/s390x/config b/tools/testing/selftests/kvm/s390x/config
new file mode 100644
index 000000000000..23270f2d679f
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/config
@@ -0,0 +1,2 @@
+CONFIG_KVM=y
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/tools/testing/selftests/kvm/s390x/debug_test.c b/tools/testing/selftests/kvm/s390x/debug_test.c
index 84313fb27529..ad8095968601 100644
--- a/tools/testing/selftests/kvm/s390x/debug_test.c
+++ b/tools/testing/selftests/kvm/s390x/debug_test.c
@@ -2,12 +2,12 @@
/* Test KVM debugging features. */
#include "kvm_util.h"
#include "test_util.h"
+#include "sie.h"
#include <linux/kvm.h>
#define __LC_SVC_NEW_PSW 0x1c0
#define __LC_PGM_NEW_PSW 0x1d0
-#define ICPT_INSTRUCTION 0x04
#define IPA0_DIAG 0x8300
#define PGM_SPECIFICATION 0x06
@@ -85,7 +85,7 @@ static void test_step_pgm_diag(void)
vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code,
__LC_PGM_NEW_PSW, new_psw);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
- TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INSTRUCTION);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INST);
TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa & 0xff00, IPA0_DIAG);
vcpu_ioctl(vcpu, KVM_S390_IRQ, &irq);
vcpu_run(vcpu);
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index f2df7416be84..4374b4cd2a80 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -16,6 +16,7 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
+#include "processor.h"
enum mop_target {
LOGICAL,
@@ -226,9 +227,6 @@ static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1ULL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
diff --git a/tools/testing/selftests/kvm/s390x/tprot.c b/tools/testing/selftests/kvm/s390x/tprot.c
index 7a742a673b7c..12d5e1cb62e3 100644
--- a/tools/testing/selftests/kvm/s390x/tprot.c
+++ b/tools/testing/selftests/kvm/s390x/tprot.c
@@ -9,9 +9,8 @@
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
+#include "processor.h"
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1 << PAGE_SHIFT)
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
@@ -151,7 +150,7 @@ static enum stage perform_next_stage(int *i, bool mapped_0)
* instead.
* In order to skip these tests we detect this inside the guest
*/
- skip = tests[*i].addr < (void *)4096 &&
+ skip = tests[*i].addr < (void *)PAGE_SIZE &&
tests[*i].expected != TRANSL_UNAVAIL &&
!mapped_0;
if (!skip) {
diff --git a/tools/testing/selftests/kvm/s390x/ucontrol_test.c b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
new file mode 100644
index 000000000000..f257beec1430
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/ucontrol_test.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test code for the s390x kvm ucontrol interface
+ *
+ * Copyright IBM Corp. 2024
+ *
+ * Authors:
+ * Christoph Schlameuss <schlameuss@linux.ibm.com>
+ */
+#include "debug_print.h"
+#include "kselftest_harness.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "sie.h"
+
+#include <linux/capability.h>
+#include <linux/sizes.h>
+
+#define VM_MEM_SIZE (4 * SZ_1M)
+
+/* Declare capget() directly so we can check capabilities without libcap. */
+int capget(cap_user_header_t header, cap_user_data_t data);
+
+/*
+ * In order to create user controlled virtual machines on S390, check for
+ * KVM_CAP_S390_UCONTROL and use the KVM_VM_S390_UCONTROL flag as a
+ * privileged user (CAP_SYS_ADMIN).
+ */
+void require_ucontrol_admin(void)
+{
+ struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
+ struct __user_cap_header_struct hdr = {
+ .version = _LINUX_CAPABILITY_VERSION_3,
+ };
+ int rc;
+
+ rc = capget(&hdr, data);
+ TEST_ASSERT_EQ(0, rc);
+ TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0);
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_UCONTROL));
+}
+
+/* Test program setting some registers and looping */
+extern char test_gprs_asm[];
+asm("test_gprs_asm:\n"
+ "xgr %r0, %r0\n"
+ "lgfi %r1,1\n"
+ "lgfi %r2,2\n"
+ "lgfi %r3,3\n"
+ "lgfi %r4,4\n"
+ "lgfi %r5,5\n"
+ "lgfi %r6,6\n"
+ "lgfi %r7,7\n"
+ "0:\n"
+ " diag 0,0,0x44\n"
+ " ahi %r0,1\n"
+ " j 0b\n"
+);
+
+FIXTURE(uc_kvm)
+{
+ struct kvm_s390_sie_block *sie_block;
+ struct kvm_run *run;
+ uintptr_t base_gpa;
+ uintptr_t code_gpa;
+ uintptr_t base_hva;
+ uintptr_t code_hva;
+ int kvm_run_size;
+ void *vm_mem;
+ int vcpu_fd;
+ int kvm_fd;
+ int vm_fd;
+};
+
+/*
+ * Create a VM with a single vcpu, and map kvm_run and the SIE control block
+ * for easy access.
+ */
+FIXTURE_SETUP(uc_kvm)
+{
+ struct kvm_s390_vm_cpu_processor info;
+ int rc;
+
+ require_ucontrol_admin();
+
+ self->kvm_fd = open_kvm_dev_path_or_exit();
+ self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
+ ASSERT_GE(self->vm_fd, 0);
+
+ kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR, &info);
+ TH_LOG("create VM 0x%llx", info.cpuid);
+
+ self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
+ ASSERT_GE(self->vcpu_fd, 0);
+
+ self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
+ ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
+ TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
+ self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
+ ASSERT_NE(self->run, MAP_FAILED);
+ /*
+ * For virtual cpus that have been created with S390 user controlled
+ * virtual machines, the resulting vcpu fd can be memory mapped at page
+ * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
+ * the virtual cpu's hardware control block.
+ */
+ self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
+ ASSERT_NE(self->sie_block, MAP_FAILED);
+
+ TH_LOG("VM created %p %p", self->run, self->sie_block);
+
+ self->base_gpa = 0;
+ self->code_gpa = self->base_gpa + (3 * SZ_1M);
+
+ self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_SIZE);
+ ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno);
+ self->base_hva = (uintptr_t)self->vm_mem;
+ self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
+ struct kvm_s390_ucas_mapping map = {
+ .user_addr = self->base_hva,
+ .vcpu_addr = self->base_gpa,
+ .length = VM_MEM_SIZE,
+ };
+ TH_LOG("ucas map %p %p 0x%llx",
+ (void *)map.user_addr, (void *)map.vcpu_addr, map.length);
+ rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
+ ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s",
+ rc, strerror(errno));
+
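+ /* Fault in the base page with KVM_S390_VCPU_FAULT so it is mapped before entering SIE. */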
+ TH_LOG("page in %p", (void *)self->base_gpa);
+ rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa);
+ ASSERT_EQ(0, rc) TH_LOG("vcpu fault (%p) result %d not expected, %s",
+ (void *)self->base_hva, rc, strerror(errno));
+
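+ /* Clear the STOPPED flag so the vcpu can run under SIE. */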
+ self->sie_block->cpuflags &= ~CPUSTAT_STOPPED;
+}
+
+FIXTURE_TEARDOWN(uc_kvm)
+{
+ munmap(self->sie_block, PAGE_SIZE);
+ munmap(self->run, self->kvm_run_size);
+ close(self->vcpu_fd);
+ close(self->vm_fd);
+ close(self->kvm_fd);
+ free(self->vm_mem);
+}
+
+TEST_F(uc_kvm, uc_sie_assertions)
+{
+ /* assert interception of Code 08 (Program Interruption) is set */
+ EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI);
+}
+
+TEST_F(uc_kvm, uc_attr_mem_limit)
+{
+ u64 limit;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
+ .addr = (unsigned long)&limit,
+ };
+ int rc;
+
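+ /* A ucontrol VM has no memory limit, so the attribute reads back as ~0UL. */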
+ rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr);
+ EXPECT_EQ(0, rc);
+ EXPECT_EQ(~0UL, limit);
+
+ /* assert set not supported */
+ rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr);
+ EXPECT_EQ(-1, rc);
+ EXPECT_EQ(EINVAL, errno);
+}
+
+TEST_F(uc_kvm, uc_no_dirty_log)
+{
+ struct kvm_dirty_log dlog;
+ int rc;
+
+ rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog);
+ EXPECT_EQ(-1, rc);
+ EXPECT_EQ(EINVAL, errno);
+}
+
+/* Assert that the HPAGE capability cannot be enabled on a UCONTROL VM. */
+TEST(uc_cap_hpage)
+{
+ int rc, kvm_fd, vm_fd, vcpu_fd;
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_S390_HPAGE_1M,
+ };
+
+ require_ucontrol_admin();
+
+ kvm_fd = open_kvm_dev_path_or_exit();
+ vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
+ ASSERT_GE(vm_fd, 0);
+
+ /* assert hpages are not supported on ucontrol vm */
+ rc = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M);
+ EXPECT_EQ(0, rc);
+
+ /* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */
+ rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+ EXPECT_EQ(-1, rc);
+ EXPECT_EQ(EINVAL, errno);
+
+ /* assert HPAGE CAP is rejected after vCPU creation */
+ vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
+ ASSERT_GE(vcpu_fd, 0);
+ rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+ EXPECT_EQ(-1, rc);
+ EXPECT_EQ(EBUSY, errno);
+
+ close(vcpu_fd);
+ close(vm_fd);
+ close(kvm_fd);
+}
+
+/*
+ * Verify the SIEIC exit; fail on interception codes not expected in the
+ * test cases.
+ */
+static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
+{
+ struct kvm_s390_sie_block *sie_block = self->sie_block;
+ struct kvm_run *run = self->run;
+
+ /* check SIE interception code */
+ pr_info("sieic: 0x%.2x 0x%.4x 0x%.4x\n",
+ run->s390_sieic.icptcode,
+ run->s390_sieic.ipa,
+ run->s390_sieic.ipb);
+ switch (run->s390_sieic.icptcode) {
+ case ICPT_INST:
+ /* end execution in caller on intercepted instruction */
+ pr_info("sie instruction interception\n");
+ return false;
+ case ICPT_OPEREXC:
+ /* operation exception */
+ TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
+ default:
+ TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
+ }
+ return true;
+}
+
+/* verify VM state on exit */
+static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self)
+{
+ struct kvm_run *run = self->run;
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_S390_SIEIC:
+ return uc_handle_sieic(self);
+ default:
+ pr_info("exit_reason %2d not handled\n", run->exit_reason);
+ }
+ return true;
+}
+
+/* run the VM until interrupted */
+static int uc_run_once(FIXTURE_DATA(uc_kvm) * self)
+{
+ int rc;
+
+ rc = ioctl(self->vcpu_fd, KVM_RUN, NULL);
+ print_run(self->run, self->sie_block);
+ print_regs(self->run);
+ pr_debug("run %d / %d %s\n", rc, errno, strerror(errno));
+ return rc;
+}
+
+static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self)
+{
+ struct kvm_s390_sie_block *sie_block = self->sie_block;
+
+ /* assert vm was interrupted by diag 0x0044 */
+ TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
+ TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
+ TEST_ASSERT_EQ(0x8300, sie_block->ipa);
+ TEST_ASSERT_EQ(0x440000, sie_block->ipb);
+}
+
+TEST_F(uc_kvm, uc_gprs)
+{
+ struct kvm_sync_regs *sync_regs = &self->run->s.regs;
+ struct kvm_run *run = self->run;
+ struct kvm_regs regs = {};
+
+ /* Set registers to values that are different from the ones that we expect below */
+ for (int i = 0; i < 8; i++)
+ sync_regs->gprs[i] = 8;
+ run->kvm_dirty_regs |= KVM_SYNC_GPRS;
+
+ /* copy test_gprs_asm to code_hva / code_gpa */
+ TH_LOG("copy code %p to vm mapped memory %p / %p",
+ &test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa);
+ memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);
+
+ /* DAT disabled + 64 bit mode */
+ run->psw_mask = 0x0000000180000000ULL;
+ run->psw_addr = self->code_gpa;
+
+ /* run and expect interception of diag 44 */
+ ASSERT_EQ(0, uc_run_once(self));
+ ASSERT_EQ(false, uc_handle_exit(self));
+ uc_assert_diag44(self);
+
+ /* Retrieve and check guest register values */
+ ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
+ for (int i = 0; i < 8; i++) {
+ ASSERT_EQ(i, regs.gprs[i]);
+ ASSERT_EQ(i, sync_regs->gprs[i]);
+ }
+
+ /* run and expect interception of diag 44 again */
+ ASSERT_EQ(0, uc_run_once(self));
+ ASSERT_EQ(false, uc_handle_exit(self));
+ uc_assert_diag44(self);
+
+ /* check continued increment of register 0 value */
+ ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
+ ASSERT_EQ(1, regs.gprs[0]);
+ ASSERT_EQ(1, sync_regs->gprs[0]);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index bb8002084f52..a8267628e9ed 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -175,7 +175,7 @@ static void guest_code_move_memory_region(void)
GUEST_DONE();
}
-static void test_move_memory_region(void)
+static void test_move_memory_region(bool disable_slot_zap_quirk)
{
pthread_t vcpu_thread;
struct kvm_vcpu *vcpu;
@@ -184,6 +184,9 @@ static void test_move_memory_region(void)
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
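+ /* Optionally disable the quirk so moving a slot zaps only that slot's mappings. */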
+ if (disable_slot_zap_quirk)
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
/*
@@ -266,7 +269,7 @@ static void guest_code_delete_memory_region(void)
GUEST_ASSERT(0);
}
-static void test_delete_memory_region(void)
+static void test_delete_memory_region(bool disable_slot_zap_quirk)
{
pthread_t vcpu_thread;
struct kvm_vcpu *vcpu;
@@ -276,6 +279,9 @@ static void test_delete_memory_region(void)
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region);
+ if (disable_slot_zap_quirk)
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+
/* Delete the memory region, the guest should not die. */
vm_mem_region_delete(vm, MEM_REGION_SLOT);
wait_for_vcpu();
@@ -553,7 +559,10 @@ int main(int argc, char *argv[])
{
#ifdef __x86_64__
int i, loops;
+ int j, disable_slot_zap_quirk = 0;
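+
+ /* When supported, run each test with the slot-zap quirk enabled and disabled. */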
+ if (kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_SLOT_ZAP_ALL)
+ disable_slot_zap_quirk = 1;
/*
* FIXME: the zero-memslot test fails on aarch64 and s390x because
* KVM_RUN fails with ENOEXEC or EFAULT.
@@ -579,13 +588,17 @@ int main(int argc, char *argv[])
else
loops = 10;
- pr_info("Testing MOVE of in-use region, %d loops\n", loops);
- for (i = 0; i < loops; i++)
- test_move_memory_region();
+ for (j = 0; j <= disable_slot_zap_quirk; j++) {
+ pr_info("Testing MOVE of in-use region, %d loops, slot zap quirk %s\n",
+ loops, j ? "disabled" : "enabled");
+ for (i = 0; i < loops; i++)
+ test_move_memory_region(!!j);
- pr_info("Testing DELETE of in-use region, %d loops\n", loops);
- for (i = 0; i < loops; i++)
- test_delete_memory_region();
+ pr_info("Testing DELETE of in-use region, %d loops, slot zap quirk %s\n",
+ loops, j ? "disabled" : "enabled");
+ for (i = 0; i < loops; i++)
+ test_delete_memory_region(!!j);
+ }
#endif
return 0;
diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
index f6b295e0b2d2..76cc2df9238a 100644
--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
+++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c
@@ -47,15 +47,18 @@ static void guest_code(void)
/*
* Single step test, covers 2 basic instructions and 2 emulated
*
- * Enable interrupts during the single stepping to see that
- * pending interrupt we raised is not handled due to KVM_GUESTDBG_BLOCKIRQ
+ * Enable interrupts during the single stepping to see that the pending
+ * interrupt we raised is not handled due to KVM_GUESTDBG_BLOCKIRQ.
+ *
+ * Write MSR_IA32_TSC_DEADLINE to verify that KVM's fastpath handler
+ * exits to userspace due to single-step being enabled.
*/
asm volatile("ss_start: "
"sti\n\t"
"xor %%eax,%%eax\n\t"
"cpuid\n\t"
- "movl $0x1a0,%%ecx\n\t"
- "rdmsr\n\t"
+ "movl $" __stringify(MSR_IA32_TSC_DEADLINE) ", %%ecx\n\t"
+ "wrmsr\n\t"
"cli\n\t"
: : : "eax", "ebx", "ecx", "edx");
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c b/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
index e192720bfe14..74cf19661309 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
@@ -242,7 +242,7 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_DIRECT_TLBFLUSH));
+ TEST_REQUIRE(kvm_hv_cpu_has(HV_X64_NESTED_DIRECT_FLUSH));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index b987a3d79715..0ddb63229bcb 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
@@ -157,7 +157,7 @@ int main(int argc, char *argv[])
int stage;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_DIRECT_TLBFLUSH));
+ TEST_REQUIRE(kvm_hv_cpu_has(HV_X64_NESTED_DIRECT_FLUSH));
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
diff --git a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
index 7c70c0da4fb7..2e9197eb1652 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
@@ -160,6 +160,36 @@ static void test_sev(void *guest_code, uint64_t policy)
kvm_vm_free(vm);
}
+static void guest_shutdown_code(void)
+{
+ struct desc_ptr idt;
+
+ /* Clobber the IDT so that #UD is guaranteed to trigger SHUTDOWN. */
+ memset(&idt, 0, sizeof(idt));
+ __asm__ __volatile__("lidt %0" :: "m"(idt));
+
+ __asm__ __volatile__("ud2");
+}
+
+static void test_sev_es_shutdown(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ uint32_t type = KVM_X86_SEV_ES_VM;
+
+ vm = vm_sev_create_with_one_vcpu(type, guest_shutdown_code, &vcpu);
+
+ vm_sev_launch(vm, SEV_POLICY_ES, NULL);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Wanted SHUTDOWN, got %s",
+ exit_reason_str(vcpu->run->exit_reason));
+
+ kvm_vm_free(vm);
+}
+
int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
@@ -171,6 +201,8 @@ int main(int argc, char *argv[])
test_sev(guest_sev_es_code, SEV_POLICY_ES | SEV_POLICY_NO_DBG);
test_sev(guest_sev_es_code, SEV_POLICY_ES);
+ test_sev_es_shutdown();
+
if (kvm_has_cap(KVM_CAP_XCRS) &&
(xgetbv(0) & XFEATURE_MASK_X87_AVX) == XFEATURE_MASK_X87_AVX) {
test_sync_vmsa(0);
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
index 618cd2442390..88bcca188799 100644
--- a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
@@ -13,6 +13,7 @@
struct xapic_vcpu {
struct kvm_vcpu *vcpu;
bool is_x2apic;
+ bool has_xavic_errata;
};
static void xapic_guest_code(void)
@@ -31,6 +32,10 @@ static void xapic_guest_code(void)
}
}
+#define X2APIC_RSVD_BITS_MASK (GENMASK_ULL(31, 20) | \
+ GENMASK_ULL(17, 16) | \
+ GENMASK_ULL(13, 13))
+
static void x2apic_guest_code(void)
{
asm volatile("cli");
@@ -41,7 +46,12 @@ static void x2apic_guest_code(void)
uint64_t val = x2apic_read_reg(APIC_IRR) |
x2apic_read_reg(APIC_IRR + 0x10) << 32;
- x2apic_write_reg(APIC_ICR, val);
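+ /* Writes setting reserved ICR bits must #GP; otherwise the value should read back intact. */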
+ if (val & X2APIC_RSVD_BITS_MASK) {
+ x2apic_write_reg_fault(APIC_ICR, val);
+ } else {
+ x2apic_write_reg(APIC_ICR, val);
+ GUEST_ASSERT_EQ(x2apic_read_reg(APIC_ICR), val);
+ }
GUEST_SYNC(val);
} while (1);
}
@@ -71,27 +81,28 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
(u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
if (!x->is_x2apic) {
- val &= (-1u | (0xffull << (32 + 24)));
- TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
- } else {
- TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
+ if (!x->has_xavic_errata)
+ val &= (-1u | (0xffull << (32 + 24)));
+ } else if (val & X2APIC_RSVD_BITS_MASK) {
+ return;
}
-}
-#define X2APIC_RSVED_BITS_MASK (GENMASK_ULL(31,20) | \
- GENMASK_ULL(17,16) | \
- GENMASK_ULL(13,13))
+ if (x->has_xavic_errata)
+ TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
+ else
+ TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
+}
static void __test_icr(struct xapic_vcpu *x, uint64_t val)
{
- if (x->is_x2apic) {
- /* Hardware writing vICR register requires reserved bits 31:20,
- * 17:16 and 13 kept as zero to avoid #GP exception. Data value
- * written to vICR should mask out those bits above.
- */
- val &= ~X2APIC_RSVED_BITS_MASK;
- }
- ____test_icr(x, val | APIC_ICR_BUSY);
+ /*
+ * The BUSY bit is reserved on both AMD and Intel, but only AMD treats
+ * it as _must_ be zero. Intel simply ignores the bit. Don't test
+ * the BUSY bit for x2APIC, as there is no single correct behavior.
+ */
+ if (!x->is_x2apic)
+ ____test_icr(x, val | APIC_ICR_BUSY);
+
____test_icr(x, val & ~(u64)APIC_ICR_BUSY);
}
@@ -231,6 +242,15 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code);
x.is_x2apic = false;
+ /*
+ * AMD's AVIC implementation is buggy (fails to clear the ICR BUSY bit),
+ * and also diverges from KVM with respect to ICR2[23:0] (KVM and Intel
+ * drop writes, AMD does not). Account for the errata when checking
+ * that KVM reads back what was written.
+ */
+ x.has_xavic_errata = host_cpu_is_amd &&
+ get_kvm_amd_param_bool("avic");
+
vcpu_clear_cpuid_feature(x.vcpu, X86_FEATURE_X2APIC);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index e149d0574961..2585087cdf5c 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -10,6 +10,7 @@
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
+#include "hyperv.h"
#define HCALL_REGION_GPA 0xc0000000ULL
#define HCALL_REGION_SLOT 10
diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
index 3b26bf3cf5b9..1bc16fde2e8a 100644
--- a/tools/testing/selftests/landlock/base_test.c
+++ b/tools/testing/selftests/landlock/base_test.c
@@ -76,7 +76,7 @@ TEST(abi_version)
const struct landlock_ruleset_attr ruleset_attr = {
.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
};
- ASSERT_EQ(5, landlock_create_ruleset(NULL, 0,
+ ASSERT_EQ(6, landlock_create_ruleset(NULL, 0,
LANDLOCK_CREATE_RULESET_VERSION));
ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0,
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
index 7e2b431b9f90..61056fa074bb 100644
--- a/tools/testing/selftests/landlock/common.h
+++ b/tools/testing/selftests/landlock/common.h
@@ -7,6 +7,7 @@
* Copyright © 2021 Microsoft Corporation
*/
+#include <arpa/inet.h>
#include <errno.h>
#include <linux/landlock.h>
#include <linux/securebits.h>
@@ -14,11 +15,14 @@
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
+#include <sys/un.h>
#include <sys/wait.h>
#include <unistd.h>
#include "../kselftest_harness.h"
+#define TMP_DIR "tmp"
+
#ifndef __maybe_unused
#define __maybe_unused __attribute__((__unused__))
#endif
@@ -226,3 +230,38 @@ enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd)
TH_LOG("Failed to enforce ruleset: %s", strerror(errno));
}
}
+
+struct protocol_variant {
+ int domain;
+ int type;
+};
+
+struct service_fixture {
+ struct protocol_variant protocol;
+ /* port is also stored in ipv4_addr.sin_port or ipv6_addr.sin6_port */
+ unsigned short port;
+ union {
+ struct sockaddr_in ipv4_addr;
+ struct sockaddr_in6 ipv6_addr;
+ struct {
+ struct sockaddr_un unix_addr;
+ socklen_t unix_addr_len;
+ };
+ };
+};
+
+static pid_t __maybe_unused sys_gettid(void)
+{
+ return syscall(__NR_gettid);
+}
+
+static void __maybe_unused set_unix_address(struct service_fixture *const srv,
+ const unsigned short index)
+{
+ srv->unix_addr.sun_family = AF_UNIX;
+ sprintf(srv->unix_addr.sun_path,
+ "_selftests-landlock-abstract-unix-tid%d-index%d", sys_gettid(),
+ index);
+ srv->unix_addr_len = SUN_LEN(&srv->unix_addr);
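+ /* A leading NUL byte makes the address abstract, i.e. not bound in the filesystem. */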
+ srv->unix_addr.sun_path[0] = '\0';
+}
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 7d063c652be1..6788762188fe 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -59,7 +59,6 @@ int open_tree(int dfd, const char *filename, unsigned int flags)
#define RENAME_EXCHANGE (1 << 1)
#endif
-#define TMP_DIR "tmp"
#define BINARY_PATH "./true"
/* Paths (sibling number and depth) */
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
index f21cfbbc3638..4e0aeb53b225 100644
--- a/tools/testing/selftests/landlock/net_test.c
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -36,30 +36,6 @@ enum sandbox_type {
TCP_SANDBOX,
};
-struct protocol_variant {
- int domain;
- int type;
-};
-
-struct service_fixture {
- struct protocol_variant protocol;
- /* port is also stored in ipv4_addr.sin_port or ipv6_addr.sin6_port */
- unsigned short port;
- union {
- struct sockaddr_in ipv4_addr;
- struct sockaddr_in6 ipv6_addr;
- struct {
- struct sockaddr_un unix_addr;
- socklen_t unix_addr_len;
- };
- };
-};
-
-static pid_t sys_gettid(void)
-{
- return syscall(__NR_gettid);
-}
-
static int set_service(struct service_fixture *const srv,
const struct protocol_variant prot,
const unsigned short index)
@@ -92,12 +68,7 @@ static int set_service(struct service_fixture *const srv,
return 0;
case AF_UNIX:
- srv->unix_addr.sun_family = prot.domain;
- sprintf(srv->unix_addr.sun_path,
- "_selftests-landlock-net-tid%d-index%d", sys_gettid(),
- index);
- srv->unix_addr_len = SUN_LEN(&srv->unix_addr);
- srv->unix_addr.sun_path[0] = '\0';
+ set_unix_address(srv, index);
return 0;
}
return 1;
diff --git a/tools/testing/selftests/landlock/scoped_abstract_unix_test.c b/tools/testing/selftests/landlock/scoped_abstract_unix_test.c
new file mode 100644
index 000000000000..a6b59d2ab1b4
--- /dev/null
+++ b/tools/testing/selftests/landlock/scoped_abstract_unix_test.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Landlock tests - Abstract UNIX socket
+ *
+ * Copyright © 2024 Tahera Fahimi <fahimitahera@gmail.com>
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <sched.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "common.h"
+#include "scoped_common.h"
+
+/* Maximum number of pending connections held in the listen() backlog. */
+const short backlog = 10;
+
+static void create_fs_domain(struct __test_metadata *const _metadata)
+{
+ int ruleset_fd;
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR,
+ };
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ EXPECT_LE(0, ruleset_fd)
+ {
+ TH_LOG("Failed to create a ruleset: %s", strerror(errno));
+ }
+ EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ EXPECT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+ EXPECT_EQ(0, close(ruleset_fd));
+}
+
+FIXTURE(scoped_domains)
+{
+ struct service_fixture stream_address, dgram_address;
+};
+
+#include "scoped_base_variants.h"
+
+FIXTURE_SETUP(scoped_domains)
+{
+ drop_caps(_metadata);
+
+ memset(&self->stream_address, 0, sizeof(self->stream_address));
+ memset(&self->dgram_address, 0, sizeof(self->dgram_address));
+ set_unix_address(&self->stream_address, 0);
+ set_unix_address(&self->dgram_address, 1);
+}
+
+FIXTURE_TEARDOWN(scoped_domains)
+{
+}
+
+/*
+ * Test unix_stream_connect() and unix_may_send() for a child connecting to its
+ * parent, when each of them has a scoped domain or no domain.
+ */
+TEST_F(scoped_domains, connect_to_parent)
+{
+ pid_t child;
+ bool can_connect_to_parent;
+ int status;
+ int pipe_parent[2];
+ int stream_server, dgram_server;
+
+ /*
+ * can_connect_to_parent is true if a child process can connect to its
+ * parent process. This depends on the child process not being isolated
+ * from the parent with a dedicated Landlock domain.
+ */
+ can_connect_to_parent = !variant->domain_child;
+
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ if (variant->domain_both) {
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+ if (!__test_passed(_metadata))
+ return;
+ }
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int err;
+ int stream_client, dgram_client;
+ char buf_child;
+
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ if (variant->domain_child)
+ create_scoped_domain(
+ _metadata, LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ stream_client = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_client);
+ dgram_client = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_client);
+
+ /* Waits for the server. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+
+ err = connect(stream_client, &self->stream_address.unix_addr,
+ self->stream_address.unix_addr_len);
+ if (can_connect_to_parent) {
+ EXPECT_EQ(0, err);
+ } else {
+ EXPECT_EQ(-1, err);
+ EXPECT_EQ(EPERM, errno);
+ }
+ EXPECT_EQ(0, close(stream_client));
+
+ err = connect(dgram_client, &self->dgram_address.unix_addr,
+ self->dgram_address.unix_addr_len);
+ if (can_connect_to_parent) {
+ EXPECT_EQ(0, err);
+ } else {
+ EXPECT_EQ(-1, err);
+ EXPECT_EQ(EPERM, errno);
+ }
+ EXPECT_EQ(0, close(dgram_client));
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_parent[0]));
+ if (variant->domain_parent)
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ stream_server = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_server);
+ dgram_server = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_server);
+ ASSERT_EQ(0, bind(stream_server, &self->stream_address.unix_addr,
+ self->stream_address.unix_addr_len));
+ ASSERT_EQ(0, bind(dgram_server, &self->dgram_address.unix_addr,
+ self->dgram_address.unix_addr_len));
+ ASSERT_EQ(0, listen(stream_server, backlog));
+
+ /* Signals to child that the parent is listening. */
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(0, close(stream_server));
+ EXPECT_EQ(0, close(dgram_server));
+
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+/*
+ * Test unix_stream_connect() and unix_may_send() for a parent connecting to
+ * its child, when they have scoped domain or no domain.
+ */
+TEST_F(scoped_domains, connect_to_child)
+{
+ pid_t child;
+ bool can_connect_to_child;
+ int err_stream, err_dgram, errno_stream, errno_dgram, status;
+ int pipe_child[2], pipe_parent[2];
+ char buf;
+ int stream_client, dgram_client;
+
+ /*
+ * can_connect_to_child is true if a parent process can connect to its
+ * child process. The parent process is not isolated from the child
+ * with a dedicated Landlock domain.
+ */
+ can_connect_to_child = !variant->domain_parent;
+
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ if (variant->domain_both) {
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+ if (!__test_passed(_metadata))
+ return;
+ }
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int stream_server, dgram_server;
+
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ EXPECT_EQ(0, close(pipe_child[0]));
+ if (variant->domain_child)
+ create_scoped_domain(
+ _metadata, LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ /* Waits for the parent to be in a domain, if any. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf, 1));
+
+ stream_server = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_server);
+ dgram_server = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_server);
+ ASSERT_EQ(0,
+ bind(stream_server, &self->stream_address.unix_addr,
+ self->stream_address.unix_addr_len));
+ ASSERT_EQ(0, bind(dgram_server, &self->dgram_address.unix_addr,
+ self->dgram_address.unix_addr_len));
+ ASSERT_EQ(0, listen(stream_server, backlog));
+
+ /* Signals to the parent that child is listening. */
+ ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+
+ /* Waits to connect. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf, 1));
+ EXPECT_EQ(0, close(stream_server));
+ EXPECT_EQ(0, close(dgram_server));
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_child[1]));
+ EXPECT_EQ(0, close(pipe_parent[0]));
+
+ if (variant->domain_parent)
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ /* Signals that the parent is in a domain, if any. */
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+
+ stream_client = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_client);
+ dgram_client = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_client);
+
+ /* Waits for the child to listen. */
+ ASSERT_EQ(1, read(pipe_child[0], &buf, 1));
+ err_stream = connect(stream_client, &self->stream_address.unix_addr,
+ self->stream_address.unix_addr_len);
+ errno_stream = errno;
+ err_dgram = connect(dgram_client, &self->dgram_address.unix_addr,
+ self->dgram_address.unix_addr_len);
+ errno_dgram = errno;
+ if (can_connect_to_child) {
+ EXPECT_EQ(0, err_stream);
+ EXPECT_EQ(0, err_dgram);
+ } else {
+ EXPECT_EQ(-1, err_stream);
+ EXPECT_EQ(-1, err_dgram);
+ EXPECT_EQ(EPERM, errno_stream);
+ EXPECT_EQ(EPERM, errno_dgram);
+ }
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ EXPECT_EQ(0, close(stream_client));
+ EXPECT_EQ(0, close(dgram_client));
+
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+FIXTURE(scoped_vs_unscoped)
+{
+ struct service_fixture parent_stream_address, parent_dgram_address,
+ child_stream_address, child_dgram_address;
+};
+
+#include "scoped_multiple_domain_variants.h"
+
+FIXTURE_SETUP(scoped_vs_unscoped)
+{
+ drop_caps(_metadata);
+
+ memset(&self->parent_stream_address, 0,
+ sizeof(self->parent_stream_address));
+ set_unix_address(&self->parent_stream_address, 0);
+ memset(&self->parent_dgram_address, 0,
+ sizeof(self->parent_dgram_address));
+ set_unix_address(&self->parent_dgram_address, 1);
+ memset(&self->child_stream_address, 0,
+ sizeof(self->child_stream_address));
+ set_unix_address(&self->child_stream_address, 2);
+ memset(&self->child_dgram_address, 0,
+ sizeof(self->child_dgram_address));
+ set_unix_address(&self->child_dgram_address, 3);
+}
+
+FIXTURE_TEARDOWN(scoped_vs_unscoped)
+{
+}
+
+/*
+ * Test unix_stream_connect() and unix_may_send() for parent, child, and
+ * grandchild processes, each of which may have a scoped or non-scoped domain.
+ */
+TEST_F(scoped_vs_unscoped, unix_scoping)
+{
+ pid_t child;
+ int status;
+ bool can_connect_to_parent, can_connect_to_child;
+ int pipe_parent[2];
+ int stream_server_parent, dgram_server_parent;
+
+ can_connect_to_child = (variant->domain_grand_child != SCOPE_SANDBOX);
+ can_connect_to_parent = (can_connect_to_child &&
+ (variant->domain_children != SCOPE_SANDBOX));
+
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+
+ if (variant->domain_all == OTHER_SANDBOX)
+ create_fs_domain(_metadata);
+ else if (variant->domain_all == SCOPE_SANDBOX)
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int stream_server_child, dgram_server_child;
+ int pipe_child[2];
+ pid_t grand_child;
+
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+
+ if (variant->domain_children == OTHER_SANDBOX)
+ create_fs_domain(_metadata);
+ else if (variant->domain_children == SCOPE_SANDBOX)
+ create_scoped_domain(
+ _metadata, LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ grand_child = fork();
+ ASSERT_LE(0, grand_child);
+ if (grand_child == 0) {
+ char buf;
+ int stream_err, dgram_err, stream_errno, dgram_errno;
+ int stream_client, dgram_client;
+
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ EXPECT_EQ(0, close(pipe_child[1]));
+
+ if (variant->domain_grand_child == OTHER_SANDBOX)
+ create_fs_domain(_metadata);
+ else if (variant->domain_grand_child == SCOPE_SANDBOX)
+ create_scoped_domain(
+ _metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ stream_client = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_client);
+ dgram_client = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_client);
+
+ ASSERT_EQ(1, read(pipe_child[0], &buf, 1));
+ stream_err = connect(
+ stream_client,
+ &self->child_stream_address.unix_addr,
+ self->child_stream_address.unix_addr_len);
+ stream_errno = errno;
+ dgram_err = connect(
+ dgram_client,
+ &self->child_dgram_address.unix_addr,
+ self->child_dgram_address.unix_addr_len);
+ dgram_errno = errno;
+ if (can_connect_to_child) {
+ EXPECT_EQ(0, stream_err);
+ EXPECT_EQ(0, dgram_err);
+ } else {
+ EXPECT_EQ(-1, stream_err);
+ EXPECT_EQ(-1, dgram_err);
+ EXPECT_EQ(EPERM, stream_errno);
+ EXPECT_EQ(EPERM, dgram_errno);
+ }
+
+ EXPECT_EQ(0, close(stream_client));
+ stream_client = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_client);
+ /* Datagram sockets can "reconnect". */
+
+ ASSERT_EQ(1, read(pipe_parent[0], &buf, 1));
+ stream_err = connect(
+ stream_client,
+ &self->parent_stream_address.unix_addr,
+ self->parent_stream_address.unix_addr_len);
+ stream_errno = errno;
+ dgram_err = connect(
+ dgram_client,
+ &self->parent_dgram_address.unix_addr,
+ self->parent_dgram_address.unix_addr_len);
+ dgram_errno = errno;
+ if (can_connect_to_parent) {
+ EXPECT_EQ(0, stream_err);
+ EXPECT_EQ(0, dgram_err);
+ } else {
+ EXPECT_EQ(-1, stream_err);
+ EXPECT_EQ(-1, dgram_err);
+ EXPECT_EQ(EPERM, stream_errno);
+ EXPECT_EQ(EPERM, dgram_errno);
+ }
+ EXPECT_EQ(0, close(stream_client));
+ EXPECT_EQ(0, close(dgram_client));
+
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_child[0]));
+ if (variant->domain_child == OTHER_SANDBOX)
+ create_fs_domain(_metadata);
+ else if (variant->domain_child == SCOPE_SANDBOX)
+ create_scoped_domain(
+ _metadata, LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ stream_server_child = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_server_child);
+ dgram_server_child = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_server_child);
+
+ ASSERT_EQ(0, bind(stream_server_child,
+ &self->child_stream_address.unix_addr,
+ self->child_stream_address.unix_addr_len));
+ ASSERT_EQ(0, bind(dgram_server_child,
+ &self->child_dgram_address.unix_addr,
+ self->child_dgram_address.unix_addr_len));
+ ASSERT_EQ(0, listen(stream_server_child, backlog));
+
+ ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+ ASSERT_EQ(grand_child, waitpid(grand_child, &status, 0));
+ EXPECT_EQ(0, close(stream_server_child));
+ EXPECT_EQ(0, close(dgram_server_child));
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_parent[0]));
+
+ if (variant->domain_parent == OTHER_SANDBOX)
+ create_fs_domain(_metadata);
+ else if (variant->domain_parent == SCOPE_SANDBOX)
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ stream_server_parent = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_server_parent);
+ dgram_server_parent = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_server_parent);
+ ASSERT_EQ(0, bind(stream_server_parent,
+ &self->parent_stream_address.unix_addr,
+ self->parent_stream_address.unix_addr_len));
+ ASSERT_EQ(0, bind(dgram_server_parent,
+ &self->parent_dgram_address.unix_addr,
+ self->parent_dgram_address.unix_addr_len));
+
+ ASSERT_EQ(0, listen(stream_server_parent, backlog));
+
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(0, close(stream_server_parent));
+ EXPECT_EQ(0, close(dgram_server_parent));
+
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+FIXTURE(outside_socket)
+{
+ struct service_fixture address, transit_address;
+};
+
+FIXTURE_VARIANT(outside_socket)
+{
+ const bool child_socket;
+ const int type;
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(outside_socket, allow_dgram_child) {
+ /* clang-format on */
+ .child_socket = true,
+ .type = SOCK_DGRAM,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(outside_socket, deny_dgram_server) {
+ /* clang-format on */
+ .child_socket = false,
+ .type = SOCK_DGRAM,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(outside_socket, allow_stream_child) {
+ /* clang-format on */
+ .child_socket = true,
+ .type = SOCK_STREAM,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(outside_socket, deny_stream_server) {
+ /* clang-format on */
+ .child_socket = false,
+ .type = SOCK_STREAM,
+};
+
+FIXTURE_SETUP(outside_socket)
+{
+ drop_caps(_metadata);
+
+ memset(&self->transit_address, 0, sizeof(self->transit_address));
+ set_unix_address(&self->transit_address, 0);
+ memset(&self->address, 0, sizeof(self->address));
+ set_unix_address(&self->address, 1);
+}
+
+FIXTURE_TEARDOWN(outside_socket)
+{
+}
+
+/*
+ * Test unix_stream_connect() and unix_may_send() for parent and child
+ * processes when the connecting socket's domain differs from that of the
+ * process using it.
+ */
+TEST_F(outside_socket, socket_with_different_domain)
+{
+ pid_t child;
+ int err, status;
+ int pipe_child[2], pipe_parent[2];
+ char buf_parent;
+ int server_socket;
+
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int client_socket;
+ char buf_child;
+
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ EXPECT_EQ(0, close(pipe_child[0]));
+
+ /* Client always has a domain. */
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ if (variant->child_socket) {
+ int data_socket, passed_socket, stream_server;
+
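+ /*
+ * The socket is created inside the child's scoped domain and
+ * handed to the parent over the transit stream connection.
+ */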
+ passed_socket = socket(AF_UNIX, variant->type, 0);
+ ASSERT_LE(0, passed_socket);
+ stream_server = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_server);
+ ASSERT_EQ(0, bind(stream_server,
+ &self->transit_address.unix_addr,
+ self->transit_address.unix_addr_len));
+ ASSERT_EQ(0, listen(stream_server, backlog));
+ ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+ data_socket = accept(stream_server, NULL, NULL);
+ ASSERT_LE(0, data_socket);
+ ASSERT_EQ(0, send_fd(data_socket, passed_socket));
+ EXPECT_EQ(0, close(passed_socket));
+ EXPECT_EQ(0, close(stream_server));
+ }
+
+ client_socket = socket(AF_UNIX, variant->type, 0);
+ ASSERT_LE(0, client_socket);
+
+ /* Waits for parent signal for connection. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+ err = connect(client_socket, &self->address.unix_addr,
+ self->address.unix_addr_len);
+ if (variant->child_socket) {
+ EXPECT_EQ(0, err);
+ } else {
+ EXPECT_EQ(-1, err);
+ EXPECT_EQ(EPERM, errno);
+ }
+ EXPECT_EQ(0, close(client_socket));
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_child[1]));
+ EXPECT_EQ(0, close(pipe_parent[0]));
+
+ if (variant->child_socket) {
+ int client_child = socket(AF_UNIX, SOCK_STREAM, 0);
+
+ ASSERT_LE(0, client_child);
+ ASSERT_EQ(1, read(pipe_child[0], &buf_parent, 1));
+ ASSERT_EQ(0, connect(client_child,
+ &self->transit_address.unix_addr,
+ self->transit_address.unix_addr_len));
+ server_socket = recv_fd(client_child);
+ EXPECT_EQ(0, close(client_child));
+ } else {
+ server_socket = socket(AF_UNIX, variant->type, 0);
+ }
+ ASSERT_LE(0, server_socket);
+
+ /* Server always has a domain. */
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ ASSERT_EQ(0, bind(server_socket, &self->address.unix_addr,
+ self->address.unix_addr_len));
+ if (variant->type == SOCK_STREAM)
+ ASSERT_EQ(0, listen(server_socket, backlog));
+
+ /* Signals to child that the parent is listening. */
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(0, close(server_socket));
+
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+static const char stream_path[] = TMP_DIR "/stream.sock";
+static const char dgram_path[] = TMP_DIR "/dgram.sock";
+
+/* clang-format off */
+FIXTURE(various_address_sockets) {};
+/* clang-format on */
+
+FIXTURE_VARIANT(various_address_sockets)
+{
+ const int domain;
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(various_address_sockets, pathname_socket_scoped_domain) {
+ /* clang-format on */
+ .domain = SCOPE_SANDBOX,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(various_address_sockets, pathname_socket_other_domain) {
+ /* clang-format on */
+ .domain = OTHER_SANDBOX,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(various_address_sockets, pathname_socket_no_domain) {
+ /* clang-format on */
+ .domain = NO_SANDBOX,
+};
+
+FIXTURE_SETUP(various_address_sockets)
+{
+ drop_caps(_metadata);
+
+ umask(0077);
+ ASSERT_EQ(0, mkdir(TMP_DIR, 0700));
+}
+
+FIXTURE_TEARDOWN(various_address_sockets)
+{
+ EXPECT_EQ(0, unlink(stream_path));
+ EXPECT_EQ(0, unlink(dgram_path));
+ EXPECT_EQ(0, rmdir(TMP_DIR));
+}
+
+TEST_F(various_address_sockets, scoped_pathname_sockets)
+{
+ socklen_t size_stream, size_dgram;
+ pid_t child;
+ int status;
+ char buf_child, buf_parent;
+ int pipe_parent[2];
+ int unnamed_sockets[2];
+ int stream_pathname_socket, dgram_pathname_socket,
+ stream_abstract_socket, dgram_abstract_socket, data_socket;
+ struct service_fixture stream_abstract_addr, dgram_abstract_addr;
+ struct sockaddr_un stream_pathname_addr = {
+ .sun_family = AF_UNIX,
+ };
+ struct sockaddr_un dgram_pathname_addr = {
+ .sun_family = AF_UNIX,
+ };
+
+ /* Pathname address. */
+ snprintf(stream_pathname_addr.sun_path,
+ sizeof(stream_pathname_addr.sun_path), "%s", stream_path);
+ size_stream = offsetof(struct sockaddr_un, sun_path) +
+ strlen(stream_pathname_addr.sun_path);
+ snprintf(dgram_pathname_addr.sun_path,
+ sizeof(dgram_pathname_addr.sun_path), "%s", dgram_path);
+ size_dgram = offsetof(struct sockaddr_un, sun_path) +
+ strlen(dgram_pathname_addr.sun_path);
+
+ /* Abstract address. */
+ memset(&stream_abstract_addr, 0, sizeof(stream_abstract_addr));
+ set_unix_address(&stream_abstract_addr, 0);
+ memset(&dgram_abstract_addr, 0, sizeof(dgram_abstract_addr));
+ set_unix_address(&dgram_abstract_addr, 1);
+
+ /* Unnamed address for datagram socket. */
+ ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_DGRAM, 0, unnamed_sockets));
+
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int err;
+
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ EXPECT_EQ(0, close(unnamed_sockets[1]));
+
+ if (variant->domain == SCOPE_SANDBOX)
+ create_scoped_domain(
+ _metadata, LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+ else if (variant->domain == OTHER_SANDBOX)
+ create_fs_domain(_metadata);
+
+ /* Waits for parent to listen. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+ EXPECT_EQ(0, close(pipe_parent[0]));
+
+ /* Checks that we can send data through a datagram socket. */
+ ASSERT_EQ(1, write(unnamed_sockets[0], "a", 1));
+ EXPECT_EQ(0, close(unnamed_sockets[0]));
+
+ /* Connects with pathname sockets. */
+ stream_pathname_socket = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_pathname_socket);
+ ASSERT_EQ(0, connect(stream_pathname_socket,
+ &stream_pathname_addr, size_stream));
+ ASSERT_EQ(1, write(stream_pathname_socket, "b", 1));
+ EXPECT_EQ(0, close(stream_pathname_socket));
+
+ /* Sends without connection. */
+ dgram_pathname_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_pathname_socket);
+ err = sendto(dgram_pathname_socket, "c", 1, 0,
+ &dgram_pathname_addr, size_dgram);
+ EXPECT_EQ(1, err);
+
+ /* Sends with connection. */
+ ASSERT_EQ(0, connect(dgram_pathname_socket,
+ &dgram_pathname_addr, size_dgram));
+ ASSERT_EQ(1, write(dgram_pathname_socket, "d", 1));
+ EXPECT_EQ(0, close(dgram_pathname_socket));
+
+ /* Connects with abstract sockets. */
+ stream_abstract_socket = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_abstract_socket);
+ err = connect(stream_abstract_socket,
+ &stream_abstract_addr.unix_addr,
+ stream_abstract_addr.unix_addr_len);
+ if (variant->domain == SCOPE_SANDBOX) {
+ EXPECT_EQ(-1, err);
+ EXPECT_EQ(EPERM, errno);
+ } else {
+ EXPECT_EQ(0, err);
+ ASSERT_EQ(1, write(stream_abstract_socket, "e", 1));
+ }
+ EXPECT_EQ(0, close(stream_abstract_socket));
+
+ /* Sends without connection. */
+ dgram_abstract_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_abstract_socket);
+ err = sendto(dgram_abstract_socket, "f", 1, 0,
+ &dgram_abstract_addr.unix_addr,
+ dgram_abstract_addr.unix_addr_len);
+ if (variant->domain == SCOPE_SANDBOX) {
+ EXPECT_EQ(-1, err);
+ EXPECT_EQ(EPERM, errno);
+ } else {
+ EXPECT_EQ(1, err);
+ }
+
+ /* Sends with connection. */
+ err = connect(dgram_abstract_socket,
+ &dgram_abstract_addr.unix_addr,
+ dgram_abstract_addr.unix_addr_len);
+ if (variant->domain == SCOPE_SANDBOX) {
+ EXPECT_EQ(-1, err);
+ EXPECT_EQ(EPERM, errno);
+ } else {
+ EXPECT_EQ(0, err);
+ ASSERT_EQ(1, write(dgram_abstract_socket, "g", 1));
+ }
+ EXPECT_EQ(0, close(dgram_abstract_socket));
+
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_parent[0]));
+ EXPECT_EQ(0, close(unnamed_sockets[0]));
+
+ /* Sets up pathname servers. */
+ stream_pathname_socket = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_pathname_socket);
+ ASSERT_EQ(0, bind(stream_pathname_socket, &stream_pathname_addr,
+ size_stream));
+ ASSERT_EQ(0, listen(stream_pathname_socket, backlog));
+
+ dgram_pathname_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_pathname_socket);
+ ASSERT_EQ(0, bind(dgram_pathname_socket, &dgram_pathname_addr,
+ size_dgram));
+
+ /* Sets up abstract servers. */
+ stream_abstract_socket = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, stream_abstract_socket);
+ ASSERT_EQ(0,
+ bind(stream_abstract_socket, &stream_abstract_addr.unix_addr,
+ stream_abstract_addr.unix_addr_len));
+
+ dgram_abstract_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, dgram_abstract_socket);
+ ASSERT_EQ(0, bind(dgram_abstract_socket, &dgram_abstract_addr.unix_addr,
+ dgram_abstract_addr.unix_addr_len));
+ ASSERT_EQ(0, listen(stream_abstract_socket, backlog));
+
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ EXPECT_EQ(0, close(pipe_parent[1]));
+
+ /* Reads from unnamed socket. */
+ ASSERT_EQ(1, read(unnamed_sockets[1], &buf_parent, sizeof(buf_parent)));
+ ASSERT_EQ('a', buf_parent);
+ EXPECT_LE(0, close(unnamed_sockets[1]));
+
+ /* Reads from pathname sockets. */
+ data_socket = accept(stream_pathname_socket, NULL, NULL);
+ ASSERT_LE(0, data_socket);
+ ASSERT_EQ(1, read(data_socket, &buf_parent, sizeof(buf_parent)));
+ ASSERT_EQ('b', buf_parent);
+ EXPECT_EQ(0, close(data_socket));
+ EXPECT_EQ(0, close(stream_pathname_socket));
+
+ ASSERT_EQ(1,
+ read(dgram_pathname_socket, &buf_parent, sizeof(buf_parent)));
+ ASSERT_EQ('c', buf_parent);
+ ASSERT_EQ(1,
+ read(dgram_pathname_socket, &buf_parent, sizeof(buf_parent)));
+ ASSERT_EQ('d', buf_parent);
+ EXPECT_EQ(0, close(dgram_pathname_socket));
+
+ if (variant->domain != SCOPE_SANDBOX) {
+ /* Reads from abstract sockets if allowed to send. */
+ data_socket = accept(stream_abstract_socket, NULL, NULL);
+ ASSERT_LE(0, data_socket);
+ ASSERT_EQ(1,
+ read(data_socket, &buf_parent, sizeof(buf_parent)));
+ ASSERT_EQ('e', buf_parent);
+ EXPECT_EQ(0, close(data_socket));
+
+ ASSERT_EQ(1, read(dgram_abstract_socket, &buf_parent,
+ sizeof(buf_parent)));
+ ASSERT_EQ('f', buf_parent);
+ ASSERT_EQ(1, read(dgram_abstract_socket, &buf_parent,
+ sizeof(buf_parent)));
+ ASSERT_EQ('g', buf_parent);
+ }
+
+ /* Waits for all abstract socket tests. */
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(0, close(stream_abstract_socket));
+ EXPECT_EQ(0, close(dgram_abstract_socket));
+
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+TEST(datagram_sockets)
+{
+ struct service_fixture connected_addr, non_connected_addr;
+ int server_conn_socket, server_unconn_socket;
+ int pipe_parent[2], pipe_child[2];
+ int status;
+ char buf;
+ pid_t child;
+
+ drop_caps(_metadata);
+ memset(&connected_addr, 0, sizeof(connected_addr));
+ set_unix_address(&connected_addr, 0);
+ memset(&non_connected_addr, 0, sizeof(non_connected_addr));
+ set_unix_address(&non_connected_addr, 1);
+
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int client_conn_socket, client_unconn_socket;
+
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ EXPECT_EQ(0, close(pipe_child[0]));
+
+ client_conn_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ client_unconn_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, client_conn_socket);
+ ASSERT_LE(0, client_unconn_socket);
+
+ /* Waits for parent to listen. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf, 1));
+ ASSERT_EQ(0,
+ connect(client_conn_socket, &connected_addr.unix_addr,
+ connected_addr.unix_addr_len));
+
+ /*
+ * Both connected and non-connected sockets can send data when
+ * the domain is not scoped.
+ */
+ ASSERT_EQ(1, send(client_conn_socket, ".", 1, 0));
+ ASSERT_EQ(1, sendto(client_unconn_socket, ".", 1, 0,
+ &non_connected_addr.unix_addr,
+ non_connected_addr.unix_addr_len));
+ ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+
+ /* Scopes the domain. */
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ /*
+ * Connected socket sends data to the receiver, but the
+ * non-connected socket must fail to send data.
+ */
+ ASSERT_EQ(1, send(client_conn_socket, ".", 1, 0));
+ ASSERT_EQ(-1, sendto(client_unconn_socket, ".", 1, 0,
+ &non_connected_addr.unix_addr,
+ non_connected_addr.unix_addr_len));
+ ASSERT_EQ(EPERM, errno);
+ ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+
+ EXPECT_EQ(0, close(client_conn_socket));
+ EXPECT_EQ(0, close(client_unconn_socket));
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_parent[0]));
+ EXPECT_EQ(0, close(pipe_child[1]));
+
+ server_conn_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ server_unconn_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, server_conn_socket);
+ ASSERT_LE(0, server_unconn_socket);
+
+ ASSERT_EQ(0, bind(server_conn_socket, &connected_addr.unix_addr,
+ connected_addr.unix_addr_len));
+ ASSERT_EQ(0, bind(server_unconn_socket, &non_connected_addr.unix_addr,
+ non_connected_addr.unix_addr_len));
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+
+ /* Waits for child to test. */
+ ASSERT_EQ(1, read(pipe_child[0], &buf, 1));
+ ASSERT_EQ(1, recv(server_conn_socket, &buf, 1, 0));
+ ASSERT_EQ(1, recv(server_unconn_socket, &buf, 1, 0));
+
+ /*
+ * The connected datagram socket will receive data, but the
+ * non-connected datagram socket will not.
+ */
+ ASSERT_EQ(1, read(pipe_child[0], &buf, 1));
+ ASSERT_EQ(1, recv(server_conn_socket, &buf, 1, 0));
+
+ /* Waits for all tests to finish. */
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(0, close(server_conn_socket));
+ EXPECT_EQ(0, close(server_unconn_socket));
+
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+TEST(self_connect)
+{
+ struct service_fixture connected_addr, non_connected_addr;
+ int connected_socket, non_connected_socket, status;
+ pid_t child;
+
+ drop_caps(_metadata);
+ memset(&connected_addr, 0, sizeof(connected_addr));
+ set_unix_address(&connected_addr, 0);
+ memset(&non_connected_addr, 0, sizeof(non_connected_addr));
+ set_unix_address(&non_connected_addr, 1);
+
+ connected_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ non_connected_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
+ ASSERT_LE(0, connected_socket);
+ ASSERT_LE(0, non_connected_socket);
+
+ ASSERT_EQ(0, bind(connected_socket, &connected_addr.unix_addr,
+ connected_addr.unix_addr_len));
+ ASSERT_EQ(0, bind(non_connected_socket, &non_connected_addr.unix_addr,
+ non_connected_addr.unix_addr_len));
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ /* Child's domain is scoped. */
+ create_scoped_domain(_metadata,
+ LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET);
+
+ /*
+ * The child inherits the sockets, and cannot connect or
+ * send data to them.
+ */
+ ASSERT_EQ(-1,
+ connect(connected_socket, &connected_addr.unix_addr,
+ connected_addr.unix_addr_len));
+ ASSERT_EQ(EPERM, errno);
+
+ ASSERT_EQ(-1, sendto(connected_socket, ".", 1, 0,
+ &connected_addr.unix_addr,
+ connected_addr.unix_addr_len));
+ ASSERT_EQ(EPERM, errno);
+
+ ASSERT_EQ(-1, sendto(non_connected_socket, ".", 1, 0,
+ &non_connected_addr.unix_addr,
+ non_connected_addr.unix_addr_len));
+ ASSERT_EQ(EPERM, errno);
+
+ EXPECT_EQ(0, close(connected_socket));
+ EXPECT_EQ(0, close(non_connected_socket));
+ _exit(_metadata->exit_code);
+ return;
+ }
+
+ /* Waits for all tests to finish. */
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ EXPECT_EQ(0, close(connected_socket));
+ EXPECT_EQ(0, close(non_connected_socket));
+
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/landlock/scoped_base_variants.h b/tools/testing/selftests/landlock/scoped_base_variants.h
new file mode 100644
index 000000000000..d3b1fa8a584e
--- /dev/null
+++ b/tools/testing/selftests/landlock/scoped_base_variants.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Landlock scoped_domains variants
+ *
+ * See the hierarchy variants from ptrace_test.c
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ * Copyright © 2024 Tahera Fahimi <fahimitahera@gmail.com>
+ */
+
+/* clang-format on */
+FIXTURE_VARIANT(scoped_domains)
+{
+ bool domain_both;
+ bool domain_parent;
+ bool domain_child;
+};
+
+/*
+ * No domain
+ *
+ * P1-. P1 -> P2 : allow
+ * \ P2 -> P1 : allow
+ * 'P2
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_domains, without_domain) {
+ /* clang-format on */
+ .domain_both = false,
+ .domain_parent = false,
+ .domain_child = false,
+};
+
+/*
+ * Child domain
+ *
+ * P1--. P1 -> P2 : allow
+ * \ P2 -> P1 : deny
+ * .'-----.
+ * | P2 |
+ * '------'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_domains, child_domain) {
+ /* clang-format on */
+ .domain_both = false,
+ .domain_parent = false,
+ .domain_child = true,
+};
+
+/*
+ * Parent domain
+ * .------.
+ * | P1 --. P1 -> P2 : deny
+ * '------' \ P2 -> P1 : allow
+ * '
+ * P2
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_domains, parent_domain) {
+ /* clang-format on */
+ .domain_both = false,
+ .domain_parent = true,
+ .domain_child = false,
+};
+
+/*
+ * Parent + child domain (siblings)
+ * .------.
+ * | P1 ---. P1 -> P2 : deny
+ * '------' \ P2 -> P1 : deny
+ * .---'--.
+ * | P2 |
+ * '------'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_domains, sibling_domain) {
+ /* clang-format on */
+ .domain_both = false,
+ .domain_parent = true,
+ .domain_child = true,
+};
+
+/*
+ * Same domain (inherited)
+ * .-------------.
+ * | P1----. | P1 -> P2 : allow
+ * | \ | P2 -> P1 : allow
+ * | ' |
+ * | P2 |
+ * '-------------'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_domains, inherited_domain) {
+ /* clang-format on */
+ .domain_both = true,
+ .domain_parent = false,
+ .domain_child = false,
+};
+
+/*
+ * Inherited + child domain
+ * .-----------------.
+ * | P1----. | P1 -> P2 : allow
+ * | \ | P2 -> P1 : deny
+ * | .-'----. |
+ * | | P2 | |
+ * | '------' |
+ * '-----------------'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_domains, nested_domain) {
+ /* clang-format on */
+ .domain_both = true,
+ .domain_parent = false,
+ .domain_child = true,
+};
+
+/*
+ * Inherited + parent domain
+ * .-----------------.
+ * |.------. | P1 -> P2 : deny
+ * || P1 ----. | P2 -> P1 : allow
+ * |'------' \ |
+ * | ' |
+ * | P2 |
+ * '-----------------'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_domains, nested_and_parent_domain) {
+ /* clang-format on */
+ .domain_both = true,
+ .domain_parent = true,
+ .domain_child = false,
+};
+
+/*
+ * Inherited + parent and child domain (siblings)
+ * .-----------------.
+ * | .------. | P1 -> P2 : deny
+ * | | P1 . | P2 -> P1 : deny
+ * | '------'\ |
+ * | \ |
+ * | .--'---. |
+ * | | P2 | |
+ * | '------' |
+ * '-----------------'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_domains, forked_domains) {
+ /* clang-format on */
+ .domain_both = true,
+ .domain_parent = true,
+ .domain_child = true,
+};
diff --git a/tools/testing/selftests/landlock/scoped_common.h b/tools/testing/selftests/landlock/scoped_common.h
new file mode 100644
index 000000000000..a9a912d30c4d
--- /dev/null
+++ b/tools/testing/selftests/landlock/scoped_common.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Landlock scope test helpers
+ *
+ * Copyright © 2024 Tahera Fahimi <fahimitahera@gmail.com>
+ */
+
+#define _GNU_SOURCE
+
+#include <sys/types.h>
+
+static void create_scoped_domain(struct __test_metadata *const _metadata,
+ const __u16 scope)
+{
+ int ruleset_fd;
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .scoped = scope,
+ };
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, ruleset_fd)
+ {
+ TH_LOG("Failed to create a ruleset: %s", strerror(errno));
+ }
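+	/*
+	 * enforce_ruleset() (from common.h) is expected to set no_new_privs
+	 * and then call landlock_restrict_self(2).
+	 */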
+ enforce_ruleset(_metadata, ruleset_fd);
+ EXPECT_EQ(0, close(ruleset_fd));
+}
diff --git a/tools/testing/selftests/landlock/scoped_multiple_domain_variants.h b/tools/testing/selftests/landlock/scoped_multiple_domain_variants.h
new file mode 100644
index 000000000000..bcd9a83805d0
--- /dev/null
+++ b/tools/testing/selftests/landlock/scoped_multiple_domain_variants.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Landlock variants for three processes with various domains.
+ *
+ * Copyright © 2024 Tahera Fahimi <fahimitahera@gmail.com>
+ */
+
+enum sandbox_type {
+ NO_SANDBOX,
+ SCOPE_SANDBOX,
+ /* Any other type of sandboxing domain */
+ OTHER_SANDBOX,
+};
+
+/* clang-format on */
+FIXTURE_VARIANT(scoped_vs_unscoped)
+{
+ const int domain_all;
+ const int domain_parent;
+ const int domain_children;
+ const int domain_child;
+ const int domain_grand_child;
+};
+
+/*
+ * .-----------------.
+ * | ####### | P3 -> P2 : allow
+ * | P1----# P2 # | P3 -> P1 : deny
+ * | # | # |
+ * | # P3 # |
+ * | ####### |
+ * '-----------------'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_vs_unscoped, deny_scoped) {
+ .domain_all = OTHER_SANDBOX,
+ .domain_parent = NO_SANDBOX,
+ .domain_children = SCOPE_SANDBOX,
+ .domain_child = NO_SANDBOX,
+ .domain_grand_child = NO_SANDBOX,
+ /* clang-format on */
+};
+
+/*
+ * ###################
+ * # ####### # P3 -> P2 : allow
+ * # P1----# P2 # # P3 -> P1 : deny
+ * # # | # #
+ * # # P3 # #
+ * # ####### #
+ * ###################
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_vs_unscoped, all_scoped) {
+ .domain_all = SCOPE_SANDBOX,
+ .domain_parent = NO_SANDBOX,
+ .domain_children = SCOPE_SANDBOX,
+ .domain_child = NO_SANDBOX,
+ .domain_grand_child = NO_SANDBOX,
+ /* clang-format on */
+};
+
+/*
+ * .-----------------.
+ * | .-----. | P3 -> P2 : allow
+ * | P1----| P2 | | P3 -> P1 : allow
+ * | | | |
+ * | | P3 | |
+ * | '-----' |
+ * '-----------------'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_vs_unscoped, allow_with_other_domain) {
+ .domain_all = OTHER_SANDBOX,
+ .domain_parent = NO_SANDBOX,
+ .domain_children = OTHER_SANDBOX,
+ .domain_child = NO_SANDBOX,
+ .domain_grand_child = NO_SANDBOX,
+ /* clang-format on */
+};
+
+/*
+ * .----. ###### P3 -> P2 : allow
+ * | P1 |----# P2 # P3 -> P1 : allow
+ * '----' ######
+ * |
+ * P3
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_vs_unscoped, allow_with_one_domain) {
+ .domain_all = NO_SANDBOX,
+ .domain_parent = OTHER_SANDBOX,
+ .domain_children = NO_SANDBOX,
+ .domain_child = SCOPE_SANDBOX,
+ .domain_grand_child = NO_SANDBOX,
+ /* clang-format on */
+};
+
+/*
+ * ###### .-----. P3 -> P2 : allow
+ * # P1 #----| P2 | P3 -> P1 : allow
+ * ###### '-----'
+ * |
+ * P3
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_vs_unscoped, allow_with_grand_parent_scoped) {
+ .domain_all = NO_SANDBOX,
+ .domain_parent = SCOPE_SANDBOX,
+ .domain_children = NO_SANDBOX,
+ .domain_child = OTHER_SANDBOX,
+ .domain_grand_child = NO_SANDBOX,
+ /* clang-format on */
+};
+
+/*
+ * ###### ###### P3 -> P2 : allow
+ * # P1 #----# P2 # P3 -> P1 : allow
+ * ###### ######
+ * |
+ * .----.
+ * | P3 |
+ * '----'
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_vs_unscoped, allow_with_parents_domain) {
+ .domain_all = NO_SANDBOX,
+ .domain_parent = SCOPE_SANDBOX,
+ .domain_children = NO_SANDBOX,
+ .domain_child = SCOPE_SANDBOX,
+ .domain_grand_child = NO_SANDBOX,
+ /* clang-format on */
+};
+
+/*
+ * ###### P3 -> P2 : deny
+ * # P1 #----P2 P3 -> P1 : deny
+ * ###### |
+ * |
+ * ######
+ * # P3 #
+ * ######
+ */
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoped_vs_unscoped, deny_with_self_and_grandparent_domain) {
+ .domain_all = NO_SANDBOX,
+ .domain_parent = SCOPE_SANDBOX,
+ .domain_children = NO_SANDBOX,
+ .domain_child = NO_SANDBOX,
+ .domain_grand_child = SCOPE_SANDBOX,
+ /* clang-format on */
+};
diff --git a/tools/testing/selftests/landlock/scoped_signal_test.c b/tools/testing/selftests/landlock/scoped_signal_test.c
new file mode 100644
index 000000000000..475ee62a832d
--- /dev/null
+++ b/tools/testing/selftests/landlock/scoped_signal_test.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Landlock tests - Signal Scoping
+ *
+ * Copyright © 2024 Tahera Fahimi <fahimitahera@gmail.com>
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "common.h"
+#include "scoped_common.h"
+
+/* This variable is used for handling several signals. */
+static volatile sig_atomic_t is_signaled;
+
+/* clang-format off */
+FIXTURE(scoping_signals) {};
+/* clang-format on */
+
+FIXTURE_VARIANT(scoping_signals)
+{
+ int sig;
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoping_signals, sigtrap) {
+ /* clang-format on */
+ .sig = SIGTRAP,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoping_signals, sigurg) {
+ /* clang-format on */
+ .sig = SIGURG,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoping_signals, sighup) {
+ /* clang-format on */
+ .sig = SIGHUP,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(scoping_signals, sigtstp) {
+ /* clang-format on */
+ .sig = SIGTSTP,
+};
+
+FIXTURE_SETUP(scoping_signals)
+{
+ drop_caps(_metadata);
+
+ is_signaled = 0;
+}
+
+FIXTURE_TEARDOWN(scoping_signals)
+{
+}
+
+static void scope_signal_handler(int sig, siginfo_t *info, void *ucontext)
+{
+ if (sig == SIGTRAP || sig == SIGURG || sig == SIGHUP || sig == SIGTSTP)
+ is_signaled = 1;
+}
+
+/*
+ * In this test, a child process sends a signal to its parent before
+ * and after being scoped.
+ */
+TEST_F(scoping_signals, send_sig_to_parent)
+{
+ int pipe_parent[2];
+ int status;
+ pid_t child;
+ pid_t parent = getpid();
+ struct sigaction action = {
+ .sa_sigaction = scope_signal_handler,
+ .sa_flags = SA_SIGINFO,
+	};
+
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ ASSERT_LE(0, sigaction(variant->sig, &action, NULL));
+
+ /* The process should not have already been signaled. */
+ EXPECT_EQ(0, is_signaled);
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ char buf_child;
+ int err;
+
+ EXPECT_EQ(0, close(pipe_parent[1]));
+
+		/*
+		 * The child process can send a signal to the parent as
+		 * long as its domain is not scoped.
+		 */
+ err = kill(parent, variant->sig);
+ ASSERT_EQ(0, err);
+ ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+ EXPECT_EQ(0, close(pipe_parent[0]));
+
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+
+		/*
+		 * Once scoped, the child process cannot send a signal to
+		 * the parent anymore.
+		 */
+ err = kill(parent, variant->sig);
+ ASSERT_EQ(-1, err);
+ ASSERT_EQ(EPERM, errno);
+
+		/*
+		 * Regardless of the domain, a process should be able to
+		 * send a signal to itself.
+		 */
+ ASSERT_EQ(0, is_signaled);
+ ASSERT_EQ(0, raise(variant->sig));
+ ASSERT_EQ(1, is_signaled);
+
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_parent[0]));
+
+	/* Waits for the first signal to be received, without a race condition. */
+ while (!is_signaled && !usleep(1))
+ ;
+ ASSERT_EQ(1, is_signaled);
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ is_signaled = 0;
+
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+
+ EXPECT_EQ(0, is_signaled);
+}
+
+/* clang-format off */
+FIXTURE(scoped_domains) {};
+/* clang-format on */
+
+#include "scoped_base_variants.h"
+
+FIXTURE_SETUP(scoped_domains)
+{
+ drop_caps(_metadata);
+}
+
+FIXTURE_TEARDOWN(scoped_domains)
+{
+}
+
+/*
+ * This test ensures that a scoped process cannot send a signal outside
+ * of its scoped domain.
+ */
+TEST_F(scoped_domains, check_access_signal)
+{
+ pid_t child;
+ pid_t parent = getpid();
+ int status;
+ bool can_signal_child, can_signal_parent;
+ int pipe_parent[2], pipe_child[2];
+ char buf_parent;
+ int err;
+
+ can_signal_parent = !variant->domain_child;
+ can_signal_child = !variant->domain_parent;
+
+ if (variant->domain_both)
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ char buf_child;
+
+ EXPECT_EQ(0, close(pipe_child[0]));
+ EXPECT_EQ(0, close(pipe_parent[1]));
+
+ if (variant->domain_child)
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+
+ ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+ EXPECT_EQ(0, close(pipe_child[1]));
+
+ /* Waits for the parent to send signals. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+ EXPECT_EQ(0, close(pipe_parent[0]));
+
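+		/* A zero signal only performs the permission check; nothing is delivered. */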
+ err = kill(parent, 0);
+ if (can_signal_parent) {
+ ASSERT_EQ(0, err);
+ } else {
+ ASSERT_EQ(-1, err);
+ ASSERT_EQ(EPERM, errno);
+ }
+		/*
+		 * Regardless of the domain, a process should be able to
+		 * send a signal to itself.
+		 */
+ ASSERT_EQ(0, raise(0));
+
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_parent[0]));
+ EXPECT_EQ(0, close(pipe_child[1]));
+
+ if (variant->domain_parent)
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+
+ ASSERT_EQ(1, read(pipe_child[0], &buf_parent, 1));
+ EXPECT_EQ(0, close(pipe_child[0]));
+
+ err = kill(child, 0);
+ if (can_signal_child) {
+ ASSERT_EQ(0, err);
+ } else {
+ ASSERT_EQ(-1, err);
+ ASSERT_EQ(EPERM, errno);
+ }
+ ASSERT_EQ(0, raise(0));
+
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+static int thread_pipe[2];
+
+enum thread_return {
+ THREAD_INVALID = 0,
+ THREAD_SUCCESS = 1,
+ THREAD_ERROR = 2,
+};
+
+void *thread_func(void *arg)
+{
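+	/* The enum value travels through the void * return; pthread_join() reads it back. */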
+ char buf;
+
+ if (read(thread_pipe[0], &buf, 1) != 1)
+ return (void *)THREAD_ERROR;
+
+ return (void *)THREAD_SUCCESS;
+}
+
+TEST(signal_scoping_threads)
+{
+ pthread_t no_sandbox_thread, scoped_thread;
+ enum thread_return ret = THREAD_INVALID;
+
+ drop_caps(_metadata);
+ ASSERT_EQ(0, pipe2(thread_pipe, O_CLOEXEC));
+
+ ASSERT_EQ(0,
+ pthread_create(&no_sandbox_thread, NULL, thread_func, NULL));
+
+ /* Restricts the domain after creating the first thread. */
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+
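+	/*
+	 * landlock_restrict_self(2) only restricts the calling thread, so the
+	 * thread created before sandboxing is outside the new domain.
+	 */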
+ ASSERT_EQ(EPERM, pthread_kill(no_sandbox_thread, 0));
+ ASSERT_EQ(1, write(thread_pipe[1], ".", 1));
+
+ ASSERT_EQ(0, pthread_create(&scoped_thread, NULL, thread_func, NULL));
+ ASSERT_EQ(0, pthread_kill(scoped_thread, 0));
+ ASSERT_EQ(1, write(thread_pipe[1], ".", 1));
+
+ EXPECT_EQ(0, pthread_join(no_sandbox_thread, (void **)&ret));
+ EXPECT_EQ(THREAD_SUCCESS, ret);
+ EXPECT_EQ(0, pthread_join(scoped_thread, (void **)&ret));
+ EXPECT_EQ(THREAD_SUCCESS, ret);
+
+ EXPECT_EQ(0, close(thread_pipe[0]));
+ EXPECT_EQ(0, close(thread_pipe[1]));
+}
+
+const short backlog = 10;
+
+static volatile sig_atomic_t signal_received;
+
+static void handle_sigurg(int sig)
+{
+ if (sig == SIGURG)
+ signal_received = 1;
+ else
+ signal_received = -1;
+}
+
+static int setup_signal_handler(int signal)
+{
+ struct sigaction sa = {
+ .sa_handler = handle_sigurg,
+ };
+
+ if (sigemptyset(&sa.sa_mask))
+ return -1;
+
+	/* SA_SIGINFO must not be set when the sa_handler field is used. */
+	sa.sa_flags = SA_RESTART;
+ return sigaction(SIGURG, &sa, NULL);
+}
+
+/* clang-format off */
+FIXTURE(fown) {};
+/* clang-format on */
+
+enum fown_sandbox {
+ SANDBOX_NONE,
+ SANDBOX_BEFORE_FORK,
+ SANDBOX_BEFORE_SETOWN,
+ SANDBOX_AFTER_SETOWN,
+};
+
+FIXTURE_VARIANT(fown)
+{
+ const enum fown_sandbox sandbox_setown;
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(fown, no_sandbox) {
+ /* clang-format on */
+ .sandbox_setown = SANDBOX_NONE,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(fown, sandbox_before_fork) {
+ /* clang-format on */
+ .sandbox_setown = SANDBOX_BEFORE_FORK,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(fown, sandbox_before_setown) {
+ /* clang-format on */
+ .sandbox_setown = SANDBOX_BEFORE_SETOWN,
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(fown, sandbox_after_setown) {
+ /* clang-format on */
+ .sandbox_setown = SANDBOX_AFTER_SETOWN,
+};
+
+FIXTURE_SETUP(fown)
+{
+ drop_caps(_metadata);
+}
+
+FIXTURE_TEARDOWN(fown)
+{
+}
+
+/*
+ * Sending an out-of-band message triggers the SIGURG signal through
+ * the file_send_sigiotask LSM hook.
+ */
+TEST_F(fown, sigurg_socket)
+{
+ int server_socket, recv_socket;
+ struct service_fixture server_address;
+ char buffer_parent;
+ int status;
+ int pipe_parent[2], pipe_child[2];
+ pid_t child;
+
+ memset(&server_address, 0, sizeof(server_address));
+ set_unix_address(&server_address, 0);
+
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+
+ if (variant->sandbox_setown == SANDBOX_BEFORE_FORK)
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+
+ child = fork();
+ ASSERT_LE(0, child);
+ if (child == 0) {
+ int client_socket;
+ char buffer_child;
+
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ EXPECT_EQ(0, close(pipe_child[0]));
+
+ ASSERT_EQ(0, setup_signal_handler(SIGURG));
+ client_socket = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, client_socket);
+
+ /* Waits for the parent to listen. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buffer_child, 1));
+ ASSERT_EQ(0, connect(client_socket, &server_address.unix_addr,
+ server_address.unix_addr_len));
+
+ /*
+ * Waits for the parent to accept the connection, sandbox
+ * itself, and call fcntl(2).
+ */
+ ASSERT_EQ(1, read(pipe_parent[0], &buffer_child, 1));
+ /* May signal itself. */
+ ASSERT_EQ(1, send(client_socket, ".", 1, MSG_OOB));
+ EXPECT_EQ(0, close(client_socket));
+ ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+ EXPECT_EQ(0, close(pipe_child[1]));
+
+ /* Waits for the message to be received. */
+ ASSERT_EQ(1, read(pipe_parent[0], &buffer_child, 1));
+ EXPECT_EQ(0, close(pipe_parent[0]));
+
+ if (variant->sandbox_setown == SANDBOX_BEFORE_SETOWN) {
+ ASSERT_EQ(0, signal_received);
+ } else {
+ /*
+ * A signal is only received if fcntl(F_SETOWN) was
+ * called before any sandboxing or if the signal
+ * receiver is in the same domain.
+ */
+ ASSERT_EQ(1, signal_received);
+ }
+ _exit(_metadata->exit_code);
+ return;
+ }
+ EXPECT_EQ(0, close(pipe_parent[0]));
+ EXPECT_EQ(0, close(pipe_child[1]));
+
+ server_socket = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LE(0, server_socket);
+ ASSERT_EQ(0, bind(server_socket, &server_address.unix_addr,
+ server_address.unix_addr_len));
+ ASSERT_EQ(0, listen(server_socket, backlog));
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+
+ recv_socket = accept(server_socket, NULL, NULL);
+ ASSERT_LE(0, recv_socket);
+
+ if (variant->sandbox_setown == SANDBOX_BEFORE_SETOWN)
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+
+ /*
+ * Sets the child to receive SIGURG for MSG_OOB. This uncommon use is
+ * a valid attack scenario which also simplifies this test.
+ */
+ ASSERT_EQ(0, fcntl(recv_socket, F_SETOWN, child));
+
+ if (variant->sandbox_setown == SANDBOX_AFTER_SETOWN)
+ create_scoped_domain(_metadata, LANDLOCK_SCOPE_SIGNAL);
+
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+
+ /* Waits for the child to send MSG_OOB. */
+ ASSERT_EQ(1, read(pipe_child[0], &buffer_parent, 1));
+ EXPECT_EQ(0, close(pipe_child[0]));
+ ASSERT_EQ(1, recv(recv_socket, &buffer_parent, 1, MSG_OOB));
+ EXPECT_EQ(0, close(recv_socket));
+ EXPECT_EQ(0, close(server_socket));
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ EXPECT_EQ(0, close(pipe_parent[1]));
+
+ ASSERT_EQ(child, waitpid(child, &status, 0));
+ if (WIFSIGNALED(status) || !WIFEXITED(status) ||
+ WEXITSTATUS(status) != EXIT_SUCCESS)
+ _metadata->exit_code = KSFT_FAIL;
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/landlock/scoped_test.c b/tools/testing/selftests/landlock/scoped_test.c
new file mode 100644
index 000000000000..b90f76ed0d9c
--- /dev/null
+++ b/tools/testing/selftests/landlock/scoped_test.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Landlock tests - Common scope restriction
+ *
+ * Copyright © 2024 Tahera Fahimi <fahimitahera@gmail.com>
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <linux/landlock.h>
+#include <sys/prctl.h>
+
+#include "common.h"
+
+#define ACCESS_LAST LANDLOCK_SCOPE_SIGNAL
+
+TEST(ruleset_with_unknown_scope)
+{
+ __u64 scoped_mask;
+
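+	/* Tries every single-bit mask above the highest supported scope. */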
+ for (scoped_mask = 1ULL << 63; scoped_mask != ACCESS_LAST;
+ scoped_mask >>= 1) {
+ struct landlock_ruleset_attr ruleset_attr = {
+ .scoped = scoped_mask,
+ };
+
+ ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr,
+ sizeof(ruleset_attr), 0));
+ ASSERT_EQ(EINVAL, errno);
+ }
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index e10b87376fde..02e1204971b0 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -113,7 +113,7 @@ endif
endif
-ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390))
+ifneq (,$(filter $(ARCH),arm64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390))
TEST_GEN_FILES += va_high_addr_switch
TEST_GEN_FILES += virtual_address_range
TEST_GEN_FILES += write_to_hugetlbfs
diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
index d680c00d2853..67df7b47087f 100755
--- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
@@ -254,7 +254,7 @@ function cleanup_hugetlb_memory() {
local cgroup="$1"
if [[ "$(pgrep -f write_to_hugetlbfs)" != "" ]]; then
echo killing write_to_hugetlbfs
- killall -2 write_to_hugetlbfs
+ killall -2 --wait write_to_hugetlbfs
wait_for_hugetlb_memory_to_get_depleted $cgroup
fi
set -e
diff --git a/tools/testing/selftests/mm/hugepage-mmap.c b/tools/testing/selftests/mm/hugepage-mmap.c
index 267eea2e0e0b..3b1b532f1cbb 100644
--- a/tools/testing/selftests/mm/hugepage-mmap.c
+++ b/tools/testing/selftests/mm/hugepage-mmap.c
@@ -8,13 +8,6 @@
* like /mnt) using the command mount -t hugetlbfs nodev /mnt. In this
* example, the app is requesting memory of size 256MB that is backed by
* huge pages.
- *
- * For the ia64 architecture, the Linux kernel reserves Region number 4 for
- * huge pages. That means that if one requires a fixed address, a huge page
- * aligned address starting with 0x800000... will be required. If a fixed
- * address is not required, the kernel will select an address in the proper
- * range.
- * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*/
#define _GNU_SOURCE
#include <stdlib.h>
@@ -27,15 +20,6 @@
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define FLAGS (MAP_SHARED | MAP_FIXED)
-#else
-#define ADDR (void *)(0x0UL)
-#define FLAGS (MAP_SHARED)
-#endif
-
static void check_bytes(char *addr)
{
ksft_print_msg("First hex is %x\n", *((unsigned int *)addr));
@@ -74,7 +58,7 @@ int main(void)
if (fd < 0)
ksft_exit_fail_msg("memfd_create() failed: %s\n", strerror(errno));
- addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, fd, 0);
+ addr = mmap(NULL, LENGTH, PROTECTION, MAP_SHARED, fd, 0);
if (addr == MAP_FAILED) {
close(fd);
ksft_exit_fail_msg("mmap(): %s\n", strerror(errno));
diff --git a/tools/testing/selftests/mm/hugepage-shm.c b/tools/testing/selftests/mm/hugepage-shm.c
index 478bb1e989e9..ef06260802b5 100644
--- a/tools/testing/selftests/mm/hugepage-shm.c
+++ b/tools/testing/selftests/mm/hugepage-shm.c
@@ -8,13 +8,6 @@
* SHM_HUGETLB in the shmget system call to inform the kernel that it is
* requesting huge pages.
*
- * For the ia64 architecture, the Linux kernel reserves Region number 4 for
- * huge pages. That means that if one requires a fixed address, a huge page
- * aligned address starting with 0x800000... will be required. If a fixed
- * address is not required, the kernel will select an address in the proper
- * range.
- * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
- *
* Note: The default shared memory limit is quite low on many kernels,
* you may need to increase it via:
*
@@ -39,15 +32,6 @@
#define dprintf(x) printf(x)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define SHMAT_FLAGS (SHM_RND)
-#else
-#define ADDR (void *)(0x0UL)
-#define SHMAT_FLAGS (0)
-#endif
-
int main(void)
{
int shmid;
@@ -61,7 +45,7 @@ int main(void)
}
printf("shmid: 0x%x\n", shmid);
- shmaddr = shmat(shmid, ADDR, SHMAT_FLAGS);
+ shmaddr = shmat(shmid, NULL, 0);
if (shmaddr == (char *)-1) {
perror("Shared memory attach failure");
shmctl(shmid, IPC_RMID, NULL);
diff --git a/tools/testing/selftests/mm/hugepage-vmemmap.c b/tools/testing/selftests/mm/hugepage-vmemmap.c
index 894d28c3dd47..df366a4d1b92 100644
--- a/tools/testing/selftests/mm/hugepage-vmemmap.c
+++ b/tools/testing/selftests/mm/hugepage-vmemmap.c
@@ -22,20 +22,6 @@
#define PM_PFRAME_BITS 55
#define PM_PFRAME_MASK ~((1UL << PM_PFRAME_BITS) - 1)
-/*
- * For ia64 architecture, Linux kernel reserves Region number 4 for hugepages.
- * That means the addresses starting with 0x800000... will need to be
- * specified. Specifying a fixed address is not required on ppc64, i386
- * or x86_64.
- */
-#ifdef __ia64__
-#define MAP_ADDR (void *)(0x8000000000000000UL)
-#define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
-#else
-#define MAP_ADDR NULL
-#define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
-#endif
-
static size_t pagesize;
static size_t maplength;
@@ -113,7 +99,8 @@ int main(int argc, char **argv)
exit(1);
}
- addr = mmap(MAP_ADDR, maplength, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
+ addr = mmap(NULL, maplength, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c
index 829320a519e7..56d4480e8d3c 100644
--- a/tools/testing/selftests/mm/khugepaged.c
+++ b/tools/testing/selftests/mm/khugepaged.c
@@ -1095,7 +1095,7 @@ static void usage(void)
fprintf(stderr, "\n\tSupported Options:\n");
fprintf(stderr, "\t\t-h: This help message.\n");
fprintf(stderr, "\t\t-s: mTHP size, expressed as page order.\n");
- fprintf(stderr, "\t\t Defaults to 0. Use this size for anon allocations.\n");
+ fprintf(stderr, "\t\t Defaults to 0. Use this size for anon or shmem allocations.\n");
exit(1);
}
@@ -1209,6 +1209,8 @@ int main(int argc, char **argv)
default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8;
default_settings.hugepages[hpage_pmd_order].enabled = THP_INHERIT;
default_settings.hugepages[anon_order].enabled = THP_ALWAYS;
+ default_settings.shmem_hugepages[hpage_pmd_order].enabled = SHMEM_INHERIT;
+ default_settings.shmem_hugepages[anon_order].enabled = SHMEM_ALWAYS;
save_settings();
thp_push_settings(&default_settings);
diff --git a/tools/testing/selftests/mm/map_hugetlb.c b/tools/testing/selftests/mm/map_hugetlb.c
index a1f005a90a4f..b47399feab53 100644
--- a/tools/testing/selftests/mm/map_hugetlb.c
+++ b/tools/testing/selftests/mm/map_hugetlb.c
@@ -4,11 +4,6 @@
* system call with MAP_HUGETLB flag. Before running this program make
* sure the administrator has allocated enough default sized huge pages
* to cover the 256 MB allocation.
- *
- * For ia64 architecture, Linux kernel reserves Region number 4 for hugepages.
- * That means the addresses starting with 0x800000... will need to be
- * specified. Specifying a fixed address is not required on ppc64, i386
- * or x86_64.
*/
#include <stdlib.h>
#include <stdio.h>
@@ -21,15 +16,6 @@
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
-#else
-#define ADDR (void *)(0x0UL)
-#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
-#endif
-
static void check_bytes(char *addr)
{
ksft_print_msg("First hex is %x\n", *((unsigned int *)addr));
@@ -60,7 +46,7 @@ int main(int argc, char **argv)
void *addr;
size_t hugepage_size;
size_t length = LENGTH;
- int flags = FLAGS;
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
int shift = 0;
hugepage_size = default_huge_page_size();
@@ -85,7 +71,7 @@ int main(int argc, char **argv)
ksft_print_msg("Default size hugepages\n");
ksft_print_msg("Mapping %lu Mbytes\n", (unsigned long)length >> 20);
- addr = mmap(ADDR, length, PROTECTION, flags, -1, 0);
+ addr = mmap(NULL, length, PROTECTION, flags, -1, 0);
if (addr == MAP_FAILED)
ksft_exit_fail_msg("mmap: %s\n", strerror(errno));
diff --git a/tools/testing/selftests/mm/migration.c b/tools/testing/selftests/mm/migration.c
index 6908569ef406..64bcbb7151cf 100644
--- a/tools/testing/selftests/mm/migration.c
+++ b/tools/testing/selftests/mm/migration.c
@@ -15,10 +15,10 @@
#include <signal.h>
#include <time.h>
-#define TWOMEG (2<<20)
-#define RUNTIME (20)
-
-#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
+#define TWOMEG (2<<20)
+#define RUNTIME (20)
+#define MAX_RETRIES 100
+#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
FIXTURE(migration)
{
@@ -65,6 +65,7 @@ int migrate(uint64_t *ptr, int n1, int n2)
int ret, tmp;
int status = 0;
struct timespec ts1, ts2;
+ int failures = 0;
if (clock_gettime(CLOCK_MONOTONIC, &ts1))
return -1;
@@ -79,13 +80,17 @@ int migrate(uint64_t *ptr, int n1, int n2)
ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
MPOL_MF_MOVE_ALL);
if (ret) {
- if (ret > 0)
+ if (ret > 0) {
+ /* Migration is best effort; try again */
+ if (++failures < MAX_RETRIES)
+ continue;
printf("Didn't migrate %d pages\n", ret);
+ }
else
perror("Couldn't migrate pages");
return -2;
}
-
+ failures = 0;
tmp = n2;
n2 = n1;
n1 = tmp;
diff --git a/tools/testing/selftests/mm/mseal_test.c b/tools/testing/selftests/mm/mseal_test.c
index bfcea5cf9a48..01675c412b2a 100644
--- a/tools/testing/selftests/mm/mseal_test.c
+++ b/tools/testing/selftests/mm/mseal_test.c
@@ -99,6 +99,16 @@ static int sys_madvise(void *start, size_t len, int types)
return sret;
}
+static void *sys_mremap(void *addr, size_t old_len, size_t new_len,
+ unsigned long flags, void *new_addr)
+{
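+	/* Raw syscall wrapper, presumably to bypass libc-specific mremap handling. */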
+ void *sret;
+
+ errno = 0;
+ sret = (void *) syscall(__NR_mremap, addr, old_len, new_len, flags, new_addr);
+ return sret;
+}
+
static int sys_pkey_alloc(unsigned long flags, unsigned long init_val)
{
int ret = syscall(__NR_pkey_alloc, flags, init_val);
@@ -756,6 +766,42 @@ static void test_seal_mprotect_partial_mprotect(bool seal)
REPORT_TEST_PASS();
}
+static void test_seal_mprotect_partial_mprotect_tail(bool seal)
+{
+ void *ptr;
+ unsigned long page_size = getpagesize();
+ unsigned long size = 2 * page_size;
+ int ret;
+ int prot;
+
+	/*
+	 * Checks that a partial mseal (that results in two vmas) works
+	 * correctly: mprotect() may modify the first vma, but it must never
+	 * touch the second (sealed) vma.
+	 */
+
+ setup_single_address(size, &ptr);
+ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr + page_size, page_size);
+ FAIL_TEST_IF_FALSE(!ret);
+ }
+
+ ret = sys_mprotect(ptr, size, PROT_EXEC);
+ if (seal)
+ FAIL_TEST_IF_FALSE(ret < 0);
+ else
+ FAIL_TEST_IF_FALSE(!ret);
+
+ if (seal) {
+ FAIL_TEST_IF_FALSE(get_vma_size(ptr + page_size, &prot) > 0);
+ FAIL_TEST_IF_FALSE(prot == 0x4);
+ }
+
+ REPORT_TEST_PASS();
+}
+
+
static void test_seal_mprotect_two_vma_with_gap(bool seal)
{
void *ptr;
@@ -973,6 +1019,36 @@ static void test_seal_munmap_vma_with_gap(bool seal)
REPORT_TEST_PASS();
}
+static void test_seal_munmap_partial_across_vmas(bool seal)
+{
+ void *ptr;
+ unsigned long page_size = getpagesize();
+ unsigned long size = 2 * page_size;
+ int ret;
+ int prot;
+
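+	/*
+	 * Checks that a munmap() spanning an unsealed and a sealed vma is
+	 * rejected as a whole.
+	 */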
+ setup_single_address(size, &ptr);
+ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = sys_mseal(ptr + page_size, page_size);
+ FAIL_TEST_IF_FALSE(!ret);
+ }
+
+ ret = sys_munmap(ptr, size);
+ if (seal)
+ FAIL_TEST_IF_FALSE(ret < 0);
+ else
+ FAIL_TEST_IF_FALSE(!ret);
+
+ if (seal) {
+ FAIL_TEST_IF_FALSE(get_vma_size(ptr + page_size, &prot) > 0);
+ FAIL_TEST_IF_FALSE(prot == 0x4);
+ }
+
+ REPORT_TEST_PASS();
+}
+
static void test_munmap_start_freed(bool seal)
{
void *ptr;
@@ -1104,12 +1180,12 @@ static void test_seal_mremap_shrink(bool seal)
}
/* shrink from 4 pages to 2 pages. */
- ret2 = mremap(ptr, size, 2 * page_size, 0, 0);
+ ret2 = sys_mremap(ptr, size, 2 * page_size, 0, 0);
if (seal) {
- FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+ FAIL_TEST_IF_FALSE(ret2 == (void *) MAP_FAILED);
FAIL_TEST_IF_FALSE(errno == EPERM);
} else {
- FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
+ FAIL_TEST_IF_FALSE(ret2 != (void *) MAP_FAILED);
}
@@ -1136,7 +1212,7 @@ static void test_seal_mremap_expand(bool seal)
}
/* expand from 2 page to 4 pages. */
- ret2 = mremap(ptr, 2 * page_size, 4 * page_size, 0, 0);
+ ret2 = sys_mremap(ptr, 2 * page_size, 4 * page_size, 0, 0);
if (seal) {
FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
FAIL_TEST_IF_FALSE(errno == EPERM);
@@ -1169,7 +1245,7 @@ static void test_seal_mremap_move(bool seal)
}
/* move from ptr to fixed address. */
- ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newPtr);
+ ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newPtr);
if (seal) {
FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
FAIL_TEST_IF_FALSE(errno == EPERM);
@@ -1288,7 +1364,7 @@ static void test_seal_mremap_shrink_fixed(bool seal)
}
/* mremap to move and shrink to fixed address */
- ret2 = mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
+ ret2 = sys_mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
newAddr);
if (seal) {
FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
@@ -1319,7 +1395,7 @@ static void test_seal_mremap_expand_fixed(bool seal)
}
/* mremap to move and expand to fixed address */
- ret2 = mremap(ptr, page_size, size, MREMAP_MAYMOVE | MREMAP_FIXED,
+ ret2 = sys_mremap(ptr, page_size, size, MREMAP_MAYMOVE | MREMAP_FIXED,
newAddr);
if (seal) {
FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
@@ -1350,7 +1426,7 @@ static void test_seal_mremap_move_fixed(bool seal)
}
/* mremap to move to fixed address */
- ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newAddr);
+ ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newAddr);
if (seal) {
FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
FAIL_TEST_IF_FALSE(errno == EPERM);
@@ -1379,14 +1455,13 @@ static void test_seal_mremap_move_fixed_zero(bool seal)
/*
* MREMAP_FIXED can move the mapping to zero address
*/
- ret2 = mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
+ ret2 = sys_mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
0);
if (seal) {
FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
FAIL_TEST_IF_FALSE(errno == EPERM);
} else {
FAIL_TEST_IF_FALSE(ret2 == 0);
-
}
REPORT_TEST_PASS();
@@ -1409,13 +1484,13 @@ static void test_seal_mremap_move_dontunmap(bool seal)
}
/* mremap to move, and don't unmap src addr. */
- ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, 0);
+ ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, 0);
if (seal) {
FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
FAIL_TEST_IF_FALSE(errno == EPERM);
} else {
+ /* kernel will allocate a new address */
FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
-
}
REPORT_TEST_PASS();
@@ -1423,7 +1498,7 @@ static void test_seal_mremap_move_dontunmap(bool seal)
static void test_seal_mremap_move_dontunmap_anyaddr(bool seal)
{
- void *ptr;
+ void *ptr, *ptr2;
unsigned long page_size = getpagesize();
unsigned long size = 4 * page_size;
int ret;
@@ -1438,24 +1513,30 @@ static void test_seal_mremap_move_dontunmap_anyaddr(bool seal)
}
/*
- * The 0xdeaddead should not have effect on dest addr
- * when MREMAP_DONTUNMAP is set.
+	 * The new address can be any address that is not currently mapped;
+	 * use an allocate/free cycle to simulate that.
*/
- ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
- 0xdeaddead);
+ setup_single_address(size, &ptr2);
+ FAIL_TEST_IF_FALSE(ptr2 != (void *)-1);
+ ret = sys_munmap(ptr2, size);
+ FAIL_TEST_IF_FALSE(!ret);
+
+ /*
+ * remap to any address.
+ */
+ ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+ (void *) ptr2);
if (seal) {
FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
FAIL_TEST_IF_FALSE(errno == EPERM);
} else {
- FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
- FAIL_TEST_IF_FALSE((long)ret2 != 0xdeaddead);
-
+		/* The remap succeeds and returns ptr2. */
+ FAIL_TEST_IF_FALSE(ret2 == ptr2);
}
REPORT_TEST_PASS();
}
-
static void test_seal_merge_and_split(void)
{
void *ptr;
@@ -1720,6 +1801,69 @@ static void test_seal_discard_ro_anon(bool seal)
REPORT_TEST_PASS();
}
+static void test_seal_discard_across_vmas(bool seal)
+{
+ void *ptr;
+ unsigned long page_size = getpagesize();
+ unsigned long size = 2 * page_size;
+ int ret;
+
+ setup_single_address(size, &ptr);
+ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = seal_single_address(ptr + page_size, page_size);
+ FAIL_TEST_IF_FALSE(!ret);
+ }
+
+ ret = sys_madvise(ptr, size, MADV_DONTNEED);
+ if (seal)
+ FAIL_TEST_IF_FALSE(ret < 0);
+ else
+ FAIL_TEST_IF_FALSE(!ret);
+
+ ret = sys_munmap(ptr, size);
+ if (seal)
+ FAIL_TEST_IF_FALSE(ret < 0);
+ else
+ FAIL_TEST_IF_FALSE(!ret);
+
+ REPORT_TEST_PASS();
+}
+
+
+static void test_seal_madvise_nodiscard(bool seal)
+{
+ void *ptr;
+ unsigned long page_size = getpagesize();
+ unsigned long size = 4 * page_size;
+ int ret;
+
+ setup_single_address(size, &ptr);
+ FAIL_TEST_IF_FALSE(ptr != (void *)-1);
+
+ if (seal) {
+ ret = seal_single_address(ptr, size);
+ FAIL_TEST_IF_FALSE(!ret);
+ }
+
+ /*
+	 * Test a benign madvise flag such as MADV_RANDOM, which does not touch
+	 * page contents (and thus should work on msealed VMAs). MADV_RANDOM
+	 * also happens to share bits with discard-style flags like MADV_REMOVE.
+ */
+ ret = sys_madvise(ptr, size, MADV_RANDOM);
+ FAIL_TEST_IF_FALSE(!ret);
+
+ ret = sys_munmap(ptr, size);
+ if (seal)
+ FAIL_TEST_IF_FALSE(ret < 0);
+ else
+ FAIL_TEST_IF_FALSE(!ret);
+
+ REPORT_TEST_PASS();
+}
+
int main(int argc, char **argv)
{
bool test_seal = seal_support();
@@ -1732,7 +1876,7 @@ int main(int argc, char **argv)
if (!pkey_supported())
ksft_print_msg("PKEY not supported\n");
- ksft_set_plan(80);
+ ksft_set_plan(88);
test_seal_addseal();
test_seal_unmapped_start();
@@ -1778,12 +1922,17 @@ int main(int argc, char **argv)
test_seal_mprotect_split(false);
test_seal_mprotect_split(true);
+ test_seal_mprotect_partial_mprotect_tail(false);
+ test_seal_mprotect_partial_mprotect_tail(true);
+
test_seal_munmap(false);
test_seal_munmap(true);
test_seal_munmap_two_vma(false);
test_seal_munmap_two_vma(true);
test_seal_munmap_vma_with_gap(false);
test_seal_munmap_vma_with_gap(true);
+ test_seal_munmap_partial_across_vmas(false);
+ test_seal_munmap_partial_across_vmas(true);
test_munmap_start_freed(false);
test_munmap_start_freed(true);
@@ -1811,8 +1960,12 @@ int main(int argc, char **argv)
test_seal_mremap_move_fixed_zero(true);
test_seal_mremap_move_dontunmap_anyaddr(false);
test_seal_mremap_move_dontunmap_anyaddr(true);
+ test_seal_madvise_nodiscard(false);
+ test_seal_madvise_nodiscard(true);
test_seal_discard_ro_anon(false);
test_seal_discard_ro_anon(true);
+ test_seal_discard_across_vmas(false);
+ test_seal_discard_across_vmas(true);
test_seal_discard_ro_anon_on_rw(false);
test_seal_discard_ro_anon_on_rw(true);
test_seal_discard_ro_anon_on_shared(false);
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index fc90af2a97b8..bcc73b4e805c 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -15,7 +15,7 @@
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <math.h>
-#include <asm-generic/unistd.h>
+#include <asm/unistd.h>
#include <pthread.h>
#include <sys/resource.h>
#include <assert.h>
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index 36045edb10de..c5797ad1d37b 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -189,7 +189,7 @@ else
fi
# filter 64bit architectures
-ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sparc64 x86_64"
+ARCH64STR="arm64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sparc64 x86_64"
if [ -z "$ARCH" ]; then
ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
fi
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index e5e8dafc9d94..eb6d1b9fc362 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -84,6 +84,76 @@ static void write_debugfs(const char *fmt, ...)
write_file(SPLIT_DEBUGFS, input, ret + 1);
}
+static char *allocate_zero_filled_hugepage(size_t len)
+{
+ char *result;
+ size_t i;
+
+ result = memalign(pmd_pagesize, len);
+ if (!result) {
+ printf("Fail to allocate memory\n");
+ exit(EXIT_FAILURE);
+ }
+
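+	/* Advise the kernel to back this range with transparent huge pages. */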
+ madvise(result, len, MADV_HUGEPAGE);
+
+ for (i = 0; i < len; i++)
+ result[i] = (char)0;
+
+ return result;
+}
+
+static void verify_rss_anon_split_huge_page_all_zeroes(char *one_page, int nr_hpages, size_t len)
+{
+ unsigned long rss_anon_before, rss_anon_after;
+ size_t i;
+
+	if (!check_huge_anon(one_page, nr_hpages, pmd_pagesize)) {
+ printf("No THP is allocated\n");
+ exit(EXIT_FAILURE);
+ }
+
+ rss_anon_before = rss_anon();
+ if (!rss_anon_before) {
+ printf("No RssAnon is allocated before split\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* split all THPs */
+ write_debugfs(PID_FMT, getpid(), (uint64_t)one_page,
+ (uint64_t)one_page + len, 0);
+
+ for (i = 0; i < len; i++)
+ if (one_page[i] != (char)0) {
+ printf("%ld byte corrupted\n", i);
+ exit(EXIT_FAILURE);
+ }
+
+ if (!check_huge_anon(one_page, 0, pmd_pagesize)) {
+ printf("Still AnonHugePages not split\n");
+ exit(EXIT_FAILURE);
+ }
+
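+	/* Splitting zero-filled THPs is expected to free the zero subpages, lowering RssAnon. */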
+ rss_anon_after = rss_anon();
+ if (rss_anon_after >= rss_anon_before) {
+ printf("Incorrect RssAnon value. Before: %ld After: %ld\n",
+ rss_anon_before, rss_anon_after);
+ exit(EXIT_FAILURE);
+ }
+}
+
+void split_pmd_zero_pages(void)
+{
+ char *one_page;
+ int nr_hpages = 4;
+ size_t len = nr_hpages * pmd_pagesize;
+
+ one_page = allocate_zero_filled_hugepage(len);
+ verify_rss_anon_split_huge_page_all_zeroes(one_page, nr_hpages, len);
+ printf("Split zero filled huge pages successful\n");
+ free(one_page);
+}
+
void split_pmd_thp(void)
{
char *one_page;
@@ -431,6 +501,7 @@ int main(int argc, char **argv)
fd_size = 2 * pmd_pagesize;
+ split_pmd_zero_pages();
split_pmd_thp();
split_pte_mapped_thp();
split_file_backed_thp();
diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c
index a4163438108e..577eaab6266f 100644
--- a/tools/testing/selftests/mm/thp_settings.c
+++ b/tools/testing/selftests/mm/thp_settings.c
@@ -33,10 +33,11 @@ static const char * const thp_defrag_strings[] = {
};
static const char * const shmem_enabled_strings[] = {
+ "never",
"always",
"within_size",
"advise",
- "never",
+ "inherit",
"deny",
"force",
NULL
@@ -200,6 +201,7 @@ void thp_write_num(const char *name, unsigned long num)
void thp_read_settings(struct thp_settings *settings)
{
unsigned long orders = thp_supported_orders();
+ unsigned long shmem_orders = thp_shmem_supported_orders();
char path[PATH_MAX];
int i;
@@ -234,12 +236,24 @@ void thp_read_settings(struct thp_settings *settings)
settings->hugepages[i].enabled =
thp_read_string(path, thp_enabled_strings);
}
+
+ for (i = 0; i < NR_ORDERS; i++) {
+ if (!((1 << i) & shmem_orders)) {
+ settings->shmem_hugepages[i].enabled = SHMEM_NEVER;
+ continue;
+ }
+ snprintf(path, PATH_MAX, "hugepages-%ukB/shmem_enabled",
+ (getpagesize() >> 10) << i);
+ settings->shmem_hugepages[i].enabled =
+ thp_read_string(path, shmem_enabled_strings);
+ }
}
void thp_write_settings(struct thp_settings *settings)
{
struct khugepaged_settings *khugepaged = &settings->khugepaged;
unsigned long orders = thp_supported_orders();
+ unsigned long shmem_orders = thp_shmem_supported_orders();
char path[PATH_MAX];
int enabled;
int i;
@@ -271,6 +285,15 @@ void thp_write_settings(struct thp_settings *settings)
enabled = settings->hugepages[i].enabled;
thp_write_string(path, thp_enabled_strings[enabled]);
}
+
+ for (i = 0; i < NR_ORDERS; i++) {
+ if (!((1 << i) & shmem_orders))
+ continue;
+ snprintf(path, PATH_MAX, "hugepages-%ukB/shmem_enabled",
+ (getpagesize() >> 10) << i);
+ enabled = settings->shmem_hugepages[i].enabled;
+ thp_write_string(path, shmem_enabled_strings[enabled]);
+ }
}
struct thp_settings *thp_current_settings(void)
@@ -324,17 +347,18 @@ void thp_set_read_ahead_path(char *path)
dev_queue_read_ahead_path[sizeof(dev_queue_read_ahead_path) - 1] = '\0';
}
-unsigned long thp_supported_orders(void)
+static unsigned long __thp_supported_orders(bool is_shmem)
{
unsigned long orders = 0;
char path[PATH_MAX];
char buf[256];
- int ret;
- int i;
+ int ret, i;
+ char anon_dir[] = "enabled";
+ char shmem_dir[] = "shmem_enabled";
for (i = 0; i < NR_ORDERS; i++) {
- ret = snprintf(path, PATH_MAX, THP_SYSFS "hugepages-%ukB/enabled",
- (getpagesize() >> 10) << i);
+ ret = snprintf(path, PATH_MAX, THP_SYSFS "hugepages-%ukB/%s",
+ (getpagesize() >> 10) << i, is_shmem ? shmem_dir : anon_dir);
if (ret >= PATH_MAX) {
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
@@ -347,3 +371,13 @@ unsigned long thp_supported_orders(void)
return orders;
}
+
+unsigned long thp_supported_orders(void)
+{
+ return __thp_supported_orders(false);
+}
+
+unsigned long thp_shmem_supported_orders(void)
+{
+ return __thp_supported_orders(true);
+}
diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h
index 71cbff05f4c7..876235a23460 100644
--- a/tools/testing/selftests/mm/thp_settings.h
+++ b/tools/testing/selftests/mm/thp_settings.h
@@ -22,10 +22,11 @@ enum thp_defrag {
};
enum shmem_enabled {
+ SHMEM_NEVER,
SHMEM_ALWAYS,
SHMEM_WITHIN_SIZE,
SHMEM_ADVISE,
- SHMEM_NEVER,
+ SHMEM_INHERIT,
SHMEM_DENY,
SHMEM_FORCE,
};
@@ -46,6 +47,10 @@ struct khugepaged_settings {
unsigned long pages_to_scan;
};
+struct shmem_hugepages_settings {
+ enum shmem_enabled enabled;
+};
+
struct thp_settings {
enum thp_enabled thp_enabled;
enum thp_defrag thp_defrag;
@@ -54,6 +59,7 @@ struct thp_settings {
struct khugepaged_settings khugepaged;
unsigned long read_ahead_kb;
struct hugepages_settings hugepages[NR_ORDERS];
+ struct shmem_hugepages_settings shmem_hugepages[NR_ORDERS];
};
int read_file(const char *path, char *buf, size_t buflen);
@@ -76,5 +82,6 @@ void thp_save_settings(void);
void thp_set_read_ahead_path(char *path);
unsigned long thp_supported_orders(void);
+unsigned long thp_shmem_supported_orders(void);
#endif /* __THP_SETTINGS_H__ */
diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
index 5a62530da3b5..d8d0cf04bb57 100644
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -12,6 +12,7 @@
#define PMD_SIZE_FILE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
#define SMAP_FILE_PATH "/proc/self/smaps"
+#define STATUS_FILE_PATH "/proc/self/status"
#define MAX_LINE_LENGTH 500
unsigned int __page_size;
@@ -171,6 +172,27 @@ uint64_t read_pmd_pagesize(void)
return strtoul(buf, NULL, 10);
}
+unsigned long rss_anon(void)
+{
+ unsigned long rss_anon = 0;
+ FILE *fp;
+ char buffer[MAX_LINE_LENGTH];
+
+ fp = fopen(STATUS_FILE_PATH, "r");
+ if (!fp)
+ ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__, STATUS_FILE_PATH);
+
+ if (!check_for_pattern(fp, "RssAnon:", buffer, sizeof(buffer)))
+ goto err_out;
+
+ if (sscanf(buffer, "RssAnon:%10lu kB", &rss_anon) != 1)
+ ksft_exit_fail_msg("Reading status error\n");
+
+err_out:
+ fclose(fp);
+ return rss_anon;
+}
+
bool __check_huge(void *addr, char *pattern, int nr_hpages,
uint64_t hpage_size)
{
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index 9007c420d52c..2eaed8209925 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -39,6 +39,7 @@ unsigned long pagemap_get_pfn(int fd, char *start);
void clear_softdirty(void);
bool check_for_pattern(FILE *fp, const char *pattern, char *buf, size_t len);
uint64_t read_pmd_pagesize(void);
+unsigned long rss_anon(void);
bool check_huge_anon(void *addr, int nr_hpages, uint64_t hpage_size);
bool check_huge_file(void *addr, int nr_hpages, uint64_t hpage_size);
bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
diff --git a/tools/testing/selftests/mm/write_to_hugetlbfs.c b/tools/testing/selftests/mm/write_to_hugetlbfs.c
index 6a2caba19ee1..1289d311efd7 100644
--- a/tools/testing/selftests/mm/write_to_hugetlbfs.c
+++ b/tools/testing/selftests/mm/write_to_hugetlbfs.c
@@ -28,7 +28,7 @@ enum method {
/* Global variables. */
static const char *self;
-static char *shmaddr;
+static int *shmaddr;
static int shmid;
/*
@@ -47,15 +47,17 @@ void sig_handler(int signo)
{
printf("Received %d.\n", signo);
if (signo == SIGINT) {
- printf("Deleting the memory\n");
- if (shmdt((const void *)shmaddr) != 0) {
- perror("Detach failure");
+ if (shmaddr) {
+ printf("Deleting the memory\n");
+ if (shmdt((const void *)shmaddr) != 0) {
+ perror("Detach failure");
+ shmctl(shmid, IPC_RMID, NULL);
+ exit(4);
+ }
+
shmctl(shmid, IPC_RMID, NULL);
- exit(4);
+ printf("Done deleting the memory\n");
}
-
- shmctl(shmid, IPC_RMID, NULL);
- printf("Done deleting the memory\n");
}
exit(2);
}
@@ -211,7 +213,8 @@ int main(int argc, char **argv)
shmctl(shmid, IPC_RMID, NULL);
exit(2);
}
- printf("shmaddr: %p\n", ptr);
+ shmaddr = ptr;
+ printf("shmaddr: %p\n", shmaddr);
break;
default:
diff --git a/tools/testing/selftests/net/netfilter/Makefile b/tools/testing/selftests/net/netfilter/Makefile
index d13fb5ea3e89..e6c9e777fead 100644
--- a/tools/testing/selftests/net/netfilter/Makefile
+++ b/tools/testing/selftests/net/netfilter/Makefile
@@ -13,6 +13,7 @@ TEST_PROGS += conntrack_ipip_mtu.sh
TEST_PROGS += conntrack_tcp_unreplied.sh
TEST_PROGS += conntrack_sctp_collision.sh
TEST_PROGS += conntrack_vrf.sh
+TEST_PROGS += conntrack_reverse_clash.sh
TEST_PROGS += ipvs.sh
TEST_PROGS += nf_conntrack_packetdrill.sh
TEST_PROGS += nf_nat_edemux.sh
@@ -26,6 +27,8 @@ TEST_PROGS += nft_nat.sh
TEST_PROGS += nft_nat_zones.sh
TEST_PROGS += nft_queue.sh
TEST_PROGS += nft_synproxy.sh
+TEST_PROGS += nft_tproxy_tcp.sh
+TEST_PROGS += nft_tproxy_udp.sh
TEST_PROGS += nft_zones_many.sh
TEST_PROGS += rpath.sh
TEST_PROGS += xt_string.sh
@@ -36,6 +39,7 @@ TEST_GEN_PROGS = conntrack_dump_flush
TEST_GEN_FILES = audit_logread
TEST_GEN_FILES += connect_close nf_queue
+TEST_GEN_FILES += conntrack_reverse_clash
TEST_GEN_FILES += sctp_collision
include ../../lib.mk
diff --git a/tools/testing/selftests/net/netfilter/config b/tools/testing/selftests/net/netfilter/config
index b2dd4db45215..c5fe7b34eaf1 100644
--- a/tools/testing/selftests/net/netfilter/config
+++ b/tools/testing/selftests/net/netfilter/config
@@ -81,6 +81,7 @@ CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_TPROXY=m
CONFIG_VETH=m
CONFIG_VLAN_8021Q=m
CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c
new file mode 100644
index 000000000000..507930cee8cb
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Needs something like:
+ *
+ * iptables -t nat -A POSTROUTING -o nomatch -j MASQUERADE
+ *
+ * so that the NAT engine attaches a NAT null-binding to each connection.
+ *
+ * With unmodified kernels, the child or the parent will exit with a
+ * "Port number changed" error, even though no port translation
+ * was requested.
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+
+#define LEN 512
+#define PORT 56789
+#define TEST_TIME 5
+
+static void die(const char *e)
+{
+ perror(e);
+ exit(111);
+}
+
+static void die_port(uint16_t got, uint16_t want)
+{
+ fprintf(stderr, "Port number changed, wanted %d got %d\n", want, ntohs(got));
+ exit(1);
+}
+
+static int udp_socket(void)
+{
+ static const struct timeval tv = {
+ .tv_sec = 1,
+ };
+ int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+
+ if (fd < 0)
+ die("socket");
+
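+	/* A 1s receive timeout makes recvfrom() fail instead of blocking forever on a lost packet. */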
+ setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
+ return fd;
+}
+
+int main(int argc, char *argv[])
+{
+ struct sockaddr_in sa1 = {
+ .sin_family = AF_INET,
+ };
+ struct sockaddr_in sa2 = {
+ .sin_family = AF_INET,
+ };
+ int s1, s2, status;
+ time_t end, now;
+	/* The payload content is irrelevant; zero-init avoids sending uninitialized bytes. */
+	char buf[LEN] = { 0 };
+ bool child;
+
+ sa1.sin_port = htons(PORT);
+ sa2.sin_port = htons(PORT + 1);
+
+ s1 = udp_socket();
+ s2 = udp_socket();
+
+ inet_pton(AF_INET, "127.0.0.11", &sa1.sin_addr);
+ inet_pton(AF_INET, "127.0.0.12", &sa2.sin_addr);
+
+ if (bind(s1, (struct sockaddr *)&sa1, sizeof(sa1)) < 0)
+ die("bind 1");
+ if (bind(s2, (struct sockaddr *)&sa2, sizeof(sa2)) < 0)
+ die("bind 2");
+
+ child = fork() == 0;
+
+ now = time(NULL);
+ end = now + TEST_TIME;
+
+ while (now < end) {
+ struct sockaddr_in peer;
+ socklen_t plen = sizeof(peer);
+
+ now = time(NULL);
+
+ if (child) {
+ if (sendto(s1, buf, LEN, 0, (struct sockaddr *)&sa2, sizeof(sa2)) != LEN)
+ continue;
+
+ if (recvfrom(s2, buf, LEN, 0, (struct sockaddr *)&peer, &plen) < 0)
+ die("child recvfrom");
+
+ if (peer.sin_port != htons(PORT))
+ die_port(peer.sin_port, PORT);
+ } else {
+ if (sendto(s2, buf, LEN, 0, (struct sockaddr *)&sa1, sizeof(sa1)) != LEN)
+ continue;
+
+ if (recvfrom(s1, buf, LEN, 0, (struct sockaddr *)&peer, &plen) < 0)
+ die("parent recvfrom");
+
+ if (peer.sin_port != htons((PORT + 1)))
+ die_port(peer.sin_port, PORT + 1);
+ }
+ }
+
+ if (child)
+ return 0;
+
+ wait(&status);
+
+ if (WIFEXITED(status))
+ return WEXITSTATUS(status);
+
+ return 1;
+}
diff --git a/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh
new file mode 100755
index 000000000000..a24c896347a8
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source lib.sh
+
+cleanup()
+{
+ cleanup_all_ns
+}
+
+checktool "nft --version" "run test without nft"
+checktool "conntrack --version" "run test without conntrack"
+
+trap cleanup EXIT
+
+setup_ns ns0
+
+# make loopback connections get nat null bindings assigned
+ip netns exec "$ns0" nft -f - <<EOF
+table ip nat {
+ chain POSTROUTING {
+ type nat hook postrouting priority srcnat; policy accept;
+ oifname "nomatch" counter packets 0 bytes 0 masquerade
+ }
+}
+EOF
+
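+# Flushes conntrack in a loop so entries keep being recreated while
+# traffic flows, exercising the clash resolution path.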
+do_flush()
+{
+ local end
+ local now
+
+ now=$(date +%s)
+ end=$((now + 5))
+
+ while [ $now -lt $end ];do
+ ip netns exec "$ns0" conntrack -F 2>/dev/null
+ now=$(date +%s)
+ done
+}
+
+do_flush &
+
+if ip netns exec "$ns0" ./conntrack_reverse_clash; then
+ echo "PASS: No SNAT performed for null bindings"
+else
+ echo "ERROR: SNAT performed without any matching snat rule"
+ exit 1
+fi
+
+exit 0
diff --git a/tools/testing/selftests/net/netfilter/ipvs.sh b/tools/testing/selftests/net/netfilter/ipvs.sh
index 4ceee9fb3949..d3edb16cd4b3 100755
--- a/tools/testing/selftests/net/netfilter/ipvs.sh
+++ b/tools/testing/selftests/net/netfilter/ipvs.sh
@@ -97,7 +97,7 @@ cleanup() {
}
server_listen() {
- ip netns exec "$ns2" socat -u -4 TCP-LISTEN:8080,reuseaddr STDOUT > "${outfile}" &
+ ip netns exec "$ns2" timeout 5 socat -u -4 TCP-LISTEN:8080,reuseaddr STDOUT > "${outfile}" &
server_pid=$!
sleep 0.2
}
diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh
index d66e3c4dfec6..a9d109fcc15c 100755
--- a/tools/testing/selftests/net/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/net/netfilter/nft_queue.sh
@@ -31,7 +31,7 @@ modprobe -q sctp
trap cleanup EXIT
-setup_ns ns1 ns2 nsrouter
+setup_ns ns1 ns2 ns3 nsrouter
TMPFILE0=$(mktemp)
TMPFILE1=$(mktemp)
@@ -48,6 +48,7 @@ if ! ip link add veth0 netns "$nsrouter" type veth peer name eth0 netns "$ns1" >
exit $ksft_skip
fi
ip link add veth1 netns "$nsrouter" type veth peer name eth0 netns "$ns2"
+ip link add veth2 netns "$nsrouter" type veth peer name eth0 netns "$ns3"
ip -net "$nsrouter" link set veth0 up
ip -net "$nsrouter" addr add 10.0.1.1/24 dev veth0
@@ -57,8 +58,13 @@ ip -net "$nsrouter" link set veth1 up
ip -net "$nsrouter" addr add 10.0.2.1/24 dev veth1
ip -net "$nsrouter" addr add dead:2::1/64 dev veth1 nodad
+ip -net "$nsrouter" link set veth2 up
+ip -net "$nsrouter" addr add 10.0.3.1/24 dev veth2
+ip -net "$nsrouter" addr add dead:3::1/64 dev veth2 nodad
+
ip -net "$ns1" link set eth0 up
ip -net "$ns2" link set eth0 up
+ip -net "$ns3" link set eth0 up
ip -net "$ns1" addr add 10.0.1.99/24 dev eth0
ip -net "$ns1" addr add dead:1::99/64 dev eth0 nodad
@@ -70,6 +76,11 @@ ip -net "$ns2" addr add dead:2::99/64 dev eth0 nodad
ip -net "$ns2" route add default via 10.0.2.1
ip -net "$ns2" route add default via dead:2::1
+ip -net "$ns3" addr add 10.0.3.99/24 dev eth0
+ip -net "$ns3" addr add dead:3::99/64 dev eth0 nodad
+ip -net "$ns3" route add default via 10.0.3.1
+ip -net "$ns3" route add default via dead:3::1
+
load_ruleset() {
local name=$1
local prio=$2
@@ -473,6 +484,83 @@ EOF
check_output_files "$TMPINPUT" "$TMPFILE1" "sctp output"
}
+udp_listener_ready()
+{
+ ss -S -N "$1" -uln -o "sport = :12345" | grep -q 12345
+}
+
+output_files_written()
+{
+ test -s "$1" && test -s "$2"
+}
+
+test_udp_ct_race()
+{
+ ip netns exec "$nsrouter" nft -f /dev/stdin <<EOF
+flush ruleset
+table inet udpq {
+ chain prerouting {
+ type nat hook prerouting priority dstnat - 5; policy accept;
+ ip daddr 10.6.6.6 udp dport 12345 counter dnat to numgen inc mod 2 map { 0 : 10.0.2.99, 1 : 10.0.3.99 }
+ }
+ chain postrouting {
+ type filter hook postrouting priority srcnat - 5; policy accept;
+ udp dport 12345 counter queue num 12
+ }
+}
+EOF
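+	# numgen inc mod 2 alternates the DNAT target between ns2 and ns3;
+	# the postrouting rule hands packets to queue 12, where nf_queue
+	# delays them so the second packet races conntrack confirmation.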
+ :> "$TMPFILE1"
+ :> "$TMPFILE2"
+
+ timeout 10 ip netns exec "$ns2" socat UDP-LISTEN:12345,fork OPEN:"$TMPFILE1",trunc &
+ local rpid1=$!
+
+ timeout 10 ip netns exec "$ns3" socat UDP-LISTEN:12345,fork OPEN:"$TMPFILE2",trunc &
+ local rpid2=$!
+
+ ip netns exec "$nsrouter" ./nf_queue -q 12 -d 1000 &
+ local nfqpid=$!
+
+ busywait "$BUSYWAIT_TIMEOUT" udp_listener_ready "$ns2"
+ busywait "$BUSYWAIT_TIMEOUT" udp_listener_ready "$ns3"
+ busywait "$BUSYWAIT_TIMEOUT" nf_queue_wait "$nsrouter" 12
+
+	# Send two packets; one should end up in ns2, the other in ns3.
+	# This is because nfqueue delays the first packet long enough that
+	# the second one will not find an existing conntrack entry.
+ echo "Packet 1" | ip netns exec "$ns1" socat STDIN UDP-DATAGRAM:10.6.6.6:12345,bind=0.0.0.0:55221
+ echo "Packet 2" | ip netns exec "$ns1" socat STDIN UDP-DATAGRAM:10.6.6.6:12345,bind=0.0.0.0:55221
+
+ busywait 10000 output_files_written "$TMPFILE1" "$TMPFILE2"
+
+ kill "$nfqpid"
+
+	if ! ip netns exec "$nsrouter" bash -c 'conntrack -L -p udp --dport 12345 2>/dev/null | wc -l | grep -q "^1$"'; then
+ echo "FAIL: Expected One udp conntrack entry"
+ ip netns exec "$nsrouter" conntrack -L -p udp --dport 12345
+ ret=1
+ fi
+
+ if ! ip netns exec "$nsrouter" nft delete table inet udpq; then
+ echo "FAIL: Could not delete udpq table"
+ ret=1
+ return
+ fi
+
+ NUMLINES1=$(wc -l < "$TMPFILE1")
+ NUMLINES2=$(wc -l < "$TMPFILE2")
+
+ if [ "$NUMLINES1" -ne 1 ] || [ "$NUMLINES2" -ne 1 ]; then
+ ret=1
+ echo "FAIL: uneven udp packet distribution: $NUMLINES1 $NUMLINES2"
+ echo -n "$TMPFILE1: ";cat "$TMPFILE1"
+ echo -n "$TMPFILE2: ";cat "$TMPFILE2"
+ return
+ fi
+
+ echo "PASS: both udp receivers got one packet each"
+}
+
test_queue_removal()
{
read tainted_then < /proc/sys/kernel/tainted
@@ -512,6 +600,7 @@ EOF
ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth2.forwarding=1 > /dev/null
load_ruleset "filter" 0
@@ -549,6 +638,7 @@ test_tcp_localhost_connectclose
test_tcp_localhost_requeue
test_sctp_forward
test_sctp_output
+test_udp_ct_race
# should be last, adds vrf device in ns1 and changes routes
test_icmp_vrf
diff --git a/tools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh b/tools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh
new file mode 100755
index 000000000000..e208fb03eeb7
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/nft_tproxy_tcp.sh
@@ -0,0 +1,358 @@
+#!/bin/bash
+#
+# This tests tproxy on the following scenario:
+#
+# +------------+
+# +-------+ | nsrouter | +-------+
+# |ns1 |.99 .1| |.1 .99| ns2|
+# | eth0|---------------|veth0 veth1|------------------|eth0 |
+# | | 10.0.1.0/24 | | 10.0.2.0/24 | |
+# +-------+ dead:1::/64 | veth2 | dead:2::/64 +-------+
+# +------------+
+# |.1
+# |
+# |
+# | +-------+
+# | .99| ns3|
+# +------------------------|eth0 |
+# 10.0.3.0/24 | |
+# dead:3::/64 +-------+
+#
+# The tproxy implementation acts as an echo server, so the client
+# must receive the same message it sent if it has been proxied.
+# If it is not proxied, the servers return PONG_NS#, where # is the
+# number of the namespace the server is running in.
+#
+# shellcheck disable=SC2162,SC2317
+
+source lib.sh
+ret=0
+timeout=5
+
+cleanup()
+{
+ ip netns pids "$ns1" | xargs kill 2>/dev/null
+ ip netns pids "$ns2" | xargs kill 2>/dev/null
+ ip netns pids "$ns3" | xargs kill 2>/dev/null
+ ip netns pids "$nsrouter" | xargs kill 2>/dev/null
+
+ cleanup_all_ns
+}
+
+checktool "nft --version" "test without nft tool"
+checktool "socat -h" "run test without socat"
+
+trap cleanup EXIT
+setup_ns ns1 ns2 ns3 nsrouter
+
+if ! ip link add veth0 netns "$nsrouter" type veth peer name eth0 netns "$ns1" > /dev/null 2>&1; then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
+ip link add veth1 netns "$nsrouter" type veth peer name eth0 netns "$ns2"
+ip link add veth2 netns "$nsrouter" type veth peer name eth0 netns "$ns3"
+
+ip -net "$nsrouter" link set veth0 up
+ip -net "$nsrouter" addr add 10.0.1.1/24 dev veth0
+ip -net "$nsrouter" addr add dead:1::1/64 dev veth0 nodad
+
+ip -net "$nsrouter" link set veth1 up
+ip -net "$nsrouter" addr add 10.0.2.1/24 dev veth1
+ip -net "$nsrouter" addr add dead:2::1/64 dev veth1 nodad
+
+ip -net "$nsrouter" link set veth2 up
+ip -net "$nsrouter" addr add 10.0.3.1/24 dev veth2
+ip -net "$nsrouter" addr add dead:3::1/64 dev veth2 nodad
+
+ip -net "$ns1" link set eth0 up
+ip -net "$ns2" link set eth0 up
+ip -net "$ns3" link set eth0 up
+
+ip -net "$ns1" addr add 10.0.1.99/24 dev eth0
+ip -net "$ns1" addr add dead:1::99/64 dev eth0 nodad
+ip -net "$ns1" route add default via 10.0.1.1
+ip -net "$ns1" route add default via dead:1::1
+
+ip -net "$ns2" addr add 10.0.2.99/24 dev eth0
+ip -net "$ns2" addr add dead:2::99/64 dev eth0 nodad
+ip -net "$ns2" route add default via 10.0.2.1
+ip -net "$ns2" route add default via dead:2::1
+
+ip -net "$ns3" addr add 10.0.3.99/24 dev eth0
+ip -net "$ns3" addr add dead:3::99/64 dev eth0 nodad
+ip -net "$ns3" route add default via 10.0.3.1
+ip -net "$ns3" route add default via dead:3::1
+
+ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth2.forwarding=1 > /dev/null
+
+test_ping() {
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.2.99 > /dev/null; then
+ return 1
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:2::99 > /dev/null; then
+ return 2
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.3.99 > /dev/null; then
+ return 1
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:3::99 > /dev/null; then
+ return 2
+ fi
+
+ return 0
+}
+
+test_ping_router() {
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.2.1 > /dev/null; then
+ return 3
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:2::1 > /dev/null; then
+ return 4
+ fi
+
+ return 0
+}
+
+
+listener_ready()
+{
+ local ns="$1"
+ local port="$2"
+ local proto="$3"
+ ss -N "$ns" -ln "$proto" -o "sport = :$port" | grep -q "$port"
+}
+
+test_tproxy()
+{
+ local traffic_origin="$1"
+ local ip_proto="$2"
+ local expect_ns1_ns2="$3"
+ local expect_ns1_ns3="$4"
+ local expect_nsrouter_ns2="$5"
+ local expect_nsrouter_ns3="$6"
+
+ # derived variables
+ local testname="test_${ip_proto}_tcp_${traffic_origin}"
+ local socat_ipproto
+ local ns1_ip
+ local ns2_ip
+ local ns3_ip
+ local ns2_target
+ local ns3_target
+ local nftables_subject
+ local ip_command
+
+	# socat 1.8.0 has a bug that requires specifying the IP family to bind to (fixed in 1.8.0.1)
+ case $ip_proto in
+ "ip")
+ socat_ipproto="-4"
+ ns1_ip=10.0.1.99
+ ns2_ip=10.0.2.99
+ ns3_ip=10.0.3.99
+ ns2_target="tcp:$ns2_ip:8080"
+ ns3_target="tcp:$ns3_ip:8080"
+ nftables_subject="ip daddr $ns2_ip tcp dport 8080"
+ ip_command="ip"
+ ;;
+ "ip6")
+ socat_ipproto="-6"
+ ns1_ip=dead:1::99
+ ns2_ip=dead:2::99
+ ns3_ip=dead:3::99
+ ns2_target="tcp:[$ns2_ip]:8080"
+ ns3_target="tcp:[$ns3_ip]:8080"
+ nftables_subject="ip6 daddr $ns2_ip tcp dport 8080"
+ ip_command="ip -6"
+ ;;
+ *)
+ echo "FAIL: unsupported protocol"
+ exit 255
+ ;;
+ esac
+
+ case $traffic_origin in
+	# To capture locally originated traffic we need to mark the outgoing
+	# traffic so the policy-based routing rule redirects it and it can be
+	# processed in the prerouting chain.
+ "local")
+ nftables_rules="
+flush ruleset
+table inet filter {
+ chain divert {
+ type filter hook prerouting priority 0; policy accept;
+ $nftables_subject tproxy $ip_proto to :12345 meta mark set 1 accept
+ }
+ chain output {
+ type route hook output priority 0; policy accept;
+ $nftables_subject meta mark set 1 accept
+ }
+}"
+ ;;
+ "forward")
+ nftables_rules="
+flush ruleset
+table inet filter {
+ chain divert {
+ type filter hook prerouting priority 0; policy accept;
+ $nftables_subject tproxy $ip_proto to :12345 meta mark set 1 accept
+ }
+}"
+ ;;
+ *)
+ echo "FAIL: unsupported parameter for traffic origin"
+ exit 255
+ ;;
+ esac
+
+ # shellcheck disable=SC2046 # Intended splitting of ip_command
+ ip netns exec "$nsrouter" $ip_command rule add fwmark 1 table 100
+ ip netns exec "$nsrouter" $ip_command route add local "${ns2_ip}" dev lo table 100
+ echo "$nftables_rules" | ip netns exec "$nsrouter" nft -f /dev/stdin
+
+ timeout "$timeout" ip netns exec "$nsrouter" socat "$socat_ipproto" tcp-listen:12345,fork,ip-transparent SYSTEM:"cat" 2>/dev/null &
+ local tproxy_pid=$!
+
+ timeout "$timeout" ip netns exec "$ns2" socat "$socat_ipproto" tcp-listen:8080,fork SYSTEM:"echo PONG_NS2" 2>/dev/null &
+ local server2_pid=$!
+
+ timeout "$timeout" ip netns exec "$ns3" socat "$socat_ipproto" tcp-listen:8080,fork SYSTEM:"echo PONG_NS3" 2>/dev/null &
+ local server3_pid=$!
+
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$nsrouter" 12345 "-t"
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$ns2" 8080 "-t"
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$ns3" 8080 "-t"
+
+ local result
+ # request from ns1 to ns2 (forwarded traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$ns1" socat -t 2 -T 2 STDIO "$ns2_target")
+ if [ "$result" == "$expect_ns1_ns2" ] ;then
+ echo "PASS: tproxy test $testname: ns1 got reply \"$result\" connecting to ns2"
+ else
+ echo "ERROR: tproxy test $testname: ns1 got reply \"$result\" connecting to ns2, not \"${expect_ns1_ns2}\" as intended"
+ ret=1
+ fi
+
+	# request from ns1 to ns3 (forwarded traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$ns1" socat -t 2 -T 2 STDIO "$ns3_target")
+ if [ "$result" = "$expect_ns1_ns3" ] ;then
+ echo "PASS: tproxy test $testname: ns1 got reply \"$result\" connecting to ns3"
+ else
+ echo "ERROR: tproxy test $testname: ns1 got reply \"$result\" connecting to ns3, not \"$expect_ns1_ns3\" as intended"
+ ret=1
+ fi
+
+	# request from nsrouter to ns2 (locally originated traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$nsrouter" socat -t 2 -T 2 STDIO "$ns2_target")
+ if [ "$result" == "$expect_nsrouter_ns2" ] ;then
+ echo "PASS: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns2"
+ else
+ echo "ERROR: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns2, not \"$expect_nsrouter_ns2\" as intended"
+ ret=1
+ fi
+
+	# request from nsrouter to ns3 (locally originated traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$nsrouter" socat -t 2 -T 2 STDIO "$ns3_target")
+ if [ "$result" = "$expect_nsrouter_ns3" ] ;then
+ echo "PASS: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns3"
+ else
+ echo "ERROR: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns3, not \"$expect_nsrouter_ns3\" as intended"
+ ret=1
+ fi
+
+ # cleanup
+ kill "$tproxy_pid" "$server2_pid" "$server3_pid" 2>/dev/null
+ # shellcheck disable=SC2046 # Intended splitting of ip_command
+ ip netns exec "$nsrouter" $ip_command rule del fwmark 1 table 100
+ ip netns exec "$nsrouter" $ip_command route flush table 100
+}
+
+
+test_ipv4_tcp_forward()
+{
+ local traffic_origin="forward"
+ local ip_proto="ip"
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="PONG_NS2"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ test_tproxy "$traffic_origin" \
+ "$ip_proto" \
+ "$expect_ns1_ns2" \
+ "$expect_ns1_ns3" \
+ "$expect_nsrouter_ns2" \
+ "$expect_nsrouter_ns3"
+}
+
+test_ipv4_tcp_local()
+{
+ local traffic_origin="local"
+ local ip_proto="ip"
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="I_M_PROXIED"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ test_tproxy "$traffic_origin" \
+ "$ip_proto" \
+ "$expect_ns1_ns2" \
+ "$expect_ns1_ns3" \
+ "$expect_nsrouter_ns2" \
+ "$expect_nsrouter_ns3"
+}
+
+test_ipv6_tcp_forward()
+{
+ local traffic_origin="forward"
+ local ip_proto="ip6"
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="PONG_NS2"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ test_tproxy "$traffic_origin" \
+ "$ip_proto" \
+ "$expect_ns1_ns2" \
+ "$expect_ns1_ns3" \
+ "$expect_nsrouter_ns2" \
+ "$expect_nsrouter_ns3"
+}
+
+test_ipv6_tcp_local()
+{
+ local traffic_origin="local"
+ local ip_proto="ip6"
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="I_M_PROXIED"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ test_tproxy "$traffic_origin" \
+ "$ip_proto" \
+ "$expect_ns1_ns2" \
+ "$expect_ns1_ns3" \
+ "$expect_nsrouter_ns2" \
+ "$expect_nsrouter_ns3"
+}
+
+if test_ping; then
+	# basic connectivity works, the tproxy tests can proceed
+	echo "PASS: ${ns1} can reach ${ns2} and ${ns3}"
+else
+ echo "FAIL: ${ns1} cannot reach ${ns2}: $ret" 1>&2
+ exit $ret
+fi
+
+test_ipv4_tcp_forward
+test_ipv4_tcp_local
+test_ipv6_tcp_forward
+test_ipv6_tcp_local
+
+exit $ret
diff --git a/tools/testing/selftests/net/netfilter/nft_tproxy_udp.sh b/tools/testing/selftests/net/netfilter/nft_tproxy_udp.sh
new file mode 100755
index 000000000000..d16de13fe5a7
--- /dev/null
+++ b/tools/testing/selftests/net/netfilter/nft_tproxy_udp.sh
@@ -0,0 +1,262 @@
+#!/bin/bash
+#
+# This tests tproxy on the following scenario:
+#
+# +------------+
+# +-------+ | nsrouter | +-------+
+# |ns1 |.99 .1| |.1 .99| ns2|
+# | eth0|---------------|veth0 veth1|------------------|eth0 |
+# | | 10.0.1.0/24 | | 10.0.2.0/24 | |
+# +-------+ dead:1::/64 | veth2 | dead:2::/64 +-------+
+# +------------+
+# |.1
+# |
+# |
+# | +-------+
+# | .99| ns3|
+# +------------------------|eth0 |
+# 10.0.3.0/24 | |
+# dead:3::/64 +-------+
+#
+# The tproxy implementation acts as an echo server, so the client
+# must receive the same message it sent if it has been proxied.
+# If it is not proxied, the servers return PONG_NS#, where # is the
+# number of the namespace the server is running in.
+# shellcheck disable=SC2162,SC2317
+
+source lib.sh
+ret=0
+# UDP is slow
+timeout=15
+
+cleanup()
+{
+ ip netns pids "$ns1" | xargs kill 2>/dev/null
+ ip netns pids "$ns2" | xargs kill 2>/dev/null
+ ip netns pids "$ns3" | xargs kill 2>/dev/null
+ ip netns pids "$nsrouter" | xargs kill 2>/dev/null
+
+ cleanup_all_ns
+}
+
+checktool "nft --version" "test without nft tool"
+checktool "socat -h" "run test without socat"
+
+trap cleanup EXIT
+setup_ns ns1 ns2 ns3 nsrouter
+
+if ! ip link add veth0 netns "$nsrouter" type veth peer name eth0 netns "$ns1" > /dev/null 2>&1; then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
+ip link add veth1 netns "$nsrouter" type veth peer name eth0 netns "$ns2"
+ip link add veth2 netns "$nsrouter" type veth peer name eth0 netns "$ns3"
+
+ip -net "$nsrouter" link set veth0 up
+ip -net "$nsrouter" addr add 10.0.1.1/24 dev veth0
+ip -net "$nsrouter" addr add dead:1::1/64 dev veth0 nodad
+
+ip -net "$nsrouter" link set veth1 up
+ip -net "$nsrouter" addr add 10.0.2.1/24 dev veth1
+ip -net "$nsrouter" addr add dead:2::1/64 dev veth1 nodad
+
+ip -net "$nsrouter" link set veth2 up
+ip -net "$nsrouter" addr add 10.0.3.1/24 dev veth2
+ip -net "$nsrouter" addr add dead:3::1/64 dev veth2 nodad
+
+ip -net "$ns1" link set eth0 up
+ip -net "$ns2" link set eth0 up
+ip -net "$ns3" link set eth0 up
+
+ip -net "$ns1" addr add 10.0.1.99/24 dev eth0
+ip -net "$ns1" addr add dead:1::99/64 dev eth0 nodad
+ip -net "$ns1" route add default via 10.0.1.1
+ip -net "$ns1" route add default via dead:1::1
+
+ip -net "$ns2" addr add 10.0.2.99/24 dev eth0
+ip -net "$ns2" addr add dead:2::99/64 dev eth0 nodad
+ip -net "$ns2" route add default via 10.0.2.1
+ip -net "$ns2" route add default via dead:2::1
+
+ip -net "$ns3" addr add 10.0.3.99/24 dev eth0
+ip -net "$ns3" addr add dead:3::99/64 dev eth0 nodad
+ip -net "$ns3" route add default via 10.0.3.1
+ip -net "$ns3" route add default via dead:3::1
+
+ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth2.forwarding=1 > /dev/null
+
+test_ping() {
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.2.99 > /dev/null; then
+ return 1
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:2::99 > /dev/null; then
+ return 2
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.3.99 > /dev/null; then
+ return 1
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:3::99 > /dev/null; then
+ return 2
+ fi
+
+ return 0
+}
+
+test_ping_router() {
+ if ! ip netns exec "$ns1" ping -c 1 -q 10.0.2.1 > /dev/null; then
+ return 3
+ fi
+
+ if ! ip netns exec "$ns1" ping -c 1 -q dead:2::1 > /dev/null; then
+ return 4
+ fi
+
+ return 0
+}
+
+
+listener_ready()
+{
+ local ns="$1"
+ local port="$2"
+ local proto="$3"
+ ss -N "$ns" -ln "$proto" -o "sport = :$port" | grep -q "$port"
+}
+
+test_tproxy_udp_forward()
+{
+ local ip_proto="$1"
+
+ local expect_ns1_ns2="I_M_PROXIED"
+ local expect_ns1_ns3="PONG_NS3"
+ local expect_nsrouter_ns2="PONG_NS2"
+ local expect_nsrouter_ns3="PONG_NS3"
+
+ # derived variables
+ local testname="test_${ip_proto}_udp_forward"
+ local socat_ipproto
+ local ns1_ip
+ local ns2_ip
+ local ns3_ip
+ local ns1_ip_port
+ local ns2_ip_port
+ local ns3_ip_port
+ local ip_command
+
+	# socat 1.8.0 has a bug that requires specifying the IP family to bind to (fixed in 1.8.0.1)
+ case $ip_proto in
+ "ip")
+ socat_ipproto="-4"
+ ns1_ip=10.0.1.99
+ ns2_ip=10.0.2.99
+ ns3_ip=10.0.3.99
+ ns1_ip_port="$ns1_ip:18888"
+ ns2_ip_port="$ns2_ip:8080"
+ ns3_ip_port="$ns3_ip:8080"
+ ip_command="ip"
+ ;;
+ "ip6")
+ socat_ipproto="-6"
+ ns1_ip=dead:1::99
+ ns2_ip=dead:2::99
+ ns3_ip=dead:3::99
+ ns1_ip_port="[$ns1_ip]:18888"
+ ns2_ip_port="[$ns2_ip]:8080"
+ ns3_ip_port="[$ns3_ip]:8080"
+ ip_command="ip -6"
+ ;;
+ *)
+ echo "FAIL: unsupported protocol"
+ exit 255
+ ;;
+ esac
+
+ # shellcheck disable=SC2046 # Intended splitting of ip_command
+ ip netns exec "$nsrouter" $ip_command rule add fwmark 1 table 100
+ ip netns exec "$nsrouter" $ip_command route add local "$ns2_ip" dev lo table 100
+ ip netns exec "$nsrouter" nft -f /dev/stdin <<EOF
+flush ruleset
+table inet filter {
+ chain divert {
+ type filter hook prerouting priority 0; policy accept;
+ $ip_proto daddr $ns2_ip udp dport 8080 tproxy $ip_proto to :12345 meta mark set 1 accept
+ }
+}
+EOF
+
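+	# The relay listens transparently on port 12345 and echoes back to
+	# ns1:18888 while binding to ns2's address, so replies appear to come
+	# from the original (proxied) destination.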
+ timeout "$timeout" ip netns exec "$nsrouter" socat -u "$socat_ipproto" udp-listen:12345,fork,ip-transparent,reuseport udp:"$ns1_ip_port",ip-transparent,reuseport,bind="$ns2_ip_port" 2>/dev/null &
+ local tproxy_pid=$!
+
+ timeout "$timeout" ip netns exec "$ns2" socat "$socat_ipproto" udp-listen:8080,fork SYSTEM:"echo PONG_NS2" 2>/dev/null &
+ local server2_pid=$!
+
+ timeout "$timeout" ip netns exec "$ns3" socat "$socat_ipproto" udp-listen:8080,fork SYSTEM:"echo PONG_NS3" 2>/dev/null &
+ local server3_pid=$!
+
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$nsrouter" 12345 "-u"
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$ns2" 8080 "-u"
+ busywait "$BUSYWAIT_TIMEOUT" listener_ready "$ns3" 8080 "-u"
+
+ local result
+ # request from ns1 to ns2 (forwarded traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$ns1" socat -t 2 -T 2 STDIO udp:"$ns2_ip_port",sourceport=18888)
+ if [ "$result" == "$expect_ns1_ns2" ] ;then
+ echo "PASS: tproxy test $testname: ns1 got reply \"$result\" connecting to ns2"
+ else
+ echo "ERROR: tproxy test $testname: ns1 got reply \"$result\" connecting to ns2, not \"${expect_ns1_ns2}\" as intended"
+ ret=1
+ fi
+
+ # request from ns1 to ns3 (forwarded traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$ns1" socat -t 2 -T 2 STDIO udp:"$ns3_ip_port")
+ if [ "$result" = "$expect_ns1_ns3" ] ;then
+ echo "PASS: tproxy test $testname: ns1 got reply \"$result\" connecting to ns3"
+ else
+ echo "ERROR: tproxy test $testname: ns1 got reply \"$result\" connecting to ns3, not \"$expect_ns1_ns3\" as intended"
+ ret=1
+ fi
+
+	# request from nsrouter to ns2 (locally originated traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$nsrouter" socat -t 2 -T 2 STDIO udp:"$ns2_ip_port")
+ if [ "$result" == "$expect_nsrouter_ns2" ] ;then
+ echo "PASS: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns2"
+ else
+ echo "ERROR: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns2, not \"$expect_nsrouter_ns2\" as intended"
+ ret=1
+ fi
+
+	# request from nsrouter to ns3 (locally originated traffic)
+ result=$(echo I_M_PROXIED | ip netns exec "$nsrouter" socat -t 2 -T 2 STDIO udp:"$ns3_ip_port")
+ if [ "$result" = "$expect_nsrouter_ns3" ] ;then
+ echo "PASS: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns3"
+ else
+ echo "ERROR: tproxy test $testname: nsrouter got reply \"$result\" connecting to ns3, not \"$expect_nsrouter_ns3\" as intended"
+ ret=1
+ fi
+
+ # cleanup
+ kill "$tproxy_pid" "$server2_pid" "$server3_pid" 2>/dev/null
+ # shellcheck disable=SC2046 # Intended splitting of ip_command
+ ip netns exec "$nsrouter" $ip_command rule del fwmark 1 table 100
+ ip netns exec "$nsrouter" $ip_command route flush table 100
+}
+
+
+if test_ping; then
+	# basic connectivity works, the tproxy tests can proceed
+	echo "PASS: ${ns1} can reach ${ns2} and ${ns3}"
+else
+ echo "FAIL: ${ns1} cannot reach ${ns2}: $ret" 1>&2
+ exit $ret
+fi
+
+test_tproxy_udp_forward "ip"
+test_tproxy_udp_forward "ip6"
+
+exit $ret
diff --git a/tools/testing/selftests/net/packetdrill/ksft_runner.sh b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
index 7478c0c0c9aa..4071c133f29e 100755
--- a/tools/testing/selftests/net/packetdrill/ksft_runner.sh
+++ b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
@@ -30,12 +30,17 @@ if [ -z "$(which packetdrill)" ]; then
exit "$KSFT_SKIP"
fi
+declare -a optargs
+if [[ -n "${KSFT_MACHINE_SLOW}" ]]; then
+ optargs+=('--tolerance_usecs=14000')
+fi
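+# KSFT_MACHINE_SLOW indicates a slow machine or VM; widen packetdrill's
+# timing tolerance there to reduce spurious timing failures.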
+
ktap_print_header
ktap_set_plan 2
-unshare -n packetdrill ${ipv4_args[@]} $(basename $script) > /dev/null \
+unshare -n packetdrill ${ipv4_args[@]} ${optargs[@]} $(basename $script) > /dev/null \
&& ktap_test_pass "ipv4" || ktap_test_fail "ipv4"
-unshare -n packetdrill ${ipv6_args[@]} $(basename $script) > /dev/null \
+unshare -n packetdrill ${ipv6_args[@]} ${optargs[@]} $(basename $script) > /dev/null \
&& ktap_test_pass "ipv6" || ktap_test_fail "ipv6"
ktap_finished
diff --git a/tools/testing/selftests/ring-buffer/map_test.c b/tools/testing/selftests/ring-buffer/map_test.c
index a9006fa7097e..d10a847130fb 100644
--- a/tools/testing/selftests/ring-buffer/map_test.c
+++ b/tools/testing/selftests/ring-buffer/map_test.c
@@ -92,12 +92,22 @@ int tracefs_cpu_map(struct tracefs_cpu_map_desc *desc, int cpu)
if (desc->cpu_fd < 0)
return -ENODEV;
+again:
map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
if (map == MAP_FAILED)
return -errno;
desc->meta = (struct trace_buffer_meta *)map;
+	/* the meta-page may be bigger than the original mapping: remap with its actual size */
+ if (page_size < desc->meta->meta_struct_len) {
+ int meta_page_size = desc->meta->meta_page_size;
+
+ munmap(desc->meta, page_size);
+ page_size = meta_page_size;
+ goto again;
+ }
+
return 0;
}
@@ -228,6 +238,20 @@ TEST_F(map, data_mmap)
data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
desc->cpu_fd, meta_len);
ASSERT_EQ(data, MAP_FAILED);
+
+ /* Verify meta-page padding */
+ if (desc->meta->meta_page_size > getpagesize()) {
+ data_len = desc->meta->meta_page_size;
+ data = mmap(NULL, data_len,
+ PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
+ ASSERT_NE(data, MAP_FAILED);
+
+ for (int i = desc->meta->meta_struct_len;
+ i < desc->meta->meta_page_size; i += sizeof(int))
+ ASSERT_EQ(*(int *)(data + i), 0);
+
+ munmap(data, data_len);
+ }
}
FIXTURE(snapshot) {
diff --git a/tools/testing/selftests/sched_ext/.gitignore b/tools/testing/selftests/sched_ext/.gitignore
new file mode 100644
index 000000000000..ae5491a114c0
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/.gitignore
@@ -0,0 +1,6 @@
+*
+!*.c
+!*.h
+!Makefile
+!.gitignore
+!config
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
new file mode 100644
index 000000000000..0754a2c110a1
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -0,0 +1,218 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+include ../../../build/Build.include
+include ../../../scripts/Makefile.arch
+include ../../../scripts/Makefile.include
+include ../lib.mk
+
+ifneq ($(LLVM),)
+ifneq ($(filter %/,$(LLVM)),)
+LLVM_PREFIX := $(LLVM)
+else ifneq ($(filter -%,$(LLVM)),)
+LLVM_SUFFIX := $(LLVM)
+endif
+
+CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
+else
+CC := gcc
+endif # LLVM
+
+ifneq ($(CROSS_COMPILE),)
+$(error CROSS_COMPILE not supported for scx selftests)
+endif # CROSS_COMPILE
+
+CURDIR := $(abspath .)
+REPOROOT := $(abspath ../../../..)
+TOOLSDIR := $(REPOROOT)/tools
+LIBDIR := $(TOOLSDIR)/lib
+BPFDIR := $(LIBDIR)/bpf
+TOOLSINCDIR := $(TOOLSDIR)/include
+BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool
+APIDIR := $(TOOLSINCDIR)/uapi
+GENDIR := $(REPOROOT)/include/generated
+GENHDR := $(GENDIR)/autoconf.h
+SCXTOOLSDIR := $(TOOLSDIR)/sched_ext
+SCXTOOLSINCDIR := $(TOOLSDIR)/sched_ext/include
+
+OUTPUT_DIR := $(CURDIR)/build
+OBJ_DIR := $(OUTPUT_DIR)/obj
+INCLUDE_DIR := $(OUTPUT_DIR)/include
+BPFOBJ_DIR := $(OBJ_DIR)/libbpf
+SCXOBJ_DIR := $(OBJ_DIR)/sched_ext
+BPFOBJ := $(BPFOBJ_DIR)/libbpf.a
+LIBBPF_OUTPUT := $(OBJ_DIR)/libbpf/libbpf.a
+DEFAULT_BPFTOOL := $(OUTPUT_DIR)/sbin/bpftool
+HOST_BUILD_DIR := $(OBJ_DIR)
+HOST_OUTPUT_DIR := $(OUTPUT_DIR)
+
+VMLINUX_BTF_PATHS ?= ../../../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
+
+BPFTOOL ?= $(DEFAULT_BPFTOOL)
+
+ifneq ($(wildcard $(GENHDR)),)
+ GENFLAGS := -DHAVE_GENHDR
+endif
+
+CFLAGS += -g -O2 -rdynamic -pthread -Wall -Werror $(GENFLAGS) \
+ -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
+ -I$(TOOLSINCDIR) -I$(APIDIR) -I$(CURDIR)/include -I$(SCXTOOLSINCDIR)
+
+# Silence some warnings when compiled with clang
+ifneq ($(LLVM),)
+CFLAGS += -Wno-unused-command-line-argument
+endif
+
+LDFLAGS = -lelf -lz -lpthread -lzstd
+
+IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
+ grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
+
+# Get Clang's default includes on this system, as opposed to those seen by
+# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
+#
+# Use '-idirafter': Don't interfere with include mechanics except where the
+# build would have failed anyways.
+define get_sys_includes
+$(shell $(1) -v -E - </dev/null 2>&1 \
+ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
+$(shell $(1) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
+endef
+
+BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \
+ $(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) \
+ -I$(CURDIR)/include -I$(CURDIR)/include/bpf-compat \
+ -I$(INCLUDE_DIR) -I$(APIDIR) -I$(SCXTOOLSINCDIR) \
+ -I$(REPOROOT)/include \
+ $(call get_sys_includes,$(CLANG)) \
+ -Wall -Wno-compare-distinct-pointer-types \
+ -Wno-incompatible-function-pointer-types \
+ -O2 -mcpu=v3
+
+# sort removes libbpf duplicates when not cross-building
+MAKE_DIRS := $(sort $(OBJ_DIR)/libbpf $(OBJ_DIR)/libbpf \
+ $(OBJ_DIR)/bpftool $(OBJ_DIR)/resolve_btfids \
+ $(INCLUDE_DIR) $(SCXOBJ_DIR))
+
+$(MAKE_DIRS):
+ $(call msg,MKDIR,,$@)
+ $(Q)mkdir -p $@
+
+$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
+ $(APIDIR)/linux/bpf.h \
+ | $(OBJ_DIR)/libbpf
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \
+ EXTRA_CFLAGS='-g -O0 -fPIC' \
+ DESTDIR=$(OUTPUT_DIR) prefix= all install_headers
+
+$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
+ $(LIBBPF_OUTPUT) | $(OBJ_DIR)/bpftool
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
+ ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
+ EXTRA_CFLAGS='-g -O0' \
+ OUTPUT=$(OBJ_DIR)/bpftool/ \
+ LIBBPF_OUTPUT=$(OBJ_DIR)/libbpf/ \
+ LIBBPF_DESTDIR=$(OUTPUT_DIR)/ \
+ prefix= DESTDIR=$(OUTPUT_DIR)/ install-bin
+
+$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
+ifeq ($(VMLINUX_H),)
+ $(call msg,GEN,,$@)
+ $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+else
+ $(call msg,CP,,$@)
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(SCXOBJ_DIR)/%.bpf.o: %.bpf.c $(INCLUDE_DIR)/vmlinux.h | $(BPFOBJ) $(SCXOBJ_DIR)
+ $(call msg,CLNG-BPF,,$(notdir $@))
+ $(Q)$(CLANG) $(BPF_CFLAGS) -target bpf -c $< -o $@
+
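+# Link each BPF object with bpftool until the output converges (linked2 and
+# linked3 must be identical), then generate the skeleton and subskeleton
+# headers from the final linked object.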
+$(INCLUDE_DIR)/%.bpf.skel.h: $(SCXOBJ_DIR)/%.bpf.o $(INCLUDE_DIR)/vmlinux.h $(BPFTOOL) | $(INCLUDE_DIR)
+ $(eval sched=$(notdir $@))
+ $(call msg,GEN-SKEL,,$(sched))
+ $(Q)$(BPFTOOL) gen object $(<:.o=.linked1.o) $<
+ $(Q)$(BPFTOOL) gen object $(<:.o=.linked2.o) $(<:.o=.linked1.o)
+ $(Q)$(BPFTOOL) gen object $(<:.o=.linked3.o) $(<:.o=.linked2.o)
+ $(Q)diff $(<:.o=.linked2.o) $(<:.o=.linked3.o)
+ $(Q)$(BPFTOOL) gen skeleton $(<:.o=.linked3.o) name $(subst .bpf.skel.h,,$(sched)) > $@
+ $(Q)$(BPFTOOL) gen subskeleton $(<:.o=.linked3.o) name $(subst .bpf.skel.h,,$(sched)) > $(@:.skel.h=.subskel.h)
+
+################
+# C schedulers #
+################
+
+override define CLEAN
+ rm -rf $(OUTPUT_DIR)
+ rm -f *.o *.bpf.o *.bpf.skel.h *.bpf.subskel.h
+ rm -f $(TEST_GEN_PROGS)
+ rm -f runner
+endef
+
+# Every testcase takes all of the BPF progs as dependencies by default. This
+# allows testcases to load any BPF scheduler, which is useful for testcases
+# that don't need their own prog to run their test.
+all_test_bpfprogs := $(foreach prog,$(wildcard *.bpf.c),$(INCLUDE_DIR)/$(patsubst %.c,%.skel.h,$(prog)))
+
+auto-test-targets := \
+ create_dsq \
+ enq_last_no_enq_fails \
+ enq_select_cpu_fails \
+ ddsp_bogus_dsq_fail \
+ ddsp_vtimelocal_fail \
+ dsp_local_on \
+ exit \
+ hotplug \
+ init_enable_count \
+ maximal \
+ maybe_null \
+ minimal \
+ prog_run \
+ reload_loop \
+ select_cpu_dfl \
+ select_cpu_dfl_nodispatch \
+ select_cpu_dispatch \
+ select_cpu_dispatch_bad_dsq \
+ select_cpu_dispatch_dbl_dsp \
+ select_cpu_vtime \
+ test_example \
+
+testcase-targets := $(addsuffix .o,$(addprefix $(SCXOBJ_DIR)/,$(auto-test-targets)))
+
+$(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR)
+ $(CC) $(CFLAGS) -c $< -o $@
+
+# Create all of the test targets' object files, whose testcase objects will be
+# registered into the runner in ELF constructors.
+#
+# Note that we must do double expansion here in order to support conditionally
+# compiling BPF object files only if one is present, as the wildcard Make
+# function doesn't support using implicit rules otherwise.
+$(testcase-targets): $(SCXOBJ_DIR)/%.o: %.c $(SCXOBJ_DIR)/runner.o $(all_test_bpfprogs) | $(SCXOBJ_DIR)
+ $(eval test=$(patsubst %.o,%.c,$(notdir $@)))
+ $(CC) $(CFLAGS) -c $< -o $@ $(SCXOBJ_DIR)/runner.o
+
+$(SCXOBJ_DIR)/util.o: util.c | $(SCXOBJ_DIR)
+ $(CC) $(CFLAGS) -c $< -o $@
+
+runner: $(SCXOBJ_DIR)/runner.o $(SCXOBJ_DIR)/util.o $(BPFOBJ) $(testcase-targets)
+ @echo "$(testcase-targets)"
+ $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
+
+TEST_GEN_PROGS := runner
+
+all: runner
+
+.PHONY: all clean help
+
+.DEFAULT_GOAL := all
+
+.DELETE_ON_ERROR:
+
+.SECONDARY:
diff --git a/tools/testing/selftests/sched_ext/config b/tools/testing/selftests/sched_ext/config
new file mode 100644
index 000000000000..0de9b4ee249d
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/config
@@ -0,0 +1,9 @@
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHED_CLASS_EXT=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_EXT_GROUP_SCHED=y
+CONFIG_BPF=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
diff --git a/tools/testing/selftests/sched_ext/create_dsq.bpf.c b/tools/testing/selftests/sched_ext/create_dsq.bpf.c
new file mode 100644
index 000000000000..23f79ed343f0
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/create_dsq.bpf.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Create and destroy DSQs in a loop.
+ *
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+void BPF_STRUCT_OPS(create_dsq_exit_task, struct task_struct *p,
+ struct scx_exit_task_args *args)
+{
+ scx_bpf_destroy_dsq(p->pid);
+}
+
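+/*
+ * Create a DSQ keyed by the task's PID when the task enters the scheduler;
+ * the exit_task callback above destroys it again.
+ */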
+s32 BPF_STRUCT_OPS_SLEEPABLE(create_dsq_init_task, struct task_struct *p,
+ struct scx_init_task_args *args)
+{
+ s32 err;
+
+ err = scx_bpf_create_dsq(p->pid, -1);
+ if (err)
+ scx_bpf_error("Failed to create DSQ for %s[%d]",
+ p->comm, p->pid);
+
+ return err;
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(create_dsq_init)
+{
+ u32 i;
+ s32 err;
+
+ bpf_for(i, 0, 1024) {
+ err = scx_bpf_create_dsq(i, -1);
+ if (err) {
+ scx_bpf_error("Failed to create DSQ %d", i);
+ return 0;
+ }
+ }
+
+ bpf_for(i, 0, 1024) {
+ scx_bpf_destroy_dsq(i);
+ }
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops create_dsq_ops = {
+ .init_task = create_dsq_init_task,
+ .exit_task = create_dsq_exit_task,
+ .init = create_dsq_init,
+ .name = "create_dsq",
+};
diff --git a/tools/testing/selftests/sched_ext/create_dsq.c b/tools/testing/selftests/sched_ext/create_dsq.c
new file mode 100644
index 000000000000..fa946d9146d4
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/create_dsq.c
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "create_dsq.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct create_dsq *skel;
+
+ skel = create_dsq__open_and_load();
+ if (!skel) {
+ SCX_ERR("Failed to open and load skel");
+ return SCX_TEST_FAIL;
+ }
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct create_dsq *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.create_dsq_ops);
+ if (!link) {
+ SCX_ERR("Failed to attach scheduler");
+ return SCX_TEST_FAIL;
+ }
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct create_dsq *skel = ctx;
+
+ create_dsq__destroy(skel);
+}
+
+struct scx_test create_dsq = {
+ .name = "create_dsq",
+ .description = "Create and destroy a dsq in a loop",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&create_dsq)
diff --git a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
new file mode 100644
index 000000000000..e97ad41d354a
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ */
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
+
+ if (cpu >= 0) {
+ /*
+		 * Dispatching to a bogus DSQ will fall back to the builtin
+		 * global DSQ, and the scheduler should fail gracefully.
+ */
+ scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
+ p->scx.dsq_vtime, 0);
+ return cpu;
+ }
+
+ return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops ddsp_bogus_dsq_fail_ops = {
+ .select_cpu = ddsp_bogus_dsq_fail_select_cpu,
+ .exit = ddsp_bogus_dsq_fail_exit,
+ .name = "ddsp_bogus_dsq_fail",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.c b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.c
new file mode 100644
index 000000000000..e65d22f23f3b
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.c
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "ddsp_bogus_dsq_fail.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct ddsp_bogus_dsq_fail *skel;
+
+ skel = ddsp_bogus_dsq_fail__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct ddsp_bogus_dsq_fail *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.ddsp_bogus_dsq_fail_ops);
+ SCX_FAIL_IF(!link, "Failed to attach struct_ops");
+
+ sleep(1);
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct ddsp_bogus_dsq_fail *skel = ctx;
+
+ ddsp_bogus_dsq_fail__destroy(skel);
+}
+
+struct scx_test ddsp_bogus_dsq_fail = {
+ .name = "ddsp_bogus_dsq_fail",
+ .description = "Verify we gracefully fail, and fall back to using a "
+ "built-in DSQ, if we do a direct dispatch to an invalid"
+ " DSQ in ops.select_cpu()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&ddsp_bogus_dsq_fail)
diff --git a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
new file mode 100644
index 000000000000..dde7e7dafbfb
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ */
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
+
+ if (cpu >= 0) {
+ /* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
+ scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+ p->scx.dsq_vtime, 0);
+ return cpu;
+ }
+
+ return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(ddsp_vtimelocal_fail_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops ddsp_vtimelocal_fail_ops = {
+ .select_cpu = ddsp_vtimelocal_fail_select_cpu,
+ .exit = ddsp_vtimelocal_fail_exit,
+ .name = "ddsp_vtimelocal_fail",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.c b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.c
new file mode 100644
index 000000000000..abafee587cd6
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.c
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <unistd.h>
+#include "ddsp_vtimelocal_fail.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct ddsp_vtimelocal_fail *skel;
+
+ skel = ddsp_vtimelocal_fail__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct ddsp_vtimelocal_fail *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.ddsp_vtimelocal_fail_ops);
+ SCX_FAIL_IF(!link, "Failed to attach struct_ops");
+
+ sleep(1);
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct ddsp_vtimelocal_fail *skel = ctx;
+
+ ddsp_vtimelocal_fail__destroy(skel);
+}
+
+struct scx_test ddsp_vtimelocal_fail = {
+ .name = "ddsp_vtimelocal_fail",
+ .description = "Verify we gracefully fail, and fall back to using a "
+ "built-in DSQ, if we do a direct vtime dispatch to a "
+ "built-in DSQ from DSQ in ops.select_cpu()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&ddsp_vtimelocal_fail)
diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
new file mode 100644
index 000000000000..efb4672decb4
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+const volatile s32 nr_cpus;
+
+UEI_DEFINE(uei);
+
+struct {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 8192);
+ __type(value, s32);
+} queue SEC(".maps");
+
+s32 BPF_STRUCT_OPS(dsp_local_on_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(dsp_local_on_enqueue, struct task_struct *p,
+ u64 enq_flags)
+{
+ s32 pid = p->pid;
+
+ if (bpf_map_push_elem(&queue, &pid, 0))
+ scx_bpf_error("Failed to enqueue %s[%d]", p->comm, p->pid);
+}
+
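+/*
+ * Pop a queued PID and dispatch that task directly to a random CPU's local
+ * DSQ via SCX_DSQ_LOCAL_ON, which is the mechanism this test exercises.
+ */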
+void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
+{
+ s32 pid, target;
+ struct task_struct *p;
+
+ if (bpf_map_pop_elem(&queue, &pid))
+ return;
+
+ p = bpf_task_from_pid(pid);
+ if (!p)
+ return;
+
+ target = bpf_get_prandom_u32() % nr_cpus;
+
+ scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
+ bpf_task_release(p);
+}
+
+void BPF_STRUCT_OPS(dsp_local_on_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops dsp_local_on_ops = {
+ .select_cpu = dsp_local_on_select_cpu,
+ .enqueue = dsp_local_on_enqueue,
+ .dispatch = dsp_local_on_dispatch,
+ .exit = dsp_local_on_exit,
+ .name = "dsp_local_on",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.c b/tools/testing/selftests/sched_ext/dsp_local_on.c
new file mode 100644
index 000000000000..472851b56854
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <unistd.h>
+#include "dsp_local_on.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct dsp_local_on *skel;
+
+ skel = dsp_local_on__open();
+ SCX_FAIL_IF(!skel, "Failed to open");
+
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+ SCX_FAIL_IF(dsp_local_on__load(skel), "Failed to load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct dsp_local_on *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.dsp_local_on_ops);
+ SCX_FAIL_IF(!link, "Failed to attach struct_ops");
+
+ /* Just sleeping is fine, plenty of scheduling events happening */
+ sleep(1);
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct dsp_local_on *skel = ctx;
+
+ dsp_local_on__destroy(skel);
+}
+
+struct scx_test dsp_local_on = {
+ .name = "dsp_local_on",
+ .description = "Verify we can directly dispatch tasks to a local DSQs "
+ "from osp.dispatch()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&dsp_local_on)
diff --git a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
new file mode 100644
index 000000000000..b0b99531d5d5
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that specifies SCX_OPS_ENQ_LAST without defining ops.enqueue(),
+ * and is therefore expected to fail to load.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC(".struct_ops.link")
+struct sched_ext_ops enq_last_no_enq_fails_ops = {
+ .name = "enq_last_no_enq_fails",
+ /* Need to define ops.enqueue() with SCX_OPS_ENQ_LAST */
+ .flags = SCX_OPS_ENQ_LAST,
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
new file mode 100644
index 000000000000..2a3eda5e2c0b
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "enq_last_no_enq_fails.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct enq_last_no_enq_fails *skel;
+
+ skel = enq_last_no_enq_fails__open_and_load();
+ if (!skel) {
+ SCX_ERR("Failed to open and load skel");
+ return SCX_TEST_FAIL;
+ }
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct enq_last_no_enq_fails *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.enq_last_no_enq_fails_ops);
+ if (link) {
+ SCX_ERR("Incorrectly succeeded in to attaching scheduler");
+ return SCX_TEST_FAIL;
+ }
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct enq_last_no_enq_fails *skel = ctx;
+
+ enq_last_no_enq_fails__destroy(skel);
+}
+
+struct scx_test enq_last_no_enq_fails = {
+ .name = "enq_last_no_enq_fails",
+ .description = "Verify we fail to load a scheduler if we specify "
+ "the SCX_OPS_ENQ_LAST flag without defining "
+ "ops.enqueue()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&enq_last_no_enq_fails)
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
new file mode 100644
index 000000000000..b3dfc1033cd6
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Manually specify the signature until the kfunc is added to the scx repo. */
+s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ bool *found) __ksym;
+
+s32 BPF_STRUCT_OPS(enq_select_cpu_fails_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
+ u64 enq_flags)
+{
+ /*
+ * Need to initialize the variable or the verifier will fail to load.
+ * Improving these semantics is actively being worked on.
+ */
+ bool found = false;
+
+ /* Can only call from ops.select_cpu() */
+ scx_bpf_select_cpu_dfl(p, 0, 0, &found);
+
+ scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops enq_select_cpu_fails_ops = {
+ .select_cpu = enq_select_cpu_fails_select_cpu,
+ .enqueue = enq_select_cpu_fails_enqueue,
+ .name = "enq_select_cpu_fails",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c
new file mode 100644
index 000000000000..dd1350e5f002
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "enq_select_cpu_fails.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct enq_select_cpu_fails *skel;
+
+ skel = enq_select_cpu_fails__open_and_load();
+ if (!skel) {
+ SCX_ERR("Failed to open and load skel");
+ return SCX_TEST_FAIL;
+ }
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct enq_select_cpu_fails *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.enq_select_cpu_fails_ops);
+ if (!link) {
+ SCX_ERR("Failed to attach scheduler");
+ return SCX_TEST_FAIL;
+ }
+
+ sleep(1);
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct enq_select_cpu_fails *skel = ctx;
+
+ enq_select_cpu_fails__destroy(skel);
+}
+
+struct scx_test enq_select_cpu_fails = {
+ .name = "enq_select_cpu_fails",
+ .description = "Verify we fail to call scx_bpf_select_cpu_dfl() "
+ "from ops.enqueue()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&enq_select_cpu_fails)
diff --git a/tools/testing/selftests/sched_ext/exit.bpf.c b/tools/testing/selftests/sched_ext/exit.bpf.c
new file mode 100644
index 000000000000..ae12ddaac921
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/exit.bpf.c
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+#include "exit_test.h"
+
+const volatile int exit_point;
+UEI_DEFINE(uei);
+
+#define EXIT_CLEANLY() scx_bpf_exit(exit_point, "%d", exit_point)
+
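+/*
+ * Each callback below exits via scx_bpf_exit() when it matches the
+ * configured exit_point, encoding the exit point in both the exit code and
+ * the message so user space can verify where the exit happened.
+ */
+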
+s32 BPF_STRUCT_OPS(exit_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ bool found;
+
+ if (exit_point == EXIT_SELECT_CPU)
+ EXIT_CLEANLY();
+
+ return scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
+}
+
+void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ if (exit_point == EXIT_ENQUEUE)
+ EXIT_CLEANLY();
+
+ scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+}
+
+void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
+{
+ if (exit_point == EXIT_DISPATCH)
+ EXIT_CLEANLY();
+
+ scx_bpf_consume(SCX_DSQ_GLOBAL);
+}
+
+void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
+{
+ if (exit_point == EXIT_ENABLE)
+ EXIT_CLEANLY();
+}
+
+s32 BPF_STRUCT_OPS(exit_init_task, struct task_struct *p,
+ struct scx_init_task_args *args)
+{
+ if (exit_point == EXIT_INIT_TASK)
+ EXIT_CLEANLY();
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(exit_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(exit_init)
+{
+ if (exit_point == EXIT_INIT)
+ EXIT_CLEANLY();
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops exit_ops = {
+ .select_cpu = exit_select_cpu,
+ .enqueue = exit_enqueue,
+ .dispatch = exit_dispatch,
+ .init_task = exit_init_task,
+ .enable = exit_enable,
+ .exit = exit_exit,
+ .init = exit_init,
+ .name = "exit",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/exit.c b/tools/testing/selftests/sched_ext/exit.c
new file mode 100644
index 000000000000..31bcd06e21cd
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/exit.c
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <bpf/bpf.h>
+#include <sched.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "exit.bpf.skel.h"
+#include "scx_test.h"
+
+#include "exit_test.h"
+
+static enum scx_test_status run(void *ctx)
+{
+ enum exit_test_case tc;
+
+ for (tc = 0; tc < NUM_EXITS; tc++) {
+ struct exit *skel;
+ struct bpf_link *link;
+ char buf[16];
+
+ skel = exit__open();
+ skel->rodata->exit_point = tc;
+ exit__load(skel);
+ link = bpf_map__attach_struct_ops(skel->maps.exit_ops);
+ if (!link) {
+ SCX_ERR("Failed to attach scheduler");
+ exit__destroy(skel);
+ return SCX_TEST_FAIL;
+ }
+
+ /* Assumes uei.kind is written last */
+ while (skel->data->uei.kind == EXIT_KIND(SCX_EXIT_NONE))
+ sched_yield();
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_UNREG_BPF));
+ SCX_EQ(skel->data->uei.exit_code, tc);
+ sprintf(buf, "%d", tc);
+ SCX_ASSERT(!strcmp(skel->data->uei.msg, buf));
+ bpf_link__destroy(link);
+ exit__destroy(skel);
+ }
+
+ return SCX_TEST_PASS;
+}
+
+struct scx_test exit_test = {
+ .name = "exit",
+ .description = "Verify we can cleanly exit a scheduler in multiple places",
+ .run = run,
+};
+REGISTER_SCX_TEST(&exit_test)
diff --git a/tools/testing/selftests/sched_ext/exit_test.h b/tools/testing/selftests/sched_ext/exit_test.h
new file mode 100644
index 000000000000..94f0268b9cb8
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/exit_test.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+
+#ifndef __EXIT_TEST_H__
+#define __EXIT_TEST_H__
+
+enum exit_test_case {
+ EXIT_SELECT_CPU,
+ EXIT_ENQUEUE,
+ EXIT_DISPATCH,
+ EXIT_ENABLE,
+ EXIT_INIT_TASK,
+ EXIT_INIT,
+ NUM_EXITS,
+};
+
+#endif /* __EXIT_TEST_H__ */
diff --git a/tools/testing/selftests/sched_ext/hotplug.bpf.c b/tools/testing/selftests/sched_ext/hotplug.bpf.c
new file mode 100644
index 000000000000..8f2601db39f3
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/hotplug.bpf.c
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+#include "hotplug_test.h"
+
+UEI_DEFINE(uei);
+
+void BPF_STRUCT_OPS(hotplug_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+static void exit_from_hotplug(s32 cpu, bool onlining)
+{
+ /*
+ * Ignored, just used to verify that we can invoke blocking kfuncs
+ * from the hotplug path.
+ */
+ scx_bpf_create_dsq(0, -1);
+
+ s64 code = SCX_ECODE_ACT_RESTART | HOTPLUG_EXIT_RSN;
+
+ if (onlining)
+ code |= HOTPLUG_ONLINING;
+
+ scx_bpf_exit(code, "hotplug event detected (%d going %s)", cpu,
+ onlining ? "online" : "offline");
+}
+
+void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_online, s32 cpu)
+{
+ exit_from_hotplug(cpu, true);
+}
+
+void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_offline, s32 cpu)
+{
+ exit_from_hotplug(cpu, false);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops hotplug_cb_ops = {
+ .cpu_online = hotplug_cpu_online,
+ .cpu_offline = hotplug_cpu_offline,
+ .exit = hotplug_exit,
+ .name = "hotplug_cbs",
+ .timeout_ms = 1000U,
+};
+
+SEC(".struct_ops.link")
+struct sched_ext_ops hotplug_nocb_ops = {
+ .exit = hotplug_exit,
+ .name = "hotplug_nocbs",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/hotplug.c b/tools/testing/selftests/sched_ext/hotplug.c
new file mode 100644
index 000000000000..87bf220b1bce
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/hotplug.c
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <bpf/bpf.h>
+#include <sched.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "hotplug_test.h"
+#include "hotplug.bpf.skel.h"
+#include "scx_test.h"
+#include "util.h"
+
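+/*
+ * The test toggles cpu1's hotplug state, so it implicitly requires a
+ * machine with at least two CPUs and CPU hotplug support.
+ */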
+const char *online_path = "/sys/devices/system/cpu/cpu1/online";
+
+static bool is_cpu_online(void)
+{
+ return file_read_long(online_path) > 0;
+}
+
+static void toggle_online_status(bool online)
+{
+ long val = online ? 1 : 0;
+ int ret;
+
+ ret = file_write_long(online_path, val);
+ if (ret != 0)
+		fprintf(stderr, "Failed to bring CPU %s (%s)\n",
+			online ? "online" : "offline", strerror(errno));
+}
+
+static enum scx_test_status setup(void **ctx)
+{
+ if (!is_cpu_online())
+ return SCX_TEST_SKIP;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status test_hotplug(bool onlining, bool cbs_defined)
+{
+ struct hotplug *skel;
+ struct bpf_link *link;
+ long kind, code;
+
+ SCX_ASSERT(is_cpu_online());
+
+ skel = hotplug__open_and_load();
+ SCX_ASSERT(skel);
+
+ /* Testing the offline -> online path, so go offline before starting */
+ if (onlining)
+ toggle_online_status(0);
+
+ if (cbs_defined) {
+ kind = SCX_KIND_VAL(SCX_EXIT_UNREG_BPF);
+ code = SCX_ECODE_VAL(SCX_ECODE_ACT_RESTART) | HOTPLUG_EXIT_RSN;
+ if (onlining)
+ code |= HOTPLUG_ONLINING;
+ } else {
+ kind = SCX_KIND_VAL(SCX_EXIT_UNREG_KERN);
+ code = SCX_ECODE_VAL(SCX_ECODE_ACT_RESTART) |
+ SCX_ECODE_VAL(SCX_ECODE_RSN_HOTPLUG);
+ }
+
+ if (cbs_defined)
+ link = bpf_map__attach_struct_ops(skel->maps.hotplug_cb_ops);
+ else
+ link = bpf_map__attach_struct_ops(skel->maps.hotplug_nocb_ops);
+
+ if (!link) {
+ SCX_ERR("Failed to attach scheduler");
+ hotplug__destroy(skel);
+ return SCX_TEST_FAIL;
+ }
+
+ toggle_online_status(onlining ? 1 : 0);
+
+ while (!UEI_EXITED(skel, uei))
+ sched_yield();
+
+ SCX_EQ(skel->data->uei.kind, kind);
+ SCX_EQ(UEI_REPORT(skel, uei), code);
+
+ if (!onlining)
+ toggle_online_status(1);
+
+ bpf_link__destroy(link);
+ hotplug__destroy(skel);
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status test_hotplug_attach(void)
+{
+ struct hotplug *skel;
+ struct bpf_link *link;
+ enum scx_test_status status = SCX_TEST_PASS;
+ long kind, code;
+
+ SCX_ASSERT(is_cpu_online());
+ SCX_ASSERT(scx_hotplug_seq() > 0);
+
+ skel = SCX_OPS_OPEN(hotplug_nocb_ops, hotplug);
+ SCX_ASSERT(skel);
+
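+	/*
+	 * SCX_OPS_LOAD() is expected to record the current hotplug seq in
+	 * the ops, letting the kernel detect hotplug races at enable time.
+	 */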
+ SCX_OPS_LOAD(skel, hotplug_nocb_ops, hotplug, uei);
+
+ /*
+ * Take the CPU offline to increment the global hotplug seq, which
+	 * should cause the scheduler to be ejected with a hotplug exit code,
+	 * as the hotplug seq recorded at load time above will be stale.
+	 */
+ toggle_online_status(0);
+ link = bpf_map__attach_struct_ops(skel->maps.hotplug_nocb_ops);
+
+ toggle_online_status(1);
+
+ SCX_ASSERT(link);
+ while (!UEI_EXITED(skel, uei))
+ sched_yield();
+
+ kind = SCX_KIND_VAL(SCX_EXIT_UNREG_KERN);
+ code = SCX_ECODE_VAL(SCX_ECODE_ACT_RESTART) |
+ SCX_ECODE_VAL(SCX_ECODE_RSN_HOTPLUG);
+ SCX_EQ(skel->data->uei.kind, kind);
+ SCX_EQ(UEI_REPORT(skel, uei), code);
+
+ bpf_link__destroy(link);
+ hotplug__destroy(skel);
+
+ return status;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+#define HP_TEST(__onlining, __cbs_defined) ({ \
+ if (test_hotplug(__onlining, __cbs_defined) != SCX_TEST_PASS) \
+ return SCX_TEST_FAIL; \
+})
+
+ HP_TEST(true, true);
+ HP_TEST(false, true);
+ HP_TEST(true, false);
+ HP_TEST(false, false);
+
+#undef HP_TEST
+
+ return test_hotplug_attach();
+}
+
+static void cleanup(void *ctx)
+{
+ toggle_online_status(1);
+}
+
+struct scx_test hotplug_test = {
+ .name = "hotplug",
+ .description = "Verify hotplug behavior",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&hotplug_test)
diff --git a/tools/testing/selftests/sched_ext/hotplug_test.h b/tools/testing/selftests/sched_ext/hotplug_test.h
new file mode 100644
index 000000000000..73d236f90787
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/hotplug_test.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+
+#ifndef __HOTPLUG_TEST_H__
+#define __HOTPLUG_TEST_H__
+
+enum hotplug_test_flags {
+ HOTPLUG_EXIT_RSN = 1LLU << 0,
+ HOTPLUG_ONLINING = 1LLU << 1,
+};
+
+#endif /* __HOTPLUG_TEST_H__ */
diff --git a/tools/testing/selftests/sched_ext/init_enable_count.bpf.c b/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
new file mode 100644
index 000000000000..47ea89a626c3
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that verifies that init, enable, etc. callbacks are invoked
+ * and counted correctly.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+u64 init_task_cnt, exit_task_cnt, enable_cnt, disable_cnt;
+u64 init_fork_cnt, init_transition_cnt;
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(cnt_init_task, struct task_struct *p,
+ struct scx_init_task_args *args)
+{
+ __sync_fetch_and_add(&init_task_cnt, 1);
+
+ if (args->fork)
+ __sync_fetch_and_add(&init_fork_cnt, 1);
+ else
+ __sync_fetch_and_add(&init_transition_cnt, 1);
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(cnt_exit_task, struct task_struct *p)
+{
+ __sync_fetch_and_add(&exit_task_cnt, 1);
+}
+
+void BPF_STRUCT_OPS(cnt_enable, struct task_struct *p)
+{
+ __sync_fetch_and_add(&enable_cnt, 1);
+}
+
+void BPF_STRUCT_OPS(cnt_disable, struct task_struct *p)
+{
+ __sync_fetch_and_add(&disable_cnt, 1);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops init_enable_count_ops = {
+ .init_task = cnt_init_task,
+ .exit_task = cnt_exit_task,
+ .enable = cnt_enable,
+ .disable = cnt_disable,
+ .name = "init_enable_count",
+};
diff --git a/tools/testing/selftests/sched_ext/init_enable_count.c b/tools/testing/selftests/sched_ext/init_enable_count.c
new file mode 100644
index 000000000000..97d45f1e5597
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/init_enable_count.c
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <sched.h>
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include "scx_test.h"
+#include "init_enable_count.bpf.skel.h"
+
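+/* The SCHED_EXT policy number, which may not yet be in the system headers. */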
+#define SCHED_EXT 7
+
+static struct init_enable_count *
+open_load_prog(bool global)
+{
+ struct init_enable_count *skel;
+
+ skel = init_enable_count__open();
+ SCX_BUG_ON(!skel, "Failed to open skel");
+
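+	/*
+	 * With SCX_OPS_SWITCH_PARTIAL, only tasks that explicitly switch
+	 * to SCHED_EXT are enabled on this scheduler. ops.init_task() is
+	 * still invoked for every task either way.
+	 */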
+ if (!global)
+ skel->struct_ops.init_enable_count_ops->flags |= SCX_OPS_SWITCH_PARTIAL;
+
+ SCX_BUG_ON(init_enable_count__load(skel), "Failed to load skel");
+
+ return skel;
+}
+
+static enum scx_test_status run_test(bool global)
+{
+ struct init_enable_count *skel;
+ struct bpf_link *link;
+ const u32 num_children = 5, num_pre_forks = 1024;
+ int ret, i, status;
+ struct sched_param param = {};
+ pid_t pids[num_pre_forks];
+
+ skel = open_load_prog(global);
+
+ /*
+ * Fork a bunch of children before we attach the scheduler so that we
+ * ensure (at least in practical terms) that there are more tasks that
+ * transition from SCHED_OTHER -> SCHED_EXT than there are tasks that
+ * take the fork() path either below or in other processes.
+ */
+ for (i = 0; i < num_pre_forks; i++) {
+ pids[i] = fork();
+ SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");
+ if (pids[i] == 0) {
+ sleep(1);
+ exit(0);
+ }
+ }
+
+ link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
+ SCX_FAIL_IF(!link, "Failed to attach struct_ops");
+
+ for (i = 0; i < num_pre_forks; i++) {
+ SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
+ "Failed to wait for pre-forked child\n");
+
+ SCX_FAIL_IF(status != 0, "Pre-forked child %d exited with status %d\n", i,
+ status);
+ }
+
+ bpf_link__destroy(link);
+ SCX_GE(skel->bss->init_task_cnt, num_pre_forks);
+ SCX_GE(skel->bss->exit_task_cnt, num_pre_forks);
+
+ link = bpf_map__attach_struct_ops(skel->maps.init_enable_count_ops);
+ SCX_FAIL_IF(!link, "Failed to attach struct_ops");
+
+ /* SCHED_EXT children */
+ for (i = 0; i < num_children; i++) {
+ pids[i] = fork();
+ SCX_FAIL_IF(pids[i] < 0, "Failed to fork child");
+
+ if (pids[i] == 0) {
+ ret = sched_setscheduler(0, SCHED_EXT, &param);
+ SCX_BUG_ON(ret, "Failed to set sched to sched_ext");
+
+ /*
+ * Reset to SCHED_OTHER for half of them. Counts for
+ * everything should still be the same regardless, as
+ * ops.disable() is invoked even if a task is still on
+ * SCHED_EXT before it exits.
+ */
+ if (i % 2 == 0) {
+ ret = sched_setscheduler(0, SCHED_OTHER, &param);
+ SCX_BUG_ON(ret, "Failed to reset sched to normal");
+ }
+ exit(0);
+ }
+ }
+ for (i = 0; i < num_children; i++) {
+ SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
+ "Failed to wait for SCX child\n");
+
+ SCX_FAIL_IF(status != 0, "SCX child %d exited with status %d\n", i,
+ status);
+ }
+
+ /* SCHED_OTHER children */
+ for (i = 0; i < num_children; i++) {
+ pids[i] = fork();
+ if (pids[i] == 0)
+ exit(0);
+ }
+
+ for (i = 0; i < num_children; i++) {
+ SCX_FAIL_IF(waitpid(pids[i], &status, 0) != pids[i],
+ "Failed to wait for normal child\n");
+
+ SCX_FAIL_IF(status != 0, "Normal child %d exited with status %d\n", i,
+ status);
+ }
+
+ bpf_link__destroy(link);
+
+ SCX_GE(skel->bss->init_task_cnt, 2 * num_children);
+ SCX_GE(skel->bss->exit_task_cnt, 2 * num_children);
+
+ if (global) {
+ SCX_GE(skel->bss->enable_cnt, 2 * num_children);
+ SCX_GE(skel->bss->disable_cnt, 2 * num_children);
+ } else {
+ SCX_EQ(skel->bss->enable_cnt, num_children);
+ SCX_EQ(skel->bss->disable_cnt, num_children);
+ }
+ /*
+ * We forked a ton of tasks before we attached the scheduler above, so
+ * this should be fine. Technically it could be flaky if a ton of forks
+ * are happening at the same time in other processes, but that should
+ * be exceedingly unlikely.
+ */
+ SCX_GT(skel->bss->init_transition_cnt, skel->bss->init_fork_cnt);
+ SCX_GE(skel->bss->init_fork_cnt, 2 * num_children);
+
+ init_enable_count__destroy(skel);
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ enum scx_test_status status;
+
+ status = run_test(true);
+ if (status != SCX_TEST_PASS)
+ return status;
+
+ return run_test(false);
+}
+
+struct scx_test init_enable_count = {
+ .name = "init_enable_count",
+	.description = "Verify that init, enable, etc. callbacks are invoked "
+		       "the correct number of times",
+ .run = run,
+};
+REGISTER_SCX_TEST(&init_enable_count)
diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
new file mode 100644
index 000000000000..00bfa9cb95d3
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that defines every struct sched_ext_ops callback.
+ *
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
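+/*
+ * Every callback below is a minimal stub. enqueue/dispatch implement a
+ * trivial global-DSQ scheduler so that the system remains schedulable
+ * while the struct_ops is attached.
+ */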
+s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
+ u64 wake_flags)
+{
+ return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+}
+
+void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
+{}
+
+void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
+{
+ scx_bpf_consume(SCX_DSQ_GLOBAL);
+}
+
+void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
+{}
+
+void BPF_STRUCT_OPS(maximal_running, struct task_struct *p)
+{}
+
+void BPF_STRUCT_OPS(maximal_stopping, struct task_struct *p, bool runnable)
+{}
+
+void BPF_STRUCT_OPS(maximal_quiescent, struct task_struct *p, u64 deq_flags)
+{}
+
+bool BPF_STRUCT_OPS(maximal_yield, struct task_struct *from,
+ struct task_struct *to)
+{
+ return false;
+}
+
+bool BPF_STRUCT_OPS(maximal_core_sched_before, struct task_struct *a,
+ struct task_struct *b)
+{
+ return false;
+}
+
+void BPF_STRUCT_OPS(maximal_set_weight, struct task_struct *p, u32 weight)
+{}
+
+void BPF_STRUCT_OPS(maximal_set_cpumask, struct task_struct *p,
+ const struct cpumask *cpumask)
+{}
+
+void BPF_STRUCT_OPS(maximal_update_idle, s32 cpu, bool idle)
+{}
+
+void BPF_STRUCT_OPS(maximal_cpu_acquire, s32 cpu,
+ struct scx_cpu_acquire_args *args)
+{}
+
+void BPF_STRUCT_OPS(maximal_cpu_release, s32 cpu,
+ struct scx_cpu_release_args *args)
+{}
+
+void BPF_STRUCT_OPS(maximal_cpu_online, s32 cpu)
+{}
+
+void BPF_STRUCT_OPS(maximal_cpu_offline, s32 cpu)
+{}
+
+s32 BPF_STRUCT_OPS(maximal_init_task, struct task_struct *p,
+ struct scx_init_task_args *args)
+{
+ return 0;
+}
+
+void BPF_STRUCT_OPS(maximal_enable, struct task_struct *p)
+{}
+
+void BPF_STRUCT_OPS(maximal_exit_task, struct task_struct *p,
+ struct scx_exit_task_args *args)
+{}
+
+void BPF_STRUCT_OPS(maximal_disable, struct task_struct *p)
+{}
+
+s32 BPF_STRUCT_OPS(maximal_cgroup_init, struct cgroup *cgrp,
+ struct scx_cgroup_init_args *args)
+{
+ return 0;
+}
+
+void BPF_STRUCT_OPS(maximal_cgroup_exit, struct cgroup *cgrp)
+{}
+
+s32 BPF_STRUCT_OPS(maximal_cgroup_prep_move, struct task_struct *p,
+ struct cgroup *from, struct cgroup *to)
+{
+ return 0;
+}
+
+void BPF_STRUCT_OPS(maximal_cgroup_move, struct task_struct *p,
+ struct cgroup *from, struct cgroup *to)
+{}
+
+void BPF_STRUCT_OPS(maximal_cgroup_cancel_move, struct task_struct *p,
+ struct cgroup *from, struct cgroup *to)
+{}
+
+void BPF_STRUCT_OPS(maximal_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
+{}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init)
+{
+ return 0;
+}
+
+void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
+{}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops maximal_ops = {
+ .select_cpu = maximal_select_cpu,
+ .enqueue = maximal_enqueue,
+ .dequeue = maximal_dequeue,
+ .dispatch = maximal_dispatch,
+ .runnable = maximal_runnable,
+ .running = maximal_running,
+ .stopping = maximal_stopping,
+ .quiescent = maximal_quiescent,
+ .yield = maximal_yield,
+ .core_sched_before = maximal_core_sched_before,
+ .set_weight = maximal_set_weight,
+ .set_cpumask = maximal_set_cpumask,
+ .update_idle = maximal_update_idle,
+ .cpu_acquire = maximal_cpu_acquire,
+ .cpu_release = maximal_cpu_release,
+ .cpu_online = maximal_cpu_online,
+ .cpu_offline = maximal_cpu_offline,
+ .init_task = maximal_init_task,
+ .enable = maximal_enable,
+ .exit_task = maximal_exit_task,
+ .disable = maximal_disable,
+ .cgroup_init = maximal_cgroup_init,
+ .cgroup_exit = maximal_cgroup_exit,
+ .cgroup_prep_move = maximal_cgroup_prep_move,
+ .cgroup_move = maximal_cgroup_move,
+ .cgroup_cancel_move = maximal_cgroup_cancel_move,
+ .cgroup_set_weight = maximal_cgroup_set_weight,
+ .init = maximal_init,
+ .exit = maximal_exit,
+ .name = "maximal",
+};
diff --git a/tools/testing/selftests/sched_ext/maximal.c b/tools/testing/selftests/sched_ext/maximal.c
new file mode 100644
index 000000000000..f38fc973c380
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/maximal.c
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "maximal.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct maximal *skel;
+
+ skel = maximal__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct maximal *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.maximal_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct maximal *skel = ctx;
+
+ maximal__destroy(skel);
+}
+
+struct scx_test maximal = {
+ .name = "maximal",
+ .description = "Verify we can load a scheduler with every callback defined",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&maximal)
diff --git a/tools/testing/selftests/sched_ext/maybe_null.bpf.c b/tools/testing/selftests/sched_ext/maybe_null.bpf.c
new file mode 100644
index 000000000000..27d0f386acfb
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/maybe_null.bpf.c
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+u64 vtime_test;
+
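+/*
+ * The task argument to ops.dispatch() and the @to argument to ops.yield()
+ * may be NULL (PTR_MAYBE_NULL), so they must be NULL checked before being
+ * dereferenced. The _fail variants of this test omit those checks and are
+ * expected to be rejected by the verifier.
+ */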
+void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p)
+{}
+
+void BPF_STRUCT_OPS(maybe_null_success_dispatch, s32 cpu, struct task_struct *p)
+{
+ if (p != NULL)
+ vtime_test = p->scx.dsq_vtime;
+}
+
+bool BPF_STRUCT_OPS(maybe_null_success_yield, struct task_struct *from,
+ struct task_struct *to)
+{
+ if (to)
+ bpf_printk("Yielding to %s[%d]", to->comm, to->pid);
+
+ return false;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops maybe_null_success = {
+ .dispatch = maybe_null_success_dispatch,
+ .yield = maybe_null_success_yield,
+ .enable = maybe_null_running,
+	.name = "maybe_null_success",
+};
diff --git a/tools/testing/selftests/sched_ext/maybe_null.c b/tools/testing/selftests/sched_ext/maybe_null.c
new file mode 100644
index 000000000000..31cfafb0cf65
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/maybe_null.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "maybe_null.bpf.skel.h"
+#include "maybe_null_fail_dsp.bpf.skel.h"
+#include "maybe_null_fail_yld.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status run(void *ctx)
+{
+ struct maybe_null *skel;
+ struct maybe_null_fail_dsp *fail_dsp;
+ struct maybe_null_fail_yld *fail_yld;
+
+ skel = maybe_null__open_and_load();
+ if (!skel) {
+ SCX_ERR("Failed to open and load maybe_null skel");
+ return SCX_TEST_FAIL;
+ }
+ maybe_null__destroy(skel);
+
+ fail_dsp = maybe_null_fail_dsp__open_and_load();
+ if (fail_dsp) {
+ maybe_null_fail_dsp__destroy(fail_dsp);
+		SCX_ERR("Should have failed to open and load maybe_null_fail_dsp skel");
+ return SCX_TEST_FAIL;
+ }
+
+ fail_yld = maybe_null_fail_yld__open_and_load();
+ if (fail_yld) {
+ maybe_null_fail_yld__destroy(fail_yld);
+		SCX_ERR("Should have failed to open and load maybe_null_fail_yld skel");
+ return SCX_TEST_FAIL;
+ }
+
+ return SCX_TEST_PASS;
+}
+
+struct scx_test maybe_null = {
+ .name = "maybe_null",
+	.description = "Verify that PTR_MAYBE_NULL works for .dispatch and .yield",
+ .run = run,
+};
+REGISTER_SCX_TEST(&maybe_null)
diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
new file mode 100644
index 000000000000..c0641050271d
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+u64 vtime_test;
+
+void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p)
+{}
+
+void BPF_STRUCT_OPS(maybe_null_fail_dispatch, s32 cpu, struct task_struct *p)
+{
+ vtime_test = p->scx.dsq_vtime;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops maybe_null_fail = {
+ .dispatch = maybe_null_fail_dispatch,
+ .enable = maybe_null_running,
+ .name = "maybe_null_fail_dispatch",
+};
diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
new file mode 100644
index 000000000000..3c1740028e3b
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+u64 vtime_test;
+
+void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p)
+{}
+
+bool BPF_STRUCT_OPS(maybe_null_fail_yield, struct task_struct *from,
+ struct task_struct *to)
+{
+ bpf_printk("Yielding to %s[%d]", to->comm, to->pid);
+
+ return false;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops maybe_null_fail = {
+ .yield = maybe_null_fail_yield,
+ .enable = maybe_null_running,
+ .name = "maybe_null_fail_yield",
+};
diff --git a/tools/testing/selftests/sched_ext/minimal.bpf.c b/tools/testing/selftests/sched_ext/minimal.bpf.c
new file mode 100644
index 000000000000..6a7eccef0104
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/minimal.bpf.c
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A completely minimal scheduler.
+ *
+ * This scheduler defines the absolute minimal set of struct sched_ext_ops
+ * fields: its name. It should _not_ fail to be loaded, and can be used to
+ * exercise the default scheduling paths in ext.c.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC(".struct_ops.link")
+struct sched_ext_ops minimal_ops = {
+ .name = "minimal",
+};
diff --git a/tools/testing/selftests/sched_ext/minimal.c b/tools/testing/selftests/sched_ext/minimal.c
new file mode 100644
index 000000000000..6c5db8ebbf8a
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/minimal.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "minimal.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct minimal *skel;
+
+ skel = minimal__open_and_load();
+ if (!skel) {
+ SCX_ERR("Failed to open and load skel");
+ return SCX_TEST_FAIL;
+ }
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct minimal *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.minimal_ops);
+ if (!link) {
+ SCX_ERR("Failed to attach scheduler");
+ return SCX_TEST_FAIL;
+ }
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct minimal *skel = ctx;
+
+ minimal__destroy(skel);
+}
+
+struct scx_test minimal = {
+ .name = "minimal",
+ .description = "Verify we can load a fully minimal scheduler",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&minimal)
diff --git a/tools/testing/selftests/sched_ext/prog_run.bpf.c b/tools/testing/selftests/sched_ext/prog_run.bpf.c
new file mode 100644
index 000000000000..6a4d7c48e3f2
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/prog_run.bpf.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that validates that we can invoke sched_ext kfuncs in
+ * BPF_PROG_TYPE_SYSCALL programs.
+ *
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+
+#include <scx/common.bpf.h>
+
+UEI_DEFINE(uei);
+
+char _license[] SEC("license") = "GPL";
+
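+/*
+ * A BPF_PROG_TYPE_SYSCALL program which userspace runs via
+ * bpf_prog_test_run_opts() to verify that sched_ext kfuncs are callable
+ * from this program type.
+ */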
+SEC("syscall")
+int BPF_PROG(prog_run_syscall)
+{
+ scx_bpf_create_dsq(0, -1);
+ scx_bpf_exit(0xdeadbeef, "Exited from PROG_RUN");
+ return 0;
+}
+
+void BPF_STRUCT_OPS(prog_run_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops prog_run_ops = {
+ .exit = prog_run_exit,
+ .name = "prog_run",
+};
diff --git a/tools/testing/selftests/sched_ext/prog_run.c b/tools/testing/selftests/sched_ext/prog_run.c
new file mode 100644
index 000000000000..3cd57ef8daaa
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/prog_run.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <bpf/bpf.h>
+#include <sched.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "prog_run.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct prog_run *skel;
+
+ skel = prog_run__open_and_load();
+ if (!skel) {
+ SCX_ERR("Failed to open and load skel");
+ return SCX_TEST_FAIL;
+ }
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct prog_run *skel = ctx;
+ struct bpf_link *link;
+ int prog_fd, err = 0;
+
+ prog_fd = bpf_program__fd(skel->progs.prog_run_syscall);
+ if (prog_fd < 0) {
+ SCX_ERR("Failed to get BPF_PROG_RUN prog");
+ return SCX_TEST_FAIL;
+ }
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ link = bpf_map__attach_struct_ops(skel->maps.prog_run_ops);
+ if (!link) {
+ SCX_ERR("Failed to attach scheduler");
+ close(prog_fd);
+ return SCX_TEST_FAIL;
+ }
+
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ SCX_EQ(err, 0);
+
+ /* Assumes uei.kind is written last */
+ while (skel->data->uei.kind == EXIT_KIND(SCX_EXIT_NONE))
+ sched_yield();
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_UNREG_BPF));
+ SCX_EQ(skel->data->uei.exit_code, 0xdeadbeef);
+ close(prog_fd);
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct prog_run *skel = ctx;
+
+ prog_run__destroy(skel);
+}
+
+struct scx_test prog_run = {
+ .name = "prog_run",
+ .description = "Verify we can call into a scheduler with BPF_PROG_RUN, and invoke kfuncs",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&prog_run)
diff --git a/tools/testing/selftests/sched_ext/reload_loop.c b/tools/testing/selftests/sched_ext/reload_loop.c
new file mode 100644
index 000000000000..5cfba2d6e056
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/reload_loop.c
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <bpf/bpf.h>
+#include <pthread.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "maximal.bpf.skel.h"
+#include "scx_test.h"
+
+static struct maximal *skel;
+static pthread_t threads[2];
+
+static bool force_exit = false;
+
+static enum scx_test_status setup(void **ctx)
+{
+ skel = maximal__open_and_load();
+ if (!skel) {
+ SCX_ERR("Failed to open and load skel");
+ return SCX_TEST_FAIL;
+ }
+
+ return SCX_TEST_PASS;
+}
+
+static void *do_reload_loop(void *arg)
+{
+ u32 i;
+
+ for (i = 0; i < 1024 && !force_exit; i++) {
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.maximal_ops);
+ if (link)
+ bpf_link__destroy(link);
+ }
+
+ return NULL;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ int err;
+ void *ret;
+
+ err = pthread_create(&threads[0], NULL, do_reload_loop, NULL);
+ SCX_FAIL_IF(err, "Failed to create thread 0");
+
+ err = pthread_create(&threads[1], NULL, do_reload_loop, NULL);
+ SCX_FAIL_IF(err, "Failed to create thread 1");
+
+ SCX_FAIL_IF(pthread_join(threads[0], &ret), "thread 0 failed");
+ SCX_FAIL_IF(pthread_join(threads[1], &ret), "thread 1 failed");
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ force_exit = true;
+ maximal__destroy(skel);
+}
+
+struct scx_test reload_loop = {
+ .name = "reload_loop",
+ .description = "Stress test loading and unloading schedulers repeatedly in a tight loop",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&reload_loop)
diff --git a/tools/testing/selftests/sched_ext/runner.c b/tools/testing/selftests/sched_ext/runner.c
new file mode 100644
index 000000000000..eab48c7ff309
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/runner.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+#include "scx_test.h"
+
+const char help_fmt[] =
+"The runner for sched_ext tests.\n"
+"\n"
+"The runner is statically linked against all testcases, and runs them all serially.\n"
+"It's required for the testcases to be serial, as only a single host-wide sched_ext\n"
+"scheduler may be loaded at any given time."
+"\n"
+"Usage: %s [-t TEST] [-h]\n"
+"\n"
+" -t TEST Only run tests whose name includes this string\n"
+" -s Include print output for skipped tests\n"
+" -q Don't print the test descriptions during run\n"
+" -h Display this help and exit\n";
+
+static volatile int exit_req;
+static bool quiet, print_skipped;
+
+#define MAX_SCX_TESTS 2048
+
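+/*
+ * Testcases add themselves to this array at program startup, via the
+ * constructors emitted by REGISTER_SCX_TEST().
+ */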
+static struct scx_test __scx_tests[MAX_SCX_TESTS];
+static unsigned __scx_num_tests = 0;
+
+static void sigint_handler(int sig)
+{
+ exit_req = 1;
+}
+
+static void print_test_preamble(const struct scx_test *test, bool quiet)
+{
+ printf("===== START =====\n");
+ printf("TEST: %s\n", test->name);
+ if (!quiet)
+ printf("DESCRIPTION: %s\n", test->description);
+ printf("OUTPUT:\n");
+}
+
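+/* Results are reported in a loosely TAP-like "ok" / "not ok" format. */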
+static const char *status_to_result(enum scx_test_status status)
+{
+ switch (status) {
+ case SCX_TEST_PASS:
+ case SCX_TEST_SKIP:
+ return "ok";
+ case SCX_TEST_FAIL:
+ return "not ok";
+ default:
+ return "<UNKNOWN>";
+ }
+}
+
+static void print_test_result(const struct scx_test *test,
+ enum scx_test_status status,
+ unsigned int testnum)
+{
+	const char *result = status_to_result(status);
+	const char *directive = status == SCX_TEST_SKIP ? " # SKIP" : "";
+
+	printf("%s %u %s%s\n", result, testnum, test->name, directive);
+ printf("===== END =====\n");
+}
+
+static bool should_skip_test(const struct scx_test *test, const char *filter)
+{
+ return !strstr(test->name, filter);
+}
+
+static enum scx_test_status run_test(const struct scx_test *test)
+{
+ enum scx_test_status status;
+ void *context = NULL;
+
+ if (test->setup) {
+ status = test->setup(&context);
+ if (status != SCX_TEST_PASS)
+ return status;
+ }
+
+ status = test->run(context);
+
+ if (test->cleanup)
+ test->cleanup(context);
+
+ return status;
+}
+
+static bool test_valid(const struct scx_test *test)
+{
+ if (!test) {
+ fprintf(stderr, "NULL test detected\n");
+ return false;
+ }
+
+ if (!test->name) {
+ fprintf(stderr,
+ "Test with no name found. Must specify test name.\n");
+ return false;
+ }
+
+ if (!test->description) {
+ fprintf(stderr, "Test %s requires description.\n", test->name);
+ return false;
+ }
+
+ if (!test->run) {
+ fprintf(stderr, "Test %s has no run() callback\n", test->name);
+ return false;
+ }
+
+ return true;
+}
+
+int main(int argc, char **argv)
+{
+ const char *filter = NULL;
+ unsigned testnum = 0, i;
+ unsigned passed = 0, skipped = 0, failed = 0;
+ int opt;
+
+ signal(SIGINT, sigint_handler);
+ signal(SIGTERM, sigint_handler);
+
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
+ while ((opt = getopt(argc, argv, "qst:h")) != -1) {
+ switch (opt) {
+ case 'q':
+ quiet = true;
+ break;
+ case 's':
+ print_skipped = true;
+ break;
+ case 't':
+ filter = optarg;
+ break;
+ default:
+ fprintf(stderr, help_fmt, basename(argv[0]));
+ return opt != 'h';
+ }
+ }
+
+ for (i = 0; i < __scx_num_tests; i++) {
+ enum scx_test_status status;
+ struct scx_test *test = &__scx_tests[i];
+
+ if (filter && should_skip_test(test, filter)) {
+ /*
+ * Printing the skipped tests and their preambles can
+ * add a lot of noise to the runner output. Printing
+ * this is only really useful for CI, so let's skip it
+ * by default.
+ */
+ if (print_skipped) {
+ print_test_preamble(test, quiet);
+ print_test_result(test, SCX_TEST_SKIP, ++testnum);
+ }
+ continue;
+ }
+
+ print_test_preamble(test, quiet);
+ status = run_test(test);
+ print_test_result(test, status, ++testnum);
+ switch (status) {
+ case SCX_TEST_PASS:
+ passed++;
+ break;
+ case SCX_TEST_SKIP:
+ skipped++;
+ break;
+ case SCX_TEST_FAIL:
+ failed++;
+ break;
+ }
+ }
+ printf("\n\n=============================\n\n");
+ printf("RESULTS:\n\n");
+ printf("PASSED: %u\n", passed);
+ printf("SKIPPED: %u\n", skipped);
+ printf("FAILED: %u\n", failed);
+
+	return failed ? 1 : 0;
+}
+
+void scx_test_register(struct scx_test *test)
+{
+ SCX_BUG_ON(!test_valid(test), "Invalid test found");
+ SCX_BUG_ON(__scx_num_tests >= MAX_SCX_TESTS, "Maximum tests exceeded");
+
+ __scx_tests[__scx_num_tests++] = *test;
+}
diff --git a/tools/testing/selftests/sched_ext/scx_test.h b/tools/testing/selftests/sched_ext/scx_test.h
new file mode 100644
index 000000000000..90b8d6915bb7
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/scx_test.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ */
+
+#ifndef __SCX_TEST_H__
+#define __SCX_TEST_H__
+
+#include <errno.h>
+#include <scx/common.h>
+#include <scx/compat.h>
+
+enum scx_test_status {
+ SCX_TEST_PASS = 0,
+ SCX_TEST_SKIP,
+ SCX_TEST_FAIL,
+};
+
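+/*
+ * Resolve an enum scx_exit_kind value from the running kernel at runtime,
+ * falling back to 0 if the entry doesn't exist, so that the tests don't
+ * hard-code kernel enum values.
+ */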
+#define EXIT_KIND(__ent) __COMPAT_ENUM_OR_ZERO("scx_exit_kind", #__ent)
+
+struct scx_test {
+ /**
+ * name - The name of the testcase.
+ */
+ const char *name;
+
+ /**
+ * description - A description of your testcase: what it tests and is
+ * meant to validate.
+ */
+ const char *description;
+
+	/**
+ * setup - Setup the test.
+ * @ctx: A pointer to a context object that will be passed to run and
+ * cleanup.
+ *
+ * An optional callback that allows a testcase to perform setup for its
+ * run. A test may return SCX_TEST_SKIP to skip the run.
+ */
+ enum scx_test_status (*setup)(void **ctx);
+
+	/**
+ * run - Run the test.
+ * @ctx: Context set in the setup() callback. If @ctx was not set in
+ * setup(), it is NULL.
+ *
+ * The main test. Callers should return one of:
+ *
+ * - SCX_TEST_PASS: Test passed
+ * - SCX_TEST_SKIP: Test should be skipped
+ * - SCX_TEST_FAIL: Test failed
+ *
+ * This callback must be defined.
+ */
+ enum scx_test_status (*run)(void *ctx);
+
+	/**
+ * cleanup - Perform cleanup following the test
+ * @ctx: Context set in the setup() callback. If @ctx was not set in
+ * setup(), it is NULL.
+ *
+ * An optional callback that allows a test to perform cleanup after
+ * being run. This callback is run even if the run() callback returns
+ * SCX_TEST_SKIP or SCX_TEST_FAIL. It is not run if setup() returns
+ * SCX_TEST_SKIP or SCX_TEST_FAIL.
+ */
+ void (*cleanup)(void *ctx);
+};
+
+void scx_test_register(struct scx_test *test);
+
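+/*
+ * Emit a constructor that registers @__test before main() runs. The pasting
+ * indirection ensures that __LINE__ is expanded before ##, giving each call
+ * site a uniquely named registration function.
+ */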
+#define __SCX_TEST_PASTE(a, b) a##b
+#define SCX_TEST_PASTE(a, b) __SCX_TEST_PASTE(a, b)
+
+#define REGISTER_SCX_TEST(__test)				\
+	__attribute__((constructor))				\
+	static void SCX_TEST_PASTE(___scxregister, __LINE__)(void)	\
+	{							\
+		scx_test_register(__test);			\
+	}
+
+#define SCX_ERR(__fmt, ...) \
+ do { \
+ fprintf(stderr, "ERR: %s:%d\n", __FILE__, __LINE__); \
+ fprintf(stderr, __fmt"\n", ##__VA_ARGS__); \
+ } while (0)
+
+#define SCX_FAIL(__fmt, ...) \
+ do { \
+ SCX_ERR(__fmt, ##__VA_ARGS__); \
+ return SCX_TEST_FAIL; \
+ } while (0)
+
+#define SCX_FAIL_IF(__cond, __fmt, ...) \
+ do { \
+ if (__cond) \
+ SCX_FAIL(__fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define SCX_GT(_x, _y) SCX_FAIL_IF((_x) <= (_y), "Expected %s > %s (%lu > %lu)", \
+ #_x, #_y, (u64)(_x), (u64)(_y))
+#define SCX_GE(_x, _y) SCX_FAIL_IF((_x) < (_y), "Expected %s >= %s (%lu >= %lu)", \
+ #_x, #_y, (u64)(_x), (u64)(_y))
+#define SCX_LT(_x, _y) SCX_FAIL_IF((_x) >= (_y), "Expected %s < %s (%lu < %lu)", \
+ #_x, #_y, (u64)(_x), (u64)(_y))
+#define SCX_LE(_x, _y) SCX_FAIL_IF((_x) > (_y), "Expected %s <= %s (%lu <= %lu)", \
+ #_x, #_y, (u64)(_x), (u64)(_y))
+#define SCX_EQ(_x, _y) SCX_FAIL_IF((_x) != (_y), "Expected %s == %s (%lu == %lu)", \
+ #_x, #_y, (u64)(_x), (u64)(_y))
+#define SCX_ASSERT(_x) SCX_FAIL_IF(!(_x), "Expected %s to be true (%lu)", \
+ #_x, (u64)(_x))
+
+#define SCX_ECODE_VAL(__ecode) ({ \
+ u64 __val = 0; \
+ bool __found = false; \
+ \
+ __found = __COMPAT_read_enum("scx_exit_code", #__ecode, &__val); \
+ SCX_ASSERT(__found); \
+ (s64)__val; \
+})
+
+#define SCX_KIND_VAL(__kind) ({ \
+ u64 __val = 0; \
+ bool __found = false; \
+ \
+ __found = __COMPAT_read_enum("scx_exit_kind", #__kind, &__val); \
+ SCX_ASSERT(__found); \
+ __val; \
+})
+
+#endif /* __SCX_TEST_H__ */
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
new file mode 100644
index 000000000000..2ed2991afafe
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that validates the behavior of direct dispatching with a default
+ * select_cpu implementation.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+bool saw_local = false;
+
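+/* Only consider tasks that appear to have been spawned by this testcase. */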
+static bool task_is_test(const struct task_struct *p)
+{
+ return !bpf_strncmp(p->comm, 9, "select_cpu");
+}
+
+void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
+ u64 enq_flags)
+{
+ const struct cpumask *idle_mask = scx_bpf_get_idle_cpumask();
+
+ if (task_is_test(p) &&
+ bpf_cpumask_test_cpu(scx_bpf_task_cpu(p), idle_mask)) {
+ saw_local = true;
+ }
+ scx_bpf_put_idle_cpumask(idle_mask);
+
+ scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops select_cpu_dfl_ops = {
+ .enqueue = select_cpu_dfl_enqueue,
+ .name = "select_cpu_dfl",
+};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl.c b/tools/testing/selftests/sched_ext/select_cpu_dfl.c
new file mode 100644
index 000000000000..a53a40c2d2f0
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "select_cpu_dfl.bpf.skel.h"
+#include "scx_test.h"
+
+#define NUM_CHILDREN 1028
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct select_cpu_dfl *skel;
+
+ skel = select_cpu_dfl__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct select_cpu_dfl *skel = ctx;
+ struct bpf_link *link;
+ pid_t pids[NUM_CHILDREN];
+ int i, status;
+
+ link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dfl_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ for (i = 0; i < NUM_CHILDREN; i++) {
+ pids[i] = fork();
+ if (pids[i] == 0) {
+ sleep(1);
+ exit(0);
+ }
+ }
+
+ for (i = 0; i < NUM_CHILDREN; i++) {
+ SCX_EQ(waitpid(pids[i], &status, 0), pids[i]);
+ SCX_EQ(status, 0);
+ }
+
+ SCX_ASSERT(!skel->bss->saw_local);
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct select_cpu_dfl *skel = ctx;
+
+ select_cpu_dfl__destroy(skel);
+}
+
+struct scx_test select_cpu_dfl = {
+ .name = "select_cpu_dfl",
+ .description = "Verify the default ops.select_cpu() dispatches tasks "
+		       "when idle cores are found, and skips ops.enqueue()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&select_cpu_dfl)
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
new file mode 100644
index 000000000000..4bb5abb2d369
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that validates the behavior of direct dispatching with a default
+ * select_cpu implementation, and with the SCX_OPS_ENQ_DFL_NO_DISPATCH ops flag
+ * specified.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+bool saw_local = false;
+
+/* Per-task scheduling context */
+struct task_ctx {
+ bool force_local; /* CPU changed by ops.select_cpu() */
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct task_ctx);
+} task_ctx_stor SEC(".maps");
+
+/* Manually specify the signature until the kfunc is added to the scx repo. */
+s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+ bool *found) __ksym;
+
+s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ struct task_ctx *tctx;
+ s32 cpu;
+
+ tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
+ if (!tctx) {
+ scx_bpf_error("task_ctx lookup failed");
+ return -ESRCH;
+ }
+
+ cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
+ &tctx->force_local);
+
+ return cpu;
+}
+
+void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p,
+ u64 enq_flags)
+{
+ u64 dsq_id = SCX_DSQ_GLOBAL;
+ struct task_ctx *tctx;
+
+ tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
+ if (!tctx) {
+ scx_bpf_error("task_ctx lookup failed");
+ return;
+ }
+
+ if (tctx->force_local) {
+ dsq_id = SCX_DSQ_LOCAL;
+ tctx->force_local = false;
+ saw_local = true;
+ }
+
+ scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags);
+}
+
+s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
+ struct task_struct *p, struct scx_init_task_args *args)
+{
+ if (bpf_task_storage_get(&task_ctx_stor, p, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE))
+ return 0;
+ else
+ return -ENOMEM;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops select_cpu_dfl_nodispatch_ops = {
+ .select_cpu = select_cpu_dfl_nodispatch_select_cpu,
+ .enqueue = select_cpu_dfl_nodispatch_enqueue,
+ .init_task = select_cpu_dfl_nodispatch_init_task,
+ .name = "select_cpu_dfl_nodispatch",
+};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.c b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.c
new file mode 100644
index 000000000000..1d85bf4bf3a3
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "select_cpu_dfl_nodispatch.bpf.skel.h"
+#include "scx_test.h"
+
+#define NUM_CHILDREN 1028
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct select_cpu_dfl_nodispatch *skel;
+
+ skel = select_cpu_dfl_nodispatch__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct select_cpu_dfl_nodispatch *skel = ctx;
+ struct bpf_link *link;
+ pid_t pids[NUM_CHILDREN];
+ int i, status;
+
+ link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dfl_nodispatch_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ for (i = 0; i < NUM_CHILDREN; i++) {
+ pids[i] = fork();
+ if (pids[i] == 0) {
+ sleep(1);
+ exit(0);
+ }
+ }
+
+ for (i = 0; i < NUM_CHILDREN; i++) {
+ SCX_EQ(waitpid(pids[i], &status, 0), pids[i]);
+ SCX_EQ(status, 0);
+ }
+
+ SCX_ASSERT(skel->bss->saw_local);
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct select_cpu_dfl_nodispatch *skel = ctx;
+
+ select_cpu_dfl_nodispatch__destroy(skel);
+}
+
+struct scx_test select_cpu_dfl_nodispatch = {
+ .name = "select_cpu_dfl_nodispatch",
+ .description = "Verify behavior of scx_bpf_select_cpu_dfl() in "
+ "ops.select_cpu()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&select_cpu_dfl_nodispatch)
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
new file mode 100644
index 000000000000..f0b96a4a04b2
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that validates direct dispatching to built-in DSQs from a
+ * custom ops.select_cpu() implementation.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
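+/*
+ * Directly dispatch to the local DSQ of an idle CPU when one can be found,
+ * and fall back to dispatching to the global DSQ otherwise.
+ */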
+s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ u64 dsq_id = SCX_DSQ_LOCAL;
+ s32 cpu = prev_cpu;
+
+ if (scx_bpf_test_and_clear_cpu_idle(cpu))
+ goto dispatch;
+
+ cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
+ if (cpu >= 0)
+ goto dispatch;
+
+ dsq_id = SCX_DSQ_GLOBAL;
+ cpu = prev_cpu;
+
+dispatch:
+ scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0);
+ return cpu;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops select_cpu_dispatch_ops = {
+ .select_cpu = select_cpu_dispatch_select_cpu,
+ .name = "select_cpu_dispatch",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch.c
new file mode 100644
index 000000000000..0309ca8785b3
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.c
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "select_cpu_dispatch.bpf.skel.h"
+#include "scx_test.h"
+
+#define NUM_CHILDREN 1028
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct select_cpu_dispatch *skel;
+
+ skel = select_cpu_dispatch__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct select_cpu_dispatch *skel = ctx;
+ struct bpf_link *link;
+ pid_t pids[NUM_CHILDREN];
+ int i, status;
+
+ link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dispatch_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ for (i = 0; i < NUM_CHILDREN; i++) {
+ pids[i] = fork();
+ if (pids[i] == 0) {
+ sleep(1);
+ exit(0);
+ }
+ }
+
+ for (i = 0; i < NUM_CHILDREN; i++) {
+ SCX_EQ(waitpid(pids[i], &status, 0), pids[i]);
+ SCX_EQ(status, 0);
+ }
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct select_cpu_dispatch *skel = ctx;
+
+ select_cpu_dispatch__destroy(skel);
+}
+
+struct scx_test select_cpu_dispatch = {
+ .name = "select_cpu_dispatch",
+ .description = "Test direct dispatching to built-in DSQs from "
+ "ops.select_cpu()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&select_cpu_dispatch)
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
new file mode 100644
index 000000000000..7b42ddce0f56
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that validates that dispatching to an invalid DSQ from
+ * ops.select_cpu() fails gracefully.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+s32 BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ /* Dispatching to a random DSQ should fail. */
+ scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0);
+
+ return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops select_cpu_dispatch_bad_dsq_ops = {
+ .select_cpu = select_cpu_dispatch_bad_dsq_select_cpu,
+ .exit = select_cpu_dispatch_bad_dsq_exit,
+ .name = "select_cpu_dispatch_bad_dsq",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.c
new file mode 100644
index 000000000000..47eb6ed7627d
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.c
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "select_cpu_dispatch_bad_dsq.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct select_cpu_dispatch_bad_dsq *skel;
+
+ skel = select_cpu_dispatch_bad_dsq__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct select_cpu_dispatch_bad_dsq *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dispatch_bad_dsq_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ sleep(1);
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct select_cpu_dispatch_bad_dsq *skel = ctx;
+
+ select_cpu_dispatch_bad_dsq__destroy(skel);
+}
+
+struct scx_test select_cpu_dispatch_bad_dsq = {
+ .name = "select_cpu_dispatch_bad_dsq",
+ .description = "Verify graceful failure if we direct-dispatch to a "
+ "bogus DSQ in ops.select_cpu()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&select_cpu_dispatch_bad_dsq)
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
new file mode 100644
index 000000000000..653e3dc0b4dc
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that validates that dispatching a task twice in a row from
+ * ops.select_cpu() fails gracefully.
+ *
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+s32 BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ /* Dispatching twice in a row is disallowed. */
+ scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+ scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+
+ return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops select_cpu_dispatch_dbl_dsp_ops = {
+ .select_cpu = select_cpu_dispatch_dbl_dsp_select_cpu,
+ .exit = select_cpu_dispatch_dbl_dsp_exit,
+ .name = "select_cpu_dispatch_dbl_dsp",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.c
new file mode 100644
index 000000000000..48ff028a3c46
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.c
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "select_cpu_dispatch_dbl_dsp.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct select_cpu_dispatch_dbl_dsp *skel;
+
+ skel = select_cpu_dispatch_dbl_dsp__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct select_cpu_dispatch_dbl_dsp *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.select_cpu_dispatch_dbl_dsp_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ sleep(1);
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct select_cpu_dispatch_dbl_dsp *skel = ctx;
+
+ select_cpu_dispatch_dbl_dsp__destroy(skel);
+}
+
+struct scx_test select_cpu_dispatch_dbl_dsp = {
+ .name = "select_cpu_dispatch_dbl_dsp",
+ .description = "Verify graceful failure if we dispatch twice to a "
+ "DSQ in ops.select_cpu()",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&select_cpu_dispatch_dbl_dsp)
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
new file mode 100644
index 000000000000..7f3ebf4fc2ea
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A scheduler that validates that enqueue flags are properly stored and
+ * applied at dispatch time when a task is directly dispatched from
+ * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
+ * making the test a very basic vtime scheduler.
+ *
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+volatile bool consumed;
+
+static u64 vtime_now;
+
+#define VTIME_DSQ 0
+
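+/* Returns true if timestamp @a is before @b, in a wraparound-safe manner. */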
+static inline bool vtime_before(u64 a, u64 b)
+{
+ return (s64)(a - b) < 0;
+}
+
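+/*
+ * Clamp a task's vtime to no more than one default slice behind the global
+ * vtime so that tasks which slept for a long time can't hoard priority.
+ */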
+static inline u64 task_vtime(const struct task_struct *p)
+{
+ u64 vtime = p->scx.dsq_vtime;
+
+ if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
+ return vtime_now - SCX_SLICE_DFL;
+ else
+ return vtime;
+}
+
+s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
+ s32 prev_cpu, u64 wake_flags)
+{
+ s32 cpu;
+
+ cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
+ if (cpu >= 0)
+ goto ddsp;
+
+ cpu = prev_cpu;
+ scx_bpf_test_and_clear_cpu_idle(cpu);
+ddsp:
+ scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+ return cpu;
+}
+
+void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
+{
+ if (scx_bpf_consume(VTIME_DSQ))
+ consumed = true;
+}
+
+void BPF_STRUCT_OPS(select_cpu_vtime_running, struct task_struct *p)
+{
+ if (vtime_before(vtime_now, p->scx.dsq_vtime))
+ vtime_now = p->scx.dsq_vtime;
+}
+
+void BPF_STRUCT_OPS(select_cpu_vtime_stopping, struct task_struct *p,
+ bool runnable)
+{
+ p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+}
+
+void BPF_STRUCT_OPS(select_cpu_vtime_enable, struct task_struct *p)
+{
+ p->scx.dsq_vtime = vtime_now;
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)
+{
+ return scx_bpf_create_dsq(VTIME_DSQ, -1);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops select_cpu_vtime_ops = {
+ .select_cpu = select_cpu_vtime_select_cpu,
+ .dispatch = select_cpu_vtime_dispatch,
+ .running = select_cpu_vtime_running,
+ .stopping = select_cpu_vtime_stopping,
+ .enable = select_cpu_vtime_enable,
+ .init = select_cpu_vtime_init,
+ .name = "select_cpu_vtime",
+ .timeout_ms = 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.c
new file mode 100644
index 000000000000..b4629c2364f5
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.c
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "select_cpu_vtime.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct select_cpu_vtime *skel;
+
+ skel = select_cpu_vtime__open_and_load();
+ SCX_FAIL_IF(!skel, "Failed to open and load skel");
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct select_cpu_vtime *skel = ctx;
+ struct bpf_link *link;
+
+ SCX_ASSERT(!skel->bss->consumed);
+
+ link = bpf_map__attach_struct_ops(skel->maps.select_cpu_vtime_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ sleep(1);
+
+ SCX_ASSERT(skel->bss->consumed);
+
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct select_cpu_vtime *skel = ctx;
+
+ select_cpu_vtime__destroy(skel);
+}
+
+struct scx_test select_cpu_vtime = {
+ .name = "select_cpu_vtime",
+ .description = "Test doing direct vtime-dispatching from "
+ "ops.select_cpu(), to a non-built-in DSQ",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&select_cpu_vtime)
diff --git a/tools/testing/selftests/sched_ext/test_example.c b/tools/testing/selftests/sched_ext/test_example.c
new file mode 100644
index 000000000000..ce36cdf03cdc
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/test_example.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include "scx_test.h"
+
+static bool setup_called = false;
+static bool run_called = false;
+static bool cleanup_called = false;
+
+static int context = 10;
+
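+/*
+ * setup() stashes a pointer in *ctx; the test harness then hands that
+ * same pointer to run() and cleanup(), which is what we validate here.
+ */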
+static enum scx_test_status setup(void **ctx)
+{
+ setup_called = true;
+ *ctx = &context;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ int *arg = ctx;
+
+ SCX_ASSERT(setup_called);
+ SCX_ASSERT(!run_called && !cleanup_called);
+ SCX_EQ(*arg, context);
+
+ run_called = true;
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ SCX_BUG_ON(!run_called || cleanup_called, "Wrong callbacks invoked");
+}
+
+struct scx_test example = {
+ .name = "example",
+ .description = "Validate the basic function of the test suite itself",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&example)
diff --git a/tools/testing/selftests/sched_ext/util.c b/tools/testing/selftests/sched_ext/util.c
new file mode 100644
index 000000000000..e47769c91918
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/util.c
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <dvernet@meta.com>
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/* Returns read len on success, or -errno on failure. */
+static ssize_t read_text(const char *path, char *buf, size_t max_len)
+{
+ ssize_t len;
+ int fd;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ len = read(fd, buf, max_len - 1);
+
+ if (len >= 0)
+ buf[len] = 0;
+
+ close(fd);
+ return len < 0 ? -errno : len;
+}
+
+/* Returns written len on success, or -errno on failure. */
+static ssize_t write_text(const char *path, char *buf, ssize_t len)
+{
+ int fd;
+ ssize_t written;
+
+ fd = open(path, O_WRONLY | O_APPEND);
+ if (fd < 0)
+ return -errno;
+
+ written = write(fd, buf, len);
+ close(fd);
+ return written < 0 ? -errno : written;
+}
+
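+/*
+ * Convenience wrappers for reading and writing a single long value,
+ * e.g. from a sysfs or cgroup control file.
+ */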
+long file_read_long(const char *path)
+{
+ char buf[128];
+
+ if (read_text(path, buf, sizeof(buf)) <= 0)
+ return -1;
+
+ return atol(buf);
+}
+
+int file_write_long(const char *path, long val)
+{
+ char buf[64];
+ int ret;
+
+ ret = sprintf(buf, "%ld", val);
+ if (ret < 0)
+ return ret;
+
+ if (write_text(path, buf, ret) <= 0)
+ return -1;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/sched_ext/util.h b/tools/testing/selftests/sched_ext/util.h
new file mode 100644
index 000000000000..bc13dfec1267
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/util.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2024 David Vernet <void@manifault.com>
+ */
+
+#ifndef __SCX_TEST_UTIL_H__
+#define __SCX_TEST_UTIL_H__
+
+long file_read_long(const char *path);
+int file_write_long(const char *path, long val);
+
+#endif /* __SCX_TEST_UTIL_H__ */
diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
index 27f6fdf11969..644915862af8 100644
--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
@@ -131,6 +131,8 @@ asm (
"_start:\n\t"
#ifdef __x86_64__
"mov %rsp,%rdi\n\t"
+ "and $-16,%rsp\n\t"
+ "sub $8,%rsp\n\t"
"jmp c_main"
#else
"push %esp\n\t"
diff --git a/tools/testing/radix-tree/generated/autoconf.h b/tools/testing/shared/autoconf.h
index 92dc474c349b..92dc474c349b 100644
--- a/tools/testing/radix-tree/generated/autoconf.h
+++ b/tools/testing/shared/autoconf.h
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/shared/linux.c
index 4eb442206d01..17263696b5d8 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/shared/linux.c
@@ -26,8 +26,21 @@ struct kmem_cache {
unsigned int non_kernel;
unsigned long nr_allocated;
unsigned long nr_tallocated;
+ bool exec_callback;
+ void (*callback)(void *);
+ void *private;
};
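+
+/*
+ * When an allocation is refused because the non-kernel budget has run
+ * out, exec_callback is set; the next allocation attempt first invokes
+ * the registered callback with the stored private pointer.
+ */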
+void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
+{
+ cachep->callback = callback;
+}
+
+void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
+{
+ cachep->private = private;
+}
+
void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
cachep->non_kernel = val;
@@ -58,9 +71,17 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
{
void *p;
+ if (cachep->exec_callback) {
+ if (cachep->callback)
+ cachep->callback(cachep->private);
+ cachep->exec_callback = false;
+ }
+
if (!(gfp & __GFP_DIRECT_RECLAIM)) {
- if (!cachep->non_kernel)
+ if (!cachep->non_kernel) {
+ cachep->exec_callback = true;
return NULL;
+ }
cachep->non_kernel--;
}
@@ -223,6 +244,9 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
ret->objs = NULL;
ret->ctor = ctor;
ret->non_kernel = 0;
+ ret->exec_callback = false;
+ ret->callback = NULL;
+ ret->private = NULL;
return ret;
}
diff --git a/tools/testing/radix-tree/linux/bug.h b/tools/testing/shared/linux/bug.h
index 03dc8a57eb99..03dc8a57eb99 100644
--- a/tools/testing/radix-tree/linux/bug.h
+++ b/tools/testing/shared/linux/bug.h
diff --git a/tools/testing/radix-tree/linux/cpu.h b/tools/testing/shared/linux/cpu.h
index a45530d78107..a45530d78107 100644
--- a/tools/testing/radix-tree/linux/cpu.h
+++ b/tools/testing/shared/linux/cpu.h
diff --git a/tools/testing/radix-tree/linux/idr.h b/tools/testing/shared/linux/idr.h
index 4e342f2e37cf..4e342f2e37cf 100644
--- a/tools/testing/radix-tree/linux/idr.h
+++ b/tools/testing/shared/linux/idr.h
diff --git a/tools/testing/radix-tree/linux/kconfig.h b/tools/testing/shared/linux/kconfig.h
index 6c8675859913..6c8675859913 100644
--- a/tools/testing/radix-tree/linux/kconfig.h
+++ b/tools/testing/shared/linux/kconfig.h
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/shared/linux/kernel.h
index c0a2bb785b92..c0a2bb785b92 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/shared/linux/kernel.h
diff --git a/tools/testing/radix-tree/linux/kmemleak.h b/tools/testing/shared/linux/kmemleak.h
index 155f112786c4..155f112786c4 100644
--- a/tools/testing/radix-tree/linux/kmemleak.h
+++ b/tools/testing/shared/linux/kmemleak.h
diff --git a/tools/testing/radix-tree/linux/local_lock.h b/tools/testing/shared/linux/local_lock.h
index b3cf8b233ca4..b3cf8b233ca4 100644
--- a/tools/testing/radix-tree/linux/local_lock.h
+++ b/tools/testing/shared/linux/local_lock.h
diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/shared/linux/lockdep.h
index 62473ab57f99..62473ab57f99 100644
--- a/tools/testing/radix-tree/linux/lockdep.h
+++ b/tools/testing/shared/linux/lockdep.h
diff --git a/tools/testing/radix-tree/linux/maple_tree.h b/tools/testing/shared/linux/maple_tree.h
index 06c89bdcc515..06c89bdcc515 100644
--- a/tools/testing/radix-tree/linux/maple_tree.h
+++ b/tools/testing/shared/linux/maple_tree.h
diff --git a/tools/testing/radix-tree/linux/percpu.h b/tools/testing/shared/linux/percpu.h
index b2403aa743b2..b2403aa743b2 100644
--- a/tools/testing/radix-tree/linux/percpu.h
+++ b/tools/testing/shared/linux/percpu.h
diff --git a/tools/testing/radix-tree/linux/preempt.h b/tools/testing/shared/linux/preempt.h
index edb10302b903..edb10302b903 100644
--- a/tools/testing/radix-tree/linux/preempt.h
+++ b/tools/testing/shared/linux/preempt.h
diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/shared/linux/radix-tree.h
index d1635a5bef02..d1635a5bef02 100644
--- a/tools/testing/radix-tree/linux/radix-tree.h
+++ b/tools/testing/shared/linux/radix-tree.h
diff --git a/tools/testing/radix-tree/linux/rcupdate.h b/tools/testing/shared/linux/rcupdate.h
index fed468fb0c78..fed468fb0c78 100644
--- a/tools/testing/radix-tree/linux/rcupdate.h
+++ b/tools/testing/shared/linux/rcupdate.h
diff --git a/tools/testing/radix-tree/linux/xarray.h b/tools/testing/shared/linux/xarray.h
index df3812cda376..df3812cda376 100644
--- a/tools/testing/radix-tree/linux/xarray.h
+++ b/tools/testing/shared/linux/xarray.h
diff --git a/tools/testing/shared/maple-shared.h b/tools/testing/shared/maple-shared.h
new file mode 100644
index 000000000000..dc4d30f3860b
--- /dev/null
+++ b/tools/testing/shared/maple-shared.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __MAPLE_SHARED_H__
+#define __MAPLE_SHARED_H__
+
+#define CONFIG_DEBUG_MAPLE_TREE
+#define CONFIG_MAPLE_SEARCH
+#define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31)
+#include "shared.h"
+#include <stdlib.h>
+#include <time.h>
+#include "linux/init.h"
+
+#endif /* __MAPLE_SHARED_H__ */
diff --git a/tools/testing/shared/maple-shim.c b/tools/testing/shared/maple-shim.c
new file mode 100644
index 000000000000..640df76f483e
--- /dev/null
+++ b/tools/testing/shared/maple-shim.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Very simple shim around the maple tree. */
+
+#include "maple-shared.h"
+
+#include "../../../lib/maple_tree.c"
diff --git a/tools/testing/shared/shared.h b/tools/testing/shared/shared.h
new file mode 100644
index 000000000000..13fb4d39966b
--- /dev/null
+++ b/tools/testing/shared/shared.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SHARED_H__
+#define __SHARED_H__
+
+#include <assert.h>
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include <linux/gfp.h>
+#include <linux/rcupdate.h>
+
+#ifndef module_init
+#define module_init(x)
+#endif
+
+#ifndef module_exit
+#define module_exit(x)
+#endif
+
+#ifndef MODULE_AUTHOR
+#define MODULE_AUTHOR(x)
+#endif
+
+#ifndef MODULE_LICENSE
+#define MODULE_LICENSE(x)
+#endif
+
+#ifndef MODULE_DESCRIPTION
+#define MODULE_DESCRIPTION(x)
+#endif
+
+#ifndef dump_stack
+#define dump_stack() assert(0)
+#endif
+
+#endif /* __SHARED_H__ */
diff --git a/tools/testing/shared/shared.mk b/tools/testing/shared/shared.mk
new file mode 100644
index 000000000000..a6bc51d0b0bf
--- /dev/null
+++ b/tools/testing/shared/shared.mk
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CFLAGS += -I../shared -I. -I../../include -I../../../lib -g -Og -Wall \
+ -D_LGPL_SOURCE -fsanitize=address -fsanitize=undefined
+LDFLAGS += -fsanitize=address -fsanitize=undefined
+LDLIBS += -lpthread -lurcu
+LIBS := slab.o find_bit.o bitmap.o hweight.o vsprintf.o
+SHARED_OFILES = xarray-shared.o radix-tree.o idr.o linux.o $(LIBS)
+
+SHARED_DEPS = Makefile ../shared/shared.mk ../shared/*.h generated/map-shift.h \
+ generated/bit-length.h generated/autoconf.h \
+ ../../include/linux/*.h \
+ ../../include/asm/*.h \
+ ../../../include/linux/xarray.h \
+ ../../../include/linux/maple_tree.h \
+ ../../../include/linux/radix-tree.h \
+ ../../../lib/radix-tree.h \
+ ../../../include/linux/idr.h \
+ ../../../lib/maple_tree.c \
+ ../../../lib/test_maple_tree.c
+
+ifndef SHIFT
+ SHIFT=3
+endif
+
+ifeq ($(BUILD), 32)
+ CFLAGS += -m32
+ LDFLAGS += -m32
+ LONG_BIT := 32
+endif
+
+ifndef LONG_BIT
+LONG_BIT := $(shell getconf LONG_BIT)
+endif
+
+%.o: ../shared/%.c
+ $(CC) -c $(CFLAGS) $< -o $@
+
+vpath %.c ../../lib
+
+$(SHARED_OFILES): $(SHARED_DEPS)
+
+radix-tree.c: ../../../lib/radix-tree.c
+ sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
+
+idr.c: ../../../lib/idr.c
+ sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
+
+xarray-shared.o: ../shared/xarray-shared.c ../../../lib/xarray.c \
+ ../../../lib/test_xarray.c
+
+maple-shared.o: ../shared/maple-shared.c ../../../lib/maple_tree.c \
+ ../../../lib/test_maple_tree.c
+
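+# The map-shift.h and bit-length.h rules below rewrite their targets only
+# when the configured value would change, so everything depending on
+# SHARED_DEPS is rebuilt on a configuration change but not on every run.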
+generated/autoconf.h:
+ @mkdir -p generated
+ cp ../shared/autoconf.h generated/autoconf.h
+
+generated/map-shift.h:
+ @mkdir -p generated
+ @if ! grep -qws $(SHIFT) generated/map-shift.h; then \
+ echo "Generating $@"; \
+ echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
+ generated/map-shift.h; \
+ fi
+
+generated/bit-length.h: FORCE
+ @mkdir -p generated
+ @if ! grep -qws CONFIG_$(LONG_BIT)BIT generated/bit-length.h; then \
+ echo "Generating $@"; \
+ echo "#define CONFIG_$(LONG_BIT)BIT 1" > $@; \
+ fi
+
+FORCE: ;
diff --git a/tools/testing/radix-tree/trace/events/maple_tree.h b/tools/testing/shared/trace/events/maple_tree.h
index 97d0e1ddcf08..97d0e1ddcf08 100644
--- a/tools/testing/radix-tree/trace/events/maple_tree.h
+++ b/tools/testing/shared/trace/events/maple_tree.h
diff --git a/tools/testing/shared/xarray-shared.c b/tools/testing/shared/xarray-shared.c
new file mode 100644
index 000000000000..e90901958dcd
--- /dev/null
+++ b/tools/testing/shared/xarray-shared.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "xarray-shared.h"
+
+#include "../../../lib/xarray.c"
diff --git a/tools/testing/shared/xarray-shared.h b/tools/testing/shared/xarray-shared.h
new file mode 100644
index 000000000000..d50de7884803
--- /dev/null
+++ b/tools/testing/shared/xarray-shared.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __XARRAY_SHARED_H__
+#define __XARRAY_SHARED_H__
+
+#define XA_DEBUG
+#include "shared.h"
+
+#endif /* __XARRAY_SHARED_H__ */
diff --git a/tools/testing/vma/.gitignore b/tools/testing/vma/.gitignore
new file mode 100644
index 000000000000..b003258eba79
--- /dev/null
+++ b/tools/testing/vma/.gitignore
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+generated/bit-length.h
+generated/map-shift.h
+generated/autoconf.h
+idr.c
+radix-tree.c
+vma
diff --git a/tools/testing/vma/Makefile b/tools/testing/vma/Makefile
new file mode 100644
index 000000000000..860fd2311dcc
--- /dev/null
+++ b/tools/testing/vma/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+.PHONY: default clean
+
+default: vma
+
+include ../shared/shared.mk
+
+OFILES = $(SHARED_OFILES) vma.o maple-shim.o
+TARGETS = vma
+
+vma.o: vma.c vma_internal.h ../../../mm/vma.c ../../../mm/vma.h
+
+vma: $(OFILES)
+ $(CC) $(CFLAGS) -o $@ $(OFILES) $(LDLIBS)
+
+clean:
+ $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h generated/bit-length.h generated/autoconf.h
diff --git a/tools/testing/vma/linux/atomic.h b/tools/testing/vma/linux/atomic.h
new file mode 100644
index 000000000000..e01f66f98982
--- /dev/null
+++ b/tools/testing/vma/linux/atomic.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_ATOMIC_H
+#define _LINUX_ATOMIC_H
+
+#define atomic_t int32_t
+#define atomic_inc(x) uatomic_inc(x)
+#define atomic_read(x) uatomic_read(x)
+#define atomic_set(x, y) do {} while (0)
+#define U8_MAX UCHAR_MAX
+
+#endif /* _LINUX_ATOMIC_H */
diff --git a/tools/testing/vma/linux/mmzone.h b/tools/testing/vma/linux/mmzone.h
new file mode 100644
index 000000000000..33cd1517f7a3
--- /dev/null
+++ b/tools/testing/vma/linux/mmzone.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_MMZONE_H
+#define _LINUX_MMZONE_H
+
+#include <linux/atomic.h>
+
+struct pglist_data *first_online_pgdat(void);
+struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
+
+#define for_each_online_pgdat(pgdat) \
+ for (pgdat = first_online_pgdat(); \
+ pgdat; \
+ pgdat = next_online_pgdat(pgdat))
+
+enum zone_type {
+ __MAX_NR_ZONES
+};
+
+#define MAX_NR_ZONES __MAX_NR_ZONES
+#define MAX_PAGE_ORDER 10
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
+
+#define pageblock_order MAX_PAGE_ORDER
+#define pageblock_nr_pages BIT(pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
+
+struct zone {
+ atomic_long_t managed_pages;
+};
+
+typedef struct pglist_data {
+ struct zone node_zones[MAX_NR_ZONES];
+} pg_data_t;
+
+#endif /* _LINUX_MMZONE_H */
diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c
new file mode 100644
index 000000000000..c53f220eb6cc
--- /dev/null
+++ b/tools/testing/vma/vma.c
@@ -0,0 +1,1563 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "maple-shared.h"
+#include "vma_internal.h"
+
+/* Include mm/vma.h now so its header guard is set before the overrides below. */
+#include "../../../mm/vma.h"
+
+static bool fail_prealloc;
+
+/* Then override vma_iter_prealloc() so we can choose to fail it. */
+#define vma_iter_prealloc(vmi, vma) \
+ (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
+
+/*
+ * Directly import the VMA implementation here. Our vma_internal.h wrapper
+ * provides userland-equivalent functionality for everything vma.c uses.
+ */
+#include "../../../mm/vma.c"
+
+const struct vm_operations_struct vma_dummy_vm_ops;
+static struct anon_vma dummy_anon_vma;
+
+#define ASSERT_TRUE(_expr) \
+ do { \
+ if (!(_expr)) { \
+ fprintf(stderr, \
+ "Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
+ __FILE__, __LINE__, __FUNCTION__, #_expr); \
+ return false; \
+ } \
+ } while (0)
+#define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
+#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
+#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
+
+static struct task_struct __current;
+
+struct task_struct *get_current(void)
+{
+ return &__current;
+}
+
+/* Helper function to simply allocate a VMA. */
+static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end,
+ pgoff_t pgoff,
+ vm_flags_t flags)
+{
+ struct vm_area_struct *ret = vm_area_alloc(mm);
+
+ if (ret == NULL)
+ return NULL;
+
+ ret->vm_start = start;
+ ret->vm_end = end;
+ ret->vm_pgoff = pgoff;
+ ret->__vm_flags = flags;
+
+ return ret;
+}
+
+/* Helper function to allocate a VMA and link it to the tree. */
+static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end,
+ pgoff_t pgoff,
+ vm_flags_t flags)
+{
+ struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
+
+ if (vma == NULL)
+ return NULL;
+
+ if (vma_link(mm, vma)) {
+ vm_area_free(vma);
+ return NULL;
+ }
+
+ /*
+ * Reset this counter which we use to track whether writes have
+ * begun. Linking to the tree will have caused this to be incremented,
+ * which means we will get a false positive otherwise.
+ */
+ vma->vm_lock_seq = -1;
+
+ return vma;
+}
+
+/* Helper function which provides a wrapper around a merge new VMA operation. */
+static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
+{
+ /*
+ * For convenience, obtain the prev and next VMAs, which the new VMA
+ * merge operation requires.
+ */
+ vmg->next = vma_next(vmg->vmi);
+ vmg->prev = vma_prev(vmg->vmi);
+ vma_iter_next_range(vmg->vmi);
+
+ return vma_merge_new_range(vmg);
+}
+
+/*
+ * Helper function which provides a wrapper around a merge existing VMA
+ * operation.
+ */
+static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
+{
+ return vma_merge_existing_range(vmg);
+}
+
+/*
+ * Helper function which provides a wrapper around the expansion of an existing
+ * VMA.
+ */
+static int expand_existing(struct vma_merge_struct *vmg)
+{
+ return vma_expand(vmg);
+}
+
+/*
+ * Helper function to reset the merge state and the associated VMA
+ * iterator to a specified new range.
+ */
+static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
+ unsigned long end, pgoff_t pgoff, vm_flags_t flags)
+{
+ vma_iter_set(vmg->vmi, start);
+
+ vmg->prev = NULL;
+ vmg->next = NULL;
+ vmg->vma = NULL;
+
+ vmg->start = start;
+ vmg->end = end;
+ vmg->pgoff = pgoff;
+ vmg->flags = flags;
+}
+
+/*
+ * Helper function to try to merge a new VMA.
+ *
+ * Update vmg and its iterator, then attempt the merge; if that fails,
+ * allocate a new VMA, link it to the maple tree and return it.
+ */
+static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
+ struct vma_merge_struct *vmg,
+ unsigned long start, unsigned long end,
+ pgoff_t pgoff, vm_flags_t flags,
+ bool *was_merged)
+{
+ struct vm_area_struct *merged;
+
+ vmg_set_range(vmg, start, end, pgoff, flags);
+
+ merged = merge_new(vmg);
+ if (merged) {
+ *was_merged = true;
+ ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
+ return merged;
+ }
+
+ *was_merged = false;
+
+ ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
+
+ return alloc_and_link_vma(mm, start, end, pgoff, flags);
+}
+
+/*
+ * Helper function to reset the dummy anon_vma to indicate it has not been
+ * duplicated.
+ */
+static void reset_dummy_anon_vma(void)
+{
+ dummy_anon_vma.was_cloned = false;
+ dummy_anon_vma.was_unlinked = false;
+}
+
+/*
+ * Helper function to remove all VMAs and destroy the maple tree associated with
+ * a virtual address space. Returns a count of VMAs in the tree.
+ */
+static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
+{
+ struct vm_area_struct *vma;
+ int count = 0;
+
+ fail_prealloc = false;
+ reset_dummy_anon_vma();
+
+ vma_iter_set(vmi, 0);
+ for_each_vma(*vmi, vma) {
+ vm_area_free(vma);
+ count++;
+ }
+
+ mtree_destroy(&mm->mm_mt);
+ mm->map_count = 0;
+ return count;
+}
+
+/* Helper function to determine if VMA has had vma_start_write() performed. */
+static bool vma_write_started(struct vm_area_struct *vma)
+{
+ int seq = vma->vm_lock_seq;
+
+ /* We reset after each check. */
+ vma->vm_lock_seq = -1;
+
+ /* The vma_start_write() stub simply increments this value. */
+ return seq > -1;
+}
+
+/* Helper function providing a dummy vm_ops->close() method. */
+static void dummy_close(struct vm_area_struct *)
+{
+}
+
+static bool test_simple_merge(void)
+{
+ struct vm_area_struct *vma;
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
+ struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
+ VMA_ITERATOR(vmi, &mm, 0x1000);
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ .start = 0x1000,
+ .end = 0x2000,
+ .flags = flags,
+ .pgoff = 1,
+ };
+
+ ASSERT_FALSE(vma_link(&mm, vma_left));
+ ASSERT_FALSE(vma_link(&mm, vma_right));
+
+ vma = merge_new(&vmg);
+ ASSERT_NE(vma, NULL);
+
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x3000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+ ASSERT_EQ(vma->vm_flags, flags);
+
+ vm_area_free(vma);
+ mtree_destroy(&mm.mm_mt);
+
+ return true;
+}
+
+static bool test_simple_modify(void)
+{
+ struct vm_area_struct *vma;
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
+ VMA_ITERATOR(vmi, &mm, 0x1000);
+
+ ASSERT_FALSE(vma_link(&mm, init_vma));
+
+ /*
+ * The flags will not be changed, the vma_modify_flags() function
+ * performs the merge/split only.
+ */
+ vma = vma_modify_flags(&vmi, init_vma, init_vma,
+ 0x1000, 0x2000, VM_READ | VM_MAYREAD);
+ ASSERT_NE(vma, NULL);
+ /* We modify the provided VMA, and on split allocate new VMAs. */
+ ASSERT_EQ(vma, init_vma);
+
+ ASSERT_EQ(vma->vm_start, 0x1000);
+ ASSERT_EQ(vma->vm_end, 0x2000);
+ ASSERT_EQ(vma->vm_pgoff, 1);
+
+ /*
+ * Now walk through the three split VMAs and make sure they are as
+ * expected.
+ */
+
+ vma_iter_set(&vmi, 0);
+ vma = vma_iter_load(&vmi);
+
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x1000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+
+ vm_area_free(vma);
+ vma_iter_clear(&vmi);
+
+ vma = vma_next(&vmi);
+
+ ASSERT_EQ(vma->vm_start, 0x1000);
+ ASSERT_EQ(vma->vm_end, 0x2000);
+ ASSERT_EQ(vma->vm_pgoff, 1);
+
+ vm_area_free(vma);
+ vma_iter_clear(&vmi);
+
+ vma = vma_next(&vmi);
+
+ ASSERT_EQ(vma->vm_start, 0x2000);
+ ASSERT_EQ(vma->vm_end, 0x3000);
+ ASSERT_EQ(vma->vm_pgoff, 2);
+
+ vm_area_free(vma);
+ mtree_destroy(&mm.mm_mt);
+
+ return true;
+}
+
+static bool test_simple_expand(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vma_merge_struct vmg = {
+ .vmi = &vmi,
+ .vma = vma,
+ .start = 0,
+ .end = 0x3000,
+ .pgoff = 0,
+ };
+
+ ASSERT_FALSE(vma_link(&mm, vma));
+
+ ASSERT_FALSE(expand_existing(&vmg));
+
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x3000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+
+ vm_area_free(vma);
+ mtree_destroy(&mm.mm_mt);
+
+ return true;
+}
+
+static bool test_simple_shrink(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
+ VMA_ITERATOR(vmi, &mm, 0);
+
+ ASSERT_FALSE(vma_link(&mm, vma));
+
+ ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
+
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x1000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+
+ vm_area_free(vma);
+ mtree_destroy(&mm.mm_mt);
+
+ return true;
+}
+
+static bool test_merge_new(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ };
+ struct anon_vma_chain dummy_anon_vma_chain_a = {
+ .anon_vma = &dummy_anon_vma,
+ };
+ struct anon_vma_chain dummy_anon_vma_chain_b = {
+ .anon_vma = &dummy_anon_vma,
+ };
+ struct anon_vma_chain dummy_anon_vma_chain_c = {
+ .anon_vma = &dummy_anon_vma,
+ };
+ struct anon_vma_chain dummy_anon_vma_chain_d = {
+ .anon_vma = &dummy_anon_vma,
+ };
+ const struct vm_operations_struct vm_ops = {
+ .close = dummy_close,
+ };
+ int count;
+ struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
+ bool merged;
+
+ /*
+ * 0123456789abc
+ * AA B CC
+ */
+ vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
+ ASSERT_NE(vma_a, NULL);
+ /* We give each VMA a single avc so we can test anon_vma duplication. */
+ INIT_LIST_HEAD(&vma_a->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
+
+ vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
+ ASSERT_NE(vma_b, NULL);
+ INIT_LIST_HEAD(&vma_b->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
+
+ vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
+ ASSERT_NE(vma_c, NULL);
+ INIT_LIST_HEAD(&vma_c->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
+
+ /*
+ * NO merge.
+ *
+ * 0123456789abc
+ * AA B ** CC
+ */
+ vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
+ ASSERT_NE(vma_d, NULL);
+ INIT_LIST_HEAD(&vma_d->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
+ ASSERT_FALSE(merged);
+ ASSERT_EQ(mm.map_count, 4);
+
+ /*
+ * Merge BOTH sides.
+ *
+ * 0123456789abc
+ * AA*B DD CC
+ */
+ vma_a->vm_ops = &vm_ops; /* This should have no impact. */
+ vma_b->anon_vma = &dummy_anon_vma;
+ vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
+ ASSERT_EQ(vma, vma_a);
+ /* Merge with A, delete B. */
+ ASSERT_TRUE(merged);
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x4000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+ ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 3);
+
+ /*
+ * Merge to PREVIOUS VMA.
+ *
+ * 0123456789abc
+ * AAAA* DD CC
+ */
+ vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
+ ASSERT_EQ(vma, vma_a);
+ /* Extend A. */
+ ASSERT_TRUE(merged);
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x5000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+ ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 3);
+
+ /*
+ * Merge to NEXT VMA.
+ *
+ * 0123456789abc
+ * AAAAA *DD CC
+ */
+ vma_d->anon_vma = &dummy_anon_vma;
+ vma_d->vm_ops = &vm_ops; /* This should have no impact. */
+ vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
+ ASSERT_EQ(vma, vma_d);
+ /* Prepend. */
+ ASSERT_TRUE(merged);
+ ASSERT_EQ(vma->vm_start, 0x6000);
+ ASSERT_EQ(vma->vm_end, 0x9000);
+ ASSERT_EQ(vma->vm_pgoff, 6);
+ ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 3);
+
+ /*
+ * Merge BOTH sides.
+ *
+ * 0123456789abc
+ * AAAAA*DDD CC
+ */
+ vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
+ vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
+ ASSERT_EQ(vma, vma_a);
+ /* Merge with A, delete D. */
+ ASSERT_TRUE(merged);
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x9000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+ ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 2);
+
+ /*
+ * Merge to NEXT VMA.
+ *
+ * 0123456789abc
+ * AAAAAAAAA *CC
+ */
+ vma_c->anon_vma = &dummy_anon_vma;
+ vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
+ ASSERT_EQ(vma, vma_c);
+ /* Prepend C. */
+ ASSERT_TRUE(merged);
+ ASSERT_EQ(vma->vm_start, 0xa000);
+ ASSERT_EQ(vma->vm_end, 0xc000);
+ ASSERT_EQ(vma->vm_pgoff, 0xa);
+ ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 2);
+
+ /*
+ * Merge BOTH sides.
+ *
+ * 0123456789abc
+ * AAAAAAAAA*CCC
+ */
+ vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
+ ASSERT_EQ(vma, vma_a);
+ /* Extend A and delete C. */
+ ASSERT_TRUE(merged);
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0xc000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+ ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 1);
+
+ /*
+ * Final state.
+ *
+ * 0123456789abc
+ * AAAAAAAAAAAAA
+ */
+
+ count = 0;
+ vma_iter_set(&vmi, 0);
+ for_each_vma(vmi, vma) {
+ ASSERT_NE(vma, NULL);
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0xc000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+ ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
+
+ vm_area_free(vma);
+ count++;
+ }
+
+ /* Should only have one VMA left (though freed) after all is done. */
+ ASSERT_EQ(count, 1);
+
+ mtree_destroy(&mm.mm_mt);
+ return true;
+}
+
+static bool test_vma_merge_special_flags(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ };
+ vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
+ vm_flags_t all_special_flags = 0;
+ int i;
+ struct vm_area_struct *vma_left, *vma;
+
+ /* Make sure there aren't new VM_SPECIAL flags. */
+ for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
+ all_special_flags |= special_flags[i];
+ }
+ ASSERT_EQ(all_special_flags, VM_SPECIAL);
+
+ /*
+ * 01234
+ * AAA
+ */
+ vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ ASSERT_NE(vma_left, NULL);
+
+ /* 1. Set up new VMA with special flag that would otherwise merge. */
+
+ /*
+ * 01234
+ * AAA*
+ *
+ * This should merge if not for the VM_SPECIAL flag.
+ */
+ vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
+ for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
+ vm_flags_t special_flag = special_flags[i];
+
+ vma_left->__vm_flags = flags | special_flag;
+ vmg.flags = flags | special_flag;
+ vma = merge_new(&vmg);
+ ASSERT_EQ(vma, NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+ }
+
+ /* 2. Modify VMA with special flag that would otherwise merge. */
+
+ /*
+ * 01234
+ * AAAB
+ *
+ * Create a VMA to modify.
+ */
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
+ ASSERT_NE(vma, NULL);
+ vmg.vma = vma;
+
+ for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
+ vm_flags_t special_flag = special_flags[i];
+
+ vma_left->__vm_flags = flags | special_flag;
+ vmg.flags = flags | special_flag;
+ vma = merge_existing(&vmg);
+ ASSERT_EQ(vma, NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+ }
+
+ cleanup_mm(&mm, &vmi);
+ return true;
+}
+
+static bool test_vma_merge_with_close(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ };
+ const struct vm_operations_struct vm_ops = {
+ .close = dummy_close,
+ };
+ struct vm_area_struct *vma_prev, *vma_next, *vma;
+
+ /*
+ * When merging VMAs we are not permitted to remove any VMA that has a
+ * vm_ops->close() hook.
+ *
+ * Considering the two possible adjacent VMAs to which a VMA can be
+ * merged:
+ *
+ * [ prev ][ vma ][ next ]
+ *
+ * In no case will we need to delete prev. If the operation is
+ * mergeable, then prev will be extended with one or both of vma and
+ * next deleted.
+ *
+ * As a result, during initial mergeability checks, only
+ * can_vma_merge_before() (which implies the VMA being merged with is
+ * 'next' as shown above) bothers to check whether the next VMA
+ * has a vm_ops->close() callback that will need to be called when
+ * removed.
+ *
+ * If it does, then we cannot merge as the resources that the close()
+ * operation potentially clears down are tied only to the existing VMA
+ * range and we have no way of extending those to the newly merged one.
+ *
+ * We must consider two scenarios:
+ *
+ * A.
+ *
+ * vm_ops->close: - - !NULL
+ * [ prev ][ vma ][ next ]
+ *
+ * Where prev may or may not be present/mergeable.
+ *
+ * This is picked up by a specific check in can_vma_merge_before().
+ *
+ * B.
+ *
+ * vm_ops->close: - !NULL
+ * [ prev ][ vma ]
+ *
+ * Where prev and vma are present and mergeable.
+ *
+ * This is picked up by a specific check in the modified VMA merge.
+ *
+ * IMPORTANT NOTE: We make the assumption that the following case:
+ *
+ * - !NULL NULL
+ * [ prev ][ vma ][ next ]
+ *
+ * Cannot occur, because vma->vm_ops being the same implies the same
+ * vma->vm_file, and therefore this would mean that next->vm_ops->close
+ * would be set too, and thus scenario A would pick this up.
+ */
+
+ /*
+ * The only case of a new VMA merge that results in a VMA being deleted
+ * is one where both the previous and next VMAs are merged - in this
+ * instance the next VMA is deleted, and the previous VMA is extended.
+ *
+ * If we are unable to do so, we reduce the operation to simply
+ * extending the prev VMA and not merging next.
+ *
+ * 0123456789
+ * PPP**NNNN
+ * ->
+ * 0123456789
+ * PPPPPPNNN
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
+ vma_next->vm_ops = &vm_ops;
+
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ ASSERT_EQ(merge_new(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x5000);
+ ASSERT_EQ(vma_prev->vm_pgoff, 0);
+
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
+
+ /*
+ * When modifying an existing VMA there are further cases where we
+ * delete VMAs.
+ *
+ * <>
+ * 0123456789
+ * PPPVV
+ *
+ * In this instance, if vma has a close hook, the merge simply cannot
+ * proceed.
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma->vm_ops = &vm_ops;
+
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+
+ /*
+ * The VMA being modified in a way that would otherwise merge should
+ * also fail.
+ */
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
+
+ /*
+ * This case is mirrored if merging with next.
+ *
+ * <>
+ * 0123456789
+ * VVNNNN
+ *
+ * In this instance, if vma has a close hook, the merge simply cannot
+ * proceed.
+ */
+
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
+ vma->vm_ops = &vm_ops;
+
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.vma = vma;
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ /*
+ * Initially this is misapprehended as an out-of-memory report, as the
+ * close() check is handled in the same way as anon_vma duplication
+ * failures; a subsequent patch resolves this.
+ */
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
+
+ /*
+ * Finally, we consider two variants of the case where we modify a VMA
+ * to merge with both the previous and next VMAs.
+ *
+ * The first variant is where vma has a close hook. In this instance, no
+ * merge can proceed.
+ *
+ * <>
+ * 0123456789
+ * PPPVVNNNN
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
+ vma->vm_ops = &vm_ops;
+
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
+
+ /*
+ * The second variant is where next has a close hook. In this instance,
+ * we reduce the operation to a merge between prev and vma.
+ *
+ * <>
+ * 0123456789
+ * PPPVVNNNN
+ * ->
+ * 0123456789
+ * PPPPPNNNN
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
+ vma_next->vm_ops = &vm_ops;
+
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+
+ ASSERT_EQ(merge_existing(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x5000);
+ ASSERT_EQ(vma_prev->vm_pgoff, 0);
+
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
+
+ return true;
+}
+
+static bool test_vma_merge_new_with_close(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ };
+ struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
+ struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
+ const struct vm_operations_struct vm_ops = {
+ .close = dummy_close,
+ };
+ struct vm_area_struct *vma;
+
+ /*
+ * We should allow the partial merge of a proposed new VMA if the
+ * surrounding VMAs have vm_ops->close() hooks (but are otherwise
+ * compatible), e.g.:
+ *
+ * New VMA
+ * A v-------v B
+ * |-----| |-----|
+ * close close
+ *
+ * Since the rule is to not DELETE a VMA with a close operation, this
+ * should be permitted, only rather than expanding A and deleting B, we
+ * should simply expand A and leave B intact, e.g.:
+ *
+ * New VMA
+ * A B
+ * |------------||-----|
+ * close close
+ */
+
+ /* Have prev and next have a vm_ops->close() hook. */
+ vma_prev->vm_ops = &vm_ops;
+ vma_next->vm_ops = &vm_ops;
+
+ vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
+ vma = merge_new(&vmg);
+ ASSERT_NE(vma, NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x5000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+ ASSERT_EQ(vma->vm_ops, &vm_ops);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 2);
+
+ cleanup_mm(&mm, &vmi);
+ return true;
+}
+
+static bool test_merge_existing(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vm_area_struct *vma, *vma_prev, *vma_next;
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ };
+ const struct vm_operations_struct vm_ops = {
+ .close = dummy_close,
+ };
+
+ /*
+ * Merge right case - partial span.
+ *
+ * <->
+ * 0123456789
+ * VVVVNNN
+ * ->
+ * 0123456789
+ * VNNNNNN
+ */
+ vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
+ vma->vm_ops = &vm_ops; /* This should have no impact. */
+ vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
+ vma_next->vm_ops = &vm_ops; /* This should have no impact. */
+ vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
+ vmg.vma = vma;
+ vmg.prev = vma;
+ vma->anon_vma = &dummy_anon_vma;
+ ASSERT_EQ(merge_existing(&vmg), vma_next);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_next->vm_start, 0x3000);
+ ASSERT_EQ(vma_next->vm_end, 0x9000);
+ ASSERT_EQ(vma_next->vm_pgoff, 3);
+ ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
+ ASSERT_EQ(vma->vm_start, 0x2000);
+ ASSERT_EQ(vma->vm_end, 0x3000);
+ ASSERT_EQ(vma->vm_pgoff, 2);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_TRUE(vma_write_started(vma_next));
+ ASSERT_EQ(mm.map_count, 2);
+
+ /* Clear down and reset. */
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
+
+ /*
+ * Merge right case - full span.
+ *
+ * <-->
+ * 0123456789
+ * VVVVNNN
+ * ->
+ * 0123456789
+ * NNNNNNN
+ */
+ vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
+ vma_next->vm_ops = &vm_ops; /* This should have no impact. */
+ vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
+ vmg.vma = vma;
+ vma->anon_vma = &dummy_anon_vma;
+ ASSERT_EQ(merge_existing(&vmg), vma_next);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_next->vm_start, 0x2000);
+ ASSERT_EQ(vma_next->vm_end, 0x9000);
+ ASSERT_EQ(vma_next->vm_pgoff, 2);
+ ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma_next));
+ ASSERT_EQ(mm.map_count, 1);
+
+ /* Clear down and reset. We should have deleted vma. */
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
+
+ /*
+ * Merge left case - partial span.
+ *
+ * <->
+ * 0123456789
+ * PPPVVVV
+ * ->
+ * 0123456789
+ * PPPPPPV
+ */
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
+ vma->vm_ops = &vm_ops; /* This should have no impact. */
+ vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+ vma->anon_vma = &dummy_anon_vma;
+
+ ASSERT_EQ(merge_existing(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x6000);
+ ASSERT_EQ(vma_prev->vm_pgoff, 0);
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ ASSERT_EQ(vma->vm_start, 0x6000);
+ ASSERT_EQ(vma->vm_end, 0x7000);
+ ASSERT_EQ(vma->vm_pgoff, 6);
+ ASSERT_TRUE(vma_write_started(vma_prev));
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 2);
+
+ /* Clear down and reset. */
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
+
+ /*
+ * Merge left case - full span.
+ *
+ * <-->
+ * 0123456789
+ * PPPVVVV
+ * ->
+ * 0123456789
+ * PPPPPPP
+ */
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
+ vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+ vma->anon_vma = &dummy_anon_vma;
+ ASSERT_EQ(merge_existing(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x7000);
+ ASSERT_EQ(vma_prev->vm_pgoff, 0);
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma_prev));
+ ASSERT_EQ(mm.map_count, 1);
+
+ /* Clear down and reset. We should have deleted vma. */
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
+
+ /*
+ * Merge both case.
+ *
+ * <-->
+ * 0123456789
+ * PPPVVVVNNN
+ * ->
+ * 0123456789
+ * PPPPPPPPPP
+ */
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
+ vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+ vma->anon_vma = &dummy_anon_vma;
+ ASSERT_EQ(merge_existing(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x9000);
+ ASSERT_EQ(vma_prev->vm_pgoff, 0);
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_write_started(vma_prev));
+ ASSERT_EQ(mm.map_count, 1);
+
+ /* Clear down and reset. We should have deleted prev and next. */
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
+
+ /*
+ * Non-merge ranges. The modified VMA merge operation assumes that the
+ * caller always specifies ranges within the input VMA so we need only
+ * examine these cases.
+ *
+ * -
+ * -
+ * -
+ * <->
+ * <>
+ * <>
+ * 0123456789a
+ * PPPVVVVVNNN
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
+
+ vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
+ vmg.prev = vma;
+ vmg.vma = vma;
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
+ vmg.prev = vma;
+ vmg.vma = vma;
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
+ vmg.prev = vma;
+ vmg.vma = vma;
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
+ vmg.prev = vma;
+ vmg.vma = vma;
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
+ vmg.prev = vma;
+ vmg.vma = vma;
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
+ vmg.prev = vma;
+ vmg.vma = vma;
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
+
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);
+
+ return true;
+}
+
+static bool test_anon_vma_non_mergeable(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vm_area_struct *vma, *vma_prev, *vma_next;
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ };
+ struct anon_vma_chain dummy_anon_vma_chain1 = {
+ .anon_vma = &dummy_anon_vma,
+ };
+ struct anon_vma_chain dummy_anon_vma_chain2 = {
+ .anon_vma = &dummy_anon_vma,
+ };
+
+ /*
+ * In the case of modified VMA merge, merging both left and right VMAs
+ * but where prev and next have incompatible anon_vma objects, we revert
+ * to a merge of prev and VMA:
+ *
+ * <-->
+ * 0123456789
+ * PPPVVVVNNN
+ * ->
+ * 0123456789
+ * PPPPPPPNNN
+ */
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
+
+ /*
+ * Give prev and next a single anon_vma_chain entry each, so they will
+ * merge with the NULL vmg->anon_vma.
+ *
+ * However, when prev is compared to next, the merge should fail.
+ */
+
+ INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
+ ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
+ vma_prev->anon_vma = &dummy_anon_vma;
+ ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));
+
+ INIT_LIST_HEAD(&vma_next->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
+ ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
+ vma_next->anon_vma = (struct anon_vma *)2;
+ ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
+
+ ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));
+
+ vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+
+ ASSERT_EQ(merge_existing(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x7000);
+ ASSERT_EQ(vma_prev->vm_pgoff, 0);
+ ASSERT_TRUE(vma_write_started(vma_prev));
+ ASSERT_FALSE(vma_write_started(vma_next));
+
+ /* Clear down and reset. */
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
+
+ /*
+ * Now consider the new VMA case. This is equivalent, only adding a new
+ * VMA in a gap between prev and next.
+ *
+ * <-->
+ * 0123456789
+ * PPP****NNN
+ * ->
+ * 0123456789
+ * PPPPPPPNNN
+ */
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
+
+ INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
+ vma_prev->anon_vma = (struct anon_vma *)1;
+
+ INIT_LIST_HEAD(&vma_next->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
+ vma_next->anon_vma = (struct anon_vma *)2;
+
+ vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
+ vmg.prev = vma_prev;
+
+ ASSERT_EQ(merge_new(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x7000);
+ ASSERT_EQ(vma_prev->vm_pgoff, 0);
+ ASSERT_TRUE(vma_write_started(vma_prev));
+ ASSERT_FALSE(vma_write_started(vma_next));
+
+ /* Final cleanup. */
+ ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
+
+ return true;
+}
+
+static bool test_dup_anon_vma(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ };
+ struct anon_vma_chain dummy_anon_vma_chain = {
+ .anon_vma = &dummy_anon_vma,
+ };
+ struct vm_area_struct *vma_prev, *vma_next, *vma;
+
+ reset_dummy_anon_vma();
+
+ /*
+ * Expanding a VMA to delete the next one duplicates next's anon_vma
+ * and assigns it to the expanded VMA.
+ *
+ * This covers new VMA merging, as these operations amount to a VMA
+ * expand.
+ */
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma_next->anon_vma = &dummy_anon_vma;
+
+ vmg_set_range(&vmg, 0, 0x5000, 0, flags);
+ vmg.vma = vma_prev;
+ vmg.next = vma_next;
+
+ ASSERT_EQ(expand_existing(&vmg), 0);
+
+ /* Will have been cloned. */
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
+
+ /* Cleanup ready for next run. */
+ cleanup_mm(&mm, &vmi);
+
+ /*
+ * next has anon_vma, we assign to prev.
+ *
+ * |<----->|
+ * |-------*********-------|
+ * prev vma next
+ * extend delete delete
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
+
+ /* Initialise avc so mergeability check passes. */
+ INIT_LIST_HEAD(&vma_next->anon_vma_chain);
+ list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);
+
+ vma_next->anon_vma = &dummy_anon_vma;
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+
+ ASSERT_EQ(merge_existing(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x8000);
+
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
+
+ cleanup_mm(&mm, &vmi);
+
+ /*
+ * vma has anon_vma, we assign to prev.
+ *
+ * |<----->|
+ * |-------*********-------|
+ * prev vma next
+ * extend delete delete
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
+
+ vma->anon_vma = &dummy_anon_vma;
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+
+ ASSERT_EQ(merge_existing(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x8000);
+
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
+
+ cleanup_mm(&mm, &vmi);
+
+ /*
+ * vma has anon_vma, we assign to prev.
+ *
+ * |<----->|
+ * |-------*************
+ * prev vma
+ * extend shrink/delete
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
+
+ vma->anon_vma = &dummy_anon_vma;
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+
+ ASSERT_EQ(merge_existing(&vmg), vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+
+ ASSERT_EQ(vma_prev->vm_start, 0);
+ ASSERT_EQ(vma_prev->vm_end, 0x5000);
+
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_prev->anon_vma->was_cloned);
+
+ cleanup_mm(&mm, &vmi);
+
+ /*
+ * vma has anon_vma, we assign to next.
+ *
+ * |<----->|
+ * *************-------|
+ * vma next
+ * shrink/delete extend
+ */
+
+ vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
+ vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
+
+ vma->anon_vma = &dummy_anon_vma;
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma;
+ vmg.vma = vma;
+
+ ASSERT_EQ(merge_existing(&vmg), vma_next);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+
+ ASSERT_EQ(vma_next->vm_start, 0x3000);
+ ASSERT_EQ(vma_next->vm_end, 0x8000);
+
+ ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(vma_next->anon_vma->was_cloned);
+
+ cleanup_mm(&mm, &vmi);
+ return true;
+}
+
+static bool test_vmi_prealloc_fail(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vma_merge_struct vmg = {
+ .mm = &mm,
+ .vmi = &vmi,
+ };
+ struct vm_area_struct *vma_prev, *vma;
+
+ /*
+ * We are merging vma into prev, with vma possessing an anon_vma, which
+ * will be duplicated. We cause the vmi preallocation to fail and assert
+ * the duplicated anon_vma is unlinked.
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma->anon_vma = &dummy_anon_vma;
+
+ vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.vma = vma;
+
+ fail_prealloc = true;
+
+ /* This will cause the merge to fail. */
+ ASSERT_EQ(merge_existing(&vmg), NULL);
+ ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
+ /* We will already have assigned the anon_vma. */
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ /* And it was both cloned and unlinked. */
+ ASSERT_TRUE(dummy_anon_vma.was_cloned);
+ ASSERT_TRUE(dummy_anon_vma.was_unlinked);
+
+ cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */
+
+ /*
+ * We repeat the same operation for expanding a VMA, which is what new
+ * VMA merging ultimately uses too. This asserts that unlinking is
+ * performed in this case too.
+ */
+
+ vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma->anon_vma = &dummy_anon_vma;
+
+ vmg_set_range(&vmg, 0, 0x5000, 3, flags);
+ vmg.vma = vma_prev;
+ vmg.next = vma;
+
+ fail_prealloc = true;
+ ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
+ ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
+
+ ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
+ ASSERT_TRUE(dummy_anon_vma.was_cloned);
+ ASSERT_TRUE(dummy_anon_vma.was_unlinked);
+
+ cleanup_mm(&mm, &vmi);
+ return true;
+}
+
+static bool test_merge_extend(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0x1000);
+ struct vm_area_struct *vma;
+
+ vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
+ alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
+
+ /*
+ * Extend a VMA into the gap between itself and the following VMA.
+ * This should result in a merge.
+ *
+ * <->
+ * * *
+ *
+ */
+
+ ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
+ ASSERT_EQ(vma->vm_start, 0);
+ ASSERT_EQ(vma->vm_end, 0x4000);
+ ASSERT_EQ(vma->vm_pgoff, 0);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(mm.map_count, 1);
+
+ cleanup_mm(&mm, &vmi);
+ return true;
+}
+
+static bool test_copy_vma(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ bool need_locks = false;
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vm_area_struct *vma, *vma_new, *vma_next;
+
+ /* Move backwards and do not merge. */
+
+ vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
+
+ ASSERT_NE(vma_new, vma);
+ ASSERT_EQ(vma_new->vm_start, 0);
+ ASSERT_EQ(vma_new->vm_end, 0x2000);
+ ASSERT_EQ(vma_new->vm_pgoff, 0);
+
+ cleanup_mm(&mm, &vmi);
+
+ /* Move a VMA into position next to another and merge the two. */
+
+    The string is a path representation of the position of the USB device on the USB bus tree.
+
+    This path is used to find a USB device on the bus or all devices connected to a hub.
+    The path is made up of the number of the USB controller followed by the ports of the hub tree."""
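+    # Illustrative: a device on bus 3 behind hub ports 2 and 4 yields "3-2.4".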
+ ASSERT_EQ(vma_new, vma_next);
+
+ cleanup_mm(&mm, &vmi);
+ return true;
+}
+
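+# Map printable ASCII to itself and every other byte value to "." for hexdumps.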
+int main(void)
+{
+ int num_tests = 0, num_fail = 0;
+
+ maple_tree_init();
+
+#define TEST(name) \
+ do { \
+ num_tests++; \
+ if (!test_##name()) { \
+ num_fail++; \
+ fprintf(stderr, "Test " #name " FAILED\n"); \
+ } \
+ } while (0)
+
+ /* Very simple tests to kick the tyres. */
+ TEST(simple_merge);
+ TEST(simple_modify);
+ TEST(simple_expand);
+ TEST(simple_shrink);
+
+ TEST(merge_new);
+ TEST(vma_merge_special_flags);
+ TEST(vma_merge_with_close);
+ TEST(vma_merge_new_with_close);
+ TEST(merge_existing);
+ TEST(anon_vma_non_mergeable);
+ TEST(dup_anon_vma);
+ TEST(vmi_prealloc_fail);
+ TEST(merge_extend);
+ TEST(copy_vma);
+
+#undef TEST
+
+ printf("%d tests run, %d passed, %d failed.\n",
+ num_tests, num_tests - num_fail, num_fail);
+
+    # dev.set_configuration() is not necessary since g_multi has only one configuration
+}
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
new file mode 100644
index 000000000000..c5b9da034511
--- /dev/null
+++ b/tools/testing/vma/vma_internal.h
@@ -0,0 +1,923 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vma_internal.h
+ *
+ * Header providing userland wrappers and shims for the functionality provided
+ * by mm/vma_internal.h.
+ *
+ * We make the header guard the same as mm/vma_internal.h, so if this shim
+ * header is included, it precludes the inclusion of the kernel one.
+ */
+
+#ifndef __MM_VMA_INTERNAL_H
+#define __MM_VMA_INTERNAL_H
+
+#define __private
+#define __bitwise
+#define __randomize_layout
+
+#define CONFIG_MMU
+#define CONFIG_PER_VMA_LOCK
+
+#include <stdlib.h>
+
+#include <linux/list.h>
+#include <linux/maple_tree.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+
+#define VM_WARN_ON(_expr) (WARN_ON(_expr))
+#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
+#define VM_BUG_ON(_expr) (BUG_ON(_expr))
+#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
+
+#define VM_NONE 0x00000000
+#define VM_READ 0x00000001
+#define VM_WRITE 0x00000002
+#define VM_EXEC 0x00000004
+#define VM_SHARED 0x00000008
+#define VM_MAYREAD 0x00000010
+#define VM_MAYWRITE 0x00000020
+#define VM_GROWSDOWN 0x00000100
+#define VM_PFNMAP 0x00000400
+#define VM_LOCKED 0x00002000
+#define VM_IO 0x00004000
+#define VM_DONTEXPAND 0x00040000
+#define VM_ACCOUNT 0x00100000
+#define VM_MIXEDMAP 0x10000000
+#define VM_STACK VM_GROWSDOWN
+#define VM_SHADOW_STACK VM_NONE
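+        # Every 9p message begins with a little-endian u32 holding the total
+        # message length, so keep reading until the whole message is buffered.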
+#define VM_SOFTDIRTY 0
+
+#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+
+#define FIRST_USER_ADDRESS 0UL
+#define USER_PGTABLES_CEILING 0UL
+
+#define vma_policy(vma) NULL
+
+#define down_write_nest_lock(sem, nest_lock)
+
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#define for_each_vma(__vmi, __vma) \
+ while (((__vma) = vma_next(&(__vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(__vmi, __vma, __end) \
+ while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
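+        # A bulk transfer that is an exact multiple of wMaxPacketSize must be
+        # terminated by a zero-length packet so the receiver knows it is done.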
+
+#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
+
+#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
+#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)
+
+#define TASK_SIZE ((1ul << 47) - PAGE_SIZE)
+
+#define AS_MM_ALL_LOCKS 2
+
+/* We hardcode this for now. */
+#define sysctl_max_map_count 0x1000000UL
+
+#define pgoff_t unsigned long
+typedef unsigned long pgprotval_t;
+typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
+typedef unsigned long vm_flags_t;
+typedef __bitwise unsigned int vm_fault_t;
+
+/*
+ * The shared stubs do not implement this; it amounts to an fprintf(stderr, ...)
+ * either way :)
+ */
+#define pr_warn_once pr_err
+
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+struct kref {
+ refcount_t refcount;
+};
+
+/*
+ * Define the task command name length as enum, then it can be visible to
+ * BPF programs.
+ */
+enum {
+ TASK_COMM_LEN = 16,
+};
+
+struct task_struct {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ struct mm_struct *mm;
+};
+
+struct task_struct *get_current(void);
+#define current get_current()
+
+struct anon_vma {
+ struct anon_vma *root;
+ struct rb_root_cached rb_root;
+
+ /* Test fields. */
+ bool was_cloned;
+ bool was_unlinked;
+};
+
+struct anon_vma_chain {
+ struct anon_vma *anon_vma;
+ struct list_head same_vma;
+};
+
+struct anon_vma_name {
+ struct kref kref;
+ /* The name needs to be at the end because it is dynamically sized. */
+ char name[];
+};
+
+struct vma_iterator {
+ struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, __mm, __addr) \
+ struct vma_iterator name = { \
+ .mas = { \
+ .tree = &(__mm)->mm_mt, \
+ .index = __addr, \
+ .node = NULL, \
+ .status = ma_start, \
+ }, \
+ }
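+
+/*
+ * Illustrative use of the iterator (assuming an initialised mm):
+ *
+ *	VMA_ITERATOR(vmi, &mm, 0);
+ *	struct vm_area_struct *vma;
+ *
+ *	for_each_vma(vmi, vma)
+ *		printf("%lx-%lx\n", vma->vm_start, vma->vm_end);
+ */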
+
+struct address_space {
+ struct rb_root_cached i_mmap;
+ unsigned long flags;
+ atomic_t i_mmap_writable;
+};
+
+struct vm_userfaultfd_ctx {};
+struct mempolicy {};
+struct mmu_gather {};
+struct mutex {};
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = {}
+
+struct mm_struct {
+ struct maple_tree mm_mt;
+ int map_count; /* number of VMAs */
+ unsigned long total_vm; /* Total pages mapped */
+ unsigned long locked_vm; /* Pages that have PG_mlocked set */
+ unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+ unsigned long stack_vm; /* VM_STACK */
+};
+
+struct vma_lock {
+ struct rw_semaphore lock;
+};
+
+
+struct file {
+ struct address_space *f_mapping;
+};
+
+struct vm_area_struct {
+ /* The first cache line has the info for VMA tree walking. */
+
+ union {
+ struct {
+ /* VMA covers [vm_start; vm_end) addresses within mm */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ };
+#ifdef CONFIG_PER_VMA_LOCK
+ struct rcu_head vm_rcu; /* Used for deferred freeing. */
+#endif
+ };
+
+ struct mm_struct *vm_mm; /* The address space we belong to. */
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+
+ /*
+ * Flags, see mm.h.
+ * To modify use vm_flags_{init|reset|set|clear|mod} functions.
+ */
+ union {
+ const vm_flags_t vm_flags;
+ vm_flags_t __private __vm_flags;
+ };
+
+#ifdef CONFIG_PER_VMA_LOCK
+ /* Flag to indicate areas detached from the mm->mm_mt tree */
+ bool detached;
+
+ /*
+ * Can only be written (using WRITE_ONCE()) while holding both:
+ * - mmap_lock (in write mode)
+ * - vm_lock->lock (in write mode)
+ * Can be read reliably while holding one of:
+ * - mmap_lock (in read or write mode)
+ * - vm_lock->lock (in read or write mode)
+ * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+ * while holding nothing (except RCU to keep the VMA struct allocated).
+ *
+ * This sequence counter is explicitly allowed to overflow; sequence
+ * counter reuse can only lead to occasional unnecessary use of the
+ * slowpath.
+ */
+ int vm_lock_seq;
+ struct vma_lock *vm_lock;
+#endif
+
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap interval tree.
+ *
+ */
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+
+ /*
+ * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
+ * list, after a COW of one of the file pages. A MAP_SHARED vma
+ * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
+ * or brk vma (with NULL file) can only be in an anon_vma list.
+ */
+ struct list_head anon_vma_chain; /* Serialized by mmap_lock &
+ * page_table_lock */
+ struct anon_vma *anon_vma; /* Serialized by page_table_lock */
+
+ /* Function pointers to deal with this struct. */
+ const struct vm_operations_struct *vm_ops;
+
+ /* Information about our backing store: */
+ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
+ units */
+ struct file * vm_file; /* File we map to (can be NULL). */
+ void * vm_private_data; /* was vm_pte (shared mem) */
+
+#ifdef CONFIG_ANON_VMA_NAME
+ /*
+ * For private and shared anonymous mappings, a pointer to a null
+ * terminated string containing the name given to the vma, or NULL if
+ * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+ */
+ struct anon_vma_name *anon_name;
+#endif
+#ifdef CONFIG_SWAP
+ atomic_long_t swap_readahead_info;
+#endif
+#ifndef CONFIG_MMU
+ struct vm_region *vm_region; /* NOMMU mapping region */
+#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+ struct vma_numab_state *numab_state; /* NUMA Balancing state */
+#endif
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+} __randomize_layout;
+
+struct vm_fault {};
+
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ /**
+ * @close: Called when the VMA is being removed from the MM.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
+ void (*close)(struct vm_area_struct * area);
+ /* Called any time before splitting to check if it's allowed */
+ int (*may_split)(struct vm_area_struct *area, unsigned long addr);
+ int (*mremap)(struct vm_area_struct *area);
+ /*
+ * Called by mprotect() to make driver-specific permission
+ * checks before mprotect() is finalised. The VMA must not
+ * be modified. Returns 0 if mprotect() can proceed.
+ */
+ int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long newflags);
+ vm_fault_t (*fault)(struct vm_fault *vmf);
+ vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
+ vm_fault_t (*map_pages)(struct vm_fault *vmf,
+ pgoff_t start_pgoff, pgoff_t end_pgoff);
+ unsigned long (*pagesize)(struct vm_area_struct * area);
+
+	/* notification that a previously read-only page is about to become
+	 * writable; if an error is returned it will cause a SIGBUS */
+ vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
+
+ /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
+ vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
+
+ /* called by access_process_vm when get_user_pages() fails, typically
+ * for use by special VMAs. See also generic_access_phys() for a generic
+ * implementation useful for any iomem mapping.
+ */
+ int (*access)(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+
+ /* Called by the /proc/PID/maps code to ask the vma whether it
+ * has a special name. Returning non-NULL will also cause this
+ * vma to be dumped unconditionally. */
+ const char *(*name)(struct vm_area_struct *vma);
+
+#ifdef CONFIG_NUMA
+ /*
+ * set_policy() op must add a reference to any non-NULL @new mempolicy
+ * to hold the policy upon return. Caller should pass NULL @new to
+ * remove a policy and fall back to surrounding context--i.e. do not
+ * install a MPOL_DEFAULT policy, nor the task or system default
+ * mempolicy.
+ */
+ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+ /*
+ * get_policy() op must add reference [mpol_get()] to any policy at
+ * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
+ * in mm/mempolicy.c will do this automatically.
+ * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
+ * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+ * must return NULL--i.e., do not "fallback" to task or system default
+ * policy.
+ */
+ struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+ unsigned long addr, pgoff_t *ilx);
+#endif
+ /*
+ * Called by vm_normal_page() for special PTEs to find the
+ * page for @addr. This is useful if the default behavior
+ * (using pte_page()) would not find the correct page.
+ */
+ struct page *(*find_special_page)(struct vm_area_struct *vma,
+ unsigned long addr);
+};
+
+static inline void vma_iter_invalidate(struct vma_iterator *vmi)
+{
+ mas_pause(&vmi->mas);
+}
+
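+/* Simplified shim: combine the protection bits by OR-ing them together. */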
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
+}
+
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ return __pgprot(vm_flags);
+}
+
+static inline bool is_shared_maywrite(vm_flags_t vm_flags)
+{
+ return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
+ (VM_SHARED | VM_MAYWRITE);
+}
+
+static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+{
+ return is_shared_maywrite(vma->vm_flags);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+ /*
+ * Uses mas_find() to get the first VMA when the iterator starts.
+ * Calling mas_next() could skip the first entry.
+ */
+ return mas_find(&vmi->mas, ULONG_MAX);
+}
+
+static inline bool vma_lock_alloc(struct vm_area_struct *vma)
+{
+ vma->vm_lock = calloc(1, sizeof(struct vma_lock));
+
+ if (!vma->vm_lock)
+ return false;
+
+ init_rwsem(&vma->vm_lock->lock);
+ vma->vm_lock_seq = -1;
+
+ return true;
+}
+
+static inline void vma_assert_write_locked(struct vm_area_struct *);
+static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+{
+	/* When detaching, the vma should be write-locked */
+ if (detached)
+ vma_assert_write_locked(vma);
+ vma->detached = detached;
+}
+
+extern const struct vm_operations_struct vma_dummy_vm_ops;
+
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+ memset(vma, 0, sizeof(*vma));
+ vma->vm_mm = mm;
+ vma->vm_ops = &vma_dummy_vm_ops;
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+ vma_mark_detached(vma, false);
+}
+
+static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma = calloc(1, sizeof(struct vm_area_struct));
+
+ if (!vma)
+ return NULL;
+
+ vma_init(vma, mm);
+ if (!vma_lock_alloc(vma)) {
+ free(vma);
+ return NULL;
+ }
+
+ return vma;
+}
+
+static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
+{
+ struct vm_area_struct *new = calloc(1, sizeof(struct vm_area_struct));
+
+ if (!new)
+ return NULL;
+
+ memcpy(new, orig, sizeof(*new));
+ if (!vma_lock_alloc(new)) {
+ free(new);
+ return NULL;
+ }
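+	/* Give the copy its own empty anon_vma_chain, not orig's memcpy'd one. */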
+ INIT_LIST_HEAD(&new->anon_vma_chain);
+
+ return new;
+}
+
+/*
+ * These are defined in vma.h, but sadly vm_stat_account() is referenced by
+ * kernel/fork.c, so we have to make these broadly available there, and
+ * temporarily define them here to resolve the dependency cycle.
+ */
+
+#define is_exec_mapping(flags) \
+ ((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)
+
+#define is_stack_mapping(flags) \
+ (((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))
+
+#define is_data_mapping(flags) \
+ ((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)
+
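+/*
+ * For example, mapping 4 pages with VM_READ | VM_WRITE | VM_MAYREAD |
+ * VM_MAYWRITE counts as a data mapping (VM_WRITE set, VM_SHARED and VM_STACK
+ * clear), so total_vm and data_vm each grow by 4.
+ */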
+static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
+ long npages)
+{
+ WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
+
+ if (is_exec_mapping(flags))
+ mm->exec_vm += npages;
+ else if (is_stack_mapping(flags))
+ mm->stack_vm += npages;
+ else if (is_data_mapping(flags))
+ mm->data_vm += npages;
+}
+
+#undef is_exec_mapping
+#undef is_stack_mapping
+#undef is_data_mapping
+
+/* Currently stubbed but we may later wish to un-stub. */
+static inline void vm_acct_memory(long pages);
+static inline void vm_unacct_memory(long pages)
+{
+ vm_acct_memory(-pages);
+}
+
+static inline void mapping_allow_writable(struct address_space *mapping)
+{
+ atomic_inc(&mapping->i_mmap_writable);
+}
+
+static inline void vma_set_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ pgoff_t pgoff)
+{
+ vma->vm_start = start;
+ vma->vm_end = end;
+ vma->vm_pgoff = pgoff;
+}
+
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_find(&vmi->mas, max - 1);
+}
+
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end, gfp_t gfp)
+{
+ __mas_set_range(&vmi->mas, start, end - 1);
+ mas_store_gfp(&vmi->mas, NULL, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void mmap_assert_locked(struct mm_struct *);
+static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+ unsigned long start_addr,
+ unsigned long end_addr)
+{
+ unsigned long index = start_addr;
+
+ mmap_assert_locked(mm);
+ return mt_find(&mm->mm_mt, &index, end_addr - 1);
+}
+
+static inline
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
+{
+ return mtree_load(&mm->mm_mt, addr);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+ return mas_prev(&vmi->mas, 0);
+}
+
+static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
+{
+ mas_set(&vmi->mas, addr);
+}
+
+static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+{
+ return !vma->vm_ops;
+}
+
+/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
+#define vma_iter_load(vmi) \
+ mas_walk(&(vmi)->mas)
+
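+/*
+ * Find the VMA covering @addr (or the next one above it, if none covers it)
+ * and set *pprev to the VMA immediately preceding @addr, mirroring the
+ * kernel's find_vma_prev().
+ */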
+static inline struct vm_area_struct *
+find_vma_prev(struct mm_struct *mm, unsigned long addr,
+ struct vm_area_struct **pprev)
+{
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, addr);
+
+ vma = vma_iter_load(&vmi);
+ *pprev = vma_prev(&vmi);
+ if (!vma)
+ vma = vma_next(&vmi);
+ return vma;
+}
+
+#undef vma_iter_load
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+ struct mm_struct *mm, unsigned long addr)
+{
+ mas_init(&vmi->mas, &mm->mm_mt, addr);
+}
+
+/* Stubbed functions. */
+
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+ struct vm_userfaultfd_ctx vm_ctx)
+{
+ return true;
+}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ return true;
+}
+
+static inline void might_sleep(void)
+{
+}
+
+static inline unsigned long vma_pages(struct vm_area_struct *vma)
+{
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+}
+
+static inline void fput(struct file *)
+{
+}
+
+static inline void mpol_put(struct mempolicy *)
+{
+}
+
+static inline void vma_lock_free(struct vm_area_struct *vma)
+{
+ free(vma->vm_lock);
+}
+
+static inline void __vm_area_free(struct vm_area_struct *vma)
+{
+ vma_lock_free(vma);
+ free(vma);
+}
+
+static inline void vm_area_free(struct vm_area_struct *vma)
+{
+ __vm_area_free(vma);
+}
+
+static inline void lru_add_drain(void)
+{
+}
+
+static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
+{
+}
+
+static inline void update_hiwater_rss(struct mm_struct *)
+{
+}
+
+static inline void update_hiwater_vm(struct mm_struct *)
+{
+}
+
+static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
+ struct vm_area_struct *vma, unsigned long start_addr,
+ unsigned long end_addr, unsigned long tree_end,
+ bool mm_wr_locked)
+{
+ (void)tlb;
+ (void)mas;
+ (void)vma;
+ (void)start_addr;
+ (void)end_addr;
+ (void)tree_end;
+ (void)mm_wr_locked;
+}
+
+static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
+ struct vm_area_struct *vma, unsigned long floor,
+ unsigned long ceiling, bool mm_wr_locked)
+{
+ (void)tlb;
+ (void)mas;
+ (void)vma;
+ (void)floor;
+ (void)ceiling;
+ (void)mm_wr_locked;
+}
+
+static inline void mapping_unmap_writable(struct address_space *)
+{
+}
+
+static inline void flush_dcache_mmap_lock(struct address_space *)
+{
+}
+
+static inline void tlb_finish_mmu(struct mmu_gather *)
+{
+}
+
+static inline void get_file(struct file *)
+{
+}
+
+static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
+{
+ return 0;
+}
+
+static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+{
+	/* For testing purposes, indicate that an anon_vma has been cloned. */
+ if (src->anon_vma != NULL) {
+ dst->anon_vma = src->anon_vma;
+ dst->anon_vma->was_cloned = true;
+ }
+
+ return 0;
+}
+
+static inline void vma_start_write(struct vm_area_struct *vma)
+{
+ /* Used to indicate to tests that a write operation has begun. */
+ vma->vm_lock_seq++;
+}
+
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ long adjust_next)
+{
+ (void)vma;
+ (void)start;
+ (void)end;
+ (void)adjust_next;
+}
+
+static inline void vma_iter_free(struct vma_iterator *vmi)
+{
+ mas_destroy(&vmi->mas);
+}
+
+static inline
+struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
+{
+ return mas_next_range(&vmi->mas, ULONG_MAX);
+}
+
+static inline void vm_acct_memory(long pages)
+{
+}
+
+static inline void vma_interval_tree_insert(struct vm_area_struct *,
+ struct rb_root_cached *)
+{
+}
+
+static inline void vma_interval_tree_remove(struct vm_area_struct *,
+ struct rb_root_cached *)
+{
+}
+
+static inline void flush_dcache_mmap_unlock(struct address_space *)
+{
+}
+
+static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
+ struct rb_root_cached *)
+{
+}
+
+static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
+ struct rb_root_cached *)
+{
+}
+
+static inline void uprobe_mmap(struct vm_area_struct *)
+{
+}
+
+static inline void uprobe_munmap(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ (void)vma;
+ (void)start;
+ (void)end;
+}
+
+static inline void i_mmap_lock_write(struct address_space *)
+{
+}
+
+static inline void anon_vma_lock_write(struct anon_vma *)
+{
+}
+
+static inline void vma_assert_write_locked(struct vm_area_struct *)
+{
+}
+
+static inline void unlink_anon_vmas(struct vm_area_struct *vma)
+{
+ /* For testing purposes, indicate that the anon_vma was unlinked. */
+ vma->anon_vma->was_unlinked = true;
+}
+
+static inline void anon_vma_unlock_write(struct anon_vma *)
+{
+}
+
+static inline void i_mmap_unlock_write(struct address_space *)
+{
+}
+
+static inline void anon_vma_merge(struct vm_area_struct *,
+ struct vm_area_struct *)
+{
+}
+
+static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ struct list_head *unmaps)
+{
+ (void)vma;
+ (void)start;
+ (void)end;
+ (void)unmaps;
+
+ return 0;
+}
+
+static inline void mmap_write_downgrade(struct mm_struct *)
+{
+}
+
+static inline void mmap_read_unlock(struct mm_struct *)
+{
+}
+
+static inline void mmap_write_unlock(struct mm_struct *)
+{
+}
+
+static inline bool can_modify_mm(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ (void)mm;
+ (void)start;
+ (void)end;
+
+ return true;
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ (void)mm;
+ (void)start;
+ (void)end;
+}
+
+static inline void mmap_assert_locked(struct mm_struct *)
+{
+}
+
+static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
+{
+ return true;
+}
+
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+ unsigned long vm_flags)
+{
+ (void)vma;
+ (void)vm_flags;
+}
+
+static inline bool mapping_can_writeback(struct address_space *)
+{
+ return true;
+}
+
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
+{
+ return false;
+}
+
+static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
+{
+ return false;
+}
+
+static inline bool userfaultfd_wp(struct vm_area_struct *)
+{
+ return false;
+}
+
+static inline void mmap_assert_write_locked(struct mm_struct *)
+{
+}
+
+static inline void mutex_lock(struct mutex *)
+{
+}
+
+static inline void mutex_unlock(struct mutex *)
+{
+}
+
+static inline bool mutex_is_locked(struct mutex *)
+{
+ return true;
+}
+
+static inline bool signal_pending(void *)
+{
+ return false;
+}
+
+#endif /* __MM_VMA_INTERNAL_H */
diff --git a/tools/usb/p9_fwd.py b/tools/usb/p9_fwd.py
new file mode 100755
index 000000000000..12c76cbb046b
--- /dev/null
+++ b/tools/usb/p9_fwd.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import argparse
+import errno
+import logging
+import socket
+import struct
+import time
+
+import usb.core
+import usb.util
+
+
+def path_from_usb_dev(dev):
+ """Takes a pyUSB device as argument and returns a string.
+ The string is a Path representation of the position of the USB device on the USB bus tree.
+
+ This path is used to find a USB device on the bus or all devices connected to a HUB.
+ The path is made up of the number of the USB controller followed be the ports of the HUB tree."""
+ if dev.port_numbers:
+ dev_path = ".".join(str(i) for i in dev.port_numbers)
+ return f"{dev.bus}-{dev_path}"
+ return ""
+
+
+HEXDUMP_FILTER = "".join(chr(x).isprintable() and chr(x) or "." for x in range(128)) + "." * 128
+
+
+class Forwarder:
+ @staticmethod
+ def _log_hexdump(data):
+ if not logging.root.isEnabledFor(logging.TRACE):
+ return
+ L = 16
+ for c in range(0, len(data), L):
+ chars = data[c : c + L]
+ dump = " ".join(f"{x:02x}" for x in chars)
+ printable = "".join(HEXDUMP_FILTER[x] for x in chars)
+ line = f"{c:08x} {dump:{L*3}s} |{printable:{L}s}|"
+ logging.root.log(logging.TRACE, "%s", line)
+
+ def __init__(self, server, vid, pid, path):
+ self.stats = {
+ "c2s packets": 0,
+ "c2s bytes": 0,
+ "s2c packets": 0,
+ "s2c bytes": 0,
+ }
+ self.stats_logged = time.monotonic()
+
+ def find_filter(dev):
+ dev_path = path_from_usb_dev(dev)
+ if path is not None:
+ return dev_path == path
+ return True
+
+ dev = usb.core.find(idVendor=vid, idProduct=pid, custom_match=find_filter)
+ if dev is None:
+ raise ValueError("Device not found")
+
+ logging.info(f"found device: {dev.bus}/{dev.address} located at {path_from_usb_dev(dev)}")
+
+ # dev.set_configuration() is not necessary since g_multi has only one
+ usb9pfs = None
+ # g_multi adds 9pfs as last interface
+ cfg = dev.get_active_configuration()
+ for intf in cfg:
+ # we have to detach the usb-storage driver from multi gadget since
+ # stall option could be set, which will lead to spontaneous port
+ # resets and our transfers will run dead
+ if intf.bInterfaceClass == 0x08:
+ if dev.is_kernel_driver_active(intf.bInterfaceNumber):
+ dev.detach_kernel_driver(intf.bInterfaceNumber)
+
+ if intf.bInterfaceClass == 0xFF and intf.bInterfaceSubClass == 0xFF and intf.bInterfaceProtocol == 0x09:
+ usb9pfs = intf
+ if usb9pfs is None:
+ raise ValueError("Interface not found")
+
+ logging.info(f"claiming interface:\n{usb9pfs}")
+ usb.util.claim_interface(dev, usb9pfs.bInterfaceNumber)
+ ep_out = usb.util.find_descriptor(
+ usb9pfs,
+ custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT,
+ )
+ assert ep_out is not None
+ ep_in = usb.util.find_descriptor(
+ usb9pfs,
+ custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN,
+ )
+ assert ep_in is not None
+ logging.info("interface claimed")
+
+ self.ep_out = ep_out
+ self.ep_in = ep_in
+ self.dev = dev
+
+ # create and connect socket
+ self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.s.connect(server)
+
+ logging.info("connected to server")
+
+ def c2s(self):
+ """forward a request from the USB client to the TCP server"""
+ data = None
+ while data is None:
+ try:
+ logging.log(logging.TRACE, "c2s: reading")
+ data = self.ep_in.read(self.ep_in.wMaxPacketSize)
+ except usb.core.USBTimeoutError:
+ logging.log(logging.TRACE, "c2s: reading timed out")
+ continue
+ except usb.core.USBError as e:
+ if e.errno == errno.EIO:
+ logging.debug("c2s: reading failed with %s, retrying", repr(e))
+ time.sleep(0.5)
+ continue
+ logging.error("c2s: reading failed with %s, aborting", repr(e))
+ raise
+ size = struct.unpack("<I", data[:4])[0]
+ while len(data) < size:
+ data += self.ep_in.read(size - len(data))
+ logging.log(logging.TRACE, "c2s: writing")
+ self._log_hexdump(data)
+ self.s.send(data)
+ logging.debug("c2s: forwarded %i bytes", size)
+ self.stats["c2s packets"] += 1
+ self.stats["c2s bytes"] += size
+
+ def s2c(self):
+ """forward a response from the TCP server to the USB client"""
+ logging.log(logging.TRACE, "s2c: reading")
+ data = self.s.recv(4)
+ size = struct.unpack("<I", data[:4])[0]
+ while len(data) < size:
+ data += self.s.recv(size - len(data))
+ logging.log(logging.TRACE, "s2c: writing")
+ self._log_hexdump(data)
+ while data:
+ written = self.ep_out.write(data)
+ assert written > 0
+ data = data[written:]
+ if size % self.ep_out.wMaxPacketSize == 0:
+ logging.log(logging.TRACE, "sending zero length packet")
+ self.ep_out.write(b"")
+ logging.debug("s2c: forwarded %i bytes", size)
+ self.stats["s2c packets"] += 1
+ self.stats["s2c bytes"] += size
+
+ def log_stats(self):
+ logging.info("statistics:")
+ for k, v in self.stats.items():
+ logging.info(f" {k+':':14s} {v}")
+
+ def log_stats_interval(self, interval=5):
+ if (time.monotonic() - self.stats_logged) < interval:
+ return
+
+ self.log_stats()
+ self.stats_logged = time.monotonic()
+
+
+def try_get_usb_str(dev, name):
+ try:
+ with open(f"/sys/bus/usb/devices/{dev.bus}-{dev.address}/{name}") as f:
+ return f.read().strip()
+ except FileNotFoundError:
+ return None
+
+
+def list_usb(args):
+ vid, pid = [int(x, 16) for x in args.id.split(":", 1)]
+
+ print("Bus | Addr | Manufacturer | Product | ID | Path")
+ print("--- | ---- | ---------------- | ---------------- | --------- | ----")
+ for dev in usb.core.find(find_all=True, idVendor=vid, idProduct=pid):
+ path = path_from_usb_dev(dev) or ""
+ manufacturer = try_get_usb_str(dev, "manufacturer") or "unknown"
+ product = try_get_usb_str(dev, "product") or "unknown"
+ print(
+ f"{dev.bus:3} | {dev.address:4} | {manufacturer:16} | {product:16} | {dev.idVendor:04x}:{dev.idProduct:04x} | {path:18}"
+ )
+
+
+def connect(args):
+ vid, pid = [int(x, 16) for x in args.id.split(":", 1)]
+
+ f = Forwarder(server=(args.server, args.port), vid=vid, pid=pid, path=args.path)
+
+ try:
+ while True:
+ f.c2s()
+ f.s2c()
+ f.log_stats_interval()
+ finally:
+ f.log_stats()
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Forward 9PFS requests from USB to TCP",
+ )
+
+ parser.add_argument("--id", type=str, default="1d6b:0109", help="vid:pid of target device")
+ parser.add_argument("--path", type=str, required=False, help="path of target device")
+ parser.add_argument("-v", "--verbose", action="count", default=0)
+
+ subparsers = parser.add_subparsers()
+ subparsers.required = True
+ subparsers.dest = "command"
+
+ parser_list = subparsers.add_parser("list", help="List all connected 9p gadgets")
+ parser_list.set_defaults(func=list_usb)
+
+ parser_connect = subparsers.add_parser(
+ "connect", help="Forward messages between the usb9pfs gadget and the 9p server"
+ )
+ parser_connect.set_defaults(func=connect)
+ connect_group = parser_connect.add_argument_group()
+ connect_group.required = True
+ parser_connect.add_argument("-s", "--server", type=str, default="127.0.0.1", help="server hostname")
+ parser_connect.add_argument("-p", "--port", type=int, default=564, help="server port")
+
+ args = parser.parse_args()
+
+ logging.TRACE = logging.DEBUG - 5
+ logging.addLevelName(logging.TRACE, "TRACE")
+
+ if args.verbose >= 2:
+ level = logging.TRACE
+ elif args.verbose:
+ level = logging.DEBUG
+ else:
+ level = logging.INFO
+ logging.basicConfig(level=level, format="%(asctime)-15s %(levelname)-8s %(message)s")
+
+ args.func(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
index 5a18b2301a63..e471d8e7cfaa 100644
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -276,7 +276,7 @@ static void help(void)
fprintf(stderr, "Usage: <test> [--help]"
" [--host-affinity H]"
" [--guest-affinity G]"
- " [--ring-size R (default: %d)]"
+ " [--ring-size R (default: %u)]"
" [--run-cycles C (default: %d)]"
" [--batch b]"
" [--outstanding o]"
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 1b90acb6e3fe..375d6285475e 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -40,27 +40,6 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
return 1;
}
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
-{
- struct kvm_coalesced_mmio_ring *ring;
- unsigned avail;
-
- /* Are we able to batch it ? */
-
- /* last is the first free entry
- * check if we don't meet the first used entry
- * there is always one unused entry in the buffer
- */
- ring = dev->kvm->coalesced_mmio_ring;
- avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
- if (avail == 0) {
- /* full */
- return 0;
- }
-
- return 1;
-}
-
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *this, gpa_t addr,
int len, const void *val)
@@ -74,9 +53,15 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
spin_lock(&dev->kvm->ring_lock);
+ /*
+ * last is the index of the entry to fill. Verify userspace hasn't
+ * set last to be out of range, and that there is room in the ring.
+ * Leave one entry free in the ring so that userspace can differentiate
+ * between an empty ring and a full ring.
+ */
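+	/*
+	 * E.g. with a 4-entry ring and first == 0, insert == 3 is rejected:
+	 * (3 + 1) % 4 == 0 == first, i.e. the ring already holds its maximum
+	 * of three entries.
+	 */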
insert = READ_ONCE(ring->last);
- if (!coalesced_mmio_has_room(dev, insert) ||
- insert >= KVM_COALESCED_MMIO_MAX) {
+ if (insert >= KVM_COALESCED_MMIO_MAX ||
+ (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
spin_unlock(&dev->kvm->ring_lock);
return -EOPNOTSUPP;
}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 992f9beb3e7d..6b390b622b72 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -328,12 +328,12 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
f = fdget(args->fd);
- if (!f.file) {
+ if (!fd_file(f)) {
ret = -EBADF;
goto out;
}
- eventfd = eventfd_ctx_fileget(f.file);
+ eventfd = eventfd_ctx_fileget(fd_file(f));
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
goto fail;
@@ -420,7 +420,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
* Check if there was an event already pending on the eventfd
* before we registered, and trigger it as if we didn't miss it.
*/
- events = vfs_poll(f.file, &irqfd->pt);
+ events = vfs_poll(fd_file(f), &irqfd->pt);
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cb2b78e92910..05cbb2548d99 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -136,8 +136,8 @@ static int kvm_no_compat_open(struct inode *inode, struct file *file)
#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
.open = kvm_no_compat_open
#endif
-static int hardware_enable_all(void);
-static void hardware_disable_all(void);
+static int kvm_enable_virtualization(void);
+static void kvm_disable_virtualization(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
@@ -1220,7 +1220,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
if (r)
goto out_err_no_arch_destroy_vm;
- r = hardware_enable_all();
+ r = kvm_enable_virtualization();
if (r)
goto out_err_no_disable;
@@ -1263,7 +1263,7 @@ out_no_coalesced_mmio:
mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
- hardware_disable_all();
+ kvm_disable_virtualization();
out_err_no_disable:
kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
@@ -1360,7 +1360,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
#endif
kvm_arch_free_vm(kvm);
preempt_notifier_dec();
- hardware_disable_all();
+ kvm_disable_virtualization();
mmdrop(mm);
}
@@ -2860,13 +2860,11 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
unsigned long addr, bool write_fault,
bool *writable, kvm_pfn_t *p_pfn)
{
+ struct follow_pfnmap_args args = { .vma = vma, .address = addr };
kvm_pfn_t pfn;
- pte_t *ptep;
- pte_t pte;
- spinlock_t *ptl;
int r;
- r = follow_pte(vma, addr, &ptep, &ptl);
+ r = follow_pfnmap_start(&args);
if (r) {
/*
* get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
@@ -2881,21 +2879,19 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
if (r)
return r;
- r = follow_pte(vma, addr, &ptep, &ptl);
+ r = follow_pfnmap_start(&args);
if (r)
return r;
}
- pte = ptep_get(ptep);
-
- if (write_fault && !pte_write(pte)) {
+ if (write_fault && !args.writable) {
pfn = KVM_PFN_ERR_RO_FAULT;
goto out;
}
if (writable)
- *writable = pte_write(pte);
- pfn = pte_pfn(pte);
+ *writable = args.writable;
+ pfn = args.pfn;
/*
* Get a reference here because callers of *hva_to_pfn* and
@@ -2916,9 +2912,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
*/
if (!kvm_try_get_pfn(pfn))
r = -EFAULT;
-
out:
- pte_unmap_unlock(ptep, ptl);
+ follow_pfnmap_end(&args);
*p_pfn = pfn;
return r;
@@ -3275,6 +3270,9 @@ static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3348,6 +3346,9 @@ static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3378,6 +3379,9 @@ static int __kvm_write_guest_page(struct kvm *kvm,
int r;
unsigned long addr;
+ if (WARN_ON_ONCE(offset + len > PAGE_SIZE))
+ return -EFAULT;
+
addr = gfn_to_hva_memslot(memslot, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
@@ -3581,7 +3585,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
int ret;
while ((seg = next_segment(len, offset)) != 0) {
- ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
+ ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
if (ret < 0)
return ret;
offset = 0;
@@ -5571,137 +5575,67 @@ static struct miscdevice kvm_dev = {
};
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
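+/*
+ * If true (the default), enable virtualization when KVM is loaded rather than
+ * waiting until the first VM is created.
+ */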
+static bool enable_virt_at_load = true;
+module_param(enable_virt_at_load, bool, 0444);
+
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
-static DEFINE_PER_CPU(bool, hardware_enabled);
+static DEFINE_PER_CPU(bool, virtualization_enabled);
+static DEFINE_MUTEX(kvm_usage_lock);
static int kvm_usage_count;
-static int __hardware_enable_nolock(void)
+__weak void kvm_arch_enable_virtualization(void)
+{
+
+}
+
+__weak void kvm_arch_disable_virtualization(void)
+{
+
+}
+
+static int kvm_enable_virtualization_cpu(void)
{
- if (__this_cpu_read(hardware_enabled))
+ if (__this_cpu_read(virtualization_enabled))
return 0;
- if (kvm_arch_hardware_enable()) {
+ if (kvm_arch_enable_virtualization_cpu()) {
pr_info("kvm: enabling virtualization on CPU%d failed\n",
raw_smp_processor_id());
return -EIO;
}
- __this_cpu_write(hardware_enabled, true);
+ __this_cpu_write(virtualization_enabled, true);
return 0;
}
-static void hardware_enable_nolock(void *failed)
-{
- if (__hardware_enable_nolock())
- atomic_inc(failed);
-}
-
static int kvm_online_cpu(unsigned int cpu)
{
- int ret = 0;
-
/*
* Abort the CPU online process if hardware virtualization cannot
* be enabled. Otherwise running VMs would encounter unrecoverable
* errors when scheduled to this CPU.
*/
- mutex_lock(&kvm_lock);
- if (kvm_usage_count)
- ret = __hardware_enable_nolock();
- mutex_unlock(&kvm_lock);
- return ret;
+ return kvm_enable_virtualization_cpu();
}
-static void hardware_disable_nolock(void *junk)
+static void kvm_disable_virtualization_cpu(void *ign)
{
- /*
- * Note, hardware_disable_all_nolock() tells all online CPUs to disable
- * hardware, not just CPUs that successfully enabled hardware!
- */
- if (!__this_cpu_read(hardware_enabled))
+ if (!__this_cpu_read(virtualization_enabled))
return;
- kvm_arch_hardware_disable();
+ kvm_arch_disable_virtualization_cpu();
- __this_cpu_write(hardware_enabled, false);
+ __this_cpu_write(virtualization_enabled, false);
}
static int kvm_offline_cpu(unsigned int cpu)
{
- mutex_lock(&kvm_lock);
- if (kvm_usage_count)
- hardware_disable_nolock(NULL);
- mutex_unlock(&kvm_lock);
+ kvm_disable_virtualization_cpu(NULL);
return 0;
}
-static void hardware_disable_all_nolock(void)
-{
- BUG_ON(!kvm_usage_count);
-
- kvm_usage_count--;
- if (!kvm_usage_count)
- on_each_cpu(hardware_disable_nolock, NULL, 1);
-}
-
-static void hardware_disable_all(void)
-{
- cpus_read_lock();
- mutex_lock(&kvm_lock);
- hardware_disable_all_nolock();
- mutex_unlock(&kvm_lock);
- cpus_read_unlock();
-}
-
-static int hardware_enable_all(void)
-{
- atomic_t failed = ATOMIC_INIT(0);
- int r;
-
- /*
- * Do not enable hardware virtualization if the system is going down.
- * If userspace initiated a forced reboot, e.g. reboot -f, then it's
- * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
- * after kvm_reboot() is called. Note, this relies on system_state
- * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
- * hook instead of registering a dedicated reboot notifier (the latter
- * runs before system_state is updated).
- */
- if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
- system_state == SYSTEM_RESTART)
- return -EBUSY;
-
- /*
- * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
- * is called, and so on_each_cpu() between them includes the CPU that
- * is being onlined. As a result, hardware_enable_nolock() may get
- * invoked before kvm_online_cpu(), which also enables hardware if the
- * usage count is non-zero. Disable CPU hotplug to avoid attempting to
- * enable hardware multiple times.
- */
- cpus_read_lock();
- mutex_lock(&kvm_lock);
-
- r = 0;
-
- kvm_usage_count++;
- if (kvm_usage_count == 1) {
- on_each_cpu(hardware_enable_nolock, &failed, 1);
-
- if (atomic_read(&failed)) {
- hardware_disable_all_nolock();
- r = -EBUSY;
- }
- }
-
- mutex_unlock(&kvm_lock);
- cpus_read_unlock();
-
- return r;
-}
-
static void kvm_shutdown(void)
{
/*
@@ -5717,34 +5651,32 @@ static void kvm_shutdown(void)
*/
pr_info("kvm: exiting hardware virtualization\n");
kvm_rebooting = true;
- on_each_cpu(hardware_disable_nolock, NULL, 1);
+ on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
}
static int kvm_suspend(void)
{
/*
* Secondary CPUs and CPU hotplug are disabled across the suspend/resume
- * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
- * is stable. Assert that kvm_lock is not held to ensure the system
- * isn't suspended while KVM is enabling hardware. Hardware enabling
- * can be preempted, but the task cannot be frozen until it has dropped
- * all locks (userspace tasks are frozen via a fake signal).
+ * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
+ * count is stable. Assert that kvm_usage_lock is not held to ensure
+ * the system isn't suspended while KVM is enabling hardware. Hardware
+ * enabling can be preempted, but the task cannot be frozen until it has
+ * dropped all locks (userspace tasks are frozen via a fake signal).
*/
- lockdep_assert_not_held(&kvm_lock);
+ lockdep_assert_not_held(&kvm_usage_lock);
lockdep_assert_irqs_disabled();
- if (kvm_usage_count)
- hardware_disable_nolock(NULL);
+ kvm_disable_virtualization_cpu(NULL);
return 0;
}
static void kvm_resume(void)
{
- lockdep_assert_not_held(&kvm_lock);
+ lockdep_assert_not_held(&kvm_usage_lock);
lockdep_assert_irqs_disabled();
- if (kvm_usage_count)
- WARN_ON_ONCE(__hardware_enable_nolock());
+ WARN_ON_ONCE(kvm_enable_virtualization_cpu());
}
static struct syscore_ops kvm_syscore_ops = {
@@ -5752,13 +5684,95 @@ static struct syscore_ops kvm_syscore_ops = {
.resume = kvm_resume,
.shutdown = kvm_shutdown,
};
+
+static int kvm_enable_virtualization(void)
+{
+ int r;
+
+ guard(mutex)(&kvm_usage_lock);
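+	/* The scope-based guard drops kvm_usage_lock on every return path. */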
+
+ if (kvm_usage_count++)
+ return 0;
+
+ kvm_arch_enable_virtualization();
+
+ r = cpuhp_setup_state(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
+ kvm_online_cpu, kvm_offline_cpu);
+ if (r)
+ goto err_cpuhp;
+
+ register_syscore_ops(&kvm_syscore_ops);
+
+ /*
+ * Undo virtualization enabling and bail if the system is going down.
+ * If userspace initiated a forced reboot, e.g. reboot -f, then it's
+ * possible for an in-flight operation to enable virtualization after
+ * syscore_shutdown() is called, i.e. without kvm_shutdown() being
+ * invoked. Note, this relies on system_state being set _before_
+ * kvm_shutdown(), e.g. to ensure either kvm_shutdown() is invoked
+ * or this CPU observes the impending shutdown. Which is why KVM uses
+ * a syscore ops hook instead of registering a dedicated reboot
+ * notifier (the latter runs before system_state is updated).
+ */
+ if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
+ system_state == SYSTEM_RESTART) {
+ r = -EBUSY;
+ goto err_rebooting;
+ }
+
+ return 0;
+
+err_rebooting:
+ unregister_syscore_ops(&kvm_syscore_ops);
+ cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
+err_cpuhp:
+ kvm_arch_disable_virtualization();
+ --kvm_usage_count;
+ return r;
+}
+
+static void kvm_disable_virtualization(void)
+{
+ guard(mutex)(&kvm_usage_lock);
+
+ if (--kvm_usage_count)
+ return;
+
+ unregister_syscore_ops(&kvm_syscore_ops);
+ cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
+ kvm_arch_disable_virtualization();
+}
+
+static int kvm_init_virtualization(void)
+{
+ if (enable_virt_at_load)
+ return kvm_enable_virtualization();
+
+ return 0;
+}
+
+static void kvm_uninit_virtualization(void)
+{
+ if (enable_virt_at_load)
+ kvm_disable_virtualization();
+}
#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
-static int hardware_enable_all(void)
+static int kvm_enable_virtualization(void)
+{
+ return 0;
+}
+
+static int kvm_init_virtualization(void)
{
return 0;
}
-static void hardware_disable_all(void)
+static void kvm_disable_virtualization(void)
+{
+
+}
+
+static void kvm_uninit_virtualization(void)
{
}
@@ -6191,7 +6205,6 @@ static const struct file_operations stat_fops_per_vm = {
.release = kvm_debugfs_release,
.read = simple_attr_read,
.write = simple_attr_write,
- .llseek = no_llseek,
};
static int vm_stat_get(void *_offset, u64 *val)
@@ -6460,15 +6473,6 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
int r;
int cpu;
-#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
- r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
- kvm_online_cpu, kvm_offline_cpu);
- if (r)
- return r;
-
- register_syscore_ops(&kvm_syscore_ops);
-#endif
-
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
@@ -6479,10 +6483,8 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
offsetofend(struct kvm_vcpu, stats_id)
- offsetof(struct kvm_vcpu, arch),
NULL);
- if (!kvm_vcpu_cache) {
- r = -ENOMEM;
- goto err_vcpu_cache;
- }
+ if (!kvm_vcpu_cache)
+ return -ENOMEM;
for_each_possible_cpu(cpu) {
if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
@@ -6516,6 +6518,10 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
kvm_gmem_init(module);
+ r = kvm_init_virtualization();
+ if (r)
+ goto err_virt;
+
/*
* Registration _must_ be the very last thing done, as this exposes
* /dev/kvm to userspace, i.e. all infrastructure must be setup!
@@ -6529,6 +6535,8 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
return 0;
err_register:
+ kvm_uninit_virtualization();
+err_virt:
kvm_vfio_ops_exit();
err_vfio:
kvm_async_pf_deinit();
@@ -6539,11 +6547,6 @@ err_cpu_kick_mask:
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
-err_vcpu_cache:
-#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
- unregister_syscore_ops(&kvm_syscore_ops);
- cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
-#endif
return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
@@ -6559,16 +6562,14 @@ void kvm_exit(void)
*/
misc_deregister(&kvm_dev);
+ kvm_uninit_virtualization();
+
debugfs_remove_recursive(kvm_debugfs_dir);
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
kvm_vfio_ops_exit();
kvm_async_pf_deinit();
-#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
- unregister_syscore_ops(&kvm_syscore_ops);
- cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
-#endif
kvm_irqfd_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 76b7f6085dcd..388ae471d258 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -194,7 +194,7 @@ static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
int ret;
f = fdget(fd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
ret = -ENOENT;
@@ -202,7 +202,7 @@ static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
- if (kvf->file != f.file)
+ if (kvf->file != fd_file(f))
continue;
list_del(&kvf->node);
@@ -240,7 +240,7 @@ static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
return -EFAULT;
f = fdget(param.groupfd);
- if (!f.file)
+ if (!fd_file(f))
return -EBADF;
ret = -ENOENT;
@@ -248,7 +248,7 @@ static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
- if (kvf->file != f.file)
+ if (kvf->file != fd_file(f))
continue;
if (!kvf->iommu_group) {